diff --git a/pve/Makefile b/pve/Makefile
new file mode 100644
index 0000000..025e97e
--- /dev/null
+++ b/pve/Makefile
@@ -0,0 +1,43 @@
+#!/usr/bin/make -f
+
+include ../scripts/check.mk
+
+PACKER ?= packer
+PACKER_LOG ?= 0
+export PACKER_LOG
+
+BOOT ?= uefi
+OVMF ?= OVMF
+
+ifeq ($(wildcard /usr/share/$(OVMF)/$(OVMF)_CODE.fd),)
+	OVMF_SFX ?= _4M
+else
+	OVMF_SFX ?=
+endif
+
+.PHONY: all clean
+
+all: pve
+
+$(eval $(call check_packages_deps,cloud-image-utils ovmf,cloud-image-utils ovmf))
+
+lint:
+	packer validate .
+	packer fmt -check -diff .
+
+format:
+	packer fmt .
+
+OVMF_VARS.fd: /usr/share/${OVMF}/${OVMF}_VARS${OVMF_SFX}.fd
+	cp -v $< OVMF_VARS.fd
+
+OVMF_CODE.fd: /usr/share/${OVMF}/${OVMF}_CODE${OVMF_SFX}.fd
+	cp -v $< OVMF_CODE.fd
+
+pve: check-deps clean OVMF_CODE.fd OVMF_VARS.fd
+	${PACKER} init . && ${PACKER} build .
+
+clean:
+	${RM} -rf output-* \
+		OVMF_VARS.fd \
+		OVMF_CODE.fd
diff --git a/pve/README.md b/pve/README.md
new file mode 100644
index 0000000..ceb0ac8
--- /dev/null
+++ b/pve/README.md
@@ -0,0 +1,97 @@
+# Proxmox Virtual Environment (PVE) Packer Templates for MAAS
+
+## Introduction
+
+The Packer templates in this directory create a PVE image for use with MAAS.
+
+## Prerequisites (to create the image)
+
+* A machine running Ubuntu 18.04+ with the ability to run KVM virtual machines.
+* qemu-utils, libnbd-bin, nbdkit and fuse2fs
+* qemu-system
+* ovmf
+* cloud-image-utils
+* [Packer](https://www.packer.io/intro/getting-started/install.html), v1.7.0 or newer
+
+## Requirements (to deploy the image)
+
+* [MAAS](https://maas.io) 3.2+
+* [Curtin](https://launchpad.net/curtin) 21.0+
+* Create a vmbr0 bridge in MAAS with your interfaces.
+
+## Supported Proxmox Versions
+
+The builds and deployments have been tested on MAAS 3.5.4 with Noble ephemeral images,
+in UEFI mode. The process currently works with the following PVE versions:
+
+* Proxmox Virtual Environment 8.4-1
+
+## Supported Architectures
+
+Currently, only the amd64 (x86_64) architecture is supported.
+
+## Known Issues
+
+* After deployment, PXE booting directly into PVE does not work.
+* Proxmox services will not start because /etc/hosts maps the hostname/FQDN to
+  127.0.0.1. Change this entry to the machine's correct IP address.
+
+## pve.pkr.hcl
+
+This template builds a dd.gz image from the official Proxmox ISO.
+
+### Building the image
+
+To build the image, run Packer with this template, which applies all of the
+customization scripts:
+
+```shell
+packer init .
+packer build .
+```
+
+Using make:
+
+```shell
+make pve
+```
+
+### Custom Preseed for Proxmox
+
+As mentioned above, PVE images require a custom preseed file to be present in the
+preseeds directory of the MAAS region controllers.
+
+When MAAS is installed from the snap, the path is /var/snap/maas/current/preseeds/curtin_userdata_custom
+
+An example ready-to-use preseed file is included with this repository. Please
+see curtin_userdata_custom_amd64.
+
+**Please be aware** that this could potentially create a conflict with other custom
+images present in your setup. To work around a conflict, the preseed file can be renamed
+to something like curtin_userdata_custom_amd64_generic_pve-8-4-1, assuming
+the architecture was set to amd64/generic and the uploaded **name** was set to custom/pve-8-4-1.
+
+In other words, depending on the image name parameter used during the import, the preseed
+file(s) can be renamed to apply in a targeted manner.
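+
+For example, on a region controller running the MAAS snap, the bundled example
+preseed could be copied into place under such a renamed filename (a sketch; adjust
+the path, architecture suffix, and image name to match your installation and the
+upload example further below):
+
+```shell
+sudo cp curtin_userdata_custom_amd64 \
+    /var/snap/maas/current/preseeds/curtin_userdata_custom_amd64_generic_pve-8-4-1
+```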
+ +For more information about the preseed file naming schema, see +[Custom node setup (Preseed)](https://github.com/CanonicalLtd/maas-docs/blob/master/en/nodes-custom.md) and +[Preseed filenames](https://github.com/canonical/maas/blob/master/src/maasserver/preseed.py#L756). + +### Makefile Parameters + +#### PACKER_LOG + +Enable (1) or Disable (0) verbose packer logs. The default value is set to 0. + +## Uploading images to MAAS + +DD.GZ + +```shell +maas $PROFILE boot-resources create \ + name='custom/pve-8-4-1' \ + title='Proxmox VE 8.4-1' \ + architecture='amd64/generic' \ + filetype='ddgz' \ + content@=pve_lvm.dd.gz +``` \ No newline at end of file diff --git a/pve/curtin_userdata_custom_amd64 b/pve/curtin_userdata_custom_amd64 new file mode 100644 index 0000000..09f9445 --- /dev/null +++ b/pve/curtin_userdata_custom_amd64 @@ -0,0 +1,24 @@ +#cloud-config +kernel: + fallback-package: proxmox-default-kernel + package: proxmox-default-kernel + +apt: + preserve_sources_list: true + +debconf_selections: + maas: | + {{for line in str(curtin_preseed).splitlines()}} + {{line}} + {{endfor}} + +late_commands: + maas: [wget, '--no-proxy', '{{node_disable_pxe_url}}', '--post-data', '{{node_disable_pxe_data}}', '-O', '/dev/null'] + late_1: mount --bind $TARGET_MOUNT_POINT /mnt + late_2: grep -A2 datasource /etc/cloud/cloud.cfg.d/91_kernel_cmdline_url.cfg | sed 's/curtin//' | tee /mnt/etc/cloud/cloud.cfg.d/debian.cfg + late_3: sed -i 's@ubuntu.com/ubuntu@debian.org/debian@g;s@archive@deb@g;s@ubuntu@debian@g;s@Ubuntu@Debian@g;s@security.debian.org/debian@security.debian.org@g' /mnt/etc/cloud/cloud.cfg + late_5: debver=$(cat /mnt/etc/debian_version | awk -F. '{print $1}'); if [ ${debver} -eq 11 ]; then rel="bullseye"; elif [ ${debver} -eq 12 ]; then rel="bookworm"; elif [ ${debver} -eq 13 ]; then rel="trixie"; fi; sed -i s/stable/${rel}/g /mnt/etc/apt/sources.list; + late_6: sed -i '/^set -e/{n;N;d}' /mnt/etc/kernel/postinst.d/zz-update-grub + late_7: rm -f /usr/local/bin/dpkg-query + late_8: rm -f /usr/local/bin/netplan + diff --git a/pve/files/99_eni.cfg b/pve/files/99_eni.cfg new file mode 100644 index 0000000..26d2bda --- /dev/null +++ b/pve/files/99_eni.cfg @@ -0,0 +1,4 @@ +system_info: + network: + renderers: ['eni'] + activators: ['eni'] \ No newline at end of file diff --git a/pve/files/interfaces b/pve/files/interfaces new file mode 100644 index 0000000..2c56f38 --- /dev/null +++ b/pve/files/interfaces @@ -0,0 +1,8 @@ +auto lo +iface lo inet loopback + +auto eth0 +allow-hotplug eth0 +iface eth0 inet dhcp + +source /etc/network/interfaces.d/* \ No newline at end of file diff --git a/pve/files/network_state.py b/pve/files/network_state.py new file mode 100644 index 0000000..7784558 --- /dev/null +++ b/pve/files/network_state.py @@ -0,0 +1,1127 @@ +# Copyright (C) 2017 Canonical Ltd. +# +# Author: Ryan Harper +# +# This file is part of cloud-init. See LICENSE file for license information. 
+ +import copy +import functools +import logging +from typing import TYPE_CHECKING, Any, Dict, Optional + +from cloudinit import safeyaml, util +from cloudinit.net import ( + find_interface_name_from_mac, + get_interfaces_by_mac, + ipv4_mask_to_net_prefix, + ipv6_mask_to_net_prefix, + is_ip_network, + is_ipv4_network, + is_ipv6_address, + is_ipv6_network, + net_prefix_to_ipv4_mask, +) + +if TYPE_CHECKING: + from cloudinit.net.renderer import Renderer + +LOG = logging.getLogger(__name__) + +NETWORK_STATE_VERSION = 1 +NETWORK_STATE_REQUIRED_KEYS = { + 1: ["version", "config", "network_state"], +} +NETWORK_V2_KEY_FILTER = [ + "addresses", + "dhcp4", + "dhcp4-overrides", + "dhcp6", + "dhcp6-overrides", + "gateway4", + "gateway6", + "interfaces", + "match", + "mtu", + "nameservers", + "renderer", + "set-name", + "wakeonlan", + "accept-ra", +] + +NET_CONFIG_TO_V2: Dict[str, Dict[str, Any]] = { + "bond": { + "bond-ad-select": "ad-select", + "bond-arp-interval": "arp-interval", + "bond-arp-ip-target": "arp-ip-target", + "bond-arp-validate": "arp-validate", + "bond-downdelay": "down-delay", + "bond-fail-over-mac": "fail-over-mac-policy", + "bond-lacp-rate": "lacp-rate", + "bond-miimon": "mii-monitor-interval", + "bond-min-links": "min-links", + "bond-mode": "mode", + "bond-num-grat-arp": "gratuitous-arp", + "bond-primary": "primary", + "bond-primary-reselect": "primary-reselect-policy", + "bond-updelay": "up-delay", + "bond-xmit-hash-policy": "transmit-hash-policy", + }, + "bridge": { + "bridge_ageing": "ageing-time", + "bridge_bridgeprio": "priority", + "bridge_fd": "forward-delay", + "bridge_gcint": None, + "bridge_hello": "hello-time", + "bridge_maxage": "max-age", + "bridge_maxwait": None, + "bridge_pathcost": "path-cost", + "bridge_portprio": "port-priority", + "bridge_stp": "stp", + "bridge_waitport": None, + }, +} + + +def warn_deprecated_all_devices(dikt: dict) -> None: + """Warn about deprecations of v2 properties for all devices""" + if "gateway4" in dikt or "gateway6" in dikt: + LOG.warning( + "DEPRECATED: The use of `gateway4` and `gateway6` is" + " deprecated. For more info check out: " + "https://cloudinit.readthedocs.io/en/latest/topics/network-config-format-v2.html" # noqa: E501 + ) + + +def from_state_file(state_file): + state = util.read_conf(state_file) + nsi = NetworkStateInterpreter() + nsi.load(state) + return nsi + + +def diff_keys(expected, actual): + missing = set(expected) + for key in actual: + missing.discard(key) + return missing + + +class InvalidCommand(Exception): + pass + + +def ensure_command_keys(required_keys): + def wrapper(func): + @functools.wraps(func) + def decorator(self, command, *args, **kwargs): + if required_keys: + missing_keys = diff_keys(required_keys, command) + if missing_keys: + raise InvalidCommand( + "Command missing %s of required keys %s" + % (missing_keys, required_keys) + ) + return func(self, command, *args, **kwargs) + + return decorator + + return wrapper + + +class CommandHandlerMeta(type): + """Metaclass that dynamically creates a 'command_handlers' attribute. + + This will scan the to-be-created class for methods that start with + 'handle_' and on finding those will populate a class attribute mapping + so that those methods can be quickly located and called. 
+ """ + + def __new__(cls, name, parents, dct): + command_handlers = {} + for attr_name, attr in dct.items(): + if callable(attr) and attr_name.startswith("handle_"): + handles_what = attr_name[len("handle_") :] + if handles_what: + command_handlers[handles_what] = attr + dct["command_handlers"] = command_handlers + return super(CommandHandlerMeta, cls).__new__(cls, name, parents, dct) + + +class NetworkState: + def __init__( + self, network_state: dict, version: int = NETWORK_STATE_VERSION + ): + self._network_state = copy.deepcopy(network_state) + self._version = version + self.use_ipv6 = network_state.get("use_ipv6", False) + self._has_default_route = None + + @property + def config(self) -> dict: + return self._network_state["config"] + + @property + def version(self): + return self._version + + @property + def dns_nameservers(self): + try: + return self._network_state["dns"]["nameservers"] + except KeyError: + return [] + + @property + def dns_searchdomains(self): + try: + return self._network_state["dns"]["search"] + except KeyError: + return [] + + @property + def has_default_route(self): + if self._has_default_route is None: + self._has_default_route = self._maybe_has_default_route() + return self._has_default_route + + def iter_interfaces(self, filter_func=None): + ifaces = self._network_state.get("interfaces", {}) + for iface in ifaces.values(): + if filter_func is None: + yield iface + else: + if filter_func(iface): + yield iface + + def iter_routes(self, filter_func=None): + for route in self._network_state.get("routes", []): + if filter_func is not None: + if filter_func(route): + yield route + else: + yield route + + def _maybe_has_default_route(self): + for route in self.iter_routes(): + if self._is_default_route(route): + return True + for iface in self.iter_interfaces(): + for subnet in iface.get("subnets", []): + for route in subnet.get("routes", []): + if self._is_default_route(route): + return True + return False + + def _is_default_route(self, route): + default_nets = ("::", "0.0.0.0") + return ( + route.get("prefix") == 0 and route.get("network") in default_nets + ) + + @classmethod + def to_passthrough(cls, network_state: dict) -> "NetworkState": + """Instantiates a `NetworkState` without interpreting its data. + + That means only `config` and `version` are copied. + + :param network_state: Network state data. + :return: Instance of `NetworkState`. 
+ """ + kwargs = {} + if "version" in network_state: + kwargs["version"] = network_state["version"] + return cls({"config": network_state}, **kwargs) + + +class NetworkStateInterpreter(metaclass=CommandHandlerMeta): + + initial_network_state = { + "interfaces": {}, + "routes": [], + "dns": { + "nameservers": [], + "search": [], + }, + "use_ipv6": False, + "config": None, + } + + def __init__( + self, + version=NETWORK_STATE_VERSION, + config=None, + renderer: "Optional[Renderer]" = None, + ): + self._version = version + self._config = config + self._network_state = copy.deepcopy(self.initial_network_state) + self._network_state["config"] = config + self._parsed = False + self._interface_dns_map: dict = {} + self._renderer = renderer + + @property + def network_state(self) -> NetworkState: + from cloudinit.net.netplan import Renderer as NetplanRenderer + + if self._version == 2 and isinstance(self._renderer, NetplanRenderer): + LOG.debug("Passthrough netplan v2 config") + return NetworkState.to_passthrough(self._config) + return NetworkState(self._network_state, version=self._version) + + @property + def use_ipv6(self): + return self._network_state.get("use_ipv6") + + @use_ipv6.setter + def use_ipv6(self, val): + self._network_state.update({"use_ipv6": val}) + + def dump(self): + state = { + "version": self._version, + "config": self._config, + "network_state": self._network_state, + } + return safeyaml.dumps(state) + + def load(self, state): + if "version" not in state: + LOG.error("Invalid state, missing version field") + raise ValueError("Invalid state, missing version field") + + required_keys = NETWORK_STATE_REQUIRED_KEYS[state["version"]] + missing_keys = diff_keys(required_keys, state) + if missing_keys: + msg = "Invalid state, missing keys: %s" % (missing_keys) + LOG.error(msg) + raise ValueError(msg) + + # v1 - direct attr mapping, except version + for key in [k for k in required_keys if k not in ["version"]]: + setattr(self, key, state[key]) + + def dump_network_state(self): + return safeyaml.dumps(self._network_state) + + def as_dict(self): + return {"version": self._version, "config": self._config} + + def parse_config(self, skip_broken=True): + if self._version == 1: + self.parse_config_v1(skip_broken=skip_broken) + self._parsed = True + elif self._version == 2: + self.parse_config_v2(skip_broken=skip_broken) + self._parsed = True + + def parse_config_v1(self, skip_broken=True): + for command in self._config: + command_type = command["type"] + try: + handler = self.command_handlers[command_type] + except KeyError as e: + raise RuntimeError( + "No handler found for command '%s'" % command_type + ) from e + try: + handler(self, command) + except InvalidCommand: + if not skip_broken: + raise + else: + LOG.warning( + "Skipping invalid command: %s", command, exc_info=True + ) + LOG.debug(self.dump_network_state()) + for interface, dns in self._interface_dns_map.items(): + iface = None + try: + iface = self._network_state["interfaces"][interface] + except KeyError as e: + raise ValueError( + "Nameserver specified for interface {0}, " + "but interface {0} does not exist!".format(interface) + ) from e + if iface: + nameservers, search = dns + iface["dns"] = { + "addresses": nameservers, + "search": search, + } + + def parse_config_v2(self, skip_broken=True): + from cloudinit.net.netplan import Renderer as NetplanRenderer + + if isinstance(self._renderer, NetplanRenderer): + # Nothing to parse as we are going to perform a Netplan passthrough + return + + for command_type, command in 
self._config.items(): + if command_type in ["version", "renderer"]: + continue + try: + handler = self.command_handlers[command_type] + except KeyError as e: + raise RuntimeError( + "No handler found for command '%s'" % command_type + ) from e + try: + handler(self, command) + self._v2_common(command) + except InvalidCommand: + if not skip_broken: + raise + else: + LOG.warning( + "Skipping invalid command: %s", command, exc_info=True + ) + LOG.debug(self.dump_network_state()) + + @ensure_command_keys(["name"]) + def handle_loopback(self, command): + return self.handle_physical(command) + + @ensure_command_keys(["name"]) + def handle_physical(self, command): + """ + command = { + 'type': 'physical', + 'mac_address': 'c0:d6:9f:2c:e8:80', + 'name': 'eth0', + 'subnets': [ + {'type': 'dhcp4'} + ], + 'accept-ra': 'true' + } + """ + + interfaces = self._network_state.get("interfaces", {}) + iface = interfaces.get(command["name"], {}) + for param, val in command.get("params", {}).items(): + iface.update({param: val}) + + # convert subnet ipv6 netmask to cidr as needed + subnets = _normalize_subnets(command.get("subnets")) + + # automatically set 'use_ipv6' if any addresses are ipv6 + if not self.use_ipv6: + for subnet in subnets: + if subnet.get("type").endswith("6") or is_ipv6_address( + subnet.get("address") + ): + self.use_ipv6 = True + break + + accept_ra = command.get("accept-ra", None) + if accept_ra is not None: + accept_ra = util.is_true(accept_ra) + wakeonlan = command.get("wakeonlan", None) + if wakeonlan is not None: + wakeonlan = util.is_true(wakeonlan) + iface.update( + { + "name": command.get("name"), + "type": command.get("type"), + "mac_address": command.get("mac_address"), + "inet": "inet", + "mode": "manual", + "mtu": command.get("mtu"), + "address": None, + "gateway": None, + "subnets": subnets, + "accept-ra": accept_ra, + "wakeonlan": wakeonlan, + } + ) + self._network_state["interfaces"].update({command.get("name"): iface}) + self.dump_network_state() + + @ensure_command_keys(["name", "vlan_id", "vlan_link"]) + def handle_vlan(self, command): + """ + auto eth0.222 + iface eth0.222 inet static + address 10.10.10.1 + netmask 255.255.255.0 + hwaddress ether BC:76:4E:06:96:B3 + vlan-raw-device eth0 + """ + interfaces = self._network_state.get("interfaces", {}) + self.handle_physical(command) + iface = interfaces.get(command.get("name"), {}) + iface["vlan-raw-device"] = command.get("vlan_link") + iface["vlan_id"] = command.get("vlan_id") + interfaces.update({iface["name"]: iface}) + + @ensure_command_keys(["name", "bond_interfaces", "params"]) + def handle_bond(self, command): + """ + #/etc/network/interfaces + auto eth0 + iface eth0 inet manual + bond-master bond0 + bond-mode 802.3ad + + auto eth1 + iface eth1 inet manual + bond-master bond0 + bond-mode 802.3ad + + auto bond0 + iface bond0 inet static + address 192.168.0.10 + gateway 192.168.0.1 + netmask 255.255.255.0 + bond-slaves none + bond-mode 802.3ad + bond-miimon 100 + bond-downdelay 200 + bond-updelay 200 + bond-lacp-rate 4 + """ + + self.handle_physical(command) + interfaces = self._network_state.get("interfaces") + iface = interfaces.get(command.get("name"), {}) + for param, val in command.get("params").items(): + iface.update({param: val}) + + bond_slaves = [] + + # handle bond slaves + for ifname in command.get("bond_interfaces"): + bond_slaves.append(ifname) + if ifname not in interfaces: + cmd = { + "name": ifname, + "type": "bond", + } + # inject placeholder + self.handle_physical(cmd) + + 
iface.update({"bond-slaves": " ".join(bond_slaves)}) + self._network_state["interfaces"].update({iface["name"]: iface}) + + @ensure_command_keys(["name", "bridge_interfaces"]) + def handle_bridge(self, command): + """ + auto br0 + iface br0 inet static + address 10.10.10.1 + netmask 255.255.255.0 + bridge_ports eth0 eth1 + bridge_stp off + bridge_fd 0 + bridge_maxwait 0 + + bridge_params = [ + "bridge_ports", + "bridge_ageing", + "bridge_bridgeprio", + "bridge_fd", + "bridge_gcint", + "bridge_hello", + "bridge_hw", + "bridge_maxage", + "bridge_maxwait", + "bridge_pathcost", + "bridge_portprio", + "bridge_stp", + "bridge_waitport", + ] + """ + + # find one of the bridge port ifaces to get mac_addr + # handle bridge_slaves + interfaces = self._network_state.get("interfaces", {}) + for ifname in command.get("bridge_interfaces"): + if ifname in interfaces: + continue + + cmd = { + "name": ifname, + } + # inject placeholder + self.handle_physical(cmd) + + interfaces = self._network_state.get("interfaces", {}) + self.handle_physical(command) + iface = interfaces.get(command.get("name"), {}) + iface["bridge_ports"] = command["bridge_interfaces"] + for param, val in command.get("params", {}).items(): + iface.update({param: val}) + + # convert value to boolean + bridge_stp = iface.get("bridge_stp") + if bridge_stp is not None and type(bridge_stp) != bool: + if bridge_stp in ["on", "1", 1]: + bridge_stp = True + elif bridge_stp in ["off", "0", 0]: + bridge_stp = False + else: + raise ValueError( + "Cannot convert bridge_stp value ({stp}) to" + " boolean".format(stp=bridge_stp) + ) + iface.update({"bridge_stp": bridge_stp}) + + interfaces.update({iface["name"]: iface}) + + @ensure_command_keys(["name"]) + def handle_infiniband(self, command): + self.handle_physical(command) + + def _parse_dns(self, command): + nameservers = [] + search = [] + if "address" in command: + addrs = command["address"] + if not type(addrs) == list: + addrs = [addrs] + for addr in addrs: + nameservers.append(addr) + if "search" in command: + paths = command["search"] + if not isinstance(paths, list): + paths = [paths] + for path in paths: + search.append(path) + return nameservers, search + + @ensure_command_keys(["address"]) + def handle_nameserver(self, command): + dns = self._network_state.get("dns") + nameservers, search = self._parse_dns(command) + if "interface" in command: + self._interface_dns_map[command["interface"]] = ( + nameservers, + search, + ) + else: + dns["nameservers"].extend(nameservers) + dns["search"].extend(search) + + @ensure_command_keys(["address"]) + def _handle_individual_nameserver(self, command, iface): + _iface = self._network_state.get("interfaces") + nameservers, search = self._parse_dns(command) + _iface[iface]["dns"] = {"nameservers": nameservers, "search": search} + + @ensure_command_keys(["destination"]) + def handle_route(self, command): + self._network_state["routes"].append(_normalize_route(command)) + + # V2 handlers + def handle_bonds(self, command): + """ + v2_command = { + bond0: { + 'interfaces': ['interface0', 'interface1'], + 'parameters': { + 'mii-monitor-interval': 100, + 'mode': '802.3ad', + 'xmit_hash_policy': 'layer3+4'}}, + bond1: { + 'bond-slaves': ['interface2', 'interface7'], + 'parameters': { + 'mode': 1, + } + } + } + + v1_command = { + 'type': 'bond' + 'name': 'bond0', + 'bond_interfaces': [interface0, interface1], + 'params': { + 'bond-mode': '802.3ad', + 'bond_miimon: 100, + 'bond_xmit_hash_policy': 'layer3+4', + } + } + + """ + self._handle_bond_bridge(command, 
cmd_type="bond") + + def handle_bridges(self, command): + + """ + v2_command = { + br0: { + 'interfaces': ['interface0', 'interface1'], + 'forward-delay': 0, + 'stp': False, + 'maxwait': 0, + } + } + + v1_command = { + 'type': 'bridge' + 'name': 'br0', + 'bridge_interfaces': [interface0, interface1], + 'params': { + 'bridge_stp': 'off', + 'bridge_fd: 0, + 'bridge_maxwait': 0 + } + } + + """ + self._handle_bond_bridge(command, cmd_type="bridge") + + def handle_ethernets(self, command): + """ + ethernets: + eno1: + match: + macaddress: 00:11:22:33:44:55 + driver: hv_netsvc + wakeonlan: true + dhcp4: true + dhcp6: false + addresses: + - 192.168.14.2/24 + - 2001:1::1/64 + gateway4: 192.168.14.1 + gateway6: 2001:1::2 + nameservers: + search: [foo.local, bar.local] + addresses: [8.8.8.8, 8.8.4.4] + lom: + match: + driver: ixgbe + set-name: lom1 + dhcp6: true + accept-ra: true + switchports: + match: + name: enp2* + mtu: 1280 + + command = { + 'type': 'physical', + 'mac_address': 'c0:d6:9f:2c:e8:80', + 'name': 'eth0', + 'subnets': [ + {'type': 'dhcp4'} + ] + } + """ + + # Get the interfaces by MAC address to update an interface's + # device name to the name of the device that matches a provided + # MAC address when the set-name directive is not present. + # + # Please see https://bugs.launchpad.net/cloud-init/+bug/1855945 + # for more information. + ifaces_by_mac = get_interfaces_by_mac() + + for eth, cfg in command.items(): + phy_cmd = { + "type": "physical", + } + match = cfg.get("match", {}) + mac_address = match.get("macaddress", None) + if not mac_address: + LOG.debug( + 'NetworkState Version2: missing "macaddress" info ' + "in config entry: %s: %s", + eth, + str(cfg), + ) + phy_cmd["mac_address"] = mac_address + + # Determine the name of the interface by using one of the + # following in the order they are listed: + # * set-name + # * interface name looked up by mac + # * value of "eth" key from this loop + name = eth + set_name = cfg.get("set-name") + if set_name: + name = set_name + elif mac_address and ifaces_by_mac: + lcase_mac_address = mac_address.lower() + mac = find_interface_name_from_mac(lcase_mac_address) + if mac: + name = mac + phy_cmd["name"] = name + + driver = match.get("driver", None) + if driver: + phy_cmd["params"] = {"driver": driver} + for key in ["mtu", "match", "wakeonlan", "accept-ra"]: + if key in cfg: + phy_cmd[key] = cfg[key] + + warn_deprecated_all_devices(cfg) + + subnets = self._v2_to_v1_ipcfg(cfg) + if len(subnets) > 0: + phy_cmd.update({"subnets": subnets}) + + LOG.debug("v2(ethernets) -> v1(physical):\n%s", phy_cmd) + self.handle_physical(phy_cmd) + + def handle_vlans(self, command): + """ + v2_vlans = { + 'eth0.123': { + 'id': 123, + 'link': 'eth0', + 'dhcp4': True, + } + } + + v1_command = { + 'type': 'vlan', + 'name': 'eth0.123', + 'vlan_link': 'eth0', + 'vlan_id': 123, + 'subnets': [{'type': 'dhcp4'}], + } + """ + for vlan, cfg in command.items(): + vlan_cmd = { + "type": "vlan", + "name": vlan, + "vlan_id": cfg.get("id"), + "vlan_link": cfg.get("link"), + } + if "mtu" in cfg: + vlan_cmd["mtu"] = cfg["mtu"] + warn_deprecated_all_devices(cfg) + subnets = self._v2_to_v1_ipcfg(cfg) + if len(subnets) > 0: + vlan_cmd.update({"subnets": subnets}) + LOG.debug("v2(vlans) -> v1(vlan):\n%s", vlan_cmd) + self.handle_vlan(vlan_cmd) + + def handle_wifis(self, command): + LOG.warning( + "Wifi configuration is only available to distros with" + " netplan rendering support." 
+ ) + + def _v2_common(self, cfg) -> None: + LOG.debug("v2_common: handling config:\n%s", cfg) + for iface, dev_cfg in cfg.items(): + if "set-name" in dev_cfg: + set_name_iface = dev_cfg.get("set-name") + if set_name_iface: + iface = set_name_iface + if "nameservers" in dev_cfg: + search = dev_cfg.get("nameservers").get("search", []) + dns = dev_cfg.get("nameservers").get("addresses", []) + name_cmd = {"type": "nameserver"} + if len(search) > 0: + name_cmd.update({"search": search}) + if len(dns) > 0: + name_cmd.update({"address": dns}) + self.handle_nameserver(name_cmd) + + mac_address: Optional[str] = dev_cfg.get("match", {}).get( + "macaddress" + ) + if mac_address: + real_if_name = find_interface_name_from_mac(mac_address) + if real_if_name: + iface = real_if_name + + self._handle_individual_nameserver(name_cmd, iface) + + def _handle_bond_bridge(self, command, cmd_type=None): + """Common handler for bond and bridge types""" + + # inverse mapping for v2 keynames to v1 keynames + v2key_to_v1 = dict( + (v, k) for k, v in NET_CONFIG_TO_V2.get(cmd_type).items() + ) + + for item_name, item_cfg in command.items(): + item_params = dict( + (key, value) + for (key, value) in item_cfg.items() + if key not in NETWORK_V2_KEY_FILTER + ) + # We accept both spellings (as netplan does). LP: #1756701 + # Normalize internally to the new spelling: + params = item_params.get("parameters", {}) + grat_value = params.pop("gratuitious-arp", None) + if grat_value: + params["gratuitous-arp"] = grat_value + + v1_cmd = { + "type": cmd_type, + "name": item_name, + cmd_type + "_interfaces": item_cfg.get("interfaces"), + "params": dict((v2key_to_v1[k], v) for k, v in params.items()), + } + if "mtu" in item_cfg: + v1_cmd["mtu"] = item_cfg["mtu"] + + warn_deprecated_all_devices(item_cfg) + subnets = self._v2_to_v1_ipcfg(item_cfg) + if len(subnets) > 0: + v1_cmd.update({"subnets": subnets}) + + LOG.debug("v2(%s) -> v1(%s):\n%s", cmd_type, cmd_type, v1_cmd) + if cmd_type == "bridge": + self.handle_bridge(v1_cmd) + elif cmd_type == "bond": + self.handle_bond(v1_cmd) + else: + raise ValueError( + "Unknown command type: {cmd_type}".format( + cmd_type=cmd_type + ) + ) + + def _v2_to_v1_ipcfg(self, cfg): + """Common ipconfig extraction from v2 to v1 subnets array.""" + + def _add_dhcp_overrides(overrides, subnet): + if "route-metric" in overrides: + subnet["metric"] = overrides["route-metric"] + + subnets = [] + if cfg.get("dhcp4"): + subnet = {"type": "dhcp4"} + _add_dhcp_overrides(cfg.get("dhcp4-overrides", {}), subnet) + subnets.append(subnet) + if cfg.get("dhcp6"): + subnet = {"type": "dhcp6"} + self.use_ipv6 = True + _add_dhcp_overrides(cfg.get("dhcp6-overrides", {}), subnet) + subnets.append(subnet) + + gateway4 = None + gateway6 = None + nameservers = {} + for address in cfg.get("addresses", []): + subnet = { + "type": "static", + "address": address, + } + + if ":" in address: + if "gateway6" in cfg and gateway6 is None: + gateway6 = cfg.get("gateway6") + subnet.update({"gateway": gateway6}) + else: + if "gateway4" in cfg and gateway4 is None: + gateway4 = cfg.get("gateway4") + subnet.update({"gateway": gateway4}) + + if "nameservers" in cfg and not nameservers: + addresses = cfg.get("nameservers").get("addresses") + if addresses: + nameservers["dns_nameservers"] = addresses + search = cfg.get("nameservers").get("search") + if search: + nameservers["dns_search"] = search + subnet.update(nameservers) + + subnets.append(subnet) + + routes = [] + for route in cfg.get("routes", []): + routes.append( + _normalize_route( + 
{ + "destination": route.get("to"), + "gateway": route.get("via"), + } + ) + ) + + # v2 routes are bound to the interface, in v1 we add them under + # the first subnet since there isn't an equivalent interface level. + if len(subnets) and len(routes): + subnets[0]["routes"] = routes + + return subnets + + +def _normalize_subnet(subnet): + # Prune all keys with None values. + subnet = copy.deepcopy(subnet) + normal_subnet = dict((k, v) for k, v in subnet.items() if v) + + if subnet.get("type") in ("static", "static6"): + normal_subnet.update( + _normalize_net_keys( + normal_subnet, + address_keys=( + "address", + "ip_address", + ), + ) + ) + normal_subnet["routes"] = [ + _normalize_route(r) for r in subnet.get("routes", []) + ] + + def listify(snet, name): + if name in snet and not isinstance(snet[name], list): + snet[name] = snet[name].split() + + for k in ("dns_search", "dns_nameservers"): + listify(normal_subnet, k) + + return normal_subnet + + +def _normalize_net_keys(network, address_keys=()): + """Normalize dictionary network keys returning prefix and address keys. + + @param network: A dict of network-related definition containing prefix, + netmask and address_keys. + @param address_keys: A tuple of keys to search for representing the address + or cidr. The first address_key discovered will be used for + normalization. + + @returns: A dict containing normalized prefix and matching addr_key. + """ + net = {k: v for k, v in network.items() if v or v == 0} + addr_key = None + for key in address_keys: + if net.get(key): + addr_key = key + break + if not addr_key: + message = "No config network address keys [%s] found in %s" % ( + ",".join(address_keys), + network, + ) + LOG.error(message) + raise ValueError(message) + + addr = str(net.get(addr_key)) + if not is_ip_network(addr): + LOG.error("Address %s is not a valid ip network", addr) + raise ValueError(f"Address {addr} is not a valid ip address") + + ipv6 = is_ipv6_network(addr) + ipv4 = is_ipv4_network(addr) + + netmask = net.get("netmask") + if "/" in addr: + addr_part, _, maybe_prefix = addr.partition("/") + net[addr_key] = addr_part + if ipv6: + # this supports input of ffff:ffff:ffff:: + prefix = ipv6_mask_to_net_prefix(maybe_prefix) + elif ipv4: + # this supports input of 255.255.255.0 + prefix = ipv4_mask_to_net_prefix(maybe_prefix) + else: + # In theory this never happens, is_ip_network() should catch all + # invalid networks + LOG.error("Address %s is not a valid ip network", addr) + raise ValueError(f"Address {addr} is not a valid ip address") + elif "prefix" in net: + prefix = int(net["prefix"]) + elif netmask and ipv4: + prefix = ipv4_mask_to_net_prefix(netmask) + elif netmask and ipv6: + prefix = ipv6_mask_to_net_prefix(netmask) + else: + prefix = 64 if ipv6 else 24 + + if "prefix" in net and str(net["prefix"]) != str(prefix): + LOG.warning( + "Overwriting existing 'prefix' with '%s' in network info: %s", + prefix, + net, + ) + net["prefix"] = prefix + + if ipv6: + # TODO: we could/maybe should add this back with the very uncommon + # 'netmask' for ipv6. We need a 'net_prefix_to_ipv6_mask' for that. + if "netmask" in net: + del net["netmask"] + elif ipv4: + net["netmask"] = net_prefix_to_ipv4_mask(net["prefix"]) + + return net + + +def _normalize_route(route): + """normalize a route. + return a dictionary with only: + 'type': 'route' (only present if it was present in input) + 'network': the network portion of the route as a string. + 'prefix': the network prefix for address as an integer. 
+ 'metric': integer metric (only if present in input). + 'netmask': netmask (string) equivalent to prefix iff network is ipv4. + """ + # Prune None-value keys. Specifically allow 0 (a valid metric). + normal_route = dict( + (k, v) for k, v in route.items() if v not in ("", None) + ) + if "destination" in normal_route: + normal_route["network"] = normal_route["destination"] + del normal_route["destination"] + + normal_route.update( + _normalize_net_keys( + normal_route, address_keys=("network", "destination") + ) + ) + + metric = normal_route.get("metric") + if metric: + try: + normal_route["metric"] = int(metric) + except ValueError as e: + raise TypeError( + "Route config metric {} is not an integer".format(metric) + ) from e + return normal_route + + +def _normalize_subnets(subnets): + if not subnets: + subnets = [] + return [_normalize_subnet(s) for s in subnets] + + +def parse_net_config_data( + net_config: dict, + skip_broken: bool = True, + renderer=None, # type: Optional[Renderer] +) -> NetworkState: + """Parses the config, returns NetworkState object + + :param net_config: curtin network config dict + """ + state = None + version = net_config.get("version") + config = net_config.get("config") + if version == 2: + # v2 does not have explicit 'config' key so we + # pass the whole net-config as-is + config = net_config + + if version and config is not None: + nsi = NetworkStateInterpreter( + version=version, config=config, renderer=renderer + ) + nsi.parse_config(skip_broken=skip_broken) + state = nsi.network_state + + if not state: + raise RuntimeError( + "No valid network_state object created from network config. " + "Did you specify the correct version? Network config:\n" + f"{net_config}" + ) + + return state + + +# vi: ts=4 expandtab diff --git a/pve/http/answer.toml.pkrtpl.hcl b/pve/http/answer.toml.pkrtpl.hcl new file mode 100644 index 0000000..5ce07d6 --- /dev/null +++ b/pve/http/answer.toml.pkrtpl.hcl @@ -0,0 +1,16 @@ +[global] +keyboard = "de" +country = "de" +fqdn = "pveauto.testinstall" +mailto = "mail@no.invalid" +timezone = "Europe/Berlin" +root-password = "${ssh_password}" + +[network] +source = "from-dhcp" + +[disk-setup] +filesystem = "ext4" +lvm.swapsize = 0 +lvm.maxvz = 0 +disk-list = ["vda"] diff --git a/pve/pve.pkr.hcl b/pve/pve.pkr.hcl new file mode 100644 index 0000000..0f8a90e --- /dev/null +++ b/pve/pve.pkr.hcl @@ -0,0 +1,95 @@ +locals { + proxy_env = [ + "http_proxy=${var.http_proxy}", + "https_proxy=${var.https_proxy}", + "no_proxy=${var.https_proxy}", + ] +} + +source "qemu" "pve" { + boot_command = [ + "", + "", + "", + "proxmox-fetch-answer http http://{{ .HTTPIP }}:{{ .HTTPPort }}/answer.toml >/run/automatic-installer-answers ", + "exit ", + ] + boot_wait = "3s" + cpus = 2 + memory = 2048 + disk_size = "10G" + format = "raw" + headless = var.headless + efi_boot = true + efi_firmware_code = "OVMF_CODE.fd" + efi_firmware_vars = "OVMF_VARS.fd" + efi_drop_efivars = true + http_content = { + "/answer.toml" = templatefile("${path.root}/http/answer.toml.pkrtpl.hcl", + { + ssh_password = var.ssh_password + } + ) + } + iso_checksum = "sha256:d237d70ca48a9f6eb47f95fd4fd337722c3f69f8106393844d027d28c26523d8" + iso_url = "https://enterprise.proxmox.com/iso/proxmox-ve_8.4-1.iso" + // qemu_img_args { + // create = ["-F", "qcow2"] + // } + shutdown_command = "shutdown -P now" + ssh_handshake_attempts = 50 + ssh_password = var.ssh_password + ssh_timeout = var.timeout + ssh_username = var.ssh_username + ssh_wait_timeout = var.timeout +} + +build { + sources = 
["source.qemu.pve"] + + provisioner "shell" { + scripts = ["${path.root}/scripts/configure-repositories.sh"] + } + + provisioner "shell" { + environment_vars = concat(local.proxy_env, ["DEBIAN_FRONTEND=noninteractive", "DEBIAN_VERSION=${var.debian_version}", "BOOT_MODE=${var.boot_mode}"]) + scripts = ["${path.root}/scripts/essential-packages.sh", "${path.root}/scripts/setup-boot.sh", "${path.root}/scripts/networking.sh"] + } + + provisioner "shell" { + environment_vars = [ + "CLOUDIMG_CUSTOM_KERNEL=${var.kernel}", + "DEBIAN_FRONTEND=noninteractive" + ] + scripts = ["${path.root}/scripts/install-custom-kernel.sh"] + } + + provisioner "file" { + destination = "/tmp/" + sources = ["${path.root}/scripts/curtin-hooks"] + } + + provisioner "shell" { + environment_vars = ["CLOUDIMG_CUSTOM_KERNEL=${var.kernel}"] + scripts = ["${path.root}/scripts/setup-curtin.sh"] + } + + provisioner "file" { + destination = "/etc/network/" + sources = ["${path.root}/files/interfaces"] + } + + provisioner "file" { + destination = "/etc/cloud/cloud.cfg.d/" + sources = ["${path.root}/files/99_eni.cfg"] + } + + provisioner "file" { + destination = "/usr/lib/python3/dist-packages/cloudinit/net/network_state.py" + sources = ["${path.root}/files/network_state.py"] + } + + post-processor "compress" { + output = "pve_lvm.dd.gz" + } +} diff --git a/pve/scripts/cleanup.sh b/pve/scripts/cleanup.sh new file mode 100644 index 0000000..9af46d7 --- /dev/null +++ b/pve/scripts/cleanup.sh @@ -0,0 +1,29 @@ +#!/bin/bash -ex +# +# cleanup.sh - Clean up what we did to be able to build the image. +# +# Copyright (C) 2023 Canonical +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + + +# Everything in /packer_backup should be restored. +find /packer_backup +cp --preserve -r /packer_backup/ / +rm -rf /packer_backup + +# We had to allow root to ssh for the image setup. Let's try to revert that. 
+sed -i s/^root:[^:]*/root:*/ /etc/shadow +rm -r /root/.ssh +rm -r /etc/ssh/ssh_host_* diff --git a/pve/scripts/configure-repositories.sh b/pve/scripts/configure-repositories.sh new file mode 100644 index 0000000..412f3f7 --- /dev/null +++ b/pve/scripts/configure-repositories.sh @@ -0,0 +1,5 @@ +sed -i "s/^deb/\#deb/" /etc/apt/sources.list.d/pve-enterprise.list +sed -i "s/^deb/\#deb/" /etc/apt/sources.list.d/ceph.list +echo "deb http://download.proxmox.com/debian/pve $(grep "VERSION=" /etc/os-release | sed -n 's/.*(\(.*\)).*/\1/p') pve-no-subscription" > /etc/apt/sources.list.d/pve-no-enterprise.list +apt update +apt install -y cloud-init \ No newline at end of file diff --git a/pve/scripts/curtin copy.sh b/pve/scripts/curtin copy.sh new file mode 100644 index 0000000..c3355ad --- /dev/null +++ b/pve/scripts/curtin copy.sh @@ -0,0 +1,38 @@ +#!/bin/bash -ex +# +# curtin.sh - Move curtin scripts to final destination +# +# Author: Alexsander de Souza +# +# Copyright (C) 2023 Canonical +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +export DEBIAN_FRONTEND=noninteractive + +apt-get install -y jq +mkdir -p /curtin + +# install scripts +for s in curtin-hooks install-custom-packages setup-bootloader; do + if [ -f "/tmp/$s" ]; then + mv "/tmp/$s" /curtin/ + chmod 750 "/curtin/$s" + fi +done + +# copy custom packages +if [ -f /tmp/custom-packages.tar.gz ]; then + mv /tmp/custom-packages.tar.gz /curtin/ +fi diff --git a/pve/scripts/curtin-hooks b/pve/scripts/curtin-hooks new file mode 100644 index 0000000..0b02240 --- /dev/null +++ b/pve/scripts/curtin-hooks @@ -0,0 +1,211 @@ +#!/usr/bin/env python3 +# curtin-hooks - Curtin installation hooks for Ubuntu +# +# Copyright (C) 2022 Canonical +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+ +import os +import shutil +import sys + +from curtin.config import load_command_config +from curtin.util import load_command_environment +from curtin import distro +from curtin import config +from curtin.log import LOG +from curtin.reporter import events +from curtin.commands.curthooks import * +from curtin import util + + +# import copy +# import glob +# import platform +# import re +# import textwrap + + +# from curtin import block +# from curtin.block import iscsi +# from curtin.block import lvm +# from curtin import net +# from curtin import futil +# from curtin import paths +# from curtin import swap +# from curtin import version as curtin_version +# from curtin.block import deps as bdeps +# from curtin.net import deps as ndeps +# from curtin.commands import apply_net, apt_config +# from curtin.commands.install_grub import install_grub +# from curtin.url_helper import get_maas_version + + +def configure_custom_kernel(config): + """Amend the curtin config to explicity specify the kernel to install. + + The name of the kernel to install should already have been written to the + CUSTOM_KERNEL file in the same directory as this file. + """ + custom_kernel_path = os.path.join( + os.path.dirname(__file__), "CUSTOM_KERNEL") + with open(custom_kernel_path, "r") as custom_kernel_file: + custom_kernel_package = custom_kernel_file.read().strip() + kernel_config = config.setdefault("kernel", {}) + kernel_config["package"] = custom_kernel_package + return config + +def cleanup(): + """Remove curtin-hooks so its as if we were never here.""" + curtin_dir = os.path.dirname(__file__) + shutil.rmtree(curtin_dir) + +def curthook(cfg, target, state): + LOG.info('Running curtin builtin curthooks') + stack_prefix = state.get('report_stack_prefix', '') + state_etcd = os.path.split(state['fstab'])[0] + machine = platform.machine() + + distro_info = distro.get_distroinfo(target=target) + if not distro_info: + raise RuntimeError('Failed to determine target distro') + osfamily = distro_info.family + LOG.info('Configuring target system for distro: %s osfamily: %s', + distro_info.variant, osfamily) + with events.ReportEventStack( + name=stack_prefix + '/writing-apt-config', + reporting_enabled=True, level="INFO", + description="configuring apt configuring apt"): + do_apt_config(cfg, target) + disable_overlayroot(cfg, target) + disable_update_initramfs(cfg, target, machine) + + # LP: #1742560 prevent zfs-dkms from being installed (Xenial) + if distro.lsb_release(target=target)['codename'] == 'xenial': + distro.apt_update(target=target) + with util.ChrootableTarget(target) as in_chroot: + in_chroot.subp(['apt-mark', 'hold', 'zfs-dkms']) + + # packages may be needed prior to installing kernel + with events.ReportEventStack( + name=stack_prefix + '/installing-missing-packages', + reporting_enabled=True, level="INFO", + description="installing missing packages"): + install_missing_packages(cfg, target, osfamily=osfamily) + + with events.ReportEventStack( + name=stack_prefix + '/configuring-iscsi-service', + reporting_enabled=True, level="INFO", + description="configuring iscsi service"): + configure_iscsi(cfg, state_etcd, target, osfamily=osfamily) + + with events.ReportEventStack( + name=stack_prefix + '/configuring-mdadm-service', + reporting_enabled=True, level="INFO", + description="configuring raid (mdadm) service"): + configure_mdadm(cfg, state_etcd, target, osfamily=osfamily) + + with events.ReportEventStack( + name=stack_prefix + '/installing-kernel', + reporting_enabled=True, level="INFO", + 
description="installing kernel"): + setup_zipl(cfg, target) + setup_kernel_img_conf(target) + install_kernel(cfg, target) + run_zipl(cfg, target) + restore_dist_interfaces(cfg, target) + chzdev_persist_active_online(cfg, target) + + # with events.ReportEventStack( + # name=stack_prefix + '/setting-up-swap', + # reporting_enabled=True, level="INFO", + # description="setting up swap"): + # add_swap(cfg, target, state.get('fstab')) + + with events.ReportEventStack( + name=stack_prefix + '/apply-networking-config', + reporting_enabled=True, level="INFO", + description="apply networking config"): + apply_networking(target, state) + + # with events.ReportEventStack( + # name=stack_prefix + '/writing-etc-fstab', + # reporting_enabled=True, level="INFO", + # description="writing etc/fstab"): + # copy_fstab(state.get('fstab'), target) + + with events.ReportEventStack( + name=stack_prefix + '/configuring-multipath', + reporting_enabled=True, level="INFO", + description="configuring multipath"): + detect_and_handle_multipath(cfg, target, osfamily=osfamily) + + with events.ReportEventStack( + name=stack_prefix + '/system-upgrade', + reporting_enabled=True, level="INFO", + description="updating packages on target system"): + system_upgrade(cfg, target, osfamily=osfamily) + + + with events.ReportEventStack( + name=stack_prefix + '/pollinate-user-agent', + reporting_enabled=True, level="INFO", + description="configuring pollinate user-agent on target"): + handle_pollinate_user_agent(cfg, target) + + # check for the zpool cache file and copy to target if present + zpool_cache = '/etc/zfs/zpool.cache' + if os.path.exists(zpool_cache): + copy_zpool_cache(zpool_cache, target) + + zkey_repository = '/etc/zkey/repository' + zkey_used = os.path.join(os.path.split(state['fstab'])[0], "zkey_used") + if all(map(os.path.exists, [zkey_repository, zkey_used])): + distro.install_packages(['s390-tools-zkey'], target=target, + osfamily=osfamily) + copy_zkey_repository(zkey_repository, target) + + # If a crypttab file was created by block_meta than it needs to be + # copied onto the target system, and update_initramfs() needs to be + # run, so that the cryptsetup hooks are properly configured on the + # installed system and it will be able to open encrypted volumes + # at boot. 
+ crypttab_location = os.path.join(os.path.split(state['fstab'])[0], + "crypttab") + if os.path.exists(crypttab_location): + copy_crypttab(crypttab_location, target) + update_initramfs(target) + + # If udev dname rules were created, copy them to target + udev_rules_d = os.path.join(state['scratch'], "rules.d") + if os.path.isdir(udev_rules_d): + copy_dname_rules(udev_rules_d, target) + + with events.ReportEventStack( + name=stack_prefix + '/updating-initramfs-configuration', + reporting_enabled=True, level="INFO", + description="updating initramfs configuration"): + # re-enable update_initramfs + enable_update_initramfs(cfg, target, machine) + update_initramfs(target, all_kernels=True) + + +def main(): + state = load_command_environment() + config = configure_custom_kernel(load_command_config(None, state)) + curthook(config, state['target'], state) + cleanup() + +if __name__ == "__main__": + main() diff --git a/pve/scripts/curtin.sh b/pve/scripts/curtin.sh new file mode 100644 index 0000000..c3355ad --- /dev/null +++ b/pve/scripts/curtin.sh @@ -0,0 +1,38 @@ +#!/bin/bash -ex +# +# curtin.sh - Move curtin scripts to final destination +# +# Author: Alexsander de Souza +# +# Copyright (C) 2023 Canonical +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +export DEBIAN_FRONTEND=noninteractive + +apt-get install -y jq +mkdir -p /curtin + +# install scripts +for s in curtin-hooks install-custom-packages setup-bootloader; do + if [ -f "/tmp/$s" ]; then + mv "/tmp/$s" /curtin/ + chmod 750 "/curtin/$s" + fi +done + +# copy custom packages +if [ -f /tmp/custom-packages.tar.gz ]; then + mv /tmp/custom-packages.tar.gz /curtin/ +fi diff --git a/pve/scripts/essential-packages.sh b/pve/scripts/essential-packages.sh new file mode 100644 index 0000000..4e0ae8b --- /dev/null +++ b/pve/scripts/essential-packages.sh @@ -0,0 +1,34 @@ +#!/bin/bash -ex +# +# setup-boot.sh - Set up the image after initial boot +# +# Copyright (C) 2023 Canonical +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +export DEBIAN_FRONTEND=noninteractive + +# Configure apt proxy if needed. +packer_apt_proxy_config="/etc/apt/apt.conf.d/packer-proxy.conf" +if [ ! -z "${http_proxy}" ]; then + echo "Acquire::http::Proxy \"${http_proxy}\";" >> ${packer_apt_proxy_config} +fi +if [ ! 
-z "${https_proxy}" ]; then + echo "Acquire::https::Proxy \"${https_proxy}\";" >> ${packer_apt_proxy_config} +fi + +ARCH=$(dpkg --print-architecture) + +apt-get update +apt-get -y install lvm2 xfsprogs diff --git a/pve/scripts/install-custom-kernel.sh b/pve/scripts/install-custom-kernel.sh new file mode 100755 index 0000000..a192601 --- /dev/null +++ b/pve/scripts/install-custom-kernel.sh @@ -0,0 +1,31 @@ +#!/bin/bash -ex +# +# install-custom-kernel.sh - Install custom kernel, if specified +# +# Copyright (C) 2021 Canonical +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . +export DEBIAN_FRONTEND=noninteractive + +if [ -z "${CLOUDIMG_CUSTOM_KERNEL}" ]; then + echo "Not installing custom kernel, since none was specified." + exit 0 +fi + +echo "Installing custom kernel ${CLOUDIMG_CUSTOM_KERNEL}" +apt-get install -y ${CLOUDIMG_CUSTOM_KERNEL} + +# Record the installed kernel version, so that the curtin hook knows about it. +mkdir -p /curtin +echo -n "${CLOUDIMG_CUSTOM_KERNEL}" > /curtin/CUSTOM_KERNEL diff --git a/pve/scripts/install-custom-packages b/pve/scripts/install-custom-packages new file mode 100644 index 0000000..ade9a94 --- /dev/null +++ b/pve/scripts/install-custom-packages @@ -0,0 +1,46 @@ +#!/bin/bash -ex +# +# install-custom-packages - Install custom packages +# +# Author: Alexsander de Souza +# +# Copyright (C) 2021 Canonical +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . +export DEBIAN_FRONTEND=noninteractive + +PKG_TGZ="/curtin/custom-packages.tar.gz" + +if [ ! 
-f "${PKG_TGZ}" ]; then + exit 0 +fi + +WORKDIR=$(mktemp -d) + +cleanup() { + rm -rf "${WORKDIR}" +} +trap cleanup EXIT + +echo "remove existing kernels" +dpkg -l 'linux-image-*' 'linux-headers-*' | awk '/^ii/{print $2}' | xargs apt-get -y purge + +echo "install new kernel" +tar xzvf "${PKG_TGZ}" -C "${WORKDIR}" +DEBS=$(find "${WORKDIR}" -name '*.deb') +apt-get install -y --no-install-recommends ${DEBS} +apt-get install --fix-broken + +echo "purge unused packages" +apt-get autoremove -y diff --git a/pve/scripts/networking.sh b/pve/scripts/networking.sh new file mode 100644 index 0000000..4f9550e --- /dev/null +++ b/pve/scripts/networking.sh @@ -0,0 +1,92 @@ +#!/bin/bash -ex +# +# networking.sh - Prepare image to boot with cloud-init +# +# Author: Alexsander de Souza +# Author: Alan Baghumian +# +# Copyright (C) 2023 Canonical +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . +export DEBIAN_FRONTEND=noninteractive + +# Configure apt proxy if needed. +packer_apt_proxy_config="/etc/apt/apt.conf.d/packer-proxy.conf" +if [ ! -z "${http_proxy}" ]; then + echo "Acquire::http::Proxy \"${http_proxy}\";" >> ${packer_apt_proxy_config} +fi +if [ ! -z "${https_proxy}" ]; then + echo "Acquire::https::Proxy \"${https_proxy}\";" >> ${packer_apt_proxy_config} +fi + +apt-get install -qy cloud-init netplan.io python3-serial + +cat > /etc/sysctl.d/99-cloudimg-ipv6.conf < /usr/local/bin/dpkg-query < /usr/local/bin/netplan < /etc/default/netplan +#systemctl disable networking; systemctl mask networking +#mv /etc/network/{interfaces,interfaces.save} +#systemctl enable systemd-networkd diff --git a/pve/scripts/setup-boot.sh b/pve/scripts/setup-boot.sh new file mode 100644 index 0000000..718d072 --- /dev/null +++ b/pve/scripts/setup-boot.sh @@ -0,0 +1,43 @@ +#!/bin/bash -ex +# +# setup-boot.sh - Set up the image after initial boot +# +# Copyright (C) 2023-2025 Canonical +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +export DEBIAN_FRONTEND=noninteractive + +# Configure apt proxy if needed. +packer_apt_proxy_config="/etc/apt/apt.conf.d/packer-proxy.conf" +if [ ! -z "${http_proxy}" ]; then + echo "Acquire::http::Proxy \"${http_proxy}\";" >> ${packer_apt_proxy_config} +fi +if [ ! 
-z "${https_proxy}" ]; then + echo "Acquire::https::Proxy \"${https_proxy}\";" >> ${packer_apt_proxy_config} +fi + +ARCH=$(dpkg --print-architecture) + +# Reset cloud-init, so that it can run again when MAAS deploy the image. +cloud-init clean --logs + +apt-get update + +# Bookworm+ does not include this, but curtin requires this during the installation. +if [ ${DEBIAN_VERSION} == '12' ] || [ ${DEBIAN_VERSION} == '13' ]; then + wget http://ftp.us.debian.org/debian/pool/main/e/efibootmgr/efibootmgr_17-1_${ARCH}.deb + dpkg -i efibootmgr_17-1_${ARCH}.deb + rm efibootmgr_17-1_${ARCH}.deb +fi diff --git a/pve/scripts/setup-bootloader b/pve/scripts/setup-bootloader new file mode 100644 index 0000000..bce05f6 --- /dev/null +++ b/pve/scripts/setup-bootloader @@ -0,0 +1,50 @@ +#!/bin/bash -ex +# +# setup-bootloader - Install bootloader in the boot disk +# +# Author: Alexsander de Souza +# +# Copyright (C) 2023 Canonical +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +export DEBIAN_FRONTEND=noninteractive + +# Configure apt proxy if needed. +packer_apt_proxy_config="/etc/apt/apt.conf.d/packer-proxy.conf" +if [ ! -z "${http_proxy}" ]; then + echo "Acquire::http::Proxy \"${http_proxy}\";" >> ${packer_apt_proxy_config} +fi +if [ ! -z "${https_proxy}" ]; then + echo "Acquire::https::Proxy \"${https_proxy}\";" >> ${packer_apt_proxy_config} +fi + +ARCH=$(dpkg --print-architecture) + +# Clean up remnants from packer-maas vm install +rm /var/cache/debconf/config.dat +dpkg --configure -a + +apt-get update + +if [ -f /sys/firmware/efi/runtime ]; then + if [ ${ARCH} == "amd64" ]; then + apt-get install -y grub-cloud-${ARCH} grub-efi-${ARCH} + else + apt-get install -y grub-efi-${ARCH}-signed shim-signed grub-efi-${ARCH} + fi +else + apt-get install -y grub-cloud-${ARCH} grub-pc +fi + diff --git a/pve/scripts/setup-curtin.sh b/pve/scripts/setup-curtin.sh new file mode 100755 index 0000000..145d8d3 --- /dev/null +++ b/pve/scripts/setup-curtin.sh @@ -0,0 +1,31 @@ +#!/bin/bash -ex +# +# cloud-img-setup-curtin.sh - Set up curtin curthooks, if needed. +# +# Copyright (C) 2022 Canonical +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +if [[ ! -f "/curtin/CUSTOM_KERNEL" ]]; then + echo "Skipping curtin setup, since no custom kernel is used." 
+ exit 0 +fi + +echo "Configuring curtin to install custom kernel" + +mkdir -p /curtin + +FILENAME=curtin-hooks +mv "/tmp/${FILENAME}" /curtin/ +chmod 750 "/curtin/${FILENAME}" diff --git a/pve/variables.pkr.hcl b/pve/variables.pkr.hcl new file mode 100644 index 0000000..3d3ff27 --- /dev/null +++ b/pve/variables.pkr.hcl @@ -0,0 +1,80 @@ +packer { + required_version = ">= 1.7.0" + required_plugins { + qemu = { + version = "~> 1.0" + source = "github.com/hashicorp/qemu" + } + } +} + +variable "headless" { + type = bool + default = true + description = "Whether VNC viewer should not be launched." +} + +variable "http_directory" { + type = string + default = "http" +} + +variable "http_proxy" { + type = string + default = "${env("http_proxy")}" +} + +variable "https_proxy" { + type = string + default = "${env("https_proxy")}" +} + +variable "no_proxy" { + type = string + default = "${env("no_proxy")}" +} + +variable "ssh_password" { + type = string + default = "debian123" +} + +variable "ssh_username" { + type = string + default = "root" +} + +variable "ssh_debian_password" { + type = string + default = "debian" +} + +variable "timeout" { + type = string + default = "1h" + description = "Timeout for building the image" +} + +variable "filename" { + type = string + default = "pve.tar.gz" + description = "The filename of the tarball to produce" +} + +variable "kernel" { + type = string + default = "proxmox-default-kernel" + description = "The kernel to use for the image" +} + +variable "debian_version" { + type = string + default = "12" + description = "The version number of the debian series to build." +} + +variable "boot_mode" { + type = string + default = "uefi" + description = "The default boot mode support baked into the image." +} \ No newline at end of file
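
The defaults declared in variables.pkr.hcl can be overridden at build time with
Packer's `-var` flag; for example (a sketch using only the variables defined above):

```shell
packer build -var headless=false -var debian_version=12 -var ssh_password='secret' .
```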