Diffstat (limited to 'cloudinit')
-rw-r--r-- | cloudinit/config/cc_apt_configure.py | 27
-rw-r--r-- | cloudinit/config/cc_disk_setup.py | 146
-rw-r--r-- | cloudinit/config/cc_ntp.py | 47
-rw-r--r-- | cloudinit/config/cc_resizefs.py | 95
-rw-r--r-- | cloudinit/config/cc_users_groups.py | 59
-rwxr-xr-x | cloudinit/distros/__init__.py | 3
-rw-r--r-- | cloudinit/distros/freebsd.py | 277
-rw-r--r-- | cloudinit/net/__init__.py | 3
-rwxr-xr-x | cloudinit/net/cmdline.py | 36
-rw-r--r-- | cloudinit/net/netplan.py | 24
-rw-r--r-- | cloudinit/net/network_state.py | 4
-rw-r--r-- | cloudinit/net/sysconfig.py | 246
-rw-r--r-- | cloudinit/settings.py | 2
-rw-r--r-- | cloudinit/sources/DataSourceAzure.py | 265
-rw-r--r-- | cloudinit/sources/DataSourceGCE.py | 2
-rw-r--r-- | cloudinit/sources/DataSourceOpenStack.py | 2
-rw-r--r-- | cloudinit/sources/helpers/azure.py | 11
-rw-r--r-- | cloudinit/sources/helpers/digitalocean.py | 2
-rw-r--r-- | cloudinit/stages.py | 2
-rw-r--r-- | cloudinit/util.py | 94 |
20 files changed, 1069 insertions, 278 deletions
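Context for the cc_disk_setup.py changes below: sgdisk reports GPT partition type codes as four hex characters (e.g. 8300), while cloud-config layouts use the two-character MBR-style codes (e.g. 83). The patch converts in both directions, in check_partition_gpt_layout() and exec_mkpart_gpt(). A minimal standalone sketch of that mapping, for illustration only (the function names here are hypothetical, not the shipped helpers):

    def normalize_sgdisk_code(code):
        # sgdisk prints '8300' where a user writes '83'; strip the
        # trailing zero padding so layout comparison works.
        code = str(code)
        if len(code) == 4 and code.endswith("00"):
            return code[:2]
        return code

    def to_sgdisk_code(code):
        # inverse direction, used when creating partitions:
        # right-pad to four chars with zeros, so 82 -> '8200'.
        return str(code).ljust(4, "0")

    assert normalize_sgdisk_code("8300") == "83"   # Linux filesystem
    assert normalize_sgdisk_code("0700") == "07"   # Microsoft basic data
    assert to_sgdisk_code(82) == "8200"            # Linux swap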
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index 7e751776..177cbcf7 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -65,12 +65,12 @@ take a list of configs, allowing mirrors to be specified on a per-architecture basis. Each config is a dictionary which must have an entry for ``arches``, specifying which architectures that config entry is for. The keyword ``default`` applies to any architecture not explicitly listed. The mirror url -can be specified with the ``url`` key, or a list of mirrors to check can be +can be specified with the ``uri`` key, or a list of mirrors to check can be provided in order, with the first mirror that can be resolved being selected. This allows the same configuration to be used in different environment, with -different hosts used for a local apt mirror. If no mirror is provided by uri or -search, ``search_dns`` may be used to search for dns names in the format -``<distro>-mirror`` in each of the following: +different hosts used for a local apt mirror. If no mirror is provided by +``uri`` or ``search``, ``search_dns`` may be used to search for dns names in +the format ``<distro>-mirror`` in each of the following: - fqdn of this host per cloud metadata - localdomain @@ -282,16 +282,21 @@ def handle(name, ocfg, cloud, log, _): apply_apt(cfg, cloud, target) +def _should_configure_on_empty_apt(): + # if no config was provided, should apt configuration be done? + if util.system_is_snappy(): + return False, "system is snappy." + if not (util.which('apt-get') or util.which('apt')): + return False, "no apt commands." + return True, "Apt is available." + + def apply_apt(cfg, cloud, target): # cfg is the 'apt' top level dictionary already in 'v3' format. if not cfg: - # no config was provided. If apt configuration does not seem - # necessary on this system, then return. - if util.system_is_snappy(): - LOG.debug("Nothing to do: No apt config and running on snappy") - return - if not (util.which('apt-get') or util.which('apt')): - LOG.debug("Nothing to do: No apt config and no apt commands") + should_config, msg = _should_configure_on_empty_apt() + if not should_config: + LOG.debug("Nothing to do: No apt config and %s", msg) return LOG.debug("handling apt config: %s", cfg) diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py index f49386e3..c2b83aea 100644 --- a/cloudinit/config/cc_disk_setup.py +++ b/cloudinit/config/cc_disk_setup.py @@ -68,6 +68,9 @@ specified using ``filesystem``. Using ``overwrite: true`` for filesystems is dangerous and can lead to data loss, so double check the entry in ``fs_setup``. +.. note:: + ``replace_fs`` is ignored unless ``partition`` is ``auto`` or ``any``. 
+ **Internal name:** ``cc_disk_setup`` **Module frequency:** per instance @@ -127,7 +130,7 @@ def handle(_name, cfg, cloud, log, _args): log.debug("Partitioning disks: %s", str(disk_setup)) for disk, definition in disk_setup.items(): if not isinstance(definition, dict): - log.warn("Invalid disk definition for %s" % disk) + log.warning("Invalid disk definition for %s" % disk) continue try: @@ -144,7 +147,7 @@ def handle(_name, cfg, cloud, log, _args): update_fs_setup_devices(fs_setup, cloud.device_name_to_device) for definition in fs_setup: if not isinstance(definition, dict): - log.warn("Invalid file system definition: %s" % definition) + log.warning("Invalid file system definition: %s" % definition) continue try: @@ -199,8 +202,13 @@ def update_fs_setup_devices(disk_setup, tformer): definition['_origname'] = origname definition['device'] = tformed - if part and 'partition' in definition: - definition['_partition'] = definition['partition'] + if part: + # In origname with <dev>.N, N overrides 'partition' key. + if 'partition' in definition: + LOG.warning("Partition '%s' from dotted device name '%s' " + "overrides 'partition' key in %s", part, origname, + definition) + definition['_partition'] = definition['partition'] definition['partition'] = part @@ -423,7 +431,7 @@ def get_dyn_func(*args): raise Exception("No such function %s to call!" % func_name) -def get_mbr_hdd_size(device): +def get_hdd_size(device): try: size_in_bytes, _ = util.subp([BLKDEV_CMD, '--getsize64', device]) sector_size, _ = util.subp([BLKDEV_CMD, '--getss', device]) @@ -433,22 +441,6 @@ def get_mbr_hdd_size(device): return int(size_in_bytes) / int(sector_size) -def get_gpt_hdd_size(device): - out, _ = util.subp([SGDISK_CMD, '-p', device], update_env=LANG_C_ENV) - for line in out.splitlines(): - if line.startswith("Disk"): - return line.split()[2] - raise Exception("Failed to get %s size from sgdisk" % (device)) - - -def get_hdd_size(table_type, device): - """ - Returns the hard disk size. - This works with any disk type, including GPT. - """ - return get_dyn_func("get_%s_hdd_size", table_type, device) - - def check_partition_mbr_layout(device, layout): """ Returns true if the partition layout matches the one on the disk @@ -496,12 +488,35 @@ def check_partition_gpt_layout(device, layout): device, e)) out_lines = iter(out.splitlines()) - # Skip header + # Skip header. Output looks like: + # *************************************************************** + # Found invalid GPT and valid MBR; converting MBR to GPT format + # in memory. + # *************************************************************** + # + # Disk /dev/vdb: 83886080 sectors, 40.0 GiB + # Logical sector size: 512 bytes + # Disk identifier (GUID): 8A7F11AD-3953-491B-8051-077E01C8E9A7 + # Partition table holds up to 128 entries + # First usable sector is 34, last usable sector is 83886046 + # Partitions will be aligned on 2048-sector boundaries + # Total free space is 83476413 sectors (39.8 GiB) + # + # Number Start (sector) End (sector) Size Code Name + # 1 2048 206847 100.0 MiB 0700 Microsoft basic data for line in out_lines: if line.strip().startswith('Number'): break - return [line.strip().split()[-1] for line in out_lines] + codes = [line.strip().split()[5] for line in out_lines] + cleaned = [] + + # user would expect a code '83' to be Linux, but sgdisk outputs 8300. 
+ for code in codes: + if len(code) == 4 and code.endswith("00"): + code = code[0:2] + cleaned.append(code) + return cleaned def check_partition_layout(table_type, device, layout): @@ -515,6 +530,8 @@ def check_partition_layout(table_type, device, layout): found_layout = get_dyn_func( "check_partition_%s_layout", table_type, device, layout) + LOG.debug("called check_partition_%s_layout(%s, %s), returned: %s", + table_type, device, layout, found_layout) if isinstance(layout, bool): # if we are using auto partitioning, or "True" be happy # if a single partition exists. @@ -522,18 +539,17 @@ def check_partition_layout(table_type, device, layout): return True return False - else: - if len(found_layout) != len(layout): - return False - else: - # This just makes sure that the number of requested - # partitions and the type labels are right - for x in range(1, len(layout) + 1): - if isinstance(layout[x - 1], tuple): - _, part_type = layout[x] - if int(found_layout[x]) != int(part_type): - return False - return True + elif len(found_layout) == len(layout): + # This just makes sure that the number of requested + # partitions and the type labels are right + layout_types = [str(x[1]) if isinstance(x, (tuple, list)) else None + for x in layout] + LOG.debug("Layout types=%s. Found types=%s", + layout_types, found_layout) + for itype, ftype in zip(layout_types, found_layout): + if itype is not None and str(ftype) != str(itype): + return False + return True return False @@ -664,14 +680,14 @@ def read_parttbl(device): reliable way to probe the partition table. """ blkdev_cmd = [BLKDEV_CMD, '--rereadpt', device] - udev_cmd = [UDEVADM_CMD, 'settle'] + udevadm_settle() try: - util.subp(udev_cmd) util.subp(blkdev_cmd) - util.subp(udev_cmd) except Exception as e: util.logexc(LOG, "Failed reading the partition table %s" % e) + udevadm_settle() + def exec_mkpart_mbr(device, layout): """ @@ -696,9 +712,11 @@ def exec_mkpart_gpt(device, layout): util.subp([SGDISK_CMD, '-n', '{}:{}:{}'.format(index, start, end), device]) if partition_type is not None: + # convert to a 4 char (or more) string right padded with 0 + # 82 -> 8200. 'Linux' -> 'Linux' + pinput = str(partition_type).ljust(4, "0") util.subp( - [SGDISK_CMD, - '-t', '{}:{}'.format(index, partition_type), device]) + [SGDISK_CMD, '-t', '{}:{}'.format(index, pinput), device]) except Exception: LOG.warning("Failed to partition device %s", device) raise @@ -719,6 +737,24 @@ def exec_mkpart(table_type, device, layout): return get_dyn_func("exec_mkpart_%s", table_type, device, layout) +def udevadm_settle(): + util.subp(['udevadm', 'settle']) + + +def assert_and_settle_device(device): + """Assert that device exists and settle so it is fully recognized.""" + if not os.path.exists(device): + udevadm_settle() + if not os.path.exists(device): + raise RuntimeError("Device %s did not exist and was not created " + "with a udevamd settle." % device) + + # Whether or not the device existed above, it is possible that udev + # events that would populate udev database (for reading by lsdname) have + # not yet finished. So settle again. + udevadm_settle() + + def mkpart(device, definition): """ Creates the partition table. @@ -734,6 +770,7 @@ def mkpart(device, definition): device: the device to work on. 
""" # ensure that we get a real device rather than a symbolic link + assert_and_settle_device(device) device = os.path.realpath(device) LOG.debug("Checking values for %s definition", device) @@ -769,8 +806,8 @@ def mkpart(device, definition): LOG.debug("Skipping partitioning on configured device %s", device) return - LOG.debug("Checking for device size") - device_size = get_hdd_size(table_type, device) + LOG.debug("Checking for device size of %s", device) + device_size = get_hdd_size(device) LOG.debug("Calculating partition layout") part_definition = get_partition_layout(table_type, device_size, layout) @@ -834,6 +871,7 @@ def mkfs(fs_cfg): overwrite = fs_cfg.get('overwrite', False) # ensure that we get a real device rather than a symbolic link + assert_and_settle_device(device) device = os.path.realpath(device) # This allows you to define the default ephemeral or swap @@ -849,7 +887,8 @@ def mkfs(fs_cfg): # Check to see if the fs already exists LOG.debug("Checking device %s", device) check_label, check_fstype, _ = check_fs(device) - LOG.debug("Device %s has %s %s", device, check_label, check_fstype) + LOG.debug("Device '%s' has check_label='%s' check_fstype=%s", + device, check_label, check_fstype) if check_label == label and check_fstype == fs_type: LOG.debug("Existing file system found at %s", device) @@ -910,12 +949,23 @@ def mkfs(fs_cfg): "must be set.", label) # Create the commands + shell = False if fs_cmd: fs_cmd = fs_cfg['cmd'] % { 'label': label, 'filesystem': fs_type, 'device': device, } + shell = True + + if overwrite: + LOG.warning( + "fs_setup:overwrite ignored because cmd was specified: %s", + fs_cmd) + if fs_opts: + LOG.warning( + "fs_setup:extra_opts ignored because cmd was specified: %s", + fs_cmd) else: # Find the mkfs command mkfs_cmd = util.which("mkfs.%s" % fs_type) @@ -936,14 +986,14 @@ def mkfs(fs_cfg): if overwrite or device_type(device) == "disk": fs_cmd.append(lookup_force_flag(fs_type)) - # Add the extends FS options - if fs_opts: - fs_cmd.extend(fs_opts) + # Add the extends FS options + if fs_opts: + fs_cmd.extend(fs_opts) LOG.debug("Creating file system %s on %s", label, device) - LOG.debug(" Using cmd: %s", " ".join(fs_cmd)) + LOG.debug(" Using cmd: %s", str(fs_cmd)) try: - util.subp(fs_cmd) + util.subp(fs_cmd, shell=shell) except Exception as e: raise Exception("Failed to exec of '%s':\n%s" % (fs_cmd, e)) diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py index e33032fd..5cc54536 100644 --- a/cloudinit/config/cc_ntp.py +++ b/cloudinit/config/cc_ntp.py @@ -53,14 +53,12 @@ distros = ['centos', 'debian', 'fedora', 'opensuse', 'ubuntu'] def handle(name, cfg, cloud, log, _args): - """ - Enable and configure ntp + """Enable and configure ntp.""" - ntp: - pools: ['0.{{distro}}.pool.ntp.org', '1.{{distro}}.pool.ntp.org'] - servers: ['192.168.2.1'] - - """ + if 'ntp' not in cfg: + LOG.debug( + "Skipping module named %s, not present or disabled by cfg", name) + return ntp_cfg = cfg.get('ntp', {}) @@ -69,15 +67,18 @@ def handle(name, cfg, cloud, log, _args): " but not a dictionary type," " is a %s %instead"), type_utils.obj_name(ntp_cfg)) - if 'ntp' not in cfg: - LOG.debug("Skipping module named %s," - "not present or disabled by cfg", name) - return True - - install_ntp(cloud.distro.install_packages, packages=['ntp'], - check_exe="ntpd") rename_ntp_conf() + # ensure when ntp is installed it has a configuration file + # to use instead of starting up with packaged defaults write_ntp_config_template(ntp_cfg, cloud) + 
install_ntp(cloud.distro.install_packages, packages=['ntp'], + check_exe="ntpd") + # if ntp was already installed, it may not have started + try: + reload_ntp(systemd=cloud.distro.uses_systemd()) + except util.ProcessExecutionError as e: + LOG.exception("Failed to reload/start ntp service: %s", e) + raise def install_ntp(install_func, packages=None, check_exe="ntpd"): @@ -89,7 +90,10 @@ def install_ntp(install_func, packages=None, check_exe="ntpd"): install_func(packages) -def rename_ntp_conf(config=NTP_CONF): +def rename_ntp_conf(config=None): + """Rename any existing ntp.conf file and render from template""" + if config is None: # For testing + config = NTP_CONF if os.path.exists(config): util.rename(config, config + ".dist") @@ -107,8 +111,9 @@ def write_ntp_config_template(cfg, cloud): pools = cfg.get('pools', []) if len(servers) == 0 and len(pools) == 0: - LOG.debug('Adding distro default ntp pool servers') pools = generate_server_names(cloud.distro.name) + LOG.debug( + 'Adding distro default ntp pool servers: %s', ','.join(pools)) params = { 'servers': servers, @@ -125,4 +130,14 @@ def write_ntp_config_template(cfg, cloud): templater.render_to_file(template_fn, NTP_CONF, params) + +def reload_ntp(systemd=False): + service = 'ntp' + if systemd: + cmd = ['systemctl', 'reload-or-restart', service] + else: + cmd = ['service', service, 'restart'] + util.subp(cmd, capture=True) + + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index 60e3ab53..ceee952b 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -33,7 +33,10 @@ disabled altogether by setting ``resize_rootfs`` to ``false``. """ import errno +import getopt import os +import re +import shlex import stat from cloudinit.settings import PER_ALWAYS @@ -58,6 +61,62 @@ def _resize_ufs(mount_point, devpth): return ('growfs', devpth) +def _get_dumpfs_output(mount_point): + dumpfs_res, err = util.subp(['dumpfs', '-m', mount_point]) + return dumpfs_res + + +def _get_gpart_output(part): + gpart_res, err = util.subp(['gpart', 'show', part]) + return gpart_res + + +def _can_skip_resize_ufs(mount_point, devpth): + # extract the current fs sector size + """ + # dumpfs -m / + # newfs command for / (/dev/label/rootfs) + newfs -O 2 -U -a 4 -b 32768 -d 32768 -e 4096 -f 4096 -g 16384 + -h 64 -i 8192 -j -k 6408 -m 8 -o time -s 58719232 /dev/label/rootf + """ + cur_fs_sz = None + frag_sz = None + dumpfs_res = _get_dumpfs_output(mount_point) + for line in dumpfs_res.splitlines(): + if not line.startswith('#'): + newfs_cmd = shlex.split(line) + opt_value = 'O:Ua:s:b:d:e:f:g:h:i:jk:m:o:' + optlist, args = getopt.getopt(newfs_cmd[1:], opt_value) + for o, a in optlist: + if o == "-s": + cur_fs_sz = int(a) + if o == "-f": + frag_sz = int(a) + # check the current partition size + """ + # gpart show /dev/da0 +=> 40 62914480 da0 GPT (30G) + 40 1024 1 freebsd-boot (512K) + 1064 58719232 2 freebsd-ufs (28G) + 58720296 3145728 3 freebsd-swap (1.5G) + 61866024 1048496 - free - (512M) + """ + expect_sz = None + m = re.search('^(/dev/.+)p([0-9])$', devpth) + gpart_res = _get_gpart_output(m.group(1)) + for line in gpart_res.splitlines(): + if re.search(r"freebsd-ufs", line): + fields = line.split() + expect_sz = int(fields[1]) + # Normalize the gpart sector size, + # because the size is not exactly the same as fs size. 
+ normal_expect_sz = (expect_sz - expect_sz % (frag_sz / 512)) + if normal_expect_sz == cur_fs_sz: + return True + else: + return False + + # Do not use a dictionary as these commands should be able to be used # for multiple filesystem types if possible, e.g. one command for # ext2, ext3 and ext4. @@ -68,9 +127,40 @@ RESIZE_FS_PREFIXES_CMDS = [ ('ufs', _resize_ufs), ] +RESIZE_FS_PRECHECK_CMDS = { + 'ufs': _can_skip_resize_ufs +} + NOBLOCK = "noblock" +def rootdev_from_cmdline(cmdline): + found = None + for tok in cmdline.split(): + if tok.startswith("root="): + found = tok[5:] + break + if found is None: + return None + + if found.startswith("/dev/"): + return found + if found.startswith("LABEL="): + return "/dev/disk/by-label/" + found[len("LABEL="):] + if found.startswith("UUID="): + return "/dev/disk/by-uuid/" + found[len("UUID="):] + + return "/dev/" + found + + +def can_skip_resize(fs_type, resize_what, devpth): + fstype_lc = fs_type.lower() + for i, func in RESIZE_FS_PRECHECK_CMDS.items(): + if fstype_lc.startswith(i): + return func(resize_what, devpth) + return False + + def handle(name, cfg, _cloud, log, args): if len(args) != 0: resize_root = args[0] @@ -139,6 +229,11 @@ def handle(name, cfg, _cloud, log, args): return resizer = None + if can_skip_resize(fs_type, resize_what, devpth): + log.debug("Skip resize filesystem type %s for %s", + fs_type, resize_what) + return + fstype_lc = fs_type.lower() for (pfix, root_cmd) in RESIZE_FS_PREFIXES_CMDS: if fstype_lc.startswith(pfix): diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py index 59649800..b80d1d36 100644 --- a/cloudinit/config/cc_users_groups.py +++ b/cloudinit/config/cc_users_groups.py @@ -25,28 +25,39 @@ entry of the ``users`` list. Each entry in the ``users`` list, other than a config keys for an entry in ``users`` are as follows: - ``name``: The user's login name - - ``homedir``: Optional. Home dir for user. Default is ``/home/<username>`` - - ``primary-group``: Optional. Primary group for user. Default to new group - named after user. + - ``expiredate``: Optional. Date on which the user's login will be + disabled. Default: none + - ``gecos``: Optional. Comment about the user, usually a comma-separated + string of real name and contact information. Default: none - ``groups``: Optional. Additional groups to add the user to. Default: none - - ``selinux-user``: Optional. SELinux user for user's login. Default to - default SELinux user. - - ``lock_passwd``: Optional. Disable password login. Default: true + - ``homedir``: Optional. Home dir for user. Default is ``/home/<username>`` - ``inactive``: Optional. Mark user inactive. Default: false - - ``passwd``: Hash of user password + - ``lock_passwd``: Optional. Disable password login. Default: true - ``no-create-home``: Optional. Do not create home directory. Default: false - - ``no-user-group``: Optional. Do not create group named after user. - Default: false - ``no-log-init``: Optional. Do not initialize lastlog and faillog for user. Default: false - - ``ssh-import-id``: Optional. SSH id to import for user. Default: none - - ``ssh-autorized-keys``: Optional. List of ssh keys to add to user's + - ``no-user-group``: Optional. Do not create group named after user. + Default: false + - ``passwd``: Hash of user password + - ``primary-group``: Optional. Primary group for user. Default to new group + named after user. + - ``selinux-user``: Optional. SELinux user for user's login. Default to + default SELinux user. + - ``shell``: Optional. 
The user's login shell. The default is to set no + shell, which results in a system-specific default being used. + - ``snapuser``: Optional. Specify an email address to create the user as + a Snappy user through ``snap create-user``. If an Ubuntu SSO account is + associated with the address, username and SSH keys will be requested from + there. Default: none + - ``ssh-authorized-keys``: Optional. List of ssh keys to add to user's authkeys file. Default: none + - ``ssh-import-id``: Optional. SSH id to import for user. Default: none - ``sudo``: Optional. Sudo rule to use, or list of sudo rules to use. Default: none. - ``system``: Optional. Create user as system user with no home directory. Default: false + - ``uid``: Optional. The user's ID. Default: The next available value. .. note:: Specifying a hash of a user's password with ``passwd`` is a security risk @@ -65,23 +76,33 @@ config keys for an entry in ``users`` are as follows: **Config keys**:: groups: - - ubuntu: [foo, bar] - - cloud-users + - <group>: [<user>, <user>] + - <group> users: - default - name: <username> - gecos: <real name> - primary-group: <primary group> - groups: <additional groups> - selinux-user: <selinux username> expiredate: <date> - ssh-import-id: <none/id> + gecos: <comment> + groups: <additional groups> + homedir: <home directory> + inactive: <true/false> lock_passwd: <true/false> + no-create-home: <true/false> + no-log-init: <true/false> + no-user-group: <true/false> passwd: <password> + primary-group: <primary group> + selinux-user: <selinux username> + shell: <shell path> + snapuser: <email> + ssh-authorized-keys: + - <key> + - <key> + ssh-import-id: <id> sudo: <sudo config> - inactive: <true/false> system: <true/false> + uid: <user id> """ # Ensure this is aliased to a name not 'distros' diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 28650b88..f56c0cf7 100755 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -155,6 +155,9 @@ class Distro(object): ns, header=header, render_hwaddress=True) return self.apply_network(contents, bring_up=bring_up) + def generate_fallback_config(self): + return net.generate_fallback_config() + def apply_network_config(self, netconfig, bring_up=False): # apply network config netconfig # This method is preferred to apply_network which only takes diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py index 183e4452..bad112fe 100644 --- a/cloudinit/distros/freebsd.py +++ b/cloudinit/distros/freebsd.py @@ -30,6 +30,7 @@ class Distro(distros.Distro): login_conf_fn_bak = '/etc/login.conf.orig' resolv_conf_fn = '/etc/resolv.conf' ci_sudoers_fn = '/usr/local/etc/sudoers.d/90-cloud-init-users' + default_primary_nic = 'hn0' def __init__(self, name, cfg, paths): distros.Distro.__init__(self, name, cfg, paths) @@ -38,6 +39,8 @@ class Distro(distros.Distro): # should only happen say once per instance...) self._runner = helpers.Runners(paths) self.osfamily = 'freebsd' + self.ipv4_pat = re.compile(r"\s+inet\s+\d+[.]\d+[.]\d+[.]\d+") + cfg['ssh_svcname'] = 'sshd' # Updates a key in /etc/rc.conf. 
def updatercconf(self, key, value): @@ -183,7 +186,6 @@ class Distro(distros.Distro): "gecos": '-c', "primary_group": '-g', "groups": '-G', - "passwd": '-h', "shell": '-s', "inactive": '-E', } @@ -193,19 +195,11 @@ class Distro(distros.Distro): "no_log_init": '--no-log-init', } - redact_opts = ['passwd'] - for key, val in kwargs.items(): if (key in adduser_opts and val and isinstance(val, six.string_types)): adduser_cmd.extend([adduser_opts[key], val]) - # Redact certain fields from the logs - if key in redact_opts: - log_adduser_cmd.extend([adduser_opts[key], 'REDACTED']) - else: - log_adduser_cmd.extend([adduser_opts[key], val]) - elif key in adduser_flags and val: adduser_cmd.append(adduser_flags[key]) log_adduser_cmd.append(adduser_flags[key]) @@ -226,19 +220,21 @@ class Distro(distros.Distro): except Exception as e: util.logexc(LOG, "Failed to create user %s", name) raise e + # Set the password if it is provided + # For security consideration, only hashed passwd is assumed + passwd_val = kwargs.get('passwd', None) + if passwd_val is not None: + self.set_passwd(name, passwd_val, hashed=True) def set_passwd(self, user, passwd, hashed=False): - cmd = ['pw', 'usermod', user] - if hashed: - cmd.append('-H') + hash_opt = "-H" else: - cmd.append('-h') - - cmd.append('0') + hash_opt = "-h" try: - util.subp(cmd, passwd, logstring="chpasswd for %s" % user) + util.subp(['pw', 'usermod', user, hash_opt, '0'], + data=passwd, logstring="chpasswd for %s" % user) except Exception as e: util.logexc(LOG, "Failed to set password for %s", user) raise e @@ -271,6 +267,255 @@ class Distro(distros.Distro): keys = set(kwargs['ssh_authorized_keys']) or [] ssh_util.setup_user_keys(keys, name, options=None) + @staticmethod + def get_ifconfig_list(): + cmd = ['ifconfig', '-l'] + (nics, err) = util.subp(cmd, rcs=[0, 1]) + if len(err): + LOG.warning("Error running %s: %s", cmd, err) + return None + return nics + + @staticmethod + def get_ifconfig_ifname_out(ifname): + cmd = ['ifconfig', ifname] + (if_result, err) = util.subp(cmd, rcs=[0, 1]) + if len(err): + LOG.warning("Error running %s: %s", cmd, err) + return None + return if_result + + @staticmethod + def get_ifconfig_ether(): + cmd = ['ifconfig', '-l', 'ether'] + (nics, err) = util.subp(cmd, rcs=[0, 1]) + if len(err): + LOG.warning("Error running %s: %s", cmd, err) + return None + return nics + + @staticmethod + def get_interface_mac(ifname): + if_result = Distro.get_ifconfig_ifname_out(ifname) + for item in if_result.splitlines(): + if item.find('ether ') != -1: + mac = str(item.split()[1]) + if mac: + return mac + + @staticmethod + def get_devicelist(): + nics = Distro.get_ifconfig_list() + return nics.split() + + @staticmethod + def get_ipv6(): + ipv6 = [] + nics = Distro.get_devicelist() + for nic in nics: + if_result = Distro.get_ifconfig_ifname_out(nic) + for item in if_result.splitlines(): + if item.find("inet6 ") != -1 and item.find("scopeid") == -1: + ipv6.append(nic) + return ipv6 + + def get_ipv4(self): + ipv4 = [] + nics = Distro.get_devicelist() + for nic in nics: + if_result = Distro.get_ifconfig_ifname_out(nic) + for item in if_result.splitlines(): + print(item) + if self.ipv4_pat.match(item): + ipv4.append(nic) + return ipv4 + + def is_up(self, ifname): + if_result = Distro.get_ifconfig_ifname_out(ifname) + pat = "^" + ifname + for item in if_result.splitlines(): + if re.match(pat, item): + flags = item.split('<')[1].split('>')[0] + if flags.find("UP") != -1: + return True + + def _get_current_rename_info(self, check_downable=True): + 
"""Collect information necessary for rename_interfaces.""" + names = Distro.get_devicelist() + bymac = {} + for n in names: + bymac[Distro.get_interface_mac(n)] = { + 'name': n, 'up': self.is_up(n), 'downable': None} + + if check_downable: + nics_with_addresses = set() + ipv6 = self.get_ipv6() + ipv4 = self.get_ipv4() + for bytes_out in (ipv6, ipv4): + for i in ipv6: + nics_with_addresses.update(i) + for i in ipv4: + nics_with_addresses.update(i) + + for d in bymac.values(): + d['downable'] = (d['up'] is False or + d['name'] not in nics_with_addresses) + + return bymac + + def _rename_interfaces(self, renames): + if not len(renames): + LOG.debug("no interfaces to rename") + return + + current_info = self._get_current_rename_info() + + cur_bymac = {} + for mac, data in current_info.items(): + cur = data.copy() + cur['mac'] = mac + cur_bymac[mac] = cur + + def update_byname(bymac): + return dict((data['name'], data) + for data in bymac.values()) + + def rename(cur, new): + util.subp(["ifconfig", cur, "name", new], capture=True) + + def down(name): + util.subp(["ifconfig", name, "down"], capture=True) + + def up(name): + util.subp(["ifconfig", name, "up"], capture=True) + + ops = [] + errors = [] + ups = [] + cur_byname = update_byname(cur_bymac) + tmpname_fmt = "cirename%d" + tmpi = -1 + + for mac, new_name in renames: + cur = cur_bymac.get(mac, {}) + cur_name = cur.get('name') + cur_ops = [] + if cur_name == new_name: + # nothing to do + continue + + if not cur_name: + errors.append("[nic not present] Cannot rename mac=%s to %s" + ", not available." % (mac, new_name)) + continue + + if cur['up']: + msg = "[busy] Error renaming mac=%s from %s to %s" + if not cur['downable']: + errors.append(msg % (mac, cur_name, new_name)) + continue + cur['up'] = False + cur_ops.append(("down", mac, new_name, (cur_name,))) + ups.append(("up", mac, new_name, (new_name,))) + + if new_name in cur_byname: + target = cur_byname[new_name] + if target['up']: + msg = "[busy-target] Error renaming mac=%s from %s to %s." 
+ if not target['downable']: + errors.append(msg % (mac, cur_name, new_name)) + continue + else: + cur_ops.append(("down", mac, new_name, (new_name,))) + + tmp_name = None + while tmp_name is None or tmp_name in cur_byname: + tmpi += 1 + tmp_name = tmpname_fmt % tmpi + + cur_ops.append(("rename", mac, new_name, (new_name, tmp_name))) + target['name'] = tmp_name + cur_byname = update_byname(cur_bymac) + if target['up']: + ups.append(("up", mac, new_name, (tmp_name,))) + + cur_ops.append(("rename", mac, new_name, (cur['name'], new_name))) + cur['name'] = new_name + cur_byname = update_byname(cur_bymac) + ops += cur_ops + + opmap = {'rename': rename, 'down': down, 'up': up} + if len(ops) + len(ups) == 0: + if len(errors): + LOG.debug("unable to do any work for renaming of %s", renames) + else: + LOG.debug("no work necessary for renaming of %s", renames) + else: + LOG.debug("achieving renaming of %s with ops %s", + renames, ops + ups) + + for op, mac, new_name, params in ops + ups: + try: + opmap.get(op)(*params) + except Exception as e: + errors.append( + "[unknown] Error performing %s%s for %s, %s: %s" % + (op, params, mac, new_name, e)) + if len(errors): + raise Exception('\n'.join(errors)) + + def apply_network_config_names(self, netcfg): + renames = [] + for ent in netcfg.get('config', {}): + if ent.get('type') != 'physical': + continue + mac = ent.get('mac_address') + name = ent.get('name') + if not mac: + continue + renames.append([mac, name]) + return self._rename_interfaces(renames) + + @classmethod + def generate_fallback_config(self): + nics = Distro.get_ifconfig_ether() + if nics is None: + LOG.debug("Fail to get network interfaces") + return None + potential_interfaces = nics.split() + connected = [] + for nic in potential_interfaces: + pat = "^" + nic + if_result = Distro.get_ifconfig_ifname_out(nic) + for item in if_result.split("\n"): + if re.match(pat, item): + flags = item.split('<')[1].split('>')[0] + if flags.find("RUNNING") != -1: + connected.append(nic) + if connected: + potential_interfaces = connected + names = list(sorted(potential_interfaces)) + default_pri_nic = Distro.default_primary_nic + if default_pri_nic in names: + names.remove(default_pri_nic) + names.insert(0, default_pri_nic) + target_name = None + target_mac = None + for name in names: + mac = Distro.get_interface_mac(name) + if mac: + target_name = name + target_mac = mac + break + if target_mac and target_name: + nconf = {'config': [], 'version': 1} + nconf['config'].append( + {'type': 'physical', 'name': target_name, + 'mac_address': target_mac, 'subnets': [{'type': 'dhcp'}]}) + return nconf + else: + return None + def _write_network(self, settings): entries = net_util.translate_network(settings) nameservers = [] diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index a072a8d6..8c6cd057 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -393,6 +393,7 @@ def get_interfaces_by_mac(): else: raise ret = {} + empty_mac = '00:00:00:00:00:00' for name in devs: if not interface_has_own_mac(name): continue @@ -404,6 +405,8 @@ def get_interfaces_by_mac(): # some devices may not have a mac (tun0) if not mac: continue + if mac == empty_mac and name != 'lo': + continue if mac in ret: raise RuntimeError( "duplicate mac found! 
both '%s' and '%s' have mac '%s'" % diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py index 7c5d11a7..38b27a52 100755 --- a/cloudinit/net/cmdline.py +++ b/cloudinit/net/cmdline.py @@ -9,41 +9,12 @@ import base64 import glob import gzip import io -import shlex -import sys - -import six from . import get_devicelist from . import read_sys_net_safe from cloudinit import util -PY26 = sys.version_info[0:2] == (2, 6) - - -def _shlex_split(blob): - if PY26 and isinstance(blob, six.text_type): - # Older versions don't support unicode input - blob = blob.encode("utf8") - return shlex.split(blob) - - -def _load_shell_content(content, add_empty=False, empty_val=None): - """Given shell like syntax (key=value\nkey2=value2\n) in content - return the data in dictionary form. If 'add_empty' is True - then add entries in to the returned dictionary for 'VAR=' - variables. Set their value to empty_val.""" - data = {} - for line in _shlex_split(content): - key, value = line.split("=", 1) - if not value: - value = empty_val - if add_empty or value: - data[key] = value - - return data - def _klibc_to_config_entry(content, mac_addrs=None): """Convert a klibc written shell content file to a 'config' entry @@ -63,7 +34,7 @@ def _klibc_to_config_entry(content, mac_addrs=None): if mac_addrs is None: mac_addrs = {} - data = _load_shell_content(content) + data = util.load_shell_content(content) try: name = data['DEVICE'] if 'DEVICE' in data else data['DEVICE6'] except KeyError: @@ -100,6 +71,11 @@ def _klibc_to_config_entry(content, mac_addrs=None): cur_proto = data.get(pre + 'PROTO', proto) subnet = {'type': cur_proto, 'control': 'manual'} + # only populate address for static types. While the rendered config + # may have an address for dhcp, that is not really expected. + if cur_proto == 'static': + subnet['address'] = data[pre + 'ADDR'] + # these fields go right on the subnet for key in ('NETMASK', 'BROADCAST', 'GATEWAY'): if pre + key in data: diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py index 825fe831..a715f3b0 100644 --- a/cloudinit/net/netplan.py +++ b/cloudinit/net/netplan.py @@ -4,7 +4,7 @@ import copy import os from . 
import renderer -from .network_state import subnet_is_ipv6 +from .network_state import mask2cidr, subnet_is_ipv6 from cloudinit import log as logging from cloudinit import util @@ -41,7 +41,7 @@ NET_CONFIG_TO_V2 = { 'bond-num-grat-arp': 'gratuitious-arp', 'bond-primary-reselect': 'primary-reselect-policy', 'bond-updelay': 'up-delay', - 'bond-xmit_hash_policy': 'transmit_hash_policy'}, + 'bond-xmit-hash-policy': 'transmit-hash-policy'}, 'bridge': {'bridge_ageing': 'ageing-time', 'bridge_bridgeprio': 'priority', 'bridge_fd': 'forward-delay', @@ -118,9 +118,10 @@ def _extract_addresses(config, entry): sn_type += '4' entry.update({sn_type: True}) elif sn_type in ['static']: - addr = "%s" % subnet.get('address') - if 'netmask' in subnet: - addr += "/%s" % subnet.get('netmask') + addr = '%s' % subnet.get('address') + netmask = subnet.get('netmask') + if netmask and '/' not in addr: + addr += '/%s' % mask2cidr(netmask) if 'gateway' in subnet and subnet.get('gateway'): gateway = subnet.get('gateway') if ":" in gateway: @@ -137,8 +138,9 @@ def _extract_addresses(config, entry): mtukey += '6' entry.update({mtukey: subnet.get('mtu')}) for route in subnet.get('routes', []): - to_net = "%s/%s" % (route.get('network'), - route.get('netmask')) + network = route.get('network') + netmask = route.get('netmask') + to_net = '%s/%s' % (network, mask2cidr(netmask)) route = { 'via': route.get('gateway'), 'to': to_net, @@ -205,7 +207,7 @@ class Renderer(renderer.Renderer): self._postcmds = config.get('postcmds', False) self.clean_default = config.get('clean_default', True) - def render_network_state(self, target, network_state): + def render_network_state(self, network_state, target): # check network state for version # if v2, then extract network_state.config # else render_v2_from_state @@ -294,7 +296,7 @@ class Renderer(renderer.Renderer): for match in ['bond_', 'bond-']: bond_params = _get_params_dict_by_match(ifcfg, match) for (param, value) in bond_params.items(): - newname = v2_bond_map.get(param) + newname = v2_bond_map.get(param.replace('_', '-')) if newname is None: continue bond_config.update({newname: value}) @@ -345,7 +347,9 @@ class Renderer(renderer.Renderer): 'id': ifcfg.get('vlan_id'), 'link': ifcfg.get('vlan-raw-device') } - + macaddr = ifcfg.get('mac_address', None) + if macaddr is not None: + vlan['macaddress'] = macaddr.lower() _extract_addresses(ifcfg, vlan) vlans.update({ifname: vlan}) diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index db3c3579..9e9c05a0 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -734,9 +734,9 @@ def ipv6mask2cidr(mask): def mask2cidr(mask): - if ':' in mask: + if ':' in str(mask): return ipv6mask2cidr(mask) - elif '.' in mask: + elif '.' 
in str(mask): return ipv4mask2cidr(mask) else: return mask diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 504e4d02..58c5713f 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -59,6 +59,9 @@ class ConfigMap(object): def __setitem__(self, key, value): self._conf[key] = value + def __getitem__(self, key): + return self._conf[key] + def drop(self, key): self._conf.pop(key, None) @@ -83,7 +86,8 @@ class ConfigMap(object): class Route(ConfigMap): """Represents a route configuration.""" - route_fn_tpl = '%(base)s/network-scripts/route-%(name)s' + route_fn_tpl_ipv4 = '%(base)s/network-scripts/route-%(name)s' + route_fn_tpl_ipv6 = '%(base)s/network-scripts/route6-%(name)s' def __init__(self, route_name, base_sysconf_dir): super(Route, self).__init__() @@ -102,9 +106,58 @@ class Route(ConfigMap): return r @property - def path(self): - return self.route_fn_tpl % ({'base': self._base_sysconf_dir, - 'name': self._route_name}) + def path_ipv4(self): + return self.route_fn_tpl_ipv4 % ({'base': self._base_sysconf_dir, + 'name': self._route_name}) + + @property + def path_ipv6(self): + return self.route_fn_tpl_ipv6 % ({'base': self._base_sysconf_dir, + 'name': self._route_name}) + + def is_ipv6_route(self, address): + return ':' in address + + def to_string(self, proto="ipv4"): + # only accept ipv4 and ipv6 + if proto not in ['ipv4', 'ipv6']: + raise ValueError("Unknown protocol '%s'" % (str(proto))) + buf = six.StringIO() + buf.write(_make_header()) + if self._conf: + buf.write("\n") + # need to reindex IPv4 addresses + # (because Route can contain a mix of IPv4 and IPv6) + reindex = -1 + for key in sorted(self._conf.keys()): + if 'ADDRESS' in key: + index = key.replace('ADDRESS', '') + address_value = str(self._conf[key]) + # only accept combinations: + # if proto ipv6 only display ipv6 routes + # if proto ipv4 only display ipv4 routes + # do not add ipv6 routes if proto is ipv4 + # do not add ipv4 routes if proto is ipv6 + # (this array will contain a mix of ipv4 and ipv6) + if proto == "ipv4" and not self.is_ipv6_route(address_value): + netmask_value = str(self._conf['NETMASK' + index]) + gateway_value = str(self._conf['GATEWAY' + index]) + # increase IPv4 index + reindex = reindex + 1 + buf.write("%s=%s\n" % ('ADDRESS' + str(reindex), + _quote_value(address_value))) + buf.write("%s=%s\n" % ('GATEWAY' + str(reindex), + _quote_value(gateway_value))) + buf.write("%s=%s\n" % ('NETMASK' + str(reindex), + _quote_value(netmask_value))) + elif proto == "ipv6" and self.is_ipv6_route(address_value): + netmask_value = str(self._conf['NETMASK' + index]) + gateway_value = str(self._conf['GATEWAY' + index]) + buf.write("%s/%s via %s\n" % (address_value, + netmask_value, + gateway_value)) + + return buf.getvalue() class NetInterface(ConfigMap): @@ -211,65 +264,119 @@ class Renderer(renderer.Renderer): iface_cfg[new_key] = old_value @classmethod - def _render_subnet(cls, iface_cfg, route_cfg, subnet): - subnet_type = subnet.get('type') - if subnet_type == 'dhcp6': - iface_cfg['DHCPV6C'] = True - iface_cfg['IPV6INIT'] = True - iface_cfg['BOOTPROTO'] = 'dhcp' - elif subnet_type in ['dhcp4', 'dhcp']: - iface_cfg['BOOTPROTO'] = 'dhcp' - elif subnet_type == 'static': - iface_cfg['BOOTPROTO'] = 'static' - if subnet_is_ipv6(subnet): - iface_cfg['IPV6ADDR'] = subnet['address'] + def _render_subnets(cls, iface_cfg, subnets): + # setting base values + iface_cfg['BOOTPROTO'] = 'none' + + # modifying base values according to subnets + for i, subnet in enumerate(subnets, 
start=len(iface_cfg.children)): + subnet_type = subnet.get('type') + if subnet_type == 'dhcp6': iface_cfg['IPV6INIT'] = True + iface_cfg['DHCPV6C'] = True + iface_cfg['BOOTPROTO'] = 'dhcp' + elif subnet_type in ['dhcp4', 'dhcp']: + iface_cfg['BOOTPROTO'] = 'dhcp' + elif subnet_type == 'static': + # grep BOOTPROTO sysconfig.txt -A2 | head -3 + # BOOTPROTO=none|bootp|dhcp + # 'bootp' or 'dhcp' cause a DHCP client + # to run on the device. Any other + # value causes any static configuration + # in the file to be applied. + # ==> the following should not be set to 'static' + # but should remain 'none' + # if iface_cfg['BOOTPROTO'] == 'none': + # iface_cfg['BOOTPROTO'] = 'static' + if subnet_is_ipv6(subnet): + iface_cfg['IPV6INIT'] = True else: - iface_cfg['IPADDR'] = subnet['address'] - else: - raise ValueError("Unknown subnet type '%s' found" - " for interface '%s'" % (subnet_type, - iface_cfg.name)) - if 'netmask' in subnet: - iface_cfg['NETMASK'] = subnet['netmask'] - for route in subnet.get('routes', []): - if subnet.get('ipv6'): - gw_cfg = 'IPV6_DEFAULTGW' - else: - gw_cfg = 'GATEWAY' - - if _is_default_route(route): - if ( - (subnet.get('ipv4') and - route_cfg.has_set_default_ipv4) or - (subnet.get('ipv6') and - route_cfg.has_set_default_ipv6) - ): - raise ValueError("Duplicate declaration of default " - "route found for interface '%s'" - % (iface_cfg.name)) - # NOTE(harlowja): ipv6 and ipv4 default gateways - gw_key = 'GATEWAY0' - nm_key = 'NETMASK0' - addr_key = 'ADDRESS0' - # The owning interface provides the default route. - # - # TODO(harlowja): add validation that no other iface has - # also provided the default route? - iface_cfg['DEFROUTE'] = True - if 'gateway' in route: - iface_cfg[gw_cfg] = route['gateway'] - route_cfg.has_set_default = True - else: - gw_key = 'GATEWAY%s' % route_cfg.last_idx - nm_key = 'NETMASK%s' % route_cfg.last_idx - addr_key = 'ADDRESS%s' % route_cfg.last_idx - route_cfg.last_idx += 1 - for (old_key, new_key) in [('gateway', gw_key), - ('netmask', nm_key), - ('network', addr_key)]: - if old_key in route: - route_cfg[new_key] = route[old_key] + raise ValueError("Unknown subnet type '%s' found" + " for interface '%s'" % (subnet_type, + iface_cfg.name)) + + # set IPv4 and IPv6 static addresses + ipv4_index = -1 + ipv6_index = -1 + for i, subnet in enumerate(subnets, start=len(iface_cfg.children)): + subnet_type = subnet.get('type') + if subnet_type == 'dhcp6': + continue + elif subnet_type in ['dhcp4', 'dhcp']: + continue + elif subnet_type == 'static': + if subnet_is_ipv6(subnet): + ipv6_index = ipv6_index + 1 + if 'netmask' in subnet and str(subnet['netmask']) != "": + ipv6_cidr = (subnet['address'] + + '/' + + str(subnet['netmask'])) + else: + ipv6_cidr = subnet['address'] + if ipv6_index == 0: + iface_cfg['IPV6ADDR'] = ipv6_cidr + elif ipv6_index == 1: + iface_cfg['IPV6ADDR_SECONDARIES'] = ipv6_cidr + else: + iface_cfg['IPV6ADDR_SECONDARIES'] = ( + iface_cfg['IPV6ADDR_SECONDARIES'] + + " " + ipv6_cidr) + else: + ipv4_index = ipv4_index + 1 + if ipv4_index == 0: + iface_cfg['IPADDR'] = subnet['address'] + if 'netmask' in subnet: + iface_cfg['NETMASK'] = subnet['netmask'] + else: + iface_cfg['IPADDR' + str(ipv4_index)] = \ + subnet['address'] + if 'netmask' in subnet: + iface_cfg['NETMASK' + str(ipv4_index)] = \ + subnet['netmask'] + + @classmethod + def _render_subnet_routes(cls, iface_cfg, route_cfg, subnets): + for i, subnet in enumerate(subnets, start=len(iface_cfg.children)): + for route in subnet.get('routes', []): + is_ipv6 = subnet.get('ipv6') + + 
if _is_default_route(route): + if ( + (subnet.get('ipv4') and + route_cfg.has_set_default_ipv4) or + (subnet.get('ipv6') and + route_cfg.has_set_default_ipv6) + ): + raise ValueError("Duplicate declaration of default " + "route found for interface '%s'" + % (iface_cfg.name)) + # NOTE(harlowja): ipv6 and ipv4 default gateways + gw_key = 'GATEWAY0' + nm_key = 'NETMASK0' + addr_key = 'ADDRESS0' + # The owning interface provides the default route. + # + # TODO(harlowja): add validation that no other iface has + # also provided the default route? + iface_cfg['DEFROUTE'] = True + if 'gateway' in route: + if is_ipv6: + iface_cfg['IPV6_DEFAULTGW'] = route['gateway'] + route_cfg.has_set_default_ipv6 = True + else: + iface_cfg['GATEWAY'] = route['gateway'] + route_cfg.has_set_default_ipv4 = True + + else: + gw_key = 'GATEWAY%s' % route_cfg.last_idx + nm_key = 'NETMASK%s' % route_cfg.last_idx + addr_key = 'ADDRESS%s' % route_cfg.last_idx + route_cfg.last_idx += 1 + for (old_key, new_key) in [('gateway', gw_key), + ('netmask', nm_key), + ('network', addr_key)]: + if old_key in route: + route_cfg[new_key] = route[old_key] @classmethod def _render_bonding_opts(cls, iface_cfg, iface): @@ -295,15 +402,9 @@ class Renderer(renderer.Renderer): iface_subnets = iface.get("subnets", []) iface_cfg = iface_contents[iface_name] route_cfg = iface_cfg.routes - if len(iface_subnets) == 1: - cls._render_subnet(iface_cfg, route_cfg, iface_subnets[0]) - elif len(iface_subnets) > 1: - for i, isubnet in enumerate(iface_subnets, - start=len(iface_cfg.children)): - iface_sub_cfg = iface_cfg.copy() - iface_sub_cfg.name = "%s:%s" % (iface_name, i) - iface_cfg.children.append(iface_sub_cfg) - cls._render_subnet(iface_sub_cfg, route_cfg, isubnet) + + cls._render_subnets(iface_cfg, iface_subnets) + cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets) @classmethod def _render_bond_interfaces(cls, network_state, iface_contents): @@ -387,7 +488,10 @@ class Renderer(renderer.Renderer): if iface_cfg: contents[iface_cfg.path] = iface_cfg.to_string() if iface_cfg.routes: - contents[iface_cfg.routes.path] = iface_cfg.routes.to_string() + contents[iface_cfg.routes.path_ipv4] = \ + iface_cfg.routes.to_string("ipv4") + contents[iface_cfg.routes.path_ipv6] = \ + iface_cfg.routes.to_string("ipv6") return contents def render_network_state(self, network_state, target=None): diff --git a/cloudinit/settings.py b/cloudinit/settings.py index dbafead5..411960d8 100644 --- a/cloudinit/settings.py +++ b/cloudinit/settings.py @@ -39,7 +39,7 @@ CFG_BUILTIN = { ], 'def_log_file': '/var/log/cloud-init.log', 'log_cfgs': [], - 'syslog_fix_perms': ['syslog:adm', 'root:adm'], + 'syslog_fix_perms': ['syslog:adm', 'root:adm', 'root:wheel'], 'system_info': { 'paths': { 'cloud_dir': '/var/lib/cloud', diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 04358b73..b9458ffa 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -10,6 +10,7 @@ import crypt from functools import partial import os import os.path +import re import time from xml.dom import minidom import xml.etree.ElementTree as ET @@ -32,19 +33,161 @@ BOUNCE_COMMAND = [ # azure systems will always have a resource disk, and 66-azure-ephemeral.rules # ensures that it gets linked to this path. 
RESOURCE_DISK_PATH = '/dev/disk/cloud/azure_resource' +DEFAULT_PRIMARY_NIC = 'eth0' +LEASE_FILE = '/var/lib/dhcp/dhclient.eth0.leases' +DEFAULT_FS = 'ext4' + + +def find_storvscid_from_sysctl_pnpinfo(sysctl_out, deviceid): + # extract the 'X' from dev.storvsc.X. if deviceid matches + """ + dev.storvsc.1.%pnpinfo: + classid=32412632-86cb-44a2-9b5c-50d1417354f5 + deviceid=00000000-0001-8899-0000-000000000000 + """ + for line in sysctl_out.splitlines(): + if re.search(r"pnpinfo", line): + fields = line.split() + if len(fields) >= 3: + columns = fields[2].split('=') + if (len(columns) >= 2 and + columns[0] == "deviceid" and + columns[1].startswith(deviceid)): + comps = fields[0].split('.') + return comps[2] + return None + + +def find_busdev_from_disk(camcontrol_out, disk_drv): + # find the scbusX from 'camcontrol devlist -b' output + # if disk_drv matches the specified disk driver, i.e. blkvsc1 + """ + scbus0 on ata0 bus 0 + scbus1 on ata1 bus 0 + scbus2 on blkvsc0 bus 0 + scbus3 on blkvsc1 bus 0 + scbus4 on storvsc2 bus 0 + scbus5 on storvsc3 bus 0 + scbus-1 on xpt0 bus 0 + """ + for line in camcontrol_out.splitlines(): + if re.search(disk_drv, line): + items = line.split() + return items[0] + return None + + +def find_dev_from_busdev(camcontrol_out, busdev): + # find the daX from 'camcontrol devlist' output + # if busdev matches the specified value, i.e. 'scbus2' + """ + <Msft Virtual CD/ROM 1.0> at scbus1 target 0 lun 0 (cd0,pass0) + <Msft Virtual Disk 1.0> at scbus2 target 0 lun 0 (da0,pass1) + <Msft Virtual Disk 1.0> at scbus3 target 1 lun 0 (da1,pass2) + """ + for line in camcontrol_out.splitlines(): + if re.search(busdev, line): + items = line.split('(') + if len(items) == 2: + dev_pass = items[1].split(',') + return dev_pass[0] + return None + + +def get_dev_storvsc_sysctl(): + try: + sysctl_out, err = util.subp(['sysctl', 'dev.storvsc']) + except util.ProcessExecutionError: + LOG.debug("Fail to execute sysctl dev.storvsc") + return None + return sysctl_out + + +def get_camcontrol_dev_bus(): + try: + camcontrol_b_out, err = util.subp(['camcontrol', 'devlist', '-b']) + except util.ProcessExecutionError: + LOG.debug("Fail to execute camcontrol devlist -b") + return None + return camcontrol_b_out + + +def get_camcontrol_dev(): + try: + camcontrol_out, err = util.subp(['camcontrol', 'devlist']) + except util.ProcessExecutionError: + LOG.debug("Fail to execute camcontrol devlist") + return None + return camcontrol_out + + +def get_resource_disk_on_freebsd(port_id): + g0 = "00000000" + if port_id > 1: + g0 = "00000001" + port_id = port_id - 2 + g1 = "000" + str(port_id) + g0g1 = "{0}-{1}".format(g0, g1) + """ + search 'X' from + 'dev.storvsc.X.%pnpinfo: + classid=32412632-86cb-44a2-9b5c-50d1417354f5 + deviceid=00000000-0001-8899-0000-000000000000' + """ + sysctl_out = get_dev_storvsc_sysctl() + + storvscid = find_storvscid_from_sysctl_pnpinfo(sysctl_out, g0g1) + if not storvscid: + LOG.debug("Fail to find storvsc id from sysctl") + return None + + camcontrol_b_out = get_camcontrol_dev_bus() + camcontrol_out = get_camcontrol_dev() + # try to find /dev/XX from 'blkvsc' device + blkvsc = "blkvsc{0}".format(storvscid) + scbusx = find_busdev_from_disk(camcontrol_b_out, blkvsc) + if scbusx: + devname = find_dev_from_busdev(camcontrol_out, scbusx) + if devname is None: + LOG.debug("Fail to find /dev/daX") + return None + return devname + # try to find /dev/XX from 'storvsc' device + storvsc = "storvsc{0}".format(storvscid) + scbusx = find_busdev_from_disk(camcontrol_b_out, storvsc) + if scbusx: + 
devname = find_dev_from_busdev(camcontrol_out, scbusx) + if devname is None: + LOG.debug("Fail to find /dev/daX") + return None + return devname + return None + + +# update the FreeBSD specific information +if util.is_FreeBSD(): + DEFAULT_PRIMARY_NIC = 'hn0' + LEASE_FILE = '/var/db/dhclient.leases.hn0' + DEFAULT_FS = 'freebsd-ufs' + res_disk = get_resource_disk_on_freebsd(1) + if res_disk is not None: + LOG.debug("resource disk is not None") + RESOURCE_DISK_PATH = "/dev/" + res_disk + else: + LOG.debug("resource disk is None") BUILTIN_DS_CONFIG = { 'agent_command': AGENT_START_BUILTIN, 'data_dir': "/var/lib/waagent", 'set_hostname': True, 'hostname_bounce': { - 'interface': 'eth0', + 'interface': DEFAULT_PRIMARY_NIC, 'policy': True, 'command': BOUNCE_COMMAND, 'hostname_command': 'hostname', }, 'disk_aliases': {'ephemeral0': RESOURCE_DISK_PATH}, - 'dhclient_lease_file': '/var/lib/dhcp/dhclient.eth0.leases', + 'dhclient_lease_file': LEASE_FILE, } BUILTIN_CLOUD_CONFIG = { @@ -53,9 +196,8 @@ BUILTIN_CLOUD_CONFIG = { 'layout': [100], 'overwrite': True}, }, - 'fs_setup': [{'filesystem': 'ext4', - 'device': 'ephemeral0.1', - 'replace_fs': 'ntfs'}], + 'fs_setup': [{'filesystem': DEFAULT_FS, + 'device': 'ephemeral0.1'}], } DS_CFG_PATH = ['datasource', DS_NAME] @@ -190,7 +332,11 @@ class DataSourceAzureNet(sources.DataSource): for cdev in candidates: try: if cdev.startswith("/dev/"): - ret = util.mount_cb(cdev, load_azure_ds_dir) + if util.is_FreeBSD(): + ret = util.mount_cb(cdev, load_azure_ds_dir, + mtype="udf", sync=False) + else: + ret = util.mount_cb(cdev, load_azure_ds_dir) else: ret = load_azure_ds_dir(cdev) @@ -218,11 +364,12 @@ class DataSourceAzureNet(sources.DataSource): LOG.debug("using files cached in %s", ddir) # azure / hyper-v provides random data here - seed = util.load_file("/sys/firmware/acpi/tables/OEM0", - quiet=True, decode=False) - if seed: - self.metadata['random_seed'] = seed - + if not util.is_FreeBSD(): + seed = util.load_file("/sys/firmware/acpi/tables/OEM0", + quiet=True, decode=False) + if seed: + self.metadata['random_seed'] = seed + # TODO. find the seed on FreeBSD platform # now update ds_cfg to reflect contents pass in config user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {}) self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg]) @@ -266,56 +413,71 @@ class DataSourceAzureNet(sources.DataSource): return +def _partitions_on_device(devpath, maxnum=16): + # return a list of tuples (ptnum, path) for each part on devpath + for suff in ("-part", "p", ""): + found = [] + for pnum in range(1, maxnum): + ppath = devpath + suff + str(pnum) + if os.path.exists(ppath): + found.append((pnum, os.path.realpath(ppath))) + if found: + return found + return [] + + +def _has_ntfs_filesystem(devpath): + ntfs_devices = util.find_devs_with("TYPE=ntfs", no_cache=True) + LOG.debug('ntfs_devices found = %s', ntfs_devices) + return os.path.realpath(devpath) in ntfs_devices + + def can_dev_be_reformatted(devpath): - # determine if the ephemeral block device path devpath - # is newly formatted after a resize. + """Determine if block device devpath is newly formatted ephemeral. + + A newly formatted disk will: + a.) have a partition table (dos or gpt) + b.) have 1 partition that is ntfs formatted, or + have 2 partitions with the second partition ntfs formatted. + (larger instances with >2TB ephemeral disk have gpt, and will + have a microsoft reserved partition as part 1. LP: #1686514) + c.) 
the ntfs partition will have no files other than possibly + 'dataloss_warning_readme.txt'""" if not os.path.exists(devpath): return False, 'device %s does not exist' % devpath - realpath = os.path.realpath(devpath) - LOG.debug('Resolving realpath of %s -> %s', devpath, realpath) - - # it is possible that the block device might exist, but the kernel - # have not yet read the partition table and sent events. we udevadm settle - # to hope to resolve that. Better here would probably be to test and see, - # and then settle if we didn't find anything and try again. - if util.which("udevadm"): - util.subp(["udevadm", "settle"]) + LOG.debug('Resolving realpath of %s -> %s', devpath, + os.path.realpath(devpath)) # devpath of /dev/sd[a-z] or /dev/disk/cloud/azure_resource # where partitions are "<devpath>1" or "<devpath>-part1" or "<devpath>p1" - part1path = None - for suff in ("-part", "p", ""): - cand = devpath + suff + "1" - if os.path.exists(cand): - if os.path.exists(devpath + suff + "2"): - msg = ('device %s had more than 1 partition: %s, %s' % - devpath, cand, devpath + suff + "2") - return False, msg - part1path = cand - break - - if part1path is None: + partitions = _partitions_on_device(devpath) + if len(partitions) == 0: return False, 'device %s was not partitioned' % devpath + elif len(partitions) > 2: + msg = ('device %s had 3 or more partitions: %s' % + (devpath, ' '.join([p[1] for p in partitions]))) + return False, msg + elif len(partitions) == 2: + cand_part, cand_path = partitions[1] + else: + cand_part, cand_path = partitions[0] - real_part1path = os.path.realpath(part1path) - ntfs_devices = util.find_devs_with("TYPE=ntfs", no_cache=True) - LOG.debug('ntfs_devices found = %s', ntfs_devices) - if real_part1path not in ntfs_devices: - msg = ('partition 1 (%s -> %s) on device %s was not ntfs formatted' % - (part1path, real_part1path, devpath)) + if not _has_ntfs_filesystem(cand_path): + msg = ('partition %s (%s) on device %s was not ntfs formatted' % + (cand_part, cand_path, devpath)) return False, msg def count_files(mp): ignored = set(['dataloss_warning_readme.txt']) return len([f for f in os.listdir(mp) if f.lower() not in ignored]) - bmsg = ('partition 1 (%s -> %s) on device %s was ntfs formatted' % - (part1path, real_part1path, devpath)) + bmsg = ('partition %s (%s) on device %s was ntfs formatted' % + (cand_part, cand_path, devpath)) try: - file_count = util.mount_cb(part1path, count_files) + file_count = util.mount_cb(cand_path, count_files) except util.MountFailedError as e: - return False, bmsg + ' but mount of %s failed: %s' % (part1path, e) + return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e) if file_count != 0: return False, bmsg + ' but had %d files on it.' 
@@ -633,8 +795,19 @@ def encrypt_pass(password, salt_id="$6$"):
 
 def list_possible_azure_ds_devs():
     # return a sorted list of devices that might have a azure datasource
     devlist = []
-    for fstype in ("iso9660", "udf"):
-        devlist.extend(util.find_devs_with("TYPE=%s" % fstype))
+    if util.is_FreeBSD():
+        cdrom_dev = "/dev/cd0"
+        try:
+            util.subp(["mount", "-o", "ro", "-t", "udf", cdrom_dev,
+                       "/mnt/cdrom/secure"])
+        except util.ProcessExecutionError:
+            LOG.debug("Failed to mount cd")
+            return devlist
+        util.subp(["umount", "/mnt/cdrom/secure"])
+        devlist.append(cdrom_dev)
+    else:
+        for fstype in ("iso9660", "udf"):
+            devlist.extend(util.find_devs_with("TYPE=%s" % fstype))
 
     devlist.sort(reverse=True)
     return devlist
diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py
index e9afda9c..684eac86 100644
--- a/cloudinit/sources/DataSourceGCE.py
+++ b/cloudinit/sources/DataSourceGCE.py
@@ -71,7 +71,7 @@ class DataSourceGCE(sources.DataSource):
             ('availability-zone', ('instance/zone',), True, True),
             ('local-hostname', ('instance/hostname',), True, True),
             ('public-keys', ('project/attributes/sshKeys',
-                             'instance/attributes/sshKeys'), False, True),
+                             'instance/attributes/ssh-keys'), False, True),
             ('user-data', ('instance/attributes/user-data',), False, False),
             ('user-data-encoding', ('instance/attributes/user-data-encoding',),
              False, True),
diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
index f0a6bfce..b64a7f24 100644
--- a/cloudinit/sources/DataSourceOpenStack.py
+++ b/cloudinit/sources/DataSourceOpenStack.py
@@ -60,7 +60,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
         try:
             retries = int(self.ds_cfg.get("retries", retries))
         except Exception:
-            util.logexc(LOG, "Failed to get max wait. using %s", retries)
+            util.logexc(LOG, "Failed to get retries. using %s", retries)
 
         return (max_wait, timeout, retries)
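The GCE hunk above changes only the second, instance-level metadata path; the lookup behavior is unchanged: each path in the tuple is tried in order and the first one that yields a value wins. A rough sketch of that fallback pattern, using a plain dict in place of the real metadata client (the helper name and sample data are invented for illustration):

    def first_available(fetch, paths):
        # try each metadata path in order; return the first non-None hit
        for path in paths:
            value = fetch(path)
            if value is not None:
                return value
        return None

    # instance-level key present, project-level key absent:
    md = {'instance/attributes/ssh-keys': 'alice:ssh-rsa AAAA... alice'}
    print(first_available(md.get, ('project/attributes/sshKeys',
                                   'instance/attributes/ssh-keys')))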
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index 6e01aa47..e22409d1 100644
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -29,6 +29,14 @@ def cd(newdir):
         os.chdir(prevdir)
 
 
+def _get_dhcp_endpoint_option_name():
+    if util.is_FreeBSD():
+        azure_endpoint = "option-245"
+    else:
+        azure_endpoint = "unknown-245"
+    return azure_endpoint
+
+
 class AzureEndpointHttpClient(object):
 
     headers = {
@@ -235,8 +243,9 @@ class WALinuxAgentShim(object):
         leases = []
         content = util.load_file(fallback_lease_file)
         LOG.debug("content is %s", content)
+        option_name = _get_dhcp_endpoint_option_name()
         for line in content.splitlines():
-            if 'unknown-245' in line:
+            if option_name in line:
                 # Example line from Ubuntu
                 # option unknown-245 a8:3f:81:10;
                 leases.append(line.strip(' ').split(' ', 2)[-1].strip(';\n"'))
diff --git a/cloudinit/sources/helpers/digitalocean.py b/cloudinit/sources/helpers/digitalocean.py
index 257989e8..693f8d5c 100644
--- a/cloudinit/sources/helpers/digitalocean.py
+++ b/cloudinit/sources/helpers/digitalocean.py
@@ -162,7 +162,7 @@ def convert_network_configuration(config, dns_servers):
                 continue
 
             sub_part = _get_subnet_part(raw_subnet)
-            if netdef in ('private', 'anchor_ipv4', 'anchor_ipv6'):
+            if nic_type != "public" or "anchor" in netdef:
                 del sub_part['gateway']
 
             subnets.append(sub_part)
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index f7191b09..ad557827 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -624,7 +624,7 @@ class Init(object):
                 return (None, loc)
             if ncfg:
                 return (ncfg, loc)
-        return (net.generate_fallback_config(), "fallback")
+        return (self.distro.generate_fallback_config(), "fallback")
 
     def apply_network_config(self, bring_up):
         netcfg, src = self._find_networking_config()
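The helpers/azure.py change above makes the lease scan portable: dhclient records the Azure endpoint as option unknown-245 on Linux but option-245 on FreeBSD, and _get_dhcp_endpoint_option_name picks the right name before the line loop runs. A standalone sketch of the same extraction, fed a fabricated lease line in the format quoted in the code comment (the function name here is invented):

    def find_endpoint_values(content, option_name):
        # collect the value of each matching option line, trimming the
        # trailing ';' and any surrounding quotes, as the shim does
        leases = []
        for line in content.splitlines():
            if option_name in line:
                leases.append(line.strip(' ').split(' ', 2)[-1].strip(';\n"'))
        return leases

    content = '  option unknown-245 a8:3f:81:10;'
    print(find_endpoint_values(content, 'unknown-245'))  # ['a8:3f:81:10']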
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 22af99dd..135e4608 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -24,6 +24,7 @@ import platform
 import pwd
 import random
 import re
+import shlex
 import shutil
 import socket
 import stat
@@ -75,6 +76,7 @@ CONTAINER_TESTS = (['systemd-detect-virt', '--quiet', '--container'],
 
 PROC_CMDLINE = None
 _LSB_RELEASE = {}
+PY26 = sys.version_info[0:2] == (2, 6)
 
 
 def get_architecture(target=None):
@@ -476,10 +478,11 @@ def decomp_gzip(data, quiet=True, decode=True):
     try:
         buf = six.BytesIO(encode_text(data))
         with contextlib.closing(gzip.GzipFile(None, "rb", 1, buf)) as gh:
+            # E1101 is https://github.com/PyCQA/pylint/issues/1444
             if decode:
-                return decode_binary(gh.read())
+                return decode_binary(gh.read())  # pylint: disable=E1101
             else:
-                return gh.read()
+                return gh.read()  # pylint: disable=E1101
     except Exception as e:
         if quiet:
             return data
@@ -565,6 +568,10 @@ def is_ipv4(instr):
     return len(toks) == 4
 
 
+def is_FreeBSD():
+    return system_info()['platform'].startswith('FreeBSD')
+
+
 def get_cfg_option_bool(yobj, key, default=False):
     if key not in yobj:
         return default
@@ -2055,11 +2062,56 @@ def parse_mtab(path):
     return None
 
 
+def find_freebsd_part(label_part):
+    if label_part.startswith("/dev/label/"):
+        target_label = label_part[5:]
+        (label_part, err) = subp(['glabel', 'status', '-s'])
+        for labels in label_part.split("\n"):
+            items = labels.split()
+            if len(items) > 0 and items[0].startswith(target_label):
+                label_part = items[2]
+                break
+        label_part = str(label_part)
+    return label_part
+
+
+def get_path_dev_freebsd(path, mnt_list):
+    path_found = None
+    for line in mnt_list.split("\n"):
+        items = line.split()
+        if (len(items) > 2 and os.path.exists(items[1] + path)):
+            path_found = line
+            break
+    return path_found
+
+
+def get_mount_info_freebsd(path, log=LOG):
+    (result, err) = subp(['mount', '-p', path], rcs=[0, 1])
+    if len(err):
+        # find a path if the input is not a mount point
+        (mnt_list, err) = subp(['mount', '-p'])
+        path_found = get_path_dev_freebsd(path, mnt_list)
+        if (path_found is None):
+            return None
+        result = path_found
+    ret = result.split()
+    label_part = find_freebsd_part(ret[0])
+    return "/dev/" + label_part, ret[2], ret[1]
+
+
 def parse_mount(path):
     (mountoutput, _err) = subp("mount")
     mount_locs = mountoutput.splitlines()
     for line in mount_locs:
         m = re.search(r'^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$', line)
+        if not m:
+            continue
+        # check whether the dev refers to a label on FreeBSD
+        # for example, if dev is '/dev/label/rootfs', we should
+        # continue finding the real device like '/dev/da0'.
+        devm = re.search('^(/dev/.+)p([0-9])$', m.group(1))
+        if (not devm and is_FreeBSD()):
+            return get_mount_info_freebsd(path)
         devpth = m.group(1)
         mount_point = m.group(2)
         fs_type = m.group(3)
@@ -2336,7 +2388,8 @@ def read_dmi_data(key):
     uname_arch = os.uname()[4]
     if not (uname_arch == "x86_64" or
             (uname_arch.startswith("i") and uname_arch[2:] == "86") or
-            uname_arch == 'aarch64'):
+            uname_arch == 'aarch64' or
+            uname_arch == 'amd64'):
         LOG.debug("dmidata is not supported on %s", uname_arch)
         return None
 
@@ -2374,6 +2427,18 @@ def system_is_snappy():
     # channel.ini is configparser loadable.
     # snappy will move to using /etc/system-image/config.d/*.ini
     # this is certainly not a perfect test, but good enough for now.
+    orpath = "/etc/os-release"
+    try:
+        orinfo = load_shell_content(load_file(orpath, quiet=True))
+        if orinfo.get('ID', '').lower() == "ubuntu-core":
+            return True
+    except ValueError as e:
+        LOG.warning("Unexpected error loading '%s': %s", orpath, e)
+
+    cmdline = get_cmdline()
+    if 'snap_core=' in cmdline:
+        return True
+
     content = load_file("/etc/system-image/channel.ini", quiet=True)
     if 'ubuntu-core' in content.lower():
         return True
@@ -2420,4 +2485,27 @@ def rootdev_from_cmdline(cmdline):
     return "/dev/" + found
 
 
+def load_shell_content(content, add_empty=False, empty_val=None):
+    """Given shell-like syntax (key=value\nkey2=value2\n) in content
+       return the data in dictionary form.  If 'add_empty' is True
+       then add entries into the returned dictionary for 'VAR='
+       variables.  Set their value to empty_val."""
+
+    def _shlex_split(blob):
+        if PY26 and isinstance(blob, six.text_type):
+            # Older versions don't support unicode input
+            blob = blob.encode("utf8")
+        return shlex.split(blob)
+
+    data = {}
+    for line in _shlex_split(content):
+        key, value = line.split("=", 1)
+        if not value:
+            value = empty_val
+        if add_empty or value:
+            data[key] = value
+
+    return data
+
+
 # vi: ts=4 expandtab
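load_shell_content is what the new /etc/os-release probe in system_is_snappy relies on; because the content is tokenized with shlex, quoted values come back clean. A quick usage sketch on a fabricated os-release fragment, assuming this patch is applied so the function imports from cloudinit.util:

    from cloudinit.util import load_shell_content

    # fabricated os-release content; real files carry more fields
    content = 'NAME="Ubuntu Core"\nID=ubuntu-core\nEMPTY=\n'
    data = load_shell_content(content)
    print(data['NAME'])      # Ubuntu Core  (quotes removed by shlex)
    print(data['ID'])        # ubuntu-core
    print('EMPTY' in data)   # False; pass add_empty=True to keep it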