Diffstat (limited to 'cloudinit/config')
-rw-r--r--  cloudinit/config/cc_apt_configure.py |  27
-rw-r--r--  cloudinit/config/cc_disk_setup.py    | 146
-rw-r--r--  cloudinit/config/cc_ntp.py           |  47
-rw-r--r--  cloudinit/config/cc_resizefs.py      |  95
-rw-r--r--  cloudinit/config/cc_users_groups.py  |  59
5 files changed, 280 insertions, 94 deletions
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index 7e751776..177cbcf7 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -65,12 +65,12 @@ take a list of configs, allowing mirrors to be specified on a per-architecture basis. Each config is a dictionary which must have an entry for ``arches``, specifying which architectures that config entry is for. The keyword ``default`` applies to any architecture not explicitly listed. The mirror url -can be specified with the ``url`` key, or a list of mirrors to check can be +can be specified with the ``uri`` key, or a list of mirrors to check can be provided in order, with the first mirror that can be resolved being selected. This allows the same configuration to be used in different environment, with -different hosts used for a local apt mirror. If no mirror is provided by uri or -search, ``search_dns`` may be used to search for dns names in the format -``<distro>-mirror`` in each of the following: +different hosts used for a local apt mirror. If no mirror is provided by +``uri`` or ``search``, ``search_dns`` may be used to search for dns names in +the format ``<distro>-mirror`` in each of the following: - fqdn of this host per cloud metadata - localdomain @@ -282,16 +282,21 @@ def handle(name, ocfg, cloud, log, _): apply_apt(cfg, cloud, target) +def _should_configure_on_empty_apt(): + # if no config was provided, should apt configuration be done? + if util.system_is_snappy(): + return False, "system is snappy." + if not (util.which('apt-get') or util.which('apt')): + return False, "no apt commands." + return True, "Apt is available." + + def apply_apt(cfg, cloud, target): # cfg is the 'apt' top level dictionary already in 'v3' format. if not cfg: - # no config was provided. If apt configuration does not seem - # necessary on this system, then return. - if util.system_is_snappy(): - LOG.debug("Nothing to do: No apt config and running on snappy") - return - if not (util.which('apt-get') or util.which('apt')): - LOG.debug("Nothing to do: No apt config and no apt commands") + should_config, msg = _should_configure_on_empty_apt() + if not should_config: + LOG.debug("Nothing to do: No apt config and %s", msg) return LOG.debug("handling apt config: %s", cfg) diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py index f49386e3..c2b83aea 100644 --- a/cloudinit/config/cc_disk_setup.py +++ b/cloudinit/config/cc_disk_setup.py @@ -68,6 +68,9 @@ specified using ``filesystem``. Using ``overwrite: true`` for filesystems is dangerous and can lead to data loss, so double check the entry in ``fs_setup``. +.. note:: + ``replace_fs`` is ignored unless ``partition`` is ``auto`` or ``any``. 
+ **Internal name:** ``cc_disk_setup`` **Module frequency:** per instance @@ -127,7 +130,7 @@ def handle(_name, cfg, cloud, log, _args): log.debug("Partitioning disks: %s", str(disk_setup)) for disk, definition in disk_setup.items(): if not isinstance(definition, dict): - log.warn("Invalid disk definition for %s" % disk) + log.warning("Invalid disk definition for %s" % disk) continue try: @@ -144,7 +147,7 @@ def handle(_name, cfg, cloud, log, _args): update_fs_setup_devices(fs_setup, cloud.device_name_to_device) for definition in fs_setup: if not isinstance(definition, dict): - log.warn("Invalid file system definition: %s" % definition) + log.warning("Invalid file system definition: %s" % definition) continue try: @@ -199,8 +202,13 @@ def update_fs_setup_devices(disk_setup, tformer): definition['_origname'] = origname definition['device'] = tformed - if part and 'partition' in definition: - definition['_partition'] = definition['partition'] + if part: + # In origname with <dev>.N, N overrides 'partition' key. + if 'partition' in definition: + LOG.warning("Partition '%s' from dotted device name '%s' " + "overrides 'partition' key in %s", part, origname, + definition) + definition['_partition'] = definition['partition'] definition['partition'] = part @@ -423,7 +431,7 @@ def get_dyn_func(*args): raise Exception("No such function %s to call!" % func_name) -def get_mbr_hdd_size(device): +def get_hdd_size(device): try: size_in_bytes, _ = util.subp([BLKDEV_CMD, '--getsize64', device]) sector_size, _ = util.subp([BLKDEV_CMD, '--getss', device]) @@ -433,22 +441,6 @@ def get_mbr_hdd_size(device): return int(size_in_bytes) / int(sector_size) -def get_gpt_hdd_size(device): - out, _ = util.subp([SGDISK_CMD, '-p', device], update_env=LANG_C_ENV) - for line in out.splitlines(): - if line.startswith("Disk"): - return line.split()[2] - raise Exception("Failed to get %s size from sgdisk" % (device)) - - -def get_hdd_size(table_type, device): - """ - Returns the hard disk size. - This works with any disk type, including GPT. - """ - return get_dyn_func("get_%s_hdd_size", table_type, device) - - def check_partition_mbr_layout(device, layout): """ Returns true if the partition layout matches the one on the disk @@ -496,12 +488,35 @@ def check_partition_gpt_layout(device, layout): device, e)) out_lines = iter(out.splitlines()) - # Skip header + # Skip header. Output looks like: + # *************************************************************** + # Found invalid GPT and valid MBR; converting MBR to GPT format + # in memory. + # *************************************************************** + # + # Disk /dev/vdb: 83886080 sectors, 40.0 GiB + # Logical sector size: 512 bytes + # Disk identifier (GUID): 8A7F11AD-3953-491B-8051-077E01C8E9A7 + # Partition table holds up to 128 entries + # First usable sector is 34, last usable sector is 83886046 + # Partitions will be aligned on 2048-sector boundaries + # Total free space is 83476413 sectors (39.8 GiB) + # + # Number Start (sector) End (sector) Size Code Name + # 1 2048 206847 100.0 MiB 0700 Microsoft basic data for line in out_lines: if line.strip().startswith('Number'): break - return [line.strip().split()[-1] for line in out_lines] + codes = [line.strip().split()[5] for line in out_lines] + cleaned = [] + + # user would expect a code '83' to be Linux, but sgdisk outputs 8300. 
+ for code in codes: + if len(code) == 4 and code.endswith("00"): + code = code[0:2] + cleaned.append(code) + return cleaned def check_partition_layout(table_type, device, layout): @@ -515,6 +530,8 @@ def check_partition_layout(table_type, device, layout): found_layout = get_dyn_func( "check_partition_%s_layout", table_type, device, layout) + LOG.debug("called check_partition_%s_layout(%s, %s), returned: %s", + table_type, device, layout, found_layout) if isinstance(layout, bool): # if we are using auto partitioning, or "True" be happy # if a single partition exists. @@ -522,18 +539,17 @@ def check_partition_layout(table_type, device, layout): return True return False - else: - if len(found_layout) != len(layout): - return False - else: - # This just makes sure that the number of requested - # partitions and the type labels are right - for x in range(1, len(layout) + 1): - if isinstance(layout[x - 1], tuple): - _, part_type = layout[x] - if int(found_layout[x]) != int(part_type): - return False - return True + elif len(found_layout) == len(layout): + # This just makes sure that the number of requested + # partitions and the type labels are right + layout_types = [str(x[1]) if isinstance(x, (tuple, list)) else None + for x in layout] + LOG.debug("Layout types=%s. Found types=%s", + layout_types, found_layout) + for itype, ftype in zip(layout_types, found_layout): + if itype is not None and str(ftype) != str(itype): + return False + return True return False @@ -664,14 +680,14 @@ def read_parttbl(device): reliable way to probe the partition table. """ blkdev_cmd = [BLKDEV_CMD, '--rereadpt', device] - udev_cmd = [UDEVADM_CMD, 'settle'] + udevadm_settle() try: - util.subp(udev_cmd) util.subp(blkdev_cmd) - util.subp(udev_cmd) except Exception as e: util.logexc(LOG, "Failed reading the partition table %s" % e) + udevadm_settle() + def exec_mkpart_mbr(device, layout): """ @@ -696,9 +712,11 @@ def exec_mkpart_gpt(device, layout): util.subp([SGDISK_CMD, '-n', '{}:{}:{}'.format(index, start, end), device]) if partition_type is not None: + # convert to a 4 char (or more) string right padded with 0 + # 82 -> 8200. 'Linux' -> 'Linux' + pinput = str(partition_type).ljust(4, "0") util.subp( - [SGDISK_CMD, - '-t', '{}:{}'.format(index, partition_type), device]) + [SGDISK_CMD, '-t', '{}:{}'.format(index, pinput), device]) except Exception: LOG.warning("Failed to partition device %s", device) raise @@ -719,6 +737,24 @@ def exec_mkpart(table_type, device, layout): return get_dyn_func("exec_mkpart_%s", table_type, device, layout) +def udevadm_settle(): + util.subp(['udevadm', 'settle']) + + +def assert_and_settle_device(device): + """Assert that device exists and settle so it is fully recognized.""" + if not os.path.exists(device): + udevadm_settle() + if not os.path.exists(device): + raise RuntimeError("Device %s did not exist and was not created " + "with a udevamd settle." % device) + + # Whether or not the device existed above, it is possible that udev + # events that would populate udev database (for reading by lsdname) have + # not yet finished. So settle again. + udevadm_settle() + + def mkpart(device, definition): """ Creates the partition table. @@ -734,6 +770,7 @@ def mkpart(device, definition): device: the device to work on. 
""" # ensure that we get a real device rather than a symbolic link + assert_and_settle_device(device) device = os.path.realpath(device) LOG.debug("Checking values for %s definition", device) @@ -769,8 +806,8 @@ def mkpart(device, definition): LOG.debug("Skipping partitioning on configured device %s", device) return - LOG.debug("Checking for device size") - device_size = get_hdd_size(table_type, device) + LOG.debug("Checking for device size of %s", device) + device_size = get_hdd_size(device) LOG.debug("Calculating partition layout") part_definition = get_partition_layout(table_type, device_size, layout) @@ -834,6 +871,7 @@ def mkfs(fs_cfg): overwrite = fs_cfg.get('overwrite', False) # ensure that we get a real device rather than a symbolic link + assert_and_settle_device(device) device = os.path.realpath(device) # This allows you to define the default ephemeral or swap @@ -849,7 +887,8 @@ def mkfs(fs_cfg): # Check to see if the fs already exists LOG.debug("Checking device %s", device) check_label, check_fstype, _ = check_fs(device) - LOG.debug("Device %s has %s %s", device, check_label, check_fstype) + LOG.debug("Device '%s' has check_label='%s' check_fstype=%s", + device, check_label, check_fstype) if check_label == label and check_fstype == fs_type: LOG.debug("Existing file system found at %s", device) @@ -910,12 +949,23 @@ def mkfs(fs_cfg): "must be set.", label) # Create the commands + shell = False if fs_cmd: fs_cmd = fs_cfg['cmd'] % { 'label': label, 'filesystem': fs_type, 'device': device, } + shell = True + + if overwrite: + LOG.warning( + "fs_setup:overwrite ignored because cmd was specified: %s", + fs_cmd) + if fs_opts: + LOG.warning( + "fs_setup:extra_opts ignored because cmd was specified: %s", + fs_cmd) else: # Find the mkfs command mkfs_cmd = util.which("mkfs.%s" % fs_type) @@ -936,14 +986,14 @@ def mkfs(fs_cfg): if overwrite or device_type(device) == "disk": fs_cmd.append(lookup_force_flag(fs_type)) - # Add the extends FS options - if fs_opts: - fs_cmd.extend(fs_opts) + # Add the extends FS options + if fs_opts: + fs_cmd.extend(fs_opts) LOG.debug("Creating file system %s on %s", label, device) - LOG.debug(" Using cmd: %s", " ".join(fs_cmd)) + LOG.debug(" Using cmd: %s", str(fs_cmd)) try: - util.subp(fs_cmd) + util.subp(fs_cmd, shell=shell) except Exception as e: raise Exception("Failed to exec of '%s':\n%s" % (fs_cmd, e)) diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py index e33032fd..5cc54536 100644 --- a/cloudinit/config/cc_ntp.py +++ b/cloudinit/config/cc_ntp.py @@ -53,14 +53,12 @@ distros = ['centos', 'debian', 'fedora', 'opensuse', 'ubuntu'] def handle(name, cfg, cloud, log, _args): - """ - Enable and configure ntp + """Enable and configure ntp.""" - ntp: - pools: ['0.{{distro}}.pool.ntp.org', '1.{{distro}}.pool.ntp.org'] - servers: ['192.168.2.1'] - - """ + if 'ntp' not in cfg: + LOG.debug( + "Skipping module named %s, not present or disabled by cfg", name) + return ntp_cfg = cfg.get('ntp', {}) @@ -69,15 +67,18 @@ def handle(name, cfg, cloud, log, _args): " but not a dictionary type," " is a %s %instead"), type_utils.obj_name(ntp_cfg)) - if 'ntp' not in cfg: - LOG.debug("Skipping module named %s," - "not present or disabled by cfg", name) - return True - - install_ntp(cloud.distro.install_packages, packages=['ntp'], - check_exe="ntpd") rename_ntp_conf() + # ensure when ntp is installed it has a configuration file + # to use instead of starting up with packaged defaults write_ntp_config_template(ntp_cfg, cloud) + 
install_ntp(cloud.distro.install_packages, packages=['ntp'], + check_exe="ntpd") + # if ntp was already installed, it may not have started + try: + reload_ntp(systemd=cloud.distro.uses_systemd()) + except util.ProcessExecutionError as e: + LOG.exception("Failed to reload/start ntp service: %s", e) + raise def install_ntp(install_func, packages=None, check_exe="ntpd"): @@ -89,7 +90,10 @@ def install_ntp(install_func, packages=None, check_exe="ntpd"): install_func(packages) -def rename_ntp_conf(config=NTP_CONF): +def rename_ntp_conf(config=None): + """Rename any existing ntp.conf file and render from template""" + if config is None: # For testing + config = NTP_CONF if os.path.exists(config): util.rename(config, config + ".dist") @@ -107,8 +111,9 @@ def write_ntp_config_template(cfg, cloud): pools = cfg.get('pools', []) if len(servers) == 0 and len(pools) == 0: - LOG.debug('Adding distro default ntp pool servers') pools = generate_server_names(cloud.distro.name) + LOG.debug( + 'Adding distro default ntp pool servers: %s', ','.join(pools)) params = { 'servers': servers, @@ -125,4 +130,14 @@ def write_ntp_config_template(cfg, cloud): templater.render_to_file(template_fn, NTP_CONF, params) + +def reload_ntp(systemd=False): + service = 'ntp' + if systemd: + cmd = ['systemctl', 'reload-or-restart', service] + else: + cmd = ['service', service, 'restart'] + util.subp(cmd, capture=True) + + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index 60e3ab53..ceee952b 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -33,7 +33,10 @@ disabled altogether by setting ``resize_rootfs`` to ``false``. """ import errno +import getopt import os +import re +import shlex import stat from cloudinit.settings import PER_ALWAYS @@ -58,6 +61,62 @@ def _resize_ufs(mount_point, devpth): return ('growfs', devpth) +def _get_dumpfs_output(mount_point): + dumpfs_res, err = util.subp(['dumpfs', '-m', mount_point]) + return dumpfs_res + + +def _get_gpart_output(part): + gpart_res, err = util.subp(['gpart', 'show', part]) + return gpart_res + + +def _can_skip_resize_ufs(mount_point, devpth): + # extract the current fs sector size + """ + # dumpfs -m / + # newfs command for / (/dev/label/rootfs) + newfs -O 2 -U -a 4 -b 32768 -d 32768 -e 4096 -f 4096 -g 16384 + -h 64 -i 8192 -j -k 6408 -m 8 -o time -s 58719232 /dev/label/rootf + """ + cur_fs_sz = None + frag_sz = None + dumpfs_res = _get_dumpfs_output(mount_point) + for line in dumpfs_res.splitlines(): + if not line.startswith('#'): + newfs_cmd = shlex.split(line) + opt_value = 'O:Ua:s:b:d:e:f:g:h:i:jk:m:o:' + optlist, args = getopt.getopt(newfs_cmd[1:], opt_value) + for o, a in optlist: + if o == "-s": + cur_fs_sz = int(a) + if o == "-f": + frag_sz = int(a) + # check the current partition size + """ + # gpart show /dev/da0 +=> 40 62914480 da0 GPT (30G) + 40 1024 1 freebsd-boot (512K) + 1064 58719232 2 freebsd-ufs (28G) + 58720296 3145728 3 freebsd-swap (1.5G) + 61866024 1048496 - free - (512M) + """ + expect_sz = None + m = re.search('^(/dev/.+)p([0-9])$', devpth) + gpart_res = _get_gpart_output(m.group(1)) + for line in gpart_res.splitlines(): + if re.search(r"freebsd-ufs", line): + fields = line.split() + expect_sz = int(fields[1]) + # Normalize the gpart sector size, + # because the size is not exactly the same as fs size. 
+ normal_expect_sz = (expect_sz - expect_sz % (frag_sz / 512)) + if normal_expect_sz == cur_fs_sz: + return True + else: + return False + + # Do not use a dictionary as these commands should be able to be used # for multiple filesystem types if possible, e.g. one command for # ext2, ext3 and ext4. @@ -68,9 +127,40 @@ RESIZE_FS_PREFIXES_CMDS = [ ('ufs', _resize_ufs), ] +RESIZE_FS_PRECHECK_CMDS = { + 'ufs': _can_skip_resize_ufs +} + NOBLOCK = "noblock" +def rootdev_from_cmdline(cmdline): + found = None + for tok in cmdline.split(): + if tok.startswith("root="): + found = tok[5:] + break + if found is None: + return None + + if found.startswith("/dev/"): + return found + if found.startswith("LABEL="): + return "/dev/disk/by-label/" + found[len("LABEL="):] + if found.startswith("UUID="): + return "/dev/disk/by-uuid/" + found[len("UUID="):] + + return "/dev/" + found + + +def can_skip_resize(fs_type, resize_what, devpth): + fstype_lc = fs_type.lower() + for i, func in RESIZE_FS_PRECHECK_CMDS.items(): + if fstype_lc.startswith(i): + return func(resize_what, devpth) + return False + + def handle(name, cfg, _cloud, log, args): if len(args) != 0: resize_root = args[0] @@ -139,6 +229,11 @@ def handle(name, cfg, _cloud, log, args): return resizer = None + if can_skip_resize(fs_type, resize_what, devpth): + log.debug("Skip resize filesystem type %s for %s", + fs_type, resize_what) + return + fstype_lc = fs_type.lower() for (pfix, root_cmd) in RESIZE_FS_PREFIXES_CMDS: if fstype_lc.startswith(pfix): diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py index 59649800..b80d1d36 100644 --- a/cloudinit/config/cc_users_groups.py +++ b/cloudinit/config/cc_users_groups.py @@ -25,28 +25,39 @@ entry of the ``users`` list. Each entry in the ``users`` list, other than a config keys for an entry in ``users`` are as follows: - ``name``: The user's login name - - ``homedir``: Optional. Home dir for user. Default is ``/home/<username>`` - - ``primary-group``: Optional. Primary group for user. Default to new group - named after user. + - ``expiredate``: Optional. Date on which the user's login will be + disabled. Default: none + - ``gecos``: Optional. Comment about the user, usually a comma-separated + string of real name and contact information. Default: none - ``groups``: Optional. Additional groups to add the user to. Default: none - - ``selinux-user``: Optional. SELinux user for user's login. Default to - default SELinux user. - - ``lock_passwd``: Optional. Disable password login. Default: true + - ``homedir``: Optional. Home dir for user. Default is ``/home/<username>`` - ``inactive``: Optional. Mark user inactive. Default: false - - ``passwd``: Hash of user password + - ``lock_passwd``: Optional. Disable password login. Default: true - ``no-create-home``: Optional. Do not create home directory. Default: false - - ``no-user-group``: Optional. Do not create group named after user. - Default: false - ``no-log-init``: Optional. Do not initialize lastlog and faillog for user. Default: false - - ``ssh-import-id``: Optional. SSH id to import for user. Default: none - - ``ssh-autorized-keys``: Optional. List of ssh keys to add to user's + - ``no-user-group``: Optional. Do not create group named after user. + Default: false + - ``passwd``: Hash of user password + - ``primary-group``: Optional. Primary group for user. Default to new group + named after user. + - ``selinux-user``: Optional. SELinux user for user's login. Default to + default SELinux user. + - ``shell``: Optional. 
The user's login shell. The default is to set no + shell, which results in a system-specific default being used. + - ``snapuser``: Optional. Specify an email address to create the user as + a Snappy user through ``snap create-user``. If an Ubuntu SSO account is + associated with the address, username and SSH keys will be requested from + there. Default: none + - ``ssh-authorized-keys``: Optional. List of ssh keys to add to user's authkeys file. Default: none + - ``ssh-import-id``: Optional. SSH id to import for user. Default: none - ``sudo``: Optional. Sudo rule to use, or list of sudo rules to use. Default: none. - ``system``: Optional. Create user as system user with no home directory. Default: false + - ``uid``: Optional. The user's ID. Default: The next available value. .. note:: Specifying a hash of a user's password with ``passwd`` is a security risk @@ -65,23 +76,33 @@ config keys for an entry in ``users`` are as follows: **Config keys**:: groups: - - ubuntu: [foo, bar] - - cloud-users + - <group>: [<user>, <user>] + - <group> users: - default - name: <username> - gecos: <real name> - primary-group: <primary group> - groups: <additional groups> - selinux-user: <selinux username> expiredate: <date> - ssh-import-id: <none/id> + gecos: <comment> + groups: <additional groups> + homedir: <home directory> + inactive: <true/false> lock_passwd: <true/false> + no-create-home: <true/false> + no-log-init: <true/false> + no-user-group: <true/false> passwd: <password> + primary-group: <primary group> + selinux-user: <selinux username> + shell: <shell path> + snapuser: <email> + ssh-authorized-keys: + - <key> + - <key> + ssh-import-id: <id> sudo: <sudo config> - inactive: <true/false> system: <true/false> + uid: <user id> """ # Ensure this is aliased to a name not 'distros' |
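
The cc_apt_configure change above corrects the mirror documentation: each per-architecture entry names its ``arches`` and supplies either a single ``uri`` or an ordered ``search`` list, with the first resolvable candidate winning and ``search_dns`` as a further fallback. A minimal sketch of that selection order (hypothetical helper names, resolvability reduced to a DNS lookup, ``search_dns`` omitted; not cloud-init's actual resolver)::

    import socket
    from urllib.parse import urlparse

    def pick_mirror_entry(mirror_configs, arch):
        """Return the entry whose 'arches' lists arch, else the 'default' one."""
        default = None
        for entry in mirror_configs:
            arches = entry.get('arches', [])
            if arch in arches:
                return entry
            if 'default' in arches:
                default = entry
        return default

    def resolve_mirror(entry):
        """Prefer an explicit 'uri'; otherwise return the first 'search'
        candidate whose hostname resolves in DNS."""
        if entry.get('uri'):
            return entry['uri']
        for candidate in entry.get('search', []):
            host = urlparse(candidate).hostname
            try:
                socket.getaddrinfo(host, None)
                return candidate
            except OSError:
                continue
        return None

    mirror_configs = [
        {'arches': ['amd64', 'i386'],
         'search': ['http://mirror.lan/ubuntu',
                    'http://archive.ubuntu.com/ubuntu']},
        {'arches': ['default'],
         'uri': 'http://ports.ubuntu.com/ubuntu-ports'},
    ]
    print(resolve_mirror(pick_mirror_entry(mirror_configs, 'amd64')))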
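
In cc_disk_setup, check_partition_gpt_layout now takes the partition-type code from the ``Code`` column of ``sgdisk -p`` output and strips the trailing ``00`` so a configured ``83`` matches sgdisk's ``8300``, while exec_mkpart_gpt pads short codes with ``ljust(4, "0")`` before calling ``sgdisk -t``. A self-contained sketch of that round trip, fed with a trimmed sample of the output quoted in the diff::

    SAMPLE_SGDISK_P = """\
    Number  Start (sector)    End (sector)  Size       Code  Name
       1            2048          206847   100.0 MiB   0700  Microsoft basic data
       2          206848        83886046    39.9 GiB   8300  Linux filesystem
    """

    def gpt_codes_from_sgdisk(output):
        """Collect the Code column, trimming 4-digit codes that end in 00
        (sgdisk's 8300) back to the short form users configure (83)."""
        lines = iter(output.splitlines())
        for line in lines:
            if line.strip().startswith('Number'):
                break
        codes = []
        for line in lines:
            code = line.strip().split()[5]
            if len(code) == 4 and code.endswith('00'):
                code = code[:2]
            codes.append(code)
        return codes

    def gpt_code_for_sgdisk(partition_type):
        """Right-pad a short type code for sgdisk -t (82 -> 8200); longer
        values such as named types pass through unchanged."""
        return str(partition_type).ljust(4, '0')

    print(gpt_codes_from_sgdisk(SAMPLE_SGDISK_P))   # ['07', '83']
    print(gpt_code_for_sgdisk(82))                  # '8200'
    print(gpt_code_for_sgdisk('Linux'))             # 'Linux'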
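
The new assert_and_settle_device helper settles udev before concluding that a device node is missing, and settles again afterwards so later probes see a populated udev database. The same check/settle/recheck pattern, using the standard library in place of cloud-init's util.subp::

    import os
    import subprocess

    def udevadm_settle():
        # Block until the udev event queue is empty.
        subprocess.check_call(['udevadm', 'settle'])

    def assert_and_settle_device(device):
        """Raise if device does not exist even after letting udev catch up."""
        if not os.path.exists(device):
            udevadm_settle()
            if not os.path.exists(device):
                raise RuntimeError(
                    "Device %s did not appear after udevadm settle" % device)
        # Even when the node already exists, pending udev events may not have
        # populated the database yet, so settle once more before probing it.
        udevadm_settle()

    # assert_and_settle_device('/dev/vdb')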
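
mkfs now runs a user-supplied ``cmd`` through a shell after %-interpolating ``label``, ``filesystem`` and ``device``, warning that ``overwrite`` and ``extra_opts`` are ignored in that case; otherwise it builds an argv list. A simplified sketch of just that command-assembly decision (the real module also looks up per-filesystem label and force flags; the ext-style ``-L`` flag here is an assumption)::

    import logging

    LOG = logging.getLogger(__name__)

    def build_mkfs_command(fs_cfg, device):
        """Return (cmd, shell) for creating the filesystem in fs_cfg."""
        label = fs_cfg.get('label')
        fs_type = fs_cfg.get('filesystem')
        fs_opts = fs_cfg.get('extra_opts', [])

        if fs_cfg.get('cmd'):
            # User-supplied command wins and is run through a shell.
            cmd = fs_cfg['cmd'] % {
                'label': label, 'filesystem': fs_type, 'device': device}
            if fs_cfg.get('overwrite'):
                LOG.warning("fs_setup:overwrite ignored because cmd was given")
            if fs_opts:
                LOG.warning("fs_setup:extra_opts ignored because cmd was given")
            return cmd, True

        cmd = ['mkfs.%s' % fs_type, device]
        if label:
            cmd.extend(['-L', label])   # assumption: -L sets the label
        cmd.extend(fs_opts)
        return cmd, False

    print(build_mkfs_command(
        {'filesystem': 'ext4', 'label': 'data', 'extra_opts': ['-m', '0']},
        '/dev/vdb1'))
    print(build_mkfs_command(
        {'filesystem': 'ext4', 'label': 'data', 'overwrite': True,
         'cmd': 'mkfs -t %(filesystem)s -L %(label)s %(device)s'},
        '/dev/vdb1'))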
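
The reworked cc_ntp handler renders the configuration before installing the package, so a freshly installed daemon never starts with packaged defaults, and then restarts the service. The new reload_ntp helper amounts to the following (shown with subprocess in place of util.subp)::

    import subprocess

    def reload_ntp(systemd=False, service='ntp'):
        """Restart (or reload) ntp so a pre-written config file is read."""
        if systemd:
            cmd = ['systemctl', 'reload-or-restart', service]
        else:
            cmd = ['service', service, 'restart']
        # capture_output mirrors util.subp(cmd, capture=True)
        subprocess.run(cmd, check=True, capture_output=True)

    # reload_ntp(systemd=True)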
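
In cc_resizefs, _can_skip_resize_ufs compares the filesystem size reported by ``dumpfs -m`` (the ``-s`` value of the printed newfs command) with the ``freebsd-ufs`` partition size from ``gpart show``, after rounding the partition size down to a whole number of fragments. The final comparison, reduced to a small function and fed with the values from the transcripts quoted in the diff::

    def can_skip_ufs_resize(cur_fs_sz, frag_sz, expect_sz, sector_size=512):
        """Return True when the UFS filesystem already fills its partition.

        cur_fs_sz and frag_sz come from the newfs line printed by
        'dumpfs -m' (-s and -f); expect_sz is the freebsd-ufs partition
        size in sectors from 'gpart show'.
        """
        frag_sectors = frag_sz // sector_size
        normal_expect_sz = expect_sz - expect_sz % frag_sectors
        return normal_expect_sz == cur_fs_sz

    # Values from the transcripts quoted above:
    #   newfs ... -f 4096 ... -s 58719232 ...
    #   1064  58719232  2  freebsd-ufs  (28G)
    print(can_skip_ufs_resize(cur_fs_sz=58719232, frag_sz=4096,
                              expect_sz=58719232))   # True -> skip growfs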
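
cc_resizefs also gains rootdev_from_cmdline, which maps the kernel ``root=`` parameter to a device path, expanding ``LABEL=`` and ``UUID=`` forms to their /dev/disk/by-* equivalents. The same logic as the version added above, with a short usage example::

    def rootdev_from_cmdline(cmdline):
        """Return the root device path implied by a kernel command line."""
        found = None
        for tok in cmdline.split():
            if tok.startswith("root="):
                found = tok[len("root="):]
                break
        if found is None:
            return None
        if found.startswith("/dev/"):
            return found
        if found.startswith("LABEL="):
            return "/dev/disk/by-label/" + found[len("LABEL="):]
        if found.startswith("UUID="):
            return "/dev/disk/by-uuid/" + found[len("UUID="):]
        return "/dev/" + found

    print(rootdev_from_cmdline("BOOT_IMAGE=/vmlinuz root=UUID=1234-abcd ro"))
    # -> /dev/disk/by-uuid/1234-abcd
    print(rootdev_from_cmdline("root=vda1 ro quiet"))
    # -> /dev/vda1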
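
The reordered cc_users_groups reference now documents ``shell``, ``snapuser`` and ``uid`` and fixes the ``ssh-authorized-keys`` spelling. For orientation, a single users entry built from several of the listed keys, written as the dictionary cloud-init would receive once the YAML is parsed (all values illustrative)::

    user_entry = {
        'name': 'jdoe',
        'gecos': 'Jane Doe',
        'primary-group': 'jdoe',
        'groups': 'adm, sudo',
        'shell': '/bin/bash',
        'lock_passwd': True,
        'ssh-authorized-keys': ['ssh-ed25519 AAAA... jdoe@example.com'],
        'sudo': 'ALL=(ALL) NOPASSWD:ALL',
        'snapuser': None,   # or an Ubuntu SSO email for snap create-user
        'uid': 1001,
    }
    print(sorted(user_entry))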
