diff options
66 files changed, 2395 insertions, 648 deletions
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 2b59d10a..0aa97dd4 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,17 +1,19 @@ ## Proposed Commit Message <!-- Include a proposed commit message because all PRs are squash merged --> -> summary: no more than 70 characters -> -> A description of what the change being made is and why it is being -> made, if the summary line is insufficient. The blank line above is -> required. This should be wrapped at 72 characters, but otherwise has -> no particular length requirements. -> -> If you need to write multiple paragraphs, feel free. -> -> LP: #NNNNNNN (replace with the appropriate bug reference or remove -> this line entirely if there is no associated bug) +``` +summary: no more than 70 characters + +A description of what the change being made is and why it is being +made, if the summary line is insufficient. The blank line above is +required. This should be wrapped at 72 characters, but otherwise has +no particular length requirements. + +If you need to write multiple paragraphs, feel free. 
+ +LP: #NNNNNNN (replace with the appropriate bug reference or remove +this line entirely if there is no associated bug) +``` ## Additional Context <!-- If relevant --> diff --git a/.travis.yml b/.travis.yml index 2fad49f3..f15752bc 100644 --- a/.travis.yml +++ b/.travis.yml @@ -33,96 +33,17 @@ install: script: - tox +env: + TOXENV=py3 + PYTEST_ADDOPTS=-v # List all tests run by pytest + matrix: fast_finish: true - allow_failures: - - name: "Integration Tests (WIP)" include: - python: 3.6 - env: - TOXENV=py3 - PYTEST_ADDOPTS=-v # List all tests run by pytest - - if: NOT branch =~ /^ubuntu\// - cache: - - directories: - - lxd_images - - chroots - before_cache: - - | - # Find the most recent image file - latest_file="$(sudo ls -Art /var/snap/lxd/common/lxd/images/ | tail -n 1)" - # This might be <hash>.rootfs or <hash>, normalise - latest_file="$(basename $latest_file .rootfs)" - # Find all files with that prefix and copy them to our cache dir - sudo find /var/snap/lxd/common/lxd/images/ -name $latest_file* -print -exec cp {} "$TRAVIS_BUILD_DIR/lxd_images/" \; - install: - - git fetch --unshallow - - sudo apt-get install -y --install-recommends sbuild ubuntu-dev-tools fakeroot tox debhelper - - pip install . 
- - pip install tox - # bionic has lxd from deb installed, remove it first to ensure - # pylxd talks only to the lxd from snap - - sudo apt remove --purge lxd lxd-client - - sudo rm -Rf /var/lib/lxd - - sudo snap install lxd - - sudo lxd init --auto - - sudo mkdir --mode=1777 -p /var/snap/lxd/common/consoles - # Move any cached lxd images into lxd's image dir - - sudo find "$TRAVIS_BUILD_DIR/lxd_images/" -type f -print -exec mv {} /var/snap/lxd/common/lxd/images/ \; - - sudo usermod -a -G lxd $USER - - sudo sbuild-adduser $USER - - cp /usr/share/doc/sbuild/examples/example.sbuildrc /home/$USER/.sbuildrc - script: - # Ubuntu LTS: Build - - ./packages/bddeb -S -d --release xenial - - | - needs_caching=false - if [ -e "$TRAVIS_BUILD_DIR/chroots/xenial-amd64.tar" ]; then - # If we have a cached chroot, move it into place - sudo mkdir -p /var/lib/schroot/chroots/xenial-amd64 - sudo tar --sparse --xattrs --preserve-permissions --numeric-owner -xf "$TRAVIS_BUILD_DIR/chroots/xenial-amd64.tar" -C /var/lib/schroot/chroots/xenial-amd64 - # Write its configuration - cat > sbuild-xenial-amd64 << EOM - [xenial-amd64] - description=xenial-amd64 - groups=sbuild,root,admin - root-groups=sbuild,root,admin - # Uncomment these lines to allow members of these groups to access - # the -source chroots directly (useful for automated updates, etc). - #source-root-users=sbuild,root,admin - #source-root-groups=sbuild,root,admin - type=directory - profile=sbuild - union-type=overlay - directory=/var/lib/schroot/chroots/xenial-amd64 - EOM - sudo mv sbuild-xenial-amd64 /etc/schroot/chroot.d/ - sudo chown root /etc/schroot/chroot.d/sbuild-xenial-amd64 - # And ensure it's up-to-date. 
- before_pkgs="$(sudo schroot -c source:xenial-amd64 -d / dpkg -l | sha256sum)" - sudo schroot -c source:xenial-amd64 -d / -- sh -c "apt-get update && apt-get -qqy upgrade" - after_pkgs=$(sudo schroot -c source:xenial-amd64 -d / dpkg -l | sha256sum) - if [ "$before_pkgs" != "$after_pkgs" ]; then - needs_caching=true - fi - else - # Otherwise, create the chroot - sudo -E su $USER -c 'mk-sbuild xenial' - needs_caching=true - fi - # If there are changes to the schroot (or it's entirely new), - # tar up the schroot (to preserve ownership/permissions) and - # move it into the cached dir; no need to compress it because - # Travis will do that anyway - if [ "$needs_caching" = "true" ]; then - sudo tar --sparse --xattrs --xattrs-include=* -cf "$TRAVIS_BUILD_DIR/chroots/xenial-amd64.tar" -C /var/lib/schroot/chroots/xenial-amd64 . - fi - # Use sudo to get a new shell where we're in the sbuild group - - sudo -E su $USER -c 'sbuild --nolog --no-run-lintian --verbose --dist=xenial cloud-init_*.dsc' - # Ubuntu LTS: Integration - - sg lxd -c 'tox -e citest -- run --verbose --preserve-data --data-dir results --os-name xenial --test modules/apt_configure_sources_list.yaml --test modules/ntp_servers --test modules/set_password_list --test modules/user_groups --deb cloud-init_*_all.deb' - - name: "Integration Tests (WIP)" + - name: "Integration Tests" if: NOT branch =~ /^ubuntu\// + env: {} cache: - directories: - lxd_images @@ -220,3 +141,9 @@ matrix: env: TOXENV=pylint - python: 3.6 env: TOXENV=doc + # Test all supported Python versions (but at the end, so we schedule + # longer-running jobs first) + - python: 3.9 + - python: 3.8 + - python: 3.7 + - python: 3.5 @@ -531,7 +531,7 @@ - docs: add additional details to per-instance/once [Joshua Powers] - Update doc-requirements.txt [Joshua Powers] - doc-requirements: add missing dep [Joshua Powers] - - dhcp: Support RedHat dhcp rfc3442 lease format for option 121 (#76) + - dhcp: Support Red Hat dhcp rfc3442 lease format for option 121 
(#76) [Eric Lafontaine] (LP: #1850642) - network_state: handle empty v1 config (#45) (LP: #1852496) - docs: Add document on how to report bugs [Joshua Powers] diff --git a/HACKING.rst b/HACKING.rst index 8a12e3e3..623b3136 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -98,6 +98,11 @@ The cloud-init team will review the two merge proposals and verify that the CLA has been signed for the Launchpad user and record the associated GitHub account. +.. note:: + If you are a first time contributor, you will not need to touch + Launchpad to contribute to cloud-init: all new CLA signatures are + handled as part of the GitHub pull request process described above. + Do these things for each feature or bug ======================================= @@ -173,154 +178,11 @@ Cloud Config Modules * Any new modules should use underscores in any new config options and not hyphens (e.g. `new_option` and *not* `new-option`). -.. _unit_testing: - -Testing ------------- - -cloud-init has both unit tests and integration tests. Unit tests can -be found in-tree alongside the source code, as well as -at ``tests/unittests``. Integration tests can be found at -``tests/integration_tests``. Documentation specifically for integration -tests can be found on the :ref:`integration_tests` page, but -the guidelines specified below apply to both types of tests. - -cloud-init uses `pytest`_ to run its tests, and has tests written both -as ``unittest.TestCase`` sub-classes and as un-subclassed pytest tests. 
-The following guidelines should be followed: - -* For ease of organisation and greater accessibility for developers not - familiar with pytest, all cloud-init unit tests must be contained - within test classes - - * Put another way, module-level test functions should not be used - -* pytest test classes should use `pytest fixtures`_ to share - functionality instead of inheritance - -* As all tests are contained within classes, it is acceptable to mix - ``TestCase`` test classes and pytest test classes within the same - test file - - * These can be easily distinguished by their definition: pytest - classes will not use inheritance at all (e.g. - `TestGetPackageMirrorInfo`_), whereas ``TestCase`` classes will - subclass (indirectly) from ``TestCase`` (e.g. - `TestPrependBaseCommands`_) - -* pytest tests should use bare ``assert`` statements, to take advantage - of pytest's `assertion introspection`_ - - * For ``==`` and other commutative assertions, the expected value - should be placed before the value under test: - ``assert expected_value == function_under_test()`` - -* As we still support Ubuntu 16.04 (Xenial Xerus), we can only use - pytest features that are available in v2.8.7. This is an - inexhaustive list of ways in which this may catch you out: - - * Support for using ``yield`` in ``pytest.fixture`` functions was - only introduced in `pytest 3.0`_. Such functions must instead use - the ``pytest.yield_fixture`` decorator. - - * Only the following built-in fixtures are available - [#fixture-list]_: - - * ``cache`` - * ``capfd`` - * ``caplog`` (provided by ``python3-pytest-catchlog`` on xenial) - * ``capsys`` - * ``monkeypatch`` - * ``pytestconfig`` - * ``record_xml_property`` - * ``recwarn`` - * ``tmpdir_factory`` - * ``tmpdir`` - - * On xenial, the objects returned by the ``tmpdir`` fixture cannot be - used where paths are required; they are rejected as invalid paths. - You must instead use their ``.strpath`` attribute. 
- - * For example, instead of - ``util.write_file(tmpdir.join("some_file"), ...)``, you should - write ``util.write_file(tmpdir.join("some_file").strpath, ...)``. - - * The `pytest.param`_ function cannot be used. It was introduced in - pytest 3.1, which means it is not available on xenial. The more - limited mechanism it replaced was removed in pytest 4.0, so is not - available in focal or later. The only available alternatives are - to write mark-requiring test instances as completely separate - tests, without utilising parameterisation, or to apply the mark to - the entire parameterized test (and therefore every test instance). - -* Variables/parameter names for ``Mock`` or ``MagicMock`` instances - should start with ``m_`` to clearly distinguish them from non-mock - variables - - * For example, ``m_readurl`` (which would be a mock for ``readurl``) - -* The ``assert_*`` methods that are available on ``Mock`` and - ``MagicMock`` objects should be avoided, as typos in these method - names may not raise ``AttributeError`` (and so can cause tests to - silently pass). An important exception: if a ``Mock`` is - `autospecced`_ then misspelled assertion methods *will* raise an - ``AttributeError``, so these assertion methods may be used on - autospecced ``Mock`` objects. 
- - For non-autospecced ``Mock`` s, these substitutions can be used - (``m`` is assumed to be a ``Mock``): - - * ``m.assert_any_call(*args, **kwargs)`` => ``assert - mock.call(*args, **kwargs) in m.call_args_list`` - * ``m.assert_called()`` => ``assert 0 != m.call_count`` - * ``m.assert_called_once()`` => ``assert 1 == m.call_count`` - * ``m.assert_called_once_with(*args, **kwargs)`` => ``assert - [mock.call(*args, **kwargs)] == m.call_args_list`` - * ``m.assert_called_with(*args, **kwargs)`` => ``assert - mock.call(*args, **kwargs) == m.call_args_list[-1]`` - * ``m.assert_has_calls(call_list, any_order=True)`` => ``for call in - call_list: assert call in m.call_args_list`` - - * ``m.assert_has_calls(...)`` and ``m.assert_has_calls(..., - any_order=False)`` are not easily replicated in a single - statement, so their use when appropriate is acceptable. - - * ``m.assert_not_called()`` => ``assert 0 == m.call_count`` - -* Test arguments should be ordered as follows: - - * ``mock.patch`` arguments. When used as a decorator, ``mock.patch`` - partially applies its generated ``Mock`` object as the first - argument, so these arguments must go first. - * ``pytest.mark.parametrize`` arguments, in the order specified to - the ``parametrize`` decorator. These arguments are also provided - by a decorator, so it's natural that they sit next to the - ``mock.patch`` arguments. - * Fixture arguments, alphabetically. These are not provided by a - decorator, so they are last, and their order has no defined - meaning, so we default to alphabetical. 
- -* It follows from this ordering of test arguments (so that we retain - the property that arguments left-to-right correspond to decorators - bottom-to-top) that test decorators should be ordered as follows: - - * ``pytest.mark.parametrize`` - * ``mock.patch`` - -* When there are multiple patch calls in a test file for the module it - is testing, it may be desirable to capture the shared string prefix - for these patch calls in a module-level variable. If used, such - variables should be named ``M_PATH`` or, for datasource tests, - ``DS_PATH``. - -.. _pytest: https://docs.pytest.org/ -.. _pytest fixtures: https://docs.pytest.org/en/latest/fixture.html -.. _TestGetPackageMirrorInfo: https://github.com/canonical/cloud-init/blob/42f69f410ab8850c02b1f53dd67c132aa8ef64f5/cloudinit/distros/tests/test_init.py\#L15 -.. _TestPrependBaseCommands: https://github.com/canonical/cloud-init/blob/master/cloudinit/tests/test_subp.py#L9 -.. _assertion introspection: https://docs.pytest.org/en/latest/assert.html -.. _pytest 3.0: https://docs.pytest.org/en/latest/changelog.html#id1093 -.. _pytest.param: https://docs.pytest.org/en/latest/reference.html#pytest-param -.. _autospecced: https://docs.python.org/3.8/library/unittest.mock.html#autospeccing +Tests +----- + +Submissions to cloud-init must include testing. See :ref:`testing` for +details on these requirements. Type Annotations ---------------- @@ -344,13 +206,6 @@ variable annotations specified in `PEP-526`_ were introduced in Python .. _PEP-484: https://www.python.org/dev/peps/pep-0484/ .. _PEP-526: https://www.python.org/dev/peps/pep-0526/ -.. [#fixture-list] This list of fixtures (with markup) can be - reproduced by running:: - - py.test-3 --fixtures -q | grep "^[^ -]" | grep -v '\(no\|capturelog\)' | sort | sed 's/.*/* ``\0``/' - - in a xenial lxd container with python3-pytest-catchlog installed. 
- Feature Flags ------------- diff --git a/cloud-tests-requirements.txt b/cloud-tests-requirements.txt index b4cd18d5..eecab63e 100644 --- a/cloud-tests-requirements.txt +++ b/cloud-tests-requirements.txt @@ -10,7 +10,7 @@ boto3==1.14.53 # ssh communication paramiko==2.7.2 -cryptography==3.1 +cryptography==3.2 # lxd backend pylxd==2.2.11 diff --git a/cloudinit/cmd/devel/net_convert.py b/cloudinit/cmd/devel/net_convert.py index 80d217ca..0668ffa3 100755 --- a/cloudinit/cmd/devel/net_convert.py +++ b/cloudinit/cmd/devel/net_convert.py @@ -28,11 +28,13 @@ def get_parser(parser=None): if not parser: parser = argparse.ArgumentParser(prog=NAME, description=__doc__) parser.add_argument("-p", "--network-data", type=open, - metavar="PATH", required=True) + metavar="PATH", required=True, + help="The network configuration to read") parser.add_argument("-k", "--kind", choices=['eni', 'network_data.json', 'yaml', 'azure-imds', 'vmware-imc'], - required=True) + required=True, + help="The format of the given network config") parser.add_argument("-d", "--directory", metavar="PATH", help="directory to place output in", @@ -50,7 +52,8 @@ def get_parser(parser=None): help='enable debug logging to stderr.') parser.add_argument("-O", "--output-kind", choices=['eni', 'netplan', 'sysconfig'], - required=True) + required=True, + help="The network config format to emit") return parser diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py index a5446da7..baf1381f 100644 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py @@ -1,4 +1,3 @@ -#!/usr/bin/python # # Copyright (C) 2012 Canonical Ltd. # Copyright (C) 2012 Hewlett-Packard Development Company, L.P. 
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index 73d8719f..bb8a1278 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -389,7 +389,7 @@ PRIMARY_ARCH_MIRRORS = {"PRIMARY": "http://archive.ubuntu.com/ubuntu/", PORTS_MIRRORS = {"PRIMARY": "http://ports.ubuntu.com/ubuntu-ports", "SECURITY": "http://ports.ubuntu.com/ubuntu-ports"} PRIMARY_ARCHES = ['amd64', 'i386'] -PORTS_ARCHES = ['s390x', 'arm64', 'armhf', 'powerpc', 'ppc64el'] +PORTS_ARCHES = ['s390x', 'arm64', 'armhf', 'powerpc', 'ppc64el', 'riscv64'] def get_default_mirrors(arch=None, target=None): diff --git a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py index 3c453d91..bd7bead9 100644 --- a/cloudinit/config/cc_ca_certs.py +++ b/cloudinit/config/cc_ca_certs.py @@ -25,7 +25,7 @@ can be removed from the system with the configuration option **Module frequency:** per instance -**Supported distros:** alpine, debian, ubuntu +**Supported distros:** alpine, debian, ubuntu, rhel **Config keys**:: @@ -44,60 +44,104 @@ import os from cloudinit import subp from cloudinit import util -CA_CERT_PATH = "/usr/share/ca-certificates/" -CA_CERT_FILENAME = "cloud-init-ca-certs.crt" -CA_CERT_CONFIG = "/etc/ca-certificates.conf" -CA_CERT_SYSTEM_PATH = "/etc/ssl/certs/" -CA_CERT_FULL_PATH = os.path.join(CA_CERT_PATH, CA_CERT_FILENAME) +DEFAULT_CONFIG = { + 'ca_cert_path': '/usr/share/ca-certificates/', + 'ca_cert_filename': 'cloud-init-ca-certs.crt', + 'ca_cert_config': '/etc/ca-certificates.conf', + 'ca_cert_system_path': '/etc/ssl/certs/', + 'ca_cert_update_cmd': ['update-ca-certificates'] +} +DISTRO_OVERRIDES = { + 'rhel': { + 'ca_cert_path': '/usr/share/pki/ca-trust-source/', + 'ca_cert_filename': 'anchors/cloud-init-ca-certs.crt', + 'ca_cert_config': None, + 'ca_cert_system_path': '/etc/pki/ca-trust/', + 'ca_cert_update_cmd': ['update-ca-trust'] + } +} -distros = ['alpine', 'debian', 'ubuntu'] +distros = ['alpine', 
'debian', 'ubuntu', 'rhel'] -def update_ca_certs(): + +def _distro_ca_certs_configs(distro_name): + """Return a distro-specific ca_certs config dictionary + + @param distro_name: String providing the distro class name. + @returns: Dict of distro configurations for ca-cert. + """ + cfg = DISTRO_OVERRIDES.get(distro_name, DEFAULT_CONFIG) + cfg['ca_cert_full_path'] = os.path.join(cfg['ca_cert_path'], + cfg['ca_cert_filename']) + return cfg + + +def update_ca_certs(distro_cfg): """ Updates the CA certificate cache on the current machine. + + @param distro_cfg: A hash providing _distro_ca_certs_configs function. """ - subp.subp(["update-ca-certificates"], capture=False) + subp.subp(distro_cfg['ca_cert_update_cmd'], capture=False) -def add_ca_certs(certs): +def add_ca_certs(distro_cfg, certs): """ Adds certificates to the system. To actually apply the new certificates you must also call L{update_ca_certs}. + @param distro_cfg: A hash providing _distro_ca_certs_configs function. @param certs: A list of certificate strings. """ - if certs: - # First ensure they are strings... - cert_file_contents = "\n".join([str(c) for c in certs]) - util.write_file(CA_CERT_FULL_PATH, cert_file_contents, mode=0o644) - - if os.stat(CA_CERT_CONFIG).st_size == 0: - # If the CA_CERT_CONFIG file is empty (i.e. all existing - # CA certs have been deleted) then simply output a single - # line with the cloud-init cert filename. - out = "%s\n" % CA_CERT_FILENAME - else: - # Append cert filename to CA_CERT_CONFIG file. - # We have to strip the content because blank lines in the file - # causes subsequent entries to be ignored. (LP: #1077020) - orig = util.load_file(CA_CERT_CONFIG) - cur_cont = '\n'.join([line for line in orig.splitlines() - if line != CA_CERT_FILENAME]) - out = "%s\n%s\n" % (cur_cont.rstrip(), CA_CERT_FILENAME) - util.write_file(CA_CERT_CONFIG, out, omode="wb") - - -def remove_default_ca_certs(distro_name): + if not certs: + return + # First ensure they are strings... 
+ cert_file_contents = "\n".join([str(c) for c in certs]) + util.write_file(distro_cfg['ca_cert_full_path'], + cert_file_contents, + mode=0o644) + update_cert_config(distro_cfg) + + +def update_cert_config(distro_cfg): + """ + Update Certificate config file to add the file path managed cloud-init + + @param distro_cfg: A hash providing _distro_ca_certs_configs function. + """ + if distro_cfg['ca_cert_config'] is None: + return + if os.stat(distro_cfg['ca_cert_config']).st_size == 0: + # If the CA_CERT_CONFIG file is empty (i.e. all existing + # CA certs have been deleted) then simply output a single + # line with the cloud-init cert filename. + out = "%s\n" % distro_cfg['ca_cert_filename'] + else: + # Append cert filename to CA_CERT_CONFIG file. + # We have to strip the content because blank lines in the file + # causes subsequent entries to be ignored. (LP: #1077020) + orig = util.load_file(distro_cfg['ca_cert_config']) + cr_cont = '\n'.join([line for line in orig.splitlines() + if line != distro_cfg['ca_cert_filename']]) + out = "%s\n%s\n" % (cr_cont.rstrip(), + distro_cfg['ca_cert_filename']) + util.write_file(distro_cfg['ca_cert_config'], out, omode="wb") + + +def remove_default_ca_certs(distro_name, distro_cfg): """ Removes all default trusted CA certificates from the system. To actually apply the change you must also call L{update_ca_certs}. + + @param distro_name: String providing the distro class name. + @param distro_cfg: A hash providing _distro_ca_certs_configs function. 
""" - util.delete_dir_contents(CA_CERT_PATH) - util.delete_dir_contents(CA_CERT_SYSTEM_PATH) - util.write_file(CA_CERT_CONFIG, "", mode=0o644) + util.delete_dir_contents(distro_cfg['ca_cert_path']) + util.delete_dir_contents(distro_cfg['ca_cert_system_path']) + util.write_file(distro_cfg['ca_cert_config'], "", mode=0o644) - if distro_name != 'alpine': + if distro_name in ['debian', 'ubuntu']: debconf_sel = ( "ca-certificates ca-certificates/trust_new_crts " + "select no") subp.subp(('debconf-set-selections', '-'), debconf_sel) @@ -120,22 +164,23 @@ def handle(name, cfg, cloud, log, _args): return ca_cert_cfg = cfg['ca-certs'] + distro_cfg = _distro_ca_certs_configs(cloud.distro.name) # If there is a remove-defaults option set to true, remove the system # default trusted CA certs first. if ca_cert_cfg.get("remove-defaults", False): log.debug("Removing default certificates") - remove_default_ca_certs(cloud.distro.name) + remove_default_ca_certs(cloud.distro.name, distro_cfg) # If we are given any new trusted CA certs to add, add them. if "trusted" in ca_cert_cfg: trusted_certs = util.get_cfg_option_list(ca_cert_cfg, "trusted") if trusted_certs: log.debug("Adding %d certificates" % len(trusted_certs)) - add_ca_certs(trusted_certs) + add_ca_certs(distro_cfg, trusted_certs) # Update the system with the new cert configuration. log.debug("Updating certificates") - update_ca_certs() + update_ca_certs(distro_cfg) # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py index 7beb11ca..466dad03 100644 --- a/cloudinit/config/cc_resolv_conf.py +++ b/cloudinit/config/cc_resolv_conf.py @@ -14,12 +14,12 @@ Resolv Conf This module is intended to manage resolv.conf in environments where early configuration of resolv.conf is necessary for further bootstrapping and/or where configuration management such as puppet or chef own dns configuration. 
-As Debian/Ubuntu will, by default, utilize resolvconf, and similarly RedHat +As Debian/Ubuntu will, by default, utilize resolvconf, and similarly Red Hat will use sysconfig, this module is likely to be of little use unless those are configured correctly. .. note:: - For RedHat with sysconfig, be sure to set PEERDNS=no for all DHCP + For Red Hat with sysconfig, be sure to set PEERDNS=no for all DHCP enabled NICs. .. note:: diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py index 28d62e9d..693317c2 100644 --- a/cloudinit/config/cc_rh_subscription.py +++ b/cloudinit/config/cc_rh_subscription.py @@ -5,15 +5,15 @@ # This file is part of cloud-init. See LICENSE file for license information. """ -RedHat Subscription -------------------- +Red Hat Subscription +-------------------- **Summary:** register red hat enterprise linux based system -Register a RedHat system either by username and password *or* activation and +Register a Red Hat system either by username and password *or* activation and org. Following a sucessful registration, you can auto-attach subscriptions, set the service level, add subscriptions based on pool id, enable/disable yum repositories based on repo id, and alter the rhsm_baseurl and server-hostname -in ``/etc/rhsm/rhs.conf``. For more details, see the ``Register RedHat +in ``/etc/rhsm/rhs.conf``. For more details, see the ``Register Red Hat Subscription`` example config. **Internal name:** ``cc_rh_subscription`` diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py index 4fb9b44e..911789c7 100644 --- a/cloudinit/config/cc_seed_random.py +++ b/cloudinit/config/cc_seed_random.py @@ -24,15 +24,19 @@ Configuration for this module is under the ``random_seed`` config key. The optionally be specified in encoded form, with the encoding specified in ``encoding``. +If the cloud provides its own random seed data, it will be appended to ``data`` +before it is written to ``file``. + .. 
note:: when using a multiline value for ``data`` or specifying binary data, be sure to follow yaml syntax and use the ``|`` and ``!binary`` yaml format specifiers when appropriate -Instead of specifying a data string, a command can be run to generate/collect -the data to be written. The command should be specified as a list of args in -the ``command`` key. If a command is specified that cannot be run, no error -will be reported unless ``command_required`` is set to true. +If the ``command`` key is specified, the given command will be executed. This +will happen after ``file`` has been populated. That command's environment will +contain the value of the ``file`` key as ``RANDOM_SEED_FILE``. If a command is +specified that cannot be run, no error will be reported unless +``command_required`` is set to true. For example, to use ``pollinate`` to gather data from a remote entropy server and write it to ``/dev/urandom``, the following could be diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py index 967be168..378a6daa 100644 --- a/cloudinit/distros/arch.py +++ b/cloudinit/distros/arch.py @@ -152,6 +152,8 @@ class Distro(distros.Distro): elif args and isinstance(args, list): cmd.extend(args) + if command == "upgrade": + command = "-u" if command: cmd.append(command) diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py index 0074691b..a89e5ad2 100644 --- a/cloudinit/net/eni.py +++ b/cloudinit/net/eni.py @@ -387,6 +387,8 @@ class Renderer(renderer.Renderer): if k == 'network': if ':' in route[k]: route_line += ' -A inet6' + elif route.get('prefix') == 32: + route_line += ' -host' else: route_line += ' -net' if 'prefix' in route: diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index a930e612..99a4bae4 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -396,6 +396,13 @@ class Renderer(renderer.Renderer): # Only IPv6 is DHCP, IPv4 may be static iface_cfg['BOOTPROTO'] = 'dhcp6' iface_cfg['DHCLIENT6_MODE'] = 
'managed' + # only if rhel AND dhcpv6 stateful + elif (flavor == 'rhel' and + subnet_type == 'ipv6_dhcpv6-stateful'): + iface_cfg['BOOTPROTO'] = 'dhcp' + iface_cfg['DHCPV6C'] = True + iface_cfg['IPV6INIT'] = True + iface_cfg['IPV6_AUTOCONF'] = False else: iface_cfg['IPV6INIT'] = True # Configure network settings using DHCPv6 diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 04ff2131..090dd66b 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -651,6 +651,10 @@ class DataSourceAzure(sources.DataSource): LOG.debug('Retrieving public SSH keys') ssh_keys = [] try: + raise KeyError( + "Not using public SSH keys from IMDS" + ) + # pylint:disable=unreachable ssh_keys = [ public_key['keyData'] for public_key @@ -983,6 +987,7 @@ class DataSourceAzure(sources.DataSource): if nl_sock: nl_sock.close() + @azure_ds_telemetry_reporter def _poll_imds(self): """Poll IMDS for the new provisioning data until we get a valid response. Then return the returned JSON object.""" @@ -1271,6 +1276,10 @@ class DataSourceAzure(sources.DataSource): pubkey_info = None try: + raise KeyError( + "Not using public SSH keys from IMDS" + ) + # pylint:disable=unreachable public_keys = self.metadata['imds']['compute']['publicKeys'] LOG.debug( 'Successfully retrieved %s key(s) from IMDS', @@ -1969,6 +1978,7 @@ def _generate_network_config_from_imds_metadata(imds_metadata) -> dict: netconfig = {'version': 2, 'ethernets': {}} network_metadata = imds_metadata['network'] for idx, intf in enumerate(network_metadata['interface']): + has_ip_address = False # First IPv4 and/or IPv6 address will be obtained via DHCP. # Any additional IPs of each type will be set as static # addresses. 
@@ -1978,6 +1988,11 @@ def _generate_network_config_from_imds_metadata(imds_metadata) -> dict: 'dhcp6': False} for addr_type in ('ipv4', 'ipv6'): addresses = intf.get(addr_type, {}).get('ipAddress', []) + # If there are no available IP addresses, then we don't + # want to add this interface to the generated config. + if not addresses: + continue + has_ip_address = True if addr_type == 'ipv4': default_prefix = '24' else: @@ -1998,7 +2013,7 @@ def _generate_network_config_from_imds_metadata(imds_metadata) -> dict: dev_config['addresses'].append( '{ip}/{prefix}'.format( ip=privateIp, prefix=netPrefix)) - if dev_config: + if dev_config and has_ip_address: mac = ':'.join(re.findall(r'..', intf['macAddress'])) dev_config.update({ 'match': {'macaddress': mac.lower()}, diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index 741c140a..bbeada0b 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -16,6 +16,7 @@ from xml.dom import minidom from cloudinit import dmi from cloudinit import log as logging +from cloudinit import safeyaml from cloudinit import sources from cloudinit import subp from cloudinit import util @@ -47,6 +48,7 @@ LOG = logging.getLogger(__name__) CONFGROUPNAME_GUESTCUSTOMIZATION = "deployPkg" GUESTCUSTOMIZATION_ENABLE_CUST_SCRIPTS = "enable-custom-scripts" +VMWARE_IMC_DIR = "/var/run/vmware-imc" class DataSourceOVF(sources.DataSource): @@ -99,9 +101,7 @@ class DataSourceOVF(sources.DataSource): if not self.vmware_customization_supported: LOG.debug("Skipping the check for " "VMware Customization support") - elif not util.get_cfg_option_bool( - self.sys_cfg, "disable_vmware_customization", True): - + else: search_paths = ( "/usr/lib/vmware-tools", "/usr/lib64/vmware-tools", "/usr/lib/open-vm-tools", "/usr/lib64/open-vm-tools") @@ -119,7 +119,9 @@ class DataSourceOVF(sources.DataSource): # When the VM is powered on, the "VMware Tools" daemon # copies the customization specification file 
to # /var/run/vmware-imc directory. cloud-init code needs - # to search for the file in that directory. + # to search for the file in that directory which indicates + # that required metadata and userdata files are now + # present. max_wait = get_max_wait_from_cfg(self.ds_cfg) vmwareImcConfigFilePath = util.log_time( logfunc=LOG.debug, @@ -129,26 +131,83 @@ class DataSourceOVF(sources.DataSource): else: LOG.debug("Did not find the customization plugin.") + md_path = None if vmwareImcConfigFilePath: + imcdirpath = os.path.dirname(vmwareImcConfigFilePath) + cf = ConfigFile(vmwareImcConfigFilePath) + self._vmware_cust_conf = Config(cf) LOG.debug("Found VMware Customization Config File at %s", vmwareImcConfigFilePath) - nicspath = wait_for_imc_cfg_file( - filename="nics.txt", maxwait=10, naplen=5) + try: + (md_path, ud_path, nicspath) = collect_imc_file_paths( + self._vmware_cust_conf) + except FileNotFoundError as e: + _raise_error_status( + "File(s) missing in directory", + e, + GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, + vmwareImcConfigFilePath, + self._vmware_cust_conf) else: LOG.debug("Did not find VMware Customization Config File") - else: - LOG.debug("Customization for VMware platform is disabled.") - if vmwareImcConfigFilePath: + # Honor disable_vmware_customization setting on metadata absent + if not md_path: + if util.get_cfg_option_bool(self.sys_cfg, + "disable_vmware_customization", + True): + LOG.debug( + "Customization for VMware platform is disabled.") + # reset vmwareImcConfigFilePath to None to avoid + # customization for VMware platform + vmwareImcConfigFilePath = None + + use_raw_data = bool(vmwareImcConfigFilePath and md_path) + if use_raw_data: + set_gc_status(self._vmware_cust_conf, "Started") + LOG.debug("Start to load cloud-init meta data and user data") + try: + (md, ud, cfg, network) = load_cloudinit_data(md_path, ud_path) + + if network: + self._network_config = network + else: + self._network_config = ( + 
self.distro.generate_fallback_config() + ) + + except safeyaml.YAMLError as e: + _raise_error_status( + "Error parsing the cloud-init meta data", + e, + GuestCustErrorEnum.GUESTCUST_ERROR_WRONG_META_FORMAT, + vmwareImcConfigFilePath, + self._vmware_cust_conf) + except Exception as e: + _raise_error_status( + "Error loading cloud-init configuration", + e, + GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, + vmwareImcConfigFilePath, + self._vmware_cust_conf) + + self._vmware_cust_found = True + found.append('vmware-tools') + + util.del_dir(imcdirpath) + set_customization_status( + GuestCustStateEnum.GUESTCUST_STATE_DONE, + GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS) + set_gc_status(self._vmware_cust_conf, "Successful") + + elif vmwareImcConfigFilePath: + # Load configuration from vmware_imc self._vmware_nics_to_enable = "" try: - cf = ConfigFile(vmwareImcConfigFilePath) - self._vmware_cust_conf = Config(cf) set_gc_status(self._vmware_cust_conf, "Started") (md, ud, cfg) = read_vmware_imc(self._vmware_cust_conf) self._vmware_nics_to_enable = get_nics_to_enable(nicspath) - imcdirpath = os.path.dirname(vmwareImcConfigFilePath) product_marker = self._vmware_cust_conf.marker_id hasmarkerfile = check_marker_exists( product_marker, os.path.join(self.paths.cloud_dir, 'data')) @@ -357,7 +416,7 @@ class DataSourceOVFNet(DataSourceOVF): def get_max_wait_from_cfg(cfg): - default_max_wait = 90 + default_max_wait = 15 max_wait_cfg_option = 'vmware_cust_file_max_wait' max_wait = default_max_wait @@ -684,4 +743,83 @@ def _raise_error_status(prefix, error, event, config_file, conf): util.del_dir(os.path.dirname(config_file)) raise error + +def load_cloudinit_data(md_path, ud_path): + """ + Load the cloud-init meta data, user data, cfg and network from the + given files + + @return: 4-tuple of configuration + metadata, userdata, cfg={}, network + + @raises: FileNotFoundError if md_path or ud_path are absent + """ + LOG.debug('load meta data from: %s: user data from: %s', + md_path, 
ud_path) + md = {} + ud = None + network = None + + md = safeload_yaml_or_dict(util.load_file(md_path)) + + if 'network' in md: + network = md['network'] + + if ud_path: + ud = util.load_file(ud_path).replace("\r", "") + return md, ud, {}, network + + +def safeload_yaml_or_dict(data): + ''' + The meta data could be JSON or YAML. Since YAML is a strict superset of + JSON, we will unmarshal the data as YAML. If data is None then a new + dictionary is returned. + ''' + if not data: + return {} + return safeyaml.load(data) + + +def collect_imc_file_paths(cust_conf): + ''' + collect all the other imc files. + + metadata is preferred to nics.txt configuration data. + + If metadata file exists because it is specified in customization + configuration, then metadata is required and userdata is optional. + + @return a 3-tuple containing desired configuration file paths if present + Expected returns: + 1. user provided metadata and userdata (md_path, ud_path, None) + 2. user provided metadata (md_path, None, None) + 3. user-provided network config (None, None, nics_path) + 4. 
No config found (None, None, None) + ''' + md_path = None + ud_path = None + nics_path = None + md_file = cust_conf.meta_data_name + if md_file: + md_path = os.path.join(VMWARE_IMC_DIR, md_file) + if not os.path.exists(md_path): + raise FileNotFoundError("meta data file is not found: %s" + % md_path) + + ud_file = cust_conf.user_data_name + if ud_file: + ud_path = os.path.join(VMWARE_IMC_DIR, ud_file) + if not os.path.exists(ud_path): + raise FileNotFoundError("user data file is not found: %s" + % ud_path) + else: + nics_path = os.path.join(VMWARE_IMC_DIR, "nics.txt") + if not os.path.exists(nics_path): + LOG.debug('%s does not exist.', nics_path) + nics_path = None + + return md_path, ud_path, nics_path + + # vi: ts=4 expandtab diff --git a/cloudinit/sources/helpers/vmware/imc/config.py b/cloudinit/sources/helpers/vmware/imc/config.py index 7109aef3..bdfab5a0 100644 --- a/cloudinit/sources/helpers/vmware/imc/config.py +++ b/cloudinit/sources/helpers/vmware/imc/config.py @@ -27,6 +27,8 @@ class Config(object): UTC = 'DATETIME|UTC' POST_GC_STATUS = 'MISC|POST-GC-STATUS' DEFAULT_RUN_POST_SCRIPT = 'MISC|DEFAULT-RUN-POST-CUST-SCRIPT' + CLOUDINIT_META_DATA = 'CLOUDINIT|METADATA' + CLOUDINIT_USER_DATA = 'CLOUDINIT|USERDATA' def __init__(self, configFile): self._configFile = configFile @@ -130,4 +132,14 @@ class Config(object): raise ValueError('defaultRunPostScript value should be yes/no') return defaultRunPostScript == 'yes' + @property + def meta_data_name(self): + """Return the name of cloud-init meta data.""" + return self._configFile.get(Config.CLOUDINIT_META_DATA, None) + + @property + def user_data_name(self): + """Return the name of cloud-init user data.""" + return self._configFile.get(Config.CLOUDINIT_USER_DATA, None) + # vi: ts=4 expandtab diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_error.py b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py index 65ae7390..96d839b8 100644 --- a/cloudinit/sources/helpers/vmware/imc/guestcust_error.py +++ 
b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py @@ -11,5 +11,6 @@ class GuestCustErrorEnum(object): GUESTCUST_ERROR_SUCCESS = 0 GUESTCUST_ERROR_SCRIPT_DISABLED = 6 + GUESTCUST_ERROR_WRONG_META_FORMAT = 9 # vi: ts=4 expandtab diff --git a/doc/man/cloud-init.1 b/doc/man/cloud-init.1 index 9b52dc8d..3fde4148 100644 --- a/doc/man/cloud-init.1 +++ b/doc/man/cloud-init.1 @@ -10,7 +10,7 @@ cloud-init \- Cloud instance initialization Cloud-init provides a mechanism for cloud instance initialization. This is done by identifying the cloud platform that is in use, reading provided cloud metadata and optional vendor and user -data, and then intializing the instance as requested. +data, and then initializing the instance as requested. Generally, this command is not normally meant to be run directly by the user. However, some subcommands may useful for development or diff --git a/doc/rtd/index.rst b/doc/rtd/index.rst index ddcb0b31..10e8228f 100644 --- a/doc/rtd/index.rst +++ b/doc/rtd/index.rst @@ -75,6 +75,7 @@ Having trouble? We would like to help! topics/dir_layout.rst topics/analyze.rst topics/docs.rst + topics/testing.rst topics/integration_tests.rst topics/cloud_tests.rst diff --git a/doc/rtd/topics/datasources/aliyun.rst b/doc/rtd/topics/datasources/aliyun.rst index 3f4f40ca..587bd4f4 100644 --- a/doc/rtd/topics/datasources/aliyun.rst +++ b/doc/rtd/topics/datasources/aliyun.rst @@ -12,6 +12,21 @@ The Alibaba Cloud metadata service is available at the well known url Alibaba Cloud ECS on `metadata <https://www.alibabacloud.com/help/zh/faq-detail/49122.htm>`__. +Configuration +------------- +The following configuration can be set for the datasource in system +configuration (in ``/etc/cloud/cloud.cfg`` or ``/etc/cloud/cloud.cfg.d/``). + +An example configuration with the default values is provided below: + +.. 
sourcecode:: yaml + + datasource: + AliYun: + metadata_urls: ["http://100.100.100.200"] + timeout: 50 + max_wait: 120 + Versions ^^^^^^^^ Like the EC2 metadata service, Alibaba Cloud's metadata service provides diff --git a/doc/rtd/topics/datasources/cloudstack.rst b/doc/rtd/topics/datasources/cloudstack.rst index a24de34f..325aeeaf 100644 --- a/doc/rtd/topics/datasources/cloudstack.rst +++ b/doc/rtd/topics/datasources/cloudstack.rst @@ -46,8 +46,6 @@ An example configuration with the default values is provided below: CloudStack: max_wait: 120 timeout: 50 - datasource_list: - - CloudStack .. _Apache CloudStack: http://cloudstack.apache.org/ diff --git a/doc/rtd/topics/datasources/ovf.rst b/doc/rtd/topics/datasources/ovf.rst index 6256e624..85b0c377 100644 --- a/doc/rtd/topics/datasources/ovf.rst +++ b/doc/rtd/topics/datasources/ovf.rst @@ -13,6 +13,15 @@ source code tree in doc/sources/ovf Configuration ------------- +The following configuration can be set for the datasource in system +configuration (in `/etc/cloud/cloud.cfg` or `/etc/cloud/cloud.cfg.d/`). + +The settings that may be configured are: + + * vmware_cust_file_max_wait: the maximum amount of clock time in seconds that + should be spent waiting for vmware customization files. (default: 15) + + On VMware platforms, VMTools use is required for OVF datasource configuration settings as well as vCloud and vSphere admin configuration. 
User could change the VMTools configuration options with command:: diff --git a/doc/rtd/topics/debugging.rst b/doc/rtd/topics/debugging.rst index 0d416f32..fb3006fe 100644 --- a/doc/rtd/topics/debugging.rst +++ b/doc/rtd/topics/debugging.rst @@ -1,6 +1,6 @@ -******************************** -Testing and debugging cloud-init -******************************** +******************** +Debugging cloud-init +******************** Overview ======== diff --git a/doc/rtd/topics/examples.rst b/doc/rtd/topics/examples.rst index 81860f85..97fd616d 100644 --- a/doc/rtd/topics/examples.rst +++ b/doc/rtd/topics/examples.rst @@ -149,8 +149,8 @@ Disk setup :language: yaml :linenos: -Register RedHat Subscription -============================ +Register Red Hat Subscription +============================= .. literalinclude:: ../../examples/cloud-config-rh_subscription.txt :language: yaml diff --git a/doc/rtd/topics/format.rst b/doc/rtd/topics/format.rst index d03e4caf..fa8aa925 100644 --- a/doc/rtd/topics/format.rst +++ b/doc/rtd/topics/format.rst @@ -23,9 +23,11 @@ Using a mime-multi part file, the user can specify more than one type of data. For example, both a user data script and a cloud-config type could be specified. -Supported content-types are listed from the cloud-init subcommand make-mime:: +Supported content-types are listed from the cloud-init subcommand make-mime: - % cloud-init devel make-mime --list-types +.. code-block:: shell-session + + $ cloud-init devel make-mime --list-types cloud-boothook cloud-config cloud-config-archive @@ -47,9 +49,11 @@ The cloud-init subcommand can generate MIME multi-part files: `make-mime`_. separated by a colon (e.g. ``config.yaml:cloud-config``) and emits a MIME multipart message to stdout. An example invocation, assuming you have your cloud config in ``config.yaml`` and a shell script in ``script.sh`` and want -to store the multipart message in ``user-data``:: +to store the multipart message in ``user-data``: + +.. 
code-block:: shell-session - % cloud-init devel make-mime -a config.yaml:cloud-config -a script.sh:x-shellscript > user-data + $ cloud-init devel make-mime -a config.yaml:cloud-config -a script.sh:x-shellscript > user-data .. _make-mime: https://github.com/canonical/cloud-init/blob/master/cloudinit/cmd/devel/make_mime.py @@ -70,7 +74,7 @@ archive. Example ------- -:: +.. code-block:: shell-session $ cat myscript.sh diff --git a/doc/rtd/topics/integration_tests.rst b/doc/rtd/topics/integration_tests.rst index aeda326c..6c124ad9 100644 --- a/doc/rtd/topics/integration_tests.rst +++ b/doc/rtd/topics/integration_tests.rst @@ -9,11 +9,41 @@ Overview Integration tests are written using pytest and are located at ``tests/integration_tests``. General design principles -laid out in :ref:`unit_testing` should be followed for integration tests. +laid out in :ref:`testing` should be followed for integration tests. Setup is accomplished via a set of fixtures located in ``tests/integration_tests/conftest.py``. +Image Selection +=============== + +Each integration testing run uses a single image as its basis. This +image is configured using the ``OS_IMAGE`` variable; see +:ref:`Configuration` for details of how configuration works. + +``OS_IMAGE`` can take two types of value: an Ubuntu series name (e.g. +"focal"), or an image specification. If an Ubuntu series name is +given, then the most recent image for that series on the target cloud +will be used. For other use cases, an image specification is used. + +In its simplest form, an image specification can simply be a cloud's +image ID (e.g. "ami-deadbeef", "ubuntu:focal"). In this case, the +image so-identified will be used as the basis for this testing run. + +This has a drawback, however: as we do not know what OS or release is +within the image, the integration testing framework will run *all* +tests against the image in question. If it's a RHEL8 image, then we +would expect Ubuntu-specific tests to fail (and vice versa). 
+ +To address this, a full image specification can be given. This is of +the form: ``<image_id>[::<os>[::<release>]]`` where ``image_id`` is a +cloud's image ID, ``os`` is the OS name, and ``release`` is the OS +release name. So, for example, Ubuntu 18.04 (Bionic Beaver) on LXD is +``ubuntu:bionic::ubuntu::bionic`` or RHEL 8 on Amazon is +``ami-justanexample::rhel::8``. When a full specification is given, +only tests which are intended for use on that OS and release will be +executed. + Image Setup =========== diff --git a/doc/rtd/topics/network-config-format-v1.rst b/doc/rtd/topics/network-config-format-v1.rst index 92e81897..17732c2a 100644 --- a/doc/rtd/topics/network-config-format-v1.rst +++ b/doc/rtd/topics/network-config-format-v1.rst @@ -414,9 +414,19 @@ Subnet types are one of the following: - ``dhcp6``: Configure this interface with IPv6 dhcp. - ``static``: Configure this interface with a static IPv4. - ``static6``: Configure this interface with a static IPv6 . +- ``ipv6_dhcpv6-stateful``: Configure this interface with ``dhcp6`` +- ``ipv6_dhcpv6-stateless``: Configure this interface with SLAAC and DHCP +- ``ipv6_slaac``: Configure address with SLAAC -When making use of ``dhcp`` types, no additional configuration is needed in -the subnet dictionary. +When making use of ``dhcp`` or either of the ``ipv6_dhcpv6`` types, +no additional configuration is needed in the subnet dictionary. + +Using ``ipv6_dhcpv6-stateless`` or ``ipv6_slaac`` allows the IPv6 address to be +automatically configured with StateLess Address AutoConfiguration (`SLAAC`_). +SLAAC requires support from the network, so verify that your cloud or network +offering has support before trying it out. With ``ipv6_dhcpv6-stateless``, +DHCPv6 is still used to fetch other subnet details such as gateway or DNS +servers. If you only want to discover the address, use ``ipv6_slaac``. **Subnet DHCP Example**:: @@ -603,4 +613,6 @@ Some more examples to explore the various options available.
- dellstack type: nameserver +.. _SLAAC: https://tools.ietf.org/html/rfc4862 + .. vi: textwidth=78 diff --git a/doc/rtd/topics/network-config-format-v2.rst b/doc/rtd/topics/network-config-format-v2.rst index aa17bef5..af65a4ce 100644 --- a/doc/rtd/topics/network-config-format-v2.rst +++ b/doc/rtd/topics/network-config-format-v2.rst @@ -8,9 +8,25 @@ version 2 format defined for the `netplan`_ tool. Cloud-init supports both reading and writing of Version 2; the latter support requires a distro with `netplan`_ present. +Netplan Passthrough +------------------- + +On a system with netplan present, cloud-init will pass Version 2 configuration +through to netplan without modification. On such systems, you do not need to +limit yourself to the below subset of netplan's configuration format. + +.. warning:: + If you are writing or generating network configuration that may be used on + non-netplan systems, you **must** limit yourself to the subset described in + this document, or you will see network configuration failures on + non-netplan systems. + +Version 2 Configuration Format +------------------------------ + The ``network`` key has at least two required elements. First it must include ``version: 2`` and one or more of possible device -``types``.. +``types``. Cloud-init will read this format from system config. For example the following could be present in @@ -34,9 +50,6 @@ Each type block contains device definitions as a map where the keys (called "configuration IDs"). Each entry under the ``types`` may include IP and/or device configuration. -Cloud-init does not current support ``wifis`` type that is present in native -`netplan`_. 
- Device configuration IDs ------------------------ @@ -478,6 +491,11 @@ This is a complex example which shows most available features: :: nameservers: search: [foo.local, bar.local] addresses: [8.8.8.8] + # static routes + routes: + - to: 192.0.2.0/24 + via: 11.0.0.1 + metric: 3 lom: match: driver: ixgbe @@ -506,11 +524,6 @@ This is a complex example which shows most available features: :: id: 1 link: id0 dhcp4: yes - # static routes - routes: - - to: 0.0.0.0/0 - via: 11.0.0.1 - metric: 3 -.. _netplan: https://launchpad.net/netplan +.. _netplan: https://netplan.io .. vi: textwidth=78 diff --git a/doc/rtd/topics/testing.rst b/doc/rtd/topics/testing.rst new file mode 100644 index 00000000..5b702bd2 --- /dev/null +++ b/doc/rtd/topics/testing.rst @@ -0,0 +1,173 @@ +******* +Testing +******* + +cloud-init has both unit tests and integration tests. Unit tests can +be found in-tree alongside the source code, as well as +at ``tests/unittests``. Integration tests can be found at +``tests/integration_tests``. Documentation specifically for integration +tests can be found on the :ref:`integration_tests` page, but +the guidelines specified below apply to both types of tests. + +cloud-init uses `pytest`_ to run its tests, and has tests written both +as ``unittest.TestCase`` sub-classes and as un-subclassed pytest tests. + +Guidelines +========== + +The following guidelines should be followed. + +Test Layout +----------- + +* For ease of organisation and greater accessibility for developers not + familiar with pytest, all cloud-init unit tests must be contained + within test classes + + * Put another way, module-level test functions should not be used + +* As all tests are contained within classes, it is acceptable to mix + ``TestCase`` test classes and pytest test classes within the same + test file + + * These can be easily distinguished by their definition: pytest + classes will not use inheritance at all (e.g. 
+ `TestGetPackageMirrorInfo`_), whereas ``TestCase`` classes will + subclass (indirectly) from ``TestCase`` (e.g. + `TestPrependBaseCommands`_) + +``pytest`` Tests +---------------- + +* pytest test classes should use `pytest fixtures`_ to share + functionality instead of inheritance + +* pytest tests should use bare ``assert`` statements, to take advantage + of pytest's `assertion introspection`_ + + * For ``==`` and other commutative assertions, the expected value + should be placed before the value under test: + ``assert expected_value == function_under_test()`` + + +``pytest`` Version Gotchas +-------------------------- + +As we still support Ubuntu 16.04 (Xenial Xerus), we can only use pytest +features that are available in v2.8.7. This is an inexhaustive list of +ways in which this may catch you out: + +* Support for using ``yield`` in ``pytest.fixture`` functions was only + introduced in `pytest 3.0`_. Such functions must instead use the + ``pytest.yield_fixture`` decorator. + +* Only the following built-in fixtures are available [#fixture-list]_: + + * ``cache`` + * ``capfd`` + * ``caplog`` (provided by ``python3-pytest-catchlog`` on xenial) + * ``capsys`` + * ``monkeypatch`` + * ``pytestconfig`` + * ``record_xml_property`` + * ``recwarn`` + * ``tmpdir_factory`` + * ``tmpdir`` + +* On xenial, the objects returned by the ``tmpdir`` fixture cannot be + used where paths are required; they are rejected as invalid paths. + You must instead use their ``.strpath`` attribute. + + * For example, instead of ``util.write_file(tmpdir.join("some_file"), + ...)``, you should write + ``util.write_file(tmpdir.join("some_file").strpath, ...)``. + +* The `pytest.param`_ function cannot be used. It was introduced in + pytest 3.1, which means it is not available on xenial. The more + limited mechanism it replaced was removed in pytest 4.0, so is not + available in focal or later. 
The only available alternatives are to + write mark-requiring test instances as completely separate tests, + without utilising parameterisation, or to apply the mark to the + entire parameterized test (and therefore every test instance). + +Mocking and Assertions +---------------------- + +* Variables/parameter names for ``Mock`` or ``MagicMock`` instances + should start with ``m_`` to clearly distinguish them from non-mock + variables + + * For example, ``m_readurl`` (which would be a mock for ``readurl``) + +* The ``assert_*`` methods that are available on ``Mock`` and + ``MagicMock`` objects should be avoided, as typos in these method + names may not raise ``AttributeError`` (and so can cause tests to + silently pass). An important exception: if a ``Mock`` is + `autospecced`_ then misspelled assertion methods *will* raise an + ``AttributeError``, so these assertion methods may be used on + autospecced ``Mock`` objects. + + For non-autospecced ``Mock`` s, these substitutions can be used + (``m`` is assumed to be a ``Mock``): + + * ``m.assert_any_call(*args, **kwargs)`` => ``assert + mock.call(*args, **kwargs) in m.call_args_list`` + * ``m.assert_called()`` => ``assert 0 != m.call_count`` + * ``m.assert_called_once()`` => ``assert 1 == m.call_count`` + * ``m.assert_called_once_with(*args, **kwargs)`` => ``assert + [mock.call(*args, **kwargs)] == m.call_args_list`` + * ``m.assert_called_with(*args, **kwargs)`` => ``assert + mock.call(*args, **kwargs) == m.call_args_list[-1]`` + * ``m.assert_has_calls(call_list, any_order=True)`` => ``for call in + call_list: assert call in m.call_args_list`` + + * ``m.assert_has_calls(...)`` and ``m.assert_has_calls(..., + any_order=False)`` are not easily replicated in a single + statement, so their use when appropriate is acceptable. 
+ + * ``m.assert_not_called()`` => ``assert 0 == m.call_count`` + +* When there are multiple patch calls in a test file for the module it + is testing, it may be desirable to capture the shared string prefix + for these patch calls in a module-level variable. If used, such + variables should be named ``M_PATH`` or, for datasource tests, + ``DS_PATH``. + +Test Argument Ordering +---------------------- + +* Test arguments should be ordered as follows: + + * ``mock.patch`` arguments. When used as a decorator, ``mock.patch`` + partially applies its generated ``Mock`` object as the first + argument, so these arguments must go first. + * ``pytest.mark.parametrize`` arguments, in the order specified to + the ``parametrize`` decorator. These arguments are also provided + by a decorator, so it's natural that they sit next to the + ``mock.patch`` arguments. + * Fixture arguments, alphabetically. These are not provided by a + decorator, so they are last, and their order has no defined + meaning, so we default to alphabetical. + +* It follows from this ordering of test arguments (so that we retain + the property that arguments left-to-right correspond to decorators + bottom-to-top) that test decorators should be ordered as follows: + + * ``pytest.mark.parametrize`` + * ``mock.patch`` + +.. [#fixture-list] This list of fixtures (with markup) can be + reproduced by running:: + + py.test-3 --fixtures -q | grep "^[^ -]" | grep -v '\(no\|capturelog\)' | sort | sed 's/.*/* ``\0``/' + + in a xenial lxd container with python3-pytest-catchlog installed. + +.. _pytest: https://docs.pytest.org/ +.. _pytest fixtures: https://docs.pytest.org/en/latest/fixture.html +.. _TestGetPackageMirrorInfo: https://github.com/canonical/cloud-init/blob/42f69f410ab8850c02b1f53dd67c132aa8ef64f5/cloudinit/distros/tests/test_init.py\#L15 +.. _TestPrependBaseCommands: https://github.com/canonical/cloud-init/blob/master/cloudinit/tests/test_subp.py#L9 +.. 
_assertion introspection: https://docs.pytest.org/en/latest/assert.html +.. _pytest 3.0: https://docs.pytest.org/en/latest/changelog.html#id1093 +.. _pytest.param: https://docs.pytest.org/en/latest/reference.html#pytest-param +.. _autospecced: https://docs.python.org/3.8/library/unittest.mock.html#autospeccing diff --git a/integration-requirements.txt b/integration-requirements.txt index 3648a0f1..c959001e 100644 --- a/integration-requirements.txt +++ b/integration-requirements.txt @@ -1,5 +1,5 @@ # PyPI requirements for cloud-init integration testing # https://cloudinit.readthedocs.io/en/latest/topics/integration_tests.html # -pycloudlib @ git+https://github.com/canonical/pycloudlib.git@4b8d2cd5ac6316810ce16d081842da575625ca4f +pycloudlib @ git+https://github.com/canonical/pycloudlib.git@878981e3c7caaf583a8c7c5494dba9d9447acee8 pytest diff --git a/tests/cloud_tests/testcases/examples/TODO.md b/tests/cloud_tests/testcases/examples/TODO.md index 8db0e98e..cde699a7 100644 --- a/tests/cloud_tests/testcases/examples/TODO.md +++ b/tests/cloud_tests/testcases/examples/TODO.md @@ -6,7 +6,7 @@ Below lists each of the issing examples and why it is not currently added. - Puppet (takes > 60 seconds to run) - Manage resolve.conf (lxd backend overrides changes) - Adding a yum repository (need centos system) - - Register RedHat Subscription (need centos system + subscription) + - Register Red Hat Subscription (need centos system + subscription) - Adjust mount points mounted (need multiple disks) - Call a url when finished (need end point) - Reboot/poweroff when finished (how to test) diff --git a/tests/integration_tests/bugs/test_gh570.py b/tests/integration_tests/bugs/test_gh570.py new file mode 100644 index 00000000..534cfb9a --- /dev/null +++ b/tests/integration_tests/bugs/test_gh570.py @@ -0,0 +1,39 @@ +"""Integration test for #570. 
+ +Test that we can add optional vendor-data to the seedfrom file in a +NoCloud environment +""" + +from tests.integration_tests.instances import IntegrationInstance +import pytest + +VENDOR_DATA = """\ +#cloud-config +runcmd: + - touch /var/tmp/seeded_vendordata_test_file +""" + + +# Only running on LXD because we need NoCloud for this test +@pytest.mark.sru_2020_11 +@pytest.mark.lxd_container +@pytest.mark.lxd_vm +def test_nocloud_seedfrom_vendordata(client: IntegrationInstance): + seed_dir = '/var/tmp/test_seed_dir' + result = client.execute( + "mkdir {seed_dir} && " + "touch {seed_dir}/user-data && " + "touch {seed_dir}/meta-data && " + "echo 'seedfrom: {seed_dir}/' > " + "/var/lib/cloud/seed/nocloud-net/meta-data".format(seed_dir=seed_dir) + ) + assert result.return_code == 0 + + client.write_to_file( + '{}/vendor-data'.format(seed_dir), + VENDOR_DATA, + ) + client.execute('cloud-init clean --logs') + client.restart() + assert client.execute('cloud-init status').ok + assert 'seeded_vendordata_test_file' in client.execute('ls /var/tmp') diff --git a/tests/integration_tests/bugs/test_gh626.py b/tests/integration_tests/bugs/test_gh626.py new file mode 100644 index 00000000..2d336462 --- /dev/null +++ b/tests/integration_tests/bugs/test_gh626.py @@ -0,0 +1,42 @@ +"""Integration test for gh-626. + +Ensure if wakeonlan is specified in the network config that it is rendered +in the /etc/network/interfaces or netplan config. 
+""" + +import pytest +import yaml + +from tests.integration_tests.clouds import ImageSpecification +from tests.integration_tests.instances import IntegrationInstance + + +NETWORK_CONFIG = """\ +version: 2 +ethernets: + eth0: + dhcp4: true + wakeonlan: true +""" + +EXPECTED_ENI_END = """\ +iface eth0 inet dhcp + ethernet-wol g""" + + +@pytest.mark.sru_2020_11 +@pytest.mark.lxd_container +@pytest.mark.lxd_vm +@pytest.mark.lxd_config_dict({ + 'user.network-config': NETWORK_CONFIG +}) +def test_wakeonlan(client: IntegrationInstance): + if ImageSpecification.from_os_image().release == 'xenial': + eni = client.execute('cat /etc/network/interfaces.d/50-cloud-init.cfg') + assert eni.endswith(EXPECTED_ENI_END) + return + + netplan_cfg = client.execute('cat /etc/netplan/50-cloud-init.yaml') + netplan_yaml = yaml.safe_load(netplan_cfg) + assert 'wakeonlan' in netplan_yaml['network']['ethernets']['eth0'] + assert netplan_yaml['network']['ethernets']['eth0']['wakeonlan'] is True diff --git a/tests/integration_tests/bugs/test_gh632.py b/tests/integration_tests/bugs/test_gh632.py new file mode 100644 index 00000000..3c1f9347 --- /dev/null +++ b/tests/integration_tests/bugs/test_gh632.py @@ -0,0 +1,33 @@ +"""Integration test for gh-632. + +Verify that if cloud-init is using DataSourceRbxCloud, there is +no traceback if the metadata disk cannot be found. 
+""" + +import pytest + +from tests.integration_tests.instances import IntegrationInstance + + +# With some datasource hacking, we can run this on a NoCloud instance +@pytest.mark.lxd_container +@pytest.mark.lxd_vm +@pytest.mark.sru_2020_11 +def test_datasource_rbx_no_stacktrace(client: IntegrationInstance): + client.write_to_file( + '/etc/cloud/cloud.cfg.d/90_dpkg.cfg', + 'datasource_list: [ RbxCloud, NoCloud ]\n', + ) + client.write_to_file( + '/etc/cloud/ds-identify.cfg', + 'policy: enabled\n', + ) + client.execute('cloud-init clean --logs') + client.restart() + + log = client.read_from_file('/var/log/cloud-init.log') + assert 'WARNING' not in log + assert 'Traceback' not in log + assert 'Failed to load metadata and userdata' not in log + assert ("Getting data from <class 'cloudinit.sources.DataSourceRbxCloud." + "DataSourceRbxCloud'> failed") not in log diff --git a/tests/integration_tests/bugs/test_gh668.py b/tests/integration_tests/bugs/test_gh668.py new file mode 100644 index 00000000..a3a0c374 --- /dev/null +++ b/tests/integration_tests/bugs/test_gh668.py @@ -0,0 +1,37 @@ +"""Integration test for gh-668. + +Ensure that static route to host is working correctly. +The original problem is specific to the ENI renderer but that test is suitable +for all network configuration outputs. 
+""" + +import pytest + +from tests.integration_tests.instances import IntegrationInstance + + +DESTINATION_IP = "172.16.0.10" +GATEWAY_IP = "10.0.0.100" + +NETWORK_CONFIG = """\ +version: 2 +ethernets: + eth0: + addresses: [10.0.0.10/8] + dhcp4: false + routes: + - to: {}/32 + via: {} +""".format(DESTINATION_IP, GATEWAY_IP) + +EXPECTED_ROUTE = "{} via {}".format(DESTINATION_IP, GATEWAY_IP) + + +@pytest.mark.lxd_container +@pytest.mark.lxd_vm +@pytest.mark.lxd_config_dict({ + "user.network-config": NETWORK_CONFIG, +}) +def test_static_route_to_host(client: IntegrationInstance): + route = client.execute("ip route | grep {}".format(DESTINATION_IP)) + assert route.startswith(EXPECTED_ROUTE) diff --git a/tests/integration_tests/bugs/test_gh671.py b/tests/integration_tests/bugs/test_gh671.py new file mode 100644 index 00000000..5e90cdda --- /dev/null +++ b/tests/integration_tests/bugs/test_gh671.py @@ -0,0 +1,55 @@ +"""Integration test for gh-671. + +Verify that on Azure that if a default user and password are specified +through the Azure API that a change in the default password overwrites +the old password +""" + +import crypt + +import pytest + +from tests.integration_tests.clouds import IntegrationCloud + +OLD_PASSWORD = 'DoIM33tTheComplexityRequirements!??' +NEW_PASSWORD = 'DoIM33tTheComplexityRequirementsNow!??' 
+ + +def _check_password(instance, unhashed_password): + shadow_password = instance.execute('getent shadow ubuntu').split(':')[1] + salt = shadow_password.rsplit('$', 1)[0] + hashed_password = crypt.crypt(unhashed_password, salt) + assert shadow_password == hashed_password + + +@pytest.mark.azure +@pytest.mark.sru_2020_11 +def test_update_default_password(setup_image, session_cloud: IntegrationCloud): + os_profile = { + 'os_profile': { + 'admin_password': '', + 'linux_configuration': { + 'disable_password_authentication': False + } + } + } + os_profile['os_profile']['admin_password'] = OLD_PASSWORD + instance1 = session_cloud.launch(launch_kwargs={'vm_params': os_profile}) + + _check_password(instance1, OLD_PASSWORD) + + snapshot_id = instance1.cloud.cloud_instance.snapshot( + instance1.instance, + delete_provisioned_user=False + ) + + os_profile['os_profile']['admin_password'] = NEW_PASSWORD + try: + with session_cloud.launch(launch_kwargs={ + 'image_id': snapshot_id, + 'vm_params': os_profile, + }) as instance2: + _check_password(instance2, NEW_PASSWORD) + finally: + session_cloud.cloud_instance.delete_image(snapshot_id) + instance1.destroy() diff --git a/tests/integration_tests/bugs/test_lp1813396.py b/tests/integration_tests/bugs/test_lp1813396.py new file mode 100644 index 00000000..7ad0e809 --- /dev/null +++ b/tests/integration_tests/bugs/test_lp1813396.py @@ -0,0 +1,34 @@ +"""Integration test for lp-1813396 + +Ensure gpg is called with no tty flag. 
+""" + +import pytest + +from tests.integration_tests.instances import IntegrationInstance +from tests.integration_tests.log_utils import verify_ordered_items_in_text + + +USER_DATA = """\ +#cloud-config +apt: + sources: + cloudinit: + source: 'deb [arch=amd64] http://ppa.launchpad.net/cloud-init-dev/daily/ubuntu focal main' + keyserver: keyserver.ubuntu.com + keyid: E4D304DF +""" # noqa: E501 + + +@pytest.mark.sru_2020_11 +@pytest.mark.user_data(USER_DATA) +def test_gpg_no_tty(client: IntegrationInstance): + log = client.read_from_file('/var/log/cloud-init.log') + to_verify = [ + "Running command ['gpg', '--no-tty', " + "'--keyserver=keyserver.ubuntu.com', '--recv-keys', 'E4D304DF'] " + "with allowed return codes [0] (shell=False, capture=True)", + "Imported key 'E4D304DF' from keyserver 'keyserver.ubuntu.com'", + "finish: modules-config/config-apt-configure: SUCCESS", + ] + verify_ordered_items_in_text(to_verify, log) diff --git a/tests/integration_tests/bugs/test_lp1898997.py b/tests/integration_tests/bugs/test_lp1898997.py new file mode 100644 index 00000000..54c88d82 --- /dev/null +++ b/tests/integration_tests/bugs/test_lp1898997.py @@ -0,0 +1,71 @@ +"""Integration test for LP: #1898997 + +cloud-init was incorrectly excluding Open vSwitch bridge members from its list +of interfaces. This meant that instances which had only one interface which +was in an Open vSwitch bridge would not boot correctly: cloud-init would not +find the expected physical interfaces, so would not apply network config. + +This test checks that cloud-init believes it has successfully applied the +network configuration, and confirms that the bridge can be used to ping the +default gateway. 
+""" +import pytest + +MAC_ADDRESS = "de:ad:be:ef:12:34" + + +NETWORK_CONFIG = """\ +bridges: + ovs-br: + dhcp4: true + interfaces: + - enp5s0 + macaddress: 52:54:00:d9:08:1c + mtu: 1500 + openvswitch: {{}} +ethernets: + enp5s0: + mtu: 1500 + set-name: enp5s0 + match: + macaddress: {} +version: 2 +""".format(MAC_ADDRESS) + + +@pytest.mark.lxd_config_dict({ + "user.network-config": NETWORK_CONFIG, + "volatile.eth0.hwaddr": MAC_ADDRESS, +}) +@pytest.mark.lxd_vm +@pytest.mark.not_bionic +@pytest.mark.not_xenial +@pytest.mark.sru_2020_11 +@pytest.mark.ubuntu +class TestInterfaceListingWithOpenvSwitch: + def test_ovs_member_interfaces_not_excluded(self, client): + # We need to install openvswitch for our provided network configuration + # to apply (on next boot), so DHCP on our default interface to fetch it + client.execute("dhclient enp5s0") + client.execute("apt update -qqy") + client.execute("apt-get install -qqy openvswitch-switch") + + # Now our networking config should successfully apply on a clean reboot + client.execute("cloud-init clean --logs") + client.restart() + + cloudinit_output = client.read_from_file("/var/log/cloud-init.log") + + # Confirm that the network configuration was applied successfully + assert "WARN" not in cloudinit_output + # Confirm that the applied network config created the OVS bridge + assert "ovs-br" in client.execute("ip addr") + + # Test that we can ping our gateway using our bridge + gateway = client.execute( + "ip -4 route show default | awk '{ print $3 }'" + ) + ping_result = client.execute( + "ping -c 1 -W 1 -I ovs-br {}".format(gateway) + ) + assert ping_result.ok diff --git a/tests/integration_tests/bugs/test_lp1900837.py b/tests/integration_tests/bugs/test_lp1900837.py index 3fe7d0d0..fcc2b751 100644 --- a/tests/integration_tests/bugs/test_lp1900837.py +++ b/tests/integration_tests/bugs/test_lp1900837.py @@ -22,7 +22,8 @@ class TestLogPermissionsNotResetOnReboot: assert "600" == _get_log_perms(client) # Reboot - 
client.instance.restart() + client.restart() + assert client.execute('cloud-init status').ok # Check that permissions are not reset on reboot assert "600" == _get_log_perms(client) diff --git a/tests/integration_tests/bugs/test_lp1910835.py b/tests/integration_tests/bugs/test_lp1910835.py new file mode 100644 index 00000000..87f92d5e --- /dev/null +++ b/tests/integration_tests/bugs/test_lp1910835.py @@ -0,0 +1,66 @@ +"""Integration test for LP: #1910835. + +If users do not provide an SSH key and instead ask Azure to generate a key for +them, the key material available in the IMDS may include CRLF sequences. Prior +to e56b55452549cb037da0a4165154ffa494e9678a, the Azure datasource handled keys +via a certificate, the tooling for which removed these sequences. This test +ensures that cloud-init does not regress support for this Azure behaviour. + +This test provides the SSH key configured for tests to the instance in two +ways: firstly, with CRLFs to mimic the generated keys, via the Azure API; +secondly, as user-data in unmodified form. This means that even on systems +which exhibit the bug fetching the platform's metadata, we can SSH into the SUT +to confirm this (instead of having to assert SSH failure; there are lots of +reasons SSH might fail). + +Once SSH'd in, we check that the two keys in .ssh/authorized_keys have the same +material: if the Azure datasource has removed the CRLFs correctly, then they +will match. 
+""" +import pytest + + +USER_DATA_TMPL = """\ +#cloud-config +ssh_authorized_keys: + - {}""" + + +@pytest.mark.sru_2021_01 +@pytest.mark.azure +def test_crlf_in_azure_metadata_ssh_keys(session_cloud, setup_image): + authorized_keys_path = "/home/{}/.ssh/authorized_keys".format( + session_cloud.cloud_instance.username + ) + # Pass in user-data to allow us to access the instance when the normal + # path fails + key_data = session_cloud.cloud_instance.key_pair.public_key_content + user_data = USER_DATA_TMPL.format(key_data) + # Throw a CRLF into the otherwise good key data, to emulate Azure's + # behaviour for generated keys + key_data = key_data[:20] + "\r\n" + key_data[20:] + vm_params = { + "os_profile": { + "linux_configuration": { + "ssh": { + "public_keys": [ + {"path": authorized_keys_path, "key_data": key_data} + ] + } + } + } + } + with session_cloud.launch( + launch_kwargs={"vm_params": vm_params, "user_data": user_data} + ) as client: + authorized_keys = ( + client.read_from_file(authorized_keys_path).strip().splitlines() + ) + # We expect one key from the cloud, one from user-data + assert 2 == len(authorized_keys) + # And those two keys should be the same, except for a possible key + # comment, which Azure strips out + assert ( + authorized_keys[0].rsplit(" ")[:2] + == authorized_keys[1].split(" ")[:2] + ) diff --git a/tests/integration_tests/clouds.py b/tests/integration_tests/clouds.py index 88ac4408..9eebb10a 100644 --- a/tests/integration_tests/clouds.py +++ b/tests/integration_tests/clouds.py @@ -6,7 +6,7 @@ from pycloudlib import EC2, GCE, Azure, OCI, LXDContainer, LXDVirtualMachine from pycloudlib.lxd.instance import LXDInstance import cloudinit -from cloudinit.subp import subp +from cloudinit.subp import subp, ProcessExecutionError from tests.integration_tests import integration_settings from tests.integration_tests.instances import ( IntegrationEc2Instance, @@ -25,6 +25,65 @@ except ImportError: log = logging.getLogger('integration_testing') 
+def _get_ubuntu_series() -> list: + """Use distro-info-data's ubuntu.csv to get a list of Ubuntu series""" + out = "" + try: + out, _err = subp(["ubuntu-distro-info", "-a"]) + except ProcessExecutionError: + log.info( + "ubuntu-distro-info (from the distro-info package) must be" + " installed to guess Ubuntu os/release" + ) + return out.splitlines() + + +class ImageSpecification: + """A specification of an image to launch for testing. + + If either of ``os`` and ``release`` are not specified, an attempt will be + made to infer the correct values for these on instantiation. + + :param image_id: + The image identifier used by the rest of the codebase to launch this + image. + :param os: + An optional string describing the operating system this image is for + (e.g. "ubuntu", "rhel", "freebsd"). + :param release: + A optional string describing the operating system release (e.g. + "focal", "8"; the exact values here will depend on the OS). + """ + + def __init__( + self, + image_id: str, + os: "Optional[str]" = None, + release: "Optional[str]" = None, + ): + if image_id in _get_ubuntu_series(): + if os is None: + os = "ubuntu" + if release is None: + release = image_id + + self.image_id = image_id + self.os = os + self.release = release + log.info( + "Detected image: image_id=%s os=%s release=%s", + self.image_id, + self.os, + self.release, + ) + + @classmethod + def from_os_image(cls): + """Return an ImageSpecification for integration_settings.OS_IMAGE.""" + parts = integration_settings.OS_IMAGE.split("::", 2) + return cls(*parts) + + class IntegrationCloud(ABC): datasource = None # type: Optional[str] integration_instance_cls = IntegrationInstance @@ -32,7 +91,23 @@ class IntegrationCloud(ABC): def __init__(self, settings=integration_settings): self.settings = settings self.cloud_instance = self._get_cloud_instance() - self.image_id = self._get_initial_image() + if settings.PUBLIC_SSH_KEY is not None: + # If we have a non-default key, use it. 
+ self.cloud_instance.use_key( + settings.PUBLIC_SSH_KEY, name=settings.KEYPAIR_NAME + ) + elif settings.KEYPAIR_NAME is not None: + # Even if we're using the default key, it may still have a + # different name in the clouds, so we need to set it separately. + self.cloud_instance.key_pair.name = settings.KEYPAIR_NAME + self._released_image_id = self._get_initial_image() + self.snapshot_id = None + + @property + def image_id(self): + if self.snapshot_id: + return self.snapshot_id + return self._released_image_id def emit_settings_to_log(self) -> None: log.info( @@ -50,21 +125,20 @@ class IntegrationCloud(ABC): raise NotImplementedError def _get_initial_image(self): - image_id = self.settings.OS_IMAGE + image = ImageSpecification.from_os_image() try: - image_id = self.cloud_instance.released_image( - self.settings.OS_IMAGE) + return self.cloud_instance.released_image(image.image_id) except (ValueError, IndexError): - pass - return image_id + return image.image_id def _perform_launch(self, launch_kwargs): pycloudlib_instance = self.cloud_instance.launch(**launch_kwargs) - pycloudlib_instance.wait(raise_on_cloudinit_failure=False) return pycloudlib_instance def launch(self, user_data=None, launch_kwargs=None, settings=integration_settings): + if launch_kwargs is None: + launch_kwargs = {} if self.settings.EXISTING_INSTANCE_ID: log.info( 'Not launching instance due to EXISTING_INSTANCE_ID. 
' @@ -76,10 +150,8 @@ class IntegrationCloud(ABC): kwargs = { 'image_id': self.image_id, 'user_data': user_data, - 'wait': False, } - if launch_kwargs: - kwargs.update(launch_kwargs) + kwargs.update(launch_kwargs) log.info( "Launching instance with launch_kwargs:\n{}".format( "\n".join("{}={}".format(*item) for item in kwargs.items()) @@ -87,9 +159,18 @@ class IntegrationCloud(ABC): ) pycloudlib_instance = self._perform_launch(kwargs) - log.info('Launched instance: %s', pycloudlib_instance) - return self.get_instance(pycloudlib_instance, settings) + instance = self.get_instance(pycloudlib_instance, settings) + if kwargs.get('wait', True): + # If we aren't waiting, we can't rely on command execution here + log.info( + 'cloud-init version: %s', + instance.execute("cloud-init --version") + ) + serial = instance.execute("grep serial /etc/cloud/build.info") + if serial: + log.info('image serial: %s', serial.split()[1]) + return instance def get_instance(self, cloud_instance, settings=integration_settings): return self.integration_instance_cls(self, cloud_instance, settings) @@ -100,6 +181,19 @@ class IntegrationCloud(ABC): def snapshot(self, instance): return self.cloud_instance.snapshot(instance, clean=True) + def delete_snapshot(self): + if self.snapshot_id: + if self.settings.KEEP_IMAGE: + log.info( + 'NOT deleting snapshot image created for this testrun ' + 'because KEEP_IMAGE is True: %s', self.snapshot_id) + else: + log.info( + 'Deleting snapshot image created for this testrun: %s', + self.snapshot_id + ) + self.cloud_instance.delete_image(self.snapshot_id) + class Ec2Cloud(IntegrationCloud): datasource = 'ec2' @@ -116,9 +210,6 @@ class GceCloud(IntegrationCloud): def _get_cloud_instance(self): return GCE( tag='gce-integration-test', - project=self.settings.GCE_PROJECT, - region=self.settings.GCE_REGION, - zone=self.settings.GCE_ZONE, ) @@ -130,7 +221,14 @@ class AzureCloud(IntegrationCloud): return Azure(tag='azure-integration-test') def destroy(self): - 
self.cloud_instance.delete_resource_group() + if self.settings.KEEP_INSTANCE: + log.info( + 'NOT deleting resource group because KEEP_INSTANCE is true ' + 'and deleting resource group would also delete instance. ' + 'Instance and resource group must both be manually deleted.' + ) + else: + self.cloud_instance.delete_resource_group() class OciCloud(IntegrationCloud): @@ -139,8 +237,7 @@ class OciCloud(IntegrationCloud): def _get_cloud_instance(self): return OCI( - tag='oci-integration-test', - compartment_id=self.settings.OCI_COMPARTMENT_ID + tag='oci-integration-test' ) @@ -174,7 +271,7 @@ class _LxdIntegrationCloud(IntegrationCloud): def _perform_launch(self, launch_kwargs): launch_kwargs['inst_type'] = launch_kwargs.pop('instance_type', None) - launch_kwargs.pop('wait') + wait = launch_kwargs.pop('wait', True) release = launch_kwargs.pop('image_id') try: @@ -190,8 +287,7 @@ class _LxdIntegrationCloud(IntegrationCloud): ) if self.settings.CLOUD_INIT_SOURCE == 'IN_PLACE': self._mount_source(pycloudlib_instance) - pycloudlib_instance.start(wait=False) - pycloudlib_instance.wait(raise_on_cloudinit_failure=False) + pycloudlib_instance.start(wait=wait) return pycloudlib_instance diff --git a/tests/integration_tests/conftest.py b/tests/integration_tests/conftest.py index 73b44bfc..99dd8d9e 100644 --- a/tests/integration_tests/conftest.py +++ b/tests/integration_tests/conftest.py @@ -1,18 +1,29 @@ # This file is part of cloud-init. See LICENSE file for license information. 
+import datetime +import functools import logging -import os import pytest +import os import sys +from tarfile import TarFile from contextlib import contextmanager +from pathlib import Path from tests.integration_tests import integration_settings from tests.integration_tests.clouds import ( + AzureCloud, Ec2Cloud, GceCloud, - AzureCloud, - OciCloud, + ImageSpecification, + IntegrationCloud, LxdContainerCloud, LxdVmCloud, + OciCloud, + _LxdIntegrationCloud, +) +from tests.integration_tests.instances import ( + CloudInitSource, + IntegrationInstance, ) @@ -28,6 +39,9 @@ platforms = { 'lxd_container': LxdContainerCloud, 'lxd_vm': LxdVmCloud, } +os_list = ["ubuntu"] + +session_start_time = datetime.datetime.now().strftime('%y%m%d%H%M%S') def pytest_runtest_setup(item): @@ -54,6 +68,18 @@ def pytest_runtest_setup(item): if supported_platforms and current_platform not in supported_platforms: pytest.skip(unsupported_message) + image = ImageSpecification.from_os_image() + current_os = image.os + supported_os_set = set(os_list).intersection(test_marks) + if current_os and supported_os_set and current_os not in supported_os_set: + pytest.skip("Cannot run on OS {}".format(current_os)) + if 'unstable' in test_marks and not integration_settings.RUN_UNSTABLE: + pytest.skip('Test marked unstable. 
Manually remove mark to run it') + + current_release = image.release + if "not_{}".format(current_release) in test_marks: + pytest.skip("Cannot run on release {}".format(current_release)) + # disable_subp_usage is defined at a higher level, but we don't # want it applied here @@ -75,82 +101,137 @@ def session_cloud(): cloud = platforms[integration_settings.PLATFORM]() cloud.emit_settings_to_log() yield cloud - cloud.destroy() + try: + cloud.delete_snapshot() + finally: + cloud.destroy() + +def get_validated_source( + session_cloud: IntegrationCloud, + source=integration_settings.CLOUD_INIT_SOURCE +) -> CloudInitSource: + if source == 'NONE': + return CloudInitSource.NONE + elif source == 'IN_PLACE': + if session_cloud.datasource not in ['lxd_container', 'lxd_vm']: + raise ValueError( + 'IN_PLACE as CLOUD_INIT_SOURCE only works for LXD') + return CloudInitSource.IN_PLACE + elif source == 'PROPOSED': + return CloudInitSource.PROPOSED + elif source.startswith('ppa:'): + return CloudInitSource.PPA + elif os.path.isfile(str(source)): + return CloudInitSource.DEB_PACKAGE + raise ValueError( + 'Invalid value for CLOUD_INIT_SOURCE setting: {}'.format(source)) -@pytest.fixture(scope='session', autouse=True) -def setup_image(session_cloud): + +@pytest.fixture(scope='session') +def setup_image(session_cloud: IntegrationCloud): """Setup the target environment with the correct version of cloud-init. 
So we can launch instances / run tests with the correct image """ - client = None + + source = get_validated_source(session_cloud) + if not source.installs_new_version(): + return log.info('Setting up environment for %s', session_cloud.datasource) - if integration_settings.CLOUD_INIT_SOURCE == 'NONE': - pass # that was easy - elif integration_settings.CLOUD_INIT_SOURCE == 'IN_PLACE': - if session_cloud.datasource not in ['lxd_container', 'lxd_vm']: - raise ValueError( - 'IN_PLACE as CLOUD_INIT_SOURCE only works for LXD') - # The mount needs to happen after the instance is created, so - # no further action needed here - elif integration_settings.CLOUD_INIT_SOURCE == 'PROPOSED': - client = session_cloud.launch() - client.install_proposed_image() - elif integration_settings.CLOUD_INIT_SOURCE.startswith('ppa:'): - client = session_cloud.launch() - client.install_ppa(integration_settings.CLOUD_INIT_SOURCE) - elif os.path.isfile(str(integration_settings.CLOUD_INIT_SOURCE)): - client = session_cloud.launch() - client.install_deb() - else: - raise ValueError( - 'Invalid value for CLOUD_INIT_SOURCE setting: {}'.format( - integration_settings.CLOUD_INIT_SOURCE)) - if client: - # Even if we're keeping instances, we don't want to keep this - # one around as it was just for image creation - client.destroy() + client = session_cloud.launch() + client.install_new_cloud_init(source) + # Even if we're keeping instances, we don't want to keep this + # one around as it was just for image creation + client.destroy() log.info('Done with environment setup') +def _collect_logs(instance: IntegrationInstance, node_id: str, + test_failed: bool): + """Collect logs from remote instance. 
+ + Args: + instance: The current IntegrationInstance to collect logs from + node_id: The pytest representation of this test, E.g.: + tests/integration_tests/test_example.py::TestExample.test_example + test_failed: If test failed or not + """ + if any([ + integration_settings.COLLECT_LOGS == 'NEVER', + integration_settings.COLLECT_LOGS == 'ON_ERROR' and not test_failed + ]): + return + instance.execute( + 'cloud-init collect-logs -u -t /var/tmp/cloud-init.tar.gz') + node_id_path = Path( + node_id + .replace('.py', '') # Having a directory with '.py' would be weird + .replace('::', os.path.sep) # Turn classes/tests into paths + .replace('[', '-') # For parametrized names + .replace(']', '') # For parameterized names + ) + log_dir = Path( + integration_settings.LOCAL_LOG_PATH + ) / session_start_time / node_id_path + log.info("Writing logs to %s", log_dir) + if not log_dir.exists(): + log_dir.mkdir(parents=True) + tarball_path = log_dir / 'cloud-init.tar.gz' + instance.pull_file('/var/tmp/cloud-init.tar.gz', tarball_path) + + tarball = TarFile.open(str(tarball_path)) + tarball.extractall(path=str(log_dir)) + tarball_path.unlink() + + @contextmanager -def _client(request, fixture_utils, session_cloud): +def _client(request, fixture_utils, session_cloud: IntegrationCloud): """Fixture implementation for the client fixtures. 
Launch the dynamic IntegrationClient instance using any provided userdata, yield to the test, then cleanup """ - user_data = fixture_utils.closest_marker_first_arg_or( - request, 'user_data', None) - name = fixture_utils.closest_marker_first_arg_or( - request, 'instance_name', None + getter = functools.partial( + fixture_utils.closest_marker_first_arg_or, request, default=None ) + user_data = getter('user_data') + name = getter('instance_name') + lxd_config_dict = getter('lxd_config_dict') + launch_kwargs = {} if name is not None: - launch_kwargs = {"name": name} + launch_kwargs["name"] = name + if lxd_config_dict is not None: + if not isinstance(session_cloud, _LxdIntegrationCloud): + pytest.skip("lxd_config_dict requires LXD") + launch_kwargs["config_dict"] = lxd_config_dict + with session_cloud.launch( user_data=user_data, launch_kwargs=launch_kwargs ) as instance: + previous_failures = request.session.testsfailed yield instance + test_failed = request.session.testsfailed - previous_failures > 0 + _collect_logs(instance, request.node.nodeid, test_failed) @pytest.yield_fixture -def client(request, fixture_utils, session_cloud): +def client(request, fixture_utils, session_cloud, setup_image): """Provide a client that runs for every test.""" with _client(request, fixture_utils, session_cloud) as client: yield client @pytest.yield_fixture(scope='module') -def module_client(request, fixture_utils, session_cloud): +def module_client(request, fixture_utils, session_cloud, setup_image): """Provide a client that runs once per module.""" with _client(request, fixture_utils, session_cloud) as client: yield client @pytest.yield_fixture(scope='class') -def class_client(request, fixture_utils, session_cloud): +def class_client(request, fixture_utils, session_cloud, setup_image): """Provide a client that runs once per class.""" with _client(request, fixture_utils, session_cloud) as client: yield client @@ -180,3 +261,20 @@ def pytest_assertrepr_compare(op, left, right): '"{}" 
not in cloud-init.log string; unexpectedly found on' " these lines:".format(left) ] + found_lines + + +def pytest_configure(config): + """Perform initial configuration, before the test runs start. + + This hook is only called if integration tests are being executed, so we can + use it to configure defaults for integration testing that differ from the + rest of the tests in the codebase. + + See + https://docs.pytest.org/en/latest/reference.html#_pytest.hookspec.pytest_configure + for pytest's documentation. + """ + if "log_cli_level" in config.option and not config.option.log_cli_level: + # If log_cli_level is available in this version of pytest and not set + # to anything, set it to INFO. + config.option.log_cli_level = "INFO" diff --git a/tests/integration_tests/instances.py b/tests/integration_tests/instances.py index 9b13288c..0d1e1aef 100644 --- a/tests/integration_tests/instances.py +++ b/tests/integration_tests/instances.py @@ -1,4 +1,5 @@ # This file is part of cloud-init. See LICENSE file for license information. +from enum import Enum import logging import os import uuid @@ -25,9 +26,27 @@ def _get_tmp_path(): return '/var/tmp/{}.tmp'.format(tmp_filename) -class IntegrationInstance: - use_sudo = True +class CloudInitSource(Enum): + """Represents the cloud-init image source setting as a defined value. + + Values here represent all possible values for CLOUD_INIT_SOURCE in + tests/integration_tests/integration_settings.py. See that file for an + explanation of these values. 
If the value set there can't be parsed into + one of these values, an exception will be raised + """ + NONE = 1 + IN_PLACE = 2 + PROPOSED = 3 + PPA = 4 + DEB_PACKAGE = 5 + + def installs_new_version(self): + if self.name in [self.NONE.name, self.IN_PLACE.name]: + return False + return True + +class IntegrationInstance: def __init__(self, cloud: 'IntegrationCloud', instance: BaseInstance, settings=integration_settings): self.cloud = cloud @@ -37,24 +56,30 @@ class IntegrationInstance: def destroy(self): self.instance.delete() - def execute(self, command, *, use_sudo=None) -> Result: + def restart(self): + """Restart this instance (via cloud mechanism) and wait for boot. + + This wraps pycloudlib's `BaseInstance.restart` + """ + log.info("Restarting instance and waiting for boot") + self.instance.restart() + + def execute(self, command, *, use_sudo=True) -> Result: if self.instance.username == 'root' and use_sudo is False: raise Exception('Root user cannot run unprivileged') - if use_sudo is None: - use_sudo = self.use_sudo return self.instance.execute(command, use_sudo=use_sudo) def pull_file(self, remote_path, local_path): # First copy to a temporary directory because of permissions issues tmp_path = _get_tmp_path() - self.instance.execute('cp {} {}'.format(remote_path, tmp_path)) - self.instance.pull_file(tmp_path, local_path) + self.instance.execute('cp {} {}'.format(str(remote_path), tmp_path)) + self.instance.pull_file(tmp_path, str(local_path)) def push_file(self, local_path, remote_path): # First push to a temporary directory because of permissions issues tmp_path = _get_tmp_path() - self.instance.push_file(local_path, tmp_path) - self.execute('mv {} {}'.format(tmp_path, remote_path)) + self.instance.push_file(str(local_path), tmp_path) + self.execute('mv {} {}'.format(tmp_path, str(remote_path))) def read_from_file(self, remote_path) -> str: result = self.execute('cat {}'.format(remote_path)) @@ -83,36 +108,52 @@ class IntegrationInstance: 
os.unlink(tmp_file.name) def snapshot(self): - return self.cloud.snapshot(self.instance) - - def _install_new_cloud_init(self, remote_script): - self.execute(remote_script) + image_id = self.cloud.snapshot(self.instance) + log.info('Created new image: %s', image_id) + return image_id + + def install_new_cloud_init( + self, + source: CloudInitSource, + take_snapshot=True + ): + if source == CloudInitSource.DEB_PACKAGE: + self.install_deb() + elif source == CloudInitSource.PPA: + self.install_ppa() + elif source == CloudInitSource.PROPOSED: + self.install_proposed_image() + else: + raise Exception( + "Specified to install {} which isn't supported here".format( + source) + ) version = self.execute('cloud-init -v').split()[-1] log.info('Installed cloud-init version: %s', version) self.instance.clean() - image_id = self.snapshot() - log.info('Created new image: %s', image_id) - self.cloud.image_id = image_id + if take_snapshot: + snapshot_id = self.snapshot() + self.cloud.snapshot_id = snapshot_id def install_proposed_image(self): log.info('Installing proposed image') remote_script = ( - '{sudo} echo deb "http://archive.ubuntu.com/ubuntu ' + 'echo deb "http://archive.ubuntu.com/ubuntu ' '$(lsb_release -sc)-proposed main" | ' - '{sudo} tee /etc/apt/sources.list.d/proposed.list\n' - '{sudo} apt-get update -q\n' - '{sudo} apt-get install -qy cloud-init' - ).format(sudo='sudo' if self.use_sudo else '') - self._install_new_cloud_init(remote_script) + 'tee /etc/apt/sources.list.d/proposed.list\n' + 'apt-get update -q\n' + 'apt-get install -qy cloud-init' + ) + self.execute(remote_script) - def install_ppa(self, repo): + def install_ppa(self): log.info('Installing PPA') remote_script = ( - '{sudo} add-apt-repository {repo} -y && ' - '{sudo} apt-get update -q && ' - '{sudo} apt-get install -qy cloud-init' - ).format(sudo='sudo' if self.use_sudo else '', repo=repo) - self._install_new_cloud_init(remote_script) + 'add-apt-repository {repo} -y && ' + 'apt-get update -q && ' + 
'apt-get install -qy cloud-init' + ).format(repo=self.settings.CLOUD_INIT_SOURCE) + self.execute(remote_script) def install_deb(self): log.info('Installing deb package') @@ -122,9 +163,8 @@ class IntegrationInstance: self.push_file( local_path=integration_settings.CLOUD_INIT_SOURCE, remote_path=remote_path) - remote_script = '{sudo} dpkg -i {path}'.format( - sudo='sudo' if self.use_sudo else '', path=remote_path) - self._install_new_cloud_init(remote_script) + remote_script = 'dpkg -i {path}'.format(path=remote_path) + self.execute(remote_script) def __enter__(self): return self @@ -151,4 +191,4 @@ class IntegrationOciInstance(IntegrationInstance): class IntegrationLxdInstance(IntegrationInstance): - use_sudo = False + pass diff --git a/tests/integration_tests/integration_settings.py b/tests/integration_tests/integration_settings.py index a0609f7e..22b4fdda 100644 --- a/tests/integration_tests/integration_settings.py +++ b/tests/integration_tests/integration_settings.py @@ -1,15 +1,22 @@ # This file is part of cloud-init. See LICENSE file for license information. import os +from distutils.util import strtobool + ################################################################## # LAUNCH SETTINGS ################################################################## # Keep instance (mostly for debugging) when test is finished KEEP_INSTANCE = False +# Keep snapshot image (mostly for debugging) when test is finished +KEEP_IMAGE = False +# Run tests marked as unstable. Expect failures and dragons. +RUN_UNSTABLE = False # One of: # lxd_container +# lxd_vm # azure # ec2 # gce @@ -21,8 +28,11 @@ PLATFORM = 'lxd_container' INSTANCE_TYPE = None # Determines the base image to use or generate new images from. -# Can be the name of the OS if running a stock image, -# otherwise the id of the image being used if using a custom image +# +# This can be the name of an Ubuntu release, or in the format +# <image_id>[::<os>[::<release>]]. 
If given, os and release should describe +# the image specified by image_id. (Ubuntu releases are converted to this +# format internally; in this case, to "focal::ubuntu::focal".) OS_IMAGE = 'focal' # Populate if you want to use a pre-launched instance instead of @@ -55,23 +65,27 @@ EXISTING_INSTANCE_ID = None # A path to a valid package to be uploaded and installed CLOUD_INIT_SOURCE = 'NONE' +# Before an instance is torn down, we run `cloud-init collect-logs` +# and transfer them locally. These settings specify when to collect these +# logs and where to put them on the local filesystem +# One of: +# 'ALWAYS' +# 'ON_ERROR' +# 'NEVER' +COLLECT_LOGS = 'ON_ERROR' +LOCAL_LOG_PATH = '/tmp/cloud_init_test_logs' + ################################################################## -# GCE SPECIFIC SETTINGS +# SSH KEY SETTINGS ################################################################## -# Required for GCE -GCE_PROJECT = None -# You probably want to override these -GCE_REGION = 'us-central1' -GCE_ZONE = 'a' +# A path to the public SSH key to use for test runs. (Defaults to pycloudlib's +# default behaviour, using ~/.ssh/id_rsa.pub.) +PUBLIC_SSH_KEY = None -################################################################## -# OCI SPECIFIC SETTINGS -################################################################## -# Compartment-id found at -# https://console.us-phoenix-1.oraclecloud.com/a/identity/compartments -# Required for Oracle -OCI_COMPARTMENT_ID = None +# For clouds which use named keypairs for SSH connection, the name that is used +# for the keypair. (Defaults to pycloudlib's default behaviour.) 
+KEYPAIR_NAME = None ################################################################## # USER SETTINGS OVERRIDES @@ -91,6 +105,12 @@ except ImportError: # Perhaps a bit too hacky, but it works :) current_settings = [var for var in locals() if var.isupper()] for setting in current_settings: - globals()[setting] = os.getenv( + env_setting = os.getenv( 'CLOUD_INIT_{}'.format(setting), globals()[setting] ) + if isinstance(env_setting, str): + try: + env_setting = bool(strtobool(env_setting.strip())) + except ValueError: + pass + globals()[setting] = env_setting diff --git a/tests/integration_tests/log_utils.py b/tests/integration_tests/log_utils.py new file mode 100644 index 00000000..40baae7b --- /dev/null +++ b/tests/integration_tests/log_utils.py @@ -0,0 +1,11 @@ +def verify_ordered_items_in_text(to_verify: list, text: str): + """Assert all items in list appear in order in text. + + Examples: + verify_ordered_items_in_text(['a', '1'], 'ab1') # passes + verify_ordered_items_in_text(['1', 'a'], 'ab1') # raises AssertionError + """ + index = 0 + for item in to_verify: + index = text[index:].find(item) + assert index > -1, "Expected item not found: '{}'".format(item) diff --git a/tests/integration_tests/modules/test_apt_configure_sources_list.py b/tests/integration_tests/modules/test_apt_configure_sources_list.py index d2bcc61a..28cbe19f 100644 --- a/tests/integration_tests/modules/test_apt_configure_sources_list.py +++ b/tests/integration_tests/modules/test_apt_configure_sources_list.py @@ -40,6 +40,7 @@ EXPECTED_REGEXES = [ @pytest.mark.ci +@pytest.mark.ubuntu class TestAptConfigureSourcesList: @pytest.mark.user_data(USER_DATA) diff --git a/tests/integration_tests/modules/test_ca_certs.py b/tests/integration_tests/modules/test_ca_certs.py new file mode 100644 index 00000000..89c01a9c --- /dev/null +++ b/tests/integration_tests/modules/test_ca_certs.py @@ -0,0 +1,91 @@ +"""Integration tests for cc_ca_certs. 
+ +(This is ported from ``tests/cloud_tests//testcases/modules/ca_certs.yaml``.) + +TODO: +* Mark this as running on Debian and Alpine (once we have marks for that) +* Implement testing for the RHEL-specific paths +""" +import os.path + +import pytest + + +USER_DATA = """\ +#cloud-config +ca-certs: + remove-defaults: true + trusted: + - | + -----BEGIN CERTIFICATE----- + MIIGJzCCBA+gAwIBAgIBATANBgkqhkiG9w0BAQUFADCBsjELMAkGA1UEBhMCRlIx + DzANBgNVBAgMBkFsc2FjZTETMBEGA1UEBwwKU3RyYXNib3VyZzEYMBYGA1UECgwP + d3d3LmZyZWVsYW4ub3JnMRAwDgYDVQQLDAdmcmVlbGFuMS0wKwYDVQQDDCRGcmVl + bGFuIFNhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxIjAgBgkqhkiG9w0BCQEW + E2NvbnRhY3RAZnJlZWxhbi5vcmcwHhcNMTIwNDI3MTAzMTE4WhcNMjIwNDI1MTAz + MTE4WjB+MQswCQYDVQQGEwJGUjEPMA0GA1UECAwGQWxzYWNlMRgwFgYDVQQKDA93 + d3cuZnJlZWxhbi5vcmcxEDAOBgNVBAsMB2ZyZWVsYW4xDjAMBgNVBAMMBWFsaWNl + MSIwIAYJKoZIhvcNAQkBFhNjb250YWN0QGZyZWVsYW4ub3JnMIICIjANBgkqhkiG + 9w0BAQEFAAOCAg8AMIICCgKCAgEA3W29+ID6194bH6ejLrIC4hb2Ugo8v6ZC+Mrc + k2dNYMNPjcOKABvxxEtBamnSaeU/IY7FC/giN622LEtV/3oDcrua0+yWuVafyxmZ + yTKUb4/GUgafRQPf/eiX9urWurtIK7XgNGFNUjYPq4dSJQPPhwCHE/LKAykWnZBX + RrX0Dq4XyApNku0IpjIjEXH+8ixE12wH8wt7DEvdO7T3N3CfUbaITl1qBX+Nm2Z6 + q4Ag/u5rl8NJfXg71ZmXA3XOj7zFvpyapRIZcPmkvZYn7SMCp8dXyXHPdpSiIWL2 + uB3KiO4JrUYvt2GzLBUThp+lNSZaZ/Q3yOaAAUkOx+1h08285Pi+P8lO+H2Xic4S + vMq1xtLg2bNoPC5KnbRfuFPuUD2/3dSiiragJ6uYDLOyWJDivKGt/72OVTEPAL9o + 6T2pGZrwbQuiFGrGTMZOvWMSpQtNl+tCCXlT4mWqJDRwuMGrI4DnnGzt3IKqNwS4 + Qyo9KqjMIPwnXZAmWPm3FOKe4sFwc5fpawKO01JZewDsYTDxVj+cwXwFxbE2yBiF + z2FAHwfopwaH35p3C6lkcgP2k/zgAlnBluzACUI+MKJ/G0gv/uAhj1OHJQ3L6kn1 + SpvQ41/ueBjlunExqQSYD7GtZ1Kg8uOcq2r+WISE3Qc9MpQFFkUVllmgWGwYDuN3 + Zsez95kCAwEAAaN7MHkwCQYDVR0TBAIwADAsBglghkgBhvhCAQ0EHxYdT3BlblNT + TCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwHQYDVR0OBBYEFFlfyRO6G8y5qEFKikl5 + ajb2fT7XMB8GA1UdIwQYMBaAFCNsLT0+KV14uGw+quK7Lh5sh/JTMA0GCSqGSIb3 + DQEBBQUAA4ICAQAT5wJFPqervbja5+90iKxi1d0QVtVGB+z6aoAMuWK+qgi0vgvr + mu9ot2lvTSCSnRhjeiP0SIdqFMORmBtOCFk/kYDp9M/91b+vS+S9eAlxrNCB5VOf + 
PqxEPp/wv1rBcE4GBO/c6HcFon3F+oBYCsUQbZDKSSZxhDm3mj7pb67FNbZbJIzJ + 70HDsRe2O04oiTx+h6g6pW3cOQMgIAvFgKN5Ex727K4230B0NIdGkzuj4KSML0NM + slSAcXZ41OoSKNjy44BVEZv0ZdxTDrRM4EwJtNyggFzmtTuV02nkUj1bYYYC5f0L + ADr6s0XMyaNk8twlWYlYDZ5uKDpVRVBfiGcq0uJIzIvemhuTrofh8pBQQNkPRDFT + Rq1iTo1Ihhl3/Fl1kXk1WR3jTjNb4jHX7lIoXwpwp767HAPKGhjQ9cFbnHMEtkro + RlJYdtRq5mccDtwT0GFyoJLLBZdHHMHJz0F9H7FNk2tTQQMhK5MVYwg+LIaee586 + CQVqfbscp7evlgjLW98H+5zylRHAgoH2G79aHljNKMp9BOuq6SnEglEsiWGVtu2l + hnx8SB3sVJZHeer8f/UQQwqbAO+Kdy70NmbSaqaVtp8jOxLiidWkwSyRTsuU6D8i + DiH5uEqBXExjrj0FslxcVKdVj5glVcSmkLwZKbEU1OKwleT/iXFhvooWhQ== + -----END CERTIFICATE----- +""" + + +@pytest.mark.ubuntu +@pytest.mark.user_data(USER_DATA) +class TestCaCerts: + def test_certs_updated(self, class_client): + """Test that /etc/ssl/certs is updated as we expect.""" + root = "/etc/ssl/certs" + filenames = class_client.execute(["ls", "-1", root]).splitlines() + unlinked_files = [] + links = {} + for filename in filenames: + full_path = os.path.join(root, filename) + symlink_target = class_client.execute(["readlink", full_path]) + is_symlink = symlink_target.ok + if is_symlink: + links[filename] = symlink_target + else: + unlinked_files.append(filename) + + assert ["ca-certificates.crt"] == unlinked_files + assert "cloud-init-ca-certs.pem" == links["a535c1f3.0"] + assert ( + "/usr/share/ca-certificates/cloud-init-ca-certs.crt" + == links["cloud-init-ca-certs.pem"] + ) + + def test_cert_installed(self, class_client): + """Test that our specified cert has been installed""" + checksum = class_client.execute( + "sha256sum /etc/ssl/certs/ca-certificates.crt" + ) + assert ( + "78e875f18c73c1aab9167ae0bd323391e52222cc2dbcda42d129537219300062" + in checksum + ) diff --git a/tests/integration_tests/modules/test_cli.py b/tests/integration_tests/modules/test_cli.py new file mode 100644 index 00000000..3f41b34d --- /dev/null +++ b/tests/integration_tests/modules/test_cli.py @@ -0,0 +1,45 @@ +"""Integration tests for CLI functionality + +These 
would be for behavior manually invoked by user from the command line +""" + +import pytest + +from tests.integration_tests.instances import IntegrationInstance + + +VALID_USER_DATA = """\ +#cloud-config +runcmd: + - echo 'hi' > /var/tmp/test +""" + +INVALID_USER_DATA = """\ +runcmd: + - echo 'hi' > /var/tmp/test +""" + + +@pytest.mark.sru_2020_11 +@pytest.mark.user_data(VALID_USER_DATA) +def test_valid_userdata(client: IntegrationInstance): + """Test `cloud-init devel schema` with valid userdata. + + PR #575 + """ + result = client.execute('cloud-init devel schema --system') + assert result.ok + assert 'Valid cloud-config: system userdata' == result.stdout.strip() + + +@pytest.mark.sru_2020_11 +@pytest.mark.user_data(INVALID_USER_DATA) +def test_invalid_userdata(client: IntegrationInstance): + """Test `cloud-init devel schema` with invalid userdata. + + PR #575 + """ + result = client.execute('cloud-init devel schema --system') + assert not result.ok + assert 'Cloud config schema errors' in result.stderr + assert 'needs to begin with "#cloud-config"' in result.stderr diff --git a/tests/integration_tests/modules/test_lxd_bridge.py b/tests/integration_tests/modules/test_lxd_bridge.py new file mode 100644 index 00000000..cbf11179 --- /dev/null +++ b/tests/integration_tests/modules/test_lxd_bridge.py @@ -0,0 +1,48 @@ +"""Integration tests for LXD bridge creation. + +(This is ported from +``tests/cloud_tests/testcases/modules/lxd_bridge.yaml``.) 
+""" +import pytest +import yaml + + +USER_DATA = """\ +#cloud-config +lxd: + init: + storage_backend: dir + bridge: + mode: new + name: lxdbr0 + ipv4_address: 10.100.100.1 + ipv4_netmask: 24 + ipv4_dhcp_first: 10.100.100.100 + ipv4_dhcp_last: 10.100.100.200 + ipv4_nat: true + domain: lxd +""" + + +@pytest.mark.no_container +@pytest.mark.user_data(USER_DATA) +class TestLxdBridge: + + @pytest.mark.parametrize("binary_name", ["lxc", "lxd"]) + def test_binaries_installed(self, class_client, binary_name): + """Check that the expected LXD binaries are installed""" + assert class_client.execute(["which", binary_name]).ok + + @pytest.mark.not_xenial + @pytest.mark.sru_2020_11 + def test_bridge(self, class_client): + """Check that the given bridge is configured""" + cloud_init_log = class_client.read_from_file("/var/log/cloud-init.log") + assert "WARN" not in cloud_init_log + + # The bridge should exist + assert class_client.execute("ip addr show lxdbr0") + + raw_network_config = class_client.execute("lxc network show lxdbr0") + network_config = yaml.safe_load(raw_network_config) + assert "10.100.100.1/24" == network_config["config"]["ipv4.address"] diff --git a/tests/integration_tests/modules/test_package_update_upgrade_install.py b/tests/integration_tests/modules/test_package_update_upgrade_install.py index 8a38ad84..28d741bc 100644 --- a/tests/integration_tests/modules/test_package_update_upgrade_install.py +++ b/tests/integration_tests/modules/test_package_update_upgrade_install.py @@ -26,6 +26,7 @@ package_upgrade: true """ +@pytest.mark.ubuntu @pytest.mark.user_data(USER_DATA) class TestPackageUpdateUpgradeInstall: diff --git a/tests/integration_tests/modules/test_power_state_change.py b/tests/integration_tests/modules/test_power_state_change.py new file mode 100644 index 00000000..32dfc86d --- /dev/null +++ b/tests/integration_tests/modules/test_power_state_change.py @@ -0,0 +1,90 @@ +"""Integration test of the cc_power_state_change module. 
+ +Test that the power state config options work as expected. +""" + +import time + +import pytest + +from tests.integration_tests.clouds import IntegrationCloud +from tests.integration_tests.instances import IntegrationInstance +from tests.integration_tests.log_utils import verify_ordered_items_in_text + +USER_DATA = """\ +#cloud-config +power_state: + delay: {delay} + mode: {mode} + message: msg + timeout: {timeout} + condition: {condition} +""" + + +def _detect_reboot(instance: IntegrationInstance): + # We'll wait for instance up here, but we don't know if we're + # detecting the first boot or second boot, so we also check + # the logs to ensure we've booted twice. If the logs show we've + # only booted once, wait until we've booted twice + instance.instance.wait() + for _ in range(600): + try: + log = instance.read_from_file('/var/log/cloud-init.log') + boot_count = log.count("running 'init-local'") + if boot_count == 1: + instance.instance.wait() + elif boot_count > 1: + break + except Exception: + pass + time.sleep(1) + else: + raise Exception('Could not detect reboot') + + +def _can_connect(instance): + return instance.execute('true').ok + + +# This test is marked unstable because even though it should be able to +# run anywhere, I can only get it to run in an lxd container, and even then +# occasionally some timing issues will crop up. 
+@pytest.mark.unstable +@pytest.mark.sru_2020_11 +@pytest.mark.ubuntu +@pytest.mark.lxd_container +class TestPowerChange: + @pytest.mark.parametrize('mode,delay,timeout,expected', [ + ('poweroff', 'now', '10', 'will execute: shutdown -P now msg'), + ('reboot', 'now', '0', 'will execute: shutdown -r now msg'), + ('halt', '+1', '0', 'will execute: shutdown -H +1 msg'), + ]) + def test_poweroff(self, session_cloud: IntegrationCloud, + mode, delay, timeout, expected): + with session_cloud.launch( + user_data=USER_DATA.format( + delay=delay, mode=mode, timeout=timeout, condition='true'), + wait=False + ) as instance: + if mode == 'reboot': + _detect_reboot(instance) + else: + instance.instance.wait_for_stop() + instance.instance.start(wait=True) + log = instance.read_from_file('/var/log/cloud-init.log') + assert _can_connect(instance) + lines_to_check = [ + 'Running module power-state-change', + expected, + "running 'init-local'", + 'config-power-state-change already ran', + ] + verify_ordered_items_in_text(lines_to_check, log) + + @pytest.mark.user_data(USER_DATA.format(delay='0', mode='poweroff', + timeout='0', condition='false')) + def test_poweroff_false_condition(self, client: IntegrationInstance): + log = client.read_from_file('/var/log/cloud-init.log') + assert _can_connect(client) + assert 'Condition was false. 
Will not perform state change' in log diff --git a/tests/integration_tests/modules/test_seed_random_data.py b/tests/integration_tests/modules/test_seed_random_data.py index b365fa98..f6a67c19 100644 --- a/tests/integration_tests/modules/test_seed_random_data.py +++ b/tests/integration_tests/modules/test_seed_random_data.py @@ -25,4 +25,4 @@ class TestSeedRandomData: @pytest.mark.user_data(USER_DATA) def test_seed_random_data(self, client): seed_output = client.read_from_file("/root/seed") - assert seed_output.strip() == "MYUb34023nD:LFDK10913jk;dfnk:Df" + assert seed_output.startswith("MYUb34023nD:LFDK10913jk;dfnk:Df") diff --git a/tests/integration_tests/modules/test_snap.py b/tests/integration_tests/modules/test_snap.py index b626f6b0..481edbaa 100644 --- a/tests/integration_tests/modules/test_snap.py +++ b/tests/integration_tests/modules/test_snap.py @@ -20,6 +20,7 @@ snap: @pytest.mark.ci +@pytest.mark.ubuntu class TestSnap: @pytest.mark.user_data(USER_DATA) diff --git a/tests/integration_tests/modules/test_ssh_import_id.py b/tests/integration_tests/modules/test_ssh_import_id.py index 45d37d6c..3db573b5 100644 --- a/tests/integration_tests/modules/test_ssh_import_id.py +++ b/tests/integration_tests/modules/test_ssh_import_id.py @@ -3,6 +3,10 @@ This test specifies ssh keys to be imported by the ``ssh_import_id`` module and then checks that if the ssh keys were successfully imported. +TODO: +* This test assumes that SSH keys will be imported into the /home/ubuntu; this + will need modification to run on other OSes. 
+ (This is ported from ``tests/cloud_tests/testcases/modules/ssh_import_id.yaml``.)""" @@ -18,6 +22,7 @@ ssh_import_id: @pytest.mark.ci +@pytest.mark.ubuntu class TestSshImportId: @pytest.mark.user_data(USER_DATA) diff --git a/tests/integration_tests/modules/test_ssh_keys_provided.py b/tests/integration_tests/modules/test_ssh_keys_provided.py index 27d193c1..6aae96ae 100644 --- a/tests/integration_tests/modules/test_ssh_keys_provided.py +++ b/tests/integration_tests/modules/test_ssh_keys_provided.py @@ -83,66 +83,78 @@ ssh_keys: @pytest.mark.user_data(USER_DATA) class TestSshKeysProvided: - def test_ssh_dsa_keys_provided(self, class_client): - """Test dsa public key was imported.""" - out = class_client.read_from_file("/etc/ssh/ssh_host_dsa_key.pub") - assert ( - "AAAAB3NzaC1kc3MAAACBAPkWy1zbchVIN7qTgM0/yyY8q4R" - "ZS8cNM4ZpeuE5UB/Nnr6OSU/nmbO8LuM") in out - - """Test dsa private key was imported.""" - out = class_client.read_from_file("/etc/ssh/ssh_host_dsa_key") - assert ( - "MIIBuwIBAAKBgQD5Fstc23IVSDe6k4DNP8smPKuEWUvHDTOGaXr" - "hOVAfzZ6+jklP") in out - - def test_ssh_rsa_keys_provided(self, class_client): - """Test rsa public key was imported.""" - out = class_client.read_from_file("/etc/ssh/ssh_host_rsa_key.pub") - assert ( - "AAAAB3NzaC1yc2EAAAADAQABAAABAQC0/Ho+o3eJISydO2JvIgT" - "LnZOtrxPl+fSvJfKDjoOLY0HB2eOjy2s2/2N6d9X9SGZ4") in out - - """Test rsa private key was imported.""" - out = class_client.read_from_file("/etc/ssh/ssh_host_rsa_key") - assert ( - "4DOkqNiUGl80Zp1RgZNohHUXlJMtAbrIlAVEk+mTmg7vjfyp2un" - "RQvLZpMRdywBm") in out - - def test_ssh_rsa_certificate_provided(self, class_client): - """Test rsa certificate was imported.""" - out = class_client.read_from_file("/etc/ssh/ssh_host_rsa_key-cert.pub") - assert ( - "AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgMpg" - "BP4Phn3L8I7Vqh7lmHKcOfIokEvSEbHDw83Y3JloAAAAD") in out - - def test_ssh_certificate_updated_sshd_config(self, class_client): - """Test ssh certificate was added to 
/etc/ssh/sshd_config.""" - out = class_client.read_from_file("/etc/ssh/sshd_config").strip() - assert "HostCertificate /etc/ssh/ssh_host_rsa_key-cert.pub" in out - - def test_ssh_ecdsa_keys_provided(self, class_client): - """Test ecdsa public key was imported.""" - out = class_client.read_from_file("/etc/ssh/ssh_host_ecdsa_key.pub") - assert ( - "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAAB" - "BBFsS5Tvky/IC/dXhE/afxxU") in out - - """Test ecdsa private key generated.""" - out = class_client.read_from_file("/etc/ssh/ssh_host_ecdsa_key") - assert ( - "AwEHoUQDQgAEWxLlO+TL8gL91eET9p/HFQbqR1A691AkJgZk3jY" - "5mpZqxgX4vcgb") in out - - def test_ssh_ed25519_keys_provided(self, class_client): - """Test ed25519 public key was imported.""" - out = class_client.read_from_file("/etc/ssh/ssh_host_ed25519_key.pub") - assert ( - "AAAAC3NzaC1lZDI1NTE5AAAAINudAZSu4vjZpVWzId5pXmZg1M6" - "G15dqjQ2XkNVOEnb5") in out - - """Test ed25519 private key was imported.""" - out = class_client.read_from_file("/etc/ssh/ssh_host_ed25519_key") - assert ( - "XAAAAAtzc2gtZWQyNTUxOQAAACDbnQGUruL42aVVsyHeaV5mYNT" - "OhteXao0Nl5DVThJ2+Q") in out + @pytest.mark.parametrize( + "config_path,expected_out", + ( + ( + "/etc/ssh/ssh_host_dsa_key.pub", + ( + "AAAAB3NzaC1kc3MAAACBAPkWy1zbchVIN7qTgM0/yyY8q4R" + "ZS8cNM4ZpeuE5UB/Nnr6OSU/nmbO8LuM" + ), + ), + ( + "/etc/ssh/ssh_host_dsa_key", + ( + "MIIBuwIBAAKBgQD5Fstc23IVSDe6k4DNP8smPKuEWUvHDTOGaXr" + "hOVAfzZ6+jklP" + ), + ), + ( + "/etc/ssh/ssh_host_rsa_key.pub", + ( + "AAAAB3NzaC1yc2EAAAADAQABAAABAQC0/Ho+o3eJISydO2JvIgT" + "LnZOtrxPl+fSvJfKDjoOLY0HB2eOjy2s2/2N6d9X9SGZ4" + ), + ), + ( + "/etc/ssh/ssh_host_rsa_key", + ( + "4DOkqNiUGl80Zp1RgZNohHUXlJMtAbrIlAVEk+mTmg7vjfyp2un" + "RQvLZpMRdywBm" + ), + ), + ( + "/etc/ssh/ssh_host_rsa_key-cert.pub", + ( + "AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgMpg" + "BP4Phn3L8I7Vqh7lmHKcOfIokEvSEbHDw83Y3JloAAAAD" + ), + ), + ( + "/etc/ssh/sshd_config", + "HostCertificate /etc/ssh/ssh_host_rsa_key-cert.pub", + ), 
+ ( + "/etc/ssh/ssh_host_ecdsa_key.pub", + ( + "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAAB" + "BBFsS5Tvky/IC/dXhE/afxxU" + ), + ), + ( + "/etc/ssh/ssh_host_ecdsa_key", + ( + "AwEHoUQDQgAEWxLlO+TL8gL91eET9p/HFQbqR1A691AkJgZk3jY" + "5mpZqxgX4vcgb" + ), + ), + ( + "/etc/ssh/ssh_host_ed25519_key.pub", + ( + "AAAAC3NzaC1lZDI1NTE5AAAAINudAZSu4vjZpVWzId5pXmZg1M6" + "G15dqjQ2XkNVOEnb5" + ), + ), + ( + "/etc/ssh/ssh_host_ed25519_key", + ( + "XAAAAAtzc2gtZWQyNTUxOQAAACDbnQGUruL42aVVsyHeaV5mYNT" + "OhteXao0Nl5DVThJ2+Q" + ), + ), + ) + ) + def test_ssh_provided_keys(self, config_path, expected_out, class_client): + out = class_client.read_from_file(config_path).strip() + assert expected_out in out diff --git a/tests/integration_tests/modules/test_users_groups.py b/tests/integration_tests/modules/test_users_groups.py index 6a51f5a6..ee08d87b 100644 --- a/tests/integration_tests/modules/test_users_groups.py +++ b/tests/integration_tests/modules/test_users_groups.py @@ -2,6 +2,10 @@ This test specifies a number of users and groups via user-data, and confirms that they have been configured correctly in the system under test. + +TODO: +* This test assumes that the "ubuntu" user will be created when "default" is + specified; this will need modification to run on other OSes. 
""" import re @@ -41,6 +45,7 @@ AHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/ @pytest.mark.ci @pytest.mark.user_data(USER_DATA) class TestUsersGroups: + @pytest.mark.ubuntu @pytest.mark.parametrize( "getent_args,regex", [ diff --git a/tests/integration_tests/test_upgrade.py b/tests/integration_tests/test_upgrade.py new file mode 100644 index 00000000..233a574b --- /dev/null +++ b/tests/integration_tests/test_upgrade.py @@ -0,0 +1,98 @@ +import logging +import pytest +import time +from pathlib import Path + +from tests.integration_tests.clouds import ImageSpecification, IntegrationCloud +from tests.integration_tests.conftest import ( + get_validated_source, + session_start_time, +) + +log = logging.getLogger('integration_testing') + +USER_DATA = """\ +#cloud-config +hostname: SRU-worked +""" + + +def _output_to_compare(instance, file_path, netcfg_path): + commands = [ + 'hostname', + 'dpkg-query --show cloud-init', + 'cat /run/cloud-init/result.json', + # 'cloud-init init' helps us understand if our pickling upgrade paths + # have broken across re-constitution of a cached datasource. Some + # platforms invalidate their datasource cache on reboot, so we run + # it here to ensure we get a dirty run. 
+ 'cloud-init init', + 'grep Trace /var/log/cloud-init.log', + 'cloud-id', + 'cat {}'.format(netcfg_path), + 'systemd-analyze', + 'systemd-analyze blame', + 'cloud-init analyze show', + 'cloud-init analyze blame', + ] + with file_path.open('w') as f: + for command in commands: + f.write('===== {} ====='.format(command) + '\n') + f.write(instance.execute(command) + '\n') + + +def _restart(instance): + # work around pad.lv/1908287 + instance.restart() + if not instance.execute('cloud-init status --wait --long').ok: + for _ in range(10): + time.sleep(5) + result = instance.execute('cloud-init status --wait --long') + if result.ok: + return + raise Exception("Cloud-init didn't finish starting up") + + +@pytest.mark.sru_2020_11 +def test_upgrade(session_cloud: IntegrationCloud): + source = get_validated_source(session_cloud) + if not source.installs_new_version(): + pytest.skip("Install method '{}' not supported for this test".format( + source + )) + return # type checking doesn't understand that skip raises + + launch_kwargs = { + 'image_id': session_cloud._get_initial_image(), + } + + image = ImageSpecification.from_os_image() + + # Get the paths to write test logs + output_dir = Path(session_cloud.settings.LOCAL_LOG_PATH) + output_dir.mkdir(parents=True, exist_ok=True) + base_filename = 'test_upgrade_{platform}_{os}_{{stage}}_{time}.log'.format( + platform=session_cloud.settings.PLATFORM, + os=image.release, + time=session_start_time, + ) + before_path = output_dir / base_filename.format(stage='before') + after_path = output_dir / base_filename.format(stage='after') + + # Get the network cfg file + netcfg_path = '/dev/null' + if image.os == 'ubuntu': + netcfg_path = '/etc/netplan/50-cloud-init.yaml' + if image.release == 'xenial': + netcfg_path = '/etc/network/interfaces.d/50-cloud-init.cfg' + + with session_cloud.launch( + launch_kwargs=launch_kwargs, user_data=USER_DATA, wait=True, + ) as instance: + _output_to_compare(instance, before_path, netcfg_path) + 
instance.install_new_cloud_init(source, take_snapshot=False) + instance.execute('hostname something-else') + _restart(instance) + _output_to_compare(instance, after_path, netcfg_path) + + log.info('Wrote upgrade test logs to %s and %s', before_path, after_path) diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index e363c1f9..dc615309 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -159,6 +159,22 @@ SECONDARY_INTERFACE = { } } +SECONDARY_INTERFACE_NO_IP = { + "macAddress": "220D3A047598", + "ipv6": { + "ipAddress": [] + }, + "ipv4": { + "subnet": [ + { + "prefix": "24", + "address": "10.0.1.0" + } + ], + "ipAddress": [] + } +} + IMDS_NETWORK_METADATA = { "interface": [ { @@ -1139,6 +1155,30 @@ scbus-1 on xpt0 bus 0 dsrc.get_data() self.assertEqual(expected_network_config, dsrc.network_config) + @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', + return_value=None) + def test_network_config_set_from_imds_for_secondary_nic_no_ip( + self, m_driver): + """If an IP address is empty then there should no config for it.""" + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg} + expected_network_config = { + 'ethernets': { + 'eth0': {'set-name': 'eth0', + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'dhcp6': False, + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}}}, + 'version': 2} + imds_data = copy.deepcopy(NETWORK_METADATA) + imds_data['network']['interface'].append(SECONDARY_INTERFACE_NO_IP) + self.m_get_metadata_from_imds.return_value = imds_data + dsrc = self._get_ds(data) + dsrc.get_data() + self.assertEqual(expected_network_config, dsrc.network_config) + def test_availability_zone_set_from_imds(self): """Datasource.availability returns IMDS platformFaultDomain.""" sys_cfg = {'datasource': {'Azure': 
{'apply_network_config': True}}} @@ -1757,7 +1797,9 @@ scbus-1 on xpt0 bus 0 dsrc.get_data() dsrc.setup(True) ssh_keys = dsrc.get_public_ssh_keys() - self.assertEqual(ssh_keys, ['key1']) + # Temporarily alter this test so that SSH public keys + # from IMDS are *not* going to be in use to fix a regression. + self.assertEqual(ssh_keys, []) self.assertEqual(m_parse_certificates.call_count, 0) @mock.patch(MOCKPATH + 'get_metadata_from_imds') diff --git a/tests/unittests/test_datasource/test_ovf.py b/tests/unittests/test_datasource/test_ovf.py index 16773de5..dce01f5d 100644 --- a/tests/unittests/test_datasource/test_ovf.py +++ b/tests/unittests/test_datasource/test_ovf.py @@ -17,6 +17,7 @@ from cloudinit.helpers import Paths from cloudinit.sources import DataSourceOVF as dsovf from cloudinit.sources.helpers.vmware.imc.config_custom_script import ( CustomScriptNotFound) +from cloudinit.safeyaml import YAMLError MPATH = 'cloudinit.sources.DataSourceOVF.' @@ -138,16 +139,29 @@ class TestDatasourceOVF(CiTestCase): 'DEBUG: No system-product-name found', self.logs.getvalue()) def test_get_data_no_vmware_customization_disabled(self): - """When vmware customization is disabled via sys_cfg log a message.""" + """When cloud-init workflow for vmware is disabled via sys_cfg and + no meta data provided, log a message. 
+ """ paths = Paths({'cloud_dir': self.tdir}) ds = self.datasource( sys_cfg={'disable_vmware_customization': True}, distro={}, paths=paths) + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [CUSTOM-SCRIPT] + SCRIPT-NAME = test-script + [MISC] + MARKER-ID = 12345345 + """) + util.write_file(conf_file, conf_content) retcode = wrap_and_call( 'cloudinit.sources.DataSourceOVF', {'dmi.read_dmi_data': 'vmware', 'transport_iso9660': NOT_FOUND, - 'transport_vmware_guestinfo': NOT_FOUND}, + 'transport_vmware_guestinfo': NOT_FOUND, + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file}, ds.get_data) self.assertFalse(retcode, 'Expected False return from ds.get_data') self.assertIn( @@ -344,6 +358,279 @@ class TestDatasourceOVF(CiTestCase): 'vmware (%s/seed/ovf-env.xml)' % self.tdir, ds.subplatform) + def test_get_data_cloudinit_metadata_json(self): + """Test metadata can be loaded to cloud-init metadata and network. + The metadata format is json. 
+ """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': True}, distro={}, + paths=paths) + # Prepare the conf file + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [CLOUDINIT] + METADATA = test-meta + """) + util.write_file(conf_file, conf_content) + # Prepare the meta data file + metadata_file = self.tmp_path('test-meta', self.tdir) + metadata_content = dedent("""\ + { + "instance-id": "cloud-vm", + "local-hostname": "my-host.domain.com", + "network": { + "version": 2, + "ethernets": { + "eths": { + "match": { + "name": "ens*" + }, + "dhcp4": true + } + } + } + } + """) + util.write_file(metadata_file, metadata_content) + + with mock.patch(MPATH + 'set_customization_status', + return_value=('msg', b'')): + result = wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': 'vmware', + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'collect_imc_file_paths': [self.tdir + '/test-meta', '', ''], + 'get_nics_to_enable': ''}, + ds._get_data) + + self.assertTrue(result) + self.assertEqual("cloud-vm", ds.metadata['instance-id']) + self.assertEqual("my-host.domain.com", ds.metadata['local-hostname']) + self.assertEqual(2, ds.network_config['version']) + self.assertTrue(ds.network_config['ethernets']['eths']['dhcp4']) + + def test_get_data_cloudinit_metadata_yaml(self): + """Test metadata can be loaded to cloud-init metadata and network. + The metadata format is yaml. 
+ """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': True}, distro={}, + paths=paths) + # Prepare the conf file + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [CLOUDINIT] + METADATA = test-meta + """) + util.write_file(conf_file, conf_content) + # Prepare the meta data file + metadata_file = self.tmp_path('test-meta', self.tdir) + metadata_content = dedent("""\ + instance-id: cloud-vm + local-hostname: my-host.domain.com + network: + version: 2 + ethernets: + nics: + match: + name: ens* + dhcp4: yes + """) + util.write_file(metadata_file, metadata_content) + + with mock.patch(MPATH + 'set_customization_status', + return_value=('msg', b'')): + result = wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': 'vmware', + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'collect_imc_file_paths': [self.tdir + '/test-meta', '', ''], + 'get_nics_to_enable': ''}, + ds._get_data) + + self.assertTrue(result) + self.assertEqual("cloud-vm", ds.metadata['instance-id']) + self.assertEqual("my-host.domain.com", ds.metadata['local-hostname']) + self.assertEqual(2, ds.network_config['version']) + self.assertTrue(ds.network_config['ethernets']['nics']['dhcp4']) + + def test_get_data_cloudinit_metadata_not_valid(self): + """Test metadata is not JSON or YAML format. 
+ """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': True}, distro={}, + paths=paths) + + # Prepare the conf file + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [CLOUDINIT] + METADATA = test-meta + """) + util.write_file(conf_file, conf_content) + + # Prepare the meta data file + metadata_file = self.tmp_path('test-meta', self.tdir) + metadata_content = "[This is not json or yaml format]a=b" + util.write_file(metadata_file, metadata_content) + + with mock.patch(MPATH + 'set_customization_status', + return_value=('msg', b'')): + with self.assertRaises(YAMLError) as context: + wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': 'vmware', + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'collect_imc_file_paths': [ + self.tdir + '/test-meta', '', '' + ], + 'get_nics_to_enable': ''}, + ds.get_data) + + self.assertIn("expected '<document start>', but found '<scalar>'", + str(context.exception)) + + def test_get_data_cloudinit_metadata_not_found(self): + """Test metadata file can't be found. 
+ """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': True}, distro={}, + paths=paths) + # Prepare the conf file + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [CLOUDINIT] + METADATA = test-meta + """) + util.write_file(conf_file, conf_content) + # Don't prepare the meta data file + + with mock.patch(MPATH + 'set_customization_status', + return_value=('msg', b'')): + with self.assertRaises(FileNotFoundError) as context: + wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': 'vmware', + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'get_nics_to_enable': ''}, + ds.get_data) + + self.assertIn('is not found', str(context.exception)) + + def test_get_data_cloudinit_userdata(self): + """Test user data can be loaded to cloud-init user data. + """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': False}, distro={}, + paths=paths) + + # Prepare the conf file + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [CLOUDINIT] + METADATA = test-meta + USERDATA = test-user + """) + util.write_file(conf_file, conf_content) + + # Prepare the meta data file + metadata_file = self.tmp_path('test-meta', self.tdir) + metadata_content = dedent("""\ + instance-id: cloud-vm + local-hostname: my-host.domain.com + network: + version: 2 + ethernets: + nics: + match: + name: ens* + dhcp4: yes + """) + util.write_file(metadata_file, metadata_content) + + # Prepare the user data file + userdata_file = self.tmp_path('test-user', self.tdir) + userdata_content = "This is the user data" + util.write_file(userdata_file, userdata_content) + + with mock.patch(MPATH + 'set_customization_status', + return_value=('msg', b'')): + result = wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': 'vmware', + 'util.del_dir': True, + 
'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'collect_imc_file_paths': [self.tdir + '/test-meta', + self.tdir + '/test-user', ''], + 'get_nics_to_enable': ''}, + ds._get_data) + + self.assertTrue(result) + self.assertEqual("cloud-vm", ds.metadata['instance-id']) + self.assertEqual(userdata_content, ds.userdata_raw) + + def test_get_data_cloudinit_userdata_not_found(self): + """Test userdata file can't be found. + """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': True}, distro={}, + paths=paths) + + # Prepare the conf file + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [CLOUDINIT] + METADATA = test-meta + USERDATA = test-user + """) + util.write_file(conf_file, conf_content) + + # Prepare the meta data file + metadata_file = self.tmp_path('test-meta', self.tdir) + metadata_content = dedent("""\ + instance-id: cloud-vm + local-hostname: my-host.domain.com + network: + version: 2 + ethernets: + nics: + match: + name: ens* + dhcp4: yes + """) + util.write_file(metadata_file, metadata_content) + + # Don't prepare the user data file + + with mock.patch(MPATH + 'set_customization_status', + return_value=('msg', b'')): + with self.assertRaises(FileNotFoundError) as context: + wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': 'vmware', + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'get_nics_to_enable': ''}, + ds.get_data) + + self.assertIn('is not found', str(context.exception)) + class TestTransportIso9660(CiTestCase): diff --git a/tests/unittests/test_handler/test_handler_ca_certs.py b/tests/unittests/test_handler/test_handler_ca_certs.py index e74a0a08..6e3831ed 100644 --- a/tests/unittests/test_handler/test_handler_ca_certs.py +++ b/tests/unittests/test_handler/test_handler_ca_certs.py @@ -47,12 +47,20 @@ class TestConfig(TestCase): def setUp(self): super(TestConfig, self).setUp() 
self.name = "ca-certs" - distro = self._fetch_distro('ubuntu') self.paths = None - self.cloud = cloud.Cloud(None, self.paths, None, distro, None) self.log = logging.getLogger("TestNoConfig") self.args = [] + def _fetch_distro(self, kind): + cls = distros.fetch(kind) + paths = helpers.Paths({}) + return cls(kind, {}, paths) + + def _get_cloud(self, kind): + distro = self._fetch_distro(kind) + return cloud.Cloud(None, self.paths, None, distro, None) + + def _mock_init(self): self.mocks = ExitStack() self.addCleanup(self.mocks.close) @@ -64,11 +72,6 @@ class TestConfig(TestCase): self.mock_remove = self.mocks.enter_context( mock.patch.object(cc_ca_certs, 'remove_default_ca_certs')) - def _fetch_distro(self, kind): - cls = distros.fetch(kind) - paths = helpers.Paths({}) - return cls(kind, {}, paths) - def test_no_trusted_list(self): """ Test that no certificates are written if the 'trusted' key is not @@ -76,71 +79,95 @@ class TestConfig(TestCase): """ config = {"ca-certs": {}} - cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args) + for distro_name in cc_ca_certs.distros: + self._mock_init() + cloud = self._get_cloud(distro_name) + cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) - self.assertEqual(self.mock_add.call_count, 0) - self.assertEqual(self.mock_update.call_count, 1) - self.assertEqual(self.mock_remove.call_count, 0) + self.assertEqual(self.mock_add.call_count, 0) + self.assertEqual(self.mock_update.call_count, 1) + self.assertEqual(self.mock_remove.call_count, 0) def test_empty_trusted_list(self): """Test that no certificate are written if 'trusted' list is empty.""" config = {"ca-certs": {"trusted": []}} - cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args) + for distro_name in cc_ca_certs.distros: + self._mock_init() + cloud = self._get_cloud(distro_name) + cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) - self.assertEqual(self.mock_add.call_count, 0) - 
self.assertEqual(self.mock_update.call_count, 1) - self.assertEqual(self.mock_remove.call_count, 0) + self.assertEqual(self.mock_add.call_count, 0) + self.assertEqual(self.mock_update.call_count, 1) + self.assertEqual(self.mock_remove.call_count, 0) def test_single_trusted(self): """Test that a single cert gets passed to add_ca_certs.""" config = {"ca-certs": {"trusted": ["CERT1"]}} - cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args) + for distro_name in cc_ca_certs.distros: + self._mock_init() + cloud = self._get_cloud(distro_name) + conf = cc_ca_certs._distro_ca_certs_configs(distro_name) + cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) - self.mock_add.assert_called_once_with(['CERT1']) - self.assertEqual(self.mock_update.call_count, 1) - self.assertEqual(self.mock_remove.call_count, 0) + self.mock_add.assert_called_once_with(conf, ['CERT1']) + self.assertEqual(self.mock_update.call_count, 1) + self.assertEqual(self.mock_remove.call_count, 0) def test_multiple_trusted(self): """Test that multiple certs get passed to add_ca_certs.""" config = {"ca-certs": {"trusted": ["CERT1", "CERT2"]}} - cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args) + for distro_name in cc_ca_certs.distros: + self._mock_init() + cloud = self._get_cloud(distro_name) + conf = cc_ca_certs._distro_ca_certs_configs(distro_name) + cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) - self.mock_add.assert_called_once_with(['CERT1', 'CERT2']) - self.assertEqual(self.mock_update.call_count, 1) - self.assertEqual(self.mock_remove.call_count, 0) + self.mock_add.assert_called_once_with(conf, ['CERT1', 'CERT2']) + self.assertEqual(self.mock_update.call_count, 1) + self.assertEqual(self.mock_remove.call_count, 0) def test_remove_default_ca_certs(self): """Test remove_defaults works as expected.""" config = {"ca-certs": {"remove-defaults": True}} - cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args) + for 
distro_name in cc_ca_certs.distros: + self._mock_init() + cloud = self._get_cloud(distro_name) + cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) - self.assertEqual(self.mock_add.call_count, 0) - self.assertEqual(self.mock_update.call_count, 1) - self.assertEqual(self.mock_remove.call_count, 1) + self.assertEqual(self.mock_add.call_count, 0) + self.assertEqual(self.mock_update.call_count, 1) + self.assertEqual(self.mock_remove.call_count, 1) def test_no_remove_defaults_if_false(self): """Test remove_defaults is not called when config value is False.""" config = {"ca-certs": {"remove-defaults": False}} - cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args) + for distro_name in cc_ca_certs.distros: + self._mock_init() + cloud = self._get_cloud(distro_name) + cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) - self.assertEqual(self.mock_add.call_count, 0) - self.assertEqual(self.mock_update.call_count, 1) - self.assertEqual(self.mock_remove.call_count, 0) + self.assertEqual(self.mock_add.call_count, 0) + self.assertEqual(self.mock_update.call_count, 1) + self.assertEqual(self.mock_remove.call_count, 0) def test_correct_order_for_remove_then_add(self): """Test remove_defaults is not called when config value is False.""" config = {"ca-certs": {"remove-defaults": True, "trusted": ["CERT1"]}} - cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args) + for distro_name in cc_ca_certs.distros: + self._mock_init() + cloud = self._get_cloud(distro_name) + conf = cc_ca_certs._distro_ca_certs_configs(distro_name) + cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) - self.mock_add.assert_called_once_with(['CERT1']) - self.assertEqual(self.mock_update.call_count, 1) - self.assertEqual(self.mock_remove.call_count, 1) + self.mock_add.assert_called_once_with(conf, ['CERT1']) + self.assertEqual(self.mock_update.call_count, 1) + self.assertEqual(self.mock_remove.call_count, 1) class 
TestAddCaCerts(TestCase): @@ -152,12 +179,20 @@ class TestAddCaCerts(TestCase): self.paths = helpers.Paths({ 'cloud_dir': tmpdir, }) + self.add_patch("cloudinit.config.cc_ca_certs.os.stat", "m_stat") + + def _fetch_distro(self, kind): + cls = distros.fetch(kind) + paths = helpers.Paths({}) + return cls(kind, {}, paths) def test_no_certs_in_list(self): """Test that no certificate are written if not provided.""" - with mock.patch.object(util, 'write_file') as mockobj: - cc_ca_certs.add_ca_certs([]) - self.assertEqual(mockobj.call_count, 0) + for distro_name in cc_ca_certs.distros: + conf = cc_ca_certs._distro_ca_certs_configs(distro_name) + with mock.patch.object(util, 'write_file') as mockobj: + cc_ca_certs.add_ca_certs(conf, []) + self.assertEqual(mockobj.call_count, 0) def test_single_cert_trailing_cr(self): """Test adding a single certificate to the trusted CAs @@ -167,20 +202,28 @@ class TestAddCaCerts(TestCase): ca_certs_content = "line1\nline2\ncloud-init-ca-certs.crt\nline3\n" expected = "line1\nline2\nline3\ncloud-init-ca-certs.crt\n" - with ExitStack() as mocks: - mock_write = mocks.enter_context( - mock.patch.object(util, 'write_file')) - mock_load = mocks.enter_context( - mock.patch.object(util, 'load_file', - return_value=ca_certs_content)) + self.m_stat.return_value.st_size = 1 - cc_ca_certs.add_ca_certs([cert]) + for distro_name in cc_ca_certs.distros: + conf = cc_ca_certs._distro_ca_certs_configs(distro_name) - mock_write.assert_has_calls([ - mock.call("/usr/share/ca-certificates/cloud-init-ca-certs.crt", - cert, mode=0o644), - mock.call("/etc/ca-certificates.conf", expected, omode="wb")]) - mock_load.assert_called_once_with("/etc/ca-certificates.conf") + with ExitStack() as mocks: + mock_write = mocks.enter_context( + mock.patch.object(util, 'write_file')) + mock_load = mocks.enter_context( + mock.patch.object(util, 'load_file', + return_value=ca_certs_content)) + + cc_ca_certs.add_ca_certs(conf, [cert]) + + mock_write.assert_has_calls([ + 
mock.call(conf['ca_cert_full_path'], + cert, mode=0o644)]) + if conf['ca_cert_config'] is not None: + mock_write.assert_has_calls([ + mock.call(conf['ca_cert_config'], + expected, omode="wb")]) + mock_load.assert_called_once_with(conf['ca_cert_config']) def test_single_cert_no_trailing_cr(self): """Test adding a single certificate to the trusted CAs @@ -189,24 +232,31 @@ class TestAddCaCerts(TestCase): ca_certs_content = "line1\nline2\nline3" - with ExitStack() as mocks: - mock_write = mocks.enter_context( - mock.patch.object(util, 'write_file')) - mock_load = mocks.enter_context( - mock.patch.object(util, 'load_file', - return_value=ca_certs_content)) + self.m_stat.return_value.st_size = 1 + + for distro_name in cc_ca_certs.distros: + conf = cc_ca_certs._distro_ca_certs_configs(distro_name) - cc_ca_certs.add_ca_certs([cert]) + with ExitStack() as mocks: + mock_write = mocks.enter_context( + mock.patch.object(util, 'write_file')) + mock_load = mocks.enter_context( + mock.patch.object(util, 'load_file', + return_value=ca_certs_content)) - mock_write.assert_has_calls([ - mock.call("/usr/share/ca-certificates/cloud-init-ca-certs.crt", - cert, mode=0o644), - mock.call("/etc/ca-certificates.conf", - "%s\n%s\n" % (ca_certs_content, - "cloud-init-ca-certs.crt"), - omode="wb")]) + cc_ca_certs.add_ca_certs(conf, [cert]) - mock_load.assert_called_once_with("/etc/ca-certificates.conf") + mock_write.assert_has_calls([ + mock.call(conf['ca_cert_full_path'], + cert, mode=0o644)]) + if conf['ca_cert_config'] is not None: + mock_write.assert_has_calls([ + mock.call(conf['ca_cert_config'], + "%s\n%s\n" % (ca_certs_content, + conf['ca_cert_filename']), + omode="wb")]) + + mock_load.assert_called_once_with(conf['ca_cert_config']) def test_single_cert_to_empty_existing_ca_file(self): """Test adding a single certificate to the trusted CAs @@ -215,20 +265,22 @@ class TestAddCaCerts(TestCase): expected = "cloud-init-ca-certs.crt\n" - with ExitStack() as mocks: - mock_write = 
mocks.enter_context( - mock.patch.object(util, 'write_file', autospec=True)) - mock_stat = mocks.enter_context( - mock.patch("cloudinit.config.cc_ca_certs.os.stat") - ) - mock_stat.return_value.st_size = 0 + self.m_stat.return_value.st_size = 0 - cc_ca_certs.add_ca_certs([cert]) + for distro_name in cc_ca_certs.distros: + conf = cc_ca_certs._distro_ca_certs_configs(distro_name) + with mock.patch.object(util, 'write_file', + autospec=True) as m_write: - mock_write.assert_has_calls([ - mock.call("/usr/share/ca-certificates/cloud-init-ca-certs.crt", - cert, mode=0o644), - mock.call("/etc/ca-certificates.conf", expected, omode="wb")]) + cc_ca_certs.add_ca_certs(conf, [cert]) + + m_write.assert_has_calls([ + mock.call(conf['ca_cert_full_path'], + cert, mode=0o644)]) + if conf['ca_cert_config'] is not None: + m_write.assert_has_calls([ + mock.call(conf['ca_cert_config'], + expected, omode="wb")]) def test_multiple_certs(self): """Test adding multiple certificates to the trusted CAs.""" @@ -236,32 +288,41 @@ class TestAddCaCerts(TestCase): expected_cert_file = "\n".join(certs) ca_certs_content = "line1\nline2\nline3" - with ExitStack() as mocks: - mock_write = mocks.enter_context( - mock.patch.object(util, 'write_file')) - mock_load = mocks.enter_context( - mock.patch.object(util, 'load_file', - return_value=ca_certs_content)) + self.m_stat.return_value.st_size = 1 + + for distro_name in cc_ca_certs.distros: + conf = cc_ca_certs._distro_ca_certs_configs(distro_name) + + with ExitStack() as mocks: + mock_write = mocks.enter_context( + mock.patch.object(util, 'write_file')) + mock_load = mocks.enter_context( + mock.patch.object(util, 'load_file', + return_value=ca_certs_content)) - cc_ca_certs.add_ca_certs(certs) + cc_ca_certs.add_ca_certs(conf, certs) - mock_write.assert_has_calls([ - mock.call("/usr/share/ca-certificates/cloud-init-ca-certs.crt", - expected_cert_file, mode=0o644), - mock.call("/etc/ca-certificates.conf", - "%s\n%s\n" % (ca_certs_content, - 
"cloud-init-ca-certs.crt"), - omode='wb')]) + mock_write.assert_has_calls([ + mock.call(conf['ca_cert_full_path'], + expected_cert_file, mode=0o644)]) + if conf['ca_cert_config'] is not None: + mock_write.assert_has_calls([ + mock.call(conf['ca_cert_config'], + "%s\n%s\n" % (ca_certs_content, + conf['ca_cert_filename']), + omode='wb')]) - mock_load.assert_called_once_with("/etc/ca-certificates.conf") + mock_load.assert_called_once_with(conf['ca_cert_config']) class TestUpdateCaCerts(unittest.TestCase): def test_commands(self): - with mock.patch.object(subp, 'subp') as mockobj: - cc_ca_certs.update_ca_certs() - mockobj.assert_called_once_with( - ["update-ca-certificates"], capture=False) + for distro_name in cc_ca_certs.distros: + conf = cc_ca_certs._distro_ca_certs_configs(distro_name) + with mock.patch.object(subp, 'subp') as mockobj: + cc_ca_certs.update_ca_certs(conf) + mockobj.assert_called_once_with( + conf['ca_cert_update_cmd'], capture=False) class TestRemoveDefaultCaCerts(TestCase): @@ -275,24 +336,31 @@ class TestRemoveDefaultCaCerts(TestCase): }) def test_commands(self): - with ExitStack() as mocks: - mock_delete = mocks.enter_context( - mock.patch.object(util, 'delete_dir_contents')) - mock_write = mocks.enter_context( - mock.patch.object(util, 'write_file')) - mock_subp = mocks.enter_context(mock.patch.object(subp, 'subp')) - - cc_ca_certs.remove_default_ca_certs('ubuntu') - - mock_delete.assert_has_calls([ - mock.call("/usr/share/ca-certificates/"), - mock.call("/etc/ssl/certs/")]) - - mock_write.assert_called_once_with( - "/etc/ca-certificates.conf", "", mode=0o644) - - mock_subp.assert_called_once_with( - ('debconf-set-selections', '-'), - "ca-certificates ca-certificates/trust_new_crts select no") + for distro_name in cc_ca_certs.distros: + conf = cc_ca_certs._distro_ca_certs_configs(distro_name) + + with ExitStack() as mocks: + mock_delete = mocks.enter_context( + mock.patch.object(util, 'delete_dir_contents')) + mock_write = mocks.enter_context( + 
mock.patch.object(util, 'write_file')) + mock_subp = mocks.enter_context( + mock.patch.object(subp, 'subp')) + + cc_ca_certs.remove_default_ca_certs(distro_name, conf) + + mock_delete.assert_has_calls([ + mock.call(conf['ca_cert_path']), + mock.call(conf['ca_cert_system_path'])]) + + if conf['ca_cert_config'] is not None: + mock_write.assert_called_once_with( + conf['ca_cert_config'], "", mode=0o644) + + if distro_name in ['debian', 'ubuntu']: + mock_subp.assert_called_once_with( + ('debconf-set-selections', '-'), + "ca-certificates \ +ca-certificates/trust_new_crts select no") # vi: ts=4 expandtab diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 70453683..38d934d4 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -1365,10 +1365,11 @@ NETWORK_CONFIGS = { }, 'expected_sysconfig_rhel': { 'ifcfg-iface0': textwrap.dedent("""\ - BOOTPROTO=none + BOOTPROTO=dhcp DEVICE=iface0 DHCPV6C=yes IPV6INIT=yes + IPV6_AUTOCONF=no IPV6_FORCE_ACCEPT_RA=yes DEVICE=iface0 NM_CONTROLLED=no @@ -4819,6 +4820,9 @@ class TestEniRoundTrip(CiTestCase): {'type': 'route', 'id': 6, 'metric': 1, 'destination': '10.0.200.0/16', 'gateway': '172.23.31.1'}, + {'type': 'route', 'id': 7, + 'metric': 1, 'destination': '10.0.0.100/32', + 'gateway': '172.23.31.1'}, ] files = self._render_and_read( @@ -4842,6 +4846,10 @@ class TestEniRoundTrip(CiTestCase): '172.23.31.1 metric 1 || true'), ('pre-down route del -net 10.0.200.0/16 gw ' '172.23.31.1 metric 1 || true'), + ('post-up route add -host 10.0.0.100/32 gw ' + '172.23.31.1 metric 1 || true'), + ('pre-down route del -host 10.0.0.100/32 gw ' + '172.23.31.1 metric 1 || true'), ] found = files['/etc/network/interfaces'].splitlines() diff --git a/tests/unittests/test_vmware_config_file.py b/tests/unittests/test_vmware_config_file.py index 9c7d25fa..430cc69f 100644 --- a/tests/unittests/test_vmware_config_file.py +++ b/tests/unittests/test_vmware_config_file.py @@ -525,5 +525,21 @@ class 
TestVmwareNetConfig(CiTestCase): 'gateway': '10.20.87.253'}]}], nc.generate()) + def test_meta_data(self): + cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") + conf = Config(cf) + self.assertIsNone(conf.meta_data_name) + cf._insertKey("CLOUDINIT|METADATA", "test-metadata") + conf = Config(cf) + self.assertEqual("test-metadata", conf.meta_data_name) + + def test_user_data(self): + cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") + conf = Config(cf) + self.assertIsNone(conf.user_data_name) + cf._insertKey("CLOUDINIT|USERDATA", "test-userdata") + conf = Config(cf) + self.assertEqual("test-userdata", conf.user_data_name) + # vi: ts=4 expandtab diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers index 1e0c3ea4..689d7902 100644 --- a/tools/.github-cla-signers +++ b/tools/.github-cla-signers @@ -1,17 +1,23 @@ ader1990 +ajmyyra AlexBaranowski Aman306 +andrewbogott +antonyc aswinrajamannar beezly bipinbachhao BirknerAlex candlerb +cawamata +dankenigsberg dermotbradley dhensby eandersson emmanuelthome izzyleung johnsonshi +jordimassaguerpla jqueuniet jsf9k landon912 @@ -20,14 +26,20 @@ lungj manuelisimo marlluslustosa matthewruffell +mitechie nishigori +olivierlemasle omBratteng onitake +qubidt riedel slyon smoser sshedi TheRealFalcon +tnt-dev tomponline tsanghan WebSpider +xiachen-rh +xnox @@ -26,9 +26,9 @@ deps = pylint==2.6.0 # test-requirements because unit tests are now present in cloudinit tree -r{toxinidir}/test-requirements.txt - -r{toxinidir}/cloud-tests-requirements.txt -r{toxinidir}/integration-requirements.txt -commands = {envpython} -m pylint {posargs:cloudinit tests tools} +commands = {envpython} -m pylint {posargs:cloudinit tests --ignore=cloud_tests tools} + [testenv:py3] basepython = python3 @@ -123,13 +123,12 @@ commands = {envpython} -m flake8 {posargs:cloudinit/ tests/ tools/} deps = flake8 [testenv:tip-pylint] -commands = {envpython} -m pylint {posargs:cloudinit tests tools} +commands = {envpython} -m pylint 
{posargs:cloudinit tests --ignore=cloud_tests tools} deps = # requirements pylint # test-requirements -r{toxinidir}/test-requirements.txt - -r{toxinidir}/cloud-tests-requirements.txt -r{toxinidir}/integration-requirements.txt [testenv:citest] @@ -164,6 +163,8 @@ setenv = # TODO: s/--strict/--strict-markers/ once xenial support is dropped testpaths = cloudinit tests/unittests addopts = --strict +log_format = %(asctime)s %(levelname)-9s %(name)s:%(filename)s:%(lineno)d %(message)s +log_date_format = %Y-%m-%d %H:%M:%S markers = allow_subp_for: allow subp usage for the given commands (disable_subp_usage) allow_all_subp: allow all subp usage (disable_subp_usage) @@ -173,9 +174,15 @@ markers = gce: test will only run on GCE platform azure: test will only run on Azure platform oci: test will only run on OCI platform + lxd_config_dict: set the config_dict passed on LXD instance creation lxd_container: test will only run in LXD container lxd_vm: test will only run in LXD VM + not_xenial: test cannot run on the xenial release + not_bionic: test cannot run on the bionic release no_container: test cannot run in a container user_data: the user data to be passed to the test instance instance_name: the name to be used for the test instance sru_2020_11: test is part of the 2020/11 SRU verification + sru_2021_01: test is part of the 2021/01 SRU verification + ubuntu: this test should run on Ubuntu + unstable: skip this test because it is flakey |