399 files changed, 16574 insertions, 13416 deletions
diff --git a/.github/workflows/cla.yml b/.github/workflows/cla.yml new file mode 100644 index 00000000..34e11c2d --- /dev/null +++ b/.github/workflows/cla.yml @@ -0,0 +1,29 @@ +name: Verify Contributor License Agreement + +on: [pull_request] + +jobs: + cla-validate: + + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v1 + - run: | + echo "::set-env name=CLA_SIGNED::$(grep -q ': \"${{ github.actor }}\"' ./tools/.lp-to-git-user && echo CLA signed || echo CLA not signed)" + - name: Add CLA label + run: | + # POST a new label to this issue + curl --request POST \ + --url https://api.github.com/repos/${{ github.repository }}/issues/${{ github.event.number }}/labels \ + --header 'authorization: Bearer ${{ secrets.GITHUB_TOKEN }}' \ + --header 'content-type: application/json' \ + --data '{"labels": ["${{env.CLA_SIGNED}}"]}' + - name: Comment about CLA signing + if: env.CLA_SIGNED == 'CLA not signed' + run: | + # POST a comment directing submitter to sign the CLA + curl --request POST \ + --url https://api.github.com/repos/${{ github.repository }}/issues/${{ github.event.number }}/comments \ + --header 'authorization: Bearer ${{ secrets.GITHUB_TOKEN }}' \ + --header 'content-type: application/json' \ + --data '{"body": "Hello ${{ github.actor }},\n\nThank you for your contribution to cloud-init.\n\nIn order for us to merge this pull request, you need\nto have signed the Contributor License Agreement (CLA).\nPlease ensure that you have signed the CLA by following our\nhacking guide at:\n\nhttps://cloudinit.readthedocs.io/en/latest/topics/hacking.html\n\nThanks,\nYour friendly cloud-init upstream\n"}' @@ -11,3 +11,16 @@ prime stage *.snap *.cover +.idea/ +.venv/ + +# Ignore packaging artifacts +cloud-init.dsc +cloud-init_*.build +cloud-init_*.buildinfo +cloud-init_*.changes +cloud-init_*.deb +cloud-init_*.dsc +cloud-init_*.orig.tar.gz +cloud-init_*.tar.xz +cloud-init_*.upload @@ -62,10 +62,10 @@ ignored-modules= # for classes with dynamically set attributes). This supports the use of # qualified names. # argparse.Namespace from https://github.com/PyCQA/pylint/issues/2413 -ignored-classes=argparse.Namespace,optparse.Values,thread._local +ignored-classes=argparse.Namespace,optparse.Values,thread._local,ImageManager,ContainerManager # List of members which are set dynamically and missed by pylint inference # system, and so shouldn't trigger E1101 when accessed. Python regular # expressions are accepted. -generated-members=types,http.client,command_handlers,m_.* +generated-members=types,http.client,command_handlers,m_.*,enter_context diff --git a/.readthedocs.yaml b/.readthedocs.yaml new file mode 100644 index 00000000..46af8ee7 --- /dev/null +++ b/.readthedocs.yaml @@ -0,0 +1,8 @@ +version: 2 + +formats: all + +python: + install: + - requirements: doc-requirements.txt + - path: . 
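The CLA check above reduces to a single grep: the workflow looks for the pull-request author's GitHub username on the value side of an entry in ``tools/.lp-to-git-user``. A minimal Python sketch of the same membership test follows; the JSON shape of that file (a map of Launchpad usernames to GitHub usernames) is an assumption inferred from the grep pattern and from the ``migrate-lp-user-to-github`` tool referenced later in this diff.

```python
# Sketch of the lookup behind the workflow's grep. Assumption: the
# tools/.lp-to-git-user file is a JSON object mapping Launchpad
# usernames to GitHub usernames, e.g. {"lp-user": "gh-user"}.
import json

def cla_signed(gh_actor, mapping_path="tools/.lp-to-git-user"):
    """Return True if gh_actor appears as a GitHub username value."""
    with open(mapping_path) as fh:
        lp_to_gh = json.load(fh)
    return gh_actor in lp_to_gh.values()

print("CLA signed" if cla_signed("example-gh-user") else "CLA not signed")
```

Matching only the value side means a signer is recognized no matter which Launchpad account maps to their GitHub name; the grep in the workflow is simply a cheaper shell version of this test, emitting the string that later becomes the pull request's label.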
diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 00000000..d2651c0b --- /dev/null +++ b/.travis.yml @@ -0,0 +1,57 @@ +language: python +dist: bionic + +install: + # Required so `git describe` will definitely find a tag; see + # https://github.com/travis-ci/travis-ci/issues/7422 + - git fetch --unshallow + - pip install tox + +script: + - tox + +matrix: + fast_finish: true + include: + - python: 3.6 + env: + TOXENV=py3 + NOSE_VERBOSE=2 # List all tests run by nose + - install: + - git fetch --unshallow + - sudo apt-get build-dep -y cloud-init + - sudo apt-get install -y --install-recommends sbuild ubuntu-dev-tools fakeroot tox + # These are build deps but not pulled in by the build-dep call above + - sudo apt-get install -y --install-recommends dh-systemd python3-coverage python3-contextlib2 + - pip install . + - pip install tox + # bionic has lxd from deb installed, remove it first to ensure + # pylxd talks only to the lxd from snap + - sudo apt remove --purge lxd lxd-client + - sudo rm -Rf /var/lib/lxd + - sudo snap install lxd + - sudo lxd init --auto + - sudo mkdir --mode=1777 -p /var/snap/lxd/common/consoles + - sudo usermod -a -G lxd $USER + - sudo sbuild-adduser $USER + - cp /usr/share/doc/sbuild/examples/example.sbuildrc /home/$USER/.sbuildrc + script: + # Ubuntu LTS: Build + - ./packages/bddeb -S + # Use this to get a new shell where we're in the sbuild group + - sudo -E su $USER -c 'mk-sbuild xenial' + - sudo -E su $USER -c 'sbuild --nolog --verbose --dist=xenial cloud-init_*.dsc' + # Ubuntu LTS: Integration + - sg lxd -c 'tox -e citest -- run --verbose --preserve-data --data-dir results --os-name xenial --test modules/apt_configure_sources_list.yaml --test modules/ntp_servers --test modules/set_password_list --test modules/user_groups --deb cloud-init_*_all.deb' + - python: 3.5 + env: + TOXENV=xenial + NOSE_VERBOSE=2 # List all tests run by nose + # Travis doesn't support Python 3.4 on bionic, so use xenial + dist: xenial + - python: 3.6 + env: TOXENV=pycodestyle + - python: 3.6 + env: TOXENV=pyflakes + - python: 3.6 + env: TOXENV=pylint @@ -1,3 +1,404 @@ +20.1 + - ec2: Do not log IMDSv2 token values, instead use REDACTED (#219) + (LP: #1863943) + - utils: use SystemRandom when generating random password. (#204) + [Dimitri John Ledkov] + - docs: mount_default_files is a list of 6 items, not 7 (#212) + - azurecloud: fix issues with instances not starting (#205) (LP: #1861921) + - unittest: fix stderr leak in cc_set_password random unittest + output. (#208) + - cc_disk_setup: add swap filesystem force flag (#207) + - import sysvinit patches from freebsd-ports tree (#161) [Igor Galić] + - docs: fix typo (#195) [Edwin Kofler] + - sysconfig: distro-specific config rendering for BOOTPROTO option (#162) + [Robert Schweikert] (LP: #1800854) + - cloudinit: replace "from six import X" imports (except in util.py) (#183) + - run-container: use 'test -n' instead of 'test ! 
-z' (#202) + [Paride Legovini] + - net/cmdline: correctly handle static ip= config (#201) + [Dimitri John Ledkov] (LP: #1861412) + - Replace mock library with unittest.mock (#186) + - HACKING.rst: update CLA link (#199) + - Scaleway: Fix DatasourceScaleway to avoid backtrace (#128) + [Louis Bouchard] + - cloudinit/cmd/devel/net_convert.py: add missing space (#191) + - tools/run-container: drop support for python2 (#192) [Paride Legovini] + - Print ssh key fingerprints using sha256 hash (#188) (LP: #1860789) + - Make the RPM build use Python 3 (#190) [Paride Legovini] + - cc_set_password: increase random pwlength from 9 to 20 (#189) + (LP: #1860795) + - .travis.yml: use correct Python version for xenial tests (#185) + - cloudinit: remove ImportError handling for mock imports (#182) + - Do not use fallocate in swap file creation on xfs. (#70) + [Eduardo Otubo] (LP: #1781781) + - .readthedocs.yaml: install cloud-init when building docs (#181) + (LP: #1860450) + - Introduce an RTD config file, and pin the Sphinx version to the RTD + default (#180) + - Drop most of the remaining use of six (#179) + - Start removing dependency on six (#178) + - Add Rootbox & HyperOne to list of cloud in README (#176) [Adam Dobrawy] + - docs: add proposed SRU testing procedure (#167) + - util: rename get_architecture to get_dpkg_architecture (#173) + - Ensure util.get_architecture() runs only once (#172) + - Only use gpart if it is the BSD gpart (#131) [Conrad Hoffmann] + - freebsd: remove superflu exception mapping (#166) [Gonéri Le Bouder] + - ssh_auth_key_fingerprints_disable test: fix capitalization (#165) + [Paride Legovini] + - util: move uptime's else branch into its own boottime function (#53) + [Igor Galić] (LP: #1853160) + - workflows: add contributor license agreement checker (#155) + - net: fix rendering of 'static6' in network config (#77) (LP: #1850988) + - Make tests work with Python 3.8 (#139) [Conrad Hoffmann] + - fixed minor bug with mkswap in cc_disk_setup.py (#143) [andreaf74] + - freebsd: fix create_group() cmd (#146) [Gonéri Le Bouder] + - doc: make apt_update example consistent (#154) + - doc: add modules page toc with links (#153) (LP: #1852456) + - Add support for the amazon variant in cloud.cfg.tmpl (#119) + [Frederick Lefebvre] + - ci: remove Python 2.7 from CI runs (#137) + - modules: drop cc_snap_config config module (#134) + - migrate-lp-user-to-github: ensure Launchpad repo exists (#136) + - docs: add initial troubleshooting to FAQ (#104) [Joshua Powers] + - doc: update cc_set_hostname frequency and descrip (#109) + [Joshua Powers] (LP: #1827021) + - freebsd: introduce the freebsd renderer (#61) [Gonéri Le Bouder] + - cc_snappy: remove deprecated module (#127) + - HACKING.rst: clarify that everyone needs to do the LP->GH dance (#130) + - freebsd: cloudinit service requires devd (#132) [Gonéri Le Bouder] + - cloud-init: fix capitalisation of SSH (#126) + - doc: update cc_ssh clarify host and auth keys + [Joshua Powers] (LP: #1827021) + - ci: emit names of tests run in Travis (#120) + +19.4 + - doc: specify _ over - in cloud config modules + [Joshua Powers] (LP: #1293254) + - tools: Detect python to use via env in migrate-lp-user-to-github + [Adam Dobrawy] + - Partially revert "fix unlocking method on FreeBSD" (#116) + - tests: mock uid when running as root (#113) + [Joshua Powers] (LP: #1856096) + - cloudinit/netinfo: remove unused getgateway (#111) + - docs: clear up apt config sections (#107) [Joshua Powers] (LP: #1832823) + - doc: add kernel command line option to user data (#105) 
+ [Joshua Powers] (LP: #1846524) + - config/cloud.cfg.d: update README [Joshua Powers] (LP: #1855006) + - azure: avoid re-running cloud-init when instance-id is byte-swapped + (#84) [AOhassan] + - fix unlocking method on FreeBSD [Igor Galić] (LP: #1854594) + - debian: add reference to the manpages [Joshua Powers] + - ds_identify: if /sys is not available use dmidecode (#42) + [Igor Galić] (LP: #1852442) + - docs: add cloud-id manpage [Joshua Powers] + - docs: add cloud-init-per manpage [Joshua Powers] + - docs: add cloud-init manpage [Joshua Powers] + - docs: add additional details to per-instance/once [Joshua Powers] + - Update doc-requirements.txt [Joshua Powers] + - doc-requirements: add missing dep [Joshua Powers] + - dhcp: Support RedHat dhcp rfc3442 lease format for option 121 (#76) + [Eric Lafontaine] (LP: #1850642) + - network_state: handle empty v1 config (#45) (LP: #1852496) + - docs: Add document on how to report bugs [Joshua Powers] + - Add an Amazon distro in the redhat OS family [Frederick Lefebvre] + - removed a couple of "the"s [gaughen] + - docs: fix line length and remove highlighting [Joshua Powers] + - docs: Add security.md to readthedocs [Joshua Powers] + - Multiple file fix for AuthorizedKeysFile config (#60) [Eduardo Otubo] + - Revert "travis: only run CI on pull requests" + - doc: update links on README.md [Joshua Powers] + - doc: Updates to wording of README.md [Joshua Powers] + - Add security.md [Joshua Powers] + - setup.py: Amazon Linux sets libexec to /usr/libexec (#52) + [Frederick Lefebvre] + - Fix linting failure in test_url_helper (#83) [Eric Lafontaine] + - url_helper: read_file_or_url should pass headers param into readurl + (#66) (LP: #1854084) + - dmidecode: log result *after* stripping n [Igor Galić] + - cloud_tests: add azure platform support to integration tests + [ahosmanmsft] + - set_passwords: support for FreeBSD (#46) [Igor Galić] + - tools: migrate-lp-user-to-github removes repo_dir if created (#35) + - Correct jumbled documentation for cc_set_hostname module (#64) + [do3meli] (LP: #1853543) + - FreeBSD: fix for get_linux_distro() and lru_cache (#59) + [Igor Galić] (LP: #1815030) + - ec2: Add support for AWS IMDS v2 (session-oriented) (#55) + - tests: Fix cloudsigma tests when no dmidecode data is present. (#57) + [Scott Moser] + - net: IPv6, accept_ra, slaac, stateless (#51) + [Harald] (LP: #1806014, #1808647) + - docs: Update the configdrive datasource links (#44) + [Joshua Powers] (LP: #1852461) + - distro: correctly set usr_lib_exec path for FreeBSD distro (#40) + [Igor Galić] (LP: #1852491) + - azure: support secondary ipv6 addresses (#33) + - Fix metadata check when local-hostname is null (#32) + [Mark Goddard] (LP: #1852100) + - switch default FreeBSD salt minion pkg from py27 to py36 + [Dominic Schlegel] + - travis: only run CI on pull requests + - add data-server dns entry as new metadata server detection [Joshua Hügli] + - pycodestyle: remove unused local variable + - reporting: Using a uuid to enforce uniqueness on the KVP keys. 
[momousta] + - docs: touchups in rtd intro and README.md + - doc: update launchpad git refs to github + - github: drop pull-request template to prepare for migration + - tools: add migrate-lp-user-to-github script to link LP to github + - github: new basic project readme + +19.3 + - azure: support matching dhcp route-metrics for dual-stack ipv4 ipv6 + (LP: #1850308) + - configdrive: fix subplatform config-drive for /config-drive source + [David Kindred] (LP: #1849731) + - DataSourceSmartOS: reconfigure network on each boot + [Mike Gerdts] (LP: #1765801) + - Add config for ssh-key import and consuming user-data [Pavel Zakharov] + - net: fix subnet_is_ipv6() for stateless|stateful + [Harald Jensås] (LP: #1848690) + - OVF: disable custom script execution by default [Xiaofeng Wang] + - cc_puppet: Implement csr_attributes.yaml support [Matthias Baur] + - cloud-init.service: on centos/fedora/redhat wait on NetworkManager.service + (LP: #1843334) + - azure: Do not lock user on instance id change [Sam Eiderman] (LP: #1849677) + - net/netplan: use ipv6-mtu key for specifying ipv6 mtu values + - Fix usages of yaml, and move yaml_dump to safeyaml.dumps. (LP: #1849640) + - exoscale: Increase url_max_wait to 120s. [Chris Glass] + - net/sysconfig: fix available check on SUSE distros + [Robert Schweikert] (LP: #1849378) + - docs: Fix incorrect Azure IMDS IP address [Joshua Powers] (LP: #1849508) + - introduce .travis.yml + - net: enable infiniband support in eni and sysconfig renderers + [Darren Birkett] (LP: #1847114) + - guestcust_util: handle special characters in config file [Xiaofeng Wang] + - fix some more typos in comments [Dominic Schlegel] + - replace any deprecated log.warn with log.warning + [Dominic Schlegel] (LP: #1508442) + - net: handle openstack dhcpv6-stateless configuration + [Harald Jensås] (LP: #1847517) + - Add .venv/ to .gitignore [Dominic Schlegel] + - Small typo fixes in code comments. [Dominic Schlegel] + - cloud_test/lxd: Retry container delete a few times + - Add Support for e24cloud to Ec2 datasource. (LP: #1696476) + - Add RbxCloud datasource [Adam Dobrawy] + - get_interfaces: don't exclude bridge and bond members (LP: #1846535) + - Add support for Arch Linux in render-cloudcfg [Conrad Hoffmann] + - util: json.dumps on python 2.7 will handle UnicodeDecodeError on binary + (LP: #1801364) + - debian/ubuntu: add missing word to netplan/ENI header (LP: #1845669) + - ovf: do not generate random instance-id for IMC customization path + - sysconfig: only write resolv.conf if network_state has DNS values + (LP: #1843634) + - sysconfig: use distro variant to check if available (LP: #1843584) + - systemd/cloud-init.service.tmpl: start after wicked.service + [Robert Schweikert] + - docs: fix zstack documentation lints + - analyze/show: remove trailing space in output + - Add missing space in warning: "not avalid seed" [Brian Candler] + - pylintrc: add 'enter_context' to generated-members list + - Add datasource for ZStack platform.
[Shixin Ruan] (LP: #1841181) + - docs: organize TOC and update summary of project [Joshua Powers] + - tools: make clean now cleans the dev directory, not the system + - docs: create cli specific page [Joshua Powers] + - docs: added output examples to analyze.rst [Joshua Powers] + - docs: doc8 fixes for instancedata page [Joshua Powers] + - docs: clean up formatting, organize boot page [Joshua Powers] + - net: add is_master check for filtering device list (LP: #1844191) + - docs: more complete list of availability [Joshua Powers] + - docs: start FAQ page [Joshua Powers] + - docs: cleanup output & order of datasource page [Joshua Powers] + - Brightbox: restrict detection to require full domain match .brightbox.com + - VMWware: add option into VMTools config to enable/disable custom script. + [Xiaofeng Wang] + - net,Oracle: Add support for netfailover detection + - atomic_helper: add DEBUG logging to write_file (LP: #1843276) + - doc: document doc, create makefile and tox target [Joshua Powers] + - .gitignore: ignore files produced by package builds + - docs: fix whitespace, spelling, and line length [Joshua Powers] + - docs: remove unnecessary file in doc directory [Joshua Powers] + - Oracle: Render secondary vnic IP and MTU values only + - exoscale: fix sysconfig cloud_config_modules overrides (LP: #1841454) + - net/cmdline: refactor to allow multiple initramfs network config sources + - ubuntu-drivers: call db_x_loadtemplatefile to accept NVIDIA EULA + (LP: #1840080) + - Add missing #cloud-config comment on first example in documentation. + [Florian Müller] + - ubuntu-drivers: emit latelink=true debconf to accept nvidia eula + (LP: #1840080) + - DataSourceOracle: prefer DS network config over initramfs + - format.rst: add text/jinja2 to list of content types (+ cleanups) + - Add GitHub pull request template to point people at hacking doc + - cloudinit/distros/parsers/sys_conf: add docstring to SysConf + - pyflakes: remove unused variable [Joshua Powers] + - Azure: Record boot timestamps, system information, and diagnostic events + [Anh Vo] + - DataSourceOracle: configure secondary NICs on Virtual Machines + - distros: fix confusing variable names + - azure/net: generate_fallback_nic emits network v2 config instead of v1 + - Add support for publishing host keys to GCE guest attributes [Rick Wright] + - New data source for the Exoscale.com cloud platform [Chris Glass] + - doc: remove intersphinx extension + - cc_set_passwords: rewrite documentation (LP: #1838794) + - net/cmdline: split interfaces_by_mac and init network config determination + - stages: allow data sources to override network config source order + - cloud_tests: updates and fixes + - Fix bug rendering MTU on bond or vlan when input was netplan. (LP: #1836949) + - net: update net sequence, include wait on netdevs, opensuse netrules path + (LP: #1817368) +19.2: + - net: add rfc3442 (classless static routes) to EphemeralDHCP + (LP: #1821102) + - templates/ntp.conf.debian.tmpl: fix missing newline for pools + (LP: #1836598) + - Support netplan renderer in Arch Linux [Conrad Hoffmann] + - Fix typo in publicly viewable documentation. [David Medberry] + - Add a cdrom size checker for OVF ds to ds-identify + [Pengpeng Sun] (LP: #1806701) + - VMWare: Trigger the post customization script via cc_scripts module. + [Xiaofeng Wang] (LP: #1833192) + - Cloud-init analyze module: Added ability to analyze boot events. 
+ [Sam Gilson] + - Update debian eni network configuration location, retain Ubuntu setting + [Janos Lenart] + - net: skip bond interfaces in get_interfaces + [Stanislav Makar] (LP: #1812857) + - Fix a couple of issues raised by a coverity scan + - Add missing dsname for Hetzner Cloud datasource [Markus Schade] + - doc: indicate that netplan is default in Ubuntu now + - azure: add region and AZ properties from imds compute location metadata + - sysconfig: support more bonding options [Penghui Liao] + - cloud-init-generator: use libexec path to ds-identify on redhat systems + (LP: #1833264) + - tools/build-on-freebsd: update to python3 [Gonéri Le Bouder] + - Allow identification of OpenStack by Asset Tag + [Mark T. Voelker] (LP: #1669875) + - Fix spelling error making 'an Ubuntu' consistent. [Brian Murray] + - run-container: centos: comment out the repo mirrorlist [Paride Legovini] + - netplan: update netplan key mappings for gratuitous-arp (LP: #1827238) + - freebsd: fix the name of cloudcfg VARIANT [Gonéri Le Bouder] + - freebsd: ability to grow root file system [Gonéri Le Bouder] + - freebsd: NoCloud data source support [Gonéri Le Bouder] (LP: #1645824) + - Azure: Return static fallback address as if failed to find endpoint + [Jason Zions (MSFT)] + +19.1: + - freebsd: add chpasswd pkg in the image [Gonéri Le Bouder] + - tests: add Eoan release [Paride Legovini] + - cc_mounts: check if mount -a on no-change fstab path + [Jason Zions (MSFT)] (LP: #1825596) + - replace remaining occurrences of LOG.warn [Daniel Watkins] + - DataSourceAzure: Adjust timeout for polling IMDS [Anh Vo] + - Azure: Changes to the Hyper-V KVP Reporter [Anh Vo] + - git tests: no longer show warning about safe yaml. + - tools/read-version: handle errors [Chad Miller] + - net/sysconfig: only indicate available on known sysconfig distros + (LP: #1819994) + - packages: update rpm specs for new bash completion path + [Daniel Watkins] (LP: #1825444) + - test_azure: mock util.SeLinuxGuard where needed + [Jason Zions (MSFT)] (LP: #1825253) + - setup.py: install bash completion script in new location [Daniel Watkins] + - mount_cb: do not pass sync and rw options to mount + [Gonéri Le Bouder] (LP: #1645824) + - cc_apt_configure: fix typo in apt documentation [Dominic Schlegel] + - Revert "DataSource: move update_events from a class to an instance..." + [Daniel Watkins] + - Change DataSourceNoCloud to ignore file system label's case. + [Risto Oikarinen] + - cmd:main.py: Fix missing 'modules-init' key in modes dict + [Antonio Romito] (LP: #1815109) + - ubuntu_advantage: rewrite cloud-config module + - Azure: Treat _unset network configuration as if it were absent + [Jason Zions (MSFT)] (LP: #1823084) + - DatasourceAzure: add additional logging for azure datasource [Anh Vo] + - cloud_tests: fix apt_pipelining test-cases + - Azure: Ensure platform random_seed is always serializable as JSON. 
+ [Jason Zions (MSFT)] + - net/sysconfig: write out SUSE-compatible IPv6 config [Robert Schweikert] + - tox: Update testenv for openSUSE Leap to 15.0 [Thomas Bechtold] + - net: Fix ipv6 static routes when using eni renderer + [Raphael Glon] (LP: #1818669) + - Add ubuntu_drivers config module [Daniel Watkins] + - doc: Refresh Azure walinuxagent docs [Daniel Watkins] + - tox: bump pylint version to latest (2.3.1) [Daniel Watkins] + - DataSource: move update_events from a class to an instance attribute + [Daniel Watkins] (LP: #1819913) + - net/sysconfig: Handle default route setup for dhcp configured NICs + [Robert Schweikert] (LP: #1812117) + - DataSourceEc2: update RELEASE_BLOCKER to be more accurate + [Daniel Watkins] + - cloud-init-per: POSIX sh does not support string subst, use sed + (LP: #1819222) + - Support locking user with usermod if passwd is not available. + - Example for Microsoft Azure data disk added. [Anton Olifir] + - clean: correctly determine the path for excluding seed directory + [Daniel Watkins] (LP: #1818571) + - helpers/openstack: Treat unknown link types as physical + [Daniel Watkins] (LP: #1639263) + - drop Python 2.6 support and our NIH version detection [Daniel Watkins] + - tip-pylint: Fix assignment-from-return-none errors + - net: append type:dhcp[46] only if dhcp[46] is True in v2 netconfig + [Kurt Stieger] (LP: #1818032) + - cc_apt_pipelining: stop disabling pipelining by default + [Daniel Watkins] (LP: #1794982) + - tests: fix some slow tests and some leaking state [Daniel Watkins] + - util: don't determine string_types ourselves [Daniel Watkins] + - cc_rsyslog: Escape possible nested set [Daniel Watkins] (LP: #1816967) + - Enable encrypted_data_bag_secret support for Chef + [Eric Williams] (LP: #1817082) + - azure: Filter list of ssh keys pulled from fabric [Jason Zions (MSFT)] + - doc: update merging doc with fixes and some additional details/examples + - tests: integration test failure summary to use traceback if empty error + - This is to fix https://bugs.launchpad.net/cloud-init/+bug/1812676 + [Vitaly Kuznetsov] + - EC2: Rewrite network config on AWS Classic instances every boot + [Guilherme G. Piccoli] (LP: #1802073) + - netinfo: Adjust ifconfig output parsing for FreeBSD ipv6 entries + (LP: #1779672) + - netplan: Don't render yaml aliases when dumping netplan (LP: #1815051) + - add PyCharm IDE .idea/ path to .gitignore [Dominic Schlegel] + - correct grammar issue in instance metadata documentation + [Dominic Schlegel] (LP: #1802188) + - clean: cloud-init clean should not trace when run from within cloud_dir + (LP: #1795508) + - Resolve flake8 comparison and pycodestyle over-ident issues + [Paride Legovini] + - opennebula: also exclude epochseconds from changed environment vars + (LP: #1813641) + - systemd: Render generator from template to account for system + differences. [Robert Schweikert] + - sysconfig: On SUSE, use STARTMODE instead of ONBOOT + [Robert Schweikert] (LP: #1799540) + - flake8: use ==/!= to compare str, bytes, and int literals + [Paride Legovini] + - opennebula: exclude EPOCHREALTIME as known bash env variable with a + delta (LP: #1813383) + - tox: fix disco httpretty dependencies for py37 (LP: #1813361) + - run-container: uncomment baseurl in yum.repos.d/*.repo when using a + proxy [Paride Legovini] + - lxd: install zfs-linux instead of zfs meta package + [Johnson Shi] (LP: #1799779) + - net/sysconfig: do not write a resolv.conf file with only the header. 
+ [Robert Schweikert] + - net: Make sysconfig renderer compatible with Network Manager. + [Eduardo Otubo] + - cc_set_passwords: Fix regex when parsing hashed passwords + [Marlin Cremers] (LP: #1811446) + - net: Wait for dhclient to daemonize before reading lease file + [Jason Zions] (LP: #1794399) + - [Azure] Increase retries when talking to Wireserver during metadata walk + [Jason Zions] + - Add documentation on adding a datasource. + - doc: clean up some datasource documentation. + - ds-identify: fix wrong variable name in ovf_vmware_transport_guestinfo. + - Scaleway: Support ssh keys provided inside an instance tag. [PORTE Loïc] + - OVF: simplify expected return values of transport functions. + - Vmware: Add support for the com.vmware.guestInfo OVF transport. + (LP: #1807466) + - HACKING.rst: change contact info to Josh Powers + - Update to pylint 2.2.2. + 18.5: - tests: add Disco release [Joshua Powers] - net: render 'metric' values in per-subnet routes (LP: #1805871) diff --git a/HACKING.rst b/HACKING.rst index 3bb555c2..e050fa93 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -3,18 +3,41 @@ Hacking on cloud-init ********************* This document describes how to contribute changes to cloud-init. -It assumes you have a `Launchpad`_ account, and refers to your launchpad user -as ``LP_USER`` throughout. +It assumes you have a `GitHub`_ account, and refers to your GitHub user +as ``GH_USER`` throughout. Do these things once ==================== * To contribute, you must sign the Canonical `contributor license agreement`_ - If you have already signed it as an individual, your Launchpad user will be listed in the `contributor-agreement-canonical`_ group. Unfortunately there is no easy way to check if an organization or company you are doing work for has signed. If you are unsure or have questions, email `Scott Moser <mailto:scott.moser@canonical.com>`_ or ping smoser in ``#cloud-init`` channel via freenode. + If you have already signed it as an individual, your Launchpad user will be + listed in the `contributor-agreement-canonical`_ group. Unfortunately there + is no easy way to check if an organization or company you are doing work for + has signed. When signing the CLA and prompted for 'Project contact' or + 'Canonical Project Manager' enter 'Josh Powers'. - When prompted for 'Project contact' or 'Canonical Project Manager' enter - 'Scott Moser'. + For first-time signers, or for existing contributors who have already signed + the agreement in Launchpad, we need to verify the link between your + `Launchpad`_ account and your `GitHub`_ account. To enable us to do this, we + ask that you create a branch with both your Launchpad and GitHub usernames + against both the Launchpad and GitHub cloud-init repositories. We've added a + tool (``tools/migrate-lp-user-to-github``) to the cloud-init repository to + handle this migration as automatically as possible. + + The cloud-init team will review the two merge proposals and verify + that the CLA has been signed for the Launchpad user and record the + associated GitHub account. We will reply to the email address + associated with your Launchpad account that you've been cleared to + contribute to cloud-init on GitHub. + + If your company has signed the CLA for you, please contact us to help + in verifying which Launchpad/GitHub accounts are associated with the + company.
For any questions or help with the process, please email: + + `Josh Powers <mailto:josh.powers@canonical.com>`_ with the subject: Cloud-Init CLA + + You may also contact user ``powersj`` in the ``#cloud-init`` channel via Freenode IRC. * Configure git with your email and name for commit messages. @@ -24,26 +47,24 @@ Do these things once git config user.name "Your Name" git config user.email "Your Email" -* Clone the upstream `repository`_ on Launchpad:: +* Sign into your `GitHub`_ account - git clone https://git.launchpad.net/cloud-init - cd cloud-init +* Fork the upstream `repository`_ on GitHub by clicking on the ``Fork`` button - There is more information on Launchpad as a git hosting site in - `Launchpad git documentation`_. - -* Create a new remote pointing to your personal Launchpad repository. - This is equivalent to 'fork' on github. +* Create a new remote pointing to your personal GitHub repository. .. code:: sh - git remote add LP_USER ssh://LP_USER@git.launchpad.net/~LP_USER/cloud-init - git push LP_USER master + git clone git://github.com/canonical/cloud-init + cd cloud-init + git remote add GH_USER git@github.com:GH_USER/cloud-init.git + git push GH_USER master -.. _repository: https://git.launchpad.net/cloud-init -.. _contributor license agreement: http://www.canonical.com/contributors +.. _GitHub: https://github.com +.. _Launchpad: https://launchpad.net +.. _repository: https://github.com/canonical/cloud-init +.. _contributor license agreement: https://ubuntu.com/legal/contributors .. _contributor-agreement-canonical: https://launchpad.net/%7Econtributor-agreement-canonical/+members -.. _Launchpad git documentation: https://help.launchpad.net/Code/Git Do these things for each feature or bug ======================================= @@ -61,29 +82,21 @@ Do these things for each feature or bug tox -* Push your changes to your personal Launchpad repository:: - git push -u LP_USER my-topic-branch +* Push your changes to your personal GitHub repository:: + git push -u GH_USER my-topic-branch * Use your browser to create a merge request: - - Open the branch on Launchpad. + - Open the branch on GitHub - You can see a web view of your repository and navigate to the branch at: - ``https://code.launchpad.net/~LP_USER/cloud-init/`` - - - It will typically be at: + ``https://github.com/GH_USER/cloud-init/tree/my-topic-branch`` - ``https://code.launchpad.net/~LP_USER/cloud-init/+git/cloud-init/+ref/BRANCHNAME`` - - for example, here is larsks move-to-git branch: https://code.launchpad.net/~larsks/cloud-init/+git/cloud-init/+ref/feature/move-to-git - - - Click 'Propose for merging' - - Select 'lp:cloud-init' as the target repository - - Type '``master``' as the Target reference path - - Click 'Propose Merge' - - On the next page, hit 'Set commit message' and type a git combined git style commit message like:: + - Click 'Pull Request' + - Fill out the pull request title, summarizing the change and a longer + message indicating important details about the changes included, like :: Activate the frobnicator. @@ -100,12 +113,28 @@ Do these things for each feature or bug LP: #1 -Then, someone in the `cloud-init-dev`_ group will review your changes and -follow up in the merge request. + Note that the project continues to use LP: #NNNNN format for closing + Launchpad bugs rather than GitHub Issues. + + - Click 'Create Pull Request' + +Then, someone in the `Ubuntu Server`_ team will review your changes and +follow up in the pull request.
Feel free to ping and/or join ``#cloud-init`` on freenode irc if you have any questions. .. _tox: https://tox.readthedocs.io/en/latest/ -.. _Launchpad: https://launchpad.net -.. _cloud-init-dev: https://launchpad.net/~cloud-init-dev/+members#active +.. _Ubuntu Server: https://github.com/orgs/canonical/teams/ubuntu-server + +Design +====== + +This section captures design decisions that are helpful to know when +hacking on cloud-init. + +Cloud Config Modules +-------------------- + +* Any new modules should use underscores in any new config options and not + hyphens (e.g. `new_option` and *not* `new-option`). diff --git a/Jenkinsfile b/Jenkinsfile index ed98477f..1b9d23e4 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -107,6 +107,16 @@ pipeline { } } } + stage('Install missed dependencies') { + steps { + script { + dir('build') { + sh 'sudo apt update' + sh 'sudo apt install -y python3-jsonschema python3-contextlib2 cloud-utils' + } + } + } + } stage('Build') { steps { script { @@ -114,7 +124,7 @@ pipeline { def commitId = sh(returnStdout: true, script: 'git rev-parse --short=11 HEAD').trim() currentBuild.description = sprintf('Git SHA1: %s', commitId[-11..-1]) - sh 'dpkg-buildpackage -b -us -uc -tc' + sh './packages/bddeb' } } } @@ -128,7 +138,7 @@ pipeline { script { // archive *.deb artifact on custom builds, deploy to repo otherwise if ( isCustomBuild()) { - archiveArtifacts artifacts: '*.deb', fingerprint: true + archiveArtifacts artifacts: 'cloud-init_*_all.deb', fingerprint: true } else { // publish build result, using SSH-dev.packages.vyos.net Jenkins Credentials sshagent(['SSH-dev.packages.vyos.net']) { @@ -148,7 +158,7 @@ pipeline { echo "Uploading package(s) and updating package(s) in the repository ..." - files = findFiles(glob: '*.deb') + files = findFiles(glob: 'cloud-init_*_all.deb') files.each { PACKAGE -> def ARCH = sh(returnStdout: true, script: "dpkg-deb -f ${PACKAGE} Architecture").trim() def SUBSTRING = sh(returnStdout: true, script: "dpkg-deb -f ${PACKAGE} Package").trim() @@ -47,6 +47,12 @@ pyflakes: pyflakes3: @$(CWD)/tools/run-pyflakes3 +unittest: clean_pyc + nosetests $(noseopts) tests/unittests cloudinit + +unittest3: clean_pyc + nosetests3 $(noseopts) tests/unittests cloudinit + ci-deps-ubuntu: @$(PYVER) $(CWD)/tools/read-dependencies --distro ubuntu --test-distro @@ -61,6 +67,8 @@ pip-test-requirements: @echo "Installing cloud-init test dependencies..." $(PIP_INSTALL) -r "$@.txt" -q +test: $(unittests) + check_version: @if [ "$(READ_VERSION)" != "$(CODE_VERSION)" ]; then \ echo "Error: read-version version '$(READ_VERSION)'" \ @@ -72,9 +80,10 @@ config/cloud.cfg: clean_pyc: @find . -type f -name "*.pyc" -delete + @find . 
-type d -name __pycache__ -delete clean: clean_pyc - rm -rf /var/log/cloud-init.log /var/lib/cloud/ + rm -rf doc/rtd_html .tox .coverage yaml: @$(PYVER) $(CWD)/tools/validate-yaml.py $(YAML_FILES) @@ -98,7 +107,9 @@ deb-src: echo sudo apt-get install devscripts; exit 1; } $(PYVER) ./packages/bddeb -S -d +doc: + tox -e doc .PHONY: test pyflakes pyflakes3 clean pep8 rpm srpm deb deb-src yaml .PHONY: check_version pip-test-requirements pip-requirements clean_pyc -.PHONY: unittest unittest3 style-check +.PHONY: unittest unittest3 style-check doc diff --git a/README.md b/README.md new file mode 100644 index 00000000..872ea44e --- /dev/null +++ b/README.md @@ -0,0 +1,56 @@ +# cloud-init + +[![Build Status](https://travis-ci.org/canonical/cloud-init.svg?branch=master)](https://travis-ci.org/canonical/cloud-init) [![Read the Docs](https://readthedocs.org/projects/cloudinit/badge/?version=latest&style=flat)](https://cloudinit.readthedocs.org) + +Cloud-init is the *industry standard* multi-distribution method for +cross-platform cloud instance initialization. It is supported across all +major public cloud providers, provisioning systems for private cloud +infrastructure, and bare-metal installations. + +Cloud instances are initialized from a disk image and instance data: + +- Cloud metadata +- User data (optional) +- Vendor data (optional) + +Cloud-init will identify the cloud it is running on during boot, read any +provided metadata from the cloud and initialize the system accordingly. This +may involve setting up network and storage devices, configuring SSH +access keys, and many other aspects of a system. Later on, cloud-init will +also parse and process any optional user or vendor data that was passed to the +instance. + +## Getting help + +If you need support, start with the [user documentation](https://cloudinit.readthedocs.io/en/latest/). + +If you need additional help, consider reaching out with one of the following options: + +- Ask a question in the [``#cloud-init`` IRC channel on Freenode](https://webchat.freenode.net/?channel=#cloud-init) +- Search the cloud-init [mailing list archive](https://lists.launchpad.net/cloud-init/) +- Better yet, join the [cloud-init mailing list](https://launchpad.net/~cloud-init) and participate +- Find a bug? [Report bugs on Launchpad](https://bugs.launchpad.net/cloud-init/+filebug) + +## Distribution and cloud support + +Below is a list of the many OSes and clouds that contain and ship with cloud-init. If your +distribution or cloud is not listed or does not have a recent version of cloud-init, please +get in contact with that distribution and send them our way!
+ +| Supported OSes | Supported Public Clouds | Supported Private Clouds | +| --- | --- | --- | +| Ubuntu<br />SLES/openSUSE<br />RHEL/CentOS<br />Fedora<br />Gentoo Linux<br />Debian<br />ArchLinux<br />FreeBSD<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /> | Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />Digital Ocean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br />HyperOne<br />Rootbox<br /> | Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br />| + +## To start developing cloud-init + +Checkout the [hacking](https://cloudinit.readthedocs.io/en/latest/topics/hacking.html) +document that outlines the steps necessary to develop, test, and submit code. + +## Daily builds + +Daily builds are useful if you want to try the latest upstream code for the latest +features or to verify bug fixes. + +For Ubuntu, see the [Daily PPAs](https://code.launchpad.net/~cloud-init-dev/+archive/ubuntu/daily) + +For CentOS, see the [COPR build repos](https://copr.fedorainfracloud.org/coprs/g/cloud-init/cloud-init-dev/) diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 00000000..69360bb7 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,64 @@ +# Security Policy + +The following documents the upstream cloud-init security policy. + +## Reporting + +If a user finds a security issue, they are requested to file a [private +security bug on Launchpad](https://bugs.launchpad.net/cloud-init/+filebug). +To ensure the information stays private, change the "This bug contains +information that is:" from "Public" to "Private Security" when filing. + +After the bug is received, the issue is triaged within 2 working days of +being reported and a response is sent to the reporter. + +## cloud-init-security + +The cloud-init-security Launchpad team is a private, invite-only team used to +discuss and coordinate security issues with the project. + +Any issues disclosed to the cloud-init-security mailing list are considered +embargoed and should only be discussed with other members of the +cloud-init-security mailing list before the coordinated release date, unless +specific exception is granted by the administrators of the mailing list. This +includes disclosure of any details related to the vulnerability or the +presence of a vulnerability itself. Violation of this policy may result in +removal from the list for the company or individual involved. + +## Evaluation + +If the reported bug is deemed a real security issue a CVE is assigned by +the Canonical Security Team as CVE Numbering Authority (CNA). + +If it is deemed a regular, non-security, issue, the reporter will be asked to +follow typical bug reporting procedures. + +In addition to the disclosure timeline, the core Canonical cloud-init team +will enlist the expertise of the Ubuntu Security team for guidance on +industry-standard disclosure practices as necessary. + +If an issue specifically involves another distro or cloud vendor, additional +individuals will be informed of the issue to help in evaluation. + +## Disclosure + +Disclosure of security issues will be made with a public statement. 
Once the +determined time for disclosure has arrived, the following will occur: + +* A public bug is filed/made public with vulnerability details, CVE, + mitigations and where to obtain the fix +* An email is sent to the [public cloud-init mailing list](https://lists.launchpad.net/cloud-init/) + +The disclosure timeframe is coordinated with the reporter and members of the +cloud-init-security list. This depends on a number of factors: + +* The reporter might have their own disclosure timeline (e.g. Google Project + Zero and many others use 90 days after the initial report, OR when a fix + becomes public) +* It might take time to decide upon and develop an appropriate fix +* A distro might want extra time to backport any possible fixes before + the fix becomes public +* A cloud may need additional time to prepare to help customers or implement + a fix +* The issue might be deemed low priority +* The project may wish to align with an upcoming planned release diff --git a/cloudinit/analyze/__main__.py b/cloudinit/analyze/__main__.py index f8613656..99e5c203 100644 --- a/cloudinit/analyze/__main__.py +++ b/cloudinit/analyze/__main__.py @@ -7,7 +7,7 @@ import re import sys from cloudinit.util import json_dumps - +from datetime import datetime from . import dump from . import show @@ -52,9 +52,93 @@ def get_parser(parser=None): dest='outfile', default='-', help='specify where to write output. ') parser_dump.set_defaults(action=('dump', analyze_dump)) + parser_boot = subparsers.add_parser( + 'boot', help='Print list of boot times for kernel and cloud-init') + parser_boot.add_argument('-i', '--infile', action='store', + dest='infile', default='/var/log/cloud-init.log', + help='specify where to read input. ') + parser_boot.add_argument('-o', '--outfile', action='store', + dest='outfile', default='-', + help='specify where to write output.') + parser_boot.set_defaults(action=('boot', analyze_boot)) return parser +def analyze_boot(name, args): + """Report a list of how long different boot operations took.
+ + For Example: + -- Most Recent Boot Record -- + Kernel Started at: <time> + Kernel ended boot at: <time> + Kernel time to boot (seconds): <time> + Cloud-init activated by systemd at: <time> + Time between Kernel end boot and Cloud-init activation (seconds):<time> + Cloud-init start: <time> + """ + infh, outfh = configure_io(args) + kernel_info = show.dist_check_timestamp() + status_code, kernel_start, kernel_end, ci_sysd_start = \ + kernel_info + kernel_start_timestamp = datetime.utcfromtimestamp(kernel_start) + kernel_end_timestamp = datetime.utcfromtimestamp(kernel_end) + ci_sysd_start_timestamp = datetime.utcfromtimestamp(ci_sysd_start) + try: + last_init_local = \ + [e for e in _get_events(infh) if e['name'] == 'init-local' and + 'starting search' in e['description']][-1] + ci_start = datetime.utcfromtimestamp(last_init_local['timestamp']) + except IndexError: + ci_start = 'Could not find init-local log-line in cloud-init.log' + status_code = show.FAIL_CODE + + FAILURE_MSG = 'Your Linux distro or container does not support this ' \ + 'functionality.\n' \ + 'You must be running a Kernel Telemetry supported ' \ + 'distro.\nPlease check ' \ + 'https://cloudinit.readthedocs.io/en/latest' \ + '/topics/analyze.html for more ' \ + 'information on supported distros.\n' + + SUCCESS_MSG = '-- Most Recent Boot Record --\n' \ + ' Kernel Started at: {k_s_t}\n' \ + ' Kernel ended boot at: {k_e_t}\n' \ + ' Kernel time to boot (seconds): {k_r}\n' \ + ' Cloud-init activated by systemd at: {ci_sysd_t}\n' \ + ' Time between Kernel end boot and Cloud-init ' \ + 'activation (seconds): {bt_r}\n' \ + ' Cloud-init start: {ci_start}\n' + + CONTAINER_MSG = '-- Most Recent Container Boot Record --\n' \ + ' Container started at: {k_s_t}\n' \ + ' Cloud-init activated by systemd at: {ci_sysd_t}\n' \ + ' Cloud-init start: {ci_start}\n' \ + + status_map = { + show.FAIL_CODE: FAILURE_MSG, + show.CONTAINER_CODE: CONTAINER_MSG, + show.SUCCESS_CODE: SUCCESS_MSG + } + + kernel_runtime = kernel_end - kernel_start + between_process_runtime = ci_sysd_start - kernel_end + + kwargs = { + 'k_s_t': kernel_start_timestamp, + 'k_e_t': kernel_end_timestamp, + 'k_r': kernel_runtime, + 'bt_r': between_process_runtime, + 'k_e': kernel_end, + 'k_s': kernel_start, + 'ci_sysd': ci_sysd_start, + 'ci_sysd_t': ci_sysd_start_timestamp, + 'ci_start': ci_start + } + + outfh.write(status_map[status_code].format(**kwargs)) + return status_code + + def analyze_blame(name, args): """Report a list of records sorted by largest time delta. 
@@ -119,7 +203,7 @@ def analyze_dump(name, args): def _get_events(infile): rawdata = None - events, rawdata = show.load_events(infile, None) + events, rawdata = show.load_events_infile(infile) if not events: events, _ = dump.dump_events(rawdata=rawdata) return events diff --git a/cloudinit/analyze/show.py b/cloudinit/analyze/show.py index 3e778b8b..fb152b1d 100644 --- a/cloudinit/analyze/show.py +++ b/cloudinit/analyze/show.py @@ -8,8 +8,11 @@ import base64 import datetime import json import os +import time +import sys from cloudinit import util +from cloudinit.distros import uses_systemd # An event: ''' @@ -49,6 +52,10 @@ format_key = { formatting_help = " ".join(["{0}: {1}".format(k.replace('%', '%%'), v) for k, v in format_key.items()]) +SUCCESS_CODE = 'successful' +FAIL_CODE = 'failure' +CONTAINER_CODE = 'container' +TIMESTAMP_UNKNOWN = (FAIL_CODE, -1, -1, -1) def format_record(msg, event): @@ -125,9 +132,175 @@ def total_time_record(total_time): return 'Total Time: %3.5f seconds\n' % total_time +class SystemctlReader(object): + ''' + Class for dealing with all systemctl subp calls in a consistent manner. + ''' + def __init__(self, property, parameter=None): + self.epoch = None + self.args = ['/bin/systemctl', 'show'] + if parameter: + self.args.append(parameter) + self.args.extend(['-p', property]) + # Don't want the init of our object to break. Instead of throwing + # an exception, set an error code that gets checked when data is + # requested from the object + self.failure = self.subp() + + def subp(self): + ''' + Make a subp call based on set args and handle errors by setting + failure code + + :return: whether the subp call failed or not + ''' + try: + value, err = util.subp(self.args, capture=True) + if err: + return err + self.epoch = value + return None + except Exception as systemctl_fail: + return systemctl_fail + + def parse_epoch_as_float(self): + ''' + If subp call succeeded, return the timestamp from subp as a float. + + :return: timestamp as a float + ''' + # subp has 2 ways to fail: it either fails and throws an exception, + # or returns an error code. Raise an exception here in order to make + # sure both scenarios throw exceptions + if self.failure: + raise RuntimeError('Subprocess call to systemctl has failed, ' + 'returning error code ({})' + .format(self.failure)) + # Output from systemctl show has the format Property=Value. + # For example, UserspaceMonotonic=1929304 + timestamp = self.epoch.split('=')[1] + # Timestamps reported by systemctl are in microseconds, converting + return float(timestamp) / 1000000 + + +def dist_check_timestamp(): + ''' + Determine which init system a particular linux distro is using. + Each init system (systemd, upstart, etc) has a different way of + providing timestamps. + + :return: timestamps of kernelboot, kernelendboot, and cloud-initstart + or TIMESTAMP_UNKNOWN if the timestamps cannot be retrieved. + ''' + + if uses_systemd(): + return gather_timestamps_using_systemd() + + # Use dmesg to get timestamps if the distro does not have systemd + if util.is_FreeBSD() or 'gentoo' in \ + util.system_info()['system'].lower(): + return gather_timestamps_using_dmesg() + + # this distro doesn't fit anything that is supported by cloud-init. 
just + # return error codes + return TIMESTAMP_UNKNOWN + + +def gather_timestamps_using_dmesg(): + ''' + Gather timestamps that corresponds to kernel begin initialization, + kernel finish initialization using dmesg as opposed to systemctl + + :return: the two timestamps plus a dummy timestamp to keep consistency + with gather_timestamps_using_systemd + ''' + try: + data, _ = util.subp(['dmesg'], capture=True) + split_entries = data[0].splitlines() + for i in split_entries: + if i.decode('UTF-8').find('user') != -1: + splitup = i.decode('UTF-8').split() + stripped = splitup[1].strip(']') + + # kernel timestamp from dmesg is equal to 0, + # with the userspace timestamp relative to it. + user_space_timestamp = float(stripped) + kernel_start = float(time.time()) - float(util.uptime()) + kernel_end = kernel_start + user_space_timestamp + + # systemd wont start cloud-init in this case, + # so we cannot get that timestamp + return SUCCESS_CODE, kernel_start, kernel_end, \ + kernel_end + + except Exception: + pass + return TIMESTAMP_UNKNOWN + + +def gather_timestamps_using_systemd(): + ''' + Gather timestamps that corresponds to kernel begin initialization, + kernel finish initialization. and cloud-init systemd unit activation + + :return: the three timestamps + ''' + kernel_start = float(time.time()) - float(util.uptime()) + try: + delta_k_end = SystemctlReader('UserspaceTimestampMonotonic')\ + .parse_epoch_as_float() + delta_ci_s = SystemctlReader('InactiveExitTimestampMonotonic', + 'cloud-init-local').parse_epoch_as_float() + base_time = kernel_start + status = SUCCESS_CODE + # lxc based containers do not set their monotonic zero point to be when + # the container starts, instead keep using host boot as zero point + # time.CLOCK_MONOTONIC_RAW is only available in python 3.3 + if util.is_container(): + # clock.monotonic also uses host boot as zero point + if sys.version_info >= (3, 3): + base_time = float(time.time()) - float(time.monotonic()) + # TODO: lxcfs automatically truncates /proc/uptime to seconds + # in containers when https://github.com/lxc/lxcfs/issues/292 + # is fixed, util.uptime() should be used instead of stat on + try: + file_stat = os.stat('/proc/1/cmdline') + kernel_start = file_stat.st_atime + except OSError as err: + raise RuntimeError('Could not determine container boot ' + 'time from /proc/1/cmdline. ({})' + .format(err)) + status = CONTAINER_CODE + else: + status = FAIL_CODE + kernel_end = base_time + delta_k_end + cloudinit_sysd = base_time + delta_ci_s + + except Exception as e: + # Except ALL exceptions as Systemctl reader can throw many different + # errors, but any failure in systemctl means that timestamps cannot be + # obtained + print(e) + return TIMESTAMP_UNKNOWN + return status, kernel_start, kernel_end, cloudinit_sysd + + def generate_records(events, blame_sort=False, print_format="(%n) %d seconds in %I%D", dump_files=False, log_datafiles=False): + ''' + Take in raw events and create parent-child dependencies between events + in order to order events in chronological order. + + :param events: JSONs from dump that represents events taken from logs + :param blame_sort: whether to sort by timestamp or by time taken. 
+ :param print_format: formatting to represent event, time stamp, + and time taken by the event in one line + :param dump_files: whether to dump files into JSONs + :param log_datafiles: whether or not to log events generated + + :return: boot records ordered chronologically + ''' sorted_events = sorted(events, key=lambda x: x['timestamp']) records = [] @@ -176,7 +349,7 @@ def generate_records(events, blame_sort=False, if event_name(event) == event_name(prev_evt): record = event_record(start_time, prev_evt, event) records.append(format_record("Finished stage: " - "(%n) %d seconds ", + "(%n) %d seconds", record) + "\n") total_time += record.get('delta') else: @@ -189,19 +362,28 @@ def generate_records(events, blame_sort=False, def show_events(events, print_format): + ''' + A passthrough method that makes it easier to call generate_records() + + :param events: JSONs from dump that represents events taken from logs + :param print_format: formatting to represent event, time stamp, + and time taken by the event in one line + + :return: boot records ordered chronologically + ''' return generate_records(events, print_format=print_format) -def load_events(infile, rawdata=None): - if rawdata: - data = rawdata.read() - else: - data = infile.read() +def load_events_infile(infile): + ''' + Takes in a log file, read it, and convert to json. + + :param infile: The Log file to be read - j = None + :return: json version of logfile, raw file + ''' + data = infile.read() try: - j = json.loads(data) + return json.loads(data), data except ValueError: - pass - - return j, data + return None, data diff --git a/cloudinit/analyze/tests/test_boot.py b/cloudinit/analyze/tests/test_boot.py new file mode 100644 index 00000000..f4001c14 --- /dev/null +++ b/cloudinit/analyze/tests/test_boot.py @@ -0,0 +1,170 @@ +import os +from cloudinit.analyze.__main__ import (analyze_boot, get_parser) +from cloudinit.tests.helpers import CiTestCase, mock +from cloudinit.analyze.show import dist_check_timestamp, SystemctlReader, \ + FAIL_CODE, CONTAINER_CODE + +err_code = (FAIL_CODE, -1, -1, -1) + + +class TestDistroChecker(CiTestCase): + + @mock.patch('cloudinit.util.system_info', return_value={'dist': ('', '', + ''), + 'system': ''}) + @mock.patch('cloudinit.util.get_linux_distro', return_value=('', '', '')) + @mock.patch('cloudinit.util.is_FreeBSD', return_value=False) + def test_blank_distro(self, m_sys_info, m_get_linux_distro, m_free_bsd): + self.assertEqual(err_code, dist_check_timestamp()) + + @mock.patch('cloudinit.util.system_info', return_value={'dist': ('', '', + '')}) + @mock.patch('cloudinit.util.get_linux_distro', return_value=('', '', '')) + @mock.patch('cloudinit.util.is_FreeBSD', return_value=True) + def test_freebsd_gentoo_cant_find(self, m_sys_info, + m_get_linux_distro, m_is_FreeBSD): + self.assertEqual(err_code, dist_check_timestamp()) + + @mock.patch('cloudinit.util.subp', return_value=(0, 1)) + def test_subp_fails(self, m_subp): + self.assertEqual(err_code, dist_check_timestamp()) + + +class TestSystemCtlReader(CiTestCase): + + def test_systemctl_invalid_property(self): + reader = SystemctlReader('dummyProperty') + with self.assertRaises(RuntimeError): + reader.parse_epoch_as_float() + + def test_systemctl_invalid_parameter(self): + reader = SystemctlReader('dummyProperty', 'dummyParameter') + with self.assertRaises(RuntimeError): + reader.parse_epoch_as_float() + + @mock.patch('cloudinit.util.subp', return_value=('U=1000000', None)) + def test_systemctl_works_correctly_threshold(self, m_subp): + reader = 
SystemctlReader('dummyProperty', 'dummyParameter') + self.assertEqual(1.0, reader.parse_epoch_as_float()) + thresh = 1.0 - reader.parse_epoch_as_float() + self.assertTrue(thresh < 1e-6) + self.assertTrue(thresh > (-1 * 1e-6)) + + @mock.patch('cloudinit.util.subp', return_value=('U=0', None)) + def test_systemctl_succeed_zero(self, m_subp): + reader = SystemctlReader('dummyProperty', 'dummyParameter') + self.assertEqual(0.0, reader.parse_epoch_as_float()) + + @mock.patch('cloudinit.util.subp', return_value=('U=1', None)) + def test_systemctl_succeed_distinct(self, m_subp): + reader = SystemctlReader('dummyProperty', 'dummyParameter') + val1 = reader.parse_epoch_as_float() + m_subp.return_value = ('U=2', None) + reader2 = SystemctlReader('dummyProperty', 'dummyParameter') + val2 = reader2.parse_epoch_as_float() + self.assertNotEqual(val1, val2) + + @mock.patch('cloudinit.util.subp', return_value=('100', None)) + def test_systemctl_epoch_not_splittable(self, m_subp): + reader = SystemctlReader('dummyProperty', 'dummyParameter') + with self.assertRaises(IndexError): + reader.parse_epoch_as_float() + + @mock.patch('cloudinit.util.subp', return_value=('U=foobar', None)) + def test_systemctl_cannot_convert_epoch_to_float(self, m_subp): + reader = SystemctlReader('dummyProperty', 'dummyParameter') + with self.assertRaises(ValueError): + reader.parse_epoch_as_float() + + +class TestAnalyzeBoot(CiTestCase): + + def set_up_dummy_file_ci(self, path, log_path): + infh = open(path, 'w+') + infh.write('2019-07-08 17:40:49,601 - util.py[DEBUG]: Cloud-init v. ' + '19.1-1-gbaa47854-0ubuntu1~18.04.1 running \'init-local\' ' + 'at Mon, 08 Jul 2019 17:40:49 +0000. Up 18.84 seconds.') + infh.close() + outfh = open(log_path, 'w+') + outfh.close() + + def set_up_dummy_file(self, path, log_path): + infh = open(path, 'w+') + infh.write('dummy data') + infh.close() + outfh = open(log_path, 'w+') + outfh.close() + + def remove_dummy_file(self, path, log_path): + if os.path.isfile(path): + os.remove(path) + if os.path.isfile(log_path): + os.remove(log_path) + + @mock.patch('cloudinit.analyze.show.dist_check_timestamp', + return_value=err_code) + def test_boot_invalid_distro(self, m_dist_check_timestamp): + + path = os.path.dirname(os.path.abspath(__file__)) + log_path = path + '/boot-test.log' + path += '/dummy.log' + self.set_up_dummy_file(path, log_path) + + parser = get_parser() + args = parser.parse_args(args=['boot', '-i', path, '-o', + log_path]) + name_default = '' + analyze_boot(name_default, args) + # now args have been tested, go into outfile and make sure error + # message is in the outfile + outfh = open(args.outfile, 'r') + data = outfh.read() + err_string = 'Your Linux distro or container does not support this ' \ + 'functionality.\nYou must be running a Kernel ' \ + 'Telemetry supported distro.\nPlease check ' \ + 'https://cloudinit.readthedocs.io/en/latest/topics' \ + '/analyze.html for more information on supported ' \ + 'distros.\n' + + self.remove_dummy_file(path, log_path) + self.assertEqual(err_string, data) + + @mock.patch("cloudinit.util.is_container", return_value=True) + @mock.patch('cloudinit.util.subp', return_value=('U=1000000', None)) + def test_container_no_ci_log_line(self, m_is_container, m_subp): + path = os.path.dirname(os.path.abspath(__file__)) + log_path = path + '/boot-test.log' + path += '/dummy.log' + self.set_up_dummy_file(path, log_path) + + parser = get_parser() + args = parser.parse_args(args=['boot', '-i', path, '-o', + log_path]) + name_default = '' + + finish_code = 
analyze_boot(name_default, args) + + self.remove_dummy_file(path, log_path) + self.assertEqual(FAIL_CODE, finish_code) + + @mock.patch("cloudinit.util.is_container", return_value=True) + @mock.patch('cloudinit.util.subp', return_value=('U=1000000', None)) + @mock.patch('cloudinit.analyze.__main__._get_events', return_value=[{ + 'name': 'init-local', 'description': 'starting search', 'timestamp': + 100000}]) + @mock.patch('cloudinit.analyze.show.dist_check_timestamp', + return_value=(CONTAINER_CODE, 1, 1, 1)) + def test_container_ci_log_line(self, m_is_container, m_subp, m_get, m_g): + path = os.path.dirname(os.path.abspath(__file__)) + log_path = path + '/boot-test.log' + path += '/dummy.log' + self.set_up_dummy_file_ci(path, log_path) + + parser = get_parser() + args = parser.parse_args(args=['boot', '-i', path, '-o', + log_path]) + name_default = '' + finish_code = analyze_boot(name_default, args) + + self.remove_dummy_file(path, log_path) + self.assertEqual(CONTAINER_CODE, finish_code) diff --git a/cloudinit/apport.py b/cloudinit/apport.py index 22cb7fde..1f2c2e7e 100644 --- a/cloudinit/apport.py +++ b/cloudinit/apport.py @@ -22,7 +22,9 @@ KNOWN_CLOUD_NAMES = [ 'CloudSigma', 'CloudStack', 'DigitalOcean', + 'E24Cloud', 'GCE - Google Compute Engine', + 'Exoscale', 'Hetzner Cloud', 'IBM - (aka SoftLayer or BlueMix)', 'LXD', @@ -32,11 +34,14 @@ KNOWN_CLOUD_NAMES = [ 'OpenStack', 'Oracle', 'OVF', + 'RbxCloud - (HyperOne, Rootbox, Rubikon)', 'OpenTelekomCloud', 'Scaleway', 'SmartOS', 'VMware', - 'Other'] + 'ZStack', + 'Other' +] # Potentially clear text collected logs CLOUDINIT_LOG = '/var/log/cloud-init.log' diff --git a/cloudinit/atomic_helper.py b/cloudinit/atomic_helper.py index 587b9945..1f61faa2 100644 --- a/cloudinit/atomic_helper.py +++ b/cloudinit/atomic_helper.py @@ -1,11 +1,13 @@ # This file is part of cloud-init. See LICENSE file for license information. 
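The atomic_helper hunk below adds a debug line naming both the destination and the temporary file, because write_file never mutates the target in place: it fills a scratch file in the same directory, fixes its mode, and only then swaps it into place. A minimal sketch of that write-then-rename pattern (the final os.rename sits outside the quoted hunk, so its exact form here is an assumption)::

    import os
    import tempfile

    def atomic_write(filename, content, mode=0o644):
        # The scratch file must live in the target's directory so the
        # final rename stays on one filesystem and is therefore atomic.
        tf = tempfile.NamedTemporaryFile(dir=os.path.dirname(filename),
                                         delete=False, mode='w')
        try:
            tf.write(content)
            tf.close()
            os.chmod(tf.name, mode)
            os.rename(tf.name, filename)  # readers see old or new, never half
        except BaseException:
            os.unlink(tf.name)
            raise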
import json +import logging import os import stat import tempfile _DEF_PERMS = 0o644 +LOG = logging.getLogger(__name__) def write_file(filename, content, mode=_DEF_PERMS, @@ -23,6 +25,10 @@ def write_file(filename, content, mode=_DEF_PERMS, try: tf = tempfile.NamedTemporaryFile(dir=os.path.dirname(filename), delete=False, mode=omode) + LOG.debug( + "Atomically writing to file %s (via temporary file %s) - %s: [%o]" + " %d bytes/chars", + filename, tf.name, omode, mode, len(content)) tf.write(content) tf.close() os.chmod(tf.name, mode) diff --git a/cloudinit/cmd/clean.py b/cloudinit/cmd/clean.py index de22f7f2..30e49de0 100644 --- a/cloudinit/cmd/clean.py +++ b/cloudinit/cmd/clean.py @@ -5,12 +5,13 @@ """Define 'clean' utility and handler as part of cloud-init commandline.""" import argparse +import glob import os import sys from cloudinit.stages import Init from cloudinit.util import ( - ProcessExecutionError, chdir, del_dir, del_file, get_config_logfiles, + ProcessExecutionError, del_dir, del_file, get_config_logfiles, is_link, subp) @@ -61,18 +62,18 @@ def remove_artifacts(remove_logs, remove_seed=False): if not os.path.isdir(init.paths.cloud_dir): return 0 # Artifacts dir already cleaned - with chdir(init.paths.cloud_dir): - for path in os.listdir('.'): - if path == 'seed' and not remove_seed: - continue - try: - if os.path.isdir(path) and not is_link(path): - del_dir(path) - else: - del_file(path) - except OSError as e: - error('Could not remove {0}: {1}'.format(path, str(e))) - return 1 + seed_path = os.path.join(init.paths.cloud_dir, 'seed') + for path in glob.glob('%s/*' % init.paths.cloud_dir): + if path == seed_path and not remove_seed: + continue + try: + if os.path.isdir(path) and not is_link(path): + del_dir(path) + else: + del_file(path) + except OSError as e: + error('Could not remove {0}: {1}'.format(path, str(e))) + return 1 return 0 diff --git a/cloudinit/cmd/devel/net_convert.py b/cloudinit/cmd/devel/net_convert.py index 1ad7e0bd..80d217ca 100755 --- a/cloudinit/cmd/devel/net_convert.py +++ b/cloudinit/cmd/devel/net_convert.py @@ -5,13 +5,12 @@ import argparse import json import os import sys -import yaml from cloudinit.sources.helpers import openstack from cloudinit.sources import DataSourceAzure as azure from cloudinit.sources import DataSourceOVF as ovf -from cloudinit import distros +from cloudinit import distros, safeyaml from cloudinit.net import eni, netplan, network_state, sysconfig from cloudinit import log @@ -78,13 +77,12 @@ def handle_args(name, args): if args.kind == "eni": pre_ns = eni.convert_eni_data(net_data) elif args.kind == "yaml": - pre_ns = yaml.load(net_data) + pre_ns = safeyaml.load(net_data) if 'network' in pre_ns: pre_ns = pre_ns.get('network') if args.debug: sys.stderr.write('\n'.join( - ["Input YAML", - yaml.dump(pre_ns, default_flow_style=False, indent=4), ""])) + ["Input YAML", safeyaml.dumps(pre_ns), ""])) elif args.kind == 'network_data.json': pre_ns = openstack.convert_net_json( json.loads(net_data), known_macs=known_macs) @@ -97,12 +95,11 @@ def handle_args(name, args): ns = network_state.parse_net_config_data(pre_ns) if not ns: raise RuntimeError("No valid network_state object created from" - "input data") + " input data") if args.debug: - sys.stderr.write('\n'.join([ - "", "Internal State", - yaml.dump(ns, default_flow_style=False, indent=4), ""])) + sys.stderr.write('\n'.join( + ["", "Internal State", safeyaml.dumps(ns), ""])) distro_cls = distros.fetch(args.distro) distro = distro_cls(args.distro, {}, None) config = {} @@ -116,6 +113,8 
@@ def handle_args(name, args): config['postcmds'] = False # trim leading slash config['netplan_path'] = config['netplan_path'][1:] + # enable some netplan features + config['features'] = ['dhcp-use-domains', 'ipv6-mtu'] else: r_cls = sysconfig.Renderer config = distro.renderer_configs.get('sysconfig') diff --git a/cloudinit/cmd/devel/tests/test_logs.py b/cloudinit/cmd/devel/tests/test_logs.py index 4951797b..d2dfa8de 100644 --- a/cloudinit/cmd/devel/tests/test_logs.py +++ b/cloudinit/cmd/devel/tests/test_logs.py @@ -2,7 +2,7 @@ from datetime import datetime import os -from six import StringIO +from io import StringIO from cloudinit.cmd.devel import logs from cloudinit.sources import INSTANCE_JSON_SENSITIVE_FILE diff --git a/cloudinit/cmd/devel/tests/test_render.py b/cloudinit/cmd/devel/tests/test_render.py index 988bba03..a7fcf2ce 100644 --- a/cloudinit/cmd/devel/tests/test_render.py +++ b/cloudinit/cmd/devel/tests/test_render.py @@ -1,7 +1,7 @@ # This file is part of cloud-init. See LICENSE file for license information. -from six import StringIO import os +from io import StringIO from collections import namedtuple from cloudinit.cmd.devel import render diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py index 933c019a..a5446da7 100644 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py @@ -632,13 +632,14 @@ def status_wrapper(name, args, data_d=None, link_d=None): 'start': None, 'finished': None, } + if status is None: status = {'v1': {}} - for m in modes: - status['v1'][m] = nullstatus.copy() status['v1']['datasource'] = None - elif mode not in status['v1']: - status['v1'][mode] = nullstatus.copy() + + for m in modes: + if m not in status['v1']: + status['v1'][m] = nullstatus.copy() v1 = status['v1'] v1['stage'] = mode diff --git a/cloudinit/cmd/query.py b/cloudinit/cmd/query.py index 1d888b9d..e3db8679 100644 --- a/cloudinit/cmd/query.py +++ b/cloudinit/cmd/query.py @@ -5,7 +5,6 @@ import argparse from errno import EACCES import os -import six import sys from cloudinit.handlers.jinja_template import ( @@ -149,7 +148,7 @@ def handle_args(name, args): response = '\n'.join(sorted(response.keys())) elif args.list_keys: response = '\n'.join(sorted(response.keys())) - if not isinstance(response, six.string_types): + if not isinstance(response, str): response = util.json_dumps(response) print(response) return 0 diff --git a/cloudinit/cmd/tests/test_clean.py b/cloudinit/cmd/tests/test_clean.py index 5a3ec3bf..13a69aa1 100644 --- a/cloudinit/cmd/tests/test_clean.py +++ b/cloudinit/cmd/tests/test_clean.py @@ -5,7 +5,7 @@ from cloudinit.util import ensure_dir, sym_link, write_file from cloudinit.tests.helpers import CiTestCase, wrap_and_call, mock from collections import namedtuple import os -from six import StringIO +from io import StringIO mypaths = namedtuple('MyPaths', 'cloud_dir') @@ -22,7 +22,8 @@ class TestClean(CiTestCase): class FakeInit(object): cfg = {'def_log_file': self.log1, 'output': {'all': '|tee -a {0}'.format(self.log2)}} - paths = mypaths(cloud_dir=self.artifact_dir) + # Ensure cloud_dir has a trailing slash, to match real behaviour + paths = mypaths(cloud_dir='{}/'.format(self.artifact_dir)) def __init__(self, ds_deps): pass @@ -136,7 +137,8 @@ class TestClean(CiTestCase): clean.remove_artifacts, remove_logs=False) self.assertEqual(1, retcode) self.assertEqual( - 'ERROR: Could not remove dir1: oops\n', m_stderr.getvalue()) + 'ERROR: Could not remove %s/dir1: oops\n' % self.artifact_dir, + m_stderr.getvalue()) def test_handle_clean_args_reboots(self): 
"""handle_clean_args_reboots when reboot arg is provided.""" diff --git a/cloudinit/cmd/tests/test_cloud_id.py b/cloudinit/cmd/tests/test_cloud_id.py index 73738170..3f3727fd 100644 --- a/cloudinit/cmd/tests/test_cloud_id.py +++ b/cloudinit/cmd/tests/test_cloud_id.py @@ -4,7 +4,7 @@ from cloudinit import util from collections import namedtuple -from six import StringIO +from io import StringIO from cloudinit.cmd import cloud_id diff --git a/cloudinit/cmd/tests/test_main.py b/cloudinit/cmd/tests/test_main.py index a1e534fb..384fddc6 100644 --- a/cloudinit/cmd/tests/test_main.py +++ b/cloudinit/cmd/tests/test_main.py @@ -3,11 +3,12 @@ from collections import namedtuple import copy import os -from six import StringIO +from io import StringIO from cloudinit.cmd import main +from cloudinit import safeyaml from cloudinit.util import ( - ensure_dir, load_file, write_file, yaml_dumps) + ensure_dir, load_file, write_file) from cloudinit.tests.helpers import ( FilesystemMockingTestCase, wrap_and_call) @@ -39,7 +40,7 @@ class TestMain(FilesystemMockingTestCase): ], 'cloud_init_modules': ['write-files', 'runcmd'], } - cloud_cfg = yaml_dumps(self.cfg) + cloud_cfg = safeyaml.dumps(self.cfg) ensure_dir(os.path.join(self.new_root, 'etc', 'cloud')) self.cloud_cfg_file = os.path.join( self.new_root, 'etc', 'cloud', 'cloud.cfg') @@ -113,7 +114,7 @@ class TestMain(FilesystemMockingTestCase): """When local-hostname metadata is present, call cc_set_hostname.""" self.cfg['datasource'] = { 'None': {'metadata': {'local-hostname': 'md-hostname'}}} - cloud_cfg = yaml_dumps(self.cfg) + cloud_cfg = safeyaml.dumps(self.cfg) write_file(self.cloud_cfg_file, cloud_cfg) cmdargs = myargs( debug=False, files=None, force=False, local=False, reporter=None, diff --git a/cloudinit/cmd/tests/test_query.py b/cloudinit/cmd/tests/test_query.py index 28738b1e..6d36a4ea 100644 --- a/cloudinit/cmd/tests/test_query.py +++ b/cloudinit/cmd/tests/test_query.py @@ -1,7 +1,7 @@ # This file is part of cloud-init. See LICENSE file for license information. 
import errno -from six import StringIO +from io import StringIO from textwrap import dedent import os @@ -150,7 +150,9 @@ class TestQuery(CiTestCase): instance_data=self.instance_data, list_keys=False, user_data='ud', vendor_data='vd', varname=None) with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: - self.assertEqual(0, query.handle_args('anyname', args)) + with mock.patch('os.getuid') as m_getuid: + m_getuid.return_value = 100 + self.assertEqual(0, query.handle_args('anyname', args)) self.assertEqual( '{\n "my_var": "it worked",\n "userdata": "<%s> file:ud",\n' ' "vendordata": "<%s> file:vd"\n}\n' % ( @@ -165,7 +167,9 @@ class TestQuery(CiTestCase): instance_data=self.instance_data, list_keys=False, user_data='ud', vendor_data='vd', varname='my_var') with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: - self.assertEqual(0, query.handle_args('anyname', args)) + with mock.patch('os.getuid') as m_getuid: + m_getuid.return_value = 100 + self.assertEqual(0, query.handle_args('anyname', args)) self.assertEqual('it worked\n', m_stdout.getvalue()) def test_handle_args_returns_nested_varname(self): @@ -177,7 +181,9 @@ class TestQuery(CiTestCase): instance_data=self.instance_data, user_data='ud', vendor_data='vd', list_keys=False, varname='v1.key_2') with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: - self.assertEqual(0, query.handle_args('anyname', args)) + with mock.patch('os.getuid') as m_getuid: + m_getuid.return_value = 100 + self.assertEqual(0, query.handle_args('anyname', args)) self.assertEqual('value-2\n', m_stdout.getvalue()) def test_handle_args_returns_standardized_vars_to_top_level_aliases(self): @@ -206,7 +212,9 @@ class TestQuery(CiTestCase): instance_data=self.instance_data, user_data='ud', vendor_data='vd', list_keys=False, varname=None) with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: - self.assertEqual(0, query.handle_args('anyname', args)) + with mock.patch('os.getuid') as m_getuid: + m_getuid.return_value = 100 + self.assertEqual(0, query.handle_args('anyname', args)) self.assertEqual(expected, m_stdout.getvalue()) def test_handle_args_list_keys_sorts_top_level_keys_when_no_varname(self): @@ -221,7 +229,9 @@ class TestQuery(CiTestCase): instance_data=self.instance_data, list_keys=True, user_data='ud', vendor_data='vd', varname=None) with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: - self.assertEqual(0, query.handle_args('anyname', args)) + with mock.patch('os.getuid') as m_getuid: + m_getuid.return_value = 100 + self.assertEqual(0, query.handle_args('anyname', args)) self.assertEqual(expected, m_stdout.getvalue()) def test_handle_args_list_keys_sorts_nested_keys_when_varname(self): @@ -236,7 +246,9 @@ class TestQuery(CiTestCase): instance_data=self.instance_data, list_keys=True, user_data='ud', vendor_data='vd', varname='v1') with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: - self.assertEqual(0, query.handle_args('anyname', args)) + with mock.patch('os.getuid') as m_getuid: + m_getuid.return_value = 100 + self.assertEqual(0, query.handle_args('anyname', args)) self.assertEqual(expected, m_stdout.getvalue()) def test_handle_args_list_keys_errors_when_varname_is_not_a_dict(self): @@ -252,7 +264,9 @@ class TestQuery(CiTestCase): vendor_data='vd', varname='top') with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr: with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: - self.assertEqual(1, query.handle_args('anyname', args)) + with mock.patch('os.getuid') as 
m_getuid: + m_getuid.return_value = 100 + self.assertEqual(1, query.handle_args('anyname', args)) self.assertEqual('', m_stdout.getvalue()) self.assertIn(expected_error, m_stderr.getvalue()) diff --git a/cloudinit/cmd/tests/test_status.py b/cloudinit/cmd/tests/test_status.py index aded8580..1ed10896 100644 --- a/cloudinit/cmd/tests/test_status.py +++ b/cloudinit/cmd/tests/test_status.py @@ -2,7 +2,7 @@ from collections import namedtuple import os -from six import StringIO +from io import StringIO from textwrap import dedent from cloudinit.atomic_helper import write_json diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index e18944ec..c44dec45 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -127,7 +127,7 @@ to ``^[\\w-]+:\\w`` Source list entries can be specified as a dictionary under the ``sources`` config key, with key in the dict representing a different source file. The key -The key of each source entry will be used as an id that can be referenced in +of each source entry will be used as an id that can be referenced in other config entries, as well as the filename for the source's configuration under ``/etc/apt/sources.list.d``. If the name does not end with ``.list``, it will be appended. If there is no configuration for a key in ``sources``, no @@ -253,7 +253,7 @@ def get_default_mirrors(arch=None, target=None): architecture, for more see: https://wiki.ubuntu.com/UbuntuDevelopment/PackageArchive#Ports""" if arch is None: - arch = util.get_architecture(target) + arch = util.get_dpkg_architecture(target) if arch in PRIMARY_ARCHES: return PRIMARY_ARCH_MIRRORS.copy() if arch in PORTS_ARCHES: @@ -303,13 +303,13 @@ def apply_apt(cfg, cloud, target): LOG.debug("handling apt config: %s", cfg) release = util.lsb_release(target=target)['codename'] - arch = util.get_architecture(target) + arch = util.get_dpkg_architecture(target) mirrors = find_apt_mirror_info(cfg, cloud, arch=arch) LOG.debug("Apt Mirror info: %s", mirrors) if util.is_false(cfg.get('preserve_sources_list', False)): generate_sources_list(cfg, release, mirrors, cloud) - rename_apt_lists(mirrors, target) + rename_apt_lists(mirrors, target, arch) try: apply_apt_config(cfg, APT_PROXY_FN, APT_CONFIG_FN) @@ -332,6 +332,8 @@ def apply_apt(cfg, cloud, target): def debconf_set_selections(selections, target=None): + if not selections.endswith(b'\n'): + selections += b'\n' util.subp(['debconf-set-selections'], data=selections, target=target, capture=True) @@ -374,7 +376,7 @@ def apply_debconf_selections(cfg, target=None): selections = '\n'.join( [selsets[key] for key in sorted(selsets.keys())]) - debconf_set_selections(selections.encode() + b"\n", target=target) + debconf_set_selections(selections.encode(), target=target) # get a complete list of packages listed in input pkgs_cfgd = set() @@ -425,9 +427,9 @@ def mirrorurl_to_apt_fileprefix(mirror): return string -def rename_apt_lists(new_mirrors, target=None): +def rename_apt_lists(new_mirrors, target, arch): """rename_apt_lists - rename apt lists to preserve old cache data""" - default_mirrors = get_default_mirrors(util.get_architecture(target)) + default_mirrors = get_default_mirrors(arch) pre = util.target_path(target, APT_LISTS) for (name, omirror) in default_mirrors.items(): @@ -894,7 +896,7 @@ def find_apt_mirror_info(cfg, cloud, arch=None): """ if arch is None: - arch = util.get_architecture() + arch = util.get_dpkg_architecture() LOG.debug("got arch for mirror selection: %s", arch) pmirror = 
get_mirror(cfg, "primary", arch, cloud) LOG.debug("got primary mirror: %s", pmirror) diff --git a/cloudinit/config/cc_apt_pipelining.py b/cloudinit/config/cc_apt_pipelining.py index cdf28cd9..225d0905 100644 --- a/cloudinit/config/cc_apt_pipelining.py +++ b/cloudinit/config/cc_apt_pipelining.py @@ -49,7 +49,7 @@ APT_PIPE_TPL = ("//Written by cloud-init per 'apt_pipelining'\n" def handle(_name, cfg, _cloud, log, _args): - apt_pipe_value = util.get_cfg_option_str(cfg, "apt_pipelining", False) + apt_pipe_value = util.get_cfg_option_str(cfg, "apt_pipelining", 'os') apt_pipe_value_s = str(apt_pipe_value).lower().strip() if apt_pipe_value_s == "false": @@ -59,7 +59,7 @@ def handle(_name, cfg, _cloud, log, _args): elif apt_pipe_value_s in [str(b) for b in range(0, 6)]: write_apt_snippet(apt_pipe_value_s, log, DEFAULT_FILE) else: - log.warn("Invalid option for apt_pipeling: %s", apt_pipe_value) + log.warning("Invalid option for apt_pipelining: %s", apt_pipe_value) def write_apt_snippet(setting, log, f_name): diff --git a/cloudinit/config/cc_byobu.py b/cloudinit/config/cc_byobu.py index 8570da15..0b4352c8 100755 --- a/cloudinit/config/cc_byobu.py +++ b/cloudinit/config/cc_byobu.py @@ -60,7 +60,7 @@ def handle(name, cfg, cloud, log, args): valid = ("enable-user", "enable-system", "enable", "disable-user", "disable-system", "disable") if value not in valid: - log.warn("Unknown value %s for byobu_by_default", value) + log.warning("Unknown value %s for byobu_by_default", value) mod_user = value.endswith("-user") mod_sys = value.endswith("-system") @@ -80,8 +80,8 @@ def handle(name, cfg, cloud, log, args): (users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro) (user, _user_config) = ug_util.extract_default(users) if not user: - log.warn(("No default byobu user provided, " - "can not launch %s for the default user"), bl_inst) + log.warning(("No default byobu user provided, " + "can not launch %s for the default user"), bl_inst) else: shcmd += " sudo -Hu \"%s\" byobu-launcher-%s" % (user, bl_inst) shcmd += " || X=$(($X+1)); " diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py index 46abedd1..01d61fa1 100644 --- a/cloudinit/config/cc_chef.py +++ b/cloudinit/config/cc_chef.py @@ -51,6 +51,7 @@ file). 
chef: client_key: + encrypted_data_bag_secret: environment: file_backup_path: file_cache_path: @@ -78,8 +79,6 @@ from cloudinit import templater from cloudinit import url_helper from cloudinit import util -import six - RUBY_VERSION_DEFAULT = "1.8" CHEF_DIRS = tuple([ @@ -114,6 +113,7 @@ CHEF_RB_TPL_DEFAULTS = { 'file_backup_path': "/var/backups/chef", 'pid_file': "/var/run/chef/client.pid", 'show_time': True, + 'encrypted_data_bag_secret': None, } CHEF_RB_TPL_BOOL_KEYS = frozenset(['show_time']) CHEF_RB_TPL_PATH_KEYS = frozenset([ @@ -124,6 +124,7 @@ CHEF_RB_TPL_PATH_KEYS = frozenset([ 'json_attribs', 'file_cache_path', 'pid_file', + 'encrypted_data_bag_secret', ]) CHEF_RB_TPL_KEYS = list(CHEF_RB_TPL_DEFAULTS.keys()) CHEF_RB_TPL_KEYS.extend(CHEF_RB_TPL_BOOL_KEYS) @@ -193,7 +194,7 @@ def handle(name, cfg, cloud, log, _args): # If there isn't a chef key in the configuration don't do anything if 'chef' not in cfg: log.debug(("Skipping module named %s," - " no 'chef' key in configuration"), name) + " no 'chef' key in configuration"), name) return chef_cfg = cfg['chef'] @@ -212,9 +213,9 @@ def handle(name, cfg, cloud, log, _args): if vcert != "system": util.write_file(vkey_path, vcert) elif not os.path.isfile(vkey_path): - log.warn("chef validation_cert provided as 'system', but " - "validation_key path '%s' does not exist.", - vkey_path) + log.warning("chef validation_cert provided as 'system', but " + "validation_key path '%s' does not exist.", + vkey_path) # Create the chef config from template template_fn = cloud.get_template_filename('chef_client.rb') @@ -231,8 +232,8 @@ def handle(name, cfg, cloud, log, _args): util.ensure_dirs(param_paths) templater.render_to_file(template_fn, CHEF_RB_PATH, params) else: - log.warn("No template found, not rendering to %s", - CHEF_RB_PATH) + log.warning("No template found, not rendering to %s", + CHEF_RB_PATH) # Set the firstboot json fb_filename = util.get_cfg_option_str(chef_cfg, 'firstboot_path', @@ -270,12 +271,12 @@ def run_chef(chef_cfg, log): cmd_args = chef_cfg['exec_arguments'] if isinstance(cmd_args, (list, tuple)): cmd.extend(cmd_args) - elif isinstance(cmd_args, six.string_types): + elif isinstance(cmd_args, str): cmd.append(cmd_args) else: - log.warn("Unknown type %s provided for chef" - " 'exec_arguments' expected list, tuple," - " or string", type(cmd_args)) + log.warning("Unknown type %s provided for chef" + " 'exec_arguments' expected list, tuple," + " or string", type(cmd_args)) cmd.extend(CHEF_EXEC_DEF_ARGS) else: cmd.extend(CHEF_EXEC_DEF_ARGS) @@ -331,7 +332,7 @@ def install_chef(cloud, chef_cfg, log): retries=util.get_cfg_option_int(chef_cfg, "omnibus_url_retries"), omnibus_version=omnibus_version) else: - log.warn("Unknown chef install type '%s'", install_type) + log.warning("Unknown chef install type '%s'", install_type) run = False return run diff --git a/cloudinit/config/cc_debug.py b/cloudinit/config/cc_debug.py index 0a039eb3..4d5a6aa2 100644 --- a/cloudinit/config/cc_debug.py +++ b/cloudinit/config/cc_debug.py @@ -28,11 +28,11 @@ location that this cloud-init has been configured with when running. 
""" import copy - -from six import StringIO +from io import StringIO from cloudinit import type_utils from cloudinit import util +from cloudinit import safeyaml SKIP_KEYS = frozenset(['log_cfgs']) @@ -49,7 +49,7 @@ def _make_header(text): def _dumps(obj): - text = util.yaml_dumps(obj, explicit_start=False, explicit_end=False) + text = safeyaml.dumps(obj, explicit_start=False, explicit_end=False) return text.rstrip() diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py index 29e192e8..0796cb7b 100644 --- a/cloudinit/config/cc_disk_setup.py +++ b/cloudinit/config/cc_disk_setup.py @@ -825,6 +825,7 @@ def lookup_force_flag(fs): 'btrfs': '-f', 'xfs': '-f', 'reiserfs': '-f', + 'swap': '-f', } if 'ext' in fs.lower(): @@ -982,7 +983,9 @@ def mkfs(fs_cfg): # File systems that support the -F flag if overwrite or device_type(device) == "disk": - fs_cmd.append(lookup_force_flag(fs_type)) + force_flag = lookup_force_flag(fs_type) + if force_flag: + fs_cmd.append(force_flag) # Add the extends FS options if fs_opts: diff --git a/cloudinit/config/cc_emit_upstart.py b/cloudinit/config/cc_emit_upstart.py index eb9fbe66..b342e04d 100644 --- a/cloudinit/config/cc_emit_upstart.py +++ b/cloudinit/config/cc_emit_upstart.py @@ -69,6 +69,6 @@ def handle(name, _cfg, cloud, log, args): util.subp(cmd) except Exception as e: # TODO(harlowja), use log exception from utils?? - log.warn("Emission of upstart event %s failed due to: %s", n, e) + log.warning("Emission of upstart event %s failed due to: %s", n, e) # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_final_message.py b/cloudinit/config/cc_final_message.py index c61f03d4..fd141541 100644 --- a/cloudinit/config/cc_final_message.py +++ b/cloudinit/config/cc_final_message.py @@ -83,6 +83,6 @@ def handle(_name, cfg, cloud, log, args): util.logexc(log, "Failed to write boot finished file %s", boot_fin_fn) if cloud.datasource.is_disconnected: - log.warn("Used fallback datasource") + log.warning("Used fallback datasource") # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index bafca9d8..1b512a06 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -22,11 +22,11 @@ mountpoint in the filesystem or a path to the block device in ``/dev``. The utility to use for resizing can be selected using the ``mode`` config key. If ``mode`` key is set to ``auto``, then any available utility (either -``growpart`` or ``gpart``) will be used. If neither utility is available, no -error will be raised. If ``mode`` is set to ``growpart``, then the ``growpart`` -utility will be used. If this utility is not available on the system, this will -result in an error. If ``mode`` is set to ``off`` or ``false``, then -``cc_growpart`` will take no action. +``growpart`` or BSD ``gpart``) will be used. If neither utility is available, +no error will be raised. If ``mode`` is set to ``growpart``, then the +``growpart`` utility will be used. If this utility is not available on the +system, this will result in an error. If ``mode`` is set to ``off`` or +``false``, then ``cc_growpart`` will take no action. There is some functionality overlap between this module and the ``growroot`` functionality of ``cloud-initramfs-tools``. 
However, there are some situations @@ -132,7 +132,7 @@ class ResizeGrowPart(object): try: (out, _err) = util.subp(["growpart", "--help"], env=myenv) - if re.search(r"--update\s+", out, re.DOTALL): + if re.search(r"--update\s+", out): return True except util.ProcessExecutionError: @@ -161,9 +161,17 @@ class ResizeGrowPart(object): class ResizeGpart(object): def available(self): - if not util.which('gpart'): - return False - return True + myenv = os.environ.copy() + myenv['LANG'] = 'C' + + try: + (_out, err) = util.subp(["gpart", "help"], env=myenv, rcs=[0, 1]) + if re.search(r"gpart recover ", err): + return True + + except util.ProcessExecutionError: + pass + return False def resize(self, diskdev, partnum, partdev): """ @@ -215,7 +223,8 @@ def device_part_info(devpath): # FreeBSD doesn't know of sysfs so just get everything we need from # the device, like /dev/vtbd0p2. if util.is_FreeBSD(): - m = re.search('^(/dev/.+)p([0-9])$', devpath) + freebsd_part = "/dev/" + util.find_freebsd_part(devpath) + m = re.search('^(/dev/.+)p([0-9])$', freebsd_part) return (m.group(1), m.group(2)) if not os.path.exists(syspath): @@ -320,7 +329,7 @@ def handle(_name, cfg, _cloud, log, _args): mycfg = cfg.get('growpart') if not isinstance(mycfg, dict): - log.warn("'growpart' in config was not a dict") + log.warning("'growpart' in config was not a dict") return mode = mycfg.get('mode', "auto") diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py index aff4010e..3d2ded3d 100644 --- a/cloudinit/config/cc_keys_to_console.py +++ b/cloudinit/config/cc_keys_to_console.py @@ -9,10 +9,10 @@ """ Keys to Console --------------- -**Summary:** control which ssh keys may be written to console +**Summary:** control which SSH keys may be written to console -For security reasons it may be desirable not to write ssh fingerprints and keys -to the console. To avoid the fingerprint of types of ssh keys being written to +For security reasons it may be desirable not to write SSH fingerprints and keys +to the console. To avoid the fingerprint of types of SSH keys being written to console the ``ssh_fp_console_blacklist`` config key can be used. By default all types of keys will have their fingerprints written to console. 
To avoid keys of a key type being written to console the ``ssh_key_console_blacklist`` config @@ -52,8 +52,8 @@ def _get_helper_tool_path(distro): def handle(name, cfg, cloud, log, _args): helper_path = _get_helper_tool_path(cloud.distro) if not os.path.exists(helper_path): - log.warn(("Unable to activate module %s," - " helper tool not found at %s"), name, helper_path) + log.warning(("Unable to activate module %s," + " helper tool not found at %s"), name, helper_path) return fp_blacklist = util.get_cfg_option_list(cfg, @@ -68,7 +68,7 @@ def handle(name, cfg, cloud, log, _args): util.multi_log("%s\n" % (stdout.strip()), stderr=False, console=True) except Exception: - log.warn("Writing keys to the system console failed!") + log.warning("Writing keys to the system console failed!") raise # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py index eaf1e940..a9c04d86 100644 --- a/cloudinit/config/cc_landscape.py +++ b/cloudinit/config/cc_landscape.py @@ -56,8 +56,7 @@ The following default client config is provided, but can be overridden:: """ import os - -from six import BytesIO +from io import BytesIO from configobj import ConfigObj diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py index 24a8ebea..151a9844 100644 --- a/cloudinit/config/cc_lxd.py +++ b/cloudinit/config/cc_lxd.py @@ -66,21 +66,21 @@ def handle(name, cfg, cloud, log, args): name) return if not isinstance(lxd_cfg, dict): - log.warn("lxd config must be a dictionary. found a '%s'", - type(lxd_cfg)) + log.warning("lxd config must be a dictionary. found a '%s'", + type(lxd_cfg)) return # Grab the configuration init_cfg = lxd_cfg.get('init') if not isinstance(init_cfg, dict): - log.warn("lxd/init config must be a dictionary. found a '%s'", - type(init_cfg)) + log.warning("lxd/init config must be a dictionary. found a '%s'", + type(init_cfg)) init_cfg = {} bridge_cfg = lxd_cfg.get('bridge', {}) if not isinstance(bridge_cfg, dict): - log.warn("lxd/bridge config must be a dictionary. found a '%s'", - type(bridge_cfg)) + log.warning("lxd/bridge config must be a dictionary. found a '%s'", + type(bridge_cfg)) bridge_cfg = {} # Install the needed packages @@ -89,13 +89,13 @@ def handle(name, cfg, cloud, log, args): packages.append('lxd') if init_cfg.get("storage_backend") == "zfs" and not util.which('zfs'): - packages.append('zfs') + packages.append('zfsutils-linux') if len(packages): try: cloud.distro.install_packages(packages) except util.ProcessExecutionError as exc: - log.warn("failed to install packages %s: %s", packages, exc) + log.warning("failed to install packages %s: %s", packages, exc) return # Set up lxd if init config is given @@ -152,7 +152,7 @@ def handle(name, cfg, cloud, log, args): if cmd_attach: log.debug("Setting up default lxd bridge: %s" % - " ".join(cmd_create)) + " ".join(cmd_attach)) _lxc(cmd_attach) elif bridge_cfg: @@ -301,5 +301,4 @@ def maybe_cleanup_default(net_name, did_init, create, attach, raise e LOG.debug(msg, nic_name, profile, fail_assume_enoent) - # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_mcollective.py b/cloudinit/config/cc_mcollective.py index d5f63f5f..351183f1 100644 --- a/cloudinit/config/cc_mcollective.py +++ b/cloudinit/config/cc_mcollective.py @@ -49,9 +49,7 @@ private certificates for mcollective. 
Their values will be written to
"""

import errno
-
-import six
-from six import BytesIO
+import io

# Used since this can maintain comments
# and doesn't need a top level section
@@ -73,7 +71,7 @@ def configure(config, server_cfg=SERVER_CFG,
     # original file in order to be able to mix the rest up.
     try:
         old_contents = util.load_file(server_cfg, quiet=False, decode=False)
-        mcollective_config = ConfigObj(BytesIO(old_contents))
+        mcollective_config = ConfigObj(io.BytesIO(old_contents))
     except IOError as e:
         if e.errno != errno.ENOENT:
             raise
@@ -93,7 +91,7 @@
             'plugin.ssl_server_private'] = pricert_file
         mcollective_config['securityprovider'] = 'ssl'
     else:
-        if isinstance(cfg, six.string_types):
+        if isinstance(cfg, str):
             # Just set it in the 'main' section
             mcollective_config[cfg_name] = cfg
         elif isinstance(cfg, (dict)):
@@ -119,7 +117,7 @@
             raise

     # Now we got the whole (new) file, write to disk...
-    contents = BytesIO()
+    contents = io.BytesIO()
     mcollective_config.write(contents)
     util.write_file(server_cfg, contents.getvalue(), mode=0o644)

diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index 339baba9..4ae3f1fc 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -25,7 +25,7 @@ mountpoint (i.e. ``[ sda1 ]`` or ``[ sda1, null ]``).

 The ``mount_default_fields`` config key allows default options to be specified
 for the values in a ``mounts`` entry that are not specified, aside from the
-``fs_spec`` and the ``fs_file``. If specified, this must be a list containing 7
+``fs_spec`` and the ``fs_file``. If specified, this must be a list containing 6
 values. It defaults to::

     mount_default_fields: [none, none, "auto", "defaults,nobootwait", "0", "2"]
@@ -223,13 +223,58 @@ def suggested_swapsize(memsize=None, maxsize=None, fsys=None):
     return size


+def create_swapfile(fname, size):
+    """Size is in MiB."""
+
+    errmsg = "Failed to create swapfile '%s' of size %dMB via %s: %s"
+
+    def create_swap(fname, size, method):
+        LOG.debug("Creating swapfile in '%s' on fstype '%s' using '%s'",
+                  fname, fstype, method)
+
+        if method == "fallocate":
+            cmd = ['fallocate', '-l', '%dM' % size, fname]
+        elif method == "dd":
+            cmd = ['dd', 'if=/dev/zero', 'of=%s' % fname, 'bs=1M',
+                   'count=%d' % size]
+
+        try:
+            util.subp(cmd, capture=True)
+        except util.ProcessExecutionError as e:
+            LOG.warning(errmsg, fname, size, method, e)
+            util.del_file(fname)
+            raise
+
+    swap_dir = os.path.dirname(fname)
+    util.ensure_dir(swap_dir)
+
+    fstype = util.get_mount_info(swap_dir)[1]
+
+    if fstype in ("xfs", "btrfs"):
+        create_swap(fname, size, "dd")
+    else:
+        try:
+            create_swap(fname, size, "fallocate")
+        except util.ProcessExecutionError as e:
+            LOG.warning(errmsg, fname, size, "fallocate", e)
+            LOG.warning("Will attempt with dd.")
+            create_swap(fname, size, "dd")
+
+    util.chmod(fname, 0o600)
+    try:
+        util.subp(['mkswap', fname])
+    except util.ProcessExecutionError:
+        util.del_file(fname)
+        raise
+
+
 def setup_swapfile(fname, size=None, maxsize=None):
     """
     fname: full path string of filename to setup
     size: the size to create. set to "auto" for recommended
     maxsize: the maximum size
     """
-    tdir = os.path.dirname(fname)
+    swap_dir = os.path.dirname(fname)
     if str(size).lower() == "auto":
         try:
             memsize = util.read_meminfo()['total']
@@ -237,28 +282,16 @@
             LOG.debug("Not creating swap: failed to read meminfo")
             return

-        util.ensure_dir(tdir)
-        size = suggested_swapsize(fsys=tdir, maxsize=maxsize,
+        util.ensure_dir(swap_dir)
+        size = suggested_swapsize(fsys=swap_dir, maxsize=maxsize,
                                   memsize=memsize)

     if not size:
         LOG.debug("Not creating swap: suggested size was 0")
         return

-    mbsize = str(int(size / (2 ** 20)))
-    msg = "creating swap file '%s' of %sMB" % (fname, mbsize)
-    try:
-        util.ensure_dir(tdir)
-        util.log_time(LOG.debug, msg, func=util.subp,
-                      args=[['sh', '-c',
-                             ('rm -f "$1" && umask 0066 && '
-                              '{ fallocate -l "${2}M" "$1" || '
-                              ' dd if=/dev/zero "of=$1" bs=1M "count=$2"; } && '
-                              'mkswap "$1" || { r=$?; rm -f "$1"; exit $r; }'),
-                             'setup_swap', fname, mbsize]])
-
-    except Exception as e:
-        raise IOError("Failed %s: %s" % (msg, e))
+    # compute only once any "auto" has been resolved to a numeric byte count
+    mibsize = int(size / (2 ** 20))
+    util.log_time(LOG.debug, msg="Setting up swap file", func=create_swapfile,
+                  args=[fname, mibsize])

     return fname

@@ -347,8 +380,8 @@ def handle(_name, cfg, cloud, log, _args):
     for i in range(len(cfgmnt)):
         # skip something that wasn't a list
         if not isinstance(cfgmnt[i], list):
-            log.warn("Mount option %s not a list, got a %s instead",
-                     (i + 1), type_utils.obj_name(cfgmnt[i]))
+            log.warning("Mount option %s not a list, got a %s instead",
+                        (i + 1), type_utils.obj_name(cfgmnt[i]))
             continue

         start = str(cfgmnt[i][0])
@@ -439,6 +472,7 @@

     cc_lines = []
     needswap = False
+    need_mount_all = False
     dirs = []
     for line in actlist:
         # write 'comment' in the fs_mntops, entry, claiming this
@@ -449,11 +483,18 @@
         dirs.append(line[1])
         cc_lines.append('\t'.join(line))

+    mount_points = [v['mountpoint'] for k, v in util.mounts().items()
+                    if 'mountpoint' in v]
     for d in dirs:
         try:
             util.ensure_dir(d)
         except Exception:
             util.logexc(log, "Failed to make '%s' config-mount", d)
+        # dirs is the list of directories on which a volume should be mounted.
+        # If any of them does not already show up in the list of current
+        # mount points, we will definitely need to do mount -a.
+ if not need_mount_all and d not in mount_points: + need_mount_all = True sadds = [WS.sub(" ", n) for n in cc_lines] sdrops = [WS.sub(" ", n) for n in fstab_removed] @@ -473,6 +514,9 @@ def handle(_name, cfg, cloud, log, _args): log.debug("No changes to /etc/fstab made.") else: log.debug("Changes to fstab: %s", sops) + need_mount_all = True + + if need_mount_all: activate_cmds.append(["mount", "-a"]) if uses_systemd: activate_cmds.append(["systemctl", "daemon-reload"]) @@ -484,7 +528,7 @@ def handle(_name, cfg, cloud, log, _args): util.subp(cmd) log.debug(fmt, "PASS") except util.ProcessExecutionError: - log.warn(fmt, "FAIL") + log.warning(fmt, "FAIL") util.logexc(log, fmt, "FAIL") # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py index 9e074bda..5498bbaa 100644 --- a/cloudinit/config/cc_ntp.py +++ b/cloudinit/config/cc_ntp.py @@ -6,19 +6,17 @@ """NTP: enable and configure ntp""" -from cloudinit.config.schema import ( - get_schema_doc, validate_cloudconfig_schema) +import copy +import os +from textwrap import dedent + from cloudinit import log as logging -from cloudinit.settings import PER_INSTANCE from cloudinit import temp_utils from cloudinit import templater from cloudinit import type_utils from cloudinit import util - -import copy -import os -import six -from textwrap import dedent +from cloudinit.config.schema import get_schema_doc, validate_cloudconfig_schema +from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) @@ -460,7 +458,7 @@ def supplemental_schema_validation(ntp_config): for key, value in sorted(ntp_config.items()): keypath = 'ntp:config:' + key if key == 'confpath': - if not all([value, isinstance(value, six.string_types)]): + if not all([value, isinstance(value, str)]): errors.append( 'Expected a config file path {keypath}.' ' Found ({value})'.format(keypath=keypath, value=value)) @@ -472,11 +470,11 @@ def supplemental_schema_validation(ntp_config): elif key in ('template', 'template_name'): if value is None: # Either template or template_name can be none continue - if not isinstance(value, six.string_types): + if not isinstance(value, str): errors.append( 'Expected a string type for {keypath}.' ' Found ({value})'.format(keypath=keypath, value=value)) - elif not isinstance(value, six.string_types): + elif not isinstance(value, str): errors.append( 'Expected a string type for {keypath}.' ' Found ({value})'.format(keypath=keypath, value=value)) diff --git a/cloudinit/config/cc_package_update_upgrade_install.py b/cloudinit/config/cc_package_update_upgrade_install.py index 17b91011..86afffef 100644 --- a/cloudinit/config/cc_package_update_upgrade_install.py +++ b/cloudinit/config/cc_package_update_upgrade_install.py @@ -108,7 +108,8 @@ def handle(_name, cfg, cloud, log, _args): reboot_fn_exists = os.path.isfile(REBOOT_FILE) if (upgrade or pkglist) and reboot_if_required and reboot_fn_exists: try: - log.warn("Rebooting after upgrade or install per %s", REBOOT_FILE) + log.warning("Rebooting after upgrade or install per " + "%s", REBOOT_FILE) # Flush the above warning + anything else out... 
            logging.flushLoggers(log)
            _fire_reboot(log)
@@ -117,8 +118,8 @@
            errors.append(e)

    if len(errors):
-        log.warn("%s failed with exceptions, re-raising the last one",
-                 len(errors))
+        log.warning("%s failed with exceptions, re-raising the last one",
+                    len(errors))
        raise errors[-1]

# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py
index 3be0d1c1..b8e27090 100644
--- a/cloudinit/config/cc_phone_home.py
+++ b/cloudinit/config/cc_phone_home.py
@@ -79,8 +79,8 @@ def handle(name, cfg, cloud, log, args):
     ph_cfg = cfg['phone_home']

     if 'url' not in ph_cfg:
-        log.warn(("Skipping module named %s, "
-                  "no 'url' found in 'phone_home' configuration"), name)
+        log.warning(("Skipping module named %s, "
+                     "no 'url' found in 'phone_home' configuration"), name)
         return

     url = ph_cfg['url']
@@ -91,7 +91,7 @@
     except Exception:
         tries = 10
         util.logexc(log, "Configuration entry 'tries' is not an integer, "
-                    "using %s instead", tries)
+                         "using %s instead", tries)

     if post_list == "all":
         post_list = POST_LIST_ALL
@@ -112,7 +112,7 @@
             all_keys[n] = util.load_file(path)
         except Exception:
             util.logexc(log, "%s: failed to open, can not phone home that "
-                        "data!", path)
+                             "data!", path)

     submit_keys = {}
     for k in post_list:
@@ -120,8 +120,8 @@
             submit_keys[k] = all_keys[k]
         else:
             submit_keys[k] = None
-            log.warn(("Requested key %s from 'post'"
-                      " configuration list not available"), k)
+            log.warning(("Requested key %s from 'post'"
+                         " configuration list not available"), k)

     # Get them ready to be posted
     real_submit_keys = {}
diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py
index 50b37470..3e81a3c7 100644
--- a/cloudinit/config/cc_power_state_change.py
+++ b/cloudinit/config/cc_power_state_change.py
@@ -49,16 +49,15 @@ key returns 0.
 condition: <true/false/command>
 """

-from cloudinit.settings import PER_INSTANCE
-from cloudinit import util
-
 import errno
 import os
 import re
-import six
 import subprocess
 import time

+from cloudinit.settings import PER_INSTANCE
+from cloudinit import util
+
 frequency = PER_INSTANCE

 EXIT_FAIL = 254
@@ -103,24 +102,23 @@ def check_condition(cond, log=None):
             return False
         else:
             if log:
-                log.warn(pre + "unexpected exit %s. " % ret +
-                         "do not apply change.")
+                log.warning(pre + "unexpected exit %s. " % ret +
+                            "do not apply change.")
             return False
     except Exception as e:
         if log:
-            log.warn(pre + "Unexpected error: %s" % e)
+            log.warning(pre + "Unexpected error: %s" % e)
         return False


 def handle(_name, cfg, _cloud, log, _args):
-
     try:
         (args, timeout, condition) = load_power_state(cfg)
         if args is None:
             log.debug("no power_state provided. doing nothing")
             return
     except Exception as e:
-        log.warn("%s Not performing power state change!" % str(e))
+        log.warning("%s Not performing power state change!" % str(e))
         return

     if condition is False:
@@ -131,7 +129,7 @@
     cmdline = givecmdline(mypid)
     if not cmdline:
-        log.warn("power_state: failed to get cmdline of current process")
+        log.warning("power_state: failed to get cmdline of current process")
         return

     devnull_fp = open(os.devnull, "w")
@@ -184,7 +182,7 @@ def load_power_state(cfg):
                         pstate['timeout'])

     condition = pstate.get("condition", True)
-    if not isinstance(condition, six.string_types + (list, bool)):
+    if not isinstance(condition, (str, list, bool)):
        raise TypeError("condition type %s invalid. must be list, bool, str"
                        % type(condition))
    return (args, timeout, condition)

@@ -214,7 +212,7 @@ def run_after_pid_gone(pid, pidcmdline, timeout, log, condition, func, args):

     def fatal(msg):
         if log:
-            log.warn(msg)
+            log.warning(msg)
         doexit(EXIT_FAIL)

     known_errnos = (errno.ENOENT, errno.ESRCH)
diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py
index 4190a20b..c01f5b8f 100644
--- a/cloudinit/config/cc_puppet.py
+++ b/cloudinit/config/cc_puppet.py
@@ -24,9 +24,10 @@ module will attempt to start puppet even if no installation was performed.
 The module also provides keys for configuring the new puppet 4 paths and
 installing the puppet package from the puppetlabs repositories:
 https://docs.puppet.com/puppet/4.2/reference/whered_it_go.html
-The keys are ``package_name``, ``conf_file`` and ``ssl_dir``. If unset, their
-values will default to ones that work with puppet 3.x and with distributions
-that ship modified puppet 4.x that uses the old paths.
+The keys are ``package_name``, ``conf_file``, ``ssl_dir`` and
+``csr_attributes_path``. If unset, their values will default to
+ones that work with puppet 3.x and with distributions that ship modified
+puppet 4.x that uses the old paths.

 Puppet configuration can be specified under the ``conf`` key. The
 configuration is specified as a dictionary containing high-level ``<section>``
@@ -40,6 +41,10 @@ If ``ca_cert`` is present, it will not be written to ``puppet.conf``, but
 instead will be used as the puppetmaster certificate. It should be specified
 in pem format as a multi-line string (using the ``|`` yaml notation).

+Additionally it's possible to create a csr_attributes.yaml file for
+CSR attributes and certificate extension requests.
+See https://puppet.com/docs/puppet/latest/config_file_csr_attributes.html
+
 **Internal name:** ``cc_puppet``

 **Module frequency:** per instance
@@ -53,6 +58,7 @@ in pem format as a multi-line string (using the ``|`` yaml notation).
         version: <version>
         conf_file: '/etc/puppet/puppet.conf'
         ssl_dir: '/var/lib/puppet/ssl'
+        csr_attributes_path: '/etc/puppet/csr_attributes.yaml'
         package_name: 'puppet'
         conf:
             agent:
@@ -62,28 +68,38 @@ in pem format as a multi-line string (using the ``|`` yaml notation).
-------BEGIN CERTIFICATE------- <cert data> -------END CERTIFICATE------- + csr_attributes: + custom_attributes: + 1.2.840.113549.1.9.7: 342thbjkt82094y0uthhor289jnqthpc2290 + extension_requests: + pp_uuid: ED803750-E3C7-44F5-BB08-41A04433FE2E + pp_image_name: my_ami_image + pp_preshared_key: 342thbjkt82094y0uthhor289jnqthpc2290 """ -from six import StringIO - import os import socket +import yaml +from io import StringIO from cloudinit import helpers from cloudinit import util PUPPET_CONF_PATH = '/etc/puppet/puppet.conf' PUPPET_SSL_DIR = '/var/lib/puppet/ssl' +PUPPET_CSR_ATTRIBUTES_PATH = '/etc/puppet/csr_attributes.yaml' PUPPET_PACKAGE_NAME = 'puppet' class PuppetConstants(object): - def __init__(self, puppet_conf_file, puppet_ssl_dir, log): + def __init__(self, puppet_conf_file, puppet_ssl_dir, + csr_attributes_path, log): self.conf_path = puppet_conf_file self.ssl_dir = puppet_ssl_dir self.ssl_cert_dir = os.path.join(puppet_ssl_dir, "certs") self.ssl_cert_path = os.path.join(self.ssl_cert_dir, "ca.pem") + self.csr_attributes_path = csr_attributes_path def _autostart_puppet(log): @@ -98,8 +114,8 @@ def _autostart_puppet(log): elif os.path.exists('/sbin/chkconfig'): util.subp(['/sbin/chkconfig', 'puppet', 'on'], capture=False) else: - log.warn(("Sorry we do not know how to enable" - " puppet services on this system")) + log.warning(("Sorry we do not know how to enable" + " puppet services on this system")) def handle(name, cfg, cloud, log, _args): @@ -118,11 +134,13 @@ def handle(name, cfg, cloud, log, _args): conf_file = util.get_cfg_option_str( puppet_cfg, 'conf_file', PUPPET_CONF_PATH) ssl_dir = util.get_cfg_option_str(puppet_cfg, 'ssl_dir', PUPPET_SSL_DIR) + csr_attributes_path = util.get_cfg_option_str( + puppet_cfg, 'csr_attributes_path', PUPPET_CSR_ATTRIBUTES_PATH) - p_constants = PuppetConstants(conf_file, ssl_dir, log) + p_constants = PuppetConstants(conf_file, ssl_dir, csr_attributes_path, log) if not install and version: - log.warn(("Puppet install set false but version supplied," - " doing nothing.")) + log.warning(("Puppet install set false but version supplied," + " doing nothing.")) elif install: log.debug(("Attempting to install puppet %s,"), version if version else 'latest') @@ -141,7 +159,7 @@ def handle(name, cfg, cloud, log, _args): cleaned_lines = [i.lstrip() for i in contents.splitlines()] cleaned_contents = '\n'.join(cleaned_lines) # Move to puppet_config.read_file when dropping py2.7 - puppet_config.readfp( # pylint: disable=W1505 + puppet_config.readfp( # pylint: disable=W1505 StringIO(cleaned_contents), filename=p_constants.conf_path) for (cfg_name, cfg) in puppet_cfg['conf'].items(): @@ -176,6 +194,11 @@ def handle(name, cfg, cloud, log, _args): % (p_constants.conf_path)) util.write_file(p_constants.conf_path, puppet_config.stringify()) + if 'csr_attributes' in puppet_cfg: + util.write_file(p_constants.csr_attributes_path, + yaml.dump(puppet_cfg['csr_attributes'], + default_flow_style=False)) + # Set it up so it autostarts _autostart_puppet(log) diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index 076b9d5a..01dfc125 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -8,7 +8,6 @@ """Resizefs: cloud-config module which resizes the filesystem""" - import errno import getopt import os @@ -81,7 +80,7 @@ def _resize_xfs(mount_point, devpth): def _resize_ufs(mount_point, devpth): - return ('growfs', '-y', devpth) + return ('growfs', '-y', mount_point) def _resize_zfs(mount_point, devpth): @@ -101,7 
+100,7 @@ def _can_skip_resize_ufs(mount_point, devpth): """ # dumpfs -m / # newfs command for / (/dev/label/rootfs) - newfs -O 2 -U -a 4 -b 32768 -d 32768 -e 4096 -f 4096 -g 16384 + newfs -L rootf -O 2 -U -a 4 -b 32768 -d 32768 -e 4096 -f 4096 -g 16384 -h 64 -i 8192 -j -k 6408 -m 8 -o time -s 58719232 /dev/label/rootf """ cur_fs_sz = None @@ -110,7 +109,7 @@ def _can_skip_resize_ufs(mount_point, devpth): for line in dumpfs_res.splitlines(): if not line.startswith('#'): newfs_cmd = shlex.split(line) - opt_value = 'O:Ua:s:b:d:e:f:g:h:i:jk:m:o:' + opt_value = 'O:Ua:s:b:d:e:f:g:h:i:jk:m:o:L:' optlist, _args = getopt.getopt(newfs_cmd[1:], opt_value) for o, a in optlist: if o == "-s": @@ -183,7 +182,7 @@ def maybe_get_writable_device_path(devpath, info, log): not container): devpath = util.rootdev_from_cmdline(util.get_cmdline()) if devpath is None: - log.warn("Unable to find device '/dev/root'") + log.warning("Unable to find device '/dev/root'") return None log.debug("Converted /dev/root to '%s' per kernel cmdline", devpath) @@ -212,8 +211,8 @@ def maybe_get_writable_device_path(devpath, info, log): log.debug("Device '%s' did not exist in container. " "cannot resize: %s", devpath, info) elif exc.errno == errno.ENOENT: - log.warn("Device '%s' did not exist. cannot resize: %s", - devpath, info) + log.warning("Device '%s' did not exist. cannot resize: %s", + devpath, info) else: raise exc return None @@ -223,8 +222,8 @@ def maybe_get_writable_device_path(devpath, info, log): log.debug("device '%s' not a block device in container." " cannot resize: %s" % (devpath, info)) else: - log.warn("device '%s' not a block device. cannot resize: %s" % - (devpath, info)) + log.warning("device '%s' not a block device. cannot resize: %s" % + (devpath, info)) return None return devpath # The writable block devpath @@ -243,7 +242,7 @@ def handle(name, cfg, _cloud, log, args): resize_what = "/" result = util.get_mount_info(resize_what, log) if not result: - log.warn("Could not determine filesystem type of %s", resize_what) + log.warning("Could not determine filesystem type of %s", resize_what) return (devpth, fs_type, mount_point) = result @@ -280,8 +279,8 @@ def handle(name, cfg, _cloud, log, args): break if not resizer: - log.warn("Not resizing unknown filesystem type %s for %s", - fs_type, resize_what) + log.warning("Not resizing unknown filesystem type %s for %s", + fs_type, resize_what) return resize_cmd = resizer(resize_what, devpth) diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py index 9812562a..69f4768a 100644 --- a/cloudinit/config/cc_resolv_conf.py +++ b/cloudinit/config/cc_resolv_conf.py @@ -102,11 +102,11 @@ def handle(name, cfg, cloud, log, _args): return if "resolv_conf" not in cfg: - log.warn("manage_resolv_conf True but no parameters provided!") + log.warning("manage_resolv_conf True but no parameters provided!") template_fn = cloud.get_template_filename('resolv.conf') if not template_fn: - log.warn("No template found, not rendering /etc/resolv.conf") + log.warning("No template found, not rendering /etc/resolv.conf") return generate_resolv_conf(template_fn=template_fn, params=cfg["resolv_conf"]) diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py index edee01e5..28c79b83 100644 --- a/cloudinit/config/cc_rh_subscription.py +++ b/cloudinit/config/cc_rh_subscription.py @@ -249,14 +249,14 @@ class SubscriptionManager(object): except util.ProcessExecutionError as e: if e.stdout.rstrip() != '': for line in e.stdout.split("\n"): 
- if line is not '': + if line != '': self.log_warn(line) else: self.log_warn("Setting the service level failed with: " "{0}".format(e.stderr.strip())) return False for line in return_out.split("\n"): - if line is not "": + if line != "": self.log.debug(line) return True @@ -268,7 +268,7 @@ class SubscriptionManager(object): self.log_warn("Auto-attach failed with: {0}".format(e)) return False for line in return_out.split("\n"): - if line is not "": + if line != "": self.log.debug(line) return True diff --git a/cloudinit/config/cc_rightscale_userdata.py b/cloudinit/config/cc_rightscale_userdata.py index 4e34c7e9..a5aca038 100644 --- a/cloudinit/config/cc_rightscale_userdata.py +++ b/cloudinit/config/cc_rightscale_userdata.py @@ -50,13 +50,12 @@ user scripts configuration directory, to be run later by ``cc_scripts_user``. # import os +from urllib.parse import parse_qs from cloudinit.settings import PER_INSTANCE from cloudinit import url_helper as uhelp from cloudinit import util -from six.moves.urllib_parse import parse_qs - frequency = PER_INSTANCE MY_NAME = "cc_rightscale_userdata" @@ -111,8 +110,8 @@ def handle(name, _cfg, cloud, log, _args): log.debug("%s urls were skipped or failed", skipped) if captured_excps: - log.warn("%s failed with exceptions, re-raising the last one", - len(captured_excps)) + log.warning("%s failed with exceptions, re-raising the last one", + len(captured_excps)) raise captured_excps[-1] # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py index 27d2366c..5df0137d 100644 --- a/cloudinit/config/cc_rsyslog.py +++ b/cloudinit/config/cc_rsyslog.py @@ -180,7 +180,6 @@ config entries. Legacy to new mappings are as follows: import os import re -import six from cloudinit import log as logging from cloudinit import util @@ -203,7 +202,7 @@ LOG = logging.getLogger(__name__) COMMENT_RE = re.compile(r'[ ]*[#]+[ ]*') HOST_PORT_RE = re.compile( r'^(?P<proto>[@]{0,2})' - r'(([[](?P<bracket_addr>[^\]]*)[\]])|(?P<addr>[^:]*))' + r'(([\[](?P<bracket_addr>[^\]]*)[\]])|(?P<addr>[^:]*))' r'([:](?P<port>[0-9]+))?$') @@ -233,9 +232,9 @@ def load_config(cfg): fillup = ( (KEYNAME_CONFIGS, [], list), - (KEYNAME_DIR, DEF_DIR, six.string_types), - (KEYNAME_FILENAME, DEF_FILENAME, six.string_types), - (KEYNAME_RELOAD, DEF_RELOAD, six.string_types + (list,)), + (KEYNAME_DIR, DEF_DIR, str), + (KEYNAME_FILENAME, DEF_FILENAME, str), + (KEYNAME_RELOAD, DEF_RELOAD, (str, list)), (KEYNAME_REMOTES, DEF_REMOTES, dict)) for key, default, vtypes in fillup: @@ -432,7 +431,7 @@ def handle(name, cfg, cloud, log, _args): systemd=cloud.distro.uses_systemd()), except util.ProcessExecutionError as e: restarted = False - log.warn("Failed to reload syslog", e) + log.warning("Failed to reload syslog", e) if restarted: # This only needs to run if we *actually* restarted diff --git a/cloudinit/config/cc_salt_minion.py b/cloudinit/config/cc_salt_minion.py index d6a21d72..5dd8de37 100644 --- a/cloudinit/config/cc_salt_minion.py +++ b/cloudinit/config/cc_salt_minion.py @@ -45,7 +45,9 @@ specify them with ``pkg_name``, ``service_name`` and ``config_dir``. 
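The cc_rh_subscription hunk above replaces ``is not ''`` with ``!=``: ``is`` compares object identity, which only coincidentally holds for strings CPython happens to intern, and CPython 3.8+ emits a SyntaxWarning for ``is`` with a literal. A small illustration of the difference::

    line = ''.join(['a', 'b'])  # a str built at runtime
    print(line == 'ab')         # True: compares contents
    print(line is 'ab')         # typically False: join returns a new object,
                                # so identity with the literal is unreliable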
import os -from cloudinit import util +from cloudinit import safeyaml, util +from cloudinit.distros import rhel_util + # Note: see https://docs.saltstack.com/en/latest/topics/installation/ # Note: see https://docs.saltstack.com/en/latest/ref/configuration/ @@ -59,7 +61,7 @@ class SaltConstants(object): # constants tailored for FreeBSD if util.is_FreeBSD(): - self.pkg_name = 'py27-salt' + self.pkg_name = 'py36-salt' self.srv_name = 'salt_minion' self.conf_dir = '/usr/local/etc/salt' # constants for any other OS @@ -97,13 +99,13 @@ def handle(name, cfg, cloud, log, _args): if 'conf' in s_cfg: # Add all sections from the conf object to minion config file minion_config = os.path.join(const.conf_dir, 'minion') - minion_data = util.yaml_dumps(s_cfg.get('conf')) + minion_data = safeyaml.dumps(s_cfg.get('conf')) util.write_file(minion_config, minion_data) if 'grains' in s_cfg: # add grains to /etc/salt/grains grains_config = os.path.join(const.conf_dir, 'grains') - grains_data = util.yaml_dumps(s_cfg.get('grains')) + grains_data = safeyaml.dumps(s_cfg.get('grains')) util.write_file(grains_config, grains_data) # ... copy the key pair if specified @@ -123,7 +125,8 @@ def handle(name, cfg, cloud, log, _args): # we need to have the salt minion service enabled in rc in order to be # able to start the service. this does only apply on FreeBSD servers. if cloud.distro.osfamily == 'freebsd': - cloud.distro.updatercconf('salt_minion_enable', 'YES') + rhel_util.update_sysconfig_file( + '/etc/rc.conf', {'salt_minion_enable': 'YES'}) # restart salt-minion. 'service' will start even if not started. if it # was started, it needs to be restarted for config change. diff --git a/cloudinit/config/cc_scripts_per_boot.py b/cloudinit/config/cc_scripts_per_boot.py index b03255c7..588e1b03 100644 --- a/cloudinit/config/cc_scripts_per_boot.py +++ b/cloudinit/config/cc_scripts_per_boot.py @@ -40,8 +40,8 @@ def handle(name, _cfg, cloud, log, _args): try: util.runparts(runparts_path) except Exception: - log.warn("Failed to run module %s (%s in %s)", - name, SCRIPT_SUBDIR, runparts_path) + log.warning("Failed to run module %s (%s in %s)", + name, SCRIPT_SUBDIR, runparts_path) raise # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_scripts_per_instance.py b/cloudinit/config/cc_scripts_per_instance.py index baee5cc4..75549b52 100644 --- a/cloudinit/config/cc_scripts_per_instance.py +++ b/cloudinit/config/cc_scripts_per_instance.py @@ -15,6 +15,9 @@ Any scripts in the ``scripts/per-instance`` directory on the datasource will be run when a new instance is first booted. Scripts will be run in alphabetical order. This module does not accept any config keys. +Some cloud platforms change instance-id if a significant change was made to +the system. As a result per-instance scripts will run again. 
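The scripts_per_* handlers in this stretch of the diff all funnel into ``util.runparts``; a rough standalone sketch of the behaviour their docstrings describe (the helper name and error handling here are approximations, not cloud-init's actual implementation):

import os
import stat
import subprocess

def runparts_sketch(dirpath):
    # Run every executable file in dirpath in alphabetical order,
    # surfacing failures at the end, much as the handlers expect.
    failed = []
    for name in sorted(os.listdir(dirpath)):
        path = os.path.join(dirpath, name)
        if os.path.isfile(path) and os.stat(path).st_mode & stat.S_IXUSR:
            if subprocess.call([path]) != 0:
                failed.append(name)
    if failed:
        raise RuntimeError('failed scripts: %s' % ', '.join(failed))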
+ **Internal name:** ``cc_scripts_per_instance`` **Module frequency:** per instance @@ -40,8 +43,8 @@ def handle(name, _cfg, cloud, log, _args): try: util.runparts(runparts_path) except Exception: - log.warn("Failed to run module %s (%s in %s)", - name, SCRIPT_SUBDIR, runparts_path) + log.warning("Failed to run module %s (%s in %s)", + name, SCRIPT_SUBDIR, runparts_path) raise # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_scripts_per_once.py b/cloudinit/config/cc_scripts_per_once.py index 4943e9aa..259bdfab 100644 --- a/cloudinit/config/cc_scripts_per_once.py +++ b/cloudinit/config/cc_scripts_per_once.py @@ -12,8 +12,9 @@ Scripts Per Once **Summary:** run one time scripts Any scripts in the ``scripts/per-once`` directory on the datasource will be run -only once. Scripts will be run in alphabetical order. This module does not -accept any config keys. +only once. Changes to the instance will not force a re-run. The only way to +re-run these scripts is to run the clean subcommand and reboot. Scripts will +be run in alphabetical order. This module does not accept any config keys. **Internal name:** ``cc_scripts_per_once`` @@ -40,8 +41,8 @@ def handle(name, _cfg, cloud, log, _args): try: util.runparts(runparts_path) except Exception: - log.warn("Failed to run module %s (%s in %s)", - name, SCRIPT_SUBDIR, runparts_path) + log.warning("Failed to run module %s (%s in %s)", + name, SCRIPT_SUBDIR, runparts_path) raise # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_scripts_user.py b/cloudinit/config/cc_scripts_user.py index 6c66481e..d940dbd6 100644 --- a/cloudinit/config/cc_scripts_user.py +++ b/cloudinit/config/cc_scripts_user.py @@ -44,8 +44,8 @@ def handle(name, _cfg, cloud, log, _args): try: util.runparts(runparts_path) except Exception: - log.warn("Failed to run module %s (%s in %s)", - name, SCRIPT_SUBDIR, runparts_path) + log.warning("Failed to run module %s (%s in %s)", + name, SCRIPT_SUBDIR, runparts_path) raise # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_scripts_vendor.py b/cloudinit/config/cc_scripts_vendor.py index 0292eafb..faac9242 100644 --- a/cloudinit/config/cc_scripts_vendor.py +++ b/cloudinit/config/cc_scripts_vendor.py @@ -48,8 +48,8 @@ def handle(name, cfg, cloud, log, _args): try: util.runparts(runparts_path, exe_prefix=prefix) except Exception: - log.warn("Failed to run module %s (%s in %s)", - name, SCRIPT_SUBDIR, runparts_path) + log.warning("Failed to run module %s (%s in %s)", + name, SCRIPT_SUBDIR, runparts_path) raise # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py index 65f6e777..b65f3ed9 100644 --- a/cloudinit/config/cc_seed_random.py +++ b/cloudinit/config/cc_seed_random.py @@ -61,8 +61,7 @@ used:: import base64 import os - -from six import BytesIO +from io import BytesIO from cloudinit import log as logging from cloudinit.settings import PER_INSTANCE @@ -131,7 +130,7 @@ def handle(name, cfg, cloud, log, _args): env['RANDOM_SEED_FILE'] = seed_path handle_random_seed_command(command=command, required=req, env=env) except ValueError as e: - log.warn("handling random command [%s] failed: %s", command, e) + log.warning("handling random command [%s] failed: %s", command, e) raise e # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py index 3d2b2da3..10d6d197 100644 --- a/cloudinit/config/cc_set_hostname.py +++ b/cloudinit/config/cc_set_hostname.py @@ -21,7 +21,17 @@ key, and the fqdn of the cloud wil be used. 
A fqdn may instead be specified with the ``fqdn`` config key. If both ``fqdn`` and ``hostname`` are set, ``fqdn`` will be used. -**Internal name:** per instance +This module will run in the init-local stage before networking is configured +if the hostname is set by metadata or user data on the local system. + +This will occur on datasources like nocloud and ovf where metadata and user +data are available locally. This ensures that the desired hostname is applied +before any DHCP requests are performed on these platforms where dynamic DNS is +based on initial hostname. + +**Internal name:** ``cc_set_hostname`` + +**Module frequency:** per always + +**Supported distros:** all diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py index 5ef97376..4943d545 100755 --- a/cloudinit/config/cc_set_passwords.py +++ b/cloudinit/config/cc_set_passwords.py @@ -9,27 +9,40 @@ """ Set Passwords ------------- -**Summary:** Set user passwords - -Set system passwords and enable or disable ssh password authentication. -The ``chpasswd`` config key accepts a dictionary containing a single one of two -keys, either ``expire`` or ``list``. If ``expire`` is specified and is set to -``false``, then the ``password`` global config key is used as the password for -all user accounts. If the ``expire`` key is specified and is set to ``true`` -then user passwords will be expired, preventing the default system passwords -from being used. - -If the ``list`` key is provided, a list of -``username:password`` pairs can be specified. The usernames specified -must already exist on the system, or have been created using the -``cc_users_groups`` module. A password can be randomly generated using -``username:RANDOM`` or ``username:R``. A hashed password can be specified -using ``username:$6$salt$hash``. Password ssh authentication can be -enabled, disabled, or left to system defaults using ``ssh_pwauth``. +**Summary:** Set user passwords and enable/disable SSH password authentication + +This module consumes three top-level config keys: ``ssh_pwauth``, ``chpasswd`` +and ``password``. + +The ``ssh_pwauth`` config key determines whether or not sshd will be configured +to accept password authentication. True values will enable password auth, +false values will disable password auth, and the literal string ``unchanged`` +will leave it unchanged. Setting no value will also leave the current setting +on-disk unchanged. + +The ``chpasswd`` config key accepts a dictionary containing either or both of +``expire`` and ``list``. + +If the ``list`` key is provided, it should contain a list of +``username:password`` pairs. This can be either a YAML list (of strings), or a +multi-line string with one pair per line. Each user will have the +corresponding password set. A password can be randomly generated by specifying +``RANDOM`` or ``R`` as a user's password. A hashed password, created by a tool +like ``mkpasswd``, can be specified; a regex +(``r'\\$(1|2a|2y|5|6)(\\$.+){2}'``) is used to determine if a password value +should be treated as a hash. .. note:: - if using ``expire: true`` then a ssh authkey should be specified or it may - not be possible to login to the system + The users specified must already exist on the system. Users will have been + created by the ``cc_users_groups`` module at this point. + +By default, all users on the system will have their passwords expired (meaning +that they will have to be reset the next time the user logs in).
To disable +this behaviour, set ``expire`` under ``chpasswd`` to a false value. + +If a ``list`` of user/password pairs is not specified under ``chpasswd``, then +the value of the ``password`` config key will be used to set the default user's +password. **Internal name:** ``cc_set_passwords`` @@ -99,7 +112,7 @@ def handle_ssh_pwauth(pw_auth, service_cmd=None, service_name="ssh"): elif util.is_false(pw_auth): cfg_val = 'no' else: - bmsg = "Leaving ssh config '%s' unchanged." % cfg_name + bmsg = "Leaving SSH config '%s' unchanged." % cfg_name if pw_auth is None or pw_auth.lower() == 'unchanged': LOG.debug("%s ssh_pwauth=%s", bmsg, pw_auth) else: @@ -108,7 +121,7 @@ def handle_ssh_pwauth(pw_auth, service_cmd=None, service_name="ssh"): updated = update_ssh_config({cfg_name: cfg_val}) if not updated: - LOG.debug("No need to restart ssh service, %s not updated.", cfg_name) + LOG.debug("No need to restart SSH service, %s not updated.", cfg_name) return if 'systemctl' in service_cmd: @@ -116,7 +129,7 @@ def handle_ssh_pwauth(pw_auth, service_cmd=None, service_name="ssh"): else: cmd = list(service_cmd) + [service_name, "restart"] util.subp(cmd) - LOG.debug("Restarted the ssh daemon.") + LOG.debug("Restarted the SSH daemon.") def handle(_name, cfg, cloud, log, args): @@ -151,7 +164,7 @@ def handle(_name, cfg, cloud, log, args): if user: plist = ["%s:%s" % (user, password)] else: - log.warn("No default or defined user to change password for.") + log.warning("No default or defined user to change password for.") errors = [] if plist: @@ -160,24 +173,27 @@ def handle(_name, cfg, cloud, log, args): hashed_users = [] randlist = [] users = [] - prog = re.compile(r'\$[1,2a,2y,5,6](\$.+){2}') + # N.B. This regex is included in the documentation (i.e. the module + # docstring), so any changes to it should be reflected there. 
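The pattern that comment refers to can be sanity-checked standalone (the sample values below are fabricated):

import re

prog = re.compile(r'\$(1|2a|2y|5|6)(\$.+){2}')

assert prog.match('$6$examplesalt$examplehash')      # SHA-512 crypt hash
assert prog.match('$2y$10$examplesalt.examplehash')  # bcrypt-style hash
assert prog.match('RANDOM') is None                  # keyword -> random pw
assert prog.match('plain-text-password') is None     # plain text
# Note the handler additionally requires that a hashed value contain no ':'.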
+ prog = re.compile(r'\$(1|2a|2y|5|6)(\$.+){2}') for line in plist: u, p = line.split(':', 1) if prog.match(p) is not None and ":" not in p: - hashed_plist_in.append("%s:%s" % (u, p)) + hashed_plist_in.append(line) hashed_users.append(u) else: + # in this else branch, we potentially change the password + # hence, a deviation from .append(line) if p == "R" or p == "RANDOM": p = rand_user_password() randlist.append("%s:%s" % (u, p)) plist_in.append("%s:%s" % (u, p)) users.append(u) - ch_in = '\n'.join(plist_in) + '\n' if users: try: log.debug("Changing password for %s:", users) - util.subp(['chpasswd'], ch_in) + chpasswd(cloud.distro, ch_in) except Exception as e: errors.append(e) util.logexc( @@ -187,7 +203,7 @@ def handle(_name, cfg, cloud, log, args): if hashed_users: try: log.debug("Setting hashed password for %s:", hashed_users) - util.subp(['chpasswd', '-e'], hashed_ch_in) + chpasswd(cloud.distro, hashed_ch_in, hashed=True) except Exception as e: errors.append(e) util.logexc( @@ -203,7 +219,7 @@ def handle(_name, cfg, cloud, log, args): expired_users = [] for u in users: try: - util.subp(['passwd', '--expire', u]) + cloud.distro.expire_passwd(u) expired_users.append(u) except Exception as e: errors.append(e) @@ -220,7 +236,17 @@ def handle(_name, cfg, cloud, log, args): raise errors[-1] -def rand_user_password(pwlen=9): +def rand_user_password(pwlen=20): return util.rand_str(pwlen, select_from=PW_SET) + +def chpasswd(distro, plist_in, hashed=False): + if util.is_FreeBSD(): + for pentry in plist_in.splitlines(): + u, p = pentry.split(":") + distro.set_passwd(u, p, hashed=hashed) + else: + cmd = ['chpasswd'] + (['-e'] if hashed else []) + util.subp(cmd, plist_in) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_snap_config.py b/cloudinit/config/cc_snap_config.py deleted file mode 100644 index afe297ee..00000000 --- a/cloudinit/config/cc_snap_config.py +++ /dev/null @@ -1,184 +0,0 @@ -# Copyright (C) 2016 Canonical Ltd. -# -# Author: Ryan Harper <ryan.harper@canonical.com> -# -# This file is part of cloud-init. See LICENSE file for license information. - -# RELEASE_BLOCKER: Remove this deprecated module in 18.3 -""" -Snap Config ------------ -**Summary:** snap_config modules allows configuration of snapd. - -**Deprecated**: Use :ref:`snap` module instead. This module will not exist -in cloud-init 18.3. - -This module uses the same ``snappy`` namespace for configuration but -acts only only a subset of the configuration. - -If ``assertions`` is set and the user has included a list of assertions -then cloud-init will collect the assertions into a single assertion file -and invoke ``snap ack <path to file with assertions>`` which will attempt -to load the provided assertions into the snapd assertion database. - -If ``email`` is set, this value is used to create an authorized user for -contacting and installing snaps from the Ubuntu Store. This is done by -calling ``snap create-user`` command. - -If ``known`` is set to True, then it is expected the user also included -an assertion of type ``system-user``. When ``snap create-user`` is called -cloud-init will append '--known' flag which instructs snapd to look for -a system-user assertion with the details. If ``known`` is not set, then -``snap create-user`` will contact the Ubuntu SSO for validating and importing -a system-user for the instance. - -.. note:: - If the system is already managed, then cloud-init will not attempt to - create a system-user. 
- -**Internal name:** ``cc_snap_config`` - -**Module frequency:** per instance - -**Supported distros:** any with 'snapd' available - -**Config keys**:: - - #cloud-config - snappy: - assertions: - - | - <assertion 1> - - | - <assertion 2> - email: user@user.org - known: true - -""" - -from cloudinit import log as logging -from cloudinit.settings import PER_INSTANCE -from cloudinit import util - -LOG = logging.getLogger(__name__) - -frequency = PER_INSTANCE -SNAPPY_CMD = "snap" -ASSERTIONS_FILE = "/var/lib/cloud/instance/snapd.assertions" - - -""" -snappy: - assertions: - - | - <snap assertion 1> - - | - <snap assertion 2> - email: foo@foo.io - known: true -""" - - -def add_assertions(assertions=None): - """Import list of assertions. - - Import assertions by concatenating each assertion into a - string separated by a '\n'. Write this string to a instance file and - then invoke `snap ack /path/to/file` and check for errors. - If snap exits 0, then all assertions are imported. - """ - if not assertions: - assertions = [] - - if not isinstance(assertions, list): - raise ValueError( - 'assertion parameter was not a list: {assertions}'.format( - assertions=assertions)) - - snap_cmd = [SNAPPY_CMD, 'ack'] - combined = "\n".join(assertions) - if len(combined) == 0: - raise ValueError("Assertion list is empty") - - for asrt in assertions: - LOG.debug('Acking: %s', asrt.split('\n')[0:2]) - - util.write_file(ASSERTIONS_FILE, combined.encode('utf-8')) - util.subp(snap_cmd + [ASSERTIONS_FILE], capture=True) - - -def add_snap_user(cfg=None): - """Add a snap system-user if provided with email under snappy config. - - - Check that system is not already managed. - - Check that if using a system-user assertion, that it's - imported into snapd. - - Returns a dictionary to be passed to Distro.create_user - """ - - if not cfg: - cfg = {} - - if not isinstance(cfg, dict): - raise ValueError( - 'configuration parameter was not a dict: {cfg}'.format(cfg=cfg)) - - snapuser = cfg.get('email', None) - if not snapuser: - return - - usercfg = { - 'snapuser': snapuser, - 'known': cfg.get('known', False), - } - - # query if we're already registered - out, _ = util.subp([SNAPPY_CMD, 'managed'], capture=True) - if out.strip() == "true": - LOG.warning('This device is already managed. ' - 'Skipping system-user creation') - return - - if usercfg.get('known'): - # Check that we imported a system-user assertion - out, _ = util.subp([SNAPPY_CMD, 'known', 'system-user'], - capture=True) - if len(out) == 0: - LOG.error('Missing "system-user" assertion. ' - 'Check "snappy" user-data assertions.') - return - - return usercfg - - -def handle(name, cfg, cloud, log, args): - cfgin = cfg.get('snappy') - if not cfgin: - LOG.debug('No snappy config provided, skipping') - return - - log.warning( - 'DEPRECATION: snap_config module will be dropped in 18.3 release.' - ' Use snap module instead') - if not(util.system_is_snappy()): - LOG.debug("%s: system not snappy", name) - return - - assertions = cfgin.get('assertions', []) - if len(assertions) > 0: - LOG.debug('Importing user-provided snap assertions') - add_assertions(assertions) - - # Create a snap user if requested. - # Snap systems contact the store with a user's email - # and extract information needed to create a local user. - # A user may provide a 'system-user' assertion which includes - # the required information. Using such an assertion to create - # a local user requires specifying 'known: true' in the supplied - # user-data. 
- usercfg = add_snap_user(cfg=cfgin) - if usercfg: - cloud.distro.create_user(usercfg.get('snapuser'), **usercfg) - -# vi: ts=4 expandtab diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py deleted file mode 100644 index 15bee2d3..00000000 --- a/cloudinit/config/cc_snappy.py +++ /dev/null @@ -1,321 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -# RELEASE_BLOCKER: Remove this deprecated module in 18.3 -""" -Snappy ------- -**Summary:** snappy modules allows configuration of snappy. - -**Deprecated**: Use :ref:`snap` module instead. This module will not exist -in cloud-init 18.3. - -The below example config config would install ``etcd``, and then install -``pkg2.smoser`` with a ``<config-file>`` argument where ``config-file`` has -``config-blob`` inside it. If ``pkgname`` is installed already, then -``snappy config pkgname <file>`` -will be called where ``file`` has ``pkgname-config-blob`` as its content. - -Entries in ``config`` can be namespaced or non-namespaced for a package. -In either case, the config provided to snappy command is non-namespaced. -The package name is provided as it appears. - -If ``packages_dir`` has files in it that end in ``.snap``, then they are -installed. Given 3 files: - - - <packages_dir>/foo.snap - - <packages_dir>/foo.config - - <packages_dir>/bar.snap - -cloud-init will invoke: - - - snappy install <packages_dir>/foo.snap <packages_dir>/foo.config - - snappy install <packages_dir>/bar.snap - -.. note:: - that if provided a ``config`` entry for ``ubuntu-core``, then - cloud-init will invoke: snappy config ubuntu-core <config> - Allowing you to configure ubuntu-core in this way. - -The ``ssh_enabled`` key controls the system's ssh service. The default value -is ``auto``. Options are: - - - **True:** enable ssh service - - **False:** disable ssh service - - **auto:** enable ssh service if either ssh keys have been provided - or user has requested password authentication (ssh_pwauth). - -**Internal name:** ``cc_snappy`` - -**Module frequency:** per instance - -**Supported distros:** ubuntu - -**Config keys**:: - - #cloud-config - snappy: - system_snappy: auto - ssh_enabled: auto - packages: [etcd, pkg2.smoser] - config: - pkgname: - key2: value2 - pkg2: - key1: value1 - packages_dir: '/writable/user-data/cloud-init/snaps' -""" - -from cloudinit import log as logging -from cloudinit.settings import PER_INSTANCE -from cloudinit import temp_utils -from cloudinit import util - -import glob -import os - -LOG = logging.getLogger(__name__) - -frequency = PER_INSTANCE -SNAPPY_CMD = "snappy" -NAMESPACE_DELIM = '.' 
- -BUILTIN_CFG = { - 'packages': [], - 'packages_dir': '/writable/user-data/cloud-init/snaps', - 'ssh_enabled': "auto", - 'system_snappy': "auto", - 'config': {}, -} - -distros = ['ubuntu'] - - -def parse_filename(fname): - fname = os.path.basename(fname) - fname_noext = fname.rpartition(".")[0] - name = fname_noext.partition("_")[0] - shortname = name.partition(".")[0] - return(name, shortname, fname_noext) - - -def get_fs_package_ops(fspath): - if not fspath: - return [] - ops = [] - for snapfile in sorted(glob.glob(os.path.sep.join([fspath, '*.snap']))): - (name, shortname, fname_noext) = parse_filename(snapfile) - cfg = None - for cand in (fname_noext, name, shortname): - fpcand = os.path.sep.join([fspath, cand]) + ".config" - if os.path.isfile(fpcand): - cfg = fpcand - break - ops.append(makeop('install', name, config=None, - path=snapfile, cfgfile=cfg)) - return ops - - -def makeop(op, name, config=None, path=None, cfgfile=None): - return({'op': op, 'name': name, 'config': config, 'path': path, - 'cfgfile': cfgfile}) - - -def get_package_config(configs, name): - # load the package's config from the configs dict. - # prefer full-name entry (config-example.canonical) - # over short name entry (config-example) - if name in configs: - return configs[name] - return configs.get(name.partition(NAMESPACE_DELIM)[0]) - - -def get_package_ops(packages, configs, installed=None, fspath=None): - # get the install an config operations that should be done - if installed is None: - installed = read_installed_packages() - short_installed = [p.partition(NAMESPACE_DELIM)[0] for p in installed] - - if not packages: - packages = [] - if not configs: - configs = {} - - ops = [] - ops += get_fs_package_ops(fspath) - - for name in packages: - ops.append(makeop('install', name, get_package_config(configs, name))) - - to_install = [f['name'] for f in ops] - short_to_install = [f['name'].partition(NAMESPACE_DELIM)[0] for f in ops] - - for name in configs: - if name in to_install: - continue - shortname = name.partition(NAMESPACE_DELIM)[0] - if shortname in short_to_install: - continue - if name in installed or shortname in short_installed: - ops.append(makeop('config', name, - config=get_package_config(configs, name))) - - # prefer config entries to filepath entries - for op in ops: - if op['op'] != 'install' or not op['cfgfile']: - continue - name = op['name'] - fromcfg = get_package_config(configs, op['name']) - if fromcfg: - LOG.debug("preferring configs[%(name)s] over '%(cfgfile)s'", op) - op['cfgfile'] = None - op['config'] = fromcfg - - return ops - - -def render_snap_op(op, name, path=None, cfgfile=None, config=None): - if op not in ('install', 'config'): - raise ValueError("cannot render op '%s'" % op) - - shortname = name.partition(NAMESPACE_DELIM)[0] - try: - cfg_tmpf = None - if config is not None: - # input to 'snappy config packagename' must have nested data. odd. - # config: - # packagename: - # config - # Note, however, we do not touch config files on disk. 
- nested_cfg = {'config': {shortname: config}} - (fd, cfg_tmpf) = temp_utils.mkstemp() - os.write(fd, util.yaml_dumps(nested_cfg).encode()) - os.close(fd) - cfgfile = cfg_tmpf - - cmd = [SNAPPY_CMD, op] - if op == 'install': - if path: - cmd.append("--allow-unauthenticated") - cmd.append(path) - else: - cmd.append(name) - if cfgfile: - cmd.append(cfgfile) - elif op == 'config': - cmd += [name, cfgfile] - - util.subp(cmd) - - finally: - if cfg_tmpf: - os.unlink(cfg_tmpf) - - -def read_installed_packages(): - ret = [] - for (name, _date, _version, dev) in read_pkg_data(): - if dev: - ret.append(NAMESPACE_DELIM.join([name, dev])) - else: - ret.append(name) - return ret - - -def read_pkg_data(): - out, _err = util.subp([SNAPPY_CMD, "list"]) - pkg_data = [] - for line in out.splitlines()[1:]: - toks = line.split(sep=None, maxsplit=3) - if len(toks) == 3: - (name, date, version) = toks - dev = None - else: - (name, date, version, dev) = toks - pkg_data.append((name, date, version, dev,)) - return pkg_data - - -def disable_enable_ssh(enabled): - LOG.debug("setting enablement of ssh to: %s", enabled) - # do something here that would enable or disable - not_to_be_run = "/etc/ssh/sshd_not_to_be_run" - if enabled: - util.del_file(not_to_be_run) - # this is an indempotent operation - util.subp(["systemctl", "start", "ssh"]) - else: - # this is an indempotent operation - util.subp(["systemctl", "stop", "ssh"]) - util.write_file(not_to_be_run, "cloud-init\n") - - -def set_snappy_command(): - global SNAPPY_CMD - if util.which("snappy-go"): - SNAPPY_CMD = "snappy-go" - elif util.which("snappy"): - SNAPPY_CMD = "snappy" - else: - SNAPPY_CMD = "snap" - LOG.debug("snappy command is '%s'", SNAPPY_CMD) - - -def handle(name, cfg, cloud, log, args): - cfgin = cfg.get('snappy') - if not cfgin: - cfgin = {} - mycfg = util.mergemanydict([cfgin, BUILTIN_CFG]) - - sys_snappy = str(mycfg.get("system_snappy", "auto")) - if util.is_false(sys_snappy): - LOG.debug("%s: System is not snappy. disabling", name) - return - - if sys_snappy.lower() == "auto" and not(util.system_is_snappy()): - LOG.debug("%s: 'auto' mode, and system not snappy", name) - return - - log.warning( - 'DEPRECATION: snappy module will be dropped in 18.3 release.' 
- ' Use snap module instead') - - set_snappy_command() - - pkg_ops = get_package_ops(packages=mycfg['packages'], - configs=mycfg['config'], - fspath=mycfg['packages_dir']) - - fails = [] - for pkg_op in pkg_ops: - try: - render_snap_op(**pkg_op) - except Exception as e: - fails.append((pkg_op, e,)) - LOG.warning("'%s' failed for '%s': %s", - pkg_op['op'], pkg_op['name'], e) - - # Default to disabling SSH - ssh_enabled = mycfg.get('ssh_enabled', "auto") - - # If the user has not explicitly enabled or disabled SSH, then enable it - # when password SSH authentication is requested or there are SSH keys - if ssh_enabled == "auto": - user_ssh_keys = cloud.get_public_ssh_keys() or None - password_auth_enabled = cfg.get('ssh_pwauth', False) - if user_ssh_keys: - LOG.debug("Enabling SSH, ssh keys found in datasource") - ssh_enabled = True - elif cfg.get('ssh_authorized_keys'): - LOG.debug("Enabling SSH, ssh keys found in config") - elif password_auth_enabled: - LOG.debug("Enabling SSH, password authentication requested") - ssh_enabled = True - elif ssh_enabled not in (True, False): - LOG.warning("Unknown value '%s' in ssh_enabled", ssh_enabled) - - disable_enable_ssh(ssh_enabled) - - if fails: - raise Exception("failed to install/configure snaps") - -# vi: ts=4 expandtab diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py index f8f7cb35..163cce99 100755 --- a/cloudinit/config/cc_ssh.py +++ b/cloudinit/config/cc_ssh.py @@ -9,43 +9,23 @@ """ SSH --- -**Summary:** configure ssh and ssh keys +**Summary:** configure SSH and SSH keys (host and authorized) -This module handles most configuration for ssh and ssh keys. Many images have -default ssh keys, which can be removed using ``ssh_deletekeys``. Since removing -default keys is usually the desired behavior this option is enabled by default. +This module handles most configuration for SSH and both host and authorized SSH +keys. -Keys can be added using the ``ssh_keys`` configuration key. The argument to -this config key should be a dictionary entries for the public and private keys -of each desired key type. Entries in the ``ssh_keys`` config dict should -have keys in the format ``<key type>_private`` and ``<key type>_public``, e.g. -``rsa_private: <key>`` and ``rsa_public: <key>``. See below for supported key -types. Not all key types have to be specified, ones left unspecified will not -be used. If this config option is used, then no keys will be generated. +Authorized Keys +^^^^^^^^^^^^^^^ -.. note:: - when specifying private keys in cloud-config, care should be taken to - ensure that the communication between the data source and the instance is - secure +Authorized keys are a list of public SSH keys that are allowed to connect to a +a user account on a system. They are stored in `.ssh/authorized_keys` in that +account's home directory. Authorized keys for the default user defined in +``users`` can be specified using ``ssh_authorized_keys``. Keys +should be specified as a list of public keys. .. note:: - to specify multiline private keys, use yaml multiline syntax - -If no keys are specified using ``ssh_keys``, then keys will be generated using -``ssh-keygen``. By default one public/private pair of each supported key type -will be generated. The key types to generate can be specified using the -``ssh_genkeytypes`` config flag, which accepts a list of key types to use. For -each key type for which this module has been instructed to create a keypair, if -a key of the same type is already present on the system (i.e. 
if -``ssh_deletekeys`` was false), no key will be generated. - -Supported key types for the ``ssh_keys`` and the ``ssh_genkeytypes`` config -flags are: - - - rsa - - dsa - - ecdsa - - ed25519 + see the ``cc_set_passwords`` module documentation to enable/disable SSH + password authentication Root login can be enabled/disabled using the ``disable_root`` config key. Root login options can be manually specified with ``disable_root_opts``. If @@ -55,13 +35,46 @@ root login is disabled, and root login opts are set to:: no-port-forwarding,no-agent-forwarding,no-X11-forwarding -Authorized keys for the default user/first user defined in ``users`` can be -specified using `ssh_authorized_keys``. Keys should be specified as a list of -public keys. +Host Keys +^^^^^^^^^ + +Host keys are for authenticating a specific instance. Many images have default +host SSH keys, which can be removed using ``ssh_deletekeys``. This prevents +re-use of a private host key from an image on multiple machines. Since +removing default host keys is usually the desired behavior this option is +enabled by default. + +Host keys can be added using the ``ssh_keys`` configuration key. The argument +to this config key should be a dictionary entries for the public and private +keys of each desired key type. Entries in the ``ssh_keys`` config dict should +have keys in the format ``<key type>_private`` and ``<key type>_public``, +e.g. ``rsa_private: <key>`` and ``rsa_public: <key>``. See below for supported +key types. Not all key types have to be specified, ones left unspecified will +not be used. If this config option is used, then no keys will be generated. .. note:: - see the ``cc_set_passwords`` module documentation to enable/disable ssh - password authentication + when specifying private host keys in cloud-config, care should be taken to + ensure that the communication between the data source and the instance is + secure + +.. note:: + to specify multiline private host keys, use yaml multiline syntax + +If no host keys are specified using ``ssh_keys``, then keys will be generated +using ``ssh-keygen``. By default one public/private pair of each supported +host key type will be generated. The key types to generate can be specified +using the ``ssh_genkeytypes`` config flag, which accepts a list of host key +types to use. For each host key type for which this module has been instructed +to create a keypair, if a key of the same type is already present on the +system (i.e. if ``ssh_deletekeys`` was false), no key will be generated. + +Supported host key types for the ``ssh_keys`` and the ``ssh_genkeytypes`` +config flags are: + + - rsa + - dsa + - ecdsa + - ed25519 **Internal name:** ``cc_ssh`` @@ -91,6 +104,10 @@ public keys. ssh_authorized_keys: - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEA3FSyQwBI6Z+nCSjUU ... - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZ ... + allow_public_ssh_keys: <true/false> + ssh_publish_hostkeys: + enabled: <true/false> (Defaults to true) + blacklist: <list of key types> (Defaults to [dsa]) """ import glob @@ -104,6 +121,10 @@ from cloudinit import util GENERATE_KEY_NAMES = ['rsa', 'dsa', 'ecdsa', 'ed25519'] KEY_FILE_TPL = '/etc/ssh/ssh_host_%s_key' +PUBLISH_HOST_KEYS = True +# Don't publish the dsa hostkey by default since OpenSSH recommends not using +# it. 
+HOST_KEY_PUBLISH_BLACKLIST = ['dsa'] CONFIG_KEY_TO_FILE = {} PRIV_TO_PUB = {} @@ -176,6 +197,23 @@ def handle(_name, cfg, cloud, log, _args): util.logexc(log, "Failed generating key type %s to " "file %s", keytype, keyfile) + if "ssh_publish_hostkeys" in cfg: + host_key_blacklist = util.get_cfg_option_list( + cfg["ssh_publish_hostkeys"], "blacklist", + HOST_KEY_PUBLISH_BLACKLIST) + publish_hostkeys = util.get_cfg_option_bool( + cfg["ssh_publish_hostkeys"], "enabled", PUBLISH_HOST_KEYS) + else: + host_key_blacklist = HOST_KEY_PUBLISH_BLACKLIST + publish_hostkeys = PUBLISH_HOST_KEYS + + if publish_hostkeys: + hostkeys = get_public_host_keys(blacklist=host_key_blacklist) + try: + cloud.datasource.publish_host_keys(hostkeys) + except Exception: + util.logexc(log, "Publishing host keys failed!") + try: (users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro) (user, _user_config) = ug_util.extract_default(users) @@ -183,14 +221,20 @@ def handle(_name, cfg, cloud, log, _args): disable_root_opts = util.get_cfg_option_str(cfg, "disable_root_opts", ssh_util.DISABLE_USER_OPTS) - keys = cloud.get_public_ssh_keys() or [] + keys = [] + if util.get_cfg_option_bool(cfg, 'allow_public_ssh_keys', True): + keys = cloud.get_public_ssh_keys() or [] + else: + log.debug('Skipping import of publish SSH keys per ' + 'config setting: allow_public_ssh_keys=False') + if "ssh_authorized_keys" in cfg: cfgkeys = cfg["ssh_authorized_keys"] keys.extend(cfgkeys) apply_credentials(keys, user, disable_root, disable_root_opts) except Exception: - util.logexc(log, "Applying ssh credentials failed!") + util.logexc(log, "Applying SSH credentials failed!") def apply_credentials(keys, user, disable_root, disable_root_opts): @@ -209,4 +253,35 @@ def apply_credentials(keys, user, disable_root, disable_root_opts): ssh_util.setup_user_keys(keys, 'root', options=key_prefix) + +def get_public_host_keys(blacklist=None): + """Read host keys from /etc/ssh/*.pub files and return them as a list. + + @param blacklist: List of key types to ignore. e.g. ['dsa', 'rsa'] + @returns: List of keys, each formatted as a two-element tuple. + e.g. [('ssh-rsa', 'AAAAB3Nz...'), ('ssh-ed25519', 'AAAAC3Nx...')] + """ + public_key_file_tmpl = '%s.pub' % (KEY_FILE_TPL,) + key_list = [] + blacklist_files = [] + if blacklist: + # Convert blacklist to filenames: + # 'dsa' -> '/etc/ssh/ssh_host_dsa_key.pub' + blacklist_files = [public_key_file_tmpl % (key_type,) + for key_type in blacklist] + # Get list of public key files and filter out blacklisted files. + file_list = [hostfile for hostfile + in glob.glob(public_key_file_tmpl % ('*',)) + if hostfile not in blacklist_files] + + # Read host key files, retrieve first two fields as a tuple and + # append that tuple to key_list. + for file_name in file_list: + file_contents = util.load_file(file_name) + key_data = file_contents.split() + if key_data and len(key_data) > 1: + key_list.append(tuple(key_data[:2])) + return key_list + + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py index 98b0e665..7ac1c8cf 100755 --- a/cloudinit/config/cc_ssh_authkey_fingerprints.py +++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py @@ -7,11 +7,11 @@ """ SSH Authkey Fingerprints ------------------------ -**Summary:** log fingerprints of user ssh keys +**Summary:** log fingerprints of user SSH keys Write fingerprints of authorized keys for each user to log. This is enabled by default, but can be disabled using ``no_ssh_fingerprints``. 
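Looking back at the cc_ssh addition for a moment: the new ``get_public_host_keys()`` helper behaves like this under a hypothetical /etc/ssh layout (file names and key material are invented):

# Hypothetical /etc/ssh contents:
#   ssh_host_rsa_key.pub      -> 'ssh-rsa AAAAB3Nz... root@host'
#   ssh_host_ed25519_key.pub  -> 'ssh-ed25519 AAAAC3Nx... root@host'
#   ssh_host_dsa_key.pub      -> 'ssh-dss AAAAB3Nz... root@host'
keys = get_public_host_keys(blacklist=['dsa'])
# -> [('ssh-rsa', 'AAAAB3Nz...'), ('ssh-ed25519', 'AAAAC3Nx...')]
# The dsa file is filtered out and the trailing comment field dropped, so
# only (key type, base64 blob) tuples reach datasource.publish_host_keys().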
The hash type for -the keys can be specified, but defaults to ``md5``. +the keys can be specified, but defaults to ``sha256``. **Internal name:** `` cc_ssh_authkey_fingerprints`` @@ -42,7 +42,7 @@ def _split_hash(bin_hash): return split_up -def _gen_fingerprint(b64_text, hash_meth='md5'): +def _gen_fingerprint(b64_text, hash_meth='sha256'): if not b64_text: return '' # TBD(harlowja): Maybe we should feed this into 'ssh -lf'? @@ -65,10 +65,10 @@ def _is_printable_key(entry): return False -def _pprint_key_entries(user, key_fn, key_entries, hash_meth='md5', +def _pprint_key_entries(user, key_fn, key_entries, hash_meth='sha256', prefix='ci-info: '): if not key_entries: - message = ("%sno authorized ssh keys fingerprints found for user %s.\n" + message = ("%sno authorized SSH keys fingerprints found for user %s.\n" % (prefix, user)) util.multi_log(message) return @@ -98,10 +98,10 @@ def _pprint_key_entries(user, key_fn, key_entries, hash_meth='md5', def handle(name, cfg, cloud, log, _args): if util.is_true(cfg.get('no_ssh_fingerprints', False)): log.debug(("Skipping module named %s, " - "logging of ssh fingerprints disabled"), name) + "logging of SSH fingerprints disabled"), name) return - hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "md5") + hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "sha256") (users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro) for (user_name, _cfg) in users.items(): (key_fn, key_entries) = ssh_util.extract_authorized_keys(user_name) diff --git a/cloudinit/config/cc_ssh_import_id.py b/cloudinit/config/cc_ssh_import_id.py index 6b46dafe..63f87298 100755 --- a/cloudinit/config/cc_ssh_import_id.py +++ b/cloudinit/config/cc_ssh_import_id.py @@ -9,9 +9,9 @@ """ SSH Import Id ------------- -**Summary:** import ssh id +**Summary:** import SSH id -This module imports ssh keys from either a public keyserver, usually launchpad +This module imports SSH keys from either a public keyserver, usually launchpad or github using ``ssh-import-id``. Keys are referenced by the username they are associated with on the keyserver. The keyserver can be specified by prepending either ``lp:`` for launchpad or ``gh:`` for github to the username. @@ -98,12 +98,12 @@ def import_ssh_ids(ids, user, log): raise exc cmd = ["sudo", "-Hu", user, "ssh-import-id"] + ids - log.debug("Importing ssh ids for user %s.", user) + log.debug("Importing SSH ids for user %s.", user) try: util.subp(cmd, capture=False) except util.ProcessExecutionError as exc: - util.logexc(log, "Failed to run command to import %s ssh ids", user) + util.logexc(log, "Failed to run command to import %s SSH ids", user) raise exc # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_ubuntu_advantage.py b/cloudinit/config/cc_ubuntu_advantage.py index 5e082bd6..8b6d2a1a 100644 --- a/cloudinit/config/cc_ubuntu_advantage.py +++ b/cloudinit/config/cc_ubuntu_advantage.py @@ -1,150 +1,141 @@ -# Copyright (C) 2018 Canonical Ltd. -# # This file is part of cloud-init. See LICENSE file for license information. 
-"""Ubuntu advantage: manage ubuntu-advantage offerings from Canonical.""" +"""ubuntu_advantage: Configure Ubuntu Advantage support services""" -import sys from textwrap import dedent -from cloudinit import log as logging from cloudinit.config.schema import ( get_schema_doc, validate_cloudconfig_schema) +from cloudinit import log as logging from cloudinit.settings import PER_INSTANCE -from cloudinit.subp import prepend_base_command from cloudinit import util -distros = ['ubuntu'] -frequency = PER_INSTANCE +UA_URL = 'https://ubuntu.com/advantage' -LOG = logging.getLogger(__name__) +distros = ['ubuntu'] schema = { 'id': 'cc_ubuntu_advantage', 'name': 'Ubuntu Advantage', - 'title': 'Install, configure and manage ubuntu-advantage offerings', + 'title': 'Configure Ubuntu Advantage support services', 'description': dedent("""\ - This module provides configuration options to setup ubuntu-advantage - subscriptions. - - .. note:: - Both ``commands`` value can be either a dictionary or a list. If - the configuration provided is a dictionary, the keys are only used - to order the execution of the commands and the dictionary is - merged with any vendor-data ubuntu-advantage configuration - provided. If a ``commands`` is provided as a list, any vendor-data - ubuntu-advantage ``commands`` are ignored. - - Ubuntu-advantage ``commands`` is a dictionary or list of - ubuntu-advantage commands to run on the deployed machine. - These commands can be used to enable or disable subscriptions to - various ubuntu-advantage products. See 'man ubuntu-advantage' for more - information on supported subcommands. - - .. note:: - Each command item can be a string or list. If the item is a list, - 'ubuntu-advantage' can be omitted and it will automatically be - inserted as part of the command. + Attach machine to an existing Ubuntu Advantage support contract and + enable or disable support services such as Livepatch, ESM, + FIPS and FIPS Updates. When attaching a machine to Ubuntu Advantage, + one can also specify services to enable. When the 'enable' + list is present, any named service will be enabled and all absent + services will remain disabled. + + Note that when enabling FIPS or FIPS updates you will need to schedule + a reboot to ensure the machine is running the FIPS-compliant kernel. + See :ref:`Power State Change` for information on how to configure + cloud-init to perform this reboot. """), 'distros': distros, 'examples': [dedent("""\ - # Enable Extended Security Maintenance using your service auth token - ubuntu-advantage: - commands: - 00: ubuntu-advantage enable-esm <token> - """), dedent("""\ - # Enable livepatch by providing your livepatch token + # Attach the machine to an Ubuntu Advantage support contract with a + # UA contract token obtained from %s. + ubuntu_advantage: + token: <ua_contract_token> + """ % UA_URL), dedent("""\ + # Attach the machine to an Ubuntu Advantage support contract enabling + # only fips and esm services. Services will only be enabled if + # the environment supports said service. Otherwise warnings will + # be logged for incompatible services specified. ubuntu-advantage: - commands: - 00: ubuntu-advantage enable-livepatch <livepatch-token> - + token: <ua_contract_token> + enable: + - fips + - esm """), dedent("""\ - # Convenience: the ubuntu-advantage command can be omitted when - # specifying commands as a list and 'ubuntu-advantage' will - # automatically be prepended. 
- # The following commands are equivalent + # Attach the machine to an Ubuntu Advantage support contract and enable + # the FIPS service. Perform a reboot once cloud-init has + # completed. + power_state: + mode: reboot ubuntu-advantage: - commands: - 00: ['enable-livepatch', 'my-token'] - 01: ['ubuntu-advantage', 'enable-livepatch', 'my-token'] - 02: ubuntu-advantage enable-livepatch my-token - 03: 'ubuntu-advantage enable-livepatch my-token' - """)], + token: <ua_contract_token> + enable: + - fips + """)], 'frequency': PER_INSTANCE, 'type': 'object', 'properties': { - 'ubuntu-advantage': { + 'ubuntu_advantage': { 'type': 'object', 'properties': { - 'commands': { - 'type': ['object', 'array'], # Array of strings or dict - 'items': { - 'oneOf': [ - {'type': 'array', 'items': {'type': 'string'}}, - {'type': 'string'}] - }, - 'additionalItems': False, # Reject non-string & non-list - 'minItems': 1, - 'minProperties': 1, + 'enable': { + 'type': 'array', + 'items': {'type': 'string'}, + }, + 'token': { + 'type': 'string', + 'description': ( + 'A contract token obtained from %s.' % UA_URL) } }, - 'additionalProperties': False, # Reject keys not in schema - 'required': ['commands'] + 'required': ['token'], + 'additionalProperties': False } } } -# TODO schema for 'assertions' and 'commands' are too permissive at the moment. -# Once python-jsonschema supports schema draft 6 add support for arbitrary -# object keys with 'patternProperties' constraint to validate string values. - __doc__ = get_schema_doc(schema) # Supplement python help() -UA_CMD = "ubuntu-advantage" - - -def run_commands(commands): - """Run the commands provided in ubuntu-advantage:commands config. +LOG = logging.getLogger(__name__) - Commands are run individually. Any errors are collected and reported - after attempting all commands. - @param commands: A list or dict containing commands to run. Keys of a - dict will be used to order the commands provided as dict values. - """ - if not commands: - return - LOG.debug('Running user-provided ubuntu-advantage commands') - if isinstance(commands, dict): - # Sort commands based on dictionary key - commands = [v for _, v in sorted(commands.items())] - elif not isinstance(commands, list): - raise TypeError( - 'commands parameter was not a list or dict: {commands}'.format( - commands=commands)) - - fixed_ua_commands = prepend_base_command('ubuntu-advantage', commands) - - cmd_failures = [] - for command in fixed_ua_commands: - shell = isinstance(command, str) - try: - util.subp(command, shell=shell, status_cb=sys.stderr.write) - except util.ProcessExecutionError as e: - cmd_failures.append(str(e)) - if cmd_failures: - msg = ( - 'Failures running ubuntu-advantage commands:\n' - '{cmd_failures}'.format( - cmd_failures=cmd_failures)) +def configure_ua(token=None, enable=None): + """Call ua commandline client to attach or enable services.""" + error = None + if not token: + error = ('ubuntu_advantage: token must be provided') + LOG.error(error) + raise RuntimeError(error) + + if enable is None: + enable = [] + elif isinstance(enable, str): + LOG.warning('ubuntu_advantage: enable should be a list, not' + ' a string; treating as a single enable') + enable = [enable] + elif not isinstance(enable, list): + LOG.warning('ubuntu_advantage: enable should be a list, not' + ' a %s; skipping enabling services', + type(enable).__name__) + enable = [] + + attach_cmd = ['ua', 'attach', token] + LOG.debug('Attaching to Ubuntu Advantage. 
%s', ' '.join(attach_cmd)) + try: + util.subp(attach_cmd) + except util.ProcessExecutionError as e: + msg = 'Failure attaching Ubuntu Advantage:\n{error}'.format( + error=str(e)) util.logexc(LOG, msg) raise RuntimeError(msg) + enable_errors = [] + for service in enable: + try: + cmd = ['ua', 'enable', service] + util.subp(cmd, capture=True) + except util.ProcessExecutionError as e: + enable_errors.append((service, e)) + if enable_errors: + for service, error in enable_errors: + msg = 'Failure enabling "{service}":\n{error}'.format( + service=service, error=str(error)) + util.logexc(LOG, msg) + raise RuntimeError( + 'Failure enabling Ubuntu Advantage service(s): {}'.format( + ', '.join('"{}"'.format(service) + for service, _ in enable_errors))) def maybe_install_ua_tools(cloud): """Install ubuntu-advantage-tools if not present.""" - if util.which('ubuntu-advantage'): + if util.which('ua'): return try: cloud.distro.update_package_sources() @@ -159,14 +150,28 @@ def maybe_install_ua_tools(cloud): def handle(name, cfg, cloud, log, args): - cfgin = cfg.get('ubuntu-advantage') - if cfgin is None: - LOG.debug(("Skipping module named %s," - " no 'ubuntu-advantage' key in configuration"), name) + ua_section = None + if 'ubuntu-advantage' in cfg: + LOG.warning('Deprecated configuration key "ubuntu-advantage" provided.' + ' Expected underscore delimited "ubuntu_advantage"; will' + ' attempt to continue.') + ua_section = cfg['ubuntu-advantage'] + if 'ubuntu_advantage' in cfg: + ua_section = cfg['ubuntu_advantage'] + if ua_section is None: + LOG.debug("Skipping module named %s," + " no 'ubuntu_advantage' configuration found", name) return - validate_cloudconfig_schema(cfg, schema) + if 'commands' in ua_section: + msg = ( + 'Deprecated configuration "ubuntu-advantage: commands" provided.' + ' Expected "token"') + LOG.error(msg) + raise RuntimeError(msg) + maybe_install_ua_tools(cloud) - run_commands(cfgin.get('commands', [])) + configure_ua(token=ua_section.get('token'), + enable=ua_section.get('enable')) # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_ubuntu_drivers.py b/cloudinit/config/cc_ubuntu_drivers.py new file mode 100644 index 00000000..297451d6 --- /dev/null +++ b/cloudinit/config/cc_ubuntu_drivers.py @@ -0,0 +1,160 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
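Circling back to the Ubuntu Advantage handler just completed, a minimal driver of ``configure_ua()``; the token and service names are placeholders mirroring the schema examples:

ua_section = {'token': '<ua_contract_token>', 'enable': ['fips', 'esm']}

configure_ua(token=ua_section.get('token'),
             enable=ua_section.get('enable'))
# -> util.subp(['ua', 'attach', '<ua_contract_token>'])
# -> util.subp(['ua', 'enable', 'fips'], capture=True)
# -> util.subp(['ua', 'enable', 'esm'], capture=True)
# Per-service failures are collected and re-raised as one RuntimeError.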
+ +"""Ubuntu Drivers: Interact with third party drivers in Ubuntu.""" + +import os +from textwrap import dedent + +from cloudinit.config.schema import ( + get_schema_doc, validate_cloudconfig_schema) +from cloudinit import log as logging +from cloudinit.settings import PER_INSTANCE +from cloudinit import temp_utils +from cloudinit import type_utils +from cloudinit import util + +LOG = logging.getLogger(__name__) + +frequency = PER_INSTANCE +distros = ['ubuntu'] +schema = { + 'id': 'cc_ubuntu_drivers', + 'name': 'Ubuntu Drivers', + 'title': 'Interact with third party drivers in Ubuntu.', + 'description': dedent("""\ + This module interacts with the 'ubuntu-drivers' command to install + third party driver packages."""), + 'distros': distros, + 'examples': [dedent("""\ + drivers: + nvidia: + license-accepted: true + """)], + 'frequency': frequency, + 'type': 'object', + 'properties': { + 'drivers': { + 'type': 'object', + 'additionalProperties': False, + 'properties': { + 'nvidia': { + 'type': 'object', + 'additionalProperties': False, + 'required': ['license-accepted'], + 'properties': { + 'license-accepted': { + 'type': 'boolean', + 'description': ("Do you accept the NVIDIA driver" + " license?"), + }, + 'version': { + 'type': 'string', + 'description': ( + 'The version of the driver to install (e.g.' + ' "390", "410"). Defaults to the latest' + ' version.'), + }, + }, + }, + }, + }, + }, +} +OLD_UBUNTU_DRIVERS_STDERR_NEEDLE = ( + "ubuntu-drivers: error: argument <command>: invalid choice: 'install'") + +__doc__ = get_schema_doc(schema) # Supplement python help() + + +# Use a debconf template to configure a global debconf variable +# (linux/nvidia/latelink) setting this to "true" allows the +# 'linux-restricted-modules' deb to accept the NVIDIA EULA and the package +# will automatically link the drivers to the running kernel. + +# EOL_XENIAL: can then drop this script and use python3-debconf which is only +# available in Bionic and later. Can't use python3-debconf currently as it +# isn't in Xenial and doesn't yet support X_LOADTEMPLATEFILE debconf command. + +NVIDIA_DEBCONF_CONTENT = """\ +Template: linux/nvidia/latelink +Type: boolean +Default: true +Description: Late-link NVIDIA kernel modules? + Enable this to link the NVIDIA kernel modules in cloud-init and + make them available for use. +""" + +NVIDIA_DRIVER_LATELINK_DEBCONF_SCRIPT = """\ +#!/bin/sh +# Allow cloud-init to trigger EULA acceptance via registering a debconf +# template to set linux/nvidia/latelink true +. /usr/share/debconf/confmodule +db_x_loadtemplatefile "$1" cloud-init +""" + + +def install_drivers(cfg, pkg_install_func): + if not isinstance(cfg, dict): + raise TypeError( + "'drivers' config expected dict, found '%s': %s" % + (type_utils.obj_name(cfg), cfg)) + + cfgpath = 'nvidia/license-accepted' + # Call translate_bool to ensure that we treat string values like "yes" as + # acceptance and _don't_ treat string values like "nah" as acceptance + # because they're True-ish + nv_acc = util.translate_bool(util.get_cfg_by_path(cfg, cfgpath)) + if not nv_acc: + LOG.debug("Not installing NVIDIA drivers. %s=%s", cfgpath, nv_acc) + return + + if not util.which('ubuntu-drivers'): + LOG.debug("'ubuntu-drivers' command not available. 
" + "Installing ubuntu-drivers-common") + pkg_install_func(['ubuntu-drivers-common']) + + driver_arg = 'nvidia' + version_cfg = util.get_cfg_by_path(cfg, 'nvidia/version') + if version_cfg: + driver_arg += ':{}'.format(version_cfg) + + LOG.debug("Installing and activating NVIDIA drivers (%s=%s, version=%s)", + cfgpath, nv_acc, version_cfg if version_cfg else 'latest') + + # Register and set debconf selection linux/nvidia/latelink = true + tdir = temp_utils.mkdtemp(needs_exe=True) + debconf_file = os.path.join(tdir, 'nvidia.template') + debconf_script = os.path.join(tdir, 'nvidia-debconf.sh') + try: + util.write_file(debconf_file, NVIDIA_DEBCONF_CONTENT) + util.write_file( + debconf_script, + util.encode_text(NVIDIA_DRIVER_LATELINK_DEBCONF_SCRIPT), + mode=0o755) + util.subp([debconf_script, debconf_file]) + except Exception as e: + util.logexc( + LOG, "Failed to register NVIDIA debconf template: %s", str(e)) + raise + finally: + if os.path.isdir(tdir): + util.del_dir(tdir) + + try: + util.subp(['ubuntu-drivers', 'install', '--gpgpu', driver_arg]) + except util.ProcessExecutionError as exc: + if OLD_UBUNTU_DRIVERS_STDERR_NEEDLE in exc.stderr: + LOG.warning('the available version of ubuntu-drivers is' + ' too old to perform requested driver installation') + elif 'No drivers found for installation.' in exc.stdout: + LOG.warning('ubuntu-drivers found no drivers for installation') + raise + + +def handle(name, cfg, cloud, log, _args): + if "drivers" not in cfg: + log.debug("Skipping module named %s, no 'drivers' key in config", name) + return + + validate_cloudconfig_schema(cfg, schema) + install_drivers(cfg['drivers'], cloud.distro.install_packages) diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py index c96eede1..03fffb96 100644 --- a/cloudinit/config/cc_update_etc_hosts.py +++ b/cloudinit/config/cc_update_etc_hosts.py @@ -62,8 +62,8 @@ def handle(name, cfg, cloud, log, _args): if util.translate_bool(manage_hosts, addons=['template']): (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) if not hostname: - log.warn(("Option 'manage_etc_hosts' was set," - " but no hostname was found")) + log.warning(("Option 'manage_etc_hosts' was set," + " but no hostname was found")) return # Render from a template file @@ -80,8 +80,8 @@ def handle(name, cfg, cloud, log, _args): elif manage_hosts == "localhost": (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) if not hostname: - log.warn(("Option 'manage_etc_hosts' was set," - " but no hostname was found")) + log.warning(("Option 'manage_etc_hosts' was set," + " but no hostname was found")) return log.debug("Managing localhost in /etc/hosts") diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py index c32a743a..13764e60 100644 --- a/cloudinit/config/cc_users_groups.py +++ b/cloudinit/config/cc_users_groups.py @@ -51,14 +51,14 @@ config keys for an entry in ``users`` are as follows: a Snappy user through ``snap create-user``. If an Ubuntu SSO account is associated with the address, username and SSH keys will be requested from there. Default: none - - ``ssh_authorized_keys``: Optional. List of ssh keys to add to user's + - ``ssh_authorized_keys``: Optional. List of SSH keys to add to user's authkeys file. Default: none. This key can not be combined with ``ssh_redirect_user``. - ``ssh_import_id``: Optional. SSH id to import for user. Default: none. This key can not be combined with ``ssh_redirect_user``. - ``ssh_redirect_user``: Optional. 
Boolean set to true to disable SSH - logins for this user. When specified, all cloud meta-data public ssh - keys will be set up in a disabled state for this username. Any ssh login + logins for this user. When specified, all cloud meta-data public SSH + keys will be set up in a disabled state for this username. Any SSH login as this username will timeout and prompt with a message to login instead as the configured <default_username> for this instance. Default: false. This key can not be combined with ``ssh_import_id`` or diff --git a/cloudinit/config/cc_vyos.py b/cloudinit/config/cc_vyos.py index bd595397..e51ed7f2 100644 --- a/cloudinit/config/cc_vyos.py +++ b/cloudinit/config/cc_vyos.py @@ -24,17 +24,22 @@ import os import re import sys import ast -import subprocess -from ipaddress import IPv4Network +import ipaddress from cloudinit import stages from cloudinit import util from cloudinit.distros import ug_util from cloudinit.settings import PER_INSTANCE +from cloudinit import handlers +from cloudinit import log as logging from vyos.configtree import ConfigTree +# configure logging +logger = logging.getLogger(__name__) +logger.setLevel(logging.DEBUG) + frequency = PER_INSTANCE class VyosError(Exception): @@ -43,6 +48,7 @@ class VyosError(Exception): """ pass +# configure user account with password def set_pass_login(config, user, password, encrypted_pass): if encrypted_pass: config.set(['system', 'login', 'user', user, 'authentication', 'encrypted-password'], value=password, replace=True) @@ -50,16 +56,15 @@ def set_pass_login(config, user, password, encrypted_pass): config.set(['system', 'login', 'user', user, 'authentication', 'plaintext-password'], value=password, replace=True) config.set_tag(['system', 'login', 'user']) - config.set(['system', 'login', 'user', user, 'level'], value='admin', replace=True) - -def set_ssh_login(config, log, user, key_string, key_x): +# configure user account with ssh key +def set_ssh_login(config, user, key_string, key_x): key_type = None key_data = None key_name = None if key_string == '': - log.debug("No keys found.") + logger.error("No keys found.") return key_parts = key_string.split(None) @@ -72,11 +77,11 @@ def set_ssh_login(config, log, user, key_string, key_x): key_data = key if not key_type: - util.logexc(log, 'Key type not defined, wrong ssh key format.') + logger.error("Key type not defined, wrong ssh key format.") return if not key_data: - util.logexc(log, 'Key base64 not defined, wrong ssh key format.') + logger.error("Key base64 not defined, wrong ssh key format.") return if len(key_parts) > 2: @@ -91,9 +96,9 @@ def set_ssh_login(config, log, user, key_string, key_x): config.set(['system', 'login', 'user', user, 'authentication', 'public-keys', key_name , 'type'], value=key_type, replace=True) config.set_tag(['system', 'login', 'user']) config.set_tag(['system', 'login', 'user', user, 'authentication', 'public-keys']) - config.set(['system', 'login', 'user', user, 'level'], value='admin', replace=True) +# configure system parameters from OVF template def set_config_ovf(config, hostname, metadata): ip_0 = metadata['ip0'] mask_0 = metadata['netmask0'] @@ -105,7 +110,7 @@ def set_config_ovf(config, hostname, metadata): APIDEBUG = metadata['APIDEBUG'] if ip_0 and ip_0 != 'null' and mask_0 and mask_0 != 'null' and gateway and gateway != 'null': - cidr = str(IPv4Network('0.0.0.0/' + mask_0).prefixlen) + cidr = str(ipaddress.IPv4Network('0.0.0.0/' + mask_0).prefixlen) ipcidr = ip_0 + '/' + cidr config.set(['interfaces', 'ethernet', 'eth0', 
'address'], value=ipcidr, replace=True)
@@ -148,59 +153,83 @@ def set_config_ovf(config, hostname, metadata):
         config.set(['system', 'host-name'], value='vyos', replace=True)
 
-def set_config_interfaces(config, interface):
-    for item in interface['subnets']:
-        if item['type'] == 'static':
-            if 'address' in item and runcommand("/usr/bin/ipaddrcheck --is-ipv4 " + item['address']) == 0:
-                cidr = str(IPv4Network('0.0.0.0/' + item['netmask']).prefixlen)
-                ipcidr = item['address'] + '/' + cidr
-                config.set(['interfaces', 'ethernet', interface['name'], 'address'], value=ipcidr, replace=True)
-                config.set_tag(['interfaces', 'ethernet'])
-                if item['gateway']:
-                    config.set(['protocols', 'static', 'route', '0.0.0.0/0', 'next-hop'], value=item['gateway'], replace=True)
-                    config.set_tag(['protocols', 'static', 'route'])
-                    config.set_tag(['protocols', 'static', 'route', '0.0.0.0/0', 'next-hop'])
+# configure interface
+def set_config_interfaces(config, iface_name, iface_config):
+    # configure DHCP client
+    if 'dhcp4' in iface_config:
+        if iface_config['dhcp4'] == True:
+            config.set(['interfaces', 'ethernet', iface_name, 'address'], value='dhcp', replace=True)
+            config.set_tag(['interfaces', 'ethernet'])
+    if 'dhcp6' in iface_config:
+        if iface_config['dhcp6'] == True:
+            config.set(['interfaces', 'ethernet', iface_name, 'address'], value='dhcp6', replace=True)
+            config.set_tag(['interfaces', 'ethernet'])
 
-            if 'address' in item and runcommand("/usr/bin/ipaddrcheck --is-ipv6 " + item['address']) == 0:
-                config.set(['interfaces', 'ethernet', interface['name'], 'address'], value=item['address'], replace=False)
-                config.set_tag(['interfaces', 'ethernet'])
-                if item['gateway']:
-                    config.set(['protocols', 'static', 'route6', '::/0', 'next-hop'], value=item['gateway'], replace=True)
-                    config.set_tag(['protocols', 'static', 'route6'])
-                    config.set_tag(['protocols', 'static', 'route6', '::/0', 'next-hop'])
-        else:
-            config.set(['interfaces', 'ethernet', interface['name'], 'address'], value='dhcp', replace=True)
+    # configure static addresses
+    if 'addresses' in iface_config:
+        for item in iface_config['addresses']:
+            config.set(['interfaces', 'ethernet', iface_name, 'address'], value=item, replace=True)
             config.set_tag(['interfaces', 'ethernet'])
 
+    # configure gateways
+    if 'gateway4' in iface_config:
+        config.set(['protocols', 'static', 'route', '0.0.0.0/0', 'next-hop'], value=iface_config['gateway4'], replace=True)
+        config.set_tag(['protocols', 'static', 'route'])
+        config.set_tag(['protocols', 'static', 'route', '0.0.0.0/0', 'next-hop'])
+    if 'gateway6' in iface_config:
+        config.set(['protocols', 'static', 'route6', '::/0', 'next-hop'], value=iface_config['gateway6'], replace=True)
+        config.set_tag(['protocols', 'static', 'route6'])
+        config.set_tag(['protocols', 'static', 'route6', '::/0', 'next-hop'])
+
+    # configure MTU
+    if 'mtu' in iface_config:
+        config.set(['interfaces', 'ethernet', iface_name, 'mtu'], value=iface_config['mtu'], replace=True)
+        config.set_tag(['interfaces', 'ethernet'])
+
+    # configure routes
+    if 'routes' in iface_config:
+        for item in iface_config['routes']:
+            try:
+                if ipaddress.ip_network(item['to']).version == 4:
+                    config.set(['protocols', 'static', 'route', item['to'], 'next-hop'], value=item['via'], replace=True)
+                    config.set_tag(['protocols', 'static', 'route'])
+                    config.set_tag(['protocols', 'static', 'route', item['to'], 'next-hop'])
+                if ipaddress.ip_network(item['to']).version == 6:
+                    config.set(['protocols', 'static', 'route6', item['to'], 'next-hop'], value=item['via'], replace=True)
+                    config.set_tag(['protocols', 'static', 'route6'])
+                    config.set_tag(['protocols', 'static', 'route6', item['to'], 'next-hop'])
+            except Exception as err:
+                logger.error("Unable to determine IP protocol version: {}".format(err))
 
-def set_config_nameserver(config, log, interface):
-    if 'address' in interface:
-        for server in interface['address']:
-            config.set(['system', 'name-server'], value=server, replace=False)
-    else:
-        log.debug("No name-servers found.")
-    if 'search' in interface:
-        for server in interface['search']:
-            config.set(['system', 'domain-search'], value=server, replace=False)
-    else:
-        log.debug("No search-domains found.")
+    # configure nameservers
+    if 'nameservers' in iface_config:
+        if 'search' in iface_config['nameservers']:
+            for item in iface_config['nameservers']['search']:
+                config.set(['system', 'domain-search'], value=item, replace=False)
+        if 'addresses' in iface_config['nameservers']:
+            for item in iface_config['nameservers']['addresses']:
+                config.set(['system', 'name-server'], value=item, replace=False)
 
+# configure DHCP client for interface
 def set_config_dhcp(config):
     config.set(['interfaces', 'ethernet', 'eth0', 'address'], value='dhcp', replace=True)
     config.set_tag(['interfaces', 'ethernet'])
 
+# configure SSH server service
 def set_config_ssh(config):
     config.set(['service', 'ssh'], replace=True)
     config.set(['service', 'ssh', 'port'], value='22', replace=True)
     config.set(['service', 'ssh', 'client-keepalive-interval'], value='180', replace=True)
 
+# configure hostname
 def set_config_hostname(config, hostname):
     config.set(['system', 'host-name'], value=hostname, replace=True)
 
+# configure SSH, eth0 interface and hostname
 def set_config_cloud(config, hostname):
     config.set(['service', 'ssh'], replace=True)
     config.set(['service', 'ssh', 'port'], value='22', replace=True)
@@ -210,16 +239,7 @@
     config.set(['system', 'host-name'], value=hostname, replace=True)
 
-def runcommand(cmd):
-    proc = subprocess.Popen(cmd,
-                            stdout=subprocess.PIPE,
-                            stderr=subprocess.PIPE,
-                            shell=True,
-                            universal_newlines=True)
-    std_out, std_err = proc.communicate()
-    return proc.returncode
-
-
+# main config handler
 def handle(name, cfg, cloud, log, _args):
     init = stages.Init()
     dc = init.fetch()
@@ -256,7 +276,7 @@ def handle(name, cfg, cloud, log, _args):
                 vyos_keys = metadata['public-keys']
 
                 for ssh_key in vyos_keys:
-                    set_ssh_login(config, log, user, ssh_key, key_x)
+                    set_ssh_login(config, user, ssh_key, key_x)
                     key_x = key_x + 1
         else:
             encrypted_pass = False
@@ -284,20 +304,17 @@
                 vyos_keys.extend(cfgkeys)
 
             for ssh_key in vyos_keys:
-                set_ssh_login(config, log, user, ssh_key, key_x)
+                set_ssh_login(config, user, ssh_key, key_x)
                 key_x = key_x + 1
 
         if 'OVF' in dc.dsname:
             set_config_ovf(config, hostname, metadata)
             key_y = 1
         elif netcfg:
-            for interface in netcfg['config']:
-                if interface['type'] == 'physical':
-                    key_y = 1
-                    set_config_interfaces(config, interface)
-
-                if interface['type'] == 'nameserver':
-                    set_config_nameserver(config, log, interface)
+            if 'ethernets' in netcfg:
+                key_y = 1
+                for interface_name, interface_config in netcfg['ethernets'].items():
+                    set_config_interfaces(config, interface_name, interface_config)
 
         set_config_ssh(config)
         set_config_hostname(config, hostname)
@@ -313,4 +330,4 @@
         with open(cfg_file_name, 'w') as f:
             f.write(config.to_string())
     except Exception as e:
-        util.logexc(log, "Failed to write configs into file %s error %s", file_name, e)
+        logger.error("Failed to write configs into file %s error %s", cfg_file_name, e)
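For reference, the rewritten set_config_interfaces() above consumes netplan-style (network config v2) 'ethernets' entries instead of the old 'subnets' list. A minimal, self-contained sketch of the input shape it expects and the VyOS config paths it maps to; the interface name and all addresses are illustrative placeholders, and the recording list merely stands in for vyos.configtree.ConfigTree:

    import ipaddress

    iface_name = 'eth0'  # hypothetical interface
    iface_config = {     # netplan-style values, all illustrative
        'addresses': ['192.0.2.10/24'],
        'gateway4': '192.0.2.1',
        'mtu': 1500,
        'routes': [{'to': '198.51.100.0/24', 'via': '192.0.2.254'}],
        'nameservers': {'search': ['example.com'],
                        'addresses': ['192.0.2.53']},
    }

    set_calls = []  # stands in for vyos.configtree.ConfigTree.set()
    for addr in iface_config.get('addresses', []):
        set_calls.append(
            (['interfaces', 'ethernet', iface_name, 'address'], addr))
    if 'gateway4' in iface_config:
        set_calls.append((['protocols', 'static', 'route', '0.0.0.0/0',
                           'next-hop'], iface_config['gateway4']))
    for route in iface_config.get('routes', []):
        # v4 routes go under 'route', v6 routes under 'route6'
        table = ('route' if ipaddress.ip_network(route['to']).version == 4
                 else 'route6')
        set_calls.append((['protocols', 'static', table, route['to'],
                           'next-hop'], route['via']))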
diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py
index 0b6546e2..bd87e9e5 100644
--- a/cloudinit/config/cc_write_files.py
+++ b/cloudinit/config/cc_write_files.py
@@ -57,7 +57,6 @@ binary gzip data can be specified and will be decoded before being written.
 
 import base64
 import os
-import six
 
 from cloudinit import log as logging
 from cloudinit.settings import PER_INSTANCE
@@ -126,7 +125,7 @@ def decode_perms(perm, default):
     if perm is None:
         return default
     try:
-        if isinstance(perm, six.integer_types + (float,)):
+        if isinstance(perm, (int, float)):
             # Just 'downcast' it (if a float)
             return int(perm)
         else:
diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py
index 6a42f499..3673166a 100644
--- a/cloudinit/config/cc_yum_add_repo.py
+++ b/cloudinit/config/cc_yum_add_repo.py
@@ -30,13 +30,9 @@ entry, the config entry will be skipped.
 # any repository configuration options (see man yum.conf)
 """
 
+import io
 import os
-
-try:
-    from configparser import ConfigParser
-except ImportError:
-    from ConfigParser import ConfigParser
-import six
+from configparser import ConfigParser
 
 from cloudinit import util
 
@@ -57,7 +53,7 @@ def _format_repo_value(val):
         # Can handle 'lists' in certain cases
         # See: https://linux.die.net/man/5/yum.conf
         return "\n".join([_format_repo_value(v) for v in val])
-    if not isinstance(val, six.string_types):
+    if not isinstance(val, str):
         return str(val)
     return val
@@ -72,7 +68,7 @@ def _format_repository_config(repo_id, repo_config):
         # For now assume that people using this know
         # the format of yum and don't verify keys/values further
         to_be.set(repo_id, k, _format_repo_value(v))
-    to_be_stream = six.StringIO()
+    to_be_stream = io.StringIO()
     to_be.write(to_be_stream)
     to_be_stream.seek(0)
     lines = to_be_stream.readlines()
@@ -113,16 +109,16 @@ def handle(name, cfg, _cloud, log, _args):
         missing_required = 0
         for req_field in ['baseurl']:
             if req_field not in repo_config:
-                log.warn(("Repository %s does not contain a %s"
-                          " configuration 'required' entry"),
-                         repo_id, req_field)
+                log.warning(("Repository %s does not contain a %s"
+                             " configuration 'required' entry"),
+                            repo_id, req_field)
                 missing_required += 1
         if not missing_required:
             repo_configs[canon_repo_id] = repo_config
             repo_locations[canon_repo_id] = repo_fn_pth
         else:
-            log.warn("Repository %s is missing %s required fields, skipping!",
-                     repo_id, missing_required)
+            log.warning("Repository %s is missing %s required fields, "
+                        "skipping!", repo_id, missing_required)
 
     for (c_repo_id, path) in repo_locations.items():
         repo_blob = _format_repository_config(c_repo_id,
                                               repo_configs.get(c_repo_id))
diff --git a/cloudinit/config/cc_zypper_add_repo.py b/cloudinit/config/cc_zypper_add_repo.py
index aba26952..05855b0c 100644
--- a/cloudinit/config/cc_zypper_add_repo.py
+++ b/cloudinit/config/cc_zypper_add_repo.py
@@ -7,7 +7,6 @@
 
 import configobj
 import os
-from six import string_types
 from textwrap import dedent
 
 from cloudinit.config.schema import get_schema_doc
@@ -110,7 +109,7 @@ def _format_repo_value(val):
         return 1 if val else 0
     if isinstance(val, (list, tuple)):
         return "\n    ".join([_format_repo_value(v) for v in val])
-    if not isinstance(val, string_types):
+    if not isinstance(val, str):
         return str(val)
     return val
diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py
index 080a6d06..807c3eee 100644
--- a/cloudinit/config/schema.py
+++ b/cloudinit/config/schema.py
@@ -367,7 +367,7 @@ def
handle_schema_args(name, args): if not args.annotate: error(str(e)) except RuntimeError as e: - error(str(e)) + error(str(e)) else: print("Valid cloud-config file {0}".format(args.config_file)) if args.doc: diff --git a/cloudinit/config/tests/test_apt_pipelining.py b/cloudinit/config/tests/test_apt_pipelining.py new file mode 100644 index 00000000..2a6bb10b --- /dev/null +++ b/cloudinit/config/tests/test_apt_pipelining.py @@ -0,0 +1,28 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +"""Tests cc_apt_pipelining handler""" + +import cloudinit.config.cc_apt_pipelining as cc_apt_pipelining + +from cloudinit.tests.helpers import CiTestCase, mock + + +class TestAptPipelining(CiTestCase): + + @mock.patch('cloudinit.config.cc_apt_pipelining.util.write_file') + def test_not_disabled_by_default(self, m_write_file): + """ensure that default behaviour is to not disable pipelining""" + cc_apt_pipelining.handle('foo', {}, None, mock.MagicMock(), None) + self.assertEqual(0, m_write_file.call_count) + + @mock.patch('cloudinit.config.cc_apt_pipelining.util.write_file') + def test_false_disables_pipelining(self, m_write_file): + """ensure that pipelining can be disabled with correct config""" + cc_apt_pipelining.handle( + 'foo', {'apt_pipelining': 'false'}, None, mock.MagicMock(), None) + self.assertEqual(1, m_write_file.call_count) + args, _ = m_write_file.call_args + self.assertEqual(cc_apt_pipelining.DEFAULT_FILE, args[0]) + self.assertIn('Pipeline-Depth "0"', args[1]) + +# vi: ts=4 expandtab diff --git a/cloudinit/config/tests/test_set_passwords.py b/cloudinit/config/tests/test_set_passwords.py index b051ec82..8247c388 100644 --- a/cloudinit/config/tests/test_set_passwords.py +++ b/cloudinit/config/tests/test_set_passwords.py @@ -1,6 +1,6 @@ # This file is part of cloud-init. See LICENSE file for license information. -import mock +from unittest import mock from cloudinit.config import cc_set_passwords as setpass from cloudinit.tests.helpers import CiTestCase @@ -45,7 +45,7 @@ class TestHandleSshPwauth(CiTestCase): """If config is not updated, then no system restart should be done.""" setpass.handle_ssh_pwauth(True) m_subp.assert_not_called() - self.assertIn("No need to restart ssh", self.logs.getvalue()) + self.assertIn("No need to restart SSH", self.logs.getvalue()) @mock.patch(MODPATH + "update_ssh_config", return_value=True) @mock.patch(MODPATH + "util.subp") @@ -68,4 +68,88 @@ class TestHandleSshPwauth(CiTestCase): m_update.assert_called_with({optname: optval}) m_subp.assert_not_called() + +class TestSetPasswordsHandle(CiTestCase): + """Test cc_set_passwords.handle""" + + with_logs = True + + def setUp(self): + super(TestSetPasswordsHandle, self).setUp() + self.add_patch('cloudinit.config.cc_set_passwords.sys.stderr', 'm_err') + + def test_handle_on_empty_config(self, *args): + """handle logs that no password has changed when config is empty.""" + cloud = self.tmp_cloud(distro='ubuntu') + setpass.handle( + 'IGNORED', cfg={}, cloud=cloud, log=self.logger, args=[]) + self.assertEqual( + "DEBUG: Leaving SSH config 'PasswordAuthentication' unchanged. 
" + 'ssh_pwauth=None\n', + self.logs.getvalue()) + + @mock.patch(MODPATH + "util.subp") + def test_handle_on_chpasswd_list_parses_common_hashes(self, m_subp): + """handle parses command password hashes.""" + cloud = self.tmp_cloud(distro='ubuntu') + valid_hashed_pwds = [ + 'root:$2y$10$8BQjxjVByHA/Ee.O1bCXtO8S7Y5WojbXWqnqYpUW.BrPx/' + 'Dlew1Va', + 'ubuntu:$6$5hOurLPO$naywm3Ce0UlmZg9gG2Fl9acWCVEoakMMC7dR52q' + 'SDexZbrN9z8yHxhUM2b.sxpguSwOlbOQSW/HpXazGGx3oo1'] + cfg = {'chpasswd': {'list': valid_hashed_pwds}} + with mock.patch(MODPATH + 'util.subp') as m_subp: + setpass.handle( + 'IGNORED', cfg=cfg, cloud=cloud, log=self.logger, args=[]) + self.assertIn( + 'DEBUG: Handling input for chpasswd as list.', + self.logs.getvalue()) + self.assertIn( + "DEBUG: Setting hashed password for ['root', 'ubuntu']", + self.logs.getvalue()) + self.assertEqual( + [mock.call(['chpasswd', '-e'], + '\n'.join(valid_hashed_pwds) + '\n')], + m_subp.call_args_list) + + @mock.patch(MODPATH + "util.is_FreeBSD") + @mock.patch(MODPATH + "util.subp") + def test_freebsd_calls_custom_pw_cmds_to_set_and_expire_passwords( + self, m_subp, m_is_freebsd): + """FreeBSD calls custom pw commands instead of chpasswd and passwd""" + m_is_freebsd.return_value = True + cloud = self.tmp_cloud(distro='freebsd') + valid_pwds = ['ubuntu:passw0rd'] + cfg = {'chpasswd': {'list': valid_pwds}} + setpass.handle( + 'IGNORED', cfg=cfg, cloud=cloud, log=self.logger, args=[]) + self.assertEqual([ + mock.call(['pw', 'usermod', 'ubuntu', '-h', '0'], data='passw0rd', + logstring="chpasswd for ubuntu"), + mock.call(['pw', 'usermod', 'ubuntu', '-p', '01-Jan-1970'])], + m_subp.call_args_list) + + @mock.patch(MODPATH + "util.is_FreeBSD") + @mock.patch(MODPATH + "util.subp") + def test_handle_on_chpasswd_list_creates_random_passwords(self, m_subp, + m_is_freebsd): + """handle parses command set random passwords.""" + m_is_freebsd.return_value = False + cloud = self.tmp_cloud(distro='ubuntu') + valid_random_pwds = [ + 'root:R', + 'ubuntu:RANDOM'] + cfg = {'chpasswd': {'expire': 'false', 'list': valid_random_pwds}} + with mock.patch(MODPATH + 'util.subp') as m_subp: + setpass.handle( + 'IGNORED', cfg=cfg, cloud=cloud, log=self.logger, args=[]) + self.assertIn( + 'DEBUG: Handling input for chpasswd as list.', + self.logs.getvalue()) + self.assertNotEqual( + [mock.call(['chpasswd'], + '\n'.join(valid_random_pwds) + '\n')], + m_subp.call_args_list) + + # vi: ts=4 expandtab diff --git a/cloudinit/config/tests/test_snap.py b/cloudinit/config/tests/test_snap.py index 3c472891..cbbb173d 100644 --- a/cloudinit/config/tests/test_snap.py +++ b/cloudinit/config/tests/test_snap.py @@ -1,7 +1,7 @@ # This file is part of cloud-init. See LICENSE file for license information. import re -from six import StringIO +from io import StringIO from cloudinit.config.cc_snap import ( ASSERTIONS_FILE, add_assertions, handle, maybe_install_squashfuse, diff --git a/cloudinit/config/tests/test_ssh.py b/cloudinit/config/tests/test_ssh.py index c8a4271f..0c554414 100644 --- a/cloudinit/config/tests/test_ssh.py +++ b/cloudinit/config/tests/test_ssh.py @@ -1,9 +1,13 @@ # This file is part of cloud-init. See LICENSE file for license information. +import os.path from cloudinit.config import cc_ssh from cloudinit import ssh_util from cloudinit.tests.helpers import CiTestCase, mock +import logging + +LOG = logging.getLogger(__name__) MODPATH = "cloudinit.config.cc_ssh." @@ -12,6 +16,25 @@ MODPATH = "cloudinit.config.cc_ssh." 
class TestHandleSsh(CiTestCase): """Test cc_ssh handling of ssh config.""" + def _publish_hostkey_test_setup(self): + self.test_hostkeys = { + 'dsa': ('ssh-dss', 'AAAAB3NzaC1kc3MAAACB'), + 'ecdsa': ('ecdsa-sha2-nistp256', 'AAAAE2VjZ'), + 'ed25519': ('ssh-ed25519', 'AAAAC3NzaC1lZDI'), + 'rsa': ('ssh-rsa', 'AAAAB3NzaC1yc2EAAA'), + } + self.test_hostkey_files = [] + hostkey_tmpdir = self.tmp_dir() + for key_type in ['dsa', 'ecdsa', 'ed25519', 'rsa']: + key_data = self.test_hostkeys[key_type] + filename = 'ssh_host_%s_key.pub' % key_type + filepath = os.path.join(hostkey_tmpdir, filename) + self.test_hostkey_files.append(filepath) + with open(filepath, 'w') as f: + f.write(' '.join(key_data)) + + cc_ssh.KEY_FILE_TPL = os.path.join(hostkey_tmpdir, 'ssh_host_%s_key') + def test_apply_credentials_with_user(self, m_setup_keys): """Apply keys for the given user and root.""" keys = ["key1"] @@ -64,9 +87,10 @@ class TestHandleSsh(CiTestCase): # Mock os.path.exits to True to short-circuit the key writing logic m_path_exists.return_value = True m_nug.return_value = ([], {}) + cc_ssh.PUBLISH_HOST_KEYS = False cloud = self.tmp_cloud( distro='ubuntu', metadata={'public-keys': keys}) - cc_ssh.handle("name", cfg, cloud, None, None) + cc_ssh.handle("name", cfg, cloud, LOG, None) options = ssh_util.DISABLE_USER_OPTS.replace("$USER", "NONE") options = options.replace("$DISABLE_USER", "root") m_glob.assert_called_once_with('/etc/ssh/ssh_host_*key*') @@ -82,6 +106,31 @@ class TestHandleSsh(CiTestCase): @mock.patch(MODPATH + "glob.glob") @mock.patch(MODPATH + "ug_util.normalize_users_groups") @mock.patch(MODPATH + "os.path.exists") + def test_dont_allow_public_ssh_keys(self, m_path_exists, m_nug, + m_glob, m_setup_keys): + """Test allow_public_ssh_keys=False ignores ssh public keys from + platform. 
+ """ + cfg = {"allow_public_ssh_keys": False} + keys = ["key1"] + user = "clouduser" + m_glob.return_value = [] # Return no matching keys to prevent removal + # Mock os.path.exits to True to short-circuit the key writing logic + m_path_exists.return_value = True + m_nug.return_value = ({user: {"default": user}}, {}) + cloud = self.tmp_cloud( + distro='ubuntu', metadata={'public-keys': keys}) + cc_ssh.handle("name", cfg, cloud, LOG, None) + + options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user) + options = options.replace("$DISABLE_USER", "root") + self.assertEqual([mock.call(set(), user), + mock.call(set(), "root", options=options)], + m_setup_keys.call_args_list) + + @mock.patch(MODPATH + "glob.glob") + @mock.patch(MODPATH + "ug_util.normalize_users_groups") + @mock.patch(MODPATH + "os.path.exists") def test_handle_no_cfg_and_default_root(self, m_path_exists, m_nug, m_glob, m_setup_keys): """Test handle with no config and a default distro user.""" @@ -94,7 +143,7 @@ class TestHandleSsh(CiTestCase): m_nug.return_value = ({user: {"default": user}}, {}) cloud = self.tmp_cloud( distro='ubuntu', metadata={'public-keys': keys}) - cc_ssh.handle("name", cfg, cloud, None, None) + cc_ssh.handle("name", cfg, cloud, LOG, None) options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user) options = options.replace("$DISABLE_USER", "root") @@ -119,7 +168,7 @@ class TestHandleSsh(CiTestCase): m_nug.return_value = ({user: {"default": user}}, {}) cloud = self.tmp_cloud( distro='ubuntu', metadata={'public-keys': keys}) - cc_ssh.handle("name", cfg, cloud, None, None) + cc_ssh.handle("name", cfg, cloud, LOG, None) options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user) options = options.replace("$DISABLE_USER", "root") @@ -144,8 +193,153 @@ class TestHandleSsh(CiTestCase): cloud = self.tmp_cloud( distro='ubuntu', metadata={'public-keys': keys}) cloud.get_public_ssh_keys = mock.Mock(return_value=keys) - cc_ssh.handle("name", cfg, cloud, None, None) + cc_ssh.handle("name", cfg, cloud, LOG, None) self.assertEqual([mock.call(set(keys), user), mock.call(set(keys), "root", options="")], m_setup_keys.call_args_list) + + @mock.patch(MODPATH + "glob.glob") + @mock.patch(MODPATH + "ug_util.normalize_users_groups") + @mock.patch(MODPATH + "os.path.exists") + def test_handle_publish_hostkeys_default( + self, m_path_exists, m_nug, m_glob, m_setup_keys): + """Test handle with various configs for ssh_publish_hostkeys.""" + self._publish_hostkey_test_setup() + cc_ssh.PUBLISH_HOST_KEYS = True + keys = ["key1"] + user = "clouduser" + # Return no matching keys for first glob, test keys for second. 
+ m_glob.side_effect = iter([ + [], + self.test_hostkey_files, + ]) + # Mock os.path.exits to True to short-circuit the key writing logic + m_path_exists.return_value = True + m_nug.return_value = ({user: {"default": user}}, {}) + cloud = self.tmp_cloud( + distro='ubuntu', metadata={'public-keys': keys}) + cloud.datasource.publish_host_keys = mock.Mock() + + cfg = {} + expected_call = [self.test_hostkeys[key_type] for key_type + in ['ecdsa', 'ed25519', 'rsa']] + cc_ssh.handle("name", cfg, cloud, LOG, None) + self.assertEqual([mock.call(expected_call)], + cloud.datasource.publish_host_keys.call_args_list) + + @mock.patch(MODPATH + "glob.glob") + @mock.patch(MODPATH + "ug_util.normalize_users_groups") + @mock.patch(MODPATH + "os.path.exists") + def test_handle_publish_hostkeys_config_enable( + self, m_path_exists, m_nug, m_glob, m_setup_keys): + """Test handle with various configs for ssh_publish_hostkeys.""" + self._publish_hostkey_test_setup() + cc_ssh.PUBLISH_HOST_KEYS = False + keys = ["key1"] + user = "clouduser" + # Return no matching keys for first glob, test keys for second. + m_glob.side_effect = iter([ + [], + self.test_hostkey_files, + ]) + # Mock os.path.exits to True to short-circuit the key writing logic + m_path_exists.return_value = True + m_nug.return_value = ({user: {"default": user}}, {}) + cloud = self.tmp_cloud( + distro='ubuntu', metadata={'public-keys': keys}) + cloud.datasource.publish_host_keys = mock.Mock() + + cfg = {'ssh_publish_hostkeys': {'enabled': True}} + expected_call = [self.test_hostkeys[key_type] for key_type + in ['ecdsa', 'ed25519', 'rsa']] + cc_ssh.handle("name", cfg, cloud, LOG, None) + self.assertEqual([mock.call(expected_call)], + cloud.datasource.publish_host_keys.call_args_list) + + @mock.patch(MODPATH + "glob.glob") + @mock.patch(MODPATH + "ug_util.normalize_users_groups") + @mock.patch(MODPATH + "os.path.exists") + def test_handle_publish_hostkeys_config_disable( + self, m_path_exists, m_nug, m_glob, m_setup_keys): + """Test handle with various configs for ssh_publish_hostkeys.""" + self._publish_hostkey_test_setup() + cc_ssh.PUBLISH_HOST_KEYS = True + keys = ["key1"] + user = "clouduser" + # Return no matching keys for first glob, test keys for second. + m_glob.side_effect = iter([ + [], + self.test_hostkey_files, + ]) + # Mock os.path.exits to True to short-circuit the key writing logic + m_path_exists.return_value = True + m_nug.return_value = ({user: {"default": user}}, {}) + cloud = self.tmp_cloud( + distro='ubuntu', metadata={'public-keys': keys}) + cloud.datasource.publish_host_keys = mock.Mock() + + cfg = {'ssh_publish_hostkeys': {'enabled': False}} + cc_ssh.handle("name", cfg, cloud, LOG, None) + self.assertFalse(cloud.datasource.publish_host_keys.call_args_list) + cloud.datasource.publish_host_keys.assert_not_called() + + @mock.patch(MODPATH + "glob.glob") + @mock.patch(MODPATH + "ug_util.normalize_users_groups") + @mock.patch(MODPATH + "os.path.exists") + def test_handle_publish_hostkeys_config_blacklist( + self, m_path_exists, m_nug, m_glob, m_setup_keys): + """Test handle with various configs for ssh_publish_hostkeys.""" + self._publish_hostkey_test_setup() + cc_ssh.PUBLISH_HOST_KEYS = True + keys = ["key1"] + user = "clouduser" + # Return no matching keys for first glob, test keys for second. 
+        m_glob.side_effect = iter([
+            [],
+            self.test_hostkey_files,
+        ])
+        # Mock os.path.exits to True to short-circuit the key writing logic
+        m_path_exists.return_value = True
+        m_nug.return_value = ({user: {"default": user}}, {})
+        cloud = self.tmp_cloud(
+            distro='ubuntu', metadata={'public-keys': keys})
+        cloud.datasource.publish_host_keys = mock.Mock()
+
+        cfg = {'ssh_publish_hostkeys': {'enabled': True,
+                                        'blacklist': ['dsa', 'rsa']}}
+        expected_call = [self.test_hostkeys[key_type] for key_type
+                         in ['ecdsa', 'ed25519']]
+        cc_ssh.handle("name", cfg, cloud, LOG, None)
+        self.assertEqual([mock.call(expected_call)],
+                         cloud.datasource.publish_host_keys.call_args_list)
+
+    @mock.patch(MODPATH + "glob.glob")
+    @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+    @mock.patch(MODPATH + "os.path.exists")
+    def test_handle_publish_hostkeys_empty_blacklist(
+            self, m_path_exists, m_nug, m_glob, m_setup_keys):
+        """Test handle with various configs for ssh_publish_hostkeys."""
+        self._publish_hostkey_test_setup()
+        cc_ssh.PUBLISH_HOST_KEYS = True
+        keys = ["key1"]
+        user = "clouduser"
+        # Return no matching keys for first glob, test keys for second.
+        m_glob.side_effect = iter([
+            [],
+            self.test_hostkey_files,
+        ])
+        # Mock os.path.exits to True to short-circuit the key writing logic
+        m_path_exists.return_value = True
+        m_nug.return_value = ({user: {"default": user}}, {})
+        cloud = self.tmp_cloud(
+            distro='ubuntu', metadata={'public-keys': keys})
+        cloud.datasource.publish_host_keys = mock.Mock()
+
+        cfg = {'ssh_publish_hostkeys': {'enabled': True,
+                                        'blacklist': []}}
+        expected_call = [self.test_hostkeys[key_type] for key_type
+                         in ['dsa', 'ecdsa', 'ed25519', 'rsa']]
+        cc_ssh.handle("name", cfg, cloud, LOG, None)
+        self.assertEqual([mock.call(expected_call)],
+                         cloud.datasource.publish_host_keys.call_args_list)
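The five publish_hostkeys tests above all drive cc_ssh.handle() with variations of one small config knob. A sketch of the config shapes they pin down, written as dict literals just as the tests themselves do (key-type names mirror the test fixtures; everything else is illustrative):

    # Shapes exercised by the TestHandleSsh publish_hostkeys tests above.
    default_cfg = {}  # falls back to the cc_ssh.PUBLISH_HOST_KEYS default
    enabled_cfg = {'ssh_publish_hostkeys': {'enabled': True}}
    disabled_cfg = {'ssh_publish_hostkeys': {'enabled': False}}
    filtered_cfg = {'ssh_publish_hostkeys': {'enabled': True,
                                             'blacklist': ['dsa', 'rsa']}}
    # With filtered_cfg, only the ecdsa and ed25519 host keys are passed
    # to cloud.datasource.publish_host_keys(); an empty blacklist
    # publishes all discovered key types.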
diff --git a/cloudinit/config/tests/test_ubuntu_advantage.py b/cloudinit/config/tests/test_ubuntu_advantage.py
index b7cf9bee..8c4161ef 100644
--- a/cloudinit/config/tests/test_ubuntu_advantage.py
+++ b/cloudinit/config/tests/test_ubuntu_advantage.py
@@ -1,10 +1,7 @@
 # This file is part of cloud-init. See LICENSE file for license information.
 
-import re
-from six import StringIO
-
 from cloudinit.config.cc_ubuntu_advantage import (
-    handle, maybe_install_ua_tools, run_commands, schema)
+    configure_ua, handle, maybe_install_ua_tools, schema)
 from cloudinit.config.schema import validate_cloudconfig_schema
 from cloudinit import util
 from cloudinit.tests.helpers import (
@@ -20,90 +17,120 @@ class FakeCloud(object):
         self.distro = distro
 
-class TestRunCommands(CiTestCase):
+class TestConfigureUA(CiTestCase):
 
     with_logs = True
     allowed_subp = [CiTestCase.SUBP_SHELL_TRUE]
 
     def setUp(self):
-        super(TestRunCommands, self).setUp()
+        super(TestConfigureUA, self).setUp()
         self.tmp = self.tmp_dir()
 
     @mock.patch('%s.util.subp' % MPATH)
-    def test_run_commands_on_empty_list(self, m_subp):
-        """When provided with an empty list, run_commands does nothing."""
-        run_commands([])
-        self.assertEqual('', self.logs.getvalue())
-        m_subp.assert_not_called()
-
-    def test_run_commands_on_non_list_or_dict(self):
-        """When provided an invalid type, run_commands raises an error."""
-        with self.assertRaises(TypeError) as context_manager:
-            run_commands(commands="I'm Not Valid")
+    def test_configure_ua_attach_error(self, m_subp):
+        """Errors from ua attach command are raised."""
+        m_subp.side_effect = util.ProcessExecutionError(
+            'Invalid token SomeToken')
+        with self.assertRaises(RuntimeError) as context_manager:
+            configure_ua(token='SomeToken')
         self.assertEqual(
-            "commands parameter was not a list or dict: I'm Not Valid",
+            'Failure attaching Ubuntu Advantage:\nUnexpected error while'
+            ' running command.\nCommand: -\nExit code: -\nReason: -\n'
+            'Stdout: Invalid token SomeToken\nStderr: -',
             str(context_manager.exception))
 
-    def test_run_command_logs_commands_and_exit_codes_to_stderr(self):
-        """All exit codes are logged to stderr."""
-        outfile = self.tmp_path('output.log', dir=self.tmp)
-
-        cmd1 = 'echo "HI" >> %s' % outfile
-        cmd2 = 'bogus command'
-        cmd3 = 'echo "MOM" >> %s' % outfile
-        commands = [cmd1, cmd2, cmd3]
-
-        mock_path = '%s.sys.stderr' % MPATH
-        with mock.patch(mock_path, new_callable=StringIO) as m_stderr:
-            with self.assertRaises(RuntimeError) as context_manager:
-                run_commands(commands=commands)
-
-        self.assertIsNotNone(
-            re.search(r'bogus: (command )?not found',
-                      str(context_manager.exception)),
-            msg='Expected bogus command not found')
-        expected_stderr_log = '\n'.join([
-            'Begin run command: {cmd}'.format(cmd=cmd1),
-            'End run command: exit(0)',
-            'Begin run command: {cmd}'.format(cmd=cmd2),
-            'ERROR: End run command: exit(127)',
-            'Begin run command: {cmd}'.format(cmd=cmd3),
-            'End run command: exit(0)\n'])
-        self.assertEqual(expected_stderr_log, m_stderr.getvalue())
-
-    def test_run_command_as_lists(self):
-        """When commands are specified as a list, run them in order."""
-        outfile = self.tmp_path('output.log', dir=self.tmp)
-
-        cmd1 = 'echo "HI" >> %s' % outfile
-        cmd2 = 'echo "MOM" >> %s' % outfile
-        commands = [cmd1, cmd2]
-        with mock.patch('%s.sys.stderr' % MPATH, new_callable=StringIO):
-            run_commands(commands=commands)
+    @mock.patch('%s.util.subp' % MPATH)
+    def test_configure_ua_attach_with_token(self, m_subp):
+        """When token is provided, attach the machine to ua using the token."""
+        configure_ua(token='SomeToken')
+        m_subp.assert_called_once_with(['ua', 'attach', 'SomeToken'])
+        self.assertEqual(
+            'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
+            self.logs.getvalue())
+
+    @mock.patch('%s.util.subp' % MPATH)
+    def test_configure_ua_attach_on_service_error(self, m_subp):
+        """All services should be enabled and then any failures raised."""
+        def fake_subp(cmd, capture=None):
+            fail_cmds = [['ua', 'enable', svc] for svc in ['esm', 'cc']]
+            if cmd in fail_cmds and capture:
+                svc = cmd[-1]
+                raise util.ProcessExecutionError(
+                    'Invalid {} credentials'.format(svc.upper()))
+
+        m_subp.side_effect = fake_subp
+
+        with self.assertRaises(RuntimeError) as context_manager:
+            configure_ua(token='SomeToken', enable=['esm', 'cc', 'fips'])
+        self.assertEqual(
+            m_subp.call_args_list,
+            [mock.call(['ua', 'attach', 'SomeToken']),
+             mock.call(['ua', 'enable', 'esm'], capture=True),
+             mock.call(['ua', 'enable', 'cc'], capture=True),
+             mock.call(['ua', 'enable', 'fips'], capture=True)])
         self.assertIn(
-            'DEBUG: Running user-provided ubuntu-advantage commands',
+            'WARNING: Failure enabling "esm":\nUnexpected error'
+            ' while running command.\nCommand: -\nExit code: -\nReason: -\n'
+            'Stdout: Invalid ESM credentials\nStderr: -\n',
             self.logs.getvalue())
-        self.assertEqual('HI\nMOM\n', util.load_file(outfile))
         self.assertIn(
-            'WARNING: Non-ubuntu-advantage commands in ubuntu-advantage'
-            ' config:',
+            'WARNING: Failure enabling "cc":\nUnexpected error'
+            ' while running command.\nCommand: -\nExit code: -\nReason: -\n'
+            'Stdout: Invalid CC credentials\nStderr: -\n',
+            self.logs.getvalue())
+        self.assertEqual(
+            'Failure enabling Ubuntu Advantage service(s): "esm", "cc"',
+            str(context_manager.exception))
+
+    @mock.patch('%s.util.subp' % MPATH)
+    def test_configure_ua_attach_with_empty_services(self, m_subp):
+        """When services is an empty list, do not auto-enable attach."""
+        configure_ua(token='SomeToken', enable=[])
+        m_subp.assert_called_once_with(['ua', 'attach', 'SomeToken'])
+        self.assertEqual(
+            'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
             self.logs.getvalue())
 
-    def test_run_command_dict_sorted_as_command_script(self):
-        """When commands are a dict, sort them and run."""
-        outfile = self.tmp_path('output.log', dir=self.tmp)
-        cmd1 = 'echo "HI" >> %s' % outfile
-        cmd2 = 'echo "MOM" >> %s' % outfile
-        commands = {'02': cmd1, '01': cmd2}
-        with mock.patch('%s.sys.stderr' % MPATH, new_callable=StringIO):
-            run_commands(commands=commands)
+    @mock.patch('%s.util.subp' % MPATH)
+    def test_configure_ua_attach_with_specific_services(self, m_subp):
+        """When services is a list, only enable the specified services."""
+        configure_ua(token='SomeToken', enable=['fips'])
+        self.assertEqual(
+            m_subp.call_args_list,
+            [mock.call(['ua', 'attach', 'SomeToken']),
+             mock.call(['ua', 'enable', 'fips'], capture=True)])
+        self.assertEqual(
+            'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
+            self.logs.getvalue())
+
+    @mock.patch('%s.maybe_install_ua_tools' % MPATH, mock.MagicMock())
+    @mock.patch('%s.util.subp' % MPATH)
+    def test_configure_ua_attach_with_string_services(self, m_subp):
+        """When services is a string, treat it as a singleton list and warn."""
+        configure_ua(token='SomeToken', enable='fips')
+        self.assertEqual(
+            m_subp.call_args_list,
+            [mock.call(['ua', 'attach', 'SomeToken']),
+             mock.call(['ua', 'enable', 'fips'], capture=True)])
+        self.assertEqual(
+            'WARNING: ubuntu_advantage: enable should be a list, not a'
+            ' string; treating as a single enable\n'
+            'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
+            self.logs.getvalue())
 
-        expected_messages = [
-            'DEBUG: Running user-provided ubuntu-advantage commands']
-        for message in expected_messages:
-            self.assertIn(message, self.logs.getvalue())
-        self.assertEqual('MOM\nHI\n', util.load_file(outfile))
+    @mock.patch('%s.util.subp' % MPATH)
+    def test_configure_ua_attach_with_weird_services(self, m_subp):
+        """When services is neither a string nor a list, warn but still attach."""
+        configure_ua(token='SomeToken', enable={'deffo': 'wont work'})
+        self.assertEqual(
+            m_subp.call_args_list,
+            [mock.call(['ua', 'attach', 'SomeToken'])])
+        self.assertEqual(
+            'WARNING: ubuntu_advantage: enable should be a list, not a'
+            ' dict; skipping enabling services\n'
+            'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
+            self.logs.getvalue())
 
 @skipUnlessJsonSchema()
@@ -112,90 +139,50 @@ class TestSchema(CiTestCase, SchemaTestCaseMixin):
 
     with_logs = True
     schema = schema
 
-    def test_schema_warns_on_ubuntu_advantage_not_as_dict(self):
-        """If ubuntu-advantage configuration is not a dict, emit a warning."""
-        validate_cloudconfig_schema({'ubuntu-advantage': 'wrong type'}, schema)
+    @mock.patch('%s.maybe_install_ua_tools' % MPATH)
+    @mock.patch('%s.configure_ua' % MPATH)
+    def test_schema_warns_on_ubuntu_advantage_not_dict(self, _cfg, _):
+        """If ubuntu_advantage configuration is not a dict, emit a warning."""
+        validate_cloudconfig_schema({'ubuntu_advantage': 'wrong type'}, schema)
         self.assertEqual(
-            "WARNING: Invalid config:\nubuntu-advantage: 'wrong type' is not"
+            "WARNING: Invalid config:\nubuntu_advantage: 'wrong type' is not"
             " of type 'object'\n",
             self.logs.getvalue())
 
-    @mock.patch('%s.run_commands' % MPATH)
-    def test_schema_disallows_unknown_keys(self, _):
-        """Unknown keys in ubuntu-advantage configuration emit warnings."""
+    @mock.patch('%s.maybe_install_ua_tools' % MPATH)
+    @mock.patch('%s.configure_ua' % MPATH)
+    def test_schema_disallows_unknown_keys(self, _cfg, _):
+        """Unknown keys in ubuntu_advantage configuration emit warnings."""
         validate_cloudconfig_schema(
-            {'ubuntu-advantage': {'commands': ['ls'], 'invalid-key': ''}},
+            {'ubuntu_advantage': {'token': 'winner', 'invalid-key': ''}},
             schema)
         self.assertIn(
-            'WARNING: Invalid config:\nubuntu-advantage: Additional properties'
+            'WARNING: Invalid config:\nubuntu_advantage: Additional properties'
             " are not allowed ('invalid-key' was unexpected)",
             self.logs.getvalue())
 
-    def test_warn_schema_requires_commands(self):
-        """Warn when ubuntu-advantage configuration lacks commands."""
-        validate_cloudconfig_schema(
-            {'ubuntu-advantage': {}}, schema)
-        self.assertEqual(
-            "WARNING: Invalid config:\nubuntu-advantage: 'commands' is a"
-            " required property\n",
-            self.logs.getvalue())
-
-    @mock.patch('%s.run_commands' % MPATH)
-    def test_warn_schema_commands_is_not_list_or_dict(self, _):
-        """Warn when ubuntu-advantage:commands config is not a list or dict."""
+    @mock.patch('%s.maybe_install_ua_tools' % MPATH)
+    @mock.patch('%s.configure_ua' % MPATH)
+    def test_warn_schema_requires_token(self, _cfg, _):
+        """Warn if ubuntu_advantage configuration lacks token."""
        validate_cloudconfig_schema(
-            {'ubuntu-advantage': {'commands': 'broken'}}, schema)
+            {'ubuntu_advantage': {'enable': ['esm']}}, schema)
         self.assertEqual(
-            "WARNING: Invalid config:\nubuntu-advantage.commands: 'broken' is"
-            " not of type 'object', 'array'\n",
-            self.logs.getvalue())
+            "WARNING: Invalid config:\nubuntu_advantage:"
+            " 'token' is a required property\n", self.logs.getvalue())
 
-
@mock.patch('%s.run_commands' % MPATH) - def test_warn_schema_when_commands_is_empty(self, _): - """Emit warnings when ubuntu-advantage:commands is empty.""" - validate_cloudconfig_schema( - {'ubuntu-advantage': {'commands': []}}, schema) + @mock.patch('%s.maybe_install_ua_tools' % MPATH) + @mock.patch('%s.configure_ua' % MPATH) + def test_warn_schema_services_is_not_list_or_dict(self, _cfg, _): + """Warn when ubuntu_advantage:enable config is not a list.""" validate_cloudconfig_schema( - {'ubuntu-advantage': {'commands': {}}}, schema) + {'ubuntu_advantage': {'enable': 'needslist'}}, schema) self.assertEqual( - "WARNING: Invalid config:\nubuntu-advantage.commands: [] is too" - " short\nWARNING: Invalid config:\nubuntu-advantage.commands: {}" - " does not have enough properties\n", + "WARNING: Invalid config:\nubuntu_advantage: 'token' is a" + " required property\nubuntu_advantage.enable: 'needslist'" + " is not of type 'array'\n", self.logs.getvalue()) - @mock.patch('%s.run_commands' % MPATH) - def test_schema_when_commands_are_list_or_dict(self, _): - """No warnings when ubuntu-advantage:commands are a list or dict.""" - validate_cloudconfig_schema( - {'ubuntu-advantage': {'commands': ['valid']}}, schema) - validate_cloudconfig_schema( - {'ubuntu-advantage': {'commands': {'01': 'also valid'}}}, schema) - self.assertEqual('', self.logs.getvalue()) - - def test_duplicates_are_fine_array_array(self): - """Duplicated commands array/array entries are allowed.""" - self.assertSchemaValid( - {'commands': [["echo", "bye"], ["echo" "bye"]]}, - "command entries can be duplicate.") - - def test_duplicates_are_fine_array_string(self): - """Duplicated commands array/string entries are allowed.""" - self.assertSchemaValid( - {'commands': ["echo bye", "echo bye"]}, - "command entries can be duplicate.") - - def test_duplicates_are_fine_dict_array(self): - """Duplicated commands dict/array entries are allowed.""" - self.assertSchemaValid( - {'commands': {'00': ["echo", "bye"], '01': ["echo", "bye"]}}, - "command entries can be duplicate.") - - def test_duplicates_are_fine_dict_string(self): - """Duplicated commands dict/string entries are allowed.""" - self.assertSchemaValid( - {'commands': {'00': "echo bye", '01': "echo bye"}}, - "command entries can be duplicate.") - class TestHandle(CiTestCase): @@ -205,41 +192,89 @@ class TestHandle(CiTestCase): super(TestHandle, self).setUp() self.tmp = self.tmp_dir() - @mock.patch('%s.run_commands' % MPATH) @mock.patch('%s.validate_cloudconfig_schema' % MPATH) - def test_handle_no_config(self, m_schema, m_run): + def test_handle_no_config(self, m_schema): """When no ua-related configuration is provided, nothing happens.""" cfg = {} handle('ua-test', cfg=cfg, cloud=None, log=self.logger, args=None) self.assertIn( - "DEBUG: Skipping module named ua-test, no 'ubuntu-advantage' key" - " in config", + "DEBUG: Skipping module named ua-test, no 'ubuntu_advantage'" + ' configuration found', self.logs.getvalue()) m_schema.assert_not_called() - m_run.assert_not_called() + @mock.patch('%s.configure_ua' % MPATH) @mock.patch('%s.maybe_install_ua_tools' % MPATH) - def test_handle_tries_to_install_ubuntu_advantage_tools(self, m_install): + def test_handle_tries_to_install_ubuntu_advantage_tools( + self, m_install, m_cfg): """If ubuntu_advantage is provided, try installing ua-tools package.""" - cfg = {'ubuntu-advantage': {}} + cfg = {'ubuntu_advantage': {'token': 'valid'}} mycloud = FakeCloud(None) handle('nomatter', cfg=cfg, cloud=mycloud, log=self.logger, args=None) 
m_install.assert_called_once_with(mycloud) + @mock.patch('%s.configure_ua' % MPATH) @mock.patch('%s.maybe_install_ua_tools' % MPATH) - def test_handle_runs_commands_provided(self, m_install): - """When commands are specified as a list, run them.""" - outfile = self.tmp_path('output.log', dir=self.tmp) + def test_handle_passes_credentials_and_services_to_configure_ua( + self, m_install, m_configure_ua): + """All ubuntu_advantage config keys are passed to configure_ua.""" + cfg = {'ubuntu_advantage': {'token': 'token', 'enable': ['esm']}} + handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None) + m_configure_ua.assert_called_once_with( + token='token', enable=['esm']) + + @mock.patch('%s.maybe_install_ua_tools' % MPATH, mock.MagicMock()) + @mock.patch('%s.configure_ua' % MPATH) + def test_handle_warns_on_deprecated_ubuntu_advantage_key_w_config( + self, m_configure_ua): + """Warning when ubuntu-advantage key is present with new config""" + cfg = {'ubuntu-advantage': {'token': 'token', 'enable': ['esm']}} + handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None) + self.assertEqual( + 'WARNING: Deprecated configuration key "ubuntu-advantage"' + ' provided. Expected underscore delimited "ubuntu_advantage";' + ' will attempt to continue.', + self.logs.getvalue().splitlines()[0]) + m_configure_ua.assert_called_once_with( + token='token', enable=['esm']) + + def test_handle_error_on_deprecated_commands_key_dashed(self): + """Error when commands is present in ubuntu-advantage key.""" + cfg = {'ubuntu-advantage': {'commands': 'nogo'}} + with self.assertRaises(RuntimeError) as context_manager: + handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None) + self.assertEqual( + 'Deprecated configuration "ubuntu-advantage: commands" provided.' + ' Expected "token"', + str(context_manager.exception)) + + def test_handle_error_on_deprecated_commands_key_underscored(self): + """Error when commands is present in ubuntu_advantage key.""" + cfg = {'ubuntu_advantage': {'commands': 'nogo'}} + with self.assertRaises(RuntimeError) as context_manager: + handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None) + self.assertEqual( + 'Deprecated configuration "ubuntu-advantage: commands" provided.' + ' Expected "token"', + str(context_manager.exception)) + @mock.patch('%s.maybe_install_ua_tools' % MPATH, mock.MagicMock()) + @mock.patch('%s.configure_ua' % MPATH) + def test_handle_prefers_new_style_config( + self, m_configure_ua): + """ubuntu_advantage should be preferred over ubuntu-advantage""" cfg = { - 'ubuntu-advantage': {'commands': ['echo "HI" >> %s' % outfile, - 'echo "MOM" >> %s' % outfile]}} - mock_path = '%s.sys.stderr' % MPATH - with self.allow_subp([CiTestCase.SUBP_SHELL_TRUE]): - with mock.patch(mock_path, new_callable=StringIO): - handle('nomatter', cfg=cfg, cloud=None, log=self.logger, - args=None) - self.assertEqual('HI\nMOM\n', util.load_file(outfile)) + 'ubuntu-advantage': {'token': 'nope', 'enable': ['wrong']}, + 'ubuntu_advantage': {'token': 'token', 'enable': ['esm']}, + } + handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None) + self.assertEqual( + 'WARNING: Deprecated configuration key "ubuntu-advantage"' + ' provided. 
Expected underscore delimited "ubuntu_advantage";'
+            ' will attempt to continue.',
+            self.logs.getvalue().splitlines()[0])
+        m_configure_ua.assert_called_once_with(
+            token='token', enable=['esm'])
 
 class TestMaybeInstallUATools(CiTestCase):
@@ -253,7 +288,7 @@
 
     @mock.patch('%s.util.which' % MPATH)
     def test_maybe_install_ua_tools_noop_when_ua_tools_present(self, m_which):
         """Do nothing if ubuntu-advantage-tools already exists."""
-        m_which.return_value = '/usr/bin/ubuntu-advantage'  # already installed
+        m_which.return_value = '/usr/bin/ua'  # already installed
         distro = mock.MagicMock()
         distro.update_package_sources.side_effect = RuntimeError(
             'Some apt error')
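Taken together, the reworked tests above pin down the new cc_ubuntu_advantage configuration surface. A short sketch of the accepted shape and the resulting invocations; the token and service names are placeholders, not values from the patch:

    # New-style cc_ubuntu_advantage config; token/services are placeholders.
    cfg = {'ubuntu_advantage': {'token': '<contract-token>',
                                'enable': ['esm', 'fips']}}
    # Per the tests above, configure_ua() then invokes:
    #   ua attach <contract-token>
    #   ua enable esm    (capture=True)
    #   ua enable fips   (capture=True)
    # The dashed 'ubuntu-advantage' key still works but logs a deprecation
    # warning, and a legacy 'commands' key raises RuntimeError.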
diff --git a/cloudinit/config/tests/test_ubuntu_drivers.py b/cloudinit/config/tests/test_ubuntu_drivers.py
new file mode 100644
index 00000000..46952692
--- /dev/null
+++ b/cloudinit/config/tests/test_ubuntu_drivers.py
@@ -0,0 +1,237 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import copy
+import os
+
+from cloudinit.tests.helpers import CiTestCase, skipUnlessJsonSchema, mock
+from cloudinit.config.schema import (
+    SchemaValidationError, validate_cloudconfig_schema)
+from cloudinit.config import cc_ubuntu_drivers as drivers
+from cloudinit.util import ProcessExecutionError
+
+MPATH = "cloudinit.config.cc_ubuntu_drivers."
+M_TMP_PATH = MPATH + "temp_utils.mkdtemp"
+OLD_UBUNTU_DRIVERS_ERROR_STDERR = (
+    "ubuntu-drivers: error: argument <command>: invalid choice: 'install' "
+    "(choose from 'list', 'autoinstall', 'devices', 'debug')\n")
+
+
+class AnyTempScriptAndDebconfFile(object):
+
+    def __init__(self, tmp_dir, debconf_file):
+        self.tmp_dir = tmp_dir
+        self.debconf_file = debconf_file
+
+    def __eq__(self, cmd):
+        if not len(cmd) == 2:
+            return False
+        script, debconf_file = cmd
+        if bool(script.startswith(self.tmp_dir) and script.endswith('.sh')):
+            return debconf_file == self.debconf_file
+        return False
+
+
+class TestUbuntuDrivers(CiTestCase):
+    cfg_accepted = {'drivers': {'nvidia': {'license-accepted': True}}}
+    install_gpgpu = ['ubuntu-drivers', 'install', '--gpgpu', 'nvidia']
+
+    with_logs = True
+
+    @skipUnlessJsonSchema()
+    def test_schema_requires_boolean_for_license_accepted(self):
+        with self.assertRaisesRegex(
+                SchemaValidationError, ".*license-accepted.*TRUE.*boolean"):
+            validate_cloudconfig_schema(
+                {'drivers': {'nvidia': {'license-accepted': "TRUE"}}},
+                schema=drivers.schema, strict=True)
+
+    @mock.patch(M_TMP_PATH)
+    @mock.patch(MPATH + "util.subp", return_value=('', ''))
+    @mock.patch(MPATH + "util.which", return_value=False)
+    def _assert_happy_path_taken(
+            self, config, m_which, m_subp, m_tmp):
+        """Positive path test through handle. Package should be installed."""
+        tdir = self.tmp_dir()
+        debconf_file = os.path.join(tdir, 'nvidia.template')
+        m_tmp.return_value = tdir
+        myCloud = mock.MagicMock()
+        drivers.handle('ubuntu_drivers', config, myCloud, None, None)
+        self.assertEqual([mock.call(['ubuntu-drivers-common'])],
+                         myCloud.distro.install_packages.call_args_list)
+        self.assertEqual(
+            [mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)),
+             mock.call(self.install_gpgpu)],
+            m_subp.call_args_list)
+
+    def test_handle_does_package_install(self):
+        self._assert_happy_path_taken(self.cfg_accepted)
+
+    def test_trueish_strings_are_considered_approval(self):
+        for true_value in ['yes', 'true', 'on', '1']:
+            new_config = copy.deepcopy(self.cfg_accepted)
+            new_config['drivers']['nvidia']['license-accepted'] = true_value
+            self._assert_happy_path_taken(new_config)
+
+    @mock.patch(M_TMP_PATH)
+    @mock.patch(MPATH + "util.subp")
+    @mock.patch(MPATH + "util.which", return_value=False)
+    def test_handle_raises_error_if_no_drivers_found(
+            self, m_which, m_subp, m_tmp):
+        """If ubuntu-drivers doesn't install any drivers, raise an error."""
+        tdir = self.tmp_dir()
+        debconf_file = os.path.join(tdir, 'nvidia.template')
+        m_tmp.return_value = tdir
+        myCloud = mock.MagicMock()
+
+        def fake_subp(cmd):
+            if cmd[0].startswith(tdir):
+                return
+            raise ProcessExecutionError(
+                stdout='No drivers found for installation.\n', exit_code=1)
+        m_subp.side_effect = fake_subp
+
+        with self.assertRaises(Exception):
+            drivers.handle(
+                'ubuntu_drivers', self.cfg_accepted, myCloud, None, None)
+        self.assertEqual([mock.call(['ubuntu-drivers-common'])],
+                         myCloud.distro.install_packages.call_args_list)
+        self.assertEqual(
+            [mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)),
+             mock.call(self.install_gpgpu)],
+            m_subp.call_args_list)
+        self.assertIn('ubuntu-drivers found no drivers for installation',
+                      self.logs.getvalue())
+
+    @mock.patch(MPATH + "util.subp", return_value=('', ''))
+    @mock.patch(MPATH + "util.which", return_value=False)
+    def _assert_inert_with_config(self, config, m_which, m_subp):
+        """Helper to reduce repetition when testing negative cases"""
+        myCloud = mock.MagicMock()
+        drivers.handle('ubuntu_drivers', config, myCloud, None, None)
+        self.assertEqual(0, myCloud.distro.install_packages.call_count)
+        self.assertEqual(0, m_subp.call_count)
+
+    def test_handle_inert_if_license_not_accepted(self):
+        """Ensure we don't do anything if the license is rejected."""
+        self._assert_inert_with_config(
+            {'drivers': {'nvidia': {'license-accepted': False}}})
+
+    def test_handle_inert_if_garbage_in_license_field(self):
+        """Ensure we don't do anything if unknown text is in license field."""
+        self._assert_inert_with_config(
+            {'drivers': {'nvidia': {'license-accepted': 'garbage'}}})
+
+    def test_handle_inert_if_no_license_key(self):
+        """Ensure we don't do anything if no license key."""
+        self._assert_inert_with_config({'drivers': {'nvidia': {}}})
+
+    def test_handle_inert_if_no_nvidia_key(self):
+        """Ensure we don't do anything if other license accepted."""
+        self._assert_inert_with_config(
+            {'drivers': {'acme': {'license-accepted': True}}})
+
+    def test_handle_inert_if_string_given(self):
+        """Ensure we don't do anything if string refusal given."""
+        for false_value in ['no', 'false', 'off', '0']:
+            self._assert_inert_with_config(
+                {'drivers': {'nvidia': {'license-accepted': false_value}}})
+
+    @mock.patch(MPATH + "install_drivers")
+    def test_handle_no_drivers_does_nothing(self, m_install_drivers):
+        """If no 'drivers' key in
the config, nothing should be done.""" + myCloud = mock.MagicMock() + myLog = mock.MagicMock() + drivers.handle('ubuntu_drivers', {'foo': 'bzr'}, myCloud, myLog, None) + self.assertIn('Skipping module named', + myLog.debug.call_args_list[0][0][0]) + self.assertEqual(0, m_install_drivers.call_count) + + @mock.patch(M_TMP_PATH) + @mock.patch(MPATH + "util.subp", return_value=('', '')) + @mock.patch(MPATH + "util.which", return_value=True) + def test_install_drivers_no_install_if_present( + self, m_which, m_subp, m_tmp): + """If 'ubuntu-drivers' is present, no package install should occur.""" + tdir = self.tmp_dir() + debconf_file = os.path.join(tdir, 'nvidia.template') + m_tmp.return_value = tdir + pkg_install = mock.MagicMock() + drivers.install_drivers(self.cfg_accepted['drivers'], + pkg_install_func=pkg_install) + self.assertEqual(0, pkg_install.call_count) + self.assertEqual([mock.call('ubuntu-drivers')], + m_which.call_args_list) + self.assertEqual( + [mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)), + mock.call(self.install_gpgpu)], + m_subp.call_args_list) + + def test_install_drivers_rejects_invalid_config(self): + """install_drivers should raise TypeError if not given a config dict""" + pkg_install = mock.MagicMock() + with self.assertRaisesRegex(TypeError, ".*expected dict.*"): + drivers.install_drivers("mystring", pkg_install_func=pkg_install) + self.assertEqual(0, pkg_install.call_count) + + @mock.patch(M_TMP_PATH) + @mock.patch(MPATH + "util.subp") + @mock.patch(MPATH + "util.which", return_value=False) + def test_install_drivers_handles_old_ubuntu_drivers_gracefully( + self, m_which, m_subp, m_tmp): + """Older ubuntu-drivers versions should emit message and raise error""" + tdir = self.tmp_dir() + debconf_file = os.path.join(tdir, 'nvidia.template') + m_tmp.return_value = tdir + myCloud = mock.MagicMock() + + def fake_subp(cmd): + if cmd[0].startswith(tdir): + return + raise ProcessExecutionError( + stderr=OLD_UBUNTU_DRIVERS_ERROR_STDERR, exit_code=2) + m_subp.side_effect = fake_subp + + with self.assertRaises(Exception): + drivers.handle( + 'ubuntu_drivers', self.cfg_accepted, myCloud, None, None) + self.assertEqual([mock.call(['ubuntu-drivers-common'])], + myCloud.distro.install_packages.call_args_list) + self.assertEqual( + [mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)), + mock.call(self.install_gpgpu)], + m_subp.call_args_list) + self.assertIn('WARNING: the available version of ubuntu-drivers is' + ' too old to perform requested driver installation', + self.logs.getvalue()) + + +# Sub-class TestUbuntuDrivers to run the same test cases, but with a version +class TestUbuntuDriversWithVersion(TestUbuntuDrivers): + cfg_accepted = { + 'drivers': {'nvidia': {'license-accepted': True, 'version': '123'}}} + install_gpgpu = ['ubuntu-drivers', 'install', '--gpgpu', 'nvidia:123'] + + @mock.patch(M_TMP_PATH) + @mock.patch(MPATH + "util.subp", return_value=('', '')) + @mock.patch(MPATH + "util.which", return_value=False) + def test_version_none_uses_latest(self, m_which, m_subp, m_tmp): + tdir = self.tmp_dir() + debconf_file = os.path.join(tdir, 'nvidia.template') + m_tmp.return_value = tdir + myCloud = mock.MagicMock() + version_none_cfg = { + 'drivers': {'nvidia': {'license-accepted': True, 'version': None}}} + drivers.handle( + 'ubuntu_drivers', version_none_cfg, myCloud, None, None) + self.assertEqual( + [mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)), + mock.call(['ubuntu-drivers', 'install', '--gpgpu', 'nvidia'])], + m_subp.call_args_list) + + 
def test_specifying_a_version_doesnt_override_license_acceptance(self): + self._assert_inert_with_config({ + 'drivers': {'nvidia': {'license-accepted': False, + 'version': '123'}} + }) + +# vi: ts=4 expandtab diff --git a/cloudinit/config/tests/test_users_groups.py b/cloudinit/config/tests/test_users_groups.py index ba0afae3..f620b597 100644 --- a/cloudinit/config/tests/test_users_groups.py +++ b/cloudinit/config/tests/test_users_groups.py @@ -46,6 +46,34 @@ class TestHandleUsersGroups(CiTestCase): mock.call('me2', default=False)]) m_group.assert_not_called() + @mock.patch('cloudinit.distros.freebsd.Distro.create_group') + @mock.patch('cloudinit.distros.freebsd.Distro.create_user') + def test_handle_users_in_cfg_calls_create_users_on_bsd( + self, + m_fbsd_user, + m_fbsd_group, + m_linux_user, + m_linux_group, + ): + """When users in config, create users with freebsd.create_user.""" + cfg = {'users': ['default', {'name': 'me2'}]} # merged cloud-config + # System config defines a default user for the distro. + sys_cfg = {'default_user': {'name': 'freebsd', 'lock_passwd': True, + 'groups': ['wheel'], + 'shell': '/bin/tcsh'}} + metadata = {} + cloud = self.tmp_cloud( + distro='freebsd', sys_cfg=sys_cfg, metadata=metadata) + cc_users_groups.handle('modulename', cfg, cloud, None, None) + self.assertItemsEqual( + m_fbsd_user.call_args_list, + [mock.call('freebsd', groups='wheel', lock_passwd=True, + shell='/bin/tcsh'), + mock.call('me2', default=False)]) + m_fbsd_group.assert_not_called() + m_linux_group.assert_not_called() + m_linux_user.assert_not_called() + def test_users_with_ssh_redirect_user_passes_keys(self, m_user, m_group): """When ssh_redirect_user is True pass default user and cloud keys.""" cfg = { diff --git a/cloudinit/cs_utils.py b/cloudinit/cs_utils.py index 51c09582..8bac9c44 100644 --- a/cloudinit/cs_utils.py +++ b/cloudinit/cs_utils.py @@ -14,7 +14,7 @@ Having the server definition accessible by the VM can ve useful in various ways. For example it is possible to easily determine from within the VM, which network interfaces are connected to public and which to private network. Another use is to pass some data to initial VM setup scripts, like setting the -hostname to the VM name or passing ssh public keys through server meta. +hostname to the VM name or passing SSH public keys through server meta. For more information take a look at the Server Context section of CloudSigma API Docs: http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index ef618c28..92598a2d 100644..100755 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -9,13 +9,11 @@ # # This file is part of cloud-init. See LICENSE file for license information. 
-import six -from six import StringIO - import abc import os import re import stat +from io import StringIO from cloudinit import importer from cloudinit import log as logging @@ -36,7 +34,7 @@ ALL_DISTROS = 'all' OSFAMILIES = { 'debian': ['debian', 'ubuntu'], - 'redhat': ['centos', 'fedora', 'rhel'], + 'redhat': ['amazon', 'centos', 'fedora', 'rhel'], 'gentoo': ['gentoo'], 'freebsd': ['freebsd'], 'suse': ['opensuse', 'sles'], @@ -53,8 +51,7 @@ _EC2_AZ_RE = re.compile('^[a-z][a-z]-(?:[a-z]+-)+[0-9][a-z]$') PREFERRED_NTP_CLIENTS = ['chrony', 'systemd-timesyncd', 'ntp', 'ntpdate'] -@six.add_metaclass(abc.ABCMeta) -class Distro(object): +class Distro(metaclass=abc.ABCMeta): usr_lib_exec = "/usr/lib" hosts_fn = "/etc/hosts" @@ -145,7 +142,7 @@ class Distro(object): # Write it out # pylint: disable=assignment-from-no-return - # We have implementations in arch, freebsd and gentoo still + # We have implementations in arch and gentoo still dev_names = self._write_network(settings) # pylint: enable=assignment-from-no-return # Now try to bring them up @@ -385,7 +382,7 @@ class Distro(object): Add a user to the system using standard GNU tools """ # XXX need to make add_user idempotent somehow as we - # still want to add groups or modify ssh keys on pre-existing + # still want to add groups or modify SSH keys on pre-existing # users in the image. if util.is_user(name): LOG.info("User %s already exists, skipping.", name) @@ -396,16 +393,16 @@ class Distro(object): else: create_groups = True - adduser_cmd = ['useradd', name] - log_adduser_cmd = ['useradd', name] + useradd_cmd = ['useradd', name] + log_useradd_cmd = ['useradd', name] if util.system_is_snappy(): - adduser_cmd.append('--extrausers') - log_adduser_cmd.append('--extrausers') + useradd_cmd.append('--extrausers') + log_useradd_cmd.append('--extrausers') # Since we are creating users, we want to carefully validate the # inputs. If something goes wrong, we can end up with a system # that nobody can login to. 
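Editorial aside: the comment above motivates the option-mapping loop that follows, where each validated config key maps to a useradd flag and sensitive values are redacted in the logged copy of the command. A standalone sketch of that pattern; build_cmds and the trimmed-down tables are illustrative, not the module's code:

useradd_opts = {'gecos': '--comment', 'homedir': '--home',
                'passwd': '--password'}
redact_opts = ['passwd']

def build_cmds(name, **kwargs):
    # build the real command and a log-safe twin in lockstep
    cmd, log_cmd = ['useradd', name], ['useradd', name]
    for key, val in sorted(kwargs.items()):
        if key in useradd_opts and val and isinstance(val, str):
            cmd.extend([useradd_opts[key], val])
            log_cmd.extend([useradd_opts[key],
                            'REDACTED' if key in redact_opts else val])
    return cmd, log_cmd

cmd, log_cmd = build_cmds('alice', passwd='s3cret', gecos='Alice')
assert 'REDACTED' in log_cmd and 's3cret' not in log_cmd
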
- adduser_opts = { + useradd_opts = { "gecos": '--comment', "homedir": '--home', "primary_group": '--gid', @@ -418,7 +415,7 @@ class Distro(object): "selinux_user": '--selinux-user', } - adduser_flags = { + useradd_flags = { "no_user_group": '--no-user-group', "system": '--system', "no_log_init": '--no-log-init', @@ -429,7 +426,7 @@ class Distro(object): # support kwargs having groups=[list] or groups="g1,g2" groups = kwargs.get('groups') if groups: - if isinstance(groups, six.string_types): + if isinstance(groups, str): groups = groups.split(",") # remove any white spaces in group names, most likely @@ -453,32 +450,32 @@ class Distro(object): # Check the values and create the command for key, val in sorted(kwargs.items()): - if key in adduser_opts and val and isinstance(val, str): - adduser_cmd.extend([adduser_opts[key], val]) + if key in useradd_opts and val and isinstance(val, str): + useradd_cmd.extend([useradd_opts[key], val]) # Redact certain fields from the logs if key in redact_opts: - log_adduser_cmd.extend([adduser_opts[key], 'REDACTED']) + log_useradd_cmd.extend([useradd_opts[key], 'REDACTED']) else: - log_adduser_cmd.extend([adduser_opts[key], val]) + log_useradd_cmd.extend([useradd_opts[key], val]) - elif key in adduser_flags and val: - adduser_cmd.append(adduser_flags[key]) - log_adduser_cmd.append(adduser_flags[key]) + elif key in useradd_flags and val: + useradd_cmd.append(useradd_flags[key]) + log_useradd_cmd.append(useradd_flags[key]) # Don't create the home directory if directed so or if the user is a # system user if kwargs.get('no_create_home') or kwargs.get('system'): - adduser_cmd.append('-M') - log_adduser_cmd.append('-M') + useradd_cmd.append('-M') + log_useradd_cmd.append('-M') else: - adduser_cmd.append('-m') - log_adduser_cmd.append('-m') + useradd_cmd.append('-m') + log_useradd_cmd.append('-m') # Run the command LOG.debug("Adding user %s", name) try: - util.subp(adduser_cmd, logstring=log_adduser_cmd) + util.subp(useradd_cmd, logstring=log_useradd_cmd) except Exception as e: util.logexc(LOG, "Failed to create user %s", name) raise e @@ -490,15 +487,15 @@ class Distro(object): snapuser = kwargs.get('snapuser') known = kwargs.get('known', False) - adduser_cmd = ["snap", "create-user", "--sudoer", "--json"] + create_user_cmd = ["snap", "create-user", "--sudoer", "--json"] if known: - adduser_cmd.append("--known") - adduser_cmd.append(snapuser) + create_user_cmd.append("--known") + create_user_cmd.append(snapuser) # Run the command LOG.debug("Adding snap user %s", name) try: - (out, err) = util.subp(adduser_cmd, logstring=adduser_cmd, + (out, err) = util.subp(create_user_cmd, logstring=create_user_cmd, capture=True) LOG.debug("snap create-user returned: %s:%s", out, err) jobj = util.load_json(out) @@ -544,7 +541,7 @@ class Distro(object): if 'ssh_authorized_keys' in kwargs: # Try to handle this in a smart manner. keys = kwargs['ssh_authorized_keys'] - if isinstance(keys, six.string_types): + if isinstance(keys, str): keys = [keys] elif isinstance(keys, dict): keys = list(keys.values()) @@ -561,7 +558,7 @@ class Distro(object): cloud_keys = kwargs.get('cloud_public_ssh_keys', []) if not cloud_keys: LOG.warning( - 'Unable to disable ssh logins for %s given' + 'Unable to disable SSH logins for %s given' ' ssh_redirect_user: %s. 
No cloud public-keys present.', name, kwargs['ssh_redirect_user']) else: @@ -577,15 +574,27 @@ class Distro(object): """ Lock the password of a user, i.e., disable password logins """ + # passwd must use short '-l' due to SLES11 lacking long form '--lock' + lock_tools = (['passwd', '-l', name], ['usermod', '--lock', name]) try: - # Need to use the short option name '-l' instead of '--lock' - # (which would be more descriptive) since SLES 11 doesn't know - # about long names. - util.subp(['passwd', '-l', name]) + cmd = next(l for l in lock_tools if util.which(l[0])) + except StopIteration: + raise RuntimeError(( + "Unable to lock user account '%s'. No tools available. " + " Tried: %s.") % (name, [c[0] for c in lock_tools])) + try: + util.subp(cmd) except Exception as e: util.logexc(LOG, 'Failed to disable password for user %s', name) raise e + def expire_passwd(self, user): + try: + util.subp(['passwd', '--expire', user]) + except Exception as e: + util.logexc(LOG, "Failed to set 'expire' for %s", user) + raise e + def set_passwd(self, user, passwd, hashed=False): pass_string = '%s:%s' % (user, passwd) cmd = ['chpasswd'] @@ -656,7 +665,7 @@ class Distro(object): if isinstance(rules, (list, tuple)): for rule in rules: lines.append("%s %s" % (user, rule)) - elif isinstance(rules, six.string_types): + elif isinstance(rules, str): lines.append("%s %s" % (user, rules)) else: msg = "Can not create sudoers rule addition with type %r" diff --git a/cloudinit/distros/amazon.py b/cloudinit/distros/amazon.py new file mode 100644 index 00000000..ff9a549f --- /dev/null +++ b/cloudinit/distros/amazon.py @@ -0,0 +1,26 @@ +# Copyright (C) 2012 Canonical Ltd. +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# Copyright (C) 2012 Yahoo! Inc. +# Copyright (C) 2014 Amazon.com, Inc. or its affiliates. +# +# Author: Scott Moser <scott.moser@canonical.com> +# Author: Juerg Haefliger <juerg.haefliger@hp.com> +# Author: Joshua Harlow <harlowja@yahoo-inc.com> +# Author: Andrew Jorgensen <ajorgens@amazon.com> +# +# This file is part of cloud-init. See LICENSE file for license information. 
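Editorial aside before the new amazon module continues: the lock_passwd rewrite above uses a pick-the-first-available-tool pattern. A runnable sketch of that pattern, with shutil.which standing in for util.which:

from shutil import which

def pick_lock_cmd(name):
    # passwd comes first; per the comment in the diff, SLES 11's passwd
    # only understands the short '-l' form, not '--lock'.
    lock_tools = (['passwd', '-l', name], ['usermod', '--lock', name])
    try:
        # first tool whose executable exists on this system wins
        return next(tool for tool in lock_tools if which(tool[0]))
    except StopIteration:
        raise RuntimeError(
            "Unable to lock user account '%s'. No tools available."
            " Tried: %s." % (name, [t[0] for t in lock_tools]))
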
+ +from cloudinit.distros import rhel + +from cloudinit import log as logging + +LOG = logging.getLogger(__name__) + + +class Distro(rhel.Distro): + + def update_package_sources(self): + return None + + +# vi: ts=4 expandtab diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py index b814c8ba..9f89c5f9 100644 --- a/cloudinit/distros/arch.py +++ b/cloudinit/distros/arch.py @@ -12,6 +12,8 @@ from cloudinit import util from cloudinit.distros import net_util from cloudinit.distros.parsers.hostname import HostnameConf +from cloudinit.net.renderers import RendererNotFoundError + from cloudinit.settings import PER_INSTANCE import os @@ -24,6 +26,11 @@ class Distro(distros.Distro): network_conf_dir = "/etc/netctl" resolve_conf_fn = "/etc/resolv.conf" init_cmd = ['systemctl'] # init scripts + renderer_configs = { + "netplan": {"netplan_path": "/etc/netplan/50-cloud-init.yaml", + "netplan_header": "# generated by cloud-init\n", + "postcmds": True} + } def __init__(self, name, cfg, paths): distros.Distro.__init__(self, name, cfg, paths) @@ -50,6 +57,13 @@ class Distro(distros.Distro): self.update_package_sources() self.package_command('', pkgs=pkglist) + def _write_network_config(self, netconfig): + try: + return self._supported_write_network_config(netconfig) + except RendererNotFoundError: + # Fall back to old _write_network + raise NotImplementedError + def _write_network(self, settings): entries = net_util.translate_network(settings) LOG.debug("Translated ubuntu style network settings %s into %s", diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index 0ad93ffe..128bb523 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -29,9 +29,10 @@ APT_GET_WRAPPER = { 'enabled': 'auto', } -ENI_HEADER = """# This file is generated from information provided by -# the datasource. Changes to it will not persist across an instance. -# To disable cloud-init's network configuration capabilities, write a file +NETWORK_FILE_HEADER = """\ +# This file is generated from information provided by the datasource. Changes +# to it will not persist across an instance reboot. To disable cloud-init's +# network configuration capabilities, write a file # /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following: # network: {config: disabled} """ @@ -48,9 +49,9 @@ class Distro(distros.Distro): } renderer_configs = { "eni": {"eni_path": network_conf_fn["eni"], - "eni_header": ENI_HEADER}, + "eni_header": NETWORK_FILE_HEADER}, "netplan": {"netplan_path": network_conf_fn["netplan"], - "netplan_header": ENI_HEADER, + "netplan_header": NETWORK_FILE_HEADER, "postcmds": True} } @@ -204,8 +205,7 @@ class Distro(distros.Distro): ["update"], freq=PER_INSTANCE) def get_primary_arch(self): - (arch, _err) = util.subp(['dpkg', '--print-architecture']) - return str(arch).strip() + return util.get_dpkg_architecture() def _get_wrapper_prefix(cmd, mode): diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py index ff22d568..026d1142 100644 --- a/cloudinit/distros/freebsd.py +++ b/cloudinit/distros/freebsd.py @@ -5,32 +5,28 @@ # This file is part of cloud-init. See LICENSE file for license information. 
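Editorial aside: the NETWORK_FILE_HEADER text above doubles as user documentation. The opt-out it describes is a one-line drop-in; the sketch below writes it with cloud-init's own util.write_file (requires root), with path and content taken verbatim from the header:

from cloudinit import util

# Tell cloud-init not to manage networking on subsequent boots.
util.write_file(
    '/etc/cloud/cloud.cfg.d/99-disable-network-config.cfg',
    'network: {config: disabled}\n')
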
import os -import six -from six import StringIO - import re +from io import StringIO from cloudinit import distros from cloudinit import helpers from cloudinit import log as logging +from cloudinit import net from cloudinit import ssh_util from cloudinit import util - -from cloudinit.distros import net_util -from cloudinit.distros.parsers.resolv_conf import ResolvConf - +from cloudinit.distros import rhel_util from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) class Distro(distros.Distro): + usr_lib_exec = '/usr/local/lib' rc_conf_fn = "/etc/rc.conf" login_conf_fn = '/etc/login.conf' login_conf_fn_bak = '/etc/login.conf.orig' - resolv_conf_fn = '/etc/resolv.conf' ci_sudoers_fn = '/usr/local/etc/sudoers.d/90-cloud-init-users' - default_primary_nic = 'hn0' + hostname_conf_fn = '/etc/rc.conf' def __init__(self, name, cfg, paths): distros.Distro.__init__(self, name, cfg, paths) @@ -39,99 +35,8 @@ class Distro(distros.Distro): # should only happen say once per instance...) self._runner = helpers.Runners(paths) self.osfamily = 'freebsd' - self.ipv4_pat = re.compile(r"\s+inet\s+\d+[.]\d+[.]\d+[.]\d+") cfg['ssh_svcname'] = 'sshd' - # Updates a key in /etc/rc.conf. - def updatercconf(self, key, value): - LOG.debug("Checking %s for: %s = %s", self.rc_conf_fn, key, value) - conf = self.loadrcconf() - config_changed = False - if key not in conf: - LOG.debug("Adding key in %s: %s = %s", self.rc_conf_fn, key, - value) - conf[key] = value - config_changed = True - else: - for item in conf.keys(): - if item == key and conf[item] != value: - conf[item] = value - LOG.debug("Changing key in %s: %s = %s", self.rc_conf_fn, - key, value) - config_changed = True - - if config_changed: - LOG.info("Writing %s", self.rc_conf_fn) - buf = StringIO() - for keyval in conf.items(): - buf.write('%s="%s"\n' % keyval) - util.write_file(self.rc_conf_fn, buf.getvalue()) - - # Load the contents of /etc/rc.conf and store all keys in a dict. Make sure - # quotes are ignored: - # hostname="bla" - def loadrcconf(self): - RE_MATCH = re.compile(r'^(\w+)\s*=\s*(.*)\s*') - conf = {} - lines = util.load_file(self.rc_conf_fn).splitlines() - for line in lines: - m = RE_MATCH.match(line) - if not m: - LOG.debug("Skipping line from /etc/rc.conf: %s", line) - continue - key = m.group(1).rstrip() - val = m.group(2).rstrip() - # Kill them quotes (not completely correct, aka won't handle - # quoted values, but should be ok ...) - if val[0] in ('"', "'"): - val = val[1:] - if val[-1] in ('"', "'"): - val = val[0:-1] - if len(val) == 0: - LOG.debug("Skipping empty value from /etc/rc.conf: %s", line) - continue - conf[key] = val - return conf - - def readrcconf(self, key): - conf = self.loadrcconf() - try: - val = conf[key] - except KeyError: - val = None - return val - - # NOVA will inject something like eth0, rewrite that to use the FreeBSD - # adapter. Since this adapter is based on the used driver, we need to - # figure out which interfaces are available. On KVM platforms this is - # vtnet0, where Xen would use xn0. 
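Editorial aside: the hand-rolled rc.conf parsing deleted in this hunk is replaced further down by the shared sysconfig helpers, which works because rc.conf is itself a key="value" file. A sketch of the replacement calls, mirroring the _read_hostname/_write_hostname changes below; 'myhost' is illustrative:

from cloudinit.distros import rhel_util

RC_CONF = '/etc/rc.conf'

# read: returns (exists, dict-like contents)
_exists, contents = rhel_util.read_sysconfig_file(RC_CONF)
hostname = contents.get('hostname')

# write: update or append a single quoted key="value" entry in place
rhel_util.update_sysconfig_file(RC_CONF, {'hostname': 'myhost'})
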
- def getnetifname(self, dev): - LOG.debug("Translating network interface %s", dev) - if dev.startswith('lo'): - return dev - - n = re.search(r'\d+$', dev) - index = n.group(0) - - (out, _err) = util.subp(['ifconfig', '-a']) - ifconfigoutput = [x for x in (out.strip()).splitlines() - if len(x.split()) > 0] - bsddev = 'NOT_FOUND' - for line in ifconfigoutput: - m = re.match(r'^\w+', line) - if m: - if m.group(0).startswith('lo'): - continue - # Just settle with the first non-lo adapter we find, since it's - # rather unlikely there will be multiple nicdrivers involved. - bsddev = m.group(0) - break - - # Replace the index with the one we're after. - bsddev = re.sub(r'\d+$', index, bsddev) - LOG.debug("Using network interface %s", bsddev) - return bsddev - def _select_hostname(self, hostname, fqdn): # Should be FQDN if available. See rc.conf(5) in FreeBSD if fqdn: @@ -139,56 +44,54 @@ class Distro(distros.Distro): return hostname def _read_system_hostname(self): - sys_hostname = self._read_hostname(filename=None) - return ('rc.conf', sys_hostname) + sys_hostname = self._read_hostname(self.hostname_conf_fn) + return (self.hostname_conf_fn, sys_hostname) def _read_hostname(self, filename, default=None): - hostname = None - try: - hostname = self.readrcconf('hostname') - except IOError: - pass - if not hostname: + (_exists, contents) = rhel_util.read_sysconfig_file(filename) + if contents.get('hostname'): + return contents['hostname'] + else: return default - return hostname def _write_hostname(self, hostname, filename): - self.updatercconf('hostname', hostname) + rhel_util.update_sysconfig_file(filename, {'hostname': hostname}) def create_group(self, name, members): - group_add_cmd = ['pw', '-n', name] + group_add_cmd = ['pw', 'group', 'add', name] if util.is_group(name): LOG.warning("Skipping creation of existing group '%s'", name) else: try: util.subp(group_add_cmd) LOG.info("Created new group %s", name) - except Exception as e: + except Exception: util.logexc(LOG, "Failed to create group %s", name) - raise e - - if len(members) > 0: - for member in members: - if not util.is_user(member): - LOG.warning("Unable to add group member '%s' to group '%s'" - "; user does not exist.", member, name) - continue - try: - util.subp(['pw', 'usermod', '-n', name, '-G', member]) - LOG.info("Added user '%s' to group '%s'", member, name) - except Exception: - util.logexc(LOG, "Failed to add user '%s' to group '%s'", - member, name) + raise + if not members: + members = [] + + for member in members: + if not util.is_user(member): + LOG.warning("Unable to add group member '%s' to group '%s'" + "; user does not exist.", member, name) + continue + try: + util.subp(['pw', 'usermod', '-n', name, '-G', member]) + LOG.info("Added user '%s' to group '%s'", member, name) + except Exception: + util.logexc(LOG, "Failed to add user '%s' to group '%s'", + member, name) def add_user(self, name, **kwargs): if util.is_user(name): LOG.info("User %s already exists, skipping.", name) return False - adduser_cmd = ['pw', 'useradd', '-n', name] - log_adduser_cmd = ['pw', 'useradd', '-n', name] + pw_useradd_cmd = ['pw', 'useradd', '-n', name] + log_pw_useradd_cmd = ['pw', 'useradd', '-n', name] - adduser_opts = { + pw_useradd_opts = { "homedir": '-d', "gecos": '-c', "primary_group": '-g', @@ -196,43 +99,49 @@ class Distro(distros.Distro): "shell": '-s', "inactive": '-E', } - adduser_flags = { + pw_useradd_flags = { "no_user_group": '--no-user-group', "system": '--system', "no_log_init": '--no-log-init', } for key, val in 
kwargs.items(): - if (key in adduser_opts and val and - isinstance(val, six.string_types)): - adduser_cmd.extend([adduser_opts[key], val]) + if key in pw_useradd_opts and val and isinstance(val, str): + pw_useradd_cmd.extend([pw_useradd_opts[key], val]) - elif key in adduser_flags and val: - adduser_cmd.append(adduser_flags[key]) - log_adduser_cmd.append(adduser_flags[key]) + elif key in pw_useradd_flags and val: + pw_useradd_cmd.append(pw_useradd_flags[key]) + log_pw_useradd_cmd.append(pw_useradd_flags[key]) if 'no_create_home' in kwargs or 'system' in kwargs: - adduser_cmd.append('-d/nonexistent') - log_adduser_cmd.append('-d/nonexistent') + pw_useradd_cmd.append('-d/nonexistent') + log_pw_useradd_cmd.append('-d/nonexistent') else: - adduser_cmd.append('-d/usr/home/%s' % name) - adduser_cmd.append('-m') - log_adduser_cmd.append('-d/usr/home/%s' % name) - log_adduser_cmd.append('-m') + pw_useradd_cmd.append('-d/usr/home/%s' % name) + pw_useradd_cmd.append('-m') + log_pw_useradd_cmd.append('-d/usr/home/%s' % name) + log_pw_useradd_cmd.append('-m') # Run the command LOG.info("Adding user %s", name) try: - util.subp(adduser_cmd, logstring=log_adduser_cmd) - except Exception as e: + util.subp(pw_useradd_cmd, logstring=log_pw_useradd_cmd) + except Exception: util.logexc(LOG, "Failed to create user %s", name) - raise e + raise # Set the password if it is provided # For security consideration, only hashed passwd is assumed passwd_val = kwargs.get('passwd', None) if passwd_val is not None: self.set_passwd(name, passwd_val, hashed=True) + def expire_passwd(self, user): + try: + util.subp(['pw', 'usermod', user, '-p', '01-Jan-1970']) + except Exception: + util.logexc(LOG, "Failed to set pw expiration for %s", user) + raise + def set_passwd(self, user, passwd, hashed=False): if hashed: hash_opt = "-H" @@ -242,16 +151,16 @@ class Distro(distros.Distro): try: util.subp(['pw', 'usermod', user, hash_opt, '0'], data=passwd, logstring="chpasswd for %s" % user) - except Exception as e: + except Exception: util.logexc(LOG, "Failed to set password for %s", user) - raise e + raise def lock_passwd(self, name): try: util.subp(['pw', 'usermod', name, '-h', '-']) - except Exception as e: + except Exception: util.logexc(LOG, "Failed to lock user %s", name) - raise e + raise def create_user(self, name, **kwargs): self.add_user(name, **kwargs) @@ -274,309 +183,16 @@ class Distro(distros.Distro): keys = set(kwargs['ssh_authorized_keys']) or [] ssh_util.setup_user_keys(keys, name, options=None) - @staticmethod - def get_ifconfig_list(): - cmd = ['ifconfig', '-l'] - (nics, err) = util.subp(cmd, rcs=[0, 1]) - if len(err): - LOG.warning("Error running %s: %s", cmd, err) - return None - return nics - - @staticmethod - def get_ifconfig_ifname_out(ifname): - cmd = ['ifconfig', ifname] - (if_result, err) = util.subp(cmd, rcs=[0, 1]) - if len(err): - LOG.warning("Error running %s: %s", cmd, err) - return None - return if_result - - @staticmethod - def get_ifconfig_ether(): - cmd = ['ifconfig', '-l', 'ether'] - (nics, err) = util.subp(cmd, rcs=[0, 1]) - if len(err): - LOG.warning("Error running %s: %s", cmd, err) - return None - return nics - - @staticmethod - def get_interface_mac(ifname): - if_result = Distro.get_ifconfig_ifname_out(ifname) - for item in if_result.splitlines(): - if item.find('ether ') != -1: - mac = str(item.split()[1]) - if mac: - return mac - - @staticmethod - def get_devicelist(): - nics = Distro.get_ifconfig_list() - return nics.split() - - @staticmethod - def get_ipv6(): - ipv6 = [] - nics = 
Distro.get_devicelist() - for nic in nics: - if_result = Distro.get_ifconfig_ifname_out(nic) - for item in if_result.splitlines(): - if item.find("inet6 ") != -1 and item.find("scopeid") == -1: - ipv6.append(nic) - return ipv6 - - def get_ipv4(self): - ipv4 = [] - nics = Distro.get_devicelist() - for nic in nics: - if_result = Distro.get_ifconfig_ifname_out(nic) - for item in if_result.splitlines(): - print(item) - if self.ipv4_pat.match(item): - ipv4.append(nic) - return ipv4 - - def is_up(self, ifname): - if_result = Distro.get_ifconfig_ifname_out(ifname) - pat = "^" + ifname - for item in if_result.splitlines(): - if re.match(pat, item): - flags = item.split('<')[1].split('>')[0] - if flags.find("UP") != -1: - return True - - def _get_current_rename_info(self, check_downable=True): - """Collect information necessary for rename_interfaces.""" - names = Distro.get_devicelist() - bymac = {} - for n in names: - bymac[Distro.get_interface_mac(n)] = { - 'name': n, 'up': self.is_up(n), 'downable': None} - - nics_with_addresses = set() - if check_downable: - nics_with_addresses = set(self.get_ipv4() + self.get_ipv6()) - - for d in bymac.values(): - d['downable'] = (d['up'] is False or - d['name'] not in nics_with_addresses) - - return bymac - - def _rename_interfaces(self, renames): - if not len(renames): - LOG.debug("no interfaces to rename") - return - - current_info = self._get_current_rename_info() - - cur_bymac = {} - for mac, data in current_info.items(): - cur = data.copy() - cur['mac'] = mac - cur_bymac[mac] = cur - - def update_byname(bymac): - return dict((data['name'], data) - for data in bymac.values()) - - def rename(cur, new): - util.subp(["ifconfig", cur, "name", new], capture=True) - - def down(name): - util.subp(["ifconfig", name, "down"], capture=True) - - def up(name): - util.subp(["ifconfig", name, "up"], capture=True) - - ops = [] - errors = [] - ups = [] - cur_byname = update_byname(cur_bymac) - tmpname_fmt = "cirename%d" - tmpi = -1 - - for mac, new_name in renames: - cur = cur_bymac.get(mac, {}) - cur_name = cur.get('name') - cur_ops = [] - if cur_name == new_name: - # nothing to do - continue - - if not cur_name: - errors.append("[nic not present] Cannot rename mac=%s to %s" - ", not available." % (mac, new_name)) - continue - - if cur['up']: - msg = "[busy] Error renaming mac=%s from %s to %s" - if not cur['downable']: - errors.append(msg % (mac, cur_name, new_name)) - continue - cur['up'] = False - cur_ops.append(("down", mac, new_name, (cur_name,))) - ups.append(("up", mac, new_name, (new_name,))) - - if new_name in cur_byname: - target = cur_byname[new_name] - if target['up']: - msg = "[busy-target] Error renaming mac=%s from %s to %s." 
- if not target['downable']: - errors.append(msg % (mac, cur_name, new_name)) - continue - else: - cur_ops.append(("down", mac, new_name, (new_name,))) - - tmp_name = None - while tmp_name is None or tmp_name in cur_byname: - tmpi += 1 - tmp_name = tmpname_fmt % tmpi - - cur_ops.append(("rename", mac, new_name, (new_name, tmp_name))) - target['name'] = tmp_name - cur_byname = update_byname(cur_bymac) - if target['up']: - ups.append(("up", mac, new_name, (tmp_name,))) - - cur_ops.append(("rename", mac, new_name, (cur['name'], new_name))) - cur['name'] = new_name - cur_byname = update_byname(cur_bymac) - ops += cur_ops - - opmap = {'rename': rename, 'down': down, 'up': up} - if len(ops) + len(ups) == 0: - if len(errors): - LOG.debug("unable to do any work for renaming of %s", renames) - else: - LOG.debug("no work necessary for renaming of %s", renames) - else: - LOG.debug("achieving renaming of %s with ops %s", - renames, ops + ups) - - for op, mac, new_name, params in ops + ups: - try: - opmap.get(op)(*params) - except Exception as e: - errors.append( - "[unknown] Error performing %s%s for %s, %s: %s" % - (op, params, mac, new_name, e)) - if len(errors): - raise Exception('\n'.join(errors)) - - def apply_network_config_names(self, netcfg): - renames = [] - for ent in netcfg.get('config', {}): - if ent.get('type') != 'physical': - continue - mac = ent.get('mac_address') - name = ent.get('name') - if not mac: - continue - renames.append([mac, name]) - return self._rename_interfaces(renames) - - @classmethod def generate_fallback_config(self): - nics = Distro.get_ifconfig_ether() - if nics is None: - LOG.debug("Fail to get network interfaces") - return None - potential_interfaces = nics.split() - connected = [] - for nic in potential_interfaces: - pat = "^" + nic - if_result = Distro.get_ifconfig_ifname_out(nic) - for item in if_result.split("\n"): - if re.match(pat, item): - flags = item.split('<')[1].split('>')[0] - if flags.find("RUNNING") != -1: - connected.append(nic) - if connected: - potential_interfaces = connected - names = list(sorted(potential_interfaces)) - default_pri_nic = Distro.default_primary_nic - if default_pri_nic in names: - names.remove(default_pri_nic) - names.insert(0, default_pri_nic) - target_name = None - target_mac = None - for name in names: - mac = Distro.get_interface_mac(name) - if mac: - target_name = name - target_mac = mac - break - if target_mac and target_name: - nconf = {'config': [], 'version': 1} + nconf = {'config': [], 'version': 1} + for mac, name in net.get_interfaces_by_mac().items(): nconf['config'].append( - {'type': 'physical', 'name': target_name, - 'mac_address': target_mac, 'subnets': [{'type': 'dhcp'}]}) - return nconf - else: - return None - - def _write_network(self, settings): - entries = net_util.translate_network(settings) - nameservers = [] - searchdomains = [] - dev_names = entries.keys() - for (device, info) in entries.items(): - # Skip the loopback interface. - if device.startswith('lo'): - continue - - dev = self.getnetifname(device) - - LOG.info('Configuring interface %s', dev) - - if info.get('bootproto') == 'static': - LOG.debug('Configuring dev %s with %s / %s', dev, - info.get('address'), info.get('netmask')) - # Configure an ipv4 address. - ifconfig = (info.get('address') + ' netmask ' + - info.get('netmask')) - - # Configure the gateway. 
- self.updatercconf('defaultrouter', info.get('gateway')) - - if 'dns-nameservers' in info: - nameservers.extend(info['dns-nameservers']) - if 'dns-search' in info: - searchdomains.extend(info['dns-search']) - else: - ifconfig = 'DHCP' - - self.updatercconf('ifconfig_' + dev, ifconfig) - - # Try to read the /etc/resolv.conf or just start from scratch if that - # fails. - try: - resolvconf = ResolvConf(util.load_file(self.resolv_conf_fn)) - resolvconf.parse() - except IOError: - util.logexc(LOG, "Failed to parse %s, use new empty file", - self.resolv_conf_fn) - resolvconf = ResolvConf('') - resolvconf.parse() - - # Add some nameservers - for server in nameservers: - try: - resolvconf.add_nameserver(server) - except ValueError: - util.logexc(LOG, "Failed to add nameserver %s", server) - - # And add any searchdomains. - for domain in searchdomains: - try: - resolvconf.add_search_domain(domain) - except ValueError: - util.logexc(LOG, "Failed to add search domain %s", domain) - util.write_file(self.resolv_conf_fn, str(resolvconf), 0o644) + {'type': 'physical', 'name': name, + 'mac_address': mac, 'subnets': [{'type': 'dhcp'}]}) + return nconf - return dev_names + def _write_network_config(self, netconfig): + return self._supported_write_network_config(netconfig) def apply_locale(self, locale, out_fn=None): # Adjust the locals value to the new value @@ -604,18 +220,12 @@ class Distro(distros.Distro): util.logexc(LOG, "Failed to restore %s backup", self.login_conf_fn) - def _bring_up_interface(self, device_name): - if device_name.startswith('lo'): - return - dev = self.getnetifname(device_name) - cmd = ['/etc/rc.d/netif', 'start', dev] - LOG.debug("Attempting to bring up interface %s using command %s", - dev, cmd) - # This could return 1 when the interface has already been put UP by the - # OS. This is just fine. - (_out, err) = util.subp(cmd, rcs=[0, 1]) - if len(err): - LOG.warning("Error running %s: %s", cmd, err) + def apply_network_config_names(self, netconfig): + # This is handled by the freebsd network renderer. It writes in + # /etc/rc.conf a line with the following format: + # ifconfig_OLDNAME_name=NEWNAME + # FreeBSD network script will rename the interface automatically. + return def install_packages(self, pkglist): self.update_package_sources() diff --git a/cloudinit/distros/opensuse.py b/cloudinit/distros/opensuse.py index 1bfe0478..dd56a3f4 100644 --- a/cloudinit/distros/opensuse.py +++ b/cloudinit/distros/opensuse.py @@ -37,7 +37,10 @@ class Distro(distros.Distro): renderer_configs = { 'sysconfig': { 'control': 'etc/sysconfig/network/config', + 'flavor': 'suse', 'iface_templates': '%(base)s/network/ifcfg-%(name)s', + 'netrules_path': ( + 'etc/udev/rules.d/85-persistent-net-cloud-init.rules'), 'route_templates': { 'ipv4': '%(base)s/network/ifroute-%(name)s', 'ipv6': '%(base)s/network/ifroute-%(name)s', diff --git a/cloudinit/distros/parsers/hostname.py b/cloudinit/distros/parsers/hostname.py index dd434ac6..e74c083c 100644 --- a/cloudinit/distros/parsers/hostname.py +++ b/cloudinit/distros/parsers/hostname.py @@ -4,7 +4,7 @@ # # This file is part of cloud-init. See LICENSE file for license information. -from six import StringIO +from io import StringIO from cloudinit.distros.parsers import chop_comment diff --git a/cloudinit/distros/parsers/hosts.py b/cloudinit/distros/parsers/hosts.py index 64444581..54e4e934 100644 --- a/cloudinit/distros/parsers/hosts.py +++ b/cloudinit/distros/parsers/hosts.py @@ -4,7 +4,7 @@ # # This file is part of cloud-init. 
See LICENSE file for license information. -from six import StringIO +from io import StringIO from cloudinit.distros.parsers import chop_comment diff --git a/cloudinit/distros/parsers/resolv_conf.py b/cloudinit/distros/parsers/resolv_conf.py index a62055ae..299d54b5 100644 --- a/cloudinit/distros/parsers/resolv_conf.py +++ b/cloudinit/distros/parsers/resolv_conf.py @@ -4,7 +4,7 @@ # # This file is part of cloud-init. See LICENSE file for license information. -from six import StringIO +from io import StringIO from cloudinit.distros.parsers import chop_comment from cloudinit import log as logging diff --git a/cloudinit/distros/parsers/sys_conf.py b/cloudinit/distros/parsers/sys_conf.py index c27b5d5d..dee4c551 100644 --- a/cloudinit/distros/parsers/sys_conf.py +++ b/cloudinit/distros/parsers/sys_conf.py @@ -4,11 +4,9 @@ # # This file is part of cloud-init. See LICENSE file for license information. -import six -from six import StringIO - import pipes import re +from io import StringIO # This library is used to parse/write # out the various sysconfig files edited (best attempt effort) @@ -43,6 +41,13 @@ def _contains_shell_variable(text): class SysConf(configobj.ConfigObj): + """A configobj.ConfigObj subclass specialised for sysconfig files. + + :param contents: + The sysconfig file to parse, in a format accepted by + ``configobj.ConfigObj.__init__`` (i.e. "a filename, file like object, + or list of lines"). + """ def __init__(self, contents): configobj.ConfigObj.__init__(self, contents, interpolation=False, @@ -58,7 +63,7 @@ class SysConf(configobj.ConfigObj): return out_contents.getvalue() def _quote(self, value, multiline=False): - if not isinstance(value, six.string_types): + if not isinstance(value, str): raise ValueError('Value "%s" is not a string' % (value)) if len(value) == 0: return '' diff --git a/cloudinit/distros/ubuntu.py b/cloudinit/distros/ubuntu.py index e5fcbc58..23be3bdd 100644 --- a/cloudinit/distros/ubuntu.py +++ b/cloudinit/distros/ubuntu.py @@ -30,9 +30,9 @@ class Distro(debian.Distro): } self.renderer_configs = { "eni": {"eni_path": self.network_conf_fn["eni"], - "eni_header": debian.ENI_HEADER}, + "eni_header": debian.NETWORK_FILE_HEADER}, "netplan": {"netplan_path": self.network_conf_fn["netplan"], - "netplan_header": debian.ENI_HEADER, + "netplan_header": debian.NETWORK_FILE_HEADER, "postcmds": True} } diff --git a/cloudinit/distros/ug_util.py b/cloudinit/distros/ug_util.py index 9378dd78..08446a95 100755 --- a/cloudinit/distros/ug_util.py +++ b/cloudinit/distros/ug_util.py @@ -9,8 +9,6 @@ # # This file is part of cloud-init. See LICENSE file for license information. 
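Editorial aside: a minimal usage sketch for the SysConf docstring just added, based only on what the docstring and __str__ above promise; the sample keys are illustrative:

from cloudinit.distros.parsers.sys_conf import SysConf

conf = SysConf(['HOSTNAME=myhost', 'NETWORKING=yes'])  # a list of lines
conf['HOSTNAME'] = 'otherhost'                         # dict-style access
print(str(conf))  # __str__ re-renders shell-compatible KEY=value lines
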
-import six - from cloudinit import log as logging from cloudinit import type_utils from cloudinit import util @@ -29,7 +27,7 @@ LOG = logging.getLogger(__name__) # is the standard form used in the rest # of cloud-init def _normalize_groups(grp_cfg): - if isinstance(grp_cfg, six.string_types): + if isinstance(grp_cfg, str): grp_cfg = grp_cfg.strip().split(",") if isinstance(grp_cfg, list): c_grp_cfg = {} @@ -39,7 +37,7 @@ def _normalize_groups(grp_cfg): if k not in c_grp_cfg: if isinstance(v, list): c_grp_cfg[k] = list(v) - elif isinstance(v, six.string_types): + elif isinstance(v, str): c_grp_cfg[k] = [v] else: raise TypeError("Bad group member type %s" % @@ -47,12 +45,12 @@ def _normalize_groups(grp_cfg): else: if isinstance(v, list): c_grp_cfg[k].extend(v) - elif isinstance(v, six.string_types): + elif isinstance(v, str): c_grp_cfg[k].append(v) else: raise TypeError("Bad group member type %s" % type_utils.obj_name(v)) - elif isinstance(i, six.string_types): + elif isinstance(i, str): if i not in c_grp_cfg: c_grp_cfg[i] = [] else: @@ -89,7 +87,7 @@ def _normalize_users(u_cfg, def_user_cfg=None): if isinstance(u_cfg, dict): ad_ucfg = [] for (k, v) in u_cfg.items(): - if isinstance(v, (bool, int, float) + six.string_types): + if isinstance(v, (bool, int, float, str)): if util.is_true(v): ad_ucfg.append(str(k)) elif isinstance(v, dict): @@ -99,12 +97,12 @@ def _normalize_users(u_cfg, def_user_cfg=None): raise TypeError(("Unmappable user value type %s" " for key %s") % (type_utils.obj_name(v), k)) u_cfg = ad_ucfg - elif isinstance(u_cfg, six.string_types): + elif isinstance(u_cfg, str): u_cfg = util.uniq_merge_sorted(u_cfg) users = {} for user_config in u_cfg: - if isinstance(user_config, (list,) + six.string_types): + if isinstance(user_config, (list, str)): for u in util.uniq_merge(user_config): if u and u not in users: users[u] = {} @@ -209,7 +207,7 @@ def normalize_users_groups(cfg, distro): old_user = cfg['user'] # Translate it into the format that is more useful # going forward - if isinstance(old_user, six.string_types): + if isinstance(old_user, str): old_user = { 'name': old_user, } @@ -238,7 +236,7 @@ def normalize_users_groups(cfg, distro): default_user_config = util.mergemanydict([old_user, distro_user_config]) base_users = cfg.get('users', []) - if not isinstance(base_users, (list, dict) + six.string_types): + if not isinstance(base_users, (list, dict, str)): LOG.warning(("Format for 'users' key must be a comma separated string" " or a dictionary or a list and not %s"), type_utils.obj_name(base_users)) @@ -252,7 +250,7 @@ def normalize_users_groups(cfg, distro): base_users.append({'name': 'default'}) elif isinstance(base_users, dict): base_users['default'] = dict(base_users).get('default', True) - elif isinstance(base_users, six.string_types): + elif isinstance(base_users, str): # Just append it on to be re-parsed later base_users += ",default" diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py index 3b7b17f1..34acfe84 100644 --- a/cloudinit/ec2_utils.py +++ b/cloudinit/ec2_utils.py @@ -134,25 +134,30 @@ class MetadataMaterializer(object): return joined -def _skip_retry_on_codes(status_codes, _request_args, cause): +def skip_retry_on_codes(status_codes, _request_args, cause): """Returns False if cause.code is in status_codes.""" return cause.code not in status_codes def get_instance_userdata(api_version='latest', metadata_address='http://169.254.169.254', - ssl_details=None, timeout=5, retries=5): + ssl_details=None, timeout=5, retries=5, + headers_cb=None, 
headers_redact=None, + exception_cb=None): ud_url = url_helper.combine_url(metadata_address, api_version) ud_url = url_helper.combine_url(ud_url, 'user-data') user_data = '' try: - # It is ok for userdata to not exist (thats why we are stopping if - # NOT_FOUND occurs) and just in that case returning an empty string. - exception_cb = functools.partial(_skip_retry_on_codes, - SKIP_USERDATA_CODES) + if not exception_cb: + # It is ok for userdata to not exist (thats why we are stopping if + # NOT_FOUND occurs) and just in that case returning an empty + # string. + exception_cb = functools.partial(skip_retry_on_codes, + SKIP_USERDATA_CODES) response = url_helper.read_file_or_url( ud_url, ssl_details=ssl_details, timeout=timeout, - retries=retries, exception_cb=exception_cb) + retries=retries, exception_cb=exception_cb, headers_cb=headers_cb, + headers_redact=headers_redact) user_data = response.contents except url_helper.UrlError as e: if e.code not in SKIP_USERDATA_CODES: @@ -165,11 +170,15 @@ def get_instance_userdata(api_version='latest', def _get_instance_metadata(tree, api_version='latest', metadata_address='http://169.254.169.254', ssl_details=None, timeout=5, retries=5, - leaf_decoder=None): + leaf_decoder=None, headers_cb=None, + headers_redact=None, + exception_cb=None): md_url = url_helper.combine_url(metadata_address, api_version, tree) caller = functools.partial( url_helper.read_file_or_url, ssl_details=ssl_details, - timeout=timeout, retries=retries) + timeout=timeout, retries=retries, headers_cb=headers_cb, + headers_redact=headers_redact, + exception_cb=exception_cb) def mcaller(url): return caller(url).contents @@ -191,22 +200,32 @@ def _get_instance_metadata(tree, api_version='latest', def get_instance_metadata(api_version='latest', metadata_address='http://169.254.169.254', ssl_details=None, timeout=5, retries=5, - leaf_decoder=None): + leaf_decoder=None, headers_cb=None, + headers_redact=None, + exception_cb=None): # Note, 'meta-data' explicitly has trailing /. 
# this is required for CloudStack (LP: #1356855) return _get_instance_metadata(tree='meta-data/', api_version=api_version, metadata_address=metadata_address, ssl_details=ssl_details, timeout=timeout, - retries=retries, leaf_decoder=leaf_decoder) + retries=retries, leaf_decoder=leaf_decoder, + headers_redact=headers_redact, + headers_cb=headers_cb, + exception_cb=exception_cb) def get_instance_identity(api_version='latest', metadata_address='http://169.254.169.254', ssl_details=None, timeout=5, retries=5, - leaf_decoder=None): + leaf_decoder=None, headers_cb=None, + headers_redact=None, + exception_cb=None): return _get_instance_metadata(tree='dynamic/instance-identity', api_version=api_version, metadata_address=metadata_address, ssl_details=ssl_details, timeout=timeout, - retries=retries, leaf_decoder=leaf_decoder) + retries=retries, leaf_decoder=leaf_decoder, + headers_redact=headers_redact, + headers_cb=headers_cb, + exception_cb=exception_cb) # vi: ts=4 expandtab diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py index 0db75af9..a409ff8a 100644 --- a/cloudinit/handlers/__init__.py +++ b/cloudinit/handlers/__init__.py @@ -10,14 +10,12 @@ import abc import os -import six - -from cloudinit.settings import (PER_ALWAYS, PER_INSTANCE, FREQUENCIES) from cloudinit import importer from cloudinit import log as logging from cloudinit import type_utils from cloudinit import util +from cloudinit.settings import (PER_ALWAYS, PER_INSTANCE, FREQUENCIES) LOG = logging.getLogger(__name__) @@ -60,8 +58,7 @@ INCLUSION_SRCH = sorted(list(INCLUSION_TYPES_MAP.keys()), key=(lambda e: 0 - len(e))) -@six.add_metaclass(abc.ABCMeta) -class Handler(object): +class Handler(metaclass=abc.ABCMeta): def __init__(self, frequency, version=2): self.handler_version = version @@ -159,7 +156,7 @@ def _extract_first_or_bytes(blob, size): # Extract the first line or upto X symbols for text objects # Extract first X bytes for binary objects try: - if isinstance(blob, six.string_types): + if isinstance(blob, str): start = blob.split("\n", 1)[0] else: # We want to avoid decoding the whole blob (it might be huge) diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py index 99bf0e61..2a307364 100644 --- a/cloudinit/handlers/cloud_config.py +++ b/cloudinit/handlers/cloud_config.py @@ -14,6 +14,7 @@ from cloudinit import handlers from cloudinit import log as logging from cloudinit import mergers from cloudinit import util +from cloudinit import safeyaml from cloudinit.settings import (PER_ALWAYS) @@ -75,7 +76,7 @@ class CloudConfigPartHandler(handlers.Handler): '', ] lines.extend(file_lines) - lines.append(util.yaml_dumps(self.cloud_buf)) + lines.append(safeyaml.dumps(self.cloud_buf)) else: lines = [] util.write_file(self.cloud_fn, "\n".join(lines), 0o600) diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py index 83fb0724..003cad60 100644 --- a/cloudinit/handlers/upstart_job.py +++ b/cloudinit/handlers/upstart_job.py @@ -89,7 +89,7 @@ def _has_suitable_upstart(): util.subp(["dpkg", "--compare-versions", dpkg_ver, "ge", good]) return True except util.ProcessExecutionError as e: - if e.exit_code is 1: + if e.exit_code == 1: pass else: util.logexc(LOG, "dpkg --compare-versions failed [%s]", diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py index dcd2645e..7d2a3305 100644 --- a/cloudinit/helpers.py +++ b/cloudinit/helpers.py @@ -12,10 +12,8 @@ from time import time import contextlib import os - -from six import StringIO -from 
six.moves.configparser import ( - NoSectionError, NoOptionError, RawConfigParser) +from configparser import NoSectionError, NoOptionError, RawConfigParser +from io import StringIO from cloudinit.settings import (PER_INSTANCE, PER_ALWAYS, PER_ONCE, CFG_ENV_NAME) diff --git a/cloudinit/log.py b/cloudinit/log.py index 5ae312ba..827db12b 100644 --- a/cloudinit/log.py +++ b/cloudinit/log.py @@ -8,17 +8,13 @@ # # This file is part of cloud-init. See LICENSE file for license information. +import collections +import io import logging import logging.config import logging.handlers - -import collections import os import sys - -import six -from six import StringIO - import time # Logging levels for easy access @@ -74,13 +70,13 @@ def setupLogging(cfg=None): log_cfgs = [] log_cfg = cfg.get('logcfg') - if log_cfg and isinstance(log_cfg, six.string_types): + if log_cfg and isinstance(log_cfg, str): # If there is a 'logcfg' entry in the config, # respect it, it is the old keyname log_cfgs.append(str(log_cfg)) elif "log_cfgs" in cfg: for a_cfg in cfg['log_cfgs']: - if isinstance(a_cfg, six.string_types): + if isinstance(a_cfg, str): log_cfgs.append(a_cfg) elif isinstance(a_cfg, (collections.Iterable)): cfg_str = [str(c) for c in a_cfg] @@ -100,7 +96,7 @@ def setupLogging(cfg=None): # is acting as a file) pass else: - log_cfg = StringIO(log_cfg) + log_cfg = io.StringIO(log_cfg) # Attempt to load its config logging.config.fileConfig(log_cfg) # The first one to work wins! diff --git a/cloudinit/mergers/__init__.py b/cloudinit/mergers/__init__.py index 7fbc25ff..668e3cd6 100644 --- a/cloudinit/mergers/__init__.py +++ b/cloudinit/mergers/__init__.py @@ -6,8 +6,6 @@ import re -import six - from cloudinit import importer from cloudinit import log as logging from cloudinit import type_utils @@ -85,7 +83,7 @@ def dict_extract_mergers(config): raw_mergers = config.pop('merge_type', None) if raw_mergers is None: return parsed_mergers - if isinstance(raw_mergers, six.string_types): + if isinstance(raw_mergers, str): return string_extract_mergers(raw_mergers) for m in raw_mergers: if isinstance(m, (dict)): diff --git a/cloudinit/mergers/m_dict.py b/cloudinit/mergers/m_dict.py index 6c5fddc2..93472f13 100644 --- a/cloudinit/mergers/m_dict.py +++ b/cloudinit/mergers/m_dict.py @@ -4,8 +4,6 @@ # # This file is part of cloud-init. See LICENSE file for license information. -import six - DEF_MERGE_TYPE = 'no_replace' MERGE_TYPES = ('replace', DEF_MERGE_TYPE,) @@ -47,7 +45,7 @@ class Merger(object): return new_v if isinstance(new_v, (list, tuple)) and self._recurse_array: return self._merger.merge(old_v, new_v) - if isinstance(new_v, six.string_types) and self._recurse_str: + if isinstance(new_v, str) and self._recurse_str: return self._merger.merge(old_v, new_v) if isinstance(new_v, (dict)) and self._recurse_dict: return self._merger.merge(old_v, new_v) diff --git a/cloudinit/mergers/m_list.py b/cloudinit/mergers/m_list.py index daa0469a..19f32771 100644 --- a/cloudinit/mergers/m_list.py +++ b/cloudinit/mergers/m_list.py @@ -4,8 +4,6 @@ # # This file is part of cloud-init. See LICENSE file for license information. 
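Editorial aside: the headers_cb/headers_redact/exception_cb parameters threaded through ec2_utils earlier in this diff let callers attach, and avoid logging, per-request headers such as an IMDSv2 session token. A hypothetical caller is sketched below; the token value is fake, and only X-aws-ec2-metadata-token is a real header name:

from cloudinit import ec2_utils

token = 'EXAMPLE-SESSION-TOKEN'  # assumed to come from a prior token request

metadata = ec2_utils.get_instance_metadata(
    api_version='2016-09-02',
    headers_cb=lambda url: {'X-aws-ec2-metadata-token': token},
    headers_redact=['X-aws-ec2-metadata-token'],  # keep token out of logs
)
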
-import six - DEF_MERGE_TYPE = 'replace' MERGE_TYPES = ('append', 'prepend', DEF_MERGE_TYPE, 'no_replace') @@ -63,7 +61,7 @@ class Merger(object): return old_v if isinstance(new_v, (list, tuple)) and self._recurse_array: return self._merger.merge(old_v, new_v) - if isinstance(new_v, six.string_types) and self._recurse_str: + if isinstance(new_v, str) and self._recurse_str: return self._merger.merge(old_v, new_v) if isinstance(new_v, (dict)) and self._recurse_dict: return self._merger.merge(old_v, new_v) diff --git a/cloudinit/mergers/m_str.py b/cloudinit/mergers/m_str.py index 629df58e..539e3e29 100644 --- a/cloudinit/mergers/m_str.py +++ b/cloudinit/mergers/m_str.py @@ -4,8 +4,6 @@ # # This file is part of cloud-init. See LICENSE file for license information. -import six - class Merger(object): def __init__(self, _merger, opts): @@ -23,13 +21,10 @@ class Merger(object): # perform the following action, if appending we will # merge them together, otherwise we will just return value. def _on_str(self, value, merge_with): - if not isinstance(value, six.string_types): + if not isinstance(value, str): return merge_with if not self._append: return merge_with - if isinstance(value, six.text_type): - return value + six.text_type(merge_with) - else: - return value + six.binary_type(merge_with) + return value + merge_with # vi: ts=4 expandtab diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index 3642fb1f..1d5eb535 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -9,6 +9,7 @@ import errno import logging import os import re +from functools import partial from cloudinit.net.network_state import mask_to_net_prefix from cloudinit import util @@ -108,6 +109,141 @@ def is_bond(devname): return os.path.exists(sys_dev_path(devname, "bonding")) +def get_master(devname): + """Return the master path for devname, or None if no master""" + path = sys_dev_path(devname, path="master") + if os.path.exists(path): + return path + return None + + +def master_is_bridge_or_bond(devname): + """Return a bool indicating if devname's master is a bridge or bond""" + master_path = get_master(devname) + if master_path is None: + return False + bonding_path = os.path.join(master_path, "bonding") + bridge_path = os.path.join(master_path, "bridge") + return (os.path.exists(bonding_path) or os.path.exists(bridge_path)) + + +def is_netfailover(devname, driver=None): + """ netfailover driver uses 3 nics, master, primary and standby. + this returns True if the device is either the primary or standby + as these devices are to be ignored. + """ + if driver is None: + driver = device_driver(devname) + if is_netfail_primary(devname, driver) or is_netfail_standby(devname, + driver): + return True + return False + + +def get_dev_features(devname): + """ Returns a str from reading /sys/class/net/<devname>/device/features.""" + features = '' + try: + features = read_sys_net(devname, 'device/features') + except Exception: + pass + return features + + +def has_netfail_standby_feature(devname): + """ Return True if VIRTIO_NET_F_STANDBY bit (62) is set. 
+ + https://github.com/torvalds/linux/blob/ \ + 089cf7f6ecb266b6a4164919a2e69bd2f938374a/ \ + include/uapi/linux/virtio_net.h#L60 + """ + features = get_dev_features(devname) + if not features or len(features) < 64: + return False + return features[62] == "1" + + +def is_netfail_master(devname, driver=None): + """ A device is a "netfail master" device if: + + - The device does NOT have the 'master' sysfs attribute + - The device driver is 'virtio_net' + - The device has the standby feature bit set + + Return True if all of the above is True. + """ + if get_master(devname) is not None: + return False + + if driver is None: + driver = device_driver(devname) + + if driver != "virtio_net": + return False + + if not has_netfail_standby_feature(devname): + return False + + return True + + +def is_netfail_primary(devname, driver=None): + """ A device is a "netfail primary" device if: + + - the device has a 'master' sysfs file + - the device driver is not 'virtio_net' + - the 'master' sysfs file points to device with virtio_net driver + - the 'master' device has the 'standby' feature bit set + + Return True if all of the above is True. + """ + # /sys/class/net/<devname>/master -> ../../<master devname> + master_sysfs_path = sys_dev_path(devname, path='master') + if not os.path.exists(master_sysfs_path): + return False + + if driver is None: + driver = device_driver(devname) + + if driver == "virtio_net": + return False + + master_devname = os.path.basename(os.path.realpath(master_sysfs_path)) + master_driver = device_driver(master_devname) + if master_driver != "virtio_net": + return False + + master_has_standby = has_netfail_standby_feature(master_devname) + if not master_has_standby: + return False + + return True + + +def is_netfail_standby(devname, driver=None): + """ A device is a "netfail standby" device if: + + - The device has a 'master' sysfs attribute + - The device driver is 'virtio_net' + - The device has the standby feature bit set + + Return True if all of the above is True. + """ + if get_master(devname) is None: + return False + + if driver is None: + driver = device_driver(devname) + + if driver != "virtio_net": + return False + + if not has_netfail_standby_feature(devname): + return False + + return True + + def is_renamed(devname): """ /* interface name assignment types (sysfs name_assign_type attribute) */ @@ -171,6 +307,9 @@ def device_devid(devname): def get_devicelist(): + if util.is_FreeBSD(): + return list(get_interfaces_by_mac().values()) + try: devs = os.listdir(get_sys_class_path()) except OSError as e: @@ -193,6 +332,35 @@ def is_disabled_cfg(cfg): def find_fallback_nic(blacklist_drivers=None): """Return the name of the 'fallback' network device.""" + if util.is_FreeBSD(): + return find_fallback_nic_on_freebsd(blacklist_drivers) + else: + return find_fallback_nic_on_linux(blacklist_drivers) + + +def find_fallback_nic_on_freebsd(blacklist_drivers=None): + """Return the name of the 'fallback' network device on FreeBSD. 
+ + @param blacklist_drivers: currently ignored + @return default interface, or None + + + we'll use the first interface from ``ifconfig -l -u ether`` + """ + stdout, _stderr = util.subp(['ifconfig', '-l', '-u', 'ether']) + values = stdout.split() + if values: + return values[0] + # On FreeBSD <= 10, 'ifconfig -l' ignores the interfaces with DOWN + # status + values = list(get_interfaces_by_mac().values()) + values.sort() + if values: + return values[0] + + +def find_fallback_nic_on_linux(blacklist_drivers=None): + """Return the name of the 'fallback' network device on Linux.""" if not blacklist_drivers: blacklist_drivers = [] @@ -226,6 +394,9 @@ def find_fallback_nic(blacklist_drivers=None): if is_bond(interface): # skip any bonds continue + if is_netfailover(interface): + # ignore netfailover primary/standby interfaces + continue carrier = read_sys_net_int(interface, 'carrier') if carrier: connected.append(interface) @@ -250,7 +421,7 @@ def find_fallback_nic(blacklist_drivers=None): potential_interfaces = possibly_connected # if eth0 exists use it above anything else, otherwise get the interface - # that we can read 'first' (using the sorted defintion of first). + # that we can read 'first' (using the sorted definition of first). names = list(sorted(potential_interfaces, key=natural_sort_key)) if DEFAULT_PRIMARY_INTERFACE in names: names.remove(DEFAULT_PRIMARY_INTERFACE) @@ -264,46 +435,34 @@ def find_fallback_nic(blacklist_drivers=None): def generate_fallback_config(blacklist_drivers=None, config_driver=None): - """Determine which attached net dev is most likely to have a connection and - generate network state to run dhcp on that interface""" - + """Generate network cfg v2 for dhcp on the NIC most likely connected.""" if not config_driver: config_driver = False target_name = find_fallback_nic(blacklist_drivers=blacklist_drivers) - if target_name: - target_mac = read_sys_net_safe(target_name, 'address') - nconf = {'config': [], 'version': 1} - cfg = {'type': 'physical', 'name': target_name, - 'mac_address': target_mac, 'subnets': [{'type': 'dhcp'}]} - # inject the device driver name, dev_id into config if enabled and - # device has a valid device driver value - if config_driver: - driver = device_driver(target_name) - if driver: - cfg['params'] = { - 'driver': driver, - 'device_id': device_devid(target_name), - } - nconf['config'].append(cfg) - return nconf - else: + if not target_name: # can't read any interfaces addresses (or there are none); give up return None + # netfail cannot use mac for matching, they have duplicate macs + if is_netfail_master(target_name): + match = {'name': target_name} + else: + match = { + 'macaddress': read_sys_net_safe(target_name, 'address').lower()} + cfg = {'dhcp4': True, 'set-name': target_name, 'match': match} + if config_driver: + driver = device_driver(target_name) + if driver: + cfg['match']['driver'] = driver + nconf = {'ethernets': {target_name: cfg}, 'version': 2} + return nconf -def apply_network_config_names(netcfg, strict_present=True, strict_busy=True): - """read the network config and rename devices accordingly. - if strict_present is false, then do not raise exception if no devices - match. if strict_busy is false, then do not raise exception if the - device cannot be renamed because it is currently configured. - renames are only attempted for interfaces of type 'physical'. 
It is - expected that the network system will create other devices with the - correct name in place.""" +def extract_physdevs(netcfg): def _version_1(netcfg): - renames = [] + physdevs = [] for ent in netcfg.get('config', {}): if ent.get('type') != 'physical': continue @@ -317,11 +476,11 @@ def apply_network_config_names(netcfg, strict_present=True, strict_busy=True): driver = device_driver(name) if not device_id: device_id = device_devid(name) - renames.append([mac, name, driver, device_id]) - return renames + physdevs.append([mac, name, driver, device_id]) + return physdevs def _version_2(netcfg): - renames = [] + physdevs = [] for ent in netcfg.get('ethernets', {}).values(): # only rename if configured to do so name = ent.get('set-name') @@ -337,16 +496,69 @@ def apply_network_config_names(netcfg, strict_present=True, strict_busy=True): driver = device_driver(name) if not device_id: device_id = device_devid(name) - renames.append([mac, name, driver, device_id]) - return renames + physdevs.append([mac, name, driver, device_id]) + return physdevs + + version = netcfg.get('version') + if version == 1: + return _version_1(netcfg) + elif version == 2: + return _version_2(netcfg) + + raise RuntimeError('Unknown network config version: %s' % version) - if netcfg.get('version') == 1: - return _rename_interfaces(_version_1(netcfg)) - elif netcfg.get('version') == 2: - return _rename_interfaces(_version_2(netcfg)) - raise RuntimeError('Failed to apply network config names. Found bad' - ' network config version: %s' % netcfg.get('version')) +def wait_for_physdevs(netcfg, strict=True): + physdevs = extract_physdevs(netcfg) + + # set of expected iface names and mac addrs + expected_ifaces = dict([(iface[0], iface[1]) for iface in physdevs]) + expected_macs = set(expected_ifaces.keys()) + + # set of current macs + present_macs = get_interfaces_by_mac().keys() + + # compare the set of expected mac address values to + # the current macs present; we only check MAC as cloud-init + # has not yet renamed interfaces and the netcfg may include + # such renames. + for _ in range(0, 5): + if expected_macs.issubset(present_macs): + LOG.debug('net: all expected physical devices present') + return + + missing = expected_macs.difference(present_macs) + LOG.debug('net: waiting for expected net devices: %s', missing) + for mac in missing: + # trigger a settle, unless this interface exists + syspath = sys_dev_path(expected_ifaces[mac]) + settle = partial(util.udevadm_settle, exists=syspath) + msg = 'Waiting for udev events to settle or %s exists' % syspath + util.log_time(LOG.debug, msg, func=settle) + + # update present_macs after settles + present_macs = get_interfaces_by_mac().keys() + + msg = 'Not all expected physical devices present: %s' % missing + LOG.warning(msg) + if strict: + raise RuntimeError(msg) + + +def apply_network_config_names(netcfg, strict_present=True, strict_busy=True): + """read the network config and rename devices accordingly. + if strict_present is false, then do not raise exception if no devices + match. if strict_busy is false, then do not raise exception if the + device cannot be renamed because it is currently configured. + + renames are only attempted for interfaces of type 'physical'. 
It is + expected that the network system will create other devices with the + correct name in place.""" + + try: + _rename_interfaces(extract_physdevs(netcfg)) + except RuntimeError as e: + raise RuntimeError('Failed to apply network config names: %s' % e) def interface_has_own_mac(ifname, strict=False): @@ -585,6 +797,40 @@ def get_ib_interface_hwaddr(ifname, ethernet_format): def get_interfaces_by_mac(): + if util.is_FreeBSD(): + return get_interfaces_by_mac_on_freebsd() + else: + return get_interfaces_by_mac_on_linux() + + +def get_interfaces_by_mac_on_freebsd(): + (out, _) = util.subp(['ifconfig', '-a', 'ether']) + + # flatten each interface block in a single line + def flatten(out): + curr_block = '' + for l in out.split('\n'): + if l.startswith('\t'): + curr_block += l + else: + if curr_block: + yield curr_block + curr_block = l + yield curr_block + + # looks for interface and mac in a list of flatten block + def find_mac(flat_list): + for block in flat_list: + m = re.search( + r"^(?P<ifname>\S*): .*ether\s(?P<mac>[\da-f:]{17}).*", + block) + if m: + yield (m.group('mac'), m.group('ifname')) + results = {mac: ifname for mac, ifname in find_mac(flatten(out))} + return results + + +def get_interfaces_by_mac_on_linux(): """Build a dictionary of tuples {mac: name}. Bridges and any devices that have a 'stolen' mac are excluded.""" @@ -622,6 +868,12 @@ def get_interfaces(): continue if is_vlan(name): continue + if is_bond(name): + continue + if get_master(name) is not None and not master_is_bridge_or_bond(name): + continue + if is_netfailover(name): + continue mac = get_interface_mac(name) # some devices may not have a mac (tun0) if not mac: @@ -677,7 +929,7 @@ class EphemeralIPv4Network(object): """ def __init__(self, interface, ip, prefix_or_mask, broadcast, router=None, - connectivity_url=None): + connectivity_url=None, static_routes=None): """Setup context manager and validate call signature. @param interface: Name of the network interface to bring up. @@ -688,6 +940,7 @@ class EphemeralIPv4Network(object): @param router: Optionally the default gateway IP. @param connectivity_url: Optionally, a URL to verify if a usable connection already exists. + @param static_routes: Optionally a list of static routes from DHCP """ if not all([interface, ip, prefix_or_mask, broadcast]): raise ValueError( @@ -704,6 +957,7 @@ class EphemeralIPv4Network(object): self.ip = ip self.broadcast = broadcast self.router = router + self.static_routes = static_routes self.cleanup_cmds = [] # List of commands to run to cleanup state. def __enter__(self): @@ -716,7 +970,21 @@ class EphemeralIPv4Network(object): return self._bringup_device() - if self.router: + + # rfc3442 requires us to ignore the router config *if* classless static + # routes are provided. + # + # https://tools.ietf.org/html/rfc3442 + # + # If the DHCP server returns both a Classless Static Routes option and + # a Router option, the DHCP client MUST ignore the Router option. + # + # Similarly, if the DHCP server returns both a Classless Static Routes + # option and a Static Routes option, the DHCP client MUST ignore the + # Static Routes option. 
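+        #
+        # For example (illustration only): given
+        # static_routes=[('0.0.0.0/0', '10.0.0.1')] and router='10.0.0.254',
+        # the static routes are installed below and the router value is
+        # never used.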
+ if self.static_routes: + self._bringup_static_routes() + elif self.router: self._bringup_router() def __exit__(self, excp_type, excp_value, excp_traceback): @@ -760,6 +1028,20 @@ class EphemeralIPv4Network(object): ['ip', '-family', 'inet', 'addr', 'del', cidr, 'dev', self.interface]) + def _bringup_static_routes(self): + # static_routes = [("169.254.169.254/32", "130.56.248.255"), + # ("0.0.0.0/0", "130.56.240.1")] + for net_address, gateway in self.static_routes: + via_arg = [] + if gateway != "0.0.0.0/0": + via_arg = ['via', gateway] + util.subp( + ['ip', '-4', 'route', 'add', net_address] + via_arg + + ['dev', self.interface], capture=True) + self.cleanup_cmds.insert( + 0, ['ip', '-4', 'route', 'del', net_address] + via_arg + + ['dev', self.interface]) + def _bringup_router(self): """Perform the ip commands to fully setup the router if needed.""" # Check if a default route exists and exit if it does diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py index f89a0f73..64e1c699 100755 --- a/cloudinit/net/cmdline.py +++ b/cloudinit/net/cmdline.py @@ -5,20 +5,92 @@ # # This file is part of cloud-init. See LICENSE file for license information. +import abc import base64 import glob import gzip import io import os +from cloudinit import util + from . import get_devicelist from . import read_sys_net_safe -from cloudinit import util - _OPEN_ISCSI_INTERFACE_FILE = "/run/initramfs/open-iscsi.interface" +class InitramfsNetworkConfigSource(metaclass=abc.ABCMeta): + """ABC for net config sources that read config written by initramfses""" + + @abc.abstractmethod + def is_applicable(self): + # type: () -> bool + """Is this initramfs config source applicable to the current system?""" + pass + + @abc.abstractmethod + def render_config(self): + # type: () -> dict + """Render a v1 network config from the initramfs configuration""" + pass + + +class KlibcNetworkConfigSource(InitramfsNetworkConfigSource): + """InitramfsNetworkConfigSource for klibc initramfs (i.e. Debian/Ubuntu) + + Has three parameters, but they are intended to make testing simpler, _not_ + for use in production code. (This is indicated by the prepended + underscores.) 
+ """ + + def __init__(self, _files=None, _mac_addrs=None, _cmdline=None): + self._files = _files + self._mac_addrs = _mac_addrs + self._cmdline = _cmdline + + # Set defaults here, as they require computation that we don't want to + # do at method definition time + if self._files is None: + self._files = _get_klibc_net_cfg_files() + if self._cmdline is None: + self._cmdline = util.get_cmdline() + if self._mac_addrs is None: + self._mac_addrs = {} + for k in get_devicelist(): + mac_addr = read_sys_net_safe(k, 'address') + if mac_addr: + self._mac_addrs[k] = mac_addr + + def is_applicable(self): + # type: () -> bool + """ + Return whether this system has klibc initramfs network config or not + + Will return True if: + (a) klibc files exist in /run, AND + (b) either: + (i) ip= or ip6= are on the kernel cmdline, OR + (ii) an open-iscsi interface file is present in the system + """ + if self._files: + if 'ip=' in self._cmdline or 'ip6=' in self._cmdline: + return True + if os.path.exists(_OPEN_ISCSI_INTERFACE_FILE): + # iBft can configure networking without ip= + return True + return False + + def render_config(self): + # type: () -> dict + return config_from_klibc_net_cfg( + files=self._files, mac_addrs=self._mac_addrs, + ) + + +_INITRAMFS_CONFIG_SOURCES = [KlibcNetworkConfigSource] + + def _klibc_to_config_entry(content, mac_addrs=None): """Convert a klibc written shell content file to a 'config' entry When ip= is seen on the kernel command line in debian initramfs @@ -29,9 +101,12 @@ def _klibc_to_config_entry(content, mac_addrs=None): provided here. There is no good documentation on this unfortunately. DEVICE=<name> is expected/required and PROTO should indicate if - this is 'static' or 'dhcp' or 'dhcp6' (LP: #1621507). + this is 'none' (static) or 'dhcp' or 'dhcp6' (LP: #1621507). note that IPV6PROTO is also written by newer code to address the possibility of both ipv4 and ipv6 getting addresses. + + Full syntax is documented at: + https://git.kernel.org/pub/scm/libs/klibc/klibc.git/plain/usr/kinit/ipconfig/README.ipconfig """ if mac_addrs is None: @@ -50,9 +125,9 @@ def _klibc_to_config_entry(content, mac_addrs=None): if data.get('filename'): proto = 'dhcp' else: - proto = 'static' + proto = 'none' - if proto not in ('static', 'dhcp', 'dhcp6'): + if proto not in ('none', 'dhcp', 'dhcp6'): raise ValueError("Unexpected value for PROTO: %s" % proto) iface = { @@ -72,6 +147,9 @@ def _klibc_to_config_entry(content, mac_addrs=None): # PROTO for ipv4, IPV6PROTO for ipv6 cur_proto = data.get(pre + 'PROTO', proto) + # ipconfig's 'none' is called 'static' + if cur_proto == 'none': + cur_proto = 'static' subnet = {'type': cur_proto, 'control': 'manual'} # only populate address for static types. While the rendered config @@ -137,6 +215,24 @@ def config_from_klibc_net_cfg(files=None, mac_addrs=None): return {'config': entries, 'version': 1} +def read_initramfs_config(): + """ + Return v1 network config for initramfs-configured networking (or None) + + This will consider each _INITRAMFS_CONFIG_SOURCES entry in turn, and return + v1 network configuration for the first one that is applicable. If none are + applicable, return None. + """ + for src_cls in _INITRAMFS_CONFIG_SOURCES: + cfg_source = src_cls() + + if not cfg_source.is_applicable(): + continue + + return cfg_source.render_config() + return None + + def _decomp_gzip(blob, strict=True): # decompress blob. raise exception if not compressed unless strict=False. 
with io.BytesIO(blob) as iobuf: @@ -167,23 +263,10 @@ def _b64dgz(b64str, gzipped="try"): return _decomp_gzip(blob, strict=gzipped != "try") -def _is_initramfs_netconfig(files, cmdline): - if files: - if 'ip=' in cmdline or 'ip6=' in cmdline: - return True - if os.path.exists(_OPEN_ISCSI_INTERFACE_FILE): - # iBft can configure networking without ip= - return True - return False - - -def read_kernel_cmdline_config(files=None, mac_addrs=None, cmdline=None): +def read_kernel_cmdline_config(cmdline=None): if cmdline is None: cmdline = util.get_cmdline() - if files is None: - files = _get_klibc_net_cfg_files() - if 'network-config=' in cmdline: data64 = None for tok in cmdline.split(): @@ -192,16 +275,6 @@ def read_kernel_cmdline_config(files=None, mac_addrs=None, cmdline=None): if data64: return util.load_yaml(_b64dgz(data64)) - if not _is_initramfs_netconfig(files, cmdline): - return None - - if mac_addrs is None: - mac_addrs = {} - for k in get_devicelist(): - mac_addr = read_sys_net_safe(k, 'address') - if mac_addr: - mac_addrs[k] = mac_addr - - return config_from_klibc_net_cfg(files=files, mac_addrs=mac_addrs) + return None # vi: ts=4 expandtab diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py index 0db991db..19d0199c 100644 --- a/cloudinit/net/dhcp.py +++ b/cloudinit/net/dhcp.py @@ -9,6 +9,8 @@ import logging import os import re import signal +import time +from io import StringIO from cloudinit.net import ( EphemeralIPv4Network, find_fallback_nic, get_devicelist, @@ -16,7 +18,6 @@ from cloudinit.net import ( from cloudinit.net.network_state import mask_and_ipv4_to_bcast_addr as bcip from cloudinit import temp_utils from cloudinit import util -from six import StringIO LOG = logging.getLogger(__name__) @@ -91,10 +92,17 @@ class EphemeralDHCPv4(object): nmap = {'interface': 'interface', 'ip': 'fixed-address', 'prefix_or_mask': 'subnet-mask', 'broadcast': 'broadcast-address', + 'static_routes': [ + 'rfc3442-classless-static-routes', + 'classless-static-routes' + ], 'router': 'routers'} - kwargs = dict([(k, self.lease.get(v)) for k, v in nmap.items()]) + kwargs = self.extract_dhcp_options_mapping(nmap) if not kwargs['broadcast']: kwargs['broadcast'] = bcip(kwargs['prefix_or_mask'], kwargs['ip']) + if kwargs['static_routes']: + kwargs['static_routes'] = ( + parse_static_routes(kwargs['static_routes'])) if self.connectivity_url: kwargs['connectivity_url'] = self.connectivity_url ephipv4 = EphemeralIPv4Network(**kwargs) @@ -102,6 +110,25 @@ class EphemeralDHCPv4(object): self._ephipv4 = ephipv4 return self.lease + def extract_dhcp_options_mapping(self, nmap): + result = {} + for internal_reference, lease_option_names in nmap.items(): + if isinstance(lease_option_names, list): + self.get_first_option_value( + internal_reference, + lease_option_names, + result + ) + else: + result[internal_reference] = self.lease.get(lease_option_names) + return result + + def get_first_option_value(self, internal_mapping, + lease_option_names, result): + for different_names in lease_option_names: + if not result.get(internal_mapping): + result[internal_mapping] = self.lease.get(different_names) + def maybe_perform_dhcp_discovery(nic=None): """Perform dhcp discovery if nic valid and dhclient command exists. 
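The nmap change above lets one logical field fall back across several lease
option names. A minimal standalone sketch of that first-match lookup follows
(hypothetical lease data, not cloud-init's EphemeralDHCPv4 itself):

def extract_options(lease, nmap):
    # Map internal field names to lease option names; for list-valued
    # entries, keep the first option name that yields a value.
    result = {}
    for internal, names in nmap.items():
        if isinstance(names, list):
            for name in names:
                if result.get(internal) is None:
                    result[internal] = lease.get(name)
        else:
            result[internal] = lease.get(names)
    return result

lease = {'fixed-address': '192.168.2.74',
         'classless-static-routes': '0,130,56,240,1'}
nmap = {'ip': 'fixed-address',
        'static_routes': ['rfc3442-classless-static-routes',
                          'classless-static-routes']}
print(extract_options(lease, nmap))
# -> {'ip': '192.168.2.74', 'static_routes': '0,130,56,240,1'}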
@@ -127,7 +154,9 @@ def maybe_perform_dhcp_discovery(nic=None):
     if not dhclient_path:
         LOG.debug('Skip dhclient configuration: No dhclient command found.')
         return []
-    with temp_utils.tempdir(prefix='cloud-init-dhcp-', needs_exe=True) as tdir:
+    with temp_utils.tempdir(rmtree_ignore_errors=True,
+                            prefix='cloud-init-dhcp-',
+                            needs_exe=True) as tdir:
         # Use /var/tmp because /run/cloud-init/tmp is mounted noexec
         return dhcp_discovery(dhclient_path, nic, tdir)
 
@@ -195,24 +224,39 @@ def dhcp_discovery(dhclient_cmd_path, interface, cleandir):
            '-pf', pid_file, interface, '-sf', '/bin/true']
     util.subp(cmd, capture=True)
 
-    # dhclient doesn't write a pid file until after it forks when it gets a
-    # proper lease response. Since cleandir is a temp directory that gets
-    # removed, we need to wait for that pidfile creation before the
-    # cleandir is removed, otherwise we get FileNotFound errors.
+    # Wait for pid file and lease file to appear, and for the process
+    # named by the pid file to daemonize (have pid 1 as its parent). If we
+    # try to read the lease file before daemonization happens, we might try
+    # to read it before the dhclient has actually written it. We also have
+    # to wait until the dhclient has become a daemon so we can be sure to
+    # kill the correct process, thus freeing cleandir to be deleted back
+    # up the callstack.
     missing = util.wait_for_files(
         [pid_file, lease_file], maxwait=5, naplen=0.01)
     if missing:
         LOG.warning("dhclient did not produce expected files: %s",
                     ', '.join(os.path.basename(f) for f in missing))
         return []
-    pid_content = util.load_file(pid_file).strip()
-    try:
-        pid = int(pid_content)
-    except ValueError:
-        LOG.debug(
-            "pid file contains non-integer content '%s'", pid_content)
-    else:
-        os.kill(pid, signal.SIGKILL)
+
+    ppid = 'unknown'
+    for _ in range(0, 1000):
+        pid_content = util.load_file(pid_file).strip()
+        try:
+            pid = int(pid_content)
+        except ValueError:
+            pass
+        else:
+            ppid = util.get_proc_ppid(pid)
+            if ppid == 1:
+                LOG.debug('killing dhclient with pid=%s', pid)
+                os.kill(pid, signal.SIGKILL)
+                return parse_dhcp_lease_file(lease_file)
+        time.sleep(0.01)
+
+    LOG.error(
+        'dhclient(pid=%s, parentpid=%s) failed to daemonize after %s seconds',
+        pid_content, ppid, 0.01 * 1000
+    )
     return parse_dhcp_lease_file(lease_file)
 
 
@@ -254,4 +298,96 @@ def networkd_get_option_from_leases(keyname, leases_d=None):
             return data[keyname]
     return None
 
+
+def parse_static_routes(rfc3442):
+    """ parse rfc3442 format and return a list of tuples of strings.
+
+    Each tuple is composed of the network_address (including net length) and
+    gateway for a parsed static route. It can parse two formats of rfc3442,
+    one from dhcpcd and one from dhclient (isc).
+
+    @param rfc3442: string in rfc3442 format (isc or dhcpcd)
+    @returns: list of tuple(str, str) for all valid parsed routes until the
+              first parsing error.
+
+    E.g.
+ sr=parse_static_routes("32,169,254,169,254,130,56,248,255,0,130,56,240,1") + sr=[ + ("169.254.169.254/32", "130.56.248.255"), ("0.0.0.0/0", "130.56.240.1") + ] + + sr2 = parse_static_routes("24.191.168.128 192.168.128.1,0 192.168.128.1") + sr2 = [ + ("191.168.128.0/24", "192.168.128.1"), ("0.0.0.0/0", "192.168.128.1") + ] + + Python version of isc-dhclient's hooks: + /etc/dhcp/dhclient-exit-hooks.d/rfc3442-classless-routes + """ + # raw strings from dhcp lease may end in semi-colon + rfc3442 = rfc3442.rstrip(";") + tokens = [tok for tok in re.split(r"[, .]", rfc3442) if tok] + static_routes = [] + + def _trunc_error(cidr, required, remain): + msg = ("RFC3442 string malformed. Current route has CIDR of %s " + "and requires %s significant octets, but only %s remain. " + "Verify DHCP rfc3442-classless-static-routes value: %s" + % (cidr, required, remain, rfc3442)) + LOG.error(msg) + + current_idx = 0 + for idx, tok in enumerate(tokens): + if idx < current_idx: + continue + net_length = int(tok) + if net_length in range(25, 33): + req_toks = 9 + if len(tokens[idx:]) < req_toks: + _trunc_error(net_length, req_toks, len(tokens[idx:])) + return static_routes + net_address = ".".join(tokens[idx+1:idx+5]) + gateway = ".".join(tokens[idx+5:idx+req_toks]) + current_idx = idx + req_toks + elif net_length in range(17, 25): + req_toks = 8 + if len(tokens[idx:]) < req_toks: + _trunc_error(net_length, req_toks, len(tokens[idx:])) + return static_routes + net_address = ".".join(tokens[idx+1:idx+4] + ["0"]) + gateway = ".".join(tokens[idx+4:idx+req_toks]) + current_idx = idx + req_toks + elif net_length in range(9, 17): + req_toks = 7 + if len(tokens[idx:]) < req_toks: + _trunc_error(net_length, req_toks, len(tokens[idx:])) + return static_routes + net_address = ".".join(tokens[idx+1:idx+3] + ["0", "0"]) + gateway = ".".join(tokens[idx+3:idx+req_toks]) + current_idx = idx + req_toks + elif net_length in range(1, 9): + req_toks = 6 + if len(tokens[idx:]) < req_toks: + _trunc_error(net_length, req_toks, len(tokens[idx:])) + return static_routes + net_address = ".".join(tokens[idx+1:idx+2] + ["0", "0", "0"]) + gateway = ".".join(tokens[idx+2:idx+req_toks]) + current_idx = idx + req_toks + elif net_length == 0: + req_toks = 5 + if len(tokens[idx:]) < req_toks: + _trunc_error(net_length, req_toks, len(tokens[idx:])) + return static_routes + net_address = "0.0.0.0" + gateway = ".".join(tokens[idx+1:idx+req_toks]) + current_idx = idx + req_toks + else: + LOG.error('Parsed invalid net length "%s". 
Verify DHCP ' + 'rfc3442-classless-static-routes value.', net_length) + return static_routes + + static_routes.append(("%s/%s" % (net_address, net_length), gateway)) + + return static_routes + # vi: ts=4 expandtab diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py index 64236320..2f714563 100644 --- a/cloudinit/net/eni.py +++ b/cloudinit/net/eni.py @@ -94,7 +94,7 @@ def _iface_add_attrs(iface, index, ipv4_subnet_mtu): ] renames = {'mac_address': 'hwaddress'} - if iface['type'] not in ['bond', 'bridge', 'vlan']: + if iface['type'] not in ['bond', 'bridge', 'infiniband', 'vlan']: ignore_map.append('mac_address') for key, value in iface.items(): @@ -366,8 +366,6 @@ class Renderer(renderer.Renderer): down = indent + "pre-down route del" or_true = " || true" mapping = { - 'network': '-net', - 'netmask': 'netmask', 'gateway': 'gw', 'metric': 'metric', } @@ -379,13 +377,21 @@ class Renderer(renderer.Renderer): default_gw = ' -A inet6 default' route_line = '' - for k in ['network', 'netmask', 'gateway', 'metric']: - if default_gw and k in ['network', 'netmask']: + for k in ['network', 'gateway', 'metric']: + if default_gw and k == 'network': continue if k == 'gateway': route_line += '%s %s %s' % (default_gw, mapping[k], route[k]) elif k in route: - route_line += ' %s %s' % (mapping[k], route[k]) + if k == 'network': + if ':' in route[k]: + route_line += ' -A inet6' + else: + route_line += ' -net' + if 'prefix' in route: + route_line += ' %s/%s' % (route[k], route['prefix']) + else: + route_line += ' %s %s' % (mapping[k], route[k]) content.append(up + route_line + or_true) content.append(down + route_line + or_true) return content @@ -393,6 +399,7 @@ class Renderer(renderer.Renderer): def _render_iface(self, iface, render_hwaddress=False): sections = [] subnets = iface.get('subnets', {}) + accept_ra = iface.pop('accept-ra', None) if subnets: for index, subnet in enumerate(subnets): ipv4_subnet_mtu = None @@ -405,8 +412,29 @@ class Renderer(renderer.Renderer): else: ipv4_subnet_mtu = subnet.get('mtu') iface['inet'] = subnet_inet - if subnet['type'].startswith('dhcp'): + if (subnet['type'] == 'dhcp4' or subnet['type'] == 'dhcp6' or + subnet['type'] == 'ipv6_dhcpv6-stateful'): + # Configure network settings using DHCP or DHCPv6 iface['mode'] = 'dhcp' + if accept_ra is not None: + # Accept router advertisements (0=off, 1=on) + iface['accept_ra'] = '1' if accept_ra else '0' + elif subnet['type'] == 'ipv6_dhcpv6-stateless': + # Configure network settings using SLAAC from RAs + iface['mode'] = 'auto' + # Use stateless DHCPv6 (0=off, 1=on) + iface['dhcp'] = '1' + elif subnet['type'] == 'ipv6_slaac': + # Configure network settings using SLAAC from RAs + iface['mode'] = 'auto' + # Use stateless DHCPv6 (0=off, 1=on) + iface['dhcp'] = '0' + elif subnet_is_ipv6(subnet): + # mode might be static6, eni uses 'static' + iface['mode'] = 'static' + if accept_ra is not None: + # Accept router advertisements (0=off, 1=on) + iface['accept_ra'] = '1' if accept_ra else '0' # do not emit multiple 'auto $IFACE' lines as older (precise) # ifupdown complains @@ -461,9 +489,10 @@ class Renderer(renderer.Renderer): order = { 'loopback': 0, 'physical': 1, - 'bond': 2, - 'bridge': 3, - 'vlan': 4, + 'infiniband': 2, + 'bond': 3, + 'bridge': 4, + 'vlan': 5, } sections = [] diff --git a/cloudinit/net/freebsd.py b/cloudinit/net/freebsd.py new file mode 100644 index 00000000..d6f61da3 --- /dev/null +++ b/cloudinit/net/freebsd.py @@ -0,0 +1,175 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
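+#
+# Render cloud-init network state into FreeBSD rc.conf settings
+# (ifconfig_*, defaultrouter, route_net*) and /etc/resolv.conf.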
+
+import re
+
+from cloudinit import log as logging
+from cloudinit import net
+from cloudinit import util
+from cloudinit.distros import rhel_util
+from cloudinit.distros.parsers.resolv_conf import ResolvConf
+
+from . import renderer
+
+LOG = logging.getLogger(__name__)
+
+
+class Renderer(renderer.Renderer):
+    resolv_conf_fn = 'etc/resolv.conf'
+    rc_conf_fn = 'etc/rc.conf'
+
+    def __init__(self, config=None):
+        if not config:
+            config = {}
+        self.dhcp_interfaces = []
+        self._postcmds = config.get('postcmds', True)
+
+    def _update_rc_conf(self, settings, target=None):
+        fn = util.target_path(target, self.rc_conf_fn)
+        rhel_util.update_sysconfig_file(fn, settings)
+
+    def _write_ifconfig_entries(self, settings, target=None):
+        ifname_by_mac = net.get_interfaces_by_mac()
+        for interface in settings.iter_interfaces():
+            device_name = interface.get("name")
+            device_mac = interface.get("mac_address")
+            if device_name and re.match(r'^lo\d+$', device_name):
+                continue
+            if device_mac not in ifname_by_mac:
+                LOG.info('Cannot find any device with MAC %s', device_mac)
+            elif device_mac and device_name:
+                cur_name = ifname_by_mac[device_mac]
+                if cur_name != device_name:
+                    LOG.info('netif service will rename interface %s to %s',
+                             cur_name, device_name)
+                    self._update_rc_conf(
+                        {'ifconfig_%s_name' % cur_name: device_name},
+                        target=target)
+            else:
+                device_name = ifname_by_mac[device_mac]
+
+            LOG.info('Configuring interface %s', device_name)
+            ifconfig = 'DHCP'  # default
+
+            for subnet in interface.get("subnets", []):
+                if ifconfig != 'DHCP':
+                    LOG.info('The FreeBSD provider only sets the first subnet.')
+                    break
+                if subnet.get('type') == 'static':
+                    if not subnet.get('netmask'):
+                        LOG.debug(
+                            'Skipping IP %s, because there is no netmask',
+                            subnet.get('address'))
+                        continue
+                    LOG.debug('Configuring dev %s with %s / %s', device_name,
+                              subnet.get('address'), subnet.get('netmask'))
+                    # Configure an ipv4 address.
+                    ifconfig = (
+                        subnet.get('address') + ' netmask ' +
+                        subnet.get('netmask'))
+
+            if ifconfig == 'DHCP':
+                self.dhcp_interfaces.append(device_name)
+            self._update_rc_conf(
+                {'ifconfig_' + device_name: ifconfig},
+                target=target)
+
+    def _write_route_entries(self, settings, target=None):
+        routes = list(settings.iter_routes())
+        for interface in settings.iter_interfaces():
+            subnets = interface.get("subnets", [])
+            for subnet in subnets:
+                if subnet.get('type') != 'static':
+                    continue
+                gateway = subnet.get('gateway')
+                if gateway and len(gateway.split('.')) == 4:
+                    routes.append({
+                        'network': '0.0.0.0',
+                        'netmask': '0.0.0.0',
+                        'gateway': gateway})
+                routes += subnet.get('routes', [])
+        route_cpt = 0
+        for route in routes:
+            network = route.get('network')
+            if not network:
+                LOG.debug('Skipping a bad route entry')
+                continue
+            netmask = route.get('netmask')
+            gateway = route.get('gateway')
+            route_cmd = "-route %s/%s %s" % (network, netmask, gateway)
+            if network == '0.0.0.0':
+                self._update_rc_conf(
+                    {'defaultrouter': gateway}, target=target)
+            else:
+                self._update_rc_conf(
+                    {'route_net%d' % route_cpt: route_cmd}, target=target)
+                route_cpt += 1
+
+    def _write_resolve_conf(self, settings, target=None):
+        nameservers = settings.dns_nameservers
+        searchdomains = settings.dns_searchdomains
+        for interface in settings.iter_interfaces():
+            for subnet in interface.get("subnets", []):
+                if 'dns_nameservers' in subnet:
+                    nameservers.extend(subnet['dns_nameservers'])
+                if 'dns_search' in subnet:
+                    searchdomains.extend(subnet['dns_search'])
+        # Try to read the /etc/resolv.conf or just start from scratch if that
+        # fails.
+        try:
+            resolvconf = ResolvConf(util.load_file(util.target_path(
+                target, self.resolv_conf_fn)))
+            resolvconf.parse()
+        except IOError:
+            util.logexc(LOG, "Failed to parse %s, using new empty file",
+                        util.target_path(target, self.resolv_conf_fn))
+            resolvconf = ResolvConf('')
+            resolvconf.parse()
+
+        # Add some nameservers
+        for server in nameservers:
+            try:
+                resolvconf.add_nameserver(server)
+            except ValueError:
+                util.logexc(LOG, "Failed to add nameserver %s", server)
+
+        # And add any searchdomains.
+        for domain in searchdomains:
+            try:
+                resolvconf.add_search_domain(domain)
+            except ValueError:
+                util.logexc(LOG, "Failed to add search domain %s", domain)
+        util.write_file(
+            util.target_path(target, self.resolv_conf_fn),
+            str(resolvconf), 0o644)
+
+    def _write_network(self, settings, target=None):
+        self._write_ifconfig_entries(settings, target=target)
+        self._write_route_entries(settings, target=target)
+        self._write_resolve_conf(settings, target=target)
+
+        self.start_services(run=self._postcmds)
+
+    def render_network_state(self, network_state, templates=None, target=None):
+        self._write_network(network_state, target=target)
+
+    def start_services(self, run=False):
+        if not run:
+            LOG.debug("freebsd generate postcmd disabled")
+            return
+
+        util.subp(['service', 'netif', 'restart'], capture=True)
+        # On FreeBSD 10, the restart of routing and dhclient is likely to fail
+        # because
+        # - routing: it cannot remove the loopback route, but it will still set
+        #   up the default route as expected.
+        # - dhclient: it cannot stop the dhclient started by the netif service.
+        # In both cases, the situation is ok, and we can proceed.
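+        # Hence rcs=[0, 1] on the two calls below: an exit status of 1 is
+        # accepted instead of raising ProcessExecutionError.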
+ util.subp(['service', 'routing', 'restart'], capture=True, rcs=[0, 1]) + for dhcp_interface in self.dhcp_interfaces: + util.subp(['service', 'dhclient', 'restart', dhcp_interface], + rcs=[0, 1], + capture=True) + + +def available(target=None): + return util.is_FreeBSD() diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py index 21517fda..89855270 100644 --- a/cloudinit/net/netplan.py +++ b/cloudinit/net/netplan.py @@ -4,10 +4,11 @@ import copy import os from . import renderer -from .network_state import subnet_is_ipv6, NET_CONFIG_TO_V2 +from .network_state import subnet_is_ipv6, NET_CONFIG_TO_V2, IPV6_DYNAMIC_TYPES from cloudinit import log as logging from cloudinit import util +from cloudinit import safeyaml from cloudinit.net import SYS_CLASS_NET, get_devicelist KNOWN_SNAPD_CONFIG = b"""\ @@ -34,7 +35,7 @@ def _get_params_dict_by_match(config, match): if key.startswith(match)) -def _extract_addresses(config, entry, ifname): +def _extract_addresses(config, entry, ifname, features=None): """This method parse a cloudinit.net.network_state dictionary (config) and maps netstate keys/values into a dictionary (entry) to represent netplan yaml. @@ -51,7 +52,8 @@ def _extract_addresses(config, entry, ifname): 'mtu': 1480, 'netmask': 64, 'type': 'static'}], - 'type: physical' + 'type: physical', + 'accept-ra': 'true' } An entry dictionary looks like: @@ -66,7 +68,7 @@ def _extract_addresses(config, entry, ifname): 'match': {'macaddress': '52:54:00:12:34:00'}, 'mtu': 1501, 'address': ['192.168.1.2/24', '2001:4800:78ff:1b:be76:4eff:fe06:1000"], - 'mtu6': 1480} + 'ipv6-mtu': 1480} """ @@ -79,6 +81,8 @@ def _extract_addresses(config, entry, ifname): else: return [obj, ] + if features is None: + features = [] addresses = [] routes = [] nameservers = [] @@ -92,7 +96,9 @@ def _extract_addresses(config, entry, ifname): if sn_type == 'dhcp': sn_type += '4' entry.update({sn_type: True}) - elif sn_type in ['static']: + elif sn_type in IPV6_DYNAMIC_TYPES: + entry.update({'dhcp6': True}) + elif sn_type in ['static', 'static6']: addr = "%s" % subnet.get('address') if 'prefix' in subnet: addr += "/%d" % subnet.get('prefix') @@ -108,8 +114,8 @@ def _extract_addresses(config, entry, ifname): searchdomains += _listify(subnet.get('dns_search', [])) if 'mtu' in subnet: mtukey = 'mtu' - if subnet_is_ipv6(subnet): - mtukey += '6' + if subnet_is_ipv6(subnet) and 'ipv6-mtu' in features: + mtukey = 'ipv6-mtu' entry.update({mtukey: subnet.get('mtu')}) for route in subnet.get('routes', []): to_net = "%s/%s" % (route.get('network'), @@ -144,6 +150,8 @@ def _extract_addresses(config, entry, ifname): ns = entry.get('nameservers', {}) ns.update({'search': searchdomains}) entry.update({'nameservers': ns}) + if 'accept-ra' in config and config['accept-ra'] is not None: + entry.update({'accept-ra': util.is_true(config.get('accept-ra'))}) def _extract_bond_slaves_by_name(interfaces, entry, bond_master): @@ -179,6 +187,7 @@ class Renderer(renderer.Renderer): """Renders network information in a /etc/netplan/network.yaml format.""" NETPLAN_GENERATE = ['netplan', 'generate'] + NETPLAN_INFO = ['netplan', 'info'] def __init__(self, config=None): if not config: @@ -188,6 +197,22 @@ class Renderer(renderer.Renderer): self.netplan_header = config.get('netplan_header', None) self._postcmds = config.get('postcmds', False) self.clean_default = config.get('clean_default', True) + self._features = config.get('features', None) + + @property + def features(self): + if self._features is None: + try: + info_blob, _err = 
util.subp(self.NETPLAN_INFO, capture=True) + info = util.load_yaml(info_blob) + self._features = info['netplan.io']['features'] + except util.ProcessExecutionError: + # if the info subcommand is not present then we don't have any + # new features + pass + except (TypeError, KeyError) as e: + LOG.debug('Failed to list features from netplan info: %s', e) + return self._features def render_network_state(self, network_state, templates=None, target=None): # check network state for version @@ -235,9 +260,9 @@ class Renderer(renderer.Renderer): # if content already in netplan format, pass it back if network_state.version == 2: LOG.debug('V2 to V2 passthrough') - return util.yaml_dumps({'network': network_state.config}, - explicit_start=False, - explicit_end=False) + return safeyaml.dumps({'network': network_state.config}, + explicit_start=False, + explicit_end=False) ethernets = {} wifis = {} @@ -271,7 +296,7 @@ class Renderer(renderer.Renderer): else: del eth['match'] del eth['set-name'] - _extract_addresses(ifcfg, eth, ifname) + _extract_addresses(ifcfg, eth, ifname, self.features) ethernets.update({ifname: eth}) elif if_type == 'bond': @@ -296,7 +321,7 @@ class Renderer(renderer.Renderer): slave_interfaces = ifcfg.get('bond-slaves') if slave_interfaces == 'none': _extract_bond_slaves_by_name(interfaces, bond, ifname) - _extract_addresses(ifcfg, bond, ifname) + _extract_addresses(ifcfg, bond, ifname, self.features) bonds.update({ifname: bond}) elif if_type == 'bridge': @@ -331,7 +356,7 @@ class Renderer(renderer.Renderer): bridge.update({'parameters': br_config}) if ifcfg.get('mac_address'): bridge['macaddress'] = ifcfg.get('mac_address').lower() - _extract_addresses(ifcfg, bridge, ifname) + _extract_addresses(ifcfg, bridge, ifname, self.features) bridges.update({ifname: bridge}) elif if_type == 'vlan': @@ -343,7 +368,7 @@ class Renderer(renderer.Renderer): macaddr = ifcfg.get('mac_address', None) if macaddr is not None: vlan['macaddress'] = macaddr.lower() - _extract_addresses(ifcfg, vlan, ifname) + _extract_addresses(ifcfg, vlan, ifname, self.features) vlans.update({ifname: vlan}) # inject global nameserver values under each all interface which @@ -359,9 +384,10 @@ class Renderer(renderer.Renderer): # workaround yaml dictionary key sorting when dumping def _render_section(name, section): if section: - dump = util.yaml_dumps({name: section}, - explicit_start=False, - explicit_end=False) + dump = safeyaml.dumps({name: section}, + explicit_start=False, + explicit_end=False, + noalias=True) txt = util.indent(dump, ' ' * 4) return [txt] return [] diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index f76e508a..63d6e291 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -10,19 +10,23 @@ import logging import socket import struct -import six - +from cloudinit import safeyaml from cloudinit import util LOG = logging.getLogger(__name__) NETWORK_STATE_VERSION = 1 +IPV6_DYNAMIC_TYPES = ['dhcp6', + 'ipv6_slaac', + 'ipv6_dhcpv6-stateless', + 'ipv6_dhcpv6-stateful'] NETWORK_STATE_REQUIRED_KEYS = { 1: ['version', 'config', 'network_state'], } NETWORK_V2_KEY_FILTER = [ - 'addresses', 'dhcp4', 'dhcp6', 'gateway4', 'gateway6', 'interfaces', - 'match', 'mtu', 'nameservers', 'renderer', 'set-name', 'wakeonlan' + 'addresses', 'dhcp4', 'dhcp4-overrides', 'dhcp6', 'dhcp6-overrides', + 'gateway4', 'gateway6', 'interfaces', 'match', 'mtu', 'nameservers', + 'renderer', 'set-name', 'wakeonlan', 'accept-ra' ] NET_CONFIG_TO_V2 = { @@ -67,7 +71,7 @@ def 
parse_net_config_data(net_config, skip_broken=True): # pass the whole net-config as-is config = net_config - if version and config: + if version and config is not None: nsi = NetworkStateInterpreter(version=version, config=config) nsi.parse_config(skip_broken=skip_broken) state = nsi.get_network_state() @@ -148,6 +152,7 @@ class NetworkState(object): self._network_state = copy.deepcopy(network_state) self._version = version self.use_ipv6 = network_state.get('use_ipv6', False) + self._has_default_route = None @property def config(self): @@ -157,14 +162,6 @@ class NetworkState(object): def version(self): return self._version - def iter_routes(self, filter_func=None): - for route in self._network_state.get('routes', []): - if filter_func is not None: - if filter_func(route): - yield route - else: - yield route - @property def dns_nameservers(self): try: @@ -179,18 +176,49 @@ class NetworkState(object): except KeyError: return [] + @property + def has_default_route(self): + if self._has_default_route is None: + self._has_default_route = self._maybe_has_default_route() + return self._has_default_route + def iter_interfaces(self, filter_func=None): ifaces = self._network_state.get('interfaces', {}) - for iface in six.itervalues(ifaces): + for iface in ifaces.values(): if filter_func is None: yield iface else: if filter_func(iface): yield iface + def iter_routes(self, filter_func=None): + for route in self._network_state.get('routes', []): + if filter_func is not None: + if filter_func(route): + yield route + else: + yield route + + def _maybe_has_default_route(self): + for route in self.iter_routes(): + if self._is_default_route(route): + return True + for iface in self.iter_interfaces(): + for subnet in iface.get('subnets', []): + for route in subnet.get('routes', []): + if self._is_default_route(route): + return True + return False + + def _is_default_route(self, route): + default_nets = ('::', '0.0.0.0') + return ( + route.get('prefix') == 0 + and route.get('network') in default_nets + ) -@six.add_metaclass(CommandHandlerMeta) -class NetworkStateInterpreter(object): + +class NetworkStateInterpreter(metaclass=CommandHandlerMeta): initial_network_state = { 'interfaces': {}, @@ -228,7 +256,7 @@ class NetworkStateInterpreter(object): 'config': self._config, 'network_state': self._network_state, } - return util.yaml_dumps(state) + return safeyaml.dumps(state) def load(self, state): if 'version' not in state: @@ -247,7 +275,7 @@ class NetworkStateInterpreter(object): setattr(self, key, state[key]) def dump_network_state(self): - return util.yaml_dumps(self._network_state) + return safeyaml.dumps(self._network_state) def as_dict(self): return {'version': self._version, 'config': self._config} @@ -315,7 +343,8 @@ class NetworkStateInterpreter(object): 'name': 'eth0', 'subnets': [ {'type': 'dhcp4'} - ] + ], + 'accept-ra': 'true' } ''' @@ -335,6 +364,9 @@ class NetworkStateInterpreter(object): self.use_ipv6 = True break + accept_ra = command.get('accept-ra', None) + if accept_ra is not None: + accept_ra = util.is_true(accept_ra) iface.update({ 'name': command.get('name'), 'type': command.get('type'), @@ -345,6 +377,7 @@ class NetworkStateInterpreter(object): 'address': None, 'gateway': None, 'subnets': subnets, + 'accept-ra': accept_ra }) self._network_state['interfaces'].update({command.get('name'): iface}) self.dump_network_state() @@ -571,6 +604,7 @@ class NetworkStateInterpreter(object): eno1: match: macaddress: 00:11:22:33:44:55 + driver: hv_netsvc wakeonlan: true dhcp4: true dhcp6: false @@ 
-587,6 +621,7 @@ class NetworkStateInterpreter(object): driver: ixgbe set-name: lom1 dhcp6: true + accept-ra: true switchports: match: name: enp2* @@ -606,15 +641,18 @@ class NetworkStateInterpreter(object): 'type': 'physical', 'name': cfg.get('set-name', eth), } - mac_address = cfg.get('match', {}).get('macaddress', None) + match = cfg.get('match', {}) + mac_address = match.get('macaddress', None) if not mac_address: LOG.debug('NetworkState Version2: missing "macaddress" info ' 'in config entry: %s: %s', eth, str(cfg)) - phy_cmd.update({'mac_address': mac_address}) - - for key in ['mtu', 'match', 'wakeonlan']: + phy_cmd['mac_address'] = mac_address + driver = match.get('driver', None) + if driver: + phy_cmd['params'] = {'driver': driver} + for key in ['mtu', 'match', 'wakeonlan', 'accept-ra']: if key in cfg: - phy_cmd.update({key: cfg.get(key)}) + phy_cmd[key] = cfg[key] subnets = self._v2_to_v1_ipcfg(cfg) if len(subnets) > 0: @@ -648,6 +686,8 @@ class NetworkStateInterpreter(object): 'vlan_id': cfg.get('id'), 'vlan_link': cfg.get('link'), } + if 'mtu' in cfg: + vlan_cmd['mtu'] = cfg['mtu'] subnets = self._v2_to_v1_ipcfg(cfg) if len(subnets) > 0: vlan_cmd.update({'subnets': subnets}) @@ -682,6 +722,14 @@ class NetworkStateInterpreter(object): item_params = dict((key, value) for (key, value) in item_cfg.items() if key not in NETWORK_V2_KEY_FILTER) + # we accept the fixed spelling, but write the old for compatability + # Xenial does not have an updated netplan which supports the + # correct spelling. LP: #1756701 + params = item_params['parameters'] + grat_value = params.pop('gratuitous-arp', None) + if grat_value: + params['gratuitious-arp'] = grat_value + v1_cmd = { 'type': cmd_type, 'name': item_name, @@ -689,6 +737,8 @@ class NetworkStateInterpreter(object): 'params': dict((v2key_to_v1[k], v) for k, v in item_params.get('parameters', {}).items()) } + if 'mtu' in item_cfg: + v1_cmd['mtu'] = item_cfg['mtu'] subnets = self._v2_to_v1_ipcfg(item_cfg) if len(subnets) > 0: v1_cmd.update({'subnets': subnets}) @@ -705,12 +755,20 @@ class NetworkStateInterpreter(object): def _v2_to_v1_ipcfg(self, cfg): """Common ipconfig extraction from v2 to v1 subnets array.""" + def _add_dhcp_overrides(overrides, subnet): + if 'route-metric' in overrides: + subnet['metric'] = overrides['route-metric'] + subnets = [] - if 'dhcp4' in cfg: - subnets.append({'type': 'dhcp4'}) - if 'dhcp6' in cfg: + if cfg.get('dhcp4'): + subnet = {'type': 'dhcp4'} + _add_dhcp_overrides(cfg.get('dhcp4-overrides', {}), subnet) + subnets.append(subnet) + if cfg.get('dhcp6'): + subnet = {'type': 'dhcp6'} self.use_ipv6 = True - subnets.append({'type': 'dhcp6'}) + _add_dhcp_overrides(cfg.get('dhcp6-overrides', {}), subnet) + subnets.append(subnet) gateway4 = None gateway6 = None @@ -877,9 +935,10 @@ def is_ipv6_addr(address): def subnet_is_ipv6(subnet): """Common helper for checking network_state subnets for ipv6.""" - # 'static6' or 'dhcp6' - if subnet['type'].endswith('6'): - # This is a request for DHCPv6. + # 'static6', 'dhcp6', 'ipv6_dhcpv6-stateful', 'ipv6_dhcpv6-stateless' or + # 'ipv6_slaac' + if subnet['type'].endswith('6') or subnet['type'] in IPV6_DYNAMIC_TYPES: + # This is a request either static6 type or DHCPv6. 
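+        # For example, 'static6' matches via endswith('6') and 'ipv6_slaac'
+        # via IPV6_DYNAMIC_TYPES; a plain 'static' subnet counts as IPv6
+        # only when its address is, per the elif below.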
return True elif subnet['type'] == 'static' and is_ipv6_addr(subnet.get('address')): return True @@ -908,7 +967,7 @@ def ipv4_mask_to_net_prefix(mask): """ if isinstance(mask, int): return mask - if isinstance(mask, six.string_types): + if isinstance(mask, str): try: return int(mask) except ValueError: @@ -935,7 +994,7 @@ def ipv6_mask_to_net_prefix(mask): if isinstance(mask, int): return mask - if isinstance(mask, six.string_types): + if isinstance(mask, str): try: return int(mask) except ValueError: diff --git a/cloudinit/net/renderer.py b/cloudinit/net/renderer.py index 5f32e90f..2a61a7a8 100644 --- a/cloudinit/net/renderer.py +++ b/cloudinit/net/renderer.py @@ -6,7 +6,7 @@ # This file is part of cloud-init. See LICENSE file for license information. import abc -import six +import io from .network_state import parse_net_config_data from .udev import generate_udev_rule @@ -34,7 +34,7 @@ class Renderer(object): """Given state, emit udev rules to map mac to ifname.""" # TODO(harlowja): this seems shared between eni renderer and # this, so move it to a shared location. - content = six.StringIO() + content = io.StringIO() for iface in network_state.iter_interfaces(filter_by_physical): # for physical interfaces write out a persist net udev rule if 'name' in iface and iface.get('mac_address'): diff --git a/cloudinit/net/renderers.py b/cloudinit/net/renderers.py index 5117b4a5..b98dbbe3 100644 --- a/cloudinit/net/renderers.py +++ b/cloudinit/net/renderers.py @@ -1,17 +1,19 @@ # This file is part of cloud-init. See LICENSE file for license information. from . import eni +from . import freebsd from . import netplan from . import RendererNotFoundError from . import sysconfig NAME_TO_RENDERER = { "eni": eni, + "freebsd": freebsd, "netplan": netplan, "sysconfig": sysconfig, } -DEFAULT_PRIORITY = ["eni", "sysconfig", "netplan"] +DEFAULT_PRIORITY = ["eni", "sysconfig", "netplan", "freebsd"] def search(priority=None, target=None, first=False): diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 17293e1d..0a387377 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -1,20 +1,24 @@ # This file is part of cloud-init. See LICENSE file for license information. +import copy +import io import os import re -import six +from configobj import ConfigObj -from cloudinit.distros.parsers import networkmanager_conf -from cloudinit.distros.parsers import resolv_conf from cloudinit import log as logging from cloudinit import util +from cloudinit.distros.parsers import networkmanager_conf +from cloudinit.distros.parsers import resolv_conf from . 
import renderer from .network_state import ( - is_ipv6_addr, net_prefix_to_ipv4_mask, subnet_is_ipv6) + is_ipv6_addr, net_prefix_to_ipv4_mask, subnet_is_ipv6, IPV6_DYNAMIC_TYPES) LOG = logging.getLogger(__name__) +NM_CFG_FILE = "/etc/NetworkManager/NetworkManager.conf" +KNOWN_DISTROS = ['centos', 'fedora', 'rhel', 'suse'] def _make_header(sep='#'): @@ -46,6 +50,24 @@ def _quote_value(value): return value +def enable_ifcfg_rh(path): + """Add ifcfg-rh to NetworkManager.cfg plugins if main section is present""" + config = ConfigObj(path) + if 'main' in config: + if 'plugins' in config['main']: + if 'ifcfg-rh' in config['main']['plugins']: + return + else: + config['main']['plugins'] = [] + + if isinstance(config['main']['plugins'], list): + config['main']['plugins'].append('ifcfg-rh') + else: + config['main']['plugins'] = [config['main']['plugins'], 'ifcfg-rh'] + config.write() + LOG.debug('Enabled ifcfg-rh NetworkManager plugins') + + class ConfigMap(object): """Sysconfig like dictionary object.""" @@ -64,6 +86,9 @@ class ConfigMap(object): def __getitem__(self, key): return self._conf[key] + def get(self, key): + return self._conf.get(key) + def __contains__(self, key): return key in self._conf @@ -74,7 +99,7 @@ class ConfigMap(object): return len(self._conf) def to_string(self): - buf = six.StringIO() + buf = io.StringIO() buf.write(_make_header()) if self._conf: buf.write("\n") @@ -82,11 +107,14 @@ class ConfigMap(object): value = self._conf[key] if isinstance(value, bool): value = self._bool_map[value] - if not isinstance(value, six.string_types): + if not isinstance(value, str): value = str(value) buf.write("%s=%s\n" % (key, _quote_value(value))) return buf.getvalue() + def update(self, updates): + self._conf.update(updates) + class Route(ConfigMap): """Represents a route configuration.""" @@ -128,7 +156,7 @@ class Route(ConfigMap): # only accept ipv4 and ipv6 if proto not in ['ipv4', 'ipv6']: raise ValueError("Unknown protocol '%s'" % (str(proto))) - buf = six.StringIO() + buf = io.StringIO() buf.write(_make_header()) if self._conf: buf.write("\n") @@ -247,12 +275,29 @@ class Renderer(renderer.Renderer): # s1-networkscripts-interfaces.html (or other docs for # details about this) - iface_defaults = tuple([ - ('ONBOOT', True), - ('USERCTL', False), - ('NM_CONTROLLED', False), - ('BOOTPROTO', 'none'), - ]) + iface_defaults = { + 'rhel': {'ONBOOT': True, 'USERCTL': False, 'NM_CONTROLLED': False, + 'BOOTPROTO': 'none'}, + 'suse': {'BOOTPROTO': 'static', 'STARTMODE': 'auto'}, + } + + cfg_key_maps = { + 'rhel': { + 'accept-ra': 'IPV6_FORCE_ACCEPT_RA', + 'bridge_stp': 'STP', + 'bridge_ageing': 'AGEING', + 'bridge_bridgeprio': 'PRIO', + 'mac_address': 'HWADDR', + 'mtu': 'MTU', + }, + 'suse': { + 'bridge_stp': 'BRIDGE_STP', + 'bridge_ageing': 'BRIDGE_AGEINGTIME', + 'bridge_bridgeprio': 'BRIDGE_PRIORITY', + 'mac_address': 'LLADDR', + 'mtu': 'MTU', + }, + } # If these keys exist, then their values will be used to form # a BONDING_OPTS grouping; otherwise no grouping will be set. 
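As a rough sketch of that grouping (standalone Python, not the renderer
itself, which builds ConfigMap objects; the space-join mirrors the
BONDING_OPTS assembly that appears later in this diff):

# Expand (config key, "name=%s" template) pairs into one BONDING_OPTS value.
bond_params_keys = (
    ('bond_mode', 'mode=%s'),
    ('bond_xmit_hash_policy', 'xmit_hash_policy=%s'),
    ('bond_miimon', 'miimon=%s'),
)

def bonding_opts(iface):
    return ' '.join(tmpl % iface[key]
                    for key, tmpl in bond_params_keys if key in iface)

print(bonding_opts({'bond_mode': 'active-backup', 'bond_miimon': 100}))
# -> mode=active-backup miimon=100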
@@ -260,12 +305,18 @@ class Renderer(renderer.Renderer): ('bond_mode', "mode=%s"), ('bond_xmit_hash_policy', "xmit_hash_policy=%s"), ('bond_miimon', "miimon=%s"), - ]) - - bridge_opts_keys = tuple([ - ('bridge_stp', 'STP'), - ('bridge_ageing', 'AGEING'), - ('bridge_bridgeprio', 'PRIO'), + ('bond_min_links', "min_links=%s"), + ('bond_arp_interval', "arp_interval=%s"), + ('bond_arp_ip_target', "arp_ip_target=%s"), + ('bond_arp_validate', "arp_validate=%s"), + ('bond_ad_select', "ad_select=%s"), + ('bond_num_grat_arp', "num_grat_arp=%s"), + ('bond_downdelay', "downdelay=%s"), + ('bond_updelay', "updelay=%s"), + ('bond_lacp_rate', "lacp_rate=%s"), + ('bond_fail_over_mac', "fail_over_mac=%s"), + ('bond_primary', "primary=%s"), + ('bond_primary_reselect', "primary_reselect=%s"), ]) templates = {} @@ -285,46 +336,101 @@ class Renderer(renderer.Renderer): 'iface_templates': config.get('iface_templates'), 'route_templates': config.get('route_templates'), } + self.flavor = config.get('flavor', 'rhel') @classmethod - def _render_iface_shared(cls, iface, iface_cfg): - for k, v in cls.iface_defaults: - iface_cfg[k] = v + def _render_iface_shared(cls, iface, iface_cfg, flavor): + flavor_defaults = copy.deepcopy(cls.iface_defaults.get(flavor, {})) + iface_cfg.update(flavor_defaults) - for (old_key, new_key) in [('mac_address', 'HWADDR'), ('mtu', 'MTU')]: + for old_key in ('mac_address', 'mtu', 'accept-ra'): old_value = iface.get(old_key) if old_value is not None: # only set HWADDR on physical interfaces - if old_key == 'mac_address' and iface['type'] != 'physical': + if (old_key == 'mac_address' and + iface['type'] not in ['physical', 'infiniband']): continue - iface_cfg[new_key] = old_value + new_key = cls.cfg_key_maps[flavor].get(old_key) + if new_key: + iface_cfg[new_key] = old_value @classmethod - def _render_subnets(cls, iface_cfg, subnets): + def _render_subnets(cls, iface_cfg, subnets, has_default_route, flavor): # setting base values - iface_cfg['BOOTPROTO'] = 'none' + if flavor == 'suse': + iface_cfg['BOOTPROTO'] = 'static' + if 'BRIDGE' in iface_cfg: + iface_cfg['BOOTPROTO'] = 'dhcp' + iface_cfg.drop('BRIDGE') + else: + iface_cfg['BOOTPROTO'] = 'none' # modifying base values according to subnets for i, subnet in enumerate(subnets, start=len(iface_cfg.children)): mtu_key = 'MTU' subnet_type = subnet.get('type') - if subnet_type == 'dhcp6': - iface_cfg['IPV6INIT'] = True - iface_cfg['DHCPV6C'] = True + if subnet_type == 'dhcp6' or subnet_type == 'ipv6_dhcpv6-stateful': + if flavor == 'suse': + # User wants dhcp for both protocols + if iface_cfg['BOOTPROTO'] == 'dhcp4': + iface_cfg['BOOTPROTO'] = 'dhcp' + else: + # Only IPv6 is DHCP, IPv4 may be static + iface_cfg['BOOTPROTO'] = 'dhcp6' + iface_cfg['DHCLIENT6_MODE'] = 'managed' + else: + iface_cfg['IPV6INIT'] = True + # Configure network settings using DHCPv6 + iface_cfg['DHCPV6C'] = True + elif subnet_type == 'ipv6_dhcpv6-stateless': + if flavor == 'suse': + # User wants dhcp for both protocols + if iface_cfg['BOOTPROTO'] == 'dhcp4': + iface_cfg['BOOTPROTO'] = 'dhcp' + else: + # Only IPv6 is DHCP, IPv4 may be static + iface_cfg['BOOTPROTO'] = 'dhcp6' + iface_cfg['DHCLIENT6_MODE'] = 'info' + else: + iface_cfg['IPV6INIT'] = True + # Configure network settings using SLAAC from RAs and + # optional info from dhcp server using DHCPv6 + iface_cfg['IPV6_AUTOCONF'] = True + iface_cfg['DHCPV6C'] = True + # Use Information-request to get only stateless + # configuration parameters (i.e., without address). 
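+                    # (-S makes dhclient -6 send an Information-request,
+                    # i.e. it asks for configuration without an address.)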
+ iface_cfg['DHCPV6C_OPTIONS'] = '-S' + elif subnet_type == 'ipv6_slaac': + if flavor == 'suse': + # User wants dhcp for both protocols + if iface_cfg['BOOTPROTO'] == 'dhcp4': + iface_cfg['BOOTPROTO'] = 'dhcp' + else: + # Only IPv6 is DHCP, IPv4 may be static + iface_cfg['BOOTPROTO'] = 'dhcp6' + iface_cfg['DHCLIENT6_MODE'] = 'info' + else: + iface_cfg['IPV6INIT'] = True + # Configure network settings using SLAAC from RAs + iface_cfg['IPV6_AUTOCONF'] = True elif subnet_type in ['dhcp4', 'dhcp']: + bootproto_in = iface_cfg['BOOTPROTO'] iface_cfg['BOOTPROTO'] = 'dhcp' - elif subnet_type == 'static': + if flavor == 'suse' and subnet_type == 'dhcp4': + # If dhcp6 is already specified the user wants dhcp + # for both protocols + if bootproto_in != 'dhcp6': + # Only IPv4 is DHCP, IPv6 may be static + iface_cfg['BOOTPROTO'] = 'dhcp4' + elif subnet_type in ['static', 'static6']: + # RH info # grep BOOTPROTO sysconfig.txt -A2 | head -3 # BOOTPROTO=none|bootp|dhcp # 'bootp' or 'dhcp' cause a DHCP client # to run on the device. Any other # value causes any static configuration # in the file to be applied. - # ==> the following should not be set to 'static' - # but should remain 'none' - # if iface_cfg['BOOTPROTO'] == 'none': - # iface_cfg['BOOTPROTO'] = 'static' - if subnet_is_ipv6(subnet): + if subnet_is_ipv6(subnet) and flavor != 'suse': mtu_key = 'IPV6_MTU' iface_cfg['IPV6INIT'] = True if 'mtu' in subnet: @@ -335,37 +441,70 @@ class Renderer(renderer.Renderer): 'Network config: ignoring %s device-level mtu:%s' ' because ipv4 subnet-level mtu:%s provided.', iface_cfg.name, iface_cfg[mtu_key], subnet['mtu']) - iface_cfg[mtu_key] = subnet['mtu'] + if subnet_is_ipv6(subnet): + if flavor == 'suse': + # TODO(rjschwei) write mtu setting to + # /etc/sysctl.d/ + pass + else: + iface_cfg[mtu_key] = subnet['mtu'] + else: + iface_cfg[mtu_key] = subnet['mtu'] elif subnet_type == 'manual': - # If the subnet has an MTU setting, then ONBOOT=True - # to apply the setting - iface_cfg['ONBOOT'] = mtu_key in iface_cfg + if flavor == 'suse': + LOG.debug('Unknown subnet type setting "%s"', subnet_type) + else: + # If the subnet has an MTU setting, then ONBOOT=True + # to apply the setting + iface_cfg['ONBOOT'] = mtu_key in iface_cfg else: raise ValueError("Unknown subnet type '%s' found" " for interface '%s'" % (subnet_type, iface_cfg.name)) if subnet.get('control') == 'manual': - iface_cfg['ONBOOT'] = False + if flavor == 'suse': + iface_cfg['STARTMODE'] = 'manual' + else: + iface_cfg['ONBOOT'] = False # set IPv4 and IPv6 static addresses ipv4_index = -1 ipv6_index = -1 for i, subnet in enumerate(subnets, start=len(iface_cfg.children)): subnet_type = subnet.get('type') - if subnet_type == 'dhcp6': + # metric may apply to both dhcp and static config + if 'metric' in subnet: + if flavor != 'suse': + iface_cfg['METRIC'] = subnet['metric'] + if subnet_type in ['dhcp', 'dhcp4']: + # On SUSE distros 'DHCLIENT_SET_DEFAULT_ROUTE' is a global + # setting in /etc/sysconfig/network/dhcp + if flavor != 'suse': + if has_default_route and iface_cfg['BOOTPROTO'] != 'none': + iface_cfg['DHCLIENT_SET_DEFAULT_ROUTE'] = False continue - elif subnet_type in ['dhcp4', 'dhcp']: + elif subnet_type in IPV6_DYNAMIC_TYPES: continue - elif subnet_type == 'static': + elif subnet_type in ['static', 'static6']: if subnet_is_ipv6(subnet): ipv6_index = ipv6_index + 1 ipv6_cidr = "%s/%s" % (subnet['address'], subnet['prefix']) if ipv6_index == 0: - iface_cfg['IPV6ADDR'] = ipv6_cidr + if flavor == 'suse': + iface_cfg['IPADDR6'] = ipv6_cidr + else: + 
iface_cfg['IPV6ADDR'] = ipv6_cidr elif ipv6_index == 1: - iface_cfg['IPV6ADDR_SECONDARIES'] = ipv6_cidr + if flavor == 'suse': + iface_cfg['IPADDR6_1'] = ipv6_cidr + else: + iface_cfg['IPV6ADDR_SECONDARIES'] = ipv6_cidr else: - iface_cfg['IPV6ADDR_SECONDARIES'] += " " + ipv6_cidr + if flavor == 'suse': + iface_cfg['IPADDR6_%d' % ipv6_index] = ipv6_cidr + else: + iface_cfg['IPV6ADDR_SECONDARIES'] += \ + " " + ipv6_cidr else: ipv4_index = ipv4_index + 1 suff = "" if ipv4_index == 0 else str(ipv4_index) @@ -373,20 +512,17 @@ class Renderer(renderer.Renderer): iface_cfg['NETMASK' + suff] = \ net_prefix_to_ipv4_mask(subnet['prefix']) - if 'gateway' in subnet: + if 'gateway' in subnet and flavor != 'suse': iface_cfg['DEFROUTE'] = True if is_ipv6_addr(subnet['gateway']): iface_cfg['IPV6_DEFAULTGW'] = subnet['gateway'] else: iface_cfg['GATEWAY'] = subnet['gateway'] - if 'metric' in subnet: - iface_cfg['METRIC'] = subnet['metric'] - - if 'dns_search' in subnet: + if 'dns_search' in subnet and flavor != 'suse': iface_cfg['DOMAIN'] = ' '.join(subnet['dns_search']) - if 'dns_nameservers' in subnet: + if 'dns_nameservers' in subnet and flavor != 'suse': if len(subnet['dns_nameservers']) > 3: # per resolv.conf(5) MAXNS sets this to 3. LOG.debug("%s has %d entries in dns_nameservers. " @@ -396,12 +532,21 @@ class Renderer(renderer.Renderer): iface_cfg['DNS' + str(i)] = k @classmethod - def _render_subnet_routes(cls, iface_cfg, route_cfg, subnets): + def _render_subnet_routes(cls, iface_cfg, route_cfg, subnets, flavor): + # TODO(rjschwei): route configuration on SUSE distro happens via + # ifroute-* files, see lp#1812117. SUSE currently carries a local + # patch in their package. + if flavor == 'suse': + return for _, subnet in enumerate(subnets, start=len(iface_cfg.children)): + subnet_type = subnet.get('type') for route in subnet.get('routes', []): is_ipv6 = subnet.get('ipv6') or is_ipv6_addr(route['gateway']) - if _is_default_route(route): + # Any dynamic configuration method, slaac, dhcpv6-stateful/ + # stateless should get router information from router RA's. + if (_is_default_route(route) and subnet_type not in + IPV6_DYNAMIC_TYPES): if ( (subnet.get('ipv4') and route_cfg.has_set_default_ipv4) or @@ -420,8 +565,10 @@ class Renderer(renderer.Renderer): # TODO(harlowja): add validation that no other iface has # also provided the default route? 
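                # (DEFROUTE marks this interface's config as eligible to
                # supply the default route in the ifcfg scheme.)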
iface_cfg['DEFROUTE'] = True + if iface_cfg['BOOTPROTO'] in ('dhcp', 'dhcp4'): + iface_cfg['DHCLIENT_SET_DEFAULT_ROUTE'] = True if 'gateway' in route: - if is_ipv6 or is_ipv6_addr(route['gateway']): + if is_ipv6: iface_cfg['IPV6_DEFAULTGW'] = route['gateway'] route_cfg.has_set_default_ipv6 = True else: @@ -462,7 +609,9 @@ class Renderer(renderer.Renderer): iface_cfg['BONDING_OPTS'] = " ".join(bond_opts) @classmethod - def _render_physical_interfaces(cls, network_state, iface_contents): + def _render_physical_interfaces( + cls, network_state, iface_contents, flavor + ): physical_filter = renderer.filter_by_physical for iface in network_state.iter_interfaces(physical_filter): iface_name = iface['name'] @@ -470,11 +619,16 @@ class Renderer(renderer.Renderer): iface_cfg = iface_contents[iface_name] route_cfg = iface_cfg.routes - cls._render_subnets(iface_cfg, iface_subnets) - cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets) + cls._render_subnets( + iface_cfg, iface_subnets, network_state.has_default_route, + flavor + ) + cls._render_subnet_routes( + iface_cfg, route_cfg, iface_subnets, flavor + ) @classmethod - def _render_bond_interfaces(cls, network_state, iface_contents): + def _render_bond_interfaces(cls, network_state, iface_contents, flavor): bond_filter = renderer.filter_by_type('bond') slave_filter = renderer.filter_by_attr('bond-master') for iface in network_state.iter_interfaces(bond_filter): @@ -488,15 +642,24 @@ class Renderer(renderer.Renderer): master_cfgs.extend(iface_cfg.children) for master_cfg in master_cfgs: master_cfg['BONDING_MASTER'] = True - master_cfg.kind = 'bond' + if flavor != 'suse': + master_cfg.kind = 'bond' if iface.get('mac_address'): - iface_cfg['MACADDR'] = iface.get('mac_address') + if flavor == 'suse': + iface_cfg['LLADDR'] = iface.get('mac_address') + else: + iface_cfg['MACADDR'] = iface.get('mac_address') iface_subnets = iface.get("subnets", []) route_cfg = iface_cfg.routes - cls._render_subnets(iface_cfg, iface_subnets) - cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets) + cls._render_subnets( + iface_cfg, iface_subnets, network_state.has_default_route, + flavor + ) + cls._render_subnet_routes( + iface_cfg, route_cfg, iface_subnets, flavor + ) # iter_interfaces on network-state is not sorted to produce # consistent numbers we need to sort. 
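The next hunk keys the bond-slave rendering off the flavor; a simplified
sketch of the two shapes it produces (plain dicts standing in for the real
iface_cfg objects):

def render_bond_slave(flavor, master, slave_name, index):
    # suse uses BONDING_SLAVE_<n> on the master and BOOTPROTO/STARTMODE on
    # the slave; rhel uses BONDING_SLAVE<n> plus MASTER/SLAVE markers.
    if flavor == 'suse':
        master_cfg = {'BONDING_SLAVE_%s' % index: slave_name}
        slave_cfg = {'BOOTPROTO': 'none', 'STARTMODE': 'hotplug'}
    else:
        master_cfg = {'BONDING_SLAVE%s' % index: slave_name}
        slave_cfg = {'MASTER': master, 'SLAVE': True}
    return master_cfg, slave_cfg

print(render_bond_slave('suse', 'bond0', 'eth0', 0))
# -> ({'BONDING_SLAVE_0': 'eth0'},
#     {'BOOTPROTO': 'none', 'STARTMODE': 'hotplug'})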
@@ -506,29 +669,51 @@ class Renderer(renderer.Renderer): if slave_iface['bond-master'] == iface_name]) for index, bond_slave in enumerate(bond_slaves): - slavestr = 'BONDING_SLAVE%s' % index + if flavor == 'suse': + slavestr = 'BONDING_SLAVE_%s' % index + else: + slavestr = 'BONDING_SLAVE%s' % index iface_cfg[slavestr] = bond_slave slave_cfg = iface_contents[bond_slave] - slave_cfg['MASTER'] = iface_name - slave_cfg['SLAVE'] = True + if flavor == 'suse': + slave_cfg['BOOTPROTO'] = 'none' + slave_cfg['STARTMODE'] = 'hotplug' + else: + slave_cfg['MASTER'] = iface_name + slave_cfg['SLAVE'] = True @classmethod - def _render_vlan_interfaces(cls, network_state, iface_contents): + def _render_vlan_interfaces(cls, network_state, iface_contents, flavor): vlan_filter = renderer.filter_by_type('vlan') for iface in network_state.iter_interfaces(vlan_filter): iface_name = iface['name'] iface_cfg = iface_contents[iface_name] - iface_cfg['VLAN'] = True - iface_cfg['PHYSDEV'] = iface_name[:iface_name.rfind('.')] + if flavor == 'suse': + vlan_id = iface.get('vlan_id') + if vlan_id: + iface_cfg['VLAN_ID'] = vlan_id + iface_cfg['ETHERDEVICE'] = iface_name[:iface_name.rfind('.')] + else: + iface_cfg['VLAN'] = True + iface_cfg['PHYSDEV'] = iface_name[:iface_name.rfind('.')] iface_subnets = iface.get("subnets", []) route_cfg = iface_cfg.routes - cls._render_subnets(iface_cfg, iface_subnets) - cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets) + cls._render_subnets( + iface_cfg, iface_subnets, network_state.has_default_route, + flavor + ) + cls._render_subnet_routes( + iface_cfg, route_cfg, iface_subnets, flavor + ) @staticmethod def _render_dns(network_state, existing_dns_path=None): + # skip writing resolv.conf if network_state doesn't include any input. + if not any([len(network_state.dns_nameservers), + len(network_state.dns_searchdomains)]): + return None content = resolv_conf.ResolvConf("") if existing_dns_path and os.path.isfile(existing_dns_path): content = resolv_conf.ResolvConf(util.load_file(existing_dns_path)) @@ -558,19 +743,39 @@ class Renderer(renderer.Renderer): return out @classmethod - def _render_bridge_interfaces(cls, network_state, iface_contents): + def _render_bridge_interfaces(cls, network_state, iface_contents, flavor): + bridge_key_map = { + old_k: new_k for old_k, new_k in cls.cfg_key_maps[flavor].items() + if old_k.startswith('bridge')} bridge_filter = renderer.filter_by_type('bridge') + for iface in network_state.iter_interfaces(bridge_filter): iface_name = iface['name'] iface_cfg = iface_contents[iface_name] - iface_cfg.kind = 'bridge' - for old_key, new_key in cls.bridge_opts_keys: + if flavor != 'suse': + iface_cfg.kind = 'bridge' + for old_key, new_key in bridge_key_map.items(): if old_key in iface: iface_cfg[new_key] = iface[old_key] - if iface.get('mac_address'): - iface_cfg['MACADDR'] = iface.get('mac_address') + if flavor == 'suse': + if 'BRIDGE_STP' in iface_cfg: + if iface_cfg.get('BRIDGE_STP'): + iface_cfg['BRIDGE_STP'] = 'on' + else: + iface_cfg['BRIDGE_STP'] = 'off' + if iface.get('mac_address'): + key = 'MACADDR' + if flavor == 'suse': + key = 'LLADDRESS' + iface_cfg[key] = iface.get('mac_address') + + if flavor == 'suse': + if iface.get('bridge_ports', []): + iface_cfg['BRIDGE_PORTS'] = '%s' % " ".join( + iface.get('bridge_ports') + ) # Is this the right key to get all the connected interfaces? 
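Stepping back to the VLAN hunk above before the bridge rendering continues below: the two flavors describe the same device with entirely different key sets. Illustrative values for a VLAN named 'eth0.100':

# rhel flavor: initscripts infer the VLAN id from the device name
rhel_vlan_cfg = {
    'VLAN': True,
    'PHYSDEV': 'eth0',      # name up to the last '.'
}

# suse flavor: wicked takes the id explicitly when network state has it
suse_vlan_cfg = {
    'VLAN_ID': 100,
    'ETHERDEVICE': 'eth0',
}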
for bridged_iface_name in iface.get('bridge_ports', []): # Ensure all bridged interfaces are correctly tagged @@ -579,15 +784,23 @@ class Renderer(renderer.Renderer): bridged_cfgs = [bridged_cfg] bridged_cfgs.extend(bridged_cfg.children) for bridge_cfg in bridged_cfgs: - bridge_cfg['BRIDGE'] = iface_name + bridge_value = iface_name + if flavor == 'suse': + bridge_value = 'yes' + bridge_cfg['BRIDGE'] = bridge_value iface_subnets = iface.get("subnets", []) route_cfg = iface_cfg.routes - cls._render_subnets(iface_cfg, iface_subnets) - cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets) + cls._render_subnets( + iface_cfg, iface_subnets, network_state.has_default_route, + flavor + ) + cls._render_subnet_routes( + iface_cfg, route_cfg, iface_subnets, flavor + ) @classmethod - def _render_ib_interfaces(cls, network_state, iface_contents): + def _render_ib_interfaces(cls, network_state, iface_contents, flavor): ib_filter = renderer.filter_by_type('infiniband') for iface in network_state.iter_interfaces(ib_filter): iface_name = iface['name'] @@ -595,11 +808,16 @@ class Renderer(renderer.Renderer): iface_cfg.kind = 'infiniband' iface_subnets = iface.get("subnets", []) route_cfg = iface_cfg.routes - cls._render_subnets(iface_cfg, iface_subnets) - cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets) + cls._render_subnets( + iface_cfg, iface_subnets, network_state.has_default_route, + flavor + ) + cls._render_subnet_routes( + iface_cfg, route_cfg, iface_subnets, flavor + ) @classmethod - def _render_sysconfig(cls, base_sysconf_dir, network_state, + def _render_sysconfig(cls, base_sysconf_dir, network_state, flavor, templates=None): '''Given state, return /etc/sysconfig files + contents''' if not templates: @@ -610,13 +828,17 @@ class Renderer(renderer.Renderer): continue iface_name = iface['name'] iface_cfg = NetInterface(iface_name, base_sysconf_dir, templates) - cls._render_iface_shared(iface, iface_cfg) + if flavor == 'suse': + iface_cfg.drop('DEVICE') + # If type detection fails it is considered a bug in SUSE + iface_cfg.drop('TYPE') + cls._render_iface_shared(iface, iface_cfg, flavor) iface_contents[iface_name] = iface_cfg - cls._render_physical_interfaces(network_state, iface_contents) - cls._render_bond_interfaces(network_state, iface_contents) - cls._render_vlan_interfaces(network_state, iface_contents) - cls._render_bridge_interfaces(network_state, iface_contents) - cls._render_ib_interfaces(network_state, iface_contents) + cls._render_physical_interfaces(network_state, iface_contents, flavor) + cls._render_bond_interfaces(network_state, iface_contents, flavor) + cls._render_vlan_interfaces(network_state, iface_contents, flavor) + cls._render_bridge_interfaces(network_state, iface_contents, flavor) + cls._render_ib_interfaces(network_state, iface_contents, flavor) contents = {} for iface_name, iface_cfg in iface_contents.items(): if iface_cfg or iface_cfg.children: @@ -638,14 +860,15 @@ class Renderer(renderer.Renderer): file_mode = 0o644 base_sysconf_dir = util.target_path(target, self.sysconf_dir) for path, data in self._render_sysconfig(base_sysconf_dir, - network_state, + network_state, self.flavor, templates=templates).items(): util.write_file(path, data, file_mode) if self.dns_path: dns_path = util.target_path(target, self.dns_path) resolv_content = self._render_dns(network_state, existing_dns_path=dns_path) - util.write_file(dns_path, resolv_content, file_mode) + if resolv_content: + util.write_file(dns_path, resolv_content, file_mode) if 
self.networkmanager_conf_path: nm_conf_path = util.target_path(target, self.networkmanager_conf_path) @@ -657,6 +880,8 @@ class Renderer(renderer.Renderer): netrules_content = self._render_persistent_net(network_state) netrules_path = util.target_path(target, self.netrules_path) util.write_file(netrules_path, netrules_content, file_mode) + if available_nm(target=target): + enable_ifcfg_rh(util.target_path(target, path=NM_CFG_FILE)) sysconfig_path = util.target_path(target, templates.get('control')) # Distros configuring /etc/sysconfig/network as a file e.g. Centos @@ -671,6 +896,13 @@ class Renderer(renderer.Renderer): def available(target=None): + sysconfig = available_sysconfig(target=target) + nm = available_nm(target=target) + return (util.system_info()['variant'] in KNOWN_DISTROS + and any([nm, sysconfig])) + + +def available_sysconfig(target=None): expected = ['ifup', 'ifdown'] search = ['/sbin', '/usr/sbin'] for p in expected: @@ -679,10 +911,16 @@ def available(target=None): expected_paths = [ 'etc/sysconfig/network-scripts/network-functions', - 'etc/sysconfig/network-scripts/ifdown-eth'] + 'etc/sysconfig/config'] for p in expected_paths: - if not os.path.isfile(util.target_path(target, p)): - return False + if os.path.isfile(util.target_path(target, p)): + return True + return False + + +def available_nm(target=None): + if not os.path.isfile(util.target_path(target, path=NM_CFG_FILE)): + return False return True diff --git a/cloudinit/net/tests/test_dhcp.py b/cloudinit/net/tests/test_dhcp.py index cd3e7328..c3fa1e04 100644 --- a/cloudinit/net/tests/test_dhcp.py +++ b/cloudinit/net/tests/test_dhcp.py @@ -8,7 +8,8 @@ from textwrap import dedent import cloudinit.net as net from cloudinit.net.dhcp import ( InvalidDHCPLeaseFileError, maybe_perform_dhcp_discovery, - parse_dhcp_lease_file, dhcp_discovery, networkd_load_leases) + parse_dhcp_lease_file, dhcp_discovery, networkd_load_leases, + parse_static_routes) from cloudinit.util import ensure_file, write_file from cloudinit.tests.helpers import ( CiTestCase, HttprettyTestCase, mock, populate_dir, wrap_and_call) @@ -64,6 +65,188 @@ class TestParseDHCPLeasesFile(CiTestCase): self.assertItemsEqual(expected, parse_dhcp_lease_file(lease_file)) +class TestDHCPRFC3442(CiTestCase): + + def test_parse_lease_finds_rfc3442_classless_static_routes(self): + """parse_dhcp_lease_file returns rfc3442-classless-static-routes.""" + lease_file = self.tmp_path('leases') + content = dedent(""" + lease { + interface "wlp3s0"; + fixed-address 192.168.2.74; + option subnet-mask 255.255.255.0; + option routers 192.168.2.1; + option rfc3442-classless-static-routes 0,130,56,240,1; + renew 4 2017/07/27 18:02:30; + expire 5 2017/07/28 07:08:15; + } + """) + expected = [ + {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74', + 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1', + 'rfc3442-classless-static-routes': '0,130,56,240,1', + 'renew': '4 2017/07/27 18:02:30', + 'expire': '5 2017/07/28 07:08:15'}] + write_file(lease_file, content) + self.assertItemsEqual(expected, parse_dhcp_lease_file(lease_file)) + + def test_parse_lease_finds_classless_static_routes(self): + """ + parse_dhcp_lease_file returns classless-static-routes + for Centos lease format. 
+ """ + lease_file = self.tmp_path('leases') + content = dedent(""" + lease { + interface "wlp3s0"; + fixed-address 192.168.2.74; + option subnet-mask 255.255.255.0; + option routers 192.168.2.1; + option classless-static-routes 0 130.56.240.1; + renew 4 2017/07/27 18:02:30; + expire 5 2017/07/28 07:08:15; + } + """) + expected = [ + {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74', + 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1', + 'classless-static-routes': '0 130.56.240.1', + 'renew': '4 2017/07/27 18:02:30', + 'expire': '5 2017/07/28 07:08:15'}] + write_file(lease_file, content) + self.assertItemsEqual(expected, parse_dhcp_lease_file(lease_file)) + + @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') + @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') + def test_obtain_lease_parses_static_routes(self, m_maybe, m_ipv4): + """EphemeralDHPCv4 parses rfc3442 routes for EphemeralIPv4Network""" + lease = [ + {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74', + 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1', + 'rfc3442-classless-static-routes': '0,130,56,240,1', + 'renew': '4 2017/07/27 18:02:30', + 'expire': '5 2017/07/28 07:08:15'}] + m_maybe.return_value = lease + eph = net.dhcp.EphemeralDHCPv4() + eph.obtain_lease() + expected_kwargs = { + 'interface': 'wlp3s0', + 'ip': '192.168.2.74', + 'prefix_or_mask': '255.255.255.0', + 'broadcast': '192.168.2.255', + 'static_routes': [('0.0.0.0/0', '130.56.240.1')], + 'router': '192.168.2.1'} + m_ipv4.assert_called_with(**expected_kwargs) + + @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') + @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') + def test_obtain_centos_lease_parses_static_routes(self, m_maybe, m_ipv4): + """ + EphemeralDHPCv4 parses rfc3442 routes for EphemeralIPv4Network + for Centos Lease format + """ + lease = [ + {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74', + 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1', + 'classless-static-routes': '0 130.56.240.1', + 'renew': '4 2017/07/27 18:02:30', + 'expire': '5 2017/07/28 07:08:15'}] + m_maybe.return_value = lease + eph = net.dhcp.EphemeralDHCPv4() + eph.obtain_lease() + expected_kwargs = { + 'interface': 'wlp3s0', + 'ip': '192.168.2.74', + 'prefix_or_mask': '255.255.255.0', + 'broadcast': '192.168.2.255', + 'static_routes': [('0.0.0.0/0', '130.56.240.1')], + 'router': '192.168.2.1'} + m_ipv4.assert_called_with(**expected_kwargs) + + +class TestDHCPParseStaticRoutes(CiTestCase): + + with_logs = True + + def parse_static_routes_empty_string(self): + self.assertEqual([], parse_static_routes("")) + + def test_parse_static_routes_invalid_input_returns_empty_list(self): + rfc3442 = "32,169,254,169,254,130,56,248" + self.assertEqual([], parse_static_routes(rfc3442)) + + def test_parse_static_routes_bogus_width_returns_empty_list(self): + rfc3442 = "33,169,254,169,254,130,56,248" + self.assertEqual([], parse_static_routes(rfc3442)) + + def test_parse_static_routes_single_ip(self): + rfc3442 = "32,169,254,169,254,130,56,248,255" + self.assertEqual([('169.254.169.254/32', '130.56.248.255')], + parse_static_routes(rfc3442)) + + def test_parse_static_routes_single_ip_handles_trailing_semicolon(self): + rfc3442 = "32,169,254,169,254,130,56,248,255;" + self.assertEqual([('169.254.169.254/32', '130.56.248.255')], + parse_static_routes(rfc3442)) + + def test_parse_static_routes_default_route(self): + rfc3442 = "0,130,56,240,1" + self.assertEqual([('0.0.0.0/0', '130.56.240.1')], + parse_static_routes(rfc3442)) + + 
def test_parse_static_routes_class_c_b_a(self): + class_c = "24,192,168,74,192,168,0,4" + class_b = "16,172,16,172,16,0,4" + class_a = "8,10,10,0,0,4" + rfc3442 = ",".join([class_c, class_b, class_a]) + self.assertEqual(sorted([ + ("192.168.74.0/24", "192.168.0.4"), + ("172.16.0.0/16", "172.16.0.4"), + ("10.0.0.0/8", "10.0.0.4") + ]), sorted(parse_static_routes(rfc3442))) + + def test_parse_static_routes_logs_error_truncated(self): + bad_rfc3442 = { + "class_c": "24,169,254,169,10", + "class_b": "16,172,16,10", + "class_a": "8,10,10", + "gateway": "0,0", + "netlen": "33,0", + } + for rfc3442 in bad_rfc3442.values(): + self.assertEqual([], parse_static_routes(rfc3442)) + + logs = self.logs.getvalue() + self.assertEqual(len(bad_rfc3442.keys()), len(logs.splitlines())) + + def test_parse_static_routes_returns_valid_routes_until_parse_err(self): + class_c = "24,192,168,74,192,168,0,4" + class_b = "16,172,16,172,16,0,4" + class_a_error = "8,10,10,0,0" + rfc3442 = ",".join([class_c, class_b, class_a_error]) + self.assertEqual(sorted([ + ("192.168.74.0/24", "192.168.0.4"), + ("172.16.0.0/16", "172.16.0.4"), + ]), sorted(parse_static_routes(rfc3442))) + + logs = self.logs.getvalue() + self.assertIn(rfc3442, logs.splitlines()[0]) + + def test_redhat_format(self): + redhat_format = "24.191.168.128 192.168.128.1,0 192.168.128.1" + self.assertEqual(sorted([ + ("191.168.128.0/24", "192.168.128.1"), + ("0.0.0.0/0", "192.168.128.1") + ]), sorted(parse_static_routes(redhat_format))) + + def test_redhat_format_with_a_space_too_much_after_comma(self): + redhat_format = "24.191.168.128 192.168.128.1, 0 192.168.128.1" + self.assertEqual(sorted([ + ("191.168.128.0/24", "192.168.128.1"), + ("0.0.0.0/0", "192.168.128.1") + ]), sorted(parse_static_routes(redhat_format))) + + class TestDHCPDiscoveryClean(CiTestCase): with_logs = True @@ -117,6 +300,7 @@ class TestDHCPDiscoveryClean(CiTestCase): self.assertEqual('eth9', call[0][1]) self.assertIn('/var/tmp/cloud-init/cloud-init-dhcp-', call[0][2]) + @mock.patch('time.sleep', mock.MagicMock()) @mock.patch('cloudinit.net.dhcp.os.kill') @mock.patch('cloudinit.net.dhcp.util.subp') def test_dhcp_discovery_run_in_sandbox_warns_invalid_pid(self, m_subp, @@ -145,16 +329,20 @@ class TestDHCPDiscoveryClean(CiTestCase): 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}], dhcp_discovery(dhclient_script, 'eth9', tmpdir)) self.assertIn( - "pid file contains non-integer content ''", self.logs.getvalue()) + "dhclient(pid=, parentpid=unknown) failed " + "to daemonize after 10.0 seconds", + self.logs.getvalue()) m_kill.assert_not_called() + @mock.patch('cloudinit.net.dhcp.util.get_proc_ppid') @mock.patch('cloudinit.net.dhcp.os.kill') @mock.patch('cloudinit.net.dhcp.util.wait_for_files') @mock.patch('cloudinit.net.dhcp.util.subp') def test_dhcp_discovery_run_in_sandbox_waits_on_lease_and_pid(self, m_subp, m_wait, - m_kill): + m_kill, + m_getppid): """dhcp_discovery waits for the presence of pidfile and dhcp.leases.""" tmpdir = self.tmp_dir() dhclient_script = os.path.join(tmpdir, 'dhclient.orig') @@ -164,6 +352,7 @@ class TestDHCPDiscoveryClean(CiTestCase): pidfile = self.tmp_path('dhclient.pid', tmpdir) leasefile = self.tmp_path('dhcp.leases', tmpdir) m_wait.return_value = [pidfile] # Return the missing pidfile wait for + m_getppid.return_value = 1 # Indicate that dhclient has daemonized self.assertEqual([], dhcp_discovery(dhclient_script, 'eth9', tmpdir)) self.assertEqual( mock.call([pidfile, leasefile], maxwait=5, naplen=0.01), @@ -173,9 +362,10 @@ class 
TestDHCPDiscoveryClean(CiTestCase): self.logs.getvalue()) m_kill.assert_not_called() + @mock.patch('cloudinit.net.dhcp.util.get_proc_ppid') @mock.patch('cloudinit.net.dhcp.os.kill') @mock.patch('cloudinit.net.dhcp.util.subp') - def test_dhcp_discovery_run_in_sandbox(self, m_subp, m_kill): + def test_dhcp_discovery_run_in_sandbox(self, m_subp, m_kill, m_getppid): """dhcp_discovery brings up the interface and runs dhclient. It also returns the parsed dhcp.leases file generated in the sandbox. @@ -197,6 +387,7 @@ class TestDHCPDiscoveryClean(CiTestCase): pid_file = os.path.join(tmpdir, 'dhclient.pid') my_pid = 1 write_file(pid_file, "%d\n" % my_pid) + m_getppid.return_value = 1 # Indicate that dhclient has daemonized self.assertItemsEqual( [{'interface': 'eth9', 'fixed-address': '192.168.2.74', @@ -355,3 +546,5 @@ class TestEphemeralDhcpNoNetworkSetup(HttprettyTestCase): self.assertEqual(fake_lease, lease) # Ensure that dhcp discovery occurs m_dhcp.called_once_with() + +# vi: ts=4 expandtab diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py index f55c31e8..5081a337 100644 --- a/cloudinit/net/tests/test_init.py +++ b/cloudinit/net/tests/test_init.py @@ -3,15 +3,15 @@ import copy import errno import httpretty -import mock import os import requests import textwrap -import yaml +from unittest import mock import cloudinit.net as net from cloudinit.util import ensure_file, write_file, ProcessExecutionError from cloudinit.tests.helpers import CiTestCase, HttprettyTestCase +from cloudinit import safeyaml as yaml class TestSysDevPath(CiTestCase): @@ -157,6 +157,41 @@ class TestReadSysNet(CiTestCase): ensure_file(os.path.join(self.sysdir, 'eth0', 'bonding')) self.assertTrue(net.is_bond('eth0')) + def test_get_master(self): + """get_master returns the path when /sys/net/devname/master exists.""" + self.assertIsNone(net.get_master('enP1s1')) + master_path = os.path.join(self.sysdir, 'enP1s1', 'master') + ensure_file(master_path) + self.assertEqual(master_path, net.get_master('enP1s1')) + + def test_master_is_bridge_or_bond(self): + bridge_mac = 'aa:bb:cc:aa:bb:cc' + bond_mac = 'cc:bb:aa:cc:bb:aa' + + # No master => False + write_file(os.path.join(self.sysdir, 'eth1', 'address'), bridge_mac) + write_file(os.path.join(self.sysdir, 'eth2', 'address'), bond_mac) + + self.assertFalse(net.master_is_bridge_or_bond('eth1')) + self.assertFalse(net.master_is_bridge_or_bond('eth2')) + + # masters without bridge/bonding => False + write_file(os.path.join(self.sysdir, 'br0', 'address'), bridge_mac) + write_file(os.path.join(self.sysdir, 'bond0', 'address'), bond_mac) + + os.symlink('../br0', os.path.join(self.sysdir, 'eth1', 'master')) + os.symlink('../bond0', os.path.join(self.sysdir, 'eth2', 'master')) + + self.assertFalse(net.master_is_bridge_or_bond('eth1')) + self.assertFalse(net.master_is_bridge_or_bond('eth2')) + + # masters with bridge/bonding => True + write_file(os.path.join(self.sysdir, 'br0', 'bridge'), '') + write_file(os.path.join(self.sysdir, 'bond0', 'bonding'), '') + + self.assertTrue(net.master_is_bridge_or_bond('eth1')) + self.assertTrue(net.master_is_bridge_or_bond('eth2')) + def test_is_vlan(self): """is_vlan is True when /sys/net/devname/uevent has DEVTYPE=vlan.""" ensure_file(os.path.join(self.sysdir, 'eth0', 'uevent')) @@ -204,6 +239,10 @@ class TestGenerateFallbackConfig(CiTestCase): self.add_patch('cloudinit.net.util.is_container', 'm_is_container', return_value=False) self.add_patch('cloudinit.net.util.udevadm_settle', 'm_settle') + 
self.add_patch('cloudinit.net.is_netfailover', 'm_netfail', + return_value=False) + self.add_patch('cloudinit.net.is_netfail_master', 'm_netfail_master', + return_value=False) def test_generate_fallback_finds_connected_eth_with_mac(self): """generate_fallback_config finds any connected device with a mac.""" @@ -212,9 +251,9 @@ class TestGenerateFallbackConfig(CiTestCase): mac = 'aa:bb:cc:aa:bb:cc' write_file(os.path.join(self.sysdir, 'eth1', 'address'), mac) expected = { - 'config': [{'type': 'physical', 'mac_address': mac, - 'name': 'eth1', 'subnets': [{'type': 'dhcp'}]}], - 'version': 1} + 'ethernets': {'eth1': {'match': {'macaddress': mac}, + 'dhcp4': True, 'set-name': 'eth1'}}, + 'version': 2} self.assertEqual(expected, net.generate_fallback_config()) def test_generate_fallback_finds_dormant_eth_with_mac(self): @@ -223,9 +262,9 @@ class TestGenerateFallbackConfig(CiTestCase): mac = 'aa:bb:cc:aa:bb:cc' write_file(os.path.join(self.sysdir, 'eth0', 'address'), mac) expected = { - 'config': [{'type': 'physical', 'mac_address': mac, - 'name': 'eth0', 'subnets': [{'type': 'dhcp'}]}], - 'version': 1} + 'ethernets': {'eth0': {'match': {'macaddress': mac}, 'dhcp4': True, + 'set-name': 'eth0'}}, + 'version': 2} self.assertEqual(expected, net.generate_fallback_config()) def test_generate_fallback_finds_eth_by_operstate(self): @@ -233,9 +272,10 @@ class TestGenerateFallbackConfig(CiTestCase): mac = 'aa:bb:cc:aa:bb:cc' write_file(os.path.join(self.sysdir, 'eth0', 'address'), mac) expected = { - 'config': [{'type': 'physical', 'mac_address': mac, - 'name': 'eth0', 'subnets': [{'type': 'dhcp'}]}], - 'version': 1} + 'ethernets': { + 'eth0': {'dhcp4': True, 'match': {'macaddress': mac}, + 'set-name': 'eth0'}}, + 'version': 2} valid_operstates = ['dormant', 'down', 'lowerlayerdown', 'unknown'] for state in valid_operstates: write_file(os.path.join(self.sysdir, 'eth0', 'operstate'), state) @@ -267,6 +307,61 @@ class TestGenerateFallbackConfig(CiTestCase): ensure_file(os.path.join(self.sysdir, 'eth0', 'bonding')) self.assertIsNone(net.generate_fallback_config()) + def test_generate_fallback_config_skips_netfail_devs(self): + """gen_fallback_config ignores netfail primary,sby no mac on master.""" + mac = 'aa:bb:cc:aa:bb:cc' # netfailover devs share the same mac + for iface in ['ens3', 'ens3sby', 'enP0s1f3']: + write_file(os.path.join(self.sysdir, iface, 'carrier'), '1') + write_file( + os.path.join(self.sysdir, iface, 'addr_assign_type'), '0') + write_file( + os.path.join(self.sysdir, iface, 'address'), mac) + + def is_netfail(iface, _driver=None): + # ens3 is the master + if iface == 'ens3': + return False + return True + self.m_netfail.side_effect = is_netfail + + def is_netfail_master(iface, _driver=None): + # ens3 is the master + if iface == 'ens3': + return True + return False + self.m_netfail_master.side_effect = is_netfail_master + expected = { + 'ethernets': { + 'ens3': {'dhcp4': True, 'match': {'name': 'ens3'}, + 'set-name': 'ens3'}}, + 'version': 2} + result = net.generate_fallback_config() + self.assertEqual(expected, result) + + +class TestNetFindFallBackNic(CiTestCase): + + with_logs = True + + def setUp(self): + super(TestNetFindFallBackNic, self).setUp() + sys_mock = mock.patch('cloudinit.net.get_sys_class_path') + self.m_sys_path = sys_mock.start() + self.sysdir = self.tmp_dir() + '/' + self.m_sys_path.return_value = self.sysdir + self.addCleanup(sys_mock.stop) + self.add_patch('cloudinit.net.util.is_container', 'm_is_container', + return_value=False) + 
self.add_patch('cloudinit.net.util.udevadm_settle', 'm_settle') + + def test_generate_fallback_finds_first_connected_eth_with_mac(self): + """find_fallback_nic finds any connected device with a mac.""" + write_file(os.path.join(self.sysdir, 'eth0', 'carrier'), '1') + write_file(os.path.join(self.sysdir, 'eth1', 'carrier'), '1') + mac = 'aa:bb:cc:aa:bb:cc' + write_file(os.path.join(self.sysdir, 'eth1', 'address'), mac) + self.assertEqual('eth1', net.find_fallback_nic()) + class TestGetDeviceList(CiTestCase): @@ -364,6 +459,57 @@ class TestGetInterfaceMAC(CiTestCase): expected = [('eth2', 'aa:bb:cc:aa:bb:cc', None, None)] self.assertEqual(expected, net.get_interfaces()) + def test_get_interfaces_by_mac_skips_master_devs(self): + """Ignore interfaces with a master device which would have dup mac.""" + mac1 = mac2 = 'aa:bb:cc:aa:bb:cc' + write_file(os.path.join(self.sysdir, 'eth1', 'addr_assign_type'), '0') + write_file(os.path.join(self.sysdir, 'eth1', 'address'), mac1) + write_file(os.path.join(self.sysdir, 'eth1', 'master'), "blah") + write_file(os.path.join(self.sysdir, 'eth2', 'addr_assign_type'), '0') + write_file(os.path.join(self.sysdir, 'eth2', 'address'), mac2) + expected = [('eth2', mac2, None, None)] + self.assertEqual(expected, net.get_interfaces()) + + @mock.patch('cloudinit.net.is_netfailover') + def test_get_interfaces_by_mac_skips_netfailover(self, m_netfail): + """Ignore interfaces if netfailover primary or standby.""" + mac = 'aa:bb:cc:aa:bb:cc' # netfailover devs share the same mac + for iface in ['ens3', 'ens3sby', 'enP0s1f3']: + write_file( + os.path.join(self.sysdir, iface, 'addr_assign_type'), '0') + write_file( + os.path.join(self.sysdir, iface, 'address'), mac) + + def is_netfail(iface, _driver=None): + # ens3 is the master + if iface == 'ens3': + return False + else: + return True + m_netfail.side_effect = is_netfail + expected = [('ens3', mac, None, None)] + self.assertEqual(expected, net.get_interfaces()) + + def test_get_interfaces_does_not_skip_phys_members_of_bridges_and_bonds( + self + ): + bridge_mac = 'aa:bb:cc:aa:bb:cc' + bond_mac = 'cc:bb:aa:cc:bb:aa' + write_file(os.path.join(self.sysdir, 'br0', 'address'), bridge_mac) + write_file(os.path.join(self.sysdir, 'br0', 'bridge'), '') + + write_file(os.path.join(self.sysdir, 'bond0', 'address'), bond_mac) + write_file(os.path.join(self.sysdir, 'bond0', 'bonding'), '') + + write_file(os.path.join(self.sysdir, 'eth1', 'address'), bridge_mac) + os.symlink('../br0', os.path.join(self.sysdir, 'eth1', 'master')) + + write_file(os.path.join(self.sysdir, 'eth2', 'address'), bond_mac) + os.symlink('../bond0', os.path.join(self.sysdir, 'eth2', 'master')) + + interface_names = [interface[0] for interface in net.get_interfaces()] + self.assertEqual(['eth1', 'eth2'], sorted(interface_names)) + class TestInterfaceHasOwnMAC(CiTestCase): @@ -549,6 +695,45 @@ class TestEphemeralIPV4Network(CiTestCase): self.assertEqual(expected_setup_calls, m_subp.call_args_list) m_subp.assert_has_calls(expected_teardown_calls) + def test_ephemeral_ipv4_network_with_rfc3442_static_routes(self, m_subp): + params = { + 'interface': 'eth0', 'ip': '192.168.2.2', + 'prefix_or_mask': '255.255.255.0', 'broadcast': '192.168.2.255', + 'static_routes': [('169.254.169.254/32', '192.168.2.1'), + ('0.0.0.0/0', '192.168.2.1')], + 'router': '192.168.2.1'} + expected_setup_calls = [ + mock.call( + ['ip', '-family', 'inet', 'addr', 'add', '192.168.2.2/24', + 'broadcast', '192.168.2.255', 'dev', 'eth0'], + capture=True, update_env={'LANG': 'C'}), + mock.call( + 
['ip', '-family', 'inet', 'link', 'set', 'dev', 'eth0', 'up'], + capture=True), + mock.call( + ['ip', '-4', 'route', 'add', '169.254.169.254/32', + 'via', '192.168.2.1', 'dev', 'eth0'], capture=True), + mock.call( + ['ip', '-4', 'route', 'add', '0.0.0.0/0', + 'via', '192.168.2.1', 'dev', 'eth0'], capture=True)] + expected_teardown_calls = [ + mock.call( + ['ip', '-4', 'route', 'del', '0.0.0.0/0', + 'via', '192.168.2.1', 'dev', 'eth0'], capture=True), + mock.call( + ['ip', '-4', 'route', 'del', '169.254.169.254/32', + 'via', '192.168.2.1', 'dev', 'eth0'], capture=True), + mock.call( + ['ip', '-family', 'inet', 'link', 'set', 'dev', + 'eth0', 'down'], capture=True), + mock.call( + ['ip', '-family', 'inet', 'addr', 'del', + '192.168.2.2/24', 'dev', 'eth0'], capture=True) + ] + with net.EphemeralIPv4Network(**params): + self.assertEqual(expected_setup_calls, m_subp.call_args_list) + m_subp.assert_has_calls(expected_setup_calls + expected_teardown_calls) + class TestApplyNetworkCfgNames(CiTestCase): V1_CONFIG = textwrap.dedent("""\ @@ -669,3 +854,447 @@ class TestHasURLConnectivity(HttprettyTestCase): httpretty.register_uri(httpretty.GET, self.url, body={}, status=404) self.assertFalse( net.has_url_connectivity(self.url), 'Expected False on url fail') + + +def _mk_v1_phys(mac, name, driver, device_id): + v1_cfg = {'type': 'physical', 'name': name, 'mac_address': mac} + params = {} + if driver: + params.update({'driver': driver}) + if device_id: + params.update({'device_id': device_id}) + + if params: + v1_cfg.update({'params': params}) + + return v1_cfg + + +def _mk_v2_phys(mac, name, driver=None, device_id=None): + v2_cfg = {'set-name': name, 'match': {'macaddress': mac}} + if driver: + v2_cfg['match'].update({'driver': driver}) + if device_id: + v2_cfg['match'].update({'device_id': device_id}) + + return v2_cfg + + +class TestExtractPhysdevs(CiTestCase): + + def setUp(self): + super(TestExtractPhysdevs, self).setUp() + self.add_patch('cloudinit.net.device_driver', 'm_driver') + self.add_patch('cloudinit.net.device_devid', 'm_devid') + + def test_extract_physdevs_looks_up_driver_v1(self): + driver = 'virtio' + self.m_driver.return_value = driver + physdevs = [ + ['aa:bb:cc:dd:ee:ff', 'eth0', None, '0x1000'], + ] + netcfg = { + 'version': 1, + 'config': [_mk_v1_phys(*args) for args in physdevs], + } + # insert the driver value for verification + physdevs[0][2] = driver + self.assertEqual(sorted(physdevs), + sorted(net.extract_physdevs(netcfg))) + self.m_driver.assert_called_with('eth0') + + def test_extract_physdevs_looks_up_driver_v2(self): + driver = 'virtio' + self.m_driver.return_value = driver + physdevs = [ + ['aa:bb:cc:dd:ee:ff', 'eth0', None, '0x1000'], + ] + netcfg = { + 'version': 2, + 'ethernets': {args[1]: _mk_v2_phys(*args) for args in physdevs}, + } + # insert the driver value for verification + physdevs[0][2] = driver + self.assertEqual(sorted(physdevs), + sorted(net.extract_physdevs(netcfg))) + self.m_driver.assert_called_with('eth0') + + def test_extract_physdevs_looks_up_devid_v1(self): + devid = '0x1000' + self.m_devid.return_value = devid + physdevs = [ + ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', None], + ] + netcfg = { + 'version': 1, + 'config': [_mk_v1_phys(*args) for args in physdevs], + } + # insert the devid value for verification + physdevs[0][3] = devid + self.assertEqual(sorted(physdevs), + sorted(net.extract_physdevs(netcfg))) + self.m_devid.assert_called_with('eth0') + + def test_extract_physdevs_looks_up_devid_v2(self): + devid = '0x1000' + 
self.m_devid.return_value = devid + physdevs = [ + ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', None], + ] + netcfg = { + 'version': 2, + 'ethernets': {args[1]: _mk_v2_phys(*args) for args in physdevs}, + } + # insert the devid value for verification + physdevs[0][3] = devid + self.assertEqual(sorted(physdevs), + sorted(net.extract_physdevs(netcfg))) + self.m_devid.assert_called_with('eth0') + + def test_get_v1_type_physical(self): + physdevs = [ + ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', '0x1000'], + ['00:11:22:33:44:55', 'ens3', 'e1000', '0x1643'], + ['09:87:65:43:21:10', 'ens0p1', 'mlx4_core', '0:0:1000'], + ] + netcfg = { + 'version': 1, + 'config': [_mk_v1_phys(*args) for args in physdevs], + } + self.assertEqual(sorted(physdevs), + sorted(net.extract_physdevs(netcfg))) + + def test_get_v2_type_physical(self): + physdevs = [ + ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', '0x1000'], + ['00:11:22:33:44:55', 'ens3', 'e1000', '0x1643'], + ['09:87:65:43:21:10', 'ens0p1', 'mlx4_core', '0:0:1000'], + ] + netcfg = { + 'version': 2, + 'ethernets': {args[1]: _mk_v2_phys(*args) for args in physdevs}, + } + self.assertEqual(sorted(physdevs), + sorted(net.extract_physdevs(netcfg))) + + def test_get_v2_type_physical_skips_if_no_set_name(self): + netcfg = { + 'version': 2, + 'ethernets': { + 'ens3': { + 'match': {'macaddress': '00:11:22:33:44:55'}, + } + } + } + self.assertEqual([], net.extract_physdevs(netcfg)) + + def test_runtime_error_on_unknown_netcfg_version(self): + with self.assertRaises(RuntimeError): + net.extract_physdevs({'version': 3, 'awesome_config': []}) + + +class TestWaitForPhysdevs(CiTestCase): + + with_logs = True + + def setUp(self): + super(TestWaitForPhysdevs, self).setUp() + self.add_patch('cloudinit.net.get_interfaces_by_mac', + 'm_get_iface_mac') + self.add_patch('cloudinit.util.udevadm_settle', 'm_udev_settle') + + def test_wait_for_physdevs_skips_settle_if_all_present(self): + physdevs = [ + ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', '0x1000'], + ['00:11:22:33:44:55', 'ens3', 'e1000', '0x1643'], + ] + netcfg = { + 'version': 2, + 'ethernets': {args[1]: _mk_v2_phys(*args) + for args in physdevs}, + } + self.m_get_iface_mac.side_effect = iter([ + {'aa:bb:cc:dd:ee:ff': 'eth0', + '00:11:22:33:44:55': 'ens3'}, + ]) + net.wait_for_physdevs(netcfg) + self.assertEqual(0, self.m_udev_settle.call_count) + + def test_wait_for_physdevs_calls_udev_settle_on_missing(self): + physdevs = [ + ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', '0x1000'], + ['00:11:22:33:44:55', 'ens3', 'e1000', '0x1643'], + ] + netcfg = { + 'version': 2, + 'ethernets': {args[1]: _mk_v2_phys(*args) + for args in physdevs}, + } + self.m_get_iface_mac.side_effect = iter([ + {'aa:bb:cc:dd:ee:ff': 'eth0'}, # first call ens3 is missing + {'aa:bb:cc:dd:ee:ff': 'eth0', + '00:11:22:33:44:55': 'ens3'}, # second call has both + ]) + net.wait_for_physdevs(netcfg) + self.m_udev_settle.assert_called_with(exists=net.sys_dev_path('ens3')) + + def test_wait_for_physdevs_raise_runtime_error_if_missing_and_strict(self): + physdevs = [ + ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', '0x1000'], + ['00:11:22:33:44:55', 'ens3', 'e1000', '0x1643'], + ] + netcfg = { + 'version': 2, + 'ethernets': {args[1]: _mk_v2_phys(*args) + for args in physdevs}, + } + self.m_get_iface_mac.return_value = {} + with self.assertRaises(RuntimeError): + net.wait_for_physdevs(netcfg) + + self.assertEqual(5 * len(physdevs), self.m_udev_settle.call_count) + + def test_wait_for_physdevs_no_raise_if_not_strict(self): + physdevs = [ + ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', '0x1000'], 
+ ['00:11:22:33:44:55', 'ens3', 'e1000', '0x1643'], + ] + netcfg = { + 'version': 2, + 'ethernets': {args[1]: _mk_v2_phys(*args) + for args in physdevs}, + } + self.m_get_iface_mac.return_value = {} + net.wait_for_physdevs(netcfg, strict=False) + self.assertEqual(5 * len(physdevs), self.m_udev_settle.call_count) + + +class TestNetFailOver(CiTestCase): + + with_logs = True + + def setUp(self): + super(TestNetFailOver, self).setUp() + self.add_patch('cloudinit.net.util', 'm_util') + self.add_patch('cloudinit.net.read_sys_net', 'm_read_sys_net') + self.add_patch('cloudinit.net.device_driver', 'm_device_driver') + + def test_get_dev_features(self): + devname = self.random_string() + features = self.random_string() + self.m_read_sys_net.return_value = features + + self.assertEqual(features, net.get_dev_features(devname)) + self.assertEqual(1, self.m_read_sys_net.call_count) + self.assertEqual(mock.call(devname, 'device/features'), + self.m_read_sys_net.call_args_list[0]) + + def test_get_dev_features_none_returns_empty_string(self): + devname = self.random_string() + self.m_read_sys_net.side_effect = Exception('error') + self.assertEqual('', net.get_dev_features(devname)) + self.assertEqual(1, self.m_read_sys_net.call_count) + self.assertEqual(mock.call(devname, 'device/features'), + self.m_read_sys_net.call_args_list[0]) + + @mock.patch('cloudinit.net.get_dev_features') + def test_has_netfail_standby_feature(self, m_dev_features): + devname = self.random_string() + standby_features = ('0' * 62) + '1' + '0' + m_dev_features.return_value = standby_features + self.assertTrue(net.has_netfail_standby_feature(devname)) + + @mock.patch('cloudinit.net.get_dev_features') + def test_has_netfail_standby_feature_short_is_false(self, m_dev_features): + devname = self.random_string() + standby_features = self.random_string() + m_dev_features.return_value = standby_features + self.assertFalse(net.has_netfail_standby_feature(devname)) + + @mock.patch('cloudinit.net.get_dev_features') + def test_has_netfail_standby_feature_not_present_is_false(self, + m_dev_features): + devname = self.random_string() + standby_features = '0' * 64 + m_dev_features.return_value = standby_features + self.assertFalse(net.has_netfail_standby_feature(devname)) + + @mock.patch('cloudinit.net.get_dev_features') + def test_has_netfail_standby_feature_no_features_is_false(self, + m_dev_features): + devname = self.random_string() + standby_features = None + m_dev_features.return_value = standby_features + self.assertFalse(net.has_netfail_standby_feature(devname)) + + @mock.patch('cloudinit.net.has_netfail_standby_feature') + @mock.patch('cloudinit.net.os.path.exists') + def test_is_netfail_master(self, m_exists, m_standby): + devname = self.random_string() + driver = 'virtio_net' + m_exists.return_value = False # no master sysfs attr + m_standby.return_value = True # has standby feature flag + self.assertTrue(net.is_netfail_master(devname, driver)) + + @mock.patch('cloudinit.net.sys_dev_path') + def test_is_netfail_master_checks_master_attr(self, m_sysdev): + devname = self.random_string() + driver = 'virtio_net' + m_sysdev.return_value = self.random_string() + self.assertFalse(net.is_netfail_master(devname, driver)) + self.assertEqual(1, m_sysdev.call_count) + self.assertEqual(mock.call(devname, path='master'), + m_sysdev.call_args_list[0]) + + @mock.patch('cloudinit.net.has_netfail_standby_feature') + @mock.patch('cloudinit.net.os.path.exists') + def test_is_netfail_master_wrong_driver(self, m_exists, m_standby): + devname = 
self.random_string() + driver = self.random_string() + self.assertFalse(net.is_netfail_master(devname, driver)) + + @mock.patch('cloudinit.net.has_netfail_standby_feature') + @mock.patch('cloudinit.net.os.path.exists') + def test_is_netfail_master_has_master_attr(self, m_exists, m_standby): + devname = self.random_string() + driver = 'virtio_net' + m_exists.return_value = True # has master sysfs attr + self.assertFalse(net.is_netfail_master(devname, driver)) + + @mock.patch('cloudinit.net.has_netfail_standby_feature') + @mock.patch('cloudinit.net.os.path.exists') + def test_is_netfail_master_no_standby_feat(self, m_exists, m_standby): + devname = self.random_string() + driver = 'virtio_net' + m_exists.return_value = False # no master sysfs attr + m_standby.return_value = False # no standby feature flag + self.assertFalse(net.is_netfail_master(devname, driver)) + + @mock.patch('cloudinit.net.has_netfail_standby_feature') + @mock.patch('cloudinit.net.os.path.exists') + @mock.patch('cloudinit.net.sys_dev_path') + def test_is_netfail_primary(self, m_sysdev, m_exists, m_standby): + devname = self.random_string() + driver = self.random_string() # device not virtio_net + master_devname = self.random_string() + m_sysdev.return_value = "%s/%s" % (self.random_string(), + master_devname) + m_exists.return_value = True # has master sysfs attr + self.m_device_driver.return_value = 'virtio_net' # master virtio_net + m_standby.return_value = True # has standby feature flag + self.assertTrue(net.is_netfail_primary(devname, driver)) + self.assertEqual(1, self.m_device_driver.call_count) + self.assertEqual(mock.call(master_devname), + self.m_device_driver.call_args_list[0]) + self.assertEqual(1, m_standby.call_count) + self.assertEqual(mock.call(master_devname), + m_standby.call_args_list[0]) + + @mock.patch('cloudinit.net.has_netfail_standby_feature') + @mock.patch('cloudinit.net.os.path.exists') + @mock.patch('cloudinit.net.sys_dev_path') + def test_is_netfail_primary_wrong_driver(self, m_sysdev, m_exists, + m_standby): + devname = self.random_string() + driver = 'virtio_net' + self.assertFalse(net.is_netfail_primary(devname, driver)) + + @mock.patch('cloudinit.net.has_netfail_standby_feature') + @mock.patch('cloudinit.net.os.path.exists') + @mock.patch('cloudinit.net.sys_dev_path') + def test_is_netfail_primary_no_master(self, m_sysdev, m_exists, m_standby): + devname = self.random_string() + driver = self.random_string() # device not virtio_net + m_exists.return_value = False # no master sysfs attr + self.assertFalse(net.is_netfail_primary(devname, driver)) + + @mock.patch('cloudinit.net.has_netfail_standby_feature') + @mock.patch('cloudinit.net.os.path.exists') + @mock.patch('cloudinit.net.sys_dev_path') + def test_is_netfail_primary_bad_master(self, m_sysdev, m_exists, + m_standby): + devname = self.random_string() + driver = self.random_string() # device not virtio_net + master_devname = self.random_string() + m_sysdev.return_value = "%s/%s" % (self.random_string(), + master_devname) + m_exists.return_value = True # has master sysfs attr + self.m_device_driver.return_value = 'XXXX' # master not virtio_net + self.assertFalse(net.is_netfail_primary(devname, driver)) + + @mock.patch('cloudinit.net.has_netfail_standby_feature') + @mock.patch('cloudinit.net.os.path.exists') + @mock.patch('cloudinit.net.sys_dev_path') + def test_is_netfail_primary_no_standby(self, m_sysdev, m_exists, + m_standby): + devname = self.random_string() + driver = self.random_string() # device not virtio_net + master_devname = 
self.random_string() + m_sysdev.return_value = "%s/%s" % (self.random_string(), + master_devname) + m_exists.return_value = True # has master sysfs attr + self.m_device_driver.return_value = 'virtio_net' # master virtio_net + m_standby.return_value = False # master has no standby feature flag + self.assertFalse(net.is_netfail_primary(devname, driver)) + + @mock.patch('cloudinit.net.has_netfail_standby_feature') + @mock.patch('cloudinit.net.os.path.exists') + def test_is_netfail_standby(self, m_exists, m_standby): + devname = self.random_string() + driver = 'virtio_net' + m_exists.return_value = True # has master sysfs attr + m_standby.return_value = True # has standby feature flag + self.assertTrue(net.is_netfail_standby(devname, driver)) + + @mock.patch('cloudinit.net.has_netfail_standby_feature') + @mock.patch('cloudinit.net.os.path.exists') + def test_is_netfail_standby_wrong_driver(self, m_exists, m_standby): + devname = self.random_string() + driver = self.random_string() + self.assertFalse(net.is_netfail_standby(devname, driver)) + + @mock.patch('cloudinit.net.has_netfail_standby_feature') + @mock.patch('cloudinit.net.os.path.exists') + def test_is_netfail_standby_no_master(self, m_exists, m_standby): + devname = self.random_string() + driver = 'virtio_net' + m_exists.return_value = False # no master sysfs attr + self.assertFalse(net.is_netfail_standby(devname, driver)) + + @mock.patch('cloudinit.net.has_netfail_standby_feature') + @mock.patch('cloudinit.net.os.path.exists') + def test_is_netfail_standby_no_standby_feature(self, m_exists, m_standby): + devname = self.random_string() + driver = 'virtio_net' + m_exists.return_value = True # has master sysfs attr + m_standby.return_value = False # no standby feature flag + self.assertFalse(net.is_netfail_standby(devname, driver)) + + @mock.patch('cloudinit.net.is_netfail_standby') + @mock.patch('cloudinit.net.is_netfail_primary') + def test_is_netfailover_primary(self, m_primary, m_standby): + devname = self.random_string() + driver = self.random_string() + m_primary.return_value = True + m_standby.return_value = False + self.assertTrue(net.is_netfailover(devname, driver)) + + @mock.patch('cloudinit.net.is_netfail_standby') + @mock.patch('cloudinit.net.is_netfail_primary') + def test_is_netfailover_standby(self, m_primary, m_standby): + devname = self.random_string() + driver = self.random_string() + m_primary.return_value = False + m_standby.return_value = True + self.assertTrue(net.is_netfailover(devname, driver)) + + @mock.patch('cloudinit.net.is_netfail_standby') + @mock.patch('cloudinit.net.is_netfail_primary') + def test_is_netfailover_returns_false(self, m_primary, m_standby): + devname = self.random_string() + driver = self.random_string() + m_primary.return_value = False + m_standby.return_value = False + self.assertFalse(net.is_netfailover(devname, driver)) + +# vi: ts=4 expandtab diff --git a/cloudinit/net/tests/test_network_state.py b/cloudinit/net/tests/test_network_state.py new file mode 100644 index 00000000..55880852 --- /dev/null +++ b/cloudinit/net/tests/test_network_state.py @@ -0,0 +1,48 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
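Before the new test_network_state.py file begins, the TestNetFailOver cases above are worth distilling. Feature bit 62 in /sys/class/net/<dev>/device/features is the virtio-net STANDBY flag; a condensed, illustrative classifier equivalent to the separate is_netfail_* predicates these tests exercise (the names and the single-function shape are this sketch's, not cloud-init's):

def has_standby_feature(features):
    # features is the bitmap string read from device/features
    return bool(features) and len(features) > 62 and features[62] == '1'


def classify_netfail(driver, has_master_attr, dev_features,
                     master_driver=None, master_features=None):
    """Return 'master', 'standby', 'primary' or None for a device."""
    if driver == 'virtio_net':
        if has_standby_feature(dev_features):
            # the failover master has no 'master' attr; a standby does
            return 'standby' if has_master_attr else 'master'
    elif (has_master_attr and master_driver == 'virtio_net'
            and has_standby_feature(master_features)):
        return 'primary'
    return None

is_netfailover() then amounts to classify_netfail(...) in ('primary', 'standby'), which is why fallback NIC selection and get_interfaces() skip such devices.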
+ +from unittest import mock + +from cloudinit.net import network_state +from cloudinit.tests.helpers import CiTestCase + +netstate_path = 'cloudinit.net.network_state' + + +class TestNetworkStateParseConfig(CiTestCase): + + def setUp(self): + super(TestNetworkStateParseConfig, self).setUp() + nsi_path = netstate_path + '.NetworkStateInterpreter' + self.add_patch(nsi_path, 'm_nsi') + + def test_missing_version_returns_none(self): + ncfg = {} + self.assertEqual(None, network_state.parse_net_config_data(ncfg)) + + def test_unknown_versions_returns_none(self): + ncfg = {'version': 13.2} + self.assertEqual(None, network_state.parse_net_config_data(ncfg)) + + def test_version_2_passes_self_as_config(self): + ncfg = {'version': 2, 'otherconfig': {}, 'somemore': [1, 2, 3]} + network_state.parse_net_config_data(ncfg) + self.assertEqual([mock.call(version=2, config=ncfg)], + self.m_nsi.call_args_list) + + def test_valid_config_gets_network_state(self): + ncfg = {'version': 2, 'otherconfig': {}, 'somemore': [1, 2, 3]} + result = network_state.parse_net_config_data(ncfg) + self.assertNotEqual(None, result) + + def test_empty_v1_config_gets_network_state(self): + ncfg = {'version': 1, 'config': []} + result = network_state.parse_net_config_data(ncfg) + self.assertNotEqual(None, result) + + def test_empty_v2_config_gets_network_state(self): + ncfg = {'version': 2} + result = network_state.parse_net_config_data(ncfg) + self.assertNotEqual(None, result) + + +# vi: ts=4 expandtab diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py index 9ff929c2..6ba21f4d 100644 --- a/cloudinit/netinfo.py +++ b/cloudinit/netinfo.py @@ -141,6 +141,9 @@ def _netdev_info_ifconfig(ifconfig_data): res = re.match(r'.*<(\S+)>', toks[i + 1]) if res: devs[curdev]['ipv6'][-1]['scope6'] = res.group(1) + else: + devs[curdev]['ipv6'][-1]['scope6'] = toks[i + 1] + return devs @@ -355,18 +358,6 @@ def route_info(): return routes -def getgateway(): - try: - routes = route_info() - except Exception: - pass - else: - for r in routes.get('ipv4', []): - if r['flags'].find("G") >= 0: - return "%s[%s]" % (r['gateway'], r['iface']) - return None - - def netdev_pformat(): lines = [] empty = "." @@ -389,8 +380,8 @@ def netdev_pformat(): addr.get('scope', empty), data["hwaddr"])) for addr in data.get('ipv6'): tbl.add_row( - (dev, data["up"], addr["ip"], empty, addr["scope6"], - data["hwaddr"])) + (dev, data["up"], addr["ip"], empty, + addr.get("scope6", empty), data["hwaddr"])) if len(data.get('ipv6')) + len(data.get('ipv4')) == 0: tbl.add_row((dev, data["up"], empty, empty, empty, data["hwaddr"])) diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py index 6d23558e..946df7e0 100644..100755 --- a/cloudinit/reporting/handlers.py +++ b/cloudinit/reporting/handlers.py @@ -3,22 +3,17 @@ import abc import fcntl import json -import six import os -import re +import queue import struct import threading import time +import uuid +from datetime import datetime from cloudinit import log as logging from cloudinit.registry import DictRegistry from cloudinit import (url_helper, util) -from datetime import datetime - -if six.PY2: - from multiprocessing.queues import JoinableQueue as JQueue -else: - from queue import Queue as JQueue LOG = logging.getLogger(__name__) @@ -27,8 +22,7 @@ class ReportException(Exception): pass -@six.add_metaclass(abc.ABCMeta) -class ReportingHandler(object): +class ReportingHandler(metaclass=abc.ABCMeta): """Base class for report handlers. 
Implement :meth:`~publish_event` for controlling what @@ -129,24 +123,50 @@ class HyperVKvpReportingHandler(ReportingHandler): DESC_IDX_KEY = 'msg_i' JSON_SEPARATORS = (',', ':') KVP_POOL_FILE_GUEST = '/var/lib/hyperv/.kvp_pool_1' + _already_truncated_pool_file = False def __init__(self, kvp_file_path=KVP_POOL_FILE_GUEST, event_types=None): super(HyperVKvpReportingHandler, self).__init__() self._kvp_file_path = kvp_file_path + HyperVKvpReportingHandler._truncate_guest_pool_file( + self._kvp_file_path) + self._event_types = event_types - self.q = JQueue() - self.kvp_file = None + self.q = queue.Queue() self.incarnation_no = self._get_incarnation_no() self.event_key_prefix = u"{0}|{1}".format(self.EVENT_PREFIX, self.incarnation_no) - self._current_offset = 0 self.publish_thread = threading.Thread( target=self._publish_event_routine) self.publish_thread.daemon = True self.publish_thread.start() + @classmethod + def _truncate_guest_pool_file(cls, kvp_file): + """ + Truncate the pool file if it has not been truncated since boot. + This should be done exactly once for the file indicated by + KVP_POOL_FILE_GUEST constant above. This method takes a filename + so that we can use an arbitrary file during unit testing. + Since KVP is a best-effort telemetry channel we only attempt to + truncate the file once and only if the file has not been modified + since boot. Additional truncation can lead to loss of existing + KVPs. + """ + if cls._already_truncated_pool_file: + return + boot_time = time.time() - float(util.uptime()) + try: + if os.path.getmtime(kvp_file) < boot_time: + with open(kvp_file, "w"): + pass + except (OSError, IOError) as e: + LOG.warning("failed to truncate kvp pool file, %s", e) + finally: + cls._already_truncated_pool_file = True + def _get_incarnation_no(self): """ use the time passed as the incarnation number. @@ -162,28 +182,24 @@ class HyperVKvpReportingHandler(ReportingHandler): def _iterate_kvps(self, offset): """iterate the kvp file from the current offset.""" - try: - with open(self._kvp_file_path, 'rb+') as f: - self.kvp_file = f - fcntl.flock(f, fcntl.LOCK_EX) - f.seek(offset) + with open(self._kvp_file_path, 'rb') as f: + fcntl.flock(f, fcntl.LOCK_EX) + f.seek(offset) + record_data = f.read(self.HV_KVP_RECORD_SIZE) + while len(record_data) == self.HV_KVP_RECORD_SIZE: + kvp_item = self._decode_kvp_item(record_data) + yield kvp_item record_data = f.read(self.HV_KVP_RECORD_SIZE) - while len(record_data) == self.HV_KVP_RECORD_SIZE: - self._current_offset += self.HV_KVP_RECORD_SIZE - kvp_item = self._decode_kvp_item(record_data) - yield kvp_item - record_data = f.read(self.HV_KVP_RECORD_SIZE) - fcntl.flock(f, fcntl.LOCK_UN) - finally: - self.kvp_file = None + fcntl.flock(f, fcntl.LOCK_UN) def _event_key(self, event): """ the event key format is: - CLOUD_INIT|<incarnation number>|<event_type>|<event_name> + CLOUD_INIT|<incarnation number>|<event_type>|<event_name>|<time> """ - return u"{0}|{1}|{2}".format(self.event_key_prefix, - event.event_type, event.name) + return u"{0}|{1}|{2}|{3}".format(self.event_key_prefix, + event.event_type, event.name, + uuid.uuid4()) def _encode_kvp_item(self, key, value): data = (struct.pack("%ds%ds" % ( @@ -207,23 +223,13 @@ class HyperVKvpReportingHandler(ReportingHandler): return {'key': k, 'value': v} - def _update_kvp_item(self, record_data): - if self.kvp_file is None: - raise ReportException( - "kvp file '{0}' not opened." 
- .format(self._kvp_file_path)) - self.kvp_file.seek(-self.HV_KVP_RECORD_SIZE, 1) - self.kvp_file.write(record_data) - def _append_kvp_item(self, record_data): - with open(self._kvp_file_path, 'rb+') as f: + with open(self._kvp_file_path, 'ab') as f: fcntl.flock(f, fcntl.LOCK_EX) - # seek to end of the file - f.seek(0, 2) - f.write(record_data) + for data in record_data: + f.write(data) f.flush() fcntl.flock(f, fcntl.LOCK_UN) - self._current_offset = f.tell() def _break_down(self, key, meta_data, description): del meta_data[self.MSG_KEY] @@ -279,40 +285,26 @@ class HyperVKvpReportingHandler(ReportingHandler): def _publish_event_routine(self): while True: + items_from_queue = 0 try: event = self.q.get(block=True) - need_append = True + items_from_queue += 1 + encoded_data = [] + while event is not None: + encoded_data += self._encode_event(event) + try: + # get all the rest of the events in the queue + event = self.q.get(block=False) + items_from_queue += 1 + except queue.Empty: + event = None try: - if not os.path.exists(self._kvp_file_path): - LOG.warning( - "skip writing events %s to %s. file not present.", - event.as_string(), - self._kvp_file_path) - encoded_event = self._encode_event(event) - # for each encoded_event - for encoded_data in (encoded_event): - for kvp in self._iterate_kvps(self._current_offset): - match = ( - re.match( - r"^{0}\|(\d+)\|.+" - .format(self.EVENT_PREFIX), - kvp['key'] - )) - if match: - match_groups = match.groups(0) - if int(match_groups[0]) < self.incarnation_no: - need_append = False - self._update_kvp_item(encoded_data) - continue - if need_append: - self._append_kvp_item(encoded_data) - except IOError as e: - LOG.warning( - "failed posting event to kvp: %s e:%s", - event.as_string(), e) + self._append_kvp_item(encoded_data) + except (OSError, IOError) as e: + LOG.warning("failed posting events to kvp, %s", e) finally: - self.q.task_done() - + for _ in range(items_from_queue): + self.q.task_done() # when main process exits, q.get() will through EOFError # indicating we should exit this thread. except EOFError: @@ -322,7 +314,7 @@ class HyperVKvpReportingHandler(ReportingHandler): # if the kvp pool already contains a chunk of data, # so defer it to another thread. 
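The truncation guard added above deserves a standalone restatement: KVP is a best-effort telemetry channel, so the guest pool file is cleared at most once per boot, and only when its mtime predates boot (a newer mtime means the host already wrote KVPs that must survive). A sketch with an illustrative helper name:

import os
import time


def pool_file_needs_truncation(kvp_file, uptime_seconds, already_done):
    if already_done:
        return False  # enforce the once-per-boot rule
    boot_time = time.time() - uptime_seconds
    try:
        return os.path.getmtime(kvp_file) < boot_time
    except OSError:
        return False  # missing file: nothing to truncate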
def publish_event(self, event): - if (not self._event_types or event.event_type in self._event_types): + if not self._event_types or event.event_type in self._event_types: self.q.put(event) def flush(self): diff --git a/cloudinit/safeyaml.py b/cloudinit/safeyaml.py index 7bcf9dd3..d6f5f95b 100644 --- a/cloudinit/safeyaml.py +++ b/cloudinit/safeyaml.py @@ -6,6 +6,8 @@ import yaml +YAMLError = yaml.YAMLError + class _CustomSafeLoader(yaml.SafeLoader): def construct_python_unicode(self, node): @@ -17,7 +19,27 @@ _CustomSafeLoader.add_constructor( _CustomSafeLoader.construct_python_unicode) +class NoAliasSafeDumper(yaml.dumper.SafeDumper): + """A class which avoids constructing anchors/aliases on yaml dump""" + + def ignore_aliases(self, data): + return True + + def load(blob): return(yaml.load(blob, Loader=_CustomSafeLoader)) + +def dumps(obj, explicit_start=True, explicit_end=True, noalias=False): + """Return data in nicely formatted yaml.""" + + return yaml.dump(obj, + line_break="\n", + indent=4, + explicit_start=explicit_start, + explicit_end=explicit_end, + default_flow_style=False, + Dumper=(NoAliasSafeDumper + if noalias else yaml.dumper.Dumper)) + # vi: ts=4 expandtab diff --git a/cloudinit/settings.py b/cloudinit/settings.py index b1ebaade..ca4ffa8e 100644 --- a/cloudinit/settings.py +++ b/cloudinit/settings.py @@ -39,6 +39,8 @@ CFG_BUILTIN = { 'Hetzner', 'IBMCloud', 'Oracle', + 'Exoscale', + 'RbxCloud', # At the end to act as a 'catch' when none of the above work... 'None', ], diff --git a/cloudinit/signal_handler.py b/cloudinit/signal_handler.py index 12fdfe6c..9272d22d 100644 --- a/cloudinit/signal_handler.py +++ b/cloudinit/signal_handler.py @@ -9,8 +9,7 @@ import inspect import signal import sys - -from six import StringIO +from io import StringIO from cloudinit import log as logging from cloudinit import util diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index a06e6e1f..61ec522a 100644..100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -13,7 +13,6 @@ import os import os.path import re from time import time -from subprocess import call from xml.dom import minidom import xml.etree.ElementTree as ET @@ -22,10 +21,20 @@ from cloudinit import net from cloudinit.event import EventType from cloudinit.net.dhcp import EphemeralDHCPv4 from cloudinit import sources -from cloudinit.sources.helpers.azure import get_metadata_from_fabric from cloudinit.sources.helpers import netlink from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc from cloudinit import util +from cloudinit.reporting import events + +from cloudinit.sources.helpers.azure import ( + azure_ds_reporter, + azure_ds_telemetry_reporter, + get_metadata_from_fabric, + get_boot_telemetry, + get_system_info, + report_diagnostic_event, + EphemeralDHCPv4WithReporting, + is_byte_swapped) LOG = logging.getLogger(__name__) @@ -54,8 +63,14 @@ AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77' REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds" REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready" AGENT_SEED_DIR = '/var/lib/waagent' + +# In the event where the IMDS primary server is not +# available, it takes 1s to fallback to the secondary one +IMDS_TIMEOUT_IN_SECONDS = 2 IMDS_URL = "http://169.254.169.254/metadata/" +PLATFORM_ENTROPY_SOURCE = "/sys/firmware/acpi/tables/OEM0" + # List of static scripts and network config artifacts created by # stock ubuntu suported images. 
UBUNTU_EXTENDED_NETWORK_SCRIPTS = [ @@ -196,6 +211,8 @@ if util.is_FreeBSD(): RESOURCE_DISK_PATH = "/dev/" + res_disk else: LOG.debug("resource disk is None") + # TODO Find where platform entropy data is surfaced + PLATFORM_ENTROPY_SOURCE = None BUILTIN_DS_CONFIG = { 'agent_command': AGENT_START_BUILTIN, @@ -242,6 +259,7 @@ def set_hostname(hostname, hostname_command='hostname'): util.subp([hostname_command, hostname]) +@azure_ds_telemetry_reporter @contextlib.contextmanager def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'): """ @@ -269,11 +287,6 @@ class DataSourceAzure(sources.DataSource): dsname = 'Azure' _negotiated = False _metadata_imds = sources.UNSET - process_name = 'dhclient' - - tmpps = os.popen("ps -Af").read() - if process_name not in tmpps[:]: - call(['/sbin/dhclient', DEFAULT_PRIMARY_NIC]) def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) @@ -293,6 +306,7 @@ class DataSourceAzure(sources.DataSource): root = sources.DataSource.__str__(self) return "%s [seed=%s]" % (root, self.seed) + @azure_ds_telemetry_reporter def bounce_network_with_azure_hostname(self): # When using cloud-init to provision, we have to set the hostname from # the metadata and "bounce" the network to force DDNS to update via @@ -318,6 +332,7 @@ class DataSourceAzure(sources.DataSource): util.logexc(LOG, "handling set_hostname failed") return False + @azure_ds_telemetry_reporter def get_metadata_from_agent(self): temp_hostname = self.metadata.get('local-hostname') agent_cmd = self.ds_cfg['agent_command'] @@ -340,22 +355,25 @@ class DataSourceAzure(sources.DataSource): for pk in self.cfg.get('_pubkeys', []): if pk.get('value', None): key_value = pk['value'] - LOG.debug("ssh authentication: using value from fabric") + LOG.debug("SSH authentication: using value from fabric") else: bname = str(pk['fingerprint'] + ".crt") fp_files += [os.path.join(ddir, bname)] - LOG.debug("ssh authentication: " - "using fingerprint from fabirc") - - # wait very long for public SSH keys to arrive - # https://bugs.launchpad.net/cloud-init/+bug/1717611 - missing = util.log_time(logfunc=LOG.debug, - msg="waiting for SSH public key files", - func=util.wait_for_files, - args=(fp_files, 900)) - - if len(missing): - LOG.warning("Did not find files, but going on: %s", missing) + LOG.debug("SSH authentication: " + "using fingerprint from fabric") + + with events.ReportEventStack( + name="waiting-for-ssh-public-key", + description="wait for agents to retrieve SSH keys", + parent=azure_ds_reporter): + # wait very long for public SSH keys to arrive + # https://bugs.launchpad.net/cloud-init/+bug/1717611 + missing = util.log_time(logfunc=LOG.debug, + msg="waiting for SSH public key files", + func=util.wait_for_files, + args=(fp_files, 900)) + if len(missing): + LOG.warning("Did not find files, but going on: %s", missing) metadata = {} metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files) @@ -369,6 +387,7 @@ class DataSourceAzure(sources.DataSource): subplatform_type = 'seed-dir' return '%s (%s)' % (subplatform_type, self.seed) + @azure_ds_telemetry_reporter def crawl_metadata(self): """Walk all instance metadata sources returning a dict on success. 
@@ -399,19 +418,24 @@ class DataSourceAzure(sources.DataSource): elif cdev.startswith("/dev/"): if util.is_FreeBSD(): ret = util.mount_cb(cdev, load_azure_ds_dir, - mtype="udf", sync=False) + mtype="udf") else: ret = util.mount_cb(cdev, load_azure_ds_dir) else: ret = load_azure_ds_dir(cdev) except NonAzureDataSource: + report_diagnostic_event( + "Did not find Azure data source in %s" % cdev) continue except BrokenAzureDataSource as exc: msg = 'BrokenAzureDataSource: %s' % exc + report_diagnostic_event(msg) raise sources.InvalidMetaDataException(msg) except util.MountFailedError: - LOG.warning("%s was not mountable", cdev) + msg = '%s was not mountable' % cdev + report_diagnostic_event(msg) + LOG.warning(msg) continue perform_reprovision = reprovision or self._should_reprovision(ret) @@ -419,10 +443,11 @@ class DataSourceAzure(sources.DataSource): if util.is_FreeBSD(): msg = "Free BSD is not supported for PPS VMs" LOG.error(msg) + report_diagnostic_event(msg) raise sources.InvalidMetaDataException(msg) ret = self._reprovision() imds_md = get_metadata_from_imds( - self.fallback_interface, retries=3) + self.fallback_interface, retries=10) (md, userdata_raw, cfg, files) = ret self.seed = cdev crawled_data.update({ @@ -437,7 +462,9 @@ class DataSourceAzure(sources.DataSource): break if not found: - raise sources.InvalidMetaDataException('No Azure metadata found') + msg = 'No Azure metadata found' + report_diagnostic_event(msg) + raise sources.InvalidMetaDataException(msg) if found == ddir: LOG.debug("using files cached in %s", ddir) @@ -445,8 +472,7 @@ class DataSourceAzure(sources.DataSource): seed = _get_random_seed() if seed: crawled_data['metadata']['random_seed'] = seed - crawled_data['metadata']['instance-id'] = util.read_dmi_data( - 'system-uuid') + crawled_data['metadata']['instance-id'] = self._iid() if perform_reprovision: LOG.info("Reporting ready to Azure after getting ReprovisionData") @@ -456,9 +482,14 @@ class DataSourceAzure(sources.DataSource): self._report_ready(lease=self._ephemeral_dhcp_ctx.lease) self._ephemeral_dhcp_ctx.clean_network() # Teardown ephemeral else: - with EphemeralDHCPv4() as lease: - self._report_ready(lease=lease) - + try: + with EphemeralDHCPv4WithReporting( + azure_ds_reporter) as lease: + self._report_ready(lease=lease) + except Exception as e: + report_diagnostic_event( + "exception while reporting ready: %s" % e) + raise return crawled_data def _is_platform_viable(self): @@ -470,6 +501,7 @@ class DataSourceAzure(sources.DataSource): super(DataSourceAzure, self).clear_cached_attrs(attr_defaults) self._metadata_imds = sources.UNSET + @azure_ds_telemetry_reporter def _get_data(self): """Crawl and process datasource metadata caching metadata as attrs. 
@@ -479,6 +511,16 @@ class DataSourceAzure(sources.DataSource): if not self._is_platform_viable(): return False try: + get_boot_telemetry() + except Exception as e: + LOG.warning("Failed to get boot telemetry: %s", e) + + try: + get_system_info() + except Exception as e: + LOG.warning("Failed to get system information: %s", e) + + try: crawled_data = util.log_time( logfunc=LOG.debug, msg='Crawl of metadata service', func=self.crawl_metadata) @@ -516,6 +558,17 @@ class DataSourceAzure(sources.DataSource): # quickly (local check only) if self.instance_id is still valid return sources.instance_id_matches_system_uuid(self.get_instance_id()) + def _iid(self, previous=None): + prev_iid_path = os.path.join( + self.paths.get_cpath('data'), 'instance-id') + iid = util.read_dmi_data('system-uuid') + if os.path.exists(prev_iid_path): + previous = util.load_file(prev_iid_path).strip() + if is_byte_swapped(previous, iid): + return previous + return iid + + @azure_ds_telemetry_reporter def setup(self, is_new_instance): if self._negotiated is False: LOG.debug("negotiating for %s (new_instance=%s)", @@ -536,27 +589,55 @@ class DataSourceAzure(sources.DataSource): headers = {"Metadata": "true"} nl_sock = None report_ready = bool(not os.path.isfile(REPORTED_READY_MARKER_FILE)) + self.imds_logging_threshold = 1 + self.imds_poll_counter = 1 + dhcp_attempts = 0 + vnet_switched = False + return_val = None def exc_cb(msg, exception): if isinstance(exception, UrlError) and exception.code == 404: + if self.imds_poll_counter == self.imds_logging_threshold: + # Reducing the logging frequency as we are polling IMDS + self.imds_logging_threshold *= 2 + LOG.debug("Call to IMDS with arguments %s failed " + "with status code %s after %s retries", + msg, exception.code, self.imds_poll_counter) + LOG.debug("Backing off logging threshold for the same " + "exception to %d", self.imds_logging_threshold) + self.imds_poll_counter += 1 return True + # If we get an exception while trying to call IMDS, we # call DHCP and setup the ephemeral network to acquire the new IP. 
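(The `exc_cb` fallthrough for non-404 errors continues below.) The `imds_logging_threshold` / `imds_poll_counter` bookkeeping above is exponential log throttling: the Nth failure is logged only when N reaches the threshold, and the threshold then doubles, so an hours-long 404 poll produces O(log n) log lines rather than O(n). The core idea, extracted into a hypothetical helper:

```python
import logging

LOG = logging.getLogger(__name__)


class ThrottledLogger(object):
    """Log failure 1, 2, 4, 8, ... instead of every failure."""

    def __init__(self):
        self.threshold = 1
        self.counter = 1

    def failure(self, msg, *args):
        if self.counter == self.threshold:
            LOG.debug(msg, *args)
            LOG.debug("backing off logging threshold to %d",
                      self.threshold * 2)
            self.threshold *= 2
        self.counter += 1
```

After roughly a thousand polls this has emitted only about ten messages, matching the behavior of the counters in `_poll_imds`.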
+ LOG.debug("Call to IMDS with arguments %s failed with " + "status code %s", msg, exception.code) + report_diagnostic_event("polling IMDS failed with exception %s" + % exception.code) return False LOG.debug("Wait for vnetswitch to happen") while True: try: - # Save our EphemeralDHCPv4 context so we avoid repeated dhcp - self._ephemeral_dhcp_ctx = EphemeralDHCPv4() - lease = self._ephemeral_dhcp_ctx.obtain_lease() + # Save our EphemeralDHCPv4 context to avoid repeated dhcp + with events.ReportEventStack( + name="obtain-dhcp-lease", + description="obtain dhcp lease", + parent=azure_ds_reporter): + self._ephemeral_dhcp_ctx = EphemeralDHCPv4() + lease = self._ephemeral_dhcp_ctx.obtain_lease() + + if vnet_switched: + dhcp_attempts += 1 if report_ready: try: nl_sock = netlink.create_bound_netlink_socket() except netlink.NetlinkCreateSocketError as e: + report_diagnostic_event(e) LOG.warning(e) self._ephemeral_dhcp_ctx.clean_network() - return + break + path = REPORTED_READY_MARKER_FILE LOG.info( "Creating a marker file to report ready: %s", path) @@ -564,17 +645,33 @@ class DataSourceAzure(sources.DataSource): pid=os.getpid(), time=time())) self._report_ready(lease=lease) report_ready = False - try: - netlink.wait_for_media_disconnect_connect( - nl_sock, lease['interface']) - except AssertionError as error: - LOG.error(error) - return + + with events.ReportEventStack( + name="wait-for-media-disconnect-connect", + description="wait for vnet switch", + parent=azure_ds_reporter): + try: + netlink.wait_for_media_disconnect_connect( + nl_sock, lease['interface']) + except AssertionError as error: + report_diagnostic_event(error) + LOG.error(error) + break + + vnet_switched = True self._ephemeral_dhcp_ctx.clean_network() else: - return readurl(url, timeout=1, headers=headers, - exception_cb=exc_cb, infinite=True, - log_req_resp=False).contents + with events.ReportEventStack( + name="get-reprovision-data-from-imds", + description="get reprovision data from imds", + parent=azure_ds_reporter): + return_val = readurl(url, + timeout=IMDS_TIMEOUT_IN_SECONDS, + headers=headers, + exception_cb=exc_cb, + infinite=True, + log_req_resp=False).contents + break except UrlError: # Teardown our EphemeralDHCPv4 context on failure as we retry self._ephemeral_dhcp_ctx.clean_network() @@ -583,6 +680,15 @@ class DataSourceAzure(sources.DataSource): if nl_sock: nl_sock.close() + if vnet_switched: + report_diagnostic_event("attempted dhcp %d times after reuse" % + dhcp_attempts) + report_diagnostic_event("polled imds %d times after reuse" % + self.imds_poll_counter) + + return return_val + + @azure_ds_telemetry_reporter def _report_ready(self, lease): """Tells the fabric provisioning has completed """ try: @@ -620,9 +726,14 @@ class DataSourceAzure(sources.DataSource): def _reprovision(self): """Initiate the reprovisioning workflow.""" contents = self._poll_imds() - md, ud, cfg = read_azure_ovf(contents) - return (md, ud, cfg, {'ovf-env.xml': contents}) - + with events.ReportEventStack( + name="reprovisioning-read-azure-ovf", + description="read azure ovf during reprovisioning", + parent=azure_ds_reporter): + md, ud, cfg = read_azure_ovf(contents) + return (md, ud, cfg, {'ovf-env.xml': contents}) + + @azure_ds_telemetry_reporter def _negotiate(self): """Negotiate with fabric and return data from it. 
@@ -633,9 +744,11 @@ class DataSourceAzure(sources.DataSource): if self.ds_cfg['agent_command'] == AGENT_START_BUILTIN: self.bounce_network_with_azure_hostname() + pubkey_info = self.cfg.get('_pubkeys', None) metadata_func = partial(get_metadata_from_fabric, fallback_lease_file=self. - dhclient_lease_file) + dhclient_lease_file, + pubkey_info=pubkey_info) else: metadata_func = self.get_metadata_from_agent @@ -643,15 +756,20 @@ class DataSourceAzure(sources.DataSource): self.ds_cfg['agent_command']) try: fabric_data = metadata_func() - except Exception: + except Exception as e: + report_diagnostic_event( + "Error communicating with Azure fabric; You may experience " + "connectivity issues: %s" % e) LOG.warning( - "Error communicating with Azure fabric; You may experience." + "Error communicating with Azure fabric; You may experience " "connectivity issues.", exc_info=True) return False + util.del_file(REPORTED_READY_MARKER_FILE) util.del_file(REPROVISION_MARKER_FILE) return fabric_data + @azure_ds_telemetry_reporter def activate(self, cfg, is_new_instance): address_ephemeral_resize(is_new_instance=is_new_instance, preserve_ntfs=self.ds_cfg.get( @@ -659,6 +777,11 @@ class DataSourceAzure(sources.DataSource): return @property + def availability_zone(self): + return self.metadata.get( + 'imds', {}).get('compute', {}).get('platformFaultDomain') + + @property def network_config(self): """Generate a network config like net.generate_fallback_network() with the following exceptions. @@ -668,7 +791,7 @@ class DataSourceAzure(sources.DataSource): 2. Generate a fallback network config that does not include any of the blacklisted devices. """ - if not self._network_config: + if not self._network_config or self._network_config == sources.UNSET: if self.ds_cfg.get('apply_network_config'): nc_src = self._metadata_imds else: @@ -676,6 +799,10 @@ class DataSourceAzure(sources.DataSource): self._network_config = parse_network_config(nc_src) return self._network_config + @property + def region(self): + return self.metadata.get('imds', {}).get('compute', {}).get('location') + def _partitions_on_device(devpath, maxnum=16): # return a list of tuples (ptnum, path) for each part on devpath @@ -690,12 +817,14 @@ def _partitions_on_device(devpath, maxnum=16): return [] +@azure_ds_telemetry_reporter def _has_ntfs_filesystem(devpath): ntfs_devices = util.find_devs_with("TYPE=ntfs", no_cache=True) LOG.debug('ntfs_devices found = %s', ntfs_devices) return os.path.realpath(devpath) in ntfs_devices +@azure_ds_telemetry_reporter def can_dev_be_reformatted(devpath, preserve_ntfs): """Determine if the ephemeral drive at devpath should be reformatted. @@ -744,43 +873,59 @@ def can_dev_be_reformatted(devpath, preserve_ntfs): (cand_part, cand_path, devpath)) return False, msg + @azure_ds_telemetry_reporter def count_files(mp): ignored = set(['dataloss_warning_readme.txt']) return len([f for f in os.listdir(mp) if f.lower() not in ignored]) bmsg = ('partition %s (%s) on device %s was ntfs formatted' % (cand_part, cand_path, devpath)) - try: - file_count = util.mount_cb(cand_path, count_files, mtype="ntfs", - update_env_for_mount={'LANG': 'C'}) - except util.MountFailedError as e: - if "unknown filesystem type 'ntfs'" in str(e): - return True, (bmsg + ' but this system cannot mount NTFS,' - ' assuming there are no important files.' 
- ' Formatting allowed.') - return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e) - - if file_count != 0: - LOG.warning("it looks like you're using NTFS on the ephemeral disk, " - 'to ensure that filesystem does not get wiped, set ' - '%s.%s in config', '.'.join(DS_CFG_PATH), - DS_CFG_KEY_PRESERVE_NTFS) - return False, bmsg + ' but had %d files on it.' % file_count + + with events.ReportEventStack( + name="mount-ntfs-and-count", + description="mount-ntfs-and-count", + parent=azure_ds_reporter) as evt: + try: + file_count = util.mount_cb(cand_path, count_files, mtype="ntfs", + update_env_for_mount={'LANG': 'C'}) + except util.MountFailedError as e: + evt.description = "cannot mount ntfs" + if "unknown filesystem type 'ntfs'" in str(e): + return True, (bmsg + ' but this system cannot mount NTFS,' + ' assuming there are no important files.' + ' Formatting allowed.') + return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e) + + if file_count != 0: + evt.description = "mounted and counted %d files" % file_count + LOG.warning("it looks like you're using NTFS on the ephemeral" + " disk, to ensure that filesystem does not get wiped," + " set %s.%s in config", '.'.join(DS_CFG_PATH), + DS_CFG_KEY_PRESERVE_NTFS) + return False, bmsg + ' but had %d files on it.' % file_count return True, bmsg + ' and had no important files. Safe for reformatting.' +@azure_ds_telemetry_reporter def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120, is_new_instance=False, preserve_ntfs=False): # wait for ephemeral disk to come up naplen = .2 - missing = util.wait_for_files([devpath], maxwait=maxwait, naplen=naplen, - log_pre="Azure ephemeral disk: ") - - if missing: - LOG.warning("ephemeral device '%s' did not appear after %d seconds.", - devpath, maxwait) - return + with events.ReportEventStack( + name="wait-for-ephemeral-disk", + description="wait for ephemeral disk", + parent=azure_ds_reporter): + missing = util.wait_for_files([devpath], + maxwait=maxwait, + naplen=naplen, + log_pre="Azure ephemeral disk: ") + + if missing: + LOG.warning("ephemeral device '%s' did" + " not appear after %d seconds.", + devpath, maxwait) + return result = False msg = None @@ -808,6 +953,7 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120, return +@azure_ds_telemetry_reporter def perform_hostname_bounce(hostname, cfg, prev_hostname): # set the hostname to 'hostname' if it is not already set to that. 
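(The hostname-bounce comment picks up again below.) It is worth distilling the telemetry idiom that the hunks above apply over and over: wrap a step in `events.ReportEventStack(...)` parented to `azure_ds_reporter`, then mutate `evt.description` so the recorded event says what actually happened. Using the same imports this diff adds, the shape is roughly:

```python
from cloudinit.reporting import events
from cloudinit.sources.helpers.azure import azure_ds_reporter


def do_work():
    return "ok"  # stand-in for a mount, a wait, or a network call


def timed_step():
    with events.ReportEventStack(
            name="example-step",
            description="about to run the step",
            parent=azure_ds_reporter) as evt:
        result = do_work()
        # Overwrite the description so the recorded event reflects
        # what actually happened rather than what was attempted.
        evt.description = "step finished with result %s" % result
        return result
```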
# then, if policy is not off, bounce the interface using command @@ -843,6 +989,7 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname): return True +@azure_ds_telemetry_reporter def crtfile_to_pubkey(fname, data=None): pipeline = ('openssl x509 -noout -pubkey < "$0" |' 'ssh-keygen -i -m PKCS8 -f /dev/stdin') @@ -851,6 +998,7 @@ def crtfile_to_pubkey(fname, data=None): return out.rstrip() +@azure_ds_telemetry_reporter def pubkeys_from_crt_files(flist): pubkeys = [] errors = [] @@ -866,6 +1014,7 @@ def pubkeys_from_crt_files(flist): return pubkeys +@azure_ds_telemetry_reporter def write_files(datadir, files, dirmode=None): def _redact_password(cnt, fname): @@ -893,6 +1042,7 @@ def write_files(datadir, files, dirmode=None): util.write_file(filename=fname, content=content, mode=0o600) +@azure_ds_telemetry_reporter def invoke_agent(cmd): # this is a function itself to simplify patching it for test if cmd: @@ -912,16 +1062,19 @@ def find_child(node, filter_func): return ret +@azure_ds_telemetry_reporter def load_azure_ovf_pubkeys(sshnode): # This parses a 'SSH' node formatted like below, and returns # an array of dicts. - # [{'fp': '6BE7A7C3C8A8F4B123CCA5D0C2F1BE4CA7B63ED7', - # 'path': 'where/to/go'}] + # [{'fingerprint': '6BE7A7C3C8A8F4B123CCA5D0C2F1BE4CA7B63ED7', + # 'path': '/where/to/go'}] # # <SSH><PublicKeys> - # <PublicKey><Fingerprint>ABC</FingerPrint><Path>/ABC</Path> + # <PublicKey><Fingerprint>ABC</FingerPrint><Path>/x/y/z</Path> # ... # </PublicKeys></SSH> + # Under some circumstances, there may be a <Value> element along with the + # Fingerprint and Path. Pass those along if they appear. results = find_child(sshnode, lambda n: n.localName == "PublicKeys") if len(results) == 0: return [] @@ -962,11 +1115,14 @@ def load_azure_ovf_pubkeys(sshnode): return found +@azure_ds_telemetry_reporter def read_azure_ovf(contents): try: dom = minidom.parseString(contents) except Exception as e: - raise BrokenAzureDataSource("Invalid ovf-env.xml: %s" % e) + error_str = "Invalid ovf-env.xml: %s" % e + report_diagnostic_event(error_str) + raise BrokenAzureDataSource(error_str) results = find_child(dom.documentElement, lambda n: n.localName == "ProvisioningSection") @@ -986,8 +1142,8 @@ def read_azure_ovf(contents): raise NonAzureDataSource("No LinuxProvisioningConfigurationSet") if len(lpcs_nodes) > 1: raise BrokenAzureDataSource("found '%d' %ss" % - ("LinuxProvisioningConfigurationSet", - len(lpcs_nodes))) + (len(lpcs_nodes), + "LinuxProvisioningConfigurationSet")) lpcs = lpcs_nodes[0] if not lpcs.hasChildNodes(): @@ -1047,9 +1203,10 @@ def read_azure_ovf(contents): defuser = {} if username: defuser['name'] = username - if password and DEF_PASSWD_REDACTION != password: - defuser['passwd'] = encrypt_pass(password) + if password: defuser['lock_passwd'] = False + if DEF_PASSWD_REDACTION != password: + defuser['passwd'] = encrypt_pass(password) if defuser: cfg['system_info'] = {'default_user': defuser} @@ -1062,6 +1219,7 @@ def read_azure_ovf(contents): return (md, ud, cfg) +@azure_ds_telemetry_reporter def _extract_preprovisioned_vm_setting(dom): """Read the preprovision flag from the ovf. 
It should not exist unless true.""" @@ -1090,6 +1248,7 @@ def encrypt_pass(password, salt_id="$6$"): return crypt.crypt(password, salt_id + util.rand_str(strlen=16)) +@azure_ds_telemetry_reporter def _check_freebsd_cdrom(cdrom_dev): """Return boolean indicating path to cdrom device has content.""" try: @@ -1101,18 +1260,31 @@ def _check_freebsd_cdrom(cdrom_dev): return False -def _get_random_seed(): +@azure_ds_telemetry_reporter +def _get_random_seed(source=PLATFORM_ENTROPY_SOURCE): """Return content random seed file if available, otherwise, return None.""" # azure / hyper-v provides random data here - # TODO. find the seed on FreeBSD platform # now update ds_cfg to reflect contents pass in config - if util.is_FreeBSD(): + if source is None: return None - return util.load_file("/sys/firmware/acpi/tables/OEM0", - quiet=True, decode=False) + seed = util.load_file(source, quiet=True, decode=False) + + # The seed generally contains non-Unicode characters. load_file puts + # them into a str (in python 2) or bytes (in python 3). In python 2, + # bad octets in a str cause util.json_dumps() to throw an exception. In + # python 3, bytes is a non-serializable type, and the handler load_file + # uses applies b64 encoding *again* to handle it. The simplest solution + # is to just b64encode the data and then decode it to a serializable + # string. Same number of bits of entropy, just with 25% more zeroes. + # There's no need to undo this base64-encoding when the random seed is + # actually used in cc_seed_random.py. + seed = base64.b64encode(seed).decode() + return seed + +@azure_ds_telemetry_reporter def list_possible_azure_ds_devs(): devlist = [] if util.is_FreeBSD(): @@ -1127,6 +1299,7 @@ def list_possible_azure_ds_devs(): return devlist +@azure_ds_telemetry_reporter def load_azure_ds_dir(source_dir): ovf_file = os.path.join(source_dir, "ovf-env.xml") @@ -1149,47 +1322,62 @@ def parse_network_config(imds_metadata): @param: imds_metadata: Dict of content read from IMDS network service. @return: Dictionary containing network version 2 standard configuration. """ - if imds_metadata != sources.UNSET and imds_metadata: - netconfig = {'version': 2, 'ethernets': {}} - LOG.debug('Azure: generating network configuration from IMDS') - network_metadata = imds_metadata['network'] - for idx, intf in enumerate(network_metadata['interface']): - nicname = 'eth{idx}'.format(idx=idx) - dev_config = {} - for addr4 in intf['ipv4']['ipAddress']: - privateIpv4 = addr4['privateIpAddress'] - if privateIpv4: - if dev_config.get('dhcp4', False): - # Append static address config for nic > 1 - netPrefix = intf['ipv4']['subnet'][0].get( - 'prefix', '24') + with events.ReportEventStack( + name="parse_network_config", + description="", + parent=azure_ds_reporter) as evt: + if imds_metadata != sources.UNSET and imds_metadata: + netconfig = {'version': 2, 'ethernets': {}} + LOG.debug('Azure: generating network configuration from IMDS') + network_metadata = imds_metadata['network'] + for idx, intf in enumerate(network_metadata['interface']): + # First IPv4 and/or IPv6 address will be obtained via DHCP. + # Any additional IPs of each type will be set as static + # addresses. 
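(The per-interface loop these comments introduce continues below.) First, the long comment inside `_get_random_seed()` above deserves a concrete demonstration: the OEM0 bytes are generally not valid UTF-8, so they are base64-wrapped before the metadata dict is ever JSON-serialized. Standard library only:

```python
import base64
import json

# Stand-in for the bytes read from /sys/firmware/acpi/tables/OEM0.
raw = b'\x9f\x00\xffentropy-bytes'

# json.dumps(raw) raises TypeError in python 3 (bytes is not serializable),
# and raw.decode() would raise UnicodeDecodeError on the bad octets.
seed = base64.b64encode(raw).decode()

print(json.dumps({'random_seed': seed}))  # serializes cleanly
```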
+ nicname = 'eth{idx}'.format(idx=idx) + dhcp_override = {'route-metric': (idx + 1) * 100} + dev_config = {'dhcp4': True, 'dhcp4-overrides': dhcp_override, + 'dhcp6': False} + for addr_type in ('ipv4', 'ipv6'): + addresses = intf.get(addr_type, {}).get('ipAddress', []) + if addr_type == 'ipv4': + default_prefix = '24' + else: + default_prefix = '128' + if addresses: + dev_config['dhcp6'] = True + # non-primary interfaces should have a higher + # route-metric (cost) so default routes prefer + # primary nic due to lower route-metric value + dev_config['dhcp6-overrides'] = dhcp_override + for addr in addresses[1:]: + # Append static address config for ip > 1 + netPrefix = intf[addr_type]['subnet'][0].get( + 'prefix', default_prefix) + privateIp = addr['privateIpAddress'] if not dev_config.get('addresses'): dev_config['addresses'] = [] dev_config['addresses'].append( '{ip}/{prefix}'.format( - ip=privateIpv4, prefix=netPrefix)) - else: - dev_config['dhcp4'] = True - for addr6 in intf['ipv6']['ipAddress']: - privateIpv6 = addr6['privateIpAddress'] - if privateIpv6: - dev_config['dhcp6'] = True - break - if dev_config: - mac = ':'.join(re.findall(r'..', intf['macAddress'])) - dev_config.update( - {'match': {'macaddress': mac.lower()}, - 'set-name': nicname}) - netconfig['ethernets'][nicname] = dev_config - else: - blacklist = ['mlx4_core'] - LOG.debug('Azure: generating fallback configuration') - # generate a network config, blacklist picking mlx4_core devs - netconfig = net.generate_fallback_config( - blacklist_drivers=blacklist, config_driver=True) - return netconfig + ip=privateIp, prefix=netPrefix)) + if dev_config: + mac = ':'.join(re.findall(r'..', intf['macAddress'])) + dev_config.update( + {'match': {'macaddress': mac.lower()}, + 'set-name': nicname}) + netconfig['ethernets'][nicname] = dev_config + evt.description = "network config from imds" + else: + blacklist = ['mlx4_core'] + LOG.debug('Azure: generating fallback configuration') + # generate a network config, blacklist picking mlx4_core devs + netconfig = net.generate_fallback_config( + blacklist_drivers=blacklist, config_driver=True) + evt.description = "network config from fallback" + return netconfig +@azure_ds_telemetry_reporter def get_metadata_from_imds(fallback_nic, retries): """Query Azure's network metadata service, returning a dictionary. 
@@ -1210,29 +1398,39 @@ def get_metadata_from_imds(fallback_nic, retries): if net.is_up(fallback_nic): return util.log_time(**kwargs) else: - with EphemeralDHCPv4(fallback_nic): - return util.log_time(**kwargs) + try: + with EphemeralDHCPv4WithReporting( + azure_ds_reporter, fallback_nic): + return util.log_time(**kwargs) + except Exception as e: + report_diagnostic_event("exception while getting metadata: %s" % e) + raise +@azure_ds_telemetry_reporter def _get_metadata_from_imds(retries): url = IMDS_URL + "instance?api-version=2017-12-01" headers = {"Metadata": "true"} try: response = readurl( - url, timeout=1, headers=headers, retries=retries, - exception_cb=retry_on_url_exc) + url, timeout=IMDS_TIMEOUT_IN_SECONDS, headers=headers, + retries=retries, exception_cb=retry_on_url_exc) except Exception as e: - LOG.debug('Ignoring IMDS instance metadata: %s', e) + msg = 'Ignoring IMDS instance metadata: %s' % e + report_diagnostic_event(msg) + LOG.debug(msg) return {} try: return util.load_json(str(response)) - except json.decoder.JSONDecodeError: + except json.decoder.JSONDecodeError as e: + report_diagnostic_event('non-json imds response: %s' % e) LOG.warning( 'Ignoring non-json IMDS instance metadata: %s', str(response)) return {} +@azure_ds_telemetry_reporter def maybe_remove_ubuntu_network_config_scripts(paths=None): """Remove Azure-specific ubuntu network config for non-primary nics. @@ -1270,14 +1468,22 @@ def maybe_remove_ubuntu_network_config_scripts(paths=None): def _is_platform_viable(seed_dir): - """Check platform environment to report if this datasource may run.""" - asset_tag = util.read_dmi_data('chassis-asset-tag') - if asset_tag == AZURE_CHASSIS_ASSET_TAG: - return True - LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag) - if os.path.exists(os.path.join(seed_dir, 'ovf-env.xml')): - return True - return False + """Check platform environment to report if this datasource may run.""" + with events.ReportEventStack( + name="check-platform-viability", + description="found azure asset tag", + parent=azure_ds_reporter) as evt: + + asset_tag = util.read_dmi_data('chassis-asset-tag') + if asset_tag == AZURE_CHASSIS_ASSET_TAG: + return True + msg = "Non-Azure DMI asset tag '%s' discovered." % asset_tag + LOG.debug(msg) + evt.description = msg + report_diagnostic_event(msg) + if os.path.exists(os.path.join(seed_dir, 'ovf-env.xml')): + return True + return False class BrokenAzureDataSource(Exception): diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py index 2955d3f0..df88f677 100644 --- a/cloudinit/sources/DataSourceCloudSigma.py +++ b/cloudinit/sources/DataSourceCloudSigma.py @@ -42,12 +42,8 @@ class DataSourceCloudSigma(sources.DataSource): if not sys_product_name: LOG.debug("system-product-name not available in dmi data") return False - else: - LOG.debug("detected hypervisor as %s", sys_product_name) - return 'cloudsigma' in sys_product_name.lower() - - LOG.warning("failed to query dmi data for system product name") - return False + LOG.debug("detected hypervisor as %s", sys_product_name) + return 'cloudsigma' in sys_product_name.lower() def _get_data(self): """ diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py index d4b758f2..2013bed7 100644 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -13,7 +13,7 @@ # This file is part of cloud-init. See LICENSE file for license information.
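Looking back at `_get_metadata_from_imds()` above before moving into the CloudStack changes: the new constants pair a 2-second request timeout with explicit retries, on the theory that a dead primary IMDS endpoint costs about one second before the secondary answers. A rough standard-library equivalent (the URL and `Metadata: true` header are the real IMDS conventions; the retry loop is a simplification of cloud-init's `readurl`):

```python
import json
import time
import urllib.request

IMDS_URL = ("http://169.254.169.254/metadata/"
            "instance?api-version=2017-12-01")
IMDS_TIMEOUT_IN_SECONDS = 2


def fetch_imds(retries=5):
    req = urllib.request.Request(IMDS_URL, headers={"Metadata": "true"})
    for attempt in range(retries + 1):
        try:
            with urllib.request.urlopen(
                    req, timeout=IMDS_TIMEOUT_IN_SECONDS) as resp:
                return json.load(resp)
        except Exception:
            if attempt == retries:
                raise
            time.sleep(1)  # readurl waits between attempts similarly
```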
import os -from socket import inet_ntoa +from socket import inet_ntoa, getaddrinfo, gaierror from struct import pack import time @@ -93,9 +93,9 @@ class DataSourceCloudStack(sources.DataSource): urls = [uhelp.combine_url(self.metadata_address, 'latest/meta-data/instance-id')] start_time = time.time() - url = uhelp.wait_for_url( + url, _response = uhelp.wait_for_url( urls=urls, max_wait=url_params.max_wait_seconds, - timeout=url_params.timeout_seconds, status_cb=LOG.warn) + timeout=url_params.timeout_seconds, status_cb=LOG.warning) if url: LOG.debug("Using metadata source: '%s'", url) @@ -156,6 +156,17 @@ class DataSourceCloudStack(sources.DataSource): return self.metadata['availability-zone'] +def get_data_server(): + # Returns the metadata server resolved from the data-server DNS entry + try: + addrinfo = getaddrinfo("data-server.", 80) + except gaierror: + LOG.debug("DNS Entry data-server not found") + return None + else: + return addrinfo[0][4][0] # return IP + + def get_default_gateway(): # Returns the default gateway ip address in the dotted format. lines = util.load_file("/proc/net/route").splitlines() @@ -218,7 +229,14 @@ def get_vr_address(): # If no virtual router is detected, fallback on default gateway. # See http://docs.cloudstack.apache.org/projects/cloudstack-administration/en/4.8/virtual_machines/user-data.html # noqa - # Try networkd first... + # Try data-server DNS entry first + latest_address = get_data_server() + if latest_address: + LOG.debug("Found metadata server '%s' via data-server DNS entry", + latest_address) + return latest_address + + # Try networkd second... latest_address = dhcp.networkd_get_option_from_leases('SERVER_ADDRESS') if latest_address: LOG.debug("Found SERVER_ADDRESS '%s' via networkd_leases", diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 564e3eb3..f77923c2 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -72,15 +72,12 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): dslist = self.sys_cfg.get('datasource_list') for dev in find_candidate_devs(dslist=dslist): try: - # Set mtype if freebsd and turn off sync - if dev.startswith("/dev/cd"): + if util.is_FreeBSD() and dev.startswith("/dev/cd"): mtype = "cd9660" - sync = False else: mtype = None - sync = True results = util.mount_cb(dev, read_config_drive, - mtype=mtype, sync=sync) + mtype=mtype) found = dev except openstack.NonReadable: pass @@ -166,10 +163,10 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): def _get_subplatform(self): """Return the subplatform metadata source details.""" - if self.seed_dir in self.source: - subplatform_type = 'seed-dir' - elif self.source.startswith('/dev'): + if self.source.startswith('/dev'): subplatform_type = 'config-disk' + else: + subplatform_type = 'seed-dir' return '%s (%s)' % (subplatform_type, self.source) @@ -237,7 +234,7 @@ def find_candidate_devs(probe_optical=True, dslist=None): config drive v2: Disk should be: - * either vfat or iso9660 formated + * either vfat or iso9660 formatted * labeled with 'config-2' or 'CONFIG-2' """ if dslist is None: diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index b49a08db..0f2bfef4 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -10,7 +10,6 @@ import os import time -from subprocess import call from cloudinit import ec2_utils as ec2 from cloudinit import log as logging @@ -20,6 +19,7 @@ from cloudinit
import sources from cloudinit import url_helper as uhelp from cloudinit import util from cloudinit import warnings +from cloudinit.event import EventType LOG = logging.getLogger(__name__) @@ -27,13 +27,21 @@ SKIP_METADATA_URL_CODES = frozenset([uhelp.NOT_FOUND]) STRICT_ID_PATH = ("datasource", "Ec2", "strict_id") STRICT_ID_DEFAULT = "warn" -DEFAULT_PRIMARY_NIC = 'eth0' + +API_TOKEN_ROUTE = 'latest/api/token' +API_TOKEN_DISABLED = '_ec2_disable_api_token' +AWS_TOKEN_TTL_SECONDS = '21600' +AWS_TOKEN_PUT_HEADER = 'X-aws-ec2-metadata-token' +AWS_TOKEN_REQ_HEADER = AWS_TOKEN_PUT_HEADER + '-ttl-seconds' +AWS_TOKEN_REDACT = [AWS_TOKEN_PUT_HEADER, AWS_TOKEN_REQ_HEADER] class CloudNames(object): ALIYUN = "aliyun" AWS = "aws" BRIGHTBOX = "brightbox" + ZSTACK = "zstack" + E24CLOUD = "e24cloud" # UNKNOWN indicates no positive id. If strict_id is 'warn' or 'false', # then an attempt at the Ec2 Metadata service will be made. UNKNOWN = "unknown" @@ -45,12 +53,6 @@ class CloudNames(object): class DataSourceEc2(sources.DataSource): dsname = 'Ec2' - process_name = 'dhclient' - - tmpps = os.popen("ps -Af").read() - if process_name not in tmpps[:]: - call(['/sbin/dhclient', DEFAULT_PRIMARY_NIC]) - # Default metadata urls that will be used if none are provided # They will be checked for 'resolveability' and some of the # following may be discarded if they do not resolve @@ -67,6 +69,7 @@ class DataSourceEc2(sources.DataSource): url_max_wait = 120 url_timeout = 50 + _api_token = None # API token for accessing the metadata service _network_config = sources.UNSET # Used to cache calculated network cfg v1 # Whether we want to get network configuration from the metadata service. @@ -115,6 +118,19 @@ class DataSourceEc2(sources.DataSource): 'dynamic', {}).get('instance-identity', {}).get('document', {}) return True + def is_classic_instance(self): + """Report if this instance type is Ec2 Classic (non-vpc).""" + if not self.metadata: + # Can return False on inconclusive as we are also called in + # network_config where metadata will be present. + # Secondary call site is in packaging postinst script. + return False + ifaces_md = self.metadata.get('network', {}).get('interfaces', {}) + for _mac, mac_data in ifaces_md.get('macs', {}).items(): + if 'vpc-id' in mac_data: + return False + return True + @property def launch_index(self): if not self.metadata: @@ -140,11 +156,13 @@ class DataSourceEc2(sources.DataSource): min_metadata_version. """ # Assumes metadata service is already up + url_tmpl = '{0}/{1}/meta-data/instance-id' + headers = self._get_headers() for api_ver in self.extended_metadata_versions: - url = '{0}/{1}/meta-data/instance-id'.format( - self.metadata_address, api_ver) + url = url_tmpl.format(self.metadata_address, api_ver) try: - resp = uhelp.readurl(url=url) + resp = uhelp.readurl(url=url, headers=headers, + headers_redact=AWS_TOKEN_REDACT) except uhelp.UrlError as e: LOG.debug('url %s raised exception %s', url, e) else: @@ -164,12 +182,41 @@ class DataSourceEc2(sources.DataSource): # setup self.identity. So we need to do that now. 
api_version = self.get_metadata_api_version() self.identity = ec2.get_instance_identity( - api_version, self.metadata_address).get('document', {}) + api_version, self.metadata_address, + headers_cb=self._get_headers, + headers_redact=AWS_TOKEN_REDACT, + exception_cb=self._refresh_stale_aws_token_cb).get( + 'document', {}) return self.identity.get( 'instanceId', self.metadata['instance-id']) else: return self.metadata['instance-id'] + def _maybe_fetch_api_token(self, mdurls, timeout=None, max_wait=None): + if self.cloud_name != CloudNames.AWS: + return + + urls = [] + url2base = {} + url_path = API_TOKEN_ROUTE + request_method = 'PUT' + for url in mdurls: + cur = '{0}/{1}'.format(url, url_path) + urls.append(cur) + url2base[cur] = url + + # use the self._status_cb to check for Read errors, which means + # we can't reach the API token URL, so we should disable IMDSv2 + LOG.debug('Fetching Ec2 IMDSv2 API Token') + url, response = uhelp.wait_for_url( + urls=urls, max_wait=1, timeout=1, status_cb=self._status_cb, + headers_cb=self._get_headers, request_method=request_method, + headers_redact=AWS_TOKEN_REDACT) + + if url and response: + self._api_token = response + return url2base[url] + def wait_for_metadata_service(self): mcfg = self.ds_cfg @@ -191,27 +238,40 @@ class DataSourceEc2(sources.DataSource): LOG.warning("Empty metadata url list! using default list") mdurls = self.metadata_urls - urls = [] - url2base = {} - for url in mdurls: - cur = '{0}/{1}/meta-data/instance-id'.format( - url, self.min_metadata_version) - urls.append(cur) - url2base[cur] = url - - start_time = time.time() - url = uhelp.wait_for_url( - urls=urls, max_wait=url_params.max_wait_seconds, - timeout=url_params.timeout_seconds, status_cb=LOG.warn) - - if url: - self.metadata_address = url2base[url] + # try the api token path first + metadata_address = self._maybe_fetch_api_token(mdurls) + if not metadata_address: + if self._api_token == API_TOKEN_DISABLED: + LOG.warning('Retrying with IMDSv1') + # if we can't get a token, use instance-id path + urls = [] + url2base = {} + url_path = '{ver}/meta-data/instance-id'.format( + ver=self.min_metadata_version) + request_method = 'GET' + for url in mdurls: + cur = '{0}/{1}'.format(url, url_path) + urls.append(cur) + url2base[cur] = url + + start_time = time.time() + url, _ = uhelp.wait_for_url( + urls=urls, max_wait=url_params.max_wait_seconds, + timeout=url_params.timeout_seconds, status_cb=LOG.warning, + headers_redact=AWS_TOKEN_REDACT, headers_cb=self._get_headers, + request_method=request_method) + + if url: + metadata_address = url2base[url] + + if metadata_address: + self.metadata_address = metadata_address LOG.debug("Using metadata source: '%s'", self.metadata_address) else: LOG.critical("Giving up on md from %s after %s seconds", urls, int(time.time() - start_time)) - return bool(url) + return bool(metadata_address) def device_name_to_device(self, name): # Consult metadata service, that has @@ -328,6 +388,17 @@ class DataSourceEc2(sources.DataSource): if isinstance(net_md, dict): result = convert_ec2_metadata_network_config( net_md, macs_to_nics=macs_to_nics, fallback_nic=iface) + + # RELEASE_BLOCKER: xenial should drop the below if statement, + # because the issue being addressed doesn't exist pre-netplan. + # (This datasource doesn't implement check_instance_id() so the + # datasource object is recreated every boot; this means we don't + # need to modify update_events on cloud-init upgrade.) 
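(The comment about Classic instances continues below.) The token plumbing above is the standard IMDSv2 handshake: a PUT to `latest/api/token` carrying a TTL header yields a session token, and every subsequent metadata GET must send that token back. Stripped of the `url_helper` machinery, the exchange is roughly this (standard library only, no error handling):

```python
import urllib.request

BASE = "http://169.254.169.254"
AWS_TOKEN_TTL_SECONDS = "21600"


def fetch_token():
    req = urllib.request.Request(
        BASE + "/latest/api/token",
        headers={"X-aws-ec2-metadata-token-ttl-seconds":
                 AWS_TOKEN_TTL_SECONDS},
        method="PUT")
    with urllib.request.urlopen(req, timeout=1) as resp:
        return resp.read().decode()


def get_metadata(path, token):
    req = urllib.request.Request(
        BASE + "/latest/meta-data/" + path,
        headers={"X-aws-ec2-metadata-token": token})
    with urllib.request.urlopen(req, timeout=1) as resp:
        return resp.read().decode()


# e.g. print(get_metadata("instance-id", fetch_token()))
```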
+ + # Non-VPC (aka Classic) Ec2 instances need to rewrite the + # network config file every boot due to MAC address change. + if self.is_classic_instance(): + self.update_events['network'].add(EventType.BOOT) else: LOG.warning("Metadata 'network' key not valid: %s.", net_md) self._network_config = result @@ -356,15 +427,27 @@ class DataSourceEc2(sources.DataSource): if not self.wait_for_metadata_service(): return {} api_version = self.get_metadata_api_version() + redact = AWS_TOKEN_REDACT crawled_metadata = {} + if self.cloud_name == CloudNames.AWS: + exc_cb = self._refresh_stale_aws_token_cb + exc_cb_ud = self._skip_or_refresh_stale_aws_token_cb + else: + exc_cb = exc_cb_ud = None try: crawled_metadata['user-data'] = ec2.get_instance_userdata( - api_version, self.metadata_address) + api_version, self.metadata_address, + headers_cb=self._get_headers, headers_redact=redact, + exception_cb=exc_cb_ud) crawled_metadata['meta-data'] = ec2.get_instance_metadata( - api_version, self.metadata_address) + api_version, self.metadata_address, + headers_cb=self._get_headers, headers_redact=redact, + exception_cb=exc_cb) if self.cloud_name == CloudNames.AWS: identity = ec2.get_instance_identity( - api_version, self.metadata_address) + api_version, self.metadata_address, + headers_cb=self._get_headers, headers_redact=redact, + exception_cb=exc_cb) crawled_metadata['dynamic'] = {'instance-identity': identity} except Exception: util.logexc( @@ -374,6 +457,73 @@ class DataSourceEc2(sources.DataSource): crawled_metadata['_metadata_api_version'] = api_version return crawled_metadata + def _refresh_api_token(self, seconds=AWS_TOKEN_TTL_SECONDS): + """Request new metadata API token. + @param seconds: The lifetime of the token in seconds + + @return: The API token or None if unavailable. + """ + if self.cloud_name != CloudNames.AWS: + return None + LOG.debug("Refreshing Ec2 metadata API token") + request_header = {AWS_TOKEN_REQ_HEADER: seconds} + token_url = '{}/{}'.format(self.metadata_address, API_TOKEN_ROUTE) + try: + response = uhelp.readurl(token_url, headers=request_header, + headers_redact=AWS_TOKEN_REDACT, + request_method="PUT") + except uhelp.UrlError as e: + LOG.warning( + 'Unable to get API token: %s raised exception %s', + token_url, e) + return None + return response.contents + + def _skip_or_refresh_stale_aws_token_cb(self, msg, exception): + """Callback will not retry on SKIP_USERDATA_CODES or if no token + is available.""" + retry = ec2.skip_retry_on_codes( + ec2.SKIP_USERDATA_CODES, msg, exception) + if not retry: + return False # False raises exception + return self._refresh_stale_aws_token_cb(msg, exception) + + def _refresh_stale_aws_token_cb(self, msg, exception): + """Exception handler for Ec2 to refresh token if token is stale.""" + if isinstance(exception, uhelp.UrlError) and exception.code == 401: + # With _api_token as None, _get_headers will _refresh_api_token. + LOG.debug("Clearing cached Ec2 API token due to expiry") + self._api_token = None + return True # always retry + + def _status_cb(self, msg, exc=None): + LOG.warning(msg) + if 'Read timed out' in msg: + LOG.warning('Cannot use Ec2 IMDSv2 API tokens, using IMDSv1') + self._api_token = API_TOKEN_DISABLED + + def _get_headers(self, url=''): + """Return a dict of headers for accessing a url. + + If _api_token is unset on AWS, attempt to refresh the token via a PUT + and then return the updated token header. 
+ """ + if self.cloud_name != CloudNames.AWS or (self._api_token == + API_TOKEN_DISABLED): + return {} + # Request a 6 hour token if URL is API_TOKEN_ROUTE + request_token_header = {AWS_TOKEN_REQ_HEADER: AWS_TOKEN_TTL_SECONDS} + if API_TOKEN_ROUTE in url: + return request_token_header + if not self._api_token: + # If we don't yet have an API token, get one via a PUT against + # API_TOKEN_ROUTE. This _api_token may get unset by a 403 due + # to an invalid or expired token + self._api_token = self._refresh_api_token() + if not self._api_token: + return {} + return {AWS_TOKEN_PUT_HEADER: self._api_token} + class DataSourceEc2Local(DataSourceEc2): """Datasource run at init-local which sets up network to query metadata. @@ -450,20 +600,31 @@ def identify_aws(data): if (data['uuid'].startswith('ec2') and (data['uuid_source'] == 'hypervisor' or data['uuid'] == data['serial'])): - return CloudNames.AWS + return CloudNames.AWS return None def identify_brightbox(data): - if data['serial'].endswith('brightbox.com'): + if data['serial'].endswith('.brightbox.com'): return CloudNames.BRIGHTBOX +def identify_zstack(data): + if data['asset_tag'].endswith('.zstack.io'): + return CloudNames.ZSTACK + + +def identify_e24cloud(data): + if data['vendor'] == 'e24cloud': + return CloudNames.E24CLOUD + + def identify_platform(): # identify the platform and return an entry in CloudNames. data = _collect_platform_data() - checks = (identify_aws, identify_brightbox, lambda x: CloudNames.UNKNOWN) + checks = (identify_aws, identify_brightbox, identify_zstack, + identify_e24cloud, lambda x: CloudNames.UNKNOWN) for checker in checks: try: result = checker(data) @@ -481,6 +642,8 @@ def _collect_platform_data(): uuid: system-uuid from dmi or /sys/hypervisor uuid_source: 'hypervisor' (/sys/hypervisor/uuid) or 'dmi' serial: dmi 'system-serial-number' (/sys/.../product_serial) + asset_tag: 'dmidecode -s chassis-asset-tag' + vendor: dmi 'system-manufacturer' (/sys/.../sys_vendor) On Ec2 instances experimentation is that product_serial is upper case, and product_uuid is lower case. This returns lower case values for both. @@ -503,6 +666,15 @@ def _collect_platform_data(): data['serial'] = serial.lower() + asset_tag = util.read_dmi_data('chassis-asset-tag') + if asset_tag is None: + asset_tag = '' + + data['asset_tag'] = asset_tag.lower() + + vendor = util.read_dmi_data('system-manufacturer') + data['vendor'] = (vendor if vendor else '').lower() + return data diff --git a/cloudinit/sources/DataSourceExoscale.py b/cloudinit/sources/DataSourceExoscale.py new file mode 100644 index 00000000..d59aefd1 --- /dev/null +++ b/cloudinit/sources/DataSourceExoscale.py @@ -0,0 +1,268 @@ +# Author: Mathieu Corbin <mathieu.corbin@exoscale.com> +# Author: Christopher Glass <christopher.glass@exoscale.com> +# +# This file is part of cloud-init. See LICENSE file for license information. 
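One more note on the Ec2 token plumbing above before the new Exoscale datasource begins: the exception callbacks turn an HTTP 401 into "drop the cached token and retry", which composes with `_get_headers()` lazily re-fetching a token on the next request. Reduced to a skeleton with hypothetical names:

```python
class TokenCache(object):
    """Lazily fetched token plus a retry callback that invalidates it."""

    def __init__(self, refresh):
        self._refresh = refresh  # callable returning a fresh token or None
        self._token = None

    def headers(self):
        if not self._token:
            self._token = self._refresh()
        if not self._token:
            return {}
        return {'X-aws-ec2-metadata-token': self._token}

    def on_error(self, exception):
        """401 means the token expired: clear it and ask for a retry."""
        if getattr(exception, 'code', None) == 401:
            self._token = None
            return True
        return False
```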
+ +from cloudinit import ec2_utils as ec2 +from cloudinit import log as logging +from cloudinit import sources +from cloudinit import helpers +from cloudinit import url_helper +from cloudinit import util + +LOG = logging.getLogger(__name__) + +METADATA_URL = "http://169.254.169.254" +API_VERSION = "1.0" +PASSWORD_SERVER_PORT = 8080 + +URL_TIMEOUT = 10 +URL_RETRIES = 6 + +EXOSCALE_DMI_NAME = "Exoscale" + + +class DataSourceExoscale(sources.DataSource): + + dsname = 'Exoscale' + + url_max_wait = 120 + + def __init__(self, sys_cfg, distro, paths): + super(DataSourceExoscale, self).__init__(sys_cfg, distro, paths) + LOG.debug("Initializing the Exoscale datasource") + + self.metadata_url = self.ds_cfg.get('metadata_url', METADATA_URL) + self.api_version = self.ds_cfg.get('api_version', API_VERSION) + self.password_server_port = int( + self.ds_cfg.get('password_server_port', PASSWORD_SERVER_PORT)) + self.url_timeout = self.ds_cfg.get('timeout', URL_TIMEOUT) + self.url_retries = self.ds_cfg.get('retries', URL_RETRIES) + self.extra_config = {} + + def activate(self, cfg, is_new_instance): + """Adjust set-passwords module to run 'always' during each boot""" + # We run the set password config module on every boot in order to + # enable resetting the instance's password via the exoscale console + # (and a subsequent instance reboot). + # Exoscale password server only provides set-passwords user-data if + # a user has triggered a password reset. So calling that password + # service generally results in no additional cloud-config. + # TODO(Create util functions for overriding merged sys_cfg module freq) + mod = 'set_passwords' + sem_path = self.paths.get_ipath_cur('sem') + sem_helper = helpers.FileSemaphores(sem_path) + if sem_helper.clear('config_' + mod, None): + LOG.debug('Overriding module set-passwords with frequency always') + + def wait_for_metadata_service(self): + """Wait for the metadata service to be reachable.""" + + metadata_url = "{}/{}/meta-data/instance-id".format( + self.metadata_url, self.api_version) + + url, _response = url_helper.wait_for_url( + urls=[metadata_url], + max_wait=self.url_max_wait, + timeout=self.url_timeout, + status_cb=LOG.critical) + + return bool(url) + + def crawl_metadata(self): + """ + Crawl the metadata service when available. + + @returns: Dictionary of crawled metadata content. + """ + metadata_ready = util.log_time( + logfunc=LOG.info, + msg='waiting for the metadata service', + func=self.wait_for_metadata_service) + + if not metadata_ready: + return {} + + return read_metadata(self.metadata_url, self.api_version, + self.password_server_port, self.url_timeout, + self.url_retries) + + def _get_data(self): + """Fetch the user data, the metadata and the VM password + from the metadata service. + + Please refer to the datasource documentation for details on how the + metadata server and password server are crawled. + """ + if not self._is_platform_viable(): + return False + + data = util.log_time( + logfunc=LOG.debug, + msg='Crawl of metadata service', + func=self.crawl_metadata) + + if not data: + return False + + self.userdata_raw = data['user-data'] + self.metadata = data['meta-data'] + password = data.get('password') + + password_config = {} + if password: + # Since we have a password, let's make sure we are allowed to use + # it by allowing ssh_pwauth. 
+ # The password module's default behavior is to leave the + # configuration as-is in this regard, so that means it will either + # leave the password always disabled if no password is ever set, or + # leave the password login enabled if we set it once. + password_config = { + 'ssh_pwauth': True, + 'password': password, + 'chpasswd': { + 'expire': False, + }, + } + + # builtin extra_config overrides password_config + self.extra_config = util.mergemanydict( + [self.extra_config, password_config]) + + return True + + def get_config_obj(self): + return self.extra_config + + def _is_platform_viable(self): + return util.read_dmi_data('system-product-name').startswith( + EXOSCALE_DMI_NAME) + + +# Used to match classes to dependencies +datasources = [ + (DataSourceExoscale, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), +] + + +# Return a list of data sources that match this set of dependencies +def get_datasource_list(depends): + return sources.list_from_depends(depends, datasources) + + +def get_password(metadata_url=METADATA_URL, + api_version=API_VERSION, + password_server_port=PASSWORD_SERVER_PORT, + url_timeout=URL_TIMEOUT, + url_retries=URL_RETRIES): + """Obtain the VM's password if set. + + Once fetched the password is marked saved. Future calls to this method may + return empty string or 'saved_password'.""" + password_url = "{}:{}/{}/".format(metadata_url, password_server_port, + api_version) + response = url_helper.read_file_or_url( + password_url, + ssl_details=None, + headers={"DomU_Request": "send_my_password"}, + timeout=url_timeout, + retries=url_retries) + password = response.contents.decode('utf-8') + # the password is empty or already saved + # Note: the original metadata server would answer an additional + # 'bad_request' status, but the Exoscale implementation does not. 
+ if password in ['', 'saved_password']: + return None + # save the password + url_helper.read_file_or_url( + password_url, + ssl_details=None, + headers={"DomU_Request": "saved_password"}, + timeout=url_timeout, + retries=url_retries) + return password + + +def read_metadata(metadata_url=METADATA_URL, + api_version=API_VERSION, + password_server_port=PASSWORD_SERVER_PORT, + url_timeout=URL_TIMEOUT, + url_retries=URL_RETRIES): + """Query the metadata server and return the retrieved data.""" + crawled_metadata = {} + crawled_metadata['_metadata_api_version'] = api_version + try: + crawled_metadata['user-data'] = ec2.get_instance_userdata( + api_version, + metadata_url, + timeout=url_timeout, + retries=url_retries) + crawled_metadata['meta-data'] = ec2.get_instance_metadata( + api_version, + metadata_url, + timeout=url_timeout, + retries=url_retries) + except Exception as e: + util.logexc(LOG, "failed reading from metadata url %s (%s)", + metadata_url, e) + return {} + + try: + crawled_metadata['password'] = get_password( + api_version=api_version, + metadata_url=metadata_url, + password_server_port=password_server_port, + url_retries=url_retries, + url_timeout=url_timeout) + except Exception as e: + util.logexc(LOG, "failed to read from password server url %s:%s (%s)", + metadata_url, password_server_port, e) + + return crawled_metadata + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser(description='Query Exoscale Metadata') + parser.add_argument( + "--endpoint", + metavar="URL", + help="The url of the metadata service.", + default=METADATA_URL) + parser.add_argument( + "--version", + metavar="VERSION", + help="The version of the metadata endpoint to query.", + default=API_VERSION) + parser.add_argument( + "--retries", + metavar="NUM", + type=int, + help="The number of retries querying the endpoint.", + default=URL_RETRIES) + parser.add_argument( + "--timeout", + metavar="NUM", + type=int, + help="The time in seconds to wait before timing out.", + default=URL_TIMEOUT) + parser.add_argument( + "--password-port", + metavar="PORT", + type=int, + help="The port on which the password endpoint listens", + default=PASSWORD_SERVER_PORT) + + args = parser.parse_args() + + data = read_metadata( + metadata_url=args.endpoint, + api_version=args.version, + password_server_port=args.password_port, + url_timeout=args.timeout, + url_retries=args.retries) + + print(util.json_dumps(data)) + +# vi: ts=4 expandtab diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py index f72d9836..6cbfbbac 100644 --- a/cloudinit/sources/DataSourceGCE.py +++ b/cloudinit/sources/DataSourceGCE.py @@ -2,10 +2,8 @@ # # This file is part of cloud-init. See LICENSE file for license information. 
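To summarize `get_password()` above before the GCE changes: the Exoscale password server speaks a tiny header-driven protocol inherited from CloudStack. A request with `DomU_Request: send_my_password` returns the password (or an empty string / the `saved_password` sentinel), and a follow-up with `DomU_Request: saved_password` acknowledges it so it is not served again. With `requests`, which is an assumption here (cloud-init itself goes through `url_helper`):

```python
import requests


def get_password(host="169.254.169.254", port=8080, api="1.0"):
    url = "http://{}:{}/{}/".format(host, port, api)
    resp = requests.get(url, timeout=10,
                        headers={"DomU_Request": "send_my_password"})
    password = resp.text.strip()
    if password in ("", "saved_password"):
        return None  # nothing set, or already acknowledged earlier
    # Acknowledge receipt so the server stops serving this password.
    requests.get(url, timeout=10,
                 headers={"DomU_Request": "saved_password"})
    return password
```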
-import os import datetime import json -from subprocess import call from base64 import b64decode @@ -20,11 +18,13 @@ LOG = logging.getLogger(__name__) MD_V1_URL = 'http://metadata.google.internal/computeMetadata/v1/' BUILTIN_DS_CONFIG = {'metadata_url': MD_V1_URL} REQUIRED_FIELDS = ('instance-id', 'availability-zone', 'local-hostname') -DEFAULT_PRIMARY_NIC = 'eth0' +GUEST_ATTRIBUTES_URL = ('http://metadata.google.internal/computeMetadata/' + 'v1/instance/guest-attributes') +HOSTKEY_NAMESPACE = 'hostkeys' +HEADERS = {'Metadata-Flavor': 'Google'} class GoogleMetadataFetcher(object): - headers = {'Metadata-Flavor': 'Google'} def __init__(self, metadata_address): self.metadata_address = metadata_address @@ -35,7 +35,7 @@ class GoogleMetadataFetcher(object): url = self.metadata_address + path if is_recursive: url += '/?recursive=True' - resp = url_helper.readurl(url=url, headers=self.headers) + resp = url_helper.readurl(url=url, headers=HEADERS) except url_helper.UrlError as exc: msg = "url %s raised exception %s" LOG.debug(msg, path, exc) @@ -53,11 +53,6 @@ class GoogleMetadataFetcher(object): class DataSourceGCE(sources.DataSource): dsname = 'GCE' - process_name = 'dhclient' - - tmpps = os.popen("ps -Af").read() - if process_name not in tmpps[:]: - call(['/sbin/dhclient', DEFAULT_PRIMARY_NIC]) def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) @@ -98,6 +93,10 @@ class DataSourceGCE(sources.DataSource): public_keys_data = self.metadata['public-keys-data'] return _parse_public_keys(public_keys_data, self.default_user) + def publish_host_keys(self, hostkeys): + for key in hostkeys: + _write_host_key_to_guest_attributes(*key) + def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False): # GCE has long FQDNs and has asked for short hostnames. return self.metadata['local-hostname'].split('.')[0] @@ -111,6 +110,17 @@ class DataSourceGCE(sources.DataSource): return self.availability_zone.rsplit('-', 1)[0] +def _write_host_key_to_guest_attributes(key_type, key_value): + url = '%s/%s/%s' % (GUEST_ATTRIBUTES_URL, HOSTKEY_NAMESPACE, key_type) + key_value = key_value.encode('utf-8') + resp = url_helper.readurl(url=url, data=key_value, headers=HEADERS, + request_method='PUT', check_status=False) + if resp.ok(): + LOG.debug('Wrote %s host key to guest attributes.', key_type) + else: + LOG.debug('Unable to write %s host key to guest attributes.', key_type) + + def _has_expired(public_key): # Check whether an SSH key is expired. Public key input is a single SSH # public key in the GCE specific key format documented here: diff --git a/cloudinit/sources/DataSourceHetzner.py b/cloudinit/sources/DataSourceHetzner.py index 5c75b65b..50298330 100644 --- a/cloudinit/sources/DataSourceHetzner.py +++ b/cloudinit/sources/DataSourceHetzner.py @@ -28,6 +28,9 @@ MD_WAIT_RETRY = 2 class DataSourceHetzner(sources.DataSource): + + dsname = 'Hetzner' + def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.distro = distro diff --git a/cloudinit/sources/DataSourceIBMCloud.py b/cloudinit/sources/DataSourceIBMCloud.py index 21e6ae6b..e0c714e8 100644 --- a/cloudinit/sources/DataSourceIBMCloud.py +++ b/cloudinit/sources/DataSourceIBMCloud.py @@ -83,7 +83,7 @@ creates 6 boot scenarios. There is no information available to identify this scenario.
- The user will be able to ssh in as as root with their public keys that + The user will be able to SSH in as root with their public keys that have been installed into /root/ssh/.authorized_keys during the provisioning stage. diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index 61aa6d7e..517913aa 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -136,7 +136,7 @@ class DataSourceMAAS(sources.DataSource): url = url[:-1] check_url = "%s/%s/meta-data/instance-id" % (url, MD_VERSION) urls = [check_url] - url = self.oauth_helper.wait_for_url( + url, _response = self.oauth_helper.wait_for_url( urls=urls, max_wait=max_wait, timeout=timeout) if url: diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index 6860f0cc..ee748b41 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -35,6 +35,26 @@ class DataSourceNoCloud(sources.DataSource): root = sources.DataSource.__str__(self) return "%s [seed=%s][dsmode=%s]" % (root, self.seed, self.dsmode) + def _get_devices(self, label): + if util.is_FreeBSD(): + devlist = [ + p for p in ['/dev/msdosfs/' + label, '/dev/iso9660/' + label] + if os.path.exists(p)] + else: + # Query optical drive to get it in blkid cache for 2.6 kernels + util.find_devs_with(path="/dev/sr0") + util.find_devs_with(path="/dev/sr1") + + fslist = util.find_devs_with("TYPE=vfat") + fslist.extend(util.find_devs_with("TYPE=iso9660")) + + label_list = util.find_devs_with("LABEL=%s" % label.upper()) + label_list.extend(util.find_devs_with("LABEL=%s" % label.lower())) + + devlist = list(set(fslist) & set(label_list)) + devlist.sort(reverse=True) + return devlist + def _get_data(self): defaults = { "instance-id": "nocloud", @@ -99,18 +119,7 @@ class DataSourceNoCloud(sources.DataSource): label = self.ds_cfg.get('fs_label', "cidata") if label is not None: - # Query optical drive to get it in blkid cache for 2.6 kernels - util.find_devs_with(path="/dev/sr0") - util.find_devs_with(path="/dev/sr1") - - fslist = util.find_devs_with("TYPE=vfat") - fslist.extend(util.find_devs_with("TYPE=iso9660")) - - label_list = util.find_devs_with("LABEL=%s" % label) - devlist = list(set(fslist) & set(label_list)) - devlist.sort(reverse=True) - - for dev in devlist: + for dev in self._get_devices(label): try: LOG.debug("Attempting to use data from %s", dev) @@ -118,9 +127,8 @@ class DataSourceNoCloud(sources.DataSource): seeded = util.mount_cb(dev, _pp2d_callback, pp2d_kwargs) except ValueError: - if dev in label_list: - LOG.warning("device %s with label=%s not a" - "valid seed.", dev, label) + LOG.warning("device %s with label=%s not a " + "valid seed.", dev, label) continue mydata = _merge_new_seed(mydata, seeded) @@ -258,7 +266,7 @@ def load_cmdline_data(fill, cmdline=None): ("ds=nocloud-net", sources.DSMODE_NETWORK)] for idstr, dsmode in pairs: if parse_cmdline_data(idstr, fill, cmdline): - # if dsmode was explicitly in the commanad line, then + # if dsmode was explicitly in the command line, then # prefer it to the dsmode based on the command line id if 'dsmode' not in fill: fill['dsmode'] = dsmode diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index 2c40cf97..7f55b5f8 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -8,17 +8,15 @@ # # This file is part of cloud-init. See LICENSE file for license information.
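# A minimal illustration of the device-selection rule _get_devices() above
# applies on Linux: a NoCloud seed device must carry a supported filesystem
# (vfat or iso9660) AND the configured label in either case, so the two blkid
# result lists are intersected; the reverse sort prefers later device names.
# The device lists below are made up.
fslist = ['/dev/sr0', '/dev/vdb']        # devices with TYPE=vfat or iso9660
label_list = ['/dev/vdb', '/dev/sdb1']   # devices with LABEL=CIDATA/cidata
devlist = sorted(set(fslist) & set(label_list), reverse=True)
assert devlist == ['/dev/vdb']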
-from xml.dom import minidom - import base64 import os import re import time +from xml.dom import minidom from cloudinit import log as logging from cloudinit import sources from cloudinit import util - from cloudinit.sources.helpers.vmware.imc.config \ import Config from cloudinit.sources.helpers.vmware.imc.config_custom_script \ @@ -38,11 +36,15 @@ from cloudinit.sources.helpers.vmware.imc.guestcust_state \ from cloudinit.sources.helpers.vmware.imc.guestcust_util import ( enable_nics, get_nics_to_enable, - set_customization_status + set_customization_status, + get_tools_config ) LOG = logging.getLogger(__name__) +CONFGROUPNAME_GUESTCUSTOMIZATION = "deployPkg" +GUESTCUSTOMIZATION_ENABLE_CUST_SCRIPTS = "enable-custom-scripts" + class DataSourceOVF(sources.DataSource): @@ -103,8 +105,7 @@ class DataSourceOVF(sources.DataSource): plugin = "libdeployPkgPlugin.so" deployPkgPluginPath = None for path in search_paths: - # Ignore deployPkgPluginPath for now. - #deployPkgPluginPath = search_file(path, plugin) + deployPkgPluginPath = search_file(path, plugin) if deployPkgPluginPath: LOG.debug("Found the customization plugin at %s", deployPkgPluginPath) @@ -147,6 +148,24 @@ class DataSourceOVF(sources.DataSource): product_marker, os.path.join(self.paths.cloud_dir, 'data')) special_customization = product_marker and not hasmarkerfile customscript = self._vmware_cust_conf.custom_script_name + custScriptConfig = get_tools_config( + CONFGROUPNAME_GUESTCUSTOMIZATION, + GUESTCUSTOMIZATION_ENABLE_CUST_SCRIPTS, + "false") + if custScriptConfig.lower() != "true": + # Update the customization status if a + # custom script is disabled + if special_customization and customscript: + msg = "Custom script is disabled by VM Administrator" + LOG.debug(msg) + set_customization_status( + GuestCustStateEnum.GUESTCUST_STATE_RUNNING, + GuestCustErrorEnum.GUESTCUST_ERROR_SCRIPT_DISABLED) + raise RuntimeError(msg) + + ccScriptsDir = os.path.join( + self.paths.get_cpath("scripts"), + "per-instance") except Exception as e: _raise_error_status( "Error parsing the customization Config File", @@ -200,7 +219,9 @@ class DataSourceOVF(sources.DataSource): if customscript: try: - postcust = PostCustomScript(customscript, imcdirpath) + postcust = PostCustomScript(customscript, + imcdirpath, + ccScriptsDir) postcust.execute() except Exception as e: _raise_error_status( @@ -381,9 +402,7 @@ def read_vmware_imc(config): if config.timezone: cfg['timezone'] = config.timezone - # Generate a unique instance-id so that re-customization will - # happen in cloud-init - md['instance-id'] = "iid-vmware-" + util.rand_str(strlen=8) + md['instance-id'] = "iid-vmware-imc" return (md, ud, cfg) @@ -436,7 +455,7 @@ def maybe_cdrom_device(devname): """ if not devname: return False - elif not isinstance(devname, util.string_types): + elif not isinstance(devname, str): raise ValueError("Unexpected input for devname: %s" % devname) # resolve '..'
and multi '/' elements diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py index e62e9729..02c9a7b8 100644 --- a/cloudinit/sources/DataSourceOpenNebula.py +++ b/cloudinit/sources/DataSourceOpenNebula.py @@ -337,7 +337,9 @@ def parse_shell_config(content, keylist=None, bash=None, asuser=None, (output, _error) = util.subp(cmd, data=bcmd) # exclude vars in bash that change on their own or that we used - excluded = ("RANDOM", "LINENO", "SECONDS", "_", "__v") + excluded = ( + "EPOCHREALTIME", "EPOCHSECONDS", "RANDOM", "LINENO", "SECONDS", "_", + "__v") preset = {} ret = {} target = None diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py index 4a015240..7a5e71b6 100644 --- a/cloudinit/sources/DataSourceOpenStack.py +++ b/cloudinit/sources/DataSourceOpenStack.py @@ -76,7 +76,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): url_params = self.get_url_params() start_time = time.time() - avail_url = url_helper.wait_for_url( + avail_url, _response = url_helper.wait_for_url( urls=md_urls, max_wait=url_params.max_wait_seconds, timeout=url_params.timeout_seconds) if avail_url: diff --git a/cloudinit/sources/DataSourceOracle.py b/cloudinit/sources/DataSourceOracle.py index 70b9c58a..eec87403 100644 --- a/cloudinit/sources/DataSourceOracle.py +++ b/cloudinit/sources/DataSourceOracle.py @@ -16,7 +16,7 @@ Notes: """ from cloudinit.url_helper import combine_url, readurl, UrlError -from cloudinit.net import dhcp +from cloudinit.net import dhcp, get_interfaces_by_mac, is_netfail_master from cloudinit import net from cloudinit import sources from cloudinit import util @@ -28,8 +28,134 @@ import re LOG = logging.getLogger(__name__) +BUILTIN_DS_CONFIG = { + # Don't use IMDS to configure secondary NICs by default + 'configure_secondary_nics': False, +} CHASSIS_ASSET_TAG = "OracleCloud.com" METADATA_ENDPOINT = "http://169.254.169.254/openstack/" +VNIC_METADATA_URL = 'http://169.254.169.254/opc/v1/vnics/' +# https://docs.cloud.oracle.com/iaas/Content/Network/Troubleshoot/connectionhang.htm#Overview, +# indicates that an MTU of 9000 is used within OCI +MTU = 9000 + + +def _add_network_config_from_opc_imds(network_config): + """ + Fetch data from Oracle's IMDS, generate secondary NIC config, merge it. + + The primary NIC configuration should not be modified based on the IMDS + values, as it should continue to be configured for DHCP. As such, this + takes an existing network_config dict which is expected to have the primary + NIC configuration already present. It will mutate the given dict to + include the secondary VNICs. + + :param network_config: + A v1 or v2 network config dict with the primary NIC already configured. + This dict will be mutated. + + :raises: + Exceptions are not handled within this function. Likely exceptions are + those raised by url_helper.readurl (if communicating with the IMDS + fails), ValueError/JSONDecodeError (if the IMDS returns invalid JSON), + and KeyError/IndexError (if the IMDS returns valid JSON with unexpected + contents). + """ + resp = readurl(VNIC_METADATA_URL) + vnics = json.loads(str(resp)) + + if 'nicIndex' in vnics[0]: + # TODO: Once configure_secondary_nics defaults to True, lower the level + # of this log message. (Currently, if we're running this code at all, + # someone has explicitly opted-in to secondary VNIC configuration, so + # we should warn them that it didn't happen. 
Once it's default, this + # would be emitted on every Bare Metal Machine launch, which means INFO + # or DEBUG would be more appropriate.) + LOG.warning( + 'VNIC metadata indicates this is a bare metal machine; skipping' + ' secondary VNIC configuration.' + ) + return + + interfaces_by_mac = get_interfaces_by_mac() + + for vnic_dict in vnics[1:]: + # We skip the first entry in the response because the primary interface + # is already configured by iSCSI boot; applying configuration from the + # IMDS is not required. + mac_address = vnic_dict['macAddr'].lower() + if mac_address not in interfaces_by_mac: + LOG.debug('Interface with MAC %s not found; skipping', mac_address) + continue + name = interfaces_by_mac[mac_address] + + if network_config['version'] == 1: + subnet = { + 'type': 'static', + 'address': vnic_dict['privateIp'], + } + network_config['config'].append({ + 'name': name, + 'type': 'physical', + 'mac_address': mac_address, + 'mtu': MTU, + 'subnets': [subnet], + }) + elif network_config['version'] == 2: + network_config['ethernets'][name] = { + 'addresses': [vnic_dict['privateIp']], + 'mtu': MTU, 'dhcp4': False, 'dhcp6': False, + 'match': {'macaddress': mac_address}} + + +def _ensure_netfailover_safe(network_config): + """ + Search network config physical interfaces to see if any of them are + a netfailover master. If found, we prevent matching by MAC as the other + failover devices have the same MAC but need to be ignored. + + Note: we rely on cloudinit.net changes which prevent netfailover devices + from being present in the provided network config. For more details about + netfailover devices, refer to cloudinit.net module. + + :param network_config + A v1 or v2 network config dict with the primary NIC, and possibly + secondary nic configured. This dict will be mutated. 
+ + """ + # ignore anything that's not an actual network-config + if 'version' not in network_config: + return + + if network_config['version'] not in [1, 2]: + LOG.debug('Ignoring unknown network config version: %s', + network_config['version']) + return + + mac_to_name = get_interfaces_by_mac() + if network_config['version'] == 1: + for cfg in [c for c in network_config['config'] if 'type' in c]: + if cfg['type'] == 'physical': + if 'mac_address' in cfg: + mac = cfg['mac_address'] + cur_name = mac_to_name.get(mac) + if not cur_name: + continue + elif is_netfail_master(cur_name): + del cfg['mac_address'] + + elif network_config['version'] == 2: + for _, cfg in network_config.get('ethernets', {}).items(): + if 'match' in cfg: + macaddr = cfg.get('match', {}).get('macaddress') + if macaddr: + cur_name = mac_to_name.get(macaddr) + if not cur_name: + continue + elif is_netfail_master(cur_name): + del cfg['match']['macaddress'] + del cfg['set-name'] + cfg['match']['name'] = cur_name class DataSourceOracle(sources.DataSource): @@ -37,8 +163,22 @@ class DataSourceOracle(sources.DataSource): dsname = 'Oracle' system_uuid = None vendordata_pure = None + network_config_sources = ( + sources.NetworkConfigSource.cmdline, + sources.NetworkConfigSource.ds, + sources.NetworkConfigSource.initramfs, + sources.NetworkConfigSource.system_cfg, + ) + _network_config = sources.UNSET + def __init__(self, sys_cfg, *args, **kwargs): + super(DataSourceOracle, self).__init__(sys_cfg, *args, **kwargs) + + self.ds_cfg = util.mergemanydict([ + util.get_cfg_by_path(sys_cfg, ['datasource', self.dsname], {}), + BUILTIN_DS_CONFIG]) + def _is_platform_viable(self): """Check platform environment to report if this datasource may run.""" return _is_platform_viable() @@ -48,7 +188,7 @@ class DataSourceOracle(sources.DataSource): return False # network may be configured if iscsi root. If that is the case - # then read_kernel_cmdline_config will return non-None. + # then read_initramfs_config will return non-None. 
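# A minimal illustration of the v2 branch of _ensure_netfailover_safe() above:
# when a configured MAC belongs to a netfailover master, matching by MAC is
# unsafe (the standby devices share it), so the match is rewritten to use the
# interface name instead. The values here are made up.
cfg = {'match': {'macaddress': '02:00:17:05:d1:db'}, 'set-name': 'ens3'}
cur_name = 'ens3'  # name that get_interfaces_by_mac() reported for this MAC
del cfg['match']['macaddress']
del cfg['set-name']
cfg['match']['name'] = cur_name
assert cfg == {'match': {'name': 'ens3'}}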
if _is_iscsi_root(): data = self.crawl_metadata() else: @@ -118,11 +258,27 @@ class DataSourceOracle(sources.DataSource): We nonetheless return cmdline provided config if present and fallback to generate fallback.""" if self._network_config == sources.UNSET: - cmdline_cfg = cmdline.read_kernel_cmdline_config() - if cmdline_cfg: - self._network_config = cmdline_cfg - else: + # this is v1 + self._network_config = cmdline.read_initramfs_config() + + if not self._network_config: + # this is now v2 self._network_config = self.distro.generate_fallback_config() + + if self.ds_cfg.get('configure_secondary_nics'): + try: + # Mutate self._network_config to include secondary VNICs + _add_network_config_from_opc_imds(self._network_config) + except Exception: + util.logexc( + LOG, + "Failed to fetch secondary network configuration!") + + # we need to verify that the nic selected is not a netfail over + # device and, if it is a netfail master, then we need to avoid + # emitting any match by mac + _ensure_netfailover_safe(self._network_config) + return self._network_config @@ -137,7 +293,7 @@ def _is_platform_viable(): def _is_iscsi_root(): - return bool(cmdline.read_kernel_cmdline_config()) + return bool(cmdline.read_initramfs_config()) def _load_index(content): diff --git a/cloudinit/sources/DataSourceRbxCloud.py b/cloudinit/sources/DataSourceRbxCloud.py new file mode 100644 index 00000000..c3cd5c79 --- /dev/null +++ b/cloudinit/sources/DataSourceRbxCloud.py @@ -0,0 +1,251 @@ +# Copyright (C) 2018 Warsaw Data Center +# +# Author: Malwina Leis <m.leis@rootbox.com> +# Author: Grzegorz Brzeski <gregory@rootbox.io> +# Author: Adam Dobrawy <a.dobrawy@hyperone.com> +# +# This file is part of cloud-init. See LICENSE file for license information. +""" +This file contains code used to gather the user data passed to an +instance on rootbox / hyperone cloud platforms +""" +import errno +import os +import os.path + +from cloudinit import log as logging +from cloudinit import sources +from cloudinit import util +from cloudinit.event import EventType + +LOG = logging.getLogger(__name__) +ETC_HOSTS = '/etc/hosts' + + +def get_manage_etc_hosts(): + hosts = util.load_file(ETC_HOSTS, quiet=True) + if hosts: + LOG.debug('/etc/hosts exists - setting manage_etc_hosts to False') + return False + LOG.debug('/etc/hosts does not exist - setting manage_etc_hosts to True') + return True + + +def ip2int(addr): + parts = addr.split('.') + return (int(parts[0]) << 24) + (int(parts[1]) << 16) + \ + (int(parts[2]) << 8) + int(parts[3]) + + +def int2ip(addr): + return '.'.join([str(addr >> (i << 3) & 0xFF) for i in range(4)[::-1]]) + + +def _sub_arp(cmd): + """ + Uses the preferred cloud-init subprocess helper, util.subp, + to run arping.
Breaking this to a separate function + for later use in mocking and unittests + """ + return util.subp(['arping'] + cmd) + + +def gratuitous_arp(items, distro): + source_param = '-S' + if distro.name in ['fedora', 'centos', 'rhel']: + source_param = '-s' + for item in items: + _sub_arp([ + '-c', '2', + source_param, item['source'], + item['destination'] + ]) + + +def get_md(): + rbx_data = None + devices = [ + dev + for dev, bdata in util.blkid().items() + if bdata.get('LABEL', '').upper() == 'CLOUDMD' + ] + for device in devices: + try: + rbx_data = util.mount_cb( + device=device, + callback=read_user_data_callback, + mtype=['vfat', 'fat'] + ) + if rbx_data: + break + except OSError as err: + if err.errno != errno.ENOENT: + raise + except util.MountFailedError: + util.logexc(LOG, "Failed to mount %s when looking for user " + "data", device) + if not rbx_data: + util.logexc(LOG, "Failed to load metadata and userdata") + return False + return rbx_data + + +def generate_network_config(netadps): + """Generate network configuration + + @param netadps: A list of network adapter settings + + @returns: A dict containing network config + """ + return { + 'version': 1, + 'config': [ + { + 'type': 'physical', + 'name': 'eth{}'.format(str(i)), + 'mac_address': netadp['macaddress'].lower(), + 'subnets': [ + { + 'type': 'static', + 'address': ip['address'], + 'netmask': netadp['network']['netmask'], + 'control': 'auto', + 'gateway': netadp['network']['gateway'], + 'dns_nameservers': netadp['network']['dns'][ + 'nameservers'] + } for ip in netadp['ip'] + ], + } for i, netadp in enumerate(netadps) + ] + } + + +def read_user_data_callback(mount_dir): + """This callback will be applied by util.mount_cb() on the mounted + drive. + + @param mount_dir: String representing path of directory where mounted drive + is available + + @returns: A dict containing userdata, metadata and cfg based on metadata. + """ + meta_data = util.load_json( + text=util.load_file( + fname=os.path.join(mount_dir, 'cloud.json'), + decode=False + ) + ) + user_data = util.load_file( + fname=os.path.join(mount_dir, 'user.data'), + quiet=True + ) + if 'vm' not in meta_data or 'netadp' not in meta_data: + util.logexc(LOG, "Failed to load metadata. 
Invalid format.") + return None + username = meta_data.get('additionalMetadata', {}).get('username') + ssh_keys = meta_data.get('additionalMetadata', {}).get('sshKeys', []) + + hash = None + if meta_data.get('additionalMetadata', {}).get('password'): + hash = meta_data['additionalMetadata']['password']['sha512'] + + network = generate_network_config(meta_data['netadp']) + + data = { + 'userdata': user_data, + 'metadata': { + 'instance-id': meta_data['vm']['_id'], + 'local-hostname': meta_data['vm']['name'], + 'public-keys': [] + }, + 'gratuitous_arp': [ + { + "source": ip["address"], + "destination": target + } + for netadp in meta_data['netadp'] + for ip in netadp['ip'] + for target in [ + netadp['network']["gateway"], + int2ip(ip2int(netadp['network']["gateway"]) + 2), + int2ip(ip2int(netadp['network']["gateway"]) + 3) + ] + ], + 'cfg': { + 'ssh_pwauth': True, + 'disable_root': True, + 'system_info': { + 'default_user': { + 'name': username, + 'gecos': username, + 'sudo': ['ALL=(ALL) NOPASSWD:ALL'], + 'passwd': hash, + 'lock_passwd': False, + 'ssh_authorized_keys': ssh_keys, + 'shell': '/bin/bash' + } + }, + 'network_config': network, + 'manage_etc_hosts': get_manage_etc_hosts(), + }, + } + + LOG.debug('returning DATA object:') + LOG.debug(data) + + return data + + +class DataSourceRbxCloud(sources.DataSource): + dsname = "RbxCloud" + update_events = {'network': [ + EventType.BOOT_NEW_INSTANCE, + EventType.BOOT + ]} + + def __init__(self, sys_cfg, distro, paths): + sources.DataSource.__init__(self, sys_cfg, distro, paths) + self.seed = None + + def __str__(self): + root = sources.DataSource.__str__(self) + return "%s [seed=%s]" % (root, self.seed) + + def _get_data(self): + """ + Metadata is passed to the launching instance which + is used to perform instance configuration. + """ + rbx_data = get_md() + self.userdata_raw = rbx_data['userdata'] + self.metadata = rbx_data['metadata'] + self.gratuitous_arp = rbx_data['gratuitous_arp'] + self.cfg = rbx_data['cfg'] + return True + + @property + def network_config(self): + return self.cfg['network_config'] + + def get_public_ssh_keys(self): + return self.metadata['public-keys'] + + def get_userdata_raw(self): + return self.userdata_raw + + def get_config_obj(self): + return self.cfg + + def activate(self, cfg, is_new_instance): + gratuitous_arp(self.gratuitous_arp, self.distro) + + +# Used to match classes to dependencies +datasources = [ + (DataSourceRbxCloud, (sources.DEP_FILESYSTEM,)), +] + + +# Return a list of data sources that match this set of dependencies +def get_datasource_list(depends): + return sources.list_from_depends(depends, datasources) diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py index 9dc4ab23..83c2bf65 100644 --- a/cloudinit/sources/DataSourceScaleway.py +++ b/cloudinit/sources/DataSourceScaleway.py @@ -188,7 +188,7 @@ class DataSourceScaleway(sources.DataSource): self.retries = int(self.ds_cfg.get('retries', DEF_MD_RETRIES)) self.timeout = int(self.ds_cfg.get('timeout', DEF_MD_TIMEOUT)) self._fallback_interface = None - self._network_config = None + self._network_config = sources.UNSET def _crawl_metadata(self): resp = url_helper.readurl(self.metadata_address, @@ -227,7 +227,12 @@ class DataSourceScaleway(sources.DataSource): Configure networking according to data received from the metadata API. """ - if self._network_config: + if self._network_config is None: + LOG.warning('Found None as cached _network_config. 
' 'Resetting to %s', sources.UNSET) + self._network_config = sources.UNSET + + if self._network_config != sources.UNSET: return self._network_config if self._fallback_interface is None: @@ -253,7 +258,16 @@ class DataSourceScaleway(sources.DataSource): return self.metadata['id'] def get_public_ssh_keys(self): - return [key['key'] for key in self.metadata['ssh_public_keys']] + ssh_keys = [key['key'] for key in self.metadata['ssh_public_keys']] + + akeypre = "AUTHORIZED_KEY=" + plen = len(akeypre) + for tag in self.metadata.get('tags', []): + if not tag.startswith(akeypre): + continue + ssh_keys.append(tag[plen:].replace("_", " ")) + + return ssh_keys def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False): return self.metadata['hostname'] diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index 32b57cdd..cf676504 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -1,5 +1,5 @@ # Copyright (C) 2013 Canonical Ltd. -# Copyright (c) 2018, Joyent, Inc. +# Copyright 2019 Joyent, Inc. # # Author: Ben Howard <ben.howard@canonical.com> # @@ -34,6 +34,7 @@ from cloudinit import log as logging from cloudinit import serial from cloudinit import sources from cloudinit import util +from cloudinit.event import EventType LOG = logging.getLogger(__name__) @@ -178,6 +179,7 @@ class DataSourceSmartOS(sources.DataSource): self.metadata = {} self.network_data = None self._network_config = None + self.update_events['network'].add(EventType.BOOT) self.script_base_d = os.path.join(self.paths.get_cpath("scripts")) @@ -319,6 +321,10 @@ class DataSourceSmartOS(sources.DataSource): @property def network_config(self): + # sources.clear_cached_data() may set _network_config to '_unset'. + if self._network_config == sources.UNSET: + self._network_config = None + if self._network_config is None: if self.network_data is not None: self._network_config = ( diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index e6966b31..dd93cfd8 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -9,21 +9,19 @@ # This file is part of cloud-init. See LICENSE file for license information. import abc -from collections import namedtuple import copy import json import os -import six +from collections import namedtuple -from cloudinit.atomic_helper import write_json from cloudinit import importer from cloudinit import log as logging from cloudinit import net -from cloudinit.event import EventType from cloudinit import type_utils from cloudinit import user_data as ud from cloudinit import util - +from cloudinit.atomic_helper import write_json +from cloudinit.event import EventType from cloudinit.filters import launch_index from cloudinit.reporting import events @@ -66,6 +64,13 @@ CLOUD_ID_REGION_PREFIX_MAP = { 'china': ('azure-china', lambda c: c == 'azure'), # only change azure } +# NetworkConfigSource represents the canonical list of network config sources +# that cloud-init knows about.
(Python 2.7 lacks PEP 435, so use a singleton +# namedtuple as an enum; see https://stackoverflow.com/a/6971002) +_NETCFG_SOURCE_NAMES = ('cmdline', 'ds', 'system_cfg', 'fallback', 'initramfs') +NetworkConfigSource = namedtuple('NetworkConfigSource', + _NETCFG_SOURCE_NAMES)(*_NETCFG_SOURCE_NAMES) + class DataSourceNotFoundException(Exception): pass @@ -129,8 +134,7 @@ URLParams = namedtuple( 'URLParms', ['max_wait_seconds', 'timeout_seconds', 'num_retries']) -@six.add_metaclass(abc.ABCMeta) -class DataSource(object): +class DataSource(metaclass=abc.ABCMeta): dsmode = DSMODE_NETWORK default_locale = 'en_US.UTF-8' @@ -153,6 +157,16 @@ class DataSource(object): # Track the discovered fallback nic for use in configuration generation. _fallback_interface = None + # The network configuration sources that should be considered for this data + # source. (The first source in this list that provides network + # configuration will be used without considering any that follow.) This + # should always be a subset of the members of NetworkConfigSource with no + # duplicate entries. + network_config_sources = (NetworkConfigSource.cmdline, + NetworkConfigSource.initramfs, + NetworkConfigSource.system_cfg, + NetworkConfigSource.ds) + # read_url_params url_max_wait = -1 # max_wait < 0 means do not wait url_timeout = 10 # timeout for each metadata url read attempt @@ -419,7 +433,7 @@ class DataSource(object): return self._cloud_name if self.metadata and self.metadata.get(METADATA_CLOUD_NAME_KEY): cloud_name = self.metadata.get(METADATA_CLOUD_NAME_KEY) - if isinstance(cloud_name, six.string_types): + if isinstance(cloud_name, str): self._cloud_name = cloud_name.lower() else: self._cloud_name = self._get_cloud_name().lower() @@ -474,6 +488,16 @@ class DataSource(object): def get_public_ssh_keys(self): return normalize_pubkey_data(self.metadata.get('public-keys')) + def publish_host_keys(self, hostkeys): + """Publish the public SSH host keys (found in /etc/ssh/*.pub). + + @param hostkeys: List of host key tuples (key_type, key_value), + where key_type is the first field in the public key file + (e.g. 'ssh-rsa') and key_value is the key itself + (e.g. 'AAAAB3NzaC1y...'). + """ + pass + def _remap_device(self, short_name): # LP: #611137 # the metadata service may believe that devices are named 'sda' @@ -541,7 +565,7 @@ class DataSource(object): defhost = "localhost" domain = defdomain - if not self.metadata or 'local-hostname' not in self.metadata: + if not self.metadata or not self.metadata.get('local-hostname'): if metadata_only: return None # this is somewhat questionable really. @@ -691,8 +715,8 @@ def normalize_pubkey_data(pubkey_data): if not pubkey_data: return keys - if isinstance(pubkey_data, six.string_types): - return str(pubkey_data).splitlines() + if isinstance(pubkey_data, str): + return pubkey_data.splitlines() if isinstance(pubkey_data, (list, set)): return list(pubkey_data) @@ -702,7 +726,7 @@ def normalize_pubkey_data(pubkey_data): # lp:506332 uec metadata service responds with # data that makes boto populate a string for 'klist' rather # than a list. 
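# A minimal sketch of the singleton-namedtuple trick used for
# NetworkConfigSource above: instantiating the namedtuple type with its own
# field names yields an immutable, attribute-addressable collection whose
# member values equal their names, a stand-in for PEP 435 enums on Python 2.7.
from collections import namedtuple

_NAMES = ('cmdline', 'ds', 'system_cfg', 'fallback', 'initramfs')
Source = namedtuple('Source', _NAMES)(*_NAMES)
assert Source.cmdline == 'cmdline'
assert 'initramfs' in Source  # plain tuple membership also works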
- if isinstance(klist, six.string_types): + if isinstance(klist, str): klist = [klist] if isinstance(klist, (list, set)): for pkey in klist: @@ -810,7 +834,7 @@ def convert_vendordata(data, recurse=True): """ if not data: return None - if isinstance(data, six.string_types): + if isinstance(data, str): return data if isinstance(data, list): return copy.deepcopy(data) diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index e5696b1f..fc760581 100644..100755 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -7,6 +7,7 @@ import re import socket import struct import time +import textwrap from cloudinit.net import dhcp from cloudinit import stages @@ -16,9 +17,162 @@ from xml.etree import ElementTree from cloudinit import url_helper from cloudinit import util +from cloudinit import version +from cloudinit import distros +from cloudinit.reporting import events +from cloudinit.net.dhcp import EphemeralDHCPv4 +from datetime import datetime LOG = logging.getLogger(__name__) +# This endpoint matches the format as found in dhcp lease files, since this +# value is applied if the endpoint can't be found within a lease file +DEFAULT_WIRESERVER_ENDPOINT = "a8:3f:81:10" + +BOOT_EVENT_TYPE = 'boot-telemetry' +SYSTEMINFO_EVENT_TYPE = 'system-info' +DIAGNOSTIC_EVENT_TYPE = 'diagnostic' + +azure_ds_reporter = events.ReportEventStack( + name="azure-ds", + description="initialize reporter for azure ds", + reporting_enabled=True) + + +def azure_ds_telemetry_reporter(func): + def impl(*args, **kwargs): + with events.ReportEventStack( + name=func.__name__, + description=func.__name__, + parent=azure_ds_reporter): + return func(*args, **kwargs) + return impl + + +def is_byte_swapped(previous_id, current_id): + """ + Azure stores the instance ID with an incorrect byte ordering for the + first parts. This corrects the byte order such that it is consistent with + that returned by the metadata service. 
+ """ + if previous_id == current_id: + return False + + def swap_bytestring(s, width=2): + dd = [byte for byte in textwrap.wrap(s, 2)] + dd.reverse() + return ''.join(dd) + + parts = current_id.split('-') + swapped_id = '-'.join([ + swap_bytestring(parts[0]), + swap_bytestring(parts[1]), + swap_bytestring(parts[2]), + parts[3], + parts[4] + ]) + + return previous_id == swapped_id + + +@azure_ds_telemetry_reporter +def get_boot_telemetry(): + """Report timestamps related to kernel initialization and systemd + activation of cloud-init""" + if not distros.uses_systemd(): + raise RuntimeError( + "distro not using systemd, skipping boot telemetry") + + LOG.debug("Collecting boot telemetry") + try: + kernel_start = float(time.time()) - float(util.uptime()) + except ValueError: + raise RuntimeError("Failed to determine kernel start timestamp") + + try: + out, _ = util.subp(['/bin/systemctl', + 'show', '-p', + 'UserspaceTimestampMonotonic'], + capture=True) + tsm = None + if out and '=' in out: + tsm = out.split("=")[1] + + if not tsm: + raise RuntimeError("Failed to parse " + "UserspaceTimestampMonotonic from systemd") + + user_start = kernel_start + (float(tsm) / 1000000) + except util.ProcessExecutionError as e: + raise RuntimeError("Failed to get UserspaceTimestampMonotonic: %s" + % e) + except ValueError as e: + raise RuntimeError("Failed to parse " + "UserspaceTimestampMonotonic from systemd: %s" + % e) + + try: + out, _ = util.subp(['/bin/systemctl', 'show', + 'cloud-init-local', '-p', + 'InactiveExitTimestampMonotonic'], + capture=True) + tsm = None + if out and '=' in out: + tsm = out.split("=")[1] + if not tsm: + raise RuntimeError("Failed to parse " + "InactiveExitTimestampMonotonic from systemd") + + cloudinit_activation = kernel_start + (float(tsm) / 1000000) + except util.ProcessExecutionError as e: + raise RuntimeError("Failed to get InactiveExitTimestampMonotonic: %s" + % e) + except ValueError as e: + raise RuntimeError("Failed to parse " + "InactiveExitTimestampMonotonic from systemd: %s" + % e) + + evt = events.ReportingEvent( + BOOT_EVENT_TYPE, 'boot-telemetry', + "kernel_start=%s user_start=%s cloudinit_activation=%s" % + (datetime.utcfromtimestamp(kernel_start).isoformat() + 'Z', + datetime.utcfromtimestamp(user_start).isoformat() + 'Z', + datetime.utcfromtimestamp(cloudinit_activation).isoformat() + 'Z'), + events.DEFAULT_EVENT_ORIGIN) + events.report_event(evt) + + # return the event for unit testing purpose + return evt + + +@azure_ds_telemetry_reporter +def get_system_info(): + """Collect and report system information""" + info = util.system_info() + evt = events.ReportingEvent( + SYSTEMINFO_EVENT_TYPE, 'system information', + "cloudinit_version=%s, kernel_version=%s, variant=%s, " + "distro_name=%s, distro_version=%s, flavor=%s, " + "python_version=%s" % + (version.version_string(), info['release'], info['variant'], + info['dist'][0], info['dist'][1], info['dist'][2], + info['python']), events.DEFAULT_EVENT_ORIGIN) + events.report_event(evt) + + # return the event for unit testing purpose + return evt + + +def report_diagnostic_event(str): + """Report a diagnostic event""" + evt = events.ReportingEvent( + DIAGNOSTIC_EVENT_TYPE, 'diagnostic message', + str, events.DEFAULT_EVENT_ORIGIN) + events.report_event(evt) + + # return the event for unit testing purpose + return evt + @contextmanager def cd(newdir): @@ -56,14 +210,16 @@ class AzureEndpointHttpClient(object): if secure: headers = self.headers.copy() headers.update(self.extra_secure_headers) - return 
url_helper.read_file_or_url(url, headers=headers) + return url_helper.read_file_or_url(url, headers=headers, timeout=5, + retries=10) def post(self, url, data=None, extra_headers=None): headers = self.headers if extra_headers is not None: headers = self.headers.copy() headers.update(extra_headers) - return url_helper.read_file_or_url(url, data=data, headers=headers) + return url_helper.read_file_or_url(url, data=data, headers=headers, + timeout=5, retries=10) class GoalState(object): @@ -119,6 +275,7 @@ class OpenSSLManager(object): def clean_up(self): util.del_dir(self.tmpdir) + @azure_ds_telemetry_reporter def generate_certificate(self): LOG.debug('Generating certificate for communication with fabric...') if self.certificate is not None: @@ -138,9 +295,40 @@ class OpenSSLManager(object): self.certificate = certificate LOG.debug('New certificate generated.') - def parse_certificates(self, certificates_xml): - tag = ElementTree.fromstring(certificates_xml).find( - './/Data') + @staticmethod + @azure_ds_telemetry_reporter + def _run_x509_action(action, cert): + cmd = ['openssl', 'x509', '-noout', action] + result, _ = util.subp(cmd, data=cert) + return result + + @azure_ds_telemetry_reporter + def _get_ssh_key_from_cert(self, certificate): + pub_key = self._run_x509_action('-pubkey', certificate) + keygen_cmd = ['ssh-keygen', '-i', '-m', 'PKCS8', '-f', '/dev/stdin'] + ssh_key, _ = util.subp(keygen_cmd, data=pub_key) + return ssh_key + + @azure_ds_telemetry_reporter + def _get_fingerprint_from_cert(self, certificate): + """openssl x509 formats fingerprints like so: + 'SHA1 Fingerprint=07:3E:19:D1:4D:1C:79:92:24:C6:A0:FD:8D:DA:\ B6:A8:BF:27:D4:73\n' + + Azure control plane passes that fingerprint like so: + '073E19D14D1C799224C6A0FD8DDAB6A8BF27D473' + """ + raw_fp = self._run_x509_action('-fingerprint', certificate) + eq = raw_fp.find('=') + octets = raw_fp[eq+1:-1].split(':') + return ''.join(octets) + + @azure_ds_telemetry_reporter + def _decrypt_certs_from_xml(self, certificates_xml): + """Decrypt the certificates XML document using our private key; + return the list of certs and private keys contained in the doc.
+ """ + tag = ElementTree.fromstring(certificates_xml).find('.//Data') certificates_content = tag.text lines = [ b'MIME-Version: 1.0', @@ -151,32 +339,31 @@ class OpenSSLManager(object): certificates_content.encode('utf-8'), ] with cd(self.tmpdir): - with open('Certificates.p7m', 'wb') as f: - f.write(b'\n'.join(lines)) out, _ = util.subp( - 'openssl cms -decrypt -in Certificates.p7m -inkey' + 'openssl cms -decrypt -in /dev/stdin -inkey' ' {private_key} -recip {certificate} | openssl pkcs12 -nodes' ' -password pass:'.format(**self.certificate_names), - shell=True) - private_keys, certificates = [], [] + shell=True, data=b'\n'.join(lines)) + return out + + @azure_ds_telemetry_reporter + def parse_certificates(self, certificates_xml): + """Given the Certificates XML document, return a dictionary of + fingerprints and associated SSH keys derived from the certs.""" + out = self._decrypt_certs_from_xml(certificates_xml) current = [] + keys = {} for line in out.splitlines(): current.append(line) if re.match(r'[-]+END .*?KEY[-]+$', line): - private_keys.append('\n'.join(current)) + # ignore private_keys current = [] elif re.match(r'[-]+END .*?CERTIFICATE[-]+$', line): - certificates.append('\n'.join(current)) + certificate = '\n'.join(current) + ssh_key = self._get_ssh_key_from_cert(certificate) + fingerprint = self._get_fingerprint_from_cert(certificate) + keys[fingerprint] = ssh_key current = [] - keys = [] - for certificate in certificates: - with cd(self.tmpdir): - public_key, _ = util.subp( - 'openssl x509 -noout -pubkey |' - 'ssh-keygen -i -m PKCS8 -f /dev/stdin', - data=certificate, - shell=True) - keys.append(public_key) return keys @@ -206,7 +393,6 @@ class WALinuxAgentShim(object): self.dhcpoptions = dhcp_options self._endpoint = None self.openssl_manager = None - self.values = {} self.lease_file = fallback_lease_file def clean_up(self): @@ -241,14 +427,21 @@ class WALinuxAgentShim(object): return socket.inet_ntoa(packed_bytes) @staticmethod + @azure_ds_telemetry_reporter def _networkd_get_value_from_leases(leases_d=None): return dhcp.networkd_get_option_from_leases( 'OPTION_245', leases_d=leases_d) @staticmethod + @azure_ds_telemetry_reporter def _get_value_from_leases_file(fallback_lease_file): leases = [] - content = util.load_file(fallback_lease_file) + try: + content = util.load_file(fallback_lease_file) + except IOError as ex: + LOG.error("Failed to read %s: %s", fallback_lease_file, ex) + return None + LOG.debug("content is %s", content) option_name = _get_dhcp_endpoint_option_name() for line in content.splitlines(): @@ -263,6 +456,7 @@ class WALinuxAgentShim(object): return leases[-1] @staticmethod + @azure_ds_telemetry_reporter def _load_dhclient_json(): dhcp_options = {} hooks_dir = WALinuxAgentShim._get_hooks_dir() @@ -281,6 +475,7 @@ class WALinuxAgentShim(object): return dhcp_options @staticmethod + @azure_ds_telemetry_reporter def _get_value_from_dhcpoptions(dhcp_options): if dhcp_options is None: return None @@ -294,22 +489,26 @@ class WALinuxAgentShim(object): return _value @staticmethod + @azure_ds_telemetry_reporter def find_endpoint(fallback_lease_file=None, dhcp245=None): value = None if dhcp245 is not None: value = dhcp245 LOG.debug("Using Azure Endpoint from dhcp options") if value is None: + report_diagnostic_event("No Azure endpoint from dhcp options") LOG.debug('Finding Azure endpoint from networkd...') value = WALinuxAgentShim._networkd_get_value_from_leases() if value is None: # Option-245 stored in /run/cloud-init/dhclient.hooks/<ifc>.json # a dhclient exit 
hook that calls cloud-init-dhclient-hook + report_diagnostic_event("No Azure endpoint from networkd") LOG.debug('Finding Azure endpoint from hook json...') dhcp_options = WALinuxAgentShim._load_dhclient_json() value = WALinuxAgentShim._get_value_from_dhcpoptions(dhcp_options) if value is None: # Fallback and check the leases file if unsuccessful + report_diagnostic_event("No Azure endpoint from dhclient logs") LOG.debug("Unable to find endpoint in dhclient logs. " " Falling back to check lease files") if fallback_lease_file is None: @@ -320,16 +519,22 @@ class WALinuxAgentShim(object): fallback_lease_file) value = WALinuxAgentShim._get_value_from_leases_file( fallback_lease_file) - if value is None: - raise ValueError('No endpoint found.') + msg = "No lease found; using default endpoint" + report_diagnostic_event(msg) + LOG.warning(msg) + value = DEFAULT_WIRESERVER_ENDPOINT endpoint_ip_address = WALinuxAgentShim.get_ip_from_lease_value(value) - LOG.debug('Azure endpoint found at %s', endpoint_ip_address) + msg = 'Azure endpoint found at %s' % endpoint_ip_address + report_diagnostic_event(msg) + LOG.debug(msg) return endpoint_ip_address - def register_with_azure_and_fetch_data(self): - self.openssl_manager = OpenSSLManager() + @azure_ds_telemetry_reporter + def register_with_azure_and_fetch_data(self, pubkey_info=None): + if self.openssl_manager is None: + self.openssl_manager = OpenSSLManager() http_client = AzureEndpointHttpClient(self.openssl_manager.certificate) LOG.info('Registering with Azure...') attempts = 0 @@ -337,27 +542,52 @@ class WALinuxAgentShim(object): try: response = http_client.get( 'http://{0}/machine/?comp=goalstate'.format(self.endpoint)) - except Exception: + except Exception as e: if attempts < 10: time.sleep(attempts + 1) else: + report_diagnostic_event( + "failed to register with Azure: %s" % e) raise else: break attempts += 1 LOG.debug('Successfully fetched GoalState XML.') goal_state = GoalState(response.contents, http_client) - public_keys = [] - if goal_state.certificates_xml is not None: + report_diagnostic_event("container_id %s" % goal_state.container_id) + ssh_keys = [] + if goal_state.certificates_xml is not None and pubkey_info is not None: LOG.debug('Certificate XML found; parsing out public keys.') - public_keys = self.openssl_manager.parse_certificates( + keys_by_fingerprint = self.openssl_manager.parse_certificates( goal_state.certificates_xml) - data = { - 'public-keys': public_keys, - } + ssh_keys = self._filter_pubkeys(keys_by_fingerprint, pubkey_info) self._report_ready(goal_state, http_client) - return data + return {'public-keys': ssh_keys} + + def _filter_pubkeys(self, keys_by_fingerprint, pubkey_info): + """cloud-init expects a straightforward array of keys to be dropped + into the user's authorized_keys file. Azure control plane exposes + multiple public keys to the VM via wireserver. Select just the + user's key(s) and return them, ignoring any other certs. 
+ """ + keys = [] + for pubkey in pubkey_info: + if 'value' in pubkey and pubkey['value']: + keys.append(pubkey['value']) + elif 'fingerprint' in pubkey and pubkey['fingerprint']: + fingerprint = pubkey['fingerprint'] + if fingerprint in keys_by_fingerprint: + keys.append(keys_by_fingerprint[fingerprint]) + else: + LOG.warning("ovf-env.xml specified PublicKey fingerprint " + "%s not found in goalstate XML", fingerprint) + else: + LOG.warning("ovf-env.xml specified PublicKey with neither " + "value nor fingerprint: %s", pubkey) + return keys + + @azure_ds_telemetry_reporter def _report_ready(self, goal_state, http_client): LOG.debug('Reporting ready to Azure fabric.') document = self.REPORT_READY_XML_TEMPLATE.format( @@ -365,20 +595,49 @@ class WALinuxAgentShim(object): container_id=goal_state.container_id, instance_id=goal_state.instance_id, ) - http_client.post( - "http://{0}/machine?comp=health".format(self.endpoint), - data=document, - extra_headers={'Content-Type': 'text/xml; charset=utf-8'}, - ) + # Host will collect kvps when cloud-init reports ready. + # some kvps might still be in the queue. We yield the scheduler + # to make sure we process all kvps up till this point. + time.sleep(0) + try: + http_client.post( + "http://{0}/machine?comp=health".format(self.endpoint), + data=document, + extra_headers={'Content-Type': 'text/xml; charset=utf-8'}, + ) + except Exception as e: + report_diagnostic_event("exception while reporting ready: %s" % e) + raise + LOG.info('Reported ready to Azure fabric.') -def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None): +@azure_ds_telemetry_reporter +def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None, + pubkey_info=None): shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file, dhcp_options=dhcp_opts) try: - return shim.register_with_azure_and_fetch_data() + return shim.register_with_azure_and_fetch_data(pubkey_info=pubkey_info) finally: shim.clean_up() + +class EphemeralDHCPv4WithReporting(object): + def __init__(self, reporter, nic=None): + self.reporter = reporter + self.ephemeralDHCPv4 = EphemeralDHCPv4(iface=nic) + + def __enter__(self): + with events.ReportEventStack( + name="obtain-dhcp-lease", + description="obtain dhcp lease", + parent=self.reporter): + return self.ephemeralDHCPv4.__enter__() + + def __exit__(self, excp_type, excp_value, excp_traceback): + self.ephemeralDHCPv4.__exit__( + excp_type, excp_value, excp_traceback) + + # vi: ts=4 expandtab diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index 9c29ceac..441db506 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -12,15 +12,12 @@ import copy import functools import os -import six - from cloudinit import ec2_utils from cloudinit import log as logging from cloudinit import net from cloudinit import sources from cloudinit import url_helper from cloudinit import util - from cloudinit.sources import BrokenMetadata # See https://docs.openstack.org/user-guide/cli-config-drive.html @@ -67,7 +64,7 @@ OS_VERSIONS = ( OS_ROCKY, ) -PHYSICAL_TYPES = ( +KNOWN_PHYSICAL_TYPES = ( None, 'bgpovs', # not present in OpenStack upstream but used on OVH cloud. 
'bridge', @@ -163,8 +160,7 @@ class SourceMixin(object): return device -@six.add_metaclass(abc.ABCMeta) -class BaseReader(object): +class BaseReader(metaclass=abc.ABCMeta): def __init__(self, base_path): self.base_path = base_path @@ -227,7 +223,7 @@ class BaseReader(object): """ load_json_anytype = functools.partial( - util.load_json, root_types=(dict, list) + six.string_types) + util.load_json, root_types=(dict, list, str)) def datafiles(version): files = {} @@ -584,25 +580,31 @@ def convert_net_json(network_json=None, known_macs=None): if n['link'] == link['id']]: subnet = dict((k, v) for k, v in network.items() if k in valid_keys['subnet']) - if 'dhcp' in network['type']: - t = 'dhcp6' if network['type'].startswith('ipv6') else 'dhcp4' - subnet.update({ - 'type': t, - }) - else: + + if network['type'] == 'ipv4_dhcp': + subnet.update({'type': 'dhcp4'}) + elif network['type'] == 'ipv6_dhcp': + subnet.update({'type': 'dhcp6'}) + elif network['type'] in ['ipv6_slaac', 'ipv6_dhcpv6-stateless', + 'ipv6_dhcpv6-stateful']: + subnet.update({'type': network['type']}) + elif network['type'] in ['ipv4', 'ipv6']: subnet.update({ 'type': 'static', 'address': network.get('ip_address'), }) + + # Enable accept_ra for stateful and legacy ipv6_dhcp types + if network['type'] in ['ipv6_dhcpv6-stateful', 'ipv6_dhcp']: + cfg.update({'accept-ra': True}) + if network['type'] == 'ipv4': subnet['ipv4'] = True if network['type'] == 'ipv6': subnet['ipv6'] = True subnets.append(subnet) cfg.update({'subnets': subnets}) - if link['type'] in PHYSICAL_TYPES: - cfg.update({'type': 'physical', 'mac_address': link_mac_addr}) - elif link['type'] in ['bond']: + if link['type'] in ['bond']: params = {} if link_mac_addr: params['mac_address'] = link_mac_addr @@ -641,8 +643,10 @@ def convert_net_json(network_json=None, known_macs=None): curinfo.update({'mac': link['vlan_mac_address'], 'name': name}) else: - raise ValueError( - 'Unknown network_data link type: %s' % link['type']) + if link['type'] not in KNOWN_PHYSICAL_TYPES: + LOG.warning('Unknown network_data link type (%s); treating as' + ' physical', link['type']) + cfg.update({'type': 'physical', 'mac_address': link_mac_addr}) config.append(cfg) link_id_info[curinfo['id']] = curinfo diff --git a/cloudinit/sources/helpers/vmware/imc/config_custom_script.py b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py index a7d4ad91..9f14770e 100644 --- a/cloudinit/sources/helpers/vmware/imc/config_custom_script.py +++ b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py @@ -1,5 +1,5 @@ # Copyright (C) 2017 Canonical Ltd. -# Copyright (C) 2017 VMware Inc. +# Copyright (C) 2017-2019 VMware Inc. 
# # Author: Maitreyee Saikia <msaikia@vmware.com> # @@ -8,7 +8,6 @@ import logging import os import stat -from textwrap import dedent from cloudinit import util @@ -20,12 +19,15 @@ class CustomScriptNotFound(Exception): class CustomScriptConstant(object): - RC_LOCAL = "/etc/rc.local" - POST_CUST_TMP_DIR = "/root/.customization" - POST_CUST_RUN_SCRIPT_NAME = "post-customize-guest.sh" - POST_CUST_RUN_SCRIPT = os.path.join(POST_CUST_TMP_DIR, - POST_CUST_RUN_SCRIPT_NAME) - POST_REBOOT_PENDING_MARKER = "/.guest-customization-post-reboot-pending" + CUSTOM_TMP_DIR = "/root/.customization" + + # The user defined custom script + CUSTOM_SCRIPT_NAME = "customize.sh" + CUSTOM_SCRIPT = os.path.join(CUSTOM_TMP_DIR, + CUSTOM_SCRIPT_NAME) + POST_CUSTOM_PENDING_MARKER = "/.guest-customization-post-reboot-pending" + # The cc_scripts_per_instance script to launch custom script + POST_CUSTOM_SCRIPT_NAME = "post-customize-guest.sh" class RunCustomScript(object): @@ -39,10 +41,19 @@ class RunCustomScript(object): raise CustomScriptNotFound("Script %s not found!! " "Cannot execute custom script!" % self.scriptpath) + + util.ensure_dir(CustomScriptConstant.CUSTOM_TMP_DIR) + + LOG.debug("Copying custom script to %s", + CustomScriptConstant.CUSTOM_SCRIPT) + util.copy(self.scriptpath, CustomScriptConstant.CUSTOM_SCRIPT) + # Strip any CR characters from the decoded script - util.load_file(self.scriptpath).replace("\r", "") - st = os.stat(self.scriptpath) - os.chmod(self.scriptpath, st.st_mode | stat.S_IEXEC) + content = util.load_file( + CustomScriptConstant.CUSTOM_SCRIPT).replace("\r", "") + util.write_file(CustomScriptConstant.CUSTOM_SCRIPT, + content, + mode=0o544) class PreCustomScript(RunCustomScript): @@ -50,104 +61,34 @@ class PreCustomScript(RunCustomScript): """Executing custom script with precustomization argument.""" LOG.debug("Executing pre-customization script") self.prepare_script() - util.subp(["/bin/sh", self.scriptpath, "precustomization"]) + util.subp([CustomScriptConstant.CUSTOM_SCRIPT, "precustomization"]) class PostCustomScript(RunCustomScript): - def __init__(self, scriptname, directory): + def __init__(self, scriptname, directory, ccScriptsDir): super(PostCustomScript, self).__init__(scriptname, directory) - # Determine when to run custom script. When postreboot is True, - # the user uploaded script will run as part of rc.local after - # the machine reboots. This is determined by presence of rclocal. - # When postreboot is False, script will run as part of cloud-init. - self.postreboot = False - - def _install_post_reboot_agent(self, rclocal): - """ - Install post-reboot agent for running custom script after reboot. - As part of this process, we are editing the rclocal file to run a - VMware script, which in turn is resposible for handling the user - script. - @param: path to rc local. - """ - LOG.debug("Installing post-reboot customization from %s to %s", - self.directory, rclocal) - if not self.has_previous_agent(rclocal): - LOG.info("Adding post-reboot customization agent to rc.local") - new_content = dedent(""" - # Run post-reboot guest customization - /bin/sh %s - exit 0 - """) % CustomScriptConstant.POST_CUST_RUN_SCRIPT - existing_rclocal = util.load_file(rclocal).replace('exit 0\n', '') - st = os.stat(rclocal) - # "x" flag should be set - mode = st.st_mode | stat.S_IEXEC - util.write_file(rclocal, existing_rclocal + new_content, mode) - - else: - # We don't need to update rclocal file everytime a customization - # is requested. It just needs to be done for the first time. 
- LOG.info("Post-reboot guest customization agent is already " - "registered in rc.local") - LOG.debug("Installing post-reboot customization agent finished: %s", - self.postreboot) - - def has_previous_agent(self, rclocal): - searchstring = "# Run post-reboot guest customization" - if searchstring in open(rclocal).read(): - return True - return False - - def find_rc_local(self): - """ - Determine if rc local is present. - """ - rclocal = "" - if os.path.exists(CustomScriptConstant.RC_LOCAL): - LOG.debug("rc.local detected.") - # resolving in case of symlink - rclocal = os.path.realpath(CustomScriptConstant.RC_LOCAL) - LOG.debug("rc.local resolved to %s", rclocal) - else: - LOG.warning("Can't find rc.local, post-customization " - "will be run before reboot") - return rclocal - - def install_agent(self): - rclocal = self.find_rc_local() - if rclocal: - self._install_post_reboot_agent(rclocal) - self.postreboot = True + self.ccScriptsDir = ccScriptsDir + self.ccScriptPath = os.path.join( + ccScriptsDir, + CustomScriptConstant.POST_CUSTOM_SCRIPT_NAME) def execute(self): """ - This method executes post-customization script before or after reboot - based on the presence of rc local. + This method copies the post-customization run script to the + cc_scripts_per_instance directory and lets that cloud-init + module run the custom script. """ self.prepare_script() - self.install_agent() - if not self.postreboot: - LOG.warning("Executing post-customization script inline") - util.subp(["/bin/sh", self.scriptpath, "postcustomization"]) - else: - LOG.debug("Scheduling custom script to run post reboot") - if not os.path.isdir(CustomScriptConstant.POST_CUST_TMP_DIR): - os.mkdir(CustomScriptConstant.POST_CUST_TMP_DIR) - # Script "post-customize-guest.sh" and user uploaded script are - # are present in the same directory and needs to copied to a temp - # directory to be executed post reboot. User uploaded script is - # saved as customize.sh in the temp directory. - # post-customize-guest.sh excutes customize.sh after reboot.
- LOG.debug("Copying post-customization script") - util.copy(self.scriptpath, - CustomScriptConstant.POST_CUST_TMP_DIR + "/customize.sh") - LOG.debug("Copying script to run post-customization script") - util.copy( - os.path.join(self.directory, - CustomScriptConstant.POST_CUST_RUN_SCRIPT_NAME), - CustomScriptConstant.POST_CUST_RUN_SCRIPT) - LOG.info("Creating post-reboot pending marker") - util.ensure_file(CustomScriptConstant.POST_REBOOT_PENDING_MARKER) + + LOG.debug("Copying post customize run script to %s", + self.ccScriptPath) + util.copy( + os.path.join(self.directory, + CustomScriptConstant.POST_CUSTOM_SCRIPT_NAME), + self.ccScriptPath) + st = os.stat(self.ccScriptPath) + os.chmod(self.ccScriptPath, st.st_mode | stat.S_IEXEC) + LOG.info("Creating post customization pending marker") + util.ensure_file(CustomScriptConstant.POST_CUSTOM_PENDING_MARKER) # vi: ts=4 expandtab diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_error.py b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py index db5a00dc..65ae7390 100644 --- a/cloudinit/sources/helpers/vmware/imc/guestcust_error.py +++ b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py @@ -10,5 +10,6 @@ class GuestCustErrorEnum(object): """Specifies different errors of Guest Customization engine""" GUESTCUST_ERROR_SUCCESS = 0 + GUESTCUST_ERROR_SCRIPT_DISABLED = 6 # vi: ts=4 expandtab diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py index a590f323..3d369d04 100644 --- a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py +++ b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py @@ -7,6 +7,7 @@ import logging import os +import re import time from cloudinit import util @@ -117,4 +118,40 @@ def enable_nics(nics): logger.warning("Can't connect network interfaces after %d attempts", enableNicsWaitRetries) + +def get_tools_config(section, key, defaultVal): + """ Return the value of [section] key from VMTools configuration. + + @param section: String of section to read from VMTools config + @returns: String value from key in [section] or defaultVal if + [section] is not present or vmware-toolbox-cmd is + not installed. 
+ """ + + if not util.which('vmware-toolbox-cmd'): + logger.debug( + 'vmware-toolbox-cmd not installed, returning default value') + return defaultVal + + retValue = defaultVal + cmd = ['vmware-toolbox-cmd', 'config', 'get', section, key] + + try: + (outText, _) = util.subp(cmd) + m = re.match(r'([^=]+)=(.*)', outText) + if m: + retValue = m.group(2).strip() + logger.debug("Get tools config: [%s] %s = %s", + section, key, retValue) + else: + logger.debug( + "Tools config: [%s] %s is not found, return default value: %s", + section, key, retValue) + except util.ProcessExecutionError as e: + logger.error("Failed running %s[%s]", cmd, e.exit_code) + logger.exception(e) + + return retValue + + # vi: ts=4 expandtab diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py index 6378e98b..f73b37ed 100644 --- a/cloudinit/sources/tests/test_init.py +++ b/cloudinit/sources/tests/test_init.py @@ -3,7 +3,6 @@ import copy import inspect import os -import six import stat from cloudinit.event import EventType @@ -13,7 +12,7 @@ from cloudinit.sources import ( EXPERIMENTAL_TEXT, INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE, METADATA_UNKNOWN, REDACT_SENSITIVE_VALUE, UNSET, DataSource, canonical_cloud_id, redact_sensitive_keys) -from cloudinit.tests.helpers import CiTestCase, skipIf, mock +from cloudinit.tests.helpers import CiTestCase, mock from cloudinit.user_data import UserDataProcessor from cloudinit import util @@ -422,7 +421,6 @@ class TestDataSource(CiTestCase): {'network_json': 'is good'}, instance_data['ds']['network_json']) - @skipIf(not six.PY3, "json serialization on <= py2.7 handles bytes") def test_get_data_base64encodes_unserializable_bytes(self): """On py3, get_data base64encodes any unserializable content.""" tmp = self.tmp_dir() @@ -440,37 +438,6 @@ class TestDataSource(CiTestCase): {'key1': 'val1', 'key2': {'key2.1': 'EjM='}}, instance_json['ds']['meta_data']) - @skipIf(not six.PY2, "json serialization on <= py2.7 handles bytes") - def test_get_data_handles_bytes_values(self): - """On py2 get_data handles bytes values without having to b64encode.""" - tmp = self.tmp_dir() - datasource = DataSourceTestSubclassNet( - self.sys_cfg, self.distro, Paths({'run_dir': tmp}), - custom_metadata={'key1': 'val1', 'key2': {'key2.1': b'\x123'}}) - self.assertTrue(datasource.get_data()) - json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) - content = util.load_file(json_file) - instance_json = util.load_json(content) - self.assertEqual([], instance_json['base64_encoded_keys']) - self.assertEqual( - {'key1': 'val1', 'key2': {'key2.1': '\x123'}}, - instance_json['ds']['meta_data']) - - @skipIf(not six.PY2, "Only python2 hits UnicodeDecodeErrors on non-utf8") - def test_non_utf8_encoding_logs_warning(self): - """When non-utf-8 values exist in py2 instance-data is not written.""" - tmp = self.tmp_dir() - datasource = DataSourceTestSubclassNet( - self.sys_cfg, self.distro, Paths({'run_dir': tmp}), - custom_metadata={'key1': 'val1', 'key2': {'key2.1': b'ab\xaadef'}}) - self.assertTrue(datasource.get_data()) - json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) - self.assertFalse(os.path.exists(json_file)) - self.assertIn( - "WARNING: Error persisting instance-data.json: 'utf8' codec can't" - " decode byte 0xaa in position 2: invalid start byte", - self.logs.getvalue()) - def test_get_hostname_subclass_support(self): """Validate get_hostname signature on all subclasses of DataSource.""" # Use inspect.getfullargspec when we drop py2.6 and py2.7 diff --git 
a/cloudinit/sources/tests/test_oracle.py b/cloudinit/sources/tests/test_oracle.py index 97d62947..abf3d359 100644 --- a/cloudinit/sources/tests/test_oracle.py +++ b/cloudinit/sources/tests/test_oracle.py @@ -1,27 +1,69 @@ # This file is part of cloud-init. See LICENSE file for license information. from cloudinit.sources import DataSourceOracle as oracle -from cloudinit.sources import BrokenMetadata +from cloudinit.sources import BrokenMetadata, NetworkConfigSource from cloudinit import helpers from cloudinit.tests import helpers as test_helpers from textwrap import dedent import argparse +import copy import httpretty import json -import mock import os -import six import uuid +from unittest import mock DS_PATH = "cloudinit.sources.DataSourceOracle" MD_VER = "2013-10-17" +# `curl -L http://169.254.169.254/opc/v1/vnics/` on a Oracle Bare Metal Machine +# with a secondary VNIC attached (vnicId truncated for Python line length) +OPC_BM_SECONDARY_VNIC_RESPONSE = """\ +[ { + "vnicId" : "ocid1.vnic.oc1.phx.abyhqljtyvcucqkhdqmgjszebxe4hrb!!TRUNCATED||", + "privateIp" : "10.0.0.8", + "vlanTag" : 0, + "macAddr" : "90:e2:ba:d4:f1:68", + "virtualRouterIp" : "10.0.0.1", + "subnetCidrBlock" : "10.0.0.0/24", + "nicIndex" : 0 +}, { + "vnicId" : "ocid1.vnic.oc1.phx.abyhqljtfmkxjdy2sqidndiwrsg63zf!!TRUNCATED||", + "privateIp" : "10.0.4.5", + "vlanTag" : 1, + "macAddr" : "02:00:17:05:CF:51", + "virtualRouterIp" : "10.0.4.1", + "subnetCidrBlock" : "10.0.4.0/24", + "nicIndex" : 0 +} ]""" + +# `curl -L http://169.254.169.254/opc/v1/vnics/` on a Oracle Virtual Machine +# with a secondary VNIC attached +OPC_VM_SECONDARY_VNIC_RESPONSE = """\ +[ { + "vnicId" : "ocid1.vnic.oc1.phx.abyhqljtch72z5pd76cc2636qeqh7z_truncated", + "privateIp" : "10.0.0.230", + "vlanTag" : 1039, + "macAddr" : "02:00:17:05:D1:DB", + "virtualRouterIp" : "10.0.0.1", + "subnetCidrBlock" : "10.0.0.0/24" +}, { + "vnicId" : "ocid1.vnic.oc1.phx.abyhqljt4iew3gwmvrwrhhf3bp5drj_truncated", + "privateIp" : "10.0.0.231", + "vlanTag" : 1041, + "macAddr" : "00:00:17:02:2B:B1", + "virtualRouterIp" : "10.0.0.1", + "subnetCidrBlock" : "10.0.0.0/24" +} ]""" + class TestDataSourceOracle(test_helpers.CiTestCase): """Test datasource DataSourceOracle.""" + with_logs = True + ds_class = oracle.DataSourceOracle my_uuid = str(uuid.uuid4()) @@ -79,6 +121,16 @@ class TestDataSourceOracle(test_helpers.CiTestCase): self.assertEqual( 'metadata (http://169.254.169.254/openstack/)', ds.subplatform) + def test_sys_cfg_can_enable_configure_secondary_nics(self): + # Confirm that behaviour is toggled by sys_cfg + ds, _mocks = self._get_ds() + self.assertFalse(ds.ds_cfg['configure_secondary_nics']) + + sys_cfg = { + 'datasource': {'Oracle': {'configure_secondary_nics': True}}} + ds, _mocks = self._get_ds(sys_cfg=sys_cfg) + self.assertTrue(ds.ds_cfg['configure_secondary_nics']) + @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True) def test_without_userdata(self, m_is_iscsi_root): """If no user-data is provided, it should not be in return dict.""" @@ -133,9 +185,12 @@ class TestDataSourceOracle(test_helpers.CiTestCase): self.assertEqual(self.my_md['uuid'], ds.get_instance_id()) self.assertEqual(my_userdata, ds.userdata_raw) - @mock.patch(DS_PATH + ".cmdline.read_kernel_cmdline_config") + @mock.patch(DS_PATH + "._add_network_config_from_opc_imds", + side_effect=lambda network_config: network_config) + @mock.patch(DS_PATH + ".cmdline.read_initramfs_config") @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True) - def test_network_cmdline(self, m_is_iscsi_root, 
m_cmdline_config): + def test_network_cmdline(self, m_is_iscsi_root, m_initramfs_config, + _m_add_network_config_from_opc_imds): """network_config should read kernel cmdline.""" distro = mock.MagicMock() ds, _ = self._get_ds(distro=distro, patches={ @@ -145,15 +200,18 @@ class TestDataSourceOracle(test_helpers.CiTestCase): MD_VER: {'system_uuid': self.my_uuid, 'meta_data': self.my_md}}}}) ncfg = {'version': 1, 'config': [{'a': 'b'}]} - m_cmdline_config.return_value = ncfg + m_initramfs_config.return_value = ncfg self.assertTrue(ds._get_data()) self.assertEqual(ncfg, ds.network_config) - m_cmdline_config.assert_called_once_with() + self.assertEqual([mock.call()], m_initramfs_config.call_args_list) self.assertFalse(distro.generate_fallback_config.called) - @mock.patch(DS_PATH + ".cmdline.read_kernel_cmdline_config") + @mock.patch(DS_PATH + "._add_network_config_from_opc_imds", + side_effect=lambda network_config: network_config) + @mock.patch(DS_PATH + ".cmdline.read_initramfs_config") @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True) - def test_network_fallback(self, m_is_iscsi_root, m_cmdline_config): + def test_network_fallback(self, m_is_iscsi_root, m_initramfs_config, + _m_add_network_config_from_opc_imds): """test that fallback network is generated if no kernel cmdline.""" distro = mock.MagicMock() ds, _ = self._get_ds(distro=distro, patches={ @@ -163,18 +221,95 @@ class TestDataSourceOracle(test_helpers.CiTestCase): MD_VER: {'system_uuid': self.my_uuid, 'meta_data': self.my_md}}}}) ncfg = {'version': 1, 'config': [{'a': 'b'}]} - m_cmdline_config.return_value = None + m_initramfs_config.return_value = None self.assertTrue(ds._get_data()) ncfg = {'version': 1, 'config': [{'distro1': 'value'}]} distro.generate_fallback_config.return_value = ncfg self.assertEqual(ncfg, ds.network_config) - m_cmdline_config.assert_called_once_with() + self.assertEqual([mock.call()], m_initramfs_config.call_args_list) distro.generate_fallback_config.assert_called_once_with() - self.assertEqual(1, m_cmdline_config.call_count) # test that the result got cached, and the methods not re-called. 
self.assertEqual(ncfg, ds.network_config) - self.assertEqual(1, m_cmdline_config.call_count) + self.assertEqual(1, m_initramfs_config.call_count) + + @mock.patch(DS_PATH + "._add_network_config_from_opc_imds") + @mock.patch(DS_PATH + ".cmdline.read_initramfs_config", + return_value={'some': 'config'}) + @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True) + def test_secondary_nics_added_to_network_config_if_enabled( + self, _m_is_iscsi_root, _m_initramfs_config, + m_add_network_config_from_opc_imds): + + needle = object() + + def network_config_side_effect(network_config): + network_config['secondary_added'] = needle + + m_add_network_config_from_opc_imds.side_effect = ( + network_config_side_effect) + + distro = mock.MagicMock() + ds, _ = self._get_ds(distro=distro, patches={ + '_is_platform_viable': {'return_value': True}, + 'crawl_metadata': { + 'return_value': { + MD_VER: {'system_uuid': self.my_uuid, + 'meta_data': self.my_md}}}}) + ds.ds_cfg['configure_secondary_nics'] = True + self.assertEqual(needle, ds.network_config['secondary_added']) + + @mock.patch(DS_PATH + "._add_network_config_from_opc_imds") + @mock.patch(DS_PATH + ".cmdline.read_initramfs_config", + return_value={'some': 'config'}) + @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True) + def test_secondary_nics_not_added_to_network_config_by_default( + self, _m_is_iscsi_root, _m_initramfs_config, + m_add_network_config_from_opc_imds): + + def network_config_side_effect(network_config): + network_config['secondary_added'] = True + + m_add_network_config_from_opc_imds.side_effect = ( + network_config_side_effect) + + distro = mock.MagicMock() + ds, _ = self._get_ds(distro=distro, patches={ + '_is_platform_viable': {'return_value': True}, + 'crawl_metadata': { + 'return_value': { + MD_VER: {'system_uuid': self.my_uuid, + 'meta_data': self.my_md}}}}) + self.assertNotIn('secondary_added', ds.network_config) + + @mock.patch(DS_PATH + "._add_network_config_from_opc_imds") + @mock.patch(DS_PATH + ".cmdline.read_initramfs_config") + @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True) + def test_secondary_nic_failure_isnt_blocking( + self, _m_is_iscsi_root, m_initramfs_config, + m_add_network_config_from_opc_imds): + + m_add_network_config_from_opc_imds.side_effect = Exception() + + distro = mock.MagicMock() + ds, _ = self._get_ds(distro=distro, patches={ + '_is_platform_viable': {'return_value': True}, + 'crawl_metadata': { + 'return_value': { + MD_VER: {'system_uuid': self.my_uuid, + 'meta_data': self.my_md}}}}) + ds.ds_cfg['configure_secondary_nics'] = True + self.assertEqual(ds.network_config, m_initramfs_config.return_value) + self.assertIn('Failed to fetch secondary network configuration', + self.logs.getvalue()) + + def test_ds_network_cfg_preferred_over_initramfs(self): + """Ensure that DS net config is preferred over initramfs config""" + network_config_sources = oracle.DataSourceOracle.network_config_sources + self.assertLess( + network_config_sources.index(NetworkConfigSource.ds), + network_config_sources.index(NetworkConfigSource.initramfs) + ) @mock.patch(DS_PATH + "._read_system_uuid", return_value=str(uuid.uuid4())) @@ -198,7 +333,7 @@ class TestReadMetaData(test_helpers.HttprettyTestCase): for k, v in data.items(): httpretty.register_uri( httpretty.GET, self.mdurl + MD_VER + "/" + k, - v if not isinstance(v, six.text_type) else v.encode('utf-8')) + v if not isinstance(v, str) else v.encode('utf-8')) def test_broken_no_sys_uuid(self, m_read_system_uuid): """Datasource requires ability to read 
system_uuid and true return.""" @@ -336,4 +471,265 @@ class TestLoadIndex(test_helpers.CiTestCase): oracle._load_index("\n".join(["meta_data.json", "user_data"]))) +class TestNetworkConfigFromOpcImds(test_helpers.CiTestCase): + + with_logs = True + + def setUp(self): + super(TestNetworkConfigFromOpcImds, self).setUp() + self.add_patch(DS_PATH + '.readurl', 'm_readurl') + self.add_patch(DS_PATH + '.get_interfaces_by_mac', + 'm_get_interfaces_by_mac') + + def test_failure_to_readurl(self): + # readurl failures should just bubble out to the caller + self.m_readurl.side_effect = Exception('oh no') + with self.assertRaises(Exception) as excinfo: + oracle._add_network_config_from_opc_imds({}) + self.assertEqual(str(excinfo.exception), 'oh no') + + def test_empty_response(self): + # empty response error should just bubble out to the caller + self.m_readurl.return_value = '' + with self.assertRaises(Exception): + oracle._add_network_config_from_opc_imds([]) + + def test_invalid_json(self): + # invalid JSON error should just bubble out to the caller + self.m_readurl.return_value = '{' + with self.assertRaises(Exception): + oracle._add_network_config_from_opc_imds([]) + + def test_no_secondary_nics_does_not_mutate_input(self): + self.m_readurl.return_value = json.dumps([{}]) + # We test this by passing in a non-dict to ensure that no dict + # operations are used; failure would be seen as exceptions + oracle._add_network_config_from_opc_imds(object()) + + def test_bare_metal_machine_skipped(self): + # nicIndex in the first entry indicates a bare metal machine + self.m_readurl.return_value = OPC_BM_SECONDARY_VNIC_RESPONSE + # We test this by passing in a non-dict to ensure that no dict + # operations are used + self.assertFalse(oracle._add_network_config_from_opc_imds(object())) + self.assertIn('bare metal machine', self.logs.getvalue()) + + def test_missing_mac_skipped(self): + self.m_readurl.return_value = OPC_VM_SECONDARY_VNIC_RESPONSE + self.m_get_interfaces_by_mac.return_value = {} + + network_config = {'version': 1, 'config': [{'primary': 'nic'}]} + oracle._add_network_config_from_opc_imds(network_config) + + self.assertEqual(1, len(network_config['config'])) + self.assertIn( + 'Interface with MAC 00:00:17:02:2b:b1 not found; skipping', + self.logs.getvalue()) + + def test_missing_mac_skipped_v2(self): + self.m_readurl.return_value = OPC_VM_SECONDARY_VNIC_RESPONSE + self.m_get_interfaces_by_mac.return_value = {} + + network_config = {'version': 2, 'ethernets': {'primary': {'nic': {}}}} + oracle._add_network_config_from_opc_imds(network_config) + + self.assertEqual(1, len(network_config['ethernets'])) + self.assertIn( + 'Interface with MAC 00:00:17:02:2b:b1 not found; skipping', + self.logs.getvalue()) + + def test_secondary_nic(self): + self.m_readurl.return_value = OPC_VM_SECONDARY_VNIC_RESPONSE + mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3' + self.m_get_interfaces_by_mac.return_value = { + mac_addr: nic_name, + } + + network_config = {'version': 1, 'config': [{'primary': 'nic'}]} + oracle._add_network_config_from_opc_imds(network_config) + + # The input is mutated + self.assertEqual(2, len(network_config['config'])) + + secondary_nic_cfg = network_config['config'][1] + self.assertEqual(nic_name, secondary_nic_cfg['name']) + self.assertEqual('physical', secondary_nic_cfg['type']) + self.assertEqual(mac_addr, secondary_nic_cfg['mac_address']) + self.assertEqual(9000, secondary_nic_cfg['mtu']) + + self.assertEqual(1, len(secondary_nic_cfg['subnets'])) + subnet_cfg = 
secondary_nic_cfg['subnets'][0] + # These values are hard-coded in OPC_VM_SECONDARY_VNIC_RESPONSE + self.assertEqual('10.0.0.231', subnet_cfg['address']) + + def test_secondary_nic_v2(self): + self.m_readurl.return_value = OPC_VM_SECONDARY_VNIC_RESPONSE + mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3' + self.m_get_interfaces_by_mac.return_value = { + mac_addr: nic_name, + } + + network_config = {'version': 2, 'ethernets': {'primary': {'nic': {}}}} + oracle._add_network_config_from_opc_imds(network_config) + + # The input is mutated + self.assertEqual(2, len(network_config['ethernets'])) + + secondary_nic_cfg = network_config['ethernets']['ens3'] + self.assertFalse(secondary_nic_cfg['dhcp4']) + self.assertFalse(secondary_nic_cfg['dhcp6']) + self.assertEqual(mac_addr, secondary_nic_cfg['match']['macaddress']) + self.assertEqual(9000, secondary_nic_cfg['mtu']) + + self.assertEqual(1, len(secondary_nic_cfg['addresses'])) + # These values are hard-coded in OPC_VM_SECONDARY_VNIC_RESPONSE + self.assertEqual('10.0.0.231', secondary_nic_cfg['addresses'][0]) + + +class TestNetworkConfigFiltersNetFailover(test_helpers.CiTestCase): + + with_logs = True + + def setUp(self): + super(TestNetworkConfigFiltersNetFailover, self).setUp() + self.add_patch(DS_PATH + '.get_interfaces_by_mac', + 'm_get_interfaces_by_mac') + self.add_patch(DS_PATH + '.is_netfail_master', 'm_netfail_master') + + def test_ignore_bogus_network_config(self): + netcfg = {'something': 'here'} + passed_netcfg = copy.copy(netcfg) + oracle._ensure_netfailover_safe(passed_netcfg) + self.assertEqual(netcfg, passed_netcfg) + + def test_ignore_network_config_unknown_versions(self): + netcfg = {'something': 'here', 'version': 3} + passed_netcfg = copy.copy(netcfg) + oracle._ensure_netfailover_safe(passed_netcfg) + self.assertEqual(netcfg, passed_netcfg) + + def test_checks_v1_type_physical_interfaces(self): + mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3' + self.m_get_interfaces_by_mac.return_value = { + mac_addr: nic_name, + } + netcfg = {'version': 1, 'config': [ + {'type': 'physical', 'name': nic_name, 'mac_address': mac_addr, + 'subnets': [{'type': 'dhcp4'}]}]} + passed_netcfg = copy.copy(netcfg) + self.m_netfail_master.return_value = False + oracle._ensure_netfailover_safe(passed_netcfg) + self.assertEqual(netcfg, passed_netcfg) + self.assertEqual([mock.call(nic_name)], + self.m_netfail_master.call_args_list) + + def test_checks_v1_skips_non_phys_interfaces(self): + mac_addr, nic_name = '00:00:17:02:2b:b1', 'bond0' + self.m_get_interfaces_by_mac.return_value = { + mac_addr: nic_name, + } + netcfg = {'version': 1, 'config': [ + {'type': 'bond', 'name': nic_name, 'mac_address': mac_addr, + 'subnets': [{'type': 'dhcp4'}]}]} + passed_netcfg = copy.copy(netcfg) + oracle._ensure_netfailover_safe(passed_netcfg) + self.assertEqual(netcfg, passed_netcfg) + self.assertEqual(0, self.m_netfail_master.call_count) + + def test_removes_master_mac_property_v1(self): + nic_master, mac_master = 'ens3', self.random_string() + nic_other, mac_other = 'ens7', self.random_string() + nic_extra, mac_extra = 'enp0s1f2', self.random_string() + self.m_get_interfaces_by_mac.return_value = { + mac_master: nic_master, + mac_other: nic_other, + mac_extra: nic_extra, + } + netcfg = {'version': 1, 'config': [ + {'type': 'physical', 'name': nic_master, + 'mac_address': mac_master}, + {'type': 'physical', 'name': nic_other, 'mac_address': mac_other}, + {'type': 'physical', 'name': nic_extra, 'mac_address': mac_extra}, + ]} + + def _is_netfail_master(iface): + if iface 
== 'ens3': + return True + return False + self.m_netfail_master.side_effect = _is_netfail_master + expected_cfg = {'version': 1, 'config': [ + {'type': 'physical', 'name': nic_master}, + {'type': 'physical', 'name': nic_other, 'mac_address': mac_other}, + {'type': 'physical', 'name': nic_extra, 'mac_address': mac_extra}, + ]} + oracle._ensure_netfailover_safe(netcfg) + self.assertEqual(expected_cfg, netcfg) + + def test_checks_v2_type_ethernet_interfaces(self): + mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3' + self.m_get_interfaces_by_mac.return_value = { + mac_addr: nic_name, + } + netcfg = {'version': 2, 'ethernets': { + nic_name: {'dhcp4': True, 'critical': True, 'set-name': nic_name, + 'match': {'macaddress': mac_addr}}}} + passed_netcfg = copy.copy(netcfg) + self.m_netfail_master.return_value = False + oracle._ensure_netfailover_safe(passed_netcfg) + self.assertEqual(netcfg, passed_netcfg) + self.assertEqual([mock.call(nic_name)], + self.m_netfail_master.call_args_list) + + def test_skips_v2_non_ethernet_interfaces(self): + mac_addr, nic_name = '00:00:17:02:2b:b1', 'wlps0' + self.m_get_interfaces_by_mac.return_value = { + mac_addr: nic_name, + } + netcfg = {'version': 2, 'wifis': { + nic_name: {'dhcp4': True, 'critical': True, 'set-name': nic_name, + 'match': {'macaddress': mac_addr}}}} + passed_netcfg = copy.copy(netcfg) + oracle._ensure_netfailover_safe(passed_netcfg) + self.assertEqual(netcfg, passed_netcfg) + self.assertEqual(0, self.m_netfail_master.call_count) + + def test_removes_master_mac_property_v2(self): + nic_master, mac_master = 'ens3', self.random_string() + nic_other, mac_other = 'ens7', self.random_string() + nic_extra, mac_extra = 'enp0s1f2', self.random_string() + self.m_get_interfaces_by_mac.return_value = { + mac_master: nic_master, + mac_other: nic_other, + mac_extra: nic_extra, + } + netcfg = {'version': 2, 'ethernets': { + nic_extra: {'dhcp4': True, 'set-name': nic_extra, + 'match': {'macaddress': mac_extra}}, + nic_other: {'dhcp4': True, 'set-name': nic_other, + 'match': {'macaddress': mac_other}}, + nic_master: {'dhcp4': True, 'set-name': nic_master, + 'match': {'macaddress': mac_master}}, + }} + + def _is_netfail_master(iface): + if iface == 'ens3': + return True + return False + self.m_netfail_master.side_effect = _is_netfail_master + + expected_cfg = {'version': 2, 'ethernets': { + nic_master: {'dhcp4': True, 'match': {'name': nic_master}}, + nic_extra: {'dhcp4': True, 'set-name': nic_extra, + 'match': {'macaddress': mac_extra}}, + nic_other: {'dhcp4': True, 'set-name': nic_other, + 'match': {'macaddress': mac_other}}, + }} + oracle._ensure_netfailover_safe(netcfg) + import pprint + pprint.pprint(netcfg) + print('---- ^^ modified ^^ ---- vv original vv ----') + pprint.pprint(expected_cfg) + self.assertEqual(expected_cfg, netcfg) + + # vi: ts=4 expandtab diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py index 3f99b58c..c3a9b5b7 100644 --- a/cloudinit/ssh_util.py +++ b/cloudinit/ssh_util.py @@ -17,7 +17,7 @@ LOG = logging.getLogger(__name__) # See: man sshd_config DEF_SSHD_CFG = "/etc/ssh/sshd_config" -# taken from openssh source openssh-7.3p1/sshkey.c: +# taken from OpenSSH source openssh-7.3p1/sshkey.c: # static const struct keytype keytypes[] = { ... 
} VALID_KEY_TYPES = ( "dsa", @@ -160,19 +160,19 @@ class AuthKeyLineParser(object): comment=comment, options=options) -def parse_authorized_keys(fname): +def parse_authorized_keys(fnames): lines = [] - try: - if os.path.isfile(fname): - lines = util.load_file(fname).splitlines() - except (IOError, OSError): - util.logexc(LOG, "Error reading lines from %s", fname) - lines = [] - parser = AuthKeyLineParser() contents = [] - for line in lines: - contents.append(parser.parse(line)) + for fname in fnames: + try: + if os.path.isfile(fname): + lines = util.load_file(fname).splitlines() + for line in lines: + contents.append(parser.parse(line)) + except (IOError, OSError): + util.logexc(LOG, "Error reading lines from %s", fname) + return contents @@ -207,36 +207,50 @@ def update_authorized_keys(old_entries, keys): def users_ssh_info(username): pw_ent = pwd.getpwnam(username) if not pw_ent or not pw_ent.pw_dir: - raise RuntimeError("Unable to get ssh info for user %r" % (username)) + raise RuntimeError("Unable to get SSH info for user %r" % (username)) return (os.path.join(pw_ent.pw_dir, '.ssh'), pw_ent) -def extract_authorized_keys(username): +def render_authorizedkeysfile_paths(value, homedir, username): + # The 'AuthorizedKeysFile' may contain tokens + # of the form %T which are substituted during connection set-up. + # The following tokens are defined: %% is replaced by a literal + # '%', %h is replaced by the home directory of the user being + # authenticated and %u is replaced by the username of that user. + macros = (("%h", homedir), ("%u", username), ("%%", "%")) + if not value: + value = "%h/.ssh/authorized_keys" + paths = value.split() + rendered = [] + for path in paths: + for macro, field in macros: + path = path.replace(macro, field) + if not path.startswith("/"): + path = os.path.join(homedir, path) + rendered.append(path) + return rendered + + +def extract_authorized_keys(username, sshd_cfg_file=DEF_SSHD_CFG): (ssh_dir, pw_ent) = users_ssh_info(username) - auth_key_fn = None + default_authorizedkeys_file = os.path.join(ssh_dir, 'authorized_keys') + auth_key_fns = [] with util.SeLinuxGuard(ssh_dir, recursive=True): try: - # The 'AuthorizedKeysFile' may contain tokens - # of the form %T which are substituted during connection set-up. - # The following tokens are defined: %% is replaced by a literal - # '%', %h is replaced by the home directory of the user being - # authenticated and %u is replaced by the username of that user. 
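An aside on the token handling that render_authorizedkeysfile_paths() introduces here: sshd's AuthorizedKeysFile value may hold several whitespace-separated paths, the %h, %u and %% tokens are expanded, and relative paths are anchored at the user's home directory. A condensed, runnable sketch of that logic (the sample sshd_config value, home directory and user below are illustrative, not taken from this change):

    import os

    def render_paths(value, homedir, username):
        # Expand %h (home dir), %u (username) and %% (literal '%'),
        # then anchor any relative path at the user's home directory.
        macros = (("%h", homedir), ("%u", username), ("%%", "%"))
        rendered = []
        for path in (value or "%h/.ssh/authorized_keys").split():
            for macro, field in macros:
                path = path.replace(macro, field)
            if not path.startswith("/"):
                path = os.path.join(homedir, path)
            rendered.append(path)
        return rendered

    # An AuthorizedKeysFile with two entries, one token-based, one relative:
    print(render_paths("%h/.ssh/authorized_keys .ssh/keys2", "/home/bob", "bob"))
    # -> ['/home/bob/.ssh/authorized_keys', '/home/bob/.ssh/keys2']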
-        ssh_cfg = parse_ssh_config_map(DEF_SSHD_CFG)
-        auth_key_fn = ssh_cfg.get("authorizedkeysfile", '').strip()
-        if not auth_key_fn:
-            auth_key_fn = "%h/.ssh/authorized_keys"
-        auth_key_fn = auth_key_fn.replace("%h", pw_ent.pw_dir)
-        auth_key_fn = auth_key_fn.replace("%u", username)
-        auth_key_fn = auth_key_fn.replace("%%", '%')
-        if not auth_key_fn.startswith('/'):
-            auth_key_fn = os.path.join(pw_ent.pw_dir, auth_key_fn)
+            ssh_cfg = parse_ssh_config_map(sshd_cfg_file)
+            auth_key_fns = render_authorizedkeysfile_paths(
+                ssh_cfg.get("authorizedkeysfile", "%h/.ssh/authorized_keys"),
+                pw_ent.pw_dir, username)
+
         except (IOError, OSError):
             # Give up and use a default key filename
-            auth_key_fn = os.path.join(ssh_dir, 'authorized_keys')
-            util.logexc(LOG, "Failed extracting 'AuthorizedKeysFile' in ssh "
+            auth_key_fns = [default_authorizedkeys_file]
+            util.logexc(LOG, "Failed extracting 'AuthorizedKeysFile' in SSH "
                         "config from %r, using 'AuthorizedKeysFile' file "
-                        "%r instead", DEF_SSHD_CFG, auth_key_fn)
-    return (auth_key_fn, parse_authorized_keys(auth_key_fn))
+                        "%r instead", DEF_SSHD_CFG, auth_key_fns[0])
+
+    # always store all the keys in the user's private file
+    return (default_authorizedkeys_file, parse_authorized_keys(auth_key_fns))


 def setup_user_keys(keys, username, options=None):
@@ -335,7 +349,7 @@ def update_ssh_config(updates, fname=DEF_SSHD_CFG):


 def update_ssh_config_lines(lines, updates):
-    """Update the ssh config lines per updates.
+    """Update the SSH config lines per updates.

     @param lines: array of SshdConfigLine.  This array is updated in place.
     @param updates: dictionary of desired values {Option: value}
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 8a064124..db8ba64c 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -6,11 +6,9 @@

 import copy
 import os
+import pickle
 import sys

-import six
-from six.moves import cPickle as pickle
-
 from cloudinit.settings import (
     FREQUENCIES, CLOUD_CONFIG, PER_INSTANCE, RUN_CLOUD_CONFIG)

@@ -24,6 +22,7 @@ from cloudinit.handlers.shell_script import ShellScriptPartHandler
 from cloudinit.handlers.upstart_job import UpstartJobPartHandler

 from cloudinit.event import EventType
+from cloudinit.sources import NetworkConfigSource

 from cloudinit import cloud
 from cloudinit import config
@@ -500,7 +499,7 @@ class Init(object):
         # Init the handlers first
         for (_ctype, mod) in c_handlers.items():
             if mod in c_handlers.initialized:
-                # Avoid initing the same module twice (if said module
+                # Avoid initializing the same module twice (if said module
                 # is registered to more than one content-type).
continue handlers.call_begin(mod, data, frequency) @@ -548,11 +547,15 @@ class Init(object): with events.ReportEventStack("consume-user-data", "reading and applying user-data", parent=self.reporter): + if util.get_cfg_option_bool(self.cfg, 'allow_userdata', True): self._consume_userdata(frequency) + else: + LOG.debug('allow_userdata = False: discarding user-data') + with events.ReportEventStack("consume-vendor-data", "reading and applying vendor-data", parent=self.reporter): - self._consume_vendordata(frequency) + self._consume_vendordata(frequency) # Perform post-consumption adjustments so that # modules that run during the init stage reflect @@ -630,32 +633,54 @@ class Init(object): if os.path.exists(disable_file): return (None, disable_file) - cmdline_cfg = ('cmdline', cmdline.read_kernel_cmdline_config()) - dscfg = ('ds', None) + available_cfgs = { + NetworkConfigSource.cmdline: cmdline.read_kernel_cmdline_config(), + NetworkConfigSource.initramfs: cmdline.read_initramfs_config(), + NetworkConfigSource.ds: None, + NetworkConfigSource.system_cfg: self.cfg.get('network'), + } + if self.datasource and hasattr(self.datasource, 'network_config'): - dscfg = ('ds', self.datasource.network_config) - sys_cfg = ('system_cfg', self.cfg.get('network')) + available_cfgs[NetworkConfigSource.ds] = ( + self.datasource.network_config) - for loc, ncfg in (cmdline_cfg, sys_cfg, dscfg): + if self.datasource: + order = self.datasource.network_config_sources + else: + order = sources.DataSource.network_config_sources + for cfg_source in order: + if not hasattr(NetworkConfigSource, cfg_source): + LOG.warning('data source specifies an invalid network' + ' cfg_source: %s', cfg_source) + continue + if cfg_source not in available_cfgs: + LOG.warning('data source specifies an unavailable network' + ' cfg_source: %s', cfg_source) + continue + ncfg = available_cfgs[cfg_source] if net.is_disabled_cfg(ncfg): - LOG.debug("network config disabled by %s", loc) - return (None, loc) + LOG.debug("network config disabled by %s", cfg_source) + return (None, cfg_source) if ncfg: - return (ncfg, loc) - return (self.distro.generate_fallback_config(), "fallback") - - def apply_network_config(self, bring_up): - netcfg, src = self._find_networking_config() - if netcfg is None: - LOG.info("network config is disabled by %s", src) - return + return (ncfg, cfg_source) + return (self.distro.generate_fallback_config(), + NetworkConfigSource.fallback) + def _apply_netcfg_names(self, netcfg): try: LOG.debug("applying net config names for %s", netcfg) self.distro.apply_network_config_names(netcfg) except Exception as e: LOG.warning("Failed to rename devices: %s", e) + def apply_network_config(self, bring_up): + # get a network config + netcfg, src = self._find_networking_config() + if netcfg is None: + LOG.info("network config is disabled by %s", src) + return + + # request an update if needed/available if self.datasource is not NULL_DATA_SOURCE: if not self.is_new_instance(): if not self.datasource.update_metadata([EventType.BOOT]): @@ -663,8 +688,20 @@ class Init(object): "No network config applied. 
Neither a new instance" " nor datasource network update on '%s' event", EventType.BOOT) + # nothing new, but ensure proper names + self._apply_netcfg_names(netcfg) return + else: + # refresh netcfg after update + netcfg, src = self._find_networking_config() + + # ensure all physical devices in config are present + net.wait_for_physdevs(netcfg) + + # apply renames from config + self._apply_netcfg_names(netcfg) + # rendering config LOG.info("Applying network configuration from %s bringup=%s: %s", src, bring_up, netcfg) try: @@ -719,7 +756,7 @@ class Modules(object): for item in cfg_mods: if not item: continue - if isinstance(item, six.string_types): + if isinstance(item, str): module_list.append({ 'mod': item.strip(), }) diff --git a/cloudinit/temp_utils.py b/cloudinit/temp_utils.py index c98a1b53..346276ec 100644 --- a/cloudinit/temp_utils.py +++ b/cloudinit/temp_utils.py @@ -81,7 +81,7 @@ def ExtendedTemporaryFile(**kwargs): @contextlib.contextmanager -def tempdir(**kwargs): +def tempdir(rmtree_ignore_errors=False, **kwargs): # This seems like it was only added in python 3.2 # Make it since its useful... # See: http://bugs.python.org/file12970/tempdir.patch @@ -89,7 +89,7 @@ def tempdir(**kwargs): try: yield tdir finally: - shutil.rmtree(tdir) + shutil.rmtree(tdir, ignore_errors=rmtree_ignore_errors) def mkdtemp(**kwargs): diff --git a/cloudinit/templater.py b/cloudinit/templater.py index b668674b..e47cdeda 100644 --- a/cloudinit/templater.py +++ b/cloudinit/templater.py @@ -44,7 +44,7 @@ MISSING_JINJA_PREFIX = u'CI_MISSING_JINJA_VAR/' @implements_to_string # Needed for python2.7. Otherwise cached super.__str__ class UndefinedJinjaVariable(JUndefined): - """Class used to represent any undefined jinja template varible.""" + """Class used to represent any undefined jinja template variable.""" def __str__(self): return u'%s%s' % (MISSING_JINJA_PREFIX, self._undefined_name) @@ -58,7 +58,7 @@ class UndefinedJinjaVariable(JUndefined): def basic_render(content, params): - """This does sumple replacement of bash variable like templates. + """This does simple replacement of bash variable like templates. It identifies patterns like ${a} or $a and can also identify patterns like ${a.b} or $a.b which will look for a key 'b' in the dictionary rooted diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py index 2eb7b0cd..70f6bad7 100644 --- a/cloudinit/tests/helpers.py +++ b/cloudinit/tests/helpers.py @@ -4,15 +4,17 @@ from __future__ import print_function import functools import httpretty +import io import logging import os +import random import shutil +import string import sys import tempfile import time +from unittest import mock -import mock -import six import unittest2 from unittest2.util import strclass @@ -41,26 +43,6 @@ _real_subp = util.subp SkipTest = unittest2.SkipTest skipIf = unittest2.skipIf -# Used for detecting different python versions -PY2 = False -PY26 = False -PY27 = False -PY3 = False - -_PY_VER = sys.version_info -_PY_MAJOR, _PY_MINOR, _PY_MICRO = _PY_VER[0:3] -if (_PY_MAJOR, _PY_MINOR) <= (2, 6): - if (_PY_MAJOR, _PY_MINOR) == (2, 6): - PY26 = True - if (_PY_MAJOR, _PY_MINOR) >= (2, 0): - PY2 = True -else: - if (_PY_MAJOR, _PY_MINOR) == (2, 7): - PY27 = True - PY2 = True - if (_PY_MAJOR, _PY_MINOR) >= (3, 0): - PY3 = True - # Makes the old path start # with new base instead of whatever @@ -90,7 +72,7 @@ def retarget_many_wrapper(new_base, am, old_func): # Python 3 some of these now accept file-descriptors (integers). 
# That breaks rebase_path() so in lieu of a better solution, just # don't rebase if we get a fd. - if isinstance(path, six.string_types): + if isinstance(path, str): n_args[i] = rebase_path(path, new_base) return old_func(*n_args, **kwds) return wrapper @@ -167,7 +149,7 @@ class CiTestCase(TestCase): if self.with_logs: # Create a log handler so unit tests can search expected logs. self.logger = logging.getLogger() - self.logs = six.StringIO() + self.logs = io.StringIO() formatter = logging.Formatter('%(levelname)s: %(message)s') handler = logging.StreamHandler(self.logs) handler.setFormatter(formatter) @@ -184,7 +166,7 @@ class CiTestCase(TestCase): else: cmd = args[0] - if not isinstance(cmd, six.string_types): + if not isinstance(cmd, str): cmd = cmd[0] pass_through = False if not isinstance(self.allowed_subp, (list, bool)): @@ -207,6 +189,7 @@ class CiTestCase(TestCase): if self.with_logs: # Remove the handler we setup logging.getLogger().handlers = self.old_handlers + logging.getLogger().level = None util.subp = _real_subp super(CiTestCase, self).tearDown() @@ -217,7 +200,8 @@ class CiTestCase(TestCase): prefix="ci-%s." % self.__class__.__name__) else: tmpd = tempfile.mkdtemp(dir=dir) - self.addCleanup(functools.partial(shutil.rmtree, tmpd)) + self.addCleanup( + functools.partial(shutil.rmtree, tmpd, ignore_errors=True)) return tmpd def tmp_path(self, path, dir=None): @@ -261,6 +245,12 @@ class CiTestCase(TestCase): myds.metadata.update(metadata) return cloud.Cloud(myds, self.paths, sys_cfg, mydist, None) + @classmethod + def random_string(cls, length=8): + """ return a random lowercase string with default length of 8""" + return ''.join( + random.choice(string.ascii_lowercase) for _ in range(length)) + class ResourceUsingTestCase(CiTestCase): @@ -356,8 +346,9 @@ class FilesystemMockingTestCase(ResourceUsingTestCase): def patchOpen(self, new_root): trap_func = retarget_many_wrapper(new_root, 1, open) - name = 'builtins.open' if PY3 else '__builtin__.open' - self.patched_funcs.enter_context(mock.patch(name, trap_func)) + self.patched_funcs.enter_context( + mock.patch('builtins.open', trap_func) + ) def patchStdoutAndStderr(self, stdout=None, stderr=None): if stdout is not None: @@ -430,7 +421,7 @@ def populate_dir(path, files): p = os.path.sep.join([path, name]) util.ensure_dir(os.path.dirname(p)) with open(p, "wb") as fp: - if isinstance(content, six.binary_type): + if isinstance(content, bytes): fp.write(content) else: fp.write(content.encode('utf-8')) diff --git a/cloudinit/tests/test_dhclient_hook.py b/cloudinit/tests/test_dhclient_hook.py index 7aab8dd5..eadae81c 100644 --- a/cloudinit/tests/test_dhclient_hook.py +++ b/cloudinit/tests/test_dhclient_hook.py @@ -7,8 +7,8 @@ from cloudinit.tests.helpers import CiTestCase, dir2dict, populate_dir import argparse import json -import mock import os +from unittest import mock class TestDhclientHook(CiTestCase): diff --git a/cloudinit/tests/test_gpg.py b/cloudinit/tests/test_gpg.py index 0562b966..8dd57137 100644 --- a/cloudinit/tests/test_gpg.py +++ b/cloudinit/tests/test_gpg.py @@ -1,12 +1,12 @@ # This file is part of cloud-init. See LICENSE file for license information. 
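A short note on the mock migration visible in these test modules: with Python 2 support gone, the external mock package and the PY2/PY3 detection constants are no longer needed, so patch targets such as the built-in open can be referenced unconditionally. A minimal sketch of the py3-only pattern (the config-reading function and file name are illustrative):

    import io
    from unittest import mock

    def read_config(path):
        with open(path) as stream:
            return stream.read()

    # Patch builtins.open directly; on Python 3 there is no __builtin__.
    with mock.patch('builtins.open', return_value=io.StringIO('key: value')):
        assert read_config('/etc/fake.cfg') == 'key: value'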
"""Test gpg module.""" +from unittest import mock + from cloudinit import gpg from cloudinit import util from cloudinit.tests.helpers import CiTestCase -import mock - @mock.patch("cloudinit.gpg.time.sleep") @mock.patch("cloudinit.gpg.util.subp") diff --git a/cloudinit/tests/test_netinfo.py b/cloudinit/tests/test_netinfo.py index d76e768e..1c8a791e 100644 --- a/cloudinit/tests/test_netinfo.py +++ b/cloudinit/tests/test_netinfo.py @@ -11,6 +11,7 @@ from cloudinit.tests.helpers import CiTestCase, mock, readResource # Example ifconfig and route output SAMPLE_OLD_IFCONFIG_OUT = readResource("netinfo/old-ifconfig-output") SAMPLE_NEW_IFCONFIG_OUT = readResource("netinfo/new-ifconfig-output") +SAMPLE_FREEBSD_IFCONFIG_OUT = readResource("netinfo/freebsd-ifconfig-output") SAMPLE_IPADDRSHOW_OUT = readResource("netinfo/sample-ipaddrshow-output") SAMPLE_ROUTE_OUT_V4 = readResource("netinfo/sample-route-output-v4") SAMPLE_ROUTE_OUT_V6 = readResource("netinfo/sample-route-output-v6") @@ -18,6 +19,7 @@ SAMPLE_IPROUTE_OUT_V4 = readResource("netinfo/sample-iproute-output-v4") SAMPLE_IPROUTE_OUT_V6 = readResource("netinfo/sample-iproute-output-v6") NETDEV_FORMATTED_OUT = readResource("netinfo/netdev-formatted-output") ROUTE_FORMATTED_OUT = readResource("netinfo/route-formatted-output") +FREEBSD_NETDEV_OUT = readResource("netinfo/freebsd-netdev-formatted-output") class TestNetInfo(CiTestCase): @@ -45,6 +47,18 @@ class TestNetInfo(CiTestCase): @mock.patch('cloudinit.netinfo.util.which') @mock.patch('cloudinit.netinfo.util.subp') + def test_netdev_freebsd_nettools_pformat(self, m_subp, m_which): + """netdev_pformat properly rendering netdev new nettools info.""" + m_subp.return_value = (SAMPLE_FREEBSD_IFCONFIG_OUT, '') + m_which.side_effect = lambda x: x if x == 'ifconfig' else None + content = netdev_pformat() + print() + print(content) + print() + self.assertEqual(FREEBSD_NETDEV_OUT, content) + + @mock.patch('cloudinit.netinfo.util.which') + @mock.patch('cloudinit.netinfo.util.subp') def test_netdev_iproute_pformat(self, m_subp, m_which): """netdev_pformat properly rendering ip route info.""" m_subp.return_value = (SAMPLE_IPADDRSHOW_OUT, '') diff --git a/cloudinit/tests/test_stages.py b/cloudinit/tests/test_stages.py index 94b6b255..d5c9c0e4 100644 --- a/cloudinit/tests/test_stages.py +++ b/cloudinit/tests/test_stages.py @@ -6,6 +6,7 @@ import os from cloudinit import stages from cloudinit import sources +from cloudinit.sources import NetworkConfigSource from cloudinit.event import EventType from cloudinit.util import write_file @@ -37,6 +38,7 @@ class FakeDataSource(sources.DataSource): class TestInit(CiTestCase): with_logs = True + allowed_subp = False def setUp(self): super(TestInit, self).setUp() @@ -57,84 +59,189 @@ class TestInit(CiTestCase): (None, disable_file), self.init._find_networking_config()) + @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') - def test_wb__find_networking_config_disabled_by_kernel(self, m_cmdline): + def test_wb__find_networking_config_disabled_by_kernel( + self, m_cmdline, m_initramfs): """find_networking_config returns when disabled by kernel cmdline.""" m_cmdline.return_value = {'config': 'disabled'} + m_initramfs.return_value = {'config': ['fake_initrd']} self.assertEqual( - (None, 'cmdline'), + (None, NetworkConfigSource.cmdline), self.init._find_networking_config()) self.assertEqual('DEBUG: network config disabled by cmdline\n', self.logs.getvalue()) + 
+    @mock.patch('cloudinit.stages.cmdline.read_initramfs_config')
     @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
-    def test_wb__find_networking_config_disabled_by_datasrc(self, m_cmdline):
+    def test_wb__find_networking_config_disabled_by_initrd(
+            self, m_cmdline, m_initramfs):
+        """find_networking_config returns when disabled by initramfs cfg."""
+        m_cmdline.return_value = {}
+        m_initramfs.return_value = {'config': 'disabled'}
+        self.assertEqual(
+            (None, NetworkConfigSource.initramfs),
+            self.init._find_networking_config())
+        self.assertEqual('DEBUG: network config disabled by initramfs\n',
+                         self.logs.getvalue())
+
+    @mock.patch('cloudinit.stages.cmdline.read_initramfs_config')
+    @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
+    def test_wb__find_networking_config_disabled_by_datasrc(
+            self, m_cmdline, m_initramfs):
         """find_networking_config returns when disabled by datasource cfg."""
         m_cmdline.return_value = {}  # Kernel doesn't disable networking
+        m_initramfs.return_value = {}  # initramfs doesn't disable networking
         self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}},
                           'network': {}}  # system config doesn't disable

         self.init.datasource = FakeDataSource(
             network_config={'config': 'disabled'})
         self.assertEqual(
-            (None, 'ds'),
+            (None, NetworkConfigSource.ds),
             self.init._find_networking_config())
         self.assertEqual('DEBUG: network config disabled by ds\n',
                          self.logs.getvalue())

+    @mock.patch('cloudinit.stages.cmdline.read_initramfs_config')
     @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
-    def test_wb__find_networking_config_disabled_by_sysconfig(self, m_cmdline):
+    def test_wb__find_networking_config_disabled_by_sysconfig(
+            self, m_cmdline, m_initramfs):
         """find_networking_config returns when disabled by system config."""
         m_cmdline.return_value = {}  # Kernel doesn't disable networking
+        m_initramfs.return_value = {}  # initramfs doesn't disable networking
         self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}},
                           'network': {'config': 'disabled'}}
         self.assertEqual(
-            (None, 'system_cfg'),
+            (None, NetworkConfigSource.system_cfg),
             self.init._find_networking_config())
         self.assertEqual('DEBUG: network config disabled by system_cfg\n',
                          self.logs.getvalue())

+    @mock.patch('cloudinit.stages.cmdline.read_initramfs_config')
+    @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
+    def test__find_networking_config_uses_datasrc_order(
+            self, m_cmdline, m_initramfs):
+        """find_networking_config should check sources in DS defined order"""
+        # cmdline and initramfs, which would normally be preferred over other
+        # sources, disable networking; in this case, though, the DS moves them
+        # later so its own config is preferred
+        m_cmdline.return_value = {'config': 'disabled'}
+        m_initramfs.return_value = {'config': 'disabled'}
+
+        ds_net_cfg = {'config': {'needle': True}}
+        self.init.datasource = FakeDataSource(network_config=ds_net_cfg)
+        self.init.datasource.network_config_sources = [
+            NetworkConfigSource.ds, NetworkConfigSource.system_cfg,
+            NetworkConfigSource.cmdline, NetworkConfigSource.initramfs]
+
+        self.assertEqual(
+            (ds_net_cfg, NetworkConfigSource.ds),
+            self.init._find_networking_config())
+
+    @mock.patch('cloudinit.stages.cmdline.read_initramfs_config')
+    @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
+    def test__find_networking_config_warns_if_datasrc_uses_invalid_src(
+            self, m_cmdline, m_initramfs):
+        """find_networking_config warns when the DS names an invalid src."""
+        ds_net_cfg = {'config': {'needle': True}}
+        self.init.datasource = FakeDataSource(network_config=ds_net_cfg)
+        self.init.datasource.network_config_sources = [
+            'invalid_src', NetworkConfigSource.ds]
+
+        self.assertEqual(
+            (ds_net_cfg, NetworkConfigSource.ds),
+            self.init._find_networking_config())
+        self.assertIn('WARNING: data source specifies an invalid network'
+                      ' cfg_source: invalid_src',
+                      self.logs.getvalue())
+
+    @mock.patch('cloudinit.stages.cmdline.read_initramfs_config')
     @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
-    def test_wb__find_networking_config_returns_kernel(self, m_cmdline):
+    def test__find_networking_config_warns_if_datasrc_uses_unavailable_src(
+            self, m_cmdline, m_initramfs):
+        """find_networking_config warns when the DS names an unavailable
+        source."""
+        ds_net_cfg = {'config': {'needle': True}}
+        self.init.datasource = FakeDataSource(network_config=ds_net_cfg)
+        self.init.datasource.network_config_sources = [
+            NetworkConfigSource.fallback, NetworkConfigSource.ds]
+
+        self.assertEqual(
+            (ds_net_cfg, NetworkConfigSource.ds),
+            self.init._find_networking_config())
+        self.assertIn('WARNING: data source specifies an unavailable network'
+                      ' cfg_source: fallback',
+                      self.logs.getvalue())
+
+    @mock.patch('cloudinit.stages.cmdline.read_initramfs_config')
+    @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
+    def test_wb__find_networking_config_returns_kernel(
+            self, m_cmdline, m_initramfs):
         """find_networking_config returns kernel cmdline config if present."""
         expected_cfg = {'config': ['fakekernel']}
         m_cmdline.return_value = expected_cfg
+        m_initramfs.return_value = {'config': ['fake_initrd']}
         self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}},
                           'network': {'config': ['fakesys_config']}}
         self.init.datasource = FakeDataSource(
             network_config={'config': ['fakedatasource']})
         self.assertEqual(
-            (expected_cfg, 'cmdline'),
+            (expected_cfg, NetworkConfigSource.cmdline),
             self.init._find_networking_config())

+    @mock.patch('cloudinit.stages.cmdline.read_initramfs_config')
     @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
-    def test_wb__find_networking_config_returns_system_cfg(self, m_cmdline):
+    def test_wb__find_networking_config_returns_initramfs(
+            self, m_cmdline, m_initramfs):
+        """find_networking_config returns initramfs config if present."""
+        expected_cfg = {'config': ['fake_initrd']}
+        m_cmdline.return_value = {}
+        m_initramfs.return_value = expected_cfg
+        self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}},
+                          'network': {'config': ['fakesys_config']}}
+        self.init.datasource = FakeDataSource(
+            network_config={'config': ['fakedatasource']})
+        self.assertEqual(
+            (expected_cfg, NetworkConfigSource.initramfs),
+            self.init._find_networking_config())
+
+    @mock.patch('cloudinit.stages.cmdline.read_initramfs_config')
+    @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
+    def test_wb__find_networking_config_returns_system_cfg(
+            self, m_cmdline, m_initramfs):
         """find_networking_config returns system config when present."""
         m_cmdline.return_value = {}  # No kernel network config
+        m_initramfs.return_value = {}  # no initramfs network config
         expected_cfg = {'config': ['fakesys_config']}
         self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}},
                           'network': expected_cfg}
         self.init.datasource = FakeDataSource(
             network_config={'config': ['fakedatasource']})
         self.assertEqual(
-            (expected_cfg, 'system_cfg'),
+            (expected_cfg,
NetworkConfigSource.system_cfg), self.init._find_networking_config()) + @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') - def test_wb__find_networking_config_returns_datasrc_cfg(self, m_cmdline): + def test_wb__find_networking_config_returns_datasrc_cfg( + self, m_cmdline, m_initramfs): """find_networking_config returns datasource net config if present.""" m_cmdline.return_value = {} # No kernel network config + m_initramfs.return_value = {} # no initramfs network config # No system config for network in setUp expected_cfg = {'config': ['fakedatasource']} self.init.datasource = FakeDataSource(network_config=expected_cfg) self.assertEqual( - (expected_cfg, 'ds'), + (expected_cfg, NetworkConfigSource.ds), self.init._find_networking_config()) + @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') - def test_wb__find_networking_config_returns_fallback(self, m_cmdline): + def test_wb__find_networking_config_returns_fallback( + self, m_cmdline, m_initramfs): """find_networking_config returns fallback config if not defined.""" m_cmdline.return_value = {} # Kernel doesn't disable networking + m_initramfs.return_value = {} # no initramfs network config # Neither datasource nor system_info disable or provide network fake_cfg = {'config': [{'type': 'physical', 'name': 'eth9'}], @@ -147,7 +254,7 @@ class TestInit(CiTestCase): distro = self.init.distro distro.generate_fallback_config = fake_generate_fallback self.assertEqual( - (fake_cfg, 'fallback'), + (fake_cfg, NetworkConfigSource.fallback), self.init._find_networking_config()) self.assertNotIn('network config disabled', self.logs.getvalue()) @@ -166,8 +273,9 @@ class TestInit(CiTestCase): 'INFO: network config is disabled by %s' % disable_file, self.logs.getvalue()) + @mock.patch('cloudinit.net.get_interfaces_by_mac') @mock.patch('cloudinit.distros.ubuntu.Distro') - def test_apply_network_on_new_instance(self, m_ubuntu): + def test_apply_network_on_new_instance(self, m_ubuntu, m_macs): """Call distro apply_network_config methods on is_new_instance.""" net_cfg = { 'version': 1, 'config': [ @@ -175,7 +283,9 @@ class TestInit(CiTestCase): 'name': 'eth9', 'mac_address': '42:42:42:42:42:42'}]} def fake_network_config(): - return net_cfg, 'fallback' + return net_cfg, NetworkConfigSource.fallback + + m_macs.return_value = {'42:42:42:42:42:42': 'eth9'} self.init._find_networking_config = fake_network_config self.init.apply_network_config(True) @@ -195,7 +305,7 @@ class TestInit(CiTestCase): 'name': 'eth9', 'mac_address': '42:42:42:42:42:42'}]} def fake_network_config(): - return net_cfg, 'fallback' + return net_cfg, NetworkConfigSource.fallback self.init._find_networking_config = fake_network_config self.init.apply_network_config(True) @@ -206,8 +316,9 @@ class TestInit(CiTestCase): " nor datasource network update on '%s' event" % EventType.BOOT, self.logs.getvalue()) + @mock.patch('cloudinit.net.get_interfaces_by_mac') @mock.patch('cloudinit.distros.ubuntu.Distro') - def test_apply_network_on_datasource_allowed_event(self, m_ubuntu): + def test_apply_network_on_datasource_allowed_event(self, m_ubuntu, m_macs): """Apply network if datasource.update_metadata permits BOOT event.""" old_instance_id = os.path.join( self.init.paths.get_cpath('data'), 'instance-id') @@ -218,7 +329,9 @@ class TestInit(CiTestCase): 'name': 'eth9', 'mac_address': '42:42:42:42:42:42'}]} def fake_network_config(): - return 
net_cfg, 'fallback' + return net_cfg, NetworkConfigSource.fallback + + m_macs.return_value = {'42:42:42:42:42:42': 'eth9'} self.init._find_networking_config = fake_network_config self.init.datasource = FakeDataSource(paths=self.init.paths) diff --git a/cloudinit/tests/test_temp_utils.py b/cloudinit/tests/test_temp_utils.py index ffbb92cd..4a52ef89 100644 --- a/cloudinit/tests/test_temp_utils.py +++ b/cloudinit/tests/test_temp_utils.py @@ -2,8 +2,9 @@ """Tests for cloudinit.temp_utils""" -from cloudinit.temp_utils import mkdtemp, mkstemp +from cloudinit.temp_utils import mkdtemp, mkstemp, tempdir from cloudinit.tests.helpers import CiTestCase, wrap_and_call +import os class TestTempUtils(CiTestCase): @@ -98,4 +99,19 @@ class TestTempUtils(CiTestCase): self.assertEqual('/fake/return/path', retval) self.assertEqual([{'dir': '/run/cloud-init/tmp'}], calls) + def test_tempdir_error_suppression(self): + """test tempdir suppresses errors during directory removal.""" + + with self.assertRaises(OSError): + with tempdir(prefix='cloud-init-dhcp-') as tdir: + os.rmdir(tdir) + # As a result, the directory is already gone, + # so shutil.rmtree should raise OSError + + with tempdir(rmtree_ignore_errors=True, + prefix='cloud-init-dhcp-') as tdir: + os.rmdir(tdir) + # Since the directory is already gone, shutil.rmtree would raise + # OSError, but we suppress that + # vi: ts=4 expandtab diff --git a/cloudinit/tests/test_url_helper.py b/cloudinit/tests/test_url_helper.py index aa9f3ec1..1674120f 100644 --- a/cloudinit/tests/test_url_helper.py +++ b/cloudinit/tests/test_url_helper.py @@ -4,6 +4,7 @@ from cloudinit.url_helper import ( NOT_FOUND, UrlError, oauth_headers, read_file_or_url, retry_on_url_exc) from cloudinit.tests.helpers import CiTestCase, mock, skipIf from cloudinit import util +from cloudinit import version import httpretty import requests @@ -17,6 +18,9 @@ except ImportError: _missing_oauthlib_dep = True +M_PATH = 'cloudinit.url_helper.' 
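The tempdir() behaviour exercised by test_tempdir_error_suppression above can also be shown directly; a brief usage sketch (assuming cloudinit is importable; the prefix value is illustrative):

    import os
    from cloudinit.temp_utils import tempdir

    # Cleanup normally raises if the directory vanished during the context;
    # with rmtree_ignore_errors=True (new in this change) it is suppressed.
    with tempdir(rmtree_ignore_errors=True, prefix='example-') as tdir:
        os.rmdir(tdir)  # simulate something else removing it first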
+ + class TestOAuthHeaders(CiTestCase): def test_oauth_headers_raises_not_implemented_when_oathlib_missing(self): @@ -67,6 +71,55 @@ class TestReadFileOrUrl(CiTestCase): self.assertEqual(result.contents, data) self.assertEqual(str(result), data.decode('utf-8')) + @mock.patch(M_PATH + 'readurl') + def test_read_file_or_url_passes_params_to_readurl(self, m_readurl): + """read_file_or_url passes all params through to readurl.""" + url = 'http://hostname/path' + response = 'This is my url content\n' + m_readurl.return_value = response + params = {'url': url, 'timeout': 1, 'retries': 2, + 'headers': {'somehdr': 'val'}, + 'data': 'data', 'sec_between': 1, + 'ssl_details': {'cert_file': '/path/cert.pem'}, + 'headers_cb': 'headers_cb', 'exception_cb': 'exception_cb'} + self.assertEqual(response, read_file_or_url(**params)) + params.pop('url') # url is passed in as a positional arg + self.assertEqual([mock.call(url, **params)], m_readurl.call_args_list) + + def test_wb_read_url_defaults_honored_by_read_file_or_url_callers(self): + """Readurl param defaults used when unspecified by read_file_or_url + + Param defaults tested are as follows: + retries: 0, additional headers None beyond default, method: GET, + data: None, check_status: True and allow_redirects: True + """ + url = 'http://hostname/path' + + m_response = mock.MagicMock() + + class FakeSession(requests.Session): + @classmethod + def request(cls, **kwargs): + self.assertEqual( + {'url': url, 'allow_redirects': True, 'method': 'GET', + 'headers': { + 'User-Agent': 'Cloud-Init/%s' % ( + version.version_string())}}, + kwargs) + return m_response + + with mock.patch(M_PATH + 'requests.Session') as m_session: + error = requests.exceptions.HTTPError('broke') + m_session.side_effect = [error, FakeSession()] + # assert no retries and check_status == True + with self.assertRaises(UrlError) as context_manager: + response = read_file_or_url(url) + self.assertEqual('broke', str(context_manager.exception)) + # assert default headers, method, url and allow_redirects True + # Success on 2nd call with FakeSession + response = read_file_or_url(url) + self.assertEqual(m_response, response._response) + class TestRetryOnUrlExc(CiTestCase): diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py index e3d2dbaa..11f37000 100644 --- a/cloudinit/tests/test_util.py +++ b/cloudinit/tests/test_util.py @@ -2,7 +2,9 @@ """Tests for cloudinit.util""" +import base64 import logging +import json import platform import cloudinit.util as util @@ -187,6 +189,21 @@ class TestUtil(CiTestCase): self.assertEqual(is_rw, False) +class TestUptime(CiTestCase): + + @mock.patch('cloudinit.util.boottime') + @mock.patch('cloudinit.util.os.path.exists') + @mock.patch('cloudinit.util.time.time') + def test_uptime_non_linux_path(self, m_time, m_exists, m_boottime): + boottime = 1000.0 + uptime = 10.0 + m_boottime.return_value = boottime + m_time.return_value = boottime + uptime + m_exists.return_value = False + result = util.uptime() + self.assertEqual(str(uptime), result) + + class TestShellify(CiTestCase): def test_input_dict_raises_type_error(self): @@ -385,6 +402,11 @@ class TestUdevadmSettle(CiTestCase): @mock.patch('os.path.exists') class TestGetLinuxDistro(CiTestCase): + def setUp(self): + # python2 has no lru_cache, and therefore, no cache_clear() + if hasattr(util.get_linux_distro, "cache_clear"): + util.get_linux_distro.cache_clear() + @classmethod def os_release_exists(self, path): """Side effect function""" @@ -397,6 +419,12 @@ class 
TestGetLinuxDistro(CiTestCase): if path == '/etc/redhat-release': return 1 + @classmethod + def freebsd_version_exists(self, path): + """Side effect function """ + if path == '/bin/freebsd-version': + return 1 + @mock.patch('cloudinit.util.load_file') def test_get_linux_distro_quoted_name(self, m_os_release, m_path_exists): """Verify we get the correct name if the os-release file has @@ -415,6 +443,14 @@ class TestGetLinuxDistro(CiTestCase): dist = util.get_linux_distro() self.assertEqual(('ubuntu', '16.04', 'xenial'), dist) + @mock.patch('cloudinit.util.subp') + def test_get_linux_freebsd(self, m_subp, m_path_exists): + """Verify we get the correct name and release name on FreeBSD.""" + m_path_exists.side_effect = TestGetLinuxDistro.freebsd_version_exists + m_subp.return_value = ("12.0-RELEASE-p10\n", '') + dist = util.get_linux_distro() + self.assertEqual(('freebsd', '12.0-RELEASE-p10', ''), dist) + @mock.patch('cloudinit.util.load_file') def test_get_linux_centos6(self, m_os_release, m_path_exists): """Verify we get the correct name and release name on CentOS 6.""" @@ -502,7 +538,7 @@ class TestGetLinuxDistro(CiTestCase): self.assertEqual( ('opensuse-tumbleweed', '20180920', platform.machine()), dist) - @mock.patch('platform.dist') + @mock.patch('platform.dist', create=True) def test_get_linux_distro_no_data(self, m_platform_dist, m_path_exists): """Verify we get no information if os-release does not exist""" m_platform_dist.return_value = ('', '', '') @@ -510,7 +546,7 @@ class TestGetLinuxDistro(CiTestCase): dist = util.get_linux_distro() self.assertEqual(('', '', ''), dist) - @mock.patch('platform.dist') + @mock.patch('platform.dist', create=True) def test_get_linux_distro_no_impl(self, m_platform_dist, m_path_exists): """Verify we get an empty tuple when no information exists and Exceptions are not propagated""" @@ -519,7 +555,7 @@ class TestGetLinuxDistro(CiTestCase): dist = util.get_linux_distro() self.assertEqual(('', '', ''), dist) - @mock.patch('platform.dist') + @mock.patch('platform.dist', create=True) def test_get_linux_distro_plat_data(self, m_platform_dist, m_path_exists): """Verify we get the correct platform information""" m_platform_dist.return_value = ('foo', '1.1', 'aarch64') @@ -528,6 +564,24 @@ class TestGetLinuxDistro(CiTestCase): self.assertEqual(('foo', '1.1', 'aarch64'), dist) +class TestJsonDumps(CiTestCase): + def test_is_str(self): + """json_dumps should return a string.""" + self.assertTrue(isinstance(util.json_dumps({'abc': '123'}), str)) + + def test_utf8(self): + smiley = '\\ud83d\\ude03' + self.assertEqual( + {'smiley': smiley}, + json.loads(util.json_dumps({'smiley': smiley}))) + + def test_non_utf8(self): + blob = b'\xba\x03Qx-#y\xea' + self.assertEqual( + {'blob': 'ci-b64:' + base64.b64encode(blob).decode('utf-8')}, + json.loads(util.json_dumps({'blob': blob}))) + + @mock.patch('os.path.exists') class TestIsLXD(CiTestCase): diff --git a/cloudinit/tests/test_version.py b/cloudinit/tests/test_version.py index a96c2a47..778a762c 100644 --- a/cloudinit/tests/test_version.py +++ b/cloudinit/tests/test_version.py @@ -1,10 +1,10 @@ # This file is part of cloud-init. See LICENSE file for license information. 
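For context on the TestJsonDumps cases above: cloud-init serialises values that are not JSON/UTF-8 representable as a base64 string carrying a 'ci-b64:' prefix. A small sketch of that convention (the decoding helper below is illustrative, not part of this change):

    import base64
    from cloudinit import util

    blob = b'\xba\x03Qx-#y\xea'          # not decodable as UTF-8
    print(util.json_dumps({'blob': blob}))
    # the value is emitted as "ci-b64:ugNReC0jeeo="

    # An illustrative reader for the convention:
    def maybe_b64decode(value):
        if isinstance(value, str) and value.startswith('ci-b64:'):
            return base64.b64decode(value[len('ci-b64:'):])
        return value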
+from unittest import mock + from cloudinit.tests.helpers import CiTestCase from cloudinit import version -import mock - class TestExportsFeatures(CiTestCase): def test_has_network_config_v1(self): diff --git a/cloudinit/type_utils.py b/cloudinit/type_utils.py index 6132654b..2c1ae368 100644 --- a/cloudinit/type_utils.py +++ b/cloudinit/type_utils.py @@ -10,29 +10,18 @@ import types -import six - -if six.PY3: - _NAME_TYPES = ( - types.ModuleType, - types.FunctionType, - types.LambdaType, - type, - ) -else: - _NAME_TYPES = ( - types.TypeType, - types.ModuleType, - types.FunctionType, - types.LambdaType, - types.ClassType, - ) +_NAME_TYPES = ( + types.ModuleType, + types.FunctionType, + types.LambdaType, + type, +) def obj_name(obj): if isinstance(obj, _NAME_TYPES): - return six.text_type(obj.__name__) + return str(obj.__name__) else: if not hasattr(obj, '__class__'): return repr(obj) diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 396d69ae..eeb27aa8 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -8,39 +8,31 @@ # # This file is part of cloud-init. See LICENSE file for license information. +import copy import json import os -import requests -import six import time - from email.utils import parsedate from errno import ENOENT from functools import partial +from http.client import NOT_FOUND from itertools import count -from requests import exceptions +from urllib.parse import urlparse, urlunparse, quote -from six.moves.urllib.parse import ( - urlparse, urlunparse, - quote as urlquote) +import requests +from requests import exceptions from cloudinit import log as logging from cloudinit import version LOG = logging.getLogger(__name__) -if six.PY2: - import httplib - NOT_FOUND = httplib.NOT_FOUND -else: - import http.client - NOT_FOUND = http.client.NOT_FOUND - # Check if requests has ssl support (added in requests >= 0.8.8) SSL_ENABLED = False CONFIG_ENABLED = False # This was added in 0.7 (but taken out in >=1.0) _REQ_VER = None +REDACTED = 'REDACTED' try: from distutils.version import LooseVersion import pkg_resources @@ -71,7 +63,7 @@ def combine_url(base, *add_ons): path = url_parsed[2] if path and not path.endswith("/"): path += "/" - path += urlquote(str(add_on), safe="/:") + path += quote(str(add_on), safe="/:") url_parsed[2] = path return urlunparse(url_parsed) @@ -81,14 +73,19 @@ def combine_url(base, *add_ons): return url -def read_file_or_url(url, timeout=5, retries=10, - headers=None, data=None, sec_between=1, ssl_details=None, - headers_cb=None, exception_cb=None): +def read_file_or_url(url, **kwargs): + """Wrapper function around readurl to allow passing a file path as url. + + When url is not a local file path, passthrough any kwargs to readurl. + + In the case of parameter passthrough to readurl, readurl's own defaults + apply for any unspecified parameters. See: call-signature of readurl in + this module for param docs. 
+ """ url = url.lstrip() if url.startswith("/"): url = "file://%s" % url if url.lower().startswith("file://"): - if data: + if kwargs.get("data"): LOG.warning("Unable to post data to file resource %s", url) file_path = url[len("file://"):] try: @@ -101,10 +98,7 @@ def read_file_or_url(url, timeout=5, retries=10, raise UrlError(cause=e, code=code, headers=None, url=url) return FileResponse(file_path, contents=contents) else: - return readurl(url, timeout=timeout, retries=retries, headers=headers, - headers_cb=headers_cb, data=data, - sec_between=sec_between, ssl_details=ssl_details, - exception_cb=exception_cb) + return readurl(url, **kwargs) # Made to have same accessors as UrlResponse so that the @@ -197,20 +191,53 @@ def _get_ssl_args(url, ssl_details): def readurl(url, data=None, timeout=None, retries=0, sec_between=1, - headers=None, headers_cb=None, ssl_details=None, - check_status=True, allow_redirects=True, exception_cb=None, - session=None, infinite=False, log_req_resp=True): + headers=None, headers_cb=None, headers_redact=None, + ssl_details=None, check_status=True, allow_redirects=True, + exception_cb=None, session=None, infinite=False, log_req_resp=True, + request_method=None): + """Wrapper around requests.Session to read the url and retry if necessary + + :param url: Mandatory url to request. + :param data: Optional form data to post to the URL. Will set request_method + to 'POST' if present. + :param timeout: Timeout in seconds to wait for a response + :param retries: Number of times to retry on exception if exception_cb is + None or exception_cb returns True for the exception caught. Default is + to fail with 0 retries on exception. + :param sec_between: Default 1: number of seconds passed to time.sleep + between retries. None or -1 means don't sleep. + :param headers: Optional dict of headers to send during request + :param headers_cb: Optional callable returning a dict of values to send as + headers during request + :param headers_redact: Optional list of header names to redact from the log + :param ssl_details: Optional dict providing key_file, ca_certs, and + cert_file keys for use in ssl connections. + :param check_status: Optional boolean set True to raise when HTTPError + occurs. Default: True. + :param allow_redirects: Optional boolean passed straight to Session.request + as 'allow_redirects'. Default: True. + :param exception_cb: Optional callable which accepts the params + msg and exception and returns a boolean True if retries are permitted. + :param session: Optional existing requests.Session instance to reuse. + :param infinite: Bool, set True to retry indefinitely. Default: False. + :param log_req_resp: Set False to turn off verbose debug messages. + :param request_method: String passed as 'method' to Session.request. + Typically GET, or POST. Default: POST if data is provided, GET + otherwise. + """ url = _cleanurl(url) req_args = { 'url': url, } req_args.update(_get_ssl_args(url, ssl_details)) req_args['allow_redirects'] = allow_redirects - req_args['method'] = 'GET' + if not request_method: + request_method = 'POST' if data else 'GET' + req_args['method'] = request_method if timeout is not None: req_args['timeout'] = max(float(timeout), 0) - if data: - req_args['method'] = 'POST' + if headers_redact is None: + headers_redact = [] # It doesn't seem like config # was added in older library versions (or newer ones either), thus we # need to manually do the retries if it wasn't... 
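For illustration, a minimal sketch of calling readurl with the new headers_redact and request_method parameters documented above. The endpoint and token-TTL header follow AWS's IMDSv2 convention (see the 20.1 changelog entry for #219); the exact values are only an example:

    from cloudinit.url_helper import readurl

    # Request an IMDSv2 session token via PUT; without form data, readurl
    # would otherwise default the method to GET.
    response = readurl(
        'http://169.254.169.254/latest/api/token',
        request_method='PUT',
        headers={'X-aws-ec2-metadata-token-ttl-seconds': '21600'},
        # Keep the token-related header value out of cloud-init's debug log.
        headers_redact=['X-aws-ec2-metadata-token-ttl-seconds'],
        retries=2, sec_between=1)
    print(response.contents)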
@@ -255,6 +282,12 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, if k == 'data': continue filtered_req_args[k] = v + if k == 'headers': + for hkey, _hval in v.items(): + if hkey in headers_redact: + filtered_req_args[k][hkey] = ( + copy.deepcopy(req_args[k][hkey])) + filtered_req_args[k][hkey] = REDACTED try: if log_req_resp: @@ -307,9 +340,9 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, return None # Should throw before this... -def wait_for_url(urls, max_wait=None, timeout=None, - status_cb=None, headers_cb=None, sleep_time=1, - exception_cb=None, sleep_time_cb=None): +def wait_for_url(urls, max_wait=None, timeout=None, status_cb=None, + headers_cb=None, headers_redact=None, sleep_time=1, + exception_cb=None, sleep_time_cb=None, request_method=None): """ urls: a list of urls to try max_wait: roughly the maximum time to wait before giving up @@ -320,15 +353,18 @@ def wait_for_url(urls, max_wait=None, timeout=None, status_cb: call method with string message when a url is not available headers_cb: call method with single argument of url to get headers for request. + headers_redact: a list of header names to redact from the log exception_cb: call method with 2 arguments 'msg' (per status_cb) and 'exception', the exception that occurred. sleep_time_cb: call method with 2 arguments (response, loop_n) that generates the next sleep time. + request_method: indicate the type of HTTP request, GET, PUT, or POST + returns: tuple of (url, response contents), on failure, (False, None) - the idea of this routine is to wait for the EC2 metdata service to + the idea of this routine is to wait for the EC2 metadata service to come up. On both Eucalyptus and EC2 we have seen the case where the instance hit the MD before the MD service was up. EC2 seems - to have permenantely fixed this, though. + to have permanently fixed this, though. In openstack, the metadata service might be painfully slow, and unable to avoid hitting a timeout of even up to 10 seconds or more @@ -337,7 +373,7 @@ def wait_for_url(urls, max_wait=None, timeout=None, Offset those needs with the need to not hang forever (and block boot) on a system where cloud-init is configured to look for EC2 Metadata service but is not going to find one. It is possible that the instance - data host (169.254.169.254) may be firewalled off Entirely for a sytem, + data host (169.254.169.254) may be firewalled off Entirely for a system, meaning that the connection will block forever unless a timeout is set. A value of None for max_wait will retry indefinitely. 
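To make the new return type concrete, a short usage sketch of wait_for_url after this change: it now returns a (url, contents) tuple on success and (False, None) on failure, where it previously returned only the url or False. The metadata URL and header name below are illustrative:

    from cloudinit.url_helper import wait_for_url

    url, contents = wait_for_url(
        urls=['http://169.254.169.254/latest/meta-data/'],
        max_wait=120, timeout=10,
        headers_redact=['X-aws-ec2-metadata-token'],
        request_method='GET')
    if url:
        print('metadata service answered at %s' % url)
    else:
        print('no metadata service found')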
@@ -380,8 +416,10 @@ def wait_for_url(urls, max_wait=None, timeout=None, else: headers = {} - response = readurl(url, headers=headers, timeout=timeout, - check_status=False) + response = readurl( + url, headers=headers, headers_redact=headers_redact, + timeout=timeout, check_status=False, + request_method=request_method) if not response.contents: reason = "empty response [%s]" % (response.code) url_exc = UrlError(ValueError(reason), code=response.code, @@ -391,7 +429,7 @@ def wait_for_url(urls, max_wait=None, timeout=None, url_exc = UrlError(ValueError(reason), code=response.code, headers=response.headers, url=url) else: - return url + return url, response.contents except UrlError as e: reason = "request error [%s]" % e url_exc = e @@ -420,7 +458,7 @@ def wait_for_url(urls, max_wait=None, timeout=None, sleep_time) time.sleep(sleep_time) - return False + return False, None class OauthUrlHelper(object): @@ -521,7 +559,7 @@ class OauthUrlHelper(object): if extra_exception_cb: ret = extra_exception_cb(msg, exception) finally: - self.exception_cb(msg, exception) + self.exception_cb(msg, exception) return ret def _headers_cb(self, extra_headers_cb, url): diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py index ed83d2d8..6f41b03a 100644 --- a/cloudinit/user_data.py +++ b/cloudinit/user_data.py @@ -9,14 +9,11 @@ # This file is part of cloud-init. See LICENSE file for license information. import os - from email.mime.base import MIMEBase from email.mime.multipart import MIMEMultipart from email.mime.nonmultipart import MIMENonMultipart from email.mime.text import MIMEText -import six - from cloudinit import handlers from cloudinit import log as logging from cloudinit.url_helper import read_file_or_url, UrlError @@ -224,7 +221,7 @@ class UserDataProcessor(object): content = util.load_file(include_once_fn) else: try: - resp = read_file_or_url(include_url, + resp = read_file_or_url(include_url, timeout=5, retries=10, ssl_details=self.ssl_details) if include_once_on and resp.ok(): util.write_file(include_once_fn, resp.contents, @@ -259,7 +256,7 @@ class UserDataProcessor(object): # filename and type not be present # or # scalar(payload) - if isinstance(ent, six.string_types): + if isinstance(ent, str): ent = {'content': ent} if not isinstance(ent, (dict)): # TODO(harlowja) raise? 
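The user_data.py change above is a direct consequence of read_file_or_url becoming a thin passthrough: readurl's own defaults (notably retries=0) now apply unless the caller overrides them. A brief sketch of the distinction, assuming a reachable example URL:

    from cloudinit.url_helper import read_file_or_url

    # Local paths bypass readurl entirely; a 'data' kwarg only logs a
    # warning, since you cannot POST to a file.
    resp = read_file_or_url('/etc/hostname')

    # Remote URLs inherit readurl's defaults (retries=0), so callers that
    # relied on the old retries=10 default must now pass it explicitly,
    # as the #include handling above does.
    resp = read_file_or_url('http://example.com/user-data',
                            timeout=5, retries=10)
    print(resp.contents)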
@@ -269,13 +266,13 @@ class UserDataProcessor(object): mtype = ent.get('type') if not mtype: default = ARCHIVE_UNDEF_TYPE - if isinstance(content, six.binary_type): + if isinstance(content, bytes): default = ARCHIVE_UNDEF_BINARY_TYPE mtype = handlers.type_from_starts_with(content, default) maintype, subtype = mtype.split('/', 1) if maintype == "text": - if isinstance(content, six.binary_type): + if isinstance(content, bytes): content = content.decode() msg = MIMEText(content, _subtype=subtype) else: @@ -348,7 +345,7 @@ def convert_string(raw_data, content_type=NOT_MULTIPART_TYPE): msg.set_payload(data) return msg - if isinstance(raw_data, six.text_type): + if isinstance(raw_data, str): bdata = raw_data.encode('utf-8') else: bdata = raw_data diff --git a/cloudinit/util.py b/cloudinit/util.py index 7800f7bc..c02b3d9a 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -10,7 +10,6 @@ import contextlib import copy as obj_copy -import ctypes import email import glob import grp @@ -38,7 +37,6 @@ from base64 import b64decode, b64encode from six.moves.urllib import parse as urlparse import six -import yaml from cloudinit import importer from cloudinit import log as logging @@ -52,9 +50,14 @@ from cloudinit import version from cloudinit.settings import (CFG_BUILTIN) try: - string_types = (basestring,) -except NameError: - string_types = (str,) + from functools import lru_cache +except ImportError: + def lru_cache(): + """pass-thru replace for Python3's lru_cache()""" + def wrapper(f): + return f + return wrapper + _DNS_REDIRECT_IP = None LOG = logging.getLogger(__name__) @@ -74,19 +77,21 @@ CONTAINER_TESTS = (['systemd-detect-virt', '--quiet', '--container'], ['running-in-container'], ['lxc-is-container']) -PROC_CMDLINE = None - -_LSB_RELEASE = {} -PY26 = sys.version_info[0:2] == (2, 6) +@lru_cache() +def get_dpkg_architecture(target=None): + """Return the sanitized string output by `dpkg --print-architecture`. -def get_architecture(target=None): + N.B. This function is wrapped in functools.lru_cache, so repeated calls + won't shell out every time. 
+ """ out, _ = subp(['dpkg', '--print-architecture'], capture=True, target=target) return out.strip() -def _lsb_release(target=None): +@lru_cache() +def lsb_release(target=None): fmap = {'Codename': 'codename', 'Description': 'description', 'Distributor ID': 'id', 'Release': 'release'} @@ -109,23 +114,11 @@ return data -def lsb_release(target=None): - if target_path(target) != "/": - # do not use or update cache if target is provided - return _lsb_release(target) - - global _LSB_RELEASE - if not _LSB_RELEASE: - data = _lsb_release() - _LSB_RELEASE.update(data) - return _LSB_RELEASE - - def target_path(target, path=None): # return 'path' inside target, accepting target as None if target in (None, ""): target = "/" - elif not isinstance(target, string_types): + elif not isinstance(target, six.string_types): raise ValueError("Unexpected input for target: %s" % target) else: target = os.path.abspath(target) @@ -404,9 +397,10 @@ def translate_bool(val, addons=None): def rand_str(strlen=32, select_from=None): + r = random.SystemRandom() if not select_from: select_from = string.ascii_letters + string.digits - return "".join([random.choice(select_from) for _x in range(0, strlen)]) + return "".join([r.choice(select_from) for _x in range(0, strlen)]) def rand_dict_key(dictionary, postfix=None): @@ -553,6 +547,7 @@ def is_ipv4(instr): return len(toks) == 4 +@lru_cache() def is_FreeBSD(): return system_info()['variant'] == "freebsd" @@ -602,6 +597,7 @@ def _parse_redhat_release(release_file=None): return {} +@lru_cache() def get_linux_distro(): distro_name = '' distro_version = '' @@ -629,11 +625,15 @@ def get_linux_distro(): flavor = match.groupdict()['codename'] if distro_name == 'rhel': distro_name = 'redhat' + elif os.path.exists('/bin/freebsd-version'): + distro_name = 'freebsd' + distro_version, _ = subp(['uname', '-r']) + distro_version = distro_version.strip() else: dist = ('', '', '') try: - # Will be removed in 3.7 - dist = platform.dist() # pylint: disable=W1505 + # Was removed in 3.8 + dist = platform.dist() # pylint: disable=W1505,E1101 except Exception: pass finally: @@ -649,6 +649,7 @@ def get_linux_distro(): return (distro_name, distro_version, flavor) +@lru_cache() def system_info(): info = { 'platform': platform.platform(), @@ -662,7 +663,8 @@ def system_info(): var = 'unknown' if system == "linux": linux_dist = info['dist'][0].lower() - if linux_dist in ('centos', 'debian', 'fedora', 'rhel', 'suse'): + if linux_dist in ( + 'arch', 'centos', 'debian', 'fedora', 'rhel', 'suse'): var = linux_dist elif linux_dist in ('ubuntu', 'linuxmint', 'mint'): var = 'ubuntu' @@ -709,6 +711,21 @@ def get_cfg_option_list(yobj, key, default=None): # get a cfg entry by its path array # for f['a']['b']: get_cfg_by_path(mycfg,('a','b')) def get_cfg_by_path(yobj, keyp, default=None): + """Return the value of the item at path C{keyp} in C{yobj}. + + example: + get_cfg_by_path({'a': {'b': {'num': 4}}}, 'a/b/num') == 4 + get_cfg_by_path({'a': {'b': {'num': 4}}}, 'c/d') == None + + @param yobj: A dictionary. + @param keyp: A path inside yobj. It can be a '/' delimited string, + or an iterable. + @param default: The default to return if the path does not exist. + @return: The value of the item at keyp, or default if the path 
+ is not found.""" + + if isinstance(keyp, six.string_types): + keyp = keyp.split("/") cur = yobj for tok in keyp: if tok not in cur: @@ -948,7 +965,7 @@ def load_yaml(blob, default=None, allowed=(dict,)): " but got %s instead") % (allowed, type_utils.obj_name(converted))) loaded = converted - except (yaml.YAMLError, TypeError, ValueError) as e: + except (safeyaml.YAMLError, TypeError, ValueError) as e: msg = 'Failed loading yaml blob' mark = None if hasattr(e, 'context_mark') and getattr(e, 'context_mark'): @@ -966,13 +983,6 @@ def load_yaml(blob, default=None, allowed=(dict,)): def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0): - if base.startswith("/"): - base = "file://%s" % base - - # default retries for file is 0. for network is 10 - if base.startswith("file://"): - retries = file_retries - if base.find("%s") >= 0: ud_url = base % ("user-data" + ext) md_url = base % ("meta-data" + ext) @@ -980,14 +990,14 @@ def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0): ud_url = "%s%s%s" % (base, "user-data", ext) md_url = "%s%s%s" % (base, "meta-data", ext) - md_resp = url_helper.read_file_or_url(md_url, timeout, retries, - file_retries) + md_resp = url_helper.read_file_or_url(md_url, timeout=timeout, + retries=retries) md = None if md_resp.ok(): md = load_yaml(decode_binary(md_resp.contents), default={}) - ud_resp = url_helper.read_file_or_url(ud_url, timeout, retries, - file_retries) + ud_resp = url_helper.read_file_or_url(ud_url, timeout=timeout, + retries=retries) ud = None if ud_resp.ok(): ud = ud_resp.contents @@ -1362,14 +1372,8 @@ def load_file(fname, read_cb=None, quiet=False, decode=True): return contents -def get_cmdline(): - if 'DEBUG_PROC_CMDLINE' in os.environ: - return os.environ["DEBUG_PROC_CMDLINE"] - - global PROC_CMDLINE - if PROC_CMDLINE is not None: - return PROC_CMDLINE - +@lru_cache() +def _get_cmdline(): if is_container(): try: contents = load_file("/proc/1/cmdline") @@ -1384,10 +1388,16 @@ def get_cmdline(): except Exception: cmdline = "" - PROC_CMDLINE = cmdline return cmdline +def get_cmdline(): + if 'DEBUG_PROC_CMDLINE' in os.environ: + return os.environ["DEBUG_PROC_CMDLINE"] + + return _get_cmdline() + + def pipe_in_out(in_fh, out_fh, chunk_size=1024, chunk_cb=None): bytes_piped = 0 while True: @@ -1590,20 +1600,33 @@ def json_serialize_default(_obj): return 'Warning: redacted unserializable type {0}'.format(type(_obj)) -def json_dumps(data): - """Return data in nicely formatted json.""" - return json.dumps(data, indent=1, sort_keys=True, - separators=(',', ': '), default=json_serialize_default) +def json_preserialize_binary(data): + """Preserialize any discovered binary values to avoid json.dumps issues. + Used only on python 2.7 where default type handling is not honored for + failure to encode binary data. LP: #1801364. 
+ TODO(Drop this function when py2.7 support is dropped from cloud-init) + """ + data = obj_copy.deepcopy(data) + for key, value in data.items(): + if isinstance(value, (dict)): + data[key] = json_preserialize_binary(value) + if isinstance(value, bytes): + data[key] = 'ci-b64:{0}'.format(b64e(value)) + return data -def yaml_dumps(obj, explicit_start=True, explicit_end=True): - """Return data in nicely formatted yaml.""" - return yaml.safe_dump(obj, - line_break="\n", - indent=4, - explicit_start=explicit_start, - explicit_end=explicit_end, - default_flow_style=False) + +def json_dumps(data): + """Return data in nicely formatted json.""" + try: + return json.dumps( + data, indent=1, sort_keys=True, separators=(',', ': '), + default=json_serialize_default) + except UnicodeDecodeError: + if sys.version_info[:2] == (2, 7): + data = json_preserialize_binary(data) + return json.dumps(data) + raise def ensure_dir(path, mode=None): @@ -1667,7 +1690,7 @@ def mounts(): return mounted -def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True, +def mount_cb(device, callback, data=None, mtype=None, update_env_for_mount=None): """ Mount the device, call method 'callback' passing the directory @@ -1714,18 +1737,7 @@ def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True, for mtype in mtypes: mountpoint = None try: - mountcmd = ['mount'] - mountopts = [] - if rw: - mountopts.append('rw') - else: - mountopts.append('ro') - if sync: - # This seems like the safe approach to do - # (ie where this is on by default) - mountopts.append("sync") - if mountopts: - mountcmd.extend(["-o", ",".join(mountopts)]) + mountcmd = ['mount', '-o', 'ro'] if mtype: mountcmd.extend(['-t', mtype]) mountcmd.append(device) @@ -1792,6 +1804,33 @@ def time_rfc2822(): return ts +def boottime(): + """Use sysctlbyname(3) via ctypes to find kern.boottime + + kern.boottime is of type struct timeval. Here we create a + private class to easier unpack it. + + @return boottime: float to be compatible with linux + """ + import ctypes + + NULL_BYTES = b"\x00" + + class timeval(ctypes.Structure): + _fields_ = [ + ("tv_sec", ctypes.c_int64), + ("tv_usec", ctypes.c_int64) + ] + libc = ctypes.CDLL('/lib/libc.so.7') + size = ctypes.c_size_t() + size.value = ctypes.sizeof(timeval) + buf = timeval() + if libc.sysctlbyname(b"kern.boottime" + NULL_BYTES, ctypes.byref(buf), + ctypes.byref(size), None, 0) != -1: + return buf.tv_sec + buf.tv_usec / 1000000.0 + raise RuntimeError("Unable to retrieve kern.boottime on this system") + + def uptime(): uptime_str = '??' 
method = 'unknown' @@ -1803,15 +1842,8 @@ def uptime(): uptime_str = contents.split()[0] else: method = 'ctypes' - libc = ctypes.CDLL('/lib/libc.so.7') - size = ctypes.c_size_t() - buf = ctypes.c_int() - size.value = ctypes.sizeof(buf) - libc.sysctlbyname("kern.boottime", ctypes.byref(buf), - ctypes.byref(size), None, 0) - now = time.time() - bootup = buf.value - uptime_str = now - bootup + # This is the *BSD codepath + uptime_str = str(time.time() - boottime()) except Exception: logexc(LOG, "Unable to read uptime using method: %s" % method) @@ -2336,17 +2368,21 @@ def parse_mtab(path): return None -def find_freebsd_part(label_part): - if label_part.startswith("/dev/label/"): - target_label = label_part[5:] - (label_part, _err) = subp(['glabel', 'status', '-s']) - for labels in label_part.split("\n"): +def find_freebsd_part(fs): + splitted = fs.split('/') + if len(splitted) == 3: + return splitted[2] + elif splitted[2] in ['label', 'gpt', 'ufs']: + target_label = fs[5:] + (part, _err) = subp(['glabel', 'status', '-s']) + for labels in part.split("\n"): items = labels.split() - if len(items) > 0 and items[0].startswith(target_label): - label_part = items[2] + if len(items) > 0 and items[0] == target_label: + part = items[2] break - label_part = str(label_part) - return label_part + return str(part) + else: + LOG.warning("Unexpected input in find_freebsd_part: %s", fs) def get_path_dev_freebsd(path, mnt_list): @@ -2665,8 +2701,8 @@ def _call_dmidecode(key, dmidecode_path): try: cmd = [dmidecode_path, "--string", key] (result, _err) = subp(cmd) - LOG.debug("dmidecode returned '%s' for '%s'", result, key) result = result.strip() + LOG.debug("dmidecode returned '%s' for '%s'", result, key) if result.replace(".", "") == "": return "" return result @@ -2817,9 +2853,6 @@ def load_shell_content(content, add_empty=False, empty_val=None): variables. Set their value to empty_val.""" def _shlex_split(blob): - if PY26 and isinstance(blob, six.text_type): - # Older versions don't support unicode input - blob = blob.encode("utf8") return shlex.split(blob, comments=True) data = {} @@ -2876,4 +2909,20 @@ def udevadm_settle(exists=None, timeout=None): return subp(settle_cmd) +def get_proc_ppid(pid): + """ + Return the parent pid of a process. + """ + ppid = 0 + try: + contents = load_file("/proc/%s/stat" % pid, quiet=True) + except IOError as e: + LOG.warning('Failed to load /proc/%s/stat. %s', pid, e) + if contents: + parts = contents.split(" ", 4) + # man proc says + # ppid %d (4) The PID of the parent. + ppid = int(parts[3]) + return ppid + # vi: ts=4 expandtab diff --git a/cloudinit/version.py b/cloudinit/version.py index a2c5d43a..1bc1899c 100644 --- a/cloudinit/version.py +++ b/cloudinit/version.py @@ -4,7 +4,7 @@ # # This file is part of cloud-init. See LICENSE file for license information. -__VERSION__ = "18.5" +__VERSION__ = "20.1" _PACKAGED_VERSION = '@@PACKAGED_VERSION@@' FEATURES = [ diff --git a/config/cloud.cfg.d/README b/config/cloud.cfg.d/README index 60702e9d..036b80bf 100644 --- a/config/cloud.cfg.d/README +++ b/config/cloud.cfg.d/README @@ -1,3 +1,3 @@ -# All files in this directory will be read by cloud-init -# They are read in lexical order. Later files overwrite values in +# All files with the '.cfg' extension in this directory will be read by +# cloud-init. They are read in lexical order. Later files overwrite values in # earlier files. 
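A quick sketch exercising the util helpers added above; output values are illustrative:

    import os
    from cloudinit import util

    # uptime() prefers /proc/uptime on Linux; on *BSD it falls back to
    # time.time() - boottime(), where boottime() reads kern.boottime
    # through sysctlbyname(3) via ctypes.
    print('uptime: %s seconds' % util.uptime())

    # get_proc_ppid() parses the fourth field of /proc/<pid>/stat.
    print('parent pid: %d' % util.get_proc_ppid(os.getpid()))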
diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl index 0847ab3d..8f98cb96 100644 --- a/config/cloud.cfg.tmpl +++ b/config/cloud.cfg.tmpl @@ -2,15 +2,30 @@ # The top level settings are used as module # and system configuration. +# A set of users which may be applied and/or used by various modules +# when a 'default' entry is found it will reference the 'default_user' +# from the distro configuration specified below +users: + - default + +# This will cause the set+update hostname module to not operate (if true) +preserve_hostname: true + +# Example datasource config +# datasource: +# Ec2: +# metadata_urls: [ 'blah.com' ] +# timeout: 5 # (defaults to 50 seconds) +# max_wait: 10 # (defaults to 120 seconds) datasource: Azure: agent_command: [/usr/bin/python3, -u, /usr/sbin/waagent, -start] -# This will cause the set+update hostname module to not operate (if true) -preserve_hostname: true +# disable customization for VMware +disable_vmware_customization: true -users: - - default +# The modules that run in the 'init' stage +cloud_init_modules: # The modules that run in the 'config' stage cloud_config_modules: diff --git a/debian/README.source b/debian/README.source deleted file mode 100644 index 3db12bd8..00000000 --- a/debian/README.source +++ /dev/null @@ -1,30 +0,0 @@ -=== General Info === -Packaging branches for the Ubuntu development release are maintained in -upstream git under ubuntu/devel branch. - - git clone git://git.launchpad.net/cloud-init -b ubuntu/devel - -Stable release branches are also available in ubuntu/<codename> - $ git clone git://git.launchpad.net/cloud-init - $ cd cloud-init - $ git branch -r - origin/HEAD -> origin/master - origin/master - origin/ubuntu/devel - origin/ubuntu/precise - origin/ubuntu/trusty - origin/ubuntu/xenial - -Patches in debian/patches are stored un-applied. - -== New snapshot == -Upstream maintains a script to handle new upstream snapshots at: - https://github.com/cloud-init/qa-scripts/ -Its usage is like: - new-upstream-snapshot master - -== Cherry Pick == -To cherry pick an upstream commit: - ./debian/cherry-pick <hash> - -That will add a patch to debian/patches/ and debian/patches/series. diff --git a/debian/apport-launcher.py b/debian/apport-launcher.py deleted file mode 100644 index 30fea31b..00000000 --- a/debian/apport-launcher.py +++ /dev/null @@ -1,6 +0,0 @@ -'''Wrapper for cloudinit apport interface''' - -from cloudinit.apport import add_info as cloudinit_add_info - -def add_info(report, ui): - return cloudinit_add_info(report, ui) diff --git a/debian/changelog b/debian/changelog deleted file mode 100644 index 74e8dc2d..00000000 --- a/debian/changelog +++ /dev/null @@ -1,3423 +0,0 @@ -cloud-init (18.4-0ubuntu1~16.04.2) xenial; urgency=medium - - * cherry-pick 1d5e9aef: azure: Add apply_network_config option to - disable network (LP: #1798424) - * debian/patches/openstack-no-network-config.patch - add patch to default Azure apply_network_config to False. Only - fallback network config on eth0 is generated by cloud-init. IMDS - network_config is ignored. - - -- Chad Smith <chad.smith@canonical.com> Wed, 17 Oct 2018 12:51:09 -0600 - -cloud-init (18.4-0ubuntu1~16.04.1) xenial-proposed; urgency=medium - - * drop the following cherry-picks now included: - + cpick-3cee0bf8-oracle-fix-detect_openstack-to-report-True-on - * refresh patches: - + debian/patches/azure-use-walinux-agent.patch - + debian/patches/openstack-no-network-config.patch - * refresh patches: - + debian/patches/ds-identify-behavior-xenial.patch - * New upstream release. 
(LP: #1795953) - - release 18.4 - - tests: allow skipping an entire cloud_test without running. - - tests: disable lxd tests on cosmic - - cii-tests: use unittest2.SkipTest in ntp_chrony due to new deps - - lxd: adjust to snap installed lxd. - - docs: surface experimental doc in instance-data.json - - tests: fix ec2 integration tests. process meta_data instead of meta-data - - Add support for Infiniband network interfaces (IPoIB). [Mark Goddard] - - cli: add cloud-init query subcommand to query instance metadata - - tools/tox-venv: update for new features. - - pylint: ignore warning assignment-from-no-return for _write_network - - stages: Fix bug causing datasource to have incorrect sys_cfg. - - Remove dead-code _write_network distro implementations. - - net_util: ensure static configs have netmask in translate_network result - [Thomas Berger] - - Fall back to root:root on syslog permissions if other options fail. - [Robert Schweikert] - - tests: Add mock for util.get_hostname. [Robert Schweikert] - - ds-identify: doc string cleanup. - - OpenStack: Support setting mac address on bond. [Fabian Wiesel] - - bash_completion/cloud-init: fix shell syntax error. - - EphemeralIPv4Network: Be more explicit when adding default route. - - OpenStack: support reading of newer versions of metdata. - - OpenStack: fix bug causing 'latest' version to be used from network. - - user-data: jinja template to render instance-data.json in cloud-config - - config: disable ssh access to a configured user account - - tests: print failed testname instead of docstring upon failure - - tests: Disallow use of util.subp except for where needed. - - sysconfig: refactor sysconfig to accept distro specific templates paths - - Add unit tests for config/cc_ssh.py [Francis Ginther] - - Fix the built-in cloudinit/tests/helpers:skipIf - - read-version: enhance error message [Joshua Powers] - - hyperv_reporting_handler: simplify threaded publisher - - VMWare: Fix a network config bug in vm with static IPv4 and no gateway. - [Pengpeng Sun] - - logging: Add logging config type hyperv for reporting via Azure KVP - [Andy Liu] - - tests: disable other snap test as well [Joshua Powers] - - tests: disable snap, fix write_files binary [Joshua Powers] - - Add datasource Oracle Compute Infrastructure (OCI). - - azure: allow azure to generate network configuration from IMDS per boot. - - Scaleway: Add network configuration to the DataSource [Louis Bouchard] - - docs: Fix example cloud-init analyze command to match output. - [Wesley Gao] - - netplan: Correctly render macaddress on a bonds and bridges when - provided. - - tools: Add 'net-convert' subcommand command to 'cloud-init devel'. - - redhat: remove ssh keys on new instance. - - Use typeset or local in profile.d scripts. - - OpenNebula: Fix null gateway6 [Akihiko Ota] - - oracle: fix detect_openstack to report True on OracleCloud.com DMI data - - tests: improve LXDInstance trying to workaround or catch bug. - - update_metadata re-config on every boot comments and tests not quite - right [Mike Gerdts] - - tests: Collect build_info from system if available. - - pylint: Fix pylint warnings reported in pylint 2.0.0. - - get_linux_distro: add support for rhel via redhat-release. - - get_linux_distro: add support for centos6 and rawhide flavors of redhat - - tools: add '--debug' to tools/net-convert.py - - tests: bump the version of paramiko to 2.4.1. 
- - -- Chad Smith <chad.smith@canonical.com> Wed, 03 Oct 2018 12:10:25 -0600 - -cloud-init (18.3-9-g2e62cb8a-0ubuntu1~16.04.2) xenial-proposed; urgency=medium - - * cherry-pick 3cee0bf8: oracle: fix detect_openstack to report True on - (LP: #1784685) - - -- Chad Smith <chad.smith@canonical.com> Tue, 31 Jul 2018 13:57:21 -0600 - -cloud-init (18.3-9-g2e62cb8a-0ubuntu1~16.04.1) xenial-proposed; urgency=medium - - * New upstream snapshot. (LP: #1777912) - - docs: note in rtd about avoiding /tmp when writing files - - ubuntu,centos,debian: get_linux_distro to align with platform.dist - - Fix boothook docs on environment variable name (INSTANCE_I -> - INSTANCE_ID) [Marc Tamsky] - - update_metadata: a datasource can support network re-config every boot - - tests: drop salt-minion integration test - - Retry on failed import of gpg receive keys. - - tools: Fix run-container when neither source or binary package requested. - - docs: Fix a small spelling error. [Oz N Tiram] - - tox: use simplestreams from git repository rather than bzr. - - -- Chad Smith <chad.smith@canonical.com> Mon, 09 Jul 2018 15:34:52 -0600 - -cloud-init (18.3-0ubuntu1~16.04.1) xenial-proposed; urgency=medium - - * debian/rules: update version.version_string to contain packaged version. - (LP: #1770712) - * debian/patches/openstack-no-network-config.patch - add patch to ignore Openstack network_config from network_data.json by - default - * Refresh patches against upstream: - + azure-use-walinux-agent.patch - + ds-identify-behavior-xenial.patch - * New upstream release. (LP: #1777912) - - release 18.3 - - docs: represent sudo:false in docs for user_groups config module - - Explicitly prevent `sudo` access for user module [Jacob Bednarz] - - lxd: Delete default network and detach device if lxd-init created them. - - openstack: avoid unneeded metadata probe on non-openstack platforms - - stages: fix tracebacks if a module stage is undefined or empty - [Robert Schweikert] - - Be more safe on string/bytes when writing multipart user-data to disk. - - Fix get_proc_env for pids that have non-utf8 content in environment. - - tests: fix salt_minion integration test on bionic and later - - tests: provide human-readable integration test summary when --verbose - - tests: skip chrony integration tests on lxd running artful or older - - test: add optional --preserve-instance arg to integraiton tests - - netplan: fix mtu if provided by network config for all rendered types - - tests: remove pip install workarounds for pylxd, take upstream fix. - - subp: support combine_capture argument. - - tests: ordered tox dependencies for pylxd install - - util: add get_linux_distro function to replace platform.dist - [Robert Schweikert] - - pyflakes: fix unused variable references identified by pyflakes 2.0.0. - - - Do not use the systemd_prefix macro, not available in this environment - [Robert Schweikert] - - doc: Add config info to ec2, openstack and cloudstack datasource docs - - Enable SmartOS network metadata to work with netplan via per-subnet - routes [Dan McDonald] - - openstack: Allow discovery in init-local using dhclient in a sandbox. - - tests: Avoid using https in httpretty, improve HttPretty test case. - - yaml_load/schema: Add invalid line and column nums to error message - - Azure: Ignore NTFS mount errors when checking ephemeral drive - [Paul Meyer] - - packages/brpm: Get proper dependencies for cmdline distro. - - packages: Make rpm spec files patch in package version like in debs. 
- - tools/run-container: replace tools/run-centos with more generic. - - Update version.version_string to contain packaged version. - - cc_mounts: Do not add devices to fstab that are already present. - [Lars Kellogg-Stedman] - - ds-identify: ensure that we have certain tokens in PATH. - - tests: enable Ubuntu Cosmic in integration tests [Joshua Powers] - - read_file_or_url: move to url_helper, fix bug in its FileResponse. - - cloud_tests: help pylint - - flake8: fix flake8 errors in previous commit. - - typos: Fix spelling mistakes in cc_mounts.py log messages [Stephen Ford] - - tests: restructure SSH and initial connections [Joshua Powers] - - ds-identify: recognize container-other as a container, test SmartOS. - - cloud-config.service: run After snap.seeded.service. - - tests: do not rely on host /proc/cmdline in test_net.py - [Lars Kellogg-Stedman] - - ds-identify: Remove dupe call to is_ds_enabled, improve debug message. - - SmartOS: fix get_interfaces for nics that do not have addr_assign_type. - - tests: fix package and ca_cert cloud_tests on bionic - - ds-identify: make shellcheck 0.4.6 happy with ds-identify. - - pycodestyle: Fix deprecated string literals, move away from flake8. - - azure: Add reported ready marker file. [Joshua Chan] - - tools: Support adding a release suffix through packages/bddeb. - - FreeBSD: Invoke growfs on ufs filesystems such that it does not prompt. - [Harm Weites] - - tools: Re-use the orig tarball in packages/bddeb if it is around. - - netinfo: fix netdev_pformat when a nic does not have an address assigned. - - collect-logs: add -v flag, write to stderr, limit journal to single boot. - - IBMCloud: Disable config-drive and nocloud only if IBMCloud is enabled. - - Add reporting events and log_time around early source of blocking time - - IBMCloud: recognize provisioning environment during debug boots. - - net: detect unstable network names and trigger a settle if needed - - IBMCloud: improve documentation in datasource. - - sysconfig: dhcp6 subnet type should not imply dhcpv4 [Vitaly Kuznetsov] - - packages/debian/control.in: add missing dependency on iproute2. - - DataSourceSmartOS: add locking of serial device. [Mike Gerdts] - - DataSourceSmartOS: sdc:hostname is ignored [Mike Gerdts] - - DataSourceSmartOS: list() should always return a list [Mike Gerdts] - - schema: in validation, raise ImportError if strict but no jsonschema. - - set_passwords: Add newline to end of sshd config, only restart if - updated. - - pylint: pay attention to unused variable warnings. - - doc: Add documentation for AliYun datasource. [Junjie Wang] - - Schema: do not warn on duplicate items in commands. - - net: Depend on iproute2's ip instead of net-tools ifconfig or route - - DataSourceSmartOS: fix hang when metadata service is down [Mike Gerdts] - - DataSourceSmartOS: change default fs on ephemeral disk from ext3 to - ext4. [Mike Gerdts] - - pycodestyle: Fix invalid escape sequences in string literals. - - Implement bash completion script for cloud-init command line - - tools: Fix make-tarball cli tool usage for development - - renderer: support unicode in render_from_file. - - Implement ntp client spec with auto support for distro selection - - Apport: add Brightbox, IBM, LXD, and OpenTelekomCloud to list of clouds. 
- - tests: fix ec2 integration network metadata validation - - -- Chad Smith <chad.smith@canonical.com> Thu, 21 Jun 2018 14:32:29 -0600 - -cloud-init (18.2-4-g05926e48-0ubuntu1~16.04.2) xenial-proposed; urgency=medium - - * cherry-pick 6ef92c98: IBMCloud: recognize provisioning environment - during debug (LP: #1767166) - * cherry-pick 11172924: IBMCloud: Disable config-drive and nocloud - only if IBMCloud (LP: #1766401) - - -- Chad Smith <chad.smith@canonical.com> Mon, 30 Apr 2018 15:52:05 -0600 - -cloud-init (18.2-4-g05926e48-0ubuntu1~16.04.1) xenial-proposed; urgency=medium - - * debian/new-upstream-snapshot: Remove script, now maintained elsewhere. - * New upstream snapshot. (LP: #1759406) - - tests: fix integration tests to support lxd 3.0 release - - correct documentation to match correct attribute name usage. - [Dominic Schlegel] - - cc_resizefs, util: handle no /dev/zfs - - doc: Fix links in OpenStack datasource documentation. [Dominic Schlegel] - - -- Chad Smith <chad.smith@canonical.com> Tue, 03 Apr 2018 17:01:55 -0600 - -cloud-init (18.2-0ubuntu1~16.04.1) xenial-proposed; urgency=medium - - * Drop the following cherry picks in debian/patches. They are now - incorporated now incorporated in the upstream source: - + cpick-40e7738-GCE-fix-reading-of-user-data-that-is-not-base64-encoded - * Refresh patches against upstream: - + azure-use-walinux-agent.patch - + ds-identify-behavior-xenial.patch - * New upstream release. (LP: #1759406) - - release 18.2 - - Hetzner: Exit early if dmi system-manufacturer is not Hetzner. - - Add missing dependency on isc-dhcp-client to trunk ubuntu packaging. - - FreeBSD: resizefs module now able to handle zfs/zpool. [Dominic Schlegel] - - cc_puppet: Revert regression of puppet creating ssl and ssl_cert dirs - - Enable IBMCloud datasource in settings.py. - - IBMCloud: Initial IBM Cloud datasource. - - tests: remove jsonschema from xenial tox environment. - - tests: Fix newly added schema unit tests to skip if no jsonschema. - - ec2: Adjust ec2 datasource after exception_cb change. - - Reduce AzurePreprovisioning HTTP timeouts. [Douglas Jordan] - - Revert the logic of exception_cb in read_url. [Kurt Garloff] - - ubuntu-advantage: Add new config module to support ubuntu-advantage-tools - - Handle global dns entries in netplan - - Identify OpenTelekomCloud Xen as OpenStack DS. [Kurt Garloff] - - datasources: fix DataSource subclass get_hostname method signature - - OpenNebula: Update network to return v2 config rather than ENI. - [Akihiko Ota] - - Add Hetzner Cloud DataSource - - net: recognize iscsi root cases without ip= on kernel command line. - - tests: fix flakes warning for unused variable - - tests: patch leaked stderr messages from snap unit tests - - cc_snap: Add new module to install and configure snapd and snap packages. - - tests: Make pylint happy and fix python2.6 uses of assertRaisesRegex. - - netplan: render bridge port-priority values - - util: Fix subp regression. Allow specifying subp command as a string. - - doc: fix all warnings issued by 'tox -e doc' - - FreeBSD: Set hostname to FQDN. [Dominic Schlegel] - - tests: fix run_tree and bddeb - - tests: Fix some warnings in tests that popped up with newer python. - - set_hostname: When present in metadata, set it before network bringup. - - tests: Centralize and re-use skipTest based on json schema presense. - - This commit fixes get_hostname on the AzureDataSource. [Douglas Jordan] - - shellify: raise TypeError on bad input. - - Make salt minion module work on FreeBSD. 
[Dominic Schlegel] - - Simplify some comparisions. [Rémy Léone] - - Change some list creation and population to literal. [Rémy Léone] - - GCE: fix reading of user-data that is not base64 encoded. - - doc: fix chef install from apt packages example in RTD. - - Implement puppet 4 support [Romanos Skiadas] - - subp: Fix subp usage with non-ascii characters when no system locale. - - salt: configure grains in grains file rather than in minion config. - [Daniel Wallace] - - release 18.1 - - OVF: Fix VMware support for 64-bit platforms. [Sankar Tanguturi] - - ds-identify: Fix searching for iso9660 OVF cdroms. - - SUSE: Fix groups used for ownership of cloud-init.log [Robert Schweikert] - - ds-identify: check /writable/system-data/ for nocloud seed. - - tests: run nosetests in cloudinit/ directory, fix py26 fallout. - - tools: run-centos: git clone rather than tar. - - -- Chad Smith <chad.smith@canonical.com> Wed, 28 Mar 2018 12:32:23 -0600 - -cloud-init (17.2-35-gf576b2a2-0ubuntu1~16.04.2) xenial-proposed; urgency=medium - - * cherry-pick 40e7738: GCE: fix reading of user-data that is not - base64 encoded. (LP: #1752711) - - -- Chad Smith <chad.smith@canonical.com> Thu, 01 Mar 2018 16:05:39 -0700 - -cloud-init (17.2-35-gf576b2a2-0ubuntu1~16.04.1) xenial-proposed; urgency=medium - - * New upstream snapshot. (LP: #1747059) - - tests: add support for logs with lxd from snap and future lxd 3. - - EC2: Fix get_instance_id called against cached datasource pickle. - - cli: fix cloud-init status to report running when before result.json - - net: accept network-config in netplan format for renaming interfaces - - Fix ssh keys validation in ssh_util [Tatiana Kholkina] - - -- Chad Smith <chad.smith@canonical.com> Mon, 12 Feb 2018 10:18:13 -0700 - -cloud-init (17.2-30-gf7deaf15-0ubuntu1~16.04.1) xenial-proposed; urgency=medium - - * debian/patches/ds-identify-behavior-xenial.patch: refresh patch. - * debian/grub-legacy-ec2.install: install post(inst|rm) files correctly. - [Simon Deziel] (LP: #1581416) - * New upstream snapshot (LP: #1747059) - - docs: Update RTD content for cloud-init subcommands. - - OVF: Extend well-known labels to include OVFENV. - - Fix potential cases of uninitialized variables. - - tests: Collect script output as binary, collect systemd journal, fix lxd. - - HACKING.rst: mention setting user name and email via git config. - - Azure VM Preprovisioning support. [Douglas Jordan] - - tools/read-version: Fix read-version when in a git worktree. - - docs: Fix typos in docs and one debug message. [Florian Grignon] - - btrfs: support resizing if root is mounted ro. - [Robert Schweikert] - - OpenNebula: Improve network configuration support. - [Akihiko Ota] - - tests: Fix EC2 Platform to return console output as bytes. - - tests: Fix attempted use of /run in a test case. - - GCE: Improvements and changes to ssh key behavior for default user. - [Max Illfelder] - - subp: make ProcessExecutionError have expected types in stderr, stdout. - - tests: when querying ntp server, do not do dns resolution. - - Recognize uppercase vfat disk labels [James Penick] - - tests: remove zesty as supported OS to test - - Do not log warning on config files that represent None. - - tests: Use git hash pip dependency format for pylxd. - - tests: add integration requirements text file - - MAAS: add check_instance_id based off oauth tokens. - - tests: update apt sources list test - - tests: clean up image properties - - tests: rename test ssh keys to avoid appearance of leaking private keys. 
- - tests: Enable AWS EC2 Integration Testing - - cli: cloud-init clean handles symlinks - - SUSE: Add a basic test of network config rendering. [Robert Schweikert] - - Azure: Only bounce network when necessary. - - lint: Fix lints seen by pylint version 1.8.1. - - cli: Fix error in cloud-init modules --mode=init. - - release 17.2 - - ds-identify: failure in NoCloud due to unset variable usage. - - tests: fix collect_console when not implemented - - ec2: Use instance-identity doc for region and instance-id - [Andrew Jorgensen] - - tests: remove leaked tmp files in config drive tests. - - setup.py: Do not include rendered files in SOURCES.txt - - SUSE: remove delta in systemd local template for SUSE [Robert Schweikert] - - tests: move to using tox 1.7.5 - - OVF: improve ds-identify to support finding OVF iso transport. - - VMware: Support for user provided pre and post-customization scripts - [Maitreyee Saikia] - - citest: In NoCloudKVM provide keys via metadata not userdata. - - pylint: Update pylint to 1.7.1, run on tests/ and tools and fix - complaints. - - Datasources: Formalize DataSource get_data and related properties. - - cli: Add clean and status subcommands - - tests: consolidate platforms into specific dirs - - -- Chad Smith <chad.smith@canonical.com> Fri, 02 Feb 2018 12:37:30 -0700 - -cloud-init (17.1-46-g7acc9e68-0ubuntu1~16.04.1) xenial-proposed; urgency=medium - - * New upstream snapshot. - - ec2: Fix sandboxed dhclient background process cleanup. - (LP: #1735331) - - tests: NoCloudKVMImage do not modify the original local cache image. - - tests: Enable bionic in integration tests. [Joshua Powers] - - tests: Use apt-get to install a deb so that depends get resolved. - - sysconfig: Correctly render dns and dns search info. - [Ryan McCabe] - - -- Chad Smith <chad.smith@canonical.com> Fri, 01 Dec 2017 10:05:01 -0700 - -cloud-init (17.1-41-g76243487-0ubuntu1~16.04.1) xenial-proposed; urgency=medium - - * debian/cloud-init.templates: Fix capitilazation in 'AliYun' - (LP: #1728186) - * New upstream snapshot (LP: #1733653) - - integration test: replace curtin test ppa with cloud-init test ppa. - - EC2: Fix bug using fallback_nic and metadata when restoring from cache. - - EC2: Kill dhclient process used in sandbox dhclient. - - ntp: fix configuration template rendering for openSUSE and SLES - - centos: Provide the failed #include url in error messages - - Catch UrlError when #include'ing URLs [Andrew Jorgensen] - - hosts: Fix openSUSE and SLES setup for /etc/hosts and clarify docs. - [Robert Schweikert] - - rh_subscription: Perform null checks for enabled and disabled repos. - [Dave Mulford] - - Improve warning message when a template is not found. - [Robert Schweikert] - - Replace the temporary i9n.brickies.net with i9n.cloud-init.io. - - Azure: don't generate network configuration for SRIOV devices - - tests: address some minor feedback missed in last merge. - - tests: integration test cleanup and full pass of nocloud-kvm. - - Gentoo: chmod +x on all files in sysvinit/gentoo/ - [Carlos Konstanski] - - -- Chad Smith <chad.smith@canonical.com> Tue, 21 Nov 2017 11:45:23 -0700 - -cloud-init (17.1-27-geb292c18-0ubuntu1~16.04.1) xenial-proposed; urgency=medium - - * New upstream snapshot. - - EC2: Limit network config to fallback nic, fix local-ipv4 only - instances. (LP: #1728152) - - Gentoo: Use "rc-service" rather than "service". 
[Carlos Konstanski] - - -- Chad Smith <chad.smith@canonical.com> Tue, 31 Oct 2017 13:10:51 -0600 - -cloud-init (17.1-25-g17a15f9e-0ubuntu1~16.04.1) xenial-proposed; urgency=medium - - * New upstream snapshot. - - resizefs: Fix regression when system booted with root=PARTUUID= - (LP: #1725067) - - tools: make yum package installation more reliable - - citest: fix remaining warnings raised by integration tests. - - citest: show the class actual class name in results. - - ntp: fix config module schema to allow empty ntp config - (LP: #1724951) - - tools: disable fastestmirror if using proxy [Joshua Powers] - - -- Chad Smith <chad.smith@canonical.com> Mon, 23 Oct 2017 14:54:05 -0600 - -cloud-init (17.1-18-gd4f70470-0ubuntu1~16.04.2) xenial-proposed; urgency=medium - - * cherry-pick 41152f1: schema: Log debug instead of warning when - jsonschema is absent (LP: #1724354) - - -- Chad Smith <chad.smith@canonical.com> Wed, 18 Oct 2017 15:11:25 -0600 - -cloud-init (17.1-18-gd4f70470-0ubuntu1~16.04.1) xenial-proposed; urgency=medium - - * drop the following cherry picks, now incorporated in snapshot. - + debian/patches/cpick-a2f8ce9c-Do-not-provide-systemd-fsck-drop... - * debian/copyright: dep5 updates, reorganize, add Apache 2.0 license. - (LP: #1718681) - * debian/control: drop dependency on python3-prettytable - * debian/rules: install rsyslog file with 0644 mode instead of 0755. - * debian/rules, debian/apport-launcher.py: add an apport hook. (LP: #1607345) - * New upstream snapshot. (LP: #1721847) - - simpletable: Fix get_string method to return table-formatted string - - net: Handle bridge stp values of 0 and convert to boolean type - [Chad Smith] - - tools: Give specific --abbrev=8 to "git describe" - - network: bridge_stp value not always correct [Ryan Harper] - - tests: re-enable tox with nocloud-kvm support [Joshua Powers] - - systemd: remove limit on tasks created by cloud-init-final.service. - [Robert Schweikert] - - suse: Support addition of zypper repos via cloud-config. - [Robert Schweikert] - - tests: Combine integration configs and testcases [Joshua Powers] - - Azure, CloudStack: Support reading dhcp options from systemd-networkd. - [Dimitri John Ledkov] - - packages/debian/copyright: remove mention of boto and MIT license - - systemd: only mention Before=apt-daily.service on debian based distros. - [Robert Schweikert] - - Add missing simpletable and simpletable tests for failed merge - [Chad Smith] - - Remove prettytable dependency, introduce simpletable [Andrew Jorgensen] - - debian/copyright: dep5 updates, reorganize, add Apache 2.0 license. - [Joshua Powers] - - tests: remove dependency on shlex [Joshua Powers] - - AltCloud: Trust PATH for udevadm and modprobe. - - DataSourceOVF: use util.find_devs_with(TYPE=iso9660) - [Ryan Harper] - - tests: remove a temp file used in bootcmd tests. - - release 17.1 - - doc: document GCE datasource. [Arnd Hannemann] - - suse: updates to templates to support openSUSE and SLES. - [Robert Schweikert] - - suse: Copy sysvinit files from redhat with slight changes. - [Robert Schweikert] - - docs: fix sphinx module schema documentation [Chad Smith] - - tests: Add cloudinit package to all test targets [Chad Smith] - - Makefile: No longer look for yaml files in obsolete ./bin/. - - tests: fix ds-identify unit tests to set EC2_STRICT_ID_DEFAULT. - - ec2: Fix maybe_perform_dhcp_discovery to use /var/tmp as a tmpdir - [Chad Smith] - - Azure: wait longer for SSH pub keys to arrive. - [Paul Meyer] - - GCE: Fix usage of user-data. 
- - cmdline: add collect-logs subcommand. [Chad Smith] - - CloudStack: consider dhclient lease files named with a hyphen. - - resizefs: Drop check for read-only device file, do not warn on - overlayroot. [Chad Smith] - - tests: Enable the NoCloud KVM platform [Joshua Powers] - - resizefs: pass mount point to xfs_growfs [Dusty Mabe] - - vmware: Enable nics before sending the SUCCESS event. [Sankar Tanguturi] - - cloud-config modules: honor distros definitions in each module - [Chad Smith] - - chef: Add option to pin chef omnibus install version - [Ethan Apodaca] - - tests: execute: support command as string [Joshua Powers] - - schema and docs: Add jsonschema to resizefs and bootcmd modules - [Chad Smith] - - tools: Add xkvm script, wrapper around qemu-system [Joshua Powers] - - vmware customization: return network config format - [Sankar Tanguturi] - - Ec2: only attempt to operate at local mode on known platforms. - - Use /run/cloud-init for tempfile operations. - - ds-identify: Make OpenStack return maybe on arch other than intel. - - tests: mock missed openstack metadata uri network_data.json - [Chad Smith] - - relocate tests/unittests/helpers.py to cloudinit/tests - [Lars Kellogg-Stedman] - - tox: add nose timer output [Joshua Powers] - - upstart: do not package upstart jobs, drop ubuntu-init-switch module. - - tests: Stop leaking calls through unmocked metadata addresses - [Chad Smith] - - distro: allow distro to specify a default locale [Ryan Harper] - - tests: fix two recently added tests for sles distro. - - url_helper: dynamically import oauthlib import from inside oauth_headers - [Chad Smith] - - tox: make xenial environment run with python3.6 - - suse: Add support for openSUSE and return SLES to a working state. - [Robert Schweikert] - - GCE: Add a main to the GCE Datasource. - - ec2: Add IPv6 dhcp support to Ec2DataSource. [Chad Smith] - - url_helper: fail gracefully if oauthlib is not available - [Lars Kellogg-Stedman] - - cloud-init analyze: fix issues running under python 2. [Andrew Jorgensen] - - Configure logging module to always use UTC time. - [Ryan Harper] - - Log a helpful message if a user script does not include shebang. - [Andrew Jorgensen] - - cli: Fix command line parsing of coniditionally loaded subcommands. - [Chad Smith] - - doc: Explain error behavior in user data include file format. - [Jason Butz] - - cc_landscape & cc_puppet: Fix six.StringIO use in writing configs - [Chad Smith] - - schema cli: Add schema subcommand to cloud-init cli and cc_runcmd schema - [Chad Smith] - - Debian: Remove non-free repositories from apt sources template. - [Joonas Kylmälä] - - tools: Add tooling for basic cloud-init performance analysis. - [Chad Smith] - - network: add v2 passthrough and fix parsing v2 config with bonds/bridge - params [Ryan Harper] - - doc: update capabilities with features available, link doc reference, - cli example [Ryan Harper] - - vcloud directory: Guest Customization support for passwords - [Maitreyee Saikia] - - ec2: Allow Ec2 to run in init-local using dhclient in a sandbox. - [Chad Smith] - - cc_ntp: fallback on timesyncd configuration if ntp is not installable - [Ryan Harper] - - net: Reduce duplicate code. Have get_interfaces_by_mac use - get_interfaces. - - tests: Fix build tree integration tests [Joshua Powers] - - sysconfig: Dont repeat header when rendering resolv.conf - [Ryan Harper] - - archlinux: Fix bug with empty dns, do not render 'lo' devices. 
- - -- Chad Smith <chad.smith@canonical.com> Tue, 10 Oct 2017 14:13:24 -0600 - -cloud-init (0.7.9-233-ge586fe35-0ubuntu1~16.04.2) xenial-proposed; urgency=medium - - * cherry-pick a2f8ce9c: Do not provide systemd-fsck drop-in which - could cause systemd ordering loops (LP: #1717477). - - -- Scott Moser <smoser@ubuntu.com> Fri, 15 Sep 2017 15:23:38 -0400 - -cloud-init (0.7.9-233-ge586fe35-0ubuntu1~16.04.1) xenial-proposed; urgency=medium - - * debian/cloud-init.templates: enable Scaleway cloud. - * debian/cloud-init.templates: enable Aliyun cloud. - * drop the following cherry picks, now incorporated in snapshot. - + debian/patches/cpick-5fb49bac-azure-identify-platform... - + debian/patches/cpick-003c6678-net-remove-systemd-link... - + debian/patches/cpick-1cd4323b-azure-remove-accidental... - + debian/patches/cpick-ebc9ecbc-Azure-Add-network-config... - + debian/patches/cpick-11121fe4-systemd-make-cloud-final... - * debian/patches/stable-release-no-jsonschema-dep.patch: - add patch to remove optional dependency on jsonschema. - * New upstream snapshot. - - cloudinit.net: add initialize_network_device function and tests - [Chad Smith] - - makefile: fix ci-deps-ubuntu target [Chad Smith] - - tests: adjust locale integration test to parse default locale. - - tests: remove 'yakkety' from releases as it is EOL. - - centos: do not package systemd-fsck drop-in. - - systemd: make systemd-fsck run after cloud-init.service (LP: #1691489) - - tests: Add initial tests for EC2 and improve a docstring. - - locale: Do not re-run locale-gen if provided locale is system default. - - archlinux: fix set hostname usage of write_file. [Joshua Powers] - - sysconfig: support subnet type of 'manual'. - - tools/run-centos: make running with no argument show help. - - Drop rand_str() usage in DNS redirection detection - [Bob Aman] (LP: #1088611) - - sysconfig: use MACADDR on bonds/bridges to configure mac_address - [Ryan Harper] - - net: eni route rendering missed ipv6 default route config - [Ryan Harper] (LP: #1701097) - - sysconfig: enable mtu set per subnet, including ipv6 mtu - [Ryan Harper] - - sysconfig: handle manual type subnets [Ryan Harper] - - sysconfig: fix ipv6 gateway routes [Ryan Harper] - - sysconfig: fix rendering of bond, bridge and vlan types. - [Ryan Harper] - - Templatize systemd unit files for cross distro deltas. [Ryan Harper] - - sysconfig: ipv6 and default gateway fixes. [Ryan Harper] - - net: fix renaming of nics to support mac addresses written in upper - case. (LP: #1705147) - - tests: fixes for issues uncovered when moving to python 3.6. - - sysconfig: include GATEWAY value if set in subnet - [Ryan Harper] - - Scaleway: add datasource with user and vendor data for Scaleway. - [Julien Castets] - - Support comments in content read by load_shell_content. - - cloudinitlocal fail to run during boot [Hongjiang Zhang] - - doc: fix disk setup example table_type options [Sandor Zeestraten] - - tools: Fix exception handling. [Joonas Kylmälä] - - tests: fix usage of mock in GCE test. - - test_gce: Fix invalid mock of platform_reports_gce to return False - [Chad Smith] - - test: fix incorrect keyid for apt repository. [Joshua Powers] - - tests: Update version of pylxd [Joshua Powers] - - write_files: Remove log from helper function signatures. - [Andrew Jorgensen] - - doc: document the cmdline options to NoCloud [Brian Candler] - - read_dmi_data: always return None when inside a container. (LP: #1701325) - - requirements.txt: remove trailing white space. 
-    - Tests: Simplify the check on ssh-import-id [Joshua Powers]
-    - tests: update ntp tests after sntp added [Joshua Powers]
-    - FreeBSD: Make freebsd a variant, fix unittests and tools/build-on-freebsd.
-    - FreeBSD: fix test failure
-    - FreeBSD: replace ifdown/ifup with "ifconfig down" and "ifconfig up". [Hongjiang Zhang]
-    - FreeBSD: fix cdrom mounting failure if /mnt/cdrom/secure did not exist. [Hongjiang Zhang]
-    - main: Don't use templater to format the welcome message [Andrew Jorgensen]
-    - docs: Automatically generate module docs from schema if present. [Chad Smith]
-    - debian: fix path comment in /etc/hosts template. [Jens Sandmann]
-    - suse: add hostname and fully qualified domain to template. [Jens Sandmann]
-    - write_file(s): Print permissions as octal, not decimal [Andrew Jorgensen]
-    - ci deps: Add --test-distro to read-dependencies to install all deps [Chad Smith]
-    - tools/run-centos: cleanups and move to using read-dependencies
-    - pkg build ci: Add make ci-deps-<distro> target to install pkgs [Chad Smith]
-    - selinux: Allow restorecon to be non-fatal. [Ryan Harper]
-    - net: Allow netinfo subprocesses to return 0 or 1 due to selinux. [Ryan Harper]
-    - net: Allow for NetworkManager configuration [Ryan McCabe]
-    - Use distro release version to determine if we use systemd in redhat spec [Ryan Harper]
-    - net: normalize data in network_state object
-    - Integration Testing: tox env, pylxd 2.2.3, and revamp framework [Wesley Wiedenmeier]
-    - Chef: Update omnibus url to chef.io, minor doc changes. [JJ Asghar]
-    - tools: add centos scripts to build and test [Joshua Powers]
-    - Drop cheetah python module as it is not needed by trunk [Ryan Harper]
-    - rhel/centos spec cleanups.
-    - cloud.cfg: move to a template. setup.py changes along the way.
-    - Makefile: add deb-src and srpm targets. use PYVER more places.
-    - makefile: fix python 2/3 detection in the Makefile [Chad Smith]
-    - snap: Removing snapcraft plug line [Joshua Powers]
-    - RHEL/CentOS: Fix default routes for IPv4/IPv6 configuration. [Andreas Karis]
-    - test: Fix pyflakes complaint of unused import. [Joshua Powers]
-    - NoCloud: support seed of nocloud from smbios information [Vladimir Pouzanov] (LP: #1691772)
-    - net: when selecting a network device, use natural sort order [Marc-Aurèle Brothier]
-    - fix typos and remove whitespace in various docs [Stephan Telling]
-    - systemd: Fix typo in comment in cloud-init.target. [Chen-Han Hsiao]
-    - Tests: Skip jsonschema related unit tests when dependency is absent. [Chad Smith]
-    - tools/net-convert.py: support old cloudinit versions by using kwargs.
-    - ntp: Add schema definition and passive schema validation. [Chad Smith] (LP: #1692916)
-    - Fix eni rendering for bridge params that require repeated key for values. [Ryan Harper] (LP: #1706752)
-    - AliYun: Enable platform identification and enable by default. [Junjie Wang] (LP: #1638931)
-
- -- Scott Moser <smoser@ubuntu.com>  Mon, 31 Jul 2017 16:36:16 -0400
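The sysconfig bond/bridge and MACADDR fixes in the stanza above render v1 network config. A minimal bond sketch of that input (interface names and MAC are illustrative):

    network:
      version: 1
      config:
        - type: bond
          name: bond0
          mac_address: "52:54:00:12:34:56"
          bond_interfaces: [eth0, eth1]
          params:
            bond-mode: active-backup
          subnets:
            - type: dhcp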
-
-cloud-init (0.7.9-153-g16a7302f-0ubuntu1~16.04.2) xenial-proposed; urgency=medium
-
-  * debian/patches/ds-identify-behavior-xenial.patch: refresh patch.
-  * cherry-pick 5fb49bac: azure: identify platform by well known value in chassis asset (LP: #1693939)
-  * cherry-pick 003c6678: net: remove systemd link file writing from eni renderer
-  * cherry-pick 1cd4323b: azure: remove accidental duplicate line in merge.
-  * cherry-pick ebc9ecbc: Azure: Add network-config, Refactor net layer to handle duplicate macs. (LP: #1690430)
-  * cherry-pick 11121fe4: systemd: make cloud-final.service run before apt daily (LP: #1693361)
-
- -- Scott Moser <smoser@ubuntu.com>  Wed, 28 Jun 2017 17:17:18 -0400
-
-cloud-init (0.7.9-153-g16a7302f-0ubuntu1~16.04.1) xenial-proposed; urgency=medium
-
-  * New upstream snapshot.
-    - net: fix reading and rendering addresses in cidr format. [Dimitri John Ledkov] (LP: #1689346, #1684349)
-    - disk_setup: udev settle before attempting partitioning or fs creation. (LP: #1692093)
-    - GCE: Update the attribute used to find instance SSH keys. [Daniel Watkins] (LP: #1693582)
-    - nplan: For bonds, allow dashed or underscore names of keys. [Dimitri John Ledkov] (LP: #1690480)
-    - tests: python2.6: fix unit tests usage of assertNone and format.
-    - tests: update docstring on test_configured_list_with_none
-    - fix tools/ds-identify to not write None twice.
-    - tox/build: do not package depend on style requirements.
-    - tests: ntp: Restructure cc_ntp unit tests. [Chad Smith]
-    - flake8: move the pinned version of flake8 up to 3.3.0
-    - tests: Apply workaround for snapd bug in test case. [Joshua Powers]
-    - RHEL/CentOS: Fix dual stack IPv4/IPv6 configuration. [Andreas Karis]
-    - disk_setup: fix several issues with gpt disk partitions. (LP: #1692087)
-    - function spelling & docstring update [Joshua Powers]
-    - tests: Fix unittest bug in ntp tests. [Joshua Powers]
-    - tox: move pylint target to 1.7.1
-    - Fix get_interfaces_by_mac for empty macs (LP: #1692028)
-    - DigitalOcean: remove routes except for the public interface. [Ben Howard] (LP: #1681531)
-    - netplan: pass macaddress, when specified, for vlans [Dimitri John Ledkov] (LP: #1690388)
-    - doc: various improvements for the docs on cc_users_groups. [Felix Dreissig]
-    - cc_ntp: write template before installing and add service restart [Ryan Harper] (LP: #1645644)
-    - tests: fix cloudstack unit tests to avoid accessing /var/lib/NetworkManager [Lars Kellogg-Stedman]
-    - tests: fix hardcoded path to mkfs.ext4 [Joshua Powers] (LP: #1691517)
-    - Actually skip warnings when .skip file is present. [Chris Brinker] (LP: #1691551)
-    - netplan: fix netplan render_network_state signature. [Dimitri John Ledkov] (LP: #1685944)
-    - Azure: fix reformatting of ephemeral disks on resize to large types. (LP: #1686514)
-    - make deb: Add devscripts dependency for make deb. Cleanup packages/bddeb. [Chad Smith] (LP: #1685935)
-    - openstack: fix log message copy/paste typo in _get_url_settings [Lars Kellogg-Stedman]
-    - unittests: fix unittests run on centos [Joshua Powers]
-    - Improve detection of snappy to include os-release and kernel cmdline. (LP: #1689944)
-    - Add address to config entry generated by _klibc_to_config_entry. [Julien Castets] (LP: #1691135)
-    - sysconfig: Raise ValueError when multiple default gateways are present. [Chad Smith] (LP: #1687485)
-    - FreeBSD: improvements and fixes for use on Azure [Hongjiang Zhang] (LP: #1636345)
-    - Add unit tests for ds-identify, fix Ec2 bug found.
-    - fs_setup: if cmd is specified, use shell interpretation. [Paul Meyer] (LP: #1687712)
-    - doc: document network configuration defaults policy and formats. [Ryan Harper]
-    - doc: Fix name of "uri" key in docs for "cc_apt_configure" module [Felix Dreissig]
-    - tests: Enable artful in integration tests [Joshua Powers]
-
- -- Scott Moser <smoser@ubuntu.com>  Fri, 26 May 2017 15:58:48 -0400
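The fs_setup change in the stanza above means a 'cmd' given in disk/filesystem setup config is passed through shell interpretation. A minimal sketch of such config (device and label are illustrative):

    #cloud-config
    fs_setup:
      - label: data
        filesystem: ext4
        device: /dev/xvdb
        cmd: mkfs -t %(filesystem)s -L %(label)s %(device)s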
-
-cloud-init (0.7.9-113-g513e99e0-0ubuntu1~16.04.1) xenial-proposed; urgency=medium
-
-  * debian/update-grub-legacy-ec2: fix early exit failure when there is no /etc/fstab file. (LP: #1682160)
-  * New upstream snapshot.
-    - nova-lxd: read product_name from environment, not platform. (LP: #1685810)
-    - Fix yum repo config where keys contain array values [Dylan Perry]
-    - template: Update debian backports template [Joshua Powers]
-    - rsyslog: replace ~ with stop [Joshua Powers] (LP: #1367899)
-    - Doc: add additional RTD examples [Joshua Powers]
-    - Fix growpart for some cases when booted with root=PARTUUID. (LP: #1684869)
-    - pylint: update output style to parseable [Joshua Powers]
-    - pylint: fix all logging warnings [Joshua Powers]
-    - CloudStack: Add NetworkManager to list of supported DHCP lease dirs. [Syed Mushtaq Ahmed]
-    - net: kernel lies about vlans not stealing mac addresses, when they do [Dimitri John Ledkov] (LP: #1682871)
-    - ds-identify: Check correct path for "latest" config drive [Daniel Watkins] (LP: #1673637)
-    - doc: Fix example for resolv.conf configuration. [Jon Grimm]
-    - Fix examples that reference upstream chef repository. [Jon Grimm]
-    - doc: correct grammar and improve clarity in merging documentation. [David Tagatac]
-    - doc: Add missing doc link to snap-config module. [Ryan Harper]
-    - snap: allows for creating cloud-init snap [Joshua Powers]
-    - DigitalOcean: assign IPv4ll address to lowest indexed interface. [Ben Howard] (LP: #1676908)
-    - DigitalOcean: configure all NICs presented in meta-data. [Ben Howard] (LP: #1676908)
-    - Remove (and/or fix) URL shortener references [Jon Grimm]
-    - HACKING.rst: more info on filling out contributors agreement.
-    - util: teach write_file about copy_mode option [Lars Kellogg-Stedman] (LP: #1644064)
-    - DigitalOcean: bind resolvers to loopback interface. [Ben Howard] (LP: #1676908)
-    - tests: fix AltCloud tests to not rely on blkid (LP: #1636531)
-
- -- Scott Moser <smoser@ubuntu.com>  Thu, 27 Apr 2017 12:51:04 -0400
-
-cloud-init (0.7.9-90-g61eb03fe-0ubuntu1~16.04.1) xenial-proposed; urgency=medium
-
-  * debian/cloud-init.templates: add Bigstep to list of sources. (LP: #1676460)
-  * New upstream snapshot.
-    - OpenStack: add 'dvs' to the list of physical link types. (LP: #1674946)
-    - Fix bug that resulted in an attempt to rename bonds or vlans. (LP: #1669860)
-    - tests: update OpenNebula and Digital Ocean to not rely on host interfaces.
-    - net: in netplan renderer delete known image-builtin content. (LP: #1675576)
-    - doc: correct grammar in capabilities.rst [David Tagatac]
-    - ds-identify: fix detecting of maas datasource. (LP: #1677710)
-    - netplan: remove debugging prints, add debug logging [Ryan Harper]
-    - ds-identify: do not write None twice to datasource_list.
-    - support resizing partition and rootfs on system booted without initramfs. [Steve Langasek] (LP: #1677376)
-    - apt_configure: run only when needed. (LP: #1675185)
-    - OpenStack: identify OpenStack by product 'OpenStack Compute'. (LP: #1675349)
-    - GCE: Search GCE in ds-identify, consider serial number in check. (LP: #1674861)
-    - Add support for setting hashed passwords [Tore S. Lonoy] (LP: #1570325)
-    - Fix filesystem creation when using "partition: auto" [Jonathan Ballet] (LP: #1634678)
-    - ConfigDrive: support reading config drive data from /config-drive. (LP: #1673411)
-    - ds-identify: fix detection of Bigstep datasource. (LP: #1674766)
-    - test: add running of pylint [Joshua Powers]
-    - ds-identify: fix bug where filename expansion was left on.
-    - advertise network config v2 support (NETWORK_CONFIG_V2) in features.
-    - Bigstep: fix bug when executing in python3. [root]
-    - Fix unit test when running in a system deployed with cloud-init.
-    - Bounce network interface for Azure when using the built-in path. [Brent Baude] (LP: #1674685)
-    - cloudinit.net: add network config v2 parsing and rendering [Ryan Harper]
-    - net: Fix incorrect call to isfile [Joshua Powers] (LP: #1674317)
-    - net: add renderers for automatically selecting the renderer.
-    - doc: fix config drive doc with regard to unpartitioned disks. (LP: #1673818)
-    - test: Adding integration test for password as list [Joshua Powers]
-    - render_network_state: switch arguments around, do not require target
-    - support 'loopback' as a device type.
-    - Integration Testing: improve testcase subclassing [Wesley Wiedenmeier]
-    - gitignore: adding doc/rtd_html [Joshua Powers]
-    - doc: add instructions for running integration tests via tox. [Joshua Powers]
-    - test: avoid differences in 'date' output due to daylight savings.
-    - Fix chef config module in omnibus install. [Jeremy Melvin] (LP: #1583837)
-    - Add feature flags to cloudinit.version. [Wesley Wiedenmeier]
-    - tox: add a citest environment
-    - Support chpasswd/list being a list in addition to a string. [Sergio Lystopad] (LP: #1665694)
-    - doc: Fix configuration example for cc_set_passwords module. [Sergio Lystopad] (LP: #1665773)
-    - net: support both ipv4 and ipv6 gateways in sysconfig. [Lars Kellogg-Stedman] (LP: #1669504)
-    - net: do not raise exception for > 3 nameservers [Lars Kellogg-Stedman] (LP: #1670052)
-
- -- Scott Moser <smoser@ubuntu.com>  Mon, 03 Apr 2017 11:52:56 -0400
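The hashed-password and chpasswd list changes in the stanza above accept cloud-config along these lines (the user name is illustrative and the hash is a placeholder, not a working value):

    #cloud-config
    chpasswd:
      expire: false
      list:
        - ubuntu:$6$SaltSalt$<sha512-crypt-hash-placeholder>

Values beginning with a crypt prefix such as $6$ are treated as already-hashed passwords rather than plain text.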
-
-cloud-init (0.7.9-48-g1c795b9-0ubuntu1~16.04.1) xenial-proposed; urgency=medium
-
-  * debian/rules: install Z99-cloudinit-warnings.sh to /etc/profile.d
-  * debian/patches/ds-identify-behavior-xenial.patch: adjust default behavior of ds-identify for SRU (LP: #1669675, #1660385).
-  * New upstream snapshot.
-    - Support warning if the used datasource is not in ds-identify's list (LP: #1669675).
-    - DatasourceEc2: add warning message when not on AWS. (LP: #1660385)
-    - Z99-cloudinit-warnings: Add profile.d script for showing warnings on login.
-    - Z99-cloud-locale-test.sh: convert tabs to spaces, remove unnecessary execute bit in permissions.
-    - (RedHat) net: correct errors in cloudinit/net/sysconfig.py [Lars Kellogg-Stedman]
-    - ec2_utils: fix MetadataLeafDecoder that returned bytes on empty
-    - Fix eni rendering of multiple IPs per interface [Ryan Harper] (LP: #1657940)
-    - Add 3 ecdsa-sha2-nistp* ssh key types now that they are standardized [Lars Kellogg-Stedman]
-    - EC2: Do not cache security credentials on disk [Andrew Jorgensen] (LP: #1638312)
-    - OpenStack: Use timeout and retries from config in get_data. [Lars Kellogg-Stedman] (LP: #1657130)
-    - Fixed Misc issues related to VMware customization. [Sankar Tanguturi]
-    - (RedHat) Use dnf instead of yum when available [Lars Kellogg-Stedman]
-    - Get early logging logged, including failures of cmdline url.
-    - test / doc / build environment changes
-    - Remove style checking during build and add latest style checks to tox [Joshua Powers]
-    - code-style: make master pass pycodestyle (2.3.1) cleanly, currently [Joshua Powers]
-    - Fix small typo and change iso-filename for consistency
-    - tools/mock-meta: support python2 or python3 and ipv6 in both.
-    - tests: remove executable bit on test_net, so it runs, and fix it.
-    - tests: No longer monkey patch httpretty for python 3.4.2
-    - reset httpretty for each test [Lars Kellogg-Stedman]
-    - build: fix running Make on a branch with tags other than master
-    - doc: Fix typos and clarify some aspects of the part-handler [Erik M. Bray]
-    - doc: add some documentation on OpenStack datasource.
-    - Fix minor docs typo: perserve > preserve [Jeremy Bicha]
-    - validate-yaml: use python rather than explicitly python3
-
- -- Scott Moser <smoser@ubuntu.com>  Mon, 06 Mar 2017 16:34:10 -0500
-
-cloud-init (0.7.9-0ubuntu1~16.04.2) xenial-proposed; urgency=medium
-
-  * debian/update-grub-legacy-ec2: fix shell syntax error. (LP: #1662221)
-
- -- Scott Moser <smoser@ubuntu.com>  Mon, 06 Feb 2017 16:18:28 -0500
-
-cloud-init (0.7.9-0ubuntu1~16.04.1) xenial-proposed; urgency=medium
-
-  * debian/copyright: update License field to include Apache.
-  * debian/update-grub-legacy-ec2: fix to include kernels whose config has CONFIG_XEN=y (LP: #1379080).
-  * debian/patches/azure-use-walinux-agent.patch: continue relying on walinux agent in stable release.
-  * New upstream release.
-    - doc: adjust headers in tests documentation for consistency.
-    - pep8: fix issue found in zesty build with pycodestyle.
-    - integration test: initial commit of integration test framework [Wesley Wiedenmeier]
-    - LICENSE: Allow dual licensing GPL-3 or Apache 2.0 [Jon Grimm]
-    - Fix config order of precedence, putting kernel command line over system. [Wesley Wiedenmeier] (LP: #1582323)
-    - pep8: whitespace fix [Scott Moser]
-    - Update the list of valid ssh keys. [Michael Felt]
-    - network: add ENI unit test for statically rendered routes.
-    - set_hostname: avoid erroneously appending domain to fqdn [Lars Kellogg-Stedman] (LP: #1647910)
-    - doc: change 'nobootwait' to 'nofail' in docs [Anhad Jai Singh]
-    - Replace an expired bit.ly link in code comment. [Joshua Harlow]
-    - user-groups: fix bug when groups was provided as string and had spaces [Scott Moser] (LP: #1354694)
-    - when adding a user, strip whitespace from group list [Lars Kellogg-Stedman] (LP: #1354694)
-    - fix decoding of utf-8 chars in yaml test
-    - Replace usage of sys_netdev_info with read_sys_net [Joshua Harlow] (LP: #1625766)
-    - fix problems found in python2.6 test. [Joshua Harlow]
-    - Just use file logging by default [Joshua Harlow] (LP: #1643990)
-    - Improve formatting for ProcessExecutionError [Wesley Wiedenmeier]
-    - flake8: fix trailing white space
-    - Doc: various documentation fixes [Sean Bright]
-    - cloudinit/config/cc_rh_subscription.py: Remove repos before adding [Brent Baude]
-    - packages/redhat: fix rpm spec file.
-    - main: set TZ in environment if not already set. [Ryan Harper]
-
- -- Scott Moser <smoser@ubuntu.com>  Fri, 20 Jan 2017 10:43:12 -0500
-
-cloud-init (0.7.8-49-g9e904bb-0ubuntu1~16.04.4) xenial; urgency=medium
-
-  * debian/update-grub-legacy-ec2: Correctly detect kernels ending in -aws as kernels that can boot on EC2 (LP: #1655934)
-
- -- Daniel Watkins <daniel.watkins@canonical.com>  Thu, 12 Jan 2017 11:56:03 +0000
-
-cloud-init (0.7.8-49-g9e904bb-0ubuntu1~16.04.3) xenial-proposed; urgency=medium
-
-  * debian/cherry-pick: use git format-patch rather than git show
-  * cherry-pick a9d41de: CloudSigma: Fix bug where datasource was not loaded in local (LP: #1648380)
-  * cherry-pick c9c9197: mounts: use mount -a again to accomplish mounts (LP: #1647708)
-
- -- Scott Moser <smoser@ubuntu.com>  Tue, 13 Dec 2016 16:02:50 -0500
-
-cloud-init (0.7.8-49-g9e904bb-0ubuntu1~16.04.2) xenial-proposed; urgency=medium
-
-  * cherry-pick 18203bf: disk_setup: Use sectors as unit when formatting MBR disks with sfdisk. (LP: #1460715)
-  * cherry-pick 6e92c5f: net/cmdline: Consider ip= or ip6= on command line not only ip= (LP: #1639930)
-  * cherry-pick 8c6878a: tests: fix assumptions that expected no eth0 in system. (LP: #1644043)
-  * cherry-pick 2d2ec70: OpenStack: extend physical types to include hyperv, hw_veb, vhost_user. (LP: #1642679)
-
- -- Scott Moser <smoser@ubuntu.com>  Thu, 01 Dec 2016 16:57:39 -0500
-
-cloud-init (0.7.8-49-g9e904bb-0ubuntu1~16.04.1) xenial-proposed; urgency=medium
-
-  * debian/cloud-init.postinst: update /etc/fstab on Azure to fix future resize operations. (LP: #1611074)
-  * New upstream snapshot.
-    - Add activate_datasource, for datasource specific code paths. (LP: #1611074)
-    - systemd: cloud-init-local use RequiresMountsFor=/var/lib/cloud (LP: #1642062)
-
- -- Scott Moser <smoser@ubuntu.com>  Fri, 18 Nov 2016 16:51:54 -0500
-
-cloud-init (0.7.8-47-gb6561a1-0ubuntu1~16.04.1) xenial-proposed; urgency=medium
-
-  * debian/cloud-init.templates: enable DigitalOcean by default [Ben Howard]
-  * New upstream snapshot.
-    - systemd/cloud-init-local.service:
-      + replace 'Wants' and 'After' on local-fs.target with the more granular After=systemd-remount-fs.service, RequiresMountsFor=/var/lib and Before=sysinit.target; this runs early enough to update /etc/fstab. (LP: #1611074)
-      + add Before=NetworkManager.service so that cloud-init can render NetworkManager network config before NetworkManager would apply it.
-    - systemd/cloud-init.service:
-      + add Before=sysinit.target and DefaultDependencies=no (LP: #1611074)
-      + drop Requires=networking.service to work where networking.service is not needed.
-      + add Conflicts=shutdown.target
-      + drop unnecessary Wants=local-fs.target
-    - net: support reading ipv6 dhcp config from initramfs [LaMont Jones] (LP: #1621615)
-    - dmidecode: Allow dmidecode to be used on aarch64, and only attempt usage on x86, x86_64, and aarch64. [Robert Schweikert]
-    - disk-config: udev settle after partitioning in gpt format. (LP: #1626243)
-    - Add support for snap create-user on Ubuntu Core images. [Ryan Harper] (LP: #1619393)
-    - Fix sshd restarts for rhel distros. [Jim Gorz]
-    - Move user/group functions to new ug_util file [Joshua Harlow]
-    - update Gentoo initscripts to run in the correct order [Matthew Thode]
-    - MAAS: improve the debugging tool in datasource to consider config provided on kernel cmdline.
-    - lxd: Update network config for LXD 2.3 [Stéphane Graber] (LP: #1640556)
-    - Decode unicode types in decode_binary [Robert Schweikert]
-    - Allow ephemeral drive to be unpartitioned [Paul Meyer]
-    - subp: add 'update_env' argument which allows for more easily adding environment variables to a subprocess call.
-    - Adjust mounts and disk configuration for systemd. (LP: #1611074)
-    - DataSources:
-      + Ec2: protect against non-dictionary in block-device-mapping.
-      + AliYun: Add new datasource for Ali-Cloud ECS, that is available but not enabled by default [kaihuan.pkh]
-      + DigitalOcean: use meta-data for network configuration and enable data source by default. [Ben Howard]
-      + OpenNebula: replace parsing of 'ip' command with a similar function available in cloudinit.net; this fixes unit tests when running in an environment with no networking.
-    - doc changes:
-      + Add documentation on stages of boot.
-      + make the RST files consistently formatted and other improvements.
-      + fixed example to not overwrite /etc/hosts [Chris Glass]
-      + fix spelling / typos in ca_certs and scripts_vendor.
-      + improve HACKING.rst file
-      + Add documentation for logging features. [Wesley Wiedenmeier]
-      + Improve module documentation and doc cleanup. [Wesley Wiedenmeier]
-    - code style and unit test changes:
-      + pep8: fix style errors reported by pycodestyle 2.1.0
-      + pyflakes: fix issue with pyflakes 1.3 found in ubuntu zesty-proposed.
-      + Add coverage dependency to bddeb to fix package build.
-      + Add coverage collection to tox unit tests. [Joshua Powers]
-      + do not read system /etc/cloud/cloud.cfg.d (LP: #1635350)
-      + tests: silence the Cheetah UserWarning about NameMapper C version.
-      + Fix python2.6 things found running in centos 6.
-
- -- Scott Moser <smoser@ubuntu.com>  Tue, 15 Nov 2016 17:29:12 -0500
-
-cloud-init (0.7.8-1-g3705bb5-0ubuntu1~16.04.3) xenial-proposed; urgency=medium
-
-  * ntp: move to run after apt configuration (LP: #1628337).
-
- -- Scott Moser <smoser@ubuntu.com>  Mon, 03 Oct 2016 12:22:26 -0400
-
-cloud-init (0.7.8-1-g3705bb5-0ubuntu1~16.04.2) xenial; urgency=medium
-
-  * Support IPv6 config coming from initramfs. (LP: #1621615)
-
- -- LaMont Jones <lamont@ubuntu.com>  Fri, 23 Sep 2016 20:54:40 -0600
-
-cloud-init (0.7.8-1-g3705bb5-0ubuntu1~16.04.1) xenial-proposed; urgency=medium
-
-  * New upstream release 0.7.8.
-  * New upstream snapshot.
-    - systemd: put cloud-init.target After multi-user.target (LP: #1623868)
-
- -- Scott Moser <smoser@ubuntu.com>  Thu, 15 Sep 2016 09:57:27 -0400
-
-cloud-init (0.7.7-31-g65ace7b-0ubuntu1~16.04.2) xenial-proposed; urgency=medium
-
-  * debian/control: add Breaks of older versions of walinuxagent (LP: #1623570)
-
- -- Scott Moser <smoser@ubuntu.com>  Wed, 14 Sep 2016 16:39:50 -0400
-
-cloud-init (0.7.7-31-g65ace7b-0ubuntu1~16.04.1) xenial-proposed; urgency=medium
-
-  * debian/control: fix missing dependency on python3-serial, and make SmartOS datasource work.
-  * debian/cloud-init.templates: fix capitalisation in template so dpkg-reconfigure works to select OpenStack. (LP: #1575727)
-  * d/README.source, d/control, d/new-upstream-snapshot, d/rules: sync with yakkety for changes due to move to git.
-  * d/rules: change PYVER=python3 to PYVER=3 to adjust to upstream change.
-  * debian/rules, debian/cloud-init.install: remove install file to ensure expected files are collected into cloud-init deb. (LP: #1615745)
-  * debian/dirs: remove obsolete / unused file.
-  * upstream move from bzr to git.
-  * New upstream snapshot.
-    - Allow link type of null in network_data.json [Jon Grimm] (LP: #1621968)
-    - DataSourceOVF: fix user-data as base64 with python3 (LP: #1619394)
-    - remove obsolete .bzrignore
-    - systemd: Better support package and upgrade. (LP: #1576692, #1621336)
-    - tests: cleanup tempdirs in apt_source tests
-    - apt config conversion: treat empty string as not provided. (LP: #1621180)
-    - Fix typo in default keys for phone_home [Roland Sommer] (LP: #1607810)
-    - salt minion: update default pki directory for newer salt minion. (LP: #1609899)
-    - bddeb: add --release flag to specify the release in changelog.
-    - apt-config: allow both old and new format to be present. [Christian Ehrhardt] (LP: #1616831)
-    - python2.6: fix dict comprehension usage in _lsb_release. [Joshua Harlow]
-    - Add a module that can configure spacewalk. [Joshua Harlow]
-    - add install option for openrc [Matthew Thode]
-    - Generate a dummy bond name for OpenStack (LP: #1605749)
-    - network: fix get_interface_mac for bond slave, read_sys_net for ENOTDIR
-    - azure dhclient-hook cleanups
-    - Minor cleanups to atomic_helper and add unit tests.
-    - Fix Gentoo net config generation [Matthew Thode]
-    - distros: fix get_primary_arch method use of os.uname [Andrew Jorgensen]
-    - Apt: add new apt configuration format [Christian Ehrhardt]
-    - Get Azure endpoint server from DHCP client [Brent Baude]
-    - DigitalOcean: use the v1.json endpoint [Ben Howard]
-    - MAAS: add vendor-data support (LP: #1612313)
-    - Upgrade to a configobj package new enough to work [Joshua Harlow]
-    - ConfigDrive: recognize 'tap' as a link type. (LP: #1610784)
-    - NoCloud: fix bug providing network-interfaces via meta-data. (LP: #1577982)
-    - Add distro tags on config modules that should have it [Joshua Harlow]
-    - ChangeLog: update changelog for previous commit.
-    - add ntp config module [Ryan Harper]
-    - SmartOS: more improvements for network configuration
-    - tools/read-version: update to address change in version
-    - make-tarball: older versions of git with --format=tar.
-    - read-version: do not attempt git-describe if no git.
-    - Newer requests have strong type validation [Joshua Harlow]
-    - For upstream snapshot versions do not modify git-describe output.
-    - adjust signal_handler for version changes.
-    - revert unintended change to ubuntu sources list
-    - drop modification of version during make-tarball, tools changes.
-    - adjust tools and version information.
-    - Update build tools to work with git [Lars Kellogg-Stedman]
-    - fix pep8 errors in mcollective unit tests
-    - mcollective: add tests, cleanups and bug fix when no config in /etc.
-
- -- Scott Moser <smoser@ubuntu.com>  Mon, 12 Sep 2016 16:05:25 -0400
-
-cloud-init (0.7.7~bzr1256-0ubuntu1~16.04.1) xenial-proposed; urgency=medium
-
-  * New upstream snapshot.
-    - distros/debian.py: fix eni renderer to not render .link files
-    - fixes for execution in python2.6.
-    - ConfigDrive: fix writing of 'injected' files and legacy networking (LP: #1602373)
-    - improvements to /etc/network/interfaces rendering, including rendering of 'lo' devices and sorting attributes within an interface section.
-    - fix mcollective module that was completely broken if using python3 (LP: #1597699)
-
- -- Scott Moser <smoser@ubuntu.com>  Fri, 15 Jul 2016 13:27:04 -0400
-
-cloud-init (0.7.7~bzr1246-0ubuntu1~16.04.1) xenial-proposed; urgency=medium
-
-  * New upstream snapshot.
-    - fix restoring from a datasource that did not have dsmode (LP: #1596690)
-
- -- Scott Moser <smoser@ubuntu.com>  Mon, 27 Jun 2016 16:31:37 -0400
-
-cloud-init (0.7.7~bzr1245-0ubuntu1~16.04.1) xenial-proposed; urgency=medium
-
-  * debian/new-upstream-snapshot: minor change supporting revision passed in as an argument.
-  * debian/control: Build-Depends on python3-unittest2
-  * SRU Upstream to 16.04 (LP: #1595302).
-    - user_data: fix error when user-data is not utf-8 decodable
-    - write_files: if no permissions are provided, use the default without logging a warning.
-    - do not write /etc/systemd/network/50-cloud-init-*.link files
-    - fix several potential errors identified by pylint.
-    - move 'main' into cloudinit/cmd/ for easier testing
-    - Remove trailing dot from GCE metadata URL [Phil Roche]
-    - Refactor cloudinit networking module to improve testing
-    - Change missing Cheetah log warning to debug [Andrew Jorgensen]
-    - network configuration improvements
-    - centrally handle 'dsmode' (DataSource mode) to be 'local' or 'net'.
-    - support networking information being read on dreamcompute
-    - support reading and applying networking information on SmartOS
-    - improve reading networking from openstack network_data.json
-    - support for renaming devices in a container.
-    - remove blocking of udev rules
-    - Apt sources configuration improvements
-    - cloud-config specified on kernel command line will now override system settings.
-    - fix timestamp in reporting events.
-    - Paths: fix instance path if datasource's id has a '/'.
-    - Config Drive: fix check_instance_id signature.
-    - cloudstack: Only use DHCPv4 lease files as a datasource
-
- -- Scott Moser <smoser@ubuntu.com>  Wed, 22 Jun 2016 16:06:49 -0400
-
-cloud-init (0.7.7~bzr1212-0ubuntu1) xenial; urgency=medium
-
-  * New upstream snapshot.
-    - fix iscsi root by not writing interface as 'auto' when networking information comes from kernel command line (LP: #1568637)
-    - apply networking less often, when possible only on first instance boot (LP: #1571004).
-    - no longer delete /etc/network/interfaces.d/eth0.cfg on ubuntu (LP: #1563487)
-
- -- Scott Moser <smoser@ubuntu.com>  Fri, 15 Apr 2016 16:25:43 -0400
-
-cloud-init (0.7.7~bzr1209-0ubuntu1) xenial; urgency=medium
-
-  * New upstream snapshot.
-    - fallback net config: do not consider devices starting with 'veth' (LP: #1569064)
-
- -- Scott Moser <smoser@ubuntu.com>  Thu, 14 Apr 2016 16:24:38 -0400
-
-cloud-init (0.7.7~bzr1208-0ubuntu1) xenial; urgency=medium
-
-  * New upstream snapshot.
-    - phone_home: allow usage of fqdn (LP: #1566824) [Ollie Armstrong]
-    - chef: straighten out validation_cert and validation_key (LP: #1568940)
-    - skip bridges when generating fallback networking (LP: #1569974)
-    - rh_subscription: only check subscription if configured (LP: #1536706)
-    - SmartOS, CloudSigma: fix error when dmi data is not available (LP: #1569469)
-    - DataSourceNoCloud: fix check_instance_id when upgraded (LP: #1568150)
-    - lxd: adds basic support for dpkg based lxd-bridge configuration. (LP: #1569018)
-    - centos: Ensure that a resolv.conf object is written as a string. (LP: #1479988)
-
- -- Scott Moser <smoser@ubuntu.com>  Wed, 13 Apr 2016 13:19:03 -0400
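The phone_home fqdn change in the bzr1208 stanza above adds 'fqdn' to the data items that can be posted; a minimal sketch of that config (the URL is illustrative):

    #cloud-config
    phone_home:
      url: http://example.com/$INSTANCE_ID/
      post:
        - instance_id
        - fqdn
      tries: 10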
-
-cloud-init (0.7.7~bzr1200-0ubuntu1) xenial; urgency=medium
-
-  * New upstream snapshot.
-    - fix adding of users when no group is specified (LP: #1562918)
-    - fix write_files with compressed content (LP: #1565638)
-    - systemd: do not specify After of obsolete syslog.target (LP: #1536964)
-    - chef: fix chef installation from gems (LP: #1553345)
-    - disk_setup: correctly send force to mkfs on block devices (LP: #1548772)
-    - locale: list unsupported environment settings in warning (LP: #1558069)
-    - fix NoCloud and other datasources if no datasource config (LP: #1514407)
-
- -- Scott Moser <smoser@ubuntu.com>  Wed, 06 Apr 2016 13:07:27 -0400
-
-cloud-init (0.7.7~bzr1192-0ubuntu2) xenial; urgency=medium
-
-  [Logan Rosen]
-  * debian/cloud-init.postinst: fix shell syntax in upgrade (LP: #1564187)
-
- -- Scott Moser <smoser@ubuntu.com>  Thu, 31 Mar 2016 10:09:09 -0400
-
-cloud-init (0.7.7~bzr1192-0ubuntu1) xenial; urgency=medium
-
-  * New upstream snapshot.
-    - Misc fixes for VMware Support.
-    - network config improvements:
-      - do not raise exception on non-debian if network config is found.
-      - apply network config in 'cloud-init --local' even if there is no datasource found.
-      - do not write 70-persistent-net.rules, only systemd .link files.
-      - improve how cloud-init-wait waits: remove the check-and-sleep loop and replace it with 'udevadm settle'.
-
- -- Scott Moser <smoser@ubuntu.com>  Tue, 29 Mar 2016 16:34:25 -0400
-
-cloud-init (0.7.7~bzr1189-0ubuntu1) xenial; urgency=medium
-
-  * d/control: adjust build dependency as python3-pyflakes in xenial now provides python3 modules for pyflakes.
-  * d/cloud-init.install, d/control, d/dirs, d/grub-legacy-ec2.install: run wrap-and-sort
-  * d/cloud-init.install: add lib/udev/ files.
-  * d/cloud-init.postinst: touch /var/lib/cloud/data/upgraded-network if upgrading from a version that did not have network support.
-  * New upstream snapshot.
-  * initial support for network configuration in cloud-init --local
-  * do not rely on network metadata service on every boot for ConfigDrive, OpenStack, Azure (LP: #1553815)
-
- -- Scott Moser <smoser@ubuntu.com>  Thu, 24 Mar 2016 17:36:40 -0400
-
-cloud-init (0.7.7~bzr1182-0ubuntu1) xenial; urgency=medium
-
-  * New upstream snapshot.
-  * systemd changes enforcing intended ordering (cloud-init-local.service before networking and cloud-init.service before networking comes up).
-  * when reading dmidecode data, return a found but unset value as "" rather than failing to decode that value.
-  * add default user to 'lxd' group and create groups when necessary (LP: #1539317)
-  * No longer run pollinate in seed_random (LP: #1554152)
-  * Enable BigStep data source.
-
- -- Scott Moser <smoser@ubuntu.com>  Mon, 14 Mar 2016 09:58:56 -0400
-
-cloud-init (0.7.7~bzr1176-0ubuntu1) xenial; urgency=medium
-
-  * d/README.source, d/new-upstream-snapshot: simplify the README.source with a script.
-  * d/rules: support DEB_BUILD_OPTIONS=nocheck and remove unused code.
-  * d/rules: make tests with python3
-  * d/control: add pep8 as a build dependency
-  * d/cloud-init.preinst, d/cloud-init.postinst: adjust upgrade path to adjust systemd jobs that put cloud-init unit jobs directly in multi-user.target.
-  * New upstream snapshot.
-  * Add Image Customization Parser for VMware vSphere Hypervisor Support. Disabled by default. [Sankar Tanguturi]
-  * lxd: add initial support for setting up lxd using 'lxd init'
-  * Handle escaped quotes in WALinuxAgentShim.find_endpoint (LP: #1488891)
-  * timezone: use a symlink when updating /etc/localtime (LP: #1543025)
-  * enable more code testing in 'make check'
-  * Added Bigstep datasource [Daniel Watkins]
-  * Enable password changing via a hashed string [Alex Sirbu]
-
- -- Scott Moser <smoser@ubuntu.com>  Fri, 04 Mar 2016 15:44:02 -0500
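The 'lxd init' support in the bzr1176 stanza above is driven by cloud-config along these lines (a minimal sketch; the values shown are illustrative):

    #cloud-config
    lxd:
      init:
        storage_backend: dir
        network_address: 0.0.0.0
        network_port: 8443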
-
-cloud-init (0.7.7~bzr1160-0ubuntu1) xenial; urgency=medium
-
-  * New upstream snapshot.
-  * SmartOS: Add support for Joyent LX-Brand Zones (LP: #1540965)
-  * systemd/power_state: fix power_state when cloud-final exited with failure (LP: #1449318)
-
- -- Scott Moser <smoser@ubuntu.com>  Thu, 04 Feb 2016 17:22:36 -0500
-
-cloud-init (0.7.7~bzr1156-0ubuntu2) xenial; urgency=medium
-
-  * debian/cloud-init.postinst, systemd_detect_virt.patch: Call systemd-detect-virt instead of the Ubuntu specific running-in-container wrapper. (LP: #1539016)
-
- -- Martin Pitt <martin.pitt@ubuntu.com>  Thu, 28 Jan 2016 14:12:51 +0100
-
-cloud-init (0.7.7~bzr1156-0ubuntu1) xenial; urgency=medium
-
-  * New upstream snapshot.
-  * d/cloud-init.preinst: migrate Azure instance ID from old ID to stable ID (LP: #1506187).
-
- -- Ben Howard <ben.howard@ubuntu.com>  Tue, 17 Nov 2015 11:59:49 -0700
-
-cloud-init (0.7.7~bzr1155-0ubuntu1) xenial; urgency=medium
-
-  * New upstream snapshot.
-  * cc_mounts: use 'nofail' rather than 'nobootwait' if system uses systemd (LP: #1514485).
-
- -- Scott Moser <smoser@ubuntu.com>  Tue, 10 Nov 2015 12:35:31 -0500
-
-cloud-init (0.7.7~bzr1154-0ubuntu1) xenial; urgency=medium
-
-  * New upstream snapshot.
-  * create the same /etc/apt/sources.list that is present in default server ISO installs. This change adds restricted, multiverse, and -backports (LP: #1177432).
-
- -- Scott Moser <smoser@ubuntu.com>  Thu, 05 Nov 2015 12:10:00 -0500
-
-cloud-init (0.7.7~bzr1149-0ubuntu1) wily; urgency=medium
-
-  * New upstream snapshot.
-    - azure: support extracting SSH key values from ovf-env.xml (LP: #1506244)
-
- -- Scott Moser <smoser@ubuntu.com>  Wed, 14 Oct 2015 20:38:45 -0400
-
-cloud-init (0.7.7~bzr1148-0ubuntu1) wily; urgency=medium
-
-  * New upstream snapshot.
-  * support configuring and installing the Ubuntu fan driver (LP: #1504604)
-
- -- Scott Moser <smoser@ubuntu.com>  Fri, 09 Oct 2015 13:00:12 -0400
-
-cloud-init (0.7.7~bzr1147-0ubuntu1) wily; urgency=medium
-
-  * New upstream snapshot.
-  * MAAS: fix oauth when system clock is bad (LP: #1499869)
-
- -- Scott Moser <smoser@ubuntu.com>  Tue, 29 Sep 2015 20:16:57 -0400
-
-cloud-init (0.7.7~bzr1146-0ubuntu2) wily; urgency=medium
-
-  * replace usage of python with python3 in postinst (LP: #1498493)
-
- -- Scott Moser <smoser@ubuntu.com>  Tue, 22 Sep 2015 09:49:01 -0400
-
-cloud-init (0.7.7~bzr1146-0ubuntu1) wily; urgency=medium
-
-  * New upstream snapshot.
-  * make the webhook reporter post json data rather than urlencoded data (LP: #1496960)
-
- -- Scott Moser <smoser@ubuntu.com>  Thu, 17 Sep 2015 15:59:35 -0400
-
-cloud-init (0.7.7~bzr1145-0ubuntu1) wily; urgency=medium
-
-  * New upstream snapshot.
-  * fix default builtin config of snappy to set ssh_enabled=auto.
-
- -- Scott Moser <smoser@ubuntu.com>  Tue, 15 Sep 2015 15:33:19 -0400
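The cc_mounts 'nofail' change in the bzr1155 stanza above affects the fstab entries cloud-init generates on systemd systems; mount options can also be given explicitly in cloud-config, as in this sketch (the fstab fields shown are illustrative):

    #cloud-config
    mounts:
      - [ ephemeral0, /mnt, auto, "defaults,nofail", "0", "2" ]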
-
-cloud-init (0.7.7~bzr1144-0ubuntu1) wily; urgency=medium
-
-  * New upstream snapshot.
-
-  [ Ben Howard ]
-  * snappy: enable ssh on snappy if ssh keys are provided or password authentication is requested (LP: #1494816)
-
- -- Scott Moser <smoser@ubuntu.com>  Fri, 11 Sep 2015 17:00:16 -0400
-
-cloud-init (0.7.7~bzr1143-0ubuntu1) wily; urgency=medium
-
-  * New upstream snapshot.
-  * NoCloud: fix consumption of vendor-data (LP: #1493453)
-  * power_state: support power change only on 'condition'
-
- -- Scott Moser <smoser@ubuntu.com>  Thu, 10 Sep 2015 16:20:44 -0400
-
-cloud-init (0.7.7~bzr1141-0ubuntu1) wily; urgency=medium
-
-  * New upstream snapshot.
-  * fix bug in cloud-init main preventing any usage.
-
- -- Scott Moser <smoser@ubuntu.com>  Thu, 03 Sep 2015 11:36:37 -0400
-
-cloud-init (0.7.7~bzr1140-0ubuntu1) wily; urgency=medium
-
-  * New upstream snapshot.
-  * Handle symlink mount points in mount_cb (LP: #1490796).
-  * sync curtin reporting changes back to cloud-init.
-
- -- Scott Moser <smoser@ubuntu.com>  Wed, 02 Sep 2015 16:50:54 -0400
-
-cloud-init (0.7.7~bzr1138-0ubuntu1) wily; urgency=medium
-
-  * New upstream snapshot.
-  * MAAS: fixes to data source and OauthUrlHelper (LP: #1488507)
-
- -- Scott Moser <smoser@ubuntu.com>  Tue, 25 Aug 2015 15:28:06 -0400
-
-cloud-init (0.7.7~bzr1137-0ubuntu1) wily; urgency=medium
-
-  * New upstream snapshot.
-  * swap: use fallocate to create swapfile for speedup. (LP: #1482994)
-  * reporting: print handler fix.
-
- -- Scott Moser <smoser@ubuntu.com>  Wed, 12 Aug 2015 12:54:51 -0400
-
-cloud-init (0.7.7~bzr1135-0ubuntu1) wily; urgency=medium
-
-  * New upstream snapshot.
-  * some stack traces fixed.
-  * improved rsyslog/syslog config format
-  * add reporting module for webhook or log reporting of events.
-
- -- Scott Moser <smoser@ubuntu.com>  Fri, 07 Aug 2015 17:16:51 -0400
-
-cloud-init (0.7.7~bzr1127-0ubuntu1) wily; urgency=medium
-
-  [ Scott Moser ]
-  * d/README.source, debian/cherry-pick-rev: improve packaging tool
-
-  [ Daniel Watkins ]
-  * d/cloud-init.templates: Include SmartOS data source in the default list and choices. (LP: #1398997)
-
-  [ Scott Moser ]
-  * New upstream snapshot.
-  * check for systemd using sd_booted semantics (LP: #1461201)
-  * fix importing of gpg keys in python3 (LP: #1463373)
-  * fix specification of devices to growpart (LP: #1465436)
-  * reliably detect and use Azure disks using udev rules (LP: #1411582)
-  * support selection of Ubuntu mirrors on GCE (LP: #1470890)
-  * ssh: generate ed25519 host keys if supported (LP: #1461242)
-  * test fixes and cleanups
-  * fix reading of availability-zone on GCE (LP: #1470880)
-  * fix cloudsigma datasource with python3 (LP: #1475215)
-  * fix rightscale user-data
-  * fix consumption of CloudStack passwords on newer CloudStack platforms (LP: #1440263, #1464253)
-
- -- Scott Moser <smoser@ubuntu.com>  Wed, 22 Jul 2015 17:06:18 -0400
-
-cloud-init (0.7.7~bzr1109-0ubuntu2) wily; urgency=medium
-
-  * d/control: cloud-init Recommends gdisk (LP: #1462521)
-
- -- Scott Moser <smoser@ubuntu.com>  Fri, 05 Jun 2015 16:33:38 -0400
-
-cloud-init (0.7.7~bzr1109-0ubuntu1) wily; urgency=medium
-
-  * New upstream snapshot.
-  * Azure: Redact on-disk user password in /var/lib/ovf-env.xml (LP: #1311827)
-  * EC2: be aware of eu-central-1 availability zone (LP: #1456684)
-
- -- Scott Moser <smoser@ubuntu.com>  Thu, 28 May 2015 10:54:45 -0400
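The bzr1137 swap change above speeds up creation of the swap file that cloud-config can request; a minimal sketch of that config (size values are illustrative):

    #cloud-config
    swap:
      filename: /swapfile
      size: auto
      maxsize: 2147483648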
-
-cloud-init (0.7.7~bzr1106-0ubuntu1) wily; urgency=medium
-
-  * New upstream snapshot.
-  * Azure: remove strict dependency on walinux-agent, but still utilize it for the time being.
-  * fix read_seeded that is used in seeding user-data and meta-data from additional locations (LP: #1455233)
-  * fix bug preventing partitioning of disks in many cases. (LP: #1311463)
-  * Azure: do not override hostname if user has set it (LP: #1375252)
-  * Fix GCE datasource not handling per-instance SSH keys (LP: #1403617)
-  * Allow specifying uid in user/group config.
-
- -- Scott Moser <smoser@ubuntu.com>  Fri, 15 May 2015 17:04:19 -0400
-
-cloud-init (0.7.7~bzr1091-0ubuntu1) vivid; urgency=medium
-
-  * New upstream snapshot.
-  * fix processing of user-data in cloud-config-archive format (LP: #1445143)
-
- -- Scott Moser <smoser@ubuntu.com>  Fri, 17 Apr 2015 12:04:16 -0400
-
-cloud-init (0.7.7~bzr1088-0ubuntu3) vivid; urgency=medium
-
-  * systemd/*.service: don't declare a Wants/Requires on network.target; this is a passive target that should only be pulled in by implementors of the networking service. The requirement for network needs to be expressed as a dependency on network-online.target. (LP: #1440180)
-
- -- Steve Langasek <steve.langasek@ubuntu.com>  Thu, 09 Apr 2015 07:35:55 -0700
-
-cloud-init (0.7.7~bzr1088-0ubuntu2) vivid; urgency=medium
-
-  [ Didier Roche ]
-  * Don't start or restart cloud-init services on install and upgrade (LP: #1438520)
-
-  [ Scott Moser ]
-  * d/control: Build-Depends on iproute2 (tests)
-  * d/control: Only Recommend (not both Depend and Recommend) software-properties-common
-
- -- Scott Moser <smoser@ubuntu.com>  Fri, 03 Apr 2015 11:13:28 -0400
-
-cloud-init (0.7.7~bzr1088-0ubuntu1) vivid; urgency=medium
-
-  * New upstream snapshot.
-  * adjust cc_snappy for snappy install package with config. (LP: #1438836) snappy install takes config as an argument rather than a '--config' flag.
-
- -- Scott Moser <smoser@ubuntu.com>  Tue, 31 Mar 2015 14:21:48 -0400
-
-cloud-init (0.7.7~bzr1087-0ubuntu1) vivid; urgency=medium
-
-  * New upstream snapshot.
-  * SmartOS: update ds to use v2 metadata. (LP: #1436417) [Daniel Watkins]
-  * Fix NoCloud local datasource to only activate if told to do so.
-  * fix snappy package installation. (LP: #1437137)
-
- -- Scott Moser <smoser@ubuntu.com>  Fri, 27 Mar 2015 17:09:34 -0400
-
-cloud-init (0.7.7~bzr1084-0ubuntu1) vivid; urgency=medium
-
-  * New upstream snapshot.
-  * systemd: update config and final to run even if init jobs fail (LP: #1432758)
-  * emit_upstart: fix use of undeclared variable
-  * SmartOS: fixes for python3 reading from serial device.
-
- -- Scott Moser <smoser@ubuntu.com>  Tue, 17 Mar 2015 16:48:42 -0400
-
-cloud-init (0.7.7~bzr1081-0ubuntu1) vivid; urgency=medium
-
-  * fix failure of configure due to postinst usage of iteritems with python3
-  * New upstream snapshot.
-  * better python3 handling of ignored binary mime parts
-  * DataSourceMAAS: fix usage of oauthlib and 'timestamp'
-
- -- Scott Moser <smoser@ubuntu.com>  Wed, 11 Mar 2015 13:52:04 -0400
-
-cloud-init (0.7.7~bzr1078-0ubuntu1) vivid; urgency=medium
-
-  * New upstream snapshot.
-  * run snappy module only on snappy (LP: #1428495)
-  * MAAS: adjust timestamp on oauthlib when needed (LP: #1427939)
-
- -- Scott Moser <smoser@ubuntu.com>  Thu, 05 Mar 2015 15:22:53 -0500
-
-cloud-init (0.7.7~bzr1076-0ubuntu1) vivid; urgency=medium
-
-  * New upstream snapshot.
-  * test case fixes for httpretty
-  * python2.6 fixes
-  * Convert dmidecode values to sysfs names before looking. (LP: #1427687)
-  * add snappy support (LP: #1428139)
-
- -- Scott Moser <smoser@ubuntu.com>  Wed, 04 Mar 2015 17:13:55 -0500
-
-cloud-init (0.7.7~bzr1072-0ubuntu1) vivid; urgency=medium
-
-  * d/control: move software-properties-common to Recommends; this helps reduce snappy builds
-  * New upstream snapshot.
-  * fix MAAS datasource (LP: #1427263)
-
- -- Scott Moser <smoser@ubuntu.com>  Tue, 03 Mar 2015 20:46:41 -0500
-
-cloud-init (0.7.7~bzr1067-0ubuntu1) vivid; urgency=medium
-
-  * New upstream snapshot.
-  * fix broken consumption of gzipped user-data (LP: #1424900)
-  * functional user-data on Azure again (LP: #1423972)
-  * CloudStack: support fetching password from virtual router (LP: #1422388)
-
- -- Scott Moser <smoser@ubuntu.com>  Thu, 26 Feb 2015 14:19:16 -0500
-
-cloud-init (0.7.7~bzr1060-0ubuntu1) vivid; urgency=medium
-
-  * New upstream snapshot.
-  * Fix for ascii decode in DataSourceAzure (LP: #1422993).
-
- -- Scott Moser <smoser@ubuntu.com>  Fri, 20 Feb 2015 08:05:20 -0500
-
-cloud-init (0.7.7~bzr1059-0ubuntu1) vivid; urgency=medium
-
-  * New upstream snapshot.
-  * support for gpt partitioning, utilized in Azure [Daniel Watkins]
-  * fix bug in exception handling in mount_cb.
-
- -- Scott Moser <smoser@ubuntu.com>  Fri, 13 Feb 2015 16:05:59 -0500
-
-cloud-init (0.7.7~bzr1055-0ubuntu1) vivid; urgency=medium
-
-  * New upstream snapshot.
-  * move to python3 (LP: #1247132)
-  * systemd: run cloud-init before systemd-user-sessions.service
-  * Use the GCE short hostname. (LP: #1383794)
-  * Enable user-data encoding support for GCE. (LP: #1404311)
-  * Update to use a newer and better OMNIBUS_URL
-  * Be more tolerant of 'ssh_authorized_keys' types
-  * Fix parse_ssh_config failing in ssh_util.py
-  * Increase the robustness/configurability of the chef module.
-  * retain trailing newline from template files when using jinja2 (LP: #1355343)
-  * fix broken output handling (LP: #1387340)
-  * digital ocean datasource
-  * update url in config drive documentation
-  * freebsd: enable correct behavior on Ec2.
-  * freebsd: Use the proper virtio FreeBSD network interface name.
-
- -- Scott Moser <smoser@ubuntu.com>  Wed, 11 Feb 2015 15:55:58 -0500
-
-cloud-init (0.7.6~bzr1022-0ubuntu1) utopic; urgency=medium
-
-  * New upstream snapshot.
-  * support for writing swap files per user config (disabled by default).
-  * fixes for freebsd support of config drive.
-
- -- Scott Moser <smoser@ubuntu.com>  Wed, 01 Oct 2014 16:27:44 -0400
-
-cloud-init (0.7.6~bzr1020-0ubuntu1) utopic; urgency=medium
-
-  [ Ben Howard ]
-  * Fix for cloud-init misidentifying grub install device (LP: #1336855).
-
-  [ Scott Moser ]
-  * New upstream snapshot.
-  * cc_grub_dpkg: consider /dev/xvda as candidate for grub installation (LP: #1336855)
-  * resizefs: fix backgrounding of resizefs (LP: #1338614)
-  * cloud-init-blocknet: remove debug code
-
- -- Scott Moser <smoser@ubuntu.com>  Tue, 23 Sep 2014 14:20:09 -0400
-
-cloud-init (0.7.6~bzr1016-0ubuntu1) utopic; urgency=medium
-
-  * New upstream snapshot.
-  * upstart: block networking from coming up until after cloud-init-local has run. (LP: #1368861)
-  * upstart: ensure /run mounted before cloud-init-local (LP: #1353008)
-  * log failures in route info.
-  * Openstack: do not search redundant urls for openstack datasource; do not load urls just to check if they're present when crawling.
-  * ConfigDrive: fixes for better consumption of vendor-data
-  * resizefs: make sure target is writable before attempting, in order to avoid failures in containers. (LP: #1366891)
-  * freebsd fixes.
-  * ssh_authkey_fingerprints: correctly allow disabling (LP: #1340903)
-  * CloudStack: work around bug in metadata service (LP: #1356855)
-  * docs: fix doc about disk-setup 'table_type' (LP: #1313114)
-  * resolv.conf: fix rendering if options not provided (LP: #1328953)
-  * ensure keys exist before running ssh on systemd (LP: #1333920)
-
- -- Scott Moser <smoser@ubuntu.com>  Fri, 12 Sep 2014 18:12:31 -0400
-
-cloud-init (0.7.6~bzr992-0ubuntu1) utopic; urgency=medium
-
-  * New upstream snapshot.
-  * ubuntu-init-switch: fixes and minor tweaks.
-
- -- Scott Moser <smoser@ubuntu.com>  Thu, 31 Jul 2014 16:11:27 -0400
-
-cloud-init (0.7.6~bzr990-0ubuntu1) utopic; urgency=medium
-
-  * debian/README.source: get changelog from trunk when new snapshot
-  * New upstream snapshot.
-  * add ubuntu-init-switch module for testing systemd.
-  * do not put comments in /etc/timezone (LP: #1341710)
-
- -- Scott Moser <smoser@ubuntu.com>  Thu, 31 Jul 2014 15:05:51 -0400
-
-cloud-init (0.7.6~bzr987-0ubuntu2) utopic; urgency=medium
-
-  * debian/control: drop build-depends on python-selinux as it is not in main.
-
- -- Scott Moser <smoser@ubuntu.com>  Fri, 25 Jul 2014 12:47:39 -0400
-
-cloud-init (0.7.6~bzr987-0ubuntu1) utopic; urgency=medium
-
-  * incorporate systemd and build system changes from trunk.
-  * New upstream snapshot.
-  * less noisy logs
-  * allow usage of jinja2 templates (LP: #1219223)
-  * test case cleanups
-  * change build system to be dh and pybuild
-
- -- Scott Moser <smoser@ubuntu.com>  Thu, 24 Jul 2014 19:55:30 -0400
-
-cloud-init (0.7.6~bzr976-0ubuntu1) utopic; urgency=medium
-
-  * debian/cloud-init.templates: fix choices so dpkg-reconfigure works as expected (LP: #1325746)
-  * New upstream snapshot.
-  * tests: SmartOS test does not depend on /dev/ttyS1 device node (LP: #1316597)
-  * poll ttyS1 only after check for 'cloudsigma' in dmidecode (LP: #1316475)
-  * cloudsigma: support vendor-data (LP: #1303986)
-
- -- Scott Moser <smoser@ubuntu.com>  Tue, 03 Jun 2014 16:41:07 -0400
-
-cloud-init (0.7.5-0ubuntu1) trusty; urgency=medium
-
-  * New upstream release.
-  * support base64 encoded user-data in OpenNebula, required to allow arbitrary content in user-data (LP: #1300941)
-  * pep8 and pylint fixes
-
- -- Scott Moser <smoser@ubuntu.com>  Tue, 01 Apr 2014 14:39:03 -0400
-
-cloud-init (0.7.5~bzr970-0ubuntu1) trusty; urgency=medium
-
-  * New upstream snapshot.
-  * fix NoCloud and seedfrom on the kernel command line (LP: #1295223)
-
- -- Scott Moser <smoser@ubuntu.com>  Thu, 20 Mar 2014 12:35:58 -0400
-
-cloud-init (0.7.5~bzr969-0ubuntu1) trusty; urgency=medium
-
-  * New upstream snapshot.
-  * Azure: Reformat ephemeral disk if it got re-provisioned by the cloud on any reboot (LP: #1292648)
-  * final_message: fix replacement of upper case keynames (LP: #1286164)
-  * seed_random: do not capture output. Correctly provide environment variable RANDOM_SEED_FILE to command.
-  * CloudSigma: support base64 encoded user-data
-
- -- Scott Moser <smoser@ubuntu.com>  Wed, 19 Mar 2014 14:04:34 -0400
-
-cloud-init (0.7.5~bzr964-0ubuntu1) trusty; urgency=medium
-
-  * New upstream snapshot.
-  * SmartOS, AltCloud: disable running on arm systems due to bug (LP: #1243287, #1285686) [Oleg Strikov]
-  * Allow running a command to seed random, default is 'pollinate -q' (LP: #1286316) [Dustin Kirkland]
-  * Write status to /run/cloud-init/status.json for consumption by other programs (LP: #1284439)
-  * fix output of network information to not include 'addr:' (LP: #1285185)
-
- -- Scott Moser <smoser@ubuntu.com>  Mon, 03 Mar 2014 16:59:27 -0500
-
-cloud-init (0.7.5~bzr952-0ubuntu1) trusty; urgency=medium
-
-  * New upstream snapshot.
-  * fix broken seed of DataSourceNoCloud via external disk.
-
- -- Scott Moser <smoser@ubuntu.com>  Tue, 18 Feb 2014 14:10:52 -0500
-
-cloud-init (0.7.5~bzr950-0ubuntu1) trusty; urgency=medium
-
-  * New upstream snapshot.
-  * support for vendor-data in NoCloud
-  * fix in is_ipv4 to accept IP addresses with a '0' in them.
-  * Azure: fix issue with stale data in /var/lib/waagent (LP: #1269626)
-  * skip config_modules that declare themselves only verified on a set of distros. Add them to 'unverified_modules' list to run anyway.
-  * Add CloudSigma datasource [Kiril Vladimiroff]
-  * Add initial support for Gentoo and Arch distributions [Nate House]
-  * Add GCE datasource [Vaidas Jablonskis]
-  * Add native Openstack datasource which reads openstack metadata rather than relying on EC2 data in openstack metadata service.
-
- -- Scott Moser <smoser@ubuntu.com>  Fri, 14 Feb 2014 14:39:56 -0500
-
-cloud-init (0.7.5~bzr933-0ubuntu1) trusty; urgency=medium
-
-  * debian/control: bump Standards-Version to 3.9.5
-  * debian/control: drop boto dependency no longer required in trunk.
-  * New upstream snapshot.
-  * ConfigDrive: consider partitions labelled correctly as possible sources.
-  * find root filesystem for resizing in cases where there is no initramfs
-  * removal of dependency on python-boto
-  * vendor-data support, and usage of that in Joyent datasource.
-  * change default output to be logged to /var/log/cloud-init-output.log
-  * SeLinuxGuard: Cast file path to string. (LP: #1260072)
-  * drop support for resizing via parted (LP: #1212492)
-  * SmartOS: changes to address changes in platform (LP: #1272115)
-  * FreeBSD support.
-
- -- Scott Moser <smoser@ubuntu.com>  Fri, 24 Jan 2014 22:41:57 -0500
-
-cloud-init (0.7.5~bzr902-0ubuntu1) trusty; urgency=medium
-
-  * debian/control: Build-Depend on python-jsonpatch as #717916 is now fixed.
-  * debian/control: Recommend eatmydata (LP: #1236531)
-  * New upstream snapshot.
-  * support invoking apt with 'eatmydata' (LP: #1236531)
-  * add a message in log about dynamic import failures
-  * New in '0.7.4' release:
-  * fix reading of mount information on kernels < 2.6.26 (LP: #1248625)
-  * SmartOS: change 'region' to 'datacenter_name' to address change in data provided to instance (LP: #1249124)
-  * support calling 'add-apt-repository' for 'cloud-archive:' entries (LP: #1244355)
-  * DataSourceAzure: fix incompatibility with python 2.6 (LP: #1232175)
-  * fix bug mounting first partition of an alias'd name. (LP: #1236594)
-  * SmartOS: fix bug with hostname due to trailing whitespace (LP: #1236445)
-  * fix creation of partitions on Azure (LP: #1233698)
-  * cc_growpart: respect /etc/growroot-disabled (LP: #1234331)
-  * ubuntu config: add default user to 'sudo' group (LP: #1228228)
-  * Fix usage of libselinux-python when selinux is disabled
-  * add OpenNebula datasource
-
- -- Scott Moser <smoser@ubuntu.com>  Tue, 17 Dec 2013 16:51:30 -0500
-
-cloud-init (0.7.3-0ubuntu2) saucy; urgency=low
-
-  * fix bug where a mount entry of 'ephemeral0' would only consider the unpartitioned device, not also the first partition (LP: #1236594)
-
- -- Scott Moser <smoser@ubuntu.com>  Mon, 07 Oct 2013 20:16:02 -0400
-
-cloud-init (0.7.3-0ubuntu1) saucy; urgency=low
-
-  * New upstream release.
-  * Fix for SmartOS datasource when hostname is provided via dmi data (LP: #1236445)
-
- -- Scott Moser <smoser@ubuntu.com>  Mon, 07 Oct 2013 14:49:56 -0400
-
-cloud-init (0.7.3~bzr884-0ubuntu1) saucy; urgency=low
-
-  * New upstream snapshot.
-  * allow disabling of growpart via file /etc/growroot-disabled (LP: #1234331)
-  * add default user to sudo group (LP: #1228228)
-  * fix disk creation on azure (LP: #1233698)
-  * DatasourceSmartOS: allow availability-zone to be fed from the datasource via 'region' (which allows 'mirrors' and other things to make use of it).
-
- -- Scott Moser <smoser@ubuntu.com>  Fri, 04 Oct 2013 21:08:07 -0400
-
-cloud-init (0.7.3~bzr879-0ubuntu1) saucy; urgency=low
-
-  * New upstream snapshot.
-  * fixes to disk_setup config module and enabling of partition creation and filesystem creation on Azure.
-
- -- Scott Moser <smoser@ubuntu.com>  Fri, 27 Sep 2013 19:47:37 -0400
-
-cloud-init (0.7.3~bzr871-0ubuntu1) saucy; urgency=low
-
-  * New upstream snapshot.
-  * add 'disk_setup' config module for partitioning disks and creating filesystems. (LP: #1218506)
-
- -- Scott Moser <smoser@ubuntu.com>  Fri, 20 Sep 2013 20:46:08 -0400
-
-cloud-init (0.7.3~bzr869-0ubuntu1) saucy; urgency=low
-
-  * depend on cloud-utils or cloud-guest-utils (LP: #1224003)
-  * New upstream snapshot.
-  * Add OpenNebula datasource.
-  * Support reading 'random_seed' from metadata and writing to /dev/urandom
-  * fix for bug in log_time.
-
- -- Scott Moser <smoser@ubuntu.com>  Wed, 11 Sep 2013 17:04:45 -0400
-
-cloud-init (0.7.3~bzr862-0ubuntu1) saucy; urgency=low
-
-  * New upstream snapshot.
-  * support base64 encoded data in the smart os datasource
-
- -- Scott Moser <smoser@ubuntu.com>  Thu, 29 Aug 2013 04:54:39 -0400
-
-cloud-init (0.7.3~bzr861-0ubuntu1) saucy; urgency=low
-
-  * New upstream snapshot.
-  * fix publishing hostname on azure (LP: #1214541)
-
- -- Scott Moser <smoser@ubuntu.com>  Tue, 20 Aug 2013 16:06:22 -0400
-
-cloud-init (0.7.3~bzr860-0ubuntu1) saucy; urgency=low
-
-  * New upstream snapshot.
-  * fix setting of password for a user on azure. (LP: #1212723)
-
- -- Scott Moser <smoser@ubuntu.com>  Thu, 15 Aug 2013 16:01:40 -0400
-
-cloud-init (0.7.3~bzr858-0ubuntu1) saucy; urgency=low
-
-  * New upstream snapshot.
-  * fix resizing of root partition by preferring the functional 'growpart' support over the broken 'parted resizepart' support (LP: #1212444)
-  * add options for apt_ftp_proxy, apt_https_proxy and apt_config (LP: #1057195)
-
- -- Scott Moser <smoser@ubuntu.com>  Wed, 14 Aug 2013 21:44:22 -0400
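The bzr858 stanza above adds apt proxy and apt_config keys alongside the older top-level apt_proxy key; a minimal sketch of that old-format cloud-config (proxy URLs and the apt option are illustrative):

    #cloud-config
    apt_proxy: http://proxy.example.com:3128
    apt_ftp_proxy: ftp://proxy.example.com:2121
    apt_https_proxy: https://proxy.example.com:3128
    apt_config: |
      APT::Retries "3";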
- * azure: do not wait for output of bouncing interface (ifdown; ifup) - as that waits on output of all ifupdown scripts to close all file - descriptors. - - -- Scott Moser <smoser@ubuntu.com> Mon, 29 Jul 2013 12:21:08 -0400 - -cloud-init (0.7.3~bzr850-0ubuntu1) saucy; urgency=low - - * New upstream snapshot. - * fix bouncing of interface. environment was not being modified - so the command invoked did not have access to 'interface'. - * debian/README.source: update to read upstream version from trunk - - -- Scott Moser <smoser@ubuntu.com> Fri, 26 Jul 2013 14:34:02 -0400 - -cloud-init (0.7.3~bzr849-0ubuntu2) saucy; urgency=low - - * debian/control: fix bad dependency on python-jsonpatch - by build-depending on python-json-patch, so dh_python2 - can find the right package (LP: #1205358). - - -- Scott Moser <smoser@ubuntu.com> Fri, 26 Jul 2013 10:47:59 -0400 - -cloud-init (0.7.3~bzr849-0ubuntu1) saucy; urgency=low - - * New upstream snapshot. - * azure: support publishing hostname via bouncing interface (LP: #1202758) - - -- Scott Moser <smoser@ubuntu.com> Thu, 25 Jul 2013 17:08:30 -0400 - -cloud-init (0.7.3~bzr845-0ubuntu2) saucy; urgency=low - - * debian/control: fix dependency python-json-patch. - - -- Scott Moser <smoser@ubuntu.com> Wed, 24 Jul 2013 15:01:24 -0400 - -cloud-init (0.7.3~bzr845-0ubuntu1) saucy; urgency=low - - * Reads the currently set value in /etc/cloud/cloud.cfg.d/90_dpkg.cfg to - db_set the value of cloud-init/datasources. (Closes: #709773) - * New upstream snapshot. - * azure: use deployment-id rather than static instance-id (LP: #1204190) - * config-drive: make 'availability_zone' available. (LP: #1190431) - * finalize handlers even on error (LP: #1203368) - * azure: fix password based access (LP: #1201969) - * add smartos (Joyent cloud) datasource - * support patching cloud-config via jsonp (LP: #1200476) - * debian/control: add dependency on python-jsonp - - -- Scott Moser <smoser@ubuntu.com> Wed, 24 Jul 2013 13:47:53 -0400 - -cloud-init (0.7.3~bzr829-0ubuntu1) saucy; urgency=low - - * New upstream snapshot. - * fix to upstart_job handler when upstart version is 1.8. - * Azure datasource: allow userdata to be found in node named - 'UserData' or 'CustomData' - - -- Scott Moser <smoser@ubuntu.com> Thu, 11 Jul 2013 10:20:03 -0400 - -cloud-init (0.7.3~bzr826-0ubuntu2) saucy; urgency=low - - * debian/cloud-init.templates: add 'Azure' datasource to list - of available datasources. - - -- Scott Moser <smoser@ubuntu.com> Wed, 10 Jul 2013 16:31:48 -0400 - -cloud-init (0.7.3~bzr826-0ubuntu1) saucy; urgency=low - - * New upstream snapshot. - * Fix omnibus support (LP: #1182265) - * invoke 'initctl reload-configuration' on upstart jobs again - (LP: #1124384) - * Remove unowned files in /etc/apt/apt.conf.d/ after purge. (Closes: #674237) - - -- Scott Moser <smoser@ubuntu.com> Wed, 10 Jul 2013 13:35:59 -0400 - -cloud-init (0.7.2-0ubuntu1) saucy; urgency=low - - * New upstream release. - * fix merging routines to be backwards compatible (LP: #1180867) - * fix for python 2.6 - - -- Scott Moser <smoser@ubuntu.com> Wed, 05 Jun 2013 11:12:46 -0400 - -cloud-init (0.7.2~bzr812-0ubuntu1) saucy; urgency=low - - * New upstream snapshot. - * catch up with upstream, which is hopefully 0.7.2 - * straighten out the merging routines - * fix a bug in Maas datasource - - -- Scott Moser <smoser@ubuntu.com> Fri, 10 May 2013 17:53:49 -0400 - -cloud-init (0.7.2~bzr809-0ubuntu1) raring; urgency=low - - * New upstream snapshot.
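The 'patching cloud-config via jsonp' item in the 0.7.3~bzr845 entry above refers to the cloud-config-jsonp user-data part type, which applies a JSON-patch-style edit to the merged cloud-config. A minimal sketch, with an illustrative patch target, following current cloud-init documentation:

    #cloud-config-jsonp
    [
      { "op": "add", "path": "/runcmd", "value": ["echo merged via jsonp"] }
    ]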
- * make apt-get invoke 'dist-upgrade' rather than 'upgrade' for - package_upgrade. (LP: #1164147) - * workaround 2.6 kernel issue that stopped blkid from showing /dev/sr0 - - -- Scott Moser <smoser@ubuntu.com> Thu, 11 Apr 2013 12:55:51 -0400 - -cloud-init (0.7.2~bzr804-0ubuntu1) raring; urgency=low - - * New upstream snapshot. - * use python-requests rather than urllib2 for http (LP: #1067888) - * handle failure of resizefs better. Specifically, do not show - warnings or stack trace in lxc (LP: #1160462) - - -- Scott Moser <smoser@ubuntu.com> Wed, 27 Mar 2013 10:04:41 -0400 - -cloud-init (0.7.2~bzr795-0ubuntu1) raring; urgency=low - - * New upstream snapshot. - * documentation on write-files module (LP: #1111205) - * support for specifying package versions in package installs - * DataSourceNoCloud: allow specifying user-data and meta-data in - the datasource config (LP: #1115833) - * work around bug in upstart for now (LP: #1124384) - * support resizing btrfs filesystems - * parse ssh keys more correctly (LP: #1136343) - * upstart/cloud-init-nonet.conf: handle sigterm gracefully (LP: #1015223) - * support growing partitions (LP: #1136936) - * use --force-unsafe-io for dpkg installations to improve speed - This is sane as it happens on instance initialization. - * more powerful and user-suppliable cloud-config merge mechanisms - (LP: #1023179) - - -- Scott Moser <smoser@ubuntu.com> Thu, 07 Mar 2013 17:33:59 -0500 - -cloud-init (0.7.1-0ubuntu5) raring; urgency=low - - * catchup-751-760.patch (sync to 760) - * DataSourceConfigDrive: allow config-drive data to come from a - CD-ROM (LP: #1100545) - * Allow 'sr0' to be used as a source for mount config [Vlastimil Holer] - * do not log passwords provided via config (LP: #1096417) - * DataSourceCloudStack: Attempt to find the 'virtual router' as provided - from dhcp responses, rather than assuming it is the default route - (LP: #1089989) [Gerard Dethier] - in the CloudStack environment use virtual router rather than default route - * notify upstart after writing upstart jobs to support filesystems that - do not support inotify such as overlayfs (LP: #1080841) - * fix cloud-config 'lock_password' user setup (LP: #1096423) - * debian/README.source: minor improvements.
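The write-files and partition-growing items in the 0.7.2~bzr795 entry above correspond to the write_files and growpart cloud-config modules. A minimal sketch using field names from current cloud-init documentation (the contemporary schema may have differed; /etc/example.conf is a hypothetical path):

    #cloud-config
    write_files:
      - path: /etc/example.conf     # hypothetical target file
        permissions: '0644'
        content: |
          key = value
    growpart:
      mode: auto
      devices: ['/']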
- - -- Scott Moser <smoser@ubuntu.com> Fri, 18 Jan 2013 10:12:34 -0500 - -cloud-init (0.7.1-0ubuntu4) raring; urgency=low - - * cherry pick relevant patches from trunk up to revision 750 - * use short form of '--stderr' argument to logger for better cross - distro support (LP: #1083715) - * puppet: make installation configurable (LP: #1090205) - * chef: add omnibus installation method - * fix allowing cloud-config input via user-data to affect the - apt mirror selection (LP: #090482) - - -- Scott Moser <smoser@ubuntu.com> Mon, 17 Dec 2012 10:48:23 -0500 - -cloud-init (0.7.1-0ubuntu3) raring; urgency=low - - * cherry pick relevant patches from trunk up to revision 745 - * fix writing of sudoers when sudoers rule is a string rather than - an array (LP: #1079002) - * add trailing slash to sudoers files that are written - * fix resizefs module when 'noblock' was provided (LP: #1080985) - * make sure there are no blank lines before the cloud-init entry - in /etc/ca-certificates.conf (LP: #1077020) - - -- Scott Moser <smoser@ubuntu.com> Mon, 03 Dec 2012 21:45:48 -0500 - -cloud-init (0.7.1-0ubuntu2) raring; urgency=low - - * debian/watch: add watch file - * add 'ubuntu' user to sudoers (LP: #1080717) - * set 'ubuntu' user shell to bash - - -- Scott Moser <smoser@ubuntu.com> Mon, 19 Nov 2012 09:38:29 -0500 - -cloud-init (0.7.1-0ubuntu1) raring; urgency=low - - * New upstream release. - * landscape: install landscape-client package if not installed. - only take action if cloud-config is present (LP: #1066115) - * landscape: restart landscape after install or config (LP: #1070345) - * multipart/archive: do not fail on unknown headers in multipart - mime or cloud-archive config (LP: #1065116). - * tools/Z99-cloud-locale-test.sh: avoid warning when user's shell is - zsh (LP: #1073077) - * fix stack trace when unknown user-data input had unicode (LP: #1075756) - * split 'apt-update-upgrade' config module into 'apt-configure' and - 'package-update-upgrade-install'. The 'package-update-upgrade-install' - will be a cross distro module. - * fix bug where cloud-config from user-data could not affect system_info - settings (LP: #1076811) - * add yum_add_repo configuration module for adding additional yum repos - * fix public key importing with config-drive-v2 datasource (LP: #1077700) - * handle renaming and fixing up of marker names (LP: #1075980) - this relieves that burden from the distro/packaging. - * group config: fix how group members weren't being translated correctly - when the group: [member, member...] format was used (LP: #1077245) - * work around an issue with boto > 0.6.0 that lazy loaded the return from - get_instance_metadata(). This resulted in failure for cloud-init to - install ssh keys. (LP: #1068801) - * add power_state_change config module for shutting down system after - cloud-init finishes. (LP: #1064665) -
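The power_state_change and yum_add_repo modules added in the 0.7.1 entry above are configured through cloud-config keys like the following. This is a minimal sketch per current cloud-init documentation; the 0.7.1-era schema may have differed, and the repository shown is purely illustrative:

    #cloud-config
    power_state:
      mode: poweroff
      delay: '+30'       # minutes after cloud-init finishes; 'now' is also accepted
      message: shutting down after first boot
    yum_repos:
      example-repo:      # hypothetical repository id
        baseurl: http://mirror.example.com/repo/$basearch
        name: Example repository
        enabled: false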
- - -- Scott Moser <smoser@ubuntu.com> Mon, 01 Oct 2012 14:19:46 -0400 - -cloud-init (0.7.0~bzr677-0ubuntu1) quantal; urgency=low - - * add CloudStack to DataSources listed by dpkg-reconfigure (LP: #1002155) - * New upstream snapshot. - * 0440 permissions on /etc/sudoers.d files rather than 0644 - * get host ssh keys to the console (LP: #1055688) - * MAAS DataSource adjust timestamp in oauth header to one based on the - timestamp in the response of a 403. This accounts for a bad local - clock. (LP: #978127) - * re-start the salt daemon rather than start to ensure config changes - are taken. - * allow for python unicode types in yaml that is loaded. - * cleanup in how config modules get at users and groups. - - -- Scott Moser <smoser@ubuntu.com> Sun, 30 Sep 2012 14:29:04 -0400 - -cloud-init (0.7.0~bzr659-0ubuntu2) quantal; urgency=low - - * debian/cloud-init.templates: fix bad template file (LP: #1053239) - - -- Scott Moser <smoser@ubuntu.com> Thu, 20 Sep 2012 09:18:20 -0400 - -cloud-init (0.7.0~bzr659-0ubuntu1) quantal; urgency=low - - * New upstream snapshot. - * add signal handlers to more cleanly exit - * add logging fallback in case logging fails - * documentation fix for landscape config (LP: #1042764) - * do not write a comment in /etc/hostname (LP: #1052664) - * fix incorrect ubuntu mirrors for 'ports' arches [Robbie Basak] - * fix generation of hostname based on ip address in datasource - (LP: #1050962) [Andy Grimm] - * remove 'start networking' from cloud-init-nonet and replace it - with container specific fixes (LP: #1031065) - * fix landscape configuration so client will run (LP: #1042758) - * enable all available datasources (adding AltCloud and None) - * fix bug where user data scripts re-ran on upgrade from 10.04 versions - (LP: #1049146) - - -- Scott Moser <smoser@ubuntu.com> Wed, 19 Sep 2012 22:08:51 -0400 - -cloud-init (0.7.0~bzr644-0ubuntu1) quantal; urgency=low - - * New upstream snapshot. - * fix cloud-archives (LP: #1044594) - * fix set_passwords for usergroups as a list (LP: #1044553) - * fix 'failed to setup apt-pipelining' warning on install due to - old 0.6 style usage of cloud-init. - - -- Scott Moser <smoser@ubuntu.com> Sat, 01 Sep 2012 20:11:34 -0400 - -cloud-init (0.7.0~bzr642-0ubuntu1) quantal; urgency=low - - * New upstream snapshot. - * support using launch-index (ami-launch-index) (LP: #1023177) - * usergroup related fixes (LP: #1041384, #1044044, #1044508) - - -- Scott Moser <smoser@ubuntu.com> Fri, 31 Aug 2012 17:04:06 -0400 - -cloud-init (0.7.0~bzr639-0ubuntu1) quantal; urgency=low - - * New upstream snapshot. - * fix broken ssh_import_id, which would give stack trace - - -- Scott Moser <smoser@ubuntu.com> Tue, 28 Aug 2012 14:09:47 -0400 - -cloud-init (0.7.0~bzr637-0ubuntu1) quantal; urgency=low - - * New upstream snapshot. - * fix issue with public keys not being added to 'ubuntu' user - since the user was not created yet. (LP: #1042459) - * only search the top level domain 'instance-data' for the EC2 - metadata service, to avoid misconfiguration or unexpected results - by searching search entries in /etc/resolv.conf (LP: #1040200) - - -- Scott Moser <smoser@ubuntu.com> Mon, 27 Aug 2012 20:27:06 -0400 - -cloud-init (0.7.0~bzr634-0ubuntu1) quantal; urgency=low - - * New upstream snapshot. - * support for datasource from config-drive-v2 (LP: #1037567) - - -- Scott Moser <smoser@ubuntu.com> Fri, 24 Aug 2012 17:24:26 -0400 - -cloud-init (0.7.0~bzr633-0ubuntu1) quantal; urgency=low - - * New upstream snapshot. - * support creating users on boot. 
remove requirement for a 'ubuntu' - user to be previously present in image. (LP: #1028503) - * add experimental apt_reboot_if_required flag to reboot if necessary - after upgrade or package install (LP: #1038108) - * improve mirror selection for a distro: - * support arm mirrors (LP: #1028501) - * support seeding security mirror (LP: #1006963) - * support dns mirrors including availability-zone reference - (LP: #1037727) - * include a "None" datasource so items like ssh host key generation - occur if there is no other metadata service. (LP: #906669) - * print authorized_keys for users to the console (LP: #1010582) - * Add RHEVm and vSphere support as datasource AltCloud [Joseph VLcek] - - -- Scott Moser <smoser@ubuntu.com> Thu, 23 Aug 2012 01:06:34 -0400 - -cloud-init (0.7.0~bzr614-0ubuntu1) quantal; urgency=low - - * New upstream snapshot. - * disable searching for 'ubuntu-mirror' in local dns to find a - local mirror (LP: #974509) - * emit the cloud-config event (LP: #1028674) - * write timestamps to console on reboot and shutdown (LP: #1018554) - - -- Scott Moser <smoser@ubuntu.com> Fri, 03 Aug 2012 14:55:37 -0400 - -cloud-init (0.7.0~bzr608-0ubuntu1) quantal; urgency=low - - * New upstream snapshot. - * fix issue with EC2 datasource that prevented /mnt from being mounted. - - -- Scott Moser <smoser@ubuntu.com> Mon, 16 Jul 2012 16:49:55 -0400 - -cloud-init (0.7.0~bzr604-0ubuntu1) quantal; urgency=low - - * New upstream snapshot. - * add cc_write_file for injecting files via cloud-config (LP: #1012854) - * fix issue with empty user data - * remove some un-needed warnings to console output in DataSourceOVF - * allow user-data scripts output through to the console - - -- Scott Moser <smoser@ubuntu.com> Thu, 12 Jul 2012 16:11:01 -0400 - -cloud-init (0.7.0~bzr583-0ubuntu1) quantal; urgency=low - - * New upstream snapshot. - * debian/control: wrap-and-sort - * debian/control: actually depend on software-properties-common - * debian/control: depend on python-cheetah again instead of python-tempita - - -- Scott Moser <smoser@ubuntu.com> Mon, 09 Jul 2012 17:41:22 -0400 - -cloud-init (0.7.0~bzr564-0ubuntu2) quantal; urgency=low - - * debian/control: Build-Depends on python-setuptools (LP: #1022101) - - -- Angel Abad <angelabad@ubuntu.com> Sat, 07 Jul 2012 18:43:05 +0200 - -cloud-init (0.7.0~bzr564-0ubuntu1) quantal; urgency=low - - * New upstream snapshot. - Thanks to Joshua Harlow for hard work. - * depend on software-properties-common rather than - python-software-properties (LP: #1021418) - - -- Scott Moser <smoser@ubuntu.com> Fri, 06 Jul 2012 17:31:01 -0400 - -cloud-init (0.6.3-0ubuntu3) quantal; urgency=low - - * grub-legacy-ec2: add missing dependency on ucf (LP: #960336). - - -- Robie Basak <robie.basak@ubuntu.com> Sun, 24 Jun 2012 05:10:13 +0100 - -cloud-init (0.6.3-0ubuntu2) quantal; urgency=high - - * Added -generic to Xen kernels list since -virtual has been dropped with - Quantal. (LP: #1005551) - - -- Ben Howard <ben.howard@ubuntu.com> Tue, 29 May 2012 12:59:01 -0600 - -cloud-init (0.6.3-0ubuntu1) precise; urgency=low - - * New upstream release. - * improve chef examples for working configurations on 11.10 and 12.04 - [Lorin Hochstein] (LP: #960564) - * fix bug in landscape module if /etc/landscape did not exist - (LP: #978329) - - -- Scott Moser <smoser@ubuntu.com> Wed, 11 Apr 2012 00:05:00 -0400 - -cloud-init (0.6.3~bzr554-0ubuntu1) precise; urgency=low - - * New upstream snapshot. 
- * Fix bug in Chef support that required 'validation_cert' (LP: #960547) - * Provide user-friendly message when a user ssh's in with an invalid - locale (LP: #960547) - * Support reading a url reference to cloud-config from the kernel - command line. - - -- Scott Moser <smoser@ubuntu.com> Thu, 05 Apr 2012 01:24:42 -0400 - -cloud-init (0.6.3~bzr551-0ubuntu1) precise; urgency=low - - * New upstream snapshot. - * support running resize2fs in the background (default=off) (LP: #961226) - - -- Scott Moser <smoser@ubuntu.com> Thu, 22 Mar 2012 14:33:59 -0400 - -cloud-init (0.6.3~bzr548-0ubuntu1) precise; urgency=low - - * New upstream snapshot. - * If public-keys is a string, split it into multiple keys on newline. - This specifically helps the MAAS data source, and should not negatively - affect others. - - -- Scott Moser <smoser@ubuntu.com> Mon, 19 Mar 2012 13:50:50 -0400 - -cloud-init (0.6.3~bzr547-0ubuntu1) precise; urgency=low - - * New upstream snapshot. - * rename DataSourceMaaS to DataSourceMAAS. - * support public-keys in DataSourceMAAS - * Warn in user-data processing on non-multipart, non-handled data - * CloudStack data source added (not enabled by default) - * fix bug in cloud-init.postinst where the name used was wrong - causing config-apt-pipelining to run more than intended - - -- Scott Moser <smoser@ubuntu.com> Fri, 16 Mar 2012 14:12:38 -0400 - -cloud-init (0.6.3~bzr539-0ubuntu3) precise; urgency=low - - * make maas config file only readable by root (LP: #954721) - - -- Scott Moser <smoser@ubuntu.com> Wed, 14 Mar 2012 01:19:32 -0400 - -cloud-init (0.6.3~bzr539-0ubuntu2) precise; urgency=low - - [Cosmin Luta] - * add dependency on python-oauth (LP: #953915) - - -- Scott Moser <smoser@ubuntu.com> Tue, 13 Mar 2012 11:36:11 -0400 - -cloud-init (0.6.3~bzr539-0ubuntu1) precise; urgency=low - - * New upstream snapshot. - * add ability to configure Acquire::http::Pipeline-Depth via - cloud-config setting 'apt_pipelining' (LP: #942061) - * if cloud-config settings removed default certificates - (remove-defaults), then seed package ca-certificates to not - install new ones on upgrade. - * run-parts now uses internal implementation rather than - separate command. - * add MaaS datasource (LP: #942061) - * debian/cloud-init.postinst: address population of apt_pipeline - setting on installation. - * debian/cloud-init.postinst: support configuring cloud-init - maas datasource via preseed values cloud-init/maas-metadata-url and - cloud-init/maas-credentials. (LP: #942061) - * debian/cloud-init.postinst: support for (LP: #924375) - - -- Scott Moser <smoser@ubuntu.com> Fri, 09 Mar 2012 16:37:01 -0500 - -cloud-init (0.6.3~bzr530-0ubuntu1) precise; urgency=low - - * New upstream snapshot. - - fix DataSourceNoCloud seeded from local or cmdline (LP: #942695) - - change 'islxc' to 'iscontainer' and use 'running-in-container' utility - from upstart rather than 'lxc-is-container' (LP: #941955) - - Do not fail on bad part handlers, instead catch error and log - - -- Scott Moser <smoser@ubuntu.com> Tue, 28 Feb 2012 19:15:19 -0500 - -cloud-init (0.6.3~bzr527-0ubuntu1) precise; urgency=low - - * New upstream snapshot. - - exit 0 in cloud-init if no metadata is found (nothing to do) - - documentation improvements - - support network config in DataSourceNoCloud - - -- Scott Moser <smoser@ubuntu.com> Fri, 17 Feb 2012 17:11:50 -0500 - -cloud-init (0.6.3~bzr519-0ubuntu1) precise; urgency=low - - * New upstream snapshot.
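The 'apt_pipelining' setting from the 0.6.3~bzr539 entry above maps onto APT's Acquire::http::Pipeline-Depth. A minimal sketch, with semantics as described in current cloud-init documentation (false disables pipelining, an integer sets the depth directly, and the default leaves the OS setting untouched):

    #cloud-config
    apt_pipelining: false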
- - [Mike Milner] add support for managing CA Certificates (LP: #915232) - - in ci-info lines, use '.' for empty field for easier machine reading - - support empty lines in "#include" files (LP: #923043) - - support configuration of salt minions (Jeff Bauer) (LP: #927795) - - DataSourceOVF: only search for OVF data on ISO9660 filesystems (LP: #898373) - - DataSourceConfigDrive: support getting data from openstack config drive - (LP: #857378) - - [Juerg Haefliger] formatting and pylint cleanups - * increase timeouts for initial config check for metadata service - to address slow metadata service in openstack - * add awareness of ConfigDrive data source - - -- Scott Moser <smoser@ubuntu.com> Thu, 16 Feb 2012 17:27:05 -0500 - -cloud-init (0.6.3~bzr502-0ubuntu1) precise; urgency=low - - * New upstream snapshot. - - [Mike Milner] add test case framework (LP: #890851) - - [Juerg Haefliger] fix pylint warnings (LP: #914739) - - fix regression where ec2 mirrors were not selected (LP: #915282) - - -- Scott Moser <smoser@ubuntu.com> Thu, 12 Jan 2012 17:56:52 +0100 - -cloud-init (0.6.3~bzr497-0ubuntu1) precise; urgency=low - - * New upstream snapshot. - - cloud-config support for configuring apt-proxy - - selection of local mirror based on presence of 'ubuntu-mirror' dns - entry in local domain. (LP: #897688) - - DataSourceEc2: more resilient to slow metadata service (LP: #894279) - - close stdin in all programs launched by cloud-init (LP: #903993) - - revert management of /etc/hosts to 0.6.1 style (LP: #890501, LP: #871966) - - write full ssh keys to console for easy machine consumption (LP: #893400) - - put INSTANCE_ID environment variable in bootcmd scripts - - add 'cloud-init-per' script for easily running things with a given freq - (this replaced cloud-init-run-module) - - support configuration of landscape-client via cloud-config (LP: #857366) - - part-handlers now get base64 decoded content rather than 2xbase64 encoded - in the payload parameter. (LP: #874342) - - -- Scott Moser <smoser@ubuntu.com> Thu, 22 Dec 2011 04:07:38 -0500 - -cloud-init (0.6.2-0ubuntu2) precise; urgency=low - - * Build using dh_python2. LP: #904248. - * debian/rules: Explicitly set DEB_PYTHON2_MODULE_PACKAGES = cloud-init. - - -- Matthias Klose <doko@ubuntu.com> Sat, 17 Dec 2011 21:08:23 +0000 - -cloud-init (0.6.2-0ubuntu1) precise; urgency=low - - * New upstream release - - -- Scott Moser <smoser@ubuntu.com> Thu, 27 Oct 2011 23:05:15 -0400 - -cloud-init (0.6.1-0ubuntu20) oneiric; urgency=low - - * fix broken path if local-hostname was not in metadata (LP: #857380) - * redirect output of 'start networking' in 'cloud-init-nonet' to /dev/null - * include GPLv3 in source tree - - -- Scott Moser <smoser@ubuntu.com> Fri, 23 Sep 2011 09:24:27 -0400 - -cloud-init (0.6.1-0ubuntu19) oneiric; urgency=low - - * If local-hostname is not in meta-data, attempt to look up hostname in - an alias in /etc/hosts. This will avoid setting domain portion of fqdn - to 'localdomain' in some cases (LP: #850206). - - -- Scott Moser <smoser@ubuntu.com> Wed, 14 Sep 2011 15:15:00 -0400 - -cloud-init (0.6.1-0ubuntu18) oneiric; urgency=low - - * minor documentation improvement. - - [Mike Moulton, Avishai Ish-Shalom] - * Chef support fixes.
support for environment and initial attr (LP: #845208) - - -- Scott Moser <smoser@ubuntu.com> Tue, 13 Sep 2011 17:02:48 -0400 - -cloud-init (0.6.1-0ubuntu17) oneiric; urgency=low - - * fix issues with chef (LP: #845161) - * be more forgiving on metadata for public-keys (LP: #845155) - - -- Scott Moser <smoser@ubuntu.com> Fri, 09 Sep 2011 14:19:03 -0700 - -cloud-init (0.6.1-0ubuntu16) oneiric; urgency=low - - * catch up with trunk at revision 439 - * warn on failure to set hostname (LP: #832175) - * properly wait for all static interfaces to be up before - cloud-init runs (depends on fix in LP: #838968). - * in DataSources NoCloud and OVF, do not set hostname to the - static value 'ubuntuhost' if local-hostname is not in metadata - (LP: #838280) - * improve the way ssh_authorized_keys is updated, so that the - values given will be used. (LP: #434076, LP: #833499) - * cloud-init-nonet.conf: minor changes to config - - -- Scott Moser <smoser@ubuntu.com> Thu, 01 Sep 2011 21:14:09 -0400 - -cloud-init (0.6.1-0ubuntu15) oneiric; urgency=low - - * catch up with trunk at revision 431 - * add network debug info to console when cloud-init runs (LP: #828186) - * fix issue where subprocesses (apt-add-repository) were given - the console and would attempt to prompt user and hang boot (LP: #831505) - * add awareness of ecdsa to cc_ssh - - -- Scott Moser <smoser@ubuntu.com> Tue, 23 Aug 2011 00:01:01 -0400 - -cloud-init (0.6.1-0ubuntu14) oneiric; urgency=low - - * change the handling of user-data (LP: #810044) - * boothooks will now run more than once as they were intended - * cloud-config and user-scripts will be updated from user data every boot - * Add a second type of part-handler that will be called on every boot - * fix bad handling of /etc/hosts if manage_etc_hosts was false - - -- Scott Moser <smoser@ubuntu.com> Mon, 08 Aug 2011 12:46:56 -0500 - -cloud-init (0.6.1-0ubuntu13) oneiric; urgency=low - - * do not install 92-uec-upgrade-available as a motd hook. This file was - installed but did not do anything since updates-check was removed. - * support multiple statically configured network devices, as long as - all of them come up early (LP: #810044) - - [Marc Cluet] - * add support for passing mcollective keys via cloud-config - * add support for 'include-once' type. include-once urls are only - retrieved once-per-instance rather than on every boot.
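The 'include-once' type from the 0.6.1-0ubuntu13 entry above is a user-data part whose listed URLs are fetched only once per instance instead of on every boot. A minimal sketch with a hypothetical URL:

    #include-once
    https://example.com/one-time-cloud-config.txt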
- - -- Scott Moser <smoser@ubuntu.com> Mon, 01 Aug 2011 16:45:40 -0400 - -cloud-init (0.6.1-0ubuntu12) oneiric; urgency=low - - * do not give trace on failure to resize in lxc container (LP: #800856) - * increase the timeout on url gets for "seedfrom" values (LP: #812646) - * do not write entries for ephemeral0 on t1.micro (LP: #744019) - - [Adam Gandalman] - * improve the updating of /etc/hosts with correct fqdn when possible - (LP: #812539) - - [Avishai Ish-Shalom] - * add chef support (cloudinit/CloudConfig/cc_chef.py) (LP: #798844) - - -- Scott Moser <smoser@ubuntu.com> Thu, 21 Jul 2011 05:51:03 -0400 - -cloud-init (0.6.1-0ubuntu11) oneiric; urgency=low - - [Marc Cluet] - * sanitize hosts file for system's hostname to 127.0.1.1 (LP: #802637) - - -- Scott Moser <smoser@ubuntu.com> Thu, 30 Jun 2011 14:12:47 -0400 - -cloud-init (0.6.1-0ubuntu10) oneiric; urgency=low - - * sync with trunk (rev 405) - * fix cloud-init in ubuntu lxc containers (LP: #800824) - - -- Scott Moser <smoser@ubuntu.com> Tue, 28 Jun 2011 06:42:45 -0400 - -cloud-init (0.6.1-0ubuntu9) oneiric; urgency=low - - * sync with trunk (rev 404) - * make metadata urls configurable, to support eucalyptus in - STATIC or SYSTEM modes (LP: #761847) - * support disabling byobu in cloud-config (LP: #797336) - * guarantee that ssh_config runs before sshd starts (LP: #781101) - * make prefix for keys added to /root/.ssh/authorized_keys configurable - and add 'no-port-forwarding,no-agent-forwarding,no-X11-forwarding' - to the default (LP: #798505) - * make 'cloud-config ready' command configurable (LP: #785551) - * make fstab fields used to 'fill in' shorthand mount entries configurable - (LP: #785542) - * read sshd_config to properly get path for authorized_keys (LP: #731849) - - -- Scott Moser <smoser@ubuntu.com> Fri, 17 Jun 2011 12:18:34 -0400 - -cloud-init (0.6.1-0ubuntu8) natty; urgency=low - - * instead of including /boot/grub, create it in postinst - of grub-legacy-ec2. - - -- Scott Moser <smoser@ubuntu.com> Fri, 15 Apr 2011 13:01:17 -0400 - -cloud-init (0.6.1-0ubuntu7) natty; urgency=low - - * grub-legacy-ec2: add /boot/grub directory so installation does - not depend on it already existing (LP: #759885) - - -- Scott Moser <smoser@ubuntu.com> Wed, 13 Apr 2011 11:03:04 -0400 - -cloud-init (0.6.1-0ubuntu6) natty; urgency=low - - * avoid upgrade prompt for grub-pc when devices are named xvdX (LP: #752361) - * catchup to trunk cloud-init (rev 395) - - -- Scott Moser <smoser@ubuntu.com> Wed, 06 Apr 2011 06:46:55 -0400 - -cloud-init (0.6.1-0ubuntu5) natty; urgency=low - - * fix --purge of grub-legacy-ec2 package (LP: #749444) - * catchup to trunk cloud-init (rev 394) - * support user-data formatted in dos format by converting to unix for - user-scripts, boothooks, and upstart jobs (LP: #744965) - * removal of some debug code, minor documentation fix - - -- Scott Moser <smoser@ubuntu.com> Mon, 04 Apr 2011 13:20:27 -0400 - -cloud-init (0.6.1-0ubuntu4) natty; urgency=low - - * catch up to trunk cloud-init (rev 389). - * fix bug in part-handler code, that broke part handlers (LP: #739694) - * fix sporadic resizefs failure (LP: #726938) - - -- Scott Moser <smoser@ubuntu.com> Mon, 21 Mar 2011 22:06:59 -0400 - -cloud-init (0.6.1-0ubuntu3) natty; urgency=low - - * catch up to trunk cloud-init (rev 385).
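Disabling byobu, as mentioned in the 0.6.1-0ubuntu9 entry above, is done through the byobu_by_default cloud-config key. A minimal sketch; the accepted values here follow current cloud-init documentation (e.g. enable, disable, enable-system, disable-user), and the era-specific values may have differed:

    #cloud-config
    byobu_by_default: disable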
- * attempt to install packages on failed apt-get update (LP: #728167) - * enabled timezone and mcollective cloud-config plugins - - -- Scott Moser <smoser@ubuntu.com> Fri, 04 Mar 2011 21:17:21 -0500 - -cloud-init (0.6.1-0ubuntu2) natty; urgency=low - - * grub-legacy-ec2: Use dpkg-query --control-path instead of hard-coding a - path to debconf templates file, for compatibility with multiarch. - - -- Scott Moser <smoser@ubuntu.com> Tue, 01 Mar 2011 23:23:55 -0500 - -cloud-init (0.6.1-0ubuntu1) natty; urgency=low - - * New upstream release. - * fix for puppet configuration options (LP: #709946) [Ryan Lane] - * fix pickling of DataSource, which broke seeding. - * turn resize_rootfs default to True - * avoid mounts in DataSourceOVF if 'read' on device fails - 'mount /dev/sr0' for an empty virtual cdrom device was taking 18 seconds - * add 'manual_cache_clean' option to select manual cleaning of - the /var/lib/cloud/instance/ link, for a data source that might - not be present on every boot - * make DataSourceEc2 retries and timeout configurable - * add 'bootcmd' like 'runcmd' to cloud-config syntax for running things early - * move from '#opt_include' in config file format to conf_d. - now local config files should live in /etc/cloud/cloud.cfg.d/ - * move /etc/cloud/distro.cfg to /etc/cloud/cloud.cfg.d/90_dpkg.cfg - * allow /etc/hosts to be written from hosts.tmpl. which allows - getting local-hostname into /etc/hosts (LP: #720440) - * better handle startup if there is no eth0 (LP: #714807) - * update rather than append in puppet config [Marc Cluet] - * add cloud-config for mcollective [Marc Cluet] - - -- Scott Moser <smoser@ubuntu.com> Sat, 19 Feb 2011 01:16:10 -0500 - -cloud-init (0.6.0-0ubuntu4) natty; urgency=low - - * fix running of user scripts (LP: #711480) (cherry pick 344) - * fix 2 lintian warnings - - -- Scott Moser <smoser@ubuntu.com> Tue, 01 Feb 2011 16:15:30 -0500 - -cloud-init (0.6.0-0ubuntu3) natty; urgency=low - - * make a better attempt at deciding if DataSourceEc2 should be used - on first install or upgrade - * fix behavior if def_log_file is empty in cloud-config (cherry pick 333) - * improve comment strings in rsyslog config (cherry pick 334) - * do not package cloud-init query (cherry pick 335) - * add previous-instance-id and previous-datasource to cloud/data - and cloud/instance/datasource files (cherry pick 337) - * allow setting of passwords and enabling/disabling ssh password auth - via cloud-config (cherry pick 338) - - -- Scott Moser <smoser@ubuntu.com> Mon, 31 Jan 2011 12:48:39 -0500 - -cloud-init (0.6.0-0ubuntu2) natty; urgency=low - - * add a debian/README.source file - * fix bug in fixing permission on /var/log/cloud-init.log (cherry pick) - * remove dependency on update-motd as updates-check was removed - * fix failure on cloud-init package purge - * add configuration of DataSources via debconf. Default to not searching - Ec2. (LP: #635188) - * fix naming of pre-processed (now user-data.txt.i) (cherry pick) - * upgrade existing content in /var/lib/cloud to 0.6.x format - - -- Scott Moser <smoser@ubuntu.com> Thu, 27 Jan 2011 16:32:44 -0500 - -cloud-init (0.6.0-0ubuntu1) natty; urgency=low - - * New upstream release. 
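The 'bootcmd' key introduced alongside 'runcmd' in the 0.6.1-0ubuntu1 entry above runs commands early on every boot, while runcmd runs once, late in the first boot. A minimal sketch (the paths written are illustrative):

    #cloud-config
    bootcmd:
      - echo "early, every boot" > /run/bootcmd-example
    runcmd:
      - [ sh, -c, 'echo "late, first boot only" > /run/runcmd-example' ]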
- * fix permissions on cloud-init.log so syslog can write to it (LP: #704509) - * rework of /var/lib/cloud layout - * remove updates-check (LP: #653220) - * support resizing root partition on first boot (enabled by default) - * added cloud-config options for setting hostname, phone_home - * indicate "all the way up" with message to console and file creation - in /var/lib/cloud/instance/ (LP: #653271) - * write ssh keys to console late in boot to ensure they're in console buffer - * add support for redirecting output of cloud-init, cloud-config, - cloud-final via the config file, or user data config file - * add support for posting data about the instance to a url (phone_home) - * add minimal OVF transport (iso) support - * make DataSources that are attempted dynamic and configurable from - config. config option 'cloud_type' replaced by 'datasource_list' - * add 'timezone' option to cloud-config (LP: #645458) - * Added an additional archive format, that can be used for multi-part - input to cloud-init. This may be more user friendly than mime-multipart - (LP: #641504) - * add support for reading Rightscale style user data (LP: #668400) - * make the message on 'disable_root' more clear (LP: #672417) - * do not require public key if private is given in ssh cloud-config - (LP: #648905) - - -- Scott Moser <smoser@ubuntu.com> Wed, 26 Jan 2011 17:28:36 -0500 - -cloud-init (0.5.15-0ubuntu4) natty; urgency=low - - * Rebuild with python 2.7 as the python default. - - -- Matthias Klose <doko@ubuntu.com> Wed, 08 Dec 2010 15:01:36 +0000 - -cloud-init (0.5.15-0ubuntu3) maverick; urgency=low - - * do not use ec2 ubuntu archive if instance is VPC (LP: #615545) - - -- Scott Moser <smoser@ubuntu.com> Thu, 16 Sep 2010 04:28:55 -0400 - -cloud-init (0.5.15-0ubuntu2) maverick; urgency=low - - * grub-legacy-ec2: boot with console=hvc0 (LP: #606373) - - -- Scott Moser <smoser@ubuntu.com> Wed, 15 Sep 2010 16:41:48 -0400 - -cloud-init (0.5.15-0ubuntu1) maverick; urgency=low - - * New upstream release. - * fix /etc/fstab cloudconfig entries for t1.micro and - change default fstab values for ephemeral0 to nobootwait (LP: #634102) - * grub-legacy-ec2: do not write chainload for grub2 to menu.lst - (LP: #627451) - * seed grub-pc correctly so update-grub runs on ec2 or uec (LP: #623609) - - -- Scott Moser <smoser@ubuntu.com> Sun, 12 Sep 2010 15:23:39 -0400 - -cloud-init (0.5.14-0ubuntu5) maverick; urgency=low - - * add missing imports for cc_puppet (LP: #632744) - * append to apt_sources files rather than truncating (LP: #627597) - * get double commented lines into sources.list (LP: #627439) - - -- Scott Moser <smoser@ubuntu.com> Wed, 08 Sep 2010 10:31:58 -0400 - -cloud-init (0.5.14-0ubuntu4) maverick; urgency=low - - * add commented out entries for partner, backports, and multiverse - (LP: #620572) - - -- Scott Moser <smoser@ubuntu.com> Thu, 26 Aug 2010 16:44:48 -0400 - -cloud-init (0.5.14-0ubuntu3) maverick; urgency=low - - * fix syntax error in cloudinit/util.py (failed installation) - - -- Scott Moser <smoser@ubuntu.com> Tue, 17 Aug 2010 22:22:06 -0400 - -cloud-init (0.5.14-0ubuntu2) maverick; urgency=low - - * fix bug preventing 'seedfrom' from working (LP: #617400) - - -- Scott Moser <smoser@ubuntu.com> Tue, 17 Aug 2010 15:49:13 -0400 - -cloud-init (0.5.14-0ubuntu1) maverick; urgency=low - - * New upstream release.
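The timezone and phone_home options from the 0.6.0-0ubuntu1 entry above look like the following in cloud-config. A minimal sketch per current cloud-init documentation (the URL is hypothetical; $INSTANCE_ID is substituted by cloud-init):

    #cloud-config
    timezone: US/Eastern
    phone_home:
      url: http://reporting.example.com/$INSTANCE_ID/
      post: all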
- - support for reading metadata and userdata from filesystem - - support for boot without metadata at all - - -- Scott Moser <smoser@ubuntu.com> Thu, 12 Aug 2010 14:45:28 -0400 - -cloud-init (0.5.13-0ubuntu3) maverick; urgency=low - - * grub-legacy-ec2: fix 'apt-get --reinstall' and dpkg-divert (LP: #611812) - * enable -virtual kernels as "xen" kernels (pv_ops now functional) - * fix bad syntax in cloud-init-run-module.py - - -- Scott Moser <smoser@ubuntu.com> Mon, 02 Aug 2010 16:26:48 -0400 - -cloud-init (0.5.13-0ubuntu2) maverick; urgency=low - - * debian/control: drop ssh-import as a recommends, as this has been - subsumed by openssh-server - - -- Dustin Kirkland <kirkland@ubuntu.com> Sat, 24 Jul 2010 21:02:40 +0200 - -cloud-init (0.5.13-0ubuntu1) maverick; urgency=low - - * New upstream release. - * invoke dpkg with --force-confold (LP: #607642) - - -- Scott Moser <smoser@ubuntu.com> Wed, 21 Jul 2010 11:58:53 -0400 - -cloud-init (0.5.12-0ubuntu8) maverick; urgency=low - - * update-grub-legacy-ec2: - - add code to stop use of 'uuid' grub syntax - - change fallback grub device from (hd0,0) to (hd0) - - change timeout in menu.lst to 0 - * grub-legacy-ec2: add grub-set-default functionality (LP: #605961) - - -- Scott Moser <smoser@ubuntu.com> Thu, 15 Jul 2010 13:07:01 -0400 - -cloud-init (0.5.12-0ubuntu7) maverick; urgency=low - - * update-grub-legacy-ec2: - - force setting of indomU so output of build process has - a menu.lst with -ec2 kernels listed. - - remove 'quiet splash' from kernel options - - make sure grub_root_device is set to hd0 in image build - - -- Scott Moser <smoser@ubuntu.com> Tue, 13 Jul 2010 16:33:51 -0400 - -cloud-init (0.5.12-0ubuntu6) maverick; urgency=low - - * fix installation error - * add quilt-setup rule for package development - - -- Scott Moser <smoser@ubuntu.com> Tue, 13 Jul 2010 12:04:21 -0400 - -cloud-init (0.5.12-0ubuntu5) maverick; urgency=low - - * sync with upstream r226. - * fix bug where nfs/network mounts could not be specified (LP: #603329) - * manage hostname setting better (LP: #596993) - * add grub-legacy-ec2 package. - - -- Scott Moser <smoser@ubuntu.com> Thu, 08 Jul 2010 22:24:59 -0400 - -cloud-init (0.5.12-0ubuntu4) maverick; urgency=low - - * handle hostname managing better with ebs root. (LP: #596993) - - -- Scott Moser <smoser@ubuntu.com> Wed, 07 Jul 2010 11:54:10 -0400 - -cloud-init (0.5.12-0ubuntu3) maverick; urgency=low - - * fix cloud-boothook input type (LP: #600799) - * sync with upstream. - - -- Scott Moser <smoser@ubuntu.com> Thu, 01 Jul 2010 21:19:13 -0400 - -cloud-init (0.5.12-0ubuntu2) maverick; urgency=low - - * fix cloud config 'apt-update-upgrade' failure due to missing import - - -- Scott Moser <smoser@ubuntu.com> Mon, 21 Jun 2010 15:08:32 -0400 - -cloud-init (0.5.12-0ubuntu1) maverick; urgency=low - - * New upstream release. - * fix cloud-init-run-module to allow 'always' (LP: #568139) - * add support for setting debconf selections - * add cloud-config support for debconf selections (LP: #582667), - byobu enablement, and ssh-import-lp-id - - -- Scott Moser <smoser@ubuntu.com> Fri, 18 Jun 2010 15:48:14 -0400 - -cloud-init (0.5.11-0ubuntu2) maverick; urgency=low - - * handle renaming sem/markers for config items to avoid - running per-instance again after package upgrade - * pull 'config-' prefix on sem/ items for cloud-config modules from - 0.5.12 - - -- Scott Moser <smoser@ubuntu.com> Fri, 18 Jun 2010 12:52:10 -0400 - -cloud-init (0.5.11-0ubuntu1) maverick; urgency=low - - * New upstream release. - * remove ec2-get-info.
It is replaced by cloud-utils ec2metadata - * use python logging - * reduce number of upstart jobs - * add "boothook" user data type - * Switch to dpkg-source 3.0 (quilt) format - - -- Scott Moser <smoser@ubuntu.com> Fri, 18 Jun 2010 01:04:58 -0400 - -cloud-init (0.5.10-0ubuntu1) lucid; urgency=low - - * New upstream release. - * ec2-get-info: fix for python traceback - * ephemeral mount will show up in /etc/mtab or df on first boot - LP: #527825 - - -- Scott Moser <smoser@ubuntu.com> Fri, 26 Mar 2010 00:57:28 -0400 - -cloud-init (0.5.9-0ubuntu1) lucid; urgency=low - - * New upstream release. - * rename apt list files. 'ubuntu-bug' now works without update (LP: #513060) - * replace 'cloudconfig' entries in fstab rather than appending (LP: #524562) - * fix to fstab writing on ebs-root instances - - -- Scott Moser <smoser@ubuntu.com> Mon, 08 Mar 2010 13:07:02 -0500 - -cloud-init (0.5.8-0ubuntu1) lucid; urgency=low - - * New upstream release. - * cache data from metadata service, LP: #527364 - * fix format of cron entry in cron.d/cloudinit-updates - * package egg-info file - - -- Scott Moser <smoser@ubuntu.com> Tue, 02 Mar 2010 15:48:04 -0500 - -cloud-init (0.5.7-0ubuntu4) lucid; urgency=low - - * fix empty package; previous package was empty for cloud-init - due to adding the ec2-init package - - -- Scott Moser <smoser@ubuntu.com> Fri, 26 Feb 2010 17:06:05 -0500 - -cloud-init (0.5.7-0ubuntu3) lucid; urgency=low - - * debian/control: - - recommend ssh-import, such that the ssh-import-lp-id - utility is available in UEC images for convenient importing of - ssh public keys stored in Launchpad, LP: #524101 - - build a transitional ec2-init package to handle the rename - gracefully on upgrades, LP: #527187 - - -- Dustin Kirkland <kirkland@ubuntu.com> Thu, 25 Feb 2010 16:22:10 -0600 - -cloud-init (0.5.7-0ubuntu2) lucid; urgency=low - - * fix packaging bug that put the message-of-the-day hook file - into a subdir of etc/update-motd.d, remove old file (LP: #524999) - - -- Scott Moser <smoser@ubuntu.com> Fri, 19 Feb 2010 21:02:10 -0500 - -cloud-init (0.5.7-0ubuntu1) lucid; urgency=low - - * New upstream release. - * run cloud-init early in boot process (LP: #504883, #524516) - - -- Scott Moser <smoser@ubuntu.com> Fri, 19 Feb 2010 18:27:45 -0500 - -cloud-init (0.5.6-0ubuntu1) lucid; urgency=low - - * New upstream release. - * supports 'runcmd' in cloud-config - * enable the update check code (LP: #524258) - * fix retry_url in boto_utils.py when metadata service not around - (LP: #523832) - * run cloud-config-puppet.conf later (LP: #523625) - [ Scott Moser 0.5.5 ] - * New upstream release, supports checking for updates - - -- Scott Moser <smoser@ubuntu.com> Fri, 19 Feb 2010 03:13:22 -0500 - -cloud-init (0.5.4-0ubuntu1) lucid; urgency=low - - * New upstream release. - * fix broken user-data scripts - * merge mathiaz work for cloud-config-puppet - * fix bug causing apt update to fail - * rename EC2Init class to CloudInit - * only set hostname once per instance. (LP: #514492) - - -- Scott Moser <smoser@ubuntu.com> Wed, 17 Feb 2010 09:40:30 -0500 - -cloud-init (0.5.3-0ubuntu2) lucid; urgency=low - - * divert ureadahead.conf in postinst (LP: #499520) - * lintian cleanups - - -- Scott Moser <smoser@ubuntu.com> Fri, 05 Feb 2010 15:48:21 -0500 - -cloud-init (0.5.3-0ubuntu1) lucid; urgency=low - - * Rename ec2-init to cloud-init. New upstream release.
- * set hostname to ip-u.x.y.z if local-hostname provides an ip addr - (LP: #475354) - - -- Scott Moser <smoser@ubuntu.com> Thu, 04 Feb 2010 03:00:05 -0500 - -ec2-init (0.5.2-0ubuntu1) lucid; urgency=low - - * new upstream release - - -- Scott Moser <smoser@ubuntu.com> Fri, 29 Jan 2010 13:30:52 -0500 - -ec2-init (0.5.1-0ubuntu1) lucid; urgency=low - - * new upstream release - - -- Scott Moser <smoser@ubuntu.com> Fri, 22 Jan 2010 16:19:30 -0500 - -ec2-init (0.5.0-0ubuntu4) lucid; urgency=low - - * add an upstart job to get ssh keys regenerated and written - to console (LP: #506599, LP: #507070) - - -- Scott Moser <smoser@ubuntu.com> Thu, 14 Jan 2010 13:10:55 -0500 - -ec2-init (0.5.0-0ubuntu3) lucid; urgency=low - - * work around difference in uec/ec2 metadata service (LP: #506332) - - -- Scott Moser <smoser@ubuntu.com> Tue, 12 Jan 2010 11:33:11 -0500 - -ec2-init (0.5.0-0ubuntu2) lucid; urgency=low - - * pull changes from devel branch to get functional on ec2 - - -- Scott Moser <smoser@ubuntu.com> Mon, 11 Jan 2010 12:03:45 -0500 - -ec2-init (0.5.0-0ubuntu2) lucid; urgency=low - - * new upstream release - - -- Scott Moser <smoser@ubuntu.com> Thu, 07 Jan 2010 22:00:38 -0500 - -ec2-init (0.4.999-0ubuntu8) lucid; urgency=low - - * fix mirror selection for us-west-1 (LP: #494185) - - -- Scott Moser <smoser@ubuntu.com> Fri, 11 Dec 2009 15:12:19 -0500 - -ec2-init (0.4.999-0ubuntu7) karmic; urgency=low - - * work around differences in eucalyptus ephemeral mounts (LP: #458850) - * get 'ec2:' prefix on ssh public key fingerprint (LP: #458576) - - -- Scott Moser <smoser@ubuntu.com> Mon, 26 Oct 2009 16:18:06 -0400 - -ec2-init (0.4.999-0ubuntu6) karmic; urgency=low - - * make sources.list components for 'karmic-security' the same as - 'karmic' and 'karmic-updates' (main, restricted) (LP: #457866) - - -- Scott Moser <smoser@ubuntu.com> Thu, 22 Oct 2009 08:55:58 -0400 - -ec2-init (0.4.999-0ubuntu5) karmic; urgency=low - - * write regenerate_ssh_host_keys output directly to /dev/console - to ensure that it gets there. (LP: #451881) - - -- Scott Moser <smoser@ubuntu.com> Wed, 21 Oct 2009 17:23:38 -0400 - -ec2-init (0.4.999-0ubuntu4) karmic; urgency=low - - * set locale to en_US.UTF-8 if get_location_from_availability_zone - doesn't have a match (LP: #407949) - - -- Scott Moser <smoser@ubuntu.com> Tue, 20 Oct 2009 09:57:49 -0400 - -ec2-init (0.4.999-0ubuntu3) karmic; urgency=low - - * split running of user-data out of ec2-init into ec2-init-user-data; - run this at S99. (LP: #431255) - - -- Scott Moser <smoser@ubuntu.com> Fri, 25 Sep 2009 14:17:17 -0400 - -ec2-init (0.4.999-0ubuntu2) karmic; urgency=low - - * remove rightscale-init from package (see LP: #434181, LP: #434693) - * fix lintian warning, specify path to GPL-3 - * replace multiple '| logger' in regenerate_ssh_host_keys with a single one - * add ec2-is-compat-env, and disable init script by default. it can be - enabled by setting 'compat=1' in /etc/ec2-init/is-compat-env - - -- Scott Moser <smoser@ubuntu.com> Thu, 24 Sep 2009 16:32:42 -0400 - -ec2-init (0.4.999-0ubuntu1) karmic; urgency=low - - * New upstream release - - -- Soren Hansen <soren@ubuntu.com> Wed, 26 Aug 2009 01:23:52 +0200 - -ec2-init (0.4.99-0ubuntu3) karmic; urgency=low - - * Also update /etc/default/locale when setting the locale based on - locality. - - -- Soren Hansen <soren@ubuntu.com> Tue, 11 Aug 2009 21:49:33 +0200 - -ec2-init (0.4.99-0ubuntu2) karmic; urgency=low - - * Consolidate build-dependencies a bit. - * Sync default configuration with code. - * Create /var/lib/ec2 in package.
- * Make ec2-get-info more robust in testing environments. - * Handle missing public keys more gracefully. - * Set proper ownership of user's authorized_keys. - - -- Soren Hansen <soren@ubuntu.com> Tue, 11 Aug 2009 09:54:16 +0200 - -ec2-init (0.4.99-0ubuntu1) karmic; urgency=low - - * Massive rewrite. (LP: #407871, #407919, #308530, #407949, #407950, #407892 - and probably many others) - * First Ubuntu version with the upstream tarball split out. - * Switch to arch: all. There is no arch specific code here. - - -- Soren Hansen <soren@ubuntu.com> Tue, 11 Aug 2009 08:33:33 +0200 - -ec2-init (0.3.4ubuntu9) karmic; urgency=low - - * Really include the action id in the semaphore filename. - - -- Soren Hansen <soren@ubuntu.com> Sat, 11 Jul 2009 09:50:31 +0200 - -ec2-init (0.3.4ubuntu8) karmic; urgency=low - - * Add Vcs-Bzr header to debian/control. - * Include the action id in the semaphore filename. - - -- Soren Hansen <soren@ubuntu.com> Sat, 11 Jul 2009 02:35:43 +0200 - -ec2-init (0.3.4ubuntu7) karmic; urgency=low - - * Re-add accidentally removed locale template. - * Fix indentation in init script. - - -- Soren Hansen <soren@ubuntu.com> Fri, 10 Jul 2009 23:47:27 +0200 - -ec2-init (0.3.4ubuntu6) karmic; urgency=low - - * Replace calls to ec2-get-data with calls to ec2-get-info. - * Make this package arch: all. - - -- Soren Hansen <soren@ubuntu.com> Fri, 10 Jul 2009 23:16:35 +0200 - -ec2-init (0.3.4ubuntu5) karmic; urgency=low - - * debian/init: Move instance reboot detection out of python - scripts and move them to the init script. - * debian/ec2-set-defaults.py: - - Point to the right location for locale (LP: #387611) - - Default to UTC (LP: #341060) - * debian/ec2-set-apt-sources.py: If you can't contact EC2 then use the - regular archive (LP: #387027) - * debian/ec2-setup-hostname.py: Use the local hostname in /etc/hosts - and don't change once rebooted. (LP: #352745) - - -- Chuck Short <zulcss@ubuntu.com> Wed, 08 Jul 2009 09:48:49 -0400 - -ec2-init (0.3.4ubuntu4) karmic; urgency=low - - * debian/init: Run update-motd regardless of whether it's a firstboot or not. - * debian/init: Fix comments (LP: #373057) - * debian/control: Add update-motd as a depends. - * ec2-set-defaults.py: Wait for network to become available. (LP: #308530) - - -- Chuck Short <zulcss@ubuntu.com> Thu, 28 May 2009 05:04:31 -0400 - -ec2-init (0.3.4ubuntu3) karmic; urgency=low - - * debian/init: Move init script to run before ssh and regenerate the ssh - host keys in the init script rather than /etc/rc.local (LP: #370628) - * ec2-set-apt-sources.py: - - Move sources.list to /var/ec2 so it doesn't get removed after user - reboots. - * ec2-set-defaults.py: - - Move locale to /var/ec2/ so it doesn't get removed after user reboots. - * ec2-set-hostname.py - - Create an /etc/hostname as well. - - -- Chuck Short <zulcss@ubuntu.com> Thu, 14 May 2009 11:11:49 -0400 - -ec2-init (0.3.4ubuntu2) karmic; urgency=low - - * Really change the locale when setting up an instance. (LP: #341066) - * Run ec2-run-user-data script last. (LP: #373055) - * Minor comment tweaks. (LP: #373057) - - -- Chuck Short <zulcss@ubuntu.com> Wed, 13 May 2009 13:41:35 -0400 - -ec2-init (0.3.4ubuntu1) karmic; urgency=low - - * Add more smarts to ec2 instance bring up. (LP: #371936) - - -- Chuck Short <zulcss@ubuntu.com> Tue, 05 May 2009 08:59:54 -0400 - -ec2-init (0.3.3ubuntu12) jaunty; urgency=low - - * ec2-run-user-data.py: Fix error.
- - -- Chuck Short <zulcss@ubuntu.com> Tue, 07 Apr 2009 08:14:07 -0400 - -ec2-init (0.3.3ubuntu11) jaunty; urgency=low - - * debian/control: - - Add python-cheetah and python-apt as a dependency. - * debian/ec2-config.cfg: - - Remove distro due to the change in ec2-set-apt-sources.py - * debian/install - - Install the templates in the right place. - * ec2-set-apt-sources.py: - - Use python-apt to update the sources.list. - - -- Chuck Short <zulcss@ubuntu.com> Wed, 01 Apr 2009 13:58:43 -0400 - -ec2-init (0.3.3ubuntu10) jaunty; urgency=low - - * ec2-set-hostname.py: - - Use template for /etc/hosts creation. - - Don't use public_hostname in /etc/hosts. (LP: #352745) - - -- Chuck Short <zulcss@ubuntu.com> Wed, 01 Apr 2009 08:48:05 -0400 - -ec2-init (0.3.3ubuntu9) jaunty; urgency=low - - * ec2-set-apt-sources.py: - - Use a template to generate the sources.list and generate it based on the lsb_release. - - -- Chuck Short <zulcss@ubuntu.com> Tue, 31 Mar 2009 15:15:55 -0400 - -ec2-init (0.3.3ubuntu8) jaunty; urgency=low - - * ec2-set-apt-sources.py: - - Add the ubuntu-on-ec2 ppa. - - -- Chuck Short <zulcss@ubuntu.com> Tue, 31 Mar 2009 09:37:13 -0400 - -ec2-init (0.3.3ubuntu7) jaunty; urgency=low - - * debian/rules: Fix typo. - - -- Chuck Short <zulcss@ubuntu.com> Sun, 22 Mar 2009 17:14:16 -0400 - -ec2-init (0.3.3ubuntu6) jaunty; urgency=low - - * Set the configuration file to jaunty. - * ec2-fetch-credentials: Fix typo. - * ec2-set-defaults.py: - - Remove timezone change when booting the instance. - - Redirect output to /dev/null. - * ec2-set-apt-sources.py: - - Run apt-get update after the /etc/apt/sources.list and - redirect the output to /dev/null. - * rightscale-init: Updated rightscale-init - - -- Chuck Short <zulcss@ubuntu.com> Thu, 19 Mar 2009 20:52:59 -0400 - -ec2-init (0.3.3ubuntu5) jaunty; urgency=low - - * debian/ec2-config.cfg: - - Add disable root option. - * debian/ec2-init.rightscale-init.init: - - Add rightscale detection script. - * ec2-get-info.py: - - Display the information about an AMI instance. - - -- Chuck Short <zulcss@ubuntu.com> Mon, 16 Mar 2009 08:54:49 -0400 - -ec2-init (0.3.3ubuntu4) jaunty; urgency=low - - * ec2-fetch-credentials.py: - - Allow user to choose which user they wish to configure for. - - Allow user to disable root user if they wish to. - * ec2-set-defaults.py: - - Set default timezone to UTC. - - Set locale depending on zone. - * debian/init: - - Removed nash plugin. - - Add ec2-set-defaults. - - -- Chuck Short <zulcss@ubuntu.com> Wed, 04 Mar 2009 08:33:01 -0500 - -ec2-init (0.3.3ubuntu3~intrepid4) intrepid; urgency=low - - * set distro to intrepid. - - -- Chuck Short <zulcss@ubuntu.com> Thu, 26 Feb 2009 10:28:06 -0500 - -ec2-init (0.3.3ubuntu3) jaunty; urgency=low - - * debian/ec2-init: Log results of ec2-run-user-data to syslog. - * ec2-run-user-data.py: Don't leave files around and log the - output to syslog. - - -- Chuck Short <zulcss@ubuntu.com> Thu, 26 Feb 2009 10:24:35 -0500 - -ec2-init (0.3.3ubuntu2) jaunty; urgency=low - - * ec2-set-apt-sources.py: - - Use the ec2 mirrors. (LP: #317065, #333897) - - Update the /etc/apt/sources.list (LP: #333904) - * ec2-fetch-credentials.py: - - Better error checking (LP: #325067) - - -- Chuck Short <zulcss@ubuntu.com> Tue, 24 Feb 2009 14:02:37 -0500 - -ec2-init (0.3.3ubuntu1) jaunty; urgency=low - - * debian/init: Fix init script.
- - -- Chuck Short <zulcss@ubuntu.com> Fri, 20 Feb 2009 09:22:54 -0500 - -ec2-init (0.3.3) jaunty; urgency=low - - * ec2-set-apt-sources.py - - Determine the zone that the user is in and generate - a /etc/apt/sources.list.d/ based on that. - * debian/init: - - Check to see if there is a /var/run/ec2 and create - it if it doesn't exist. - - Start ec2-set-apt-sources at first bootup. - * debian/rules: - - Install ec2-set-apt-sources. - * debian/control: - - Add python-configobj as a dependency. - * debian/{install,dirs} - - Create an /etc/ec2-init to read the configuration file and install it. - - -- Chuck Short <zulcss@ubuntu.com> Mon, 09 Feb 2009 10:35:56 -0500 - -ec2-init (0.3.2) jaunty; urgency=low - - * debian/init: - - Remove already ran detection - - Log the running of ec2-run-user-data to /var/log/ec2-user-data.log - * ec2-set-hostname.py: - - set hostname to the Ec2 local-hostname - - Update the /etc/hosts to change the ubuntu hostname to the - public hostname. - * ec2-fetch-credentials: - - Copy the ssh keys to the ubuntu user. - - Setup authorized keys for root to tell the user to login as the - ubuntu user when they try to connect. - * ec2-run-user-data: - - Create an .already-ran file to check to see if ec2-run-user-data - already ran. - - Save the ec2-run-user-data script in /var/ec2. - - -- Chuck Short <zulcss@ubuntu.com> Wed, 04 Feb 2009 09:32:08 -0500 - -ec2-init (0.3.1) jaunty; urgency=low - - * debian/dir: Install /var/ec2 to save user-data scripts. - * debian/rules: Start ec2-init after ssh. - * ec2-run-user-data.py: Save run-user-data script with ami-id. - - -- Chuck Short <zulcss@ubuntu.com> Mon, 26 Jan 2009 10:40:52 -0500 - -ec2-init (0.3) jaunty; urgency=low - - * ec2-run-user-data: Fix python error when writing a file to the disk. - - -- Chuck Short <zulcss@ubuntu.com> Thu, 15 Jan 2009 11:49:08 -0500 - -ec2-init (0.2) jaunty; urgency=low - - * debian/init: Run fetch-credentials before anything else. - (LP: #308533) - * Add ec2-set-hostname.py: Queries ec2 metadata for public-hostname - and then sets it (LP: #316201) - - -- Chuck Short <zulcss@ubuntu.com> Tue, 13 Jan 2009 15:20:21 -0500 - -ec2-init (0.1) intrepid; urgency=low - - * Initial release (LP: #269434). - - -- Soren Hansen <soren@ubuntu.com> Fri, 12 Sep 2008 15:30:32 +0200 - diff --git a/debian/cherry-pick b/debian/cherry-pick deleted file mode 100755 index dd557246..00000000 --- a/debian/cherry-pick +++ /dev/null @@ -1,197 +0,0 @@ -#!/bin/bash - -VERBOSITY=0 -TEMP_D="" -CR=$'\n' - -error() { echo "$@" 1>&2; } -fail() { [ $# -eq 0 ] || error "$@"; exit 1; } - -Usage() { - cat <<EOF -Usage: ${0##*/} [ options ] <<ARGUMENTS>> - - Cherry pick a patch into debian/patches. - Useful to grab an upstream commit to the current packaging branch. - - options: - -h | --help show help -EOF -} - -bad_Usage() { Usage 1>&2; [ $# -eq 0 ] || error "$@"; return 1; } -cleanup() { - [ -z "${TEMP_D}" -o ! -d "${TEMP_D}" ] || rm -Rf "${TEMP_D}" } - -debug() { - local level=${1}; shift; - [ "${level}" -gt "${VERBOSITY}" ] && return - error "${@}" } - -shorten() { - local name="$1" len="70" - while [ "${#name}" -gt "$len" ]; do - name="${name%-*}" - done - _RET="$name" } - -print_commit() { - local subject="$1" author="$2" bugs="$3" aname="" - aname=${author% <*} - echo "$subject${bugs:+ (LP: ${bugs})}" } - -print_bugs() { - local subject="$1" author="$2" bugs="$3" aname="" - echo "$bugs" } - -git_log_to_dch() { - # call printer with subject, author and bugs as extracted - # from either git format-patch output or git show output.
- local line="" commit="" lcommit="" bugs="" - local printer="${1:-print_commit}" - while :; do - read line || break - case "$line" in - commit\ *|From\ *) - if [ -n "$commit" ]; then - "$printer" "$subject" "$author" "$bugs" - fi - commit=${line#* } - commit=${commit%% *} - bugs="" - author="" - subject="" - ;; - Author:\ *|From:\ *) author="${line#*: }";; - LP:*) bugs="${bugs:+${bugs}, }${line#*: }";; - "") [ -z "$subject" ] && read subject;; - Subject:\ *) - subject="${line#Subject: }" - subject="${subject#\[PATCH\] }" - ;; - esac - done - if [ -n "$commit" ]; then - "$printer" "$subject" "$author" "$bugs" - fi -} - -main() { - local short_opts="ho:v" - local long_opts="help,verbose" - local getopt_out="" - getopt_out=$(getopt --name "${0##*/}" \ - --options "${short_opts}" --long "${long_opts}" -- "$@") && - eval set -- "${getopt_out}" || - { bad_Usage; return; } - - local cur="" next="" - - while [ $# -ne 0 ]; do - cur="$1"; next="$2"; - case "$cur" in - -h|--help) Usage ; exit 0;; - -v|--verbose) VERBOSITY=$((${VERBOSITY}+1));; - --) shift; break;; - esac - shift; - done - - [ -n "$TEMP_D" ] || - TEMP_D=$(mktemp -d "${TMPDIR:-/tmp}/${0##*/}.XXXXXX") || - { error "failed to make tempdir"; return 1; } - trap cleanup EXIT - - [ $# -gt 0 ] || { bad_Usage "must provide commit-ish"; return; } - - local r="" commit_in="$1" chash="" shash="" sname="" fname="" cur_br="" - cur_br=$(git rev-parse --abbrev-ref HEAD) || - { error "failed to get current branch"; return 1; } - chash=$(git show --quiet "--pretty=format:%H" "${commit_in}") || - { error "failed git show $commit_in"; return 1; } - - if git merge-base --is-ancestor "$chash" HEAD; then - error "current branch '$cur_br' already contains $commit_in ($chash)" - return 1 - fi - - out=$(git show --quiet "--pretty=format:%h %f" "$chash") || - { error "failed git show $chash"; return 1; } - - shash=${out% *} - sname=${out#* } - longname="cpick-$shash-$sname" - shorten "$longname" - fname="$_RET" - - [ -d debian/patches ] || mkdir -p debian/patches || - { error "failed to make debian/patches"; return 1; } - - local series="debian/patches/series" fpath="debian/patches/$fname" - if [ -e "$series" ] && out=$(grep -- "-${shash}-" "$series"); then - error "$chash already exists in $series" - error " $out" - return 1 - fi - - if [ -e "$series" ]; then - if out=$(quilt applied 2>&1); then - error "there are quilt patches applied!" - error "$out" - return 1 - fi - fi - - git format-patch --stdout -1 "$chash" > "$fpath" || - { error "failed git format-patch -1 $chash > $fpath"; return 1; } - - echo "$fname" >> "$series" || - { error "failed to write to $series"; return 1; } - - quilt push "$fname" || - { error "patches do not cleanly apply"; return 1; } - quilt refresh && quilt pop -a || - { error "failed to refresh or pop quilt"; return 1; } - - local message="" - message=$(git_log_to_dch < "$fpath") || - { error "failed getting log entry from $fpath"; return 1; } - dch -i "cherry-pick $shash: $message" - - dch -e || { - r=$?; - error "dch -e exited $r"; - return $r; - } - - local commit_files="" - commit_files=( "$series" "$fpath" ) - git diff HEAD "${commit_files[@]}" - - echo -n "Commit this change? 
(Y/n): " - read answer || fail "failed to read answer" - case "$answer" in - n|[Nn][oO]) exit 1;; - esac - - bugs=$(git_log_to_dch print_bugs < "$fpath") - msg="cherry pick $shash${bugs:+${CR}${CR}LP: ${bugs}}" - git add "$series" "$fpath" || - { error "failed to git add $series $fpath"; return 1; } - - git commit -m "$msg" "${commit_files[@]}" || - fail "failed to commit '$msg'" - - git commit -m "update changelog" debian/changelog || - fail "failed to commit update to debian changelog." - - return 0 -} - -main "$@" -# vi: ts=4 expandtab diff --git a/debian/cloud-init.config b/debian/cloud-init.config deleted file mode 100644 index 6e9c6f73..00000000 --- a/debian/cloud-init.config +++ /dev/null @@ -1,78 +0,0 @@ -#!/bin/sh -e - -. /usr/share/debconf/confmodule - -hasEc2Md() { - local d=/var/lib/cloud i=/var/lib/cloud/instance/ f="" - local flist="${i}/datasource ${i}/obj.pkl ${d}/cache/obj.pkl" - # search current instance data source information - for f in ${flist}; do - [ -f "${f}" ] || continue - grep -q DataSourceEc2 "${f}" 2>/dev/null && return 0 - done - - - # there was no data above found that would have been indicated - # by a upgrade. lets just see if we can't find the metadata - # service. if wget is not present (it is not 'essential') - # then we will quietly return 1 - local url="http://169.254.169.254/2009-04-04/meta-data/instance-id" - i="" - if command -v wget >/dev/null 2>&1; then - local tout="--connect-timeout 5 --read-timeout 5 --tries 1" - i=$(wget "${url}" ${tout} -O - 2>/dev/null) || : - elif command -v curl >/dev/null 2>&1; then - i=$(curl "${url}" --max-time 1) || : - fi - # if instance-id starts with 'i-' then in all likelyhood its good - [ "${i#i-}" != "${i}" ] && return 0 - return 1 -} - -get_yaml_list() { - # get_yaml_list(file, key, def): return a comma delimited list with the value - # for the yaml array defined in 'key' from 'file'. if not found , return 'def' - # only really supports 'key: [en1, en2 ]' format. - local file="$1" key="$2" default="$3" - [ -f "$file" ] || return 1 - # any thing that didn't match the key is deleted so the final 'p' only - # prints things that matched. - RET=$(sed -n -e "/^$key:/"'!'d -e "s/$key:[ \[]*//"\ - -e "s, \]$,," -e p "$file") - [ -n "$RET" ] || RET="$default" -} - -# old_dpkg_cfg is very old file that is no longer read by cloud-init. -# it gets re-named to cloud.cfg.d/90_dpkg.cfg in the preinst. -dpkg_cfg="/etc/cloud/cloud.cfg.d/90_dpkg.cfg" -old_dpkg_cfg="/etc/cloud/distro.cfg" -if [ -f "${old_dpkg_cfg}" -a ! -f "$dpkg_cfg" ]; then - dpkg_cfg="${old_dpkg_cfg}" - echo "WARN: reading value from ${old_dpkg_cfg}" 1>&2 -fi -if [ -f "$dpkg_cfg" ]; then - if get_yaml_list "$dpkg_cfg" datasource_list NOTFOUND && - val="$RET" && [ "$val" != "NOTFOUND" ]; then - db_set cloud-init/datasources $val - else - echo "WARN: failed to read datasource_list from $dpkg_cfg" 1>&2 - fi -elif { db_fget cloud-init/datasources seen || : ; } && - [ "${RET}" = "false" ]; then - # this is the first time this we've run (installation or re-install after - # purge). try to determine if the Ec2 datasource is there. - # if it is, and Ec2 was not in the default list, then add it. 
- db_get cloud-init/datasources - def="${RET}" - case " ${def}," in - *\ Ec2,*) :;; - *) hasEc2Md && - db_set cloud-init/datasources "${def:+${def}, }Ec2";; - esac -fi - -db_input low cloud-init/datasources || true -db_go - -exit 0 -# vi: ts=4 noexpandtab diff --git a/debian/cloud-init.lintian-overrides b/debian/cloud-init.lintian-overrides deleted file mode 100644 index 591b651e..00000000 --- a/debian/cloud-init.lintian-overrides +++ /dev/null @@ -1,3 +0,0 @@ -# this explicitly diverts ureadahead.conf in ureadahead package -# see LP: #499520 for more discussion -cloud-init binary: diversion-for-unknown-file diff --git a/debian/cloud-init.postinst b/debian/cloud-init.postinst deleted file mode 100644 index 420420b5..00000000 --- a/debian/cloud-init.postinst +++ /dev/null @@ -1,331 +0,0 @@ -#!/bin/sh -e - -. /usr/share/debconf/confmodule - -set -f # disable pathname expansion -db_capb escape # to support carriage return / multi-line values - -debug() { - [ "${_CI_UPGRADE_DEBUG:-0}" = "0" ] && return 0 - echo "$@" 1>&2 || : -} - -update_cfg() { - # takes filename, header, new object (in yaml), optionally 'remover' - # and merges new into existing object in filename, and then updates file - # remover a string that means "delete existing entry" - python3 -c ' -import sys, yaml - -def update(src, cand): - if not (isinstance(src, dict) and isinstance(cand, dict)): - return cand - for k, v in cand.items(): - # if the candidate has _ as value, delete source - if v == REMOVER: - if k in src: - del src[k] - continue - if k not in src: - src[k] = v - else: - src[k] = update(src[k], v) - return src - -(fname, header, newyaml) = sys.argv[1:4] -REMOVER = object -if len(sys.argv) == 5: - REMOVER = sys.argv[4] -newcfg = yaml.load(newyaml) - -with open(fname, "r") as fp: - cfg = yaml.load(fp) -if not cfg: cfg = {} - -cfg = update(cfg, newcfg) - -with open(fname, "w") as fp: - fp.write(header + "\n") - fp.write(yaml.dump(cfg))' "$@" -} - -handle_preseed_maas() { - local cfg_file="/etc/cloud/cloud.cfg.d/90_dpkg_maas.cfg" - local md_url="" creds_all="" c_key="" t_key="" t_sec="" c_sec=""; - - db_get "cloud-init/maas-metadata-url" && md_url="$RET" || : - db_get "cloud-init/maas-metadata-credentials" && creds_all="$RET" || : - - # nothing to do - [ -n "$md_url" -o -n "$creds_all" ] || return 0 - - # change a url query string format into : delimited - if [ -n "$creds_all" -a "${creds_all#*&}" != "${creds_all}" ]; then - # the command here ends up looking like: - # python3 -c '...' 'oauth_consumer_key=v1&oauth_token_key=v2...' \ - # oauth_consumer_key oauth_token_key oauth_token_secret - creds_all=$(python3 -c 'from six.moves.urllib.parse import parse_qs; -import sys; -keys = parse_qs(sys.argv[1]) -for k in sys.argv[2:]: - sys.stdout.write("%s:" % keys.get(k,[""])[0])' "$creds_all" \ - oauth_consumer_key oauth_token_key oauth_token_secret -) - fi - - # now, if non-empty creds_all is: consumer_key:token_key:token_secret - if [ -n "$creds_all" ]; then - OIFS="$IFS"; IFS=:; set -- $creds_all; IFS="$OIFS" - c_key=$1; t_key=$2; t_sec=$3 - fi - - if [ "$md_url" = "_" -a "${c_key}:${t_key}:${t_sec}" = "_:_:_" ]; then - # if all these values were '_', the delete value, just delete the file. 
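-        # ('_' is the sentinel these maintainer scripts use for "delete the
-        # existing value"; update_cfg above treats it as its REMOVER marker.)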
- rm -f "$cfg_file" - else - local header="# written by cloud-init debian package per preseed entries -# cloud-init/{maas-metadata-url,/maas-metadata-credentials}" - - local pair="" k="" v="" pload="" orig_umask="" - for pair in "metadata_url:$md_url" "consumer_key:${c_key}" \ - "token_key:${t_key}" "token_secret:$t_sec"; do - k=${pair%%:*} - v=${pair#${k}:} - [ -n "$v" ] && pload="${pload} $k: \"$v\"," - done - - # '_' would indicate "delete", otherwise, existing entries are left - orig_umask=$(umask) - umask 066 - : >> "$cfg_file" && chmod 600 "$cfg_file" - update_cfg "$cfg_file" "$header" "datasource: { MAAS: { ${pload%,} } }" _ - umask ${orig_umask} - fi - - # now clear the database of the values, as they've been consumed - db_unregister "cloud-init/maas-metadata-url" || : - db_unregister "cloud-init/maas-metadata-credentials" || : -} - -handle_preseed_local_cloud_config() { - local ccfg="" debconf_name="cloud-init/local-cloud-config" - local cfg_file="/etc/cloud/cloud.cfg.d/90_dpkg_local_cloud_config.cfg" - local header="# written by cloud-init debian package per preseed entry -# $debconf_name" - - db_get "${debconf_name}" && ccfg="$RET" || : - - if [ "$ccfg" = "_" ]; then - rm -f "$cfg_file" - elif [ -n "$ccfg" ]; then - { echo "$header"; echo "$ccfg"; } > "$cfg_file" - fi - db_unregister "${debconf_name}" || : -} - -fix_1336855() { - ### Begin fix for LP: 1336855 - # fix issue where cloud-init misidentifies the location of grub and - # where grub misidentifies the location of the device - - # if cloud-init's grub module did not run, then it did not break anything. - [ -f /var/lib/cloud/instance/sem/config_grub_dpkg ] || return 0 - - # This bug only happened on /dev/xvda devices - [ -b /dev/xvda ] || return 0 - - # we can't fix the system without /proc/cmdline - [ -r /proc/cmdline ] || return 0 - - # Don't do anything unless we have grub - [ -x /usr/sbin/grub-install ] || return 0 - - # First, identify the kernel device for the parent. - for parm in $(cat /proc/cmdline); do - dev=$(echo $parm | awk -F\= '{print$NF}') - case $parm in - root=UUID*) [ -d /dev/disk/by-uuid ] && - root_dev=$(readlink -f /dev/disk/by-uuid/$dev);; - root=LABEL*) [ -d /dev/disk/by-label ] && - root_dev=$(readlink -f /dev/disk/by-label/$dev);; - root=/dev*) [ -d /dev ] && - root_dev=$(readlink -f $dev);; - esac - [ -n "$root_dev" ] && break - done - - # Don't continue if we don't have a root directive - [ -z "$root_dev" ] && return 0 - - # Only deal with simple, cloud-based devices - case $root_dev in - /dev/vda*|/dev/xvda*|/dev/sda*) ;; - *) return 0;; - esac - - # Make sure that we are not chrooted. - [ "$(stat -c %d:%i /)" != "$(stat -c %d:%i /proc/1/root/.)" ] && return 0 - - # Check if we are in a container, i.e. LXC - if systemd-detect-virt --quiet --container || lxc-is-container 2>/dev/null; then - return 0 - fi - - # Find out where grub thinks the root device is. Only continue if - # grub postinst would install/reinstall grub - db_get grub-pc/install_devices && grub_cfg_dev=${RET} || return 0 - db_get grub-pc/install_devices_empty && grub_dev_empty=${RET} || return 0 - - # Find out the parent device for the root device. - # example output: sda/sda1 - block_path=$(udevadm info -q path -n $root_dev | awk '-Fblock/' '{print$NF}') - - # Extract the parent device name. 
This works where the device is a block device - # example output: /dev/sda - parent_dev=$(echo $block_path | awk '-F/' '$1 { if ( $1 ) {print"/dev/"$1}}') - [ -b "${parent_dev}" ] || return 0 - - # Do nothing if the device that the grub postinst would install is already used - [ "$grub_cfg_dev" = "$parent_dev" -o "$grub_cfg_dev" = "$root_dev" ] && return 0 - - # If we get here, do the installation - echo "Reconfiguring grub install device due to mismatch (LP: #1336855)" - echo " Grub should use $parent_dev but is configured for $grub_cfg_dev" - db_set grub-pc/install_devices "$parent_dev" - grub-install $parent_dev && - echo "Reinstalled grub" || - echo "WARNING! Unable to fix grub device mismatch. You may be broken." - -} - -cleanup_lp1552999() { - local oldver="$1" last_bad_ver="0.7.7~bzr1178" - dpkg --compare-versions "$oldver" le "$last_bad_ver" || return 0 - local edir="/etc/systemd/system/multi-user.target.wants" - rm -f "$edir/cloud-config.service" "$edir/cloud-final.service" \ - "$edir/cloud-init-local.service" "$edir/cloud-init.service" -} - -disable_network_config_on_upgrade() { - local oldver="$1" last_without_net="0.7.7~bzr1182-0ubuntu1" - if [ ! -f /var/lib/cloud/instance/obj.pkl ]; then - # this is a fresh system not one that has been booted. - return 0 - fi - if dpkg --compare-versions "$oldver" le "$last_without_net"; then - echo "dpkg upgrade from $oldver" > /var/lib/cloud/data/upgraded-network - fi -} - -fix_azure_upgrade_1611074() { - # adjust /etc/fstab on azure so boot after resize does not mount - # /mnt as ntfs and stop re-formatting. - local fixed_ver="0.7.8-49-1" dspath="/var/lib/cloud/instance/datasource" - local oldver="$1" tmpf="" r="" wmsg="" me="cloud-init postinst" - - # if not on azure, or not booted with instance/ skip out. - if [ ! -e "$dspath" ]; then - debug "no $dspath" - return 0 - fi - if ! grep -qi azure "$dspath"; then - debug "not on azure per $dspath" - return 0 - fi - - # if there is no /etc/fstab, then nothing to fix. - if [ ! -e /etc/fstab ]; then - debug "no /etc/fstab" - return 0 - fi - - if dpkg --compare-versions "$oldver" ge "$fixed_ver"; then - debug "previous version was fixed" - return 0 - fi - - wmsg="WARN: $me failed." - wmsg="$wmsg Subsequent resize may not update ephemeral correctly." - tmpf=$(mktemp "${TMPDIR:-/tmp}/cloud-init-upgrade.XXXXXX") || { - echo "$wmsg (mktemp failed with $?)" 1>&2 - return 0; - } - - awk '{ - if ($4 !~ /x-systemd.requires/ && $4 ~ /comment=cloudconfig/) { - sub(/comment=cloudconfig/, "x-systemd.requires=cloud-init.service,comment=cloudconfig") - } - printf("%s\n", $0)}' /etc/fstab > "$tmpf" || { - echo "$wmsg (awk reading of /etc/fstab failed with $?)" 1>&2 - rm -f "$tmpf" - return 0; - } - if cmp /etc/fstab "$tmpf" >/dev/null 2>&1; then - debug "no changes needed." - else - cat "$tmpf" > /etc/fstab || { - r=$? - echo "$wmsg (cp $tmpf /etc/fstab failed with $r)" - echo ==== expected to write the following to /etc/fstab ===== - cat "$tmpf" - echo ======================================================== - return $r - } 1>&2 - echo "$me fixed /etc/fstab for x-systemd.requires" 1>&2 - fi - rm "$tmpf" || : -} - - -if [ "$1" = "configure" ]; then - # disable ureadahead (LP: #499520) - dpkg-divert --package cloud-init --rename --divert \ - /etc/init/ureadahead.conf.disabled --add /etc/init/ureadahead.conf - if db_get cloud-init/datasources; then - values="$RET" - if [ "${values#*MaaS}" != "${values}" ]; then - # if db had old MAAS spelling, fix it. 
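-            # (the datasource name was respelled from 'MaaS' to 'MAAS';
-            # normalize any value an older package left in debconf)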
- values=$(echo "$values" | sed 's,MaaS,MAAS,g') - db_set cloud-init/datasources "$values" - fi - cat > /etc/cloud/cloud.cfg.d/90_dpkg.cfg <<EOF -# to update this file, run dpkg-reconfigure cloud-init -datasource_list: [ $values ] -EOF - fi - - # we want to affect apt_pipelining on install, not wait for - # cloud-init to run it on next boot. - pipeline_f="/etc/apt/apt.conf.d/90cloud-init-pipelining" - if [ -f /var/lib/cloud/instance/obj.pkl ]; then - cloud-init single --name apt-pipelining --frequency once >/dev/null 2>&1 || - echo "Warning: failed to setup apt-pipelining" 1>&2 - elif [ ! -f "$pipeline_f" ]; then - # there was no cloud available, so populate it ourselves. - cat > "$pipeline_f" <<EOF -//Written by cloud-init per 'apt_pipelining' -Acquire::http::Pipeline-Depth "0"; -EOF - fi - - # if there are maas settings pre-seeded apply them - handle_preseed_maas - - # if there is generic cloud-config preseed, apply them - handle_preseed_local_cloud_config - - # fix issue where cloud-init misidentifies the location of grub - fix_1336855 - - # make upgrades disable network changes by cloud-init - disable_network_config_on_upgrade "$2" - - fix_azure_upgrade_1611074 "$2" -fi - -#DEBHELPER# - -if [ "$1" = "configure" ]; then - oldver="$2" - cleanup_lp1552999 "$oldver" -fi diff --git a/debian/cloud-init.postrm b/debian/cloud-init.postrm deleted file mode 100644 index 81b66812..00000000 --- a/debian/cloud-init.postrm +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/sh - -set -e - -case "$1" in - purge) - rm -f /etc/cloud/cloud.cfg.d/90_dpkg.cfg - rm -f /etc/apt/apt.conf.d/90cloud-init-pipelining - ;; - remove) - dpkg-divert --package cloud-init --remove --rename --divert \ - /etc/init/ureadahead.conf.disabled /etc/init/ureadahead.conf - rm -f /etc/cloud/cloud.cfg.d/90cloud-init-pipelining - ;; -esac - -#DEBHELPER# diff --git a/debian/cloud-init.preinst b/debian/cloud-init.preinst deleted file mode 100644 index c8406e31..00000000 --- a/debian/cloud-init.preinst +++ /dev/null @@ -1,254 +0,0 @@ -#!/bin/sh -set -e - -# Remove a no-longer used conffile -rm_conffile() { - local PKGNAME="$1" - local CONFFILE="$2" - - [ -e "$CONFFILE" ] || return 0 - - local md5sum="$(md5sum $CONFFILE | sed -e 's/ .*//')" - local old_md5sum="$(dpkg-query -W -f='${Conffiles}' $PKGNAME | \ - sed -n -e "\' $CONFFILE ' { s/ obsolete$//; s/.* //; p }")" - if [ "$md5sum" != "$old_md5sum" ]; then - echo "Obsolete conffile $CONFFILE has been modified by you." - echo "Saving as $CONFFILE.dpkg-bak ..." - mv -f "$CONFFILE" "$CONFFILE".dpkg-bak - else - echo "Removing obsolete conffile $CONFFILE ..." 
- rm -f "$CONFFILE" - fi -} - -# move_sem(src,targets) -# rename sem/* items named $src to $targets -# (with hard links if more than one) -move_sem() { - local src=$1 f="" targ="" freqid="" dir=/var/lib/cloud/sem - shift - # link the remaining targets to src, if it exists - for f in "${dir}/${src}."*; do - # if there were no src entries, nothing to do - [ -f "${f}" ] || return 0 - freqid=${f#${dir}/${src}.} # 'i-abcdefg' or 'always' - for targ in "$@"; do - [ -e "${dir}/${targ}.${freqid}" ] && continue - ln -f "${f}" "${dir}/${targ}.${freqid}" - done - rm "${f}" - done - return 0 -} - -fix_ephemeral0_micro() { - # make ephemeral0 entries in /etc/fstab written by cloudconfig - # 'nobootwait', so they do not block subsequent boots (LP: #634102) - local out="" oldver=$1 dev="" adop="nobootwait" - local s="[[:space:]]" ns="[^[:space:]]" # space and "not space" - local remain="${s}\+.*" first4="" - for dev in /dev/sda2 /dev/sdb; do - first4="${dev}$s\+$ns\+$s\+$ns\+$s\+$ns\+" - out=$(awk '$1 == dev && $4 ~ /cloudconfig/ && $4 !~ op { print $1 ; }' \ - dev="${dev}" "op=${adop}" /etc/fstab) || return 0 - [ -n "${out}" ] || continue - echo "making ephemeral ${dev} in /etc/fstab ${adop} (LP: #634102)" 1>&2 - sed -i "s|^\(${first4}\)\(${remain}\)|\1,${adop}\2|" /etc/fstab - done -} - -convert_varlib_05x_06x() { - local url="http://169.254.169.254/2009-04-04/meta-data/instance-id" - local tout="--connect-timeout .5 --read-timeout .5" - local iid="" f="" uptime="" bn="" - iid=$(wget "${url}" ${tout} --tries 1 -O - 2>/dev/null) || iid="" - - [ -n "${iid}" -a -d /var/lib/cloud ] || return 0 - cd /var/lib/cloud - mkdir -p "instances/${iid}" "instances/${iid}/sem" - [ -e instance ] || ln -sf "instances/${iid}" instance - for f in data/*; do - [ -f "$f" ] || continue - case "${f#*/}" in - user-data.txt.i|user-data.txt|cloud-config.txt) - mv "${f}" instance/ - ;; - esac - done - [ -f data/cache/obj.pkl ] && mv data/cache/obj.pkl instance/ - for f in sem/*.${iid}; do - [ -f "${f}" ] || continue - bn=${f#*/}; bn=${bn%.${iid}} - case "${bn}" in - set_defaults) mv "${f}" "instance/sem/config-locale";; - set_hostname) mv "${f}" "instance/sem/config-set_hostname";; - *) mv "${f}" "instance/sem/${bn}";; - esac - done - [ ! -f sem/update_hostname.always ] || - mv sem/update_hostname.always sem/config-update_hostname.always - rmdir data/cache 2>/dev/null || : - rm -f data/available.build - - mkdir -p instance/scripts - if [ -d data/scripts ]; then - mv data/scripts/* instance/scripts || : - rmdir data/scripts || : - fi - [ -d data/scripts/* ] && mv data/scripts instance/ - - [ ! -e instance/boot-finished ] && - [ -f /proc/uptime ] && read uptime other </proc/uptime && - echo "${uptime}" > instance/boot-finished || : - - return 0 -} - -azure_apply_new_instance_id_1506187() { - # With LP: #1506187, the Azure instance ID detection method was changed - # to use the DMI data. In order to prevent existing instances from thinking - # they are new instances, the instance ID needs to be updated here. - - if grep DataSourceAzure /var/lib/cloud/instance/datasource > /dev/null 2>&1; then - - product_id_f="/sys/devices/virtual/dmi/id/product_uuid" - instance_id_f="/var/lib/cloud/data/instance-id" - - if [ ! -e "${product_id_f}" -o ! -e "${instance_id_f}" ]; then - return 0 - fi - - # Get the current instance ID's (new and old) - new_instance_id="$(cat ${product_id_f})" - old_instance_id="$(cat ${instance_id_f})" - - if [ "${new_instance_id}" = "${old_instance_id}" ]; then - # this may have been applied for a prior version, i.e. 
upgrading - # from 14.04 to 16.04 - return 0 - - elif [ -z "${new_instance_id}" -o -z "${old_instance_id}" ]; then - cat <<EOM - -WARNING: Failed to migrate old instance ID to new instance ID. - Cloud-init may detect this instance as a new instance upon reboot. - Please see: https://bugs.launchpad.net/bug/1506187 - -EOM - - elif [ "${new_instance_id}" != "${old_instance_id}" ]; then - cat <<EOM - -AZURE: this instance uses an unstable instance ID. Cloud-init will - migrate the instance ID from: - ${old_instance_id} - to: - ${new_instance_id} - For more information about this change, please see: - https://bugs.launchpad.net/bug/1506187 - https://azure.microsoft.com/en-us/blog/accessing-and-using-azure-vm-unique-id - -EOM - - # Write the new instance id - echo "${new_instance_id}" > /var/lib/cloud/data/instance-id - - # Remove the symlink for the instance - rm /var/lib/cloud/instance - - # Rename the old instance id to the new one - mv /var/lib/cloud/instances/${old_instance_id} \ - /var/lib/cloud/instances/${new_instance_id} - - # Link the old id to the new one, just incase - ln -s /var/lib/cloud/instances/${new_instance_id} \ - /var/lib/cloud/instances/${old_instance_id} - - # Make the active instance the new id - ln -s /var/lib/cloud/instances/${new_instance_id} \ - /var/lib/cloud/instance - fi -fi -} - -cleanup_lp1552999() { - local oldver="$1" last_bad_ver="0.7.7~bzr1178" - dpkg --compare-versions "$oldver" le "$last_bad_ver" || return 0 - local hdir="/var/lib/systemd/deb-systemd-helper-enabled" - hdir="$hdir/multi-user.target.wants" - local edir="/etc/systemd/system/multi-user.target.wants" - rm -f "$hdir/cloud-config.service" "$hdir/cloud-final.service" \ - "$hdir/cloud-init-local.service" "$hdir/cloud-init.service" -} - -case "$1" in - install|upgrade) - # removing obsolete conffiles from the 'ec2-init' package - if dpkg --compare-versions "$2" le "0.5.1"; then - rm_conffile ec2-init "/etc/init/cloud-ssh-keygen.conf" - fi - if dpkg --compare-versions "$2" lt "0.5.3"; then - rm_conffile ec2-init "/etc/init/ec2init.conf" - fi - if [ "$2" = "0.5.7-0ubuntu1" ]; then - bad_d=/etc/update-motd.d/92-ec2-upgrade-available - rm_conffile cloud-init "${bad_d}/motd-hook" - # the dir for this file is almost certainly empty, but - # if the file above was only moved, or other files there - # then leave it be - rmdir "${bad_d}" 2>/dev/null || true - fi - - if dpkg --compare-versions "$2" le "0.5.10-0ubuntu2"; then - old_confs="cloud-apt-update-upgrade cloud-config-misc - cloud-config-mounts cloud-config-puppet - cloud-config-ssh cloud-disable-ec2-metadata" - for f in ${old_confs}; do - rm_conffile cloud-init "/etc/init/${f}.conf" - done - fi - - if dpkg --compare-versions "$2" le "0.5.11-0ubuntu1"; then - # rename the config entries in sem/ so they're not run again - - # transition names in 0.5.11 had only short name (no config- prefix) - # so create config- entries for each - for name in apt-update-upgrade disable-ec2-metadata mounts \ - puppet runcmd ssh updates-check; do - move_sem ${name} config-${name} - done - - # 0.5.11 split 'config-misc' into 'updates-check' and 'runcmd' - move_sem config-misc config-updates-check config-runcmd - fi - - if dpkg --compare-versions "$2" le "0.5.14-0ubuntu5"; then - fix_ephemeral0_micro - fi - - if dpkg --compare-versions "$2" le 0.6.0-0ubuntu1; then - # convert /var/lib/cloud from 0.5.x layout to 0.6.x - convert_varlib_05x_06x - fi - - # 0.6.0 changed 'user-scripts' to config-scripts-user (LP: #1049146) - if [ -e /var/lib/cloud/instance/sem/user-scripts ]; 
then
-        ln -sf user-scripts /var/lib/cloud/instance/sem/config-scripts-user
-    fi
-
-    # 0.7.7-bzr1556 introduced new instance ID source for Azure
-    if dpkg --compare-versions "$2" le "0.7.7~bzr1556-0ubuntu1"; then
-        azure_apply_new_instance_id_1506187
-    fi
-
-    d=/etc/cloud/
-    if [ -f "$d/distro.cfg" ] && [ ! -f "$d/cloud.cfg.d/90_dpkg.cfg" ]; then
-        echo "moving $d/distro.cfg -> $d/cloud.cfg.d/90_dpkg.cfg"
-        [ -d "${d}/cloud.cfg.d" ] || mkdir "${d}/cloud.cfg.d"
-        mv "$d/distro.cfg" "$d/cloud.cfg.d/90_dpkg.cfg"
-    fi
-
-    cleanup_lp1552999 "$2"
-esac
-
-#DEBHELPER#
diff --git a/debian/cloud-init.prerm b/debian/cloud-init.prerm
deleted file mode 100644
index 8954fa4d..00000000
--- a/debian/cloud-init.prerm
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/sh
-
-set -e
-rm -f /etc/cron.d/cloudinit-updates
-
-#DEBHELPER#
diff --git a/debian/cloud-init.templates b/debian/cloud-init.templates
deleted file mode 100644
index 5ed37f73..00000000
--- a/debian/cloud-init.templates
+++ /dev/null
@@ -1,12 +0,0 @@
-Template: cloud-init/datasources
-Type: multiselect
-Default: NoCloud, ConfigDrive, OpenNebula, DigitalOcean, Azure, AltCloud, OVF, MAAS, GCE, OpenStack, CloudSigma, SmartOS, Bigstep, Scaleway, AliYun, Ec2, CloudStack, None
-Choices-C: NoCloud, ConfigDrive, OpenNebula, DigitalOcean, Azure, AltCloud, OVF, MAAS, GCE, OpenStack, CloudSigma, SmartOS, Bigstep, Scaleway, AliYun, Ec2, CloudStack, None
-Choices: NoCloud: Reads info from /var/lib/cloud/seed only, ConfigDrive: Reads data from OpenStack Config Drive, OpenNebula: read from OpenNebula context disk, DigitalOcean: reads data from Droplet datasource, Azure: read from MS Azure cdrom. Requires walinux-agent, AltCloud: config disks for RHEVm and vSphere, OVF: Reads data from OVF Transports, MAAS: Reads data from Ubuntu MAAS, GCE: Google Compute metadata service, OpenStack: native OpenStack metadata service, CloudSigma: metadata over serial for cloudsigma.com, SmartOS: Read from SmartOS metadata service, Bigstep: Bigstep metadata service, Scaleway: Scaleway metadata service, AliYun: Alibaba metadata service, Ec2: reads data from EC2 Metadata service, CloudStack: Read from CloudStack metadata service, None: Failsafe datasource
-Description: Which data sources should be searched?
- Cloud-init supports searching different "Data Sources" for information
- that it uses to configure a cloud instance.
- .
- Warning: Only select 'Ec2' if this system will be run on a platform with
- the EC2 metadata service present. Doing so incorrectly will result in
- a substantial timeout on boot.
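For context: the deleted cloud-init.postinst above consumed this debconf selection and rendered it into /etc/cloud/cloud.cfg.d/90_dpkg.cfg. A minimal sketch of that rendering step, with a hypothetical example selection (illustration only, not a recommended datasource list):

# re-render the debconf-selected datasources the way the old postinst did
values="NoCloud, ConfigDrive, Ec2, None"   # example selection only
cat > /etc/cloud/cloud.cfg.d/90_dpkg.cfg <<EOF
# to update this file, run dpkg-reconfigure cloud-init
datasource_list: [ $values ]
EOF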
diff --git a/debian/compat b/debian/compat deleted file mode 100644 index ec635144..00000000 --- a/debian/compat +++ /dev/null @@ -1 +0,0 @@ -9 diff --git a/debian/control b/debian/control deleted file mode 100644 index 1de4f2f5..00000000 --- a/debian/control +++ /dev/null @@ -1,70 +0,0 @@ -Source: cloud-init -Section: admin -Priority: extra -Maintainer: Scott Moser <smoser@ubuntu.com> -Build-Depends: debhelper (>= 9), - dh-python, - dh-systemd, - iproute2, - pep8, - po-debconf, - pyflakes, - python3, - python3-configobj, - python3-httpretty, - python3-jinja2, - python3-jsonpatch, - python3-mock, - python3-nose, - python3-oauthlib, - python3-pep8, - python3-pyflakes | pyflakes (<< 1.1.0-2), - python3-requests, - python3-serial, - python3-setuptools, - python3-six, - python3-unittest2, - python3-yaml -XS-Python-Version: all -Vcs-Browser: https://git.launchpad.net/cloud-init/?h=ubuntu/devel -Vcs-Git: git://git.launchpad.net/cloud-init -b ubuntu/devel -Standards-Version: 3.9.6 - -Package: cloud-init -Architecture: all -Depends: cloud-guest-utils | cloud-utils, - ifupdown (>= 0.6.10ubuntu5), - procps, - python3, - python3-requests (>= 0.8.2), - python3-serial, - ${misc:Depends}, - ${python3:Depends} -Breaks: walinuxagent (<< 2.1.3-0ubuntu4.1) -Recommends: eatmydata, gdisk, software-properties-common -Provides: ec2-init -Replaces: ec2-init (<<0.5.3) -Conflicts: ec2-init (<<0.5.3) -XB-Python-Version: ${python:Versions} -Description: Init scripts for cloud instances - Cloud instances need special scripts to run during initialisation - to retrieve and install ssh keys and to let the user run various scripts. - -Package: grub-legacy-ec2 -Depends: debconf (>= 1.5.19) | cdebconf, - ucf, - util-linux (>= 2.15-1), - ${misc:Depends} -Conflicts: grub -Suggests: grub-legacy-doc -Architecture: all -Description: Handles update-grub for ec2 instances - EC2 instances that use grub-legacy as a bootloader need a way to keep - /boot/grub/menu.lst up to date while not conflicting with grub-pc. - This package provides that. - -Package: ec2-init -Depends: cloud-init, ${misc:Depends} -Architecture: all -Description: package renamed -> cloud-init - This package has been renamed to 'cloud-init'. diff --git a/debian/copyright b/debian/copyright deleted file mode 100644 index 598cda14..00000000 --- a/debian/copyright +++ /dev/null @@ -1,40 +0,0 @@ -Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ -Upstream-Name: cloud-init -Upstream-Contact: cloud-init-dev@lists.launchpad.net -Source: https://launchpad.net/cloud-init - -Files: * -Copyright: 2010, Canonical Ltd. -License: GPL-3 or Apache-2.0 - -License: GPL-3 - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License version 3, as - published by the Free Software Foundation. - . - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - . - You should have received a copy of the GNU General Public License - along with this program. If not, see <http://www.gnu.org/licenses/>. - . - The complete text of the GPL version 3 can be seen in - /usr/share/common-licenses/GPL-3. - -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . 
- http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - On Debian-based systems the full text of the Apache version 2.0 license - can be found in `/usr/share/common-licenses/Apache-2.0'. diff --git a/debian/grub-legacy-ec2.install b/debian/grub-legacy-ec2.install deleted file mode 100644 index f9ac6281..00000000 --- a/debian/grub-legacy-ec2.install +++ /dev/null @@ -1,4 +0,0 @@ -debian/grub-set-default usr/sbin -debian/grub-set-default-legacy-ec2 usr/sbin -debian/kernel etc/ -debian/update-grub-legacy-ec2 usr/sbin diff --git a/debian/grub-legacy-ec2.lintian-overrides b/debian/grub-legacy-ec2.lintian-overrides deleted file mode 100644 index ed8c77f1..00000000 --- a/debian/grub-legacy-ec2.lintian-overrides +++ /dev/null @@ -1,6 +0,0 @@ -# no-debconf-config and debconf-is-not-a-registry lintian errors -# are present in the ubuntu grub package that these were pulled from. -# they're due to the use of debconf for merging prompt in -# update-grub-legacy-ec2 -grub-legacy-ec2 binary: no-debconf-config -grub-legacy-ec2 binary: debconf-is-not-a-registry diff --git a/debian/grub-legacy-ec2.postinst b/debian/grub-legacy-ec2.postinst deleted file mode 100644 index 3c4aa5cd..00000000 --- a/debian/grub-legacy-ec2.postinst +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/sh - -set -e - -if [ "$1" = "configure" ]; then - mkdir -p /boot/grub - update-grub-legacy-ec2 -y -fi - -#DEBHELPER# diff --git a/debian/grub-legacy-ec2.postrm b/debian/grub-legacy-ec2.postrm deleted file mode 100644 index 50184234..00000000 --- a/debian/grub-legacy-ec2.postrm +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/sh -set -e - -if [ "$1" = purge ]; then - if [ -x "`which ucf 2>/dev/null`" ]; then - ucf --purge /var/run/grub/menu.lst - fi - if [ -x "`which ucfr 2>/dev/null`" ]; then - ucfr --purge grub /var/run/grub/menu.lst - fi -fi - -if [ "$1" = "remove" ]; then - dpkg-divert --package grub-legacy-ec2 --remove --rename --divert \ - /usr/sbin/grub-set-default.real /usr/sbin/grub-set-default -fi -#DEBHELPER# diff --git a/debian/grub-legacy-ec2.preinst b/debian/grub-legacy-ec2.preinst deleted file mode 100644 index ac73b428..00000000 --- a/debian/grub-legacy-ec2.preinst +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/sh -set -e - -if [ "$1" = "install" -o "$1" = "upgrade" ]; then - dpkg-divert --package grub-legacy-ec2 --rename --divert \ - /usr/sbin/grub-set-default.real --add /usr/sbin/grub-set-default -fi - -#DEBHELPER# diff --git a/debian/grub-legacy-ec2.templates b/debian/grub-legacy-ec2.templates deleted file mode 100644 index 0562c8cb..00000000 --- a/debian/grub-legacy-ec2.templates +++ /dev/null @@ -1,9 +0,0 @@ -Template: grub/update_grub_changeprompt_threeway -Type: select -# Translators, please keep translations *short* (less than 65 columns) -__Choices: install the package maintainer's version, keep the local version currently installed, show the differences between the versions, show a side-by-side difference between the versions, show a 3-way difference between available versions, do a 3-way merge between available versions (experimental), start a new shell to examine the situation -Choices-C: install_new, keep_current, diff, sdiff, diff_threeway, merge_threeway, shell -Default: keep_current -_Description: What would you like to do 
about ${BASENAME}?
- A new version of /boot/grub/menu.lst is available, but the version installed
- currently has been locally modified.
diff --git a/debian/grub-set-default b/debian/grub-set-default
deleted file mode 100755
index 8816bc7e..00000000
--- a/debian/grub-set-default
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/bin/sh
-
-diverted=/usr/sbin/grub-set-default.real
-legacy_ec2=/usr/sbin/grub-set-default-legacy-ec2
-normal=/usr/sbin/grub-set-default
-
-warn() { echo "WARNING:" "$@" 1>&2; }
-Usage() {
-    cat <<EOF
-${0} is a wrapper provided by grub-legacy-ec2.
-It ensures that the appropriate 'set-default' is called.
-For specific version or help info invoke one of the following directly:
-   ${diverted}
-  and
-   ${legacy_ec2}
-EOF
-}
-
-[ $# -eq 0 ] && { Usage; exit 1; }
-for a in "$@"; do
-    case "$a" in
-        --help|-h|-v|--version) Usage; exit 0;;
-    esac
-done
-
-"${legacy_ec2}" "${@}"
-lret=$?
-[ ${lret} -eq 0 ] || warn "${legacy_ec2} returned ${lret}"
-
-dret=0
-if [ -x "${diverted}" -a -f "/boot/grub/grub.cfg" ]; then
-    "${diverted}" "$@"
-    dret=$?
-fi
-[ ${dret} -eq 0 ] || warn "${diverted} returned ${dret}"
-
-[ $lret -eq 0 -a $dret -eq 0 ]
-exit
diff --git a/debian/grub-set-default-legacy-ec2 b/debian/grub-set-default-legacy-ec2
deleted file mode 100644
index dcd88fb1..00000000
--- a/debian/grub-set-default-legacy-ec2
+++ /dev/null
@@ -1,141 +0,0 @@
-#! /bin/sh
-
-# Set a default boot entry for GRUB
-# Copyright (C) 2004  Free Software Foundation, Inc.
-#
-# This file is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-# General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-
-# Initialize some variables.
-PACKAGE=grub-legacy-ec2
-VERSION=0.97-29ubuntu60
-
-rootdir=
-entry=
-
-# Usage: usage
-# Print the usage.
-usage () {
-    cat <<EOF
-Usage: grub-set-default [OPTION] entry
-Set the default boot entry for GRUB.
-
-  -h, --help              print this message and exit
-  -v, --version           print the version information and exit
-  --root-directory=DIR    Use the directory DIR instead of the root directory
-
-ENTRY is a number or the special keyword \`default\'.
-
-Report bugs to <bug-grub@gnu.org>.
-EOF
-}
-
-# Check the arguments.
-for option in "$@"; do
-    case "$option" in
-    -h | --help)
-        usage
-        exit 0 ;;
-    -v | --version)
-        echo "grub-set-default (GNU GRUB ${VERSION})"
-        exit 0 ;;
-    --root-directory=*)
-        rootdir=`echo "$option" | sed 's/--root-directory=//'` ;;
-    -*)
-        echo "Unrecognized option \`$option'" 1>&2
-        usage
-        exit 1
-        ;;
-    *)
-        if test "x$entry" != x; then
-            echo "More than one entry?" 1>&2
-            usage
-            exit 1
-        fi
-        # We don't actually care what the user specified.
-        entry="${option}" ;;
-    esac
-done
-
-if test "x$entry" = x; then
-    echo "entry not specified." 1>&2
-    usage
-    exit 1
-fi
-
-find_grub_dir ()
-{
-	echo -n "Searching for GRUB installation directory ... 
" >&2 - - for d in $grub_dirs ; do - if [ -d "$d" ] ; then - grub_dir="$d" - break - fi - done - - if [ -z "$grub_dir" ] ; then - abort "No GRUB directory found.\n###" - else - echo "found: $grub_dir" >&2 - fi - - echo $grub_dir -} - -grub_dirs="/boot/grub /boot/boot/grub" - -# Determine the GRUB directory. This is different among OSes. -# if rootdir has been informed use it or find grubdir otherwise -if [ -n "${rootdir}" ]; then - grubdir=${rootdir}/boot/grub - if test -d ${grubdir}; then - : - else - grubdir=${rootdir}/grub - if test -d ${grubdir}; then - : - else - echo "No GRUB directory found under ${rootdir}/" 1>&2 - exit 1 - fi - fi -else - grubdir=$(find_grub_dir) -fi - -file=${grubdir}/default -if test -f ${file}; then - chmod 0600 ${file} - rm -f ${file} -fi -cat <<EOF > $file -$entry -# -# -# -# -# -# -# -# -# -# -# WARNING: If you want to edit this file directly, do not remove any line -# from this file, including this warning. Using \`grub-set-default\' is -# strongly recommended. -EOF - -# Bye. -exit 0 diff --git a/debian/kernel/postinst.d/x-grub-legacy-ec2 b/debian/kernel/postinst.d/x-grub-legacy-ec2 deleted file mode 100755 index 33436510..00000000 --- a/debian/kernel/postinst.d/x-grub-legacy-ec2 +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/sh - -version="$1" -bootopt="" - -# passing the kernel version is required -[ -z "${version}" ] && exit 0 - -# avoid running multiple times -if [ -n "$DEB_MAINT_PARAMS" ]; then - eval set -- "$DEB_MAINT_PARAMS" - if [ -z "$1" ] || [ "$1" != "configure" ]; then - exit 0 - fi -fi - -update=/usr/sbin/update-grub-legacy-ec2 -[ ! -x "${update}" ] || exec "${update}" diff --git a/debian/kernel/postrm.d/x-grub-legacy-ec2 b/debian/kernel/postrm.d/x-grub-legacy-ec2 deleted file mode 100755 index 6f6aa4a4..00000000 --- a/debian/kernel/postrm.d/x-grub-legacy-ec2 +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/sh - -version="$1" -bootopt="" - -# passing the kernel version is required -[ -z "${version}" ] && exit 0 - -# avoid running multiple times -if [ -n "$DEB_MAINT_PARAMS" ]; then - eval set -- "$DEB_MAINT_PARAMS" - if [ -z "$1" ] || [ "$1" != "remove" ]; then - exit 0 - fi -fi - -update=/usr/sbin/update-grub-legacy-ec2 -[ ! -x "${update}" ] || exec "${update}" diff --git a/debian/patches/azure-apply-network-config-false.patch b/debian/patches/azure-apply-network-config-false.patch deleted file mode 100644 index 281c19c6..00000000 --- a/debian/patches/azure-apply-network-config-false.patch +++ /dev/null @@ -1,23 +0,0 @@ -Description: Azure apply_network_config default to False - Azure cloud-images on Xenial already contain hotplug network scripts so - default behavior for should remain to only generate fallback network - configuration which is to dhcp on eth0 and let image hotplug scripts add - network configuration for any additional nics that show up. 
-Author: Chad Smith <chad.smith@canonical.com>
-Origin: backport
-Bug: https://bugs.launchpad.net/cloud-init/+bug/1798424
-Forwarded: not-needed
-Last-Update: 2018-10-17
-Index: cloud-init/cloudinit/sources/DataSourceAzure.py
-===================================================================
---- cloud-init.orig/cloudinit/sources/DataSourceAzure.py
-+++ cloud-init/cloudinit/sources/DataSourceAzure.py
-@@ -207,7 +207,7 @@ BUILTIN_DS_CONFIG = {
-     },
-     'disk_aliases': {'ephemeral0': RESOURCE_DISK_PATH},
-     'dhclient_lease_file': LEASE_FILE,
--    'apply_network_config': True,  # Use IMDS published network configuration
-+    'apply_network_config': False,  # Use fallback network config not IMDS
- }
- # RELEASE_BLOCKER: Xenial and earlier apply_network_config default is False
-
diff --git a/debian/patches/azure-use-walinux-agent.patch b/debian/patches/azure-use-walinux-agent.patch
deleted file mode 100644
index 3c858824..00000000
--- a/debian/patches/azure-use-walinux-agent.patch
+++ /dev/null
@@ -1,17 +0,0 @@
-Description: Use walinux-agent rather than builtin fabric support
- Upstream now uses the built-in support for instance initialization on Azure.
- On a stable release, we want to continue to use the walinux-agent integration.
- Upstream made this change under bug 1538522.
-Forwarded: not-needed
-Author: Scott Moser <smoser@ubuntu.com>
---- a/cloudinit/sources/DataSourceAzure.py
-+++ b/cloudinit/sources/DataSourceAzure.py
-@@ -196,7 +196,7 @@ if util.is_FreeBSD():
-         LOG.debug("resource disk is None")
-
- BUILTIN_DS_CONFIG = {
--    'agent_command': AGENT_START_BUILTIN,
-+    'agent_command': AGENT_START,
-     'data_dir': AGENT_SEED_DIR,
-     'set_hostname': True,
-     'hostname_bounce': {
diff --git a/debian/patches/ds-identify-behavior-xenial.patch b/debian/patches/ds-identify-behavior-xenial.patch
deleted file mode 100644
index ba7639ab..00000000
--- a/debian/patches/ds-identify-behavior-xenial.patch
+++ /dev/null
@@ -1,32 +0,0 @@
-Description: Adjust behavior of ds-identify for SRU
- To make this acceptable as a SRU we have changed ds-identify to
- act in 'report only' mode, and to only 'warn' when cloud-init
- finds itself to be using an Ec2 Datasource on an unknown,
- non-AWS platform.
-Forwarded: not-needed
-Author: Scott Moser <smoser@ubuntu.com>
-Bug-ubuntu: http://bugs.launchpad.net/bugs/1669675
-Bug-ubuntu: http://bugs.launchpad.net/bugs/1660385
-
---- a/tools/ds-identify
-+++ b/tools/ds-identify
-@@ -93,8 +93,8 @@ _DI_LOGGED=""
- DI_MAIN=${DI_MAIN:-main}
-
- DI_BLKID_OUTPUT=""
--DI_DEFAULT_POLICY="search,found=all,maybe=all,notfound=${DI_DISABLED}"
--DI_DEFAULT_POLICY_NO_DMI="search,found=all,maybe=all,notfound=${DI_ENABLED}"
-+DI_DEFAULT_POLICY="report,found=all,maybe=all,notfound=${DI_ENABLED}"
-+DI_DEFAULT_POLICY_NO_DMI="report,found=all,maybe=all,notfound=${DI_ENABLED}"
- DI_DMI_CHASSIS_ASSET_TAG=""
- DI_DMI_PRODUCT_NAME=""
- DI_DMI_SYS_VENDOR=""
-@@ -131,7 +131,7 @@ DI_ON_FOUND=""
- DI_ON_MAYBE=""
- DI_ON_NOTFOUND=""
-
--DI_EC2_STRICT_ID_DEFAULT="true"
-+DI_EC2_STRICT_ID_DEFAULT="warn"
-
- _IS_IBM_CLOUD=""
-
diff --git a/debian/patches/openstack-no-network-config.patch b/debian/patches/openstack-no-network-config.patch
deleted file mode 100644
index 88449d1d..00000000
--- a/debian/patches/openstack-no-network-config.patch
+++ /dev/null
@@ -1,40 +0,0 @@
-Description: Fallback network config instead of network_data.json for OpenStack
- To make this acceptable as a SRU we keep the same behavior as is
- in the stable release, which is to generate network config for the
- fallback nic only.
- .
- In this series, OpenStack datasource can optionally generate - network_config from network_data.json if the datasource is configured - with a file like /etc/cloud.cfg.d/openstack-net.cfg: - . - datasource: - OpenStack: - apply_network_config: true -Forwarded: not-needed -Author: Chad Smith <chad.smith@canonical.com> - ---- a/cloudinit/sources/DataSourceOpenStack.py -+++ b/cloudinit/sources/DataSourceOpenStack.py -@@ -98,10 +98,9 @@ class DataSourceOpenStack(openstack.Sour - if self._network_config != sources.UNSET: - return self._network_config - -- # RELEASE_BLOCKER: SRU to Xenial and Artful SRU should not provide -+ # Xenial, Artful and Bionic will not provide - # network_config by default unless configured in /etc/cloud/cloud.cfg*. -- # Patch Xenial and Artful before release to default to False. -- if util.is_false(self.ds_cfg.get('apply_network_config', True)): -+ if util.is_false(self.ds_cfg.get('apply_network_config', False)): - self._network_config = None - return self._network_config - if self.network_json == sources.UNSET: ---- a/tests/unittests/test_datasource/test_openstack.py -+++ b/tests/unittests/test_datasource/test_openstack.py -@@ -345,6 +345,7 @@ class TestOpenStackDataSource(test_helpe - settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) - sample_json = {'links': [{'ethernet_mac_address': 'mymac'}], - 'networks': [], 'services': []} -+ ds_os.ds_cfg = {'apply_network_config': True} # Default is False - ds_os.network_json = sample_json - with test_helpers.mock.patch(mock_path) as m_convert_json: - m_convert_json.return_value = example_cfg diff --git a/debian/patches/series b/debian/patches/series deleted file mode 100644 index 166a2d85..00000000 --- a/debian/patches/series +++ /dev/null @@ -1,5 +0,0 @@ -azure-use-walinux-agent.patch -ds-identify-behavior-xenial.patch -stable-release-no-jsonschema-dep.patch -openstack-no-network-config.patch -azure-apply-network-config-false.patch diff --git a/debian/patches/stable-release-no-jsonschema-dep.patch b/debian/patches/stable-release-no-jsonschema-dep.patch deleted file mode 100644 index 41cbe98d..00000000 --- a/debian/patches/stable-release-no-jsonschema-dep.patch +++ /dev/null @@ -1,21 +0,0 @@ -Description: Remove the optional dependency on jsonschema for stable release. - To make this acceptable as a SRU we keep the same dependencies as are - in the stable release. - . - The '${python3:Depends}' in debian/control would automatically add the - dependency if it is seen in requirements.txt. -Forwarded: not-needed -Author: Scott Moser <smoser@ubuntu.com> - ---- a/requirements.txt -+++ b/requirements.txt -@@ -31,7 +31,8 @@ requests - jsonpatch - - # For validating cloud-config sections per schema definitions --jsonschema -+## Do not add dependencies to a stable release (SRU). -+#jsonschema - - # For Python 2/3 compatibility - six diff --git a/debian/po/POTFILES.in b/debian/po/POTFILES.in deleted file mode 100644 index 9dc1b8f6..00000000 --- a/debian/po/POTFILES.in +++ /dev/null @@ -1 +0,0 @@ -[type: gettext/rfc822deb] grub.templates diff --git a/debian/po/ca.po b/debian/po/ca.po deleted file mode 100644 index e839a466..00000000 --- a/debian/po/ca.po +++ /dev/null @@ -1,80 +0,0 @@ -# ucf (debconf) translation to Catalan. -# Copyright (C) 2004 Free Software Foundation, Inc. 
-# Aleix Badia i Bosch <abadia@ica.es>, 2004 -# -msgid "" -msgstr "" -"Project-Id-Version: ucf_0.30_templates\n" -"Report-Msgid-Bugs-To: Source: grub@packages.debian.org\n" -"POT-Creation-Date: 2008-01-28 08:38-0800\n" -"PO-Revision-Date: 2007-09-19 01:08-0500\n" -"Last-Translator: Aleix Badia i Bosch <abadia@ica.es>\n" -"Language-Team: Catalan <debian-l10n-catalan@lists.debian.org>\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=ISO-8859-1\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "install the package maintainer's version" -msgstr "instal·la la versió del mantenidor del paquet" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "keep the local version currently installed" -msgstr "mantén la versió instal·lada actualment" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show the differences between the versions" -msgstr "mostra les diferències entre les versions" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show a side-by-side difference between the versions" -msgstr "mostra les diferències entre les versions" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show a 3-way difference between available versions" -msgstr "mostra les diferències entre les versions" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "do a 3-way merge between available versions (experimental)" -msgstr "" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "start a new shell to examine the situation" -msgstr "inicia una nova línia de comandes per examinar la situació" - -#. Type: select -#. Description -#: ../grub.templates:1002 -msgid "What would you like to do about ${BASENAME}?" -msgstr "Què voleu fer respecte el ${BASNAME}?" - -#. Type: select -#. Description -#: ../grub.templates:1002 -msgid "" -"A new version of /boot/grub/menu.lst is available, but the version installed " -"currently has been locally modified." -msgstr "" -"Hi ha una nova versió del fitxer /boot/grub/menu.lst, però la vostra versió " -"s'ha modificat localment." diff --git a/debian/po/cs.po b/debian/po/cs.po deleted file mode 100644 index 7d8db4a7..00000000 --- a/debian/po/cs.po +++ /dev/null @@ -1,89 +0,0 @@ -# -# Translators, if you are not familiar with the PO format, gettext -# documentation is worth reading, especially sections dedicated to -# this format, e.g. by running: -# info -n '(gettext)PO Files' -# info -n '(gettext)Header Entry' -# -# Some information specific to po-debconf are available at -# /usr/share/doc/po-debconf/README-trans -# or http://www.debian.org/intl/l10n/po-debconf/README-trans -# -# Developers do not need to manually edit POT or PO files. 
-#
-msgid ""
-msgstr ""
-"Project-Id-Version: ucf\n"
-"Report-Msgid-Bugs-To: Source: grub@packages.debian.org\n"
-"POT-Creation-Date: 2008-01-28 08:38-0800\n"
-"PO-Revision-Date: 2007-06-23 14:31+0200\n"
-"Last-Translator: Miroslav Kure <kurem@debian.cz>\n"
-"Language-Team: Czech <debian-l10n-czech@lists.debian.org>\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=UTF-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-
-#. Type: select
-#. Choices
-#. Translators, please keep translations *short* (less than 65 columns)
-#: ../grub.templates:1001
-msgid "install the package maintainer's version"
-msgstr "instalovat verzi od správce balíku"
-
-#. Type: select
-#. Choices
-#. Translators, please keep translations *short* (less than 65 columns)
-#: ../grub.templates:1001
-msgid "keep the local version currently installed"
-msgstr "ponechat aktuálně instalovanou lokální verzi"
-
-#. Type: select
-#. Choices
-#. Translators, please keep translations *short* (less than 65 columns)
-#: ../grub.templates:1001
-msgid "show the differences between the versions"
-msgstr "zobrazit rozdíly mezi verzemi"
-
-#. Type: select
-#. Choices
-#. Translators, please keep translations *short* (less than 65 columns)
-#: ../grub.templates:1001
-msgid "show a side-by-side difference between the versions"
-msgstr "zobrazit rozdíly mezi verzemi vedle sebe"
-
-#. Type: select
-#. Choices
-#. Translators, please keep translations *short* (less than 65 columns)
-#: ../grub.templates:1001
-msgid "show a 3-way difference between available versions"
-msgstr "zobrazit třícestný rozdíl mezi dostupnými verzemi"
-
-#. Type: select
-#. Choices
-#. Translators, please keep translations *short* (less than 65 columns)
-#: ../grub.templates:1001
-msgid "do a 3-way merge between available versions (experimental)"
-msgstr "provést třícestné sloučení dostupných verzí (experimentální)"
-
-#. Type: select
-#. Choices
-#. Translators, please keep translations *short* (less than 65 columns)
-#: ../grub.templates:1001
-msgid "start a new shell to examine the situation"
-msgstr "spustit nový shell a prozkoumat situaci"
-
-#. Type: select
-#. Description
-#: ../grub.templates:1002
-msgid "What would you like to do about ${BASENAME}?"
-msgstr "Co chcete udělat s ${BASENAME}?"
-
-#. Type: select
-#. Description
-#: ../grub.templates:1002
-msgid ""
-"A new version of /boot/grub/menu.lst is available, but the version installed "
-"currently has been locally modified."
-msgstr ""
-"K dispozici je novější verze souboru /boot/grub/menu.lst, avšak "
-"nainstalovaná verze obsahuje lokální úpravy."
diff --git a/debian/po/da.po b/debian/po/da.po
deleted file mode 100644
index 399439cf..00000000
--- a/debian/po/da.po
+++ /dev/null
@@ -1,86 +0,0 @@
-# SOME DESCRIPTIVE TITLE.
-# This file is distributed under the same license as the PACKAGE package.
-# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER.
-#
-# Claus Hindsgaul <claus_h@image.dk>, 2005.
-# Claus Hindsgaul <claus.hindsgaul@gmail.com>, 2007.
-msgid ""
-msgstr ""
-"Project-Id-Version: ucf debconf template\n"
-"Report-Msgid-Bugs-To: Source: grub@packages.debian.org\n"
-"POT-Creation-Date: 2008-01-28 08:38-0800\n"
-"PO-Revision-Date: 2007-09-19 01:09-0500\n"
-"Last-Translator: Claus Hindsgaul <claus.hindsgaul@gmail.com>\n"
-"Language-Team: Danish\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=ISO-8859-1\n"
-"Content-Transfer-Encoding: 8bit\n"
-"X-Generator: KBabel 1.11.4\n"
-
-#. Type: select
-#. Choices
-#.
Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "install the package maintainer's version" -msgstr "installér pakkevedligeholderens udgave" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "keep the local version currently installed" -msgstr "bevar din aktuelt-installerede udgave" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show the differences between the versions" -msgstr "vis forskellene mellem udgaverne" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show a side-by-side difference between the versions" -msgstr "vis forskellene mellem versionerne overfor hinanden" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show a 3-way difference between available versions" -msgstr "vis 3-vejs forskelle mellem de tilgængelige udgaver af filen" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "do a 3-way merge between available versions (experimental)" -msgstr "" -"udfør en 3-vejs fletning mellem de tilgængelige udgaver af filen [Meget " -"eksperimentelt]" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "start a new shell to examine the situation" -msgstr "start en ny skal for at undersøge situationen" - -#. Type: select -#. Description -#: ../grub.templates:1002 -msgid "What would you like to do about ${BASENAME}?" -msgstr "Hvad vil du gøre med ${BASENAME}?" - -#. Type: select -#. Description -#: ../grub.templates:1002 -#, fuzzy -msgid "" -"A new version of /boot/grub/menu.lst is available, but the version installed " -"currently has been locally modified." -msgstr "" -"Der er kommet en ny version af filen /boot/grub/menu.lst, men din version er " -"blevet ændret lokalt." diff --git a/debian/po/de.po b/debian/po/de.po deleted file mode 100644 index 294f7390..00000000 --- a/debian/po/de.po +++ /dev/null @@ -1,92 +0,0 @@ -# translation of ucf_3.002_de.po to German -# -# Translators, if you are not familiar with the PO format, gettext -# documentation is worth reading, especially sections dedicated to -# this format, e.g. by running: -# info -n '(gettext)PO Files' -# info -n '(gettext)Header Entry' -# Some information specific to po-debconf are available at -# /usr/share/doc/po-debconf/README-trans -# or http://www.debian.org/intl/l10n/po-debconf/README-trans# -# Developers do not need to manually edit POT or PO files. -# -# Erik Schanze <eriks@debian.org>, 2004-2007. -msgid "" -msgstr "" -"Project-Id-Version: ucf_3.002_de\n" -"Report-Msgid-Bugs-To: Source: grub@packages.debian.org\n" -"POT-Creation-Date: 2008-01-28 08:38-0800\n" -"PO-Revision-Date: 2007-07-01 22:08+0200\n" -"Last-Translator: Erik Schanze <eriks@debian.org>\n" -"Language-Team: German <debian-l10n-german@lists.debian.org>\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"X-Generator: KBabel 1.11.4\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" - -#. Type: select -#. Choices -#. 
Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "install the package maintainer's version" -msgstr "Version des Paket-Betreuers installieren" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "keep the local version currently installed" -msgstr "aktuell installierte Version behalten" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show the differences between the versions" -msgstr "Unterschiede zwischen den Versionen anzeigen" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show a side-by-side difference between the versions" -msgstr "Unterschiede zwischen den Versionen nebeneinander anzeigen" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show a 3-way difference between available versions" -msgstr "3-Wege-Differenz der verfügbaren Versionen der Datei anzeigen" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "do a 3-way merge between available versions (experimental)" -msgstr "3-Wege-Vereinigung verfügbarer Versionen [experimentell]" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "start a new shell to examine the situation" -msgstr "die Angelegenheit in einer neu gestarteten Shell untersuchen" - -#. Type: select -#. Description -#: ../grub.templates:1002 -msgid "What would you like to do about ${BASENAME}?" -msgstr "Wie wollen Sie mit ${BASENAME} verfahren?" - -#. Type: select -#. Description -#: ../grub.templates:1002 -#, fuzzy -msgid "" -"A new version of /boot/grub/menu.lst is available, but the version installed " -"currently has been locally modified." -msgstr "" -"Eine neue Version der Datei /boot/grub/menu.lst ist verfügbar, aber die " -"installierte Version wurde verändert." diff --git a/debian/po/es.po b/debian/po/es.po deleted file mode 100644 index 725d9e91..00000000 --- a/debian/po/es.po +++ /dev/null @@ -1,105 +0,0 @@ -# ucf translation to spanish -# Copyright (C) 2004-2007 Software in the Public Interest -# This file is distributed under the same license as the ucf package. 
-# -# Changes: -# - Initial translation -# Lucas Wall <kthulhu@usa.net>, 2004 -# - Updated -# Javier Fernandez-Sanguino <jfs@debian.org>, 2007 -# -# -# Traductores, si no conoce el formato PO, merece la pena leer la -# documentación de gettext, especialmente las secciones dedicadas a este -# formato, por ejemplo ejecutando: -# info -n '(gettext)PO Files' -# info -n '(gettext)Header Entry' -# -# Equipo de traducción al español, por favor lean antes de traducir -# los siguientes documentos: -# -# - El proyecto de traducción de Debian al español -# http://www.debian.org/intl/spanish/coordinacion -# especialmente las notas de traducción en -# http://www.debian.org/intl/spanish/notas -# -# - La guía de traducción de po's de debconf: -# /usr/share/doc/po-debconf/README-trans -# o http://www.debian.org/intl/l10n/po-debconf/README-trans -# -msgid "" -msgstr "" -"Project-Id-Version: ucf 1.06\n" -"Report-Msgid-Bugs-To: Source: grub@packages.debian.org\n" -"POT-Creation-Date: 2008-01-28 08:38-0800\n" -"PO-Revision-Date: 2007-06-21 13:41+0200\n" -"Last-Translator: Javier Fernandez-Sanguino <jfs@debian.org>\n" -"Language-Team: Debian Spanish <debian-l10n-spanish@lists.debian.org>\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=ISO-8859-15\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "install the package maintainer's version" -msgstr "instalar la versión del responsable del paquete" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "keep the local version currently installed" -msgstr "conservar la versión local actualmente instalada" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show the differences between the versions" -msgstr "mostrar las diferencias entre las versiones" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show a side-by-side difference between the versions" -msgstr "mostrar las diferencias entre las versiones lado a lado" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show a 3-way difference between available versions" -msgstr "mostrar las diferencias entre las tres versiones" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "do a 3-way merge between available versions (experimental)" -msgstr "fusionar las tres versiones disponibles (experimental)" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "start a new shell to examine the situation" -msgstr "ejecutar un nuevo intérprete para examinar la situación" - -#. Type: select -#. Description -#: ../grub.templates:1002 -msgid "What would you like to do about ${BASENAME}?" -msgstr "¿Qué desea hacer con ${BASENAME}?" - -#. Type: select -#. Description -#: ../grub.templates:1002 -msgid "" -"A new version of /boot/grub/menu.lst is available, but the version installed " -"currently has been locally modified." -msgstr "" -"Hay una nueva versión del fichero /boot/grub/menu.lst, pero la versión que " -"está instalada ha sido modificada localmente." 
diff --git a/debian/po/eu.po b/debian/po/eu.po deleted file mode 100644 index ddf7d2a7..00000000 --- a/debian/po/eu.po +++ /dev/null @@ -1,83 +0,0 @@ -# SOME DESCRIPTIVE TITLE. -# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER -# This file is distributed under the same license as the PACKAGE package. -# Piarres Beobide <pi@beobide.net>, 2007. -# -msgid "" -msgstr "" -"Project-Id-Version: ucf-debconf\n" -"Report-Msgid-Bugs-To: Source: grub@packages.debian.org\n" -"POT-Creation-Date: 2008-01-28 08:38-0800\n" -"PO-Revision-Date: 2007-06-20 13:40+0200\n" -"Last-Translator: Piarres Beobide <pi@beobide.net>\n" -"Language-Team: Euskara <Librezale@librezale.org>\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"X-Generator: Pootle 0.11\n" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "install the package maintainer's version" -msgstr "Pakete arduradunaren bertsioa instalatu" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "keep the local version currently installed" -msgstr "mantendu lokalean instalaturiko bertsioa" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show the differences between the versions" -msgstr "bertsioen arteko ezberdintasunak ikusi" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show a side-by-side difference between the versions" -msgstr "bertsioen arteko ezberdintasunak aldez-alde ikusi" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show a 3-way difference between available versions" -msgstr "bertsioen arteko ezberdintasunak 3 eratara ikusi" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "do a 3-way merge between available versions (experimental)" -msgstr "bertsioen arteko ezberdintasunak 3 eratara batu (esperimentala)" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "start a new shell to examine the situation" -msgstr "shell berri bat hasi egoera aztertzeko" - -#. Type: select -#. Description -#: ../grub.templates:1002 -msgid "What would you like to do about ${BASENAME}?" -msgstr "Zer egitea gustatuko litzaizuke ${BASENAME}-ri buruz?" - -#. Type: select -#. Description -#: ../grub.templates:1002 -#, fuzzy -msgid "" -"A new version of /boot/grub/menu.lst is available, but the version installed " -"currently has been locally modified." -msgstr "" -"/boot/grub/menu.lst fitxategiaren bertsio berri bat dago eskuragarri, baina " -"instalaturik dagoenak lokalean eraldatua izan da." 
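(Editorial note: every catalog in this directory repeats the same nine msgids, and the file headers state that developers never edit these POT/PO files by hand. In a po-debconf layout like this debian/po/ directory they are regenerated from templates.pot instead. A minimal sketch of that round trip, assuming the gettext and po-debconf tools are installed; the paths are the ones shown in this diff:

    debconf-updatepo                                          # refresh debian/po/templates.pot and msgmerge it into every debian/po/*.po
    msgfmt --check --statistics -o /dev/null debian/po/eu.po  # validate a single catalog by hand

debconf-updatepo reads debian/po/POTFILES.in, so it needs no arguments when run from the package root.)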
diff --git a/debian/po/fi.po b/debian/po/fi.po deleted file mode 100644 index 62da4409..00000000 --- a/debian/po/fi.po +++ /dev/null @@ -1,79 +0,0 @@ -msgid "" -msgstr "" -"Project-Id-Version: ucf_3.003\n" -"Report-Msgid-Bugs-To: Source: grub@packages.debian.org\n" -"POT-Creation-Date: 2008-01-28 08:38-0800\n" -"PO-Revision-Date: 2007-10-30 18:42+0200\n" -"Last-Translator: Esko Arajärvi <edu@iki.fi>\n" -"Language-Team: Finnish <debian-l10n-finnish@lists.debian.org>\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"X-Poedit-Language: Finnish\n" -"X-Poedit-Country: FINLAND\n" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "install the package maintainer's version" -msgstr "asenna paketin ylläpitäjän versio" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "keep the local version currently installed" -msgstr "säilytä tällä hetkellä asennettu paikallinen versio" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show the differences between the versions" -msgstr "näytä versioiden väliset erot" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show a side-by-side difference between the versions" -msgstr "näytä versioiden väliset erot rinnakkain" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show a 3-way difference between available versions" -msgstr "näytä versioiden välinen kolmisuuntainen erotus" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "do a 3-way merge between available versions (experimental)" -msgstr "tee kolmisuuntainen versioiden yhdistys (kokeellinen)" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "start a new shell to examine the situation" -msgstr "käynnistä uusi kuori tilanteen tutkimiseksi" - -#. Type: select -#. Description -#: ../grub.templates:1002 -msgid "What would you like to do about ${BASENAME}?" -msgstr "Miten käsitellään ${BASENAME}?" - -#. Type: select -#. Description -#: ../grub.templates:1002 -#, fuzzy -msgid "" -"A new version of /boot/grub/menu.lst is available, but the version installed " -"currently has been locally modified." -msgstr "" -"Asetustiedostosta /boot/grub/menu.lst on tarjolla uusi versio, mutta " -"nykyistä versiota on muokattu paikallisesti." diff --git a/debian/po/fr.po b/debian/po/fr.po deleted file mode 100644 index 1598329c..00000000 --- a/debian/po/fr.po +++ /dev/null @@ -1,78 +0,0 @@ -# -msgid "" -msgstr "" -"Project-Id-Version: fr\n" -"Report-Msgid-Bugs-To: Source: grub@packages.debian.org\n" -"POT-Creation-Date: 2008-01-28 08:38-0800\n" -"PO-Revision-Date: 2007-06-19 09:36+0200\n" -"Last-Translator: Eric Madesclair <eric-m@wanadoo.fr>\n" -"Language-Team: French <debian-l10n-french@lists.debian.org>\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"X-Generator: KBabel 1.11.4\n" - -#. Type: select -#. Choices -#. 
Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "install the package maintainer's version" -msgstr "Installer la version du responsable du paquet" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "keep the local version currently installed" -msgstr "Garder la version actuellement installée" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show the differences between the versions" -msgstr "Montrer les différences entre les versions" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show a side-by-side difference between the versions" -msgstr "Montrer côte à côte les différences entre les versions" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show a 3-way difference between available versions" -msgstr "Montrer les différences entre les trois versions du fichier" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "do a 3-way merge between available versions (experimental)" -msgstr "Fusionner les trois versions disponibles du fichier (expérimental)" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "start a new shell to examine the situation" -msgstr "Lancer un shell pour examiner la situation" - -#. Type: select -#. Description -#: ../grub.templates:1002 -msgid "What would you like to do about ${BASENAME}?" -msgstr "Action souhaitée pour ${BASENAME} :" - -#. Type: select -#. Description -#: ../grub.templates:1002 -msgid "" -"A new version of /boot/grub/menu.lst is available, but the version installed " -"currently has been locally modified." -msgstr "" -"Une nouvelle version du fichier /boot/grub/menu.lst est disponible mais la " -"version actuellement utilisée a été modifiée localement." diff --git a/debian/po/gl.po b/debian/po/gl.po deleted file mode 100644 index 6547ad32..00000000 --- a/debian/po/gl.po +++ /dev/null @@ -1,80 +0,0 @@ -# Galician translation of ucf's debconf templates. -# This file is distributed under the same license as the ucf package. -# Jacobo Tarrio <jtarrio@debian.org>, 2006, 2007. -# -msgid "" -msgstr "" -"Project-Id-Version: ucf\n" -"Report-Msgid-Bugs-To: Source: grub@packages.debian.org\n" -"POT-Creation-Date: 2008-01-28 08:38-0800\n" -"PO-Revision-Date: 2007-06-20 15:50+0200\n" -"Last-Translator: Jacobo Tarrio <jtarrio@debian.org>\n" -"Language-Team: Galician <proxecto@trasno.net>\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "install the package maintainer's version" -msgstr "instalar a versión do mantedor de paquetes" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "keep the local version currently installed" -msgstr "conservar a versión local instalada actualmente" - -#. Type: select -#. Choices -#. 
Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show the differences between the versions" -msgstr "amosar as diferencias entre as versións" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show a side-by-side difference between the versions" -msgstr "amosar unha comparación entre as versións" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show a 3-way difference between available versions" -msgstr "amosar unha diferencia a tres entre as versións dispoñibles" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "do a 3-way merge between available versions (experimental)" -msgstr "mesturar as versións dispoñibles (experimental)" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "start a new shell to examine the situation" -msgstr "iniciar un intérprete de ordes para examinar a situación" - -#. Type: select -#. Description -#: ../grub.templates:1002 -msgid "What would you like to do about ${BASENAME}?" -msgstr "¿Que quere facer con ${BASENAME}?" - -#. Type: select -#. Description -#: ../grub.templates:1002 -msgid "" -"A new version of /boot/grub/menu.lst is available, but the version installed " -"currently has been locally modified." -msgstr "" -"Hai dispoñible unha nova versión do ficheiro /boot/grub/menu.lst, pero a " -"versión actualmente instalada ten modificacións locais." diff --git a/debian/po/it.po b/debian/po/it.po deleted file mode 100644 index f344e463..00000000 --- a/debian/po/it.po +++ /dev/null @@ -1,81 +0,0 @@ -# translation of ucf_1.18_templates.po to italian -# Copyright Luca Bruno <luca.br@uno.it>, 2005. -msgid "" -msgstr "" -"Project-Id-Version: ucf_1.18_templates\n" -"Report-Msgid-Bugs-To: Source: grub@packages.debian.org\n" -"POT-Creation-Date: 2008-01-28 08:38-0800\n" -"PO-Revision-Date: 2007-09-19 01:10-0500\n" -"Last-Translator: Luca Bruno <luca.br@uno.it>\n" -"Language-Team: Italian <tp@lists.linux.it>\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "install the package maintainer's version" -msgstr "installare la versione del manutentore del pacchetto" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "keep the local version currently installed" -msgstr "mantenere la propria versione attualmente installata" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show the differences between the versions" -msgstr "mostrare le differenze tra le versioni" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show a side-by-side difference between the versions" -msgstr "mostrare le differenze tra le versioni" - -#. Type: select -#. Choices -#. 
Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show a 3-way difference between available versions" -msgstr "Mostrare le differenze tra 3 versioni del file disponibili" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "do a 3-way merge between available versions (experimental)" -msgstr "" -"Integrare le differenze tra 3 versioni del file disponibili [Molto " -"sperimentale]" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "start a new shell to examine the situation" -msgstr "eseguire una nuova shell per esaminare la situazione" - -#. Type: select -#. Description -#: ../grub.templates:1002 -msgid "What would you like to do about ${BASENAME}?" -msgstr "Cosa si vuol fare di ${BASENAME}?" - -#. Type: select -#. Description -#: ../grub.templates:1002 -#, fuzzy -msgid "" -"A new version of /boot/grub/menu.lst is available, but the version installed " -"currently has been locally modified." -msgstr "" -"Una nuova versione del file /boot/grub/menu.lst è disponibile, ma la propria " -"versione è stata modificata localmente."
diff --git a/debian/po/ja.po b/debian/po/ja.po deleted file mode 100644 index 981ef228..00000000 --- a/debian/po/ja.po +++ /dev/null @@ -1,90 +0,0 @@ -# -# Translators, if you are not familiar with the PO format, gettext -# documentation is worth reading, especially sections dedicated to -# this format, e.g. by running: -# info -n '(gettext)PO Files' -# info -n '(gettext)Header Entry' -# -# Some information specific to po-debconf are available at -# /usr/share/doc/po-debconf/README-trans -# or http://www.debian.org/intl/l10n/po-debconf/README-trans -# -# Developers do not need to manually edit POT or PO files. -# -msgid "" -msgstr "" -"Project-Id-Version: ucf\n" -"Report-Msgid-Bugs-To: Source: grub@packages.debian.org\n" -"POT-Creation-Date: 2008-01-28 08:38-0800\n" -"PO-Revision-Date: 2007-06-21 14:11+0900\n" -"Last-Translator: Kenshi Muto <kmuto@debian.org>\n" -"Language-Team: Japanese <debian-japanese@lists.debian.org>\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "install the package maintainer's version" -msgstr "パッケージメンテナのバージョンをインストール" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "keep the local version currently installed" -msgstr "現在インストールされているローカルバージョンを保持" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show the differences between the versions" -msgstr "バージョン間の差異を表示" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show a side-by-side difference between the versions" -msgstr "バージョン間の差異を並行表示" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show a 3-way difference between available versions" -msgstr "利用可能なバージョン間の3種類の差異を表示" - -#. Type: select -#. Choices -#. 
Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "do a 3-way merge between available versions (experimental)" -msgstr "利用可能なバージョン間での3種類マージを行う (実験的)" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "start a new shell to examine the situation" -msgstr "状況を検討するための新しいシェルを起動" - -#. Type: select -#. Description -#: ../grub.templates:1002 -msgid "What would you like to do about ${BASENAME}?" -msgstr "${BASENAME} について何を行いたいですか?" - -#. Type: select -#. Description -#: ../grub.templates:1002 -#, fuzzy -msgid "" -"A new version of /boot/grub/menu.lst is available, but the version installed " -"currently has been locally modified." -msgstr "" -"設定ファイル /boot/grub/menu.lst の新しいバージョンが利用可能ですが、現在イン" -"ストールされているバージョンは、ローカルで変更されています。"
diff --git a/debian/po/nl.po b/debian/po/nl.po deleted file mode 100644 index e8acf632..00000000 --- a/debian/po/nl.po +++ /dev/null @@ -1,95 +0,0 @@ -# translation of ucf_2.007_templates.po to dutch -# This file is distributed under the same license as the ucf package. -# -# Translators, if you are not familiar with the PO format, gettext -# documentation is worth reading, especially sections dedicated to -# this format, e.g. by running: -# info -n '(gettext)PO Files' -# info -n '(gettext)Header Entry' -# Some information specific to po-debconf are available at -# /usr/share/doc/po-debconf/README-trans -# or http://www.debian.org/intl/l10n/po-debconf/README-trans# -# Developers do not need to manually edit POT or PO files. -# -msgid "" -msgstr "" -"Project-Id-Version: ucf_2.007_nl\n" -"Report-Msgid-Bugs-To: Source: grub@packages.debian.org\n" -"POT-Creation-Date: 2008-01-28 08:38-0800\n" -"PO-Revision-Date: 2007-09-19 01:09-0500\n" -"Last-Translator: Kurt De Bree <kdebree(AT)telenet(DOT)be>\n" -"Language-Team: Nederlands <debian-l10n-dutch@lists.debian.org>\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"X-Generator: KBabel 1.10.2\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "install the package maintainer's version" -msgstr "installeer de versie van de pakketbeheerder" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "keep the local version currently installed" -msgstr "behoud de reeds geïnstalleerde versie" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show the differences between the versions" -msgstr "toon de verschillen tussen de versies" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show a side-by-side difference between the versions" -msgstr "toon de verschillende versies zij-aan-zij" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show a 3-way difference between available versions" -msgstr "" -"toon een drievoudig verschil tussen de beschikbare versies van het bestand" - -#. Type: select -#. Choices -#. 
Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "do a 3-way merge between available versions (experimental)" -msgstr "" -"doe een drievoudige samenvoeging tussen de beschikbare versies van het " -"bestand (Zeer Experimenteel)" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "start a new shell to examine the situation" -msgstr "start een nieuwe shell om de situatie te onderzoeken" - -#. Type: select -#. Description -#: ../grub.templates:1002 -msgid "What would you like to do about ${BASENAME}?" -msgstr "Wat wilt u met ${BASENAME} doen?" - -#. Type: select -#. Description -#: ../grub.templates:1002 -#, fuzzy -msgid "" -"A new version of /boot/grub/menu.lst is available, but the version installed " -"currently has been locally modified." -msgstr "" -"Een nieuwe versie van het bestand /boot/grub/menu.lst is beschikbaar, maar " -"uw versie werd handmatig gewijzigd."
diff --git a/debian/po/pl.po b/debian/po/pl.po deleted file mode 100644 index a42099e1..00000000 --- a/debian/po/pl.po +++ /dev/null @@ -1,100 +0,0 @@ -# translation of ucf3002.po to Polish -# -# Translators, if you are not familiar with the PO format, gettext -# documentation is worth reading, especially sections dedicated to -# this format, e.g. by running: -# info -n '(gettext)PO Files' -# info -n '(gettext)Header Entry' -# -# Some information specific to po-debconf are available at -# /usr/share/doc/po-debconf/README-trans -# or http://www.debian.org/intl/l10n/po-debconf/README-trans -# -# Developers do not need to manually edit POT or PO files. -# -# Wojciech Zaręba <wojtekz@comp.waw.pl>, 2007. -msgid "" -msgstr "" -"Project-Id-Version: ucf3002\n" -"Report-Msgid-Bugs-To: Source: grub@packages.debian.org\n" -"POT-Creation-Date: 2008-01-28 08:38-0800\n" -"PO-Revision-Date: 2007-06-27 17:15+0200\n" -"Last-Translator: Wojciech Zaręba <wojtekz@itrium.icd.waw.pl>\n" -"Language-Team: Polish <pl@li.org>\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=ISO-8859-2\n" -"Content-Transfer-Encoding: 8bit\n" -"Plural-Forms: nplurals=2; plural=n != 1;\n" -"X-Generator: KBabel 1.11.4\n" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "install the package maintainer's version" -msgstr "zainstalowanie wersji przygotowanej przez opiekuna pakietu" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "keep the local version currently installed" -msgstr "zachowanie lokalnie zainstalowanej wersji" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show the differences between the versions" -msgstr "pokazanie różnic pomiędzy wersjami" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show a side-by-side difference between the versions" -msgstr "pokazanie różnic - obok siebie - pomiędzy wersjami" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show a 3-way difference between available versions" -msgstr "pokazanie różnic pomiędzy trzema dostępnymi wersjami" - -#. Type: select -#. Choices -#. 
Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "do a 3-way merge between available versions (experimental)" -msgstr "scalenie zmian pomiędzy 3 dostępnymi wersjami (eksperymentalne)" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "start a new shell to examine the situation" -msgstr "uruchomienie powłoki w celu zbadania sytuacji" - -#. Type: select -#. Description -#: ../grub.templates:1002 -msgid "What would you like to do about ${BASENAME}?" -msgstr "Proszę wybrać akcję do wykonania na pliku ${BASENAME}:" - -#. Type: select -#. Description -#: ../grub.templates:1002 -#, fuzzy -msgid "" -"A new version of /boot/grub/menu.lst is available, but the version installed " -"currently has been locally modified." -msgstr "" -"Dostępna jest nowa wersja pliku /boot/grub/menu.lst, ale obecna wersja " -"została lokalnie zmodyfikowana." - -#~ msgid "Modified configuration file" -#~ msgstr "Zmodyfikowany plik konfiguracyjny" - -#~ msgid "Line by line differences between versions" -#~ msgstr "Różnice linia po linii pomiędzy wersjami"
diff --git a/debian/po/pt.po b/debian/po/pt.po deleted file mode 100644 index 23c7f8f5..00000000 --- a/debian/po/pt.po +++ /dev/null @@ -1,82 +0,0 @@ -# Portuguese translation of ucf's debconf messages. -# 2007, Pedro Ribeiro <p.m42.ribeiro@gmail.com> -# Bruno Queiros <brunomiguelqueiros@sapo.pt>, 2007. -# -msgid "" -msgstr "" -"Project-Id-Version: ucf 2.0020\n" -"Report-Msgid-Bugs-To: Source: grub@packages.debian.org\n" -"POT-Creation-Date: 2008-01-28 08:38-0800\n" -"PO-Revision-Date: 2007-09-19 01:09-0500\n" -"Last-Translator: Bruno Queiros <brunomiguelqueiros@sapo.pt>\n" -"Language-Team: Portuguese <traduz@debianpt.org>\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "install the package maintainer's version" -msgstr "instalar a versão do criador do pacote" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "keep the local version currently installed" -msgstr "manter a versão actualmente instalada" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show the differences between the versions" -msgstr "mostrar a diferença entre as versões" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show a side-by-side difference between the versions" -msgstr "mostrar uma diferença lado-a-lado entre as versões" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show a 3-way difference between available versions" -msgstr "mostrar uma diferença em 3 vias entre versões disponíveis do ficheiro" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "do a 3-way merge between available versions (experimental)" -msgstr "" -"fazer uma junção em 3 vias entre versões disponíveis do ficheiro [Muito " -"Experimental]" - -#. Type: select -#. Choices -#. 
Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "start a new shell to examine the situation" -msgstr "iniciar uma nova consola para examinar a situação" - -#. Type: select -#. Description -#: ../grub.templates:1002 -msgid "What would you like to do about ${BASENAME}?" -msgstr "O que quer fazer acerca de ${BASENAME}?" - -#. Type: select -#. Description -#: ../grub.templates:1002 -msgid "" -"A new version of /boot/grub/menu.lst is available, but the version installed " -"currently has been locally modified." -msgstr "" -"Uma nova versão do ficheiro /boot/grub/menu.lst está disponível, mas a sua " -"versão foi modificada localmente."
diff --git a/debian/po/pt_BR.po b/debian/po/pt_BR.po deleted file mode 100644 index 9d736b97..00000000 --- a/debian/po/pt_BR.po +++ /dev/null @@ -1,89 +0,0 @@ -# -# Translators, if you are not familiar with the PO format, gettext -# documentation is worth reading, especially sections dedicated to -# this format, e.g. by running: -# info -n '(gettext)PO Files' -# info -n '(gettext)Header Entry' -# -# Some information specific to po-debconf are available at -# /usr/share/doc/po-debconf/README-trans -# or http://www.debian.org/intl/l10n/po-debconf/README-trans -# -# Developers do not need to manually edit POT or PO files. -# -msgid "" -msgstr "" -"Project-Id-Version: ucf\n" -"Report-Msgid-Bugs-To: Source: grub@packages.debian.org\n" -"POT-Creation-Date: 2008-01-28 08:38-0800\n" -"PO-Revision-Date: 2007-09-19 01:11-0500\n" -"Last-Translator: André Luís Lopes <andrelop@debian.org>\n" -"Language-Team: Debian-BR Project <debian-l10n-portuguese@lists.debian.org>\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "install the package maintainer's version" -msgstr "instalar a versão do mantenedor do pacote" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "keep the local version currently installed" -msgstr "manter a versão instalada atualmente" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show the differences between the versions" -msgstr "exibir as diferenças entre as versões" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show a side-by-side difference between the versions" -msgstr "exibir as diferenças lado-a-lado entre as versões" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show a 3-way difference between available versions" -msgstr "exibir as diferenças entre as três versões disponíveis do arquivo" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "do a 3-way merge between available versions (experimental)" -msgstr "juntar as 3 versões disponíveis do arquivo [Bem Experimental]" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "start a new shell to examine the situation" -msgstr "iniciar um novo shell e examinar a situação" - -#. Type: select -#. 
Description -#: ../grub.templates:1002 -msgid "What would you like to do about ${BASENAME}?" -msgstr "O que você gostaria de fazer em relação a ${BASENAME}?" - -#. Type: select -#. Description -#: ../grub.templates:1002 -msgid "" -"A new version of /boot/grub/menu.lst is available, but the version installed " -"currently has been locally modified." -msgstr "" -"Uma nova versão do arquivo /boot/grub/menu.lst está disponível, mas sua " -"versão local foi modificada."
diff --git a/debian/po/ru.po b/debian/po/ru.po deleted file mode 100644 index 6560fe31..00000000 --- a/debian/po/ru.po +++ /dev/null @@ -1,85 +0,0 @@ -# translation of ru.po to Russian -# This file is distributed under the same license as the PACKAGE package. -# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER. -# -# Yuri Kozlov <kozlov.y@gmail.com>, 2006, 2007. -msgid "" -msgstr "" -"Project-Id-Version: 3.001\n" -"Report-Msgid-Bugs-To: Source: grub@packages.debian.org\n" -"POT-Creation-Date: 2008-01-28 08:38-0800\n" -"PO-Revision-Date: 2007-07-01 13:45+0400\n" -"Last-Translator: Yuri Kozlov <kozlov.y@gmail.com>\n" -"Language-Team: Russian <debian-l10n-russian@lists.debian.org>\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"X-Generator: KBabel 1.11.4\n" -"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%" -"10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);\n" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "install the package maintainer's version" -msgstr "установить версию из пакета" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "keep the local version currently installed" -msgstr "сохранить установленную локальную версию" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show the differences between the versions" -msgstr "показать различия между версиями" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show a side-by-side difference between the versions" -msgstr "показать различия между версиями параллельно" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show a 3-way difference between available versions" -msgstr "показать различия сразу между 3-мя доступными версиями" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "do a 3-way merge between available versions (experimental)" -msgstr "выполнить слияние 3-х доступных версий [экспериментальный режим]" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "start a new shell to examine the situation" -msgstr "запустить новую оболочку командной строки для прояснения ситуации" - -#. Type: select -#. Description -#: ../grub.templates:1002 -msgid "What would you like to do about ${BASENAME}?" -msgstr "Что нужно сделать с ${BASENAME}?" - -#. Type: select -#. Description -#: ../grub.templates:1002 -#, fuzzy -msgid "" -"A new version of /boot/grub/menu.lst is available, but the version installed " -"currently has been locally modified."
-msgstr "" -"ДоÑтупна Ð½Ð¾Ð²Ð°Ñ Ð²ÐµÑ€ÑÐ¸Ñ Ñ„Ð°Ð¹Ð»Ð° /boot/grub/menu.lst, но верÑÐ¸Ñ Ñ„Ð°Ð¹Ð»Ð°, " -"находÑщегоÑÑ Ð² ÑиÑтеме, была изменёна локально." diff --git a/debian/po/sv.po b/debian/po/sv.po deleted file mode 100644 index 8a157ded..00000000 --- a/debian/po/sv.po +++ /dev/null @@ -1,82 +0,0 @@ -# Swedish translation for ucf. -# Copyright (C) 2007 Free Software Foundation, Inc. -# This file is distributed under the same license as the ucf package. -# Daniel Nylander <po@danielnylander.se>, 2007. -# -msgid "" -msgstr "" -"Project-Id-Version: ucf 2.002\n" -"Report-Msgid-Bugs-To: Source: grub@packages.debian.org\n" -"POT-Creation-Date: 2008-01-28 08:38-0800\n" -"PO-Revision-Date: 2007-06-25 10:07+0100\n" -"Last-Translator: Daniel Nylander <po@danielnylander.se>\n" -"Language-Team: Swedish <debian-l10n-swedish@lists.debian.org>\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "install the package maintainer's version" -msgstr "installera paketansvariges version" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "keep the local version currently installed" -msgstr "behÃ¥ll den lokalt installerade version" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show the differences between the versions" -msgstr "visa skillnaderna mellan versionerna" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show a side-by-side difference between the versions" -msgstr "visa skillnaderna sida vid sida mellan versionerna" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show a 3-way difference between available versions" -msgstr "visa en 3-vägs skillnad mellan tillgängliga versioner" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "do a 3-way merge between available versions (experimental)" -msgstr "gör en 3-vägs sammanslagning mellan versionerna (experimentell)" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "start a new shell to examine the situation" -msgstr "starta ett nytt skal för att undersöka situationen" - -#. Type: select -#. Description -#: ../grub.templates:1002 -msgid "What would you like to do about ${BASENAME}?" -msgstr "Vad vill du göra med ${BASENAME}?" - -#. Type: select -#. Description -#: ../grub.templates:1002 -#, fuzzy -msgid "" -"A new version of /boot/grub/menu.lst is available, but the version installed " -"currently has been locally modified." -msgstr "" -"En ny version av filen /boot/grub/menu.lst finns tillgänglig, men versionen " -"som är installerad har ändrats lokalt." diff --git a/debian/po/templates.pot b/debian/po/templates.pot deleted file mode 100644 index f0b056af..00000000 --- a/debian/po/templates.pot +++ /dev/null @@ -1,80 +0,0 @@ -# SOME DESCRIPTIVE TITLE. -# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER -# This file is distributed under the same license as the PACKAGE package. -# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR. 
-# -#, fuzzy -msgid "" -msgstr "" -"Project-Id-Version: PACKAGE VERSION\n" -"Report-Msgid-Bugs-To: Source: grub@packages.debian.org\n" -"POT-Creation-Date: 2008-01-28 08:38-0800\n" -"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" -"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n" -"Language-Team: LANGUAGE <LL@li.org>\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=CHARSET\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "install the package maintainer's version" -msgstr "" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "keep the local version currently installed" -msgstr "" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show the differences between the versions" -msgstr "" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show a side-by-side difference between the versions" -msgstr "" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show a 3-way difference between available versions" -msgstr "" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "do a 3-way merge between available versions (experimental)" -msgstr "" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "start a new shell to examine the situation" -msgstr "" - -#. Type: select -#. Description -#: ../grub.templates:1002 -msgid "What would you like to do about ${BASENAME}?" -msgstr "" - -#. Type: select -#. Description -#: ../grub.templates:1002 -msgid "" -"A new version of /boot/grub/menu.lst is available, but the version installed " -"currently has been locally modified." -msgstr ""
diff --git a/debian/po/vi.po b/debian/po/vi.po deleted file mode 100644 index a815be9a..00000000 --- a/debian/po/vi.po +++ /dev/null @@ -1,91 +0,0 @@ -# Vietnamese translation for UCF. -# Copyright © 2007 Free Software Foundation, Inc. -# Clytie Siddall <clytie@riverland.net.au>, 2005-2007. -# -msgid "" -msgstr "" -"Project-Id-Version: ucf 3.002\n" -"Report-Msgid-Bugs-To: Source: grub@packages.debian.org\n" -"POT-Creation-Date: 2008-01-28 08:38-0800\n" -"PO-Revision-Date: 2007-06-21 17:52+0930\n" -"Last-Translator: Clytie Siddall <clytie@riverland.net.au>\n" -"Language-Team: Vietnamese <vi-VN@googlegroups.com>\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=utf-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Plural-Forms: nplurals=1; plural=0;\n" -"X-Generator: LocFactoryEditor 1.6.4a1\n" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "install the package maintainer's version" -msgstr "cài đặt phiên bản của nhà duy trì gói" - # msgid "keep your currently-installed version" -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "keep the local version currently installed" -msgstr "giữ phiên bản cục bộ đã cài đặt hiện thời" -#. Type: select -#. Choices -#. 
Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show the differences between the versions" -msgstr "hiển thị khác biệt giữa những phiên bản" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show a side-by-side difference between the versions" -msgstr "hiển thị khác biệt cạnh nhau giữa những phiên bản" - # msgid "show a 3 way difference between available versions of the file" -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "show a 3-way difference between available versions" -msgstr "hiển thị khác biệt ba hướng giữa những phiên bản sẵn sàng" - # msgid "" # do a 3 way merge between available versions of the file [Very # Experimental] -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "do a 3-way merge between available versions (experimental)" -msgstr "trộn ba hướng những phiên bản sẵn sàng (thực nghiệm)" - -#. Type: select -#. Choices -#. Translators, please keep translations *short* (less than 65 columns) -#: ../grub.templates:1001 -msgid "start a new shell to examine the situation" -msgstr "khởi chạy trình bao mới để khám xét trường hợp" - -#. Type: select -#. Description -#: ../grub.templates:1002 -msgid "What would you like to do about ${BASENAME}?" -msgstr "Đối với ${BASENAME}? muốn làm gì vậy?" - # msgid "" # A new version of configuration file ${FILE} is available, but your # version has been locally modified. -#. Type: select -#. Description -#: ../grub.templates:1002 -#, fuzzy -msgid "" -"A new version of /boot/grub/menu.lst is available, but the version installed " -"currently has been locally modified." -msgstr "" -"Có sẵn một phiên bản mới của tập tin cấu hình /boot/grub/menu.lst, nhưng " -"phiên bản được cài đặt hiện thời đã bị sửa đổi cục bộ."
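(Editorial note: most of the description entries deleted above carry a "#, fuzzy" flag. msgmerge adds that flag when a msgid changes, as happened here when the old "configuration file ${FILE}" wording, still visible in the vi.po comments, became "/boot/grub/menu.lst", and debconf ignores fuzzy entries at runtime. A hedged sketch for auditing how much live translation coverage a wholesale deletion like this actually removes, assuming the gettext tools are installed:

    for po in debian/po/*.po; do
        echo "== $po"
        msgattrib --only-fuzzy "$po"    # entries debconf would have ignored anyway
    done
)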
diff --git a/debian/rules b/debian/rules deleted file mode 100755 index c5b18ebb..00000000 --- a/debian/rules +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/make -f -INIT_SYSTEM ?= upstart,systemd -export PYBUILD_INSTALL_ARGS=--init-system=$(INIT_SYSTEM) -DEB_VERSION := $(shell dpkg-parsechangelog --show-field=Version) - -%: - dh $@ --with python3,systemd --buildsystem pybuild - -override_dh_auto_test: -ifeq (,$(findstring nocheck,$(DEB_BUILD_OPTIONS))) - http_proxy= make PYVER=python3 check -else - @echo check disabled by DEB_BUILD_OPTIONS=$(DEB_BUILD_OPTIONS) -endif - -override_dh_systemd_start: - dh_systemd_start --no-restart-on-upgrade --no-start - -override_dh_auto_install: - dh_auto_install --destdir=debian/cloud-init - install -D -m 0644 ./tools/21-cloudinit.conf debian/cloud-init/etc/rsyslog.d/21-cloudinit.conf - install -D ./tools/Z99-cloud-locale-test.sh debian/cloud-init/etc/profile.d/Z99-cloud-locale-test.sh - install -D ./tools/Z99-cloudinit-warnings.sh debian/cloud-init/etc/profile.d/Z99-cloudinit-warnings.sh - install -m 0644 -D debian/apport-launcher.py debian/cloud-init/usr/share/apport/package-hooks/cloud-init.py - flist=$$(find $(CURDIR)/debian/ -type f -name version.py) && sed -i 's,@@PACKAGED_VERSION@@,$(DEB_VERSION),' $${flist:-did-not-find-version-py-for-replacement} diff --git a/debian/source/format b/debian/source/format deleted file mode 100644 index 163aaf8d..00000000 --- a/debian/source/format +++ /dev/null @@ -1 +0,0 @@ -3.0 (quilt) diff --git a/debian/update-grub-legacy-ec2 b/debian/update-grub-legacy-ec2 deleted file mode 100755 index c1c9dc79..00000000 --- a/debian/update-grub-legacy-ec2 +++ /dev/null @@ -1,1620 +0,0 @@ -#!/bin/bash -# -# Insert a list of installed kernels in a grub config file -# Copyright 2001 Wichert Akkerman <wichert@linux.com> -# Copyright 2007, 2008 Canonical Ltd. -# -# This file is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# Contributors: -# Jason Thomas <jason@debian.org> -# David B.Harris <dbarclay10@yahoo.ca> -# Marc Haber <mh@zugschlus.de> -# Crispin Flowerday <crispin@zeus.com> -# Steve Langasek <steve.langasek@canonical.com> - -# Abort on errors -set -e - -# load debconf first, since this re-execs the script -. /usr/share/debconf/confmodule - -host_os=`uname -s | tr '[A-Z]' '[a-z]'` - -abort() { - message=$@ - - echo >&2 - printf '%s\n' "$message" >&2 - echo >&2 - exit 1 -} - -find_grub_dir () -{ - echo -n "Searching for GRUB installation directory ... " >&2 - - for d in $grub_dirs ; do - if [ -d "$d" ] ; then - grub_dir="$d" - break - fi - done - - if [ -z "$grub_dir" ] ; then - abort "No GRUB directory found. - To create a template run 'mkdir /boot/grub' first. - To install grub, install it manually or try the 'grub-install' command. - ### Warning, grub-install is used to change your MBR. 
###" - else - echo "found: $grub_dir" >&2 - fi - - echo $grub_dir -} - -find_device () -{ - mount_point=$1 - - # Autodetect current root device - device= - if [ -f /etc/fstab ] ; then - device=$(awk '$1!~/^#/{ - if ($2 ~ "^/+$") { $2 = "/"; } else { sub("/*$", "", $2); } - if ($2 == "'"$mount_point"'"){ - print $1; - } - }' /etc/fstab | tail -n 1) - fi - - if [ -n "$device" ] ; then - case "$device" in - LABEL=* | UUID=*) - device=`readlink -f "$(findfs $device)"` - ;; - *) - device=`readlink -f "$device"` - ;; - esac - fi - - echo $device -} - -find_root_device () -{ - device=$(find_device "/") - - if [ -z "$device" ]; then - echo "Cannot determine root device. Assuming /dev/hda1" >&2 - echo "This error is probably caused by an invalid /etc/fstab" >&2 - device=/dev/hda1 - fi - - echo $device -} - -# Usage: convert_raid1 os_device -# Checks if os_device is a software raid1. -# If so, converts to first physical device in array. -convert_raid1 () -{ - case $1 in - /dev/md[0-9]) - : ;; # Continue - *) - return 1 ;; - esac - - [ -x /sbin/mdadm ] || return 1 - - # Check that the raid device is raid1 - raidlevel=$(mdadm -D -b $1 | grep "^ARRAY" | \ - sed "s/^.*level=//" | cut -d" " -f1) - [ "$raidlevel" = "raid1" ] || return 1 - - # Take only the first device that makes up the raid - raiddev=$(mdadm -D $1 | grep -A1 "Number" | grep "dev" \ - | sed "s/^.*\(\/dev\/.*\)$/\1/") - [ -n "$raiddev" ] || return 1 - - echo $raiddev - return 0 -} - -# Usage: convert os_device -# Convert an OS device to the corresponding GRUB drive. -# This part is OS-specific. -convert () { - # First, check if the device file exists. - if test -e "$1"; then - : - else - echo "$1: Not found or not a block device." 1>&2 - exit 1 - fi - - host_os=`uname -s | tr '[[:upper:]]' '[[:lower:]]'` - - # Break the device name into the disk part and the partition part. - case "$host_os" in - linux) - tmp_disk=`echo "$1" | sed -e 's%\([sh]d[[:lower:]]\)[0-9]*$%\1%' \ - -e 's%\(fd[0-9]*\)$%\1%' \ - -e 's%/part[0-9]*$%/disc%' \ - -e 's%\(c[0-7]d[0-9]*\).*$%\1%'` - tmp_part=`echo "$1" | sed -e 's%.*/[sh]d[[:lower:]]\([0-9]*\)$%\1%' \ - -e 's%.*/fd[0-9]*$%%' \ - -e 's%.*/floppy/[0-9]*$%%' \ - -e 's%.*/\(disc\|part\([0-9]*\)\)$%\2%' \ - -e 's%.*c[0-7]d[0-9]*p*%%'` - ;; - gnu) - tmp_disk=`echo "$1" | sed 's%\([sh]d[0-9]*\).*%\1%'` - tmp_part=`echo "$1" | sed "s%$tmp_disk%%"` ;; - freebsd|*/kfreebsd) - tmp_disk=`echo "$1" | sed 's%r\{0,1\}\([saw]d[0-9]*\).*$%\1%' \ - | sed 's%r\{0,1\}\(da[0-9]*\).*$%\1%'` - tmp_part=`echo "$1" \ - | sed "s%.*/r\{0,1\}[saw]d[0-9]\(s[0-9]*[a-h]\)%\1%" \ - | sed "s%.*/r\{0,1\}da[0-9]\(s[0-9]*[a-h]\)%\1%"` - ;; - netbsd|*/knetbsd) - tmp_disk=`echo "$1" | sed 's%r\{0,1\}\([sw]d[0-9]*\).*$%r\1d%' \ - | sed 's%r\{0,1\}\(fd[0-9]*\).*$%r\1a%'` - tmp_part=`echo "$1" \ - | sed "s%.*/r\{0,1\}[sw]d[0-9]\([abe-p]\)%\1%"` - ;; - *) - echo "update-grub does not support your OS yet." 1>&2 - exit 1 ;; - esac - - # Get the drive name. - tmp_drive=`grep -v '^#' $device_map | grep "$tmp_disk *$" \ - | sed 's%.*\(([hf]d[0-9][a-z0-9,]*)\).*%\1%'` - - # If not found, print an error message and exit. - if test "x$tmp_drive" = x; then - echo "$1 does not have any corresponding BIOS drive." 1>&2 - exit 1 - fi - - if test "x$tmp_part" != x; then - # If a partition is specified, we need to translate it into the - # GRUB's syntax. 
- case "$host_os" in - linux) - echo "$tmp_drive" | sed "s%)$%,`expr $tmp_part - 1`)%" ;; - gnu) - if echo $tmp_part | grep "^s" >/dev/null; then - tmp_pc_slice=`echo $tmp_part \ - | sed "s%s\([0-9]*\)[a-z]*$%\1%"` - tmp_drive=`echo "$tmp_drive" \ - | sed "s%)%,\`expr "$tmp_pc_slice" - 1\`)%"` - fi - if echo $tmp_part | grep "[a-z]$" >/dev/null; then - tmp_bsd_partition=`echo "$tmp_part" \ - | sed "s%[^a-z]*\([a-z]\)$%\1%"` - tmp_drive=`echo "$tmp_drive" \ - | sed "s%)%,$tmp_bsd_partition)%"` - fi - echo "$tmp_drive" ;; - freebsd|*/kfreebsd) - if echo $tmp_part | grep "^s" >/dev/null; then - tmp_pc_slice=`echo $tmp_part \ - | sed "s%s\([0-9]*\)[a-h]*$%\1%"` - tmp_drive=`echo "$tmp_drive" \ - | sed "s%)%,\`expr "$tmp_pc_slice" - 1\`)%"` - fi - if echo $tmp_part | grep "[a-h]$" >/dev/null; then - tmp_bsd_partition=`echo "$tmp_part" \ - | sed "s%s\{0,1\}[0-9]*\([a-h]\)$%\1%"` - tmp_drive=`echo "$tmp_drive" \ - | sed "s%)%,$tmp_bsd_partition)%"` - fi - echo "$tmp_drive" ;; - netbsd|*/knetbsd) - if echo $tmp_part | grep "^[abe-p]$" >/dev/null; then - tmp_bsd_partition=`echo "$tmp_part" \ - | sed "s%\([a-p]\)$%\1%"` - tmp_drive=`echo "$tmp_drive" \ - | sed "s%)%,$tmp_bsd_partition)%"` - fi - echo "$tmp_drive" ;; - esac - else - # If no partition is specified, just print the drive name. - echo "$tmp_drive" - fi -} - -# Usage: convert_default os_device -# Convert an OS device to the corresponding GRUB drive. -# Calls OS-specific convert, and returns a default of -# (hd0,0) if anything goes wrong -convert_default () { - # Check if device is software raid1 array - if tmp_dev=$(convert_raid1 $1 2>/dev/null) ; then - : # Use device returned by convert_raid1 - else - tmp_dev=$1 - fi - - if tmp=$(convert $tmp_dev 2>/dev/null) ; then - echo $tmp - else - echo "${grub_root_device_fallback}" - fi -} - -is_removable () { - removabledevice="$(echo "$1" | sed -e 's%\([sh]d[a-z]\)[0-9]*$%\1%' -e 's%\(fd[0-9]*\)$%\1%' -e 's%/part[0-9]*$%/disc%' -e 's%\(c[0-7]d[0-9]*\).*$%\1%' -e 's%^/dev/%%g')" - if [ -e "/sys/block/$removabledevice/removable" ]; then - if [ "$(cat /sys/block/$removabledevice/removable)" != "0" ]; then - echo "/dev/$removabledevice" - return - fi - fi - echo "" -} - -convert_to_uuid() -{ - local dev; dev=$1 - - convert=false - case "$dev" in - /dev/disk/*) - ;; - /dev/mapper/*) - ;; - /dev/evms/[hs]d[a-z][0-9]*) - convert=: - ;; - /dev/evms/*) - ;; - /dev/md[0-9]*) - ;; - /dev/*) - convert=: - ;; - esac - if $convert; then - if [ -b "$dev" ]; then - uuid=$(blkid -o value -s UUID "$dev" || true) - fi - fi - - echo "$uuid" -} - -convert_kopt_to_uuid() -{ - local kopt; kopt=$1 - - convert=false - root=$(echo "$kopt" | sed 's/.*root=//;s/ .*//') - case "$root" in - UUID=*|LABEL=*) - ;; - /dev/disk/*) - ;; - /dev/mapper/*) - ;; - /dev/evms/[hs]d[a-z][0-9]*) - convert=: - ;; - /dev/evms/*) - ;; - /dev/md[0-9]*) - ;; - /dev/*) - convert=: - ;; - esac - if $convert; then - if [ -L "$DEV" ] && readlink "$DEV" | grep -q "^/dev/mapper/" - then - : - elif [ -b "$root" ]; then - uuid=$(blkid -o value -s UUID "$root" || true) - if [ -n "$uuid" ]; then - kopt=$(echo "$kopt" | sed "s/\(.*root=\)[^ ]*/\1UUID=$uuid/") - fi - fi - fi - - echo "$kopt" -} - - -## Configuration Options -# directory's to look for the grub installation and the menu file -grub_dirs="/boot/grub /boot/boot/grub" - -# The grub installation directory -grub_dir=$(find_grub_dir) - -# Full path to the menu.lst -menu_file_basename=menu.lst -menu_file=$grub_dir/$menu_file_basename - -# Full path to the menu.lst fragment used for ucf management 
-ucf_menu_file=/var/run/grub/$menu_file_basename - -# Full path to the default file -default_file_basename=default -default_file=$grub_dir/$default_file_basename - -# the device for the / filesystem -root_device=$(find_root_device) - -# the device for the /boot filesystem -boot_device=$(find_device "/boot") - -# Full path to the device.map -device_map=$grub_dir/device.map - -# Default kernel options, overidden by the kopt statement in the menufile. -loop_file="" -if [ -f /etc/fstab ]; then - loop_file=$(awk '$2=="/" && $4~"loop" {print $1}' /etc/fstab) -fi -if [ -n "$loop_file" ]; then - dev_mountpoint=$(awk '"'${loop_file}'"~"^"$2 && $2!="/" {print $1";"$2}' /proc/mounts|tail -n 1) - host_device="${dev_mountpoint%;*}" - host_mountpoint="${dev_mountpoint#*;}" -fi -if [ -n "$host_device" ]; then - boot_device= - root_device="$host_device" - default_kopt="root=$host_device loop=${loop_file#$host_mountpoint} ro" -else - default_kopt="root=$root_device ro" -fi -default_kopt="$(convert_kopt_to_uuid "$default_kopt")" -kopt="$default_kopt" - -# Title -title=$(lsb_release --short --description 2>/dev/null) || title="Ubuntu" - -# should update-grub remember the default entry -updatedefaultentry="false" - -# Drive(in GRUB terms) where the kernel is located. Overridden by the -# kopt statement in menufile. -# if we don't have a device.map then we can't use the convert function. - -# Try to use a UUID instead of the GRUB device name. -if test -z "$boot_device" ; then - uuid=$(convert_to_uuid "$root_device") -else - uuid=$(convert_to_uuid "$boot_device") -fi - -#if [ -n "$uuid" ]; then -# grub_root_device="$uuid" -#fi -## The ec2 provide pv-grub do not support 'uuid' so we have to use a grub name -## when presented to grub, the root filesystem is on what grub sees -## as a bare disk (hd0), rather than what we see it as in user space (sda1). -grub_root_device_fallback="(hd0)" -grub_root_device="${grub_root_device_fallback}" - -check_removable="" -if true; then - if test -f "$device_map"; then - if test -z "$boot_device" ; then - grub_root_device=$(convert_default "$root_device") - check_removable="$(is_removable "$root_device")" - else - grub_root_device=$(convert_default "$boot_device") - check_removable="$(is_removable "$boot_device")" - fi - else - grub_root_device="${grub_root_device_fallback}" - fi -fi - -# If the root/boot device is on a removable target, we need to override -# the grub_root_device to (hd0,X). This is a requirement since the BIOS -# will change device mapping dynamically if we switch boot device. 
- -if test -n "$check_removable" ; then - grub_root_device="$(echo "$grub_root_device" | sed -e 's/d.*,/d0,/g')" -fi - -# should grub create the alternative boot options in the menu - alternative="true" - -# should grub lock the alternative boot options in the menu - lockalternative="false" - -# additional options to use with the default boot option, but not with the -# alternatives - defoptions="console=hvc0" - -# should grub lock the old kernels - lockold="false" - -# Xen hypervisor options to use with the default Xen boot option - xenhopt="" - -# Xen Linux kernel options to use with the default Xen boot option - xenkopt="console=tty0" - -# options to use with the alternative boot options - altoptions="(recovery mode) single" - -# controls howmany kernels are listed in the config file, -# this does not include the alternative kernels - howmany="all" - -# should grub create a memtest86 entry - memtest86="true" - -# should grub add "savedefault" to default boot options - savedefault="false" - -# is grub running in a domU? - indomU="true" - -# stores the command line arguments - command_line_arguments=$1 - -# does this version of grub support the quiet option? -if [ -f ${grub_dir}/installed-version ] && dpkg --compare-versions `cat ${grub_dir}/installed-version` ge 0.97-11ubuntu4; then - supports_quiet=true -else - supports_quiet=false -fi - -# read user configuration -if test -f "/etc/default/grub" ; then - . /etc/default/grub -fi - -# Default options to use in a new config file. This will only be used if $menu_file -# doesn't already exist. Only edit the lines between the two "EOF"s. The others are -# part of the script. -newtemplate=$(tempfile) -cat >> "$newtemplate" <<EOF -# $menu_file_basename - See: grub(8), info grub, update-grub(8) -# grub-install(8), grub-floppy(8), -# grub-md5-crypt, /usr/share/doc/grub -# and /usr/share/doc/grub-legacy-doc/. - -## default num -# Set the default entry to the entry number NUM. Numbering starts from 0, and -# the entry number 0 is the default if the command is not used. -# -# You can specify 'saved' instead of a number. In this case, the default entry -# is the entry saved with the command 'savedefault'. -# WARNING: If you are using dmraid do not use 'savedefault' or your -# array will desync and will not let you boot your system. -default 0 - -## timeout sec -# Set a timeout, in SEC seconds, before automatically booting the default entry -# (normally the first entry defined). -# on ec2, with no console access, there is no reason for a timeout. set to 0. -timeout 0 - -## hiddenmenu -# Hides the menu by default (press ESC to see the menu) -hiddenmenu - -# Pretty colours -#color cyan/blue white/blue - -## password ['--md5'] passwd -# If used in the first section of a menu file, disable all interactive editing -# control (menu entry editor and command-line) and entries protected by the -# command 'lock' -# e.g. password topsecret -# password --md5 \$1\$gLhU0/\$aW78kHK1QfV3P2b2znUoe/ -# password topsecret - -# -# examples -# -# title Windows 95/98/NT/2000 -# root (hd0,0) -# makeactive -# chainloader +1 -# -# title Linux -# root (hd0,1) -# kernel /vmlinuz root=/dev/hda2 ro -# - -# -# Put static boot stanzas before and/or after AUTOMAGIC KERNEL LIST - -EOF -## End Configuration Options - -echo -n "Searching for default file ... 
" >&2 -if [ -f "$default_file" ] ; then - echo "found: $default_file" >&2 -else - echo "Generating $default_file file and setting the default boot entry to 0" >&2 - grub-set-default 0 -fi - -# Make sure we use the standard sorting order -LC_COLLATE=C -# Magic markers we use -start="### BEGIN AUTOMAGIC KERNELS LIST" -end="### END DEBIAN AUTOMAGIC KERNELS LIST" - -startopt="## ## Start Default Options ##" -endopt="## ## End Default Options ##" - -# path to grub2 -grub2name="/boot/grub/core.img" - -# Extract options from config file -ExtractMenuOpt() -{ - opt=$1 - - sed -ne "/^$start\$/,/^$end\$/ { - /^$startopt\$/,/^$endopt\$/ { - /^# $opt=/ { - s/^# $opt=\(.*\)\$/\1/ - p - } - } - }" $menu -} - -GetMenuOpts() -{ - opt=$1 - - sed -ne "/^$start\$/,/^$end\$/ { - /^$startopt\$/,/^$endopt\$/ { - /^# $opt=/ { - p - } - } - }" $menu -} - -ExtractMenuOpts() -{ - opt=$1 - - GetMenuOpts $opt | sed "s/^# $opt=\(.*\)\$/\1=\"\2\"/" -} - -GetMenuOpt() -{ - opt=$1 - value=$2 - - [ -z "$(GetMenuOpts "$opt")" ] || value=$(ExtractMenuOpt "$opt") - - echo $value -} - -# Compares two version strings A and B -# Returns -1 if A<B -# 0 if A==B -# 1 if A>B -# This compares version numbers of the form -# 2.4.14.2 > 2.4.14 -# 2.4.14random = 2.4.14-random > 2.4.14-ac10 > 2.4.14 > 2.4.14-pre2 > -# 2.4.14-pre1 > 2.4.13-ac99 -CompareVersions() -{ - #Changes the line something-x.y.z into somthing-x.y.z.q - #This is to ensure that kernels with a .q is treated as higher than the ones without - #First a space is put after the version number - v1=$(echo $1 | sed -e 's!^\(.*-\([0-9]\+\.\)\{2,3\}[0-9]\+\)\(.*\)!\1 \3!g') - v2=$(echo $2 | sed -e 's!^\(.*-\([0-9]\+\.\)\{2,3\}[0-9]\+\)\(.*\)!\1 \3!g') - #If the version number only has 3 digits then put in another .0 - v1=$(echo $v1 | sed -e 's!^\(.*-\([0-9]\+\.\)\{2\}[0-9]\+\)\( .*\|$\)!\1.0 \3!g') - v2=$(echo $v2 | sed -e 's!^\(.*-\([0-9]\+\.\)\{2\}[0-9]\+\)\( .*\|$\)!\1.0 \3!g') - - # Then split the version number and remove any '.' 's or dashes - v1=$(echo $v1 | sed -e 's![-\.]\+! !g' -e 's!\([0-9]\)\([[:alpha:]]\)!\1 \2!') - v2=$(echo $v2 | sed -e 's![-\.]\+! !g' -e 's!\([0-9]\)\([[:alpha:]]\)!\1 \2!') - - # we weight different kernel suffixes here - # ac = 50 - # pre = -50 - # rc = -40 - # test = -60 - # others are given 99 - v1=$(echo $v1 | sed -e 's! k7! 786 !g' -e 's! ac! 50 !g' -e 's! rc! -40 !g' -e 's! pre! -50 !g' -e 's! test! -60 !g' -e 's![^ ]*[^-0-9 ][^ ]*!99!g') - - v2=$(echo $v2 | sed -e 's! k7! 786 !g' -e 's! ac! 50 !g' -e 's! rc! -40 !g' -e 's! pre! -50 !g' -e 's! test! 
-60 !g' -e 's![^ ]*[^-0-9 ][^ ]*!99!g') - - result=0; v1finished=0; v2finished=0; - while [ $result -eq 0 ] && [ $v1finished -eq 0 ] && [ $v2finished -eq 0 ]; - do - if [ "$v1" = "" ]; then - v1comp=0; v1finished=1 - else - set -- $v1; v1comp=$1; shift; v1=$* - fi - - if [ "$v2" = "" ]; then - v2comp=0; v2finished=1 - else - set -- $v2; v2comp=$1; shift; v2=$* - fi - - set +e - result=`expr $v1comp - $v2comp` - result=`expr substr $result 1 2` - set -e - - if [ $result -gt 0 ]; then result=1 - elif [ $result -lt 0 ]; then result=-1 - fi - done - - # finally return the result - echo $result -} - -# looks in the directory specified for an initrd image with the version specified -FindInitrdName() -{ - # strip trailing slashes - directory=$(echo $1 | sed -e 's#/*$##') - version=$2 - - # initrd - # initrd.img - # initrd-lvm - # .*.gz - - initrdName="" - names="initrd initrd.img initrd-lvm" - compressed="gz" - - for n in $names ; do - # make sure we haven't already found it - if [ -z "$initrdName" ] ; then - if [ -f "$directory/$n$version" ] ; then - initrdName="$n$version" - break - else - for c in $compressed ; do - if [ -f "$directory/$n$version.$c" ] ; then - initrdName="$n$version.$c" - break - fi - done - fi - else - break - fi - done - - # return the result - echo $initrdName -} - -FindXenHypervisorVersions () -{ - version=$1 - - if [ -f "/var/lib/linux-image-$version/xen-versions" ]; then - ret="$(cat /var/lib/linux-image-$version/xen-versions)" - fi - - echo $ret -} - -get_kernel_opt() -{ - kernel_version=$1 - - version=$(echo $kernel_version | sed 's/^[^0-9]*//') - version=$(echo $version | sed 's/[-\+\.]/_/g') - if [ -n "$version" ] ; then - while [ -n "$version" ] ; do - currentOpt="$(eval "echo \${kopt_$version}")" - if [ -n "$currentOpt" ] ; then - break - fi - - oldversion="$version" - version=$(echo $version | sed 's/_\?[^_]*$//') - if [ "$version" = "$oldversion" ] ; then - # Break infinite loop, if the version isn't what we expect - break - fi - done - fi - - if [ -z "$currentOpt" ] ; then - currentOpt=$kopt - fi - - echo $currentOpt -} - -write_kernel_entry() -{ - local kernel_version; kernel_version=$1; shift - local recovery_desc; recovery_desc=$1; shift - local lock_alternative; lock_alternative=$1; shift - local grub_root_device; grub_root_device=$1; shift - local kernel; kernel=$1; shift - local kernel_options; kernel_options=$1; shift - local recovery_suffix; recovery_suffix=$1; shift - local initrd; initrd=$1; shift - local savedefault; savedefault=$1; shift - local lockold; lockold=$1; shift - local dapper_upgrade; dapper_upgrade=$1; shift - local hypervisor - if [ -n "$1" ]; then - # Hypervisor. - hypervisor=$1; shift - local hypervisor_image; hypervisor_image=$1; shift - local hypervisor_version; hypervisor_version=$1; shift - local hypervisor_options; hypervisor_options=$1; shift - fi - - echo -n "title " >> $buffer - - if [ -n "$hypervisor" ]; then - echo -n "$hypervisor $hypervisor_version / " >> $buffer - fi - - echo -n "$title" >> $buffer - if [ -n "$kernel_version" ]; then - echo -n ", " >> $buffer - # memtest86 is not strictly a kernel - if ! 
echo "$kernel_version" | grep -q ^memtest86; then - echo -n "kernel " >> $buffer - fi - echo -n "$kernel_version" >> $buffer - fi - if [ -n "$recovery_desc" ]; then - echo -n " $recovery_desc" >> $buffer - fi - echo >> $buffer - - # lock the alternative options - if test x"$lock_alternative" = x"true" ; then - echo "lock" >> $buffer - fi - # lock the old entries - if test x"$lockold" = x"true" ; then - echo "lock" >> $buffer - fi - - case "$grub_root_device" in - [^A-Za-z0-9]*) - echo "root $grub_root_device" >> $buffer - ;; - *) - echo "uuid $grub_root_device" >> $buffer - ;; - esac - - echo -n "kernel " >> $buffer - if [ -n "$hypervisor" ]; then - echo -n "$hypervisor_image" >> $buffer - if [ -n "$hypervisor_options" ]; then - echo -n " $hypervisor_options" >> $buffer - fi - echo >> $buffer - echo -n "module " >> $buffer - fi - echo -n "$kernel" >> $buffer - if [ -n "$kernel_options" ]; then - echo -n " $kernel_options" >> $buffer - fi - if [ -n "$recovery_desc" ]; then - echo -n " $recovery_suffix" >> $buffer - fi - if [ -n "$dapper_upgrade" -a -z "$kernel_options$recovery_desc" ]; then - echo -n " " >> $buffer - fi - echo >> $buffer - - if [ -n "$initrd" ]; then - if [ -n "$hypervisor" ]; then - echo -n "module " >> $buffer - else - echo -n "initrd " >> $buffer - fi - echo "$initrd" >> $buffer - fi - - if [ ! -n "$recovery_desc" -a x"$supports_quiet" = x"true" -a -z "$dapper_upgrade" ]; then - echo "quiet" >> $buffer - fi - - if test x"$savedefault" = x"true" ; then - echo "savedefault" >> $buffer - fi - if test x"$dapper_upgrade" != x ; then - echo "boot" >> $buffer - fi - echo >> $buffer -} - -## write out the kernel entries -output_kernel_list() { - counter=0 - - # Xen entries first. - for kern in $xenKernels ; do - if test ! x"$howmany" = x"all" ; then - if [ $counter -gt $howmany ] ; then - break - fi - fi - - kernelName=$(basename $kern) - kernelVersion=$(echo $kernelName | sed -e 's/vmlinuz//') - - initrdName=$(FindInitrdName "/boot" "$kernelVersion") - initrd="" - - kernel=$kernel_dir/$kernelName - if [ -n "$initrdName" ] ; then - initrd=$kernel_dir/$initrdName - fi - - kernelVersion=$(echo $kernelVersion | sed -e 's/^-//') - currentOpt=$(get_kernel_opt $kernelVersion) - - hypervisorVersions=$(FindXenHypervisorVersions "$kernelVersion") - - found= - for hypervisorVersion in $hypervisorVersions; do - hypervisor="$kernel_dir/xen-$hypervisorVersion.gz" - if [ -e "$hypervisor" ]; then - found=1 - - echo "Found Xen hypervisor $hypervisorVersion, kernel: $kernel" >&2 - - write_kernel_entry "$kernelVersion" '' '' "$grub_root_device" \ - "$kernel" "$currentOpt $xenkopt" '' "$initrd" "$savedefault" '' "$dapper_upgrade" \ - Xen "$hypervisor" "$hypervisorVersion" "$xenhopt" - counter=$(($counter + 1)) - fi - done - - if [ -z $found ]; then - for hypervisor in $hypervisors; do - hypVersion=`basename "$hypervisor" .gz | sed s%xen-%%` - - echo "Found Xen hypervisor $hypVersion, kernel: $kernel" >&2 - - write_kernel_entry "$kernelVersion" '' '' "$grub_root_device" \ - "$kernel" "$currentOpt $xenkopt" '' "$initrd" "$savedefault" '' "$dapper_upgrade" \ - Xen "$kernel_dir/$hypervisor" "$hypVersion" "$xenhopt" - counter=$(($counter + 1)) - done - fi - done - - for kern in $sortedKernels ; do - counter=$(($counter + 1)) - if test ! 
x"$howmany" = x"all" ; then - if [ $counter -gt $howmany ] ; then - break - fi - fi - kernelName=$(basename $kern) - initrdName="" - initrd="" - extra_opts="" - - if [ "$kern" = "/boot/last-good-boot/vmlinuz" ]; then - kernelVersion="Last successful boot" - if [ -e "/boot/last-good-boot/initrd.img" ]; then - initrdName="last-good-boot/initrd.img" - fi - kernelName="last-good-boot/vmlinuz" - extra_opts="$extra_opts last-good-boot" - else - kernelVersion=$(echo $kernelName | sed -e 's/vmlinuz//') - initrdName=$(FindInitrdName "/boot" "$kernelVersion") - if [ -x "/usr/bin/makedumpfile" ] && [ -x "/sbin/kexec" ]; then - extra_opts="$extra_opts crashkernel=384M-2G:64M,2G-:128M" - fi - fi - - kernel=$kernel_dir/$kernelName - - if [ -n "$initrdName" ] ; then - initrd=$kernel_dir/$initrdName - fi - - echo "Found kernel: $kernel" >&2 - - if [ "$kernelName" = "vmlinuz" ]; then - if [ -L "/boot/$kernelName" ]; then - kernelVersion=`readlink -f "/boot/$kernelName"` - kernelVersion=$(echo $kernelVersion | sed -e 's/.*vmlinuz-//') - kernelVersion="$kernelVersion Default" - else - kernelVersion="Default" - fi - fi - if [ "$kernelName" = "vmlinuz.old" ]; then - if [ -L "/boot/$kernelName" ]; then - kernelVersion=`readlink -f "/boot/$kernelName"` - kernelVersion=$(echo $kernelVersion | sed -e 's/.*vmlinuz-//') - kernelVersion="$kernelVersion Previous" - else - kernelVersion="Previous" - fi - fi - - kernelVersion=$(echo $kernelVersion | sed -e 's/^-//') - - currentOpt=$(get_kernel_opt $kernelVersion) - - do_lockold=$lockold - # do not lockold for the first entry - [ $counter -eq 1 ] && do_lockold=false - - if [ "$kernelName" = "last-good-boot/vmlinuz" ]; then - if [ -e /boot/last-good-boot/cmdline ]; then - cmdline="$(cat /boot/last-good-boot/cmdline) last-good-boot" - else - cmdline="$currentOpt $defoptions $extra_opts" - fi - write_kernel_entry "$kernelVersion" "" "" "$grub_root_device" "$kernel" \ - "$cmdline" "" "$initrd" "$savedefault" "$do_lockold" \ - "$dapper_upgrade" - else - write_kernel_entry "$kernelVersion" "" "" "$grub_root_device" "$kernel" \ - "$currentOpt $defoptions $extra_opts" "" "$initrd" "$savedefault" \ - "$do_lockold" "$dapper_upgrade" - fi - - # insert the alternative boot options - if test ! x"$alternative" = x"false" && \ - test ! x"$kernelName" = x"last-good-boot/vmlinuz"; then - # for each altoptions line do this stuff - sed -ne 's/# altoptions=\(.*\)/\1/p' $buffer | while read line; do - descr=$(echo $line | sed -ne 's/\(([^)]*)\)[[:space:]]\(.*\)/\1/p') - suffix=$(echo $line | sed -ne 's/\(([^)]*)\)[[:space:]]\(.*\)/\2/p') - - test x"$lockalternative" = x"true" && do_lockold=false - write_kernel_entry "$kernelVersion" "$descr" "$lockalternative" \ - "$grub_root_device" "$kernel" "$currentOpt $extra_opts" \ - "$suffix" "$initrd" "false" "$do_lockold" \ - "$dapper_upgrade" - done - fi - done - -## Adding the chainload stanza is simply confusing, and for -## legacy ec2 grub, it will never be used. LP: #627451 -## -# if test -f $grub2name ; then -# echo "Found GRUB 2: $grub2name" >&2 -# cat >> $buffer << EOF -#title Chainload into GRUB 2 -#root $grub_root_device -#kernel $grub2name -#EOF -# if test x"$savedefault" = x"true" ; then -# echo "savedefault" >> $buffer -# fi -# echo >> $buffer -# fi - - memtest86names="memtest86 memtest86+" - - if test ! 
x"$memtest86" = x"false" ; then - for name in $memtest86names ; do - if test -f "/boot/$name.bin" ; then - kernelVersion="$name" - kernel="$kernel_dir/$name.bin" - currentOpt= - initrd= - - echo "Found kernel: $kernel" >&2 - - write_kernel_entry "$kernelVersion" "" "" "$grub_root_device" \ - "$kernel" "$currentOpt" "" "$initrd" "false" "" "$dapper_upgrade" - fi - done - fi - - echo $end >> $buffer -} - -ucf_update_kernels() { - local target; target="$1" - local buffer; buffer="$2" - - sed -ni -e"/$endopt/,/$end/p" "$buffer" - - if [ "x$initialconfig" = "x" ]; then - sed -n -e"/$endopt/,/$end/p" < $menu > $ucf_menu_file - else - cat $buffer > $ucf_menu_file - fi - - db_x_loadtemplatefile "$(dpkg-query --control-path grub-legacy-ec2 templates)" grub - - ucf --debconf-ok \ - --debconf-template grub/update_grub_changeprompt_threeway \ - --three-way "$buffer" $ucf_menu_file - rm "$buffer" - - # now re-merge the ucf results with the target file - sed -i -e "/^$endopt/,/^$end/ { - /^$endopt/r $ucf_menu_file - d - } - " $target - - rm -f $ucf_menu_file ${ucf_menu_file}.ucf-old -} - - -echo -n "Testing for an existing GRUB $menu_file_basename file ... " >&2 - -# Test if our menu file exists -if [ -f "$menu_file" ] ; then - menu="$menu_file" - rm -f $newtemplate - unset newtemplate - echo "found: $menu_file" >&2 - cp -f "$menu_file" "$menu_file~" -else - # if not ask user if they want us to create one - initialconfig=1 - menu="$menu_file" - echo >&2 - echo >&2 - echo -n "Could not find $menu_file file. " >&2 - if [ "-y" = "$command_line_arguments" ] ; then - echo >&2 - echo "Generating $menu_file" >&2 - answer=y - else - echo -n "Would you like $menu_file generated for you? " >&2 - echo -n "(y/N) " >&2 - read answer <&2 - fi - - case "$answer" in - y* | Y*) - cat "$newtemplate" > $menu_file - rm -f $newtemplate - unset newtemplate - ;; - *) - abort "Not creating $menu_file as you wish" - ;; - esac -fi - -# Extract the kernel options to use -kopt=$(GetMenuOpt "kopt" "$kopt") - -# Extract options for specific kernels -opts="$(ExtractMenuOpts "\(kopt_[[:alnum:]_]\+\)")" -test -z "$opts" || eval "$opts" -CustomKopts=$(GetMenuOpts "\(kopt_[[:alnum:]_]\+\)" | \ - grep -v "^# kopt_2_6=" || true) - -# Set the kernel 2.6 option only for fresh install (but convert it to -# mount-by-UUID on upgrade) -test -z "$kopt_2_6" && test -z "$(GetMenuOpt "kopt" "")" && \ - kopt_2_6="$default_kopt" - -# Extract the grub root -grub_root_device=$(GetMenuOpt "groot" "$grub_root_device") -groot_cfg=$(GetMenuOpt groot "${grub_root_device_fallback}") -case "${groot_cfg}" in - [^A-Za-z0-9]*) :;; - *) - echo "uuid not supported. update 'groot' in ${menu_file}" >&2; - abort "groot must be grub root device (ie '(hd0)'). 
not '${groot_cfg}'" >&2; -esac - -# Extract the old recovery value -alternative=$(GetMenuOpt "recovery" "$alternative") - -# Extract the alternative value -alternative=$(GetMenuOpt "alternative" "$alternative") - -# Extract the lockalternative value -lockalternative=$(GetMenuOpt "lockalternative" "$lockalternative") - -# Extract the additional default options -# Check nonaltoptions too for compatibility with Ubuntu <= 5.10 -defoptions=$(GetMenuOpt "nonaltoptions" "$defoptions") -defoptions=$(GetMenuOpt "defoptions" "$defoptions") - -# Extract the lockold value -lockold=$(GetMenuOpt "lockold" "$lockold") - -# Extract Xen hypervisor options -xenhopt=$(GetMenuOpt "xenhopt" "$xenhopt") - -# Extract Xen Linux kernel options -xenkopt=$(GetMenuOpt "xenkopt" "$xenkopt") - -# Extract the howmany value -howmany=$(GetMenuOpt "howmany" "$howmany") - -# Extract the memtest86 value -memtest86=$(GetMenuOpt "memtest86" "$memtest86") - -# Extract the indomU value -indomU=$(GetMenuOpt "indomU" "$indomU") - -# Extract the updatedefaultentry option -updatedefaultentry=$(GetMenuOpt "updatedefaultentry" "$updatedefaultentry") - -# If "default saved" is in use, set the default to true -grep -q "^default.*saved" $menu && savedefault=true -# Extract the savedefault option -savedefault=$(GetMenuOpt "savedefault" "$savedefault") - -# Generate the menu options we want to insert -buffer=$(tempfile) -echo $start >> $buffer -echo "## lines between the AUTOMAGIC KERNELS LIST markers will be modified" >> $buffer -echo "## by the debian update-grub script except for the default options below" >> $buffer -echo >> $buffer -echo "## DO NOT UNCOMMENT THEM, Just edit them to your needs" >> $buffer -echo >> $buffer -echo "## ## Start Default Options ##" >> $buffer - -echo "## default kernel options" >> $buffer -echo "## default kernel options for automagic boot options" >> $buffer -echo "## If you want special options for specific kernels use kopt_x_y_z" >> $buffer -echo "## where x.y.z is kernel version. Minor versions can be omitted." >> $buffer -echo "## e.g. kopt=root=/dev/hda1 ro" >> $buffer -echo "## kopt_2_6_8=root=/dev/hdc1 ro" >> $buffer -echo "## kopt_2_6_8_2_686=root=/dev/hdc2 ro" >> $buffer -echo "# kopt=$kopt" >> $buffer -if [ -n "$kopt_2_6" ] && [ "$kopt" != "$kopt_2_6" ]; then - echo "# kopt_2_6=$kopt_2_6" >> $buffer -fi -if [ -n "$CustomKopts" ] ; then - echo "$CustomKopts" >> $buffer -fi -echo >> $buffer - -echo "## default grub root device" >> $buffer -echo "## e.g. groot=${grub_root_device_fallback}" >> $buffer -echo "# groot=$grub_root_device" >> $buffer -echo >> $buffer - -echo "## should update-grub create alternative automagic boot options" >> $buffer -echo "## e.g. alternative=true" >> $buffer -echo "## alternative=false" >> $buffer -echo "# alternative=$alternative" >> $buffer -echo >> $buffer - -echo "## should update-grub lock alternative automagic boot options" >> $buffer -echo "## e.g. lockalternative=true" >> $buffer -echo "## lockalternative=false" >> $buffer -echo "# lockalternative=$lockalternative" >> $buffer -echo >> $buffer - -echo "## additional options to use with the default boot option, but not with the" >> $buffer -echo "## alternatives" >> $buffer -echo "## e.g. defoptions=vga=791 resume=/dev/hda5" >> $buffer -echo "# defoptions=$defoptions" >> $buffer -echo >> $buffer - -echo "## should update-grub lock old automagic boot options" >> $buffer -echo "## e.g. 
lockold=false" >> $buffer -echo "## lockold=true" >> $buffer -echo "# lockold=$lockold" >> $buffer -echo >> $buffer - -echo "## Xen hypervisor options to use with the default Xen boot option" >> $buffer -echo "# xenhopt=$xenhopt" >> $buffer -echo >> $buffer - -echo "## Xen Linux kernel options to use with the default Xen boot option" >> $buffer -echo "# xenkopt=$xenkopt" >> $buffer -echo >> $buffer - -echo "## altoption boot targets option" >> $buffer -echo "## multiple altoptions lines are allowed" >> $buffer -echo "## e.g. altoptions=(extra menu suffix) extra boot options" >> $buffer -echo "## altoptions=(recovery) single" >> $buffer - -if ! grep -q "^# altoptions" $menu ; then - echo "# altoptions=$altoptions" >> $buffer -else - grep "^# altoptions" $menu >> $buffer -fi -echo >> $buffer - -echo "## controls how many kernels should be put into the $menu_file_basename" >> $buffer -echo "## only counts the first occurence of a kernel, not the" >> $buffer -echo "## alternative kernel options" >> $buffer -echo "## e.g. howmany=all" >> $buffer -echo "## howmany=7" >> $buffer -echo "# howmany=$howmany" >> $buffer -echo >> $buffer - -echo "## specify if running in Xen domU or have grub detect automatically" >> $buffer -echo "## update-grub will ignore non-xen kernels when running in domU and vice versa" >> $buffer -echo "## e.g. indomU=detect" >> $buffer -echo "## indomU=true" >> $buffer -echo "## indomU=false" >> $buffer -echo "# indomU=$indomU" >> $buffer -echo >> $buffer - -echo "## should update-grub create memtest86 boot option" >> $buffer -echo "## e.g. memtest86=true" >> $buffer -echo "## memtest86=false" >> $buffer -echo "# memtest86=$memtest86" >> $buffer -echo >> $buffer - -echo "## should update-grub adjust the value of the default booted system" >> $buffer -echo "## can be true or false" >> $buffer -echo "# updatedefaultentry=$updatedefaultentry" >> $buffer -echo >> $buffer - -echo "## should update-grub add savedefault to the default options" >> $buffer -echo "## can be true or false" >> $buffer -echo "# savedefault=$savedefault" >> $buffer -echo >> $buffer - -echo "## ## End Default Options ##" >> $buffer -echo >> $buffer - -echo -n "Searching for splash image ... 
" >&2 -current_splash=`grep '^splashimage=' ${menu_file} || true` -splash_root_device="" -splash_uuid="" -case "$grub_root_device" in - [^A-Za-z0-9]*) - splash_root_device=${grub_root_device} - ;; - *) - splash_uuid="uuid $grub_root_device" - ;; -esac -splashimage_path="splashimage=${splash_root_device}${grub_dir##${boot_device:+/boot}}/splash.xpm.gz" -if [ `sed -e "/^$start/,/^$end/d" $menu_file | grep -c '^splashimage='` != "0" ] ; then - #checks for splashscreen defined outside the autoupdated part - splashimage=$(grep '^splashimage=' ${menu_file}) - echo "found: ${splashimage##*=}" >&2 - echo >&2 -elif [ -f "${grub_dir}/splash.xpm.gz" ] && [ "$current_splash" = "" ]; then - echo "found: /boot/grub/splash.xpm.gz" >&2 - echo "$splash_uuid" >> $buffer - echo "$splashimage_path" >> $buffer - echo >> $buffer -elif [ -f "${grub_dir}/splash.xpm.gz" ] && [ "$current_splash" = "$splashimage_path" ]; then - echo "found: /boot/grub/splash.xpm.gz" >&2 - echo "$splash_uuid" >> $buffer - echo "$splashimage_path" >> $buffer - echo >> $buffer -elif [ "$current_splash" != "" ] && [ "$current_splash" != "$splashimage_path" ]; then - echo "found but preserving previous setting: $(grep '^splashimage=' ${menu_file})" >&2 - echo "$splash_uuid" >> $buffer - echo "$current_splash" >> $buffer - echo >> $buffer -else - echo "none found, skipping ..." >&2 -fi - - -hypervisors="" -for hyp in /boot/xen-*.gz; do - if [ ! -h "$hyp" ] && [ -f "$hyp" ]; then - hypervisors="$hypervisors `basename "$hyp"`" - fi -done - -# figure out where grub looks for the kernels at boot time -kernel_dir=/boot -if [ -n "$boot_device" ] ; then - kernel_dir= -fi - - -# We need a static path to use for the ucf registration; since we're not -# using the full menu.lst file (maybe we should, just copying it around? -# C.f. discussion with Manoj), create a directory in a fixed location -# even though we're not treating the file in that location as -# persistent. -mkdir -p /var/run/grub - -# The first time ucf sees the file, we can only assume any difference -# between the magic comments and the kernel options is a result of local -# mods, so this will result in a ucf prompt for anyone whose first -# invocation of update-grub is as a result of updating the magic comments. -if ! 
ucfq grub | grep -q $ucf_menu_file; then - otherbuffer=$(tempfile) - cat $buffer > $otherbuffer - - sortedKernels=`sed -n -e " - /$endopt/,/$end/ { - s/^kernel[[:space:]]\+\([^[:space:]]\+\).*/\1/p - }" < $menu | grep -vE "memtest86|$grub2name|xen" | uniq` - xenKernels=`sed -n -e " - /$endopt/,/$end/ { - s/^module[[:space:]]\+\([^[:space:]]*vmlinuz[^[:space:]]\+\).*/\1/p - }" < $menu | uniq` - - savebuffer="$buffer" - buffer="$otherbuffer" - savetitle="$title" - title="$(sed -n -e "/$endopt/,/$end/ { - s/^title[[:space:]]\+\(.*\),.*/\1/p - }" < $menu | head -n 1)" - if [ -z "$title" ]; then - title="$savetitle" - fi - - # Hack: the kernel list output in Ubuntu 6.06 was different than - # in the current version, so to support smooth upgrades we need to - # properly detect a config generated by this old version of - # update-grub and mimic it for the initial ucf registration - dapper_upgrade=`sed -n -e " - /$endopt/,/$end/ { - /^boot/p - }" < $menu` - save_savedefault="$savedefault" - if [ -n "$dapper_upgrade" ]; then - savedefault=true - fi - - output_kernel_list - - savedefault="$save_savedefault" - dapper_upgrade="" - buffer="$savebuffer" - title="$savetitle" - - ucf_update_kernels "$menu" "$otherbuffer" - - # all done, now register it - ucfr grub $ucf_menu_file -fi - - -if ! type is_xen_kernel >/dev/null 2>&1; then - check_xen_config_for_kernel() { - # input is like /boot/vmlinuz-2.6.35-13-virtual - # expected config path is /boot/config-2.6.35-13-virtual - local kernel="$1" config="" dir="" bname="" - dir=${kernel%/*} - [ "$dir" = "$kernel" ] && dir="." - bname=${kernel##*/} - config="$dir/config-${bname#*-}" - [ -f "$config" ] || return 1 - grep -q CONFIG_XEN=y "$config" - } - - is_xen_kernel() { - # input is like /boot/vmlinuz-2.6.35-13-virtual - # return whether or not this kernel is xen bootable. - check_xen_config_for_kernel "$1" && return 0 - - # get the version string out of it. - local ver_flavor=""; - ver_flavor="${1##*vmlinuz-}" - - case "${ver_flavor}" in - *-aws) return 0;; - *-ec2) return 0;; - *-virtual) - # 10.04 LTS through 12.04 LTS -virtual is the EC2/Xen kernel - dpkg --compare-versions ${ver_flavor%-virtual} gt 2.6.35-13 && return 0;; - *-generic) - # Starting with 12.10, -virtual was merged into -generic - dpkg --compare-versions ${ver_flavor%-generic} ge 3.4.0-3 && return 0;; - esac - return 1; - } -fi - -for kern in /boot/vmlinuz-*; do - case "$kern" in - *.signed) continue;; - esac - is_xen_kernel "${kern}" && - xen_verlist="${xen_verlist} ${kern#/boot/vmlinuz-}" -done -xen_verlist=${xen_verlist# } - -xenKernels="" -for ver in ${xen_verlist}; do - # ver is a kernel version - kern="/boot/vmlinuz-$ver" - if [ -r $kern ] ; then - newerKernels="" - for i in $xenKernels ; do - res=$(CompareVersions "$kern" "$i") - if [ "$kern" != "" ] && [ "$res" -gt 0 ] ; then - newerKernels="$newerKernels $kern $i" - kern="" - else - newerKernels="$newerKernels $i" - fi - done - if [ "$kern" != "" ] ; then - newerKernels="$newerKernels $kern" - fi - xenKernels="$newerKernels" - fi -done -xenKernels=" ${xenKernels} " - -if [ "$indomU" = "detect" ]; then - if [ -e /proc/xen/capabilities ] && ! 
grep -q "control_d" /proc/xen/capabilities; then - indomU="true" - else - indomU="false" - fi -fi - -sortedKernels="" -for kern in $(/bin/ls -1vr /boot | grep -v "dpkg-*" | grep "^vmlinuz-") ; do - if `echo "$xenKernels" | grep -q "$kern "` || `echo "$kern" | grep -q "xen"`; then - is_xen=1 - else - is_xen= - fi - - if [ "$indomU" = "false" ] && [ "$is_xen" ]; then - # We aren't running in a Xen domU, skip xen kernels - echo "Ignoring Xen kernel on non-Xen host: $kern" - continue - elif [ "$indomU" = "true" ] && ! [ "$is_xen" ]; then - # We are running in a Xen domU, skip non-xen kernels - echo "Ignoring non-Xen Kernel on Xen domU host: $kern" - continue - fi - - kern="/boot/$kern" - newerKernels="" - for i in $sortedKernels ; do - res=$(CompareVersions "$kern" "$i") - if [ "$kern" != "" ] && [ "$res" -gt 0 ] ; then - newerKernels="$newerKernels $kern $i" - kern="" - else - newerKernels="$newerKernels $i" - fi - done - if [ "$kern" != "" ] ; then - newerKernels="$newerKernels $kern" - fi - sortedKernels="$newerKernels" -done - -if test -f "/boot/vmlinuz.old" ; then - sortedKernels="/boot/vmlinuz.old $sortedKernels" -fi -if test -f "/boot/vmlinuz" ; then - sortedKernels="/boot/vmlinuz $sortedKernels" -fi - -# Add our last-good-boot kernel, second in list. We always add it, because -# it can appear out of nowhere. -newerKernels="" -last_good="/boot/last-good-boot/vmlinuz" -if [ -e "$last_good" ]; then - for i in $sortedKernels ; do - if [ "$last_good" != "" ]; then - newerKernels="$i $last_good" - last_good="" - else - newerKernels="$newerKernels $i" - fi - done - # Shouldn't happen, unless someone removed all the kernels - if [ "$last_good" != "" ]; then - newerKernels="$newerKernels $last_good" - fi - sortedKernels="$newerKernels" -fi - -#Finding the value the default line -use_grub_set_default="false" -if test "$updatedefaultentry" = "true" ; then - defaultEntryNumber=$(sed -ne 's/^[[:blank:]]*default[[:blank:]]*\(.*\).*/\1/p' $menu) - - if [ "$defaultEntryNumber" = "saved" ] ; then - defaultEntryNumber=$(sed 'q' "$grub_dir/default") - use_grub_set_default="true" - fi - - if test -n "$defaultEntryNumber"; then - defaultEntryNumberPlusOne=$(expr $defaultEntryNumber \+ 1); - defaultEntry=$(grep "^[[:blank:]]*title" $menu | sed -ne "${defaultEntryNumberPlusOne}p" | sed -ne ";s/^[[:blank:]]*title[[:blank:]]*//p") - defaultEntry=$(echo $defaultEntry | sed -e "s/[[:blank:]]*$//") # don't trust trailing blanks - else - notChangeDefault="yes" - fi -else - notChangeDefault="yes" -fi - -output_kernel_list - -otherbuffer=$(tempfile) -cat $buffer > $otherbuffer - -ucf_update_kernels "$buffer" "$otherbuffer" - -echo -n "Updating $menu ... " >&2 -# Insert the new options into the menu -if ! 
grep -q "^$start" $menu ; then - cat $buffer >> $menu - rm -f $buffer -else - umask 077 - sed -e "/^$start/,/^$end/{ - /^$start/r $buffer - d - } - " $menu > $menu.new - cat $menu.new > $menu - rm -f $buffer $menu.new -fi - -# Function to update the default value -set_default_value() { - if [ "$use_grub_set_default" = "true" ] ; then - grub-set-default $1 - else - value="$1" - newmenu=$(tempfile) - sed -e "s/^[[:blank:]]*default[[:blank:]]*[[:digit:]]*\(.*\)/default ${value}\1/;b" $menu > $newmenu - cat $newmenu > $menu - rm -f $newmenu - unset newmenu - fi -} - -#Updating the default number -if test -z "$notChangeDefault"; then - newDefaultNumberPlusOne=$(grep "^[[:blank:]]*title[[:blank:]]*" $menu | grep -n "${defaultEntry}" | cut -f1 -d ":" | sed -ne "1p") - if test -z "$newDefaultNumberPlusOne"; then - echo "Previous default entry removed, resetting to 0">&2 - set_default_value "0" - elif test -z "$defaultEntry"; then - echo "Value of default value matches no entry, resetting to 0" >&2 - set_default_value "0" - else - if test "$newDefaultNumberPlusOne" = "1"; then - newDefaultNumber="0" - else - newDefaultNumber=$(expr $newDefaultNumberPlusOne - 1) - fi - echo "Updating the default booting kernel">&2 - set_default_value "$newDefaultNumber" - fi -fi - -echo "done" >&2 -echo >&2 diff --git a/debian/watch b/debian/watch deleted file mode 100644 index 0f7a600b..00000000 --- a/debian/watch +++ /dev/null @@ -1,2 +0,0 @@ -version=3 -https://launchpad.net/cloud-init/+download .*/\+download/cloud-init-(.+)\.tar.gz diff --git a/doc-requirements.txt b/doc-requirements.txt new file mode 100644 index 00000000..d5f921e3 --- /dev/null +++ b/doc-requirements.txt @@ -0,0 +1,5 @@ +doc8 +m2r +sphinx<2 +sphinx_rtd_theme +pyyaml diff --git a/doc/README b/doc/README deleted file mode 100644 index 83559192..00000000 --- a/doc/README +++ /dev/null @@ -1,4 +0,0 @@ -This project is cloud-init it is hosted on launchpad at -https://launchpad.net/cloud-init - -The package was previously named ec2-init. diff --git a/doc/examples/cloud-config-add-apt-repos.txt b/doc/examples/cloud-config-add-apt-repos.txt index 22ef7612..97722107 100644 --- a/doc/examples/cloud-config-add-apt-repos.txt +++ b/doc/examples/cloud-config-add-apt-repos.txt @@ -1,6 +1,10 @@ #cloud-config -# Add apt repositories +# Add primary apt repositories +# +# To add 3rd party repositories, see cloud-config-apt.txt or the +# Additional apt configuration and repositories section. 
+# # # Default: auto select based on cloud metadata # in ec2, the default is <region>.archive.ubuntu.com diff --git a/doc/examples/cloud-config-chef.txt b/doc/examples/cloud-config-chef.txt index defc5a54..2320e01a 100644 --- a/doc/examples/cloud-config-chef.txt +++ b/doc/examples/cloud-config-chef.txt @@ -98,6 +98,9 @@ chef: # to the install script omnibus_version: "12.3.0" + # If encrypted data bags are used, the client needs to have a secrets file + # configured to decrypt them + encrypted_data_bag_secret: "/etc/chef/encrypted_data_bag_secret" # Capture all subprocess output into a logfile # Useful for troubleshooting cloud-init issues diff --git a/doc/examples/cloud-config-datasources.txt b/doc/examples/cloud-config-datasources.txt index 2651c027..52a2476b 100644 --- a/doc/examples/cloud-config-datasources.txt +++ b/doc/examples/cloud-config-datasources.txt @@ -38,7 +38,7 @@ datasource: # these are optional, but allow you to basically provide a datasource # right here user-data: | - # This is the user-data verbatum + # This is the user-data verbatim meta-data: instance-id: i-87018aed local-hostname: myhost.internal diff --git a/doc/examples/cloud-config-disk-setup.txt b/doc/examples/cloud-config-disk-setup.txt index 43a62a26..cac44d8c 100644 --- a/doc/examples/cloud-config-disk-setup.txt +++ b/doc/examples/cloud-config-disk-setup.txt @@ -17,7 +17,7 @@ fs_setup: device: ephemeral0 partition: auto -# Default disk definitions for Windows Azure +# Default disk definitions for Microsoft Azure # ------------------------------------------ device_aliases: {'ephemeral0': '/dev/sdb'} @@ -34,6 +34,21 @@ fs_setup: replace_fs: ntfs +# Data disks definitions for Microsoft Azure +# ------------------------------------------ + +disk_setup: + /dev/disk/azure/scsi1/lun0: + table_type: gpt + layout: True + overwrite: True + +fs_setup: + - device: /dev/disk/azure/scsi1/lun0 + partition: 1 + filesystem: ext4 + + # Default disk definitions for SmartOS # ------------------------------------ @@ -49,7 +64,7 @@ fs_setup: filesystem: ext4 device: ephemeral0.0 -# Cavaut for SmartOS: if ephemeral disk is not defined, then the disk will +# Caveat for SmartOS: if ephemeral disk is not defined, then the disk will # not be automatically added to the mounts. @@ -242,7 +257,7 @@ fs_setup: # # "false": If an existing file system exists, skip the creation. # -# <REPLACE_FS>: This is a special directive, used for Windows Azure that +# <REPLACE_FS>: This is a special directive, used for Microsoft Azure that # instructs cloud-init to replace a file system of <FS_TYPE>. NOTE: # unless you define a label, this requires the use of the 'any' partition # directive. diff --git a/doc/examples/cloud-config-mount-points.txt b/doc/examples/cloud-config-mount-points.txt index 5a6c24f5..bce28bf8 100644 --- a/doc/examples/cloud-config-mount-points.txt +++ b/doc/examples/cloud-config-mount-points.txt @@ -34,7 +34,7 @@ mounts: # mount_default_fields # These values are used to fill in any entries in 'mounts' that are not -# complete. This must be an array, and must have 7 fields. +# complete. This must be an array, and must have 6 fields. 
mount_default_fields: [ None, None, "auto", "defaults,nofail", "0", "2" ] diff --git a/doc/examples/cloud-config-ssh-keys.txt b/doc/examples/cloud-config-ssh-keys.txt index 235a114f..aad8b683 100644 --- a/doc/examples/cloud-config-ssh-keys.txt +++ b/doc/examples/cloud-config-ssh-keys.txt @@ -6,7 +6,7 @@ ssh_authorized_keys: - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEA3FSyQwBI6Z+nCSjUUk8EEAnnkhXlukKoUPND/RRClWz2s5TCzIkd3Ou5+Cyz71X0XmazM3l5WgeErvtIwQMyT1KjNoMhoJMrJnWqQPOt5Q8zWd9qG7PBl9+eiH5qV7NZ mykey@host - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZdQueUq5ozemNSj8T7enqKHOEaFoU2VoPgGEWC9RyzSQVeyD6s7APMcE82EtmW4skVEgEGSbDc1pvxzxtchBj78hJP6Cf5TCMFSXw+Fz5rF1dR23QDbN1mkHs7adr8GW4kSWqU7Q7NDwfIrJJtO7Hi42GyXtvEONHbiRPOe8stqUly7MvUoN+5kfjBM8Qqpfl2+FNhTYWpMfYdPUnE7u536WqzFmsaqJctz3gBxH9Ex7dFtrxR4qiqEr9Qtlu3xGn7Bw07/+i1D+ey3ONkZLN+LQ714cgj8fRS4Hj29SCmXp5Kt5/82cD/VN3NtHw== smoser@brickies -# Send pre-generated ssh private keys to the server +# Send pre-generated SSH private keys to the server # If these are present, they will be written to /etc/ssh and # new random keys will not be generated # in addition to 'rsa' and 'dsa' as shown below, 'ecdsa' is also supported @@ -42,5 +42,3 @@ ssh_keys: -----END DSA PRIVATE KEY----- dsa_public: ssh-dss AAAAB3NzaC1kc3MAAACBAM/Ycu7ulMTEvz1RLIzTbrhELJZf8Iwua6TFfQl1ubb1rHwUElOkus7xMhdVjms8AmbV1Meem7ImE69T0bszy09QAG3NImHgZVIeXBoJ/JzByku/1NcOBYilKP7oSIcLJpGUHX8IGn1GJoH7XRBwVub6Vqm4RP78C7q9IOn0hG2VAAAAFQCDEfCrnL1GGzhCPsr/uS1vbt8/wQAAAIEAjSrok/4m8mbBkVp4IwxXFdRuqJKSj8/WWxos00Ednn/ww5QibysHYULrOKJ1+54mmpMyp5CZICUQELCfCt5ScZ9GsqgmnI80Q1h3Xkwbo3kn7PzWwRwcV6muvJn4PcZ71WM+rdN/c2EorAINDTbjRo97NueM94WbiYdtjHFxn0YAAACAXmLIFSQgiAPu459rCKxT46tHJtM0QfnNiEnQLbFluefZ/yiI4DI38UzTCOXLhUA7ybmZha+D/csj15Y9/BNFuO7unzVhikCQV9DTeXX46pG4s1o23JKC/QaYWNMZ7kTRv+wWow9MhGiVdML4ZN4XnifuO5krqAybngIy66PMEoQ= smoser@localhost - - diff --git a/doc/examples/cloud-config-update-apt.txt b/doc/examples/cloud-config-update-apt.txt index 647241ca..aaa47326 100644 --- a/doc/examples/cloud-config-update-apt.txt +++ b/doc/examples/cloud-config-update-apt.txt @@ -5,4 +5,4 @@ # # Default: false # Aliases: apt_update -package_update: false +package_update: true diff --git a/doc/examples/cloud-config-user-groups.txt b/doc/examples/cloud-config-user-groups.txt index 6a363b77..f588bfbc 100644 --- a/doc/examples/cloud-config-user-groups.txt +++ b/doc/examples/cloud-config-user-groups.txt @@ -1,3 +1,4 @@ +#cloud-config # Add groups to the system # The following example adds the ubuntu group with members 'root' and 'sys' # and the empty group cloud-users. diff --git a/doc/man/cloud-id.1 b/doc/man/cloud-id.1 new file mode 100644 index 00000000..98ce130c --- /dev/null +++ b/doc/man/cloud-id.1 @@ -0,0 +1,31 @@ +.TH CLOUD-ID 1 + +.SH NAME +cloud-id \- Report the canonical cloud-id for this instance + +.SH SYNOPSIS +.BR "cloud-id" " [-h] [-j] [-l] [-i <INSTANCE_DATA>]" + +.SH OPTIONS +.TP +.B "-h, --help" +Show help message and exit + +.TP +.B "-j, --json" +Report all standardized cloud-id information as json + +.TP +.B "-l, --long" +Report extended cloud-id information as tab-delimited string + +.TP +.BR "-i <data>, --instance-data <data>" +Path to instance-data.json file. Default is +/run/cloud-init/instance-data.json + +.SH COPYRIGHT +Copyright (C) 2020 Canonical Ltd. 
License GPL-3 or Apache-2.0
+
+.SH SEE ALSO
+Full documentation at: <https://cloudinit.readthedocs.io>
diff --git a/doc/man/cloud-init-per.1 b/doc/man/cloud-init-per.1
new file mode 100644
index 00000000..3668232e
--- /dev/null
+++ b/doc/man/cloud-init-per.1
@@ -0,0 +1,45 @@
+.TH CLOUD-INIT-PER 1
+
+.SH NAME
+cloud-init-per \- Run a command with arguments at a specific frequency
+
+.SH SYNOPSIS
+.BR "cloud-init-per" " <frequency> <name> <cmd> [ arg1 [ arg2 [...]]]"
+
+.SH DESCRIPTION
+Run a command with arguments at a specific frequency.
+
+This utility can make it easier to use boothooks or bootcmd on a per
+"once" or "always" basis. For example:
+
+ - [ cloud-init-per, once, mymkfs, mkfs, /dev/vdb ]
+
+The cloud-init-per command replaced the cloud-init-run-module command.
+
+.SH OPTIONS
+.TP
+.B "frequency"
+This can be one of the following values:
+
+.BR "once" ":"
+run only once and do not re-run on a new instance-id
+
+.BR "instance" ":"
+run only on the first boot for a given instance-id
+
+.BR "always" ":"
+run on every boot
+
+.TP
+.B "name"
+A name for the command; it shows up in logs and is used to track whether the command has already run.
+
+.TP
+.B "cmd [ arg1 [ arg2 [...]]]"
+The actual command to run, followed by any additional arguments.
+
+.SH COPYRIGHT
+Copyright (C) 2020 Canonical Ltd. License GPL-3 or Apache-2.0
+
+.SH SEE ALSO
+Full documentation at: <https://cloudinit.readthedocs.io>
diff --git a/doc/man/cloud-init.1 b/doc/man/cloud-init.1
new file mode 100644
index 00000000..9b52dc8d
--- /dev/null
+++ b/doc/man/cloud-init.1
@@ -0,0 +1,88 @@
+.TH CLOUD-INIT 1
+
+.SH NAME
+cloud-init \- Cloud instance initialization
+
+.SH SYNOPSIS
+.BR "cloud-init" " [-h] [-d] [-f FILES] [--force] [-v] {init,modules,single,query,dhclient-hook,features,analyze,collect-logs,clean,status}"
+
+.SH DESCRIPTION
+Cloud-init provides a mechanism for cloud instance initialization.
+This is done by identifying the cloud platform that is in use, reading
+provided cloud metadata and optional vendor and user
+data, and then initializing the instance as requested.
+
+This command is not normally meant to be run directly by
+the user. However, some subcommands may be useful for development or
+debugging of deployments.
+
+.SH OPTIONS
+.TP
+.B "-h, --help"
+Show help message and exit
+
+.TP
+.B "-d, --debug"
+Show additional pre-action logging (default: False)
+
+.TP
+.B "-f <files>, --files <files>"
+Additional YAML configuration files to use
+
+.TP
+.B "--force"
+Force running even if no datasource is found (use at your own risk)
+
+.TP
+.B "-v, --version"
+Show program's version number and exit
+
+.SH SUBCOMMANDS
+Please see the help output for each subcommand for additional details,
+flags, and subcommands.
+
+.TP
+.B "analyze"
+Analyze cloud-init logs and data.
+
+.TP
+.B "collect-logs"
+Collect and tar all cloud-init debug info.
+
+.TP
+.B "clean"
+Remove logs and artifacts so cloud-init can re-run.
+
+.TP
+.B "dhclient-hook"
+Run the dhclient hook to record network info.
+
+.TP
+.B "features"
+List defined features.
+
+.TP
+.B "init"
+Initializes cloud-init and runs the initial modules.
+
+.TP
+.B "modules"
+Activates modules using a given configuration key.
+
+.TP
+.B "query"
+Query standardized instance metadata from the command line.
+
+.TP
+.B "single"
+Run a single module.
+
+.TP
+.B "status"
+Report cloud-init status or wait on completion.
+
+.SH COPYRIGHT
+Copyright (C) 2020 Canonical Ltd. License GPL-3 or Apache-2.0
+
+.SH SEE ALSO
+Full documentation at: <https://cloudinit.readthedocs.io>
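As a quick illustration of how the commands described in the new man pages above fit together, here is a minimal first-boot session. This is a sketch only: the device path and the output lines are illustrative and are not taken from this changeset.

.. code-block:: shell-session

    $ # Block until every cloud-init stage has finished, then report
    $ cloud-init status --wait
    status: done
    $ # Report the canonical cloud-id for this instance (see cloud-id(1))
    $ cloud-id
    aws
    $ # Format a volume at most once per instance-id; rerunning with the
    $ # same <frequency> and <name> is a no-op (see cloud-init-per(1))
    $ sudo cloud-init-per instance mymkfs mkfs.ext4 /dev/vdb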
diff --git a/doc/rtd/conf.py b/doc/rtd/conf.py
index 50eb05cf..86441986 100644
--- a/doc/rtd/conf.py
+++ b/doc/rtd/conf.py
@@ -17,7 +17,8 @@ from cloudinit.config.schema import get_schema_doc
 # ]
 
 # General information about the project.
-project = 'Cloud-Init'
+project = 'cloud-init'
+copyright = '2019, Canonical Ltd.'
 
 # -- General configuration ----------------------------------------------------
@@ -27,16 +28,12 @@ project = 'cloud-init'
 # Add any Sphinx extension module names here, as strings. They can be
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
 extensions = [
-    'sphinx.ext.intersphinx',
+    'm2r',
     'sphinx.ext.autodoc',
     'sphinx.ext.autosectionlabel',
     'sphinx.ext.viewcode',
 ]
 
-intersphinx_mapping = {
-    'sphinx': ('http://sphinx.pocoo.org', None)
-}
-
 # The suffix of source filenames.
 source_suffix = '.rst'
 
@@ -64,15 +61,7 @@ show_authors = False
 
 # The theme to use for HTML and HTML Help pages.  See the documentation for
 # a list of builtin themes.
-html_theme = 'default'
-
-# Theme options are theme-specific and customize the look and feel of a theme
-# further.  For a list of options available for each theme, see the
-# documentation.
-html_theme_options = {
-    "bodyfont": "Ubuntu, Arial, sans-serif",
-    "headfont": "Ubuntu, Arial, sans-serif"
-}
+html_theme = 'sphinx_rtd_theme'
 
 # The name of an image file (relative to this directory) to place at the top
 # of the sidebar.
diff --git a/doc/rtd/index.rst b/doc/rtd/index.rst
index 20a99a30..5d90c131 100644
--- a/doc/rtd/index.rst
+++ b/doc/rtd/index.rst
@@ -1,50 +1,79 @@
 .. _index:
 
-.. http://thomas-cokelaer.info/tutorials/sphinx/rest_syntax.html
-.. As suggested at link above for headings use:
-..   # with overline, for parts
-..   * with overline, for chapters
-..   =, for sections
-..   -, for subsections
-..   ^, for subsubsections
-..   “, for paragraphs
+cloud-init Documentation
+########################
 
-#############
-Documentation
-#############
+Cloud-init is the *industry standard* multi-distribution method for
+cross-platform cloud instance initialization. It is supported across all
+major public cloud providers, provisioning systems for private cloud
+infrastructure, and bare-metal installations.
 
-.. rubric:: Everything about cloud-init, a set of **python** scripts and
-   utilities to make your cloud images be all they can be!
+Cloud instances are initialized from a disk image and instance data:
 
-*******
-Summary
-*******
+- Cloud metadata
+- User data (optional)
+- Vendor data (optional)
 
-`Cloud-init`_ is the *defacto* multi-distribution package that handles early
-initialization of a cloud instance.
+Cloud-init will identify the cloud it is running on during boot, read any
+provided metadata from the cloud and initialize the system accordingly. This
+may involve setting up network and storage devices, configuring SSH access
+keys, and many other aspects of a system. Later on, cloud-init will also
+parse and process any optional user or vendor data that was passed to the
+instance.
 
----
+Getting help
+************
+
+Having trouble? We would like to help!
+
+- Try the :ref:`FAQ` – it's got answers to some common questions
+- Ask a question in the ``#cloud-init`` IRC channel on Freenode
+- Join and ask questions on the `cloud-init mailing list <https://launchpad.net/~cloud-init>`_
+- Find a bug? `Report bugs on Launchpad <https://bugs.launchpad.net/cloud-init/+filebug>`_
 
 .. 
toctree:: - :maxdepth: 2 + :hidden: + :titlesonly: + :caption: Getting Started - topics/capabilities.rst topics/availability.rst + topics/boot.rst + topics/cli.rst + topics/faq.rst + topics/bugs.rst + +.. toctree:: + :hidden: + :titlesonly: + :caption: User Data + topics/format.rst - topics/instancedata.rst - topics/dir_layout.rst topics/examples.rst - topics/boot.rst - topics/datasources.rst - topics/logging.rst topics/modules.rst topics/merging.rst - topics/network-config.rst + +.. toctree:: + :hidden: + :titlesonly: + :caption: Instance Data + + topics/instancedata.rst + topics/datasources.rst topics/vendordata.rst - topics/debugging.rst - topics/moreinfo.rst + topics/network-config.rst + +.. toctree:: + :hidden: + :titlesonly: + :caption: Development + topics/hacking.rst + topics/security.rst + topics/debugging.rst + topics/logging.rst + topics/dir_layout.rst + topics/analyze.rst + topics/docs.rst topics/tests.rst -.. _Cloud-init: https://launchpad.net/cloud-init -.. vi: textwidth=78 +.. vi: textwidth=79 diff --git a/doc/rtd/topics/analyze.rst b/doc/rtd/topics/analyze.rst new file mode 100644 index 00000000..709131b8 --- /dev/null +++ b/doc/rtd/topics/analyze.rst @@ -0,0 +1,318 @@ +.. _analyze: + +Analyze +******* + +The analyze subcommand was added to cloud-init in order to help analyze +cloud-init boot time performance. It is loosely based on systemd-analyze where +there are four subcommands: + +- blame +- show +- dump +- boot + +Usage +===== + +The analyze command requires one of the four subcommands: + +.. code-block:: shell-session + + $ cloud-init analyze blame + $ cloud-init analyze show + $ cloud-init analyze dump + $ cloud-init analyze boot + +Availability +============ + +The analyze subcommand is generally available across all distributions with the +exception of Gentoo and FreeBSD. + +Subcommands +=========== + +Blame +----- + +The ``blame`` action matches ``systemd-analyze blame`` where it prints, in +descending order, the units that took the longest to run. This output is +highly useful for examining where cloud-init is spending its time during +execution. + +.. 
code-block:: shell-session + + $ cloud-init analyze blame + -- Boot Record 01 -- + 00.80300s (init-network/config-growpart) + 00.64300s (init-network/config-resizefs) + 00.62100s (init-network/config-ssh) + 00.57300s (modules-config/config-grub-dpkg) + 00.40300s (init-local/search-NoCloud) + 00.38200s (init-network/config-users-groups) + 00.19800s (modules-config/config-apt-configure) + 00.03700s (modules-final/config-keys-to-console) + 00.02100s (init-network/config-update_etc_hosts) + 00.02100s (init-network/check-cache) + 00.00800s (modules-final/config-ssh-authkey-fingerprints) + 00.00800s (init-network/consume-vendor-data) + 00.00600s (modules-config/config-timezone) + 00.00500s (modules-final/config-final-message) + 00.00400s (init-network/consume-user-data) + 00.00400s (init-network/config-mounts) + 00.00400s (init-network/config-disk_setup) + 00.00400s (init-network/config-bootcmd) + 00.00400s (init-network/activate-datasource) + 00.00300s (init-network/config-update_hostname) + 00.00300s (init-network/config-set_hostname) + 00.00200s (modules-final/config-snappy) + 00.00200s (init-network/config-rsyslog) + 00.00200s (init-network/config-ca-certs) + 00.00200s (init-local/check-cache) + 00.00100s (modules-final/config-scripts-vendor) + 00.00100s (modules-final/config-scripts-per-once) + 00.00100s (modules-final/config-salt-minion) + 00.00100s (modules-final/config-rightscale_userdata) + 00.00100s (modules-final/config-phone-home) + 00.00100s (modules-final/config-package-update-upgrade-install) + 00.00100s (modules-final/config-fan) + 00.00100s (modules-config/config-ubuntu-advantage) + 00.00100s (modules-config/config-ssh-import-id) + 00.00100s (modules-config/config-snap) + 00.00100s (modules-config/config-set-passwords) + 00.00100s (modules-config/config-runcmd) + 00.00100s (modules-config/config-locale) + 00.00100s (modules-config/config-byobu) + 00.00100s (modules-config/config-apt-pipelining) + 00.00100s (init-network/config-write-files) + 00.00100s (init-network/config-seed_random) + 00.00100s (init-network/config-migrator) + 00.00000s (modules-final/config-ubuntu-drivers) + 00.00000s (modules-final/config-scripts-user) + 00.00000s (modules-final/config-scripts-per-instance) + 00.00000s (modules-final/config-scripts-per-boot) + 00.00000s (modules-final/config-puppet) + 00.00000s (modules-final/config-power-state-change) + 00.00000s (modules-final/config-mcollective) + 00.00000s (modules-final/config-lxd) + 00.00000s (modules-final/config-landscape) + 00.00000s (modules-final/config-chef) + 00.00000s (modules-config/config-snap_config) + 00.00000s (modules-config/config-ntp) + 00.00000s (modules-config/config-emit_upstart) + 00.00000s (modules-config/config-disable-ec2-metadata) + 00.00000s (init-network/setup-datasource) + + 1 boot records analyzed + +Show +---- + +The ``show`` action is similar to ``systemd-analyze critical-chain`` which +prints a list of units, the time they started and how long they took. +Cloud-init has four stages and within each stage a number of modules may run +depending on configuration. ``cloudinit-analyze show`` will, for each boot, +print this information and a summary total time, per boot. + +The following is an abbreviated example of the show output: + +.. code-block:: shell-session + + $ cloud-init analyze show + -- Boot Record 01 -- + The total time elapsed since completing an event is printed after the "@" character. + The time the event takes is printed after the "+" character. 
+ + Starting stage: init-local + |``->no cache found @00.01700s +00.00200s + |`->found local data from DataSourceNoCloud @00.11000s +00.40300s + Finished stage: (init-local) 00.94200 seconds + + Starting stage: init-network + |`->restored from cache with run check: DataSourceNoCloud [seed=/dev/sr0][dsmode=net] @04.79500s +00.02100s + |`->setting up datasource @04.88900s +00.00000s + |`->reading and applying user-data @04.90100s +00.00400s + |`->reading and applying vendor-data @04.90500s +00.00800s + |`->activating datasource @04.95200s +00.00400s + Finished stage: (init-network) 02.72100 seconds + + Starting stage: modules-config + |`->config-emit_upstart ran successfully @15.43100s +00.00000s + |`->config-snap ran successfully @15.43100s +00.00100s + ... + |`->config-runcmd ran successfully @16.22300s +00.00100s + |`->config-byobu ran successfully @16.23400s +00.00100s + Finished stage: (modules-config) 00.83500 seconds + + Starting stage: modules-final + |`->config-snappy ran successfully @16.87400s +00.00200s + |`->config-package-update-upgrade-install ran successfully @16.87600s +00.00100s + ... + |`->config-final-message ran successfully @16.93700s +00.00500s + |`->config-power-state-change ran successfully @16.94300s +00.00000s + Finished stage: (modules-final) 00.10300 seconds + + Total Time: 4.60100 seconds + + 1 boot records analyzed + +If additional boot records are detected then they are printed out from oldest +to newest. + +Dump +---- + +The ``dump`` action simply dumps the cloud-init logs that the analyze module +is performing the analysis on and returns a list of dictionaries that can be +consumed for other reporting needs. Each element in the list is a boot entry. + +.. code-block:: shell-session + + $ cloud-init analyze dump + [ + { + "description": "starting search for local datasources", + "event_type": "start", + "name": "init-local", + "origin": "cloudinit", + "timestamp": 1567057578.037 + }, + { + "description": "attempting to read from cache [check]", + "event_type": "start", + "name": "init-local/check-cache", + "origin": "cloudinit", + "timestamp": 1567057578.054 + }, + { + "description": "no cache found", + "event_type": "finish", + "name": "init-local/check-cache", + "origin": "cloudinit", + "result": "SUCCESS", + "timestamp": 1567057578.056 + }, + { + "description": "searching for local data from DataSourceNoCloud", + "event_type": "start", + "name": "init-local/search-NoCloud", + "origin": "cloudinit", + "timestamp": 1567057578.147 + }, + { + "description": "found local data from DataSourceNoCloud", + "event_type": "finish", + "name": "init-local/search-NoCloud", + "origin": "cloudinit", + "result": "SUCCESS", + "timestamp": 1567057578.55 + }, + { + "description": "searching for local datasources", + "event_type": "finish", + "name": "init-local", + "origin": "cloudinit", + "result": "SUCCESS", + "timestamp": 1567057578.979 + }, + { + "description": "searching for network datasources", + "event_type": "start", + "name": "init-network", + "origin": "cloudinit", + "timestamp": 1567057582.814 + }, + { + "description": "attempting to read from cache [trust]", + "event_type": "start", + "name": "init-network/check-cache", + "origin": "cloudinit", + "timestamp": 1567057582.832 + }, + ... 
+    {
+        "description": "config-power-state-change ran successfully",
+        "event_type": "finish",
+        "name": "modules-final/config-power-state-change",
+        "origin": "cloudinit",
+        "result": "SUCCESS",
+        "timestamp": 1567057594.98
+    },
+    {
+        "description": "running modules for final",
+        "event_type": "finish",
+        "name": "modules-final",
+        "origin": "cloudinit",
+        "result": "SUCCESS",
+        "timestamp": 1567057594.982
+    }
+ ]
+
+
+Boot
+----
+
+The ``boot`` action prints out kernel-related timestamps that are not included
+in any of the cloud-init logs. There are three different timestamps that are
+presented to the user:
+
+- kernel start
+- kernel finish boot
+- cloud-init start
+
+This was added to provide additional clarity into the parts of the boot
+process that cloud-init does not control, to aid in debugging performance
+issues related to cloud-init startup, and to track regressions.
+
+.. code-block:: shell-session
+
+    $ cloud-init analyze boot
+    -- Most Recent Boot Record --
+    Kernel Started at: 2019-08-29 01:35:37.753790
+    Kernel ended boot at: 2019-08-29 01:35:38.807407
+    Kernel time to boot (seconds): 1.053617000579834
+    Cloud-init activated by systemd at: 2019-08-29 01:35:43.992460
+    Time between Kernel end boot and Cloud-init activation (seconds): 5.185053110122681
+    Cloud-init start: 2019-08-29 08:35:45.867000
+    successful
+
+Timestamp Gathering
+^^^^^^^^^^^^^^^^^^^
+
+The following boot-related timestamps are gathered on demand when
+``cloud-init analyze boot`` runs:
+
+- Kernel startup, gathered from system uptime
+- Kernel finishes initialization, from the systemd
+  UserspaceTimestampMonotonic property
+- Cloud-init activation, from the property InactiveExitTimestamp of the
+  cloud-init local systemd unit
+
+On systemd systems, the commands below gather the UserspaceTimestampMonotonic
+and InactiveExitTimestampMonotonic values directly:
+
+.. code-block:: shell-session
+
+    $ systemctl show -p UserspaceTimestampMonotonic
+    UserspaceTimestampMonotonic=989279
+    $ systemctl show cloud-init-local -p InactiveExitTimestampMonotonic
+    InactiveExitTimestampMonotonic=4493126
+
+The UserspaceTimestamp tracks when the init system starts, which is used as
+an indicator of the kernel finishing initialization. The InactiveExitTimestamp
+tracks when a particular systemd unit transitions from the Inactive to Active
+state, which can be used to mark the beginning of systemd's activation of
+cloud-init.
+
+Currently this only works for distros that use systemd as the init process.
+We will be expanding support for other distros in the future and this document
+will be updated accordingly.
+
+If systemd is not present on the system, dmesg is used to attempt to find an
+event that logs the beginning of the init system. However, with this method
+only the first two timestamps can be found; dmesg does not monitor userspace
+processes, so no cloud-init start timestamp is emitted as it is with systemd.
+
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/availability.rst b/doc/rtd/topics/availability.rst
index ef5ae7bf..3f215b1b 100644
--- a/doc/rtd/topics/availability.rst
+++ b/doc/rtd/topics/availability.rst
@@ -1,21 +1,64 @@
-************
+.. _availability:
+
Availability
************
-It is currently installed in the `Ubuntu Cloud Images`_ and also in the official `Ubuntu`_ images available on EC2, Azure, GCE and many other clouds.
+The following outlines the current availability of cloud-init across
+distributions and clouds, both public and private.
+
+..
note::
+
+   If a distribution or cloud does not show up in the list below, contact
+   them and ask for images to be generated using cloud-init!
-Versions for other systems can be (or have been) created for the following distributions:
+Distributions
+=============
+
+Cloud-init has support across all major Linux distributions and
+FreeBSD:
- Ubuntu
+- SLES/openSUSE
+- RHEL/CentOS
- Fedora
+- Gentoo Linux
- Debian
-- RHEL
-- CentOS
-- *and more...*
+- ArchLinux
+- FreeBSD
+
+Clouds
+======
+
+Cloud-init provides support across a wide-ranging list of execution
+environments in the public cloud:
+
+- Amazon Web Services
+- Microsoft Azure
+- Google Cloud Platform
+- Oracle Cloud Infrastructure
+- Softlayer
+- Rackspace Public Cloud
+- IBM Cloud
+- Digital Ocean
+- Bigstep
+- Hetzner
+- Joyent
+- CloudSigma
+- Alibaba Cloud
+- OVH
+- OpenNebula
+- Exoscale
+- Scaleway
+- CloudStack
+- AltCloud
+- SmartOS
-So ask your distribution provider where you can obtain an image with it built-in if one is not already available ☺
+Additionally, cloud-init is supported on these private clouds:
+
+- Bare metal installs
+- OpenStack
+- LXD
+- KVM
+- Metal-as-a-Service (MAAS)
-.. _Ubuntu Cloud Images: http://cloud-images.ubuntu.com/
-.. _Ubuntu: http://www.ubuntu.com/
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/boot.rst b/doc/rtd/topics/boot.rst
index f2976fdf..d846867b 100644
--- a/doc/rtd/topics/boot.rst
+++ b/doc/rtd/topics/boot.rst
@@ -1,43 +1,51 @@
.. _boot_stages:
-***********
Boot Stages
***********
-In order to be able to provide the functionality that it does, cloud-init
-must be integrated into the boot in fairly controlled way.
-There are 5 stages.
+In order to be able to provide the functionality that it does, cloud-init
+must be integrated into the boot in a fairly controlled way. There are five
+stages to boot:
-1. **Generator**
-2. **Local**
-3. **Network**
-4. **Config**
-5. **Final**
+1. Generator
+2. Local
+3. Network
+4. Config
+5. Final
Generator
=========
+
When booting under systemd, a `generator
<https://www.freedesktop.org/software/systemd/man/systemd.generator.html>`_
-will run that determines if cloud-init.target should be included in the
-boot goals. By default, this generator will enable cloud-init. It will
-not enable cloud-init if either:
+will run that determines if cloud-init.target should be included in the boot
+goals. By default, this generator will enable cloud-init. It will not enable
+cloud-init if either:
- * A file exists: ``/etc/cloud/cloud-init.disabled``
- * The kernel command line as found in /proc/cmdline contains ``cloud-init=disabled``.
-   When running in a container, the kernel command line is not honored, but
-   cloud-init will read an environment variable named ``KERNEL_CMDLINE`` in
-   its place.
+ * The file ``/etc/cloud/cloud-init.disabled`` exists
+ * The kernel command line as found in ``/proc/cmdline`` contains
+   ``cloud-init=disabled``. When running in a container, the kernel command
+   line is not honored, but cloud-init will read an environment variable named
+   ``KERNEL_CMDLINE`` in its place.
-This mechanism for disabling at runtime currently only exists in systemd.
+These mechanisms for disabling cloud-init at runtime currently only
+exist in systemd.
Local
=====
- * **systemd service**: ``cloud-init-local.service``
- * **runs**: As soon as possible with / mounted read-write.
- * **blocks**: as much of boot as possible, *must* block network bringup.
- * **modules**: none -The purpose of the local stage is: ++------------------+----------------------------------------------------------+ +| systemd service | ``cloud-init-local.service`` | ++---------+--------+----------------------------------------------------------+ +| runs | as soon as possible with ``/`` mounted read-write | ++---------+--------+----------------------------------------------------------+ +| blocks | as much of boot as possible, *must* block network | ++---------+--------+----------------------------------------------------------+ +| modules | none | ++---------+--------+----------------------------------------------------------+ + +The purpose of the local stage is to: + * locate "local" data sources. * apply networking configuration to the system (including "Fallback") @@ -45,13 +53,13 @@ In most cases, this stage does not do much more than that. It finds the datasource and determines the network configuration to be used. That network configuration can come from: - * the datasource - * fallback: Cloud-init's fallback networking consists of rendering the + * **datasource**: cloud provided network configuration via metadata + * **fallback**: cloud-init's fallback networking consists of rendering the equivalent to "dhcp on eth0", which was historically the most popular - mechanism for network configuration of a guest. - * none. network configuration can be disabled entirely with - config like the following in /etc/cloud/cloud.cfg: - '``network: {config: disabled}``'. + mechanism for network configuration of a guest + * **none**: network configuration can be disabled by writing the file + ``/etc/cloud/cloud.cfg`` with the content: + ``network: {config: disabled}`` If this is an instance's first boot, then the selected network configuration is rendered. This includes clearing of all previous (stale) configuration @@ -73,20 +81,26 @@ that require a network can operate at this stage. Network ======= - * **systemd service**: ``cloud-init.service`` - * **runs**: After local stage and configured networking is up. - * **blocks**: As much of remaining boot as possible. - * **modules**: ``cloud_init_modules`` in **/etc/cloud/cloud.cfg** + ++------------------+----------------------------------------------------------+ +| systemd service | ``cloud-init.service`` | ++---------+--------+----------------------------------------------------------+ +| runs | after local stage and configured networking is up | ++---------+--------+----------------------------------------------------------+ +| blocks | as much of remaining boot as possible | ++---------+--------+----------------------------------------------------------+ +| modules | *cloud_init_modules* in ``/etc/cloud/cloud.cfg`` | ++---------+--------+----------------------------------------------------------+ This stage requires all configured networking to be online, as it will fully process any user-data that is found. Here, processing means: - * retrive any ``#include`` or ``#include-once`` (recursively) including http - * uncompress any compressed content + * retrieve any ``#include`` or ``#include-once`` (recursively) including http + * decompress any compressed content * run any part-handler found. This stage runs the ``disk_setup`` and ``mounts`` modules which may partition -and format disks and configure mount points (such as in /etc/fstab). +and format disks and configure mount points (such as in ``/etc/fstab``). 
Those modules cannot run earlier as they may receive configuration input from sources only available via network. For example, a user may have provided user-data in a network resource that describes how local mounts @@ -94,30 +108,41 @@ should be done. On some clouds such as Azure, this stage will create filesystems to be mounted, including ones that have stale (previous instance) references in -/etc/fstab. As such, entries /etc/fstab other than those necessary for +``/etc/fstab``. As such, entries ``/etc/fstab`` other than those necessary for cloud-init to run should not be done until after this stage. -A part-handler will run at this stage, as will boothooks including +A part-handler will run at this stage, as will boot-hooks including cloud-config ``bootcmd``. The user of this functionality has to be aware that the system is in the process of booting when their code runs. Config ====== - * **systemd service**: ``cloud-config.service`` - * **runs**: After network stage. - * **blocks**: None. - * **modules**: ``cloud_config_modules`` in **/etc/cloud/cloud.cfg** + ++------------------+----------------------------------------------------------+ +| systemd service | ``cloud-config.service`` | ++---------+--------+----------------------------------------------------------+ +| runs | after network | ++---------+--------+----------------------------------------------------------+ +| blocks | nothing | ++---------+--------+----------------------------------------------------------+ +| modules | *cloud_config_modules* in ``/etc/cloud/cloud.cfg`` | ++---------+--------+----------------------------------------------------------+ This stage runs config modules only. Modules that do not really have an effect on other stages of boot are run here. - Final ===== - * **systemd service**: ``cloud-final.service`` - * **runs**: As final part of boot (traditional "rc.local") - * **blocks**: None. - * **modules**: ``cloud_final_modules`` in **/etc/cloud/cloud.cfg** + ++------------------+----------------------------------------------------------+ +| systemd service | ``cloud-final.service`` | ++---------+--------+----------------------------------------------------------+ +| runs | as final part of boot (traditional "rc.local") | ++---------+--------+----------------------------------------------------------+ +| blocks | nothing | ++---------+--------+----------------------------------------------------------+ +| modules | *cloud_final_modules* in ``/etc/cloud/cloud.cfg`` | ++---------+--------+----------------------------------------------------------+ This stage runs as late in boot as possible. Any scripts that a user is accustomed to running after logging into a system should run correctly here. @@ -127,9 +152,9 @@ Things that run here include * configuration management plugins (puppet, chef, salt-minion) * user-scripts (including ``runcmd``). -For scripts external to cloud-init looking to wait until cloud-init +For scripts external to cloud-init looking to wait until cloud-init is finished, the ``cloud-init status`` subcommand can help block external scripts until cloud-init is done without having to write your own systemd units dependency chains. See :ref:`cli_status` for more info. -.. vi: textwidth=78 +.. vi: textwidth=79 diff --git a/doc/rtd/topics/bugs.rst b/doc/rtd/topics/bugs.rst new file mode 100644 index 00000000..4b60776b --- /dev/null +++ b/doc/rtd/topics/bugs.rst @@ -0,0 +1,108 @@ +.. 
_reporting_bugs:
+
+Reporting Bugs
+**************
+
+This document describes:
+
+1) How to collect information for reporting bugs
+2) How to file bugs to the upstream cloud-init project or for distro-specific
+   packages
+
+Collect Logs
+============
+
+To aid in debugging, please collect the necessary logs. To do so, run the
+`collect-logs` subcommand to produce a tarfile that you can easily upload:
+
+.. code-block:: shell-session
+
+   $ cloud-init collect-logs
+   Wrote /home/ubuntu/cloud-init.tar.gz
+
+If your version of cloud-init does not have the `collect-logs` subcommand,
+then please manually collect the base log files by doing the following:
+
+.. code-block:: shell-session
+
+   $ dmesg > dmesg.txt
+   $ sudo journalctl -o short-precise > journal.txt
+   $ sudo tar -cvf cloud-init.tar dmesg.txt journal.txt /run/cloud-init \
+       /var/log/cloud-init.log /var/log/cloud-init-output.log
+
+Report Upstream Bug
+===================
+
+Bugs for upstream cloud-init are tracked using Launchpad. To file a bug:
+
+1. Collect the necessary debug logs as described above
+2. `Create a Launchpad account`_ or log in to your existing account
+3. `Report an upstream cloud-init bug`_
+
+If debug logs are not provided, you will be asked for them before any
+further time is spent debugging. If you are unable to obtain the required
+logs, please explain why in the bug.
+
+If your bug is for a specific distro using cloud-init, please first consider
+reporting it to that distro, or confirm that it still occurs with the latest
+upstream cloud-init code. See below for details on specific distro reporting.
+
+Distro Specific Issues
+======================
+
+For issues specific to your distro, please use one of the following
+distro-specific reporting mechanisms:
+
+Ubuntu
+------
+
+To report a bug on Ubuntu, use the `ubuntu-bug` command on the affected
+system to automatically collect the necessary logs and file a bug on
+Launchpad:
+
+.. code-block:: shell-session
+
+   $ ubuntu-bug cloud-init
+
+If that does not work or is not an option, please collect the logs using the
+commands in the above Collect Logs section and then report the bug on the
+`Ubuntu bug tracker`_. Make sure to attach your collected logs!
+
+Debian
+------
+
+To file a bug against the Debian package of cloud-init, please use the
+`Debian bug tracker`_ to file against 'Package: cloud-init'. See the
+`Debian bug reporting wiki`_ page for more details.
+
+Red Hat, CentOS, & Fedora
+-------------------------
+
+To file a bug against the Red Hat or Fedora packages of cloud-init, please
+use the `Red Hat bugzilla`_.
+
+SUSE & openSUSE
+---------------
+
+To file a bug against the SUSE packages of cloud-init, please use the
+`SUSE bugzilla`_.
+
+Arch
+----
+
+To file a bug against the Arch package of cloud-init, please use the
+`Arch Linux Bugtracker`_. See the `Arch bug reporting wiki`_ for more
+details.
+
+.. _Create a Launchpad account: https://help.launchpad.net/YourAccount/NewAccount
+.. _Report an upstream cloud-init bug: https://bugs.launchpad.net/cloud-init/+filebug
+.. _Ubuntu bug tracker: https://bugs.launchpad.net/ubuntu/+source/cloud-init/+filebug
+.. _Debian bug tracker: https://bugs.debian.org/cgi-bin/pkgreport.cgi?pkg=cloud-init;dist=unstable
+.. _Debian bug reporting wiki: https://www.debian.org/Bugs/Reporting
+.. _Red Hat bugzilla: https://bugzilla.redhat.com/
+.. _SUSE bugzilla: https://bugzilla.suse.com/index.cgi
+.. _Arch Linux Bugtracker: https://bugs.archlinux.org/
+..
_Arch bug reporting wiki: https://wiki.archlinux.org/index.php/Bug_reporting_guidelines + +.. vi: textwidth=79 diff --git a/doc/rtd/topics/capabilities.rst b/doc/rtd/topics/capabilities.rst deleted file mode 100644 index 0d8b8947..00000000 --- a/doc/rtd/topics/capabilities.rst +++ /dev/null @@ -1,299 +0,0 @@ -.. _capabilities: - -************ -Capabilities -************ - -- Setting a default locale -- Setting an instance hostname -- Generating instance SSH private keys -- Adding SSH keys to a user's ``.ssh/authorized_keys`` so they can log in -- Setting up ephemeral mount points -- Configuring network devices - -User configurability -==================== - -`Cloud-init`_ 's behavior can be configured via user-data. - - User-data can be given by the user at instance launch time. See - :ref:`user_data_formats` for acceptable user-data content. - - -This is done via the ``--user-data`` or ``--user-data-file`` argument to -ec2-run-instances for example. - -* Check your local client's documentation for how to provide a `user-data` - string or `user-data` file to cloud-init on instance creation. - - -Feature detection -================= - -Newer versions of cloud-init may have a list of additional features that they -support. This allows other applications to detect what features the installed -cloud-init supports without having to parse its version number. If present, -this list of features will be located at ``cloudinit.version.FEATURES``. - -Currently defined feature names include: - - - ``NETWORK_CONFIG_V1`` support for v1 networking configuration, - see :ref:`network_config_v1` documentation for examples. - - ``NETWORK_CONFIG_V2`` support for v2 networking configuration, - see :ref:`network_config_v2` documentation for examples. - - -CLI Interface -============= - -The command line documentation is accessible on any cloud-init installed -system: - -.. code-block:: shell-session - - % cloud-init --help - usage: cloud-init [-h] [--version] [--file FILES] - [--debug] [--force] - {init,modules,single,query,dhclient-hook,features,analyze,devel,collect-logs,clean,status} - ... - - optional arguments: - -h, --help show this help message and exit - --version, -v show program's version number and exit - --file FILES, -f FILES - additional yaml configuration files to use - --debug, -d show additional pre-action logging (default: False) - --force force running even if no datasource is found (use at - your own risk) - - Subcommands: - {init,modules,single,query,dhclient-hook,features,analyze,devel,collect-logs,clean,status} - init initializes cloud-init and performs initial modules - modules activates modules using a given configuration key - single run a single module - query Query instance metadata from the command line - dhclient-hook run the dhclient hookto record network info - features list defined features - analyze Devel tool: Analyze cloud-init logs and data - devel Run development tools - collect-logs Collect and tar all cloud-init debug info - clean Remove logs and artifacts so cloud-init can re-run - status Report cloud-init status or wait on completion - - -CLI Subcommand details -====================== - -.. _cli_features: - -cloud-init features -------------------- -Print out each feature supported. If cloud-init does not have the -features subcommand, it also does not support any features described in -this document. - -.. code-block:: shell-session - - % cloud-init features - NETWORK_CONFIG_V1 - NETWORK_CONFIG_V2 - -.. 
_cli_status: - -cloud-init status ------------------ -Report whether cloud-init is running, done, disabled or errored. Exits -non-zero if an error is detected in cloud-init. - -* **--long**: Detailed status information. -* **--wait**: Block until cloud-init completes. - -.. code-block:: shell-session - - % cloud-init status --long - status: done - time: Wed, 17 Jan 2018 20:41:59 +0000 - detail: - DataSourceNoCloud [seed=/var/lib/cloud/seed/nocloud-net][dsmode=net] - - # Cloud-init running still short versus long options - % cloud-init status - status: running - % cloud-init status --long - status: running - time: Fri, 26 Jan 2018 21:39:43 +0000 - detail: - Running in stage: init-local - -.. _cli_collect_logs: - -cloud-init collect-logs ------------------------ -Collect and tar cloud-init generated logs, data files and system -information for triage. This subcommand is integrated with apport. - -**Note**: Ubuntu users can file bugs with `ubuntu-bug cloud-init` to -automaticaly attach these logs to a bug report. - -Logs collected are: - - * /var/log/cloud-init*log - * /run/cloud-init - * cloud-init package version - * dmesg output - * journalctl output - * /var/lib/cloud/instance/user-data.txt - -.. _cli_query: - -cloud-init query ------------------- -Query standardized cloud instance metadata crawled by cloud-init and stored -in ``/run/cloud-init/instance-data.json``. This is a convenience command-line -interface to reference any cached configuration metadata that cloud-init -crawls when booting the instance. See :ref:`instance_metadata` for more info. - -* **--all**: Dump all available instance data as json which can be queried. -* **--instance-data**: Optional path to a different instance-data.json file to - source for queries. -* **--list-keys**: List available query keys from cached instance data. - -.. code-block:: shell-session - - # List all top-level query keys available (includes standardized aliases) - % cloud-init query --list-keys - availability_zone - base64_encoded_keys - cloud_name - ds - instance_id - local_hostname - region - v1 - -* **<varname>**: A dot-delimited variable path into the instance-data.json - object. - -.. code-block:: shell-session - - # Query cloud-init standardized metadata on any cloud - % cloud-init query v1.cloud_name - aws # or openstack, azure, gce etc. - - # Any standardized instance-data under a <v#> key is aliased as a top-level - # key for convenience. - % cloud-init query cloud_name - aws # or openstack, azure, gce etc. - - # Query datasource-specific metadata on EC2 - % cloud-init query ds.meta_data.public_ipv4 - -* **--format** A string that will use jinja-template syntax to render a string - replacing - -.. code-block:: shell-session - - # Generate a custom hostname fqdn based on instance-id, cloud and region - % cloud-init query --format 'custom-{{instance_id}}.{{region}}.{{v1.cloud_name}}.com' - custom-i-0e91f69987f37ec74.us-east-2.aws.com - - -.. note:: - The standardized instance data keys under **v#** are guaranteed not to change - behavior or format. If using top-level convenience aliases for any - standardized instance data keys, the most value (highest **v#**) of that key - name is what is reported as the top-level value. So these aliases act as a - 'latest'. - - -.. _cli_analyze: - -cloud-init analyze ------------------- -Get detailed reports of where cloud-init spends most of its time. See -:ref:`boot_time_analysis` for more info. - -* **blame** Report ordered by most costly operations. 
-* **dump** Machine-readable JSON dump of all cloud-init tracked events. -* **show** show time-ordered report of the cost of operations during each - boot stage. - -.. _cli_devel: - -cloud-init devel ----------------- -Collection of development tools under active development. These tools will -likely be promoted to top-level subcommands when stable. - - * ``cloud-init devel schema``: A **#cloud-config** format and schema - validator. It accepts a cloud-config yaml file and annotates potential - schema errors locally without the need for deployment. Schema - validation is work in progress and supports a subset of cloud-config - modules. - - * ``cloud-init devel render``: Use cloud-init's jinja template render to - process **#cloud-config** or **custom-scripts**, injecting any variables - from ``/run/cloud-init/instance-data.json``. It accepts a user-data file - containing the jinja template header ``## template: jinja`` and renders - that content with any instance-data.json variables present. - - -.. _cli_clean: - -cloud-init clean ----------------- -Remove cloud-init artifacts from /var/lib/cloud and optionally reboot the -machine to so cloud-init re-runs all stages as it did on first boot. - -* **--logs**: Optionally remove /var/log/cloud-init*log files. -* **--reboot**: Reboot the system after removing artifacts. - -.. _cli_init: - -cloud-init init ---------------- -Generally run by OS init systems to execute cloud-init's stages -*init* and *init-local*. See :ref:`boot_stages` for more info. -Can be run on the commandline, but is generally gated to run only once -due to semaphores in **/var/lib/cloud/instance/sem/** and -**/var/lib/cloud/sem**. - -* **--local**: Run *init-local* stage instead of *init*. - -.. _cli_modules: - -cloud-init modules ------------------- -Generally run by OS init systems to execute *modules:config* and -*modules:final* boot stages. This executes cloud config :ref:`modules` -configured to run in the init, config and final stages. The modules are -declared to run in various boot stages in the file -**/etc/cloud/cloud.cfg** under keys **cloud_init_modules**, -**cloud_init_modules** and **cloud_init_modules**. Can be run on the -commandline, but each module is gated to run only once due to semaphores -in ``/var/lib/cloud/``. - -* **--mode (init|config|final)**: Run *modules:init*, *modules:config* or - *modules:final* cloud-init stages. See :ref:`boot_stages` for more info. - -.. _cli_single: - -cloud-init single ------------------ -Attempt to run a single named cloud config module. The following example -re-runs the cc_set_hostname module ignoring the module default frequency -of once-per-instance: - -* **--name**: The cloud-config module name to run -* **--frequency**: Optionally override the declared module frequency - with one of (always|once-per-instance|once) - -.. code-block:: shell-session - - % cloud-init single --name set_hostname --frequency always - -**Note**: Mileage may vary trying to re-run each cloud-config module, as -some are not idempotent. - -.. _Cloud-init: https://launchpad.net/cloud-init -.. vi: textwidth=78 diff --git a/doc/rtd/topics/cli.rst b/doc/rtd/topics/cli.rst new file mode 100644 index 00000000..b32677b0 --- /dev/null +++ b/doc/rtd/topics/cli.rst @@ -0,0 +1,304 @@ +.. _cli: + +CLI Interface +************* + +For the latest list of subcommands and arguments use cloud-init's ``--help`` +option. This can be used against cloud-init itself or any of its subcommands. + +.. 
code-block:: shell-session + + $ cloud-init --help + usage: /usr/bin/cloud-init [-h] [--version] [--file FILES] [--debug] [--force] + {init,modules,single,query,dhclient-hook,features,analyze,devel,collect-logs,clean,status} + ... + + optional arguments: + -h, --help show this help message and exit + --version, -v show program's version number and exit + --file FILES, -f FILES + additional yaml configuration files to use + --debug, -d show additional pre-action logging (default: False) + --force force running even if no datasource is found (use at + your own risk) + + Subcommands: + {init,modules,single,query,dhclient-hook,features,analyze,devel,collect-logs,clean,status} + init initializes cloud-init and performs initial modules + modules activates modules using a given configuration key + single run a single module + query Query standardized instance metadata from the command + line. + dhclient-hook Run the dhclient hook to record network info. + features list defined features + analyze Devel tool: Analyze cloud-init logs and data + devel Run development tools + collect-logs Collect and tar all cloud-init debug info + clean Remove logs and artifacts so cloud-init can re-run. + status Report cloud-init status or wait on completion. + +The rest of this document will give an overview of each of the subcommands. + + +.. _cli_analyze: + +analyze +======= + +Get detailed reports of where cloud-init spends its time during the boot +process. For more complete reference see :ref:`analyze`. + +Possible subcommands include: + +* *blame*: report ordered by most costly operations +* *dump*: machine-readable JSON dump of all cloud-init tracked events +* *show*: show time-ordered report of the cost of operations during each + boot stage +* *boot*: show timestamps from kernel initialization, kernel finish + initialization, and cloud-init start + + +.. _cli_clean: + +clean +===== + +Remove cloud-init artifacts from ``/var/lib/cloud`` to simulate a clean +instance. On reboot, cloud-init will re-run all stages as it did on first boot. + +* *\\-\\-logs*: optionally remove all cloud-init log files in ``/var/log/`` +* *\\-\\-reboot*: reboot the system after removing artifacts + + +.. _cli_collect_logs: + +collect-logs +============ + +Collect and tar cloud-init generated logs, data files, and system +information for triage. This subcommand is integrated with apport. + +Logs collected include: + + * ``/var/log/cloud-init.log`` + * ``/var/log/cloud-init-output.log`` + * ``/run/cloud-init`` + * ``/var/lib/cloud/instance/user-data.txt`` + * cloud-init package version + * ``dmesg`` output + * journalctl output + +.. note:: + + Ubuntu users can file bugs with ``ubuntu-bug cloud-init`` to + automatically attach these logs to a bug report + + +.. _cli_devel: + +devel +===== + +Collection of development tools under active development. These tools will +likely be promoted to top-level subcommands when stable. + +Do **NOT** rely on the output of these commands as they can and will change. + +Current subcommands: + + * ``schema``: a **#cloud-config** format and schema + validator. It accepts a cloud-config yaml file and annotates potential + schema errors locally without the need for deployment. Schema + validation is work in progress and supports a subset of cloud-config + modules. + + * ``render``: use cloud-init's jinja template render to + process **#cloud-config** or **custom-scripts**, injecting any variables + from ``/run/cloud-init/instance-data.json``. 
It accepts a user-data file
+   containing the jinja template header ``## template: jinja`` and renders
+   that content with any instance-data.json variables present.
+
+
+.. _cli_features:
+
+features
+========
+
+Print out each feature supported. If cloud-init does not have the
+features subcommand, it also does not support any features described in
+this document.
+
+.. code-block:: shell-session
+
+   $ cloud-init features
+   NETWORK_CONFIG_V1
+   NETWORK_CONFIG_V2
+
+
+.. _cli_init:
+
+init
+====
+
+Generally run by OS init systems to execute cloud-init's stages
+*init* and *init-local*. See :ref:`boot_stages` for more info.
+Can be run on the commandline, but is generally gated to run only once
+due to semaphores in ``/var/lib/cloud/instance/sem/`` and
+``/var/lib/cloud/sem``.
+
+* *\\-\\-local*: run *init-local* stage instead of *init*
+
+
+.. _cli_modules:
+
+modules
+=======
+
+Generally run by OS init systems to execute *modules:config* and
+*modules:final* boot stages. This executes cloud config :ref:`modules`
+configured to run in the init, config and final stages. The modules are
+declared to run in various boot stages in the file
+``/etc/cloud/cloud.cfg`` under keys:
+
+* *cloud_init_modules*
+* *cloud_config_modules*
+* *cloud_final_modules*
+
+Can be run on the command line, but each module is gated to run only once due
+to semaphores in ``/var/lib/cloud/``.
+
+* *\\-\\-mode [init|config|final]*: run *modules:init*, *modules:config* or
+  *modules:final* cloud-init stages. See :ref:`boot_stages` for more info.
+
+
+.. _cli_query:
+
+query
+=====
+
+Query standardized cloud instance metadata crawled by cloud-init and stored
+in ``/run/cloud-init/instance-data.json``. This is a convenience command-line
+interface to reference any cached configuration metadata that cloud-init
+crawls when booting the instance. See :ref:`instance_metadata` for more info.
+
+* *\\-\\-all*: dump all available instance data as json which can be queried
+* *\\-\\-instance-data*: optional path to a different instance-data.json file
+  to source for queries
+* *\\-\\-list-keys*: list available query keys from cached instance data
+* *\\-\\-format*: a string in jinja-template syntax that is rendered by
+  replacing any referenced instance-data variables
+* *<varname>*: a dot-delimited variable path into the instance-data.json
+  object
+
+Below demonstrates how to list all top-level query keys that are standardized
+aliases:
+
+.. code-block:: shell-session
+
+   $ cloud-init query --list-keys
+   _beta_keys
+   availability_zone
+   base64_encoded_keys
+   cloud_name
+   ds
+   instance_id
+   local_hostname
+   platform
+   public_ssh_keys
+   region
+   sensitive_keys
+   subplatform
+   userdata
+   v1
+   vendordata
+
+Below demonstrates how to query standardized metadata from clouds:
+
+.. code-block:: shell-session
+
+   % cloud-init query v1.cloud_name
+   aws  # or openstack, azure, gce etc.
+
+   # Any standardized instance-data under a <v#> key is aliased as a
+   # top-level key for convenience.
+   % cloud-init query cloud_name
+   aws  # or openstack, azure, gce etc.
+
+   # Query datasource-specific metadata on EC2
+   % cloud-init query ds.meta_data.public_ipv4
+
+.. note::
+
+   The standardized instance data keys under **v#** are guaranteed not to
+   change behavior or format. If using top-level convenience aliases for any
+   standardized instance data keys, the most recent value (highest **v#**) of
+   that key name is what is reported as the top-level value. So these aliases
+   act as a 'latest'.
+
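+Query output can also be consumed directly in shell scripts; a small
+illustrative sketch (assuming a booted instance where the keys listed above
+are present):
+
+.. code-block:: shell-session
+
+   % REGION=$(cloud-init query region)
+   % echo "running in ${REGION}"
+
+This data can then be formatted to generate custom strings or data:
+
+..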
code-block:: shell-session + + # Generate a custom hostname fqdn based on instance-id, cloud and region + % cloud-init query --format 'custom-{{instance_id}}.{{region}}.{{v1.cloud_name}}.com' + custom-i-0e91f69987f37ec74.us-east-2.aws.com + + +.. _cli_single: + +single +====== + +Attempt to run a single named cloud config module. + +* *\\-\\-name*: the cloud-config module name to run +* *\\-\\-frequency*: optionally override the declared module frequency + with one of (always|once-per-instance|once) + +The following example re-runs the cc_set_hostname module ignoring the module +default frequency of once-per-instance: + +.. code-block:: shell-session + + $ cloud-init single --name set_hostname --frequency always + +.. note:: + + Mileage may vary trying to re-run each cloud-config module, as + some are not idempotent. + + +.. _cli_status: + +status +====== + +Report whether cloud-init is running, done, disabled or errored. Exits +non-zero if an error is detected in cloud-init. + +* *\\-\\-long*: detailed status information +* *\\-\\-wait*: block until cloud-init completes + +Below are examples of output when cloud-init is running, showing status and +the currently running modules, as well as when it is done. + +.. code-block:: shell-session + + $ cloud-init status + status: running + + $ cloud-init status --long + status: running + time: Fri, 26 Jan 2018 21:39:43 +0000 + detail: + Running in stage: init-local + + $ cloud-init status + status: done + + $ cloud-init status --long + status: done + time: Wed, 17 Jan 2018 20:41:59 +0000 + detail: + DataSourceNoCloud [seed=/var/lib/cloud/seed/nocloud-net][dsmode=net] + +.. vi: textwidth=79 diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst index e34f145c..3d026143 100644 --- a/doc/rtd/topics/datasources.rst +++ b/doc/rtd/topics/datasources.rst @@ -1,37 +1,125 @@ .. _datasources: -*********** Datasources *********** -What is a datasource? -===================== - Datasources are sources of configuration data for cloud-init that typically -come from the user (aka userdata) or come from the stack that created the -configuration drive (aka metadata). Typical userdata would include files, +come from the user (e.g. userdata) or come from the cloud that created the +configuration drive (e.g. metadata). Typical userdata would include files, yaml, and shell scripts while typical metadata would include server name, -instance id, display name and other cloud specific details. Since there are -multiple ways to provide this data (each cloud solution seems to prefer its -own way) internally a datasource abstract class was created to allow for a -single way to access the different cloud systems methods to provide this data -through the typical usage of subclasses. +instance id, display name and other cloud specific details. + +Since there are multiple ways to provide this data (each cloud solution seems +to prefer its own way) internally a datasource abstract class was created to +allow for a single way to access the different cloud systems methods to provide +this data through the typical usage of subclasses. Any metadata processed by cloud-init's datasources is persisted as -``/run/cloud0-init/instance-data.json``. Cloud-init provides tooling -to quickly introspect some of that data. See :ref:`instance_metadata` for -more information. +``/run/cloud-init/instance-data.json``. Cloud-init provides tooling to quickly +introspect some of that data. See :ref:`instance_metadata` for more +information. 
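+
+For example, on a booted instance the persisted file can be inspected
+directly (a minimal illustration, assuming ``jq`` is installed; the keys
+present vary by datasource):
+
+.. code-block:: shell-session
+
+   $ jq -r '.v1.cloud_name' /run/cloud-init/instance-data.json
+   aws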
+
+Known Sources
+=============
+
+The following is a list of documents for each supported datasource:
+
+.. toctree::
+   :titlesonly:
+
+   datasources/aliyun.rst
+   datasources/altcloud.rst
+   datasources/azure.rst
+   datasources/cloudsigma.rst
+   datasources/cloudstack.rst
+   datasources/configdrive.rst
+   datasources/digitalocean.rst
+   datasources/e24cloud.rst
+   datasources/ec2.rst
+   datasources/exoscale.rst
+   datasources/fallback.rst
+   datasources/gce.rst
+   datasources/maas.rst
+   datasources/nocloud.rst
+   datasources/opennebula.rst
+   datasources/openstack.rst
+   datasources/oracle.rst
+   datasources/ovf.rst
+   datasources/rbxcloud.rst
+   datasources/smartos.rst
+   datasources/zstack.rst
+
+
+Creation
+========
+
+The datasource objects have a few touch points with cloud-init. If you
+are interested in adding a new datasource for your cloud platform, you will
+need to take care of the following items:
+
+* **Identify a mechanism for positive identification of the platform**:
+  It is good practice for a cloud platform to positively identify itself
+  to the guest. This allows the guest to make educated decisions based
+  on the platform on which it is running. On the x86 and arm64 architectures,
+  many clouds identify themselves through DMI data. For example,
+  Oracle's public cloud provides the string 'OracleCloud.com' in the
+  DMI chassis-asset field.
+
+  cloud-init-enabled images produce a log file with details about the
+  platform. Reading through this log in ``/run/cloud-init/ds-identify.log``
+  may provide the information needed to uniquely identify the platform.
+  If the log is not present, you can generate it by running
+  ``./tools/ds-identify`` from source or from the installed location
+  ``/usr/lib/cloud-init/ds-identify``.
+
+  The mechanism used to identify the platform will be required for the
+  ds-identify and datasource module sections below.
+
+* **Add datasource module ``cloudinit/sources/DataSource<CloudPlatform>.py``**:
+  It is suggested that you start by copying one of the simpler datasources
+  such as DataSourceHetzner.
+
+* **Add tests for datasource module**:
+  Add a new file with some tests for the module to
+  ``cloudinit/sources/tests/test_<yourplatform>.py``. For example, see
+  ``cloudinit/sources/tests/test_oracle.py``
+
+* **Update ds-identify**: On systemd systems, ds-identify is used to detect
+  which datasource should be enabled or if cloud-init should run at all.
+  You'll need to make changes to ``tools/ds-identify``.
+
+* **Add tests for ds-identify**: Add relevant tests in a new class to
+  ``tests/unittests/test_ds_identify.py``. You can use ``TestOracle`` as an
+  example.
+
+* **Add your datasource name to the builtin list of datasources:** Add
+  your datasource module name to the end of the ``datasource_list``
+  entry in ``cloudinit/settings.py``.
+
+* **Add your cloud platform to apport collection prompts:** Update the
+  list of cloud platforms in ``cloudinit/apport.py``. This list will be
+  provided to the user who invokes ``ubuntu-bug cloud-init``.
+
+* **Enable datasource by default in Ubuntu packaging branches:**
+  Ubuntu packaging branches contain a template file
+  ``debian/cloud-init.templates`` that ultimately sets the default
+  datasource_list when installed via package. This file needs updating when
+  the commit gets into a package.
+ +* **Add documentation for your datasource**: You should add a new + file in ``doc/datasources/<cloudplatform>.rst`` + + +API +=== -Datasource API --------------- The current interface that a datasource object must provide is the following: .. sourcecode:: python # returns a mime multipart message that contains # all the various fully-expanded components that - # were found from processing the raw userdata string + # were found from processing the raw user data string # - when filtering only the mime messages targeting # this instance id will be returned (or messages with # no instance id) @@ -52,7 +140,7 @@ The current interface that a datasource object must provide is the following: # because cloud-config content would be handled elsewhere def get_config_obj(self) - #returns a list of public ssh keys + # returns a list of public SSH keys def get_public_ssh_keys(self) # translates a device 'short' name into the actual physical device @@ -73,37 +161,10 @@ The current interface that a datasource object must provide is the following: def get_instance_id(self) # gets the fully qualified domain name that this host should be using - # when configuring network or hostname releated settings, typically + # when configuring network or hostname related settings, typically # assigned either by the cloud provider or the user creating the vm def get_hostname(self, fqdn=False) def get_package_mirror_info(self) - -Datasource Documentation -======================== -The following is a list of the implemented datasources. -Follow for more information. - -.. toctree:: - :maxdepth: 2 - - datasources/aliyun.rst - datasources/altcloud.rst - datasources/azure.rst - datasources/cloudsigma.rst - datasources/cloudstack.rst - datasources/configdrive.rst - datasources/digitalocean.rst - datasources/ec2.rst - datasources/maas.rst - datasources/nocloud.rst - datasources/opennebula.rst - datasources/openstack.rst - datasources/oracle.rst - datasources/ovf.rst - datasources/smartos.rst - datasources/fallback.rst - datasources/gce.rst - -.. vi: textwidth=78 +.. vi: textwidth=79 diff --git a/doc/rtd/topics/datasources/altcloud.rst b/doc/rtd/topics/datasources/altcloud.rst index eeb197f2..9d7e3de1 100644 --- a/doc/rtd/topics/datasources/altcloud.rst +++ b/doc/rtd/topics/datasources/altcloud.rst @@ -3,24 +3,25 @@ Alt Cloud ========= -The datasource altcloud will be used to pick up user data on `RHEVm`_ and `vSphere`_. +The datasource altcloud will be used to pick up user data on `RHEVm`_ and +`vSphere`_. RHEVm ----- For `RHEVm`_ v3.0 the userdata is injected into the VM using floppy -injection via the `RHEVm`_ dashboard "Custom Properties". +injection via the `RHEVm`_ dashboard "Custom Properties". The format of the Custom Properties entry must be: :: - + floppyinject=user-data.txt:<base64 encoded data> For example to pass a simple bash script: .. sourcecode:: sh - + % cat simple_script.bash #!/bin/bash echo "Hello Joe!" >> /tmp/JJV_Joe_out.txt @@ -38,7 +39,7 @@ set the "Custom Properties" when creating the RHEMv v3.0 VM to: **NOTE:** The prefix with file name must be: ``floppyinject=user-data.txt:`` It is also possible to launch a `RHEVm`_ v3.0 VM and pass optional user -data to it using the Delta Cloud. +data to it using the Delta Cloud. For more information on Delta Cloud see: http://deltacloud.apache.org @@ -46,12 +47,12 @@ vSphere ------- For VMWare's `vSphere`_ the userdata is injected into the VM as an ISO -via the cdrom. This can be done using the `vSphere`_ dashboard +via the cdrom. 
This can be done using the `vSphere`_ dashboard by connecting an ISO image to the CD/DVD drive. To pass this example script to cloud-init running in a `vSphere`_ VM set the CD/DVD drive when creating the vSphere VM to point to an -ISO on the data store. +ISO on the data store. **Note:** The ISO must contain the user data. @@ -61,13 +62,13 @@ Create the ISO ^^^^^^^^^^^^^^ .. sourcecode:: sh - + % mkdir my-iso NOTE: The file name on the ISO must be: ``user-data.txt`` .. sourcecode:: sh - + % cp simple_script.bash my-iso/user-data.txt % genisoimage -o user-data.iso -r my-iso @@ -75,7 +76,7 @@ Verify the ISO ^^^^^^^^^^^^^^ .. sourcecode:: sh - + % sudo mkdir /media/vsphere_iso % sudo mount -o loop user-data.iso /media/vsphere_iso % cat /media/vsphere_iso/user-data.txt @@ -84,7 +85,7 @@ Verify the ISO Then, launch the `vSphere`_ VM the ISO user-data.iso attached as a CDROM. It is also possible to launch a `vSphere`_ VM and pass optional user -data to it using the Delta Cloud. +data to it using the Delta Cloud. For more information on Delta Cloud see: http://deltacloud.apache.org diff --git a/doc/rtd/topics/datasources/azure.rst b/doc/rtd/topics/datasources/azure.rst index f73c3694..1427fb3d 100644 --- a/doc/rtd/topics/datasources/azure.rst +++ b/doc/rtd/topics/datasources/azure.rst @@ -5,9 +5,30 @@ Azure This datasource finds metadata and user-data from the Azure cloud platform. -Azure Platform --------------- -The azure cloud-platform provides initial data to an instance via an attached +walinuxagent +------------ +walinuxagent has several functions within images. For cloud-init +specifically, the relevant functionality it performs is to register the +instance with the Azure cloud platform at boot so networking will be +permitted. For more information about the other functionality of +walinuxagent, see `Azure's documentation +<https://github.com/Azure/WALinuxAgent#introduction>`_ for more details. +(Note, however, that only one of walinuxagent's provisioning and cloud-init +should be used to perform instance customisation.) + +If you are configuring walinuxagent yourself, you will want to ensure that you +have `Provisioning.UseCloudInit +<https://github.com/Azure/WALinuxAgent#provisioningusecloudinit>`_ set to +``y``. + + +Builtin Agent +------------- +An alternative to using walinuxagent to register to the Azure cloud platform +is to use the ``__builtin__`` agent command. This section contains more +background on what that code path does, and how to enable it. + +The Azure cloud platform provides initial data to an instance via an attached CD formatted in UDF. That CD contains a 'ovf-env.xml' file that provides some information. Additional information is obtained via interaction with the "endpoint". @@ -23,44 +44,36 @@ information in json format to /run/cloud-init/dhclient.hook/<interface>.json. In order for cloud-init to leverage this method to find the endpoint, the cloud.cfg file must contain: -datasource: - Azure: - set_hostname: False - agent_command: __builtin__ +.. sourcecode:: yaml + + datasource: + Azure: + set_hostname: False + agent_command: __builtin__ If those files are not available, the fallback is to check the leases file for the endpoint server (again option 245). You can define the path to the lease file with the 'dhclient_lease_file' -configuration. The default value is /var/lib/dhcp/dhclient.eth0.leases. +configuration. 
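+
+For example, a minimal sketch (using the path documented as the default for
+this setting):
+
+.. sourcecode:: yaml
+
+   datasource:
+     Azure:
+       dhclient_lease_file: /var/lib/dhcp/dhclient.eth0.leases
+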
- dhclient_lease_file: /var/lib/dhcp/dhclient.eth0.leases
-
-walinuxagent
-------------
-In order to operate correctly, cloud-init needs walinuxagent to provide much
-of the interaction with azure. In addition to "provisioning" code, walinux
-does the following on the agent is a long running daemon that handles the
-following things:
-- generate a x509 certificate and send that to the endpoint
-waagent.conf config
-^^^^^^^^^^^^^^^^^^^
-in order to use waagent.conf with cloud-init, the following settings are recommended. Other values can be changed or set to the defaults.
+IMDS
+----
+Azure provides the `instance metadata service (IMDS)
+<https://docs.microsoft.com/en-us/azure/virtual-machines/windows/instance-metadata-service>`_
+which is a REST service on ``169.254.169.254`` providing additional
+configuration information to the instance. Cloud-init uses the IMDS for:
-
- ::
-
-    # disabling provisioning turns off all 'Provisioning.*' function
-    Provisioning.Enabled=n
-    # this is currently not handled by cloud-init, so let walinuxagent do it.
-    ResourceDisk.Format=y
-    ResourceDisk.MountPoint=/mnt
+
+- network configuration for the instance, which is applied per boot
+- a pre-provisioning gate which blocks instance configuration until the
+  Azure fabric is ready to provision
Configuration
-------------
The following configuration can be set for the datasource in system
-configuration (in `/etc/cloud/cloud.cfg` or `/etc/cloud/cloud.cfg.d/`).
+configuration (in ``/etc/cloud/cloud.cfg`` or ``/etc/cloud/cloud.cfg.d/``).
The settings that may be configured are:
@@ -69,20 +82,33 @@ The settings that may be configured are:
* **agent_command**: Either __builtin__ (default) or a command provided
  command to obtain metadata.
* **apply_network_config**: Boolean set to True to use network configuration
  described by Azure's IMDS endpoint instead of fallback network config of
-  dhcp on eth0. Default is True. For Ubuntu 16.04 or earlier, default is False.
+  dhcp on eth0. Default is True. For Ubuntu 16.04 or earlier, default is
+  False.
* **data_dir**: Path used to read metadata files and write crawled data.
* **dhclient_lease_file**: The fallback lease file to source when looking for
  custom DHCP option 245 from Azure fabric.
* **disk_aliases**: A dictionary defining which device paths should be
  interpreted as ephemeral images. See cc_disk_setup module for more info.
-* **hostname_bounce**: A dictionary Azure hostname bounce behavior to react to
-  metadata changes.
+* **hostname_bounce**: A dictionary of Azure hostname bounce behavior used to
+  react to metadata changes. Azure will throttle ifup/down in some cases after
+  metadata has been updated, to inform the dhcp server about updated
+  hostnames. The '``hostname_bounce: command``' entry can be either the
+  literal string 'builtin' or a command to execute. The command will be
+  invoked after the hostname is set, and will have the 'interface' in its
+  environment. If ``set_hostname`` is not true, then ``hostname_bounce`` will
+  be ignored. An example might be:
+
+  ``command: ["sh", "-c", "killall dhclient; dhclient $interface"]``
+
* **set_hostname**: Boolean set to True when we want Azure to set the
  hostname based on metadata.
+Configuration for the datasource can also be read from a
+``dscfg`` entry in the ``LinuxProvisioningConfigurationSet``. Content in
+the dscfg node is expected to be base64 encoded yaml content, and it will be
+merged into the 'datasource: Azure' entry.
+
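+For illustration, a fragment such as the following could be encoded for a
+dscfg node (a sketch; the key used is one of the settings documented above):
+
+.. code-block:: shell-session
+
+   $ printf 'set_hostname: false\n' | base64
+   c2V0X2hvc3RuYW1lOiBmYWxzZQo=
+
An example configuration with the default values is provided below:

..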
sourcecode:: yaml @@ -143,37 +169,6 @@ Example: </LinuxProvisioningConfigurationSet> </wa:ProvisioningSection> -Configuration -------------- -Configuration for the datasource can be read from the system config's or set -via the `dscfg` entry in the `LinuxProvisioningConfigurationSet`. Content in -dscfg node is expected to be base64 encoded yaml content, and it will be -merged into the 'datasource: Azure' entry. - -The '``hostname_bounce: command``' entry can be either the literal string -'builtin' or a command to execute. The command will be invoked after the -hostname is set, and will have the 'interface' in its environment. If -``set_hostname`` is not true, then ``hostname_bounce`` will be ignored. - -An example might be: - command: ["sh", "-c", "killall dhclient; dhclient $interface"] - -.. code:: yaml - - datasource: - agent_command - Azure: - agent_command: [service, walinuxagent, start] - set_hostname: True - hostname_bounce: - # the name of the interface to bounce - interface: eth0 - # policy can be 'on', 'off' or 'force' - policy: on - # the method 'bounce' command. - command: "builtin" - hostname_command: "hostname" - hostname -------- When the user launches an instance, they provide a hostname for that instance. diff --git a/doc/rtd/topics/datasources/cloudstack.rst b/doc/rtd/topics/datasources/cloudstack.rst index a3101ed7..da183226 100644 --- a/doc/rtd/topics/datasources/cloudstack.rst +++ b/doc/rtd/topics/datasources/cloudstack.rst @@ -4,10 +4,10 @@ CloudStack ========== `Apache CloudStack`_ expose user-data, meta-data, user password and account -sshkey thru the Virtual-Router. The datasource obtains the VR address via +SSH key thru the Virtual-Router. The datasource obtains the VR address via dhcp lease information given to the instance. For more details on meta-data and user-data, -refer the `CloudStack Administrator Guide`_. +refer the `CloudStack Administrator Guide`_. URLs to access user-data and meta-data from the Virtual Machine. Here 10.1.1.1 is the Virtual Router IP: diff --git a/doc/rtd/topics/datasources/configdrive.rst b/doc/rtd/topics/datasources/configdrive.rst index f1a488a2..4fcbccee 100644 --- a/doc/rtd/topics/datasources/configdrive.rst +++ b/doc/rtd/topics/datasources/configdrive.rst @@ -6,7 +6,7 @@ Config Drive The configuration drive datasource supports the `OpenStack`_ configuration drive disk. - See `the config drive extension`_ and `introduction`_ in the public + See `the config drive extension`_ and `metadata introduction`_ in the public documentation for more information. By default, cloud-init does *always* consider this source to be a full-fledged @@ -64,7 +64,7 @@ The following criteria are required to as a config drive: :: openstack/ - - 2012-08-10/ or latest/ + - 2012-08-10/ or latest/ - meta_data.json - user_data (not mandatory) - content/ @@ -83,7 +83,7 @@ only) file in the following ways. :: - dsmode: + dsmode: values: local, net, pass default: pass @@ -97,10 +97,10 @@ The difference between 'local' and 'net' is that local will not require networking to be up before user-data actions (or boothooks) are run. :: - + instance-id: default: iid-dsconfigdrive - + This is utilized as the metadata's instance-id. It should generally be unique, as it is what is used to determine "is this a new instance". @@ -108,24 +108,24 @@ be unique, as it is what is used to determine "is this a new instance". public-keys: default: None - + If present, these keys will be used as the public keys for the instance. This value overrides the content in authorized_keys. 
Note: it is likely preferable to provide keys via user-data

::
-
+
user-data:
    default: None
-
-This provides cloud-init user-data. See :ref:`examples <yaml_examples>` for
+
+This provides cloud-init user-data. See :ref:`examples <yaml_examples>` for
what can be present here.
.. _OpenStack: http://www.openstack.org/
-.. _introduction: http://docs.openstack.org/trunk/openstack-compute/admin/content/config-drive.html
+.. _metadata introduction: https://docs.openstack.org/nova/latest/user/metadata.html#config-drives
.. _python-novaclient: https://github.com/openstack/python-novaclient
.. _iso9660: https://en.wikipedia.org/wiki/ISO_9660
.. _vfat: https://en.wikipedia.org/wiki/File_Allocation_Table
-.. _the config drive extension: http://docs.openstack.org/user-guide/content/config-drive.html
+.. _the config drive extension: https://docs.openstack.org/nova/latest/admin/config-drive.html
.. vi: textwidth=78
diff --git a/doc/rtd/topics/datasources/digitalocean.rst b/doc/rtd/topics/datasources/digitalocean.rst
index 938ede89..88f1e5f5 100644
--- a/doc/rtd/topics/datasources/digitalocean.rst
+++ b/doc/rtd/topics/datasources/digitalocean.rst
@@ -20,8 +20,10 @@ DigitalOcean's datasource can be configured as follows:
      retries: 3
      timeout: 2
-- *retries*: Determines the number of times to attempt to connect to the metadata service
-- *timeout*: Determines the timeout in seconds to wait for a response from the metadata service
+- *retries*: Determines the number of times to attempt to connect to the
+  metadata service
+- *timeout*: Determines the timeout in seconds to wait for a response from the
+  metadata service
.. _DigitalOcean: http://digitalocean.com/
.. _metadata service: https://developers.digitalocean.com/metadata/
diff --git a/doc/rtd/topics/datasources/e24cloud.rst b/doc/rtd/topics/datasources/e24cloud.rst
new file mode 100644
index 00000000..de9a4127
--- /dev/null
+++ b/doc/rtd/topics/datasources/e24cloud.rst
@@ -0,0 +1,9 @@
+.. _datasource_e24cloud:
+
+E24Cloud
+========
+`E24Cloud <https://www.e24cloud.com/en/>`_ platform provides an AWS EC2
+metadata service clone. It identifies itself to guests using the DMI
+system-manufacturer field (/sys/class/dmi/id/sys_vendor).
+
+.. vi: textwidth=78
diff --git a/doc/rtd/topics/datasources/ec2.rst b/doc/rtd/topics/datasources/ec2.rst
index 64c325d8..a90f3779 100644
--- a/doc/rtd/topics/datasources/ec2.rst
+++ b/doc/rtd/topics/datasources/ec2.rst
@@ -13,7 +13,7 @@ instance metadata.
Metadata is accessible via the following URL:
::
-
+
    GET http://169.254.169.254/2009-04-04/meta-data/
    ami-id
    ami-launch-index
@@ -34,19 +34,20 @@ Metadata is accessible via the following URL:
Userdata is accessible via the following URL:
::
-
+
    GET http://169.254.169.254/2009-04-04/user-data
    1234,fred,reboot,true | 4512,jimbo, | 173,,,
Note that there are multiple versions of this data provided, cloud-init
by default uses **2009-04-04** but newer versions can be supported with
relative ease (newer versions have more data exposed, while maintaining
-backward compatibility with the previous versions).
-To see which versions are supported from your cloud provider use the following URL: +To see which versions are supported from your cloud provider use the following +URL: :: - + GET http://169.254.169.254/ 1.0 2007-01-19 @@ -90,4 +91,15 @@ An example configuration with the default values is provided below: max_wait: 120 timeout: 50 +Notes +----- + * There are two types of EC2 instances network-wise: VPC ones (Virtual Private + Cloud) and Classic ones (also known as non-VPC). One major difference + between them is that Classic instances have their MAC address changed on + stop/restart operations, so cloud-init will recreate the network config + file for EC2 Classic instances every boot. On VPC instances this file is + generated only on the first boot of the instance. + The check for the instance type is performed by the is_classic_instance() + method. + .. vi: textwidth=78
diff --git a/doc/rtd/topics/datasources/exoscale.rst b/doc/rtd/topics/datasources/exoscale.rst new file mode 100644 index 00000000..9074edc6 --- /dev/null +++ b/doc/rtd/topics/datasources/exoscale.rst @@ -0,0 +1,68 @@ +.. _datasource_exoscale: + +Exoscale +======== + +This datasource supports reading from the metadata server used on the +`Exoscale platform <https://exoscale.com>`_. + +Use of the Exoscale datasource is recommended to benefit from new features of +the Exoscale platform. + +The datasource relies on the availability of a compatible metadata server +(``http://169.254.169.254`` is used by default) and its companion password +server, reachable at the same address (by default on port 8080). + +Crawling of metadata +-------------------- + +The metadata service and password server are crawled slightly differently: + + * The "metadata service" is crawled every boot. + * The password server is also crawled every boot (the Exoscale datasource + forces the password module to run with "frequency always"). + +In the password server case, the following rules apply in order to enable the +"restore instance password" functionality: + + * If a password is returned by the password server, it is then marked "saved" + by the cloud-init datasource. Subsequent boots will skip setting the + password (the password server will return "saved_password"). + * When the instance password is reset (via the Exoscale UI), the password + server will return the non-empty password at next boot, therefore causing + cloud-init to reset the instance's password. + +Configuration +------------- + +Users of this datasource are discouraged from changing the default settings +unless instructed to by Exoscale support. + +The following settings are available and can be set for the datasource in +system configuration (in `/etc/cloud/cloud.cfg.d/`). + +The settings available are: + + * **metadata_url**: The URL for the metadata service (defaults to + ``http://169.254.169.254``) + * **api_version**: The API version path on which to query the instance + metadata (defaults to ``1.0``) + * **password_server_port**: The port (on the metadata server) on which the + password server listens (defaults to ``8080``). + * **timeout**: The timeout value provided to urlopen for each individual http + request (defaults to ``10``) + * **retries**: The number of retries that should be done for an http request + (defaults to ``6``) + + +An example configuration with the default values is provided below: 

..
sourcecode:: yaml + + datasource: + Exoscale: + metadata_url: "http://169.254.169.254" + api_version: "1.0" + password_server_port: 8080 + timeout: 10 + retries: 6 diff --git a/doc/rtd/topics/datasources/nocloud.rst b/doc/rtd/topics/datasources/nocloud.rst index 08578e86..bc96f7fe 100644 --- a/doc/rtd/topics/datasources/nocloud.rst +++ b/doc/rtd/topics/datasources/nocloud.rst @@ -9,7 +9,7 @@ network at all). You can provide meta-data and user-data to a local vm boot via files on a `vfat`_ or `iso9660`_ filesystem. The filesystem volume label must be -``cidata``. +``cidata`` or ``CIDATA``. Alternatively, you can provide meta-data via kernel command line or SMBIOS "serial number" option. The data must be passed in the form of a string: @@ -57,24 +57,24 @@ Given a disk ubuntu 12.04 cloud image in 'disk.img', you can create a sufficient disk by following the example below. :: - + ## create user-data and meta-data files that will be used ## to modify image on first boot $ { echo instance-id: iid-local01; echo local-hostname: cloudimg; } > meta-data - + $ printf "#cloud-config\npassword: passw0rd\nchpasswd: { expire: False }\nssh_pwauth: True\n" > user-data - + ## create a disk to attach with some user-data and meta-data $ genisoimage -output seed.iso -volid cidata -joliet -rock user-data meta-data - + ## alternatively, create a vfat filesystem with same files ## $ truncate --size 2M seed.img ## $ mkfs.vfat -n cidata seed.img ## $ mcopy -oi seed.img user-data meta-data :: - + ## create a new qcow image to boot, backed by your original image $ qemu-img create -f qcow2 -b disk.img boot-disk.img - + ## boot the image and login as 'ubuntu' with password 'passw0rd' ## note, passw0rd was set as password through the user-data above, ## there is no password set on these images. @@ -88,12 +88,12 @@ to determine if this is "first boot". So if you are making updates to user-data you will also have to change that, or start the disk fresh. Also, you can inject an ``/etc/network/interfaces`` file by providing the -content for that file in the ``network-interfaces`` field of metadata. +content for that file in the ``network-interfaces`` field of metadata. Example metadata: :: - + instance-id: iid-abcdefg network-interfaces: | iface eth0 inet static diff --git a/doc/rtd/topics/datasources/opennebula.rst b/doc/rtd/topics/datasources/opennebula.rst index 7c0367c4..8e7c2558 100644 --- a/doc/rtd/topics/datasources/opennebula.rst +++ b/doc/rtd/topics/datasources/opennebula.rst @@ -21,7 +21,7 @@ Datasource configuration Datasource accepts following configuration options. :: - + dsmode: values: local, net, disabled default: net @@ -30,7 +30,7 @@ Tells if this datasource will be processed in 'local' (pre-networking) or 'net' (post-networking) stage or even completely 'disabled'. :: - + parseuser: default: nobody @@ -46,7 +46,7 @@ The following criteria are required: or have a *filesystem* label of **CONTEXT** or **CDROM** 2. Must contain file *context.sh* with contextualization variables. File is generated by OpenNebula, it has a KEY='VALUE' format and - can be easily read by bash + can be easily read by bash Contextualization variables ~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -57,7 +57,7 @@ the OpenNebula documentation. Where multiple similar variables are specified, only first found is taken. :: - + DSMODE Datasource mode configuration override. Values: local, net, disabled. @@ -75,30 +75,30 @@ Datasource mode configuration override. Values: local, net, disabled. Static `network configuration`_. 
:: - + HOSTNAME Instance hostname. :: - + PUBLIC_IP IP_PUBLIC ETH0_IP If no hostname has been specified, cloud-init will try to create hostname -from instance's IP address in 'local' dsmode. In 'net' dsmode, cloud-init +from instance's IP address in 'local' dsmode. In 'net' dsmode, cloud-init tries to resolve one of its IP addresses to get hostname. :: - + SSH_KEY SSH_PUBLIC_KEY One or multiple SSH keys (separated by newlines) can be specified. :: - + USER_DATA USERDATA @@ -111,7 +111,7 @@ This example cloud-init configuration (*cloud.cfg*) enables OpenNebula datasource only in 'net' mode. :: - + disable_ec2_metadata: True datasource_list: ['OpenNebula'] datasource: @@ -123,17 +123,17 @@ Example VM's context section ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :: - + CONTEXT=[ PUBLIC_IP="$NIC[IP]", - SSH_KEY="$USER[SSH_KEY] - $USER[SSH_KEY1] + SSH_KEY="$USER[SSH_KEY] + $USER[SSH_KEY1] $USER[SSH_KEY2] ", USER_DATA="#cloud-config # see https://help.ubuntu.com/community/CloudInit - + packages: [] - + mounts: - [vdc,none,swap,sw,0,0] runcmd:
diff --git a/doc/rtd/topics/datasources/openstack.rst b/doc/rtd/topics/datasources/openstack.rst index 421da08f..8ce2a53d 100644 --- a/doc/rtd/topics/datasources/openstack.rst +++ b/doc/rtd/topics/datasources/openstack.rst @@ -78,6 +78,7 @@ upgrade packages and install ``htop`` on all instances: {"cloud-init": "#cloud-config\npackage_upgrade: True\npackages:\n - htop"} For more general information about how cloud-init handles vendor data, -including how it can be disabled by users on instances, see :doc:`/topics/vendordata`. +including how it can be disabled by users on instances, see +:doc:`/topics/vendordata`. .. vi: textwidth=78
diff --git a/doc/rtd/topics/datasources/oracle.rst b/doc/rtd/topics/datasources/oracle.rst index f2383cee..98c4657c 100644 --- a/doc/rtd/topics/datasources/oracle.rst +++ b/doc/rtd/topics/datasources/oracle.rst @@ -8,7 +8,7 @@ This datasource reads metadata, vendor-data and user-data from Oracle Platform --------------- -OCI provides bare metal and virtual machines. In both cases, +OCI provides bare metal and virtual machines. In both cases, the platform identifies itself via DMI data in the chassis asset tag with the string 'OracleCloud.com'. @@ -22,5 +22,28 @@ Cloud-init has a specific datasource for Oracle in order to: implementation. +Configuration +------------- + +The following configuration can be set for the datasource in system +configuration (in ``/etc/cloud/cloud.cfg`` or ``/etc/cloud/cloud.cfg.d/``). + +The settings that may be configured are: + +* **configure_secondary_nics**: A boolean, defaulting to False. If set + to True on an OCI Virtual Machine, cloud-init will fetch networking + metadata from Oracle's IMDS and use it to configure the non-primary + network interface controllers in the system. If set to True on an + OCI Bare Metal Machine, it will have no effect (though this may + change in the future). + +An example configuration with the default values is provided below: + +.. sourcecode:: yaml + + datasource: + Oracle: + configure_secondary_nics: false + .. _Oracle Compute Infrastructure: https://cloud.oracle.com/ .. vi: textwidth=78
diff --git a/doc/rtd/topics/datasources/rbxcloud.rst b/doc/rtd/topics/datasources/rbxcloud.rst new file mode 100644 index 00000000..52ec02ff --- /dev/null +++ b/doc/rtd/topics/datasources/rbxcloud.rst @@ -0,0 +1,25 @@ +.. _datasource_rbx: + +Rbx Cloud +========= + +The Rbx datasource consumes the metadata drive available on the +`HyperOne`_ and `Rootbox`_ platforms. + +The datasource supports network configuration, hostname, user accounts and +user metadata. + +Metadata drive +-------------- + +Drive metadata is a `FAT`_-formatted partition with the ``CLOUDMD`` label on +the system disk. Its contents are refreshed each time the virtual machine +is restarted, if the partition exists.
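To locate the metadata partition from inside a guest, the filesystem label can be looked up with ``blkid`` (a minimal sketch; requires root, and the device path shown is illustrative):

.. code-block:: shell-session

    $ sudo blkid -t LABEL=CLOUDMD
    /dev/sdb1: LABEL="CLOUDMD" TYPE="vfat"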
+For more information see +`HyperOne Virtual Machine docs`_. + +.. _HyperOne: http://www.hyperone.com/ +.. _Rootbox: https://rootbox.com/ +.. _HyperOne Virtual Machine docs: http://www.hyperone.com/ +.. _FAT: https://en.wikipedia.org/wiki/File_Allocation_Table + +.. vi: textwidth=78
diff --git a/doc/rtd/topics/datasources/smartos.rst b/doc/rtd/topics/datasources/smartos.rst index cb9a128e..be11dfbb 100644 --- a/doc/rtd/topics/datasources/smartos.rst +++ b/doc/rtd/topics/datasources/smartos.rst @@ -15,7 +15,8 @@ second serial console. On Linux, this is /dev/ttyS1. The data is provided via a simple protocol: something queries for the data, the console responds with the status and if "SUCCESS" returns until a single ".\n". -New versions of the SmartOS tooling will include support for base64 encoded data. +New versions of the SmartOS tooling will include support for base64 encoded +data. Meta-data channels ------------------ @@ -27,7 +28,7 @@ channels of SmartOS. - per the spec, user-data is for consumption by the end-user, not provisioning tools - - cloud-init entirely ignores this channel other than writting it to disk + - cloud-init entirely ignores this channel other than writing it to disk - removal of the meta-data key means that /var/db/user-data gets removed - a backup of previous meta-data is maintained as /var/db/user-data.<timestamp>. <timestamp> is the epoch time when @@ -42,8 +43,9 @@ channels of SmartOS. - <timestamp> is the epoch time when cloud-init ran. - when the 'user-script' meta-data key goes missing, the user-script is removed from the file system, although a backup is maintained. - - if the script is not shebanged (i.e. starts with #!<executable>), then - or is not an executable, cloud-init will add a shebang of "#!/bin/bash" + - if the script does not start with a shebang (i.e. with #!<executable>) or + is not executable, cloud-init will add a shebang of "#!/bin/bash" * cloud-init:user-data is treated like on other Clouds. @@ -133,7 +135,7 @@ or not to base64 decode something: * base64_all: Except for excluded keys, attempt to base64 decode the values. If the value fails to decode properly, it will be returned in its text - * base64_keys: A comma deliminated list of which keys are base64 encoded. + * base64_keys: A comma delimited list of which keys are base64 encoded. * b64-<key>: for any key, if there exists an entry in the metadata for 'b64-<key>' Then 'b64-<key>' is expected to be a plaintext boolean indicating whether
diff --git a/doc/rtd/topics/datasources/zstack.rst b/doc/rtd/topics/datasources/zstack.rst new file mode 100644 index 00000000..93a2791c --- /dev/null +++ b/doc/rtd/topics/datasources/zstack.rst @@ -0,0 +1,37 @@ +.. _datasource_zstack: + +ZStack +====== +The ZStack platform provides an AWS Ec2 metadata service, but with a different +datasource identity. More information about ZStack can be found at +`ZStack <https://www.zstack.io>`__.
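The identity check described in the Discovery section below can be reproduced manually from inside a guest (a minimal sketch; requires root, and the tag value shown is illustrative):

.. code-block:: shell-session

    $ sudo dmidecode -s chassis-asset-tag
    9d03e6f5ad344c2aa0bca79f1538d7e0.zstack.io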
+ +Discovery +--------- +To determine whether a VM is running on the ZStack platform, cloud-init +checks the DMI information via 'dmidecode -s chassis-asset-tag'; if the +output ends with '.zstack.io', it is running on the ZStack platform. + + +Metadata +^^^^^^^^ +As on EC2, instance metadata can be queried at + +:: + + GET http://169.254.169.254/2009-04-04/meta-data/ + instance-id + local-hostname + +Userdata +^^^^^^^^ +As on EC2, instance userdata can be queried at + +:: + + GET http://169.254.169.254/2009-04-04/user-data/ + meta_data.json + user_data + password + +.. vi: textwidth=78
diff --git a/doc/rtd/topics/debugging.rst b/doc/rtd/topics/debugging.rst index 51363ea5..0d416f32 100644 --- a/doc/rtd/topics/debugging.rst +++ b/doc/rtd/topics/debugging.rst @@ -68,6 +68,19 @@ subcommands default to reading /var/log/cloud-init.log. 00.00100s (modules-final/config-rightscale_userdata) ... +* ``analyze boot`` Makes subprocess calls to the kernel in order to get relevant + pre-cloud-init timestamps, such as the kernel start, kernel finish boot, and + cloud-init start. + +.. code-block:: shell-session + + $ cloud-init analyze boot + -- Most Recent Boot Record -- + Kernel Started at: 2019-06-13 15:59:55.809385 + Kernel ended boot at: 2019-06-13 16:00:00.944740 + Kernel time to boot (seconds): 5.135355 + Cloud-init start: 2019-06-13 16:00:05.738396 + Time between Kernel boot and Cloud-init start (seconds): 4.793656 + Analyze quickstart - LXC --------------------------- @@ -150,3 +163,104 @@ commandline: Inspect cloud-init.log for output of what operations were performed as a result. + +.. _proposed_sru_testing: + +Stable Release Updates (SRU) testing for cloud-init +=================================================== +Once an Ubuntu release is stable (i.e. after it is released), updates for it +must follow a special procedure called a "stable release update" (or `SRU`_). + +The cloud-init project has a specific process it follows when validating +a cloud-init SRU, documented in the `CloudinitUpdates`_ wiki page. + +Generally an SRU test of cloud-init performs the following: + + * Install a pre-release version of cloud-init from the + **-proposed** APT pocket (e.g. **bionic-proposed**) + * Upgrade cloud-init and attempt a clean run of cloud-init to assert the new + version of cloud-init works properly on the specific platform and Ubuntu + series + * Check for tracebacks or errors in behavior + + +Manual SRU verification procedure +--------------------------------- +Below are steps to manually test a pre-release version of cloud-init +from **-proposed**: + +.. note:: + For each Ubuntu SRU, the Ubuntu Server team manually validates the new + version of cloud-init on these platforms: **Amazon EC2, Azure, GCE, + OpenStack, Oracle, Softlayer (IBM), LXD, KVM** + +1. Launch a VM on your favorite platform, providing this cloud-config + user-data and replacing `<YOUR_LAUNCHPAD_USERNAME>` with your username: + +.. code-block:: yaml + + ## template: jinja + #cloud-config + ssh_import_id: [<YOUR_LAUNCHPAD_USERNAME>] + hostname: SRU-worked-{{v1.cloud_name}} + +2. Wait for current cloud-init to complete, replace `<YOUR_VM_IP>` with the IP + address of the VM that you launched in step 1: + +.. code-block:: bash + + CI_VM_IP=<YOUR_VM_IP> + # Make note of the datasource cloud-init detected in --long output. + # In step 5, you will use this to confirm the same datasource is detected + # after upgrade. + ssh ubuntu@$CI_VM_IP -- cloud-init status --wait --long + +3. Set up the **-proposed** pocket on your VM and upgrade to the **-proposed** + cloud-init: + +.. code-block:: bash + + # Create a script that will add the -proposed pocket to APT's sources + # and install cloud-init from that pocket + cat > setup_proposed.sh <<EOF + #!/bin/bash + mirror=http://archive.ubuntu.com/ubuntu + echo deb \$mirror \$(lsb_release -sc)-proposed main | tee \ + /etc/apt/sources.list.d/proposed.list + apt-get update -q + apt-get install -qy cloud-init + EOF + + scp setup_proposed.sh ubuntu@$CI_VM_IP:. + ssh ubuntu@$CI_VM_IP -- sudo bash setup_proposed.sh + +4. Change hostname, clean cloud-init's state, and reboot to run cloud-init + from scratch: + +.. code-block:: bash + + ssh ubuntu@$CI_VM_IP -- sudo hostname something-else + ssh ubuntu@$CI_VM_IP -- sudo cloud-init clean --logs --reboot + +5. Validate **-proposed** cloud-init came up without error + +.. code-block:: bash + + # Block until cloud-init completes and verify from --long the datasource + # from step 2. Errors would show up in --long + + ssh ubuntu@$CI_VM_IP -- cloud-init status --wait --long + # Make sure hostname was set properly to SRU-worked-<cloud name> + ssh ubuntu@$CI_VM_IP -- hostname + # Check for any errors or warnings in cloud-init logs. + # (This should produce no output if successful.) + ssh ubuntu@$CI_VM_IP -- grep Trace "/var/log/cloud-init*" + +6. If you encounter an error during SRU testing: + + * Create a `new cloud-init bug`_ reporting the version of cloud-init + affected + * Ping upstream cloud-init on Freenode's `#cloud-init IRC channel`_ + +.. _SRU: https://wiki.ubuntu.com/StableReleaseUpdates +.. _CloudinitUpdates: https://wiki.ubuntu.com/CloudinitUpdates +.. _new cloud-init bug: https://bugs.launchpad.net/cloud-init/+filebug +.. _#cloud-init IRC channel: https://webchat.freenode.net/?channel=#cloud-init
diff --git a/doc/rtd/topics/dir_layout.rst b/doc/rtd/topics/dir_layout.rst index 7a6265eb..ebd63ae7 100644 --- a/doc/rtd/topics/dir_layout.rst +++ b/doc/rtd/topics/dir_layout.rst @@ -2,11 +2,12 @@ Directory layout **************** -Cloudinits's directory structure is somewhat different from a regular application:: +Cloud-init's directory structure is somewhat different from a regular +application:: /var/lib/cloud/ - data/ - - instance-id + - instance-id - previous-instance-id - datasource - previous-datasource @@ -35,38 +36,41 @@ Cloudinits's directory structure is somewhat different from a regular applicatio The main directory containing the cloud-init specific subdirectories. It is typically located at ``/var/lib`` but there are certain configuration - scenarios where this can be altered. + scenarios where this can be altered. TBD, describe this overriding more. ``data/`` - Contains information related to instance ids, datasources and hostnames of the previous - and current instance if they are different. These can be examined as needed to - determine any information related to a previous boot (if applicable). + Contains information related to instance ids, datasources and hostnames of + the previous and current instance if they are different. These can be + examined as needed to determine any information related to a previous boot + (if applicable). ``handlers/`` - Custom ``part-handlers`` code is written out here. Files that end up here are written - out with in the scheme of ``part-handler-XYZ`` where ``XYZ`` is the handler number (the - first handler found starts at 0). + Custom ``part-handlers`` code is written out here. Files that end up here are + written out in the scheme of ``part-handler-XYZ`` where ``XYZ`` is the + handler number (the first handler found starts at 0). ``instance`` - A symlink to the current ``instances/`` subdirectory that points to the currently - active instance (which is active is dependent on the datasource loaded). + A symlink to the current ``instances/`` subdirectory that points to the + currently active instance (which one is active depends on the datasource + loaded). ``instances/`` - All instances that were created using this image end up with instance identifier - subdirectories (and corresponding data for each instance). The currently active - instance will be symlinked the ``instance`` symlink file defined previously. + All instances that were created using this image end up with instance + identifier subdirectories (and corresponding data for each instance). The + currently active instance will be symlinked to by the ``instance`` symlink + file defined previously. ``scripts/`` - Scripts that are downloaded/created by the corresponding ``part-handler`` will end up - in one of these subdirectories. + Scripts that are downloaded/created by the corresponding ``part-handler`` + will end up in one of these subdirectories. ``seed/`` @@ -77,6 +81,7 @@ Cloudinits's directory structure is somewhat different from a regular applicatio Cloud-init has a concept of a module semaphore, which basically consists of the module name and its frequency. These files are used to ensure a module is only run `per-once`, `per-instance`, `per-always`. This folder contains - semaphore `files` which are only supposed to run `per-once` (not tied to the instance id). + semaphore `files` which are only supposed to run `per-once` (not tied to the + instance id). .. vi: textwidth=78
diff --git a/doc/rtd/topics/docs.rst b/doc/rtd/topics/docs.rst new file mode 100644 index 00000000..1b15377e --- /dev/null +++ b/doc/rtd/topics/docs.rst @@ -0,0 +1,84 @@ +.. _docs: + +Docs +**** + +These docs are hosted on Read the Docs. The following will explain how to +contribute to and build these docs locally. + +The documentation is primarily written in reStructuredText. + + +Building +======== + +There is a makefile target to build the documentation for you: + +.. code-block:: shell-session + + $ tox -e doc + +This will do two things: + +- Build the documentation using sphinx +- Run doc8 against the documentation source code + +Once built, the HTML files will be viewable in ``doc/rtd_html``. Use your +web browser to open ``index.html`` to view and navigate the site. + +Style Guide +=========== + +Headings +-------- +The headings used across the documentation use the following hierarchy: + +- ``*****``: used once atop a new page +- ``=====``: each section on the page +- ``-----``: subsections +- ``^^^^^``: sub-subsections +- ``"""""``: paragraphs + +The top level header ``######`` is reserved for the first page. + +If an underline and overline are used, their lengths must be identical. The +underline must be at least as long as the title itself. + +Line Length +----------- +Please keep the line lengths to a maximum of **79** characters. This ensures +that the pages and tables do not get so wide that side scrolling is required. + +Header +------ +Adding a link at the top of the page allows for the page to be referenced by +other pages. For example, for the FAQ page this would be: + +.. code-block:: rst + + .. _faq: + +Footer +------ +The footer should include the textwidth: + +.. code-block:: rst + + .. vi: textwidth=79 + +Vertical Whitespace +------------------- +One newline between each section helps ensure readability of the documentation +source code. + +Common Words +------------ +There are some common words that should follow specific usage: + +- ``cloud-init``: always lower case with a hyphen, unless starting a sentence + in which case only the 'C' is capitalized (e.g. ``Cloud-init``). +- ``metadata``: one word +- ``user data``: two words, not to be combined +- ``vendor data``: like user data, it is two words + +.. vi: textwidth=79
diff --git a/doc/rtd/topics/examples.rst b/doc/rtd/topics/examples.rst index c30d2263..81860f85 100644 --- a/doc/rtd/topics/examples.rst +++ b/doc/rtd/topics/examples.rst @@ -60,8 +60,8 @@ Setup and run `puppet`_ :language: yaml :linenos: -Add apt repositories -==================== +Add primary apt repositories +============================ .. literalinclude:: ../../examples/cloud-config-add-apt-repos.txt :language: yaml :linenos: @@ -128,15 +128,15 @@ Reboot/poweroff when finished :language: yaml :linenos: -Configure instances ssh-keys +Configure instances SSH keys ============================ .. literalinclude:: ../../examples/cloud-config-ssh-keys.txt :language: yaml :linenos: - -Additional apt configuration -============================ + +Additional apt configuration and repositories +============================================= .. literalinclude:: ../../examples/cloud-config-apt.txt :language: yaml
diff --git a/doc/rtd/topics/faq.rst b/doc/rtd/topics/faq.rst new file mode 100644 index 00000000..98c0cfaa --- /dev/null +++ b/doc/rtd/topics/faq.rst @@ -0,0 +1,238 @@ +.. _faq: + +FAQ +*** + +How do I get help? +================== + +Having trouble? We would like to help! + +- First go through this page with answers to common questions +- Use the search bar at the upper left to search these docs +- Ask a question in the ``#cloud-init`` IRC channel on Freenode +- Join and ask questions on the `cloud-init mailing list <https://launchpad.net/~cloud-init>`_ +- Find a bug? Check out the :ref:`reporting_bugs` topic for + how to report one + +Where are the logs? +=================== + +Cloud-init logs to the following locations: + +- `/var/log/cloud-init-output.log`: captures the output from each stage of + cloud-init when it runs +- `/var/log/cloud-init.log`: very detailed log with debugging output, + detailing each action taken +- `/run/cloud-init`: contains logs about how cloud-init decided to enable or + disable itself, as well as what platforms/datasources were detected. These + logs are most useful when trying to determine what cloud-init ran or did not + run. + +Be aware that each time a system boots, new logs are appended to the files in +`/var/log`. Therefore, the files may have more than one boot's worth of +information present. + +When reviewing these logs, look for errors or Python tracebacks.
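For example, a quick scan for problems across all boots might look like this (a minimal sketch; run on the instance itself, and the pattern can be adjusted as needed):

.. code-block:: shell-session

    $ sudo grep -iE 'error|warn|trace' /var/log/cloud-init.log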
+Where are the configuration files? +================================== + +Cloud-init config is provided in two places: + +- `/etc/cloud/cloud.cfg` +- `/etc/cloud/cloud.cfg.d/*.cfg` + +These files can define the modules that run during instance initialization, +the datasources to evaluate on boot, and other settings. + +Where are the data files? +========================= + +Inside the `/var/lib/cloud/` directory there are two important subdirectories: + +instance +-------- + +The `/var/lib/cloud/instance` directory is a symbolic link that points +to the most recently used instance-id directory. This folder contains the +information cloud-init received from datasources, including vendor and user +data. This can be helpful to review to ensure the correct data was passed. + +It also contains the `datasource` file that contains the full information +about what datasource was identified and used to set up the system. + +Finally, the `boot-finished` file is the last thing that cloud-init writes. + +data +---- + +The `/var/lib/cloud/data` directory contains information related to the +previous boot: + +* `instance-id`: id of the instance as discovered by cloud-init. Changing + this file has no effect. +* `result.json`: a json file that shows both the datasource used to set up + the instance and whether any errors occurred +* `status.json`: a json file that shows the datasource used, a breakdown + of all four modules, whether any errors occurred, and the start and stop + times. + +What datasource am I using? +=========================== + +To correctly set up an instance, cloud-init must identify the +cloud that it is on. Therefore, knowing what datasource is used on an +instance launch can aid in debugging. + +To find what datasource is being used, run the `cloud-id` command: + +.. code-block:: shell-session + + $ cloud-id + nocloud + +If the cloud-id is not what is expected, then running the `ds-identify` +script in debug mode and providing the output in a bug report can help +resolve any issues: + +.. code-block:: shell-session + + $ sudo DEBUG_LEVEL=2 DI_LOG=stderr /usr/lib/cloud-init/ds-identify --force + +The force parameter allows the command to be run again since the instance has +already launched. The other options increase the verbosity of logging and +send the logs to STDERR. + +How can I debug my user data? +============================= + +The two most common issues with cloud-config user data are: + +1. Incorrectly formatted YAML +2. First line does not contain `#cloud-config` + +To verify your YAML, we have a short script called `validate-yaml.py`_ +that can validate your user data offline. + +.. _validate-yaml.py: https://github.com/canonical/cloud-init/blob/master/tools/validate-yaml.py + +Another option is to run the following on an instance when debugging: + +.. code-block:: shell-session + + $ sudo cloud-init query userdata > user-data.yaml + $ cloud-init devel schema -c user-data.yaml --annotate + +As launching instances in the cloud can cost money and take a bit longer, +sometimes it is easier to launch instances locally using Multipass or LXD: + +Multipass +--------- + +`Multipass`_ is a cross-platform tool to launch Ubuntu VMs across Linux, +Windows, and macOS. + +When a user launches a Multipass VM, user data can be passed by adding the +`--cloud-init` flag and the appropriate YAML file containing user data: + +.. code-block:: shell-session + + $ multipass launch bionic --name test-vm --cloud-init userdata.yaml + +Multipass will validate the YAML syntax of the cloud-config file before +attempting to start the VM! A nice addition to help save time when +experimenting with launching instances with various cloud-configs. + +Multipass only supports passing user-data and only as YAML cloud-config +files. Passing a script, a MIME archive, or any of the other user-data +formats cloud-init supports will result in an error from the YAML syntax +validator. + +.. _Multipass: https://multipass.run/ + +LXD +--- + +`LXD`_ offers a streamlined user experience for using Linux system +containers.
With LXD, a user can pass: + +* user data +* vendor data +* metadata +* network configuration + +The following initializes a container with user data: + +.. code-block:: shell-session + + $ lxc init ubuntu-daily:bionic test-container + $ lxc config set test-container user.user-data - < userdata.yaml + $ lxc start test-container + +To avoid the extra commands this can also be done at launch: + +.. code-block:: shell-session + + $ lxc launch ubuntu-daily:bionic test-container --config=user.user-data="$(cat userdata.yaml)" + +Finally, a profile can be setup with the specific data if a user needs to +launch this multiple times: + +.. code-block:: shell-session + + $ lxc profile create dev-user-data + $ lxc profile set dev-user-data user.user-data - < cloud-init-config.yaml + $ lxc launch ubuntu-daily:bionic test-container -p default -p dev-user-data + +The above examples all show how to pass user data. To pass other types of +configuration data use the config option specified below: + ++----------------+---------------------+ +| Data | Config Option | ++================+=====================+ +| user data | user.user-data | ++----------------+---------------------+ +| vendor data | user.vendor-data | ++----------------+---------------------+ +| metadata | user.meta-data | ++----------------+---------------------+ +| network config | user.network-config | ++----------------+---------------------+ + +See the LXD `Instance Configuration`_ docs for more info about configuration +values or the LXD `Custom Network Configuration`_ document for more about +custom network config. + +.. _LXD: https://linuxcontainers.org/ +.. _Instance Configuration: https://lxd.readthedocs.io/en/latest/instances/ +.. _Custom Network Configuration: https://lxd.readthedocs.io/en/latest/cloud-init/ + +Where can I learn more? +======================================== + +Below are some videos, blog posts, and white papers about cloud-init from a +variety of sources. + +- `Cloud Instance Initialization with cloud-init (Whitepaper)`_ +- `cloud-init Summit 2018`_ +- `cloud-init - The cross-cloud Magic Sauce (PDF)`_ +- `cloud-init Summit 2017`_ +- `cloud-init - Building clouds one Linux box at a time (Video)`_ +- `cloud-init - Building clouds one Linux box at a time (PDF)`_ +- `Metadata and cloud-init`_ +- `The beauty of cloud-init`_ +- `Introduction to cloud-init`_ + +.. _Cloud Instance Initialization with cloud-init (Whitepaper): https://ubuntu.com/blog/cloud-instance-initialisation-with-cloud-init +.. _cloud-init Summit 2018: https://powersj.io/post/cloud-init-summit18/ +.. _cloud-init - The cross-cloud Magic Sauce (PDF): https://events.linuxfoundation.org/wp-content/uploads/2017/12/cloud-init-The-cross-cloud-Magic-Sauce-Scott-Moser-Chad-Smith-Canonical.pdf +.. _cloud-init Summit 2017: https://powersj.io/post/cloud-init-summit17/ +.. _cloud-init - Building clouds one Linux box at a time (Video): https://www.youtube.com/watch?v=1joQfUZQcPg +.. _cloud-init - Building clouds one Linux box at a time (PDF): https://annex.debconf.org/debconf-share/debconf17/slides/164-cloud-init_Building_clouds_one_Linux_box_at_a_time.pdf +.. _Metadata and cloud-init: https://www.youtube.com/watch?v=RHVhIWifVqU +.. _The beauty of cloud-init: http://brandon.fuller.name/archives/2011/05/02/06.40.57/ +.. _Introduction to cloud-init: http://www.youtube.com/watch?v=-zL3BdbKyGY + +.. 
vi: textwidth=79
diff --git a/doc/rtd/topics/format.rst b/doc/rtd/topics/format.rst index 15234d21..2b60bdd3 100644 --- a/doc/rtd/topics/format.rst +++ b/doc/rtd/topics/format.rst @@ -4,33 +4,36 @@ User-Data Formats ***************** -User data that will be acted upon by cloud-init must be in one of the following types. +User data that will be acted upon by cloud-init must be in one of the following +types. Gzip Compressed Content ======================= Content found to be gzip compressed will be uncompressed. -The uncompressed data will then be used as if it were not compressed. +The uncompressed data will then be used as if it were not compressed. This is typically useful because user-data is limited to ~16384 [#]_ bytes. Mime Multi Part Archive ======================= -This list of rules is applied to each part of this multi-part file. +This list of rules is applied to each part of this multi-part file. Using a mime-multi part file, the user can specify more than one type of data. -For example, both a user data script and a cloud-config type could be specified. +For example, both a user data script and a cloud-config type could be +specified. Supported content-types: -- text/x-include-once-url -- text/x-include-url -- text/cloud-config-archive -- text/upstart-job +- text/cloud-boothook - text/cloud-config +- text/cloud-config-archive +- text/jinja2 - text/part-handler +- text/upstart-job +- text/x-include-once-url +- text/x-include-url - text/x-shellscript -- text/cloud-boothook Helper script to generate mime messages --------------------------------------- @@ -38,16 +41,16 @@ Helper script to generate mime messages .. code-block:: python #!/usr/bin/python - + import sys - + from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText - + if len(sys.argv) == 1: print("%s input-file:type ..." % (sys.argv[0])) sys.exit(1) - + combined_message = MIMEMultipart() for i in sys.argv[1:]: (filename, format_type) = i.split(":", 1) @@ -56,7 +59,7 @@ Helper script to generate mime messages sub_message = MIMEText(contents, format_type, sys.getdefaultencoding()) sub_message.add_header('Content-Disposition', 'attachment; filename="%s"' % (filename)) combined_message.attach(sub_message) - + print(combined_message) @@ -65,7 +68,8 @@ User-Data Script Typically used by those who just want to execute a shell script. -Begins with: ``#!`` or ``Content-Type: text/x-shellscript`` when using a MIME archive. +Begins with: ``#!`` or ``Content-Type: text/x-shellscript`` when using a MIME +archive. .. note:: New in cloud-init v. 18.4: User-data scripts can also render cloud instance @@ -78,44 +82,48 @@ Example :: $ cat myscript.sh - + #!/bin/sh echo "Hello World. The time is now $(date -R)!" | tee /root/output.txt - - $ euca-run-instances --key mykey --user-data-file myscript.sh ami-a07d95c9 + + $ euca-run-instances --key mykey --user-data-file myscript.sh ami-a07d95c9 Include File ============ This content is an ``include`` file. -The file contains a list of urls, one per line. -Each of the URLs will be read, and their content will be passed through this same set of rules. -Ie, the content read from the URL can be gzipped, mime-multi-part, or plain text. -If an error occurs reading a file the remaining files will not be read. +The file contains a list of urls, one per line. Each of the URLs will be read, +and their content will be passed through this same set of rules. I.e., the +content read from the URL can be gzipped, mime-multi-part, or plain text. If +an error occurs reading a file the remaining files will not be read. -Begins with: ``#include`` or ``Content-Type: text/x-include-url`` when using a MIME archive. +Begins with: ``#include`` or ``Content-Type: text/x-include-url`` when using +a MIME archive.
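A minimal include file might look like the following (a sketch; the URLs are placeholders for whatever content you host):

.. code-block:: shell-session

    $ cat user-data
    #include
    https://example.com/base-config
    https://example.com/extra-config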
Cloud Config Data ================= -Cloud-config is the simplest way to accomplish some things -via user-data. Using cloud-config syntax, the user can specify certain things in a human friendly format. +Cloud-config is the simplest way to accomplish some things via user-data. Using +cloud-config syntax, the user can specify certain things in a human friendly +format. These things include: - apt upgrade should be run on first boot - a different apt mirror should be used - additional apt sources should be added -- certain ssh keys should be imported +- certain SSH keys should be imported - *and many more...* .. note:: This file must be valid yaml syntax. -See the :ref:`yaml_examples` section for a commented set of examples of supported cloud config formats. +See the :ref:`yaml_examples` section for a commented set of examples of +supported cloud config formats. -Begins with: ``#cloud-config`` or ``Content-Type: text/cloud-config`` when using a MIME archive. +Begins with: ``#cloud-config`` or ``Content-Type: text/cloud-config`` when +using a MIME archive. .. note:: New in cloud-init v. 18.4: Cloud config data can also render cloud instance @@ -125,25 +133,41 @@ Begins with: ``#cloud-config`` or ``Content-Type: text/cloud-config`` when using Upstart Job =========== -Content is placed into a file in ``/etc/init``, and will be consumed by upstart as any other upstart job. +Content is placed into a file in ``/etc/init``, and will be consumed by upstart +as any other upstart job. -Begins with: ``#upstart-job`` or ``Content-Type: text/upstart-job`` when using a MIME archive. +Begins with: ``#upstart-job`` or ``Content-Type: text/upstart-job`` when using +a MIME archive. Cloud Boothook ============== -This content is ``boothook`` data. It is stored in a file under ``/var/lib/cloud`` and then executed immediately. -This is the earliest ``hook`` available. Note, that there is no mechanism provided for running only once. The boothook must take care of this itself. -It is provided with the instance id in the environment variable ``INSTANCE_ID``. This could be made use of to provide a 'once-per-instance' type of functionality. +This content is ``boothook`` data. It is stored in a file under +``/var/lib/cloud`` and then executed immediately. This is the earliest ``hook`` +available. Note that there is no mechanism provided for running only once. The +boothook must take care of this itself. + +It is provided with the instance id in the environment variable +``INSTANCE_ID``. This can be used to provide a 'once-per-instance' +type of functionality. -Begins with: ``#cloud-boothook`` or ``Content-Type: text/cloud-boothook`` when using a MIME archive. +Begins with: ``#cloud-boothook`` or ``Content-Type: text/cloud-boothook`` when +using a MIME archive.
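The following is a sketch of the 'once-per-instance' pattern described above, using ``INSTANCE_ID`` to skip work on subsequent boots of the same instance (the marker path and logged message are arbitrary examples):

.. code-block:: shell-session

    $ cat boothook.txt
    #cloud-boothook
    #!/bin/sh
    # exit early if this instance was already handled
    [ -e "/var/lib/cloud/mymarker.$INSTANCE_ID" ] && exit 0
    echo "boothook ran at $(date)" >> /root/boothook.log
    touch "/var/lib/cloud/mymarker.$INSTANCE_ID"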
Part Handler ============ -This is a ``part-handler``: It contains custom code for either supporting new mime-types in multi-part user data, or overriding the existing handlers for supported mime-types. It will be written to a file in ``/var/lib/cloud/data`` based on its filename (which is generated). -This must be python code that contains a ``list_types`` function and a ``handle_part`` function. -Once the section is read the ``list_types`` method will be called. It must return a list of mime-types that this part-handler handles. Because mime parts are processed in order, a ``part-handler`` part must precede any parts with mime-types it is expected to handle in the same user data. +This is a ``part-handler``: It contains custom code for either supporting new +mime-types in multi-part user data, or overriding the existing handlers for +supported mime-types. It will be written to a file in ``/var/lib/cloud/data`` +based on its filename (which is generated). + +This must be Python code that contains a ``list_types`` function and a +``handle_part`` function. Once the section is read the ``list_types`` method +will be called. It must return a list of mime-types that this part-handler +handles. Because mime parts are processed in order, a ``part-handler`` part +must precede any parts with mime-types it is expected to handle in the same +user data. The ``handle_part`` function must be defined like: @@ -155,11 +179,13 @@ The ``handle_part`` function must be defined like: # filename = the filename of the part (or a generated filename if none is present in mime data) # payload = the part's content -Cloud-init will then call the ``handle_part`` function once before it handles any parts, once per part received, and once after all parts have been handled. -The ``'__begin__'`` and ``'__end__'`` sentinels allow the part handler to do initialization or teardown before or after -receiving any parts. +Cloud-init will then call the ``handle_part`` function once before it handles +any parts, once per part received, and once after all parts have been handled. +The ``'__begin__'`` and ``'__end__'`` sentinels allow the part handler to do +initialization or teardown before or after receiving any parts. -Begins with: ``#part-handler`` or ``Content-Type: text/part-handler`` when using a MIME archive. +Begins with: ``#part-handler`` or ``Content-Type: text/part-handler`` when +using a MIME archive. Example ------- @@ -170,6 +196,22 @@ Example Also this `blog`_ post offers another example for more advanced usage. +Kernel Command Line +=================== + +When using the :ref:`datasource_nocloud` datasource, users can pass user data +via the kernel command line parameters. See the :ref:`datasource_nocloud` +datasource documentation for more details. + +Disabling User-Data +=================== + +Cloud-init can be configured to ignore any user-data provided to the instance. +This allows custom images to prevent users from accidentally breaking closed +appliances. Setting ``allow_userdata: false`` in the configuration will prevent +cloud-init from processing user-data. + .. [#] See your cloud provider for applicable user-data size limitations... .. _blog: http://foss-boss.blogspot.com/2011/01/advanced-cloud-init-custom-handlers.html + .. vi: textwidth=78
diff --git a/doc/rtd/topics/instancedata.rst b/doc/rtd/topics/instancedata.rst index 5d2dc948..e7dd0d62 100644 --- a/doc/rtd/topics/instancedata.rst +++ b/doc/rtd/topics/instancedata.rst @@ -4,7 +4,7 @@ Instance Metadata ***************** -What is a instance data? +What is instance data?
======================== Instance data is the collection of all configuration data that cloud-init @@ -90,47 +90,107 @@ There are three basic top-level keys: The standardized keys present: -+----------------------+-----------------------------------------------+-----------------------------------+ -| Key path | Description | Examples | -+======================+===============================================+===================================+ -| v1._beta_keys | List of standardized keys still in 'beta'. | [subplatform] | -| | The format, intent or presence of these keys | | -| | can change. Do not consider them | | -| | production-ready. | | -+----------------------+-----------------------------------------------+-----------------------------------+ -| v1.cloud_name | Where possible this will indicate the 'name' | aws, openstack, azure, | -| | of the cloud this system is running on. This | configdrive, nocloud, | -| | is specifically different than the 'platform' | ovf, etc. | -| | below. As an example, the name of Amazon Web | | -| | Services is 'aws' while the platform is 'ec2'.| | -| | | | -| | If no specific name is determinable or | | -| | provided in meta-data, then this field may | | -| | contain the same content as 'platform'. | | -+----------------------+-----------------------------------------------+-----------------------------------+ -| v1.instance_id | Unique instance_id allocated by the cloud | i-<somehash> | -+----------------------+-----------------------------------------------+-----------------------------------+ -| v1.local_hostname | The internal or local hostname of the system | ip-10-41-41-70, | -| | | <user-provided-hostname> | -+----------------------+-----------------------------------------------+-----------------------------------+ -| v1.platform | An attempt to identify the cloud platform | ec2, openstack, lxd, gce | -| | instance that the system is running on. | nocloud, ovf | -+----------------------+-----------------------------------------------+-----------------------------------+ -| v1.subplatform | Additional platform details describing the | metadata (http://168.254.169.254),| -| | specific source or type of metadata used. | seed-dir (/path/to/seed-dir/), | -| | The format of subplatform will be: | config-disk (/dev/cd0), | -| | <subplatform_type> (<url_file_or_dev_path>) | configdrive (/dev/sr0) | -+----------------------+-----------------------------------------------+-----------------------------------+ -| v1.public_ssh_keys | A list of ssh keys provided to the instance | ['ssh-rsa AA...', ...] | -| | by the datasource metadata. | | -+----------------------+-----------------------------------------------+-----------------------------------+ -| v1.region | The physical region/datacenter in which the | us-east-2 | -| | instance is deployed | | -+----------------------+-----------------------------------------------+-----------------------------------+ -| v1.availability_zone | The physical availability zone in which the | us-east-2b, nova, null | -| | instance is deployed | | -+----------------------+-----------------------------------------------+-----------------------------------+ +v1._beta_keys +------------- +List of standardized keys still in 'beta'. The format, intent or presence of +these keys can change. Do not consider them production-ready. +Example output: + +- [subplatform] + +v1.cloud_name +------------- +Where possible this will indicate the 'name' of the cloud the system is running +on. This is different than the 'platform' item. 
For example, the cloud name of +Amazon Web Services is 'aws', while the platform is 'ec2'. + +If a specific name cannot be determined or is not provided in meta-data, then +this field may contain the same content as 'platform'. + +Example output: + +- aws +- openstack +- azure +- configdrive +- nocloud +- ovf + + +v1.instance_id +-------------- +Unique instance_id allocated by the cloud. + +Example output: + +- i-<hash> + +v1.local_hostname +----------------- +The internal or local hostname of the system. + +Example output: + +- ip-10-41-41-70 +- <user-provided-hostname> + +v1.platform +----------- +An attempt to identify the cloud platform instance that the system is running +on. + +Example output: + +- ec2 +- openstack +- lxd +- gce +- nocloud +- ovf + +v1.subplatform +-------------- +Additional platform details describing the specific source or type of metadata +used. The format of subplatform will be: + +``<subplatform_type> (<url_file_or_dev_path>)`` + +Example output: + +- metadata (http://168.254.169.254) +- seed-dir (/path/to/seed-dir/) +- config-disk (/dev/cd0) +- configdrive (/dev/sr0) + +v1.public_ssh_keys +------------------ +A list of SSH keys provided to the instance by the datasource metadata. + +Example output: + +- ['ssh-rsa AA...', ...] + +v1.region +--------- +The physical region/data center in which the instance is deployed. + +Example output: + +- us-east-2 + +v1.availability_zone +-------------------- +The physical availability zone in which the instance is deployed. + +Example output: + +- us-east-2b +- nova +- null + +Example Output +-------------- Below is an example of ``/run/cloud-init/instance_data.json`` on an EC2 instance: @@ -229,28 +289,28 @@ instance: "network": { "interfaces": { "macs": { - "06:74:8f:39:cd:a6": { - "device-number": "0", - "interface-id": "eni-052058bbd7831eaae", - "ipv4-associations": { - "18.218.221.122": "10.41.41.95" - }, - "local-hostname": "ip-10-41-41-95.us-east-2.compute.internal", - "local-ipv4s": "10.41.41.95", - "mac": "06:74:8f:39:cd:a6", - "owner-id": "437526006925", - "public-hostname": "ec2-18-218-221-122.us-east-2.compute.amazonaws.com", - "public-ipv4s": "18.218.221.122", - "security-group-ids": "sg-828247e9", - "security-groups": "Cloud-init integration test secgroup", - "subnet-id": "subnet-282f3053", - "subnet-ipv4-cidr-block": "10.41.41.0/24", - "subnet-ipv6-cidr-blocks": "2600:1f16:b80:ad00::/64", - "vpc-id": "vpc-252ef24d", - "vpc-ipv4-cidr-block": "10.41.0.0/16", - "vpc-ipv4-cidr-blocks": "10.41.0.0/16", - "vpc-ipv6-cidr-blocks": "2600:1f16:b80:ad00::/56" - } + "06:74:8f:39:cd:a6": { + "device-number": "0", + "interface-id": "eni-052058bbd7831eaae", + "ipv4-associations": { + "18.218.221.122": "10.41.41.95" + }, + "local-hostname": "ip-10-41-41-95.us-east-2.compute.internal", + "local-ipv4s": "10.41.41.95", + "mac": "06:74:8f:39:cd:a6", + "owner-id": "437526006925", + "public-hostname": "ec2-18-218-221-122.us-east-2.compute.amazonaws.com", + "public-ipv4s": "18.218.221.122", + "security-group-ids": "sg-828247e9", + "security-groups": "Cloud-init integration test secgroup", + "subnet-id": "subnet-282f3053", + "subnet-ipv4-cidr-block": "10.41.41.0/24", + "subnet-ipv6-cidr-blocks": "2600:1f16:b80:ad00::/64", + "vpc-id": "vpc-252ef24d", + "vpc-ipv4-cidr-block": "10.41.0.0/16", + "vpc-ipv4-cidr-blocks": "10.41.0.0/16", + "vpc-ipv6-cidr-blocks": "2600:1f16:b80:ad00::/56" + } } } }, @@ -319,16 +379,16 @@ user.
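On a running instance, these standardized keys can also be read back with the **cloud-init query** command described later on this page (a minimal sketch; the values shown are illustrative):

.. code-block:: shell-session

    $ cloud-init query v1.cloud_name
    aws
    $ cloud-init query v1.region
    us-east-2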
Below are some examples of providing these types of user-data: -* Cloud config calling home with the ec2 public hostname and avaliability-zone +* Cloud config calling home with the ec2 public hostname and availability-zone -.. code-block:: shell-session +.. code-block:: yaml ## template: jinja #cloud-config runcmd: - echo 'EC2 public hostname allocated to instance: {{ ds.meta_data.public_hostname }}' > /tmp/instance_metadata - - echo 'EC2 avaiability zone: {{ v1.availability_zone }}' >> + - echo 'EC2 availability zone: {{ v1.availability_zone }}' >> /tmp/instance_metadata - curl -X POST -d '{"hostname": "{{ds.meta_data.public_hostname }}", "availability-zone": "{{ v1.availability_zone }}"}' @@ -336,7 +396,7 @@ Below are some examples of providing these types of user-data: * Custom user-data script performing different operations based on region -.. code-block:: shell-session +.. code-block:: jinja ## template: jinja #!/bin/bash @@ -352,7 +412,7 @@ Below are some examples of providing these types of user-data: and the following string in your rendered user-data: ``CI_MISSING_JINJA_VAR/<your_varname>``. -Cloud-init also surfaces a commandline tool **cloud-init query** which can +Cloud-init also surfaces a command line tool **cloud-init query** which can assist developers or scripts with obtaining instance metadata easily. See :ref:`cli_query` for more information. diff --git a/doc/rtd/topics/merging.rst b/doc/rtd/topics/merging.rst index c75ca59c..2b5e5dad 100644 --- a/doc/rtd/topics/merging.rst +++ b/doc/rtd/topics/merging.rst @@ -21,12 +21,12 @@ For example. .. code-block:: yaml #cloud-config (1) - run_cmd: + runcmd: - bash1 - bash2 #cloud-config (2) - run_cmd: + runcmd: - bash3 - bash4 @@ -36,7 +36,7 @@ cloud-config object that contains the following. .. code-block:: yaml #cloud-config (merged) - run_cmd: + runcmd: - bash3 - bash4 @@ -45,7 +45,7 @@ Typically this is not what users want; instead they would likely prefer: .. code-block:: yaml #cloud-config (merged) - run_cmd: + runcmd: - bash1 - bash2 - bash3 @@ -55,6 +55,51 @@ This way makes it easier to combine the various cloud-config objects you have into a more useful list, thus reducing duplication necessary to accomplish the same result with the previous method. + +Built-in Mergers +================ + +Cloud-init provides merging for the following built-in types: + +- Dict +- List +- String + +The ``Dict`` merger has the following options which control what is done with +values contained within the config. + +- ``allow_delete``: Existing values not present in the new value can be + deleted, defaults to False +- ``no_replace``: Do not replace an existing value if one is already present, + enabled by default. +- ``replace``: Overwrite existing values with new ones. + +The ``List`` merger has the following options which control what is done with +the values contained within the config. + +- ``append``: Add new value to the end of the list, defaults to False. +- ``prepend``: Add new values to the start of the list, defaults to False. +- ``no_replace``: Do not replace an existing value if one is already present, + enabled by default. +- ``replace``: Overwrite existing values with new ones. + +The ``Str`` merger has the following options which control what is done with +the values contained within the config. + +- ``append``: Add new value to the end of the string, defaults to False. + +Common options for all merge types which control how recursive merging is +done on other types. 
+ +- ``recurse_dict``: If True merge the new values of the dictionary, defaults to + True. +- ``recurse_list``: If True merge the new values of the list, defaults to + False. +- ``recurse_array``: Alias for ``recurse_list``. +- ``recurse_str``: If True merge the new values of the string, defaults to + False. + + Customizability =============== @@ -164,8 +209,8 @@ string format (i.e. the second option above), for example: .. code-block:: python - {'merge_how': [{'name': 'list', 'settings': ['extend']}, - {'name': 'dict', 'settings': []}, + {'merge_how': [{'name': 'list', 'settings': ['append']}, + {'name': 'dict', 'settings': ['no_replace', 'recurse_list']}, {'name': 'str', 'settings': ['append']}]} This would be the equivalent format for default string format but in dictionary @@ -201,4 +246,43 @@ Note, however, that merge algorithms are not used *across* types of configuration. As was the case before merging was implemented, user-data will overwrite conf.d configuration without merging. +Example cloud-config +==================== + +A common request is to include multiple ``runcmd`` directives in different +files and merge all of the commands together. To achieve this, we must modify +the default merging to allow for dictionaries to join list values. + + +The first config + +.. code-block:: yaml + + #cloud-config + merge_how: + - name: list + settings: [append] + - name: dict + settings: [no_replace, recurse_list] + + runcmd: + - bash1 + - bash2 + +The second config + +.. code-block:: yaml + + #cloud-config + merge_how: + - name: list + settings: [append] + - name: dict + settings: [no_replace, recurse_list] + + runcmd: + - bash3 + - bash4 + + .. vi: textwidth=78 diff --git a/doc/rtd/topics/modules.rst b/doc/rtd/topics/modules.rst index d9720f6a..9c9be804 100644 --- a/doc/rtd/topics/modules.rst +++ b/doc/rtd/topics/modules.rst @@ -1,8 +1,11 @@ .. _modules: + ******* Modules ******* +.. contents:: Table of Contents + .. automodule:: cloudinit.config.cc_apt_configure .. automodule:: cloudinit.config.cc_apt_pipelining .. automodule:: cloudinit.config.cc_bootcmd @@ -46,14 +49,13 @@ Modules .. automodule:: cloudinit.config.cc_set_hostname .. automodule:: cloudinit.config.cc_set_passwords .. automodule:: cloudinit.config.cc_snap -.. automodule:: cloudinit.config.cc_snappy -.. automodule:: cloudinit.config.cc_snap_config .. automodule:: cloudinit.config.cc_spacewalk .. automodule:: cloudinit.config.cc_ssh .. automodule:: cloudinit.config.cc_ssh_authkey_fingerprints .. automodule:: cloudinit.config.cc_ssh_import_id .. automodule:: cloudinit.config.cc_timezone .. automodule:: cloudinit.config.cc_ubuntu_advantage +.. automodule:: cloudinit.config.cc_ubuntu_drivers .. automodule:: cloudinit.config.cc_update_etc_hosts .. automodule:: cloudinit.config.cc_update_hostname .. automodule:: cloudinit.config.cc_users_groups diff --git a/doc/rtd/topics/moreinfo.rst b/doc/rtd/topics/moreinfo.rst deleted file mode 100644 index 9c3b7fba..00000000 --- a/doc/rtd/topics/moreinfo.rst +++ /dev/null @@ -1,13 +0,0 @@ -**************** -More information -**************** - -Useful external references -========================== - -- `The beauty of cloudinit`_ -- `Introduction to cloud-init`_ (video) - -.. _Introduction to cloud-init: http://www.youtube.com/watch?v=-zL3BdbKyGY -.. _The beauty of cloudinit: http://brandon.fuller.name/archives/2011/05/02/06.40.57/ -.. 
vi: textwidth=78 diff --git a/doc/rtd/topics/network-config-format-v2.rst b/doc/rtd/topics/network-config-format-v2.rst index ea370ef5..7f857550 100644 --- a/doc/rtd/topics/network-config-format-v2.rst +++ b/doc/rtd/topics/network-config-format-v2.rst @@ -14,7 +14,7 @@ it must include ``version: 2`` and one or more of possible device Cloud-init will read this format from system config. For example the following could be present in -``/etc/cloud/cloud.cfg.d/custom-networking.cfg``: +``/etc/cloud/cloud.cfg.d/custom-networking.cfg``:: network: version: 2 @@ -54,11 +54,11 @@ Physical devices : (Examples: ethernet, wifi) These can dynamically come and go between reboots and even during runtime (hotplugging). In the generic case, they - can be selected by ``match:`` rules on desired properties, such as name/name - pattern, MAC address, driver, or device paths. In general these will match - any number of devices (unless they refer to properties which are unique - such as the full path or MAC address), so without further knowledge about - the hardware these will always be considered as a group. + can be selected by ``match:`` rules on desired properties, such as + name/name pattern, MAC address, driver, or device paths. In general these + will match any number of devices (unless they refer to properties which are + unique such as the full path or MAC address), so without further knowledge + about the hardware these will always be considered as a group. It is valid to specify no match rules at all, in which case the ID field is simply the interface name to be matched. This is mostly useful if you want @@ -228,8 +228,8 @@ Example: :: **parameters**: *<(mapping)>* -Customization parameters for special bonding options. Time values are specified -in seconds unless otherwise specified. +Customization parameters for special bonding options. Time values are +specified in seconds unless otherwise specified. **mode**: *<(scalar)>* @@ -367,8 +367,8 @@ Example: :: **parameters**: <*(mapping)>* -Customization parameters for special bridging options. Time values are specified -in seconds unless otherwise specified. +Customization parameters for special bridging options. Time values are +specified in seconds unless otherwise specified. **ageing-time**: <*(scalar)>* diff --git a/doc/rtd/topics/network-config.rst b/doc/rtd/topics/network-config.rst index 1e994551..1520ba9a 100644 --- a/doc/rtd/topics/network-config.rst +++ b/doc/rtd/topics/network-config.rst @@ -163,10 +163,11 @@ found in Ubuntu and Debian. - **Netplan** -Since Ubuntu 16.10, codename Yakkety, the ``netplan`` project has been an -optional network configuration tool which consumes :ref:`network_config_v2` -input and renders network configuration for supported backends such as -``systemd-networkd`` and ``NetworkManager``. +Introduced in Ubuntu 16.10 (Yakkety Yak), `netplan <https://netplan.io/>`_ has +been the default network configuration tool in Ubuntu since 17.10 (Artful +Aardvark). netplan consumes :ref:`network_config_v2` input and renders +network configuration for supported backends such as ``systemd-networkd`` and +``NetworkManager``. - **Sysconfig** @@ -190,7 +191,7 @@ supplying an updated configuration in cloud-config. 
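Conceptually, renderer selection is a first-match scan over this priority list. The sketch below is only a hedged illustration; the function and probe names are hypothetical, and the real logic in ``cloudinit.net.renderers`` differs in detail:

.. code-block:: python

    # Hypothetical sketch of priority-based renderer selection.
    def select_renderer(priority, probes):
        for name in priority:
            if probes.get(name, lambda: False)():
                return name  # first renderer whose availability probe passes
        raise RuntimeError('No available network renderers in %s' % priority)

    probes = {
        'netplan': lambda: True,   # e.g. the netplan binary is present
        'eni': lambda: False,
        'sysconfig': lambda: False,
        'freebsd': lambda: False,
    }
    assert select_renderer(['netplan', 'eni', 'sysconfig', 'freebsd'],
                           probes) == 'netplan'

The cloud-config override itself looks like this: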
:: system_info: network: - renderers: ['netplan', 'eni', 'sysconfig'] + renderers: ['netplan', 'eni', 'sysconfig', 'freebsd'] Network Configuration Tools diff --git a/doc/rtd/topics/security.rst b/doc/rtd/topics/security.rst new file mode 100644 index 00000000..b8386843 --- /dev/null +++ b/doc/rtd/topics/security.rst @@ -0,0 +1,5 @@ +.. _security: + +.. mdinclude:: ../../../SECURITY.md + +.. vi: textwidth=78 diff --git a/doc/rtd/topics/tests.rst b/doc/rtd/topics/tests.rst index b83bd899..aee3d7fc 100644 --- a/doc/rtd/topics/tests.rst +++ b/doc/rtd/topics/tests.rst @@ -23,7 +23,7 @@ configuration users can run the integration tests via tox: .. code-block:: shell-session - $ git clone https://git.launchpad.net/cloud-init + $ git clone https://github.com/canonical/cloud-init $ cd cloud-init $ tox -e citest -- -h @@ -53,7 +53,7 @@ explaining how to run one or the other independently. .. code-block:: shell-session - $ git clone https://git.launchpad.net/cloud-init + $ git clone https://github.com/canonical/cloud-init $ cd cloud-init $ tox -e citest -- run --verbose \ --os-name stretch --os-name xenial \ @@ -423,6 +423,57 @@ generated when running ``aws configure``: region = us-west-2 +Azure Cloud +----------- + +To run on the Azure Cloud platform, users log in with a Service Principal and +export a credentials file. A default region is used unless one is set in +``tests/cloud_tests/platforms.yaml``. Service Principal credentials are the +standard authentication used by the Azure SDK to interact with Azure services: + +Create a Service Principal account or log in + +.. code-block:: shell-session + + $ az ad sp create-for-rbac --name "APP_ID" --password "STRONG-SECRET-PASSWORD" + +.. code-block:: shell-session + + $ az login --service-principal --username "APP_ID" --password "STRONG-SECRET-PASSWORD" + +Export the credentials + +.. code-block:: shell-session + + $ az ad sp create-for-rbac --sdk-auth > $HOME/.azure/credentials.json + +.. code-block:: json + + { + "clientId": "<Service principal ID>", + "clientSecret": "<Service principal secret/password>", + "subscriptionId": "<Subscription associated with the service principal>", + "tenantId": "<The service principal's tenant>", + "activeDirectoryEndpointUrl": "https://login.microsoftonline.com", + "resourceManagerEndpointUrl": "https://management.azure.com/", + "activeDirectoryGraphResourceId": "https://graph.windows.net/", + "sqlManagementEndpointUrl": "https://management.core.windows.net:8443/", + "galleryEndpointUrl": "https://gallery.azure.com/", + "managementEndpointUrl": "https://management.core.windows.net/" + } + +Set the region in ``platforms.yaml`` + +..
code-block:: yaml + + azurecloud: + enabled: true + region: West US 2 + vm_size: Standard_DS1_v2 + storage_sku: standard_lrs + tag: ci + + Architecture ============ diff --git a/integration-requirements.txt b/integration-requirements.txt index b10ff964..897d6110 100644 --- a/integration-requirements.txt +++ b/integration-requirements.txt @@ -11,6 +11,8 @@ boto3==1.5.9 # ssh communication paramiko==2.4.2 +cryptography==2.4.2 + # lxd backend # 04/03/2018: enables use of lxd 3.0 @@ -18,3 +20,12 @@ git+https://github.com/lxc/pylxd.git@4b8ab1802f9aee4eb29cf7b119dae0aa47150779 # finds latest image information git+https://git.launchpad.net/simplestreams + +# azure backend +azure-storage==0.36.0 +msrestazure==0.6.1 +azure-common==1.1.23 +azure-mgmt-compute==7.0.0 +azure-mgmt-network==5.0.0 +azure-mgmt-resource==4.0.0 +azure-mgmt-storage==6.0.0 diff --git a/packages/bddeb b/packages/bddeb index 95602a02..209765a5 100755 --- a/packages/bddeb +++ b/packages/bddeb @@ -177,6 +177,11 @@ def main(): # output like 0.7.6-1022-g36e92d3 ver_data = read_version() + if ver_data['is_release_branch_ci']: + # If we're performing CI for a new release branch, we don't yet + # have the tag required to generate version_long; use version + # instead. + ver_data['version_long'] = ver_data['version'] # This is really only a temporary archive # since we will extract it then add in the debian @@ -192,7 +197,9 @@ def main(): break if path is None: print("Creating a temp tarball using the 'make-tarball' helper") - run_helper('make-tarball', ['--long', '--output=' + tarball_fp]) + run_helper('make-tarball', + ['--version', ver_data['version_long'], + '--output=' + tarball_fp]) print("Extracting temporary tarball %r" % (tarball)) cmd = ['tar', '-xvzf', tarball_fp, '-C', tdir] diff --git a/packages/brpm b/packages/brpm index a154ef29..4004fd0e 100755 --- a/packages/brpm +++ b/packages/brpm @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 import argparse import glob diff --git a/packages/debian/manpages b/packages/debian/manpages new file mode 100644 index 00000000..605cfd67 --- /dev/null +++ b/packages/debian/manpages @@ -0,0 +1,3 @@ +doc/man/cloud-id.1 +doc/man/cloud-init-per.1 +doc/man/cloud-init.1 diff --git a/packages/pkg-deps.json b/packages/pkg-deps.json index 72409dd8..cf065219 100644 --- a/packages/pkg-deps.json +++ b/packages/pkg-deps.json @@ -29,27 +29,27 @@ ], "renames" : { "jinja2" : { - "3" : "python34-jinja2" + "3" : "python36-jinja2" }, "jsonschema" : { - "3" : "python34-jsonschema" + "3" : "python36-jsonschema" }, "pyflakes" : { "2" : "pyflakes", - "3" : "python34-pyflakes" + "3" : "python36-pyflakes" }, "pyyaml" : { "2" : "PyYAML", - "3" : "python34-PyYAML" + "3" : "python36-PyYAML" }, "pyserial" : { "2" : "pyserial" }, "requests" : { - "3" : "python34-requests" + "3" : "python36-requests" }, "six" : { - "3" : "python34-six" + "3" : "python36-six" } }, "requires" : [ diff --git a/packages/redhat/cloud-init.spec.in b/packages/redhat/cloud-init.spec.in index 6b2022ba..057a5784 100644 --- a/packages/redhat/cloud-init.spec.in +++ b/packages/redhat/cloud-init.spec.in @@ -205,7 +205,9 @@ fi %dir %{_sysconfdir}/cloud/templates %config(noreplace) %{_sysconfdir}/cloud/templates/* %config(noreplace) %{_sysconfdir}/rsyslog.d/21-cloudinit.conf -%{_sysconfdir}/bash_completion.d/cloud-init + +# Bash completion script +%{_datadir}/bash-completion/completions/cloud-init %{_libexecdir}/%{name} %dir %{_sharedstatedir}/cloud diff --git a/packages/suse/cloud-init.spec.in b/packages/suse/cloud-init.spec.in index 
26894b34..004b875f 100644 --- a/packages/suse/cloud-init.spec.in +++ b/packages/suse/cloud-init.spec.in @@ -120,7 +120,9 @@ version_pys=$(cd "%{buildroot}" && find . -name version.py -type f) %config(noreplace) %{_sysconfdir}/cloud/cloud.cfg.d/README %dir %{_sysconfdir}/cloud/templates %config(noreplace) %{_sysconfdir}/cloud/templates/* -%{_sysconfdir}/bash_completion.d/cloud-init + +# Bash completion script +%{_datadir}/bash-completion/completions/cloud-init %{_sysconfdir}/dhcp/dhclient-exit-hooks.d/hook-dhclient %{_sysconfdir}/NetworkManager/dispatcher.d/hook-network-manager @@ -30,6 +30,8 @@ VARIANT = None def is_f(p): return os.path.isfile(p) +def is_generator(p): + return '-generator' in p def tiny_p(cmd, capture=True): # Darn python 2.6 doesn't have check_output (argggg) @@ -90,7 +92,7 @@ def read_requires(): return str(deps).splitlines() -def render_tmpl(template): +def render_tmpl(template, mode=None): """render template into a tmpdir under same dir as setup.py This is rendered to a temporary directory under the top level @@ -119,6 +121,8 @@ def render_tmpl(template): VARIANT, template, fpath]) else: tiny_p([sys.executable, './tools/render-cloudcfg', template, fpath]) + if mode: + os.chmod(fpath, mode) # return path relative to setup.py return os.path.join(os.path.basename(tmpd), bname) @@ -138,8 +142,11 @@ INITSYS_FILES = { 'systemd': [render_tmpl(f) for f in (glob('systemd/*.tmpl') + glob('systemd/*.service') + - glob('systemd/*.target')) if is_f(f)], - 'systemd.generators': [f for f in glob('systemd/*-generator') if is_f(f)], + glob('systemd/*.target')) + if (is_f(f) and not is_generator(f))], + 'systemd.generators': [ + render_tmpl(f, mode=0o755) + for f in glob('systemd/*') if is_f(f) and is_generator(f)], 'upstart': [f for f in glob('upstart/*') if is_f(f)], } INITSYS_ROOTS = { @@ -167,6 +174,19 @@ if os.uname()[0] == 'FreeBSD': USR_LIB_EXEC = "usr/local/lib" elif os.path.isfile('/etc/redhat-release'): USR_LIB_EXEC = "usr/libexec" +elif os.path.isfile('/etc/system-release-cpe'): + with open('/etc/system-release-cpe') as f: + cpe_data = f.read().rstrip().split(':') + + if cpe_data[1] == "/o": + # URI formatted CPE (e.g. cpe:/o:vendor:product:version) + inc = 0 + else: + # String formatted CPE (e.g. cpe:2.3:o:vendor:product:version) + inc = 1 + (cpe_vendor, cpe_product, cpe_version) = cpe_data[2+inc:5+inc] + if cpe_vendor == "amazon": + USR_LIB_EXEC = "usr/libexec" class MyEggInfo(egg_info): @@ -238,13 +258,14 @@ if not in_virtualenv(): INITSYS_ROOTS[k] = "/" + INITSYS_ROOTS[k] data_files = [ - (ETC + '/bash_completion.d', ['bash_completion/cloud-init']), (ETC + '/cloud', [render_tmpl("config/cloud.cfg.tmpl")]), (ETC + '/cloud/cloud.cfg.d', glob('config/cloud.cfg.d/*')), (ETC + '/cloud/templates', glob('templates/*')), (USR_LIB_EXEC + '/cloud-init', ['tools/ds-identify', 'tools/uncloud-init', 'tools/write-ssh-key-fingerprints']), + (USR + '/share/bash-completion/completions', + ['bash_completion/cloud-init']), (USR + '/share/doc/cloud-init', [f for f in glob('doc/*') if is_f(f)]), (USR + '/share/doc/cloud-init/examples', [f for f in glob('doc/examples/*') if is_f(f)]), diff --git a/systemd/cloud-final.service.tmpl b/systemd/cloud-final.service.tmpl index e2b91255..8207b18c 100644 --- a/systemd/cloud-final.service.tmpl +++ b/systemd/cloud-final.service.tmpl @@ -15,6 +15,7 @@ ExecStart=/usr/bin/cloud-init modules --mode=final RemainAfterExit=yes TimeoutSec=0 KillMode=process +TasksMax=infinity # Output needs to appear in instance console output StandardOutput=journal+console diff --git a/systemd/cloud-init-generator b/systemd/cloud-init-generator.tmpl index
bd9f2678..45efa243 100755 --- a/systemd/cloud-init-generator +++ b/systemd/cloud-init-generator.tmpl @@ -1,3 +1,4 @@ +## template:jinja #!/bin/sh set -f @@ -9,7 +10,11 @@ DISABLE="disabled" FOUND="found" NOTFOUND="notfound" RUN_ENABLED_FILE="$LOG_D/$ENABLE" +{% if variant in ["suse"] %} +CLOUD_SYSTEM_TARGET="/usr/lib/systemd/system/cloud-init.target" +{% else %} CLOUD_SYSTEM_TARGET="/lib/systemd/system/cloud-init.target" +{% endif %} CLOUD_TARGET_NAME="cloud-init.target" # lxc sets 'container', but lets make that explicitly a global CONTAINER="${container}" @@ -77,7 +82,12 @@ default() { } check_for_datasource() { - local ds_rc="" dsidentify="/usr/lib/cloud-init/ds-identify" + local ds_rc="" +{% if variant in ["redhat", "fedora", "centos"] %} + local dsidentify="/usr/libexec/cloud-init/ds-identify" +{% else %} + local dsidentify="/usr/lib/cloud-init/ds-identify" +{% endif %} if [ ! -x "$dsidentify" ]; then debug 1 "no ds-identify in $dsidentify. _RET=$FOUND" return 0 diff --git a/systemd/cloud-init.service.tmpl b/systemd/cloud-init.service.tmpl index d8dc393e..9ad3574c 100644 --- a/systemd/cloud-init.service.tmpl +++ b/systemd/cloud-init.service.tmpl @@ -3,6 +3,8 @@ Description=Initial cloud-init job (metadata service crawler) DefaultDependencies=no Wants=cloud-init-local.service +Wants=sshd-keygen.service +Wants=sshd.service After=cloud-init-local.service After=systemd-networkd-wait-online.service {% if variant in ["ubuntu", "unknown", "debian"] %} @@ -10,14 +12,17 @@ After=networking.service {% endif %} {% if variant in ["centos", "fedora", "redhat"] %} After=network.service +After=NetworkManager.service {% endif %} {% if variant in ["suse"] %} -Before=wicked.service +After=wicked.service # setting hostname via hostnamectl depends on dbus, which otherwise # would not be guaranteed at this point. After=dbus.service {% endif %} Before=network-online.target +Before=sshd-keygen.service +Before=sshd.service {% if variant in ["ubuntu", "unknown", "debian"] %} Before=sysinit.target Conflicts=shutdown.target diff --git a/sysvinit/freebsd/cloudconfig b/sysvinit/freebsd/cloudconfig index e4064fa3..fb604f4d 100755 --- a/sysvinit/freebsd/cloudconfig +++ b/sysvinit/freebsd/cloudconfig @@ -22,4 +22,7 @@ cloudconfig_start() } load_rc_config $name + +: ${cloudconfig_enable="NO"} + run_rc_command "$1" diff --git a/sysvinit/freebsd/cloudfinal b/sysvinit/freebsd/cloudfinal index b6894c39..72047653 100755 --- a/sysvinit/freebsd/cloudfinal +++ b/sysvinit/freebsd/cloudfinal @@ -22,4 +22,7 @@ cloudfinal_start() } load_rc_config $name + +: ${cloudfinal_enable="NO"} + run_rc_command "$1" diff --git a/sysvinit/freebsd/cloudinit b/sysvinit/freebsd/cloudinit index 33263009..aa5bd118 100755 --- a/sysvinit/freebsd/cloudinit +++ b/sysvinit/freebsd/cloudinit @@ -1,7 +1,7 @@ #!/bin/sh # PROVIDE: cloudinit -# REQUIRE: FILESYSTEMS NETWORKING cloudinitlocal +# REQUIRE: FILESYSTEMS NETWORKING cloudinitlocal ldconfig devd # BEFORE: cloudconfig cloudfinal . 
/etc/rc.subr @@ -22,4 +22,7 @@ cloudinit_start() } load_rc_config $name + +: ${cloudinit_enable="NO"} + run_rc_command "$1" diff --git a/sysvinit/freebsd/cloudinitlocal b/sysvinit/freebsd/cloudinitlocal index 7a034b3b..cb67b4a2 100755 --- a/sysvinit/freebsd/cloudinitlocal +++ b/sysvinit/freebsd/cloudinitlocal @@ -22,4 +22,7 @@ cloudlocal_start() } load_rc_config $name + +: ${cloudinitlocal_enable="NO"} + run_rc_command "$1" diff --git a/templates/chef_client.rb.tmpl b/templates/chef_client.rb.tmpl index cbb6b15f..99978d3b 100644 --- a/templates/chef_client.rb.tmpl +++ b/templates/chef_client.rb.tmpl @@ -1,6 +1,6 @@ ## template:jinja {# -This file is only utilized if the module 'cc_chef' is enabled in +This file is only utilized if the module 'cc_chef' is enabled in cloud-config. Specifically, in order to enable it you need to add the following to config: chef: @@ -56,3 +56,6 @@ pid_file "{{pid_file}}" {% if show_time %} Chef::Log::Formatter.show_time = true {% endif %} +{% if encrypted_data_bag_secret %} +encrypted_data_bag_secret "{{encrypted_data_bag_secret}}" +{% endif %} diff --git a/templates/ntp.conf.debian.tmpl b/templates/ntp.conf.debian.tmpl index 3f07eeaa..affe983d 100644 --- a/templates/ntp.conf.debian.tmpl +++ b/templates/ntp.conf.debian.tmpl @@ -19,7 +19,8 @@ filegen clockstats file clockstats type day enable # pool.ntp.org maps to about 1000 low-stratum NTP servers. Your server will # pick a different set every time it starts up. Please consider joining the # pool: <http://www.pool.ntp.org/join.html> -{% if pools -%}# pools{% endif %} +{% if pools %}# pools +{% endif %} {% for pool in pools -%} pool {{pool}} iburst {% endfor %} diff --git a/test-requirements.txt b/test-requirements.txt index d9d41b57..6fb22b24 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -1,6 +1,5 @@ # Needed generally in tests httpretty>=0.7.1 -mock nose unittest2 coverage diff --git a/tests/cloud_tests/__init__.py b/tests/cloud_tests/__init__.py index dd436989..6c632f99 100644 --- a/tests/cloud_tests/__init__.py +++ b/tests/cloud_tests/__init__.py @@ -22,7 +22,8 @@ def _initialize_logging(): logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) formatter = logging.Formatter( - '%(asctime)s - %(name)s - %(levelname)s - %(message)s') + '%(asctime)s - %(pathname)s:%(funcName)s:%(lineno)s ' + '[%(levelname)s]: %(message)s') console = logging.StreamHandler() console.setLevel(logging.DEBUG) diff --git a/tests/cloud_tests/config.py b/tests/cloud_tests/config.py index 8bd569fd..06536edc 100644 --- a/tests/cloud_tests/config.py +++ b/tests/cloud_tests/config.py @@ -114,7 +114,7 @@ def load_os_config(platform_name, os_name, require_enabled=False, feature_conf = main_conf['features'] feature_groups = conf.get('feature_groups', []) overrides = merge_config(get(conf, 'features'), feature_overrides) - conf['arch'] = c_util.get_architecture() + conf['arch'] = c_util.get_dpkg_architecture() conf['features'] = merge_feature_groups( feature_conf, feature_groups, overrides) diff --git a/tests/cloud_tests/platforms.yaml b/tests/cloud_tests/platforms.yaml index 448aa98d..eaaa0a71 100644 --- a/tests/cloud_tests/platforms.yaml +++ b/tests/cloud_tests/platforms.yaml @@ -66,5 +66,12 @@ platforms: {{ config_get("user.vendor-data", properties.default) }} nocloud-kvm: enabled: true + cache_mode: cache=none,aio=native + azurecloud: + enabled: true + region: West US 2 + vm_size: Standard_DS1_v2 + storage_sku: standard_lrs + tag: ci # vi: ts=4 expandtab diff --git 
a/tests/cloud_tests/platforms/__init__.py b/tests/cloud_tests/platforms/__init__.py index a01e51ac..6a410b84 100644 --- a/tests/cloud_tests/platforms/__init__.py +++ b/tests/cloud_tests/platforms/__init__.py @@ -5,11 +5,13 @@ from .ec2 import platform as ec2 from .lxd import platform as lxd from .nocloudkvm import platform as nocloudkvm +from .azurecloud import platform as azurecloud PLATFORMS = { 'ec2': ec2.EC2Platform, 'nocloud-kvm': nocloudkvm.NoCloudKVMPlatform, 'lxd': lxd.LXDPlatform, + 'azurecloud': azurecloud.AzureCloudPlatform, } diff --git a/tests/cloud_tests/platforms/azurecloud/__init__.py b/tests/cloud_tests/platforms/azurecloud/__init__.py new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/tests/cloud_tests/platforms/azurecloud/__init__.py diff --git a/tests/cloud_tests/platforms/azurecloud/image.py b/tests/cloud_tests/platforms/azurecloud/image.py new file mode 100644 index 00000000..aad2bca1 --- /dev/null +++ b/tests/cloud_tests/platforms/azurecloud/image.py @@ -0,0 +1,116 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +"""Azure Cloud image Base class.""" + +from tests.cloud_tests import LOG + +from ..images import Image +from .snapshot import AzureCloudSnapshot + + +class AzureCloudImage(Image): + """Azure Cloud backed image.""" + + platform_name = 'azurecloud' + + def __init__(self, platform, config, image_id): + """Set up image. + + @param platform: platform object + @param config: image configuration + @param image_id: image id used to boot instance + """ + super(AzureCloudImage, self).__init__(platform, config) + self._img_instance = None + self.image_id = image_id + + @property + def _instance(self): + """Internal use only, returns a running instance""" + if not self._img_instance: + self._img_instance = self.platform.create_instance( + self.properties, self.config, self.features, + self.image_id, user_data=None) + self._img_instance.start(wait=True, wait_for_cloud_init=True) + return self._img_instance + + def destroy(self): + """Delete the instance used to create a custom image.""" + if self._img_instance: + LOG.debug('Deleting backing instance %s', + self._img_instance.vm_name) + delete_vm = self.platform.compute_client.virtual_machines.delete( + self.platform.resource_group.name, self._img_instance.vm_name) + delete_vm.wait() + + super(AzureCloudImage, self).destroy() + + def _execute(self, *args, **kwargs): + """Execute command in image, modifying image.""" + LOG.debug('executing commands on image') + self._instance.start(wait=True) + return self._instance._execute(*args, **kwargs) + + def push_file(self, local_path, remote_path): + """Copy file at 'local_path' to instance at 'remote_path'.""" + LOG.debug('pushing file to image') + return self._instance.push_file(local_path, remote_path) + + def run_script(self, *args, **kwargs): + """Run script in image, modifying image. + + @return_value: script output + """ + LOG.debug('running script on image') + self._instance.start() + return self._instance.run_script(*args, **kwargs) + + def snapshot(self): + """ Create snapshot (image) of instance, wait until done. + + If no instance has been booted, base image is returned. + Otherwise runs the clean script, deallocates, generalizes + and creates custom image from instance. 
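The ``PLATFORMS`` mapping added to ``tests/cloud_tests/platforms/__init__.py`` above is a plain registry, so constructing a platform reduces to a dictionary lookup. A minimal sketch (the factory function and dummy class here are hypothetical, not the module's actual API):

.. code-block:: python

    # Hypothetical registry lookup mirroring the PLATFORMS mapping above.
    class DummyPlatform:
        def __init__(self, config):
            self.config = config

    PLATFORMS = {'dummy': DummyPlatform}  # real map: name -> platform class

    def get_platform(platform_name, config):
        try:
            platform_cls = PLATFORMS[platform_name]
        except KeyError:
            raise ValueError(
                'platform %s not in %s' % (platform_name, sorted(PLATFORMS)))
        return platform_cls(config)

    platform = get_platform('dummy', {'tag': 'ci'})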
+ """ + LOG.debug('creating snapshot of image') + if not self._img_instance: + LOG.debug('No existing image, snapshotting base image') + return AzureCloudSnapshot(self.platform, self.properties, + self.config, self.features, + self._instance.vm_name, + delete_on_destroy=False) + + LOG.debug('creating snapshot from instance: %s', self._img_instance) + if self.config.get('boot_clean_script'): + self._img_instance.run_script(self.config.get('boot_clean_script')) + + LOG.debug('deallocating instance %s', self._instance.vm_name) + deallocate = self.platform.compute_client.virtual_machines.deallocate( + self.platform.resource_group.name, self._instance.vm_name) + deallocate.wait() + + LOG.debug('generalizing instance %s', self._instance.vm_name) + self.platform.compute_client.virtual_machines.generalize( + self.platform.resource_group.name, self._instance.vm_name) + + image_params = { + "location": self.platform.location, + "properties": { + "sourceVirtualMachine": { + "id": self._img_instance.instance.id + } + } + } + LOG.debug('updating resource group image %s', self._instance.vm_name) + self.platform.compute_client.images.create_or_update( + self.platform.resource_group.name, self._instance.vm_name, + image_params) + + LOG.debug('destroying self') + self.destroy() + + LOG.debug('snapshot complete') + return AzureCloudSnapshot(self.platform, self.properties, self.config, + self.features, self._instance.vm_name) + +# vi: ts=4 expandtab diff --git a/tests/cloud_tests/platforms/azurecloud/instance.py b/tests/cloud_tests/platforms/azurecloud/instance.py new file mode 100644 index 00000000..f1e28a96 --- /dev/null +++ b/tests/cloud_tests/platforms/azurecloud/instance.py @@ -0,0 +1,248 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +"""Base Azure Cloud instance.""" + +from datetime import datetime, timedelta +from urllib.parse import urlparse +from time import sleep +import traceback +import os + + +# pylint: disable=no-name-in-module +from azure.storage.blob import BlockBlobService, BlobPermissions +from msrestazure.azure_exceptions import CloudError + +from tests.cloud_tests import LOG + +from ..instances import Instance + + +class AzureCloudInstance(Instance): + """Azure Cloud backed instance.""" + + platform_name = 'azurecloud' + + def __init__(self, platform, properties, config, + features, image_id, user_data=None): + """Set up instance. 
+ + @param platform: platform object + @param properties: dictionary of properties + @param config: dictionary of configuration values + @param features: dictionary of supported feature flags + @param image_id: image to find and/or use + @param user_data: test user-data to pass to instance + """ + super(AzureCloudInstance, self).__init__( + platform, image_id, properties, config, features) + + self.ssh_port = 22 + self.ssh_ip = None + self.instance = None + self.image_id = image_id + self.vm_name = 'ci-azure-i-%s' % self.platform.tag + self.user_data = user_data + self.ssh_key_file = os.path.join( + platform.config['data_dir'], platform.config['private_key']) + self.ssh_pubkey_file = os.path.join( + platform.config['data_dir'], platform.config['public_key']) + self.blob_client, self.container, self.blob = None, None, None + + def start(self, wait=True, wait_for_cloud_init=False): + """Start instance with the platforms NIC.""" + if self.instance: + return + data = self.image_id.split('-') + release, support = data[2].replace('_', '.'), data[3] + sku = '%s-%s' % (release, support) if support == 'LTS' else release + image_resource_id = '/subscriptions/%s' \ + '/resourceGroups/%s' \ + '/providers/Microsoft.Compute/images/%s' % ( + self.platform.subscription_id, + self.platform.resource_group.name, + self.image_id) + storage_uri = "http://%s.blob.core.windows.net" \ + % self.platform.storage.name + with open(self.ssh_pubkey_file, 'r') as key: + ssh_pub_keydata = key.read() + + image_exists = False + try: + LOG.debug('finding image in resource group using image_id') + self.platform.compute_client.images.get( + self.platform.resource_group.name, + self.image_id + ) + image_exists = True + LOG.debug('image found, launching instance, image_id=%s', + self.image_id) + except CloudError: + LOG.debug(('image not found, launching instance with base image, ' + 'image_id=%s'), self.image_id) + pass + + vm_params = { + 'name': self.vm_name, + 'location': self.platform.location, + 'os_profile': { + 'computer_name': 'CI-%s' % self.platform.tag, + 'admin_username': self.ssh_username, + "customData": self.user_data, + "linuxConfiguration": { + "disable_password_authentication": True, + "ssh": { + "public_keys": [{ + "path": "/home/%s/.ssh/authorized_keys" % + self.ssh_username, + "keyData": ssh_pub_keydata + }] + } + } + }, + "diagnosticsProfile": { + "bootDiagnostics": { + "storageUri": storage_uri, + "enabled": True + } + }, + 'hardware_profile': { + 'vm_size': self.platform.vm_size + }, + 'storage_profile': { + 'image_reference': { + 'id': image_resource_id + } if image_exists else { + 'publisher': 'Canonical', + 'offer': 'UbuntuServer', + 'sku': sku, + 'version': 'latest' + } + }, + 'network_profile': { + 'network_interfaces': [{ + 'id': self.platform.nic.id + }] + }, + 'tags': { + 'Name': self.platform.tag, + } + } + + try: + self.instance = self.platform.compute_client.virtual_machines.\ + create_or_update(self.platform.resource_group.name, + self.vm_name, vm_params) + LOG.debug('creating instance %s from image_id=%s', self.vm_name, + self.image_id) + except CloudError: + raise RuntimeError('failed creating instance:\n{}'.format( + traceback.format_exc())) + + if wait: + self.instance.wait() + self.ssh_ip = self.platform.network_client.\ + public_ip_addresses.get( + self.platform.resource_group.name, + self.platform.public_ip.name + ).ip_address + self._wait_for_system(wait_for_cloud_init) + + self.instance = self.instance.result() + self.blob_client, self.container, self.blob =\ + 
self._get_blob_client() + + def shutdown(self, wait=True): + """Find the console log, then stop/deallocate the VM.""" + LOG.debug('waiting on console log before stopping') + attempts, exists = 5, False + while not exists and attempts: + try: + attempts -= 1 + exists = self.blob_client.get_blob_to_bytes( + self.container, self.blob) + LOG.debug('found console log') + except Exception as e: + if attempts: + LOG.debug('Unable to find console log, ' + '%s attempts remaining', attempts) + sleep(15) + else: + LOG.warning('Could not find console log: %s', e) + pass + + LOG.debug('stopping instance %s', self.vm_name) + vm_deallocate = \ + self.platform.compute_client.virtual_machines.deallocate( + self.platform.resource_group.name, self.vm_name) + if wait: + vm_deallocate.wait() + + def destroy(self): + """Delete the VM and close all connections.""" + if self.instance: + LOG.debug('destroying instance: %s', self.vm_name) + vm_delete = self.platform.compute_client.virtual_machines.delete( + self.platform.resource_group.name, self.vm_name) + vm_delete.wait() + + self._ssh_close() + + super(AzureCloudInstance, self).destroy() + + def _execute(self, command, stdin=None, env=None): + """Execute command on instance.""" + env_args = [] + if env: + env_args = ['env'] + ["%s=%s" % (k, v) for k, v in env.items()] + + return self._ssh(['sudo'] + env_args + list(command), stdin=stdin) + + def _get_blob_client(self): + """ + Use VM details to retrieve container and blob name. + Then create a blob service client with a SAS token to + retrieve the console log. + + :return: blob service, container name, blob name + """ + LOG.debug('creating blob service for console log') + storage = self.platform.storage_client.storage_accounts.get_properties( + self.platform.resource_group.name, self.platform.storage.name) + + keys = self.platform.storage_client.storage_accounts.list_keys( + self.platform.resource_group.name, self.platform.storage.name + ).keys[0].value + + virtual_machine = self.platform.compute_client.virtual_machines.get( + self.platform.resource_group.name, self.instance.name, + expand='instanceView') + + blob_uri = virtual_machine.instance_view.boot_diagnostics.\ + serial_console_log_blob_uri + + container, blob = urlparse(blob_uri).path.split('/')[-2:] + + blob_client = BlockBlobService( + account_name=storage.name, + account_key=keys) + + sas = blob_client.generate_blob_shared_access_signature( + container_name=container, blob_name=blob, protocol='https', + expiry=datetime.utcnow() + timedelta(hours=1), + permission=BlobPermissions.READ) + + blob_client = BlockBlobService( + account_name=storage.name, + sas_token=sas) + + return blob_client, container, blob + + def console_log(self): + """Instance console. + + @return_value: bytes of this instance’s console + """ + boot_diagnostics = self.blob_client.get_blob_to_bytes( + self.container, self.blob) + return boot_diagnostics.content diff --git a/tests/cloud_tests/platforms/azurecloud/platform.py b/tests/cloud_tests/platforms/azurecloud/platform.py new file mode 100644 index 00000000..cb62a74b --- /dev/null +++ b/tests/cloud_tests/platforms/azurecloud/platform.py @@ -0,0 +1,233 @@ +# This file is part of cloud-init. See LICENSE file for license information.
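The ``_get_blob_client``/``console_log`` pair above boils down to minting a short-lived, read-only SAS token with the storage account key, then reading the boot-diagnostics blob using that token alone. A condensed sketch, assuming the ``azure-storage==0.36.0`` API pinned in ``integration-requirements.txt``:

.. code-block:: python

    from datetime import datetime, timedelta

    from azure.storage.blob import BlobPermissions, BlockBlobService

    def read_boot_diagnostics(account, key, container, blob):
        # mint a one-hour, read-only SAS token using the account key
        key_client = BlockBlobService(account_name=account, account_key=key)
        sas = key_client.generate_blob_shared_access_signature(
            container_name=container, blob_name=blob, protocol='https',
            expiry=datetime.utcnow() + timedelta(hours=1),
            permission=BlobPermissions.READ)
        # read the serial console blob with the SAS token only
        sas_client = BlockBlobService(account_name=account, sas_token=sas)
        return sas_client.get_blob_to_bytes(container, blob).content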
+ +"""Base Azure Cloud class.""" + +import os +import base64 +import traceback +from datetime import datetime +from tests.cloud_tests import LOG + +# pylint: disable=no-name-in-module +from azure.common.credentials import ServicePrincipalCredentials +# pylint: disable=no-name-in-module +from azure.mgmt.resource import ResourceManagementClient +# pylint: disable=no-name-in-module +from azure.mgmt.network import NetworkManagementClient +# pylint: disable=no-name-in-module +from azure.mgmt.compute import ComputeManagementClient +# pylint: disable=no-name-in-module +from azure.mgmt.storage import StorageManagementClient +from msrestazure.azure_exceptions import CloudError + +from .image import AzureCloudImage +from .instance import AzureCloudInstance +from ..platforms import Platform + +from cloudinit import util as c_util + + +class AzureCloudPlatform(Platform): + """Azure Cloud test platforms.""" + + platform_name = 'azurecloud' + + def __init__(self, config): + """Set up platform.""" + super(AzureCloudPlatform, self).__init__(config) + self.tag = '%s-%s' % ( + config['tag'], datetime.now().strftime('%Y%m%d%H%M%S')) + self.storage_sku = config['storage_sku'] + self.vm_size = config['vm_size'] + self.location = config['region'] + + try: + self.credentials, self.subscription_id = self._get_credentials() + + self.resource_client = ResourceManagementClient( + self.credentials, self.subscription_id) + self.compute_client = ComputeManagementClient( + self.credentials, self.subscription_id) + self.network_client = NetworkManagementClient( + self.credentials, self.subscription_id) + self.storage_client = StorageManagementClient( + self.credentials, self.subscription_id) + + self.resource_group = self._create_resource_group() + self.public_ip = self._create_public_ip_address() + self.storage = self._create_storage_account(config) + self.vnet = self._create_vnet() + self.subnet = self._create_subnet() + self.nic = self._create_nic() + except CloudError: + raise RuntimeError('failed creating a resource:\n{}'.format( + traceback.format_exc())) + + def create_instance(self, properties, config, features, + image_id, user_data=None): + """Create an instance + + @param properties: image properties + @param config: image configuration + @param features: image features + @param image_id: string of image id + @param user_data: test user-data to pass to instance + @return_value: cloud_tests.instances instance + """ + if user_data is not None: + user_data = str(base64.b64encode( + user_data.encode('utf-8')), 'utf-8') + + return AzureCloudInstance(self, properties, config, features, + image_id, user_data) + + def get_image(self, img_conf): + """Get image using specified image configuration. 
+ + @param img_conf: configuration for image + @return_value: cloud_tests.images instance + """ + ss_region = self.azure_location_to_simplestreams_region() + + filters = [ + 'arch=%s' % 'amd64', + 'endpoint=https://management.core.windows.net/', + 'region=%s' % ss_region, + 'release=%s' % img_conf['release'] + ] + + LOG.debug('finding image using streams') + image = self._query_streams(img_conf, filters) + + try: + image_id = image['id'] + LOG.debug('found image: %s', image_id) + if image_id.find('__') > 0: + image_id = image_id.split('__')[1] + LOG.debug('image_id shortened to %s', image_id) + except KeyError: + raise RuntimeError('no images found for %s' % img_conf['release']) + + return AzureCloudImage(self, img_conf, image_id) + + def destroy(self): + """Delete all resources in resource group.""" + LOG.debug("Deleting resource group: %s", self.resource_group.name) + delete = self.resource_client.resource_groups.delete( + self.resource_group.name) + delete.wait() + + def azure_location_to_simplestreams_region(self): + """Convert location to simplestreams region""" + location = self.location.lower().replace(' ', '') + LOG.debug('finding location %s using simple streams', location) + regions_file = os.path.join( + os.path.dirname(os.path.abspath(__file__)), 'regions.json') + region_simplestreams_map = c_util.load_json( + c_util.load_file(regions_file)) + return region_simplestreams_map.get(location, location) + + def _get_credentials(self): + """Get credentials from environment""" + LOG.debug('getting credentials from environment') + cred_file = os.path.expanduser('~/.azure/credentials.json') + try: + azure_creds = c_util.load_json( + c_util.load_file(cred_file)) + subscription_id = azure_creds['subscriptionId'] + credentials = ServicePrincipalCredentials( + client_id=azure_creds['clientId'], + secret=azure_creds['clientSecret'], + tenant=azure_creds['tenantId']) + return credentials, subscription_id + except KeyError: + raise RuntimeError('Please configure Azure service principal' + ' credentials in %s' % cred_file) + + def _create_resource_group(self): + """Create resource group""" + LOG.debug('creating resource group') + resource_group_name = self.tag + resource_group_params = { + 'location': self.location + } + resource_group = self.resource_client.resource_groups.create_or_update( + resource_group_name, resource_group_params) + return resource_group + + def _create_storage_account(self, config): + LOG.debug('creating storage account') + storage_account_name = 'storage%s' % datetime.now().\ + strftime('%Y%m%d%H%M%S') + storage_params = { + 'sku': { + 'name': config['storage_sku'] + }, + 'kind': "Storage", + 'location': self.location + } + storage_account = self.storage_client.storage_accounts.create( + self.resource_group.name, storage_account_name, storage_params) + return storage_account.result() + + def _create_public_ip_address(self): + """Create public ip address""" + LOG.debug('creating public ip address') + public_ip_name = '%s-ip' % self.resource_group.name + public_ip_params = { + 'location': self.location, + 'public_ip_allocation_method': 'Dynamic' + } + ip = self.network_client.public_ip_addresses.create_or_update( + self.resource_group.name, public_ip_name, public_ip_params) + return ip.result() + + def _create_vnet(self): + """create virtual network""" + LOG.debug('creating vnet') + vnet_name = '%s-vnet' % self.resource_group.name + vnet_params = { + 'location': self.location, + 'address_space': { + 'address_prefixes': ['10.0.0.0/16'] + } + } + vnet = 
self.network_client.virtual_networks.create_or_update( + self.resource_group.name, vnet_name, vnet_params) + return vnet.result() + + def _create_subnet(self): + """create sub-network""" + LOG.debug('creating subnet') + subnet_name = '%s-subnet' % self.resource_group.name + subnet_params = { + 'address_prefix': '10.0.0.0/24' + } + subnet = self.network_client.subnets.create_or_update( + self.resource_group.name, self.vnet.name, + subnet_name, subnet_params) + return subnet.result() + + def _create_nic(self): + """Create network interface controller""" + LOG.debug('creating nic') + nic_name = '%s-nic' % self.resource_group.name + nic_params = { + 'location': self.location, + 'ip_configurations': [{ + 'name': 'ipconfig', + 'subnet': { + 'id': self.subnet.id + }, + 'publicIpAddress': { + 'id': "/subscriptions/%s" + "/resourceGroups/%s/providers/Microsoft.Network" + "/publicIPAddresses/%s" % ( + self.subscription_id, self.resource_group.name, + self.public_ip.name), + } + }] + } + nic = self.network_client.network_interfaces.create_or_update( + self.resource_group.name, nic_name, nic_params) + return nic.result() diff --git a/tests/cloud_tests/platforms/azurecloud/regions.json b/tests/cloud_tests/platforms/azurecloud/regions.json new file mode 100644 index 00000000..c1b4da20 --- /dev/null +++ b/tests/cloud_tests/platforms/azurecloud/regions.json @@ -0,0 +1,42 @@ +{ + "eastasia": "East Asia", + "southeastasia": "Southeast Asia", + "centralus": "Central US", + "eastus": "East US", + "eastus2": "East US 2", + "westus": "West US", + "northcentralus": "North Central US", + "southcentralus": "South Central US", + "northeurope": "North Europe", + "westeurope": "West Europe", + "japanwest": "Japan West", + "japaneast": "Japan East", + "brazilsouth": "Brazil South", + "australiaeast": "Australia East", + "australiasoutheast": "Australia Southeast", + "southindia": "South India", + "centralindia": "Central India", + "westindia": "West India", + "canadacentral": "Canada Central", + "canadaeast": "Canada East", + "uksouth": "UK South", + "ukwest": "UK West", + "westcentralus": "West Central US", + "westus2": "West US 2", + "koreacentral": "Korea Central", + "koreasouth": "Korea South", + "francecentral": "France Central", + "francesouth": "France South", + "australiacentral": "Australia Central", + "australiacentral2": "Australia Central 2", + "uaecentral": "UAE Central", + "uaenorth": "UAE North", + "southafricanorth": "South Africa North", + "southafricawest": "South Africa West", + "switzerlandnorth": "Switzerland North", + "switzerlandwest": "Switzerland West", + "germanynorth": "Germany North", + "germanywestcentral": "Germany West Central", + "norwaywest": "Norway West", + "norwayeast": "Norway East" +} diff --git a/tests/cloud_tests/platforms/azurecloud/snapshot.py b/tests/cloud_tests/platforms/azurecloud/snapshot.py new file mode 100644 index 00000000..580cc596 --- /dev/null +++ b/tests/cloud_tests/platforms/azurecloud/snapshot.py @@ -0,0 +1,58 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +"""Base Azure Cloud snapshot.""" + +from ..snapshots import Snapshot + +from tests.cloud_tests import LOG + + +class AzureCloudSnapshot(Snapshot): + """Azure Cloud image copy backed snapshot.""" + + platform_name = 'azurecloud' + + def __init__(self, platform, properties, config, features, image_id, + delete_on_destroy=True): + """Set up snapshot. 
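The ``regions.json`` table above feeds ``azure_location_to_simplestreams_region``: the configured location is lowercased, stripped of spaces, and looked up, falling back to the raw key. A minimal reproduction (the two-entry map stands in for the full table):

.. code-block:: python

    region_map = {'westus2': 'West US 2', 'uksouth': 'UK South'}

    def to_simplestreams_region(location):
        key = location.lower().replace(' ', '')  # 'West US 2' -> 'westus2'
        return region_map.get(key, key)          # fall back to the raw key

    assert to_simplestreams_region('West US 2') == 'West US 2'
    assert to_simplestreams_region('westus2') == 'West US 2'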
+ + @param platform: platform object + @param properties: image properties + @param config: image config + @param features: supported feature flags + """ + super(AzureCloudSnapshot, self).__init__( + platform, properties, config, features) + + self.image_id = image_id + self.delete_on_destroy = delete_on_destroy + + def launch(self, user_data, meta_data=None, block=True, start=True, + use_desc=None): + """Launch instance. + + @param user_data: user-data for the instance + @param meta_data: meta_data for the instance + @param block: wait until instance is created + @param start: start instance and wait until fully started + @param use_desc: description of snapshot instance use + @return_value: an Instance + """ + if meta_data is not None: + raise ValueError("metadata not supported on Azure Cloud tests") + + instance = self.platform.create_instance( + self.properties, self.config, self.features, + self.image_id, user_data) + + return instance + + def destroy(self): + """Clean up snapshot data.""" + LOG.debug('destroying image %s', self.image_id) + if self.delete_on_destroy: + self.platform.compute_client.images.delete( + self.platform.resource_group.name, + self.image_id) + +# vi: ts=4 expandtab diff --git a/tests/cloud_tests/platforms/ec2/image.py b/tests/cloud_tests/platforms/ec2/image.py index 7bedf59d..d7b2c908 100644 --- a/tests/cloud_tests/platforms/ec2/image.py +++ b/tests/cloud_tests/platforms/ec2/image.py @@ -4,6 +4,7 @@ from ..images import Image from .snapshot import EC2Snapshot + from tests.cloud_tests import LOG diff --git a/tests/cloud_tests/platforms/ec2/platform.py b/tests/cloud_tests/platforms/ec2/platform.py index f188c27b..7a3d0fe0 100644 --- a/tests/cloud_tests/platforms/ec2/platform.py +++ b/tests/cloud_tests/platforms/ec2/platform.py @@ -135,6 +135,7 @@ class EC2Platform(Platform): def _create_internet_gateway(self): """Create Internet Gateway and assign to VPC.""" LOG.debug('creating internet gateway') + # pylint: disable=no-member internet_gateway = self.ec2_resource.create_internet_gateway() internet_gateway.attach_to_vpc(VpcId=self.vpc.id) self._tag_resource(internet_gateway) @@ -190,7 +191,7 @@ class EC2Platform(Platform): """Setup AWS EC2 VPC or return existing VPC.""" LOG.debug('creating new vpc') try: - vpc = self.ec2_resource.create_vpc( + vpc = self.ec2_resource.create_vpc( # pylint: disable=no-member CidrBlock=self.ipv4_cidr, AmazonProvidedIpv6CidrBlock=True) except botocore.exceptions.ClientError as e: diff --git a/tests/cloud_tests/platforms/lxd/instance.py b/tests/cloud_tests/platforms/lxd/instance.py index 83c97ab4..2b804a62 100644 --- a/tests/cloud_tests/platforms/lxd/instance.py +++ b/tests/cloud_tests/platforms/lxd/instance.py @@ -4,6 +4,7 @@ import os import shutil +import time from tempfile import mkdtemp from cloudinit.util import load_yaml, subp, ProcessExecutionError, which @@ -224,7 +225,18 @@ class LXDInstance(Instance): LOG.debug("%s: deleting container.", self) self.unfreeze() self.shutdown() - self.pylxd_container.delete(wait=True) + retries = [1] * 5 + for attempt, wait in enumerate(retries): + try: + self.pylxd_container.delete(wait=True) + break + except Exception: + if attempt + 1 >= len(retries): + raise + LOG.debug('Failed to delete container %s (%s/%s) retrying...', + self, attempt + 1, len(retries)) + time.sleep(wait) + self._pylxd_container = None if self.platform.container_exists(self.name): diff --git a/tests/cloud_tests/platforms/nocloudkvm/instance.py b/tests/cloud_tests/platforms/nocloudkvm/instance.py index 33ff3f24..96185b75 
100644 --- a/tests/cloud_tests/platforms/nocloudkvm/instance.py +++ b/tests/cloud_tests/platforms/nocloudkvm/instance.py @@ -74,6 +74,8 @@ class NoCloudKVMInstance(Instance): self.pid_file = None self.console_file = None self.disk = image_path + self.cache_mode = platform.config.get('cache_mode', + 'cache=none,aio=native') self.meta_data = meta_data def shutdown(self, wait=True): @@ -113,7 +115,10 @@ class NoCloudKVMInstance(Instance): pass if self.pid_file: - os.remove(self.pid_file) + try: + os.remove(self.pid_file) + except Exception: + pass self.pid = None self._ssh_close() @@ -160,13 +165,13 @@ class NoCloudKVMInstance(Instance): self.ssh_port = self.get_free_port() cmd = ['./tools/xkvm', - '--disk', '%s,cache=unsafe' % self.disk, - '--disk', '%s,cache=unsafe' % seed, + '--disk', '%s,%s' % (self.disk, self.cache_mode), + '--disk', '%s' % seed, '--netdev', ','.join(['user', 'hostfwd=tcp::%s-:22' % self.ssh_port, 'dnssearch=%s' % CI_DOMAIN]), '--', '-pidfile', self.pid_file, '-vnc', 'none', - '-m', '2G', '-smp', '2', '-nographic', + '-m', '2G', '-smp', '2', '-nographic', '-name', self.name, '-serial', 'file:' + self.console_file] subprocess.Popen(cmd, close_fds=True, diff --git a/tests/cloud_tests/platforms/nocloudkvm/platform.py b/tests/cloud_tests/platforms/nocloudkvm/platform.py index 85933463..2d1480f5 100644 --- a/tests/cloud_tests/platforms/nocloudkvm/platform.py +++ b/tests/cloud_tests/platforms/nocloudkvm/platform.py @@ -29,9 +29,13 @@ class NoCloudKVMPlatform(Platform): """ (url, path) = s_util.path_from_mirror_url(img_conf['mirror_url'], None) - filter = filters.get_filters(['arch=%s' % c_util.get_architecture(), - 'release=%s' % img_conf['release'], - 'ftype=disk1.img']) + filter = filters.get_filters( + [ + 'arch=%s' % c_util.get_dpkg_architecture(), + 'release=%s' % img_conf['release'], + 'ftype=disk1.img', + ] + ) mirror_config = {'filters': filter, 'keep_items': False, 'max_items': 1, diff --git a/tests/cloud_tests/platforms/platforms.py b/tests/cloud_tests/platforms/platforms.py index abbfebba..bebdf1c6 100644 --- a/tests/cloud_tests/platforms/platforms.py +++ b/tests/cloud_tests/platforms/platforms.py @@ -48,7 +48,7 @@ class Platform(object): if os.path.exists(filename): c_util.del_file(filename) - c_util.subp(['ssh-keygen', '-t', 'rsa', '-b', '4096', + c_util.subp(['ssh-keygen', '-m', 'PEM', '-t', 'rsa', '-b', '4096', '-f', filename, '-P', '', '-C', 'ubuntu@cloud_test'], capture=True) diff --git a/tests/cloud_tests/releases.yaml b/tests/cloud_tests/releases.yaml index ec5da724..7ddc5b85 100644 --- a/tests/cloud_tests/releases.yaml +++ b/tests/cloud_tests/releases.yaml @@ -55,6 +55,8 @@ default_release_config: # cloud-init, so must pull cloud-init in from repo using # setup_image.upgrade upgrade: true + azurecloud: + boot_timeout: 300 features: # all currently supported feature flags @@ -129,6 +131,22 @@ features: releases: # UBUNTU ================================================================= + eoan: + # EOL: Jul 2020 + default: + enabled: true + release: eoan + version: 19.10 + os: ubuntu + feature_groups: + - base + - debian_base + - ubuntu_specific + lxd: + sstreams_server: https://cloud-images.ubuntu.com/daily + alias: eoan + setup_overrides: null + override_templates: false disco: # EOL: Jan 2020 default: diff --git a/tests/cloud_tests/setup_image.py b/tests/cloud_tests/setup_image.py index 39f4517f..69e66e3f 100644 --- a/tests/cloud_tests/setup_image.py +++ b/tests/cloud_tests/setup_image.py @@ -222,13 +222,14 @@ def setup_image(args, image): for name, func, 
desc in handlers if getattr(args, name, None)] try: - data = yaml.load(image.read_data("/etc/cloud/build.info", decode=True)) + data = yaml.safe_load( + image.read_data("/etc/cloud/build.info", decode=True)) info = ' '.join(["%s=%s" % (k, data.get(k)) for k in ("build_name", "serial") if k in data]) except Exception as e: info = "N/A (%s)" % e - LOG.info('setting up %s (%s)', image, info) + LOG.info('setting up image %s (info %s)', image, info) res = stage.run_stage( 'set up for {}'.format(image), calls, continue_after_error=False) return res diff --git a/tests/cloud_tests/testcases/modules/TODO.md b/tests/cloud_tests/testcases/modules/TODO.md index 0b933b3b..9513cb2d 100644 --- a/tests/cloud_tests/testcases/modules/TODO.md +++ b/tests/cloud_tests/testcases/modules/TODO.md @@ -78,11 +78,8 @@ Not applicable to write a test for this as it specifies when something should be ## scripts vendor Not applicable to write a test for this as it specifies when something should be run. -## snappy -2016-11-17: Need test to install snaps from store - -## snap-config -2016-11-17: Need to investigate +## snap +2019-12-19: Need to investigate ## spacewalk diff --git a/tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml b/tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml index bd9b5d08..22a31dc4 100644 --- a/tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml +++ b/tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml @@ -5,8 +5,7 @@ required_features: - apt cloud_config: | #cloud-config - apt: - apt_pipelining: false + apt_pipelining: false collect_scripts: 90cloud-init-pipelining: | #!/bin/bash diff --git a/tests/cloud_tests/testcases/modules/apt_pipelining_os.py b/tests/cloud_tests/testcases/modules/apt_pipelining_os.py index 740dc7c0..2b940a66 100644 --- a/tests/cloud_tests/testcases/modules/apt_pipelining_os.py +++ b/tests/cloud_tests/testcases/modules/apt_pipelining_os.py @@ -8,8 +8,8 @@ class TestAptPipeliningOS(base.CloudTestCase): """Test apt-pipelining module.""" def test_os_pipelining(self): - """Test pipelining set to os.""" - out = self.get_data_file('90cloud-init-pipelining') - self.assertIn('Acquire::http::Pipeline-Depth "0";', out) + """test 'os' settings does not write apt config file.""" + out = self.get_data_file('90cloud-init-pipelining_not_written') + self.assertEqual(0, int(out)) # vi: ts=4 expandtab diff --git a/tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml b/tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml index cbed3ba3..86d5220b 100644 --- a/tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml +++ b/tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml @@ -1,15 +1,14 @@ # -# Set apt pipelining value to OS +# Set apt pipelining value to OS, no conf written # required_features: - apt cloud_config: | #cloud-config - apt: - apt_pipelining: os + apt_pipelining: os collect_scripts: - 90cloud-init-pipelining: | + 90cloud-init-pipelining_not_written: | #!/bin/bash - cat /etc/apt/apt.conf.d/90cloud-init-pipelining + ls /etc/apt/apt.conf.d/90cloud-init-pipelining | wc -l # vi: ts=4 expandtab diff --git a/tests/cloud_tests/testcases/modules/snappy.py b/tests/cloud_tests/testcases/modules/snappy.py deleted file mode 100644 index 7d17fc5b..00000000 --- a/tests/cloud_tests/testcases/modules/snappy.py +++ /dev/null @@ -1,17 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
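The switch from ``yaml.load`` to ``yaml.safe_load`` in ``setup_image.py`` above is a hardening fix: ``safe_load`` parses plain data but rejects the YAML tags that allow a document to instantiate arbitrary Python objects. For example:

.. code-block:: python

    import yaml

    assert yaml.safe_load('build_name: server\nserial: 20191219') == {
        'build_name': 'server', 'serial': 20191219}

    # a document using python-specific tags is rejected outright
    try:
        yaml.safe_load("!!python/object/apply:os.system ['echo pwned']")
    except yaml.YAMLError as exc:
        print('rejected: %s' % type(exc).__name__)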
- -"""cloud-init Integration Test Verify Script""" -from tests.cloud_tests.testcases import base - - -class TestSnappy(base.CloudTestCase): - """Test snappy module""" - - expected_warnings = ('DEPRECATION',) - - def test_snappy_version(self): - """Test snappy version output""" - out = self.get_data_file('snapd') - self.assertIn('Status: install ok installed', out) - -# vi: ts=4 expandtab diff --git a/tests/cloud_tests/testcases/modules/snappy.yaml b/tests/cloud_tests/testcases/modules/snappy.yaml deleted file mode 100644 index 8ac322ae..00000000 --- a/tests/cloud_tests/testcases/modules/snappy.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# -# Install snappy -# -# Aug 17, 2018: Disabled due to requiring a proxy for testing -# tests do not handle the proxy well at this time. -enabled: False -required_features: - - snap -cloud_config: | - #cloud-config - snappy: - system_snappy: auto -collect_scripts: - snapd: | - #!/bin/bash - dpkg -s snapd - -# vi: ts=4 expandtab diff --git a/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.py b/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.py index e7329d48..02935447 100644 --- a/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.py +++ b/tests/cloud_tests/testcases/modules/ssh_auth_key_fingerprints_disable.py @@ -11,6 +11,6 @@ class TestSshKeyFingerprintsDisable(base.CloudTestCase): """Verify disabled.""" out = self.get_data_file('cloud-init.log') self.assertIn('Skipping module named ssh-authkey-fingerprints, ' - 'logging of ssh fingerprints disabled', out) + 'logging of SSH fingerprints disabled', out) # vi: ts=4 expandtab diff --git a/tests/cloud_tests/verify.py b/tests/cloud_tests/verify.py index 9911ecf2..7018f4d5 100644 --- a/tests/cloud_tests/verify.py +++ b/tests/cloud_tests/verify.py @@ -61,12 +61,17 @@ def format_test_failures(test_result): if not test_result['failures']: return '' failure_hdr = ' test failures:' - failure_fmt = ' * {module}.{class}.{function}\n {error}' + failure_fmt = ' * {module}.{class}.{function}\n ' output = [] for failure in test_result['failures']: if not output: output = [failure_hdr] - output.append(failure_fmt.format(**failure)) + msg = failure_fmt.format(**failure) + if failure.get('error'): + msg += failure['error'] + else: + msg += failure.get('traceback', '') + output.append(msg) return '\n'.join(output) diff --git a/tests/data/azure/non_unicode_random_string b/tests/data/azure/non_unicode_random_string new file mode 100644 index 00000000..b9ecefb9 --- /dev/null +++ b/tests/data/azure/non_unicode_random_string @@ -0,0 +1 @@ +OEM0d\x00\x00\x00\x01\x80VRTUALMICROSFT\x02\x17\x00\x06MSFT\x97\x00\x00\x00C\xb4{V\xf4X%\x061x\x90\x1c\xfen\x86\xbf~\xf5\x8c\x94&\x88\xed\x84\xf9B\xbd\xd3\xf1\xdb\xee:\xd9\x0fc\x0e\x83(\xbd\xe3'\xfc\x85,\xdf\xf4\x13\x99N\xc5\xf3Y\x1e\xe3\x0b\xa4H\x08J\xb9\xdcdb$
\ No newline at end of file diff --git a/tests/data/azure/parse_certificates_fingerprints b/tests/data/azure/parse_certificates_fingerprints new file mode 100644 index 00000000..f7293c56 --- /dev/null +++ b/tests/data/azure/parse_certificates_fingerprints @@ -0,0 +1,4 @@ +ECEDEB3B8488D31AF3BC4CCED493F64B7D27D7B1 +073E19D14D1C799224C6A0FD8DDAB6A8BF27D473 +4C16E7FAD6297D74A9B25EB8F0A12808CEBE293E +929130695289B450FE45DCD5F6EF0CDE69865867 diff --git a/tests/data/azure/parse_certificates_pem b/tests/data/azure/parse_certificates_pem new file mode 100644 index 00000000..3521ea3a --- /dev/null +++ b/tests/data/azure/parse_certificates_pem @@ -0,0 +1,152 @@ +Bag Attributes + localKeyID: 01 00 00 00 + Microsoft CSP Name: Microsoft Enhanced Cryptographic Provider v1.0 +Key Attributes + X509v3 Key Usage: 10 +-----BEGIN PRIVATE KEY----- +MIIEwAIBADANBgkqhkiG9w0BAQEFAASCBKowggSmAgEAAoIBAQDlEe5fUqwdrQTP +W2oVlGK2f31q/8ULT8KmOTyUvL0RPdJQ69vvHOc5Q2CKg2eviHC2LWhF8WmpnZj6 +61RL0GeFGizwvU8Moebw5p3oqdcgoGpHVtxf+mr4QcWF58/Fwez0dA4hcsimVNBz +eNpBBUIKNBMTBG+4d6hcQBUAGKUdGRcCGEyTqXLU0MgHjxC9JgVqWJl+X2LcAGj5 +7J+tGYGTLzKJmeCeGVNN5ZtJ0T85MYHCKQk1/FElK+Kq5akovXffQHjlnCPcx0NJ +47NBjlPaFp2gjnAChn79bT4iCjOFZ9avWpqRpeU517UCnY7djOr3fuod/MSQyh3L +Wuem1tWBAgMBAAECggEBAM4ZXQRs6Kjmo95BHGiAEnSqrlgX+dycjcBq3QPh8KZT +nifqnf48XhnackENy7tWIjr3DctoUq4mOp8AHt77ijhqfaa4XSg7fwKeK9NLBGC5 +lAXNtAey0o2894/sKrd+LMkgphoYIUnuI4LRaGV56potkj/ZDP/GwTcG/R4SDnTn +C1Nb05PNTAPQtPZrgPo7TdM6gGsTnFbVrYHQLyg2Sq/osHfF15YohB01esRLCAwb +EF8JkRC4hWIZoV7BsyQ39232zAJQGGla7+wKFs3kObwh3VnFkQpT94KZnNiZuEfG +x5pW4Pn3gXgNsftscXsaNe/M9mYZqo//Qw7NvUIvAvECgYEA9AVveyK0HOA06fhh ++3hUWdvw7Pbrl+e06jO9+bT1RjQMbHKyI60DZyVGuAySN86iChJRoJr5c6xj+iXU +cR6BVJDjGH5t1tyiK2aYf6hEpK9/j8Z54UiVQ486zPP0PGfT2TO4lBLK+8AUmoaH +gk21ul8QeVCeCJa/o+xEoRFvzcUCgYEA8FCbbvInrUtNY+9eKaUYoNodsgBVjm5X +I0YPUL9D4d+1nvupHSV2NVmQl0w1RaJwrNTafrl5LkqjhQbmuWNta6QgfZzSA3LB +lWXo1Mm0azKdcD3qMGbvn0Q3zU+yGNEgmB/Yju3/NtgYRG6tc+FCWRbPbiCnZWT8 +v3C2Y0XggI0CgYEA2/jCZBgGkTkzue5kNVJlh5OS/aog+pCvL6hxCtarfBuTT3ed +Sje+p46cz3DVpmUpATc+Si8py7KNdYQAm/BJ2be6X+woi9Xcgo87zWgcaPCjZzId +0I2jsIE/Gl6XvpRCDrxnGWRPgt3GNP4szbPLrDPiH9oie8+Y9eYYf7G+PZkCgYEA +nRSzZOPYV4f/QDF4pVQLMykfe/iH9B/fyWjEHg3He19VQmRReIHCMMEoqBziPXAe +onpHj8oAkeer1wpZyhhZr6CKtFDLXgGm09bXSC/IRMHC81klORovyzU2HHfZfCtG +WOmIDnU2+0xpIGIP8sztJ3qnf97MTJSkOSadsWo9gwkCgYEAh5AQmJQmck88Dff2 +qIfJIX8d+BDw47BFJ89OmMFjGV8TNB+JO+AV4Vkodg4hxKpLqTFZTTUFgoYfy5u1 +1/BhAjpmCDCrzubCFhx+8VEoM2+2+MmnuQoMAm9+/mD/IidwRaARgXgvEmp7sfdt +RyWd+p2lYvFkC/jORQtDMY4uW1o= +-----END PRIVATE KEY----- +Bag Attributes + localKeyID: 02 00 00 00 + Microsoft CSP Name: Microsoft Strong Cryptographic Provider +Key Attributes + X509v3 Key Usage: 10 +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDlQhPrZwVQYFV4 +FBc0H1iTXYaznMpwZvEITKtXWACzTdguUderEVOkXW3HTi5HvC2rMayt0nqo3zcd +x1eGiqdjpZQ/wMrkz9wNEM/nNMsXntEwxk0jCVNKB/jz6vf+BOtrSI01SritAGZW +dpKoTUyztT8C2mA3X6D8g3m4Dd07ltnzxaDqAQIU5jBHh3f/Q14tlPNZWUIiqVTC +gDxgAe7MDmfs9h3CInTBX1XM5J4UsLTL23/padgeSvP5YF5qr1+0c7Tdftxr2lwA +N3rLkisf5EiLAToVyJJlgP/exo2I8DaIKe7DZzD3Y1CrurOpkcMKYu5kM1Htlbua +tDkAa2oDAgMBAAECggEAOvdueS9DyiMlCKAeQb1IQosdQOh0l0ma+FgEABC2CWhd +0LgjQTBRM6cGO+urcq7/jhdWQ1UuUG4tVn71z7itCi/F/Enhxc2C22d2GhFVpWsn +giSXJYpZ/mIjkdVfWNo6FRuRmmHwMys1p0qTOS+8qUJWhSzW75csqJZGgeUrAI61 +LBV5F0SGR7dR2xZfy7PeDs9xpD0QivDt5DpsZWPaPvw4QlhdLgw6/YU1h9vtm6ci +xLjnPRLZ7JMpcQHO8dUDl6FiEI7yQ11BDm253VQAVMddYRPQABn7SpEF8kD/aZVh +2Clvz61Rz80SKjPUthMPLWMCRp7zB0xDMzt3/1i+tQKBgQD6Ar1/oD3eFnRnpi4u 
+n/hdHJtMuXWNfUA4dspNjP6WGOid9sgIeUUdif1XyVJ+afITzvgpWc7nUWIqG2bQ +WxJ/4q2rjUdvjNXTy1voVungR2jD5WLQ9DKeaTR0yCliWlx4JgdPG7qGI5MMwsr+ +R/PUoUUhGeEX+o/sCSieO3iUrQKBgQDqwBEMvIdhAv/CK2sG3fsKYX8rFT55ZNX3 +Tix9DbUGY3wQColNuI8U1nDlxE9U6VOfT9RPqKelBLCgbzB23kdEJnjSlnqlTxrx +E+Hkndyf2ckdJAR3XNxoQ6SRLJNBsgoBj/z5tlfZE9/Jc+uh0mYy3e6g6XCVPBcz +MgoIc+ofbwKBgQCGQhZ1hR30N+bHCozeaPW9OvGDIE0qcEqeh9xYDRFilXnF6pK9 +SjJ9jG7KR8jPLiHb1VebDSl5O1EV/6UU2vNyTc6pw7LLCryBgkGW4aWy1WZDXNnW +EG1meGS9GghvUss5kmJ2bxOZmV0Mi0brisQ8OWagQf+JGvtS7BAt+Q3l+QKBgAb9 +8YQPmXiqPjPqVyW9Ntz4SnFeEJ5NApJ7IZgX8GxgSjGwHqbR+HEGchZl4ncE/Bii +qBA3Vcb0fM5KgYcI19aPzsl28fA6ivLjRLcqfIfGVNcpW3iyq13vpdctHLW4N9QU +FdTaOYOds+ysJziKq8CYG6NvUIshXw+HTgUybqbBAoGBAIIOqcmmtgOClAwipA17 +dAHsI9Sjk+J0+d4JU6o+5TsmhUfUKIjXf5+xqJkJcQZMEe5GhxcCuYkgFicvh4Hz +kv2H/EU35LcJTqC6KTKZOWIbGcn1cqsvwm3GQJffYDiO8fRZSwCaif2J3F2lfH4Y +R/fA67HXFSTT+OncdRpY1NOn +-----END PRIVATE KEY----- +Bag Attributes: <Empty Attributes> +subject=/CN=CRP/OU=AzureRT/O=Microsoft Corporation/L=Redmond/ST=WA/C=US +issuer=/CN=Root Agency +-----BEGIN CERTIFICATE----- +MIIB+TCCAeOgAwIBAgIBATANBgkqhkiG9w0BAQUFADAWMRQwEgYDVQQDDAtSb290 +IEFnZW5jeTAeFw0xOTAyMTUxOTA0MDRaFw0yOTAyMTUxOTE0MDRaMGwxDDAKBgNV +BAMMA0NSUDEQMA4GA1UECwwHQXp1cmVSVDEeMBwGA1UECgwVTWljcm9zb2Z0IENv +cnBvcmF0aW9uMRAwDgYDVQQHDAdSZWRtb25kMQswCQYDVQQIDAJXQTELMAkGA1UE +BhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIlPjJXzrRih4C +k/XsoI01oqo7IUxH3dA2F7vHGXQoIpKCp8Qe6Z6cFfdD8Uj+s+B1BX6hngwzIwjN +jE/23X3SALVzJVWzX4Y/IEjbgsuao6sOyNyB18wIU9YzZkVGj68fmMlUw3LnhPbe +eWkufZaJCaLyhQOwlRMbOcn48D6Ys8fccOyXNzpq3rH1OzeQpxS2M8zaJYP4/VZ/ +sf6KRpI7bP+QwyFvNKfhcaO9/gj4kMo9lVGjvDU20FW6g8UVNJCV9N4GO6mOcyqo +OhuhVfjCNGgW7N1qi0TIVn0/MQM4l4dcT2R7Z/bV9fhMJLjGsy5A4TLAdRrhKUHT +bzi9HyDvAgMBAAEwDQYJKoZIhvcNAQEFBQADAQA= +-----END CERTIFICATE----- +Bag Attributes + localKeyID: 01 00 00 00 +subject=/C=US/ST=WASHINGTON/L=Seattle/O=Microsoft/OU=Azure/CN=AnhVo/emailAddress=redacted@microsoft.com +issuer=/C=US/ST=WASHINGTON/L=Seattle/O=Microsoft/OU=Azure/CN=AnhVo/emailAddress=redacted@microsoft.com +-----BEGIN CERTIFICATE----- +MIID7TCCAtWgAwIBAgIJALQS3yMg3R41MA0GCSqGSIb3DQEBCwUAMIGMMQswCQYD +VQQGEwJVUzETMBEGA1UECAwKV0FTSElOR1RPTjEQMA4GA1UEBwwHU2VhdHRsZTES +MBAGA1UECgwJTWljcm9zb2Z0MQ4wDAYDVQQLDAVBenVyZTEOMAwGA1UEAwwFQW5o +Vm8xIjAgBgkqhkiG9w0BCQEWE2FuaHZvQG1pY3Jvc29mdC5jb20wHhcNMTkwMjE0 +MjMxMjQwWhcNMjExMTEwMjMxMjQwWjCBjDELMAkGA1UEBhMCVVMxEzARBgNVBAgM +CldBU0hJTkdUT04xEDAOBgNVBAcMB1NlYXR0bGUxEjAQBgNVBAoMCU1pY3Jvc29m +dDEOMAwGA1UECwwFQXp1cmUxDjAMBgNVBAMMBUFuaFZvMSIwIAYJKoZIhvcNAQkB +FhNhbmh2b0BtaWNyb3NvZnQuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEA5RHuX1KsHa0Ez1tqFZRitn99av/FC0/Cpjk8lLy9ET3SUOvb7xznOUNg +ioNnr4hwti1oRfFpqZ2Y+utUS9BnhRos8L1PDKHm8Oad6KnXIKBqR1bcX/pq+EHF +hefPxcHs9HQOIXLIplTQc3jaQQVCCjQTEwRvuHeoXEAVABilHRkXAhhMk6ly1NDI +B48QvSYFaliZfl9i3ABo+eyfrRmBky8yiZngnhlTTeWbSdE/OTGBwikJNfxRJSvi +quWpKL1330B45Zwj3MdDSeOzQY5T2hadoI5wAoZ+/W0+IgozhWfWr1qakaXlOde1 +Ap2O3Yzq937qHfzEkMody1rnptbVgQIDAQABo1AwTjAdBgNVHQ4EFgQUPvdgLiv3 +pAk4r0QTPZU3PFOZJvgwHwYDVR0jBBgwFoAUPvdgLiv3pAk4r0QTPZU3PFOZJvgw +DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAVUHZT+h9+uCPLTEl5IDg +kqd9WpzXA7PJd/V+7DeDDTkEd06FIKTWZLfxLVVDjQJnQqubQb//e0zGu1qKbXnX +R7xqWabGU4eyPeUFWddmt1OHhxKLU3HbJNJJdL6XKiQtpGGUQt/mqNQ/DEr6hhNF +im5I79iA8H/dXA2gyZrj5Rxea4mtsaYO0mfp1NrFtJpAh2Djy4B1lBXBIv4DWG9e +mMEwzcLCOZj2cOMA6+mdLMUjYCvIRtnn5MKUHyZX5EmX79wsqMTvVpddlVLB9Kgz +Qnvft9+SBWh9+F3ip7BsL6Q4Q9v8eHRbnP0ya7ddlgh64uwf9VOfZZdKCnwqudJP +3g== +-----END CERTIFICATE----- +Bag Attributes + localKeyID: 02 00 00 00 
+subject=/CN=/subscriptions/redacted/resourcegroups/redacted/providers/Microsoft.Compute/virtualMachines/redacted +issuer=/CN=Microsoft.ManagedIdentity +-----BEGIN CERTIFICATE----- +MIIDnTCCAoWgAwIBAgIUB2lauSRccvFkoJybUfIwOUqBN7MwDQYJKoZIhvcNAQEL +BQAwJDEiMCAGA1UEAxMZTWljcm9zb2Z0Lk1hbmFnZWRJZGVudGl0eTAeFw0xOTAy +MTUxOTA5MDBaFw0xOTA4MTQxOTA5MDBaMIGUMYGRMIGOBgNVBAMTgYYvc3Vic2Ny +aXB0aW9ucy8yN2I3NTBjZC1lZDQzLTQyZmQtOTA0NC04ZDc1ZTEyNGFlNTUvcmVz +b3VyY2Vncm91cHMvYW5oZXh0cmFzc2gvcHJvdmlkZXJzL01pY3Jvc29mdC5Db21w +dXRlL3ZpcnR1YWxNYWNoaW5lcy9hbmh0ZXN0Y2VydDCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBAOVCE+tnBVBgVXgUFzQfWJNdhrOcynBm8QhMq1dYALNN +2C5R16sRU6RdbcdOLke8LasxrK3SeqjfNx3HV4aKp2OllD/AyuTP3A0Qz+c0yxee +0TDGTSMJU0oH+PPq9/4E62tIjTVKuK0AZlZ2kqhNTLO1PwLaYDdfoPyDebgN3TuW +2fPFoOoBAhTmMEeHd/9DXi2U81lZQiKpVMKAPGAB7swOZ+z2HcIidMFfVczknhSw +tMvbf+lp2B5K8/lgXmqvX7RztN1+3GvaXAA3esuSKx/kSIsBOhXIkmWA/97GjYjw +Nogp7sNnMPdjUKu6s6mRwwpi7mQzUe2Vu5q0OQBragMCAwEAAaNWMFQwDgYDVR0P +AQH/BAQDAgeAMAwGA1UdEwEB/wQCMAAwEwYDVR0lBAwwCgYIKwYBBQUHAwIwHwYD +VR0jBBgwFoAUOJvzEsriQWdJBndPrK+Me1bCPjYwDQYJKoZIhvcNAQELBQADggEB +AFGP/g8o7Hv/to11M0UqfzJuW/AyH9RZtSRcNQFLZUndwweQ6fap8lFsA4REUdqe +7Quqp5JNNY1XzKLWXMPoheIDH1A8FFXdsAroArzlNs9tO3TlIHE8A7HxEVZEmR4b +7ZiixmkQPS2RkjEoV/GM6fheBrzuFn7X5kVZyE6cC5sfcebn8xhk3ZcXI0VmpdT0 +jFBsf5IvFCIXXLLhJI4KXc8VMoKFU1jT9na/jyaoGmfwovKj4ib8s2aiXGAp7Y38 +UCmY+bJapWom6Piy5Jzi/p/kzMVdJcSa+GqpuFxBoQYEVs2XYVl7cGu/wPM+NToC +pkSoWwF1QAnHn0eokR9E1rU= +-----END CERTIFICATE----- +Bag Attributes: <Empty Attributes> +subject=/CN=CRP/OU=AzureRT/O=Microsoft Corporation/L=Redmond/ST=WA/C=US +issuer=/CN=Root Agency +-----BEGIN CERTIFICATE----- +MIIB+TCCAeOgAwIBAgIBATANBgkqhkiG9w0BAQUFADAWMRQwEgYDVQQDDAtSb290 +IEFnZW5jeTAeFw0xOTAyMTUxOTA0MDRaFw0yOTAyMTUxOTE0MDRaMGwxDDAKBgNV +BAMMA0NSUDEQMA4GA1UECwwHQXp1cmVSVDEeMBwGA1UECgwVTWljcm9zb2Z0IENv +cnBvcmF0aW9uMRAwDgYDVQQHDAdSZWRtb25kMQswCQYDVQQIDAJXQTELMAkGA1UE +BhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDHU9IDclbKVYVb +Yuv0+zViX+wTwlKspslmy/uf3hkWLh7pyzyrq70S7qtSW2EGixUPxZS/R8pOLHoi +nlKF9ILgj0gVTCJsSwnWpXRg3rhZwIVoYMHN50BHS1SqVD0lsWNMXmo76LoJcjmW +vwIznvj5C/gnhU+K7+c3m7AlCyU2wjwpBAEYj7PQs6l/wTqpEiaqC5NytNBd7qp+ +lYYysVrpa1PFL0Nj4MMZARIfjkiJtL9qDhy9YZeJRQ6q/Fhz0kjvkZnfxixfKF4y +WzOfhBrAtpF6oOnuYKk3hxjh9KjTTX4/U8zdLojalX09iyHyEjwJKGlGEpzh1aY7 +t5btUyvpAgMBAAEwDQYJKoZIhvcNAQEFBQADAQA= +-----END CERTIFICATE----- diff --git a/tests/data/azure/pubkey_extract_cert b/tests/data/azure/pubkey_extract_cert new file mode 100644 index 00000000..ce9b852d --- /dev/null +++ b/tests/data/azure/pubkey_extract_cert @@ -0,0 +1,13 @@ +-----BEGIN CERTIFICATE----- +MIIB+TCCAeOgAwIBAgIBATANBgkqhkiG9w0BAQUFADAWMRQwEgYDVQQDDAtSb290 +IEFnZW5jeTAeFw0xOTAyMTUxOTA0MDRaFw0yOTAyMTUxOTE0MDRaMGwxDDAKBgNV +BAMMA0NSUDEQMA4GA1UECwwHQXp1cmVSVDEeMBwGA1UECgwVTWljcm9zb2Z0IENv +cnBvcmF0aW9uMRAwDgYDVQQHDAdSZWRtb25kMQswCQYDVQQIDAJXQTELMAkGA1UE +BhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDHU9IDclbKVYVb +Yuv0+zViX+wTwlKspslmy/uf3hkWLh7pyzyrq70S7qtSW2EGixUPxZS/R8pOLHoi +nlKF9ILgj0gVTCJsSwnWpXRg3rhZwIVoYMHN50BHS1SqVD0lsWNMXmo76LoJcjmW +vwIznvj5C/gnhU+K7+c3m7AlCyU2wjwpBAEYj7PQs6l/wTqpEiaqC5NytNBd7qp+ +lYYysVrpa1PFL0Nj4MMZARIfjkiJtL9qDhy9YZeJRQ6q/Fhz0kjvkZnfxixfKF4y +WzOfhBrAtpF6oOnuYKk3hxjh9KjTTX4/U8zdLojalX09iyHyEjwJKGlGEpzh1aY7 +t5btUyvpAgMBAAEwDQYJKoZIhvcNAQEFBQADAQA= +-----END CERTIFICATE----- diff --git a/tests/data/azure/pubkey_extract_ssh_key b/tests/data/azure/pubkey_extract_ssh_key new file mode 100644 index 00000000..54d749ed --- /dev/null +++ b/tests/data/azure/pubkey_extract_ssh_key @@ -0,0 
+1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDHU9IDclbKVYVbYuv0+zViX+wTwlKspslmy/uf3hkWLh7pyzyrq70S7qtSW2EGixUPxZS/R8pOLHoinlKF9ILgj0gVTCJsSwnWpXRg3rhZwIVoYMHN50BHS1SqVD0lsWNMXmo76LoJcjmWvwIznvj5C/gnhU+K7+c3m7AlCyU2wjwpBAEYj7PQs6l/wTqpEiaqC5NytNBd7qp+lYYysVrpa1PFL0Nj4MMZARIfjkiJtL9qDhy9YZeJRQ6q/Fhz0kjvkZnfxixfKF4yWzOfhBrAtpF6oOnuYKk3hxjh9KjTTX4/U8zdLojalX09iyHyEjwJKGlGEpzh1aY7t5btUyvp diff --git a/tests/data/netinfo/freebsd-ifconfig-output b/tests/data/netinfo/freebsd-ifconfig-output new file mode 100644 index 00000000..f64c2f60 --- /dev/null +++ b/tests/data/netinfo/freebsd-ifconfig-output @@ -0,0 +1,39 @@ +vtnet0: flags=8843<UP,BROADCAST,RUNNING,SIMPLEX,MULTICAST> metric 0 mtu 1500 + options=6c07bb<RXCSUM,TXCSUM,VLAN_MTU,VLAN_HWTAGGING,JUMBO_MTU,VLAN_HWCSUM,TSO4,TSO6,LRO,VLAN_HWTSO,LINKSTATE,RXCSUM_IPV6,TXCSUM_IPV6> + ether 52:54:00:50:b7:0d +re0.33: flags=8943<UP,BROADCAST,RUNNING,PROMISC,SIMPLEX,MULTICAST> metric 0 mtu 1500 + options=80003<RXCSUM,TXCSUM,LINKSTATE> + ether 80:00:73:63:5c:48 + groups: vlan + vlan: 33 vlanpcp: 0 parent interface: re0 + media: Ethernet autoselect (1000baseT <full-duplex,master>) + status: active + nd6 options=21<PERFORMNUD,AUTO_LINKLOCAL> +bridge0: flags=8843<UP,BROADCAST,RUNNING,SIMPLEX,MULTICAST> metric 0 mtu 1500 + ether 02:14:39:0e:25:00 + inet 192.168.1.1 netmask 0xffffff00 broadcast 192.168.1.255 + id 00:00:00:00:00:00 priority 32768 hellotime 2 fwddelay 15 + maxage 20 holdcnt 6 proto rstp maxaddr 2000 timeout 1200 + root id 00:00:00:00:00:00 priority 32768 ifcost 0 port 0 + member: vnet0:11 flags=143<LEARNING,DISCOVER,AUTOEDGE,AUTOPTP> + ifmaxaddr 0 port 5 priority 128 path cost 2000 + member: vnet0:1 flags=143<LEARNING,DISCOVER,AUTOEDGE,AUTOPTP> + ifmaxaddr 0 port 4 priority 128 path cost 2000 + groups: bridge + nd6 options=9<PERFORMNUD,IFDISABLED> +vnet0:11: flags=8943<UP,BROADCAST,RUNNING,PROMISC,SIMPLEX,MULTICAST> metric 0 mtu 1500 + description: 'associated with jail: webirc' + options=8<VLAN_MTU> + ether 02:ff:60:8c:f3:72 + hwaddr 02:2b:bb:64:3f:0a + inet6 fe80::2b:bbff:fe64:3f0a%vnet0:11 prefixlen 64 tentative scopeid 0x5 + groups: epair + media: Ethernet 10Gbase-T (10Gbase-T <full-duplex>) + status: active + nd6 options=29<PERFORMNUD,IFDISABLED,AUTO_LINKLOCAL> +lo0: flags=8049<UP,LOOPBACK,RUNNING,MULTICAST> metric 0 mtu 16384 + options=600003<RXCSUM,TXCSUM,RXCSUM_IPV6,TXCSUM_IPV6> + inet6 ::1 prefixlen 128 + inet6 fe80::1%lo0 prefixlen 64 scopeid 0x2 + inet 127.0.0.1 netmask 0xff000000 + nd6 options=21<PERFORMNUD,AUTO_LINKLOCAL> diff --git a/tests/data/netinfo/freebsd-netdev-formatted-output b/tests/data/netinfo/freebsd-netdev-formatted-output new file mode 100644 index 00000000..a0d937b3 --- /dev/null +++ b/tests/data/netinfo/freebsd-netdev-formatted-output @@ -0,0 +1,12 @@ ++++++++++++++++++++++++++++++++++++++++++Net device info++++++++++++++++++++++++++++++++++++++++++ ++----------+------+-------------------------------------+------------+-------+-------------------+ +| Device | Up | Address | Mask | Scope | Hw-Address | ++----------+------+-------------------------------------+------------+-------+-------------------+ +| bridge0 | True | 192.168.1.1 | 0xffffff00 | . | 02:14:39:0e:25:00 | +| lo0 | True | 127.0.0.1 | 0xff000000 | . | . | +| lo0 | True | ::1/128 | . | . | . | +| lo0 | True | fe80::1%lo0/64 | . | 0x2 | . | +| re0.33 | True | . | . | . | 80:00:73:63:5c:48 | +| vnet0:11 | True | fe80::2b:bbff:fe64:3f0a%vnet0:11/64 | . | 0x5 | 02:2b:bb:64:3f:0a | +| vtnet0 | True | . | . | . 
| 52:54:00:50:b7:0d | ++----------+------+-------------------------------------+------------+-------+-------------------+ diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py index d283f136..e57c15d1 100644 --- a/tests/unittests/test_cli.py +++ b/tests/unittests/test_cli.py @@ -1,8 +1,8 @@ # This file is part of cloud-init. See LICENSE file for license information. -from collections import namedtuple import os -import six +import io +from collections import namedtuple from cloudinit.cmd import main as cli from cloudinit.tests import helpers as test_helpers @@ -18,7 +18,7 @@ class TestCLI(test_helpers.FilesystemMockingTestCase): def setUp(self): super(TestCLI, self).setUp() - self.stderr = six.StringIO() + self.stderr = io.StringIO() self.patchStdoutAndStderr(stderr=self.stderr) def _call_main(self, sysv_args=None): @@ -147,7 +147,7 @@ class TestCLI(test_helpers.FilesystemMockingTestCase): def test_conditional_subcommands_from_entry_point_sys_argv(self): """Subcommands from entry-point are properly parsed from sys.argv.""" - stdout = six.StringIO() + stdout = io.StringIO() self.patchStdoutAndStderr(stdout=stdout) expected_errors = [ @@ -178,7 +178,7 @@ class TestCLI(test_helpers.FilesystemMockingTestCase): def test_collect_logs_subcommand_parser(self): """The subcommand cloud-init collect-logs calls the subparser.""" # Provide -h param to collect-logs to avoid having to mock behavior. - stdout = six.StringIO() + stdout = io.StringIO() self.patchStdoutAndStderr(stdout=stdout) self._call_main(['cloud-init', 'collect-logs', '-h']) self.assertIn('usage: cloud-init collect-log', stdout.getvalue()) @@ -186,7 +186,7 @@ class TestCLI(test_helpers.FilesystemMockingTestCase): def test_clean_subcommand_parser(self): """The subcommand cloud-init clean calls the subparser.""" # Provide -h param to clean to avoid having to mock behavior. - stdout = six.StringIO() + stdout = io.StringIO() self.patchStdoutAndStderr(stdout=stdout) self._call_main(['cloud-init', 'clean', '-h']) self.assertIn('usage: cloud-init clean', stdout.getvalue()) @@ -194,7 +194,7 @@ class TestCLI(test_helpers.FilesystemMockingTestCase): def test_status_subcommand_parser(self): """The subcommand cloud-init status calls the subparser.""" # Provide -h param to clean to avoid having to mock behavior. 
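
Editor's note: the test_cli.py changes in this region are part of dropping Python 2, replacing `six.StringIO` with the standard library's `io.StringIO` for capturing CLI output. A minimal sketch of the capture pattern, using `contextlib.redirect_stdout` as a stand-in for the test helper's `patchStdoutAndStderr()` (the `main` callable here is a placeholder for `cloudinit.cmd.main.main`):

```python
import io
from contextlib import redirect_stdout

def capture_help_output(main, argv):
    """Capture stdout into io.StringIO while invoking a CLI entry point."""
    stdout = io.StringIO()
    with redirect_stdout(stdout):
        try:
            main(argv)
        except SystemExit:
            pass  # argparse exits after printing --help text
    return stdout.getvalue()
```
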
- stdout = six.StringIO() + stdout = io.StringIO() self.patchStdoutAndStderr(stdout=stdout) self._call_main(['cloud-init', 'status', '-h']) self.assertIn('usage: cloud-init status', stdout.getvalue()) @@ -219,7 +219,7 @@ class TestCLI(test_helpers.FilesystemMockingTestCase): def test_wb_devel_schema_subcommand_doc_content(self): """Validate that doc content is sane from known examples.""" - stdout = six.StringIO() + stdout = io.StringIO() self.patchStdoutAndStderr(stdout=stdout) self._call_main(['cloud-init', 'devel', 'schema', '--doc']) expected_doc_sections = [ diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py index 3efe7adf..74cc26ec 100644 --- a/tests/unittests/test_data.py +++ b/tests/unittests/test_data.py @@ -5,13 +5,8 @@ import gzip import logging import os - -try: - from unittest import mock -except ImportError: - import mock - -from six import BytesIO, StringIO +from io import BytesIO, StringIO +from unittest import mock from email import encoders from email.mime.application import MIMEApplication @@ -27,6 +22,7 @@ from cloudinit.settings import (PER_INSTANCE) from cloudinit import sources from cloudinit import stages from cloudinit import user_data as ud +from cloudinit import safeyaml from cloudinit import util from cloudinit.tests import helpers @@ -502,7 +498,7 @@ c: 4 data = [{'content': '#cloud-config\npassword: gocubs\n'}, {'content': '#cloud-config\nlocale: chicago\n'}, {'content': non_decodable}] - message = b'#cloud-config-archive\n' + util.yaml_dumps(data).encode() + message = b'#cloud-config-archive\n' + safeyaml.dumps(data).encode() self.reRoot() ci = stages.Init() @@ -524,6 +520,46 @@ c: 4 self.assertEqual(cfg.get('password'), 'gocubs') self.assertEqual(cfg.get('locale'), 'chicago') + @mock.patch('cloudinit.util.read_conf_with_confd') + def test_dont_allow_user_data(self, mock_cfg): + mock_cfg.return_value = {"allow_userdata": False} + + # test that user-data is ignored but vendor-data is kept + user_blob = ''' +#cloud-config-jsonp +[ + { "op": "add", "path": "/baz", "value": "qux" }, + { "op": "add", "path": "/bar", "value": "qux2" } +] +''' + vendor_blob = ''' +#cloud-config-jsonp +[ + { "op": "add", "path": "/baz", "value": "quxA" }, + { "op": "add", "path": "/bar", "value": "quxB" }, + { "op": "add", "path": "/foo", "value": "quxC" } +] +''' + self.reRoot() + initer = stages.Init() + initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob) + initer.read_cfg() + initer.initialize() + initer.fetch() + initer.instancify() + initer.update() + initer.cloudify().run('consume_data', + initer.consume_data, + args=[PER_INSTANCE], + freq=PER_INSTANCE) + mods = stages.Modules(initer) + (_which_ran, _failures) = mods.run_section('cloud_init_modules') + cfg = mods.cfg + self.assertIn('vendor_data', cfg) + self.assertEqual('quxA', cfg['baz']) + self.assertEqual('quxB', cfg['bar']) + self.assertEqual('quxC', cfg['foo']) + class TestConsumeUserDataHttp(TestConsumeUserData, helpers.HttprettyTestCase): diff --git a/tests/unittests/test_datasource/test_aliyun.py b/tests/unittests/test_datasource/test_aliyun.py index e9213ca1..1e66fcdb 100644 --- a/tests/unittests/test_datasource/test_aliyun.py +++ b/tests/unittests/test_datasource/test_aliyun.py @@ -2,8 +2,8 @@ import functools import httpretty -import mock import os +from unittest import mock from cloudinit import helpers from cloudinit.sources import DataSourceAliYun as ay diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 
417d86a9..a809fd87 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -6,13 +6,13 @@ from cloudinit import url_helper from cloudinit.sources import ( UNSET, DataSourceAzure as dsaz, InvalidMetaDataException) from cloudinit.util import (b64e, decode_binary, load_file, write_file, - find_freebsd_part, get_path_dev_freebsd, - MountFailedError) + MountFailedError, json_dumps, load_json) from cloudinit.version import version_string as vs from cloudinit.tests.helpers import ( HttprettyTestCase, CiTestCase, populate_dir, mock, wrap_and_call, - ExitStack, PY26, SkipTest) + ExitStack, resourceLocation) +import copy import crypt import httpretty import json @@ -85,6 +85,25 @@ def construct_valid_ovf_env(data=None, pubkeys=None, NETWORK_METADATA = { + "compute": { + "location": "eastus2", + "name": "my-hostname", + "offer": "UbuntuServer", + "osType": "Linux", + "placementGroupId": "", + "platformFaultDomain": "0", + "platformUpdateDomain": "0", + "publisher": "Canonical", + "resourceGroupName": "srugroup1", + "sku": "19.04-DAILY", + "subscriptionId": "12aad61c-6de4-4e53-a6c6-5aff52a83777", + "tags": "", + "version": "19.04.201906190", + "vmId": "ff702a6b-cb6a-4fcd-ad68-b4ce38227642", + "vmScaleSetName": "", + "vmSize": "Standard_DS1_v2", + "zone": "" + }, "network": { "interface": [ { @@ -111,9 +130,155 @@ NETWORK_METADATA = { } } +SECONDARY_INTERFACE = { + "macAddress": "220D3A047598", + "ipv6": { + "ipAddress": [] + }, + "ipv4": { + "subnet": [ + { + "prefix": "24", + "address": "10.0.1.0" + } + ], + "ipAddress": [ + { + "privateIpAddress": "10.0.1.5", + } + ] + } +} + MOCKPATH = 'cloudinit.sources.DataSourceAzure.' +class TestParseNetworkConfig(CiTestCase): + + maxDiff = None + + def test_single_ipv4_nic_configuration(self): + """parse_network_config emits dhcp on single nic with ipv4""" + expected = {'ethernets': { + 'eth0': {'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}, + 'dhcp6': False, + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'set-name': 'eth0'}}, 'version': 2} + self.assertEqual(expected, dsaz.parse_network_config(NETWORK_METADATA)) + + def test_increases_route_metric_for_non_primary_nics(self): + """parse_network_config increases route-metric for each nic""" + expected = {'ethernets': { + 'eth0': {'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}, + 'dhcp6': False, + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'set-name': 'eth0'}, + 'eth1': {'set-name': 'eth1', + 'match': {'macaddress': '22:0d:3a:04:75:98'}, + 'dhcp6': False, + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 200}}, + 'eth2': {'set-name': 'eth2', + 'match': {'macaddress': '33:0d:3a:04:75:98'}, + 'dhcp6': False, + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 300}}}, 'version': 2} + imds_data = copy.deepcopy(NETWORK_METADATA) + imds_data['network']['interface'].append(SECONDARY_INTERFACE) + third_intf = copy.deepcopy(SECONDARY_INTERFACE) + third_intf['macAddress'] = third_intf['macAddress'].replace('22', '33') + third_intf['ipv4']['subnet'][0]['address'] = '10.0.2.0' + third_intf['ipv4']['ipAddress'][0]['privateIpAddress'] = '10.0.2.6' + imds_data['network']['interface'].append(third_intf) + self.assertEqual(expected, dsaz.parse_network_config(imds_data)) + + def test_ipv4_and_ipv6_route_metrics_match_for_nics(self): + """parse_network_config emits matching ipv4 and ipv6 route-metrics.""" + expected = {'ethernets': { + 'eth0': {'addresses': ['10.0.0.5/24', '2001:dead:beef::2/128'], + 'dhcp4': True, + 'dhcp4-overrides': 
{'route-metric': 100}, + 'dhcp6': True, + 'dhcp6-overrides': {'route-metric': 100}, + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'set-name': 'eth0'}, + 'eth1': {'set-name': 'eth1', + 'match': {'macaddress': '22:0d:3a:04:75:98'}, + 'dhcp4': True, + 'dhcp6': False, + 'dhcp4-overrides': {'route-metric': 200}}, + 'eth2': {'set-name': 'eth2', + 'match': {'macaddress': '33:0d:3a:04:75:98'}, + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 300}, + 'dhcp6': True, + 'dhcp6-overrides': {'route-metric': 300}}}, 'version': 2} + imds_data = copy.deepcopy(NETWORK_METADATA) + nic1 = imds_data['network']['interface'][0] + nic1['ipv4']['ipAddress'].append({'privateIpAddress': '10.0.0.5'}) + + nic1['ipv6'] = { + "subnet": [{"address": "2001:dead:beef::16"}], + "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}, + {"privateIpAddress": "2001:dead:beef::2"}] + } + imds_data['network']['interface'].append(SECONDARY_INTERFACE) + third_intf = copy.deepcopy(SECONDARY_INTERFACE) + third_intf['macAddress'] = third_intf['macAddress'].replace('22', '33') + third_intf['ipv4']['subnet'][0]['address'] = '10.0.2.0' + third_intf['ipv4']['ipAddress'][0]['privateIpAddress'] = '10.0.2.6' + third_intf['ipv6'] = { + "subnet": [{"prefix": "64", "address": "2001:dead:beef::2"}], + "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}] + } + imds_data['network']['interface'].append(third_intf) + self.assertEqual(expected, dsaz.parse_network_config(imds_data)) + + def test_ipv4_secondary_ips_will_be_static_addrs(self): + """parse_network_config emits primary ipv4 as dhcp others are static""" + expected = {'ethernets': { + 'eth0': {'addresses': ['10.0.0.5/24'], + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}, + 'dhcp6': True, + 'dhcp6-overrides': {'route-metric': 100}, + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'set-name': 'eth0'}}, 'version': 2} + imds_data = copy.deepcopy(NETWORK_METADATA) + nic1 = imds_data['network']['interface'][0] + nic1['ipv4']['ipAddress'].append({'privateIpAddress': '10.0.0.5'}) + + nic1['ipv6'] = { + "subnet": [{"prefix": "10", "address": "2001:dead:beef::16"}], + "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}] + } + self.assertEqual(expected, dsaz.parse_network_config(imds_data)) + + def test_ipv6_secondary_ips_will_be_static_cidrs(self): + """parse_network_config emits primary ipv6 as dhcp others are static""" + expected = {'ethernets': { + 'eth0': {'addresses': ['10.0.0.5/24', '2001:dead:beef::2/10'], + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}, + 'dhcp6': True, + 'dhcp6-overrides': {'route-metric': 100}, + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'set-name': 'eth0'}}, 'version': 2} + imds_data = copy.deepcopy(NETWORK_METADATA) + nic1 = imds_data['network']['interface'][0] + nic1['ipv4']['ipAddress'].append({'privateIpAddress': '10.0.0.5'}) + + # Secondary ipv6 addresses currently ignored/unconfigured + nic1['ipv6'] = { + "subnet": [{"prefix": "10", "address": "2001:dead:beef::16"}], + "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}, + {"privateIpAddress": "2001:dead:beef::2"}] + } + self.assertEqual(expected, dsaz.parse_network_config(imds_data)) + + class TestGetMetadataFromIMDS(HttprettyTestCase): with_logs = True @@ -142,7 +307,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase): self.logs.getvalue()) @mock.patch(MOCKPATH + 'readurl') - @mock.patch(MOCKPATH + 'EphemeralDHCPv4') + @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting') @mock.patch(MOCKPATH + 'net.is_up') def 
test_get_metadata_performs_dhcp_when_network_is_down( self, m_net_is_up, m_dhcp, m_readurl): @@ -156,14 +321,15 @@ class TestGetMetadataFromIMDS(HttprettyTestCase): dsaz.get_metadata_from_imds('eth9', retries=2)) m_net_is_up.assert_called_with('eth9') - m_dhcp.assert_called_with('eth9') + m_dhcp.assert_called_with(mock.ANY, 'eth9') self.assertIn( "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time self.logs.getvalue()) m_readurl.assert_called_with( self.network_md_url, exception_cb=mock.ANY, - headers={'Metadata': 'true'}, retries=2, timeout=1) + headers={'Metadata': 'true'}, retries=2, + timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS) @mock.patch('cloudinit.url_helper.time.sleep') @mock.patch(MOCKPATH + 'net.is_up') @@ -221,8 +387,6 @@ class TestAzureDataSource(CiTestCase): def setUp(self): super(TestAzureDataSource, self).setUp() - if PY26: - raise SkipTest("Does not work on python 2.6") self.tmp = self.tmp_dir() # patch cloud_dir, so our 'seed_dir' is guaranteed empty @@ -313,7 +477,7 @@ scbus-1 on xpt0 bus 0 'public-keys': [], }) - self.instance_id = 'test-instance-id' + self.instance_id = 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8' def _dmi_mocks(key): if key == 'system-uuid': @@ -392,29 +556,6 @@ scbus-1 on xpt0 bus 0 dev = ds.get_resource_disk_on_freebsd(1) self.assertEqual("da1", dev) - @mock.patch('cloudinit.util.subp') - def test_find_freebsd_part_on_Azure(self, mock_subp): - glabel_out = ''' -gptid/fa52d426-c337-11e6-8911-00155d4c5e47 N/A da0p1 - label/rootfs N/A da0p2 - label/swap N/A da0p3 -''' - mock_subp.return_value = (glabel_out, "") - res = find_freebsd_part("/dev/label/rootfs") - self.assertEqual("da0p2", res) - - def test_get_path_dev_freebsd_on_Azure(self): - mnt_list = ''' -/dev/label/rootfs / ufs rw 1 1 -devfs /dev devfs rw,multilabel 0 0 -fdescfs /dev/fd fdescfs rw 0 0 -/dev/da1s1 /mnt/resource ufs rw 2 2 -''' - with mock.patch.object(os.path, 'exists', - return_value=True): - res = get_path_dev_freebsd('/etc', mnt_list) - self.assertIsNotNone(res) - @mock.patch(MOCKPATH + '_is_platform_viable') def test_call_is_platform_viable_seed(self, m_is_platform_viable): """Check seed_dir using _is_platform_viable and return False.""" @@ -503,14 +644,8 @@ fdescfs /dev/fd fdescfs rw 0 0 expected_metadata = { 'azure_data': { 'configurationsettype': 'LinuxProvisioningConfiguration'}, - 'imds': {'network': {'interface': [{ - 'ipv4': {'ipAddress': [ - {'privateIpAddress': '10.0.0.4', - 'publicIpAddress': '104.46.124.81'}], - 'subnet': [{'address': '10.0.0.0', 'prefix': '24'}]}, - 'ipv6': {'ipAddress': []}, - 'macAddress': '000D3A047598'}]}}, - 'instance-id': 'test-instance-id', + 'imds': NETWORK_METADATA, + 'instance-id': 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8', 'local-hostname': u'myhost', 'random_seed': 'wild'} @@ -543,7 +678,8 @@ fdescfs /dev/fd fdescfs rw 0 0 dsrc.crawl_metadata() self.assertEqual(str(cm.exception), error_msg) - @mock.patch('cloudinit.sources.DataSourceAzure.EphemeralDHCPv4') + @mock.patch( + 'cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting') @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file') @mock.patch( 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready') @@ -631,12 +767,71 @@ fdescfs /dev/fd fdescfs rw 0 0 'ethernets': { 'eth0': {'set-name': 'eth0', 'match': {'macaddress': '00:0d:3a:04:75:98'}, - 'dhcp4': True}}, + 'dhcp6': False, + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}}}, 'version': 2} dsrc = self._get_ds(data) dsrc.get_data() self.assertEqual(expected_network_config, dsrc.network_config) + def 
test_network_config_set_from_imds_route_metric_for_secondary_nic(self): + """Datasource.network_config adds route-metric to secondary nics.""" + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg} + expected_network_config = { + 'ethernets': { + 'eth0': {'set-name': 'eth0', + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'dhcp6': False, + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}}, + 'eth1': {'set-name': 'eth1', + 'match': {'macaddress': '22:0d:3a:04:75:98'}, + 'dhcp6': False, + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 200}}, + 'eth2': {'set-name': 'eth2', + 'match': {'macaddress': '33:0d:3a:04:75:98'}, + 'dhcp6': False, + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 300}}}, + 'version': 2} + imds_data = copy.deepcopy(NETWORK_METADATA) + imds_data['network']['interface'].append(SECONDARY_INTERFACE) + third_intf = copy.deepcopy(SECONDARY_INTERFACE) + third_intf['macAddress'] = third_intf['macAddress'].replace('22', '33') + third_intf['ipv4']['subnet'][0]['address'] = '10.0.2.0' + third_intf['ipv4']['ipAddress'][0]['privateIpAddress'] = '10.0.2.6' + imds_data['network']['interface'].append(third_intf) + + self.m_get_metadata_from_imds.return_value = imds_data + dsrc = self._get_ds(data) + dsrc.get_data() + self.assertEqual(expected_network_config, dsrc.network_config) + + def test_availability_zone_set_from_imds(self): + """Datasource.availability returns IMDS platformFaultDomain.""" + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg} + dsrc = self._get_ds(data) + dsrc.get_data() + self.assertEqual('0', dsrc.availability_zone) + + def test_region_set_from_imds(self): + """Datasource.region returns IMDS region location.""" + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg} + dsrc = self._get_ds(data) + dsrc.get_data() + self.assertEqual('eastus2', dsrc.region) + def test_user_cfg_set_agent_command(self): # set dscfg in via base64 encoded yaml cfg = {'agent_command': "my_command"} @@ -704,6 +899,22 @@ fdescfs /dev/fd fdescfs rw 0 0 crypt.crypt(odata['UserPassword'], defuser['passwd'][0:pos])) + def test_user_not_locked_if_password_redacted(self): + odata = {'HostName': "myhost", 'UserName': "myuser", + 'UserPassword': dsaz.DEF_PASSWD_REDACTION} + data = {'ovfcontent': construct_valid_ovf_env(data=odata)} + + dsrc = self._get_ds(data) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertTrue('default_user' in dsrc.cfg['system_info']) + defuser = dsrc.cfg['system_info']['default_user'] + + # default user should be updated username and should not be locked. 
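
Editor's note: the `TestParseNetworkConfig` and route-metric cases above pin down a simple numbering scheme for Azure NICs: the primary NIC gets `route-metric: 100` and each additional NIC gets 100 more, applied to `dhcp4-overrides` and, when DHCPv6 is enabled, to `dhcp6-overrides` as well. A sketch of just that numbering, derived from the expected netplan configs in the tests (the real `parse_network_config` also handles static secondary addresses, which this omits):

```python
def nic_route_metric(nic_index, base=100, step=100):
    """Route metric for the nth NIC: 100 for eth0, 200 for eth1, ..."""
    return base + nic_index * step

def dhcp_overrides(nic_index, dhcp6=False):
    """Netplan override dicts matching the expected configs in the tests."""
    metric = nic_route_metric(nic_index)
    overrides = {'dhcp4-overrides': {'route-metric': metric}}
    if dhcp6:
        overrides['dhcp6-overrides'] = {'route-metric': metric}
    return overrides

assert dhcp_overrides(0) == {'dhcp4-overrides': {'route-metric': 100}}
assert dhcp_overrides(2, dhcp6=True)['dhcp6-overrides'] == {'route-metric': 300}
```
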
+ self.assertEqual(defuser['name'], odata['UserName']) + self.assertIn('lock_passwd', defuser) + self.assertFalse(defuser['lock_passwd']) + def test_userdata_plain(self): mydata = "FOOBAR" odata = {'UserData': {'text': mydata, 'encoding': 'plain'}} @@ -880,6 +1091,24 @@ fdescfs /dev/fd fdescfs rw 0 0 self.assertTrue(ret) self.assertEqual('value', dsrc.metadata['test']) + def test_instance_id_endianness(self): + """Return the previous iid when dmi uuid is the byteswapped iid.""" + ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + # byte-swapped previous + write_file( + os.path.join(self.paths.cloud_dir, 'data', 'instance-id'), + '544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8') + ds.get_data() + self.assertEqual( + '544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8', ds.metadata['instance-id']) + # not byte-swapped previous + write_file( + os.path.join(self.paths.cloud_dir, 'data', 'instance-id'), + '644CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8') + ds.get_data() + self.assertEqual( + 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8', ds.metadata['instance-id']) + def test_instance_id_from_dmidecode_used(self): ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) ds.get_data() @@ -917,6 +1146,8 @@ fdescfs /dev/fd fdescfs rw 0 0 expected_cfg = { 'ethernets': { 'eth0': {'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}, + 'dhcp6': False, 'match': {'macaddress': '00:0d:3a:04:75:98'}, 'set-name': 'eth0'}}, 'version': 2} @@ -1079,7 +1310,7 @@ class TestAzureBounce(CiTestCase): def _dmi_mocks(key): if key == 'system-uuid': - return 'test-instance-id' + return 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8' elif key == 'chassis-asset-tag': return '7783-7084-3265-9085-8269-3286-77' raise RuntimeError('should not get here') @@ -1243,7 +1474,9 @@ class TestAzureBounce(CiTestCase): self.assertEqual(initial_host_name, self.set_hostname.call_args_list[-1][0][0]) - def test_environment_correct_for_bounce_command(self): + @mock.patch.object(dsaz, 'get_boot_telemetry') + def test_environment_correct_for_bounce_command( + self, mock_get_boot_telemetry): interface = 'int0' hostname = 'my-new-host' old_hostname = 'my-old-host' @@ -1259,7 +1492,9 @@ class TestAzureBounce(CiTestCase): self.assertEqual(hostname, bounce_env['hostname']) self.assertEqual(old_hostname, bounce_env['old_hostname']) - def test_default_bounce_command_ifup_used_by_default(self): + @mock.patch.object(dsaz, 'get_boot_telemetry') + def test_default_bounce_command_ifup_used_by_default( + self, mock_get_boot_telemetry): cfg = {'hostname_bounce': {'policy': 'force'}} data = self.get_ovf_env_with_dscfg('some-hostname', cfg) dsrc = self._get_ds(data, agent_command=['not', '__builtin__']) @@ -1377,12 +1612,15 @@ class TestCanDevBeReformatted(CiTestCase): self._domock(p + "util.mount_cb", 'm_mount_cb') self._domock(p + "os.path.realpath", 'm_realpath') self._domock(p + "os.path.exists", 'm_exists') + self._domock(p + "util.SeLinuxGuard", 'm_selguard') self.m_exists.side_effect = lambda p: p in bypath self.m_realpath.side_effect = realpath self.m_has_ntfs_filesystem.side_effect = has_ntfs_fs self.m_mount_cb.side_effect = mount_cb self.m_partitions_on_device.side_effect = partitions_on_device + self.m_selguard.__enter__ = mock.Mock(return_value=False) + self.m_selguard.__exit__ = mock.Mock() def test_three_partitions_is_false(self): """A disk with 3 partitions can not be formatted.""" @@ -1692,6 +1930,7 @@ class TestPreprovisioningPollIMDS(CiTestCase): self.paths = helpers.Paths({'cloud_dir': self.tmp}) dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d + 
@mock.patch('time.sleep', mock.MagicMock()) @mock.patch(MOCKPATH + 'EphemeralDHCPv4') def test_poll_imds_re_dhcp_on_timeout(self, m_dhcpv4, report_ready_func, fake_resp, m_media_switch, m_dhcp, @@ -1789,12 +2028,14 @@ class TestAzureDataSourcePreprovisioning(CiTestCase): headers={'Metadata': 'true', 'User-Agent': 'Cloud-Init/%s' % vs() - }, method='GET', timeout=1, + }, method='GET', + timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS, url=full_url)]) self.assertEqual(m_dhcp.call_count, 2) m_net.assert_any_call( broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9', - prefix_or_mask='255.255.255.0', router='192.168.2.1') + prefix_or_mask='255.255.255.0', router='192.168.2.1', + static_routes=None) self.assertEqual(m_net.call_count, 2) def test__reprovision_calls__poll_imds(self, fake_resp, @@ -1826,11 +2067,14 @@ class TestAzureDataSourcePreprovisioning(CiTestCase): headers={'Metadata': 'true', 'User-Agent': 'Cloud-Init/%s' % vs()}, - method='GET', timeout=1, url=full_url)]) + method='GET', + timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS, + url=full_url)]) self.assertEqual(m_dhcp.call_count, 2) m_net.assert_any_call( broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9', - prefix_or_mask='255.255.255.0', router='192.168.2.1') + prefix_or_mask='255.255.255.0', router='192.168.2.1', + static_routes=None) self.assertEqual(m_net.call_count, 2) @@ -1924,4 +2168,24 @@ class TestWBIsPlatformViable(CiTestCase): self.logs.getvalue()) +class TestRandomSeed(CiTestCase): + """Test proper handling of random_seed""" + + def test_non_ascii_seed_is_serializable(self): + """Pass if a random string from the Azure infrastructure which + contains at least one non-Unicode character can be converted to/from + JSON without alteration and without throwing an exception. + """ + path = resourceLocation("azure/non_unicode_random_string") + result = dsaz._get_random_seed(path) + + obj = {'seed': result} + try: + serialized = json_dumps(obj) + deserialized = load_json(serialized) + except UnicodeDecodeError: + self.fail("Non-serializable random seed returned") + + self.assertEqual(deserialized['seed'], result) + # vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py index 26b2b93d..007df09f 100644 --- a/tests/unittests/test_datasource/test_azure_helper.py +++ b/tests/unittests/test_datasource/test_azure_helper.py @@ -1,11 +1,13 @@ # This file is part of cloud-init. See LICENSE file for license information. 
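
Editor's note: the instance-id endianness test above, and the `is_byte_swapped` cases in test_azure_helper just below, encode the fact that SMBIOS stores the first three UUID fields little-endian, so DMI can report a VM's UUID byte-swapped relative to the fabric's view. A minimal sketch of that transformation, assuming only the first three dash-separated groups need reversing (cloud-init's actual comparison helper is `is_byte_swapped`):

```python
def byte_swap_uuid(uuid_str):
    """Byte-swap the first three groups of a UUID string (SMBIOS view)."""
    def swap(group):
        return bytes(reversed(bytes.fromhex(group))).hex().upper()

    parts = uuid_str.split('-')
    return '-'.join([swap(parts[0]), swap(parts[1]), swap(parts[2])] + parts[3:])

# Reproduces the pair of IDs used throughout these tests
assert (byte_swap_uuid('D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8')
        == '544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8')
```
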
import os +import unittest2 from textwrap import dedent from cloudinit.sources.helpers import azure as azure_helper from cloudinit.tests.helpers import CiTestCase, ExitStack, mock, populate_dir +from cloudinit.util import load_file from cloudinit.sources.helpers.azure import WALinuxAgentShim as wa_shim GOAL_STATE_TEMPLATE = """\ @@ -65,12 +67,17 @@ class TestFindEndpoint(CiTestCase): self.networkd_leases.return_value = None def test_missing_file(self): - self.assertRaises(ValueError, wa_shim.find_endpoint) + """wa_shim find_endpoint uses default endpoint if leasefile not found + """ + self.assertEqual(wa_shim.find_endpoint(), "168.63.129.16") def test_missing_special_azure_line(self): + """wa_shim find_endpoint uses default endpoint if leasefile is found + but does not contain DHCP Option 245 (whose value is the endpoint) + """ self.load_file.return_value = '' self.dhcp_options.return_value = {'eth0': {'key': 'value'}} - self.assertRaises(ValueError, wa_shim.find_endpoint) + self.assertEqual(wa_shim.find_endpoint(), "168.63.129.16") @staticmethod def _build_lease_content(encoded_address): @@ -163,6 +170,25 @@ class TestGoalStateParsing(CiTestCase): goal_state = self._get_goal_state(instance_id=instance_id) self.assertEqual(instance_id, goal_state.instance_id) + def test_instance_id_byte_swap(self): + """Return true when previous_iid is byteswapped current_iid""" + previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8" + current_iid = "544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8" + self.assertTrue( + azure_helper.is_byte_swapped(previous_iid, current_iid)) + + def test_instance_id_no_byte_swap_same_instance_id(self): + previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8" + current_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8" + self.assertFalse( + azure_helper.is_byte_swapped(previous_iid, current_iid)) + + def test_instance_id_no_byte_swap_diff_instance_id(self): + previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8" + current_iid = "G0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8" + self.assertFalse( + azure_helper.is_byte_swapped(previous_iid, current_iid)) + def test_certificates_xml_parsed_and_fetched_correctly(self): http_client = mock.MagicMock() certificates_url = 'TestCertificatesUrl' @@ -205,8 +231,10 @@ class TestAzureEndpointHttpClient(CiTestCase): response = client.get(url, secure=False) self.assertEqual(1, self.read_file_or_url.call_count) self.assertEqual(self.read_file_or_url.return_value, response) - self.assertEqual(mock.call(url, headers=self.regular_headers), - self.read_file_or_url.call_args) + self.assertEqual( + mock.call(url, headers=self.regular_headers, retries=10, + timeout=5), + self.read_file_or_url.call_args) def test_secure_get(self): url = 'MyTestUrl' @@ -220,8 +248,10 @@ class TestAzureEndpointHttpClient(CiTestCase): response = client.get(url, secure=True) self.assertEqual(1, self.read_file_or_url.call_count) self.assertEqual(self.read_file_or_url.return_value, response) - self.assertEqual(mock.call(url, headers=expected_headers), - self.read_file_or_url.call_args) + self.assertEqual( + mock.call(url, headers=expected_headers, retries=10, + timeout=5), + self.read_file_or_url.call_args) def test_post(self): data = mock.MagicMock() @@ -231,7 +261,8 @@ class TestAzureEndpointHttpClient(CiTestCase): self.assertEqual(1, self.read_file_or_url.call_count) self.assertEqual(self.read_file_or_url.return_value, response) self.assertEqual( - mock.call(url, data=data, headers=self.regular_headers), + mock.call(url, data=data, headers=self.regular_headers, retries=10, + timeout=5), 
self.read_file_or_url.call_args) def test_post_with_extra_headers(self): @@ -243,7 +274,8 @@ class TestAzureEndpointHttpClient(CiTestCase): expected_headers = self.regular_headers.copy() expected_headers.update(extra_headers) self.assertEqual( - mock.call(mock.ANY, data=mock.ANY, headers=expected_headers), + mock.call(mock.ANY, data=mock.ANY, headers=expected_headers, + retries=10, timeout=5), self.read_file_or_url.call_args) @@ -289,6 +321,50 @@ class TestOpenSSLManager(CiTestCase): self.assertEqual([mock.call(manager.tmpdir)], del_dir.call_args_list) +class TestOpenSSLManagerActions(CiTestCase): + + def setUp(self): + super(TestOpenSSLManagerActions, self).setUp() + + self.allowed_subp = True + + def _data_file(self, name): + path = 'tests/data/azure' + return os.path.join(path, name) + + @unittest2.skip("todo move to cloud_test") + def test_pubkey_extract(self): + cert = load_file(self._data_file('pubkey_extract_cert')) + good_key = load_file(self._data_file('pubkey_extract_ssh_key')) + sslmgr = azure_helper.OpenSSLManager() + key = sslmgr._get_ssh_key_from_cert(cert) + self.assertEqual(good_key, key) + + good_fingerprint = '073E19D14D1C799224C6A0FD8DDAB6A8BF27D473' + fingerprint = sslmgr._get_fingerprint_from_cert(cert) + self.assertEqual(good_fingerprint, fingerprint) + + @unittest2.skip("todo move to cloud_test") + @mock.patch.object(azure_helper.OpenSSLManager, '_decrypt_certs_from_xml') + def test_parse_certificates(self, mock_decrypt_certs): + """Azure control plane puts private keys as well as certificates + into the Certificates XML object. Make sure only the public keys + from certs are extracted and that fingerprints are converted to + the form specified in the ovf-env.xml file. + """ + cert_contents = load_file(self._data_file('parse_certificates_pem')) + fingerprints = load_file(self._data_file( + 'parse_certificates_fingerprints') + ).splitlines() + mock_decrypt_certs.return_value = cert_contents + sslmgr = azure_helper.OpenSSLManager() + keys_by_fp = sslmgr.parse_certificates('') + for fp in keys_by_fp.keys(): + self.assertIn(fp, fingerprints) + for fp in fingerprints: + self.assertIn(fp, keys_by_fp) + + class TestWALinuxAgentShim(CiTestCase): def setUp(self): @@ -329,18 +405,31 @@ class TestWALinuxAgentShim(CiTestCase): def test_certificates_used_to_determine_public_keys(self): shim = wa_shim() - data = shim.register_with_azure_and_fetch_data() + """if register_with_azure_and_fetch_data() isn't passed some info about + the user's public keys, there's no point in even trying to parse + the certificates + """ + mypk = [{'fingerprint': 'fp1', 'path': 'path1'}, + {'fingerprint': 'fp3', 'path': 'path3', 'value': ''}] + certs = {'fp1': 'expected-key', + 'fp2': 'should-not-be-found', + 'fp3': 'expected-no-value-key', + } + sslmgr = self.OpenSSLManager.return_value + sslmgr.parse_certificates.return_value = certs + data = shim.register_with_azure_and_fetch_data(pubkey_info=mypk) self.assertEqual( [mock.call(self.GoalState.return_value.certificates_xml)], - self.OpenSSLManager.return_value.parse_certificates.call_args_list) - self.assertEqual( - self.OpenSSLManager.return_value.parse_certificates.return_value, - data['public-keys']) + sslmgr.parse_certificates.call_args_list) + self.assertIn('expected-key', data['public-keys']) + self.assertIn('expected-no-value-key', data['public-keys']) + self.assertNotIn('should-not-be-found', data['public-keys']) def test_absent_certificates_produces_empty_public_keys(self): + mypk = [{'fingerprint': 'fp1', 'path': 'path1'}] 
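
Editor's note: `test_pubkey_extract` above exercises deriving an OpenSSH public key and a fingerprint from the X.509 certificates that the Azure control plane delivers (the `pubkey_extract_cert` and `pubkey_extract_ssh_key` data files added earlier in this diff are the fixture pair). A hedged sketch of the key-derivation step using the `openssl` and `ssh-keygen` CLIs; cloud-init's `OpenSSLManager` shells out similarly, though the exact invocations here are illustrative:

```python
import subprocess

def ssh_key_from_cert(cert_pem):
    """Derive a one-line OpenSSH public key from a PEM X.509 certificate."""
    # Extract the PKCS8 public key from the certificate...
    pubkey = subprocess.run(
        ['openssl', 'x509', '-noout', '-pubkey'],
        input=cert_pem, capture_output=True, text=True, check=True).stdout
    # ...then convert it to OpenSSH's authorized_keys format.
    return subprocess.run(
        ['ssh-keygen', '-i', '-m', 'PKCS8', '-f', '/dev/stdin'],
        input=pubkey, capture_output=True, text=True, check=True).stdout.strip()
```
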
self.GoalState.return_value.certificates_xml = None shim = wa_shim() - data = shim.register_with_azure_and_fetch_data() + data = shim.register_with_azure_and_fetch_data(pubkey_info=mypk) self.assertEqual([], data['public-keys']) def test_correct_url_used_for_report_ready(self): diff --git a/tests/unittests/test_datasource/test_cloudsigma.py b/tests/unittests/test_datasource/test_cloudsigma.py index 3bf52e69..d62d542b 100644 --- a/tests/unittests/test_datasource/test_cloudsigma.py +++ b/tests/unittests/test_datasource/test_cloudsigma.py @@ -30,6 +30,8 @@ SERVER_CONTEXT = { } } +DS_PATH = 'cloudinit.sources.DataSourceCloudSigma.DataSourceCloudSigma' + class CepkoMock(Cepko): def __init__(self, mocked_context): @@ -42,17 +44,15 @@ class CepkoMock(Cepko): class DataSourceCloudSigmaTest(test_helpers.CiTestCase): def setUp(self): super(DataSourceCloudSigmaTest, self).setUp() - self.add_patch( - "cloudinit.sources.DataSourceCloudSigma.util.is_container", - "m_is_container", return_value=False) self.paths = helpers.Paths({'run_dir': self.tmp_dir()}) + self.add_patch(DS_PATH + '.is_running_in_cloudsigma', + "m_is_container", return_value=True) self.datasource = DataSourceCloudSigma.DataSourceCloudSigma( "", "", paths=self.paths) - self.datasource.is_running_in_cloudsigma = lambda: True self.datasource.cepko = CepkoMock(SERVER_CONTEXT) - self.datasource.get_data() def test_get_hostname(self): + self.datasource.get_data() self.assertEqual("test_server", self.datasource.get_hostname()) self.datasource.metadata['name'] = '' self.assertEqual("65b2fb23", self.datasource.get_hostname()) @@ -61,23 +61,28 @@ class DataSourceCloudSigmaTest(test_helpers.CiTestCase): self.assertEqual("65b2fb23", self.datasource.get_hostname()) def test_get_public_ssh_keys(self): + self.datasource.get_data() self.assertEqual([SERVER_CONTEXT['meta']['ssh_public_key']], self.datasource.get_public_ssh_keys()) def test_get_instance_id(self): + self.datasource.get_data() self.assertEqual(SERVER_CONTEXT['uuid'], self.datasource.get_instance_id()) def test_platform(self): """All platform-related attributes are set.""" + self.datasource.get_data() self.assertEqual(self.datasource.cloud_name, 'cloudsigma') self.assertEqual(self.datasource.platform_type, 'cloudsigma') self.assertEqual(self.datasource.subplatform, 'cepko (/dev/ttyS1)') def test_metadata(self): + self.datasource.get_data() self.assertEqual(self.datasource.metadata, SERVER_CONTEXT) def test_user_data(self): + self.datasource.get_data() self.assertEqual(self.datasource.userdata_raw, SERVER_CONTEXT['meta']['cloudinit-user-data']) @@ -91,14 +96,13 @@ class DataSourceCloudSigmaTest(test_helpers.CiTestCase): self.assertEqual(self.datasource.userdata_raw, b'hi world\n') def test_vendor_data(self): + self.datasource.get_data() self.assertEqual(self.datasource.vendordata_raw, SERVER_CONTEXT['vendor_data']['cloudinit']) def test_lack_of_vendor_data(self): stripped_context = copy.deepcopy(SERVER_CONTEXT) del stripped_context["vendor_data"] - self.datasource = DataSourceCloudSigma.DataSourceCloudSigma( - "", "", paths=self.paths) self.datasource.cepko = CepkoMock(stripped_context) self.datasource.get_data() @@ -107,8 +111,6 @@ class DataSourceCloudSigmaTest(test_helpers.CiTestCase): def test_lack_of_cloudinit_key_in_vendor_data(self): stripped_context = copy.deepcopy(SERVER_CONTEXT) del stripped_context["vendor_data"]["cloudinit"] - self.datasource = DataSourceCloudSigma.DataSourceCloudSigma( - "", "", paths=self.paths) self.datasource.cepko = CepkoMock(stripped_context) 
self.datasource.get_data() diff --git a/tests/unittests/test_datasource/test_cloudstack.py b/tests/unittests/test_datasource/test_cloudstack.py index d6d2d6b2..83c2f753 100644 --- a/tests/unittests/test_datasource/test_cloudstack.py +++ b/tests/unittests/test_datasource/test_cloudstack.py @@ -10,6 +10,9 @@ from cloudinit.tests.helpers import CiTestCase, ExitStack, mock import os import time +MOD_PATH = 'cloudinit.sources.DataSourceCloudStack' +DS_PATH = MOD_PATH + '.DataSourceCloudStack' + class TestCloudStackPasswordFetching(CiTestCase): @@ -17,7 +20,7 @@ class TestCloudStackPasswordFetching(CiTestCase): super(TestCloudStackPasswordFetching, self).setUp() self.patches = ExitStack() self.addCleanup(self.patches.close) - mod_name = 'cloudinit.sources.DataSourceCloudStack' + mod_name = MOD_PATH self.patches.enter_context(mock.patch('{0}.ec2'.format(mod_name))) self.patches.enter_context(mock.patch('{0}.uhelp'.format(mod_name))) default_gw = "192.201.20.0" @@ -56,7 +59,9 @@ class TestCloudStackPasswordFetching(CiTestCase): ds.get_data() self.assertEqual({}, ds.get_config_obj()) - def test_password_sets_password(self): + @mock.patch(DS_PATH + '.wait_for_metadata_service') + def test_password_sets_password(self, m_wait): + m_wait.return_value = True password = 'SekritSquirrel' self._set_password_server_response(password) ds = DataSourceCloudStack( @@ -64,7 +69,9 @@ class TestCloudStackPasswordFetching(CiTestCase): ds.get_data() self.assertEqual(password, ds.get_config_obj()['password']) - def test_bad_request_doesnt_stop_ds_from_working(self): + @mock.patch(DS_PATH + '.wait_for_metadata_service') + def test_bad_request_doesnt_stop_ds_from_working(self, m_wait): + m_wait.return_value = True self._set_password_server_response('bad_request') ds = DataSourceCloudStack( {}, None, helpers.Paths({'run_dir': self.tmp})) @@ -79,7 +86,9 @@ class TestCloudStackPasswordFetching(CiTestCase): request_types.append(arg.split()[1]) self.assertEqual(expected_request_types, request_types) - def test_valid_response_means_password_marked_as_saved(self): + @mock.patch(DS_PATH + '.wait_for_metadata_service') + def test_valid_response_means_password_marked_as_saved(self, m_wait): + m_wait.return_value = True password = 'SekritSquirrel' subp = self._set_password_server_response(password) ds = DataSourceCloudStack( @@ -92,7 +101,9 @@ class TestCloudStackPasswordFetching(CiTestCase): subp = self._set_password_server_response(response_string) ds = DataSourceCloudStack( {}, None, helpers.Paths({'run_dir': self.tmp})) - ds.get_data() + with mock.patch(DS_PATH + '.wait_for_metadata_service') as m_wait: + m_wait.return_value = True + ds.get_data() self.assertRequestTypesSent(subp, ['send_my_password']) def test_password_not_saved_if_empty(self): diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py index 6b01a4ea..4ab5d471 100644 --- a/tests/unittests/test_datasource/test_common.py +++ b/tests/unittests/test_datasource/test_common.py @@ -4,6 +4,7 @@ from cloudinit import settings from cloudinit import sources from cloudinit import type_utils from cloudinit.sources import ( + DataSource, DataSourceAliYun as AliYun, DataSourceAltCloud as AltCloud, DataSourceAzure as Azure, @@ -13,6 +14,7 @@ from cloudinit.sources import ( DataSourceConfigDrive as ConfigDrive, DataSourceDigitalOcean as DigitalOcean, DataSourceEc2 as Ec2, + DataSourceExoscale as Exoscale, DataSourceGCE as GCE, DataSourceHetzner as Hetzner, DataSourceIBMCloud as IBMCloud, @@ -22,6 +24,7 @@ from 
cloudinit.sources import ( DataSourceOpenStack as OpenStack, DataSourceOracle as Oracle, DataSourceOVF as OVF, + DataSourceRbxCloud as RbxCloud, DataSourceScaleway as Scaleway, DataSourceSmartOS as SmartOS, ) @@ -43,6 +46,7 @@ DEFAULT_LOCAL = [ SmartOS.DataSourceSmartOS, Ec2.DataSourceEc2Local, OpenStack.DataSourceOpenStackLocal, + RbxCloud.DataSourceRbxCloud, Scaleway.DataSourceScaleway, ] @@ -53,6 +57,7 @@ DEFAULT_NETWORK = [ CloudStack.DataSourceCloudStack, DSNone.DataSourceNone, Ec2.DataSourceEc2, + Exoscale.DataSourceExoscale, GCE.DataSourceGCE, MAAS.DataSourceMAAS, NoCloud.DataSourceNoCloudNet, @@ -83,4 +88,23 @@ class ExpectedDataSources(test_helpers.TestCase): self.assertEqual(set([AliYun.DataSourceAliYun]), set(found)) +class TestDataSourceInvariants(test_helpers.TestCase): + def test_data_sources_have_valid_network_config_sources(self): + for ds in DEFAULT_LOCAL + DEFAULT_NETWORK: + for cfg_src in ds.network_config_sources: + fail_msg = ('{} has an invalid network_config_sources entry:' + ' {}'.format(str(ds), cfg_src)) + self.assertTrue(hasattr(sources.NetworkConfigSource, cfg_src), + fail_msg) + + def test_expected_dsname_defined(self): + for ds in DEFAULT_LOCAL + DEFAULT_NETWORK: + fail_msg = ( + '{} has an invalid / missing dsname property: {}'.format( + str(ds), str(ds.dsname) + ) + ) + self.assertNotEqual(ds.dsname, DataSource.dsname, fail_msg) + self.assertIsNotNone(ds.dsname) + # vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index dcdabea5..6f830cc6 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -220,13 +220,15 @@ CFG_DRIVE_FILES_V2 = { 'openstack/2015-10-15/user_data': USER_DATA, 'openstack/2015-10-15/network_data.json': json.dumps(NETWORK_DATA)} +M_PATH = "cloudinit.sources.DataSourceConfigDrive." + class TestConfigDriveDataSource(CiTestCase): def setUp(self): super(TestConfigDriveDataSource, self).setUp() self.add_patch( - "cloudinit.sources.DataSourceConfigDrive.util.find_devs_with", + M_PATH + "util.find_devs_with", "m_find_devs_with", return_value=[]) self.tmp = self.tmp_dir() @@ -268,8 +270,7 @@ class TestConfigDriveDataSource(CiTestCase): exists_mock = mocks.enter_context( mock.patch.object(os.path, 'exists', side_effect=exists_side_effect())) - device = cfg_ds.device_name_to_device(name) - self.assertEqual(dev_name, device) + self.assertEqual(dev_name, cfg_ds.device_name_to_device(name)) find_mock.assert_called_once_with(mock.ANY) self.assertEqual(exists_mock.call_count, 2) @@ -296,8 +297,7 @@ class TestConfigDriveDataSource(CiTestCase): exists_mock = mocks.enter_context( mock.patch.object(os.path, 'exists', return_value=True)) - device = cfg_ds.device_name_to_device(name) - self.assertEqual(dev_name, device) + self.assertEqual(dev_name, cfg_ds.device_name_to_device(name)) find_mock.assert_called_once_with(mock.ANY) exists_mock.assert_called_once_with(mock.ANY) @@ -331,8 +331,7 @@ class TestConfigDriveDataSource(CiTestCase): yield True with mock.patch.object(os.path, 'exists', side_effect=exists_side_effect()): - device = cfg_ds.device_name_to_device(name) - self.assertEqual(dev_name, device) + self.assertEqual(dev_name, cfg_ds.device_name_to_device(name)) # We don't assert the call count for os.path.exists() because # not all of the entries in name_tests results in two calls to # that function. 
Specifically, 'root2k' doesn't seem to call @@ -359,8 +358,7 @@ class TestConfigDriveDataSource(CiTestCase): } for name, dev_name in name_tests.items(): with mock.patch.object(os.path, 'exists', return_value=True): - device = cfg_ds.device_name_to_device(name) - self.assertEqual(dev_name, device) + self.assertEqual(dev_name, cfg_ds.device_name_to_device(name)) def test_dir_valid(self): """Verify a dir is read as such.""" @@ -472,7 +470,7 @@ class TestConfigDriveDataSource(CiTestCase): util.find_devs_with = orig_find_devs_with util.is_partition = orig_is_partition - @mock.patch('cloudinit.sources.DataSourceConfigDrive.on_first_boot') + @mock.patch(M_PATH + 'on_first_boot') def test_pubkeys_v2(self, on_first_boot): """Verify that public-keys work in config-drive-v2.""" myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2) @@ -482,6 +480,19 @@ class TestConfigDriveDataSource(CiTestCase): self.assertEqual('openstack', myds.platform) self.assertEqual('seed-dir (%s/seed)' % self.tmp, myds.subplatform) + def test_subplatform_config_drive_when_starts_with_dev(self): + """subplatform reports config-drive when source starts with /dev/.""" + cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, + None, + helpers.Paths({})) + with mock.patch(M_PATH + 'find_candidate_devs') as m_find_devs: + with mock.patch(M_PATH + 'util.is_FreeBSD', return_value=False): + with mock.patch(M_PATH + 'util.mount_cb'): + with mock.patch(M_PATH + 'on_first_boot'): + m_find_devs.return_value = ['/dev/anything'] + self.assertEqual(True, cfg_ds.get_data()) + self.assertEqual('config-disk (/dev/anything)', cfg_ds.subplatform) + class TestNetJson(CiTestCase): def setUp(self): @@ -489,13 +500,13 @@ class TestNetJson(CiTestCase): self.tmp = self.tmp_dir() self.maxDiff = None - @mock.patch('cloudinit.sources.DataSourceConfigDrive.on_first_boot') + @mock.patch(M_PATH + 'on_first_boot') def test_network_data_is_found(self, on_first_boot): """Verify that network_data is present in ds in config-drive-v2.""" myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2) self.assertIsNotNone(myds.network_json) - @mock.patch('cloudinit.sources.DataSourceConfigDrive.on_first_boot') + @mock.patch(M_PATH + 'on_first_boot') def test_network_config_is_converted(self, on_first_boot): """Verify that network_data is converted and present on ds object.""" myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2) @@ -503,6 +514,46 @@ class TestNetJson(CiTestCase): known_macs=KNOWN_MACS) self.assertEqual(myds.network_config, network_config) + def test_network_config_conversion_dhcp6(self): + """Test some ipv6 input network json and check the expected + conversions.""" + in_data = { + 'links': [ + {'vif_id': '2ecc7709-b3f7-4448-9580-e1ec32d75bbd', + 'ethernet_mac_address': 'fa:16:3e:69:b0:58', + 'type': 'ovs', 'mtu': None, 'id': 'tap2ecc7709-b3'}, + {'vif_id': '2f88d109-5b57-40e6-af32-2472df09dc33', + 'ethernet_mac_address': 'fa:16:3e:d4:57:ad', + 'type': 'ovs', 'mtu': None, 'id': 'tap2f88d109-5b'}, + ], + 'networks': [ + {'link': 'tap2ecc7709-b3', 'type': 'ipv6_dhcpv6-stateless', + 'network_id': '6d6357ac-0f70-4afa-8bd7-c274cc4ea235', + 'id': 'network0'}, + {'link': 'tap2f88d109-5b', 'type': 'ipv6_dhcpv6-stateful', + 'network_id': 'd227a9b3-6960-4d94-8976-ee5788b44f54', + 'id': 'network1'}, + ] + } + out_data = { + 'version': 1, + 'config': [ + {'mac_address': 'fa:16:3e:69:b0:58', + 'mtu': None, + 'name': 'enp0s1', + 'subnets': [{'type': 'ipv6_dhcpv6-stateless'}], + 'type': 'physical'}, + {'mac_address': 'fa:16:3e:d4:57:ad', + 'mtu': None, + 
'name': 'enp0s2', + 'subnets': [{'type': 'ipv6_dhcpv6-stateful'}], + 'type': 'physical', + 'accept-ra': True} + ], + } + conv_data = openstack.convert_net_json(in_data, known_macs=KNOWN_MACS) + self.assertEqual(out_data, conv_data) + def test_network_config_conversions(self): """Tests a bunch of input network json and checks the expected conversions.""" @@ -604,6 +655,9 @@ class TestNetJson(CiTestCase): class TestConvertNetworkData(CiTestCase): + + with_logs = True + def setUp(self): super(TestConvertNetworkData, self).setUp() self.tmp = self.tmp_dir() @@ -730,6 +784,26 @@ class TestConvertNetworkData(CiTestCase): 'enp0s2': 'fa:16:3e:d4:57:ad'} self.assertEqual(expected, config_name2mac) + def test_unknown_device_types_accepted(self): + # If we don't recognise a link, we should treat it as physical for a + # best-effort boot + my_netdata = deepcopy(NETWORK_DATA) + my_netdata['links'][0]['type'] = 'my-special-link-type' + + ncfg = openstack.convert_net_json(my_netdata, known_macs=KNOWN_MACS) + config_name2mac = {} + for n in ncfg['config']: + if n['type'] == 'physical': + config_name2mac[n['name']] = n['mac_address'] + + expected = {'nic0': 'fa:16:3e:05:30:fe', 'enp0s1': 'fa:16:3e:69:b0:58', + 'enp0s2': 'fa:16:3e:d4:57:ad'} + self.assertEqual(expected, config_name2mac) + + # We should, however, warn the user that we don't recognise the type + self.assertIn('Unknown network_data link type (my-special-link-type)', + self.logs.getvalue()) + def cfg_ds_from_dir(base_d, files=None): run = os.path.join(base_d, "run") diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py index 1a5956d9..2a96122f 100644 --- a/tests/unittests/test_datasource/test_ec2.py +++ b/tests/unittests/test_datasource/test_ec2.py @@ -3,7 +3,7 @@ import copy import httpretty import json -import mock +from unittest import mock from cloudinit import helpers from cloudinit.sources import DataSourceEc2 as ec2 @@ -191,7 +191,9 @@ def register_mock_metaserver(base_url, data): register(base_url, 'not found', status=404) def myreg(*argc, **kwargs): - return httpretty.register_uri(httpretty.GET, *argc, **kwargs) + url = argc[0] + method = httpretty.PUT if ec2.API_TOKEN_ROUTE in url else httpretty.GET + return httpretty.register_uri(method, *argc, **kwargs) register_helper(myreg, base_url, data) @@ -237,6 +239,8 @@ class TestEc2(test_helpers.HttprettyTestCase): if md: all_versions = ( [ds.min_metadata_version] + ds.extended_metadata_versions) + token_url = self.data_url('latest', data_item='api/token') + register_mock_metaserver(token_url, 'API-TOKEN') for version in all_versions: metadata_url = self.data_url(version) + '/' if version == md_version: @@ -401,6 +405,47 @@ class TestEc2(test_helpers.HttprettyTestCase): ds.metadata = DEFAULT_METADATA self.assertEqual('my-identity-id', ds.get_instance_id()) + def test_classic_instance_true(self): + """If no vpc-id in metadata, is_classic_instance must return true.""" + md_copy = copy.deepcopy(DEFAULT_METADATA) + ifaces_md = md_copy.get('network', {}).get('interfaces', {}) + for _mac, mac_data in ifaces_md.get('macs', {}).items(): + if 'vpc-id' in mac_data: + del mac_data['vpc-id'] + + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, + md={'md': md_copy}) + self.assertTrue(ds.get_data()) + self.assertTrue(ds.is_classic_instance()) + + def test_classic_instance_false(self): + """If vpc-id in metadata, is_classic_instance must return false.""" + ds = self._setup_ds( + 
platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, + md={'md': DEFAULT_METADATA}) + self.assertTrue(ds.get_data()) + self.assertFalse(ds.is_classic_instance()) + + def test_aws_token_redacted(self): + """Verify that AWS tokens are redacted when logged.""" + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, + md={'md': DEFAULT_METADATA}) + self.assertTrue(ds.get_data()) + all_logs = self.logs.getvalue().splitlines() + REDACT_TTL = "'X-aws-ec2-metadata-token-ttl-seconds': 'REDACTED'" + REDACT_TOK = "'X-aws-ec2-metadata-token': 'REDACTED'" + logs_with_redacted_ttl = [log for log in all_logs if REDACT_TTL in log] + logs_with_redacted = [log for log in all_logs if REDACT_TOK in log] + logs_with_token = [log for log in all_logs if 'API-TOKEN' in log] + self.assertEqual(1, len(logs_with_redacted_ttl)) + self.assertEqual(79, len(logs_with_redacted)) + self.assertEqual(0, len(logs_with_token)) + @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') def test_valid_platform_with_strict_true(self, m_dhcp): """Valid platform data should return true with strict_id true.""" @@ -514,7 +559,8 @@ class TestEc2(test_helpers.HttprettyTestCase): m_dhcp.assert_called_once_with('eth9') m_net.assert_called_once_with( broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9', - prefix_or_mask='255.255.255.0', router='192.168.2.1') + prefix_or_mask='255.255.255.0', router='192.168.2.1', + static_routes=None) self.assertIn('Crawl of metadata service took', self.logs.getvalue()) @@ -637,4 +683,45 @@ class TestConvertEc2MetadataNetworkConfig(test_helpers.CiTestCase): expected, ec2.convert_ec2_metadata_network_config(self.network_metadata)) + +class TestIdentifyPlatform(test_helpers.CiTestCase): + + def collmock(self, **kwargs): + """Return non-special _collect_platform_data updated with changes.""" + unspecial = { + 'asset_tag': '3857-0037-2746-7462-1818-3997-77', + 'serial': 'H23-C4J3JV-R6', + 'uuid': '81c7e555-6471-4833-9551-1ab366c4cfd2', + 'uuid_source': 'dmi', + 'vendor': 'tothecloud', + } + unspecial.update(**kwargs) + return unspecial + + @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data') + def test_identify_zstack(self, m_collect): + """zstack should be identified if chassis-asset-tag ends in .zstack.io + """ + m_collect.return_value = self.collmock(asset_tag='123456.zstack.io') + self.assertEqual(ec2.CloudNames.ZSTACK, ec2.identify_platform()) + + @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data') + def test_identify_zstack_full_domain_only(self, m_collect): + """zstack asset-tag matching should match only on full domain boundary. + """ + m_collect.return_value = self.collmock(asset_tag='123456.buzzstack.io') + self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform()) + + @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data') + def test_identify_e24cloud(self, m_collect): + """e24cloud identified if vendor is e24cloud""" + m_collect.return_value = self.collmock(vendor='e24cloud') + self.assertEqual(ec2.CloudNames.E24CLOUD, ec2.identify_platform()) + + @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data') + def test_identify_e24cloud_negative(self, m_collect): + """e24cloud not identified if vendor is not exactly e24cloud""" + m_collect.return_value = self.collmock(vendor='e24cloudyday') + self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform()) + # vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_exoscale.py b/tests/unittests/test_datasource/test_exoscale.py new file mode 100644 index 00000000..f0061199 --- /dev/null +++ b/tests/unittests/test_datasource/test_exoscale.py @@ -0,0 +1,211 @@ +# Author: Mathieu Corbin <mathieu.corbin@exoscale.com> +# Author: Christopher Glass <christopher.glass@exoscale.com> +# +# This file is part of cloud-init. See LICENSE file for license information. +from cloudinit import helpers +from cloudinit.sources.DataSourceExoscale import ( + API_VERSION, + DataSourceExoscale, + METADATA_URL, + get_password, + PASSWORD_SERVER_PORT, + read_metadata) +from cloudinit.tests.helpers import HttprettyTestCase, mock +from cloudinit import util + +import httpretty +import os +import requests + + +TEST_PASSWORD_URL = "{}:{}/{}/".format(METADATA_URL, + PASSWORD_SERVER_PORT, + API_VERSION) + +TEST_METADATA_URL = "{}/{}/meta-data/".format(METADATA_URL, + API_VERSION) + +TEST_USERDATA_URL = "{}/{}/user-data".format(METADATA_URL, + API_VERSION) + + +@httpretty.activate class TestDatasourceExoscale(HttprettyTestCase): + + def setUp(self): + super(TestDatasourceExoscale, self).setUp() + self.tmp = self.tmp_dir() + self.password_url = TEST_PASSWORD_URL + self.metadata_url = TEST_METADATA_URL + self.userdata_url = TEST_USERDATA_URL + + def test_password_saved(self): + """The password is not set when the password server returns + 'saved_password', meaning it was already retrieved on a prior boot.""" + httpretty.register_uri(httpretty.GET, + self.password_url, + body="saved_password") + self.assertFalse(get_password()) + + def test_password_empty(self): + """No password is set if the metadata service returns + an empty string.""" + httpretty.register_uri(httpretty.GET, + self.password_url, + body="") + self.assertFalse(get_password()) + + def test_password(self): + """The password is set to what is found in the metadata + service.""" + expected_password = "p@ssw0rd" + httpretty.register_uri(httpretty.GET, + self.password_url, + body=expected_password) + password = get_password() + self.assertEqual(expected_password, password) + + def test_activate_removes_set_passwords_semaphore(self): + """Allow set_passwords to run every boot by removing the semaphore.""" + path = helpers.Paths({'cloud_dir': self.tmp}) + sem_dir = self.tmp_path('instance/sem', dir=self.tmp) + util.ensure_dir(sem_dir) + sem_file = os.path.join(sem_dir, 'config_set_passwords') + with open(sem_file, 'w') as stream: + stream.write('') + ds = DataSourceExoscale({}, None, path) + ds.activate(None, None) + self.assertFalse(os.path.exists(sem_file)) + + def test_get_data(self): + """The datasource conforms to expected behavior when supplied + full test data.""" + path = helpers.Paths({'run_dir': self.tmp}) + ds = DataSourceExoscale({}, None, path) +
ds._is_platform_viable = lambda: True + expected_password = "p@ssw0rd" + expected_id = "12345" + expected_hostname = "myname" + expected_userdata = "#cloud-config" + httpretty.register_uri(httpretty.GET, + self.userdata_url, + body=expected_userdata) + httpretty.register_uri(httpretty.GET, + self.password_url, + body=expected_password) + httpretty.register_uri(httpretty.GET, + self.metadata_url, + body="instance-id\nlocal-hostname") + httpretty.register_uri(httpretty.GET, + "{}local-hostname".format(self.metadata_url), + body=expected_hostname) + httpretty.register_uri(httpretty.GET, + "{}instance-id".format(self.metadata_url), + body=expected_id) + self.assertTrue(ds._get_data()) + self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config") + self.assertEqual(ds.metadata, {"instance-id": expected_id, + "local-hostname": expected_hostname}) + self.assertEqual(ds.get_config_obj(), + {'ssh_pwauth': True, + 'password': expected_password, + 'chpasswd': { + 'expire': False, + }}) + + def test_get_data_saved_password(self): + """The datasource conforms to expected behavior when saved_password is + returned by the password server.""" + path = helpers.Paths({'run_dir': self.tmp}) + ds = DataSourceExoscale({}, None, path) + ds._is_platform_viable = lambda: True + expected_answer = "saved_password" + expected_id = "12345" + expected_hostname = "myname" + expected_userdata = "#cloud-config" + httpretty.register_uri(httpretty.GET, + self.userdata_url, + body=expected_userdata) + httpretty.register_uri(httpretty.GET, + self.password_url, + body=expected_answer) + httpretty.register_uri(httpretty.GET, + self.metadata_url, + body="instance-id\nlocal-hostname") + httpretty.register_uri(httpretty.GET, + "{}local-hostname".format(self.metadata_url), + body=expected_hostname) + httpretty.register_uri(httpretty.GET, + "{}instance-id".format(self.metadata_url), + body=expected_id) + self.assertTrue(ds._get_data()) + self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config") + self.assertEqual(ds.metadata, {"instance-id": expected_id, + "local-hostname": expected_hostname}) + self.assertEqual(ds.get_config_obj(), {}) + + def test_get_data_no_password(self): + """The datasource conforms to expected behavior when no password is + returned by the password server.""" + path = helpers.Paths({'run_dir': self.tmp}) + ds = DataSourceExoscale({}, None, path) + ds._is_platform_viable = lambda: True + expected_answer = "" + expected_id = "12345" + expected_hostname = "myname" + expected_userdata = "#cloud-config" + httpretty.register_uri(httpretty.GET, + self.userdata_url, + body=expected_userdata) + httpretty.register_uri(httpretty.GET, + self.password_url, + body=expected_answer) + httpretty.register_uri(httpretty.GET, + self.metadata_url, + body="instance-id\nlocal-hostname") + httpretty.register_uri(httpretty.GET, + "{}local-hostname".format(self.metadata_url), + body=expected_hostname) + httpretty.register_uri(httpretty.GET, + "{}instance-id".format(self.metadata_url), + body=expected_id) + self.assertTrue(ds._get_data()) + self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config") + self.assertEqual(ds.metadata, {"instance-id": expected_id, + "local-hostname": expected_hostname}) + self.assertEqual(ds.get_config_obj(), {}) + + @mock.patch('cloudinit.sources.DataSourceExoscale.get_password') + def test_read_metadata_when_password_server_unreachable(self, m_password): + """The read_metadata function returns partial results in case the + password server (only) is unreachable.""" + expected_id = 
"12345" + expected_hostname = "myname" + expected_userdata = "#cloud-config" + + m_password.side_effect = requests.Timeout('Fake Connection Timeout') + httpretty.register_uri(httpretty.GET, + self.userdata_url, + body=expected_userdata) + httpretty.register_uri(httpretty.GET, + self.metadata_url, + body="instance-id\nlocal-hostname") + httpretty.register_uri(httpretty.GET, + "{}local-hostname".format(self.metadata_url), + body=expected_hostname) + httpretty.register_uri(httpretty.GET, + "{}instance-id".format(self.metadata_url), + body=expected_id) + + result = read_metadata() + + self.assertIsNone(result.get("password")) + self.assertEqual(result.get("user-data").decode("utf-8"), + expected_userdata) + + def test_non_viable_platform(self): + """The datasource fails fast when the platform is not viable.""" + path = helpers.Paths({'run_dir': self.tmp}) + ds = DataSourceExoscale({}, None, path) + ds._is_platform_viable = lambda: False + self.assertFalse(ds._get_data()) diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py index 41176c6a..4afbccff 100644 --- a/tests/unittests/test_datasource/test_gce.py +++ b/tests/unittests/test_datasource/test_gce.py @@ -7,11 +7,11 @@ import datetime import httpretty import json -import mock import re +from unittest import mock +from urllib.parse import urlparse from base64 import b64encode, b64decode -from six.moves.urllib_parse import urlparse from cloudinit import distros from cloudinit import helpers @@ -55,6 +55,8 @@ GCE_USER_DATA_TEXT = { HEADERS = {'Metadata-Flavor': 'Google'} MD_URL_RE = re.compile( r'http://metadata.google.internal/computeMetadata/v1/.*') +GUEST_ATTRIBUTES_URL = ('http://metadata.google.internal/computeMetadata/' + 'v1/instance/guest-attributes/hostkeys/') def _set_mock_metadata(gce_meta=None): @@ -341,4 +343,20 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase): public_key_data, default_user='default') self.assertEqual(sorted(found), sorted(expected)) + @mock.patch("cloudinit.url_helper.readurl") + def test_publish_host_keys(self, m_readurl): + hostkeys = [('ssh-rsa', 'asdfasdf'), + ('ssh-ed25519', 'qwerqwer')] + readurl_expected_calls = [ + mock.call(check_status=False, data=b'asdfasdf', headers=HEADERS, + request_method='PUT', + url='%s%s' % (GUEST_ATTRIBUTES_URL, 'ssh-rsa')), + mock.call(check_status=False, data=b'qwerqwer', headers=HEADERS, + request_method='PUT', + url='%s%s' % (GUEST_ATTRIBUTES_URL, 'ssh-ed25519')), + ] + self.ds.publish_host_keys(hostkeys) + m_readurl.assert_has_calls(readurl_expected_calls, any_order=True) + + # vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py index c84d067e..2a81d3f5 100644 --- a/tests/unittests/test_datasource/test_maas.py +++ b/tests/unittests/test_datasource/test_maas.py @@ -1,11 +1,11 @@ # This file is part of cloud-init. See LICENSE file for license information. 
from copy import copy -import mock import os import shutil import tempfile import yaml +from unittest import mock from cloudinit.sources import DataSourceMAAS from cloudinit import url_helper diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py index 3429272c..18bea0b9 100644 --- a/tests/unittests/test_datasource/test_nocloud.py +++ b/tests/unittests/test_datasource/test_nocloud.py @@ -32,6 +32,36 @@ class TestNoCloudDataSource(CiTestCase): self.mocks.enter_context( mock.patch.object(util, 'read_dmi_data', return_value=None)) + def _test_fs_config_is_read(self, fs_label, fs_label_to_search): + vfat_device = 'device-1' + + def m_mount_cb(device, callback, mtype): + if (device == vfat_device): + return {'meta-data': yaml.dump({'instance-id': 'IID'})} + else: + return {} + + def m_find_devs_with(query='', path=''): + if 'TYPE=vfat' == query: + return [vfat_device] + elif 'LABEL={}'.format(fs_label) == query: + return [vfat_device] + else: + return [] + + self.mocks.enter_context( + mock.patch.object(util, 'find_devs_with', + side_effect=m_find_devs_with)) + self.mocks.enter_context( + mock.patch.object(util, 'mount_cb', + side_effect=m_mount_cb)) + sys_cfg = {'datasource': {'NoCloud': {'fs_label': fs_label_to_search}}} + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) + ret = dsrc.get_data() + + self.assertEqual(dsrc.metadata.get('instance-id'), 'IID') + self.assertTrue(ret) + def test_nocloud_seed_dir_on_lxd(self, m_is_lxd): md = {'instance-id': 'IID', 'dsmode': 'local'} ud = b"USER_DATA_HERE" @@ -90,6 +120,18 @@ class TestNoCloudDataSource(CiTestCase): ret = dsrc.get_data() self.assertFalse(ret) + def test_fs_config_lowercase_label(self, m_is_lxd): + self._test_fs_config_is_read('cidata', 'cidata') + + def test_fs_config_uppercase_label(self, m_is_lxd): + self._test_fs_config_is_read('CIDATA', 'cidata') + + def test_fs_config_lowercase_label_search_uppercase(self, m_is_lxd): + self._test_fs_config_is_read('cidata', 'CIDATA') + + def test_fs_config_uppercase_label_search_uppercase(self, m_is_lxd): + self._test_fs_config_is_read('CIDATA', 'CIDATA') + def test_no_datasource_expected(self, m_is_lxd): # no source should be found if no cmdline, config, and fs_label=None sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} @@ -236,6 +278,24 @@ class TestNoCloudDataSource(CiTestCase): self.assertEqual(netconf, dsrc.network_config) self.assertNotIn(gateway, str(dsrc.network_config)) + @mock.patch("cloudinit.util.blkid") + def test_nocloud_get_devices_freebsd(self, m_is_lxd, fake_blkid): + populate_dir(os.path.join(self.paths.seed_dir, "nocloud"), + {'user-data': b"ud", 'meta-data': "instance-id: IID\n"}) + + sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} + + self.mocks.enter_context( + mock.patch.object(util, 'is_FreeBSD', return_value=True)) + + self.mocks.enter_context( + mock.patch.object(os.path, 'exists', return_value=True)) + + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) + ret = dsrc._get_devices('foo') + self.assertEqual(['/dev/msdosfs/foo', '/dev/iso9660/foo'], ret) + fake_blkid.assert_not_called() + class TestParseCommandLineData(CiTestCase): diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py index a731f1ed..f754556f 100644 --- a/tests/unittests/test_datasource/test_openstack.py +++ b/tests/unittests/test_datasource/test_openstack.py @@ -8,12 +8,11 @@ import copy import httpretty as hp import json import re +from io 
import StringIO +from urllib.parse import urlparse from cloudinit.tests import helpers as test_helpers -from six.moves.urllib.parse import urlparse -from six import StringIO, text_type - from cloudinit import helpers from cloudinit import settings from cloudinit.sources import BrokenMetadata, convert_vendordata, UNSET @@ -569,8 +568,7 @@ class TestMetadataReader(test_helpers.HttprettyTestCase): 'uuid': 'b0fa911b-69d4-4476-bbe2-1c92bff6535c'} def register(self, path, body=None, status=200): - content = (body if not isinstance(body, text_type) - else body.encode('utf-8')) + content = body if not isinstance(body, str) else body.encode('utf-8') hp.register_uri( hp.GET, self.burl + "openstack" + path, status=status, body=content) diff --git a/tests/unittests/test_datasource/test_ovf.py b/tests/unittests/test_datasource/test_ovf.py index 349d54cc..a19c35c8 100644 --- a/tests/unittests/test_datasource/test_ovf.py +++ b/tests/unittests/test_datasource/test_ovf.py @@ -169,19 +169,56 @@ class TestDatasourceOVF(CiTestCase): MARKER-ID = 12345345 """) util.write_file(conf_file, conf_content) - with self.assertRaises(CustomScriptNotFound) as context: - wrap_and_call( - 'cloudinit.sources.DataSourceOVF', - {'util.read_dmi_data': 'vmware', - 'util.del_dir': True, - 'search_file': self.tdir, - 'wait_for_imc_cfg_file': conf_file, - 'get_nics_to_enable': ''}, - ds.get_data) + with mock.patch(MPATH + 'get_tools_config', return_value='true'): + with self.assertRaises(CustomScriptNotFound) as context: + wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'util.read_dmi_data': 'vmware', + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'get_nics_to_enable': ''}, + ds.get_data) customscript = self.tmp_path('test-script', self.tdir) self.assertIn('Script %s not found!!' % customscript, str(context.exception)) + def test_get_data_cust_script_disabled(self): + """If custom script is disabled by VMware tools configuration, + raise a RuntimeError. 
+ """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': False}, distro={}, + paths=paths) + # Prepare the conf file + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [CUSTOM-SCRIPT] + SCRIPT-NAME = test-script + [MISC] + MARKER-ID = 12345346 + """) + util.write_file(conf_file, conf_content) + # Prepare the custom sript + customscript = self.tmp_path('test-script', self.tdir) + util.write_file(customscript, "This is the post cust script") + + with mock.patch(MPATH + 'get_tools_config', return_value='invalid'): + with mock.patch(MPATH + 'set_customization_status', + return_value=('msg', b'')): + with self.assertRaises(RuntimeError) as context: + wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'util.read_dmi_data': 'vmware', + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'get_nics_to_enable': ''}, + ds.get_data) + self.assertIn('Custom script is disabled by VM Administrator', + str(context.exception)) + def test_get_data_non_vmware_seed_platform_info(self): """Platform info properly reports when on non-vmware platforms.""" paths = Paths({'cloud_dir': self.tdir, 'run_dir': self.tdir}) diff --git a/tests/unittests/test_datasource/test_rbx.py b/tests/unittests/test_datasource/test_rbx.py new file mode 100644 index 00000000..aabf1f18 --- /dev/null +++ b/tests/unittests/test_datasource/test_rbx.py @@ -0,0 +1,208 @@ +import json + +from cloudinit import helpers +from cloudinit import distros +from cloudinit.sources import DataSourceRbxCloud as ds +from cloudinit.tests.helpers import mock, CiTestCase, populate_dir + +DS_PATH = "cloudinit.sources.DataSourceRbxCloud" + +CRYPTO_PASS = "$6$uktth46t$FvpDzFD2iL9YNZIG1Epz7957hJqbH0f" \ + "QKhnzcfBcUhEodGAWRqTy7tYG4nEW7SUOYBjxOSFIQW5" \ + "tToyGP41.s1" + +CLOUD_METADATA = { + "vm": { + "memory": 4, + "cpu": 2, + "name": "vm-image-builder", + "_id": "5beab44f680cffd11f0e60fc" + }, + "additionalMetadata": { + "username": "guru", + "sshKeys": ["ssh-rsa ..."], + "password": { + "sha512": CRYPTO_PASS + } + }, + "disk": [ + {"size": 10, "type": "ssd", + "name": "vm-image-builder-os", + "_id": "5beab450680cffd11f0e60fe"}, + {"size": 2, "type": "ssd", + "name": "ubuntu-1804-bionic", + "_id": "5bef002c680cffd11f107590"} + ], + "netadp": [ + { + "ip": [{"address": "62.181.8.174"}], + "network": { + "dns": {"nameservers": ["8.8.8.8", "8.8.4.4"]}, + "routing": [], + "gateway": "62.181.8.1", + "netmask": "255.255.248.0", + "name": "public", + "type": "public", + "_id": "5784e97be2627505227b578c" + }, + "speed": 1000, + "type": "hv", + "macaddress": "00:15:5D:FF:0F:03", + "_id": "5beab450680cffd11f0e6102" + }, + { + "ip": [{"address": "10.209.78.11"}], + "network": { + "dns": {"nameservers": ["9.9.9.9", "8.8.8.8"]}, + "routing": [], + "gateway": "10.209.78.1", + "netmask": "255.255.255.0", + "name": "network-determined-bardeen", + "type": "private", + "_id": "5beaec64680cffd11f0e7c31" + }, + "speed": 1000, + "type": "hv", + "macaddress": "00:15:5D:FF:0F:24", + "_id": "5bec18c6680cffd11f0f0d8b" + } + ], + "dvddrive": [{"iso": {}}] +} + + +class TestRbxDataSource(CiTestCase): + parsed_user = None + allowed_subp = ['bash'] + + def _fetch_distro(self, kind): + cls = distros.fetch(kind) + paths = helpers.Paths({}) + return cls(kind, {}, paths) + + def setUp(self): + super(TestRbxDataSource, self).setUp() + self.tmp = self.tmp_dir() + self.paths = helpers.Paths( + {'cloud_dir': self.tmp, 'run_dir': self.tmp} + ) + + # defaults for few 
tests + self.ds = ds.DataSourceRbxCloud + self.seed_dir = self.paths.seed_dir + self.sys_cfg = {'datasource': {'RbxCloud': {'dsmode': 'local'}}} + + def test_seed_read_user_data_callback_empty_file(self): + populate_user_metadata(self.seed_dir, '') + populate_cloud_metadata(self.seed_dir, {}) + results = ds.read_user_data_callback(self.seed_dir) + + self.assertIsNone(results) + + def test_seed_read_user_data_callback_valid_disk(self): + populate_user_metadata(self.seed_dir, '') + populate_cloud_metadata(self.seed_dir, CLOUD_METADATA) + results = ds.read_user_data_callback(self.seed_dir) + + self.assertNotEqual(results, None) + self.assertTrue('userdata' in results) + self.assertTrue('metadata' in results) + self.assertTrue('cfg' in results) + + def test_seed_read_user_data_callback_userdata(self): + userdata = "#!/bin/sh\nexit 1" + populate_user_metadata(self.seed_dir, userdata) + populate_cloud_metadata(self.seed_dir, CLOUD_METADATA) + + results = ds.read_user_data_callback(self.seed_dir) + + self.assertNotEqual(results, None) + self.assertTrue('userdata' in results) + self.assertEqual(results['userdata'], userdata) + + def test_generate_network_config(self): + expected = { + 'version': 1, + 'config': [ + { + 'subnets': [ + {'control': 'auto', + 'dns_nameservers': ['8.8.8.8', '8.8.4.4'], + 'netmask': '255.255.248.0', + 'address': '62.181.8.174', + 'type': 'static', 'gateway': '62.181.8.1'} + ], + 'type': 'physical', + 'name': 'eth0', + 'mac_address': '00:15:5d:ff:0f:03' + }, + { + 'subnets': [ + {'control': 'auto', + 'dns_nameservers': ['9.9.9.9', '8.8.8.8'], + 'netmask': '255.255.255.0', + 'address': '10.209.78.11', + 'type': 'static', + 'gateway': '10.209.78.1'} + ], + 'type': 'physical', + 'name': 'eth1', + 'mac_address': '00:15:5d:ff:0f:24' + } + ] + } + self.assertTrue( + ds.generate_network_config(CLOUD_METADATA['netadp']), + expected + ) + + @mock.patch(DS_PATH + '.util.subp') + def test_gratuitous_arp_run_standard_arping(self, m_subp): + """Test handle run arping & parameters.""" + items = [ + { + 'destination': '172.17.0.2', + 'source': '172.16.6.104' + }, + { + 'destination': '172.17.0.2', + 'source': '172.16.6.104', + }, + ] + ds.gratuitous_arp(items, self._fetch_distro('ubuntu')) + self.assertEqual([ + mock.call([ + 'arping', '-c', '2', '-S', + '172.16.6.104', '172.17.0.2' + ]), + mock.call([ + 'arping', '-c', '2', '-S', + '172.16.6.104', '172.17.0.2' + ]) + ], m_subp.call_args_list + ) + + @mock.patch(DS_PATH + '.util.subp') + def test_handle_rhel_like_arping(self, m_subp): + """Test handle on RHEL-like distros.""" + items = [ + { + 'source': '172.16.6.104', + 'destination': '172.17.0.2', + } + ] + ds.gratuitous_arp(items, self._fetch_distro('fedora')) + self.assertEqual([ + mock.call( + ['arping', '-c', '2', '-s', '172.16.6.104', '172.17.0.2'] + )], + m_subp.call_args_list + ) + + +def populate_cloud_metadata(path, data): + populate_dir(path, {'cloud.json': json.dumps(data)}) + + +def populate_user_metadata(path, data): + populate_dir(path, {'user.data': data}) diff --git a/tests/unittests/test_datasource/test_scaleway.py b/tests/unittests/test_datasource/test_scaleway.py index c2bc7a00..1b4dd0ad 100644 --- a/tests/unittests/test_datasource/test_scaleway.py +++ b/tests/unittests/test_datasource/test_scaleway.py @@ -7,6 +7,7 @@ import requests from cloudinit import helpers from cloudinit import settings +from cloudinit import sources from cloudinit.sources import DataSourceScaleway from cloudinit.tests.helpers import mock, HttprettyTestCase, CiTestCase @@ -49,6 +50,9 @@ class 
MetadataResponses(object): FAKE_METADATA = { 'id': '00000000-0000-0000-0000-000000000000', 'hostname': 'scaleway.host', + 'tags': [ + "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD", + ], 'ssh_public_keys': [{ 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', 'fingerprint': '2048 06:ae:... login (RSA)' @@ -204,10 +208,11 @@ class TestDataSourceScaleway(HttprettyTestCase): self.assertEqual(self.datasource.get_instance_id(), MetadataResponses.FAKE_METADATA['id']) - self.assertEqual(self.datasource.get_public_ssh_keys(), [ - elem['key'] for elem in - MetadataResponses.FAKE_METADATA['ssh_public_keys'] - ]) + self.assertEqual(sorted(self.datasource.get_public_ssh_keys()), sorted([ + u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', + u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD', + u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', + ])) self.assertEqual(self.datasource.get_hostname(), MetadataResponses.FAKE_METADATA['hostname']) self.assertEqual(self.datasource.get_userdata_raw(), @@ -218,6 +223,70 @@ class TestDataSourceScaleway(HttprettyTestCase): self.assertIsNone(self.datasource.region) self.assertEqual(sleep.call_count, 0) + def test_ssh_keys_empty(self): + """ + get_public_ssh_keys() should return an empty list if no ssh keys are + available + """ + self.datasource.metadata['tags'] = [] + self.datasource.metadata['ssh_public_keys'] = [] + self.assertEqual(self.datasource.get_public_ssh_keys(), []) + + def test_ssh_keys_only_tags(self): + """ + get_public_ssh_keys() should return the list of keys available in tags + """ + self.datasource.metadata['tags'] = [ + "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD", + "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABCCCCC", + ] + self.datasource.metadata['ssh_public_keys'] = [] + self.assertEqual(sorted(self.datasource.get_public_ssh_keys()), sorted([ + u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD', + u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', + ])) + + def test_ssh_keys_only_conf(self): + """ + get_public_ssh_keys() should return the list of keys available in the + ssh_public_keys field + """ + self.datasource.metadata['tags'] = [] + self.datasource.metadata['ssh_public_keys'] = [{ + 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', + 'fingerprint': '2048 06:ae:... login (RSA)' + }, { + 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', + 'fingerprint': '2048 06:ff:... login2 (RSA)' + }] + self.assertEqual(sorted(self.datasource.get_public_ssh_keys()), sorted([ + u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', + u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', + ])) + + def test_ssh_keys_both(self): + """ + get_public_ssh_keys() should return a merge of keys available + in ssh_public_keys and tags + """ + self.datasource.metadata['tags'] = [ + "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD", + ] + + self.datasource.metadata['ssh_public_keys'] = [{ + 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', + 'fingerprint': '2048 06:ae:... login (RSA)' + }, { + 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', + 'fingerprint': '2048 06:ff:... login2 (RSA)' + }] + self.assertEqual(sorted(self.datasource.get_public_ssh_keys()), sorted([ + u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', + u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD', + u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', + ])) + @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4') @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter', get_source_address_adapter) @@ -335,3 +404,51 @@ class TestDataSourceScaleway(HttprettyTestCase): netcfg = self.datasource.network_config self.assertEqual(netcfg, '0xdeadbeef') + + @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic') + @mock.patch('cloudinit.util.get_cmdline') + def test_network_config_unset(self, m_get_cmdline, fallback_nic): + """ + _network_config will be set to sources.UNSET after the first boot. + Make sure it behaves correctly. + """ + m_get_cmdline.return_value = 'scaleway' + fallback_nic.return_value = 'ens2' + self.datasource.metadata['ipv6'] = None + self.datasource._network_config = sources.UNSET + + resp = {'version': 1, + 'config': [{ + 'type': 'physical', + 'name': 'ens2', + 'subnets': [{'type': 'dhcp4'}]}] + } + + netcfg = self.datasource.network_config + self.assertEqual(netcfg, resp) + + @mock.patch('cloudinit.sources.DataSourceScaleway.LOG.warning') + @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic') + @mock.patch('cloudinit.util.get_cmdline') + def test_network_config_cached_none(self, m_get_cmdline, fallback_nic, + logwarning): + """ + network_config() should return config data if cached data is None + rather than sources.UNSET + """ + m_get_cmdline.return_value = 'scaleway' + fallback_nic.return_value = 'ens2' + self.datasource.metadata['ipv6'] = None + self.datasource._network_config = None + + resp = {'version': 1, + 'config': [{ + 'type': 'physical', + 'name': 'ens2', + 'subnets': [{'type': 'dhcp4'}]}] + } + + netcfg = self.datasource.network_config + self.assertEqual(netcfg, resp) + logwarning.assert_called_with('Found None as cached _network_config. ' + 'Resetting to %s', sources.UNSET) diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py index 42ac6971..62084de5 100644 --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ -1,5 +1,5 @@ # Copyright (C) 2013 Canonical Ltd. -# Copyright (c) 2018, Joyent, Inc. +# Copyright 2019 Joyent, Inc.
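A note on the sorted() comparisons in the Scaleway SSH-key tests above: list.sort() sorts in place and returns None, so an assertion like assertEqual(keys.sort(), expected.sort()) would compare None with None and pass no matter what keys were returned. sorted() returns a new list and is safe to compare:

assert ["b", "a"].sort() is None         # sort() mutates and returns None
assert sorted(["b", "a"]) == ["a", "b"]  # sorted() returns a new list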
# # Author: Ben Howard <ben.howard@canonical.com> # @@ -31,8 +31,7 @@ from cloudinit.sources.DataSourceSmartOS import ( convert_smartos_network_data as convert_net, SMARTOS_ENV_KVM, SERIAL_DEVICE, get_smartos_environ, identify_file) - -import six +from cloudinit.event import EventType from cloudinit import helpers as c_helpers from cloudinit.util import ( @@ -653,6 +652,12 @@ class TestSmartOSDataSource(FilesystemMockingTestCase): self.assertEqual(dsrc.device_name_to_device('FOO'), mydscfg['disk_aliases']['FOO']) + def test_reconfig_network_on_boot(self): + # Test to ensure that network is configured from metadata on each boot + dsrc = self._get_ds(mockdata=MOCK_RETURNS) + self.assertSetEqual(set([EventType.BOOT_NEW_INSTANCE, EventType.BOOT]), + dsrc.update_events['network']) + class TestIdentifyFile(CiTestCase): """Test the 'identify_file' utility.""" @@ -791,7 +796,7 @@ class TestJoyentMetadataClient(FilesystemMockingTestCase): return self.serial.write.call_args[0][0] def test_get_metadata_writes_bytes(self): - self.assertIsInstance(self._get_written_line(), six.binary_type) + self.assertIsInstance(self._get_written_line(), bytes) def test_get_metadata_line_starts_with_v2(self): foo = self._get_written_line() diff --git a/tests/unittests/test_distros/test_create_users.py b/tests/unittests/test_distros/test_create_users.py index c3f258d5..ef11784d 100644 --- a/tests/unittests/test_distros/test_create_users.py +++ b/tests/unittests/test_distros/test_create_users.py @@ -206,7 +206,7 @@ class TestCreateUser(CiTestCase): user = 'foouser' self.dist.create_user(user, ssh_redirect_user='someuser') self.assertIn( - 'WARNING: Unable to disable ssh logins for foouser given ' + 'WARNING: Unable to disable SSH logins for foouser given ' 'ssh_redirect_user: someuser. No cloud public-keys present.\n', self.logs.getvalue()) m_setup_user_keys.assert_not_called() @@ -240,4 +240,32 @@ class TestCreateUser(CiTestCase): [mock.call(set(['auth1']), user), # not disabled mock.call(set(['key1']), 'foouser', options=disable_prefix)]) + @mock.patch("cloudinit.distros.util.which") + def test_lock_with_usermod_if_no_passwd(self, m_which, m_subp, + m_is_snappy): + """Lock uses usermod --lock if no 'passwd' cmd available.""" + m_which.side_effect = lambda m: m in ('usermod',) + self.dist.lock_passwd("bob") + self.assertEqual( + [mock.call(['usermod', '--lock', 'bob'])], + m_subp.call_args_list) + + @mock.patch("cloudinit.distros.util.which") + def test_lock_with_passwd_if_available(self, m_which, m_subp, + m_is_snappy): + """Lock with only passwd will use passwd.""" + m_which.side_effect = lambda m: m in ('passwd',) + self.dist.lock_passwd("bob") + self.assertEqual( + [mock.call(['passwd', '-l', 'bob'])], + m_subp.call_args_list) + + @mock.patch("cloudinit.distros.util.which") + def test_lock_raises_runtime_if_no_commands(self, m_which, m_subp, + m_is_snappy): + """Lock with no commands available raises RuntimeError.""" + m_which.return_value = None + with self.assertRaises(RuntimeError): + self.dist.lock_passwd("bob") + # vi: ts=4 expandtab diff --git a/tests/unittests/test_distros/test_freebsd.py b/tests/unittests/test_distros/test_freebsd.py new file mode 100644 index 00000000..8af253a2 --- /dev/null +++ b/tests/unittests/test_distros/test_freebsd.py @@ -0,0 +1,45 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
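The three lock_passwd tests just above pin down a fallback chain between system tools. A rough sketch of that behavior (an assumption for illustration, not the real cloudinit.distros implementation):

def lock_passwd_sketch(name, which, subp):
    # Prefer 'passwd -l'; fall back to 'usermod --lock'; fail loudly if
    # neither tool is available on PATH.
    if which("passwd"):
        subp(["passwd", "-l", name])
    elif which("usermod"):
        subp(["usermod", "--lock", name])
    else:
        raise RuntimeError(
            "Unable to lock password for %s: no passwd or usermod found"
            % name)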
+ +from cloudinit.util import (find_freebsd_part, get_path_dev_freebsd) +from cloudinit.tests.helpers import (CiTestCase, mock) + +import os + + +class TestDeviceLookUp(CiTestCase): + + @mock.patch('cloudinit.util.subp') + def test_find_freebsd_part_label(self, mock_subp): + glabel_out = ''' +gptid/fa52d426-c337-11e6-8911-00155d4c5e47 N/A da0p1 + label/rootfs N/A da0p2 + label/swap N/A da0p3 +''' + mock_subp.return_value = (glabel_out, "") + res = find_freebsd_part("/dev/label/rootfs") + self.assertEqual("da0p2", res) + + @mock.patch('cloudinit.util.subp') + def test_find_freebsd_part_gpt(self, mock_subp): + glabel_out = ''' + gpt/bootfs N/A vtbd0p1 +gptid/3f4cbe26-75da-11e8-a8f2-002590ec6166 N/A vtbd0p1 + gpt/swapfs N/A vtbd0p2 + gpt/rootfs N/A vtbd0p3 + iso9660/cidata N/A vtbd2 +''' + mock_subp.return_value = (glabel_out, "") + res = find_freebsd_part("/dev/gpt/rootfs") + self.assertEqual("vtbd0p3", res) + + def test_get_path_dev_freebsd_label(self): + mnt_list = ''' +/dev/label/rootfs / ufs rw 1 1 +devfs /dev devfs rw,multilabel 0 0 +fdescfs /dev/fd fdescfs rw 0 0 +/dev/da1s1 /mnt/resource ufs rw 2 2 +''' + with mock.patch.object(os.path, 'exists', + return_value=True): + res = get_path_dev_freebsd('/etc', mnt_list) + self.assertIsNotNone(res) diff --git a/tests/unittests/test_distros/test_generic.py b/tests/unittests/test_distros/test_generic.py index 791fe612..02b334e3 100644 --- a/tests/unittests/test_distros/test_generic.py +++ b/tests/unittests/test_distros/test_generic.py @@ -8,11 +8,7 @@ from cloudinit.tests import helpers import os import shutil import tempfile - -try: - from unittest import mock -except ImportError: - import mock +from unittest import mock unknown_arch_info = { 'arches': ['default'], @@ -244,5 +240,23 @@ class TestGenericDistro(helpers.FilesystemMockingTestCase): with self.assertRaises(NotImplementedError): d.get_locale() + def test_expire_passwd_uses_chpasswd(self): + """Test ubuntu.expire_passwd uses the passwd command.""" + for d_name in ("ubuntu", "rhel"): + cls = distros.fetch(d_name) + d = cls(d_name, {}, None) + with mock.patch("cloudinit.util.subp") as m_subp: + d.expire_passwd("myuser") + m_subp.assert_called_once_with(["passwd", "--expire", "myuser"]) + + def test_expire_passwd_freebsd_uses_pw_command(self): + """Test FreeBSD.expire_passwd uses the pw command.""" + cls = distros.fetch("freebsd") + d = cls("freebsd", {}, None) + with mock.patch("cloudinit.util.subp") as m_subp: + d.expire_passwd("myuser") + m_subp.assert_called_once_with( + ["pw", "usermod", "myuser", "-p", "01-Jan-1970"]) + # vi: ts=4 expandtab diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py index 6e339355..ccf66161 100644 --- a/tests/unittests/test_distros/test_netconfig.py +++ b/tests/unittests/test_distros/test_netconfig.py @@ -1,20 +1,17 @@ # This file is part of cloud-init. See LICENSE file for license information. 
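The find_freebsd_part tests above feed canned 'glabel status' output to the helper. A hedged sketch of the parsing they imply (an assumption about the approach, not a copy of the real cloudinit.util.find_freebsd_part): the label in column one maps to the provider device in column three.

def find_part_sketch(label, glabel_out):
    for line in glabel_out.splitlines():
        fields = line.split()
        if len(fields) == 3 and fields[0] == label:
            return fields[2]  # e.g. 'label/rootfs' -> 'da0p2'
    return None

GLABEL_OUT = (
    "gptid/fa52d426-c337-11e6-8911-00155d4c5e47 N/A da0p1\n"
    "label/rootfs N/A da0p2\n"
    "label/swap N/A da0p3\n")
assert find_part_sketch("label/rootfs", GLABEL_OUT) == "da0p2"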
+import copy import os -from six import StringIO +from io import StringIO from textwrap import dedent - -try: - from unittest import mock -except ImportError: - import mock +from unittest import mock from cloudinit import distros from cloudinit.distros.parsers.sys_conf import SysConf from cloudinit import helpers from cloudinit import settings from cloudinit.tests.helpers import ( - FilesystemMockingTestCase, dir2dict, populate_dir) + FilesystemMockingTestCase, dir2dict) from cloudinit import util @@ -91,9 +88,9 @@ V1_NET_CFG = {'config': [{'name': 'eth0', 'version': 1} V1_NET_CFG_OUTPUT = """\ -# This file is generated from information provided by -# the datasource. Changes to it will not persist across an instance. -# To disable cloud-init's network configuration capabilities, write a file +# This file is generated from information provided by the datasource. Changes +# to it will not persist across an instance reboot. To disable cloud-init's +# network configuration capabilities, write a file # /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following: # network: {config: disabled} auto lo @@ -109,13 +106,31 @@ auto eth1 iface eth1 inet dhcp """ +V1_NET_CFG_IPV6_OUTPUT = """\ +# This file is generated from information provided by the datasource. Changes +# to it will not persist across an instance reboot. To disable cloud-init's +# network configuration capabilities, write a file +# /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following: +# network: {config: disabled} +auto lo +iface lo inet loopback + +auto eth0 +iface eth0 inet6 static + address 2607:f0d0:1002:0011::2/64 + gateway 2607:f0d0:1002:0011::1 + +auto eth1 +iface eth1 inet dhcp +""" + V1_NET_CFG_IPV6 = {'config': [{'name': 'eth0', 'subnets': [{'address': '2607:f0d0:1002:0011::2', 'gateway': '2607:f0d0:1002:0011::1', 'netmask': '64', - 'type': 'static'}], + 'type': 'static6'}], 'type': 'physical'}, {'name': 'eth1', 'subnets': [{'control': 'auto', @@ -125,9 +140,9 @@ V1_NET_CFG_IPV6 = {'config': [{'name': 'eth0', V1_TO_V2_NET_CFG_OUTPUT = """\ -# This file is generated from information provided by -# the datasource. Changes to it will not persist across an instance. -# To disable cloud-init's network configuration capabilities, write a file +# This file is generated from information provided by the datasource. Changes +# to it will not persist across an instance reboot. To disable cloud-init's +# network configuration capabilities, write a file # /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following: # network: {config: disabled} network: @@ -141,6 +156,23 @@ network: dhcp4: true """ +V1_TO_V2_NET_CFG_IPV6_OUTPUT = """\ +# This file is generated from information provided by the datasource. Changes +# to it will not persist across an instance reboot. To disable cloud-init's +# network configuration capabilities, write a file +# /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following: +# network: {config: disabled} +network: + version: 2 + ethernets: + eth0: + addresses: + - 2607:f0d0:1002:0011::2/64 + gateway6: 2607:f0d0:1002:0011::1 + eth1: + dhcp4: true +""" + V2_NET_CFG = { 'ethernets': { 'eth7': { @@ -154,9 +186,9 @@ V2_NET_CFG = { V2_TO_V2_NET_CFG_OUTPUT = """\ -# This file is generated from information provided by -# the datasource. Changes to it will not persist across an instance. -# To disable cloud-init's network configuration capabilities, write a file +# This file is generated from information provided by the datasource. 
Changes +# to it will not persist across an instance reboot. To disable cloud-init's +# network configuration capabilities, write a file # /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following: # network: {config: disabled} network: @@ -213,128 +245,95 @@ class TestNetCfgDistroBase(FilesystemMockingTestCase): self.assertEqual(v, b2[k]) -class TestNetCfgDistroFreebsd(TestNetCfgDistroBase): +class TestNetCfgDistroFreeBSD(TestNetCfgDistroBase): + + def setUp(self): + super(TestNetCfgDistroFreeBSD, self).setUp() + self.distro = self._get_distro('freebsd', renderers=['freebsd']) - frbsd_ifout = """\ -hn0: flags=8843<UP,BROADCAST,RUNNING,SIMPLEX,MULTICAST> metric 0 mtu 1500 - options=51b<RXCSUM,TXCSUM,VLAN_MTU,VLAN_HWTAGGING,TSO4,LRO> - ether 00:15:5d:4c:73:00 - inet6 fe80::215:5dff:fe4c:7300%hn0 prefixlen 64 scopeid 0x2 - inet 10.156.76.127 netmask 0xfffffc00 broadcast 10.156.79.255 - nd6 options=23<PERFORMNUD,ACCEPT_RTADV,AUTO_LINKLOCAL> - media: Ethernet autoselect (10Gbase-T <full-duplex>) - status: active + def _apply_and_verify_freebsd(self, apply_fn, config, expected_cfgs=None, + bringup=False): + if not expected_cfgs: + raise ValueError('expected_cfg must not be None') + + tmpd = None + with mock.patch('cloudinit.net.freebsd.available') as m_avail: + m_avail.return_value = True + with self.reRooted(tmpd) as tmpd: + util.ensure_dir('/etc') + util.ensure_file('/etc/rc.conf') + util.ensure_file('/etc/resolv.conf') + apply_fn(config, bringup) + + results = dir2dict(tmpd) + for cfgpath, expected in expected_cfgs.items(): + print("----------") + print(expected) + print("^^^^ expected | rendered VVVVVVV") + print(results[cfgpath]) + print("----------") + self.assertEqual( + set(expected.split('\n')), + set(results[cfgpath].split('\n'))) + self.assertEqual(0o644, get_mode(cfgpath, tmpd)) + + @mock.patch('cloudinit.net.get_interfaces_by_mac') + def test_apply_network_config_freebsd_standard(self, ifaces_mac): + ifaces_mac.return_value = { + '00:15:5d:4c:73:00': 'eth0', + } + rc_conf_expected = """\ +defaultrouter=192.168.1.254 +ifconfig_eth0='192.168.1.5 netmask 255.255.255.0' +ifconfig_eth1=DHCP """ - @mock.patch('cloudinit.distros.freebsd.Distro.get_ifconfig_list') - @mock.patch('cloudinit.distros.freebsd.Distro.get_ifconfig_ifname_out') - def test_get_ip_nic_freebsd(self, ifname_out, iflist): - frbsd_distro = self._get_distro('freebsd') - iflist.return_value = "lo0 hn0" - ifname_out.return_value = self.frbsd_ifout - res = frbsd_distro.get_ipv4() - self.assertEqual(res, ['lo0', 'hn0']) - res = frbsd_distro.get_ipv6() - self.assertEqual(res, []) - - @mock.patch('cloudinit.distros.freebsd.Distro.get_ifconfig_ether') - @mock.patch('cloudinit.distros.freebsd.Distro.get_ifconfig_ifname_out') - @mock.patch('cloudinit.distros.freebsd.Distro.get_interface_mac') - def test_generate_fallback_config_freebsd(self, mac, ifname_out, if_ether): - frbsd_distro = self._get_distro('freebsd') - - if_ether.return_value = 'hn0' - ifname_out.return_value = self.frbsd_ifout - mac.return_value = '00:15:5d:4c:73:00' - res = frbsd_distro.generate_fallback_config() - self.assertIsNotNone(res) - - def test_simple_write_freebsd(self): - fbsd_distro = self._get_distro('freebsd') - - rc_conf = '/etc/rc.conf' - read_bufs = { - rc_conf: 'initial-rc-conf-not-validated', - '/etc/resolv.conf': 'initial-resolv-conf-not-validated', + expected_cfgs = { + '/etc/rc.conf': rc_conf_expected, + '/etc/resolv.conf': '' } + self._apply_and_verify_freebsd(self.distro.apply_network_config, + V1_NET_CFG, + 
expected_cfgs=expected_cfgs.copy()) - tmpd = self.tmp_dir() - populate_dir(tmpd, read_bufs) - with self.reRooted(tmpd): - with mock.patch("cloudinit.distros.freebsd.util.subp", - return_value=('vtnet0', '')): - fbsd_distro.apply_network(BASE_NET_CFG, False) - results = dir2dict(tmpd) - - self.assertIn(rc_conf, results) - self.assertCfgEquals( - dedent('''\ - ifconfig_vtnet0="192.168.1.5 netmask 255.255.255.0" - ifconfig_vtnet1="DHCP" - defaultrouter="192.168.1.254" - '''), results[rc_conf]) - self.assertEqual(0o644, get_mode(rc_conf, tmpd)) - - def test_simple_write_freebsd_from_v2eni(self): - fbsd_distro = self._get_distro('freebsd') - - rc_conf = '/etc/rc.conf' - read_bufs = { - rc_conf: 'initial-rc-conf-not-validated', - '/etc/resolv.conf': 'initial-resolv-conf-not-validated', + @mock.patch('cloudinit.net.get_interfaces_by_mac') + def test_apply_network_config_freebsd_ifrename(self, ifaces_mac): + ifaces_mac.return_value = { + '00:15:5d:4c:73:00': 'vtnet0', } + rc_conf_expected = """\ +ifconfig_vtnet0_name=eth0 +defaultrouter=192.168.1.254 +ifconfig_eth0='192.168.1.5 netmask 255.255.255.0' +ifconfig_eth1=DHCP +""" - tmpd = self.tmp_dir() - populate_dir(tmpd, read_bufs) - with self.reRooted(tmpd): - with mock.patch("cloudinit.distros.freebsd.util.subp", - return_value=('vtnet0', '')): - fbsd_distro.apply_network(BASE_NET_CFG_FROM_V2, False) - results = dir2dict(tmpd) - - self.assertIn(rc_conf, results) - self.assertCfgEquals( - dedent('''\ - ifconfig_vtnet0="192.168.1.5 netmask 255.255.255.0" - ifconfig_vtnet1="DHCP" - defaultrouter="192.168.1.254" - '''), results[rc_conf]) - self.assertEqual(0o644, get_mode(rc_conf, tmpd)) - - def test_apply_network_config_fallback_freebsd(self): - fbsd_distro = self._get_distro('freebsd') - - # a weak attempt to verify that we don't have an implementation - # of _write_network_config or apply_network_config in fbsd now, - # which would make this test not actually test the fallback. 
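One design note on _apply_and_verify_freebsd above: it compares set(expected.split('\n')) against the rendered file, which makes the check order-insensitive — a good fit for rc.conf, where key order does not matter. The trade-off is that duplicated lines collapse into one set member and go unnoticed:

expected = "defaultrouter=192.168.1.254\nifconfig_eth1=DHCP"
rendered = "ifconfig_eth1=DHCP\ndefaultrouter=192.168.1.254"
assert set(expected.split("\n")) == set(rendered.split("\n"))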
- self.assertRaises( - NotImplementedError, fbsd_distro._write_network_config, - BASE_NET_CFG) - - # now run - mynetcfg = { - 'config': [{"type": "physical", "name": "eth0", - "mac_address": "c0:d6:9f:2c:e8:80", - "subnets": [{"type": "dhcp"}]}], - 'version': 1} - - rc_conf = '/etc/rc.conf' - read_bufs = { - rc_conf: 'initial-rc-conf-not-validated', - '/etc/resolv.conf': 'initial-resolv-conf-not-validated', + V1_NET_CFG_RENAME = copy.deepcopy(V1_NET_CFG) + V1_NET_CFG_RENAME['config'][0]['mac_address'] = '00:15:5d:4c:73:00' + + expected_cfgs = { + '/etc/rc.conf': rc_conf_expected, + '/etc/resolv.conf': '' } + self._apply_and_verify_freebsd(self.distro.apply_network_config, + V1_NET_CFG_RENAME, + expected_cfgs=expected_cfgs.copy()) - tmpd = self.tmp_dir() - populate_dir(tmpd, read_bufs) - with self.reRooted(tmpd): - with mock.patch("cloudinit.distros.freebsd.util.subp", - return_value=('vtnet0', '')): - fbsd_distro.apply_network_config(mynetcfg, bring_up=False) - results = dir2dict(tmpd) + @mock.patch('cloudinit.net.get_interfaces_by_mac') + def test_apply_network_config_freebsd_nameserver(self, ifaces_mac): + ifaces_mac.return_value = { + '00:15:5d:4c:73:00': 'eth0', + } - self.assertIn(rc_conf, results) - self.assertCfgEquals('ifconfig_vtnet0="DHCP"', results[rc_conf]) - self.assertEqual(0o644, get_mode(rc_conf, tmpd)) + V1_NET_CFG_DNS = copy.deepcopy(V1_NET_CFG) + ns = ['1.2.3.4'] + V1_NET_CFG_DNS['config'][0]['subnets'][0]['dns_nameservers'] = ns + expected_cfgs = { + '/etc/resolv.conf': 'nameserver 1.2.3.4\n' + } + self._apply_and_verify_freebsd(self.distro.apply_network_config, + V1_NET_CFG_DNS, + expected_cfgs=expected_cfgs.copy()) class TestNetCfgDistroUbuntuEni(TestNetCfgDistroBase): @@ -376,6 +375,14 @@ class TestNetCfgDistroUbuntuEni(TestNetCfgDistroBase): V1_NET_CFG, expected_cfgs=expected_cfgs.copy()) + def test_apply_network_config_ipv6_ub(self): + expected_cfgs = { + self.eni_path(): V1_NET_CFG_IPV6_OUTPUT + } + self._apply_and_verify_eni(self.distro.apply_network_config, + V1_NET_CFG_IPV6, + expected_cfgs=expected_cfgs.copy()) + class TestNetCfgDistroUbuntuNetplan(TestNetCfgDistroBase): def setUp(self): @@ -407,7 +414,7 @@ class TestNetCfgDistroUbuntuNetplan(TestNetCfgDistroBase): self.assertEqual(0o644, get_mode(cfgpath, tmpd)) def netplan_path(self): - return '/etc/netplan/50-cloud-init.yaml' + return '/etc/netplan/50-cloud-init.yaml' def test_apply_network_config_v1_to_netplan_ub(self): expected_cfgs = { @@ -419,6 +426,16 @@ class TestNetCfgDistroUbuntuNetplan(TestNetCfgDistroBase): V1_NET_CFG, expected_cfgs=expected_cfgs.copy()) + def test_apply_network_config_v1_ipv6_to_netplan_ub(self): + expected_cfgs = { + self.netplan_path(): V1_TO_V2_NET_CFG_IPV6_OUTPUT, + } + + # ub_distro.apply_network_config(V1_NET_CFG_IPV6, False) + self._apply_and_verify_netplan(self.distro.apply_network_config, + V1_NET_CFG_IPV6, + expected_cfgs=expected_cfgs.copy()) + def test_apply_network_config_v2_passthrough_ub(self): expected_cfgs = { self.netplan_path(): V2_TO_V2_NET_CFG_OUTPUT, @@ -551,24 +568,14 @@ class TestNetCfgDistroOpensuse(TestNetCfgDistroBase): """Opensuse uses apply_network_config and renders sysconfig""" expected_cfgs = { self.ifcfg_path('eth0'): dedent("""\ - BOOTPROTO=none - DEFROUTE=yes - DEVICE=eth0 - GATEWAY=192.168.1.254 + BOOTPROTO=static IPADDR=192.168.1.5 NETMASK=255.255.255.0 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no + STARTMODE=auto """), self.ifcfg_path('eth1'): dedent("""\ - BOOTPROTO=dhcp - DEVICE=eth1 - NM_CONTROLLED=no - ONBOOT=yes - 
TYPE=Ethernet - USERCTL=no + BOOTPROTO=dhcp4 + STARTMODE=auto """), } self._apply_and_verify(self.distro.apply_network_config, @@ -579,24 +586,13 @@ class TestNetCfgDistroOpensuse(TestNetCfgDistroBase): """Opensuse uses apply_network_config and renders sysconfig w/ipv6""" expected_cfgs = { self.ifcfg_path('eth0'): dedent("""\ - BOOTPROTO=none - DEFROUTE=yes - DEVICE=eth0 - IPV6ADDR=2607:f0d0:1002:0011::2/64 - IPV6INIT=yes - IPV6_DEFAULTGW=2607:f0d0:1002:0011::1 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no + BOOTPROTO=static + IPADDR6=2607:f0d0:1002:0011::2/64 + STARTMODE=auto """), self.ifcfg_path('eth1'): dedent("""\ - BOOTPROTO=dhcp - DEVICE=eth1 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no + BOOTPROTO=dhcp4 + STARTMODE=auto """), } self._apply_and_verify(self.distro.apply_network_config, @@ -604,6 +600,93 @@ class TestNetCfgDistroOpensuse(TestNetCfgDistroBase): expected_cfgs=expected_cfgs.copy()) +class TestNetCfgDistroArch(TestNetCfgDistroBase): + def setUp(self): + super(TestNetCfgDistroArch, self).setUp() + self.distro = self._get_distro('arch', renderers=['netplan']) + + def _apply_and_verify(self, apply_fn, config, expected_cfgs=None, + bringup=False, with_netplan=False): + if not expected_cfgs: + raise ValueError('expected_cfg must not be None') + + tmpd = None + with mock.patch('cloudinit.net.netplan.available', + return_value=with_netplan): + with self.reRooted(tmpd) as tmpd: + apply_fn(config, bringup) + + results = dir2dict(tmpd) + for cfgpath, expected in expected_cfgs.items(): + print("----------") + print(expected) + print("^^^^ expected | rendered VVVVVVV") + print(results[cfgpath]) + print("----------") + self.assertEqual(expected, results[cfgpath]) + self.assertEqual(0o644, get_mode(cfgpath, tmpd)) + + def netctl_path(self, iface): + return '/etc/netctl/%s' % iface + + def netplan_path(self): + return '/etc/netplan/50-cloud-init.yaml' + + def test_apply_network_config_v1_without_netplan(self): + # Note that this is in fact an invalid netctl config: + # "Address=None/None" + # But this is what the renderer has been writing out for a long time, + # and the test's purpose is to assert that the netctl renderer is + # still being used in absence of netplan, not the correctness of the + # rendered netctl config. 
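For contrast, a minimal hand-written netctl DHCP profile (based on common netctl conventions, not on anything cloud-init emits) would carry no Address or Gateway keys at all; the 'Address=None/None' and empty 'Gateway=' pinned below are exactly the renderer artifact the comment above describes:

NETCTL_DHCP_PROFILE = """\
Interface=eth1
Connection=ethernet
IP=dhcp
"""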
+ expected_cfgs = { + self.netctl_path('eth0'): dedent("""\ + Address=192.168.1.5/255.255.255.0 + Connection=ethernet + DNS=() + Gateway=192.168.1.254 + IP=static + Interface=eth0 + """), + self.netctl_path('eth1'): dedent("""\ + Address=None/None + Connection=ethernet + DNS=() + Gateway= + IP=dhcp + Interface=eth1 + """), + } + + # ub_distro.apply_network_config(V1_NET_CFG, False) + self._apply_and_verify(self.distro.apply_network_config, + V1_NET_CFG, + expected_cfgs=expected_cfgs.copy(), + with_netplan=False) + + def test_apply_network_config_v1_with_netplan(self): + expected_cfgs = { + self.netplan_path(): dedent("""\ + # generated by cloud-init + network: + version: 2 + ethernets: + eth0: + addresses: + - 192.168.1.5/24 + gateway4: 192.168.1.254 + eth1: + dhcp4: true + """), + } + + with mock.patch('cloudinit.util.is_FreeBSD', return_value=False): + self._apply_and_verify(self.distro.apply_network_config, + V1_NET_CFG, + expected_cfgs=expected_cfgs.copy(), + with_netplan=True) + + def get_mode(path, target=None): return os.stat(util.target_path(target, path)).st_mode & 0o777 diff --git a/tests/unittests/test_distros/test_user_data_normalize.py b/tests/unittests/test_distros/test_user_data_normalize.py index fa4b6cfe..a6faf0ef 100644 --- a/tests/unittests/test_distros/test_user_data_normalize.py +++ b/tests/unittests/test_distros/test_user_data_normalize.py @@ -1,12 +1,13 @@ # This file is part of cloud-init. See LICENSE file for license information. +from unittest import mock + from cloudinit import distros from cloudinit.distros import ug_util from cloudinit import helpers from cloudinit import settings from cloudinit.tests.helpers import TestCase -import mock bcfg = { diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 756b4fb4..36d7fbbf 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -140,7 +140,8 @@ class DsIdentifyBase(CiTestCase): {'name': 'blkid', 'out': BLKID_EFI_ROOT}, {'name': 'ovf_vmware_transport_guestinfo', 'out': 'No value found', 'ret': 1}, - + {'name': 'dmi_decode', 'ret': 1, + 'err': 'No dmidecode program. 
ERROR.'}, ] written = [d['name'] for d in mocks] @@ -195,6 +196,10 @@ class DsIdentifyBase(CiTestCase): return self._check_via_dict( data, RC_FOUND, dslist=[data.get('ds'), DS_NONE]) + def _test_ds_not_found(self, name): + data = copy.deepcopy(VALID_CFG[name]) + return self._check_via_dict(data, RC_NOT_FOUND) + def _check_via_dict(self, data, rc, dslist=None, **kwargs): ret = self._call_via_dict(data, **kwargs) good = False @@ -244,9 +249,13 @@ class TestDsIdentify(DsIdentifyBase): self._test_ds_found('Ec2-xen') def test_brightbox_is_ec2(self): - """EC2: product_serial ends with 'brightbox.com'""" + """EC2: product_serial ends with '.brightbox.com'""" self._test_ds_found('Ec2-brightbox') + def test_bobrightbox_is_not_brightbox(self): + """EC2: bobrightbox.com in product_serial is not brightbox""" + self._test_ds_not_found('Ec2-brightbox-negative') + def test_gce_by_product_name(self): """GCE identifies itself with product_name.""" self._test_ds_found('GCE') @@ -259,10 +268,13 @@ class TestDsIdentify(DsIdentifyBase): """ConfigDrive datasource has a disk with LABEL=config-2.""" self._test_ds_found('ConfigDrive') + def test_rbx_cloud(self): + """Rbx datasource has a disk with LABEL=CLOUDMD.""" + self._test_ds_found('RbxCloud') + def test_config_drive_upper(self): """ConfigDrive datasource has a disk with LABEL=CONFIG-2.""" self._test_ds_found('ConfigDriveUpper') - return def test_config_drive_seed(self): """Config Drive seed directory.""" @@ -435,13 +447,21 @@ class TestDsIdentify(DsIdentifyBase): """Open Telecom identification.""" self._test_ds_found('OpenStack-OpenTelekom') + def test_openstack_asset_tag_nova(self): + """OpenStack identification via asset tag OpenStack Nova.""" + self._test_ds_found('OpenStack-AssetTag-Nova') + + def test_openstack_asset_tag_compute(self): + """OpenStack identification via asset tag OpenStack Compute.""" + self._test_ds_found('OpenStack-AssetTag-Compute') + def test_openstack_on_non_intel_is_maybe(self): """On non-Intel, openstack without dmi info is maybe. nova does not identify itself on platforms other than intel.
https://bugs.launchpad.net/cloud-init/+bugs?field.tag=dsid-nova""" - data = VALID_CFG['OpenStack'].copy() + data = copy.deepcopy(VALID_CFG['OpenStack']) del data['files'][P_PRODUCT_NAME] data.update({'policy_dmi': POLICY_FOUND_OR_MAYBE, 'policy_no_dmi': POLICY_FOUND_OR_MAYBE}) @@ -516,10 +536,38 @@ class TestDsIdentify(DsIdentifyBase): self._check_via_dict( ovf_cdrom_by_label, rc=RC_FOUND, dslist=['OVF', DS_NONE]) + def test_ovf_on_vmware_iso_found_by_cdrom_with_different_size(self): + """OVF is identified on CDROM drives only when the ISO is smaller than 10MB.""" + ovf_cdrom_with_size = copy.deepcopy(VALID_CFG['OVF']) + + # Set cdrom size to 20480 (10MB in 512 byte units) + ovf_cdrom_with_size['files']['sys/class/block/sr0/size'] = '20480\n' + self._check_via_dict( + ovf_cdrom_with_size, rc=RC_NOT_FOUND, policy_dmi="disabled") + + # Set cdrom size to 204800 (100MB in 512 byte units) + ovf_cdrom_with_size['files']['sys/class/block/sr0/size'] = '204800\n' + self._check_via_dict( + ovf_cdrom_with_size, rc=RC_NOT_FOUND, policy_dmi="disabled") + + # Set cdrom size to 18432 (9MB in 512 byte units) + ovf_cdrom_with_size['files']['sys/class/block/sr0/size'] = '18432\n' + self._check_via_dict( + ovf_cdrom_with_size, rc=RC_FOUND, dslist=['OVF', DS_NONE]) + + # Set cdrom size to 2048 (1MB in 512 byte units) + ovf_cdrom_with_size['files']['sys/class/block/sr0/size'] = '2048\n' + self._check_via_dict( + ovf_cdrom_with_size, rc=RC_FOUND, dslist=['OVF', DS_NONE]) + def test_default_nocloud_as_vdb_iso9660(self): """NoCloud is found with iso9660 filesystem on non-cdrom disk.""" self._test_ds_found('NoCloud') + def test_nocloud_upper(self): + """NoCloud is found with uppercase filesystem label.""" + self._test_ds_found('NoCloudUpper') + def test_nocloud_seed(self): """Nocloud seed directory.""" self._test_ds_found('NoCloud-seed') @@ -565,6 +613,33 @@ class TestDsIdentify(DsIdentifyBase): self.assertEqual(expected, [p for p in expected if p in toks], "path did not have expected tokens") + def test_zstack_is_ec2(self): + """EC2: chassis asset tag ends with '.zstack.io'""" + self._test_ds_found('Ec2-ZStack') + + def test_e24cloud_is_ec2(self): + """EC2: e24cloud identified by sys_vendor""" + self._test_ds_found('Ec2-E24Cloud') + + def test_e24cloud_not_active(self): + """EC2: e24cloudyday in sys_vendor is not e24cloud""" + self._test_ds_not_found('Ec2-E24Cloud-negative') + + +class TestBSDNoSys(DsIdentifyBase): + """Test *BSD code paths + + FreeBSD doesn't have /sys, so we use dmidecode(8) here. + It also doesn't have systemd-detect-virt(8), so we use sysctl(8) to query + kern.vm_guest, and optionally map it.""" + + def test_dmi_decode(self): + """Test that dmidecode(8) works on systems which don't have /sys + + This will be used on *BSD systems.
+ """ + self._test_ds_found('Hetzner-dmidecode') + class TestIsIBMProvisioning(DsIdentifyBase): """Test the is_ibm_provisioning method in ds-identify.""" @@ -688,7 +763,11 @@ VALID_CFG = { }, 'Ec2-brightbox': { 'ds': 'Ec2', - 'files': {P_PRODUCT_SERIAL: 'facc6e2f.brightbox.com\n'}, + 'files': {P_PRODUCT_SERIAL: 'srv-otuxg.gb1.brightbox.com\n'}, + }, + 'Ec2-brightbox-negative': { + 'ds': 'Ec2', + 'files': {P_PRODUCT_SERIAL: 'tricky-host.bobrightbox.com\n'}, }, 'GCE': { 'ds': 'GCE', @@ -713,6 +792,19 @@ VALID_CFG = { 'dev/vdb': 'pretend iso content for cidata\n', } }, + 'NoCloudUpper': { + 'ds': 'NoCloud', + 'mocks': [ + MOCK_VIRT_IS_KVM, + {'name': 'blkid', 'ret': 0, + 'out': blkid_out( + BLKID_UEFI_UBUNTU + + [{'DEVNAME': 'vdb', 'TYPE': 'iso9660', 'LABEL': 'CIDATA'}])}, + ], + 'files': { + 'dev/vdb': 'pretend iso content for cidata\n', + } + }, 'NoCloud-seed': { 'ds': 'NoCloud', 'files': { @@ -742,6 +834,18 @@ VALID_CFG = { 'files': {P_CHASSIS_ASSET_TAG: 'OpenTelekomCloud\n'}, 'mocks': [MOCK_VIRT_IS_XEN], }, + 'OpenStack-AssetTag-Nova': { + # VMware vSphere can't modify product-name, LP: #1669875 + 'ds': 'OpenStack', + 'files': {P_CHASSIS_ASSET_TAG: 'OpenStack Nova\n'}, + 'mocks': [MOCK_VIRT_IS_XEN], + }, + 'OpenStack-AssetTag-Compute': { + # VMware vSphere can't modify product-name, LP: #1669875 + 'ds': 'OpenStack', + 'files': {P_CHASSIS_ASSET_TAG: 'OpenStack Compute\n'}, + 'mocks': [MOCK_VIRT_IS_XEN], + }, 'OVF-seed': { 'ds': 'OVF', 'files': { @@ -778,6 +882,7 @@ VALID_CFG = { ], 'files': { 'dev/sr0': 'pretend ovf iso has ' + OVF_MATCH_STRING + '\n', + 'sys/class/block/sr0/size': '2048\n', } }, 'OVF-guestinfo': { @@ -818,10 +923,28 @@ VALID_CFG = { os.path.join(P_SEED_DIR, 'config_drive', 'openstack', 'latest', 'meta_data.json'): 'md\n'}, }, + 'RbxCloud': { + 'ds': 'RbxCloud', + 'mocks': [ + {'name': 'blkid', 'ret': 0, + 'out': blkid_out( + [{'DEVNAME': 'vda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()}, + {'DEVNAME': 'vda2', 'TYPE': 'ext4', + 'LABEL': 'cloudimg-rootfs', 'PARTUUID': uuid4()}, + {'DEVNAME': 'vdb', 'TYPE': 'vfat', 'LABEL': 'CLOUDMD'}] + )}, + ], + }, 'Hetzner': { 'ds': 'Hetzner', 'files': {P_SYS_VENDOR: 'Hetzner\n'}, }, + 'Hetzner-dmidecode': { + 'ds': 'Hetzner', + 'mocks': [ + {'name': 'dmi_decode', 'ret': 0, 'RET': 'Hetzner'} + ], + }, 'IBMCloud-metadata': { 'ds': 'IBMCloud', 'mocks': [ @@ -897,8 +1020,19 @@ VALID_CFG = { {'name': 'blkid', 'ret': 2, 'out': ''}, ], 'files': {ds_smartos.METADATA_SOCKFILE: 'would be a socket\n'}, - } - + }, + 'Ec2-ZStack': { + 'ds': 'Ec2', + 'files': {P_CHASSIS_ASSET_TAG: '123456.zstack.io\n'}, + }, + 'Ec2-E24Cloud': { + 'ds': 'Ec2', + 'files': {P_SYS_VENDOR: 'e24cloud\n'}, + }, + 'Ec2-E24Cloud-negative': { + 'ds': 'Ec2', + 'files': {P_SYS_VENDOR: 'e24cloudyday\n'}, + } } # vi: ts=4 expandtab diff --git a/tests/unittests/test_filters/test_launch_index.py b/tests/unittests/test_filters/test_launch_index.py index e1a5d2c8..1492361e 100644 --- a/tests/unittests/test_filters/test_launch_index.py +++ b/tests/unittests/test_filters/test_launch_index.py @@ -1,11 +1,10 @@ # This file is part of cloud-init. See LICENSE file for license information. 
import copy +from itertools import filterfalse from cloudinit.tests import helpers -from six.moves import filterfalse - from cloudinit.filters import launch_index from cloudinit import user_data as ud from cloudinit import util diff --git a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py b/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py index 23bd6e10..69009a44 100644 --- a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py +++ b/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py @@ -7,11 +7,7 @@ import logging import os import shutil import tempfile - -try: - from unittest import mock -except ImportError: - import mock +from unittest import mock from cloudinit import cloud from cloudinit import distros @@ -78,7 +74,7 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase): get_rel = rpatcher.start() get_rel.return_value = {'codename': "fakerelease"} self.addCleanup(rpatcher.stop) - apatcher = mock.patch("cloudinit.util.get_architecture") + apatcher = mock.patch("cloudinit.util.get_dpkg_architecture") get_arch = apatcher.start() get_arch.return_value = 'amd64' self.addCleanup(apatcher.stop) diff --git a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py b/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py index f7608c28..0aa3d51a 100644 --- a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py +++ b/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py @@ -7,12 +7,8 @@ import logging import os import shutil import tempfile - -try: - from unittest import mock -except ImportError: - import mock -from mock import call +from unittest import mock +from unittest.mock import call from cloudinit import cloud from cloudinit import distros @@ -106,7 +102,7 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase): get_rel = rpatcher.start() get_rel.return_value = {'codename': "fakerel"} self.addCleanup(rpatcher.stop) - apatcher = mock.patch("cloudinit.util.get_architecture") + apatcher = mock.patch("cloudinit.util.get_dpkg_architecture") get_arch = apatcher.start() get_arch.return_value = 'amd64' self.addCleanup(apatcher.stop) diff --git a/tests/unittests/test_handler/test_handler_apt_source_v1.py b/tests/unittests/test_handler/test_handler_apt_source_v1.py index a3132fbd..866752ef 100644 --- a/tests/unittests/test_handler/test_handler_apt_source_v1.py +++ b/tests/unittests/test_handler/test_handler_apt_source_v1.py @@ -9,12 +9,8 @@ import os import re import shutil import tempfile - -try: - from unittest import mock -except ImportError: - import mock -from mock import call +from unittest import mock +from unittest.mock import call from cloudinit.config import cc_apt_configure from cloudinit import gpg @@ -77,7 +73,7 @@ class TestAptSourceConfig(TestCase): get_rel = rpatcher.start() get_rel.return_value = {'codename': self.release} self.addCleanup(rpatcher.stop) - apatcher = mock.patch("cloudinit.util.get_architecture") + apatcher = mock.patch("cloudinit.util.get_dpkg_architecture") get_arch = apatcher.start() get_arch.return_value = 'amd64' self.addCleanup(apatcher.stop) diff --git a/tests/unittests/test_handler/test_handler_apt_source_v3.py b/tests/unittests/test_handler/test_handler_apt_source_v3.py index 90fe6eed..90949b6d 100644 --- a/tests/unittests/test_handler/test_handler_apt_source_v3.py +++ b/tests/unittests/test_handler/test_handler_apt_source_v3.py @@ -11,13 +11,8 
@@ import shutil import socket import tempfile -from unittest import TestCase - -try: - from unittest import mock -except ImportError: - import mock -from mock import call +from unittest import TestCase, mock +from unittest.mock import call from cloudinit import cloud from cloudinit import distros @@ -453,14 +448,14 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): self.assertFalse(os.path.isfile(self.aptlistfile2)) self.assertFalse(os.path.isfile(self.aptlistfile3)) - @mock.patch("cloudinit.config.cc_apt_configure.util.get_architecture") - def test_apt_v3_list_rename(self, m_get_architecture): + @mock.patch("cloudinit.config.cc_apt_configure.util.get_dpkg_architecture") + def test_apt_v3_list_rename(self, m_get_dpkg_architecture): """test_apt_v3_list_rename - Test find mirror and apt list renaming""" pre = "/var/lib/apt/lists" # filenames are archive dependent arch = 's390x' - m_get_architecture.return_value = arch + m_get_dpkg_architecture.return_value = arch component = "ubuntu-ports" archive = "ports.ubuntu.com" @@ -487,16 +482,17 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): with mock.patch.object(os, 'rename') as mockren: with mock.patch.object(glob, 'glob', return_value=[fromfn]): - cc_apt_configure.rename_apt_lists(mirrors, TARGET) + cc_apt_configure.rename_apt_lists(mirrors, TARGET, arch) mockren.assert_any_call(fromfn, tofn) - @mock.patch("cloudinit.config.cc_apt_configure.util.get_architecture") - def test_apt_v3_list_rename_non_slash(self, m_get_architecture): + @mock.patch("cloudinit.config.cc_apt_configure.util.get_dpkg_architecture") + def test_apt_v3_list_rename_non_slash(self, m_get_dpkg_architecture): target = os.path.join(self.tmp, "rename_non_slash") apt_lists_d = os.path.join(target, "./" + cc_apt_configure.APT_LISTS) - m_get_architecture.return_value = 'amd64' + arch = 'amd64' + m_get_dpkg_architecture.return_value = arch mirror_path = "some/random/path/" primary = "http://test.ubuntu.com/" + mirror_path @@ -532,7 +528,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): fpath = os.path.join(apt_lists_d, opre + suff) util.write_file(fpath, content=fpath) - cc_apt_configure.rename_apt_lists(mirrors, target) + cc_apt_configure.rename_apt_lists(mirrors, target, arch) found = sorted(os.listdir(apt_lists_d)) self.assertEqual(expected, found) @@ -625,10 +621,12 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): self.assertEqual(mirrors['SECURITY'], smir) - @mock.patch("cloudinit.config.cc_apt_configure.util.get_architecture") - def test_apt_v3_get_def_mir_non_intel_no_arch(self, m_get_architecture): + @mock.patch("cloudinit.config.cc_apt_configure.util.get_dpkg_architecture") + def test_apt_v3_get_def_mir_non_intel_no_arch( + self, m_get_dpkg_architecture + ): arch = 'ppc64el' - m_get_architecture.return_value = arch + m_get_dpkg_architecture.return_value = arch expected = {'PRIMARY': 'http://ports.ubuntu.com/ubuntu-ports', 'SECURITY': 'http://ports.ubuntu.com/ubuntu-ports'} self.assertEqual(expected, cc_apt_configure.get_default_mirrors()) @@ -998,6 +996,17 @@ deb http://ubuntu.com/ubuntu/ xenial-proposed main""") class TestDebconfSelections(TestCase): + @mock.patch("cloudinit.config.cc_apt_configure.util.subp") + def test_set_sel_appends_newline_if_absent(self, m_subp): + """Automatically append a newline to debconf-set-selections config.""" + selections = b'some/setting boolean true' + cc_apt_configure.debconf_set_selections(selections=selections) + cc_apt_configure.debconf_set_selections(selections=selections + 
b'\n') + m_call = mock.call( + ['debconf-set-selections'], data=selections + b'\n', capture=True, + target=None) + self.assertEqual([m_call, m_call], m_subp.call_args_list) + @mock.patch("cloudinit.config.cc_apt_configure.debconf_set_selections") def test_no_set_sel_if_none_to_set(self, m_set_sel): cc_apt_configure.apply_debconf_selections({'foo': 'bar'}) diff --git a/tests/unittests/test_handler/test_handler_ca_certs.py b/tests/unittests/test_handler/test_handler_ca_certs.py index 06e14db0..5b4105dd 100644 --- a/tests/unittests/test_handler/test_handler_ca_certs.py +++ b/tests/unittests/test_handler/test_handler_ca_certs.py @@ -11,12 +11,9 @@ import logging import shutil import tempfile import unittest +from unittest import mock try: - from unittest import mock -except ImportError: - import mock -try: from contextlib import ExitStack except ImportError: from contextlib2 import ExitStack diff --git a/tests/unittests/test_handler/test_handler_chef.py b/tests/unittests/test_handler/test_handler_chef.py index b16532ea..2dab3a54 100644 --- a/tests/unittests/test_handler/test_handler_chef.py +++ b/tests/unittests/test_handler/test_handler_chef.py @@ -4,7 +4,6 @@ import httpretty import json import logging import os -import six from cloudinit import cloud from cloudinit.config import cc_chef @@ -145,6 +144,7 @@ class TestChef(FilesystemMockingTestCase): file_backup_path "/var/backups/chef" pid_file "/var/run/chef/client.pid" Chef::Log::Formatter.show_time = true + encrypted_data_bag_secret "/etc/chef/encrypted_data_bag_secret" """ tpl_file = util.load_file('templates/chef_client.rb.tmpl') self.patchUtils(self.tmp) @@ -157,6 +157,8 @@ class TestChef(FilesystemMockingTestCase): 'validation_name': 'bob', 'validation_key': "/etc/chef/vkey.pem", 'validation_cert': "this is my cert", + 'encrypted_data_bag_secret': + '/etc/chef/encrypted_data_bag_secret' }, } cc_chef.handle('chef', cfg, self.fetch_cloud('ubuntu'), LOG, []) @@ -175,7 +177,7 @@ class TestChef(FilesystemMockingTestCase): continue # the value from the cfg overrides that in the default val = cfg['chef'].get(k, v) - if isinstance(val, six.string_types): + if isinstance(val, str): self.assertIn(val, c) c = util.load_file(cc_chef.CHEF_FB_PATH) self.assertEqual({}, json.loads(c)) diff --git a/tests/unittests/test_handler/test_handler_disk_setup.py b/tests/unittests/test_handler/test_handler_disk_setup.py index 5afcacaf..0e51f17a 100644 --- a/tests/unittests/test_handler/test_handler_disk_setup.py +++ b/tests/unittests/test_handler/test_handler_disk_setup.py @@ -222,4 +222,22 @@ class TestMkfsCommandHandling(CiTestCase): '-L', 'without_cmd', '-F', 'are', 'added'], shell=False) + @mock.patch('cloudinit.config.cc_disk_setup.util.which') + def test_mkswap(self, m_which, subp, *args): + """mkfs observes the label and overwrite settings and falls back + to mkswap when mkfs.swap is not present.""" + m_which.side_effect = iter([None, '/sbin/mkswap']) + cc_disk_setup.mkfs({ + 'filesystem': 'swap', + 'device': '/dev/xdb1', + 'label': 'swap', + 'overwrite': True, + }) + + self.assertEqual([mock.call('mkfs.swap'), mock.call('mkswap')], + m_which.call_args_list) + subp.assert_called_once_with( + ['/sbin/mkswap', '/dev/xdb1', '-L', 'swap', '-f'], shell=False) + +# # vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_growpart.py b/tests/unittests/test_handler/test_handler_growpart.py index a3e46351..43b53745 100644 --- a/tests/unittests/test_handler/test_handler_growpart.py +++ b/tests/unittests/test_handler/test_handler_growpart.py @@ -11,12 +11,9 @@ import logging
import os import re import unittest +from unittest import mock try: - from unittest import mock -except ImportError: - import mock -try: from contextlib import ExitStack except ImportError: from contextlib2 import ExitStack @@ -52,6 +49,18 @@ growpart disk partition Resize partition 1 on /dev/sda """ +HELP_GPART = """ +usage: gpart add -t type [-a alignment] [-b start] <SNIP> geom + gpart backup geom + gpart bootcode [-b bootcode] [-p partcode -i index] [-f flags] geom +<SNIP> + gpart resize -i index [-a alignment] [-s size] [-f flags] geom + gpart restore [-lF] [-f flags] provider [...] + gpart recover [-f flags] geom + gpart help +<SNIP> +""" + class TestDisabled(unittest.TestCase): def setUp(self): @@ -97,8 +106,9 @@ class TestConfig(TestCase): self.handle(self.name, config, self.cloud_init, self.log, self.args) - mockobj.assert_called_once_with( - ['growpart', '--help'], env={'LANG': 'C'}) + mockobj.assert_has_calls([ + mock.call(['growpart', '--help'], env={'LANG': 'C'}), + mock.call(['gpart', 'help'], env={'LANG': 'C'}, rcs=[0, 1])]) @mock.patch.dict("os.environ", clear=True) def test_no_resizers_mode_growpart_is_exception(self): @@ -124,6 +134,18 @@ class TestConfig(TestCase): mockobj.assert_called_once_with( ['growpart', '--help'], env={'LANG': 'C'}) + @mock.patch.dict("os.environ", clear=True) + def test_mode_auto_falls_back_to_gpart(self): + with mock.patch.object( + util, 'subp', + return_value=("", HELP_GPART)) as mockobj: + ret = cc_growpart.resizer_factory(mode="auto") + self.assertIsInstance(ret, cc_growpart.ResizeGpart) + + mockobj.assert_has_calls([ + mock.call(['growpart', '--help'], env={'LANG': 'C'}), + mock.call(['gpart', 'help'], env={'LANG': 'C'}, rcs=[0, 1])]) + def test_handle_with_no_growpart_entry(self): # if no 'growpart' entry in config, then mode=auto should be used diff --git a/tests/unittests/test_handler/test_handler_locale.py b/tests/unittests/test_handler/test_handler_locale.py index e29a06f9..2b22559f 100644 --- a/tests/unittests/test_handler/test_handler_locale.py +++ b/tests/unittests/test_handler/test_handler_locale.py @@ -17,13 +17,12 @@ from cloudinit.tests import helpers as t_help from configobj import ConfigObj -from six import BytesIO - import logging -import mock import os import shutil import tempfile +from io import BytesIO +from unittest import mock LOG = logging.getLogger(__name__) diff --git a/tests/unittests/test_handler/test_handler_lxd.py b/tests/unittests/test_handler/test_handler_lxd.py index 2478ebc4..40b521e5 100644 --- a/tests/unittests/test_handler/test_handler_lxd.py +++ b/tests/unittests/test_handler/test_handler_lxd.py @@ -5,10 +5,7 @@ from cloudinit.sources import DataSourceNoCloud from cloudinit import (distros, helpers, cloud) from cloudinit.tests import helpers as t_help -try: - from unittest import mock -except ImportError: - import mock +from unittest import mock class TestLxd(t_help.CiTestCase): @@ -62,7 +59,7 @@ class TestLxd(t_help.CiTestCase): cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, self.logger, []) self.assertFalse(m_maybe_clean.called) install_pkg = cc.distro.install_packages.call_args_list[0][0][0] - self.assertEqual(sorted(install_pkg), ['lxd', 'zfs']) + self.assertEqual(sorted(install_pkg), ['lxd', 'zfsutils-linux']) @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default") @mock.patch("cloudinit.config.cc_lxd.util") diff --git a/tests/unittests/test_handler/test_handler_mcollective.py b/tests/unittests/test_handler/test_handler_mcollective.py index 7eec7352..c013a538 100644 --- 
a/tests/unittests/test_handler/test_handler_mcollective.py +++ b/tests/unittests/test_handler/test_handler_mcollective.py @@ -10,8 +10,8 @@ import configobj import logging import os import shutil -from six import BytesIO import tempfile +from io import BytesIO LOG = logging.getLogger(__name__) diff --git a/tests/unittests/test_handler/test_handler_mounts.py b/tests/unittests/test_handler/test_handler_mounts.py index 8fea6c2a..05ac183e 100644 --- a/tests/unittests/test_handler/test_handler_mounts.py +++ b/tests/unittests/test_handler/test_handler_mounts.py @@ -1,16 +1,12 @@ # This file is part of cloud-init. See LICENSE file for license information. import os.path +from unittest import mock from cloudinit.config import cc_mounts from cloudinit.tests import helpers as test_helpers -try: - from unittest import mock -except ImportError: - import mock - class TestSanitizeDevname(test_helpers.FilesystemMockingTestCase): @@ -154,7 +150,15 @@ class TestFstabHandling(test_helpers.FilesystemMockingTestCase): return_value=True) self.add_patch('cloudinit.config.cc_mounts.util.subp', - 'mock_util_subp') + 'm_util_subp') + + self.add_patch('cloudinit.config.cc_mounts.util.mounts', + 'mock_util_mounts', + return_value={ + '/dev/sda1': {'fstype': 'ext4', + 'mountpoint': '/', + 'opts': 'rw,relatime,discard' + }}) self.mock_cloud = mock.Mock() self.mock_log = mock.Mock() @@ -173,6 +177,18 @@ class TestFstabHandling(test_helpers.FilesystemMockingTestCase): return dev + def test_swap_integrity(self): + '''Ensure that the swap file is correctly created and that swapon + succeeds, covering the corner case of: + kernel: swapon: swapfile has holes''' + + fstab = '/swap.img swap swap defaults 0 0\n' + + with open(cc_mounts.FSTAB_PATH, 'w') as fd: + fd.write(fstab) + cc = {'swap': ['filename: /swap.img', 'size: 512', 'maxsize: 512']} + cc_mounts.handle(None, cc, self.mock_cloud, self.mock_log, []) + def test_fstab_no_swap_device(self): '''Ensure that cloud-init adds a discovered swap partition to /etc/fstab.''' @@ -230,4 +246,24 @@ class TestFstabHandling(test_helpers.FilesystemMockingTestCase): fstab_new_content = fd.read() self.assertEqual(fstab_expected_content, fstab_new_content) + def test_no_change_fstab_sets_needs_mount_all(self): + '''Verify that unchanged fstab entries still trigger a mount -a call.''' + fstab_original_content = ( + 'LABEL=cloudimg-rootfs / ext4 defaults 0 0\n' + 'LABEL=UEFI /boot/efi vfat defaults 0 0\n' + '/dev/vdb /mnt auto defaults,noexec,comment=cloudconfig 0 2\n' + ) + fstab_expected_content = fstab_original_content + cc = {'mounts': [ + ['/dev/vdb', '/mnt', 'auto', 'defaults,noexec']]} + with open(cc_mounts.FSTAB_PATH, 'w') as fd: + fd.write(fstab_original_content) + with open(cc_mounts.FSTAB_PATH, 'r') as fd: + fstab_new_content = fd.read() + self.assertEqual(fstab_expected_content, fstab_new_content) + cc_mounts.handle(None, cc, self.mock_cloud, self.mock_log, []) + self.m_util_subp.assert_has_calls([ + mock.call(['mount', '-a']), + mock.call(['systemctl', 'daemon-reload'])]) + # vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_ntp.py b/tests/unittests/test_handler/test_handler_ntp.py index 0f22e579..463d892a 100644 --- a/tests/unittests/test_handler/test_handler_ntp.py +++ b/tests/unittests/test_handler/test_handler_ntp.py @@ -268,17 +268,22 @@ class TestNtp(FilesystemMockingTestCase): template_fn=template_fn) content = util.load_file(confpath) if client in ['ntp', 'chrony']: - expected_servers = '\n'.join([ - 'server {0} iburst'.format(srv) for srv in
servers]) + content_lines = content.splitlines() + expected_servers = [ + 'server {0} iburst'.format(srv) for srv in servers] print('distro=%s client=%s' % (distro, client)) - self.assertIn(expected_servers, content, - ('failed to render {0} conf' - ' for distro:{1}'.format(client, distro))) - expected_pools = '\n'.join([ - 'pool {0} iburst'.format(pool) for pool in pools]) - self.assertIn(expected_pools, content, - ('failed to render {0} conf' - ' for distro:{1}'.format(client, distro))) + for sline in expected_servers: + self.assertIn(sline, content_lines, + ('failed to render {0} conf' + ' for distro:{1}'.format(client, + distro))) + expected_pools = [ + 'pool {0} iburst'.format(pool) for pool in pools] + for pline in expected_pools: + self.assertIn(pline, content_lines, + ('failed to render {0} conf' + ' for distro:{1}'.format(client, + distro))) elif client == 'systemd-timesyncd': expected_content = ( "# cloud-init generated file\n" + diff --git a/tests/unittests/test_handler/test_handler_power_state.py b/tests/unittests/test_handler/test_handler_power_state.py index 3c726422..0d8d17b9 100644 --- a/tests/unittests/test_handler/test_handler_power_state.py +++ b/tests/unittests/test_handler/test_handler_power_state.py @@ -90,7 +90,7 @@ class TestCheckCondition(t_help.TestCase): mocklog = mock.Mock() self.assertEqual( psc.check_condition(self.cmd_with_exit(2), mocklog), False) - self.assertEqual(mocklog.warn.call_count, 1) + self.assertEqual(mocklog.warning.call_count, 1) def check_lps_ret(psc_return, mode=None): diff --git a/tests/unittests/test_handler/test_handler_puppet.py b/tests/unittests/test_handler/test_handler_puppet.py index 0b6e3b58..1494177d 100644 --- a/tests/unittests/test_handler/test_handler_puppet.py +++ b/tests/unittests/test_handler/test_handler_puppet.py @@ -6,6 +6,7 @@ from cloudinit import (distros, helpers, cloud, util) from cloudinit.tests.helpers import CiTestCase, mock import logging +import textwrap LOG = logging.getLogger(__name__) @@ -64,6 +65,7 @@ class TestPuppetHandle(CiTestCase): super(TestPuppetHandle, self).setUp() self.new_root = self.tmp_dir() self.conf = self.tmp_path('puppet.conf') + self.csr_attributes_path = self.tmp_path('csr_attributes.yaml') def _get_cloud(self, distro): paths = helpers.Paths({'templates_dir': self.new_root}) @@ -140,3 +142,35 @@ class TestPuppetHandle(CiTestCase): content = util.load_file(self.conf) expected = '[agent]\nserver = puppetmaster.example.org\nother = 3\n\n' self.assertEqual(expected, content) + + @mock.patch('cloudinit.config.cc_puppet.util.subp') + def test_handler_puppet_writes_csr_attributes_file(self, m_subp, m_auto): + """When csr_attributes is provided + creates file in PUPPET_CSR_ATTRIBUTES_PATH.""" + mycloud = self._get_cloud('ubuntu') + mycloud.distro = mock.MagicMock() + cfg = { + 'puppet': { + 'csr_attributes': { + 'custom_attributes': { + '1.2.840.113549.1.9.7': '342thbjkt82094y0ut' + 'hhor289jnqthpc2290'}, + 'extension_requests': { + 'pp_uuid': 'ED803750-E3C7-44F5-BB08-41A04433FE2E', + 'pp_image_name': 'my_ami_image', + 'pp_preshared_key': '342thbjkt82094y0uthhor289jnqthpc2290'} + }}} + csr_attributes = 'cloudinit.config.cc_puppet.' 
\ + 'PUPPET_CSR_ATTRIBUTES_PATH' + with mock.patch(csr_attributes, self.csr_attributes_path): + cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) + content = util.load_file(self.csr_attributes_path) + expected = textwrap.dedent("""\ + custom_attributes: + 1.2.840.113549.1.9.7: 342thbjkt82094y0uthhor289jnqthpc2290 + extension_requests: + pp_image_name: my_ami_image + pp_preshared_key: 342thbjkt82094y0uthhor289jnqthpc2290 + pp_uuid: ED803750-E3C7-44F5-BB08-41A04433FE2E + """) + self.assertEqual(expected, content) diff --git a/tests/unittests/test_handler/test_handler_resizefs.py b/tests/unittests/test_handler/test_handler_resizefs.py index 35187847..db9a0414 100644 --- a/tests/unittests/test_handler/test_handler_resizefs.py +++ b/tests/unittests/test_handler/test_handler_resizefs.py @@ -147,7 +147,7 @@ class TestResizefs(CiTestCase): def test_resize_ufs_cmd_return(self): mount_point = '/' devpth = '/dev/sda2' - self.assertEqual(('growfs', '-y', devpth), + self.assertEqual(('growfs', '-y', mount_point), _resize_ufs(mount_point, devpth)) @mock.patch('cloudinit.util.is_container', return_value=False) diff --git a/tests/unittests/test_handler/test_handler_seed_random.py b/tests/unittests/test_handler/test_handler_seed_random.py index f60dedc2..abecc53b 100644 --- a/tests/unittests/test_handler/test_handler_seed_random.py +++ b/tests/unittests/test_handler/test_handler_seed_random.py @@ -12,8 +12,7 @@ from cloudinit.config import cc_seed_random import gzip import tempfile - -from six import BytesIO +from io import BytesIO from cloudinit import cloud from cloudinit import distros diff --git a/tests/unittests/test_handler/test_handler_set_hostname.py b/tests/unittests/test_handler/test_handler_set_hostname.py index d09ec23a..58abf51a 100644 --- a/tests/unittests/test_handler/test_handler_set_hostname.py +++ b/tests/unittests/test_handler/test_handler_set_hostname.py @@ -13,8 +13,8 @@ from configobj import ConfigObj import logging import os import shutil -from six import BytesIO import tempfile +from io import BytesIO LOG = logging.getLogger(__name__) diff --git a/tests/unittests/test_handler/test_handler_snappy.py b/tests/unittests/test_handler/test_handler_snappy.py deleted file mode 100644 index 76b79c29..00000000 --- a/tests/unittests/test_handler/test_handler_snappy.py +++ /dev/null @@ -1,601 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
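# Editor's note (illustrative sketch, not part of the diff): the resizefs
# change above tracks a fix in cc_resizefs: the test now expects FreeBSD's
# growfs(8) to be handed the mount point rather than the device node. A
# minimal sketch of the corrected helper, assuming it mirrors
# cloudinit.config.cc_resizefs._resize_ufs:
def _resize_ufs(mount_point, devpth):
    # devpth is kept for the common resizer signature but is no longer
    # passed to growfs; -y answers the growfs prompt non-interactively.
    return ('growfs', '-y', mount_point)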
- -from cloudinit.config.cc_snappy import ( - makeop, get_package_ops, render_snap_op) -from cloudinit.config.cc_snap_config import ( - add_assertions, add_snap_user, ASSERTIONS_FILE) -from cloudinit import (distros, helpers, cloud, util) -from cloudinit.config.cc_snap_config import handle as snap_handle -from cloudinit.sources import DataSourceNone -from cloudinit.tests.helpers import FilesystemMockingTestCase, mock - -from cloudinit.tests import helpers as t_help - -import logging -import os -import shutil -import tempfile -import textwrap -import yaml - -LOG = logging.getLogger(__name__) -ALLOWED = (dict, list, int, str) - - -class TestInstallPackages(t_help.TestCase): - def setUp(self): - super(TestInstallPackages, self).setUp() - self.unapply = [] - - # by default 'which' has nothing in its path - self.apply_patches([(util, 'subp', self._subp)]) - self.subp_called = [] - self.snapcmds = [] - self.tmp = tempfile.mkdtemp(prefix="TestInstallPackages") - - def tearDown(self): - apply_patches([i for i in reversed(self.unapply)]) - shutil.rmtree(self.tmp) - - def apply_patches(self, patches): - ret = apply_patches(patches) - self.unapply += ret - - def populate_tmp(self, files): - return t_help.populate_dir(self.tmp, files) - - def _subp(self, *args, **kwargs): - # supports subp calling with cmd as args or kwargs - if 'args' not in kwargs: - kwargs['args'] = args[0] - self.subp_called.append(kwargs) - args = kwargs['args'] - # here we basically parse the snappy command invoked - # and append to snapcmds a list of (mode, pkg, config) - if args[0:2] == ['snappy', 'config']: - if args[3] == "-": - config = kwargs.get('data', '') - else: - with open(args[3], "rb") as fp: - config = yaml.safe_load(fp.read()) - self.snapcmds.append(['config', args[2], config]) - elif args[0:2] == ['snappy', 'install']: - config = None - pkg = None - for arg in args[2:]: - if arg.startswith("-"): - continue - if not pkg: - pkg = arg - elif not config: - cfgfile = arg - if cfgfile == "-": - config = kwargs.get('data', '') - elif cfgfile: - with open(cfgfile, "rb") as fp: - config = yaml.safe_load(fp.read()) - self.snapcmds.append(['install', pkg, config]) - - def test_package_ops_1(self): - ret = get_package_ops( - packages=['pkg1', 'pkg2', 'pkg3'], - configs={'pkg2': b'mycfg2'}, installed=[]) - self.assertEqual( - ret, [makeop('install', 'pkg1', None, None), - makeop('install', 'pkg2', b'mycfg2', None), - makeop('install', 'pkg3', None, None)]) - - def test_package_ops_config_only(self): - ret = get_package_ops( - packages=None, - configs={'pkg2': b'mycfg2'}, installed=['pkg1', 'pkg2']) - self.assertEqual( - ret, [makeop('config', 'pkg2', b'mycfg2')]) - - def test_package_ops_install_and_config(self): - ret = get_package_ops( - packages=['pkg3', 'pkg2'], - configs={'pkg2': b'mycfg2', 'xinstalled': b'xcfg'}, - installed=['xinstalled']) - self.assertEqual( - ret, [makeop('install', 'pkg3'), - makeop('install', 'pkg2', b'mycfg2'), - makeop('config', 'xinstalled', b'xcfg')]) - - def test_package_ops_install_long_config_short(self): - # a package can be installed by full name, but have config by short - cfg = {'k1': 'k2'} - ret = get_package_ops( - packages=['config-example.canonical'], - configs={'config-example': cfg}, installed=[]) - self.assertEqual( - ret, [makeop('install', 'config-example.canonical', cfg)]) - - def test_package_ops_with_file(self): - self.populate_tmp( - {"snapf1.snap": b"foo1", "snapf1.config": b"snapf1cfg", - "snapf2.snap": b"foo2", "foo.bar": "ignored"}) - ret = get_package_ops( - 
packages=['pkg1'], configs={}, installed=[], fspath=self.tmp) - self.assertEqual( - ret, - [makeop_tmpd(self.tmp, 'install', 'snapf1', path="snapf1.snap", - cfgfile="snapf1.config"), - makeop_tmpd(self.tmp, 'install', 'snapf2', path="snapf2.snap"), - makeop('install', 'pkg1')]) - - def test_package_ops_common_filename(self): - # fish package name from filename - # package names likely look like: pkgname.namespace_version_arch.snap - - # find filenames - self.populate_tmp( - {"pkg-ws.smoser_0.3.4_all.snap": "pkg-ws-snapdata", - "pkg-ws.config": "pkg-ws-config", - "pkg1.smoser_1.2.3_all.snap": "pkg1.snapdata", - "pkg1.smoser.config": "pkg1.smoser.config-data", - "pkg1.config": "pkg1.config-data", - "pkg2.smoser_0.0_amd64.snap": "pkg2-snapdata", - "pkg2.smoser_0.0_amd64.config": "pkg2.config"}) - - ret = get_package_ops( - packages=[], configs={}, installed=[], fspath=self.tmp) - self.assertEqual( - ret, - [makeop_tmpd(self.tmp, 'install', 'pkg-ws.smoser', - path="pkg-ws.smoser_0.3.4_all.snap", - cfgfile="pkg-ws.config"), - makeop_tmpd(self.tmp, 'install', 'pkg1.smoser', - path="pkg1.smoser_1.2.3_all.snap", - cfgfile="pkg1.smoser.config"), - makeop_tmpd(self.tmp, 'install', 'pkg2.smoser', - path="pkg2.smoser_0.0_amd64.snap", - cfgfile="pkg2.smoser_0.0_amd64.config"), - ]) - - def test_package_ops_config_overrides_file(self): - # config data overrides local file .config - self.populate_tmp( - {"snapf1.snap": b"foo1", "snapf1.config": b"snapf1cfg"}) - ret = get_package_ops( - packages=[], configs={'snapf1': 'snapf1cfg-config'}, - installed=[], fspath=self.tmp) - self.assertEqual( - ret, [makeop_tmpd(self.tmp, 'install', 'snapf1', - path="snapf1.snap", config="snapf1cfg-config")]) - - def test_package_ops_namespacing(self): - cfgs = { - 'config-example': {'k1': 'v1'}, - 'pkg1': {'p1': 'p2'}, - 'ubuntu-core': {'c1': 'c2'}, - 'notinstalled.smoser': {'s1': 's2'}, - } - ret = get_package_ops( - packages=['config-example.canonical'], configs=cfgs, - installed=['config-example.smoser', 'pkg1.canonical', - 'ubuntu-core']) - - expected_configs = [ - makeop('config', 'pkg1', config=cfgs['pkg1']), - makeop('config', 'ubuntu-core', config=cfgs['ubuntu-core'])] - expected_installs = [ - makeop('install', 'config-example.canonical', - config=cfgs['config-example'])] - - installs = [i for i in ret if i['op'] == 'install'] - configs = [c for c in ret if c['op'] == 'config'] - - self.assertEqual(installs, expected_installs) - # configs are not ordered - self.assertEqual(len(configs), len(expected_configs)) - self.assertTrue(all(found in expected_configs for found in configs)) - - def test_render_op_localsnap(self): - self.populate_tmp({"snapf1.snap": b"foo1"}) - op = makeop_tmpd(self.tmp, 'install', 'snapf1', - path='snapf1.snap') - render_snap_op(**op) - self.assertEqual( - self.snapcmds, [['install', op['path'], None]]) - - def test_render_op_localsnap_localconfig(self): - self.populate_tmp( - {"snapf1.snap": b"foo1", 'snapf1.config': b'snapf1cfg'}) - op = makeop_tmpd(self.tmp, 'install', 'snapf1', - path='snapf1.snap', cfgfile='snapf1.config') - render_snap_op(**op) - self.assertEqual( - self.snapcmds, [['install', op['path'], 'snapf1cfg']]) - - def test_render_op_snap(self): - op = makeop('install', 'snapf1') - render_snap_op(**op) - self.assertEqual( - self.snapcmds, [['install', 'snapf1', None]]) - - def test_render_op_snap_config(self): - mycfg = {'key1': 'value1'} - name = "snapf1" - op = makeop('install', name, config=mycfg) - render_snap_op(**op) - self.assertEqual( - self.snapcmds, [['install', name, 
{'config': {name: mycfg}}]]) - - def test_render_op_config_bytes(self): - name = "snapf1" - mycfg = b'myconfig' - op = makeop('config', name, config=mycfg) - render_snap_op(**op) - self.assertEqual( - self.snapcmds, [['config', 'snapf1', {'config': {name: mycfg}}]]) - - def test_render_op_config_string(self): - name = 'snapf1' - mycfg = 'myconfig: foo\nhisconfig: bar\n' - op = makeop('config', name, config=mycfg) - render_snap_op(**op) - self.assertEqual( - self.snapcmds, [['config', 'snapf1', {'config': {name: mycfg}}]]) - - def test_render_op_config_dict(self): - # config entry for package can be a dict, not a string blob - mycfg = {'foo': 'bar'} - name = 'snapf1' - op = makeop('config', name, config=mycfg) - render_snap_op(**op) - # snapcmds is a list of 3-entry lists. data_found will be the - # blob of data in the file in 'snappy install --config=<file>' - data_found = self.snapcmds[0][2] - self.assertEqual(mycfg, data_found['config'][name]) - - def test_render_op_config_list(self): - # config entry for package can be a list, not a string blob - mycfg = ['foo', 'bar', 'wark', {'f1': 'b1'}] - name = "snapf1" - op = makeop('config', name, config=mycfg) - render_snap_op(**op) - data_found = self.snapcmds[0][2] - self.assertEqual(mycfg, data_found['config'][name]) - - def test_render_op_config_int(self): - # config entry for package can be a list, not a string blob - mycfg = 1 - name = 'snapf1' - op = makeop('config', name, config=mycfg) - render_snap_op(**op) - data_found = self.snapcmds[0][2] - self.assertEqual(mycfg, data_found['config'][name]) - - def test_render_long_configs_short(self): - # install a namespaced package should have un-namespaced config - mycfg = {'k1': 'k2'} - name = 'snapf1' - op = makeop('install', name + ".smoser", config=mycfg) - render_snap_op(**op) - data_found = self.snapcmds[0][2] - self.assertEqual(mycfg, data_found['config'][name]) - - def test_render_does_not_pad_cfgfile(self): - # package_ops with cfgfile should not modify --file= content. - mydata = "foo1: bar1\nk: [l1, l2, l3]\n" - self.populate_tmp( - {"snapf1.snap": b"foo1", "snapf1.config": mydata.encode()}) - ret = get_package_ops( - packages=[], configs={}, installed=[], fspath=self.tmp) - self.assertEqual( - ret, - [makeop_tmpd(self.tmp, 'install', 'snapf1', path="snapf1.snap", - cfgfile="snapf1.config")]) - - # now the op was ok, but test that render didn't mess it up. - render_snap_op(**ret[0]) - data_found = self.snapcmds[0][2] - # the data found gets loaded in the snapcmd interpretation - # so this comparison is a bit lossy, but input to snappy config - # is expected to be yaml loadable, so it should be OK. 
- self.assertEqual(yaml.safe_load(mydata), data_found) - - -class TestSnapConfig(FilesystemMockingTestCase): - - SYSTEM_USER_ASSERTION = textwrap.dedent(""" - type: system-user - authority-id: LqvZQdfyfGlYvtep4W6Oj6pFXP9t1Ksp - brand-id: LqvZQdfyfGlYvtep4W6Oj6pFXP9t1Ksp - email: foo@bar.com - password: $6$E5YiAuMIPAwX58jG$miomhVNui/vf7f/3ctB/f0RWSKFxG0YXzrJ9rtJ1ikvzt - series: - - 16 - since: 2016-09-10T16:34:00+03:00 - until: 2017-11-10T16:34:00+03:00 - username: baz - sign-key-sha3-384: RuVvnp4n52GilycjfbbTCI3_L8Y6QlIE75wxMc0KzGV3AUQqVd9GuXoj - - AcLBXAQAAQoABgUCV/UU1wAKCRBKnlMoJQLkZVeLD/9/+hIeVywtzsDA3oxl+P+u9D13y9s6svP - Jd6Wnf4FTw6sq1GjBE4ZA7lrwSaRCUJ9Vcsvf2q9OGPY7mOb2TBxaDe0PbUMjrSrqllSSQwhpNI - zG+NxkkKuxsUmLzFa+k9m6cyojNbw5LFhQZBQCGlr3JYqC0tIREq/UsZxj+90TUC87lDJwkU8GF - s4CR+rejZj4itIcDcVxCSnJH6hv6j2JrJskJmvObqTnoOlcab+JXdamXqbldSP3UIhWoyVjqzkj - +to7mXgx+cCUA9+ngNCcfUG+1huGGTWXPCYkZ78HvErcRlIdeo4d3xwtz1cl/w3vYnq9og1XwsP - Yfetr3boig2qs1Y+j/LpsfYBYncgWjeDfAB9ZZaqQz/oc8n87tIPZDJHrusTlBfop8CqcM4xsKS - d+wnEY8e/F24mdSOYmS1vQCIDiRU3MKb6x138Ud6oHXFlRBbBJqMMctPqWDunWzb5QJ7YR0I39q - BrnEqv5NE0G7w6HOJ1LSPG5Hae3P4T2ea+ATgkb03RPr3KnXnzXg4TtBbW1nytdlgoNc/BafE1H - f3NThcq9gwX4xWZ2PAWnqVPYdDMyCtzW3Ck+o6sIzx+dh4gDLPHIi/6TPe/pUuMop9CBpWwez7V - v1z+1+URx6Xlq3Jq18y5pZ6fY3IDJ6km2nQPMzcm4Q==""") - - ACCOUNT_ASSERTION = textwrap.dedent(""" - type: account-key - authority-id: canonical - revision: 2 - public-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0 - account-id: canonical - name: store - since: 2016-04-01T00:00:00.0Z - body-length: 717 - sign-key-sha3-384: -CvQKAwRQ5h3Ffn10FILJoEZUXOv6km9FwA80-Rcj-f-6jadQ89VRswH - - AcbBTQRWhcGAARAA0KKYYQWuHOrsFVi4p4l7ZzSvX7kLgJFFeFgOkzdWKBTHEnsMKjl5mefFe9j - qe8NlmJdfY7BenP7XeBtwKp700H/t9lLrZbpTNAPHXYxEWFJp5bPqIcJYBZ+29oLVLN1Tc5X482 - vCiDqL8+pPYqBrK2fNlyPlNNSum9wI70rDDL4r6FVvr+osTnGejibdV8JphWX+lrSQDnRSdM8KJ - UM43vTgLGTi9W54oRhsA2OFexRfRksTrnqGoonCjqX5wO3OFSaMDzMsO2MJ/hPfLgDqw53qjzuK - Iec9OL3k5basvu2cj5u9tKwVFDsCKK2GbKUsWWpx2KTpOifmhmiAbzkTHbH9KaoMS7p0kJwhTQG - o9aJ9VMTWHJc/NCBx7eu451u6d46sBPCXS/OMUh2766fQmoRtO1OwCTxsRKG2kkjbMn54UdFULl - VfzvyghMNRKIezsEkmM8wueTqGUGZWa6CEZqZKwhe/PROxOPYzqtDH18XZknbU1n5lNb7vNfem9 - 2ai+3+JyFnW9UhfvpVF7gzAgdyCqNli4C6BIN43uwoS8HkykocZS/+Gv52aUQ/NZ8BKOHLw+7an - Q0o8W9ltSLZbEMxFIPSN0stiZlkXAp6DLyvh1Y4wXSynDjUondTpej2fSvSlCz/W5v5V7qA4nIc - vUvV7RjVzv17ut0AEQEAAQ== - - AcLDXAQAAQoABgUCV83k9QAKCRDUpVvql9g3IBT8IACKZ7XpiBZ3W4lqbPssY6On81WmxQLtvsM - WTp6zZpl/wWOSt2vMNUk9pvcmrNq1jG9CuhDfWFLGXEjcrrmVkN3YuCOajMSPFCGrxsIBLSRt/b - nrKykdLAAzMfG8rP1d82bjFFiIieE+urQ0Kcv09Jtdvavq3JT1Tek5mFyyfhHNlQEKOzWqmRWiL - 3c3VOZUs1ZD8TSlnuq/x+5T0X0YtOyGjSlVxk7UybbyMNd6MZfNaMpIG4x+mxD3KHFtBAC7O6kL - eX3i6j5nCY5UABfA3DZEAkWP4zlmdBEOvZ9t293NaDdOpzsUHRkoi0Zez/9BHQ/kwx/uNc2WqrY - inCmu16JGNeXqsyinnLl7Ghn2RwhvDMlLxF6RTx8xdx1yk6p3PBTwhZMUvuZGjUtN/AG8BmVJQ1 - rsGSRkkSywvnhVJRB2sudnrMBmNS2goJbzSbmJnOlBrd2WsV0T9SgNMWZBiov3LvU4o2SmAb6b+ - rYwh8H5QHcuuYJuxDjFhPswIp6Wes5T6hUicf3SWtObcDS4HSkVS4ImBjjX9YgCuFy7QdnooOWE - aPvkRw3XCVeYq0K6w9GRsk1YFErD4XmXXZjDYY650MX9v42Sz5MmphHV8jdIY5ssbadwFSe2rCQ - 6UX08zy7RsIb19hTndE6ncvSNDChUR9eEnCm73eYaWTWTnq1cxdVP/s52r8uss++OYOkPWqh5nO - haRn7INjH/yZX4qXjNXlTjo0PnHH0q08vNKDwLhxS+D9du+70FeacXFyLIbcWllSbJ7DmbumGpF - yYbtj3FDDPzachFQdIG3lSt+cSUGeyfSs6wVtc3cIPka/2Urx7RprfmoWSI6+a5NcLdj0u2z8O9 - HxeIgxDpg/3gT8ZIuFKePMcLDM19Fh/p0ysCsX+84B9chNWtsMSmIaE57V+959MVtsLu7SLb9gi - skrju0pQCwsu2wHMLTNd1f3PTHmrr49hxetTus07HSQUApMtAGKzQilF5zqFjbyaTd4xgQbd+PK - 
CjFyzQTDOcUhXpuUGt/IzlqiFfsCsmbj2K4KdSNYMlqIgZ3Azu8KvZLIhsyN7v5vNIZSPfEbjde - ClU9r0VRiJmtYBUjcSghD9LWn+yRLwOxhfQVjm0cBwIt5R/yPF/qC76yIVuWUtM5Y2/zJR1J8OF - qWchvlImHtvDzS9FQeLyzJAOjvZ2CnWp2gILgUz0WQdOk1Dq8ax7KS9BQ42zxw9EZAEPw3PEFqR - IQsRTONp+iVS8YxSmoYZjDlCgRMWUmawez/Fv5b9Fb/XkO5Eq4e+KfrpUujXItaipb+tV8h5v3t - oG3Ie3WOHrVjCLXIdYslpL1O4nadqR6Xv58pHj6k""") - - test_assertions = [ACCOUNT_ASSERTION, SYSTEM_USER_ASSERTION] - - def setUp(self): - super(TestSnapConfig, self).setUp() - self.subp = util.subp - self.new_root = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.new_root) - - def _get_cloud(self, distro, metadata=None): - self.patchUtils(self.new_root) - paths = helpers.Paths({}) - cls = distros.fetch(distro) - mydist = cls(distro, {}, paths) - myds = DataSourceNone.DataSourceNone({}, mydist, paths) - if metadata: - myds.metadata.update(metadata) - return cloud.Cloud(myds, paths, {}, mydist, None) - - @mock.patch('cloudinit.util.write_file') - @mock.patch('cloudinit.util.subp') - def test_snap_config_add_assertions(self, msubp, mwrite): - add_assertions(self.test_assertions) - - combined = "\n".join(self.test_assertions) - mwrite.assert_any_call(ASSERTIONS_FILE, combined.encode('utf-8')) - msubp.assert_called_with(['snap', 'ack', ASSERTIONS_FILE], - capture=True) - - def test_snap_config_add_assertions_empty(self): - self.assertRaises(ValueError, add_assertions, []) - - def test_add_assertions_nonlist(self): - self.assertRaises(ValueError, add_assertions, {}) - - @mock.patch('cloudinit.util.write_file') - @mock.patch('cloudinit.util.subp') - def test_snap_config_add_assertions_ack_fails(self, msubp, mwrite): - msubp.side_effect = [util.ProcessExecutionError("Invalid assertion")] - self.assertRaises(util.ProcessExecutionError, add_assertions, - self.test_assertions) - - @mock.patch('cloudinit.config.cc_snap_config.add_assertions') - @mock.patch('cloudinit.config.cc_snap_config.util') - def test_snap_config_handle_no_config(self, mock_util, mock_add): - cfg = {} - cc = self._get_cloud('ubuntu') - cc.distro = mock.MagicMock() - cc.distro.name = 'ubuntu' - mock_util.which.return_value = None - snap_handle('snap_config', cfg, cc, LOG, None) - mock_add.assert_not_called() - - def test_snap_config_add_snap_user_no_config(self): - usercfg = add_snap_user(cfg=None) - self.assertIsNone(usercfg) - - def test_snap_config_add_snap_user_not_dict(self): - cfg = ['foobar'] - self.assertRaises(ValueError, add_snap_user, cfg) - - def test_snap_config_add_snap_user_no_email(self): - cfg = {'assertions': [], 'known': True} - usercfg = add_snap_user(cfg=cfg) - self.assertIsNone(usercfg) - - @mock.patch('cloudinit.config.cc_snap_config.util') - def test_snap_config_add_snap_user_email_only(self, mock_util): - email = 'janet@planetjanet.org' - cfg = {'email': email} - mock_util.which.return_value = None - mock_util.system_is_snappy.return_value = True - mock_util.subp.side_effect = [ - ("false\n", ""), # snap managed - ] - - usercfg = add_snap_user(cfg=cfg) - - self.assertEqual(usercfg, {'snapuser': email, 'known': False}) - - @mock.patch('cloudinit.config.cc_snap_config.util') - def test_snap_config_add_snap_user_email_known(self, mock_util): - email = 'janet@planetjanet.org' - known = True - cfg = {'email': email, 'known': known} - mock_util.which.return_value = None - mock_util.system_is_snappy.return_value = True - mock_util.subp.side_effect = [ - ("false\n", ""), # snap managed - (self.SYSTEM_USER_ASSERTION, ""), # snap known system-user - ] - - usercfg = add_snap_user(cfg=cfg) - - 
self.assertEqual(usercfg, {'snapuser': email, 'known': known}) - - @mock.patch('cloudinit.config.cc_snap_config.add_assertions') - @mock.patch('cloudinit.config.cc_snap_config.util') - def test_snap_config_handle_system_not_snappy(self, mock_util, mock_add): - cfg = {'snappy': {'assertions': self.test_assertions}} - cc = self._get_cloud('ubuntu') - cc.distro = mock.MagicMock() - cc.distro.name = 'ubuntu' - mock_util.which.return_value = None - mock_util.system_is_snappy.return_value = False - - snap_handle('snap_config', cfg, cc, LOG, None) - - mock_add.assert_not_called() - - @mock.patch('cloudinit.config.cc_snap_config.add_assertions') - @mock.patch('cloudinit.config.cc_snap_config.util') - def test_snap_config_handle_snapuser(self, mock_util, mock_add): - email = 'janet@planetjanet.org' - cfg = { - 'snappy': { - 'assertions': self.test_assertions, - 'email': email, - } - } - cc = self._get_cloud('ubuntu') - cc.distro = mock.MagicMock() - cc.distro.name = 'ubuntu' - mock_util.which.return_value = None - mock_util.system_is_snappy.return_value = True - mock_util.subp.side_effect = [ - ("false\n", ""), # snap managed - ] - - snap_handle('snap_config', cfg, cc, LOG, None) - - mock_add.assert_called_with(self.test_assertions) - usercfg = {'snapuser': email, 'known': False} - cc.distro.create_user.assert_called_with(email, **usercfg) - - @mock.patch('cloudinit.config.cc_snap_config.add_assertions') - @mock.patch('cloudinit.config.cc_snap_config.util') - def test_snap_config_handle_snapuser_known(self, mock_util, mock_add): - email = 'janet@planetjanet.org' - cfg = { - 'snappy': { - 'assertions': self.test_assertions, - 'email': email, - 'known': True, - } - } - cc = self._get_cloud('ubuntu') - cc.distro = mock.MagicMock() - cc.distro.name = 'ubuntu' - mock_util.which.return_value = None - mock_util.system_is_snappy.return_value = True - mock_util.subp.side_effect = [ - ("false\n", ""), # snap managed - (self.SYSTEM_USER_ASSERTION, ""), # snap known system-user - ] - - snap_handle('snap_config', cfg, cc, LOG, None) - - mock_add.assert_called_with(self.test_assertions) - usercfg = {'snapuser': email, 'known': True} - cc.distro.create_user.assert_called_with(email, **usercfg) - - @mock.patch('cloudinit.config.cc_snap_config.add_assertions') - @mock.patch('cloudinit.config.cc_snap_config.util') - def test_snap_config_handle_snapuser_known_managed(self, mock_util, - mock_add): - email = 'janet@planetjanet.org' - cfg = { - 'snappy': { - 'assertions': self.test_assertions, - 'email': email, - 'known': True, - } - } - cc = self._get_cloud('ubuntu') - cc.distro = mock.MagicMock() - cc.distro.name = 'ubuntu' - mock_util.which.return_value = None - mock_util.system_is_snappy.return_value = True - mock_util.subp.side_effect = [ - ("true\n", ""), # snap managed - ] - - snap_handle('snap_config', cfg, cc, LOG, None) - - mock_add.assert_called_with(self.test_assertions) - cc.distro.create_user.assert_not_called() - - @mock.patch('cloudinit.config.cc_snap_config.add_assertions') - @mock.patch('cloudinit.config.cc_snap_config.util') - def test_snap_config_handle_snapuser_known_no_assertion(self, mock_util, - mock_add): - email = 'janet@planetjanet.org' - cfg = { - 'snappy': { - 'assertions': [self.ACCOUNT_ASSERTION], - 'email': email, - 'known': True, - } - } - cc = self._get_cloud('ubuntu') - cc.distro = mock.MagicMock() - cc.distro.name = 'ubuntu' - mock_util.which.return_value = None - mock_util.system_is_snappy.return_value = True - mock_util.subp.side_effect = [ - ("true\n", ""), # snap managed - ("", 
""), # snap known system-user - ] - - snap_handle('snap_config', cfg, cc, LOG, None) - - mock_add.assert_called_with([self.ACCOUNT_ASSERTION]) - cc.distro.create_user.assert_not_called() - - -def makeop_tmpd(tmpd, op, name, config=None, path=None, cfgfile=None): - if cfgfile: - cfgfile = os.path.sep.join([tmpd, cfgfile]) - if path: - path = os.path.sep.join([tmpd, path]) - return(makeop(op=op, name=name, config=config, path=path, cfgfile=cfgfile)) - - -def apply_patches(patches): - ret = [] - for (ref, name, replace) in patches: - if replace is None: - continue - orig = getattr(ref, name) - setattr(ref, name, replace) - ret.append((ref, name, orig)) - return ret - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_spacewalk.py b/tests/unittests/test_handler/test_handler_spacewalk.py index ddbf4a79..410e6f77 100644 --- a/tests/unittests/test_handler/test_handler_spacewalk.py +++ b/tests/unittests/test_handler/test_handler_spacewalk.py @@ -6,11 +6,7 @@ from cloudinit import util from cloudinit.tests import helpers import logging - -try: - from unittest import mock -except ImportError: - import mock +from unittest import mock LOG = logging.getLogger(__name__) diff --git a/tests/unittests/test_handler/test_handler_timezone.py b/tests/unittests/test_handler/test_handler_timezone.py index 27eedded..50c45363 100644 --- a/tests/unittests/test_handler/test_handler_timezone.py +++ b/tests/unittests/test_handler/test_handler_timezone.py @@ -18,8 +18,8 @@ from cloudinit.tests import helpers as t_help from configobj import ConfigObj import logging import shutil -from six import BytesIO import tempfile +from io import BytesIO LOG = logging.getLogger(__name__) diff --git a/tests/unittests/test_handler/test_handler_write_files.py b/tests/unittests/test_handler/test_handler_write_files.py index bc8756ca..ed0a4da2 100644 --- a/tests/unittests/test_handler/test_handler_write_files.py +++ b/tests/unittests/test_handler/test_handler_write_files.py @@ -1,17 +1,16 @@ # This file is part of cloud-init. See LICENSE file for license information. 
-from cloudinit.config.cc_write_files import write_files, decode_perms -from cloudinit import log as logging -from cloudinit import util - -from cloudinit.tests.helpers import CiTestCase, FilesystemMockingTestCase - import base64 import gzip +import io import shutil -import six import tempfile +from cloudinit import log as logging +from cloudinit import util +from cloudinit.config.cc_write_files import write_files, decode_perms +from cloudinit.tests.helpers import CiTestCase, FilesystemMockingTestCase + LOG = logging.getLogger(__name__) YAML_TEXT = """ @@ -138,7 +137,7 @@ class TestDecodePerms(CiTestCase): def _gzip_bytes(data): - buf = six.BytesIO() + buf = io.BytesIO() fp = None try: fp = gzip.GzipFile(fileobj=buf, mode="wb") diff --git a/tests/unittests/test_handler/test_handler_yum_add_repo.py b/tests/unittests/test_handler/test_handler_yum_add_repo.py index b90a3af3..0675bd8f 100644 --- a/tests/unittests/test_handler/test_handler_yum_add_repo.py +++ b/tests/unittests/test_handler/test_handler_yum_add_repo.py @@ -7,8 +7,8 @@ from cloudinit.tests import helpers import logging import shutil -from six import StringIO import tempfile +from io import StringIO LOG = logging.getLogger(__name__) diff --git a/tests/unittests/test_handler/test_handler_zypper_add_repo.py b/tests/unittests/test_handler/test_handler_zypper_add_repo.py index 72ab6c08..9685ff28 100644 --- a/tests/unittests/test_handler/test_handler_zypper_add_repo.py +++ b/tests/unittests/test_handler/test_handler_zypper_add_repo.py @@ -2,6 +2,7 @@ import glob import os +from io import StringIO from cloudinit.config import cc_zypper_add_repo from cloudinit import util @@ -10,7 +11,6 @@ from cloudinit.tests import helpers from cloudinit.tests.helpers import mock import logging -from six import StringIO LOG = logging.getLogger(__name__) diff --git a/tests/unittests/test_handler/test_schema.py b/tests/unittests/test_handler/test_schema.py index 1bad07f6..987a89c9 100644 --- a/tests/unittests/test_handler/test_schema.py +++ b/tests/unittests/test_handler/test_schema.py @@ -10,7 +10,7 @@ from cloudinit.tests.helpers import CiTestCase, mock, skipUnlessJsonSchema from copy import copy import os -from six import StringIO +from io import StringIO from textwrap import dedent from yaml import safe_load @@ -28,6 +28,7 @@ class GetSchemaTest(CiTestCase): 'cc_runcmd', 'cc_snap', 'cc_ubuntu_advantage', + 'cc_ubuntu_drivers', 'cc_zypper_add_repo' ], [subschema['id'] for subschema in schema['allOf']]) diff --git a/tests/unittests/test_log.py b/tests/unittests/test_log.py index cd6296d6..e069a487 100644 --- a/tests/unittests/test_log.py +++ b/tests/unittests/test_log.py @@ -2,14 +2,15 @@ """Tests for cloudinit.log """ -from cloudinit.analyze.dump import CLOUD_INIT_ASCTIME_FMT -from cloudinit import log as ci_logging -from cloudinit.tests.helpers import CiTestCase import datetime +import io import logging -import six import time +from cloudinit import log as ci_logging +from cloudinit.analyze.dump import CLOUD_INIT_ASCTIME_FMT +from cloudinit.tests.helpers import CiTestCase + class TestCloudInitLogger(CiTestCase): @@ -18,7 +19,7 @@ class TestCloudInitLogger(CiTestCase): # of sys.stderr, we'll plug in a StringIO() object so we can see # what gets logged logging.Formatter.converter = time.gmtime - self.ci_logs = six.StringIO() + self.ci_logs = io.StringIO() self.ci_root = logging.getLogger() console = logging.StreamHandler(self.ci_logs) console.setFormatter(logging.Formatter(ci_logging.DEF_CON_FORMAT)) diff --git a/tests/unittests/test_merging.py 
b/tests/unittests/test_merging.py index 3a5072c7..10871bcf 100644 --- a/tests/unittests/test_merging.py +++ b/tests/unittests/test_merging.py @@ -13,13 +13,11 @@ import glob import os import random import re -import six import string SOURCE_PAT = "source*.*yaml" EXPECTED_PAT = "expected%s.yaml" -TYPES = [dict, str, list, tuple, None] -TYPES.extend(six.integer_types) +TYPES = [dict, str, list, tuple, None, int] def _old_mergedict(src, cand): @@ -85,7 +83,7 @@ def _make_dict(current_depth, max_depth, rand): pass if t in [tuple]: base = tuple(base) - elif t in six.integer_types: + elif t in [int]: base = rand.randint(0, 2 ** 8) elif t in [str]: base = _random_str(rand) diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 195f261c..bedd05fe 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -9,6 +9,7 @@ from cloudinit.net import ( from cloudinit.sources.helpers import openstack from cloudinit import temp_utils from cloudinit import util +from cloudinit import safeyaml as yaml from cloudinit.tests.helpers import ( CiTestCase, FilesystemMockingTestCase, dir2dict, mock, populate_dir) @@ -19,8 +20,10 @@ import gzip import io import json import os +import re import textwrap -import yaml +from yaml.serializer import Serializer + DHCP_CONTENT_1 = """ DEVICE='eth0' @@ -78,7 +81,7 @@ DHCP6_EXPECTED_1 = { STATIC_CONTENT_1 = """ DEVICE='eth1' -PROTO='static' +PROTO='none' IPV4ADDR='10.0.0.2' IPV4BROADCAST='10.0.0.255' IPV4NETMASK='255.255.255.0' @@ -102,6 +105,357 @@ STATIC_EXPECTED_1 = { 'address': '10.0.0.2'}], } +V1_NAMESERVER_ALIAS = """ +config: +- id: eno1 + mac_address: 08:94:ef:51:ae:e0 + mtu: 1500 + name: eno1 + subnets: + - type: manual + type: physical +- id: eno2 + mac_address: 08:94:ef:51:ae:e1 + mtu: 1500 + name: eno2 + subnets: + - type: manual + type: physical +- id: eno3 + mac_address: 08:94:ef:51:ae:de + mtu: 1500 + name: eno3 + subnets: + - type: manual + type: physical +- bond_interfaces: + - eno1 + - eno3 + id: bondM + mac_address: 08:94:ef:51:ae:e0 + mtu: 1500 + name: bondM + params: + bond-downdelay: 0 + bond-lacp-rate: fast + bond-miimon: 100 + bond-mode: 802.3ad + bond-updelay: 0 + bond-xmit-hash-policy: layer3+4 + subnets: + - address: 10.101.10.47/23 + gateway: 10.101.11.254 + type: static + type: bond +- id: eno4 + mac_address: 08:94:ef:51:ae:df + mtu: 1500 + name: eno4 + subnets: + - type: manual + type: physical +- id: enp0s20f0u1u6 + mac_address: 0a:94:ef:51:a4:b9 + mtu: 1500 + name: enp0s20f0u1u6 + subnets: + - type: manual + type: physical +- id: enp216s0f0 + mac_address: 68:05:ca:81:7c:e8 + mtu: 9000 + name: enp216s0f0 + subnets: + - type: manual + type: physical +- id: enp216s0f1 + mac_address: 68:05:ca:81:7c:e9 + mtu: 9000 + name: enp216s0f1 + subnets: + - type: manual + type: physical +- id: enp47s0f0 + mac_address: 68:05:ca:64:d3:6c + mtu: 9000 + name: enp47s0f0 + subnets: + - type: manual + type: physical +- bond_interfaces: + - enp216s0f0 + - enp47s0f0 + id: bond0 + mac_address: 68:05:ca:64:d3:6c + mtu: 9000 + name: bond0 + params: + bond-downdelay: 0 + bond-lacp-rate: fast + bond-miimon: 100 + bond-mode: 802.3ad + bond-updelay: 0 + bond-xmit-hash-policy: layer3+4 + subnets: + - type: manual + type: bond +- id: bond0.3502 + mtu: 9000 + name: bond0.3502 + subnets: + - address: 172.20.80.4/25 + type: static + type: vlan + vlan_id: 3502 + vlan_link: bond0 +- id: bond0.3503 + mtu: 9000 + name: bond0.3503 + subnets: + - address: 172.20.80.129/25 + type: static + type: vlan + vlan_id: 3503 + vlan_link: bond0 +- id: 
enp47s0f1 + mac_address: 68:05:ca:64:d3:6d + mtu: 9000 + name: enp47s0f1 + subnets: + - type: manual + type: physical +- bond_interfaces: + - enp216s0f1 + - enp47s0f1 + id: bond1 + mac_address: 68:05:ca:64:d3:6d + mtu: 9000 + name: bond1 + params: + bond-downdelay: 0 + bond-lacp-rate: fast + bond-miimon: 100 + bond-mode: 802.3ad + bond-updelay: 0 + bond-xmit-hash-policy: layer3+4 + subnets: + - address: 10.101.8.65/26 + routes: + - destination: 213.119.192.0/24 + gateway: 10.101.8.126 + metric: 0 + type: static + type: bond +- address: + - 10.101.10.1 + - 10.101.10.2 + - 10.101.10.3 + - 10.101.10.5 + search: + - foo.bar + - maas + type: nameserver +version: 1 +""" + +NETPLAN_NO_ALIAS = """ +network: + version: 2 + ethernets: + eno1: + match: + macaddress: 08:94:ef:51:ae:e0 + mtu: 1500 + set-name: eno1 + eno2: + match: + macaddress: 08:94:ef:51:ae:e1 + mtu: 1500 + set-name: eno2 + eno3: + match: + macaddress: 08:94:ef:51:ae:de + mtu: 1500 + set-name: eno3 + eno4: + match: + macaddress: 08:94:ef:51:ae:df + mtu: 1500 + set-name: eno4 + enp0s20f0u1u6: + match: + macaddress: 0a:94:ef:51:a4:b9 + mtu: 1500 + set-name: enp0s20f0u1u6 + enp216s0f0: + match: + macaddress: 68:05:ca:81:7c:e8 + mtu: 9000 + set-name: enp216s0f0 + enp216s0f1: + match: + macaddress: 68:05:ca:81:7c:e9 + mtu: 9000 + set-name: enp216s0f1 + enp47s0f0: + match: + macaddress: 68:05:ca:64:d3:6c + mtu: 9000 + set-name: enp47s0f0 + enp47s0f1: + match: + macaddress: 68:05:ca:64:d3:6d + mtu: 9000 + set-name: enp47s0f1 + bonds: + bond0: + interfaces: + - enp216s0f0 + - enp47s0f0 + macaddress: 68:05:ca:64:d3:6c + mtu: 9000 + parameters: + down-delay: 0 + lacp-rate: fast + mii-monitor-interval: 100 + mode: 802.3ad + transmit-hash-policy: layer3+4 + up-delay: 0 + bond1: + addresses: + - 10.101.8.65/26 + interfaces: + - enp216s0f1 + - enp47s0f1 + macaddress: 68:05:ca:64:d3:6d + mtu: 9000 + nameservers: + addresses: + - 10.101.10.1 + - 10.101.10.2 + - 10.101.10.3 + - 10.101.10.5 + search: + - foo.bar + - maas + parameters: + down-delay: 0 + lacp-rate: fast + mii-monitor-interval: 100 + mode: 802.3ad + transmit-hash-policy: layer3+4 + up-delay: 0 + routes: + - metric: 0 + to: 213.119.192.0/24 + via: 10.101.8.126 + bondM: + addresses: + - 10.101.10.47/23 + gateway4: 10.101.11.254 + interfaces: + - eno1 + - eno3 + macaddress: 08:94:ef:51:ae:e0 + mtu: 1500 + nameservers: + addresses: + - 10.101.10.1 + - 10.101.10.2 + - 10.101.10.3 + - 10.101.10.5 + search: + - foo.bar + - maas + parameters: + down-delay: 0 + lacp-rate: fast + mii-monitor-interval: 100 + mode: 802.3ad + transmit-hash-policy: layer3+4 + up-delay: 0 + vlans: + bond0.3502: + addresses: + - 172.20.80.4/25 + id: 3502 + link: bond0 + mtu: 9000 + nameservers: + addresses: + - 10.101.10.1 + - 10.101.10.2 + - 10.101.10.3 + - 10.101.10.5 + search: + - foo.bar + - maas + bond0.3503: + addresses: + - 172.20.80.129/25 + id: 3503 + link: bond0 + mtu: 9000 + nameservers: + addresses: + - 10.101.10.1 + - 10.101.10.2 + - 10.101.10.3 + - 10.101.10.5 + search: + - foo.bar + - maas +""" + +NETPLAN_BOND_GRAT_ARP = """ +network: + bonds: + bond0: + interfaces: + - ens3 + macaddress: 68:05:ca:64:d3:6c + mtu: 9000 + parameters: + gratuitious-arp: 1 + bond1: + interfaces: + - ens4 + macaddress: 68:05:ca:64:d3:6d + mtu: 9000 + parameters: + gratuitous-arp: 2 + ethernets: + ens3: + dhcp4: false + dhcp6: false + match: + macaddress: 52:54:00:ab:cd:ef + ens4: + dhcp4: false + dhcp6: false + match: + macaddress: 52:54:00:11:22:ff + version: 2 +""" + +NETPLAN_DHCP_FALSE = """ +version: 2 +ethernets: + ens3: 
+ match: + macaddress: 52:54:00:ab:cd:ef + dhcp4: false + dhcp6: false + addresses: + - 192.168.42.100/24 + - 2001:db8::100/32 + gateway4: 192.168.42.1 + gateway6: 2001:db8::1 + nameservers: + search: [example.com] + addresses: [192.168.42.53, 1.1.1.1] +""" + # Examples (and expected outputs for various renderers). OS_SAMPLES = [ { @@ -135,17 +489,11 @@ OS_SAMPLES = [ """ # Created by cloud-init on instance boot automatically, do not edit. # -BOOTPROTO=none -DEFROUTE=yes -DEVICE=eth0 -GATEWAY=172.19.3.254 -HWADDR=fa:16:3e:ed:9a:59 +BOOTPROTO=static IPADDR=172.19.1.34 +LLADDR=fa:16:3e:ed:9a:59 NETMASK=255.255.252.0 -NM_CONTROLLED=no -ONBOOT=yes -TYPE=Ethernet -USERCTL=no +STARTMODE=auto """.lstrip()), ('etc/resolv.conf', """ @@ -160,7 +508,7 @@ nameserver 172.19.0.12 [main] dns = none """.lstrip()), - ('etc/udev/rules.d/70-persistent-net.rules', + ('etc/udev/rules.d/85-persistent-net-cloud-init.rules', "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ', 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))], 'out_sysconfig_rhel': [ @@ -235,19 +583,13 @@ dns = none """ # Created by cloud-init on instance boot automatically, do not edit. # -BOOTPROTO=none -DEFROUTE=yes -DEVICE=eth0 -GATEWAY=172.19.3.254 -HWADDR=fa:16:3e:ed:9a:59 +BOOTPROTO=static IPADDR=172.19.1.34 IPADDR1=10.0.0.10 +LLADDR=fa:16:3e:ed:9a:59 NETMASK=255.255.252.0 NETMASK1=255.255.255.0 -NM_CONTROLLED=no -ONBOOT=yes -TYPE=Ethernet -USERCTL=no +STARTMODE=auto """.lstrip()), ('etc/resolv.conf', """ @@ -262,7 +604,7 @@ nameserver 172.19.0.12 [main] dns = none """.lstrip()), - ('etc/udev/rules.d/70-persistent-net.rules', + ('etc/udev/rules.d/85-persistent-net-cloud-init.rules', "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ', 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))], 'out_sysconfig_rhel': [ @@ -359,21 +701,14 @@ dns = none """ # Created by cloud-init on instance boot automatically, do not edit. 
# -BOOTPROTO=none -DEFROUTE=yes -DEVICE=eth0 -GATEWAY=172.19.3.254 -HWADDR=fa:16:3e:ed:9a:59 +BOOTPROTO=static IPADDR=172.19.1.34 -IPV6ADDR=2001:DB8::10/64 -IPV6ADDR_SECONDARIES="2001:DB9::10/64 2001:DB10::10/64" -IPV6INIT=yes -IPV6_DEFAULTGW=2001:DB8::1 +IPADDR6=2001:DB8::10/64 +IPADDR6_1=2001:DB9::10/64 +IPADDR6_2=2001:DB10::10/64 +LLADDR=fa:16:3e:ed:9a:59 NETMASK=255.255.252.0 -NM_CONTROLLED=no -ONBOOT=yes -TYPE=Ethernet -USERCTL=no +STARTMODE=auto """.lstrip()), ('etc/resolv.conf', """ @@ -388,7 +723,7 @@ nameserver 172.19.0.12 [main] dns = none """.lstrip()), - ('etc/udev/rules.d/70-persistent-net.rules', + ('etc/udev/rules.d/85-persistent-net-cloud-init.rules', "".join(['SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ', 'ATTR{address}=="fa:16:3e:ed:9a:59", NAME="eth0"\n']))], 'out_sysconfig_rhel': [ @@ -518,7 +853,19 @@ NETWORK_CONFIGS = { via: 65.61.151.37 set-name: eth99 """).rstrip(' '), - 'expected_sysconfig': { + 'expected_sysconfig_opensuse': { + 'ifcfg-eth1': textwrap.dedent("""\ + BOOTPROTO=static + LLADDR=cf:d6:af:48:e8:80 + STARTMODE=auto"""), + 'ifcfg-eth99': textwrap.dedent("""\ + BOOTPROTO=dhcp4 + LLADDR=c0:d6:9f:2c:e8:80 + IPADDR=192.168.21.3 + NETMASK=255.255.255.0 + STARTMODE=auto"""), + }, + 'expected_sysconfig_rhel': { 'ifcfg-eth1': textwrap.dedent("""\ BOOTPROTO=none DEVICE=eth1 @@ -531,6 +878,7 @@ NETWORK_CONFIGS = { BOOTPROTO=dhcp DEFROUTE=yes DEVICE=eth99 + DHCLIENT_SET_DEFAULT_ROUTE=yes DNS1=8.8.8.8 DNS2=8.8.4.4 DOMAIN="barley.maas sach.maas" @@ -594,6 +942,12 @@ NETWORK_CONFIGS = { dhcp4: true dhcp6: true """).rstrip(' '), + 'expected_sysconfig_opensuse': { + 'ifcfg-iface0': textwrap.dedent("""\ + BOOTPROTO=dhcp + DHCLIENT6_MODE=managed + STARTMODE=auto""") + }, 'yaml': textwrap.dedent("""\ version: 1 config: @@ -627,8 +981,8 @@ NETWORK_CONFIGS = { addresses: - 192.168.14.2/24 - 2001:1::1/64 + ipv6-mtu: 1500 mtu: 9000 - mtu6: 1500 """).rstrip(' '), 'yaml': textwrap.dedent("""\ version: 1 @@ -644,7 +998,17 @@ NETWORK_CONFIGS = { address: 2001:1::1/64 mtu: 1500 """).rstrip(' '), - 'expected_sysconfig': { + 'expected_sysconfig_opensuse': { + 'ifcfg-iface0': textwrap.dedent("""\ + BOOTPROTO=static + IPADDR=192.168.14.2 + IPADDR6=2001:1::1/64 + NETMASK=255.255.255.0 + STARTMODE=auto + MTU=9000 + """), + }, + 'expected_sysconfig_rhel': { 'ifcfg-iface0': textwrap.dedent("""\ BOOTPROTO=none DEVICE=iface0 @@ -661,6 +1025,23 @@ NETWORK_CONFIGS = { """), }, }, + 'v6_and_v4': { + 'expected_sysconfig_opensuse': { + 'ifcfg-iface0': textwrap.dedent("""\ + BOOTPROTO=dhcp + DHCLIENT6_MODE=managed + STARTMODE=auto""") + }, + 'yaml': textwrap.dedent("""\ + version: 1 + config: + - type: 'physical' + name: 'iface0' + subnets: + - type: dhcp6 + - type: dhcp4 + """).rstrip(' '), + }, 'dhcpv6_only': { 'expected_eni': textwrap.dedent("""\ auto lo @@ -684,7 +1065,14 @@ NETWORK_CONFIGS = { subnets: - {'type': 'dhcp6'} """).rstrip(' '), - 'expected_sysconfig': { + 'expected_sysconfig_opensuse': { + 'ifcfg-iface0': textwrap.dedent("""\ + BOOTPROTO=dhcp6 + DHCLIENT6_MODE=managed + STARTMODE=auto + """), + }, + 'expected_sysconfig_rhel': { 'ifcfg-iface0': textwrap.dedent("""\ BOOTPROTO=none DEVICE=iface0 @@ -698,6 +1086,255 @@ NETWORK_CONFIGS = { """), }, }, + 'dhcpv6_accept_ra': { + 'expected_eni': textwrap.dedent("""\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet6 dhcp + accept_ra 1 + """).rstrip(' '), + 'expected_netplan': textwrap.dedent(""" + network: + version: 2 + ethernets: + iface0: + accept-ra: true + dhcp6: true + """).rstrip(' '), + 'yaml_v1': 
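From here on the fixtures carry one expectation per sysconfig dialect: the openSUSE renderer emits LLADDR/STARTMODE-style keys while RHEL keeps HWADDR/ONBOOT, so the old shared 'expected_sysconfig' key is split into 'expected_sysconfig_opensuse' and 'expected_sysconfig_rhel'. A hypothetical sketch of the lookup; the helper name is invented, but the keys mirror the `expected_name` attribute the renderer test classes below set:

NETWORK_CONFIGS = {
    'static': {
        'expected_sysconfig_rhel': {'ifcfg-iface0': 'BOOTPROTO=none'},
        'expected_sysconfig_opensuse': {'ifcfg-iface0': 'BOOTPROTO=static'},
    },
}

def expected_for(entry, distro):
    # Fall back to a shared key when a config has no per-distro variant.
    return entry.get('expected_sysconfig_%s' % distro,
                     entry.get('expected_sysconfig'))

assert 'BOOTPROTO=none' in expected_for(
    NETWORK_CONFIGS['static'], 'rhel')['ifcfg-iface0']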
textwrap.dedent("""\ + version: 1 + config: + - type: 'physical' + name: 'iface0' + subnets: + - {'type': 'dhcp6'} + accept-ra: true + """).rstrip(' '), + 'yaml_v2': textwrap.dedent("""\ + version: 2 + ethernets: + iface0: + dhcp6: true + accept-ra: true + """).rstrip(' '), + 'expected_sysconfig_opensuse': { + 'ifcfg-iface0': textwrap.dedent("""\ + BOOTPROTO=dhcp6 + DHCLIENT6_MODE=managed + STARTMODE=auto + """), + }, + 'expected_sysconfig_rhel': { + 'ifcfg-iface0': textwrap.dedent("""\ + BOOTPROTO=none + DEVICE=iface0 + DHCPV6C=yes + IPV6INIT=yes + IPV6_FORCE_ACCEPT_RA=yes + DEVICE=iface0 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """), + }, + }, + 'dhcpv6_reject_ra': { + 'expected_eni': textwrap.dedent("""\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet6 dhcp + accept_ra 0 + """).rstrip(' '), + 'expected_netplan': textwrap.dedent(""" + network: + version: 2 + ethernets: + iface0: + accept-ra: false + dhcp6: true + """).rstrip(' '), + 'yaml_v1': textwrap.dedent("""\ + version: 1 + config: + - type: 'physical' + name: 'iface0' + subnets: + - {'type': 'dhcp6'} + accept-ra: false + """).rstrip(' '), + 'yaml_v2': textwrap.dedent("""\ + version: 2 + ethernets: + iface0: + dhcp6: true + accept-ra: false + """).rstrip(' '), + 'expected_sysconfig_opensuse': { + 'ifcfg-iface0': textwrap.dedent("""\ + BOOTPROTO=dhcp6 + DHCLIENT6_MODE=managed + STARTMODE=auto + """), + }, + 'expected_sysconfig_rhel': { + 'ifcfg-iface0': textwrap.dedent("""\ + BOOTPROTO=none + DEVICE=iface0 + DHCPV6C=yes + IPV6INIT=yes + IPV6_FORCE_ACCEPT_RA=no + DEVICE=iface0 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """), + }, + }, + 'ipv6_slaac': { + 'expected_eni': textwrap.dedent("""\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet6 auto + dhcp 0 + """).rstrip(' '), + 'expected_netplan': textwrap.dedent(""" + network: + version: 2 + ethernets: + iface0: + dhcp6: true + """).rstrip(' '), + 'yaml': textwrap.dedent("""\ + version: 1 + config: + - type: 'physical' + name: 'iface0' + subnets: + - {'type': 'ipv6_slaac'} + """).rstrip(' '), + 'expected_sysconfig_opensuse': { + 'ifcfg-iface0': textwrap.dedent("""\ + BOOTPROTO=dhcp6 + DHCLIENT6_MODE=info + STARTMODE=auto + """), + }, + 'expected_sysconfig_rhel': { + 'ifcfg-iface0': textwrap.dedent("""\ + BOOTPROTO=none + DEVICE=iface0 + IPV6_AUTOCONF=yes + IPV6INIT=yes + DEVICE=iface0 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """), + }, + }, + 'dhcpv6_stateless': { + 'expected_eni': textwrap.dedent("""\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet6 auto + dhcp 1 + """).rstrip(' '), + 'expected_netplan': textwrap.dedent(""" + network: + version: 2 + ethernets: + iface0: + dhcp6: true + """).rstrip(' '), + 'yaml': textwrap.dedent("""\ + version: 1 + config: + - type: 'physical' + name: 'iface0' + subnets: + - {'type': 'ipv6_dhcpv6-stateless'} + """).rstrip(' '), + 'expected_sysconfig_opensuse': { + 'ifcfg-iface0': textwrap.dedent("""\ + BOOTPROTO=dhcp6 + DHCLIENT6_MODE=info + STARTMODE=auto + """), + }, + 'expected_sysconfig_rhel': { + 'ifcfg-iface0': textwrap.dedent("""\ + BOOTPROTO=none + DEVICE=iface0 + DHCPV6C=yes + DHCPV6C_OPTIONS=-S + IPV6_AUTOCONF=yes + IPV6INIT=yes + DEVICE=iface0 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """), + }, + }, + 'dhcpv6_stateful': { + 'expected_eni': textwrap.dedent("""\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet6 dhcp + """).rstrip(' '), + 'expected_netplan': 
textwrap.dedent(""" + network: + version: 2 + ethernets: + iface0: + accept-ra: true + dhcp6: true + """).rstrip(' '), + 'yaml': textwrap.dedent("""\ + version: 1 + config: + - type: 'physical' + name: 'iface0' + subnets: + - {'type': 'ipv6_dhcpv6-stateful'} + accept-ra: true + """).rstrip(' '), + 'expected_sysconfig_opensuse': { + 'ifcfg-iface0': textwrap.dedent("""\ + BOOTPROTO=dhcp6 + DHCLIENT6_MODE=managed + STARTMODE=auto + """), + }, + 'expected_sysconfig_rhel': { + 'ifcfg-iface0': textwrap.dedent("""\ + BOOTPROTO=none + DEVICE=iface0 + DHCPV6C=yes + IPV6INIT=yes + IPV6_FORCE_ACCEPT_RA=yes + DEVICE=iface0 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """), + }, + }, 'all': { 'expected_eni': ("""\ auto lo @@ -728,6 +1365,12 @@ iface eth4 inet manual # control-manual eth5 iface eth5 inet dhcp +auto ib0 +iface ib0 inet static + address 192.168.200.7/24 + mtu 9000 + hwaddress a0:00:02:20:fe:80:00:00:00:00:00:00:ec:0d:9a:03:00:15:e2:c1 + auto bond0 iface bond0 inet6 dhcp bond-mode active-backup @@ -781,8 +1424,8 @@ iface eth0.101 inet static iface eth0.101 inet static address 192.168.2.10/24 -post-up route add -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true -pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true +post-up route add -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true +pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true """), 'expected_netplan': textwrap.dedent(""" network: @@ -881,7 +1524,80 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true - sacchromyces.maas - brettanomyces.maas """).rstrip(' '), - 'expected_sysconfig': { + 'expected_sysconfig_opensuse': { + 'ifcfg-bond0': textwrap.dedent("""\ + BONDING_MASTER=yes + BONDING_OPTS="mode=active-backup """ + """xmit_hash_policy=layer3+4 """ + """miimon=100" + BONDING_SLAVE_0=eth1 + BONDING_SLAVE_1=eth2 + BOOTPROTO=dhcp6 + DHCLIENT6_MODE=managed + LLADDR=aa:bb:cc:dd:ee:ff + STARTMODE=auto"""), + 'ifcfg-bond0.200': textwrap.dedent("""\ + BOOTPROTO=dhcp4 + ETHERDEVICE=bond0 + STARTMODE=auto + VLAN_ID=200"""), + 'ifcfg-br0': textwrap.dedent("""\ + BRIDGE_AGEINGTIME=250 + BOOTPROTO=static + IPADDR=192.168.14.2 + IPADDR6=2001:1::1/64 + LLADDRESS=bb:bb:bb:bb:bb:aa + NETMASK=255.255.255.0 + BRIDGE_PRIORITY=22 + BRIDGE_PORTS='eth3 eth4' + STARTMODE=auto + BRIDGE_STP=off"""), + 'ifcfg-eth0': textwrap.dedent("""\ + BOOTPROTO=static + LLADDR=c0:d6:9f:2c:e8:80 + STARTMODE=auto"""), + 'ifcfg-eth0.101': textwrap.dedent("""\ + BOOTPROTO=static + IPADDR=192.168.0.2 + IPADDR1=192.168.2.10 + MTU=1500 + NETMASK=255.255.255.0 + NETMASK1=255.255.255.0 + ETHERDEVICE=eth0 + STARTMODE=auto + VLAN_ID=101"""), + 'ifcfg-eth1': textwrap.dedent("""\ + BOOTPROTO=none + LLADDR=aa:d6:9f:2c:e8:80 + STARTMODE=hotplug"""), + 'ifcfg-eth2': textwrap.dedent("""\ + BOOTPROTO=none + LLADDR=c0:bb:9f:2c:e8:80 + STARTMODE=hotplug"""), + 'ifcfg-eth3': textwrap.dedent("""\ + BOOTPROTO=static + BRIDGE=yes + LLADDR=66:bb:9f:2c:e8:80 + STARTMODE=auto"""), + 'ifcfg-eth4': textwrap.dedent("""\ + BOOTPROTO=static + BRIDGE=yes + LLADDR=98:bb:9f:2c:e8:80 + STARTMODE=auto"""), + 'ifcfg-eth5': textwrap.dedent("""\ + BOOTPROTO=dhcp + LLADDR=98:bb:9f:2c:e8:8a + STARTMODE=manual"""), + 'ifcfg-ib0': textwrap.dedent("""\ + BOOTPROTO=static + LLADDR=a0:00:02:20:fe:80:00:00:00:00:00:00:ec:0d:9a:03:00:15:e2:c1 + IPADDR=192.168.200.7 + MTU=9000 + NETMASK=255.255.255.0 + STARTMODE=auto + TYPE=InfiniBand"""), + }, + 'expected_sysconfig_rhel': { 'ifcfg-bond0': textwrap.dedent("""\ BONDING_MASTER=yes 
BONDING_OPTS="mode=active-backup """ @@ -901,6 +1617,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true 'ifcfg-bond0.200': textwrap.dedent("""\ BOOTPROTO=dhcp DEVICE=bond0.200 + DHCLIENT_SET_DEFAULT_ROUTE=no NM_CONTROLLED=no ONBOOT=yes PHYSDEV=bond0 @@ -992,11 +1709,23 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true 'ifcfg-eth5': textwrap.dedent("""\ BOOTPROTO=dhcp DEVICE=eth5 + DHCLIENT_SET_DEFAULT_ROUTE=no HWADDR=98:bb:9f:2c:e8:8a NM_CONTROLLED=no ONBOOT=no TYPE=Ethernet - USERCTL=no""") + USERCTL=no"""), + 'ifcfg-ib0': textwrap.dedent("""\ + BOOTPROTO=none + DEVICE=ib0 + HWADDR=a0:00:02:20:fe:80:00:00:00:00:00:00:ec:0d:9a:03:00:15:e2:c1 + IPADDR=192.168.200.7 + MTU=9000 + NETMASK=255.255.255.0 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=InfiniBand + USERCTL=no"""), }, 'yaml': textwrap.dedent(""" version: 1 @@ -1071,6 +1800,15 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true vlan_id: 200 subnets: - type: dhcp4 + # An infiniband + - type: infiniband + name: ib0 + mac_address: >- + a0:00:02:20:fe:80:00:00:00:00:00:00:ec:0d:9a:03:00:15:e2:c1 + subnets: + - type: static + address: 192.168.200.7/24 + mtu: 9000 # A bridge. - type: bridge name: br0 @@ -1155,6 +1893,12 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true bond-mode: active-backup bond_miimon: 100 bond-xmit-hash-policy: "layer3+4" + bond-num-grat-arp: 5 + bond-downdelay: 10 + bond-updelay: 20 + bond-fail-over-mac: active + bond-primary: bond0s0 + bond-primary-reselect: always subnets: - type: static address: 192.168.0.2/24 @@ -1163,17 +1907,18 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true - gateway: 192.168.0.3 netmask: 255.255.255.0 network: 10.1.3.0 - - gateway: 2001:67c:1562:1 - network: 2001:67c:1 - netmask: ffff:ffff:0 - - gateway: 3001:67c:1562:1 - network: 3001:67c:1 - netmask: ffff:ffff:0 - metric: 10000 - type: static address: 192.168.1.2/24 - type: static address: 2001:1::1/92 + routes: + - gateway: 2001:67c:1562:1 + network: 2001:67c:1 + netmask: ffff:ffff:0 + - gateway: 3001:67c:1562:1 + network: 3001:67c:1 + netmask: ffff:ffff:0 + metric: 10000 """), 'expected_netplan': textwrap.dedent(""" network: @@ -1200,9 +1945,15 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true macaddress: aa:bb:cc:dd:e8:ff mtu: 9000 parameters: + down-delay: 10 + fail-over-mac-policy: active + gratuitious-arp: 5 mii-monitor-interval: 100 mode: active-backup + primary: bond0s0 + primary-reselect-policy: always transmit-hash-policy: layer3+4 + up-delay: 20 routes: - to: 10.1.3.0/24 via: 192.168.0.3 @@ -1212,6 +1963,69 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true to: 3001:67c:1/32 via: 3001:67c:1562:1 """), + 'expected_eni': textwrap.dedent("""\ +auto lo +iface lo inet loopback + +auto bond0s0 +iface bond0s0 inet manual + bond-downdelay 10 + bond-fail-over-mac active + bond-master bond0 + bond-mode active-backup + bond-num-grat-arp 5 + bond-primary bond0s0 + bond-primary-reselect always + bond-updelay 20 + bond-xmit-hash-policy layer3+4 + bond_miimon 100 + +auto bond0s1 +iface bond0s1 inet manual + bond-downdelay 10 + bond-fail-over-mac active + bond-master bond0 + bond-mode active-backup + bond-num-grat-arp 5 + bond-primary bond0s0 + bond-primary-reselect always + bond-updelay 20 + bond-xmit-hash-policy layer3+4 + bond_miimon 100 + +auto bond0 +iface bond0 inet static + address 192.168.0.2/24 + gateway 192.168.0.1 
+ bond-downdelay 10 + bond-fail-over-mac active + bond-mode active-backup + bond-num-grat-arp 5 + bond-primary bond0s0 + bond-primary-reselect always + bond-slaves none + bond-updelay 20 + bond-xmit-hash-policy layer3+4 + bond_miimon 100 + hwaddress aa:bb:cc:dd:e8:ff + mtu 9000 + post-up route add -net 10.1.3.0/24 gw 192.168.0.3 || true + pre-down route del -net 10.1.3.0/24 gw 192.168.0.3 || true + +# control-alias bond0 +iface bond0 inet static + address 192.168.1.2/24 + +# control-alias bond0 +iface bond0 inet6 static + address 2001:1::1/92 + post-up route add -A inet6 2001:67c:1/32 gw 2001:67c:1562:1 || true + pre-down route del -A inet6 2001:67c:1/32 gw 2001:67c:1562:1 || true + post-up route add -A inet6 3001:67c:1/32 gw 3001:67c:1562:1 metric 10000 \ +|| true + pre-down route del -A inet6 3001:67c:1/32 gw 3001:67c:1562:1 metric 10000 \ +|| true + """), 'yaml-v2': textwrap.dedent(""" version: 2 ethernets: @@ -1235,10 +2049,15 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true - eth0 - vf0 parameters: + down-delay: 10 + fail-over-mac-policy: active + gratuitious-arp: 5 mii-monitor-interval: 100 mode: active-backup - primary: vf0 - transmit-hash-policy: "layer3+4" + primary: bond0s0 + primary-reselect-policy: always + transmit-hash-policy: layer3+4 + up-delay: 20 routes: - to: 10.1.3.0/24 via: 192.168.0.3 @@ -1261,10 +2080,15 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true - eth0 - vf0 parameters: + down-delay: 10 + fail-over-mac-policy: active + gratuitious-arp: 5 mii-monitor-interval: 100 mode: active-backup - primary: vf0 + primary: bond0s0 + primary-reselect-policy: always transmit-hash-policy: layer3+4 + up-delay: 20 routes: - to: 10.1.3.0/24 via: 192.168.0.3 @@ -1289,59 +2113,44 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true 'expected_sysconfig_opensuse': { 'ifcfg-bond0': textwrap.dedent("""\ BONDING_MASTER=yes - BONDING_OPTS="mode=active-backup xmit_hash_policy=layer3+4 miimon=100" - BONDING_SLAVE0=bond0s0 - BONDING_SLAVE1=bond0s1 - BOOTPROTO=none - DEFROUTE=yes - DEVICE=bond0 - GATEWAY=192.168.0.1 - MACADDR=aa:bb:cc:dd:e8:ff + BONDING_OPTS="mode=active-backup xmit_hash_policy=layer3+4 """ + """miimon=100 num_grat_arp=5 """ + """downdelay=10 updelay=20 """ + """fail_over_mac=active """ + """primary=bond0s0 """ + """primary_reselect=always" + BONDING_SLAVE_0=bond0s0 + BONDING_SLAVE_1=bond0s1 + BOOTPROTO=static + LLADDR=aa:bb:cc:dd:e8:ff IPADDR=192.168.0.2 IPADDR1=192.168.1.2 - IPV6ADDR=2001:1::1/92 - IPV6INIT=yes + IPADDR6=2001:1::1/92 MTU=9000 NETMASK=255.255.255.0 NETMASK1=255.255.255.0 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Bond - USERCTL=no + STARTMODE=auto """), 'ifcfg-bond0s0': textwrap.dedent("""\ BOOTPROTO=none - DEVICE=bond0s0 - HWADDR=aa:bb:cc:dd:e8:00 - MASTER=bond0 - NM_CONTROLLED=no - ONBOOT=yes - SLAVE=yes - TYPE=Ethernet - USERCTL=no - """), - 'ifroute-bond0': textwrap.dedent("""\ - ADDRESS0=10.1.3.0 - GATEWAY0=192.168.0.3 - NETMASK0=255.255.255.0 + LLADDR=aa:bb:cc:dd:e8:00 + STARTMODE=hotplug """), 'ifcfg-bond0s1': textwrap.dedent("""\ BOOTPROTO=none - DEVICE=bond0s1 - HWADDR=aa:bb:cc:dd:e8:01 - MASTER=bond0 - NM_CONTROLLED=no - ONBOOT=yes - SLAVE=yes - TYPE=Ethernet - USERCTL=no + LLADDR=aa:bb:cc:dd:e8:01 + STARTMODE=hotplug """), }, - 'expected_sysconfig_rhel': { 'ifcfg-bond0': textwrap.dedent("""\ BONDING_MASTER=yes - BONDING_OPTS="mode=active-backup xmit_hash_policy=layer3+4 miimon=100" + BONDING_OPTS="mode=active-backup xmit_hash_policy=layer3+4 """ + 
"""miimon=100 num_grat_arp=5 """ + """downdelay=10 updelay=20 """ + """fail_over_mac=active """ + """primary=bond0s0 """ + """primary_reselect=always" BONDING_SLAVE0=bond0s0 BONDING_SLAVE1=bond0s1 BOOTPROTO=none @@ -1421,7 +2230,26 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true netmask: '::' network: '::' """), - 'expected_sysconfig': { + 'expected_sysconfig_opensuse': { + # TODO RJS: unknown proper BOOTPROTO setting ask Marius + 'ifcfg-en0': textwrap.dedent("""\ + BOOTPROTO=static + LLADDR=aa:bb:cc:dd:e8:00 + STARTMODE=auto"""), + 'ifcfg-en0.99': textwrap.dedent("""\ + BOOTPROTO=static + IPADDR=192.168.2.2 + IPADDR1=192.168.1.2 + IPADDR6=2001:1::bbbb/96 + MTU=2222 + NETMASK=255.255.255.0 + NETMASK1=255.255.255.0 + STARTMODE=auto + ETHERDEVICE=en0 + VLAN_ID=99 + """), + }, + 'expected_sysconfig_rhel': { 'ifcfg-en0': textwrap.dedent("""\ BOOTPROTO=none DEVICE=en0 @@ -1478,7 +2306,32 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true subnets: - type: static address: 192.168.2.2/24"""), - 'expected_sysconfig': { + 'expected_sysconfig_opensuse': { + 'ifcfg-br0': textwrap.dedent("""\ + BOOTPROTO=static + IPADDR=192.168.2.2 + NETMASK=255.255.255.0 + STARTMODE=auto + BRIDGE_STP=off + BRIDGE_PRIORITY=22 + BRIDGE_PORTS='eth0 eth1' + """), + 'ifcfg-eth0': textwrap.dedent("""\ + BOOTPROTO=static + BRIDGE=yes + LLADDR=52:54:00:12:34:00 + IPADDR6=2001:1::100/96 + STARTMODE=auto + """), + 'ifcfg-eth1': textwrap.dedent("""\ + BOOTPROTO=static + BRIDGE=yes + LLADDR=52:54:00:12:34:01 + IPADDR6=2001:1::101/96 + STARTMODE=auto + """), + }, + 'expected_sysconfig_rhel': { 'ifcfg-br0': textwrap.dedent("""\ BOOTPROTO=none DEVICE=br0 @@ -1577,7 +2430,27 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true macaddress: 52:54:00:12:34:ff set-name: eth2 """), - 'expected_sysconfig': { + 'expected_sysconfig_opensuse': { + 'ifcfg-eth0': textwrap.dedent("""\ + BOOTPROTO=static + LLADDR=52:54:00:12:34:00 + IPADDR=192.168.1.2 + NETMASK=255.255.255.0 + STARTMODE=manual + """), + 'ifcfg-eth1': textwrap.dedent("""\ + BOOTPROTO=static + LLADDR=52:54:00:12:34:aa + MTU=1480 + STARTMODE=auto + """), + 'ifcfg-eth2': textwrap.dedent("""\ + BOOTPROTO=static + LLADDR=52:54:00:12:34:ff + STARTMODE=manual + """), + }, + 'expected_sysconfig_rhel': { 'ifcfg-eth0': textwrap.dedent("""\ BOOTPROTO=none DEVICE=eth0 @@ -1632,6 +2505,23 @@ CONFIG_V1_SIMPLE_SUBNET = { 'type': 'static'}], 'type': 'physical'}]} +CONFIG_V1_MULTI_IFACE = { + 'version': 1, + 'config': [{'type': 'physical', + 'mtu': 1500, + 'subnets': [{'type': 'static', + 'netmask': '255.255.240.0', + 'routes': [{'netmask': '0.0.0.0', + 'network': '0.0.0.0', + 'gateway': '51.68.80.1'}], + 'address': '51.68.89.122', + 'ipv4': True}], + 'mac_address': 'fa:16:3e:25:b4:59', + 'name': 'eth0'}, + {'type': 'physical', + 'mtu': 9000, + 'subnets': [{'type': 'dhcp4'}], + 'mac_address': 'fa:16:3e:b1:ca:29', 'name': 'eth1'}]} DEFAULT_DEV_ATTRS = { 'eth1000': { @@ -1639,7 +2529,7 @@ DEFAULT_DEV_ATTRS = { "carrier": False, "dormant": False, "operstate": "down", - "address": "07-1C-C6-75-A4-BE", + "address": "07-1c-c6-75-a4-be", "device/driver": None, "device/device": None, "name_assign_type": "4", @@ -1690,6 +2580,39 @@ class TestGenerateFallbackConfig(CiTestCase): @mock.patch("cloudinit.net.sys_dev_path") @mock.patch("cloudinit.net.read_sys_net") @mock.patch("cloudinit.net.get_devicelist") + def test_device_driver_v2(self, mock_get_devicelist, mock_read_sys_net, + mock_sys_dev_path): + """Network 
configuration for generate_fallback_config is version 2.""" + devices = { + 'eth0': { + 'bridge': False, 'carrier': False, 'dormant': False, + 'operstate': 'down', 'address': '00:11:22:33:44:55', + 'device/driver': 'hv_netsvc', 'device/device': '0x3', + 'name_assign_type': '4'}, + 'eth1': { + 'bridge': False, 'carrier': False, 'dormant': False, + 'operstate': 'down', 'address': '00:11:22:33:44:55', + 'device/driver': 'mlx4_core', 'device/device': '0x7', + 'name_assign_type': '4'}, + + } + + tmp_dir = self.tmp_dir() + _setup_test(tmp_dir, mock_get_devicelist, + mock_read_sys_net, mock_sys_dev_path, + dev_attrs=devices) + + network_cfg = net.generate_fallback_config(config_driver=True) + expected = { + 'ethernets': {'eth0': {'dhcp4': True, 'set-name': 'eth0', + 'match': {'macaddress': '00:11:22:33:44:55', + 'driver': 'hv_netsvc'}}}, + 'version': 2} + self.assertEqual(expected, network_cfg) + + @mock.patch("cloudinit.net.sys_dev_path") + @mock.patch("cloudinit.net.read_sys_net") + @mock.patch("cloudinit.net.get_devicelist") def test_device_driver(self, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path): devices = { @@ -1880,11 +2803,12 @@ class TestRhelSysConfigRendering(CiTestCase): with_logs = True + nm_cfg_file = "/etc/NetworkManager/NetworkManager.conf" scripts_dir = '/etc/sysconfig/network-scripts' header = ('# Created by cloud-init on instance boot automatically, ' 'do not edit.\n#\n') - expected_name = 'expected_sysconfig' + expected_name = 'expected_sysconfig_rhel' def _get_renderer(self): distro_cls = distros.fetch('rhel') @@ -1968,7 +2892,7 @@ class TestRhelSysConfigRendering(CiTestCase): # BOOTPROTO=dhcp DEVICE=eth1000 -HWADDR=07-1C-C6-75-A4-BE +HWADDR=07-1c-c6-75-a4-be NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet @@ -2096,11 +3020,60 @@ TYPE=Ethernet USERCTL=no """ self.assertEqual(expected, found[nspath + 'ifcfg-interface0']) + # The configuration has no nameserver information make sure we + # do not write the resolv.conf file + respath = '/etc/resolv.conf' + self.assertNotIn(respath, found.keys()) + + def test_network_config_v1_multi_iface_samples(self): + ns = network_state.parse_net_config_data(CONFIG_V1_MULTI_IFACE) + render_dir = self.tmp_path("render") + os.makedirs(render_dir) + renderer = self._get_renderer() + renderer.render_network_state(ns, target=render_dir) + found = dir2dict(render_dir) + nspath = '/etc/sysconfig/network-scripts/' + self.assertNotIn(nspath + 'ifcfg-lo', found.keys()) + expected_i1 = """\ +# Created by cloud-init on instance boot automatically, do not edit. +# +BOOTPROTO=none +DEFROUTE=yes +DEVICE=eth0 +GATEWAY=51.68.80.1 +HWADDR=fa:16:3e:25:b4:59 +IPADDR=51.68.89.122 +MTU=1500 +NETMASK=255.255.240.0 +NM_CONTROLLED=no +ONBOOT=yes +TYPE=Ethernet +USERCTL=no +""" + self.assertEqual(expected_i1, found[nspath + 'ifcfg-eth0']) + expected_i2 = """\ +# Created by cloud-init on instance boot automatically, do not edit. 
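In the two-NIC sample that follows, only eth0 carries the default route; the DHCP interface eth1 is rendered with DHCLIENT_SET_DEFAULT_ROUTE=no so its lease cannot hijack a default route owned by another interface. A rough sketch of that decision, under the assumption that "owns a default route" means a static gateway elsewhere in the config:

def set_default_route_flag(iface, all_ifaces):
    # Hypothetical helper: a DHCP interface should not install a default
    # route when another interface already provides one statically.
    others_have_gw = any(
        sub.get('gateway') for other in all_ifaces if other is not iface
        for sub in other.get('subnets', []))
    return 'no' if others_have_gw else 'yes'

eth0 = {'name': 'eth0',
        'subnets': [{'type': 'static', 'gateway': '51.68.80.1'}]}
eth1 = {'name': 'eth1', 'subnets': [{'type': 'dhcp4'}]}
assert set_default_route_flag(eth1, [eth0, eth1]) == 'no'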
+# +BOOTPROTO=dhcp +DEVICE=eth1 +DHCLIENT_SET_DEFAULT_ROUTE=no +HWADDR=fa:16:3e:b1:ca:29 +MTU=9000 +NM_CONTROLLED=no +ONBOOT=yes +TYPE=Ethernet +USERCTL=no +""" + self.assertEqual(expected_i2, found[nspath + 'ifcfg-eth1']) def test_config_with_explicit_loopback(self): ns = network_state.parse_net_config_data(CONFIG_V1_EXPLICIT_LOOPBACK) render_dir = self.tmp_path("render") os.makedirs(render_dir) + # write an etc/resolv.conf and expect it to not be modified + resolvconf = os.path.join(render_dir, 'etc/resolv.conf') + resolvconf_content = "# Original Content" + util.write_file(resolvconf, resolvconf_content) renderer = self._get_renderer() renderer.render_network_state(ns, target=render_dir) found = dir2dict(render_dir) @@ -2117,12 +3090,13 @@ TYPE=Ethernet USERCTL=no """ self.assertEqual(expected, found[nspath + 'ifcfg-eth0']) + # a dhcp only config should not modify resolv.conf + self.assertEqual(resolvconf_content, found['/etc/resolv.conf']) def test_bond_config(self): - expected_name = 'expected_sysconfig_rhel' entry = NETWORK_CONFIGS['bond'] found = self._render_and_read(network_config=yaml.load(entry['yaml'])) - self._compare_files_to_expected(entry[expected_name], found) + self._compare_files_to_expected(entry[self.expected_name], found) self._assert_headers(found) def test_vlan_config(self): @@ -2174,6 +3148,273 @@ USERCTL=no self._compare_files_to_expected(entry[self.expected_name], found) self._assert_headers(found) + def test_dhcpv6_accept_ra_config_v1(self): + entry = NETWORK_CONFIGS['dhcpv6_accept_ra'] + found = self._render_and_read(network_config=yaml.load( + entry['yaml_v1'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + + def test_dhcpv6_accept_ra_config_v2(self): + entry = NETWORK_CONFIGS['dhcpv6_accept_ra'] + found = self._render_and_read(network_config=yaml.load( + entry['yaml_v2'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + + def test_dhcpv6_reject_ra_config_v1(self): + entry = NETWORK_CONFIGS['dhcpv6_reject_ra'] + found = self._render_and_read(network_config=yaml.load( + entry['yaml_v1'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + + def test_dhcpv6_reject_ra_config_v2(self): + entry = NETWORK_CONFIGS['dhcpv6_reject_ra'] + found = self._render_and_read(network_config=yaml.load( + entry['yaml_v2'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + + def test_dhcpv6_stateless_config(self): + entry = NETWORK_CONFIGS['dhcpv6_stateless'] + found = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + + def test_dhcpv6_stateful_config(self): + entry = NETWORK_CONFIGS['dhcpv6_stateful'] + found = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + + def test_check_ifcfg_rh(self): + """ifcfg-rh plugin is added to NetworkManager.conf if conf present.""" + render_dir = self.tmp_dir() + nm_cfg = util.target_path(render_dir, path=self.nm_cfg_file) + util.ensure_dir(os.path.dirname(nm_cfg)) + + # write a template nm.conf, note plugins is a list here + with open(nm_cfg, 'w') as fh: + fh.write('# test_check_ifcfg_rh\n[main]\nplugins=foo,bar\n') + self.assertTrue(os.path.exists(nm_cfg)) + + # render and read + entry = NETWORK_CONFIGS['small'] + found = 
self._render_and_read(network_config=yaml.load(entry['yaml']), + dir=render_dir) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + + # check ifcfg-rh is in the 'plugins' list + config = sysconfig.ConfigObj(nm_cfg) + self.assertIn('ifcfg-rh', config['main']['plugins']) + + def test_check_ifcfg_rh_plugins_string(self): + """ifcfg-rh plugin is appended when plugins is a string.""" + render_dir = self.tmp_path("render") + os.makedirs(render_dir) + nm_cfg = util.target_path(render_dir, path=self.nm_cfg_file) + util.ensure_dir(os.path.dirname(nm_cfg)) + + # write a template nm.conf, note plugins is a value here + util.write_file(nm_cfg, '# test_check_ifcfg_rh\n[main]\nplugins=foo\n') + + # render and read + entry = NETWORK_CONFIGS['small'] + found = self._render_and_read(network_config=yaml.load(entry['yaml']), + dir=render_dir) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + + # check raw content has plugin + nm_file_content = util.load_file(nm_cfg) + self.assertIn('ifcfg-rh', nm_file_content) + + # check ifcfg-rh is in the 'plugins' list + config = sysconfig.ConfigObj(nm_cfg) + self.assertIn('ifcfg-rh', config['main']['plugins']) + + def test_check_ifcfg_rh_plugins_no_plugins(self): + """enable_ifcfg_plugin creates plugins value if missing.""" + render_dir = self.tmp_path("render") + os.makedirs(render_dir) + nm_cfg = util.target_path(render_dir, path=self.nm_cfg_file) + util.ensure_dir(os.path.dirname(nm_cfg)) + + # write a template nm.conf, note plugins is missing + util.write_file(nm_cfg, '# test_check_ifcfg_rh\n[main]\n') + self.assertTrue(os.path.exists(nm_cfg)) + + # render and read + entry = NETWORK_CONFIGS['small'] + found = self._render_and_read(network_config=yaml.load(entry['yaml']), + dir=render_dir) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + + # check ifcfg-rh is in the 'plugins' list + config = sysconfig.ConfigObj(nm_cfg) + self.assertIn('ifcfg-rh', config['main']['plugins']) + + def test_netplan_dhcp_false_disable_dhcp_in_state(self): + """netplan config with dhcp[46]: False should not add dhcp in state""" + net_config = yaml.load(NETPLAN_DHCP_FALSE) + ns = network_state.parse_net_config_data(net_config, + skip_broken=False) + + dhcp_found = [snet for iface in ns.iter_interfaces() + for snet in iface['subnets'] if 'dhcp' in snet['type']] + + self.assertEqual([], dhcp_found) + + def test_netplan_dhcp_false_no_dhcp_in_sysconfig(self): + """netplan cfg with dhcp[46]: False should not have bootproto=dhcp""" + + entry = { + 'yaml': NETPLAN_DHCP_FALSE, + 'expected_sysconfig': { + 'ifcfg-ens3': textwrap.dedent("""\ + BOOTPROTO=none + DEFROUTE=yes + DEVICE=ens3 + DNS1=192.168.42.53 + DNS2=1.1.1.1 + DOMAIN=example.com + GATEWAY=192.168.42.1 + HWADDR=52:54:00:ab:cd:ef + IPADDR=192.168.42.100 + IPV6ADDR=2001:db8::100/32 + IPV6INIT=yes + IPV6_DEFAULTGW=2001:db8::1 + NETMASK=255.255.255.0 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """), + } + } + + found = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self._compare_files_to_expected(entry['expected_sysconfig'], found) + self._assert_headers(found) + + def test_from_v2_vlan_mtu(self): + """verify mtu gets rendered on vlan when source is netplan.""" + v2data = { + 'version': 2, + 'ethernets': {'eno1': {}}, + 'vlans': { + 'eno1.1000': { + 'addresses': ["192.6.1.9/24"], + 'id': 1000, 'link': 'eno1', 'mtu': 1495}}} + expected = { + 'ifcfg-eno1': 
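The three ifcfg-rh tests above fix the contract for enabling the plugin: whether NetworkManager.conf has a plugins list, a single plugins value, or no plugins key at all, 'ifcfg-rh' must end up in [main]/plugins. A small sketch of an idempotent version of that edit, using plain dicts in place of configobj (helper name invented):

def enable_ifcfg_plugin(conf):
    # conf models the parsed NetworkManager.conf, e.g. {'main': {...}}.
    main = conf.setdefault('main', {})
    plugins = main.get('plugins', [])
    if isinstance(plugins, str):  # "plugins=foo" parses as a bare string
        plugins = [plugins]
    if 'ifcfg-rh' not in plugins:
        plugins.append('ifcfg-rh')
    main['plugins'] = plugins
    return conf

assert enable_ifcfg_plugin({'main': {'plugins': 'foo'}}) == \
    {'main': {'plugins': ['foo', 'ifcfg-rh']}}
assert enable_ifcfg_plugin({'main': {}})['main']['plugins'] == ['ifcfg-rh']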
textwrap.dedent("""\ + BOOTPROTO=none + DEVICE=eno1 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """), + 'ifcfg-eno1.1000': textwrap.dedent("""\ + BOOTPROTO=none + DEVICE=eno1.1000 + IPADDR=192.6.1.9 + MTU=1495 + NETMASK=255.255.255.0 + NM_CONTROLLED=no + ONBOOT=yes + PHYSDEV=eno1 + TYPE=Ethernet + USERCTL=no + VLAN=yes + """) + } + self._compare_files_to_expected( + expected, self._render_and_read(network_config=v2data)) + + def test_from_v2_bond_mtu(self): + """verify mtu gets rendered on bond when source is netplan.""" + v2data = { + 'version': 2, + 'bonds': { + 'bond0': {'addresses': ['10.101.8.65/26'], + 'interfaces': ['enp0s0', 'enp0s1'], + 'mtu': 1334, + 'parameters': {}}} + } + expected = { + 'ifcfg-bond0': textwrap.dedent("""\ + BONDING_MASTER=yes + BONDING_SLAVE0=enp0s0 + BONDING_SLAVE1=enp0s1 + BOOTPROTO=none + DEVICE=bond0 + IPADDR=10.101.8.65 + MTU=1334 + NETMASK=255.255.255.192 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Bond + USERCTL=no + """), + 'ifcfg-enp0s0': textwrap.dedent("""\ + BONDING_MASTER=yes + BOOTPROTO=none + DEVICE=enp0s0 + MASTER=bond0 + NM_CONTROLLED=no + ONBOOT=yes + SLAVE=yes + TYPE=Bond + USERCTL=no + """), + 'ifcfg-enp0s1': textwrap.dedent("""\ + BONDING_MASTER=yes + BOOTPROTO=none + DEVICE=enp0s1 + MASTER=bond0 + NM_CONTROLLED=no + ONBOOT=yes + SLAVE=yes + TYPE=Bond + USERCTL=no + """) + } + self._compare_files_to_expected( + expected, self._render_and_read(network_config=v2data)) + + def test_from_v2_route_metric(self): + """verify route-metric gets rendered on nic when source is netplan.""" + overrides = {'route-metric': 100} + v2base = { + 'version': 2, + 'ethernets': { + 'eno1': {'dhcp4': True, + 'match': {'macaddress': '07-1c-c6-75-a4-be'}}}} + expected = { + 'ifcfg-eno1': textwrap.dedent("""\ + BOOTPROTO=dhcp + DEVICE=eno1 + HWADDR=07-1c-c6-75-a4-be + METRIC=100 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """), + } + for dhcp_ver in ('dhcp4', 'dhcp6'): + v2data = copy.deepcopy(v2base) + if dhcp_ver == 'dhcp6': + expected['ifcfg-eno1'] += "IPV6INIT=yes\nDHCPV6C=yes\n" + v2data['ethernets']['eno1'].update( + {dhcp_ver: True, '{0}-overrides'.format(dhcp_ver): overrides}) + self._compare_files_to_expected( + expected, self._render_and_read(network_config=v2data)) + class TestOpenSuseSysConfigRendering(CiTestCase): @@ -2183,7 +3424,7 @@ class TestOpenSuseSysConfigRendering(CiTestCase): header = ('# Created by cloud-init on instance boot automatically, ' 'do not edit.\n#\n') - expected_name = 'expected_sysconfig' + expected_name = 'expected_sysconfig_opensuse' def _get_renderer(self): distro_cls = distros.fetch('opensuse') @@ -2255,91 +3496,89 @@ class TestOpenSuseSysConfigRendering(CiTestCase): expected_content = """ # Created by cloud-init on instance boot automatically, do not edit. 
# -BOOTPROTO=dhcp -DEVICE=eth1000 -HWADDR=07-1C-C6-75-A4-BE -NM_CONTROLLED=no -ONBOOT=yes -TYPE=Ethernet -USERCTL=no +BOOTPROTO=dhcp4 +LLADDR=07-1c-c6-75-a4-be +STARTMODE=auto """.lstrip() self.assertEqual(expected_content, content) - def test_multiple_ipv4_default_gateways(self): - """ValueError is raised when duplicate ipv4 gateways exist.""" - net_json = { - "services": [{"type": "dns", "address": "172.19.0.12"}], - "networks": [{ - "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4", - "type": "ipv4", "netmask": "255.255.252.0", - "link": "tap1a81968a-79", - "routes": [{ - "netmask": "0.0.0.0", - "network": "0.0.0.0", - "gateway": "172.19.3.254", - }, { - "netmask": "0.0.0.0", # A second default gateway - "network": "0.0.0.0", - "gateway": "172.20.3.254", - }], - "ip_address": "172.19.1.34", "id": "network0" - }], - "links": [ - { - "ethernet_mac_address": "fa:16:3e:ed:9a:59", - "mtu": None, "type": "bridge", "id": - "tap1a81968a-79", - "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f" - }, - ], - } - macs = {'fa:16:3e:ed:9a:59': 'eth0'} - render_dir = self.tmp_dir() - network_cfg = openstack.convert_net_json(net_json, known_macs=macs) - ns = network_state.parse_net_config_data(network_cfg, - skip_broken=False) - renderer = self._get_renderer() - with self.assertRaises(ValueError): - renderer.render_network_state(ns, target=render_dir) - self.assertEqual([], os.listdir(render_dir)) - - def test_multiple_ipv6_default_gateways(self): - """ValueError is raised when duplicate ipv6 gateways exist.""" - net_json = { - "services": [{"type": "dns", "address": "172.19.0.12"}], - "networks": [{ - "network_id": "public-ipv6", - "type": "ipv6", "netmask": "", - "link": "tap1a81968a-79", - "routes": [{ - "gateway": "2001:DB8::1", - "netmask": "::", - "network": "::" - }, { - "gateway": "2001:DB9::1", - "netmask": "::", - "network": "::" - }], - "ip_address": "2001:DB8::10", "id": "network1" - }], - "links": [ - { - "ethernet_mac_address": "fa:16:3e:ed:9a:59", - "mtu": None, "type": "bridge", "id": - "tap1a81968a-79", - "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f" - }, - ], - } - macs = {'fa:16:3e:ed:9a:59': 'eth0'} - render_dir = self.tmp_dir() - network_cfg = openstack.convert_net_json(net_json, known_macs=macs) - ns = network_state.parse_net_config_data(network_cfg, - skip_broken=False) - renderer = self._get_renderer() - with self.assertRaises(ValueError): - renderer.render_network_state(ns, target=render_dir) - self.assertEqual([], os.listdir(render_dir)) + # TODO(rjschwei): re-enable test once route writing is implemented + # for SUSE distros +# def test_multiple_ipv4_default_gateways(self): +# """ValueError is raised when duplicate ipv4 gateways exist.""" +# net_json = { +# "services": [{"type": "dns", "address": "172.19.0.12"}], +# "networks": [{ +# "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4", +# "type": "ipv4", "netmask": "255.255.252.0", +# "link": "tap1a81968a-79", +# "routes": [{ +# "netmask": "0.0.0.0", +# "network": "0.0.0.0", +# "gateway": "172.19.3.254", +# }, { +# "netmask": "0.0.0.0", # A second default gateway +# "network": "0.0.0.0", +# "gateway": "172.20.3.254", +# }], +# "ip_address": "172.19.1.34", "id": "network0" +# }], +# "links": [ +# { +# "ethernet_mac_address": "fa:16:3e:ed:9a:59", +# "mtu": None, "type": "bridge", "id": +# "tap1a81968a-79", +# "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f" +# }, +# ], +# } +# macs = {'fa:16:3e:ed:9a:59': 'eth0'} +# render_dir = self.tmp_dir() +# network_cfg = openstack.convert_net_json(net_json, known_macs=macs) +# ns = 
network_state.parse_net_config_data(network_cfg, +# skip_broken=False) +# renderer = self._get_renderer() +# with self.assertRaises(ValueError): +# renderer.render_network_state(ns, target=render_dir) +# self.assertEqual([], os.listdir(render_dir)) +# +# def test_multiple_ipv6_default_gateways(self): +# """ValueError is raised when duplicate ipv6 gateways exist.""" +# net_json = { +# "services": [{"type": "dns", "address": "172.19.0.12"}], +# "networks": [{ +# "network_id": "public-ipv6", +# "type": "ipv6", "netmask": "", +# "link": "tap1a81968a-79", +# "routes": [{ +# "gateway": "2001:DB8::1", +# "netmask": "::", +# "network": "::" +# }, { +# "gateway": "2001:DB9::1", +# "netmask": "::", +# "network": "::" +# }], +# "ip_address": "2001:DB8::10", "id": "network1" +# }], +# "links": [ +# { +# "ethernet_mac_address": "fa:16:3e:ed:9a:59", +# "mtu": None, "type": "bridge", "id": +# "tap1a81968a-79", +# "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f" +# }, +# ], +# } +# macs = {'fa:16:3e:ed:9a:59': 'eth0'} +# render_dir = self.tmp_dir() +# network_cfg = openstack.convert_net_json(net_json, known_macs=macs) +# ns = network_state.parse_net_config_data(network_cfg, +# skip_broken=False) +# renderer = self._get_renderer() +# with self.assertRaises(ValueError): +# renderer.render_network_state(ns, target=render_dir) +# self.assertEqual([], os.listdir(render_dir)) def test_openstack_rendering_samples(self): for os_sample in OS_SAMPLES: @@ -2372,24 +3611,26 @@ USERCTL=no expected = """\ # Created by cloud-init on instance boot automatically, do not edit. # -BOOTPROTO=none -DEFROUTE=yes -DEVICE=interface0 -GATEWAY=10.0.2.2 -HWADDR=52:54:00:12:34:00 +BOOTPROTO=static IPADDR=10.0.2.15 +LLADDR=52:54:00:12:34:00 NETMASK=255.255.255.0 -NM_CONTROLLED=no -ONBOOT=yes -TYPE=Ethernet -USERCTL=no +STARTMODE=auto """ self.assertEqual(expected, found[nspath + 'ifcfg-interface0']) + # The configuration has no nameserver information make sure we + # do not write the resolv.conf file + respath = '/etc/resolv.conf' + self.assertNotIn(respath, found.keys()) def test_config_with_explicit_loopback(self): ns = network_state.parse_net_config_data(CONFIG_V1_EXPLICIT_LOOPBACK) render_dir = self.tmp_path("render") os.makedirs(render_dir) + # write an etc/resolv.conf and expect it to not be modified + resolvconf = os.path.join(render_dir, 'etc/resolv.conf') + resolvconf_content = "# Original Content" + util.write_file(resolvconf, resolvconf_content) renderer = self._get_renderer() renderer.render_network_state(ns, target=render_dir) found = dir2dict(render_dir) @@ -2399,13 +3640,11 @@ USERCTL=no # Created by cloud-init on instance boot automatically, do not edit. 
# BOOTPROTO=dhcp -DEVICE=eth0 -NM_CONTROLLED=no -ONBOOT=yes -TYPE=Ethernet -USERCTL=no +STARTMODE=auto """ self.assertEqual(expected, found[nspath + 'ifcfg-eth0']) + # a dhcp only config should not modify resolv.conf + self.assertEqual(resolvconf_content, found['/etc/resolv.conf']) def test_bond_config(self): expected_name = 'expected_sysconfig_opensuse' @@ -2472,6 +3711,30 @@ USERCTL=no self._compare_files_to_expected(entry[self.expected_name], found) self._assert_headers(found) + def test_simple_render_ipv6_slaac(self): + entry = NETWORK_CONFIGS['ipv6_slaac'] + found = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + + def test_dhcpv6_stateless_config(self): + entry = NETWORK_CONFIGS['dhcpv6_stateless'] + found = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + + def test_render_v4_and_v6(self): + entry = NETWORK_CONFIGS['v4_and_v6'] + found = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + + def test_render_v6_and_v4(self): + entry = NETWORK_CONFIGS['v6_and_v4'] + found = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + class TestEniNetRendering(CiTestCase): @@ -2526,6 +3789,30 @@ iface eth0 inet dhcp self.assertEqual( expected, dir2dict(tmp_dir)['/etc/network/interfaces']) + def test_v2_route_metric_to_eni(self): + """Network v2 route-metric overrides are preserved in eni output""" + tmp_dir = self.tmp_dir() + renderer = eni.Renderer() + expected_tmpl = textwrap.dedent("""\ + auto lo + iface lo inet loopback + + auto eth0 + iface eth0 inet{suffix} dhcp + metric 100 + """) + for dhcp_ver in ('dhcp4', 'dhcp6'): + suffix = '6' if dhcp_ver == 'dhcp6' else '' + dhcp_cfg = { + dhcp_ver: True, + '{ver}-overrides'.format(ver=dhcp_ver): {'route-metric': 100}} + v2_input = {'version': 2, 'ethernets': {'eth0': dhcp_cfg}} + ns = network_state.parse_net_config_data(v2_input) + renderer.render_network_state(ns, target=tmp_dir) + self.assertEqual( + expected_tmpl.format(suffix=suffix), + dir2dict(tmp_dir)['/etc/network/interfaces']) + class TestNetplanNetRendering(CiTestCase): @@ -2562,13 +3849,13 @@ class TestNetplanNetRendering(CiTestCase): expected = """ network: - version: 2 ethernets: eth1000: dhcp4: true match: macaddress: 07-1c-c6-75-a4-be set-name: eth1000 + version: 2 """ self.assertEqual(expected.lstrip(), contents.lstrip()) self.assertEqual(1, mock_clean_default.call_count) @@ -2645,7 +3932,9 @@ class TestNetplanPostcommands(CiTestCase): @mock.patch.object(netplan.Renderer, '_netplan_generate') @mock.patch.object(netplan.Renderer, '_net_setup_link') - def test_netplan_render_calls_postcmds(self, mock_netplan_generate, + @mock.patch('cloudinit.util.subp') + def test_netplan_render_calls_postcmds(self, mock_subp, + mock_netplan_generate, mock_net_setup_link): tmp_dir = self.tmp_dir() ns = network_state.parse_net_config_data(self.mycfg, @@ -2657,14 +3946,18 @@ class TestNetplanPostcommands(CiTestCase): render_target = 'netplan.yaml' renderer = netplan.Renderer( {'netplan_path': render_target, 'postcmds': True}) + mock_subp.side_effect = iter([util.ProcessExecutionError]) renderer.render_network_state(ns, target=render_dir) 
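The ProcessExecutionError side effect above matters because the renderer now probes `netplan info` for optional features before generating; on older netplan releases that subcommand does not exist, and the probe must degrade to "no features" instead of aborting the render (note the expected calls below list ['netplan', 'info'] first). A sketch of that probe-with-fallback pattern, with the helper and exception as stand-ins for the real cloudinit.util names:

class ProcessExecutionError(Exception):
    """Stand-in for cloudinit.util.ProcessExecutionError."""

def netplan_features(subp):
    # Ask netplan what it supports; treat a missing or failing 'info'
    # subcommand as "no optional features" rather than an error.
    try:
        out, _err = subp(['netplan', 'info'], capture=True)
    except ProcessExecutionError:
        return []
    return [ln.strip()[2:] for ln in out.splitlines()
            if ln.strip().startswith('- ')]

def old_netplan(cmd, capture=False):
    raise ProcessExecutionError(cmd)  # simulates netplan without 'info'

assert netplan_features(old_netplan) == []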
mock_netplan_generate.assert_called_with(run=True) mock_net_setup_link.assert_called_with(run=True) + @mock.patch('cloudinit.util.SeLinuxGuard') @mock.patch.object(netplan, "get_devicelist") @mock.patch('cloudinit.util.subp') - def test_netplan_postcmds(self, mock_subp, mock_devlist): + def test_netplan_postcmds(self, mock_subp, mock_devlist, mock_sel): + mock_sel.__enter__ = mock.Mock(return_value=False) + mock_sel.__exit__ = mock.Mock() mock_devlist.side_effect = [['lo']] tmp_dir = self.tmp_dir() ns = network_state.parse_net_config_data(self.mycfg, @@ -2676,7 +3969,13 @@ class TestNetplanPostcommands(CiTestCase): render_target = 'netplan.yaml' renderer = netplan.Renderer( {'netplan_path': render_target, 'postcmds': True}) + mock_subp.side_effect = iter([ + util.ProcessExecutionError, + ('', ''), + ('', ''), + ]) expected = [ + mock.call(['netplan', 'info'], capture=True), mock.call(['netplan', 'generate'], capture=True), mock.call(['udevadm', 'test-builtin', 'net_setup_link', '/sys/class/net/lo'], capture=True), @@ -2775,13 +4074,13 @@ class TestCmdlineConfigParsing(CiTestCase): self.assertEqual(found, self.simple_cfg) -class TestCmdlineReadKernelConfig(FilesystemMockingTestCase): +class TestCmdlineKlibcNetworkConfigSource(FilesystemMockingTestCase): macs = { 'eth0': '14:02:ec:42:48:00', 'eno1': '14:02:ec:42:48:01', } - def test_ip_cmdline_without_ip(self): + def test_without_ip(self): content = {'/run/net-eth0.conf': DHCP_CONTENT_1, cmdline._OPEN_ISCSI_INTERFACE_FILE: "eth0\n"} exp1 = copy.deepcopy(DHCP_EXPECTED_1) @@ -2791,12 +4090,15 @@ class TestCmdlineReadKernelConfig(FilesystemMockingTestCase): populate_dir(root, content) self.reRoot(root) - found = cmdline.read_kernel_cmdline_config( - cmdline='foo root=/root/bar', mac_addrs=self.macs) + src = cmdline.KlibcNetworkConfigSource( + _cmdline='foo root=/root/bar', _mac_addrs=self.macs, + ) + self.assertTrue(src.is_applicable()) + found = src.render_config() self.assertEqual(found['version'], 1) self.assertEqual(found['config'], [exp1]) - def test_ip_cmdline_read_kernel_cmdline_ip(self): + def test_with_ip(self): content = {'/run/net-eth0.conf': DHCP_CONTENT_1} exp1 = copy.deepcopy(DHCP_EXPECTED_1) exp1['mac_address'] = self.macs['eth0'] @@ -2805,20 +4107,25 @@ class TestCmdlineReadKernelConfig(FilesystemMockingTestCase): populate_dir(root, content) self.reRoot(root) - found = cmdline.read_kernel_cmdline_config( - cmdline='foo ip=dhcp', mac_addrs=self.macs) + src = cmdline.KlibcNetworkConfigSource( + _cmdline='foo ip=dhcp', _mac_addrs=self.macs, + ) + self.assertTrue(src.is_applicable()) + found = src.render_config() self.assertEqual(found['version'], 1) self.assertEqual(found['config'], [exp1]) - def test_ip_cmdline_read_kernel_cmdline_ip6(self): + def test_with_ip6(self): content = {'/run/net6-eno1.conf': DHCP6_CONTENT_1} root = self.tmp_dir() populate_dir(root, content) self.reRoot(root) - found = cmdline.read_kernel_cmdline_config( - cmdline='foo ip6=dhcp root=/dev/sda', - mac_addrs=self.macs) + src = cmdline.KlibcNetworkConfigSource( + _cmdline='foo ip6=dhcp root=/dev/sda', _mac_addrs=self.macs, + ) + self.assertTrue(src.is_applicable()) + found = src.render_config() self.assertEqual( found, {'version': 1, 'config': [ @@ -2828,15 +4135,16 @@ class TestCmdlineReadKernelConfig(FilesystemMockingTestCase): {'dns_nameservers': ['2001:67c:1562:8010::2:1'], 'control': 'manual', 'type': 'dhcp6', 'netmask': '64'}]}]}) - def test_ip_cmdline_read_kernel_cmdline_none(self): + def test_with_no_ip_or_ip6(self): # if there is no ip= or ip6= on 
cmdline, return value should be None content = {'net6-eno1.conf': DHCP6_CONTENT_1} files = sorted(populate_dir(self.tmp_dir(), content)) - found = cmdline.read_kernel_cmdline_config( - files=files, cmdline='foo root=/dev/sda', mac_addrs=self.macs) - self.assertIsNone(found) + src = cmdline.KlibcNetworkConfigSource( + _files=files, _cmdline='foo root=/dev/sda', _mac_addrs=self.macs, + ) + self.assertFalse(src.is_applicable()) - def test_ip_cmdline_both_ip_ip6(self): + def test_with_both_ip_ip6(self): content = { '/run/net-eth0.conf': DHCP_CONTENT_1, '/run/net6-eth0.conf': DHCP6_CONTENT_1.replace('eno1', 'eth0')} @@ -2851,14 +4159,92 @@ class TestCmdlineReadKernelConfig(FilesystemMockingTestCase): populate_dir(root, content) self.reRoot(root) - found = cmdline.read_kernel_cmdline_config( - cmdline='foo ip=dhcp ip6=dhcp', mac_addrs=self.macs) + src = cmdline.KlibcNetworkConfigSource( + _cmdline='foo ip=dhcp ip6=dhcp', _mac_addrs=self.macs, + ) + self.assertTrue(src.is_applicable()) + found = src.render_config() self.assertEqual(found['version'], 1) self.assertEqual(found['config'], expected) +class TestReadInitramfsConfig(CiTestCase): + + def _config_source_cls_mock(self, is_applicable, render_config=None): + return lambda: mock.Mock( + is_applicable=lambda: is_applicable, + render_config=lambda: render_config, + ) + + def test_no_sources(self): + with mock.patch('cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES', []): + self.assertIsNone(cmdline.read_initramfs_config()) + + def test_no_applicable_sources(self): + sources = [ + self._config_source_cls_mock(is_applicable=False), + self._config_source_cls_mock(is_applicable=False), + self._config_source_cls_mock(is_applicable=False), + ] + with mock.patch('cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES', + sources): + self.assertIsNone(cmdline.read_initramfs_config()) + + def test_one_applicable_source(self): + expected_config = object() + sources = [ + self._config_source_cls_mock( + is_applicable=True, render_config=expected_config, + ), + ] + with mock.patch('cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES', + sources): + self.assertEqual(expected_config, cmdline.read_initramfs_config()) + + def test_one_applicable_source_after_inapplicable_sources(self): + expected_config = object() + sources = [ + self._config_source_cls_mock(is_applicable=False), + self._config_source_cls_mock(is_applicable=False), + self._config_source_cls_mock( + is_applicable=True, render_config=expected_config, + ), + ] + with mock.patch('cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES', + sources): + self.assertEqual(expected_config, cmdline.read_initramfs_config()) + + def test_first_applicable_source_is_used(self): + first_config, second_config = object(), object() + sources = [ + self._config_source_cls_mock( + is_applicable=True, render_config=first_config, + ), + self._config_source_cls_mock( + is_applicable=True, render_config=second_config, + ), + ] + with mock.patch('cloudinit.net.cmdline._INITRAMFS_CONFIG_SOURCES', + sources): + self.assertEqual(first_config, cmdline.read_initramfs_config()) + + class TestNetplanRoundTrip(CiTestCase): + + NETPLAN_INFO_OUT = textwrap.dedent(""" + netplan.io: + features: + - dhcp-use-domains + - ipv6-mtu + website: https://netplan.io/ + """) + + def setUp(self): + super(TestNetplanRoundTrip, self).setUp() + self.add_patch('cloudinit.net.netplan.util.subp', 'm_subp') + self.m_subp.return_value = (self.NETPLAN_INFO_OUT, '') + def _render_and_read(self, network_config=None, state=None, netplan_path=None, target=None): if target 
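The TestReadInitramfsConfig cases above nail down the selection contract that replaces read_kernel_cmdline_config: instantiate each registered source class in order, pick the first whose is_applicable() returns True, and return its render_config(); with no applicable source the result is None. A sketch of that loop, assuming only the method names the tests exercise:

def read_initramfs_config(source_classes):
    # First applicable source wins; later sources are never rendered.
    for klass in source_classes:
        source = klass()
        if source.is_applicable():
            return source.render_config()
    return None

class _Source:
    def __init__(self, applicable, config=None):
        self._applicable, self._config = applicable, config
    def is_applicable(self):
        return self._applicable
    def render_config(self):
        return self._config

winner = {'version': 1, 'config': []}
sources = [lambda: _Source(False), lambda: _Source(True, winner)]
assert read_initramfs_config(sources) is winner
assert read_initramfs_config([]) is None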
is None: @@ -2929,6 +4315,46 @@ class TestNetplanRoundTrip(CiTestCase): entry['expected_netplan'].splitlines(), files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + def testsimple_render_dhcpv6_accept_ra(self): + entry = NETWORK_CONFIGS['dhcpv6_accept_ra'] + files = self._render_and_read(network_config=yaml.load( + entry['yaml_v1'])) + self.assertEqual( + entry['expected_netplan'].splitlines(), + files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + + def testsimple_render_dhcpv6_reject_ra(self): + entry = NETWORK_CONFIGS['dhcpv6_reject_ra'] + files = self._render_and_read(network_config=yaml.load( + entry['yaml_v1'])) + self.assertEqual( + entry['expected_netplan'].splitlines(), + files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + + def testsimple_render_ipv6_slaac(self): + entry = NETWORK_CONFIGS['ipv6_slaac'] + files = self._render_and_read(network_config=yaml.load( + entry['yaml'])) + self.assertEqual( + entry['expected_netplan'].splitlines(), + files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + + def testsimple_render_dhcpv6_stateless(self): + entry = NETWORK_CONFIGS['dhcpv6_stateless'] + files = self._render_and_read(network_config=yaml.load( + entry['yaml'])) + self.assertEqual( + entry['expected_netplan'].splitlines(), + files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + + def testsimple_render_dhcpv6_stateful(self): + entry = NETWORK_CONFIGS['dhcpv6_stateful'] + files = self._render_and_read(network_config=yaml.load( + entry['yaml'])) + self.assertEqual( + entry['expected_netplan'].splitlines(), + files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + def testsimple_render_all(self): entry = NETWORK_CONFIGS['all'] files = self._render_and_read(network_config=yaml.load(entry['yaml'])) @@ -2946,6 +4372,53 @@ class TestNetplanRoundTrip(CiTestCase): entry['expected_netplan'].splitlines(), files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + def test_render_output_has_yaml_no_aliases(self): + entry = { + 'yaml': V1_NAMESERVER_ALIAS, + 'expected_netplan': NETPLAN_NO_ALIAS, + } + network_config = yaml.load(entry['yaml']) + ns = network_state.parse_net_config_data(network_config) + files = self._render_and_read(state=ns) + # check for alias + content = files['/etc/netplan/50-cloud-init.yaml'] + + # test load the yaml to ensure we don't render something not loadable + # this allows single aliases, but not duplicate ones + parsed = yaml.load(files['/etc/netplan/50-cloud-init.yaml']) + self.assertNotEqual(None, parsed) + + # now look for any alias, avoid rendering them entirely + # generate the first anchor string using the template + # as of this writing, looks like "&id001" + anchor = r'&' + Serializer.ANCHOR_TEMPLATE % 1 + found_alias = re.search(anchor, content, re.MULTILINE) + if found_alias: + msg = "Error at: %s\nContent:\n%s" % (found_alias, content) + raise ValueError('Found yaml alias in rendered netplan: ' + msg) + + print(entry['expected_netplan']) + print('-- expected ^ | v rendered --') + print(files['/etc/netplan/50-cloud-init.yaml']) + self.assertEqual( + entry['expected_netplan'].splitlines(), + files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + + def test_render_output_supports_both_grat_arp_spelling(self): + entry = { + 'yaml': NETPLAN_BOND_GRAT_ARP, + 'expected_netplan': NETPLAN_BOND_GRAT_ARP.replace('gratuitous', + 'gratuitious'), + } + network_config = yaml.load(entry['yaml']).get('network') + files = self._render_and_read(network_config=network_config) + print(entry['expected_netplan']) + print('-- expected ^ | v rendered --') + 
print(files['/etc/netplan/50-cloud-init.yaml']) + self.assertEqual( + entry['expected_netplan'].splitlines(), + files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + class TestEniRoundTrip(CiTestCase): @@ -3012,6 +4485,43 @@ class TestEniRoundTrip(CiTestCase): entry['expected_eni'].splitlines(), files['/etc/network/interfaces'].splitlines()) + def testsimple_render_dhcpv6_stateless(self): + entry = NETWORK_CONFIGS['dhcpv6_stateless'] + files = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self.assertEqual( + entry['expected_eni'].splitlines(), + files['/etc/network/interfaces'].splitlines()) + + def testsimple_render_ipv6_slaac(self): + entry = NETWORK_CONFIGS['ipv6_slaac'] + files = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self.assertEqual( + entry['expected_eni'].splitlines(), + files['/etc/network/interfaces'].splitlines()) + + def testsimple_render_dhcpv6_stateful(self): + entry = NETWORK_CONFIGS['dhcpv6_stateful'] + files = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self.assertEqual( + entry['expected_eni'].splitlines(), + files['/etc/network/interfaces'].splitlines()) + + def testsimple_render_dhcpv6_accept_ra(self): + entry = NETWORK_CONFIGS['dhcpv6_accept_ra'] + files = self._render_and_read(network_config=yaml.load( + entry['yaml_v1'])) + self.assertEqual( + entry['expected_eni'].splitlines(), + files['/etc/network/interfaces'].splitlines()) + + def testsimple_render_dhcpv6_reject_ra(self): + entry = NETWORK_CONFIGS['dhcpv6_reject_ra'] + files = self._render_and_read(network_config=yaml.load( + entry['yaml_v1'])) + self.assertEqual( + entry['expected_eni'].splitlines(), + files['/etc/network/interfaces'].splitlines()) + def testsimple_render_manual(self): """Test rendering of 'manual' for 'type' and 'control'. 
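A note on the no-alias assertion in TestNetplanRoundTrip above: PyYAML's Serializer names anchors from ANCHOR_TEMPLATE ('id%03d'), so the first shared node in a dump is emitted as '&id001', which is exactly the string the test searches for. A minimal standalone sketch of that check, assuming plain PyYAML (the helper name is illustrative, not a cloud-init API):

    import re
    import yaml

    def has_yaml_aliases(rendered):
        # The rendered text must still be loadable YAML before inspecting it.
        assert yaml.safe_load(rendered) is not None
        # PyYAML numbers anchors id001, id002, ...; finding '&id001' means
        # some node was serialized as an anchor/alias pair.
        return re.search(r'&id001', rendered) is not None

    doc = {'a': [1, 2]}
    doc['b'] = doc['a']  # the same list object twice -> alias when dumped
    print(has_yaml_aliases(yaml.dump(doc)))  # True with the default dumper

As the test comment notes, the round-trip load tolerates single aliases, but the renderer is expected to emit none, which keeps the output robust for any downstream parser.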
@@ -3054,17 +4564,17 @@ class TestEniRoundTrip(CiTestCase): 'iface eth0 inet static', ' address 172.23.31.42/26', ' gateway 172.23.31.2', - ('post-up route add -net 10.0.0.0 netmask 255.240.0.0 gw ' + ('post-up route add -net 10.0.0.0/12 gw ' '172.23.31.1 metric 0 || true'), - ('pre-down route del -net 10.0.0.0 netmask 255.240.0.0 gw ' + ('pre-down route del -net 10.0.0.0/12 gw ' '172.23.31.1 metric 0 || true'), - ('post-up route add -net 192.168.2.0 netmask 255.255.0.0 gw ' + ('post-up route add -net 192.168.2.0/16 gw ' '172.23.31.1 metric 0 || true'), - ('pre-down route del -net 192.168.2.0 netmask 255.255.0.0 gw ' + ('pre-down route del -net 192.168.2.0/16 gw ' '172.23.31.1 metric 0 || true'), - ('post-up route add -net 10.0.200.0 netmask 255.255.0.0 gw ' + ('post-up route add -net 10.0.200.0/16 gw ' '172.23.31.1 metric 1 || true'), - ('pre-down route del -net 10.0.200.0 netmask 255.255.0.0 gw ' + ('pre-down route del -net 10.0.200.0/16 gw ' '172.23.31.1 metric 1 || true'), ] found = files['/etc/network/interfaces'].splitlines() @@ -3072,6 +4582,77 @@ class TestEniRoundTrip(CiTestCase): self.assertEqual( expected, [line for line in found if line]) + def test_ipv6_static_routes(self): + # as reported in bug 1818669 + conf = [ + {'name': 'eno3', 'type': 'physical', + 'subnets': [{ + 'address': 'fd00::12/64', + 'dns_nameservers': ['fd00:2::15'], + 'gateway': 'fd00::1', + 'ipv6': True, + 'type': 'static', + 'routes': [{'netmask': '32', + 'network': 'fd00:12::', + 'gateway': 'fd00::2'}, + {'network': 'fd00:14::', + 'gateway': 'fd00::3'}, + {'destination': 'fe00:14::/48', + 'gateway': 'fe00::4', + 'metric': 500}, + {'gateway': '192.168.23.1', + 'metric': 999, + 'netmask': 24, + 'network': '192.168.23.0'}, + {'destination': '10.23.23.0/24', + 'gateway': '10.23.23.2', + 'metric': 300}]}]}, + ] + + files = self._render_and_read( + network_config={'config': conf, 'version': 1}) + expected = [ + 'auto lo', + 'iface lo inet loopback', + 'auto eno3', + 'iface eno3 inet6 static', + ' address fd00::12/64', + ' dns-nameservers fd00:2::15', + ' gateway fd00::1', + (' post-up route add -A inet6 fd00:12::/32 gw ' + 'fd00::2 || true'), + (' pre-down route del -A inet6 fd00:12::/32 gw ' + 'fd00::2 || true'), + (' post-up route add -A inet6 fd00:14::/64 gw ' + 'fd00::3 || true'), + (' pre-down route del -A inet6 fd00:14::/64 gw ' + 'fd00::3 || true'), + (' post-up route add -A inet6 fe00:14::/48 gw ' + 'fe00::4 metric 500 || true'), + (' pre-down route del -A inet6 fe00:14::/48 gw ' + 'fe00::4 metric 500 || true'), + (' post-up route add -net 192.168.23.0/24 gw ' + '192.168.23.1 metric 999 || true'), + (' pre-down route del -net 192.168.23.0/24 gw ' + '192.168.23.1 metric 999 || true'), + (' post-up route add -net 10.23.23.0/24 gw ' + '10.23.23.2 metric 300 || true'), + (' pre-down route del -net 10.23.23.0/24 gw ' + '10.23.23.2 metric 300 || true'), + + ] + found = files['/etc/network/interfaces'].splitlines() + + self.assertEqual( + expected, [line for line in found if line]) + + def testsimple_render_bond(self): + entry = NETWORK_CONFIGS['bond'] + files = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self.assertEqual( + entry['expected_eni'].splitlines(), + files['/etc/network/interfaces'].splitlines()) + class TestNetRenderers(CiTestCase): @mock.patch("cloudinit.net.renderers.sysconfig.available") @@ -3116,6 +4697,66 @@ class TestNetRenderers(CiTestCase): self.assertRaises(net.RendererNotFoundError, renderers.select, priority=['sysconfig', 'eni']) + 
@mock.patch("cloudinit.net.renderers.netplan.available") + @mock.patch("cloudinit.net.renderers.sysconfig.available") + @mock.patch("cloudinit.net.renderers.sysconfig.available_sysconfig") + @mock.patch("cloudinit.net.renderers.sysconfig.available_nm") + @mock.patch("cloudinit.net.renderers.eni.available") + @mock.patch("cloudinit.net.renderers.sysconfig.util.get_linux_distro") + def test_sysconfig_selected_on_sysconfig_enabled_distros(self, m_distro, + m_eni, m_sys_nm, + m_sys_scfg, + m_sys_avail, + m_netplan): + """sysconfig only selected on specific distros (rhel/sles).""" + + # Ubuntu with Network-Manager installed + m_eni.return_value = False # no ifupdown (ifquery) + m_sys_scfg.return_value = False # no sysconfig/ifup/ifdown + m_sys_nm.return_value = True # network-manager is installed + m_netplan.return_value = True # netplan is installed + m_sys_avail.return_value = False # no sysconfig on Ubuntu + m_distro.return_value = ('ubuntu', None, None) + self.assertEqual('netplan', renderers.select(priority=None)[0]) + + # Centos with Network-Manager installed + m_eni.return_value = False # no ifupdown (ifquery) + m_sys_scfg.return_value = False # no sysconfig/ifup/ifdown + m_sys_nm.return_value = True # network-manager is installed + m_netplan.return_value = False # netplan is not installed + m_sys_avail.return_value = True # sysconfig is available on centos + m_distro.return_value = ('centos', None, None) + self.assertEqual('sysconfig', renderers.select(priority=None)[0]) + + # OpenSuse with Network-Manager installed + m_eni.return_value = False # no ifupdown (ifquery) + m_sys_scfg.return_value = False # no sysconfig/ifup/ifdown + m_sys_nm.return_value = True # network-manager is installed + m_netplan.return_value = False # netplan is not installed + m_sys_avail.return_value = True # sysconfig is available on opensuse + m_distro.return_value = ('opensuse', None, None) + self.assertEqual('sysconfig', renderers.select(priority=None)[0]) + + @mock.patch("cloudinit.net.sysconfig.available_sysconfig") + @mock.patch("cloudinit.util.get_linux_distro") + def test_sysconfig_available_uses_variant_mapping(self, m_distro, m_avail): + m_avail.return_value = True + distro_values = [ + ('opensuse', '', ''), + ('opensuse-leap', '', ''), + ('opensuse-tumbleweed', '', ''), + ('sles', '', ''), + ('centos', '', ''), + ('fedora', '', ''), + ('redhat', '', ''), + ] + for (distro_name, distro_version, flavor) in distro_values: + m_distro.return_value = (distro_name, distro_version, flavor) + if hasattr(util.system_info, "cache_clear"): + util.system_info.cache_clear() + result = sysconfig.available() + self.assertTrue(result) + class TestGetInterfaces(CiTestCase): _data = {'bonds': ['bond1'], diff --git a/tests/unittests/test_net_freebsd.py b/tests/unittests/test_net_freebsd.py new file mode 100644 index 00000000..48296c30 --- /dev/null +++ b/tests/unittests/test_net_freebsd.py @@ -0,0 +1,19 @@ +from cloudinit import net + +from cloudinit.tests.helpers import (CiTestCase, mock, readResource) + +SAMPLE_FREEBSD_IFCONFIG_OUT = readResource("netinfo/freebsd-ifconfig-output") + + +class TestInterfacesByMac(CiTestCase): + + @mock.patch('cloudinit.util.subp') + @mock.patch('cloudinit.util.is_FreeBSD') + def test_get_interfaces_by_mac(self, mock_is_FreeBSD, mock_subp): + mock_is_FreeBSD.return_value = True + mock_subp.return_value = (SAMPLE_FREEBSD_IFCONFIG_OUT, 0) + a = net.get_interfaces_by_mac() + assert a == {'52:54:00:50:b7:0d': 'vtnet0', + '80:00:73:63:5c:48': 're0.33', + '02:14:39:0e:25:00': 'bridge0', + 
'02:ff:60:8c:f3:72': 'vnet0:11'} diff --git a/tests/unittests/test_reporting.py b/tests/unittests/test_reporting.py index e15ba6cf..6814030e 100644 --- a/tests/unittests/test_reporting.py +++ b/tests/unittests/test_reporting.py @@ -2,12 +2,12 @@ # # This file is part of cloud-init. See LICENSE file for license information. +from unittest import mock + from cloudinit import reporting from cloudinit.reporting import events from cloudinit.reporting import handlers -import mock - from cloudinit.tests.helpers import TestCase diff --git a/tests/unittests/test_reporting_hyperv.py b/tests/unittests/test_reporting_hyperv.py index 2e64c6c7..b3e083c6 100644 --- a/tests/unittests/test_reporting_hyperv.py +++ b/tests/unittests/test_reporting_hyperv.py @@ -1,19 +1,24 @@ # This file is part of cloud-init. See LICENSE file for license information. from cloudinit.reporting import events -from cloudinit.reporting import handlers +from cloudinit.reporting.handlers import HyperVKvpReportingHandler import json import os +import struct +import time +import re +from unittest import mock from cloudinit import util from cloudinit.tests.helpers import CiTestCase +from cloudinit.sources.helpers import azure class TestKvpEncoding(CiTestCase): def test_encode_decode(self): kvp = {'key': 'key1', 'value': 'value1'} - kvp_reporting = handlers.HyperVKvpReportingHandler() + kvp_reporting = HyperVKvpReportingHandler() data = kvp_reporting._encode_kvp_item(kvp['key'], kvp['value']) self.assertEqual(len(data), kvp_reporting.HV_KVP_RECORD_SIZE) decoded_kvp = kvp_reporting._decode_kvp_item(data) @@ -26,57 +31,9 @@ class TextKvpReporter(CiTestCase): self.tmp_file_path = self.tmp_path('kvp_pool_file') util.ensure_file(self.tmp_file_path) - def test_event_type_can_be_filtered(self): - reporter = handlers.HyperVKvpReportingHandler( - kvp_file_path=self.tmp_file_path, - event_types=['foo', 'bar']) - - reporter.publish_event( - events.ReportingEvent('foo', 'name', 'description')) - reporter.publish_event( - events.ReportingEvent('some_other', 'name', 'description3')) - reporter.q.join() - - kvps = list(reporter._iterate_kvps(0)) - self.assertEqual(1, len(kvps)) - - reporter.publish_event( - events.ReportingEvent('bar', 'name', 'description2')) - reporter.q.join() - kvps = list(reporter._iterate_kvps(0)) - self.assertEqual(2, len(kvps)) - - self.assertIn('foo', kvps[0]['key']) - self.assertIn('bar', kvps[1]['key']) - self.assertNotIn('some_other', kvps[0]['key']) - self.assertNotIn('some_other', kvps[1]['key']) - - def test_events_are_over_written(self): - reporter = handlers.HyperVKvpReportingHandler( - kvp_file_path=self.tmp_file_path) - - self.assertEqual(0, len(list(reporter._iterate_kvps(0)))) - - reporter.publish_event( - events.ReportingEvent('foo', 'name1', 'description')) - reporter.publish_event( - events.ReportingEvent('foo', 'name2', 'description')) - reporter.q.join() - self.assertEqual(2, len(list(reporter._iterate_kvps(0)))) - - reporter2 = handlers.HyperVKvpReportingHandler( - kvp_file_path=self.tmp_file_path) - reporter2.incarnation_no = reporter.incarnation_no + 1 - reporter2.publish_event( - events.ReportingEvent('foo', 'name3', 'description')) - reporter2.q.join() - - self.assertEqual(2, len(list(reporter2._iterate_kvps(0)))) - def test_events_with_higher_incarnation_not_over_written(self): - reporter = handlers.HyperVKvpReportingHandler( + reporter = HyperVKvpReportingHandler( kvp_file_path=self.tmp_file_path) - self.assertEqual(0, len(list(reporter._iterate_kvps(0)))) reporter.publish_event( @@ -86,7 +43,7 @@ 
class TextKvpReporter(CiTestCase): reporter.q.join() self.assertEqual(2, len(list(reporter._iterate_kvps(0)))) - reporter3 = handlers.HyperVKvpReportingHandler( + reporter3 = HyperVKvpReportingHandler( kvp_file_path=self.tmp_file_path) reporter3.incarnation_no = reporter.incarnation_no - 1 reporter3.publish_event( @@ -95,7 +52,7 @@ class TextKvpReporter(CiTestCase): self.assertEqual(3, len(list(reporter3._iterate_kvps(0)))) def test_finish_event_result_is_logged(self): - reporter = handlers.HyperVKvpReportingHandler( + reporter = HyperVKvpReportingHandler( kvp_file_path=self.tmp_file_path) reporter.publish_event( events.FinishReportingEvent('name2', 'description1', @@ -105,7 +62,7 @@ class TextKvpReporter(CiTestCase): def test_file_operation_issue(self): os.remove(self.tmp_file_path) - reporter = handlers.HyperVKvpReportingHandler( + reporter = HyperVKvpReportingHandler( kvp_file_path=self.tmp_file_path) reporter.publish_event( events.FinishReportingEvent('name2', 'description1', @@ -113,7 +70,7 @@ class TextKvpReporter(CiTestCase): reporter.q.join() def test_event_very_long(self): - reporter = handlers.HyperVKvpReportingHandler( + reporter = HyperVKvpReportingHandler( kvp_file_path=self.tmp_file_path) description = 'ab' * reporter.HV_KVP_EXCHANGE_MAX_VALUE_SIZE long_event = events.FinishReportingEvent( @@ -132,3 +89,123 @@ class TextKvpReporter(CiTestCase): self.assertEqual(msg_slice['msg_i'], i) full_description += msg_slice['msg'] self.assertEqual(description, full_description) + + def test_not_truncate_kvp_file_modified_after_boot(self): + with open(self.tmp_file_path, "wb+") as f: + kvp = {'key': 'key1', 'value': 'value1'} + data = (struct.pack("%ds%ds" % ( + HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_KEY_SIZE, + HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_VALUE_SIZE), + kvp['key'].encode('utf-8'), kvp['value'].encode('utf-8'))) + f.write(data) + cur_time = time.time() + os.utime(self.tmp_file_path, (cur_time, cur_time)) + + # reset this because the unit test framework + # has already polluted the class variable + HyperVKvpReportingHandler._already_truncated_pool_file = False + + reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) + kvps = list(reporter._iterate_kvps(0)) + self.assertEqual(1, len(kvps)) + + def test_truncate_stale_kvp_file(self): + with open(self.tmp_file_path, "wb+") as f: + kvp = {'key': 'key1', 'value': 'value1'} + data = (struct.pack("%ds%ds" % ( + HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_KEY_SIZE, + HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_VALUE_SIZE), + kvp['key'].encode('utf-8'), kvp['value'].encode('utf-8'))) + f.write(data) + + # set the time ways back to make it look like + # we had an old kvp file + os.utime(self.tmp_file_path, (1000000, 1000000)) + + # reset this because the unit test framework + # has already polluted the class variable + HyperVKvpReportingHandler._already_truncated_pool_file = False + + reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) + kvps = list(reporter._iterate_kvps(0)) + self.assertEqual(0, len(kvps)) + + @mock.patch('cloudinit.distros.uses_systemd') + @mock.patch('cloudinit.util.subp') + def test_get_boot_telemetry(self, m_subp, m_sysd): + reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) + datetime_pattern = r"\d{4}-[01]\d-[0-3]\dT[0-2]\d:[0-5]" + r"\d:[0-5]\d\.\d+([+-][0-2]\d:[0-5]\d|Z)" + + # get_boot_telemetry makes two subp calls to systemctl. 
We provide + # a list of values that the subp calls should return + m_subp.side_effect = [ + ('UserspaceTimestampMonotonic=1844838', ''), + ('InactiveExitTimestampMonotonic=3068203', '')] + m_sysd.return_value = True + + reporter.publish_event(azure.get_boot_telemetry()) + reporter.q.join() + kvps = list(reporter._iterate_kvps(0)) + self.assertEqual(1, len(kvps)) + + evt_msg = kvps[0]['value'] + if not re.search("kernel_start=" + datetime_pattern, evt_msg): + raise AssertionError("missing kernel_start timestamp") + if not re.search("user_start=" + datetime_pattern, evt_msg): + raise AssertionError("missing user_start timestamp") + if not re.search("cloudinit_activation=" + datetime_pattern, + evt_msg): + raise AssertionError( + "missing cloudinit_activation timestamp") + + def test_get_system_info(self): + reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) + pattern = r"[^=\s]+" + + reporter.publish_event(azure.get_system_info()) + reporter.q.join() + kvps = list(reporter._iterate_kvps(0)) + self.assertEqual(1, len(kvps)) + evt_msg = kvps[0]['value'] + + # the most important information is cloudinit version, + # kernel_version, and the distro variant. It is ok if + # if the rest is not available + if not re.search("cloudinit_version=" + pattern, evt_msg): + raise AssertionError("missing cloudinit_version string") + if not re.search("kernel_version=" + pattern, evt_msg): + raise AssertionError("missing kernel_version string") + if not re.search("variant=" + pattern, evt_msg): + raise AssertionError("missing distro variant string") + + def test_report_diagnostic_event(self): + reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) + + reporter.publish_event( + azure.report_diagnostic_event("test_diagnostic")) + reporter.q.join() + kvps = list(reporter._iterate_kvps(0)) + self.assertEqual(1, len(kvps)) + evt_msg = kvps[0]['value'] + + if "test_diagnostic" not in evt_msg: + raise AssertionError("missing expected diagnostic message") + + def test_unique_kvp_key(self): + reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) + evt1 = events.ReportingEvent( + "event_type", 'event_message', + "event_description") + reporter.publish_event(evt1) + + evt2 = events.ReportingEvent( + "event_type", 'event_message', + "event_description", timestamp=evt1.timestamp + 1) + reporter.publish_event(evt2) + + reporter.q.join() + kvps = list(reporter._iterate_kvps(0)) + self.assertEqual(2, len(kvps)) + self.assertNotEqual(kvps[0]["key"], kvps[1]["key"], + "duplicate keys for KVP entries") diff --git a/tests/unittests/test_runs/test_merge_run.py b/tests/unittests/test_runs/test_merge_run.py index d1ac4942..ff27a280 100644 --- a/tests/unittests/test_runs/test_merge_run.py +++ b/tests/unittests/test_runs/test_merge_run.py @@ -7,6 +7,7 @@ import tempfile from cloudinit.tests import helpers from cloudinit.settings import PER_INSTANCE +from cloudinit import safeyaml from cloudinit import stages from cloudinit import util @@ -26,7 +27,7 @@ class TestMergeRun(helpers.FilesystemMockingTestCase): 'system_info': {'paths': {'run_dir': new_root}} } ud = helpers.readResource('user_data.1.txt') - cloud_cfg = util.yaml_dumps(cfg) + cloud_cfg = safeyaml.dumps(cfg) util.ensure_dir(os.path.join(new_root, 'etc', 'cloud')) util.write_file(os.path.join(new_root, 'etc', 'cloud', 'cloud.cfg'), cloud_cfg) diff --git a/tests/unittests/test_runs/test_simple_run.py b/tests/unittests/test_runs/test_simple_run.py index d67c422c..cb3aae60 100644 --- 
a/tests/unittests/test_runs/test_simple_run.py +++ b/tests/unittests/test_runs/test_simple_run.py @@ -5,6 +5,7 @@ import os from cloudinit.settings import PER_INSTANCE +from cloudinit import safeyaml from cloudinit import stages from cloudinit.tests import helpers from cloudinit import util @@ -34,7 +35,7 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase): ], 'cloud_init_modules': ['write-files', 'spacewalk', 'runcmd'], } - cloud_cfg = util.yaml_dumps(self.cfg) + cloud_cfg = safeyaml.dumps(self.cfg) util.ensure_dir(os.path.join(self.new_root, 'etc', 'cloud')) util.write_file(os.path.join(self.new_root, 'etc', 'cloud', 'cloud.cfg'), cloud_cfg) @@ -130,7 +131,7 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase): # re-write cloud.cfg with unverified_modules override cfg = copy.deepcopy(self.cfg) cfg['unverified_modules'] = ['spacewalk'] # Would have skipped - cloud_cfg = util.yaml_dumps(cfg) + cloud_cfg = safeyaml.dumps(cfg) util.ensure_dir(os.path.join(self.new_root, 'etc', 'cloud')) util.write_file(os.path.join(self.new_root, 'etc', 'cloud', 'cloud.cfg'), cloud_cfg) @@ -159,7 +160,7 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase): cfg = copy.deepcopy(self.cfg) # Represent empty configuration in /etc/cloud/cloud.cfg cfg['cloud_init_modules'] = None - cloud_cfg = util.yaml_dumps(cfg) + cloud_cfg = safeyaml.dumps(cfg) util.ensure_dir(os.path.join(self.new_root, 'etc', 'cloud')) util.write_file(os.path.join(self.new_root, 'etc', 'cloud', 'cloud.cfg'), cloud_cfg) diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py index 73ae897f..0be41924 100644 --- a/tests/unittests/test_sshutil.py +++ b/tests/unittests/test_sshutil.py @@ -1,11 +1,19 @@ # This file is part of cloud-init. See LICENSE file for license information. 
-from mock import patch +from collections import namedtuple +from unittest.mock import patch from cloudinit import ssh_util from cloudinit.tests import helpers as test_helpers from cloudinit import util +# https://stackoverflow.com/questions/11351032/ +FakePwEnt = namedtuple( + 'FakePwEnt', + ['pw_dir', 'pw_gecos', 'pw_name', 'pw_passwd', 'pw_shell', 'pwd_uid']) +FakePwEnt.__new__.__defaults__ = tuple( + "UNSET_%s" % n for n in FakePwEnt._fields) + VALID_CONTENT = { 'dsa': ( @@ -326,4 +334,79 @@ class TestUpdateSshConfig(test_helpers.CiTestCase): m_write_file.assert_not_called() +class TestBasicAuthorizedKeyParse(test_helpers.CiTestCase): + def test_user(self): + self.assertEqual( + ["/opt/bobby/keys"], + ssh_util.render_authorizedkeysfile_paths( + "/opt/%u/keys", "/home/bobby", "bobby")) + + def test_multiple(self): + self.assertEqual( + ["/keys/path1", "/keys/path2"], + ssh_util.render_authorizedkeysfile_paths( + "/keys/path1 /keys/path2", "/home/bobby", "bobby")) + + def test_relative(self): + self.assertEqual( + ["/home/bobby/.secret/keys"], + ssh_util.render_authorizedkeysfile_paths( + ".secret/keys", "/home/bobby", "bobby")) + + def test_home(self): + self.assertEqual( + ["/homedirs/bobby/.keys"], + ssh_util.render_authorizedkeysfile_paths( + "%h/.keys", "/homedirs/bobby", "bobby")) + + +class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase): + + @patch("cloudinit.ssh_util.pwd.getpwnam") + def test_multiple_authorizedkeys_file_order1(self, m_getpwnam): + fpw = FakePwEnt(pw_name='bobby', pw_dir='/home2/bobby') + m_getpwnam.return_value = fpw + authorized_keys = self.tmp_path('authorized_keys') + util.write_file(authorized_keys, VALID_CONTENT['rsa']) + + user_keys = self.tmp_path('user_keys') + util.write_file(user_keys, VALID_CONTENT['dsa']) + + sshd_config = self.tmp_path('sshd_config') + util.write_file( + sshd_config, + "AuthorizedKeysFile %s %s" % (authorized_keys, user_keys)) + + (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( + fpw.pw_name, sshd_config) + content = ssh_util.update_authorized_keys( + auth_key_entries, []) + + self.assertEqual("%s/.ssh/authorized_keys" % fpw.pw_dir, auth_key_fn) + self.assertTrue(VALID_CONTENT['rsa'] in content) + self.assertTrue(VALID_CONTENT['dsa'] in content) + + @patch("cloudinit.ssh_util.pwd.getpwnam") + def test_multiple_authorizedkeys_file_order2(self, m_getpwnam): + fpw = FakePwEnt(pw_name='suzie', pw_dir='/home/suzie') + m_getpwnam.return_value = fpw + authorized_keys = self.tmp_path('authorized_keys') + util.write_file(authorized_keys, VALID_CONTENT['rsa']) + + user_keys = self.tmp_path('user_keys') + util.write_file(user_keys, VALID_CONTENT['dsa']) + + sshd_config = self.tmp_path('sshd_config') + util.write_file( + sshd_config, + "AuthorizedKeysFile %s %s" % (authorized_keys, user_keys)) + + (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( + fpw.pw_name, sshd_config) + content = ssh_util.update_authorized_keys(auth_key_entries, []) + + self.assertEqual("%s/.ssh/authorized_keys" % fpw.pw_dir, auth_key_fn) + self.assertTrue(VALID_CONTENT['rsa'] in content) + self.assertTrue(VALID_CONTENT['dsa'] in content) + # vi: ts=4 expandtab diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 5a14479a..9ff17f52 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -2,26 +2,21 @@ from __future__ import print_function +import io +import json import logging import os import re import shutil import stat -import tempfile - -import json -import six import 
sys +import tempfile import yaml +from unittest import mock from cloudinit import importer, util from cloudinit.tests import helpers -try: - from unittest import mock -except ImportError: - import mock - BASH = util.which('bash') BOGUS_COMMAND = 'this-is-not-expected-to-be-a-program-name' @@ -320,7 +315,7 @@ class TestLoadYaml(helpers.CiTestCase): def test_python_unicode(self): # complex type of python/unicode is explicitly allowed - myobj = {'1': six.text_type("FOOBAR")} + myobj = {'1': "FOOBAR"} safe_yaml = yaml.dump(myobj) self.assertEqual(util.load_yaml(blob=safe_yaml, default=self.mydefault), @@ -663,8 +658,8 @@ class TestMultiLog(helpers.FilesystemMockingTestCase): self.patchOS(self.root) self.patchUtils(self.root) self.patchOpen(self.root) - self.stdout = six.StringIO() - self.stderr = six.StringIO() + self.stdout = io.StringIO() + self.stderr = io.StringIO() self.patchStdoutAndStderr(self.stdout, self.stderr) def test_stderr_used_by_default(self): @@ -879,8 +874,8 @@ class TestSubp(helpers.CiTestCase): """Raised exc should have stderr, stdout as string if no decode.""" with self.assertRaises(util.ProcessExecutionError) as cm: util.subp([BOGUS_COMMAND], decode=True) - self.assertTrue(isinstance(cm.exception.stdout, six.string_types)) - self.assertTrue(isinstance(cm.exception.stderr, six.string_types)) + self.assertTrue(isinstance(cm.exception.stdout, str)) + self.assertTrue(isinstance(cm.exception.stderr, str)) def test_bunch_of_slashes_in_path(self): self.assertEqual("/target/my/path/", @@ -1171,4 +1166,10 @@ class TestGetProcEnv(helpers.TestCase): self.assertEqual({}, util.get_proc_env(1)) self.assertEqual(1, m_load_file.call_count) + def test_get_proc_ppid(self): + """get_proc_ppid returns correct parent pid value.""" + my_pid = os.getpid() + my_ppid = os.getppid() + self.assertEqual(my_ppid, util.get_proc_ppid(my_pid)) + # vi: ts=4 expandtab diff --git a/tests/unittests/test_vmware/test_custom_script.py b/tests/unittests/test_vmware/test_custom_script.py index 2d9519b0..f89f8157 100644 --- a/tests/unittests/test_vmware/test_custom_script.py +++ b/tests/unittests/test_vmware/test_custom_script.py @@ -1,10 +1,12 @@ # Copyright (C) 2015 Canonical Ltd. -# Copyright (C) 2017 VMware INC. +# Copyright (C) 2017-2019 VMware INC. # # Author: Maitreyee Saikia <msaikia@vmware.com> # # This file is part of cloud-init. See LICENSE file for license information. +import os +import stat from cloudinit import util from cloudinit.sources.helpers.vmware.imc.config_custom_script import ( CustomScriptConstant, @@ -18,6 +20,10 @@ from cloudinit.tests.helpers import CiTestCase, mock class TestVmwareCustomScript(CiTestCase): def setUp(self): self.tmpDir = self.tmp_dir() + # Mock the tmpDir as the root dir in VM. + self.execDir = os.path.join(self.tmpDir, ".customization") + self.execScript = os.path.join(self.execDir, + ".customize.sh") def test_prepare_custom_script(self): """ @@ -37,63 +43,67 @@ class TestVmwareCustomScript(CiTestCase): # Custom script exists. custScript = self.tmp_path("test-cust", self.tmpDir) - util.write_file(custScript, "test-CR-strip/r/r") - postCust = PostCustomScript("test-cust", self.tmpDir) - self.assertEqual("test-cust", postCust.scriptname) - self.assertEqual(self.tmpDir, postCust.directory) - self.assertEqual(custScript, postCust.scriptpath) - self.assertFalse(postCust.postreboot) - postCust.prepare_script() - # Check if all carraige returns are stripped from script. 
- self.assertFalse("/r" in custScript) + util.write_file(custScript, "test-CR-strip\r\r") + with mock.patch.object(CustomScriptConstant, + "CUSTOM_TMP_DIR", + self.execDir): + with mock.patch.object(CustomScriptConstant, + "CUSTOM_SCRIPT", + self.execScript): + postCust = PostCustomScript("test-cust", + self.tmpDir, + self.tmpDir) + self.assertEqual("test-cust", postCust.scriptname) + self.assertEqual(self.tmpDir, postCust.directory) + self.assertEqual(custScript, postCust.scriptpath) + postCust.prepare_script() - def test_rc_local_exists(self): - """ - This test is designed to verify the different scenarios associated - with the presence of rclocal. - """ - # test when rc local does not exist - postCust = PostCustomScript("test-cust", self.tmpDir) - with mock.patch.object(CustomScriptConstant, "RC_LOCAL", "/no/path"): - rclocal = postCust.find_rc_local() - self.assertEqual("", rclocal) - - # test when rc local exists - rclocalFile = self.tmp_path("vmware-rclocal", self.tmpDir) - util.write_file(rclocalFile, "# Run post-reboot guest customization", - omode="w") - with mock.patch.object(CustomScriptConstant, "RC_LOCAL", rclocalFile): - rclocal = postCust.find_rc_local() - self.assertEqual(rclocalFile, rclocal) - self.assertTrue(postCust.has_previous_agent, rclocal) - - # test when rc local is a symlink - rclocalLink = self.tmp_path("dummy-rclocal-link", self.tmpDir) - util.sym_link(rclocalFile, rclocalLink, True) - with mock.patch.object(CustomScriptConstant, "RC_LOCAL", rclocalLink): - rclocal = postCust.find_rc_local() - self.assertEqual(rclocalFile, rclocal) + # Custom script is copied with exec privilege + self.assertTrue(os.path.exists(self.execScript)) + st = os.stat(self.execScript) + self.assertTrue(st.st_mode & stat.S_IEXEC) + with open(self.execScript, "r") as f: + content = f.read() + self.assertEqual(content, "test-CR-strip") + # Check if all carraige returns are stripped from script. + self.assertFalse("\r" in content) def test_execute_post_cust(self): """ - This test is to identify if rclocal was properly populated to be - run after reboot. + This test is designed to verify the behavior after execute post + customization. """ - customscript = self.tmp_path("vmware-post-cust-script", self.tmpDir) - rclocal = self.tmp_path("vmware-rclocal", self.tmpDir) - # Create a temporary rclocal file - open(customscript, "w") - util.write_file(rclocal, "tests\nexit 0", omode="w") - postCust = PostCustomScript("vmware-post-cust-script", self.tmpDir) - with mock.patch.object(CustomScriptConstant, "RC_LOCAL", rclocal): - # Test that guest customization agent is not installed initially. - self.assertFalse(postCust.postreboot) - self.assertIs(postCust.has_previous_agent(rclocal), False) - postCust.install_agent() + # Prepare the customize package + postCustRun = self.tmp_path("post-customize-guest.sh", self.tmpDir) + util.write_file(postCustRun, "This is the script to run post cust") + userScript = self.tmp_path("test-cust", self.tmpDir) + util.write_file(userScript, "This is the post cust script") - # Assert rclocal has been modified to have guest customization - # agent. - self.assertTrue(postCust.postreboot) - self.assertTrue(postCust.has_previous_agent, rclocal) + # Mock the cc_scripts_per_instance dir and marker file. + # Create another tmp dir for cc_scripts_per_instance. 
+ ccScriptDir = self.tmp_dir() + ccScript = os.path.join(ccScriptDir, "post-customize-guest.sh") + markerFile = os.path.join(self.tmpDir, ".markerFile") + with mock.patch.object(CustomScriptConstant, + "CUSTOM_TMP_DIR", + self.execDir): + with mock.patch.object(CustomScriptConstant, + "CUSTOM_SCRIPT", + self.execScript): + with mock.patch.object(CustomScriptConstant, + "POST_CUSTOM_PENDING_MARKER", + markerFile): + postCust = PostCustomScript("test-cust", + self.tmpDir, + ccScriptDir) + postCust.execute() + # Check cc_scripts_per_instance and marker file + # are created. + self.assertTrue(os.path.exists(ccScript)) + with open(ccScript, "r") as f: + content = f.read() + self.assertEqual(content, + "This is the script to run post cust") + self.assertTrue(os.path.exists(markerFile)) # vi: ts=4 expandtab diff --git a/tests/unittests/test_vmware/test_guestcust_util.py b/tests/unittests/test_vmware/test_guestcust_util.py new file mode 100644 index 00000000..b175a998 --- /dev/null +++ b/tests/unittests/test_vmware/test_guestcust_util.py @@ -0,0 +1,72 @@ +# Copyright (C) 2019 Canonical Ltd. +# Copyright (C) 2019 VMware INC. +# +# Author: Xiaofeng Wang <xiaofengw@vmware.com> +# +# This file is part of cloud-init. See LICENSE file for license information. + +from cloudinit import util +from cloudinit.sources.helpers.vmware.imc.guestcust_util import ( + get_tools_config, +) +from cloudinit.tests.helpers import CiTestCase, mock + + +class TestGuestCustUtil(CiTestCase): + def test_get_tools_config_not_installed(self): + """ + This test is designed to verify the behavior if vmware-toolbox-cmd + is not installed. + """ + with mock.patch.object(util, 'which', return_value=None): + self.assertEqual( + get_tools_config('section', 'key', 'defaultVal'), 'defaultVal') + + def test_get_tools_config_internal_exception(self): + """ + This test is designed to verify the behavior if internal exception + is raised. + """ + with mock.patch.object(util, 'which', return_value='/dummy/path'): + with mock.patch.object(util, 'subp', + return_value=('key=value', b''), + side_effect=util.ProcessExecutionError( + "subp failed", exit_code=99)): + # verify return value is 'defaultVal', not 'value'. 
+ self.assertEqual( + get_tools_config('section', 'key', 'defaultVal'), + 'defaultVal') + + def test_get_tools_config_normal(self): + """ + This test is designed to verify the value could be parsed from + key = value of the given [section] + """ + with mock.patch.object(util, 'which', return_value='/dummy/path'): + # value is not blank + with mock.patch.object(util, 'subp', + return_value=('key = value ', b'')): + self.assertEqual( + get_tools_config('section', 'key', 'defaultVal'), + 'value') + # value is blank + with mock.patch.object(util, 'subp', + return_value=('key = ', b'')): + self.assertEqual( + get_tools_config('section', 'key', 'defaultVal'), + '') + # value contains = + with mock.patch.object(util, 'subp', + return_value=('key=Bar=Wark', b'')): + self.assertEqual( + get_tools_config('section', 'key', 'defaultVal'), + 'Bar=Wark') + + # value contains specific characters + with mock.patch.object(util, 'subp', + return_value=('[a] b.c_d=e-f', b'')): + self.assertEqual( + get_tools_config('section', 'key', 'defaultVal'), + 'e-f') + +# vi: ts=4 expandtab diff --git a/tests/unittests/test_vmware_config_file.py b/tests/unittests/test_vmware_config_file.py index f47335ea..16343ed2 100644 --- a/tests/unittests/test_vmware_config_file.py +++ b/tests/unittests/test_vmware_config_file.py @@ -62,13 +62,13 @@ class TestVmwareConfigFile(CiTestCase): (md1, _, _) = read_vmware_imc(conf) self.assertIn(instance_id_prefix, md1["instance-id"]) - self.assertEqual(len(md1["instance-id"]), len(instance_id_prefix) + 8) + self.assertEqual(md1["instance-id"], 'iid-vmware-imc') (md2, _, _) = read_vmware_imc(conf) self.assertIn(instance_id_prefix, md2["instance-id"]) - self.assertEqual(len(md2["instance-id"]), len(instance_id_prefix) + 8) + self.assertEqual(md2["instance-id"], 'iid-vmware-imc') - self.assertNotEqual(md1["instance-id"], md2["instance-id"]) + self.assertEqual(md2["instance-id"], md1["instance-id"]) def test_configfile_static_2nics(self): """Tests Config class for a configuration with two static NICs.""" diff --git a/tools/.lp-to-git-user b/tools/.lp-to-git-user new file mode 100644 index 00000000..6b20d360 --- /dev/null +++ b/tools/.lp-to-git-user @@ -0,0 +1,28 @@ +{ + "adobrawy": "ad-m", + "afranceschini": "andreaf74", + "ahosmanmsft": "AOhassan", + "andreipoltavchenko": "pa-yourserveradmin-com", + "askon": "ask0n", + "bitfehler": "bitfehler", + "chad.smith": "blackboxsw", + "d-info-e": "do3meli", + "daniel-thewatkins": "OddBloke", + "eric-lafontaine1": "elafontaine", + "fredlefebvre": "fred-lefebvre", + "goneri": "goneri", + "harald-jensas": "hjensas", + "i.galic": "igalic", + "larsks": "larsks", + "legovini": "paride", + "louis": "karibou", + "madhuri-rai07": "madhuri-rai07", + "otubo": "otubo", + "pengpengs": "PengpengSun", + "powersj": "powersj", + "raharper": "raharper", + "rjschwei": "rjschwei", + "tribaal": "chrisglass", + "trstringer": "trstringer", + "xiaofengw": "xiaofengw-vmware" +}
\ No newline at end of file diff --git a/tools/build-on-freebsd b/tools/build-on-freebsd index d23fde2b..876368a9 100755 --- a/tools/build-on-freebsd +++ b/tools/build-on-freebsd @@ -3,37 +3,42 @@ # installing cloud-init. This script takes care of building and installing. It # will optionally make a first run at the end. +set -eux + fail() { echo "FAILED:" "$@" 1>&2; exit 1; } +PYTHON="${PYTHON:-python3}" +if [ ! $(which ${PYTHON}) ]; then + echo "Please install python first." + exit 1 +fi +py_prefix=$(${PYTHON} -c 'import sys; print("py%d%d" % (sys.version_info.major, sys.version_info.minor))') + # Check dependencies: depschecked=/tmp/c-i.dependencieschecked pkgs=" - bash - dmidecode - e2fsprogs - py27-Jinja2 - py27-boto - py27-cheetah - py27-configobj - py27-jsonpatch - py27-jsonpointer - py27-oauthlib - py27-requests - py27-serial - py27-six - py27-yaml - python - sudo + bash + dmidecode + e2fsprogs + $py_prefix-Jinja2 + $py_prefix-boto + $py_prefix-configobj + $py_prefix-jsonpatch + $py_prefix-jsonpointer + $py_prefix-jsonschema + $py_prefix-oauthlib + $py_prefix-requests + $py_prefix-serial + $py_prefix-six + $py_prefix-yaml + sudo " -[ -f "$depschecked" ] || pkg install ${pkgs} || fail "install packages" +[ -f "$depschecked" ] || pkg install --yes ${pkgs} || fail "install packages" touch $depschecked -# Required but unavailable port/pkg: py27-jsonpatch py27-jsonpointer -# Luckily, the install step will take care of this by installing it from pypi... - # Build the code and install in /usr/local/: -python setup.py build -python setup.py install -O1 --skip-build --prefix /usr/local/ --init-system sysvinit_freebsd +${PYTHON} setup.py build +${PYTHON} setup.py install -O1 --skip-build --prefix /usr/local/ --init-system sysvinit_freebsd # Enable cloud-init in /etc/rc.conf: sed -i.bak -e "/cloudinit_enable=.*/d" /etc/rc.conf @@ -41,21 +46,21 @@ echo 'cloudinit_enable="YES"' >> /etc/rc.conf echo "Installation completed." -if [ "$1" = "run" ]; then - echo "Ok, now let's see if it works." +if [ "$#" -ge 1 ] && [ "$1" = "run" ]; then + echo "Ok, now let's see if it works." - # Backup SSH keys - mv /etc/ssh/ssh_host_* /tmp/ + # Backup SSH keys + mv /etc/ssh/ssh_host_* /tmp/ - # Remove old metadata - rm -rf /var/lib/cloud + # Remove old metadata + rm -rf /var/lib/cloud - # Just log everything, quick&dirty - rm /usr/local/etc/cloud/cloud.cfg.d/05_logging.cfg + # Just log everything, quick&dirty + rm /usr/local/etc/cloud/cloud.cfg.d/05_logging.cfg - # Start: - /usr/local/etc/rc.d/cloudinit start + # Start: + /usr/local/etc/rc.d/cloudinit start - # Restore SSH keys - mv /tmp/ssh_host_* /etc/ssh/ + # Restore SSH keys + mv /tmp/ssh_host_* /etc/ssh/ fi diff --git a/tools/cloud-init-per b/tools/cloud-init-per index 7d6754b6..fcd1ea79 100755 --- a/tools/cloud-init-per +++ b/tools/cloud-init-per @@ -38,7 +38,7 @@ fi [ "$1" = "-h" -o "$1" = "--help" ] && { Usage ; exit 0; } [ $# -ge 3 ] || { Usage 1>&2; exit 1; } freq=$1 -name=$2 +name=$(echo $2 | sed 's/-/_/g') shift 2; [ "${name#*/}" = "${name}" ] || fail "name cannot contain a /" @@ -53,6 +53,12 @@ esac [ -d "${sem%/*}" ] || mkdir -p "${sem%/*}" || fail "failed to make directory for ${sem}" +# Rename legacy sem files with dashes in their names. Do not overwrite existing +# sem files to prevent clobbering those which may have been created from calls +# outside of cloud-init. +sem_legacy=$(echo $sem | sed 's/_/-/g') +[ "$sem" != "$sem_legacy" -a -e "$sem_legacy" ] && mv -n "$sem_legacy" "$sem" + [ "$freq" != "always" -a -e "$sem" ] && exit 0 "$@" ret=$? 
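The cloud-init-per hunk just above does two related things: new marker ("sem") names are canonicalized to underscores, and a marker left behind by an older run under the dashed spelling is renamed once, with mv -n refusing to clobber a file that already exists under the new name. A rough Python restatement of that flow, assuming a hypothetical <name>.<freq> marker layout (sem_path is not a cloud-init function):

    import os

    def sem_path(sem_dir, name, freq):
        name = name.replace('-', '_')  # canonical marker spelling
        sem = os.path.join(sem_dir, '%s.%s' % (name, freq))
        # Derive the pre-change spelling the same blanket way the shell
        # sed call does, then migrate it only if the new name is free.
        legacy = sem.replace('_', '-')
        if legacy != sem and os.path.exists(legacy) \
                and not os.path.exists(sem):
            os.rename(legacy, sem)  # mirrors `mv -n legacy sem`
        return sem

Not overwriting an existing marker matters because, as the comment in the hunk notes, semaphores may also have been created by calls outside of cloud-init.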
diff --git a/tools/ds-identify b/tools/ds-identify index b78b2731..c93d4a77 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -124,7 +124,7 @@ DI_DSNAME="" # be searched if there is no setting found in config. DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \ CloudSigma CloudStack DigitalOcean AliYun Ec2 GCE OpenNebula OpenStack \ -OVF SmartOS Scaleway Hetzner IBMCloud Oracle" +OVF SmartOS Scaleway Hetzner IBMCloud Oracle Exoscale RbxCloud" DI_DSLIST="" DI_MODE="" DI_ON_FOUND="" @@ -179,13 +179,39 @@ debug() { echo "$@" 1>&3 } +dmi_decode() { + local sys_field="$1" dmi_field="" val="" + command -v dmidecode >/dev/null 2>&1 || { + warn "No dmidecode program. Cannot read $sys_field." + return 1 + } + case "$1" in + sys_vendor) dmi_field="system-manufacturer";; + product_name) dmi_field="system-product-name";; + product_uuid) dmi_field="system-uuid";; + product_serial) dmi_field="system-serial-number";; + chassis_asset_tag) dmi_field="chassis-asset-tag";; + *) error "Unknown field $sys_field. Cannot call dmidecode." + return 1;; + esac + val=$(dmidecode --quiet "--string=$dmi_field" 2>/dev/null) || return 1 + _RET="$val" +} + get_dmi_field() { local path="${PATH_SYS_CLASS_DMI_ID}/$1" - if [ ! -f "$path" ] || [ ! -r "$path" ]; then - _RET="$UNAVAILABLE" + _RET="$UNAVAILABLE" + if [ -d "${PATH_SYS_CLASS_DMI_ID}" ]; then + if [ -f "$path" ] && [ -r "$path" ]; then + read _RET < "${path}" || _RET="$ERROR" + return + fi + # if `/sys/class/dmi/id` exists, but not the object we're looking for, + # do *not* fallback to dmidecode! return fi - read _RET < "${path}" || _RET="$ERROR" + dmi_decode "$1" || _RET="$ERROR" + return } block_dev_with_label() { @@ -267,6 +293,31 @@ detect_virt() { if [ $r -eq 0 ] || { [ $r -ne 0 ] && [ "$out" = "none" ]; }; then virt="$out" fi + elif [ "$DI_UNAME_KERNEL_NAME" = "FreeBSD" ]; then + # Map FreeBSD's vm_guest names to those systemd-detect-virt that + # don't match up. 
See + https://github.com/freebsd/freebsd/blob/master/sys/kern/subr_param.c#L144-L160 + https://www.freedesktop.org/software/systemd/man/systemd-detect-virt.html + # + # systemd | kern.vm_guest + # ---------------------+--------------- + # none | none + # kvm | kvm + # vmware | vmware + # microsoft | hv + # oracle | vbox + # xen | xen + # parallels | parallels + # bhyve | bhyve + # vm-other | generic + out=$(sysctl -qn kern.vm_guest 2>/dev/null) && { + case "$out" in + hv) virt="microsoft" ;; + vbox) virt="oracle" ;; + generic) virt="vm-other";; + *) virt="$out" + esac + } fi _RET="$virt" } @@ -553,6 +604,11 @@ dscheck_CloudStack() { return $DS_NOT_FOUND } +dscheck_Exoscale() { + dmi_product_name_matches "Exoscale*" && return $DS_FOUND + return $DS_NOT_FOUND +} + dscheck_CloudSigma() { # http://paste.ubuntu.com/23624795/ dmi_product_name_matches "CloudSigma" && return $DS_FOUND @@ -620,7 +676,7 @@ dscheck_MAAS() { } dscheck_NoCloud() { - local fslabel="cidata" d="" + local fslabel="cidata CIDATA" d="" case " ${DI_KERNEL_CMDLINE} " in *\ ds=nocloud*) return ${DS_FOUND};; esac @@ -632,9 +688,10 @@ dscheck_NoCloud() { check_seed_dir "$d" meta-data user-data && return ${DS_FOUND} check_writable_seed_dir "$d" meta-data user-data && return ${DS_FOUND} done - if has_fs_with_label "${fslabel}"; then + if has_fs_with_label $fslabel; then return ${DS_FOUND} fi + return ${DS_NOT_FOUND} } @@ -696,6 +753,11 @@ dscheck_OpenNebula() { return ${DS_NOT_FOUND} } +dscheck_RbxCloud() { + has_fs_with_label "CLOUDMD" "cloudmd" && return ${DS_FOUND} + return ${DS_NOT_FOUND} +} + ovf_vmware_guest_customization() { # vmware guest customization @@ -762,13 +824,37 @@ is_cdrom_ovf() { # explicitly skip known labels of other types. rd_rdfe is azure. case "$label" in - config-2|CONFIG-2|rd_rdfe_stable*|cidata) return 1;; + config-2|CONFIG-2|rd_rdfe_stable*|cidata|CIDATA) return 1;; esac + # skip devices whose size is 10MB or larger + local size="" sfile="${PATH_SYS_CLASS_BLOCK}/${dev##*/}/size" + [ -f "$sfile" ] || return 1 + read size <"$sfile" || { warn "failed reading from $sfile"; return 1; } + # size is in 512 byte units. 
so convert to MB (integer division) + if [ $((size/2048)) -ge 10 ]; then + debug 2 "$dev: size $((size/2048))MB is considered too large for OVF" + return 1 + fi + local idstr="http://schemas.dmtf.org/ovf/environment/1" grep --quiet --ignore-case "$idstr" "${PATH_ROOT}$dev" } +has_ovf_cdrom() { + # DI_ISO9660_DEVS is <device>=label,<device>=label2 + # like /dev/sr0=OVF-TRANSPORT,/dev/other=with spaces + if [ "${DI_ISO9660_DEVS#${UNAVAILABLE}:}" = "${DI_ISO9660_DEVS}" ]; then + local oifs="$IFS" + # shellcheck disable=2086 + { IFS=","; set -- ${DI_ISO9660_DEVS}; IFS="$oifs"; } + for tok in "$@"; do + is_cdrom_ovf "${tok%%=*}" "${tok#*=}" && return 0 + done + fi + return 1 +} + dscheck_OVF() { check_seed_dir ovf ovf-env.xml && return "${DS_FOUND}" @@ -779,20 +865,9 @@ dscheck_OVF() { ovf_vmware_transport_guestinfo && return "${DS_FOUND}" - # DI_ISO9660_DEVS is <device>=label,<device>=label2 - # like /dev/sr0=OVF-TRANSPORT,/dev/other=with spaces - if [ "${DI_ISO9660_DEVS#${UNAVAILABLE}:}" = "${DI_ISO9660_DEVS}" ]; then - local oifs="$IFS" - # shellcheck disable=2086 - { IFS=","; set -- ${DI_ISO9660_DEVS}; IFS="$oifs"; } - for tok in "$@"; do - is_cdrom_ovf "${tok%%=*}" "${tok#*=}" && return $DS_FOUND - done - fi + has_ovf_cdrom && return "${DS_FOUND}" - if ovf_vmware_guest_customization; then - return ${DS_FOUND} - fi + ovf_vmware_guest_customization && return "${DS_FOUND}" return ${DS_NOT_FOUND} } @@ -872,9 +947,18 @@ ec2_identify_platform() { local default="$1" local serial="${DI_DMI_PRODUCT_SERIAL}" - # brightbox https://bugs.launchpad.net/cloud-init/+bug/1661693 case "$serial" in - *brightbox.com) _RET="Brightbox"; return 0;; + *.brightbox.com) _RET="Brightbox"; return 0;; + esac + + local asset_tag="${DI_DMI_CHASSIS_ASSET_TAG}" + case "$asset_tag" in + *.zstack.io) _RET="ZStack"; return 0;; + esac + + local vendor="${DI_DMI_SYS_VENDOR}" + case "$vendor" in + e24cloud) _RET="E24cloud"; return 0;; esac # AWS http://docs.aws.amazon.com/AWSEC2/ @@ -978,6 +1062,14 @@ dscheck_OpenStack() { return ${DS_FOUND} fi + # LP: #1669875 : allow identification of OpenStack by asset tag + if dmi_chassis_asset_tag_matches "$nova"; then + return ${DS_FOUND} + fi + if dmi_chassis_asset_tag_matches "$compute"; then + return ${DS_FOUND} + fi + # LP: #1715241 : arch other than intel are not identified properly. case "$DI_UNAME_MACHINE" in i?86|x86_64) :;; diff --git a/tools/make-tarball b/tools/make-tarball index 8d540139..462e7d04 100755 --- a/tools/make-tarball +++ b/tools/make-tarball @@ -15,24 +15,27 @@ Usage: ${0##*/} [revision] options: -h | --help print usage -o | --output FILE write to file + --version VERSION Set the version used in the tarball. Default value is determined with 'git describe'. 
--orig-tarball Write file cloud-init_<version>.orig.tar.gz --long Use git describe --long for versioning EOF } short_opts="ho:v" -long_opts="help,output:,orig-tarball,long" +long_opts="help,output:,version:,orig-tarball,long" getopt_out=$(getopt --name "${0##*/}" \ --options "${short_opts}" --long "${long_opts}" -- "$@") && eval set -- "${getopt_out}" || { Usage 1>&2; exit 1; } long_opt="" orig_opt="" +version="" while [ $# -ne 0 ]; do cur=$1; next=$2 case "$cur" in -h|--help) Usage; exit 0;; -o|--output) output=$next; shift;; + --version) version=$next; shift;; --long) long_opt="--long";; --orig-tarball) orig_opt=".orig";; --) shift; break;; @@ -41,7 +44,12 @@ while [ $# -ne 0 ]; do done rev=${1:-HEAD} -version=$(git describe --abbrev=8 "--match=[0-9]*" ${long_opt} $rev) +if [ -z "$version" ]; then + version=$(git describe --abbrev=8 "--match=[0-9]*" ${long_opt} $rev) +elif [ ! -z "$long_opt" ]; then + echo "WARNING: --long has no effect when --version is passed" >&2 + exit 1 +fi archive_base="cloud-init-$version" if [ -z "$output" ]; then diff --git a/tools/migrate-lp-user-to-github b/tools/migrate-lp-user-to-github new file mode 100755 index 00000000..f1247cb3 --- /dev/null +++ b/tools/migrate-lp-user-to-github @@ -0,0 +1,243 @@ +#!/usr/bin/env python3 +"""Link your Launchpad user to github, proposing branches to LP and Github""" + +from argparse import ArgumentParser +from subprocess import Popen, PIPE +import os +import sys + +try: + from launchpadlib.launchpad import Launchpad +except ImportError: + print("Missing python launchpadlib dependency to create branches for you. " + "Install with: sudo apt-get install python3-launchpadlib") + sys.exit(1) + +if "avoid-pep8-E402-import-not-top-of-file": + _tdir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) + sys.path.insert(0, _tdir) + from cloudinit import util + + +DRYRUN = False +LP_TO_GIT_USER_FILE='.lp-to-git-user' +MIGRATE_BRANCH_NAME='migrate-lp-to-github' +GITHUB_PULL_URL='https://github.com/canonical/cloud-init/compare/master...{github_user}:{branch}' +GH_UPSTREAM_URL='https://github.com/canonical/cloud-init' + + +def error(message): + if isinstance(message, bytes): + message = message.decode('utf-8') + log('ERROR: {error}'.format(error=message)) + sys.exit(1) + + +def log(message): + print(message) + + +def subp(cmd, skip=False): + prefix = 'SKIPPED: ' if skip else '$ ' + log('{prefix}{command}'.format(prefix=prefix, command=' '.join(cmd))) + if skip: + return + proc = Popen(cmd, stdout=PIPE, stderr=PIPE) + out, err = proc.communicate() + if proc.returncode: + error(err if err else out) + return out.decode('utf-8') + + +LP_GIT_PATH_TMPL = 'git+ssh://{launchpad_user}@git.launchpad.net/' +LP_UPSTREAM_PATH_TMPL = LP_GIT_PATH_TMPL + 'cloud-init' +LP_REMOTE_PATH_TMPL = LP_GIT_PATH_TMPL + '~{launchpad_user}/cloud-init' +GITHUB_REMOTE_PATH_TMPL = 'git@github.com:{github_user}/cloud-init.git' + + +# Comment templates +COMMIT_MSG_TMPL = '''\ +lp-to-git-users: adding {gh_username} + +Mapped from {lp_username} +''' +PUBLISH_DIR='/tmp/cloud-init-lp-to-github-migration' + +def get_parser(): + parser = ArgumentParser(description=__doc__) + parser.add_argument( + '--dryrun', required=False, default=False, action='store_true', + help=('Run commands and review operation in dryrun mode, ' + 'making no changes.')) + parser.add_argument('launchpad_user', help='Your launchpad username.') + parser.add_argument('github_user', help='Your github username.') + parser.add_argument( + '--local-repo-dir', required=False, dest='repo_dir', 
help=('The name of the local directory into which we clone.' + ' Default: {}'.format(PUBLISH_DIR))) + parser.add_argument( + '--upstream-branch', required=False, dest='upstream', + default='origin/master', + help=('The name of remote branch target into which we will merge.' + ' Default: origin/master')) + parser.add_argument( + '-v', '--verbose', required=False, default=False, action='store_true', + help=('Print all actions.')) + return parser + + +def create_publish_branch(upstream, publish_branch): + '''Create clean publish branch target in the current git repo.''' + branches = subp(['git', 'branch']) + upstream_remote, upstream_branch = upstream.split('/', 1) + subp(['git', 'checkout', upstream_branch]) + subp(['git', 'pull']) + if publish_branch in branches: + subp(['git', 'branch', '-D', publish_branch]) + subp(['git', 'checkout', upstream, '-b', publish_branch]) + + +def add_lp_and_github_remotes(lp_user, gh_user): + """Add lp and github remotes if not present. + + @return Tuple with (lp_remote_name, gh_remote_name) + """ + lp_remote = LP_REMOTE_PATH_TMPL.format(launchpad_user=lp_user) + gh_remote = GITHUB_REMOTE_PATH_TMPL.format(github_user=gh_user) + remotes = subp(['git', 'remote', '-v']) + lp_remote_name = gh_remote_name = None + for remote in remotes.splitlines(): + if not remote: + continue + remote_name, remote_url, _operation = remote.split() + if lp_remote == remote_url: + lp_remote_name = remote_name + elif gh_remote == remote_url: + gh_remote_name = remote_name + if not lp_remote_name: + log("launchpad: Creating git remote launchpad-{} to point at your" + " LP repo".format(lp_user)) + lp_remote_name = 'launchpad-{}'.format(lp_user) + subp(['git', 'remote', 'add', lp_remote_name, lp_remote]) + try: + subp(['git', 'fetch', lp_remote_name]) + except: + log("launchpad: Pushing to ensure LP repo exists") + subp(['git', 'push', lp_remote_name, 'master:master']) + subp(['git', 'fetch', lp_remote_name]) + if not gh_remote_name: + log("github: Creating git remote github-{} to point at your" + " GH repo".format(gh_user)) + gh_remote_name = 'github-{}'.format(gh_user) + subp(['git', 'remote', 'add', gh_remote_name, gh_remote]) + try: + subp(['git', 'fetch', gh_remote_name]) + except: + log("ERROR: [github] Could not fetch remote '{remote}'." 
+ "Please create a fork for your github user by clicking 'Fork'" + " from {gh_upstream}".format( + remote=gh_remote, gh_upstream=GH_UPSTREAM_URL)) + sys.exit(1) + return (lp_remote_name, gh_remote_name) + + +def create_migration_branch( + branch_name, upstream, lp_user, gh_user, commit_msg): + """Create an LP to Github migration branch and add lp_user->gh_user.""" + log("Creating a migration branch: {} adding your users".format( + MIGRATE_BRANCH_NAME)) + create_publish_branch(upstream, MIGRATE_BRANCH_NAME) + lp_to_git_map = {} + lp_to_git_file = os.path.join(os.getcwd(), 'tools', LP_TO_GIT_USER_FILE) + if os.path.exists(lp_to_git_file): + with open(lp_to_git_file) as stream: + lp_to_git_map = util.load_json(stream.read()) + if gh_user in lp_to_git_map.values(): + raise RuntimeError( + "github user '{}' already in {}".format(gh_user, lp_to_git_file)) + if lp_user in lp_to_git_map: + raise RuntimeError( + "launchpad user '{}' already in {}".format( + lp_user, lp_to_git_file)) + lp_to_git_map[lp_user] = gh_user + with open(lp_to_git_file, 'w') as stream: + stream.write(util.json_dumps(lp_to_git_map)) + subp(['git', 'add', lp_to_git_file]) + commit_file = os.path.join(os.path.dirname(os.getcwd()), 'commit.msg') + with open(commit_file, 'wb') as stream: + stream.write(commit_msg.encode('utf-8')) + subp(['git', 'commit', '--all', '-F', commit_file]) + + +def main(): + global DRYRUN + global VERBOSITY + parser = get_parser() + args = parser.parse_args() + DRYRUN = args.dryrun + VERBOSITY = 1 if args.verbose else 0 + repo_dir = args.repo_dir or PUBLISH_DIR + if not os.path.exists(repo_dir): + cleanup_repo_dir = True + subp(['git', 'clone', + LP_UPSTREAM_PATH_TMPL.format(launchpad_user=args.launchpad_user), + repo_dir]) + else: + cleanup_repo_dir = False + cwd = os.getcwd() + os.chdir(repo_dir) + log("Syncing master branch with upstream") + subp(['git', 'checkout', 'master']) + subp(['git', 'pull']) + try: + lp_remote_name, gh_remote_name = add_lp_and_github_remotes( + args.launchpad_user, args.github_user) + commit_msg = COMMIT_MSG_TMPL.format( + gh_username=args.github_user, lp_username=args.launchpad_user) + create_migration_branch( + MIGRATE_BRANCH_NAME, args.upstream, args.launchpad_user, + args.github_user, commit_msg) + + for push_remote in (lp_remote_name, gh_remote_name): + subp(['git', 'push', push_remote, MIGRATE_BRANCH_NAME, '--force']) + except Exception as e: + error('Failed setting up migration branches: {0}'.format(e)) + finally: + os.chdir(cwd) + if cleanup_repo_dir and os.path.exists(repo_dir): + util.del_dir(repo_dir) + # Make merge request on LP + log("[launchpad] Automatically creating merge proposal using launchpadlib") + lp = Launchpad.login_with( + "server-team github-migration tool", 'production', version='devel') + master = lp.git_repositories.getByPath( + path='cloud-init').getRefByPath(path='master') + LP_BRANCH_PATH='~{launchpad_user}/cloud-init/+git/cloud-init' + lp_git_repo = lp.git_repositories.getByPath( + path=LP_BRANCH_PATH.format(launchpad_user=args.launchpad_user)) + lp_user_migrate_branch = lp_git_repo.getRefByPath( + path='refs/heads/migrate-lp-to-github') + lp_merge_url = ( + 'https://code.launchpad.net/' + + LP_BRANCH_PATH.format(launchpad_user=args.launchpad_user) + + '/+ref/' + MIGRATE_BRANCH_NAME) + try: + lp_user_migrate_branch.createMergeProposal( + commit_message=commit_msg, merge_target=master, needs_review=True) + except Exception: + log('[launchpad] active merge proposal already exists at:\n' + '{url}\n'.format(url=lp_merge_url)) + else: + 
+        log("[launchpad] Merge proposal created at:\n{url}.\n".format(
+            url=lp_merge_url))
+    log("To link your account to github open your browser and"
+        " click 'Create pull request' at the following URL:\n"
+        "{url}".format(url=GITHUB_PULL_URL.format(
+            github_user=args.github_user, branch=MIGRATE_BRANCH_NAME)))
+    if os.path.exists(repo_dir):
+        util.del_dir(repo_dir)
+    return 0
+
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/tools/read-version b/tools/read-version
index 06fd61a8..92e9fc96 100755
--- a/tools/read-version
+++ b/tools/read-version
@@ -45,14 +45,58 @@ def which(program):
     return None
 
 
+def is_gitdir(path):
+    # Return boolean indicating if path is a git tree.
+    git_meta = os.path.join(path, '.git')
+    if os.path.isdir(git_meta):
+        return True
+    if os.path.exists(git_meta):
+        # in a git worktree, .git is a file with 'gitdir: x'
+        with open(git_meta, "rb") as fp:
+            if b'gitdir:' in fp.read():
+                return True
+    return False
+
+
 use_long = '--long' in sys.argv or os.environ.get('CI_RV_LONG')
 use_tags = '--tags' in sys.argv or os.environ.get('CI_RV_TAGS')
 output_json = '--json' in sys.argv
 
 src_version = ci_version.version_string()
 version_long = None
-version = src_version
-version_long = None
+
+# If we're performing CI for a new release branch (which our tooling creates
+# with an "upstream/" prefix), then we don't want to enforce strict version
+# matching because we know it will fail.
+is_release_branch_ci = (
+    os.environ.get("TRAVIS_PULL_REQUEST_BRANCH", "").startswith("upstream/")
+)
+if is_gitdir(_tdir) and which("git") and not is_release_branch_ci:
+    flags = []
+    if use_tags:
+        flags = ['--tags']
+    cmd = ['git', 'describe', '--abbrev=8', '--match=[0-9]*'] + flags
+
+    try:
+        version = tiny_p(cmd).strip()
+    except RuntimeError:
+        version = None
+
+    if version is None or not version.startswith(src_version):
+        sys.stderr.write("git describe version (%s) differs from "
+                         "cloudinit.version (%s)\n" % (version, src_version))
+        sys.stderr.write(
+            "Please get the latest upstream tags.\n"
+            "As an example, this can be done with the following:\n"
+            "$ git remote add upstream https://git.launchpad.net/cloud-init\n"
+            "$ git fetch upstream --tags\n"
+        )
+        sys.exit(1)
+
+    version_long = tiny_p(cmd + ["--long"]).strip()
+else:
+    version = src_version
+    version_long = None
 
 # version is X.Y.Z[+xxx.gHASH]
 # version_long is None or X.Y.Z-xxx-gHASH
@@ -75,6 +119,7 @@ data = {
     'extra': extra,
     'commit': commit,
     'distance': distance,
+    'is_release_branch_ci': is_release_branch_ci,
 }
 
 if output_json:
diff --git a/tools/render-cloudcfg b/tools/render-cloudcfg
index 8b7cb875..3d5fa725 100755
--- a/tools/render-cloudcfg
+++ b/tools/render-cloudcfg
@@ -4,7 +4,8 @@ import argparse
 import os
 import sys
 
-VARIANTS = ["bsd", "centos", "fedora", "rhel", "suse", "ubuntu", "unknown"]
+VARIANTS = ["amazon", "arch", "centos", "debian", "fedora", "freebsd", "rhel",
+            "suse", "ubuntu", "unknown"]
 
 if "avoid-pep8-E402-import-not-top-of-file":
     _tdir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
diff --git a/tools/run-container b/tools/run-container
index 6dedb757..23243474 100755
--- a/tools/run-container
+++ b/tools/run-container
@@ -35,9 +35,6 @@ Usage: ${0##*/} [ options ] [images:]image-ref
                      tested. Inside container, changes are in local-changes.diff.
    -k | --keep       keep container after tests
-   --pyexe V         python version to use. Default=auto.
-                     Should be name of an executable.
-                     ('python2' or 'python3')
    -p | --package    build a binary package (.deb or .rpm)
    -s | --source-package   build source package (debuild -S or srpm)
    -u | --unittest   run unit tests
@@ -262,32 +259,23 @@ prep() {
     # we need some very basic things not present in the container.
     #  - git
     #  - tar (CentOS 6 lxc container does not have it)
-    #  - python-argparse (or python3)
+    #  - python3
     local needed="" pair="" pkg="" cmd="" needed=""
     local pairs="tar:tar git:git"
-    local pyexe="$1"
     get_os_info
-    local py2pkg="python2" py3pkg="python3"
+    local py3pkg="python3"
     case "$OS_NAME" in
         opensuse)
-            py2pkg="python-base"
             py3pkg="python3-base";;
     esac
-    case "$pyexe" in
-        python2) pairs="$pairs python2:$py2pkg";;
-        python3) pairs="$pairs python3:$py3pkg";;
-    esac
+    pairs="$pairs python3:$py3pkg"
     for pair in $pairs; do
         pkg=${pair#*:}
         cmd=${pair%%:*}
         command -v "$cmd" >/dev/null 2>&1 || needed="${needed} $pkg"
     done
-    if [ "$OS_NAME" = "centos" -a "$pyexe" = "python2" ]; then
-        python -c "import argparse" >/dev/null 2>&1 ||
-            needed="${needed} python-argparse"
-    fi
     needed=${needed# }
     if [ -z "$needed" ]; then
         error "No prep packages needed"
@@ -300,15 +288,7 @@ prep() {
 }
 
 nose() {
-    local pyexe="$1" cmd=""
-    shift
-    get_os_info
-    if [ "$OS_NAME/$OS_VERSION" = "centos/6" ]; then
-        cmd="nosetests"
-    else
-        cmd="$pyexe -m nose"
-    fi
-    ${cmd} "$@"
+    python3 -m nose "$@"
 }
 
 is_done_cloudinit() {
@@ -367,12 +347,13 @@ wait_for_boot() {
     run_self_inside "$name" wait_inside "$name" "$wtime" "$VERBOSITY" ||
         { errorrc "wait inside $name failed."; return; }
 
-    if [ ! -z "${http_proxy-}" ]; then
+    if [ -n "${http_proxy-}" ]; then
         if [ "$OS_NAME" = "centos" ]; then
             debug 1 "configuring proxy ${http_proxy}"
             inside "$name" sh -c "echo proxy=$http_proxy >> /etc/yum.conf"
             inside "$name" sed -i s/enabled=1/enabled=0/ \
                 /etc/yum/pluginconf.d/fastestmirror.conf
+            inside "$name" sh -c "sed -i '/^#baseurl=/s/#// ; s/^mirrorlist/#mirrorlist/' /etc/yum.repos.d/*.repo"
         else
             debug 1 "do not know how to configure proxy on $OS_NAME"
         fi
@@ -410,7 +391,7 @@ run_self_inside_as_cd() {
 
 main() {
     local short_opts="a:hknpsuv"
-    local long_opts="artifacts:,dirty,help,keep,name:,pyexe:,package,source-package,unittest,verbose"
+    local long_opts="artifacts:,dirty,help,keep,name:,package,source-package,unittest,verbose"
     local getopt_out=""
     getopt_out=$(getopt --name "${0##*/}" \
         --options "${short_opts}" --long "${long_opts}" -- "$@") &&
@@ -419,7 +400,7 @@ main() {
     local cur="" next=""
     local package=false srcpackage=false unittest="" name=""
-    local dirty=false pyexe="auto" artifact_d="."
+    local dirty=false artifact_d="."
     while [ $# -ne 0 ]; do
         cur="${1:-}"; next="${2:-}";
@@ -429,7 +410,6 @@ main() {
             -h|--help) Usage ; exit 0;;
             -k|--keep) KEEP=true;;
             -n|--name) name="$next"; shift;;
-            --pyexe) pyexe=$next; shift;;
             -p|--package) package=true;;
             -s|--source-package) srcpackage=true;;
             -u|--unittest) unittest=1;;
@@ -469,16 +449,8 @@ main() {
     get_os_info_in "$name" ||
         { errorrc "failed to get os_info in $name"; return; }
 
-    if [ "$pyexe" = "auto" ]; then
-        case "$OS_NAME/$OS_VERSION" in
-            centos/*|opensuse/*) pyexe=python2;;
-            *) pyexe=python3;;
-        esac
-        debug 1 "set pyexe=$pyexe for $OS_NAME/$OS_VERSION"
-    fi
-
     # prep the container (install very basic dependencies)
-    run_self_inside "$name" prep "$pyexe" ||
+    run_self_inside "$name" prep ||
         { errorrc "Failed to prep container $name"; return; }
 
     # add the user
@@ -492,7 +464,7 @@ main() {
     }
 
     inside_as_cd "$name" root "$cdir" \
-        $pyexe ./tools/read-dependencies "--distro=${OS_NAME}" \
+        python3 ./tools/read-dependencies "--distro=${OS_NAME}" \
             --test-distro || {
         errorrc "FAIL: failed to install dependencies with read-dependencies"
         return
@@ -506,7 +478,7 @@ main() {
 
     if [ -n "$unittest" ]; then
         debug 1 "running unit tests."
-        run_self_inside_as_cd "$name" "$user" "$cdir" nose "$pyexe" \
+        run_self_inside_as_cd "$name" "$user" "$cdir" nose \
             tests/unittests cloudinit/ || {
                 errorrc "nosetests failed.";
                 errors[${#errors[@]}]="nosetests"
@@ -536,7 +508,7 @@ main() {
         }
         debug 1 "building source package with $build_srcpkg."
         # shellcheck disable=SC2086
-        inside_as_cd "$name" "$user" "$cdir" $pyexe $build_srcpkg || {
+        inside_as_cd "$name" "$user" "$cdir" python3 $build_srcpkg || {
             errorrc "failed: $build_srcpkg";
             errors[${#errors[@]}]="source package"
         }
@@ -549,7 +521,7 @@ main() {
         }
         debug 1 "building binary package with $build_pkg."
         # shellcheck disable=SC2086
-        inside_as_cd "$name" "$user" "$cdir" $pyexe $build_pkg || {
+        inside_as_cd "$name" "$user" "$cdir" python3 $build_pkg || {
             errorrc "failed: $build_pkg";
             errors[${#errors[@]}]="binary package"
         }
diff --git a/tools/xkvm b/tools/xkvm
--- a/tools/xkvm
+++ b/tools/xkvm
@@ -1,4 +1,6 @@
 #!/bin/bash
+# This file is part of cloud-init.
+# See LICENSE file for copyright and license info.
 
 set -f
 
@@ -11,6 +13,8 @@
 TAPDEVS=( )
 # OVS_CLEANUP gets populated with bridge:devname pairs used with ovs
 OVS_CLEANUP=( )
 MAC_PREFIX="52:54:00:12:34"
+# allow this to be set externally.
+_QEMU_SUPPORTS_FILE_LOCKING="${_QEMU_SUPPORTS_FILE_LOCKING}"
 KVM="kvm"
 declare -A KVM_DEVOPTS
 
@@ -119,6 +123,21 @@ isdevopt() {
     return 1
 }
 
+qemu_supports_file_locking() {
+    # hackily check if qemu has file.locking in -drive params (LP: #1716028)
+    if [ -z "$_QEMU_SUPPORTS_FILE_LOCKING" ]; then
+        # The only way we could find to check presence of file.locking is
+        # qmp (query-qmp-schema). Simply checking if the virtio-blk driver
+        # supports 'share-rw' is expected to be equivalent and simpler.
+        isdevopt virtio-blk share-rw &&
+            _QEMU_SUPPORTS_FILE_LOCKING=true ||
+            _QEMU_SUPPORTS_FILE_LOCKING=false
+        debug 1 "qemu supports file locking = ${_QEMU_SUPPORTS_FILE_LOCKING}"
+    fi
+    [ "$_QEMU_SUPPORTS_FILE_LOCKING" = "true" ]
+    return
+}
+
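The probe above treats support for the virtio-blk `share-rw` property as a stand-in for `-drive file.locking` support. A rough Python equivalent, for illustration only: the qemu binary name and the `virtio-blk-pci,help` spelling are assumptions, and since some qemu builds print the property list on stderr, both streams are searched:

```python
import subprocess

def qemu_supports_file_locking(qemu='qemu-system-x86_64'):
    # `qemu -device <driver>,help` lists the driver's properties;
    # seeing 'share-rw' is used as a proxy for file.locking support,
    # mirroring the shell helper's isdevopt check above.
    result = subprocess.run(
        [qemu, '-device', 'virtio-blk-pci,help'],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE,
        universal_newlines=True)
    return 'share-rw' in (result.stdout + result.stderr)
```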
 padmac() {
     # return a full mac, given a subset.
     # assume whatever is input is the last portion to be
@@ -367,7 +386,7 @@ main() {
     [ ${#netdevs[@]} -eq 0 ] && netdevs=( "${DEF_BRIDGE}" )
     pt=( "$@" )
 
-    local kvm_pkg="" virtio_scsi_bus="virtio-scsi-pci"
+    local kvm_pkg="" virtio_scsi_bus="virtio-scsi-pci" virtio_rng_device="virtio-rng-pci"
     [ -n "$kvm" ] && kvm_pkg="none"
     case $(uname -m) in
         i?86)
@@ -382,7 +401,10 @@ main() {
             [ -n "$kvm" ] ||
                 { kvm="qemu-system-s390x"; kvm_pkg="qemu-system-misc"; }
             def_netmodel=${DEF_NETMODEL:-"virtio-net-ccw"}
-            virtio_scsi_bus="virtio-scsi-ccw"
+            # disable virtio-scsi-bus
+            virtio_scsi_bus=""
+            virtio_blk_bus="virtio-blk-ccw"
+            virtio_rng_device="virtio-rng-ccw"
             ;;
         ppc64*)
             [ -n "$kvm" ] ||
@@ -408,7 +430,7 @@ main() {
     bios_opts=( "${_RET[@]}" )
 
     local out="" fmt="" bus="" unit="" index="" serial="" driver="" devopts=""
-    local busorindex="" driveopts="" cur="" val="" file=""
+    local busorindex="" driveopts="" cur="" val="" file="" wwn=""
     for((i=0;i<${#diskdevs[@]};i++)); do
         cur=${diskdevs[$i]}
         IFS=","; set -- $cur; IFS="$oifs"
@@ -420,6 +442,7 @@ main() {
         unit=""
         index=""
         serial=""
+        wwn=""
         for tok in "$@"; do
             [ "${tok#*=}" = "${tok}" -a -f "${tok}" -a -z "$file" ] && file="$tok"
             val=${tok#*=}
@@ -433,6 +456,7 @@ main() {
                 file=*) file=$val;;
                 fmt=*|format=*) fmt=$val;;
                 serial=*) serial=$val;;
+                wwn=*) wwn=$val;;
                 bus=*) bus=$val;;
                 unit=*) unit=$val;;
                 index=*) index=$val;;
@@ -443,14 +467,19 @@ main() {
             out=$(LANG=C qemu-img info "$file") &&
                 fmt=$(echo "$out" | awk '$0 ~ /^file format:/ { print $3 }') ||
                 { error "failed to determine format of $file"; return 1; }
-        else
+        elif [ -z "$fmt" ]; then
             fmt=raw
         fi
         if [ -z "$driver" ]; then
             driver="$def_disk_driver"
         fi
         if [ -z "$serial" ]; then
+            # no serial given: use the wwn if provided, else the file name
-            serial="${file##*/}"
+            if [ -n "$wwn" ]; then
+                serial="$wwn"
+            else
+                serial="${file##*/}"
+            fi
         fi
 
         # make sure we add either bus= or index=
@@ -470,11 +499,21 @@ main() {
                 id=*|if=*|driver=*|$file|file=*) continue;;
                 fmt=*|format=*) continue;;
                 serial=*|bus=*|unit=*|index=*) continue;;
+                file.locking=*)
+                    qemu_supports_file_locking || {
+                        debug 2 "qemu has no file locking." \
+                            "Dropping '$tok' from: $cur"
+                        continue
+                    };;
             esac
             isdevopt "$driver" "$tok" && devopts="${devopts},$tok" ||
                 diskopts="${diskopts},${tok}"
         done
-
+        case $driver in
+            virtio-blk-ccw)
+                # disable scsi when using virtio-blk-ccw
+                devopts="${devopts},scsi=off";;
+        esac
         diskargs=( "${diskargs[@]}" -drive "$diskopts" -device "$devopts" )
     done
 
@@ -623,10 +662,16 @@ main() {
     done
 
     local bus_devices
-    bus_devices=( -device "$virtio_scsi_bus,id=virtio-scsi-xkvm" )
-    cmd=( "${kvmcmd[@]}" "${archopts[@]}"
+    if [ -n "${virtio_scsi_bus}" ]; then
+        bus_devices=( -device "$virtio_scsi_bus,id=virtio-scsi-xkvm" )
+    fi
+    local rng_devices
+    rng_devices=( -object "rng-random,filename=/dev/urandom,id=objrng0"
+                  -device "$virtio_rng_device,rng=objrng0,id=rng0" )
+    cmd=( "${kvmcmd[@]}" "${archopts[@]}"
           "${bios_opts[@]}"
           "${bus_devices[@]}"
+          "${rng_devices[@]}"
          "${netargs[@]}"
          "${diskargs[@]}" "${pt[@]}" )
     local pcmd=$(quote_cmd "${cmd[@]}")
@@ -661,4 +706,4 @@
 else
     main "$@"
 fi
-# vi: ts=4 expandtab
+# vi: ts=4 expandtab syntax=sh
diff --git a/tox.ini b/tox.ini
--- a/tox.ini
+++ b/tox.ini
@@ -1,11 +1,13 @@
 [tox]
-envlist = py27, py3, xenial, pycodestyle, pyflakes, pylint
+envlist = py3, xenial, pycodestyle, pyflakes, pylint
 recreate = True
 
 [testenv]
 commands = python -m nose {posargs:tests/unittests cloudinit}
 setenv =
     LC_ALL = en_US.utf-8
+passenv =
+    NOSE_VERBOSE
 
 [testenv:pycodestyle]
 basepython = python3
@@ -21,9 +23,10 @@
 basepython = python3
 deps =
     # requirements
-    pylint==1.8.1
+    pylint==2.3.1
     # test-requirements because unit tests are now present in cloudinit tree
     -r{toxinidir}/test-requirements.txt
+    -r{toxinidir}/integration-requirements.txt
 commands = {envpython} -m pylint {posargs:cloudinit tests tools}
 
 [testenv:py3]
@@ -53,8 +56,11 @@ exclude = .venv,.tox,dist,doc,*egg,.git,build,tools
 
 [testenv:doc]
 basepython = python3
-deps = sphinx
-commands = {envpython} -m sphinx {posargs:doc/rtd doc/rtd_html}
+deps =
+    -r{toxinidir}/doc-requirements.txt
+commands =
+    {envpython} -m sphinx {posargs:doc/rtd doc/rtd_html}
+    doc8 doc/rtd
 
 [testenv:xenial]
 commands =
@@ -75,7 +81,7 @@ deps =
     jsonpatch==1.16
     six==1.10.0
     # test-requirements
-    httpretty==0.8.6
+    httpretty==0.9.6
     mock==1.3.0
     nose==1.3.7
     unittest2==1.1.0
@@ -96,19 +102,18 @@
     six==1.9.0
     -r{toxinidir}/test-requirements.txt
 
-[testenv:opensusel42]
+[testenv:opensusel150]
 basepython = python2.7
 commands = nosetests {posargs:tests/unittests cloudinit}
 deps =
     # requirements
-    argparse==1.3.0
-    jinja2==2.8
-    PyYAML==3.11
-    oauthlib==0.7.2
+    jinja2==2.10
+    PyYAML==3.12
+    oauthlib==2.0.6
     configobj==5.0.6
-    requests==2.11.1
-    jsonpatch==1.11
-    six==1.9.0
+    requests==2.18.4
+    jsonpatch==1.16
+    six==1.11.0
     -r{toxinidir}/test-requirements.txt
 
 [testenv:tip-pycodestyle]
@@ -131,6 +136,7 @@ deps =
     pylint
     # test-requirements
     -r{toxinidir}/test-requirements.txt
+    -r{toxinidir}/integration-requirements.txt
 
 [testenv:citest]
 basepython = python3
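Tying back to the tools/read-version hunk earlier: its closing comment pins down the two shapes, `version` as `X.Y.Z[+xxx.gHASH]` and `version_long` as `X.Y.Z-xxx-gHASH`. A small sketch of how `distance`, `commit`, and the `+xxx.gHASH` extra could be recovered from the long form; the sample value is illustrative, and the real script's parsing is not shown in these hunks:

```python
# Illustrative `git describe --long` output: <tag>-<distance>-g<commit>
version_long = '20.1-15-g2e2a1c1b'

base, distance, commit, extra = version_long, None, None, None
if '-' in version_long:
    # rsplit keeps a tag that itself contains dashes in one piece
    base, distance, commit = version_long.rsplit('-', 2)
    commit = commit[1:]  # drop git-describe's leading 'g'
    extra = '+{0}.g{1}'.format(distance, commit)

print(base, distance, commit, extra)  # 20.1 15 2e2a1c1b +15.g2e2a1c1b
```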