From e384a5436560c9494118f0999c314982d4912d27 Mon Sep 17 00:00:00 2001 From: Michael Hudson-Doyle Date: Tue, 23 Feb 2021 08:20:46 +1300 Subject: cc_keys_to_console: add option to disable key emission (#811) Specifically: ssh: emit_keys_to_console: false We also port the cc_keys_to_console cloud tests to the new integration testing framework, and add a test for this new option. LP: #1915460 --- doc/examples/cloud-config-ssh-keys.txt | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'doc/examples') diff --git a/doc/examples/cloud-config-ssh-keys.txt b/doc/examples/cloud-config-ssh-keys.txt index aad8b683..bfe5ab44 100644 --- a/doc/examples/cloud-config-ssh-keys.txt +++ b/doc/examples/cloud-config-ssh-keys.txt @@ -42,3 +42,13 @@ ssh_keys: -----END DSA PRIVATE KEY----- dsa_public: ssh-dss AAAAB3NzaC1kc3MAAACBAM/Ycu7ulMTEvz1RLIzTbrhELJZf8Iwua6TFfQl1ubb1rHwUElOkus7xMhdVjms8AmbV1Meem7ImE69T0bszy09QAG3NImHgZVIeXBoJ/JzByku/1NcOBYilKP7oSIcLJpGUHX8IGn1GJoH7XRBwVub6Vqm4RP78C7q9IOn0hG2VAAAAFQCDEfCrnL1GGzhCPsr/uS1vbt8/wQAAAIEAjSrok/4m8mbBkVp4IwxXFdRuqJKSj8/WWxos00Ednn/ww5QibysHYULrOKJ1+54mmpMyp5CZICUQELCfCt5ScZ9GsqgmnI80Q1h3Xkwbo3kn7PzWwRwcV6muvJn4PcZ71WM+rdN/c2EorAINDTbjRo97NueM94WbiYdtjHFxn0YAAACAXmLIFSQgiAPu459rCKxT46tHJtM0QfnNiEnQLbFluefZ/yiI4DI38UzTCOXLhUA7ybmZha+D/csj15Y9/BNFuO7unzVhikCQV9DTeXX46pG4s1o23JKC/QaYWNMZ7kTRv+wWow9MhGiVdML4ZN4XnifuO5krqAybngIy66PMEoQ= smoser@localhost + +# By default, the fingerprints of the authorized keys for the users +# cloud-init adds are printed to the console. Setting +# no_ssh_fingerprints to true suppresses this output. +no_ssh_fingerprints: false + +# By default, (most) ssh host keys are printed to the console. Setting +# emit_keys_to_console to false suppresses this output. +ssh: + emit_keys_to_console: false -- cgit v1.2.3 From 6fe99157876f83ae2249d44c1b456a24cc70e258 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Mon, 8 Mar 2021 12:13:37 -0600 Subject: Remove the vi comment from the part-handler example (#835) --- doc/examples/part-handler.txt | 1 - 1 file changed, 1 deletion(-) (limited to 'doc/examples') diff --git a/doc/examples/part-handler.txt b/doc/examples/part-handler.txt index a6e66415..1484e1a0 100644 --- a/doc/examples/part-handler.txt +++ b/doc/examples/part-handler.txt @@ -1,5 +1,4 @@ #part-handler -# vi: syntax=python ts=4 def list_types(): # return a list of mime-types that are handled by this module -- cgit v1.2.3 From d4acc0920cdc19e5be3e1054a9a5fbc20e954492 Mon Sep 17 00:00:00 2001 From: timothegenzmer Date: Wed, 24 Mar 2021 20:57:53 +0100 Subject: Fix chef apt source example (#826) key is a property of source1 and not sources --- doc/examples/cloud-config-chef.txt | 70 +++++++++++++++++++------------------- tools/.github-cla-signers | 1 + 2 files changed, 36 insertions(+), 35 deletions(-) (limited to 'doc/examples') diff --git a/doc/examples/cloud-config-chef.txt b/doc/examples/cloud-config-chef.txt index 8cebfd80..414111a1 100644 --- a/doc/examples/cloud-config-chef.txt +++ b/doc/examples/cloud-config-chef.txt @@ -15,41 +15,41 @@ apt: sources: source1: source: "deb http://packages.chef.io/repos/apt/stable $RELEASE main" - key: | - -----BEGIN PGP PUBLIC KEY BLOCK----- - Version: GnuPG v1.4.12 (Darwin) - Comment: GPGTools - http://gpgtools.org - - mQGiBEppC7QRBADfsOkZU6KZK+YmKw4wev5mjKJEkVGlus+NxW8wItX5sGa6kdUu - twAyj7Yr92rF+ICFEP3gGU6+lGo0Nve7KxkN/1W7/m3G4zuk+ccIKmjp8KS3qn99 - dxy64vcji9jIllVa+XXOGIp0G8GEaj7mbkixL/bMeGfdMlv8Gf2XPpp9vwCgn/GC - JKacfnw7MpLKUHOYSlb//JsEAJqao3ViNfav83jJKEkD8cf59Y8xKia5OpZqTK5W - 
ShVnNWS3U5IVQk10ZDH97Qn/YrK387H4CyhLE9mxPXs/ul18ioiaars/q2MEKU2I - XKfV21eMLO9LYd6Ny/Kqj8o5WQK2J6+NAhSwvthZcIEphcFignIuobP+B5wNFQpe - DbKfA/0WvN2OwFeWRcmmd3Hz7nHTpcnSF+4QX6yHRF/5BgxkG6IqBIACQbzPn6Hm - sMtm/SVf11izmDqSsQptCrOZILfLX/mE+YOl+CwWSHhl+YsFts1WOuh1EhQD26aO - Z84HuHV5HFRWjDLw9LriltBVQcXbpfSrRP5bdr7Wh8vhqJTPjrQnT3BzY29kZSBQ - YWNrYWdlcyA8cGFja2FnZXNAb3BzY29kZS5jb20+iGAEExECACAFAkppC7QCGwMG - CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRApQKupg++Caj8sAKCOXmdG36gWji/K - +o+XtBfvdMnFYQCfTCEWxRy2BnzLoBBFCjDSK6sJqCu0IENIRUYgUGFja2FnZXMg - PHBhY2thZ2VzQGNoZWYuaW8+iGIEExECACIFAlQwYFECGwMGCwkIBwMCBhUIAgkK - CwQWAgMBAh4BAheAAAoJEClAq6mD74JqX94An26z99XOHWpLN8ahzm7cp13t4Xid - AJ9wVcgoUBzvgg91lKfv/34cmemZn7kCDQRKaQu0EAgAg7ZLCVGVTmLqBM6njZEd - Zbv+mZbvwLBSomdiqddE6u3eH0X3GuwaQfQWHUVG2yedyDMiG+EMtCdEeeRebTCz - SNXQ8Xvi22hRPoEsBSwWLZI8/XNg0n0f1+GEr+mOKO0BxDB2DG7DA0nnEISxwFkK - OFJFebR3fRsrWjj0KjDxkhse2ddU/jVz1BY7Nf8toZmwpBmdozETMOTx3LJy1HZ/ - Te9FJXJMUaB2lRyluv15MVWCKQJro4MQG/7QGcIfrIZNfAGJ32DDSjV7/YO+IpRY - IL4CUBQ65suY4gYUG4jhRH6u7H1p99sdwsg5OIpBe/v2Vbc/tbwAB+eJJAp89Zeu - twADBQf/ZcGoPhTGFuzbkcNRSIz+boaeWPoSxK2DyfScyCAuG41CY9+g0HIw9Sq8 - DuxQvJ+vrEJjNvNE3EAEdKl/zkXMZDb1EXjGwDi845TxEMhhD1dDw2qpHqnJ2mtE - WpZ7juGwA3sGhi6FapO04tIGacCfNNHmlRGipyq5ZiKIRq9mLEndlECr8cwaKgkS - 0wWu+xmMZe7N5/t/TK19HXNh4tVacv0F3fYK54GUjt2FjCQV75USnmNY4KPTYLXA - dzC364hEMlXpN21siIFgB04w+TXn5UF3B4FfAy5hevvr4DtV4MvMiGLu0oWjpaLC - MpmrR3Ny2wkmO0h+vgri9uIP06ODWIhJBBgRAgAJBQJKaQu0AhsMAAoJEClAq6mD - 74Jq4hIAoJ5KrYS8kCwj26SAGzglwggpvt3CAJ0bekyky56vNqoegB+y4PQVDv4K - zA== - =IxPr - -----END PGP PUBLIC KEY BLOCK----- + key: | + -----BEGIN PGP PUBLIC KEY BLOCK----- + Version: GnuPG v1.4.12 (Darwin) + Comment: GPGTools - http://gpgtools.org + + mQGiBEppC7QRBADfsOkZU6KZK+YmKw4wev5mjKJEkVGlus+NxW8wItX5sGa6kdUu + twAyj7Yr92rF+ICFEP3gGU6+lGo0Nve7KxkN/1W7/m3G4zuk+ccIKmjp8KS3qn99 + dxy64vcji9jIllVa+XXOGIp0G8GEaj7mbkixL/bMeGfdMlv8Gf2XPpp9vwCgn/GC + JKacfnw7MpLKUHOYSlb//JsEAJqao3ViNfav83jJKEkD8cf59Y8xKia5OpZqTK5W + ShVnNWS3U5IVQk10ZDH97Qn/YrK387H4CyhLE9mxPXs/ul18ioiaars/q2MEKU2I + XKfV21eMLO9LYd6Ny/Kqj8o5WQK2J6+NAhSwvthZcIEphcFignIuobP+B5wNFQpe + DbKfA/0WvN2OwFeWRcmmd3Hz7nHTpcnSF+4QX6yHRF/5BgxkG6IqBIACQbzPn6Hm + sMtm/SVf11izmDqSsQptCrOZILfLX/mE+YOl+CwWSHhl+YsFts1WOuh1EhQD26aO + Z84HuHV5HFRWjDLw9LriltBVQcXbpfSrRP5bdr7Wh8vhqJTPjrQnT3BzY29kZSBQ + YWNrYWdlcyA8cGFja2FnZXNAb3BzY29kZS5jb20+iGAEExECACAFAkppC7QCGwMG + CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRApQKupg++Caj8sAKCOXmdG36gWji/K + +o+XtBfvdMnFYQCfTCEWxRy2BnzLoBBFCjDSK6sJqCu0IENIRUYgUGFja2FnZXMg + PHBhY2thZ2VzQGNoZWYuaW8+iGIEExECACIFAlQwYFECGwMGCwkIBwMCBhUIAgkK + CwQWAgMBAh4BAheAAAoJEClAq6mD74JqX94An26z99XOHWpLN8ahzm7cp13t4Xid + AJ9wVcgoUBzvgg91lKfv/34cmemZn7kCDQRKaQu0EAgAg7ZLCVGVTmLqBM6njZEd + Zbv+mZbvwLBSomdiqddE6u3eH0X3GuwaQfQWHUVG2yedyDMiG+EMtCdEeeRebTCz + SNXQ8Xvi22hRPoEsBSwWLZI8/XNg0n0f1+GEr+mOKO0BxDB2DG7DA0nnEISxwFkK + OFJFebR3fRsrWjj0KjDxkhse2ddU/jVz1BY7Nf8toZmwpBmdozETMOTx3LJy1HZ/ + Te9FJXJMUaB2lRyluv15MVWCKQJro4MQG/7QGcIfrIZNfAGJ32DDSjV7/YO+IpRY + IL4CUBQ65suY4gYUG4jhRH6u7H1p99sdwsg5OIpBe/v2Vbc/tbwAB+eJJAp89Zeu + twADBQf/ZcGoPhTGFuzbkcNRSIz+boaeWPoSxK2DyfScyCAuG41CY9+g0HIw9Sq8 + DuxQvJ+vrEJjNvNE3EAEdKl/zkXMZDb1EXjGwDi845TxEMhhD1dDw2qpHqnJ2mtE + WpZ7juGwA3sGhi6FapO04tIGacCfNNHmlRGipyq5ZiKIRq9mLEndlECr8cwaKgkS + 0wWu+xmMZe7N5/t/TK19HXNh4tVacv0F3fYK54GUjt2FjCQV75USnmNY4KPTYLXA + dzC364hEMlXpN21siIFgB04w+TXn5UF3B4FfAy5hevvr4DtV4MvMiGLu0oWjpaLC + MpmrR3Ny2wkmO0h+vgri9uIP06ODWIhJBBgRAgAJBQJKaQu0AhsMAAoJEClAq6mD + 74Jq4hIAoJ5KrYS8kCwj26SAGzglwggpvt3CAJ0bekyky56vNqoegB+y4PQVDv4K + zA== + =IxPr + -----END 
PGP PUBLIC KEY BLOCK----- chef: diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers index 5c57acac..6661c250 100644 --- a/tools/.github-cla-signers +++ b/tools/.github-cla-signers @@ -40,6 +40,7 @@ smoser sshedi TheRealFalcon taoyama +timothegenzmer tnt-dev tomponline tsanghan -- cgit v1.2.3 From 74fa008bfcd3263eb691cc0b3f7a055b17569f8b Mon Sep 17 00:00:00 2001 From: Eduardo Otubo Date: Tue, 30 Mar 2021 18:08:25 +0200 Subject: Add support to resize rootfs if using LVM (#721) This patch adds support to resize a single partition of a VM if it's using an LVM underneath. The patch detects if it's LVM if the given block device is a device mapper by its name (e.g. `/dev/dm-1`) and if it has slave devices under it on sysfs. After that syspath is updated to the real block device and growpart will be called to resize it (and automatically its Physical Volume). The Volume Group will be updated automatically and a final call to extend the rootfs to the remaining space available will be made. Using the same growpart configuration, the user can specify only one device to be resized when using LVM and growpart, otherwise cloud-init won't know which one should be resized and will fail. rhbz: #1810878 LP: #1799953 Signed-off-by: Eduardo Otubo Signed-off-by: Scott Moser --- cloudinit/config/cc_growpart.py | 83 +++++++++++++++++++++- doc/examples/cloud-config-growpart.txt | 2 + .../test_handler/test_handler_growpart.py | 56 ++++++++++++++- 3 files changed, 137 insertions(+), 4 deletions(-) (limited to 'doc/examples') diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index 9f338ad1..6399bfb7 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -68,7 +68,9 @@ import os import os.path import re import stat +import platform +from functools import lru_cache from cloudinit import log as logging from cloudinit.settings import PER_ALWAYS from cloudinit import subp @@ -93,6 +95,58 @@ class RESIZE(object): LOG = logging.getLogger(__name__) +@lru_cache() +def is_lvm_lv(devpath): + if util.is_Linux(): + # all lvm lvs will have a realpath as a 'dm-*' name. 
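+        # (an LV such as /dev/vg0/root is normally a symlink whose
+        # realpath is a device-mapper node, e.g. /dev/dm-0)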
+ rpath = os.path.realpath(devpath) + if not os.path.basename(rpath).startswith("dm-"): + return False + out, _ = subp.subp("udevadm", "info", devpath) + # lvs should have DM_LV_NAME= and also DM_VG_NAME + return 'DM_LV_NAME=' in out + else: + LOG.info("Not an LVM Logical Volume partition") + return False + + +@lru_cache() +def get_pvs_for_lv(devpath): + myenv = {'LANG': 'C'} + + if not util.is_Linux(): + LOG.info("No support for LVM on %s", platform.system()) + return None + if not subp.which('lvm'): + LOG.info("No 'lvm' command present") + return None + + try: + (out, _err) = subp.subp(["lvm", "lvs", devpath, "--options=vgname", + "--noheadings"], update_env=myenv) + vgname = out.strip() + except subp.ProcessExecutionError as e: + if e.exit_code != 0: + util.logexc(LOG, "Failed: can't get Volume Group information " + "from %s", devpath) + raise ResizeFailedException(e) from e + + try: + (out, _err) = subp.subp(["lvm", "vgs", vgname, "--options=pvname", + "--noheadings"], update_env=myenv) + pvs = [p.strip() for p in out.splitlines()] + if len(pvs) > 1: + LOG.info("Do not know how to resize multiple Physical" + " Volumes") + else: + return pvs[0] + except subp.ProcessExecutionError as e: + if e.exit_code != 0: + util.logexc(LOG, "Failed: can't get Physical Volume " + "information from Volume Group %s", vgname) + raise ResizeFailedException(e) from e + + def resizer_factory(mode): resize_class = None if mode == "auto": @@ -208,13 +262,18 @@ def get_size(filename): os.close(fd) -def device_part_info(devpath): +def device_part_info(devpath, is_lvm): # convert an entry in /dev/ to parent disk and partition number # input of /dev/vdb or /dev/disk/by-label/foo # rpath is hopefully a real-ish path in /dev (vda, sdb..) rpath = os.path.realpath(devpath) + # first check if this is an LVM and get its PVs + lvm_rpath = get_pvs_for_lv(devpath) + if is_lvm and lvm_rpath: + rpath = lvm_rpath + bname = os.path.basename(rpath) syspath = "/sys/class/block/%s" % bname @@ -244,7 +303,7 @@ def device_part_info(devpath): # diskdevpath has something like 253:0 # and udev has put links in /dev/block/253:0 to the device name in /dev/ - return (diskdevpath, ptnum) + return diskdevpath, ptnum def devent2dev(devent): @@ -294,8 +353,9 @@ def resize_devices(resizer, devices): "device '%s' not a block device" % blockdev,)) continue + is_lvm = is_lvm_lv(blockdev) try: - (disk, ptnum) = device_part_info(blockdev) + disk, ptnum = device_part_info(blockdev, is_lvm) except (TypeError, ValueError) as e: info.append((devent, RESIZE.SKIPPED, "device_part_info(%s) failed: %s" % (blockdev, e),)) @@ -316,6 +376,23 @@ def resize_devices(resizer, devices): "failed to resize: disk=%s, ptnum=%s: %s" % (disk, ptnum, e),)) + if is_lvm and isinstance(resizer, ResizeGrowPart): + try: + if len(devices) == 1: + (_out, _err) = subp.subp( + ["lvm", "lvextend", "--extents=100%FREE", blockdev], + update_env={'LANG': 'C'}) + info.append((devent, RESIZE.CHANGED, + "Logical Volume %s extended" % devices[0],)) + else: + LOG.info("Exactly one device should be configured to be " + "resized when using LVM. 
More than one configured" + ": %s", devices) + except (subp.ProcessExecutionError, ValueError) as e: + info.append((devent, RESIZE.NOCHANGE, + "Logical Volume %s resize failed: %s" % + (blockdev, e),)) + return info diff --git a/doc/examples/cloud-config-growpart.txt b/doc/examples/cloud-config-growpart.txt index 393d5164..09268117 100644 --- a/doc/examples/cloud-config-growpart.txt +++ b/doc/examples/cloud-config-growpart.txt @@ -13,6 +13,8 @@ # # devices: # a list of things to resize. +# if the devices are under LVM, the list should be a single entry, +# cloud-init will then extend the single entry, otherwise it will fail. # items can be filesystem paths or devices (in /dev) # examples: # devices: [/, /dev/vdb1] diff --git a/tests/unittests/test_handler/test_handler_growpart.py b/tests/unittests/test_handler/test_handler_growpart.py index 7f039b79..cc0a9248 100644 --- a/tests/unittests/test_handler/test_handler_growpart.py +++ b/tests/unittests/test_handler/test_handler_growpart.py @@ -172,6 +172,53 @@ class TestResize(unittest.TestCase): self.name = "growpart" self.log = logging.getLogger("TestResize") + def test_lvm_resize(self): + # LVM resize should work only if a single device is configured. More + # than one device should fail. + lvm_pass = ["/dev/XXdm-0"] + lvm_fail = ["/dev/XXdm-1", "/dev/YYdm-1"] + devstat_ret = Bunch(st_mode=25008, st_ino=6078, st_dev=5, + st_nlink=1, st_uid=0, st_gid=6, st_size=0, + st_atime=0, st_mtime=0, st_ctime=0) + real_stat = os.stat + resize_calls = [] + + class myresizer(object): + def resize(self, diskdev, partnum, partdev): + resize_calls.append((diskdev, partnum, partdev)) + if partdev == "/dev/XXdm-0": + return (1024, 2048) + return (1024, 1024) # old size, new size + + def mystat(path): + if path in lvm_pass or path in lvm_fail: + return devstat_ret + return real_stat(path) + + try: + opinfo = cc_growpart.device_part_info + cc_growpart.device_part_info = simple_device_part_info_lvm + os.stat = mystat + + resized = cc_growpart.resize_devices(myresizer(), lvm_pass) + not_resized = cc_growpart.resize_devices(myresizer(), lvm_fail) + + def find(name, res): + for f in res: + if f[0] == name: + return f + return None + + self.assertEqual(cc_growpart.RESIZE.CHANGED, + find("/dev/XXdm-0", resized)[1]) + self.assertEqual(cc_growpart.RESIZE.NOCHANGE, + find("/dev/XXdm-1", not_resized)[1]) + self.assertEqual(cc_growpart.RESIZE.NOCHANGE, + find("/dev/YYdm-1", not_resized)[1]) + finally: + cc_growpart.device_part_info = opinfo + os.stat = real_stat + def test_simple_devices(self): # test simple device list # this patches out devent2dev, os.stat, and device_part_info @@ -227,7 +274,14 @@ class TestResize(unittest.TestCase): os.stat = real_stat -def simple_device_part_info(devpath): +def simple_device_part_info_lvm(devpath, is_lvm): + # simple stupid return (/dev/vda, 1) for /dev/vda + ret = re.search("([^0-9]*)([0-9]*)$", devpath) + x = (ret.group(1), ret.group(2)) + return x + + +def simple_device_part_info(devpath, is_lvm): # simple stupid return (/dev/vda, 1) for /dev/vda ret = re.search("([^0-9]*)([0-9]*)$", devpath) x = (ret.group(1), ret.group(2)) -- cgit v1.2.3 From 5f5fa5ee99296b3b1044682c41bab38a32cdccd7 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Mon, 3 May 2021 10:56:46 -0400 Subject: Revert "Add support to resize rootfs if using LVM (#721)" (#887) This reverts commit 74fa008bfcd3263eb691cc0b3f7a055b17569f8b. During pre-release testing, we discovered two issues with this commit. 
Firstly, there's a typo in the udevadm command that causes a TypeError for _all_ growpart executions. Secondly, the LVM resizing does not appear to successfully resize everything up to the LV, though some things do get resized. We certainly want this change, so we'll be happy to review and land it alongside an integration test which confirms that it is working as expected. LP: #1922742 --- cloudinit/config/cc_growpart.py | 83 +--------------------- doc/examples/cloud-config-growpart.txt | 2 - .../test_handler/test_handler_growpart.py | 56 +-------------- 3 files changed, 4 insertions(+), 137 deletions(-) (limited to 'doc/examples') diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index 6399bfb7..9f338ad1 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -68,9 +68,7 @@ import os import os.path import re import stat -import platform -from functools import lru_cache from cloudinit import log as logging from cloudinit.settings import PER_ALWAYS from cloudinit import subp @@ -95,58 +93,6 @@ class RESIZE(object): LOG = logging.getLogger(__name__) -@lru_cache() -def is_lvm_lv(devpath): - if util.is_Linux(): - # all lvm lvs will have a realpath as a 'dm-*' name. - rpath = os.path.realpath(devpath) - if not os.path.basename(rpath).startswith("dm-"): - return False - out, _ = subp.subp("udevadm", "info", devpath) - # lvs should have DM_LV_NAME= and also DM_VG_NAME - return 'DM_LV_NAME=' in out - else: - LOG.info("Not an LVM Logical Volume partition") - return False - - -@lru_cache() -def get_pvs_for_lv(devpath): - myenv = {'LANG': 'C'} - - if not util.is_Linux(): - LOG.info("No support for LVM on %s", platform.system()) - return None - if not subp.which('lvm'): - LOG.info("No 'lvm' command present") - return None - - try: - (out, _err) = subp.subp(["lvm", "lvs", devpath, "--options=vgname", - "--noheadings"], update_env=myenv) - vgname = out.strip() - except subp.ProcessExecutionError as e: - if e.exit_code != 0: - util.logexc(LOG, "Failed: can't get Volume Group information " - "from %s", devpath) - raise ResizeFailedException(e) from e - - try: - (out, _err) = subp.subp(["lvm", "vgs", vgname, "--options=pvname", - "--noheadings"], update_env=myenv) - pvs = [p.strip() for p in out.splitlines()] - if len(pvs) > 1: - LOG.info("Do not know how to resize multiple Physical" - " Volumes") - else: - return pvs[0] - except subp.ProcessExecutionError as e: - if e.exit_code != 0: - util.logexc(LOG, "Failed: can't get Physical Volume " - "information from Volume Group %s", vgname) - raise ResizeFailedException(e) from e - - def resizer_factory(mode): resize_class = None if mode == "auto": @@ -262,18 +208,13 @@ def get_size(filename): os.close(fd) -def device_part_info(devpath, is_lvm): +def device_part_info(devpath): # convert an entry in /dev/ to parent disk and partition number # input of /dev/vdb or /dev/disk/by-label/foo # rpath is hopefully a real-ish path in /dev (vda, sdb..) 
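    # (e.g. /dev/disk/by-label/foo resolving to something like /dev/vda1)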
rpath = os.path.realpath(devpath) - # first check if this is an LVM and get its PVs - lvm_rpath = get_pvs_for_lv(devpath) - if is_lvm and lvm_rpath: - rpath = lvm_rpath - bname = os.path.basename(rpath) syspath = "/sys/class/block/%s" % bname @@ -303,7 +244,7 @@ def device_part_info(devpath, is_lvm): # diskdevpath has something like 253:0 # and udev has put links in /dev/block/253:0 to the device name in /dev/ - return diskdevpath, ptnum + return (diskdevpath, ptnum) def devent2dev(devent): @@ -353,9 +294,8 @@ def resize_devices(resizer, devices): "device '%s' not a block device" % blockdev,)) continue - is_lvm = is_lvm_lv(blockdev) try: - disk, ptnum = device_part_info(blockdev, is_lvm) + (disk, ptnum) = device_part_info(blockdev) except (TypeError, ValueError) as e: info.append((devent, RESIZE.SKIPPED, "device_part_info(%s) failed: %s" % (blockdev, e),)) @@ -376,23 +316,6 @@ def resize_devices(resizer, devices): "failed to resize: disk=%s, ptnum=%s: %s" % (disk, ptnum, e),)) - if is_lvm and isinstance(resizer, ResizeGrowPart): - try: - if len(devices) == 1: - (_out, _err) = subp.subp( - ["lvm", "lvextend", "--extents=100%FREE", blockdev], - update_env={'LANG': 'C'}) - info.append((devent, RESIZE.CHANGED, - "Logical Volume %s extended" % devices[0],)) - else: - LOG.info("Exactly one device should be configured to be " - "resized when using LVM. More than one configured" - ": %s", devices) - except (subp.ProcessExecutionError, ValueError) as e: - info.append((devent, RESIZE.NOCHANGE, - "Logical Volume %s resize failed: %s" % - (blockdev, e),)) - return info diff --git a/doc/examples/cloud-config-growpart.txt b/doc/examples/cloud-config-growpart.txt index 09268117..393d5164 100644 --- a/doc/examples/cloud-config-growpart.txt +++ b/doc/examples/cloud-config-growpart.txt @@ -13,8 +13,6 @@ # # devices: # a list of things to resize. -# if the devices are under LVM, the list should be a single entry, -# cloud-init will then extend the single entry, otherwise it will fail. # items can be filesystem paths or devices (in /dev) # examples: # devices: [/, /dev/vdb1] diff --git a/tests/unittests/test_handler/test_handler_growpart.py b/tests/unittests/test_handler/test_handler_growpart.py index cc0a9248..7f039b79 100644 --- a/tests/unittests/test_handler/test_handler_growpart.py +++ b/tests/unittests/test_handler/test_handler_growpart.py @@ -172,53 +172,6 @@ class TestResize(unittest.TestCase): self.name = "growpart" self.log = logging.getLogger("TestResize") - def test_lvm_resize(self): - # LVM resize should work only if a single device is configured. More - # than one device should fail. 
- lvm_pass = ["/dev/XXdm-0"] - lvm_fail = ["/dev/XXdm-1", "/dev/YYdm-1"] - devstat_ret = Bunch(st_mode=25008, st_ino=6078, st_dev=5, - st_nlink=1, st_uid=0, st_gid=6, st_size=0, - st_atime=0, st_mtime=0, st_ctime=0) - real_stat = os.stat - resize_calls = [] - - class myresizer(object): - def resize(self, diskdev, partnum, partdev): - resize_calls.append((diskdev, partnum, partdev)) - if partdev == "/dev/XXdm-0": - return (1024, 2048) - return (1024, 1024) # old size, new size - - def mystat(path): - if path in lvm_pass or path in lvm_fail: - return devstat_ret - return real_stat(path) - - try: - opinfo = cc_growpart.device_part_info - cc_growpart.device_part_info = simple_device_part_info_lvm - os.stat = mystat - - resized = cc_growpart.resize_devices(myresizer(), lvm_pass) - not_resized = cc_growpart.resize_devices(myresizer(), lvm_fail) - - def find(name, res): - for f in res: - if f[0] == name: - return f - return None - - self.assertEqual(cc_growpart.RESIZE.CHANGED, - find("/dev/XXdm-0", resized)[1]) - self.assertEqual(cc_growpart.RESIZE.NOCHANGE, - find("/dev/XXdm-1", not_resized)[1]) - self.assertEqual(cc_growpart.RESIZE.NOCHANGE, - find("/dev/YYdm-1", not_resized)[1]) - finally: - cc_growpart.device_part_info = opinfo - os.stat = real_stat - def test_simple_devices(self): # test simple device list # this patches out devent2dev, os.stat, and device_part_info @@ -274,14 +227,7 @@ class TestResize(unittest.TestCase): os.stat = real_stat -def simple_device_part_info_lvm(devpath, is_lvm): - # simple stupid return (/dev/vda, 1) for /dev/vda - ret = re.search("([^0-9]*)([0-9]*)$", devpath) - x = (ret.group(1), ret.group(2)) - return x - - -def simple_device_part_info(devpath, is_lvm): +def simple_device_part_info(devpath): # simple stupid return (/dev/vda, 1) for /dev/vda ret = re.search("([^0-9]*)([0-9]*)$", devpath) x = (ret.group(1), ret.group(2)) -- cgit v1.2.3 From 21a0b12052691d6634d0848dfa353c12939945e9 Mon Sep 17 00:00:00 2001 From: Geert Stappers Date: Fri, 14 May 2021 22:25:55 +0200 Subject: [examples] config-user-groups expire in the future (#902) Changed year 2012 into 2032 --- doc/examples/cloud-config-user-groups.txt | 2 +- tools/.github-cla-signers | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'doc/examples') diff --git a/doc/examples/cloud-config-user-groups.txt b/doc/examples/cloud-config-user-groups.txt index 4a5a7e20..1faecf75 100644 --- a/doc/examples/cloud-config-user-groups.txt +++ b/doc/examples/cloud-config-user-groups.txt @@ -19,7 +19,7 @@ users: primary_group: foobar groups: users selinux_user: staff_u - expiredate: '2012-09-01' + expiredate: '2032-09-01' ssh_import_id: foobar lock_passwd: false passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/ diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers index 48995057..d5eb3b06 100644 --- a/tools/.github-cla-signers +++ b/tools/.github-cla-signers @@ -44,6 +44,7 @@ riedel slyon smoser sshedi +stappersg TheRealFalcon taoyama timothegenzmer -- cgit v1.2.3 From 1793b8b70ca2e3587c271155033ef943207136ae Mon Sep 17 00:00:00 2001 From: Paul Goins Date: Tue, 18 May 2021 17:02:51 +0000 Subject: Added support for importing keys via primary/security mirror clauses (#882) Presently, mirror keys cannot be associated with primary/security mirrors. Unfortunately, this prevents use of Landscape-managed package mirrors as the mirror key for the Landscape-hosted repository cannot be provided. 
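For illustration, the kind of entry this enables (the URI and keyid
below are placeholders, not a real mirror or key):

    apt:
      primary:
        - arches: [default]
          uri: http://mirror.example.com/ubuntu
          keyid: DEADBEEF
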
This patch allows the same key-related fields usable on "sources" entries to be used on the "primary" and "security" entries as well. LP: #1925395 --- cloudinit/config/cc_apt_configure.py | 26 ++++++++++++++++++++++ doc/examples/cloud-config-apt.txt | 6 +++++ .../test_handler/test_handler_apt_source_v3.py | 23 +++++++++++++++++++ 3 files changed, 55 insertions(+) (limited to 'doc/examples') diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index bb8a1278..0c9c7925 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -57,6 +57,15 @@ mirror_property = { }, 'search_dns': { 'type': 'boolean', + }, + 'keyid': { + 'type': 'string' + }, + 'key': { + 'type': 'string' + }, + 'keyserver': { + 'type': 'string' } } } @@ -228,6 +237,15 @@ schema = { key, the search pattern will be ``-security-mirror``. + Each mirror may also specify a key to import via + any of the following optional keys: + + - ``keyid``: a key to import via shortid or \ + fingerprint. + - ``key``: a raw PGP key. + - ``keyserver``: alternate keyserver to pull \ + ``keyid`` key from. + If no mirrors are specified, or all lookups fail, then default mirrors defined in the datasource are used. If none are present in the datasource @@ -453,6 +471,7 @@ def apply_apt(cfg, cloud, target): LOG.debug("Apt Mirror info: %s", mirrors) if util.is_false(cfg.get('preserve_sources_list', False)): + add_mirror_keys(cfg, target) generate_sources_list(cfg, release, mirrors, cloud) rename_apt_lists(mirrors, target, arch) @@ -660,6 +679,13 @@ def disable_suites(disabled, src, release): return retsrc +def add_mirror_keys(cfg, target): + """Adds any keys included in the primary/security mirror clauses""" + for key in ('primary', 'security'): + for mirror in cfg.get(key, []): + add_apt_key(mirror, target) + + def generate_sources_list(cfg, release, mirrors, cloud): """generate_sources_list create a source.list file based on a custom or default template diff --git a/doc/examples/cloud-config-apt.txt b/doc/examples/cloud-config-apt.txt index 004894b7..f4392326 100644 --- a/doc/examples/cloud-config-apt.txt +++ b/doc/examples/cloud-config-apt.txt @@ -138,6 +138,12 @@ apt: # the first defining a valid mirror wins (in the order as defined here, # not the order as listed in the config). 
# + # Additionally, if the repository requires a custom signing key, it can be + # specified via the same fields as for custom sources: + # 'keyid': providing a key to import via shortid or fingerprint + # 'key': providing a raw PGP key + # 'keyserver': specify an alternate keyserver to pull keys from that + # were specified by keyid - arches: [s390x, arm64] # as above, allowing to have one config for different per arch mirrors # security is optional, if not defined it is set to the same value as primary diff --git a/tests/unittests/test_handler/test_handler_apt_source_v3.py b/tests/unittests/test_handler/test_handler_apt_source_v3.py index ac847238..abb0a9b6 100644 --- a/tests/unittests/test_handler/test_handler_apt_source_v3.py +++ b/tests/unittests/test_handler/test_handler_apt_source_v3.py @@ -1009,6 +1009,29 @@ deb http://ubuntu.com/ubuntu/ xenial-proposed main""") self.assertEqual(mirrors['SECURITY'], smir) + def test_apt_v3_add_mirror_keys(self): + """test_apt_v3_add_mirror_keys - Test adding key for mirrors""" + arch = 'amd64' + cfg = { + 'primary': [ + {'arches': [arch], + 'uri': 'http://test.ubuntu.com/', + 'key': 'fakekey_primary'}], + 'security': [ + {'arches': [arch], + 'uri': 'http://testsec.ubuntu.com/', + 'key': 'fakekey_security'}] + } + + with mock.patch.object(cc_apt_configure, + 'add_apt_key_raw') as mockadd: + cc_apt_configure.add_mirror_keys(cfg, TARGET) + calls = [ + mock.call('fakekey_primary', TARGET), + mock.call('fakekey_security', TARGET), + ] + mockadd.assert_has_calls(calls, any_order=True) + class TestDebconfSelections(TestCase): -- cgit v1.2.3 From 9893dfcd2f0be92197d707236cbd44cb7452364d Mon Sep 17 00:00:00 2001 From: Gabriel Nagy Date: Tue, 10 Aug 2021 18:14:23 +0300 Subject: cc_puppet: support AIO installations and more (#960) - update the puppet module to support AIO installations by setting `install_type` to `aio` - make the install collection configurable through the `collection` parameter; by default the rolling `puppet` collection will be used, which installs the latest version) - when `install_type` is `aio`, puppetlabs repos will be purged after installation; set `cleanup` to `False` to prevent this - AIO installations are performed by downloading and executing a shell script; the URL for this script can be overridden using the `aio_install_url` parameter - make it possible to run puppet agent after installation/configuration via the `exec` key - by default, puppet agent will run with the `--test` argument; this can be overridden via the `exec_args` key --- cloudinit/config/cc_puppet.py | 159 +++++++++++-- doc/examples/cloud-config-puppet.txt | 60 ++++- .../testcases/examples/setup_run_puppet.yaml | 10 +- .../unittests/test_handler/test_handler_puppet.py | 261 +++++++++++++++++++-- 4 files changed, 426 insertions(+), 64 deletions(-) (limited to 'doc/examples') diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py index bc981cf4..a0779eb0 100644 --- a/cloudinit/config/cc_puppet.py +++ b/cloudinit/config/cc_puppet.py @@ -29,22 +29,41 @@ The keys are ``package_name``, ``conf_file``, ``ssl_dir`` and ones that work with puppet 3.x and with distributions that ship modified puppet 4.x that uses the old paths. +Agent packages from the puppetlabs repositories can be installed by setting +``install_type`` to ``aio``. Based on this setting, the default config/SSL/CSR +paths will be adjusted accordingly. To maintain backwards compatibility this +setting defaults to ``packages`` which will install puppet from the distro +packages. 
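+
+For example, a minimal configuration selecting the AIO installer might
+be (a sketch; other keys keep their defaults)::
+
+    puppet:
+        install_type: aio
+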
+ +If installing ``aio`` packages, ``collection`` can also be set to one of +``puppet`` (rolling release), ``puppet6``, ``puppet7`` (or their nightly +counterparts) in order to install specific release streams. By default, the +puppetlabs repository will be purged after installation finishes; set +``cleanup`` to ``false`` to prevent this. AIO packages are installed through a +shell script which is downloaded on the machine and then executed; the path to +this script can be overridden using the ``aio_install_url`` key. + Puppet configuration can be specified under the ``conf`` key. The configuration is specified as a dictionary containing high-level ``
`` keys and lists of ``=`` pairs within each section. Each section name and ``=`` pair is written directly to ``puppet.conf``. As -such, section names should be one of: ``main``, ``master``, ``agent`` or +such, section names should be one of: ``main``, ``server``, ``agent`` or ``user`` and keys should be valid puppet configuration options. The ``certname`` key supports string substitutions for ``%i`` and ``%f``, corresponding to the instance id and fqdn of the machine respectively. If ``ca_cert`` is present, it will not be written to ``puppet.conf``, but -instead will be used as the puppermaster certificate. It should be specified +instead will be used as the puppetserver certificate. It should be specified in pem format as a multi-line string (using the ``|`` yaml notation). -Additionally it's possible to create a csr_attributes.yaml for -CSR attributes and certificate extension requests. +Additionally it's possible to create a ``csr_attributes.yaml`` file for CSR +attributes and certificate extension requests. See https://puppet.com/docs/puppet/latest/config_file_csr_attributes.html +The puppet service will be automatically enabled after installation. A manual +run can also be triggered by setting ``exec`` to ``true``, and additional +arguments can be passed to ``puppet agent`` via the ``exec_args`` key (by +default the agent will execute with the ``--test`` flag). + **Internal name:** ``cc_puppet`` **Module frequency:** per instance @@ -56,13 +75,19 @@ See https://puppet.com/docs/puppet/latest/config_file_csr_attributes.html puppet: install: version: + collection: + install_type: + aio_install_url: 'https://git.io/JBhoQ' + cleanup: conf_file: '/etc/puppet/puppet.conf' ssl_dir: '/var/lib/puppet/ssl' csr_attributes_path: '/etc/puppet/csr_attributes.yaml' package_name: 'puppet' + exec: + exec_args: ['--test'] conf: agent: - server: "puppetmaster.example.org" + server: "puppetserver.example.org" certname: "%i.%f" ca_cert: | -------BEGIN CERTIFICATE------- @@ -84,12 +109,12 @@ from io import StringIO from cloudinit import helpers from cloudinit import subp +from cloudinit import temp_utils from cloudinit import util +from cloudinit import url_helper -PUPPET_CONF_PATH = '/etc/puppet/puppet.conf' -PUPPET_SSL_DIR = '/var/lib/puppet/ssl' -PUPPET_CSR_ATTRIBUTES_PATH = '/etc/puppet/csr_attributes.yaml' -PUPPET_PACKAGE_NAME = 'puppet' +AIO_INSTALL_URL = 'https://raw.githubusercontent.com/puppetlabs/install-puppet/main/install.sh' # noqa: E501 +PUPPET_AGENT_DEFAULT_ARGS = ['--test'] class PuppetConstants(object): @@ -119,6 +144,43 @@ def _autostart_puppet(log): " puppet services on this system")) +def get_config_value(puppet_bin, setting): + """Get the config value for a given setting using `puppet config print` + :param puppet_bin: path to puppet binary + :param setting: setting to query + """ + out, _ = subp.subp([puppet_bin, 'config', 'print', setting]) + return out.rstrip() + + +def install_puppet_aio(url=AIO_INSTALL_URL, version=None, + collection=None, cleanup=True): + """Install puppet-agent from the puppetlabs repositories using the one-shot + shell script + + :param url: URL from where to download the install script + :param version: version to install, blank defaults to latest + :param collection: collection to install, blank defaults to latest + :param cleanup: whether to purge the puppetlabs repo after installation + """ + args = [] + if version is not None: + args = ['-v', version] + if collection is not None: + args += ['-c', collection] + + # Purge puppetlabs repos after installation 
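+    # (done by forwarding the install script's --cleanup flag below)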
+ if cleanup: + args += ['--cleanup'] + content = url_helper.readurl(url=url, retries=5).contents + + # Use tmpdir over tmpfile to avoid 'text file busy' on execute + with temp_utils.tempdir(needs_exe=True) as tmpd: + tmpf = os.path.join(tmpd, 'puppet-install') + util.write_file(tmpf, content, mode=0o700) + return subp.subp([tmpf] + args, capture=False) + + def handle(name, cfg, cloud, log, _args): # If there isn't a puppet key in the configuration don't do anything if 'puppet' not in cfg: @@ -130,23 +192,50 @@ def handle(name, cfg, cloud, log, _args): # Start by installing the puppet package if necessary... install = util.get_cfg_option_bool(puppet_cfg, 'install', True) version = util.get_cfg_option_str(puppet_cfg, 'version', None) - package_name = util.get_cfg_option_str( - puppet_cfg, 'package_name', PUPPET_PACKAGE_NAME) - conf_file = util.get_cfg_option_str( - puppet_cfg, 'conf_file', PUPPET_CONF_PATH) - ssl_dir = util.get_cfg_option_str(puppet_cfg, 'ssl_dir', PUPPET_SSL_DIR) - csr_attributes_path = util.get_cfg_option_str( - puppet_cfg, 'csr_attributes_path', PUPPET_CSR_ATTRIBUTES_PATH) + collection = util.get_cfg_option_str(puppet_cfg, 'collection', None) + install_type = util.get_cfg_option_str( + puppet_cfg, 'install_type', 'packages') + cleanup = util.get_cfg_option_bool(puppet_cfg, 'cleanup', True) + run = util.get_cfg_option_bool(puppet_cfg, 'exec', default=False) + aio_install_url = util.get_cfg_option_str( + puppet_cfg, 'aio_install_url', default=AIO_INSTALL_URL) - p_constants = PuppetConstants(conf_file, ssl_dir, csr_attributes_path, log) + # AIO and distro packages use different paths + if install_type == 'aio': + puppet_user = 'root' + puppet_bin = '/opt/puppetlabs/bin/puppet' + puppet_package = 'puppet-agent' + else: # default to 'packages' + puppet_user = 'puppet' + puppet_bin = 'puppet' + puppet_package = 'puppet' + + package_name = util.get_cfg_option_str( + puppet_cfg, 'package_name', puppet_package) if not install and version: - log.warning(("Puppet install set false but version supplied," + log.warning(("Puppet install set to false but version supplied," " doing nothing.")) elif install: - log.debug(("Attempting to install puppet %s,"), - version if version else 'latest') + log.debug(("Attempting to install puppet %s from %s"), + version if version else 'latest', install_type) - cloud.distro.install_packages((package_name, version)) + if install_type == "packages": + cloud.distro.install_packages((package_name, version)) + elif install_type == "aio": + install_puppet_aio(aio_install_url, version, collection, cleanup) + else: + log.warning("Unknown puppet install type '%s'", install_type) + run = False + + conf_file = util.get_cfg_option_str( + puppet_cfg, 'conf_file', get_config_value(puppet_bin, 'config')) + ssl_dir = util.get_cfg_option_str( + puppet_cfg, 'ssl_dir', get_config_value(puppet_bin, 'ssldir')) + csr_attributes_path = util.get_cfg_option_str( + puppet_cfg, 'csr_attributes_path', + get_config_value(puppet_bin, 'csr_attributes')) + + p_constants = PuppetConstants(conf_file, ssl_dir, csr_attributes_path, log) # ... 
and then update the puppet configuration if 'conf' in puppet_cfg: @@ -165,17 +254,18 @@ def handle(name, cfg, cloud, log, _args): source=p_constants.conf_path) for (cfg_name, cfg) in puppet_cfg['conf'].items(): # Cert configuration is a special case - # Dump the puppet master ca certificate in the correct place + # Dump the puppetserver ca certificate in the correct place if cfg_name == 'ca_cert': # Puppet ssl sub-directory isn't created yet # Create it with the proper permissions and ownership util.ensure_dir(p_constants.ssl_dir, 0o771) - util.chownbyname(p_constants.ssl_dir, 'puppet', 'root') + util.chownbyname(p_constants.ssl_dir, puppet_user, 'root') util.ensure_dir(p_constants.ssl_cert_dir) - util.chownbyname(p_constants.ssl_cert_dir, 'puppet', 'root') + util.chownbyname(p_constants.ssl_cert_dir, puppet_user, 'root') util.write_file(p_constants.ssl_cert_path, cfg) - util.chownbyname(p_constants.ssl_cert_path, 'puppet', 'root') + util.chownbyname(p_constants.ssl_cert_path, + puppet_user, 'root') else: # Iterate through the config items, we'll use ConfigParser.set # to overwrite or create new items as needed @@ -203,6 +293,25 @@ def handle(name, cfg, cloud, log, _args): # Set it up so it autostarts _autostart_puppet(log) + # Run the agent if needed + if run: + log.debug('Running puppet-agent') + cmd = [puppet_bin, 'agent'] + if 'exec_args' in puppet_cfg: + cmd_args = puppet_cfg['exec_args'] + if isinstance(cmd_args, (list, tuple)): + cmd.extend(cmd_args) + elif isinstance(cmd_args, str): + cmd.extend(cmd_args.split()) + else: + log.warning("Unknown type %s provided for puppet" + " 'exec_args' expected list, tuple," + " or string", type(cmd_args)) + cmd.extend(PUPPET_AGENT_DEFAULT_ARGS) + else: + cmd.extend(PUPPET_AGENT_DEFAULT_ARGS) + subp.subp(cmd, capture=False) + # Start puppetd subp.subp(['service', 'puppet', 'start'], capture=False) diff --git a/doc/examples/cloud-config-puppet.txt b/doc/examples/cloud-config-puppet.txt index 3c7e2da7..c6bc15de 100644 --- a/doc/examples/cloud-config-puppet.txt +++ b/doc/examples/cloud-config-puppet.txt @@ -1,25 +1,65 @@ #cloud-config # -# This is an example file to automatically setup and run puppetd +# This is an example file to automatically setup and run puppet # when the instance boots for the first time. # Make sure that this file is valid yaml before starting instances. # It should be passed as user-data when starting the instance. 
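+# (for example via your cloud's user-data field, or locally with a seed
+# image built by: cloud-localds seed.img <this-file>)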
puppet: + # Boolean: whether or not to install puppet (default: true) + install: true + + # A specific version to pass to the installer script or package manager + version: "7.7.0" + + # Valid values are 'packages' and 'aio' (default: 'packages') + install_type: "packages" + + # Puppet collection to install if 'install_type' is 'aio' + collection: "puppet7" + + # Boolean: whether or not to remove the puppetlabs repo after installation + # if 'install_type' is 'aio' (default: true) + cleanup: true + + # If 'install_type' is 'aio', change the url to the install script + aio_install_url: "https://raw.githubusercontent.com/puppetlabs/install-puppet/main/install.sh" + + # Path to the puppet config file (default: depends on 'install_type') + conf_file: "/etc/puppet/puppet.conf" + + # Path to the puppet SSL directory (default: depends on 'install_type') + ssl_dir: "/var/lib/puppet/ssl" + + # Path to the CSR attributes file (default: depends on 'install_type') + csr_attributes_path: "/etc/puppet/csr_attributes.yaml" + + # The name of the puppet package to install (no-op if 'install_type' is 'aio') + package_name: "puppet" + + # Boolean: whether or not to run puppet after configuration finishes + # (default: false) + exec: false + + # A list of arguments to pass to 'puppet agent' if 'exec' is true + # (default: ['--test']) + exec_args: ['--test'] + # Every key present in the conf object will be added to puppet.conf: # [name] # subkey=value # # For example the configuration below will have the following section # added to puppet.conf: - # [puppetd] - # server=puppetmaster.example.org + # [main] + # server=puppetserver.example.org # certname=i-0123456.ip-X-Y-Z.cloud.internal # - # The puppmaster ca certificate will be available in - # /var/lib/puppet/ssl/certs/ca.pem + # The puppetserver ca certificate will be available in + # /var/lib/puppet/ssl/certs/ca.pem if using distro packages + # or /etc/puppetlabs/puppet/ssl/certs/ca.pem if using AIO packages. conf: agent: - server: "puppetmaster.example.org" + server: "puppetserver.example.org" # certname supports substitutions at runtime: # %i: instanceid # Example: i-0123456 @@ -29,11 +69,13 @@ puppet: # NB: the certname will automatically be lowercased as required by puppet certname: "%i.%f" # ca_cert is a special case. It won't be added to puppet.conf. - # It holds the puppetmaster certificate in pem format. + # It holds the puppetserver certificate in pem format. # It should be a multi-line string (using the | yaml notation for # multi-line strings). - # The puppetmaster certificate is located in - # /var/lib/puppet/ssl/ca/ca_crt.pem on the puppetmaster host. + # The puppetserver certificate is located in + # /var/lib/puppet/ssl/ca/ca_crt.pem on the puppetserver host if using + # distro packages or /etc/puppetlabs/puppet/ssl/ca/ca_crt.pem if using AIO + # packages. 
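+  # (the active location can be checked at runtime with:
+  #  puppet config print ssldir)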
# ca_cert: | -----BEGIN CERTIFICATE----- diff --git a/tests/cloud_tests/testcases/examples/setup_run_puppet.yaml b/tests/cloud_tests/testcases/examples/setup_run_puppet.yaml index e366c042..cdb1c28d 100644 --- a/tests/cloud_tests/testcases/examples/setup_run_puppet.yaml +++ b/tests/cloud_tests/testcases/examples/setup_run_puppet.yaml @@ -14,14 +14,14 @@ cloud_config: | # For example the configuration below will have the following section # added to puppet.conf: # [puppetd] - # server=puppetmaster.example.org + # server=puppetserver.example.org # certname=i-0123456.ip-X-Y-Z.cloud.internal # # The puppmaster ca certificate will be available in # /var/lib/puppet/ssl/certs/ca.pem conf: agent: - server: "puppetmaster.example.org" + server: "puppetserver.example.org" # certname supports substitutions at runtime: # %i: instanceid # Example: i-0123456 @@ -31,11 +31,11 @@ cloud_config: | # NB: the certname will automatically be lowercased as required by puppet certname: "%i.%f" # ca_cert is a special case. It won't be added to puppet.conf. - # It holds the puppetmaster certificate in pem format. + # It holds the puppetserver certificate in pem format. # It should be a multi-line string (using the | yaml notation for # multi-line strings). - # The puppetmaster certificate is located in - # /var/lib/puppet/ssl/ca/ca_crt.pem on the puppetmaster host. + # The puppetserver certificate is located in + # /var/lib/puppet/ssl/ca/ca_crt.pem on the puppetserver host. # ca_cert: | -----BEGIN CERTIFICATE----- diff --git a/tests/unittests/test_handler/test_handler_puppet.py b/tests/unittests/test_handler/test_handler_puppet.py index 62388ac6..c0ba2e3c 100644 --- a/tests/unittests/test_handler/test_handler_puppet.py +++ b/tests/unittests/test_handler/test_handler_puppet.py @@ -3,8 +3,9 @@ from cloudinit.config import cc_puppet from cloudinit.sources import DataSourceNone from cloudinit import (distros, helpers, cloud, util) -from cloudinit.tests.helpers import CiTestCase, mock +from cloudinit.tests.helpers import CiTestCase, HttprettyTestCase, mock +import httpretty import logging import textwrap @@ -63,7 +64,8 @@ class TestPuppetHandle(CiTestCase): super(TestPuppetHandle, self).setUp() self.new_root = self.tmp_dir() self.conf = self.tmp_path('puppet.conf') - self.csr_attributes_path = self.tmp_path('csr_attributes.yaml') + self.csr_attributes_path = self.tmp_path( + 'csr_attributes.yaml') def _get_cloud(self, distro): paths = helpers.Paths({'templates_dir': self.new_root}) @@ -72,7 +74,7 @@ class TestPuppetHandle(CiTestCase): myds = DataSourceNone.DataSourceNone({}, mydist, paths) return cloud.Cloud(myds, paths, {}, mydist, None) - def test_handler_skips_missing_puppet_key_in_cloudconfig(self, m_auto): + def test_skips_missing_puppet_key_in_cloudconfig(self, m_auto): """Cloud-config containing no 'puppet' key is skipped.""" mycloud = self._get_cloud('ubuntu') cfg = {} @@ -81,19 +83,19 @@ class TestPuppetHandle(CiTestCase): "no 'puppet' configuration found", self.logs.getvalue()) self.assertEqual(0, m_auto.call_count) - @mock.patch('cloudinit.config.cc_puppet.subp.subp') - def test_handler_puppet_config_starts_puppet_service(self, m_subp, m_auto): + @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) + def test_puppet_config_starts_puppet_service(self, m_subp, m_auto): """Cloud-config 'puppet' configuration starts puppet.""" mycloud = self._get_cloud('ubuntu') cfg = {'puppet': {'install': False}} cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) self.assertEqual(1, 
m_auto.call_count) - self.assertEqual( + self.assertIn( [mock.call(['service', 'puppet', 'start'], capture=False)], m_subp.call_args_list) - @mock.patch('cloudinit.config.cc_puppet.subp.subp') - def test_handler_empty_puppet_config_installs_puppet(self, m_subp, m_auto): + @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) + def test_empty_puppet_config_installs_puppet(self, m_subp, m_auto): """Cloud-config empty 'puppet' configuration installs latest puppet.""" mycloud = self._get_cloud('ubuntu') mycloud.distro = mock.MagicMock() @@ -103,8 +105,8 @@ class TestPuppetHandle(CiTestCase): [mock.call(('puppet', None))], mycloud.distro.install_packages.call_args_list) - @mock.patch('cloudinit.config.cc_puppet.subp.subp') - def test_handler_puppet_config_installs_puppet_on_true(self, m_subp, _): + @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) + def test_puppet_config_installs_puppet_on_true(self, m_subp, _): """Cloud-config with 'puppet' key installs when 'install' is True.""" mycloud = self._get_cloud('ubuntu') mycloud.distro = mock.MagicMock() @@ -114,8 +116,85 @@ class TestPuppetHandle(CiTestCase): [mock.call(('puppet', None))], mycloud.distro.install_packages.call_args_list) - @mock.patch('cloudinit.config.cc_puppet.subp.subp') - def test_handler_puppet_config_installs_puppet_version(self, m_subp, _): + @mock.patch('cloudinit.config.cc_puppet.install_puppet_aio', autospec=True) + @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) + def test_puppet_config_installs_puppet_aio(self, m_subp, m_aio, _): + """Cloud-config with 'puppet' key installs + when 'install_type' is 'aio'.""" + mycloud = self._get_cloud('ubuntu') + mycloud.distro = mock.MagicMock() + cfg = {'puppet': {'install': True, 'install_type': 'aio'}} + cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) + m_aio.assert_called_with( + cc_puppet.AIO_INSTALL_URL, + None, None, True) + + @mock.patch('cloudinit.config.cc_puppet.install_puppet_aio', autospec=True) + @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) + def test_puppet_config_installs_puppet_aio_with_version(self, + m_subp, m_aio, _): + """Cloud-config with 'puppet' key installs + when 'install_type' is 'aio' and 'version' is specified.""" + mycloud = self._get_cloud('ubuntu') + mycloud.distro = mock.MagicMock() + cfg = {'puppet': {'install': True, + 'version': '6.24.0', 'install_type': 'aio'}} + cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) + m_aio.assert_called_with( + cc_puppet.AIO_INSTALL_URL, + '6.24.0', None, True) + + @mock.patch('cloudinit.config.cc_puppet.install_puppet_aio', autospec=True) + @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) + def test_puppet_config_installs_puppet_aio_with_collection(self, + m_subp, + m_aio, _): + """Cloud-config with 'puppet' key installs + when 'install_type' is 'aio' and 'collection' is specified.""" + mycloud = self._get_cloud('ubuntu') + mycloud.distro = mock.MagicMock() + cfg = {'puppet': {'install': True, + 'collection': 'puppet6', 'install_type': 'aio'}} + cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) + m_aio.assert_called_with( + cc_puppet.AIO_INSTALL_URL, + None, 'puppet6', True) + + @mock.patch('cloudinit.config.cc_puppet.install_puppet_aio', autospec=True) + @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) + def test_puppet_config_installs_puppet_aio_with_custom_url(self, + m_subp, + m_aio, _): + """Cloud-config with 'puppet' key installs + 
when 'install_type' is 'aio' and 'aio_install_url' is specified.""" + mycloud = self._get_cloud('ubuntu') + mycloud.distro = mock.MagicMock() + cfg = {'puppet': + {'install': True, + 'aio_install_url': 'http://test.url/path/to/script.sh', + 'install_type': 'aio'}} + cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) + m_aio.assert_called_with( + 'http://test.url/path/to/script.sh', None, None, True) + + @mock.patch('cloudinit.config.cc_puppet.install_puppet_aio', autospec=True) + @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) + def test_puppet_config_installs_puppet_aio_without_cleanup(self, + m_subp, + m_aio, _): + """Cloud-config with 'puppet' key installs + when 'install_type' is 'aio' and no cleanup.""" + mycloud = self._get_cloud('ubuntu') + mycloud.distro = mock.MagicMock() + cfg = {'puppet': {'install': True, + 'cleanup': False, 'install_type': 'aio'}} + cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) + m_aio.assert_called_with( + cc_puppet.AIO_INSTALL_URL, + None, None, False) + + @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) + def test_puppet_config_installs_puppet_version(self, m_subp, _): """Cloud-config 'puppet' configuration can specify a version.""" mycloud = self._get_cloud('ubuntu') mycloud.distro = mock.MagicMock() @@ -125,26 +204,39 @@ class TestPuppetHandle(CiTestCase): [mock.call(('puppet', '3.8'))], mycloud.distro.install_packages.call_args_list) - @mock.patch('cloudinit.config.cc_puppet.subp.subp') - def test_handler_puppet_config_updates_puppet_conf(self, m_subp, m_auto): + @mock.patch('cloudinit.config.cc_puppet.get_config_value') + @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) + def test_puppet_config_updates_puppet_conf(self, + m_subp, m_default, m_auto): """When 'conf' is provided update values in PUPPET_CONF_PATH.""" + + def _fake_get_config_value(puppet_bin, setting): + return self.conf + + m_default.side_effect = _fake_get_config_value mycloud = self._get_cloud('ubuntu') cfg = { 'puppet': { - 'conf': {'agent': {'server': 'puppetmaster.example.org'}}}} - util.write_file(self.conf, '[agent]\nserver = origpuppet\nother = 3') - puppet_conf_path = 'cloudinit.config.cc_puppet.PUPPET_CONF_PATH' + 'conf': {'agent': {'server': 'puppetserver.example.org'}}}} + util.write_file( + self.conf, '[agent]\nserver = origpuppet\nother = 3') mycloud.distro = mock.MagicMock() - with mock.patch(puppet_conf_path, self.conf): - cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) + cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) content = util.load_file(self.conf) - expected = '[agent]\nserver = puppetmaster.example.org\nother = 3\n\n' + expected = '[agent]\nserver = puppetserver.example.org\nother = 3\n\n' self.assertEqual(expected, content) + @mock.patch('cloudinit.config.cc_puppet.get_config_value') @mock.patch('cloudinit.config.cc_puppet.subp.subp') - def test_handler_puppet_writes_csr_attributes_file(self, m_subp, m_auto): + def test_puppet_writes_csr_attributes_file(self, + m_subp, m_default, m_auto): """When csr_attributes is provided creates file in PUPPET_CSR_ATTRIBUTES_PATH.""" + + def _fake_get_config_value(puppet_bin, setting): + return self.csr_attributes_path + + m_default.side_effect = _fake_get_config_value mycloud = self._get_cloud('ubuntu') mycloud.distro = mock.MagicMock() cfg = { @@ -163,10 +255,7 @@ class TestPuppetHandle(CiTestCase): } } } - csr_attributes = 'cloudinit.config.cc_puppet.' 
\ - 'PUPPET_CSR_ATTRIBUTES_PATH' - with mock.patch(csr_attributes, self.csr_attributes_path): - cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) + cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) content = util.load_file(self.csr_attributes_path) expected = textwrap.dedent("""\ custom_attributes: @@ -177,3 +266,125 @@ class TestPuppetHandle(CiTestCase): pp_uuid: ED803750-E3C7-44F5-BB08-41A04433FE2E """) self.assertEqual(expected, content) + + @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) + def test_puppet_runs_puppet_if_requested(self, m_subp, m_auto): + """Run puppet with default args if 'exec' is set to True.""" + mycloud = self._get_cloud('ubuntu') + cfg = {'puppet': {'exec': True}} + cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) + self.assertEqual(1, m_auto.call_count) + self.assertIn( + [mock.call(['puppet', 'agent', '--test'], capture=False)], + m_subp.call_args_list) + + @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) + def test_puppet_runs_puppet_with_args_list_if_requested(self, + m_subp, m_auto): + """Run puppet with 'exec_args' list if 'exec' is set to True.""" + mycloud = self._get_cloud('ubuntu') + cfg = {'puppet': {'exec': True, 'exec_args': [ + '--onetime', '--detailed-exitcodes']}} + cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) + self.assertEqual(1, m_auto.call_count) + self.assertIn( + [mock.call( + ['puppet', 'agent', '--onetime', '--detailed-exitcodes'], + capture=False)], + m_subp.call_args_list) + + @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) + def test_puppet_runs_puppet_with_args_string_if_requested(self, + m_subp, m_auto): + """Run puppet with 'exec_args' string if 'exec' is set to True.""" + mycloud = self._get_cloud('ubuntu') + cfg = {'puppet': {'exec': True, + 'exec_args': '--onetime --detailed-exitcodes'}} + cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) + self.assertEqual(1, m_auto.call_count) + self.assertIn( + [mock.call( + ['puppet', 'agent', '--onetime', '--detailed-exitcodes'], + capture=False)], + m_subp.call_args_list) + + +class TestInstallPuppetAio(HttprettyTestCase): + + @mock.patch('cloudinit.config.cc_puppet.subp.subp', + return_value=(None, None)) + def test_install_with_default_arguments(self, m_subp): + """Install AIO with no arguments""" + response = b'#!/bin/bash\necho "Hi Mom"' + httpretty.register_uri( + httpretty.GET, cc_puppet.AIO_INSTALL_URL, + body=response, status=200) + + cc_puppet.install_puppet_aio() + + self.assertEqual( + [mock.call([mock.ANY, '--cleanup'], capture=False)], + m_subp.call_args_list) + + @mock.patch('cloudinit.config.cc_puppet.subp.subp', + return_value=(None, None)) + def test_install_with_custom_url(self, m_subp): + """Install AIO from custom URL""" + response = b'#!/bin/bash\necho "Hi Mom"' + url = 'http://custom.url/path/to/script.sh' + httpretty.register_uri( + httpretty.GET, url, body=response, status=200) + + cc_puppet.install_puppet_aio('http://custom.url/path/to/script.sh') + + self.assertEqual( + [mock.call([mock.ANY, '--cleanup'], capture=False)], + m_subp.call_args_list) + + @mock.patch('cloudinit.config.cc_puppet.subp.subp', + return_value=(None, None)) + def test_install_with_version(self, m_subp): + """Install AIO with specific version""" + response = b'#!/bin/bash\necho "Hi Mom"' + httpretty.register_uri( + httpretty.GET, cc_puppet.AIO_INSTALL_URL, + body=response, status=200) + + cc_puppet.install_puppet_aio(cc_puppet.AIO_INSTALL_URL, '7.6.0') + + 
self.assertEqual( + [mock.call([mock.ANY, '-v', '7.6.0', '--cleanup'], capture=False)], + m_subp.call_args_list) + + @mock.patch('cloudinit.config.cc_puppet.subp.subp', + return_value=(None, None)) + def test_install_with_collection(self, m_subp): + """Install AIO with specific collection""" + response = b'#!/bin/bash\necho "Hi Mom"' + httpretty.register_uri( + httpretty.GET, cc_puppet.AIO_INSTALL_URL, + body=response, status=200) + + cc_puppet.install_puppet_aio( + cc_puppet.AIO_INSTALL_URL, None, 'puppet6-nightly') + + self.assertEqual( + [mock.call([mock.ANY, '-c', 'puppet6-nightly', '--cleanup'], + capture=False)], + m_subp.call_args_list) + + @mock.patch('cloudinit.config.cc_puppet.subp.subp', + return_value=(None, None)) + def test_install_with_no_cleanup(self, m_subp): + """Install AIO with no cleanup""" + response = b'#!/bin/bash\necho "Hi Mom"' + httpretty.register_uri( + httpretty.GET, cc_puppet.AIO_INSTALL_URL, + body=response, status=200) + + cc_puppet.install_puppet_aio( + cc_puppet.AIO_INSTALL_URL, None, None, False) + + self.assertEqual( + [mock.call([mock.ANY], capture=False)], + m_subp.call_args_list) -- cgit v1.2.3 From 8cebc449812b4bff4afaa0837b140030c419f98e Mon Sep 17 00:00:00 2001 From: James Falcon Date: Fri, 24 Sep 2021 15:22:57 -0500 Subject: Remove invalid ssh_import_id from examples (#1031) --- doc/examples/cloud-config-user-groups.txt | 8 ++++++-- doc/rtd/topics/datasources/vmware.rst | 1 - 2 files changed, 6 insertions(+), 3 deletions(-) (limited to 'doc/examples') diff --git a/doc/examples/cloud-config-user-groups.txt b/doc/examples/cloud-config-user-groups.txt index 1faecf75..30cd3f97 100644 --- a/doc/examples/cloud-config-user-groups.txt +++ b/doc/examples/cloud-config-user-groups.txt @@ -20,14 +20,18 @@ users: groups: users selinux_user: staff_u expiredate: '2032-09-01' - ssh_import_id: foobar + ssh_import_id: + - lp:falcojr + - gh:TheRealFalcon lock_passwd: false passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/ - name: barfoo gecos: Bar B. 
Foo sudo: ALL=(ALL) NOPASSWD:ALL groups: users, admin - ssh_import_id: None + ssh_import_id: + - lp:falcojr + - gh:TheRealFalcon lock_passwd: true ssh_authorized_keys: - diff --git a/doc/rtd/topics/datasources/vmware.rst b/doc/rtd/topics/datasources/vmware.rst index 996eb61f..3ca9f10f 100644 --- a/doc/rtd/topics/datasources/vmware.rst +++ b/doc/rtd/topics/datasources/vmware.rst @@ -236,7 +236,6 @@ this datasource: primary_group: akutz sudo: ALL=(ALL) NOPASSWD:ALL groups: sudo, wheel - ssh_import_id: None lock_passwd: true ssh_authorized_keys: - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDE0c5FczvcGSh/tG4iw+Fhfi/O5/EvUM/96js65tly4++YTXK1d9jcznPS5ruDlbIZ30oveCBd3kT8LLVFwzh6hepYTf0YmCTpF4eDunyqmpCXDvVscQYRXyasEm5olGmVe05RrCJSeSShAeptv4ueIn40kZKOghinGWLDSZG4+FFfgrmcMCpx5YSCtX2gvnEYZJr0czt4rxOZuuP7PkJKgC/mt2PcPjooeX00vAj81jjU2f3XKrjjz2u2+KIt9eba+vOQ6HiC8c2IzRkUAJ5i1atLy8RIbejo23+0P4N2jjk17QySFOVHwPBDTYb0/0M/4ideeU74EN/CgVsvO6JrLsPBR4dojkV5qNbMNxIVv5cUwIy2ThlLgqpNCeFIDLCWNZEFKlEuNeSQ2mPtIO7ETxEL2Cz5y/7AIuildzYMc6wi2bofRC8HmQ7rMXRWdwLKWsR0L7SKjHblIwarxOGqLnUI+k2E71YoP7SZSlxaKi17pqkr0OMCF+kKqvcvHAQuwGqyumTEWOlH6TCx1dSPrW+pVCZSHSJtSTfDW2uzL6y8k10MT06+pVunSrWo5LHAXcS91htHV1M1UrH/tZKSpjYtjMb5+RonfhaFRNzvj7cCE1f3Kp8UVqAdcGBTtReoE8eRUT63qIxjw03a7VwAyB2w+9cu1R9/vAo8SBeRqw== sakutz@gmail.com -- cgit v1.2.3 From 28581988da4b37e3d2423075c64dc1f3bc5da5cc Mon Sep 17 00:00:00 2001 From: Brett Holman Date: Fri, 29 Oct 2021 13:33:33 -0600 Subject: Remove (deprecated) apt-key (#1068) Also, add the "signed by" option to source definitions. This enables users to limit the scope of trust for individual keys. LP: #1836336 --- cloudinit/config/cc_apt_configure.py | 135 ++++++++++++++++++-- cloudinit/gpg.py | 30 +++++ doc/examples/cloud-config-apt.txt | 24 +++- tests/integration_tests/modules/test_apt.py | 62 ++++++++-- tests/unittests/test_gpg.py | 81 ++++++++++++ .../unittests/test_handler/test_handler_apt_key.py | 137 +++++++++++++++++++++ .../test_handler/test_handler_apt_source_v1.py | 75 +++++++---- .../test_handler/test_handler_apt_source_v3.py | 85 ++++++++----- 8 files changed, 548 insertions(+), 81 deletions(-) create mode 100644 tests/unittests/test_gpg.py create mode 100644 tests/unittests/test_handler/test_handler_apt_key.py (limited to 'doc/examples') diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index 0c9c7925..c3c48bbd 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -11,6 +11,7 @@ import glob import os import re +import pathlib from textwrap import dedent from cloudinit.config.schema import ( @@ -27,6 +28,10 @@ LOG = logging.getLogger(__name__) # this will match 'XXX:YYY' (ie, 'cloud-archive:foo' or 'ppa:bar') ADD_APT_REPO_MATCH = r"^[\w-]+:\w" +APT_LOCAL_KEYS = '/etc/apt/trusted.gpg' +APT_TRUSTED_GPG_DIR = '/etc/apt/trusted.gpg.d/' +CLOUD_INIT_GPG_DIR = '/etc/apt/cloud-init.gpg.d/' + frequency = PER_INSTANCE distros = ["ubuntu", "debian"] mirror_property = { @@ -139,7 +144,7 @@ schema = { source1: keyid: 'keyid' keyserver: 'keyserverurl' - source: 'deb http:/// xenial main' + source: 'deb [signed-by=$KEY_FILE] http:/// xenial main' source2: source: 'ppa:' source3: @@ -312,7 +317,8 @@ schema = { - ``$MIRROR`` - ``$RELEASE`` - ``$PRIMARY`` - - ``$SECURITY``""") + - ``$SECURITY`` + - ``$KEY_FILE``""") }, 'conf': { 'type': 'string', @@ -381,7 +387,8 @@ schema = { - ``$MIRROR`` - ``$PRIMARY`` - ``$SECURITY`` - - ``$RELEASE``""") + - ``$RELEASE`` + - ``$KEY_FILE``""") } } } @@ -683,7 +690,7 @@ def add_mirror_keys(cfg, 
target): """Adds any keys included in the primary/security mirror clauses""" for key in ('primary', 'security'): for mirror in cfg.get(key, []): - add_apt_key(mirror, target) + add_apt_key(mirror, target, file_name=key) def generate_sources_list(cfg, release, mirrors, cloud): @@ -714,20 +721,21 @@ def generate_sources_list(cfg, release, mirrors, cloud): util.write_file(aptsrc, disabled, mode=0o644) -def add_apt_key_raw(key, target=None): +def add_apt_key_raw(key, file_name, hardened=False, target=None): """ actual adding of a key as defined in key argument to the system """ LOG.debug("Adding key:\n'%s'", key) try: - subp.subp(['apt-key', 'add', '-'], data=key.encode(), target=target) + name = pathlib.Path(file_name).stem + return apt_key('add', output_file=name, data=key, hardened=hardened) except subp.ProcessExecutionError: LOG.exception("failed to add apt GPG Key to apt keyring") raise -def add_apt_key(ent, target=None): +def add_apt_key(ent, target=None, hardened=False, file_name=None): """ Add key to the system as defined in ent (if any). Supports raw keys or keyid's @@ -741,7 +749,10 @@ def add_apt_key(ent, target=None): ent['key'] = gpg.getkeybyid(ent['keyid'], keyserver) if 'key' in ent: - add_apt_key_raw(ent['key'], target) + return add_apt_key_raw( + ent['key'], + file_name or ent['filename'], + hardened=hardened) def update_packages(cloud): @@ -751,9 +762,28 @@ def update_packages(cloud): def add_apt_sources(srcdict, cloud, target=None, template_params=None, aa_repo_match=None): """ - add entries in /etc/apt/sources.list.d for each abbreviated - sources.list entry in 'srcdict'. When rendering template, also - include the values in dictionary searchList + install keys and repo source .list files defined in 'sources' + + for each 'source' entry in the config: + 1. expand template variables and write source .list file in + /etc/apt/sources.list.d/ + 2. install defined keys + 3. update packages via distro-specific method (i.e. 
apt-key update) + + + @param srcdict: a dict containing elements required + @param cloud: cloud instance object + + Example srcdict value: + { + 'rio-grande-repo': { + 'source': 'deb [signed-by=$KEY_FILE] $MIRROR $RELEASE main', + 'keyid': 'B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77', + 'keyserver': 'pgp.mit.edu' + } + } + + Note: Deb822 format is not supported """ if template_params is None: template_params = {} @@ -770,7 +800,11 @@ def add_apt_sources(srcdict, cloud, target=None, template_params=None, if 'filename' not in ent: ent['filename'] = filename - add_apt_key(ent, target) + if 'source' in ent and '$KEY_FILE' in ent['source']: + key_file = add_apt_key(ent, target, hardened=True) + template_params['KEY_FILE'] = key_file + else: + key_file = add_apt_key(ent, target) if 'source' not in ent: continue @@ -1006,7 +1040,7 @@ def get_arch_mirrorconfig(cfg, mirrortype, arch): # select the specification matching the target arch default = None for mirror_cfg_elem in mirror_cfg_list: - arches = mirror_cfg_elem.get("arches") + arches = mirror_cfg_elem.get("arches", []) if arch in arches: return mirror_cfg_elem if "default" in arches: @@ -1089,6 +1123,81 @@ def apply_apt_config(cfg, proxy_fname, config_fname): LOG.debug("no apt config configured, removed %s", config_fname) +def apt_key(command, output_file=None, data=None, hardened=False, + human_output=True): + """apt-key replacement + + commands implemented: 'add', 'list', 'finger' + + @param output_file: name of output gpg file (without .gpg or .asc) + @param data: key contents + @param human_output: list keys formatted for human parsing + @param hardened: write keys to to /etc/apt/cloud-init.gpg.d/ (referred to + with [signed-by] in sources file) + """ + + def _get_key_files(): + """return all apt keys + + /etc/apt/trusted.gpg (if it exists) and all keyfiles (and symlinks to + keyfiles) in /etc/apt/trusted.gpg.d/ are returned + + based on apt-key implementation + """ + key_files = [APT_LOCAL_KEYS] if os.path.isfile(APT_LOCAL_KEYS) else [] + + for file in os.listdir(APT_TRUSTED_GPG_DIR): + if file.endswith('.gpg') or file.endswith('.asc'): + key_files.append(APT_TRUSTED_GPG_DIR + file) + return key_files if key_files else '' + + def apt_key_add(): + """apt-key add + + returns filepath to new keyring, or '/dev/null' when an error occurs + """ + file_name = '/dev/null' + if not output_file: + util.logexc( + LOG, 'Unknown filename, failed to add key: "{}"'.format(data)) + else: + try: + key_dir = \ + CLOUD_INIT_GPG_DIR if hardened else APT_TRUSTED_GPG_DIR + stdout = gpg.dearmor(data) + file_name = '{}{}.gpg'.format(key_dir, output_file) + util.write_file(file_name, stdout) + except subp.ProcessExecutionError: + util.logexc(LOG, 'Gpg error, failed to add key: {}'.format( + data)) + except UnicodeDecodeError: + util.logexc(LOG, 'Decode error, failed to add key: {}'.format( + data)) + return file_name + + def apt_key_list(): + """apt-key list + + returns string of all trusted keys (in /etc/apt/trusted.gpg and + /etc/apt/trusted.gpg.d/) + """ + key_list = [] + for key_file in _get_key_files(): + try: + key_list.append(gpg.list(key_file, human_output=human_output)) + except subp.ProcessExecutionError as error: + LOG.warning('Failed to list key "%s": %s', key_file, error) + return '\n'.join(key_list) + + if command == 'add': + return apt_key_add() + elif command == 'finger' or command == 'list': + return apt_key_list() + else: + raise ValueError( + 'apt_key() commands add, list, and finger are currently supported') + + CONFIG_CLEANERS = { 
'cloud-init': clean_cloud_init, } diff --git a/cloudinit/gpg.py b/cloudinit/gpg.py index 3780326c..07d682d2 100644 --- a/cloudinit/gpg.py +++ b/cloudinit/gpg.py @@ -14,6 +14,9 @@ import time LOG = logging.getLogger(__name__) +GPG_LIST = ['gpg', '--with-fingerprint', '--no-default-keyring', '--list-keys', + '--keyring'] + def export_armour(key): """Export gpg key, armoured key gets returned""" @@ -27,6 +30,33 @@ def export_armour(key): return armour +def dearmor(key): + """Dearmor gpg key, dearmored key gets returned + + note: man gpg(1) makes no mention of an --armour spelling, only --armor + """ + return subp.subp(["gpg", "--dearmor"], data=key, decode=False)[0] + + +def list(key_file, human_output=False): + """List keys from a keyring with fingerprints. Default to a stable machine + parseable format. + + @param key_file: a string containing a filepath to a key + @param human_output: return output intended for human parsing + """ + cmd = [] + cmd.extend(GPG_LIST) + if not human_output: + cmd.append('--with-colons') + + cmd.append(key_file) + (stdout, stderr) = subp.subp(cmd, capture=True) + if stderr: + LOG.warning('Failed to export armoured key "%s": %s', key_file, stderr) + return stdout + + def recv_key(key, keyserver, retries=(1, 1)): """Receive gpg key from the specified keyserver. diff --git a/doc/examples/cloud-config-apt.txt b/doc/examples/cloud-config-apt.txt index f4392326..7baa141c 100644 --- a/doc/examples/cloud-config-apt.txt +++ b/doc/examples/cloud-config-apt.txt @@ -149,6 +149,7 @@ apt: # security is optional, if not defined it is set to the same value as primary security: - uri: http://security.ubuntu.com/ubuntu + - arches: [default] # If search_dns is set for security the searched pattern is: # -security-mirror @@ -212,14 +213,14 @@ apt: # # The key of each source entry is the filename and will be prepended by # /etc/apt/sources.list.d/ if it doesn't start with a '/'. - # If it doesn't end with .list it will be appended so that apt picks up it's + # If it doesn't end with .list it will be appended so that apt picks up its # configuration. # # Whenever there is no content to be written into such a file, the key is # not used as filename - yet it can still be used as index for merging # configuration. # - # The values inside the entries consost of the following optional entries: + # The values inside the entries consist of the following optional entries: # 'source': a sources.list entry (some variable replacements apply) # 'keyid': providing a key to import via shortid or fingerprint # 'key': providing a raw PGP key @@ -276,13 +277,14 @@ apt: my-repo2.list: # 2.4 replacement variables # - # sources can use $MIRROR, $PRIMARY, $SECURITY and $RELEASE replacement - # variables. + # sources can use $MIRROR, $PRIMARY, $SECURITY, $RELEASE and $KEY_FILE + # replacement variables. # They will be replaced with the default or specified mirrors and the # running release. 
# The entry below would be possibly turned into: # source: deb http://archive.ubuntu.com/ubuntu xenial multiverse - source: deb $MIRROR $RELEASE multiverse + source: deb [signed-by=$KEY_FILE] $MIRROR $RELEASE multiverse + keyid: F430BBA5 my-repo3.list: # this would have the same end effect as 'ppa:curtin-dev/test-archive' @@ -310,9 +312,19 @@ apt: keyid: B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77 keyserver: pgp.mit.edu + ignored5: + # 2.8 signed-by + # + # One can specify [signed-by=$KEY_FILE] in the source definition, which + # will make the key be installed in the directory /etc/cloud-init.gpg.d/ + # and the $KEY_FILE replacement variable will be replaced with the path + # to the specified key. If $KEY_FILE is used, but no key is specified, + # apt update will (rightfully) fail due to an invalid value. + source: deb [signed-by=$KEY_FILE] $MIRROR $RELEASE multiverse + keyid: B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77 my-repo4.list: - # 2.8 raw key + # 2.9 raw key # # The apt signing key can also be specified by providing a pgp public key # block. Providing the PGP key this way is the most robust method for diff --git a/tests/integration_tests/modules/test_apt.py b/tests/integration_tests/modules/test_apt.py index 54711fc0..2c388047 100644 --- a/tests/integration_tests/modules/test_apt.py +++ b/tests/integration_tests/modules/test_apt.py @@ -1,9 +1,11 @@ """Series of integration tests covering apt functionality.""" import re -from tests.integration_tests.clouds import ImageSpecification import pytest +from cloudinit.config import cc_apt_configure +from cloudinit import gpg +from tests.integration_tests.clouds import ImageSpecification from tests.integration_tests.instances import IntegrationInstance @@ -43,6 +45,13 @@ apt: keyid: 441614D8 keyserver: keyserver.ubuntu.com source: "ppa:simplestreams-dev/trunk" + test_signed_by: + keyid: A2EB2DEC0BD7519B7B38BE38376A290EC8068B11 + keyserver: keyserver.ubuntu.com + source: "deb [signed-by=$KEY_FILE] http://ppa.launchpad.net/juju/stable/ubuntu $RELEASE main" + test_bad_key: + key: "" + source: "deb $MIRROR $RELEASE main" test_key: source: "deb http://ppa.launchpad.net/cloud-init-dev/test-archive/ubuntu $RELEASE main" key: | @@ -91,12 +100,27 @@ TEST_KEYSERVER_KEY = "7260 0DB1 5B8E 4C8B 1964 B868 038A CC97 C660 A937" TEST_PPA_KEY = "3552 C902 B4DD F7BD 3842 1821 015D 28D7 4416 14D8" TEST_KEY = "1FF0 D853 5EF7 E719 E5C8 1B9C 083D 06FB E4D3 04DF" +TEST_SIGNED_BY_KEY = "A2EB 2DEC 0BD7 519B 7B38 BE38 376A 290E C806 8B11" @pytest.mark.ci @pytest.mark.ubuntu @pytest.mark.user_data(USER_DATA) class TestApt: + def get_keys(self, class_client: IntegrationInstance): + """Return all keys in /etc/apt/trusted.gpg.d/ and /etc/apt/trusted.gpg + in human readable format. Mimics the output of apt-key finger + """ + list_cmd = ' '.join(gpg.GPG_LIST) + ' ' + keys = class_client.execute(list_cmd + cc_apt_configure.APT_LOCAL_KEYS) + print(keys) + files = class_client.execute( + 'ls ' + cc_apt_configure.APT_TRUSTED_GPG_DIR) + for file in files.split(): + path = cc_apt_configure.APT_TRUSTED_GPG_DIR + file + keys += class_client.execute(list_cmd + path) or '' + return keys + def test_sources_list(self, class_client: IntegrationInstance): """Integration test for the apt module's `sources_list` functionality. 
@@ -152,8 +176,33 @@ class TestApt: 'http://ppa.launchpad.net/simplestreams-dev/trunk/ubuntu' ) in ppa_path_contents - keys = class_client.execute('apt-key finger') - assert TEST_PPA_KEY in keys + assert TEST_PPA_KEY in self.get_keys(class_client) + + def test_signed_by(self, class_client: IntegrationInstance): + """Test the apt signed-by functionality. + """ + release = ImageSpecification.from_os_image().release + source = ( + "deb [signed-by=/etc/apt/cloud-init.gpg.d/test_signed_by.gpg] " + "http://ppa.launchpad.net/juju/stable/ubuntu" + " {} main".format(release)) + print(class_client.execute('cat /var/log/cloud-init.log')) + path_contents = class_client.read_from_file( + '/etc/apt/sources.list.d/test_signed_by.list') + assert path_contents == source + + key = class_client.execute( + 'gpg --no-default-keyring --with-fingerprint --list-keys ' + '--keyring /etc/apt/cloud-init.gpg.d/test_signed_by.gpg') + + assert TEST_SIGNED_BY_KEY in key + + def test_bad_key(self, class_client: IntegrationInstance): + """Test the apt signed-by functionality. + """ + with pytest.raises(OSError): + class_client.read_from_file( + '/etc/apt/trusted.list.d/test_bad_key.gpg') def test_key(self, class_client: IntegrationInstance): """Test the apt key functionality. @@ -168,9 +217,7 @@ class TestApt: assert ( 'http://ppa.launchpad.net/cloud-init-dev/test-archive/ubuntu' ) in test_archive_contents - - keys = class_client.execute('apt-key finger') - assert TEST_KEY in keys + assert TEST_KEY in self.get_keys(class_client) def test_keyserver(self, class_client: IntegrationInstance): """Test the apt keyserver functionality. @@ -186,8 +233,7 @@ class TestApt: 'http://ppa.launchpad.net/cloud-init-raharper/curtin-dev/ubuntu' ) in test_keyserver_contents - keys = class_client.execute('apt-key finger') - assert TEST_KEYSERVER_KEY in keys + assert TEST_KEYSERVER_KEY in self.get_keys(class_client) def test_os_pipelining(self, class_client: IntegrationInstance): """Test 'os' settings does not write apt config file. diff --git a/tests/unittests/test_gpg.py b/tests/unittests/test_gpg.py new file mode 100644 index 00000000..451ffa91 --- /dev/null +++ b/tests/unittests/test_gpg.py @@ -0,0 +1,81 @@ +import pytest +from unittest import mock + +from cloudinit import gpg +from cloudinit import subp + +TEST_KEY_HUMAN = ''' +/etc/apt/cloud-init.gpg.d/my_key.gpg +-------------------------------------------- +pub rsa4096 2021-10-22 [SC] + 3A3E F34D FDED B3B7 F3FD F603 F83F 7712 9A5E BD85 +uid [ unknown] Brett Holman +sub rsa4096 2021-10-22 [A] +sub rsa4096 2021-10-22 [E] +''' + +TEST_KEY_MACHINE = ''' +tru::1:1635129362:0:3:1:5 +pub:-:4096:1:F83F77129A5EBD85:1634912922:::-:::scESCA::::::23::0: +fpr:::::::::3A3EF34DFDEDB3B7F3FDF603F83F77129A5EBD85: +uid:-::::1634912922::64F1F1D6FA96316752D635D7C6406C52C40713C7::Brett Holman \ +::::::::::0: +sub:-:4096:1:544B39C9A9141F04:1634912922::::::a::::::23: +fpr:::::::::8BD901490D6EC986D03D6F0D544B39C9A9141F04: +sub:-:4096:1:F45D9443F0A87092:1634912922::::::e::::::23: +fpr:::::::::8CCCB332317324F030A45B19F45D9443F0A87092: +''' + +TEST_KEY_FINGERPRINT_HUMAN = \ + '3A3E F34D FDED B3B7 F3FD F603 F83F 7712 9A5E BD85' + +TEST_KEY_FINGERPRINT_MACHINE = \ + '3A3EF34DFDEDB3B7F3FDF603F83F77129A5EBD85' + + +class TestGPGCommands: + def test_dearmor_bad_value(self): + """This exception is handled by the callee. Ensure it is not caught + internally. 
+ """ + with mock.patch.object( + subp, + 'subp', + side_effect=subp.ProcessExecutionError): + with pytest.raises(subp.ProcessExecutionError): + gpg.dearmor('garbage key value') + + def test_gpg_list_args(self): + """Verify correct command gets called to list keys + """ + no_colons = [ + 'gpg', + '--with-fingerprint', + '--no-default-keyring', + '--list-keys', + '--keyring', + 'key'] + colons = [ + 'gpg', + '--with-fingerprint', + '--no-default-keyring', + '--list-keys', + '--keyring', + '--with-colons', + 'key'] + with mock.patch.object(subp, 'subp', return_value=('', '')) as m_subp: + gpg.list('key') + assert mock.call(colons, capture=True) == m_subp.call_args + + gpg.list('key', human_output=True) + test_calls = mock.call((no_colons), capture=True) + assert test_calls == m_subp.call_args + + def test_gpg_dearmor_args(self): + """Verify correct command gets called to dearmor keys + """ + with mock.patch.object(subp, 'subp', return_value=('', '')) as m_subp: + gpg.dearmor('key') + test_call = mock.call( + ["gpg", "--dearmor"], data='key', decode=False) + assert test_call == m_subp.call_args diff --git a/tests/unittests/test_handler/test_handler_apt_key.py b/tests/unittests/test_handler/test_handler_apt_key.py new file mode 100644 index 00000000..00e5a38d --- /dev/null +++ b/tests/unittests/test_handler/test_handler_apt_key.py @@ -0,0 +1,137 @@ +import os +from unittest import mock + +from cloudinit.config import cc_apt_configure +from cloudinit import subp +from cloudinit import util + +TEST_KEY_HUMAN = ''' +/etc/apt/cloud-init.gpg.d/my_key.gpg +-------------------------------------------- +pub rsa4096 2021-10-22 [SC] + 3A3E F34D FDED B3B7 F3FD F603 F83F 7712 9A5E BD85 +uid [ unknown] Brett Holman +sub rsa4096 2021-10-22 [A] +sub rsa4096 2021-10-22 [E] +''' + +TEST_KEY_MACHINE = ''' +tru::1:1635129362:0:3:1:5 +pub:-:4096:1:F83F77129A5EBD85:1634912922:::-:::scESCA::::::23::0: +fpr:::::::::3A3EF34DFDEDB3B7F3FDF603F83F77129A5EBD85: +uid:-::::1634912922::64F1F1D6FA96316752D635D7C6406C52C40713C7::Brett Holman \ +::::::::::0: +sub:-:4096:1:544B39C9A9141F04:1634912922::::::a::::::23: +fpr:::::::::8BD901490D6EC986D03D6F0D544B39C9A9141F04: +sub:-:4096:1:F45D9443F0A87092:1634912922::::::e::::::23: +fpr:::::::::8CCCB332317324F030A45B19F45D9443F0A87092: +''' + +TEST_KEY_FINGERPRINT_HUMAN = \ + '3A3E F34D FDED B3B7 F3FD F603 F83F 7712 9A5E BD85' + +TEST_KEY_FINGERPRINT_MACHINE = \ + '3A3EF34DFDEDB3B7F3FDF603F83F77129A5EBD85' + + +class TestAptKey: + """TestAptKey + Class to test apt-key commands + """ + @mock.patch.object(subp, 'subp', return_value=('fakekey', '')) + @mock.patch.object(util, 'write_file') + def _apt_key_add_success_helper(self, directory, *args, hardened=False): + file = cc_apt_configure.apt_key( + 'add', + output_file='my-key', + data='fakekey', + hardened=hardened) + assert file == directory + '/my-key.gpg' + + def test_apt_key_add_success(self): + """Verify the correct directory path gets returned for unhardened case + """ + self._apt_key_add_success_helper('/etc/apt/trusted.gpg.d') + + def test_apt_key_add_success_hardened(self): + """Verify the correct directory path gets returned for hardened case + """ + self._apt_key_add_success_helper( + '/etc/apt/cloud-init.gpg.d', + hardened=True) + + def test_apt_key_add_fail_no_file_name(self): + """Verify that null filename gets handled correctly + """ + file = cc_apt_configure.apt_key( + 'add', + output_file=None, + data='') + assert '/dev/null' == file + + def _apt_key_fail_helper(self): + file = cc_apt_configure.apt_key( + 'add', + 
output_file='my-key', + data='fakekey') + assert file == '/dev/null' + + @mock.patch.object(subp, 'subp', side_effect=subp.ProcessExecutionError) + def test_apt_key_add_fail_no_file_name_subproc(self, *args): + """Verify that bad key value gets handled correctly + """ + self._apt_key_fail_helper() + + @mock.patch.object( + subp, 'subp', side_effect=UnicodeDecodeError('test', b'', 1, 1, '')) + def test_apt_key_add_fail_no_file_name_unicode(self, *args): + """Verify that bad key encoding gets handled correctly + """ + self._apt_key_fail_helper() + + def _apt_key_list_success_helper(self, finger, key, human_output=True): + @mock.patch.object(os, 'listdir', return_value=('/fake/dir/key.gpg',)) + @mock.patch.object(subp, 'subp', return_value=(key, '')) + def mocked_list(*a): + + keys = cc_apt_configure.apt_key('list', human_output) + assert finger in keys + mocked_list() + + def test_apt_key_list_success_human(self): + """Verify expected key output, human + """ + self._apt_key_list_success_helper( + TEST_KEY_FINGERPRINT_HUMAN, + TEST_KEY_HUMAN) + + def test_apt_key_list_success_machine(self): + """Verify expected key output, machine + """ + self._apt_key_list_success_helper( + TEST_KEY_FINGERPRINT_MACHINE, + TEST_KEY_MACHINE, human_output=False) + + @mock.patch.object(os, 'listdir', return_value=()) + @mock.patch.object(subp, 'subp', return_value=('', '')) + def test_apt_key_list_fail_no_keys(self, *args): + """Ensure falsy output for no keys + """ + keys = cc_apt_configure.apt_key('list') + assert not keys + + @mock.patch.object(os, 'listdir', return_value=('file_not_gpg_key.txt')) + @mock.patch.object(subp, 'subp', return_value=('', '')) + def test_apt_key_list_fail_no_keys_file(self, *args): + """Ensure non-gpg file is not returned. + + apt-key used file extensions for this, so we do too + """ + assert not cc_apt_configure.apt_key('list') + + @mock.patch.object(subp, 'subp', side_effect=subp.ProcessExecutionError) + @mock.patch.object(os, 'listdir', return_value=('bad_gpg_key.gpg')) + def test_apt_key_list_fail_bad_key_file(self, *args): + """Ensure bad gpg key doesn't throw exeption. 
+ """ + assert not cc_apt_configure.apt_key('list') diff --git a/tests/unittests/test_handler/test_handler_apt_source_v1.py b/tests/unittests/test_handler/test_handler_apt_source_v1.py index 367971cb..2357d699 100644 --- a/tests/unittests/test_handler/test_handler_apt_source_v1.py +++ b/tests/unittests/test_handler/test_handler_apt_source_v1.py @@ -9,6 +9,7 @@ import os import re import shutil import tempfile +import pathlib from unittest import mock from unittest.mock import call @@ -279,16 +280,16 @@ class TestAptSourceConfig(TestCase): """ cfg = self.wrapv1conf(cfg) - with mock.patch.object(subp, 'subp', - return_value=('fakekey 1234', '')) as mockobj: + with mock.patch.object(cc_apt_configure, 'add_apt_key') as mockobj: cc_apt_configure.handle("test", cfg, self.fakecloud, None, None) - # check if it added the right ammount of keys + # check if it added the right number of keys calls = [] - for _ in range(keynum): - calls.append(call(['apt-key', 'add', '-'], - data=b'fakekey 1234', - target=None)) + sources = cfg['apt']['sources'] + for src in sources: + print(sources[src]) + calls.append(call(sources[src], None)) + mockobj.assert_has_calls(calls, any_order=True) self.assertTrue(os.path.isfile(filename)) @@ -364,11 +365,17 @@ class TestAptSourceConfig(TestCase): """ cfg = self.wrapv1conf([cfg]) - with mock.patch.object(subp, 'subp') as mockobj: + with mock.patch.object(cc_apt_configure, 'add_apt_key') as mockobj: cc_apt_configure.handle("test", cfg, self.fakecloud, None, None) - mockobj.assert_called_with(['apt-key', 'add', '-'], - data=b'fakekey 4321', target=None) + # check if it added the right amount of keys + sources = cfg['apt']['sources'] + calls = [] + for src in sources: + print(sources[src]) + calls.append(call(sources[src], None)) + + mockobj.assert_has_calls(calls, any_order=True) self.assertTrue(os.path.isfile(filename)) @@ -405,12 +412,15 @@ class TestAptSourceConfig(TestCase): cfg = {'key': "fakekey 4242", 'filename': self.aptlistfile} cfg = self.wrapv1conf([cfg]) - - with mock.patch.object(subp, 'subp') as mockobj: + with mock.patch.object(cc_apt_configure, 'apt_key') as mockobj: cc_apt_configure.handle("test", cfg, self.fakecloud, None, None) - mockobj.assert_called_once_with(['apt-key', 'add', '-'], - data=b'fakekey 4242', target=None) + calls = (call( + 'add', + output_file=pathlib.Path(self.aptlistfile).stem, + data='fakekey 4242', + hardened=False),) + mockobj.assert_has_calls(calls, any_order=True) # filename should be ignored on key only self.assertFalse(os.path.isfile(self.aptlistfile)) @@ -422,16 +432,26 @@ class TestAptSourceConfig(TestCase): cfg = self.wrapv1conf([cfg]) with mock.patch.object(subp, 'subp', - return_value=('fakekey 1212', '')) as mockobj: - cc_apt_configure.handle("test", cfg, self.fakecloud, None, None) - - mockobj.assert_called_with(['apt-key', 'add', '-'], - data=b'fakekey 1212', target=None) + return_value=('fakekey 1212', '')): + with mock.patch.object(cc_apt_configure, 'apt_key') as mockobj: + cc_apt_configure.handle( + "test", + cfg, + self.fakecloud, + None, + None) + + calls = (call( + 'add', + output_file=pathlib.Path(self.aptlistfile).stem, + data='fakekey 1212', + hardened=False),) + mockobj.assert_has_calls(calls, any_order=True) # filename should be ignored on key only self.assertFalse(os.path.isfile(self.aptlistfile)) - def apt_src_keyid_real(self, cfg, expectedkey): + def apt_src_keyid_real(self, cfg, expectedkey, is_hardened=None): """apt_src_keyid_real Test specification of a keyid without source including up to addition of 
the key (add_apt_key_raw mocked to keep the @@ -446,9 +466,14 @@ class TestAptSourceConfig(TestCase): return_value=expectedkey) as mockgetkey: cc_apt_configure.handle("test", cfg, self.fakecloud, None, None) - + if is_hardened is not None: + mockkey.assert_called_with( + expectedkey, + self.aptlistfile, + hardened=is_hardened) + else: + mockkey.assert_called_with(expectedkey, self.aptlistfile) mockgetkey.assert_called_with(key, keyserver) - mockkey.assert_called_with(expectedkey, None) # filename should be ignored on key only self.assertFalse(os.path.isfile(self.aptlistfile)) @@ -459,7 +484,7 @@ class TestAptSourceConfig(TestCase): cfg = {'keyid': keyid, 'filename': self.aptlistfile} - self.apt_src_keyid_real(cfg, EXPECTEDKEY) + self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False) def test_apt_src_longkeyid_real(self): """test_apt_src_longkeyid_real - Test long keyid including key add""" @@ -467,7 +492,7 @@ class TestAptSourceConfig(TestCase): cfg = {'keyid': keyid, 'filename': self.aptlistfile} - self.apt_src_keyid_real(cfg, EXPECTEDKEY) + self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False) def test_apt_src_longkeyid_ks_real(self): """test_apt_src_longkeyid_ks_real - Test long keyid from other ks""" @@ -476,7 +501,7 @@ class TestAptSourceConfig(TestCase): 'keyserver': 'keys.gnupg.net', 'filename': self.aptlistfile} - self.apt_src_keyid_real(cfg, EXPECTEDKEY) + self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False) def test_apt_src_ppa(self): """Test adding a ppa""" diff --git a/tests/unittests/test_handler/test_handler_apt_source_v3.py b/tests/unittests/test_handler/test_handler_apt_source_v3.py index d4db610f..20289121 100644 --- a/tests/unittests/test_handler/test_handler_apt_source_v3.py +++ b/tests/unittests/test_handler/test_handler_apt_source_v3.py @@ -10,6 +10,7 @@ import re import shutil import socket import tempfile +import pathlib from unittest import TestCase, mock from unittest.mock import call @@ -214,22 +215,24 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): self.aptlistfile3: {'source': 'deb $MIRROR $RELEASE universe'}} self._apt_src_replace_tri(cfg) - def _apt_src_keyid(self, filename, cfg, keynum): + def _apt_src_keyid(self, filename, cfg, keynum, is_hardened=None): """_apt_src_keyid Test specification of a source + keyid """ params = self._get_default_params() - with mock.patch("cloudinit.subp.subp", - return_value=('fakekey 1234', '')) as mockobj: + with mock.patch.object(cc_apt_configure, 'add_apt_key') as mockobj: self._add_apt_sources(cfg, TARGET, template_params=params, aa_repo_match=self.matcher) - # check if it added the right ammount of keys + # check if it added the right number of keys calls = [] - for _ in range(keynum): - calls.append(call(['apt-key', 'add', '-'], data=b'fakekey 1234', - target=TARGET)) + for key in cfg: + if is_hardened is not None: + calls.append(call(cfg[key], hardened=is_hardened)) + else: + calls.append(call(cfg[key], TARGET)) + mockobj.assert_has_calls(calls, any_order=True) self.assertTrue(os.path.isfile(filename)) @@ -248,6 +251,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): 'http://ppa.launchpad.net/' 'smoser/cloud-init-test/ubuntu' ' xenial main'), + 'filename': self.aptlistfile, 'keyid': "03683F77"}} self._apt_src_keyid(self.aptlistfile, cfg, 1) @@ -268,6 +272,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): 'http://ppa.launchpad.net/' 'smoser/cloud-init-test/ubuntu' ' xenial multiverse'), + 'filename': self.aptlistfile3, 'keyid': "03683F77"}} 
self._apt_src_keyid(self.aptlistfile, cfg, 3) @@ -293,15 +298,19 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): 'http://ppa.launchpad.net/' 'smoser/cloud-init-test/ubuntu' ' xenial main'), + 'filename': self.aptlistfile, 'key': "fakekey 4321"}} - with mock.patch.object(subp, 'subp') as mockobj: + with mock.patch.object(cc_apt_configure, 'apt_key') as mockobj: self._add_apt_sources(cfg, TARGET, template_params=params, aa_repo_match=self.matcher) - mockobj.assert_any_call(['apt-key', 'add', '-'], data=b'fakekey 4321', - target=TARGET) - + calls = (call( + 'add', + output_file=pathlib.Path(self.aptlistfile).stem, + data='fakekey 4321', + hardened=False),) + mockobj.assert_has_calls(calls, any_order=True) self.assertTrue(os.path.isfile(self.aptlistfile)) contents = util.load_file(self.aptlistfile) @@ -317,12 +326,16 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): params = self._get_default_params() cfg = {self.aptlistfile: {'key': "fakekey 4242"}} - with mock.patch.object(subp, 'subp') as mockobj: + with mock.patch.object(cc_apt_configure, 'apt_key') as mockobj: self._add_apt_sources(cfg, TARGET, template_params=params, aa_repo_match=self.matcher) - mockobj.assert_any_call(['apt-key', 'add', '-'], data=b'fakekey 4242', - target=TARGET) + calls = (call( + 'add', + output_file=pathlib.Path(self.aptlistfile).stem, + data='fakekey 4242', + hardened=False),) + mockobj.assert_has_calls(calls, any_order=True) # filename should be ignored on key only self.assertFalse(os.path.isfile(self.aptlistfile)) @@ -331,19 +344,23 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): """test_apt_v3_src_keyidonly - Test keyid without source""" params = self._get_default_params() cfg = {self.aptlistfile: {'keyid': "03683F77"}} - with mock.patch.object(subp, 'subp', - return_value=('fakekey 1212', '')) as mockobj: - self._add_apt_sources(cfg, TARGET, template_params=params, - aa_repo_match=self.matcher) + return_value=('fakekey 1212', '')): + with mock.patch.object(cc_apt_configure, 'apt_key') as mockobj: + self._add_apt_sources(cfg, TARGET, template_params=params, + aa_repo_match=self.matcher) - mockobj.assert_any_call(['apt-key', 'add', '-'], data=b'fakekey 1212', - target=TARGET) + calls = (call( + 'add', + output_file=pathlib.Path(self.aptlistfile).stem, + data='fakekey 1212', + hardened=False),) + mockobj.assert_has_calls(calls, any_order=True) # filename should be ignored on key only self.assertFalse(os.path.isfile(self.aptlistfile)) - def apt_src_keyid_real(self, cfg, expectedkey): + def apt_src_keyid_real(self, cfg, expectedkey, is_hardened=None): """apt_src_keyid_real Test specification of a keyid without source including up to addition of the key (add_apt_key_raw mocked to keep the @@ -361,7 +378,11 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): mockgetkey.assert_called_with(keycfg['keyid'], keycfg.get('keyserver', 'keyserver.ubuntu.com')) - mockkey.assert_called_with(expectedkey, TARGET) + if is_hardened is not None: + mockkey.assert_called_with( + expectedkey, + keycfg['keyfile'], + hardened=is_hardened) # filename should be ignored on key only self.assertFalse(os.path.isfile(self.aptlistfile)) @@ -369,21 +390,24 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): def test_apt_v3_src_keyid_real(self): """test_apt_v3_src_keyid_real - Test keyid including key add""" keyid = "03683F77" - cfg = {self.aptlistfile: {'keyid': keyid}} + cfg = {self.aptlistfile: {'keyid': keyid, + 'keyfile': self.aptlistfile}} - self.apt_src_keyid_real(cfg, 
EXPECTEDKEY) + self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False) def test_apt_v3_src_longkeyid_real(self): """test_apt_v3_src_longkeyid_real Test long keyid including key add""" keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77" - cfg = {self.aptlistfile: {'keyid': keyid}} + cfg = {self.aptlistfile: {'keyid': keyid, + 'keyfile': self.aptlistfile}} - self.apt_src_keyid_real(cfg, EXPECTEDKEY) + self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False) def test_apt_v3_src_longkeyid_ks_real(self): """test_apt_v3_src_longkeyid_ks_real Test long keyid from other ks""" keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77" cfg = {self.aptlistfile: {'keyid': keyid, + 'keyfile': self.aptlistfile, 'keyserver': 'keys.gnupg.net'}} self.apt_src_keyid_real(cfg, EXPECTEDKEY) @@ -393,6 +417,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): keyid = "03683F77" params = self._get_default_params() cfg = {self.aptlistfile: {'keyid': keyid, + 'keyfile': self.aptlistfile, 'keyserver': 'test.random.com'}} # in some test environments only *.ubuntu.com is reachable @@ -405,7 +430,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): aa_repo_match=self.matcher) mockgetkey.assert_called_with('03683F77', 'test.random.com') - mockadd.assert_called_with('fakekey', TARGET) + mockadd.assert_called_with('fakekey', self.aptlistfile, hardened=False) # filename should be ignored on key only self.assertFalse(os.path.isfile(self.aptlistfile)) @@ -1002,10 +1027,12 @@ deb http://ubuntu.com/ubuntu/ xenial-proposed main""") 'primary': [ {'arches': [arch], 'uri': 'http://test.ubuntu.com/', + 'filename': 'primary', 'key': 'fakekey_primary'}], 'security': [ {'arches': [arch], 'uri': 'http://testsec.ubuntu.com/', + 'filename': 'security', 'key': 'fakekey_security'}] } @@ -1013,8 +1040,8 @@ deb http://ubuntu.com/ubuntu/ xenial-proposed main""") 'add_apt_key_raw') as mockadd: cc_apt_configure.add_mirror_keys(cfg, TARGET) calls = [ - mock.call('fakekey_primary', TARGET), - mock.call('fakekey_security', TARGET), + mock.call('fakekey_primary', 'primary', hardened=False), + mock.call('fakekey_security', 'security', hardened=False), ] mockadd.assert_has_calls(calls, any_order=True) -- cgit v1.2.3 From b1beb53886527eb787b504f374f24a7bd5fe06ac Mon Sep 17 00:00:00 2001 From: Brett Holman Date: Mon, 1 Nov 2021 12:42:59 -0600 Subject: Fix unhandled apt_configure case. (#1065) Don't throw an exception when mirror arch is unspecified. 
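The failure mode is easy to reproduce outside cloud-init. A YAML mirror
entry written as "- arches:" (with no value) parses to a dict whose
'arches' key is present but None, and dict.get() only applies its
default when the key is absent. A minimal sketch of the before/after
behaviour, using a hypothetical stand-in for the parsed mirror entry
(not the real config parser):

    # Parsed from a cloud-config mirror list entry with an empty
    # "arches:" key; the key exists, but its value is None.
    mirror_cfg_elem = {"uri": "http://security.ubuntu.com/ubuntu",
                       "arches": None}

    # Old code: get("arches", []) still returns None here, so the
    # membership test raises TypeError ('NoneType' is not iterable).
    try:
        "amd64" in mirror_cfg_elem.get("arches", [])
    except TypeError:
        print("old lookup crashes when the arch list is unspecified")

    # New code: coerce any falsy value (None, missing, empty) to [].
    arches = mirror_cfg_elem.get("arches") or []
    print("amd64" in arches)  # False -- no match, but no exception
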
--- cloudinit/config/cc_apt_configure.py | 15 +++++++++------ doc/examples/cloud-config-apt.txt | 2 +- 2 files changed, 10 insertions(+), 7 deletions(-) (limited to 'doc/examples') diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index c3c48bbd..86d0feae 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -36,14 +36,14 @@ frequency = PER_INSTANCE distros = ["ubuntu", "debian"] mirror_property = { 'type': 'array', - 'item': { + 'items': { 'type': 'object', 'additionalProperties': False, 'required': ['arches'], 'properties': { 'arches': { 'type': 'array', - 'item': { + 'items': { 'type': 'string' }, 'minItems': 1 @@ -54,7 +54,7 @@ mirror_property = { }, 'search': { 'type': 'array', - 'item': { + 'items': { 'type': 'string', 'format': 'uri' }, @@ -113,11 +113,12 @@ schema = { search: - 'http://cool.but-sometimes-unreachable.com/ubuntu' - 'http://us.archive.ubuntu.com/ubuntu' - search_dns: + search_dns: false - arches: - s390x - arm64 uri: 'http://archive-to-use-for-arm64.example.com/ubuntu' + security: - arches: - default @@ -260,7 +261,8 @@ schema = { ``http://archive.ubuntu.com/ubuntu``. - ``security`` => \ ``http://security.ubuntu.com/ubuntu`` - """)}, + """) + }, 'security': { **mirror_property, 'description': dedent("""\ @@ -380,6 +382,7 @@ schema = { - ``key``: a raw PGP key. - ``keyserver``: alternate keyserver to pull \ ``keyid`` key from. + - ``filename``: specify the name of the .list file The ``source`` key supports variable replacements for the following strings: @@ -1040,7 +1043,7 @@ def get_arch_mirrorconfig(cfg, mirrortype, arch): # select the specification matching the target arch default = None for mirror_cfg_elem in mirror_cfg_list: - arches = mirror_cfg_elem.get("arches", []) + arches = mirror_cfg_elem.get("arches") or [] if arch in arches: return mirror_cfg_elem if "default" in arches: diff --git a/doc/examples/cloud-config-apt.txt b/doc/examples/cloud-config-apt.txt index 7baa141c..778187b5 100644 --- a/doc/examples/cloud-config-apt.txt +++ b/doc/examples/cloud-config-apt.txt @@ -149,7 +149,7 @@ apt: # security is optional, if not defined it is set to the same value as primary security: - uri: http://security.ubuntu.com/ubuntu - - arches: [default] + arches: [default] # If search_dns is set for security the searched pattern is: # -security-mirror -- cgit v1.2.3 From 4bf4de25ea487ceb7005dc63d01f73fe56a13a16 Mon Sep 17 00:00:00 2001 From: Chris Patterson Date: Mon, 29 Nov 2021 10:59:39 -0500 Subject: sources/azure: remove unused remnants related to agent command (#1119) Some references were missed in the removal of the agent command in PR #799. This simply removes the remaining references. 
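For reference, the setting that is now a no-op is the one dropped from
doc/examples/cloud-config-datasources.txt below; installs that still
carry it in local /etc/cloud/cloud.cfg.d/ overrides can simply delete
it (quoted from the removed example, not a new option):

    datasource:
      Azure:
        agent_command: [service, walinuxagent, start]
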
Signed-off-by: Chris Patterson --- cloudinit/sources/DataSourceAzure.py | 6 +--- doc/examples/cloud-config-datasources.txt | 1 - tests/unittests/test_datasource/test_azure.py | 46 +++++++-------------------- 3 files changed, 13 insertions(+), 40 deletions(-) (limited to 'doc/examples') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 93493fa0..6c1bc085 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -52,8 +52,6 @@ LOG = logging.getLogger(__name__) DS_NAME = 'Azure' DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"} -AGENT_START = ['service', 'walinuxagent', 'start'] -AGENT_START_BUILTIN = "__builtin__" BOUNCE_COMMAND_IFUP = [ 'sh', '-xc', "i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x" @@ -262,7 +260,6 @@ if util.is_FreeBSD(): PLATFORM_ENTROPY_SOURCE = None BUILTIN_DS_CONFIG = { - 'agent_command': AGENT_START_BUILTIN, 'data_dir': AGENT_SEED_DIR, 'set_hostname': True, 'hostname_bounce': { @@ -1525,8 +1522,7 @@ class DataSourceAzure(sources.DataSource): dhclient_lease_file, pubkey_info=pubkey_info) - LOG.debug("negotiating with fabric via agent command %s", - self.ds_cfg['agent_command']) + LOG.debug("negotiating with fabric") try: fabric_data = metadata_func() except Exception as e: diff --git a/doc/examples/cloud-config-datasources.txt b/doc/examples/cloud-config-datasources.txt index 13bb687c..d1a4d79e 100644 --- a/doc/examples/cloud-config-datasources.txt +++ b/doc/examples/cloud-config-datasources.txt @@ -46,7 +46,6 @@ datasource: local-hostname: myhost.internal Azure: - agent_command: [service, walinuxagent, start] set_hostname: True hostname_bounce: interface: eth0 diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index d0274049..995d2b10 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -652,7 +652,7 @@ scbus-1 on xpt0 bus 0 ]) return dsaz - def _get_ds(self, data, agent_command=None, distro='ubuntu', + def _get_ds(self, data, distro='ubuntu', apply_network=None, instance_id=None): def _wait_for_files(flist, _maxwait=None, _naplen=None): @@ -722,8 +722,6 @@ scbus-1 on xpt0 bus 0 distro = distro_cls(distro, data.get('sys_cfg', {}), self.paths) dsrc = dsaz.DataSourceAzure( data.get('sys_cfg', {}), distro=distro, paths=self.paths) - if agent_command is not None: - dsrc.ds_cfg['agent_command'] = agent_command if apply_network is not None: dsrc.ds_cfg['apply_network_config'] = apply_network @@ -921,7 +919,7 @@ scbus-1 on xpt0 bus 0 def test_crawl_metadata_returns_structured_data_and_caches_nothing(self): """Return all structured metadata and cache no class attributes.""" - yaml_cfg = "{agent_command: my_command}\n" + yaml_cfg = "" odata = {'HostName': "myhost", 'UserName': "myuser", 'UserData': {'text': 'FOOBAR', 'encoding': 'plain'}, 'dscfg': {'text': yaml_cfg, 'encoding': 'plain'}} @@ -931,7 +929,7 @@ scbus-1 on xpt0 bus 0 expected_cfg = { 'PreprovisionedVMType': None, 'PreprovisionedVm': False, - 'datasource': {'Azure': {'agent_command': 'my_command'}}, + 'datasource': {'Azure': {}}, 'system_info': {'default_user': {'name': 'myuser'}}} expected_metadata = { 'azure_data': { @@ -1449,19 +1447,16 @@ scbus-1 on xpt0 bus 0 def test_dsaz_report_ready_returns_true_when_report_succeeds( self): dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - dsrc.ds_cfg['agent_command'] = '__builtin__' self.assertTrue(dsrc._report_ready(lease=mock.MagicMock())) def 
test_dsaz_report_ready_returns_false_and_does_not_propagate_exc( self): dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - dsrc.ds_cfg['agent_command'] = '__builtin__' self.m_get_metadata_from_fabric.side_effect = Exception self.assertFalse(dsrc._report_ready(lease=mock.MagicMock())) def test_dsaz_report_failure_returns_true_when_report_succeeds(self): dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - dsrc.ds_cfg['agent_command'] = '__builtin__' with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata: # mock crawl metadata failure to cause report failure @@ -1475,7 +1470,6 @@ scbus-1 on xpt0 bus 0 def test_dsaz_report_failure_returns_false_and_does_not_propagate_exc( self): dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - dsrc.ds_cfg['agent_command'] = '__builtin__' with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \ mock.patch.object(dsrc, '_ephemeral_dhcp_ctx') \ @@ -1505,7 +1499,6 @@ scbus-1 on xpt0 bus 0 def test_dsaz_report_failure_description_msg(self): dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - dsrc.ds_cfg['agent_command'] = '__builtin__' with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata: # mock crawl metadata failure to cause report failure @@ -1518,7 +1511,6 @@ scbus-1 on xpt0 bus 0 def test_dsaz_report_failure_no_description_msg(self): dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - dsrc.ds_cfg['agent_command'] = '__builtin__' with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata: m_crawl_metadata.side_effect = Exception @@ -1529,7 +1521,6 @@ scbus-1 on xpt0 bus 0 def test_dsaz_report_failure_uses_cached_ephemeral_dhcp_ctx_lease(self): dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - dsrc.ds_cfg['agent_command'] = '__builtin__' with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \ mock.patch.object(dsrc, '_ephemeral_dhcp_ctx') \ @@ -1558,7 +1549,6 @@ scbus-1 on xpt0 bus 0 def test_dsaz_report_failure_no_net_uses_new_ephemeral_dhcp_lease(self): dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - dsrc.ds_cfg['agent_command'] = '__builtin__' with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \ mock.patch.object(dsrc.distro.networking, 'is_up') \ @@ -1584,7 +1574,6 @@ scbus-1 on xpt0 bus 0 def test_dsaz_report_failure_no_net_and_no_dhcp_uses_fallback_lease( self): dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - dsrc.ds_cfg['agent_command'] = '__builtin__' with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \ mock.patch.object(dsrc.distro.networking, 'is_up') \ @@ -1609,14 +1598,12 @@ scbus-1 on xpt0 bus 0 def test_exception_fetching_fabric_data_doesnt_propagate(self): """Errors communicating with fabric should warn, but return True.""" dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - dsrc.ds_cfg['agent_command'] = '__builtin__' self.m_get_metadata_from_fabric.side_effect = Exception ret = self._get_and_setup(dsrc) self.assertTrue(ret) def test_fabric_data_included_in_metadata(self): dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - dsrc.ds_cfg['agent_command'] = '__builtin__' self.m_get_metadata_from_fabric.return_value = {'test': 'value'} ret = self._get_and_setup(dsrc) self.assertTrue(ret) @@ -1672,7 +1659,6 @@ scbus-1 on xpt0 bus 0 def test_instance_id_from_dmidecode_used_for_builtin(self): ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - ds.ds_cfg['agent_command'] = '__builtin__' ds.get_data() 
self.assertEqual(self.instance_id, ds.metadata['instance-id']) @@ -2099,13 +2085,11 @@ class TestAzureBounce(CiTestCase): self.patches.close() super(TestAzureBounce, self).tearDown() - def _get_ds(self, ovfcontent=None, agent_command=None): + def _get_ds(self, ovfcontent=None): if ovfcontent is not None: populate_dir(os.path.join(self.paths.seed_dir, "azure"), {'ovf-env.xml': ovfcontent}) dsrc = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths) - if agent_command is not None: - dsrc.ds_cfg['agent_command'] = agent_command return dsrc def _get_and_setup(self, dsrc): @@ -2161,8 +2145,7 @@ class TestAzureBounce(CiTestCase): host_name = 'unchanged-host-name' self.get_hostname.return_value = host_name cfg = {'hostname_bounce': {'policy': 'force'}} - dsrc = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg), - agent_command=['not', '__builtin__']) + dsrc = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)) ret = self._get_and_setup(dsrc) self.assertTrue(ret) self.assertEqual(1, perform_hostname_bounce.call_count) @@ -2171,8 +2154,7 @@ class TestAzureBounce(CiTestCase): host_name = 'unchanged-host-name' self.get_hostname.return_value = host_name cfg = {'hostname_bounce': {'policy': 'force'}} - dsrc = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg), - agent_command=['not', '__builtin__']) + dsrc = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)) patch_path = MOCKPATH + 'subp.which' with mock.patch(patch_path) as m_which: m_which.return_value = None @@ -2187,8 +2169,7 @@ class TestAzureBounce(CiTestCase): expected_hostname = 'azure-expected-host-name' self.get_hostname.return_value = 'default-host-name' dsrc = self._get_ds( - self.get_ovf_env_with_dscfg(expected_hostname, {}), - agent_command=['not', '__builtin__']) + self.get_ovf_env_with_dscfg(expected_hostname, {})) ret = self._get_and_setup(dsrc) self.assertTrue(ret) self.assertEqual(expected_hostname, @@ -2200,8 +2181,7 @@ class TestAzureBounce(CiTestCase): expected_hostname = 'azure-expected-host-name' self.get_hostname.return_value = 'default-host-name' dsrc = self._get_ds( - self.get_ovf_env_with_dscfg(expected_hostname, {}), - agent_command=['not', '__builtin__']) + self.get_ovf_env_with_dscfg(expected_hostname, {})) ret = self._get_and_setup(dsrc) self.assertTrue(ret) self.assertEqual(1, perform_hostname_bounce.call_count) @@ -2210,8 +2190,7 @@ class TestAzureBounce(CiTestCase): initial_host_name = 'default-host-name' self.get_hostname.return_value = initial_host_name dsrc = self._get_ds( - self.get_ovf_env_with_dscfg('some-host-name', {}), - agent_command=['not', '__builtin__']) + self.get_ovf_env_with_dscfg('some-host-name', {})) ret = self._get_and_setup(dsrc) self.assertTrue(ret) self.assertEqual(initial_host_name, @@ -2224,8 +2203,7 @@ class TestAzureBounce(CiTestCase): initial_host_name = 'default-host-name' self.get_hostname.return_value = initial_host_name dsrc = self._get_ds( - self.get_ovf_env_with_dscfg('some-host-name', {}), - agent_command=['not', '__builtin__']) + self.get_ovf_env_with_dscfg('some-host-name', {})) ret = self._get_and_setup(dsrc) self.assertTrue(ret) self.assertEqual(initial_host_name, @@ -2240,7 +2218,7 @@ class TestAzureBounce(CiTestCase): self.get_hostname.return_value = old_hostname cfg = {'hostname_bounce': {'interface': interface, 'policy': 'force'}} data = self.get_ovf_env_with_dscfg(hostname, cfg) - dsrc = self._get_ds(data, agent_command=['not', '__builtin__']) + dsrc = self._get_ds(data) ret = self._get_and_setup(dsrc) self.assertTrue(ret) 
self.assertEqual(1, self.subp.call_count) @@ -2254,7 +2232,7 @@ class TestAzureBounce(CiTestCase): self, mock_get_boot_telemetry): cfg = {'hostname_bounce': {'policy': 'force'}} data = self.get_ovf_env_with_dscfg('some-hostname', cfg) - dsrc = self._get_ds(data, agent_command=['not', '__builtin__']) + dsrc = self._get_ds(data) ret = self._get_and_setup(dsrc) self.assertTrue(ret) self.assertEqual(1, self.subp.call_count) -- cgit v1.2.3 From e9634266ea52bf184727fb0782d5dc35f9ed1468 Mon Sep 17 00:00:00 2001 From: Chris Patterson Date: Fri, 10 Dec 2021 12:16:16 -0500 Subject: sources/azure: remove unnecessary hostname bounce (#1143) Thanks to [1], the hostname is set prior to network bring-up. The Azure data source has been bouncing the hostname during setup(), occurring after the hostname has already been properly configured. Note that this doesn't prevent leaking the image's hostname during Azure's _get_data() when it brings up ephemeral DHCP. However, as are not guaranteed to have the hostname metadata available from a truly "local" source, this behavior is to be expected unless we disable `send host-name` from dhclient config. [1]: https://github.com/canonical/cloud-init/commit/133ad2cb327ad17b7b81319fac8f9f14577c04df Signed-off-by: Chris Patterson --- cloudinit/sources/DataSourceAzure.py | 126 -------------- doc/examples/cloud-config-datasources.txt | 6 - doc/rtd/topics/datasources/azure.rst | 20 --- tests/unittests/sources/test_azure.py | 263 ------------------------------ 4 files changed, 415 deletions(-) (limited to 'doc/examples') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 6c1bc085..eee98fa8 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -6,7 +6,6 @@ import base64 from collections import namedtuple -import contextlib import crypt from functools import partial import os @@ -52,20 +51,10 @@ LOG = logging.getLogger(__name__) DS_NAME = 'Azure' DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"} -BOUNCE_COMMAND_IFUP = [ - 'sh', '-xc', - "i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x" -] -BOUNCE_COMMAND_FREEBSD = [ - 'sh', '-xc', - ("i=$interface; x=0; ifconfig down $i || x=$?; " - "ifconfig up $i || x=$?; exit $x") -] # azure systems will always have a resource disk, and 66-azure-ephemeral.rules # ensures that it gets linked to this path. 
RESOURCE_DISK_PATH = '/dev/disk/cloud/azure_resource' -DEFAULT_PRIMARY_NIC = 'eth0' LEASE_FILE = '/var/lib/dhcp/dhclient.eth0.leases' DEFAULT_FS = 'ext4' # DMI chassis-asset-tag is set static for all azure instances @@ -247,7 +236,6 @@ def get_resource_disk_on_freebsd(port_id): # update the FreeBSD specific information if util.is_FreeBSD(): - DEFAULT_PRIMARY_NIC = 'hn0' LEASE_FILE = '/var/db/dhclient.leases.hn0' DEFAULT_FS = 'freebsd-ufs' res_disk = get_resource_disk_on_freebsd(1) @@ -261,13 +249,6 @@ if util.is_FreeBSD(): BUILTIN_DS_CONFIG = { 'data_dir': AGENT_SEED_DIR, - 'set_hostname': True, - 'hostname_bounce': { - 'interface': DEFAULT_PRIMARY_NIC, - 'policy': True, - 'command': 'builtin', - 'hostname_command': 'hostname', - }, 'disk_aliases': {'ephemeral0': RESOURCE_DISK_PATH}, 'dhclient_lease_file': LEASE_FILE, 'apply_network_config': True, # Use IMDS published network configuration @@ -293,46 +274,6 @@ DEF_EPHEMERAL_LABEL = 'Temporary Storage' DEF_PASSWD_REDACTION = 'REDACTED' -def get_hostname(hostname_command='hostname'): - if not isinstance(hostname_command, (list, tuple)): - hostname_command = (hostname_command,) - return subp.subp(hostname_command, capture=True)[0].strip() - - -def set_hostname(hostname, hostname_command='hostname'): - subp.subp([hostname_command, hostname]) - - -@azure_ds_telemetry_reporter -@contextlib.contextmanager -def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'): - """ - Set a temporary hostname, restoring the previous hostname on exit. - - Will have the value of the previous hostname when used as a context - manager, or None if the hostname was not changed. - """ - policy = cfg['hostname_bounce']['policy'] - previous_hostname = get_hostname(hostname_command) - if (not util.is_true(cfg.get('set_hostname')) or - util.is_false(policy) or - (previous_hostname == temp_hostname and policy != 'force')): - yield None - return - try: - set_hostname(temp_hostname, hostname_command) - except Exception as e: - report_diagnostic_event( - 'Failed setting temporary hostname: %s' % e, - logger_func=LOG.warning) - yield None - return - try: - yield previous_hostname - finally: - set_hostname(previous_hostname, hostname_command) - - class DataSourceAzure(sources.DataSource): dsname = 'Azure' @@ -369,34 +310,6 @@ class DataSourceAzure(sources.DataSource): root = sources.DataSource.__str__(self) return "%s [seed=%s]" % (root, self.seed) - @azure_ds_telemetry_reporter - def bounce_network_with_azure_hostname(self): - # When using cloud-init to provision, we have to set the hostname from - # the metadata and "bounce" the network to force DDNS to update via - # dhclient - azure_hostname = self.metadata.get('local-hostname') - LOG.debug("Hostname in metadata is %s", azure_hostname) - hostname_command = self.ds_cfg['hostname_bounce']['hostname_command'] - - with temporary_hostname(azure_hostname, self.ds_cfg, - hostname_command=hostname_command) \ - as previous_hn: - if (previous_hn is not None and - util.is_true(self.ds_cfg.get('set_hostname'))): - cfg = self.ds_cfg['hostname_bounce'] - - # "Bouncing" the network - try: - return perform_hostname_bounce(hostname=azure_hostname, - cfg=cfg, - prev_hostname=previous_hn) - except Exception as e: - report_diagnostic_event( - "Failed publishing hostname: %s" % e, - logger_func=LOG.warning) - util.logexc(LOG, "handling set_hostname failed") - return False - def _get_subplatform(self): """Return the subplatform metadata source details.""" if self.seed.startswith('/dev'): @@ -1502,9 +1415,6 @@ class 
DataSourceAzure(sources.DataSource): On success, returns a dictionary including 'public_keys'. On failure, returns False. """ - - self.bounce_network_with_azure_hostname() - pubkey_info = None ssh_keys_and_source = self._get_public_ssh_keys_and_source() @@ -1763,42 +1673,6 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, return -@azure_ds_telemetry_reporter -def perform_hostname_bounce(hostname, cfg, prev_hostname): - # set the hostname to 'hostname' if it is not already set to that. - # then, if policy is not off, bounce the interface using command - # Returns True if the network was bounced, False otherwise. - command = cfg['command'] - interface = cfg['interface'] - policy = cfg['policy'] - - msg = ("hostname=%s policy=%s interface=%s" % - (hostname, policy, interface)) - env = os.environ.copy() - env['interface'] = interface - env['hostname'] = hostname - env['old_hostname'] = prev_hostname - - if command == "builtin": - if util.is_FreeBSD(): - command = BOUNCE_COMMAND_FREEBSD - elif subp.which('ifup'): - command = BOUNCE_COMMAND_IFUP - else: - LOG.debug( - "Skipping network bounce: ifupdown utils aren't present.") - # Don't bounce as networkd handles hostname DDNS updates - return False - LOG.debug("pubhname: publishing hostname [%s]", msg) - shell = not isinstance(command, (list, tuple)) - # capture=False, see comments in bug 1202758 and bug 1206164. - util.log_time(logfunc=LOG.debug, msg="publishing hostname", - get_uptime=True, func=subp.subp, - kwargs={'args': command, 'shell': shell, 'capture': False, - 'env': env}) - return True - - @azure_ds_telemetry_reporter def write_files(datadir, files, dirmode=None): diff --git a/doc/examples/cloud-config-datasources.txt b/doc/examples/cloud-config-datasources.txt index d1a4d79e..7a8c4284 100644 --- a/doc/examples/cloud-config-datasources.txt +++ b/doc/examples/cloud-config-datasources.txt @@ -45,12 +45,6 @@ datasource: instance-id: i-87018aed local-hostname: myhost.internal - Azure: - set_hostname: True - hostname_bounce: - interface: eth0 - policy: on # [can be 'on', 'off' or 'force'] - SmartOS: # For KVM guests: # Smart OS datasource works over a serial console interacting with diff --git a/doc/rtd/topics/datasources/azure.rst b/doc/rtd/topics/datasources/azure.rst index ad9f2236..bc672486 100644 --- a/doc/rtd/topics/datasources/azure.rst +++ b/doc/rtd/topics/datasources/azure.rst @@ -60,20 +60,6 @@ The settings that may be configured are: custom DHCP option 245 from Azure fabric. * **disk_aliases**: A dictionary defining which device paths should be interpreted as ephemeral images. See cc_disk_setup module for more info. - * **hostname_bounce**: A dictionary Azure hostname bounce behavior to react to - metadata changes. The '``hostname_bounce: command``' entry can be either - the literal string 'builtin' or a command to execute. The command will be - invoked after the hostname is set, and will have the 'interface' in its - environment. If ``set_hostname`` is not true, then ``hostname_bounce`` - will be ignored. An example might be: - - ``command: ["sh", "-c", "killall dhclient; dhclient $interface"]`` - - * **hostname_bounce**: A dictionary Azure hostname bounce behavior to react to - metadata changes. Azure will throttle ifup/down in some cases after metadata - has been updated to inform dhcp server about updated hostnames. - * **set_hostname**: Boolean set to True when we want Azure to set the hostname - based on metadata. 
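With the bounce settings gone, the datasource's built-in defaults reduce to the four keys left in the ``BUILTIN_DS_CONFIG`` hunk earlier in this patch. A minimal sketch, repeated here only for readability (``AGENT_SEED_DIR``, ``RESOURCE_DISK_PATH`` and ``LEASE_FILE`` are module constants defined elsewhere in DataSourceAzure.py):

BUILTIN_DS_CONFIG = {
    'data_dir': AGENT_SEED_DIR,
    'disk_aliases': {'ephemeral0': RESOURCE_DISK_PATH},
    'dhclient_lease_file': LEASE_FILE,
    'apply_network_config': True,  # Use IMDS published network configuration
}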
Configuration for the datasource can also be read from a ``dscfg`` entry in the ``LinuxProvisioningConfigurationSet``. Content in @@ -91,12 +77,6 @@ An example configuration with the default values is provided below: dhclient_lease_file: /var/lib/dhcp/dhclient.eth0.leases disk_aliases: ephemeral0: /dev/disk/cloud/azure_resource - hostname_bounce: - interface: eth0 - command: builtin - policy: true - hostname_command: hostname - set_hostname: true Userdata diff --git a/tests/unittests/sources/test_azure.py b/tests/unittests/sources/test_azure.py index 9728a1e7..ad8be04b 100644 --- a/tests/unittests/sources/test_azure.py +++ b/tests/unittests/sources/test_azure.py @@ -696,9 +696,6 @@ scbus-1 on xpt0 bus 0 self.apply_patches([ (dsaz, 'list_possible_azure_ds', self.m_list_possible_azure_ds), - (dsaz, 'perform_hostname_bounce', mock.MagicMock()), - (dsaz, 'get_hostname', mock.MagicMock()), - (dsaz, 'set_hostname', mock.MagicMock()), (dsaz, '_is_platform_viable', self.m_is_platform_viable), (dsaz, 'get_metadata_from_fabric', @@ -1794,21 +1791,6 @@ scbus-1 on xpt0 bus 0 m_net_get_interfaces.assert_called_with( blacklist_drivers=dsaz.BLACKLIST_DRIVERS) - @mock.patch(MOCKPATH + 'subp.subp', autospec=True) - def test_get_hostname_with_no_args(self, m_subp): - dsaz.get_hostname() - m_subp.assert_called_once_with(("hostname",), capture=True) - - @mock.patch(MOCKPATH + 'subp.subp', autospec=True) - def test_get_hostname_with_string_arg(self, m_subp): - dsaz.get_hostname(hostname_command="hostname") - m_subp.assert_called_once_with(("hostname",), capture=True) - - @mock.patch(MOCKPATH + 'subp.subp', autospec=True) - def test_get_hostname_with_iterable_arg(self, m_subp): - dsaz.get_hostname(hostname_command=("hostname",)) - m_subp.assert_called_once_with(("hostname",), capture=True) - @mock.patch( 'cloudinit.sources.helpers.azure.OpenSSLManager.parse_certificates') def test_get_public_ssh_keys_with_imds(self, m_parse_certificates): @@ -2023,251 +2005,6 @@ scbus-1 on xpt0 bus 0 self.assertEqual(dsrc.userdata_raw, userdataOVF.encode('utf-8')) -class TestAzureBounce(CiTestCase): - - with_logs = True - - def mock_out_azure_moving_parts(self): - - def _load_possible_azure_ds(seed_dir, cache_dir): - yield seed_dir - yield dsaz.DEFAULT_PROVISIONING_ISO_DEV - if cache_dir: - yield cache_dir - - self.patches.enter_context( - mock.patch.object(dsaz.util, 'wait_for_files')) - self.patches.enter_context( - mock.patch.object( - dsaz, 'list_possible_azure_ds', - mock.MagicMock(side_effect=_load_possible_azure_ds))) - self.patches.enter_context( - mock.patch.object(dsaz, 'get_metadata_from_fabric', - mock.MagicMock(return_value={}))) - self.patches.enter_context( - mock.patch.object(dsaz, 'get_metadata_from_imds', - mock.MagicMock(return_value={}))) - self.patches.enter_context( - mock.patch.object(dsaz.subp, 'which', lambda x: True)) - self.patches.enter_context(mock.patch.object( - dsaz, '_get_random_seed', return_value='wild')) - - def _dmi_mocks(key): - if key == 'system-uuid': - return 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8' - elif key == 'chassis-asset-tag': - return '7783-7084-3265-9085-8269-3286-77' - raise RuntimeError('should not get here') - - self.patches.enter_context( - mock.patch.object(dsaz.dmi, 'read_dmi_data', - mock.MagicMock(side_effect=_dmi_mocks))) - - def setUp(self): - super(TestAzureBounce, self).setUp() - self.tmp = self.tmp_dir() - self.waagent_d = os.path.join(self.tmp, 'var', 'lib', 'waagent') - self.paths = helpers.Paths( - {'cloud_dir': self.tmp, 'run_dir': self.tmp}) - 
dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d - self.patches = ExitStack() - self.mock_out_azure_moving_parts() - self.get_hostname = self.patches.enter_context( - mock.patch.object(dsaz, 'get_hostname')) - self.set_hostname = self.patches.enter_context( - mock.patch.object(dsaz, 'set_hostname')) - self.subp = self.patches.enter_context( - mock.patch(MOCKPATH + 'subp.subp')) - self.find_fallback_nic = self.patches.enter_context( - mock.patch('cloudinit.net.find_fallback_nic', return_value='eth9')) - - def tearDown(self): - self.patches.close() - super(TestAzureBounce, self).tearDown() - - def _get_ds(self, ovfcontent=None): - if ovfcontent is not None: - populate_dir(os.path.join(self.paths.seed_dir, "azure"), - {'ovf-env.xml': ovfcontent}) - dsrc = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths) - return dsrc - - def _get_and_setup(self, dsrc): - ret = dsrc.get_data() - if ret: - dsrc.setup(True) - return ret - - def get_ovf_env_with_dscfg(self, hostname, cfg): - odata = { - 'HostName': hostname, - 'dscfg': { - 'text': b64e(yaml.dump(cfg)), - 'encoding': 'base64' - } - } - return construct_valid_ovf_env(data=odata) - - def test_disabled_bounce_does_not_change_hostname(self): - cfg = {'hostname_bounce': {'policy': 'off'}} - ds = self._get_ds(self.get_ovf_env_with_dscfg('test-host', cfg)) - ds.get_data() - self.assertEqual(0, self.set_hostname.call_count) - - @mock.patch(MOCKPATH + 'perform_hostname_bounce') - def test_disabled_bounce_does_not_perform_bounce( - self, perform_hostname_bounce): - cfg = {'hostname_bounce': {'policy': 'off'}} - ds = self._get_ds(self.get_ovf_env_with_dscfg('test-host', cfg)) - ds.get_data() - self.assertEqual(0, perform_hostname_bounce.call_count) - - def test_same_hostname_does_not_change_hostname(self): - host_name = 'unchanged-host-name' - self.get_hostname.return_value = host_name - cfg = {'hostname_bounce': {'policy': 'yes'}} - ds = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)) - ds.get_data() - self.assertEqual(0, self.set_hostname.call_count) - - @mock.patch(MOCKPATH + 'perform_hostname_bounce') - def test_unchanged_hostname_does_not_perform_bounce( - self, perform_hostname_bounce): - host_name = 'unchanged-host-name' - self.get_hostname.return_value = host_name - cfg = {'hostname_bounce': {'policy': 'yes'}} - ds = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)) - ds.get_data() - self.assertEqual(0, perform_hostname_bounce.call_count) - - @mock.patch(MOCKPATH + 'perform_hostname_bounce') - def test_force_performs_bounce_regardless(self, perform_hostname_bounce): - host_name = 'unchanged-host-name' - self.get_hostname.return_value = host_name - cfg = {'hostname_bounce': {'policy': 'force'}} - dsrc = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)) - ret = self._get_and_setup(dsrc) - self.assertTrue(ret) - self.assertEqual(1, perform_hostname_bounce.call_count) - - def test_bounce_skipped_on_ifupdown_absent(self): - host_name = 'unchanged-host-name' - self.get_hostname.return_value = host_name - cfg = {'hostname_bounce': {'policy': 'force'}} - dsrc = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)) - patch_path = MOCKPATH + 'subp.which' - with mock.patch(patch_path) as m_which: - m_which.return_value = None - ret = self._get_and_setup(dsrc) - self.assertEqual([mock.call('ifup')], m_which.call_args_list) - self.assertTrue(ret) - self.assertIn( - "Skipping network bounce: ifupdown utils aren't present.", - self.logs.getvalue()) - - def test_different_hostnames_sets_hostname(self): - 
expected_hostname = 'azure-expected-host-name' - self.get_hostname.return_value = 'default-host-name' - dsrc = self._get_ds( - self.get_ovf_env_with_dscfg(expected_hostname, {})) - ret = self._get_and_setup(dsrc) - self.assertTrue(ret) - self.assertEqual(expected_hostname, - self.set_hostname.call_args_list[0][0][0]) - - @mock.patch(MOCKPATH + 'perform_hostname_bounce') - def test_different_hostnames_performs_bounce( - self, perform_hostname_bounce): - expected_hostname = 'azure-expected-host-name' - self.get_hostname.return_value = 'default-host-name' - dsrc = self._get_ds( - self.get_ovf_env_with_dscfg(expected_hostname, {})) - ret = self._get_and_setup(dsrc) - self.assertTrue(ret) - self.assertEqual(1, perform_hostname_bounce.call_count) - - def test_different_hostnames_sets_hostname_back(self): - initial_host_name = 'default-host-name' - self.get_hostname.return_value = initial_host_name - dsrc = self._get_ds( - self.get_ovf_env_with_dscfg('some-host-name', {})) - ret = self._get_and_setup(dsrc) - self.assertTrue(ret) - self.assertEqual(initial_host_name, - self.set_hostname.call_args_list[-1][0][0]) - - @mock.patch(MOCKPATH + 'perform_hostname_bounce') - def test_failure_in_bounce_still_resets_host_name( - self, perform_hostname_bounce): - perform_hostname_bounce.side_effect = Exception - initial_host_name = 'default-host-name' - self.get_hostname.return_value = initial_host_name - dsrc = self._get_ds( - self.get_ovf_env_with_dscfg('some-host-name', {})) - ret = self._get_and_setup(dsrc) - self.assertTrue(ret) - self.assertEqual(initial_host_name, - self.set_hostname.call_args_list[-1][0][0]) - - @mock.patch.object(dsaz, 'get_boot_telemetry') - def test_environment_correct_for_bounce_command( - self, mock_get_boot_telemetry): - interface = 'int0' - hostname = 'my-new-host' - old_hostname = 'my-old-host' - self.get_hostname.return_value = old_hostname - cfg = {'hostname_bounce': {'interface': interface, 'policy': 'force'}} - data = self.get_ovf_env_with_dscfg(hostname, cfg) - dsrc = self._get_ds(data) - ret = self._get_and_setup(dsrc) - self.assertTrue(ret) - self.assertEqual(1, self.subp.call_count) - bounce_env = self.subp.call_args[1]['env'] - self.assertEqual(interface, bounce_env['interface']) - self.assertEqual(hostname, bounce_env['hostname']) - self.assertEqual(old_hostname, bounce_env['old_hostname']) - - @mock.patch.object(dsaz, 'get_boot_telemetry') - def test_default_bounce_command_ifup_used_by_default( - self, mock_get_boot_telemetry): - cfg = {'hostname_bounce': {'policy': 'force'}} - data = self.get_ovf_env_with_dscfg('some-hostname', cfg) - dsrc = self._get_ds(data) - ret = self._get_and_setup(dsrc) - self.assertTrue(ret) - self.assertEqual(1, self.subp.call_count) - bounce_args = self.subp.call_args[1]['args'] - self.assertEqual( - dsaz.BOUNCE_COMMAND_IFUP, bounce_args) - - @mock.patch(MOCKPATH + 'perform_hostname_bounce') - def test_set_hostname_option_can_disable_bounce( - self, perform_hostname_bounce): - cfg = {'set_hostname': False, 'hostname_bounce': {'policy': 'force'}} - data = self.get_ovf_env_with_dscfg('some-hostname', cfg) - self._get_ds(data).get_data() - - self.assertEqual(0, perform_hostname_bounce.call_count) - - def test_set_hostname_option_can_disable_hostname_set(self): - cfg = {'set_hostname': False, 'hostname_bounce': {'policy': 'force'}} - data = self.get_ovf_env_with_dscfg('some-hostname', cfg) - self._get_ds(data).get_data() - - self.assertEqual(0, self.set_hostname.call_count) - - @mock.patch(MOCKPATH + 'perform_hostname_bounce') - def 
test_set_hostname_failed_disable_bounce( - self, perform_hostname_bounce): - cfg = {'set_hostname': True, 'hostname_bounce': {'policy': 'force'}} - self.get_hostname.return_value = "old-hostname" - self.set_hostname.side_effect = Exception - data = self.get_ovf_env_with_dscfg('some-hostname', cfg) - self._get_ds(data).get_data() - - self.assertEqual(0, perform_hostname_bounce.call_count) - - class TestLoadAzureDsDir(CiTestCase): """Tests for load_azure_ds_dir.""" -- cgit v1.2.3 From dc1aabfca851e520693c05322f724bd102c76364 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Mon, 10 Jan 2022 16:56:29 -0600 Subject: Remove 3.5 and xenial support (SC-711) (#1167) Includes: - Update tox.ini and .travis.yml accordingly - Cleanup tox.ini with new tox syntax and cloud-init dependencies - Update documentation accordingly - Replace/remove xenial references where additional testing isn't required - Remove xenial checks in integration tests - Replace yield_fixture with fixture in pytest tests Sections of code commented with lines like "Remove when Xenial is no longer supported" still exist as they're require additional testing. --- .travis.yml | 41 +++-- cloudinit/config/cc_apt_configure.py | 2 +- cloudinit/distros/debian.py | 2 +- conftest.py | 4 +- doc/examples/cloud-config-apt.txt | 6 +- doc/examples/cloud-config-chef.txt | 5 +- doc/rtd/topics/debugging.rst | 6 +- doc/rtd/topics/testing.rst | 38 +---- tests/integration_tests/bugs/test_gh626.py | 7 - tests/integration_tests/bugs/test_lp1898997.py | 2 - tests/integration_tests/conftest.py | 29 +--- .../datasources/test_lxd_discovery.py | 8 +- tests/integration_tests/modules/test_disk_setup.py | 5 +- tests/integration_tests/modules/test_lxd_bridge.py | 2 - .../integration_tests/modules/test_users_groups.py | 1 - tests/unittests/cmd/devel/test_hotplug_hook.py | 2 +- tests/unittests/config/test_cc_install_hotplug.py | 2 +- tests/unittests/distros/test_networking.py | 4 +- tests/unittests/sources/test_lxd.py | 2 +- tests/unittests/sources/test_oracle.py | 4 +- tests/unittests/sources/test_vmware.py | 2 +- tests/unittests/test_features.py | 2 +- tests/unittests/test_net_activators.py | 4 +- tests/unittests/test_stages.py | 2 +- tests/unittests/test_util.py | 2 +- tox.ini | 190 +++++++++------------ 26 files changed, 137 insertions(+), 237 deletions(-) (limited to 'doc/examples') diff --git a/.travis.yml b/.travis.yml index 2351246b..208bed23 100644 --- a/.travis.yml +++ b/.travis.yml @@ -75,17 +75,17 @@ matrix: - cp /usr/share/doc/sbuild/examples/example.sbuildrc /home/$USER/.sbuildrc script: # Ubuntu LTS: Build - - ./packages/bddeb -S -d --release xenial + - ./packages/bddeb -S -d --release bionic - | needs_caching=false - if [ -e "$TRAVIS_BUILD_DIR/chroots/xenial-amd64.tar" ]; then + if [ -e "$TRAVIS_BUILD_DIR/chroots/bionic-amd64.tar" ]; then # If we have a cached chroot, move it into place - sudo mkdir -p /var/lib/schroot/chroots/xenial-amd64 - sudo tar --sparse --xattrs --preserve-permissions --numeric-owner -xf "$TRAVIS_BUILD_DIR/chroots/xenial-amd64.tar" -C /var/lib/schroot/chroots/xenial-amd64 + sudo mkdir -p /var/lib/schroot/chroots/bionic-amd64 + sudo tar --sparse --xattrs --preserve-permissions --numeric-owner -xf "$TRAVIS_BUILD_DIR/chroots/bionic-amd64.tar" -C /var/lib/schroot/chroots/bionic-amd64 # Write its configuration - cat > sbuild-xenial-amd64 << EOM - [xenial-amd64] - description=xenial-amd64 + cat > sbuild-bionic-amd64 << EOM + [bionic-amd64] + description=bionic-amd64 groups=sbuild,root,admin root-groups=sbuild,root,admin # Uncomment 
these lines to allow members of these groups to access @@ -95,20 +95,20 @@ matrix: type=directory profile=sbuild union-type=overlay - directory=/var/lib/schroot/chroots/xenial-amd64 + directory=/var/lib/schroot/chroots/bionic-amd64 EOM - sudo mv sbuild-xenial-amd64 /etc/schroot/chroot.d/ - sudo chown root /etc/schroot/chroot.d/sbuild-xenial-amd64 + sudo mv sbuild-bionic-amd64 /etc/schroot/chroot.d/ + sudo chown root /etc/schroot/chroot.d/sbuild-bionic-amd64 # And ensure it's up-to-date. - before_pkgs="$(sudo schroot -c source:xenial-amd64 -d / dpkg -l | sha256sum)" - sudo schroot -c source:xenial-amd64 -d / -- sh -c "apt-get update && apt-get -qqy upgrade" - after_pkgs=$(sudo schroot -c source:xenial-amd64 -d / dpkg -l | sha256sum) + before_pkgs="$(sudo schroot -c source:bionic-amd64 -d / dpkg -l | sha256sum)" + sudo schroot -c source:bionic-amd64 -d / -- sh -c "apt-get update && apt-get -qqy upgrade" + after_pkgs=$(sudo schroot -c source:bionic-amd64 -d / dpkg -l | sha256sum) if [ "$before_pkgs" != "$after_pkgs" ]; then needs_caching=true fi else # Otherwise, create the chroot - sudo -E su $USER -c 'mk-sbuild xenial' + sudo -E su $USER -c 'mk-sbuild bionic' needs_caching=true fi # If there are changes to the schroot (or it's entirely new), @@ -116,19 +116,19 @@ matrix: # move it into the cached dir; no need to compress it because # Travis will do that anyway if [ "$needs_caching" = "true" ]; then - sudo tar --sparse --xattrs --xattrs-include=* -cf "$TRAVIS_BUILD_DIR/chroots/xenial-amd64.tar" -C /var/lib/schroot/chroots/xenial-amd64 . + sudo tar --sparse --xattrs --xattrs-include=* -cf "$TRAVIS_BUILD_DIR/chroots/bionic-amd64.tar" -C /var/lib/schroot/chroots/bionic-amd64 . fi # Use sudo to get a new shell where we're in the sbuild group # Don't run integration tests when build fails - | - sudo -E su $USER -c 'DEB_BUILD_OPTIONS=nocheck sbuild --nolog --no-run-lintian --no-run-autopkgtest --verbose --dist=xenial cloud-init_*.dsc' && + sudo -E su $USER -c 'DEB_BUILD_OPTIONS=nocheck sbuild --nolog --no-run-lintian --no-run-autopkgtest --verbose --dist=bionic cloud-init_*.dsc' && ssh-keygen -P "" -q -f ~/.ssh/id_rsa && sg lxd -c 'CLOUD_INIT_CLOUD_INIT_SOURCE="$(ls *.deb)" tox -e integration-tests-ci' - - python: 3.5 + - python: 3.6 env: - TOXENV=xenial - PYTEST_ADDOPTS=-v # List all tests run by pytest - dist: xenial + TOXENV=lowest-supported + PYTEST_ADDOPTS=-v # List all tests run by pytest + dist: bionic - python: 3.6 env: TOXENV=flake8 - python: 3.6 @@ -145,4 +145,3 @@ matrix: - python: 3.9 - python: 3.8 - python: 3.7 - - python: 3.5 diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index b0728517..37077a9f 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -133,7 +133,7 @@ meta = { source1: keyid: 'keyid' keyserver: 'keyserverurl' - source: 'deb [signed-by=$KEY_FILE] http:/// xenial main' + source: 'deb [signed-by=$KEY_FILE] http:/// bionic main' source2: source: 'ppa:' source3: diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index 0105a383..9effa0a0 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -257,7 +257,7 @@ class Distro(distros.Distro): pkgs = [] e = os.environ.copy() - # See: http://manpages.ubuntu.com/manpages/xenial/man7/debconf.7.html + # See: http://manpages.ubuntu.com/manpages/bionic/man7/debconf.7.html e["DEBIAN_FRONTEND"] = "noninteractive" wcfg = self.get_option("apt_get_wrapper", APT_GET_WRAPPER) diff --git a/conftest.py b/conftest.py 
index ffcb3233..3979eb0a 100644 --- a/conftest.py +++ b/conftest.py @@ -65,7 +65,7 @@ class _FixtureUtils: return result[0] -@pytest.yield_fixture(autouse=True) +@pytest.fixture(autouse=True) def disable_subp_usage(request, fixture_utils): """ Across all (pytest) tests, ensure that subp.subp is not invoked. @@ -166,7 +166,7 @@ def fixture_utils(): return _FixtureUtils -@pytest.yield_fixture +@pytest.fixture def httpretty(): """ Enable HTTPretty for duration of the testcase, resetting before and after. diff --git a/doc/examples/cloud-config-apt.txt b/doc/examples/cloud-config-apt.txt index 778187b5..39f546e1 100644 --- a/doc/examples/cloud-config-apt.txt +++ b/doc/examples/cloud-config-apt.txt @@ -254,7 +254,7 @@ apt: # # Creates a file in /etc/apt/sources.list.d/ for the sources list entry # based on the key: "/etc/apt/sources.list.d/curtin-dev-ppa.list" - source: "deb http://ppa.launchpad.net/curtin-dev/test-archive/ubuntu xenial main" + source: "deb http://ppa.launchpad.net/curtin-dev/test-archive/ubuntu bionic main" # 2.2 keyid # @@ -282,13 +282,13 @@ apt: # They will be replaced with the default or specified mirrors and the # running release. # The entry below would be possibly turned into: - # source: deb http://archive.ubuntu.com/ubuntu xenial multiverse + # source: deb http://archive.ubuntu.com/ubuntu bionic multiverse source: deb [signed-by=$KEY_FILE] $MIRROR $RELEASE multiverse keyid: F430BBA5 my-repo3.list: # this would have the same end effect as 'ppa:curtin-dev/test-archive' - source: "deb http://ppa.launchpad.net/curtin-dev/test-archive/ubuntu xenial main" + source: "deb http://ppa.launchpad.net/curtin-dev/test-archive/ubuntu bionic main" keyid: F430BBA5 # GPG key ID published on the key server filename: curtin-dev-ppa.list diff --git a/doc/examples/cloud-config-chef.txt b/doc/examples/cloud-config-chef.txt index 414111a1..9bb3c150 100644 --- a/doc/examples/cloud-config-chef.txt +++ b/doc/examples/cloud-config-chef.txt @@ -4,9 +4,6 @@ # list of recipes when the instance boots for the first time. # Make sure that this file is valid yaml before starting instances. # It should be passed as user-data when starting the instance. -# -# This example assumes the instance is 16.04 (xenial) - # The default is to install from packages. @@ -55,7 +52,7 @@ chef: # Valid values are 'accept' and 'accept-no-persist' chef_license: "accept" - + # Valid values are 'gems' and 'packages' and 'omnibus' install_type: "packages" diff --git a/doc/rtd/topics/debugging.rst b/doc/rtd/topics/debugging.rst index b897a318..a4a2779f 100644 --- a/doc/rtd/topics/debugging.rst +++ b/doc/rtd/topics/debugging.rst @@ -88,7 +88,7 @@ To quickly obtain a cloud-init log try using lxc on any ubuntu system: .. code-block:: shell-session - $ lxc init ubuntu-daily:xenial x1 + $ lxc init ubuntu-daily:focal x1 $ lxc start x1 $ # Take lxc's cloud-init.log and pipe it to the analyzer $ lxc file pull x1/var/log/cloud-init.log - | cloud-init analyze dump -i - @@ -104,13 +104,13 @@ To quickly analyze a KVM a cloud-init log: .. code-block:: shell-session - $ wget https://cloud-images.ubuntu.com/daily/server/xenial/current/xenial-server-cloudimg-amd64.img + $ wget https://cloud-images.ubuntu.com/daily/server/focal/current/focal-server-cloudimg-amd64.img 2. Create a snapshot image to preserve the original cloud-image .. code-block:: shell-session - $ qemu-img create -b xenial-server-cloudimg-amd64.img -f qcow2 \ + $ qemu-img create -b focal-server-cloudimg-amd64.img -f qcow2 \ test-cloudinit.qcow2 3. 
Create a seed image with metadata using `cloud-localds` diff --git a/doc/rtd/topics/testing.rst b/doc/rtd/topics/testing.rst index 7a1e3eec..5543c6f5 100644 --- a/doc/rtd/topics/testing.rst +++ b/doc/rtd/topics/testing.rst @@ -54,28 +54,22 @@ Test Layout * pytest tests should use bare ``assert`` statements, to take advantage of pytest's `assertion introspection`_ - * For ``==`` and other commutative assertions, the expected value - should be placed before the value under test: - ``assert expected_value == function_under_test()`` - - ``pytest`` Version Gotchas -------------------------- -As we still support Ubuntu 16.04 (Xenial Xerus), we can only use pytest -features that are available in v2.8.7. This is an inexhaustive list of +As we still support Ubuntu 18.04 (Bionic Beaver), we can only use pytest +features that are available in v3.3.2. This is an inexhaustive list of ways in which this may catch you out: -* Support for using ``yield`` in ``pytest.fixture`` functions was only - introduced in `pytest 3.0`_. Such functions must instead use the - ``pytest.yield_fixture`` decorator. - * Only the following built-in fixtures are available [#fixture-list]_: * ``cache`` * ``capfd`` - * ``caplog`` (provided by ``python3-pytest-catchlog`` on xenial) + * ``capfdbinary`` + * ``caplog`` * ``capsys`` + * ``capsysbinary`` + * ``doctest_namespace`` * ``monkeypatch`` * ``pytestconfig`` * ``record_xml_property`` @@ -83,22 +77,6 @@ ways in which this may catch you out: * ``tmpdir_factory`` * ``tmpdir`` -* On xenial, the objects returned by the ``tmpdir`` fixture cannot be - used where paths are required; they are rejected as invalid paths. - You must instead use their ``.strpath`` attribute. - - * For example, instead of ``util.write_file(tmpdir.join("some_file"), - ...)``, you should write - ``util.write_file(tmpdir.join("some_file").strpath, ...)``. - -* The `pytest.param`_ function cannot be used. It was introduced in - pytest 3.1, which means it is not available on xenial. The more - limited mechanism it replaced was removed in pytest 4.0, so is not - available in focal or later. The only available alternatives are to - write mark-requiring test instances as completely separate tests, - without utilising parameterisation, or to apply the mark to the - entire parameterized test (and therefore every test instance). - Mocking and Assertions ---------------------- @@ -168,9 +146,9 @@ Test Argument Ordering .. [#fixture-list] This list of fixtures (with markup) can be reproduced by running:: - py.test-3 --fixtures -q | grep "^[^ -]" | grep -v '\(no\|capturelog\)' | sort | sed 's/.*/* ``\0``/' + python3 -m pytest --fixtures -q | grep "^[^ -]" | grep -v 'no tests ran in' | sort | sed 's/ \[session scope\]//g;s/.*/* ``\0``/g' - in a xenial lxd container with python3-pytest-catchlog installed. + in an ubuntu lxd container with python3-pytest installed. .. _pytest: https://docs.pytest.org/ .. 
_pytest fixtures: https://docs.pytest.org/en/latest/fixture.html diff --git a/tests/integration_tests/bugs/test_gh626.py b/tests/integration_tests/bugs/test_gh626.py index 7c720143..b80b677a 100644 --- a/tests/integration_tests/bugs/test_gh626.py +++ b/tests/integration_tests/bugs/test_gh626.py @@ -8,7 +8,6 @@ import pytest import yaml from tests.integration_tests import random_mac_address -from tests.integration_tests.clouds import ImageSpecification from tests.integration_tests.instances import IntegrationInstance MAC_ADDRESS = random_mac_address() @@ -29,7 +28,6 @@ iface eth0 inet dhcp ethernet-wol g""" -@pytest.mark.sru_2020_11 @pytest.mark.lxd_container @pytest.mark.lxd_vm @pytest.mark.lxd_config_dict( @@ -39,11 +37,6 @@ iface eth0 inet dhcp } ) def test_wakeonlan(client: IntegrationInstance): - if ImageSpecification.from_os_image().release == "xenial": - eni = client.execute("cat /etc/network/interfaces.d/50-cloud-init.cfg") - assert eni.endswith(EXPECTED_ENI_END) - return - netplan_cfg = client.execute("cat /etc/netplan/50-cloud-init.yaml") netplan_yaml = yaml.safe_load(netplan_cfg) assert "wakeonlan" in netplan_yaml["network"]["ethernets"]["eth0"] diff --git a/tests/integration_tests/bugs/test_lp1898997.py b/tests/integration_tests/bugs/test_lp1898997.py index 115bd34f..d8ea54c3 100644 --- a/tests/integration_tests/bugs/test_lp1898997.py +++ b/tests/integration_tests/bugs/test_lp1898997.py @@ -47,8 +47,6 @@ version: 2 @pytest.mark.lxd_vm @pytest.mark.lxd_use_exec @pytest.mark.not_bionic -@pytest.mark.not_xenial -@pytest.mark.sru_2020_11 @pytest.mark.ubuntu class TestInterfaceListingWithOpenvSwitch: def test_ovs_member_interfaces_not_excluded(self, client): diff --git a/tests/integration_tests/conftest.py b/tests/integration_tests/conftest.py index b14b6ad0..2e44ef29 100644 --- a/tests/integration_tests/conftest.py +++ b/tests/integration_tests/conftest.py @@ -45,17 +45,6 @@ os_list = ["ubuntu"] session_start_time = datetime.datetime.now().strftime("%y%m%d%H%M%S") -XENIAL_LXD_VM_EXEC_MSG = """\ -The default xenial images do not support `exec` for LXD VMs. - -Specify an image known to work using: - - OS_IMAGE=::ubuntu::xenial - -You can re-run specifically tests that require this by passing `-m -lxd_use_exec` to pytest. -""" - def pytest_runtest_setup(item): """Skip tests on unsupported clouds. @@ -101,7 +90,7 @@ def disable_subp_usage(request): pass -@pytest.yield_fixture(scope="session") +@pytest.fixture(scope="session") def session_cloud(): if integration_settings.PLATFORM not in platforms.keys(): raise ValueError( @@ -246,16 +235,6 @@ def _client(request, fixture_utils, session_cloud: IntegrationCloud): if lxd_use_exec is not None: if not isinstance(session_cloud, _LxdIntegrationCloud): pytest.skip("lxd_use_exec requires LXD") - if isinstance(session_cloud, LxdVmCloud): - image_spec = ImageSpecification.from_os_image() - if image_spec.release == image_spec.image_id == "xenial": - # Why fail instead of skip? We expect that skipped tests will - # be run in a different one of our usual battery of test runs - # (e.g. LXD-only tests are skipped on EC2 but will run in our - # normal LXD test runs). This is not true of this test: it - # can't run in our usual xenial LXD VM test run, and it may not - # run anywhere else. A failure flags up this discrepancy. 
- pytest.fail(XENIAL_LXD_VM_EXEC_MSG) launch_kwargs["execute_via_ssh"] = False local_launch_kwargs = {} if lxd_setup is not None: @@ -276,21 +255,21 @@ def _client(request, fixture_utils, session_cloud: IntegrationCloud): _collect_logs(instance, request.node.nodeid, test_failed) -@pytest.yield_fixture +@pytest.fixture def client(request, fixture_utils, session_cloud, setup_image): """Provide a client that runs for every test.""" with _client(request, fixture_utils, session_cloud) as client: yield client -@pytest.yield_fixture(scope="module") +@pytest.fixture(scope="module") def module_client(request, fixture_utils, session_cloud, setup_image): """Provide a client that runs once per module.""" with _client(request, fixture_utils, session_cloud) as client: yield client -@pytest.yield_fixture(scope="class") +@pytest.fixture(scope="class") def class_client(request, fixture_utils, session_cloud, setup_image): """Provide a client that runs once per class.""" with _client(request, fixture_utils, session_cloud) as client: diff --git a/tests/integration_tests/datasources/test_lxd_discovery.py b/tests/integration_tests/datasources/test_lxd_discovery.py index da010813..eb2a4cf2 100644 --- a/tests/integration_tests/datasources/test_lxd_discovery.py +++ b/tests/integration_tests/datasources/test_lxd_discovery.py @@ -59,13 +59,9 @@ def test_lxd_datasource_discovery(client: IntegrationInstance): ) if ( client.settings.PLATFORM == "lxd_vm" - and ImageSpecification.from_os_image().release - in ( - "xenial", - "bionic", - ) + and ImageSpecification.from_os_image().release == "bionic" ): - # pycloudlib injects user.vendor_data for lxd_vm on bionic and xenial + # pycloudlib injects user.vendor_data for lxd_vm on bionic # to start the lxd-agent. # https://github.com/canonical/pycloudlib/blob/main/pycloudlib/\ # lxd/defaults.py#L13-L27 diff --git a/tests/integration_tests/modules/test_disk_setup.py b/tests/integration_tests/modules/test_disk_setup.py index 22277331..8f9d5f40 100644 --- a/tests/integration_tests/modules/test_disk_setup.py +++ b/tests/integration_tests/modules/test_disk_setup.py @@ -20,7 +20,7 @@ def setup_and_mount_lxd_disk(instance: LXDInstance): ) -@pytest.yield_fixture +@pytest.fixture def create_disk(): # 640k should be enough for anybody subp("dd if=/dev/zero of={} bs=1k count=640".format(DISK_PATH).split()) @@ -133,10 +133,9 @@ class TestPartProbeAvailability: assert sdb["children"][1]["name"] == "sdb2" assert sdb["children"][1]["mountpoint"] == "/mnt2" - # Not bionic or xenial because the LXD agent gets in the way of us + # Not bionic because the LXD agent gets in the way of us # changing the userdata @pytest.mark.not_bionic - @pytest.mark.not_xenial def test_disk_setup_when_mounted( self, create_disk, client: IntegrationInstance ): diff --git a/tests/integration_tests/modules/test_lxd_bridge.py b/tests/integration_tests/modules/test_lxd_bridge.py index 2cb3f4f3..3292a833 100644 --- a/tests/integration_tests/modules/test_lxd_bridge.py +++ b/tests/integration_tests/modules/test_lxd_bridge.py @@ -33,8 +33,6 @@ class TestLxdBridge: """Check that the expected LXD binaries are installed""" assert class_client.execute(["which", binary_name]).ok - @pytest.mark.not_xenial - @pytest.mark.sru_2020_11 def test_bridge(self, class_client): """Check that the given bridge is configured""" cloud_init_log = class_client.read_from_file("/var/log/cloud-init.log") diff --git a/tests/integration_tests/modules/test_users_groups.py b/tests/integration_tests/modules/test_users_groups.py index 3d1358ce..fddff681 
100644 --- a/tests/integration_tests/modules/test_users_groups.py +++ b/tests/integration_tests/modules/test_users_groups.py @@ -106,7 +106,6 @@ def test_sudoers_includedir(client: IntegrationInstance): https://github.com/canonical/cloud-init/pull/783 """ if ImageSpecification.from_os_image().release in [ - "xenial", "bionic", "focal", ]: diff --git a/tests/unittests/cmd/devel/test_hotplug_hook.py b/tests/unittests/cmd/devel/test_hotplug_hook.py index 842e8dfd..5ecb5969 100644 --- a/tests/unittests/cmd/devel/test_hotplug_hook.py +++ b/tests/unittests/cmd/devel/test_hotplug_hook.py @@ -16,7 +16,7 @@ hotplug_args = namedtuple("hotplug_args", "udevaction, subsystem, devpath") FAKE_MAC = "11:22:33:44:55:66" -@pytest.yield_fixture +@pytest.fixture def mocks(): m_init = mock.MagicMock(spec=Init) m_distro = mock.MagicMock(spec=Distro) diff --git a/tests/unittests/config/test_cc_install_hotplug.py b/tests/unittests/config/test_cc_install_hotplug.py index 3bd44aba..e67fce60 100644 --- a/tests/unittests/config/test_cc_install_hotplug.py +++ b/tests/unittests/config/test_cc_install_hotplug.py @@ -12,7 +12,7 @@ from cloudinit.config.cc_install_hotplug import ( from cloudinit.event import EventScope, EventType -@pytest.yield_fixture() +@pytest.fixture() def mocks(): m_update_enabled = mock.patch("cloudinit.stages.update_event_enabled") m_write = mock.patch("cloudinit.util.write_file", autospec=True) diff --git a/tests/unittests/distros/test_networking.py b/tests/unittests/distros/test_networking.py index 635f6901..274647cb 100644 --- a/tests/unittests/distros/test_networking.py +++ b/tests/unittests/distros/test_networking.py @@ -13,7 +13,7 @@ from cloudinit.distros.networking import ( ) -@pytest.yield_fixture +@pytest.fixture def generic_networking_cls(): """Returns a direct Networking subclass which errors on /sys usage. @@ -40,7 +40,7 @@ def generic_networking_cls(): yield TestNetworking -@pytest.yield_fixture +@pytest.fixture def sys_class_net(tmpdir): sys_class_net_path = tmpdir.join("sys/class/net") sys_class_net_path.ensure_dir() diff --git a/tests/unittests/sources/test_lxd.py b/tests/unittests/sources/test_lxd.py index ad1508a0..e11c3746 100644 --- a/tests/unittests/sources/test_lxd.py +++ b/tests/unittests/sources/test_lxd.py @@ -57,7 +57,7 @@ def lxd_metadata(): return LXD_V1_METADATA -@pytest.yield_fixture +@pytest.fixture def lxd_ds(request, paths, lxd_metadata): """ Return an instantiated DataSourceLXD. diff --git a/tests/unittests/sources/test_oracle.py b/tests/unittests/sources/test_oracle.py index e0e79c8c..356b3738 100644 --- a/tests/unittests/sources/test_oracle.py +++ b/tests/unittests/sources/test_oracle.py @@ -93,7 +93,7 @@ def metadata_version(): return 2 -@pytest.yield_fixture +@pytest.fixture def oracle_ds(request, fixture_utils, paths, metadata_version): """ Return an instantiated DataSourceOracle. @@ -649,7 +649,7 @@ class TestCommon_GetDataBehaviour: separate class for that case.) 
""" - @pytest.yield_fixture(params=[True, False]) + @pytest.fixture(params=[True, False]) def parameterized_oracle_ds(self, request, oracle_ds): """oracle_ds parameterized for iSCSI and non-iSCSI root respectively""" is_iscsi_root = request.param diff --git a/tests/unittests/sources/test_vmware.py b/tests/unittests/sources/test_vmware.py index dcdbda89..dd331349 100644 --- a/tests/unittests/sources/test_vmware.py +++ b/tests/unittests/sources/test_vmware.py @@ -57,7 +57,7 @@ runcmd: """ -@pytest.yield_fixture(autouse=True) +@pytest.fixture(autouse=True) def common_patches(): with mock.patch("cloudinit.util.platform.platform", return_value="Linux"): with mock.patch.multiple( diff --git a/tests/unittests/test_features.py b/tests/unittests/test_features.py index 141de55b..794a9654 100644 --- a/tests/unittests/test_features.py +++ b/tests/unittests/test_features.py @@ -12,7 +12,7 @@ import pytest import cloudinit -@pytest.yield_fixture() +@pytest.fixture() def create_override(request): """ Create a feature overrides file and do some module wizardry to make diff --git a/tests/unittests/test_net_activators.py b/tests/unittests/test_net_activators.py index 0e3ab43f..3c29e2f7 100644 --- a/tests/unittests/test_net_activators.py +++ b/tests/unittests/test_net_activators.py @@ -39,7 +39,7 @@ NETPLAN_CALL_LIST = [ ] -@pytest.yield_fixture +@pytest.fixture def available_mocks(): mocks = namedtuple("Mocks", "m_which, m_file") with patch("cloudinit.subp.which", return_value=True) as m_which: @@ -47,7 +47,7 @@ def available_mocks(): yield mocks(m_which, m_file) -@pytest.yield_fixture +@pytest.fixture def unavailable_mocks(): mocks = namedtuple("Mocks", "m_which, m_file") with patch("cloudinit.subp.which", return_value=False) as m_which: diff --git a/tests/unittests/test_stages.py b/tests/unittests/test_stages.py index be1a0787..3214410b 100644 --- a/tests/unittests/test_stages.py +++ b/tests/unittests/test_stages.py @@ -512,7 +512,7 @@ class TestInit_InitializeFilesystem: TODO: Expand these tests to cover all of _initialize_filesystem's behavior. 
""" - @pytest.yield_fixture + @pytest.fixture def init(self, paths): """A fixture which yields a stages.Init instance with paths and cfg set diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index e2bfe9d2..3765511b 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -1127,7 +1127,7 @@ class TestMountCb: TODO: Test the if/else branch that actually performs the mounting operation """ - @pytest.yield_fixture + @pytest.fixture def already_mounted_device_and_mountdict(self): """Mock an already-mounted device, and yield (device, mount dict)""" device = "/dev/fake0" diff --git a/tox.ini b/tox.ini index 034ee9a4..57d18cdb 100644 --- a/tox.ini +++ b/tox.ini @@ -1,58 +1,52 @@ [tox] -envlist = py3, xenial-dev, flake8, pylint, black, isort +envlist = py3, lowest-supported-dev, flake8, pylint, black, isort recreate = True [testenv] -commands = {envpython} -m pytest {posargs:tests/unittests} +basepython = python3 setenv = LC_ALL = en_US.utf-8 passenv= PYTEST_ADDOPTS -[testenv:flake8] -basepython = python3 +[flake_env] +envdir = {toxworkdir}/.flake_env deps = flake8==3.9.2 -commands = {envpython} -m flake8 {posargs:cloudinit/ tests/ tools/ setup.py} - -# https://github.com/gabrielfalcao/HTTPretty/issues/223 -setenv = - LC_ALL = en_US.utf-8 - -[testenv:pylint] -basepython = python3 -deps = - # requirements pylint==2.11.1 - # test-requirements because unit tests are now present in cloudinit tree + black==21.12b0 + isort==5.10.1 -r{toxinidir}/test-requirements.txt -r{toxinidir}/integration-requirements.txt + +[testenv:flake8] +envdir = {[flake_env]envdir} +deps = {[flake_env]deps} +commands = {envpython} -m flake8 {posargs:cloudinit/ tests/ tools/ setup.py} + +[testenv:pylint] +envdir = {[flake_env]envdir} +deps = {[flake_env]deps} commands = {envpython} -m pylint {posargs:cloudinit tests tools} [testenv:black] -basepython = python3 -deps = - # requirements - black==21.12b0 +envdir = {[flake_env]envdir} +deps = {[flake_env]deps} commands = {envpython} -m black . --check [testenv:isort] -basepython = python3 -deps = - isort==5.10.1 +envdir = {[flake_env]envdir} +deps = {[flake_env]deps} commands = {envpython} -m isort . --check-only [testenv:format] -basepython = python3 -deps = - black==21.12b0 - isort==5.10.1 +envdir = {[flake_env]envdir} +deps = {[flake_env]deps} commands = {envpython} -m isort . {envpython} -m black . [testenv:py3] -basepython = python3 deps = -r{toxinidir}/test-requirements.txt commands = {envpython} -m pytest \ @@ -60,84 +54,56 @@ commands = {envpython} -m pytest \ {posargs:--cov=cloudinit --cov-branch \ tests/unittests} -[testenv:py27] -basepython = python2.7 -deps = -r{toxinidir}/test-requirements.txt +[lowest-supported-deps] +# Tox is going to install requirements from pip. This is fine for +# testing python version compatibility, but when we build cloud-init, we are +# building against the dependencies in the OS repo, not pip. The OS +# dependencies will generally be older than what is found in pip. -[flake8] -# E203: whitespace before ':', doesn't adhere to pep8 or black formatting -# W503: line break before binary operator -ignore=E203,W503 -exclude = .venv,.tox,dist,doc,*egg,.git,build,tools -per-file-ignores = - cloudinit/cmd/main.py:E402 +# To obtain these versions, check the versions of these libraries +# in the oldest support Ubuntu distro. 
-[testenv:doc] -basepython = python3 -deps = - -r{toxinidir}/doc-requirements.txt -commands = - {envpython} -m sphinx {posargs:doc/rtd doc/rtd_html} - doc8 doc/rtd - -[xenial-shared-deps] -# The version of pytest in xenial doesn't work with Python 3.8, so we define -# two xenial environments: [testenv:xenial] runs the tests with exactly the -# version of pytest present in xenial, and is used in CI. [testenv:xenial-dev] -# runs the tests with the lowest version of pytest that works with Python 3.8, -# 3.0.7, but keeps the other dependencies at xenial's level. -# -# (This section is not a testenv, it is used to maintain a single definition of -# the dependencies shared between the two xenial testenvs.) +# httpretty isn't included here because python2.7 requires a higher version +# than whats run on bionic, so we need two different definitions. deps = - # requirements - jinja2==2.8 - pyyaml==3.11 - oauthlib==1.0.3 - pyserial==3.0.1 + jinja2==2.10 + oauthlib==2.0.6 + pyserial==3.4 configobj==5.0.6 - requests==2.9.1 - jsonschema + pyyaml==3.12 + requests==2.18.4 + jsonpatch==1.16 + jsonschema==2.6.0 + netifaces==0.10.4 # test-requirements - pytest-catchlog==1.2.1 - -[testenv:xenial] -# When updating this commands definition, also update the definition in -# [testenv:xenial-dev]. See the comment there for details. -commands = - python ./tools/pipremove jsonschema - python -m pytest {posargs:tests/unittests} -basepython = python3 + pytest==3.3.2 + pytest-cov==2.5.1 + # Needed by pytest and default causes failures + attrs==17.4.0 + +[testenv:lowest-supported] +# This definition will run on bionic with the version of httpretty +# that runs there deps = - # Refer to the comment in [xenial-shared-deps] for details - {[xenial-shared-deps]deps} - httpretty==0.8.6 - jsonpatch==1.10 - pytest==2.8.7 - -[testenv:xenial-dev] -# This should be: -# commands = {[testenv:xenial]commands} -# but the version of pytest in xenial has a bug -# (https://github.com/tox-dev/tox/issues/208) which means that the {posargs} -# substitution variable is misparsed and causes a traceback. Ensure that any -# changes here are reflected in [testenv:xenial]. -commands = - python ./tools/pipremove jsonschema - python -m pytest {posargs:tests/unittests} -basepython = {[testenv:xenial]basepython} + {[lowest-supported-deps]deps} + httpretty==0.8.14 +commands = {[testenv:py3]commands} + +[testenv:lowest-supported-dev] +# The oldest httpretty version to work with Python 3.7+ is 0.9.5, +# because it is the first to include this commit: +# https://github.com/gabrielfalcao/HTTPretty/commit/5776d97da3992b9071db5e21faf175f6e8729060 deps = - # Refer to the comment in [xenial-shared-deps] for details - {[xenial-shared-deps]deps} - # httpretty in xenial is 0.8.6, not 0.9.5. The oldest version to work with - # Python 3.7+ is 0.9.5, because it is the first to include this commit: - # https://github.com/gabrielfalcao/HTTPretty/commit/5776d97da3992b9071db5e21faf175f6e8729060 + {[lowest-supported-deps]deps} httpretty==0.9.5 - # jsonpatch in xenial is 1.10, not 1.19 (#839779). The oldest version - # to work with python3.6 is 1.16 as found in Artful. To keep default - # invocation of 'tox' happy, accept the difference in version here. 
- jsonpatch==1.16 - pytest==3.0.7 +commands = {[testenv:py3]commands} + +[testenv:doc] +deps = + -r{toxinidir}/doc-requirements.txt +commands = + {envpython} -m sphinx {posargs:doc/rtd doc/rtd_html} + doc8 doc/rtd [testenv:tip-flake8] commands = {envpython} -m flake8 {posargs:cloudinit/ tests/ tools/ setup.py} @@ -152,37 +118,36 @@ deps = -r{toxinidir}/test-requirements.txt -r{toxinidir}/integration-requirements.txt -# Until Xenial tox support is dropped or bumps to tox:2.3.2, reflect changes to -# deps into testenv:integration-tests-ci: commands, passenv and deps. -# This is due to (https://github.com/tox-dev/tox/issues/208) which means that -# the {posargs} handling and substitutions won't do what we want until tox 2.3.2 -# Once Xenial is dropped, integration-tests-ci can use proper substitution -# commands = {[testenv:integration-tests]commands} [testenv:integration-tests] -basepython = python3 commands = {envpython} -m pytest --log-cli-level=INFO {posargs:tests/integration_tests} -passenv = CLOUD_INIT_* SSH_AUTH_SOCK OS_* deps = -r{toxinidir}/integration-requirements.txt +passenv = CLOUD_INIT_* SSH_AUTH_SOCK OS_* [testenv:integration-tests-ci] -commands = {envpython} -m pytest --log-cli-level=INFO {posargs:tests/integration_tests} +commands = {[testenv:integration-tests]commands} +deps = {[testenv:integration-tests]deps} passenv = CLOUD_INIT_* SSH_AUTH_SOCK OS_* TRAVIS -deps = - -r{toxinidir}/integration-requirements.txt setenv = PYTEST_ADDOPTS="-m ci and not adhoc" [testenv:integration-tests-jenkins] -commands = {envpython} -m pytest --log-cli-level=INFO {posargs:tests/integration_tests} +commands = {[testenv:integration-tests]commands} +deps = {[testenv:integration-tests]deps} passenv = *_proxy CLOUD_INIT_* SSH_AUTH_SOCK OS_* GOOGLE_* GCP_* -deps = - -r{toxinidir}/integration-requirements.txt setenv = PYTEST_ADDOPTS="-m not adhoc" +[flake8] +# E203: whitespace before ':', doesn't adhere to pep8 or black formatting +# W503: line break before binary operator +ignore=E203,W503 +exclude = .venv,.tox,dist,doc,*egg,.git,build,tools +per-file-ignores = + cloudinit/cmd/main.py:E402 + [pytest] -# TODO: s/--strict/--strict-markers/ once xenial support is dropped +# TODO: s/--strict/--strict-markers/ once pytest version is high enough testpaths = tests/unittests addopts = --strict log_format = %(asctime)s %(levelname)-9s %(name)s:%(filename)s:%(lineno)d %(message)s @@ -202,7 +167,6 @@ markers = lxd_setup: specify callable to be called between init and start lxd_use_exec: `execute` will use `lxc exec` instead of SSH lxd_vm: test will only run in LXD VM - not_xenial: test cannot run on the xenial release not_bionic: test cannot run on the bionic release no_container: test cannot run in a container user_data: the user data to be passed to the test instance -- cgit v1.2.3 From af7eb1deab12c7208853c5d18b55228e0ba29c4d Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Mon, 31 Jan 2022 20:45:29 -0700 Subject: Schema a d (#1211) Migrate from legacy schema or define new schema in cloud-init-schema.json, adding extensive schema tests for: - cc_apt_configure - cc_bootcmd - cc_byobu - cc_ca_certs - cc_chef - cc_debug - cc_disable_ec2_metadata - cc_disk_setup Deprecate config hyphenated schema keys in favor of underscores: - ca_certs and ca_certs.remove_defaults instead of ca-certs and ca-certs.remove-defaults - Continue to honor deprecated config keys but emit DEPRECATION warnings in logs for continued use of the deprecated keys: - apt_sources key - any apt v1 or v2 keys - use or ca-certs or 
ca_certs.remove-defaults - Extend apt_configure schema - Define more strict schema below object opaque keys using patternProperties - create common $def apt_configure.mirror for reuse in 'primary' and 'security' schema definitions within cc_apt_configure Co-Authored-by: James Falcon --- cloudinit/config/cc_apt_configure.py | 314 +------------ cloudinit/config/cc_bootcmd.py | 29 +- cloudinit/config/cc_byobu.py | 40 +- cloudinit/config/cc_ca_certs.py | 108 +++-- cloudinit/config/cc_chef.py | 301 +------------ cloudinit/config/cc_debug.py | 55 ++- cloudinit/config/cc_disable_ec2_metadata.py | 41 +- cloudinit/config/cc_disk_setup.py | 159 +++---- cloudinit/config/cloud-init-schema.json | 495 ++++++++++++++++++++- doc/examples/cloud-config-ca-certs.txt | 6 +- doc/examples/cloud-config-disk-setup.txt | 2 +- tests/integration_tests/modules/test_ca_certs.py | 4 +- tests/unittests/config/test_cc_apt_configure.py | 202 +++++++++ tests/unittests/config/test_cc_bootcmd.py | 100 ++--- tests/unittests/config/test_cc_byobu.py | 51 +++ tests/unittests/config/test_cc_ca_certs.py | 106 ++++- tests/unittests/config/test_cc_chef.py | 172 +++++++ tests/unittests/config/test_cc_debug.py | 54 ++- .../config/test_cc_disable_ec2_metadata.py | 33 +- tests/unittests/config/test_cc_disk_setup.py | 50 ++- tests/unittests/config/test_schema.py | 22 +- 21 files changed, 1443 insertions(+), 901 deletions(-) create mode 100644 tests/unittests/config/test_cc_apt_configure.py create mode 100644 tests/unittests/config/test_cc_byobu.py (limited to 'doc/examples') diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index 37077a9f..7fe0e343 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -17,7 +17,7 @@ from textwrap import dedent from cloudinit import gpg from cloudinit import log as logging from cloudinit import subp, templater, util -from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema +from cloudinit.config.schema import get_meta_doc from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) @@ -31,33 +31,6 @@ CLOUD_INIT_GPG_DIR = "/etc/apt/cloud-init.gpg.d/" frequency = PER_INSTANCE distros = ["ubuntu", "debian"] -mirror_property = { - "type": "array", - "items": { - "type": "object", - "additionalProperties": False, - "required": ["arches"], - "properties": { - "arches": { - "type": "array", - "items": {"type": "string"}, - "minItems": 1, - }, - "uri": {"type": "string", "format": "uri"}, - "search": { - "type": "array", - "items": {"type": "string", "format": "uri"}, - "minItems": 1, - }, - "search_dns": { - "type": "boolean", - }, - "keyid": {"type": "string"}, - "key": {"type": "string"}, - "keyserver": {"type": "string"}, - }, - }, -} meta = { "id": "cc_apt_configure", @@ -147,275 +120,7 @@ meta = { "frequency": frequency, } -schema = { - "type": "object", - "properties": { - "apt": { - "type": "object", - "additionalProperties": False, - "properties": { - "preserve_sources_list": { - "type": "boolean", - "default": False, - "description": dedent( - """\ - By default, cloud-init will generate a new sources - list in ``/etc/apt/sources.list.d`` based on any - changes specified in cloud config. To disable this - behavior and preserve the sources list from the - pristine image, set ``preserve_sources_list`` - to ``true``. 
-
-                        The ``preserve_sources_list`` option overrides
-                        all other config keys that would alter
-                        ``sources.list`` or ``sources.list.d``,
-                        **except** for additional sources to be added
-                        to ``sources.list.d``."""
-                    ),
-                },
-                "disable_suites": {
-                    "type": "array",
-                    "items": {"type": "string"},
-                    "uniqueItems": True,
-                    "description": dedent(
-                        """\
-                        Entries in the sources list can be disabled using
-                        ``disable_suites``, which takes a list of suites
-                        to be disabled. If the string ``$RELEASE`` is
-                        present in a suite in the ``disable_suites`` list,
-                        it will be replaced with the release name. If a
-                        suite specified in ``disable_suites`` is not
-                        present in ``sources.list`` it will be ignored.
-                        For convenience, several aliases are provided for
-                        ``disable_suites``:
-
-                            - ``updates`` => ``$RELEASE-updates``
-                            - ``backports`` => ``$RELEASE-backports``
-                            - ``security`` => ``$RELEASE-security``
-                            - ``proposed`` => ``$RELEASE-proposed``
-                            - ``release`` => ``$RELEASE``.
-
-                        When a suite is disabled using ``disable_suites``,
-                        its entry in ``sources.list`` is not deleted; it
-                        is just commented out."""
-                    ),
-                },
-                "primary": {
-                    **mirror_property,
-                    "description": dedent(
-                        """\
-                        The primary and security archive mirrors can
-                        be specified using the ``primary`` and
-                        ``security`` keys, respectively. Both the
-                        ``primary`` and ``security`` keys take a list
-                        of configs, allowing mirrors to be specified
-                        on a per-architecture basis. Each config is a
-                        dictionary which must have an entry for
-                        ``arches``, specifying which architectures
-                        that config entry is for. The keyword
-                        ``default`` applies to any architecture not
-                        explicitly listed. The mirror url can be specified
-                        with the ``uri`` key, or a list of mirrors to
-                        check can be provided in order, with the first
-                        mirror that can be resolved being selected. This
-                        allows the same configuration to be used in
-                        different environments, with different hosts used
-                        for a local apt mirror. If no mirror is provided
-                        by ``uri`` or ``search``, ``search_dns`` may be
-                        used to search for dns names in the format
-                        ``<distro>-mirror`` in each of the following:
-
-                            - fqdn of this host per cloud metadata,
-                            - localdomain,
-                            - domains listed in ``/etc/resolv.conf``.
-
-                        If there is a dns entry for ``<distro>-mirror``,
-                        then it is assumed that there is a distro mirror
-                        at ``http://<distro>-mirror.<domain>/<distro>``.
-                        If the ``primary`` key is defined, but not the
-                        ``security`` key, then the configuration for
-                        ``primary`` is also used for ``security``.
-                        If ``search_dns`` is used for the ``security``
-                        key, the search pattern will be
-                        ``<distro>-security-mirror``.
-
-                        Each mirror may also specify a key to import via
-                        any of the following optional keys:
-
-                            - ``keyid``: a key to import via shortid or \
-                              fingerprint.
-                            - ``key``: a raw PGP key.
-                            - ``keyserver``: alternate keyserver to pull \
-                              ``keyid`` key from.
-
-                        If no mirrors are specified, or all lookups fail,
-                        then default mirrors defined in the datasource
-                        are used. If none are present in the datasource
-                        either, the following defaults are used:
-
-                            - ``primary`` => \
-                              ``http://archive.ubuntu.com/ubuntu``.
-                            - ``security`` => \
-                              ``http://security.ubuntu.com/ubuntu``
-                        """
-                    ),
-                },
-                "security": {
-                    **mirror_property,
-                    "description": dedent(
-                        """\
-                        Please refer to the primary config documentation"""
-                    ),
-                },
-                "add_apt_repo_match": {
-                    "type": "string",
-                    "default": ADD_APT_REPO_MATCH,
-                    "description": dedent(
-                        """\
-                        All source entries in ``apt-sources`` that match
-                        regex in ``add_apt_repo_match`` will be added to
-                        the system using ``add-apt-repository``. If
-                        ``add_apt_repo_match`` is not specified, it
-                        defaults to ``{}``""".format(
-                            ADD_APT_REPO_MATCH
-                        )
-                    ),
-                },
-                "debconf_selections": {
-                    "type": "object",
-                    "items": {"type": "string"},
-                    "description": dedent(
-                        """\
-                        Debconf additional configurations can be specified as a
-                        dictionary under the ``debconf_selections`` config
-                        key, with each key in the dict representing a
-                        different set of configurations. The value of each key
-                        must be a string containing all the debconf
-                        configurations that must be applied. We will bundle
-                        all of the values and pass them to
-                        ``debconf-set-selections``. Therefore, each value line
-                        must be a valid entry for ``debconf-set-selections``,
-                        meaning that they must possess four distinct fields:
-
-                        ``pkgname question type answer``
-
-                        Where:
-
-                            - ``pkgname`` is the name of the package.
-                            - ``question`` is the name of the question.
-                            - ``type`` is the type of question.
-                            - ``answer`` is the value used to answer the \
-                              question.
-
-                        For example: \
-                        ``ippackage ippackage/ip string 127.0.0.1``
-                        """
-                    ),
-                },
-                "sources_list": {
-                    "type": "string",
-                    "description": dedent(
-                        """\
-                        Specifies a custom template for rendering
-                        ``sources.list`` . If no ``sources_list`` template
-                        is given, cloud-init will use a sane default. Within
-                        this template, the following strings will be
-                        replaced with the appropriate values:
-
-                            - ``$MIRROR``
-                            - ``$RELEASE``
-                            - ``$PRIMARY``
-                            - ``$SECURITY``
-                            - ``$KEY_FILE``"""
-                    ),
-                },
-                "conf": {
-                    "type": "string",
-                    "description": dedent(
-                        """\
-                        Specify configuration for apt, such as proxy
-                        configuration. This configuration is specified as a
-                        string. For multiline apt configuration, make sure
-                        to follow yaml syntax."""
-                    ),
-                },
-                "https_proxy": {
-                    "type": "string",
-                    "description": dedent(
-                        """\
-                        More convenient way to specify https apt proxy.
-                        https proxy url is specified in the format
-                        ``https://[[user][:pass]@]host[:port]/``."""
-                    ),
-                },
-                "http_proxy": {
-                    "type": "string",
-                    "description": dedent(
-                        """\
-                        More convenient way to specify http apt proxy.
-                        http proxy url is specified in the format
-                        ``http://[[user][:pass]@]host[:port]/``."""
-                    ),
-                },
-                "proxy": {
-                    "type": "string",
-                    "description": "Alias for defining a http apt proxy.",
-                },
-                "ftp_proxy": {
-                    "type": "string",
-                    "description": dedent(
-                        """\
-                        More convenient way to specify ftp apt proxy.
-                        ftp proxy url is specified in the format
-                        ``ftp://[[user][:pass]@]host[:port]/``."""
-                    ),
-                },
-                "sources": {
-                    "type": "object",
-                    "items": {"type": "string"},
-                    "description": dedent(
-                        """\
-                        Source list entries can be specified as a
-                        dictionary under the ``sources`` config key, with
-                        each key in the dict representing a different source
-                        file. The key of each source entry will be used
-                        as an id that can be referenced in other config
-                        entries, as well as the filename for the source's
-                        configuration under ``/etc/apt/sources.list.d``.
-                        If the name does not end with ``.list``, it will
-                        be appended. If there is no configuration for a
-                        key in ``sources``, no file will be written, but
-                        the key may still be referred to as an id in other
-                        ``sources`` entries.
-
-                        Each entry under ``sources`` is a dictionary which
-                        may contain any of the following optional keys:
-
-                            - ``source``: a sources.list entry \
-                              (some variable replacements apply).
-                            - ``keyid``: a key to import via shortid or \
-                              fingerprint.
-                            - ``key``: a raw PGP key.
-                            - ``keyserver``: alternate keyserver to pull \
-                              ``keyid`` key from.
-                            - ``filename``: specify the name of the .list file
-
-                        The ``source`` key supports variable
-                        replacements for the following strings:
-
-                            - ``$MIRROR``
-                            - ``$PRIMARY``
-                            - ``$SECURITY``
-                            - ``$RELEASE``
-                            - ``$KEY_FILE``"""
-                    ),
-                },
-            },
-        }
-    },
-}
-
-__doc__ = get_meta_doc(meta, schema)
+__doc__ = get_meta_doc(meta)

 # place where apt stores cached repository data
@@ -474,7 +179,6 @@ def handle(name, ocfg, cloud, log, _):
             )
         )

-    validate_cloudconfig_schema(cfg, schema)
     apply_debconf_selections(cfg, target)
     apply_apt(cfg, cloud, target)

@@ -889,6 +593,10 @@ def add_apt_sources(
 def convert_v1_to_v2_apt_format(srclist):
     """convert v1 apt format to v2 (dict in apt_sources)"""
     srcdict = {}
+    LOG.warning(
+        "DEPRECATION: 'apt_sources' deprecated config key found."
+        " Use 'apt' instead"
+    )
     if isinstance(srclist, list):
         LOG.debug("apt config: convert V1 to V2 format (source list to dict)")
         for srcent in srclist:
@@ -963,15 +671,19 @@ def convert_v2_to_v3_apt_format(oldcfg):
     # no old config, so no new one to be created
     if not needtoconvert:
         return oldcfg
-    LOG.debug(
-        "apt config: convert V2 to V3 format for keys '%s'",
+    LOG.warning(
+        "DEPRECATION apt: converted deprecated config V2 to V3 format for"
+        " keys '%s'. Use updated config keys.",
        ", ".join(needtoconvert),
     )

     # if old AND new config are provided, prefer the new one (LP #1616831)
     newaptcfg = oldcfg.get("apt", None)
     if newaptcfg is not None:
-        LOG.debug("apt config: V1/2 and V3 format specified, preferring V3")
+        LOG.warning(
+            "DEPRECATION: apt config: deprecated V1/2 and V3 format specified,"
+            " preferring V3"
+        )
     for oldkey in needtoconvert:
         newkey = mapoldkeys[oldkey]
         verify = oldcfg[oldkey]  # drop, but keep a ref for verification
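For reference, a minimal cloud-config sketch exercising the mirror and
``sources`` options documented in the schema removed above; the internal
mirror host and the ``example_repo`` source entry are illustrative
placeholders, not cloud-init defaults::

    #cloud-config
    apt:
      disable_suites: [backports, proposed]
      primary:
        - arches: [default]
          # the first mirror that resolves is used; search_dns would
          # additionally try <distro>-mirror DNS names as described above
          search:
            - http://apt-mirror.internal.example.com/ubuntu/
            - http://archive.ubuntu.com/ubuntu/
      security:
        - arches: [default]
          uri: http://security.ubuntu.com/ubuntu/
      sources:
        example_repo.list:
          source: "deb $MIRROR $RELEASE multiverse"
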
diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py
index bff11a24..3a239376 100644
--- a/cloudinit/config/cc_bootcmd.py
+++ b/cloudinit/config/cc_bootcmd.py
@@ -13,17 +13,11 @@ import os
 from textwrap import dedent

 from cloudinit import subp, temp_utils, util
-from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema
+from cloudinit.config.schema import get_meta_doc
 from cloudinit.settings import PER_ALWAYS

 frequency = PER_ALWAYS

-# The schema definition for each cloud-config module is a strict contract for
-# describing supported configuration parameters for each cloud-config section.
-# It allows cloud-config to validate and alert users to invalid or ignored
-# configuration options before actually attempting to deploy with said
-# configuration.
-
 distros = ["all"]

 meta = {
@@ -62,25 +56,7 @@ meta = {
     "frequency": PER_ALWAYS,
 }

-schema = {
-    "type": "object",
-    "properties": {
-        "bootcmd": {
-            "type": "array",
-            "items": {
-                "oneOf": [
-                    {"type": "array", "items": {"type": "string"}},
-                    {"type": "string"},
-                ]
-            },
-            "additionalItems": False,  # Reject items of non-string non-list
-            "additionalProperties": False,
-            "minItems": 1,
-        }
-    },
-}
-
-__doc__ = get_meta_doc(meta, schema)  # Supplement python help()
+__doc__ = get_meta_doc(meta)


 def handle(name, cfg, cloud, log, _args):
@@ -91,7 +67,6 @@ def handle(name, cfg, cloud, log, _args):
         )
         return

-    validate_cloudconfig_schema(cfg, schema)
     with temp_utils.ExtendedTemporaryFile(suffix=".sh") as tmpf:
         try:
             content = util.shellify(cfg["bootcmd"])
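The removed schema allowed each ``bootcmd`` item to be either a string or a
list of strings, and that remains the accepted shape; a minimal sketch
showing both forms (the marker file and the device path are illustrative)::

    #cloud-config
    bootcmd:
      - echo "booted as $INSTANCE_ID" >> /var/log/boot-marker.log
      - [cloud-init-per, once, mymkfs, mkfs, /dev/vdb]
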
diff --git a/cloudinit/config/cc_byobu.py b/cloudinit/config/cc_byobu.py
index 53b6d0c8..b96736a4 100755
--- a/cloudinit/config/cc_byobu.py
+++ b/cloudinit/config/cc_byobu.py
@@ -6,11 +6,14 @@
 #
 # This file is part of cloud-init. See LICENSE file for license information.

-"""
-Byobu
-------
-**Summary:** enable/disable byobu system wide and for default user
+"""Byobu: Enable/disable byobu system wide and for default user."""
+
+from cloudinit import subp, util
+from cloudinit.config.schema import get_meta_doc
+from cloudinit.distros import ug_util
+from cloudinit.settings import PER_INSTANCE

+MODULE_DESCRIPTION = """\
 This module controls whether byobu is enabled or disabled system wide and for
 the default system user. If byobu is to be enabled, this module will ensure it
 is installed. Likewise, if it is to be disabled, it will be removed if
@@ -26,23 +29,24 @@ Valid configuration options for this module are:
   - ``disable``: disable byobu for all users
   - ``user``: alias for ``enable-user``
   - ``system``: alias for ``enable-system``
-
-**Internal name:** ``cc_byobu``
-
-**Module frequency:** per instance
-
-**Supported distros:** ubuntu, debian
-
-**Config keys**::
-
-    byobu_by_default:
 """
-
-from cloudinit import subp, util
-from cloudinit.distros import ug_util
-
 distros = ["ubuntu", "debian"]

+meta = {
+    "id": "cc_byobu",
+    "name": "Byobu",
+    "title": "Enable/disable byobu system wide and for default user",
+    "description": MODULE_DESCRIPTION,
+    "distros": distros,
+    "frequency": PER_INSTANCE,
+    "examples": [
+        "byobu_by_default: enable-user",
+        "byobu_by_default: disable-system",
+    ],
+}
+
+__doc__ = get_meta_doc(meta)
+

 def handle(name, cfg, cloud, log, args):
     if len(args) != 0:
diff --git a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py
index 9de065ab..c46d0fbe 100644
--- a/cloudinit/config/cc_ca_certs.py
+++ b/cloudinit/config/cc_ca_certs.py
@@ -2,46 +2,14 @@
 #
 # This file is part of cloud-init. See LICENSE file for license information.

-"""
-CA Certs
---------
-**Summary:** add ca certificates
-
-This module adds CA certificates to ``/etc/ca-certificates.conf`` and updates
-the ssl cert cache using ``update-ca-certificates``. The default certificates
-can be removed from the system with the configuration option
-``remove-defaults``.
-
-.. note::
-    certificates must be specified using valid yaml. In order to specify a
-    multiline certificate, the yaml multiline list syntax must be used
-
-.. note::
-    For Alpine Linux the "remove-defaults" functionality works if the
-    ca-certificates package is installed but not if the
-    ca-certificates-bundle package is installed.
-
-**Internal name:** ``cc_ca_certs``
-
-**Module frequency:** per instance
-
-**Supported distros:** alpine, debian, ubuntu, rhel
-
-**Config keys**::
-
-    ca-certs:
-        remove-defaults:
-        trusted:
-            -
-            - |
-              -----BEGIN CERTIFICATE-----
-              YOUR-ORGS-TRUSTED-CA-CERT-HERE
-              -----END CERTIFICATE-----
-"""
+"""CA Certs: Add ca certificates."""

 import os
+from textwrap import dedent

 from cloudinit import subp, util
+from cloudinit.config.schema import get_meta_doc
+from cloudinit.settings import PER_INSTANCE

 DEFAULT_CONFIG = {
     "ca_cert_path": "/usr/share/ca-certificates/",
@@ -60,9 +28,48 @@ DISTRO_OVERRIDES = {
     }
 }

+MODULE_DESCRIPTION = """\
+This module adds CA certificates to ``/etc/ca-certificates.conf`` and updates
+the ssl cert cache using ``update-ca-certificates``. The default certificates
+can be removed from the system with the configuration option
+``remove_defaults``.
+.. note::
+    certificates must be specified using valid yaml. In order to specify a
+    multiline certificate, the yaml multiline list syntax must be used
+
+.. note::
+    For Alpine Linux the "remove_defaults" functionality works if the
+    ca-certificates package is installed but not if the
+    ca-certificates-bundle package is installed.
+"""
 distros = ["alpine", "debian", "ubuntu", "rhel"]

+meta = {
+    "id": "cc_ca_certs",
+    "name": "CA Certificates",
+    "title": "Add ca certificates",
+    "description": MODULE_DESCRIPTION,
+    "distros": distros,
+    "frequency": PER_INSTANCE,
+    "examples": [
+        dedent(
+            """\
+            ca_certs:
+              remove_defaults: true
+              trusted:
+                - single_line_cert
+                - |
+                  -----BEGIN CERTIFICATE-----
+                  YOUR-ORGS-TRUSTED-CA-CERT-HERE
+                  -----END CERTIFICATE-----
+            """
+        )
+    ],
+}
+
+__doc__ = get_meta_doc(meta)
+

 def _distro_ca_certs_configs(distro_name):
     """Return a distro-specific ca_certs config dictionary
@@ -162,20 +169,37 @@ def handle(name, cfg, cloud, log, _args):
     @param log: Pre-initialized Python logger object to use for logging.
     @param args: Any module arguments from cloud.cfg
     """
-    # If there isn't a ca-certs section in the configuration don't do anything
-    if "ca-certs" not in cfg:
+    if "ca-certs" in cfg:
+        log.warning(
+            "DEPRECATION: key 'ca-certs' is now deprecated. Use 'ca_certs'"
+            " instead."
+        )
+    elif "ca_certs" not in cfg:
         log.debug(
-            "Skipping module named %s, no 'ca-certs' key in configuration",
+            "Skipping module named %s, no 'ca_certs' key in configuration",
             name,
         )
         return

-    ca_cert_cfg = cfg["ca-certs"]
+    if "ca-certs" in cfg and "ca_certs" in cfg:
+        log.warning(
+            "Found both ca-certs (deprecated) and ca_certs config keys."
+            " Ignoring ca-certs."
+        )
+    ca_cert_cfg = cfg.get("ca_certs", cfg.get("ca-certs"))
     distro_cfg = _distro_ca_certs_configs(cloud.distro.name)

-    # If there is a remove-defaults option set to true, remove the system
+    # If there is a remove_defaults option set to true, remove the system
     # default trusted CA certs first.
-    if ca_cert_cfg.get("remove-defaults", False):
+    if "remove-defaults" in ca_cert_cfg:
+        log.warning(
+            "DEPRECATION: key 'ca-certs.remove-defaults' is now deprecated."
+            " Use 'ca_certs.remove_defaults' instead."
+        )
+        if ca_cert_cfg.get("remove-defaults", False):
+            log.debug("Removing default certificates")
+            remove_default_ca_certs(cloud.distro.name, distro_cfg)
+    elif ca_cert_cfg.get("remove_defaults", False):
         log.debug("Removing default certificates")
         remove_default_ca_certs(cloud.distro.name, distro_cfg)
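The handler above accepts both spellings while ``ca-certs`` is being phased
out; a sketch of the same configuration in its deprecated and current forms
(the certificate body is a placeholder)::

    #cloud-config
    # deprecated spelling, still honored but logged as a warning:
    # ca-certs:
    #   remove-defaults: true
    ca_certs:
      remove_defaults: true
      trusted:
        - |
          -----BEGIN CERTIFICATE-----
          YOUR-ORGS-TRUSTED-CA-CERT-HERE
          -----END CERTIFICATE-----
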
diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py
index 67889683..aaf7eaf1 100644
--- a/cloudinit/config/cc_chef.py
+++ b/cloudinit/config/cc_chef.py
@@ -14,7 +14,7 @@ import os
 from textwrap import dedent

 from cloudinit import subp, temp_utils, templater, url_helper, util
-from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema
+from cloudinit.config.schema import get_meta_doc
 from cloudinit.settings import PER_ALWAYS

 RUBY_VERSION_DEFAULT = "1.8"
@@ -137,303 +137,7 @@ meta = {
     "frequency": frequency,
 }

-schema = {
-    "type": "object",
-    "properties": {
-        "chef": {
-            "type": "object",
-            "additionalProperties": False,
-            "properties": {
-                "directories": {
-                    "type": "array",
-                    "items": {"type": "string"},
-                    "uniqueItems": True,
-                    "description": dedent(
-                        """\
-                        Create the necessary directories for chef to run. By
-                        default, it creates the following directories:
-
-                        {chef_dirs}"""
-                    ).format(
-                        chef_dirs="\n".join(
-                            ["   - ``{}``".format(d) for d in CHEF_DIRS]
-                        )
-                    ),
-                },
-                "validation_cert": {
-                    "type": "string",
-                    "description": dedent(
-                        """\
-                        Optional string to be written to file validation_key.
-                        Special value ``system`` means use the existing file.
-                        """
-                    ),
-                },
-                "validation_key": {
-                    "type": "string",
-                    "default": CHEF_VALIDATION_PEM_PATH,
-                    "description": dedent(
-                        """\
-                        Optional path for validation_cert. defaults to
-                        ``{}``.""".format(
-                            CHEF_VALIDATION_PEM_PATH
-                        )
-                    ),
-                },
-                "firstboot_path": {
-                    "type": "string",
-                    "default": CHEF_FB_PATH,
-                    "description": dedent(
-                        """\
-                        Path to write run_list and initial_attributes keys that
-                        should also be present in this configuration, defaults
-                        to ``{}``.""".format(
-                            CHEF_FB_PATH
-                        )
-                    ),
-                },
-                "exec": {
-                    "type": "boolean",
-                    "default": False,
-                    "description": dedent(
-                        """\
-                        define if we should run or not run chef (defaults to
-                        false, unless a gem install is requested, in which
-                        case this will then default to true)."""
-                    ),
-                },
-                "client_key": {
-                    "type": "string",
-                    "default": CHEF_RB_TPL_DEFAULTS["client_key"],
-                    "description": dedent(
-                        """\
-                        Optional path for client_cert. defaults to
-                        ``{}``.""".format(
-                            CHEF_RB_TPL_DEFAULTS["client_key"]
-                        )
-                    ),
-                },
-                "encrypted_data_bag_secret": {
-                    "type": "string",
-                    "default": None,
-                    "description": dedent(
-                        """\
-                        Specifies the location of the secret key used by chef
-                        to encrypt data items. By default, this path is set
-                        to None, meaning that chef will have to look at the
-                        path ``{}`` for it.
-                        """.format(
-                            CHEF_ENCRYPTED_DATA_BAG_PATH
-                        )
-                    ),
-                },
-                "environment": {
-                    "type": "string",
-                    "default": CHEF_ENVIRONMENT,
-                    "description": dedent(
-                        """\
-                        Specifies which environment chef will use. By default,
-                        it will use the ``{}`` configuration.
-                        """.format(
-                            CHEF_ENVIRONMENT
-                        )
-                    ),
-                },
-                "file_backup_path": {
-                    "type": "string",
-                    "default": CHEF_RB_TPL_DEFAULTS["file_backup_path"],
-                    "description": dedent(
-                        """\
-                        Specifies the location in which backup files are
-                        stored. By default, it uses the
-                        ``{}`` location.""".format(
-                            CHEF_RB_TPL_DEFAULTS["file_backup_path"]
-                        )
-                    ),
-                },
-                "file_cache_path": {
-                    "type": "string",
-                    "default": CHEF_RB_TPL_DEFAULTS["file_cache_path"],
-                    "description": dedent(
-                        """\
-                        Specifies the location in which chef cache files will
-                        be saved. By default, it uses the ``{}``
-                        location.""".format(
-                            CHEF_RB_TPL_DEFAULTS["file_cache_path"]
-                        )
-                    ),
-                },
-                "json_attribs": {
-                    "type": "string",
-                    "default": CHEF_FB_PATH,
-                    "description": dedent(
-                        """\
-                        Specifies the location in which some chef json data is
-                        stored. By default, it uses the
-                        ``{}`` location.""".format(
-                            CHEF_FB_PATH
-                        )
-                    ),
-                },
-                "log_level": {
-                    "type": "string",
-                    "default": CHEF_RB_TPL_DEFAULTS["log_level"],
-                    "description": dedent(
-                        """\
-                        Defines the level of logging to be stored in the log
-                        file. By default this value is set to ``{}``.
-                        """.format(
-                            CHEF_RB_TPL_DEFAULTS["log_level"]
-                        )
-                    ),
-                },
-                "log_location": {
-                    "type": "string",
-                    "default": CHEF_RB_TPL_DEFAULTS["log_location"],
-                    "description": dedent(
-                        """\
-                        Specifies the location of the chef log file. By
-                        default, the location is specified at
-                        ``{}``.""".format(
-                            CHEF_RB_TPL_DEFAULTS["log_location"]
-                        )
-                    ),
-                },
-                "node_name": {
-                    "type": "string",
-                    "description": dedent(
-                        """\
-                        The name of the node to run. By default, we will
-                        use the instance id as the node name."""
-                    ),
-                },
-                "omnibus_url": {
-                    "type": "string",
-                    "default": OMNIBUS_URL,
-                    "description": dedent(
-                        """\
-                        Omnibus URL if chef should be installed through
-                        Omnibus. By default, it uses the
-                        ``{}``.""".format(
-                            OMNIBUS_URL
-                        )
-                    ),
-                },
-                "omnibus_url_retries": {
-                    "type": "integer",
-                    "default": OMNIBUS_URL_RETRIES,
-                    "description": dedent(
-                        """\
-                        The number of retries that will be attempted to reach
-                        the Omnibus URL"""
-                    ),
-                },
-                "omnibus_version": {
-                    "type": "string",
-                    "description": dedent(
-                        """\
-                        Optional version string to require for omnibus
-                        install."""
-                    ),
-                },
-                "pid_file": {
-                    "type": "string",
-                    "default": CHEF_RB_TPL_DEFAULTS["pid_file"],
-                    "description": dedent(
-                        """\
-                        The location in which a process identification
-                        number (pid) is saved. By default, it saves
-                        in the ``{}`` location.""".format(
-                            CHEF_RB_TPL_DEFAULTS["pid_file"]
-                        )
-                    ),
-                },
-                "server_url": {
-                    "type": "string",
-                    "description": "The URL for the chef server",
-                },
-                "show_time": {
-                    "type": "boolean",
-                    "default": True,
-                    "description": "Show time in chef logs",
-                },
-                "ssl_verify_mode": {
-                    "type": "string",
-                    "default": CHEF_RB_TPL_DEFAULTS["ssl_verify_mode"],
-                    "description": dedent(
-                        """\
-                        Set the verify mode for HTTPS requests. We can have
-                        two possible values for this parameter:
-
-                            - ``:verify_none``: No validation of SSL \
-                              certificates.
-                            - ``:verify_peer``: Validate all SSL certificates.
-
-                        By default, the parameter is set as ``{}``.
-                        """.format(
-                            CHEF_RB_TPL_DEFAULTS["ssl_verify_mode"]
-                        )
-                    ),
-                },
-                "validation_name": {
-                    "type": "string",
-                    "description": dedent(
-                        """\
-                        The name of the chef-validator key that Chef Infra
-                        Client uses to access the Chef Infra Server during
-                        the initial Chef Infra Client run."""
-                    ),
-                },
-                "force_install": {
-                    "type": "boolean",
-                    "default": False,
-                    "description": dedent(
-                        """\
-                        If set to ``True``, forces chef installation, even
-                        if it is already installed."""
-                    ),
-                },
-                "initial_attributes": {
-                    "type": "object",
-                    "items": {"type": "string"},
-                    "description": dedent(
-                        """\
-                        Specify a list of initial attributes used by the
-                        cookbooks."""
-                    ),
-                },
-                "install_type": {
-                    "type": "string",
-                    "default": "packages",
-                    "description": dedent(
-                        """\
-                        The type of installation for chef. It can be one of
-                        the following values:
-
-                            - ``packages``
-                            - ``gems``
-                            - ``omnibus``"""
-                    ),
-                },
-                "run_list": {
-                    "type": "array",
-                    "items": {"type": "string"},
-                    "description": "A run list for a first boot json.",
-                },
-                "chef_license": {
-                    "type": "string",
-                    "description": dedent(
-                        """\
-                        string that indicates whether the user accepts or not
-                        the license related to some of chef products"""
-                    ),
-                },
-            },
-        }
-    },
-}
-
-__doc__ = get_meta_doc(meta, schema)
+__doc__ = get_meta_doc(meta)


 def post_run_chef(chef_cfg, log):
@@ -489,7 +193,6 @@ def handle(name, cfg, cloud, log, _args):
         )
         return

-    validate_cloudconfig_schema(cfg, schema)
     chef_cfg = cfg["chef"]

     # Ensure the chef directories we use exist
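A compact cloud-config sketch touching several of the chef keys whose
descriptions were removed above; the server URL, node name, validator name,
and run list entries are illustrative placeholders::

    #cloud-config
    chef:
      install_type: omnibus
      force_install: false
      server_url: https://chef.example.com:4000
      node_name: my-node-01
      environment: production
      validation_name: example-validator
      validation_cert: system
      ssl_verify_mode: :verify_peer
      run_list:
        - recipe[apache2]
        - role[db]
      exec: false
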
diff --git a/cloudinit/config/cc_debug.py b/cloudinit/config/cc_debug.py
index d09fc129..1a3c9346 100644
--- a/cloudinit/config/cc_debug.py
+++ b/cloudinit/config/cc_debug.py
@@ -2,37 +2,47 @@
 #
 # This file is part of cloud-init. See LICENSE file for license information.

-"""
-Debug
-------
-**Summary:** helper to debug cloud-init *internal* datastructures.
+"""Debug: Helper to debug cloud-init *internal* datastructures."""
+
+import copy
+from io import StringIO
+from textwrap import dedent
+
+from cloudinit import safeyaml, type_utils, util
+from cloudinit.config.schema import get_meta_doc
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.settings import PER_INSTANCE
+
+SKIP_KEYS = frozenset(["log_cfgs"])

+MODULE_DESCRIPTION = """\
 This module enables outputting various internal information that
 cloud-init sources provide to either a file or to the output console/log
 location that this cloud-init has been configured with when running.

 .. note::
     Log configurations are not output.
-
-**Internal name:** ``cc_debug``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
-
-**Config keys**::
-
-    debug:
-        verbose: true/false (defaulting to true)
-        output: (location to write output, defaulting to console + log)
-"""
-
-import copy
-from io import StringIO
-
-from cloudinit import safeyaml, type_utils, util
-
-SKIP_KEYS = frozenset(["log_cfgs"])
+"""

+meta = {
+    "id": "cc_debug",
+    "name": "Debug",
+    "title": "Helper to debug cloud-init *internal* datastructures",
+    "description": MODULE_DESCRIPTION,
+    "distros": [ALL_DISTROS],
+    "frequency": PER_INSTANCE,
+    "examples": [
+        dedent(
+            """\
+            debug:
+              verbose: true
+              output: /tmp/my_debug.log
+            """
+        )
+    ],
+}
+
+__doc__ = get_meta_doc(meta)


 def _make_header(text):
@@ -53,7 +63,6 @@ def _dumps(obj):

 def handle(name, cfg, cloud, log, args):
     """Handler method activated by cloud-init."""
-
     verbose = util.get_cfg_by_path(cfg, ("debug", "verbose"), default=True)
     if args:
         # if args are provided (from cmdline) then explicitly set verbose
diff --git a/cloudinit/config/cc_disable_ec2_metadata.py b/cloudinit/config/cc_disable_ec2_metadata.py
index 5e528e81..6a5e7eda 100644
--- a/cloudinit/config/cc_disable_ec2_metadata.py
+++ b/cloudinit/config/cc_disable_ec2_metadata.py
@@ -6,34 +6,35 @@
 #
 # This file is part of cloud-init. See LICENSE file for license information.

-"""
-Disable EC2 Metadata
---------------------
-**Summary:** disable aws ec2 metadata
+"""Disable EC2 Metadata: Disable AWS EC2 metadata."""

-This module can disable the ec2 datasource by rejecting the route to
-``169.254.169.254``, the usual route to the datasource. This module is
-disabled by default.
-
-**Internal name:** ``cc_disable_ec2_metadata``
-
-**Module frequency:** always
-
-**Supported distros:** all
-
-**Config keys**::
-
-    disable_ec2_metadata:
-"""
+from textwrap import dedent

 from cloudinit import subp, util
+from cloudinit.config.schema import get_meta_doc
+from cloudinit.distros import ALL_DISTROS
 from cloudinit.settings import PER_ALWAYS

-frequency = PER_ALWAYS
-
 REJECT_CMD_IF = ["route", "add", "-host", "169.254.169.254", "reject"]
 REJECT_CMD_IP = ["ip", "route", "add", "prohibit", "169.254.169.254"]

+meta = {
+    "id": "cc_disable_ec2_metadata",
+    "name": "Disable EC2 Metadata",
+    "title": "Disable AWS EC2 Metadata",
+    "description": dedent(
+        """\
+        This module can disable the ec2 datasource by rejecting the route to
+        ``169.254.169.254``, the usual route to the datasource. This module
+        is disabled by default."""
+    ),
+    "distros": [ALL_DISTROS],
+    "frequency": PER_ALWAYS,
+    "examples": ["disable_ec2_metadata: true"],
+}
+
+__doc__ = get_meta_doc(meta)
+

 def handle(name, cfg, _cloud, log, _args):
     disabled = util.get_cfg_option_bool(cfg, "disable_ec2_metadata", False)
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index 4d527c7a..c59d00cd 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -5,110 +5,18 @@
 #
 # This file is part of cloud-init. See LICENSE file for license information.

-"""
-Disk Setup
------------
-**Summary:** configure partitions and filesystems
-
-This module is able to configure simple partition tables and filesystems.
-
-.. note::
-    for more detail about configuration options for disk setup, see the disk
-    setup example
-
-For convenience, aliases can be specified for disks using the
-``device_aliases`` config key, which takes a dictionary of alias: path
-mappings. There are automatic aliases for ``swap`` and ``ephemeral<X>``, where
-``swap`` will always refer to the active swap partition and ``ephemeral<X>``
-will refer to the block device of the ephemeral image.
-
-Disk partitioning is done using the ``disk_setup`` directive. This config
-directive accepts a dictionary where each key is either a path to a block
-device or an alias specified in ``device_aliases``, and each value is the
-configuration options for the device. The ``table_type`` option specifies the
-partition table type, either ``mbr`` or ``gpt``. The ``layout`` option
-specifies how partitions on the device are to be arranged. If ``layout`` is set
-to ``true``, a single partition using all the space on the device will be
-created. If set to ``false``, no partitions will be created. Partitions can be
-specified by providing a list to ``layout``, where each entry in the list is
-either a size or a list containing a size and the numerical value for a
-partition type. The size for partitions is specified in **percentage** of disk
-space, not in bytes (e.g. a size of 33 would take up 1/3 of the disk space).
-The ``overwrite`` option controls whether this module tries to be safe about
-writing partition tables or not. If ``overwrite: false`` is set, the device
-will be checked for a partition table and for a file system and if either is
-found, the operation will be skipped. If ``overwrite: true`` is set, no checks
-will be performed.
-
-.. note::
-    Using ``overwrite: true`` is dangerous and can lead to data loss, so double
-    check that the correct device has been specified if using this option.
-
-File system configuration is done using the ``fs_setup`` directive. This config
-directive accepts a list of filesystem configs. The device to create the
-filesystem on may be specified either as a path or as an alias in the format
-``<alias name>.<y>`` where ``<y>`` denotes the partition number on the device.
-The partition can also be specified by setting ``partition`` to the desired
-partition number. The ``partition`` option may also be set to ``auto``, in
-which case this module will search for the existence of a filesystem matching
-the ``label``, ``type`` and ``device`` of the ``fs_setup`` entry and will skip
-creating the filesystem if one is found. The ``partition`` option may also be
-set to ``any``, in which case any file system that matches ``type`` and
-``device`` will cause this module to skip filesystem creation for the
-``fs_setup`` entry, regardless of ``label`` matching or not. To write a
-filesystem directly to a device, use ``partition: none``. ``partition: none``
-will **always** write the filesystem, even when the ``label`` and
-``filesystem`` are matched, and ``overwrite`` is ``false``.
-
-A label can be specified for the filesystem using
-``label``, and the filesystem type can be specified using ``filesystem``.
-
-.. note::
-    If specifying device using the ``<device name>.<partition number>`` format,
-    the value of ``partition`` will be overwritten.
-
-.. note::
-    Using ``overwrite: true`` for filesystems is dangerous and can lead to data
-    loss, so double check the entry in ``fs_setup``.
-
-.. note::
-    ``replace_fs`` is ignored unless ``partition`` is ``auto`` or ``any``.
-
-**Internal name:** ``cc_disk_setup``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
-
-**Config keys**::
-
-    device_aliases:
-        <alias name>: <device path>
-    disk_setup:
-        <alias name/path>:
-            table_type: <'mbr'/'gpt'>
-            layout:
-                - [33,82]
-                - 66
-            overwrite: <true/false>
-    fs_setup:
-        - label: