author     zdc <zdc@users.noreply.github.com>        2020-09-15 21:35:20 +0300
committer  GitHub <noreply@github.com>               2020-09-15 21:35:20 +0300
commit     76adf82b8a4dbcf636151d292175b7d1ac182fcf (patch)
tree       f57f3db085a724df237ffa64b589c6bb6dd3b28f /cloudinit
parent     1a790ee102fd405e5c3a20a17a69ba0c118ed874 (diff)
parent     7cd260b313267dc7123cb99a75d4555e24909cca (diff)
Merge pull request #18 from zdc/T2117-equuleus-20.3
T2117: Cloud-init updated to 20.3
Diffstat (limited to 'cloudinit')
-rw-r--r--  cloudinit/analyze/dump.py | 18
-rw-r--r--  cloudinit/analyze/show.py | 48
-rw-r--r--  cloudinit/analyze/tests/test_boot.py | 16
-rw-r--r--  cloudinit/analyze/tests/test_dump.py | 20
-rw-r--r--  cloudinit/apport.py | 1
-rw-r--r--  cloudinit/atomic_helper.py | 4
-rw-r--r--  cloudinit/cmd/clean.py | 5
-rw-r--r--  cloudinit/cmd/devel/logs.py | 4
-rwxr-xr-x  cloudinit/cmd/devel/make_mime.py | 114
-rw-r--r--  cloudinit/cmd/devel/parser.py | 5
-rwxr-xr-x  cloudinit/cmd/devel/render.py | 5
-rw-r--r--  cloudinit/cmd/devel/tests/test_logs.py | 3
-rw-r--r--  cloudinit/cmd/query.py | 45
-rw-r--r--  cloudinit/cmd/tests/test_clean.py | 1
-rw-r--r--  cloudinit/cmd/tests/test_main.py | 2
-rw-r--r--  cloudinit/cmd/tests/test_query.py | 392
-rw-r--r--  cloudinit/cmd/tests/test_status.py | 1
-rw-r--r--  cloudinit/config/cc_apk_configure.py | 263
-rw-r--r--  cloudinit/config/cc_apt_configure.py | 582
-rw-r--r--  cloudinit/config/cc_apt_pipelining.py | 2
-rw-r--r--  cloudinit/config/cc_bootcmd.py | 3
-rwxr-xr-x  cloudinit/config/cc_byobu.py | 3
-rw-r--r--  cloudinit/config/cc_ca_certs.py | 45
-rw-r--r--  cloudinit/config/cc_chef.py | 403
-rw-r--r--  cloudinit/config/cc_disable_ec2_metadata.py | 7
-rw-r--r--  cloudinit/config/cc_disk_setup.py | 83
-rw-r--r--  cloudinit/config/cc_emit_upstart.py | 8
-rw-r--r--  cloudinit/config/cc_fan.py | 7
-rw-r--r--  cloudinit/config/cc_final_message.py | 2
-rw-r--r--  cloudinit/config/cc_growpart.py | 33
-rw-r--r--  cloudinit/config/cc_grub_dpkg.py | 98
-rw-r--r--  cloudinit/config/cc_keys_to_console.py | 3
-rw-r--r--  cloudinit/config/cc_landscape.py | 3
-rw-r--r--  cloudinit/config/cc_locale.py | 65
-rw-r--r--  cloudinit/config/cc_lxd.py | 23
-rw-r--r--  cloudinit/config/cc_mcollective.py | 3
-rw-r--r--  cloudinit/config/cc_mounts.py | 67
-rw-r--r--  cloudinit/config/cc_ntp.py | 110
-rw-r--r--  cloudinit/config/cc_package_update_upgrade_install.py | 3
-rw-r--r--  cloudinit/config/cc_phone_home.py | 3
-rw-r--r--  cloudinit/config/cc_power_state_change.py | 72
-rw-r--r--  cloudinit/config/cc_puppet.py | 13
-rw-r--r--  cloudinit/config/cc_resizefs.py | 23
-rw-r--r--  cloudinit/config/cc_resolv_conf.py | 4
-rw-r--r--  cloudinit/config/cc_rh_subscription.py | 19
-rw-r--r--  cloudinit/config/cc_rsyslog.py | 11
-rw-r--r--  cloudinit/config/cc_salt_minion.py | 4
-rw-r--r--  cloudinit/config/cc_scripts_per_boot.py | 4
-rw-r--r--  cloudinit/config/cc_scripts_per_instance.py | 4
-rw-r--r--  cloudinit/config/cc_scripts_per_once.py | 4
-rw-r--r--  cloudinit/config/cc_scripts_user.py | 4
-rw-r--r--  cloudinit/config/cc_scripts_vendor.py | 3
-rw-r--r--  cloudinit/config/cc_seed_random.py | 5
-rw-r--r--  cloudinit/config/cc_set_hostname.py | 3
-rwxr-xr-x  cloudinit/config/cc_set_passwords.py | 7
-rw-r--r--  cloudinit/config/cc_snap.py | 39
-rw-r--r--  cloudinit/config/cc_spacewalk.py | 8
-rwxr-xr-x  cloudinit/config/cc_ssh.py | 43
-rwxr-xr-x  cloudinit/config/cc_ssh_authkey_fingerprints.py | 6
-rwxr-xr-x  cloudinit/config/cc_ssh_import_id.py | 5
-rw-r--r--  cloudinit/config/cc_ubuntu_advantage.py | 13
-rw-r--r--  cloudinit/config/cc_ubuntu_drivers.py | 9
-rw-r--r--  cloudinit/config/cc_users_groups.py | 7
-rw-r--r--  cloudinit/config/cc_write_files.py | 191
-rw-r--r--  cloudinit/config/cc_yum_add_repo.py | 4
-rw-r--r--  cloudinit/config/schema.py | 137
-rw-r--r--  cloudinit/config/tests/test_disable_ec2_metadata.py | 14
-rw-r--r--  cloudinit/config/tests/test_final_message.py | 46
-rw-r--r--  cloudinit/config/tests/test_grub_dpkg.py | 176
-rw-r--r--  cloudinit/config/tests/test_mounts.py | 28
-rw-r--r--  cloudinit/config/tests/test_resolv_conf.py | 86
-rw-r--r--  cloudinit/config/tests/test_set_passwords.py | 38
-rw-r--r--  cloudinit/config/tests/test_snap.py | 60
-rw-r--r--  cloudinit/config/tests/test_ubuntu_advantage.py | 28
-rw-r--r--  cloudinit/config/tests/test_ubuntu_drivers.py | 33
-rw-r--r--  cloudinit/config/tests/test_users_groups.py | 10
-rwxr-xr-x  cloudinit/distros/__init__.py | 190
-rw-r--r--  cloudinit/distros/alpine.py | 165
-rw-r--r--  cloudinit/distros/arch.py | 21
-rw-r--r--  cloudinit/distros/bsd.py | 129
-rw-r--r--  cloudinit/distros/bsd_utils.py | 50
-rw-r--r--  cloudinit/distros/debian.py | 9
-rw-r--r--  cloudinit/distros/freebsd.py | 148
-rw-r--r--  cloudinit/distros/gentoo.py | 17
-rw-r--r--  cloudinit/distros/netbsd.py | 159
-rw-r--r--  cloudinit/distros/networking.py | 212
-rw-r--r--  cloudinit/distros/openbsd.py | 52
-rw-r--r--  cloudinit/distros/opensuse.py | 12
-rw-r--r--  cloudinit/distros/parsers/resolv_conf.py | 7
-rw-r--r--  cloudinit/distros/rhel.py | 9
-rw-r--r--  cloudinit/distros/tests/__init__.py | 0
-rw-r--r--  cloudinit/distros/tests/test_init.py | 156
-rw-r--r--  cloudinit/distros/tests/test_networking.py | 192
-rw-r--r--  cloudinit/distros/ubuntu.py | 2
-rw-r--r--  cloudinit/features.py | 44
-rw-r--r--  cloudinit/gpg.py | 19
-rw-r--r--  cloudinit/handlers/boot_hook.py | 5
-rw-r--r--  cloudinit/handlers/jinja_template.py | 3
-rw-r--r--  cloudinit/handlers/upstart_job.py | 11
-rw-r--r--  cloudinit/helpers.py | 4
-rw-r--r--  cloudinit/log.py | 17
-rw-r--r--  cloudinit/net/__init__.py | 192
-rw-r--r--  cloudinit/net/bsd.py | 167
-rwxr-xr-x  cloudinit/net/cmdline.py | 52
-rw-r--r--  cloudinit/net/dhcp.py | 50
-rw-r--r--  cloudinit/net/eni.py | 15
-rw-r--r--  cloudinit/net/freebsd.py | 176
-rw-r--r--  cloudinit/net/netbsd.py | 44
-rw-r--r--  cloudinit/net/netplan.py | 17
-rw-r--r--  cloudinit/net/network_state.py | 32
-rw-r--r--  cloudinit/net/openbsd.py | 46
-rw-r--r--  cloudinit/net/renderers.py | 7
-rw-r--r--  cloudinit/net/sysconfig.py | 21
-rw-r--r--  cloudinit/net/tests/test_dhcp.py | 108
-rw-r--r--  cloudinit/net/tests/test_init.py | 172
-rw-r--r--  cloudinit/net/tests/test_network_state.py | 10
-rw-r--r--  cloudinit/netinfo.py | 75
-rw-r--r--  cloudinit/reporting/events.py | 23
-rwxr-xr-x  cloudinit/reporting/handlers.py | 39
-rw-r--r--  cloudinit/serial.py | 2
-rw-r--r--  cloudinit/sources/DataSourceAltCloud.py | 8
-rwxr-xr-x  cloudinit/sources/DataSourceAzure.py | 167
-rw-r--r--  cloudinit/sources/DataSourceCloudStack.py | 3
-rw-r--r--  cloudinit/sources/DataSourceConfigDrive.py | 11
-rw-r--r--  cloudinit/sources/DataSourceDigitalOcean.py | 2
-rw-r--r--  cloudinit/sources/DataSourceEc2.py | 200
-rw-r--r--  cloudinit/sources/DataSourceGCE.py | 2
-rw-r--r--  cloudinit/sources/DataSourceHetzner.py | 15
-rw-r--r--  cloudinit/sources/DataSourceIBMCloud.py | 6
-rw-r--r--  cloudinit/sources/DataSourceMAAS.py | 5
-rw-r--r--  cloudinit/sources/DataSourceNoCloud.py | 24
-rw-r--r--  cloudinit/sources/DataSourceOVF.py | 63
-rw-r--r--  cloudinit/sources/DataSourceOpenNebula.py | 54
-rw-r--r--  cloudinit/sources/DataSourceOpenStack.py | 9
-rw-r--r--  cloudinit/sources/DataSourceOracle.py | 396
-rw-r--r--  cloudinit/sources/DataSourceRbxCloud.py | 34
-rw-r--r--  cloudinit/sources/DataSourceSmartOS.py | 9
-rw-r--r--  cloudinit/sources/__init__.py | 55
-rwxr-xr-x  cloudinit/sources/helpers/azure.py | 524
-rw-r--r--  cloudinit/sources/helpers/digitalocean.py | 21
-rw-r--r--  cloudinit/sources/helpers/hetzner.py | 19
-rw-r--r--  cloudinit/sources/helpers/netlink.py | 3
-rw-r--r--  cloudinit/sources/helpers/openstack.py | 60
-rw-r--r--  cloudinit/sources/helpers/tests/test_netlink.py | 167
-rw-r--r--  cloudinit/sources/helpers/tests/test_openstack.py | 44
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config.py | 26
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_custom_script.py | 3
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_file.py | 1
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_namespace.py | 1
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_nic.py | 7
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_passwd.py | 7
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_source.py | 1
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/guestcust_util.py | 48
-rw-r--r--  cloudinit/sources/tests/test_init.py | 156
-rw-r--r--  cloudinit/sources/tests/test_oracle.py | 1032
-rw-r--r--  cloudinit/ssh_util.py | 48
-rw-r--r--  cloudinit/stages.py | 3
-rw-r--r--  cloudinit/subp.py | 334
-rw-r--r--  cloudinit/templater.py | 4
-rw-r--r--  cloudinit/tests/helpers.py | 72
-rw-r--r--  cloudinit/tests/test_conftest.py | 65
-rw-r--r--  cloudinit/tests/test_features.py | 60
-rw-r--r--  cloudinit/tests/test_gpg.py | 10
-rw-r--r--  cloudinit/tests/test_netinfo.py | 40
-rw-r--r--  cloudinit/tests/test_subp.py | 227
-rw-r--r--  cloudinit/tests/test_url_helper.py | 34
-rw-r--r--  cloudinit/tests/test_util.py | 210
-rw-r--r--  cloudinit/url_helper.py | 21
-rw-r--r--  cloudinit/user_data.py | 39
-rw-r--r--  cloudinit/util.py | 643
-rw-r--r--  cloudinit/version.py | 2
171 files changed, 8071 insertions, 3469 deletions
diff --git a/cloudinit/analyze/dump.py b/cloudinit/analyze/dump.py
index 1f3060d0..62ad51fe 100644
--- a/cloudinit/analyze/dump.py
+++ b/cloudinit/analyze/dump.py
@@ -4,6 +4,7 @@ import calendar
from datetime import datetime
import sys
+from cloudinit import subp
from cloudinit import util
stage_to_description = {
@@ -51,7 +52,7 @@ def parse_timestamp(timestampstr):
def parse_timestamp_from_date(timestampstr):
- out, _ = util.subp(['date', '+%s.%3N', '-d', timestampstr])
+ out, _ = subp.subp(['date', '+%s.%3N', '-d', timestampstr])
timestamp = out.strip()
return float(timestamp)
@@ -74,8 +75,12 @@ def parse_ci_logline(line):
#
# 2017-05-22 18:02:01,088 - util.py[DEBUG]: Cloud-init v. 0.7.9 running \
# 'init-local' at Mon, 22 May 2017 18:02:01 +0000. Up 2.0 seconds.
+ #
+ # Apr 30 19:39:11 cloud-init[2673]: handlers.py[DEBUG]: start: \
+ # init-local/check-cache: attempting to read from cache [check]
- separators = [' - ', ' [CLOUDINIT] ']
+ amazon_linux_2_sep = ' cloud-init['
+ separators = [' - ', ' [CLOUDINIT] ', amazon_linux_2_sep]
found = False
for sep in separators:
if sep in line:
@@ -98,7 +103,14 @@ def parse_ci_logline(line):
hostname = extra.split()[-1]
else:
hostname = timehost.split()[-1]
- timestampstr = timehost.split(hostname)[0].strip()
+ if sep == amazon_linux_2_sep:
+ # This is an Amazon Linux style line, with no hostname and a PID.
+ # Use the whole of timehost as timestampstr, and strip off the PID
+ # from the start of eventstr.
+ timestampstr = timehost.strip()
+ eventstr = eventstr.split(maxsplit=1)[1]
+ else:
+ timestampstr = timehost.split(hostname)[0].strip()
if 'Cloud-init v.' in eventstr:
event_type = 'start'
if 'running' in eventstr:
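
As a sketch of the new Amazon Linux 2 branch above, this is the split applied to the sample journal line from the added comment (standalone Python, not the module code; variable names mirror the hunk):

    line = ("Apr 30 19:39:11 cloud-init[2673]: handlers.py[DEBUG]: start:"
            " init-local/check-cache: attempting to read from cache [check]")
    amazon_linux_2_sep = ' cloud-init['
    # These lines carry no hostname, so everything before the separator is
    # the timestamp; eventstr starts with the PID, which gets stripped off.
    timehost, eventstr = line.split(amazon_linux_2_sep)
    timestampstr = timehost.strip()           # 'Apr 30 19:39:11'
    eventstr = eventstr.split(maxsplit=1)[1]  # 'handlers.py[DEBUG]: start: ...'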
diff --git a/cloudinit/analyze/show.py b/cloudinit/analyze/show.py
index fb152b1d..01a4d3e5 100644
--- a/cloudinit/analyze/show.py
+++ b/cloudinit/analyze/show.py
@@ -11,31 +11,29 @@ import os
import time
import sys
+from cloudinit import subp
from cloudinit import util
from cloudinit.distros import uses_systemd
-# An event:
-'''
-{
- "description": "executing late commands",
- "event_type": "start",
- "level": "INFO",
- "name": "cmd-install/stage-late"
- "origin": "cloudinit",
- "timestamp": 1461164249.1590767,
-},
-
- {
- "description": "executing late commands",
- "event_type": "finish",
- "level": "INFO",
- "name": "cmd-install/stage-late",
- "origin": "cloudinit",
- "result": "SUCCESS",
- "timestamp": 1461164249.1590767
- }
-
-'''
+# Example events:
+# {
+# "description": "executing late commands",
+# "event_type": "start",
+# "level": "INFO",
+# "name": "cmd-install/stage-late"
+# "origin": "cloudinit",
+# "timestamp": 1461164249.1590767,
+# }
+# {
+# "description": "executing late commands",
+# "event_type": "finish",
+# "level": "INFO",
+# "name": "cmd-install/stage-late",
+# "origin": "cloudinit",
+# "result": "SUCCESS",
+# "timestamp": 1461164249.1590767
+# }
+
format_key = {
'%d': 'delta',
'%D': 'description',
@@ -155,7 +153,7 @@ class SystemctlReader(object):
:return: whether the subp call failed or not
'''
try:
- value, err = util.subp(self.args, capture=True)
+ value, err = subp.subp(self.args, capture=True)
if err:
return err
self.epoch = value
@@ -215,7 +213,7 @@ def gather_timestamps_using_dmesg():
with gather_timestamps_using_systemd
'''
try:
- data, _ = util.subp(['dmesg'], capture=True)
+ data, _ = subp.subp(['dmesg'], capture=True)
split_entries = data[0].splitlines()
for i in split_entries:
if i.decode('UTF-8').find('user') != -1:
@@ -269,7 +267,7 @@ def gather_timestamps_using_systemd():
except OSError as err:
raise RuntimeError('Could not determine container boot '
'time from /proc/1/cmdline. ({})'
- .format(err))
+ .format(err)) from err
status = CONTAINER_CODE
else:
status = FAIL_CODE
diff --git a/cloudinit/analyze/tests/test_boot.py b/cloudinit/analyze/tests/test_boot.py
index f4001c14..f69423c3 100644
--- a/cloudinit/analyze/tests/test_boot.py
+++ b/cloudinit/analyze/tests/test_boot.py
@@ -25,7 +25,7 @@ class TestDistroChecker(CiTestCase):
m_get_linux_distro, m_is_FreeBSD):
self.assertEqual(err_code, dist_check_timestamp())
- @mock.patch('cloudinit.util.subp', return_value=(0, 1))
+ @mock.patch('cloudinit.subp.subp', return_value=(0, 1))
def test_subp_fails(self, m_subp):
self.assertEqual(err_code, dist_check_timestamp())
@@ -42,7 +42,7 @@ class TestSystemCtlReader(CiTestCase):
with self.assertRaises(RuntimeError):
reader.parse_epoch_as_float()
- @mock.patch('cloudinit.util.subp', return_value=('U=1000000', None))
+ @mock.patch('cloudinit.subp.subp', return_value=('U=1000000', None))
def test_systemctl_works_correctly_threshold(self, m_subp):
reader = SystemctlReader('dummyProperty', 'dummyParameter')
self.assertEqual(1.0, reader.parse_epoch_as_float())
@@ -50,12 +50,12 @@ class TestSystemCtlReader(CiTestCase):
self.assertTrue(thresh < 1e-6)
self.assertTrue(thresh > (-1 * 1e-6))
- @mock.patch('cloudinit.util.subp', return_value=('U=0', None))
+ @mock.patch('cloudinit.subp.subp', return_value=('U=0', None))
def test_systemctl_succeed_zero(self, m_subp):
reader = SystemctlReader('dummyProperty', 'dummyParameter')
self.assertEqual(0.0, reader.parse_epoch_as_float())
- @mock.patch('cloudinit.util.subp', return_value=('U=1', None))
+ @mock.patch('cloudinit.subp.subp', return_value=('U=1', None))
def test_systemctl_succeed_distinct(self, m_subp):
reader = SystemctlReader('dummyProperty', 'dummyParameter')
val1 = reader.parse_epoch_as_float()
@@ -64,13 +64,13 @@ class TestSystemCtlReader(CiTestCase):
val2 = reader2.parse_epoch_as_float()
self.assertNotEqual(val1, val2)
- @mock.patch('cloudinit.util.subp', return_value=('100', None))
+ @mock.patch('cloudinit.subp.subp', return_value=('100', None))
def test_systemctl_epoch_not_splittable(self, m_subp):
reader = SystemctlReader('dummyProperty', 'dummyParameter')
with self.assertRaises(IndexError):
reader.parse_epoch_as_float()
- @mock.patch('cloudinit.util.subp', return_value=('U=foobar', None))
+ @mock.patch('cloudinit.subp.subp', return_value=('U=foobar', None))
def test_systemctl_cannot_convert_epoch_to_float(self, m_subp):
reader = SystemctlReader('dummyProperty', 'dummyParameter')
with self.assertRaises(ValueError):
@@ -130,7 +130,7 @@ class TestAnalyzeBoot(CiTestCase):
self.assertEqual(err_string, data)
@mock.patch("cloudinit.util.is_container", return_value=True)
- @mock.patch('cloudinit.util.subp', return_value=('U=1000000', None))
+ @mock.patch('cloudinit.subp.subp', return_value=('U=1000000', None))
def test_container_no_ci_log_line(self, m_is_container, m_subp):
path = os.path.dirname(os.path.abspath(__file__))
log_path = path + '/boot-test.log'
@@ -148,7 +148,7 @@ class TestAnalyzeBoot(CiTestCase):
self.assertEqual(FAIL_CODE, finish_code)
@mock.patch("cloudinit.util.is_container", return_value=True)
- @mock.patch('cloudinit.util.subp', return_value=('U=1000000', None))
+ @mock.patch('cloudinit.subp.subp', return_value=('U=1000000', None))
@mock.patch('cloudinit.analyze.__main__._get_events', return_value=[{
'name': 'init-local', 'description': 'starting search', 'timestamp':
100000}])
diff --git a/cloudinit/analyze/tests/test_dump.py b/cloudinit/analyze/tests/test_dump.py
index db2a667b..dac1efb6 100644
--- a/cloudinit/analyze/tests/test_dump.py
+++ b/cloudinit/analyze/tests/test_dump.py
@@ -5,7 +5,8 @@ from textwrap import dedent
from cloudinit.analyze.dump import (
dump_events, parse_ci_logline, parse_timestamp)
-from cloudinit.util import which, write_file
+from cloudinit.util import write_file
+from cloudinit.subp import which
from cloudinit.tests.helpers import CiTestCase, mock, skipIf
@@ -119,6 +120,23 @@ class TestParseCILogLine(CiTestCase):
m_parse_from_date.assert_has_calls(
[mock.call("2016-08-30 21:53:25.972325+00:00")])
+ def test_parse_logline_returns_event_for_amazon_linux_2_line(self):
+ line = (
+ "Apr 30 19:39:11 cloud-init[2673]: handlers.py[DEBUG]: start:"
+ " init-local/check-cache: attempting to read from cache [check]")
+ # Generate the expected value using `datetime`, so that TZ
+ # determination is consistent with the code under test.
+ timestamp_dt = datetime.strptime(
+ "Apr 30 19:39:11", "%b %d %H:%M:%S"
+ ).replace(year=datetime.now().year)
+ expected = {
+ 'description': 'attempting to read from cache [check]',
+ 'event_type': 'start',
+ 'name': 'init-local/check-cache',
+ 'origin': 'cloudinit',
+ 'timestamp': timestamp_dt.timestamp()}
+ self.assertEqual(expected, parse_ci_logline(line))
+
SAMPLE_LOGS = dedent("""\
Nov 03 06:51:06.074410 x2 cloud-init[106]: [CLOUDINIT] util.py[DEBUG]:\
diff --git a/cloudinit/apport.py b/cloudinit/apport.py
index 1f2c2e7e..9bded16c 100644
--- a/cloudinit/apport.py
+++ b/cloudinit/apport.py
@@ -36,6 +36,7 @@ KNOWN_CLOUD_NAMES = [
'OVF',
'RbxCloud - (HyperOne, Rootbox, Rubikon)',
'OpenTelekomCloud',
+ 'SAP Converged Cloud',
'Scaleway',
'SmartOS',
'VMware',
diff --git a/cloudinit/atomic_helper.py b/cloudinit/atomic_helper.py
index 1f61faa2..485ff92f 100644
--- a/cloudinit/atomic_helper.py
+++ b/cloudinit/atomic_helper.py
@@ -11,10 +11,10 @@ LOG = logging.getLogger(__name__)
def write_file(filename, content, mode=_DEF_PERMS,
- omode="wb", copy_mode=False):
+ omode="wb", preserve_mode=False):
# open filename in mode 'omode', write content, set permissions to 'mode'
- if copy_mode:
+ if preserve_mode:
try:
file_stat = os.stat(filename)
mode = stat.S_IMODE(file_stat.st_mode)
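
A minimal usage sketch of the renamed flag (the path and contents are illustrative): with preserve_mode=True, write_file stats the existing file and reuses its current permission bits instead of the default mode.

    from cloudinit.atomic_helper import write_file

    # Rewrite the file, keeping whatever mode bits it already has
    # (assumes the file exists; otherwise the default mode is used).
    write_file('/etc/example.conf', b'new contents\n', preserve_mode=True)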
diff --git a/cloudinit/cmd/clean.py b/cloudinit/cmd/clean.py
index 30e49de0..928a8eea 100644
--- a/cloudinit/cmd/clean.py
+++ b/cloudinit/cmd/clean.py
@@ -10,9 +10,8 @@ import os
import sys
from cloudinit.stages import Init
-from cloudinit.util import (
- ProcessExecutionError, del_dir, del_file, get_config_logfiles,
- is_link, subp)
+from cloudinit.subp import (ProcessExecutionError, subp)
+from cloudinit.util import (del_dir, del_file, get_config_logfiles, is_link)
def error(msg):
diff --git a/cloudinit/cmd/devel/logs.py b/cloudinit/cmd/devel/logs.py
index 4c086b51..51c61cca 100644
--- a/cloudinit/cmd/devel/logs.py
+++ b/cloudinit/cmd/devel/logs.py
@@ -12,8 +12,8 @@ import sys
from cloudinit.sources import INSTANCE_JSON_SENSITIVE_FILE
from cloudinit.temp_utils import tempdir
-from cloudinit.util import (
- ProcessExecutionError, chdir, copy, ensure_dir, subp, write_file)
+from cloudinit.subp import (ProcessExecutionError, subp)
+from cloudinit.util import (chdir, copy, ensure_dir, write_file)
CLOUDINIT_LOGS = ['/var/log/cloud-init.log', '/var/log/cloud-init-output.log']
diff --git a/cloudinit/cmd/devel/make_mime.py b/cloudinit/cmd/devel/make_mime.py
new file mode 100755
index 00000000..4e6a5778
--- /dev/null
+++ b/cloudinit/cmd/devel/make_mime.py
@@ -0,0 +1,114 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Generate multi-part mime messages for user-data """
+
+import argparse
+import sys
+from email.mime.multipart import MIMEMultipart
+from email.mime.text import MIMEText
+
+from cloudinit import log
+from cloudinit.handlers import INCLUSION_TYPES_MAP
+from . import addLogHandlerCLI
+
+NAME = 'make-mime'
+LOG = log.getLogger(NAME)
+EPILOG = ("Example: make-mime -a config.yaml:cloud-config "
+ "-a script.sh:x-shellscript > user-data")
+
+
+def file_content_type(text):
+ """ Return file content type by reading the first line of the input. """
+ try:
+ filename, content_type = text.split(":", 1)
+ return (open(filename, 'r'), filename, content_type.strip())
+ except ValueError as e:
+ raise argparse.ArgumentError(
+ text, "Invalid value for %r" % (text)
+ ) from e
+
+
+def get_parser(parser=None):
+ """Build or extend and arg parser for make-mime utility.
+
+ @param parser: Optional existing ArgumentParser instance representing the
+ subcommand which will be extended to support the args of this utility.
+
+ @returns: ArgumentParser with proper argument configuration.
+ """
+ if not parser:
+ parser = argparse.ArgumentParser()
+ # update the parser's doc and add an epilog to show an example
+ parser.description = __doc__
+ parser.epilog = EPILOG
+ parser.add_argument("-a", "--attach", dest="files", type=file_content_type,
+ action='append', default=[],
+ metavar="<file>:<content-type>",
+ help=("attach the given file as the specified "
+ "content-type"))
+ parser.add_argument('-l', '--list-types', action='store_true',
+ default=False,
+ help='List supported cloud-init content types.')
+ parser.add_argument('-f', '--force', action='store_true',
+ default=False,
+ help='Ignore unknown content-type warnings')
+ return parser
+
+
+def get_content_types(strip_prefix=False):
+ """ Return a list of cloud-init supported content types. Optionally
+ strip out the leading 'text/' of the type if strip_prefix=True.
+ """
+ return sorted([ctype.replace("text/", "") if strip_prefix else ctype
+ for ctype in INCLUSION_TYPES_MAP.values()])
+
+
+def handle_args(name, args):
+ """Create a multi-part MIME archive for use as user-data. Optionally
+ print out the list of supported content types of cloud-init.
+
+ Also set up CLI log handlers to report to stderr since this is a development
+ utility which should be run by a human on the CLI.
+
+ @return 0 on success, 1 on failure.
+ """
+ addLogHandlerCLI(LOG, log.DEBUG if args.debug else log.WARNING)
+ if args.list_types:
+ print("\n".join(get_content_types(strip_prefix=True)))
+ return 0
+
+ sub_messages = []
+ errors = []
+ for i, (fh, filename, format_type) in enumerate(args.files):
+ contents = fh.read()
+ sub_message = MIMEText(contents, format_type, sys.getdefaultencoding())
+ sub_message.add_header('Content-Disposition',
+ 'attachment; filename="%s"' % (filename))
+ content_type = sub_message.get_content_type().lower()
+ if content_type not in get_content_types():
+ level = "WARNING" if args.force else "ERROR"
+ msg = (level + ": content type %r for attachment %s "
+ "may be incorrect!") % (content_type, i + 1)
+ sys.stderr.write(msg + '\n')
+ errors.append(msg)
+ sub_messages.append(sub_message)
+ if len(errors) and not args.force:
+ sys.stderr.write("Invalid content-types, override with --force\n")
+ return 1
+ combined_message = MIMEMultipart()
+ for msg in sub_messages:
+ combined_message.attach(msg)
+ print(combined_message)
+ return 0
+
+
+def main():
+ args = get_parser().parse_args()
+ return(handle_args(NAME, args))
+
+
+if __name__ == '__main__':
+ sys.exit(main())
+
+
+# vi: ts=4 expandtab
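
Going by the EPILOG string and the devel subcommand registration in the next hunk, the utility would be invoked roughly as follows (file names are illustrative):

    # Build a multi-part MIME user-data archive from two attachments.
    cloud-init devel make-mime -a config.yaml:cloud-config \
        -a script.sh:x-shellscript > user-data

    # List the content types accepted by -a/--attach.
    cloud-init devel make-mime --list-types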
diff --git a/cloudinit/cmd/devel/parser.py b/cloudinit/cmd/devel/parser.py
index 99a234ce..1a3c46a4 100644
--- a/cloudinit/cmd/devel/parser.py
+++ b/cloudinit/cmd/devel/parser.py
@@ -9,6 +9,7 @@ from cloudinit.config import schema
from . import net_convert
from . import render
+from . import make_mime
def get_parser(parser=None):
@@ -25,7 +26,9 @@ def get_parser(parser=None):
(net_convert.NAME, net_convert.__doc__,
net_convert.get_parser, net_convert.handle_args),
(render.NAME, render.__doc__,
- render.get_parser, render.handle_args)
+ render.get_parser, render.handle_args),
+ (make_mime.NAME, make_mime.__doc__,
+ make_mime.get_parser, make_mime.handle_args),
]
for (subcmd, helpmsg, get_parser, handler) in subcmds:
parser = subparsers.add_parser(subcmd, help=helpmsg)
diff --git a/cloudinit/cmd/devel/render.py b/cloudinit/cmd/devel/render.py
index 1bc22406..1090aa16 100755
--- a/cloudinit/cmd/devel/render.py
+++ b/cloudinit/cmd/devel/render.py
@@ -57,8 +57,9 @@ def handle_args(name, args):
paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE)
if not os.path.exists(instance_data_fn):
LOG.warning(
- 'Missing root-readable %s. Using redacted %s instead.',
- instance_data_fn, redacted_data_fn)
+ 'Missing root-readable %s. Using redacted %s instead.',
+ instance_data_fn, redacted_data_fn
+ )
instance_data_fn = redacted_data_fn
else:
instance_data_fn = redacted_data_fn
diff --git a/cloudinit/cmd/devel/tests/test_logs.py b/cloudinit/cmd/devel/tests/test_logs.py
index d2dfa8de..ddfd58e1 100644
--- a/cloudinit/cmd/devel/tests/test_logs.py
+++ b/cloudinit/cmd/devel/tests/test_logs.py
@@ -8,7 +8,8 @@ from cloudinit.cmd.devel import logs
from cloudinit.sources import INSTANCE_JSON_SENSITIVE_FILE
from cloudinit.tests.helpers import (
FilesystemMockingTestCase, mock, wrap_and_call)
-from cloudinit.util import ensure_dir, load_file, subp, write_file
+from cloudinit.subp import subp
+from cloudinit.util import ensure_dir, load_file, write_file
@mock.patch('cloudinit.cmd.devel.logs.os.getuid')
diff --git a/cloudinit/cmd/query.py b/cloudinit/cmd/query.py
index e3db8679..07db9552 100644
--- a/cloudinit/cmd/query.py
+++ b/cloudinit/cmd/query.py
@@ -1,6 +1,17 @@
# This file is part of cloud-init. See LICENSE file for license information.
-"""Query standardized instance metadata from the command line."""
+"""Query standardized instance metadata provided to machine, returning a JSON
+structure.
+
+Some instance-data values may be binary on some platforms, such as userdata and
+vendordata. Attempt to decompress and decode UTF-8 any binary values.
+
+Any binary values in the instance metadata will be base64-encoded and prefixed
+with "ci-b64:" in the output. userdata and, where applicable, vendordata may
+be provided to the machine gzip-compressed (and therefore as binary data).
+query will attempt to decompress these to a string before emitting the JSON
+output; if this fails, they are treated as binary.
+"""
import argparse
from errno import EACCES
@@ -30,7 +41,7 @@ def get_parser(parser=None):
"""
if not parser:
parser = argparse.ArgumentParser(
- prog=NAME, description='Query cloud-init instance data')
+ prog=NAME, description=__doc__)
parser.add_argument(
'-d', '--debug', action='store_true', default=False,
help='Add verbose messages during template render')
@@ -52,8 +63,10 @@ def get_parser(parser=None):
' /var/lib/cloud/instance/vendor-data.txt'))
parser.add_argument(
'varname', type=str, nargs='?',
- help=('A dot-delimited instance data variable to query from'
- ' instance-data query. For example: v2.local_hostname'))
+ help=('A dot-delimited specific variable to query from'
+ ' instance-data. For example: v1.local_hostname. If the'
+ ' value is not JSON serializable, it will be base64-encoded and'
+ ' will contain the prefix "ci-b64:". '))
parser.add_argument(
'-a', '--all', action='store_true', default=False, dest='dump_all',
help='Dump all available instance-data')
@@ -65,6 +78,21 @@ def get_parser(parser=None):
return parser
+def load_userdata(ud_file_path):
+ """Attempt to return a string of user-data from ud_file_path
+
+ Attempt to decode or decompress if needed.
+ If unable to decode the content, raw bytes will be returned.
+
+ @returns: String of uncompressed userdata if possible, otherwise bytes.
+ """
+ bdata = util.load_file(ud_file_path, decode=False)
+ try:
+ return bdata.decode('utf-8')
+ except UnicodeDecodeError:
+ return util.decomp_gzip(bdata, quiet=False, decode=True)
+
+
def handle_args(name, args):
"""Handle calls to 'cloud-init query' as a subcommand."""
paths = None
@@ -90,8 +118,9 @@ def handle_args(name, args):
instance_data_fn = sensitive_data_fn
else:
LOG.warning(
- 'Missing root-readable %s. Using redacted %s instead.',
- sensitive_data_fn, redacted_data_fn)
+ 'Missing root-readable %s. Using redacted %s instead.',
+ sensitive_data_fn, redacted_data_fn
+ )
instance_data_fn = redacted_data_fn
else:
instance_data_fn = redacted_data_fn
@@ -120,8 +149,8 @@ def handle_args(name, args):
instance_data['vendordata'] = (
'<%s> file:%s' % (REDACT_SENSITIVE_VALUE, vendor_data_fn))
else:
- instance_data['userdata'] = util.load_file(user_data_fn)
- instance_data['vendordata'] = util.load_file(vendor_data_fn)
+ instance_data['userdata'] = load_userdata(user_data_fn)
+ instance_data['vendordata'] = load_userdata(vendor_data_fn)
if args.format:
payload = '## template: jinja\n{fmt}'.format(fmt=args.format)
rendered_payload = render_jinja_payload(
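
A sketch of what the new load_userdata helper does with a gzip-compressed user-data file (the path and payload are illustrative):

    import gzip

    from cloudinit.cmd.query import load_userdata
    from cloudinit.util import write_file

    write_file('/tmp/user-data.gz', gzip.compress(b'#cloud-config\n'))
    # The raw gzip bytes fail UTF-8 decoding, so load_userdata falls back
    # to util.decomp_gzip and returns the decompressed string.
    print(load_userdata('/tmp/user-data.gz'))  # '#cloud-config\n'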
diff --git a/cloudinit/cmd/tests/test_clean.py b/cloudinit/cmd/tests/test_clean.py
index 13a69aa1..a848a810 100644
--- a/cloudinit/cmd/tests/test_clean.py
+++ b/cloudinit/cmd/tests/test_clean.py
@@ -167,7 +167,6 @@ class TestClean(CiTestCase):
wrap_and_call(
'cloudinit.cmd.clean',
{'Init': {'side_effect': self.init_class},
- 'sys.exit': {'side_effect': self.sys_exit},
'sys.argv': {'new': ['clean', '--logs']}},
clean.main)
diff --git a/cloudinit/cmd/tests/test_main.py b/cloudinit/cmd/tests/test_main.py
index 384fddc6..585b3b0e 100644
--- a/cloudinit/cmd/tests/test_main.py
+++ b/cloudinit/cmd/tests/test_main.py
@@ -18,8 +18,6 @@ myargs = namedtuple('MyArgs', 'debug files force local reporter subcommand')
class TestMain(FilesystemMockingTestCase):
- with_logs = True
-
def setUp(self):
super(TestMain, self).setUp()
self.new_root = self.tmp_dir()
diff --git a/cloudinit/cmd/tests/test_query.py b/cloudinit/cmd/tests/test_query.py
index 6d36a4ea..c258d321 100644
--- a/cloudinit/cmd/tests/test_query.py
+++ b/cloudinit/cmd/tests/test_query.py
@@ -1,195 +1,260 @@
# This file is part of cloud-init. See LICENSE file for license information.
import errno
-from io import StringIO
+import gzip
+from io import BytesIO
+import json
from textwrap import dedent
-import os
+
+import pytest
from collections import namedtuple
from cloudinit.cmd import query
from cloudinit.helpers import Paths
from cloudinit.sources import (
REDACT_SENSITIVE_VALUE, INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE)
-from cloudinit.tests.helpers import CiTestCase, mock
-from cloudinit.util import ensure_dir, write_file
+from cloudinit.tests.helpers import mock
+
+from cloudinit.util import b64e, write_file
+
+def _gzip_data(data):
+ with BytesIO() as iobuf:
+ with gzip.GzipFile(mode="wb", fileobj=iobuf) as gzfp:
+ gzfp.write(data)
+ return iobuf.getvalue()
-class TestQuery(CiTestCase):
- with_logs = True
+@mock.patch("cloudinit.cmd.query.addLogHandlerCLI", lambda *args: "")
+class TestQuery:
args = namedtuple(
'queryargs',
('debug dump_all format instance_data list_keys user_data vendor_data'
' varname'))
- def setUp(self):
- super(TestQuery, self).setUp()
- self.tmp = self.tmp_dir()
- self.instance_data = self.tmp_path('instance-data', dir=self.tmp)
+ def _setup_paths(self, tmpdir, ud_val=None, vd_val=None):
+ """Write userdata and vendordata into a tmpdir.
- def test_handle_args_error_on_missing_param(self):
+ Return:
+ 4-tuple : (paths, run_dir_path, userdata_path, vendordata_path)
+ """
+ if ud_val:
+ user_data = tmpdir.join('user-data')
+ write_file(user_data.strpath, ud_val)
+ else:
+ user_data = None
+ if vd_val:
+ vendor_data = tmpdir.join('vendor-data')
+ write_file(vendor_data.strpath, vd_val)
+ else:
+ vendor_data = None
+ run_dir = tmpdir.join('run_dir')
+ run_dir.ensure_dir()
+ return (
+ Paths({'run_dir': run_dir.strpath}),
+ run_dir,
+ user_data,
+ vendor_data
+ )
+
+ def test_handle_args_error_on_missing_param(self, caplog, capsys):
"""Error when missing required parameters and print usage."""
args = self.args(
debug=False, dump_all=False, format=None, instance_data=None,
list_keys=False, user_data=None, vendor_data=None, varname=None)
- with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- self.assertEqual(1, query.handle_args('anyname', args))
+ with mock.patch(
+ "cloudinit.cmd.query.addLogHandlerCLI", return_value=""
+ ) as m_cli_log:
+ assert 1 == query.handle_args('anyname', args)
expected_error = (
- 'ERROR: Expected one of the options: --all, --format, --list-keys'
+ 'Expected one of the options: --all, --format, --list-keys'
' or varname\n')
- self.assertIn(expected_error, self.logs.getvalue())
- self.assertIn('usage: query', m_stdout.getvalue())
- self.assertIn(expected_error, m_stderr.getvalue())
+ assert expected_error in caplog.text
+ out, _err = capsys.readouterr()
+ assert 'usage: query' in out
+ assert 1 == m_cli_log.call_count
- def test_handle_args_error_on_missing_instance_data(self):
+ def test_handle_args_error_on_missing_instance_data(self, caplog, tmpdir):
"""When instance_data file path does not exist, log an error."""
- absent_fn = self.tmp_path('absent', dir=self.tmp)
+ absent_fn = tmpdir.join('absent')
args = self.args(
- debug=False, dump_all=True, format=None, instance_data=absent_fn,
+ debug=False, dump_all=True, format=None,
+ instance_data=absent_fn.strpath,
list_keys=False, user_data='ud', vendor_data='vd', varname=None)
- with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
- self.assertEqual(1, query.handle_args('anyname', args))
- self.assertIn(
- 'ERROR: Missing instance-data file: %s' % absent_fn,
- self.logs.getvalue())
- self.assertIn(
- 'ERROR: Missing instance-data file: %s' % absent_fn,
- m_stderr.getvalue())
+ assert 1 == query.handle_args('anyname', args)
- def test_handle_args_error_when_no_read_permission_instance_data(self):
+ msg = 'Missing instance-data file: %s' % absent_fn
+ assert msg in caplog.text
+
+ def test_handle_args_error_when_no_read_permission_instance_data(
+ self, caplog, tmpdir
+ ):
"""When instance_data file is unreadable, log an error."""
- noread_fn = self.tmp_path('unreadable', dir=self.tmp)
- write_file(noread_fn, 'thou shall not pass')
+ noread_fn = tmpdir.join('unreadable')
+ noread_fn.write('thou shall not pass')
args = self.args(
- debug=False, dump_all=True, format=None, instance_data=noread_fn,
+ debug=False, dump_all=True, format=None,
+ instance_data=noread_fn.strpath,
list_keys=False, user_data='ud', vendor_data='vd', varname=None)
- with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
- with mock.patch('cloudinit.cmd.query.util.load_file') as m_load:
- m_load.side_effect = OSError(errno.EACCES, 'Not allowed')
- self.assertEqual(1, query.handle_args('anyname', args))
- self.assertIn(
- "ERROR: No read permission on '%s'. Try sudo" % noread_fn,
- self.logs.getvalue())
- self.assertIn(
- "ERROR: No read permission on '%s'. Try sudo" % noread_fn,
- m_stderr.getvalue())
+ with mock.patch('cloudinit.cmd.query.util.load_file') as m_load:
+ m_load.side_effect = OSError(errno.EACCES, 'Not allowed')
+ assert 1 == query.handle_args('anyname', args)
+ msg = "No read permission on '%s'. Try sudo" % noread_fn
+ assert msg in caplog.text
- def test_handle_args_defaults_instance_data(self):
+ def test_handle_args_defaults_instance_data(self, caplog, tmpdir):
"""When no instance_data argument, default to configured run_dir."""
args = self.args(
debug=False, dump_all=True, format=None, instance_data=None,
list_keys=False, user_data=None, vendor_data=None, varname=None)
- run_dir = self.tmp_path('run_dir', dir=self.tmp)
- ensure_dir(run_dir)
- paths = Paths({'run_dir': run_dir})
- self.add_patch('cloudinit.cmd.query.read_cfg_paths', 'm_paths')
- self.m_paths.return_value = paths
- with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
- self.assertEqual(1, query.handle_args('anyname', args))
- json_file = os.path.join(run_dir, INSTANCE_JSON_FILE)
- self.assertIn(
- 'ERROR: Missing instance-data file: %s' % json_file,
- self.logs.getvalue())
- self.assertIn(
- 'ERROR: Missing instance-data file: %s' % json_file,
- m_stderr.getvalue())
+ paths, run_dir, _, _ = self._setup_paths(tmpdir)
+ with mock.patch('cloudinit.cmd.query.read_cfg_paths') as m_paths:
+ m_paths.return_value = paths
+ assert 1 == query.handle_args('anyname', args)
+ json_file = run_dir.join(INSTANCE_JSON_FILE)
+ msg = 'Missing instance-data file: %s' % json_file.strpath
+ assert msg in caplog.text
- def test_handle_args_root_fallsback_to_instance_data(self):
+ def test_handle_args_root_fallsback_to_instance_data(self, caplog, tmpdir):
"""When no instance_data argument, root falls back to redacted json."""
args = self.args(
debug=False, dump_all=True, format=None, instance_data=None,
list_keys=False, user_data=None, vendor_data=None, varname=None)
- run_dir = self.tmp_path('run_dir', dir=self.tmp)
- ensure_dir(run_dir)
- paths = Paths({'run_dir': run_dir})
- self.add_patch('cloudinit.cmd.query.read_cfg_paths', 'm_paths')
- self.m_paths.return_value = paths
- with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
+ paths, run_dir, _, _ = self._setup_paths(tmpdir)
+ with mock.patch('cloudinit.cmd.query.read_cfg_paths') as m_paths:
+ m_paths.return_value = paths
with mock.patch('os.getuid') as m_getuid:
m_getuid.return_value = 0
- self.assertEqual(1, query.handle_args('anyname', args))
- json_file = os.path.join(run_dir, INSTANCE_JSON_FILE)
- sensitive_file = os.path.join(run_dir, INSTANCE_JSON_SENSITIVE_FILE)
- self.assertIn(
- 'WARNING: Missing root-readable %s. Using redacted %s instead.' % (
- sensitive_file, json_file),
- m_stderr.getvalue())
+ assert 1 == query.handle_args('anyname', args)
+ json_file = run_dir.join(INSTANCE_JSON_FILE)
+ sensitive_file = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE)
+ msg = (
+ 'Missing root-readable %s. Using redacted %s instead.' %
+ (
+ sensitive_file.strpath, json_file.strpath
+ )
+ )
+ assert msg in caplog.text
- def test_handle_args_root_uses_instance_sensitive_data(self):
- """When no instance_data argument, root uses semsitive json."""
- user_data = self.tmp_path('user-data', dir=self.tmp)
- vendor_data = self.tmp_path('vendor-data', dir=self.tmp)
- write_file(user_data, 'ud')
- write_file(vendor_data, 'vd')
- run_dir = self.tmp_path('run_dir', dir=self.tmp)
- sensitive_file = os.path.join(run_dir, INSTANCE_JSON_SENSITIVE_FILE)
- write_file(sensitive_file, '{"my-var": "it worked"}')
- ensure_dir(run_dir)
- paths = Paths({'run_dir': run_dir})
- self.add_patch('cloudinit.cmd.query.read_cfg_paths', 'm_paths')
- self.m_paths.return_value = paths
+ @pytest.mark.parametrize(
+ 'ud_src,ud_expected,vd_src,vd_expected',
+ (
+ ('hi mom', 'hi mom', 'hi pops', 'hi pops'),
+ ('ud'.encode('utf-8'), 'ud', 'vd'.encode('utf-8'), 'vd'),
+ (_gzip_data(b'ud'), 'ud', _gzip_data(b'vd'), 'vd'),
+ (_gzip_data('ud'.encode('utf-8')), 'ud', _gzip_data(b'vd'), 'vd'),
+ )
+ )
+ def test_handle_args_root_processes_user_data(
+ self, ud_src, ud_expected, vd_src, vd_expected, capsys, tmpdir
+ ):
+ """Support reading multiple user-data file content types"""
+ paths, run_dir, user_data, vendor_data = self._setup_paths(
+ tmpdir, ud_val=ud_src, vd_val=vd_src
+ )
+ sensitive_file = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE)
+ sensitive_file.write('{"my-var": "it worked"}')
args = self.args(
debug=False, dump_all=True, format=None, instance_data=None,
- list_keys=False, user_data=vendor_data, vendor_data=vendor_data,
- varname=None)
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
+ list_keys=False, user_data=user_data.strpath,
+ vendor_data=vendor_data.strpath, varname=None)
+ with mock.patch('cloudinit.cmd.query.read_cfg_paths') as m_paths:
+ m_paths.return_value = paths
with mock.patch('os.getuid') as m_getuid:
m_getuid.return_value = 0
- self.assertEqual(0, query.handle_args('anyname', args))
- self.assertEqual(
- '{\n "my_var": "it worked",\n "userdata": "vd",\n '
- '"vendordata": "vd"\n}\n', m_stdout.getvalue())
+ assert 0 == query.handle_args('anyname', args)
+ out, _err = capsys.readouterr()
+ cmd_output = json.loads(out)
+ assert "it worked" == cmd_output['my_var']
+ if ud_expected == "ci-b64:":
+ ud_expected = "ci-b64:{}".format(b64e(ud_src))
+ if vd_expected == "ci-b64:":
+ vd_expected = "ci-b64:{}".format(b64e(vd_src))
+ assert ud_expected == cmd_output['userdata']
+ assert vd_expected == cmd_output['vendordata']
- def test_handle_args_dumps_all_instance_data(self):
+ def test_handle_args_root_uses_instance_sensitive_data(
+ self, capsys, tmpdir
+ ):
+ """When no instance_data argument, root uses sensitive json."""
+ paths, run_dir, user_data, vendor_data = self._setup_paths(
+ tmpdir, ud_val='ud', vd_val='vd'
+ )
+ sensitive_file = run_dir.join(INSTANCE_JSON_SENSITIVE_FILE)
+ sensitive_file.write('{"my-var": "it worked"}')
+ args = self.args(
+ debug=False, dump_all=True, format=None, instance_data=None,
+ list_keys=False, user_data=user_data.strpath,
+ vendor_data=vendor_data.strpath, varname=None)
+ with mock.patch('cloudinit.cmd.query.read_cfg_paths') as m_paths:
+ m_paths.return_value = paths
+ with mock.patch('os.getuid') as m_getuid:
+ m_getuid.return_value = 0
+ assert 0 == query.handle_args('anyname', args)
+ expected = (
+ '{\n "my_var": "it worked",\n "userdata": "ud",\n '
+ '"vendordata": "vd"\n}\n'
+ )
+ out, _err = capsys.readouterr()
+ assert expected == out
+
+ def test_handle_args_dumps_all_instance_data(self, capsys, tmpdir):
"""When --all is specified query will dump all instance data vars."""
- write_file(self.instance_data, '{"my-var": "it worked"}')
+ instance_data = tmpdir.join('instance-data')
+ instance_data.write('{"my-var": "it worked"}')
args = self.args(
debug=False, dump_all=True, format=None,
- instance_data=self.instance_data, list_keys=False,
+ instance_data=instance_data.strpath, list_keys=False,
user_data='ud', vendor_data='vd', varname=None)
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- with mock.patch('os.getuid') as m_getuid:
- m_getuid.return_value = 100
- self.assertEqual(0, query.handle_args('anyname', args))
- self.assertEqual(
+ with mock.patch('os.getuid') as m_getuid:
+ m_getuid.return_value = 100
+ assert 0 == query.handle_args('anyname', args)
+ expected = (
'{\n "my_var": "it worked",\n "userdata": "<%s> file:ud",\n'
' "vendordata": "<%s> file:vd"\n}\n' % (
- REDACT_SENSITIVE_VALUE, REDACT_SENSITIVE_VALUE),
- m_stdout.getvalue())
+ REDACT_SENSITIVE_VALUE, REDACT_SENSITIVE_VALUE
+ )
+ )
+ out, _err = capsys.readouterr()
+ assert expected == out
- def test_handle_args_returns_top_level_varname(self):
+ def test_handle_args_returns_top_level_varname(self, capsys, tmpdir):
"""When the argument varname is passed, report its value."""
- write_file(self.instance_data, '{"my-var": "it worked"}')
+ instance_data = tmpdir.join('instance-data')
+ instance_data.write('{"my-var": "it worked"}')
args = self.args(
debug=False, dump_all=True, format=None,
- instance_data=self.instance_data, list_keys=False,
+ instance_data=instance_data.strpath, list_keys=False,
user_data='ud', vendor_data='vd', varname='my_var')
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- with mock.patch('os.getuid') as m_getuid:
- m_getuid.return_value = 100
- self.assertEqual(0, query.handle_args('anyname', args))
- self.assertEqual('it worked\n', m_stdout.getvalue())
+ with mock.patch('os.getuid') as m_getuid:
+ m_getuid.return_value = 100
+ assert 0 == query.handle_args('anyname', args)
+ out, _err = capsys.readouterr()
+ assert 'it worked\n' == out
- def test_handle_args_returns_nested_varname(self):
+ def test_handle_args_returns_nested_varname(self, capsys, tmpdir):
"""If user_data file is a jinja template render instance-data vars."""
- write_file(self.instance_data,
- '{"v1": {"key-2": "value-2"}, "my-var": "it worked"}')
+ instance_data = tmpdir.join('instance-data')
+ instance_data.write(
+ '{"v1": {"key-2": "value-2"}, "my-var": "it worked"}'
+ )
args = self.args(
debug=False, dump_all=False, format=None,
- instance_data=self.instance_data, user_data='ud', vendor_data='vd',
- list_keys=False, varname='v1.key_2')
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- with mock.patch('os.getuid') as m_getuid:
- m_getuid.return_value = 100
- self.assertEqual(0, query.handle_args('anyname', args))
- self.assertEqual('value-2\n', m_stdout.getvalue())
+ instance_data=instance_data.strpath, user_data='ud',
+ vendor_data='vd', list_keys=False, varname='v1.key_2')
+ with mock.patch('os.getuid') as m_getuid:
+ m_getuid.return_value = 100
+ assert 0 == query.handle_args('anyname', args)
+ out, _err = capsys.readouterr()
+ assert 'value-2\n' == out
- def test_handle_args_returns_standardized_vars_to_top_level_aliases(self):
+ def test_handle_args_returns_standardized_vars_to_top_level_aliases(
+ self, capsys, tmpdir
+ ):
"""Any standardized vars under v# are promoted as top-level aliases."""
- write_file(
- self.instance_data,
+ instance_data = tmpdir.join('instance-data')
+ instance_data.write(
'{"v1": {"v1_1": "val1.1"}, "v2": {"v2_2": "val2.2"},'
' "top": "gun"}')
expected = dedent("""\
@@ -209,65 +274,68 @@ class TestQuery(CiTestCase):
""")
args = self.args(
debug=False, dump_all=True, format=None,
- instance_data=self.instance_data, user_data='ud', vendor_data='vd',
- list_keys=False, varname=None)
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- with mock.patch('os.getuid') as m_getuid:
- m_getuid.return_value = 100
- self.assertEqual(0, query.handle_args('anyname', args))
- self.assertEqual(expected, m_stdout.getvalue())
+ instance_data=instance_data.strpath, user_data='ud',
+ vendor_data='vd', list_keys=False, varname=None)
+ with mock.patch('os.getuid') as m_getuid:
+ m_getuid.return_value = 100
+ assert 0 == query.handle_args('anyname', args)
+ out, _err = capsys.readouterr()
+ assert expected == out
- def test_handle_args_list_keys_sorts_top_level_keys_when_no_varname(self):
+ def test_handle_args_list_keys_sorts_top_level_keys_when_no_varname(
+ self, capsys, tmpdir
+ ):
"""Sort all top-level keys when only --list-keys provided."""
- write_file(
- self.instance_data,
+ instance_data = tmpdir.join('instance-data')
+ instance_data.write(
'{"v1": {"v1_1": "val1.1"}, "v2": {"v2_2": "val2.2"},'
' "top": "gun"}')
expected = 'top\nuserdata\nv1\nv1_1\nv2\nv2_2\nvendordata\n'
args = self.args(
debug=False, dump_all=False, format=None,
- instance_data=self.instance_data, list_keys=True, user_data='ud',
- vendor_data='vd', varname=None)
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- with mock.patch('os.getuid') as m_getuid:
- m_getuid.return_value = 100
- self.assertEqual(0, query.handle_args('anyname', args))
- self.assertEqual(expected, m_stdout.getvalue())
+ instance_data=instance_data.strpath, list_keys=True,
+ user_data='ud', vendor_data='vd', varname=None)
+ with mock.patch('os.getuid') as m_getuid:
+ m_getuid.return_value = 100
+ assert 0 == query.handle_args('anyname', args)
+ out, _err = capsys.readouterr()
+ assert expected == out
- def test_handle_args_list_keys_sorts_nested_keys_when_varname(self):
+ def test_handle_args_list_keys_sorts_nested_keys_when_varname(
+ self, capsys, tmpdir
+ ):
"""Sort all nested keys of varname object when --list-keys provided."""
- write_file(
- self.instance_data,
+ instance_data = tmpdir.join('instance-data')
+ instance_data.write(
'{"v1": {"v1_1": "val1.1", "v1_2": "val1.2"}, "v2":' +
' {"v2_2": "val2.2"}, "top": "gun"}')
expected = 'v1_1\nv1_2\n'
args = self.args(
debug=False, dump_all=False, format=None,
- instance_data=self.instance_data, list_keys=True,
+ instance_data=instance_data.strpath, list_keys=True,
user_data='ud', vendor_data='vd', varname='v1')
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- with mock.patch('os.getuid') as m_getuid:
- m_getuid.return_value = 100
- self.assertEqual(0, query.handle_args('anyname', args))
- self.assertEqual(expected, m_stdout.getvalue())
+ with mock.patch('os.getuid') as m_getuid:
+ m_getuid.return_value = 100
+ assert 0 == query.handle_args('anyname', args)
+ out, _err = capsys.readouterr()
+ assert expected == out
- def test_handle_args_list_keys_errors_when_varname_is_not_a_dict(self):
+ def test_handle_args_list_keys_errors_when_varname_is_not_a_dict(
+ self, caplog, tmpdir
+ ):
"""Raise an error when --list-keys and varname specify a non-list."""
- write_file(
- self.instance_data,
+ instance_data = tmpdir.join('instance-data')
+ instance_data.write(
'{"v1": {"v1_1": "val1.1", "v1_2": "val1.2"}, "v2": ' +
'{"v2_2": "val2.2"}, "top": "gun"}')
- expected_error = "ERROR: --list-keys provided but 'top' is not a dict"
+ expected_error = "--list-keys provided but 'top' is not a dict"
args = self.args(
debug=False, dump_all=False, format=None,
- instance_data=self.instance_data, list_keys=True, user_data='ud',
- vendor_data='vd', varname='top')
- with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
- with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- with mock.patch('os.getuid') as m_getuid:
- m_getuid.return_value = 100
- self.assertEqual(1, query.handle_args('anyname', args))
- self.assertEqual('', m_stdout.getvalue())
- self.assertIn(expected_error, m_stderr.getvalue())
+ instance_data=instance_data.strpath, list_keys=True,
+ user_data='ud', vendor_data='vd', varname='top')
+ with mock.patch('os.getuid') as m_getuid:
+ m_getuid.return_value = 100
+ assert 1 == query.handle_args('anyname', args)
+ assert expected_error in caplog.text
# vi: ts=4 expandtab
diff --git a/cloudinit/cmd/tests/test_status.py b/cloudinit/cmd/tests/test_status.py
index 1ed10896..1c9eec37 100644
--- a/cloudinit/cmd/tests/test_status.py
+++ b/cloudinit/cmd/tests/test_status.py
@@ -382,7 +382,6 @@ class TestStatus(CiTestCase):
wrap_and_call(
'cloudinit.cmd.status',
{'sys.argv': {'new': ['status']},
- 'sys.exit': {'side_effect': self.sys_exit},
'_is_cloudinit_disabled': (False, ''),
'Init': {'side_effect': self.init_class}},
status.main)
diff --git a/cloudinit/config/cc_apk_configure.py b/cloudinit/config/cc_apk_configure.py
new file mode 100644
index 00000000..84d7a0b6
--- /dev/null
+++ b/cloudinit/config/cc_apk_configure.py
@@ -0,0 +1,263 @@
+# Copyright (c) 2020 Dermot Bradley
+#
+# Author: Dermot Bradley <dermot_bradley@yahoo.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Apk Configure: Configures apk repositories file."""
+
+from textwrap import dedent
+
+from cloudinit import log as logging
+from cloudinit import temp_utils
+from cloudinit import templater
+from cloudinit import util
+from cloudinit.config.schema import (
+ get_schema_doc, validate_cloudconfig_schema)
+from cloudinit.settings import PER_INSTANCE
+
+LOG = logging.getLogger(__name__)
+
+
+# If no mirror is specified then use this one
+DEFAULT_MIRROR = "https://alpine.global.ssl.fastly.net/alpine"
+
+
+REPOSITORIES_TEMPLATE = """\
+## template:jinja
+#
+# Created by cloud-init
+#
+# This file is written on first boot of an instance
+#
+
+{{ alpine_baseurl }}/{{ alpine_version }}/main
+{% if community_enabled -%}
+{{ alpine_baseurl }}/{{ alpine_version }}/community
+{% endif -%}
+{% if testing_enabled -%}
+{% if alpine_version != 'edge' %}
+#
+# Testing - using with non-Edge installation may cause problems!
+#
+{% endif %}
+{{ alpine_baseurl }}/edge/testing
+{% endif %}
+{% if local_repo != '' %}
+
+#
+# Local repo
+#
+{{ local_repo }}/{{ alpine_version }}
+{% endif %}
+
+"""
+
+
+frequency = PER_INSTANCE
+distros = ['alpine']
+schema = {
+ 'id': 'cc_apk_configure',
+ 'name': 'APK Configure',
+ 'title': 'Configure apk repositories file',
+ 'description': dedent("""\
+ This module handles configuration of the /etc/apk/repositories file.
+
+ .. note::
+ To ensure that apk configuration is valid yaml, any strings
+ containing special characters, especially ``:`` should be quoted.
+ """),
+ 'distros': distros,
+ 'examples': [
+ dedent("""\
+ # Keep the existing /etc/apk/repositories file unaltered.
+ apk_repos:
+ preserve_repositories: true
+ """),
+ dedent("""\
+ # Create repositories file for Alpine v3.12 main and community
+ # using default mirror site.
+ apk_repos:
+ alpine_repo:
+ community_enabled: true
+ version: 'v3.12'
+ """),
+ dedent("""\
+ # Create repositories file for Alpine Edge main, community, and
+ # testing using a specified mirror site and also a local repo.
+ apk_repos:
+ alpine_repo:
+ base_url: 'https://some-alpine-mirror/alpine'
+ community_enabled: true
+ testing_enabled: true
+ version: 'edge'
+ local_repo_base_url: 'https://my-local-server/local-alpine'
+ """),
+ ],
+ 'frequency': frequency,
+ 'type': 'object',
+ 'properties': {
+ 'apk_repos': {
+ 'type': 'object',
+ 'properties': {
+ 'preserve_repositories': {
+ 'type': 'boolean',
+ 'default': False,
+ 'description': dedent("""\
+ By default, cloud-init will generate a new repositories
+ file ``/etc/apk/repositories`` based on any valid
+ configuration settings specified within a apk_repos
+ section of cloud config. To disable this behavior and
+ preserve the repositories file from the pristine image,
+ set ``preserve_repositories`` to ``true``.
+
+ The ``preserve_repositories`` option overrides
+ all other config keys that would alter
+ ``/etc/apk/repositories``.
+ """)
+ },
+ 'alpine_repo': {
+ 'type': ['object', 'null'],
+ 'properties': {
+ 'base_url': {
+ 'type': 'string',
+ 'default': DEFAULT_MIRROR,
+ 'description': dedent("""\
+ The base URL of an Alpine repository, or
+ mirror, to download official packages from.
+ If not specified then it defaults to ``{}``
+ """.format(DEFAULT_MIRROR))
+ },
+ 'community_enabled': {
+ 'type': 'boolean',
+ 'default': False,
+ 'description': dedent("""\
+ Whether to add the Community repo to the
+ repositories file. By default the Community
+ repo is not included.
+ """)
+ },
+ 'testing_enabled': {
+ 'type': 'boolean',
+ 'default': False,
+ 'description': dedent("""\
+ Whether to add the Testing repo to the
+ repositories file. By default the Testing
+ repo is not included. It is only recommended
+ to use the Testing repo on a machine running
+ the ``Edge`` version of Alpine as packages
+ installed from Testing may have dependencies
+ that conflict with those in non-Edge Main or
+ Community repos.
+ """)
+ },
+ 'version': {
+ 'type': 'string',
+ 'description': dedent("""\
+ The Alpine version to use (e.g. ``v3.12`` or
+ ``edge``)
+ """)
+ },
+ },
+ 'required': ['version'],
+ 'minProperties': 1,
+ 'additionalProperties': False,
+ },
+ 'local_repo_base_url': {
+ 'type': 'string',
+ 'description': dedent("""\
+ The base URL of an Alpine repository containing
+ unofficial packages
+ """)
+ }
+ },
+ 'required': [],
+ 'minProperties': 1, # Either preserve_repositories or alpine_repo
+ 'additionalProperties': False,
+ }
+ }
+}
+
+__doc__ = get_schema_doc(schema)
+
+
+def handle(name, cfg, cloud, log, _args):
+ """
+ Call to handle apk_repos sections in cloud-config file.
+
+ @param name: The module name "apk-configure" from cloud.cfg
+ @param cfg: A nested dict containing the entire cloud config contents.
+ @param cloud: The CloudInit object in use.
+ @param log: Pre-initialized Python logger object to use for logging.
+ @param _args: Any module arguments from cloud.cfg
+ """
+
+ # If there is no "apk_repos" section in the configuration
+ # then do nothing.
+ apk_section = cfg.get('apk_repos')
+ if not apk_section:
+ LOG.debug(("Skipping module named %s,"
+ " no 'apk_repos' section found"), name)
+ return
+
+ validate_cloudconfig_schema(cfg, schema)
+
+ # If "preserve_repositories" is explicitly set to True in
+ # the configuration do nothing.
+ if util.get_cfg_option_bool(apk_section, 'preserve_repositories', False):
+ LOG.debug(("Skipping module named %s,"
+ " 'preserve_repositories' is set"), name)
+ return
+
+ # If there is no "alpine_repo" subsection of "apk_repos" present in the
+ # configuration then do nothing, as at least "version" is required to
+ # create valid repositories entries.
+ alpine_repo = apk_section.get('alpine_repo')
+ if not alpine_repo:
+ LOG.debug(("Skipping module named %s,"
+ " no 'alpine_repo' configuration found"), name)
+ return
+
+ # If there is no "version" value present in configuration then do nothing.
+ alpine_version = alpine_repo.get('version')
+ if not alpine_version:
+ LOG.debug(("Skipping module named %s,"
+ " 'version' not specified in alpine_repo"), name)
+ return
+
+ local_repo = apk_section.get('local_repo_base_url', '')
+
+ _write_repositories_file(alpine_repo, alpine_version, local_repo)
+
+
+def _write_repositories_file(alpine_repo, alpine_version, local_repo):
+ """
+ Write the /etc/apk/repositories file with the specified entries.
+
+ @param alpine_repo: A nested dict of the alpine_repo configuration.
+ @param alpine_version: A string of the Alpine version to use.
+ @param local_repo: A string containing the base URL of a local repo.
+ """
+
+ repo_file = '/etc/apk/repositories'
+
+ alpine_baseurl = alpine_repo.get('base_url', DEFAULT_MIRROR)
+
+ params = {'alpine_baseurl': alpine_baseurl,
+ 'alpine_version': alpine_version,
+ 'community_enabled': alpine_repo.get('community_enabled'),
+ 'testing_enabled': alpine_repo.get('testing_enabled'),
+ 'local_repo': local_repo}
+
+ tfile = temp_utils.mkstemp(prefix='template_name-', suffix=".tmpl")
+ template_fn = tfile[1] # Filepath is second item in tuple
+ util.write_file(template_fn, content=REPOSITORIES_TEMPLATE)
+
+ LOG.debug('Generating Alpine repository configuration file: %s',
+ repo_file)
+ templater.render_to_file(template_fn, repo_file, params)
+ # Clean up temporary template
+ util.del_file(template_fn)
+
+
+# vi: ts=4 expandtab
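
The write-template-then-render pattern used by ``_write_repositories_file`` above can be sketched in isolation. This is a minimal sketch, not the module's real template: ``EXAMPLE_TEMPLATE`` and the output path are hypothetical stand-ins, and it assumes cloud-init's basic ``$variable`` template syntax::

    from cloudinit import temp_utils, templater, util

    # Hypothetical stand-in; the module's real REPOSITORIES_TEMPLATE is larger.
    EXAMPLE_TEMPLATE = "${alpine_baseurl}/${alpine_version}/main\n"

    params = {'alpine_baseurl': 'https://alpine.global.ssl.fastly.net/alpine',
              'alpine_version': 'v3.12'}

    # mkstemp returns a (fd, path) tuple; the path is the second item.
    tfile = temp_utils.mkstemp(prefix='template_name-', suffix='.tmpl')
    template_fn = tfile[1]
    util.write_file(template_fn, content=EXAMPLE_TEMPLATE)
    templater.render_to_file(template_fn, '/tmp/repositories.example', params)
    util.del_file(template_fn)  # remove the temporary template
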
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index c44dec45..73d8719f 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -6,228 +6,372 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Apt Configure
--------------
-**Summary:** configure apt
-
-This module handles both configuration of apt options and adding source lists.
-There are configuration options such as ``apt_get_wrapper`` and
-``apt_get_command`` that control how cloud-init invokes apt-get.
-These configuration options are handled on a per-distro basis, so consult
-documentation for cloud-init's distro support for instructions on using
-these config options.
-
-.. note::
- To ensure that apt configuration is valid yaml, any strings containing
- special characters, especially ``:`` should be quoted.
-
-.. note::
- For more information about apt configuration, see the
- ``Additional apt configuration`` example.
-
-**Preserve sources.list:**
-
-By default, cloud-init will generate a new sources list in
-``/etc/apt/sources.list.d`` based on any changes specified in cloud config.
-To disable this behavior and preserve the sources list from the pristine image,
-set ``preserve_sources_list`` to ``true``.
-
-.. note::
- The ``preserve_sources_list`` option overrides all other config keys that
- would alter ``sources.list`` or ``sources.list.d``, **except** for
- additional sources to be added to ``sources.list.d``.
-
-**Disable source suites:**
-
-Entries in the sources list can be disabled using ``disable_suites``, which
-takes a list of suites to be disabled. If the string ``$RELEASE`` is present in
-a suite in the ``disable_suites`` list, it will be replaced with the release
-name. If a suite specified in ``disable_suites`` is not present in
-``sources.list`` it will be ignored. For convenience, several aliases are
-provided for ``disable_suites``:
-
- - ``updates`` => ``$RELEASE-updates``
- - ``backports`` => ``$RELEASE-backports``
- - ``security`` => ``$RELEASE-security``
- - ``proposed`` => ``$RELEASE-proposed``
- - ``release`` => ``$RELEASE``
-
-.. note::
- When a suite is disabled using ``disable_suites``, its entry in
- ``sources.list`` is not deleted; it is just commented out.
-
-**Configure primary and security mirrors:**
-
-The primary and security archive mirrors can be specified using the ``primary``
-and ``security`` keys, respectively. Both the ``primary`` and ``security`` keys
-take a list of configs, allowing mirrors to be specified on a per-architecture
-basis. Each config is a dictionary which must have an entry for ``arches``,
-specifying which architectures that config entry is for. The keyword
-``default`` applies to any architecture not explicitly listed. The mirror url
-can be specified with the ``uri`` key, or a list of mirrors to check can be
-provided in order, with the first mirror that can be resolved being selected.
-This allows the same configuration to be used in different environment, with
-different hosts used for a local apt mirror. If no mirror is provided by
-``uri`` or ``search``, ``search_dns`` may be used to search for dns names in
-the format ``<distro>-mirror`` in each of the following:
-
- - fqdn of this host per cloud metadata
- - localdomain
- - domains listed in ``/etc/resolv.conf``
-
-If there is a dns entry for ``<distro>-mirror``, then it is assumed that there
-is a distro mirror at ``http://<distro>-mirror.<domain>/<distro>``. If the
-``primary`` key is defined, but not the ``security`` key, then then
-configuration for ``primary`` is also used for ``security``. If ``search_dns``
-is used for the ``security`` key, the search pattern will be.
-``<distro>-security-mirror``.
-
-If no mirrors are specified, or all lookups fail, then default mirrors defined
-in the datasource are used. If none are present in the datasource either the
-following defaults are used:
-
- - primary: ``http://archive.ubuntu.com/ubuntu``
- - security: ``http://security.ubuntu.com/ubuntu``
-
-**Specify sources.list template:**
-
-A custom template for rendering ``sources.list`` can be specefied with
-``sources_list``. If no ``sources_list`` template is given, cloud-init will
-use sane default. Within this template, the following strings will be replaced
-with the appropriate values:
-
- - ``$MIRROR``
- - ``$RELEASE``
- - ``$PRIMARY``
- - ``$SECURITY``
-
-**Pass configuration to apt:**
-
-Apt configuration can be specified using ``conf``. Configuration is specified
-as a string. For multiline apt configuration, make sure to follow yaml syntax.
-
-**Configure apt proxy:**
-
-Proxy configuration for apt can be specified using ``conf``, but proxy config
-keys also exist for convenience. The proxy config keys, ``http_proxy``,
-``ftp_proxy``, and ``https_proxy`` may be used to specify a proxy for http, ftp
-and https protocols respectively. The ``proxy`` key also exists as an alias for
-``http_proxy``. Proxy url is specified in the format
-``<protocol>://[[user][:pass]@]host[:port]/``.
-
-**Add apt repos by regex:**
+"""Apt Configure: Configure apt for the user."""
-All source entries in ``apt-sources`` that match regex in
-``add_apt_repo_match`` will be added to the system using
-``add-apt-repository``. If ``add_apt_repo_match`` is not specified, it defaults
-to ``^[\\w-]+:\\w``
-
-**Add source list entries:**
-
-Source list entries can be specified as a dictionary under the ``sources``
-config key, with key in the dict representing a different source file. The key
-of each source entry will be used as an id that can be referenced in
-other config entries, as well as the filename for the source's configuration
-under ``/etc/apt/sources.list.d``. If the name does not end with ``.list``,
-it will be appended. If there is no configuration for a key in ``sources``, no
-file will be written, but the key may still be referred to as an id in other
-``sources`` entries.
-
-Each entry under ``sources`` is a dictionary which may contain any of the
-following optional keys:
-
- - ``source``: a sources.list entry (some variable replacements apply)
- - ``keyid``: a key to import via shortid or fingerprint
- - ``key``: a raw PGP key
- - ``keyserver``: alternate keyserver to pull ``keyid`` key from
-
-The ``source`` key supports variable replacements for the following strings:
-
- - ``$MIRROR``
- - ``$PRIMARY``
- - ``$SECURITY``
- - ``$RELEASE``
-
-**Internal name:** ``cc_apt_configure``
+import glob
+import os
+import re
+from textwrap import dedent
-**Module frequency:** per instance
+from cloudinit.config.schema import (
+ get_schema_doc, validate_cloudconfig_schema)
+from cloudinit import gpg
+from cloudinit import log as logging
+from cloudinit import subp
+from cloudinit import templater
+from cloudinit import util
+from cloudinit.settings import PER_INSTANCE
-**Supported distros:** ubuntu, debian
+LOG = logging.getLogger(__name__)
-**Config keys**::
+# this will match 'XXX:YYY' (ie, 'cloud-archive:foo' or 'ppa:bar')
+ADD_APT_REPO_MATCH = r"^[\w-]+:\w"
- apt:
- preserve_sources_list: <true/false>
- disable_suites:
+frequency = PER_INSTANCE
+distros = ["ubuntu", "debian"]
+mirror_property = {
+ 'type': 'array',
+ 'item': {
+ 'type': 'object',
+ 'additionalProperties': False,
+ 'required': ['arches'],
+ 'properties': {
+ 'arches': {
+ 'type': 'array',
+ 'item': {
+ 'type': 'string'
+ },
+ 'minItems': 1
+ },
+ 'uri': {
+ 'type': 'string',
+ 'format': 'uri'
+ },
+ 'search': {
+ 'type': 'array',
+ 'item': {
+ 'type': 'string',
+ 'format': 'uri'
+ },
+ 'minItems': 1
+ },
+ 'search_dns': {
+ 'type': 'boolean',
+ }
+ }
+ }
+}
+schema = {
+ 'id': 'cc_apt_configure',
+ 'name': 'Apt Configure',
+ 'title': 'Configure apt for the user',
+ 'description': dedent("""\
+ This module handles both configuration of apt options and adding
+ source lists. There are configuration options such as
+ ``apt_get_wrapper`` and ``apt_get_command`` that control how
+ cloud-init invokes apt-get. These configuration options are
+ handled on a per-distro basis, so consult documentation for
+ cloud-init's distro support for instructions on using
+ these config options.
+
+ .. note::
+ To ensure that apt configuration is valid yaml, any strings
+ containing special characters, especially ``:`` should be quoted.
+
+ .. note::
+ For more information about apt configuration, see the
+ ``Additional apt configuration`` example."""),
+ 'distros': distros,
+ 'examples': [dedent("""\
+ apt:
+ preserve_sources_list: false
+ disable_suites:
- $RELEASE-updates
- backports
- $RELEASE
- mysuite
- primary:
+ primary:
- arches:
- amd64
- i386
- default
- uri: "http://us.archive.ubuntu.com/ubuntu"
+ uri: 'http://us.archive.ubuntu.com/ubuntu'
search:
- - "http://cool.but-sometimes-unreachable.com/ubuntu"
- - "http://us.archive.ubuntu.com/ubuntu"
+ - 'http://cool.but-sometimes-unreachable.com/ubuntu'
+ - 'http://us.archive.ubuntu.com/ubuntu'
search_dns: <true/false>
- arches:
- s390x
- arm64
- uri: "http://archive-to-use-for-arm64.example.com/ubuntu"
- security:
+ uri: 'http://archive-to-use-for-arm64.example.com/ubuntu'
+ security:
- arches:
- default
search_dns: true
- sources_list: |
- deb $MIRROR $RELEASE main restricted
- deb-src $MIRROR $RELEASE main restricted
- deb $PRIMARY $RELEASE universe restricted
- deb $SECURITY $RELEASE-security multiverse
- debconf_selections:
- set1: the-package the-package/some-flag boolean true
- conf: |
- APT {
- Get {
- Assume-Yes "true";
- Fix-Broken "true";
+ sources_list: |
+ deb $MIRROR $RELEASE main restricted
+ deb-src $MIRROR $RELEASE main restricted
+ deb $PRIMARY $RELEASE universe restricted
+ deb $SECURITY $RELEASE-security multiverse
+ debconf_selections:
+ set1: the-package the-package/some-flag boolean true
+ conf: |
+ APT {
+ Get {
+ Assume-Yes 'true';
+ Fix-Broken 'true';
+ }
+ }
+ proxy: 'http://[[user][:pass]@]host[:port]/'
+ http_proxy: 'http://[[user][:pass]@]host[:port]/'
+ ftp_proxy: 'ftp://[[user][:pass]@]host[:port]/'
+ https_proxy: 'https://[[user][:pass]@]host[:port]/'
+ sources:
+ source1:
+ keyid: 'keyid'
+ keyserver: 'keyserverurl'
+ source: 'deb http://<url>/ xenial main'
+ source2:
+ source: 'ppa:<ppa-name>'
+ source3:
+ source: 'deb $MIRROR $RELEASE multiverse'
+ key: |
+ ------BEGIN PGP PUBLIC KEY BLOCK-------
+ <key data>
+ ------END PGP PUBLIC KEY BLOCK-------""")],
+ 'frequency': frequency,
+ 'type': 'object',
+ 'properties': {
+ 'apt': {
+ 'type': 'object',
+ 'additionalProperties': False,
+ 'properties': {
+ 'preserve_sources_list': {
+ 'type': 'boolean',
+ 'default': False,
+ 'description': dedent("""\
+ By default, cloud-init will generate a new sources
+ list in ``/etc/apt/sources.list.d`` based on any
+ changes specified in cloud config. To disable this
+ behavior and preserve the sources list from the
+ pristine image, set ``preserve_sources_list``
+ to ``true``.
+
+ The ``preserve_sources_list`` option overrides
+ all other config keys that would alter
+ ``sources.list`` or ``sources.list.d``,
+ **except** for additional sources to be added
+ to ``sources.list.d``.""")
+ },
+ 'disable_suites': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'string'
+ },
+ 'uniqueItems': True,
+ 'description': dedent("""\
+ Entries in the sources list can be disabled using
+ ``disable_suites``, which takes a list of suites
+ to be disabled. If the string ``$RELEASE`` is
+ present in a suite in the ``disable_suites`` list,
+ it will be replaced with the release name. If a
+ suite specified in ``disable_suites`` is not
+ present in ``sources.list`` it will be ignored.
+ For convenience, several aliases are provided for
+ ``disable_suites``:
+
+ - ``updates`` => ``$RELEASE-updates``
+ - ``backports`` => ``$RELEASE-backports``
+ - ``security`` => ``$RELEASE-security``
+ - ``proposed`` => ``$RELEASE-proposed``
+ - ``release`` => ``$RELEASE``.
+
+ When a suite is disabled using ``disable_suites``,
+ its entry in ``sources.list`` is not deleted; it
+ is just commented out.""")
+ },
+ 'primary': {
+ **mirror_property,
+ 'description': dedent("""\
+ The primary and security archive mirrors can
+ be specified using the ``primary`` and
+ ``security`` keys, respectively. Both the
+ ``primary`` and ``security`` keys take a list
+ of configs, allowing mirrors to be specified
+ on a per-architecture basis. Each config is a
+ dictionary which must have an entry for
+ ``arches``, specifying which architectures
+ that config entry is for. The keyword
+ ``default`` applies to any architecture not
+ explicitly listed. The mirror url can be specified
+ with the ``uri`` key, or a list of mirrors to
+ check can be provided in order, with the first
+ mirror that can be resolved being selected. This
+ allows the same configuration to be used in
+ different environments, with different hosts used
+ for a local apt mirror. If no mirror is provided
+ by ``uri`` or ``search``, ``search_dns`` may be
+ used to search for dns names in the format
+ ``<distro>-mirror`` in each of the following:
+
+ - fqdn of this host per cloud metadata,
+ - localdomain,
+ - domains listed in ``/etc/resolv.conf``.
+
+ If there is a dns entry for ``<distro>-mirror``,
+ then it is assumed that there is a distro mirror
+ at ``http://<distro>-mirror.<domain>/<distro>``.
+ If the ``primary`` key is defined, but not the
+ ``security`` key, then the configuration for
+ ``primary`` is also used for ``security``.
+ If ``search_dns`` is used for the ``security``
+ key, the search pattern will be
+ ``<distro>-security-mirror``.
+
+ If no mirrors are specified, or all lookups fail,
+ then default mirrors defined in the datasource
+ are used. If none are present in the datasource
+ either, the following defaults are used:
+
+ - ``primary`` => \
+ ``http://archive.ubuntu.com/ubuntu``.
+ - ``security`` => \
+ ``http://security.ubuntu.com/ubuntu``
+ """)},
+ 'security': {
+ **mirror_property,
+ 'description': dedent("""\
+ Please refer to the primary config documentation.""")
+ },
+ 'add_apt_repo_match': {
+ 'type': 'string',
+ 'default': ADD_APT_REPO_MATCH,
+ 'description': dedent("""\
+ All source entries in ``apt-sources`` that match
+ regex in ``add_apt_repo_match`` will be added to
+ the system using ``add-apt-repository``. If
+ ``add_apt_repo_match`` is not specified, it
+ defaults to ``{}``""".format(ADD_APT_REPO_MATCH))
+ },
+ 'debconf_selections': {
+ 'type': 'object',
+ 'items': {'type': 'string'},
+ 'description': dedent("""\
+ Debconf additional configurations can be specified as a
+ dictionary under the ``debconf_selections`` config
+ key, with each key in the dict representing a
+ different set of configurations. The value of each key
+ must be a string containing all the debconf
+ configurations that must be applied. We will bundle
+ all of the values and pass them to
+ ``debconf-set-selections``. Therefore, each value line
+ must be a valid entry for ``debconf-set-selections``,
+ meaning that they must possess four distinct fields:
+
+ ``pkgname question type answer``
+
+ Where:
+
+ - ``pkgname`` is the name of the package.
+ - ``question`` is the name of the question.
+ - ``type`` is the type of question.
+ - ``answer`` is the value used to answer the \
+ question.
+
+ For example: \
+ ``ippackage ippackage/ip string 127.0.0.1``
+ """)
+ },
+ 'sources_list': {
+ 'type': 'string',
+ 'description': dedent("""\
+ Specifies a custom template for rendering
+ ``sources.list``. If no ``sources_list`` template
+ is given, cloud-init will use a sane default. Within
+ this template, the following strings will be
+ replaced with the appropriate values:
+
+ - ``$MIRROR``
+ - ``$RELEASE``
+ - ``$PRIMARY``
+ - ``$SECURITY``""")
+ },
+ 'conf': {
+ 'type': 'string',
+ 'description': dedent("""\
+ Specify configuration for apt, such as proxy
+ configuration. This configuration is specified as a
+ string. For multiline apt configuration, make sure
+ to follow yaml syntax.""")
+ },
+ 'https_proxy': {
+ 'type': 'string',
+ 'description': dedent("""\
+ More convenient way to specify https apt proxy.
+ https proxy url is specified in the format
+ ``https://[[user][:pass]@]host[:port]/``.""")
+ },
+ 'http_proxy': {
+ 'type': 'string',
+ 'description': dedent("""\
+ More convenient way to specify http apt proxy.
+ http proxy url is specified in the format
+ ``http://[[user][:pass]@]host[:port]/``.""")
+ },
+ 'proxy': {
+ 'type': 'string',
+ 'description': 'Alias for defining a http apt proxy.'
+ },
+ 'ftp_proxy': {
+ 'type': 'string',
+ 'description': dedent("""\
+ More convenient way to specify ftp apt proxy.
+ ftp proxy url is specified in the format
+ ``ftp://[[user][:pass]@]host[:port]/``.""")
+ },
+ 'sources': {
+ 'type': 'object',
+ 'items': {'type': 'string'},
+ 'description': dedent("""\
+ Source list entries can be specified as a
+ dictionary under the ``sources`` config key, with
+ each key in the dict representing a different source
+ file. The key of each source entry will be used
+ as an id that can be referenced in other config
+ entries, as well as the filename for the source's
+ configuration under ``/etc/apt/sources.list.d``.
+ If the name does not end with ``.list``, it will
+ be appended. If there is no configuration for a
+ key in ``sources``, no file will be written, but
+ the key may still be referred to as an id in other
+ ``sources`` entries.
+
+ Each entry under ``sources`` is a dictionary which
+ may contain any of the following optional keys:
+
+ - ``source``: a sources.list entry \
+ (some variable replacements apply).
+ - ``keyid``: a key to import via shortid or \
+ fingerprint.
+ - ``key``: a raw PGP key.
+ - ``keyserver``: alternate keyserver to pull \
+ ``keyid`` key from.
+
+ The ``source`` key supports variable
+ replacements for the following strings:
+
+ - ``$MIRROR``
+ - ``$PRIMARY``
+ - ``$SECURITY``
+ - ``$RELEASE``""")
}
}
- proxy: "http://[[user][:pass]@]host[:port]/"
- http_proxy: "http://[[user][:pass]@]host[:port]/"
- ftp_proxy: "ftp://[[user][:pass]@]host[:port]/"
- https_proxy: "https://[[user][:pass]@]host[:port]/"
- sources:
- source1:
- keyid: "keyid"
- keyserver: "keyserverurl"
- source: "deb http://<url>/ xenial main"
- source2:
- source: "ppa:<ppa-name>"
- source3:
- source: "deb $MIRROR $RELEASE multiverse"
- key: |
- ------BEGIN PGP PUBLIC KEY BLOCK-------
- <key data>
- ------END PGP PUBLIC KEY BLOCK-------
-"""
-
-import glob
-import os
-import re
-
-from cloudinit import gpg
-from cloudinit import log as logging
-from cloudinit import templater
-from cloudinit import util
+ }
+ }
+}
-LOG = logging.getLogger(__name__)
+__doc__ = get_schema_doc(schema)
-# this will match 'XXX:YYY' (ie, 'cloud-archive:foo' or 'ppa:bar')
-ADD_APT_REPO_MATCH = r"^[\w-]+:\w"
# place where apt stores cached repository data
APT_LISTS = "/var/lib/apt/lists"
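
As a rough sketch of the mirror selection order described in the ``primary`` property above (explicit ``uri`` first, then the ``search`` list, then DNS-derived names), assuming only that ``util.search_for_mirror`` returns the first resolvable candidate or ``None``; ``pick_mirror`` and ``dns_candidates`` are hypothetical names::

    from cloudinit import util

    def pick_mirror(mcfg, dns_candidates):
        """Illustrative only: uri wins, then 'search', then DNS names."""
        mirror = mcfg.get('uri')
        if mirror is None:
            # first entry in 'search' that resolves, else None
            mirror = util.search_for_mirror(mcfg.get('search', None))
        if mirror is None and mcfg.get('search_dns'):
            # e.g. ['http://<distro>-mirror.<domain>/<distro>', ...]
            mirror = util.search_for_mirror(dns_candidates)
        return mirror

    print(pick_mirror({'arches': ['default'],
                       'search': ['http://us.archive.ubuntu.com/ubuntu']}, []))
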
@@ -279,6 +423,7 @@ def handle(name, ocfg, cloud, log, _):
"Expected dictionary for 'apt' config, found {config_type}".format(
config_type=type(cfg)))
+ validate_cloudconfig_schema(cfg, schema)
apply_debconf_selections(cfg, target)
apply_apt(cfg, cloud, target)
@@ -287,7 +432,7 @@ def _should_configure_on_empty_apt():
# if no config was provided, should apt configuration be done?
if util.system_is_snappy():
return False, "system is snappy."
- if not (util.which('apt-get') or util.which('apt')):
+ if not (subp.which('apt-get') or subp.which('apt')):
return False, "no apt commands."
return True, "Apt is available."
@@ -334,7 +479,7 @@ def apply_apt(cfg, cloud, target):
def debconf_set_selections(selections, target=None):
if not selections.endswith(b'\n'):
selections += b'\n'
- util.subp(['debconf-set-selections'], data=selections, target=target,
+ subp.subp(['debconf-set-selections'], data=selections, target=target,
capture=True)
@@ -359,7 +504,7 @@ def dpkg_reconfigure(packages, target=None):
"but cannot be unconfigured: %s", unhandled)
if len(to_config):
- util.subp(['dpkg-reconfigure', '--frontend=noninteractive'] +
+ subp.subp(['dpkg-reconfigure', '--frontend=noninteractive'] +
list(to_config), data=None, target=target, capture=True)
@@ -402,7 +547,7 @@ def apply_debconf_selections(cfg, target=None):
def clean_cloud_init(target):
"""clean out any local cloud-init config"""
flist = glob.glob(
- util.target_path(target, "/etc/cloud/cloud.cfg.d/*dpkg*"))
+ subp.target_path(target, "/etc/cloud/cloud.cfg.d/*dpkg*"))
LOG.debug("cleaning cloud-init config from: %s", flist)
for dpkg_cfg in flist:
@@ -431,7 +576,7 @@ def rename_apt_lists(new_mirrors, target, arch):
"""rename_apt_lists - rename apt lists to preserve old cache data"""
default_mirrors = get_default_mirrors(arch)
- pre = util.target_path(target, APT_LISTS)
+ pre = subp.target_path(target, APT_LISTS)
for (name, omirror) in default_mirrors.items():
nmirror = new_mirrors.get(name)
if not nmirror:
@@ -550,8 +695,8 @@ def add_apt_key_raw(key, target=None):
"""
LOG.debug("Adding key:\n'%s'", key)
try:
- util.subp(['apt-key', 'add', '-'], data=key.encode(), target=target)
- except util.ProcessExecutionError:
+ subp.subp(['apt-key', 'add', '-'], data=key.encode(), target=target)
+ except subp.ProcessExecutionError:
LOG.exception("failed to add apt GPG Key to apt keyring")
raise
@@ -614,13 +759,13 @@ def add_apt_sources(srcdict, cloud, target=None, template_params=None,
if aa_repo_match(source):
try:
- util.subp(["add-apt-repository", source], target=target)
- except util.ProcessExecutionError:
+ subp.subp(["add-apt-repository", source], target=target)
+ except subp.ProcessExecutionError:
LOG.exception("add-apt-repository failed.")
raise
continue
- sourcefn = util.target_path(target, ent['filename'])
+ sourcefn = subp.target_path(target, ent['filename'])
try:
contents = "%s\n" % (source)
util.write_file(sourcefn, contents, omode="a")
@@ -763,25 +908,6 @@ def convert_to_v3_apt_format(cfg):
return cfg
-def search_for_mirror(candidates):
- """
- Search through a list of mirror urls for one that works
- This needs to return quickly.
- """
- if candidates is None:
- return None
-
- LOG.debug("search for mirror in candidates: '%s'", candidates)
- for cand in candidates:
- try:
- if util.is_resolvable_url(cand):
- LOG.debug("found working mirror: '%s'", cand)
- return cand
- except Exception:
- pass
- return None
-
-
def search_for_mirror_dns(configured, mirrortype, cfg, cloud):
"""
Try to resolve a list of predefines DNS names to pick mirrors
@@ -813,7 +939,7 @@ def search_for_mirror_dns(configured, mirrortype, cfg, cloud):
for post in doms:
mirror_list.append(mirrorfmt % (post))
- mirror = search_for_mirror(mirror_list)
+ mirror = util.search_for_mirror(mirror_list)
return mirror
@@ -876,7 +1002,7 @@ def get_mirror(cfg, mirrortype, arch, cloud):
# fallback to search if specified
if mirror is None:
# list of mirrors to try to resolve
- mirror = search_for_mirror(mcfg.get("search", None))
+ mirror = util.search_for_mirror(mcfg.get("search", None))
# fallback to search_dns if specified
if mirror is None:
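
The ``debconf_selections`` description above requires each value line to be a valid ``debconf-set-selections`` entry; bundling them and feeding the result to the command on stdin (as ``debconf_set_selections`` does in this module) can be sketched with a hypothetical two-entry config::

    from cloudinit import subp

    selections_cfg = {
        'set1': 'the-package the-package/some-flag boolean true',
        'set2': 'ippackage ippackage/ip string 127.0.0.1',
    }

    # Bundle every value into one newline-terminated byte blob.
    selections = '\n'.join(
        selections_cfg[key] for key in sorted(selections_cfg)
    ).encode() + b'\n'

    # debconf-set-selections reads the entries from stdin ('data=').
    subp.subp(['debconf-set-selections'], data=selections, capture=True)
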
diff --git a/cloudinit/config/cc_apt_pipelining.py b/cloudinit/config/cc_apt_pipelining.py
index 225d0905..aa186ce2 100644
--- a/cloudinit/config/cc_apt_pipelining.py
+++ b/cloudinit/config/cc_apt_pipelining.py
@@ -9,7 +9,7 @@ Apt Pipelining
--------------
**Summary:** configure apt pipelining
-This module configures apt's ``Acquite::http::Pipeline-Depth`` option, whcih
+This module configures apt's ``Acquire::http::Pipeline-Depth`` option, which
controls how apt handles HTTP pipelining. It may be useful for pipelining to be
disabled, because some web servers, such as S3 do not pipeline properly (LP:
#948461). The ``apt_pipelining`` config key may be set to ``false`` to disable
diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py
index 6813f534..246e4497 100644
--- a/cloudinit/config/cc_bootcmd.py
+++ b/cloudinit/config/cc_bootcmd.py
@@ -16,6 +16,7 @@ from cloudinit.config.schema import (
get_schema_doc, validate_cloudconfig_schema)
from cloudinit.settings import PER_ALWAYS
from cloudinit import temp_utils
+from cloudinit import subp
from cloudinit import util
frequency = PER_ALWAYS
@@ -99,7 +100,7 @@ def handle(name, cfg, cloud, log, _args):
if iid:
env['INSTANCE_ID'] = str(iid)
cmd = ['/bin/sh', tmpf.name]
- util.subp(cmd, env=env, capture=False)
+ subp.subp(cmd, env=env, capture=False)
except Exception:
util.logexc(log, "Failed to run bootcmd module %s", name)
raise
diff --git a/cloudinit/config/cc_byobu.py b/cloudinit/config/cc_byobu.py
index 0b4352c8..9fdaeba1 100755
--- a/cloudinit/config/cc_byobu.py
+++ b/cloudinit/config/cc_byobu.py
@@ -39,6 +39,7 @@ Valid configuration options for this module are:
"""
from cloudinit.distros import ug_util
+from cloudinit import subp
from cloudinit import util
distros = ['ubuntu', 'debian']
@@ -93,6 +94,6 @@ def handle(name, cfg, cloud, log, args):
if len(shcmd):
cmd = ["/bin/sh", "-c", "%s %s %s" % ("X=0;", shcmd, "exit $X")]
log.debug("Setting byobu to %s", value)
- util.subp(cmd, capture=False)
+ subp.subp(cmd, capture=False)
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py
index 64bc900e..3c453d91 100644
--- a/cloudinit/config/cc_ca_certs.py
+++ b/cloudinit/config/cc_ca_certs.py
@@ -16,11 +16,16 @@ can be removed from the system with the configuration option
certificates must be specified using valid yaml. in order to specify a
multiline certificate, the yaml multiline list syntax must be used
+.. note::
+ For Alpine Linux the "remove-defaults" functionality works if the
+ ca-certificates package is installed but not if the
+ ca-certificates-bundle package is installed.
+
**Internal name:** ``cc_ca_certs``
**Module frequency:** per instance
-**Supported distros:** ubuntu, debian
+**Supported distros:** alpine, debian, ubuntu
**Config keys**::
@@ -36,6 +41,7 @@ can be removed from the system with the configuration option
import os
+from cloudinit import subp
from cloudinit import util
CA_CERT_PATH = "/usr/share/ca-certificates/"
@@ -44,14 +50,14 @@ CA_CERT_CONFIG = "/etc/ca-certificates.conf"
CA_CERT_SYSTEM_PATH = "/etc/ssl/certs/"
CA_CERT_FULL_PATH = os.path.join(CA_CERT_PATH, CA_CERT_FILENAME)
-distros = ['ubuntu', 'debian']
+distros = ['alpine', 'debian', 'ubuntu']
def update_ca_certs():
"""
Updates the CA certificate cache on the current machine.
"""
- util.subp(["update-ca-certificates"], capture=False)
+ subp.subp(["update-ca-certificates"], capture=False)
def add_ca_certs(certs):
@@ -66,17 +72,23 @@ def add_ca_certs(certs):
cert_file_contents = "\n".join([str(c) for c in certs])
util.write_file(CA_CERT_FULL_PATH, cert_file_contents, mode=0o644)
- # Append cert filename to CA_CERT_CONFIG file.
- # We have to strip the content because blank lines in the file
- # causes subsequent entries to be ignored. (LP: #1077020)
- orig = util.load_file(CA_CERT_CONFIG)
- cur_cont = '\n'.join([line for line in orig.splitlines()
- if line != CA_CERT_FILENAME])
- out = "%s\n%s\n" % (cur_cont.rstrip(), CA_CERT_FILENAME)
+ if os.stat(CA_CERT_CONFIG).st_size == 0:
+ # If the CA_CERT_CONFIG file is empty (i.e. all existing
+ # CA certs have been deleted) then simply output a single
+ # line with the cloud-init cert filename.
+ out = "%s\n" % CA_CERT_FILENAME
+ else:
+ # Append cert filename to CA_CERT_CONFIG file.
+ # We have to strip the content because blank lines in the file
+ # causes subsequent entries to be ignored. (LP: #1077020)
+ orig = util.load_file(CA_CERT_CONFIG)
+ cur_cont = '\n'.join([line for line in orig.splitlines()
+ if line != CA_CERT_FILENAME])
+ out = "%s\n%s\n" % (cur_cont.rstrip(), CA_CERT_FILENAME)
util.write_file(CA_CERT_CONFIG, out, omode="wb")
-def remove_default_ca_certs():
+def remove_default_ca_certs(distro_name):
"""
Removes all default trusted CA certificates from the system. To actually
apply the change you must also call L{update_ca_certs}.
@@ -84,11 +96,14 @@ def remove_default_ca_certs():
util.delete_dir_contents(CA_CERT_PATH)
util.delete_dir_contents(CA_CERT_SYSTEM_PATH)
util.write_file(CA_CERT_CONFIG, "", mode=0o644)
- debconf_sel = "ca-certificates ca-certificates/trust_new_crts select no"
- util.subp(('debconf-set-selections', '-'), debconf_sel)
+
+ if distro_name != 'alpine':
+ debconf_sel = (
+ "ca-certificates ca-certificates/trust_new_crts " + "select no")
+ subp.subp(('debconf-set-selections', '-'), debconf_sel)
-def handle(name, cfg, _cloud, log, _args):
+def handle(name, cfg, cloud, log, _args):
"""
Call to handle ca-cert sections in cloud-config file.
@@ -110,7 +125,7 @@ def handle(name, cfg, _cloud, log, _args):
# default trusted CA certs first.
if ca_cert_cfg.get("remove-defaults", False):
log.debug("Removing default certificates")
- remove_default_ca_certs()
+ remove_default_ca_certs(cloud.distro.name)
# If we are given any new trusted CA certs to add, add them.
if "trusted" in ca_cert_cfg:
diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py
index 01d61fa1..aaf71366 100644
--- a/cloudinit/config/cc_chef.py
+++ b/cloudinit/config/cc_chef.py
@@ -6,78 +6,22 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Chef
-----
-**Summary:** module that configures, starts and installs chef.
-
-This module enables chef to be installed (from packages or
-from gems, or from omnibus). Before this occurs chef configurations are
-written to disk (validation.pem, client.pem, firstboot.json, client.rb),
-and needed chef folders/directories are created (/etc/chef and /var/log/chef
-and so-on). Then once installing proceeds correctly if configured chef will
-be started (in daemon mode or in non-daemon mode) and then once that has
-finished (if ran in non-daemon mode this will be when chef finishes
-converging, if ran in daemon mode then no further actions are possible since
-chef will have forked into its own process) then a post run function can
-run that can do finishing activities (such as removing the validation pem
-file).
-
-**Internal name:** ``cc_chef``
-
-**Module frequency:** per always
-
-**Supported distros:** all
-
-**Config keys**::
-
- chef:
- directories: (defaulting to /etc/chef, /var/log/chef, /var/lib/chef,
- /var/cache/chef, /var/backups/chef, /var/run/chef)
- validation_cert: (optional string to be written to file validation_key)
- special value 'system' means set use existing file
- validation_key: (optional the path for validation_cert. default
- /etc/chef/validation.pem)
- firstboot_path: (path to write run_list and initial_attributes keys that
- should also be present in this configuration, defaults
- to /etc/chef/firstboot.json)
- exec: boolean to run or not run chef (defaults to false, unless
- a gem installed is requested
- where this will then default
- to true)
-
- chef.rb template keys (if falsey, then will be skipped and not
- written to /etc/chef/client.rb)
-
- chef:
- client_key:
- encrypted_data_bag_secret:
- environment:
- file_backup_path:
- file_cache_path:
- json_attribs:
- log_level:
- log_location:
- node_name:
- omnibus_url:
- omnibus_url_retries:
- omnibus_version:
- pid_file:
- server_url:
- show_time:
- ssl_verify_mode:
- validation_cert:
- validation_key:
- validation_name:
-"""
+"""Chef: module that configures, starts and installs chef."""
import itertools
import json
import os
+from textwrap import dedent
+from cloudinit import subp
+from cloudinit.config.schema import (
+ get_schema_doc, validate_cloudconfig_schema)
from cloudinit import templater
+from cloudinit import temp_utils
from cloudinit import url_helper
from cloudinit import util
+from cloudinit.settings import PER_ALWAYS
+
RUBY_VERSION_DEFAULT = "1.8"
@@ -98,6 +42,8 @@ OMNIBUS_URL = "https://www.chef.io/chef/install.sh"
OMNIBUS_URL_RETRIES = 5
CHEF_VALIDATION_PEM_PATH = '/etc/chef/validation.pem'
+CHEF_ENCRYPTED_DATA_BAG_PATH = '/etc/chef/encrypted_data_bag_secret'
+CHEF_ENVIRONMENT = '_default'
CHEF_FB_PATH = '/etc/chef/firstboot.json'
CHEF_RB_TPL_DEFAULTS = {
# These are ruby symbols...
@@ -107,11 +53,11 @@ CHEF_RB_TPL_DEFAULTS = {
'log_location': '/var/log/chef/client.log',
'validation_key': CHEF_VALIDATION_PEM_PATH,
'validation_cert': None,
- 'client_key': "/etc/chef/client.pem",
+ 'client_key': '/etc/chef/client.pem',
'json_attribs': CHEF_FB_PATH,
- 'file_cache_path': "/var/cache/chef",
- 'file_backup_path': "/var/backups/chef",
- 'pid_file': "/var/run/chef/client.pid",
+ 'file_cache_path': '/var/cache/chef',
+ 'file_backup_path': '/var/backups/chef',
+ 'pid_file': '/var/run/chef/client.pid',
'show_time': True,
'encrypted_data_bag_secret': None,
}
@@ -122,9 +68,9 @@ CHEF_RB_TPL_PATH_KEYS = frozenset([
'client_key',
'file_cache_path',
'json_attribs',
- 'file_cache_path',
'pid_file',
'encrypted_data_bag_secret',
+ 'chef_license',
])
CHEF_RB_TPL_KEYS = list(CHEF_RB_TPL_DEFAULTS.keys())
CHEF_RB_TPL_KEYS.extend(CHEF_RB_TPL_BOOL_KEYS)
@@ -141,12 +87,277 @@ CHEF_EXEC_PATH = '/usr/bin/chef-client'
CHEF_EXEC_DEF_ARGS = tuple(['-d', '-i', '1800', '-s', '20'])
-def is_installed():
- if not os.path.isfile(CHEF_EXEC_PATH):
- return False
- if not os.access(CHEF_EXEC_PATH, os.X_OK):
- return False
- return True
+frequency = PER_ALWAYS
+distros = ["all"]
+schema = {
+ 'id': 'cc_chef',
+ 'name': 'Chef',
+ 'title': 'module that configures, starts and installs chef',
+ 'description': dedent("""\
+ This module enables chef to be installed (from packages,
+ gems, or from omnibus). Before this occurs, chef configuration is
+ written to disk (validation.pem, client.pem, firstboot.json,
+ client.rb), and required directories are created (/etc/chef and
+ /var/log/chef and so-on). If configured, chef will be
+ installed and started in either daemon or non-daemon mode.
+ If run in non-daemon mode, post run actions are executed to do
+ finishing activities such as removing validation.pem."""),
+ 'distros': distros,
+ 'examples': [dedent("""
+ chef:
+ directories:
+ - /etc/chef
+ - /var/log/chef
+ validation_cert: system
+ install_type: omnibus
+ initial_attributes:
+ apache:
+ prefork:
+ maxclients: 100
+ keepalive: off
+ run_list:
+ - recipe[apache2]
+ - role[db]
+ encrypted_data_bag_secret: /etc/chef/encrypted_data_bag_secret
+ environment: _default
+ log_level: :auto
+ omnibus_url_retries: 2
+ server_url: https://chef.yourorg.com:4000
+ ssl_verify_mode: :verify_peer
+ validation_name: yourorg-validator""")],
+ 'frequency': frequency,
+ 'type': 'object',
+ 'properties': {
+ 'chef': {
+ 'type': 'object',
+ 'additionalProperties': False,
+ 'properties': {
+ 'directories': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'string'
+ },
+ 'uniqueItems': True,
+ 'description': dedent("""\
+ Create the necessary directories for chef to run. By
+ default, it creates the following directories:
+
+ {chef_dirs}""").format(
+ chef_dirs="\n".join(
+ [" - ``{}``".format(d) for d in CHEF_DIRS]
+ )
+ )
+ },
+ 'validation_cert': {
+ 'type': 'string',
+ 'description': dedent("""\
+ Optional string to be written to file validation_key.
+ Special value ``system`` means use the existing file.
+ """)
+ },
+ 'validation_key': {
+ 'type': 'string',
+ 'default': CHEF_VALIDATION_PEM_PATH,
+ 'description': dedent("""\
+ Optional path for validation_cert. Defaults to
+ ``{}``.""".format(CHEF_VALIDATION_PEM_PATH))
+ },
+ 'firstboot_path': {
+ 'type': 'string',
+ 'default': CHEF_FB_PATH,
+ 'description': dedent("""\
+ Path to write run_list and initial_attributes keys that
+ should also be present in this configuration, defaults
+ to ``{}``.""".format(CHEF_FB_PATH))
+ },
+ 'exec': {
+ 'type': 'boolean',
+ 'default': False,
+ 'description': dedent("""\
+ Define whether to run chef (defaults to false,
+ unless a gem install is requested, in which case
+ this defaults to true).""")
+ },
+ 'client_key': {
+ 'type': 'string',
+ 'default': CHEF_RB_TPL_DEFAULTS['client_key'],
+ 'description': dedent("""\
+ Optional path for client_cert. Defaults to
+ ``{}``.""".format(CHEF_RB_TPL_DEFAULTS['client_key']))
+ },
+ 'encrypted_data_bag_secret': {
+ 'type': 'string',
+ 'default': None,
+ 'description': dedent("""\
+ Specifies the location of the secret key used by chef
+ to encrypt data items. By default, this path is set
+ to None, meaning that chef will have to look at the
+ path ``{}`` for it.
+ """.format(CHEF_ENCRYPTED_DATA_BAG_PATH))
+ },
+ 'environment': {
+ 'type': 'string',
+ 'default': CHEF_ENVIRONMENT,
+ 'description': dedent("""\
+ Specifies which environment chef will use. By default,
+ it will use the ``{}`` configuration.
+ """.format(CHEF_ENVIRONMENT))
+ },
+ 'file_backup_path': {
+ 'type': 'string',
+ 'default': CHEF_RB_TPL_DEFAULTS['file_backup_path'],
+ 'description': dedent("""\
+ Specifies the location in which backup files are
+ stored. By default, it uses the
+ ``{}`` location.""".format(
+ CHEF_RB_TPL_DEFAULTS['file_backup_path']))
+ },
+ 'file_cache_path': {
+ 'type': 'string',
+ 'default': CHEF_RB_TPL_DEFAULTS['file_cache_path'],
+ 'description': dedent("""\
+ Specifies the location in which chef cache files will
+ be saved. By default, it uses the ``{}``
+ location.""".format(
+ CHEF_RB_TPL_DEFAULTS['file_cache_path']))
+ },
+ 'json_attribs': {
+ 'type': 'string',
+ 'default': CHEF_FB_PATH,
+ 'description': dedent("""\
+ Specifies the location in which some chef json data is
+ stored. By default, it uses the
+ ``{}`` location.""".format(CHEF_FB_PATH))
+ },
+ 'log_level': {
+ 'type': 'string',
+ 'default': CHEF_RB_TPL_DEFAULTS['log_level'],
+ 'description': dedent("""\
+ Defines the level of logging to be stored in the log
+ file. By default this value is set to ``{}``.
+ """.format(CHEF_RB_TPL_DEFAULTS['log_level']))
+ },
+ 'log_location': {
+ 'type': 'string',
+ 'default': CHEF_RB_TPL_DEFAULTS['log_location'],
+ 'description': dedent("""\
+ Specifies the location of the chef log file. By
+ default, the location is specified at
+ ``{}``.""".format(
+ CHEF_RB_TPL_DEFAULTS['log_location']))
+ },
+ 'node_name': {
+ 'type': 'string',
+ 'description': dedent("""\
+ The name of the node to run. By default, we will
+ use the instance id as the node name.""")
+ },
+ 'omnibus_url': {
+ 'type': 'string',
+ 'default': OMNIBUS_URL,
+ 'description': dedent("""\
+ Omnibus URL if chef should be installed through
+ Omnibus. By default, it uses the
+ ``{}``.""".format(OMNIBUS_URL))
+ },
+ 'omnibus_url_retries': {
+ 'type': 'integer',
+ 'default': OMNIBUS_URL_RETRIES,
+ 'description': dedent("""\
+ The number of retries that will be attempted to reach
+ the Omnibus URL.""")
+ },
+ 'omnibus_version': {
+ 'type': 'string',
+ 'description': dedent("""\
+ Optional version string to require for omnibus
+ install.""")
+ },
+ 'pid_file': {
+ 'type': 'string',
+ 'default': CHEF_RB_TPL_DEFAULTS['pid_file'],
+ 'description': dedent("""\
+ The location in which a process identification
+ number (pid) is saved. By default, it saves
+ in the ``{}`` location.""".format(
+ CHEF_RB_TPL_DEFAULTS['pid_file']))
+ },
+ 'server_url': {
+ 'type': 'string',
+ 'description': 'The URL for the chef server'
+ },
+ 'show_time': {
+ 'type': 'boolean',
+ 'default': True,
+ 'description': 'Show time in chef logs'
+ },
+ 'ssl_verify_mode': {
+ 'type': 'string',
+ 'default': CHEF_RB_TPL_DEFAULTS['ssl_verify_mode'],
+ 'description': dedent("""\
+ Set the verify mode for HTTPS requests. We can have
+ two possible values for this parameter:
+
+ - ``:verify_none``: No validation of SSL \
+ certificates.
+ - ``:verify_peer``: Validate all SSL certificates.
+
+ By default, the parameter is set as ``{}``.
+ """.format(CHEF_RB_TPL_DEFAULTS['ssl_verify_mode']))
+ },
+ 'validation_name': {
+ 'type': 'string',
+ 'description': dedent("""\
+ The name of the chef-validator key that Chef Infra
+ Client uses to access the Chef Infra Server during
+ the initial Chef Infra Client run.""")
+ },
+ 'force_install': {
+ 'type': 'boolean',
+ 'default': False,
+ 'description': dedent("""\
+ If set to ``True``, forces chef installation, even
+ if it is already installed.""")
+ },
+ 'initial_attributes': {
+ 'type': 'object',
+ 'items': {
+ 'type': 'string'
+ },
+ 'description': dedent("""\
+ Specify a list of initial attributes used by the
+ cookbooks.""")
+ },
+ 'install_type': {
+ 'type': 'string',
+ 'default': 'packages',
+ 'description': dedent("""\
+ The type of installation for chef. It can be one of
+ the following values:
+
+ - ``packages``
+ - ``gems``
+ - ``omnibus``""")
+ },
+ 'run_list': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'string'
+ },
+ 'description': 'A run list for a first boot json.'
+ },
+ "chef_license": {
+ 'type': 'string',
+ 'description': dedent("""\
+ A string that indicates whether the user accepts the
+ license for certain Chef products.""")
+ }
+ }
+ }
+ }
+}
+
+__doc__ = get_schema_doc(schema)
def post_run_chef(chef_cfg, log):
@@ -196,6 +407,8 @@ def handle(name, cfg, cloud, log, _args):
log.debug(("Skipping module named %s,"
" no 'chef' key in configuration"), name)
return
+
+ validate_cloudconfig_schema(cfg, schema)
chef_cfg = cfg['chef']
# Ensure the chef directories we use exist
@@ -223,7 +436,7 @@ def handle(name, cfg, cloud, log, _args):
iid = str(cloud.datasource.get_instance_id())
params = get_template_params(iid, chef_cfg, log)
# Do a best effort attempt to ensure that the template values that
- # are associated with paths have there parent directory created
+ # are associated with paths have their parent directory created
# before they are used by the chef-client itself.
param_paths = set()
for (k, v) in params.items():
@@ -253,9 +466,10 @@ def handle(name, cfg, cloud, log, _args):
# Try to install chef, if its not already installed...
force_install = util.get_cfg_option_bool(chef_cfg,
'force_install', default=False)
- if not is_installed() or force_install:
+ installed = subp.is_exe(CHEF_EXEC_PATH)
+ if not installed or force_install:
run = install_chef(cloud, chef_cfg, log)
- elif is_installed():
+ elif installed:
run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False)
else:
run = False
@@ -280,7 +494,32 @@ def run_chef(chef_cfg, log):
cmd.extend(CHEF_EXEC_DEF_ARGS)
else:
cmd.extend(CHEF_EXEC_DEF_ARGS)
- util.subp(cmd, capture=False)
+ subp.subp(cmd, capture=False)
+
+
+def subp_blob_in_tempfile(blob, *args, **kwargs):
+ """Write blob to a tempfile, and call subp with args, kwargs. Then cleanup.
+
+ 'basename' as a kwarg allows providing the basename for the file.
+ The 'args' argument to subp will be updated with the full path to the
+ filename as the first argument.
+ """
+ basename = kwargs.pop('basename', "subp_blob")
+
+ if len(args) == 0 and 'args' not in kwargs:
+ args = [tuple()]
+
+ # Use tmpdir over tmpfile to avoid 'text file busy' on execute
+ with temp_utils.tempdir(needs_exe=True) as tmpd:
+ tmpf = os.path.join(tmpd, basename)
+ if 'args' in kwargs:
+ kwargs['args'] = [tmpf] + list(kwargs['args'])
+ else:
+ args = list(args)
+ args[0] = [tmpf] + args[0]
+
+ util.write_file(tmpf, blob, mode=0o700)
+ return subp.subp(*args, **kwargs)
def install_chef_from_omnibus(url=None, retries=None, omnibus_version=None):
@@ -303,7 +542,7 @@ def install_chef_from_omnibus(url=None, retries=None, omnibus_version=None):
else:
args = ['-v', omnibus_version]
content = url_helper.readurl(url=url, retries=retries).contents
- return util.subp_blob_in_tempfile(
+ return subp_blob_in_tempfile(
blob=content, args=args,
basename='chef-omnibus-install', capture=False)
@@ -352,11 +591,11 @@ def install_chef_from_gems(ruby_version, chef_version, distro):
if not os.path.exists('/usr/bin/ruby'):
util.sym_link('/usr/bin/ruby%s' % ruby_version, '/usr/bin/ruby')
if chef_version:
- util.subp(['/usr/bin/gem', 'install', 'chef',
+ subp.subp(['/usr/bin/gem', 'install', 'chef',
'-v %s' % chef_version, '--no-ri',
'--no-rdoc', '--bindir', '/usr/bin', '-q'], capture=False)
else:
- util.subp(['/usr/bin/gem', 'install', 'chef',
+ subp.subp(['/usr/bin/gem', 'install', 'chef',
'--no-ri', '--no-rdoc', '--bindir',
'/usr/bin', '-q'], capture=False)
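
The pattern this commit applies across modules (apk, apt, chef) is: declare a ``schema`` dict, derive ``__doc__`` from it with ``get_schema_doc``, and call ``validate_cloudconfig_schema`` from ``handle``. A skeletal module following that pattern, with hypothetical names throughout, might look like::

    from textwrap import dedent

    from cloudinit.config.schema import (
        get_schema_doc, validate_cloudconfig_schema)
    from cloudinit.settings import PER_INSTANCE

    frequency = PER_INSTANCE
    distros = ['all']

    schema = {
        'id': 'cc_example',  # hypothetical module id
        'name': 'Example',
        'title': 'Illustrate the schema pattern',
        'description': dedent("""\
            Skeleton only; a real module documents its behaviour here."""),
        'distros': distros,
        'examples': [],
        'frequency': frequency,
        'type': 'object',
        'properties': {
            'example': {'type': 'object', 'additionalProperties': False},
        },
    }

    __doc__ = get_schema_doc(schema)  # module docs are rendered from schema


    def handle(name, cfg, cloud, log, _args):
        if 'example' not in cfg:
            log.debug("Skipping module named %s, no 'example' key", name)
            return
        validate_cloudconfig_schema(cfg, schema)  # warns on violations
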
diff --git a/cloudinit/config/cc_disable_ec2_metadata.py b/cloudinit/config/cc_disable_ec2_metadata.py
index 885b3138..dff93245 100644
--- a/cloudinit/config/cc_disable_ec2_metadata.py
+++ b/cloudinit/config/cc_disable_ec2_metadata.py
@@ -26,6 +26,7 @@ by default.
disable_ec2_metadata: <true/false>
"""
+from cloudinit import subp
from cloudinit import util
from cloudinit.settings import PER_ALWAYS
@@ -40,15 +41,15 @@ def handle(name, cfg, _cloud, log, _args):
disabled = util.get_cfg_option_bool(cfg, "disable_ec2_metadata", False)
if disabled:
reject_cmd = None
- if util.which('ip'):
+ if subp.which('ip'):
reject_cmd = REJECT_CMD_IP
- elif util.which('ifconfig'):
+ elif subp.which('ifconfig'):
reject_cmd = REJECT_CMD_IF
else:
log.error(('Neither "route" nor "ip" command found, unable to '
'manipulate routing table'))
return
- util.subp(reject_cmd, capture=False)
+ subp.subp(reject_cmd, capture=False)
else:
log.debug(("Skipping module named %s,"
" disabling the ec2 route not enabled"), name)
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index 0796cb7b..a7bdc703 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -99,6 +99,7 @@ specified using ``filesystem``.
from cloudinit.settings import PER_INSTANCE
from cloudinit import util
+from cloudinit import subp
import logging
import os
import shlex
@@ -106,13 +107,13 @@ import shlex
frequency = PER_INSTANCE
# Define the commands to use
-UDEVADM_CMD = util.which('udevadm')
-SFDISK_CMD = util.which("sfdisk")
-SGDISK_CMD = util.which("sgdisk")
-LSBLK_CMD = util.which("lsblk")
-BLKID_CMD = util.which("blkid")
-BLKDEV_CMD = util.which("blockdev")
-WIPEFS_CMD = util.which("wipefs")
+UDEVADM_CMD = subp.which('udevadm')
+SFDISK_CMD = subp.which("sfdisk")
+SGDISK_CMD = subp.which("sgdisk")
+LSBLK_CMD = subp.which("lsblk")
+BLKID_CMD = subp.which("blkid")
+BLKDEV_CMD = subp.which("blockdev")
+WIPEFS_CMD = subp.which("wipefs")
LANG_C_ENV = {'LANG': 'C'}
@@ -163,7 +164,7 @@ def handle(_name, cfg, cloud, log, _args):
def update_disk_setup_devices(disk_setup, tformer):
# update 'disk_setup' dictionary anywhere were a device may occur
# update it with the response from 'tformer'
- for origname in disk_setup.keys():
+ for origname in list(disk_setup):
transformed = tformer(origname)
if transformed is None or transformed == origname:
continue
@@ -248,9 +249,11 @@ def enumerate_disk(device, nodeps=False):
info = None
try:
- info, _err = util.subp(lsblk_cmd)
+ info, _err = subp.subp(lsblk_cmd)
except Exception as e:
- raise Exception("Failed during disk check for %s\n%s" % (device, e))
+ raise Exception(
+ "Failed during disk check for %s\n%s" % (device, e)
+ ) from e
parts = [x for x in (info.strip()).splitlines() if len(x.split()) > 0]
@@ -310,9 +313,11 @@ def check_fs(device):
blkid_cmd = [BLKID_CMD, '-c', '/dev/null', device]
try:
- out, _err = util.subp(blkid_cmd, rcs=[0, 2])
+ out, _err = subp.subp(blkid_cmd, rcs=[0, 2])
except Exception as e:
- raise Exception("Failed during disk check for %s\n%s" % (device, e))
+ raise Exception(
+ "Failed during disk check for %s\n%s" % (device, e)
+ ) from e
if out:
if len(out.splitlines()) == 1:
@@ -427,16 +432,16 @@ def get_dyn_func(*args):
else:
return globals()[func_name]
- except KeyError:
- raise Exception("No such function %s to call!" % func_name)
+ except KeyError as e:
+ raise Exception("No such function %s to call!" % func_name) from e
def get_hdd_size(device):
try:
- size_in_bytes, _ = util.subp([BLKDEV_CMD, '--getsize64', device])
- sector_size, _ = util.subp([BLKDEV_CMD, '--getss', device])
+ size_in_bytes, _ = subp.subp([BLKDEV_CMD, '--getsize64', device])
+ sector_size, _ = subp.subp([BLKDEV_CMD, '--getss', device])
except Exception as e:
- raise Exception("Failed to get %s size\n%s" % (device, e))
+ raise Exception("Failed to get %s size\n%s" % (device, e)) from e
return int(size_in_bytes) / int(sector_size)
@@ -452,10 +457,11 @@ def check_partition_mbr_layout(device, layout):
read_parttbl(device)
prt_cmd = [SFDISK_CMD, "-l", device]
try:
- out, _err = util.subp(prt_cmd, data="%s\n" % layout)
+ out, _err = subp.subp(prt_cmd, data="%s\n" % layout)
except Exception as e:
- raise Exception("Error running partition command on %s\n%s" % (
- device, e))
+ raise Exception(
+ "Error running partition command on %s\n%s" % (device, e)
+ ) from e
found_layout = []
for line in out.splitlines():
@@ -482,10 +488,11 @@ def check_partition_mbr_layout(device, layout):
def check_partition_gpt_layout(device, layout):
prt_cmd = [SGDISK_CMD, '-p', device]
try:
- out, _err = util.subp(prt_cmd, update_env=LANG_C_ENV)
+ out, _err = subp.subp(prt_cmd, update_env=LANG_C_ENV)
except Exception as e:
- raise Exception("Error running partition command on %s\n%s" % (
- device, e))
+ raise Exception(
+ "Error running partition command on %s\n%s" % (device, e)
+ ) from e
out_lines = iter(out.splitlines())
# Skip header. Output looks like:
@@ -655,9 +662,11 @@ def purge_disk(device):
wipefs_cmd = [WIPEFS_CMD, "--all", "/dev/%s" % d['name']]
try:
LOG.info("Purging filesystem on /dev/%s", d['name'])
- util.subp(wipefs_cmd)
- except Exception:
- raise Exception("Failed FS purge of /dev/%s" % d['name'])
+ subp.subp(wipefs_cmd)
+ except Exception as e:
+ raise Exception(
+ "Failed FS purge of /dev/%s" % d['name']
+ ) from e
purge_disk_ptable(device)
@@ -682,7 +691,7 @@ def read_parttbl(device):
blkdev_cmd = [BLKDEV_CMD, '--rereadpt', device]
util.udevadm_settle()
try:
- util.subp(blkdev_cmd)
+ subp.subp(blkdev_cmd)
except Exception as e:
util.logexc(LOG, "Failed reading the partition table %s" % e)
@@ -697,25 +706,27 @@ def exec_mkpart_mbr(device, layout):
# Create the partitions
prt_cmd = [SFDISK_CMD, "--Linux", "--unit=S", "--force", device]
try:
- util.subp(prt_cmd, data="%s\n" % layout)
+ subp.subp(prt_cmd, data="%s\n" % layout)
except Exception as e:
- raise Exception("Failed to partition device %s\n%s" % (device, e))
+ raise Exception(
+ "Failed to partition device %s\n%s" % (device, e)
+ ) from e
read_parttbl(device)
def exec_mkpart_gpt(device, layout):
try:
- util.subp([SGDISK_CMD, '-Z', device])
+ subp.subp([SGDISK_CMD, '-Z', device])
for index, (partition_type, (start, end)) in enumerate(layout):
index += 1
- util.subp([SGDISK_CMD,
+ subp.subp([SGDISK_CMD,
'-n', '{}:{}:{}'.format(index, start, end), device])
if partition_type is not None:
# convert to a 4 char (or more) string right padded with 0
# 82 -> 8200. 'Linux' -> 'Linux'
pinput = str(partition_type).ljust(4, "0")
- util.subp(
+ subp.subp(
[SGDISK_CMD, '-t', '{}:{}'.format(index, pinput), device])
except Exception:
LOG.warning("Failed to partition device %s", device)
@@ -967,9 +978,9 @@ def mkfs(fs_cfg):
fs_cmd)
else:
# Find the mkfs command
- mkfs_cmd = util.which("mkfs.%s" % fs_type)
+ mkfs_cmd = subp.which("mkfs.%s" % fs_type)
if not mkfs_cmd:
- mkfs_cmd = util.which("mk%s" % fs_type)
+ mkfs_cmd = subp.which("mk%s" % fs_type)
if not mkfs_cmd:
LOG.warning("Cannot create fstype '%s'. No mkfs.%s command",
@@ -994,8 +1005,8 @@ def mkfs(fs_cfg):
LOG.debug("Creating file system %s on %s", label, device)
LOG.debug(" Using cmd: %s", str(fs_cmd))
try:
- util.subp(fs_cmd, shell=shell)
+ subp.subp(fs_cmd, shell=shell)
except Exception as e:
- raise Exception("Failed to exec of '%s':\n%s" % (fs_cmd, e))
+ raise Exception("Failed to exec of '%s':\n%s" % (fs_cmd, e)) from e
# vi: ts=4 expandtab
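
A small, self-contained illustration of the ``raise ... from e`` change applied throughout this file: chaining keeps the original failure attached as ``__cause__`` instead of discarding it (``failing_run`` is a stand-in for a failing ``subp`` call)::

    def failing_run(cmd):
        raise OSError('boom')  # stand-in for a subp failure

    def get_size_demo(run=failing_run):
        try:
            return run(['blockdev', '--getsize64', '/dev/sda'])
        except Exception as e:
            # 'from e' keeps the underlying error for tracebacks
            raise Exception('Failed to get /dev/sda size\n%s' % e) from e

    try:
        get_size_demo()
    except Exception as err:
        assert isinstance(err.__cause__, OSError)  # chained, not lost
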
diff --git a/cloudinit/config/cc_emit_upstart.py b/cloudinit/config/cc_emit_upstart.py
index b342e04d..b1d99f97 100644
--- a/cloudinit/config/cc_emit_upstart.py
+++ b/cloudinit/config/cc_emit_upstart.py
@@ -25,7 +25,7 @@ import os
from cloudinit import log as logging
from cloudinit.settings import PER_ALWAYS
-from cloudinit import util
+from cloudinit import subp
frequency = PER_ALWAYS
@@ -43,9 +43,9 @@ def is_upstart_system():
del myenv['UPSTART_SESSION']
check_cmd = ['initctl', 'version']
try:
- (out, _err) = util.subp(check_cmd, env=myenv)
+ (out, _err) = subp.subp(check_cmd, env=myenv)
return 'upstart' in out
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
LOG.debug("'%s' returned '%s', not using upstart",
' '.join(check_cmd), e.exit_code)
return False
@@ -66,7 +66,7 @@ def handle(name, _cfg, cloud, log, args):
for n in event_names:
cmd = ['initctl', 'emit', str(n), 'CLOUD_CFG=%s' % cfgpath]
try:
- util.subp(cmd)
+ subp.subp(cmd)
except Exception as e:
# TODO(harlowja), use log exception from utils??
log.warning("Emission of upstart event %s failed due to: %s", n, e)
diff --git a/cloudinit/config/cc_fan.py b/cloudinit/config/cc_fan.py
index 0a135bbe..77984bca 100644
--- a/cloudinit/config/cc_fan.py
+++ b/cloudinit/config/cc_fan.py
@@ -39,6 +39,7 @@ If cloud-init sees a ``fan`` entry in cloud-config it will:
from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
+from cloudinit import subp
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -62,8 +63,8 @@ def stop_update_start(service, config_file, content, systemd=False):
def run(cmd, msg):
try:
- return util.subp(cmd, capture=True)
- except util.ProcessExecutionError as e:
+ return subp.subp(cmd, capture=True)
+ except subp.ProcessExecutionError as e:
LOG.warning("failed: %s (%s): %s", service, cmd, e)
return False
@@ -94,7 +95,7 @@ def handle(name, cfg, cloud, log, args):
util.write_file(mycfg.get('config_path'), mycfg.get('config'), omode="w")
distro = cloud.distro
- if not util.which('fanctl'):
+ if not subp.which('fanctl'):
distro.install_packages(['ubuntu-fan'])
stop_update_start(
diff --git a/cloudinit/config/cc_final_message.py b/cloudinit/config/cc_final_message.py
index fd141541..3441f7a9 100644
--- a/cloudinit/config/cc_final_message.py
+++ b/cloudinit/config/cc_final_message.py
@@ -78,7 +78,7 @@ def handle(_name, cfg, cloud, log, args):
boot_fin_fn = cloud.paths.boot_finished
try:
contents = "%s - %s - v. %s\n" % (uptime, ts, cver)
- util.write_file(boot_fin_fn, contents)
+ util.write_file(boot_fin_fn, contents, ensure_dir_exists=False)
except Exception:
util.logexc(log, "Failed to write boot finished file %s", boot_fin_fn)
diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py
index 1b512a06..237c3d02 100644
--- a/cloudinit/config/cc_growpart.py
+++ b/cloudinit/config/cc_growpart.py
@@ -70,6 +70,7 @@ import stat
from cloudinit import log as logging
from cloudinit.settings import PER_ALWAYS
+from cloudinit import subp
from cloudinit import util
frequency = PER_ALWAYS
@@ -131,30 +132,30 @@ class ResizeGrowPart(object):
myenv['LANG'] = 'C'
try:
- (out, _err) = util.subp(["growpart", "--help"], env=myenv)
+ (out, _err) = subp.subp(["growpart", "--help"], env=myenv)
if re.search(r"--update\s+", out):
return True
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
pass
return False
def resize(self, diskdev, partnum, partdev):
before = get_size(partdev)
try:
- util.subp(["growpart", '--dry-run', diskdev, partnum])
- except util.ProcessExecutionError as e:
+ subp.subp(["growpart", '--dry-run', diskdev, partnum])
+ except subp.ProcessExecutionError as e:
if e.exit_code != 1:
util.logexc(LOG, "Failed growpart --dry-run for (%s, %s)",
diskdev, partnum)
- raise ResizeFailedException(e)
+ raise ResizeFailedException(e) from e
return (before, before)
try:
- util.subp(["growpart", diskdev, partnum])
- except util.ProcessExecutionError as e:
+ subp.subp(["growpart", diskdev, partnum])
+ except subp.ProcessExecutionError as e:
util.logexc(LOG, "Failed: growpart %s %s", diskdev, partnum)
- raise ResizeFailedException(e)
+ raise ResizeFailedException(e) from e
return (before, get_size(partdev))
@@ -165,11 +166,11 @@ class ResizeGpart(object):
myenv['LANG'] = 'C'
try:
- (_out, err) = util.subp(["gpart", "help"], env=myenv, rcs=[0, 1])
+ (_out, err) = subp.subp(["gpart", "help"], env=myenv, rcs=[0, 1])
if re.search(r"gpart recover ", err):
return True
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
pass
return False
@@ -182,18 +183,18 @@ class ResizeGpart(object):
be recovered.
"""
try:
- util.subp(["gpart", "recover", diskdev])
- except util.ProcessExecutionError as e:
+ subp.subp(["gpart", "recover", diskdev])
+ except subp.ProcessExecutionError as e:
if e.exit_code != 0:
util.logexc(LOG, "Failed: gpart recover %s", diskdev)
- raise ResizeFailedException(e)
+ raise ResizeFailedException(e) from e
before = get_size(partdev)
try:
- util.subp(["gpart", "resize", "-i", partnum, diskdev])
- except util.ProcessExecutionError as e:
+ subp.subp(["gpart", "resize", "-i", partnum, diskdev])
+ except subp.ProcessExecutionError as e:
util.logexc(LOG, "Failed: gpart resize -i %s %s", partnum, diskdev)
- raise ResizeFailedException(e)
+ raise ResizeFailedException(e) from e
# Since growing the FS requires a reboot, make sure we reboot
# first when this module has finished.
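
Isolated from the class, the dry-run-first control flow above looks roughly like the following sketch; exit code 1 from ``growpart --dry-run`` means there is nothing to do, while any other failure is fatal (``ResizeFailedException`` here is a stand-in for the module's own exception)::

    from cloudinit import subp

    class ResizeFailedException(Exception):
        pass  # stand-in; the module defines its own

    def resize_demo(diskdev, partnum):
        try:
            subp.subp(['growpart', '--dry-run', diskdev, partnum])
        except subp.ProcessExecutionError as e:
            if e.exit_code != 1:
                # a real failure; exit code 1 only means "no resize needed"
                raise ResizeFailedException(e) from e
            return False  # nothing to grow
        subp.subp(['growpart', diskdev, partnum])
        return True
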
diff --git a/cloudinit/config/cc_grub_dpkg.py b/cloudinit/config/cc_grub_dpkg.py
index a323edfa..eb03c664 100644
--- a/cloudinit/config/cc_grub_dpkg.py
+++ b/cloudinit/config/cc_grub_dpkg.py
@@ -1,8 +1,9 @@
-# Copyright (C) 2009-2010 Canonical Ltd.
+# Copyright (C) 2009-2010, 2020 Canonical Ltd.
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
#
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+# Author: Matthew Ruffell <matthew.ruffell@canonical.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
@@ -15,15 +16,15 @@ Configure which device is used as the target for grub installation. This module
should work correctly by default without any user configuration. It can be
enabled/disabled using the ``enabled`` config key in the ``grub_dpkg`` config
dict. The global config key ``grub-dpkg`` is an alias for ``grub_dpkg``. If no
-installation device is specified this module will look for the first existing
-device in:
+installation device is specified, this module will execute grub-probe to
+determine which disk the /boot directory is associated with.
- - ``/dev/sda``
- - ``/dev/vda``
- - ``/dev/xvda``
- - ``/dev/sda1``
- - ``/dev/vda1``
- - ``/dev/xvda1``
+The value placed into the debconf database is in the format expected by the
+grub postinstall script. Normally this is a /dev/disk/by-id/ value, but it
+falls back to the plain disk name if no by-id name is present.
+
+If this module is executed inside a container, then the debconf database is
+seeded with empty values, and install_devices_empty is set to true.
**Internal name:** ``cc_grub_dpkg``
@@ -42,11 +43,68 @@ device in:
import os
+from cloudinit import subp
from cloudinit import util
+from cloudinit.subp import ProcessExecutionError
distros = ['ubuntu', 'debian']
+def fetch_idevs(log):
+ """
+ Fetches the /dev/disk/by-id device grub is installed to.
+ Falls back to plain disk name if no by-id entry is present.
+ """
+ disk = ""
+ devices = []
+
+ try:
+ # get the root disk where the /boot directory resides.
+ disk = subp.subp(['grub-probe', '-t', 'disk', '/boot'],
+ capture=True)[0].strip()
+ except ProcessExecutionError as e:
+ # grub-common may not be installed, especially on containers
+ # FileNotFoundError is a nested exception of ProcessExecutionError
+ if isinstance(e.reason, FileNotFoundError):
+ log.debug("'grub-probe' not found in $PATH")
+        # disks from the container host are present in /proc and /sys
+        # which is what grub-probe uses to determine where /boot resides.
+        # it then checks for existence in /dev, which fails as host disks
+        # are not exposed to the container.
+ elif "failed to get canonical path" in e.stderr:
+ log.debug("grub-probe 'failed to get canonical path'")
+ else:
+            # something bad has happened; re-raise so the error is logged
+ raise
+ except Exception:
+ util.logexc(log, "grub-probe failed to execute for grub-dpkg")
+
+ if not disk or not os.path.exists(disk):
+ # If we failed to detect a disk, we can return early
+ return ''
+
+ try:
+ # check if disk exists and use udevadm to fetch symlinks
+ devices = subp.subp(
+ ['udevadm', 'info', '--root', '--query=symlink', disk],
+ capture=True
+ )[0].strip().split()
+ except Exception:
+ util.logexc(
+ log, "udevadm DEVLINKS symlink query failed for disk='%s'", disk
+ )
+
+ log.debug('considering these device symlinks: %s', ','.join(devices))
+ # filter symlinks for /dev/disk/by-id entries
+ devices = [dev for dev in devices if 'disk/by-id' in dev]
+ log.debug('filtered to these disk/by-id symlinks: %s', ','.join(devices))
+ # select first device if there is one, else fall back to plain name
+ idevs = sorted(devices)[0] if devices else disk
+ log.debug('selected %s', idevs)
+
+ return idevs
+
+
def handle(name, cfg, _cloud, log, _args):
mycfg = cfg.get("grub_dpkg", cfg.get("grub-dpkg", {}))
@@ -62,22 +120,10 @@ def handle(name, cfg, _cloud, log, _args):
idevs_empty = util.get_cfg_option_str(
mycfg, "grub-pc/install_devices_empty", None)
- if ((os.path.exists("/dev/sda1") and not os.path.exists("/dev/sda")) or
- (os.path.exists("/dev/xvda1") and not os.path.exists("/dev/xvda"))):
- if idevs is None:
- idevs = ""
- if idevs_empty is None:
- idevs_empty = "true"
- else:
- if idevs_empty is None:
- idevs_empty = "false"
- if idevs is None:
- idevs = "/dev/sda"
- for dev in ("/dev/sda", "/dev/vda", "/dev/xvda",
- "/dev/sda1", "/dev/vda1", "/dev/xvda1"):
- if os.path.exists(dev):
- idevs = dev
- break
+ if idevs is None:
+ idevs = fetch_idevs(log)
+ if idevs_empty is None:
+ idevs_empty = "false" if idevs else "true"
# now idevs and idevs_empty are set to determined values
# or, those set by user
@@ -90,7 +136,7 @@ def handle(name, cfg, _cloud, log, _args):
(idevs, idevs_empty))
try:
- util.subp(['debconf-set-selections'], dconf_sel)
+ subp.subp(['debconf-set-selections'], dconf_sel)
except Exception:
util.logexc(log, "Failed to run debconf-set-selections for grub-dpkg")
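
Taken together, fetch_idevs replaces the old hard-coded device probe list with two external commands. A rough standalone rendition of the flow, using subprocess in place of subp.subp and omitting the container-specific error handling:

    import os
    import subprocess

    def fetch_idevs():
        """Return the /dev/disk/by-id path of the disk holding /boot,
        falling back to the plain disk name."""
        # Ask grub-probe which disk backs /boot, e.g. '/dev/vda'.
        disk = subprocess.run(
            ['grub-probe', '-t', 'disk', '/boot'],
            capture_output=True, text=True, check=True).stdout.strip()
        if not disk or not os.path.exists(disk):
            return ''
        # udevadm prints whitespace-separated symlinks for the device.
        links = subprocess.run(
            ['udevadm', 'info', '--root', '--query=symlink', disk],
            capture_output=True, text=True, check=True).stdout.strip().split()
        by_id = sorted(ln for ln in links if 'disk/by-id' in ln)
        return by_id[0] if by_id else disk
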
diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py
index 3d2ded3d..0f2be52b 100644
--- a/cloudinit/config/cc_keys_to_console.py
+++ b/cloudinit/config/cc_keys_to_console.py
@@ -33,6 +33,7 @@ key can be used. By default ``ssh-dss`` keys are not written to console.
import os
from cloudinit.settings import PER_INSTANCE
+from cloudinit import subp
from cloudinit import util
frequency = PER_INSTANCE
@@ -64,7 +65,7 @@ def handle(name, cfg, cloud, log, _args):
try:
cmd = [helper_path, ','.join(fp_blacklist), ','.join(key_blacklist)]
- (stdout, _stderr) = util.subp(cmd)
+ (stdout, _stderr) = subp.subp(cmd)
util.multi_log("%s\n" % (stdout.strip()),
stderr=False, console=True)
except Exception:
diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py
index a9c04d86..299c4d01 100644
--- a/cloudinit/config/cc_landscape.py
+++ b/cloudinit/config/cc_landscape.py
@@ -61,6 +61,7 @@ from io import BytesIO
from configobj import ConfigObj
from cloudinit import type_utils
+from cloudinit import subp
from cloudinit import util
from cloudinit.settings import PER_INSTANCE
@@ -116,7 +117,7 @@ def handle(_name, cfg, cloud, log, _args):
log.debug("Wrote landscape config file to %s", LSC_CLIENT_CFG_FILE)
util.write_file(LS_DEFAULT_FILE, "RUN=1\n")
- util.subp(["service", "landscape-client", "restart"])
+ subp.subp(["service", "landscape-client", "restart"])
def merge_together(objs):
diff --git a/cloudinit/config/cc_locale.py b/cloudinit/config/cc_locale.py
index f68c3cc7..4f8b7bf6 100644
--- a/cloudinit/config/cc_locale.py
+++ b/cloudinit/config/cc_locale.py
@@ -6,27 +6,58 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Locale
-------
-**Summary:** set system locale
+"""Locale: set system locale"""
-Configure the system locale and apply it system wide. By default use the locale
-specified by the datasource.
+from textwrap import dedent
-**Internal name:** ``cc_locale``
-
-**Module frequency:** per instance
+from cloudinit import util
+from cloudinit.config.schema import get_schema_doc, validate_cloudconfig_schema
+from cloudinit.settings import PER_INSTANCE
-**Supported distros:** all
-**Config keys**::
+frequency = PER_INSTANCE
+distros = ['all']
+schema = {
+ 'id': 'cc_locale',
+ 'name': 'Locale',
+ 'title': 'Set system locale',
+ 'description': dedent(
+ """\
+ Configure the system locale and apply it system wide. By default use
+ the locale specified by the datasource."""
+ ),
+ 'distros': distros,
+ 'examples': [
+ dedent("""\
+ # Set the locale to ar_AE
+ locale: ar_AE
+ """),
+ dedent("""\
+ # Set the locale to fr_CA in /etc/alternate_path/locale
+ locale: fr_CA
+ locale_configfile: /etc/alternate_path/locale
+ """),
+ ],
+ 'frequency': frequency,
+ 'type': 'object',
+ 'properties': {
+ 'locale': {
+ 'type': 'string',
+ 'description': (
+ "The locale to set as the system's locale (e.g. ar_PS)"
+ ),
+ },
+ 'locale_configfile': {
+ 'type': 'string',
+ 'description': (
+ "The file in which to write the locale configuration (defaults"
+ " to the distro's default location)"
+ ),
+ },
+ },
+}
- locale: <locale str>
- locale_configfile: <path to locale config file>
-"""
-
-from cloudinit import util
+__doc__ = get_schema_doc(schema) # Supplement python help()
def handle(name, cfg, cloud, log, args):
@@ -40,6 +71,8 @@ def handle(name, cfg, cloud, log, args):
name, locale)
return
+ validate_cloudconfig_schema(cfg, schema)
+
log.debug("Setting locale to %s", locale)
locale_cfgfile = util.get_cfg_option_str(cfg, "locale_configfile")
cloud.distro.apply_locale(locale, locale_cfgfile)
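
With the docstring converted to a schema, malformed cloud-config now fails validation instead of passing silently. A hedged sketch of the effect, using the jsonschema library directly (cloud-init wraps this in validate_cloudconfig_schema):

    import jsonschema

    locale_schema = {
        'type': 'object',
        'properties': {
            'locale': {'type': 'string'},
            'locale_configfile': {'type': 'string'},
        },
    }

    # Passes: both values are strings.
    jsonschema.validate(
        {'locale': 'fr_CA', 'locale_configfile': '/etc/alternate_path/locale'},
        locale_schema)

    # Fails: locale must be a string, not a list.
    try:
        jsonschema.validate({'locale': ['en_US']}, locale_schema)
    except jsonschema.ValidationError as e:
        print(e.message)
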
diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py
index 151a9844..7129c9c6 100644
--- a/cloudinit/config/cc_lxd.py
+++ b/cloudinit/config/cc_lxd.py
@@ -48,6 +48,7 @@ lxd-bridge will be configured accordingly.
"""
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import util
import os
@@ -85,16 +86,16 @@ def handle(name, cfg, cloud, log, args):
# Install the needed packages
packages = []
- if not util.which("lxd"):
+ if not subp.which("lxd"):
packages.append('lxd')
- if init_cfg.get("storage_backend") == "zfs" and not util.which('zfs'):
+ if init_cfg.get("storage_backend") == "zfs" and not subp.which('zfs'):
packages.append('zfsutils-linux')
if len(packages):
try:
cloud.distro.install_packages(packages)
- except util.ProcessExecutionError as exc:
+ except subp.ProcessExecutionError as exc:
log.warning("failed to install packages %s: %s", packages, exc)
return
@@ -104,20 +105,20 @@ def handle(name, cfg, cloud, log, args):
'network_address', 'network_port', 'storage_backend',
'storage_create_device', 'storage_create_loop',
'storage_pool', 'trust_password')
- util.subp(['lxd', 'waitready', '--timeout=300'])
+ subp.subp(['lxd', 'waitready', '--timeout=300'])
cmd = ['lxd', 'init', '--auto']
for k in init_keys:
if init_cfg.get(k):
cmd.extend(["--%s=%s" %
(k.replace('_', '-'), str(init_cfg[k]))])
- util.subp(cmd)
+ subp.subp(cmd)
# Set up lxd-bridge if bridge config is given
dconf_comm = "debconf-communicate"
if bridge_cfg:
net_name = bridge_cfg.get("name", _DEFAULT_NETWORK_NAME)
if os.path.exists("/etc/default/lxd-bridge") \
- and util.which(dconf_comm):
+ and subp.which(dconf_comm):
# Bridge configured through packaging
debconf = bridge_to_debconf(bridge_cfg)
@@ -127,7 +128,7 @@ def handle(name, cfg, cloud, log, args):
log.debug("Setting lxd debconf via " + dconf_comm)
data = "\n".join(["set %s %s" % (k, v)
for k, v in debconf.items()]) + "\n"
- util.subp(['debconf-communicate'], data)
+ subp.subp(['debconf-communicate'], data)
except Exception:
util.logexc(log, "Failed to run '%s' for lxd with" %
dconf_comm)
@@ -137,7 +138,7 @@ def handle(name, cfg, cloud, log, args):
# Run reconfigure
log.debug("Running dpkg-reconfigure for lxd")
- util.subp(['dpkg-reconfigure', 'lxd',
+ subp.subp(['dpkg-reconfigure', 'lxd',
'--frontend=noninteractive'])
else:
# Built-in LXD bridge support
@@ -264,7 +265,7 @@ def _lxc(cmd):
env = {'LC_ALL': 'C',
'HOME': os.environ.get('HOME', '/root'),
'USER': os.environ.get('USER', 'root')}
- util.subp(['lxc'] + list(cmd) + ["--force-local"], update_env=env)
+ subp.subp(['lxc'] + list(cmd) + ["--force-local"], update_env=env)
def maybe_cleanup_default(net_name, did_init, create, attach,
@@ -286,7 +287,7 @@ def maybe_cleanup_default(net_name, did_init, create, attach,
try:
_lxc(["network", "delete", net_name])
LOG.debug(msg, net_name, succeeded)
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
if e.exit_code != 1:
raise e
LOG.debug(msg, net_name, fail_assume_enoent)
@@ -296,7 +297,7 @@ def maybe_cleanup_default(net_name, did_init, create, attach,
try:
_lxc(["profile", "device", "remove", profile, nic_name])
LOG.debug(msg, nic_name, profile, succeeded)
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
if e.exit_code != 1:
raise e
LOG.debug(msg, nic_name, profile, fail_assume_enoent)
diff --git a/cloudinit/config/cc_mcollective.py b/cloudinit/config/cc_mcollective.py
index 351183f1..41ea4fc9 100644
--- a/cloudinit/config/cc_mcollective.py
+++ b/cloudinit/config/cc_mcollective.py
@@ -56,6 +56,7 @@ import io
from configobj import ConfigObj
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import util
PUBCERT_FILE = "/etc/mcollective/ssl/server-public.pem"
@@ -140,6 +141,6 @@ def handle(name, cfg, cloud, log, _args):
configure(config=mcollective_cfg['conf'])
# restart mcollective to handle updated config
- util.subp(['service', 'mcollective', 'restart'], capture=False)
+ subp.subp(['service', 'mcollective', 'restart'], capture=False)
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index 4ae3f1fc..54f2f878 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -65,15 +65,19 @@ swap file is created.
from string import whitespace
import logging
-import os.path
+import os
import re
from cloudinit import type_utils
+from cloudinit import subp
from cloudinit import util
# Shortname matches 'sda', 'sda1', 'xvda', 'hda', 'sdb', xvdb, vda, vdd1, sr0
DEVICE_NAME_FILTER = r"^([x]{0,1}[shv]d[a-z][0-9]*|sr[0-9]+)$"
DEVICE_NAME_RE = re.compile(DEVICE_NAME_FILTER)
+# Name matches 'server:/path'
+NETWORK_NAME_FILTER = r"^.+:.*"
+NETWORK_NAME_RE = re.compile(NETWORK_NAME_FILTER)
WS = re.compile("[%s]+" % (whitespace))
FSTAB_PATH = "/etc/fstab"
MNT_COMMENT = "comment=cloudconfig"
@@ -93,6 +97,13 @@ def is_meta_device_name(name):
return False
+def is_network_device(name):
+ # return true if this is a network device
+ if NETWORK_NAME_RE.match(name):
+ return True
+ return False
+
+
def _get_nth_partition_for_device(device_path, partition_number):
potential_suffixes = [str(partition_number), 'p%s' % (partition_number,),
'-part%s' % (partition_number,)]
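
NETWORK_NAME_RE lets remote filesystems written as 'server:/path' bypass the local-device name sanitizing below. What the pattern does and does not match (names are illustrative):

    import re

    NETWORK_NAME_RE = re.compile(r"^.+:.*")

    for name in ("nfs.example.com:/export", "server:/", "sda1", "ephemeral0"):
        print(name, bool(NETWORK_NAME_RE.match(name)))
    # nfs.example.com:/export True
    # server:/ True
    # sda1 False
    # ephemeral0 False
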
@@ -122,6 +133,9 @@ def sanitize_devname(startname, transformer, log):
devname = "ephemeral0"
log.debug("Adjusted mount option from ephemeral to ephemeral0")
+ if is_network_device(startname):
+ return startname
+
device_path, partition_number = util.expand_dotted_devname(devname)
if is_meta_device_name(device_path):
@@ -223,24 +237,24 @@ def suggested_swapsize(memsize=None, maxsize=None, fsys=None):
return size
-def create_swapfile(fname, size):
+def create_swapfile(fname: str, size: str) -> None:
"""Size is in MiB."""
- errmsg = "Failed to create swapfile '%s' of size %dMB via %s: %s"
+ errmsg = "Failed to create swapfile '%s' of size %sMB via %s: %s"
def create_swap(fname, size, method):
LOG.debug("Creating swapfile in '%s' on fstype '%s' using '%s'",
fname, fstype, method)
if method == "fallocate":
- cmd = ['fallocate', '-l', '%dM' % size, fname]
+ cmd = ['fallocate', '-l', '%sM' % size, fname]
elif method == "dd":
cmd = ['dd', 'if=/dev/zero', 'of=%s' % fname, 'bs=1M',
- 'count=%d' % size]
+ 'count=%s' % size]
try:
- util.subp(cmd, capture=True)
- except util.ProcessExecutionError as e:
+ subp.subp(cmd, capture=True)
+ except subp.ProcessExecutionError as e:
LOG.warning(errmsg, fname, size, method, e)
util.del_file(fname)
@@ -249,20 +263,22 @@ def create_swapfile(fname, size):
fstype = util.get_mount_info(swap_dir)[1]
- if fstype in ("xfs", "btrfs"):
+ if (fstype == "xfs" and
+ util.kernel_version() < (4, 18)) or fstype == "btrfs":
create_swap(fname, size, "dd")
else:
try:
create_swap(fname, size, "fallocate")
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
LOG.warning(errmsg, fname, size, "dd", e)
LOG.warning("Will attempt with dd.")
create_swap(fname, size, "dd")
- util.chmod(fname, 0o600)
+ if os.path.exists(fname):
+ util.chmod(fname, 0o600)
try:
- util.subp(['mkswap', fname])
- except util.ProcessExecutionError:
+ subp.subp(['mkswap', fname])
+ except subp.ProcessExecutionError:
util.del_file(fname)
raise
@@ -274,7 +290,6 @@ def setup_swapfile(fname, size=None, maxsize=None):
maxsize: the maximum size
"""
swap_dir = os.path.dirname(fname)
- mibsize = str(int(size / (2 ** 20)))
if str(size).lower() == "auto":
try:
memsize = util.read_meminfo()['total']
@@ -286,6 +301,7 @@ def setup_swapfile(fname, size=None, maxsize=None):
size = suggested_swapsize(fsys=swap_dir, maxsize=maxsize,
memsize=memsize)
+ mibsize = str(int(size / (2 ** 20)))
if not size:
LOG.debug("Not creating swap: suggested size was 0")
return
@@ -365,17 +381,18 @@ def handle(_name, cfg, cloud, log, _args):
fstab_devs = {}
fstab_removed = []
- for line in util.load_file(FSTAB_PATH).splitlines():
- if MNT_COMMENT in line:
- fstab_removed.append(line)
- continue
+ if os.path.exists(FSTAB_PATH):
+ for line in util.load_file(FSTAB_PATH).splitlines():
+ if MNT_COMMENT in line:
+ fstab_removed.append(line)
+ continue
- try:
- toks = WS.split(line)
- except Exception:
- pass
- fstab_devs[toks[0]] = line
- fstab_lines.append(line)
+ try:
+ toks = WS.split(line)
+ except Exception:
+ pass
+ fstab_devs[toks[0]] = line
+ fstab_lines.append(line)
for i in range(len(cfgmnt)):
# skip something that wasn't a list
@@ -525,9 +542,9 @@ def handle(_name, cfg, cloud, log, _args):
for cmd in activate_cmds:
fmt = "Activate mounts: %s:" + ' '.join(cmd)
try:
- util.subp(cmd)
+ subp.subp(cmd)
log.debug(fmt, "PASS")
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
log.warning(fmt, "FAIL")
util.logexc(log, fmt, "FAIL")
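
The swapfile changes narrow the dd fallback: fallocate is now used on xfs with kernel 4.18 or newer, while btrfs and older xfs kernels still get dd, since swap files created by fallocate there are unusable. A condensed sketch of the selection logic, with fstype and kernel version passed in rather than detected via the util helpers:

    import subprocess

    def pick_swap_method(fstype, kernel_version):
        """Return 'dd' where fallocate-created swap files do not work."""
        if fstype == "btrfs":
            return "dd"
        if fstype == "xfs" and kernel_version < (4, 18):
            return "dd"
        return "fallocate"

    def create_swap(fname, size_mib, method):
        if method == "fallocate":
            cmd = ["fallocate", "-l", "%sM" % size_mib, fname]
        else:
            cmd = ["dd", "if=/dev/zero", "of=%s" % fname,
                   "bs=1M", "count=%s" % size_mib]
        subprocess.run(cmd, check=True)
        subprocess.run(["mkswap", fname], check=True)
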
diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py
index 5498bbaa..3d7279d6 100644
--- a/cloudinit/config/cc_ntp.py
+++ b/cloudinit/config/cc_ntp.py
@@ -14,6 +14,7 @@ from cloudinit import log as logging
from cloudinit import temp_utils
from cloudinit import templater
from cloudinit import type_utils
+from cloudinit import subp
from cloudinit import util
from cloudinit.config.schema import get_schema_doc, validate_cloudconfig_schema
from cloudinit.settings import PER_INSTANCE
@@ -23,7 +24,8 @@ LOG = logging.getLogger(__name__)
frequency = PER_INSTANCE
NTP_CONF = '/etc/ntp.conf'
NR_POOL_SERVERS = 4
-distros = ['centos', 'debian', 'fedora', 'opensuse', 'rhel', 'sles', 'ubuntu']
+distros = ['alpine', 'centos', 'debian', 'fedora', 'opensuse', 'rhel',
+ 'sles', 'ubuntu']
NTP_CLIENT_CONFIG = {
'chrony': {
@@ -62,6 +64,17 @@ NTP_CLIENT_CONFIG = {
# This is Distro-specific configuration overrides of the base config
DISTRO_CLIENT_CONFIG = {
+ 'alpine': {
+ 'chrony': {
+ 'confpath': '/etc/chrony/chrony.conf',
+ 'service_name': 'chronyd',
+ },
+ 'ntp': {
+ 'confpath': '/etc/ntp.conf',
+ 'packages': [],
+ 'service_name': 'ntpd',
+ },
+ },
'debian': {
'chrony': {
'confpath': '/etc/chrony/chrony.conf',
@@ -113,11 +126,11 @@ schema = {
Handle ntp configuration. If ntp is not installed on the system and
ntp configuration is specified, ntp will be installed. If there is a
default ntp config file in the image or one is present in the
- distro's ntp package, it will be copied to ``/etc/ntp.conf.dist``
- before any changes are made. A list of ntp pools and ntp servers can
- be provided under the ``ntp`` config key. If no ntp ``servers`` or
- ``pools`` are provided, 4 pools will be used in the format
- ``{0-3}.{distro}.pool.ntp.org``."""),
+ distro's ntp package, it will be copied to a file with ``.dist``
+ appended to the filename before any changes are made. A list of ntp
+ pools and ntp servers can be provided under the ``ntp`` config key.
+ If no ntp ``servers`` or ``pools`` are provided, 4 pools will be used
+ in the format ``{0-3}.{distro}.pool.ntp.org``."""),
'distros': distros,
'examples': [
dedent("""\
@@ -169,8 +182,11 @@ schema = {
'uniqueItems': True,
'description': dedent("""\
List of ntp pools. If both pools and servers are
- empty, 4 default pool servers will be provided of
- the format ``{0-3}.{distro}.pool.ntp.org``.""")
+ empty, 4 default pool servers will be provided of
+ the format ``{0-3}.{distro}.pool.ntp.org``. NOTE:
+ for Alpine Linux when using the Busybox NTP client
+ this setting will be ignored due to the limited
+ functionality of Busybox's ntpd.""")
},
'servers': {
'type': 'array',
@@ -181,46 +197,46 @@ schema = {
'uniqueItems': True,
'description': dedent("""\
List of ntp servers. If both pools and servers are
- empty, 4 default pool servers will be provided with
- the format ``{0-3}.{distro}.pool.ntp.org``.""")
+ empty, 4 default pool servers will be provided with
+ the format ``{0-3}.{distro}.pool.ntp.org``.""")
},
'ntp_client': {
'type': 'string',
'default': 'auto',
'description': dedent("""\
Name of an NTP client to use to configure system NTP.
- When unprovided or 'auto' the default client preferred
- by the distribution will be used. The following
- built-in client names can be used to override existing
- configuration defaults: chrony, ntp, ntpdate,
- systemd-timesyncd."""),
+ When unprovided or 'auto' the default client preferred
+ by the distribution will be used. The following
+ built-in client names can be used to override existing
+ configuration defaults: chrony, ntp, ntpdate,
+ systemd-timesyncd."""),
},
'enabled': {
'type': 'boolean',
'default': True,
'description': dedent("""\
Attempt to enable ntp clients if set to True. If set
- to False, ntp client will not be configured or
- installed"""),
+ to False, ntp client will not be configured or
+ installed"""),
},
'config': {
'description': dedent("""\
Configuration settings or overrides for the
- ``ntp_client`` specified."""),
+ ``ntp_client`` specified."""),
'type': ['object'],
'properties': {
'confpath': {
'type': 'string',
'description': dedent("""\
The path to where the ``ntp_client``
- configuration is written."""),
+ configuration is written."""),
},
'check_exe': {
'type': 'string',
'description': dedent("""\
The executable name for the ``ntp_client``.
- For example, ntp service ``check_exe`` is
- 'ntpd' because it runs the ntpd binary."""),
+ For example, ntp service ``check_exe`` is
+ 'ntpd' because it runs the ntpd binary."""),
},
'packages': {
'type': 'array',
@@ -230,22 +246,22 @@ schema = {
'uniqueItems': True,
'description': dedent("""\
List of packages needed to be installed for the
- selected ``ntp_client``."""),
+ selected ``ntp_client``."""),
},
'service_name': {
'type': 'string',
'description': dedent("""\
The systemd or sysvinit service name used to
- start and stop the ``ntp_client``
- service."""),
+ start and stop the ``ntp_client``
+ service."""),
},
'template': {
'type': 'string',
'description': dedent("""\
Inline template allowing users to define their
- own ``ntp_client`` configuration template.
- The value must start with '## template:jinja'
- to enable use of templating support.
+ own ``ntp_client`` configuration template.
+ The value must start with '## template:jinja'
+ to enable use of templating support.
"""),
},
},
@@ -307,7 +323,7 @@ def select_ntp_client(ntp_client, distro):
if distro_ntp_client == "auto":
for client in distro.preferred_ntp_clients:
cfg = distro_cfg.get(client)
- if util.which(cfg.get('check_exe')):
+ if subp.which(cfg.get('check_exe')):
LOG.debug('Selected NTP client "%s", already installed',
client)
clientcfg = cfg
@@ -336,7 +352,7 @@ def install_ntp_client(install_func, packages=None, check_exe="ntpd"):
    @param check_exe: string. The name of a binary that indicates whether
        the specified package is already installed.
"""
- if util.which(check_exe):
+ if subp.which(check_exe):
return
if packages is None:
packages = ['ntp']
@@ -363,21 +379,30 @@ def generate_server_names(distro):
"""
names = []
pool_distro = distro
- # For legal reasons x.pool.sles.ntp.org does not exist,
- # use the opensuse pool
+
if distro == 'sles':
+ # For legal reasons x.pool.sles.ntp.org does not exist,
+ # use the opensuse pool
pool_distro = 'opensuse'
+ elif distro == 'alpine':
+ # Alpine-specific pool (i.e. x.alpine.pool.ntp.org) does not exist
+ # so use general x.pool.ntp.org instead.
+ pool_distro = ''
+
for x in range(0, NR_POOL_SERVERS):
- name = "%d.%s.pool.ntp.org" % (x, pool_distro)
- names.append(name)
+ names.append(".".join(
+ [n for n in [str(x)] + [pool_distro] + ['pool.ntp.org'] if n]))
+
return names
-def write_ntp_config_template(distro_name, servers=None, pools=None,
- path=None, template_fn=None, template=None):
+def write_ntp_config_template(distro_name, service_name=None, servers=None,
+ pools=None, path=None, template_fn=None,
+ template=None):
"""Render a ntp client configuration for the specified client.
@param distro_name: string. The distro class name.
+ @param service_name: string. The name of the NTP client service.
@param servers: A list of strings specifying ntp servers. Defaults to empty
list.
@param pools: A list of strings specifying ntp pools. Defaults to empty
@@ -396,7 +421,14 @@ def write_ntp_config_template(distro_name, servers=None, pools=None,
if not pools:
pools = []
- if len(servers) == 0 and len(pools) == 0:
+ if (len(servers) == 0 and distro_name == 'alpine' and
+ service_name == 'ntpd'):
+ # Alpine's Busybox ntpd only understands "servers" configuration
+ # and not "pool" configuration.
+ servers = generate_server_names(distro_name)
+ LOG.debug(
+ 'Adding distro default ntp servers: %s', ','.join(servers))
+ elif len(servers) == 0 and len(pools) == 0:
pools = generate_server_names(distro_name)
LOG.debug(
'Adding distro default ntp pool servers: %s', ','.join(pools))
@@ -431,7 +463,7 @@ def reload_ntp(service, systemd=False):
cmd = ['systemctl', 'reload-or-restart', service]
else:
cmd = ['service', service, 'restart']
- util.subp(cmd, capture=True)
+ subp.subp(cmd, capture=True)
def supplemental_schema_validation(ntp_config):
@@ -531,6 +563,8 @@ def handle(name, cfg, cloud, log, _args):
raise RuntimeError(msg)
write_ntp_config_template(cloud.distro.name,
+ service_name=ntp_client_config.get(
+ 'service_name'),
servers=ntp_cfg.get('servers', []),
pools=ntp_cfg.get('pools', []),
path=ntp_client_config.get('confpath'),
@@ -543,7 +577,7 @@ def handle(name, cfg, cloud, log, _args):
try:
reload_ntp(ntp_client_config['service_name'],
systemd=cloud.distro.uses_systemd())
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
LOG.exception("Failed to reload/start ntp service: %s", e)
raise
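
The rewritten generate_server_names drops the distro segment for Alpine instead of producing a pool domain that does not exist. A standalone rendition showing all three cases:

    NR_POOL_SERVERS = 4

    def generate_server_names(distro):
        pool_distro = distro
        if distro == 'sles':
            pool_distro = 'opensuse'  # x.pool.sles.ntp.org does not exist
        elif distro == 'alpine':
            pool_distro = ''          # no Alpine pool; use the generic one
        return ['.'.join(p for p in (str(x), pool_distro, 'pool.ntp.org') if p)
                for x in range(NR_POOL_SERVERS)]

    print(generate_server_names('debian')[0])  # 0.debian.pool.ntp.org
    print(generate_server_names('sles')[0])    # 0.opensuse.pool.ntp.org
    print(generate_server_names('alpine')[0])  # 0.pool.ntp.org
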
diff --git a/cloudinit/config/cc_package_update_upgrade_install.py b/cloudinit/config/cc_package_update_upgrade_install.py
index 86afffef..036baf85 100644
--- a/cloudinit/config/cc_package_update_upgrade_install.py
+++ b/cloudinit/config/cc_package_update_upgrade_install.py
@@ -43,6 +43,7 @@ import os
import time
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import util
REBOOT_FILE = "/var/run/reboot-required"
@@ -57,7 +58,7 @@ def _multi_cfg_bool_get(cfg, *keys):
def _fire_reboot(log, wait_attempts=6, initial_sleep=1, backoff=2):
- util.subp(REBOOT_CMD)
+ subp.subp(REBOOT_CMD)
start = time.time()
wait_time = initial_sleep
for _i in range(0, wait_attempts):
diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py
index b8e27090..733c3910 100644
--- a/cloudinit/config/cc_phone_home.py
+++ b/cloudinit/config/cc_phone_home.py
@@ -19,6 +19,7 @@ keys to post. Available keys are:
- ``pub_key_dsa``
- ``pub_key_rsa``
- ``pub_key_ecdsa``
+ - ``pub_key_ed25519``
- ``instance_id``
- ``hostname``
 - ``fqdn``
@@ -52,6 +53,7 @@ POST_LIST_ALL = [
'pub_key_dsa',
'pub_key_rsa',
'pub_key_ecdsa',
+ 'pub_key_ed25519',
'instance_id',
'hostname',
'fqdn'
@@ -105,6 +107,7 @@ def handle(name, cfg, cloud, log, args):
'pub_key_dsa': '/etc/ssh/ssh_host_dsa_key.pub',
'pub_key_rsa': '/etc/ssh/ssh_host_rsa_key.pub',
'pub_key_ecdsa': '/etc/ssh/ssh_host_ecdsa_key.pub',
+ 'pub_key_ed25519': '/etc/ssh/ssh_host_ed25519_key.pub',
}
for (n, path) in pubkeys.items():
diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py
index 3e81a3c7..6fcb8a7d 100644
--- a/cloudinit/config/cc_power_state_change.py
+++ b/cloudinit/config/cc_power_state_change.py
@@ -22,9 +22,8 @@ The ``delay`` key specifies a duration to be added onto any shutdown command
used. Therefore, if a 5 minute delay and a 120 second shutdown are specified,
the maximum amount of time between cloud-init starting and the system shutting
down is 7 minutes, and the minimum amount of time is 5 minutes. The ``delay``
-key must have an argument in a form that the ``shutdown`` utility recognizes.
-The most common format is the form ``+5`` for 5 minutes. See ``man shutdown``
-for more options.
+key must have an argument in either the form ``+5`` for 5 minutes or ``now``
+for immediate shutdown.
Optionally, a command can be run to determine whether or not
the system should shut down. The command to be run should be specified in the
@@ -33,6 +32,10 @@ the system should shut down. The command to be run should be specified in the
``condition`` key is omitted or the command specified by the ``condition``
key returns 0.
+.. note::
+ With Alpine Linux any message value specified is ignored as Alpine's halt,
+ poweroff, and reboot commands do not support broadcasting a message.
+
**Internal name:** ``cc_power_state_change``
**Module frequency:** per instance
@@ -56,6 +59,7 @@ import subprocess
import time
from cloudinit.settings import PER_INSTANCE
+from cloudinit import subp
from cloudinit import util
frequency = PER_INSTANCE
@@ -71,7 +75,7 @@ def givecmdline(pid):
# PID COMM ARGS
# 1 init /bin/init --
if util.is_FreeBSD():
- (output, _err) = util.subp(['procstat', '-c', str(pid)])
+ (output, _err) = subp.subp(['procstat', '-c', str(pid)])
line = output.splitlines()[1]
m = re.search(r'\d+ (\w|\.|-)+\s+(/\w.+)', line)
return m.group(2)
@@ -111,9 +115,9 @@ def check_condition(cond, log=None):
return False
-def handle(_name, cfg, _cloud, log, _args):
+def handle(_name, cfg, cloud, log, _args):
try:
- (args, timeout, condition) = load_power_state(cfg)
+ (args, timeout, condition) = load_power_state(cfg, cloud.distro.name)
if args is None:
log.debug("no power_state provided. doing nothing")
return
@@ -140,7 +144,19 @@ def handle(_name, cfg, _cloud, log, _args):
condition, execmd, [args, devnull_fp])
-def load_power_state(cfg):
+def convert_delay(delay, fmt=None, scale=None):
+ if not fmt:
+ fmt = "+%s"
+ if not scale:
+ scale = 1
+
+ if delay != "now":
+ delay = fmt % int(int(delay) * int(scale))
+
+ return delay
+
+
+def load_power_state(cfg, distro_name):
# returns a tuple of shutdown_command, timeout
# shutdown_command is None if no config found
pstate = cfg.get('power_state')
@@ -160,26 +176,42 @@ def load_power_state(cfg):
(','.join(opt_map.keys()), mode))
delay = pstate.get("delay", "now")
- # convert integer 30 or string '30' to '+30'
- try:
- delay = "+%s" % int(delay)
- except ValueError:
- pass
+ message = pstate.get("message")
+ scale = 1
+ fmt = "+%s"
+ command = ["shutdown", opt_map[mode]]
+
+ if distro_name == 'alpine':
+ # Convert integer 30 or string '30' to '1800' (seconds) as Alpine's
+ # halt/poweroff/reboot commands take seconds rather than minutes.
+ scale = 60
+ # No "+" in front of delay value as not supported by Alpine's commands.
+ fmt = "%s"
+ if delay == "now":
+ # Alpine's commands do not understand "now".
+ delay = "0"
+ command = [mode, "-d"]
+ # Alpine's commands don't support a message.
+ message = None
- if delay != "now" and not re.match(r"\+[0-9]+", delay):
+ try:
+ delay = convert_delay(delay, fmt=fmt, scale=scale)
+ except ValueError as e:
raise TypeError(
"power_state[delay] must be 'now' or '+m' (minutes)."
- " found '%s'." % delay)
+ " found '%s'." % delay
+ ) from e
- args = ["shutdown", opt_map[mode], delay]
- if pstate.get("message"):
- args.append(pstate.get("message"))
+ args = command + [delay]
+ if message:
+ args.append(message)
try:
timeout = float(pstate.get('timeout', 30.0))
- except ValueError:
- raise ValueError("failed to convert timeout '%s' to float." %
- pstate['timeout'])
+ except ValueError as e:
+ raise ValueError(
+ "failed to convert timeout '%s' to float." % pstate['timeout']
+ ) from e
condition = pstate.get("condition", True)
if not isinstance(condition, (str, list, bool)):
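
convert_delay concentrates the delay formatting in one place: the default path keeps shutdown's '+minutes' form, while Alpine's halt/poweroff/reboot commands take bare seconds. A quick demonstration of the three cases:

    def convert_delay(delay, fmt=None, scale=None):
        fmt = fmt or "+%s"
        scale = scale or 1
        if delay != "now":
            delay = fmt % int(int(delay) * int(scale))
        return delay

    print(convert_delay("5"))                      # '+5'  -> shutdown +5
    print(convert_delay("5", fmt="%s", scale=60))  # '300' -> Alpine seconds
    print(convert_delay("now"))                    # 'now' passes through
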
diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py
index c01f5b8f..bc981cf4 100644
--- a/cloudinit/config/cc_puppet.py
+++ b/cloudinit/config/cc_puppet.py
@@ -83,6 +83,7 @@ import yaml
from io import StringIO
from cloudinit import helpers
+from cloudinit import subp
from cloudinit import util
PUPPET_CONF_PATH = '/etc/puppet/puppet.conf'
@@ -105,14 +106,14 @@ class PuppetConstants(object):
def _autostart_puppet(log):
# Set puppet to automatically start
if os.path.exists('/etc/default/puppet'):
- util.subp(['sed', '-i',
+ subp.subp(['sed', '-i',
'-e', 's/^START=.*/START=yes/',
'/etc/default/puppet'], capture=False)
elif os.path.exists('/bin/systemctl'):
- util.subp(['/bin/systemctl', 'enable', 'puppet.service'],
+ subp.subp(['/bin/systemctl', 'enable', 'puppet.service'],
capture=False)
elif os.path.exists('/sbin/chkconfig'):
- util.subp(['/sbin/chkconfig', 'puppet', 'on'], capture=False)
+ subp.subp(['/sbin/chkconfig', 'puppet', 'on'], capture=False)
else:
log.warning(("Sorry we do not know how to enable"
" puppet services on this system"))
@@ -159,9 +160,9 @@ def handle(name, cfg, cloud, log, _args):
cleaned_lines = [i.lstrip() for i in contents.splitlines()]
cleaned_contents = '\n'.join(cleaned_lines)
# Move to puppet_config.read_file when dropping py2.7
- puppet_config.readfp( # pylint: disable=W1505
+ puppet_config.read_file(
StringIO(cleaned_contents),
- filename=p_constants.conf_path)
+ source=p_constants.conf_path)
for (cfg_name, cfg) in puppet_cfg['conf'].items():
# Cert configuration is a special case
# Dump the puppet master ca certificate in the correct place
@@ -203,6 +204,6 @@ def handle(name, cfg, cloud, log, _args):
_autostart_puppet(log)
# Start puppetd
- util.subp(['service', 'puppet', 'start'], capture=False)
+ subp.subp(['service', 'puppet', 'start'], capture=False)
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index 01dfc125..978d2ee0 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -19,6 +19,7 @@ from textwrap import dedent
from cloudinit.config.schema import (
get_schema_doc, validate_cloudconfig_schema)
from cloudinit.settings import PER_ALWAYS
+from cloudinit import subp
from cloudinit import util
NOBLOCK = "noblock"
@@ -88,11 +89,11 @@ def _resize_zfs(mount_point, devpth):
def _get_dumpfs_output(mount_point):
- return util.subp(['dumpfs', '-m', mount_point])[0]
+ return subp.subp(['dumpfs', '-m', mount_point])[0]
def _get_gpart_output(part):
- return util.subp(['gpart', 'show', part])[0]
+ return subp.subp(['gpart', 'show', part])[0]
def _can_skip_resize_ufs(mount_point, devpth):
@@ -117,14 +118,12 @@ def _can_skip_resize_ufs(mount_point, devpth):
if o == "-f":
frag_sz = int(a)
# check the current partition size
- """
- # gpart show /dev/da0
-=> 40 62914480 da0 GPT (30G)
- 40 1024 1 freebsd-boot (512K)
- 1064 58719232 2 freebsd-ufs (28G)
- 58720296 3145728 3 freebsd-swap (1.5G)
- 61866024 1048496 - free - (512M)
- """
+ # Example output from `gpart show /dev/da0`:
+ # => 40 62914480 da0 GPT (30G)
+ # 40 1024 1 freebsd-boot (512K)
+ # 1064 58719232 2 freebsd-ufs (28G)
+ # 58720296 3145728 3 freebsd-swap (1.5G)
+ # 61866024 1048496 - free - (512M)
expect_sz = None
m = re.search('^(/dev/.+)p([0-9])$', devpth)
gpart_res = _get_gpart_output(m.group(1))
@@ -306,8 +305,8 @@ def handle(name, cfg, _cloud, log, args):
def do_resize(resize_cmd, log):
try:
- util.subp(resize_cmd)
- except util.ProcessExecutionError:
+ subp.subp(resize_cmd)
+ except subp.ProcessExecutionError:
util.logexc(log, "Failed to resize filesystem (cmd=%s)", resize_cmd)
raise
# TODO(harlowja): Should we add a fsck check after this to make
diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py
index 69f4768a..519e66eb 100644
--- a/cloudinit/config/cc_resolv_conf.py
+++ b/cloudinit/config/cc_resolv_conf.py
@@ -30,7 +30,7 @@ are configured correctly.
**Module frequency:** per instance
-**Supported distros:** fedora, rhel, sles
+**Supported distros:** alpine, fedora, opensuse, rhel, sles
**Config keys**::
@@ -55,7 +55,7 @@ LOG = logging.getLogger(__name__)
frequency = PER_INSTANCE
-distros = ['fedora', 'opensuse', 'rhel', 'sles']
+distros = ['alpine', 'fedora', 'opensuse', 'rhel', 'sles']
def generate_resolv_conf(template_fn, params, target_fname="/etc/resolv.conf"):
diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py
index 28c79b83..28d62e9d 100644
--- a/cloudinit/config/cc_rh_subscription.py
+++ b/cloudinit/config/cc_rh_subscription.py
@@ -39,6 +39,7 @@ Subscription`` example config.
"""
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -173,7 +174,7 @@ class SubscriptionManager(object):
try:
_sub_man_cli(cmd)
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
return False
return True
@@ -200,7 +201,7 @@ class SubscriptionManager(object):
try:
return_out = _sub_man_cli(cmd, logstring_val=True)[0]
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
if e.stdout == "":
self.log_warn("Registration failed due "
"to: {0}".format(e.stderr))
@@ -223,7 +224,7 @@ class SubscriptionManager(object):
# Attempting to register the system only
try:
return_out = _sub_man_cli(cmd, logstring_val=True)[0]
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
if e.stdout == "":
self.log_warn("Registration failed due "
"to: {0}".format(e.stderr))
@@ -246,7 +247,7 @@ class SubscriptionManager(object):
try:
return_out = _sub_man_cli(cmd)[0]
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
if e.stdout.rstrip() != '':
for line in e.stdout.split("\n"):
if line != '':
@@ -264,7 +265,7 @@ class SubscriptionManager(object):
cmd = ['attach', '--auto']
try:
return_out = _sub_man_cli(cmd)[0]
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
self.log_warn("Auto-attach failed with: {0}".format(e))
return False
for line in return_out.split("\n"):
@@ -341,7 +342,7 @@ class SubscriptionManager(object):
"system: %s", (", ".join(pool_list))
.replace('--pool=', ''))
return True
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
self.log_warn("Unable to attach pool {0} "
"due to {1}".format(pool, e))
return False
@@ -414,7 +415,7 @@ class SubscriptionManager(object):
try:
_sub_man_cli(cmd)
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
self.log_warn("Unable to alter repos due to {0}".format(e))
return False
@@ -432,11 +433,11 @@ class SubscriptionManager(object):
def _sub_man_cli(cmd, logstring_val=False):
'''
- Uses the prefered cloud-init subprocess def of util.subp
+    Uses the preferred cloud-init subprocess function subp.subp
and runs subscription-manager. Breaking this to a
separate function for later use in mocking and unittests
'''
- return util.subp(['subscription-manager'] + cmd,
+ return subp.subp(['subscription-manager'] + cmd,
logstring=logstring_val)
diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
index 5df0137d..2a2bc931 100644
--- a/cloudinit/config/cc_rsyslog.py
+++ b/cloudinit/config/cc_rsyslog.py
@@ -182,6 +182,7 @@ import os
import re
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import util
DEF_FILENAME = "20-cloud-config.conf"
@@ -215,7 +216,7 @@ def reload_syslog(command=DEF_RELOAD, systemd=False):
cmd = ['service', service, 'restart']
else:
cmd = command
- util.subp(cmd, capture=True)
+ subp.subp(cmd, capture=True)
def load_config(cfg):
@@ -346,8 +347,10 @@ class SyslogRemotesLine(object):
if self.port:
try:
int(self.port)
- except ValueError:
- raise ValueError("port '%s' is not an integer" % self.port)
+ except ValueError as e:
+ raise ValueError(
+ "port '%s' is not an integer" % self.port
+ ) from e
if not self.addr:
raise ValueError("address is required")
@@ -429,7 +432,7 @@ def handle(name, cfg, cloud, log, _args):
restarted = reload_syslog(
command=mycfg[KEYNAME_RELOAD],
systemd=cloud.distro.uses_systemd()),
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
restarted = False
log.warning("Failed to reload syslog", e)
diff --git a/cloudinit/config/cc_salt_minion.py b/cloudinit/config/cc_salt_minion.py
index 5dd8de37..b61876aa 100644
--- a/cloudinit/config/cc_salt_minion.py
+++ b/cloudinit/config/cc_salt_minion.py
@@ -45,7 +45,7 @@ specify them with ``pkg_name``, ``service_name`` and ``config_dir``.
import os
-from cloudinit import safeyaml, util
+from cloudinit import safeyaml, subp, util
from cloudinit.distros import rhel_util
@@ -130,6 +130,6 @@ def handle(name, cfg, cloud, log, _args):
# restart salt-minion. 'service' will start even if not started. if it
# was started, it needs to be restarted for config change.
- util.subp(['service', const.srv_name, 'restart'], capture=False)
+ subp.subp(['service', const.srv_name, 'restart'], capture=False)
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_scripts_per_boot.py b/cloudinit/config/cc_scripts_per_boot.py
index 588e1b03..1e3f419e 100644
--- a/cloudinit/config/cc_scripts_per_boot.py
+++ b/cloudinit/config/cc_scripts_per_boot.py
@@ -24,7 +24,7 @@ module does not accept any config keys.
import os
-from cloudinit import util
+from cloudinit import subp
from cloudinit.settings import PER_ALWAYS
@@ -38,7 +38,7 @@ def handle(name, _cfg, cloud, log, _args):
# https://forums.aws.amazon.com/thread.jspa?threadID=96918
runparts_path = os.path.join(cloud.get_cpath(), 'scripts', SCRIPT_SUBDIR)
try:
- util.runparts(runparts_path)
+ subp.runparts(runparts_path)
except Exception:
log.warning("Failed to run module %s (%s in %s)",
name, SCRIPT_SUBDIR, runparts_path)
diff --git a/cloudinit/config/cc_scripts_per_instance.py b/cloudinit/config/cc_scripts_per_instance.py
index 75549b52..5966fb9a 100644
--- a/cloudinit/config/cc_scripts_per_instance.py
+++ b/cloudinit/config/cc_scripts_per_instance.py
@@ -27,7 +27,7 @@ the system. As a result per-instance scripts will run again.
import os
-from cloudinit import util
+from cloudinit import subp
from cloudinit.settings import PER_INSTANCE
@@ -41,7 +41,7 @@ def handle(name, _cfg, cloud, log, _args):
# https://forums.aws.amazon.com/thread.jspa?threadID=96918
runparts_path = os.path.join(cloud.get_cpath(), 'scripts', SCRIPT_SUBDIR)
try:
- util.runparts(runparts_path)
+ subp.runparts(runparts_path)
except Exception:
log.warning("Failed to run module %s (%s in %s)",
name, SCRIPT_SUBDIR, runparts_path)
diff --git a/cloudinit/config/cc_scripts_per_once.py b/cloudinit/config/cc_scripts_per_once.py
index 259bdfab..bcca859e 100644
--- a/cloudinit/config/cc_scripts_per_once.py
+++ b/cloudinit/config/cc_scripts_per_once.py
@@ -25,7 +25,7 @@ be run in alphabetical order. This module does not accept any config keys.
import os
-from cloudinit import util
+from cloudinit import subp
from cloudinit.settings import PER_ONCE
@@ -39,7 +39,7 @@ def handle(name, _cfg, cloud, log, _args):
# https://forums.aws.amazon.com/thread.jspa?threadID=96918
runparts_path = os.path.join(cloud.get_cpath(), 'scripts', SCRIPT_SUBDIR)
try:
- util.runparts(runparts_path)
+ subp.runparts(runparts_path)
except Exception:
log.warning("Failed to run module %s (%s in %s)",
name, SCRIPT_SUBDIR, runparts_path)
diff --git a/cloudinit/config/cc_scripts_user.py b/cloudinit/config/cc_scripts_user.py
index d940dbd6..215703ef 100644
--- a/cloudinit/config/cc_scripts_user.py
+++ b/cloudinit/config/cc_scripts_user.py
@@ -27,7 +27,7 @@ This module does not accept any config keys.
import os
-from cloudinit import util
+from cloudinit import subp
from cloudinit.settings import PER_INSTANCE
@@ -42,7 +42,7 @@ def handle(name, _cfg, cloud, log, _args):
# go here...
runparts_path = os.path.join(cloud.get_ipath_cur(), SCRIPT_SUBDIR)
try:
- util.runparts(runparts_path)
+ subp.runparts(runparts_path)
except Exception:
log.warning("Failed to run module %s (%s in %s)",
name, SCRIPT_SUBDIR, runparts_path)
diff --git a/cloudinit/config/cc_scripts_vendor.py b/cloudinit/config/cc_scripts_vendor.py
index faac9242..e0a4bfff 100644
--- a/cloudinit/config/cc_scripts_vendor.py
+++ b/cloudinit/config/cc_scripts_vendor.py
@@ -28,6 +28,7 @@ entry under the ``vendor_data`` config key.
import os
+from cloudinit import subp
from cloudinit import util
from cloudinit.settings import PER_INSTANCE
@@ -46,7 +47,7 @@ def handle(name, cfg, cloud, log, _args):
prefix = util.get_cfg_by_path(cfg, ('vendor_data', 'prefix'), [])
try:
- util.runparts(runparts_path, exe_prefix=prefix)
+ subp.runparts(runparts_path, exe_prefix=prefix)
except Exception:
log.warning("Failed to run module %s (%s in %s)",
name, SCRIPT_SUBDIR, runparts_path)
diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py
index b65f3ed9..4fb9b44e 100644
--- a/cloudinit/config/cc_seed_random.py
+++ b/cloudinit/config/cc_seed_random.py
@@ -65,6 +65,7 @@ from io import BytesIO
from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
+from cloudinit import subp
from cloudinit import util
frequency = PER_INSTANCE
@@ -92,14 +93,14 @@ def handle_random_seed_command(command, required, env=None):
return
cmd = command[0]
- if not util.which(cmd):
+ if not subp.which(cmd):
if required:
raise ValueError(
"command '{cmd}' not found but required=true".format(cmd=cmd))
else:
LOG.debug("command '%s' not found for seed_command", cmd)
return
- util.subp(command, env=env, capture=False)
+ subp.subp(command, env=env, capture=False)
def handle(name, cfg, cloud, log, _args):
diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py
index 10d6d197..1d23d80d 100644
--- a/cloudinit/config/cc_set_hostname.py
+++ b/cloudinit/config/cc_set_hostname.py
@@ -55,7 +55,6 @@ class SetHostnameError(Exception):
This may happen if we attempt to set the hostname early in cloud-init's
init-local timeframe as certain services may not be running yet.
"""
- pass
def handle(name, cfg, cloud, log, _args):
@@ -86,7 +85,7 @@ def handle(name, cfg, cloud, log, _args):
except Exception as e:
msg = "Failed to set the hostname to %s (%s)" % (fqdn, hostname)
util.logexc(log, msg)
- raise SetHostnameError("%s: %s" % (msg, e))
+ raise SetHostnameError("%s: %s" % (msg, e)) from e
write_json(prev_fn, {'hostname': hostname, 'fqdn': fqdn})
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
index 4943d545..d6b5682d 100755
--- a/cloudinit/config/cc_set_passwords.py
+++ b/cloudinit/config/cc_set_passwords.py
@@ -83,6 +83,7 @@ import sys
from cloudinit.distros import ug_util
from cloudinit import log as logging
from cloudinit.ssh_util import update_ssh_config
+from cloudinit import subp
from cloudinit import util
from string import ascii_letters, digits
@@ -128,7 +129,7 @@ def handle_ssh_pwauth(pw_auth, service_cmd=None, service_name="ssh"):
cmd = list(service_cmd) + ["restart", service_name]
else:
cmd = list(service_cmd) + [service_name, "restart"]
- util.subp(cmd)
+ subp.subp(cmd)
LOG.debug("Restarted the SSH daemon.")
@@ -241,12 +242,12 @@ def rand_user_password(pwlen=20):
def chpasswd(distro, plist_in, hashed=False):
- if util.is_FreeBSD():
+ if util.is_BSD():
for pentry in plist_in.splitlines():
u, p = pentry.split(":")
distro.set_passwd(u, p, hashed=hashed)
else:
cmd = ['chpasswd'] + (['-e'] if hashed else [])
- util.subp(cmd, plist_in)
+ subp.subp(cmd, plist_in)
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_snap.py b/cloudinit/config/cc_snap.py
index 90724b81..20ed7d2f 100644
--- a/cloudinit/config/cc_snap.py
+++ b/cloudinit/config/cc_snap.py
@@ -12,6 +12,7 @@ from cloudinit.config.schema import (
get_schema_doc, validate_cloudconfig_schema)
from cloudinit.settings import PER_INSTANCE
from cloudinit.subp import prepend_base_command
+from cloudinit import subp
from cloudinit import util
@@ -61,9 +62,9 @@ schema = {
snap:
assertions:
00: |
- signed_assertion_blob_here
+ signed_assertion_blob_here
02: |
- signed_assertion_blob_here
+ signed_assertion_blob_here
commands:
00: snap create-user --sudoer --known <snap-user>@mydomain.com
01: snap install canonical-livepatch
@@ -85,6 +86,21 @@ schema = {
01: ['snap', 'install', 'vlc']
02: snap install vlc
03: 'snap install vlc'
+ """), dedent("""\
+ # You can use a list of commands
+ snap:
+ commands:
+ - ['install', 'vlc']
+ - ['snap', 'install', 'vlc']
+ - snap install vlc
+ - 'snap install vlc'
+ """), dedent("""\
+ # You can use a list of assertions
+ snap:
+ assertions:
+ - signed_assertion_blob_here
+ - |
+ signed_assertion_blob_here
""")],
'frequency': PER_INSTANCE,
'type': 'object',
@@ -98,7 +114,8 @@ schema = {
'additionalItems': False, # Reject items non-string
'minItems': 1,
'minProperties': 1,
- 'uniqueItems': True
+ 'uniqueItems': True,
+ 'additionalProperties': {'type': 'string'},
},
'commands': {
'type': ['object', 'array'], # Array of strings or dict
@@ -110,6 +127,12 @@ schema = {
'additionalItems': False, # Reject non-string & non-list
'minItems': 1,
'minProperties': 1,
+ 'additionalProperties': {
+ 'oneOf': [
+ {'type': 'string'},
+ {'type': 'array', 'items': {'type': 'string'}},
+ ],
+ },
},
'squashfuse_in_container': {
'type': 'boolean'
@@ -122,10 +145,6 @@ schema = {
}
}
-# TODO schema for 'assertions' and 'commands' are too permissive at the moment.
-# Once python-jsonschema supports schema draft 6 add support for arbitrary
-# object keys with 'patternProperties' constraint to validate string values.
-
__doc__ = get_schema_doc(schema) # Supplement python help()
SNAP_CMD = "snap"
@@ -157,7 +176,7 @@ def add_assertions(assertions):
LOG.debug('Snap acking: %s', asrt.split('\n')[0:2])
util.write_file(ASSERTIONS_FILE, combined.encode('utf-8'))
- util.subp(snap_cmd + [ASSERTIONS_FILE], capture=True)
+ subp.subp(snap_cmd + [ASSERTIONS_FILE], capture=True)
def run_commands(commands):
@@ -186,8 +205,8 @@ def run_commands(commands):
for command in fixed_snap_commands:
shell = isinstance(command, str)
try:
- util.subp(command, shell=shell, status_cb=sys.stderr.write)
- except util.ProcessExecutionError as e:
+ subp.subp(command, shell=shell, status_cb=sys.stderr.write)
+ except subp.ProcessExecutionError as e:
cmd_failures.append(str(e))
if cmd_failures:
msg = 'Failures running snap commands:\n{cmd_failures}'.format(
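
The additionalProperties constraints tighten what the previously permissive assertions and commands objects accept once the schema is enforced. A sketch of the effect on the assertions object, using jsonschema directly:

    import jsonschema

    assertions_schema = {
        'type': ['object', 'array'],
        'items': {'type': 'string'},
        'additionalProperties': {'type': 'string'},
    }

    # Accepted: every value under an arbitrary key is a string.
    jsonschema.validate({'00': 'signed_assertion_blob_here'},
                        assertions_schema)

    # Rejected: assertion values must be strings.
    try:
        jsonschema.validate({'00': 123}, assertions_schema)
    except jsonschema.ValidationError as e:
        print(e.message)
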
diff --git a/cloudinit/config/cc_spacewalk.py b/cloudinit/config/cc_spacewalk.py
index 1020e944..95083607 100644
--- a/cloudinit/config/cc_spacewalk.py
+++ b/cloudinit/config/cc_spacewalk.py
@@ -27,7 +27,7 @@ For more information about spacewalk see: https://fedorahosted.org/spacewalk/
activation_key: <key>
"""
-from cloudinit import util
+from cloudinit import subp
distros = ['redhat', 'fedora']
@@ -41,9 +41,9 @@ def is_registered():
# assume we aren't registered; which is sorta ghetto...
already_registered = False
try:
- util.subp(['rhn-profile-sync', '--verbose'], capture=False)
+ subp.subp(['rhn-profile-sync', '--verbose'], capture=False)
already_registered = True
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
if e.exit_code != 1:
raise
return already_registered
@@ -65,7 +65,7 @@ def do_register(server, profile_name,
cmd.extend(['--sslCACert', str(ca_cert_path)])
if activation_key:
cmd.extend(['--activationkey', str(activation_key)])
- util.subp(cmd, capture=False)
+ subp.subp(cmd, capture=False)
def handle(name, cfg, cloud, log, _args):
diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index 163cce99..9b2a333a 100755
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -35,6 +35,42 @@ root login is disabled, and root login opts are set to::
no-port-forwarding,no-agent-forwarding,no-X11-forwarding
+Supported public key types for the ``ssh_authorized_keys`` are:
+
+ - dsa
+ - rsa
+ - ecdsa
+ - ed25519
+ - ecdsa-sha2-nistp256-cert-v01@openssh.com
+ - ecdsa-sha2-nistp256
+ - ecdsa-sha2-nistp384-cert-v01@openssh.com
+ - ecdsa-sha2-nistp384
+ - ecdsa-sha2-nistp521-cert-v01@openssh.com
+ - ecdsa-sha2-nistp521
+ - sk-ecdsa-sha2-nistp256-cert-v01@openssh.com
+ - sk-ecdsa-sha2-nistp256@openssh.com
+ - sk-ssh-ed25519-cert-v01@openssh.com
+ - sk-ssh-ed25519@openssh.com
+ - ssh-dss-cert-v01@openssh.com
+ - ssh-dss
+ - ssh-ed25519-cert-v01@openssh.com
+ - ssh-ed25519
+ - ssh-rsa-cert-v01@openssh.com
+ - ssh-rsa
+ - ssh-xmss-cert-v01@openssh.com
+ - ssh-xmss@openssh.com
+
+.. note::
+    This list was filtered from the supported key types in the `OpenSSH`_
+    source, with the signature-only key types removed. Please see
+    ``ssh_util`` for more information.
+
+    ``dsa``, ``rsa``, ``ecdsa`` and ``ed25519`` are kept for legacy reasons,
+    as they are valid public key types on some older distros. They may be
+    removed in the future once support for those distros is dropped.
+
+.. _OpenSSH: https://github.com/openssh/openssh-portable/blob/master/sshkey.c
+
Host Keys
^^^^^^^^^
@@ -116,6 +152,7 @@ import sys
from cloudinit.distros import ug_util
from cloudinit import ssh_util
+from cloudinit import subp
from cloudinit import util
@@ -164,7 +201,7 @@ def handle(_name, cfg, cloud, log, _args):
try:
# TODO(harlowja): Is this guard needed?
with util.SeLinuxGuard("/etc/ssh", recursive=True):
- util.subp(cmd, capture=False)
+ subp.subp(cmd, capture=False)
log.debug("Generated a key for %s from %s", pair[0], pair[1])
except Exception:
util.logexc(log, "Failed generated a key for %s from %s",
@@ -186,9 +223,9 @@ def handle(_name, cfg, cloud, log, _args):
# TODO(harlowja): Is this guard needed?
with util.SeLinuxGuard("/etc/ssh", recursive=True):
try:
- out, err = util.subp(cmd, capture=True, env=lang_c)
+ out, err = subp.subp(cmd, capture=True, env=lang_c)
sys.stdout.write(util.decode_binary(out))
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
err = util.decode_binary(e.stderr).lower()
if (e.exit_code == 1 and
err.lower().startswith("unknown key")):
diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py
index 7ac1c8cf..05d30ad1 100755
--- a/cloudinit/config/cc_ssh_authkey_fingerprints.py
+++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py
@@ -13,7 +13,7 @@ Write fingerprints of authorized keys for each user to log. This is enabled by
default, but can be disabled using ``no_ssh_fingerprints``. The hash type for
the keys can be specified, but defaults to ``sha256``.
-**Internal name:** `` cc_ssh_authkey_fingerprints``
+**Internal name:** ``cc_ssh_authkey_fingerprints``
**Module frequency:** per instance
@@ -59,8 +59,8 @@ def _gen_fingerprint(b64_text, hash_meth='sha256'):
def _is_printable_key(entry):
if any([entry.keytype, entry.base64, entry.comment, entry.options]):
- if (entry.keytype and
- entry.keytype.lower().strip() in ['ssh-dss', 'ssh-rsa']):
+ if (entry.keytype and entry.keytype.lower().strip()
+ in ssh_util.VALID_KEY_TYPES):
return True
return False
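
_is_printable_key now accepts any key type listed in ssh_util.VALID_KEY_TYPES rather than only ssh-dss and ssh-rsa. A toy version of the check; the VALID_KEY_TYPES set here is a small illustrative subset of the real one:

    VALID_KEY_TYPES = {'ssh-rsa', 'ssh-ed25519', 'ecdsa-sha2-nistp256'}

    def is_printable_key(keytype):
        return bool(keytype) and keytype.lower().strip() in VALID_KEY_TYPES

    print(is_printable_key('ssh-ed25519'))  # True, previously excluded
    print(is_printable_key('ssh-rsa'))      # True, as before
    print(is_printable_key('ssh-foo'))      # False
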
diff --git a/cloudinit/config/cc_ssh_import_id.py b/cloudinit/config/cc_ssh_import_id.py
index 63f87298..856e5a9e 100755
--- a/cloudinit/config/cc_ssh_import_id.py
+++ b/cloudinit/config/cc_ssh_import_id.py
@@ -31,6 +31,7 @@ either ``lp:`` for launchpad or ``gh:`` for github to the username.
"""
from cloudinit.distros import ug_util
+from cloudinit import subp
from cloudinit import util
import pwd
@@ -101,8 +102,8 @@ def import_ssh_ids(ids, user, log):
log.debug("Importing SSH ids for user %s.", user)
try:
- util.subp(cmd, capture=False)
- except util.ProcessExecutionError as exc:
+ subp.subp(cmd, capture=False)
+ except subp.ProcessExecutionError as exc:
util.logexc(log, "Failed to run command to import %s SSH ids", user)
raise exc
diff --git a/cloudinit/config/cc_ubuntu_advantage.py b/cloudinit/config/cc_ubuntu_advantage.py
index 8b6d2a1a..d61dc655 100644
--- a/cloudinit/config/cc_ubuntu_advantage.py
+++ b/cloudinit/config/cc_ubuntu_advantage.py
@@ -8,6 +8,7 @@ from cloudinit.config.schema import (
get_schema_doc, validate_cloudconfig_schema)
from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
+from cloudinit import subp
from cloudinit import util
@@ -109,18 +110,18 @@ def configure_ua(token=None, enable=None):
attach_cmd = ['ua', 'attach', token]
LOG.debug('Attaching to Ubuntu Advantage. %s', ' '.join(attach_cmd))
try:
- util.subp(attach_cmd)
- except util.ProcessExecutionError as e:
+ subp.subp(attach_cmd)
+ except subp.ProcessExecutionError as e:
msg = 'Failure attaching Ubuntu Advantage:\n{error}'.format(
error=str(e))
util.logexc(LOG, msg)
- raise RuntimeError(msg)
+ raise RuntimeError(msg) from e
enable_errors = []
for service in enable:
try:
cmd = ['ua', 'enable', service]
- util.subp(cmd, capture=True)
- except util.ProcessExecutionError as e:
+ subp.subp(cmd, capture=True)
+ except subp.ProcessExecutionError as e:
enable_errors.append((service, e))
if enable_errors:
for service, error in enable_errors:
@@ -135,7 +136,7 @@ def configure_ua(token=None, enable=None):
def maybe_install_ua_tools(cloud):
"""Install ubuntu-advantage-tools if not present."""
- if util.which('ua'):
+ if subp.which('ua'):
return
try:
cloud.distro.update_package_sources()
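Note the switch from a bare ``raise RuntimeError(msg)`` to ``raise RuntimeError(msg) from e``: chaining keeps the original ProcessExecutionError attached as ``__cause__``, so tracebacks show the real failure rather than just the re-raise. A standalone illustration (not cloud-init code):

    def attach(token):
        try:
            raise ValueError('Invalid token %s' % token)  # stand-in failure
        except ValueError as e:
            raise RuntimeError('Failure attaching Ubuntu Advantage') from e

    try:
        attach('SomeToken')
    except RuntimeError as err:
        assert isinstance(err.__cause__, ValueError)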
diff --git a/cloudinit/config/cc_ubuntu_drivers.py b/cloudinit/config/cc_ubuntu_drivers.py
index 297451d6..2d1d2b32 100644
--- a/cloudinit/config/cc_ubuntu_drivers.py
+++ b/cloudinit/config/cc_ubuntu_drivers.py
@@ -9,6 +9,7 @@ from cloudinit.config.schema import (
get_schema_doc, validate_cloudconfig_schema)
from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
+from cloudinit import subp
from cloudinit import temp_utils
from cloudinit import type_utils
from cloudinit import util
@@ -108,7 +109,7 @@ def install_drivers(cfg, pkg_install_func):
LOG.debug("Not installing NVIDIA drivers. %s=%s", cfgpath, nv_acc)
return
- if not util.which('ubuntu-drivers'):
+ if not subp.which('ubuntu-drivers'):
LOG.debug("'ubuntu-drivers' command not available. "
"Installing ubuntu-drivers-common")
pkg_install_func(['ubuntu-drivers-common'])
@@ -131,7 +132,7 @@ def install_drivers(cfg, pkg_install_func):
debconf_script,
util.encode_text(NVIDIA_DRIVER_LATELINK_DEBCONF_SCRIPT),
mode=0o755)
- util.subp([debconf_script, debconf_file])
+ subp.subp([debconf_script, debconf_file])
except Exception as e:
util.logexc(
LOG, "Failed to register NVIDIA debconf template: %s", str(e))
@@ -141,8 +142,8 @@ def install_drivers(cfg, pkg_install_func):
util.del_dir(tdir)
try:
- util.subp(['ubuntu-drivers', 'install', '--gpgpu', driver_arg])
- except util.ProcessExecutionError as exc:
+ subp.subp(['ubuntu-drivers', 'install', '--gpgpu', driver_arg])
+ except subp.ProcessExecutionError as exc:
if OLD_UBUNTU_DRIVERS_STDERR_NEEDLE in exc.stderr:
LOG.warning('the available version of ubuntu-drivers is'
' too old to perform requested driver installation')
diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py
index 13764e60..426498a3 100644
--- a/cloudinit/config/cc_users_groups.py
+++ b/cloudinit/config/cc_users_groups.py
@@ -78,6 +78,13 @@ config keys for an entry in ``users`` are as follows:
If specifying a sudo rule for a user, ensure that the syntax for the rule
is valid, as it is not checked by cloud-init.
+.. note::
+ Most of these configuration options will not be honored if the user
+ already exists. The following options are the exceptions and are
+ applied to already-existing users:
+ - 'plain_text_passwd', 'hashed_passwd', 'lock_passwd', 'sudo',
+ 'ssh_authorized_keys', 'ssh_redirect_user'.
+
**Internal name:** ``cc_users_groups``
**Module frequency:** per instance
diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py
index bd87e9e5..8601e707 100644
--- a/cloudinit/config/cc_write_files.py
+++ b/cloudinit/config/cc_write_files.py
@@ -4,60 +4,14 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Write Files
------------
-**Summary:** write arbitrary files
-
-Write out arbitrary content to files, optionally setting permissions. Content
-can be specified in plain text or binary. Data encoded with either base64 or
-binary gzip data can be specified and will be decoded before being written.
-
-.. note::
- if multiline data is provided, care should be taken to ensure that it
- follows yaml formatting standards. to specify binary data, use the yaml
- option ``!!binary``
-
-.. note::
- Do not write files under /tmp during boot because of a race with
- systemd-tmpfiles-clean that can cause temp files to get cleaned during
- the early boot process. Use /run/somedir instead to avoid race LP:1707222.
-
-**Internal name:** ``cc_write_files``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
-
-**Config keys**::
-
- write_files:
- - encoding: b64
- content: CiMgVGhpcyBmaWxlIGNvbnRyb2xzIHRoZSBzdGF0ZSBvZiBTRUxpbnV4...
- owner: root:root
- path: /etc/sysconfig/selinux
- permissions: '0644'
- - content: |
- # My new /etc/sysconfig/samba file
-
- SMDBOPTIONS="-D"
- path: /etc/sysconfig/samba
- - content: !!binary |
- f0VMRgIBAQAAAAAAAAAAAAIAPgABAAAAwARAAAAAAABAAAAAAAAAAJAVAAAAAA
- AEAAHgAdAAYAAAAFAAAAQAAAAAAAAABAAEAAAAAAAEAAQAAAAAAAwAEAAAAAAA
- AAAAAAAAAwAAAAQAAAAAAgAAAAAAAAACQAAAAAAAAAJAAAAAAAAcAAAAAAAAAB
- ...
- path: /bin/arch
- permissions: '0555'
- - content: |
- 15 * * * * root ship_logs
- path: /etc/crontab
- append: true
-"""
+"""Write Files: write arbitrary files"""
import base64
import os
+from textwrap import dedent
+from cloudinit.config.schema import (
+ get_schema_doc, validate_cloudconfig_schema)
from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
from cloudinit import util
@@ -71,6 +25,142 @@ UNKNOWN_ENC = 'text/plain'
LOG = logging.getLogger(__name__)
+distros = ['all']
+
+# The schema definition for each cloud-config module is a strict contract for
+# describing supported configuration parameters for each cloud-config section.
+# It allows cloud-config to validate and alert users to invalid or ignored
+# configuration options before actually attempting to deploy with said
+# configuration.
+
+supported_encoding_types = [
+ 'gz', 'gzip', 'gz+base64', 'gzip+base64', 'gz+b64', 'gzip+b64', 'b64',
+ 'base64']
+
+schema = {
+ 'id': 'cc_write_files',
+ 'name': 'Write Files',
+ 'title': 'write arbitrary files',
+ 'description': dedent("""\
+ Write out arbitrary content to files, optionally setting permissions.
+ Parent folders in the path are created if absent.
+ Content can be specified in plain text or binary. Data encoded with
+ either base64 or binary gzip data can be specified and will be decoded
+ before being written. For empty file creation, content can be omitted.
+
+ .. note::
+ if multiline data is provided, care should be taken to ensure that it
+ follows yaml formatting standards. to specify binary data, use the yaml
+ option ``!!binary``
+
+ .. note::
+ Do not write files under /tmp during boot because of a race with
+ systemd-tmpfiles-clean that can cause temp files to get cleaned during
+ the early boot process. Use /run/somedir instead to avoid race
+ LP:1707222."""),
+ 'distros': distros,
+ 'examples': [
+ dedent("""\
+ # Write out base64 encoded content to /etc/sysconfig/selinux
+ write_files:
+ - encoding: b64
+ content: CiMgVGhpcyBmaWxlIGNvbnRyb2xzIHRoZSBzdGF0ZSBvZiBTRUxpbnV4...
+ owner: root:root
+ path: /etc/sysconfig/selinux
+ permissions: '0644'
+ """),
+ dedent("""\
+ # Appending content to an existing file
+ write_files:
+ - content: |
+ 15 * * * * root ship_logs
+ path: /etc/crontab
+ append: true
+ """),
+ dedent("""\
+ # Provide gzipped binary content
+ write_files:
+ - encoding: gzip
+ content: !!binary |
+ H4sIAIDb/U8C/1NW1E/KzNMvzuBKTc7IV8hIzcnJVyjPL8pJ4QIA6N+MVxsAAAA=
+ path: /usr/bin/hello
+ permissions: '0755'
+ """),
+ dedent("""\
+ # Create an empty file on the system
+ write_files:
+ - path: /root/CLOUD_INIT_WAS_HERE
+ """)],
+ 'frequency': frequency,
+ 'type': 'object',
+ 'properties': {
+ 'write_files': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'path': {
+ 'type': 'string',
+ 'description': dedent("""\
+ Path of the file to which ``content`` is decoded
+ and written
+ """),
+ },
+ 'content': {
+ 'type': 'string',
+ 'default': '',
+ 'description': dedent("""\
+ Optional content to write to the provided ``path``.
+ When content is present and encoding is not '%s',
+ decode the content prior to writing. Default:
+ **''**
+ """ % UNKNOWN_ENC),
+ },
+ 'owner': {
+ 'type': 'string',
+ 'default': DEFAULT_OWNER,
+ 'description': dedent("""\
+ Optional owner:group to chown on the file. Default:
+ **{owner}**
+ """.format(owner=DEFAULT_OWNER)),
+ },
+ 'permissions': {
+ 'type': 'string',
+ 'default': oct(DEFAULT_PERMS).replace('o', ''),
+ 'description': dedent("""\
+ Optional file permissions to set on ``path``
+ represented as an octal string '0###'. Default:
+ **'{perms}'**
+ """.format(perms=oct(DEFAULT_PERMS).replace('o', ''))),
+ },
+ 'encoding': {
+ 'type': 'string',
+ 'default': UNKNOWN_ENC,
+ 'enum': supported_encoding_types,
+ 'description': dedent("""\
+ Optional encoding type of the content. Default is
+ **text/plain** and no content decoding is
+ performed. Supported encoding types are:
+ %s.""" % ", ".join(supported_encoding_types)),
+ },
+ 'append': {
+ 'type': 'boolean',
+ 'default': False,
+ 'description': dedent("""\
+ Whether to append ``content`` to existing file if
+ ``path`` exists. Default: **false**.
+ """),
+ },
+ },
+ 'required': ['path'],
+ 'additionalProperties': False
+ },
+ }
+ }
+}
+
+__doc__ = get_schema_doc(schema) # Supplement python help()
+
def handle(name, cfg, _cloud, log, _args):
files = cfg.get('write_files')
@@ -78,6 +168,7 @@ def handle(name, cfg, _cloud, log, _args):
log.debug(("Skipping module named %s,"
" no/empty 'write_files' key in configuration"), name)
return
+ validate_cloudconfig_schema(cfg, schema)
write_files(name, files)
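With the docstring replaced by a schema, cc_write_files configs can be checked before deployment. A sketch of what that buys, assuming python-jsonschema is installed (validate_cloudconfig_schema silently no-ops without it):

    from cloudinit.config.cc_write_files import schema
    from cloudinit.config.schema import validate_cloudconfig_schema

    # 'path' is required, so this raises SchemaValidationError under
    # strict=True (and only logs a warning in the default mode):
    validate_cloudconfig_schema(
        {'write_files': [{'content': 'no path given'}]}, schema,
        strict=True)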
diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py
index 3673166a..01fe683c 100644
--- a/cloudinit/config/cc_yum_add_repo.py
+++ b/cloudinit/config/cc_yum_add_repo.py
@@ -18,7 +18,7 @@ entry, the config entry will be skipped.
**Module frequency:** per always
-**Supported distros:** fedora, rhel
+**Supported distros:** centos, fedora, rhel
**Config keys**::
@@ -36,7 +36,7 @@ from configparser import ConfigParser
from cloudinit import util
-distros = ['fedora', 'rhel']
+distros = ['centos', 'fedora', 'rhel']
def _canonicalize_id(repo_id):
diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py
index 807c3eee..8a966aee 100644
--- a/cloudinit/config/schema.py
+++ b/cloudinit/config/schema.py
@@ -1,8 +1,6 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""schema.py: Set of module functions for processing cloud-config schema."""
-from __future__ import print_function
-
from cloudinit import importer
from cloudinit.util import find_modules, load_file
@@ -36,6 +34,8 @@ SCHEMA_DOC_TMPL = """
{examples}
"""
SCHEMA_PROPERTY_TMPL = '{prefix}**{prop_name}:** ({type}) {description}'
+SCHEMA_LIST_ITEM_TMPL = (
+ '{prefix}Each item in **{prop_name}** list supports the following keys:')
SCHEMA_EXAMPLES_HEADER = '\n**Examples**::\n\n'
SCHEMA_EXAMPLES_SPACER_TEMPLATE = '\n # --- Example{0} ---'
@@ -58,6 +58,19 @@ class SchemaValidationError(ValueError):
super(SchemaValidationError, self).__init__(message)
+def is_schema_byte_string(checker, instance):
+ """TYPE_CHECKER override allowing bytes for string type
+
+ For jsonschema v. 3.0.0+
+ """
+ try:
+ from jsonschema import Draft4Validator
+ except ImportError:
+ return False
+ return (Draft4Validator.TYPE_CHECKER.is_type(instance, "string") or
+ isinstance(instance, (bytes,)))
+
+
def validate_cloudconfig_schema(config, schema, strict=False):
"""Validate provided config meets the schema definition.
@@ -73,11 +86,31 @@ def validate_cloudconfig_schema(config, schema, strict=False):
"""
try:
from jsonschema import Draft4Validator, FormatChecker
+ from jsonschema.validators import create, extend
except ImportError:
logging.debug(
'Ignoring schema validation. python-jsonschema is not present')
return
- validator = Draft4Validator(schema, format_checker=FormatChecker())
+
+ # Allow for bytes to be presented as an acceptable valid value for string
+ # type jsonschema attributes in cloud-init's schema.
+ # This allows #cloud-config to provide valid yaml "content: !!binary | ..."
+ if hasattr(Draft4Validator, 'TYPE_CHECKER'): # jsonschema 3.0+
+ type_checker = Draft4Validator.TYPE_CHECKER.redefine(
+ 'string', is_schema_byte_string)
+ cloudinitValidator = extend(Draft4Validator, type_checker=type_checker)
+ else: # jsonschema 2.6 workaround
+ types = Draft4Validator.DEFAULT_TYPES
+ # Allow bytes as well as string (and disable a spurious
+ # unsupported-assignment-operation pylint warning which appears because
+ # this code path isn't written against the latest jsonschema).
+ types['string'] = (str, bytes) # pylint: disable=E1137
+ cloudinitValidator = create(
+ meta_schema=Draft4Validator.META_SCHEMA,
+ validators=Draft4Validator.VALIDATORS,
+ version="draft4",
+ default_types=types)
+ validator = cloudinitValidator(schema, format_checker=FormatChecker())
errors = ()
for error in sorted(validator.iter_errors(config), key=lambda e: e.path):
path = '.'.join([str(p) for p in error.path])
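The TYPE_CHECKER override above lets YAML ``!!binary`` payloads (which parse to bytes) satisfy ``type: string`` schema attributes. A standalone sketch of the same mechanism on jsonschema 3.x, where a stock Draft4Validator would reject the bytes value:

    from jsonschema import Draft4Validator
    from jsonschema.validators import extend

    def is_string_or_bytes(checker, instance):
        return (Draft4Validator.TYPE_CHECKER.is_type(instance, 'string')
                or isinstance(instance, bytes))

    type_checker = Draft4Validator.TYPE_CHECKER.redefine(
        'string', is_string_or_bytes)
    ByteFriendly = extend(Draft4Validator, type_checker=type_checker)
    ByteFriendly({'type': 'string'}).validate(b'!!binary payload')  # passes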
@@ -106,7 +139,6 @@ def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors):
schemapaths = _schemapath_for_cloudconfig(
cloudconfig, original_content)
errors_by_line = defaultdict(list)
- error_count = 1
error_footer = []
annotated_content = []
for path, msg in schema_errors:
@@ -120,18 +152,17 @@ def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors):
if col is not None:
msg = 'Line {line} column {col}: {msg}'.format(
line=line, col=col, msg=msg)
- error_footer.append('# E{0}: {1}'.format(error_count, msg))
- error_count += 1
lines = original_content.decode().split('\n')
- error_count = 1
- for line_number, line in enumerate(lines):
- errors = errors_by_line[line_number + 1]
+ error_index = 1
+ for line_number, line in enumerate(lines, 1):
+ errors = errors_by_line[line_number]
if errors:
- error_label = ','.join(
- ['E{0}'.format(count + error_count)
- for count in range(0, len(errors))])
- error_count += len(errors)
- annotated_content.append(line + '\t\t# ' + error_label)
+ error_label = []
+ for error in errors:
+ error_label.append('E{0}'.format(error_index))
+ error_footer.append('# E{0}: {1}'.format(error_index, error))
+ error_index += 1
+ annotated_content.append(line + '\t\t# ' + ','.join(error_label))
else:
annotated_content.append(line)
annotated_content.append(
@@ -179,7 +210,7 @@ def validate_cloudconfig_file(config_path, schema, annotate=False):
error = SchemaValidationError(errors)
if annotate:
print(annotated_cloudconfig_file({}, content, error.schema_errors))
- raise error
+ raise error from e
try:
validate_cloudconfig_schema(
cloudconfig, schema, strict=True)
@@ -213,20 +244,34 @@ def _schemapath_for_cloudconfig(config, original_content):
previous_depth = -1
path_prefix = ''
if line.startswith('- '):
+ # Process list items adding a list_index to the path prefix
+ previous_list_idx = '.%d' % (list_index - 1)
+ if path_prefix and path_prefix.endswith(previous_list_idx):
+ path_prefix = path_prefix[:-len(previous_list_idx)]
key = str(list_index)
- value = line[1:]
+ schema_line_numbers[key] = line_number
+ item_indent = len(re.match(RE_YAML_INDENT, line[1:]).groups()[0])
+ item_indent += 1 # For the leading '-' character
+ previous_depth = indent_depth
+ indent_depth += item_indent
+ line = line[item_indent:] # Strip leading list item + whitespace
list_index += 1
else:
+ # Process non-list lines setting value if present
list_index = 0
key, value = line.split(':', 1)
+ if path_prefix:
+ # Append any existing path_prefix for a fully-pathed key
+ key = path_prefix + '.' + key
while indent_depth <= previous_depth:
if scopes:
previous_depth, path_prefix = scopes.pop()
+ if list_index > 0 and indent_depth == previous_depth:
+ path_prefix = '.'.join(path_prefix.split('.')[:-1])
+ break
else:
previous_depth = -1
path_prefix = ''
- if path_prefix:
- key = path_prefix + '.' + key
scopes.append((indent_depth, key))
if value:
value = value.strip()
@@ -259,6 +304,28 @@ def _get_property_type(property_dict):
return property_type
+def _parse_description(description, prefix):
+ """Parse description from the schema in a format that we can better
+ display in our docs. This parser does three things:
+
+ - Guarantee that a paragraph will be in a single line
+ - Guarantee that each new paragraph will be aligned with
+ the first paragraph
+ - Properly align lists of items
+
+ @param description: The original description in the schema.
+ @param prefix: The number of spaces used to align the current description
+ """
+ list_paragraph = prefix * 3
+ description = re.sub(r"(\S)\n(\S)", r"\1 \2", description)
+ description = re.sub(
+ r"\n\n", r"\n\n{}".format(prefix), description)
+ description = re.sub(
+ r"\n( +)-", r"\n{}-".format(list_paragraph), description)
+
+ return description
+
+
def _get_property_doc(schema, prefix=' '):
"""Return restructured text describing the supported schema properties."""
new_prefix = prefix + ' '
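The _parse_description helper above normalizes schema descriptions for the generated docs: wrapped lines are joined into a single paragraph, and paragraphs and list items are re-indented to the caller's prefix. A quick sketch of the expected behavior:

    from cloudinit.config.schema import _parse_description

    raw = "Write out arbitrary\ncontent to files.\n\nSecond paragraph."
    _parse_description(raw, '    ')
    # -> "Write out arbitrary content to files.\n\n    Second paragraph."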
@@ -266,11 +333,23 @@ def _get_property_doc(schema, prefix=' '):
for prop_key, prop_config in schema.get('properties', {}).items():
# Define prop_name and dscription for SCHEMA_PROPERTY_TMPL
description = prop_config.get('description', '')
+
properties.append(SCHEMA_PROPERTY_TMPL.format(
prefix=prefix,
prop_name=prop_key,
type=_get_property_type(prop_config),
- description=description.replace('\n', '')))
+ description=_parse_description(description, prefix)))
+ items = prop_config.get('items')
+ if items:
+ if isinstance(items, list):
+ for item in items:
+ properties.append(
+ _get_property_doc(item, prefix=new_prefix))
+ elif isinstance(items, dict) and items.get('properties'):
+ properties.append(SCHEMA_LIST_ITEM_TMPL.format(
+ prefix=new_prefix, prop_name=prop_key))
+ new_prefix += ' '
+ properties.append(_get_property_doc(items, prefix=new_prefix))
if 'properties' in prop_config:
properties.append(
_get_property_doc(prop_config, prefix=new_prefix))
@@ -346,8 +425,9 @@ def get_parser(parser=None):
description='Validate cloud-config files or document schema')
parser.add_argument('-c', '--config-file',
help='Path of the cloud-config yaml file to validate')
- parser.add_argument('-d', '--doc', action="store_true", default=False,
- help='Print schema documentation')
+ parser.add_argument('-d', '--docs', nargs='+',
+ help=('Print schema module docs. Choices: all or'
+ ' space-delimited cc_names.'))
parser.add_argument('--annotate', action="store_true", default=False,
help='Annotate existing cloud-config file with errors')
return parser
@@ -355,9 +435,9 @@ def get_parser(parser=None):
def handle_schema_args(name, args):
"""Handle provided schema args and perform the appropriate actions."""
- exclusive_args = [args.config_file, args.doc]
+ exclusive_args = [args.config_file, args.docs]
if not any(exclusive_args) or all(exclusive_args):
- error('Expected either --config-file argument or --doc')
+ error('Expected either --config-file argument or --docs')
full_schema = get_schema()
if args.config_file:
try:
@@ -370,9 +450,16 @@ def handle_schema_args(name, args):
error(str(e))
else:
print("Valid cloud-config file {0}".format(args.config_file))
- if args.doc:
+ elif args.docs:
+ schema_ids = [subschema['id'] for subschema in full_schema['allOf']]
+ schema_ids += ['all']
+ invalid_docs = set(args.docs).difference(set(schema_ids))
+ if invalid_docs:
+ error('Invalid --docs value {0}. Must be one of: {1}'.format(
+ list(invalid_docs), ', '.join(schema_ids)))
for subschema in full_schema['allOf']:
- print(get_schema_doc(subschema))
+ if 'all' in args.docs or subschema['id'] in args.docs:
+ print(get_schema_doc(subschema))
def main():
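The --doc flag becomes --docs and takes module ids, so documentation can be printed per module rather than all-or-nothing. A sketch driving it through the module's own parser ('cc_write_files' being one of the registered schema ids):

    from cloudinit.config.schema import get_parser, handle_schema_args

    args = get_parser().parse_args(['--docs', 'cc_write_files'])
    handle_schema_args('cloudconfig-schema', args)  # prints that module
    # parse_args(['--docs', 'all']) would print every schema-backed module.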
diff --git a/cloudinit/config/tests/test_disable_ec2_metadata.py b/cloudinit/config/tests/test_disable_ec2_metadata.py
index 67646b03..b00f2083 100644
--- a/cloudinit/config/tests/test_disable_ec2_metadata.py
+++ b/cloudinit/config/tests/test_disable_ec2_metadata.py
@@ -15,10 +15,8 @@ DISABLE_CFG = {'disable_ec2_metadata': 'true'}
class TestEC2MetadataRoute(CiTestCase):
- with_logs = True
-
- @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.which')
- @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.subp')
+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.which')
+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.subp')
def test_disable_ifconfig(self, m_subp, m_which):
"""Set the route if ifconfig command is available"""
m_which.side_effect = lambda x: x if x == 'ifconfig' else None
@@ -27,8 +25,8 @@ class TestEC2MetadataRoute(CiTestCase):
['route', 'add', '-host', '169.254.169.254', 'reject'],
capture=False)
- @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.which')
- @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.subp')
+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.which')
+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.subp')
def test_disable_ip(self, m_subp, m_which):
"""Set the route if ip command is available"""
m_which.side_effect = lambda x: x if x == 'ip' else None
@@ -37,8 +35,8 @@ class TestEC2MetadataRoute(CiTestCase):
['ip', 'route', 'add', 'prohibit', '169.254.169.254'],
capture=False)
- @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.which')
- @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.subp')
+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.which')
+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.subp.subp')
def test_disable_no_tool(self, m_subp, m_which):
"""Log error when neither route nor ip commands are available"""
m_which.return_value = None # Find neither ifconfig nor ip
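These test updates follow the standard mock rule: patch the name where it is looked up. Since the module now resolves ``subp.subp`` inside its own namespace, the patch target moves with it; a minimal sketch:

    from unittest import mock

    # Old target, which no longer intercepts anything after the refactor:
    #   mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.subp')
    # New target:
    patcher = mock.patch(
        'cloudinit.config.cc_disable_ec2_metadata.subp.subp')
    m_subp = patcher.start()
    # ... exercise the module under test, assert on m_subp ...
    patcher.stop()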
diff --git a/cloudinit/config/tests/test_final_message.py b/cloudinit/config/tests/test_final_message.py
new file mode 100644
index 00000000..46ba99b2
--- /dev/null
+++ b/cloudinit/config/tests/test_final_message.py
@@ -0,0 +1,46 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+import logging
+from unittest import mock
+
+import pytest
+
+from cloudinit.config.cc_final_message import handle
+
+
+class TestHandle:
+ # TODO: Expand these tests to cover full functionality; currently they only
+ # cover the logic around how the boot-finished file is written (and not its
+ # contents).
+
+ @pytest.mark.parametrize(
+ "instance_dir_exists,file_is_written,expected_log_substring",
+ [
+ (True, True, None),
+ (False, False, "Failed to write boot finished file "),
+ ],
+ )
+ def test_boot_finished_written(
+ self,
+ instance_dir_exists,
+ file_is_written,
+ expected_log_substring,
+ caplog,
+ tmpdir,
+ ):
+ instance_dir = tmpdir.join("var/lib/cloud/instance")
+ if instance_dir_exists:
+ instance_dir.ensure_dir()
+ boot_finished = instance_dir.join("boot-finished")
+
+ m_cloud = mock.Mock(
+ paths=mock.Mock(boot_finished=boot_finished.strpath)
+ )
+
+ handle(None, {}, m_cloud, logging.getLogger(), [])
+
+ # We should not change the status of the instance directory
+ assert instance_dir_exists == instance_dir.exists()
+ assert file_is_written == boot_finished.exists()
+
+ if expected_log_substring:
+ assert expected_log_substring in caplog.text
diff --git a/cloudinit/config/tests/test_grub_dpkg.py b/cloudinit/config/tests/test_grub_dpkg.py
new file mode 100644
index 00000000..99c05bb5
--- /dev/null
+++ b/cloudinit/config/tests/test_grub_dpkg.py
@@ -0,0 +1,176 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import pytest
+
+from unittest import mock
+from logging import Logger
+from cloudinit.subp import ProcessExecutionError
+from cloudinit.config.cc_grub_dpkg import fetch_idevs, handle
+
+
+class TestFetchIdevs:
+ """Tests cc_grub_dpkg.fetch_idevs()"""
+
+ # Note: udevadm info returns devices in one large single-line string
+ @pytest.mark.parametrize(
+ "grub_output,path_exists,expected_log_call,udevadm_output"
+ ",expected_idevs",
+ [
+ # Inside a container, grub not installed
+ (
+ ProcessExecutionError(reason=FileNotFoundError()),
+ False,
+ mock.call("'grub-probe' not found in $PATH"),
+ '',
+ '',
+ ),
+ # Inside a container, grub installed
+ (
+ ProcessExecutionError(stderr="failed to get canonical path"),
+ False,
+ mock.call("grub-probe 'failed to get canonical path'"),
+ '',
+ '',
+ ),
+ # KVM Instance
+ (
+ ['/dev/vda'],
+ True,
+ None,
+ (
+ '/dev/disk/by-path/pci-0000:00:00.0 ',
+ '/dev/disk/by-path/virtio-pci-0000:00:00.0 '
+ ),
+ '/dev/vda',
+ ),
+ # Xen Instance
+ (
+ ['/dev/xvda'],
+ True,
+ None,
+ '',
+ '/dev/xvda',
+ ),
+ # NVMe Hardware Instance
+ (
+ ['/dev/nvme1n1'],
+ True,
+ None,
+ (
+ '/dev/disk/by-id/nvme-Company_hash000 ',
+ '/dev/disk/by-id/nvme-nvme.000-000-000-000-000 ',
+ '/dev/disk/by-path/pci-0000:00:00.0-nvme-0 '
+ ),
+ '/dev/disk/by-id/nvme-Company_hash000',
+ ),
+ # SCSI Hardware Instance
+ (
+ ['/dev/sda'],
+ True,
+ None,
+ (
+ '/dev/disk/by-id/company-user-1 ',
+ '/dev/disk/by-id/scsi-0Company_user-1 ',
+ '/dev/disk/by-path/pci-0000:00:00.0-scsi-0:0:0:0 '
+ ),
+ '/dev/disk/by-id/company-user-1',
+ ),
+ ],
+ )
+ @mock.patch("cloudinit.config.cc_grub_dpkg.util.logexc")
+ @mock.patch("cloudinit.config.cc_grub_dpkg.os.path.exists")
+ @mock.patch("cloudinit.config.cc_grub_dpkg.subp.subp")
+ def test_fetch_idevs(self, m_subp, m_exists, m_logexc, grub_output,
+ path_exists, expected_log_call, udevadm_output,
+ expected_idevs):
+ """Tests outputs from grub-probe and udevadm info against grub-dpkg"""
+ m_subp.side_effect = [
+ grub_output,
+ ["".join(udevadm_output)]
+ ]
+ m_exists.return_value = path_exists
+ log = mock.Mock(spec=Logger)
+ idevs = fetch_idevs(log)
+ assert expected_idevs == idevs
+ if expected_log_call is not None:
+ assert expected_log_call in log.debug.call_args_list
+
+
+class TestHandle:
+ """Tests cc_grub_dpkg.handle()"""
+
+ @pytest.mark.parametrize(
+ "cfg_idevs,cfg_idevs_empty,fetch_idevs_output,expected_log_output",
+ [
+ (
+ # No configuration
+ None,
+ None,
+ '/dev/disk/by-id/nvme-Company_hash000',
+ (
+ "Setting grub debconf-set-selections with ",
+ "'/dev/disk/by-id/nvme-Company_hash000','false'"
+ ),
+ ),
+ (
+ # idevs set, idevs_empty unset
+ '/dev/sda',
+ None,
+ '/dev/sda',
+ (
+ "Setting grub debconf-set-selections with ",
+ "'/dev/sda','false'"
+ ),
+ ),
+ (
+ # idevs unset, idevs_empty set
+ None,
+ 'true',
+ '/dev/xvda',
+ (
+ "Setting grub debconf-set-selections with ",
+ "'/dev/xvda','true'"
+ ),
+ ),
+ (
+ # idevs set, idevs_empty set
+ '/dev/vda',
+ 'false',
+ '/dev/disk/by-id/company-user-1',
+ (
+ "Setting grub debconf-set-selections with ",
+ "'/dev/vda','false'"
+ ),
+ ),
+ (
+ # idevs set, idevs_empty set
+ # Respect what the user defines, even if it's logically wrong
+ '/dev/nvme0n1',
+ 'true',
+ '',
+ (
+ "Setting grub debconf-set-selections with ",
+ "'/dev/nvme0n1','true'"
+ ),
+ )
+ ],
+ )
+ @mock.patch("cloudinit.config.cc_grub_dpkg.fetch_idevs")
+ @mock.patch("cloudinit.config.cc_grub_dpkg.util.get_cfg_option_str")
+ @mock.patch("cloudinit.config.cc_grub_dpkg.util.logexc")
+ @mock.patch("cloudinit.config.cc_grub_dpkg.subp.subp")
+ def test_handle(self, m_subp, m_logexc, m_get_cfg_str, m_fetch_idevs,
+ cfg_idevs, cfg_idevs_empty, fetch_idevs_output,
+ expected_log_output):
+ """Test setting of correct debconf database entries"""
+ m_get_cfg_str.side_effect = [
+ cfg_idevs,
+ cfg_idevs_empty
+ ]
+ m_fetch_idevs.return_value = fetch_idevs_output
+ log = mock.Mock(spec=Logger)
+ handle(mock.Mock(), mock.Mock(), mock.Mock(), log, mock.Mock())
+ log.debug.assert_called_with("".join(expected_log_output))
+
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/config/tests/test_mounts.py b/cloudinit/config/tests/test_mounts.py
new file mode 100644
index 00000000..764a33e3
--- /dev/null
+++ b/cloudinit/config/tests/test_mounts.py
@@ -0,0 +1,28 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+from unittest import mock
+
+import pytest
+
+from cloudinit.config.cc_mounts import create_swapfile
+
+
+M_PATH = 'cloudinit.config.cc_mounts.'
+
+
+class TestCreateSwapfile:
+
+ @pytest.mark.parametrize('fstype', ('xfs', 'btrfs', 'ext4', 'other'))
+ @mock.patch(M_PATH + 'util.get_mount_info')
+ @mock.patch(M_PATH + 'subp.subp')
+ def test_happy_path(self, m_subp, m_get_mount_info, fstype, tmpdir):
+ swap_file = tmpdir.join("swap-file")
+ fname = str(swap_file)
+
+ # Some of the calls to subp.subp should create the swap file; this
+ # roughly approximates that
+ m_subp.side_effect = lambda *args, **kwargs: swap_file.write('')
+
+ m_get_mount_info.return_value = (mock.ANY, fstype)
+
+ create_swapfile(fname, '')
+ assert mock.call(['mkswap', fname]) in m_subp.call_args_list
diff --git a/cloudinit/config/tests/test_resolv_conf.py b/cloudinit/config/tests/test_resolv_conf.py
new file mode 100644
index 00000000..6546a0b5
--- /dev/null
+++ b/cloudinit/config/tests/test_resolv_conf.py
@@ -0,0 +1,86 @@
+from unittest import mock
+
+import pytest
+
+from cloudinit.config.cc_resolv_conf import generate_resolv_conf
+
+
+EXPECTED_HEADER = """\
+# Your system has been configured with 'manage-resolv-conf' set to true.
+# As a result, cloud-init has written this file with configuration data
+# that it has been provided. Cloud-init, by default, will write this file
+# a single time (PER_ONCE).
+#\n\n"""
+
+
+class TestGenerateResolvConf:
+ @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file")
+ def test_default_target_fname_is_etc_resolvconf(self, m_render_to_file):
+ generate_resolv_conf("templates/resolv.conf.tmpl", mock.MagicMock())
+
+ assert [
+ mock.call(mock.ANY, "/etc/resolv.conf", mock.ANY)
+ ] == m_render_to_file.call_args_list
+
+ @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file")
+ def test_target_fname_is_used_if_passed(self, m_render_to_file):
+ generate_resolv_conf(
+ "templates/resolv.conf.tmpl", mock.MagicMock(), "/use/this/path"
+ )
+
+ assert [
+ mock.call(mock.ANY, "/use/this/path", mock.ANY)
+ ] == m_render_to_file.call_args_list
+
+ # Patch in templater so we can assert on the actual generated content
+ @mock.patch("cloudinit.templater.util.write_file")
+ # Parameterise with the value to be passed to generate_resolv_conf as the
+ # params parameter, and the expected line after the header as
+ # expected_extra_line.
+ @pytest.mark.parametrize(
+ "params,expected_extra_line",
+ [
+ # No options
+ ({}, None),
+ # Just a true flag
+ ({"options": {"foo": True}}, "options foo"),
+ # Just a false flag
+ ({"options": {"foo": False}}, None),
+ # Just an option
+ ({"options": {"foo": "some_value"}}, "options foo:some_value"),
+ # A true flag and an option
+ (
+ {"options": {"foo": "some_value", "bar": True}},
+ "options bar foo:some_value",
+ ),
+ # Two options
+ (
+ {"options": {"foo": "some_value", "bar": "other_value"}},
+ "options bar:other_value foo:some_value",
+ ),
+ # Everything
+ (
+ {
+ "options": {
+ "foo": "some_value",
+ "bar": "other_value",
+ "baz": False,
+ "spam": True,
+ }
+ },
+ "options spam bar:other_value foo:some_value",
+ ),
+ ],
+ )
+ def test_flags_and_options(
+ self, m_write_file, params, expected_extra_line
+ ):
+ generate_resolv_conf("templates/resolv.conf.tmpl", params)
+
+ expected_content = EXPECTED_HEADER
+ if expected_extra_line is not None:
+ # If we have any extra lines, expect a trailing newline
+ expected_content += "\n".join([expected_extra_line, ""])
+ assert [
+ mock.call(mock.ANY, expected_content, mode=mock.ANY)
+ ] == m_write_file.call_args_list
diff --git a/cloudinit/config/tests/test_set_passwords.py b/cloudinit/config/tests/test_set_passwords.py
index 8247c388..daa1ef51 100644
--- a/cloudinit/config/tests/test_set_passwords.py
+++ b/cloudinit/config/tests/test_set_passwords.py
@@ -14,7 +14,7 @@ class TestHandleSshPwauth(CiTestCase):
with_logs = True
- @mock.patch(MODPATH + "util.subp")
+ @mock.patch(MODPATH + "subp.subp")
def test_unknown_value_logs_warning(self, m_subp):
setpass.handle_ssh_pwauth("floo")
self.assertIn("Unrecognized value: ssh_pwauth=floo",
@@ -22,7 +22,7 @@ class TestHandleSshPwauth(CiTestCase):
m_subp.assert_not_called()
@mock.patch(MODPATH + "update_ssh_config", return_value=True)
- @mock.patch(MODPATH + "util.subp")
+ @mock.patch(MODPATH + "subp.subp")
def test_systemctl_as_service_cmd(self, m_subp, m_update_ssh_config):
"""If systemctl in service cmd: systemctl restart name."""
setpass.handle_ssh_pwauth(
@@ -31,7 +31,7 @@ class TestHandleSshPwauth(CiTestCase):
m_subp.call_args)
@mock.patch(MODPATH + "update_ssh_config", return_value=True)
- @mock.patch(MODPATH + "util.subp")
+ @mock.patch(MODPATH + "subp.subp")
def test_service_as_service_cmd(self, m_subp, m_update_ssh_config):
"""If systemctl in service cmd: systemctl restart name."""
setpass.handle_ssh_pwauth(
@@ -40,7 +40,7 @@ class TestHandleSshPwauth(CiTestCase):
m_subp.call_args)
@mock.patch(MODPATH + "update_ssh_config", return_value=False)
- @mock.patch(MODPATH + "util.subp")
+ @mock.patch(MODPATH + "subp.subp")
def test_not_restarted_if_not_updated(self, m_subp, m_update_ssh_config):
"""If config is not updated, then no system restart should be done."""
setpass.handle_ssh_pwauth(True)
@@ -48,7 +48,7 @@ class TestHandleSshPwauth(CiTestCase):
self.assertIn("No need to restart SSH", self.logs.getvalue())
@mock.patch(MODPATH + "update_ssh_config", return_value=True)
- @mock.patch(MODPATH + "util.subp")
+ @mock.patch(MODPATH + "subp.subp")
def test_unchanged_does_nothing(self, m_subp, m_update_ssh_config):
"""If 'unchanged', then no updates to config and no restart."""
setpass.handle_ssh_pwauth(
@@ -56,7 +56,7 @@ class TestHandleSshPwauth(CiTestCase):
m_update_ssh_config.assert_not_called()
m_subp.assert_not_called()
- @mock.patch(MODPATH + "util.subp")
+ @mock.patch(MODPATH + "subp.subp")
def test_valid_change_values(self, m_subp):
"""If value is a valid changen value, then update should be called."""
upname = MODPATH + "update_ssh_config"
@@ -88,7 +88,7 @@ class TestSetPasswordsHandle(CiTestCase):
'ssh_pwauth=None\n',
self.logs.getvalue())
- @mock.patch(MODPATH + "util.subp")
+ @mock.patch(MODPATH + "subp.subp")
def test_handle_on_chpasswd_list_parses_common_hashes(self, m_subp):
"""handle parses command password hashes."""
cloud = self.tmp_cloud(distro='ubuntu')
@@ -98,7 +98,7 @@ class TestSetPasswordsHandle(CiTestCase):
'ubuntu:$6$5hOurLPO$naywm3Ce0UlmZg9gG2Fl9acWCVEoakMMC7dR52q'
'SDexZbrN9z8yHxhUM2b.sxpguSwOlbOQSW/HpXazGGx3oo1']
cfg = {'chpasswd': {'list': valid_hashed_pwds}}
- with mock.patch(MODPATH + 'util.subp') as m_subp:
+ with mock.patch(MODPATH + 'subp.subp') as m_subp:
setpass.handle(
'IGNORED', cfg=cfg, cloud=cloud, log=self.logger, args=[])
self.assertIn(
@@ -112,12 +112,12 @@ class TestSetPasswordsHandle(CiTestCase):
'\n'.join(valid_hashed_pwds) + '\n')],
m_subp.call_args_list)
- @mock.patch(MODPATH + "util.is_FreeBSD")
- @mock.patch(MODPATH + "util.subp")
- def test_freebsd_calls_custom_pw_cmds_to_set_and_expire_passwords(
- self, m_subp, m_is_freebsd):
- """FreeBSD calls custom pw commands instead of chpasswd and passwd"""
- m_is_freebsd.return_value = True
+ @mock.patch(MODPATH + "util.is_BSD")
+ @mock.patch(MODPATH + "subp.subp")
+ def test_bsd_calls_custom_pw_cmds_to_set_and_expire_passwords(
+ self, m_subp, m_is_bsd):
+ """BSD don't use chpasswd"""
+ m_is_bsd.return_value = True
cloud = self.tmp_cloud(distro='freebsd')
valid_pwds = ['ubuntu:passw0rd']
cfg = {'chpasswd': {'list': valid_pwds}}
@@ -129,18 +129,18 @@ class TestSetPasswordsHandle(CiTestCase):
mock.call(['pw', 'usermod', 'ubuntu', '-p', '01-Jan-1970'])],
m_subp.call_args_list)
- @mock.patch(MODPATH + "util.is_FreeBSD")
- @mock.patch(MODPATH + "util.subp")
+ @mock.patch(MODPATH + "util.is_BSD")
+ @mock.patch(MODPATH + "subp.subp")
def test_handle_on_chpasswd_list_creates_random_passwords(self, m_subp,
- m_is_freebsd):
+ m_is_bsd):
"""handle parses command set random passwords."""
- m_is_freebsd.return_value = False
+ m_is_bsd.return_value = False
cloud = self.tmp_cloud(distro='ubuntu')
valid_random_pwds = [
'root:R',
'ubuntu:RANDOM']
cfg = {'chpasswd': {'expire': 'false', 'list': valid_random_pwds}}
- with mock.patch(MODPATH + 'util.subp') as m_subp:
+ with mock.patch(MODPATH + 'subp.subp') as m_subp:
setpass.handle(
'IGNORED', cfg=cfg, cloud=cloud, log=self.logger, args=[])
self.assertIn(
diff --git a/cloudinit/config/tests/test_snap.py b/cloudinit/config/tests/test_snap.py
index cbbb173d..6d4c014a 100644
--- a/cloudinit/config/tests/test_snap.py
+++ b/cloudinit/config/tests/test_snap.py
@@ -92,7 +92,7 @@ class TestAddAssertions(CiTestCase):
super(TestAddAssertions, self).setUp()
self.tmp = self.tmp_dir()
- @mock.patch('cloudinit.config.cc_snap.util.subp')
+ @mock.patch('cloudinit.config.cc_snap.subp.subp')
def test_add_assertions_on_empty_list(self, m_subp):
"""When provided with an empty list, add_assertions does nothing."""
add_assertions([])
@@ -107,7 +107,7 @@ class TestAddAssertions(CiTestCase):
"assertion parameter was not a list or dict: I'm Not Valid",
str(context_manager.exception))
- @mock.patch('cloudinit.config.cc_snap.util.subp')
+ @mock.patch('cloudinit.config.cc_snap.subp.subp')
def test_add_assertions_adds_assertions_as_list(self, m_subp):
"""When provided with a list, add_assertions adds all assertions."""
self.assertEqual(
@@ -130,7 +130,7 @@ class TestAddAssertions(CiTestCase):
self.assertEqual(
util.load_file(compare_file), util.load_file(assert_file))
- @mock.patch('cloudinit.config.cc_snap.util.subp')
+ @mock.patch('cloudinit.config.cc_snap.subp.subp')
def test_add_assertions_adds_assertions_as_dict(self, m_subp):
"""When provided with a dict, add_assertions adds all assertions."""
self.assertEqual(
@@ -168,7 +168,7 @@ class TestRunCommands(CiTestCase):
super(TestRunCommands, self).setUp()
self.tmp = self.tmp_dir()
- @mock.patch('cloudinit.config.cc_snap.util.subp')
+ @mock.patch('cloudinit.config.cc_snap.subp.subp')
def test_run_commands_on_empty_list(self, m_subp):
"""When provided with an empty list, run_commands does nothing."""
run_commands([])
@@ -310,6 +310,52 @@ class TestSchema(CiTestCase, SchemaTestCaseMixin):
{'snap': {'commands': {'01': 'also valid'}}}, schema)
self.assertEqual('', self.logs.getvalue())
+ @mock.patch('cloudinit.config.cc_snap.run_commands')
+ def test_schema_when_commands_values_are_invalid_type(self, _):
+ """Warnings when snap:commands values are invalid type (e.g. int)"""
+ validate_cloudconfig_schema(
+ {'snap': {'commands': [123]}}, schema)
+ validate_cloudconfig_schema(
+ {'snap': {'commands': {'01': 123}}}, schema)
+ self.assertEqual(
+ "WARNING: Invalid config:\n"
+ "snap.commands.0: 123 is not valid under any of the given"
+ " schemas\n"
+ "WARNING: Invalid config:\n"
+ "snap.commands.01: 123 is not valid under any of the given"
+ " schemas\n",
+ self.logs.getvalue())
+
+ @mock.patch('cloudinit.config.cc_snap.run_commands')
+ def test_schema_when_commands_list_values_are_invalid_type(self, _):
+ """Warnings when snap:commands list values are wrong type (e.g. int)"""
+ validate_cloudconfig_schema(
+ {'snap': {'commands': [["snap", "install", 123]]}}, schema)
+ validate_cloudconfig_schema(
+ {'snap': {'commands': {'01': ["snap", "install", 123]}}}, schema)
+ self.assertEqual(
+ "WARNING: Invalid config:\n"
+ "snap.commands.0: ['snap', 'install', 123] is not valid under any"
+ " of the given schemas\n",
+ "WARNING: Invalid config:\n"
+ "snap.commands.0: ['snap', 'install', 123] is not valid under any"
+ " of the given schemas\n",
+ self.logs.getvalue())
+
+ @mock.patch('cloudinit.config.cc_snap.run_commands')
+ def test_schema_when_assertions_values_are_invalid_type(self, _):
+ """Warnings when snap:assertions values are invalid type (e.g. int)"""
+ validate_cloudconfig_schema(
+ {'snap': {'assertions': [123]}}, schema)
+ validate_cloudconfig_schema(
+ {'snap': {'assertions': {'01': 123}}}, schema)
+ self.assertEqual(
+ "WARNING: Invalid config:\n"
+ "snap.assertions.0: 123 is not of type 'string'\n"
+ "WARNING: Invalid config:\n"
+ "snap.assertions.01: 123 is not of type 'string'\n",
+ self.logs.getvalue())
+
@mock.patch('cloudinit.config.cc_snap.add_assertions')
def test_warn_schema_assertions_is_not_list_or_dict(self, _):
"""Warn when snap:assertions config is not a list or dict."""
@@ -345,7 +391,7 @@ class TestSchema(CiTestCase, SchemaTestCaseMixin):
def test_duplicates_are_fine_array_array(self):
"""Duplicated commands array/array entries are allowed."""
self.assertSchemaValid(
- {'commands': [["echo", "bye"], ["echo" "bye"]]},
+ {'commands': [["echo", "bye"], ["echo", "bye"]]},
"command entries can be duplicate.")
def test_duplicates_are_fine_array_string(self):
@@ -431,7 +477,7 @@ class TestHandle(CiTestCase):
self.assertEqual('HI\nMOM\n', util.load_file(outfile))
- @mock.patch('cloudinit.config.cc_snap.util.subp')
+ @mock.patch('cloudinit.config.cc_snap.subp.subp')
def test_handle_adds_assertions(self, m_subp):
"""Any configured snap assertions are provided to add_assertions."""
assert_file = self.tmp_path('snapd.assertions', dir=self.tmp)
@@ -447,7 +493,7 @@ class TestHandle(CiTestCase):
self.assertEqual(
util.load_file(compare_file), util.load_file(assert_file))
- @mock.patch('cloudinit.config.cc_snap.util.subp')
+ @mock.patch('cloudinit.config.cc_snap.subp.subp')
@skipUnlessJsonSchema()
def test_handle_validates_schema(self, m_subp):
"""Any provided configuration is runs validate_cloudconfig_schema."""
diff --git a/cloudinit/config/tests/test_ubuntu_advantage.py b/cloudinit/config/tests/test_ubuntu_advantage.py
index 8c4161ef..db7fb726 100644
--- a/cloudinit/config/tests/test_ubuntu_advantage.py
+++ b/cloudinit/config/tests/test_ubuntu_advantage.py
@@ -3,7 +3,7 @@
from cloudinit.config.cc_ubuntu_advantage import (
configure_ua, handle, maybe_install_ua_tools, schema)
from cloudinit.config.schema import validate_cloudconfig_schema
-from cloudinit import util
+from cloudinit import subp
from cloudinit.tests.helpers import (
CiTestCase, mock, SchemaTestCaseMixin, skipUnlessJsonSchema)
@@ -26,10 +26,10 @@ class TestConfigureUA(CiTestCase):
super(TestConfigureUA, self).setUp()
self.tmp = self.tmp_dir()
- @mock.patch('%s.util.subp' % MPATH)
+ @mock.patch('%s.subp.subp' % MPATH)
def test_configure_ua_attach_error(self, m_subp):
"""Errors from ua attach command are raised."""
- m_subp.side_effect = util.ProcessExecutionError(
+ m_subp.side_effect = subp.ProcessExecutionError(
'Invalid token SomeToken')
with self.assertRaises(RuntimeError) as context_manager:
configure_ua(token='SomeToken')
@@ -39,7 +39,7 @@ class TestConfigureUA(CiTestCase):
'Stdout: Invalid token SomeToken\nStderr: -',
str(context_manager.exception))
- @mock.patch('%s.util.subp' % MPATH)
+ @mock.patch('%s.subp.subp' % MPATH)
def test_configure_ua_attach_with_token(self, m_subp):
"""When token is provided, attach the machine to ua using the token."""
configure_ua(token='SomeToken')
@@ -48,7 +48,7 @@ class TestConfigureUA(CiTestCase):
'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
self.logs.getvalue())
- @mock.patch('%s.util.subp' % MPATH)
+ @mock.patch('%s.subp.subp' % MPATH)
def test_configure_ua_attach_on_service_error(self, m_subp):
"""all services should be enabled and then any failures raised"""
@@ -56,7 +56,7 @@ class TestConfigureUA(CiTestCase):
fail_cmds = [['ua', 'enable', svc] for svc in ['esm', 'cc']]
if cmd in fail_cmds and capture:
svc = cmd[-1]
- raise util.ProcessExecutionError(
+ raise subp.ProcessExecutionError(
'Invalid {} credentials'.format(svc.upper()))
m_subp.side_effect = fake_subp
@@ -83,7 +83,7 @@ class TestConfigureUA(CiTestCase):
'Failure enabling Ubuntu Advantage service(s): "esm", "cc"',
str(context_manager.exception))
- @mock.patch('%s.util.subp' % MPATH)
+ @mock.patch('%s.subp.subp' % MPATH)
def test_configure_ua_attach_with_empty_services(self, m_subp):
"""When services is an empty list, do not auto-enable attach."""
configure_ua(token='SomeToken', enable=[])
@@ -92,7 +92,7 @@ class TestConfigureUA(CiTestCase):
'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
self.logs.getvalue())
- @mock.patch('%s.util.subp' % MPATH)
+ @mock.patch('%s.subp.subp' % MPATH)
def test_configure_ua_attach_with_specific_services(self, m_subp):
"""When services a list, only enable specific services."""
configure_ua(token='SomeToken', enable=['fips'])
@@ -105,7 +105,7 @@ class TestConfigureUA(CiTestCase):
self.logs.getvalue())
@mock.patch('%s.maybe_install_ua_tools' % MPATH, mock.MagicMock())
- @mock.patch('%s.util.subp' % MPATH)
+ @mock.patch('%s.subp.subp' % MPATH)
def test_configure_ua_attach_with_string_services(self, m_subp):
"""When services a string, treat as singleton list and warn"""
configure_ua(token='SomeToken', enable='fips')
@@ -119,7 +119,7 @@ class TestConfigureUA(CiTestCase):
'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
self.logs.getvalue())
- @mock.patch('%s.util.subp' % MPATH)
+ @mock.patch('%s.subp.subp' % MPATH)
def test_configure_ua_attach_with_weird_services(self, m_subp):
"""When services not string or list, warn but still attach"""
configure_ua(token='SomeToken', enable={'deffo': 'wont work'})
@@ -285,7 +285,7 @@ class TestMaybeInstallUATools(CiTestCase):
super(TestMaybeInstallUATools, self).setUp()
self.tmp = self.tmp_dir()
- @mock.patch('%s.util.which' % MPATH)
+ @mock.patch('%s.subp.which' % MPATH)
def test_maybe_install_ua_tools_noop_when_ua_tools_present(self, m_which):
"""Do nothing if ubuntu-advantage-tools already exists."""
m_which.return_value = '/usr/bin/ua' # already installed
@@ -294,7 +294,7 @@ class TestMaybeInstallUATools(CiTestCase):
'Some apt error')
maybe_install_ua_tools(cloud=FakeCloud(distro)) # No RuntimeError
- @mock.patch('%s.util.which' % MPATH)
+ @mock.patch('%s.subp.which' % MPATH)
def test_maybe_install_ua_tools_raises_update_errors(self, m_which):
"""maybe_install_ua_tools logs and raises apt update errors."""
m_which.return_value = None
@@ -306,7 +306,7 @@ class TestMaybeInstallUATools(CiTestCase):
self.assertEqual('Some apt error', str(context_manager.exception))
self.assertIn('Package update failed\nTraceback', self.logs.getvalue())
- @mock.patch('%s.util.which' % MPATH)
+ @mock.patch('%s.subp.which' % MPATH)
def test_maybe_install_ua_raises_install_errors(self, m_which):
"""maybe_install_ua_tools logs and raises package install errors."""
m_which.return_value = None
@@ -320,7 +320,7 @@ class TestMaybeInstallUATools(CiTestCase):
self.assertIn(
'Failed to install ubuntu-advantage-tools\n', self.logs.getvalue())
- @mock.patch('%s.util.which' % MPATH)
+ @mock.patch('%s.subp.which' % MPATH)
def test_maybe_install_ua_tools_happy_path(self, m_which):
"""maybe_install_ua_tools installs ubuntu-advantage-tools."""
m_which.return_value = None
diff --git a/cloudinit/config/tests/test_ubuntu_drivers.py b/cloudinit/config/tests/test_ubuntu_drivers.py
index 46952692..504ba356 100644
--- a/cloudinit/config/tests/test_ubuntu_drivers.py
+++ b/cloudinit/config/tests/test_ubuntu_drivers.py
@@ -7,7 +7,7 @@ from cloudinit.tests.helpers import CiTestCase, skipUnlessJsonSchema, mock
from cloudinit.config.schema import (
SchemaValidationError, validate_cloudconfig_schema)
from cloudinit.config import cc_ubuntu_drivers as drivers
-from cloudinit.util import ProcessExecutionError
+from cloudinit.subp import ProcessExecutionError
MPATH = "cloudinit.config.cc_ubuntu_drivers."
M_TMP_PATH = MPATH + "temp_utils.mkdtemp"
@@ -16,6 +16,13 @@ OLD_UBUNTU_DRIVERS_ERROR_STDERR = (
"(choose from 'list', 'autoinstall', 'devices', 'debug')\n")
+# The tests in this module call helper methods which are decorated with
+# mock.patch. pylint doesn't understand that mock.patch passes parameters to
+# the decorated function, so it incorrectly reports that we aren't passing
+# values for all parameters. Instead of annotating every single call, we
+# disable it for the entire module:
+# pylint: disable=no-value-for-parameter
+
class AnyTempScriptAndDebconfFile(object):
def __init__(self, tmp_dir, debconf_file):
@@ -46,8 +53,8 @@ class TestUbuntuDrivers(CiTestCase):
schema=drivers.schema, strict=True)
@mock.patch(M_TMP_PATH)
- @mock.patch(MPATH + "util.subp", return_value=('', ''))
- @mock.patch(MPATH + "util.which", return_value=False)
+ @mock.patch(MPATH + "subp.subp", return_value=('', ''))
+ @mock.patch(MPATH + "subp.which", return_value=False)
def _assert_happy_path_taken(
self, config, m_which, m_subp, m_tmp):
"""Positive path test through handle. Package should be installed."""
@@ -73,8 +80,8 @@ class TestUbuntuDrivers(CiTestCase):
self._assert_happy_path_taken(new_config)
@mock.patch(M_TMP_PATH)
- @mock.patch(MPATH + "util.subp")
- @mock.patch(MPATH + "util.which", return_value=False)
+ @mock.patch(MPATH + "subp.subp")
+ @mock.patch(MPATH + "subp.which", return_value=False)
def test_handle_raises_error_if_no_drivers_found(
self, m_which, m_subp, m_tmp):
"""If ubuntu-drivers doesn't install any drivers, raise an error."""
@@ -102,8 +109,8 @@ class TestUbuntuDrivers(CiTestCase):
self.assertIn('ubuntu-drivers found no drivers for installation',
self.logs.getvalue())
- @mock.patch(MPATH + "util.subp", return_value=('', ''))
- @mock.patch(MPATH + "util.which", return_value=False)
+ @mock.patch(MPATH + "subp.subp", return_value=('', ''))
+ @mock.patch(MPATH + "subp.which", return_value=False)
def _assert_inert_with_config(self, config, m_which, m_subp):
"""Helper to reduce repetition when testing negative cases"""
myCloud = mock.MagicMock()
@@ -147,8 +154,8 @@ class TestUbuntuDrivers(CiTestCase):
self.assertEqual(0, m_install_drivers.call_count)
@mock.patch(M_TMP_PATH)
- @mock.patch(MPATH + "util.subp", return_value=('', ''))
- @mock.patch(MPATH + "util.which", return_value=True)
+ @mock.patch(MPATH + "subp.subp", return_value=('', ''))
+ @mock.patch(MPATH + "subp.which", return_value=True)
def test_install_drivers_no_install_if_present(
self, m_which, m_subp, m_tmp):
"""If 'ubuntu-drivers' is present, no package install should occur."""
@@ -174,8 +181,8 @@ class TestUbuntuDrivers(CiTestCase):
self.assertEqual(0, pkg_install.call_count)
@mock.patch(M_TMP_PATH)
- @mock.patch(MPATH + "util.subp")
- @mock.patch(MPATH + "util.which", return_value=False)
+ @mock.patch(MPATH + "subp.subp")
+ @mock.patch(MPATH + "subp.which", return_value=False)
def test_install_drivers_handles_old_ubuntu_drivers_gracefully(
self, m_which, m_subp, m_tmp):
"""Older ubuntu-drivers versions should emit message and raise error"""
@@ -212,8 +219,8 @@ class TestUbuntuDriversWithVersion(TestUbuntuDrivers):
install_gpgpu = ['ubuntu-drivers', 'install', '--gpgpu', 'nvidia:123']
@mock.patch(M_TMP_PATH)
- @mock.patch(MPATH + "util.subp", return_value=('', ''))
- @mock.patch(MPATH + "util.which", return_value=False)
+ @mock.patch(MPATH + "subp.subp", return_value=('', ''))
+ @mock.patch(MPATH + "subp.which", return_value=False)
def test_version_none_uses_latest(self, m_which, m_subp, m_tmp):
tdir = self.tmp_dir()
debconf_file = os.path.join(tdir, 'nvidia.template')
diff --git a/cloudinit/config/tests/test_users_groups.py b/cloudinit/config/tests/test_users_groups.py
index f620b597..df89ddb3 100644
--- a/cloudinit/config/tests/test_users_groups.py
+++ b/cloudinit/config/tests/test_users_groups.py
@@ -39,7 +39,7 @@ class TestHandleUsersGroups(CiTestCase):
cloud = self.tmp_cloud(
distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
cc_users_groups.handle('modulename', cfg, cloud, None, None)
- self.assertItemsEqual(
+ self.assertCountEqual(
m_user.call_args_list,
[mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True,
shell='/bin/bash'),
@@ -65,7 +65,7 @@ class TestHandleUsersGroups(CiTestCase):
cloud = self.tmp_cloud(
distro='freebsd', sys_cfg=sys_cfg, metadata=metadata)
cc_users_groups.handle('modulename', cfg, cloud, None, None)
- self.assertItemsEqual(
+ self.assertCountEqual(
m_fbsd_user.call_args_list,
[mock.call('freebsd', groups='wheel', lock_passwd=True,
shell='/bin/tcsh'),
@@ -86,7 +86,7 @@ class TestHandleUsersGroups(CiTestCase):
cloud = self.tmp_cloud(
distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
cc_users_groups.handle('modulename', cfg, cloud, None, None)
- self.assertItemsEqual(
+ self.assertCountEqual(
m_user.call_args_list,
[mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True,
shell='/bin/bash'),
@@ -107,7 +107,7 @@ class TestHandleUsersGroups(CiTestCase):
cloud = self.tmp_cloud(
distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
cc_users_groups.handle('modulename', cfg, cloud, None, None)
- self.assertItemsEqual(
+ self.assertCountEqual(
m_user.call_args_list,
[mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True,
shell='/bin/bash'),
@@ -146,7 +146,7 @@ class TestHandleUsersGroups(CiTestCase):
cloud = self.tmp_cloud(
distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
cc_users_groups.handle('modulename', cfg, cloud, None, None)
- self.assertItemsEqual(
+ self.assertCountEqual(
m_user.call_args_list,
[mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True,
shell='/bin/bash'),
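assertItemsEqual was removed in Python 3; assertCountEqual is the direct replacement, comparing elements regardless of order (and counting duplicates). A tiny standalone illustration:

    import unittest

    class Example(unittest.TestCase):
        def test_order_insensitive(self):
            self.assertCountEqual([1, 2, 2], [2, 2, 1])  # passes
            # self.assertCountEqual([1, 2], [2, 2, 1])   # would fail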
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 92598a2d..2537608f 100755
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -13,6 +13,8 @@ import abc
import os
import re
import stat
+import string
+import urllib.parse
from io import StringIO
from cloudinit import importer
@@ -23,9 +25,14 @@ from cloudinit.net import network_state
from cloudinit.net import renderers
from cloudinit import ssh_util
from cloudinit import type_utils
+from cloudinit import subp
from cloudinit import util
+from cloudinit.features import \
+ ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES
+
from cloudinit.distros.parsers import hosts
+from .networking import LinuxNetworking
# Used when a cloud-config module can be run on all cloud-init distributions.
@@ -33,12 +40,13 @@ from cloudinit.distros.parsers import hosts
ALL_DISTROS = 'all'
OSFAMILIES = {
+ 'alpine': ['alpine'],
+ 'arch': ['arch'],
'debian': ['debian', 'ubuntu'],
- 'redhat': ['amazon', 'centos', 'fedora', 'rhel'],
- 'gentoo': ['gentoo'],
'freebsd': ['freebsd'],
+ 'gentoo': ['gentoo'],
+ 'redhat': ['amazon', 'centos', 'fedora', 'rhel'],
'suse': ['opensuse', 'sles'],
- 'arch': ['arch'],
}
LOG = logging.getLogger(__name__)
@@ -50,6 +58,9 @@ _EC2_AZ_RE = re.compile('^[a-z][a-z]-(?:[a-z]+-)+[0-9][a-z]$')
# Default NTP Client Configurations
PREFERRED_NTP_CLIENTS = ['chrony', 'systemd-timesyncd', 'ntp', 'ntpdate']
+# Letters/Digits/Hyphen characters, for use in domain name validation
+LDH_ASCII_CHARS = string.ascii_letters + string.digits + "-"
+
class Distro(metaclass=abc.ABCMeta):
@@ -61,11 +72,13 @@ class Distro(metaclass=abc.ABCMeta):
init_cmd = ['service'] # systemctl, service etc
renderer_configs = {}
_preferred_ntp_clients = None
+ networking_cls = LinuxNetworking
def __init__(self, name, cfg, paths):
self._paths = paths
self._cfg = cfg
self.name = name
+ self.networking = self.networking_cls()
@abc.abstractmethod
def install_packages(self, pkglist):
@@ -220,8 +233,8 @@ class Distro(metaclass=abc.ABCMeta):
LOG.debug("Non-persistently setting the system hostname to %s",
hostname)
try:
- util.subp(['hostname', hostname])
- except util.ProcessExecutionError:
+ subp.subp(['hostname', hostname])
+ except subp.ProcessExecutionError:
util.logexc(LOG, "Failed to non-persistently adjust the system "
"hostname to %s", hostname)
@@ -356,12 +369,12 @@ class Distro(metaclass=abc.ABCMeta):
LOG.debug("Attempting to run bring up interface %s using command %s",
device_name, cmd)
try:
- (_out, err) = util.subp(cmd)
+ (_out, err) = subp.subp(cmd)
if len(err):
LOG.warning("Running %s resulted in stderr output: %s",
cmd, err)
return True
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
util.logexc(LOG, "Running interface command %s failed", cmd)
return False
@@ -380,6 +393,9 @@ class Distro(metaclass=abc.ABCMeta):
def add_user(self, name, **kwargs):
"""
Add a user to the system using standard GNU tools
+
+ This should be overridden on distros where useradd is not desirable or
+ not available.
"""
# XXX need to make add_user idempotent somehow as we
# still want to add groups or modify SSH keys on pre-existing
@@ -475,7 +491,7 @@ class Distro(metaclass=abc.ABCMeta):
# Run the command
LOG.debug("Adding user %s", name)
try:
- util.subp(useradd_cmd, logstring=log_useradd_cmd)
+ subp.subp(useradd_cmd, logstring=log_useradd_cmd)
except Exception as e:
util.logexc(LOG, "Failed to create user %s", name)
raise e
@@ -495,7 +511,7 @@ class Distro(metaclass=abc.ABCMeta):
# Run the command
LOG.debug("Adding snap user %s", name)
try:
- (out, err) = util.subp(create_user_cmd, logstring=create_user_cmd,
+ (out, err) = subp.subp(create_user_cmd, logstring=create_user_cmd,
capture=True)
LOG.debug("snap create-user returned: %s:%s", out, err)
jobj = util.load_json(out)
@@ -508,9 +524,22 @@ class Distro(metaclass=abc.ABCMeta):
def create_user(self, name, **kwargs):
"""
- Creates users for the system using the GNU passwd tools. This
- will work on an GNU system. This should be overriden on
- distros where useradd is not desirable or not available.
+ Creates or partially updates the ``name`` user in the system.
+
+ This defers the actual user creation to ``self.add_user`` or
+ ``self.add_snap_user``, and most of the keys in ``kwargs`` will be
+ processed there if and only if the user does not already exist.
+
+ Once the existence of the ``name`` user has been ensured, this method
+ then processes these keys (for both just-created and pre-existing
+ users):
+
+ * ``plain_text_passwd``
+ * ``hashed_passwd``
+ * ``lock_passwd``
+ * ``sudo``
+ * ``ssh_authorized_keys``
+ * ``ssh_redirect_user``
"""
# Add a snap user, if requested
@@ -577,20 +606,21 @@ class Distro(metaclass=abc.ABCMeta):
# passwd must use short '-l' due to SLES11 lacking long form '--lock'
lock_tools = (['passwd', '-l', name], ['usermod', '--lock', name])
try:
- cmd = next(l for l in lock_tools if util.which(l[0]))
- except StopIteration:
+ cmd = next(tool for tool in lock_tools if subp.which(tool[0]))
+ except StopIteration as e:
raise RuntimeError((
"Unable to lock user account '%s'. No tools available. "
- " Tried: %s.") % (name, [c[0] for c in lock_tools]))
+ " Tried: %s.") % (name, [c[0] for c in lock_tools])
+ ) from e
try:
- util.subp(cmd)
+ subp.subp(cmd)
except Exception as e:
util.logexc(LOG, 'Failed to disable password for user %s', name)
raise e
def expire_passwd(self, user):
try:
- util.subp(['passwd', '--expire', user])
+ subp.subp(['passwd', '--expire', user])
except Exception as e:
util.logexc(LOG, "Failed to set 'expire' for %s", user)
raise e
@@ -606,7 +636,7 @@ class Distro(metaclass=abc.ABCMeta):
cmd.append('-e')
try:
- util.subp(cmd, pass_string, logstring="chpasswd for %s" % user)
+ subp.subp(cmd, pass_string, logstring="chpasswd for %s" % user)
except Exception as e:
util.logexc(LOG, "Failed to set password for %s", user)
raise e
@@ -703,7 +733,7 @@ class Distro(metaclass=abc.ABCMeta):
LOG.warning("Skipping creation of existing group '%s'", name)
else:
try:
- util.subp(group_add_cmd)
+ subp.subp(group_add_cmd)
LOG.info("Created new group %s", name)
except Exception:
util.logexc(LOG, "Failed to create group %s", name)
@@ -716,10 +746,115 @@ class Distro(metaclass=abc.ABCMeta):
"; user does not exist.", member, name)
continue
- util.subp(['usermod', '-a', '-G', name, member])
+ subp.subp(['usermod', '-a', '-G', name, member])
LOG.info("Added user '%s' to group '%s'", member, name)
+def _apply_hostname_transformations_to_url(url: str, transformations: list):
+ """
+ Apply transformations to a URL's hostname, return transformed URL.
+
+ This is a separate function because unwrapping and rewrapping only the
+ hostname portion of a URL is complex.
+
+ :param url:
+ The URL to operate on.
+ :param transformations:
+ A list of ``(str) -> Optional[str]`` functions, which will be applied
+ in order to the hostname portion of the URL. If any function
+ (regardless of ordering) returns None, ``url`` will be returned without
+ any modification.
+
+ :return:
+ A string whose value is ``url`` with the hostname ``transformations``
+ applied, or ``None`` if ``url`` is unparseable.
+ """
+ try:
+ parts = urllib.parse.urlsplit(url)
+ except ValueError:
+ # If we can't even parse the URL, we shouldn't use it for anything
+ return None
+ new_hostname = parts.hostname
+ if new_hostname is None:
+ # The URL given doesn't have a hostname component, so (a) we can't
+ # transform it, and (b) it won't work as a mirror; return None.
+ return None
+
+ for transformation in transformations:
+ new_hostname = transformation(new_hostname)
+ if new_hostname is None:
+ # If a transformation returns None, that indicates we should abort
+ # processing and return `url` unmodified
+ return url
+
+ new_netloc = new_hostname
+ if parts.port is not None:
+ new_netloc = "{}:{}".format(new_netloc, parts.port)
+ return urllib.parse.urlunsplit(parts._replace(netloc=new_netloc))
+
+
+def _sanitize_mirror_url(url: str):
+ """
+ Given a mirror URL, replace or remove any invalid URI characters.
+
+ This performs the following actions on the URL's hostname:
+ * Checks if it is an IP address, returning the URL immediately if it is
+ * Converts it to its IDN form (see below for details)
+ * Replaces any non-Letters/Digits/Hyphen (LDH) characters in it with
+ hyphens
+ * Removes any leading/trailing hyphens from each domain name label
+
+ Before we replace any invalid domain name characters, we first need to
+ ensure that any valid non-ASCII characters in the hostname will not be
+ replaced, by ensuring the hostname is in its Internationalized domain name
+ (IDN) representation (see RFC 5890). This conversion has to be applied to
+ the whole hostname (rather than just the substitution variables), because
+ the Punycode algorithm used by IDNA transcodes each part of the hostname as
+ a whole string (rather than encoding individual characters). It cannot be
+ applied to the whole URL, because (a) the Punycode algorithm expects to
+ operate on domain names so doesn't output a valid URL, and (b) non-ASCII
+ characters in non-hostname parts of the URL aren't encoded via Punycode.
+
+ To put this in RFC 5890's terminology: before we remove or replace any
+ characters from our domain name (which we do to ensure that each label is a
+ valid LDH Label), we first ensure each label is in its A-label form.
+
+ (Note that Python's builtin idna encoding is actually IDNA2003, not
+ IDNA2008. This changes the specifics of how some characters are encoded to
+ ASCII, but doesn't affect the logic here.)
+
+ :param url:
+ The URL to operate on.
+
+ :return:
+ A sanitized version of the URL, which will have been IDNA encoded if
+ necessary, or ``None`` if the generated string is not a parseable URL.
+ """
+ # Acceptable characters are LDH characters, plus "." to separate each label
+ acceptable_chars = LDH_ASCII_CHARS + "."
+ transformations = [
+ # This is an IP address, not a hostname, so no need to apply the
+ # transformations
+ lambda hostname: None if net.is_ip_address(hostname) else hostname,
+
+ # Encode with IDNA to get the correct characters (as `bytes`), then
+ # decode with ASCII so we return a `str`
+ lambda hostname: hostname.encode('idna').decode('ascii'),
+
+ # Replace any unacceptable characters with "-"
+ lambda hostname: ''.join(
+ c if c in acceptable_chars else "-" for c in hostname
+ ),
+
+ # Drop leading/trailing hyphens from each part of the hostname
+ lambda hostname: '.'.join(
+ part.strip('-') for part in hostname.split('.')
+ ),
+ ]
+
+ return _apply_hostname_transformations_to_url(url, transformations)
+
+
def _get_package_mirror_info(mirror_info, data_source=None,
mirror_filter=util.search_for_mirror):
# given a arch specific 'mirror_info' entry (from package_mirrors)
@@ -735,7 +870,12 @@ def _get_package_mirror_info(mirror_info, data_source=None,
# ec2 availability zones are named cc-direction-[0-9][a-d] (us-east-1b)
# the region is us-east-1. so region = az[0:-1]
if _EC2_AZ_RE.match(data_source.availability_zone):
- subst['ec2_region'] = "%s" % data_source.availability_zone[0:-1]
+ ec2_region = data_source.availability_zone[0:-1]
+
+ if ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES:
+ subst['ec2_region'] = "%s" % ec2_region
+ elif data_source.platform_type == "ec2":
+ subst['ec2_region'] = "%s" % ec2_region
if data_source and data_source.region:
subst['region'] = data_source.region
@@ -748,9 +888,13 @@ def _get_package_mirror_info(mirror_info, data_source=None,
mirrors = []
for tmpl in searchlist:
try:
- mirrors.append(tmpl % subst)
+ mirror = tmpl % subst
except KeyError:
- pass
+ continue
+
+ mirror = _sanitize_mirror_url(mirror)
+ if mirror is not None:
+ mirrors.append(mirror)
found = mirror_filter(mirrors)
if found:
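# A minimal, hedged sketch of the sanitization pipeline added above, grounded
# in the parametrized cases in cloudinit/distros/tests/test_init.py later in
# this diff; assumes cloud-init is importable. The URLs are examples only.
from cloudinit.distros import _sanitize_mirror_url

# Non-ASCII labels are Punycode-encoded before any character replacement,
# so valid internationalized hostnames survive intact (ports included):
assert (_sanitize_mirror_url('http://www.IDNA-ТεЅТ̣.com:8080/ubuntu')
        == 'http://www.xn--idna--4kd53hh6aba3q.com:8080/ubuntu')
# Non-LDH ASCII characters are replaced with hyphens, and leading/trailing
# hyphens are then stripped from each label:
assert (_sanitize_mirror_url('http://-lead-ing.trail-ing-.example.com/x')
        == 'http://lead-ing.trail-ing.example.com/x')
# IP addresses short-circuit the transformation list and pass through:
assert (_sanitize_mirror_url('http://192.168.1.1:8080/ubuntu')
        == 'http://192.168.1.1:8080/ubuntu')
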
diff --git a/cloudinit/distros/alpine.py b/cloudinit/distros/alpine.py
new file mode 100644
index 00000000..e42443fc
--- /dev/null
+++ b/cloudinit/distros/alpine.py
@@ -0,0 +1,165 @@
+# Copyright (C) 2016 Matt Dainty
+# Copyright (C) 2020 Dermot Bradley
+#
+# Author: Matt Dainty <matt@bodgit-n-scarper.com>
+# Author: Dermot Bradley <dermot_bradley@yahoo.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit import distros
+from cloudinit import helpers
+from cloudinit import log as logging
+from cloudinit import subp
+from cloudinit import util
+
+from cloudinit.distros.parsers.hostname import HostnameConf
+
+from cloudinit.settings import PER_INSTANCE
+
+LOG = logging.getLogger(__name__)
+
+NETWORK_FILE_HEADER = """\
+# This file is generated from information provided by the datasource. Changes
+# to it will not persist across an instance reboot. To disable cloud-init's
+# network configuration capabilities, write a file
+# /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following:
+# network: {config: disabled}
+
+"""
+
+
+class Distro(distros.Distro):
+ init_cmd = ['rc-service'] # init scripts
+ locale_conf_fn = "/etc/profile.d/locale.sh"
+ network_conf_fn = "/etc/network/interfaces"
+ renderer_configs = {
+ "eni": {"eni_path": network_conf_fn,
+ "eni_header": NETWORK_FILE_HEADER}
+ }
+
+ def __init__(self, name, cfg, paths):
+ distros.Distro.__init__(self, name, cfg, paths)
+ # This will be used to restrict certain
+ # calls from repeatedly happening (when they
+ # should only happen say once per instance...)
+ self._runner = helpers.Runners(paths)
+ self.default_locale = 'C.UTF-8'
+ self.osfamily = 'alpine'
+ cfg['ssh_svcname'] = 'sshd'
+
+ def get_locale(self):
+ """The default locale for Alpine Linux is different than
+ cloud-init's DataSource default.
+ """
+ return self.default_locale
+
+ def apply_locale(self, locale, out_fn=None):
+ # Alpine has limited locale support due to musl library limitations
+
+ if not locale:
+ locale = self.default_locale
+ if not out_fn:
+ out_fn = self.locale_conf_fn
+
+ lines = [
+ "#",
+ "# This file is created by cloud-init once per new instance boot",
+ "#",
+ "export CHARSET=UTF-8",
+ "export LANG=%s" % locale,
+ "export LC_COLLATE=C",
+ "",
+ ]
+ util.write_file(out_fn, "\n".join(lines), 0o644)
+
+ def install_packages(self, pkglist):
+ self.update_package_sources()
+ self.package_command('add', pkgs=pkglist)
+
+ def _write_network_config(self, netconfig):
+ return self._supported_write_network_config(netconfig)
+
+ def _bring_up_interfaces(self, device_names):
+ use_all = False
+ for d in device_names:
+ if d == 'all':
+ use_all = True
+ if use_all:
+ return distros.Distro._bring_up_interface(self, '-a')
+ else:
+ return distros.Distro._bring_up_interfaces(self, device_names)
+
+ def _write_hostname(self, your_hostname, out_fn):
+ conf = None
+ try:
+ # Try to update the previous one
+ # so lets see if we can read it first.
+ conf = self._read_hostname_conf(out_fn)
+ except IOError:
+ pass
+ if not conf:
+ conf = HostnameConf('')
+ conf.set_hostname(your_hostname)
+ util.write_file(out_fn, str(conf), 0o644)
+
+ def _read_system_hostname(self):
+ sys_hostname = self._read_hostname(self.hostname_conf_fn)
+ return (self.hostname_conf_fn, sys_hostname)
+
+ def _read_hostname_conf(self, filename):
+ conf = HostnameConf(util.load_file(filename))
+ conf.parse()
+ return conf
+
+ def _read_hostname(self, filename, default=None):
+ hostname = None
+ try:
+ conf = self._read_hostname_conf(filename)
+ hostname = conf.hostname
+ except IOError:
+ pass
+ if not hostname:
+ return default
+ return hostname
+
+ def _get_localhost_ip(self):
+ return "127.0.1.1"
+
+ def set_timezone(self, tz):
+ distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz))
+
+ def package_command(self, command, args=None, pkgs=None):
+ if pkgs is None:
+ pkgs = []
+
+ cmd = ['apk']
+ # Redirect output
+ cmd.append("--quiet")
+
+ if args and isinstance(args, str):
+ cmd.append(args)
+ elif args and isinstance(args, list):
+ cmd.extend(args)
+
+ if command:
+ cmd.append(command)
+
+ pkglist = util.expand_package_list('%s-%s', pkgs)
+ cmd.extend(pkglist)
+
+ # Allow the output of this to flow outwards (ie not be captured)
+ subp.subp(cmd, capture=False)
+
+ def update_package_sources(self):
+ self._runner.run("update-sources", self.package_command,
+ ["update"], freq=PER_INSTANCE)
+
+ @property
+ def preferred_ntp_clients(self):
+ """Allow distro to determine the preferred ntp client list"""
+ if not self._preferred_ntp_clients:
+ self._preferred_ntp_clients = ['chrony', 'ntp']
+
+ return self._preferred_ntp_clients
+
+# vi: ts=4 expandtab
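# A short, hedged sketch of the apk invocation built by package_command()
# above; assumes cloud-init is importable, and the package names here are
# examples only.
from cloudinit import util

# Plain names pass through; (name, version) pairs use the '%s-%s' format:
print(util.expand_package_list('%s-%s', ['vim', ['busybox', '1.31']]))
# expected: ['vim', 'busybox-1.31'] -> apk --quiet add vim busybox-1.31
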
diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py
index 9f89c5f9..967be168 100644
--- a/cloudinit/distros/arch.py
+++ b/cloudinit/distros/arch.py
@@ -8,6 +8,7 @@ from cloudinit import distros
from cloudinit import helpers
from cloudinit import log as logging
from cloudinit import util
+from cloudinit import subp
from cloudinit.distros import net_util
from cloudinit.distros.parsers.hostname import HostnameConf
@@ -44,7 +45,7 @@ class Distro(distros.Distro):
def apply_locale(self, locale, out_fn=None):
if not out_fn:
out_fn = self.locale_conf_fn
- util.subp(['locale-gen', '-G', locale], capture=False)
+ subp.subp(['locale-gen', '-G', locale], capture=False)
# "" provides trailing newline during join
lines = [
util.make_header(),
@@ -60,9 +61,9 @@ class Distro(distros.Distro):
def _write_network_config(self, netconfig):
try:
return self._supported_write_network_config(netconfig)
- except RendererNotFoundError:
+ except RendererNotFoundError as e:
# Fall back to old _write_network
- raise NotImplementedError
+ raise NotImplementedError from e
def _write_network(self, settings):
entries = net_util.translate_network(settings)
@@ -76,11 +77,11 @@ class Distro(distros.Distro):
def _enable_interface(self, device_name):
cmd = ['netctl', 'reenable', device_name]
try:
- (_out, err) = util.subp(cmd)
+ (_out, err) = subp.subp(cmd)
if len(err):
LOG.warning("Running %s resulted in stderr output: %s",
cmd, err)
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
util.logexc(LOG, "Running interface command %s failed", cmd)
def _bring_up_interface(self, device_name):
@@ -88,12 +89,12 @@ class Distro(distros.Distro):
LOG.debug("Attempting to run bring up interface %s using command %s",
device_name, cmd)
try:
- (_out, err) = util.subp(cmd)
+ (_out, err) = subp.subp(cmd)
if len(err):
LOG.warning("Running %s resulted in stderr output: %s",
cmd, err)
return True
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
util.logexc(LOG, "Running interface command %s failed", cmd)
return False
@@ -158,7 +159,7 @@ class Distro(distros.Distro):
cmd.extend(pkglist)
# Allow the output of this to flow outwards (ie not be captured)
- util.subp(cmd, capture=False)
+ subp.subp(cmd, capture=False)
def update_package_sources(self):
self._runner.run("update-sources", self.package_command,
@@ -173,8 +174,8 @@ def _render_network(entries, target="/", conf_dir="etc/netctl",
devs = []
nameservers = []
- resolv_conf = util.target_path(target, resolv_conf)
- conf_dir = util.target_path(target, conf_dir)
+ resolv_conf = subp.target_path(target, resolv_conf)
+ conf_dir = subp.target_path(target, conf_dir)
for (dev, info) in entries.items():
if dev == 'lo':
diff --git a/cloudinit/distros/bsd.py b/cloudinit/distros/bsd.py
new file mode 100644
index 00000000..2ed7a7d5
--- /dev/null
+++ b/cloudinit/distros/bsd.py
@@ -0,0 +1,129 @@
+import platform
+
+from cloudinit import distros
+from cloudinit.distros import bsd_utils
+from cloudinit import helpers
+from cloudinit import log as logging
+from cloudinit import net
+from cloudinit import subp
+from cloudinit import util
+from .networking import BSDNetworking
+
+LOG = logging.getLogger(__name__)
+
+
+class BSD(distros.Distro):
+ networking_cls = BSDNetworking
+ hostname_conf_fn = '/etc/rc.conf'
+ rc_conf_fn = "/etc/rc.conf"
+
+ # Set in BSD distro subclasses
+ group_add_cmd_prefix = []
+ pkg_cmd_install_prefix = []
+ pkg_cmd_remove_prefix = []
+ # There is no update/upgrade on OpenBSD
+ pkg_cmd_update_prefix = None
+ pkg_cmd_upgrade_prefix = None
+
+ def __init__(self, name, cfg, paths):
+ super().__init__(name, cfg, paths)
+ # This will be used to restrict certain
+ # calls from repeatedly happening (when they
+ # should only happen say once per instance...)
+ self._runner = helpers.Runners(paths)
+ cfg['ssh_svcname'] = 'sshd'
+ self.osfamily = platform.system().lower()
+
+ def _read_system_hostname(self):
+ sys_hostname = self._read_hostname(self.hostname_conf_fn)
+ return (self.hostname_conf_fn, sys_hostname)
+
+ def _read_hostname(self, filename, default=None):
+ return bsd_utils.get_rc_config_value('hostname')
+
+ def _get_add_member_to_group_cmd(self, member_name, group_name):
+ raise NotImplementedError('Return list cmd to add member to group')
+
+ def _write_hostname(self, hostname, filename):
+ bsd_utils.set_rc_config_value('hostname', hostname, fn='/etc/rc.conf')
+
+ def create_group(self, name, members=None):
+ if util.is_group(name):
+ LOG.warning("Skipping creation of existing group '%s'", name)
+ else:
+ group_add_cmd = self.group_add_cmd_prefix + [name]
+ try:
+ subp.subp(group_add_cmd)
+ LOG.info("Created new group %s", name)
+ except Exception:
+ util.logexc(LOG, "Failed to create group %s", name)
+
+ if not members:
+ members = []
+ for member in members:
+ if not util.is_user(member):
+ LOG.warning("Unable to add group member '%s' to group '%s'"
+ "; user does not exist.", member, name)
+ continue
+ try:
+ subp.subp(self._get_add_member_to_group_cmd(member, name))
+ LOG.info("Added user '%s' to group '%s'", member, name)
+ except Exception:
+ util.logexc(LOG, "Failed to add user '%s' to group '%s'",
+ member, name)
+
+ def generate_fallback_config(self):
+ nconf = {'config': [], 'version': 1}
+ for mac, name in net.get_interfaces_by_mac().items():
+ nconf['config'].append(
+ {'type': 'physical', 'name': name,
+ 'mac_address': mac, 'subnets': [{'type': 'dhcp'}]})
+ return nconf
+
+ def install_packages(self, pkglist):
+ self.update_package_sources()
+ self.package_command('install', pkgs=pkglist)
+
+ def _get_pkg_cmd_environ(self):
+ """Return environment vars used in *BSD package_command operations"""
+ raise NotImplementedError('BSD subclasses return a dict of env vars')
+
+ def package_command(self, command, args=None, pkgs=None):
+ if pkgs is None:
+ pkgs = []
+
+ if command == 'install':
+ cmd = self.pkg_cmd_install_prefix
+ elif command == 'remove':
+ cmd = self.pkg_cmd_remove_prefix
+ elif command == 'update':
+ if not self.pkg_cmd_update_prefix:
+ return
+ cmd = self.pkg_cmd_update_prefix
+ elif command == 'upgrade':
+ if not self.pkg_cmd_upgrade_prefix:
+ return
+ cmd = self.pkg_cmd_upgrade_prefix
+
+ if args and isinstance(args, str):
+ cmd.append(args)
+ elif args and isinstance(args, list):
+ cmd.extend(args)
+
+ pkglist = util.expand_package_list('%s-%s', pkgs)
+ cmd.extend(pkglist)
+
+ # Allow the output of this to flow outwards (ie not be captured)
+ subp.subp(cmd, env=self._get_pkg_cmd_environ(), capture=False)
+
+ def _write_network_config(self, netconfig):
+ return self._supported_write_network_config(netconfig)
+
+ def set_timezone(self, tz):
+ distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz))
+
+ def apply_locale(self, locale, out_fn=None):
+ LOG.debug('Cannot set the locale.')
+
+ def apply_network_config_names(self, netconfig):
+ LOG.debug('Cannot rename network interface.')
diff --git a/cloudinit/distros/bsd_utils.py b/cloudinit/distros/bsd_utils.py
new file mode 100644
index 00000000..079d0d53
--- /dev/null
+++ b/cloudinit/distros/bsd_utils.py
@@ -0,0 +1,50 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import shlex
+
+from cloudinit import util
+
+# On NetBSD, /etc/rc.conf comes with an if block:
+# if [ -r /etc/defaults/rc.conf ]; then
+# as a consequence, the file is not a regular key/value list
+# anymore and we cannot use cloudinit.distros.parsers.sys_conf.
+# This module provides a more naive parser which is, however,
+# able to preserve these if blocks.
+
+
+def _unquote(value):
+ if value[0] == value[-1] and value[0] in ['"', "'"]:
+ return value[1:-1]
+ return value
+
+
+def get_rc_config_value(key, fn='/etc/rc.conf'):
+ key_prefix = '{}='.format(key)
+ for line in util.load_file(fn).splitlines():
+ if line.startswith(key_prefix):
+ value = line.replace(key_prefix, '')
+ return _unquote(value)
+
+
+def set_rc_config_value(key, value, fn='/etc/rc.conf'):
+ lines = []
+ done = False
+ value = shlex.quote(value)
+ original_content = util.load_file(fn)
+ for line in original_content.splitlines():
+ if '=' in line:
+ k, v = line.split('=', 1)
+ if k == key:
+ v = value
+ done = True
+ lines.append('='.join([k, v]))
+ else:
+ lines.append(line)
+ if not done:
+ lines.append('='.join([key, value]))
+ new_content = '\n'.join(lines) + '\n'
+ if new_content != original_content:
+ util.write_file(fn, new_content)
+
+
+# vi: ts=4 expandtab
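# A runnable sketch of the rc.conf helpers above, assuming cloud-init is
# importable; the rc.conf content below is a hypothetical NetBSD-style file.
import tempfile

from cloudinit.distros import bsd_utils

with tempfile.NamedTemporaryFile('w', delete=False) as f:
    f.write('if [ -r /etc/defaults/rc.conf ]; then\n'
            '. /etc/defaults/rc.conf\n'
            'fi\n'
            'hostname="old-name"\n')
    rc_conf = f.name

# Only the matching key=value line is rewritten; the if block survives.
bsd_utils.set_rc_config_value('hostname', 'new-name', fn=rc_conf)
assert bsd_utils.get_rc_config_value('hostname', fn=rc_conf) == 'new-name'
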
diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py
index 128bb523..844aaf21 100644
--- a/cloudinit/distros/debian.py
+++ b/cloudinit/distros/debian.py
@@ -13,6 +13,7 @@ import os
from cloudinit import distros
from cloudinit import helpers
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import util
from cloudinit.distros.parsers.hostname import HostnameConf
@@ -197,7 +198,7 @@ class Distro(distros.Distro):
# Allow the output of this to flow outwards (ie not be captured)
util.log_time(logfunc=LOG.debug,
msg="apt-%s [%s]" % (command, ' '.join(cmd)),
- func=util.subp,
+ func=subp.subp,
args=(cmd,), kwargs={'env': e, 'capture': False})
def update_package_sources(self):
@@ -214,7 +215,7 @@ def _get_wrapper_prefix(cmd, mode):
if (util.is_true(mode) or
(str(mode).lower() == "auto" and cmd[0] and
- util.which(cmd[0]))):
+ subp.which(cmd[0]))):
return cmd
else:
return []
@@ -269,7 +270,7 @@ def update_locale_conf(locale, sys_path, keyname='LANG'):
"""Update system locale config"""
LOG.debug('Updating %s with locale setting %s=%s',
sys_path, keyname, locale)
- util.subp(
+ subp.subp(
['update-locale', '--locale-file=' + sys_path,
'%s=%s' % (keyname, locale)], capture=False)
@@ -291,7 +292,7 @@ def regenerate_locale(locale, sys_path, keyname='LANG'):
# finally, trigger regeneration
LOG.debug('Generating locales for %s', locale)
- util.subp(['locale-gen', locale], capture=False)
+ subp.subp(['locale-gen', locale], capture=False)
# vi: ts=4 expandtab
diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py
index 026d1142..dde34d41 100644
--- a/cloudinit/distros/freebsd.py
+++ b/cloudinit/distros/freebsd.py
@@ -8,34 +8,25 @@ import os
import re
from io import StringIO
-from cloudinit import distros
-from cloudinit import helpers
+import cloudinit.distros.bsd
from cloudinit import log as logging
-from cloudinit import net
-from cloudinit import ssh_util
+from cloudinit import subp
from cloudinit import util
-from cloudinit.distros import rhel_util
from cloudinit.settings import PER_INSTANCE
LOG = logging.getLogger(__name__)
-class Distro(distros.Distro):
+class Distro(cloudinit.distros.bsd.BSD):
usr_lib_exec = '/usr/local/lib'
- rc_conf_fn = "/etc/rc.conf"
login_conf_fn = '/etc/login.conf'
login_conf_fn_bak = '/etc/login.conf.orig'
ci_sudoers_fn = '/usr/local/etc/sudoers.d/90-cloud-init-users'
- hostname_conf_fn = '/etc/rc.conf'
-
- def __init__(self, name, cfg, paths):
- distros.Distro.__init__(self, name, cfg, paths)
- # This will be used to restrict certain
- # calls from repeatly happening (when they
- # should only happen say once per instance...)
- self._runner = helpers.Runners(paths)
- self.osfamily = 'freebsd'
- cfg['ssh_svcname'] = 'sshd'
+ group_add_cmd_prefix = ['pw', 'group', 'add']
+ pkg_cmd_install_prefix = ["pkg", "install"]
+ pkg_cmd_remove_prefix = ["pkg", "remove"]
+ pkg_cmd_update_prefix = ["pkg", "update"]
+ pkg_cmd_upgrade_prefix = ["pkg", "upgrade"]
def _select_hostname(self, hostname, fqdn):
# Should be FQDN if available. See rc.conf(5) in FreeBSD
@@ -43,45 +34,8 @@ class Distro(distros.Distro):
return fqdn
return hostname
- def _read_system_hostname(self):
- sys_hostname = self._read_hostname(self.hostname_conf_fn)
- return (self.hostname_conf_fn, sys_hostname)
-
- def _read_hostname(self, filename, default=None):
- (_exists, contents) = rhel_util.read_sysconfig_file(filename)
- if contents.get('hostname'):
- return contents['hostname']
- else:
- return default
-
- def _write_hostname(self, hostname, filename):
- rhel_util.update_sysconfig_file(filename, {'hostname': hostname})
-
- def create_group(self, name, members):
- group_add_cmd = ['pw', 'group', 'add', name]
- if util.is_group(name):
- LOG.warning("Skipping creation of existing group '%s'", name)
- else:
- try:
- util.subp(group_add_cmd)
- LOG.info("Created new group %s", name)
- except Exception:
- util.logexc(LOG, "Failed to create group %s", name)
- raise
- if not members:
- members = []
-
- for member in members:
- if not util.is_user(member):
- LOG.warning("Unable to add group member '%s' to group '%s'"
- "; user does not exist.", member, name)
- continue
- try:
- util.subp(['pw', 'usermod', '-n', name, '-G', member])
- LOG.info("Added user '%s' to group '%s'", member, name)
- except Exception:
- util.logexc(LOG, "Failed to add user '%s' to group '%s'",
- member, name)
+ def _get_add_member_to_group_cmd(self, member_name, group_name):
+ return ['pw', 'usermod', '-n', member_name, '-G', group_name]
def add_user(self, name, **kwargs):
if util.is_user(name):
@@ -125,7 +79,7 @@ class Distro(distros.Distro):
# Run the command
LOG.info("Adding user %s", name)
try:
- util.subp(pw_useradd_cmd, logstring=log_pw_useradd_cmd)
+ subp.subp(pw_useradd_cmd, logstring=log_pw_useradd_cmd)
except Exception:
util.logexc(LOG, "Failed to create user %s", name)
raise
@@ -137,7 +91,7 @@ class Distro(distros.Distro):
def expire_passwd(self, user):
try:
- util.subp(['pw', 'usermod', user, '-p', '01-Jan-1970'])
+ subp.subp(['pw', 'usermod', user, '-p', '01-Jan-1970'])
except Exception:
util.logexc(LOG, "Failed to set pw expiration for %s", user)
raise
@@ -149,7 +103,7 @@ class Distro(distros.Distro):
hash_opt = "-h"
try:
- util.subp(['pw', 'usermod', user, hash_opt, '0'],
+ subp.subp(['pw', 'usermod', user, hash_opt, '0'],
data=passwd, logstring="chpasswd for %s" % user)
except Exception:
util.logexc(LOG, "Failed to set password for %s", user)
@@ -157,45 +111,13 @@ class Distro(distros.Distro):
def lock_passwd(self, name):
try:
- util.subp(['pw', 'usermod', name, '-h', '-'])
+ subp.subp(['pw', 'usermod', name, '-h', '-'])
except Exception:
util.logexc(LOG, "Failed to lock user %s", name)
raise
- def create_user(self, name, **kwargs):
- self.add_user(name, **kwargs)
-
- # Set password if plain-text password provided and non-empty
- if 'plain_text_passwd' in kwargs and kwargs['plain_text_passwd']:
- self.set_passwd(name, kwargs['plain_text_passwd'])
-
- # Default locking down the account. 'lock_passwd' defaults to True.
- # lock account unless lock_password is False.
- if kwargs.get('lock_passwd', True):
- self.lock_passwd(name)
-
- # Configure sudo access
- if 'sudo' in kwargs and kwargs['sudo'] is not False:
- self.write_sudo_rules(name, kwargs['sudo'])
-
- # Import SSH keys
- if 'ssh_authorized_keys' in kwargs:
- keys = set(kwargs['ssh_authorized_keys']) or []
- ssh_util.setup_user_keys(keys, name, options=None)
-
- def generate_fallback_config(self):
- nconf = {'config': [], 'version': 1}
- for mac, name in net.get_interfaces_by_mac().items():
- nconf['config'].append(
- {'type': 'physical', 'name': name,
- 'mac_address': mac, 'subnets': [{'type': 'dhcp'}]})
- return nconf
-
- def _write_network_config(self, netconfig):
- return self._supported_write_network_config(netconfig)
-
def apply_locale(self, locale, out_fn=None):
- # Adjust the locals value to the new value
+ # Adjust the locales value to the new value
newconf = StringIO()
for line in util.load_file(self.login_conf_fn).splitlines():
newconf.write(re.sub(r'^default:',
@@ -210,8 +132,8 @@ class Distro(distros.Distro):
try:
LOG.debug("Running cap_mkdb for %s", locale)
- util.subp(['cap_mkdb', self.login_conf_fn])
- except util.ProcessExecutionError:
+ subp.subp(['cap_mkdb', self.login_conf_fn])
+ except subp.ProcessExecutionError:
# cap_mkdb failed, so restore the backup.
util.logexc(LOG, "Failed to apply locale %s", locale)
try:
@@ -225,39 +147,17 @@ class Distro(distros.Distro):
# /etc/rc.conf a line with the following format:
# ifconfig_OLDNAME_name=NEWNAME
# FreeBSD network script will rename the interface automatically.
- return
-
- def install_packages(self, pkglist):
- self.update_package_sources()
- self.package_command('install', pkgs=pkglist)
-
- def package_command(self, command, args=None, pkgs=None):
- if pkgs is None:
- pkgs = []
+ pass
+ def _get_pkg_cmd_environ(self):
+ """Return environment vars used in *BSD package_command operations"""
e = os.environ.copy()
e['ASSUME_ALWAYS_YES'] = 'YES'
-
- cmd = ['pkg']
- if args and isinstance(args, str):
- cmd.append(args)
- elif args and isinstance(args, list):
- cmd.extend(args)
-
- if command:
- cmd.append(command)
-
- pkglist = util.expand_package_list('%s-%s', pkgs)
- cmd.extend(pkglist)
-
- # Allow the output of this to flow outwards (ie not be captured)
- util.subp(cmd, env=e, capture=False)
-
- def set_timezone(self, tz):
- distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz))
+ return e
def update_package_sources(self):
- self._runner.run("update-sources", self.package_command,
- ["update"], freq=PER_INSTANCE)
+ self._runner.run(
+ "update-sources", self.package_command,
+ ["update"], freq=PER_INSTANCE)
# vi: ts=4 expandtab
diff --git a/cloudinit/distros/gentoo.py b/cloudinit/distros/gentoo.py
index dc57717d..2bee1c89 100644
--- a/cloudinit/distros/gentoo.py
+++ b/cloudinit/distros/gentoo.py
@@ -9,6 +9,7 @@
from cloudinit import distros
from cloudinit import helpers
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import util
from cloudinit.distros import net_util
@@ -39,7 +40,7 @@ class Distro(distros.Distro):
def apply_locale(self, locale, out_fn=None):
if not out_fn:
out_fn = self.locale_conf_fn
- util.subp(['locale-gen', '-G', locale], capture=False)
+ subp.subp(['locale-gen', '-G', locale], capture=False)
# "" provides trailing newline during join
lines = [
util.make_header(),
@@ -94,11 +95,11 @@ class Distro(distros.Distro):
cmd = ['rc-update', 'add', 'net.{name}'.format(name=dev),
'default']
try:
- (_out, err) = util.subp(cmd)
+ (_out, err) = subp.subp(cmd)
if len(err):
LOG.warning("Running %s resulted in stderr output: %s",
cmd, err)
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
util.logexc(LOG, "Running interface command %s failed",
cmd)
@@ -119,12 +120,12 @@ class Distro(distros.Distro):
LOG.debug("Attempting to run bring up interface %s using command %s",
device_name, cmd)
try:
- (_out, err) = util.subp(cmd)
+ (_out, err) = subp.subp(cmd)
if len(err):
LOG.warning("Running %s resulted in stderr output: %s",
cmd, err)
return True
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
util.logexc(LOG, "Running interface command %s failed", cmd)
return False
@@ -137,11 +138,11 @@ class Distro(distros.Distro):
# Grab device names from init scripts
cmd = ['ls', '/etc/init.d/net.*']
try:
- (_out, err) = util.subp(cmd)
+ (_out, err) = subp.subp(cmd)
if len(err):
LOG.warning("Running %s resulted in stderr output: %s",
cmd, err)
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
util.logexc(LOG, "Running interface command %s failed", cmd)
return False
devices = [x.split('.')[2] for x in _out.split(' ')]
@@ -208,7 +209,7 @@ class Distro(distros.Distro):
cmd.extend(pkglist)
# Allow the output of this to flow outwards (ie not be captured)
- util.subp(cmd, capture=False)
+ subp.subp(cmd, capture=False)
def update_package_sources(self):
self._runner.run("update-sources", self.package_command,
diff --git a/cloudinit/distros/netbsd.py b/cloudinit/distros/netbsd.py
new file mode 100644
index 00000000..f1a9b182
--- /dev/null
+++ b/cloudinit/distros/netbsd.py
@@ -0,0 +1,159 @@
+# Copyright (C) 2019-2020 Gonéri Le Bouder
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import crypt
+import os
+import platform
+
+import cloudinit.distros.bsd
+from cloudinit import log as logging
+from cloudinit import subp
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+
+
+class NetBSD(cloudinit.distros.bsd.BSD):
+ """
+ Distro subclass for NetBSD.
+
+ (N.B. OpenBSD inherits from this class.)
+ """
+
+ ci_sudoers_fn = '/usr/pkg/etc/sudoers.d/90-cloud-init-users'
+ group_add_cmd_prefix = ["groupadd"]
+
+ def __init__(self, name, cfg, paths):
+ super().__init__(name, cfg, paths)
+ if os.path.exists("/usr/pkg/bin/pkgin"):
+ self.pkg_cmd_install_prefix = ['pkgin', '-y', 'install']
+ self.pkg_cmd_remove_prefix = ['pkgin', '-y', 'remove']
+ self.pkg_cmd_update_prefix = ['pkgin', '-y', 'update']
+ self.pkg_cmd_upgrade_prefix = ['pkgin', '-y', 'full-upgrade']
+ else:
+ self.pkg_cmd_install_prefix = ['pkg_add', '-U']
+ self.pkg_cmd_remove_prefix = ['pkg_delete']
+
+ def _get_add_member_to_group_cmd(self, member_name, group_name):
+ return ['usermod', '-G', group_name, member_name]
+
+ def add_user(self, name, **kwargs):
+ if util.is_user(name):
+ LOG.info("User %s already exists, skipping.", name)
+ return False
+
+ adduser_cmd = ['useradd']
+ log_adduser_cmd = ['useradd']
+
+ adduser_opts = {
+ "homedir": '-d',
+ "gecos": '-c',
+ "primary_group": '-g',
+ "groups": '-G',
+ "shell": '-s',
+ }
+ adduser_flags = {
+ "no_user_group": '--no-user-group',
+ "system": '--system',
+ "no_log_init": '--no-log-init',
+ }
+
+ for key, val in kwargs.items():
+ if key in adduser_opts and val and isinstance(val, str):
+ adduser_cmd.extend([adduser_opts[key], val])
+
+ elif key in adduser_flags and val:
+ adduser_cmd.append(adduser_flags[key])
+ log_adduser_cmd.append(adduser_flags[key])
+
+ if 'no_create_home' not in kwargs or 'system' not in kwargs:
+ adduser_cmd += ['-m']
+ log_adduser_cmd += ['-m']
+
+ adduser_cmd += [name]
+ log_adduser_cmd += [name]
+
+ # Run the command
+ LOG.info("Adding user %s", name)
+ try:
+ subp.subp(adduser_cmd, logstring=log_adduser_cmd)
+ except Exception:
+ util.logexc(LOG, "Failed to create user %s", name)
+ raise
+ # Set the password if it is provided
+ # For security consideration, only hashed passwd is assumed
+ passwd_val = kwargs.get('passwd', None)
+ if passwd_val is not None:
+ self.set_passwd(name, passwd_val, hashed=True)
+
+ def set_passwd(self, user, passwd, hashed=False):
+ if hashed:
+ hashed_pw = passwd
+ elif not hasattr(crypt, 'METHOD_BLOWFISH'):
+ # crypt.METHOD_BLOWFISH comes with Python 3.7 which is available
+ # on NetBSD 7 and 8.
+ LOG.error((
+ 'Cannot set non-encrypted password for user %s. '
+ 'Python >= 3.7 is required.'), user)
+ return
+ else:
+ method = crypt.METHOD_BLOWFISH # pylint: disable=E1101
+ hashed_pw = crypt.crypt(
+ passwd,
+ crypt.mksalt(method)
+ )
+
+ try:
+ subp.subp(['usermod', '-p', hashed_pw, user])
+ except Exception:
+ util.logexc(LOG, "Failed to set password for %s", user)
+ raise
+ self.unlock_passwd(user)
+
+ def force_passwd_change(self, user):
+ try:
+ subp.subp(['usermod', '-F', user])
+ except Exception:
+ util.logexc(LOG, "Failed to set pw expiration for %s", user)
+ raise
+
+ def lock_passwd(self, name):
+ try:
+ subp.subp(['usermod', '-C', 'yes', name])
+ except Exception:
+ util.logexc(LOG, "Failed to lock user %s", name)
+ raise
+
+ def unlock_passwd(self, name):
+ try:
+ subp.subp(['usermod', '-C', 'no', name])
+ except Exception:
+ util.logexc(LOG, "Failed to unlock user %s", name)
+ raise
+
+ def apply_locale(self, locale, out_fn=None):
+ LOG.debug('Cannot set the locale.')
+
+ def apply_network_config_names(self, netconfig):
+ LOG.debug('NetBSD cannot rename network interface.')
+
+ def _get_pkg_cmd_environ(self):
+ """Return env vars used in NetBSD package_command operations"""
+ os_release = platform.release()
+ os_arch = platform.machine()
+ e = os.environ.copy()
+ e['PKG_PATH'] = (
+ 'http://cdn.netbsd.org/pub/pkgsrc/'
+ 'packages/NetBSD/%s/%s/All'
+ ) % (os_arch, os_release)
+ return e
+
+ def update_package_sources(self):
+ pass
+
+
+class Distro(NetBSD):
+ pass
+
+# vi: ts=4 expandtab
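# A hedged sketch of the blowfish hashing path set_passwd() takes above; it
# needs Python >= 3.7 and an OS whose crypt(3) supports blowfish, so the
# result is platform-dependent rather than guaranteed.
import crypt

if hasattr(crypt, 'METHOD_BLOWFISH'):
    hashed = crypt.crypt('hunter2', crypt.mksalt(crypt.METHOD_BLOWFISH))
    # A '$2b$...' style hash like this is what ends up being passed to
    # `usermod -p <hash> <user>` on NetBSD.
    print(hashed)
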
diff --git a/cloudinit/distros/networking.py b/cloudinit/distros/networking.py
new file mode 100644
index 00000000..10ed249d
--- /dev/null
+++ b/cloudinit/distros/networking.py
@@ -0,0 +1,212 @@
+import abc
+import logging
+import os
+
+from cloudinit import net, util
+
+
+LOG = logging.getLogger(__name__)
+
+
+# Type aliases (https://docs.python.org/3/library/typing.html#type-aliases),
+# used to make the signatures of methods a little clearer
+DeviceName = str
+NetworkConfig = dict
+
+
+class Networking(metaclass=abc.ABCMeta):
+ """The root of the Networking hierarchy in cloud-init.
+
+ This is part of an ongoing refactor in the cloud-init codebase, for more
+ details see "``cloudinit.net`` -> ``cloudinit.distros.networking``
+ Hierarchy" in HACKING.rst for full details.
+ """
+
+ def _get_current_rename_info(self) -> dict:
+ return net._get_current_rename_info()
+
+ def _rename_interfaces(self, renames: list, *, current_info=None) -> None:
+ return net._rename_interfaces(renames, current_info=current_info)
+
+ def apply_network_config_names(self, netcfg: NetworkConfig) -> None:
+ return net.apply_network_config_names(netcfg)
+
+ def device_devid(self, devname: DeviceName):
+ return net.device_devid(devname)
+
+ def device_driver(self, devname: DeviceName):
+ return net.device_driver(devname)
+
+ def extract_physdevs(self, netcfg: NetworkConfig) -> list:
+ return net.extract_physdevs(netcfg)
+
+ def find_fallback_nic(self, *, blacklist_drivers=None):
+ return net.find_fallback_nic(blacklist_drivers=blacklist_drivers)
+
+ def generate_fallback_config(
+ self, *, blacklist_drivers=None, config_driver: bool = False
+ ):
+ return net.generate_fallback_config(
+ blacklist_drivers=blacklist_drivers, config_driver=config_driver
+ )
+
+ def get_devicelist(self) -> list:
+ return net.get_devicelist()
+
+ def get_ib_hwaddrs_by_interface(self) -> dict:
+ return net.get_ib_hwaddrs_by_interface()
+
+ def get_ib_interface_hwaddr(
+ self, devname: DeviceName, ethernet_format: bool
+ ):
+ return net.get_ib_interface_hwaddr(devname, ethernet_format)
+
+ def get_interface_mac(self, devname: DeviceName):
+ return net.get_interface_mac(devname)
+
+ def get_interfaces(self) -> list:
+ return net.get_interfaces()
+
+ def get_interfaces_by_mac(self) -> dict:
+ return net.get_interfaces_by_mac()
+
+ def get_master(self, devname: DeviceName):
+ return net.get_master(devname)
+
+ def interface_has_own_mac(
+ self, devname: DeviceName, *, strict: bool = False
+ ) -> bool:
+ return net.interface_has_own_mac(devname, strict=strict)
+
+ def is_bond(self, devname: DeviceName) -> bool:
+ return net.is_bond(devname)
+
+ def is_bridge(self, devname: DeviceName) -> bool:
+ return net.is_bridge(devname)
+
+ @abc.abstractmethod
+ def is_physical(self, devname: DeviceName) -> bool:
+ """
+ Is ``devname`` a physical network device?
+
+ Examples of non-physical network devices: bonds, bridges, tunnels,
+ loopback devices.
+ """
+
+ def is_renamed(self, devname: DeviceName) -> bool:
+ return net.is_renamed(devname)
+
+ def is_up(self, devname: DeviceName) -> bool:
+ return net.is_up(devname)
+
+ def is_vlan(self, devname: DeviceName) -> bool:
+ return net.is_vlan(devname)
+
+ def master_is_bridge_or_bond(self, devname: DeviceName) -> bool:
+ return net.master_is_bridge_or_bond(devname)
+
+ @abc.abstractmethod
+ def settle(self, *, exists=None) -> None:
+ """Wait for device population in the system to complete.
+
+ :param exists:
+ An optional optimisation. If given, only perform as much of the
+ settle process as is required for the given DeviceName to be
+ present in the system. (This may include skipping the settle
+ process entirely, if the device already exists.)
+ :type exists: Optional[DeviceName]
+ """
+
+ def wait_for_physdevs(
+ self, netcfg: NetworkConfig, *, strict: bool = True
+ ) -> None:
+ """Wait for all the physical devices in `netcfg` to exist on the system
+
+ Specifically, this will call `self.settle` 5 times, and check after
+ each one if the physical devices are now present in the system.
+
+ :param netcfg:
+ The NetworkConfig from which to extract physical devices to wait
+ for.
+ :param strict:
+ Raise a `RuntimeError` if any physical devices are not present
+ after waiting.
+ """
+ physdevs = self.extract_physdevs(netcfg)
+
+ # set of expected iface names and mac addrs
+ expected_ifaces = dict([(iface[0], iface[1]) for iface in physdevs])
+ expected_macs = set(expected_ifaces.keys())
+
+ # set of current macs
+ present_macs = self.get_interfaces_by_mac().keys()
+
+ # compare the set of expected mac address values to
+ # the current macs present; we only check MAC as cloud-init
+ # has not yet renamed interfaces and the netcfg may include
+ # such renames.
+ for _ in range(0, 5):
+ if expected_macs.issubset(present_macs):
+ LOG.debug("net: all expected physical devices present")
+ return
+
+ missing = expected_macs.difference(present_macs)
+ LOG.debug("net: waiting for expected net devices: %s", missing)
+ for mac in missing:
+ # trigger a settle, unless this interface exists
+ devname = expected_ifaces[mac]
+ msg = "Waiting for settle or {} exists".format(devname)
+ util.log_time(
+ LOG.debug,
+ msg,
+ func=self.settle,
+ kwargs={"exists": devname},
+ )
+
+ # update present_macs after settles
+ present_macs = self.get_interfaces_by_mac().keys()
+
+ msg = "Not all expected physical devices present: %s" % missing
+ LOG.warning(msg)
+ if strict:
+ raise RuntimeError(msg)
+
+
+class BSDNetworking(Networking):
+ """Implementation of networking functionality shared across BSDs."""
+
+ def is_physical(self, devname: DeviceName) -> bool:
+ raise NotImplementedError()
+
+ def settle(self, *, exists=None) -> None:
+ """BSD has no equivalent to `udevadm settle`; noop."""
+
+
+class LinuxNetworking(Networking):
+ """Implementation of networking functionality common to Linux distros."""
+
+ def get_dev_features(self, devname: DeviceName) -> str:
+ return net.get_dev_features(devname)
+
+ def has_netfail_standby_feature(self, devname: DeviceName) -> bool:
+ return net.has_netfail_standby_feature(devname)
+
+ def is_netfailover(self, devname: DeviceName) -> bool:
+ return net.is_netfailover(devname)
+
+ def is_netfail_master(self, devname: DeviceName) -> bool:
+ return net.is_netfail_master(devname)
+
+ def is_netfail_primary(self, devname: DeviceName) -> bool:
+ return net.is_netfail_primary(devname)
+
+ def is_netfail_standby(self, devname: DeviceName) -> bool:
+ return net.is_netfail_standby(devname)
+
+ def is_physical(self, devname: DeviceName) -> bool:
+ return os.path.exists(net.sys_dev_path(devname, "device"))
+
+ def settle(self, *, exists=None) -> None:
+ if exists is not None:
+ exists = net.sys_dev_path(exists)
+ util.udevadm_settle(exists=exists)
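# A hedged usage sketch of the new Networking hierarchy; assumes cloud-init
# is importable on a Linux host with udev. The device name is hypothetical.
from cloudinit.distros.networking import LinuxNetworking

networking = LinuxNetworking()  # Distro objects expose this as self.networking
if networking.is_physical('eth0'):
    # Only settle as far as needed for eth0 to appear (see settle() above).
    networking.settle(exists='eth0')
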
diff --git a/cloudinit/distros/openbsd.py b/cloudinit/distros/openbsd.py
new file mode 100644
index 00000000..720c9cf3
--- /dev/null
+++ b/cloudinit/distros/openbsd.py
@@ -0,0 +1,52 @@
+# Copyright (C) 2019-2020 Gonéri Le Bouder
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import os
+import platform
+
+import cloudinit.distros.netbsd
+from cloudinit import log as logging
+from cloudinit import subp
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+
+
+class Distro(cloudinit.distros.netbsd.NetBSD):
+ hostname_conf_fn = '/etc/myname'
+
+ def _read_hostname(self, filename, default=None):
+ return util.load_file(self.hostname_conf_fn)
+
+ def _write_hostname(self, hostname, filename):
+ content = hostname + '\n'
+ util.write_file(self.hostname_conf_fn, content)
+
+ def _get_add_member_to_group_cmd(self, member_name, group_name):
+ return ['usermod', '-G', group_name, member_name]
+
+ def lock_passwd(self, name):
+ try:
+ subp.subp(['usermod', '-p', '*', name])
+ except Exception:
+ util.logexc(LOG, "Failed to lock user %s", name)
+ raise
+
+ def unlock_passwd(self, name):
+ pass
+
+ def _get_pkg_cmd_environ(self):
+ """Return env vars used in OpenBSD package_command operations"""
+ os_release = platform.release()
+ os_arch = platform.machine()
+ e = os.environ.copy()
+ e['PKG_PATH'] = (
+ 'ftp://ftp.openbsd.org/pub/OpenBSD/{os_release}/'
+ 'packages/{os_arch}/').format(
+ os_arch=os_arch, os_release=os_release
+ )
+ return e
+
+
+# vi: ts=4 expandtab
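# A tiny sketch of the PKG_PATH value _get_pkg_cmd_environ() builds above;
# the release/arch values are hypothetical examples.
os_release, os_arch = '6.7', 'amd64'
print('ftp://ftp.openbsd.org/pub/OpenBSD/{os_release}/'
      'packages/{os_arch}/'.format(os_arch=os_arch, os_release=os_release))
# -> ftp://ftp.openbsd.org/pub/OpenBSD/6.7/packages/amd64/
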
diff --git a/cloudinit/distros/opensuse.py b/cloudinit/distros/opensuse.py
index dd56a3f4..b8e557b8 100644
--- a/cloudinit/distros/opensuse.py
+++ b/cloudinit/distros/opensuse.py
@@ -14,6 +14,7 @@ from cloudinit.distros.parsers.hostname import HostnameConf
from cloudinit import helpers
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import util
from cloudinit.distros import rhel_util as rhutil
@@ -97,7 +98,7 @@ class Distro(distros.Distro):
cmd.extend(pkglist)
# Allow the output of this to flow outwards (ie not be captured)
- util.subp(cmd, capture=False)
+ subp.subp(cmd, capture=False)
def set_timezone(self, tz):
tz_file = self._find_tz_file(tz)
@@ -129,7 +130,7 @@ class Distro(distros.Distro):
if self.uses_systemd() and filename.endswith('/previous-hostname'):
return util.load_file(filename).strip()
elif self.uses_systemd():
- (out, _err) = util.subp(['hostname'])
+ (out, _err) = subp.subp(['hostname'])
if len(out):
return out
else:
@@ -144,6 +145,9 @@ class Distro(distros.Distro):
return default
return hostname
+ def _get_localhost_ip(self):
+ return "127.0.1.1"
+
def _read_hostname_conf(self, filename):
conf = HostnameConf(util.load_file(filename))
conf.parse()
@@ -160,7 +164,7 @@ class Distro(distros.Distro):
if self.uses_systemd() and out_fn.endswith('/previous-hostname'):
util.write_file(out_fn, hostname)
elif self.uses_systemd():
- util.subp(['hostnamectl', 'set-hostname', str(hostname)])
+ subp.subp(['hostnamectl', 'set-hostname', str(hostname)])
else:
conf = None
try:
@@ -181,7 +185,7 @@ class Distro(distros.Distro):
def preferred_ntp_clients(self):
"""The preferred ntp client is dependent on the version."""
- """Allow distro to determine the preferred ntp client list"""
+ # Allow distro to determine the preferred ntp client list
if not self._preferred_ntp_clients:
distro_info = util.system_info()['dist']
name = distro_info[0]
diff --git a/cloudinit/distros/parsers/resolv_conf.py b/cloudinit/distros/parsers/resolv_conf.py
index 299d54b5..62929d03 100644
--- a/cloudinit/distros/parsers/resolv_conf.py
+++ b/cloudinit/distros/parsers/resolv_conf.py
@@ -150,9 +150,10 @@ class ResolvConf(object):
tail = ''
try:
(cfg_opt, cfg_values) = head.split(None, 1)
- except (IndexError, ValueError):
- raise IOError("Incorrectly formatted resolv.conf line %s"
- % (i + 1))
+ except (IndexError, ValueError) as e:
+ raise IOError(
+ "Incorrectly formatted resolv.conf line %s" % (i + 1)
+ ) from e
if cfg_opt not in ['nameserver', 'domain',
'search', 'sortlist', 'options']:
raise IOError("Unexpected resolv.conf option %s" % (cfg_opt))
diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py
index f55d96f7..c72f7c17 100644
--- a/cloudinit/distros/rhel.py
+++ b/cloudinit/distros/rhel.py
@@ -11,6 +11,7 @@
from cloudinit import distros
from cloudinit import helpers
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import util
from cloudinit.distros import rhel_util
@@ -83,7 +84,7 @@ class Distro(distros.Distro):
if self.uses_systemd() and out_fn.endswith('/previous-hostname'):
util.write_file(out_fn, hostname)
elif self.uses_systemd():
- util.subp(['hostnamectl', 'set-hostname', str(hostname)])
+ subp.subp(['hostnamectl', 'set-hostname', str(hostname)])
else:
host_cfg = {
'HOSTNAME': hostname,
@@ -108,7 +109,7 @@ class Distro(distros.Distro):
if self.uses_systemd() and filename.endswith('/previous-hostname'):
return util.load_file(filename).strip()
elif self.uses_systemd():
- (out, _err) = util.subp(['hostname'])
+ (out, _err) = subp.subp(['hostname'])
if len(out):
return out
else:
@@ -146,7 +147,7 @@ class Distro(distros.Distro):
if pkgs is None:
pkgs = []
- if util.which('dnf'):
+ if subp.which('dnf'):
LOG.debug('Using DNF for package management')
cmd = ['dnf']
else:
@@ -173,7 +174,7 @@ class Distro(distros.Distro):
cmd.extend(pkglist)
# Allow the output of this to flow outwards (ie not be captured)
- util.subp(cmd, capture=False)
+ subp.subp(cmd, capture=False)
def update_package_sources(self):
self._runner.run("update-sources", self.package_command,
diff --git a/cloudinit/distros/tests/__init__.py b/cloudinit/distros/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/cloudinit/distros/tests/__init__.py
diff --git a/cloudinit/distros/tests/test_init.py b/cloudinit/distros/tests/test_init.py
new file mode 100644
index 00000000..db534654
--- /dev/null
+++ b/cloudinit/distros/tests/test_init.py
@@ -0,0 +1,156 @@
+# Copyright (C) 2020 Canonical Ltd.
+#
+# Author: Daniel Watkins <oddbloke@ubuntu.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+"""Tests for cloudinit/distros/__init__.py"""
+
+from unittest import mock
+
+import pytest
+
+from cloudinit.distros import _get_package_mirror_info, LDH_ASCII_CHARS
+
+
+# Define a set of characters we would expect to be replaced
+INVALID_URL_CHARS = [
+ chr(x) for x in range(127) if chr(x) not in LDH_ASCII_CHARS
+]
+for separator in [":", ".", "/", "#", "?", "@", "[", "]"]:
+ # Remove from the set characters that either separate hostname parts (":",
+ # "."), terminate hostnames ("/", "#", "?", "@"), or cause Python to be
+ # unable to parse URLs ("[", "]").
+ INVALID_URL_CHARS.remove(separator)
+
+
+class TestGetPackageMirrorInfo:
+ """
+ Tests for cloudinit.distros._get_package_mirror_info.
+
+ These supplement the tests in tests/unittests/test_distros/test_generic.py
+ which exercise a single production-like configuration end to end.
+ The tests here focus on specific aspects of the unit under test.
+ """
+
+ @pytest.mark.parametrize('mirror_info,expected', [
+ # Empty info gives empty return
+ ({}, {}),
+ # failsafe values used if present
+ ({'failsafe': {'primary': 'http://value', 'security': 'http://other'}},
+ {'primary': 'http://value', 'security': 'http://other'}),
+ # search values used if present
+ ({'search': {'primary': ['http://value'],
+ 'security': ['http://other']}},
+ {'primary': ['http://value'], 'security': ['http://other']}),
+ # failsafe values used if search value not present
+ ({'search': {'primary': ['http://value']},
+ 'failsafe': {'security': 'http://other'}},
+ {'primary': ['http://value'], 'security': 'http://other'})
+ ])
+ def test_get_package_mirror_info_failsafe(self, mirror_info, expected):
+ """
+ Test the interaction between search and failsafe inputs
+
+ (This doesn't test the case where the mirror_filter removes all search
+ options; test_failsafe_used_if_all_search_results_filtered_out covers
+ that.)
+ """
+ assert expected == _get_package_mirror_info(mirror_info,
+ mirror_filter=lambda x: x)
+
+ def test_failsafe_used_if_all_search_results_filtered_out(self):
+ """Test the failsafe option used if all search options eliminated."""
+ mirror_info = {
+ 'search': {'primary': ['http://value']},
+ 'failsafe': {'primary': 'http://other'}
+ }
+ assert {'primary': 'http://other'} == _get_package_mirror_info(
+ mirror_info, mirror_filter=lambda x: False)
+
+ @pytest.mark.parametrize('allow_ec2_mirror, platform_type', [
+ (True, 'ec2')
+ ])
+ @pytest.mark.parametrize('availability_zone,region,patterns,expected', (
+ # Test ec2_region alone
+ ('fk-fake-1f', None, ['http://EC2-%(ec2_region)s/ubuntu'],
+ ['http://ec2-fk-fake-1/ubuntu']),
+ # Test availability_zone alone
+ ('fk-fake-1f', None, ['http://AZ-%(availability_zone)s/ubuntu'],
+ ['http://az-fk-fake-1f/ubuntu']),
+ # Test region alone
+ (None, 'fk-fake-1', ['http://RG-%(region)s/ubuntu'],
+ ['http://rg-fk-fake-1/ubuntu']),
+ # Test that ec2_region is not available for non-matching AZs
+ ('fake-fake-1f', None,
+ ['http://EC2-%(ec2_region)s/ubuntu',
+ 'http://AZ-%(availability_zone)s/ubuntu'],
+ ['http://az-fake-fake-1f/ubuntu']),
+ # Test that template order maintained
+ (None, 'fake-region',
+ ['http://RG-%(region)s-2/ubuntu', 'http://RG-%(region)s-1/ubuntu'],
+ ['http://rg-fake-region-2/ubuntu', 'http://rg-fake-region-1/ubuntu']),
+ # Test that non-ASCII hostnames are IDNA encoded;
+ # "IDNA-ТεЅТ̣".encode('idna') == b"xn--idna--4kd53hh6aba3q"
+ (None, 'ТεЅТ̣', ['http://www.IDNA-%(region)s.com/ubuntu'],
+ ['http://www.xn--idna--4kd53hh6aba3q.com/ubuntu']),
+ # Test that non-ASCII hostnames with a port are IDNA encoded;
+ # "IDNA-ТεЅТ̣".encode('idna') == b"xn--idna--4kd53hh6aba3q"
+ (None, 'ТεЅТ̣', ['http://www.IDNA-%(region)s.com:8080/ubuntu'],
+ ['http://www.xn--idna--4kd53hh6aba3q.com:8080/ubuntu']),
+ # Test that non-ASCII non-hostname parts of URLs are unchanged
+ (None, 'ТεЅТ̣', ['http://www.example.com/%(region)s/ubuntu'],
+ ['http://www.example.com/ТεЅТ̣/ubuntu']),
+ # Test that IPv4 addresses are unchanged
+ (None, 'fk-fake-1', ['http://192.168.1.1:8080/%(region)s/ubuntu'],
+ ['http://192.168.1.1:8080/fk-fake-1/ubuntu']),
+ # Test that IPv6 addresses are unchanged
+ (None, 'fk-fake-1',
+ ['http://[2001:67c:1360:8001::23]/%(region)s/ubuntu'],
+ ['http://[2001:67c:1360:8001::23]/fk-fake-1/ubuntu']),
+ # Test that unparseable URLs are filtered out of the mirror list
+ (None, 'inv[lid',
+ ['http://%(region)s.in.hostname/should/be/filtered',
+ 'http://but.not.in.the.path/%(region)s'],
+ ['http://but.not.in.the.path/inv[lid']),
+ (None, '-some-region-',
+ ['http://-lead-ing.%(region)s.trail-ing-.example.com/ubuntu'],
+ ['http://lead-ing.some-region.trail-ing.example.com/ubuntu']),
+ ) + tuple(
+ # Dynamically generate a test case for each non-LDH
+ # (Letters/Digits/Hyphen) ASCII character, testing that it is
+ # substituted with a hyphen
+ (None, 'fk{0}fake{0}1'.format(invalid_char),
+ ['http://%(region)s/ubuntu'], ['http://fk-fake-1/ubuntu'])
+ for invalid_char in INVALID_URL_CHARS
+ ))
+ def test_valid_substitution(self,
+ allow_ec2_mirror,
+ platform_type,
+ availability_zone,
+ region,
+ patterns,
+ expected):
+ """Test substitution works as expected."""
+ flag_path = "cloudinit.distros." \
+ "ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES"
+
+ m_data_source = mock.Mock(
+ availability_zone=availability_zone,
+ region=region,
+ platform_type=platform_type
+ )
+ mirror_info = {'search': {'primary': patterns}}
+
+ with mock.patch(flag_path, allow_ec2_mirror):
+ ret = _get_package_mirror_info(
+ mirror_info,
+ data_source=m_data_source,
+ mirror_filter=lambda x: x
+ )
+ print(allow_ec2_mirror)
+ print(platform_type)
+ print(availability_zone)
+ print(region)
+ print(patterns)
+ print(expected)
+ assert {'primary': expected} == ret
diff --git a/cloudinit/distros/tests/test_networking.py b/cloudinit/distros/tests/test_networking.py
new file mode 100644
index 00000000..b9a63842
--- /dev/null
+++ b/cloudinit/distros/tests/test_networking.py
@@ -0,0 +1,192 @@
+from unittest import mock
+
+import pytest
+
+from cloudinit import net
+from cloudinit.distros.networking import (
+ BSDNetworking,
+ LinuxNetworking,
+ Networking,
+)
+
+# See https://docs.pytest.org/en/stable/example
+# /parametrize.html#parametrizing-conditional-raising
+from contextlib import ExitStack as does_not_raise
+
+
+@pytest.yield_fixture
+def generic_networking_cls():
+ """Returns a direct Networking subclass which errors on /sys usage.
+
+ This enables the direct testing of functionality only present on the
+ ``Networking`` super-class, and provides a check on accidentally using /sys
+ in that context.
+ """
+
+ class TestNetworking(Networking):
+ def is_physical(self, *args, **kwargs):
+ raise NotImplementedError
+
+ def settle(self, *args, **kwargs):
+ raise NotImplementedError
+
+ error = AssertionError("Unexpectedly used /sys in generic networking code")
+ with mock.patch(
+ "cloudinit.net.get_sys_class_path", side_effect=error,
+ ):
+ yield TestNetworking
+
+
+@pytest.yield_fixture
+def sys_class_net(tmpdir):
+ sys_class_net_path = tmpdir.join("sys/class/net")
+ sys_class_net_path.ensure_dir()
+ with mock.patch(
+ "cloudinit.net.get_sys_class_path",
+ return_value=sys_class_net_path.strpath + "/",
+ ):
+ yield sys_class_net_path
+
+
+class TestBSDNetworkingIsPhysical:
+ def test_raises_notimplementederror(self):
+ with pytest.raises(NotImplementedError):
+ BSDNetworking().is_physical("eth0")
+
+
+class TestLinuxNetworkingIsPhysical:
+ def test_returns_false_by_default(self, sys_class_net):
+ assert not LinuxNetworking().is_physical("eth0")
+
+ def test_returns_false_if_devname_exists_but_not_physical(
+ self, sys_class_net
+ ):
+ devname = "eth0"
+ sys_class_net.join(devname).mkdir()
+ assert not LinuxNetworking().is_physical(devname)
+
+ def test_returns_true_if_device_is_physical(self, sys_class_net):
+ devname = "eth0"
+ device_dir = sys_class_net.join(devname)
+ device_dir.mkdir()
+ device_dir.join("device").write("")
+
+ assert LinuxNetworking().is_physical(devname)
+
+
+class TestBSDNetworkingSettle:
+ def test_settle_doesnt_error(self):
+ # This also implicitly tests that it doesn't use subp.subp
+ BSDNetworking().settle()
+
+
+@pytest.mark.usefixtures("sys_class_net")
+@mock.patch("cloudinit.distros.networking.util.udevadm_settle", autospec=True)
+class TestLinuxNetworkingSettle:
+ def test_no_arguments(self, m_udevadm_settle):
+ LinuxNetworking().settle()
+
+ assert [mock.call(exists=None)] == m_udevadm_settle.call_args_list
+
+ def test_exists_argument(self, m_udevadm_settle):
+ LinuxNetworking().settle(exists="ens3")
+
+ expected_path = net.sys_dev_path("ens3")
+ assert [
+ mock.call(exists=expected_path)
+ ] == m_udevadm_settle.call_args_list
+
+
+class TestNetworkingWaitForPhysDevs:
+ @pytest.fixture
+ def wait_for_physdevs_netcfg(self):
+ """This config is shared across all the tests in this class."""
+
+ def ethernet(mac, name, driver=None, device_id=None):
+ v2_cfg = {"set-name": name, "match": {"macaddress": mac}}
+ if driver:
+ v2_cfg["match"].update({"driver": driver})
+ if device_id:
+ v2_cfg["match"].update({"device_id": device_id})
+
+ return v2_cfg
+
+ physdevs = [
+ ["aa:bb:cc:dd:ee:ff", "eth0", "virtio", "0x1000"],
+ ["00:11:22:33:44:55", "ens3", "e1000", "0x1643"],
+ ]
+ netcfg = {
+ "version": 2,
+ "ethernets": {args[1]: ethernet(*args) for args in physdevs},
+ }
+ return netcfg
+
+ def test_skips_settle_if_all_present(
+ self, generic_networking_cls, wait_for_physdevs_netcfg,
+ ):
+ networking = generic_networking_cls()
+ with mock.patch.object(
+ networking, "get_interfaces_by_mac"
+ ) as m_get_interfaces_by_mac:
+ m_get_interfaces_by_mac.side_effect = iter(
+ [{"aa:bb:cc:dd:ee:ff": "eth0", "00:11:22:33:44:55": "ens3"}]
+ )
+ with mock.patch.object(
+ networking, "settle", autospec=True
+ ) as m_settle:
+ networking.wait_for_physdevs(wait_for_physdevs_netcfg)
+ assert 0 == m_settle.call_count
+
+ def test_calls_udev_settle_on_missing(
+ self, generic_networking_cls, wait_for_physdevs_netcfg,
+ ):
+ networking = generic_networking_cls()
+ with mock.patch.object(
+ networking, "get_interfaces_by_mac"
+ ) as m_get_interfaces_by_mac:
+ m_get_interfaces_by_mac.side_effect = iter(
+ [
+ {
+ "aa:bb:cc:dd:ee:ff": "eth0"
+ }, # first call ens3 is missing
+ {
+ "aa:bb:cc:dd:ee:ff": "eth0",
+ "00:11:22:33:44:55": "ens3",
+ }, # second call has both
+ ]
+ )
+ with mock.patch.object(
+ networking, "settle", autospec=True
+ ) as m_settle:
+ networking.wait_for_physdevs(wait_for_physdevs_netcfg)
+ m_settle.assert_called_with(exists="ens3")
+
+ @pytest.mark.parametrize(
+ "strict,expectation",
+ [(True, pytest.raises(RuntimeError)), (False, does_not_raise())],
+ )
+ def test_retrying_and_strict_behaviour(
+ self,
+ strict,
+ expectation,
+ generic_networking_cls,
+ wait_for_physdevs_netcfg,
+ ):
+ networking = generic_networking_cls()
+ with mock.patch.object(
+ networking, "get_interfaces_by_mac"
+ ) as m_get_interfaces_by_mac:
+ m_get_interfaces_by_mac.return_value = {}
+
+ with mock.patch.object(
+ networking, "settle", autospec=True
+ ) as m_settle:
+ with expectation:
+ networking.wait_for_physdevs(
+ wait_for_physdevs_netcfg, strict=strict
+ )
+
+ assert (
+ 5 * len(wait_for_physdevs_netcfg["ethernets"])
+ == m_settle.call_count
+ )
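
The behaviour these tests pin down matches the module-level wait_for_physdevs()
that this diff removes from cloudinit/net/__init__.py further below: up to five
passes comparing expected MAC addresses against get_interfaces_by_mac(), one
settle(exists=...) call per missing device, and a RuntimeError only when strict
is set. A minimal sketch of such a method, assuming cloudinit.net's
extract_physdevs() plus the class's own get_interfaces_by_mac()/settle() (this
is an illustration, not the verbatim source):

    def wait_for_physdevs(self, netcfg, strict=True):
        # expected MAC -> name, from the network config's physical devices
        expected_ifaces = {
            mac: name for mac, name, _driver, _devid
            in extract_physdevs(netcfg)
        }
        expected_macs = set(expected_ifaces)
        for _ in range(5):
            present_macs = self.get_interfaces_by_mac().keys()
            if expected_macs.issubset(present_macs):
                return
            for mac in expected_macs.difference(present_macs):
                # trigger a udev settle, unless this interface exists
                self.settle(exists=expected_ifaces[mac])
        if strict:
            raise RuntimeError('Not all expected physical devices present')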
diff --git a/cloudinit/distros/ubuntu.py b/cloudinit/distros/ubuntu.py
index 23be3bdd..b4c4b0c3 100644
--- a/cloudinit/distros/ubuntu.py
+++ b/cloudinit/distros/ubuntu.py
@@ -49,7 +49,5 @@ class Distro(debian.Distro):
copy.deepcopy(PREFERRED_NTP_CLIENTS))
return self._preferred_ntp_clients
- pass
-
# vi: ts=4 expandtab
diff --git a/cloudinit/features.py b/cloudinit/features.py
new file mode 100644
index 00000000..c44fa29e
--- /dev/null
+++ b/cloudinit/features.py
@@ -0,0 +1,44 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+"""
+Feature flags are used as a way to easily toggle configuration
+**at build time**. They are provided to accommodate feature deprecation and
+downstream configuration changes.
+
+Currently used upstream values for feature flags are set in
+``cloudinit/features.py``. Overrides to these values (typically via quilt
+patch) can be placed
+in a file called ``feature_overrides.py`` in the same directory. Any value
+set in ``feature_overrides.py`` will override the original value set
+in ``features.py``.
+
+Each flag should include a short comment regarding the reason for
+the flag and intended lifetime.
+
+Tests are required for new feature flags, and tests must verify
+all valid states of a flag, not just the default state.
+"""
+
+ERROR_ON_USER_DATA_FAILURE = True
+"""
+If there is a failure in obtaining user data (i.e., #include or
+decompress fails), old behavior is to log a warning and proceed.
+After the 20.2 release, we instead raise an exception.
+This flag can be removed after Focal is no longer supported.
+"""
+
+
+ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES = False
+"""
+When configuring apt mirrors, old behavior is to allow
+the use of ec2 mirrors if the datasource availability_zone format
+matches one of the possible aws ec2 regions. After the 20.2 release, we
+no longer publish ec2 region mirror urls on non-AWS cloud platforms.
+Besides feature_overrides.py, users can override this by providing
+#cloud-config apt directives.
+"""
+
+try:
+ # pylint: disable=wildcard-import
+ from cloudinit.feature_overrides import * # noqa
+except ImportError:
+ pass
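
Since the override mechanism above is a plain wildcard import, a downstream can
flip a flag by shipping a one-line module (typically via quilt patch). A minimal
sketch of such an override file, using the flag defined above and the path the
module docstring names:

    # cloudinit/feature_overrides.py
    ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES = True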
diff --git a/cloudinit/gpg.py b/cloudinit/gpg.py
index 7fe17a2e..be0ca0ea 100644
--- a/cloudinit/gpg.py
+++ b/cloudinit/gpg.py
@@ -8,7 +8,7 @@
"""gpg.py - Collection of gpg key related functions"""
from cloudinit import log as logging
-from cloudinit import util
+from cloudinit import subp
import time
@@ -18,9 +18,9 @@ LOG = logging.getLogger(__name__)
def export_armour(key):
"""Export gpg key, armoured key gets returned"""
try:
- (armour, _) = util.subp(["gpg", "--export", "--armour", key],
+ (armour, _) = subp.subp(["gpg", "--export", "--armour", key],
capture=True)
- except util.ProcessExecutionError as error:
+ except subp.ProcessExecutionError as error:
# debug, since it happens for any key not on the system initially
LOG.debug('Failed to export armoured key "%s": %s', key, error)
armour = None
@@ -51,11 +51,11 @@ def recv_key(key, keyserver, retries=(1, 1)):
while True:
trynum += 1
try:
- util.subp(cmd, capture=True)
+ subp.subp(cmd, capture=True)
LOG.debug("Imported key '%s' from keyserver '%s' on try %d",
key, keyserver, trynum)
return
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
error = e
try:
naplen = next(sleeps)
@@ -63,18 +63,19 @@ def recv_key(key, keyserver, retries=(1, 1)):
"Import failed with exit code %d, will try again in %ss",
error.exit_code, naplen)
time.sleep(naplen)
- except StopIteration:
+ except StopIteration as e:
raise ValueError(
("Failed to import key '%s' from keyserver '%s' "
- "after %d tries: %s") % (key, keyserver, trynum, error))
+ "after %d tries: %s") % (key, keyserver, trynum, error)
+ ) from e
def delete_key(key):
"""Delete the specified key from the local gpg ring"""
try:
- util.subp(["gpg", "--batch", "--yes", "--delete-keys", key],
+ subp.subp(["gpg", "--batch", "--yes", "--delete-keys", key],
capture=True)
- except util.ProcessExecutionError as error:
+ except subp.ProcessExecutionError as error:
LOG.warning('Failed delete key "%s": %s', key, error)
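
As a usage note, recv_key() treats the retries tuple as the sequence of sleep
lengths between attempts, so retries=(1, 1) allows up to three tries with
one-second naps in between before ValueError is raised. A sketch, with a
hypothetical key id and keyserver:

    from cloudinit import gpg

    # up to three attempts, sleeping 1s then 2s between failures
    gpg.recv_key('DEADBEEF', 'keyserver.ubuntu.com', retries=(1, 2))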
diff --git a/cloudinit/handlers/boot_hook.py b/cloudinit/handlers/boot_hook.py
index dca50a49..c6205097 100644
--- a/cloudinit/handlers/boot_hook.py
+++ b/cloudinit/handlers/boot_hook.py
@@ -12,6 +12,7 @@ import os
from cloudinit import handlers
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import util
from cloudinit.settings import (PER_ALWAYS)
@@ -48,8 +49,8 @@ class BootHookPartHandler(handlers.Handler):
env = os.environ.copy()
if self.instance_id is not None:
env['INSTANCE_ID'] = str(self.instance_id)
- util.subp([filepath], env=env)
- except util.ProcessExecutionError:
+ subp.subp([filepath], env=env)
+ except subp.ProcessExecutionError:
util.logexc(LOG, "Boothooks script %s execution error", filepath)
except Exception:
util.logexc(LOG, "Boothooks unknown error when running %s",
diff --git a/cloudinit/handlers/jinja_template.py b/cloudinit/handlers/jinja_template.py
index ce3accf6..aadfbf86 100644
--- a/cloudinit/handlers/jinja_template.py
+++ b/cloudinit/handlers/jinja_template.py
@@ -83,7 +83,8 @@ def render_jinja_payload_from_file(
if e.errno == EACCES:
raise RuntimeError(
'Cannot render jinja template vars. No read permission on'
- " '%s'. Try sudo" % instance_data_file)
+ " '%s'. Try sudo" % instance_data_file
+ ) from e
rendered_payload = render_jinja_payload(
payload, payload_fn, instance_data, debug)
diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py
index 003cad60..a9d29537 100644
--- a/cloudinit/handlers/upstart_job.py
+++ b/cloudinit/handlers/upstart_job.py
@@ -13,6 +13,7 @@ import re
from cloudinit import handlers
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import util
from cloudinit.settings import (PER_INSTANCE)
@@ -52,7 +53,7 @@ class UpstartJobPartHandler(handlers.Handler):
util.write_file(path, payload, 0o644)
if SUITABLE_UPSTART:
- util.subp(["initctl", "reload-configuration"], capture=False)
+ subp.subp(["initctl", "reload-configuration"], capture=False)
def _has_suitable_upstart():
@@ -63,7 +64,7 @@ def _has_suitable_upstart():
if not os.path.exists("/sbin/initctl"):
return False
try:
- (version_out, _err) = util.subp(["initctl", "version"])
+ (version_out, _err) = subp.subp(["initctl", "version"])
except Exception:
util.logexc(LOG, "initctl version failed")
return False
@@ -77,7 +78,7 @@ def _has_suitable_upstart():
if not os.path.exists("/usr/bin/dpkg-query"):
return False
try:
- (dpkg_ver, _err) = util.subp(["dpkg-query",
+ (dpkg_ver, _err) = subp.subp(["dpkg-query",
"--showformat=${Version}",
"--show", "upstart"], rcs=[0, 1])
except Exception:
@@ -86,9 +87,9 @@ def _has_suitable_upstart():
try:
good = "1.8-0ubuntu1.2"
- util.subp(["dpkg", "--compare-versions", dpkg_ver, "ge", good])
+ subp.subp(["dpkg", "--compare-versions", dpkg_ver, "ge", good])
return True
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
if e.exit_code == 1:
pass
else:
diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py
index 7d2a3305..9752ad28 100644
--- a/cloudinit/helpers.py
+++ b/cloudinit/helpers.py
@@ -451,8 +451,4 @@ class DefaultingConfigParser(RawConfigParser):
contents = '\n'.join([header, contents, ''])
return contents
-
-def identity(object):
- return object
-
# vi: ts=4 expandtab
diff --git a/cloudinit/log.py b/cloudinit/log.py
index 827db12b..2e5df042 100644
--- a/cloudinit/log.py
+++ b/cloudinit/log.py
@@ -122,17 +122,12 @@ def getLogger(name='cloudinit'):
return logging.getLogger(name)
-# Fixes this annoyance...
-# No handlers could be found for logger XXX annoying output...
-try:
- from logging import NullHandler
-except ImportError:
- class NullHandler(logging.Handler):
- def emit(self, record):
- pass
-
-
def _resetLogger(log):
+ """Remove all current handlers, unset log level and add a NullHandler.
+
+ (Adding the NullHandler avoids "No handlers could be found for logger XXX"
+ messages.)
+ """
if not log:
return
handlers = list(log.handlers)
@@ -141,7 +136,7 @@ def _resetLogger(log):
h.close()
log.removeHandler(h)
log.setLevel(NOTSET)
- log.addHandler(NullHandler())
+ log.addHandler(logging.NullHandler())
def resetLogging():
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index 1d5eb535..e233149a 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -6,13 +6,14 @@
# This file is part of cloud-init. See LICENSE file for license information.
import errno
+import ipaddress
import logging
import os
import re
-from functools import partial
-from cloudinit.net.network_state import mask_to_net_prefix
+from cloudinit import subp
from cloudinit import util
+from cloudinit.net.network_state import mask_to_net_prefix
from cloudinit.url_helper import UrlError, readurl
LOG = logging.getLogger(__name__)
@@ -97,10 +98,6 @@ def is_up(devname):
return read_sys_net_safe(devname, "operstate", translate=translate)
-def is_wireless(devname):
- return os.path.exists(sys_dev_path(devname, "wireless"))
-
-
def is_bridge(devname):
return os.path.exists(sys_dev_path(devname, "bridge"))
@@ -264,28 +261,6 @@ def is_vlan(devname):
return 'DEVTYPE=vlan' in uevent.splitlines()
-def is_connected(devname):
- # is_connected isn't really as simple as that. 2 is
- # 'physically connected'. 3 is 'not connected'. but a wlan interface will
- # always show 3.
- iflink = read_sys_net_safe(devname, "iflink")
- if iflink == "2":
- return True
- if not is_wireless(devname):
- return False
- LOG.debug("'%s' is wireless, basing 'connected' on carrier", devname)
- return read_sys_net_safe(devname, "carrier",
- translate={'0': False, '1': True})
-
-
-def is_physical(devname):
- return os.path.exists(sys_dev_path(devname, "device"))
-
-
-def is_present(devname):
- return os.path.exists(sys_dev_path(devname))
-
-
def device_driver(devname):
"""Return the device driver for net device named 'devname'."""
driver = None
@@ -334,10 +309,20 @@ def find_fallback_nic(blacklist_drivers=None):
"""Return the name of the 'fallback' network device."""
if util.is_FreeBSD():
return find_fallback_nic_on_freebsd(blacklist_drivers)
+ elif util.is_NetBSD() or util.is_OpenBSD():
+ return find_fallback_nic_on_netbsd_or_openbsd(blacklist_drivers)
else:
return find_fallback_nic_on_linux(blacklist_drivers)
+def find_fallback_nic_on_netbsd_or_openbsd(blacklist_drivers=None):
+ values = list(sorted(
+ get_interfaces_by_mac().values(),
+ key=natural_sort_key))
+ if values:
+ return values[0]
+
+
def find_fallback_nic_on_freebsd(blacklist_drivers=None):
"""Return the name of the 'fallback' network device on FreeBSD.
@@ -347,7 +332,7 @@ def find_fallback_nic_on_freebsd(blacklist_drivers=None):
we'll use the first interface from ``ifconfig -l -u ether``
"""
- stdout, _stderr = util.subp(['ifconfig', '-l', '-u', 'ether'])
+ stdout, _stderr = subp.subp(['ifconfig', '-l', '-u', 'ether'])
values = stdout.split()
if values:
return values[0]
@@ -508,43 +493,6 @@ def extract_physdevs(netcfg):
raise RuntimeError('Unknown network config version: %s' % version)
-def wait_for_physdevs(netcfg, strict=True):
- physdevs = extract_physdevs(netcfg)
-
- # set of expected iface names and mac addrs
- expected_ifaces = dict([(iface[0], iface[1]) for iface in physdevs])
- expected_macs = set(expected_ifaces.keys())
-
- # set of current macs
- present_macs = get_interfaces_by_mac().keys()
-
- # compare the set of expected mac address values to
- # the current macs present; we only check MAC as cloud-init
- # has not yet renamed interfaces and the netcfg may include
- # such renames.
- for _ in range(0, 5):
- if expected_macs.issubset(present_macs):
- LOG.debug('net: all expected physical devices present')
- return
-
- missing = expected_macs.difference(present_macs)
- LOG.debug('net: waiting for expected net devices: %s', missing)
- for mac in missing:
- # trigger a settle, unless this interface exists
- syspath = sys_dev_path(expected_ifaces[mac])
- settle = partial(util.udevadm_settle, exists=syspath)
- msg = 'Waiting for udev events to settle or %s exists' % syspath
- util.log_time(LOG.debug, msg, func=settle)
-
- # update present_macs after settles
- present_macs = get_interfaces_by_mac().keys()
-
- msg = 'Not all expected physical devices present: %s' % missing
- LOG.warning(msg)
- if strict:
- raise RuntimeError(msg)
-
-
def apply_network_config_names(netcfg, strict_present=True, strict_busy=True):
"""read the network config and rename devices accordingly.
if strict_present is false, then do not raise exception if no devices
@@ -558,7 +506,9 @@ def apply_network_config_names(netcfg, strict_present=True, strict_busy=True):
try:
_rename_interfaces(extract_physdevs(netcfg))
except RuntimeError as e:
- raise RuntimeError('Failed to apply network config names: %s' % e)
+ raise RuntimeError(
+ 'Failed to apply network config names: %s' % e
+ ) from e
def interface_has_own_mac(ifname, strict=False):
@@ -609,9 +559,9 @@ def _get_current_rename_info(check_downable=True):
if check_downable:
nmatch = re.compile(r"[0-9]+:\s+(\w+)[@:]")
- ipv6, _err = util.subp(['ip', '-6', 'addr', 'show', 'permanent',
+ ipv6, _err = subp.subp(['ip', '-6', 'addr', 'show', 'permanent',
'scope', 'global'], capture=True)
- ipv4, _err = util.subp(['ip', '-4', 'addr', 'show'], capture=True)
+ ipv4, _err = subp.subp(['ip', '-4', 'addr', 'show'], capture=True)
nics_with_addresses = set()
for bytes_out in (ipv6, ipv4):
@@ -647,13 +597,13 @@ def _rename_interfaces(renames, strict_present=True, strict_busy=True,
for data in cur_info.values())
def rename(cur, new):
- util.subp(["ip", "link", "set", cur, "name", new], capture=True)
+ subp.subp(["ip", "link", "set", cur, "name", new], capture=True)
def down(name):
- util.subp(["ip", "link", "set", name, "down"], capture=True)
+ subp.subp(["ip", "link", "set", name, "down"], capture=True)
def up(name):
- util.subp(["ip", "link", "set", name, "up"], capture=True)
+ subp.subp(["ip", "link", "set", name, "up"], capture=True)
ops = []
errors = []
@@ -799,23 +749,27 @@ def get_ib_interface_hwaddr(ifname, ethernet_format):
def get_interfaces_by_mac():
if util.is_FreeBSD():
return get_interfaces_by_mac_on_freebsd()
+ elif util.is_NetBSD():
+ return get_interfaces_by_mac_on_netbsd()
+ elif util.is_OpenBSD():
+ return get_interfaces_by_mac_on_openbsd()
else:
return get_interfaces_by_mac_on_linux()
def get_interfaces_by_mac_on_freebsd():
- (out, _) = util.subp(['ifconfig', '-a', 'ether'])
+ (out, _) = subp.subp(['ifconfig', '-a', 'ether'])
    # flatten each interface block into a single line
def flatten(out):
curr_block = ''
- for l in out.split('\n'):
- if l.startswith('\t'):
- curr_block += l
+ for line in out.split('\n'):
+ if line.startswith('\t'):
+ curr_block += line
else:
if curr_block:
yield curr_block
- curr_block = l
+ curr_block = line
yield curr_block
    # look for interface and mac in a list of flattened blocks
@@ -830,6 +784,37 @@ def get_interfaces_by_mac_on_freebsd():
return results
+def get_interfaces_by_mac_on_netbsd():
+ ret = {}
+ re_field_match = (
+ r"(?P<ifname>\w+).*address:\s"
+ r"(?P<mac>([\da-f]{2}[:-]){5}([\da-f]{2})).*"
+ )
+ (out, _) = subp.subp(['ifconfig', '-a'])
+ if_lines = re.sub(r'\n\s+', ' ', out).splitlines()
+ for line in if_lines:
+ m = re.match(re_field_match, line)
+ if m:
+ fields = m.groupdict()
+ ret[fields['mac']] = fields['ifname']
+ return ret
+
+
+def get_interfaces_by_mac_on_openbsd():
+ ret = {}
+ re_field_match = (
+ r"(?P<ifname>\w+).*lladdr\s"
+ r"(?P<mac>([\da-f]{2}[:-]){5}([\da-f]{2})).*")
+ (out, _) = subp.subp(['ifconfig', '-a'])
+ if_lines = re.sub(r'\n\s+', ' ', out).splitlines()
+ for line in if_lines:
+ m = re.match(re_field_match, line)
+ if m:
+ fields = m.groupdict()
+ ret[fields['mac']] = fields['ifname']
+ return ret
+
+
def get_interfaces_by_mac_on_linux():
"""Build a dictionary of tuples {mac: name}.
@@ -917,6 +902,38 @@ def has_url_connectivity(url):
return True
+def is_ip_address(s: str) -> bool:
+ """Returns a bool indicating if ``s`` is an IP address.
+
+ :param s:
+ The string to test.
+
+ :return:
+ A bool indicating if the string contains an IP address or not.
+ """
+ try:
+ ipaddress.ip_address(s)
+ except ValueError:
+ return False
+ return True
+
+
+def is_ipv4_address(s: str) -> bool:
+ """Returns a bool indicating if ``s`` is an IPv4 address.
+
+ :param s:
+ The string to test.
+
+ :return:
+ A bool indicating if the string contains an IPv4 address or not.
+ """
+ try:
+ ipaddress.IPv4Address(s)
+ except ValueError:
+ return False
+ return True
+
+
class EphemeralIPv4Network(object):
"""Context manager which sets up temporary static network configuration.
@@ -950,7 +967,8 @@ class EphemeralIPv4Network(object):
self.prefix = mask_to_net_prefix(prefix_or_mask)
except ValueError as e:
raise ValueError(
- 'Cannot setup network: {0}'.format(e))
+ 'Cannot setup network: {0}'.format(e)
+ ) from e
self.connectivity_url = connectivity_url
self.interface = interface
@@ -990,11 +1008,11 @@ class EphemeralIPv4Network(object):
def __exit__(self, excp_type, excp_value, excp_traceback):
"""Teardown anything we set up."""
for cmd in self.cleanup_cmds:
- util.subp(cmd, capture=True)
+ subp.subp(cmd, capture=True)
def _delete_address(self, address, prefix):
"""Perform the ip command to remove the specified address."""
- util.subp(
+ subp.subp(
['ip', '-family', 'inet', 'addr', 'del',
'%s/%s' % (address, prefix), 'dev', self.interface],
capture=True)
@@ -1006,11 +1024,11 @@ class EphemeralIPv4Network(object):
'Attempting setup of ephemeral network on %s with %s brd %s',
self.interface, cidr, self.broadcast)
try:
- util.subp(
+ subp.subp(
['ip', '-family', 'inet', 'addr', 'add', cidr, 'broadcast',
self.broadcast, 'dev', self.interface],
capture=True, update_env={'LANG': 'C'})
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
if "File exists" not in e.stderr:
raise
LOG.debug(
@@ -1018,7 +1036,7 @@ class EphemeralIPv4Network(object):
self.interface, self.ip)
else:
# Address creation success, bring up device and queue cleanup
- util.subp(
+ subp.subp(
['ip', '-family', 'inet', 'link', 'set', 'dev', self.interface,
'up'], capture=True)
self.cleanup_cmds.append(
@@ -1035,7 +1053,7 @@ class EphemeralIPv4Network(object):
via_arg = []
if gateway != "0.0.0.0/0":
via_arg = ['via', gateway]
- util.subp(
+ subp.subp(
['ip', '-4', 'route', 'add', net_address] + via_arg +
['dev', self.interface], capture=True)
self.cleanup_cmds.insert(
@@ -1045,20 +1063,20 @@ class EphemeralIPv4Network(object):
def _bringup_router(self):
"""Perform the ip commands to fully setup the router if needed."""
# Check if a default route exists and exit if it does
- out, _ = util.subp(['ip', 'route', 'show', '0.0.0.0/0'], capture=True)
+ out, _ = subp.subp(['ip', 'route', 'show', '0.0.0.0/0'], capture=True)
if 'default' in out:
LOG.debug(
'Skip ephemeral route setup. %s already has default route: %s',
self.interface, out.strip())
return
- util.subp(
+ subp.subp(
['ip', '-4', 'route', 'add', self.router, 'dev', self.interface,
'src', self.ip], capture=True)
self.cleanup_cmds.insert(
0,
['ip', '-4', 'route', 'del', self.router, 'dev', self.interface,
'src', self.ip])
- util.subp(
+ subp.subp(
['ip', '-4', 'route', 'add', 'default', 'via', self.router,
'dev', self.interface], capture=True)
self.cleanup_cmds.insert(
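
The new ipaddress-based helpers accept anything the stdlib parses as an
address, so IPv6 literals count as IP addresses but not as IPv4. A quick
illustration with hypothetical values:

    from cloudinit.net import is_ip_address, is_ipv4_address

    assert is_ip_address('192.168.2.1')
    assert is_ip_address('fe80::1')           # IPv6 parses as an IP address
    assert not is_ip_address('example.com')   # hostnames are rejected
    assert is_ipv4_address('10.0.0.1')
    assert not is_ipv4_address('fe80::1')     # IPv6 is not IPv4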
diff --git a/cloudinit/net/bsd.py b/cloudinit/net/bsd.py
new file mode 100644
index 00000000..e34e0454
--- /dev/null
+++ b/cloudinit/net/bsd.py
@@ -0,0 +1,167 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import re
+
+from cloudinit import log as logging
+from cloudinit import net
+from cloudinit import util
+from cloudinit import subp
+from cloudinit.distros.parsers.resolv_conf import ResolvConf
+from cloudinit.distros import bsd_utils
+
+from . import renderer
+
+LOG = logging.getLogger(__name__)
+
+
+class BSDRenderer(renderer.Renderer):
+ resolv_conf_fn = 'etc/resolv.conf'
+ rc_conf_fn = 'etc/rc.conf'
+
+ def get_rc_config_value(self, key):
+ fn = subp.target_path(self.target, self.rc_conf_fn)
+        return bsd_utils.get_rc_config_value(key, fn=fn)
+
+ def set_rc_config_value(self, key, value):
+ fn = subp.target_path(self.target, self.rc_conf_fn)
+ bsd_utils.set_rc_config_value(key, value, fn=fn)
+
+ def __init__(self, config=None):
+ if not config:
+ config = {}
+ self.target = None
+ self.interface_configurations = {}
+ self._postcmds = config.get('postcmds', True)
+
+ def _ifconfig_entries(self, settings, target=None):
+ ifname_by_mac = net.get_interfaces_by_mac()
+ for interface in settings.iter_interfaces():
+ device_name = interface.get("name")
+ device_mac = interface.get("mac_address")
+ if device_name and re.match(r'^lo\d+$', device_name):
+ continue
+ if device_mac not in ifname_by_mac:
+ LOG.info('Cannot find any device with MAC %s', device_mac)
+ elif device_mac and device_name:
+ cur_name = ifname_by_mac[device_mac]
+ if cur_name != device_name:
+ LOG.info('netif service will rename interface %s to %s',
+ cur_name, device_name)
+ try:
+ self.rename_interface(cur_name, device_name)
+ except NotImplementedError:
+ LOG.error((
+ 'Interface renaming is '
+ 'not supported on this OS'))
+ device_name = cur_name
+
+ else:
+ device_name = ifname_by_mac[device_mac]
+
+ LOG.info('Configuring interface %s', device_name)
+
+ self.interface_configurations[device_name] = 'DHCP'
+
+ for subnet in interface.get("subnets", []):
+ if subnet.get('type') == 'static':
+ if not subnet.get('netmask'):
+ LOG.debug(
+ 'Skipping IP %s, because there is no netmask',
+ subnet.get('address')
+ )
+ continue
+ LOG.debug('Configuring dev %s with %s / %s', device_name,
+ subnet.get('address'), subnet.get('netmask'))
+
+ self.interface_configurations[device_name] = {
+ 'address': subnet.get('address'),
+ 'netmask': subnet.get('netmask'),
+ }
+
+ def _route_entries(self, settings, target=None):
+ routes = list(settings.iter_routes())
+ for interface in settings.iter_interfaces():
+ subnets = interface.get("subnets", [])
+ for subnet in subnets:
+ if subnet.get('type') != 'static':
+ continue
+ gateway = subnet.get('gateway')
+ if gateway and len(gateway.split('.')) == 4:
+ routes.append({
+ 'network': '0.0.0.0',
+ 'netmask': '0.0.0.0',
+ 'gateway': gateway})
+ routes += subnet.get('routes', [])
+ for route in routes:
+ network = route.get('network')
+ if not network:
+ LOG.debug('Skipping a bad route entry')
+ continue
+ netmask = route.get('netmask')
+ gateway = route.get('gateway')
+ self.set_route(network, netmask, gateway)
+
+ def _resolve_conf(self, settings, target=None):
+ nameservers = settings.dns_nameservers
+ searchdomains = settings.dns_searchdomains
+ for interface in settings.iter_interfaces():
+ for subnet in interface.get("subnets", []):
+ if 'dns_nameservers' in subnet:
+ nameservers.extend(subnet['dns_nameservers'])
+ if 'dns_search' in subnet:
+ searchdomains.extend(subnet['dns_search'])
+ # Try to read the /etc/resolv.conf or just start from scratch if that
+ # fails.
+ try:
+ resolvconf = ResolvConf(util.load_file(subp.target_path(
+ target, self.resolv_conf_fn)))
+ resolvconf.parse()
+ except IOError:
+ util.logexc(LOG, "Failed to parse %s, use new empty file",
+ subp.target_path(target, self.resolv_conf_fn))
+ resolvconf = ResolvConf('')
+ resolvconf.parse()
+
+ # Add some nameservers
+ for server in nameservers:
+ try:
+ resolvconf.add_nameserver(server)
+ except ValueError:
+ util.logexc(LOG, "Failed to add nameserver %s", server)
+
+ # And add any searchdomains.
+ for domain in searchdomains:
+ try:
+ resolvconf.add_search_domain(domain)
+ except ValueError:
+ util.logexc(LOG, "Failed to add search domain %s", domain)
+ util.write_file(
+ subp.target_path(target, self.resolv_conf_fn),
+ str(resolvconf), 0o644)
+
+ def render_network_state(self, network_state, templates=None, target=None):
+ self._ifconfig_entries(settings=network_state)
+ self._route_entries(settings=network_state)
+ self._resolve_conf(settings=network_state)
+
+ self.write_config()
+ self.start_services(run=self._postcmds)
+
+ def dhcp_interfaces(self):
+ ic = self.interface_configurations.items
+ return [k for k, v in ic() if v == 'DHCP']
+
+ def start_services(self, run=False):
+ raise NotImplementedError()
+
+ def write_config(self, target=None):
+ raise NotImplementedError()
+
+ def set_gateway(self, gateway):
+ raise NotImplementedError()
+
+ def rename_interface(self, cur_name, device_name):
+ raise NotImplementedError()
+
+ def set_route(self, network, netmask, gateway):
+ raise NotImplementedError()
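
BSDRenderer collects interface, route and resolver state and leaves the
OS-specific persistence to subclasses; the FreeBSD, NetBSD and OpenBSD
renderers below each fill in those hooks. A minimal subclass sketch for a
hypothetical port, using only the rc.conf helpers defined above:

    import cloudinit.net.bsd

    class ExampleRenderer(cloudinit.net.bsd.BSDRenderer):
        def write_config(self, target=None):
            for device_name, v in self.interface_configurations.items():
                if isinstance(v, dict):
                    self.set_rc_config_value(
                        'ifconfig_' + device_name,
                        '%s netmask %s' % (v.get('address'), v.get('netmask')))
                else:
                    self.set_rc_config_value('ifconfig_' + device_name, 'DHCP')

        def start_services(self, run=False):
            pass  # e.g. restart the OS network service when run is True

        def set_route(self, network, netmask, gateway):
            pass  # persist a static route in the OS-specific format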
diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py
index 64e1c699..cc8dc17b 100755
--- a/cloudinit/net/cmdline.py
+++ b/cloudinit/net/cmdline.py
@@ -10,6 +10,7 @@ import base64
import glob
import gzip
import io
+import logging
import os
from cloudinit import util
@@ -19,21 +20,19 @@ from . import read_sys_net_safe
_OPEN_ISCSI_INTERFACE_FILE = "/run/initramfs/open-iscsi.interface"
+KERNEL_CMDLINE_NETWORK_CONFIG_DISABLED = "disabled"
+
class InitramfsNetworkConfigSource(metaclass=abc.ABCMeta):
"""ABC for net config sources that read config written by initramfses"""
@abc.abstractmethod
- def is_applicable(self):
- # type: () -> bool
+ def is_applicable(self) -> bool:
"""Is this initramfs config source applicable to the current system?"""
- pass
@abc.abstractmethod
- def render_config(self):
- # type: () -> dict
+ def render_config(self) -> dict:
"""Render a v1 network config from the initramfs configuration"""
- pass
class KlibcNetworkConfigSource(InitramfsNetworkConfigSource):
@@ -62,8 +61,7 @@ class KlibcNetworkConfigSource(InitramfsNetworkConfigSource):
if mac_addr:
self._mac_addrs[k] = mac_addr
- def is_applicable(self):
- # type: () -> bool
+ def is_applicable(self) -> bool:
"""
Return whether this system has klibc initramfs network config or not
@@ -81,8 +79,7 @@ class KlibcNetworkConfigSource(InitramfsNetworkConfigSource):
return True
return False
- def render_config(self):
- # type: () -> dict
+ def render_config(self) -> dict:
return config_from_klibc_net_cfg(
files=self._files, mac_addrs=self._mac_addrs,
)
@@ -115,8 +112,8 @@ def _klibc_to_config_entry(content, mac_addrs=None):
data = util.load_shell_content(content)
try:
name = data['DEVICE'] if 'DEVICE' in data else data['DEVICE6']
- except KeyError:
- raise ValueError("no 'DEVICE' or 'DEVICE6' entry in data")
+ except KeyError as e:
+ raise ValueError("no 'DEVICE' or 'DEVICE6' entry in data") from e
# ipconfig on precise does not write PROTO
# IPv6 config gives us IPV6PROTO, not PROTO.
@@ -233,34 +230,35 @@ def read_initramfs_config():
return None
-def _decomp_gzip(blob, strict=True):
- # decompress blob. raise exception if not compressed unless strict=False.
+def _decomp_gzip(blob):
+    # decompress blob if gzipped, otherwise return the original blob
with io.BytesIO(blob) as iobuf:
gzfp = None
try:
gzfp = gzip.GzipFile(mode="rb", fileobj=iobuf)
return gzfp.read()
except IOError:
- if strict:
- raise
return blob
finally:
if gzfp:
gzfp.close()
-def _b64dgz(b64str, gzipped="try"):
- # decode a base64 string. If gzipped is true, transparently uncompresss
- # if gzipped is 'try', then try gunzip, returning the original on fail.
- try:
- blob = base64.b64decode(b64str)
- except TypeError:
- raise ValueError("Invalid base64 text: %s" % b64str)
+def _b64dgz(data):
+ """Decode a string base64 encoding, if gzipped, uncompress as well
- if not gzipped:
- return blob
+    :return: the decoded (and, if gzipped, decompressed) data, or an
+        empty string when the input is not valid base64.
+ """
+ try:
+ blob = base64.b64decode(data)
+ except (TypeError, ValueError):
+ logging.error(
+ "Expected base64 encoded kernel commandline parameter"
+ " network-config. Ignoring network-config=%s.", data)
+ return ''
- return _decomp_gzip(blob, strict=gzipped != "try")
+ return _decomp_gzip(blob)
def read_kernel_cmdline_config(cmdline=None):
@@ -273,6 +271,8 @@ def read_kernel_cmdline_config(cmdline=None):
if tok.startswith("network-config="):
data64 = tok.split("=", 1)[1]
if data64:
+ if data64 == KERNEL_CMDLINE_NETWORK_CONFIG_DISABLED:
+ return {"config": "disabled"}
return util.load_yaml(_b64dgz(data64))
return None
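
read_kernel_cmdline_config() now expects network-config= to carry base64 data,
optionally gzip-compressed, or the literal value "disabled". A sketch of
producing such a parameter from a network config blob:

    import base64
    import gzip

    network_config = b'version: 2\nethernets: {}\n'  # any YAML network config
    param = base64.b64encode(gzip.compress(network_config)).decode()
    print('network-config=%s' % param)
    # or, to disable kernel-cmdline network configuration entirely:
    # network-config=disabled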
diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py
index 19d0199c..4394c68b 100644
--- a/cloudinit/net/dhcp.py
+++ b/cloudinit/net/dhcp.py
@@ -17,6 +17,7 @@ from cloudinit.net import (
has_url_connectivity)
from cloudinit.net.network_state import mask_and_ipv4_to_bcast_addr as bcip
from cloudinit import temp_utils
+from cloudinit import subp
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -30,19 +31,18 @@ class InvalidDHCPLeaseFileError(Exception):
Current uses are DataSourceAzure and DataSourceEc2 during ephemeral
boot to scrape metadata.
"""
- pass
class NoDHCPLeaseError(Exception):
"""Raised when unable to get a DHCP lease."""
- pass
class EphemeralDHCPv4(object):
- def __init__(self, iface=None, connectivity_url=None):
+ def __init__(self, iface=None, connectivity_url=None, dhcp_log_func=None):
self.iface = iface
self._ephipv4 = None
self.lease = None
+ self.dhcp_log_func = dhcp_log_func
self.connectivity_url = connectivity_url
def __enter__(self):
@@ -80,9 +80,10 @@ class EphemeralDHCPv4(object):
if self.lease:
return self.lease
try:
- leases = maybe_perform_dhcp_discovery(self.iface)
- except InvalidDHCPLeaseFileError:
- raise NoDHCPLeaseError()
+ leases = maybe_perform_dhcp_discovery(
+ self.iface, self.dhcp_log_func)
+ except InvalidDHCPLeaseFileError as e:
+ raise NoDHCPLeaseError() from e
if not leases:
raise NoDHCPLeaseError()
self.lease = leases[-1]
@@ -130,13 +131,15 @@ class EphemeralDHCPv4(object):
result[internal_mapping] = self.lease.get(different_names)
-def maybe_perform_dhcp_discovery(nic=None):
+def maybe_perform_dhcp_discovery(nic=None, dhcp_log_func=None):
"""Perform dhcp discovery if nic valid and dhclient command exists.
If the nic is invalid or undiscoverable or dhclient command is not found,
skip dhcp_discovery and return an empty dict.
@param nic: Name of the network interface we want to run dhclient on.
+ @param dhcp_log_func: A callable accepting the dhclient output and error
+ streams.
@return: A list of dicts representing dhcp options for each lease obtained
from the dhclient discovery if run, otherwise an empty list is
returned.
@@ -150,7 +153,7 @@ def maybe_perform_dhcp_discovery(nic=None):
LOG.debug(
'Skip dhcp_discovery: nic %s not found in get_devicelist.', nic)
return []
- dhclient_path = util.which('dhclient')
+ dhclient_path = subp.which('dhclient')
if not dhclient_path:
LOG.debug('Skip dhclient configuration: No dhclient command found.')
return []
@@ -158,7 +161,7 @@ def maybe_perform_dhcp_discovery(nic=None):
prefix='cloud-init-dhcp-',
needs_exe=True) as tdir:
# Use /var/tmp because /run/cloud-init/tmp is mounted noexec
- return dhcp_discovery(dhclient_path, nic, tdir)
+ return dhcp_discovery(dhclient_path, nic, tdir, dhcp_log_func)
def parse_dhcp_lease_file(lease_file):
@@ -192,13 +195,15 @@ def parse_dhcp_lease_file(lease_file):
return dhcp_leases
-def dhcp_discovery(dhclient_cmd_path, interface, cleandir):
+def dhcp_discovery(dhclient_cmd_path, interface, cleandir, dhcp_log_func=None):
"""Run dhclient on the interface without scripts or filesystem artifacts.
@param dhclient_cmd_path: Full path to the dhclient used.
    @param interface: Name of the network interface on which to run dhclient.
@param cleandir: The directory from which to run dhclient as well as store
dhcp leases.
+ @param dhcp_log_func: A callable accepting the dhclient output and error
+ streams.
@return: A list of dicts of representing the dhcp leases parsed from the
dhcp.leases file or empty list.
@@ -215,14 +220,20 @@ def dhcp_discovery(dhclient_cmd_path, interface, cleandir):
pid_file = os.path.join(cleandir, 'dhclient.pid')
lease_file = os.path.join(cleandir, 'dhcp.leases')
+    # In some cases files in /var/tmp may not be executable; launching
+    # dhclient from there would raise a 'Permission denied' error. Fall
+    # back to the original dhclient path instead.
+ if not os.access(sandbox_dhclient_cmd, os.X_OK):
+ sandbox_dhclient_cmd = dhclient_cmd_path
+
# ISC dhclient needs the interface up to send initial discovery packets.
# Generally dhclient relies on dhclient-script PREINIT action to bring the
# link up before attempting discovery. Since we are using -sf /bin/true,
# we need to do that "link up" ourselves first.
- util.subp(['ip', 'link', 'set', 'dev', interface, 'up'], capture=True)
+ subp.subp(['ip', 'link', 'set', 'dev', interface, 'up'], capture=True)
cmd = [sandbox_dhclient_cmd, '-1', '-v', '-lf', lease_file,
'-pf', pid_file, interface, '-sf', '/bin/true']
- util.subp(cmd, capture=True)
+ out, err = subp.subp(cmd, capture=True)
# Wait for pid file and lease file to appear, and for the process
# named by the pid file to daemonize (have pid 1 as its parent). If we
@@ -239,6 +250,7 @@ def dhcp_discovery(dhclient_cmd_path, interface, cleandir):
return []
ppid = 'unknown'
+ daemonized = False
for _ in range(0, 1000):
pid_content = util.load_file(pid_file).strip()
try:
@@ -250,13 +262,17 @@ def dhcp_discovery(dhclient_cmd_path, interface, cleandir):
if ppid == 1:
LOG.debug('killing dhclient with pid=%s', pid)
os.kill(pid, signal.SIGKILL)
- return parse_dhcp_lease_file(lease_file)
+ daemonized = True
+ break
time.sleep(0.01)
- LOG.error(
- 'dhclient(pid=%s, parentpid=%s) failed to daemonize after %s seconds',
- pid_content, ppid, 0.01 * 1000
- )
+ if not daemonized:
+ LOG.error(
+ 'dhclient(pid=%s, parentpid=%s) failed to daemonize after %s '
+ 'seconds', pid_content, ppid, 0.01 * 1000
+ )
+ if dhcp_log_func is not None:
+ dhcp_log_func(out, err)
return parse_dhcp_lease_file(lease_file)
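
The new dhcp_log_func hook is handed dhclient's captured stdout and stderr once
discovery completes (or fails to daemonize), which callers can use to surface
dhclient diagnostics. A usage sketch, assuming __enter__ returns the parsed
lease as obtain_lease() above suggests, with a hypothetical NIC name:

    from cloudinit.net.dhcp import EphemeralDHCPv4

    def log_dhclient(out, err):
        # receives dhclient's stdout/stderr from the sandboxed discovery
        print('dhclient stdout: %s' % out)
        print('dhclient stderr: %s' % err)

    with EphemeralDHCPv4(iface='eth0', dhcp_log_func=log_dhclient) as lease:
        print(lease.get('fixed-address'))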
diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py
index 2f714563..13c041f3 100644
--- a/cloudinit/net/eni.py
+++ b/cloudinit/net/eni.py
@@ -11,6 +11,7 @@ from . import renderer
from .network_state import subnet_is_ipv6
from cloudinit import log as logging
+from cloudinit import subp
from cloudinit import util
@@ -482,10 +483,8 @@ class Renderer(renderer.Renderer):
if searchdomains:
lo['subnets'][0]["dns_search"] = (" ".join(searchdomains))
- ''' Apply a sort order to ensure that we write out
- the physical interfaces first; this is critical for
- bonding
- '''
+ # Apply a sort order to ensure that we write out the physical
+ # interfaces first; this is critical for bonding
order = {
'loopback': 0,
'physical': 1,
@@ -511,13 +510,13 @@ class Renderer(renderer.Renderer):
return '\n\n'.join(['\n'.join(s) for s in sections]) + "\n"
def render_network_state(self, network_state, templates=None, target=None):
- fpeni = util.target_path(target, self.eni_path)
+ fpeni = subp.target_path(target, self.eni_path)
util.ensure_dir(os.path.dirname(fpeni))
header = self.eni_header if self.eni_header else ""
util.write_file(fpeni, header + self._render_interfaces(network_state))
if self.netrules_path:
- netrules = util.target_path(target, self.netrules_path)
+ netrules = subp.target_path(target, self.netrules_path)
util.ensure_dir(os.path.dirname(netrules))
util.write_file(netrules,
self._render_persistent_net(network_state))
@@ -544,9 +543,9 @@ def available(target=None):
expected = ['ifquery', 'ifup', 'ifdown']
search = ['/sbin', '/usr/sbin']
for p in expected:
- if not util.which(p, search=search, target=target):
+ if not subp.which(p, search=search, target=target):
return False
- eni = util.target_path(target, 'etc/network/interfaces')
+ eni = subp.target_path(target, 'etc/network/interfaces')
if not os.path.isfile(eni):
return False
diff --git a/cloudinit/net/freebsd.py b/cloudinit/net/freebsd.py
index d6f61da3..0285dfec 100644
--- a/cloudinit/net/freebsd.py
+++ b/cloudinit/net/freebsd.py
@@ -1,175 +1,59 @@
# This file is part of cloud-init. See LICENSE file for license information.
-import re
-
from cloudinit import log as logging
-from cloudinit import net
+import cloudinit.net.bsd
+from cloudinit import subp
from cloudinit import util
-from cloudinit.distros import rhel_util
-from cloudinit.distros.parsers.resolv_conf import ResolvConf
-
-from . import renderer
LOG = logging.getLogger(__name__)
-class Renderer(renderer.Renderer):
- resolv_conf_fn = 'etc/resolv.conf'
- rc_conf_fn = 'etc/rc.conf'
+class Renderer(cloudinit.net.bsd.BSDRenderer):
def __init__(self, config=None):
- if not config:
- config = {}
- self.dhcp_interfaces = []
- self._postcmds = config.get('postcmds', True)
-
- def _update_rc_conf(self, settings, target=None):
- fn = util.target_path(target, self.rc_conf_fn)
- rhel_util.update_sysconfig_file(fn, settings)
-
- def _write_ifconfig_entries(self, settings, target=None):
- ifname_by_mac = net.get_interfaces_by_mac()
- for interface in settings.iter_interfaces():
- device_name = interface.get("name")
- device_mac = interface.get("mac_address")
- if device_name and re.match(r'^lo\d+$', device_name):
- continue
- if device_mac not in ifname_by_mac:
- LOG.info('Cannot find any device with MAC %s', device_mac)
- elif device_mac and device_name:
- cur_name = ifname_by_mac[device_mac]
- if cur_name != device_name:
- LOG.info('netif service will rename interface %s to %s',
- cur_name, device_name)
- self._update_rc_conf(
- {'ifconfig_%s_name' % cur_name: device_name},
- target=target)
- else:
- device_name = ifname_by_mac[device_mac]
-
- LOG.info('Configuring interface %s', device_name)
- ifconfig = 'DHCP' # default
-
- for subnet in interface.get("subnets", []):
- if ifconfig != 'DHCP':
- LOG.info('The FreeBSD provider only set the first subnet.')
- break
- if subnet.get('type') == 'static':
- if not subnet.get('netmask'):
- LOG.debug(
- 'Skipping IP %s, because there is no netmask',
- subnet.get('address'))
- continue
- LOG.debug('Configuring dev %s with %s / %s', device_name,
- subnet.get('address'), subnet.get('netmask'))
- # Configure an ipv4 address.
- ifconfig = (
- subnet.get('address') + ' netmask ' +
- subnet.get('netmask'))
-
- if ifconfig == 'DHCP':
- self.dhcp_interfaces.append(device_name)
- self._update_rc_conf(
- {'ifconfig_' + device_name: ifconfig},
- target=target)
-
- def _write_route_entries(self, settings, target=None):
- routes = list(settings.iter_routes())
- for interface in settings.iter_interfaces():
- subnets = interface.get("subnets", [])
- for subnet in subnets:
- if subnet.get('type') != 'static':
- continue
- gateway = subnet.get('gateway')
- if gateway and len(gateway.split('.')) == 4:
- routes.append({
- 'network': '0.0.0.0',
- 'netmask': '0.0.0.0',
- 'gateway': gateway})
- routes += subnet.get('routes', [])
- route_cpt = 0
- for route in routes:
- network = route.get('network')
- if not network:
- LOG.debug('Skipping a bad route entry')
- continue
- netmask = route.get('netmask')
- gateway = route.get('gateway')
- route_cmd = "-route %s/%s %s" % (network, netmask, gateway)
- if network == '0.0.0.0':
- self._update_rc_conf(
- {'defaultrouter': gateway}, target=target)
+ self._route_cpt = 0
+ super(Renderer, self).__init__()
+
+ def rename_interface(self, cur_name, device_name):
+ self.set_rc_config_value('ifconfig_%s_name' % cur_name, device_name)
+
+ def write_config(self):
+ for device_name, v in self.interface_configurations.items():
+ if isinstance(v, dict):
+ self.set_rc_config_value(
+ 'ifconfig_' + device_name,
+ v.get('address') + ' netmask ' + v.get('netmask'))
else:
- self._update_rc_conf(
- {'route_net%d' % route_cpt: route_cmd}, target=target)
- route_cpt += 1
-
- def _write_resolve_conf(self, settings, target=None):
- nameservers = settings.dns_nameservers
- searchdomains = settings.dns_searchdomains
- for interface in settings.iter_interfaces():
- for subnet in interface.get("subnets", []):
- if 'dns_nameservers' in subnet:
- nameservers.extend(subnet['dns_nameservers'])
- if 'dns_search' in subnet:
- searchdomains.extend(subnet['dns_search'])
- # Try to read the /etc/resolv.conf or just start from scratch if that
- # fails.
- try:
- resolvconf = ResolvConf(util.load_file(util.target_path(
- target, self.resolv_conf_fn)))
- resolvconf.parse()
- except IOError:
- util.logexc(LOG, "Failed to parse %s, use new empty file",
- util.target_path(target, self.resolv_conf_fn))
- resolvconf = ResolvConf('')
- resolvconf.parse()
-
- # Add some nameservers
- for server in nameservers:
- try:
- resolvconf.add_nameserver(server)
- except ValueError:
- util.logexc(LOG, "Failed to add nameserver %s", server)
-
- # And add any searchdomains.
- for domain in searchdomains:
- try:
- resolvconf.add_search_domain(domain)
- except ValueError:
- util.logexc(LOG, "Failed to add search domain %s", domain)
- util.write_file(
- util.target_path(target, self.resolv_conf_fn),
- str(resolvconf), 0o644)
-
- def _write_network(self, settings, target=None):
- self._write_ifconfig_entries(settings, target=target)
- self._write_route_entries(settings, target=target)
- self._write_resolve_conf(settings, target=target)
-
- self.start_services(run=self._postcmds)
-
- def render_network_state(self, network_state, templates=None, target=None):
- self._write_network(network_state, target=target)
+ self.set_rc_config_value('ifconfig_' + device_name, 'DHCP')
def start_services(self, run=False):
if not run:
LOG.debug("freebsd generate postcmd disabled")
return
- util.subp(['service', 'netif', 'restart'], capture=True)
+ subp.subp(['service', 'netif', 'restart'], capture=True)
# On FreeBSD 10, the restart of routing and dhclient is likely to fail
# because
# - routing: it cannot remove the loopback route, but it will still set
# up the default route as expected.
# - dhclient: it cannot stop the dhclient started by the netif service.
# In both case, the situation is ok, and we can proceed.
- util.subp(['service', 'routing', 'restart'], capture=True, rcs=[0, 1])
- for dhcp_interface in self.dhcp_interfaces:
- util.subp(['service', 'dhclient', 'restart', dhcp_interface],
+ subp.subp(['service', 'routing', 'restart'], capture=True, rcs=[0, 1])
+
+ for dhcp_interface in self.dhcp_interfaces():
+ subp.subp(['service', 'dhclient', 'restart', dhcp_interface],
rcs=[0, 1],
capture=True)
+ def set_route(self, network, netmask, gateway):
+ if network == '0.0.0.0':
+ self.set_rc_config_value('defaultrouter', gateway)
+ else:
+ route_name = 'route_net%d' % self._route_cpt
+ route_cmd = "-route %s/%s %s" % (network, netmask, gateway)
+ self.set_rc_config_value(route_name, route_cmd)
+ self._route_cpt += 1
+
def available(target=None):
return util.is_FreeBSD()
diff --git a/cloudinit/net/netbsd.py b/cloudinit/net/netbsd.py
new file mode 100644
index 00000000..71b38ee6
--- /dev/null
+++ b/cloudinit/net/netbsd.py
@@ -0,0 +1,44 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit import log as logging
+from cloudinit import subp
+from cloudinit import util
+import cloudinit.net.bsd
+
+LOG = logging.getLogger(__name__)
+
+
+class Renderer(cloudinit.net.bsd.BSDRenderer):
+
+ def __init__(self, config=None):
+ super(Renderer, self).__init__()
+
+ def write_config(self):
+ if self.dhcp_interfaces():
+ self.set_rc_config_value('dhcpcd', 'YES')
+ self.set_rc_config_value(
+ 'dhcpcd_flags',
+ ' '.join(self.dhcp_interfaces())
+ )
+ for device_name, v in self.interface_configurations.items():
+ if isinstance(v, dict):
+ self.set_rc_config_value(
+ 'ifconfig_' + device_name,
+ v.get('address') + ' netmask ' + v.get('netmask'))
+
+ def start_services(self, run=False):
+ if not run:
+ LOG.debug("netbsd generate postcmd disabled")
+ return
+
+ subp.subp(['service', 'network', 'restart'], capture=True)
+ if self.dhcp_interfaces():
+ subp.subp(['service', 'dhcpcd', 'restart'], capture=True)
+
+ def set_route(self, network, netmask, gateway):
+ if network == '0.0.0.0':
+ self.set_rc_config_value('defaultroute', gateway)
+
+
+def available(target=None):
+ return util.is_NetBSD()
diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py
index 89855270..53347c83 100644
--- a/cloudinit/net/netplan.py
+++ b/cloudinit/net/netplan.py
@@ -8,6 +8,7 @@ from .network_state import subnet_is_ipv6, NET_CONFIG_TO_V2, IPV6_DYNAMIC_TYPES
from cloudinit import log as logging
from cloudinit import util
+from cloudinit import subp
from cloudinit import safeyaml
from cloudinit.net import SYS_CLASS_NET, get_devicelist
@@ -164,14 +165,14 @@ def _extract_bond_slaves_by_name(interfaces, entry, bond_master):
def _clean_default(target=None):
# clean out any known default files and derived files in target
# LP: #1675576
- tpath = util.target_path(target, "etc/netplan/00-snapd-config.yaml")
+ tpath = subp.target_path(target, "etc/netplan/00-snapd-config.yaml")
if not os.path.isfile(tpath):
return
content = util.load_file(tpath, decode=False)
if content != KNOWN_SNAPD_CONFIG:
return
- derived = [util.target_path(target, f) for f in (
+ derived = [subp.target_path(target, f) for f in (
'run/systemd/network/10-netplan-all-en.network',
'run/systemd/network/10-netplan-all-eth.network',
'run/systemd/generator/netplan.stamp')]
@@ -203,10 +204,10 @@ class Renderer(renderer.Renderer):
def features(self):
if self._features is None:
try:
- info_blob, _err = util.subp(self.NETPLAN_INFO, capture=True)
+ info_blob, _err = subp.subp(self.NETPLAN_INFO, capture=True)
info = util.load_yaml(info_blob)
self._features = info['netplan.io']['features']
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
# if the info subcommand is not present then we don't have any
# new features
pass
@@ -218,7 +219,7 @@ class Renderer(renderer.Renderer):
# check network state for version
# if v2, then extract network_state.config
# else render_v2_from_state
- fpnplan = os.path.join(util.target_path(target), self.netplan_path)
+ fpnplan = os.path.join(subp.target_path(target), self.netplan_path)
util.ensure_dir(os.path.dirname(fpnplan))
header = self.netplan_header if self.netplan_header else ""
@@ -239,7 +240,7 @@ class Renderer(renderer.Renderer):
if not run:
LOG.debug("netplan generate postcmd disabled")
return
- util.subp(self.NETPLAN_GENERATE, capture=True)
+ subp.subp(self.NETPLAN_GENERATE, capture=True)
def _net_setup_link(self, run=False):
"""To ensure device link properties are applied, we poke
@@ -253,7 +254,7 @@ class Renderer(renderer.Renderer):
for cmd in [setup_lnk + [SYS_CLASS_NET + iface]
for iface in get_devicelist() if
os.path.islink(SYS_CLASS_NET + iface)]:
- util.subp(cmd, capture=True)
+ subp.subp(cmd, capture=True)
def _render_content(self, network_state):
@@ -406,7 +407,7 @@ def available(target=None):
expected = ['netplan']
search = ['/usr/sbin', '/sbin']
for p in expected:
- if not util.which(p, search=search, target=target):
+ if not subp.which(p, search=search, target=target):
return False
return True
diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
index 63d6e291..b2f7d31e 100644
--- a/cloudinit/net/network_state.py
+++ b/cloudinit/net/network_state.py
@@ -215,7 +215,7 @@ class NetworkState(object):
return (
route.get('prefix') == 0
and route.get('network') in default_nets
- )
+ )
class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
@@ -297,9 +297,10 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
command_type = command['type']
try:
handler = self.command_handlers[command_type]
- except KeyError:
- raise RuntimeError("No handler found for"
- " command '%s'" % command_type)
+ except KeyError as e:
+ raise RuntimeError(
+ "No handler found for command '%s'" % command_type
+ ) from e
try:
handler(self, command)
except InvalidCommand:
@@ -312,13 +313,14 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
def parse_config_v2(self, skip_broken=True):
for command_type, command in self._config.items():
- if command_type == 'version':
+ if command_type in ['version', 'renderer']:
continue
try:
handler = self.command_handlers[command_type]
- except KeyError:
- raise RuntimeError("No handler found for"
- " command '%s'" % command_type)
+ except KeyError as e:
+ raise RuntimeError(
+ "No handler found for command '%s'" % command_type
+ ) from e
try:
handler(self, command)
self._v2_common(command)
@@ -696,7 +698,7 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
def handle_wifis(self, command):
LOG.warning('Wifi configuration is only available to distros with'
- 'netplan rendering support.')
+ ' netplan rendering support.')
def _v2_common(self, cfg):
LOG.debug('v2_common: handling config:\n%s', cfg)
@@ -722,10 +724,10 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
item_params = dict((key, value) for (key, value) in
item_cfg.items() if key not in
NETWORK_V2_KEY_FILTER)
- # we accept the fixed spelling, but write the old for compatability
+ # we accept the fixed spelling, but write the old for compatibility
# Xenial does not have an updated netplan which supports the
# correct spelling. LP: #1756701
- params = item_params['parameters']
+ params = item_params.get('parameters', {})
grat_value = params.pop('gratuitous-arp', None)
if grat_value:
params['gratuitious-arp'] = grat_value
@@ -734,8 +736,7 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
'type': cmd_type,
'name': item_name,
cmd_type + '_interfaces': item_cfg.get('interfaces'),
- 'params': dict((v2key_to_v1[k], v) for k, v in
- item_params.get('parameters', {}).items())
+ 'params': dict((v2key_to_v1[k], v) for k, v in params.items())
}
if 'mtu' in item_cfg:
v1_cmd['mtu'] = item_cfg['mtu']
@@ -915,9 +916,10 @@ def _normalize_route(route):
if metric:
try:
normal_route['metric'] = int(metric)
- except ValueError:
+ except ValueError as e:
raise TypeError(
- 'Route config metric {} is not an integer'.format(metric))
+ 'Route config metric {} is not an integer'.format(metric)
+ ) from e
return normal_route
diff --git a/cloudinit/net/openbsd.py b/cloudinit/net/openbsd.py
new file mode 100644
index 00000000..166d77e6
--- /dev/null
+++ b/cloudinit/net/openbsd.py
@@ -0,0 +1,47 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit import log as logging
+from cloudinit import subp
+from cloudinit import util
+import cloudinit.net.bsd
+
+LOG = logging.getLogger(__name__)
+
+
+class Renderer(cloudinit.net.bsd.BSDRenderer):
+
+ def write_config(self):
+ for device_name, v in self.interface_configurations.items():
+ if_file = 'etc/hostname.{}'.format(device_name)
+ fn = subp.target_path(self.target, if_file)
+ if device_name in self.dhcp_interfaces():
+ content = 'dhcp\n'
+ elif isinstance(v, dict):
+ try:
+ content = "inet {address} {netmask}\n".format(
+ address=v['address'],
+ netmask=v['netmask']
+ )
+                except KeyError:
+                    LOG.error(
+                        "Invalid static configuration for %s",
+                        device_name)
+                    continue
+            util.write_file(fn, content)
+
+ def start_services(self, run=False):
+ if not self._postcmds:
+ LOG.debug("openbsd generate postcmd disabled")
+ return
+ subp.subp(['sh', '/etc/netstart'], capture=True)
+
+ def set_route(self, network, netmask, gateway):
+ if network == '0.0.0.0':
+ if_file = 'etc/mygate'
+ fn = subp.target_path(self.target, if_file)
+ content = gateway + '\n'
+ util.write_file(fn, content)
+
+
+def available(target=None):
+ return util.is_OpenBSD()
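
On OpenBSD the rendered state ends up as one hostname.<ifname> file per
interface plus /etc/mygate for the default route. For a static configuration
the written files would look like this (hypothetical interface and addresses):

    # /etc/hostname.vio0
    inet 192.168.1.10 255.255.255.0

    # /etc/mygate
    192.168.1.1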
diff --git a/cloudinit/net/renderers.py b/cloudinit/net/renderers.py
index b98dbbe3..e2de4d55 100644
--- a/cloudinit/net/renderers.py
+++ b/cloudinit/net/renderers.py
@@ -2,18 +2,23 @@
from . import eni
from . import freebsd
+from . import netbsd
from . import netplan
from . import RendererNotFoundError
+from . import openbsd
from . import sysconfig
NAME_TO_RENDERER = {
"eni": eni,
"freebsd": freebsd,
+ "netbsd": netbsd,
"netplan": netplan,
+ "openbsd": openbsd,
"sysconfig": sysconfig,
}
-DEFAULT_PRIORITY = ["eni", "sysconfig", "netplan", "freebsd"]
+DEFAULT_PRIORITY = ["eni", "sysconfig", "netplan", "freebsd",
+ "netbsd", "openbsd"]
def search(priority=None, target=None, first=False):
diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
index 0a387377..0a5d481d 100644
--- a/cloudinit/net/sysconfig.py
+++ b/cloudinit/net/sysconfig.py
@@ -9,6 +9,7 @@ from configobj import ConfigObj
from cloudinit import log as logging
from cloudinit import util
+from cloudinit import subp
from cloudinit.distros.parsers import networkmanager_conf
from cloudinit.distros.parsers import resolv_conf
@@ -504,7 +505,7 @@ class Renderer(renderer.Renderer):
iface_cfg['IPADDR6_%d' % ipv6_index] = ipv6_cidr
else:
iface_cfg['IPV6ADDR_SECONDARIES'] += \
- " " + ipv6_cidr
+ " " + ipv6_cidr
else:
ipv4_index = ipv4_index + 1
suff = "" if ipv4_index == 0 else str(ipv4_index)
@@ -858,19 +859,19 @@ class Renderer(renderer.Renderer):
if not templates:
templates = self.templates
file_mode = 0o644
- base_sysconf_dir = util.target_path(target, self.sysconf_dir)
+ base_sysconf_dir = subp.target_path(target, self.sysconf_dir)
for path, data in self._render_sysconfig(base_sysconf_dir,
network_state, self.flavor,
templates=templates).items():
util.write_file(path, data, file_mode)
if self.dns_path:
- dns_path = util.target_path(target, self.dns_path)
+ dns_path = subp.target_path(target, self.dns_path)
resolv_content = self._render_dns(network_state,
existing_dns_path=dns_path)
if resolv_content:
util.write_file(dns_path, resolv_content, file_mode)
if self.networkmanager_conf_path:
- nm_conf_path = util.target_path(target,
+ nm_conf_path = subp.target_path(target,
self.networkmanager_conf_path)
nm_conf_content = self._render_networkmanager_conf(network_state,
templates)
@@ -878,12 +879,12 @@ class Renderer(renderer.Renderer):
util.write_file(nm_conf_path, nm_conf_content, file_mode)
if self.netrules_path:
netrules_content = self._render_persistent_net(network_state)
- netrules_path = util.target_path(target, self.netrules_path)
+ netrules_path = subp.target_path(target, self.netrules_path)
util.write_file(netrules_path, netrules_content, file_mode)
if available_nm(target=target):
- enable_ifcfg_rh(util.target_path(target, path=NM_CFG_FILE))
+ enable_ifcfg_rh(subp.target_path(target, path=NM_CFG_FILE))
- sysconfig_path = util.target_path(target, templates.get('control'))
+ sysconfig_path = subp.target_path(target, templates.get('control'))
# Distros configuring /etc/sysconfig/network as a file e.g. Centos
if sysconfig_path.endswith('network'):
util.ensure_dir(os.path.dirname(sysconfig_path))
@@ -906,20 +907,20 @@ def available_sysconfig(target=None):
expected = ['ifup', 'ifdown']
search = ['/sbin', '/usr/sbin']
for p in expected:
- if not util.which(p, search=search, target=target):
+ if not subp.which(p, search=search, target=target):
return False
expected_paths = [
'etc/sysconfig/network-scripts/network-functions',
'etc/sysconfig/config']
for p in expected_paths:
- if os.path.isfile(util.target_path(target, p)):
+ if os.path.isfile(subp.target_path(target, p)):
return True
return False
def available_nm(target=None):
- if not os.path.isfile(util.target_path(target, path=NM_CFG_FILE)):
+ if not os.path.isfile(subp.target_path(target, path=NM_CFG_FILE)):
return False
return True
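
The mechanical change through this file is util.target_path moving to subp.target_path. The helper's contract is small: with no target the path comes back as-is, otherwise the path is re-rooted under the target directory. A sketch of that contract, assuming the real helper adds only extra validation on top:

    import os

    def target_path(target, path=None):
        # Re-root path under target when operating on a chroot-like
        # directory; pass the path through for the running system.
        if target in (None, "/"):
            target = "/"
        if not path:
            return target
        return os.path.join(target, path.lstrip("/"))

    assert target_path(None, "/etc/resolv.conf") == "/etc/resolv.conf"
    assert target_path("/mnt/root", "/etc/resolv.conf") == \
        "/mnt/root/etc/resolv.conf"
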
diff --git a/cloudinit/net/tests/test_dhcp.py b/cloudinit/net/tests/test_dhcp.py
index c3fa1e04..74cf4b94 100644
--- a/cloudinit/net/tests/test_dhcp.py
+++ b/cloudinit/net/tests/test_dhcp.py
@@ -62,7 +62,7 @@ class TestParseDHCPLeasesFile(CiTestCase):
{'interface': 'wlp3s0', 'fixed-address': '192.168.2.74',
'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}]
write_file(lease_file, content)
- self.assertItemsEqual(expected, parse_dhcp_lease_file(lease_file))
+ self.assertCountEqual(expected, parse_dhcp_lease_file(lease_file))
class TestDHCPRFC3442(CiTestCase):
@@ -88,7 +88,7 @@ class TestDHCPRFC3442(CiTestCase):
'renew': '4 2017/07/27 18:02:30',
'expire': '5 2017/07/28 07:08:15'}]
write_file(lease_file, content)
- self.assertItemsEqual(expected, parse_dhcp_lease_file(lease_file))
+ self.assertCountEqual(expected, parse_dhcp_lease_file(lease_file))
def test_parse_lease_finds_classless_static_routes(self):
"""
@@ -114,7 +114,7 @@ class TestDHCPRFC3442(CiTestCase):
'renew': '4 2017/07/27 18:02:30',
'expire': '5 2017/07/28 07:08:15'}]
write_file(lease_file, content)
- self.assertItemsEqual(expected, parse_dhcp_lease_file(lease_file))
+ self.assertCountEqual(expected, parse_dhcp_lease_file(lease_file))
@mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
@mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
@@ -211,7 +211,7 @@ class TestDHCPParseStaticRoutes(CiTestCase):
"class_b": "16,172,16,10",
"class_a": "8,10,10",
"gateway": "0,0",
- "netlen": "33,0",
+ "netlen": "33,0",
}
for rfc3442 in bad_rfc3442.values():
self.assertEqual([], parse_static_routes(rfc3442))
@@ -266,7 +266,7 @@ class TestDHCPDiscoveryClean(CiTestCase):
'Skip dhcp_discovery: nic idontexist not found in get_devicelist.',
self.logs.getvalue())
- @mock.patch('cloudinit.net.dhcp.util.which')
+ @mock.patch('cloudinit.net.dhcp.subp.which')
@mock.patch('cloudinit.net.dhcp.find_fallback_nic')
def test_absent_dhclient_command(self, m_fallback, m_which):
"""When dhclient doesn't exist in the OS, log the issue and no-op."""
@@ -279,7 +279,7 @@ class TestDHCPDiscoveryClean(CiTestCase):
@mock.patch('cloudinit.temp_utils.os.getuid')
@mock.patch('cloudinit.net.dhcp.dhcp_discovery')
- @mock.patch('cloudinit.net.dhcp.util.which')
+ @mock.patch('cloudinit.net.dhcp.subp.which')
@mock.patch('cloudinit.net.dhcp.find_fallback_nic')
def test_dhclient_run_with_tmpdir(self, m_fback, m_which, m_dhcp, m_uid):
"""maybe_perform_dhcp_discovery passes tmpdir to dhcp_discovery."""
@@ -302,13 +302,14 @@ class TestDHCPDiscoveryClean(CiTestCase):
@mock.patch('time.sleep', mock.MagicMock())
@mock.patch('cloudinit.net.dhcp.os.kill')
- @mock.patch('cloudinit.net.dhcp.util.subp')
+ @mock.patch('cloudinit.net.dhcp.subp.subp')
def test_dhcp_discovery_run_in_sandbox_warns_invalid_pid(self, m_subp,
m_kill):
"""dhcp_discovery logs a warning when pidfile contains invalid content.
Lease processing still occurs and no proc kill is attempted.
"""
+ m_subp.return_value = ('', '')
tmpdir = self.tmp_dir()
dhclient_script = os.path.join(tmpdir, 'dhclient.orig')
script_content = '#!/bin/bash\necho fake-dhclient'
@@ -324,7 +325,7 @@ class TestDHCPDiscoveryClean(CiTestCase):
""")
write_file(self.tmp_path('dhcp.leases', tmpdir), lease_content)
- self.assertItemsEqual(
+ self.assertCountEqual(
[{'interface': 'eth9', 'fixed-address': '192.168.2.74',
'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}],
dhcp_discovery(dhclient_script, 'eth9', tmpdir))
@@ -337,13 +338,14 @@ class TestDHCPDiscoveryClean(CiTestCase):
@mock.patch('cloudinit.net.dhcp.util.get_proc_ppid')
@mock.patch('cloudinit.net.dhcp.os.kill')
@mock.patch('cloudinit.net.dhcp.util.wait_for_files')
- @mock.patch('cloudinit.net.dhcp.util.subp')
+ @mock.patch('cloudinit.net.dhcp.subp.subp')
def test_dhcp_discovery_run_in_sandbox_waits_on_lease_and_pid(self,
m_subp,
m_wait,
m_kill,
m_getppid):
"""dhcp_discovery waits for the presence of pidfile and dhcp.leases."""
+ m_subp.return_value = ('', '')
tmpdir = self.tmp_dir()
dhclient_script = os.path.join(tmpdir, 'dhclient.orig')
script_content = '#!/bin/bash\necho fake-dhclient'
@@ -364,12 +366,13 @@ class TestDHCPDiscoveryClean(CiTestCase):
@mock.patch('cloudinit.net.dhcp.util.get_proc_ppid')
@mock.patch('cloudinit.net.dhcp.os.kill')
- @mock.patch('cloudinit.net.dhcp.util.subp')
+ @mock.patch('cloudinit.net.dhcp.subp.subp')
def test_dhcp_discovery_run_in_sandbox(self, m_subp, m_kill, m_getppid):
"""dhcp_discovery brings up the interface and runs dhclient.
It also returns the parsed dhcp.leases file generated in the sandbox.
"""
+ m_subp.return_value = ('', '')
tmpdir = self.tmp_dir()
dhclient_script = os.path.join(tmpdir, 'dhclient.orig')
script_content = '#!/bin/bash\necho fake-dhclient'
@@ -389,7 +392,7 @@ class TestDHCPDiscoveryClean(CiTestCase):
write_file(pid_file, "%d\n" % my_pid)
m_getppid.return_value = 1 # Indicate that dhclient has daemonized
- self.assertItemsEqual(
+ self.assertCountEqual(
[{'interface': 'eth9', 'fixed-address': '192.168.2.74',
'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}],
dhcp_discovery(dhclient_script, 'eth9', tmpdir))
@@ -406,6 +409,87 @@ class TestDHCPDiscoveryClean(CiTestCase):
'eth9', '-sf', '/bin/true'], capture=True)])
m_kill.assert_has_calls([mock.call(my_pid, signal.SIGKILL)])
+ @mock.patch('cloudinit.net.dhcp.util.get_proc_ppid')
+ @mock.patch('cloudinit.net.dhcp.os.kill')
+ @mock.patch('cloudinit.net.dhcp.subp.subp')
+ def test_dhcp_discovery_outside_sandbox(self, m_subp, m_kill, m_getppid):
+ """dhcp_discovery brings up the interface and runs dhclient.
+
+ It also returns the parsed dhcp.leases file generated in the sandbox.
+ """
+ m_subp.return_value = ('', '')
+ tmpdir = self.tmp_dir()
+ dhclient_script = os.path.join(tmpdir, 'dhclient.orig')
+ script_content = '#!/bin/bash\necho fake-dhclient'
+ write_file(dhclient_script, script_content, mode=0o755)
+ lease_content = dedent("""
+ lease {
+ interface "eth9";
+ fixed-address 192.168.2.74;
+ option subnet-mask 255.255.255.0;
+ option routers 192.168.2.1;
+ }
+ """)
+ lease_file = os.path.join(tmpdir, 'dhcp.leases')
+ write_file(lease_file, lease_content)
+ pid_file = os.path.join(tmpdir, 'dhclient.pid')
+ my_pid = 1
+ write_file(pid_file, "%d\n" % my_pid)
+ m_getppid.return_value = 1 # Indicate that dhclient has daemonized
+
+ with mock.patch('os.access', return_value=False):
+ self.assertCountEqual(
+ [{'interface': 'eth9', 'fixed-address': '192.168.2.74',
+ 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}],
+ dhcp_discovery(dhclient_script, 'eth9', tmpdir))
+ # dhclient script got copied
+ with open(os.path.join(tmpdir, 'dhclient.orig')) as stream:
+ self.assertEqual(script_content, stream.read())
+ # Interface was brought up before dhclient called from sandbox
+ m_subp.assert_has_calls([
+ mock.call(
+ ['ip', 'link', 'set', 'dev', 'eth9', 'up'], capture=True),
+ mock.call(
+ [os.path.join(tmpdir, 'dhclient.orig'), '-1', '-v', '-lf',
+ lease_file, '-pf', os.path.join(tmpdir, 'dhclient.pid'),
+ 'eth9', '-sf', '/bin/true'], capture=True)])
+ m_kill.assert_has_calls([mock.call(my_pid, signal.SIGKILL)])
+
+ @mock.patch('cloudinit.net.dhcp.util.get_proc_ppid')
+ @mock.patch('cloudinit.net.dhcp.os.kill')
+ @mock.patch('cloudinit.net.dhcp.subp.subp')
+ def test_dhcp_output_error_stream(self, m_subp, m_kill, m_getppid):
+ """"dhcp_log_func is called with the output and error streams of
+ dhclinet when the callable is passed."""
+ dhclient_err = 'FAKE DHCLIENT ERROR'
+ dhclient_out = 'FAKE DHCLIENT OUT'
+ m_subp.return_value = (dhclient_out, dhclient_err)
+ tmpdir = self.tmp_dir()
+ dhclient_script = os.path.join(tmpdir, 'dhclient.orig')
+ script_content = '#!/bin/bash\necho fake-dhclient'
+ write_file(dhclient_script, script_content, mode=0o755)
+ lease_content = dedent("""
+ lease {
+ interface "eth9";
+ fixed-address 192.168.2.74;
+ option subnet-mask 255.255.255.0;
+ option routers 192.168.2.1;
+ }
+ """)
+ lease_file = os.path.join(tmpdir, 'dhcp.leases')
+ write_file(lease_file, lease_content)
+ pid_file = os.path.join(tmpdir, 'dhclient.pid')
+ my_pid = 1
+ write_file(pid_file, "%d\n" % my_pid)
+ m_getppid.return_value = 1 # Indicate that dhclient has daemonized
+
+ def dhcp_log_func(out, err):
+ self.assertEqual(out, dhclient_out)
+ self.assertEqual(err, dhclient_err)
+
+ dhcp_discovery(
+ dhclient_script, 'eth9', tmpdir, dhcp_log_func=dhcp_log_func)
+
class TestSystemdParseLeases(CiTestCase):
@@ -529,7 +613,7 @@ class TestEphemeralDhcpNoNetworkSetup(HttprettyTestCase):
# Ensure that no teardown happens:
m_dhcp.assert_not_called()
- @mock.patch('cloudinit.net.dhcp.util.subp')
+ @mock.patch('cloudinit.net.dhcp.subp.subp')
@mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
def test_ephemeral_dhcp_setup_network_if_url_connectivity(
self, m_dhcp, m_subp):
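
The new tests above pin down an optional dhcp_log_func callable that receives dhclient's captured stdout and stderr. A sketch of a caller passing such a callback (the logger wiring is illustrative):

    import logging

    LOG = logging.getLogger(__name__)

    def log_dhclient_streams(out, err):
        # Callback shape exercised by test_dhcp_output_error_stream:
        # it is handed dhclient's stdout and stderr as two strings.
        LOG.debug("dhclient stdout: %s", out)
        LOG.debug("dhclient stderr: %s", err)

    # dhcp_discovery(dhclient_script, 'eth9', tmpdir,
    #                dhcp_log_func=log_dhclient_streams)
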
diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py
index 5081a337..311ab6f8 100644
--- a/cloudinit/net/tests/test_init.py
+++ b/cloudinit/net/tests/test_init.py
@@ -2,16 +2,20 @@
import copy
import errno
-import httpretty
+import ipaddress
import os
-import requests
import textwrap
from unittest import mock
+import httpretty
+import pytest
+import requests
+
import cloudinit.net as net
-from cloudinit.util import ensure_file, write_file, ProcessExecutionError
-from cloudinit.tests.helpers import CiTestCase, HttprettyTestCase
from cloudinit import safeyaml as yaml
+from cloudinit.tests.helpers import CiTestCase, HttprettyTestCase
+from cloudinit.subp import ProcessExecutionError
+from cloudinit.util import ensure_file, write_file
class TestSysDevPath(CiTestCase):
@@ -139,12 +143,6 @@ class TestReadSysNet(CiTestCase):
write_file(os.path.join(self.sysdir, 'eth0', 'operstate'), state)
self.assertFalse(net.is_up('eth0'))
- def test_is_wireless(self):
- """is_wireless is True when /sys/net/devname/wireless exists."""
- self.assertFalse(net.is_wireless('eth0'))
- ensure_file(os.path.join(self.sysdir, 'eth0', 'wireless'))
- self.assertTrue(net.is_wireless('eth0'))
-
def test_is_bridge(self):
"""is_bridge is True when /sys/net/devname/bridge exists."""
self.assertFalse(net.is_bridge('eth0'))
@@ -200,32 +198,6 @@ class TestReadSysNet(CiTestCase):
write_file(os.path.join(self.sysdir, 'eth0', 'uevent'), content)
self.assertTrue(net.is_vlan('eth0'))
- def test_is_connected_when_physically_connected(self):
- """is_connected is True when /sys/net/devname/iflink reports 2."""
- self.assertFalse(net.is_connected('eth0'))
- write_file(os.path.join(self.sysdir, 'eth0', 'iflink'), "2")
- self.assertTrue(net.is_connected('eth0'))
-
- def test_is_connected_when_wireless_and_carrier_active(self):
- """is_connected is True if wireless /sys/net/devname/carrier is 1."""
- self.assertFalse(net.is_connected('eth0'))
- ensure_file(os.path.join(self.sysdir, 'eth0', 'wireless'))
- self.assertFalse(net.is_connected('eth0'))
- write_file(os.path.join(self.sysdir, 'eth0', 'carrier'), "1")
- self.assertTrue(net.is_connected('eth0'))
-
- def test_is_physical(self):
- """is_physical is True when /sys/net/devname/device exists."""
- self.assertFalse(net.is_physical('eth0'))
- ensure_file(os.path.join(self.sysdir, 'eth0', 'device'))
- self.assertTrue(net.is_physical('eth0'))
-
- def test_is_present(self):
- """is_present is True when /sys/net/devname exists."""
- self.assertFalse(net.is_present('eth0'))
- ensure_file(os.path.join(self.sysdir, 'eth0', 'device'))
- self.assertTrue(net.is_present('eth0'))
-
class TestGenerateFallbackConfig(CiTestCase):
@@ -341,8 +313,6 @@ class TestGenerateFallbackConfig(CiTestCase):
class TestNetFindFallBackNic(CiTestCase):
- with_logs = True
-
def setUp(self):
super(TestNetFindFallBackNic, self).setUp()
sys_mock = mock.patch('cloudinit.net.get_sys_class_path')
@@ -396,7 +366,7 @@ class TestGetDeviceList(CiTestCase):
"""get_devicelist returns a directory listing for SYS_CLASS_NET."""
write_file(os.path.join(self.sysdir, 'eth0', 'operstate'), 'up')
write_file(os.path.join(self.sysdir, 'eth1', 'operstate'), 'up')
- self.assertItemsEqual(['eth0', 'eth1'], net.get_devicelist())
+ self.assertCountEqual(['eth0', 'eth1'], net.get_devicelist())
class TestGetInterfaceMAC(CiTestCase):
@@ -540,7 +510,7 @@ class TestInterfaceHasOwnMAC(CiTestCase):
net.interface_has_own_mac('eth1', strict=True)
-@mock.patch('cloudinit.net.util.subp')
+@mock.patch('cloudinit.net.subp.subp')
class TestEphemeralIPV4Network(CiTestCase):
with_logs = True
@@ -993,86 +963,8 @@ class TestExtractPhysdevs(CiTestCase):
net.extract_physdevs({'version': 3, 'awesome_config': []})
-class TestWaitForPhysdevs(CiTestCase):
-
- with_logs = True
-
- def setUp(self):
- super(TestWaitForPhysdevs, self).setUp()
- self.add_patch('cloudinit.net.get_interfaces_by_mac',
- 'm_get_iface_mac')
- self.add_patch('cloudinit.util.udevadm_settle', 'm_udev_settle')
-
- def test_wait_for_physdevs_skips_settle_if_all_present(self):
- physdevs = [
- ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', '0x1000'],
- ['00:11:22:33:44:55', 'ens3', 'e1000', '0x1643'],
- ]
- netcfg = {
- 'version': 2,
- 'ethernets': {args[1]: _mk_v2_phys(*args)
- for args in physdevs},
- }
- self.m_get_iface_mac.side_effect = iter([
- {'aa:bb:cc:dd:ee:ff': 'eth0',
- '00:11:22:33:44:55': 'ens3'},
- ])
- net.wait_for_physdevs(netcfg)
- self.assertEqual(0, self.m_udev_settle.call_count)
-
- def test_wait_for_physdevs_calls_udev_settle_on_missing(self):
- physdevs = [
- ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', '0x1000'],
- ['00:11:22:33:44:55', 'ens3', 'e1000', '0x1643'],
- ]
- netcfg = {
- 'version': 2,
- 'ethernets': {args[1]: _mk_v2_phys(*args)
- for args in physdevs},
- }
- self.m_get_iface_mac.side_effect = iter([
- {'aa:bb:cc:dd:ee:ff': 'eth0'}, # first call ens3 is missing
- {'aa:bb:cc:dd:ee:ff': 'eth0',
- '00:11:22:33:44:55': 'ens3'}, # second call has both
- ])
- net.wait_for_physdevs(netcfg)
- self.m_udev_settle.assert_called_with(exists=net.sys_dev_path('ens3'))
-
- def test_wait_for_physdevs_raise_runtime_error_if_missing_and_strict(self):
- physdevs = [
- ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', '0x1000'],
- ['00:11:22:33:44:55', 'ens3', 'e1000', '0x1643'],
- ]
- netcfg = {
- 'version': 2,
- 'ethernets': {args[1]: _mk_v2_phys(*args)
- for args in physdevs},
- }
- self.m_get_iface_mac.return_value = {}
- with self.assertRaises(RuntimeError):
- net.wait_for_physdevs(netcfg)
-
- self.assertEqual(5 * len(physdevs), self.m_udev_settle.call_count)
-
- def test_wait_for_physdevs_no_raise_if_not_strict(self):
- physdevs = [
- ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', '0x1000'],
- ['00:11:22:33:44:55', 'ens3', 'e1000', '0x1643'],
- ]
- netcfg = {
- 'version': 2,
- 'ethernets': {args[1]: _mk_v2_phys(*args)
- for args in physdevs},
- }
- self.m_get_iface_mac.return_value = {}
- net.wait_for_physdevs(netcfg, strict=False)
- self.assertEqual(5 * len(physdevs), self.m_udev_settle.call_count)
-
-
class TestNetFailOver(CiTestCase):
- with_logs = True
-
def setUp(self):
super(TestNetFailOver, self).setUp()
self.add_patch('cloudinit.net.util', 'm_util')
@@ -1297,4 +1189,48 @@ class TestNetFailOver(CiTestCase):
m_standby.return_value = False
self.assertFalse(net.is_netfailover(devname, driver))
+
+class TestIsIpAddress:
+ """Tests for net.is_ip_address.
+
+ Instead of testing with values we rely on the ipaddress stdlib module to
+ handle all values correctly, so simply test that is_ip_address defers to
+ the ipaddress module correctly.
+ """
+
+ @pytest.mark.parametrize('ip_address_side_effect,expected_return', (
+ (ValueError, False),
+ (lambda _: ipaddress.IPv4Address('192.168.0.1'), True),
+ (lambda _: ipaddress.IPv6Address('2001:db8::'), True),
+ ))
+ def test_is_ip_address(self, ip_address_side_effect, expected_return):
+ with mock.patch('cloudinit.net.ipaddress.ip_address',
+ side_effect=ip_address_side_effect) as m_ip_address:
+ ret = net.is_ip_address(mock.sentinel.ip_address_in)
+ assert expected_return == ret
+ expected_call = mock.call(mock.sentinel.ip_address_in)
+ assert [expected_call] == m_ip_address.call_args_list
+
+
+class TestIsIpv4Address:
+ """Tests for net.is_ipv4_address.
+
+ Instead of testing with values we rely on the ipaddress stdlib module to
+ handle all values correctly, so simply test that is_ipv4_address defers to
+ the ipaddress module correctly.
+ """
+
+ @pytest.mark.parametrize('ipv4address_mock,expected_return', (
+ (mock.Mock(side_effect=ValueError), False),
+ (mock.Mock(return_value=ipaddress.IPv4Address('192.168.0.1')), True),
+ ))
+ def test_is_ip_address(self, ipv4address_mock, expected_return):
+ with mock.patch('cloudinit.net.ipaddress.IPv4Address',
+ ipv4address_mock) as m_ipv4address:
+ ret = net.is_ipv4_address(mock.sentinel.ip_address_in)
+ assert expected_return == ret
+ expected_call = mock.call(mock.sentinel.ip_address_in)
+ assert [expected_call] == m_ipv4address.call_args_list
+
+
# vi: ts=4 expandtab
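
TestIsIpAddress and TestIsIpv4Address assert only that the helpers defer to the stdlib ipaddress module. Under that assumption, the wrapper shape being tested is essentially a try/except around ipaddress.ip_address:

    import ipaddress

    def is_ip_address(s):
        # ipaddress.ip_address raises ValueError for anything that is
        # not a valid IPv4 or IPv6 literal; map that to False.
        try:
            ipaddress.ip_address(s)
        except ValueError:
            return False
        return True

    assert is_ip_address("192.168.0.1")
    assert is_ip_address("2001:db8::")
    assert not is_ip_address("not-an-ip")
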
diff --git a/cloudinit/net/tests/test_network_state.py b/cloudinit/net/tests/test_network_state.py
index 55880852..07d726e2 100644
--- a/cloudinit/net/tests/test_network_state.py
+++ b/cloudinit/net/tests/test_network_state.py
@@ -45,4 +45,14 @@ class TestNetworkStateParseConfig(CiTestCase):
self.assertNotEqual(None, result)
+class TestNetworkStateParseConfigV2(CiTestCase):
+
+ def test_version_2_ignores_renderer_key(self):
+ ncfg = {'version': 2, 'renderer': 'networkd', 'ethernets': {}}
+ nsi = network_state.NetworkStateInterpreter(version=ncfg['version'],
+ config=ncfg)
+ nsi.parse_config(skip_broken=False)
+ self.assertEqual(ncfg, nsi.as_dict()['config'])
+
+
# vi: ts=4 expandtab
diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py
index 6ba21f4d..628e2908 100644
--- a/cloudinit/netinfo.py
+++ b/cloudinit/netinfo.py
@@ -13,6 +13,7 @@ import re
from cloudinit import log as logging
from cloudinit.net.network_state import net_prefix_to_ipv4_mask
+from cloudinit import subp
from cloudinit import util
from cloudinit.simpletable import SimpleTable
@@ -91,6 +92,53 @@ def _netdev_info_iproute(ipaddr_out):
return devs
+def _netdev_info_ifconfig_netbsd(ifconfig_data):
+ # fields that need to be returned in devs for each dev
+ devs = {}
+ for line in ifconfig_data.splitlines():
+ if len(line) == 0:
+ continue
+ if line[0] not in ("\t", " "):
+ curdev = line.split()[0]
+ # current ifconfig pops a ':' on the end of the device
+ if curdev.endswith(':'):
+ curdev = curdev[:-1]
+ if curdev not in devs:
+ devs[curdev] = deepcopy(DEFAULT_NETDEV_INFO)
+ toks = line.lower().strip().split()
+ if len(toks) > 1:
+ if re.search(r"flags=[x\d]+<up.*>", toks[1]):
+ devs[curdev]['up'] = True
+
+ for i in range(len(toks)):
+ if toks[i] == "inet": # Create new ipv4 addr entry
+ network, net_bits = toks[i + 1].split('/')
+ devs[curdev]['ipv4'].append(
+ {'ip': network, 'mask': net_prefix_to_ipv4_mask(net_bits)})
+ elif toks[i] == "broadcast":
+ devs[curdev]['ipv4'][-1]['bcast'] = toks[i + 1]
+ elif toks[i] == "address:":
+ devs[curdev]['hwaddr'] = toks[i + 1]
+ elif toks[i] == "inet6":
+ if toks[i + 1] == "addr:":
+ devs[curdev]['ipv6'].append({'ip': toks[i + 2]})
+ else:
+ devs[curdev]['ipv6'].append({'ip': toks[i + 1]})
+ elif toks[i] == "prefixlen": # Add prefix to current ipv6 value
+ addr6 = devs[curdev]['ipv6'][-1]['ip'] + "/" + toks[i + 1]
+ devs[curdev]['ipv6'][-1]['ip'] = addr6
+ elif toks[i].startswith("scope:"):
+ devs[curdev]['ipv6'][-1]['scope6'] = toks[i][len("scope:"):]
+ elif toks[i] == "scopeid":
+ res = re.match(r'.*<(\S+)>', toks[i + 1])
+ if res:
+ devs[curdev]['ipv6'][-1]['scope6'] = res.group(1)
+ else:
+ devs[curdev]['ipv6'][-1]['scope6'] = toks[i + 1]
+
+ return devs
+
+
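
_netdev_info_ifconfig_netbsd parses NetBSD-style ifconfig output into the same devs dict the other parsers produce, and netdev_info (below) now picks the tool accordingly: the NetBSD parser first, then iproute2, then net-tools. A sketch of that dispatch order using the helpers this patch already imports:

    from cloudinit import subp, util

    def pick_netdev_command():
        # Mirror of netdev_info's dispatch: platform check first,
        # then prefer iproute2, and fall back to net-tools.
        if util.is_NetBSD():
            return ["ifconfig", "-a"]
        if subp.which("ip"):
            return ["ip", "addr", "show"]
        if subp.which("ifconfig"):
            return ["ifconfig", "-a"]
        return None
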
def _netdev_info_ifconfig(ifconfig_data):
# fields that need to be returned in devs for each dev
devs = {}
@@ -149,13 +197,16 @@ def _netdev_info_ifconfig(ifconfig_data):
def netdev_info(empty=""):
devs = {}
- if util.which('ip'):
+ if util.is_NetBSD():
+ (ifcfg_out, _err) = subp.subp(["ifconfig", "-a"], rcs=[0, 1])
+ devs = _netdev_info_ifconfig_netbsd(ifcfg_out)
+ elif subp.which('ip'):
# Try iproute first of all
- (ipaddr_out, _err) = util.subp(["ip", "addr", "show"])
+ (ipaddr_out, _err) = subp.subp(["ip", "addr", "show"])
devs = _netdev_info_iproute(ipaddr_out)
- elif util.which('ifconfig'):
+ elif subp.which('ifconfig'):
# Fall back to net-tools if iproute2 is not present
- (ifcfg_out, _err) = util.subp(["ifconfig", "-a"], rcs=[0, 1])
+ (ifcfg_out, _err) = subp.subp(["ifconfig", "-a"], rcs=[0, 1])
devs = _netdev_info_ifconfig(ifcfg_out)
else:
LOG.warning(
@@ -235,10 +286,10 @@ def _netdev_route_info_iproute(iproute_data):
entry['flags'] = ''.join(flags)
routes['ipv4'].append(entry)
try:
- (iproute_data6, _err6) = util.subp(
+ (iproute_data6, _err6) = subp.subp(
["ip", "--oneline", "-6", "route", "list", "table", "all"],
rcs=[0, 1])
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
pass
else:
entries6 = iproute_data6.splitlines()
@@ -307,9 +358,9 @@ def _netdev_route_info_netstat(route_data):
routes['ipv4'].append(entry)
try:
- (route_data6, _err6) = util.subp(
+ (route_data6, _err6) = subp.subp(
["netstat", "-A", "inet6", "--route", "--numeric"], rcs=[0, 1])
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
pass
else:
entries6 = route_data6.splitlines()
@@ -343,13 +394,13 @@ def _netdev_route_info_netstat(route_data):
def route_info():
routes = {}
- if util.which('ip'):
+ if subp.which('ip'):
# Try iproute first of all
- (iproute_out, _err) = util.subp(["ip", "-o", "route", "list"])
+ (iproute_out, _err) = subp.subp(["ip", "-o", "route", "list"])
routes = _netdev_route_info_iproute(iproute_out)
- elif util.which('netstat'):
+ elif subp.which('netstat'):
# Fall back to net-tools if iproute2 is not present
- (route_out, _err) = util.subp(
+ (route_out, _err) = subp.subp(
["netstat", "--route", "--numeric", "--extend"], rcs=[0, 1])
routes = _netdev_route_info_netstat(route_out)
else:
diff --git a/cloudinit/reporting/events.py b/cloudinit/reporting/events.py
index e5dfab33..b8677c8b 100644
--- a/cloudinit/reporting/events.py
+++ b/cloudinit/reporting/events.py
@@ -12,7 +12,7 @@ import base64
import os.path
import time
-from . import instantiated_handler_registry
+from . import instantiated_handler_registry, available_handlers
FINISH_EVENT_TYPE = 'finish'
START_EVENT_TYPE = 'start'
@@ -81,17 +81,32 @@ class FinishReportingEvent(ReportingEvent):
return data
-def report_event(event):
- """Report an event to all registered event handlers.
+def report_event(event, excluded_handler_types=None):
+ """Report an event to all registered event handlers
+ except those whose type is in excluded_handler_types.
This should generally be called via one of the other functions in
the reporting module.
+ :param excluded_handler_types:
+ List of handlers types to exclude from reporting the event to.
:param event_type:
The type of the event; this should be a constant from the
reporting module.
"""
- for _, handler in instantiated_handler_registry.registered_items.items():
+
+ if not excluded_handler_types:
+ excluded_handler_types = {}
+ excluded_handler_classes = {
+ hndl_cls
+ for hndl_type, hndl_cls in available_handlers.registered_items.items()
+ if hndl_type in excluded_handler_types
+ }
+
+ handlers = instantiated_handler_registry.registered_items.items()
+ for _, handler in handlers:
+ if type(handler) in excluded_handler_classes:
+ continue # skip this excluded handler
handler.publish_event(event)
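
report_event resolves the excluded type names to handler classes through available_handlers, then skips any instantiated handler of those classes. A usage sketch (the 'log' type name is assumed to be registered, as it is in the default handler registry):

    from cloudinit.reporting import events

    evt = events.ReportingEvent(
        events.START_EVENT_TYPE, "init", "starting up")
    # Publish to every instantiated handler except those registered
    # under the 'log' type name.
    events.report_event(evt, excluded_handler_types={"log"})
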
diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py
index 946df7e0..0a8c7af3 100755
--- a/cloudinit/reporting/handlers.py
+++ b/cloudinit/reporting/handlers.py
@@ -35,7 +35,6 @@ class ReportingHandler(metaclass=abc.ABCMeta):
def flush(self):
"""Ensure ReportingHandler has published all events"""
- pass
class LogHandler(ReportingHandler):
@@ -114,6 +113,8 @@ class HyperVKvpReportingHandler(ReportingHandler):
https://technet.microsoft.com/en-us/library/dn798287.aspx#Linux%20guests
"""
HV_KVP_EXCHANGE_MAX_VALUE_SIZE = 2048
+ # The maximum value size expected in Azure
+ HV_KVP_AZURE_MAX_VALUE_SIZE = 1024
HV_KVP_EXCHANGE_MAX_KEY_SIZE = 512
HV_KVP_RECORD_SIZE = (HV_KVP_EXCHANGE_MAX_KEY_SIZE +
HV_KVP_EXCHANGE_MAX_VALUE_SIZE)
@@ -139,7 +140,8 @@ class HyperVKvpReportingHandler(ReportingHandler):
self.event_key_prefix = u"{0}|{1}".format(self.EVENT_PREFIX,
self.incarnation_no)
self.publish_thread = threading.Thread(
- target=self._publish_event_routine)
+ target=self._publish_event_routine
+ )
self.publish_thread.daemon = True
self.publish_thread.start()
@@ -195,17 +197,23 @@ class HyperVKvpReportingHandler(ReportingHandler):
def _event_key(self, event):
"""
the event key format is:
- CLOUD_INIT|<incarnation number>|<event_type>|<event_name>|<time>
+ CLOUD_INIT|<incarnation number>|<event_type>|<event_name>|<uuid>
+ [|subevent_index]
"""
return u"{0}|{1}|{2}|{3}".format(self.event_key_prefix,
event.event_type, event.name,
uuid.uuid4())
def _encode_kvp_item(self, key, value):
- data = (struct.pack("%ds%ds" % (
+ data = struct.pack(
+ "%ds%ds"
+ % (
self.HV_KVP_EXCHANGE_MAX_KEY_SIZE,
- self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE),
- key.encode('utf-8'), value.encode('utf-8')))
+ self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE,
+ ),
+ key.encode("utf-8"),
+ value.encode("utf-8"),
+ )
return data
def _decode_kvp_item(self, record_data):
@@ -219,7 +227,7 @@ class HyperVKvpReportingHandler(ReportingHandler):
v = (
record_data[
self.HV_KVP_EXCHANGE_MAX_KEY_SIZE:self.HV_KVP_RECORD_SIZE
- ].decode('utf-8').strip('\x00'))
+ ].decode('utf-8').strip('\x00'))
return {'key': k, 'value': v}
@@ -244,13 +252,14 @@ class HyperVKvpReportingHandler(ReportingHandler):
data_without_desc = json.dumps(meta_data,
separators=self.JSON_SEPARATORS)
room_for_desc = (
- self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE -
+ self.HV_KVP_AZURE_MAX_VALUE_SIZE -
len(data_without_desc) - 8)
value = data_without_desc.replace(
message_place_holder,
'"{key}":"{desc}"'.format(
key=self.MSG_KEY, desc=des_in_json[:room_for_desc]))
- result_array.append(self._encode_kvp_item(key, value))
+ subkey = "{}|{}".format(key, i)
+ result_array.append(self._encode_kvp_item(subkey, value))
i += 1
des_in_json = des_in_json[room_for_desc:]
if len(des_in_json) == 0:
@@ -265,11 +274,11 @@ class HyperVKvpReportingHandler(ReportingHandler):
"""
key = self._event_key(event)
meta_data = {
- "name": event.name,
- "type": event.event_type,
- "ts": (datetime.utcfromtimestamp(event.timestamp)
- .isoformat() + 'Z'),
- }
+ "name": event.name,
+ "type": event.event_type,
+ "ts": (datetime.utcfromtimestamp(event.timestamp)
+ .isoformat() + 'Z'),
+ }
if hasattr(event, self.RESULT_KEY):
meta_data[self.RESULT_KEY] = event.result
meta_data[self.MSG_KEY] = event.description
@@ -277,7 +286,7 @@ class HyperVKvpReportingHandler(ReportingHandler):
# if it reaches the maximum length of kvp value,
# break it down to slices.
# this should be very corner case.
- if len(value) > self.HV_KVP_EXCHANGE_MAX_VALUE_SIZE:
+ if len(value) > self.HV_KVP_AZURE_MAX_VALUE_SIZE:
return self._break_down(key, meta_data, event.description)
else:
data = self._encode_kvp_item(key, value)
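
Each KVP record is a fixed-width struct: a 512-byte key field and a 2048-byte value field, NUL-padded. The patch additionally caps the value payload at HV_KVP_AZURE_MAX_VALUE_SIZE (1024) and suffixes sliced keys with an index. A standalone sketch of the pack/unpack round-trip the _encode_kvp_item/_decode_kvp_item pair performs:

    import struct

    KEY_SIZE, VALUE_SIZE = 512, 2048

    def encode_kvp(key, value):
        # NUL-pad key and value to their fixed field sizes.
        return struct.pack("%ds%ds" % (KEY_SIZE, VALUE_SIZE),
                           key.encode("utf-8"), value.encode("utf-8"))

    def decode_kvp(record):
        k = record[:KEY_SIZE].decode("utf-8").strip("\x00")
        v = record[KEY_SIZE:KEY_SIZE + VALUE_SIZE]
        return {"key": k, "value": v.decode("utf-8").strip("\x00")}

    rec = encode_kvp("CLOUD_INIT|0|start|init|uuid|0", '{"msg": "hi"}')
    assert len(rec) == KEY_SIZE + VALUE_SIZE
    assert decode_kvp(rec)["value"] == '{"msg": "hi"}'
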
diff --git a/cloudinit/serial.py b/cloudinit/serial.py
index f9ef7acc..67486e09 100644
--- a/cloudinit/serial.py
+++ b/cloudinit/serial.py
@@ -1,7 +1,5 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from __future__ import absolute_import
-
try:
from serial import Serial
except ImportError:
diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
index 5270fda8..ac3ecc3d 100644
--- a/cloudinit/sources/DataSourceAltCloud.py
+++ b/cloudinit/sources/DataSourceAltCloud.py
@@ -18,9 +18,9 @@ import os.path
from cloudinit import log as logging
from cloudinit import sources
+from cloudinit import subp
from cloudinit import util
-from cloudinit.util import ProcessExecutionError
LOG = logging.getLogger(__name__)
@@ -192,7 +192,7 @@ class DataSourceAltCloud(sources.DataSource):
# modprobe floppy
try:
modprobe_floppy()
- except ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
util.logexc(LOG, 'Failed modprobe: %s', e)
return False
@@ -201,7 +201,7 @@ class DataSourceAltCloud(sources.DataSource):
# udevadm settle for floppy device
try:
util.udevadm_settle(exists=floppy_dev, timeout=5)
- except (ProcessExecutionError, OSError) as e:
+ except (subp.ProcessExecutionError, OSError) as e:
util.logexc(LOG, 'Failed udevadm_settle: %s\n', e)
return False
@@ -261,7 +261,7 @@ class DataSourceAltCloud(sources.DataSource):
def modprobe_floppy():
- out, _err = util.subp(CMD_PROBE_FLOPPY)
+ out, _err = subp.subp(CMD_PROBE_FLOPPY)
LOG.debug('Command: %s\nOutput%s', ' '.join(CMD_PROBE_FLOPPY), out)
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 61ec522a..f3c6452b 100755
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -8,7 +8,6 @@ import base64
import contextlib
import crypt
from functools import partial
-import json
import os
import os.path
import re
@@ -19,9 +18,11 @@ import xml.etree.ElementTree as ET
from cloudinit import log as logging
from cloudinit import net
from cloudinit.event import EventType
+from cloudinit.net import device_driver
from cloudinit.net.dhcp import EphemeralDHCPv4
from cloudinit import sources
from cloudinit.sources.helpers import netlink
+from cloudinit import subp
from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc
from cloudinit import util
from cloudinit.reporting import events
@@ -34,7 +35,9 @@ from cloudinit.sources.helpers.azure import (
get_system_info,
report_diagnostic_event,
EphemeralDHCPv4WithReporting,
- is_byte_swapped)
+ is_byte_swapped,
+ dhcp_log_cb,
+ push_log_to_kvp)
LOG = logging.getLogger(__name__)
@@ -139,8 +142,8 @@ def find_dev_from_busdev(camcontrol_out, busdev):
def execute_or_debug(cmd, fail_ret=None):
try:
- return util.subp(cmd)[0]
- except util.ProcessExecutionError:
+ return subp.subp(cmd)[0]
+ except subp.ProcessExecutionError:
LOG.debug("Failed to execute: %s", ' '.join(cmd))
return fail_ret
@@ -164,12 +167,11 @@ def get_resource_disk_on_freebsd(port_id):
port_id = port_id - 2
g1 = "000" + str(port_id)
g0g1 = "{0}-{1}".format(g0, g1)
- """
- search 'X' from
- 'dev.storvsc.X.%pnpinfo:
- classid=32412632-86cb-44a2-9b5c-50d1417354f5
- deviceid=00000000-0001-8899-0000-000000000000'
- """
+
+ # search 'X' from
+ # 'dev.storvsc.X.%pnpinfo:
+ # classid=32412632-86cb-44a2-9b5c-50d1417354f5
+ # deviceid=00000000-0001-8899-0000-000000000000'
sysctl_out = get_dev_storvsc_sysctl()
storvscid = find_storvscid_from_sysctl_pnpinfo(sysctl_out, g0g1)
@@ -252,11 +254,11 @@ DEF_PASSWD_REDACTION = 'REDACTED'
def get_hostname(hostname_command='hostname'):
if not isinstance(hostname_command, (list, tuple)):
hostname_command = (hostname_command,)
- return util.subp(hostname_command, capture=True)[0].strip()
+ return subp.subp(hostname_command, capture=True)[0].strip()
def set_hostname(hostname, hostname_command='hostname'):
- util.subp([hostname_command, hostname])
+ subp.subp([hostname_command, hostname])
@azure_ds_telemetry_reporter
@@ -275,7 +277,14 @@ def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'):
(previous_hostname == temp_hostname and policy != 'force')):
yield None
return
- set_hostname(temp_hostname, hostname_command)
+ try:
+ set_hostname(temp_hostname, hostname_command)
+ except Exception as e:
+ msg = 'Failed setting temporary hostname: %s' % e
+ report_diagnostic_event(msg)
+ LOG.warning(msg)
+ yield None
+ return
try:
yield previous_hostname
finally:
@@ -343,7 +352,7 @@ class DataSourceAzure(sources.DataSource):
try:
invoke_agent(agent_cmd)
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
# claim the datasource even if the command failed
util.logexc(LOG, "agent command '%s' failed.",
self.ds_cfg['agent_command'])
@@ -522,8 +531,9 @@ class DataSourceAzure(sources.DataSource):
try:
crawled_data = util.log_time(
- logfunc=LOG.debug, msg='Crawl of metadata service',
- func=self.crawl_metadata)
+ logfunc=LOG.debug, msg='Crawl of metadata service',
+ func=self.crawl_metadata
+ )
except sources.InvalidMetaDataException as e:
LOG.warning('Could not crawl Azure metadata: %s', e)
return False
@@ -596,25 +606,35 @@ class DataSourceAzure(sources.DataSource):
return_val = None
def exc_cb(msg, exception):
- if isinstance(exception, UrlError) and exception.code == 404:
- if self.imds_poll_counter == self.imds_logging_threshold:
- # Reducing the logging frequency as we are polling IMDS
- self.imds_logging_threshold *= 2
- LOG.debug("Call to IMDS with arguments %s failed "
- "with status code %s after %s retries",
- msg, exception.code, self.imds_poll_counter)
- LOG.debug("Backing off logging threshold for the same "
- "exception to %d", self.imds_logging_threshold)
- self.imds_poll_counter += 1
- return True
-
- # If we get an exception while trying to call IMDS, we
- # call DHCP and setup the ephemeral network to acquire the new IP.
- LOG.debug("Call to IMDS with arguments %s failed with "
- "status code %s", msg, exception.code)
- report_diagnostic_event("polling IMDS failed with exception %s"
- % exception.code)
- return False
+ if isinstance(exception, UrlError):
+ if exception.code in (404, 410):
+ if self.imds_poll_counter == self.imds_logging_threshold:
+ # Reducing the logging frequency as we are polling IMDS
+ self.imds_logging_threshold *= 2
+ LOG.debug("Call to IMDS with arguments %s failed "
+ "with status code %s after %s retries",
+ msg, exception.code, self.imds_poll_counter)
+ LOG.debug("Backing off logging threshold for the same "
+ "exception to %d",
+ self.imds_logging_threshold)
+ report_diagnostic_event("poll IMDS with %s failed. "
+ "Exception: %s and code: %s" %
+ (msg, exception.cause,
+ exception.code))
+ self.imds_poll_counter += 1
+ return True
+ else:
+ # If we get an exception while trying to call IMDS, we call
+ # DHCP and setup the ephemeral network to acquire a new IP.
+ report_diagnostic_event("poll IMDS with %s failed. "
+ "Exception: %s and code: %s" %
+ (msg, exception.cause,
+ exception.code))
+ return False
+
+ LOG.debug("poll IMDS failed with an unexpected exception: %s",
+ exception)
+ return False
LOG.debug("Wait for vnetswitch to happen")
while True:
@@ -624,7 +644,8 @@ class DataSourceAzure(sources.DataSource):
name="obtain-dhcp-lease",
description="obtain dhcp lease",
parent=azure_ds_reporter):
- self._ephemeral_dhcp_ctx = EphemeralDHCPv4()
+ self._ephemeral_dhcp_ctx = EphemeralDHCPv4(
+ dhcp_log_func=dhcp_log_cb)
lease = self._ephemeral_dhcp_ctx.obtain_lease()
if vnet_switched:
@@ -675,7 +696,6 @@ class DataSourceAzure(sources.DataSource):
except UrlError:
# Teardown our EphemeralDHCPv4 context on failure as we retry
self._ephemeral_dhcp_ctx.clean_network()
- pass
finally:
if nl_sock:
nl_sock.close()
@@ -771,9 +791,12 @@ class DataSourceAzure(sources.DataSource):
@azure_ds_telemetry_reporter
def activate(self, cfg, is_new_instance):
- address_ephemeral_resize(is_new_instance=is_new_instance,
- preserve_ntfs=self.ds_cfg.get(
- DS_CFG_KEY_PRESERVE_NTFS, False))
+ try:
+ address_ephemeral_resize(is_new_instance=is_new_instance,
+ preserve_ntfs=self.ds_cfg.get(
+ DS_CFG_KEY_PRESERVE_NTFS, False))
+ finally:
+ push_log_to_kvp(self.sys_cfg['def_log_file'])
return
@property
@@ -882,9 +905,10 @@ def can_dev_be_reformatted(devpath, preserve_ntfs):
(cand_part, cand_path, devpath))
with events.ReportEventStack(
- name="mount-ntfs-and-count",
- description="mount-ntfs-and-count",
- parent=azure_ds_reporter) as evt:
+ name="mount-ntfs-and-count",
+ description="mount-ntfs-and-count",
+ parent=azure_ds_reporter
+ ) as evt:
try:
file_count = util.mount_cb(cand_path, count_files, mtype="ntfs",
update_env_for_mount={'LANG': 'C'})
@@ -913,9 +937,10 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,
# wait for ephemeral disk to come up
naplen = .2
with events.ReportEventStack(
- name="wait-for-ephemeral-disk",
- description="wait for ephemeral disk",
- parent=azure_ds_reporter):
+ name="wait-for-ephemeral-disk",
+ description="wait for ephemeral disk",
+ parent=azure_ds_reporter
+ ):
missing = util.wait_for_files([devpath],
maxwait=maxwait,
naplen=naplen,
@@ -972,7 +997,7 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname):
if command == "builtin":
if util.is_FreeBSD():
command = BOUNCE_COMMAND_FREEBSD
- elif util.which('ifup'):
+ elif subp.which('ifup'):
command = BOUNCE_COMMAND_IFUP
else:
LOG.debug(
@@ -983,7 +1008,7 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname):
shell = not isinstance(command, (list, tuple))
# capture=False, see comments in bug 1202758 and bug 1206164.
util.log_time(logfunc=LOG.debug, msg="publishing hostname",
- get_uptime=True, func=util.subp,
+ get_uptime=True, func=subp.subp,
kwargs={'args': command, 'shell': shell, 'capture': False,
'env': env})
return True
@@ -993,7 +1018,7 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname):
def crtfile_to_pubkey(fname, data=None):
pipeline = ('openssl x509 -noout -pubkey < "$0" |'
'ssh-keygen -i -m PKCS8 -f /dev/stdin')
- (out, _err) = util.subp(['sh', '-c', pipeline, fname],
+ (out, _err) = subp.subp(['sh', '-c', pipeline, fname],
capture=True, data=data)
return out.rstrip()
@@ -1005,7 +1030,7 @@ def pubkeys_from_crt_files(flist):
for fname in flist:
try:
pubkeys.append(crtfile_to_pubkey(fname))
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
errors.append(fname)
if errors:
@@ -1047,7 +1072,7 @@ def invoke_agent(cmd):
# this is a function itself to simplify patching it for test
if cmd:
LOG.debug("invoking agent: %s", cmd)
- util.subp(cmd, shell=(not isinstance(cmd, list)))
+ subp.subp(cmd, shell=(not isinstance(cmd, list)))
else:
LOG.debug("not invoking agent")
@@ -1122,7 +1147,7 @@ def read_azure_ovf(contents):
except Exception as e:
error_str = "Invalid ovf-env.xml: %s" % e
report_diagnostic_event(error_str)
- raise BrokenAzureDataSource(error_str)
+ raise BrokenAzureDataSource(error_str) from e
results = find_child(dom.documentElement,
lambda n: n.localName == "ProvisioningSection")
@@ -1323,9 +1348,10 @@ def parse_network_config(imds_metadata):
@return: Dictionary containing network version 2 standard configuration.
"""
with events.ReportEventStack(
- name="parse_network_config",
- description="",
- parent=azure_ds_reporter) as evt:
+ name="parse_network_config",
+ description="",
+ parent=azure_ds_reporter
+ ) as evt:
if imds_metadata != sources.UNSET and imds_metadata:
netconfig = {'version': 2, 'ethernets': {}}
LOG.debug('Azure: generating network configuration from IMDS')
@@ -1362,9 +1388,16 @@ def parse_network_config(imds_metadata):
ip=privateIp, prefix=netPrefix))
if dev_config:
mac = ':'.join(re.findall(r'..', intf['macAddress']))
- dev_config.update(
- {'match': {'macaddress': mac.lower()},
- 'set-name': nicname})
+ dev_config.update({
+ 'match': {'macaddress': mac.lower()},
+ 'set-name': nicname
+ })
+ # With netvsc, we can get two interfaces that
+ # share the same MAC, so we need to make sure
+ # our match condition also contains the driver
+ driver = device_driver(nicname)
+ if driver and driver == 'hv_netvsc':
+ dev_config['match']['driver'] = driver
netconfig['ethernets'][nicname] = dev_config
evt.description = "network config from imds"
else:
@@ -1422,8 +1455,14 @@ def _get_metadata_from_imds(retries):
LOG.debug(msg)
return {}
try:
+ from json.decoder import JSONDecodeError
+ json_decode_error = JSONDecodeError
+ except ImportError:
+ json_decode_error = ValueError
+
+ try:
return util.load_json(str(response))
- except json.decoder.JSONDecodeError as e:
+ except json_decode_error as e:
report_diagnostic_event('non-json imds response: %s' % e)
LOG.warning(
'Ignoring non-json IMDS instance metadata: %s', str(response))
@@ -1468,12 +1507,12 @@ def maybe_remove_ubuntu_network_config_scripts(paths=None):
def _is_platform_viable(seed_dir):
+ """Check platform environment to report if this datasource may run."""
with events.ReportEventStack(
- name="check-platform-viability",
- description="found azure asset tag",
- parent=azure_ds_reporter) as evt:
-
- """Check platform environment to report if this datasource may run."""
+ name="check-platform-viability",
+ description="found azure asset tag",
+ parent=azure_ds_reporter
+ ) as evt:
asset_tag = util.read_dmi_data('chassis-asset-tag')
if asset_tag == AZURE_CHASSIS_ASSET_TAG:
return True
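
The most consequential Azure change above is the driver match for netvsc: an accelerated-networking VM exposes a synthetic NIC and an SR-IOV VF that share one MAC, so matching on MAC alone is ambiguous. Illustrative shape of the v2 ethernet entry parse_network_config emits in that case (MAC and name are made up):

    dev_config = {
        'dhcp4': True,
        'match': {
            'macaddress': '00:0d:3a:04:75:98',
            # hv_netvsc pins the match to the synthetic NIC rather
            # than the VF that shares the same MAC
            'driver': 'hv_netvsc',
        },
        'set-name': 'eth0',
    }
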
diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
index 2013bed7..54810439 100644
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -22,6 +22,7 @@ from cloudinit import log as logging
from cloudinit.net import dhcp
from cloudinit import sources
from cloudinit import url_helper as uhelp
+from cloudinit import subp
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -46,7 +47,7 @@ class CloudStackPasswordServerClient(object):
# The password server was in the past, a broken HTTP server, but is now
# fixed. wget handles this seamlessly, so it's easier to shell out to
# that rather than write our own handling code.
- output, _ = util.subp([
+ output, _ = subp.subp([
'wget', '--quiet', '--tries', '3', '--timeout', '20',
'--output-document', '-', '--header',
'DomU_Request: {0}'.format(domu_request),
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index f77923c2..62756cf7 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -10,6 +10,7 @@ import os
from cloudinit import log as logging
from cloudinit import sources
+from cloudinit import subp
from cloudinit import util
from cloudinit.net import eni
@@ -71,11 +72,11 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
if not found:
dslist = self.sys_cfg.get('datasource_list')
for dev in find_candidate_devs(dslist=dslist):
- try:
- if util.is_FreeBSD() and dev.startswith("/dev/cd"):
+ mtype = None
+ if util.is_BSD():
+ if dev.startswith("/dev/cd"):
mtype = "cd9660"
- else:
- mtype = None
+ try:
results = util.mount_cb(dev, read_config_drive,
mtype=mtype)
found = dev
@@ -245,7 +246,7 @@ def find_candidate_devs(probe_optical=True, dslist=None):
for device in OPTICAL_DEVICES:
try:
util.find_devs_with(path=device)
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
pass
by_fstype = []
diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py
index e0ef665e..5040ce5b 100644
--- a/cloudinit/sources/DataSourceDigitalOcean.py
+++ b/cloudinit/sources/DataSourceDigitalOcean.py
@@ -58,7 +58,7 @@ class DataSourceDigitalOcean(sources.DataSource):
ipv4LL_nic = None
if self.use_ip4LL:
- ipv4LL_nic = do_helper.assign_ipv4_link_local()
+ ipv4LL_nic = do_helper.assign_ipv4_link_local(self.distro)
md = do_helper.read_metadata(
self.metadata_address, timeout=self.timeout,
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 0f2bfef4..1d09c12a 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -29,7 +29,6 @@ STRICT_ID_PATH = ("datasource", "Ec2", "strict_id")
STRICT_ID_DEFAULT = "warn"
API_TOKEN_ROUTE = 'latest/api/token'
-API_TOKEN_DISABLED = '_ec2_disable_api_token'
AWS_TOKEN_TTL_SECONDS = '21600'
AWS_TOKEN_PUT_HEADER = 'X-aws-ec2-metadata-token'
AWS_TOKEN_REQ_HEADER = AWS_TOKEN_PUT_HEADER + '-ttl-seconds'
@@ -63,7 +62,7 @@ class DataSourceEc2(sources.DataSource):
# Priority ordered list of additional metadata versions which will be tried
# for extended metadata content. IPv6 support comes in 2016-09-02
- extended_metadata_versions = ['2016-09-02']
+ extended_metadata_versions = ['2018-09-24', '2016-09-02']
# Setup read_url parameters per get_url_params.
url_max_wait = 120
@@ -193,6 +192,12 @@ class DataSourceEc2(sources.DataSource):
return self.metadata['instance-id']
def _maybe_fetch_api_token(self, mdurls, timeout=None, max_wait=None):
+ """ Get an API token for EC2 Instance Metadata Service.
+
+ On EC2, IMDS will always answer an API token, unless
+ the instance owner has disabled the IMDS HTTP endpoint or
+ the network topology conflicts with the configured hop-limit.
+ """
if self.cloud_name != CloudNames.AWS:
return
@@ -205,18 +210,33 @@ class DataSourceEc2(sources.DataSource):
urls.append(cur)
url2base[cur] = url
- # use the self._status_cb to check for Read errors, which means
- # we can't reach the API token URL, so we should disable IMDSv2
+ # use the self._imds_exception_cb to check for Read errors
LOG.debug('Fetching Ec2 IMDSv2 API Token')
- url, response = uhelp.wait_for_url(
- urls=urls, max_wait=1, timeout=1, status_cb=self._status_cb,
- headers_cb=self._get_headers, request_method=request_method,
- headers_redact=AWS_TOKEN_REDACT)
+
+ response = None
+ url = None
+ url_params = self.get_url_params()
+ try:
+ url, response = uhelp.wait_for_url(
+ urls=urls, max_wait=url_params.max_wait_seconds,
+ timeout=url_params.timeout_seconds, status_cb=LOG.warning,
+ headers_cb=self._get_headers,
+ exception_cb=self._imds_exception_cb,
+ request_method=request_method,
+ headers_redact=AWS_TOKEN_REDACT)
+ except uhelp.UrlError:
+ # We use the raised exception to interrupt the retry loop.
+ # Nothing else to do here.
+ pass
if url and response:
self._api_token = response
return url2base[url]
+ # If we get here, then wait_for_url timed out waiting for IMDS,
+ # or the IMDS HTTP endpoint is disabled.
+ return None
+
def wait_for_metadata_service(self):
mcfg = self.ds_cfg
@@ -240,9 +260,11 @@ class DataSourceEc2(sources.DataSource):
# try the api token path first
metadata_address = self._maybe_fetch_api_token(mdurls)
- if not metadata_address:
- if self._api_token == API_TOKEN_DISABLED:
- LOG.warning('Retrying with IMDSv1')
+ # When running on EC2, we always access IMDS with an API token.
+ # If we could not get an API token, then we assume the IMDS
+ # endpoint was disabled and we move on without a data source.
+ # Fallback to IMDSv1 if not running on EC2
+ if not metadata_address and self.cloud_name != CloudNames.AWS:
# if we can't get a token, use instance-id path
urls = []
url2base = {}
@@ -267,6 +289,8 @@ class DataSourceEc2(sources.DataSource):
if metadata_address:
self.metadata_address = metadata_address
LOG.debug("Using metadata source: '%s'", self.metadata_address)
+ elif self.cloud_name == CloudNames.AWS:
+ LOG.warning("IMDS's HTTP endpoint is probably disabled")
else:
LOG.critical("Giving up on md from %s after %s seconds",
urls, int(time.time() - start_time))
@@ -381,13 +405,16 @@ class DataSourceEc2(sources.DataSource):
logfunc=LOG.debug, msg='Re-crawl of metadata service',
func=self.get_data)
- # Limit network configuration to only the primary/fallback nic
iface = self.fallback_interface
- macs_to_nics = {net.get_interface_mac(iface): iface}
net_md = self.metadata.get('network')
if isinstance(net_md, dict):
+ # SRU_BLOCKER: xenial, bionic and eoan should default
+ # apply_full_imds_network_config to False to retain original
+ # behavior on those releases.
result = convert_ec2_metadata_network_config(
- net_md, macs_to_nics=macs_to_nics, fallback_nic=iface)
+ net_md, fallback_nic=iface,
+ full_network_config=util.get_cfg_option_bool(
+ self.ds_cfg, 'apply_full_imds_network_config', True))
# RELEASE_BLOCKER: xenial should drop the below if statement,
# because the issue being addressed doesn't exist pre-netplan.
@@ -496,11 +523,29 @@ class DataSourceEc2(sources.DataSource):
self._api_token = None
return True # always retry
- def _status_cb(self, msg, exc=None):
- LOG.warning(msg)
- if 'Read timed out' in msg:
- LOG.warning('Cannot use Ec2 IMDSv2 API tokens, using IMDSv1')
- self._api_token = API_TOKEN_DISABLED
+ def _imds_exception_cb(self, msg, exception=None):
+ """Fail quickly on proper AWS if IMDSv2 rejects API token request
+
+ Guidance from Amazon is that if IMDSv2 had disabled token requests
+ by returning a 403, or cloud-init malformed requests resulting in
+ other 40X errors, we want the datasource detection to fail quickly
+ without retries as those symptoms will likely not be resolved by
+ retries.
+
+ Exceptions such as requests.ConnectionError due to IMDS being
+ temporarily unroutable or unavailable will still retry due to the
+ callsite wait_for_url.
+ """
+ if isinstance(exception, uhelp.UrlError):
+ # requests.ConnectionError will have exception.code == None
+ if exception.code and exception.code >= 400:
+ if exception.code == 403:
+ LOG.warning('Ec2 IMDS endpoint returned a 403 error. '
+ 'HTTP endpoint is disabled. Aborting.')
+ else:
+ LOG.warning('Fatal error while requesting '
+ 'Ec2 IMDSv2 API tokens')
+ raise exception
def _get_headers(self, url=''):
"""Return a dict of headers for accessing a url.
@@ -508,8 +553,7 @@ class DataSourceEc2(sources.DataSource):
If _api_token is unset on AWS, attempt to refresh the token via a PUT
and then return the updated token header.
"""
- if self.cloud_name != CloudNames.AWS or (self._api_token ==
- API_TOKEN_DISABLED):
+ if self.cloud_name != CloudNames.AWS:
return {}
# Request a 6 hour token if URL is API_TOKEN_ROUTE
request_token_header = {AWS_TOKEN_REQ_HEADER: AWS_TOKEN_TTL_SECONDS}
@@ -573,9 +617,11 @@ def parse_strict_mode(cfgval):
if sleep:
try:
sleep = int(sleep)
- except ValueError:
- raise ValueError("Invalid sleep '%s' in strict_id setting '%s': "
- "not an integer" % (sleep, cfgval))
+ except ValueError as e:
+ raise ValueError(
+ "Invalid sleep '%s' in strict_id setting '%s': not an integer"
+ % (sleep, cfgval)
+ ) from e
else:
sleep = None
@@ -678,9 +724,10 @@ def _collect_platform_data():
return data
-def convert_ec2_metadata_network_config(network_md, macs_to_nics=None,
- fallback_nic=None):
- """Convert ec2 metadata to network config version 1 data dict.
+def convert_ec2_metadata_network_config(
+ network_md, macs_to_nics=None, fallback_nic=None,
+ full_network_config=True):
+ """Convert ec2 metadata to network config version 2 data dict.
@param: network_md: 'network' portion of EC2 metadata.
generally formed as {"interfaces": {"macs": {}} where
@@ -690,28 +737,105 @@ def convert_ec2_metadata_network_config(network_md, macs_to_nics=None,
not provided, get_interfaces_by_mac is called to get it from the OS.
@param: fallback_nic: Optionally provide the primary nic interface name.
This nic will be guaranteed to minimally have a dhcp4 configuration.
+ @param: full_network_config: Boolean set True to configure all networking
+ presented by IMDS. This includes rendering secondary IPv4 and IPv6
+ addresses on all NICs and rendering network config on secondary NICs.
+ If False, only the primary nic will be configured and only with dhcp
+ (IPv4/IPv6).
- @return A dict of network config version 1 based on the metadata and macs.
+ @return A dict of network config version 2 based on the metadata and macs.
"""
- netcfg = {'version': 1, 'config': []}
+ netcfg = {'version': 2, 'ethernets': {}}
if not macs_to_nics:
macs_to_nics = net.get_interfaces_by_mac()
macs_metadata = network_md['interfaces']['macs']
- for mac, nic_name in macs_to_nics.items():
+
+ if not full_network_config:
+ for mac, nic_name in macs_to_nics.items():
+ if nic_name == fallback_nic:
+ break
+ dev_config = {'dhcp4': True,
+ 'dhcp6': False,
+ 'match': {'macaddress': mac.lower()},
+ 'set-name': nic_name}
+ nic_metadata = macs_metadata.get(mac)
+ if nic_metadata.get('ipv6s'): # Any IPv6 addresses configured
+ dev_config['dhcp6'] = True
+ netcfg['ethernets'][nic_name] = dev_config
+ return netcfg
+ # Apply network config for all nics and any secondary IPv4/v6 addresses
+ for mac, nic_name in sorted(macs_to_nics.items()):
nic_metadata = macs_metadata.get(mac)
if not nic_metadata:
continue # Not a physical nic represented in metadata
- nic_cfg = {'type': 'physical', 'name': nic_name, 'subnets': []}
- nic_cfg['mac_address'] = mac
- if (nic_name == fallback_nic or nic_metadata.get('public-ipv4s') or
- nic_metadata.get('local-ipv4s')):
- nic_cfg['subnets'].append({'type': 'dhcp4'})
- if nic_metadata.get('ipv6s'):
- nic_cfg['subnets'].append({'type': 'dhcp6'})
- netcfg['config'].append(nic_cfg)
+ # device-number is zero-indexed; we want it 1-indexed for the
+ # route-metric multiplication on the following line
+ nic_idx = int(nic_metadata['device-number']) + 1
+ dhcp_override = {'route-metric': nic_idx * 100}
+ dev_config = {'dhcp4': True, 'dhcp4-overrides': dhcp_override,
+ 'dhcp6': False,
+ 'match': {'macaddress': mac.lower()},
+ 'set-name': nic_name}
+ if nic_metadata.get('ipv6s'): # Any IPv6 addresses configured
+ dev_config['dhcp6'] = True
+ dev_config['dhcp6-overrides'] = dhcp_override
+ dev_config['addresses'] = get_secondary_addresses(nic_metadata, mac)
+ if not dev_config['addresses']:
+ dev_config.pop('addresses') # Since we found none configured
+ netcfg['ethernets'][nic_name] = dev_config
+ # Remove route-metric dhcp overrides if only one nic configured
+ if len(netcfg['ethernets']) == 1:
+ for nic_name in netcfg['ethernets'].keys():
+ netcfg['ethernets'][nic_name].pop('dhcp4-overrides')
+ netcfg['ethernets'][nic_name].pop('dhcp6-overrides', None)
return netcfg
+def get_secondary_addresses(nic_metadata, mac):
+ """Parse interface-specific nic metadata and return any secondary IPs
+
+ :return: List of secondary IPv4 or IPv6 addresses to configure on the
+ interface
+ """
+ ipv4s = nic_metadata.get('local-ipv4s')
+ ipv6s = nic_metadata.get('ipv6s')
+ addresses = []
+ # In versions < 2018-09-24, local-ipv4s or ipv6s is a str with one IP
+ if bool(isinstance(ipv4s, list) and len(ipv4s) > 1):
+ addresses.extend(
+ _get_secondary_addresses(
+ nic_metadata, 'subnet-ipv4-cidr-block', mac, ipv4s, '24'))
+ if bool(isinstance(ipv6s, list) and len(ipv6s) > 1):
+ addresses.extend(
+ _get_secondary_addresses(
+ nic_metadata, 'subnet-ipv6-cidr-block', mac, ipv6s, '128'))
+ return sorted(addresses)
+
+
+def _get_secondary_addresses(nic_metadata, cidr_key, mac, ips, default_prefix):
+ """Return list of IP addresses as CIDRs for secondary IPs
+
+ The CIDR prefix will be default_prefix if cidr_key is absent or not
+ parseable in nic_metadata.
+ """
+ addresses = []
+ cidr = nic_metadata.get(cidr_key)
+ prefix = default_prefix
+ if not cidr or len(cidr.split('/')) != 2:
+ ip_type = 'ipv4' if 'ipv4' in cidr_key else 'ipv6'
+ LOG.warning(
+ 'Could not parse %s %s for mac %s. %s network'
+ ' config prefix defaults to /%s',
+ cidr_key, cidr, mac, ip_type, prefix)
+ else:
+ prefix = cidr.split('/')[1]
+ # We know we have > 1 IPs in metadata for this IP type
+ for ip in ips[1:]:
+ addresses.append(
+ '{ip}/{prefix}'.format(ip=ip, prefix=prefix))
+ return addresses
+
+
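
convert_ec2_metadata_network_config now emits netplan v2 and, when full_network_config is set, renders every NIC with a route-metric scaled by device-number plus any secondary addresses as CIDRs. A worked sketch of the prefix handling in get_secondary_addresses (metadata values are made up; the first address stays with DHCP):

    nic_metadata = {
        'local-ipv4s': ['172.31.1.10', '172.31.1.11', '172.31.1.12'],
        'subnet-ipv4-cidr-block': '172.31.1.0/20',
    }
    # Secondary IPs reuse the subnet prefix; /24 is the documented
    # fallback when the cidr key is missing or unparseable.
    prefix = nic_metadata['subnet-ipv4-cidr-block'].split('/')[1]
    secondary = ['%s/%s' % (ip, prefix)
                 for ip in nic_metadata['local-ipv4s'][1:]]
    assert secondary == ['172.31.1.11/20', '172.31.1.12/20']
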
# Used to match classes to dependencies
datasources = [
(DataSourceEc2Local, (sources.DEP_FILESYSTEM,)), # Run at init-local
diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py
index 6cbfbbac..0ec5f6ec 100644
--- a/cloudinit/sources/DataSourceGCE.py
+++ b/cloudinit/sources/DataSourceGCE.py
@@ -116,7 +116,7 @@ def _write_host_key_to_guest_attributes(key_type, key_value):
resp = url_helper.readurl(url=url, data=key_value, headers=HEADERS,
request_method='PUT', check_status=False)
if resp.ok():
- LOG.debug('Wrote %s host key to guest attributes.', key_type)
+ LOG.debug('Wrote %s host key to guest attributes.', key_type)
else:
LOG.debug('Unable to write %s host key to guest attributes.', key_type)
diff --git a/cloudinit/sources/DataSourceHetzner.py b/cloudinit/sources/DataSourceHetzner.py
index 50298330..a86035e0 100644
--- a/cloudinit/sources/DataSourceHetzner.py
+++ b/cloudinit/sources/DataSourceHetzner.py
@@ -59,12 +59,19 @@ class DataSourceHetzner(sources.DataSource):
self.userdata_address, timeout=self.timeout,
sec_between=self.wait_retry, retries=self.retries)
- self.userdata_raw = ud
+ # Hetzner cloud does not support binary user-data. So here, do a
+ # base64 decode of the data if we can. The end result being that a
+ # user can provide base64 encoded (possibly gzipped) data as user-data.
+ #
+ # The fallout is that in the event of b64 encoded user-data,
+ # /var/lib/cloud-init/cloud-config.txt will not be identical to the
+ # user-data provided. It will be decoded.
+ self.userdata_raw = hc_helper.maybe_b64decode(ud)
self.metadata_full = md
- """hostname is name provided by user at launch. The API enforces
- it is a valid hostname, but it is not guaranteed to be resolvable
- in dns or fully qualified."""
+ # hostname is name provided by user at launch. The API enforces it is
+ # a valid hostname, but it is not guaranteed to be resolvable in dns or
+ # fully qualified.
self.metadata['instance-id'] = md['instance-id']
self.metadata['local-hostname'] = md['hostname']
self.metadata['network-config'] = md.get('network-config', None)
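
The maybe_b64decode helper referenced here lives in cloudinit/sources/helpers/hetzner.py (imported as hc_helper). A minimal sketch of its semantics, reconstructed from the comment above rather than quoted from the helper itself:

    import base64
    import binascii

    def maybe_b64decode(data: bytes) -> bytes:
        # If data is valid base64, return it decoded; otherwise return it
        # unchanged, so plain-text user-data still passes through intact.
        try:
            return base64.b64decode(data, validate=True)
        except binascii.Error:
            return data
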
diff --git a/cloudinit/sources/DataSourceIBMCloud.py b/cloudinit/sources/DataSourceIBMCloud.py
index e0c714e8..8d196185 100644
--- a/cloudinit/sources/DataSourceIBMCloud.py
+++ b/cloudinit/sources/DataSourceIBMCloud.py
@@ -99,6 +99,7 @@ import os
from cloudinit import log as logging
from cloudinit import sources
from cloudinit.sources.helpers import openstack
+from cloudinit import subp
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -240,7 +241,7 @@ def get_ibm_platform():
fslabels = {}
try:
devs = util.blkid()
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
LOG.warning("Failed to run blkid: %s", e)
return (None, None)
@@ -302,7 +303,8 @@ def read_md():
except sources.BrokenMetadata as e:
raise RuntimeError(
"Failed reading IBM config disk (platform=%s path=%s): %s" %
- (platform, path, e))
+ (platform, path, e)
+ ) from e
ret.update(results)
return ret
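
One pattern worth calling out, since it recurs throughout this merge: rewriting `raise NewError(...)` inside an except block as `raise NewError(...) from e`. Chaining sets __cause__ on the new exception, so tracebacks show the root failure instead of silently replacing it. A standalone demonstration with a hypothetical error type:

    class BrokenConfigDisk(Exception):
        """Hypothetical error type, for demonstration only."""

    def read_config(path):
        try:
            with open(path) as f:
                return f.read()
        except OSError as e:
            # 'from e' records the OSError as __cause__; the traceback then
            # prints it first, followed by "The above exception was the
            # direct cause of the following exception:".
            raise BrokenConfigDisk("failed reading %s" % path) from e
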
diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index 517913aa..9156925f 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -6,8 +6,6 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from __future__ import print_function
-
import hashlib
import os
import time
@@ -228,7 +226,8 @@ def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None,
except url_helper.UrlError as e:
if e.code == 404 and not optional:
raise MAASSeedDirMalformed(
- "Missing required %s: %s" % (path, e))
+ "Missing required %s: %s" % (path, e)
+ ) from e
elif e.code != 404:
raise e
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index ee748b41..e408d730 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -36,23 +36,15 @@ class DataSourceNoCloud(sources.DataSource):
return "%s [seed=%s][dsmode=%s]" % (root, self.seed, self.dsmode)
def _get_devices(self, label):
- if util.is_FreeBSD():
- devlist = [
- p for p in ['/dev/msdosfs/' + label, '/dev/iso9660/' + label]
- if os.path.exists(p)]
- else:
- # Query optical drive to get it in blkid cache for 2.6 kernels
- util.find_devs_with(path="/dev/sr0")
- util.find_devs_with(path="/dev/sr1")
-
- fslist = util.find_devs_with("TYPE=vfat")
- fslist.extend(util.find_devs_with("TYPE=iso9660"))
+ fslist = util.find_devs_with("TYPE=vfat")
+ fslist.extend(util.find_devs_with("TYPE=iso9660"))
- label_list = util.find_devs_with("LABEL=%s" % label.upper())
- label_list.extend(util.find_devs_with("LABEL=%s" % label.lower()))
+ label_list = util.find_devs_with("LABEL=%s" % label.upper())
+ label_list.extend(util.find_devs_with("LABEL=%s" % label.lower()))
+ label_list.extend(util.find_devs_with("LABEL_FATBOOT=%s" % label))
- devlist = list(set(fslist) & set(label_list))
- devlist.sort(reverse=True)
+ devlist = list(set(fslist) & set(label_list))
+ devlist.sort(reverse=True)
return devlist
def _get_data(self):
@@ -370,7 +362,7 @@ def _merge_new_seed(cur, seeded):
class DataSourceNoCloudNet(DataSourceNoCloud):
def __init__(self, sys_cfg, distro, paths):
DataSourceNoCloud.__init__(self, sys_cfg, distro, paths)
- self.supported_seed_starts = ("http://", "https://", "ftp://")
+ self.supported_seed_starts = ("http://", "https://")
# Used to match classes to dependencies
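
The rewritten _get_devices() reads naturally as a set operation: a device qualifies only if it carries a supported filesystem type and one of the accepted labels (now including LABEL_FATBOOT). An illustration with made-up device lists:

    # Hypothetical results of the find_devs_with() calls above.
    fslist = ['/dev/sr0', '/dev/vdb', '/dev/sdc1']         # TYPE=vfat/iso9660
    label_list = ['/dev/vdb', '/dev/sdc1', '/dev/loop3']   # LABEL matches

    devlist = list(set(fslist) & set(label_list))
    devlist.sort(reverse=True)
    print(devlist)  # ['/dev/vdb', '/dev/sdc1']
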
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index 7f55b5f8..6a9a331d 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -16,6 +16,7 @@ from xml.dom import minidom
from cloudinit import log as logging
from cloudinit import sources
+from cloudinit import subp
from cloudinit import util
from cloudinit.sources.helpers.vmware.imc.config \
import Config
@@ -37,7 +38,8 @@ from cloudinit.sources.helpers.vmware.imc.guestcust_util import (
enable_nics,
get_nics_to_enable,
set_customization_status,
- get_tools_config
+ get_tools_config,
+ set_gc_status
)
LOG = logging.getLogger(__name__)
@@ -140,6 +142,8 @@ class DataSourceOVF(sources.DataSource):
try:
cf = ConfigFile(vmwareImcConfigFilePath)
self._vmware_cust_conf = Config(cf)
+ set_gc_status(self._vmware_cust_conf, "Started")
+
(md, ud, cfg) = read_vmware_imc(self._vmware_cust_conf)
self._vmware_nics_to_enable = get_nics_to_enable(nicspath)
imcdirpath = os.path.dirname(vmwareImcConfigFilePath)
@@ -148,14 +152,25 @@ class DataSourceOVF(sources.DataSource):
product_marker, os.path.join(self.paths.cloud_dir, 'data'))
special_customization = product_marker and not hasmarkerfile
customscript = self._vmware_cust_conf.custom_script_name
- custScriptConfig = get_tools_config(
- CONFGROUPNAME_GUESTCUSTOMIZATION,
- GUESTCUSTOMIZATION_ENABLE_CUST_SCRIPTS,
- "false")
- if custScriptConfig.lower() != "true":
- # Update the customization status if there is a
- # custom script is disabled
- if special_customization and customscript:
+
+ # In case there is a custom script, check whether VMware
+ # Tools configuration allows the custom script to run.
+ if special_customization and customscript:
+ defVal = "false"
+ if self._vmware_cust_conf.default_run_post_script:
+ LOG.debug(
+ "Set default value to true due to"
+ " customization configuration."
+ )
+ defVal = "true"
+
+ custScriptConfig = get_tools_config(
+ CONFGROUPNAME_GUESTCUSTOMIZATION,
+ GUESTCUSTOMIZATION_ENABLE_CUST_SCRIPTS,
+ defVal)
+ if custScriptConfig.lower() != "true":
+ # Update the customization status if custom script
+ # is disabled
msg = "Custom script is disabled by VM Administrator"
LOG.debug(msg)
set_customization_status(
@@ -171,7 +186,8 @@ class DataSourceOVF(sources.DataSource):
"Error parsing the customization Config File",
e,
GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
- vmwareImcConfigFilePath)
+ vmwareImcConfigFilePath,
+ self._vmware_cust_conf)
if special_customization:
if customscript:
@@ -183,7 +199,8 @@ class DataSourceOVF(sources.DataSource):
"Error executing pre-customization script",
e,
GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
- vmwareImcConfigFilePath)
+ vmwareImcConfigFilePath,
+ self._vmware_cust_conf)
try:
LOG.debug("Preparing the Network configuration")
@@ -197,7 +214,8 @@ class DataSourceOVF(sources.DataSource):
"Error preparing Network Configuration",
e,
GuestCustEvent.GUESTCUST_EVENT_NETWORK_SETUP_FAILED,
- vmwareImcConfigFilePath)
+ vmwareImcConfigFilePath,
+ self._vmware_cust_conf)
if special_customization:
LOG.debug("Applying password customization")
@@ -215,7 +233,8 @@ class DataSourceOVF(sources.DataSource):
"Error applying Password Configuration",
e,
GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
- vmwareImcConfigFilePath)
+ vmwareImcConfigFilePath,
+ self._vmware_cust_conf)
if customscript:
try:
@@ -228,7 +247,8 @@ class DataSourceOVF(sources.DataSource):
"Error executing post-customization script",
e,
GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
- vmwareImcConfigFilePath)
+ vmwareImcConfigFilePath,
+ self._vmware_cust_conf)
if product_marker:
try:
@@ -240,7 +260,8 @@ class DataSourceOVF(sources.DataSource):
"Error creating marker files",
e,
GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
- vmwareImcConfigFilePath)
+ vmwareImcConfigFilePath,
+ self._vmware_cust_conf)
self._vmware_cust_found = True
found.append('vmware-tools')
@@ -252,6 +273,7 @@ class DataSourceOVF(sources.DataSource):
set_customization_status(
GuestCustStateEnum.GUESTCUST_STATE_DONE,
GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS)
+ set_gc_status(self._vmware_cust_conf, "Successful")
else:
np = [('com.vmware.guestInfo', transport_vmware_guestinfo),
@@ -327,7 +349,7 @@ class DataSourceOVFNet(DataSourceOVF):
def __init__(self, sys_cfg, distro, paths):
DataSourceOVF.__init__(self, sys_cfg, distro, paths)
self.seed_dir = os.path.join(paths.seed_dir, 'ovf-net')
- self.supported_seed_starts = ("http://", "https://", "ftp://")
+ self.supported_seed_starts = ("http://", "https://")
self.vmware_customization_supported = False
@@ -527,15 +549,15 @@ def transport_iso9660(require_iso=True):
def transport_vmware_guestinfo():
rpctool = "vmware-rpctool"
not_found = None
- if not util.which(rpctool):
+ if not subp.which(rpctool):
return not_found
cmd = [rpctool, "info-get guestinfo.ovfEnv"]
try:
- out, _err = util.subp(cmd)
+ out, _err = subp.subp(cmd)
if out:
return out
LOG.debug("cmd %s exited 0 with empty stdout: %s", cmd, out)
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
if e.exit_code != 1:
LOG.warning("%s exited with code %d", rpctool, e.exit_code)
LOG.debug(e)
@@ -647,7 +669,7 @@ def setup_marker_files(markerid, marker_dir):
open(markerfile, 'w').close()
-def _raise_error_status(prefix, error, event, config_file):
+def _raise_error_status(prefix, error, event, config_file, conf):
"""
Raise error and send customization status to the underlying VMware
Virtualization Platform. Also, cleanup the imc directory.
@@ -656,6 +678,7 @@ def _raise_error_status(prefix, error, event, config_file):
set_customization_status(
GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
event)
+ set_gc_status(conf, prefix)
util.del_dir(os.path.dirname(config_file))
raise error
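
In short, the OVF change flips the default for running a customization script from a hard-coded "false" to whatever the customization spec's default_run_post_script flag says, before consulting the VMware Tools setting. A condensed sketch of that gate, using the constant names from the patch rather than their literal values:

    def custom_script_allowed(cust_conf, get_tools_config, group, key):
        # group/key stand in for CONFGROUPNAME_GUESTCUSTOMIZATION and
        # GUESTCUSTOMIZATION_ENABLE_CUST_SCRIPTS; get_tools_config returns
        # the configured value, or the supplied default if unset.
        default = "true" if cust_conf.default_run_post_script else "false"
        return get_tools_config(group, key, default).lower() == "true"
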
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index 02c9a7b8..45481938 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -13,6 +13,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
import collections
+import functools
import os
import pwd
import re
@@ -21,6 +22,7 @@ import string
from cloudinit import log as logging
from cloudinit import net
from cloudinit import sources
+from cloudinit import subp
from cloudinit import util
@@ -59,10 +61,19 @@ class DataSourceOpenNebula(sources.DataSource):
for cdev in candidates:
try:
if os.path.isdir(self.seed_dir):
- results = read_context_disk_dir(cdev, asuser=parseuser)
+ results = read_context_disk_dir(
+ cdev, self.distro, asuser=parseuser
+ )
elif cdev.startswith("/dev"):
- results = util.mount_cb(cdev, read_context_disk_dir,
- data=parseuser)
+ # util.mount_cb only handles passing a single argument
+ # through to the wrapped function, so we have to partially
+ # apply the function to pass in `distro`. See LP: #1884979
+ partially_applied_func = functools.partial(
+ read_context_disk_dir,
+ asuser=parseuser,
+ distro=self.distro,
+ )
+ results = util.mount_cb(cdev, partially_applied_func)
except NonContextDiskDir:
continue
except BrokenContextDiskDir as exc:
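
The LP: #1884979 workaround leans on functools.partial: util.mount_cb forwards only the mountpoint (plus one optional data argument) to its callback, so distro and asuser are pre-bound instead. A standalone demonstration of the pattern with a stub mount_cb:

    import functools

    def read_context_disk_dir(source_dir, distro, asuser=None):
        return (source_dir, distro, asuser)

    def mount_cb(device, callback):
        # Stub for util.mount_cb: mounts `device`, then calls the callback
        # with the mountpoint as the only positional argument.
        return callback('/run/mnt')

    bound = functools.partial(
        read_context_disk_dir, distro='<distro>', asuser='one')
    print(mount_cb('/dev/sr0', bound))   # ('/run/mnt', '<distro>', 'one')
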
@@ -128,10 +139,10 @@ class BrokenContextDiskDir(Exception):
class OpenNebulaNetwork(object):
- def __init__(self, context, system_nics_by_mac=None):
+ def __init__(self, context, distro, system_nics_by_mac=None):
self.context = context
if system_nics_by_mac is None:
- system_nics_by_mac = get_physical_nics_by_mac()
+ system_nics_by_mac = get_physical_nics_by_mac(distro)
self.ifaces = collections.OrderedDict(
[k for k in sorted(system_nics_by_mac.items(),
key=lambda k: net.natural_sort_key(k[1]))])
@@ -334,7 +345,7 @@ def parse_shell_config(content, keylist=None, bash=None, asuser=None,
cmd.extend(bash)
- (output, _error) = util.subp(cmd, data=bcmd)
+ (output, _error) = subp.subp(cmd, data=bcmd)
# exclude vars in bash that change on their own or that we used
excluded = (
@@ -366,7 +377,7 @@ def parse_shell_config(content, keylist=None, bash=None, asuser=None,
return ret
-def read_context_disk_dir(source_dir, asuser=None):
+def read_context_disk_dir(source_dir, distro, asuser=None):
"""
read_context_disk_dir(source_dir):
read source_dir and return a tuple with metadata dict and user-data
@@ -388,18 +399,23 @@ def read_context_disk_dir(source_dir, asuser=None):
if asuser is not None:
try:
pwd.getpwnam(asuser)
- except KeyError:
+ except KeyError as e:
raise BrokenContextDiskDir(
"configured user '{user}' does not exist".format(
- user=asuser))
+ user=asuser)
+ ) from e
try:
path = os.path.join(source_dir, 'context.sh')
content = util.load_file(path)
context = parse_shell_config(content, asuser=asuser)
- except util.ProcessExecutionError as e:
- raise BrokenContextDiskDir("Error processing context.sh: %s" % (e))
+ except subp.ProcessExecutionError as e:
+ raise BrokenContextDiskDir(
+ "Error processing context.sh: %s" % (e)
+ ) from e
except IOError as e:
- raise NonContextDiskDir("Error reading context.sh: %s" % (e))
+ raise NonContextDiskDir(
+ "Error reading context.sh: %s" % (e)
+ ) from e
else:
raise NonContextDiskDir("Missing context.sh")
@@ -417,9 +433,9 @@ def read_context_disk_dir(source_dir, asuser=None):
if ssh_key_var:
lines = context.get(ssh_key_var).splitlines()
- results['metadata']['public-keys'] = [l for l in lines
- if len(l) and not
- l.startswith("#")]
+ results['metadata']['public-keys'] = [
+ line for line in lines if len(line) and not line.startswith("#")
+ ]
# custom hostname -- try hostname or leave cloud-init
# itself create hostname from IP address later
@@ -449,15 +465,17 @@ def read_context_disk_dir(source_dir, asuser=None):
# http://docs.opennebula.org/5.4/operation/references/template.html#context-section
ipaddr_keys = [k for k in context if re.match(r'^ETH\d+_IP.*$', k)]
if ipaddr_keys:
- onet = OpenNebulaNetwork(context)
+ onet = OpenNebulaNetwork(context, distro)
results['network-interfaces'] = onet.gen_conf()
return results
-def get_physical_nics_by_mac():
+def get_physical_nics_by_mac(distro):
devs = net.get_interfaces_by_mac()
- return dict([(m, n) for m, n in devs.items() if net.is_physical(n)])
+ return dict(
+ [(m, n) for m, n in devs.items() if distro.networking.is_physical(n)]
+ )
# Legacy: Must be present in case we load an old pkl object
diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
index 7a5e71b6..d4b43f44 100644
--- a/cloudinit/sources/DataSourceOpenStack.py
+++ b/cloudinit/sources/DataSourceOpenStack.py
@@ -29,7 +29,10 @@ DMI_PRODUCT_NOVA = 'OpenStack Nova'
DMI_PRODUCT_COMPUTE = 'OpenStack Compute'
VALID_DMI_PRODUCT_NAMES = [DMI_PRODUCT_NOVA, DMI_PRODUCT_COMPUTE]
DMI_ASSET_TAG_OPENTELEKOM = 'OpenTelekomCloud'
-VALID_DMI_ASSET_TAGS = [DMI_ASSET_TAG_OPENTELEKOM]
+# See github.com/sapcc/helm-charts/blob/master/openstack/nova/values.yaml
+# -> compute.defaults.vmware.smbios_asset_tag for this value
+DMI_ASSET_TAG_SAPCCLOUD = 'SAP CCloud VM'
+VALID_DMI_ASSET_TAGS = [DMI_ASSET_TAG_OPENTELEKOM, DMI_ASSET_TAG_SAPCCLOUD]
class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
@@ -191,10 +194,10 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
'timeout': url_params.timeout_seconds})
except openstack.NonReadable as e:
raise sources.InvalidMetaDataException(str(e))
- except (openstack.BrokenMetadata, IOError):
+ except (openstack.BrokenMetadata, IOError) as e:
msg = 'Broken metadata address {addr}'.format(
addr=self.metadata_address)
- raise sources.InvalidMetaDataException(msg)
+ raise sources.InvalidMetaDataException(msg) from e
return result
diff --git a/cloudinit/sources/DataSourceOracle.py b/cloudinit/sources/DataSourceOracle.py
index eec87403..20d6487d 100644
--- a/cloudinit/sources/DataSourceOracle.py
+++ b/cloudinit/sources/DataSourceOracle.py
@@ -1,30 +1,31 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""Datasource for Oracle (OCI/Oracle Cloud Infrastructure)
-OCI provides a OpenStack like metadata service which provides only
-'2013-10-17' and 'latest' versions..
-
Notes:
- * This datasource does not support the OCI-Classic. OCI-Classic
- provides an EC2 lookalike metadata service.
- * The uuid provided in DMI data is not the same as the meta-data provided
+ * This datasource does not support OCI Classic. OCI Classic provides an EC2
+ lookalike metadata service.
+ * The UUID provided in DMI data is not the same as the meta-data provided
instance-id, but has an equivalent lifespan.
* We do need to support upgrade from an instance that cloud-init
identified as OpenStack.
- * Both bare-metal and vms use iscsi root
- * Both bare-metal and vms provide chassis-asset-tag of OracleCloud.com
+ * Bare metal instances use iSCSI root, virtual machine instances do not.
+ * Both bare metal and virtual machine instances provide a chassis-asset-tag of
+ OracleCloud.com.
"""
-from cloudinit.url_helper import combine_url, readurl, UrlError
-from cloudinit.net import dhcp, get_interfaces_by_mac, is_netfail_master
-from cloudinit import net
-from cloudinit import sources
-from cloudinit import util
-from cloudinit.net import cmdline
-from cloudinit import log as logging
+import base64
+from collections import namedtuple
+from contextlib import suppress as noop
-import json
-import re
+from cloudinit import log as logging
+from cloudinit import net, sources, util
+from cloudinit.net import (
+ cmdline,
+ dhcp,
+ get_interfaces_by_mac,
+ is_netfail_master,
+)
+from cloudinit.url_helper import UrlError, readurl
LOG = logging.getLogger(__name__)
@@ -33,79 +34,13 @@ BUILTIN_DS_CONFIG = {
'configure_secondary_nics': False,
}
CHASSIS_ASSET_TAG = "OracleCloud.com"
-METADATA_ENDPOINT = "http://169.254.169.254/openstack/"
-VNIC_METADATA_URL = 'http://169.254.169.254/opc/v1/vnics/'
+METADATA_ROOT = "http://169.254.169.254/opc/v{version}/"
+METADATA_PATTERN = METADATA_ROOT + "{path}/"
# https://docs.cloud.oracle.com/iaas/Content/Network/Troubleshoot/connectionhang.htm#Overview,
# indicates that an MTU of 9000 is used within OCI
MTU = 9000
-
-def _add_network_config_from_opc_imds(network_config):
- """
- Fetch data from Oracle's IMDS, generate secondary NIC config, merge it.
-
- The primary NIC configuration should not be modified based on the IMDS
- values, as it should continue to be configured for DHCP. As such, this
- takes an existing network_config dict which is expected to have the primary
- NIC configuration already present. It will mutate the given dict to
- include the secondary VNICs.
-
- :param network_config:
- A v1 or v2 network config dict with the primary NIC already configured.
- This dict will be mutated.
-
- :raises:
- Exceptions are not handled within this function. Likely exceptions are
- those raised by url_helper.readurl (if communicating with the IMDS
- fails), ValueError/JSONDecodeError (if the IMDS returns invalid JSON),
- and KeyError/IndexError (if the IMDS returns valid JSON with unexpected
- contents).
- """
- resp = readurl(VNIC_METADATA_URL)
- vnics = json.loads(str(resp))
-
- if 'nicIndex' in vnics[0]:
- # TODO: Once configure_secondary_nics defaults to True, lower the level
- # of this log message. (Currently, if we're running this code at all,
- # someone has explicitly opted-in to secondary VNIC configuration, so
- # we should warn them that it didn't happen. Once it's default, this
- # would be emitted on every Bare Metal Machine launch, which means INFO
- # or DEBUG would be more appropriate.)
- LOG.warning(
- 'VNIC metadata indicates this is a bare metal machine; skipping'
- ' secondary VNIC configuration.'
- )
- return
-
- interfaces_by_mac = get_interfaces_by_mac()
-
- for vnic_dict in vnics[1:]:
- # We skip the first entry in the response because the primary interface
- # is already configured by iSCSI boot; applying configuration from the
- # IMDS is not required.
- mac_address = vnic_dict['macAddr'].lower()
- if mac_address not in interfaces_by_mac:
- LOG.debug('Interface with MAC %s not found; skipping', mac_address)
- continue
- name = interfaces_by_mac[mac_address]
-
- if network_config['version'] == 1:
- subnet = {
- 'type': 'static',
- 'address': vnic_dict['privateIp'],
- }
- network_config['config'].append({
- 'name': name,
- 'type': 'physical',
- 'mac_address': mac_address,
- 'mtu': MTU,
- 'subnets': [subnet],
- })
- elif network_config['version'] == 2:
- network_config['ethernets'][name] = {
- 'addresses': [vnic_dict['privateIp']],
- 'mtu': MTU, 'dhcp4': False, 'dhcp6': False,
- 'match': {'macaddress': mac_address}}
+OpcMetadata = namedtuple("OpcMetadata", "version instance_data vnics_data")
def _ensure_netfailover_safe(network_config):
@@ -174,6 +109,7 @@ class DataSourceOracle(sources.DataSource):
def __init__(self, sys_cfg, *args, **kwargs):
super(DataSourceOracle, self).__init__(sys_cfg, *args, **kwargs)
+ self._vnics_data = None
self.ds_cfg = util.mergemanydict([
util.get_cfg_by_path(sys_cfg, ['datasource', self.dsname], {}),
@@ -187,54 +123,46 @@ class DataSourceOracle(sources.DataSource):
if not self._is_platform_viable():
return False
+ self.system_uuid = _read_system_uuid()
+
# network may be configured if iscsi root. If that is the case
# then read_initramfs_config will return non-None.
- if _is_iscsi_root():
- data = self.crawl_metadata()
- else:
- with dhcp.EphemeralDHCPv4(net.find_fallback_nic()):
- data = self.crawl_metadata()
-
- self._crawled_metadata = data
- vdata = data['2013-10-17']
-
- self.userdata_raw = vdata.get('user_data')
- self.system_uuid = vdata['system_uuid']
-
- vd = vdata.get('vendor_data')
- if vd:
- self.vendordata_pure = vd
- try:
- self.vendordata_raw = sources.convert_vendordata(vd)
- except ValueError as e:
- LOG.warning("Invalid content in vendor-data: %s", e)
- self.vendordata_raw = None
-
- mdcopies = ('public_keys',)
- md = dict([(k, vdata['meta_data'].get(k))
- for k in mdcopies if k in vdata['meta_data']])
-
- mdtrans = (
- # oracle meta_data.json name, cloudinit.datasource.metadata name
- ('availability_zone', 'availability-zone'),
- ('hostname', 'local-hostname'),
- ('launch_index', 'launch-index'),
- ('uuid', 'instance-id'),
+ fetch_vnics_data = self.ds_cfg.get(
+ 'configure_secondary_nics',
+ BUILTIN_DS_CONFIG["configure_secondary_nics"]
+ )
+ network_context = noop()
+ if not _is_iscsi_root():
+ network_context = dhcp.EphemeralDHCPv4(net.find_fallback_nic())
+ with network_context:
+ fetched_metadata = read_opc_metadata(
+ fetch_vnics_data=fetch_vnics_data
+ )
+
+ data = self._crawled_metadata = fetched_metadata.instance_data
+ self.metadata_address = METADATA_ROOT.format(
+ version=fetched_metadata.version
)
- for dsname, ciname in mdtrans:
- if dsname in vdata['meta_data']:
- md[ciname] = vdata['meta_data'][dsname]
+ self._vnics_data = fetched_metadata.vnics_data
+
+ self.metadata = {
+ "availability-zone": data["ociAdName"],
+ "instance-id": data["id"],
+ "launch-index": 0,
+ "local-hostname": data["hostname"],
+ "name": data["displayName"],
+ }
+
+ if "metadata" in data:
+ user_data = data["metadata"].get("user_data")
+ if user_data:
+ self.userdata_raw = base64.b64decode(user_data)
+ self.metadata["public_keys"] = data["metadata"].get(
+ "ssh_authorized_keys"
+ )
- self.metadata = md
return True
- def crawl_metadata(self):
- return read_metadata()
-
- def _get_subplatform(self):
- """Return the subplatform metadata source details."""
- return 'metadata (%s)' % METADATA_ENDPOINT
-
def check_instance_id(self, sys_cfg):
"""quickly check (local only) if self.instance_id is still valid
@@ -248,15 +176,9 @@ class DataSourceOracle(sources.DataSource):
@property
def network_config(self):
"""Network config is read from initramfs provided files
- If none is present, then we fall back to fallback configuration.
- One thing to note here is that this method is not currently
- considered at all if there is is kernel/initramfs provided
- data. In that case, stages considers that the cmdline data
- overrides datasource provided data and does not consult here.
-
- We nonetheless return cmdline provided config if present
- and fallback to generate fallback."""
+ If none is present, then we fall back to fallback configuration.
+ """
if self._network_config == sources.UNSET:
# this is v1
self._network_config = cmdline.read_initramfs_config()
@@ -265,14 +187,18 @@ class DataSourceOracle(sources.DataSource):
# this is now v2
self._network_config = self.distro.generate_fallback_config()
- if self.ds_cfg.get('configure_secondary_nics'):
+ if self.ds_cfg.get(
+ 'configure_secondary_nics',
+ BUILTIN_DS_CONFIG["configure_secondary_nics"]
+ ):
try:
- # Mutate self._network_config to include secondary VNICs
- _add_network_config_from_opc_imds(self._network_config)
+ # Mutate self._network_config to include secondary
+ # VNICs
+ self._add_network_config_from_opc_imds()
except Exception:
util.logexc(
LOG,
- "Failed to fetch secondary network configuration!")
+ "Failed to parse secondary network configuration!")
# we need to verify that the nic selected is not a netfail over
# device and, if it is a netfail master, then we need to avoid
@@ -281,6 +207,70 @@ class DataSourceOracle(sources.DataSource):
return self._network_config
+ def _add_network_config_from_opc_imds(self):
+ """Generate secondary NIC config from IMDS and merge it.
+
+ The primary NIC configuration should not be modified based on the IMDS
+ values, as it should continue to be configured for DHCP. As such, this
+ uses the instance's network config dict which is expected to have the
+ primary NIC configuration already present.
+ It will mutate the network config to include the secondary VNICs.
+
+ :raises:
+ Exceptions are not handled within this function. Likely
+ exceptions are KeyError/IndexError
+ (if the IMDS returns valid JSON with unexpected contents).
+ """
+ if self._vnics_data is None:
+ LOG.warning(
+ "Secondary NIC data is UNSET but should not be")
+ return
+
+ if 'nicIndex' in self._vnics_data[0]:
+ # TODO: Once configure_secondary_nics defaults to True, lower the
+ # level of this log message. (Currently, if we're running this
+ # code at all, someone has explicitly opted-in to secondary
+ # VNIC configuration, so we should warn them that it didn't
+ # happen. Once it's default, this would be emitted on every Bare
+ # Metal Machine launch, which means INFO or DEBUG would be more
+ # appropriate.)
+ LOG.warning(
+ 'VNIC metadata indicates this is a bare metal machine; '
+ 'skipping secondary VNIC configuration.'
+ )
+ return
+
+ interfaces_by_mac = get_interfaces_by_mac()
+
+ for vnic_dict in self._vnics_data[1:]:
+ # We skip the first entry in the response because the primary
+ # interface is already configured by iSCSI boot; applying
+ # configuration from the IMDS is not required.
+ mac_address = vnic_dict['macAddr'].lower()
+ if mac_address not in interfaces_by_mac:
+ LOG.debug('Interface with MAC %s not found; skipping',
+ mac_address)
+ continue
+ name = interfaces_by_mac[mac_address]
+
+ if self._network_config['version'] == 1:
+ subnet = {
+ 'type': 'static',
+ 'address': vnic_dict['privateIp'],
+ }
+ self._network_config['config'].append({
+ 'name': name,
+ 'type': 'physical',
+ 'mac_address': mac_address,
+ 'mtu': MTU,
+ 'subnets': [subnet],
+ })
+ elif self._network_config['version'] == 2:
+ self._network_config['ethernets'][name] = {
+ 'addresses': [vnic_dict['privateIp']],
+ 'mtu': MTU, 'dhcp4': False, 'dhcp6': False,
+ 'match': {'macaddress': mac_address}}
+
def _read_system_uuid():
sys_uuid = util.read_dmi_data('system-uuid')
@@ -296,72 +286,46 @@ def _is_iscsi_root():
return bool(cmdline.read_initramfs_config())
-def _load_index(content):
- """Return a list entries parsed from content.
-
- OpenStack's metadata service returns a newline delimited list
- of items. Oracle's implementation has html formatted list of links.
- The parser here just grabs targets from <a href="target">
- and throws away "../".
-
- Oracle has accepted that to be buggy and may fix in the future
- to instead return a '\n' delimited plain text list. This function
- will continue to work if that change is made."""
- if not content.lower().startswith("<html>"):
- return content.splitlines()
- items = re.findall(
- r'href="(?P<target>[^"]*)"', content, re.MULTILINE | re.IGNORECASE)
- return [i for i in items if not i.startswith(".")]
-
+def read_opc_metadata(*, fetch_vnics_data: bool = False):
+ """Fetch metadata from the /opc/ routes.
-def read_metadata(endpoint_base=METADATA_ENDPOINT, sys_uuid=None,
- version='2013-10-17'):
- """Read metadata, return a dictionary.
+ :return:
+ A namedtuple containing:
+ The metadata version as an integer
+ The JSON-decoded value of the instance data endpoint on the IMDS
+ The JSON-decoded value of the vnics data endpoint if
+ `fetch_vnics_data` is True, else None
- Each path listed in the index will be represented in the dictionary.
- If the path ends in .json, then the content will be decoded and
- populated into the dictionary.
-
- The system uuid (/sys/class/dmi/id/product_uuid) is also populated.
- Example: given paths = ('user_data', 'meta_data.json')
- This would return:
- {version: {'user_data': b'blob', 'meta_data': json.loads(blob.decode())
- 'system_uuid': '3b54f2e0-3ab2-458d-b770-af9926eee3b2'}}
"""
- endpoint = combine_url(endpoint_base, version) + "/"
- if sys_uuid is None:
- sys_uuid = _read_system_uuid()
- if not sys_uuid:
- raise sources.BrokenMetadata("Failed to read system uuid.")
-
+ # Per Oracle, there are short windows (measured in milliseconds) throughout
+ # an instance's lifetime where the IMDS is being updated and may 404 as a
+ # result. To work around these windows, we retry a couple of times.
+ retries = 2
+
+ def _fetch(metadata_version: int, path: str) -> dict:
+ headers = {
+ "Authorization": "Bearer Oracle"} if metadata_version > 1 else None
+ return readurl(
+ url=METADATA_PATTERN.format(version=metadata_version, path=path),
+ headers=headers,
+ retries=retries,
+ )._response.json()
+
+ metadata_version = 2
try:
- resp = readurl(endpoint)
- if not resp.ok():
- raise sources.BrokenMetadata(
- "Bad response from %s: %s" % (endpoint, resp.code))
- except UrlError as e:
- raise sources.BrokenMetadata(
- "Failed to read index at %s: %s" % (endpoint, e))
-
- entries = _load_index(resp.contents.decode('utf-8'))
- LOG.debug("index url %s contained: %s", endpoint, entries)
-
- # meta_data.json is required.
- mdj = 'meta_data.json'
- if mdj not in entries:
- raise sources.BrokenMetadata(
- "Required field '%s' missing in index at %s" % (mdj, endpoint))
-
- ret = {'system_uuid': sys_uuid}
- for path in entries:
- response = readurl(combine_url(endpoint, path))
- if path.endswith(".json"):
- ret[path.rpartition(".")[0]] = (
- json.loads(response.contents.decode('utf-8')))
- else:
- ret[path] = response.contents
-
- return {version: ret}
+ instance_data = _fetch(metadata_version, path="instance")
+ except UrlError:
+ metadata_version = 1
+ instance_data = _fetch(metadata_version, path="instance")
+
+ vnics_data = None
+ if fetch_vnics_data:
+ try:
+ vnics_data = _fetch(metadata_version, path="vnics")
+ except UrlError:
+ util.logexc(LOG,
+ "Failed to fetch secondary network configuration!")
+ return OpcMetadata(metadata_version, instance_data, vnics_data)
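
Putting the pieces together: the rewritten datasource asks the /opc/ IMDS for v2 first (which requires the Authorization header), drops back to v1 on UrlError, and hands callers a single namedtuple. A usage sketch, assuming it runs on an OCI instance:

    from cloudinit.sources.DataSourceOracle import read_opc_metadata

    md = read_opc_metadata(fetch_vnics_data=True)
    print(md.version)                    # 2 normally; 1 only after fallback
    print(md.instance_data['hostname'])  # decoded JSON from .../instance/
    print(md.vnics_data)                 # decoded JSON from .../vnics/,
                                         # or None if the fetch failed
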
# Used to match classes to dependencies
@@ -377,17 +341,21 @@ def get_datasource_list(depends):
if __name__ == "__main__":
import argparse
- import os
-
- parser = argparse.ArgumentParser(description='Query Oracle Cloud Metadata')
- parser.add_argument("--endpoint", metavar="URL",
- help="The url of the metadata service.",
- default=METADATA_ENDPOINT)
- args = parser.parse_args()
- sys_uuid = "uuid-not-available-not-root" if os.geteuid() != 0 else None
-
- data = read_metadata(endpoint_base=args.endpoint, sys_uuid=sys_uuid)
- data['is_platform_viable'] = _is_platform_viable()
- print(util.json_dumps(data))
+
+ description = """
+ Query Oracle Cloud metadata and emit a JSON object with two keys:
+ `read_opc_metadata` and `_is_platform_viable`. The values of each are
+ the return values of the corresponding functions defined in
+ DataSourceOracle.py."""
+ parser = argparse.ArgumentParser(description=description)
+ parser.parse_args()
+ print(
+ util.json_dumps(
+ {
+ "read_opc_metadata": read_opc_metadata(),
+ "_is_platform_viable": _is_platform_viable(),
+ }
+ )
+ )
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/DataSourceRbxCloud.py b/cloudinit/sources/DataSourceRbxCloud.py
index c3cd5c79..e064c8d6 100644
--- a/cloudinit/sources/DataSourceRbxCloud.py
+++ b/cloudinit/sources/DataSourceRbxCloud.py
@@ -15,6 +15,7 @@ import os.path
from cloudinit import log as logging
from cloudinit import sources
+from cloudinit import subp
from cloudinit import util
from cloudinit.event import EventType
@@ -43,11 +44,11 @@ def int2ip(addr):
def _sub_arp(cmd):
"""
- Uses the prefered cloud-init subprocess def of util.subp
+ Uses the preferred cloud-init subprocess def of subp.subp
and runs arping. Breaking this to a separate function
for later use in mocking and unittests
"""
- return util.subp(['arping'] + cmd)
+ return subp.subp(['arping'] + cmd)
def gratuitous_arp(items, distro):
@@ -55,26 +56,32 @@ def gratuitous_arp(items, distro):
if distro.name in ['fedora', 'centos', 'rhel']:
source_param = '-s'
for item in items:
- _sub_arp([
- '-c', '2',
- source_param, item['source'],
- item['destination']
- ])
+ try:
+ _sub_arp([
+ '-c', '2',
+ source_param, item['source'],
+ item['destination']
+ ])
+ except subp.ProcessExecutionError as error:
+ # warn rather than fail: the system can keep working even if
+ # the gratuitous ARP is not sent - remote ARP caches will
+ # simply age out on their own
+ LOG.warning('Failed to arping from "%s" to "%s": %s',
+ item['source'], item['destination'], error)
def get_md():
rbx_data = None
- devices = [
- dev
- for dev, bdata in util.blkid().items()
- if bdata.get('LABEL', '').upper() == 'CLOUDMD'
- ]
+ devices = set(
+ util.find_devs_with('LABEL=CLOUDMD') +
+ util.find_devs_with('LABEL=cloudmd')
+ )
for device in devices:
try:
rbx_data = util.mount_cb(
device=device,
callback=read_user_data_callback,
- mtype=['vfat', 'fat']
+ mtype=['vfat', 'fat', 'msdosfs']
)
if rbx_data:
break
@@ -182,7 +189,6 @@ def read_user_data_callback(mount_dir):
'passwd': hash,
'lock_passwd': False,
'ssh_authorized_keys': ssh_keys,
- 'shell': '/bin/bash'
}
},
'network_config': network,
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index cf676504..f1f903bc 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -33,6 +33,7 @@ import socket
from cloudinit import log as logging
from cloudinit import serial
from cloudinit import sources
+from cloudinit import subp
from cloudinit import util
from cloudinit.event import EventType
@@ -412,7 +413,9 @@ class JoyentMetadataClient(object):
response.append(byte)
except OSError as exc:
if exc.errno == errno.EAGAIN:
- raise JoyentMetadataTimeoutException(msg % as_ascii())
+ raise JoyentMetadataTimeoutException(
+ msg % as_ascii()
+ ) from exc
raise
def _write(self, msg):
@@ -696,9 +699,9 @@ def identify_file(content_f):
cmd = ["file", "--brief", "--mime-type", content_f]
f_type = None
try:
- (f_type, _err) = util.subp(cmd)
+ (f_type, _err) = subp.subp(cmd)
LOG.debug("script %s mime type is %s", content_f, f_type)
- except util.ProcessExecutionError as e:
+ except subp.ProcessExecutionError as e:
util.logexc(
LOG, ("Failed to identify script type for %s" % content_f, e))
return None if f_type is None else f_type.strip()
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index dd93cfd8..c4d60fff 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -78,7 +78,6 @@ class DataSourceNotFoundException(Exception):
class InvalidMetaDataException(Exception):
"""Raised when metadata is broken, unavailable or disabled."""
- pass
def process_instance_metadata(metadata, key_path='', sensitive_keys=()):
@@ -89,26 +88,26 @@ def process_instance_metadata(metadata, key_path='', sensitive_keys=()):
@return Dict copy of processed metadata.
"""
md_copy = copy.deepcopy(metadata)
- md_copy['base64_encoded_keys'] = []
- md_copy['sensitive_keys'] = []
+ base64_encoded_keys = []
+ sens_keys = []
for key, val in metadata.items():
if key_path:
sub_key_path = key_path + '/' + key
else:
sub_key_path = key
if key in sensitive_keys or sub_key_path in sensitive_keys:
- md_copy['sensitive_keys'].append(sub_key_path)
+ sens_keys.append(sub_key_path)
if isinstance(val, str) and val.startswith('ci-b64:'):
- md_copy['base64_encoded_keys'].append(sub_key_path)
+ base64_encoded_keys.append(sub_key_path)
md_copy[key] = val.replace('ci-b64:', '')
if isinstance(val, dict):
return_val = process_instance_metadata(
val, sub_key_path, sensitive_keys)
- md_copy['base64_encoded_keys'].extend(
- return_val.pop('base64_encoded_keys'))
- md_copy['sensitive_keys'].extend(
- return_val.pop('sensitive_keys'))
+ base64_encoded_keys.extend(return_val.pop('base64_encoded_keys'))
+ sens_keys.extend(return_val.pop('sensitive_keys'))
md_copy[key] = return_val
+ md_copy['base64_encoded_keys'] = sorted(base64_encoded_keys)
+ md_copy['sensitive_keys'] = sorted(sens_keys)
return md_copy
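
To make the reworked bookkeeping concrete, here is what process_instance_metadata() produces for a small made-up input:

    from cloudinit.sources import process_instance_metadata

    metadata = {
        'ds': {
            'user_data': 'ci-b64:aGVsbG8=',
            'security-credentials': {'role': 'token'},
        },
    }
    out = process_instance_metadata(
        metadata, sensitive_keys=('security-credentials',))
    # out['base64_encoded_keys'] == ['ds/user_data']
    # out['sensitive_keys']      == ['ds/security-credentials']
    # out['ds']['user_data']     == 'aGVsbG8='  (ci-b64: prefix stripped)
    # Both key lists are now sorted, keeping instance-data.json stable
    # across runs.
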
@@ -193,7 +192,7 @@ class DataSource(metaclass=abc.ABCMeta):
# N-tuple of keypaths or keynames redact from instance-data.json for
# non-root users
- sensitive_metadata_keys = ('security-credentials',)
+ sensitive_metadata_keys = ('merged_cfg', 'security-credentials',)
def __init__(self, sys_cfg, distro, paths, ud_proc=None):
self.sys_cfg = sys_cfg
@@ -218,14 +217,15 @@ class DataSource(metaclass=abc.ABCMeta):
def __str__(self):
return type_utils.obj_name(self)
- def _get_standardized_metadata(self):
+ def _get_standardized_metadata(self, instance_data):
"""Return a dictionary of standardized metadata keys."""
local_hostname = self.get_hostname()
instance_id = self.get_instance_id()
availability_zone = self.availability_zone
# In the event of upgrade from existing cloudinit, pickled datasource
# will not contain these new class attributes. So we need to recrawl
- # metadata to discover that content.
+ # metadata to discover that content
+ sysinfo = instance_data["sys_info"]
return {
'v1': {
'_beta_keys': ['subplatform'],
@@ -233,14 +233,22 @@ class DataSource(metaclass=abc.ABCMeta):
'availability_zone': availability_zone,
'cloud-name': self.cloud_name,
'cloud_name': self.cloud_name,
+ 'distro': sysinfo["dist"][0],
+ 'distro_version': sysinfo["dist"][1],
+ 'distro_release': sysinfo["dist"][2],
'platform': self.platform_type,
'public_ssh_keys': self.get_public_ssh_keys(),
+ 'python_version': sysinfo["python"],
'instance-id': instance_id,
'instance_id': instance_id,
+ 'kernel_release': sysinfo["uname"][2],
'local-hostname': local_hostname,
'local_hostname': local_hostname,
+ 'machine': sysinfo["uname"][4],
'region': self.region,
- 'subplatform': self.subplatform}}
+ 'subplatform': self.subplatform,
+ 'system_platform': sysinfo["platform"],
+ 'variant': sysinfo["variant"]}}
def clear_cached_attrs(self, attr_defaults=()):
"""Reset any cached metadata attributes to datasource defaults.
@@ -299,9 +307,15 @@ class DataSource(metaclass=abc.ABCMeta):
ec2_metadata = getattr(self, 'ec2_metadata')
if ec2_metadata != UNSET:
instance_data['ds']['ec2_metadata'] = ec2_metadata
- instance_data.update(
- self._get_standardized_metadata())
instance_data['ds']['_doc'] = EXPERIMENTAL_TEXT
+ # Add merged cloud.cfg and sys info for jinja templates and cli query
+ instance_data['merged_cfg'] = copy.deepcopy(self.sys_cfg)
+ instance_data['merged_cfg']['_doc'] = (
+ 'Merged cloud-init system config from /etc/cloud/cloud.cfg and'
+ ' /etc/cloud/cloud.cfg.d/')
+ instance_data['sys_info'] = util.system_info()
+ instance_data.update(
+ self._get_standardized_metadata(instance_data))
try:
# Process content base64encoding unserializable values
content = util.json_dumps(instance_data)
@@ -315,12 +329,12 @@ class DataSource(metaclass=abc.ABCMeta):
except UnicodeDecodeError as e:
LOG.warning('Error persisting instance-data.json: %s', str(e))
return False
- json_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_FILE)
- write_json(json_file, processed_data) # World readable
json_sensitive_file = os.path.join(self.paths.run_dir,
INSTANCE_JSON_SENSITIVE_FILE)
- write_json(json_sensitive_file,
- redact_sensitive_keys(processed_data), mode=0o600)
+ write_json(json_sensitive_file, processed_data, mode=0o600)
+ json_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_FILE)
+ # World readable
+ write_json(json_file, redact_sensitive_keys(processed_data))
return True
def _get_data(self):
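
The reordering above is easy to misread as a no-op, so to spell it out: the 0o600 sensitive file now receives the full processed data, while the world-readable instance-data.json gets the redacted copy, and the newly added merged_cfg key is covered by sensitive_metadata_keys. A sketch of the resulting on-disk contract (run_dir is typically /run/cloud-init):

    # instance-data-sensitive.json  mode 0600, full data, root only
    # instance-data.json            world readable, sensitive keys replaced
    #                               by a redaction placeholder
    import json

    with open('/run/cloud-init/instance-data.json') as f:
        world_readable = json.load(f)
    # merged_cfg and any security-credentials content are redacted here;
    # root can read the unredacted values from the sensitive file.
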
@@ -496,7 +510,6 @@ class DataSource(metaclass=abc.ABCMeta):
(e.g. 'ssh-rsa') and key_value is the key itself
(e.g. 'AAAAB3NzaC1y...').
"""
- pass
def _remap_device(self, short_name):
# LP: #611137
@@ -587,7 +600,7 @@ class DataSource(metaclass=abc.ABCMeta):
# if there is an ipv4 address in 'local-hostname', then
# make up a hostname (LP: #475354) in format ip-xx.xx.xx.xx
lhost = self.metadata['local-hostname']
- if util.is_ipv4(lhost):
+ if net.is_ipv4_address(lhost):
toks = []
if resolve_ip:
toks = util.gethostbyaddr(lhost)
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index fc760581..b968a96f 100755
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -1,5 +1,5 @@
# This file is part of cloud-init. See LICENSE file for license information.
-
+import base64
import json
import logging
import os
@@ -8,13 +8,16 @@ import socket
import struct
import time
import textwrap
+import zlib
+from cloudinit.settings import CFG_BUILTIN
from cloudinit.net import dhcp
from cloudinit import stages
from cloudinit import temp_utils
from contextlib import contextmanager
from xml.etree import ElementTree
+from cloudinit import subp
from cloudinit import url_helper
from cloudinit import util
from cloudinit import version
@@ -32,7 +35,14 @@ DEFAULT_WIRESERVER_ENDPOINT = "a8:3f:81:10"
BOOT_EVENT_TYPE = 'boot-telemetry'
SYSTEMINFO_EVENT_TYPE = 'system-info'
DIAGNOSTIC_EVENT_TYPE = 'diagnostic'
-
+COMPRESSED_EVENT_TYPE = 'compressed'
+# Maximum number of bytes of the cloud-init.log file that can be dumped to KVP
+# at once. This number is based on the analysis done on a large sample of
+# cloud-init.log files where the P95 of the file sizes was 537KB and the time
+# consumed to dump 500KB file was (P95:76, P99:233, P99.9:1170) in ms
+MAX_LOG_TO_KVP_LENGTH = 512000
+# Marker file to indicate whether cloud-init.log is pushed to KVP
+LOG_PUSHED_TO_KVP_MARKER_FILE = '/var/lib/cloud/data/log_pushed_to_kvp'
azure_ds_reporter = events.ReportEventStack(
name="azure-ds",
description="initialize reporter for azure ds",
@@ -64,13 +74,15 @@ def is_byte_swapped(previous_id, current_id):
return ''.join(dd)
parts = current_id.split('-')
- swapped_id = '-'.join([
+ swapped_id = '-'.join(
+ [
swap_bytestring(parts[0]),
swap_bytestring(parts[1]),
swap_bytestring(parts[2]),
parts[3],
parts[4]
- ])
+ ]
+ )
return previous_id == swapped_id
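
For context on is_byte_swapped(): SMBIOS stores the first three UUID fields little-endian, so the same Azure instance id can surface with those fields byte-reversed. A worked example with illustrative values:

    previous_id = '544cdfd0-cb4e-4b4a-9954-5bdf3ed5c3b8'
    current_id = 'd0df4c54-4ecb-4a4b-9954-5bdf3ed5c3b8'
    # Fields 1-3 are byte-swapped pairs (544cdfd0 <-> d0df4c54,
    # cb4e <-> 4ecb, 4b4a <-> 4a4b); fields 4-5 match as-is, so
    # is_byte_swapped(previous_id, current_id) returns True.
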
@@ -86,11 +98,13 @@ def get_boot_telemetry():
LOG.debug("Collecting boot telemetry")
try:
kernel_start = float(time.time()) - float(util.uptime())
- except ValueError:
- raise RuntimeError("Failed to determine kernel start timestamp")
+ except ValueError as e:
+ raise RuntimeError(
+ "Failed to determine kernel start timestamp"
+ ) from e
try:
- out, _ = util.subp(['/bin/systemctl',
+ out, _ = subp.subp(['/bin/systemctl',
'show', '-p',
'UserspaceTimestampMonotonic'],
capture=True)
@@ -103,16 +117,17 @@ def get_boot_telemetry():
"UserspaceTimestampMonotonic from systemd")
user_start = kernel_start + (float(tsm) / 1000000)
- except util.ProcessExecutionError as e:
- raise RuntimeError("Failed to get UserspaceTimestampMonotonic: %s"
- % e)
+ except subp.ProcessExecutionError as e:
+ raise RuntimeError(
+ "Failed to get UserspaceTimestampMonotonic: %s" % e
+ ) from e
except ValueError as e:
- raise RuntimeError("Failed to parse "
- "UserspaceTimestampMonotonic from systemd: %s"
- % e)
+ raise RuntimeError(
+ "Failed to parse UserspaceTimestampMonotonic from systemd: %s" % e
+ ) from e
try:
- out, _ = util.subp(['/bin/systemctl', 'show',
+ out, _ = subp.subp(['/bin/systemctl', 'show',
'cloud-init-local', '-p',
'InactiveExitTimestampMonotonic'],
capture=True)
@@ -124,13 +139,15 @@ def get_boot_telemetry():
"InactiveExitTimestampMonotonic from systemd")
cloudinit_activation = kernel_start + (float(tsm) / 1000000)
- except util.ProcessExecutionError as e:
- raise RuntimeError("Failed to get InactiveExitTimestampMonotonic: %s"
- % e)
+ except subp.ProcessExecutionError as e:
+ raise RuntimeError(
+ "Failed to get InactiveExitTimestampMonotonic: %s" % e
+ ) from e
except ValueError as e:
- raise RuntimeError("Failed to parse "
- "InactiveExitTimestampMonotonic from systemd: %s"
- % e)
+ raise RuntimeError(
+ "Failed to parse InactiveExitTimestampMonotonic from systemd: %s"
+ % e
+ ) from e
evt = events.ReportingEvent(
BOOT_EVENT_TYPE, 'boot-telemetry',
@@ -174,6 +191,49 @@ def report_diagnostic_event(str):
return evt
+def report_compressed_event(event_name, event_content):
+ """Report a compressed event"""
+ compressed_data = base64.encodebytes(zlib.compress(event_content))
+ event_data = {"encoding": "gz+b64",
+ "data": compressed_data.decode('ascii')}
+ evt = events.ReportingEvent(
+ COMPRESSED_EVENT_TYPE, event_name,
+ json.dumps(event_data),
+ events.DEFAULT_EVENT_ORIGIN)
+ events.report_event(evt,
+ excluded_handler_types={"log", "print", "webhook"})
+
+ # return the event for unit testing purpose
+ return evt
+
+
+@azure_ds_telemetry_reporter
+def push_log_to_kvp(file_name=CFG_BUILTIN['def_log_file']):
+ """Push a portion of cloud-init.log file or the whole file to KVP
+ based on the file size.
+ If called more than once, it skips pushing the log file to KVP again."""
+
+ log_pushed_to_kvp = bool(os.path.isfile(LOG_PUSHED_TO_KVP_MARKER_FILE))
+ if log_pushed_to_kvp:
+ report_diagnostic_event("cloud-init.log is already pushed to KVP")
+ return
+
+ LOG.debug("Dumping cloud-init.log file to KVP")
+ try:
+ with open(file_name, "rb") as f:
+ f.seek(0, os.SEEK_END)
+ seek_index = max(f.tell() - MAX_LOG_TO_KVP_LENGTH, 0)
+ report_diagnostic_event(
+ "Dumping last {} bytes of cloud-init.log file to KVP".format(
+ f.tell() - seek_index))
+ f.seek(seek_index, os.SEEK_SET)
+ report_compressed_event("cloud-init.log", f.read())
+ util.write_file(LOG_PUSHED_TO_KVP_MARKER_FILE, '')
+ except Exception as ex:
+ report_diagnostic_event("Exception when dumping log file: %s" %
+ repr(ex))
+
+
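
The two additions above cooperate: push_log_to_kvp() tails at most MAX_LOG_TO_KVP_LENGTH bytes of the log, and report_compressed_event() ships them as zlib-compressed, base64-encoded JSON. A standalone round-trip of that encoding:

    import base64
    import json
    import zlib

    event_content = b'tail of cloud-init.log ...'   # what f.read() yields

    # Encode the way report_compressed_event() does.
    wire = json.dumps({
        'encoding': 'gz+b64',
        'data': base64.encodebytes(
            zlib.compress(event_content)).decode('ascii'),
    })

    # A consumer on the Azure host side reverses it like so.
    decoded = zlib.decompress(base64.b64decode(json.loads(wire)['data']))
    assert decoded == event_content
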
@contextmanager
def cd(newdir):
prevdir = os.getcwd()
@@ -192,7 +252,7 @@ def _get_dhcp_endpoint_option_name():
return azure_endpoint
-class AzureEndpointHttpClient(object):
+class AzureEndpointHttpClient:
headers = {
'x-ms-agent-name': 'WALinuxAgent',
@@ -210,57 +270,77 @@ class AzureEndpointHttpClient(object):
if secure:
headers = self.headers.copy()
headers.update(self.extra_secure_headers)
- return url_helper.read_file_or_url(url, headers=headers, timeout=5,
- retries=10)
+ return url_helper.readurl(url, headers=headers,
+ timeout=5, retries=10, sec_between=5)
def post(self, url, data=None, extra_headers=None):
headers = self.headers
if extra_headers is not None:
headers = self.headers.copy()
headers.update(extra_headers)
- return url_helper.read_file_or_url(url, data=data, headers=headers,
- timeout=5, retries=10)
+ return url_helper.readurl(url, data=data, headers=headers,
+ timeout=5, retries=10, sec_between=5)
-class GoalState(object):
+class InvalidGoalStateXMLException(Exception):
+ """Raised when GoalState XML is invalid or has missing data."""
- def __init__(self, xml, http_client):
- self.http_client = http_client
- self.root = ElementTree.fromstring(xml)
- self._certificates_xml = None
- def _text_from_xpath(self, xpath):
- element = self.root.find(xpath)
- if element is not None:
- return element.text
- return None
+class GoalState:
- @property
- def container_id(self):
- return self._text_from_xpath('./Container/ContainerId')
+ def __init__(self, unparsed_xml, azure_endpoint_client):
+ """Parses a GoalState XML string and returns a GoalState object.
- @property
- def incarnation(self):
- return self._text_from_xpath('./Incarnation')
+ @param unparsed_xml: string representing a GoalState XML.
+ @param azure_endpoint_client: instance of AzureEndpointHttpClient
+ @return: GoalState object representing the GoalState XML string.
+ """
+ self.azure_endpoint_client = azure_endpoint_client
- @property
- def instance_id(self):
- return self._text_from_xpath(
+ try:
+ self.root = ElementTree.fromstring(unparsed_xml)
+ except ElementTree.ParseError as e:
+ msg = 'Failed to parse GoalState XML: %s'
+ LOG.warning(msg, e)
+ report_diagnostic_event(msg % (e,))
+ raise
+
+ self.container_id = self._text_from_xpath('./Container/ContainerId')
+ self.instance_id = self._text_from_xpath(
'./Container/RoleInstanceList/RoleInstance/InstanceId')
+ self.incarnation = self._text_from_xpath('./Incarnation')
+
+ for attr in ("container_id", "instance_id", "incarnation"):
+ if getattr(self, attr) is None:
+ msg = 'Missing %s in GoalState XML'
+ LOG.warning(msg, attr)
+ report_diagnostic_event(msg % (attr,))
+ raise InvalidGoalStateXMLException(msg)
+
+ self.certificates_xml = None
+ url = self._text_from_xpath(
+ './Container/RoleInstanceList/RoleInstance'
+ '/Configuration/Certificates')
+ if url is not None:
+ with events.ReportEventStack(
+ name="get-certificates-xml",
+ description="get certificates xml",
+ parent=azure_ds_reporter):
+ self.certificates_xml = \
+ self.azure_endpoint_client.get(
+ url, secure=True).contents
+ if self.certificates_xml is None:
+ raise InvalidGoalStateXMLException(
+ 'Azure endpoint returned empty certificates xml.')
- @property
- def certificates_xml(self):
- if self._certificates_xml is None:
- url = self._text_from_xpath(
- './Container/RoleInstanceList/RoleInstance'
- '/Configuration/Certificates')
- if url is not None:
- self._certificates_xml = self.http_client.get(
- url, secure=True).contents
- return self._certificates_xml
+ def _text_from_xpath(self, xpath):
+ element = self.root.find(xpath)
+ if element is not None:
+ return element.text
+ return None
-class OpenSSLManager(object):
+class OpenSSLManager:
certificate_names = {
'private_key': 'TransportPrivate.pem',
@@ -282,7 +362,7 @@ class OpenSSLManager(object):
LOG.debug('Certificate already generated.')
return
with cd(self.tmpdir):
- util.subp([
+ subp.subp([
'openssl', 'req', '-x509', '-nodes', '-subj',
'/CN=LinuxTransport', '-days', '32768', '-newkey', 'rsa:2048',
'-keyout', self.certificate_names['private_key'],
@@ -299,14 +379,14 @@ class OpenSSLManager(object):
@azure_ds_telemetry_reporter
def _run_x509_action(action, cert):
cmd = ['openssl', 'x509', '-noout', action]
- result, _ = util.subp(cmd, data=cert)
+ result, _ = subp.subp(cmd, data=cert)
return result
@azure_ds_telemetry_reporter
def _get_ssh_key_from_cert(self, certificate):
pub_key = self._run_x509_action('-pubkey', certificate)
keygen_cmd = ['ssh-keygen', '-i', '-m', 'PKCS8', '-f', '/dev/stdin']
- ssh_key, _ = util.subp(keygen_cmd, data=pub_key)
+ ssh_key, _ = subp.subp(keygen_cmd, data=pub_key)
return ssh_key
@azure_ds_telemetry_reporter
@@ -339,7 +419,7 @@ class OpenSSLManager(object):
certificates_content.encode('utf-8'),
]
with cd(self.tmpdir):
- out, _ = util.subp(
+ out, _ = subp.subp(
'openssl cms -decrypt -in /dev/stdin -inkey'
' {private_key} -recip {certificate} | openssl pkcs12 -nodes'
' -password pass:'.format(**self.certificate_names),
@@ -367,25 +447,122 @@ class OpenSSLManager(object):
return keys
-class WALinuxAgentShim(object):
-
- REPORT_READY_XML_TEMPLATE = '\n'.join([
- '<?xml version="1.0" encoding="utf-8"?>',
- '<Health xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"'
- ' xmlns:xsd="http://www.w3.org/2001/XMLSchema">',
- ' <GoalStateIncarnation>{incarnation}</GoalStateIncarnation>',
- ' <Container>',
- ' <ContainerId>{container_id}</ContainerId>',
- ' <RoleInstanceList>',
- ' <Role>',
- ' <InstanceId>{instance_id}</InstanceId>',
- ' <Health>',
- ' <State>Ready</State>',
- ' </Health>',
- ' </Role>',
- ' </RoleInstanceList>',
- ' </Container>',
- '</Health>'])
+class GoalStateHealthReporter:
+
+ HEALTH_REPORT_XML_TEMPLATE = textwrap.dedent('''\
+ <?xml version="1.0" encoding="utf-8"?>
+ <Health xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xmlns:xsd="http://www.w3.org/2001/XMLSchema">
+ <GoalStateIncarnation>{incarnation}</GoalStateIncarnation>
+ <Container>
+ <ContainerId>{container_id}</ContainerId>
+ <RoleInstanceList>
+ <Role>
+ <InstanceId>{instance_id}</InstanceId>
+ <Health>
+ <State>{health_status}</State>
+ {health_detail_subsection}
+ </Health>
+ </Role>
+ </RoleInstanceList>
+ </Container>
+ </Health>
+ ''')
+
+ HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE = textwrap.dedent('''\
+ <Details>
+ <SubStatus>{health_substatus}</SubStatus>
+ <Description>{health_description}</Description>
+ </Details>
+ ''')
+
+ PROVISIONING_SUCCESS_STATUS = 'Ready'
+
+ def __init__(self, goal_state, azure_endpoint_client, endpoint):
+ """Creates instance that will report provisioning status to an endpoint
+
+ @param goal_state: An instance of class GoalState that contains
+ goal state info such as incarnation, container id, and instance id.
+ These 3 values are needed when reporting the provisioning status
+ to Azure
+ @param azure_endpoint_client: Instance of class AzureEndpointHttpClient
+ @param endpoint: Endpoint (string) where the provisioning status report
+ will be sent to
+ @return: Instance of class GoalStateHealthReporter
+ """
+ self._goal_state = goal_state
+ self._azure_endpoint_client = azure_endpoint_client
+ self._endpoint = endpoint
+
+ @azure_ds_telemetry_reporter
+ def send_ready_signal(self):
+ document = self.build_report(
+ incarnation=self._goal_state.incarnation,
+ container_id=self._goal_state.container_id,
+ instance_id=self._goal_state.instance_id,
+ status=self.PROVISIONING_SUCCESS_STATUS)
+ LOG.debug('Reporting ready to Azure fabric.')
+ try:
+ self._post_health_report(document=document)
+ except Exception as e:
+ msg = "exception while reporting ready: %s" % e
+ LOG.error(msg)
+ report_diagnostic_event(msg)
+ raise
+
+ LOG.info('Reported ready to Azure fabric.')
+
+ def build_report(
+ self, incarnation, container_id, instance_id,
+ status, substatus=None, description=None):
+ health_detail = ''
+ if substatus is not None:
+ health_detail = self.HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE.format(
+ health_substatus=substatus, health_description=description)
+
+ health_report = self.HEALTH_REPORT_XML_TEMPLATE.format(
+ incarnation=incarnation,
+ container_id=container_id,
+ instance_id=instance_id,
+ health_status=status,
+ health_detail_subsection=health_detail)
+
+ return health_report
+
+ @azure_ds_telemetry_reporter
+ def _post_health_report(self, document):
+ push_log_to_kvp()
+
+ # Whenever report_diagnostic_event(diagnostic_msg) is invoked in code,
+ # the diagnostic messages are written to special files
+ # (/var/opt/hyperv/.kvp_pool_*) as Hyper-V KVP messages.
+ # Hyper-V KVP message communication is done through these files,
+ # and KVP functionality is used to communicate and share diagnostic
+ # info with the Azure Host.
+ # The Azure Host will collect the VM's Hyper-V KVP diagnostic messages
+ # when cloud-init reports to fabric.
+ # When the Azure Host receives the health report signal, it will only
+ # collect and process whatever KVP diagnostic messages have been
+ # written to the KVP files.
+ # KVP messages that are published after the Azure Host receives the
+ # signal are ignored and unprocessed, so yield this thread to the
+ # Hyper-V KVP Reporting thread so that they are written.
+ # time.sleep(0) is a low-cost and proven method to yield the scheduler
+ # and ensure that events are flushed.
+ # See HyperVKvpReportingHandler class, which is a multi-threaded
+ # reporting handler that writes to the special KVP files.
+ time.sleep(0)
+
+ LOG.debug('Sending health report to Azure fabric.')
+ url = "http://{}/machine?comp=health".format(self._endpoint)
+ self._azure_endpoint_client.post(
+ url,
+ data=document,
+ extra_headers={'Content-Type': 'text/xml; charset=utf-8'})
+ LOG.debug('Successfully sent health report to Azure fabric')
+
+
+class WALinuxAgentShim:
def __init__(self, fallback_lease_file=None, dhcp_options=None):
LOG.debug('WALinuxAgentShim instantiated, fallback_lease_file=%s',
@@ -393,6 +570,7 @@ class WALinuxAgentShim(object):
self.dhcpoptions = dhcp_options
self._endpoint = None
self.openssl_manager = None
+ self.azure_endpoint_client = None
self.lease_file = fallback_lease_file
def clean_up(self):
@@ -469,9 +647,10 @@ class WALinuxAgentShim(object):
try:
name = os.path.basename(hook_file).replace('.json', '')
dhcp_options[name] = json.loads(util.load_file((hook_file)))
- except ValueError:
+ except ValueError as e:
raise ValueError(
- '{_file} is not valid JSON data'.format(_file=hook_file))
+ '{_file} is not valid JSON data'.format(_file=hook_file)
+ ) from e
return dhcp_options
@staticmethod
@@ -491,7 +670,22 @@ class WALinuxAgentShim(object):
@staticmethod
@azure_ds_telemetry_reporter
def find_endpoint(fallback_lease_file=None, dhcp245=None):
+ """Finds and returns the Azure endpoint using various methods.
+
+        The Azure endpoint is searched for in the following order:
+ 1. Endpoint from dhcp options (dhcp option 245).
+ 2. Endpoint from networkd.
+ 3. Endpoint from dhclient hook json.
+ 4. Endpoint from fallback lease file.
+ 5. The default Azure endpoint.
+
+ @param fallback_lease_file: Fallback lease file that will be used
+ during endpoint search.
+ @param dhcp245: dhcp options that will be used during endpoint search.
+ @return: Azure endpoint IP address.
+ """
value = None
+
if dhcp245 is not None:
value = dhcp245
LOG.debug("Using Azure Endpoint from dhcp options")
@@ -533,42 +727,128 @@ class WALinuxAgentShim(object):
@azure_ds_telemetry_reporter
def register_with_azure_and_fetch_data(self, pubkey_info=None):
+ """Gets the VM's GoalState from Azure, uses the GoalState information
+        to report ready (i.e. send the provisioning-complete signal) to
+ Azure, and then uses pubkey_info to filter and obtain the user's
+ pubkeys from the GoalState.
+
+ @param pubkey_info: List of pubkey values and fingerprints which are
+ used to filter and obtain the user's pubkey values from the
+ GoalState.
+ @return: The list of user's authorized pubkey values.
+ """
if self.openssl_manager is None:
self.openssl_manager = OpenSSLManager()
- http_client = AzureEndpointHttpClient(self.openssl_manager.certificate)
+ if self.azure_endpoint_client is None:
+ self.azure_endpoint_client = AzureEndpointHttpClient(
+ self.openssl_manager.certificate)
+ goal_state = self._fetch_goal_state_from_azure()
+ ssh_keys = self._get_user_pubkeys(goal_state, pubkey_info)
+ health_reporter = GoalStateHealthReporter(
+ goal_state, self.azure_endpoint_client, self.endpoint)
+ health_reporter.send_ready_signal()
+ return {'public-keys': ssh_keys}
+
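Taken together, registration is now a pipeline of small, individually telemetered steps: fetch the raw GoalState XML, parse it, filter pubkeys, and report ready. A rough usage sketch of the shim as refactored here (lease-file path illustrative):

    shim = WALinuxAgentShim(
        fallback_lease_file='/var/lib/dhcp/dhclient.leases')
    try:
        data = shim.register_with_azure_and_fetch_data(pubkey_info=None)
        print(data['public-keys'])   # [] when pubkey_info is None
    finally:
        shim.clean_up()
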
+ @azure_ds_telemetry_reporter
+ def _fetch_goal_state_from_azure(self):
+ """Fetches the GoalState XML from the Azure endpoint, parses the XML,
+ and returns a GoalState object.
+
+ @return: GoalState object representing the GoalState XML
+ """
+ unparsed_goal_state_xml = self._get_raw_goal_state_xml_from_azure()
+ return self._parse_raw_goal_state_xml(unparsed_goal_state_xml)
+
+ @azure_ds_telemetry_reporter
+ def _get_raw_goal_state_xml_from_azure(self):
+ """Fetches the GoalState XML from the Azure endpoint and returns
+ the XML as a string.
+
+ @return: GoalState XML string
+ """
+
LOG.info('Registering with Azure...')
- attempts = 0
- while True:
- try:
- response = http_client.get(
- 'http://{0}/machine/?comp=goalstate'.format(self.endpoint))
- except Exception as e:
- if attempts < 10:
- time.sleep(attempts + 1)
- else:
- report_diagnostic_event(
- "failed to register with Azure: %s" % e)
- raise
- else:
- break
- attempts += 1
+ url = 'http://{}/machine/?comp=goalstate'.format(self.endpoint)
+ try:
+ response = self.azure_endpoint_client.get(url)
+ except Exception as e:
+ msg = 'failed to register with Azure: %s' % e
+ LOG.warning(msg)
+ report_diagnostic_event(msg)
+ raise
LOG.debug('Successfully fetched GoalState XML.')
- goal_state = GoalState(response.contents, http_client)
- report_diagnostic_event("container_id %s" % goal_state.container_id)
+ return response.contents
+
+ @azure_ds_telemetry_reporter
+ def _parse_raw_goal_state_xml(self, unparsed_goal_state_xml):
+ """Parses a GoalState XML string and returns a GoalState object.
+
+ @param unparsed_goal_state_xml: GoalState XML string
+ @return: GoalState object representing the GoalState XML
+ """
+ try:
+ goal_state = GoalState(
+ unparsed_goal_state_xml, self.azure_endpoint_client)
+ except Exception as e:
+ msg = 'Error processing GoalState XML: %s' % e
+ LOG.warning(msg)
+ report_diagnostic_event(msg)
+ raise
+ msg = ', '.join([
+ 'GoalState XML container id: %s' % goal_state.container_id,
+ 'GoalState XML instance id: %s' % goal_state.instance_id,
+ 'GoalState XML incarnation: %s' % goal_state.incarnation])
+ LOG.debug(msg)
+ report_diagnostic_event(msg)
+ return goal_state
+
+ @azure_ds_telemetry_reporter
+ def _get_user_pubkeys(self, goal_state, pubkey_info):
+ """Gets and filters the VM admin user's authorized pubkeys.
+
+ The admin user in this case is the username specified as "admin"
+ when deploying VMs on Azure.
+ See https://docs.microsoft.com/en-us/cli/azure/vm#az-vm-create.
+ cloud-init expects a straightforward array of keys to be dropped
+ into the admin user's authorized_keys file. Azure control plane exposes
+ multiple public keys to the VM via wireserver. Select just the
+ admin user's key(s) and return them, ignoring any other certs.
+
+ @param goal_state: GoalState object. The GoalState object contains
+ a certificate XML, which contains both the VM user's authorized
+ pubkeys and other non-user pubkeys, which are used for
+ MSI and protected extension handling.
+ @param pubkey_info: List of VM user pubkey dicts that were previously
+ obtained from provisioning data.
+            Each pubkey dict in this list carries either a 'value' key or
+            a 'fingerprint' key.
+ Each pubkey['fingerprint'] in the list is used to filter
+ and obtain the actual pubkey value from the GoalState
+ certificates XML.
+ Each pubkey['value'] requires no further processing and is
+ immediately added to the return list.
+ @return: A list of the VM user's authorized pubkey values.
+ """
ssh_keys = []
if goal_state.certificates_xml is not None and pubkey_info is not None:
LOG.debug('Certificate XML found; parsing out public keys.')
keys_by_fingerprint = self.openssl_manager.parse_certificates(
goal_state.certificates_xml)
ssh_keys = self._filter_pubkeys(keys_by_fingerprint, pubkey_info)
- self._report_ready(goal_state, http_client)
- return {'public-keys': ssh_keys}
+ return ssh_keys
- def _filter_pubkeys(self, keys_by_fingerprint, pubkey_info):
- """cloud-init expects a straightforward array of keys to be dropped
- into the user's authorized_keys file. Azure control plane exposes
- multiple public keys to the VM via wireserver. Select just the
- user's key(s) and return them, ignoring any other certs.
+ @staticmethod
+ def _filter_pubkeys(keys_by_fingerprint, pubkey_info):
+ """ Filter and return only the user's actual pubkeys.
+
+ @param keys_by_fingerprint: pubkey fingerprint -> pubkey value dict
+ that was obtained from GoalState Certificates XML. May contain
+ non-user pubkeys.
+ @param pubkey_info: List of VM user pubkeys. Pubkey values are added
+ to the return list without further processing. Pubkey fingerprints
+ are used to filter and obtain the actual pubkey values from
+ keys_by_fingerprint.
+ @return: A list of the VM user's authorized pubkey values.
"""
keys = []
for pubkey in pubkey_info:
@@ -587,30 +867,6 @@ class WALinuxAgentShim(object):
return keys
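For reference, the two shapes _filter_pubkeys accepts, with invented values:

    keys_by_fingerprint = {'ABCD1234': 'ssh-rsa AAAA... user@host'}
    pubkey_info = [
        {'fingerprint': 'ABCD1234'},          # looked up in the dict above
        {'value': 'ssh-rsa BBBB... other'},   # appended as-is
    ]
    # _filter_pubkeys(keys_by_fingerprint, pubkey_info) returns both
    # pubkey values as a flat list.
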
- @azure_ds_telemetry_reporter
- def _report_ready(self, goal_state, http_client):
- LOG.debug('Reporting ready to Azure fabric.')
- document = self.REPORT_READY_XML_TEMPLATE.format(
- incarnation=goal_state.incarnation,
- container_id=goal_state.container_id,
- instance_id=goal_state.instance_id,
- )
- # Host will collect kvps when cloud-init reports ready.
- # some kvps might still be in the queue. We yield the scheduler
- # to make sure we process all kvps up till this point.
- time.sleep(0)
- try:
- http_client.post(
- "http://{0}/machine?comp=health".format(self.endpoint),
- data=document,
- extra_headers={'Content-Type': 'text/xml; charset=utf-8'},
- )
- except Exception as e:
- report_diagnostic_event("exception while reporting ready: %s" % e)
- raise
-
- LOG.info('Reported ready to Azure fabric.')
-
@azure_ds_telemetry_reporter
def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None,
@@ -623,10 +879,16 @@ def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None,
shim.clean_up()
-class EphemeralDHCPv4WithReporting(object):
+def dhcp_log_cb(out, err):
+ report_diagnostic_event("dhclient output stream: %s" % out)
+ report_diagnostic_event("dhclient error stream: %s" % err)
+
+
+class EphemeralDHCPv4WithReporting:
def __init__(self, reporter, nic=None):
self.reporter = reporter
- self.ephemeralDHCPv4 = EphemeralDHCPv4(iface=nic)
+ self.ephemeralDHCPv4 = EphemeralDHCPv4(
+ iface=nic, dhcp_log_func=dhcp_log_cb)
def __enter__(self):
with events.ReportEventStack(
diff --git a/cloudinit/sources/helpers/digitalocean.py b/cloudinit/sources/helpers/digitalocean.py
index 0e7cccac..b545c4d6 100644
--- a/cloudinit/sources/helpers/digitalocean.py
+++ b/cloudinit/sources/helpers/digitalocean.py
@@ -8,6 +8,7 @@ import random
from cloudinit import log as logging
from cloudinit import net as cloudnet
from cloudinit import url_helper
+from cloudinit import subp
from cloudinit import util
NIC_MAP = {'public': 'eth0', 'private': 'eth1'}
@@ -15,7 +16,7 @@ NIC_MAP = {'public': 'eth0', 'private': 'eth1'}
LOG = logging.getLogger(__name__)
-def assign_ipv4_link_local(nic=None):
+def assign_ipv4_link_local(distro, nic=None):
"""Bring up NIC using an address using link-local (ip4LL) IPs. On
DigitalOcean, the link-local domain is per-droplet routed, so there
is no risk of collisions. However, to be more safe, the ip4LL
@@ -23,7 +24,7 @@ def assign_ipv4_link_local(nic=None):
"""
if not nic:
- nic = get_link_local_nic()
+ nic = get_link_local_nic(distro)
LOG.debug("selected interface '%s' for reading metadata", nic)
if not nic:
@@ -36,14 +37,14 @@ def assign_ipv4_link_local(nic=None):
ip_addr_cmd = ['ip', 'addr', 'add', addr, 'dev', nic]
ip_link_cmd = ['ip', 'link', 'set', 'dev', nic, 'up']
- if not util.which('ip'):
+ if not subp.which('ip'):
raise RuntimeError("No 'ip' command available to configure ip4LL "
"address")
try:
- util.subp(ip_addr_cmd)
+ subp.subp(ip_addr_cmd)
LOG.debug("assigned ip4LL address '%s' to '%s'", addr, nic)
- util.subp(ip_link_cmd)
+ subp.subp(ip_link_cmd)
LOG.debug("brought device '%s' up", nic)
except Exception:
util.logexc(LOG, "ip4LL address assignment of '%s' to '%s' failed."
@@ -53,8 +54,12 @@ def assign_ipv4_link_local(nic=None):
return nic
-def get_link_local_nic():
- nics = [f for f in cloudnet.get_devicelist() if cloudnet.is_physical(f)]
+def get_link_local_nic(distro):
+ nics = [
+ f
+ for f in cloudnet.get_devicelist()
+ if distro.networking.is_physical(f)
+ ]
if not nics:
return None
return min(nics, key=lambda d: cloudnet.read_sys_net_int(d, 'ifindex'))
@@ -74,7 +79,7 @@ def del_ipv4_link_local(nic=None):
ip_addr_cmd = ['ip', 'addr', 'flush', 'dev', nic]
try:
- util.subp(ip_addr_cmd)
+ subp.subp(ip_addr_cmd)
LOG.debug("removed ip4LL addresses from %s", nic)
except Exception as e:
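This file, like the VMware and OpenStack helpers below, is part of a tree-wide move of process-execution helpers from cloudinit.util into the new cloudinit.subp module. The call signatures are unchanged; only the import moves. A sketch of the pattern as it appears in these hunks:

    from cloudinit import subp

    out, err = subp.subp(['ip', 'addr', 'show'])   # was util.subp(...)
    if not subp.which('ip'):                       # was util.which(...)
        raise RuntimeError("No 'ip' command available")
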
diff --git a/cloudinit/sources/helpers/hetzner.py b/cloudinit/sources/helpers/hetzner.py
index 2554530d..72edb023 100644
--- a/cloudinit/sources/helpers/hetzner.py
+++ b/cloudinit/sources/helpers/hetzner.py
@@ -7,6 +7,9 @@ from cloudinit import log as logging
from cloudinit import url_helper
from cloudinit import util
+import base64
+import binascii
+
LOG = logging.getLogger(__name__)
@@ -24,3 +27,19 @@ def read_userdata(url, timeout=2, sec_between=2, retries=30):
if not response.ok():
raise RuntimeError("unable to read userdata at %s" % url)
return response.contents
+
+
+def maybe_b64decode(data: bytes) -> bytes:
+ """base64 decode data
+
+ If data is base64 encoded bytes, return b64decode(data).
+ If not, return data unmodified.
+
+ @param data: data as bytes. TypeError is raised if not bytes.
+ """
+ if not isinstance(data, bytes):
+ raise TypeError("data is '%s', expected bytes" % type(data))
+ try:
+ return base64.b64decode(data, validate=True)
+ except binascii.Error:
+ return data
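A quick illustration of maybe_b64decode's contract (sample strings invented):

    maybe_b64decode(b'SGV0em5lcg==')   # -> b'Hetzner' (valid base64)
    maybe_b64decode(b'not base64!')    # -> b'not base64!' (returned unchanged)
    maybe_b64decode('text')            # -> raises TypeError (str, not bytes)
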
diff --git a/cloudinit/sources/helpers/netlink.py b/cloudinit/sources/helpers/netlink.py
index d377ae3d..c2ad587b 100644
--- a/cloudinit/sources/helpers/netlink.py
+++ b/cloudinit/sources/helpers/netlink.py
@@ -55,7 +55,6 @@ NetlinkHeader = namedtuple('NetlinkHeader', ['length', 'type', 'flags', 'seq',
class NetlinkCreateSocketError(RuntimeError):
'''Raised if netlink socket fails during create or bind.'''
- pass
def create_bound_netlink_socket():
@@ -75,7 +74,7 @@ def create_bound_netlink_socket():
netlink_socket.setblocking(0)
except socket.error as e:
msg = "Exception during netlink socket create: %s" % e
- raise NetlinkCreateSocketError(msg)
+ raise NetlinkCreateSocketError(msg) from e
LOG.debug("Created netlink socket")
return netlink_socket
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
index 441db506..65e020c5 100644
--- a/cloudinit/sources/helpers/openstack.py
+++ b/cloudinit/sources/helpers/openstack.py
@@ -16,6 +16,7 @@ from cloudinit import ec2_utils
from cloudinit import log as logging
from cloudinit import net
from cloudinit import sources
+from cloudinit import subp
from cloudinit import url_helper
from cloudinit import util
from cloudinit.sources import BrokenMetadata
@@ -68,6 +69,7 @@ KNOWN_PHYSICAL_TYPES = (
None,
'bgpovs', # not present in OpenStack upstream but used on OVH cloud.
'bridge',
+ 'cascading', # not present in OpenStack upstream, used on OpenTelekomCloud
'dvs',
'ethernet',
'hw_veb',
@@ -109,7 +111,7 @@ class SourceMixin(object):
dev_entries = util.find_devs_with(criteria)
if dev_entries:
device = dev_entries[0]
- except util.ProcessExecutionError:
+ except subp.ProcessExecutionError:
pass
return device
@@ -278,8 +280,9 @@ class BaseReader(metaclass=abc.ABCMeta):
try:
data = translator(data)
except Exception as e:
- raise BrokenMetadata("Failed to process "
- "path %s: %s" % (path, e))
+ raise BrokenMetadata(
+ "Failed to process path %s: %s" % (path, e)
+ ) from e
if found:
results[name] = data
@@ -289,8 +292,9 @@ class BaseReader(metaclass=abc.ABCMeta):
try:
metadata['random_seed'] = base64.b64decode(random_seed)
except (ValueError, TypeError) as e:
- raise BrokenMetadata("Badly formatted metadata"
- " random_seed entry: %s" % e)
+ raise BrokenMetadata(
+ "Badly formatted metadata random_seed entry: %s" % e
+ ) from e
# load any files that were provided
files = {}
@@ -302,8 +306,9 @@ class BaseReader(metaclass=abc.ABCMeta):
try:
files[path] = self._read_content_path(item)
except Exception as e:
- raise BrokenMetadata("Failed to read provided "
- "file %s: %s" % (path, e))
+ raise BrokenMetadata(
+ "Failed to read provided file %s: %s" % (path, e)
+ ) from e
results['files'] = files
# The 'network_config' item in metadata is a content pointer
@@ -315,8 +320,9 @@ class BaseReader(metaclass=abc.ABCMeta):
content = self._read_content_path(net_item, decode=True)
results['network_config'] = content
except IOError as e:
- raise BrokenMetadata("Failed to read network"
- " configuration: %s" % (e))
+ raise BrokenMetadata(
+ "Failed to read network configuration: %s" % (e)
+ ) from e
# To openstack, user can specify meta ('nova boot --meta=key=value')
# and those will appear under metadata['meta'].
@@ -368,8 +374,9 @@ class ConfigDriveReader(BaseReader):
try:
return util.load_json(self._path_read(path))
except Exception as e:
- raise BrokenMetadata("Failed to process "
- "path %s: %s" % (path, e))
+ raise BrokenMetadata(
+ "Failed to process path %s: %s" % (path, e)
+ ) from e
def read_v1(self):
"""Reads a version 1 formatted location.
@@ -393,13 +400,17 @@ class ConfigDriveReader(BaseReader):
path = found[name]
try:
contents = self._path_read(path)
- except IOError:
- raise BrokenMetadata("Failed to read: %s" % path)
+ except IOError as e:
+ raise BrokenMetadata("Failed to read: %s" % path) from e
try:
- md[key] = translator(contents)
+ # Disable not-callable pylint check; pylint isn't able to
+ # determine that every member of FILES_V1 has a callable in
+ # the appropriate position
+ md[key] = translator(contents) # pylint: disable=E1102
except Exception as e:
- raise BrokenMetadata("Failed to process "
- "path %s: %s" % (path, e))
+ raise BrokenMetadata(
+ "Failed to process path %s: %s" % (path, e)
+ ) from e
else:
md[key] = copy.deepcopy(default)
@@ -410,8 +421,11 @@ class ConfigDriveReader(BaseReader):
keydata = meta_js.get('public-keys', keydata)
if keydata:
lines = keydata.splitlines()
- md['public-keys'] = [l for l in lines
- if len(l) and not l.startswith("#")]
+ md['public-keys'] = [
+ line
+ for line in lines
+ if len(line) and not line.startswith("#")
+ ]
# config-drive-v1 has no way for openstack to provide the instance-id
# so we copy that into metadata from the user input
@@ -673,11 +687,13 @@ def convert_net_json(network_json=None, known_macs=None):
raise ValueError("Unable to find a system nic for %s" % d)
d['name'] = known_macs[mac]
- for cfg, key, fmt, target in link_updates:
- if isinstance(target, (list, tuple)):
- cfg[key] = [fmt % link_id_info[l]['name'] for l in target]
+ for cfg, key, fmt, targets in link_updates:
+ if isinstance(targets, (list, tuple)):
+ cfg[key] = [
+ fmt % link_id_info[target]['name'] for target in targets
+ ]
else:
- cfg[key] = fmt % link_id_info[target]['name']
+ cfg[key] = fmt % link_id_info[targets]['name']
# Infiniband interfaces may be referenced in network_data.json by a 6 byte
# Ethernet MAC-style address, and we use that address to look up the
diff --git a/cloudinit/sources/helpers/tests/test_netlink.py b/cloudinit/sources/helpers/tests/test_netlink.py
index c2898a16..10760bd6 100644
--- a/cloudinit/sources/helpers/tests/test_netlink.py
+++ b/cloudinit/sources/helpers/tests/test_netlink.py
@@ -87,7 +87,7 @@ class TestParseNetlinkMessage(CiTestCase):
data = None
with self.assertRaises(AssertionError) as context:
read_rta_oper_state(data)
- self.assertTrue('data is none', str(context.exception))
+ self.assertEqual('data is none', str(context.exception))
def test_read_invalid_rta_operstate_none(self):
'''read_rta_oper_state returns none if operstate is none'''
@@ -180,17 +180,22 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
other_ifname = "eth1"
expected_ifname = "eth0"
data_op_down_eth1 = self._media_switch_data(
- other_ifname, RTM_NEWLINK, OPER_DOWN)
+ other_ifname, RTM_NEWLINK, OPER_DOWN
+ )
data_op_up_eth1 = self._media_switch_data(
- other_ifname, RTM_NEWLINK, OPER_UP)
+ other_ifname, RTM_NEWLINK, OPER_UP
+ )
data_op_down_eth0 = self._media_switch_data(
- expected_ifname, RTM_NEWLINK, OPER_DOWN)
+ expected_ifname, RTM_NEWLINK, OPER_DOWN
+ )
data_op_up_eth0 = self._media_switch_data(
- expected_ifname, RTM_NEWLINK, OPER_UP)
- m_read_netlink_socket.side_effect = [data_op_down_eth1,
- data_op_up_eth1,
- data_op_down_eth0,
- data_op_up_eth0]
+ expected_ifname, RTM_NEWLINK, OPER_UP)
+ m_read_netlink_socket.side_effect = [
+ data_op_down_eth1,
+ data_op_up_eth1,
+ data_op_down_eth0,
+ data_op_up_eth0
+ ]
wait_for_media_disconnect_connect(m_socket, expected_ifname)
self.assertIn('Ignored netlink event on interface %s' % other_ifname,
self.logs.getvalue())
@@ -207,17 +212,23 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
'''
ifname = "eth0"
data_getlink_down = self._media_switch_data(
- ifname, RTM_GETLINK, OPER_DOWN)
+ ifname, RTM_GETLINK, OPER_DOWN
+ )
data_getlink_up = self._media_switch_data(
- ifname, RTM_GETLINK, OPER_UP)
+ ifname, RTM_GETLINK, OPER_UP
+ )
data_newlink_down = self._media_switch_data(
- ifname, RTM_NEWLINK, OPER_DOWN)
+ ifname, RTM_NEWLINK, OPER_DOWN
+ )
data_newlink_up = self._media_switch_data(
- ifname, RTM_NEWLINK, OPER_UP)
- m_read_netlink_socket.side_effect = [data_getlink_down,
- data_getlink_up,
- data_newlink_down,
- data_newlink_up]
+ ifname, RTM_NEWLINK, OPER_UP
+ )
+ m_read_netlink_socket.side_effect = [
+ data_getlink_down,
+ data_getlink_up,
+ data_newlink_down,
+ data_newlink_up
+ ]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 4)
@@ -233,19 +244,25 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
'''
ifname = "eth0"
data_setlink_down = self._media_switch_data(
- ifname, RTM_SETLINK, OPER_DOWN)
+ ifname, RTM_SETLINK, OPER_DOWN
+ )
data_setlink_up = self._media_switch_data(
- ifname, RTM_SETLINK, OPER_UP)
+ ifname, RTM_SETLINK, OPER_UP
+ )
data_newlink_down = self._media_switch_data(
- ifname, RTM_NEWLINK, OPER_DOWN)
+ ifname, RTM_NEWLINK, OPER_DOWN
+ )
data_newlink_up = self._media_switch_data(
- ifname, RTM_NEWLINK, OPER_UP)
- m_read_netlink_socket.side_effect = [data_setlink_down,
- data_setlink_up,
- data_newlink_down,
- data_newlink_up,
- data_newlink_down,
- data_newlink_up]
+ ifname, RTM_NEWLINK, OPER_UP
+ )
+ m_read_netlink_socket.side_effect = [
+ data_setlink_down,
+ data_setlink_up,
+ data_newlink_down,
+ data_newlink_up,
+ data_newlink_down,
+ data_newlink_up
+ ]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 4)
@@ -255,23 +272,30 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
ifname = "eth0"
data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
- data_op_dormant = self._media_switch_data(ifname, RTM_NEWLINK,
- OPER_DORMANT)
- data_op_notpresent = self._media_switch_data(ifname, RTM_NEWLINK,
- OPER_NOTPRESENT)
- data_op_lowerdown = self._media_switch_data(ifname, RTM_NEWLINK,
- OPER_LOWERLAYERDOWN)
- data_op_testing = self._media_switch_data(ifname, RTM_NEWLINK,
- OPER_TESTING)
- data_op_unknown = self._media_switch_data(ifname, RTM_NEWLINK,
- OPER_UNKNOWN)
- m_read_netlink_socket.side_effect = [data_op_up, data_op_up,
- data_op_dormant, data_op_up,
- data_op_notpresent, data_op_up,
- data_op_lowerdown, data_op_up,
- data_op_testing, data_op_up,
- data_op_unknown, data_op_up,
- data_op_down, data_op_up]
+ data_op_dormant = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_DORMANT
+ )
+ data_op_notpresent = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_NOTPRESENT
+ )
+ data_op_lowerdown = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_LOWERLAYERDOWN
+ )
+ data_op_testing = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_TESTING
+ )
+ data_op_unknown = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_UNKNOWN
+ )
+ m_read_netlink_socket.side_effect = [
+ data_op_up, data_op_up,
+ data_op_dormant, data_op_up,
+ data_op_notpresent, data_op_up,
+ data_op_lowerdown, data_op_up,
+ data_op_testing, data_op_up,
+ data_op_unknown, data_op_up,
+ data_op_down, data_op_up
+ ]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 14)
@@ -281,12 +305,14 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
ifname = "eth0"
data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
- data_op_dormant = self._media_switch_data(ifname, RTM_NEWLINK,
- OPER_DORMANT)
- data_op_unknown = self._media_switch_data(ifname, RTM_NEWLINK,
- OPER_UNKNOWN)
- m_read_netlink_socket.side_effect = [data_op_down, data_op_dormant,
- data_op_unknown, data_op_up]
+ data_op_dormant = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_DORMANT)
+ data_op_unknown = self._media_switch_data(
+ ifname, RTM_NEWLINK, OPER_UNKNOWN)
+ m_read_netlink_socket.side_effect = [
+ data_op_down, data_op_dormant,
+ data_op_unknown, data_op_up
+ ]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 4)
@@ -300,9 +326,11 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
data_op_invalid = self._media_switch_data(ifname, RTM_NEWLINK, 7)
- m_read_netlink_socket.side_effect = [data_op_invalid, data_op_up,
- data_op_down, data_op_invalid,
- data_op_up]
+ m_read_netlink_socket.side_effect = [
+ data_op_invalid, data_op_up,
+ data_op_down, data_op_invalid,
+ data_op_up
+ ]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 5)
@@ -333,8 +361,9 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
data_invalid2 = self._media_switch_data(ifname, RTM_NEWLINK, None)
data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
- m_read_netlink_socket.side_effect = [data_invalid1, data_invalid2,
- data_op_down, data_op_up]
+ m_read_netlink_socket.side_effect = [
+ data_invalid1, data_invalid2, data_op_down, data_op_up
+ ]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 4)
@@ -344,11 +373,15 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
bytes = ifname.encode("utf-8")
data = bytearray(96)
struct.pack_into("=LHHLL", data, 0, 48, RTM_NEWLINK, 0, 0, 0)
- struct.pack_into("HH4sHHc", data, RTATTR_START_OFFSET, 8, 3,
- bytes, 5, 16, int_to_bytes(OPER_DOWN))
+ struct.pack_into(
+ "HH4sHHc", data, RTATTR_START_OFFSET, 8, 3,
+ bytes, 5, 16, int_to_bytes(OPER_DOWN)
+ )
struct.pack_into("=LHHLL", data, 48, 48, RTM_NEWLINK, 0, 0, 0)
- struct.pack_into("HH4sHHc", data, 48 + RTATTR_START_OFFSET, 8,
- 3, bytes, 5, 16, int_to_bytes(OPER_UP))
+ struct.pack_into(
+ "HH4sHHc", data, 48 + RTATTR_START_OFFSET, 8,
+ 3, bytes, 5, 16, int_to_bytes(OPER_UP)
+ )
m_read_netlink_socket.return_value = data
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 1)
@@ -360,14 +393,18 @@ class TestWaitForMediaDisconnectConnect(CiTestCase):
data1 = bytearray(112)
data2 = bytearray(32)
struct.pack_into("=LHHLL", data1, 0, 48, RTM_NEWLINK, 0, 0, 0)
- struct.pack_into("HH4sHHc", data1, RTATTR_START_OFFSET, 8, 3,
- bytes, 5, 16, int_to_bytes(OPER_DOWN))
+ struct.pack_into(
+ "HH4sHHc", data1, RTATTR_START_OFFSET, 8, 3,
+ bytes, 5, 16, int_to_bytes(OPER_DOWN)
+ )
struct.pack_into("=LHHLL", data1, 48, 48, RTM_NEWLINK, 0, 0, 0)
- struct.pack_into("HH4sHHc", data1, 80, 8, 3, bytes, 5, 16,
- int_to_bytes(OPER_DOWN))
+ struct.pack_into(
+ "HH4sHHc", data1, 80, 8, 3, bytes, 5, 16, int_to_bytes(OPER_DOWN)
+ )
struct.pack_into("=LHHLL", data1, 96, 48, RTM_NEWLINK, 0, 0, 0)
- struct.pack_into("HH4sHHc", data2, 16, 8, 3, bytes, 5, 16,
- int_to_bytes(OPER_UP))
+ struct.pack_into(
+ "HH4sHHc", data2, 16, 8, 3, bytes, 5, 16, int_to_bytes(OPER_UP)
+ )
m_read_netlink_socket.side_effect = [data1, data2]
wait_for_media_disconnect_connect(m_socket, ifname)
self.assertEqual(m_read_netlink_socket.call_count, 2)
diff --git a/cloudinit/sources/helpers/tests/test_openstack.py b/cloudinit/sources/helpers/tests/test_openstack.py
new file mode 100644
index 00000000..2bde1e3f
--- /dev/null
+++ b/cloudinit/sources/helpers/tests/test_openstack.py
@@ -0,0 +1,44 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+# ./cloudinit/sources/helpers/tests/test_openstack.py
+
+from cloudinit.sources.helpers import openstack
+from cloudinit.tests import helpers as test_helpers
+
+
+class TestConvertNetJson(test_helpers.CiTestCase):
+
+ def test_phy_types(self):
+ """Verify the different known physical types are handled."""
+ # network_data.json example from
+ # https://docs.openstack.org/nova/latest/user/metadata.html
+ mac0 = "fa:16:3e:9c:bf:3d"
+ net_json = {
+ "links": [
+ {"ethernet_mac_address": mac0, "id": "tapcd9f6d46-4a",
+ "mtu": None, "type": "bridge",
+ "vif_id": "cd9f6d46-4a3a-43ab-a466-994af9db96fc"}
+ ],
+ "networks": [
+ {"id": "network0", "link": "tapcd9f6d46-4a",
+ "network_id": "99e88329-f20d-4741-9593-25bf07847b16",
+ "type": "ipv4_dhcp"}
+ ],
+ "services": [{"address": "8.8.8.8", "type": "dns"}]
+ }
+ macs = {mac0: 'eth0'}
+
+ expected = {
+ 'version': 1,
+ 'config': [
+ {'mac_address': 'fa:16:3e:9c:bf:3d',
+ 'mtu': None, 'name': 'eth0',
+ 'subnets': [{'type': 'dhcp4'}],
+ 'type': 'physical'},
+ {'address': '8.8.8.8', 'type': 'nameserver'}]}
+
+ for t in openstack.KNOWN_PHYSICAL_TYPES:
+ net_json["links"][0]["type"] = t
+ self.assertEqual(
+ expected,
+ openstack.convert_net_json(network_json=net_json,
+ known_macs=macs))
diff --git a/cloudinit/sources/helpers/vmware/imc/config.py b/cloudinit/sources/helpers/vmware/imc/config.py
index 2eaeff34..7109aef3 100644
--- a/cloudinit/sources/helpers/vmware/imc/config.py
+++ b/cloudinit/sources/helpers/vmware/imc/config.py
@@ -25,6 +25,8 @@ class Config(object):
SUFFIX = 'DNS|SUFFIX|'
TIMEZONE = 'DATETIME|TIMEZONE'
UTC = 'DATETIME|UTC'
+ POST_GC_STATUS = 'MISC|POST-GC-STATUS'
+ DEFAULT_RUN_POST_SCRIPT = 'MISC|DEFAULT-RUN-POST-CUST-SCRIPT'
def __init__(self, configFile):
self._configFile = configFile
@@ -104,4 +106,28 @@ class Config(object):
def custom_script_name(self):
"""Return the name of custom (pre/post) script."""
return self._configFile.get(Config.CUSTOM_SCRIPT, None)
+
+ @property
+ def post_gc_status(self):
+ """Return whether to post guestinfo.gc.status VMX property."""
+ postGcStatus = self._configFile.get(Config.POST_GC_STATUS, 'no')
+ postGcStatus = postGcStatus.lower()
+ if postGcStatus not in ('yes', 'no'):
+ raise ValueError('PostGcStatus value should be yes/no')
+ return postGcStatus == 'yes'
+
+ @property
+ def default_run_post_script(self):
+ """
+        Return the default value for enable-custom-scripts when it is
+        absent from the VM Tools configuration.
+ """
+ defaultRunPostScript = self._configFile.get(
+ Config.DEFAULT_RUN_POST_SCRIPT,
+ 'no')
+ defaultRunPostScript = defaultRunPostScript.lower()
+ if defaultRunPostScript not in ('yes', 'no'):
+ raise ValueError('defaultRunPostScript value should be yes/no')
+ return defaultRunPostScript == 'yes'
+
# vi: ts=4 expandtab
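Both new properties coerce a yes/no string from the customization config into a bool and reject anything else. A hedged sketch of the flow, assuming ConfigFile's usual SECTION|KEY flattening of the INI-style cust.cfg (file contents invented; key names match the constants above):

    # cust.cfg fragment:
    #   [MISC]
    #   POST-GC-STATUS = yes
    #   DEFAULT-RUN-POST-CUST-SCRIPT = no
    conf = Config(ConfigFile('cust.cfg'))
    assert conf.post_gc_status is True
    assert conf.default_run_post_script is False
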
diff --git a/cloudinit/sources/helpers/vmware/imc/config_custom_script.py b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py
index 9f14770e..2ab22de9 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_custom_script.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py
@@ -9,6 +9,7 @@ import logging
import os
import stat
+from cloudinit import subp
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -61,7 +62,7 @@ class PreCustomScript(RunCustomScript):
"""Executing custom script with precustomization argument."""
LOG.debug("Executing pre-customization script")
self.prepare_script()
- util.subp([CustomScriptConstant.CUSTOM_SCRIPT, "precustomization"])
+ subp.subp([CustomScriptConstant.CUSTOM_SCRIPT, "precustomization"])
class PostCustomScript(RunCustomScript):
diff --git a/cloudinit/sources/helpers/vmware/imc/config_file.py b/cloudinit/sources/helpers/vmware/imc/config_file.py
index 602af078..fc034c95 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_file.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_file.py
@@ -22,7 +22,6 @@ class ConfigFile(ConfigSource, dict):
def __init__(self, filename):
self._loadConfigFile(filename)
- pass
def _insertKey(self, key, val):
"""
diff --git a/cloudinit/sources/helpers/vmware/imc/config_namespace.py b/cloudinit/sources/helpers/vmware/imc/config_namespace.py
index 2f29edd4..5899d8f7 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_namespace.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_namespace.py
@@ -10,6 +10,5 @@ from .config_source import ConfigSource
class ConfigNamespace(ConfigSource):
"""Specifies the Config Namespace."""
- pass
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py
index 77cbf3b6..3745a262 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_nic.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py
@@ -10,6 +10,7 @@ import os
import re
from cloudinit.net.network_state import mask_to_net_prefix
+from cloudinit import subp
from cloudinit import util
logger = logging.getLogger(__name__)
@@ -73,7 +74,7 @@ class NicConfigurator(object):
The mac address(es) are in the lower case
"""
cmd = ['ip', 'addr', 'show']
- output, _err = util.subp(cmd)
+ output, _err = subp.subp(cmd)
sections = re.split(r'\n\d+: ', '\n' + output)[1:]
macPat = r'link/ether (([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2}))'
@@ -248,8 +249,8 @@ class NicConfigurator(object):
logger.info('Clearing DHCP leases')
# Ignore the return code 1.
- util.subp(["pkill", "dhclient"], rcs=[0, 1])
- util.subp(["rm", "-f", "/var/lib/dhcp/*"])
+ subp.subp(["pkill", "dhclient"], rcs=[0, 1])
+ subp.subp(["rm", "-f", "/var/lib/dhcp/*"])
def configure(self, osfamily=None):
"""
diff --git a/cloudinit/sources/helpers/vmware/imc/config_passwd.py b/cloudinit/sources/helpers/vmware/imc/config_passwd.py
index 8c91fa41..d16a7690 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_passwd.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_passwd.py
@@ -9,6 +9,7 @@
import logging
import os
+from cloudinit import subp
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -56,10 +57,10 @@ class PasswordConfigurator(object):
LOG.info('Expiring password.')
for user in uidUserList:
try:
- util.subp(['passwd', '--expire', user])
- except util.ProcessExecutionError as e:
+ subp.subp(['passwd', '--expire', user])
+ except subp.ProcessExecutionError as e:
if os.path.exists('/usr/bin/chage'):
- util.subp(['chage', '-d', '0', user])
+ subp.subp(['chage', '-d', '0', user])
else:
LOG.warning('Failed to expire password for %s with error: '
'%s', user, e)
diff --git a/cloudinit/sources/helpers/vmware/imc/config_source.py b/cloudinit/sources/helpers/vmware/imc/config_source.py
index 2f8ea546..7ec06a9c 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_source.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_source.py
@@ -8,6 +8,5 @@
class ConfigSource(object):
"""Specifies a source for the Config Content."""
- pass
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
index 3d369d04..d919f693 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
@@ -10,7 +10,7 @@ import os
import re
import time
-from cloudinit import util
+from cloudinit import subp
from .guestcust_event import GuestCustEventEnum
from .guestcust_state import GuestCustStateEnum
@@ -34,7 +34,7 @@ def send_rpc(rpc):
try:
logger.debug("Sending RPC command: %s", rpc)
- (out, err) = util.subp(["vmware-rpctool", rpc], rcs=[0])
+ (out, err) = subp.subp(["vmware-rpctool", rpc], rcs=[0])
# Remove the trailing newline in the output.
if out:
out = out.rstrip()
@@ -128,30 +128,46 @@ def get_tools_config(section, key, defaultVal):
not installed.
"""
- if not util.which('vmware-toolbox-cmd'):
+ if not subp.which('vmware-toolbox-cmd'):
logger.debug(
'vmware-toolbox-cmd not installed, returning default value')
return defaultVal
- retValue = defaultVal
cmd = ['vmware-toolbox-cmd', 'config', 'get', section, key]
try:
- (outText, _) = util.subp(cmd)
- m = re.match(r'([^=]+)=(.*)', outText)
- if m:
- retValue = m.group(2).strip()
- logger.debug("Get tools config: [%s] %s = %s",
- section, key, retValue)
- else:
+ (outText, _) = subp.subp(cmd)
+ except subp.ProcessExecutionError as e:
+ if e.exit_code == 69:
logger.debug(
- "Tools config: [%s] %s is not found, return default value: %s",
- section, key, retValue)
- except util.ProcessExecutionError as e:
- logger.error("Failed running %s[%s]", cmd, e.exit_code)
- logger.exception(e)
+ "vmware-toolbox-cmd returned 69 (unavailable) for cmd: %s."
+ " Return default value: %s", " ".join(cmd), defaultVal)
+ else:
+ logger.error("Failed running %s[%s]", cmd, e.exit_code)
+ logger.exception(e)
+ return defaultVal
+
+ retValue = defaultVal
+ m = re.match(r'([^=]+)=(.*)', outText)
+ if m:
+ retValue = m.group(2).strip()
+ logger.debug("Get tools config: [%s] %s = %s",
+ section, key, retValue)
+ else:
+ logger.debug(
+ "Tools config: [%s] %s is not found, return default value: %s",
+ section, key, retValue)
return retValue
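With this rework, exit code 69 (EX_UNAVAILABLE) from vmware-toolbox-cmd is treated as "no config service, use the default" rather than as an error. A hedged usage sketch (section and key names illustrative):

    # Returns 'true' unless VM Tools carries an explicit override.
    value = get_tools_config('deployPkg', 'enable-custom-scripts', 'true')
    run_scripts = value.lower() == 'true'
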
+# Set the given message on the VMX guestinfo.gc.status property of the
+# underlying VMware Virtualization Platform.
+def set_gc_status(config, gcMsg):
+ if config and config.post_gc_status:
+ rpc = "info-set guestinfo.gc.status %s" % gcMsg
+ return send_rpc(rpc)
+ return None
+
+
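set_gc_status only sends the RPC when the customization config opted in via POST-GC-STATUS; otherwise it returns None. A minimal sketch (status strings illustrative):

    set_gc_status(conf, 'Started')       # RPC sent only if opted in
    set_gc_status(None, 'Successful')    # no config: returns None
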
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py
index f73b37ed..1420a988 100644
--- a/cloudinit/sources/tests/test_init.py
+++ b/cloudinit/sources/tests/test_init.py
@@ -55,6 +55,7 @@ class InvalidDataSourceTestSubclassNet(DataSource):
class TestDataSource(CiTestCase):
with_logs = True
+ maxDiff = None
def setUp(self):
super(TestDataSource, self).setUp()
@@ -288,27 +289,47 @@ class TestDataSource(CiTestCase):
tmp = self.tmp_dir()
datasource = DataSourceTestSubclassNet(
self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
- datasource.get_data()
+ sys_info = {
+ "python": "3.7",
+ "platform":
+ "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal",
+ "uname": ["Linux", "myhost", "5.4.0-24-generic", "SMP blah",
+ "x86_64"],
+ "variant": "ubuntu", "dist": ["ubuntu", "20.04", "focal"]}
+ with mock.patch("cloudinit.util.system_info", return_value=sys_info):
+ datasource.get_data()
json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
content = util.load_file(json_file)
expected = {
'base64_encoded_keys': [],
- 'sensitive_keys': [],
+ 'merged_cfg': REDACT_SENSITIVE_VALUE,
+ 'sensitive_keys': ['merged_cfg'],
+ 'sys_info': sys_info,
'v1': {
'_beta_keys': ['subplatform'],
'availability-zone': 'myaz',
'availability_zone': 'myaz',
'cloud-name': 'subclasscloudname',
'cloud_name': 'subclasscloudname',
+ 'distro': 'ubuntu',
+ 'distro_release': 'focal',
+ 'distro_version': '20.04',
'instance-id': 'iid-datasource',
'instance_id': 'iid-datasource',
'local-hostname': 'test-subclass-hostname',
'local_hostname': 'test-subclass-hostname',
+ 'kernel_release': '5.4.0-24-generic',
+ 'machine': 'x86_64',
'platform': 'mytestsubclass',
'public_ssh_keys': [],
+ 'python_version': '3.7',
'region': 'myregion',
- 'subplatform': 'unknown'},
+ 'system_platform':
+ 'Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal',
+ 'subplatform': 'unknown',
+ 'variant': 'ubuntu'},
'ds': {
+
'_doc': EXPERIMENTAL_TEXT,
'meta_data': {'availability_zone': 'myaz',
'local-hostname': 'test-subclass-hostname',
@@ -318,8 +339,8 @@ class TestDataSource(CiTestCase):
self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode))
self.assertEqual(expected, util.load_json(content))
- def test_get_data_writes_json_instance_data_sensitive(self):
- """get_data writes INSTANCE_JSON_SENSITIVE_FILE as readonly root."""
+ def test_get_data_writes_redacted_public_json_instance_data(self):
+ """get_data writes redacted content to public INSTANCE_JSON_FILE."""
tmp = self.tmp_dir()
datasource = DataSourceTestSubclassNet(
self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
@@ -329,33 +350,49 @@ class TestDataSource(CiTestCase):
'region': 'myregion',
'some': {'security-credentials': {
'cred1': 'sekret', 'cred2': 'othersekret'}}})
- self.assertEqual(
- ('security-credentials',), datasource.sensitive_metadata_keys)
- datasource.get_data()
+ self.assertCountEqual(
+ ('merged_cfg', 'security-credentials',),
+ datasource.sensitive_metadata_keys)
+ sys_info = {
+ "python": "3.7",
+ "platform":
+ "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal",
+ "uname": ["Linux", "myhost", "5.4.0-24-generic", "SMP blah",
+ "x86_64"],
+ "variant": "ubuntu", "dist": ["ubuntu", "20.04", "focal"]}
+ with mock.patch("cloudinit.util.system_info", return_value=sys_info):
+ datasource.get_data()
json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
- sensitive_json_file = self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, tmp)
redacted = util.load_json(util.load_file(json_file))
- self.assertEqual(
- {'cred1': 'sekret', 'cred2': 'othersekret'},
- redacted['ds']['meta_data']['some']['security-credentials'])
- content = util.load_file(sensitive_json_file)
expected = {
'base64_encoded_keys': [],
- 'sensitive_keys': ['ds/meta_data/some/security-credentials'],
+ 'merged_cfg': REDACT_SENSITIVE_VALUE,
+ 'sensitive_keys': [
+ 'ds/meta_data/some/security-credentials', 'merged_cfg'],
+ 'sys_info': sys_info,
'v1': {
'_beta_keys': ['subplatform'],
'availability-zone': 'myaz',
'availability_zone': 'myaz',
'cloud-name': 'subclasscloudname',
'cloud_name': 'subclasscloudname',
+ 'distro': 'ubuntu',
+ 'distro_release': 'focal',
+ 'distro_version': '20.04',
'instance-id': 'iid-datasource',
'instance_id': 'iid-datasource',
'local-hostname': 'test-subclass-hostname',
'local_hostname': 'test-subclass-hostname',
+ 'kernel_release': '5.4.0-24-generic',
+ 'machine': 'x86_64',
'platform': 'mytestsubclass',
'public_ssh_keys': [],
+ 'python_version': '3.7',
'region': 'myregion',
- 'subplatform': 'unknown'},
+ 'system_platform':
+ 'Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal',
+ 'subplatform': 'unknown',
+ 'variant': 'ubuntu'},
'ds': {
'_doc': EXPERIMENTAL_TEXT,
'meta_data': {
@@ -364,8 +401,83 @@ class TestDataSource(CiTestCase):
'region': 'myregion',
'some': {'security-credentials': REDACT_SENSITIVE_VALUE}}}
}
- self.maxDiff = None
- self.assertEqual(expected, util.load_json(content))
+ self.assertCountEqual(expected, redacted)
+ file_stat = os.stat(json_file)
+ self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode))
+
+ def test_get_data_writes_json_instance_data_sensitive(self):
+ """
+ get_data writes unmodified data to sensitive file as root-readonly.
+ """
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
+ custom_metadata={
+ 'availability_zone': 'myaz',
+ 'local-hostname': 'test-subclass-hostname',
+ 'region': 'myregion',
+ 'some': {'security-credentials': {
+ 'cred1': 'sekret', 'cred2': 'othersekret'}}})
+ sys_info = {
+ "python": "3.7",
+ "platform":
+ "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal",
+ "uname": ["Linux", "myhost", "5.4.0-24-generic", "SMP blah",
+ "x86_64"],
+ "variant": "ubuntu", "dist": ["ubuntu", "20.04", "focal"]}
+
+ self.assertCountEqual(
+ ('merged_cfg', 'security-credentials',),
+ datasource.sensitive_metadata_keys)
+ with mock.patch("cloudinit.util.system_info", return_value=sys_info):
+ datasource.get_data()
+ sensitive_json_file = self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, tmp)
+ content = util.load_file(sensitive_json_file)
+ expected = {
+ 'base64_encoded_keys': [],
+ 'merged_cfg': {
+ '_doc': (
+ 'Merged cloud-init system config from '
+ '/etc/cloud/cloud.cfg and /etc/cloud/cloud.cfg.d/'
+ ),
+ 'datasource': {'_undef': {'key1': False}}},
+ 'sensitive_keys': [
+ 'ds/meta_data/some/security-credentials', 'merged_cfg'],
+ 'sys_info': sys_info,
+ 'v1': {
+ '_beta_keys': ['subplatform'],
+ 'availability-zone': 'myaz',
+ 'availability_zone': 'myaz',
+ 'cloud-name': 'subclasscloudname',
+ 'cloud_name': 'subclasscloudname',
+ 'distro': 'ubuntu',
+ 'distro_release': 'focal',
+ 'distro_version': '20.04',
+ 'instance-id': 'iid-datasource',
+ 'instance_id': 'iid-datasource',
+ 'kernel_release': '5.4.0-24-generic',
+ 'local-hostname': 'test-subclass-hostname',
+ 'local_hostname': 'test-subclass-hostname',
+ 'machine': 'x86_64',
+ 'platform': 'mytestsubclass',
+ 'public_ssh_keys': [],
+ 'python_version': '3.7',
+ 'region': 'myregion',
+ 'subplatform': 'unknown',
+ 'system_platform':
+ 'Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal',
+ 'variant': 'ubuntu'},
+ 'ds': {
+ '_doc': EXPERIMENTAL_TEXT,
+ 'meta_data': {
+ 'availability_zone': 'myaz',
+ 'local-hostname': 'test-subclass-hostname',
+ 'region': 'myregion',
+ 'some': {
+ 'security-credentials':
+ {'cred1': 'sekret', 'cred2': 'othersekret'}}}}
+ }
+ self.assertCountEqual(expected, util.load_json(content))
file_stat = os.stat(sensitive_json_file)
self.assertEqual(0o600, stat.S_IMODE(file_stat.st_mode))
self.assertEqual(expected, util.load_json(content))
@@ -431,7 +543,7 @@ class TestDataSource(CiTestCase):
json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
content = util.load_file(json_file)
instance_json = util.load_json(content)
- self.assertItemsEqual(
+ self.assertCountEqual(
['ds/meta_data/key2/key2.1'],
instance_json['base64_encoded_keys'])
self.assertEqual(
@@ -440,9 +552,7 @@ class TestDataSource(CiTestCase):
def test_get_hostname_subclass_support(self):
"""Validate get_hostname signature on all subclasses of DataSource."""
- # Use inspect.getfullargspec when we drop py2.6 and py2.7
- get_args = inspect.getargspec # pylint: disable=W1505
- base_args = get_args(DataSource.get_hostname) # pylint: disable=W1505
+ base_args = inspect.getfullargspec(DataSource.get_hostname)
# Import all DataSource subclasses so we can inspect them.
modules = util.find_modules(os.path.dirname(os.path.dirname(__file__)))
for _loc, name in modules.items():
@@ -454,13 +564,13 @@ class TestDataSource(CiTestCase):
continue
self.assertEqual(
base_args,
- get_args(child.get_hostname), # pylint: disable=W1505
+ inspect.getfullargspec(child.get_hostname),
'%s does not implement DataSource.get_hostname params'
% child)
for grandchild in child.__subclasses__():
self.assertEqual(
base_args,
- get_args(grandchild.get_hostname), # pylint: disable=W1505
+ inspect.getfullargspec(grandchild.get_hostname),
'%s does not implement DataSource.get_hostname params'
% grandchild)
diff --git a/cloudinit/sources/tests/test_oracle.py b/cloudinit/sources/tests/test_oracle.py
index abf3d359..7bd23813 100644
--- a/cloudinit/sources/tests/test_oracle.py
+++ b/cloudinit/sources/tests/test_oracle.py
@@ -1,22 +1,20 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.sources import DataSourceOracle as oracle
-from cloudinit.sources import BrokenMetadata, NetworkConfigSource
-from cloudinit import helpers
-
-from cloudinit.tests import helpers as test_helpers
-
-from textwrap import dedent
-import argparse
+import base64
import copy
-import httpretty
import json
-import os
-import uuid
+from contextlib import ExitStack
from unittest import mock
+import pytest
+
+from cloudinit.sources import DataSourceOracle as oracle
+from cloudinit.sources import NetworkConfigSource
+from cloudinit.sources.DataSourceOracle import OpcMetadata
+from cloudinit.tests import helpers as test_helpers
+from cloudinit.url_helper import UrlError
+
DS_PATH = "cloudinit.sources.DataSourceOracle"
-MD_VER = "2013-10-17"
# `curl -L http://169.254.169.254/opc/v1/vnics/` on a Oracle Bare Metal Machine
# with a secondary VNIC attached (vnicId truncated for Python line length)
@@ -59,328 +57,99 @@ OPC_VM_SECONDARY_VNIC_RESPONSE = """\
} ]"""
-class TestDataSourceOracle(test_helpers.CiTestCase):
- """Test datasource DataSourceOracle."""
-
- with_logs = True
-
- ds_class = oracle.DataSourceOracle
-
- my_uuid = str(uuid.uuid4())
- my_md = {"uuid": "ocid1.instance.oc1.phx.abyhqlj",
- "name": "ci-vm1", "availability_zone": "phx-ad-3",
- "hostname": "ci-vm1hostname",
- "launch_index": 0, "files": [],
- "public_keys": {"0": "ssh-rsa AAAAB3N...== user@host"},
- "meta": {}}
-
- def _patch_instance(self, inst, patches):
- """Patch an instance of a class 'inst'.
- for each name, kwargs in patches:
- inst.name = mock.Mock(**kwargs)
- returns a namespace object that has
- namespace.name = mock.Mock(**kwargs)
- Do not bother with cleanup as instance is assumed transient."""
- mocks = argparse.Namespace()
- for name, kwargs in patches.items():
- imock = mock.Mock(name=name, spec=getattr(inst, name), **kwargs)
- setattr(mocks, name, imock)
- setattr(inst, name, imock)
- return mocks
-
- def _get_ds(self, sys_cfg=None, distro=None, paths=None, ud_proc=None,
- patches=None):
- if sys_cfg is None:
- sys_cfg = {}
- if patches is None:
- patches = {}
- if paths is None:
- tmpd = self.tmp_dir()
- dirs = {'cloud_dir': self.tmp_path('cloud_dir', tmpd),
- 'run_dir': self.tmp_path('run_dir')}
- for d in dirs.values():
- os.mkdir(d)
- paths = helpers.Paths(dirs)
-
- ds = self.ds_class(sys_cfg=sys_cfg, distro=distro,
- paths=paths, ud_proc=ud_proc)
-
- return ds, self._patch_instance(ds, patches)
-
- def test_platform_not_viable_returns_false(self):
- ds, mocks = self._get_ds(
- patches={'_is_platform_viable': {'return_value': False}})
- self.assertFalse(ds._get_data())
- mocks._is_platform_viable.assert_called_once_with()
-
- def test_platform_info(self):
- """Return platform-related information for Oracle Datasource."""
- ds, _mocks = self._get_ds()
- self.assertEqual('oracle', ds.cloud_name)
- self.assertEqual('oracle', ds.platform_type)
- self.assertEqual(
- 'metadata (http://169.254.169.254/openstack/)', ds.subplatform)
-
- def test_sys_cfg_can_enable_configure_secondary_nics(self):
- # Confirm that behaviour is toggled by sys_cfg
- ds, _mocks = self._get_ds()
- self.assertFalse(ds.ds_cfg['configure_secondary_nics'])
-
- sys_cfg = {
- 'datasource': {'Oracle': {'configure_secondary_nics': True}}}
- ds, _mocks = self._get_ds(sys_cfg=sys_cfg)
- self.assertTrue(ds.ds_cfg['configure_secondary_nics'])
-
- @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
- def test_without_userdata(self, m_is_iscsi_root):
- """If no user-data is provided, it should not be in return dict."""
- ds, mocks = self._get_ds(patches={
- '_is_platform_viable': {'return_value': True},
- 'crawl_metadata': {
- 'return_value': {
- MD_VER: {'system_uuid': self.my_uuid,
- 'meta_data': self.my_md}}}})
- self.assertTrue(ds._get_data())
- mocks._is_platform_viable.assert_called_once_with()
- mocks.crawl_metadata.assert_called_once_with()
- self.assertEqual(self.my_uuid, ds.system_uuid)
- self.assertEqual(self.my_md['availability_zone'], ds.availability_zone)
- self.assertIn(self.my_md["public_keys"]["0"], ds.get_public_ssh_keys())
- self.assertEqual(self.my_md['uuid'], ds.get_instance_id())
- self.assertIsNone(ds.userdata_raw)
-
- @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
- def test_with_vendordata(self, m_is_iscsi_root):
- """Test with vendor data."""
- vd = {'cloud-init': '#cloud-config\nkey: value'}
- ds, mocks = self._get_ds(patches={
- '_is_platform_viable': {'return_value': True},
- 'crawl_metadata': {
- 'return_value': {
- MD_VER: {'system_uuid': self.my_uuid,
- 'meta_data': self.my_md,
- 'vendor_data': vd}}}})
- self.assertTrue(ds._get_data())
- mocks._is_platform_viable.assert_called_once_with()
- mocks.crawl_metadata.assert_called_once_with()
- self.assertEqual(vd, ds.vendordata_pure)
- self.assertEqual(vd['cloud-init'], ds.vendordata_raw)
-
- @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
- def test_with_userdata(self, m_is_iscsi_root):
- """Ensure user-data is populated if present and is binary."""
- my_userdata = b'abcdefg'
- ds, mocks = self._get_ds(patches={
- '_is_platform_viable': {'return_value': True},
- 'crawl_metadata': {
- 'return_value': {
- MD_VER: {'system_uuid': self.my_uuid,
- 'meta_data': self.my_md,
- 'user_data': my_userdata}}}})
- self.assertTrue(ds._get_data())
- mocks._is_platform_viable.assert_called_once_with()
- mocks.crawl_metadata.assert_called_once_with()
- self.assertEqual(self.my_uuid, ds.system_uuid)
- self.assertIn(self.my_md["public_keys"]["0"], ds.get_public_ssh_keys())
- self.assertEqual(self.my_md['uuid'], ds.get_instance_id())
- self.assertEqual(my_userdata, ds.userdata_raw)
-
- @mock.patch(DS_PATH + "._add_network_config_from_opc_imds",
- side_effect=lambda network_config: network_config)
- @mock.patch(DS_PATH + ".cmdline.read_initramfs_config")
- @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
- def test_network_cmdline(self, m_is_iscsi_root, m_initramfs_config,
- _m_add_network_config_from_opc_imds):
- """network_config should read kernel cmdline."""
- distro = mock.MagicMock()
- ds, _ = self._get_ds(distro=distro, patches={
- '_is_platform_viable': {'return_value': True},
- 'crawl_metadata': {
- 'return_value': {
- MD_VER: {'system_uuid': self.my_uuid,
- 'meta_data': self.my_md}}}})
- ncfg = {'version': 1, 'config': [{'a': 'b'}]}
- m_initramfs_config.return_value = ncfg
- self.assertTrue(ds._get_data())
- self.assertEqual(ncfg, ds.network_config)
- self.assertEqual([mock.call()], m_initramfs_config.call_args_list)
- self.assertFalse(distro.generate_fallback_config.called)
-
- @mock.patch(DS_PATH + "._add_network_config_from_opc_imds",
- side_effect=lambda network_config: network_config)
- @mock.patch(DS_PATH + ".cmdline.read_initramfs_config")
- @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
- def test_network_fallback(self, m_is_iscsi_root, m_initramfs_config,
- _m_add_network_config_from_opc_imds):
- """test that fallback network is generated if no kernel cmdline."""
- distro = mock.MagicMock()
- ds, _ = self._get_ds(distro=distro, patches={
- '_is_platform_viable': {'return_value': True},
- 'crawl_metadata': {
- 'return_value': {
- MD_VER: {'system_uuid': self.my_uuid,
- 'meta_data': self.my_md}}}})
- ncfg = {'version': 1, 'config': [{'a': 'b'}]}
- m_initramfs_config.return_value = None
- self.assertTrue(ds._get_data())
- ncfg = {'version': 1, 'config': [{'distro1': 'value'}]}
- distro.generate_fallback_config.return_value = ncfg
- self.assertEqual(ncfg, ds.network_config)
- self.assertEqual([mock.call()], m_initramfs_config.call_args_list)
- distro.generate_fallback_config.assert_called_once_with()
-
- # test that the result got cached, and the methods not re-called.
- self.assertEqual(ncfg, ds.network_config)
- self.assertEqual(1, m_initramfs_config.call_count)
-
- @mock.patch(DS_PATH + "._add_network_config_from_opc_imds")
- @mock.patch(DS_PATH + ".cmdline.read_initramfs_config",
- return_value={'some': 'config'})
- @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
- def test_secondary_nics_added_to_network_config_if_enabled(
- self, _m_is_iscsi_root, _m_initramfs_config,
- m_add_network_config_from_opc_imds):
-
- needle = object()
-
- def network_config_side_effect(network_config):
- network_config['secondary_added'] = needle
-
- m_add_network_config_from_opc_imds.side_effect = (
- network_config_side_effect)
-
- distro = mock.MagicMock()
- ds, _ = self._get_ds(distro=distro, patches={
- '_is_platform_viable': {'return_value': True},
- 'crawl_metadata': {
- 'return_value': {
- MD_VER: {'system_uuid': self.my_uuid,
- 'meta_data': self.my_md}}}})
- ds.ds_cfg['configure_secondary_nics'] = True
- self.assertEqual(needle, ds.network_config['secondary_added'])
-
- @mock.patch(DS_PATH + "._add_network_config_from_opc_imds")
- @mock.patch(DS_PATH + ".cmdline.read_initramfs_config",
- return_value={'some': 'config'})
- @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
- def test_secondary_nics_not_added_to_network_config_by_default(
- self, _m_is_iscsi_root, _m_initramfs_config,
- m_add_network_config_from_opc_imds):
-
- def network_config_side_effect(network_config):
- network_config['secondary_added'] = True
-
- m_add_network_config_from_opc_imds.side_effect = (
- network_config_side_effect)
-
- distro = mock.MagicMock()
- ds, _ = self._get_ds(distro=distro, patches={
- '_is_platform_viable': {'return_value': True},
- 'crawl_metadata': {
- 'return_value': {
- MD_VER: {'system_uuid': self.my_uuid,
- 'meta_data': self.my_md}}}})
- self.assertNotIn('secondary_added', ds.network_config)
-
- @mock.patch(DS_PATH + "._add_network_config_from_opc_imds")
- @mock.patch(DS_PATH + ".cmdline.read_initramfs_config")
- @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
- def test_secondary_nic_failure_isnt_blocking(
- self, _m_is_iscsi_root, m_initramfs_config,
- m_add_network_config_from_opc_imds):
-
- m_add_network_config_from_opc_imds.side_effect = Exception()
-
- distro = mock.MagicMock()
- ds, _ = self._get_ds(distro=distro, patches={
- '_is_platform_viable': {'return_value': True},
- 'crawl_metadata': {
- 'return_value': {
- MD_VER: {'system_uuid': self.my_uuid,
- 'meta_data': self.my_md}}}})
- ds.ds_cfg['configure_secondary_nics'] = True
- self.assertEqual(ds.network_config, m_initramfs_config.return_value)
- self.assertIn('Failed to fetch secondary network configuration',
- self.logs.getvalue())
-
- def test_ds_network_cfg_preferred_over_initramfs(self):
- """Ensure that DS net config is preferred over initramfs config"""
- network_config_sources = oracle.DataSourceOracle.network_config_sources
- self.assertLess(
- network_config_sources.index(NetworkConfigSource.ds),
- network_config_sources.index(NetworkConfigSource.initramfs)
- )
-
-
-@mock.patch(DS_PATH + "._read_system_uuid", return_value=str(uuid.uuid4()))
-class TestReadMetaData(test_helpers.HttprettyTestCase):
- """Test the read_metadata which interacts with http metadata service."""
-
- mdurl = oracle.METADATA_ENDPOINT
- my_md = {"uuid": "ocid1.instance.oc1.phx.abyhqlj",
- "name": "ci-vm1", "availability_zone": "phx-ad-3",
- "hostname": "ci-vm1hostname",
- "launch_index": 0, "files": [],
- "public_keys": {"0": "ssh-rsa AAAAB3N...== user@host"},
- "meta": {}}
-
- def populate_md(self, data):
- """call httppretty.register_url for each item dict 'data',
- including valid indexes. Text values converted to bytes."""
- httpretty.register_uri(
- httpretty.GET, self.mdurl + MD_VER + "/",
- '\n'.join(data.keys()).encode('utf-8'))
- for k, v in data.items():
- httpretty.register_uri(
- httpretty.GET, self.mdurl + MD_VER + "/" + k,
- v if not isinstance(v, str) else v.encode('utf-8'))
-
- def test_broken_no_sys_uuid(self, m_read_system_uuid):
- """Datasource requires ability to read system_uuid and true return."""
- m_read_system_uuid.return_value = None
- self.assertRaises(BrokenMetadata, oracle.read_metadata)
-
- def test_broken_no_metadata_json(self, m_read_system_uuid):
- """Datasource requires meta_data.json."""
- httpretty.register_uri(
- httpretty.GET, self.mdurl + MD_VER + "/",
- '\n'.join(['user_data']).encode('utf-8'))
- with self.assertRaises(BrokenMetadata) as cm:
- oracle.read_metadata()
- self.assertIn("Required field 'meta_data.json' missing",
- str(cm.exception))
-
- def test_with_userdata(self, m_read_system_uuid):
- data = {'user_data': b'#!/bin/sh\necho hi world\n',
- 'meta_data.json': json.dumps(self.my_md)}
- self.populate_md(data)
- result = oracle.read_metadata()[MD_VER]
- self.assertEqual(data['user_data'], result['user_data'])
- self.assertEqual(self.my_md, result['meta_data'])
-
- def test_without_userdata(self, m_read_system_uuid):
- data = {'meta_data.json': json.dumps(self.my_md)}
- self.populate_md(data)
- result = oracle.read_metadata()[MD_VER]
- self.assertNotIn('user_data', result)
- self.assertEqual(self.my_md, result['meta_data'])
-
- def test_unknown_fields_included(self, m_read_system_uuid):
- """Unknown fields listed in index should be included.
- And those ending in .json should be decoded."""
- some_data = {'key1': 'data1', 'subk1': {'subd1': 'subv'}}
- some_vendor_data = {'cloud-init': 'foo'}
- data = {'meta_data.json': json.dumps(self.my_md),
- 'some_data.json': json.dumps(some_data),
- 'vendor_data.json': json.dumps(some_vendor_data),
- 'other_blob': b'this is blob'}
- self.populate_md(data)
- result = oracle.read_metadata()[MD_VER]
- self.assertNotIn('user_data', result)
- self.assertEqual(self.my_md, result['meta_data'])
- self.assertEqual(some_data, result['some_data'])
- self.assertEqual(some_vendor_data, result['vendor_data'])
- self.assertEqual(data['other_blob'], result['other_blob'])
+# Fetched with `curl http://169.254.169.254/opc/v1/instance/` (and then
+# truncated for line length)
+OPC_V2_METADATA = """\
+{
+ "availabilityDomain" : "qIZq:PHX-AD-1",
+ "faultDomain" : "FAULT-DOMAIN-2",
+ "compartmentId" : "ocid1.tenancy.oc1..aaaaaaaao7f7cccogqrg5emjxkxmTRUNCATED",
+ "displayName" : "instance-20200320-1400",
+ "hostname" : "instance-20200320-1400",
+ "id" : "ocid1.instance.oc1.phx.anyhqljtniwq6syc3nex55sep5w34qbwmw6TRUNCATED",
+ "image" : "ocid1.image.oc1.phx.aaaaaaaagmkn4gdhvvx24kiahh2b2qchsicTRUNCATED",
+ "metadata" : {
+ "ssh_authorized_keys" : "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQ truncated",
+ "user_data" : "IyEvYmluL3NoCnRvdWNoIC90bXAvZm9v"
+ },
+ "region" : "phx",
+ "canonicalRegionName" : "us-phoenix-1",
+ "ociAdName" : "phx-ad-3",
+ "shape" : "VM.Standard2.1",
+ "state" : "Running",
+ "timeCreated" : 1584727285318,
+ "agentConfig" : {
+ "monitoringDisabled" : true,
+ "managementDisabled" : true
+ }
+}"""
+
+# Just a small meaningless change to differentiate the two metadatas
+OPC_V1_METADATA = OPC_V2_METADATA.replace("ocid1.instance", "ocid2.instance")
+
+
+@pytest.fixture
+def metadata_version():
+ return 2
+
+
+@pytest.yield_fixture
+def oracle_ds(request, fixture_utils, paths, metadata_version):
+ """
+ Return an instantiated DataSourceOracle.
+
+ This also performs the mocking required for the default test case:
+ * ``_read_system_uuid`` returns something,
+ * ``_is_platform_viable`` returns True,
+ * ``_is_iscsi_root`` returns True (the simpler code path),
+    * ``read_opc_metadata`` returns ``OPC_V2_METADATA``
+
+ (This uses the paths fixture for the required helpers.Paths object, and the
+ fixture_utils fixture for fetching markers.)
+ """
+ sys_cfg = fixture_utils.closest_marker_first_arg_or(
+ request, "ds_sys_cfg", mock.MagicMock()
+ )
+ metadata = OpcMetadata(metadata_version, json.loads(OPC_V2_METADATA), None)
+ with mock.patch(DS_PATH + "._read_system_uuid", return_value="someuuid"):
+ with mock.patch(DS_PATH + "._is_platform_viable", return_value=True):
+ with mock.patch(DS_PATH + "._is_iscsi_root", return_value=True):
+ with mock.patch(
+ DS_PATH + ".read_opc_metadata",
+ return_value=metadata,
+ ):
+ yield oracle.DataSourceOracle(
+ sys_cfg=sys_cfg, distro=mock.Mock(), paths=paths,
+ )
+
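The ``ds_sys_cfg`` marker lookup above goes through a helper exposed by the
``fixture_utils`` fixture. A minimal sketch of such a helper, built only on
pytest's public ``request.node.get_closest_marker`` API (the project's real
conftest helper may differ in detail):

    def closest_marker_first_arg_or(request, marker_name, default):
        # Return the first positional argument of the closest marker with
        # the given name, or `default` when the marker is absent.
        marker = request.node.get_closest_marker(marker_name)
        if marker is None or not marker.args:
            return default
        return marker.args[0]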
+
+class TestDataSourceOracle:
+ def test_platform_info(self, oracle_ds):
+ assert "oracle" == oracle_ds.cloud_name
+ assert "oracle" == oracle_ds.platform_type
+
+ def test_subplatform_before_fetch(self, oracle_ds):
+ assert 'unknown' == oracle_ds.subplatform
+
+ def test_platform_info_after_fetch(self, oracle_ds):
+ oracle_ds._get_data()
+ assert 'metadata (http://169.254.169.254/opc/v2/)' == \
+ oracle_ds.subplatform
+
+ @pytest.mark.parametrize('metadata_version', [1])
+ def test_v1_platform_info_after_fetch(self, oracle_ds):
+ oracle_ds._get_data()
+ assert 'metadata (http://169.254.169.254/opc/v1/)' == \
+ oracle_ds.subplatform
+
+ def test_secondary_nics_disabled_by_default(self, oracle_ds):
+ assert not oracle_ds.ds_cfg["configure_secondary_nics"]
+
+ @pytest.mark.ds_sys_cfg(
+ {"datasource": {"Oracle": {"configure_secondary_nics": True}}}
+ )
+ def test_sys_cfg_can_enable_configure_secondary_nics(self, oracle_ds):
+ assert oracle_ds.ds_cfg["configure_secondary_nics"]
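
Note the pytest indirection at work in ``test_v1_platform_info_after_fetch``
above: parametrizing the name of a fixture overrides that fixture's value for
the test, and every fixture depending on it (here ``oracle_ds``, via
``metadata_version``) is rebuilt with the override. The same pattern in
standalone form:

    import pytest

    @pytest.fixture
    def version():
        return 2  # the default

    @pytest.fixture
    def client(version):
        return "client-v%d" % version

    @pytest.mark.parametrize("version", [1])
    def test_v1(client):
        assert client == "client-v1"  # `client` was rebuilt with version 1
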
class TestIsPlatformViable(test_helpers.CiTestCase):
@@ -404,192 +173,99 @@ class TestIsPlatformViable(test_helpers.CiTestCase):
m_read_dmi_data.assert_has_calls([mock.call('chassis-asset-tag')])
-class TestLoadIndex(test_helpers.CiTestCase):
- """_load_index handles parsing of an index into a proper list.
- The tests here guarantee correct parsing of html version or
- a fixed version. See the function docstring for more doc."""
-
- _known_html_api_versions = dedent("""\
- <html>
- <head><title>Index of /openstack/</title></head>
- <body bgcolor="white">
- <h1>Index of /openstack/</h1><hr><pre><a href="../">../</a>
- <a href="2013-10-17/">2013-10-17/</a> 27-Jun-2018 12:22 -
- <a href="latest/">latest/</a> 27-Jun-2018 12:22 -
- </pre><hr></body>
- </html>""")
-
- _known_html_contents = dedent("""\
- <html>
- <head><title>Index of /openstack/2013-10-17/</title></head>
- <body bgcolor="white">
- <h1>Index of /openstack/2013-10-17/</h1><hr><pre><a href="../">../</a>
- <a href="meta_data.json">meta_data.json</a> 27-Jun-2018 12:22 679
- <a href="user_data">user_data</a> 27-Jun-2018 12:22 146
- </pre><hr></body>
- </html>""")
-
- def test_parse_html(self):
- """Test parsing of lower case html."""
- self.assertEqual(
- ['2013-10-17/', 'latest/'],
- oracle._load_index(self._known_html_api_versions))
- self.assertEqual(
- ['meta_data.json', 'user_data'],
- oracle._load_index(self._known_html_contents))
-
- def test_parse_html_upper(self):
- """Test parsing of upper case html, although known content is lower."""
- def _toupper(data):
- return data.replace("<a", "<A").replace("html>", "HTML>")
-
- self.assertEqual(
- ['2013-10-17/', 'latest/'],
- oracle._load_index(_toupper(self._known_html_api_versions)))
- self.assertEqual(
- ['meta_data.json', 'user_data'],
- oracle._load_index(_toupper(self._known_html_contents)))
-
- def test_parse_newline_list_with_endl(self):
- """Test parsing of newline separated list with ending newline."""
- self.assertEqual(
- ['2013-10-17/', 'latest/'],
- oracle._load_index("\n".join(["2013-10-17/", "latest/", ""])))
- self.assertEqual(
- ['meta_data.json', 'user_data'],
- oracle._load_index("\n".join(["meta_data.json", "user_data", ""])))
-
- def test_parse_newline_list_without_endl(self):
- """Test parsing of newline separated list with no ending newline.
-
- Actual openstack implementation does not include trailing newline."""
- self.assertEqual(
- ['2013-10-17/', 'latest/'],
- oracle._load_index("\n".join(["2013-10-17/", "latest/"])))
- self.assertEqual(
- ['meta_data.json', 'user_data'],
- oracle._load_index("\n".join(["meta_data.json", "user_data"])))
-
-
-class TestNetworkConfigFromOpcImds(test_helpers.CiTestCase):
-
- with_logs = True
-
- def setUp(self):
- super(TestNetworkConfigFromOpcImds, self).setUp()
- self.add_patch(DS_PATH + '.readurl', 'm_readurl')
- self.add_patch(DS_PATH + '.get_interfaces_by_mac',
- 'm_get_interfaces_by_mac')
-
- def test_failure_to_readurl(self):
- # readurl failures should just bubble out to the caller
- self.m_readurl.side_effect = Exception('oh no')
- with self.assertRaises(Exception) as excinfo:
- oracle._add_network_config_from_opc_imds({})
- self.assertEqual(str(excinfo.exception), 'oh no')
-
- def test_empty_response(self):
- # empty response error should just bubble out to the caller
- self.m_readurl.return_value = ''
- with self.assertRaises(Exception):
- oracle._add_network_config_from_opc_imds([])
-
- def test_invalid_json(self):
- # invalid JSON error should just bubble out to the caller
- self.m_readurl.return_value = '{'
- with self.assertRaises(Exception):
- oracle._add_network_config_from_opc_imds([])
-
- def test_no_secondary_nics_does_not_mutate_input(self):
- self.m_readurl.return_value = json.dumps([{}])
- # We test this by passing in a non-dict to ensure that no dict
+class TestNetworkConfigFromOpcImds:
+ def test_no_secondary_nics_does_not_mutate_input(self, oracle_ds):
+ oracle_ds._vnics_data = [{}]
+        # We test this by using a non-dict to ensure that no dict
# operations are used; failure would be seen as exceptions
- oracle._add_network_config_from_opc_imds(object())
+ oracle_ds._network_config = object()
+ oracle_ds._add_network_config_from_opc_imds()
- def test_bare_metal_machine_skipped(self):
+ def test_bare_metal_machine_skipped(self, oracle_ds, caplog):
# nicIndex in the first entry indicates a bare metal machine
- self.m_readurl.return_value = OPC_BM_SECONDARY_VNIC_RESPONSE
- # We test this by passing in a non-dict to ensure that no dict
+ oracle_ds._vnics_data = json.loads(OPC_BM_SECONDARY_VNIC_RESPONSE)
+ # We test this by using a non-dict to ensure that no dict
# operations are used
- self.assertFalse(oracle._add_network_config_from_opc_imds(object()))
- self.assertIn('bare metal machine', self.logs.getvalue())
+ oracle_ds._network_config = object()
+ oracle_ds._add_network_config_from_opc_imds()
+ assert 'bare metal machine' in caplog.text
- def test_missing_mac_skipped(self):
- self.m_readurl.return_value = OPC_VM_SECONDARY_VNIC_RESPONSE
- self.m_get_interfaces_by_mac.return_value = {}
+ def test_missing_mac_skipped(self, oracle_ds, caplog):
+ oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE)
- network_config = {'version': 1, 'config': [{'primary': 'nic'}]}
- oracle._add_network_config_from_opc_imds(network_config)
+ oracle_ds._network_config = {
+ 'version': 1, 'config': [{'primary': 'nic'}]
+ }
+ with mock.patch(DS_PATH + ".get_interfaces_by_mac", return_value={}):
+ oracle_ds._add_network_config_from_opc_imds()
- self.assertEqual(1, len(network_config['config']))
- self.assertIn(
- 'Interface with MAC 00:00:17:02:2b:b1 not found; skipping',
- self.logs.getvalue())
+ assert 1 == len(oracle_ds.network_config['config'])
+ assert 'Interface with MAC 00:00:17:02:2b:b1 not found; skipping' in \
+ caplog.text
- def test_missing_mac_skipped_v2(self):
- self.m_readurl.return_value = OPC_VM_SECONDARY_VNIC_RESPONSE
- self.m_get_interfaces_by_mac.return_value = {}
+ def test_missing_mac_skipped_v2(self, oracle_ds, caplog):
+ oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE)
- network_config = {'version': 2, 'ethernets': {'primary': {'nic': {}}}}
- oracle._add_network_config_from_opc_imds(network_config)
+ oracle_ds._network_config = {
+ 'version': 2, 'ethernets': {'primary': {'nic': {}}}
+ }
+ with mock.patch(DS_PATH + ".get_interfaces_by_mac", return_value={}):
+ oracle_ds._add_network_config_from_opc_imds()
- self.assertEqual(1, len(network_config['ethernets']))
- self.assertIn(
- 'Interface with MAC 00:00:17:02:2b:b1 not found; skipping',
- self.logs.getvalue())
+ assert 1 == len(oracle_ds.network_config['ethernets'])
+ assert 'Interface with MAC 00:00:17:02:2b:b1 not found; skipping' in \
+ caplog.text
- def test_secondary_nic(self):
- self.m_readurl.return_value = OPC_VM_SECONDARY_VNIC_RESPONSE
- mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3'
- self.m_get_interfaces_by_mac.return_value = {
- mac_addr: nic_name,
+ def test_secondary_nic(self, oracle_ds):
+ oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE)
+ oracle_ds._network_config = {
+ 'version': 1, 'config': [{'primary': 'nic'}]
}
-
- network_config = {'version': 1, 'config': [{'primary': 'nic'}]}
- oracle._add_network_config_from_opc_imds(network_config)
+ mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3'
+ with mock.patch(DS_PATH + ".get_interfaces_by_mac",
+ return_value={mac_addr: nic_name}):
+ oracle_ds._add_network_config_from_opc_imds()
# The input is mutated
- self.assertEqual(2, len(network_config['config']))
+ assert 2 == len(oracle_ds.network_config['config'])
- secondary_nic_cfg = network_config['config'][1]
- self.assertEqual(nic_name, secondary_nic_cfg['name'])
- self.assertEqual('physical', secondary_nic_cfg['type'])
- self.assertEqual(mac_addr, secondary_nic_cfg['mac_address'])
- self.assertEqual(9000, secondary_nic_cfg['mtu'])
+ secondary_nic_cfg = oracle_ds.network_config['config'][1]
+ assert nic_name == secondary_nic_cfg['name']
+ assert 'physical' == secondary_nic_cfg['type']
+ assert mac_addr == secondary_nic_cfg['mac_address']
+ assert 9000 == secondary_nic_cfg['mtu']
- self.assertEqual(1, len(secondary_nic_cfg['subnets']))
+ assert 1 == len(secondary_nic_cfg['subnets'])
subnet_cfg = secondary_nic_cfg['subnets'][0]
# These values are hard-coded in OPC_VM_SECONDARY_VNIC_RESPONSE
- self.assertEqual('10.0.0.231', subnet_cfg['address'])
+ assert '10.0.0.231' == subnet_cfg['address']
- def test_secondary_nic_v2(self):
- self.m_readurl.return_value = OPC_VM_SECONDARY_VNIC_RESPONSE
- mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3'
- self.m_get_interfaces_by_mac.return_value = {
- mac_addr: nic_name,
+ def test_secondary_nic_v2(self, oracle_ds):
+ oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE)
+ oracle_ds._network_config = {
+ 'version': 2, 'ethernets': {'primary': {'nic': {}}}
}
-
- network_config = {'version': 2, 'ethernets': {'primary': {'nic': {}}}}
- oracle._add_network_config_from_opc_imds(network_config)
+ mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3'
+ with mock.patch(DS_PATH + ".get_interfaces_by_mac",
+ return_value={mac_addr: nic_name}):
+ oracle_ds._add_network_config_from_opc_imds()
# The input is mutated
- self.assertEqual(2, len(network_config['ethernets']))
+ assert 2 == len(oracle_ds.network_config['ethernets'])
- secondary_nic_cfg = network_config['ethernets']['ens3']
- self.assertFalse(secondary_nic_cfg['dhcp4'])
- self.assertFalse(secondary_nic_cfg['dhcp6'])
- self.assertEqual(mac_addr, secondary_nic_cfg['match']['macaddress'])
- self.assertEqual(9000, secondary_nic_cfg['mtu'])
+ secondary_nic_cfg = oracle_ds.network_config['ethernets']['ens3']
+ assert secondary_nic_cfg['dhcp4'] is False
+ assert secondary_nic_cfg['dhcp6'] is False
+ assert mac_addr == secondary_nic_cfg['match']['macaddress']
+ assert 9000 == secondary_nic_cfg['mtu']
- self.assertEqual(1, len(secondary_nic_cfg['addresses']))
+ assert 1 == len(secondary_nic_cfg['addresses'])
# These values are hard-coded in OPC_VM_SECONDARY_VNIC_RESPONSE
- self.assertEqual('10.0.0.231', secondary_nic_cfg['addresses'][0])
+ assert '10.0.0.231' == secondary_nic_cfg['addresses'][0]
class TestNetworkConfigFiltersNetFailover(test_helpers.CiTestCase):
- with_logs = True
-
def setUp(self):
super(TestNetworkConfigFiltersNetFailover, self).setUp()
self.add_patch(DS_PATH + '.get_interfaces_by_mac',
@@ -732,4 +408,378 @@ class TestNetworkConfigFiltersNetFailover(test_helpers.CiTestCase):
self.assertEqual(expected_cfg, netcfg)
+def _mock_v2_urls(httpretty):
+ def instance_callback(request, uri, response_headers):
+ print(response_headers)
+ assert request.headers.get("Authorization") == "Bearer Oracle"
+ return [200, response_headers, OPC_V2_METADATA]
+
+ def vnics_callback(request, uri, response_headers):
+ assert request.headers.get("Authorization") == "Bearer Oracle"
+ return [200, response_headers, OPC_BM_SECONDARY_VNIC_RESPONSE]
+
+ httpretty.register_uri(
+ httpretty.GET,
+ "http://169.254.169.254/opc/v2/instance/",
+ body=instance_callback
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "http://169.254.169.254/opc/v2/vnics/",
+ body=vnics_callback
+ )
+
+
+def _mock_no_v2_urls(httpretty):
+ httpretty.register_uri(
+ httpretty.GET,
+ "http://169.254.169.254/opc/v2/instance/",
+ status=404,
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "http://169.254.169.254/opc/v1/instance/",
+ body=OPC_V1_METADATA
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "http://169.254.169.254/opc/v1/vnics/",
+ body=OPC_BM_SECONDARY_VNIC_RESPONSE
+ )
+
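As the callbacks above encode, the v2 IMDS endpoint only answers requests that
carry the fixed ``Authorization: Bearer Oracle`` header, while v1 needs no
header. Outside the test suite, a raw v2 fetch looks roughly like this
(standard-library sketch, matching the behaviour these mocks assume):

    import json
    import urllib.request

    req = urllib.request.Request(
        "http://169.254.169.254/opc/v2/instance/",
        headers={"Authorization": "Bearer Oracle"},
    )
    with urllib.request.urlopen(req, timeout=5) as resp:
        instance = json.loads(resp.read().decode("utf-8"))
    print(instance["displayName"])
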
+
+class TestReadOpcMetadata:
+ # See https://docs.pytest.org/en/stable/example
+ # /parametrize.html#parametrizing-conditional-raising
+ does_not_raise = ExitStack
+
+ @mock.patch("cloudinit.url_helper.time.sleep", lambda _: None)
+ @pytest.mark.parametrize(
+ 'version,setup_urls,instance_data,fetch_vnics,vnics_data', [
+ (2, _mock_v2_urls, json.loads(OPC_V2_METADATA), True,
+ json.loads(OPC_BM_SECONDARY_VNIC_RESPONSE)),
+ (2, _mock_v2_urls, json.loads(OPC_V2_METADATA), False, None),
+ (1, _mock_no_v2_urls, json.loads(OPC_V1_METADATA), True,
+ json.loads(OPC_BM_SECONDARY_VNIC_RESPONSE)),
+ (1, _mock_no_v2_urls, json.loads(OPC_V1_METADATA), False, None),
+ ]
+ )
+ def test_metadata_returned(
+ self, version, setup_urls, instance_data,
+ fetch_vnics, vnics_data, httpretty
+ ):
+ setup_urls(httpretty)
+ metadata = oracle.read_opc_metadata(fetch_vnics_data=fetch_vnics)
+
+ assert version == metadata.version
+ assert instance_data == metadata.instance_data
+ assert vnics_data == metadata.vnics_data
+
+ # No need to actually wait between retries in the tests
+ @mock.patch("cloudinit.url_helper.time.sleep", lambda _: None)
+ @pytest.mark.parametrize(
+ "v2_failure_count,v1_failure_count,expected_body,expectation",
+ [
+ (1, 0, json.loads(OPC_V2_METADATA), does_not_raise()),
+ (2, 0, json.loads(OPC_V2_METADATA), does_not_raise()),
+ (3, 0, json.loads(OPC_V1_METADATA), does_not_raise()),
+ (3, 1, json.loads(OPC_V1_METADATA), does_not_raise()),
+ (3, 2, json.loads(OPC_V1_METADATA), does_not_raise()),
+ (3, 3, None, pytest.raises(UrlError)),
+ ]
+ )
+ def test_retries(self, v2_failure_count, v1_failure_count,
+ expected_body, expectation, httpretty):
+ v2_responses = [httpretty.Response("", status=404)] * v2_failure_count
+ v2_responses.append(httpretty.Response(OPC_V2_METADATA))
+ v1_responses = [httpretty.Response("", status=404)] * v1_failure_count
+ v1_responses.append(httpretty.Response(OPC_V1_METADATA))
+
+ httpretty.register_uri(
+ httpretty.GET,
+ "http://169.254.169.254/opc/v1/instance/",
+ responses=v1_responses,
+ )
+ httpretty.register_uri(
+ httpretty.GET,
+ "http://169.254.169.254/opc/v2/instance/",
+ responses=v2_responses,
+ )
+ with expectation:
+ assert expected_body == oracle.read_opc_metadata().instance_data
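
The parametrized failure counts encode the fallback contract expected of
``read_opc_metadata``: three attempts against v2, and only after the third
failure a fallback to v1, which again gets three attempts. A rough standalone
sketch of that ordering (not cloud-init's implementation, which goes through
its url_helper retry machinery):

    import urllib.error
    import urllib.request

    ENDPOINTS = (
        ("http://169.254.169.254/opc/v2/instance/",
         {"Authorization": "Bearer Oracle"}),
        ("http://169.254.169.254/opc/v1/instance/", {}),
    )

    def fetch_instance_json(attempts=3):
        # Try each endpoint in order, up to `attempts` times apiece,
        # re-raising the last error if all attempts fail.
        last_error = None
        for url, headers in ENDPOINTS:
            for _ in range(attempts):
                try:
                    req = urllib.request.Request(url, headers=headers)
                    with urllib.request.urlopen(req, timeout=5) as resp:
                        return resp.read().decode("utf-8")
                except urllib.error.URLError as exc:
                    last_error = exc
        raise last_error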
+
+
+class TestCommon_GetDataBehaviour:
+ """This test class tests behaviour common to iSCSI and non-iSCSI root.
+
+ It defines a fixture, parameterized_oracle_ds, which is used in all the
+ tests herein to test that the commonly expected behaviour is the same with
+ iSCSI root and without.
+
+ (As non-iSCSI root behaviour is a superset of iSCSI root behaviour this
+ class is implicitly also testing all iSCSI root behaviour so there is no
+ separate class for that case.)
+ """
+
+ @pytest.yield_fixture(params=[True, False])
+ def parameterized_oracle_ds(self, request, oracle_ds):
+ """oracle_ds parameterized for iSCSI and non-iSCSI root respectively"""
+ is_iscsi_root = request.param
+ with ExitStack() as stack:
+ stack.enter_context(
+ mock.patch(
+ DS_PATH + "._is_iscsi_root", return_value=is_iscsi_root
+ )
+ )
+ if not is_iscsi_root:
+ stack.enter_context(
+ mock.patch(DS_PATH + ".net.find_fallback_nic")
+ )
+ stack.enter_context(
+ mock.patch(DS_PATH + ".dhcp.EphemeralDHCPv4")
+ )
+ yield oracle_ds
+
+ @mock.patch(
+ DS_PATH + "._is_platform_viable", mock.Mock(return_value=False)
+ )
+ def test_false_if_platform_not_viable(
+ self, parameterized_oracle_ds,
+ ):
+ assert not parameterized_oracle_ds._get_data()
+
+ @pytest.mark.parametrize(
+ "keyname,expected_value",
+ (
+ ("availability-zone", "phx-ad-3"),
+ ("launch-index", 0),
+ ("local-hostname", "instance-20200320-1400"),
+ (
+ "instance-id",
+ "ocid1.instance.oc1.phx"
+ ".anyhqljtniwq6syc3nex55sep5w34qbwmw6TRUNCATED",
+ ),
+ ("name", "instance-20200320-1400"),
+ (
+ "public_keys",
+ "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQ truncated",
+ ),
+ ),
+ )
+ def test_metadata_keys_set_correctly(
+ self, keyname, expected_value, parameterized_oracle_ds,
+ ):
+ assert parameterized_oracle_ds._get_data()
+ assert expected_value == parameterized_oracle_ds.metadata[keyname]
+
+ @pytest.mark.parametrize(
+ "attribute_name,expected_value",
+ [
+ ("_crawled_metadata", json.loads(OPC_V2_METADATA)),
+ (
+ "userdata_raw",
+ base64.b64decode(b"IyEvYmluL3NoCnRvdWNoIC90bXAvZm9v"),
+ ),
+ ("system_uuid", "my-test-uuid"),
+ ],
+ )
+ @mock.patch(
+ DS_PATH + "._read_system_uuid", mock.Mock(return_value="my-test-uuid")
+ )
+ def test_attributes_set_correctly(
+ self, attribute_name, expected_value, parameterized_oracle_ds,
+ ):
+ assert parameterized_oracle_ds._get_data()
+ assert expected_value == getattr(
+ parameterized_oracle_ds, attribute_name
+ )
+
+ @pytest.mark.parametrize(
+ "ssh_keys,expected_value",
+ [
+ # No SSH keys in metadata => no keys detected
+ (None, []),
+ # Empty SSH keys in metadata => no keys detected
+ ("", []),
+ # Single SSH key in metadata => single key detected
+ ("ssh-rsa ... test@test", ["ssh-rsa ... test@test"]),
+ # Multiple SSH keys in metadata => multiple keys detected
+ (
+ "ssh-rsa ... test@test\nssh-rsa ... test2@test2",
+ ["ssh-rsa ... test@test", "ssh-rsa ... test2@test2"],
+ ),
+ ],
+ )
+ def test_public_keys_handled_correctly(
+ self, ssh_keys, expected_value, parameterized_oracle_ds
+ ):
+ instance_data = json.loads(OPC_V1_METADATA)
+ if ssh_keys is None:
+ del instance_data["metadata"]["ssh_authorized_keys"]
+ else:
+ instance_data["metadata"]["ssh_authorized_keys"] = ssh_keys
+ metadata = OpcMetadata(None, instance_data, None)
+ with mock.patch(
+ DS_PATH + ".read_opc_metadata", mock.Mock(return_value=metadata),
+ ):
+ assert parameterized_oracle_ds._get_data()
+ assert (
+ expected_value == parameterized_oracle_ds.get_public_ssh_keys()
+ )
+
+ def test_missing_user_data_handled_gracefully(
+ self, parameterized_oracle_ds
+ ):
+ instance_data = json.loads(OPC_V1_METADATA)
+ del instance_data["metadata"]["user_data"]
+ metadata = OpcMetadata(None, instance_data, None)
+ with mock.patch(
+ DS_PATH + ".read_opc_metadata", mock.Mock(return_value=metadata),
+ ):
+ assert parameterized_oracle_ds._get_data()
+
+ assert parameterized_oracle_ds.userdata_raw is None
+
+ def test_missing_metadata_handled_gracefully(
+ self, parameterized_oracle_ds
+ ):
+ instance_data = json.loads(OPC_V1_METADATA)
+ del instance_data["metadata"]
+ metadata = OpcMetadata(None, instance_data, None)
+ with mock.patch(
+ DS_PATH + ".read_opc_metadata", mock.Mock(return_value=metadata),
+ ):
+ assert parameterized_oracle_ds._get_data()
+
+ assert parameterized_oracle_ds.userdata_raw is None
+ assert [] == parameterized_oracle_ds.get_public_ssh_keys()
+
+
+@mock.patch(DS_PATH + "._is_iscsi_root", lambda: False)
+class TestNonIscsiRoot_GetDataBehaviour:
+ @mock.patch(DS_PATH + ".dhcp.EphemeralDHCPv4")
+ @mock.patch(DS_PATH + ".net.find_fallback_nic")
+ def test_read_opc_metadata_called_with_ephemeral_dhcp(
+ self, m_find_fallback_nic, m_EphemeralDHCPv4, oracle_ds
+ ):
+ in_context_manager = False
+
+ def enter_context_manager():
+ nonlocal in_context_manager
+ in_context_manager = True
+
+ def exit_context_manager(*args):
+ nonlocal in_context_manager
+ in_context_manager = False
+
+ m_EphemeralDHCPv4.return_value.__enter__.side_effect = (
+ enter_context_manager
+ )
+ m_EphemeralDHCPv4.return_value.__exit__.side_effect = (
+ exit_context_manager
+ )
+
+ def assert_in_context_manager(**kwargs):
+ assert in_context_manager
+ return mock.MagicMock()
+
+ with mock.patch(
+ DS_PATH + ".read_opc_metadata",
+ mock.Mock(side_effect=assert_in_context_manager),
+ ):
+ assert oracle_ds._get_data()
+
+ assert [
+ mock.call(m_find_fallback_nic.return_value)
+ ] == m_EphemeralDHCPv4.call_args_list
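
The nonlocal-flag pattern above is a compact way to assert that a call happens
while a mocked context manager is active. Distilled to its essentials:

    from unittest import mock

    def test_called_while_cm_active():
        cm = mock.MagicMock()
        inside = False

        def _enter(*args):
            nonlocal inside
            inside = True

        def _exit(*args):
            nonlocal inside
            inside = False

        cm.return_value.__enter__.side_effect = _enter
        cm.return_value.__exit__.side_effect = _exit

        with cm():
            assert inside  # only True between __enter__ and __exit__
        assert not inside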
+
+
+@mock.patch(DS_PATH + ".get_interfaces_by_mac", lambda: {})
+@mock.patch(DS_PATH + ".cmdline.read_initramfs_config")
+class TestNetworkConfig:
+ def test_network_config_cached(self, m_read_initramfs_config, oracle_ds):
+ """.network_config should be cached"""
+ assert 0 == m_read_initramfs_config.call_count
+ oracle_ds.network_config # pylint: disable=pointless-statement
+ assert 1 == m_read_initramfs_config.call_count
+ oracle_ds.network_config # pylint: disable=pointless-statement
+ assert 1 == m_read_initramfs_config.call_count
+
+ def test_network_cmdline(self, m_read_initramfs_config, oracle_ds):
+ """network_config should prefer initramfs config over fallback"""
+ ncfg = {"version": 1, "config": [{"a": "b"}]}
+ m_read_initramfs_config.return_value = copy.deepcopy(ncfg)
+
+ assert ncfg == oracle_ds.network_config
+ assert 0 == oracle_ds.distro.generate_fallback_config.call_count
+
+ def test_network_fallback(self, m_read_initramfs_config, oracle_ds):
+        """network_config should fall back when initramfs config is absent"""
+ ncfg = {"version": 1, "config": [{"a": "b"}]}
+
+ m_read_initramfs_config.return_value = None
+ oracle_ds.distro.generate_fallback_config.return_value = copy.deepcopy(
+ ncfg
+ )
+
+ assert ncfg == oracle_ds.network_config
+
+ @pytest.mark.parametrize(
+ "configure_secondary_nics,expect_secondary_nics",
+ [(True, True), (False, False), (None, False)],
+ )
+ def test_secondary_nic_addition(
+ self,
+ m_read_initramfs_config,
+ configure_secondary_nics,
+ expect_secondary_nics,
+ oracle_ds,
+ ):
+ """Test that _add_network_config_from_opc_imds is called as expected
+
+ (configure_secondary_nics=None is used to test the default behaviour.)
+ """
+ m_read_initramfs_config.return_value = {"version": 1, "config": []}
+
+ if configure_secondary_nics is not None:
+ oracle_ds.ds_cfg[
+ "configure_secondary_nics"
+ ] = configure_secondary_nics
+
+ def side_effect(self):
+ self._network_config["secondary_added"] = mock.sentinel.needle
+
+ oracle_ds._vnics_data = 'DummyData'
+ with mock.patch.object(
+ oracle.DataSourceOracle, "_add_network_config_from_opc_imds",
+ new=side_effect,
+ ):
+ was_secondary_added = "secondary_added" in oracle_ds.network_config
+ assert expect_secondary_nics == was_secondary_added
+
+ def test_secondary_nic_failure_isnt_blocking(
+ self,
+ m_read_initramfs_config,
+ caplog,
+ oracle_ds,
+ ):
+ oracle_ds.ds_cfg["configure_secondary_nics"] = True
+ oracle_ds._vnics_data = "DummyData"
+
+ with mock.patch.object(
+ oracle.DataSourceOracle, "_add_network_config_from_opc_imds",
+ side_effect=Exception()
+ ):
+ network_config = oracle_ds.network_config
+ assert network_config == m_read_initramfs_config.return_value
+ assert "Failed to parse secondary network configuration" in caplog.text
+
+ def test_ds_network_cfg_preferred_over_initramfs(self, _m):
+ """Ensure that DS net config is preferred over initramfs config"""
+ config_sources = oracle.DataSourceOracle.network_config_sources
+ ds_idx = config_sources.index(NetworkConfigSource.ds)
+ initramfs_idx = config_sources.index(NetworkConfigSource.initramfs)
+ assert ds_idx < initramfs_idx
+
+
# vi: ts=4 expandtab
diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py
index c3a9b5b7..c08042d6 100644
--- a/cloudinit/ssh_util.py
+++ b/cloudinit/ssh_util.py
@@ -17,34 +17,52 @@ LOG = logging.getLogger(__name__)
# See: man sshd_config
DEF_SSHD_CFG = "/etc/ssh/sshd_config"
-# taken from OpenSSH source openssh-7.3p1/sshkey.c:
-# static const struct keytype keytypes[] = { ... }
+# This list has been filtered from the keytypes of the OpenSSH source
+# (openssh-8.3p1/sshkey.c):
+#     static const struct keytype keytypes[] = {
+# We filter out the keytypes with the sigonly flag set, e.g.:
+#     { "rsa-sha2-256", "RSA", NULL, KEY_RSA, 0, 0, 1 },
+# Refer to the keytype struct of OpenSSH in the same file to check
+# whether the position of the sigonly flag has moved.
+#
+# dsa, rsa, ecdsa and ed25519 are kept for legacy reasons, as they are
+# valid public key types in some old distros. They can potentially be
+# removed in the future when support for the older distros is dropped.
+#
+# When updating the list, also update the _is_printable_key list in
+# cloudinit/config/cc_ssh_authkey_fingerprints.py
VALID_KEY_TYPES = (
"dsa",
+ "rsa",
"ecdsa",
- "ecdsa-sha2-nistp256",
+ "ed25519",
"ecdsa-sha2-nistp256-cert-v01@openssh.com",
- "ecdsa-sha2-nistp384",
+ "ecdsa-sha2-nistp256",
"ecdsa-sha2-nistp384-cert-v01@openssh.com",
- "ecdsa-sha2-nistp521",
+ "ecdsa-sha2-nistp384",
"ecdsa-sha2-nistp521-cert-v01@openssh.com",
- "ed25519",
- "rsa",
- "rsa-sha2-256",
- "rsa-sha2-512",
- "ssh-dss",
+ "ecdsa-sha2-nistp521",
+ "sk-ecdsa-sha2-nistp256-cert-v01@openssh.com",
+ "sk-ecdsa-sha2-nistp256@openssh.com",
+ "sk-ssh-ed25519-cert-v01@openssh.com",
+ "sk-ssh-ed25519@openssh.com",
"ssh-dss-cert-v01@openssh.com",
- "ssh-ed25519",
+ "ssh-dss",
"ssh-ed25519-cert-v01@openssh.com",
- "ssh-rsa",
+ "ssh-ed25519",
"ssh-rsa-cert-v01@openssh.com",
+ "ssh-rsa",
+ "ssh-xmss-cert-v01@openssh.com",
+ "ssh-xmss@openssh.com",
)
+_DISABLE_USER_SSH_EXIT = 142
DISABLE_USER_OPTS = (
"no-port-forwarding,no-agent-forwarding,"
"no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\""
- " rather than the user \\\"$DISABLE_USER\\\".\';echo;sleep 10\"")
+ " rather than the user \\\"$DISABLE_USER\\\".\';echo;sleep 10;"
+ "exit " + str(_DISABLE_USER_SSH_EXIT) + "\"")
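
``DISABLE_USER_OPTS`` is a template: ``$USER`` and ``$DISABLE_USER`` are
substituted by the consumer before the options are prepended to a key line in
``authorized_keys``, and the fixed exit code (142) lets tooling tell a
rejected disabled-user login apart from other failures. A hedged sketch of
that substitution (values hypothetical; cc_ssh.py may differ in detail):

    opts = DISABLE_USER_OPTS.replace("$USER", "NONE")
    opts = opts.replace("$DISABLE_USER", "root")
    key_line = opts + " ssh-ed25519 AAAA... comment"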
class AuthKeyLine(object):
@@ -344,7 +362,9 @@ def update_ssh_config(updates, fname=DEF_SSHD_CFG):
changed = update_ssh_config_lines(lines=lines, updates=updates)
if changed:
util.write_file(
- fname, "\n".join([str(l) for l in lines]) + "\n", copy_mode=True)
+ fname, "\n".join(
+ [str(line) for line in lines]
+ ) + "\n", preserve_mode=True)
return len(changed) != 0
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index db8ba64c..765f4aab 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -696,7 +696,7 @@ class Init(object):
netcfg, src = self._find_networking_config()
# ensure all physical devices in config are present
- net.wait_for_physdevs(netcfg)
+ self.distro.networking.wait_for_physdevs(netcfg)
# apply renames from config
self._apply_netcfg_names(netcfg)
@@ -947,7 +947,6 @@ def _pkl_load(fname):
except Exception as e:
if os.path.isfile(fname):
LOG.warning("failed loading pickle in %s: %s", fname, e)
- pass
# This is allowed so just return nothing successfully loaded...
if not pickle_contents:
diff --git a/cloudinit/subp.py b/cloudinit/subp.py
index 0ad09306..3e4efa42 100644
--- a/cloudinit/subp.py
+++ b/cloudinit/subp.py
@@ -1,9 +1,11 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""Common utility functions for interacting with subprocess."""
-# TODO move subp shellify and runparts related functions out of util.py
-
import logging
+import os
+import subprocess
+
+from errno import ENOEXEC
LOG = logging.getLogger(__name__)
@@ -54,4 +56,332 @@ def prepend_base_command(base_command, commands):
return fixed_commands
+class ProcessExecutionError(IOError):
+
+ MESSAGE_TMPL = ('%(description)s\n'
+ 'Command: %(cmd)s\n'
+ 'Exit code: %(exit_code)s\n'
+ 'Reason: %(reason)s\n'
+ 'Stdout: %(stdout)s\n'
+ 'Stderr: %(stderr)s')
+ empty_attr = '-'
+
+ def __init__(self, stdout=None, stderr=None,
+ exit_code=None, cmd=None,
+ description=None, reason=None,
+ errno=None):
+ if not cmd:
+ self.cmd = self.empty_attr
+ else:
+ self.cmd = cmd
+
+ if not description:
+ if not exit_code and errno == ENOEXEC:
+ self.description = 'Exec format error. Missing #! in script?'
+ else:
+ self.description = 'Unexpected error while running command.'
+ else:
+ self.description = description
+
+ if not isinstance(exit_code, int):
+ self.exit_code = self.empty_attr
+ else:
+ self.exit_code = exit_code
+
+ if not stderr:
+ if stderr is None:
+ self.stderr = self.empty_attr
+ else:
+ self.stderr = stderr
+ else:
+ self.stderr = self._indent_text(stderr)
+
+ if not stdout:
+ if stdout is None:
+ self.stdout = self.empty_attr
+ else:
+ self.stdout = stdout
+ else:
+ self.stdout = self._indent_text(stdout)
+
+ if reason:
+ self.reason = reason
+ else:
+ self.reason = self.empty_attr
+
+ self.errno = errno
+ message = self.MESSAGE_TMPL % {
+ 'description': self._ensure_string(self.description),
+ 'cmd': self._ensure_string(self.cmd),
+ 'exit_code': self._ensure_string(self.exit_code),
+ 'stdout': self._ensure_string(self.stdout),
+ 'stderr': self._ensure_string(self.stderr),
+ 'reason': self._ensure_string(self.reason),
+ }
+ IOError.__init__(self, message)
+
+ def _ensure_string(self, text):
+ """
+        If text is a bytes object, decode it to str.
+ """
+ return text.decode() if isinstance(text, bytes) else text
+
+ def _indent_text(self, text, indent_level=8):
+ """
+        indent text on all but the first line, allowing for easy-to-read output
+ """
+ cr = '\n'
+ indent = ' ' * indent_level
+ # if input is bytes, return bytes
+ if isinstance(text, bytes):
+ cr = cr.encode()
+ indent = indent.encode()
+ # remove any newlines at end of text first to prevent unneeded blank
+ # line in output
+ return text.rstrip(cr).replace(cr, cr + indent)
+
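For reference, here is roughly how a ``ProcessExecutionError`` renders: an
explicit empty string is kept as-is, while an omitted (None) stream becomes
the ``-`` placeholder:

    err = ProcessExecutionError(cmd=['mycmd'], exit_code=2, stdout='')
    print(err)
    # Unexpected error while running command.
    # Command: ['mycmd']
    # Exit code: 2
    # Reason: -
    # Stdout:
    # Stderr: -
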
+
+def subp(args, data=None, rcs=None, env=None, capture=True,
+ combine_capture=False, shell=False,
+ logstring=False, decode="replace", target=None, update_env=None,
+ status_cb=None):
+ """Run a subprocess.
+
+ :param args: command to run in a list. [cmd, arg1, arg2...]
+ :param data: input to the command, made available on its stdin.
+ :param rcs:
+ a list of allowed return codes. If subprocess exits with a value not
+ in this list, a ProcessExecutionError will be raised. By default,
+ data is returned as a string. See 'decode' parameter.
+ :param env: a dictionary for the command's environment.
+ :param capture:
+ boolean indicating if output should be captured. If True, then stderr
+ and stdout will be returned. If False, they will not be redirected.
+ :param combine_capture:
+ boolean indicating if stderr should be redirected to stdout. When True,
+ interleaved stderr and stdout will be returned as the first element of
+ a tuple, the second will be empty string or bytes (per decode).
+        If combine_capture is True, then output is captured independently of
+ the value of capture.
+ :param shell: boolean indicating if this should be run with a shell.
+ :param logstring:
+ the command will be logged to DEBUG. If it contains info that should
+ not be logged, then logstring will be logged instead.
+ :param decode:
+ if False, no decoding will be done and returned stdout and stderr will
+ be bytes. Other allowed values are 'strict', 'ignore', and 'replace'.
+ These values are passed through to bytes().decode() as the 'errors'
+ parameter. There is no support for decoding to other than utf-8.
+ :param target:
+ not supported, kwarg present only to make function signature similar
+ to curtin's subp.
+ :param update_env:
+        update the environment for this command with this dictionary.
+        This will not affect the current process's os.environ.
+    :param status_cb:
+        call this function with a single string argument before starting
+ and after finishing.
+
+    :return:
+        if not capturing, return is (None, None)
+        if capturing, stdout and stderr are returned.
+        if decode:
+            entries in tuple will be str
+        if not decode:
+            entries in tuple will be bytes
+ """
+
+ # not supported in cloud-init (yet), for now kept in the call signature
+ # to ease maintaining code shared between cloud-init and curtin
+ if target is not None:
+ raise ValueError("target arg not supported by cloud-init")
+
+ if rcs is None:
+ rcs = [0]
+
+ devnull_fp = None
+
+ if update_env:
+ if env is None:
+ env = os.environ
+ env = env.copy()
+ env.update(update_env)
+
+ if target_path(target) != "/":
+ args = ['chroot', target] + list(args)
+
+ if status_cb:
+ command = ' '.join(args) if isinstance(args, list) else args
+ status_cb('Begin run command: {command}\n'.format(command=command))
+ if not logstring:
+ LOG.debug(("Running command %s with allowed return codes %s"
+ " (shell=%s, capture=%s)"),
+ args, rcs, shell, 'combine' if combine_capture else capture)
+ else:
+ LOG.debug(("Running hidden command to protect sensitive "
+ "input/output logstring: %s"), logstring)
+
+ stdin = None
+ stdout = None
+ stderr = None
+ if capture:
+ stdout = subprocess.PIPE
+ stderr = subprocess.PIPE
+ if combine_capture:
+ stdout = subprocess.PIPE
+ stderr = subprocess.STDOUT
+ if data is None:
+            # using devnull ensures any reads get null, rather
+ # than possibly waiting on input.
+ devnull_fp = open(os.devnull)
+ stdin = devnull_fp
+ else:
+ stdin = subprocess.PIPE
+ if not isinstance(data, bytes):
+ data = data.encode()
+
+ # Popen converts entries in the arguments array from non-bytes to bytes.
+ # When locale is unset it may use ascii for that encoding which can
+ # cause UnicodeDecodeErrors. (LP: #1751051)
+ if isinstance(args, bytes):
+ bytes_args = args
+ elif isinstance(args, str):
+ bytes_args = args.encode("utf-8")
+ else:
+ bytes_args = [
+ x if isinstance(x, bytes) else x.encode("utf-8")
+ for x in args]
+ try:
+ sp = subprocess.Popen(bytes_args, stdout=stdout,
+ stderr=stderr, stdin=stdin,
+ env=env, shell=shell)
+ (out, err) = sp.communicate(data)
+ except OSError as e:
+ if status_cb:
+ status_cb('ERROR: End run command: invalid command provided\n')
+ raise ProcessExecutionError(
+ cmd=args, reason=e, errno=e.errno,
+ stdout="-" if decode else b"-",
+ stderr="-" if decode else b"-"
+ ) from e
+ finally:
+ if devnull_fp:
+ devnull_fp.close()
+
+ # Just ensure blank instead of none.
+ if capture or combine_capture:
+ if not out:
+ out = b''
+ if not err:
+ err = b''
+ if decode:
+ def ldecode(data, m='utf-8'):
+ if not isinstance(data, bytes):
+ return data
+ return data.decode(m, decode)
+
+ out = ldecode(out)
+ err = ldecode(err)
+
+ rc = sp.returncode
+ if rc not in rcs:
+ if status_cb:
+ status_cb(
+ 'ERROR: End run command: exit({code})\n'.format(code=rc))
+ raise ProcessExecutionError(stdout=out, stderr=err,
+ exit_code=rc,
+ cmd=args)
+ if status_cb:
+ status_cb('End run command: exit({code})\n'.format(code=rc))
+ return (out, err)
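
Typical invocations of ``subp`` (assuming a POSIX host with the named
binaries available):

    from cloudinit import subp

    out, err = subp.subp(['uname', '-r'])           # decoded str by default
    out, _ = subp.subp(['cat'], data=b'hi')         # feed bytes on stdin
    out, _ = subp.subp('echo "$HOME"', shell=True)  # run through a shell
    out, _ = subp.subp(['ls', 'nope'], rcs=[0, 2])  # tolerate exit code 2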
+
+
+def target_path(target, path=None):
+ # return 'path' inside target, accepting target as None
+ if target in (None, ""):
+ target = "/"
+ elif not isinstance(target, str):
+ raise ValueError("Unexpected input for target: %s" % target)
+ else:
+ target = os.path.abspath(target)
+ # abspath("//") returns "//" specifically for 2 slashes.
+ if target.startswith("//"):
+ target = target[1:]
+
+ if not path:
+ return target
+
+ # os.path.join("/etc", "/foo") returns "/foo". Chomp all leading /.
+ while len(path) and path[0] == "/":
+ path = path[1:]
+
+ return os.path.join(target, path)
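
``target_path`` in a nutshell:

    target_path(None)                        # -> "/"
    target_path(None, "/etc/hosts")          # -> "/etc/hosts"
    target_path("/mnt/root", "/etc/hosts")   # -> "/mnt/root/etc/hosts"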
+
+
+def which(program, search=None, target=None):
+ target = target_path(target)
+
+ if os.path.sep in program:
+ # if program had a '/' in it, then do not search PATH
+ # 'which' does consider cwd here. (cd / && which bin/ls) = bin/ls
+ # so effectively we set cwd to / (or target)
+ if is_exe(target_path(target, program)):
+ return program
+
+ if search is None:
+ paths = [p.strip('"') for p in
+ os.environ.get("PATH", "").split(os.pathsep)]
+ if target == "/":
+ search = paths
+ else:
+ search = [p for p in paths if p.startswith("/")]
+
+ # normalize path input
+ search = [os.path.abspath(p) for p in search]
+
+ for path in search:
+ ppath = os.path.sep.join((path, program))
+ if is_exe(target_path(target, ppath)):
+ return ppath
+
+ return None
+
+
+def is_exe(fpath):
+ # return boolean indicating if fpath exists and is executable.
+ return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
+
+
+def runparts(dirp, skip_no_exist=True, exe_prefix=None):
+ if skip_no_exist and not os.path.isdir(dirp):
+ return
+
+ failed = []
+ attempted = []
+
+ if exe_prefix is None:
+ prefix = []
+ elif isinstance(exe_prefix, str):
+ prefix = [str(exe_prefix)]
+ elif isinstance(exe_prefix, list):
+ prefix = exe_prefix
+ else:
+ raise TypeError("exe_prefix must be None, str, or list")
+
+ for exe_name in sorted(os.listdir(dirp)):
+ exe_path = os.path.join(dirp, exe_name)
+ if is_exe(exe_path):
+ attempted.append(exe_path)
+ try:
+ subp(prefix + [exe_path], capture=False)
+ except ProcessExecutionError as e:
+ LOG.debug(e)
+ failed.append(exe_name)
+
+ if failed and attempted:
+ raise RuntimeError(
+ 'Runparts: %s failures (%s) in %s attempted commands' %
+ (len(failed), ",".join(failed), len(attempted)))
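
Example use of ``runparts`` (directory name hypothetical): run every
executable in a hook directory in sorted order, each wrapped in ``sh -e``,
raising if any of them fails:

    runparts('/etc/myapp/hooks.d', exe_prefix=['sh', '-e'])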
+
+
# vi: ts=4 expandtab
diff --git a/cloudinit/templater.py b/cloudinit/templater.py
index e47cdeda..a00ade20 100644
--- a/cloudinit/templater.py
+++ b/cloudinit/templater.py
@@ -21,13 +21,10 @@ except (ImportError, AttributeError):
CHEETAH_AVAILABLE = False
try:
- from jinja2.runtime import implements_to_string
from jinja2 import Template as JTemplate
from jinja2 import DebugUndefined as JUndefined
JINJA_AVAILABLE = True
except (ImportError, AttributeError):
- from cloudinit.helpers import identity
- implements_to_string = identity
JINJA_AVAILABLE = False
JUndefined = object
@@ -42,7 +39,6 @@ BASIC_MATCHER = re.compile(r'\$\{([A-Za-z0-9_.]+)\}|\$([A-Za-z0-9_.]+)')
MISSING_JINJA_PREFIX = u'CI_MISSING_JINJA_VAR/'
-@implements_to_string # Needed for python2.7. Otherwise cached super.__str__
class UndefinedJinjaVariable(JUndefined):
"""Class used to represent any undefined jinja template variable."""
diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py
index 70f6bad7..58f63b69 100644
--- a/cloudinit/tests/helpers.py
+++ b/cloudinit/tests/helpers.py
@@ -1,7 +1,5 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from __future__ import print_function
-
import functools
import httpretty
import io
@@ -13,20 +11,10 @@ import string
import sys
import tempfile
import time
+import unittest
+from contextlib import ExitStack, contextmanager
from unittest import mock
-
-import unittest2
-from unittest2.util import strclass
-
-try:
- from contextlib import ExitStack, contextmanager
-except ImportError:
- from contextlib2 import ExitStack, contextmanager
-
-try:
- from configparser import ConfigParser
-except ImportError:
- from ConfigParser import ConfigParser
+from unittest.util import strclass
from cloudinit.config.schema import (
SchemaValidationError, validate_cloudconfig_schema)
@@ -35,13 +23,14 @@ from cloudinit import distros
from cloudinit import helpers as ch
from cloudinit.sources import DataSourceNone
from cloudinit.templater import JINJA_AVAILABLE
+from cloudinit import subp
from cloudinit import util
-_real_subp = util.subp
+_real_subp = subp.subp
# Used for skipping tests
-SkipTest = unittest2.SkipTest
-skipIf = unittest2.skipIf
+SkipTest = unittest.SkipTest
+skipIf = unittest.skipIf
# Makes the old path start
@@ -78,7 +67,7 @@ def retarget_many_wrapper(new_base, am, old_func):
return wrapper
-class TestCase(unittest2.TestCase):
+class TestCase(unittest.TestCase):
def reset_global_state(self):
"""Reset any global state to its original settings.
@@ -114,16 +103,6 @@ class TestCase(unittest2.TestCase):
self.addCleanup(m.stop)
setattr(self, attr, p)
- # prefer python3 read_file over readfp but allow fallback
- def parse_and_read(self, contents):
- parser = ConfigParser()
- if hasattr(parser, 'read_file'):
- parser.read_file(contents)
- elif hasattr(parser, 'readfp'):
- # pylint: disable=W1505
- parser.readfp(contents)
- return parser
-
class CiTestCase(TestCase):
"""This is the preferred test case base class unless user
@@ -156,14 +135,17 @@ class CiTestCase(TestCase):
self.old_handlers = self.logger.handlers
self.logger.handlers = [handler]
if self.allowed_subp is True:
- util.subp = _real_subp
+ subp.subp = _real_subp
else:
- util.subp = self._fake_subp
+ subp.subp = self._fake_subp
def _fake_subp(self, *args, **kwargs):
if 'args' in kwargs:
cmd = kwargs['args']
else:
+ if not args:
+ raise TypeError(
+ "subp() missing 1 required positional argument: 'args'")
cmd = args[0]
if not isinstance(cmd, str):
@@ -190,7 +172,7 @@ class CiTestCase(TestCase):
# Remove the handler we setup
logging.getLogger().handlers = self.old_handlers
logging.getLogger().level = None
- util.subp = _real_subp
+ subp.subp = _real_subp
super(CiTestCase, self).tearDown()
def tmp_dir(self, dir=None, cleanup=True):
@@ -212,16 +194,6 @@ class CiTestCase(TestCase):
dir = self.tmp_dir()
return os.path.normpath(os.path.abspath(os.path.join(dir, path)))
- def sys_exit(self, code):
- """Provide a wrapper around sys.exit for python 2.6
-
- In 2.6, this code would produce 'cm.exception' with value int(2)
- rather than the SystemExit that was raised by sys.exit(2).
- with assertRaises(SystemExit) as cm:
- sys.exit(2)
- """
- raise SystemExit(code)
-
def tmp_cloud(self, distro, sys_cfg=None, metadata=None):
"""Create a cloud with tmp working directory paths.
@@ -309,13 +281,13 @@ class FilesystemMockingTestCase(ResourceUsingTestCase):
mock.patch.object(mod, f, trap_func))
# Handle subprocess calls
- func = getattr(util, 'subp')
+ func = getattr(subp, 'subp')
def nsubp(*_args, **_kwargs):
return ('', '')
self.patched_funcs.enter_context(
- mock.patch.object(util, 'subp', nsubp))
+ mock.patch.object(subp, 'subp', nsubp))
def null_func(*_args, **_kwargs):
return None
@@ -363,6 +335,7 @@ class FilesystemMockingTestCase(ResourceUsingTestCase):
root = self.tmp_dir()
self.patchUtils(root)
self.patchOS(root)
+ self.patchOpen(root)
return root
@contextmanager
@@ -396,7 +369,7 @@ class HttprettyTestCase(CiTestCase):
super(HttprettyTestCase, self).tearDown()
-class SchemaTestCaseMixin(unittest2.TestCase):
+class SchemaTestCaseMixin(unittest.TestCase):
def assertSchemaValid(self, cfg, msg="Valid Schema failed validation."):
"""Assert the config is valid per self.schema.
@@ -528,13 +501,4 @@ if not hasattr(mock.Mock, 'assert_not_called'):
raise AssertionError(msg)
mock.Mock.assert_not_called = __mock_assert_not_called
-
-# older unittest2.TestCase (centos6) have only the now-deprecated
-# assertRaisesRegexp. Simple assignment makes pylint complain, about
-# users of assertRaisesRegex so we use getattr to trick it.
-# https://github.com/PyCQA/pylint/issues/1946
-if not hasattr(unittest2.TestCase, 'assertRaisesRegex'):
- unittest2.TestCase.assertRaisesRegex = (
- getattr(unittest2.TestCase, 'assertRaisesRegexp'))
-
# vi: ts=4 expandtab
diff --git a/cloudinit/tests/test_conftest.py b/cloudinit/tests/test_conftest.py
new file mode 100644
index 00000000..6f1263a5
--- /dev/null
+++ b/cloudinit/tests/test_conftest.py
@@ -0,0 +1,65 @@
+import pytest
+
+from cloudinit import subp
+from cloudinit.tests.helpers import CiTestCase
+
+
+class TestDisableSubpUsage:
+ """Test that the disable_subp_usage fixture behaves as expected."""
+
+ def test_using_subp_raises_assertion_error(self):
+ with pytest.raises(AssertionError):
+ subp.subp(["some", "args"])
+
+ def test_typeerrors_on_incorrect_usage(self):
+ with pytest.raises(TypeError):
+ # We are intentionally passing no value for a parameter, so:
+ # pylint: disable=no-value-for-parameter
+ subp.subp()
+
+ @pytest.mark.allow_all_subp
+ def test_subp_usage_can_be_reenabled(self):
+ subp.subp(['whoami'])
+
+ @pytest.mark.allow_subp_for("whoami")
+ def test_subp_usage_can_be_conditionally_reenabled(self):
+ # The two parameters test each potential invocation with a single
+ # argument
+ with pytest.raises(AssertionError) as excinfo:
+ subp.subp(["some", "args"])
+ assert "allowed: whoami" in str(excinfo.value)
+ subp.subp(['whoami'])
+
+ @pytest.mark.allow_subp_for("whoami", "bash")
+ def test_subp_usage_can_be_conditionally_reenabled_for_multiple_cmds(self):
+ with pytest.raises(AssertionError) as excinfo:
+ subp.subp(["some", "args"])
+ assert "allowed: whoami,bash" in str(excinfo.value)
+ subp.subp(['bash', '-c', 'true'])
+ subp.subp(['whoami'])
+
+ @pytest.mark.allow_all_subp
+ @pytest.mark.allow_subp_for("bash")
+ def test_both_marks_raise_an_error(self):
+ with pytest.raises(AssertionError, match="marked both"):
+ subp.subp(["bash"])
+
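These tests exercise an autouse ``disable_subp_usage`` fixture defined in the
project's conftest. A minimal sketch of how such a fixture could work (an
assumed implementation for illustration; the real conftest.py is more
thorough):

    import pytest
    from unittest import mock

    from cloudinit import subp

    @pytest.fixture(autouse=True)
    def disable_subp_usage(request):
        allow_all = request.node.get_closest_marker("allow_all_subp")
        allow_for = request.node.get_closest_marker("allow_subp_for")

        if allow_all and allow_for:
            def fake_subp(args, *posargs, **kwargs):
                raise AssertionError(
                    "Test marked both allow_all_subp and allow_subp_for")
        elif allow_all:
            fake_subp = None  # leave the real subp.subp untouched
        else:
            allowed = list(allow_for.args) if allow_for else []
            real_subp = subp.subp

            def fake_subp(args, *posargs, **kwargs):
                if args and args[0] in allowed:
                    return real_subp(args, *posargs, **kwargs)
                raise AssertionError(
                    "Unexpectedly used subp, allowed: %s"
                    % ",".join(allowed))

        if fake_subp is None:
            yield
        else:
            with mock.patch("cloudinit.subp.subp", side_effect=fake_subp):
                yield
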
+
+class TestDisableSubpUsageInTestSubclass(CiTestCase):
+ """Test that disable_subp_usage doesn't impact CiTestCase's subp logic."""
+
+ def test_using_subp_raises_exception(self):
+ with pytest.raises(Exception):
+ subp.subp(["some", "args"])
+
+ def test_typeerrors_on_incorrect_usage(self):
+ with pytest.raises(TypeError):
+ subp.subp()
+
+ def test_subp_usage_can_be_reenabled(self):
+        _old_allowed_subp = self.allowed_subp
+ self.allowed_subp = True
+ try:
+ subp.subp(['bash', '-c', 'true'])
+ finally:
+ self.allowed_subp = _old_allowed_subp
diff --git a/cloudinit/tests/test_features.py b/cloudinit/tests/test_features.py
new file mode 100644
index 00000000..d7a7226d
--- /dev/null
+++ b/cloudinit/tests/test_features.py
@@ -0,0 +1,60 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+# pylint: disable=no-member,no-name-in-module
+"""
+This file is for testing the feature flag functionality itself,
+NOT for testing any individual feature flag
+"""
+import pytest
+import sys
+from pathlib import Path
+
+import cloudinit
+
+
+@pytest.yield_fixture()
+def create_override(request):
+ """
+ Create a feature overrides file and do some module wizardry to make
+ it seem like we're importing the features file for the first time.
+
+ After creating the override file with the values passed by the test,
+ we need to reload cloudinit.features
+ to get all of the current features (including the overridden ones).
+ Once the test is complete, we remove the file we created and set
+ features and feature_overrides modules to how they were before
+ the test started
+ """
+ override_path = Path(cloudinit.__file__).parent / 'feature_overrides.py'
+ if override_path.exists():
+ raise Exception("feature_overrides.py unexpectedly exists! "
+ "Remove it to run this test.")
+ with override_path.open('w') as f:
+ for key, value in request.param.items():
+ f.write('{} = {}\n'.format(key, value))
+
+ sys.modules.pop('cloudinit.features', None)
+
+ yield
+
+ override_path.unlink()
+ sys.modules.pop('cloudinit.feature_overrides', None)
+
+
+class TestFeatures:
+ def test_feature_without_override(self):
+ from cloudinit.features import ERROR_ON_USER_DATA_FAILURE
+ assert ERROR_ON_USER_DATA_FAILURE is True
+
+ @pytest.mark.parametrize('create_override',
+ [{'ERROR_ON_USER_DATA_FAILURE': False}],
+ indirect=True)
+ def test_feature_with_override(self, create_override):
+ from cloudinit.features import ERROR_ON_USER_DATA_FAILURE
+ assert ERROR_ON_USER_DATA_FAILURE is False
+
+ @pytest.mark.parametrize('create_override',
+ [{'SPAM': True}],
+ indirect=True)
+ def test_feature_only_in_override(self, create_override):
+ from cloudinit.features import SPAM
+ assert SPAM is True
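
The override file works because ``cloudinit/features.py`` is assumed to end
by star-importing any sibling ``feature_overrides`` module, roughly:

    # Tail of cloudinit/features.py, as these tests assume it behaves:
    try:
        from cloudinit.feature_overrides import *  # noqa: F401,F403
    except ImportError:
        pass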
diff --git a/cloudinit/tests/test_gpg.py b/cloudinit/tests/test_gpg.py
index 8dd57137..f96f5372 100644
--- a/cloudinit/tests/test_gpg.py
+++ b/cloudinit/tests/test_gpg.py
@@ -4,19 +4,19 @@
from unittest import mock
from cloudinit import gpg
-from cloudinit import util
+from cloudinit import subp
from cloudinit.tests.helpers import CiTestCase
@mock.patch("cloudinit.gpg.time.sleep")
-@mock.patch("cloudinit.gpg.util.subp")
+@mock.patch("cloudinit.gpg.subp.subp")
class TestReceiveKeys(CiTestCase):
"""Test the recv_key method."""
def test_retries_on_subp_exc(self, m_subp, m_sleep):
"""retry should be done on gpg receive keys failure."""
retries = (1, 2, 4)
- my_exc = util.ProcessExecutionError(
+ my_exc = subp.ProcessExecutionError(
stdout='', stderr='', exit_code=2, cmd=['mycmd'])
m_subp.side_effect = (my_exc, my_exc, ('', ''))
gpg.recv_key("ABCD", "keyserver.example.com", retries=retries)
@@ -26,7 +26,7 @@ class TestReceiveKeys(CiTestCase):
"""If the final run fails, error should be raised."""
naplen = 1
keyid, keyserver = ("ABCD", "keyserver.example.com")
- m_subp.side_effect = util.ProcessExecutionError(
+ m_subp.side_effect = subp.ProcessExecutionError(
stdout='', stderr='', exit_code=2, cmd=['mycmd'])
with self.assertRaises(ValueError) as rcm:
gpg.recv_key(keyid, keyserver, retries=(naplen,))
@@ -36,7 +36,7 @@ class TestReceiveKeys(CiTestCase):
def test_no_retries_on_none(self, m_subp, m_sleep):
"""retry should not be done if retries is None."""
- m_subp.side_effect = util.ProcessExecutionError(
+ m_subp.side_effect = subp.ProcessExecutionError(
stdout='', stderr='', exit_code=2, cmd=['mycmd'])
with self.assertRaises(ValueError):
gpg.recv_key("ABCD", "keyserver.example.com", retries=None)
diff --git a/cloudinit/tests/test_netinfo.py b/cloudinit/tests/test_netinfo.py
index 1c8a791e..e44b16d8 100644
--- a/cloudinit/tests/test_netinfo.py
+++ b/cloudinit/tests/test_netinfo.py
@@ -27,8 +27,8 @@ class TestNetInfo(CiTestCase):
maxDiff = None
with_logs = True
- @mock.patch('cloudinit.netinfo.util.which')
- @mock.patch('cloudinit.netinfo.util.subp')
+ @mock.patch('cloudinit.netinfo.subp.which')
+ @mock.patch('cloudinit.netinfo.subp.subp')
def test_netdev_old_nettools_pformat(self, m_subp, m_which):
"""netdev_pformat properly rendering old nettools info."""
m_subp.return_value = (SAMPLE_OLD_IFCONFIG_OUT, '')
@@ -36,8 +36,8 @@ class TestNetInfo(CiTestCase):
content = netdev_pformat()
self.assertEqual(NETDEV_FORMATTED_OUT, content)
- @mock.patch('cloudinit.netinfo.util.which')
- @mock.patch('cloudinit.netinfo.util.subp')
+ @mock.patch('cloudinit.netinfo.subp.which')
+ @mock.patch('cloudinit.netinfo.subp.subp')
def test_netdev_new_nettools_pformat(self, m_subp, m_which):
"""netdev_pformat properly rendering netdev new nettools info."""
m_subp.return_value = (SAMPLE_NEW_IFCONFIG_OUT, '')
@@ -45,8 +45,8 @@ class TestNetInfo(CiTestCase):
content = netdev_pformat()
self.assertEqual(NETDEV_FORMATTED_OUT, content)
- @mock.patch('cloudinit.netinfo.util.which')
- @mock.patch('cloudinit.netinfo.util.subp')
+ @mock.patch('cloudinit.netinfo.subp.which')
+ @mock.patch('cloudinit.netinfo.subp.subp')
def test_netdev_freebsd_nettools_pformat(self, m_subp, m_which):
"""netdev_pformat properly rendering netdev new nettools info."""
m_subp.return_value = (SAMPLE_FREEBSD_IFCONFIG_OUT, '')
@@ -57,8 +57,8 @@ class TestNetInfo(CiTestCase):
print()
self.assertEqual(FREEBSD_NETDEV_OUT, content)
- @mock.patch('cloudinit.netinfo.util.which')
- @mock.patch('cloudinit.netinfo.util.subp')
+ @mock.patch('cloudinit.netinfo.subp.which')
+ @mock.patch('cloudinit.netinfo.subp.subp')
def test_netdev_iproute_pformat(self, m_subp, m_which):
"""netdev_pformat properly rendering ip route info."""
m_subp.return_value = (SAMPLE_IPADDRSHOW_OUT, '')
@@ -72,8 +72,8 @@ class TestNetInfo(CiTestCase):
'255.0.0.0 | . |', '255.0.0.0 | host |')
self.assertEqual(new_output, content)
- @mock.patch('cloudinit.netinfo.util.which')
- @mock.patch('cloudinit.netinfo.util.subp')
+ @mock.patch('cloudinit.netinfo.subp.which')
+ @mock.patch('cloudinit.netinfo.subp.subp')
def test_netdev_warn_on_missing_commands(self, m_subp, m_which):
"""netdev_pformat warns when missing both ip and 'netstat'."""
m_which.return_value = None # Neither ip nor netstat found
@@ -85,8 +85,8 @@ class TestNetInfo(CiTestCase):
self.logs.getvalue())
m_subp.assert_not_called()
- @mock.patch('cloudinit.netinfo.util.which')
- @mock.patch('cloudinit.netinfo.util.subp')
+ @mock.patch('cloudinit.netinfo.subp.which')
+ @mock.patch('cloudinit.netinfo.subp.subp')
def test_netdev_info_nettools_down(self, m_subp, m_which):
"""test netdev_info using nettools and down interfaces."""
m_subp.return_value = (
@@ -100,8 +100,8 @@ class TestNetInfo(CiTestCase):
'hwaddr': '.', 'up': True}},
netdev_info("."))
- @mock.patch('cloudinit.netinfo.util.which')
- @mock.patch('cloudinit.netinfo.util.subp')
+ @mock.patch('cloudinit.netinfo.subp.which')
+ @mock.patch('cloudinit.netinfo.subp.subp')
def test_netdev_info_iproute_down(self, m_subp, m_which):
"""Test netdev_info with ip and down interfaces."""
m_subp.return_value = (
@@ -130,8 +130,8 @@ class TestNetInfo(CiTestCase):
readResource("netinfo/netdev-formatted-output-down"),
netdev_pformat())
- @mock.patch('cloudinit.netinfo.util.which')
- @mock.patch('cloudinit.netinfo.util.subp')
+ @mock.patch('cloudinit.netinfo.subp.which')
+ @mock.patch('cloudinit.netinfo.subp.subp')
def test_route_nettools_pformat(self, m_subp, m_which):
"""route_pformat properly rendering nettools route info."""
@@ -147,8 +147,8 @@ class TestNetInfo(CiTestCase):
content = route_pformat()
self.assertEqual(ROUTE_FORMATTED_OUT, content)
- @mock.patch('cloudinit.netinfo.util.which')
- @mock.patch('cloudinit.netinfo.util.subp')
+ @mock.patch('cloudinit.netinfo.subp.which')
+ @mock.patch('cloudinit.netinfo.subp.subp')
def test_route_iproute_pformat(self, m_subp, m_which):
"""route_pformat properly rendering ip route info."""
@@ -165,8 +165,8 @@ class TestNetInfo(CiTestCase):
content = route_pformat()
self.assertEqual(ROUTE_FORMATTED_OUT, content)
- @mock.patch('cloudinit.netinfo.util.which')
- @mock.patch('cloudinit.netinfo.util.subp')
+ @mock.patch('cloudinit.netinfo.subp.which')
+ @mock.patch('cloudinit.netinfo.subp.subp')
def test_route_warn_on_missing_commands(self, m_subp, m_which):
"""route_pformat warns when missing both ip and 'netstat'."""
m_which.return_value = None # Neither ip nor netstat found
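
The mock targets in this file move from 'cloudinit.netinfo.util.subp' to 'cloudinit.netinfo.subp.subp', following the relocation of subp() into its own module (see cloudinit/util.py below). The underlying rule is that mock.patch must name the attribute where the code under test looks it up, not where it is defined. A minimal, self-contained illustration using only the standard library:

```python
import subprocess
from unittest import mock

# Patch the name at its lookup site. Code that runs
# "subprocess.check_output([...])" resolves the attribute on the
# subprocess module at call time, so patching it there intercepts
# the call without ever touching a real binary.
with mock.patch.object(subprocess, 'check_output',
                       return_value=b'fake output\n'):
    print(subprocess.check_output(['ip', 'route']))  # b'fake output\n'
```
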
diff --git a/cloudinit/tests/test_subp.py b/cloudinit/tests/test_subp.py
index 448097d3..911c1f3d 100644
--- a/cloudinit/tests/test_subp.py
+++ b/cloudinit/tests/test_subp.py
@@ -2,10 +2,21 @@
"""Tests for cloudinit.subp utility functions"""
-from cloudinit import subp
+import json
+import os
+import sys
+import stat
+
+from unittest import mock
+
+from cloudinit import subp, util
from cloudinit.tests.helpers import CiTestCase
+BASH = subp.which('bash')
+BOGUS_COMMAND = 'this-is-not-expected-to-be-a-program-name'
+
+
class TestPrependBaseCommands(CiTestCase):
with_logs = True
@@ -58,4 +69,218 @@ class TestPrependBaseCommands(CiTestCase):
self.assertEqual('', self.logs.getvalue())
self.assertEqual(expected, fixed_commands)
+
+class TestSubp(CiTestCase):
+ allowed_subp = [BASH, 'cat', CiTestCase.SUBP_SHELL_TRUE,
+ BOGUS_COMMAND, sys.executable]
+
+ stdin2err = [BASH, '-c', 'cat >&2']
+ stdin2out = ['cat']
+ utf8_invalid = b'ab\xaadef'
+ utf8_valid = b'start \xc3\xa9 end'
+ utf8_valid_2 = b'd\xc3\xa9j\xc8\xa7'
+ printenv = [BASH, '-c', 'for n in "$@"; do echo "$n=${!n}"; done', '--']
+
+ def printf_cmd(self, *args):
+ # bash's printf supports \xaa. So does /usr/bin/printf
+ # but by using bash, we remove dependency on another program.
+ return([BASH, '-c', 'printf "$@"', 'printf'] + list(args))
+
+ def test_subp_handles_bytestrings(self):
+ """subp can run a bytestring command if shell is True."""
+ tmp_file = self.tmp_path('test.out')
+ cmd = 'echo HI MOM >> {tmp_file}'.format(tmp_file=tmp_file)
+ (out, _err) = subp.subp(cmd.encode('utf-8'), shell=True)
+ self.assertEqual(u'', out)
+ self.assertEqual(u'', _err)
+ self.assertEqual('HI MOM\n', util.load_file(tmp_file))
+
+ def test_subp_handles_strings(self):
+ """subp can run a string command if shell is True."""
+ tmp_file = self.tmp_path('test.out')
+ cmd = 'echo HI MOM >> {tmp_file}'.format(tmp_file=tmp_file)
+ (out, _err) = subp.subp(cmd, shell=True)
+ self.assertEqual(u'', out)
+ self.assertEqual(u'', _err)
+ self.assertEqual('HI MOM\n', util.load_file(tmp_file))
+
+ def test_subp_handles_utf8(self):
+ # The given bytes contain utf-8 accented characters as seen in e.g.
+ # the "deja dup" package in Ubuntu.
+ cmd = self.printf_cmd(self.utf8_valid_2)
+ (out, _err) = subp.subp(cmd, capture=True)
+ self.assertEqual(out, self.utf8_valid_2.decode('utf-8'))
+
+ def test_subp_respects_decode_false(self):
+ (out, err) = subp.subp(self.stdin2out, capture=True, decode=False,
+ data=self.utf8_valid)
+ self.assertTrue(isinstance(out, bytes))
+ self.assertTrue(isinstance(err, bytes))
+ self.assertEqual(out, self.utf8_valid)
+
+ def test_subp_decode_ignore(self):
+ # this executes a string that writes invalid utf-8 to stdout
+ (out, _err) = subp.subp(self.printf_cmd('abc\\xaadef'),
+ capture=True, decode='ignore')
+ self.assertEqual(out, 'abcdef')
+
+ def test_subp_decode_strict_valid_utf8(self):
+ (out, _err) = subp.subp(self.stdin2out, capture=True,
+ decode='strict', data=self.utf8_valid)
+ self.assertEqual(out, self.utf8_valid.decode('utf-8'))
+
+ def test_subp_decode_invalid_utf8_replaces(self):
+ (out, _err) = subp.subp(self.stdin2out, capture=True,
+ data=self.utf8_invalid)
+ expected = self.utf8_invalid.decode('utf-8', 'replace')
+ self.assertEqual(out, expected)
+
+ def test_subp_decode_strict_raises(self):
+ args = []
+ kwargs = {'args': self.stdin2out, 'capture': True,
+ 'decode': 'strict', 'data': self.utf8_invalid}
+ self.assertRaises(UnicodeDecodeError, subp.subp, *args, **kwargs)
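+
+# The decode tests above pin down subp()'s contract: decode may be False
+# (return raw bytes) or one of 'strict', 'ignore', 'replace', which are
+# passed straight through to bytes.decode() as its errors argument (per
+# the docstring removed from util.py below). The same behavior in plain
+# Python:
+#
+#   raw = b'ab\xaadef'  # the invalid-UTF-8 sample used as utf8_invalid
+#
+#   raw.decode('utf-8', 'replace')   # 'ab\ufffddef' (subp's default)
+#   raw.decode('utf-8', 'ignore')    # 'abdef'
+#   raw.decode('utf-8', 'strict')    # raises UnicodeDecodeError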
+
+ def test_subp_capture_stderr(self):
+ data = b'hello world'
+ (out, err) = subp.subp(self.stdin2err, capture=True,
+ decode=False, data=data,
+ update_env={'LC_ALL': 'C'})
+ self.assertEqual(err, data)
+ self.assertEqual(out, b'')
+
+ def test_subp_reads_env(self):
+ with mock.patch.dict("os.environ", values={'FOO': 'BAR'}):
+ out, _err = subp.subp(self.printenv + ['FOO'], capture=True)
+ self.assertEqual('FOO=BAR', out.splitlines()[0])
+
+ def test_subp_env_and_update_env(self):
+ out, _err = subp.subp(
+ self.printenv + ['FOO', 'HOME', 'K1', 'K2'], capture=True,
+ env={'FOO': 'BAR'},
+ update_env={'HOME': '/myhome', 'K2': 'V2'})
+ self.assertEqual(
+ ['FOO=BAR', 'HOME=/myhome', 'K1=', 'K2=V2'], out.splitlines())
+
+ def test_subp_update_env(self):
+ extra = {'FOO': 'BAR', 'HOME': '/root', 'K1': 'V1'}
+ with mock.patch.dict("os.environ", values=extra):
+ out, _err = subp.subp(
+ self.printenv + ['FOO', 'HOME', 'K1', 'K2'], capture=True,
+ update_env={'HOME': '/myhome', 'K2': 'V2'})
+
+ self.assertEqual(
+ ['FOO=BAR', 'HOME=/myhome', 'K1=V1', 'K2=V2'], out.splitlines())
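
The two tests above distinguish subp()'s environment parameters: env replaces the child environment wholesale, while update_env overlays entries onto env (or onto a copy of os.environ when env is None). A rough standalone equivalent of that merge:

```python
import os

def build_child_env(env=None, update_env=None):
    # Sketch of subp()'s environment handling: update_env overlays the
    # explicit env if given, otherwise a copy of the inherited environ.
    # The current process's os.environ is never modified.
    if update_env:
        merged = dict(os.environ if env is None else env)
        merged.update(update_env)
        return merged
    return env  # None means "inherit unchanged"

print(build_child_env(env={'FOO': 'BAR'},
                      update_env={'HOME': '/myhome', 'K2': 'V2'}))
# {'FOO': 'BAR', 'HOME': '/myhome', 'K2': 'V2'}
```
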
+
+ def test_subp_warn_missing_shebang(self):
+ """Warn on no #! in script"""
+ noshebang = self.tmp_path('noshebang')
+ util.write_file(noshebang, 'true\n')
+
+ print("os is %s" % os)
+ os.chmod(noshebang, os.stat(noshebang).st_mode | stat.S_IEXEC)
+ with self.allow_subp([noshebang]):
+ self.assertRaisesRegex(subp.ProcessExecutionError,
+ r'Missing #! in script\?',
+ subp.subp, (noshebang,))
+
+ def test_subp_combined_stderr_stdout(self):
+ """Providing combine_capture as True redirects stderr to stdout."""
+ data = b'hello world'
+ (out, err) = subp.subp(self.stdin2err, capture=True,
+ combine_capture=True, decode=False, data=data)
+ self.assertEqual(b'', err)
+ self.assertEqual(data, out)
+
+ def test_returns_none_if_no_capture(self):
+ (out, err) = subp.subp(self.stdin2out, data=b'', capture=False)
+ self.assertIsNone(err)
+ self.assertIsNone(out)
+
+ def test_exception_has_out_err_are_bytes_if_decode_false(self):
+ """Raised exc should have stderr, stdout as bytes if no decode."""
+ with self.assertRaises(subp.ProcessExecutionError) as cm:
+ subp.subp([BOGUS_COMMAND], decode=False)
+ self.assertTrue(isinstance(cm.exception.stdout, bytes))
+ self.assertTrue(isinstance(cm.exception.stderr, bytes))
+
+ def test_exception_has_out_err_are_bytes_if_decode_true(self):
+ """Raised exc should have stderr, stdout as string if no decode."""
+ with self.assertRaises(subp.ProcessExecutionError) as cm:
+ subp.subp([BOGUS_COMMAND], decode=True)
+ self.assertTrue(isinstance(cm.exception.stdout, str))
+ self.assertTrue(isinstance(cm.exception.stderr, str))
+
+ def test_bunch_of_slashes_in_path(self):
+ self.assertEqual("/target/my/path/",
+ subp.target_path("/target/", "//my/path/"))
+ self.assertEqual("/target/my/path/",
+ subp.target_path("/target/", "///my/path/"))
+
+ def test_c_lang_can_take_utf8_args(self):
+ """Independent of system LC_CTYPE, args can contain utf-8 strings.
+
+ When python starts up, its default encoding gets set based on
+ the value of LC_CTYPE. If no system locale is set, the default
+ encoding for both python2 and python3 in some paths will end up
+ being ascii.
+
+ Attempts to use setlocale or patching (or changing) os.environ
+ in the current environment seem to not be effective.
+
+ This test starts up a python with LC_CTYPE set to C so that
+ the default encoding will be set to ascii. In such an environment
+ Popen(['command', 'non-ascii-arg']) would cause a UnicodeDecodeError.
+ """
+ python_prog = '\n'.join([
+ 'import json, sys',
+ 'from cloudinit.subp import subp',
+ 'data = sys.stdin.read()',
+ 'cmd = json.loads(data)',
+ 'subp(cmd, capture=False)',
+ ''])
+ cmd = [BASH, '-c', 'echo -n "$@"', '--',
+ self.utf8_valid.decode("utf-8")]
+ python_subp = [sys.executable, '-c', python_prog]
+
+ out, _err = subp.subp(
+ python_subp, update_env={'LC_CTYPE': 'C'},
+ data=json.dumps(cmd).encode("utf-8"),
+ decode=False)
+ self.assertEqual(self.utf8_valid, out)
+
+ def test_bogus_command_logs_status_messages(self):
+ """status_cb gets status messages logs on bogus commands provided."""
+ logs = []
+
+ def status_cb(log):
+ logs.append(log)
+
+ with self.assertRaises(subp.ProcessExecutionError):
+ subp.subp([BOGUS_COMMAND], status_cb=status_cb)
+
+ expected = [
+ 'Begin run command: {cmd}\n'.format(cmd=BOGUS_COMMAND),
+ 'ERROR: End run command: invalid command provided\n']
+ self.assertEqual(expected, logs)
+
+ def test_command_logs_exit_codes_to_status_cb(self):
+ """status_cb gets status messages containing command exit code."""
+ logs = []
+
+ def status_cb(log):
+ logs.append(log)
+
+ with self.assertRaises(subp.ProcessExecutionError):
+ subp.subp([BASH, '-c', 'exit 2'], status_cb=status_cb)
+ subp.subp([BASH, '-c', 'exit 0'], status_cb=status_cb)
+
+ expected = [
+ 'Begin run command: %s -c exit 2\n' % BASH,
+ 'ERROR: End run command: exit(2)\n',
+ 'Begin run command: %s -c exit 0\n' % BASH,
+ 'End run command: exit(0)\n']
+ self.assertEqual(expected, logs)
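
These two status_cb tests document the exact message format a callback receives: a 'Begin run command: ...\n' line before execution and an 'End run command: exit(N)\n' line after, with an 'ERROR: ' prefix on failure. A minimal consumer might simply forward them to a logger (the commented call site is hypothetical):

```python
import logging

logging.basicConfig(level=logging.INFO)
status_log = logging.getLogger('run-status')

def status_cb(msg):
    # Messages arrive newline-terminated, e.g.
    #   'Begin run command: sh -c exit 0\n'
    #   'ERROR: End run command: exit(2)\n'
    status_log.info(msg.rstrip('\n'))

# Hypothetical usage, assuming cloud-init 20.3 is importable:
# from cloudinit import subp
# subp.subp(['sh', '-c', 'exit 0'], status_cb=status_cb)
```
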
+
+
# vi: ts=4 expandtab
diff --git a/cloudinit/tests/test_url_helper.py b/cloudinit/tests/test_url_helper.py
index 1674120f..364ec822 100644
--- a/cloudinit/tests/test_url_helper.py
+++ b/cloudinit/tests/test_url_helper.py
@@ -1,7 +1,8 @@
# This file is part of cloud-init. See LICENSE file for license information.
from cloudinit.url_helper import (
- NOT_FOUND, UrlError, oauth_headers, read_file_or_url, retry_on_url_exc)
+ NOT_FOUND, UrlError, REDACTED, oauth_headers, read_file_or_url,
+ retry_on_url_exc)
from cloudinit.tests.helpers import CiTestCase, mock, skipIf
from cloudinit import util
from cloudinit import version
@@ -50,6 +51,9 @@ class TestOAuthHeaders(CiTestCase):
class TestReadFileOrUrl(CiTestCase):
+
+ with_logs = True
+
def test_read_file_or_url_str_from_file(self):
"""Test that str(result.contents) on file is text version of contents.
It should not be "b'data'", but just "'data'" """
@@ -71,6 +75,34 @@ class TestReadFileOrUrl(CiTestCase):
self.assertEqual(result.contents, data)
self.assertEqual(str(result), data.decode('utf-8'))
+ @httpretty.activate
+ def test_read_file_or_url_str_from_url_redacting_headers_from_logs(self):
+ """Headers are redacted from logs but unredacted in requests."""
+ url = 'http://hostname/path'
+ headers = {'sensitive': 'sekret', 'server': 'blah'}
+ httpretty.register_uri(httpretty.GET, url)
+
+ read_file_or_url(url, headers=headers, headers_redact=['sensitive'])
+ logs = self.logs.getvalue()
+ for k in headers.keys():
+ self.assertEqual(headers[k], httpretty.last_request().headers[k])
+ self.assertIn(REDACTED, logs)
+ self.assertNotIn('sekret', logs)
+
+ @httpretty.activate
+ def test_read_file_or_url_str_from_url_redacts_noheaders(self):
+ """When no headers_redact, header values are in logs and requests."""
+ url = 'http://hostname/path'
+ headers = {'sensitive': 'sekret', 'server': 'blah'}
+ httpretty.register_uri(httpretty.GET, url)
+
+ read_file_or_url(url, headers=headers)
+ for k in headers.keys():
+ self.assertEqual(headers[k], httpretty.last_request().headers[k])
+ logs = self.logs.getvalue()
+ self.assertNotIn(REDACTED, logs)
+ self.assertIn('sekret', logs)
+
@mock.patch(M_PATH + 'readurl')
def test_read_file_or_url_passes_params_to_readurl(self, m_readurl):
"""read_file_or_url passes all params through to readurl."""
diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py
index 11f37000..096a3037 100644
--- a/cloudinit/tests/test_util.py
+++ b/cloudinit/tests/test_util.py
@@ -6,8 +6,10 @@ import base64
import logging
import json
import platform
+import pytest
import cloudinit.util as util
+from cloudinit import subp
from cloudinit.tests.helpers import CiTestCase, mock
from textwrap import dedent
@@ -331,7 +333,7 @@ class TestBlkid(CiTestCase):
"PARTUUID": self.ids["id09"]},
})
- @mock.patch("cloudinit.util.subp")
+ @mock.patch("cloudinit.subp.subp")
def test_functional_blkid(self, m_subp):
m_subp.return_value = (
self.blkid_out.format(**self.ids), "")
@@ -339,7 +341,7 @@ class TestBlkid(CiTestCase):
m_subp.assert_called_with(["blkid", "-o", "full"], capture=True,
decode="replace")
- @mock.patch("cloudinit.util.subp")
+ @mock.patch("cloudinit.subp.subp")
def test_blkid_no_cache_uses_no_cache(self, m_subp):
"""blkid should turn off cache if disable_cache is true."""
m_subp.return_value = (
@@ -350,7 +352,7 @@ class TestBlkid(CiTestCase):
capture=True, decode="replace")
-@mock.patch('cloudinit.util.subp')
+@mock.patch('cloudinit.subp.subp')
class TestUdevadmSettle(CiTestCase):
def test_with_no_params(self, m_subp):
"""called with no parameters."""
@@ -395,8 +397,8 @@ class TestUdevadmSettle(CiTestCase):
'--timeout=%s' % timeout])
def test_subp_exception_raises_to_caller(self, m_subp):
- m_subp.side_effect = util.ProcessExecutionError("BOOM")
- self.assertRaises(util.ProcessExecutionError, util.udevadm_settle)
+ m_subp.side_effect = subp.ProcessExecutionError("BOOM")
+ self.assertRaises(subp.ProcessExecutionError, util.udevadm_settle)
@mock.patch('os.path.exists')
@@ -419,12 +421,6 @@ class TestGetLinuxDistro(CiTestCase):
if path == '/etc/redhat-release':
return 1
- @classmethod
- def freebsd_version_exists(self, path):
- """Side effect function """
- if path == '/bin/freebsd-version':
- return 1
-
@mock.patch('cloudinit.util.load_file')
def test_get_linux_distro_quoted_name(self, m_os_release, m_path_exists):
"""Verify we get the correct name if the os-release file has
@@ -443,11 +439,18 @@ class TestGetLinuxDistro(CiTestCase):
dist = util.get_linux_distro()
self.assertEqual(('ubuntu', '16.04', 'xenial'), dist)
- @mock.patch('cloudinit.util.subp')
- def test_get_linux_freebsd(self, m_subp, m_path_exists):
+ @mock.patch('platform.system')
+ @mock.patch('platform.release')
+ @mock.patch('cloudinit.util._parse_redhat_release')
+ def test_get_linux_freebsd(self, m_parse_redhat_release,
+ m_platform_release,
+ m_platform_system, m_path_exists):
"""Verify we get the correct name and release name on FreeBSD."""
- m_path_exists.side_effect = TestGetLinuxDistro.freebsd_version_exists
- m_subp.return_value = ("12.0-RELEASE-p10\n", '')
+ m_path_exists.return_value = False
+ m_platform_release.return_value = '12.0-RELEASE-p10'
+ m_platform_system.return_value = 'FreeBSD'
+ m_parse_redhat_release.return_value = {}
+ util.is_BSD.cache_clear()
dist = util.get_linux_distro()
self.assertEqual(('freebsd', '12.0-RELEASE-p10', ''), dist)
@@ -538,27 +541,36 @@ class TestGetLinuxDistro(CiTestCase):
self.assertEqual(
('opensuse-tumbleweed', '20180920', platform.machine()), dist)
+ @mock.patch('platform.system')
@mock.patch('platform.dist', create=True)
- def test_get_linux_distro_no_data(self, m_platform_dist, m_path_exists):
+ def test_get_linux_distro_no_data(self, m_platform_dist,
+ m_platform_system, m_path_exists):
"""Verify we get no information if os-release does not exist"""
m_platform_dist.return_value = ('', '', '')
+ m_platform_system.return_value = "Linux"
m_path_exists.return_value = 0
dist = util.get_linux_distro()
self.assertEqual(('', '', ''), dist)
+ @mock.patch('platform.system')
@mock.patch('platform.dist', create=True)
- def test_get_linux_distro_no_impl(self, m_platform_dist, m_path_exists):
+ def test_get_linux_distro_no_impl(self, m_platform_dist,
+ m_platform_system, m_path_exists):
"""Verify we get an empty tuple when no information exists and
Exceptions are not propagated"""
m_platform_dist.side_effect = Exception()
+ m_platform_system.return_value = "Linux"
m_path_exists.return_value = 0
dist = util.get_linux_distro()
self.assertEqual(('', '', ''), dist)
+ @mock.patch('platform.system')
@mock.patch('platform.dist', create=True)
- def test_get_linux_distro_plat_data(self, m_platform_dist, m_path_exists):
+ def test_get_linux_distro_plat_data(self, m_platform_dist,
+ m_platform_system, m_path_exists):
"""Verify we get the correct platform information"""
m_platform_dist.return_value = ('foo', '1.1', 'aarch64')
+ m_platform_system.return_value = "Linux"
m_path_exists.return_value = 0
dist = util.get_linux_distro()
self.assertEqual(('foo', '1.1', 'aarch64'), dist)
@@ -597,4 +609,166 @@ class TestIsLXD(CiTestCase):
self.assertFalse(util.is_lxd())
m_exists.assert_called_once_with('/dev/lxd/sock')
+
+class TestReadCcFromCmdline:
+
+ @pytest.mark.parametrize(
+ "cmdline,expected_cfg",
+ [
+ # Return None if cmdline has no cc:<YAML>end_cc content.
+ (CiTestCase.random_string(), None),
+ # Return None if YAML content is empty string.
+ ('foo cc: end_cc bar', None),
+ # Return expected dictionary without trailing end_cc marker.
+ ('foo cc: ssh_pwauth: true', {'ssh_pwauth': True}),
+ # Return expected dictionary w escaped newline and no end_cc.
+ ('foo cc: ssh_pwauth: true\\n', {'ssh_pwauth': True}),
+ # Return expected dictionary of yaml between cc: and end_cc.
+ ('foo cc: ssh_pwauth: true end_cc bar', {'ssh_pwauth': True}),
+ # Return dict with list value w escaped newline, no end_cc.
+ (
+ 'cc: ssh_import_id: [smoser, kirkland]\\n',
+ {'ssh_import_id': ['smoser', 'kirkland']}
+ ),
+ # Parse urlencoded brackets in yaml content.
+ (
+ 'cc: ssh_import_id: %5Bsmoser, kirkland%5D end_cc',
+ {'ssh_import_id': ['smoser', 'kirkland']}
+ ),
+ # Parse complete urlencoded yaml content.
+ (
+ 'cc: ssh_import_id%3A%20%5Buser1%2C%20user2%5D end_cc',
+ {'ssh_import_id': ['user1', 'user2']}
+ ),
+ # Parse nested dictionary in yaml content.
+ (
+ 'cc: ntp: {enabled: true, ntp_client: myclient} end_cc',
+ {'ntp': {'enabled': True, 'ntp_client': 'myclient'}}
+ ),
+ # Parse single mapping value in yaml content.
+ ('cc: ssh_import_id: smoser end_cc', {'ssh_import_id': 'smoser'}),
+ # Parse multiline content with multiple mapping and nested lists.
+ (
+ ('cc: ssh_import_id: [smoser, bob]\\n'
+ 'runcmd: [ [ ls, -l ], echo hi ] end_cc'),
+ {'ssh_import_id': ['smoser', 'bob'],
+ 'runcmd': [['ls', '-l'], 'echo hi']}
+ ),
+ # Parse multiline encoded content w/ mappings and nested lists.
+ (
+ ('cc: ssh_import_id: %5Bsmoser, bob%5D\\n'
+ 'runcmd: [ [ ls, -l ], echo hi ] end_cc'),
+ {'ssh_import_id': ['smoser', 'bob'],
+ 'runcmd': [['ls', '-l'], 'echo hi']}
+ ),
+ # test encoded escaped newlines work.
+ #
+ # unquote(encoded_content)
+ # 'ssh_import_id: [smoser, bob]\\nruncmd: [ [ ls, -l ], echo hi ]'
+ (
+ ('cc: ' +
+ ('ssh_import_id%3A%20%5Bsmoser%2C%20bob%5D%5Cn'
+ 'runcmd%3A%20%5B%20%5B%20ls%2C%20-l%20%5D%2C'
+ '%20echo%20hi%20%5D') + ' end_cc'),
+ {'ssh_import_id': ['smoser', 'bob'],
+ 'runcmd': [['ls', '-l'], 'echo hi']}
+ ),
+ # test encoded newlines work.
+ #
+ # unquote(encoded_content)
+ # 'ssh_import_id: [smoser, bob]\nruncmd: [ [ ls, -l ], echo hi ]'
+ (
+ ("cc: " +
+ ('ssh_import_id%3A%20%5Bsmoser%2C%20bob%5D%0A'
+ 'runcmd%3A%20%5B%20%5B%20ls%2C%20-l%20%5D%2C'
+ '%20echo%20hi%20%5D') + ' end_cc'),
+ {'ssh_import_id': ['smoser', 'bob'],
+ 'runcmd': [['ls', '-l'], 'echo hi']}
+ ),
+ # Parse and merge multiple yaml content sections.
+ (
+ ('cc:ssh_import_id: [smoser, bob] end_cc '
+ 'cc: runcmd: [ [ ls, -l ] ] end_cc'),
+ {'ssh_import_id': ['smoser', 'bob'],
+ 'runcmd': [['ls', '-l']]}
+ ),
+ # Parse and merge multiple encoded yaml content sections.
+ (
+ ('cc:ssh_import_id%3A%20%5Bsmoser%5D end_cc '
+ 'cc:runcmd%3A%20%5B%20%5B%20ls%2C%20-l%20%5D%20%5D end_cc'),
+ {'ssh_import_id': ['smoser'], 'runcmd': [['ls', '-l']]}
+ ),
+ ]
+ )
+ def test_read_conf_from_cmdline_config(self, expected_cfg, cmdline):
+ assert expected_cfg == util.read_conf_from_cmdline(cmdline=cmdline)
+
+
+class TestMountCb:
+ """Tests for ``util.mount_cb``.
+
+ These tests consider the "unit" under test to be ``util.mount_cb`` and
+ ``util.unmounter``, which is only used by ``mount_cb``.
+
+ TODO: Test default mtype determination
+ TODO: Test the if/else branch that actually performs the mounting operation
+ """
+
+ @pytest.yield_fixture
+ def already_mounted_device_and_mountdict(self):
+ """Mock an already-mounted device, and yield (device, mount dict)"""
+ device = "/dev/fake0"
+ mountpoint = "/mnt/fake"
+ with mock.patch("cloudinit.util.subp.subp"):
+ with mock.patch("cloudinit.util.mounts") as m_mounts:
+ mounts = {device: {"mountpoint": mountpoint}}
+ m_mounts.return_value = mounts
+ yield device, mounts[device]
+
+ @pytest.fixture
+ def already_mounted_device(self, already_mounted_device_and_mountdict):
+ """already_mounted_device_and_mountdict, but return only the device"""
+ return already_mounted_device_and_mountdict[0]
+
+ @pytest.mark.parametrize("invalid_mtype", [int(0), float(0.0), dict()])
+ def test_typeerror_raised_for_invalid_mtype(self, invalid_mtype):
+ with pytest.raises(TypeError):
+ util.mount_cb(mock.Mock(), mock.Mock(), mtype=invalid_mtype)
+
+ @mock.patch("cloudinit.util.subp.subp")
+ def test_already_mounted_does_not_mount_or_umount_anything(
+ self, m_subp, already_mounted_device
+ ):
+ util.mount_cb(already_mounted_device, mock.Mock())
+
+ assert 0 == m_subp.call_count
+
+ @pytest.mark.parametrize("trailing_slash_in_mounts", ["/", ""])
+ def test_already_mounted_calls_callback(
+ self, trailing_slash_in_mounts, already_mounted_device_and_mountdict
+ ):
+ device, mount_dict = already_mounted_device_and_mountdict
+ mountpoint = mount_dict["mountpoint"]
+ mount_dict["mountpoint"] += trailing_slash_in_mounts
+
+ callback = mock.Mock()
+ util.mount_cb(device, callback)
+
+ # The mountpoint passed to callback should always have a trailing
+ # slash, regardless of the input
+ assert [mock.call(mountpoint + "/")] == callback.call_args_list
+
+ def test_already_mounted_calls_callback_with_data(
+ self, already_mounted_device
+ ):
+ callback = mock.Mock()
+ util.mount_cb(
+ already_mounted_device, callback, data=mock.sentinel.data
+ )
+
+ assert [
+ mock.call(mock.ANY, mock.sentinel.data)
+ ] == callback.call_args_list
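
TestMountCb fixes the caller-visible contract of util.mount_cb: an already-mounted device is reused rather than remounted, the callback always receives a mountpoint ending in exactly one trailing slash, and an optional data argument is passed through as the callback's second parameter. A hypothetical call site (names below are illustrative, assuming cloud-init is importable):

```python
# from cloudinit import util
#
# def read_seed(mountpoint, data=None):
#     # mountpoint is guaranteed to end in '/'
#     with open(mountpoint + 'meta-data') as f:
#         return f.read()
#
# contents = util.mount_cb('/dev/sr0', read_seed, mtype='iso9660')
```
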
+
+
# vi: ts=4 expandtab
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index eeb27aa8..caa88435 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -95,7 +95,7 @@ def read_file_or_url(url, **kwargs):
code = e.errno
if e.errno == ENOENT:
code = NOT_FOUND
- raise UrlError(cause=e, code=code, headers=None, url=url)
+ raise UrlError(cause=e, code=code, headers=None, url=url) from e
return FileResponse(file_path, contents=contents)
else:
return readurl(url, **kwargs)
@@ -281,13 +281,14 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
for (k, v) in req_args.items():
if k == 'data':
continue
- filtered_req_args[k] = v
- if k == 'headers':
- for hkey, _hval in v.items():
- if hkey in headers_redact:
- filtered_req_args[k][hkey] = (
- copy.deepcopy(req_args[k][hkey]))
- filtered_req_args[k][hkey] = REDACTED
+ if k == 'headers' and headers_redact:
+ matched_headers = [k for k in headers_redact if v.get(k)]
+ if matched_headers:
+ filtered_req_args[k] = copy.deepcopy(v)
+ for key in matched_headers:
+ filtered_req_args[k][key] = REDACTED
+ else:
+ filtered_req_args[k] = v
try:
if log_req_resp:
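
The rewritten loop in readurl() only deep-copies the headers dict when one of the headers_redact keys is actually present, and never mutates the dict handed to requests (the old code overwrote the live header value). The same filtering as a standalone sketch:

```python
import copy

REDACTED = 'REDACTED'

def redact_headers(headers, headers_redact):
    """Return a log-safe view of headers; copy only when needed."""
    matched = [k for k in headers_redact if headers.get(k)]
    if not matched:
        return headers                # nothing sensitive: log as-is
    safe = copy.deepcopy(headers)     # keep the real request args intact
    for key in matched:
        safe[key] = REDACTED
    return safe

print(redact_headers({'sensitive': 'sekret', 'server': 'blah'},
                     ['sensitive']))
# {'sensitive': 'REDACTED', 'server': 'blah'}
```
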
@@ -574,8 +575,8 @@ def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret,
timestamp=None):
try:
import oauthlib.oauth1 as oauth1
- except ImportError:
- raise NotImplementedError('oauth support is not available')
+ except ImportError as e:
+ raise NotImplementedError('oauth support is not available') from e
if timestamp:
timestamp = str(timestamp)
diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index 6f41b03a..f234b962 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -16,6 +16,7 @@ from email.mime.text import MIMEText
from cloudinit import handlers
from cloudinit import log as logging
+from cloudinit import features
from cloudinit.url_helper import read_file_or_url, UrlError
from cloudinit import util
@@ -25,6 +26,7 @@ LOG = logging.getLogger(__name__)
NOT_MULTIPART_TYPE = handlers.NOT_MULTIPART_TYPE
PART_FN_TPL = handlers.PART_FN_TPL
OCTET_TYPE = handlers.OCTET_TYPE
+INCLUDE_MAP = handlers.INCLUSION_TYPES_MAP
# Saves typing errors
CONTENT_TYPE = 'Content-Type'
@@ -68,6 +70,13 @@ def _set_filename(msg, filename):
'attachment', filename=str(filename))
+def _handle_error(error_message, source_exception=None):
+ if features.ERROR_ON_USER_DATA_FAILURE:
+ raise Exception(error_message) from source_exception
+ else:
+ LOG.warning(error_message)
+
+
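
_handle_error() centralizes the new strict mode: when features.ERROR_ON_USER_DATA_FAILURE is set, malformed user-data raises instead of merely logging, so failures surface early rather than producing a half-configured instance. The pattern in isolation (the module-level boolean here is a stand-in for cloudinit.features):

```python
import logging

LOG = logging.getLogger(__name__)

# Stand-in for cloudinit.features.ERROR_ON_USER_DATA_FAILURE.
ERROR_ON_USER_DATA_FAILURE = True

def handle_error(error_message, source_exception=None):
    if ERROR_ON_USER_DATA_FAILURE:
        # Chain the original exception so tracebacks keep the root cause.
        raise Exception(error_message) from source_exception
    LOG.warning(error_message)  # legacy, lenient behavior

try:
    handle_error('Failed decompressing payload')
except Exception as exc:
    print('aborted:', exc)
```
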
class UserDataProcessor(object):
def __init__(self, paths):
self.paths = paths
@@ -107,15 +116,22 @@ class UserDataProcessor(object):
ctype_orig = None
was_compressed = True
except util.DecompressionError as e:
- LOG.warning("Failed decompressing payload from %s of"
- " length %s due to: %s",
- ctype_orig, len(payload), e)
+ error_message = (
+ "Failed decompressing payload from {} of"
+ " length {} due to: {}".format(
+ ctype_orig, len(payload), e))
+ _handle_error(error_message, e)
continue
# Attempt to figure out the payloads content-type
if not ctype_orig:
ctype_orig = UNDEF_TYPE
- if ctype_orig in TYPE_NEEDED:
+ # There are known cases where mime-type text/x-shellscript included
+ # non shell-script content that was user-data instead. It is safe
+ # to check the true MIME type for x-shellscript type since all
+ # shellscript payloads must have a #! header. The other MIME types
+ # that cloud-init supports do not have the same guarantee.
+ if ctype_orig in TYPE_NEEDED + ['text/x-shellscript']:
ctype = find_ctype(payload)
if ctype is None:
ctype = ctype_orig
@@ -229,19 +245,22 @@ class UserDataProcessor(object):
if resp.ok():
content = resp.contents
else:
- LOG.warning(("Fetching from %s resulted in"
- " a invalid http code of %s"),
- include_url, resp.code)
+ error_message = (
+ "Fetching from {} resulted in"
+ " a invalid http code of {}".format(
+ include_url, resp.code))
+ _handle_error(error_message)
except UrlError as urle:
message = str(urle)
# Older versions of requests.exceptions.HTTPError may not
# include the errant url. Append it for clarity in logs.
if include_url not in message:
message += ' for url: {0}'.format(include_url)
- LOG.warning(message)
+ _handle_error(message, urle)
except IOError as ioe:
- LOG.warning("Fetching from %s resulted in %s",
- include_url, ioe)
+ error_message = "Fetching from {} resulted in {}".format(
+ include_url, ioe)
+ _handle_error(error_message, ioe)
if content is not None:
new_msg = convert_string(content)
diff --git a/cloudinit/util.py b/cloudinit/util.py
index c02b3d9a..cf9e349f 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -15,6 +15,7 @@ import glob
import grp
import gzip
import hashlib
+import io
import json
import os
import os.path
@@ -30,34 +31,23 @@ import string
import subprocess
import sys
import time
-
-from errno import ENOENT, ENOEXEC
-
from base64 import b64decode, b64encode
-from six.moves.urllib import parse as urlparse
-
-import six
+from errno import ENOENT
+from functools import lru_cache
+from urllib import parse
from cloudinit import importer
from cloudinit import log as logging
-from cloudinit import mergers
-from cloudinit import safeyaml
-from cloudinit import temp_utils
-from cloudinit import type_utils
-from cloudinit import url_helper
-from cloudinit import version
-
-from cloudinit.settings import (CFG_BUILTIN)
-
-try:
- from functools import lru_cache
-except ImportError:
- def lru_cache():
- """pass-thru replace for Python3's lru_cache()"""
- def wrapper(f):
- return f
- return wrapper
-
+from cloudinit import subp
+from cloudinit import (
+ mergers,
+ safeyaml,
+ temp_utils,
+ type_utils,
+ url_helper,
+ version,
+)
+from cloudinit.settings import CFG_BUILTIN
_DNS_REDIRECT_IP = None
LOG = logging.getLogger(__name__)
@@ -78,6 +68,10 @@ CONTAINER_TESTS = (['systemd-detect-virt', '--quiet', '--container'],
['lxc-is-container'])
+def kernel_version():
+ return tuple(map(int, os.uname().release.split('.')[:2]))
+
+
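
The new kernel_version() helper reduces os.uname().release (e.g. '5.4.0-42-generic') to an (int, int) tuple, so version gates become ordinary tuple comparisons:

```python
import os

def kernel_version():
    # '5.4.0-42-generic' -> (5, 4)
    return tuple(map(int, os.uname().release.split('.')[:2]))

# Tuples compare element-wise, so a gate reads naturally:
if kernel_version() >= (4, 15):
    print('running on a 4.15+ kernel')
```
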
@lru_cache()
def get_dpkg_architecture(target=None):
"""Return the sanitized string output by `dpkg --print-architecture`.
@@ -85,8 +79,8 @@ def get_dpkg_architecture(target=None):
N.B. This function is wrapped in functools.lru_cache, so repeated calls
won't shell out every time.
"""
- out, _ = subp(['dpkg', '--print-architecture'], capture=True,
- target=target)
+ out, _ = subp.subp(['dpkg', '--print-architecture'], capture=True,
+ target=target)
return out.strip()
@@ -97,7 +91,8 @@ def lsb_release(target=None):
data = {}
try:
- out, _ = subp(['lsb_release', '--all'], capture=True, target=target)
+ out, _ = subp.subp(['lsb_release', '--all'], capture=True,
+ target=target)
for line in out.splitlines():
fname, _, val = line.partition(":")
if fname in fmap:
@@ -107,45 +102,23 @@ def lsb_release(target=None):
LOG.warning("Missing fields in lsb_release --all output: %s",
','.join(missing))
- except ProcessExecutionError as err:
+ except subp.ProcessExecutionError as err:
LOG.warning("Unable to get lsb_release --all: %s", err)
data = dict((v, "UNAVAILABLE") for v in fmap.values())
return data
-def target_path(target, path=None):
- # return 'path' inside target, accepting target as None
- if target in (None, ""):
- target = "/"
- elif not isinstance(target, six.string_types):
- raise ValueError("Unexpected input for target: %s" % target)
- else:
- target = os.path.abspath(target)
- # abspath("//") returns "//" specifically for 2 slashes.
- if target.startswith("//"):
- target = target[1:]
-
- if not path:
- return target
-
- # os.path.join("/etc", "/foo") returns "/foo". Chomp all leading /.
- while len(path) and path[0] == "/":
- path = path[1:]
-
- return os.path.join(target, path)
-
-
def decode_binary(blob, encoding='utf-8'):
# Converts a binary type into a text type using given encoding.
- if isinstance(blob, six.string_types):
+ if isinstance(blob, str):
return blob
return blob.decode(encoding)
def encode_text(text, encoding='utf-8'):
# Converts a text string into a binary type using given encoding.
- if isinstance(text, six.binary_type):
+ if isinstance(text, bytes):
return text
return text.encode(encoding)
@@ -175,8 +148,7 @@ def fully_decoded_payload(part):
# bytes, first try to decode to str via CT charset, and failing that, try
# utf-8 using surrogate escapes.
cte_payload = part.get_payload(decode=True)
- if (six.PY3 and
- part.get_content_maintype() == 'text' and
+ if (part.get_content_maintype() == 'text' and
isinstance(cte_payload, bytes)):
charset = part.get_charset()
if charset and charset.input_codec:
@@ -213,91 +185,6 @@ DMIDECODE_TO_DMI_SYS_MAPPING = {
}
-class ProcessExecutionError(IOError):
-
- MESSAGE_TMPL = ('%(description)s\n'
- 'Command: %(cmd)s\n'
- 'Exit code: %(exit_code)s\n'
- 'Reason: %(reason)s\n'
- 'Stdout: %(stdout)s\n'
- 'Stderr: %(stderr)s')
- empty_attr = '-'
-
- def __init__(self, stdout=None, stderr=None,
- exit_code=None, cmd=None,
- description=None, reason=None,
- errno=None):
- if not cmd:
- self.cmd = self.empty_attr
- else:
- self.cmd = cmd
-
- if not description:
- if not exit_code and errno == ENOEXEC:
- self.description = 'Exec format error. Missing #! in script?'
- else:
- self.description = 'Unexpected error while running command.'
- else:
- self.description = description
-
- if not isinstance(exit_code, six.integer_types):
- self.exit_code = self.empty_attr
- else:
- self.exit_code = exit_code
-
- if not stderr:
- if stderr is None:
- self.stderr = self.empty_attr
- else:
- self.stderr = stderr
- else:
- self.stderr = self._indent_text(stderr)
-
- if not stdout:
- if stdout is None:
- self.stdout = self.empty_attr
- else:
- self.stdout = stdout
- else:
- self.stdout = self._indent_text(stdout)
-
- if reason:
- self.reason = reason
- else:
- self.reason = self.empty_attr
-
- self.errno = errno
- message = self.MESSAGE_TMPL % {
- 'description': self._ensure_string(self.description),
- 'cmd': self._ensure_string(self.cmd),
- 'exit_code': self._ensure_string(self.exit_code),
- 'stdout': self._ensure_string(self.stdout),
- 'stderr': self._ensure_string(self.stderr),
- 'reason': self._ensure_string(self.reason),
- }
- IOError.__init__(self, message)
-
- def _ensure_string(self, text):
- """
- if data is bytes object, decode
- """
- return text.decode() if isinstance(text, six.binary_type) else text
-
- def _indent_text(self, text, indent_level=8):
- """
- indent text on all but the first line, allowing for easy to read output
- """
- cr = '\n'
- indent = ' ' * indent_level
- # if input is bytes, return bytes
- if isinstance(text, six.binary_type):
- cr = cr.encode()
- indent = indent.encode()
- # remove any newlines at end of text first to prevent unneeded blank
- # line in output
- return text.rstrip(cr).replace(cr, cr + indent)
-
-
class SeLinuxGuard(object):
def __init__(self, path, recursive=False):
# Late import since it might not always
@@ -322,9 +209,6 @@ class SeLinuxGuard(object):
return
path = os.path.realpath(self.path)
- # path should be a string, not unicode
- if six.PY2:
- path = str(path)
try:
stats = os.lstat(path)
self.selinux.matchpathcon(path, stats[stat.ST_MODE])
@@ -369,7 +253,7 @@ def is_true(val, addons=None):
check_set = TRUE_STRINGS
if addons:
check_set = list(check_set) + addons
- if six.text_type(val).lower().strip() in check_set:
+ if str(val).lower().strip() in check_set:
return True
return False
@@ -380,7 +264,7 @@ def is_false(val, addons=None):
check_set = FALSE_STRINGS
if addons:
check_set = list(check_set) + addons
- if six.text_type(val).lower().strip() in check_set:
+ if str(val).lower().strip() in check_set:
return True
return False
@@ -441,7 +325,7 @@ def uniq_merge_sorted(*lists):
def uniq_merge(*lists):
combined_list = []
for a_list in lists:
- if isinstance(a_list, six.string_types):
+ if isinstance(a_list, str):
a_list = a_list.strip().split(",")
# Kickout the empty ones
a_list = [a for a in a_list if len(a)]
@@ -464,7 +348,7 @@ def clean_filename(fn):
def decomp_gzip(data, quiet=True, decode=True):
try:
- buf = six.BytesIO(encode_text(data))
+ buf = io.BytesIO(encode_text(data))
with contextlib.closing(gzip.GzipFile(None, "rb", 1, buf)) as gh:
# E1101 is https://github.com/PyCQA/pylint/issues/1444
if decode:
@@ -475,7 +359,7 @@ def decomp_gzip(data, quiet=True, decode=True):
if quiet:
return data
else:
- raise DecompressionError(six.text_type(e))
+ raise DecompressionError(str(e)) from e
def extract_usergroup(ug_pair):
@@ -533,18 +417,9 @@ def multi_log(text, console=True, stderr=True,
log.log(log_level, text)
-def is_ipv4(instr):
- """determine if input string is a ipv4 address. return boolean."""
- toks = instr.split('.')
- if len(toks) != 4:
- return False
-
- try:
- toks = [x for x in toks if 0 <= int(x) < 256]
- except Exception:
- return False
-
- return len(toks) == 4
+@lru_cache()
+def is_BSD():
+ return 'BSD' in platform.system()
@lru_cache()
@@ -552,6 +427,16 @@ def is_FreeBSD():
return system_info()['variant'] == "freebsd"
+@lru_cache()
+def is_NetBSD():
+ return system_info()['variant'] == "netbsd"
+
+
+@lru_cache()
+def is_OpenBSD():
+ return system_info()['variant'] == "openbsd"
+
+
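
The BSD predicates are wrapped in lru_cache() because their answer cannot change within a process. The corollary, visible in the test_util.py changes above, is that tests which mock platform.system() must call util.is_BSD.cache_clear() first; every lru_cache-wrapped function exposes cache_clear():

```python
import platform
from functools import lru_cache

@lru_cache()
def is_BSD():
    return 'BSD' in platform.system()

print(is_BSD())       # computed once...
print(is_BSD())       # ...then answered from the cache
is_BSD.cache_clear()  # required before mocking platform.system in tests
```
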
def get_cfg_option_bool(yobj, key, default=False):
if key not in yobj:
return default
@@ -562,7 +447,7 @@ def get_cfg_option_str(yobj, key, default=None):
if key not in yobj:
return default
val = yobj[key]
- if not isinstance(val, six.string_types):
+ if not isinstance(val, str):
val = str(val)
return val
@@ -625,10 +510,9 @@ def get_linux_distro():
flavor = match.groupdict()['codename']
if distro_name == 'rhel':
distro_name = 'redhat'
- elif os.path.exists('/bin/freebsd-version'):
- distro_name = 'freebsd'
- distro_version, _ = subp(['uname', '-r'])
- distro_version = distro_version.strip()
+ elif is_BSD():
+ distro_name = platform.system().lower()
+ distro_version = platform.release()
else:
dist = ('', '', '')
try:
@@ -656,7 +540,7 @@ def system_info():
'system': platform.system(),
'release': platform.release(),
'python': platform.python_version(),
- 'uname': platform.uname(),
+ 'uname': list(platform.uname()),
'dist': get_linux_distro()
}
system = info['system'].lower()
@@ -664,18 +548,20 @@ def system_info():
if system == "linux":
linux_dist = info['dist'][0].lower()
if linux_dist in (
- 'arch', 'centos', 'debian', 'fedora', 'rhel', 'suse'):
+ 'alpine', 'arch', 'centos', 'debian', 'fedora', 'rhel',
+ 'suse'):
var = linux_dist
elif linux_dist in ('ubuntu', 'linuxmint', 'mint'):
var = 'ubuntu'
elif linux_dist == 'redhat':
var = 'rhel'
elif linux_dist in (
- 'opensuse', 'opensuse-tumbleweed', 'opensuse-leap', 'sles'):
+ 'opensuse', 'opensuse-tumbleweed', 'opensuse-leap',
+ 'sles', 'sle_hpc'):
var = 'suse'
else:
var = 'linux'
- elif system in ('windows', 'darwin', "freebsd"):
+ elif system in ('windows', 'darwin', "freebsd", "netbsd", "openbsd"):
var = system
info['variant'] = var
@@ -703,7 +589,7 @@ def get_cfg_option_list(yobj, key, default=None):
if isinstance(val, (list)):
cval = [v for v in val]
return cval
- if not isinstance(val, six.string_types):
+ if not isinstance(val, str):
val = str(val)
return [val]
@@ -724,7 +610,7 @@ def get_cfg_by_path(yobj, keyp, default=None):
@return: The value of the item at keyp, or default if the path
is not found."""
- if isinstance(keyp, six.string_types):
+ if isinstance(keyp, str):
keyp = keyp.split("/")
cur = yobj
for tok in keyp:
@@ -822,7 +708,7 @@ def make_url(scheme, host, port=None,
pieces.append(query or '')
pieces.append(fragment or '')
- return urlparse.urlunparse(pieces)
+ return parse.urlunparse(pieces)
def mergemanydict(srcs, reverse=False):
@@ -869,37 +755,6 @@ def del_dir(path):
shutil.rmtree(path)
-def runparts(dirp, skip_no_exist=True, exe_prefix=None):
- if skip_no_exist and not os.path.isdir(dirp):
- return
-
- failed = []
- attempted = []
-
- if exe_prefix is None:
- prefix = []
- elif isinstance(exe_prefix, str):
- prefix = [str(exe_prefix)]
- elif isinstance(exe_prefix, list):
- prefix = exe_prefix
- else:
- raise TypeError("exe_prefix must be None, str, or list")
-
- for exe_name in sorted(os.listdir(dirp)):
- exe_path = os.path.join(dirp, exe_name)
- if os.path.isfile(exe_path) and os.access(exe_path, os.X_OK):
- attempted.append(exe_path)
- try:
- subp(prefix + [exe_path], capture=False)
- except ProcessExecutionError as e:
- logexc(LOG, "Failed running %s [%s]", exe_path, e.exit_code)
- failed.append(e)
-
- if failed and attempted:
- raise RuntimeError('Runparts: %s failures in %s attempted commands'
- % (len(failed), len(attempted)))
-
-
# read_optional_seed
# returns boolean indicating success or failure (presence of files)
# if files are present, populates 'fill' dictionary with 'user-data' and
@@ -1031,7 +886,7 @@ def read_conf_with_confd(cfgfile):
if "conf_d" in cfg:
confd = cfg['conf_d']
if confd:
- if not isinstance(confd, six.string_types):
+ if not isinstance(confd, str):
raise TypeError(("Config file %s contains 'conf_d' "
"with non-string type %s") %
(cfgfile, type_utils.obj_name(confd)))
@@ -1049,7 +904,7 @@ def read_conf_with_confd(cfgfile):
def read_conf_from_cmdline(cmdline=None):
- # return a dictionary or config on the cmdline or None
+ # return a dictionary of config on the cmdline or None
return load_yaml(read_cc_from_cmdline(cmdline=cmdline))
@@ -1057,11 +912,12 @@ def read_cc_from_cmdline(cmdline=None):
# this should support reading cloud-config information from
# the kernel command line. It is intended to support content of the
# format:
- # cc: <yaml content here> [end_cc]
+ # cc: <yaml content here|urlencoded yaml content> [end_cc]
# this would include:
# cc: ssh_import_id: [smoser, kirkland]\\n
# cc: ssh_import_id: [smoser, bob]\\nruncmd: [ [ ls, -l ], echo hi ] end_cc
# cc:ssh_import_id: [smoser] end_cc cc:runcmd: [ [ ls, -l ] ] end_cc
+ # cc:ssh_import_id: %5Bsmoser%5D end_cc
if cmdline is None:
cmdline = get_cmdline()
@@ -1076,9 +932,9 @@ def read_cc_from_cmdline(cmdline=None):
end = cmdline.find(tag_end, begin + begin_l)
if end < 0:
end = clen
- tokens.append(cmdline[begin + begin_l:end].lstrip().replace("\\n",
- "\n"))
-
+ tokens.append(
+ parse.unquote(
+ cmdline[begin + begin_l:end].lstrip()).replace("\\n", "\n"))
begin = cmdline.find(tag_begin, end + end_l)
return '\n'.join(tokens)
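
With parse.unquote() added, cloud-config on the kernel command line may now be urlencoded, which avoids bootloader quoting problems with characters such as '[' and ']'; literal '\\n' sequences still become newlines afterwards. A condensed sketch of the tokenizer (the real code reuses precomputed tag offsets):

```python
from urllib import parse

def read_cc(cmdline):
    tokens, begin = [], cmdline.find('cc:')
    while begin >= 0:
        end = cmdline.find('end_cc', begin + 3)
        end = len(cmdline) if end < 0 else end
        raw = cmdline[begin + 3:end].lstrip()
        tokens.append(parse.unquote(raw).replace('\\n', '\n'))
        begin = cmdline.find('cc:', end + 6)
    return '\n'.join(tokens)

print(read_cc('root=/dev/sda cc: ssh_import_id%3A%20%5Bsmoser%5D end_cc'))
# ssh_import_id: [smoser]
```
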
@@ -1223,7 +1079,7 @@ def is_resolvable_url(url):
"""determine if this url is resolvable (existing or ip)."""
return log_time(logfunc=LOG.debug, msg="Resolving URL: " + url,
func=is_resolvable,
- args=(urlparse.urlparse(url).hostname,))
+ args=(parse.urlparse(url).hostname,))
def search_for_mirror(candidates):
@@ -1231,9 +1087,14 @@ def search_for_mirror(candidates):
Search through a list of mirror urls for one that works
This needs to return quickly.
"""
+ if candidates is None:
+ return None
+
+ LOG.debug("search for mirror in candidates: '%s'", candidates)
for cand in candidates:
try:
if is_resolvable_url(cand):
+ LOG.debug("found working mirror: '%s'", cand)
return cand
except Exception:
pass
@@ -1254,6 +1115,68 @@ def close_stdin():
os.dup2(fp.fileno(), sys.stdin.fileno())
+def find_devs_with_freebsd(criteria=None, oformat='device',
+ tag=None, no_cache=False, path=None):
+ devlist = []
+ if not criteria:
+ return glob.glob("/dev/msdosfs/*") + glob.glob("/dev/iso9660/*")
+ if criteria.startswith("LABEL="):
+ label = criteria.lstrip("LABEL=")
+ devlist = [
+ p for p in ['/dev/msdosfs/' + label, '/dev/iso9660/' + label]
+ if os.path.exists(p)]
+ elif criteria == "TYPE=vfat":
+ devlist = glob.glob("/dev/msdosfs/*")
+ elif criteria == "TYPE=iso9660":
+ devlist = glob.glob("/dev/iso9660/*")
+ return devlist
+
+
+def find_devs_with_netbsd(criteria=None, oformat='device',
+ tag=None, no_cache=False, path=None):
+ devlist = []
+ label = None
+ _type = None
+ if criteria:
+ if criteria.startswith("LABEL="):
+ label = criteria.lstrip("LABEL=")
+ if criteria.startswith("TYPE="):
+ _type = criteria.lstrip("TYPE=")
+ out, _err = subp.subp(['sysctl', '-n', 'hw.disknames'], rcs=[0])
+ for dev in out.split():
+ if label or _type:
+ mscdlabel_out, _ = subp.subp(['mscdlabel', dev], rcs=[0, 1])
+ if label and not ('label "%s"' % label) in mscdlabel_out:
+ continue
+ if _type == "iso9660" and "ISO filesystem" not in mscdlabel_out:
+ continue
+ if _type == "vfat" and "ISO filesystem" in mscdlabel_out:
+ continue
+ devlist.append('/dev/' + dev)
+ return devlist
+
+
+def find_devs_with_openbsd(criteria=None, oformat='device',
+ tag=None, no_cache=False, path=None):
+ out, _err = subp.subp(['sysctl', '-n', 'hw.disknames'], rcs=[0])
+ devlist = []
+ for entry in out.split(','):
+ if not entry.endswith(':'):
+ # ffs partition with a serial, not a config-drive
+ continue
+ if entry == 'fd0:':
+ continue
+ part_id = 'a' if entry.startswith('cd') else 'i'
+ devlist.append(entry[:-1] + part_id)
+ if criteria == "TYPE=iso9660":
+ devlist = [i for i in devlist if i.startswith('cd')]
+ elif criteria in ["LABEL=CONFIG-2", "TYPE=vfat"]:
+ devlist = [i for i in devlist if not i.startswith('cd')]
+ elif criteria:
+ LOG.debug("Unexpected criteria: %s", criteria)
+ return ['/dev/' + i for i in devlist]
+
+
def find_devs_with(criteria=None, oformat='device',
tag=None, no_cache=False, path=None):
"""
@@ -1263,6 +1186,16 @@ def find_devs_with(criteria=None, oformat='device',
LABEL=<label>
UUID=<uuid>
"""
+ if is_FreeBSD():
+ return find_devs_with_freebsd(criteria, oformat,
+ tag, no_cache, path)
+ elif is_NetBSD():
+ return find_devs_with_netbsd(criteria, oformat,
+ tag, no_cache, path)
+ elif is_OpenBSD():
+ return find_devs_with_openbsd(criteria, oformat,
+ tag, no_cache, path)
+
blk_id_cmd = ['blkid']
options = []
if criteria:
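
find_devs_with() now dispatches to per-BSD helpers before falling through to the Linux blkid path, since the BSDs do not ship blkid. A condensed version of the FreeBSD lookup (this sketch slices off the 'LABEL=' prefix, whereas the patch uses str.lstrip(), which strips a character set rather than a prefix and can eat leading letters of the label):

```python
import glob
import os

def find_devs_freebsd_sketch(criteria=None):
    # Labeled filesystems appear as named nodes under /dev/msdosfs
    # and /dev/iso9660 on FreeBSD.
    if not criteria:
        return glob.glob('/dev/msdosfs/*') + glob.glob('/dev/iso9660/*')
    if criteria.startswith('LABEL='):
        label = criteria[len('LABEL='):]
        return [p for p in ('/dev/msdosfs/' + label,
                            '/dev/iso9660/' + label) if os.path.exists(p)]
    if criteria == 'TYPE=vfat':
        return glob.glob('/dev/msdosfs/*')
    if criteria == 'TYPE=iso9660':
        return glob.glob('/dev/iso9660/*')
    return []
```
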
@@ -1291,8 +1224,8 @@ def find_devs_with(criteria=None, oformat='device',
cmd = blk_id_cmd + options
# See man blkid for why 2 is added
try:
- (out, _err) = subp(cmd, rcs=[0, 2])
- except ProcessExecutionError as e:
+ (out, _err) = subp.subp(cmd, rcs=[0, 2])
+ except subp.ProcessExecutionError as e:
if e.errno == ENOENT:
# blkid not found...
out = ""
@@ -1327,7 +1260,7 @@ def blkid(devs=None, disable_cache=False):
# we have to decode with 'replace' as shelx.split (called by
# load_shell_content) can't take bytes. So this is potentially
# lossy of non-utf-8 chars in blkid output.
- out, _ = subp(cmd, capture=True, decode="replace")
+ out, _ = subp.subp(cmd, capture=True, decode="replace")
ret = {}
for line in out.splitlines():
dev, _, data = line.partition(":")
@@ -1355,7 +1288,7 @@ def uniq_list(in_list):
def load_file(fname, read_cb=None, quiet=False, decode=True):
LOG.debug("Reading from %s (quiet=%s)", fname, quiet)
- ofh = six.BytesIO()
+ ofh = io.BytesIO()
try:
with open(fname, 'rb') as ifh:
pipe_in_out(ifh, ofh, chunk_cb=read_cb)
@@ -1430,7 +1363,7 @@ def chownbyname(fname, user=None, group=None):
if group:
gid = grp.getgrnam(group).gr_gid
except KeyError as e:
- raise OSError("Unknown user or group: %s" % (e))
+ raise OSError("Unknown user or group: %s" % (e)) from e
chownbyid(fname, uid, gid)
@@ -1647,7 +1580,7 @@ def unmounter(umount):
finally:
if umount:
umount_cmd = ["umount", umount]
- subp(umount_cmd)
+ subp.subp(umount_cmd)
def mounts():
@@ -1658,7 +1591,7 @@ def mounts():
mount_locs = load_file("/proc/mounts").splitlines()
method = 'proc'
else:
- (mountoutput, _err) = subp("mount")
+ (mountoutput, _err) = subp.subp("mount")
mount_locs = mountoutput.splitlines()
method = 'mount'
mountre = r'^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$'
@@ -1742,7 +1675,7 @@ def mount_cb(device, callback, data=None, mtype=None,
mountcmd.extend(['-t', mtype])
mountcmd.append(device)
mountcmd.append(tmpd)
- subp(mountcmd, update_env=update_env_for_mount)
+ subp.subp(mountcmd, update_env=update_env_for_mount)
umount = tmpd # This forces it to be unmounted (when set)
mountpoint = tmpd
break
@@ -1804,6 +1737,7 @@ def time_rfc2822():
return ts
+@lru_cache()
def boottime():
"""Use sysctlbyname(3) via ctypes to find kern.boottime
@@ -1813,6 +1747,7 @@ def boottime():
@return boottime: float to be compatible with linux
"""
import ctypes
+ import ctypes.util
NULL_BYTES = b"\x00"
@@ -1821,7 +1756,7 @@ def boottime():
("tv_sec", ctypes.c_int64),
("tv_usec", ctypes.c_int64)
]
- libc = ctypes.CDLL('/lib/libc.so.7')
+ libc = ctypes.CDLL(ctypes.util.find_library('c'))
size = ctypes.c_size_t()
size.value = ctypes.sizeof(timeval)
buf = timeval()
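
Replacing the hard-coded '/lib/libc.so.7' with ctypes.util.find_library('c') lets boottime() locate the C library on any BSD, and the lru_cache() added above memoizes the whole sysctl round-trip. The resolution step on its own:

```python
import ctypes
import ctypes.util

name = ctypes.util.find_library('c')  # e.g. 'libc.so.7' on FreeBSD 12
print('resolved C library:', name)
if name:
    libc = ctypes.CDLL(name)  # handle later used for sysctlbyname(3)
```
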
@@ -1872,7 +1807,15 @@ def chmod(path, mode):
os.chmod(path, real_mode)
-def write_file(filename, content, mode=0o644, omode="wb", copy_mode=False):
+def write_file(
+ filename,
+ content,
+ mode=0o644,
+ omode="wb",
+ preserve_mode=False,
+ *,
+ ensure_dir_exists=True
+):
"""
Writes a file with the given content and sets the file mode as specified.
Restores the SELinux context if possible.
@@ -1881,16 +1824,22 @@ def write_file(filename, content, mode=0o644, omode="wb", copy_mode=False):
@param content: The content to write to the file.
@param mode: The filesystem mode to set on the file.
@param omode: The open mode used when opening the file (w, wb, a, etc.)
+ @param preserve_mode: If True and `filename` exists, preserve `filename`s
+ current mode instead of applying `mode`.
+ @param ensure_dir_exists: If True (the default), ensure that the directory
+ containing `filename` exists before writing to
+ the file.
"""
- if copy_mode:
+ if preserve_mode:
try:
file_stat = os.stat(filename)
mode = stat.S_IMODE(file_stat.st_mode)
except OSError:
pass
- ensure_dir(os.path.dirname(filename))
+ if ensure_dir_exists:
+ ensure_dir(os.path.dirname(filename))
if 'b' in omode.lower():
content = encode_text(content)
write_type = 'bytes'
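
write_file() renames copy_mode to preserve_mode and grows a keyword-only ensure_dir_exists switch so callers can insist that the parent directory already exists. A minimal sketch of the new semantics (the real function also handles omode, bytes payloads, and SELinux contexts):

```python
import os
import stat

def write_file_sketch(filename, content, mode=0o644, *,
                      preserve_mode=False, ensure_dir_exists=True):
    if preserve_mode:
        try:
            # Keep the existing file's permissions instead of `mode`.
            mode = stat.S_IMODE(os.stat(filename).st_mode)
        except OSError:
            pass  # file does not exist yet: fall back to `mode`
    if ensure_dir_exists:
        os.makedirs(os.path.dirname(filename) or '.', exist_ok=True)
    with open(filename, 'w') as f:
        f.write(content)
    os.chmod(filename, mode)
```
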
@@ -1924,185 +1873,6 @@ def delete_dir_contents(dirname):
del_file(node_fullpath)
-def subp_blob_in_tempfile(blob, *args, **kwargs):
- """Write blob to a tempfile, and call subp with args, kwargs. Then cleanup.
-
- 'basename' as a kwarg allows providing the basename for the file.
- The 'args' argument to subp will be updated with the full path to the
- filename as the first argument.
- """
- basename = kwargs.pop('basename', "subp_blob")
-
- if len(args) == 0 and 'args' not in kwargs:
- args = [tuple()]
-
- # Use tmpdir over tmpfile to avoid 'text file busy' on execute
- with temp_utils.tempdir(needs_exe=True) as tmpd:
- tmpf = os.path.join(tmpd, basename)
- if 'args' in kwargs:
- kwargs['args'] = [tmpf] + list(kwargs['args'])
- else:
- args = list(args)
- args[0] = [tmpf] + args[0]
-
- write_file(tmpf, blob, mode=0o700)
- return subp(*args, **kwargs)
-
-
-def subp(args, data=None, rcs=None, env=None, capture=True,
- combine_capture=False, shell=False,
- logstring=False, decode="replace", target=None, update_env=None,
- status_cb=None):
- """Run a subprocess.
-
- :param args: command to run in a list. [cmd, arg1, arg2...]
- :param data: input to the command, made available on its stdin.
- :param rcs:
- a list of allowed return codes. If subprocess exits with a value not
- in this list, a ProcessExecutionError will be raised. By default,
- data is returned as a string. See 'decode' parameter.
- :param env: a dictionary for the command's environment.
- :param capture:
- boolean indicating if output should be captured. If True, then stderr
- and stdout will be returned. If False, they will not be redirected.
- :param combine_capture:
- boolean indicating if stderr should be redirected to stdout. When True,
- interleaved stderr and stdout will be returned as the first element of
- a tuple, the second will be empty string or bytes (per decode).
- if combine_capture is True, then output is captured independent of
- the value of capture.
- :param shell: boolean indicating if this should be run with a shell.
- :param logstring:
- the command will be logged to DEBUG. If it contains info that should
- not be logged, then logstring will be logged instead.
- :param decode:
- if False, no decoding will be done and returned stdout and stderr will
- be bytes. Other allowed values are 'strict', 'ignore', and 'replace'.
- These values are passed through to bytes().decode() as the 'errors'
- parameter. There is no support for decoding to other than utf-8.
- :param target:
- not supported, kwarg present only to make function signature similar
- to curtin's subp.
- :param update_env:
- update the environment for this command with this dictionary.
- this will not affect the current processes os.environ.
- :param status_cb:
- call this function with a single string argument before starting
- and after finishing.
-
- :return
- if not capturing, return is (None, None)
- if capturing, stdout and stderr are returned.
- if decode:
- entries in tuple will be python2 unicode or python3 string
- if not decode:
- entries in tuple will be python2 string or python3 bytes
- """
-
- # not supported in cloud-init (yet), for now kept in the call signature
- # to ease maintaining code shared between cloud-init and curtin
- if target is not None:
- raise ValueError("target arg not supported by cloud-init")
-
- if rcs is None:
- rcs = [0]
-
- devnull_fp = None
-
- if update_env:
- if env is None:
- env = os.environ
- env = env.copy()
- env.update(update_env)
-
- if target_path(target) != "/":
- args = ['chroot', target] + list(args)
-
- if status_cb:
- command = ' '.join(args) if isinstance(args, list) else args
- status_cb('Begin run command: {command}\n'.format(command=command))
- if not logstring:
- LOG.debug(("Running command %s with allowed return codes %s"
- " (shell=%s, capture=%s)"),
- args, rcs, shell, 'combine' if combine_capture else capture)
- else:
- LOG.debug(("Running hidden command to protect sensitive "
- "input/output logstring: %s"), logstring)
-
- stdin = None
- stdout = None
- stderr = None
- if capture:
- stdout = subprocess.PIPE
- stderr = subprocess.PIPE
- if combine_capture:
- stdout = subprocess.PIPE
- stderr = subprocess.STDOUT
- if data is None:
- # using devnull assures any reads get null, rather
- # than possibly waiting on input.
- devnull_fp = open(os.devnull)
- stdin = devnull_fp
- else:
- stdin = subprocess.PIPE
- if not isinstance(data, bytes):
- data = data.encode()
-
- # Popen converts entries in the arguments array from non-bytes to bytes.
- # When locale is unset it may use ascii for that encoding which can
- # cause UnicodeDecodeErrors. (LP: #1751051)
- if isinstance(args, six.binary_type):
- bytes_args = args
- elif isinstance(args, six.string_types):
- bytes_args = args.encode("utf-8")
- else:
- bytes_args = [
- x if isinstance(x, six.binary_type) else x.encode("utf-8")
- for x in args]
- try:
- sp = subprocess.Popen(bytes_args, stdout=stdout,
- stderr=stderr, stdin=stdin,
- env=env, shell=shell)
- (out, err) = sp.communicate(data)
- except OSError as e:
- if status_cb:
- status_cb('ERROR: End run command: invalid command provided\n')
- raise ProcessExecutionError(
- cmd=args, reason=e, errno=e.errno,
- stdout="-" if decode else b"-",
- stderr="-" if decode else b"-")
- finally:
- if devnull_fp:
- devnull_fp.close()
-
- # Just ensure blank instead of none.
- if capture or combine_capture:
- if not out:
- out = b''
- if not err:
- err = b''
- if decode:
- def ldecode(data, m='utf-8'):
- if not isinstance(data, bytes):
- return data
- return data.decode(m, decode)
-
- out = ldecode(out)
- err = ldecode(err)
-
- rc = sp.returncode
- if rc not in rcs:
- if status_cb:
- status_cb(
- 'ERROR: End run command: exit({code})\n'.format(code=rc))
- raise ProcessExecutionError(stdout=out, stderr=err,
- exit_code=rc,
- cmd=args)
- if status_cb:
- status_cb('End run command: exit({code})\n'.format(code=rc))
- return (out, err)
-
-
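
This deletion is the other half of the refactor seen throughout the patch: subp(), subp_blob_in_tempfile(), ProcessExecutionError, which(), is_exe(), and target_path() now live in the dedicated cloudinit.subp module (exercised by cloudinit/tests/test_subp.py above). Call sites migrate mechanically, assuming cloud-init 20.3 is importable:

```python
# Before (<= 20.1):
#   from cloudinit import util
#   out, _err = util.subp(['lsb_release', '--all'], capture=True)

# After (20.3):
from cloudinit import subp

out, _err = subp.subp(['lsb_release', '--all'], capture=True)
lsb_path = subp.which('lsb_release')
```
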
def make_header(comment_char="#", base='created'):
ci_ver = version.version_string()
header = str(comment_char)
@@ -2111,8 +1881,8 @@ def make_header(comment_char="#", base='created'):
return header
-def abs_join(*paths):
- return os.path.abspath(os.path.join(*paths))
+def abs_join(base, *paths):
+ return os.path.abspath(os.path.join(base, *paths))
# shellify, takes a list of commands
@@ -2136,10 +1906,10 @@ def shellify(cmdlist, add_header=True):
if isinstance(args, (list, tuple)):
fixed = []
for f in args:
- fixed.append("'%s'" % (six.text_type(f).replace("'", escaped)))
+ fixed.append("'%s'" % (str(f).replace("'", escaped)))
content = "%s%s\n" % (content, ' '.join(fixed))
cmds_made += 1
- elif isinstance(args, six.string_types):
+ elif isinstance(args, str):
content = "%s%s\n" % (content, args)
cmds_made += 1
else:
@@ -2168,7 +1938,7 @@ def is_container():
try:
# try to run a helper program. if it returns true/zero
# then we're inside a container. otherwise, no
- subp(helper)
+ subp.subp(helper)
return True
except (IOError, OSError):
pass
@@ -2265,7 +2035,7 @@ def expand_package_list(version_fmt, pkgs):
pkglist = []
for pkg in pkgs:
- if isinstance(pkg, six.string_types):
+ if isinstance(pkg, str):
pkglist.append(pkg)
continue
@@ -2374,7 +2144,7 @@ def find_freebsd_part(fs):
return splitted[2]
elif splitted[2] in ['label', 'gpt', 'ufs']:
target_label = fs[5:]
- (part, _err) = subp(['glabel', 'status', '-s'])
+ (part, _err) = subp.subp(['glabel', 'status', '-s'])
for labels in part.split("\n"):
items = labels.split()
if len(items) > 0 and items[0] == target_label:
@@ -2396,10 +2166,10 @@ def get_path_dev_freebsd(path, mnt_list):
def get_mount_info_freebsd(path):
- (result, err) = subp(['mount', '-p', path], rcs=[0, 1])
+ (result, err) = subp.subp(['mount', '-p', path], rcs=[0, 1])
if len(err):
# find a path if the input is not a mounting point
- (mnt_list, err) = subp(['mount', '-p'])
+ (mnt_list, err) = subp.subp(['mount', '-p'])
path_found = get_path_dev_freebsd(path, mnt_list)
if (path_found is None):
return None
@@ -2415,8 +2185,8 @@ def get_device_info_from_zpool(zpool):
LOG.debug('Cannot get zpool info, no /dev/zfs')
return None
try:
- (zpoolstatus, err) = subp(['zpool', 'status', zpool])
- except ProcessExecutionError as err:
+ (zpoolstatus, err) = subp.subp(['zpool', 'status', zpool])
+ except subp.ProcessExecutionError as err:
LOG.warning("Unable to get zpool status of %s: %s", zpool, err)
return None
if len(err):
@@ -2430,7 +2200,7 @@ def get_device_info_from_zpool(zpool):
def parse_mount(path):
- (mountoutput, _err) = subp(['mount'])
+ (mountoutput, _err) = subp.subp(['mount'])
mount_locs = mountoutput.splitlines()
# there are 2 types of mount outputs we have to parse therefore
# the regex is a bit complex. to better understand this regex see:
@@ -2503,40 +2273,6 @@ def get_mount_info(path, log=LOG, get_mnt_opts=False):
return parse_mount(path)
-def is_exe(fpath):
- # return boolean indicating if fpath exists and is executable.
- return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
-
-
-def which(program, search=None, target=None):
- target = target_path(target)
-
- if os.path.sep in program:
- # if program had a '/' in it, then do not search PATH
- # 'which' does consider cwd here. (cd / && which bin/ls) = bin/ls
- # so effectively we set cwd to / (or target)
- if is_exe(target_path(target, program)):
- return program
-
- if search is None:
- paths = [p.strip('"') for p in
- os.environ.get("PATH", "").split(os.pathsep)]
- if target == "/":
- search = paths
- else:
- search = [p for p in paths if p.startswith("/")]
-
- # normalize path input
- search = [os.path.abspath(p) for p in search]
-
- for path in search:
- ppath = os.path.sep.join((path, program))
- if is_exe(target_path(target, ppath)):
- return ppath
-
- return None
-
-
def log_time(logfunc, msg, func, args=None, kwargs=None, get_uptime=False):
if args is None:
args = []
@@ -2651,8 +2387,8 @@ def human2bytes(size):
try:
num = float(num)
- except ValueError:
- raise ValueError("'%s' is not valid input." % size_in)
+ except ValueError as e:
+ raise ValueError("'%s' is not valid input." % size_in) from e
if num < 0:
raise ValueError("'%s': cannot be negative" % size_in)
@@ -2700,7 +2436,7 @@ def _call_dmidecode(key, dmidecode_path):
"""
try:
cmd = [dmidecode_path, "--string", key]
- (result, _err) = subp(cmd)
+ (result, _err) = subp.subp(cmd)
result = result.strip()
LOG.debug("dmidecode returned '%s' for '%s'", result, key)
if result.replace(".", "") == "":
@@ -2754,7 +2490,8 @@ def read_dmi_data(key):
LOG.debug("dmidata is not supported on %s", uname_arch)
return None
- dmidecode_path = which('dmidecode')
+ print("hi, now its: %s\n", subp)
+ dmidecode_path = subp.which('dmidecode')
if dmidecode_path:
return _call_dmidecode(key, dmidecode_path)
@@ -2765,12 +2502,12 @@ def read_dmi_data(key):
def message_from_string(string):
if sys.version_info[:2] < (2, 7):
- return email.message_from_file(six.StringIO(string))
+ return email.message_from_file(io.StringIO(string))
return email.message_from_string(string)
def get_installed_packages(target=None):
- (out, _) = subp(['dpkg-query', '--list'], target=target, capture=True)
+ (out, _) = subp.subp(['dpkg-query', '--list'], target=target, capture=True)
pkgs_inst = set()
for line in out.splitlines():
@@ -2906,7 +2643,7 @@ def udevadm_settle(exists=None, timeout=None):
if timeout:
settle_cmd.extend(['--timeout=%s' % timeout])
- return subp(settle_cmd)
+ return subp.subp(settle_cmd)
def get_proc_ppid(pid):
diff --git a/cloudinit/version.py b/cloudinit/version.py
index 1bc1899c..8560d087 100644
--- a/cloudinit/version.py
+++ b/cloudinit/version.py
@@ -4,7 +4,7 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-__VERSION__ = "20.1"
+__VERSION__ = "20.3"
_PACKAGED_VERSION = '@@PACKAGED_VERSION@@'
FEATURES = [