Diffstat (limited to 'cloudinit')
-rw-r--r--  cloudinit/analyze/__main__.py | 88
-rw-r--r--  cloudinit/analyze/show.py | 204
-rw-r--r--  cloudinit/analyze/tests/test_boot.py | 170
-rw-r--r--  cloudinit/apport.py | 7
-rw-r--r--  cloudinit/atomic_helper.py | 6
-rw-r--r--  cloudinit/cmd/clean.py | 27
-rwxr-xr-x  cloudinit/cmd/devel/net_convert.py | 17
-rw-r--r--  cloudinit/cmd/devel/tests/test_logs.py | 2
-rw-r--r--  cloudinit/cmd/devel/tests/test_render.py | 2
-rw-r--r--  cloudinit/cmd/main.py | 9
-rw-r--r--  cloudinit/cmd/query.py | 3
-rw-r--r--  cloudinit/cmd/tests/test_clean.py | 8
-rw-r--r--  cloudinit/cmd/tests/test_cloud_id.py | 2
-rw-r--r--  cloudinit/cmd/tests/test_main.py | 9
-rw-r--r--  cloudinit/cmd/tests/test_query.py | 30
-rw-r--r--  cloudinit/cmd/tests/test_status.py | 2
-rw-r--r--  cloudinit/config/cc_apt_configure.py | 18
-rw-r--r--  cloudinit/config/cc_apt_pipelining.py | 4
-rwxr-xr-x  cloudinit/config/cc_byobu.py | 6
-rw-r--r--  cloudinit/config/cc_chef.py | 27
-rw-r--r--  cloudinit/config/cc_debug.py | 6
-rw-r--r--  cloudinit/config/cc_disk_setup.py | 5
-rw-r--r--  cloudinit/config/cc_emit_upstart.py | 2
-rw-r--r--  cloudinit/config/cc_final_message.py | 2
-rw-r--r--  cloudinit/config/cc_growpart.py | 31
-rw-r--r--  cloudinit/config/cc_keys_to_console.py | 12
-rw-r--r--  cloudinit/config/cc_landscape.py | 3
-rw-r--r--  cloudinit/config/cc_lxd.py | 19
-rw-r--r--  cloudinit/config/cc_mcollective.py | 10
-rw-r--r--  cloudinit/config/cc_mounts.py | 86
-rw-r--r--  cloudinit/config/cc_ntp.py | 20
-rw-r--r--  cloudinit/config/cc_package_update_upgrade_install.py | 7
-rw-r--r--  cloudinit/config/cc_phone_home.py | 12
-rw-r--r--  cloudinit/config/cc_power_state_change.py | 22
-rw-r--r--  cloudinit/config/cc_puppet.py | 47
-rw-r--r--  cloudinit/config/cc_resizefs.py | 23
-rw-r--r--  cloudinit/config/cc_resolv_conf.py | 4
-rw-r--r--  cloudinit/config/cc_rh_subscription.py | 6
-rw-r--r--  cloudinit/config/cc_rightscale_userdata.py | 7
-rw-r--r--  cloudinit/config/cc_rsyslog.py | 11
-rw-r--r--  cloudinit/config/cc_salt_minion.py | 13
-rw-r--r--  cloudinit/config/cc_scripts_per_boot.py | 4
-rw-r--r--  cloudinit/config/cc_scripts_per_instance.py | 7
-rw-r--r--  cloudinit/config/cc_scripts_per_once.py | 9
-rw-r--r--  cloudinit/config/cc_scripts_user.py | 4
-rw-r--r--  cloudinit/config/cc_scripts_vendor.py | 4
-rw-r--r--  cloudinit/config/cc_seed_random.py | 5
-rw-r--r--  cloudinit/config/cc_set_hostname.py | 12
-rwxr-xr-x  cloudinit/config/cc_set_passwords.py | 86
-rw-r--r--  cloudinit/config/cc_snap_config.py | 184
-rw-r--r--  cloudinit/config/cc_snappy.py | 321
-rwxr-xr-x  cloudinit/config/cc_ssh.py | 153
-rwxr-xr-x  cloudinit/config/cc_ssh_authkey_fingerprints.py | 14
-rwxr-xr-x  cloudinit/config/cc_ssh_import_id.py | 8
-rw-r--r--  cloudinit/config/cc_ubuntu_advantage.py | 223
-rw-r--r--  cloudinit/config/cc_ubuntu_drivers.py | 160
-rw-r--r--  cloudinit/config/cc_update_etc_hosts.py | 8
-rw-r--r--  cloudinit/config/cc_users_groups.py | 6
-rw-r--r--  cloudinit/config/cc_vyos.py | 141
-rw-r--r--  cloudinit/config/cc_write_files.py | 3
-rw-r--r--  cloudinit/config/cc_yum_add_repo.py | 22
-rw-r--r--  cloudinit/config/cc_zypper_add_repo.py | 3
-rw-r--r--  cloudinit/config/schema.py | 2
-rw-r--r--  cloudinit/config/tests/test_apt_pipelining.py | 28
-rw-r--r--  cloudinit/config/tests/test_set_passwords.py | 88
-rw-r--r--  cloudinit/config/tests/test_snap.py | 2
-rw-r--r--  cloudinit/config/tests/test_ssh.py | 202
-rw-r--r--  cloudinit/config/tests/test_ubuntu_advantage.py | 347
-rw-r--r--  cloudinit/config/tests/test_ubuntu_drivers.py | 237
-rw-r--r--  cloudinit/config/tests/test_users_groups.py | 28
-rw-r--r--  cloudinit/cs_utils.py | 2
-rwxr-xr-x[-rw-r--r--]  cloudinit/distros/__init__.py | 85
-rw-r--r--  cloudinit/distros/amazon.py | 26
-rw-r--r--  cloudinit/distros/arch.py | 14
-rw-r--r--  cloudinit/distros/debian.py | 14
-rw-r--r--  cloudinit/distros/freebsd.py | 532
-rw-r--r--  cloudinit/distros/opensuse.py | 3
-rw-r--r--  cloudinit/distros/parsers/hostname.py | 2
-rw-r--r--  cloudinit/distros/parsers/hosts.py | 2
-rw-r--r--  cloudinit/distros/parsers/resolv_conf.py | 2
-rw-r--r--  cloudinit/distros/parsers/sys_conf.py | 13
-rw-r--r--  cloudinit/distros/ubuntu.py | 4
-rwxr-xr-x  cloudinit/distros/ug_util.py | 22
-rw-r--r--  cloudinit/ec2_utils.py | 45
-rw-r--r--  cloudinit/handlers/__init__.py | 9
-rw-r--r--  cloudinit/handlers/cloud_config.py | 3
-rw-r--r--  cloudinit/handlers/upstart_job.py | 2
-rw-r--r--  cloudinit/helpers.py | 6
-rw-r--r--  cloudinit/log.py | 14
-rw-r--r--  cloudinit/mergers/__init__.py | 4
-rw-r--r--  cloudinit/mergers/m_dict.py | 4
-rw-r--r--  cloudinit/mergers/m_list.py | 4
-rw-r--r--  cloudinit/mergers/m_str.py | 9
-rw-r--r--  cloudinit/net/__init__.py | 368
-rwxr-xr-x  cloudinit/net/cmdline.py | 133
-rw-r--r--  cloudinit/net/dhcp.py | 166
-rw-r--r--  cloudinit/net/eni.py | 49
-rw-r--r--  cloudinit/net/freebsd.py | 175
-rw-r--r--  cloudinit/net/netplan.py | 60
-rw-r--r--  cloudinit/net/network_state.py | 125
-rw-r--r--  cloudinit/net/renderer.py | 4
-rw-r--r--  cloudinit/net/renderers.py | 4
-rw-r--r--  cloudinit/net/sysconfig.py | 428
-rw-r--r--  cloudinit/net/tests/test_dhcp.py | 201
-rw-r--r--  cloudinit/net/tests/test_init.py | 651
-rw-r--r--  cloudinit/net/tests/test_network_state.py | 48
-rw-r--r--  cloudinit/netinfo.py | 19
-rwxr-xr-x[-rw-r--r--]  cloudinit/reporting/handlers.py | 138
-rw-r--r--  cloudinit/safeyaml.py | 22
-rw-r--r--  cloudinit/settings.py | 2
-rw-r--r--  cloudinit/signal_handler.py | 3
-rwxr-xr-x[-rw-r--r--]  cloudinit/sources/DataSourceAzure.py | 474
-rw-r--r--  cloudinit/sources/DataSourceCloudSigma.py | 8
-rw-r--r--  cloudinit/sources/DataSourceCloudStack.py | 26
-rw-r--r--  cloudinit/sources/DataSourceConfigDrive.py | 15
-rw-r--r--  cloudinit/sources/DataSourceEc2.py | 240
-rw-r--r--  cloudinit/sources/DataSourceExoscale.py | 268
-rw-r--r--  cloudinit/sources/DataSourceGCE.py | 30
-rw-r--r--  cloudinit/sources/DataSourceHetzner.py | 3
-rw-r--r--  cloudinit/sources/DataSourceIBMCloud.py | 2
-rw-r--r--  cloudinit/sources/DataSourceMAAS.py | 2
-rw-r--r--  cloudinit/sources/DataSourceNoCloud.py | 40
-rw-r--r--  cloudinit/sources/DataSourceOVF.py | 41
-rw-r--r--  cloudinit/sources/DataSourceOpenNebula.py | 4
-rw-r--r--  cloudinit/sources/DataSourceOpenStack.py | 2
-rw-r--r--  cloudinit/sources/DataSourceOracle.py | 170
-rw-r--r--  cloudinit/sources/DataSourceRbxCloud.py | 251
-rw-r--r--  cloudinit/sources/DataSourceScaleway.py | 20
-rw-r--r--  cloudinit/sources/DataSourceSmartOS.py | 8
-rw-r--r--  cloudinit/sources/__init__.py | 50
-rwxr-xr-x[-rw-r--r--]  cloudinit/sources/helpers/azure.py | 345
-rw-r--r--  cloudinit/sources/helpers/openstack.py | 40
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_custom_script.py | 143
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/guestcust_error.py | 1
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/guestcust_util.py | 37
-rw-r--r--  cloudinit/sources/tests/test_init.py | 35
-rw-r--r--  cloudinit/sources/tests/test_oracle.py | 424
-rw-r--r--  cloudinit/ssh_util.py | 80
-rw-r--r--  cloudinit/stages.py | 79
-rw-r--r--  cloudinit/temp_utils.py | 4
-rw-r--r--  cloudinit/templater.py | 4
-rw-r--r--  cloudinit/tests/helpers.py | 49
-rw-r--r--  cloudinit/tests/test_dhclient_hook.py | 2
-rw-r--r--  cloudinit/tests/test_gpg.py | 4
-rw-r--r--  cloudinit/tests/test_netinfo.py | 14
-rw-r--r--  cloudinit/tests/test_stages.py | 151
-rw-r--r--  cloudinit/tests/test_temp_utils.py | 18
-rw-r--r--  cloudinit/tests/test_url_helper.py | 53
-rw-r--r--  cloudinit/tests/test_util.py | 60
-rw-r--r--  cloudinit/tests/test_version.py | 4
-rw-r--r--  cloudinit/type_utils.py | 25
-rw-r--r--  cloudinit/url_helper.py | 118
-rw-r--r--  cloudinit/user_data.py | 13
-rw-r--r--  cloudinit/util.py | 241
-rw-r--r--  cloudinit/version.py | 2
155 files changed, 7464 insertions(+), 2853 deletions(-)
diff --git a/cloudinit/analyze/__main__.py b/cloudinit/analyze/__main__.py
index f8613656..99e5c203 100644
--- a/cloudinit/analyze/__main__.py
+++ b/cloudinit/analyze/__main__.py
@@ -7,7 +7,7 @@ import re
import sys
from cloudinit.util import json_dumps
-
+from datetime import datetime
from . import dump
from . import show
@@ -52,9 +52,93 @@ def get_parser(parser=None):
dest='outfile', default='-',
help='specify where to write output. ')
parser_dump.set_defaults(action=('dump', analyze_dump))
+ parser_boot = subparsers.add_parser(
+ 'boot', help='Print list of boot times for kernel and cloud-init')
+ parser_boot.add_argument('-i', '--infile', action='store',
+ dest='infile', default='/var/log/cloud-init.log',
+ help='specify where to read input. ')
+ parser_boot.add_argument('-o', '--outfile', action='store',
+ dest='outfile', default='-',
+ help='specify where to write output.')
+ parser_boot.set_defaults(action=('boot', analyze_boot))
return parser
+def analyze_boot(name, args):
+ """Report a list of how long different boot operations took.
+
+    For example:
+ -- Most Recent Boot Record --
+ Kernel Started at: <time>
+ Kernel ended boot at: <time>
+ Kernel time to boot (seconds): <time>
+ Cloud-init activated by systemd at: <time>
+ Time between Kernel end boot and Cloud-init activation (seconds):<time>
+ Cloud-init start: <time>
+ """
+ infh, outfh = configure_io(args)
+ kernel_info = show.dist_check_timestamp()
+ status_code, kernel_start, kernel_end, ci_sysd_start = \
+ kernel_info
+ kernel_start_timestamp = datetime.utcfromtimestamp(kernel_start)
+ kernel_end_timestamp = datetime.utcfromtimestamp(kernel_end)
+ ci_sysd_start_timestamp = datetime.utcfromtimestamp(ci_sysd_start)
+ try:
+ last_init_local = \
+ [e for e in _get_events(infh) if e['name'] == 'init-local' and
+ 'starting search' in e['description']][-1]
+ ci_start = datetime.utcfromtimestamp(last_init_local['timestamp'])
+ except IndexError:
+ ci_start = 'Could not find init-local log-line in cloud-init.log'
+ status_code = show.FAIL_CODE
+
+ FAILURE_MSG = 'Your Linux distro or container does not support this ' \
+ 'functionality.\n' \
+ 'You must be running a Kernel Telemetry supported ' \
+ 'distro.\nPlease check ' \
+ 'https://cloudinit.readthedocs.io/en/latest' \
+ '/topics/analyze.html for more ' \
+ 'information on supported distros.\n'
+
+ SUCCESS_MSG = '-- Most Recent Boot Record --\n' \
+ ' Kernel Started at: {k_s_t}\n' \
+ ' Kernel ended boot at: {k_e_t}\n' \
+ ' Kernel time to boot (seconds): {k_r}\n' \
+ ' Cloud-init activated by systemd at: {ci_sysd_t}\n' \
+ ' Time between Kernel end boot and Cloud-init ' \
+ 'activation (seconds): {bt_r}\n' \
+ ' Cloud-init start: {ci_start}\n'
+
+ CONTAINER_MSG = '-- Most Recent Container Boot Record --\n' \
+ ' Container started at: {k_s_t}\n' \
+ ' Cloud-init activated by systemd at: {ci_sysd_t}\n' \
+        '  Cloud-init start: {ci_start}\n'
+
+ status_map = {
+ show.FAIL_CODE: FAILURE_MSG,
+ show.CONTAINER_CODE: CONTAINER_MSG,
+ show.SUCCESS_CODE: SUCCESS_MSG
+ }
+
+ kernel_runtime = kernel_end - kernel_start
+ between_process_runtime = ci_sysd_start - kernel_end
+
+ kwargs = {
+ 'k_s_t': kernel_start_timestamp,
+ 'k_e_t': kernel_end_timestamp,
+ 'k_r': kernel_runtime,
+ 'bt_r': between_process_runtime,
+ 'k_e': kernel_end,
+ 'k_s': kernel_start,
+ 'ci_sysd': ci_sysd_start,
+ 'ci_sysd_t': ci_sysd_start_timestamp,
+ 'ci_start': ci_start
+ }
+
+ outfh.write(status_map[status_code].format(**kwargs))
+ return status_code
+
+
def analyze_blame(name, args):
"""Report a list of records sorted by largest time delta.
@@ -119,7 +203,7 @@ def analyze_dump(name, args):
def _get_events(infile):
rawdata = None
- events, rawdata = show.load_events(infile, None)
+ events, rawdata = show.load_events_infile(infile)
if not events:
events, _ = dump.dump_events(rawdata=rawdata)
return events
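
Note: once this lands, the subcommand is invoked as `cloud-init analyze boot
-i /var/log/cloud-init.log`. `analyze_boot` renders each epoch float returned
by `show.dist_check_timestamp()` with `datetime.utcfromtimestamp()`; a minimal
stand-alone sketch of that conversion, using a made-up epoch value:

    from datetime import datetime

    # Hypothetical epoch value (seconds since 1970-01-01 UTC).
    kernel_start = 1562607649.0

    # Same conversion analyze_boot applies before formatting the record.
    print(datetime.utcfromtimestamp(kernel_start))  # 2019-07-08 17:40:49
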
diff --git a/cloudinit/analyze/show.py b/cloudinit/analyze/show.py
index 3e778b8b..fb152b1d 100644
--- a/cloudinit/analyze/show.py
+++ b/cloudinit/analyze/show.py
@@ -8,8 +8,11 @@ import base64
import datetime
import json
import os
+import time
+import sys
from cloudinit import util
+from cloudinit.distros import uses_systemd
# An event:
'''
@@ -49,6 +52,10 @@ format_key = {
formatting_help = " ".join(["{0}: {1}".format(k.replace('%', '%%'), v)
for k, v in format_key.items()])
+SUCCESS_CODE = 'successful'
+FAIL_CODE = 'failure'
+CONTAINER_CODE = 'container'
+TIMESTAMP_UNKNOWN = (FAIL_CODE, -1, -1, -1)
def format_record(msg, event):
@@ -125,9 +132,175 @@ def total_time_record(total_time):
return 'Total Time: %3.5f seconds\n' % total_time
+class SystemctlReader(object):
+ '''
+ Class for dealing with all systemctl subp calls in a consistent manner.
+ '''
+ def __init__(self, property, parameter=None):
+ self.epoch = None
+ self.args = ['/bin/systemctl', 'show']
+ if parameter:
+ self.args.append(parameter)
+ self.args.extend(['-p', property])
+ # Don't want the init of our object to break. Instead of throwing
+ # an exception, set an error code that gets checked when data is
+ # requested from the object
+ self.failure = self.subp()
+
+ def subp(self):
+ '''
+ Make a subp call based on set args and handle errors by setting
+ failure code
+
+        :return: the error if the subp call failed, else None
+ '''
+ try:
+ value, err = util.subp(self.args, capture=True)
+ if err:
+ return err
+ self.epoch = value
+ return None
+ except Exception as systemctl_fail:
+ return systemctl_fail
+
+ def parse_epoch_as_float(self):
+ '''
+ If subp call succeeded, return the timestamp from subp as a float.
+
+ :return: timestamp as a float
+ '''
+ # subp has 2 ways to fail: it either fails and throws an exception,
+ # or returns an error code. Raise an exception here in order to make
+ # sure both scenarios throw exceptions
+ if self.failure:
+ raise RuntimeError('Subprocess call to systemctl has failed, '
+ 'returning error code ({})'
+ .format(self.failure))
+ # Output from systemctl show has the format Property=Value.
+        # For example, UserspaceTimestampMonotonic=1929304
+ timestamp = self.epoch.split('=')[1]
+        # Timestamps reported by systemctl are in microseconds;
+        # convert to seconds.
+ return float(timestamp) / 1000000
+
+
+def dist_check_timestamp():
+ '''
+ Determine which init system a particular linux distro is using.
+ Each init system (systemd, upstart, etc) has a different way of
+ providing timestamps.
+
+    :return: timestamps of kernel boot, kernel end of boot, and cloud-init
+    start,
+ or TIMESTAMP_UNKNOWN if the timestamps cannot be retrieved.
+ '''
+
+ if uses_systemd():
+ return gather_timestamps_using_systemd()
+
+ # Use dmesg to get timestamps if the distro does not have systemd
+ if util.is_FreeBSD() or 'gentoo' in \
+ util.system_info()['system'].lower():
+ return gather_timestamps_using_dmesg()
+
+    # This distro doesn't fit anything supported by cloud-init; just
+    # return error codes.
+ return TIMESTAMP_UNKNOWN
+
+
+def gather_timestamps_using_dmesg():
+ '''
+    Gather timestamps that correspond to kernel begin initialization and
+    kernel finish initialization, using dmesg as opposed to systemctl
+
+ :return: the two timestamps plus a dummy timestamp to keep consistency
+ with gather_timestamps_using_systemd
+ '''
+ try:
+ data, _ = util.subp(['dmesg'], capture=True)
+ split_entries = data[0].splitlines()
+ for i in split_entries:
+ if i.decode('UTF-8').find('user') != -1:
+ splitup = i.decode('UTF-8').split()
+ stripped = splitup[1].strip(']')
+
+ # kernel timestamp from dmesg is equal to 0,
+ # with the userspace timestamp relative to it.
+ user_space_timestamp = float(stripped)
+ kernel_start = float(time.time()) - float(util.uptime())
+ kernel_end = kernel_start + user_space_timestamp
+
+            # systemd won't start cloud-init in this case,
+ # so we cannot get that timestamp
+ return SUCCESS_CODE, kernel_start, kernel_end, \
+ kernel_end
+
+ except Exception:
+ pass
+ return TIMESTAMP_UNKNOWN
+
+
+def gather_timestamps_using_systemd():
+ '''
+    Gather timestamps that correspond to kernel begin initialization,
+    kernel finish initialization, and cloud-init systemd unit activation
+
+ :return: the three timestamps
+ '''
+ kernel_start = float(time.time()) - float(util.uptime())
+ try:
+ delta_k_end = SystemctlReader('UserspaceTimestampMonotonic')\
+ .parse_epoch_as_float()
+ delta_ci_s = SystemctlReader('InactiveExitTimestampMonotonic',
+ 'cloud-init-local').parse_epoch_as_float()
+ base_time = kernel_start
+ status = SUCCESS_CODE
+        # lxc-based containers do not set their monotonic zero point to be
+        # when the container starts; they keep using host boot as the zero
+        # point. time.CLOCK_MONOTONIC_RAW is only available in python 3.3+.
+ if util.is_container():
+            # time.monotonic also uses host boot as zero point
+ if sys.version_info >= (3, 3):
+ base_time = float(time.time()) - float(time.monotonic())
+            # TODO: lxcfs automatically truncates /proc/uptime to seconds
+            # in containers; when https://github.com/lxc/lxcfs/issues/292
+            # is fixed, util.uptime() should be used instead of stat on
+            # /proc/1/cmdline
+ try:
+ file_stat = os.stat('/proc/1/cmdline')
+ kernel_start = file_stat.st_atime
+ except OSError as err:
+ raise RuntimeError('Could not determine container boot '
+ 'time from /proc/1/cmdline. ({})'
+ .format(err))
+ status = CONTAINER_CODE
+ else:
+ status = FAIL_CODE
+ kernel_end = base_time + delta_k_end
+ cloudinit_sysd = base_time + delta_ci_s
+
+ except Exception as e:
+        # Catch ALL exceptions, as SystemctlReader can throw many different
+        # errors, but any failure in systemctl means that timestamps cannot
+        # be obtained
+ print(e)
+ return TIMESTAMP_UNKNOWN
+ return status, kernel_start, kernel_end, cloudinit_sysd
+
+
def generate_records(events, blame_sort=False,
print_format="(%n) %d seconds in %I%D",
dump_files=False, log_datafiles=False):
+ '''
+ Take in raw events and create parent-child dependencies between events
+    in order to sort events chronologically.
+
+ :param events: JSONs from dump that represents events taken from logs
+ :param blame_sort: whether to sort by timestamp or by time taken.
+ :param print_format: formatting to represent event, time stamp,
+ and time taken by the event in one line
+ :param dump_files: whether to dump files into JSONs
+ :param log_datafiles: whether or not to log events generated
+
+ :return: boot records ordered chronologically
+ '''
sorted_events = sorted(events, key=lambda x: x['timestamp'])
records = []
@@ -176,7 +349,7 @@ def generate_records(events, blame_sort=False,
if event_name(event) == event_name(prev_evt):
record = event_record(start_time, prev_evt, event)
records.append(format_record("Finished stage: "
- "(%n) %d seconds ",
+ "(%n) %d seconds",
record) + "\n")
total_time += record.get('delta')
else:
@@ -189,19 +362,28 @@ def generate_records(events, blame_sort=False,
def show_events(events, print_format):
+ '''
+ A passthrough method that makes it easier to call generate_records()
+
+ :param events: JSONs from dump that represents events taken from logs
+ :param print_format: formatting to represent event, time stamp,
+ and time taken by the event in one line
+
+ :return: boot records ordered chronologically
+ '''
return generate_records(events, print_format=print_format)
-def load_events(infile, rawdata=None):
- if rawdata:
- data = rawdata.read()
- else:
- data = infile.read()
+def load_events_infile(infile):
+ '''
+    Take in a log file, read it, and convert it to JSON.
+
+    :param infile: the log file to be read
-    j = None
+    :return: JSON version of the logfile, plus the raw file data
+ '''
+ data = infile.read()
try:
- j = json.loads(data)
+ return json.loads(data), data
except ValueError:
- pass
-
- return j, data
+ return None, data
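
Note: `SystemctlReader.parse_epoch_as_float` assumes `systemctl show -p
<property>` prints a single `Property=Value` line with the value in
microseconds. A self-contained sketch of the same parse against a captured
string, with no live systemctl call:

    # Example output of: systemctl show -p UserspaceTimestampMonotonic
    raw = 'UserspaceTimestampMonotonic=1929304'

    # Split on '=' and convert microseconds to seconds, mirroring
    # parse_epoch_as_float() above.
    seconds = float(raw.split('=')[1]) / 1000000
    print(seconds)  # 1.929304
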
diff --git a/cloudinit/analyze/tests/test_boot.py b/cloudinit/analyze/tests/test_boot.py
new file mode 100644
index 00000000..f4001c14
--- /dev/null
+++ b/cloudinit/analyze/tests/test_boot.py
@@ -0,0 +1,170 @@
+import os
+from cloudinit.analyze.__main__ import (analyze_boot, get_parser)
+from cloudinit.tests.helpers import CiTestCase, mock
+from cloudinit.analyze.show import dist_check_timestamp, SystemctlReader, \
+ FAIL_CODE, CONTAINER_CODE
+
+err_code = (FAIL_CODE, -1, -1, -1)
+
+
+class TestDistroChecker(CiTestCase):
+
+ @mock.patch('cloudinit.util.system_info', return_value={'dist': ('', '',
+ ''),
+ 'system': ''})
+ @mock.patch('cloudinit.util.get_linux_distro', return_value=('', '', ''))
+ @mock.patch('cloudinit.util.is_FreeBSD', return_value=False)
+ def test_blank_distro(self, m_sys_info, m_get_linux_distro, m_free_bsd):
+ self.assertEqual(err_code, dist_check_timestamp())
+
+ @mock.patch('cloudinit.util.system_info', return_value={'dist': ('', '',
+ '')})
+ @mock.patch('cloudinit.util.get_linux_distro', return_value=('', '', ''))
+ @mock.patch('cloudinit.util.is_FreeBSD', return_value=True)
+ def test_freebsd_gentoo_cant_find(self, m_sys_info,
+ m_get_linux_distro, m_is_FreeBSD):
+ self.assertEqual(err_code, dist_check_timestamp())
+
+ @mock.patch('cloudinit.util.subp', return_value=(0, 1))
+ def test_subp_fails(self, m_subp):
+ self.assertEqual(err_code, dist_check_timestamp())
+
+
+class TestSystemCtlReader(CiTestCase):
+
+ def test_systemctl_invalid_property(self):
+ reader = SystemctlReader('dummyProperty')
+ with self.assertRaises(RuntimeError):
+ reader.parse_epoch_as_float()
+
+ def test_systemctl_invalid_parameter(self):
+ reader = SystemctlReader('dummyProperty', 'dummyParameter')
+ with self.assertRaises(RuntimeError):
+ reader.parse_epoch_as_float()
+
+ @mock.patch('cloudinit.util.subp', return_value=('U=1000000', None))
+ def test_systemctl_works_correctly_threshold(self, m_subp):
+ reader = SystemctlReader('dummyProperty', 'dummyParameter')
+ self.assertEqual(1.0, reader.parse_epoch_as_float())
+ thresh = 1.0 - reader.parse_epoch_as_float()
+ self.assertTrue(thresh < 1e-6)
+ self.assertTrue(thresh > (-1 * 1e-6))
+
+ @mock.patch('cloudinit.util.subp', return_value=('U=0', None))
+ def test_systemctl_succeed_zero(self, m_subp):
+ reader = SystemctlReader('dummyProperty', 'dummyParameter')
+ self.assertEqual(0.0, reader.parse_epoch_as_float())
+
+ @mock.patch('cloudinit.util.subp', return_value=('U=1', None))
+ def test_systemctl_succeed_distinct(self, m_subp):
+ reader = SystemctlReader('dummyProperty', 'dummyParameter')
+ val1 = reader.parse_epoch_as_float()
+ m_subp.return_value = ('U=2', None)
+ reader2 = SystemctlReader('dummyProperty', 'dummyParameter')
+ val2 = reader2.parse_epoch_as_float()
+ self.assertNotEqual(val1, val2)
+
+ @mock.patch('cloudinit.util.subp', return_value=('100', None))
+ def test_systemctl_epoch_not_splittable(self, m_subp):
+ reader = SystemctlReader('dummyProperty', 'dummyParameter')
+ with self.assertRaises(IndexError):
+ reader.parse_epoch_as_float()
+
+ @mock.patch('cloudinit.util.subp', return_value=('U=foobar', None))
+ def test_systemctl_cannot_convert_epoch_to_float(self, m_subp):
+ reader = SystemctlReader('dummyProperty', 'dummyParameter')
+ with self.assertRaises(ValueError):
+ reader.parse_epoch_as_float()
+
+
+class TestAnalyzeBoot(CiTestCase):
+
+ def set_up_dummy_file_ci(self, path, log_path):
+ infh = open(path, 'w+')
+ infh.write('2019-07-08 17:40:49,601 - util.py[DEBUG]: Cloud-init v. '
+ '19.1-1-gbaa47854-0ubuntu1~18.04.1 running \'init-local\' '
+ 'at Mon, 08 Jul 2019 17:40:49 +0000. Up 18.84 seconds.')
+ infh.close()
+ outfh = open(log_path, 'w+')
+ outfh.close()
+
+ def set_up_dummy_file(self, path, log_path):
+ infh = open(path, 'w+')
+ infh.write('dummy data')
+ infh.close()
+ outfh = open(log_path, 'w+')
+ outfh.close()
+
+ def remove_dummy_file(self, path, log_path):
+ if os.path.isfile(path):
+ os.remove(path)
+ if os.path.isfile(log_path):
+ os.remove(log_path)
+
+ @mock.patch('cloudinit.analyze.show.dist_check_timestamp',
+ return_value=err_code)
+ def test_boot_invalid_distro(self, m_dist_check_timestamp):
+
+ path = os.path.dirname(os.path.abspath(__file__))
+ log_path = path + '/boot-test.log'
+ path += '/dummy.log'
+ self.set_up_dummy_file(path, log_path)
+
+ parser = get_parser()
+ args = parser.parse_args(args=['boot', '-i', path, '-o',
+ log_path])
+ name_default = ''
+ analyze_boot(name_default, args)
+ # now args have been tested, go into outfile and make sure error
+ # message is in the outfile
+ outfh = open(args.outfile, 'r')
+ data = outfh.read()
+ err_string = 'Your Linux distro or container does not support this ' \
+ 'functionality.\nYou must be running a Kernel ' \
+ 'Telemetry supported distro.\nPlease check ' \
+ 'https://cloudinit.readthedocs.io/en/latest/topics' \
+ '/analyze.html for more information on supported ' \
+ 'distros.\n'
+
+ self.remove_dummy_file(path, log_path)
+ self.assertEqual(err_string, data)
+
+ @mock.patch("cloudinit.util.is_container", return_value=True)
+ @mock.patch('cloudinit.util.subp', return_value=('U=1000000', None))
+ def test_container_no_ci_log_line(self, m_is_container, m_subp):
+ path = os.path.dirname(os.path.abspath(__file__))
+ log_path = path + '/boot-test.log'
+ path += '/dummy.log'
+ self.set_up_dummy_file(path, log_path)
+
+ parser = get_parser()
+ args = parser.parse_args(args=['boot', '-i', path, '-o',
+ log_path])
+ name_default = ''
+
+ finish_code = analyze_boot(name_default, args)
+
+ self.remove_dummy_file(path, log_path)
+ self.assertEqual(FAIL_CODE, finish_code)
+
+ @mock.patch("cloudinit.util.is_container", return_value=True)
+ @mock.patch('cloudinit.util.subp', return_value=('U=1000000', None))
+ @mock.patch('cloudinit.analyze.__main__._get_events', return_value=[{
+ 'name': 'init-local', 'description': 'starting search', 'timestamp':
+ 100000}])
+ @mock.patch('cloudinit.analyze.show.dist_check_timestamp',
+ return_value=(CONTAINER_CODE, 1, 1, 1))
+ def test_container_ci_log_line(self, m_is_container, m_subp, m_get, m_g):
+ path = os.path.dirname(os.path.abspath(__file__))
+ log_path = path + '/boot-test.log'
+ path += '/dummy.log'
+ self.set_up_dummy_file_ci(path, log_path)
+
+ parser = get_parser()
+ args = parser.parse_args(args=['boot', '-i', path, '-o',
+ log_path])
+ name_default = ''
+ finish_code = analyze_boot(name_default, args)
+
+ self.remove_dummy_file(path, log_path)
+ self.assertEqual(CONTAINER_CODE, finish_code)
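
Note: the `('U=1000000', None)` tuples patched into `cloudinit.util.subp`
above follow its `(stdout, stderr)` return convention; 1000000 microseconds
parse to 1.0 second. A sketch of the pattern these tests rely on (assumes
cloud-init itself is importable):

    from unittest import mock

    from cloudinit.analyze.show import SystemctlReader

    # Fake subp: stdout carries the property, stderr is empty.
    with mock.patch('cloudinit.util.subp',
                    return_value=('U=1000000', None)):
        reader = SystemctlReader('dummyProperty', 'dummyParameter')
        assert reader.parse_epoch_as_float() == 1.0
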
diff --git a/cloudinit/apport.py b/cloudinit/apport.py
index 22cb7fde..1f2c2e7e 100644
--- a/cloudinit/apport.py
+++ b/cloudinit/apport.py
@@ -22,7 +22,9 @@ KNOWN_CLOUD_NAMES = [
'CloudSigma',
'CloudStack',
'DigitalOcean',
+ 'E24Cloud',
'GCE - Google Compute Engine',
+ 'Exoscale',
'Hetzner Cloud',
'IBM - (aka SoftLayer or BlueMix)',
'LXD',
@@ -32,11 +34,14 @@ KNOWN_CLOUD_NAMES = [
'OpenStack',
'Oracle',
'OVF',
+ 'RbxCloud - (HyperOne, Rootbox, Rubikon)',
'OpenTelekomCloud',
'Scaleway',
'SmartOS',
'VMware',
- 'Other']
+ 'ZStack',
+ 'Other'
+]
# Potentially clear text collected logs
CLOUDINIT_LOG = '/var/log/cloud-init.log'
diff --git a/cloudinit/atomic_helper.py b/cloudinit/atomic_helper.py
index 587b9945..1f61faa2 100644
--- a/cloudinit/atomic_helper.py
+++ b/cloudinit/atomic_helper.py
@@ -1,11 +1,13 @@
# This file is part of cloud-init. See LICENSE file for license information.
import json
+import logging
import os
import stat
import tempfile
_DEF_PERMS = 0o644
+LOG = logging.getLogger(__name__)
def write_file(filename, content, mode=_DEF_PERMS,
@@ -23,6 +25,10 @@ def write_file(filename, content, mode=_DEF_PERMS,
try:
tf = tempfile.NamedTemporaryFile(dir=os.path.dirname(filename),
delete=False, mode=omode)
+ LOG.debug(
+ "Atomically writing to file %s (via temporary file %s) - %s: [%o]"
+ " %d bytes/chars",
+ filename, tf.name, omode, mode, len(content))
tf.write(content)
tf.close()
os.chmod(tf.name, mode)
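
Note: the new debug line fires just before the sequence that makes
`write_file` atomic. The underlying pattern, reduced to a sketch (not the
module's exact code; assumes `filename` is an absolute path):

    import os
    import tempfile

    def atomic_write(filename, content, mode=0o644):
        # Stage in the destination directory so the final rename stays on
        # one filesystem and therefore replaces the target atomically.
        tf = tempfile.NamedTemporaryFile(
            dir=os.path.dirname(filename), delete=False, mode='w')
        tf.write(content)
        tf.close()
        os.chmod(tf.name, mode)
        os.rename(tf.name, filename)
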
diff --git a/cloudinit/cmd/clean.py b/cloudinit/cmd/clean.py
index de22f7f2..30e49de0 100644
--- a/cloudinit/cmd/clean.py
+++ b/cloudinit/cmd/clean.py
@@ -5,12 +5,13 @@
"""Define 'clean' utility and handler as part of cloud-init commandline."""
import argparse
+import glob
import os
import sys
from cloudinit.stages import Init
from cloudinit.util import (
- ProcessExecutionError, chdir, del_dir, del_file, get_config_logfiles,
+ ProcessExecutionError, del_dir, del_file, get_config_logfiles,
is_link, subp)
@@ -61,18 +62,18 @@ def remove_artifacts(remove_logs, remove_seed=False):
if not os.path.isdir(init.paths.cloud_dir):
return 0 # Artifacts dir already cleaned
- with chdir(init.paths.cloud_dir):
- for path in os.listdir('.'):
- if path == 'seed' and not remove_seed:
- continue
- try:
- if os.path.isdir(path) and not is_link(path):
- del_dir(path)
- else:
- del_file(path)
- except OSError as e:
- error('Could not remove {0}: {1}'.format(path, str(e)))
- return 1
+ seed_path = os.path.join(init.paths.cloud_dir, 'seed')
+ for path in glob.glob('%s/*' % init.paths.cloud_dir):
+ if path == seed_path and not remove_seed:
+ continue
+ try:
+ if os.path.isdir(path) and not is_link(path):
+ del_dir(path)
+ else:
+ del_file(path)
+ except OSError as e:
+ error('Could not remove {0}: {1}'.format(path, str(e)))
+ return 1
return 0
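
Note: replacing `chdir` + `os.listdir('.')` with `glob.glob` means each
`path` is now the full path under `cloud_dir`, which is also why the
error-message assertion in test_clean.py below gains the directory prefix.
A quick sketch of the difference:

    import glob
    import os

    cloud_dir = '/var/lib/cloud'

    # Old: names relative to cloud_dir, usable only after a chdir().
    relative = os.listdir(cloud_dir)           # ['seed', 'instances', ...]

    # New: full paths, no chdir() required.
    absolute = glob.glob('%s/*' % cloud_dir)   # ['/var/lib/cloud/seed', ...]
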
diff --git a/cloudinit/cmd/devel/net_convert.py b/cloudinit/cmd/devel/net_convert.py
index 1ad7e0bd..80d217ca 100755
--- a/cloudinit/cmd/devel/net_convert.py
+++ b/cloudinit/cmd/devel/net_convert.py
@@ -5,13 +5,12 @@ import argparse
import json
import os
import sys
-import yaml
from cloudinit.sources.helpers import openstack
from cloudinit.sources import DataSourceAzure as azure
from cloudinit.sources import DataSourceOVF as ovf
-from cloudinit import distros
+from cloudinit import distros, safeyaml
from cloudinit.net import eni, netplan, network_state, sysconfig
from cloudinit import log
@@ -78,13 +77,12 @@ def handle_args(name, args):
if args.kind == "eni":
pre_ns = eni.convert_eni_data(net_data)
elif args.kind == "yaml":
- pre_ns = yaml.load(net_data)
+ pre_ns = safeyaml.load(net_data)
if 'network' in pre_ns:
pre_ns = pre_ns.get('network')
if args.debug:
sys.stderr.write('\n'.join(
- ["Input YAML",
- yaml.dump(pre_ns, default_flow_style=False, indent=4), ""]))
+ ["Input YAML", safeyaml.dumps(pre_ns), ""]))
elif args.kind == 'network_data.json':
pre_ns = openstack.convert_net_json(
json.loads(net_data), known_macs=known_macs)
@@ -97,12 +95,11 @@ def handle_args(name, args):
ns = network_state.parse_net_config_data(pre_ns)
if not ns:
raise RuntimeError("No valid network_state object created from"
- "input data")
+ " input data")
if args.debug:
- sys.stderr.write('\n'.join([
- "", "Internal State",
- yaml.dump(ns, default_flow_style=False, indent=4), ""]))
+ sys.stderr.write('\n'.join(
+ ["", "Internal State", safeyaml.dumps(ns), ""]))
distro_cls = distros.fetch(args.distro)
distro = distro_cls(args.distro, {}, None)
config = {}
@@ -116,6 +113,8 @@ def handle_args(name, args):
config['postcmds'] = False
# trim leading slash
config['netplan_path'] = config['netplan_path'][1:]
+ # enable some netplan features
+ config['features'] = ['dhcp-use-domains', 'ipv6-mtu']
else:
r_cls = sysconfig.Renderer
config = distro.renderer_configs.get('sysconfig')
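
Note: `cloudinit.safeyaml` wraps PyYAML's safe loader/dumper, so YAML from
outside sources can no longer instantiate arbitrary Python objects the way a
bare `yaml.load` call could. A roughly equivalent stand-alone sketch using
PyYAML directly:

    import yaml

    net_data = 'network:\n  version: 2\n'

    # Safe parse: only plain mappings, lists and scalars are produced.
    pre_ns = yaml.safe_load(net_data)

    # Block-style dump, comparable to safeyaml.dumps() above.
    print(yaml.safe_dump(pre_ns, default_flow_style=False))
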
diff --git a/cloudinit/cmd/devel/tests/test_logs.py b/cloudinit/cmd/devel/tests/test_logs.py
index 4951797b..d2dfa8de 100644
--- a/cloudinit/cmd/devel/tests/test_logs.py
+++ b/cloudinit/cmd/devel/tests/test_logs.py
@@ -2,7 +2,7 @@
from datetime import datetime
import os
-from six import StringIO
+from io import StringIO
from cloudinit.cmd.devel import logs
from cloudinit.sources import INSTANCE_JSON_SENSITIVE_FILE
diff --git a/cloudinit/cmd/devel/tests/test_render.py b/cloudinit/cmd/devel/tests/test_render.py
index 988bba03..a7fcf2ce 100644
--- a/cloudinit/cmd/devel/tests/test_render.py
+++ b/cloudinit/cmd/devel/tests/test_render.py
@@ -1,7 +1,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from six import StringIO
import os
+from io import StringIO
from collections import namedtuple
from cloudinit.cmd.devel import render
diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
index 933c019a..a5446da7 100644
--- a/cloudinit/cmd/main.py
+++ b/cloudinit/cmd/main.py
@@ -632,13 +632,14 @@ def status_wrapper(name, args, data_d=None, link_d=None):
'start': None,
'finished': None,
}
+
if status is None:
status = {'v1': {}}
- for m in modes:
- status['v1'][m] = nullstatus.copy()
status['v1']['datasource'] = None
- elif mode not in status['v1']:
- status['v1'][mode] = nullstatus.copy()
+
+ for m in modes:
+ if m not in status['v1']:
+ status['v1'][m] = nullstatus.copy()
v1 = status['v1']
v1['stage'] = mode
diff --git a/cloudinit/cmd/query.py b/cloudinit/cmd/query.py
index 1d888b9d..e3db8679 100644
--- a/cloudinit/cmd/query.py
+++ b/cloudinit/cmd/query.py
@@ -5,7 +5,6 @@
import argparse
from errno import EACCES
import os
-import six
import sys
from cloudinit.handlers.jinja_template import (
@@ -149,7 +148,7 @@ def handle_args(name, args):
response = '\n'.join(sorted(response.keys()))
elif args.list_keys:
response = '\n'.join(sorted(response.keys()))
- if not isinstance(response, six.string_types):
+ if not isinstance(response, str):
response = util.json_dumps(response)
print(response)
return 0
diff --git a/cloudinit/cmd/tests/test_clean.py b/cloudinit/cmd/tests/test_clean.py
index 5a3ec3bf..13a69aa1 100644
--- a/cloudinit/cmd/tests/test_clean.py
+++ b/cloudinit/cmd/tests/test_clean.py
@@ -5,7 +5,7 @@ from cloudinit.util import ensure_dir, sym_link, write_file
from cloudinit.tests.helpers import CiTestCase, wrap_and_call, mock
from collections import namedtuple
import os
-from six import StringIO
+from io import StringIO
mypaths = namedtuple('MyPaths', 'cloud_dir')
@@ -22,7 +22,8 @@ class TestClean(CiTestCase):
class FakeInit(object):
cfg = {'def_log_file': self.log1,
'output': {'all': '|tee -a {0}'.format(self.log2)}}
- paths = mypaths(cloud_dir=self.artifact_dir)
+ # Ensure cloud_dir has a trailing slash, to match real behaviour
+ paths = mypaths(cloud_dir='{}/'.format(self.artifact_dir))
def __init__(self, ds_deps):
pass
@@ -136,7 +137,8 @@ class TestClean(CiTestCase):
clean.remove_artifacts, remove_logs=False)
self.assertEqual(1, retcode)
self.assertEqual(
- 'ERROR: Could not remove dir1: oops\n', m_stderr.getvalue())
+ 'ERROR: Could not remove %s/dir1: oops\n' % self.artifact_dir,
+ m_stderr.getvalue())
def test_handle_clean_args_reboots(self):
"""handle_clean_args_reboots when reboot arg is provided."""
diff --git a/cloudinit/cmd/tests/test_cloud_id.py b/cloudinit/cmd/tests/test_cloud_id.py
index 73738170..3f3727fd 100644
--- a/cloudinit/cmd/tests/test_cloud_id.py
+++ b/cloudinit/cmd/tests/test_cloud_id.py
@@ -4,7 +4,7 @@
from cloudinit import util
from collections import namedtuple
-from six import StringIO
+from io import StringIO
from cloudinit.cmd import cloud_id
diff --git a/cloudinit/cmd/tests/test_main.py b/cloudinit/cmd/tests/test_main.py
index a1e534fb..384fddc6 100644
--- a/cloudinit/cmd/tests/test_main.py
+++ b/cloudinit/cmd/tests/test_main.py
@@ -3,11 +3,12 @@
from collections import namedtuple
import copy
import os
-from six import StringIO
+from io import StringIO
from cloudinit.cmd import main
+from cloudinit import safeyaml
from cloudinit.util import (
- ensure_dir, load_file, write_file, yaml_dumps)
+ ensure_dir, load_file, write_file)
from cloudinit.tests.helpers import (
FilesystemMockingTestCase, wrap_and_call)
@@ -39,7 +40,7 @@ class TestMain(FilesystemMockingTestCase):
],
'cloud_init_modules': ['write-files', 'runcmd'],
}
- cloud_cfg = yaml_dumps(self.cfg)
+ cloud_cfg = safeyaml.dumps(self.cfg)
ensure_dir(os.path.join(self.new_root, 'etc', 'cloud'))
self.cloud_cfg_file = os.path.join(
self.new_root, 'etc', 'cloud', 'cloud.cfg')
@@ -113,7 +114,7 @@ class TestMain(FilesystemMockingTestCase):
"""When local-hostname metadata is present, call cc_set_hostname."""
self.cfg['datasource'] = {
'None': {'metadata': {'local-hostname': 'md-hostname'}}}
- cloud_cfg = yaml_dumps(self.cfg)
+ cloud_cfg = safeyaml.dumps(self.cfg)
write_file(self.cloud_cfg_file, cloud_cfg)
cmdargs = myargs(
debug=False, files=None, force=False, local=False, reporter=None,
diff --git a/cloudinit/cmd/tests/test_query.py b/cloudinit/cmd/tests/test_query.py
index 28738b1e..6d36a4ea 100644
--- a/cloudinit/cmd/tests/test_query.py
+++ b/cloudinit/cmd/tests/test_query.py
@@ -1,7 +1,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
import errno
-from six import StringIO
+from io import StringIO
from textwrap import dedent
import os
@@ -150,7 +150,9 @@ class TestQuery(CiTestCase):
instance_data=self.instance_data, list_keys=False,
user_data='ud', vendor_data='vd', varname=None)
with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- self.assertEqual(0, query.handle_args('anyname', args))
+ with mock.patch('os.getuid') as m_getuid:
+ m_getuid.return_value = 100
+ self.assertEqual(0, query.handle_args('anyname', args))
self.assertEqual(
'{\n "my_var": "it worked",\n "userdata": "<%s> file:ud",\n'
' "vendordata": "<%s> file:vd"\n}\n' % (
@@ -165,7 +167,9 @@ class TestQuery(CiTestCase):
instance_data=self.instance_data, list_keys=False,
user_data='ud', vendor_data='vd', varname='my_var')
with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- self.assertEqual(0, query.handle_args('anyname', args))
+ with mock.patch('os.getuid') as m_getuid:
+ m_getuid.return_value = 100
+ self.assertEqual(0, query.handle_args('anyname', args))
self.assertEqual('it worked\n', m_stdout.getvalue())
def test_handle_args_returns_nested_varname(self):
@@ -177,7 +181,9 @@ class TestQuery(CiTestCase):
instance_data=self.instance_data, user_data='ud', vendor_data='vd',
list_keys=False, varname='v1.key_2')
with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- self.assertEqual(0, query.handle_args('anyname', args))
+ with mock.patch('os.getuid') as m_getuid:
+ m_getuid.return_value = 100
+ self.assertEqual(0, query.handle_args('anyname', args))
self.assertEqual('value-2\n', m_stdout.getvalue())
def test_handle_args_returns_standardized_vars_to_top_level_aliases(self):
@@ -206,7 +212,9 @@ class TestQuery(CiTestCase):
instance_data=self.instance_data, user_data='ud', vendor_data='vd',
list_keys=False, varname=None)
with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- self.assertEqual(0, query.handle_args('anyname', args))
+ with mock.patch('os.getuid') as m_getuid:
+ m_getuid.return_value = 100
+ self.assertEqual(0, query.handle_args('anyname', args))
self.assertEqual(expected, m_stdout.getvalue())
def test_handle_args_list_keys_sorts_top_level_keys_when_no_varname(self):
@@ -221,7 +229,9 @@ class TestQuery(CiTestCase):
instance_data=self.instance_data, list_keys=True, user_data='ud',
vendor_data='vd', varname=None)
with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- self.assertEqual(0, query.handle_args('anyname', args))
+ with mock.patch('os.getuid') as m_getuid:
+ m_getuid.return_value = 100
+ self.assertEqual(0, query.handle_args('anyname', args))
self.assertEqual(expected, m_stdout.getvalue())
def test_handle_args_list_keys_sorts_nested_keys_when_varname(self):
@@ -236,7 +246,9 @@ class TestQuery(CiTestCase):
instance_data=self.instance_data, list_keys=True,
user_data='ud', vendor_data='vd', varname='v1')
with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- self.assertEqual(0, query.handle_args('anyname', args))
+ with mock.patch('os.getuid') as m_getuid:
+ m_getuid.return_value = 100
+ self.assertEqual(0, query.handle_args('anyname', args))
self.assertEqual(expected, m_stdout.getvalue())
def test_handle_args_list_keys_errors_when_varname_is_not_a_dict(self):
@@ -252,7 +264,9 @@ class TestQuery(CiTestCase):
vendor_data='vd', varname='top')
with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
- self.assertEqual(1, query.handle_args('anyname', args))
+ with mock.patch('os.getuid') as m_getuid:
+ m_getuid.return_value = 100
+ self.assertEqual(1, query.handle_args('anyname', args))
self.assertEqual('', m_stdout.getvalue())
self.assertIn(expected_error, m_stderr.getvalue())
diff --git a/cloudinit/cmd/tests/test_status.py b/cloudinit/cmd/tests/test_status.py
index aded8580..1ed10896 100644
--- a/cloudinit/cmd/tests/test_status.py
+++ b/cloudinit/cmd/tests/test_status.py
@@ -2,7 +2,7 @@
from collections import namedtuple
import os
-from six import StringIO
+from io import StringIO
from textwrap import dedent
from cloudinit.atomic_helper import write_json
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index e18944ec..c44dec45 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -127,7 +127,7 @@ to ``^[\\w-]+:\\w``
Source list entries can be specified as a dictionary under the ``sources``
config key, with key in the dict representing a different source file. The key
-The key of each source entry will be used as an id that can be referenced in
+of each source entry will be used as an id that can be referenced in
other config entries, as well as the filename for the source's configuration
under ``/etc/apt/sources.list.d``. If the name does not end with ``.list``,
it will be appended. If there is no configuration for a key in ``sources``, no
@@ -253,7 +253,7 @@ def get_default_mirrors(arch=None, target=None):
architecture, for more see:
https://wiki.ubuntu.com/UbuntuDevelopment/PackageArchive#Ports"""
if arch is None:
- arch = util.get_architecture(target)
+ arch = util.get_dpkg_architecture(target)
if arch in PRIMARY_ARCHES:
return PRIMARY_ARCH_MIRRORS.copy()
if arch in PORTS_ARCHES:
@@ -303,13 +303,13 @@ def apply_apt(cfg, cloud, target):
LOG.debug("handling apt config: %s", cfg)
release = util.lsb_release(target=target)['codename']
- arch = util.get_architecture(target)
+ arch = util.get_dpkg_architecture(target)
mirrors = find_apt_mirror_info(cfg, cloud, arch=arch)
LOG.debug("Apt Mirror info: %s", mirrors)
if util.is_false(cfg.get('preserve_sources_list', False)):
generate_sources_list(cfg, release, mirrors, cloud)
- rename_apt_lists(mirrors, target)
+ rename_apt_lists(mirrors, target, arch)
try:
apply_apt_config(cfg, APT_PROXY_FN, APT_CONFIG_FN)
@@ -332,6 +332,8 @@ def apply_apt(cfg, cloud, target):
def debconf_set_selections(selections, target=None):
+ if not selections.endswith(b'\n'):
+ selections += b'\n'
util.subp(['debconf-set-selections'], data=selections, target=target,
capture=True)
@@ -374,7 +376,7 @@ def apply_debconf_selections(cfg, target=None):
selections = '\n'.join(
[selsets[key] for key in sorted(selsets.keys())])
- debconf_set_selections(selections.encode() + b"\n", target=target)
+ debconf_set_selections(selections.encode(), target=target)
# get a complete list of packages listed in input
pkgs_cfgd = set()
@@ -425,9 +427,9 @@ def mirrorurl_to_apt_fileprefix(mirror):
return string
-def rename_apt_lists(new_mirrors, target=None):
+def rename_apt_lists(new_mirrors, target, arch):
"""rename_apt_lists - rename apt lists to preserve old cache data"""
- default_mirrors = get_default_mirrors(util.get_architecture(target))
+ default_mirrors = get_default_mirrors(arch)
pre = util.target_path(target, APT_LISTS)
for (name, omirror) in default_mirrors.items():
@@ -894,7 +896,7 @@ def find_apt_mirror_info(cfg, cloud, arch=None):
"""
if arch is None:
- arch = util.get_architecture()
+ arch = util.get_dpkg_architecture()
LOG.debug("got arch for mirror selection: %s", arch)
pmirror = get_mirror(cfg, "primary", arch, cloud)
LOG.debug("got primary mirror: %s", pmirror)
diff --git a/cloudinit/config/cc_apt_pipelining.py b/cloudinit/config/cc_apt_pipelining.py
index cdf28cd9..225d0905 100644
--- a/cloudinit/config/cc_apt_pipelining.py
+++ b/cloudinit/config/cc_apt_pipelining.py
@@ -49,7 +49,7 @@ APT_PIPE_TPL = ("//Written by cloud-init per 'apt_pipelining'\n"
def handle(_name, cfg, _cloud, log, _args):
- apt_pipe_value = util.get_cfg_option_str(cfg, "apt_pipelining", False)
+ apt_pipe_value = util.get_cfg_option_str(cfg, "apt_pipelining", 'os')
apt_pipe_value_s = str(apt_pipe_value).lower().strip()
if apt_pipe_value_s == "false":
@@ -59,7 +59,7 @@ def handle(_name, cfg, _cloud, log, _args):
elif apt_pipe_value_s in [str(b) for b in range(0, 6)]:
write_apt_snippet(apt_pipe_value_s, log, DEFAULT_FILE)
else:
- log.warn("Invalid option for apt_pipeling: %s", apt_pipe_value)
+ log.warning("Invalid option for apt_pipelining: %s", apt_pipe_value)
def write_apt_snippet(setting, log, f_name):
diff --git a/cloudinit/config/cc_byobu.py b/cloudinit/config/cc_byobu.py
index 8570da15..0b4352c8 100755
--- a/cloudinit/config/cc_byobu.py
+++ b/cloudinit/config/cc_byobu.py
@@ -60,7 +60,7 @@ def handle(name, cfg, cloud, log, args):
valid = ("enable-user", "enable-system", "enable",
"disable-user", "disable-system", "disable")
if value not in valid:
- log.warn("Unknown value %s for byobu_by_default", value)
+ log.warning("Unknown value %s for byobu_by_default", value)
mod_user = value.endswith("-user")
mod_sys = value.endswith("-system")
@@ -80,8 +80,8 @@ def handle(name, cfg, cloud, log, args):
(users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
(user, _user_config) = ug_util.extract_default(users)
if not user:
- log.warn(("No default byobu user provided, "
- "can not launch %s for the default user"), bl_inst)
+ log.warning(("No default byobu user provided, "
+ "can not launch %s for the default user"), bl_inst)
else:
shcmd += " sudo -Hu \"%s\" byobu-launcher-%s" % (user, bl_inst)
shcmd += " || X=$(($X+1)); "
diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py
index 46abedd1..01d61fa1 100644
--- a/cloudinit/config/cc_chef.py
+++ b/cloudinit/config/cc_chef.py
@@ -51,6 +51,7 @@ file).
chef:
client_key:
+ encrypted_data_bag_secret:
environment:
file_backup_path:
file_cache_path:
@@ -78,8 +79,6 @@ from cloudinit import templater
from cloudinit import url_helper
from cloudinit import util
-import six
-
RUBY_VERSION_DEFAULT = "1.8"
CHEF_DIRS = tuple([
@@ -114,6 +113,7 @@ CHEF_RB_TPL_DEFAULTS = {
'file_backup_path': "/var/backups/chef",
'pid_file': "/var/run/chef/client.pid",
'show_time': True,
+ 'encrypted_data_bag_secret': None,
}
CHEF_RB_TPL_BOOL_KEYS = frozenset(['show_time'])
CHEF_RB_TPL_PATH_KEYS = frozenset([
@@ -124,6 +124,7 @@ CHEF_RB_TPL_PATH_KEYS = frozenset([
'json_attribs',
'file_cache_path',
'pid_file',
+ 'encrypted_data_bag_secret',
])
CHEF_RB_TPL_KEYS = list(CHEF_RB_TPL_DEFAULTS.keys())
CHEF_RB_TPL_KEYS.extend(CHEF_RB_TPL_BOOL_KEYS)
@@ -193,7 +194,7 @@ def handle(name, cfg, cloud, log, _args):
# If there isn't a chef key in the configuration don't do anything
if 'chef' not in cfg:
log.debug(("Skipping module named %s,"
- " no 'chef' key in configuration"), name)
+ " no 'chef' key in configuration"), name)
return
chef_cfg = cfg['chef']
@@ -212,9 +213,9 @@ def handle(name, cfg, cloud, log, _args):
if vcert != "system":
util.write_file(vkey_path, vcert)
elif not os.path.isfile(vkey_path):
- log.warn("chef validation_cert provided as 'system', but "
- "validation_key path '%s' does not exist.",
- vkey_path)
+ log.warning("chef validation_cert provided as 'system', but "
+ "validation_key path '%s' does not exist.",
+ vkey_path)
# Create the chef config from template
template_fn = cloud.get_template_filename('chef_client.rb')
@@ -231,8 +232,8 @@ def handle(name, cfg, cloud, log, _args):
util.ensure_dirs(param_paths)
templater.render_to_file(template_fn, CHEF_RB_PATH, params)
else:
- log.warn("No template found, not rendering to %s",
- CHEF_RB_PATH)
+ log.warning("No template found, not rendering to %s",
+ CHEF_RB_PATH)
# Set the firstboot json
fb_filename = util.get_cfg_option_str(chef_cfg, 'firstboot_path',
@@ -270,12 +271,12 @@ def run_chef(chef_cfg, log):
cmd_args = chef_cfg['exec_arguments']
if isinstance(cmd_args, (list, tuple)):
cmd.extend(cmd_args)
- elif isinstance(cmd_args, six.string_types):
+ elif isinstance(cmd_args, str):
cmd.append(cmd_args)
else:
- log.warn("Unknown type %s provided for chef"
- " 'exec_arguments' expected list, tuple,"
- " or string", type(cmd_args))
+ log.warning("Unknown type %s provided for chef"
+ " 'exec_arguments' expected list, tuple,"
+ " or string", type(cmd_args))
cmd.extend(CHEF_EXEC_DEF_ARGS)
else:
cmd.extend(CHEF_EXEC_DEF_ARGS)
@@ -331,7 +332,7 @@ def install_chef(cloud, chef_cfg, log):
retries=util.get_cfg_option_int(chef_cfg, "omnibus_url_retries"),
omnibus_version=omnibus_version)
else:
- log.warn("Unknown chef install type '%s'", install_type)
+ log.warning("Unknown chef install type '%s'", install_type)
run = False
return run
diff --git a/cloudinit/config/cc_debug.py b/cloudinit/config/cc_debug.py
index 0a039eb3..4d5a6aa2 100644
--- a/cloudinit/config/cc_debug.py
+++ b/cloudinit/config/cc_debug.py
@@ -28,11 +28,11 @@ location that this cloud-init has been configured with when running.
"""
import copy
-
-from six import StringIO
+from io import StringIO
from cloudinit import type_utils
from cloudinit import util
+from cloudinit import safeyaml
SKIP_KEYS = frozenset(['log_cfgs'])
@@ -49,7 +49,7 @@ def _make_header(text):
def _dumps(obj):
- text = util.yaml_dumps(obj, explicit_start=False, explicit_end=False)
+ text = safeyaml.dumps(obj, explicit_start=False, explicit_end=False)
return text.rstrip()
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index 29e192e8..0796cb7b 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -825,6 +825,7 @@ def lookup_force_flag(fs):
'btrfs': '-f',
'xfs': '-f',
'reiserfs': '-f',
+ 'swap': '-f',
}
if 'ext' in fs.lower():
@@ -982,7 +983,9 @@ def mkfs(fs_cfg):
# File systems that support the -F flag
if overwrite or device_type(device) == "disk":
- fs_cmd.append(lookup_force_flag(fs_type))
+ force_flag = lookup_force_flag(fs_type)
+ if force_flag:
+ fs_cmd.append(force_flag)
# Add the extends FS options
if fs_opts:
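
Note: `lookup_force_flag` returns an empty string for filesystems without a
force flag, so `mkfs` now appends the flag only when one exists rather than
passing an empty argument to the mkfs command. A sketch of the guarded lookup
(helper and command names are illustrative):

    force_flags = {'btrfs': '-f', 'xfs': '-f', 'swap': '-f'}

    def build_mkfs_cmd(fs_type, device):
        cmd = ['/sbin/mkfs.%s' % fs_type, device]
        force_flag = force_flags.get(fs_type, '')
        if force_flag:                 # skip filesystems with no flag
            cmd.append(force_flag)
        return cmd

    print(build_mkfs_cmd('vfat', '/dev/vdb1'))  # no stray '' argument
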
diff --git a/cloudinit/config/cc_emit_upstart.py b/cloudinit/config/cc_emit_upstart.py
index eb9fbe66..b342e04d 100644
--- a/cloudinit/config/cc_emit_upstart.py
+++ b/cloudinit/config/cc_emit_upstart.py
@@ -69,6 +69,6 @@ def handle(name, _cfg, cloud, log, args):
util.subp(cmd)
except Exception as e:
# TODO(harlowja), use log exception from utils??
- log.warn("Emission of upstart event %s failed due to: %s", n, e)
+ log.warning("Emission of upstart event %s failed due to: %s", n, e)
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_final_message.py b/cloudinit/config/cc_final_message.py
index c61f03d4..fd141541 100644
--- a/cloudinit/config/cc_final_message.py
+++ b/cloudinit/config/cc_final_message.py
@@ -83,6 +83,6 @@ def handle(_name, cfg, cloud, log, args):
util.logexc(log, "Failed to write boot finished file %s", boot_fin_fn)
if cloud.datasource.is_disconnected:
- log.warn("Used fallback datasource")
+ log.warning("Used fallback datasource")
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py
index bafca9d8..1b512a06 100644
--- a/cloudinit/config/cc_growpart.py
+++ b/cloudinit/config/cc_growpart.py
@@ -22,11 +22,11 @@ mountpoint in the filesystem or a path to the block device in ``/dev``.
The utility to use for resizing can be selected using the ``mode`` config key.
If ``mode`` key is set to ``auto``, then any available utility (either
-``growpart`` or ``gpart``) will be used. If neither utility is available, no
-error will be raised. If ``mode`` is set to ``growpart``, then the ``growpart``
-utility will be used. If this utility is not available on the system, this will
-result in an error. If ``mode`` is set to ``off`` or ``false``, then
-``cc_growpart`` will take no action.
+``growpart`` or BSD ``gpart``) will be used. If neither utility is available,
+no error will be raised. If ``mode`` is set to ``growpart``, then the
+``growpart`` utility will be used. If this utility is not available on the
+system, this will result in an error. If ``mode`` is set to ``off`` or
+``false``, then ``cc_growpart`` will take no action.
There is some functionality overlap between this module and the ``growroot``
functionality of ``cloud-initramfs-tools``. However, there are some situations
@@ -132,7 +132,7 @@ class ResizeGrowPart(object):
try:
(out, _err) = util.subp(["growpart", "--help"], env=myenv)
- if re.search(r"--update\s+", out, re.DOTALL):
+ if re.search(r"--update\s+", out):
return True
except util.ProcessExecutionError:
@@ -161,9 +161,17 @@ class ResizeGrowPart(object):
class ResizeGpart(object):
def available(self):
- if not util.which('gpart'):
- return False
- return True
+ myenv = os.environ.copy()
+ myenv['LANG'] = 'C'
+
+ try:
+ (_out, err) = util.subp(["gpart", "help"], env=myenv, rcs=[0, 1])
+ if re.search(r"gpart recover ", err):
+ return True
+
+ except util.ProcessExecutionError:
+ pass
+ return False
def resize(self, diskdev, partnum, partdev):
"""
@@ -215,7 +223,8 @@ def device_part_info(devpath):
# FreeBSD doesn't know of sysfs so just get everything we need from
# the device, like /dev/vtbd0p2.
if util.is_FreeBSD():
- m = re.search('^(/dev/.+)p([0-9])$', devpath)
+ freebsd_part = "/dev/" + util.find_freebsd_part(devpath)
+ m = re.search('^(/dev/.+)p([0-9])$', freebsd_part)
return (m.group(1), m.group(2))
if not os.path.exists(syspath):
@@ -320,7 +329,7 @@ def handle(_name, cfg, _cloud, log, _args):
mycfg = cfg.get('growpart')
if not isinstance(mycfg, dict):
- log.warn("'growpart' in config was not a dict")
+ log.warning("'growpart' in config was not a dict")
return
mode = mycfg.get('mode', "auto")
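
Note: `ResizeGpart.available` now probes `gpart help` output rather than
trusting `util.which`, since an unrelated `gpart` binary may be on PATH. A
stand-alone sketch of that probe using only the standard library (Python
3.7+):

    import os
    import re
    import subprocess

    def gpart_available():
        env = dict(os.environ, LANG='C')
        try:
            # BSD gpart exits non-zero on 'help' but lists its verbs
            # (including 'gpart recover') on stderr.
            proc = subprocess.run(['gpart', 'help'], env=env,
                                  capture_output=True, text=True)
        except OSError:
            return False
        return bool(re.search(r'gpart recover ', proc.stderr))
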
diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py
index aff4010e..3d2ded3d 100644
--- a/cloudinit/config/cc_keys_to_console.py
+++ b/cloudinit/config/cc_keys_to_console.py
@@ -9,10 +9,10 @@
"""
Keys to Console
---------------
-**Summary:** control which ssh keys may be written to console
+**Summary:** control which SSH keys may be written to console
-For security reasons it may be desirable not to write ssh fingerprints and keys
-to the console. To avoid the fingerprint of types of ssh keys being written to
+For security reasons it may be desirable not to write SSH fingerprints and keys
+to the console. To avoid the fingerprint of types of SSH keys being written to
console the ``ssh_fp_console_blacklist`` config key can be used. By default all
types of keys will have their fingerprints written to console. To avoid keys
of a key type being written to console the ``ssh_key_console_blacklist`` config
@@ -52,8 +52,8 @@ def _get_helper_tool_path(distro):
def handle(name, cfg, cloud, log, _args):
helper_path = _get_helper_tool_path(cloud.distro)
if not os.path.exists(helper_path):
- log.warn(("Unable to activate module %s,"
- " helper tool not found at %s"), name, helper_path)
+ log.warning(("Unable to activate module %s,"
+ " helper tool not found at %s"), name, helper_path)
return
fp_blacklist = util.get_cfg_option_list(cfg,
@@ -68,7 +68,7 @@ def handle(name, cfg, cloud, log, _args):
util.multi_log("%s\n" % (stdout.strip()),
stderr=False, console=True)
except Exception:
- log.warn("Writing keys to the system console failed!")
+ log.warning("Writing keys to the system console failed!")
raise
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py
index eaf1e940..a9c04d86 100644
--- a/cloudinit/config/cc_landscape.py
+++ b/cloudinit/config/cc_landscape.py
@@ -56,8 +56,7 @@ The following default client config is provided, but can be overridden::
"""
import os
-
-from six import BytesIO
+from io import BytesIO
from configobj import ConfigObj
diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py
index 24a8ebea..151a9844 100644
--- a/cloudinit/config/cc_lxd.py
+++ b/cloudinit/config/cc_lxd.py
@@ -66,21 +66,21 @@ def handle(name, cfg, cloud, log, args):
name)
return
if not isinstance(lxd_cfg, dict):
- log.warn("lxd config must be a dictionary. found a '%s'",
- type(lxd_cfg))
+ log.warning("lxd config must be a dictionary. found a '%s'",
+ type(lxd_cfg))
return
# Grab the configuration
init_cfg = lxd_cfg.get('init')
if not isinstance(init_cfg, dict):
- log.warn("lxd/init config must be a dictionary. found a '%s'",
- type(init_cfg))
+ log.warning("lxd/init config must be a dictionary. found a '%s'",
+ type(init_cfg))
init_cfg = {}
bridge_cfg = lxd_cfg.get('bridge', {})
if not isinstance(bridge_cfg, dict):
- log.warn("lxd/bridge config must be a dictionary. found a '%s'",
- type(bridge_cfg))
+ log.warning("lxd/bridge config must be a dictionary. found a '%s'",
+ type(bridge_cfg))
bridge_cfg = {}
# Install the needed packages
@@ -89,13 +89,13 @@ def handle(name, cfg, cloud, log, args):
packages.append('lxd')
if init_cfg.get("storage_backend") == "zfs" and not util.which('zfs'):
- packages.append('zfs')
+ packages.append('zfsutils-linux')
if len(packages):
try:
cloud.distro.install_packages(packages)
except util.ProcessExecutionError as exc:
- log.warn("failed to install packages %s: %s", packages, exc)
+ log.warning("failed to install packages %s: %s", packages, exc)
return
# Set up lxd if init config is given
@@ -152,7 +152,7 @@ def handle(name, cfg, cloud, log, args):
if cmd_attach:
log.debug("Setting up default lxd bridge: %s" %
- " ".join(cmd_create))
+ " ".join(cmd_attach))
_lxc(cmd_attach)
elif bridge_cfg:
@@ -301,5 +301,4 @@ def maybe_cleanup_default(net_name, did_init, create, attach,
raise e
LOG.debug(msg, nic_name, profile, fail_assume_enoent)
-
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_mcollective.py b/cloudinit/config/cc_mcollective.py
index d5f63f5f..351183f1 100644
--- a/cloudinit/config/cc_mcollective.py
+++ b/cloudinit/config/cc_mcollective.py
@@ -49,9 +49,7 @@ private certificates for mcollective. Their values will be written to
"""
import errno
-
-import six
-from six import BytesIO
+import io
# Used since this can maintain comments
# and doesn't need a top level section
@@ -73,7 +71,7 @@ def configure(config, server_cfg=SERVER_CFG,
# original file in order to be able to mix the rest up.
try:
old_contents = util.load_file(server_cfg, quiet=False, decode=False)
- mcollective_config = ConfigObj(BytesIO(old_contents))
+ mcollective_config = ConfigObj(io.BytesIO(old_contents))
except IOError as e:
if e.errno != errno.ENOENT:
raise
@@ -93,7 +91,7 @@ def configure(config, server_cfg=SERVER_CFG,
'plugin.ssl_server_private'] = pricert_file
mcollective_config['securityprovider'] = 'ssl'
else:
- if isinstance(cfg, six.string_types):
+ if isinstance(cfg, str):
# Just set it in the 'main' section
mcollective_config[cfg_name] = cfg
elif isinstance(cfg, (dict)):
@@ -119,7 +117,7 @@ def configure(config, server_cfg=SERVER_CFG,
raise
# Now we got the whole (new) file, write to disk...
- contents = BytesIO()
+ contents = io.BytesIO()
mcollective_config.write(contents)
util.write_file(server_cfg, contents.getvalue(), mode=0o644)
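
With ``six`` removed, the module round-trips the server config through
``io.BytesIO`` directly. A minimal sketch of the ConfigObj read-modify-write
cycle used above (the config contents are illustrative)::

    import io

    from configobj import ConfigObj

    old_contents = b"loglevel = info\n"       # stand-in for server.cfg bytes
    mcollective_config = ConfigObj(io.BytesIO(old_contents))
    mcollective_config['securityprovider'] = 'ssl'

    contents = io.BytesIO()
    mcollective_config.write(contents)        # serialize back to bytes
    print(contents.getvalue().decode('utf-8'))
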
diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index 339baba9..4ae3f1fc 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -25,7 +25,7 @@ mountpoint (i.e. ``[ sda1 ]`` or ``[ sda1, null ]``).
The ``mount_default_fields`` config key allows default options to be specified
for the values in a ``mounts`` entry that are not specified, aside from the
-``fs_spec`` and the ``fs_file``. If specified, this must be a list containing 7
+``fs_spec`` and the ``fs_file``. If specified, this must be a list containing 6
values. It defaults to::
mount_default_fields: [none, none, "auto", "defaults,nobootwait", "0", "2"]
@@ -223,13 +223,58 @@ def suggested_swapsize(memsize=None, maxsize=None, fsys=None):
return size
+def create_swapfile(fname, size):
+ """Size is in MiB."""
+
+ errmsg = "Failed to create swapfile '%s' of size %dMB via %s: %s"
+
+ def create_swap(fname, size, method):
+ LOG.debug("Creating swapfile in '%s' on fstype '%s' using '%s'",
+ fname, fstype, method)
+
+        if method == "fallocate":
+            cmd = ['fallocate', '-l', '%sM' % size, fname]
+        elif method == "dd":
+            cmd = ['dd', 'if=/dev/zero', 'of=%s' % fname, 'bs=1M',
+                   'count=%s' % size]
+
+        try:
+            util.subp(cmd, capture=True)
+        except util.ProcessExecutionError as e:
+            LOG.warning(errmsg, fname, size, method, e)
+            util.del_file(fname)
+            raise
+
+ swap_dir = os.path.dirname(fname)
+ util.ensure_dir(swap_dir)
+
+ fstype = util.get_mount_info(swap_dir)[1]
+
+ if fstype in ("xfs", "btrfs"):
+ create_swap(fname, size, "dd")
+ else:
+        try:
+            create_swap(fname, size, "fallocate")
+        except util.ProcessExecutionError:
+            LOG.warning("Will attempt with dd.")
+            create_swap(fname, size, "dd")
+
+ util.chmod(fname, 0o600)
+ try:
+ util.subp(['mkswap', fname])
+ except util.ProcessExecutionError:
+ util.del_file(fname)
+ raise
+
+
def setup_swapfile(fname, size=None, maxsize=None):
"""
fname: full path string of filename to setup
size: the size to create. set to "auto" for recommended
maxsize: the maximum size
"""
- tdir = os.path.dirname(fname)
+    swap_dir = os.path.dirname(fname)
if str(size).lower() == "auto":
try:
memsize = util.read_meminfo()['total']
@@ -237,28 +282,16 @@ def setup_swapfile(fname, size=None, maxsize=None):
LOG.debug("Not creating swap: failed to read meminfo")
return
- util.ensure_dir(tdir)
- size = suggested_swapsize(fsys=tdir, maxsize=maxsize,
+ util.ensure_dir(swap_dir)
+ size = suggested_swapsize(fsys=swap_dir, maxsize=maxsize,
memsize=memsize)
if not size:
LOG.debug("Not creating swap: suggested size was 0")
        return
+    mibsize = str(int(size / (2 ** 20)))
- mbsize = str(int(size / (2 ** 20)))
- msg = "creating swap file '%s' of %sMB" % (fname, mbsize)
- try:
- util.ensure_dir(tdir)
- util.log_time(LOG.debug, msg, func=util.subp,
- args=[['sh', '-c',
- ('rm -f "$1" && umask 0066 && '
- '{ fallocate -l "${2}M" "$1" || '
- ' dd if=/dev/zero "of=$1" bs=1M "count=$2"; } && '
- 'mkswap "$1" || { r=$?; rm -f "$1"; exit $r; }'),
- 'setup_swap', fname, mbsize]])
-
- except Exception as e:
- raise IOError("Failed %s: %s" % (msg, e))
+ util.log_time(LOG.debug, msg="Setting up swap file", func=create_swapfile,
+ args=[fname, mibsize])
return fname
@@ -347,8 +380,8 @@ def handle(_name, cfg, cloud, log, _args):
for i in range(len(cfgmnt)):
# skip something that wasn't a list
if not isinstance(cfgmnt[i], list):
- log.warn("Mount option %s not a list, got a %s instead",
- (i + 1), type_utils.obj_name(cfgmnt[i]))
+ log.warning("Mount option %s not a list, got a %s instead",
+ (i + 1), type_utils.obj_name(cfgmnt[i]))
continue
start = str(cfgmnt[i][0])
@@ -439,6 +472,7 @@ def handle(_name, cfg, cloud, log, _args):
cc_lines = []
needswap = False
+ need_mount_all = False
dirs = []
for line in actlist:
# write 'comment' in the fs_mntops, entry, claiming this
@@ -449,11 +483,18 @@ def handle(_name, cfg, cloud, log, _args):
dirs.append(line[1])
cc_lines.append('\t'.join(line))
+ mount_points = [v['mountpoint'] for k, v in util.mounts().items()
+ if 'mountpoint' in v]
for d in dirs:
try:
util.ensure_dir(d)
except Exception:
util.logexc(log, "Failed to make '%s' config-mount", d)
+    # dirs is a list of directories on which a volume should be mounted.
+ # If any of them does not already show up in the list of current
+ # mount points, we will definitely need to do mount -a.
+ if not need_mount_all and d not in mount_points:
+ need_mount_all = True
sadds = [WS.sub(" ", n) for n in cc_lines]
sdrops = [WS.sub(" ", n) for n in fstab_removed]
@@ -473,6 +514,9 @@ def handle(_name, cfg, cloud, log, _args):
log.debug("No changes to /etc/fstab made.")
else:
log.debug("Changes to fstab: %s", sops)
+ need_mount_all = True
+
+ if need_mount_all:
activate_cmds.append(["mount", "-a"])
if uses_systemd:
activate_cmds.append(["systemctl", "daemon-reload"])
@@ -484,7 +528,7 @@ def handle(_name, cfg, cloud, log, _args):
util.subp(cmd)
log.debug(fmt, "PASS")
except util.ProcessExecutionError:
- log.warn(fmt, "FAIL")
+ log.warning(fmt, "FAIL")
util.logexc(log, fmt, "FAIL")
# vi: ts=4 expandtab
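
The new ``create_swapfile`` replaces the old inline shell pipeline:
``fallocate`` is the fast path, with ``dd`` as the fallback, and ``dd`` is
used unconditionally on ``xfs`` and ``btrfs``, where ``swapon`` can reject
preallocated files. A minimal sketch of just the command selection (sizes in
MiB, passed as strings as the module does)::

    def swap_create_cmd(fname, size_mib, fstype):
        # dd is mandatory on xfs/btrfs; elsewhere fallocate is preferred
        # for speed, with dd retried only if fallocate fails.
        if fstype in ('xfs', 'btrfs'):
            return ['dd', 'if=/dev/zero', 'of=%s' % fname, 'bs=1M',
                    'count=%s' % size_mib]
        return ['fallocate', '-l', '%sM' % size_mib, fname]

    print(swap_create_cmd('/swap.img', '512', 'ext4'))
    print(swap_create_cmd('/swap.img', '512', 'xfs'))
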
diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py
index 9e074bda..5498bbaa 100644
--- a/cloudinit/config/cc_ntp.py
+++ b/cloudinit/config/cc_ntp.py
@@ -6,19 +6,17 @@
"""NTP: enable and configure ntp"""
-from cloudinit.config.schema import (
- get_schema_doc, validate_cloudconfig_schema)
+import copy
+import os
+from textwrap import dedent
+
from cloudinit import log as logging
-from cloudinit.settings import PER_INSTANCE
from cloudinit import temp_utils
from cloudinit import templater
from cloudinit import type_utils
from cloudinit import util
-
-import copy
-import os
-import six
-from textwrap import dedent
+from cloudinit.config.schema import get_schema_doc, validate_cloudconfig_schema
+from cloudinit.settings import PER_INSTANCE
LOG = logging.getLogger(__name__)
@@ -460,7 +458,7 @@ def supplemental_schema_validation(ntp_config):
for key, value in sorted(ntp_config.items()):
keypath = 'ntp:config:' + key
if key == 'confpath':
- if not all([value, isinstance(value, six.string_types)]):
+ if not all([value, isinstance(value, str)]):
errors.append(
'Expected a config file path {keypath}.'
' Found ({value})'.format(keypath=keypath, value=value))
@@ -472,11 +470,11 @@ def supplemental_schema_validation(ntp_config):
elif key in ('template', 'template_name'):
if value is None: # Either template or template_name can be none
continue
- if not isinstance(value, six.string_types):
+ if not isinstance(value, str):
errors.append(
'Expected a string type for {keypath}.'
' Found ({value})'.format(keypath=keypath, value=value))
- elif not isinstance(value, six.string_types):
+ elif not isinstance(value, str):
errors.append(
'Expected a string type for {keypath}.'
' Found ({value})'.format(keypath=keypath, value=value))
diff --git a/cloudinit/config/cc_package_update_upgrade_install.py b/cloudinit/config/cc_package_update_upgrade_install.py
index 17b91011..86afffef 100644
--- a/cloudinit/config/cc_package_update_upgrade_install.py
+++ b/cloudinit/config/cc_package_update_upgrade_install.py
@@ -108,7 +108,8 @@ def handle(_name, cfg, cloud, log, _args):
reboot_fn_exists = os.path.isfile(REBOOT_FILE)
if (upgrade or pkglist) and reboot_if_required and reboot_fn_exists:
try:
- log.warn("Rebooting after upgrade or install per %s", REBOOT_FILE)
+ log.warning("Rebooting after upgrade or install per "
+ "%s", REBOOT_FILE)
# Flush the above warning + anything else out...
logging.flushLoggers(log)
_fire_reboot(log)
@@ -117,8 +118,8 @@ def handle(_name, cfg, cloud, log, _args):
errors.append(e)
if len(errors):
- log.warn("%s failed with exceptions, re-raising the last one",
- len(errors))
+ log.warning("%s failed with exceptions, re-raising the last one",
+ len(errors))
raise errors[-1]
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py
index 3be0d1c1..b8e27090 100644
--- a/cloudinit/config/cc_phone_home.py
+++ b/cloudinit/config/cc_phone_home.py
@@ -79,8 +79,8 @@ def handle(name, cfg, cloud, log, args):
ph_cfg = cfg['phone_home']
if 'url' not in ph_cfg:
- log.warn(("Skipping module named %s, "
- "no 'url' found in 'phone_home' configuration"), name)
+ log.warning(("Skipping module named %s, "
+ "no 'url' found in 'phone_home' configuration"), name)
return
url = ph_cfg['url']
@@ -91,7 +91,7 @@ def handle(name, cfg, cloud, log, args):
except Exception:
tries = 10
util.logexc(log, "Configuration entry 'tries' is not an integer, "
- "using %s instead", tries)
+ "using %s instead", tries)
if post_list == "all":
post_list = POST_LIST_ALL
@@ -112,7 +112,7 @@ def handle(name, cfg, cloud, log, args):
all_keys[n] = util.load_file(path)
except Exception:
util.logexc(log, "%s: failed to open, can not phone home that "
- "data!", path)
+ "data!", path)
submit_keys = {}
for k in post_list:
@@ -120,8 +120,8 @@ def handle(name, cfg, cloud, log, args):
submit_keys[k] = all_keys[k]
else:
submit_keys[k] = None
- log.warn(("Requested key %s from 'post'"
- " configuration list not available"), k)
+ log.warning(("Requested key %s from 'post'"
+ " configuration list not available"), k)
    # Get them ready to be posted
real_submit_keys = {}
diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py
index 50b37470..3e81a3c7 100644
--- a/cloudinit/config/cc_power_state_change.py
+++ b/cloudinit/config/cc_power_state_change.py
@@ -49,16 +49,15 @@ key returns 0.
condition: <true/false/command>
"""
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import util
-
import errno
import os
import re
-import six
import subprocess
import time
+from cloudinit.settings import PER_INSTANCE
+from cloudinit import util
+
frequency = PER_INSTANCE
EXIT_FAIL = 254
@@ -103,24 +102,23 @@ def check_condition(cond, log=None):
return False
else:
if log:
- log.warn(pre + "unexpected exit %s. " % ret +
- "do not apply change.")
+ log.warning(pre + "unexpected exit %s. " % ret +
+ "do not apply change.")
return False
except Exception as e:
if log:
- log.warn(pre + "Unexpected error: %s" % e)
+ log.warning(pre + "Unexpected error: %s" % e)
return False
def handle(_name, cfg, _cloud, log, _args):
-
try:
(args, timeout, condition) = load_power_state(cfg)
if args is None:
log.debug("no power_state provided. doing nothing")
return
except Exception as e:
- log.warn("%s Not performing power state change!" % str(e))
+ log.warning("%s Not performing power state change!" % str(e))
return
if condition is False:
@@ -131,7 +129,7 @@ def handle(_name, cfg, _cloud, log, _args):
cmdline = givecmdline(mypid)
if not cmdline:
- log.warn("power_state: failed to get cmdline of current process")
+ log.warning("power_state: failed to get cmdline of current process")
return
devnull_fp = open(os.devnull, "w")
@@ -184,7 +182,7 @@ def load_power_state(cfg):
pstate['timeout'])
condition = pstate.get("condition", True)
- if not isinstance(condition, six.string_types + (list, bool)):
+ if not isinstance(condition, (str, list, bool)):
        raise TypeError("condition type %s invalid. must be list, bool, str"
                        % type(condition))
return (args, timeout, condition)
@@ -214,7 +212,7 @@ def run_after_pid_gone(pid, pidcmdline, timeout, log, condition, func, args):
def fatal(msg):
if log:
- log.warn(msg)
+ log.warning(msg)
doexit(EXIT_FAIL)
known_errnos = (errno.ENOENT, errno.ESRCH)
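
On Python 3 the ``six.string_types + (list, bool)`` tuple collapses to a
plain ``(str, list, bool)``. A minimal sketch of the ``condition``
validation::

    def validate_condition(condition):
        # Accept the three shapes cc_power_state_change allows.
        if not isinstance(condition, (str, list, bool)):
            raise TypeError("condition type %s invalid. must be list, "
                            "bool, str" % type(condition))
        return condition

    print(validate_condition(True))
    print(validate_condition('test -f /run/reboot-needed'))
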
diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py
index 4190a20b..c01f5b8f 100644
--- a/cloudinit/config/cc_puppet.py
+++ b/cloudinit/config/cc_puppet.py
@@ -24,9 +24,10 @@ module will attempt to start puppet even if no installation was performed.
The module also provides keys for configuring the new puppet 4 paths and
installing the puppet package from the puppetlabs repositories:
https://docs.puppet.com/puppet/4.2/reference/whered_it_go.html
-The keys are ``package_name``, ``conf_file`` and ``ssl_dir``. If unset, their
-values will default to ones that work with puppet 3.x and with distributions
-that ship modified puppet 4.x that uses the old paths.
+The keys are ``package_name``, ``conf_file``, ``ssl_dir`` and
+``csr_attributes_path``. If unset, their values will default to
+ones that work with puppet 3.x and with distributions that ship modified
+puppet 4.x that uses the old paths.
Puppet configuration can be specified under the ``conf`` key. The
configuration is specified as a dictionary containing high-level ``<section>``
@@ -40,6 +41,10 @@ If ``ca_cert`` is present, it will not be written to ``puppet.conf``, but
instead will be used as the puppetmaster certificate. It should be specified
in pem format as a multi-line string (using the ``|`` yaml notation).
+Additionally it's possible to create a ``csr_attributes.yaml`` file for
+CSR attributes and certificate extension requests.
+See https://puppet.com/docs/puppet/latest/config_file_csr_attributes.html
+
**Internal name:** ``cc_puppet``
**Module frequency:** per instance
@@ -53,6 +58,7 @@ in pem format as a multi-line string (using the ``|`` yaml notation).
version: <version>
conf_file: '/etc/puppet/puppet.conf'
ssl_dir: '/var/lib/puppet/ssl'
+ csr_attributes_path: '/etc/puppet/csr_attributes.yaml'
package_name: 'puppet'
conf:
agent:
@@ -62,28 +68,38 @@ in pem format as a multi-line string (using the ``|`` yaml notation).
-------BEGIN CERTIFICATE-------
<cert data>
-------END CERTIFICATE-------
+ csr_attributes:
+ custom_attributes:
+ 1.2.840.113549.1.9.7: 342thbjkt82094y0uthhor289jnqthpc2290
+ extension_requests:
+ pp_uuid: ED803750-E3C7-44F5-BB08-41A04433FE2E
+ pp_image_name: my_ami_image
+ pp_preshared_key: 342thbjkt82094y0uthhor289jnqthpc2290
"""
-from six import StringIO
-
import os
import socket
+import yaml
+from io import StringIO
from cloudinit import helpers
from cloudinit import util
PUPPET_CONF_PATH = '/etc/puppet/puppet.conf'
PUPPET_SSL_DIR = '/var/lib/puppet/ssl'
+PUPPET_CSR_ATTRIBUTES_PATH = '/etc/puppet/csr_attributes.yaml'
PUPPET_PACKAGE_NAME = 'puppet'
class PuppetConstants(object):
- def __init__(self, puppet_conf_file, puppet_ssl_dir, log):
+ def __init__(self, puppet_conf_file, puppet_ssl_dir,
+ csr_attributes_path, log):
self.conf_path = puppet_conf_file
self.ssl_dir = puppet_ssl_dir
self.ssl_cert_dir = os.path.join(puppet_ssl_dir, "certs")
self.ssl_cert_path = os.path.join(self.ssl_cert_dir, "ca.pem")
+ self.csr_attributes_path = csr_attributes_path
def _autostart_puppet(log):
@@ -98,8 +114,8 @@ def _autostart_puppet(log):
elif os.path.exists('/sbin/chkconfig'):
util.subp(['/sbin/chkconfig', 'puppet', 'on'], capture=False)
else:
- log.warn(("Sorry we do not know how to enable"
- " puppet services on this system"))
+ log.warning(("Sorry we do not know how to enable"
+ " puppet services on this system"))
def handle(name, cfg, cloud, log, _args):
@@ -118,11 +134,13 @@ def handle(name, cfg, cloud, log, _args):
conf_file = util.get_cfg_option_str(
puppet_cfg, 'conf_file', PUPPET_CONF_PATH)
ssl_dir = util.get_cfg_option_str(puppet_cfg, 'ssl_dir', PUPPET_SSL_DIR)
+ csr_attributes_path = util.get_cfg_option_str(
+ puppet_cfg, 'csr_attributes_path', PUPPET_CSR_ATTRIBUTES_PATH)
- p_constants = PuppetConstants(conf_file, ssl_dir, log)
+ p_constants = PuppetConstants(conf_file, ssl_dir, csr_attributes_path, log)
if not install and version:
- log.warn(("Puppet install set false but version supplied,"
- " doing nothing."))
+ log.warning(("Puppet install set false but version supplied,"
+ " doing nothing."))
elif install:
log.debug(("Attempting to install puppet %s,"),
version if version else 'latest')
@@ -141,7 +159,7 @@ def handle(name, cfg, cloud, log, _args):
cleaned_lines = [i.lstrip() for i in contents.splitlines()]
cleaned_contents = '\n'.join(cleaned_lines)
# Move to puppet_config.read_file when dropping py2.7
- puppet_config.readfp( # pylint: disable=W1505
+ puppet_config.readfp( # pylint: disable=W1505
StringIO(cleaned_contents),
filename=p_constants.conf_path)
for (cfg_name, cfg) in puppet_cfg['conf'].items():
@@ -176,6 +194,11 @@ def handle(name, cfg, cloud, log, _args):
% (p_constants.conf_path))
util.write_file(p_constants.conf_path, puppet_config.stringify())
+ if 'csr_attributes' in puppet_cfg:
+ util.write_file(p_constants.csr_attributes_path,
+ yaml.dump(puppet_cfg['csr_attributes'],
+ default_flow_style=False))
+
# Set it up so it autostarts
_autostart_puppet(log)
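
The new ``csr_attributes`` key is serialized verbatim to
``csr_attributes.yaml``. A minimal sketch of that serialization, reusing the
values from the docstring example above::

    import yaml

    csr_attributes = {
        'custom_attributes': {
            '1.2.840.113549.1.9.7': '342thbjkt82094y0uthhor289jnqthpc2290'},
        'extension_requests': {
            'pp_uuid': 'ED803750-E3C7-44F5-BB08-41A04433FE2E',
            'pp_image_name': 'my_ami_image',
            'pp_preshared_key': '342thbjkt82094y0uthhor289jnqthpc2290'},
    }
    # default_flow_style=False yields block-style YAML, as the module writes.
    print(yaml.dump(csr_attributes, default_flow_style=False))
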
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index 076b9d5a..01dfc125 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -8,7 +8,6 @@
"""Resizefs: cloud-config module which resizes the filesystem"""
-
import errno
import getopt
import os
@@ -81,7 +80,7 @@ def _resize_xfs(mount_point, devpth):
def _resize_ufs(mount_point, devpth):
- return ('growfs', '-y', devpth)
+ return ('growfs', '-y', mount_point)
def _resize_zfs(mount_point, devpth):
@@ -101,7 +100,7 @@ def _can_skip_resize_ufs(mount_point, devpth):
"""
# dumpfs -m /
# newfs command for / (/dev/label/rootfs)
- newfs -O 2 -U -a 4 -b 32768 -d 32768 -e 4096 -f 4096 -g 16384
+ newfs -L rootf -O 2 -U -a 4 -b 32768 -d 32768 -e 4096 -f 4096 -g 16384
-h 64 -i 8192 -j -k 6408 -m 8 -o time -s 58719232 /dev/label/rootf
"""
cur_fs_sz = None
@@ -110,7 +109,7 @@ def _can_skip_resize_ufs(mount_point, devpth):
for line in dumpfs_res.splitlines():
if not line.startswith('#'):
newfs_cmd = shlex.split(line)
- opt_value = 'O:Ua:s:b:d:e:f:g:h:i:jk:m:o:'
+ opt_value = 'O:Ua:s:b:d:e:f:g:h:i:jk:m:o:L:'
optlist, _args = getopt.getopt(newfs_cmd[1:], opt_value)
for o, a in optlist:
if o == "-s":
@@ -183,7 +182,7 @@ def maybe_get_writable_device_path(devpath, info, log):
not container):
devpath = util.rootdev_from_cmdline(util.get_cmdline())
if devpath is None:
- log.warn("Unable to find device '/dev/root'")
+ log.warning("Unable to find device '/dev/root'")
return None
log.debug("Converted /dev/root to '%s' per kernel cmdline", devpath)
@@ -212,8 +211,8 @@ def maybe_get_writable_device_path(devpath, info, log):
log.debug("Device '%s' did not exist in container. "
"cannot resize: %s", devpath, info)
elif exc.errno == errno.ENOENT:
- log.warn("Device '%s' did not exist. cannot resize: %s",
- devpath, info)
+ log.warning("Device '%s' did not exist. cannot resize: %s",
+ devpath, info)
else:
raise exc
return None
@@ -223,8 +222,8 @@ def maybe_get_writable_device_path(devpath, info, log):
log.debug("device '%s' not a block device in container."
" cannot resize: %s" % (devpath, info))
else:
- log.warn("device '%s' not a block device. cannot resize: %s" %
- (devpath, info))
+ log.warning("device '%s' not a block device. cannot resize: %s" %
+ (devpath, info))
return None
return devpath # The writable block devpath
@@ -243,7 +242,7 @@ def handle(name, cfg, _cloud, log, args):
resize_what = "/"
result = util.get_mount_info(resize_what, log)
if not result:
- log.warn("Could not determine filesystem type of %s", resize_what)
+ log.warning("Could not determine filesystem type of %s", resize_what)
return
(devpth, fs_type, mount_point) = result
@@ -280,8 +279,8 @@ def handle(name, cfg, _cloud, log, args):
break
if not resizer:
- log.warn("Not resizing unknown filesystem type %s for %s",
- fs_type, resize_what)
+ log.warning("Not resizing unknown filesystem type %s for %s",
+ fs_type, resize_what)
return
resize_cmd = resizer(resize_what, devpth)
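
Each ``_resize_*`` helper only builds the command tuple; ``handle`` executes
it. The UFS fix passes the mount point rather than the device node, matching
FreeBSD's ``growfs(8)``, which accepts a mounted filesystem by its mount
point. A minimal sketch::

    def _resize_ufs(mount_point, devpth):
        # growfs is pointed at the mount point, not the device node.
        return ('growfs', '-y', mount_point)

    resize_cmd = _resize_ufs('/', '/dev/gpt/rootfs')
    print(' '.join(resize_cmd))   # -> growfs -y /
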
diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py
index 9812562a..69f4768a 100644
--- a/cloudinit/config/cc_resolv_conf.py
+++ b/cloudinit/config/cc_resolv_conf.py
@@ -102,11 +102,11 @@ def handle(name, cfg, cloud, log, _args):
return
if "resolv_conf" not in cfg:
- log.warn("manage_resolv_conf True but no parameters provided!")
+ log.warning("manage_resolv_conf True but no parameters provided!")
template_fn = cloud.get_template_filename('resolv.conf')
if not template_fn:
- log.warn("No template found, not rendering /etc/resolv.conf")
+ log.warning("No template found, not rendering /etc/resolv.conf")
return
generate_resolv_conf(template_fn=template_fn, params=cfg["resolv_conf"])
diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py
index edee01e5..28c79b83 100644
--- a/cloudinit/config/cc_rh_subscription.py
+++ b/cloudinit/config/cc_rh_subscription.py
@@ -249,14 +249,14 @@ class SubscriptionManager(object):
except util.ProcessExecutionError as e:
if e.stdout.rstrip() != '':
for line in e.stdout.split("\n"):
- if line is not '':
+ if line != '':
self.log_warn(line)
else:
self.log_warn("Setting the service level failed with: "
"{0}".format(e.stderr.strip()))
return False
for line in return_out.split("\n"):
- if line is not "":
+ if line != "":
self.log.debug(line)
return True
@@ -268,7 +268,7 @@ class SubscriptionManager(object):
self.log_warn("Auto-attach failed with: {0}".format(e))
return False
for line in return_out.split("\n"):
- if line is not "":
+ if line != "":
self.log.debug(line)
return True
diff --git a/cloudinit/config/cc_rightscale_userdata.py b/cloudinit/config/cc_rightscale_userdata.py
index 4e34c7e9..a5aca038 100644
--- a/cloudinit/config/cc_rightscale_userdata.py
+++ b/cloudinit/config/cc_rightscale_userdata.py
@@ -50,13 +50,12 @@ user scripts configuration directory, to be run later by ``cc_scripts_user``.
#
import os
+from urllib.parse import parse_qs
from cloudinit.settings import PER_INSTANCE
from cloudinit import url_helper as uhelp
from cloudinit import util
-from six.moves.urllib_parse import parse_qs
-
frequency = PER_INSTANCE
MY_NAME = "cc_rightscale_userdata"
@@ -111,8 +110,8 @@ def handle(name, _cfg, cloud, log, _args):
log.debug("%s urls were skipped or failed", skipped)
if captured_excps:
- log.warn("%s failed with exceptions, re-raising the last one",
- len(captured_excps))
+ log.warning("%s failed with exceptions, re-raising the last one",
+ len(captured_excps))
raise captured_excps[-1]
# vi: ts=4 expandtab
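
The ``six.moves`` shim gives way to the stdlib ``urllib.parse.parse_qs``,
which this module uses to pull hook URLs out of query-string-formatted user
data. A minimal sketch, assuming the module's ``CLOUD_INIT_REMOTE_HOOK``
hook key::

    from urllib.parse import parse_qs

    ud = ('CLOUD_INIT_REMOTE_HOOK=http://example.com/hook1'
          '&CLOUD_INIT_REMOTE_HOOK=http://example.com/hook2')
    # parse_qs collects repeated keys into a list of values.
    print(parse_qs(ud).get('CLOUD_INIT_REMOTE_HOOK', []))
    # -> ['http://example.com/hook1', 'http://example.com/hook2']
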
diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
index 27d2366c..5df0137d 100644
--- a/cloudinit/config/cc_rsyslog.py
+++ b/cloudinit/config/cc_rsyslog.py
@@ -180,7 +180,6 @@ config entries. Legacy to new mappings are as follows:
import os
import re
-import six
from cloudinit import log as logging
from cloudinit import util
@@ -203,7 +202,7 @@ LOG = logging.getLogger(__name__)
COMMENT_RE = re.compile(r'[ ]*[#]+[ ]*')
HOST_PORT_RE = re.compile(
r'^(?P<proto>[@]{0,2})'
- r'(([[](?P<bracket_addr>[^\]]*)[\]])|(?P<addr>[^:]*))'
+ r'(([\[](?P<bracket_addr>[^\]]*)[\]])|(?P<addr>[^:]*))'
r'([:](?P<port>[0-9]+))?$')
@@ -233,9 +232,9 @@ def load_config(cfg):
fillup = (
(KEYNAME_CONFIGS, [], list),
- (KEYNAME_DIR, DEF_DIR, six.string_types),
- (KEYNAME_FILENAME, DEF_FILENAME, six.string_types),
- (KEYNAME_RELOAD, DEF_RELOAD, six.string_types + (list,)),
+ (KEYNAME_DIR, DEF_DIR, str),
+ (KEYNAME_FILENAME, DEF_FILENAME, str),
+ (KEYNAME_RELOAD, DEF_RELOAD, (str, list)),
(KEYNAME_REMOTES, DEF_REMOTES, dict))
for key, default, vtypes in fillup:
@@ -432,7 +431,7 @@ def handle(name, cfg, cloud, log, _args):
systemd=cloud.distro.uses_systemd()),
except util.ProcessExecutionError as e:
restarted = False
- log.warn("Failed to reload syslog", e)
+ log.warning("Failed to reload syslog", e)
if restarted:
# This only needs to run if we *actually* restarted
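
The ``HOST_PORT_RE`` change only escapes the opening bracket (``[\[]``); the
match behavior is identical. A minimal sketch of how the pattern decomposes a
remotes entry, bracketed IPv6 address included::

    import re

    HOST_PORT_RE = re.compile(
        r'^(?P<proto>[@]{0,2})'
        r'(([\[](?P<bracket_addr>[^\]]*)[\]])|(?P<addr>[^:]*))'
        r'([:](?P<port>[0-9]+))?$')

    m = HOST_PORT_RE.match('@@[2001:db8::1]:10514')
    print(m.group('proto'), m.group('bracket_addr'), m.group('port'))
    # -> @@ 2001:db8::1 10514
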
diff --git a/cloudinit/config/cc_salt_minion.py b/cloudinit/config/cc_salt_minion.py
index d6a21d72..5dd8de37 100644
--- a/cloudinit/config/cc_salt_minion.py
+++ b/cloudinit/config/cc_salt_minion.py
@@ -45,7 +45,9 @@ specify them with ``pkg_name``, ``service_name`` and ``config_dir``.
import os
-from cloudinit import util
+from cloudinit import safeyaml, util
+from cloudinit.distros import rhel_util
+
# Note: see https://docs.saltstack.com/en/latest/topics/installation/
# Note: see https://docs.saltstack.com/en/latest/ref/configuration/
@@ -59,7 +61,7 @@ class SaltConstants(object):
# constants tailored for FreeBSD
if util.is_FreeBSD():
- self.pkg_name = 'py27-salt'
+ self.pkg_name = 'py36-salt'
self.srv_name = 'salt_minion'
self.conf_dir = '/usr/local/etc/salt'
# constants for any other OS
@@ -97,13 +99,13 @@ def handle(name, cfg, cloud, log, _args):
if 'conf' in s_cfg:
# Add all sections from the conf object to minion config file
minion_config = os.path.join(const.conf_dir, 'minion')
- minion_data = util.yaml_dumps(s_cfg.get('conf'))
+ minion_data = safeyaml.dumps(s_cfg.get('conf'))
util.write_file(minion_config, minion_data)
if 'grains' in s_cfg:
# add grains to /etc/salt/grains
grains_config = os.path.join(const.conf_dir, 'grains')
- grains_data = util.yaml_dumps(s_cfg.get('grains'))
+ grains_data = safeyaml.dumps(s_cfg.get('grains'))
util.write_file(grains_config, grains_data)
# ... copy the key pair if specified
@@ -123,7 +125,8 @@ def handle(name, cfg, cloud, log, _args):
# we need to have the salt minion service enabled in rc in order to be
# able to start the service. this does only apply on FreeBSD servers.
if cloud.distro.osfamily == 'freebsd':
- cloud.distro.updatercconf('salt_minion_enable', 'YES')
+ rhel_util.update_sysconfig_file(
+ '/etc/rc.conf', {'salt_minion_enable': 'YES'})
# restart salt-minion. 'service' will start even if not started. if it
# was started, it needs to be restarted for config change.
diff --git a/cloudinit/config/cc_scripts_per_boot.py b/cloudinit/config/cc_scripts_per_boot.py
index b03255c7..588e1b03 100644
--- a/cloudinit/config/cc_scripts_per_boot.py
+++ b/cloudinit/config/cc_scripts_per_boot.py
@@ -40,8 +40,8 @@ def handle(name, _cfg, cloud, log, _args):
try:
util.runparts(runparts_path)
except Exception:
- log.warn("Failed to run module %s (%s in %s)",
- name, SCRIPT_SUBDIR, runparts_path)
+ log.warning("Failed to run module %s (%s in %s)",
+ name, SCRIPT_SUBDIR, runparts_path)
raise
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_scripts_per_instance.py b/cloudinit/config/cc_scripts_per_instance.py
index baee5cc4..75549b52 100644
--- a/cloudinit/config/cc_scripts_per_instance.py
+++ b/cloudinit/config/cc_scripts_per_instance.py
@@ -15,6 +15,9 @@ Any scripts in the ``scripts/per-instance`` directory on the datasource will
be run when a new instance is first booted. Scripts will be run in alphabetical
order. This module does not accept any config keys.
+Some cloud platforms change instance-id when a significant change is made to
+the system. As a result, per-instance scripts will run again.
+
**Internal name:** ``cc_scripts_per_instance``
**Module frequency:** per instance
@@ -40,8 +43,8 @@ def handle(name, _cfg, cloud, log, _args):
try:
util.runparts(runparts_path)
except Exception:
- log.warn("Failed to run module %s (%s in %s)",
- name, SCRIPT_SUBDIR, runparts_path)
+ log.warning("Failed to run module %s (%s in %s)",
+ name, SCRIPT_SUBDIR, runparts_path)
raise
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_scripts_per_once.py b/cloudinit/config/cc_scripts_per_once.py
index 4943e9aa..259bdfab 100644
--- a/cloudinit/config/cc_scripts_per_once.py
+++ b/cloudinit/config/cc_scripts_per_once.py
@@ -12,8 +12,9 @@ Scripts Per Once
**Summary:** run one time scripts
Any scripts in the ``scripts/per-once`` directory on the datasource will be run
-only once. Scripts will be run in alphabetical order. This module does not
-accept any config keys.
+only once. Changes to the instance will not force a re-run. The only way to
+re-run these scripts is to run the clean subcommand and reboot. Scripts will
+be run in alphabetical order. This module does not accept any config keys.
**Internal name:** ``cc_scripts_per_once``
@@ -40,8 +41,8 @@ def handle(name, _cfg, cloud, log, _args):
try:
util.runparts(runparts_path)
except Exception:
- log.warn("Failed to run module %s (%s in %s)",
- name, SCRIPT_SUBDIR, runparts_path)
+ log.warning("Failed to run module %s (%s in %s)",
+ name, SCRIPT_SUBDIR, runparts_path)
raise
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_scripts_user.py b/cloudinit/config/cc_scripts_user.py
index 6c66481e..d940dbd6 100644
--- a/cloudinit/config/cc_scripts_user.py
+++ b/cloudinit/config/cc_scripts_user.py
@@ -44,8 +44,8 @@ def handle(name, _cfg, cloud, log, _args):
try:
util.runparts(runparts_path)
except Exception:
- log.warn("Failed to run module %s (%s in %s)",
- name, SCRIPT_SUBDIR, runparts_path)
+ log.warning("Failed to run module %s (%s in %s)",
+ name, SCRIPT_SUBDIR, runparts_path)
raise
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_scripts_vendor.py b/cloudinit/config/cc_scripts_vendor.py
index 0292eafb..faac9242 100644
--- a/cloudinit/config/cc_scripts_vendor.py
+++ b/cloudinit/config/cc_scripts_vendor.py
@@ -48,8 +48,8 @@ def handle(name, cfg, cloud, log, _args):
try:
util.runparts(runparts_path, exe_prefix=prefix)
except Exception:
- log.warn("Failed to run module %s (%s in %s)",
- name, SCRIPT_SUBDIR, runparts_path)
+ log.warning("Failed to run module %s (%s in %s)",
+ name, SCRIPT_SUBDIR, runparts_path)
raise
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py
index 65f6e777..b65f3ed9 100644
--- a/cloudinit/config/cc_seed_random.py
+++ b/cloudinit/config/cc_seed_random.py
@@ -61,8 +61,7 @@ used::
import base64
import os
-
-from six import BytesIO
+from io import BytesIO
from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
@@ -131,7 +130,7 @@ def handle(name, cfg, cloud, log, _args):
env['RANDOM_SEED_FILE'] = seed_path
handle_random_seed_command(command=command, required=req, env=env)
except ValueError as e:
- log.warn("handling random command [%s] failed: %s", command, e)
+ log.warning("handling random command [%s] failed: %s", command, e)
raise e
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py
index 3d2b2da3..10d6d197 100644
--- a/cloudinit/config/cc_set_hostname.py
+++ b/cloudinit/config/cc_set_hostname.py
key, and the fqdn of the cloud will be used. If a fqdn is specified with the
the ``fqdn`` config key. If both ``fqdn`` and ``hostname`` are set, ``fqdn``
will be used.
-**Internal name:** per instance
+This module will run in the init-local stage before networking is configured
+if the hostname is set by metadata or user data on the local system.
+
+This will occur on datasources like NoCloud and OVF where metadata and user
+data are available locally. This ensures that the desired hostname is applied
+before any DHCP requests are performed on these platforms where dynamic DNS is
+based on the initial hostname.
+
+**Internal name:** ``cc_set_hostname``
+
+**Module frequency:** always
**Supported distros:** all
diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
index 5ef97376..4943d545 100755
--- a/cloudinit/config/cc_set_passwords.py
+++ b/cloudinit/config/cc_set_passwords.py
@@ -9,27 +9,40 @@
"""
Set Passwords
-------------
-**Summary:** Set user passwords
-
-Set system passwords and enable or disable ssh password authentication.
-The ``chpasswd`` config key accepts a dictionary containing a single one of two
-keys, either ``expire`` or ``list``. If ``expire`` is specified and is set to
-``false``, then the ``password`` global config key is used as the password for
-all user accounts. If the ``expire`` key is specified and is set to ``true``
-then user passwords will be expired, preventing the default system passwords
-from being used.
-
-If the ``list`` key is provided, a list of
-``username:password`` pairs can be specified. The usernames specified
-must already exist on the system, or have been created using the
-``cc_users_groups`` module. A password can be randomly generated using
-``username:RANDOM`` or ``username:R``. A hashed password can be specified
-using ``username:$6$salt$hash``. Password ssh authentication can be
-enabled, disabled, or left to system defaults using ``ssh_pwauth``.
+**Summary:** Set user passwords and enable/disable SSH password authentication
+
+This module consumes three top-level config keys: ``ssh_pwauth``, ``chpasswd``
+and ``password``.
+
+The ``ssh_pwauth`` config key determines whether or not sshd will be configured
+to accept password authentication. True values will enable password auth,
+false values will disable password auth, and the literal string ``unchanged``
+will leave it unchanged. Setting no value will also leave the current setting
+on-disk unchanged.
+
+The ``chpasswd`` config key accepts a dictionary containing either or both of
+``expire`` and ``list``.
+
+If the ``list`` key is provided, it should contain a list of
+``username:password`` pairs. This can be either a YAML list (of strings), or a
+multi-line string with one pair per line. Each user will have the
+corresponding password set. A password can be randomly generated by specifying
+``RANDOM`` or ``R`` as a user's password. A hashed password, created by a tool
+like ``mkpasswd``, can be specified; a regex
+(``r'\\$(1|2a|2y|5|6)(\\$.+){2}'``) is used to determine if a password value
+should be treated as a hash.
.. note::
- if using ``expire: true`` then a ssh authkey should be specified or it may
- not be possible to login to the system
+ The users specified must already exist on the system. Users will have been
+ created by the ``cc_users_groups`` module at this point.
+
+By default, all users on the system will have their passwords expired (meaning
+that they will have to be reset the next time the user logs in). To disable
+this behaviour, set ``expire`` under ``chpasswd`` to a false value.
+
+If a ``list`` of user/password pairs is not specified under ``chpasswd``, then
+the value of the ``password`` config key will be used to set the default user's
+password.
**Internal name:** ``cc_set_passwords``
@@ -99,7 +112,7 @@ def handle_ssh_pwauth(pw_auth, service_cmd=None, service_name="ssh"):
elif util.is_false(pw_auth):
cfg_val = 'no'
else:
- bmsg = "Leaving ssh config '%s' unchanged." % cfg_name
+ bmsg = "Leaving SSH config '%s' unchanged." % cfg_name
if pw_auth is None or pw_auth.lower() == 'unchanged':
LOG.debug("%s ssh_pwauth=%s", bmsg, pw_auth)
else:
@@ -108,7 +121,7 @@ def handle_ssh_pwauth(pw_auth, service_cmd=None, service_name="ssh"):
updated = update_ssh_config({cfg_name: cfg_val})
if not updated:
- LOG.debug("No need to restart ssh service, %s not updated.", cfg_name)
+ LOG.debug("No need to restart SSH service, %s not updated.", cfg_name)
return
if 'systemctl' in service_cmd:
@@ -116,7 +129,7 @@ def handle_ssh_pwauth(pw_auth, service_cmd=None, service_name="ssh"):
else:
cmd = list(service_cmd) + [service_name, "restart"]
util.subp(cmd)
- LOG.debug("Restarted the ssh daemon.")
+ LOG.debug("Restarted the SSH daemon.")
def handle(_name, cfg, cloud, log, args):
@@ -151,7 +164,7 @@ def handle(_name, cfg, cloud, log, args):
if user:
plist = ["%s:%s" % (user, password)]
else:
- log.warn("No default or defined user to change password for.")
+ log.warning("No default or defined user to change password for.")
errors = []
if plist:
@@ -160,24 +173,27 @@ def handle(_name, cfg, cloud, log, args):
hashed_users = []
randlist = []
users = []
- prog = re.compile(r'\$[1,2a,2y,5,6](\$.+){2}')
+ # N.B. This regex is included in the documentation (i.e. the module
+ # docstring), so any changes to it should be reflected there.
+ prog = re.compile(r'\$(1|2a|2y|5|6)(\$.+){2}')
for line in plist:
u, p = line.split(':', 1)
if prog.match(p) is not None and ":" not in p:
- hashed_plist_in.append("%s:%s" % (u, p))
+ hashed_plist_in.append(line)
hashed_users.append(u)
else:
+                # in this else branch we potentially change the password,
+                # hence the deviation from .append(line)
if p == "R" or p == "RANDOM":
p = rand_user_password()
randlist.append("%s:%s" % (u, p))
plist_in.append("%s:%s" % (u, p))
users.append(u)
-
ch_in = '\n'.join(plist_in) + '\n'
if users:
try:
log.debug("Changing password for %s:", users)
- util.subp(['chpasswd'], ch_in)
+ chpasswd(cloud.distro, ch_in)
except Exception as e:
errors.append(e)
util.logexc(
@@ -187,7 +203,7 @@ def handle(_name, cfg, cloud, log, args):
if hashed_users:
try:
log.debug("Setting hashed password for %s:", hashed_users)
- util.subp(['chpasswd', '-e'], hashed_ch_in)
+ chpasswd(cloud.distro, hashed_ch_in, hashed=True)
except Exception as e:
errors.append(e)
util.logexc(
@@ -203,7 +219,7 @@ def handle(_name, cfg, cloud, log, args):
expired_users = []
for u in users:
try:
- util.subp(['passwd', '--expire', u])
+ cloud.distro.expire_passwd(u)
expired_users.append(u)
except Exception as e:
errors.append(e)
@@ -220,7 +236,17 @@ def handle(_name, cfg, cloud, log, args):
raise errors[-1]
-def rand_user_password(pwlen=9):
+def rand_user_password(pwlen=20):
return util.rand_str(pwlen, select_from=PW_SET)
+
+def chpasswd(distro, plist_in, hashed=False):
+ if util.is_FreeBSD():
+ for pentry in plist_in.splitlines():
+        u, p = pentry.split(":", 1)
+ distro.set_passwd(u, p, hashed=hashed)
+ else:
+ cmd = ['chpasswd'] + (['-e'] if hashed else [])
+ util.subp(cmd, plist_in)
+
# vi: ts=4 expandtab
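
The old pattern ``\$[1,2a,2y,5,6](\$.+){2}`` used a character class, so it
treated ``1``, ``2``, ``a``, ``y``, ``5``, ``6`` (and even the commas) as
single-character alternatives and never matched the two-character
``2a``/``2y`` bcrypt prefixes; the new alternation fixes that. A minimal
sketch of how entries are routed between ``chpasswd`` and ``chpasswd -e``::

    import re

    HASH_RE = re.compile(r'\$(1|2a|2y|5|6)(\$.+){2}')

    for line in ['alice:$6$salt$hashedvalue', 'bob:RANDOM', 'carol:s3cret']:
        u, p = line.split(':', 1)
        if HASH_RE.match(p) and ':' not in p:
            print(u, '-> hashed list (chpasswd -e)')
        else:
            print(u, '-> plain list (chpasswd)')

In the module, ``RANDOM``/``R`` entries are replaced by a generated
20-character password (the new ``rand_user_password`` default) before
``chpasswd`` runs.
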
diff --git a/cloudinit/config/cc_snap_config.py b/cloudinit/config/cc_snap_config.py
deleted file mode 100644
index afe297ee..00000000
--- a/cloudinit/config/cc_snap_config.py
+++ /dev/null
@@ -1,184 +0,0 @@
-# Copyright (C) 2016 Canonical Ltd.
-#
-# Author: Ryan Harper <ryan.harper@canonical.com>
-#
-# This file is part of cloud-init. See LICENSE file for license information.
-
-# RELEASE_BLOCKER: Remove this deprecated module in 18.3
-"""
-Snap Config
------------
-**Summary:** snap_config module allows configuration of snapd.
-
-**Deprecated**: Use :ref:`snap` module instead. This module will not exist
-in cloud-init 18.3.
-
-This module uses the same ``snappy`` namespace for configuration but
-acts on only a subset of the configuration.
-
-If ``assertions`` is set and the user has included a list of assertions
-then cloud-init will collect the assertions into a single assertion file
-and invoke ``snap ack <path to file with assertions>`` which will attempt
-to load the provided assertions into the snapd assertion database.
-
-If ``email`` is set, this value is used to create an authorized user for
-contacting and installing snaps from the Ubuntu Store. This is done by
-calling ``snap create-user`` command.
-
-If ``known`` is set to True, then it is expected the user also included
-an assertion of type ``system-user``. When ``snap create-user`` is called
-cloud-init will append '--known' flag which instructs snapd to look for
-a system-user assertion with the details. If ``known`` is not set, then
-``snap create-user`` will contact the Ubuntu SSO for validating and importing
-a system-user for the instance.
-
-.. note::
- If the system is already managed, then cloud-init will not attempt to
- create a system-user.
-
-**Internal name:** ``cc_snap_config``
-
-**Module frequency:** per instance
-
-**Supported distros:** any with 'snapd' available
-
-**Config keys**::
-
- #cloud-config
- snappy:
- assertions:
- - |
- <assertion 1>
- - |
- <assertion 2>
- email: user@user.org
- known: true
-
-"""
-
-from cloudinit import log as logging
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-frequency = PER_INSTANCE
-SNAPPY_CMD = "snap"
-ASSERTIONS_FILE = "/var/lib/cloud/instance/snapd.assertions"
-
-
-"""
-snappy:
- assertions:
- - |
- <snap assertion 1>
- - |
- <snap assertion 2>
- email: foo@foo.io
- known: true
-"""
-
-
-def add_assertions(assertions=None):
- """Import list of assertions.
-
- Import assertions by concatenating each assertion into a
-    string separated by a '\n'. Write this string to an instance file and
- then invoke `snap ack /path/to/file` and check for errors.
- If snap exits 0, then all assertions are imported.
- """
- if not assertions:
- assertions = []
-
- if not isinstance(assertions, list):
- raise ValueError(
- 'assertion parameter was not a list: {assertions}'.format(
- assertions=assertions))
-
- snap_cmd = [SNAPPY_CMD, 'ack']
- combined = "\n".join(assertions)
- if len(combined) == 0:
- raise ValueError("Assertion list is empty")
-
- for asrt in assertions:
- LOG.debug('Acking: %s', asrt.split('\n')[0:2])
-
- util.write_file(ASSERTIONS_FILE, combined.encode('utf-8'))
- util.subp(snap_cmd + [ASSERTIONS_FILE], capture=True)
-
-
-def add_snap_user(cfg=None):
- """Add a snap system-user if provided with email under snappy config.
-
- - Check that system is not already managed.
- - Check that if using a system-user assertion, that it's
- imported into snapd.
-
- Returns a dictionary to be passed to Distro.create_user
- """
-
- if not cfg:
- cfg = {}
-
- if not isinstance(cfg, dict):
- raise ValueError(
- 'configuration parameter was not a dict: {cfg}'.format(cfg=cfg))
-
- snapuser = cfg.get('email', None)
- if not snapuser:
- return
-
- usercfg = {
- 'snapuser': snapuser,
- 'known': cfg.get('known', False),
- }
-
- # query if we're already registered
- out, _ = util.subp([SNAPPY_CMD, 'managed'], capture=True)
- if out.strip() == "true":
- LOG.warning('This device is already managed. '
- 'Skipping system-user creation')
- return
-
- if usercfg.get('known'):
- # Check that we imported a system-user assertion
- out, _ = util.subp([SNAPPY_CMD, 'known', 'system-user'],
- capture=True)
- if len(out) == 0:
- LOG.error('Missing "system-user" assertion. '
- 'Check "snappy" user-data assertions.')
- return
-
- return usercfg
-
-
-def handle(name, cfg, cloud, log, args):
- cfgin = cfg.get('snappy')
- if not cfgin:
- LOG.debug('No snappy config provided, skipping')
- return
-
- log.warning(
- 'DEPRECATION: snap_config module will be dropped in 18.3 release.'
- ' Use snap module instead')
- if not(util.system_is_snappy()):
- LOG.debug("%s: system not snappy", name)
- return
-
- assertions = cfgin.get('assertions', [])
- if len(assertions) > 0:
- LOG.debug('Importing user-provided snap assertions')
- add_assertions(assertions)
-
- # Create a snap user if requested.
- # Snap systems contact the store with a user's email
- # and extract information needed to create a local user.
- # A user may provide a 'system-user' assertion which includes
- # the required information. Using such an assertion to create
- # a local user requires specifying 'known: true' in the supplied
- # user-data.
- usercfg = add_snap_user(cfg=cfgin)
- if usercfg:
- cloud.distro.create_user(usercfg.get('snapuser'), **usercfg)
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
deleted file mode 100644
index 15bee2d3..00000000
--- a/cloudinit/config/cc_snappy.py
+++ /dev/null
@@ -1,321 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-# RELEASE_BLOCKER: Remove this deprecated module in 18.3
-"""
-Snappy
-------
-**Summary:** snappy module allows configuration of snappy.
-
-**Deprecated**: Use :ref:`snap` module instead. This module will not exist
-in cloud-init 18.3.
-
-The example config below would install ``etcd``, and then install
-``pkg2.smoser`` with a ``<config-file>`` argument where ``config-file`` has
-``config-blob`` inside it. If ``pkgname`` is installed already, then
-``snappy config pkgname <file>``
-will be called where ``file`` has ``pkgname-config-blob`` as its content.
-
-Entries in ``config`` can be namespaced or non-namespaced for a package.
-In either case, the config provided to snappy command is non-namespaced.
-The package name is provided as it appears.
-
-If ``packages_dir`` has files in it that end in ``.snap``, then they are
-installed. Given 3 files:
-
- - <packages_dir>/foo.snap
- - <packages_dir>/foo.config
- - <packages_dir>/bar.snap
-
-cloud-init will invoke:
-
- - snappy install <packages_dir>/foo.snap <packages_dir>/foo.config
- - snappy install <packages_dir>/bar.snap
-
-.. note::
-    if a ``config`` entry is provided for ``ubuntu-core``, then
-    cloud-init will invoke: snappy config ubuntu-core <config>
-    allowing you to configure ubuntu-core in this way.
-
-The ``ssh_enabled`` key controls the system's ssh service. The default value
-is ``auto``. Options are:
-
- - **True:** enable ssh service
- - **False:** disable ssh service
- - **auto:** enable ssh service if either ssh keys have been provided
- or user has requested password authentication (ssh_pwauth).
-
-**Internal name:** ``cc_snappy``
-
-**Module frequency:** per instance
-
-**Supported distros:** ubuntu
-
-**Config keys**::
-
- #cloud-config
- snappy:
- system_snappy: auto
- ssh_enabled: auto
- packages: [etcd, pkg2.smoser]
- config:
- pkgname:
- key2: value2
- pkg2:
- key1: value1
- packages_dir: '/writable/user-data/cloud-init/snaps'
-"""
-
-from cloudinit import log as logging
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import temp_utils
-from cloudinit import util
-
-import glob
-import os
-
-LOG = logging.getLogger(__name__)
-
-frequency = PER_INSTANCE
-SNAPPY_CMD = "snappy"
-NAMESPACE_DELIM = '.'
-
-BUILTIN_CFG = {
- 'packages': [],
- 'packages_dir': '/writable/user-data/cloud-init/snaps',
- 'ssh_enabled': "auto",
- 'system_snappy': "auto",
- 'config': {},
-}
-
-distros = ['ubuntu']
-
-
-def parse_filename(fname):
- fname = os.path.basename(fname)
- fname_noext = fname.rpartition(".")[0]
- name = fname_noext.partition("_")[0]
- shortname = name.partition(".")[0]
- return(name, shortname, fname_noext)
-
-
-def get_fs_package_ops(fspath):
- if not fspath:
- return []
- ops = []
- for snapfile in sorted(glob.glob(os.path.sep.join([fspath, '*.snap']))):
- (name, shortname, fname_noext) = parse_filename(snapfile)
- cfg = None
- for cand in (fname_noext, name, shortname):
- fpcand = os.path.sep.join([fspath, cand]) + ".config"
- if os.path.isfile(fpcand):
- cfg = fpcand
- break
- ops.append(makeop('install', name, config=None,
- path=snapfile, cfgfile=cfg))
- return ops
-
-
-def makeop(op, name, config=None, path=None, cfgfile=None):
- return({'op': op, 'name': name, 'config': config, 'path': path,
- 'cfgfile': cfgfile})
-
-
-def get_package_config(configs, name):
- # load the package's config from the configs dict.
- # prefer full-name entry (config-example.canonical)
- # over short name entry (config-example)
- if name in configs:
- return configs[name]
- return configs.get(name.partition(NAMESPACE_DELIM)[0])
-
-
-def get_package_ops(packages, configs, installed=None, fspath=None):
-    # get the install and config operations that should be done
- if installed is None:
- installed = read_installed_packages()
- short_installed = [p.partition(NAMESPACE_DELIM)[0] for p in installed]
-
- if not packages:
- packages = []
- if not configs:
- configs = {}
-
- ops = []
- ops += get_fs_package_ops(fspath)
-
- for name in packages:
- ops.append(makeop('install', name, get_package_config(configs, name)))
-
- to_install = [f['name'] for f in ops]
- short_to_install = [f['name'].partition(NAMESPACE_DELIM)[0] for f in ops]
-
- for name in configs:
- if name in to_install:
- continue
- shortname = name.partition(NAMESPACE_DELIM)[0]
- if shortname in short_to_install:
- continue
- if name in installed or shortname in short_installed:
- ops.append(makeop('config', name,
- config=get_package_config(configs, name)))
-
- # prefer config entries to filepath entries
- for op in ops:
- if op['op'] != 'install' or not op['cfgfile']:
- continue
- name = op['name']
- fromcfg = get_package_config(configs, op['name'])
- if fromcfg:
- LOG.debug("preferring configs[%(name)s] over '%(cfgfile)s'", op)
- op['cfgfile'] = None
- op['config'] = fromcfg
-
- return ops
-
-
-def render_snap_op(op, name, path=None, cfgfile=None, config=None):
- if op not in ('install', 'config'):
- raise ValueError("cannot render op '%s'" % op)
-
- shortname = name.partition(NAMESPACE_DELIM)[0]
- try:
- cfg_tmpf = None
- if config is not None:
- # input to 'snappy config packagename' must have nested data. odd.
- # config:
- # packagename:
- # config
- # Note, however, we do not touch config files on disk.
- nested_cfg = {'config': {shortname: config}}
- (fd, cfg_tmpf) = temp_utils.mkstemp()
- os.write(fd, util.yaml_dumps(nested_cfg).encode())
- os.close(fd)
- cfgfile = cfg_tmpf
-
- cmd = [SNAPPY_CMD, op]
- if op == 'install':
- if path:
- cmd.append("--allow-unauthenticated")
- cmd.append(path)
- else:
- cmd.append(name)
- if cfgfile:
- cmd.append(cfgfile)
- elif op == 'config':
- cmd += [name, cfgfile]
-
- util.subp(cmd)
-
- finally:
- if cfg_tmpf:
- os.unlink(cfg_tmpf)
-
-
-def read_installed_packages():
- ret = []
- for (name, _date, _version, dev) in read_pkg_data():
- if dev:
- ret.append(NAMESPACE_DELIM.join([name, dev]))
- else:
- ret.append(name)
- return ret
-
-
-def read_pkg_data():
- out, _err = util.subp([SNAPPY_CMD, "list"])
- pkg_data = []
- for line in out.splitlines()[1:]:
- toks = line.split(sep=None, maxsplit=3)
- if len(toks) == 3:
- (name, date, version) = toks
- dev = None
- else:
- (name, date, version, dev) = toks
- pkg_data.append((name, date, version, dev,))
- return pkg_data
-
-
-def disable_enable_ssh(enabled):
- LOG.debug("setting enablement of ssh to: %s", enabled)
- # do something here that would enable or disable
- not_to_be_run = "/etc/ssh/sshd_not_to_be_run"
- if enabled:
- util.del_file(not_to_be_run)
-        # this is an idempotent operation
- util.subp(["systemctl", "start", "ssh"])
- else:
-        # this is an idempotent operation
- util.subp(["systemctl", "stop", "ssh"])
- util.write_file(not_to_be_run, "cloud-init\n")
-
-
-def set_snappy_command():
- global SNAPPY_CMD
- if util.which("snappy-go"):
- SNAPPY_CMD = "snappy-go"
- elif util.which("snappy"):
- SNAPPY_CMD = "snappy"
- else:
- SNAPPY_CMD = "snap"
- LOG.debug("snappy command is '%s'", SNAPPY_CMD)
-
-
-def handle(name, cfg, cloud, log, args):
- cfgin = cfg.get('snappy')
- if not cfgin:
- cfgin = {}
- mycfg = util.mergemanydict([cfgin, BUILTIN_CFG])
-
- sys_snappy = str(mycfg.get("system_snappy", "auto"))
- if util.is_false(sys_snappy):
- LOG.debug("%s: System is not snappy. disabling", name)
- return
-
- if sys_snappy.lower() == "auto" and not(util.system_is_snappy()):
- LOG.debug("%s: 'auto' mode, and system not snappy", name)
- return
-
- log.warning(
- 'DEPRECATION: snappy module will be dropped in 18.3 release.'
- ' Use snap module instead')
-
- set_snappy_command()
-
- pkg_ops = get_package_ops(packages=mycfg['packages'],
- configs=mycfg['config'],
- fspath=mycfg['packages_dir'])
-
- fails = []
- for pkg_op in pkg_ops:
- try:
- render_snap_op(**pkg_op)
- except Exception as e:
- fails.append((pkg_op, e,))
- LOG.warning("'%s' failed for '%s': %s",
- pkg_op['op'], pkg_op['name'], e)
-
- # Default to disabling SSH
- ssh_enabled = mycfg.get('ssh_enabled', "auto")
-
- # If the user has not explicitly enabled or disabled SSH, then enable it
- # when password SSH authentication is requested or there are SSH keys
- if ssh_enabled == "auto":
- user_ssh_keys = cloud.get_public_ssh_keys() or None
- password_auth_enabled = cfg.get('ssh_pwauth', False)
- if user_ssh_keys:
- LOG.debug("Enabling SSH, ssh keys found in datasource")
- ssh_enabled = True
- elif cfg.get('ssh_authorized_keys'):
- LOG.debug("Enabling SSH, ssh keys found in config")
- elif password_auth_enabled:
- LOG.debug("Enabling SSH, password authentication requested")
- ssh_enabled = True
- elif ssh_enabled not in (True, False):
- LOG.warning("Unknown value '%s' in ssh_enabled", ssh_enabled)
-
- disable_enable_ssh(ssh_enabled)
-
- if fails:
- raise Exception("failed to install/configure snaps")
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index f8f7cb35..163cce99 100755
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -9,43 +9,23 @@
"""
SSH
---
-**Summary:** configure ssh and ssh keys
+**Summary:** configure SSH and SSH keys (host and authorized)
-This module handles most configuration for ssh and ssh keys. Many images have
-default ssh keys, which can be removed using ``ssh_deletekeys``. Since removing
-default keys is usually the desired behavior this option is enabled by default.
+This module handles most configuration for SSH and both host and authorized SSH
+keys.
-Keys can be added using the ``ssh_keys`` configuration key. The argument to
-this config key should be a dictionary entries for the public and private keys
-of each desired key type. Entries in the ``ssh_keys`` config dict should
-have keys in the format ``<key type>_private`` and ``<key type>_public``, e.g.
-``rsa_private: <key>`` and ``rsa_public: <key>``. See below for supported key
-types. Not all key types have to be specified, ones left unspecified will not
-be used. If this config option is used, then no keys will be generated.
+Authorized Keys
+^^^^^^^^^^^^^^^
-.. note::
- when specifying private keys in cloud-config, care should be taken to
- ensure that the communication between the data source and the instance is
- secure
+Authorized keys are a list of public SSH keys that are allowed to connect to
+a user account on a system. They are stored in `.ssh/authorized_keys` in that
+account's home directory. Authorized keys for the default user defined in
+``users`` can be specified using ``ssh_authorized_keys``. Keys
+should be specified as a list of public keys.
.. note::
- to specify multiline private keys, use yaml multiline syntax
-
-If no keys are specified using ``ssh_keys``, then keys will be generated using
-``ssh-keygen``. By default one public/private pair of each supported key type
-will be generated. The key types to generate can be specified using the
-``ssh_genkeytypes`` config flag, which accepts a list of key types to use. For
-each key type for which this module has been instructed to create a keypair, if
-a key of the same type is already present on the system (i.e. if
-``ssh_deletekeys`` was false), no key will be generated.
-
-Supported key types for the ``ssh_keys`` and the ``ssh_genkeytypes`` config
-flags are:
-
- - rsa
- - dsa
- - ecdsa
- - ed25519
+ see the ``cc_set_passwords`` module documentation to enable/disable SSH
+ password authentication
Root login can be enabled/disabled using the ``disable_root`` config key. Root
login options can be manually specified with ``disable_root_opts``. If
@@ -55,13 +35,46 @@ root login is disabled, and root login opts are set to::
no-port-forwarding,no-agent-forwarding,no-X11-forwarding
-Authorized keys for the default user/first user defined in ``users`` can be
-specified using `ssh_authorized_keys``. Keys should be specified as a list of
-public keys.
+Host Keys
+^^^^^^^^^
+
+Host keys are for authenticating a specific instance. Many images have default
+host SSH keys, which can be removed using ``ssh_deletekeys``. This prevents
+re-use of a private host key from an image on multiple machines. Since
+removing default host keys is usually the desired behavior this option is
+enabled by default.
+
+Host keys can be added using the ``ssh_keys`` configuration key. The argument
+to this config key should be a dictionary containing entries for the public
+and private keys of each desired key type. Entries in the ``ssh_keys`` config
+dict should have keys in the format ``<key type>_private`` and
+``<key type>_public``,
+e.g. ``rsa_private: <key>`` and ``rsa_public: <key>``. See below for supported
+key types. Not all key types have to be specified, ones left unspecified will
+not be used. If this config option is used, then no keys will be generated.
.. note::
- see the ``cc_set_passwords`` module documentation to enable/disable ssh
- password authentication
+ when specifying private host keys in cloud-config, care should be taken to
+ ensure that the communication between the data source and the instance is
+ secure
+
+.. note::
+ to specify multiline private host keys, use yaml multiline syntax
+
+If no host keys are specified using ``ssh_keys``, then keys will be generated
+using ``ssh-keygen``. By default one public/private pair of each supported
+host key type will be generated. The key types to generate can be specified
+using the ``ssh_genkeytypes`` config flag, which accepts a list of host key
+types to use. For each host key type for which this module has been instructed
+to create a keypair, if a key of the same type is already present on the
+system (i.e. if ``ssh_deletekeys`` was false), no key will be generated.
+
+Supported host key types for the ``ssh_keys`` and the ``ssh_genkeytypes``
+config flags are:
+
+ - rsa
+ - dsa
+ - ecdsa
+ - ed25519
**Internal name:** ``cc_ssh``
@@ -91,6 +104,10 @@ public keys.
ssh_authorized_keys:
- ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEA3FSyQwBI6Z+nCSjUU ...
- ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZ ...
+ allow_public_ssh_keys: <true/false>
+ ssh_publish_hostkeys:
+ enabled: <true/false> (Defaults to true)
+ blacklist: <list of key types> (Defaults to [dsa])
"""
import glob
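
For orientation, a minimal sketch (in Python, as the handler sees it) of the
parsed cloud-config this module consumes once the YAML above is loaded; all
key material is a placeholder::

    cfg = {
        'ssh_deletekeys': True,
        'ssh_genkeytypes': ['rsa', 'ecdsa', 'ed25519'],
        'allow_public_ssh_keys': True,
        'ssh_publish_hostkeys': {'enabled': True, 'blacklist': ['dsa']},
        'ssh_authorized_keys': ['ssh-rsa AAAAB3Nz... user@host'],
    }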
@@ -104,6 +121,10 @@ from cloudinit import util
GENERATE_KEY_NAMES = ['rsa', 'dsa', 'ecdsa', 'ed25519']
KEY_FILE_TPL = '/etc/ssh/ssh_host_%s_key'
+PUBLISH_HOST_KEYS = True
+# Don't publish the dsa hostkey by default since OpenSSH recommends not using
+# it.
+HOST_KEY_PUBLISH_BLACKLIST = ['dsa']
CONFIG_KEY_TO_FILE = {}
PRIV_TO_PUB = {}
@@ -176,6 +197,23 @@ def handle(_name, cfg, cloud, log, _args):
util.logexc(log, "Failed generating key type %s to "
"file %s", keytype, keyfile)
+ if "ssh_publish_hostkeys" in cfg:
+ host_key_blacklist = util.get_cfg_option_list(
+ cfg["ssh_publish_hostkeys"], "blacklist",
+ HOST_KEY_PUBLISH_BLACKLIST)
+ publish_hostkeys = util.get_cfg_option_bool(
+ cfg["ssh_publish_hostkeys"], "enabled", PUBLISH_HOST_KEYS)
+ else:
+ host_key_blacklist = HOST_KEY_PUBLISH_BLACKLIST
+ publish_hostkeys = PUBLISH_HOST_KEYS
+
+ if publish_hostkeys:
+ hostkeys = get_public_host_keys(blacklist=host_key_blacklist)
+ try:
+ cloud.datasource.publish_host_keys(hostkeys)
+ except Exception:
+ util.logexc(log, "Publishing host keys failed!")
+
try:
(users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
(user, _user_config) = ug_util.extract_default(users)
@@ -183,14 +221,20 @@ def handle(_name, cfg, cloud, log, _args):
disable_root_opts = util.get_cfg_option_str(cfg, "disable_root_opts",
ssh_util.DISABLE_USER_OPTS)
- keys = cloud.get_public_ssh_keys() or []
+ keys = []
+ if util.get_cfg_option_bool(cfg, 'allow_public_ssh_keys', True):
+ keys = cloud.get_public_ssh_keys() or []
+ else:
+        log.debug('Skipping import of public SSH keys per '
+ 'config setting: allow_public_ssh_keys=False')
+
if "ssh_authorized_keys" in cfg:
cfgkeys = cfg["ssh_authorized_keys"]
keys.extend(cfgkeys)
apply_credentials(keys, user, disable_root, disable_root_opts)
except Exception:
- util.logexc(log, "Applying ssh credentials failed!")
+ util.logexc(log, "Applying SSH credentials failed!")
def apply_credentials(keys, user, disable_root, disable_root_opts):
@@ -209,4 +253,35 @@ def apply_credentials(keys, user, disable_root, disable_root_opts):
ssh_util.setup_user_keys(keys, 'root', options=key_prefix)
+
+def get_public_host_keys(blacklist=None):
+ """Read host keys from /etc/ssh/*.pub files and return them as a list.
+
+ @param blacklist: List of key types to ignore. e.g. ['dsa', 'rsa']
+ @returns: List of keys, each formatted as a two-element tuple.
+ e.g. [('ssh-rsa', 'AAAAB3Nz...'), ('ssh-ed25519', 'AAAAC3Nx...')]
+ """
+ public_key_file_tmpl = '%s.pub' % (KEY_FILE_TPL,)
+ key_list = []
+ blacklist_files = []
+ if blacklist:
+ # Convert blacklist to filenames:
+ # 'dsa' -> '/etc/ssh/ssh_host_dsa_key.pub'
+ blacklist_files = [public_key_file_tmpl % (key_type,)
+ for key_type in blacklist]
+ # Get list of public key files and filter out blacklisted files.
+ file_list = [hostfile for hostfile
+ in glob.glob(public_key_file_tmpl % ('*',))
+ if hostfile not in blacklist_files]
+
+ # Read host key files, retrieve first two fields as a tuple and
+ # append that tuple to key_list.
+ for file_name in file_list:
+ file_contents = util.load_file(file_name)
+ key_data = file_contents.split()
+ if key_data and len(key_data) > 1:
+ key_list.append(tuple(key_data[:2]))
+ return key_list
+
+
# vi: ts=4 expandtab
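
As a usage sketch (assuming host keys have already been generated under
``/etc/ssh``), the new helper returns (type, base64) tuples ready to hand to
``publish_host_keys``::

    hostkeys = get_public_host_keys(blacklist=['dsa'])
    # e.g. [('ecdsa-sha2-nistp256', 'AAAAE2Vj...'),
    #       ('ssh-ed25519', 'AAAAC3Nz...'),
    #       ('ssh-rsa', 'AAAAB3Nz...')]
    cloud.datasource.publish_host_keys(hostkeys)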
diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py
index 98b0e665..7ac1c8cf 100755
--- a/cloudinit/config/cc_ssh_authkey_fingerprints.py
+++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py
@@ -7,11 +7,11 @@
"""
SSH Authkey Fingerprints
------------------------
-**Summary:** log fingerprints of user ssh keys
+**Summary:** log fingerprints of user SSH keys
Write fingerprints of authorized keys for each user to log. This is enabled by
default, but can be disabled using ``no_ssh_fingerprints``. The hash type for
-the keys can be specified, but defaults to ``md5``.
+the keys can be specified, but defaults to ``sha256``.
**Internal name:** `` cc_ssh_authkey_fingerprints``
@@ -42,7 +42,7 @@ def _split_hash(bin_hash):
return split_up
-def _gen_fingerprint(b64_text, hash_meth='md5'):
+def _gen_fingerprint(b64_text, hash_meth='sha256'):
if not b64_text:
return ''
# TBD(harlowja): Maybe we should feed this into 'ssh -lf'?
@@ -65,10 +65,10 @@ def _is_printable_key(entry):
return False
-def _pprint_key_entries(user, key_fn, key_entries, hash_meth='md5',
+def _pprint_key_entries(user, key_fn, key_entries, hash_meth='sha256',
prefix='ci-info: '):
if not key_entries:
- message = ("%sno authorized ssh keys fingerprints found for user %s.\n"
+ message = ("%sno authorized SSH keys fingerprints found for user %s.\n"
% (prefix, user))
util.multi_log(message)
return
@@ -98,10 +98,10 @@ def _pprint_key_entries(user, key_fn, key_entries, hash_meth='md5',
def handle(name, cfg, cloud, log, _args):
if util.is_true(cfg.get('no_ssh_fingerprints', False)):
log.debug(("Skipping module named %s, "
- "logging of ssh fingerprints disabled"), name)
+ "logging of SSH fingerprints disabled"), name)
return
- hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "md5")
+ hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "sha256")
(users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
for (user_name, _cfg) in users.items():
(key_fn, key_entries) = ssh_util.extract_authorized_keys(user_name)
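
A self-contained sketch of the hashing step, assuming ``_gen_fingerprint``
keeps its existing hexlify-and-split formatting while the default digest
moves from md5 to sha256::

    import base64
    import hashlib

    def fingerprint(b64_text, hash_meth='sha256'):
        # Hash the raw (base64-decoded) key material.
        digest = hashlib.new(hash_meth, base64.b64decode(b64_text)).hexdigest()
        # Split the hex digest into colon-separated byte pairs.
        return ':'.join(digest[i:i + 2] for i in range(0, len(digest), 2))

    fingerprint('AAAAB3NzaC1yc2EAAAABIwAA')
    # -> 64 hex chars rendered as 32 colon-separated pairs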
diff --git a/cloudinit/config/cc_ssh_import_id.py b/cloudinit/config/cc_ssh_import_id.py
index 6b46dafe..63f87298 100755
--- a/cloudinit/config/cc_ssh_import_id.py
+++ b/cloudinit/config/cc_ssh_import_id.py
@@ -9,9 +9,9 @@
"""
SSH Import Id
-------------
-**Summary:** import ssh id
+**Summary:** import SSH id
-This module imports ssh keys from either a public keyserver, usually launchpad
+This module imports SSH keys from either a public keyserver, usually launchpad
or github using ``ssh-import-id``. Keys are referenced by the username they are
associated with on the keyserver. The keyserver can be specified by prepending
either ``lp:`` for launchpad or ``gh:`` for github to the username.
@@ -98,12 +98,12 @@ def import_ssh_ids(ids, user, log):
raise exc
cmd = ["sudo", "-Hu", user, "ssh-import-id"] + ids
- log.debug("Importing ssh ids for user %s.", user)
+ log.debug("Importing SSH ids for user %s.", user)
try:
util.subp(cmd, capture=False)
except util.ProcessExecutionError as exc:
- util.logexc(log, "Failed to run command to import %s ssh ids", user)
+ util.logexc(log, "Failed to run command to import %s SSH ids", user)
raise exc
# vi: ts=4 expandtab
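
For illustration, the command ``import_ssh_ids`` ends up running for a given
user and id list (user and ids here are placeholders)::

    ids = ['lp:launchpad_user', 'gh:github_user']
    cmd = ['sudo', '-Hu', 'ubuntu', 'ssh-import-id'] + ids
    # runs: sudo -Hu ubuntu ssh-import-id lp:launchpad_user gh:github_user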
diff --git a/cloudinit/config/cc_ubuntu_advantage.py b/cloudinit/config/cc_ubuntu_advantage.py
index 5e082bd6..8b6d2a1a 100644
--- a/cloudinit/config/cc_ubuntu_advantage.py
+++ b/cloudinit/config/cc_ubuntu_advantage.py
@@ -1,150 +1,141 @@
-# Copyright (C) 2018 Canonical Ltd.
-#
# This file is part of cloud-init. See LICENSE file for license information.
-"""Ubuntu advantage: manage ubuntu-advantage offerings from Canonical."""
+"""ubuntu_advantage: Configure Ubuntu Advantage support services"""
-import sys
from textwrap import dedent
-from cloudinit import log as logging
from cloudinit.config.schema import (
get_schema_doc, validate_cloudconfig_schema)
+from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
-from cloudinit.subp import prepend_base_command
from cloudinit import util
-distros = ['ubuntu']
-frequency = PER_INSTANCE
+UA_URL = 'https://ubuntu.com/advantage'
-LOG = logging.getLogger(__name__)
+distros = ['ubuntu']
schema = {
'id': 'cc_ubuntu_advantage',
'name': 'Ubuntu Advantage',
- 'title': 'Install, configure and manage ubuntu-advantage offerings',
+ 'title': 'Configure Ubuntu Advantage support services',
'description': dedent("""\
- This module provides configuration options to setup ubuntu-advantage
- subscriptions.
-
- .. note::
- Both ``commands`` value can be either a dictionary or a list. If
- the configuration provided is a dictionary, the keys are only used
- to order the execution of the commands and the dictionary is
- merged with any vendor-data ubuntu-advantage configuration
- provided. If a ``commands`` is provided as a list, any vendor-data
- ubuntu-advantage ``commands`` are ignored.
-
- Ubuntu-advantage ``commands`` is a dictionary or list of
- ubuntu-advantage commands to run on the deployed machine.
- These commands can be used to enable or disable subscriptions to
- various ubuntu-advantage products. See 'man ubuntu-advantage' for more
- information on supported subcommands.
-
- .. note::
- Each command item can be a string or list. If the item is a list,
- 'ubuntu-advantage' can be omitted and it will automatically be
- inserted as part of the command.
+ Attach machine to an existing Ubuntu Advantage support contract and
+ enable or disable support services such as Livepatch, ESM,
+ FIPS and FIPS Updates. When attaching a machine to Ubuntu Advantage,
+ one can also specify services to enable. When the 'enable'
+ list is present, any named service will be enabled and all absent
+ services will remain disabled.
+
+ Note that when enabling FIPS or FIPS updates you will need to schedule
+ a reboot to ensure the machine is running the FIPS-compliant kernel.
+ See :ref:`Power State Change` for information on how to configure
+ cloud-init to perform this reboot.
"""),
'distros': distros,
'examples': [dedent("""\
- # Enable Extended Security Maintenance using your service auth token
- ubuntu-advantage:
- commands:
- 00: ubuntu-advantage enable-esm <token>
- """), dedent("""\
- # Enable livepatch by providing your livepatch token
+ # Attach the machine to an Ubuntu Advantage support contract with a
+ # UA contract token obtained from %s.
+ ubuntu_advantage:
+ token: <ua_contract_token>
+ """ % UA_URL), dedent("""\
+ # Attach the machine to an Ubuntu Advantage support contract enabling
+ # only fips and esm services. Services will only be enabled if
+      # the environment supports said service. Otherwise, warnings will
+      # be logged for any incompatible services specified.
ubuntu-advantage:
- commands:
- 00: ubuntu-advantage enable-livepatch <livepatch-token>
-
+ token: <ua_contract_token>
+ enable:
+ - fips
+ - esm
"""), dedent("""\
- # Convenience: the ubuntu-advantage command can be omitted when
- # specifying commands as a list and 'ubuntu-advantage' will
- # automatically be prepended.
- # The following commands are equivalent
+ # Attach the machine to an Ubuntu Advantage support contract and enable
+ # the FIPS service. Perform a reboot once cloud-init has
+ # completed.
+ power_state:
+ mode: reboot
ubuntu-advantage:
- commands:
- 00: ['enable-livepatch', 'my-token']
- 01: ['ubuntu-advantage', 'enable-livepatch', 'my-token']
- 02: ubuntu-advantage enable-livepatch my-token
- 03: 'ubuntu-advantage enable-livepatch my-token'
- """)],
+ token: <ua_contract_token>
+ enable:
+ - fips
+ """)],
'frequency': PER_INSTANCE,
'type': 'object',
'properties': {
- 'ubuntu-advantage': {
+ 'ubuntu_advantage': {
'type': 'object',
'properties': {
- 'commands': {
- 'type': ['object', 'array'], # Array of strings or dict
- 'items': {
- 'oneOf': [
- {'type': 'array', 'items': {'type': 'string'}},
- {'type': 'string'}]
- },
- 'additionalItems': False, # Reject non-string & non-list
- 'minItems': 1,
- 'minProperties': 1,
+ 'enable': {
+ 'type': 'array',
+ 'items': {'type': 'string'},
+ },
+ 'token': {
+ 'type': 'string',
+ 'description': (
+ 'A contract token obtained from %s.' % UA_URL)
}
},
- 'additionalProperties': False, # Reject keys not in schema
- 'required': ['commands']
+ 'required': ['token'],
+ 'additionalProperties': False
}
}
}
-# TODO schema for 'assertions' and 'commands' are too permissive at the moment.
-# Once python-jsonschema supports schema draft 6 add support for arbitrary
-# object keys with 'patternProperties' constraint to validate string values.
-
__doc__ = get_schema_doc(schema) # Supplement python help()
-UA_CMD = "ubuntu-advantage"
-
-
-def run_commands(commands):
- """Run the commands provided in ubuntu-advantage:commands config.
+LOG = logging.getLogger(__name__)
- Commands are run individually. Any errors are collected and reported
- after attempting all commands.
- @param commands: A list or dict containing commands to run. Keys of a
- dict will be used to order the commands provided as dict values.
- """
- if not commands:
- return
- LOG.debug('Running user-provided ubuntu-advantage commands')
- if isinstance(commands, dict):
- # Sort commands based on dictionary key
- commands = [v for _, v in sorted(commands.items())]
- elif not isinstance(commands, list):
- raise TypeError(
- 'commands parameter was not a list or dict: {commands}'.format(
- commands=commands))
-
- fixed_ua_commands = prepend_base_command('ubuntu-advantage', commands)
-
- cmd_failures = []
- for command in fixed_ua_commands:
- shell = isinstance(command, str)
- try:
- util.subp(command, shell=shell, status_cb=sys.stderr.write)
- except util.ProcessExecutionError as e:
- cmd_failures.append(str(e))
- if cmd_failures:
- msg = (
- 'Failures running ubuntu-advantage commands:\n'
- '{cmd_failures}'.format(
- cmd_failures=cmd_failures))
+def configure_ua(token=None, enable=None):
+ """Call ua commandline client to attach or enable services."""
+ error = None
+ if not token:
+        error = 'ubuntu_advantage: token must be provided'
+ LOG.error(error)
+ raise RuntimeError(error)
+
+ if enable is None:
+ enable = []
+ elif isinstance(enable, str):
+        LOG.warning('ubuntu_advantage: enable should be a list, not a'
+                    ' string; treating it as a single service to enable')
+ enable = [enable]
+ elif not isinstance(enable, list):
+ LOG.warning('ubuntu_advantage: enable should be a list, not'
+ ' a %s; skipping enabling services',
+ type(enable).__name__)
+ enable = []
+
+ attach_cmd = ['ua', 'attach', token]
+ LOG.debug('Attaching to Ubuntu Advantage. %s', ' '.join(attach_cmd))
+ try:
+ util.subp(attach_cmd)
+ except util.ProcessExecutionError as e:
+ msg = 'Failure attaching Ubuntu Advantage:\n{error}'.format(
+ error=str(e))
util.logexc(LOG, msg)
raise RuntimeError(msg)
+ enable_errors = []
+ for service in enable:
+ try:
+ cmd = ['ua', 'enable', service]
+ util.subp(cmd, capture=True)
+ except util.ProcessExecutionError as e:
+ enable_errors.append((service, e))
+ if enable_errors:
+ for service, error in enable_errors:
+ msg = 'Failure enabling "{service}":\n{error}'.format(
+ service=service, error=str(error))
+ util.logexc(LOG, msg)
+ raise RuntimeError(
+ 'Failure enabling Ubuntu Advantage service(s): {}'.format(
+ ', '.join('"{}"'.format(service)
+ for service, _ in enable_errors)))
def maybe_install_ua_tools(cloud):
"""Install ubuntu-advantage-tools if not present."""
- if util.which('ubuntu-advantage'):
+ if util.which('ua'):
return
try:
cloud.distro.update_package_sources()
@@ -159,14 +150,28 @@ def maybe_install_ua_tools(cloud):
def handle(name, cfg, cloud, log, args):
- cfgin = cfg.get('ubuntu-advantage')
- if cfgin is None:
- LOG.debug(("Skipping module named %s,"
- " no 'ubuntu-advantage' key in configuration"), name)
+ ua_section = None
+ if 'ubuntu-advantage' in cfg:
+ LOG.warning('Deprecated configuration key "ubuntu-advantage" provided.'
+ ' Expected underscore delimited "ubuntu_advantage"; will'
+ ' attempt to continue.')
+ ua_section = cfg['ubuntu-advantage']
+ if 'ubuntu_advantage' in cfg:
+ ua_section = cfg['ubuntu_advantage']
+ if ua_section is None:
+ LOG.debug("Skipping module named %s,"
+ " no 'ubuntu_advantage' configuration found", name)
return
-
validate_cloudconfig_schema(cfg, schema)
+ if 'commands' in ua_section:
+ msg = (
+ 'Deprecated configuration "ubuntu-advantage: commands" provided.'
+ ' Expected "token"')
+ LOG.error(msg)
+ raise RuntimeError(msg)
+
maybe_install_ua_tools(cloud)
- run_commands(cfgin.get('commands', []))
+ configure_ua(token=ua_section.get('token'),
+ enable=ua_section.get('enable'))
# vi: ts=4 expandtab
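
A quick sketch of the subprocess calls ``configure_ua`` now makes, mirroring
the unit tests below; the token is a placeholder and ``util.subp`` is mocked
so nothing real runs::

    from unittest import mock
    from cloudinit.config.cc_ubuntu_advantage import configure_ua

    with mock.patch(
            'cloudinit.config.cc_ubuntu_advantage.util.subp') as m_subp:
        configure_ua(token='<ua_contract_token>', enable=['fips'])
    assert m_subp.call_args_list == [
        mock.call(['ua', 'attach', '<ua_contract_token>']),
        mock.call(['ua', 'enable', 'fips'], capture=True)]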
diff --git a/cloudinit/config/cc_ubuntu_drivers.py b/cloudinit/config/cc_ubuntu_drivers.py
new file mode 100644
index 00000000..297451d6
--- /dev/null
+++ b/cloudinit/config/cc_ubuntu_drivers.py
@@ -0,0 +1,160 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Ubuntu Drivers: Interact with third party drivers in Ubuntu."""
+
+import os
+from textwrap import dedent
+
+from cloudinit.config.schema import (
+ get_schema_doc, validate_cloudconfig_schema)
+from cloudinit import log as logging
+from cloudinit.settings import PER_INSTANCE
+from cloudinit import temp_utils
+from cloudinit import type_utils
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+
+frequency = PER_INSTANCE
+distros = ['ubuntu']
+schema = {
+ 'id': 'cc_ubuntu_drivers',
+ 'name': 'Ubuntu Drivers',
+ 'title': 'Interact with third party drivers in Ubuntu.',
+ 'description': dedent("""\
+ This module interacts with the 'ubuntu-drivers' command to install
+ third party driver packages."""),
+ 'distros': distros,
+ 'examples': [dedent("""\
+ drivers:
+ nvidia:
+ license-accepted: true
+ """)],
+ 'frequency': frequency,
+ 'type': 'object',
+ 'properties': {
+ 'drivers': {
+ 'type': 'object',
+ 'additionalProperties': False,
+ 'properties': {
+ 'nvidia': {
+ 'type': 'object',
+ 'additionalProperties': False,
+ 'required': ['license-accepted'],
+ 'properties': {
+ 'license-accepted': {
+ 'type': 'boolean',
+ 'description': ("Do you accept the NVIDIA driver"
+ " license?"),
+ },
+ 'version': {
+ 'type': 'string',
+ 'description': (
+ 'The version of the driver to install (e.g.'
+ ' "390", "410"). Defaults to the latest'
+ ' version.'),
+ },
+ },
+ },
+ },
+ },
+ },
+}
+OLD_UBUNTU_DRIVERS_STDERR_NEEDLE = (
+ "ubuntu-drivers: error: argument <command>: invalid choice: 'install'")
+
+__doc__ = get_schema_doc(schema) # Supplement python help()
+
+
+# Use a debconf template to configure a global debconf variable
+# (linux/nvidia/latelink) setting this to "true" allows the
+# 'linux-restricted-modules' deb to accept the NVIDIA EULA and the package
+# will automatically link the drivers to the running kernel.
+
+# EOL_XENIAL: can then drop this script and use python3-debconf which is only
+# available in Bionic and later. Can't use python3-debconf currently as it
+# isn't in Xenial and doesn't yet support X_LOADTEMPLATEFILE debconf command.
+
+NVIDIA_DEBCONF_CONTENT = """\
+Template: linux/nvidia/latelink
+Type: boolean
+Default: true
+Description: Late-link NVIDIA kernel modules?
+ Enable this to link the NVIDIA kernel modules in cloud-init and
+ make them available for use.
+"""
+
+NVIDIA_DRIVER_LATELINK_DEBCONF_SCRIPT = """\
+#!/bin/sh
+# Allow cloud-init to trigger EULA acceptance via registering a debconf
+# template to set linux/nvidia/latelink true
+. /usr/share/debconf/confmodule
+db_x_loadtemplatefile "$1" cloud-init
+"""
+
+
+def install_drivers(cfg, pkg_install_func):
+ if not isinstance(cfg, dict):
+ raise TypeError(
+ "'drivers' config expected dict, found '%s': %s" %
+ (type_utils.obj_name(cfg), cfg))
+
+ cfgpath = 'nvidia/license-accepted'
+ # Call translate_bool to ensure that we treat string values like "yes" as
+ # acceptance and _don't_ treat string values like "nah" as acceptance
+ # because they're True-ish
+ nv_acc = util.translate_bool(util.get_cfg_by_path(cfg, cfgpath))
+ if not nv_acc:
+ LOG.debug("Not installing NVIDIA drivers. %s=%s", cfgpath, nv_acc)
+ return
+
+ if not util.which('ubuntu-drivers'):
+ LOG.debug("'ubuntu-drivers' command not available. "
+ "Installing ubuntu-drivers-common")
+ pkg_install_func(['ubuntu-drivers-common'])
+
+ driver_arg = 'nvidia'
+ version_cfg = util.get_cfg_by_path(cfg, 'nvidia/version')
+ if version_cfg:
+ driver_arg += ':{}'.format(version_cfg)
+
+ LOG.debug("Installing and activating NVIDIA drivers (%s=%s, version=%s)",
+ cfgpath, nv_acc, version_cfg if version_cfg else 'latest')
+
+ # Register and set debconf selection linux/nvidia/latelink = true
+ tdir = temp_utils.mkdtemp(needs_exe=True)
+ debconf_file = os.path.join(tdir, 'nvidia.template')
+ debconf_script = os.path.join(tdir, 'nvidia-debconf.sh')
+ try:
+ util.write_file(debconf_file, NVIDIA_DEBCONF_CONTENT)
+ util.write_file(
+ debconf_script,
+ util.encode_text(NVIDIA_DRIVER_LATELINK_DEBCONF_SCRIPT),
+ mode=0o755)
+ util.subp([debconf_script, debconf_file])
+ except Exception as e:
+ util.logexc(
+ LOG, "Failed to register NVIDIA debconf template: %s", str(e))
+ raise
+ finally:
+ if os.path.isdir(tdir):
+ util.del_dir(tdir)
+
+ try:
+ util.subp(['ubuntu-drivers', 'install', '--gpgpu', driver_arg])
+ except util.ProcessExecutionError as exc:
+ if OLD_UBUNTU_DRIVERS_STDERR_NEEDLE in exc.stderr:
+ LOG.warning('the available version of ubuntu-drivers is'
+ ' too old to perform requested driver installation')
+ elif 'No drivers found for installation.' in exc.stdout:
+ LOG.warning('ubuntu-drivers found no drivers for installation')
+ raise
+
+
+def handle(name, cfg, cloud, log, _args):
+ if "drivers" not in cfg:
+ log.debug("Skipping module named %s, no 'drivers' key in config", name)
+ return
+
+ validate_cloudconfig_schema(cfg, schema)
+ install_drivers(cfg['drivers'], cloud.distro.install_packages)
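
A small sketch of how the version option maps onto the ``ubuntu-drivers``
invocation; ``build_driver_arg`` is a hypothetical helper written here only
to illustrate the logic inside ``install_drivers``::

    def build_driver_arg(cfg):
        # Append ':<version>' only when a version is configured.
        version = cfg.get('nvidia', {}).get('version')
        return 'nvidia:{}'.format(version) if version else 'nvidia'

    build_driver_arg({'nvidia': {'version': '410'}})          # 'nvidia:410'
    build_driver_arg({'nvidia': {'license-accepted': True}})  # 'nvidia'
    # install_drivers then runs:
    #   ubuntu-drivers install --gpgpu nvidia:410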
diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py
index c96eede1..03fffb96 100644
--- a/cloudinit/config/cc_update_etc_hosts.py
+++ b/cloudinit/config/cc_update_etc_hosts.py
@@ -62,8 +62,8 @@ def handle(name, cfg, cloud, log, _args):
if util.translate_bool(manage_hosts, addons=['template']):
(hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
if not hostname:
- log.warn(("Option 'manage_etc_hosts' was set,"
- " but no hostname was found"))
+ log.warning(("Option 'manage_etc_hosts' was set,"
+ " but no hostname was found"))
return
# Render from a template file
@@ -80,8 +80,8 @@ def handle(name, cfg, cloud, log, _args):
elif manage_hosts == "localhost":
(hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
if not hostname:
- log.warn(("Option 'manage_etc_hosts' was set,"
- " but no hostname was found"))
+ log.warning(("Option 'manage_etc_hosts' was set,"
+ " but no hostname was found"))
return
log.debug("Managing localhost in /etc/hosts")
diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py
index c32a743a..13764e60 100644
--- a/cloudinit/config/cc_users_groups.py
+++ b/cloudinit/config/cc_users_groups.py
@@ -51,14 +51,14 @@ config keys for an entry in ``users`` are as follows:
a Snappy user through ``snap create-user``. If an Ubuntu SSO account is
associated with the address, username and SSH keys will be requested from
there. Default: none
- - ``ssh_authorized_keys``: Optional. List of ssh keys to add to user's
+ - ``ssh_authorized_keys``: Optional. List of SSH keys to add to user's
authkeys file. Default: none. This key can not be combined with
``ssh_redirect_user``.
- ``ssh_import_id``: Optional. SSH id to import for user. Default: none.
This key can not be combined with ``ssh_redirect_user``.
- ``ssh_redirect_user``: Optional. Boolean set to true to disable SSH
- logins for this user. When specified, all cloud meta-data public ssh
- keys will be set up in a disabled state for this username. Any ssh login
+ logins for this user. When specified, all cloud meta-data public SSH
+ keys will be set up in a disabled state for this username. Any SSH login
as this username will timeout and prompt with a message to login instead
as the configured <default_username> for this instance. Default: false.
This key can not be combined with ``ssh_import_id`` or
diff --git a/cloudinit/config/cc_vyos.py b/cloudinit/config/cc_vyos.py
index bd595397..e51ed7f2 100644
--- a/cloudinit/config/cc_vyos.py
+++ b/cloudinit/config/cc_vyos.py
@@ -24,17 +24,22 @@ import os
import re
import sys
import ast
-import subprocess
-from ipaddress import IPv4Network
+import ipaddress
from cloudinit import stages
from cloudinit import util
from cloudinit.distros import ug_util
from cloudinit.settings import PER_INSTANCE
+from cloudinit import handlers
+from cloudinit import log as logging
from vyos.configtree import ConfigTree
+# configure logging
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.DEBUG)
+
frequency = PER_INSTANCE
class VyosError(Exception):
@@ -43,6 +48,7 @@ class VyosError(Exception):
"""
pass
+# configure user account with password
def set_pass_login(config, user, password, encrypted_pass):
if encrypted_pass:
config.set(['system', 'login', 'user', user, 'authentication', 'encrypted-password'], value=password, replace=True)
@@ -50,16 +56,15 @@ def set_pass_login(config, user, password, encrypted_pass):
config.set(['system', 'login', 'user', user, 'authentication', 'plaintext-password'], value=password, replace=True)
config.set_tag(['system', 'login', 'user'])
- config.set(['system', 'login', 'user', user, 'level'], value='admin', replace=True)
-
-def set_ssh_login(config, log, user, key_string, key_x):
+# configure user account with SSH key
+def set_ssh_login(config, user, key_string, key_x):
key_type = None
key_data = None
key_name = None
if key_string == '':
- log.debug("No keys found.")
+ logger.error("No keys found.")
return
key_parts = key_string.split(None)
@@ -72,11 +77,11 @@ def set_ssh_login(config, log, user, key_string, key_x):
key_data = key
if not key_type:
- util.logexc(log, 'Key type not defined, wrong ssh key format.')
+ logger.error("Key type not defined, wrong ssh key format.")
return
if not key_data:
- util.logexc(log, 'Key base64 not defined, wrong ssh key format.')
+ logger.error("Key base64 not defined, wrong ssh key format.")
return
if len(key_parts) > 2:
@@ -91,9 +96,9 @@ def set_ssh_login(config, log, user, key_string, key_x):
config.set(['system', 'login', 'user', user, 'authentication', 'public-keys', key_name , 'type'], value=key_type, replace=True)
config.set_tag(['system', 'login', 'user'])
config.set_tag(['system', 'login', 'user', user, 'authentication', 'public-keys'])
- config.set(['system', 'login', 'user', user, 'level'], value='admin', replace=True)
+# configure system parameters from OVF template
def set_config_ovf(config, hostname, metadata):
ip_0 = metadata['ip0']
mask_0 = metadata['netmask0']
@@ -105,7 +110,7 @@ def set_config_ovf(config, hostname, metadata):
APIDEBUG = metadata['APIDEBUG']
if ip_0 and ip_0 != 'null' and mask_0 and mask_0 != 'null' and gateway and gateway != 'null':
- cidr = str(IPv4Network('0.0.0.0/' + mask_0).prefixlen)
+ cidr = str(ipaddress.IPv4Network('0.0.0.0/' + mask_0).prefixlen)
ipcidr = ip_0 + '/' + cidr
config.set(['interfaces', 'ethernet', 'eth0', 'address'], value=ipcidr, replace=True)
@@ -148,59 +153,83 @@ def set_config_ovf(config, hostname, metadata):
config.set(['system', 'host-name'], value='vyos', replace=True)
-def set_config_interfaces(config, interface):
- for item in interface['subnets']:
- if item['type'] == 'static':
- if 'address' in item and runcommand("/usr/bin/ipaddrcheck --is-ipv4 " + item['address']) == 0:
- cidr = str(IPv4Network('0.0.0.0/' + item['netmask']).prefixlen)
- ipcidr = item['address'] + '/' + cidr
- config.set(['interfaces', 'ethernet', interface['name'], 'address'], value=ipcidr, replace=True)
- config.set_tag(['interfaces', 'ethernet'])
- if item['gateway']:
- config.set(['protocols', 'static', 'route', '0.0.0.0/0', 'next-hop'], value=item['gateway'], replace=True)
- config.set_tag(['protocols', 'static', 'route'])
- config.set_tag(['protocols', 'static', 'route', '0.0.0.0/0', 'next-hop'])
+# configure interface
+def set_config_interfaces(config, iface_name, iface_config):
+    # configure DHCP client
+    if iface_config.get('dhcp4') == True:
+        config.set(['interfaces', 'ethernet', iface_name, 'address'], value='dhcp', replace=True)
+        config.set_tag(['interfaces', 'ethernet'])
+    if iface_config.get('dhcp6') == True:
+        config.set(['interfaces', 'ethernet', iface_name, 'address'], value='dhcp6', replace=True)
+        config.set_tag(['interfaces', 'ethernet'])
- if 'address' in item and runcommand("/usr/bin/ipaddrcheck --is-ipv6 " + item['address']) == 0:
- config.set(['interfaces', 'ethernet', interface['name'], 'address'], value=item['address'], replace=False)
- config.set_tag(['interfaces', 'ethernet'])
- if item['gateway']:
- config.set(['protocols', 'static', 'route6', '::/0', 'next-hop'], value=item['gateway'], replace=True)
- config.set_tag(['protocols', 'static', 'route6'])
- config.set_tag(['protocols', 'static', 'route6', '::/0', 'next-hop'])
- else:
- config.set(['interfaces', 'ethernet', interface['name'], 'address'], value='dhcp', replace=True)
+ # configure static addresses
+ if 'addresses' in iface_config:
+ for item in iface_config['addresses']:
+ config.set(['interfaces', 'ethernet', iface_name, 'address'], value=item, replace=True)
config.set_tag(['interfaces', 'ethernet'])
+ # configure gateways
+ if 'gateway4' in iface_config:
+        config.set(['protocols', 'static', 'route', '0.0.0.0/0', 'next-hop'], value=iface_config['gateway4'], replace=True)
+ config.set_tag(['protocols', 'static', 'route'])
+ config.set_tag(['protocols', 'static', 'route', '0.0.0.0/0', 'next-hop'])
+ if 'gateway6' in iface_config:
+        config.set(['protocols', 'static', 'route6', '::/0', 'next-hop'], value=iface_config['gateway6'], replace=True)
+ config.set_tag(['protocols', 'static', 'route6'])
+ config.set_tag(['protocols', 'static', 'route6', '::/0', 'next-hop'])
+
+    # configure MTU
+ if 'mtu' in iface_config:
+ config.set(['interfaces', 'ethernet', iface_name, 'mtu'], value=iface_config['mtu'], replace=True)
+ config.set_tag(['interfaces', 'ethernet'])
+
+ # configure routes
+ if 'routes' in iface_config:
+ for item in iface_config['routes']:
+ try:
+ if ipaddress.ip_network(item['to']).version == 4:
+ config.set(['protocols', 'static', 'route', item['to'], 'next-hop'], value=item['via'], replace=True)
+ config.set_tag(['protocols', 'static', 'route'])
+ config.set_tag(['protocols', 'static', 'route', item['to'], 'next-hop'])
+ if ipaddress.ip_network(item['to']).version == 6:
+ config.set(['protocols', 'static', 'route6', item['to'], 'next-hop'], value=item['via'], replace=True)
+ config.set_tag(['protocols', 'static', 'route6'])
+ config.set_tag(['protocols', 'static', 'route6', item['to'], 'next-hop'])
+ except Exception as err:
+ logger.error("Impossible to detect IP protocol version: {}".format(err))
-def set_config_nameserver(config, log, interface):
- if 'address' in interface:
- for server in interface['address']:
- config.set(['system', 'name-server'], value=server, replace=False)
- else:
- log.debug("No name-servers found.")
- if 'search' in interface:
- for server in interface['search']:
- config.set(['system', 'domain-search'], value=server, replace=False)
- else:
- log.debug("No search-domains found.")
+ # configure nameservers
+ if 'nameservers' in iface_config:
+ if 'search' in iface_config['nameservers']:
+ for item in iface_config['nameservers']['search']:
+ config.set(['system', 'domain-search'], value=item, replace=False)
+ if 'addresses' in iface_config['nameservers']:
+ for item in iface_config['nameservers']['addresses']:
+ config.set(['system', 'name-server'], value=item, replace=False)
+# configure DHCP client for interface
def set_config_dhcp(config):
config.set(['interfaces', 'ethernet', 'eth0', 'address'], value='dhcp', replace=True)
config.set_tag(['interfaces', 'ethernet'])
+# configure SSH server service
def set_config_ssh(config):
config.set(['service', 'ssh'], replace=True)
config.set(['service', 'ssh', 'port'], value='22', replace=True)
config.set(['service', 'ssh', 'client-keepalive-interval'], value='180', replace=True)
+# configure hostname
def set_config_hostname(config, hostname):
config.set(['system', 'host-name'], value=hostname, replace=True)
+# configure SSH, eth0 interface and hostname
def set_config_cloud(config, hostname):
config.set(['service', 'ssh'], replace=True)
config.set(['service', 'ssh', 'port'], value='22', replace=True)
@@ -210,16 +239,7 @@ def set_config_cloud(config, hostname):
config.set(['system', 'host-name'], value=hostname, replace=True)
-def runcommand(cmd):
- proc = subprocess.Popen(cmd,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- shell=True,
- universal_newlines=True)
- std_out, std_err = proc.communicate()
- return proc.returncode
-
-
+# main config handler
def handle(name, cfg, cloud, log, _args):
init = stages.Init()
dc = init.fetch()
@@ -256,7 +276,7 @@ def handle(name, cfg, cloud, log, _args):
vyos_keys = metadata['public-keys']
for ssh_key in vyos_keys:
- set_ssh_login(config, log, user, ssh_key, key_x)
+ set_ssh_login(config, user, ssh_key, key_x)
key_x = key_x + 1
else:
encrypted_pass = False
@@ -284,20 +304,17 @@ def handle(name, cfg, cloud, log, _args):
vyos_keys.extend(cfgkeys)
for ssh_key in vyos_keys:
- set_ssh_login(config, log, user, ssh_key, key_x)
+ set_ssh_login(config, user, ssh_key, key_x)
key_x = key_x + 1
if 'OVF' in dc.dsname:
set_config_ovf(config, hostname, metadata)
key_y = 1
elif netcfg:
- for interface in netcfg['config']:
- if interface['type'] == 'physical':
- key_y = 1
- set_config_interfaces(config, interface)
-
- if interface['type'] == 'nameserver':
- set_config_nameserver(config, log, interface)
+ if 'ethernets' in netcfg:
+ key_y = 1
+ for interface_name, interface_config in netcfg['ethernets'].items():
+ set_config_interfaces(config, interface_name, interface_config)
set_config_ssh(config)
set_config_hostname(config, hostname)
@@ -313,4 +330,4 @@ def handle(name, cfg, cloud, log, _args):
with open(cfg_file_name, 'w') as f:
f.write(config.to_string())
except Exception as e:
- util.logexc(log, "Failed to write configs into file %s error %s", file_name, e)
+ logger.error("Failed to write configs into file %s error %s", file_name, e)
diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py
index 0b6546e2..bd87e9e5 100644
--- a/cloudinit/config/cc_write_files.py
+++ b/cloudinit/config/cc_write_files.py
@@ -57,7 +57,6 @@ binary gzip data can be specified and will be decoded before being written.
import base64
import os
-import six
from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
@@ -126,7 +125,7 @@ def decode_perms(perm, default):
if perm is None:
return default
try:
- if isinstance(perm, six.integer_types + (float,)):
+ if isinstance(perm, (int, float)):
# Just 'downcast' it (if a float)
return int(perm)
else:
diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py
index 6a42f499..3673166a 100644
--- a/cloudinit/config/cc_yum_add_repo.py
+++ b/cloudinit/config/cc_yum_add_repo.py
@@ -30,13 +30,9 @@ entry, the config entry will be skipped.
# any repository configuration options (see man yum.conf)
"""
+import io
import os
-
-try:
- from configparser import ConfigParser
-except ImportError:
- from ConfigParser import ConfigParser
-import six
+from configparser import ConfigParser
from cloudinit import util
@@ -57,7 +53,7 @@ def _format_repo_value(val):
# Can handle 'lists' in certain cases
# See: https://linux.die.net/man/5/yum.conf
return "\n".join([_format_repo_value(v) for v in val])
- if not isinstance(val, six.string_types):
+ if not isinstance(val, str):
return str(val)
return val
@@ -72,7 +68,7 @@ def _format_repository_config(repo_id, repo_config):
# For now assume that people using this know
# the format of yum and don't verify keys/values further
to_be.set(repo_id, k, _format_repo_value(v))
- to_be_stream = six.StringIO()
+ to_be_stream = io.StringIO()
to_be.write(to_be_stream)
to_be_stream.seek(0)
lines = to_be_stream.readlines()
@@ -113,16 +109,16 @@ def handle(name, cfg, _cloud, log, _args):
missing_required = 0
for req_field in ['baseurl']:
if req_field not in repo_config:
- log.warn(("Repository %s does not contain a %s"
- " configuration 'required' entry"),
- repo_id, req_field)
+ log.warning(("Repository %s does not contain a %s"
+ " configuration 'required' entry"),
+ repo_id, req_field)
missing_required += 1
if not missing_required:
repo_configs[canon_repo_id] = repo_config
repo_locations[canon_repo_id] = repo_fn_pth
else:
- log.warn("Repository %s is missing %s required fields, skipping!",
- repo_id, missing_required)
+ log.warning("Repository %s is missing %s required fields, "
+ "skipping!", repo_id, missing_required)
for (c_repo_id, path) in repo_locations.items():
repo_blob = _format_repository_config(c_repo_id,
repo_configs.get(c_repo_id))
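
For illustration, value formatting behaves the same after dropping six (the
boolean branch above this hunk is assumed unchanged)::

    _format_repo_value(['http://a', 'http://b'])  # -> 'http://a\nhttp://b'
    _format_repo_value(3)                         # -> '3'
    _format_repo_value('stable')                  # -> 'stable'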
diff --git a/cloudinit/config/cc_zypper_add_repo.py b/cloudinit/config/cc_zypper_add_repo.py
index aba26952..05855b0c 100644
--- a/cloudinit/config/cc_zypper_add_repo.py
+++ b/cloudinit/config/cc_zypper_add_repo.py
@@ -7,7 +7,6 @@
import configobj
import os
-from six import string_types
from textwrap import dedent
from cloudinit.config.schema import get_schema_doc
@@ -110,7 +109,7 @@ def _format_repo_value(val):
return 1 if val else 0
if isinstance(val, (list, tuple)):
return "\n ".join([_format_repo_value(v) for v in val])
- if not isinstance(val, string_types):
+ if not isinstance(val, str):
return str(val)
return val
diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py
index 080a6d06..807c3eee 100644
--- a/cloudinit/config/schema.py
+++ b/cloudinit/config/schema.py
@@ -367,7 +367,7 @@ def handle_schema_args(name, args):
if not args.annotate:
error(str(e))
except RuntimeError as e:
- error(str(e))
+ error(str(e))
else:
print("Valid cloud-config file {0}".format(args.config_file))
if args.doc:
diff --git a/cloudinit/config/tests/test_apt_pipelining.py b/cloudinit/config/tests/test_apt_pipelining.py
new file mode 100644
index 00000000..2a6bb10b
--- /dev/null
+++ b/cloudinit/config/tests/test_apt_pipelining.py
@@ -0,0 +1,28 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Tests cc_apt_pipelining handler"""
+
+import cloudinit.config.cc_apt_pipelining as cc_apt_pipelining
+
+from cloudinit.tests.helpers import CiTestCase, mock
+
+
+class TestAptPipelining(CiTestCase):
+
+ @mock.patch('cloudinit.config.cc_apt_pipelining.util.write_file')
+ def test_not_disabled_by_default(self, m_write_file):
+ """ensure that default behaviour is to not disable pipelining"""
+ cc_apt_pipelining.handle('foo', {}, None, mock.MagicMock(), None)
+ self.assertEqual(0, m_write_file.call_count)
+
+ @mock.patch('cloudinit.config.cc_apt_pipelining.util.write_file')
+ def test_false_disables_pipelining(self, m_write_file):
+ """ensure that pipelining can be disabled with correct config"""
+ cc_apt_pipelining.handle(
+ 'foo', {'apt_pipelining': 'false'}, None, mock.MagicMock(), None)
+ self.assertEqual(1, m_write_file.call_count)
+ args, _ = m_write_file.call_args
+ self.assertEqual(cc_apt_pipelining.DEFAULT_FILE, args[0])
+ self.assertIn('Pipeline-Depth "0"', args[1])
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/config/tests/test_set_passwords.py b/cloudinit/config/tests/test_set_passwords.py
index b051ec82..8247c388 100644
--- a/cloudinit/config/tests/test_set_passwords.py
+++ b/cloudinit/config/tests/test_set_passwords.py
@@ -1,6 +1,6 @@
# This file is part of cloud-init. See LICENSE file for license information.
-import mock
+from unittest import mock
from cloudinit.config import cc_set_passwords as setpass
from cloudinit.tests.helpers import CiTestCase
@@ -45,7 +45,7 @@ class TestHandleSshPwauth(CiTestCase):
"""If config is not updated, then no system restart should be done."""
setpass.handle_ssh_pwauth(True)
m_subp.assert_not_called()
- self.assertIn("No need to restart ssh", self.logs.getvalue())
+ self.assertIn("No need to restart SSH", self.logs.getvalue())
@mock.patch(MODPATH + "update_ssh_config", return_value=True)
@mock.patch(MODPATH + "util.subp")
@@ -68,4 +68,88 @@ class TestHandleSshPwauth(CiTestCase):
m_update.assert_called_with({optname: optval})
m_subp.assert_not_called()
+
+class TestSetPasswordsHandle(CiTestCase):
+ """Test cc_set_passwords.handle"""
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestSetPasswordsHandle, self).setUp()
+ self.add_patch('cloudinit.config.cc_set_passwords.sys.stderr', 'm_err')
+
+ def test_handle_on_empty_config(self, *args):
+ """handle logs that no password has changed when config is empty."""
+ cloud = self.tmp_cloud(distro='ubuntu')
+ setpass.handle(
+ 'IGNORED', cfg={}, cloud=cloud, log=self.logger, args=[])
+ self.assertEqual(
+ "DEBUG: Leaving SSH config 'PasswordAuthentication' unchanged. "
+ 'ssh_pwauth=None\n',
+ self.logs.getvalue())
+
+ @mock.patch(MODPATH + "util.subp")
+ def test_handle_on_chpasswd_list_parses_common_hashes(self, m_subp):
+ """handle parses command password hashes."""
+ cloud = self.tmp_cloud(distro='ubuntu')
+ valid_hashed_pwds = [
+ 'root:$2y$10$8BQjxjVByHA/Ee.O1bCXtO8S7Y5WojbXWqnqYpUW.BrPx/'
+ 'Dlew1Va',
+ 'ubuntu:$6$5hOurLPO$naywm3Ce0UlmZg9gG2Fl9acWCVEoakMMC7dR52q'
+ 'SDexZbrN9z8yHxhUM2b.sxpguSwOlbOQSW/HpXazGGx3oo1']
+ cfg = {'chpasswd': {'list': valid_hashed_pwds}}
+ with mock.patch(MODPATH + 'util.subp') as m_subp:
+ setpass.handle(
+ 'IGNORED', cfg=cfg, cloud=cloud, log=self.logger, args=[])
+ self.assertIn(
+ 'DEBUG: Handling input for chpasswd as list.',
+ self.logs.getvalue())
+ self.assertIn(
+ "DEBUG: Setting hashed password for ['root', 'ubuntu']",
+ self.logs.getvalue())
+ self.assertEqual(
+ [mock.call(['chpasswd', '-e'],
+ '\n'.join(valid_hashed_pwds) + '\n')],
+ m_subp.call_args_list)
+
+ @mock.patch(MODPATH + "util.is_FreeBSD")
+ @mock.patch(MODPATH + "util.subp")
+ def test_freebsd_calls_custom_pw_cmds_to_set_and_expire_passwords(
+ self, m_subp, m_is_freebsd):
+ """FreeBSD calls custom pw commands instead of chpasswd and passwd"""
+ m_is_freebsd.return_value = True
+ cloud = self.tmp_cloud(distro='freebsd')
+ valid_pwds = ['ubuntu:passw0rd']
+ cfg = {'chpasswd': {'list': valid_pwds}}
+ setpass.handle(
+ 'IGNORED', cfg=cfg, cloud=cloud, log=self.logger, args=[])
+ self.assertEqual([
+ mock.call(['pw', 'usermod', 'ubuntu', '-h', '0'], data='passw0rd',
+ logstring="chpasswd for ubuntu"),
+ mock.call(['pw', 'usermod', 'ubuntu', '-p', '01-Jan-1970'])],
+ m_subp.call_args_list)
+
+ @mock.patch(MODPATH + "util.is_FreeBSD")
+ @mock.patch(MODPATH + "util.subp")
+ def test_handle_on_chpasswd_list_creates_random_passwords(self, m_subp,
+ m_is_freebsd):
+ """handle parses command set random passwords."""
+ m_is_freebsd.return_value = False
+ cloud = self.tmp_cloud(distro='ubuntu')
+ valid_random_pwds = [
+ 'root:R',
+ 'ubuntu:RANDOM']
+ cfg = {'chpasswd': {'expire': 'false', 'list': valid_random_pwds}}
+ with mock.patch(MODPATH + 'util.subp') as m_subp:
+ setpass.handle(
+ 'IGNORED', cfg=cfg, cloud=cloud, log=self.logger, args=[])
+ self.assertIn(
+ 'DEBUG: Handling input for chpasswd as list.',
+ self.logs.getvalue())
+ self.assertNotEqual(
+ [mock.call(['chpasswd'],
+ '\n'.join(valid_random_pwds) + '\n')],
+ m_subp.call_args_list)
+
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/tests/test_snap.py b/cloudinit/config/tests/test_snap.py
index 3c472891..cbbb173d 100644
--- a/cloudinit/config/tests/test_snap.py
+++ b/cloudinit/config/tests/test_snap.py
@@ -1,7 +1,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
import re
-from six import StringIO
+from io import StringIO
from cloudinit.config.cc_snap import (
ASSERTIONS_FILE, add_assertions, handle, maybe_install_squashfuse,
diff --git a/cloudinit/config/tests/test_ssh.py b/cloudinit/config/tests/test_ssh.py
index c8a4271f..0c554414 100644
--- a/cloudinit/config/tests/test_ssh.py
+++ b/cloudinit/config/tests/test_ssh.py
@@ -1,9 +1,13 @@
# This file is part of cloud-init. See LICENSE file for license information.
+import os.path
from cloudinit.config import cc_ssh
from cloudinit import ssh_util
from cloudinit.tests.helpers import CiTestCase, mock
+import logging
+
+LOG = logging.getLogger(__name__)
MODPATH = "cloudinit.config.cc_ssh."
@@ -12,6 +16,25 @@ MODPATH = "cloudinit.config.cc_ssh."
class TestHandleSsh(CiTestCase):
"""Test cc_ssh handling of ssh config."""
+ def _publish_hostkey_test_setup(self):
+ self.test_hostkeys = {
+ 'dsa': ('ssh-dss', 'AAAAB3NzaC1kc3MAAACB'),
+ 'ecdsa': ('ecdsa-sha2-nistp256', 'AAAAE2VjZ'),
+ 'ed25519': ('ssh-ed25519', 'AAAAC3NzaC1lZDI'),
+ 'rsa': ('ssh-rsa', 'AAAAB3NzaC1yc2EAAA'),
+ }
+ self.test_hostkey_files = []
+ hostkey_tmpdir = self.tmp_dir()
+ for key_type in ['dsa', 'ecdsa', 'ed25519', 'rsa']:
+ key_data = self.test_hostkeys[key_type]
+ filename = 'ssh_host_%s_key.pub' % key_type
+ filepath = os.path.join(hostkey_tmpdir, filename)
+ self.test_hostkey_files.append(filepath)
+ with open(filepath, 'w') as f:
+ f.write(' '.join(key_data))
+
+ cc_ssh.KEY_FILE_TPL = os.path.join(hostkey_tmpdir, 'ssh_host_%s_key')
+
def test_apply_credentials_with_user(self, m_setup_keys):
"""Apply keys for the given user and root."""
keys = ["key1"]
@@ -64,9 +87,10 @@ class TestHandleSsh(CiTestCase):
# Mock os.path.exits to True to short-circuit the key writing logic
m_path_exists.return_value = True
m_nug.return_value = ([], {})
+ cc_ssh.PUBLISH_HOST_KEYS = False
cloud = self.tmp_cloud(
distro='ubuntu', metadata={'public-keys': keys})
- cc_ssh.handle("name", cfg, cloud, None, None)
+ cc_ssh.handle("name", cfg, cloud, LOG, None)
options = ssh_util.DISABLE_USER_OPTS.replace("$USER", "NONE")
options = options.replace("$DISABLE_USER", "root")
m_glob.assert_called_once_with('/etc/ssh/ssh_host_*key*')
@@ -82,6 +106,31 @@ class TestHandleSsh(CiTestCase):
@mock.patch(MODPATH + "glob.glob")
@mock.patch(MODPATH + "ug_util.normalize_users_groups")
@mock.patch(MODPATH + "os.path.exists")
+ def test_dont_allow_public_ssh_keys(self, m_path_exists, m_nug,
+ m_glob, m_setup_keys):
+ """Test allow_public_ssh_keys=False ignores ssh public keys from
+ platform.
+ """
+ cfg = {"allow_public_ssh_keys": False}
+ keys = ["key1"]
+ user = "clouduser"
+ m_glob.return_value = [] # Return no matching keys to prevent removal
+ # Mock os.path.exits to True to short-circuit the key writing logic
+ m_path_exists.return_value = True
+ m_nug.return_value = ({user: {"default": user}}, {})
+ cloud = self.tmp_cloud(
+ distro='ubuntu', metadata={'public-keys': keys})
+ cc_ssh.handle("name", cfg, cloud, LOG, None)
+
+ options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user)
+ options = options.replace("$DISABLE_USER", "root")
+ self.assertEqual([mock.call(set(), user),
+ mock.call(set(), "root", options=options)],
+ m_setup_keys.call_args_list)
+
+ @mock.patch(MODPATH + "glob.glob")
+ @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+ @mock.patch(MODPATH + "os.path.exists")
def test_handle_no_cfg_and_default_root(self, m_path_exists, m_nug,
m_glob, m_setup_keys):
"""Test handle with no config and a default distro user."""
@@ -94,7 +143,7 @@ class TestHandleSsh(CiTestCase):
m_nug.return_value = ({user: {"default": user}}, {})
cloud = self.tmp_cloud(
distro='ubuntu', metadata={'public-keys': keys})
- cc_ssh.handle("name", cfg, cloud, None, None)
+ cc_ssh.handle("name", cfg, cloud, LOG, None)
options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user)
options = options.replace("$DISABLE_USER", "root")
@@ -119,7 +168,7 @@ class TestHandleSsh(CiTestCase):
m_nug.return_value = ({user: {"default": user}}, {})
cloud = self.tmp_cloud(
distro='ubuntu', metadata={'public-keys': keys})
- cc_ssh.handle("name", cfg, cloud, None, None)
+ cc_ssh.handle("name", cfg, cloud, LOG, None)
options = ssh_util.DISABLE_USER_OPTS.replace("$USER", user)
options = options.replace("$DISABLE_USER", "root")
@@ -144,8 +193,153 @@ class TestHandleSsh(CiTestCase):
cloud = self.tmp_cloud(
distro='ubuntu', metadata={'public-keys': keys})
cloud.get_public_ssh_keys = mock.Mock(return_value=keys)
- cc_ssh.handle("name", cfg, cloud, None, None)
+ cc_ssh.handle("name", cfg, cloud, LOG, None)
self.assertEqual([mock.call(set(keys), user),
mock.call(set(keys), "root", options="")],
m_setup_keys.call_args_list)
+
+ @mock.patch(MODPATH + "glob.glob")
+ @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+ @mock.patch(MODPATH + "os.path.exists")
+ def test_handle_publish_hostkeys_default(
+ self, m_path_exists, m_nug, m_glob, m_setup_keys):
+ """Test handle with various configs for ssh_publish_hostkeys."""
+ self._publish_hostkey_test_setup()
+ cc_ssh.PUBLISH_HOST_KEYS = True
+ keys = ["key1"]
+ user = "clouduser"
+ # Return no matching keys for first glob, test keys for second.
+ m_glob.side_effect = iter([
+ [],
+ self.test_hostkey_files,
+ ])
+ # Mock os.path.exits to True to short-circuit the key writing logic
+ m_path_exists.return_value = True
+ m_nug.return_value = ({user: {"default": user}}, {})
+ cloud = self.tmp_cloud(
+ distro='ubuntu', metadata={'public-keys': keys})
+ cloud.datasource.publish_host_keys = mock.Mock()
+
+ cfg = {}
+ expected_call = [self.test_hostkeys[key_type] for key_type
+ in ['ecdsa', 'ed25519', 'rsa']]
+ cc_ssh.handle("name", cfg, cloud, LOG, None)
+ self.assertEqual([mock.call(expected_call)],
+ cloud.datasource.publish_host_keys.call_args_list)
+
+ @mock.patch(MODPATH + "glob.glob")
+ @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+ @mock.patch(MODPATH + "os.path.exists")
+ def test_handle_publish_hostkeys_config_enable(
+ self, m_path_exists, m_nug, m_glob, m_setup_keys):
+ """Test handle with various configs for ssh_publish_hostkeys."""
+ self._publish_hostkey_test_setup()
+ cc_ssh.PUBLISH_HOST_KEYS = False
+ keys = ["key1"]
+ user = "clouduser"
+ # Return no matching keys for first glob, test keys for second.
+ m_glob.side_effect = iter([
+ [],
+ self.test_hostkey_files,
+ ])
+ # Mock os.path.exits to True to short-circuit the key writing logic
+ m_path_exists.return_value = True
+ m_nug.return_value = ({user: {"default": user}}, {})
+ cloud = self.tmp_cloud(
+ distro='ubuntu', metadata={'public-keys': keys})
+ cloud.datasource.publish_host_keys = mock.Mock()
+
+ cfg = {'ssh_publish_hostkeys': {'enabled': True}}
+ expected_call = [self.test_hostkeys[key_type] for key_type
+ in ['ecdsa', 'ed25519', 'rsa']]
+ cc_ssh.handle("name", cfg, cloud, LOG, None)
+ self.assertEqual([mock.call(expected_call)],
+ cloud.datasource.publish_host_keys.call_args_list)
+
+ @mock.patch(MODPATH + "glob.glob")
+ @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+ @mock.patch(MODPATH + "os.path.exists")
+ def test_handle_publish_hostkeys_config_disable(
+ self, m_path_exists, m_nug, m_glob, m_setup_keys):
+ """Test handle with various configs for ssh_publish_hostkeys."""
+ self._publish_hostkey_test_setup()
+ cc_ssh.PUBLISH_HOST_KEYS = True
+ keys = ["key1"]
+ user = "clouduser"
+ # Return no matching keys for first glob, test keys for second.
+ m_glob.side_effect = iter([
+ [],
+ self.test_hostkey_files,
+ ])
+ # Mock os.path.exits to True to short-circuit the key writing logic
+ m_path_exists.return_value = True
+ m_nug.return_value = ({user: {"default": user}}, {})
+ cloud = self.tmp_cloud(
+ distro='ubuntu', metadata={'public-keys': keys})
+ cloud.datasource.publish_host_keys = mock.Mock()
+
+ cfg = {'ssh_publish_hostkeys': {'enabled': False}}
+ cc_ssh.handle("name", cfg, cloud, LOG, None)
+        cloud.datasource.publish_host_keys.assert_not_called()
+
+ @mock.patch(MODPATH + "glob.glob")
+ @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+ @mock.patch(MODPATH + "os.path.exists")
+ def test_handle_publish_hostkeys_config_blacklist(
+ self, m_path_exists, m_nug, m_glob, m_setup_keys):
+ """Test handle with various configs for ssh_publish_hostkeys."""
+ self._publish_hostkey_test_setup()
+ cc_ssh.PUBLISH_HOST_KEYS = True
+ keys = ["key1"]
+ user = "clouduser"
+ # Return no matching keys for first glob, test keys for second.
+ m_glob.side_effect = iter([
+ [],
+ self.test_hostkey_files,
+ ])
+ # Mock os.path.exits to True to short-circuit the key writing logic
+ m_path_exists.return_value = True
+ m_nug.return_value = ({user: {"default": user}}, {})
+ cloud = self.tmp_cloud(
+ distro='ubuntu', metadata={'public-keys': keys})
+ cloud.datasource.publish_host_keys = mock.Mock()
+
+ cfg = {'ssh_publish_hostkeys': {'enabled': True,
+ 'blacklist': ['dsa', 'rsa']}}
+ expected_call = [self.test_hostkeys[key_type] for key_type
+ in ['ecdsa', 'ed25519']]
+ cc_ssh.handle("name", cfg, cloud, LOG, None)
+ self.assertEqual([mock.call(expected_call)],
+ cloud.datasource.publish_host_keys.call_args_list)
+
+ @mock.patch(MODPATH + "glob.glob")
+ @mock.patch(MODPATH + "ug_util.normalize_users_groups")
+ @mock.patch(MODPATH + "os.path.exists")
+ def test_handle_publish_hostkeys_empty_blacklist(
+ self, m_path_exists, m_nug, m_glob, m_setup_keys):
+ """Test handle with various configs for ssh_publish_hostkeys."""
+ self._publish_hostkey_test_setup()
+ cc_ssh.PUBLISH_HOST_KEYS = True
+ keys = ["key1"]
+ user = "clouduser"
+ # Return no matching keys for first glob, test keys for second.
+ m_glob.side_effect = iter([
+ [],
+ self.test_hostkey_files,
+ ])
+ # Mock os.path.exits to True to short-circuit the key writing logic
+ m_path_exists.return_value = True
+ m_nug.return_value = ({user: {"default": user}}, {})
+ cloud = self.tmp_cloud(
+ distro='ubuntu', metadata={'public-keys': keys})
+ cloud.datasource.publish_host_keys = mock.Mock()
+
+ cfg = {'ssh_publish_hostkeys': {'enabled': True,
+ 'blacklist': []}}
+ expected_call = [self.test_hostkeys[key_type] for key_type
+ in ['dsa', 'ecdsa', 'ed25519', 'rsa']]
+ cc_ssh.handle("name", cfg, cloud, LOG, None)
+ self.assertEqual([mock.call(expected_call)],
+ cloud.datasource.publish_host_keys.call_args_list)
diff --git a/cloudinit/config/tests/test_ubuntu_advantage.py b/cloudinit/config/tests/test_ubuntu_advantage.py
index b7cf9bee..8c4161ef 100644
--- a/cloudinit/config/tests/test_ubuntu_advantage.py
+++ b/cloudinit/config/tests/test_ubuntu_advantage.py
@@ -1,10 +1,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
-import re
-from six import StringIO
-
from cloudinit.config.cc_ubuntu_advantage import (
- handle, maybe_install_ua_tools, run_commands, schema)
+ configure_ua, handle, maybe_install_ua_tools, schema)
from cloudinit.config.schema import validate_cloudconfig_schema
from cloudinit import util
from cloudinit.tests.helpers import (
@@ -20,90 +17,120 @@ class FakeCloud(object):
self.distro = distro
-class TestRunCommands(CiTestCase):
+class TestConfigureUA(CiTestCase):
with_logs = True
allowed_subp = [CiTestCase.SUBP_SHELL_TRUE]
def setUp(self):
- super(TestRunCommands, self).setUp()
+ super(TestConfigureUA, self).setUp()
self.tmp = self.tmp_dir()
@mock.patch('%s.util.subp' % MPATH)
- def test_run_commands_on_empty_list(self, m_subp):
- """When provided with an empty list, run_commands does nothing."""
- run_commands([])
- self.assertEqual('', self.logs.getvalue())
- m_subp.assert_not_called()
-
- def test_run_commands_on_non_list_or_dict(self):
- """When provided an invalid type, run_commands raises an error."""
- with self.assertRaises(TypeError) as context_manager:
- run_commands(commands="I'm Not Valid")
+ def test_configure_ua_attach_error(self, m_subp):
+ """Errors from ua attach command are raised."""
+ m_subp.side_effect = util.ProcessExecutionError(
+ 'Invalid token SomeToken')
+ with self.assertRaises(RuntimeError) as context_manager:
+ configure_ua(token='SomeToken')
self.assertEqual(
- "commands parameter was not a list or dict: I'm Not Valid",
+ 'Failure attaching Ubuntu Advantage:\nUnexpected error while'
+ ' running command.\nCommand: -\nExit code: -\nReason: -\n'
+ 'Stdout: Invalid token SomeToken\nStderr: -',
str(context_manager.exception))
- def test_run_command_logs_commands_and_exit_codes_to_stderr(self):
- """All exit codes are logged to stderr."""
- outfile = self.tmp_path('output.log', dir=self.tmp)
-
- cmd1 = 'echo "HI" >> %s' % outfile
- cmd2 = 'bogus command'
- cmd3 = 'echo "MOM" >> %s' % outfile
- commands = [cmd1, cmd2, cmd3]
-
- mock_path = '%s.sys.stderr' % MPATH
- with mock.patch(mock_path, new_callable=StringIO) as m_stderr:
- with self.assertRaises(RuntimeError) as context_manager:
- run_commands(commands=commands)
-
- self.assertIsNotNone(
- re.search(r'bogus: (command )?not found',
- str(context_manager.exception)),
- msg='Expected bogus command not found')
- expected_stderr_log = '\n'.join([
- 'Begin run command: {cmd}'.format(cmd=cmd1),
- 'End run command: exit(0)',
- 'Begin run command: {cmd}'.format(cmd=cmd2),
- 'ERROR: End run command: exit(127)',
- 'Begin run command: {cmd}'.format(cmd=cmd3),
- 'End run command: exit(0)\n'])
- self.assertEqual(expected_stderr_log, m_stderr.getvalue())
-
- def test_run_command_as_lists(self):
- """When commands are specified as a list, run them in order."""
- outfile = self.tmp_path('output.log', dir=self.tmp)
-
- cmd1 = 'echo "HI" >> %s' % outfile
- cmd2 = 'echo "MOM" >> %s' % outfile
- commands = [cmd1, cmd2]
- with mock.patch('%s.sys.stderr' % MPATH, new_callable=StringIO):
- run_commands(commands=commands)
+ @mock.patch('%s.util.subp' % MPATH)
+ def test_configure_ua_attach_with_token(self, m_subp):
+ """When token is provided, attach the machine to ua using the token."""
+ configure_ua(token='SomeToken')
+ m_subp.assert_called_once_with(['ua', 'attach', 'SomeToken'])
+ self.assertEqual(
+ 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
+ self.logs.getvalue())
+
+ @mock.patch('%s.util.subp' % MPATH)
+ def test_configure_ua_attach_on_service_error(self, m_subp):
+ """all services should be enabled and then any failures raised"""
+ def fake_subp(cmd, capture=None):
+ fail_cmds = [['ua', 'enable', svc] for svc in ['esm', 'cc']]
+ if cmd in fail_cmds and capture:
+ svc = cmd[-1]
+ raise util.ProcessExecutionError(
+ 'Invalid {} credentials'.format(svc.upper()))
+
+ m_subp.side_effect = fake_subp
+
+ with self.assertRaises(RuntimeError) as context_manager:
+ configure_ua(token='SomeToken', enable=['esm', 'cc', 'fips'])
+ self.assertEqual(
+ m_subp.call_args_list,
+ [mock.call(['ua', 'attach', 'SomeToken']),
+ mock.call(['ua', 'enable', 'esm'], capture=True),
+ mock.call(['ua', 'enable', 'cc'], capture=True),
+ mock.call(['ua', 'enable', 'fips'], capture=True)])
self.assertIn(
- 'DEBUG: Running user-provided ubuntu-advantage commands',
+ 'WARNING: Failure enabling "esm":\nUnexpected error'
+ ' while running command.\nCommand: -\nExit code: -\nReason: -\n'
+ 'Stdout: Invalid ESM credentials\nStderr: -\n',
self.logs.getvalue())
- self.assertEqual('HI\nMOM\n', util.load_file(outfile))
self.assertIn(
- 'WARNING: Non-ubuntu-advantage commands in ubuntu-advantage'
- ' config:',
+ 'WARNING: Failure enabling "cc":\nUnexpected error'
+ ' while running command.\nCommand: -\nExit code: -\nReason: -\n'
+ 'Stdout: Invalid CC credentials\nStderr: -\n',
+ self.logs.getvalue())
+ self.assertEqual(
+ 'Failure enabling Ubuntu Advantage service(s): "esm", "cc"',
+ str(context_manager.exception))
+
+ @mock.patch('%s.util.subp' % MPATH)
+ def test_configure_ua_attach_with_empty_services(self, m_subp):
+ """When services is an empty list, do not auto-enable attach."""
+ configure_ua(token='SomeToken', enable=[])
+ m_subp.assert_called_once_with(['ua', 'attach', 'SomeToken'])
+ self.assertEqual(
+ 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
self.logs.getvalue())
- def test_run_command_dict_sorted_as_command_script(self):
- """When commands are a dict, sort them and run."""
- outfile = self.tmp_path('output.log', dir=self.tmp)
- cmd1 = 'echo "HI" >> %s' % outfile
- cmd2 = 'echo "MOM" >> %s' % outfile
- commands = {'02': cmd1, '01': cmd2}
- with mock.patch('%s.sys.stderr' % MPATH, new_callable=StringIO):
- run_commands(commands=commands)
+ @mock.patch('%s.util.subp' % MPATH)
+ def test_configure_ua_attach_with_specific_services(self, m_subp):
+ """When services a list, only enable specific services."""
+ configure_ua(token='SomeToken', enable=['fips'])
+ self.assertEqual(
+ m_subp.call_args_list,
+ [mock.call(['ua', 'attach', 'SomeToken']),
+ mock.call(['ua', 'enable', 'fips'], capture=True)])
+ self.assertEqual(
+ 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
+ self.logs.getvalue())
+
+ @mock.patch('%s.maybe_install_ua_tools' % MPATH, mock.MagicMock())
+ @mock.patch('%s.util.subp' % MPATH)
+ def test_configure_ua_attach_with_string_services(self, m_subp):
+ """When services a string, treat as singleton list and warn"""
+ configure_ua(token='SomeToken', enable='fips')
+ self.assertEqual(
+ m_subp.call_args_list,
+ [mock.call(['ua', 'attach', 'SomeToken']),
+ mock.call(['ua', 'enable', 'fips'], capture=True)])
+ self.assertEqual(
+ 'WARNING: ubuntu_advantage: enable should be a list, not a'
+ ' string; treating as a single enable\n'
+ 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
+ self.logs.getvalue())
- expected_messages = [
- 'DEBUG: Running user-provided ubuntu-advantage commands']
- for message in expected_messages:
- self.assertIn(message, self.logs.getvalue())
- self.assertEqual('MOM\nHI\n', util.load_file(outfile))
+ @mock.patch('%s.util.subp' % MPATH)
+ def test_configure_ua_attach_with_weird_services(self, m_subp):
+ """When services not string or list, warn but still attach"""
+ configure_ua(token='SomeToken', enable={'deffo': 'wont work'})
+ self.assertEqual(
+ m_subp.call_args_list,
+ [mock.call(['ua', 'attach', 'SomeToken'])])
+ self.assertEqual(
+ 'WARNING: ubuntu_advantage: enable should be a list, not a'
+ ' dict; skipping enabling services\n'
+ 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n',
+ self.logs.getvalue())
@skipUnlessJsonSchema()
@@ -112,90 +139,50 @@ class TestSchema(CiTestCase, SchemaTestCaseMixin):
with_logs = True
schema = schema
- def test_schema_warns_on_ubuntu_advantage_not_as_dict(self):
- """If ubuntu-advantage configuration is not a dict, emit a warning."""
- validate_cloudconfig_schema({'ubuntu-advantage': 'wrong type'}, schema)
+ @mock.patch('%s.maybe_install_ua_tools' % MPATH)
+ @mock.patch('%s.configure_ua' % MPATH)
+ def test_schema_warns_on_ubuntu_advantage_not_dict(self, _cfg, _):
+ """If ubuntu_advantage configuration is not a dict, emit a warning."""
+ validate_cloudconfig_schema({'ubuntu_advantage': 'wrong type'}, schema)
self.assertEqual(
- "WARNING: Invalid config:\nubuntu-advantage: 'wrong type' is not"
+ "WARNING: Invalid config:\nubuntu_advantage: 'wrong type' is not"
" of type 'object'\n",
self.logs.getvalue())
- @mock.patch('%s.run_commands' % MPATH)
- def test_schema_disallows_unknown_keys(self, _):
- """Unknown keys in ubuntu-advantage configuration emit warnings."""
+ @mock.patch('%s.maybe_install_ua_tools' % MPATH)
+ @mock.patch('%s.configure_ua' % MPATH)
+ def test_schema_disallows_unknown_keys(self, _cfg, _):
+ """Unknown keys in ubuntu_advantage configuration emit warnings."""
validate_cloudconfig_schema(
- {'ubuntu-advantage': {'commands': ['ls'], 'invalid-key': ''}},
+ {'ubuntu_advantage': {'token': 'winner', 'invalid-key': ''}},
schema)
self.assertIn(
- 'WARNING: Invalid config:\nubuntu-advantage: Additional properties'
+ 'WARNING: Invalid config:\nubuntu_advantage: Additional properties'
" are not allowed ('invalid-key' was unexpected)",
self.logs.getvalue())
- def test_warn_schema_requires_commands(self):
- """Warn when ubuntu-advantage configuration lacks commands."""
- validate_cloudconfig_schema(
- {'ubuntu-advantage': {}}, schema)
- self.assertEqual(
- "WARNING: Invalid config:\nubuntu-advantage: 'commands' is a"
- " required property\n",
- self.logs.getvalue())
-
- @mock.patch('%s.run_commands' % MPATH)
- def test_warn_schema_commands_is_not_list_or_dict(self, _):
- """Warn when ubuntu-advantage:commands config is not a list or dict."""
+ @mock.patch('%s.maybe_install_ua_tools' % MPATH)
+ @mock.patch('%s.configure_ua' % MPATH)
+ def test_warn_schema_requires_token(self, _cfg, _):
+ """Warn if ubuntu_advantage configuration lacks token."""
validate_cloudconfig_schema(
- {'ubuntu-advantage': {'commands': 'broken'}}, schema)
+ {'ubuntu_advantage': {'enable': ['esm']}}, schema)
self.assertEqual(
- "WARNING: Invalid config:\nubuntu-advantage.commands: 'broken' is"
- " not of type 'object', 'array'\n",
- self.logs.getvalue())
+ "WARNING: Invalid config:\nubuntu_advantage:"
+ " 'token' is a required property\n", self.logs.getvalue())
- @mock.patch('%s.run_commands' % MPATH)
- def test_warn_schema_when_commands_is_empty(self, _):
- """Emit warnings when ubuntu-advantage:commands is empty."""
- validate_cloudconfig_schema(
- {'ubuntu-advantage': {'commands': []}}, schema)
+ @mock.patch('%s.maybe_install_ua_tools' % MPATH)
+ @mock.patch('%s.configure_ua' % MPATH)
+ def test_warn_schema_services_is_not_list_or_dict(self, _cfg, _):
+ """Warn when ubuntu_advantage:enable config is not a list."""
validate_cloudconfig_schema(
- {'ubuntu-advantage': {'commands': {}}}, schema)
+ {'ubuntu_advantage': {'enable': 'needslist'}}, schema)
self.assertEqual(
- "WARNING: Invalid config:\nubuntu-advantage.commands: [] is too"
- " short\nWARNING: Invalid config:\nubuntu-advantage.commands: {}"
- " does not have enough properties\n",
+ "WARNING: Invalid config:\nubuntu_advantage: 'token' is a"
+ " required property\nubuntu_advantage.enable: 'needslist'"
+ " is not of type 'array'\n",
self.logs.getvalue())
- @mock.patch('%s.run_commands' % MPATH)
- def test_schema_when_commands_are_list_or_dict(self, _):
- """No warnings when ubuntu-advantage:commands are a list or dict."""
- validate_cloudconfig_schema(
- {'ubuntu-advantage': {'commands': ['valid']}}, schema)
- validate_cloudconfig_schema(
- {'ubuntu-advantage': {'commands': {'01': 'also valid'}}}, schema)
- self.assertEqual('', self.logs.getvalue())
-
- def test_duplicates_are_fine_array_array(self):
- """Duplicated commands array/array entries are allowed."""
- self.assertSchemaValid(
- {'commands': [["echo", "bye"], ["echo" "bye"]]},
- "command entries can be duplicate.")
-
- def test_duplicates_are_fine_array_string(self):
- """Duplicated commands array/string entries are allowed."""
- self.assertSchemaValid(
- {'commands': ["echo bye", "echo bye"]},
- "command entries can be duplicate.")
-
- def test_duplicates_are_fine_dict_array(self):
- """Duplicated commands dict/array entries are allowed."""
- self.assertSchemaValid(
- {'commands': {'00': ["echo", "bye"], '01': ["echo", "bye"]}},
- "command entries can be duplicate.")
-
- def test_duplicates_are_fine_dict_string(self):
- """Duplicated commands dict/string entries are allowed."""
- self.assertSchemaValid(
- {'commands': {'00': "echo bye", '01': "echo bye"}},
- "command entries can be duplicate.")
-
class TestHandle(CiTestCase):
@@ -205,41 +192,89 @@ class TestHandle(CiTestCase):
super(TestHandle, self).setUp()
self.tmp = self.tmp_dir()
- @mock.patch('%s.run_commands' % MPATH)
@mock.patch('%s.validate_cloudconfig_schema' % MPATH)
- def test_handle_no_config(self, m_schema, m_run):
+ def test_handle_no_config(self, m_schema):
"""When no ua-related configuration is provided, nothing happens."""
cfg = {}
handle('ua-test', cfg=cfg, cloud=None, log=self.logger, args=None)
self.assertIn(
- "DEBUG: Skipping module named ua-test, no 'ubuntu-advantage' key"
- " in config",
+ "DEBUG: Skipping module named ua-test, no 'ubuntu_advantage'"
+ ' configuration found',
self.logs.getvalue())
m_schema.assert_not_called()
- m_run.assert_not_called()
+ @mock.patch('%s.configure_ua' % MPATH)
@mock.patch('%s.maybe_install_ua_tools' % MPATH)
- def test_handle_tries_to_install_ubuntu_advantage_tools(self, m_install):
+ def test_handle_tries_to_install_ubuntu_advantage_tools(
+ self, m_install, m_cfg):
"""If ubuntu_advantage is provided, try installing ua-tools package."""
- cfg = {'ubuntu-advantage': {}}
+ cfg = {'ubuntu_advantage': {'token': 'valid'}}
mycloud = FakeCloud(None)
handle('nomatter', cfg=cfg, cloud=mycloud, log=self.logger, args=None)
m_install.assert_called_once_with(mycloud)
+ @mock.patch('%s.configure_ua' % MPATH)
@mock.patch('%s.maybe_install_ua_tools' % MPATH)
- def test_handle_runs_commands_provided(self, m_install):
- """When commands are specified as a list, run them."""
- outfile = self.tmp_path('output.log', dir=self.tmp)
+ def test_handle_passes_credentials_and_services_to_configure_ua(
+ self, m_install, m_configure_ua):
+ """All ubuntu_advantage config keys are passed to configure_ua."""
+ cfg = {'ubuntu_advantage': {'token': 'token', 'enable': ['esm']}}
+ handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None)
+ m_configure_ua.assert_called_once_with(
+ token='token', enable=['esm'])
+
+ @mock.patch('%s.maybe_install_ua_tools' % MPATH, mock.MagicMock())
+ @mock.patch('%s.configure_ua' % MPATH)
+ def test_handle_warns_on_deprecated_ubuntu_advantage_key_w_config(
+ self, m_configure_ua):
+ """Warning when ubuntu-advantage key is present with new config"""
+ cfg = {'ubuntu-advantage': {'token': 'token', 'enable': ['esm']}}
+ handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None)
+ self.assertEqual(
+ 'WARNING: Deprecated configuration key "ubuntu-advantage"'
+ ' provided. Expected underscore delimited "ubuntu_advantage";'
+ ' will attempt to continue.',
+ self.logs.getvalue().splitlines()[0])
+ m_configure_ua.assert_called_once_with(
+ token='token', enable=['esm'])
+
+ def test_handle_error_on_deprecated_commands_key_dashed(self):
+ """Error when commands is present in ubuntu-advantage key."""
+ cfg = {'ubuntu-advantage': {'commands': 'nogo'}}
+ with self.assertRaises(RuntimeError) as context_manager:
+ handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None)
+ self.assertEqual(
+ 'Deprecated configuration "ubuntu-advantage: commands" provided.'
+ ' Expected "token"',
+ str(context_manager.exception))
+
+ def test_handle_error_on_deprecated_commands_key_underscored(self):
+ """Error when commands is present in ubuntu_advantage key."""
+ cfg = {'ubuntu_advantage': {'commands': 'nogo'}}
+ with self.assertRaises(RuntimeError) as context_manager:
+ handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None)
+ self.assertEqual(
+ 'Deprecated configuration "ubuntu-advantage: commands" provided.'
+ ' Expected "token"',
+ str(context_manager.exception))
+ @mock.patch('%s.maybe_install_ua_tools' % MPATH, mock.MagicMock())
+ @mock.patch('%s.configure_ua' % MPATH)
+ def test_handle_prefers_new_style_config(
+ self, m_configure_ua):
+ """ubuntu_advantage should be preferred over ubuntu-advantage"""
cfg = {
- 'ubuntu-advantage': {'commands': ['echo "HI" >> %s' % outfile,
- 'echo "MOM" >> %s' % outfile]}}
- mock_path = '%s.sys.stderr' % MPATH
- with self.allow_subp([CiTestCase.SUBP_SHELL_TRUE]):
- with mock.patch(mock_path, new_callable=StringIO):
- handle('nomatter', cfg=cfg, cloud=None, log=self.logger,
- args=None)
- self.assertEqual('HI\nMOM\n', util.load_file(outfile))
+ 'ubuntu-advantage': {'token': 'nope', 'enable': ['wrong']},
+ 'ubuntu_advantage': {'token': 'token', 'enable': ['esm']},
+ }
+ handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None)
+ self.assertEqual(
+ 'WARNING: Deprecated configuration key "ubuntu-advantage"'
+ ' provided. Expected underscore delimited "ubuntu_advantage";'
+ ' will attempt to continue.',
+ self.logs.getvalue().splitlines()[0])
+ m_configure_ua.assert_called_once_with(
+ token='token', enable=['esm'])
class TestMaybeInstallUATools(CiTestCase):
@@ -253,7 +288,7 @@ class TestMaybeInstallUATools(CiTestCase):
@mock.patch('%s.util.which' % MPATH)
def test_maybe_install_ua_tools_noop_when_ua_tools_present(self, m_which):
"""Do nothing if ubuntu-advantage-tools already exists."""
- m_which.return_value = '/usr/bin/ubuntu-advantage' # already installed
+ m_which.return_value = '/usr/bin/ua' # already installed
distro = mock.MagicMock()
distro.update_package_sources.side_effect = RuntimeError(
'Some apt error')
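Taken together, the TestConfigureUA cases pin down a specific control flow: attach with the token, normalize enable to a list (warning on strings, skipping other types), best-effort enable each service, and only then raise an aggregate error naming the failed services. A rough sketch of that flow under those assumptions -- subprocess stands in for cloudinit.util.subp, and this is not the module's actual code:

    # Sketch of the configure_ua flow implied by the tests above; the
    # real module uses cloudinit.util.subp and its own error types.
    import logging
    import subprocess

    LOG = logging.getLogger(__name__)

    def configure_ua(token=None, enable=None):
        if enable is None:
            enable = []
        elif isinstance(enable, str):
            LOG.warning('enable should be a list, not a string;'
                        ' treating as a single enable')
            enable = [enable]
        elif not isinstance(enable, list):
            LOG.warning('enable should be a list, not a %s;'
                        ' skipping enabling services',
                        type(enable).__name__)
            enable = []
        LOG.debug('Attaching to Ubuntu Advantage. ua attach %s', token)
        subprocess.check_call(['ua', 'attach', token])
        failures = []
        for service in enable:
            try:
                # Enable each requested service; collect, don't abort.
                subprocess.check_call(['ua', 'enable', service])
            except subprocess.CalledProcessError as error:
                LOG.warning('Failure enabling "%s": %s', service, error)
                failures.append(service)
        if failures:
            raise RuntimeError(
                'Failure enabling Ubuntu Advantage service(s): '
                + ', '.join('"%s"' % name for name in failures))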
diff --git a/cloudinit/config/tests/test_ubuntu_drivers.py b/cloudinit/config/tests/test_ubuntu_drivers.py
new file mode 100644
index 00000000..46952692
--- /dev/null
+++ b/cloudinit/config/tests/test_ubuntu_drivers.py
@@ -0,0 +1,237 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import copy
+import os
+
+from cloudinit.tests.helpers import CiTestCase, skipUnlessJsonSchema, mock
+from cloudinit.config.schema import (
+ SchemaValidationError, validate_cloudconfig_schema)
+from cloudinit.config import cc_ubuntu_drivers as drivers
+from cloudinit.util import ProcessExecutionError
+
+MPATH = "cloudinit.config.cc_ubuntu_drivers."
+M_TMP_PATH = MPATH + "temp_utils.mkdtemp"
+OLD_UBUNTU_DRIVERS_ERROR_STDERR = (
+ "ubuntu-drivers: error: argument <command>: invalid choice: 'install' "
+ "(choose from 'list', 'autoinstall', 'devices', 'debug')\n")
+
+
+class AnyTempScriptAndDebconfFile(object):
+
+ def __init__(self, tmp_dir, debconf_file):
+ self.tmp_dir = tmp_dir
+ self.debconf_file = debconf_file
+
+ def __eq__(self, cmd):
+ if len(cmd) != 2:
+ return False
+ script, debconf_file = cmd
+ if script.startswith(self.tmp_dir) and script.endswith('.sh'):
+ return debconf_file == self.debconf_file
+ return False
+
+
+class TestUbuntuDrivers(CiTestCase):
+ cfg_accepted = {'drivers': {'nvidia': {'license-accepted': True}}}
+ install_gpgpu = ['ubuntu-drivers', 'install', '--gpgpu', 'nvidia']
+
+ with_logs = True
+
+ @skipUnlessJsonSchema()
+ def test_schema_requires_boolean_for_license_accepted(self):
+ with self.assertRaisesRegex(
+ SchemaValidationError, ".*license-accepted.*TRUE.*boolean"):
+ validate_cloudconfig_schema(
+ {'drivers': {'nvidia': {'license-accepted': "TRUE"}}},
+ schema=drivers.schema, strict=True)
+
+ @mock.patch(M_TMP_PATH)
+ @mock.patch(MPATH + "util.subp", return_value=('', ''))
+ @mock.patch(MPATH + "util.which", return_value=False)
+ def _assert_happy_path_taken(
+ self, config, m_which, m_subp, m_tmp):
+ """Positive path test through handle. Package should be installed."""
+ tdir = self.tmp_dir()
+ debconf_file = os.path.join(tdir, 'nvidia.template')
+ m_tmp.return_value = tdir
+ myCloud = mock.MagicMock()
+ drivers.handle('ubuntu_drivers', config, myCloud, None, None)
+ self.assertEqual([mock.call(['ubuntu-drivers-common'])],
+ myCloud.distro.install_packages.call_args_list)
+ self.assertEqual(
+ [mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)),
+ mock.call(self.install_gpgpu)],
+ m_subp.call_args_list)
+
+ def test_handle_does_package_install(self):
+ self._assert_happy_path_taken(self.cfg_accepted)
+
+ def test_trueish_strings_are_considered_approval(self):
+ for true_value in ['yes', 'true', 'on', '1']:
+ new_config = copy.deepcopy(self.cfg_accepted)
+ new_config['drivers']['nvidia']['license-accepted'] = true_value
+ self._assert_happy_path_taken(new_config)
+
+ @mock.patch(M_TMP_PATH)
+ @mock.patch(MPATH + "util.subp")
+ @mock.patch(MPATH + "util.which", return_value=False)
+ def test_handle_raises_error_if_no_drivers_found(
+ self, m_which, m_subp, m_tmp):
+ """If ubuntu-drivers doesn't install any drivers, raise an error."""
+ tdir = self.tmp_dir()
+ debconf_file = os.path.join(tdir, 'nvidia.template')
+ m_tmp.return_value = tdir
+ myCloud = mock.MagicMock()
+
+ def fake_subp(cmd):
+ if cmd[0].startswith(tdir):
+ return
+ raise ProcessExecutionError(
+ stdout='No drivers found for installation.\n', exit_code=1)
+ m_subp.side_effect = fake_subp
+
+ with self.assertRaises(Exception):
+ drivers.handle(
+ 'ubuntu_drivers', self.cfg_accepted, myCloud, None, None)
+ self.assertEqual([mock.call(['ubuntu-drivers-common'])],
+ myCloud.distro.install_packages.call_args_list)
+ self.assertEqual(
+ [mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)),
+ mock.call(self.install_gpgpu)],
+ m_subp.call_args_list)
+ self.assertIn('ubuntu-drivers found no drivers for installation',
+ self.logs.getvalue())
+
+ @mock.patch(MPATH + "util.subp", return_value=('', ''))
+ @mock.patch(MPATH + "util.which", return_value=False)
+ def _assert_inert_with_config(self, config, m_which, m_subp):
+ """Helper to reduce repetition when testing negative cases"""
+ myCloud = mock.MagicMock()
+ drivers.handle('ubuntu_drivers', config, myCloud, None, None)
+ self.assertEqual(0, myCloud.distro.install_packages.call_count)
+ self.assertEqual(0, m_subp.call_count)
+
+ def test_handle_inert_if_license_not_accepted(self):
+ """Ensure we don't do anything if the license is rejected."""
+ self._assert_inert_with_config(
+ {'drivers': {'nvidia': {'license-accepted': False}}})
+
+ def test_handle_inert_if_garbage_in_license_field(self):
+ """Ensure we don't do anything if unknown text is in license field."""
+ self._assert_inert_with_config(
+ {'drivers': {'nvidia': {'license-accepted': 'garbage'}}})
+
+ def test_handle_inert_if_no_license_key(self):
+ """Ensure we don't do anything if no license key."""
+ self._assert_inert_with_config({'drivers': {'nvidia': {}}})
+
+ def test_handle_inert_if_no_nvidia_key(self):
+ """Ensure we don't do anything if other license accepted."""
+ self._assert_inert_with_config(
+ {'drivers': {'acme': {'license-accepted': True}}})
+
+ def test_handle_inert_if_string_given(self):
+ """Ensure we don't do anything if string refusal given."""
+ for false_value in ['no', 'false', 'off', '0']:
+ self._assert_inert_with_config(
+ {'drivers': {'nvidia': {'license-accepted': false_value}}})
+
+ @mock.patch(MPATH + "install_drivers")
+ def test_handle_no_drivers_does_nothing(self, m_install_drivers):
+ """If no 'drivers' key in the config, nothing should be done."""
+ myCloud = mock.MagicMock()
+ myLog = mock.MagicMock()
+ drivers.handle('ubuntu_drivers', {'foo': 'bzr'}, myCloud, myLog, None)
+ self.assertIn('Skipping module named',
+ myLog.debug.call_args_list[0][0][0])
+ self.assertEqual(0, m_install_drivers.call_count)
+
+ @mock.patch(M_TMP_PATH)
+ @mock.patch(MPATH + "util.subp", return_value=('', ''))
+ @mock.patch(MPATH + "util.which", return_value=True)
+ def test_install_drivers_no_install_if_present(
+ self, m_which, m_subp, m_tmp):
+ """If 'ubuntu-drivers' is present, no package install should occur."""
+ tdir = self.tmp_dir()
+ debconf_file = os.path.join(tdir, 'nvidia.template')
+ m_tmp.return_value = tdir
+ pkg_install = mock.MagicMock()
+ drivers.install_drivers(self.cfg_accepted['drivers'],
+ pkg_install_func=pkg_install)
+ self.assertEqual(0, pkg_install.call_count)
+ self.assertEqual([mock.call('ubuntu-drivers')],
+ m_which.call_args_list)
+ self.assertEqual(
+ [mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)),
+ mock.call(self.install_gpgpu)],
+ m_subp.call_args_list)
+
+ def test_install_drivers_rejects_invalid_config(self):
+ """install_drivers should raise TypeError if not given a config dict"""
+ pkg_install = mock.MagicMock()
+ with self.assertRaisesRegex(TypeError, ".*expected dict.*"):
+ drivers.install_drivers("mystring", pkg_install_func=pkg_install)
+ self.assertEqual(0, pkg_install.call_count)
+
+ @mock.patch(M_TMP_PATH)
+ @mock.patch(MPATH + "util.subp")
+ @mock.patch(MPATH + "util.which", return_value=False)
+ def test_install_drivers_handles_old_ubuntu_drivers_gracefully(
+ self, m_which, m_subp, m_tmp):
+ """Older ubuntu-drivers versions should emit message and raise error"""
+ tdir = self.tmp_dir()
+ debconf_file = os.path.join(tdir, 'nvidia.template')
+ m_tmp.return_value = tdir
+ myCloud = mock.MagicMock()
+
+ def fake_subp(cmd):
+ if cmd[0].startswith(tdir):
+ return
+ raise ProcessExecutionError(
+ stderr=OLD_UBUNTU_DRIVERS_ERROR_STDERR, exit_code=2)
+ m_subp.side_effect = fake_subp
+
+ with self.assertRaises(Exception):
+ drivers.handle(
+ 'ubuntu_drivers', self.cfg_accepted, myCloud, None, None)
+ self.assertEqual([mock.call(['ubuntu-drivers-common'])],
+ myCloud.distro.install_packages.call_args_list)
+ self.assertEqual(
+ [mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)),
+ mock.call(self.install_gpgpu)],
+ m_subp.call_args_list)
+ self.assertIn('WARNING: the available version of ubuntu-drivers is'
+ ' too old to perform requested driver installation',
+ self.logs.getvalue())
+
+
+# Sub-class TestUbuntuDrivers to run the same test cases, but with a version
+class TestUbuntuDriversWithVersion(TestUbuntuDrivers):
+ cfg_accepted = {
+ 'drivers': {'nvidia': {'license-accepted': True, 'version': '123'}}}
+ install_gpgpu = ['ubuntu-drivers', 'install', '--gpgpu', 'nvidia:123']
+
+ @mock.patch(M_TMP_PATH)
+ @mock.patch(MPATH + "util.subp", return_value=('', ''))
+ @mock.patch(MPATH + "util.which", return_value=False)
+ def test_version_none_uses_latest(self, m_which, m_subp, m_tmp):
+ tdir = self.tmp_dir()
+ debconf_file = os.path.join(tdir, 'nvidia.template')
+ m_tmp.return_value = tdir
+ myCloud = mock.MagicMock()
+ version_none_cfg = {
+ 'drivers': {'nvidia': {'license-accepted': True, 'version': None}}}
+ drivers.handle(
+ 'ubuntu_drivers', version_none_cfg, myCloud, None, None)
+ self.assertEqual(
+ [mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)),
+ mock.call(['ubuntu-drivers', 'install', '--gpgpu', 'nvidia'])],
+ m_subp.call_args_list)
+
+ def test_specifying_a_version_doesnt_override_license_acceptance(self):
+ self._assert_inert_with_config({
+ 'drivers': {'nvidia': {'license-accepted': False,
+ 'version': '123'}}
+ })
+
+# vi: ts=4 expandtab
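The AnyTempScriptAndDebconfFile helper above works because mock compares recorded call arguments with ==, so any object with a custom __eq__ acts as a wildcard in assertions. The same trick in a standalone, runnable form (the matcher name here is made up):

    from unittest import mock

    class AnyStringEndingWith(object):
        """Matches any string with the given suffix when compared with ==."""
        def __init__(self, suffix):
            self.suffix = suffix

        def __eq__(self, other):
            return isinstance(other, str) and other.endswith(self.suffix)

    m = mock.Mock()
    m('/tmp/tmpabc123/setup.sh')
    m.assert_called_once_with(AnyStringEndingWith('.sh'))  # passes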
diff --git a/cloudinit/config/tests/test_users_groups.py b/cloudinit/config/tests/test_users_groups.py
index ba0afae3..f620b597 100644
--- a/cloudinit/config/tests/test_users_groups.py
+++ b/cloudinit/config/tests/test_users_groups.py
@@ -46,6 +46,34 @@ class TestHandleUsersGroups(CiTestCase):
mock.call('me2', default=False)])
m_group.assert_not_called()
+ @mock.patch('cloudinit.distros.freebsd.Distro.create_group')
+ @mock.patch('cloudinit.distros.freebsd.Distro.create_user')
+ def test_handle_users_in_cfg_calls_create_users_on_bsd(
+ self,
+ m_fbsd_user,
+ m_fbsd_group,
+ m_linux_user,
+ m_linux_group,
+ ):
+ """When users in config, create users with freebsd.create_user."""
+ cfg = {'users': ['default', {'name': 'me2'}]} # merged cloud-config
+ # System config defines a default user for the distro.
+ sys_cfg = {'default_user': {'name': 'freebsd', 'lock_passwd': True,
+ 'groups': ['wheel'],
+ 'shell': '/bin/tcsh'}}
+ metadata = {}
+ cloud = self.tmp_cloud(
+ distro='freebsd', sys_cfg=sys_cfg, metadata=metadata)
+ cc_users_groups.handle('modulename', cfg, cloud, None, None)
+ self.assertItemsEqual(
+ m_fbsd_user.call_args_list,
+ [mock.call('freebsd', groups='wheel', lock_passwd=True,
+ shell='/bin/tcsh'),
+ mock.call('me2', default=False)])
+ m_fbsd_group.assert_not_called()
+ m_linux_group.assert_not_called()
+ m_linux_user.assert_not_called()
+
def test_users_with_ssh_redirect_user_passes_keys(self, m_user, m_group):
"""When ssh_redirect_user is True pass default user and cloud keys."""
cfg = {
diff --git a/cloudinit/cs_utils.py b/cloudinit/cs_utils.py
index 51c09582..8bac9c44 100644
--- a/cloudinit/cs_utils.py
+++ b/cloudinit/cs_utils.py
@@ -14,7 +14,7 @@ Having the server definition accessible by the VM can be useful in various
ways. For example it is possible to easily determine from within the VM,
which network interfaces are connected to public and which to private network.
Another use is to pass some data to initial VM setup scripts, like setting the
-hostname to the VM name or passing ssh public keys through server meta.
+hostname to the VM name or passing SSH public keys through server meta.
For more information take a look at the Server Context section of CloudSigma
API Docs: http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index ef618c28..92598a2d 100644..100755
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -9,13 +9,11 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-import six
-from six import StringIO
-
import abc
import os
import re
import stat
+from io import StringIO
from cloudinit import importer
from cloudinit import log as logging
@@ -36,7 +34,7 @@ ALL_DISTROS = 'all'
OSFAMILIES = {
'debian': ['debian', 'ubuntu'],
- 'redhat': ['centos', 'fedora', 'rhel'],
+ 'redhat': ['amazon', 'centos', 'fedora', 'rhel'],
'gentoo': ['gentoo'],
'freebsd': ['freebsd'],
'suse': ['opensuse', 'sles'],
@@ -53,8 +51,7 @@ _EC2_AZ_RE = re.compile('^[a-z][a-z]-(?:[a-z]+-)+[0-9][a-z]$')
PREFERRED_NTP_CLIENTS = ['chrony', 'systemd-timesyncd', 'ntp', 'ntpdate']
-@six.add_metaclass(abc.ABCMeta)
-class Distro(object):
+class Distro(metaclass=abc.ABCMeta):
usr_lib_exec = "/usr/lib"
hosts_fn = "/etc/hosts"
@@ -145,7 +142,7 @@ class Distro(object):
# Write it out
# pylint: disable=assignment-from-no-return
- # We have implementations in arch, freebsd and gentoo still
+ # We have implementations in arch and gentoo still
dev_names = self._write_network(settings)
# pylint: enable=assignment-from-no-return
# Now try to bring them up
@@ -385,7 +382,7 @@ class Distro(object):
Add a user to the system using standard GNU tools
"""
# XXX need to make add_user idempotent somehow as we
- # still want to add groups or modify ssh keys on pre-existing
+ # still want to add groups or modify SSH keys on pre-existing
# users in the image.
if util.is_user(name):
LOG.info("User %s already exists, skipping.", name)
@@ -396,16 +393,16 @@ class Distro(object):
else:
create_groups = True
- adduser_cmd = ['useradd', name]
- log_adduser_cmd = ['useradd', name]
+ useradd_cmd = ['useradd', name]
+ log_useradd_cmd = ['useradd', name]
if util.system_is_snappy():
- adduser_cmd.append('--extrausers')
- log_adduser_cmd.append('--extrausers')
+ useradd_cmd.append('--extrausers')
+ log_useradd_cmd.append('--extrausers')
# Since we are creating users, we want to carefully validate the
# inputs. If something goes wrong, we can end up with a system
# that nobody can login to.
- adduser_opts = {
+ useradd_opts = {
"gecos": '--comment',
"homedir": '--home',
"primary_group": '--gid',
@@ -418,7 +415,7 @@ class Distro(object):
"selinux_user": '--selinux-user',
}
- adduser_flags = {
+ useradd_flags = {
"no_user_group": '--no-user-group',
"system": '--system',
"no_log_init": '--no-log-init',
@@ -429,7 +426,7 @@ class Distro(object):
# support kwargs having groups=[list] or groups="g1,g2"
groups = kwargs.get('groups')
if groups:
- if isinstance(groups, six.string_types):
+ if isinstance(groups, str):
groups = groups.split(",")
# remove any white spaces in group names, most likely
@@ -453,32 +450,32 @@ class Distro(object):
# Check the values and create the command
for key, val in sorted(kwargs.items()):
- if key in adduser_opts and val and isinstance(val, str):
- adduser_cmd.extend([adduser_opts[key], val])
+ if key in useradd_opts and val and isinstance(val, str):
+ useradd_cmd.extend([useradd_opts[key], val])
# Redact certain fields from the logs
if key in redact_opts:
- log_adduser_cmd.extend([adduser_opts[key], 'REDACTED'])
+ log_useradd_cmd.extend([useradd_opts[key], 'REDACTED'])
else:
- log_adduser_cmd.extend([adduser_opts[key], val])
+ log_useradd_cmd.extend([useradd_opts[key], val])
- elif key in adduser_flags and val:
- adduser_cmd.append(adduser_flags[key])
- log_adduser_cmd.append(adduser_flags[key])
+ elif key in useradd_flags and val:
+ useradd_cmd.append(useradd_flags[key])
+ log_useradd_cmd.append(useradd_flags[key])
# Don't create the home directory if directed so or if the user is a
# system user
if kwargs.get('no_create_home') or kwargs.get('system'):
- adduser_cmd.append('-M')
- log_adduser_cmd.append('-M')
+ useradd_cmd.append('-M')
+ log_useradd_cmd.append('-M')
else:
- adduser_cmd.append('-m')
- log_adduser_cmd.append('-m')
+ useradd_cmd.append('-m')
+ log_useradd_cmd.append('-m')
# Run the command
LOG.debug("Adding user %s", name)
try:
- util.subp(adduser_cmd, logstring=log_adduser_cmd)
+ util.subp(useradd_cmd, logstring=log_useradd_cmd)
except Exception as e:
util.logexc(LOG, "Failed to create user %s", name)
raise e
@@ -490,15 +487,15 @@ class Distro(object):
snapuser = kwargs.get('snapuser')
known = kwargs.get('known', False)
- adduser_cmd = ["snap", "create-user", "--sudoer", "--json"]
+ create_user_cmd = ["snap", "create-user", "--sudoer", "--json"]
if known:
- adduser_cmd.append("--known")
- adduser_cmd.append(snapuser)
+ create_user_cmd.append("--known")
+ create_user_cmd.append(snapuser)
# Run the command
LOG.debug("Adding snap user %s", name)
try:
- (out, err) = util.subp(adduser_cmd, logstring=adduser_cmd,
+ (out, err) = util.subp(create_user_cmd, logstring=create_user_cmd,
capture=True)
LOG.debug("snap create-user returned: %s:%s", out, err)
jobj = util.load_json(out)
@@ -544,7 +541,7 @@ class Distro(object):
if 'ssh_authorized_keys' in kwargs:
# Try to handle this in a smart manner.
keys = kwargs['ssh_authorized_keys']
- if isinstance(keys, six.string_types):
+ if isinstance(keys, str):
keys = [keys]
elif isinstance(keys, dict):
keys = list(keys.values())
@@ -561,7 +558,7 @@ class Distro(object):
cloud_keys = kwargs.get('cloud_public_ssh_keys', [])
if not cloud_keys:
LOG.warning(
- 'Unable to disable ssh logins for %s given'
+ 'Unable to disable SSH logins for %s given'
' ssh_redirect_user: %s. No cloud public-keys present.',
name, kwargs['ssh_redirect_user'])
else:
@@ -577,15 +574,27 @@ class Distro(object):
"""
Lock the password of a user, i.e., disable password logins
"""
+ # passwd must use short '-l' due to SLES11 lacking long form '--lock'
+ lock_tools = (['passwd', '-l', name], ['usermod', '--lock', name])
try:
- # Need to use the short option name '-l' instead of '--lock'
- # (which would be more descriptive) since SLES 11 doesn't know
- # about long names.
- util.subp(['passwd', '-l', name])
+ cmd = next(l for l in lock_tools if util.which(l[0]))
+ except StopIteration:
+ raise RuntimeError((
+ "Unable to lock user account '%s'. No tools available. "
+ " Tried: %s.") % (name, [c[0] for c in lock_tools]))
+ try:
+ util.subp(cmd)
except Exception as e:
util.logexc(LOG, 'Failed to disable password for user %s', name)
raise e
+ def expire_passwd(self, user):
+ try:
+ util.subp(['passwd', '--expire', user])
+ except Exception as e:
+ util.logexc(LOG, "Failed to set 'expire' for %s", user)
+ raise e
+
def set_passwd(self, user, passwd, hashed=False):
pass_string = '%s:%s' % (user, passwd)
cmd = ['chpasswd']
@@ -656,7 +665,7 @@ class Distro(object):
if isinstance(rules, (list, tuple)):
for rule in rules:
lines.append("%s %s" % (user, rule))
- elif isinstance(rules, six.string_types):
+ elif isinstance(rules, str):
lines.append("%s %s" % (user, rules))
else:
msg = "Can not create sudoers rule addition with type %r"
diff --git a/cloudinit/distros/amazon.py b/cloudinit/distros/amazon.py
new file mode 100644
index 00000000..ff9a549f
--- /dev/null
+++ b/cloudinit/distros/amazon.py
@@ -0,0 +1,26 @@
+# Copyright (C) 2012 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
+# Copyright (C) 2014 Amazon.com, Inc. or its affiliates.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+# Author: Andrew Jorgensen <ajorgens@amazon.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit.distros import rhel
+
+from cloudinit import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+class Distro(rhel.Distro):
+
+ def update_package_sources(self):
+ return None
+
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py
index b814c8ba..9f89c5f9 100644
--- a/cloudinit/distros/arch.py
+++ b/cloudinit/distros/arch.py
@@ -12,6 +12,8 @@ from cloudinit import util
from cloudinit.distros import net_util
from cloudinit.distros.parsers.hostname import HostnameConf
+from cloudinit.net.renderers import RendererNotFoundError
+
from cloudinit.settings import PER_INSTANCE
import os
@@ -24,6 +26,11 @@ class Distro(distros.Distro):
network_conf_dir = "/etc/netctl"
resolve_conf_fn = "/etc/resolv.conf"
init_cmd = ['systemctl'] # init scripts
+ renderer_configs = {
+ "netplan": {"netplan_path": "/etc/netplan/50-cloud-init.yaml",
+ "netplan_header": "# generated by cloud-init\n",
+ "postcmds": True}
+ }
def __init__(self, name, cfg, paths):
distros.Distro.__init__(self, name, cfg, paths)
@@ -50,6 +57,13 @@ class Distro(distros.Distro):
self.update_package_sources()
self.package_command('', pkgs=pkglist)
+ def _write_network_config(self, netconfig):
+ try:
+ return self._supported_write_network_config(netconfig)
+ except RendererNotFoundError:
+ # Fall back to old _write_network
+ raise NotImplementedError
+
def _write_network(self, settings):
entries = net_util.translate_network(settings)
LOG.debug("Translated ubuntu style network settings %s into %s",
diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py
index 0ad93ffe..128bb523 100644
--- a/cloudinit/distros/debian.py
+++ b/cloudinit/distros/debian.py
@@ -29,9 +29,10 @@ APT_GET_WRAPPER = {
'enabled': 'auto',
}
-ENI_HEADER = """# This file is generated from information provided by
-# the datasource. Changes to it will not persist across an instance.
-# To disable cloud-init's network configuration capabilities, write a file
+NETWORK_FILE_HEADER = """\
+# This file is generated from information provided by the datasource. Changes
+# to it will not persist across an instance reboot. To disable cloud-init's
+# network configuration capabilities, write a file
# /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following:
# network: {config: disabled}
"""
@@ -48,9 +49,9 @@ class Distro(distros.Distro):
}
renderer_configs = {
"eni": {"eni_path": network_conf_fn["eni"],
- "eni_header": ENI_HEADER},
+ "eni_header": NETWORK_FILE_HEADER},
"netplan": {"netplan_path": network_conf_fn["netplan"],
- "netplan_header": ENI_HEADER,
+ "netplan_header": NETWORK_FILE_HEADER,
"postcmds": True}
}
@@ -204,8 +205,7 @@ class Distro(distros.Distro):
["update"], freq=PER_INSTANCE)
def get_primary_arch(self):
- (arch, _err) = util.subp(['dpkg', '--print-architecture'])
- return str(arch).strip()
+ return util.get_dpkg_architecture()
def _get_wrapper_prefix(cmd, mode):
diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py
index ff22d568..026d1142 100644
--- a/cloudinit/distros/freebsd.py
+++ b/cloudinit/distros/freebsd.py
@@ -5,32 +5,28 @@
# This file is part of cloud-init. See LICENSE file for license information.
import os
-import six
-from six import StringIO
-
import re
+from io import StringIO
from cloudinit import distros
from cloudinit import helpers
from cloudinit import log as logging
+from cloudinit import net
from cloudinit import ssh_util
from cloudinit import util
-
-from cloudinit.distros import net_util
-from cloudinit.distros.parsers.resolv_conf import ResolvConf
-
+from cloudinit.distros import rhel_util
from cloudinit.settings import PER_INSTANCE
LOG = logging.getLogger(__name__)
class Distro(distros.Distro):
+ usr_lib_exec = '/usr/local/lib'
rc_conf_fn = "/etc/rc.conf"
login_conf_fn = '/etc/login.conf'
login_conf_fn_bak = '/etc/login.conf.orig'
- resolv_conf_fn = '/etc/resolv.conf'
ci_sudoers_fn = '/usr/local/etc/sudoers.d/90-cloud-init-users'
- default_primary_nic = 'hn0'
+ hostname_conf_fn = '/etc/rc.conf'
def __init__(self, name, cfg, paths):
distros.Distro.__init__(self, name, cfg, paths)
@@ -39,99 +35,8 @@ class Distro(distros.Distro):
# should only happen say once per instance...)
self._runner = helpers.Runners(paths)
self.osfamily = 'freebsd'
- self.ipv4_pat = re.compile(r"\s+inet\s+\d+[.]\d+[.]\d+[.]\d+")
cfg['ssh_svcname'] = 'sshd'
- # Updates a key in /etc/rc.conf.
- def updatercconf(self, key, value):
- LOG.debug("Checking %s for: %s = %s", self.rc_conf_fn, key, value)
- conf = self.loadrcconf()
- config_changed = False
- if key not in conf:
- LOG.debug("Adding key in %s: %s = %s", self.rc_conf_fn, key,
- value)
- conf[key] = value
- config_changed = True
- else:
- for item in conf.keys():
- if item == key and conf[item] != value:
- conf[item] = value
- LOG.debug("Changing key in %s: %s = %s", self.rc_conf_fn,
- key, value)
- config_changed = True
-
- if config_changed:
- LOG.info("Writing %s", self.rc_conf_fn)
- buf = StringIO()
- for keyval in conf.items():
- buf.write('%s="%s"\n' % keyval)
- util.write_file(self.rc_conf_fn, buf.getvalue())
-
- # Load the contents of /etc/rc.conf and store all keys in a dict. Make sure
- # quotes are ignored:
- # hostname="bla"
- def loadrcconf(self):
- RE_MATCH = re.compile(r'^(\w+)\s*=\s*(.*)\s*')
- conf = {}
- lines = util.load_file(self.rc_conf_fn).splitlines()
- for line in lines:
- m = RE_MATCH.match(line)
- if not m:
- LOG.debug("Skipping line from /etc/rc.conf: %s", line)
- continue
- key = m.group(1).rstrip()
- val = m.group(2).rstrip()
- # Kill them quotes (not completely correct, aka won't handle
- # quoted values, but should be ok ...)
- if val[0] in ('"', "'"):
- val = val[1:]
- if val[-1] in ('"', "'"):
- val = val[0:-1]
- if len(val) == 0:
- LOG.debug("Skipping empty value from /etc/rc.conf: %s", line)
- continue
- conf[key] = val
- return conf
-
- def readrcconf(self, key):
- conf = self.loadrcconf()
- try:
- val = conf[key]
- except KeyError:
- val = None
- return val
-
- # NOVA will inject something like eth0, rewrite that to use the FreeBSD
- # adapter. Since this adapter is based on the used driver, we need to
- # figure out which interfaces are available. On KVM platforms this is
- # vtnet0, where Xen would use xn0.
- def getnetifname(self, dev):
- LOG.debug("Translating network interface %s", dev)
- if dev.startswith('lo'):
- return dev
-
- n = re.search(r'\d+$', dev)
- index = n.group(0)
-
- (out, _err) = util.subp(['ifconfig', '-a'])
- ifconfigoutput = [x for x in (out.strip()).splitlines()
- if len(x.split()) > 0]
- bsddev = 'NOT_FOUND'
- for line in ifconfigoutput:
- m = re.match(r'^\w+', line)
- if m:
- if m.group(0).startswith('lo'):
- continue
- # Just settle with the first non-lo adapter we find, since it's
- # rather unlikely there will be multiple nicdrivers involved.
- bsddev = m.group(0)
- break
-
- # Replace the index with the one we're after.
- bsddev = re.sub(r'\d+$', index, bsddev)
- LOG.debug("Using network interface %s", bsddev)
- return bsddev
-
def _select_hostname(self, hostname, fqdn):
# Should be FQDN if available. See rc.conf(5) in FreeBSD
if fqdn:
@@ -139,56 +44,54 @@ class Distro(distros.Distro):
return hostname
def _read_system_hostname(self):
- sys_hostname = self._read_hostname(filename=None)
- return ('rc.conf', sys_hostname)
+ sys_hostname = self._read_hostname(self.hostname_conf_fn)
+ return (self.hostname_conf_fn, sys_hostname)
def _read_hostname(self, filename, default=None):
- hostname = None
- try:
- hostname = self.readrcconf('hostname')
- except IOError:
- pass
- if not hostname:
+ (_exists, contents) = rhel_util.read_sysconfig_file(filename)
+ if contents.get('hostname'):
+ return contents['hostname']
+ else:
return default
- return hostname
def _write_hostname(self, hostname, filename):
- self.updatercconf('hostname', hostname)
+ rhel_util.update_sysconfig_file(filename, {'hostname': hostname})
def create_group(self, name, members):
- group_add_cmd = ['pw', '-n', name]
+ group_add_cmd = ['pw', 'group', 'add', name]
if util.is_group(name):
LOG.warning("Skipping creation of existing group '%s'", name)
else:
try:
util.subp(group_add_cmd)
LOG.info("Created new group %s", name)
- except Exception as e:
+ except Exception:
util.logexc(LOG, "Failed to create group %s", name)
- raise e
-
- if len(members) > 0:
- for member in members:
- if not util.is_user(member):
- LOG.warning("Unable to add group member '%s' to group '%s'"
- "; user does not exist.", member, name)
- continue
- try:
- util.subp(['pw', 'usermod', '-n', name, '-G', member])
- LOG.info("Added user '%s' to group '%s'", member, name)
- except Exception:
- util.logexc(LOG, "Failed to add user '%s' to group '%s'",
- member, name)
+ raise
+ if not members:
+ members = []
+
+ for member in members:
+ if not util.is_user(member):
+ LOG.warning("Unable to add group member '%s' to group '%s'"
+ "; user does not exist.", member, name)
+ continue
+ try:
+ util.subp(['pw', 'usermod', '-n', name, '-G', member])
+ LOG.info("Added user '%s' to group '%s'", member, name)
+ except Exception:
+ util.logexc(LOG, "Failed to add user '%s' to group '%s'",
+ member, name)
def add_user(self, name, **kwargs):
if util.is_user(name):
LOG.info("User %s already exists, skipping.", name)
return False
- adduser_cmd = ['pw', 'useradd', '-n', name]
- log_adduser_cmd = ['pw', 'useradd', '-n', name]
+ pw_useradd_cmd = ['pw', 'useradd', '-n', name]
+ log_pw_useradd_cmd = ['pw', 'useradd', '-n', name]
- adduser_opts = {
+ pw_useradd_opts = {
"homedir": '-d',
"gecos": '-c',
"primary_group": '-g',
@@ -196,43 +99,49 @@ class Distro(distros.Distro):
"shell": '-s',
"inactive": '-E',
}
- adduser_flags = {
+ pw_useradd_flags = {
"no_user_group": '--no-user-group',
"system": '--system',
"no_log_init": '--no-log-init',
}
for key, val in kwargs.items():
- if (key in adduser_opts and val and
- isinstance(val, six.string_types)):
- adduser_cmd.extend([adduser_opts[key], val])
+ if key in pw_useradd_opts and val and isinstance(val, str):
+ pw_useradd_cmd.extend([pw_useradd_opts[key], val])
- elif key in adduser_flags and val:
- adduser_cmd.append(adduser_flags[key])
- log_adduser_cmd.append(adduser_flags[key])
+ elif key in pw_useradd_flags and val:
+ pw_useradd_cmd.append(pw_useradd_flags[key])
+ log_pw_useradd_cmd.append(pw_useradd_flags[key])
if 'no_create_home' in kwargs or 'system' in kwargs:
- adduser_cmd.append('-d/nonexistent')
- log_adduser_cmd.append('-d/nonexistent')
+ pw_useradd_cmd.append('-d/nonexistent')
+ log_pw_useradd_cmd.append('-d/nonexistent')
else:
- adduser_cmd.append('-d/usr/home/%s' % name)
- adduser_cmd.append('-m')
- log_adduser_cmd.append('-d/usr/home/%s' % name)
- log_adduser_cmd.append('-m')
+ pw_useradd_cmd.append('-d/usr/home/%s' % name)
+ pw_useradd_cmd.append('-m')
+ log_pw_useradd_cmd.append('-d/usr/home/%s' % name)
+ log_pw_useradd_cmd.append('-m')
# Run the command
LOG.info("Adding user %s", name)
try:
- util.subp(adduser_cmd, logstring=log_adduser_cmd)
- except Exception as e:
+ util.subp(pw_useradd_cmd, logstring=log_pw_useradd_cmd)
+ except Exception:
util.logexc(LOG, "Failed to create user %s", name)
- raise e
+ raise
# Set the password if it is provided
# For security consideration, only hashed passwd is assumed
passwd_val = kwargs.get('passwd', None)
if passwd_val is not None:
self.set_passwd(name, passwd_val, hashed=True)
+ def expire_passwd(self, user):
+ try:
+ util.subp(['pw', 'usermod', user, '-p', '01-Jan-1970'])
+ except Exception:
+ util.logexc(LOG, "Failed to set pw expiration for %s", user)
+ raise
+
def set_passwd(self, user, passwd, hashed=False):
if hashed:
hash_opt = "-H"
@@ -242,16 +151,16 @@ class Distro(distros.Distro):
try:
util.subp(['pw', 'usermod', user, hash_opt, '0'],
data=passwd, logstring="chpasswd for %s" % user)
- except Exception as e:
+ except Exception:
util.logexc(LOG, "Failed to set password for %s", user)
- raise e
+ raise
def lock_passwd(self, name):
try:
util.subp(['pw', 'usermod', name, '-h', '-'])
- except Exception as e:
+ except Exception:
util.logexc(LOG, "Failed to lock user %s", name)
- raise e
+ raise
def create_user(self, name, **kwargs):
self.add_user(name, **kwargs)
@@ -274,309 +183,16 @@ class Distro(distros.Distro):
keys = set(kwargs['ssh_authorized_keys']) or []
ssh_util.setup_user_keys(keys, name, options=None)
- @staticmethod
- def get_ifconfig_list():
- cmd = ['ifconfig', '-l']
- (nics, err) = util.subp(cmd, rcs=[0, 1])
- if len(err):
- LOG.warning("Error running %s: %s", cmd, err)
- return None
- return nics
-
- @staticmethod
- def get_ifconfig_ifname_out(ifname):
- cmd = ['ifconfig', ifname]
- (if_result, err) = util.subp(cmd, rcs=[0, 1])
- if len(err):
- LOG.warning("Error running %s: %s", cmd, err)
- return None
- return if_result
-
- @staticmethod
- def get_ifconfig_ether():
- cmd = ['ifconfig', '-l', 'ether']
- (nics, err) = util.subp(cmd, rcs=[0, 1])
- if len(err):
- LOG.warning("Error running %s: %s", cmd, err)
- return None
- return nics
-
- @staticmethod
- def get_interface_mac(ifname):
- if_result = Distro.get_ifconfig_ifname_out(ifname)
- for item in if_result.splitlines():
- if item.find('ether ') != -1:
- mac = str(item.split()[1])
- if mac:
- return mac
-
- @staticmethod
- def get_devicelist():
- nics = Distro.get_ifconfig_list()
- return nics.split()
-
- @staticmethod
- def get_ipv6():
- ipv6 = []
- nics = Distro.get_devicelist()
- for nic in nics:
- if_result = Distro.get_ifconfig_ifname_out(nic)
- for item in if_result.splitlines():
- if item.find("inet6 ") != -1 and item.find("scopeid") == -1:
- ipv6.append(nic)
- return ipv6
-
- def get_ipv4(self):
- ipv4 = []
- nics = Distro.get_devicelist()
- for nic in nics:
- if_result = Distro.get_ifconfig_ifname_out(nic)
- for item in if_result.splitlines():
- print(item)
- if self.ipv4_pat.match(item):
- ipv4.append(nic)
- return ipv4
-
- def is_up(self, ifname):
- if_result = Distro.get_ifconfig_ifname_out(ifname)
- pat = "^" + ifname
- for item in if_result.splitlines():
- if re.match(pat, item):
- flags = item.split('<')[1].split('>')[0]
- if flags.find("UP") != -1:
- return True
-
- def _get_current_rename_info(self, check_downable=True):
- """Collect information necessary for rename_interfaces."""
- names = Distro.get_devicelist()
- bymac = {}
- for n in names:
- bymac[Distro.get_interface_mac(n)] = {
- 'name': n, 'up': self.is_up(n), 'downable': None}
-
- nics_with_addresses = set()
- if check_downable:
- nics_with_addresses = set(self.get_ipv4() + self.get_ipv6())
-
- for d in bymac.values():
- d['downable'] = (d['up'] is False or
- d['name'] not in nics_with_addresses)
-
- return bymac
-
- def _rename_interfaces(self, renames):
- if not len(renames):
- LOG.debug("no interfaces to rename")
- return
-
- current_info = self._get_current_rename_info()
-
- cur_bymac = {}
- for mac, data in current_info.items():
- cur = data.copy()
- cur['mac'] = mac
- cur_bymac[mac] = cur
-
- def update_byname(bymac):
- return dict((data['name'], data)
- for data in bymac.values())
-
- def rename(cur, new):
- util.subp(["ifconfig", cur, "name", new], capture=True)
-
- def down(name):
- util.subp(["ifconfig", name, "down"], capture=True)
-
- def up(name):
- util.subp(["ifconfig", name, "up"], capture=True)
-
- ops = []
- errors = []
- ups = []
- cur_byname = update_byname(cur_bymac)
- tmpname_fmt = "cirename%d"
- tmpi = -1
-
- for mac, new_name in renames:
- cur = cur_bymac.get(mac, {})
- cur_name = cur.get('name')
- cur_ops = []
- if cur_name == new_name:
- # nothing to do
- continue
-
- if not cur_name:
- errors.append("[nic not present] Cannot rename mac=%s to %s"
- ", not available." % (mac, new_name))
- continue
-
- if cur['up']:
- msg = "[busy] Error renaming mac=%s from %s to %s"
- if not cur['downable']:
- errors.append(msg % (mac, cur_name, new_name))
- continue
- cur['up'] = False
- cur_ops.append(("down", mac, new_name, (cur_name,)))
- ups.append(("up", mac, new_name, (new_name,)))
-
- if new_name in cur_byname:
- target = cur_byname[new_name]
- if target['up']:
- msg = "[busy-target] Error renaming mac=%s from %s to %s."
- if not target['downable']:
- errors.append(msg % (mac, cur_name, new_name))
- continue
- else:
- cur_ops.append(("down", mac, new_name, (new_name,)))
-
- tmp_name = None
- while tmp_name is None or tmp_name in cur_byname:
- tmpi += 1
- tmp_name = tmpname_fmt % tmpi
-
- cur_ops.append(("rename", mac, new_name, (new_name, tmp_name)))
- target['name'] = tmp_name
- cur_byname = update_byname(cur_bymac)
- if target['up']:
- ups.append(("up", mac, new_name, (tmp_name,)))
-
- cur_ops.append(("rename", mac, new_name, (cur['name'], new_name)))
- cur['name'] = new_name
- cur_byname = update_byname(cur_bymac)
- ops += cur_ops
-
- opmap = {'rename': rename, 'down': down, 'up': up}
- if len(ops) + len(ups) == 0:
- if len(errors):
- LOG.debug("unable to do any work for renaming of %s", renames)
- else:
- LOG.debug("no work necessary for renaming of %s", renames)
- else:
- LOG.debug("achieving renaming of %s with ops %s",
- renames, ops + ups)
-
- for op, mac, new_name, params in ops + ups:
- try:
- opmap.get(op)(*params)
- except Exception as e:
- errors.append(
- "[unknown] Error performing %s%s for %s, %s: %s" %
- (op, params, mac, new_name, e))
- if len(errors):
- raise Exception('\n'.join(errors))
-
- def apply_network_config_names(self, netcfg):
- renames = []
- for ent in netcfg.get('config', {}):
- if ent.get('type') != 'physical':
- continue
- mac = ent.get('mac_address')
- name = ent.get('name')
- if not mac:
- continue
- renames.append([mac, name])
- return self._rename_interfaces(renames)
-
- @classmethod
def generate_fallback_config(self):
- nics = Distro.get_ifconfig_ether()
- if nics is None:
- LOG.debug("Fail to get network interfaces")
- return None
- potential_interfaces = nics.split()
- connected = []
- for nic in potential_interfaces:
- pat = "^" + nic
- if_result = Distro.get_ifconfig_ifname_out(nic)
- for item in if_result.split("\n"):
- if re.match(pat, item):
- flags = item.split('<')[1].split('>')[0]
- if flags.find("RUNNING") != -1:
- connected.append(nic)
- if connected:
- potential_interfaces = connected
- names = list(sorted(potential_interfaces))
- default_pri_nic = Distro.default_primary_nic
- if default_pri_nic in names:
- names.remove(default_pri_nic)
- names.insert(0, default_pri_nic)
- target_name = None
- target_mac = None
- for name in names:
- mac = Distro.get_interface_mac(name)
- if mac:
- target_name = name
- target_mac = mac
- break
- if target_mac and target_name:
- nconf = {'config': [], 'version': 1}
+ nconf = {'config': [], 'version': 1}
+ for mac, name in net.get_interfaces_by_mac().items():
nconf['config'].append(
- {'type': 'physical', 'name': target_name,
- 'mac_address': target_mac, 'subnets': [{'type': 'dhcp'}]})
- return nconf
- else:
- return None
-
- def _write_network(self, settings):
- entries = net_util.translate_network(settings)
- nameservers = []
- searchdomains = []
- dev_names = entries.keys()
- for (device, info) in entries.items():
- # Skip the loopback interface.
- if device.startswith('lo'):
- continue
-
- dev = self.getnetifname(device)
-
- LOG.info('Configuring interface %s', dev)
-
- if info.get('bootproto') == 'static':
- LOG.debug('Configuring dev %s with %s / %s', dev,
- info.get('address'), info.get('netmask'))
- # Configure an ipv4 address.
- ifconfig = (info.get('address') + ' netmask ' +
- info.get('netmask'))
-
- # Configure the gateway.
- self.updatercconf('defaultrouter', info.get('gateway'))
-
- if 'dns-nameservers' in info:
- nameservers.extend(info['dns-nameservers'])
- if 'dns-search' in info:
- searchdomains.extend(info['dns-search'])
- else:
- ifconfig = 'DHCP'
-
- self.updatercconf('ifconfig_' + dev, ifconfig)
-
- # Try to read the /etc/resolv.conf or just start from scratch if that
- # fails.
- try:
- resolvconf = ResolvConf(util.load_file(self.resolv_conf_fn))
- resolvconf.parse()
- except IOError:
- util.logexc(LOG, "Failed to parse %s, use new empty file",
- self.resolv_conf_fn)
- resolvconf = ResolvConf('')
- resolvconf.parse()
-
- # Add some nameservers
- for server in nameservers:
- try:
- resolvconf.add_nameserver(server)
- except ValueError:
- util.logexc(LOG, "Failed to add nameserver %s", server)
-
- # And add any searchdomains.
- for domain in searchdomains:
- try:
- resolvconf.add_search_domain(domain)
- except ValueError:
- util.logexc(LOG, "Failed to add search domain %s", domain)
- util.write_file(self.resolv_conf_fn, str(resolvconf), 0o644)
+ {'type': 'physical', 'name': name,
+ 'mac_address': mac, 'subnets': [{'type': 'dhcp'}]})
+ return nconf
- return dev_names
+ def _write_network_config(self, netconfig):
+ return self._supported_write_network_config(netconfig)
def apply_locale(self, locale, out_fn=None):
# Adjust the locals value to the new value
@@ -604,18 +220,12 @@ class Distro(distros.Distro):
util.logexc(LOG, "Failed to restore %s backup",
self.login_conf_fn)
- def _bring_up_interface(self, device_name):
- if device_name.startswith('lo'):
- return
- dev = self.getnetifname(device_name)
- cmd = ['/etc/rc.d/netif', 'start', dev]
- LOG.debug("Attempting to bring up interface %s using command %s",
- dev, cmd)
- # This could return 1 when the interface has already been put UP by the
- # OS. This is just fine.
- (_out, err) = util.subp(cmd, rcs=[0, 1])
- if len(err):
- LOG.warning("Error running %s: %s", cmd, err)
+ def apply_network_config_names(self, netconfig):
+ # This is handled by the FreeBSD network renderer, which writes a
+ # line of the following format to /etc/rc.conf:
+ # ifconfig_OLDNAME_name=NEWNAME
+ # The FreeBSD network scripts then rename the interface automatically.
+ return
def install_packages(self, pkglist):
self.update_package_sources()
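For reference, the rewritten generate_fallback_config above now emits one DHCP physical entry per discovered MAC; the resulting version-1 network config has roughly this shape (the interface name and MAC below are made up):

    fallback = {
        'version': 1,
        'config': [
            {'type': 'physical', 'name': 'vtnet0',
             'mac_address': '52:54:00:12:34:56',
             'subnets': [{'type': 'dhcp'}]},
        ],
    }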
diff --git a/cloudinit/distros/opensuse.py b/cloudinit/distros/opensuse.py
index 1bfe0478..dd56a3f4 100644
--- a/cloudinit/distros/opensuse.py
+++ b/cloudinit/distros/opensuse.py
@@ -37,7 +37,10 @@ class Distro(distros.Distro):
renderer_configs = {
'sysconfig': {
'control': 'etc/sysconfig/network/config',
+ 'flavor': 'suse',
'iface_templates': '%(base)s/network/ifcfg-%(name)s',
+ 'netrules_path': (
+ 'etc/udev/rules.d/85-persistent-net-cloud-init.rules'),
'route_templates': {
'ipv4': '%(base)s/network/ifroute-%(name)s',
'ipv6': '%(base)s/network/ifroute-%(name)s',
diff --git a/cloudinit/distros/parsers/hostname.py b/cloudinit/distros/parsers/hostname.py
index dd434ac6..e74c083c 100644
--- a/cloudinit/distros/parsers/hostname.py
+++ b/cloudinit/distros/parsers/hostname.py
@@ -4,7 +4,7 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from six import StringIO
+from io import StringIO
from cloudinit.distros.parsers import chop_comment
diff --git a/cloudinit/distros/parsers/hosts.py b/cloudinit/distros/parsers/hosts.py
index 64444581..54e4e934 100644
--- a/cloudinit/distros/parsers/hosts.py
+++ b/cloudinit/distros/parsers/hosts.py
@@ -4,7 +4,7 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from six import StringIO
+from io import StringIO
from cloudinit.distros.parsers import chop_comment
diff --git a/cloudinit/distros/parsers/resolv_conf.py b/cloudinit/distros/parsers/resolv_conf.py
index a62055ae..299d54b5 100644
--- a/cloudinit/distros/parsers/resolv_conf.py
+++ b/cloudinit/distros/parsers/resolv_conf.py
@@ -4,7 +4,7 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from six import StringIO
+from io import StringIO
from cloudinit.distros.parsers import chop_comment
from cloudinit import log as logging
diff --git a/cloudinit/distros/parsers/sys_conf.py b/cloudinit/distros/parsers/sys_conf.py
index c27b5d5d..dee4c551 100644
--- a/cloudinit/distros/parsers/sys_conf.py
+++ b/cloudinit/distros/parsers/sys_conf.py
@@ -4,11 +4,9 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-import six
-from six import StringIO
-
import pipes
import re
+from io import StringIO
# This library is used to parse/write
# out the various sysconfig files edited (best attempt effort)
@@ -43,6 +41,13 @@ def _contains_shell_variable(text):
class SysConf(configobj.ConfigObj):
+ """A configobj.ConfigObj subclass specialised for sysconfig files.
+
+ :param contents:
+ The sysconfig file to parse, in a format accepted by
+ ``configobj.ConfigObj.__init__`` (i.e. "a filename, file like object,
+ or list of lines").
+ """
def __init__(self, contents):
configobj.ConfigObj.__init__(self, contents,
interpolation=False,
@@ -58,7 +63,7 @@ class SysConf(configobj.ConfigObj):
return out_contents.getvalue()
def _quote(self, value, multiline=False):
- if not isinstance(value, six.string_types):
+ if not isinstance(value, str):
raise ValueError('Value "%s" is not a string' % (value))
if len(value) == 0:
return ''
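
A minimal sketch of the ConfigObj contract the new docstring cites, with illustrative values not taken from this changeset:

    from configobj import ConfigObj

    # 'contents' may be a filename, a file-like object, or a list of lines.
    lines = ['NETWORKING=yes', 'HOSTNAME=cloud.example.invalid']
    conf = ConfigObj(lines, interpolation=False, write_empty_values=True)
    print(conf['HOSTNAME'])  # -> cloud.example.invalid
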
diff --git a/cloudinit/distros/ubuntu.py b/cloudinit/distros/ubuntu.py
index e5fcbc58..23be3bdd 100644
--- a/cloudinit/distros/ubuntu.py
+++ b/cloudinit/distros/ubuntu.py
@@ -30,9 +30,9 @@ class Distro(debian.Distro):
}
self.renderer_configs = {
"eni": {"eni_path": self.network_conf_fn["eni"],
- "eni_header": debian.ENI_HEADER},
+ "eni_header": debian.NETWORK_FILE_HEADER},
"netplan": {"netplan_path": self.network_conf_fn["netplan"],
- "netplan_header": debian.ENI_HEADER,
+ "netplan_header": debian.NETWORK_FILE_HEADER,
"postcmds": True}
}
diff --git a/cloudinit/distros/ug_util.py b/cloudinit/distros/ug_util.py
index 9378dd78..08446a95 100755
--- a/cloudinit/distros/ug_util.py
+++ b/cloudinit/distros/ug_util.py
@@ -9,8 +9,6 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-import six
-
from cloudinit import log as logging
from cloudinit import type_utils
from cloudinit import util
@@ -29,7 +27,7 @@ LOG = logging.getLogger(__name__)
# is the standard form used in the rest
# of cloud-init
def _normalize_groups(grp_cfg):
- if isinstance(grp_cfg, six.string_types):
+ if isinstance(grp_cfg, str):
grp_cfg = grp_cfg.strip().split(",")
if isinstance(grp_cfg, list):
c_grp_cfg = {}
@@ -39,7 +37,7 @@ def _normalize_groups(grp_cfg):
if k not in c_grp_cfg:
if isinstance(v, list):
c_grp_cfg[k] = list(v)
- elif isinstance(v, six.string_types):
+ elif isinstance(v, str):
c_grp_cfg[k] = [v]
else:
raise TypeError("Bad group member type %s" %
@@ -47,12 +45,12 @@ def _normalize_groups(grp_cfg):
else:
if isinstance(v, list):
c_grp_cfg[k].extend(v)
- elif isinstance(v, six.string_types):
+ elif isinstance(v, str):
c_grp_cfg[k].append(v)
else:
raise TypeError("Bad group member type %s" %
type_utils.obj_name(v))
- elif isinstance(i, six.string_types):
+ elif isinstance(i, str):
if i not in c_grp_cfg:
c_grp_cfg[i] = []
else:
@@ -89,7 +87,7 @@ def _normalize_users(u_cfg, def_user_cfg=None):
if isinstance(u_cfg, dict):
ad_ucfg = []
for (k, v) in u_cfg.items():
- if isinstance(v, (bool, int, float) + six.string_types):
+ if isinstance(v, (bool, int, float, str)):
if util.is_true(v):
ad_ucfg.append(str(k))
elif isinstance(v, dict):
@@ -99,12 +97,12 @@ def _normalize_users(u_cfg, def_user_cfg=None):
raise TypeError(("Unmappable user value type %s"
" for key %s") % (type_utils.obj_name(v), k))
u_cfg = ad_ucfg
- elif isinstance(u_cfg, six.string_types):
+ elif isinstance(u_cfg, str):
u_cfg = util.uniq_merge_sorted(u_cfg)
users = {}
for user_config in u_cfg:
- if isinstance(user_config, (list,) + six.string_types):
+ if isinstance(user_config, (list, str)):
for u in util.uniq_merge(user_config):
if u and u not in users:
users[u] = {}
@@ -209,7 +207,7 @@ def normalize_users_groups(cfg, distro):
old_user = cfg['user']
# Translate it into the format that is more useful
# going forward
- if isinstance(old_user, six.string_types):
+ if isinstance(old_user, str):
old_user = {
'name': old_user,
}
@@ -238,7 +236,7 @@ def normalize_users_groups(cfg, distro):
default_user_config = util.mergemanydict([old_user, distro_user_config])
base_users = cfg.get('users', [])
- if not isinstance(base_users, (list, dict) + six.string_types):
+ if not isinstance(base_users, (list, dict, str)):
LOG.warning(("Format for 'users' key must be a comma separated string"
" or a dictionary or a list and not %s"),
type_utils.obj_name(base_users))
@@ -252,7 +250,7 @@ def normalize_users_groups(cfg, distro):
base_users.append({'name': 'default'})
elif isinstance(base_users, dict):
base_users['default'] = dict(base_users).get('default', True)
- elif isinstance(base_users, six.string_types):
+ elif isinstance(base_users, str):
# Just append it on to be re-parsed later
base_users += ",default"
diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py
index 3b7b17f1..34acfe84 100644
--- a/cloudinit/ec2_utils.py
+++ b/cloudinit/ec2_utils.py
@@ -134,25 +134,30 @@ class MetadataMaterializer(object):
return joined
-def _skip_retry_on_codes(status_codes, _request_args, cause):
+def skip_retry_on_codes(status_codes, _request_args, cause):
"""Returns False if cause.code is in status_codes."""
return cause.code not in status_codes
def get_instance_userdata(api_version='latest',
metadata_address='http://169.254.169.254',
- ssl_details=None, timeout=5, retries=5):
+ ssl_details=None, timeout=5, retries=5,
+ headers_cb=None, headers_redact=None,
+ exception_cb=None):
ud_url = url_helper.combine_url(metadata_address, api_version)
ud_url = url_helper.combine_url(ud_url, 'user-data')
user_data = ''
try:
- # It is ok for userdata to not exist (thats why we are stopping if
- # NOT_FOUND occurs) and just in that case returning an empty string.
- exception_cb = functools.partial(_skip_retry_on_codes,
- SKIP_USERDATA_CODES)
+ if not exception_cb:
+            # It is ok for userdata to not exist (that's why we are stopping
+            # if NOT_FOUND occurs) and just in that case returning an empty
+ # string.
+ exception_cb = functools.partial(skip_retry_on_codes,
+ SKIP_USERDATA_CODES)
response = url_helper.read_file_or_url(
ud_url, ssl_details=ssl_details, timeout=timeout,
- retries=retries, exception_cb=exception_cb)
+ retries=retries, exception_cb=exception_cb, headers_cb=headers_cb,
+ headers_redact=headers_redact)
user_data = response.contents
except url_helper.UrlError as e:
if e.code not in SKIP_USERDATA_CODES:
@@ -165,11 +170,15 @@ def get_instance_userdata(api_version='latest',
def _get_instance_metadata(tree, api_version='latest',
metadata_address='http://169.254.169.254',
ssl_details=None, timeout=5, retries=5,
- leaf_decoder=None):
+ leaf_decoder=None, headers_cb=None,
+ headers_redact=None,
+ exception_cb=None):
md_url = url_helper.combine_url(metadata_address, api_version, tree)
caller = functools.partial(
url_helper.read_file_or_url, ssl_details=ssl_details,
- timeout=timeout, retries=retries)
+ timeout=timeout, retries=retries, headers_cb=headers_cb,
+ headers_redact=headers_redact,
+ exception_cb=exception_cb)
def mcaller(url):
return caller(url).contents
@@ -191,22 +200,32 @@ def _get_instance_metadata(tree, api_version='latest',
def get_instance_metadata(api_version='latest',
metadata_address='http://169.254.169.254',
ssl_details=None, timeout=5, retries=5,
- leaf_decoder=None):
+ leaf_decoder=None, headers_cb=None,
+ headers_redact=None,
+ exception_cb=None):
# Note, 'meta-data' explicitly has trailing /.
# this is required for CloudStack (LP: #1356855)
return _get_instance_metadata(tree='meta-data/', api_version=api_version,
metadata_address=metadata_address,
ssl_details=ssl_details, timeout=timeout,
- retries=retries, leaf_decoder=leaf_decoder)
+ retries=retries, leaf_decoder=leaf_decoder,
+ headers_redact=headers_redact,
+ headers_cb=headers_cb,
+ exception_cb=exception_cb)
def get_instance_identity(api_version='latest',
metadata_address='http://169.254.169.254',
ssl_details=None, timeout=5, retries=5,
- leaf_decoder=None):
+ leaf_decoder=None, headers_cb=None,
+ headers_redact=None,
+ exception_cb=None):
return _get_instance_metadata(tree='dynamic/instance-identity',
api_version=api_version,
metadata_address=metadata_address,
ssl_details=ssl_details, timeout=timeout,
- retries=retries, leaf_decoder=leaf_decoder)
+ retries=retries, leaf_decoder=leaf_decoder,
+ headers_redact=headers_redact,
+ headers_cb=headers_cb,
+ exception_cb=exception_cb)
# vi: ts=4 expandtab
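
A hedged caller sketch for the new keyword arguments: a datasource can now supply per-request headers (for example a metadata-service token) and name which headers to keep out of logs. The header name and token value below are purely illustrative:

    from cloudinit import ec2_utils

    TOKEN_HEADER = 'X-aws-ec2-metadata-token'  # hypothetical example header

    def headers_cb(url):
        # Called per request to produce headers for 'url'.
        return {TOKEN_HEADER: 'EXAMPLE-TOKEN'}

    md = ec2_utils.get_instance_metadata(
        api_version='2016-09-02',
        headers_cb=headers_cb,
        headers_redact=[TOKEN_HEADER])
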
diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py
index 0db75af9..a409ff8a 100644
--- a/cloudinit/handlers/__init__.py
+++ b/cloudinit/handlers/__init__.py
@@ -10,14 +10,12 @@
import abc
import os
-import six
-
-from cloudinit.settings import (PER_ALWAYS, PER_INSTANCE, FREQUENCIES)
from cloudinit import importer
from cloudinit import log as logging
from cloudinit import type_utils
from cloudinit import util
+from cloudinit.settings import (PER_ALWAYS, PER_INSTANCE, FREQUENCIES)
LOG = logging.getLogger(__name__)
@@ -60,8 +58,7 @@ INCLUSION_SRCH = sorted(list(INCLUSION_TYPES_MAP.keys()),
key=(lambda e: 0 - len(e)))
-@six.add_metaclass(abc.ABCMeta)
-class Handler(object):
+class Handler(metaclass=abc.ABCMeta):
def __init__(self, frequency, version=2):
self.handler_version = version
@@ -159,7 +156,7 @@ def _extract_first_or_bytes(blob, size):
# Extract the first line or upto X symbols for text objects
# Extract first X bytes for binary objects
try:
- if isinstance(blob, six.string_types):
+ if isinstance(blob, str):
start = blob.split("\n", 1)[0]
else:
# We want to avoid decoding the whole blob (it might be huge)
diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py
index 99bf0e61..2a307364 100644
--- a/cloudinit/handlers/cloud_config.py
+++ b/cloudinit/handlers/cloud_config.py
@@ -14,6 +14,7 @@ from cloudinit import handlers
from cloudinit import log as logging
from cloudinit import mergers
from cloudinit import util
+from cloudinit import safeyaml
from cloudinit.settings import (PER_ALWAYS)
@@ -75,7 +76,7 @@ class CloudConfigPartHandler(handlers.Handler):
'',
]
lines.extend(file_lines)
- lines.append(util.yaml_dumps(self.cloud_buf))
+ lines.append(safeyaml.dumps(self.cloud_buf))
else:
lines = []
util.write_file(self.cloud_fn, "\n".join(lines), 0o600)
diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py
index 83fb0724..003cad60 100644
--- a/cloudinit/handlers/upstart_job.py
+++ b/cloudinit/handlers/upstart_job.py
@@ -89,7 +89,7 @@ def _has_suitable_upstart():
util.subp(["dpkg", "--compare-versions", dpkg_ver, "ge", good])
return True
except util.ProcessExecutionError as e:
- if e.exit_code is 1:
+ if e.exit_code == 1:
pass
else:
util.logexc(LOG, "dpkg --compare-versions failed [%s]",
diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py
index dcd2645e..7d2a3305 100644
--- a/cloudinit/helpers.py
+++ b/cloudinit/helpers.py
@@ -12,10 +12,8 @@ from time import time
import contextlib
import os
-
-from six import StringIO
-from six.moves.configparser import (
- NoSectionError, NoOptionError, RawConfigParser)
+from configparser import NoSectionError, NoOptionError, RawConfigParser
+from io import StringIO
from cloudinit.settings import (PER_INSTANCE, PER_ALWAYS, PER_ONCE,
CFG_ENV_NAME)
diff --git a/cloudinit/log.py b/cloudinit/log.py
index 5ae312ba..827db12b 100644
--- a/cloudinit/log.py
+++ b/cloudinit/log.py
@@ -8,17 +8,13 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
+import collections
+import io
import logging
import logging.config
import logging.handlers
-
-import collections
import os
import sys
-
-import six
-from six import StringIO
-
import time
# Logging levels for easy access
@@ -74,13 +70,13 @@ def setupLogging(cfg=None):
log_cfgs = []
log_cfg = cfg.get('logcfg')
- if log_cfg and isinstance(log_cfg, six.string_types):
+ if log_cfg and isinstance(log_cfg, str):
# If there is a 'logcfg' entry in the config,
# respect it, it is the old keyname
log_cfgs.append(str(log_cfg))
elif "log_cfgs" in cfg:
for a_cfg in cfg['log_cfgs']:
- if isinstance(a_cfg, six.string_types):
+ if isinstance(a_cfg, str):
log_cfgs.append(a_cfg)
elif isinstance(a_cfg, (collections.Iterable)):
cfg_str = [str(c) for c in a_cfg]
@@ -100,7 +96,7 @@ def setupLogging(cfg=None):
# is acting as a file)
pass
else:
- log_cfg = StringIO(log_cfg)
+ log_cfg = io.StringIO(log_cfg)
# Attempt to load its config
logging.config.fileConfig(log_cfg)
# The first one to work wins!
diff --git a/cloudinit/mergers/__init__.py b/cloudinit/mergers/__init__.py
index 7fbc25ff..668e3cd6 100644
--- a/cloudinit/mergers/__init__.py
+++ b/cloudinit/mergers/__init__.py
@@ -6,8 +6,6 @@
import re
-import six
-
from cloudinit import importer
from cloudinit import log as logging
from cloudinit import type_utils
@@ -85,7 +83,7 @@ def dict_extract_mergers(config):
raw_mergers = config.pop('merge_type', None)
if raw_mergers is None:
return parsed_mergers
- if isinstance(raw_mergers, six.string_types):
+ if isinstance(raw_mergers, str):
return string_extract_mergers(raw_mergers)
for m in raw_mergers:
if isinstance(m, (dict)):
diff --git a/cloudinit/mergers/m_dict.py b/cloudinit/mergers/m_dict.py
index 6c5fddc2..93472f13 100644
--- a/cloudinit/mergers/m_dict.py
+++ b/cloudinit/mergers/m_dict.py
@@ -4,8 +4,6 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-import six
-
DEF_MERGE_TYPE = 'no_replace'
MERGE_TYPES = ('replace', DEF_MERGE_TYPE,)
@@ -47,7 +45,7 @@ class Merger(object):
return new_v
if isinstance(new_v, (list, tuple)) and self._recurse_array:
return self._merger.merge(old_v, new_v)
- if isinstance(new_v, six.string_types) and self._recurse_str:
+ if isinstance(new_v, str) and self._recurse_str:
return self._merger.merge(old_v, new_v)
if isinstance(new_v, (dict)) and self._recurse_dict:
return self._merger.merge(old_v, new_v)
diff --git a/cloudinit/mergers/m_list.py b/cloudinit/mergers/m_list.py
index daa0469a..19f32771 100644
--- a/cloudinit/mergers/m_list.py
+++ b/cloudinit/mergers/m_list.py
@@ -4,8 +4,6 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-import six
-
DEF_MERGE_TYPE = 'replace'
MERGE_TYPES = ('append', 'prepend', DEF_MERGE_TYPE, 'no_replace')
@@ -63,7 +61,7 @@ class Merger(object):
return old_v
if isinstance(new_v, (list, tuple)) and self._recurse_array:
return self._merger.merge(old_v, new_v)
- if isinstance(new_v, six.string_types) and self._recurse_str:
+ if isinstance(new_v, str) and self._recurse_str:
return self._merger.merge(old_v, new_v)
if isinstance(new_v, (dict)) and self._recurse_dict:
return self._merger.merge(old_v, new_v)
diff --git a/cloudinit/mergers/m_str.py b/cloudinit/mergers/m_str.py
index 629df58e..539e3e29 100644
--- a/cloudinit/mergers/m_str.py
+++ b/cloudinit/mergers/m_str.py
@@ -4,8 +4,6 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-import six
-
class Merger(object):
def __init__(self, _merger, opts):
@@ -23,13 +21,10 @@ class Merger(object):
# perform the following action, if appending we will
# merge them together, otherwise we will just return value.
def _on_str(self, value, merge_with):
- if not isinstance(value, six.string_types):
+ if not isinstance(value, str):
return merge_with
if not self._append:
return merge_with
- if isinstance(value, six.text_type):
- return value + six.text_type(merge_with)
- else:
- return value + six.binary_type(merge_with)
+ return value + merge_with
# vi: ts=4 expandtab
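
With Python 3's single str type, the append branch no longer needs six's text/binary split. A standalone sketch of the resulting merge behavior (names are illustrative):

    def merge_str(value, merge_with, append=True):
        # Non-strings and non-append mode fall back to the new value.
        if not isinstance(value, str) or not append:
            return merge_with
        return value + merge_with

    print(merge_str('foo', 'bar'))  # -> foobar
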
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index 3642fb1f..1d5eb535 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -9,6 +9,7 @@ import errno
import logging
import os
import re
+from functools import partial
from cloudinit.net.network_state import mask_to_net_prefix
from cloudinit import util
@@ -108,6 +109,141 @@ def is_bond(devname):
return os.path.exists(sys_dev_path(devname, "bonding"))
+def get_master(devname):
+ """Return the master path for devname, or None if no master"""
+ path = sys_dev_path(devname, path="master")
+ if os.path.exists(path):
+ return path
+ return None
+
+
+def master_is_bridge_or_bond(devname):
+ """Return a bool indicating if devname's master is a bridge or bond"""
+ master_path = get_master(devname)
+ if master_path is None:
+ return False
+ bonding_path = os.path.join(master_path, "bonding")
+ bridge_path = os.path.join(master_path, "bridge")
+ return (os.path.exists(bonding_path) or os.path.exists(bridge_path))
+
+
+def is_netfailover(devname, driver=None):
+ """ netfailover driver uses 3 nics, master, primary and standby.
+ this returns True if the device is either the primary or standby
+ as these devices are to be ignored.
+ """
+ if driver is None:
+ driver = device_driver(devname)
+ if is_netfail_primary(devname, driver) or is_netfail_standby(devname,
+ driver):
+ return True
+ return False
+
+
+def get_dev_features(devname):
+ """ Returns a str from reading /sys/class/net/<devname>/device/features."""
+ features = ''
+ try:
+ features = read_sys_net(devname, 'device/features')
+ except Exception:
+ pass
+ return features
+
+
+def has_netfail_standby_feature(devname):
+ """ Return True if VIRTIO_NET_F_STANDBY bit (62) is set.
+
+ https://github.com/torvalds/linux/blob/ \
+ 089cf7f6ecb266b6a4164919a2e69bd2f938374a/ \
+ include/uapi/linux/virtio_net.h#L60
+ """
+ features = get_dev_features(devname)
+ if not features or len(features) < 64:
+ return False
+ return features[62] == "1"
+
+
+def is_netfail_master(devname, driver=None):
+ """ A device is a "netfail master" device if:
+
+ - The device does NOT have the 'master' sysfs attribute
+ - The device driver is 'virtio_net'
+ - The device has the standby feature bit set
+
+ Return True if all of the above is True.
+ """
+ if get_master(devname) is not None:
+ return False
+
+ if driver is None:
+ driver = device_driver(devname)
+
+ if driver != "virtio_net":
+ return False
+
+ if not has_netfail_standby_feature(devname):
+ return False
+
+ return True
+
+
+def is_netfail_primary(devname, driver=None):
+ """ A device is a "netfail primary" device if:
+
+ - the device has a 'master' sysfs file
+ - the device driver is not 'virtio_net'
+ - the 'master' sysfs file points to device with virtio_net driver
+ - the 'master' device has the 'standby' feature bit set
+
+ Return True if all of the above is True.
+ """
+ # /sys/class/net/<devname>/master -> ../../<master devname>
+ master_sysfs_path = sys_dev_path(devname, path='master')
+ if not os.path.exists(master_sysfs_path):
+ return False
+
+ if driver is None:
+ driver = device_driver(devname)
+
+ if driver == "virtio_net":
+ return False
+
+ master_devname = os.path.basename(os.path.realpath(master_sysfs_path))
+ master_driver = device_driver(master_devname)
+ if master_driver != "virtio_net":
+ return False
+
+ master_has_standby = has_netfail_standby_feature(master_devname)
+ if not master_has_standby:
+ return False
+
+ return True
+
+
+def is_netfail_standby(devname, driver=None):
+ """ A device is a "netfail standby" device if:
+
+ - The device has a 'master' sysfs attribute
+ - The device driver is 'virtio_net'
+ - The device has the standby feature bit set
+
+ Return True if all of the above is True.
+ """
+ if get_master(devname) is None:
+ return False
+
+ if driver is None:
+ driver = device_driver(devname)
+
+ if driver != "virtio_net":
+ return False
+
+ if not has_netfail_standby_feature(devname):
+ return False
+
+ return True
+
+
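
Taken together, the predicates classify a netfailover trio by two observable properties: whether the device has a master, and whether the virtio_net standby feature bit is set on the device or on its master. An illustrative helper (not part of this changeset) built on the functions above:

    def netfail_role(devname, driver=None):
        """Classify devname as 'master', 'primary', 'standby' or None."""
        if is_netfail_master(devname, driver):
            return 'master'
        if is_netfail_primary(devname, driver):
            return 'primary'
        if is_netfail_standby(devname, driver):
            return 'standby'
        return None
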
def is_renamed(devname):
"""
/* interface name assignment types (sysfs name_assign_type attribute) */
@@ -171,6 +307,9 @@ def device_devid(devname):
def get_devicelist():
+ if util.is_FreeBSD():
+ return list(get_interfaces_by_mac().values())
+
try:
devs = os.listdir(get_sys_class_path())
except OSError as e:
@@ -193,6 +332,35 @@ def is_disabled_cfg(cfg):
def find_fallback_nic(blacklist_drivers=None):
"""Return the name of the 'fallback' network device."""
+ if util.is_FreeBSD():
+ return find_fallback_nic_on_freebsd(blacklist_drivers)
+ else:
+ return find_fallback_nic_on_linux(blacklist_drivers)
+
+
+def find_fallback_nic_on_freebsd(blacklist_drivers=None):
+ """Return the name of the 'fallback' network device on FreeBSD.
+
+ @param blacklist_drivers: currently ignored
+ @return default interface, or None
+
+    We use the first interface reported by ``ifconfig -l -u ether``.
+ """
+ stdout, _stderr = util.subp(['ifconfig', '-l', '-u', 'ether'])
+ values = stdout.split()
+ if values:
+ return values[0]
+ # On FreeBSD <= 10, 'ifconfig -l' ignores the interfaces with DOWN
+ # status
+ values = list(get_interfaces_by_mac().values())
+ values.sort()
+ if values:
+ return values[0]
+
+
+def find_fallback_nic_on_linux(blacklist_drivers=None):
+ """Return the name of the 'fallback' network device on Linux."""
if not blacklist_drivers:
blacklist_drivers = []
@@ -226,6 +394,9 @@ def find_fallback_nic(blacklist_drivers=None):
if is_bond(interface):
# skip any bonds
continue
+ if is_netfailover(interface):
+ # ignore netfailover primary/standby interfaces
+ continue
carrier = read_sys_net_int(interface, 'carrier')
if carrier:
connected.append(interface)
@@ -250,7 +421,7 @@ def find_fallback_nic(blacklist_drivers=None):
potential_interfaces = possibly_connected
# if eth0 exists use it above anything else, otherwise get the interface
- # that we can read 'first' (using the sorted defintion of first).
+ # that we can read 'first' (using the sorted definition of first).
names = list(sorted(potential_interfaces, key=natural_sort_key))
if DEFAULT_PRIMARY_INTERFACE in names:
names.remove(DEFAULT_PRIMARY_INTERFACE)
@@ -264,46 +435,34 @@ def find_fallback_nic(blacklist_drivers=None):
def generate_fallback_config(blacklist_drivers=None, config_driver=None):
- """Determine which attached net dev is most likely to have a connection and
- generate network state to run dhcp on that interface"""
-
+ """Generate network cfg v2 for dhcp on the NIC most likely connected."""
if not config_driver:
config_driver = False
target_name = find_fallback_nic(blacklist_drivers=blacklist_drivers)
- if target_name:
- target_mac = read_sys_net_safe(target_name, 'address')
- nconf = {'config': [], 'version': 1}
- cfg = {'type': 'physical', 'name': target_name,
- 'mac_address': target_mac, 'subnets': [{'type': 'dhcp'}]}
- # inject the device driver name, dev_id into config if enabled and
- # device has a valid device driver value
- if config_driver:
- driver = device_driver(target_name)
- if driver:
- cfg['params'] = {
- 'driver': driver,
- 'device_id': device_devid(target_name),
- }
- nconf['config'].append(cfg)
- return nconf
- else:
+ if not target_name:
# can't read any interfaces addresses (or there are none); give up
return None
+ # netfail cannot use mac for matching, they have duplicate macs
+ if is_netfail_master(target_name):
+ match = {'name': target_name}
+ else:
+ match = {
+ 'macaddress': read_sys_net_safe(target_name, 'address').lower()}
+ cfg = {'dhcp4': True, 'set-name': target_name, 'match': match}
+ if config_driver:
+ driver = device_driver(target_name)
+ if driver:
+ cfg['match']['driver'] = driver
+ nconf = {'ethernets': {target_name: cfg}, 'version': 2}
+ return nconf
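
For a hypothetical NIC eth0 with MAC 52:54:00:12:34:00, the rewritten function returns a network config v2 dictionary along these lines, rather than the old v1 form:

    {'version': 2,
     'ethernets': {
         'eth0': {'dhcp4': True,
                  'set-name': 'eth0',
                  'match': {'macaddress': '52:54:00:12:34:00'}}}}
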
-def apply_network_config_names(netcfg, strict_present=True, strict_busy=True):
- """read the network config and rename devices accordingly.
- if strict_present is false, then do not raise exception if no devices
- match. if strict_busy is false, then do not raise exception if the
- device cannot be renamed because it is currently configured.
- renames are only attempted for interfaces of type 'physical'. It is
- expected that the network system will create other devices with the
- correct name in place."""
+def extract_physdevs(netcfg):
def _version_1(netcfg):
- renames = []
+ physdevs = []
for ent in netcfg.get('config', {}):
if ent.get('type') != 'physical':
continue
@@ -317,11 +476,11 @@ def apply_network_config_names(netcfg, strict_present=True, strict_busy=True):
driver = device_driver(name)
if not device_id:
device_id = device_devid(name)
- renames.append([mac, name, driver, device_id])
- return renames
+ physdevs.append([mac, name, driver, device_id])
+ return physdevs
def _version_2(netcfg):
- renames = []
+ physdevs = []
for ent in netcfg.get('ethernets', {}).values():
# only rename if configured to do so
name = ent.get('set-name')
@@ -337,16 +496,69 @@ def apply_network_config_names(netcfg, strict_present=True, strict_busy=True):
driver = device_driver(name)
if not device_id:
device_id = device_devid(name)
- renames.append([mac, name, driver, device_id])
- return renames
+ physdevs.append([mac, name, driver, device_id])
+ return physdevs
+
+ version = netcfg.get('version')
+ if version == 1:
+ return _version_1(netcfg)
+ elif version == 2:
+ return _version_2(netcfg)
+
+ raise RuntimeError('Unknown network config version: %s' % version)
- if netcfg.get('version') == 1:
- return _rename_interfaces(_version_1(netcfg))
- elif netcfg.get('version') == 2:
- return _rename_interfaces(_version_2(netcfg))
- raise RuntimeError('Failed to apply network config names. Found bad'
- ' network config version: %s' % netcfg.get('version'))
+def wait_for_physdevs(netcfg, strict=True):
+ physdevs = extract_physdevs(netcfg)
+
+ # set of expected iface names and mac addrs
+ expected_ifaces = dict([(iface[0], iface[1]) for iface in physdevs])
+ expected_macs = set(expected_ifaces.keys())
+
+ # set of current macs
+ present_macs = get_interfaces_by_mac().keys()
+
+ # compare the set of expected mac address values to
+ # the current macs present; we only check MAC as cloud-init
+ # has not yet renamed interfaces and the netcfg may include
+ # such renames.
+ for _ in range(0, 5):
+ if expected_macs.issubset(present_macs):
+ LOG.debug('net: all expected physical devices present')
+ return
+
+ missing = expected_macs.difference(present_macs)
+ LOG.debug('net: waiting for expected net devices: %s', missing)
+ for mac in missing:
+ # trigger a settle, unless this interface exists
+ syspath = sys_dev_path(expected_ifaces[mac])
+ settle = partial(util.udevadm_settle, exists=syspath)
+ msg = 'Waiting for udev events to settle or %s exists' % syspath
+ util.log_time(LOG.debug, msg, func=settle)
+
+ # update present_macs after settles
+ present_macs = get_interfaces_by_mac().keys()
+
+ msg = 'Not all expected physical devices present: %s' % missing
+ LOG.warning(msg)
+ if strict:
+ raise RuntimeError(msg)
+
+
+def apply_network_config_names(netcfg, strict_present=True, strict_busy=True):
+ """read the network config and rename devices accordingly.
+ if strict_present is false, then do not raise exception if no devices
+ match. if strict_busy is false, then do not raise exception if the
+ device cannot be renamed because it is currently configured.
+
+ renames are only attempted for interfaces of type 'physical'. It is
+ expected that the network system will create other devices with the
+ correct name in place."""
+
+ try:
+ _rename_interfaces(extract_physdevs(netcfg))
+ except RuntimeError as e:
+ raise RuntimeError('Failed to apply network config names: %s' % e)
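
To make the refactor concrete: extract_physdevs yields [mac, name, driver, device_id] entries for physical interfaces, which both wait_for_physdevs and apply_network_config_names consume. A sketch with a hypothetical v1 config (driver and device_id are looked up from sysfs on a real system):

    netcfg = {
        'version': 1,
        'config': [{'type': 'physical', 'name': 'eth0',
                    'mac_address': '52:54:00:12:34:00',
                    'subnets': [{'type': 'dhcp'}]}],
    }
    # extract_physdevs(netcfg)
    # -> [['52:54:00:12:34:00', 'eth0', <driver>, <device_id>]]
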
def interface_has_own_mac(ifname, strict=False):
@@ -585,6 +797,40 @@ def get_ib_interface_hwaddr(ifname, ethernet_format):
def get_interfaces_by_mac():
+ if util.is_FreeBSD():
+ return get_interfaces_by_mac_on_freebsd()
+ else:
+ return get_interfaces_by_mac_on_linux()
+
+
+def get_interfaces_by_mac_on_freebsd():
+ (out, _) = util.subp(['ifconfig', '-a', 'ether'])
+
+ # flatten each interface block in a single line
+ def flatten(out):
+ curr_block = ''
+ for l in out.split('\n'):
+ if l.startswith('\t'):
+ curr_block += l
+ else:
+ if curr_block:
+ yield curr_block
+ curr_block = l
+ yield curr_block
+
+    # look for the interface name and mac in a list of flattened blocks
+ def find_mac(flat_list):
+ for block in flat_list:
+ m = re.search(
+ r"^(?P<ifname>\S*): .*ether\s(?P<mac>[\da-f:]{17}).*",
+ block)
+ if m:
+ yield (m.group('mac'), m.group('ifname'))
+ results = {mac: ifname for mac, ifname in find_mac(flatten(out))}
+ return results
+
+
+def get_interfaces_by_mac_on_linux():
"""Build a dictionary of tuples {mac: name}.
Bridges and any devices that have a 'stolen' mac are excluded."""
@@ -622,6 +868,12 @@ def get_interfaces():
continue
if is_vlan(name):
continue
+ if is_bond(name):
+ continue
+ if get_master(name) is not None and not master_is_bridge_or_bond(name):
+ continue
+ if is_netfailover(name):
+ continue
mac = get_interface_mac(name)
# some devices may not have a mac (tun0)
if not mac:
@@ -677,7 +929,7 @@ class EphemeralIPv4Network(object):
"""
def __init__(self, interface, ip, prefix_or_mask, broadcast, router=None,
- connectivity_url=None):
+ connectivity_url=None, static_routes=None):
"""Setup context manager and validate call signature.
@param interface: Name of the network interface to bring up.
@@ -688,6 +940,7 @@ class EphemeralIPv4Network(object):
@param router: Optionally the default gateway IP.
@param connectivity_url: Optionally, a URL to verify if a usable
connection already exists.
+ @param static_routes: Optionally a list of static routes from DHCP
"""
if not all([interface, ip, prefix_or_mask, broadcast]):
raise ValueError(
@@ -704,6 +957,7 @@ class EphemeralIPv4Network(object):
self.ip = ip
self.broadcast = broadcast
self.router = router
+ self.static_routes = static_routes
self.cleanup_cmds = [] # List of commands to run to cleanup state.
def __enter__(self):
@@ -716,7 +970,21 @@ class EphemeralIPv4Network(object):
return
self._bringup_device()
- if self.router:
+
+ # rfc3442 requires us to ignore the router config *if* classless static
+ # routes are provided.
+ #
+ # https://tools.ietf.org/html/rfc3442
+ #
+ # If the DHCP server returns both a Classless Static Routes option and
+ # a Router option, the DHCP client MUST ignore the Router option.
+ #
+ # Similarly, if the DHCP server returns both a Classless Static Routes
+ # option and a Static Routes option, the DHCP client MUST ignore the
+ # Static Routes option.
+ if self.static_routes:
+ self._bringup_static_routes()
+ elif self.router:
self._bringup_router()
def __exit__(self, excp_type, excp_value, excp_traceback):
@@ -760,6 +1028,20 @@ class EphemeralIPv4Network(object):
['ip', '-family', 'inet', 'addr', 'del', cidr, 'dev',
self.interface])
+ def _bringup_static_routes(self):
+ # static_routes = [("169.254.169.254/32", "130.56.248.255"),
+ # ("0.0.0.0/0", "130.56.240.1")]
+ for net_address, gateway in self.static_routes:
+ via_arg = []
+ if gateway != "0.0.0.0/0":
+ via_arg = ['via', gateway]
+ util.subp(
+ ['ip', '-4', 'route', 'add', net_address] + via_arg +
+ ['dev', self.interface], capture=True)
+ self.cleanup_cmds.insert(
+ 0, ['ip', '-4', 'route', 'del', net_address] + via_arg +
+ ['dev', self.interface])
+
def _bringup_router(self):
"""Perform the ip commands to fully setup the router if needed."""
# Check if a default route exists and exit if it does
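
A hedged usage sketch of the new static_routes parameter (all addresses illustrative): the routes are installed instead of the router, per RFC 3442, and the recorded cleanup commands remove them on exit:

    from cloudinit.net import EphemeralIPv4Network

    with EphemeralIPv4Network(
            interface='eth0', ip='192.168.128.10',
            prefix_or_mask='255.255.255.0', broadcast='192.168.128.255',
            static_routes=[('169.254.169.254/32', '192.168.128.1'),
                           ('0.0.0.0/0', '192.168.128.1')]):
        pass  # fetch metadata here; routes are torn down on exit
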
diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py
index f89a0f73..64e1c699 100755
--- a/cloudinit/net/cmdline.py
+++ b/cloudinit/net/cmdline.py
@@ -5,20 +5,92 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
+import abc
import base64
import glob
import gzip
import io
import os
+from cloudinit import util
+
from . import get_devicelist
from . import read_sys_net_safe
-from cloudinit import util
-
_OPEN_ISCSI_INTERFACE_FILE = "/run/initramfs/open-iscsi.interface"
+class InitramfsNetworkConfigSource(metaclass=abc.ABCMeta):
+ """ABC for net config sources that read config written by initramfses"""
+
+ @abc.abstractmethod
+ def is_applicable(self):
+ # type: () -> bool
+ """Is this initramfs config source applicable to the current system?"""
+ pass
+
+ @abc.abstractmethod
+ def render_config(self):
+ # type: () -> dict
+ """Render a v1 network config from the initramfs configuration"""
+ pass
+
+
+class KlibcNetworkConfigSource(InitramfsNetworkConfigSource):
+ """InitramfsNetworkConfigSource for klibc initramfs (i.e. Debian/Ubuntu)
+
+ Has three parameters, but they are intended to make testing simpler, _not_
+ for use in production code. (This is indicated by the prepended
+ underscores.)
+ """
+
+ def __init__(self, _files=None, _mac_addrs=None, _cmdline=None):
+ self._files = _files
+ self._mac_addrs = _mac_addrs
+ self._cmdline = _cmdline
+
+ # Set defaults here, as they require computation that we don't want to
+ # do at method definition time
+ if self._files is None:
+ self._files = _get_klibc_net_cfg_files()
+ if self._cmdline is None:
+ self._cmdline = util.get_cmdline()
+ if self._mac_addrs is None:
+ self._mac_addrs = {}
+ for k in get_devicelist():
+ mac_addr = read_sys_net_safe(k, 'address')
+ if mac_addr:
+ self._mac_addrs[k] = mac_addr
+
+ def is_applicable(self):
+ # type: () -> bool
+ """
+ Return whether this system has klibc initramfs network config or not
+
+ Will return True if:
+ (a) klibc files exist in /run, AND
+ (b) either:
+ (i) ip= or ip6= are on the kernel cmdline, OR
+ (ii) an open-iscsi interface file is present in the system
+ """
+ if self._files:
+ if 'ip=' in self._cmdline or 'ip6=' in self._cmdline:
+ return True
+ if os.path.exists(_OPEN_ISCSI_INTERFACE_FILE):
+                # iBFT can configure networking without ip=
+ return True
+ return False
+
+ def render_config(self):
+ # type: () -> dict
+ return config_from_klibc_net_cfg(
+ files=self._files, mac_addrs=self._mac_addrs,
+ )
+
+
+_INITRAMFS_CONFIG_SOURCES = [KlibcNetworkConfigSource]
+
+
def _klibc_to_config_entry(content, mac_addrs=None):
"""Convert a klibc written shell content file to a 'config' entry
When ip= is seen on the kernel command line in debian initramfs
@@ -29,9 +101,12 @@ def _klibc_to_config_entry(content, mac_addrs=None):
provided here. There is no good documentation on this unfortunately.
DEVICE=<name> is expected/required and PROTO should indicate if
- this is 'static' or 'dhcp' or 'dhcp6' (LP: #1621507).
+ this is 'none' (static) or 'dhcp' or 'dhcp6' (LP: #1621507).
note that IPV6PROTO is also written by newer code to address the
possibility of both ipv4 and ipv6 getting addresses.
+
+ Full syntax is documented at:
+ https://git.kernel.org/pub/scm/libs/klibc/klibc.git/plain/usr/kinit/ipconfig/README.ipconfig
"""
if mac_addrs is None:
@@ -50,9 +125,9 @@ def _klibc_to_config_entry(content, mac_addrs=None):
if data.get('filename'):
proto = 'dhcp'
else:
- proto = 'static'
+ proto = 'none'
- if proto not in ('static', 'dhcp', 'dhcp6'):
+ if proto not in ('none', 'dhcp', 'dhcp6'):
raise ValueError("Unexpected value for PROTO: %s" % proto)
iface = {
@@ -72,6 +147,9 @@ def _klibc_to_config_entry(content, mac_addrs=None):
# PROTO for ipv4, IPV6PROTO for ipv6
cur_proto = data.get(pre + 'PROTO', proto)
+ # ipconfig's 'none' is called 'static'
+ if cur_proto == 'none':
+ cur_proto = 'static'
subnet = {'type': cur_proto, 'control': 'manual'}
# only populate address for static types. While the rendered config
@@ -137,6 +215,24 @@ def config_from_klibc_net_cfg(files=None, mac_addrs=None):
return {'config': entries, 'version': 1}
+def read_initramfs_config():
+ """
+ Return v1 network config for initramfs-configured networking (or None)
+
+ This will consider each _INITRAMFS_CONFIG_SOURCES entry in turn, and return
+ v1 network configuration for the first one that is applicable. If none are
+ applicable, return None.
+ """
+ for src_cls in _INITRAMFS_CONFIG_SOURCES:
+ cfg_source = src_cls()
+
+ if not cfg_source.is_applicable():
+ continue
+
+ return cfg_source.render_config()
+ return None
+
+
def _decomp_gzip(blob, strict=True):
# decompress blob. raise exception if not compressed unless strict=False.
with io.BytesIO(blob) as iobuf:
@@ -167,23 +263,10 @@ def _b64dgz(b64str, gzipped="try"):
return _decomp_gzip(blob, strict=gzipped != "try")
-def _is_initramfs_netconfig(files, cmdline):
- if files:
- if 'ip=' in cmdline or 'ip6=' in cmdline:
- return True
- if os.path.exists(_OPEN_ISCSI_INTERFACE_FILE):
- # iBft can configure networking without ip=
- return True
- return False
-
-
-def read_kernel_cmdline_config(files=None, mac_addrs=None, cmdline=None):
+def read_kernel_cmdline_config(cmdline=None):
if cmdline is None:
cmdline = util.get_cmdline()
- if files is None:
- files = _get_klibc_net_cfg_files()
-
if 'network-config=' in cmdline:
data64 = None
for tok in cmdline.split():
@@ -192,16 +275,6 @@ def read_kernel_cmdline_config(files=None, mac_addrs=None, cmdline=None):
if data64:
return util.load_yaml(_b64dgz(data64))
- if not _is_initramfs_netconfig(files, cmdline):
- return None
-
- if mac_addrs is None:
- mac_addrs = {}
- for k in get_devicelist():
- mac_addr = read_sys_net_safe(k, 'address')
- if mac_addr:
- mac_addrs[k] = mac_addr
-
- return config_from_klibc_net_cfg(files=files, mac_addrs=mac_addrs)
+ return None
# vi: ts=4 expandtab
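
The net effect of the refactor: read_kernel_cmdline_config now handles only the network-config= payload, while initramfs-written configuration is discovered via the source-class list. A hedged caller sketch:

    from cloudinit.net.cmdline import read_initramfs_config

    cfg = read_initramfs_config()
    if cfg is not None:
        # The first applicable source wins; klibc is the only one so far.
        assert cfg.get('version') == 1
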
diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py
index 0db991db..19d0199c 100644
--- a/cloudinit/net/dhcp.py
+++ b/cloudinit/net/dhcp.py
@@ -9,6 +9,8 @@ import logging
import os
import re
import signal
+import time
+from io import StringIO
from cloudinit.net import (
EphemeralIPv4Network, find_fallback_nic, get_devicelist,
@@ -16,7 +18,6 @@ from cloudinit.net import (
from cloudinit.net.network_state import mask_and_ipv4_to_bcast_addr as bcip
from cloudinit import temp_utils
from cloudinit import util
-from six import StringIO
LOG = logging.getLogger(__name__)
@@ -91,10 +92,17 @@ class EphemeralDHCPv4(object):
nmap = {'interface': 'interface', 'ip': 'fixed-address',
'prefix_or_mask': 'subnet-mask',
'broadcast': 'broadcast-address',
+ 'static_routes': [
+ 'rfc3442-classless-static-routes',
+ 'classless-static-routes'
+ ],
'router': 'routers'}
- kwargs = dict([(k, self.lease.get(v)) for k, v in nmap.items()])
+ kwargs = self.extract_dhcp_options_mapping(nmap)
if not kwargs['broadcast']:
kwargs['broadcast'] = bcip(kwargs['prefix_or_mask'], kwargs['ip'])
+ if kwargs['static_routes']:
+ kwargs['static_routes'] = (
+ parse_static_routes(kwargs['static_routes']))
if self.connectivity_url:
kwargs['connectivity_url'] = self.connectivity_url
ephipv4 = EphemeralIPv4Network(**kwargs)
@@ -102,6 +110,25 @@ class EphemeralDHCPv4(object):
self._ephipv4 = ephipv4
return self.lease
+ def extract_dhcp_options_mapping(self, nmap):
+ result = {}
+ for internal_reference, lease_option_names in nmap.items():
+ if isinstance(lease_option_names, list):
+ self.get_first_option_value(
+ internal_reference,
+ lease_option_names,
+ result
+ )
+ else:
+ result[internal_reference] = self.lease.get(lease_option_names)
+ return result
+
+ def get_first_option_value(self, internal_mapping,
+ lease_option_names, result):
+ for different_names in lease_option_names:
+ if not result.get(internal_mapping):
+ result[internal_mapping] = self.lease.get(different_names)
+
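
The list-valued mapping means the first lease option present wins; for static routes the isc-dhclient spelling is tried before the dhcpcd one. Equivalent standalone logic, with an illustrative lease:

    lease = {'classless-static-routes': '0 192.168.128.1'}
    names = ['rfc3442-classless-static-routes', 'classless-static-routes']
    value = next((lease[n] for n in names if lease.get(n)), None)
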
def maybe_perform_dhcp_discovery(nic=None):
"""Perform dhcp discovery if nic valid and dhclient command exists.
@@ -127,7 +154,9 @@ def maybe_perform_dhcp_discovery(nic=None):
if not dhclient_path:
LOG.debug('Skip dhclient configuration: No dhclient command found.')
return []
- with temp_utils.tempdir(prefix='cloud-init-dhcp-', needs_exe=True) as tdir:
+ with temp_utils.tempdir(rmtree_ignore_errors=True,
+ prefix='cloud-init-dhcp-',
+ needs_exe=True) as tdir:
# Use /var/tmp because /run/cloud-init/tmp is mounted noexec
return dhcp_discovery(dhclient_path, nic, tdir)
@@ -195,24 +224,39 @@ def dhcp_discovery(dhclient_cmd_path, interface, cleandir):
'-pf', pid_file, interface, '-sf', '/bin/true']
util.subp(cmd, capture=True)
- # dhclient doesn't write a pid file until after it forks when it gets a
- # proper lease response. Since cleandir is a temp directory that gets
- # removed, we need to wait for that pidfile creation before the
- # cleandir is removed, otherwise we get FileNotFound errors.
+ # Wait for pid file and lease file to appear, and for the process
+ # named by the pid file to daemonize (have pid 1 as its parent). If we
+ # try to read the lease file before daemonization happens, we might try
+ # to read it before the dhclient has actually written it. We also have
+ # to wait until the dhclient has become a daemon so we can be sure to
+ # kill the correct process, thus freeing cleandir to be deleted back
+ # up the callstack.
missing = util.wait_for_files(
[pid_file, lease_file], maxwait=5, naplen=0.01)
if missing:
LOG.warning("dhclient did not produce expected files: %s",
', '.join(os.path.basename(f) for f in missing))
return []
- pid_content = util.load_file(pid_file).strip()
- try:
- pid = int(pid_content)
- except ValueError:
- LOG.debug(
- "pid file contains non-integer content '%s'", pid_content)
- else:
- os.kill(pid, signal.SIGKILL)
+
+ ppid = 'unknown'
+ for _ in range(0, 1000):
+ pid_content = util.load_file(pid_file).strip()
+ try:
+ pid = int(pid_content)
+ except ValueError:
+ pass
+ else:
+ ppid = util.get_proc_ppid(pid)
+ if ppid == 1:
+ LOG.debug('killing dhclient with pid=%s', pid)
+ os.kill(pid, signal.SIGKILL)
+ return parse_dhcp_lease_file(lease_file)
+ time.sleep(0.01)
+
+ LOG.error(
+ 'dhclient(pid=%s, parentpid=%s) failed to daemonize after %s seconds',
+ pid_content, ppid, 0.01 * 1000
+ )
return parse_dhcp_lease_file(lease_file)
@@ -254,4 +298,96 @@ def networkd_get_option_from_leases(keyname, leases_d=None):
return data[keyname]
return None
+
+def parse_static_routes(rfc3442):
+ """ parse rfc3442 format and return a list containing tuple of strings.
+
+ The tuple is composed of the network_address (including net length) and
+ gateway for a parsed static route. It can parse two formats of rfc3442,
+ one from dhcpcd and one from dhclient (isc).
+
+    @param rfc3442: string in rfc3442 format (isc or dhcpcd)
+ @returns: list of tuple(str, str) for all valid parsed routes until the
+ first parsing error.
+
+ E.g.
+ sr=parse_static_routes("32,169,254,169,254,130,56,248,255,0,130,56,240,1")
+ sr=[
+ ("169.254.169.254/32", "130.56.248.255"), ("0.0.0.0/0", "130.56.240.1")
+ ]
+
+ sr2 = parse_static_routes("24.191.168.128 192.168.128.1,0 192.168.128.1")
+ sr2 = [
+ ("191.168.128.0/24", "192.168.128.1"), ("0.0.0.0/0", "192.168.128.1")
+ ]
+
+ Python version of isc-dhclient's hooks:
+ /etc/dhcp/dhclient-exit-hooks.d/rfc3442-classless-routes
+ """
+ # raw strings from dhcp lease may end in semi-colon
+ rfc3442 = rfc3442.rstrip(";")
+ tokens = [tok for tok in re.split(r"[, .]", rfc3442) if tok]
+ static_routes = []
+
+ def _trunc_error(cidr, required, remain):
+ msg = ("RFC3442 string malformed. Current route has CIDR of %s "
+ "and requires %s significant octets, but only %s remain. "
+ "Verify DHCP rfc3442-classless-static-routes value: %s"
+ % (cidr, required, remain, rfc3442))
+ LOG.error(msg)
+
+ current_idx = 0
+ for idx, tok in enumerate(tokens):
+ if idx < current_idx:
+ continue
+ net_length = int(tok)
+ if net_length in range(25, 33):
+ req_toks = 9
+ if len(tokens[idx:]) < req_toks:
+ _trunc_error(net_length, req_toks, len(tokens[idx:]))
+ return static_routes
+ net_address = ".".join(tokens[idx+1:idx+5])
+ gateway = ".".join(tokens[idx+5:idx+req_toks])
+ current_idx = idx + req_toks
+ elif net_length in range(17, 25):
+ req_toks = 8
+ if len(tokens[idx:]) < req_toks:
+ _trunc_error(net_length, req_toks, len(tokens[idx:]))
+ return static_routes
+ net_address = ".".join(tokens[idx+1:idx+4] + ["0"])
+ gateway = ".".join(tokens[idx+4:idx+req_toks])
+ current_idx = idx + req_toks
+ elif net_length in range(9, 17):
+ req_toks = 7
+ if len(tokens[idx:]) < req_toks:
+ _trunc_error(net_length, req_toks, len(tokens[idx:]))
+ return static_routes
+ net_address = ".".join(tokens[idx+1:idx+3] + ["0", "0"])
+ gateway = ".".join(tokens[idx+3:idx+req_toks])
+ current_idx = idx + req_toks
+ elif net_length in range(1, 9):
+ req_toks = 6
+ if len(tokens[idx:]) < req_toks:
+ _trunc_error(net_length, req_toks, len(tokens[idx:]))
+ return static_routes
+ net_address = ".".join(tokens[idx+1:idx+2] + ["0", "0", "0"])
+ gateway = ".".join(tokens[idx+2:idx+req_toks])
+ current_idx = idx + req_toks
+ elif net_length == 0:
+ req_toks = 5
+ if len(tokens[idx:]) < req_toks:
+ _trunc_error(net_length, req_toks, len(tokens[idx:]))
+ return static_routes
+ net_address = "0.0.0.0"
+ gateway = ".".join(tokens[idx+1:idx+req_toks])
+ current_idx = idx + req_toks
+ else:
+ LOG.error('Parsed invalid net length "%s". Verify DHCP '
+ 'rfc3442-classless-static-routes value.', net_length)
+ return static_routes
+
+ static_routes.append(("%s/%s" % (net_address, net_length), gateway))
+
+ return static_routes
+
# vi: ts=4 expandtab
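
A worked pass over the first docstring example shows the token arithmetic: a /32 route consumes 9 tokens (1 length, 4 network octets, 4 gateway octets) and a /0 consumes 5 (1 length, 4 gateway octets):

    tokens = "32,169,254,169,254,130,56,248,255,0,130,56,240,1".split(",")
    assert tokens[0] == "32"                           # /32 -> 9 tokens
    assert ".".join(tokens[1:5]) == "169.254.169.254"  # network
    assert ".".join(tokens[5:9]) == "130.56.248.255"   # gateway
    assert tokens[9] == "0"                            # /0 -> 5 tokens
    assert ".".join(tokens[10:14]) == "130.56.240.1"   # gateway
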
diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py
index 64236320..2f714563 100644
--- a/cloudinit/net/eni.py
+++ b/cloudinit/net/eni.py
@@ -94,7 +94,7 @@ def _iface_add_attrs(iface, index, ipv4_subnet_mtu):
]
renames = {'mac_address': 'hwaddress'}
- if iface['type'] not in ['bond', 'bridge', 'vlan']:
+ if iface['type'] not in ['bond', 'bridge', 'infiniband', 'vlan']:
ignore_map.append('mac_address')
for key, value in iface.items():
@@ -366,8 +366,6 @@ class Renderer(renderer.Renderer):
down = indent + "pre-down route del"
or_true = " || true"
mapping = {
- 'network': '-net',
- 'netmask': 'netmask',
'gateway': 'gw',
'metric': 'metric',
}
@@ -379,13 +377,21 @@ class Renderer(renderer.Renderer):
default_gw = ' -A inet6 default'
route_line = ''
- for k in ['network', 'netmask', 'gateway', 'metric']:
- if default_gw and k in ['network', 'netmask']:
+ for k in ['network', 'gateway', 'metric']:
+ if default_gw and k == 'network':
continue
if k == 'gateway':
route_line += '%s %s %s' % (default_gw, mapping[k], route[k])
elif k in route:
- route_line += ' %s %s' % (mapping[k], route[k])
+ if k == 'network':
+ if ':' in route[k]:
+ route_line += ' -A inet6'
+ else:
+ route_line += ' -net'
+ if 'prefix' in route:
+ route_line += ' %s/%s' % (route[k], route['prefix'])
+ else:
+ route_line += ' %s %s' % (mapping[k], route[k])
content.append(up + route_line + or_true)
content.append(down + route_line + or_true)
return content
@@ -393,6 +399,7 @@ class Renderer(renderer.Renderer):
def _render_iface(self, iface, render_hwaddress=False):
sections = []
subnets = iface.get('subnets', {})
+ accept_ra = iface.pop('accept-ra', None)
if subnets:
for index, subnet in enumerate(subnets):
ipv4_subnet_mtu = None
@@ -405,8 +412,29 @@ class Renderer(renderer.Renderer):
else:
ipv4_subnet_mtu = subnet.get('mtu')
iface['inet'] = subnet_inet
- if subnet['type'].startswith('dhcp'):
+ if (subnet['type'] == 'dhcp4' or subnet['type'] == 'dhcp6' or
+ subnet['type'] == 'ipv6_dhcpv6-stateful'):
+ # Configure network settings using DHCP or DHCPv6
iface['mode'] = 'dhcp'
+ if accept_ra is not None:
+ # Accept router advertisements (0=off, 1=on)
+ iface['accept_ra'] = '1' if accept_ra else '0'
+ elif subnet['type'] == 'ipv6_dhcpv6-stateless':
+ # Configure network settings using SLAAC from RAs
+ iface['mode'] = 'auto'
+ # Use stateless DHCPv6 (0=off, 1=on)
+ iface['dhcp'] = '1'
+ elif subnet['type'] == 'ipv6_slaac':
+ # Configure network settings using SLAAC from RAs
+ iface['mode'] = 'auto'
+ # Use stateless DHCPv6 (0=off, 1=on)
+ iface['dhcp'] = '0'
+ elif subnet_is_ipv6(subnet):
+ # mode might be static6, eni uses 'static'
+ iface['mode'] = 'static'
+ if accept_ra is not None:
+ # Accept router advertisements (0=off, 1=on)
+ iface['accept_ra'] = '1' if accept_ra else '0'
# do not emit multiple 'auto $IFACE' lines as older (precise)
# ifupdown complains
@@ -461,9 +489,10 @@ class Renderer(renderer.Renderer):
order = {
'loopback': 0,
'physical': 1,
- 'bond': 2,
- 'bridge': 3,
- 'vlan': 4,
+ 'infiniband': 2,
+ 'bond': 3,
+ 'bridge': 4,
+ 'vlan': 5,
}
sections = []
diff --git a/cloudinit/net/freebsd.py b/cloudinit/net/freebsd.py
new file mode 100644
index 00000000..d6f61da3
--- /dev/null
+++ b/cloudinit/net/freebsd.py
@@ -0,0 +1,175 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import re
+
+from cloudinit import log as logging
+from cloudinit import net
+from cloudinit import util
+from cloudinit.distros import rhel_util
+from cloudinit.distros.parsers.resolv_conf import ResolvConf
+
+from . import renderer
+
+LOG = logging.getLogger(__name__)
+
+
+class Renderer(renderer.Renderer):
+ resolv_conf_fn = 'etc/resolv.conf'
+ rc_conf_fn = 'etc/rc.conf'
+
+ def __init__(self, config=None):
+ if not config:
+ config = {}
+ self.dhcp_interfaces = []
+ self._postcmds = config.get('postcmds', True)
+
+ def _update_rc_conf(self, settings, target=None):
+ fn = util.target_path(target, self.rc_conf_fn)
+ rhel_util.update_sysconfig_file(fn, settings)
+
+ def _write_ifconfig_entries(self, settings, target=None):
+ ifname_by_mac = net.get_interfaces_by_mac()
+ for interface in settings.iter_interfaces():
+ device_name = interface.get("name")
+ device_mac = interface.get("mac_address")
+ if device_name and re.match(r'^lo\d+$', device_name):
+ continue
+ if device_mac not in ifname_by_mac:
+ LOG.info('Cannot find any device with MAC %s', device_mac)
+ elif device_mac and device_name:
+ cur_name = ifname_by_mac[device_mac]
+ if cur_name != device_name:
+ LOG.info('netif service will rename interface %s to %s',
+ cur_name, device_name)
+ self._update_rc_conf(
+ {'ifconfig_%s_name' % cur_name: device_name},
+ target=target)
+ else:
+ device_name = ifname_by_mac[device_mac]
+
+ LOG.info('Configuring interface %s', device_name)
+ ifconfig = 'DHCP' # default
+
+ for subnet in interface.get("subnets", []):
+ if ifconfig != 'DHCP':
+                    LOG.info('The FreeBSD provider only sets the first subnet.')
+ break
+ if subnet.get('type') == 'static':
+ if not subnet.get('netmask'):
+ LOG.debug(
+ 'Skipping IP %s, because there is no netmask',
+ subnet.get('address'))
+ continue
+ LOG.debug('Configuring dev %s with %s / %s', device_name,
+ subnet.get('address'), subnet.get('netmask'))
+ # Configure an ipv4 address.
+ ifconfig = (
+ subnet.get('address') + ' netmask ' +
+ subnet.get('netmask'))
+
+ if ifconfig == 'DHCP':
+ self.dhcp_interfaces.append(device_name)
+ self._update_rc_conf(
+ {'ifconfig_' + device_name: ifconfig},
+ target=target)
+
+ def _write_route_entries(self, settings, target=None):
+ routes = list(settings.iter_routes())
+ for interface in settings.iter_interfaces():
+ subnets = interface.get("subnets", [])
+ for subnet in subnets:
+ if subnet.get('type') != 'static':
+ continue
+ gateway = subnet.get('gateway')
+ if gateway and len(gateway.split('.')) == 4:
+ routes.append({
+ 'network': '0.0.0.0',
+ 'netmask': '0.0.0.0',
+ 'gateway': gateway})
+ routes += subnet.get('routes', [])
+ route_cpt = 0
+ for route in routes:
+ network = route.get('network')
+ if not network:
+ LOG.debug('Skipping a bad route entry')
+ continue
+ netmask = route.get('netmask')
+ gateway = route.get('gateway')
+ route_cmd = "-route %s/%s %s" % (network, netmask, gateway)
+ if network == '0.0.0.0':
+ self._update_rc_conf(
+ {'defaultrouter': gateway}, target=target)
+ else:
+ self._update_rc_conf(
+ {'route_net%d' % route_cpt: route_cmd}, target=target)
+ route_cpt += 1
+
+ def _write_resolve_conf(self, settings, target=None):
+ nameservers = settings.dns_nameservers
+ searchdomains = settings.dns_searchdomains
+ for interface in settings.iter_interfaces():
+ for subnet in interface.get("subnets", []):
+ if 'dns_nameservers' in subnet:
+ nameservers.extend(subnet['dns_nameservers'])
+ if 'dns_search' in subnet:
+ searchdomains.extend(subnet['dns_search'])
+ # Try to read the /etc/resolv.conf or just start from scratch if that
+ # fails.
+ try:
+ resolvconf = ResolvConf(util.load_file(util.target_path(
+ target, self.resolv_conf_fn)))
+ resolvconf.parse()
+ except IOError:
+ util.logexc(LOG, "Failed to parse %s, use new empty file",
+ util.target_path(target, self.resolv_conf_fn))
+ resolvconf = ResolvConf('')
+ resolvconf.parse()
+
+ # Add some nameservers
+ for server in nameservers:
+ try:
+ resolvconf.add_nameserver(server)
+ except ValueError:
+ util.logexc(LOG, "Failed to add nameserver %s", server)
+
+ # And add any searchdomains.
+ for domain in searchdomains:
+ try:
+ resolvconf.add_search_domain(domain)
+ except ValueError:
+ util.logexc(LOG, "Failed to add search domain %s", domain)
+ util.write_file(
+ util.target_path(target, self.resolv_conf_fn),
+ str(resolvconf), 0o644)
+
+ def _write_network(self, settings, target=None):
+ self._write_ifconfig_entries(settings, target=target)
+ self._write_route_entries(settings, target=target)
+ self._write_resolve_conf(settings, target=target)
+
+ self.start_services(run=self._postcmds)
+
+ def render_network_state(self, network_state, templates=None, target=None):
+ self._write_network(network_state, target=target)
+
+ def start_services(self, run=False):
+ if not run:
+ LOG.debug("freebsd generate postcmd disabled")
+ return
+
+ util.subp(['service', 'netif', 'restart'], capture=True)
+ # On FreeBSD 10, the restart of routing and dhclient is likely to fail
+ # because
+ # - routing: it cannot remove the loopback route, but it will still set
+ # up the default route as expected.
+ # - dhclient: it cannot stop the dhclient started by the netif service.
+        # In both cases, the situation is ok, and we can proceed.
+ util.subp(['service', 'routing', 'restart'], capture=True, rcs=[0, 1])
+ for dhcp_interface in self.dhcp_interfaces:
+ util.subp(['service', 'dhclient', 'restart', dhcp_interface],
+ rcs=[0, 1],
+ capture=True)
+
+
+def available(target=None):
+ return util.is_FreeBSD()
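
To illustrate the renderer's output with hypothetical values: one static subnet plus a gateway reduces to a handful of rc.conf assignments handed to _update_rc_conf:

    # Settings written for a static em0 (illustrative only):
    settings = {
        'ifconfig_em0': '192.168.1.10 netmask 255.255.255.0',
        'defaultrouter': '192.168.1.1',
    }
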
diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py
index 21517fda..89855270 100644
--- a/cloudinit/net/netplan.py
+++ b/cloudinit/net/netplan.py
@@ -4,10 +4,11 @@ import copy
import os
from . import renderer
-from .network_state import subnet_is_ipv6, NET_CONFIG_TO_V2
+from .network_state import subnet_is_ipv6, NET_CONFIG_TO_V2, IPV6_DYNAMIC_TYPES
from cloudinit import log as logging
from cloudinit import util
+from cloudinit import safeyaml
from cloudinit.net import SYS_CLASS_NET, get_devicelist
KNOWN_SNAPD_CONFIG = b"""\
@@ -34,7 +35,7 @@ def _get_params_dict_by_match(config, match):
if key.startswith(match))
-def _extract_addresses(config, entry, ifname):
+def _extract_addresses(config, entry, ifname, features=None):
"""This method parse a cloudinit.net.network_state dictionary (config) and
maps netstate keys/values into a dictionary (entry) to represent
netplan yaml.
@@ -51,7 +52,8 @@ def _extract_addresses(config, entry, ifname):
'mtu': 1480,
'netmask': 64,
'type': 'static'}],
- 'type: physical'
+ 'type: physical',
+ 'accept-ra': 'true'
}
An entry dictionary looks like:
@@ -66,7 +68,7 @@ def _extract_addresses(config, entry, ifname):
'match': {'macaddress': '52:54:00:12:34:00'},
'mtu': 1501,
'address': ['192.168.1.2/24', '2001:4800:78ff:1b:be76:4eff:fe06:1000"],
- 'mtu6': 1480}
+ 'ipv6-mtu': 1480}
"""
@@ -79,6 +81,8 @@ def _extract_addresses(config, entry, ifname):
else:
return [obj, ]
+ if features is None:
+ features = []
addresses = []
routes = []
nameservers = []
@@ -92,7 +96,9 @@ def _extract_addresses(config, entry, ifname):
if sn_type == 'dhcp':
sn_type += '4'
entry.update({sn_type: True})
- elif sn_type in ['static']:
+ elif sn_type in IPV6_DYNAMIC_TYPES:
+ entry.update({'dhcp6': True})
+ elif sn_type in ['static', 'static6']:
addr = "%s" % subnet.get('address')
if 'prefix' in subnet:
addr += "/%d" % subnet.get('prefix')
@@ -108,8 +114,8 @@ def _extract_addresses(config, entry, ifname):
searchdomains += _listify(subnet.get('dns_search', []))
if 'mtu' in subnet:
mtukey = 'mtu'
- if subnet_is_ipv6(subnet):
- mtukey += '6'
+ if subnet_is_ipv6(subnet) and 'ipv6-mtu' in features:
+ mtukey = 'ipv6-mtu'
entry.update({mtukey: subnet.get('mtu')})
for route in subnet.get('routes', []):
to_net = "%s/%s" % (route.get('network'),
@@ -144,6 +150,8 @@ def _extract_addresses(config, entry, ifname):
ns = entry.get('nameservers', {})
ns.update({'search': searchdomains})
entry.update({'nameservers': ns})
+ if 'accept-ra' in config and config['accept-ra'] is not None:
+ entry.update({'accept-ra': util.is_true(config.get('accept-ra'))})
def _extract_bond_slaves_by_name(interfaces, entry, bond_master):
@@ -179,6 +187,7 @@ class Renderer(renderer.Renderer):
"""Renders network information in a /etc/netplan/network.yaml format."""
NETPLAN_GENERATE = ['netplan', 'generate']
+ NETPLAN_INFO = ['netplan', 'info']
def __init__(self, config=None):
if not config:
@@ -188,6 +197,22 @@ class Renderer(renderer.Renderer):
self.netplan_header = config.get('netplan_header', None)
self._postcmds = config.get('postcmds', False)
self.clean_default = config.get('clean_default', True)
+ self._features = config.get('features', None)
+
+ @property
+ def features(self):
+ if self._features is None:
+ try:
+ info_blob, _err = util.subp(self.NETPLAN_INFO, capture=True)
+ info = util.load_yaml(info_blob)
+ self._features = info['netplan.io']['features']
+ except util.ProcessExecutionError:
+ # if the info subcommand is not present then we don't have any
+ # new features
+ pass
+ except (TypeError, KeyError) as e:
+ LOG.debug('Failed to list features from netplan info: %s', e)
+ return self._features
def render_network_state(self, network_state, templates=None, target=None):
# check network state for version
@@ -235,9 +260,9 @@ class Renderer(renderer.Renderer):
# if content already in netplan format, pass it back
if network_state.version == 2:
LOG.debug('V2 to V2 passthrough')
- return util.yaml_dumps({'network': network_state.config},
- explicit_start=False,
- explicit_end=False)
+ return safeyaml.dumps({'network': network_state.config},
+ explicit_start=False,
+ explicit_end=False)
ethernets = {}
wifis = {}
@@ -271,7 +296,7 @@ class Renderer(renderer.Renderer):
else:
del eth['match']
del eth['set-name']
- _extract_addresses(ifcfg, eth, ifname)
+ _extract_addresses(ifcfg, eth, ifname, self.features)
ethernets.update({ifname: eth})
elif if_type == 'bond':
@@ -296,7 +321,7 @@ class Renderer(renderer.Renderer):
slave_interfaces = ifcfg.get('bond-slaves')
if slave_interfaces == 'none':
_extract_bond_slaves_by_name(interfaces, bond, ifname)
- _extract_addresses(ifcfg, bond, ifname)
+ _extract_addresses(ifcfg, bond, ifname, self.features)
bonds.update({ifname: bond})
elif if_type == 'bridge':
@@ -331,7 +356,7 @@ class Renderer(renderer.Renderer):
bridge.update({'parameters': br_config})
if ifcfg.get('mac_address'):
bridge['macaddress'] = ifcfg.get('mac_address').lower()
- _extract_addresses(ifcfg, bridge, ifname)
+ _extract_addresses(ifcfg, bridge, ifname, self.features)
bridges.update({ifname: bridge})
elif if_type == 'vlan':
@@ -343,7 +368,7 @@ class Renderer(renderer.Renderer):
macaddr = ifcfg.get('mac_address', None)
if macaddr is not None:
vlan['macaddress'] = macaddr.lower()
- _extract_addresses(ifcfg, vlan, ifname)
+ _extract_addresses(ifcfg, vlan, ifname, self.features)
vlans.update({ifname: vlan})
# inject global nameserver values under each all interface which
@@ -359,9 +384,10 @@ class Renderer(renderer.Renderer):
# workaround yaml dictionary key sorting when dumping
def _render_section(name, section):
if section:
- dump = util.yaml_dumps({name: section},
- explicit_start=False,
- explicit_end=False)
+ dump = safeyaml.dumps({name: section},
+ explicit_start=False,
+ explicit_end=False,
+ noalias=True)
txt = util.indent(dump, ' ' * 4)
return [txt]
return []
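
The new features property probes `netplan info` once, caches the result, and the renderer only emits newer keys such as ipv6-mtu when the installed netplan advertises them. A rough sketch of the probe, assuming `netplan info` prints YAML shaped like the comment below (the exact output shape is an assumption here, inferred from the parsing in this diff):

    import subprocess
    import yaml

    def netplan_features():
        # Ask the installed netplan for its feature list; older
        # releases lack the 'info' subcommand, which we treat as
        # "no new features". Assumed output shape:
        #   netplan.io:
        #     features: [dhcp-use-domains, ipv6-mtu]
        try:
            out = subprocess.check_output(['netplan', 'info'])
        except (OSError, subprocess.CalledProcessError):
            return []
        try:
            return yaml.safe_load(out)['netplan.io']['features']
        except (TypeError, KeyError):
            return []

    # if 'ipv6-mtu' in netplan_features(): emit 'ipv6-mtu' not 'mtu6'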
diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
index f76e508a..63d6e291 100644
--- a/cloudinit/net/network_state.py
+++ b/cloudinit/net/network_state.py
@@ -10,19 +10,23 @@ import logging
import socket
import struct
-import six
-
+from cloudinit import safeyaml
from cloudinit import util
LOG = logging.getLogger(__name__)
NETWORK_STATE_VERSION = 1
+IPV6_DYNAMIC_TYPES = ['dhcp6',
+ 'ipv6_slaac',
+ 'ipv6_dhcpv6-stateless',
+ 'ipv6_dhcpv6-stateful']
NETWORK_STATE_REQUIRED_KEYS = {
1: ['version', 'config', 'network_state'],
}
NETWORK_V2_KEY_FILTER = [
- 'addresses', 'dhcp4', 'dhcp6', 'gateway4', 'gateway6', 'interfaces',
- 'match', 'mtu', 'nameservers', 'renderer', 'set-name', 'wakeonlan'
+ 'addresses', 'dhcp4', 'dhcp4-overrides', 'dhcp6', 'dhcp6-overrides',
+ 'gateway4', 'gateway6', 'interfaces', 'match', 'mtu', 'nameservers',
+ 'renderer', 'set-name', 'wakeonlan', 'accept-ra'
]
NET_CONFIG_TO_V2 = {
@@ -67,7 +71,7 @@ def parse_net_config_data(net_config, skip_broken=True):
# pass the whole net-config as-is
config = net_config
- if version and config:
+ if version and config is not None:
nsi = NetworkStateInterpreter(version=version, config=config)
nsi.parse_config(skip_broken=skip_broken)
state = nsi.get_network_state()
@@ -148,6 +152,7 @@ class NetworkState(object):
self._network_state = copy.deepcopy(network_state)
self._version = version
self.use_ipv6 = network_state.get('use_ipv6', False)
+ self._has_default_route = None
@property
def config(self):
@@ -157,14 +162,6 @@ class NetworkState(object):
def version(self):
return self._version
- def iter_routes(self, filter_func=None):
- for route in self._network_state.get('routes', []):
- if filter_func is not None:
- if filter_func(route):
- yield route
- else:
- yield route
-
@property
def dns_nameservers(self):
try:
@@ -179,18 +176,49 @@ class NetworkState(object):
except KeyError:
return []
+ @property
+ def has_default_route(self):
+ if self._has_default_route is None:
+ self._has_default_route = self._maybe_has_default_route()
+ return self._has_default_route
+
def iter_interfaces(self, filter_func=None):
ifaces = self._network_state.get('interfaces', {})
- for iface in six.itervalues(ifaces):
+ for iface in ifaces.values():
if filter_func is None:
yield iface
else:
if filter_func(iface):
yield iface
+ def iter_routes(self, filter_func=None):
+ for route in self._network_state.get('routes', []):
+ if filter_func is not None:
+ if filter_func(route):
+ yield route
+ else:
+ yield route
+
+ def _maybe_has_default_route(self):
+ for route in self.iter_routes():
+ if self._is_default_route(route):
+ return True
+ for iface in self.iter_interfaces():
+ for subnet in iface.get('subnets', []):
+ for route in subnet.get('routes', []):
+ if self._is_default_route(route):
+ return True
+ return False
+
+ def _is_default_route(self, route):
+ default_nets = ('::', '0.0.0.0')
+ return (
+ route.get('prefix') == 0
+ and route.get('network') in default_nets
+ )
-@six.add_metaclass(CommandHandlerMeta)
-class NetworkStateInterpreter(object):
+
+class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
initial_network_state = {
'interfaces': {},
@@ -228,7 +256,7 @@ class NetworkStateInterpreter(object):
'config': self._config,
'network_state': self._network_state,
}
- return util.yaml_dumps(state)
+ return safeyaml.dumps(state)
def load(self, state):
if 'version' not in state:
@@ -247,7 +275,7 @@ class NetworkStateInterpreter(object):
setattr(self, key, state[key])
def dump_network_state(self):
- return util.yaml_dumps(self._network_state)
+ return safeyaml.dumps(self._network_state)
def as_dict(self):
return {'version': self._version, 'config': self._config}
@@ -315,7 +343,8 @@ class NetworkStateInterpreter(object):
'name': 'eth0',
'subnets': [
{'type': 'dhcp4'}
- ]
+ ],
+ 'accept-ra': 'true'
}
'''
@@ -335,6 +364,9 @@ class NetworkStateInterpreter(object):
self.use_ipv6 = True
break
+ accept_ra = command.get('accept-ra', None)
+ if accept_ra is not None:
+ accept_ra = util.is_true(accept_ra)
iface.update({
'name': command.get('name'),
'type': command.get('type'),
@@ -345,6 +377,7 @@ class NetworkStateInterpreter(object):
'address': None,
'gateway': None,
'subnets': subnets,
+ 'accept-ra': accept_ra
})
self._network_state['interfaces'].update({command.get('name'): iface})
self.dump_network_state()
@@ -571,6 +604,7 @@ class NetworkStateInterpreter(object):
eno1:
match:
macaddress: 00:11:22:33:44:55
+ driver: hv_netsvc
wakeonlan: true
dhcp4: true
dhcp6: false
@@ -587,6 +621,7 @@ class NetworkStateInterpreter(object):
driver: ixgbe
set-name: lom1
dhcp6: true
+ accept-ra: true
switchports:
match:
name: enp2*
@@ -606,15 +641,18 @@ class NetworkStateInterpreter(object):
'type': 'physical',
'name': cfg.get('set-name', eth),
}
- mac_address = cfg.get('match', {}).get('macaddress', None)
+ match = cfg.get('match', {})
+ mac_address = match.get('macaddress', None)
if not mac_address:
LOG.debug('NetworkState Version2: missing "macaddress" info '
'in config entry: %s: %s', eth, str(cfg))
- phy_cmd.update({'mac_address': mac_address})
-
- for key in ['mtu', 'match', 'wakeonlan']:
+ phy_cmd['mac_address'] = mac_address
+ driver = match.get('driver', None)
+ if driver:
+ phy_cmd['params'] = {'driver': driver}
+ for key in ['mtu', 'match', 'wakeonlan', 'accept-ra']:
if key in cfg:
- phy_cmd.update({key: cfg.get(key)})
+ phy_cmd[key] = cfg[key]
subnets = self._v2_to_v1_ipcfg(cfg)
if len(subnets) > 0:
@@ -648,6 +686,8 @@ class NetworkStateInterpreter(object):
'vlan_id': cfg.get('id'),
'vlan_link': cfg.get('link'),
}
+ if 'mtu' in cfg:
+ vlan_cmd['mtu'] = cfg['mtu']
subnets = self._v2_to_v1_ipcfg(cfg)
if len(subnets) > 0:
vlan_cmd.update({'subnets': subnets})
@@ -682,6 +722,14 @@ class NetworkStateInterpreter(object):
item_params = dict((key, value) for (key, value) in
item_cfg.items() if key not in
NETWORK_V2_KEY_FILTER)
+        # we accept the fixed spelling, but write the old one for compatibility.
+ # Xenial does not have an updated netplan which supports the
+ # correct spelling. LP: #1756701
+        params = item_params.get('parameters', {})
+ grat_value = params.pop('gratuitous-arp', None)
+ if grat_value:
+ params['gratuitious-arp'] = grat_value
+
v1_cmd = {
'type': cmd_type,
'name': item_name,
@@ -689,6 +737,8 @@ class NetworkStateInterpreter(object):
'params': dict((v2key_to_v1[k], v) for k, v in
item_params.get('parameters', {}).items())
}
+ if 'mtu' in item_cfg:
+ v1_cmd['mtu'] = item_cfg['mtu']
subnets = self._v2_to_v1_ipcfg(item_cfg)
if len(subnets) > 0:
v1_cmd.update({'subnets': subnets})
@@ -705,12 +755,20 @@ class NetworkStateInterpreter(object):
def _v2_to_v1_ipcfg(self, cfg):
"""Common ipconfig extraction from v2 to v1 subnets array."""
+ def _add_dhcp_overrides(overrides, subnet):
+ if 'route-metric' in overrides:
+ subnet['metric'] = overrides['route-metric']
+
subnets = []
- if 'dhcp4' in cfg:
- subnets.append({'type': 'dhcp4'})
- if 'dhcp6' in cfg:
+ if cfg.get('dhcp4'):
+ subnet = {'type': 'dhcp4'}
+ _add_dhcp_overrides(cfg.get('dhcp4-overrides', {}), subnet)
+ subnets.append(subnet)
+ if cfg.get('dhcp6'):
+ subnet = {'type': 'dhcp6'}
self.use_ipv6 = True
- subnets.append({'type': 'dhcp6'})
+ _add_dhcp_overrides(cfg.get('dhcp6-overrides', {}), subnet)
+ subnets.append(subnet)
gateway4 = None
gateway6 = None
@@ -877,9 +935,10 @@ def is_ipv6_addr(address):
def subnet_is_ipv6(subnet):
"""Common helper for checking network_state subnets for ipv6."""
- # 'static6' or 'dhcp6'
- if subnet['type'].endswith('6'):
- # This is a request for DHCPv6.
+ # 'static6', 'dhcp6', 'ipv6_dhcpv6-stateful', 'ipv6_dhcpv6-stateless' or
+ # 'ipv6_slaac'
+ if subnet['type'].endswith('6') or subnet['type'] in IPV6_DYNAMIC_TYPES:
+        # This is a request for either the static6 type or DHCPv6.
return True
elif subnet['type'] == 'static' and is_ipv6_addr(subnet.get('address')):
return True
@@ -908,7 +967,7 @@ def ipv4_mask_to_net_prefix(mask):
"""
if isinstance(mask, int):
return mask
- if isinstance(mask, six.string_types):
+ if isinstance(mask, str):
try:
return int(mask)
except ValueError:
@@ -935,7 +994,7 @@ def ipv6_mask_to_net_prefix(mask):
if isinstance(mask, int):
return mask
- if isinstance(mask, six.string_types):
+ if isinstance(mask, str):
try:
return int(mask)
except ValueError:
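
The has_default_route property added above caches a scan over both top-level routes and per-subnet routes, and its predicate is simple: a route is "default" when its prefix is 0 and its network is the IPv4 or IPv6 wildcard. A tiny self-contained illustration of that predicate (a sketch mirroring _is_default_route, not the method itself):

    def is_default_route(route):
        # A route is "default" when it covers everything: prefix 0 on
        # either the IPv4 ('0.0.0.0') or IPv6 ('::') wildcard network.
        return (route.get('prefix') == 0
                and route.get('network') in ('::', '0.0.0.0'))

    assert is_default_route({'network': '0.0.0.0', 'prefix': 0})
    assert is_default_route({'network': '::', 'prefix': 0})
    assert not is_default_route({'network': '10.0.0.0', 'prefix': 8})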
diff --git a/cloudinit/net/renderer.py b/cloudinit/net/renderer.py
index 5f32e90f..2a61a7a8 100644
--- a/cloudinit/net/renderer.py
+++ b/cloudinit/net/renderer.py
@@ -6,7 +6,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
import abc
-import six
+import io
from .network_state import parse_net_config_data
from .udev import generate_udev_rule
@@ -34,7 +34,7 @@ class Renderer(object):
"""Given state, emit udev rules to map mac to ifname."""
# TODO(harlowja): this seems shared between eni renderer and
# this, so move it to a shared location.
- content = six.StringIO()
+ content = io.StringIO()
for iface in network_state.iter_interfaces(filter_by_physical):
# for physical interfaces write out a persist net udev rule
if 'name' in iface and iface.get('mac_address'):
diff --git a/cloudinit/net/renderers.py b/cloudinit/net/renderers.py
index 5117b4a5..b98dbbe3 100644
--- a/cloudinit/net/renderers.py
+++ b/cloudinit/net/renderers.py
@@ -1,17 +1,19 @@
# This file is part of cloud-init. See LICENSE file for license information.
from . import eni
+from . import freebsd
from . import netplan
from . import RendererNotFoundError
from . import sysconfig
NAME_TO_RENDERER = {
"eni": eni,
+ "freebsd": freebsd,
"netplan": netplan,
"sysconfig": sysconfig,
}
-DEFAULT_PRIORITY = ["eni", "sysconfig", "netplan"]
+DEFAULT_PRIORITY = ["eni", "sysconfig", "netplan", "freebsd"]
def search(priority=None, target=None, first=False):
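
Renderer selection walks the priority list in order and asks each module whether it is usable on the target; freebsd now participates, last in line. A sketch of the selection loop, assuming each renderer module exposes an available() check and a Renderer class as the modules in this diff do (this mirrors search(..., first=True) in spirit, not its exact signature):

    def pick_renderer(name_to_module, priority):
        # Return the first renderer module whose availability check
        # passes on this system.
        for name in priority:
            mod = name_to_module[name]
            if mod.available():
                return name, mod
        raise RuntimeError('No available network renderers found')

    # pick_renderer(NAME_TO_RENDERER,
    #               ["eni", "sysconfig", "netplan", "freebsd"])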
diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
index 17293e1d..0a387377 100644
--- a/cloudinit/net/sysconfig.py
+++ b/cloudinit/net/sysconfig.py
@@ -1,20 +1,24 @@
# This file is part of cloud-init. See LICENSE file for license information.
+import copy
+import io
import os
import re
-import six
+from configobj import ConfigObj
-from cloudinit.distros.parsers import networkmanager_conf
-from cloudinit.distros.parsers import resolv_conf
from cloudinit import log as logging
from cloudinit import util
+from cloudinit.distros.parsers import networkmanager_conf
+from cloudinit.distros.parsers import resolv_conf
from . import renderer
from .network_state import (
- is_ipv6_addr, net_prefix_to_ipv4_mask, subnet_is_ipv6)
+ is_ipv6_addr, net_prefix_to_ipv4_mask, subnet_is_ipv6, IPV6_DYNAMIC_TYPES)
LOG = logging.getLogger(__name__)
+NM_CFG_FILE = "/etc/NetworkManager/NetworkManager.conf"
+KNOWN_DISTROS = ['centos', 'fedora', 'rhel', 'suse']
def _make_header(sep='#'):
@@ -46,6 +50,24 @@ def _quote_value(value):
return value
+def enable_ifcfg_rh(path):
+ """Add ifcfg-rh to NetworkManager.cfg plugins if main section is present"""
+ config = ConfigObj(path)
+ if 'main' in config:
+ if 'plugins' in config['main']:
+ if 'ifcfg-rh' in config['main']['plugins']:
+ return
+ else:
+ config['main']['plugins'] = []
+
+ if isinstance(config['main']['plugins'], list):
+ config['main']['plugins'].append('ifcfg-rh')
+ else:
+ config['main']['plugins'] = [config['main']['plugins'], 'ifcfg-rh']
+ config.write()
+ LOG.debug('Enabled ifcfg-rh NetworkManager plugins')
+
+
class ConfigMap(object):
"""Sysconfig like dictionary object."""
@@ -64,6 +86,9 @@ class ConfigMap(object):
def __getitem__(self, key):
return self._conf[key]
+ def get(self, key):
+ return self._conf.get(key)
+
def __contains__(self, key):
return key in self._conf
@@ -74,7 +99,7 @@ class ConfigMap(object):
return len(self._conf)
def to_string(self):
- buf = six.StringIO()
+ buf = io.StringIO()
buf.write(_make_header())
if self._conf:
buf.write("\n")
@@ -82,11 +107,14 @@ class ConfigMap(object):
value = self._conf[key]
if isinstance(value, bool):
value = self._bool_map[value]
- if not isinstance(value, six.string_types):
+ if not isinstance(value, str):
value = str(value)
buf.write("%s=%s\n" % (key, _quote_value(value)))
return buf.getvalue()
+ def update(self, updates):
+ self._conf.update(updates)
+
class Route(ConfigMap):
"""Represents a route configuration."""
@@ -128,7 +156,7 @@ class Route(ConfigMap):
# only accept ipv4 and ipv6
if proto not in ['ipv4', 'ipv6']:
raise ValueError("Unknown protocol '%s'" % (str(proto)))
- buf = six.StringIO()
+ buf = io.StringIO()
buf.write(_make_header())
if self._conf:
buf.write("\n")
@@ -247,12 +275,29 @@ class Renderer(renderer.Renderer):
# s1-networkscripts-interfaces.html (or other docs for
# details about this)
- iface_defaults = tuple([
- ('ONBOOT', True),
- ('USERCTL', False),
- ('NM_CONTROLLED', False),
- ('BOOTPROTO', 'none'),
- ])
+ iface_defaults = {
+ 'rhel': {'ONBOOT': True, 'USERCTL': False, 'NM_CONTROLLED': False,
+ 'BOOTPROTO': 'none'},
+ 'suse': {'BOOTPROTO': 'static', 'STARTMODE': 'auto'},
+ }
+
+ cfg_key_maps = {
+ 'rhel': {
+ 'accept-ra': 'IPV6_FORCE_ACCEPT_RA',
+ 'bridge_stp': 'STP',
+ 'bridge_ageing': 'AGEING',
+ 'bridge_bridgeprio': 'PRIO',
+ 'mac_address': 'HWADDR',
+ 'mtu': 'MTU',
+ },
+ 'suse': {
+ 'bridge_stp': 'BRIDGE_STP',
+ 'bridge_ageing': 'BRIDGE_AGEINGTIME',
+ 'bridge_bridgeprio': 'BRIDGE_PRIORITY',
+ 'mac_address': 'LLADDR',
+ 'mtu': 'MTU',
+ },
+ }
# If these keys exist, then their values will be used to form
# a BONDING_OPTS grouping; otherwise no grouping will be set.
@@ -260,12 +305,18 @@ class Renderer(renderer.Renderer):
('bond_mode', "mode=%s"),
('bond_xmit_hash_policy', "xmit_hash_policy=%s"),
('bond_miimon', "miimon=%s"),
- ])
-
- bridge_opts_keys = tuple([
- ('bridge_stp', 'STP'),
- ('bridge_ageing', 'AGEING'),
- ('bridge_bridgeprio', 'PRIO'),
+ ('bond_min_links', "min_links=%s"),
+ ('bond_arp_interval', "arp_interval=%s"),
+ ('bond_arp_ip_target', "arp_ip_target=%s"),
+ ('bond_arp_validate', "arp_validate=%s"),
+ ('bond_ad_select', "ad_select=%s"),
+ ('bond_num_grat_arp', "num_grat_arp=%s"),
+ ('bond_downdelay', "downdelay=%s"),
+ ('bond_updelay', "updelay=%s"),
+ ('bond_lacp_rate', "lacp_rate=%s"),
+ ('bond_fail_over_mac', "fail_over_mac=%s"),
+ ('bond_primary', "primary=%s"),
+ ('bond_primary_reselect', "primary_reselect=%s"),
])
templates = {}
@@ -285,46 +336,101 @@ class Renderer(renderer.Renderer):
'iface_templates': config.get('iface_templates'),
'route_templates': config.get('route_templates'),
}
+ self.flavor = config.get('flavor', 'rhel')
@classmethod
- def _render_iface_shared(cls, iface, iface_cfg):
- for k, v in cls.iface_defaults:
- iface_cfg[k] = v
+ def _render_iface_shared(cls, iface, iface_cfg, flavor):
+ flavor_defaults = copy.deepcopy(cls.iface_defaults.get(flavor, {}))
+ iface_cfg.update(flavor_defaults)
- for (old_key, new_key) in [('mac_address', 'HWADDR'), ('mtu', 'MTU')]:
+ for old_key in ('mac_address', 'mtu', 'accept-ra'):
old_value = iface.get(old_key)
if old_value is not None:
# only set HWADDR on physical interfaces
- if old_key == 'mac_address' and iface['type'] != 'physical':
+ if (old_key == 'mac_address' and
+ iface['type'] not in ['physical', 'infiniband']):
continue
- iface_cfg[new_key] = old_value
+ new_key = cls.cfg_key_maps[flavor].get(old_key)
+ if new_key:
+ iface_cfg[new_key] = old_value
@classmethod
- def _render_subnets(cls, iface_cfg, subnets):
+ def _render_subnets(cls, iface_cfg, subnets, has_default_route, flavor):
# setting base values
- iface_cfg['BOOTPROTO'] = 'none'
+ if flavor == 'suse':
+ iface_cfg['BOOTPROTO'] = 'static'
+ if 'BRIDGE' in iface_cfg:
+ iface_cfg['BOOTPROTO'] = 'dhcp'
+ iface_cfg.drop('BRIDGE')
+ else:
+ iface_cfg['BOOTPROTO'] = 'none'
# modifying base values according to subnets
for i, subnet in enumerate(subnets, start=len(iface_cfg.children)):
mtu_key = 'MTU'
subnet_type = subnet.get('type')
- if subnet_type == 'dhcp6':
- iface_cfg['IPV6INIT'] = True
- iface_cfg['DHCPV6C'] = True
+ if subnet_type == 'dhcp6' or subnet_type == 'ipv6_dhcpv6-stateful':
+ if flavor == 'suse':
+ # User wants dhcp for both protocols
+ if iface_cfg['BOOTPROTO'] == 'dhcp4':
+ iface_cfg['BOOTPROTO'] = 'dhcp'
+ else:
+ # Only IPv6 is DHCP, IPv4 may be static
+ iface_cfg['BOOTPROTO'] = 'dhcp6'
+ iface_cfg['DHCLIENT6_MODE'] = 'managed'
+ else:
+ iface_cfg['IPV6INIT'] = True
+ # Configure network settings using DHCPv6
+ iface_cfg['DHCPV6C'] = True
+ elif subnet_type == 'ipv6_dhcpv6-stateless':
+ if flavor == 'suse':
+ # User wants dhcp for both protocols
+ if iface_cfg['BOOTPROTO'] == 'dhcp4':
+ iface_cfg['BOOTPROTO'] = 'dhcp'
+ else:
+ # Only IPv6 is DHCP, IPv4 may be static
+ iface_cfg['BOOTPROTO'] = 'dhcp6'
+ iface_cfg['DHCLIENT6_MODE'] = 'info'
+ else:
+ iface_cfg['IPV6INIT'] = True
+ # Configure network settings using SLAAC from RAs and
+ # optional info from dhcp server using DHCPv6
+ iface_cfg['IPV6_AUTOCONF'] = True
+ iface_cfg['DHCPV6C'] = True
+ # Use Information-request to get only stateless
+ # configuration parameters (i.e., without address).
+ iface_cfg['DHCPV6C_OPTIONS'] = '-S'
+ elif subnet_type == 'ipv6_slaac':
+ if flavor == 'suse':
+ # User wants dhcp for both protocols
+ if iface_cfg['BOOTPROTO'] == 'dhcp4':
+ iface_cfg['BOOTPROTO'] = 'dhcp'
+ else:
+ # Only IPv6 is DHCP, IPv4 may be static
+ iface_cfg['BOOTPROTO'] = 'dhcp6'
+ iface_cfg['DHCLIENT6_MODE'] = 'info'
+ else:
+ iface_cfg['IPV6INIT'] = True
+ # Configure network settings using SLAAC from RAs
+ iface_cfg['IPV6_AUTOCONF'] = True
elif subnet_type in ['dhcp4', 'dhcp']:
+ bootproto_in = iface_cfg['BOOTPROTO']
iface_cfg['BOOTPROTO'] = 'dhcp'
- elif subnet_type == 'static':
+ if flavor == 'suse' and subnet_type == 'dhcp4':
+ # If dhcp6 is already specified the user wants dhcp
+ # for both protocols
+ if bootproto_in != 'dhcp6':
+ # Only IPv4 is DHCP, IPv6 may be static
+ iface_cfg['BOOTPROTO'] = 'dhcp4'
+ elif subnet_type in ['static', 'static6']:
+ # RH info
# grep BOOTPROTO sysconfig.txt -A2 | head -3
# BOOTPROTO=none|bootp|dhcp
# 'bootp' or 'dhcp' cause a DHCP client
# to run on the device. Any other
# value causes any static configuration
# in the file to be applied.
- # ==> the following should not be set to 'static'
- # but should remain 'none'
- # if iface_cfg['BOOTPROTO'] == 'none':
- # iface_cfg['BOOTPROTO'] = 'static'
- if subnet_is_ipv6(subnet):
+ if subnet_is_ipv6(subnet) and flavor != 'suse':
mtu_key = 'IPV6_MTU'
iface_cfg['IPV6INIT'] = True
if 'mtu' in subnet:
@@ -335,37 +441,70 @@ class Renderer(renderer.Renderer):
'Network config: ignoring %s device-level mtu:%s'
' because ipv4 subnet-level mtu:%s provided.',
iface_cfg.name, iface_cfg[mtu_key], subnet['mtu'])
- iface_cfg[mtu_key] = subnet['mtu']
+ if subnet_is_ipv6(subnet):
+ if flavor == 'suse':
+ # TODO(rjschwei) write mtu setting to
+ # /etc/sysctl.d/
+ pass
+ else:
+ iface_cfg[mtu_key] = subnet['mtu']
+ else:
+ iface_cfg[mtu_key] = subnet['mtu']
elif subnet_type == 'manual':
- # If the subnet has an MTU setting, then ONBOOT=True
- # to apply the setting
- iface_cfg['ONBOOT'] = mtu_key in iface_cfg
+ if flavor == 'suse':
+ LOG.debug('Unknown subnet type setting "%s"', subnet_type)
+ else:
+ # If the subnet has an MTU setting, then ONBOOT=True
+ # to apply the setting
+ iface_cfg['ONBOOT'] = mtu_key in iface_cfg
else:
raise ValueError("Unknown subnet type '%s' found"
" for interface '%s'" % (subnet_type,
iface_cfg.name))
if subnet.get('control') == 'manual':
- iface_cfg['ONBOOT'] = False
+ if flavor == 'suse':
+ iface_cfg['STARTMODE'] = 'manual'
+ else:
+ iface_cfg['ONBOOT'] = False
# set IPv4 and IPv6 static addresses
ipv4_index = -1
ipv6_index = -1
for i, subnet in enumerate(subnets, start=len(iface_cfg.children)):
subnet_type = subnet.get('type')
- if subnet_type == 'dhcp6':
+ # metric may apply to both dhcp and static config
+ if 'metric' in subnet:
+ if flavor != 'suse':
+ iface_cfg['METRIC'] = subnet['metric']
+ if subnet_type in ['dhcp', 'dhcp4']:
+ # On SUSE distros 'DHCLIENT_SET_DEFAULT_ROUTE' is a global
+ # setting in /etc/sysconfig/network/dhcp
+ if flavor != 'suse':
+ if has_default_route and iface_cfg['BOOTPROTO'] != 'none':
+ iface_cfg['DHCLIENT_SET_DEFAULT_ROUTE'] = False
continue
- elif subnet_type in ['dhcp4', 'dhcp']:
+ elif subnet_type in IPV6_DYNAMIC_TYPES:
continue
- elif subnet_type == 'static':
+ elif subnet_type in ['static', 'static6']:
if subnet_is_ipv6(subnet):
ipv6_index = ipv6_index + 1
ipv6_cidr = "%s/%s" % (subnet['address'], subnet['prefix'])
if ipv6_index == 0:
- iface_cfg['IPV6ADDR'] = ipv6_cidr
+ if flavor == 'suse':
+ iface_cfg['IPADDR6'] = ipv6_cidr
+ else:
+ iface_cfg['IPV6ADDR'] = ipv6_cidr
elif ipv6_index == 1:
- iface_cfg['IPV6ADDR_SECONDARIES'] = ipv6_cidr
+ if flavor == 'suse':
+ iface_cfg['IPADDR6_1'] = ipv6_cidr
+ else:
+ iface_cfg['IPV6ADDR_SECONDARIES'] = ipv6_cidr
else:
- iface_cfg['IPV6ADDR_SECONDARIES'] += " " + ipv6_cidr
+ if flavor == 'suse':
+ iface_cfg['IPADDR6_%d' % ipv6_index] = ipv6_cidr
+ else:
+ iface_cfg['IPV6ADDR_SECONDARIES'] += \
+ " " + ipv6_cidr
else:
ipv4_index = ipv4_index + 1
suff = "" if ipv4_index == 0 else str(ipv4_index)
@@ -373,20 +512,17 @@ class Renderer(renderer.Renderer):
iface_cfg['NETMASK' + suff] = \
net_prefix_to_ipv4_mask(subnet['prefix'])
- if 'gateway' in subnet:
+ if 'gateway' in subnet and flavor != 'suse':
iface_cfg['DEFROUTE'] = True
if is_ipv6_addr(subnet['gateway']):
iface_cfg['IPV6_DEFAULTGW'] = subnet['gateway']
else:
iface_cfg['GATEWAY'] = subnet['gateway']
- if 'metric' in subnet:
- iface_cfg['METRIC'] = subnet['metric']
-
- if 'dns_search' in subnet:
+ if 'dns_search' in subnet and flavor != 'suse':
iface_cfg['DOMAIN'] = ' '.join(subnet['dns_search'])
- if 'dns_nameservers' in subnet:
+ if 'dns_nameservers' in subnet and flavor != 'suse':
if len(subnet['dns_nameservers']) > 3:
# per resolv.conf(5) MAXNS sets this to 3.
LOG.debug("%s has %d entries in dns_nameservers. "
@@ -396,12 +532,21 @@ class Renderer(renderer.Renderer):
iface_cfg['DNS' + str(i)] = k
@classmethod
- def _render_subnet_routes(cls, iface_cfg, route_cfg, subnets):
+ def _render_subnet_routes(cls, iface_cfg, route_cfg, subnets, flavor):
+ # TODO(rjschwei): route configuration on SUSE distro happens via
+ # ifroute-* files, see lp#1812117. SUSE currently carries a local
+ # patch in their package.
+ if flavor == 'suse':
+ return
for _, subnet in enumerate(subnets, start=len(iface_cfg.children)):
+ subnet_type = subnet.get('type')
for route in subnet.get('routes', []):
is_ipv6 = subnet.get('ipv6') or is_ipv6_addr(route['gateway'])
- if _is_default_route(route):
+                # Any dynamic configuration method (SLAAC, DHCPv6
+                # stateful/stateless) should get router information
+                # from router RAs.
+ if (_is_default_route(route) and subnet_type not in
+ IPV6_DYNAMIC_TYPES):
if (
(subnet.get('ipv4') and
route_cfg.has_set_default_ipv4) or
@@ -420,8 +565,10 @@ class Renderer(renderer.Renderer):
# TODO(harlowja): add validation that no other iface has
# also provided the default route?
iface_cfg['DEFROUTE'] = True
+ if iface_cfg['BOOTPROTO'] in ('dhcp', 'dhcp4'):
+ iface_cfg['DHCLIENT_SET_DEFAULT_ROUTE'] = True
if 'gateway' in route:
- if is_ipv6 or is_ipv6_addr(route['gateway']):
+ if is_ipv6:
iface_cfg['IPV6_DEFAULTGW'] = route['gateway']
route_cfg.has_set_default_ipv6 = True
else:
@@ -462,7 +609,9 @@ class Renderer(renderer.Renderer):
iface_cfg['BONDING_OPTS'] = " ".join(bond_opts)
@classmethod
- def _render_physical_interfaces(cls, network_state, iface_contents):
+ def _render_physical_interfaces(
+ cls, network_state, iface_contents, flavor
+ ):
physical_filter = renderer.filter_by_physical
for iface in network_state.iter_interfaces(physical_filter):
iface_name = iface['name']
@@ -470,11 +619,16 @@ class Renderer(renderer.Renderer):
iface_cfg = iface_contents[iface_name]
route_cfg = iface_cfg.routes
- cls._render_subnets(iface_cfg, iface_subnets)
- cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets)
+ cls._render_subnets(
+ iface_cfg, iface_subnets, network_state.has_default_route,
+ flavor
+ )
+ cls._render_subnet_routes(
+ iface_cfg, route_cfg, iface_subnets, flavor
+ )
@classmethod
- def _render_bond_interfaces(cls, network_state, iface_contents):
+ def _render_bond_interfaces(cls, network_state, iface_contents, flavor):
bond_filter = renderer.filter_by_type('bond')
slave_filter = renderer.filter_by_attr('bond-master')
for iface in network_state.iter_interfaces(bond_filter):
@@ -488,15 +642,24 @@ class Renderer(renderer.Renderer):
master_cfgs.extend(iface_cfg.children)
for master_cfg in master_cfgs:
master_cfg['BONDING_MASTER'] = True
- master_cfg.kind = 'bond'
+ if flavor != 'suse':
+ master_cfg.kind = 'bond'
if iface.get('mac_address'):
- iface_cfg['MACADDR'] = iface.get('mac_address')
+ if flavor == 'suse':
+ iface_cfg['LLADDR'] = iface.get('mac_address')
+ else:
+ iface_cfg['MACADDR'] = iface.get('mac_address')
iface_subnets = iface.get("subnets", [])
route_cfg = iface_cfg.routes
- cls._render_subnets(iface_cfg, iface_subnets)
- cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets)
+ cls._render_subnets(
+ iface_cfg, iface_subnets, network_state.has_default_route,
+ flavor
+ )
+ cls._render_subnet_routes(
+ iface_cfg, route_cfg, iface_subnets, flavor
+ )
# iter_interfaces on network-state is not sorted to produce
# consistent numbers we need to sort.
@@ -506,29 +669,51 @@ class Renderer(renderer.Renderer):
if slave_iface['bond-master'] == iface_name])
for index, bond_slave in enumerate(bond_slaves):
- slavestr = 'BONDING_SLAVE%s' % index
+ if flavor == 'suse':
+ slavestr = 'BONDING_SLAVE_%s' % index
+ else:
+ slavestr = 'BONDING_SLAVE%s' % index
iface_cfg[slavestr] = bond_slave
slave_cfg = iface_contents[bond_slave]
- slave_cfg['MASTER'] = iface_name
- slave_cfg['SLAVE'] = True
+ if flavor == 'suse':
+ slave_cfg['BOOTPROTO'] = 'none'
+ slave_cfg['STARTMODE'] = 'hotplug'
+ else:
+ slave_cfg['MASTER'] = iface_name
+ slave_cfg['SLAVE'] = True
@classmethod
- def _render_vlan_interfaces(cls, network_state, iface_contents):
+ def _render_vlan_interfaces(cls, network_state, iface_contents, flavor):
vlan_filter = renderer.filter_by_type('vlan')
for iface in network_state.iter_interfaces(vlan_filter):
iface_name = iface['name']
iface_cfg = iface_contents[iface_name]
- iface_cfg['VLAN'] = True
- iface_cfg['PHYSDEV'] = iface_name[:iface_name.rfind('.')]
+ if flavor == 'suse':
+ vlan_id = iface.get('vlan_id')
+ if vlan_id:
+ iface_cfg['VLAN_ID'] = vlan_id
+ iface_cfg['ETHERDEVICE'] = iface_name[:iface_name.rfind('.')]
+ else:
+ iface_cfg['VLAN'] = True
+ iface_cfg['PHYSDEV'] = iface_name[:iface_name.rfind('.')]
iface_subnets = iface.get("subnets", [])
route_cfg = iface_cfg.routes
- cls._render_subnets(iface_cfg, iface_subnets)
- cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets)
+ cls._render_subnets(
+ iface_cfg, iface_subnets, network_state.has_default_route,
+ flavor
+ )
+ cls._render_subnet_routes(
+ iface_cfg, route_cfg, iface_subnets, flavor
+ )
@staticmethod
def _render_dns(network_state, existing_dns_path=None):
+ # skip writing resolv.conf if network_state doesn't include any input.
+ if not any([len(network_state.dns_nameservers),
+ len(network_state.dns_searchdomains)]):
+ return None
content = resolv_conf.ResolvConf("")
if existing_dns_path and os.path.isfile(existing_dns_path):
content = resolv_conf.ResolvConf(util.load_file(existing_dns_path))
@@ -558,19 +743,39 @@ class Renderer(renderer.Renderer):
return out
@classmethod
- def _render_bridge_interfaces(cls, network_state, iface_contents):
+ def _render_bridge_interfaces(cls, network_state, iface_contents, flavor):
+ bridge_key_map = {
+ old_k: new_k for old_k, new_k in cls.cfg_key_maps[flavor].items()
+ if old_k.startswith('bridge')}
bridge_filter = renderer.filter_by_type('bridge')
+
for iface in network_state.iter_interfaces(bridge_filter):
iface_name = iface['name']
iface_cfg = iface_contents[iface_name]
- iface_cfg.kind = 'bridge'
- for old_key, new_key in cls.bridge_opts_keys:
+ if flavor != 'suse':
+ iface_cfg.kind = 'bridge'
+ for old_key, new_key in bridge_key_map.items():
if old_key in iface:
iface_cfg[new_key] = iface[old_key]
- if iface.get('mac_address'):
- iface_cfg['MACADDR'] = iface.get('mac_address')
+ if flavor == 'suse':
+ if 'BRIDGE_STP' in iface_cfg:
+ if iface_cfg.get('BRIDGE_STP'):
+ iface_cfg['BRIDGE_STP'] = 'on'
+ else:
+ iface_cfg['BRIDGE_STP'] = 'off'
+ if iface.get('mac_address'):
+ key = 'MACADDR'
+ if flavor == 'suse':
+ key = 'LLADDRESS'
+ iface_cfg[key] = iface.get('mac_address')
+
+ if flavor == 'suse':
+ if iface.get('bridge_ports', []):
+ iface_cfg['BRIDGE_PORTS'] = '%s' % " ".join(
+ iface.get('bridge_ports')
+ )
# Is this the right key to get all the connected interfaces?
for bridged_iface_name in iface.get('bridge_ports', []):
# Ensure all bridged interfaces are correctly tagged
@@ -579,15 +784,23 @@ class Renderer(renderer.Renderer):
bridged_cfgs = [bridged_cfg]
bridged_cfgs.extend(bridged_cfg.children)
for bridge_cfg in bridged_cfgs:
- bridge_cfg['BRIDGE'] = iface_name
+ bridge_value = iface_name
+ if flavor == 'suse':
+ bridge_value = 'yes'
+ bridge_cfg['BRIDGE'] = bridge_value
iface_subnets = iface.get("subnets", [])
route_cfg = iface_cfg.routes
- cls._render_subnets(iface_cfg, iface_subnets)
- cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets)
+ cls._render_subnets(
+ iface_cfg, iface_subnets, network_state.has_default_route,
+ flavor
+ )
+ cls._render_subnet_routes(
+ iface_cfg, route_cfg, iface_subnets, flavor
+ )
@classmethod
- def _render_ib_interfaces(cls, network_state, iface_contents):
+ def _render_ib_interfaces(cls, network_state, iface_contents, flavor):
ib_filter = renderer.filter_by_type('infiniband')
for iface in network_state.iter_interfaces(ib_filter):
iface_name = iface['name']
@@ -595,11 +808,16 @@ class Renderer(renderer.Renderer):
iface_cfg.kind = 'infiniband'
iface_subnets = iface.get("subnets", [])
route_cfg = iface_cfg.routes
- cls._render_subnets(iface_cfg, iface_subnets)
- cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets)
+ cls._render_subnets(
+ iface_cfg, iface_subnets, network_state.has_default_route,
+ flavor
+ )
+ cls._render_subnet_routes(
+ iface_cfg, route_cfg, iface_subnets, flavor
+ )
@classmethod
- def _render_sysconfig(cls, base_sysconf_dir, network_state,
+ def _render_sysconfig(cls, base_sysconf_dir, network_state, flavor,
templates=None):
'''Given state, return /etc/sysconfig files + contents'''
if not templates:
@@ -610,13 +828,17 @@ class Renderer(renderer.Renderer):
continue
iface_name = iface['name']
iface_cfg = NetInterface(iface_name, base_sysconf_dir, templates)
- cls._render_iface_shared(iface, iface_cfg)
+ if flavor == 'suse':
+ iface_cfg.drop('DEVICE')
+ # If type detection fails it is considered a bug in SUSE
+ iface_cfg.drop('TYPE')
+ cls._render_iface_shared(iface, iface_cfg, flavor)
iface_contents[iface_name] = iface_cfg
- cls._render_physical_interfaces(network_state, iface_contents)
- cls._render_bond_interfaces(network_state, iface_contents)
- cls._render_vlan_interfaces(network_state, iface_contents)
- cls._render_bridge_interfaces(network_state, iface_contents)
- cls._render_ib_interfaces(network_state, iface_contents)
+ cls._render_physical_interfaces(network_state, iface_contents, flavor)
+ cls._render_bond_interfaces(network_state, iface_contents, flavor)
+ cls._render_vlan_interfaces(network_state, iface_contents, flavor)
+ cls._render_bridge_interfaces(network_state, iface_contents, flavor)
+ cls._render_ib_interfaces(network_state, iface_contents, flavor)
contents = {}
for iface_name, iface_cfg in iface_contents.items():
if iface_cfg or iface_cfg.children:
@@ -638,14 +860,15 @@ class Renderer(renderer.Renderer):
file_mode = 0o644
base_sysconf_dir = util.target_path(target, self.sysconf_dir)
for path, data in self._render_sysconfig(base_sysconf_dir,
- network_state,
+ network_state, self.flavor,
templates=templates).items():
util.write_file(path, data, file_mode)
if self.dns_path:
dns_path = util.target_path(target, self.dns_path)
resolv_content = self._render_dns(network_state,
existing_dns_path=dns_path)
- util.write_file(dns_path, resolv_content, file_mode)
+ if resolv_content:
+ util.write_file(dns_path, resolv_content, file_mode)
if self.networkmanager_conf_path:
nm_conf_path = util.target_path(target,
self.networkmanager_conf_path)
@@ -657,6 +880,8 @@ class Renderer(renderer.Renderer):
netrules_content = self._render_persistent_net(network_state)
netrules_path = util.target_path(target, self.netrules_path)
util.write_file(netrules_path, netrules_content, file_mode)
+ if available_nm(target=target):
+ enable_ifcfg_rh(util.target_path(target, path=NM_CFG_FILE))
sysconfig_path = util.target_path(target, templates.get('control'))
# Distros configuring /etc/sysconfig/network as a file e.g. Centos
@@ -671,6 +896,13 @@ class Renderer(renderer.Renderer):
def available(target=None):
+ sysconfig = available_sysconfig(target=target)
+ nm = available_nm(target=target)
+ return (util.system_info()['variant'] in KNOWN_DISTROS
+ and any([nm, sysconfig]))
+
+
+def available_sysconfig(target=None):
expected = ['ifup', 'ifdown']
search = ['/sbin', '/usr/sbin']
for p in expected:
@@ -679,10 +911,16 @@ def available(target=None):
expected_paths = [
'etc/sysconfig/network-scripts/network-functions',
- 'etc/sysconfig/network-scripts/ifdown-eth']
+ 'etc/sysconfig/config']
for p in expected_paths:
- if not os.path.isfile(util.target_path(target, p)):
- return False
+ if os.path.isfile(util.target_path(target, p)):
+ return True
+ return False
+
+
+def available_nm(target=None):
+ if not os.path.isfile(util.target_path(target, path=NM_CFG_FILE)):
+ return False
return True
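
enable_ifcfg_rh, added near the top of this file, only touches NetworkManager.conf when a [main] section already exists, normalising plugins to a list before appending. Roughly, given a file with plugins=keyfile it rewrites it as shown below; the file content is illustrative, and this is a sketch of the same ConfigObj pattern rather than the function itself:

    from configobj import ConfigObj

    # before: /etc/NetworkManager/NetworkManager.conf
    #   [main]
    #   plugins=keyfile
    config = ConfigObj('NetworkManager.conf')
    if 'main' in config:
        plugins = config['main'].get('plugins', [])
        if not isinstance(plugins, list):
            plugins = [plugins]
        if 'ifcfg-rh' not in plugins:
            plugins.append('ifcfg-rh')
            config['main']['plugins'] = plugins
            config.write()
    # after:
    #   [main]
    #   plugins = keyfile, ifcfg-rh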
diff --git a/cloudinit/net/tests/test_dhcp.py b/cloudinit/net/tests/test_dhcp.py
index cd3e7328..c3fa1e04 100644
--- a/cloudinit/net/tests/test_dhcp.py
+++ b/cloudinit/net/tests/test_dhcp.py
@@ -8,7 +8,8 @@ from textwrap import dedent
import cloudinit.net as net
from cloudinit.net.dhcp import (
InvalidDHCPLeaseFileError, maybe_perform_dhcp_discovery,
- parse_dhcp_lease_file, dhcp_discovery, networkd_load_leases)
+ parse_dhcp_lease_file, dhcp_discovery, networkd_load_leases,
+ parse_static_routes)
from cloudinit.util import ensure_file, write_file
from cloudinit.tests.helpers import (
CiTestCase, HttprettyTestCase, mock, populate_dir, wrap_and_call)
@@ -64,6 +65,188 @@ class TestParseDHCPLeasesFile(CiTestCase):
self.assertItemsEqual(expected, parse_dhcp_lease_file(lease_file))
+class TestDHCPRFC3442(CiTestCase):
+
+ def test_parse_lease_finds_rfc3442_classless_static_routes(self):
+ """parse_dhcp_lease_file returns rfc3442-classless-static-routes."""
+ lease_file = self.tmp_path('leases')
+ content = dedent("""
+ lease {
+ interface "wlp3s0";
+ fixed-address 192.168.2.74;
+ option subnet-mask 255.255.255.0;
+ option routers 192.168.2.1;
+ option rfc3442-classless-static-routes 0,130,56,240,1;
+ renew 4 2017/07/27 18:02:30;
+ expire 5 2017/07/28 07:08:15;
+ }
+ """)
+ expected = [
+ {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74',
+ 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1',
+ 'rfc3442-classless-static-routes': '0,130,56,240,1',
+ 'renew': '4 2017/07/27 18:02:30',
+ 'expire': '5 2017/07/28 07:08:15'}]
+ write_file(lease_file, content)
+ self.assertItemsEqual(expected, parse_dhcp_lease_file(lease_file))
+
+ def test_parse_lease_finds_classless_static_routes(self):
+ """
+ parse_dhcp_lease_file returns classless-static-routes
+        for the CentOS lease format.
+ """
+ lease_file = self.tmp_path('leases')
+ content = dedent("""
+ lease {
+ interface "wlp3s0";
+ fixed-address 192.168.2.74;
+ option subnet-mask 255.255.255.0;
+ option routers 192.168.2.1;
+ option classless-static-routes 0 130.56.240.1;
+ renew 4 2017/07/27 18:02:30;
+ expire 5 2017/07/28 07:08:15;
+ }
+ """)
+ expected = [
+ {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74',
+ 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1',
+ 'classless-static-routes': '0 130.56.240.1',
+ 'renew': '4 2017/07/27 18:02:30',
+ 'expire': '5 2017/07/28 07:08:15'}]
+ write_file(lease_file, content)
+ self.assertItemsEqual(expected, parse_dhcp_lease_file(lease_file))
+
+ @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
+ @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
+ def test_obtain_lease_parses_static_routes(self, m_maybe, m_ipv4):
+ """EphemeralDHPCv4 parses rfc3442 routes for EphemeralIPv4Network"""
+ lease = [
+ {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74',
+ 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1',
+ 'rfc3442-classless-static-routes': '0,130,56,240,1',
+ 'renew': '4 2017/07/27 18:02:30',
+ 'expire': '5 2017/07/28 07:08:15'}]
+ m_maybe.return_value = lease
+ eph = net.dhcp.EphemeralDHCPv4()
+ eph.obtain_lease()
+ expected_kwargs = {
+ 'interface': 'wlp3s0',
+ 'ip': '192.168.2.74',
+ 'prefix_or_mask': '255.255.255.0',
+ 'broadcast': '192.168.2.255',
+ 'static_routes': [('0.0.0.0/0', '130.56.240.1')],
+ 'router': '192.168.2.1'}
+ m_ipv4.assert_called_with(**expected_kwargs)
+
+ @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
+ @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
+ def test_obtain_centos_lease_parses_static_routes(self, m_maybe, m_ipv4):
+ """
+        EphemeralDHCPv4 parses rfc3442 routes for EphemeralIPv4Network
+        for the CentOS lease format
+ """
+ lease = [
+ {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74',
+ 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1',
+ 'classless-static-routes': '0 130.56.240.1',
+ 'renew': '4 2017/07/27 18:02:30',
+ 'expire': '5 2017/07/28 07:08:15'}]
+ m_maybe.return_value = lease
+ eph = net.dhcp.EphemeralDHCPv4()
+ eph.obtain_lease()
+ expected_kwargs = {
+ 'interface': 'wlp3s0',
+ 'ip': '192.168.2.74',
+ 'prefix_or_mask': '255.255.255.0',
+ 'broadcast': '192.168.2.255',
+ 'static_routes': [('0.0.0.0/0', '130.56.240.1')],
+ 'router': '192.168.2.1'}
+ m_ipv4.assert_called_with(**expected_kwargs)
+
+
+class TestDHCPParseStaticRoutes(CiTestCase):
+
+ with_logs = True
+
+    def test_parse_static_routes_empty_string(self):
+ self.assertEqual([], parse_static_routes(""))
+
+ def test_parse_static_routes_invalid_input_returns_empty_list(self):
+ rfc3442 = "32,169,254,169,254,130,56,248"
+ self.assertEqual([], parse_static_routes(rfc3442))
+
+ def test_parse_static_routes_bogus_width_returns_empty_list(self):
+ rfc3442 = "33,169,254,169,254,130,56,248"
+ self.assertEqual([], parse_static_routes(rfc3442))
+
+ def test_parse_static_routes_single_ip(self):
+ rfc3442 = "32,169,254,169,254,130,56,248,255"
+ self.assertEqual([('169.254.169.254/32', '130.56.248.255')],
+ parse_static_routes(rfc3442))
+
+ def test_parse_static_routes_single_ip_handles_trailing_semicolon(self):
+ rfc3442 = "32,169,254,169,254,130,56,248,255;"
+ self.assertEqual([('169.254.169.254/32', '130.56.248.255')],
+ parse_static_routes(rfc3442))
+
+ def test_parse_static_routes_default_route(self):
+ rfc3442 = "0,130,56,240,1"
+ self.assertEqual([('0.0.0.0/0', '130.56.240.1')],
+ parse_static_routes(rfc3442))
+
+ def test_parse_static_routes_class_c_b_a(self):
+ class_c = "24,192,168,74,192,168,0,4"
+ class_b = "16,172,16,172,16,0,4"
+ class_a = "8,10,10,0,0,4"
+ rfc3442 = ",".join([class_c, class_b, class_a])
+ self.assertEqual(sorted([
+ ("192.168.74.0/24", "192.168.0.4"),
+ ("172.16.0.0/16", "172.16.0.4"),
+ ("10.0.0.0/8", "10.0.0.4")
+ ]), sorted(parse_static_routes(rfc3442)))
+
+ def test_parse_static_routes_logs_error_truncated(self):
+ bad_rfc3442 = {
+ "class_c": "24,169,254,169,10",
+ "class_b": "16,172,16,10",
+ "class_a": "8,10,10",
+ "gateway": "0,0",
+ "netlen": "33,0",
+ }
+ for rfc3442 in bad_rfc3442.values():
+ self.assertEqual([], parse_static_routes(rfc3442))
+
+ logs = self.logs.getvalue()
+ self.assertEqual(len(bad_rfc3442.keys()), len(logs.splitlines()))
+
+ def test_parse_static_routes_returns_valid_routes_until_parse_err(self):
+ class_c = "24,192,168,74,192,168,0,4"
+ class_b = "16,172,16,172,16,0,4"
+ class_a_error = "8,10,10,0,0"
+ rfc3442 = ",".join([class_c, class_b, class_a_error])
+ self.assertEqual(sorted([
+ ("192.168.74.0/24", "192.168.0.4"),
+ ("172.16.0.0/16", "172.16.0.4"),
+ ]), sorted(parse_static_routes(rfc3442)))
+
+ logs = self.logs.getvalue()
+ self.assertIn(rfc3442, logs.splitlines()[0])
+
+ def test_redhat_format(self):
+ redhat_format = "24.191.168.128 192.168.128.1,0 192.168.128.1"
+ self.assertEqual(sorted([
+ ("191.168.128.0/24", "192.168.128.1"),
+ ("0.0.0.0/0", "192.168.128.1")
+ ]), sorted(parse_static_routes(redhat_format)))
+
+ def test_redhat_format_with_a_space_too_much_after_comma(self):
+ redhat_format = "24.191.168.128 192.168.128.1, 0 192.168.128.1"
+ self.assertEqual(sorted([
+ ("191.168.128.0/24", "192.168.128.1"),
+ ("0.0.0.0/0", "192.168.128.1")
+ ]), sorted(parse_static_routes(redhat_format)))
+
+
class TestDHCPDiscoveryClean(CiTestCase):
with_logs = True
@@ -117,6 +300,7 @@ class TestDHCPDiscoveryClean(CiTestCase):
self.assertEqual('eth9', call[0][1])
self.assertIn('/var/tmp/cloud-init/cloud-init-dhcp-', call[0][2])
+ @mock.patch('time.sleep', mock.MagicMock())
@mock.patch('cloudinit.net.dhcp.os.kill')
@mock.patch('cloudinit.net.dhcp.util.subp')
def test_dhcp_discovery_run_in_sandbox_warns_invalid_pid(self, m_subp,
@@ -145,16 +329,20 @@ class TestDHCPDiscoveryClean(CiTestCase):
'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}],
dhcp_discovery(dhclient_script, 'eth9', tmpdir))
self.assertIn(
- "pid file contains non-integer content ''", self.logs.getvalue())
+ "dhclient(pid=, parentpid=unknown) failed "
+ "to daemonize after 10.0 seconds",
+ self.logs.getvalue())
m_kill.assert_not_called()
+ @mock.patch('cloudinit.net.dhcp.util.get_proc_ppid')
@mock.patch('cloudinit.net.dhcp.os.kill')
@mock.patch('cloudinit.net.dhcp.util.wait_for_files')
@mock.patch('cloudinit.net.dhcp.util.subp')
def test_dhcp_discovery_run_in_sandbox_waits_on_lease_and_pid(self,
m_subp,
m_wait,
- m_kill):
+ m_kill,
+ m_getppid):
"""dhcp_discovery waits for the presence of pidfile and dhcp.leases."""
tmpdir = self.tmp_dir()
dhclient_script = os.path.join(tmpdir, 'dhclient.orig')
@@ -164,6 +352,7 @@ class TestDHCPDiscoveryClean(CiTestCase):
pidfile = self.tmp_path('dhclient.pid', tmpdir)
leasefile = self.tmp_path('dhcp.leases', tmpdir)
m_wait.return_value = [pidfile] # Return the missing pidfile wait for
+ m_getppid.return_value = 1 # Indicate that dhclient has daemonized
self.assertEqual([], dhcp_discovery(dhclient_script, 'eth9', tmpdir))
self.assertEqual(
mock.call([pidfile, leasefile], maxwait=5, naplen=0.01),
@@ -173,9 +362,10 @@ class TestDHCPDiscoveryClean(CiTestCase):
self.logs.getvalue())
m_kill.assert_not_called()
+ @mock.patch('cloudinit.net.dhcp.util.get_proc_ppid')
@mock.patch('cloudinit.net.dhcp.os.kill')
@mock.patch('cloudinit.net.dhcp.util.subp')
- def test_dhcp_discovery_run_in_sandbox(self, m_subp, m_kill):
+ def test_dhcp_discovery_run_in_sandbox(self, m_subp, m_kill, m_getppid):
"""dhcp_discovery brings up the interface and runs dhclient.
It also returns the parsed dhcp.leases file generated in the sandbox.
@@ -197,6 +387,7 @@ class TestDHCPDiscoveryClean(CiTestCase):
pid_file = os.path.join(tmpdir, 'dhclient.pid')
my_pid = 1
write_file(pid_file, "%d\n" % my_pid)
+ m_getppid.return_value = 1 # Indicate that dhclient has daemonized
self.assertItemsEqual(
[{'interface': 'eth9', 'fixed-address': '192.168.2.74',
@@ -355,3 +546,5 @@ class TestEphemeralDhcpNoNetworkSetup(HttprettyTestCase):
self.assertEqual(fake_lease, lease)
# Ensure that dhcp discovery occurs
m_dhcp.called_once_with()
+
+# vi: ts=4 expandtab
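
The new tests above pin down the RFC 3442 classless-static-routes wire format: a prefix width, then only as many network octets as the width requires, then a four-octet gateway, repeated. A standalone decoder sketch for the comma-separated ISC form (an illustration under those assumptions; cloud-init's parse_static_routes additionally handles the Red Hat dotted format and, per the tests, logs and returns partial results rather than raising):

    import math

    def decode_rfc3442(value):
        # Decode a comma-separated RFC 3442 option value into
        # (network_cidr, gateway) tuples. Hypothetical helper, not
        # the cloud-init implementation.
        octets = [int(t) for t in value.rstrip(';').split(',')]
        routes = []
        while octets:
            width = octets.pop(0)
            if width > 32:
                raise ValueError('invalid prefix width: %d' % width)
            n = math.ceil(width / 8)  # significant network octets
            net = octets[:n] + [0] * (4 - n)
            gateway = octets[n:n + 4]
            if len(gateway) < 4:
                raise ValueError('truncated gateway')
            octets = octets[n + 4:]
            routes.append(('%s/%d' % ('.'.join(map(str, net)), width),
                           '.'.join(map(str, gateway))))
        return routes

    # decode_rfc3442("0,130,56,240,1")
    #   -> [('0.0.0.0/0', '130.56.240.1')]
    # decode_rfc3442("32,169,254,169,254,130,56,248,255")
    #   -> [('169.254.169.254/32', '130.56.248.255')]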
diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py
index f55c31e8..5081a337 100644
--- a/cloudinit/net/tests/test_init.py
+++ b/cloudinit/net/tests/test_init.py
@@ -3,15 +3,15 @@
import copy
import errno
import httpretty
-import mock
import os
import requests
import textwrap
-import yaml
+from unittest import mock
import cloudinit.net as net
from cloudinit.util import ensure_file, write_file, ProcessExecutionError
from cloudinit.tests.helpers import CiTestCase, HttprettyTestCase
+from cloudinit import safeyaml as yaml
class TestSysDevPath(CiTestCase):
@@ -157,6 +157,41 @@ class TestReadSysNet(CiTestCase):
ensure_file(os.path.join(self.sysdir, 'eth0', 'bonding'))
self.assertTrue(net.is_bond('eth0'))
+ def test_get_master(self):
+ """get_master returns the path when /sys/net/devname/master exists."""
+ self.assertIsNone(net.get_master('enP1s1'))
+ master_path = os.path.join(self.sysdir, 'enP1s1', 'master')
+ ensure_file(master_path)
+ self.assertEqual(master_path, net.get_master('enP1s1'))
+
+ def test_master_is_bridge_or_bond(self):
+ bridge_mac = 'aa:bb:cc:aa:bb:cc'
+ bond_mac = 'cc:bb:aa:cc:bb:aa'
+
+ # No master => False
+ write_file(os.path.join(self.sysdir, 'eth1', 'address'), bridge_mac)
+ write_file(os.path.join(self.sysdir, 'eth2', 'address'), bond_mac)
+
+ self.assertFalse(net.master_is_bridge_or_bond('eth1'))
+ self.assertFalse(net.master_is_bridge_or_bond('eth2'))
+
+ # masters without bridge/bonding => False
+ write_file(os.path.join(self.sysdir, 'br0', 'address'), bridge_mac)
+ write_file(os.path.join(self.sysdir, 'bond0', 'address'), bond_mac)
+
+ os.symlink('../br0', os.path.join(self.sysdir, 'eth1', 'master'))
+ os.symlink('../bond0', os.path.join(self.sysdir, 'eth2', 'master'))
+
+ self.assertFalse(net.master_is_bridge_or_bond('eth1'))
+ self.assertFalse(net.master_is_bridge_or_bond('eth2'))
+
+ # masters with bridge/bonding => True
+ write_file(os.path.join(self.sysdir, 'br0', 'bridge'), '')
+ write_file(os.path.join(self.sysdir, 'bond0', 'bonding'), '')
+
+ self.assertTrue(net.master_is_bridge_or_bond('eth1'))
+ self.assertTrue(net.master_is_bridge_or_bond('eth2'))
+
def test_is_vlan(self):
"""is_vlan is True when /sys/net/devname/uevent has DEVTYPE=vlan."""
ensure_file(os.path.join(self.sysdir, 'eth0', 'uevent'))
@@ -204,6 +239,10 @@ class TestGenerateFallbackConfig(CiTestCase):
self.add_patch('cloudinit.net.util.is_container', 'm_is_container',
return_value=False)
self.add_patch('cloudinit.net.util.udevadm_settle', 'm_settle')
+ self.add_patch('cloudinit.net.is_netfailover', 'm_netfail',
+ return_value=False)
+ self.add_patch('cloudinit.net.is_netfail_master', 'm_netfail_master',
+ return_value=False)
def test_generate_fallback_finds_connected_eth_with_mac(self):
"""generate_fallback_config finds any connected device with a mac."""
@@ -212,9 +251,9 @@ class TestGenerateFallbackConfig(CiTestCase):
mac = 'aa:bb:cc:aa:bb:cc'
write_file(os.path.join(self.sysdir, 'eth1', 'address'), mac)
expected = {
- 'config': [{'type': 'physical', 'mac_address': mac,
- 'name': 'eth1', 'subnets': [{'type': 'dhcp'}]}],
- 'version': 1}
+ 'ethernets': {'eth1': {'match': {'macaddress': mac},
+ 'dhcp4': True, 'set-name': 'eth1'}},
+ 'version': 2}
self.assertEqual(expected, net.generate_fallback_config())
def test_generate_fallback_finds_dormant_eth_with_mac(self):
@@ -223,9 +262,9 @@ class TestGenerateFallbackConfig(CiTestCase):
mac = 'aa:bb:cc:aa:bb:cc'
write_file(os.path.join(self.sysdir, 'eth0', 'address'), mac)
expected = {
- 'config': [{'type': 'physical', 'mac_address': mac,
- 'name': 'eth0', 'subnets': [{'type': 'dhcp'}]}],
- 'version': 1}
+ 'ethernets': {'eth0': {'match': {'macaddress': mac}, 'dhcp4': True,
+ 'set-name': 'eth0'}},
+ 'version': 2}
self.assertEqual(expected, net.generate_fallback_config())
def test_generate_fallback_finds_eth_by_operstate(self):
@@ -233,9 +272,10 @@ class TestGenerateFallbackConfig(CiTestCase):
mac = 'aa:bb:cc:aa:bb:cc'
write_file(os.path.join(self.sysdir, 'eth0', 'address'), mac)
expected = {
- 'config': [{'type': 'physical', 'mac_address': mac,
- 'name': 'eth0', 'subnets': [{'type': 'dhcp'}]}],
- 'version': 1}
+ 'ethernets': {
+ 'eth0': {'dhcp4': True, 'match': {'macaddress': mac},
+ 'set-name': 'eth0'}},
+ 'version': 2}
valid_operstates = ['dormant', 'down', 'lowerlayerdown', 'unknown']
for state in valid_operstates:
write_file(os.path.join(self.sysdir, 'eth0', 'operstate'), state)
@@ -267,6 +307,61 @@ class TestGenerateFallbackConfig(CiTestCase):
ensure_file(os.path.join(self.sysdir, 'eth0', 'bonding'))
self.assertIsNone(net.generate_fallback_config())
+ def test_generate_fallback_config_skips_netfail_devs(self):
+ """gen_fallback_config ignores netfail primary,sby no mac on master."""
+ mac = 'aa:bb:cc:aa:bb:cc' # netfailover devs share the same mac
+ for iface in ['ens3', 'ens3sby', 'enP0s1f3']:
+ write_file(os.path.join(self.sysdir, iface, 'carrier'), '1')
+ write_file(
+ os.path.join(self.sysdir, iface, 'addr_assign_type'), '0')
+ write_file(
+ os.path.join(self.sysdir, iface, 'address'), mac)
+
+ def is_netfail(iface, _driver=None):
+ # ens3 is the master
+ if iface == 'ens3':
+ return False
+ return True
+ self.m_netfail.side_effect = is_netfail
+
+ def is_netfail_master(iface, _driver=None):
+ # ens3 is the master
+ if iface == 'ens3':
+ return True
+ return False
+ self.m_netfail_master.side_effect = is_netfail_master
+ expected = {
+ 'ethernets': {
+ 'ens3': {'dhcp4': True, 'match': {'name': 'ens3'},
+ 'set-name': 'ens3'}},
+ 'version': 2}
+ result = net.generate_fallback_config()
+ self.assertEqual(expected, result)
+
+
+class TestNetFindFallBackNic(CiTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestNetFindFallBackNic, self).setUp()
+ sys_mock = mock.patch('cloudinit.net.get_sys_class_path')
+ self.m_sys_path = sys_mock.start()
+ self.sysdir = self.tmp_dir() + '/'
+ self.m_sys_path.return_value = self.sysdir
+ self.addCleanup(sys_mock.stop)
+ self.add_patch('cloudinit.net.util.is_container', 'm_is_container',
+ return_value=False)
+ self.add_patch('cloudinit.net.util.udevadm_settle', 'm_settle')
+
+ def test_generate_fallback_finds_first_connected_eth_with_mac(self):
+ """find_fallback_nic finds any connected device with a mac."""
+ write_file(os.path.join(self.sysdir, 'eth0', 'carrier'), '1')
+ write_file(os.path.join(self.sysdir, 'eth1', 'carrier'), '1')
+ mac = 'aa:bb:cc:aa:bb:cc'
+ write_file(os.path.join(self.sysdir, 'eth1', 'address'), mac)
+ self.assertEqual('eth1', net.find_fallback_nic())
+
class TestGetDeviceList(CiTestCase):
@@ -364,6 +459,57 @@ class TestGetInterfaceMAC(CiTestCase):
expected = [('eth2', 'aa:bb:cc:aa:bb:cc', None, None)]
self.assertEqual(expected, net.get_interfaces())
+ def test_get_interfaces_by_mac_skips_master_devs(self):
+ """Ignore interfaces with a master device which would have dup mac."""
+ mac1 = mac2 = 'aa:bb:cc:aa:bb:cc'
+ write_file(os.path.join(self.sysdir, 'eth1', 'addr_assign_type'), '0')
+ write_file(os.path.join(self.sysdir, 'eth1', 'address'), mac1)
+ write_file(os.path.join(self.sysdir, 'eth1', 'master'), "blah")
+ write_file(os.path.join(self.sysdir, 'eth2', 'addr_assign_type'), '0')
+ write_file(os.path.join(self.sysdir, 'eth2', 'address'), mac2)
+ expected = [('eth2', mac2, None, None)]
+ self.assertEqual(expected, net.get_interfaces())
+
+ @mock.patch('cloudinit.net.is_netfailover')
+    def test_get_interfaces_by_mac_skips_netfailover(self, m_netfail):
+ """Ignore interfaces if netfailover primary or standby."""
+ mac = 'aa:bb:cc:aa:bb:cc' # netfailover devs share the same mac
+ for iface in ['ens3', 'ens3sby', 'enP0s1f3']:
+ write_file(
+ os.path.join(self.sysdir, iface, 'addr_assign_type'), '0')
+ write_file(
+ os.path.join(self.sysdir, iface, 'address'), mac)
+
+ def is_netfail(iface, _driver=None):
+ # ens3 is the master
+ if iface == 'ens3':
+ return False
+ else:
+ return True
+ m_netfail.side_effect = is_netfail
+ expected = [('ens3', mac, None, None)]
+ self.assertEqual(expected, net.get_interfaces())
+
+ def test_get_interfaces_does_not_skip_phys_members_of_bridges_and_bonds(
+ self
+ ):
+ bridge_mac = 'aa:bb:cc:aa:bb:cc'
+ bond_mac = 'cc:bb:aa:cc:bb:aa'
+ write_file(os.path.join(self.sysdir, 'br0', 'address'), bridge_mac)
+ write_file(os.path.join(self.sysdir, 'br0', 'bridge'), '')
+
+ write_file(os.path.join(self.sysdir, 'bond0', 'address'), bond_mac)
+ write_file(os.path.join(self.sysdir, 'bond0', 'bonding'), '')
+
+ write_file(os.path.join(self.sysdir, 'eth1', 'address'), bridge_mac)
+ os.symlink('../br0', os.path.join(self.sysdir, 'eth1', 'master'))
+
+ write_file(os.path.join(self.sysdir, 'eth2', 'address'), bond_mac)
+ os.symlink('../bond0', os.path.join(self.sysdir, 'eth2', 'master'))
+
+ interface_names = [interface[0] for interface in net.get_interfaces()]
+ self.assertEqual(['eth1', 'eth2'], sorted(interface_names))
+
class TestInterfaceHasOwnMAC(CiTestCase):
@@ -549,6 +695,45 @@ class TestEphemeralIPV4Network(CiTestCase):
self.assertEqual(expected_setup_calls, m_subp.call_args_list)
m_subp.assert_has_calls(expected_teardown_calls)
+ def test_ephemeral_ipv4_network_with_rfc3442_static_routes(self, m_subp):
+ params = {
+ 'interface': 'eth0', 'ip': '192.168.2.2',
+ 'prefix_or_mask': '255.255.255.0', 'broadcast': '192.168.2.255',
+ 'static_routes': [('169.254.169.254/32', '192.168.2.1'),
+ ('0.0.0.0/0', '192.168.2.1')],
+ 'router': '192.168.2.1'}
+ expected_setup_calls = [
+ mock.call(
+ ['ip', '-family', 'inet', 'addr', 'add', '192.168.2.2/24',
+ 'broadcast', '192.168.2.255', 'dev', 'eth0'],
+ capture=True, update_env={'LANG': 'C'}),
+ mock.call(
+ ['ip', '-family', 'inet', 'link', 'set', 'dev', 'eth0', 'up'],
+ capture=True),
+ mock.call(
+ ['ip', '-4', 'route', 'add', '169.254.169.254/32',
+ 'via', '192.168.2.1', 'dev', 'eth0'], capture=True),
+ mock.call(
+ ['ip', '-4', 'route', 'add', '0.0.0.0/0',
+ 'via', '192.168.2.1', 'dev', 'eth0'], capture=True)]
+ expected_teardown_calls = [
+ mock.call(
+ ['ip', '-4', 'route', 'del', '0.0.0.0/0',
+ 'via', '192.168.2.1', 'dev', 'eth0'], capture=True),
+ mock.call(
+ ['ip', '-4', 'route', 'del', '169.254.169.254/32',
+ 'via', '192.168.2.1', 'dev', 'eth0'], capture=True),
+ mock.call(
+ ['ip', '-family', 'inet', 'link', 'set', 'dev',
+ 'eth0', 'down'], capture=True),
+ mock.call(
+ ['ip', '-family', 'inet', 'addr', 'del',
+ '192.168.2.2/24', 'dev', 'eth0'], capture=True)
+ ]
+ with net.EphemeralIPv4Network(**params):
+ self.assertEqual(expected_setup_calls, m_subp.call_args_list)
+ m_subp.assert_has_calls(expected_setup_calls + expected_teardown_calls)
+
class TestApplyNetworkCfgNames(CiTestCase):
V1_CONFIG = textwrap.dedent("""\
@@ -669,3 +854,447 @@ class TestHasURLConnectivity(HttprettyTestCase):
httpretty.register_uri(httpretty.GET, self.url, body={}, status=404)
self.assertFalse(
net.has_url_connectivity(self.url), 'Expected False on url fail')
+
+
+def _mk_v1_phys(mac, name, driver, device_id):
+ v1_cfg = {'type': 'physical', 'name': name, 'mac_address': mac}
+ params = {}
+ if driver:
+ params.update({'driver': driver})
+ if device_id:
+ params.update({'device_id': device_id})
+
+ if params:
+ v1_cfg.update({'params': params})
+
+ return v1_cfg
+
+
+def _mk_v2_phys(mac, name, driver=None, device_id=None):
+ v2_cfg = {'set-name': name, 'match': {'macaddress': mac}}
+ if driver:
+ v2_cfg['match'].update({'driver': driver})
+ if device_id:
+ v2_cfg['match'].update({'device_id': device_id})
+
+ return v2_cfg
+
+
+class TestExtractPhysdevs(CiTestCase):
+
+ def setUp(self):
+ super(TestExtractPhysdevs, self).setUp()
+ self.add_patch('cloudinit.net.device_driver', 'm_driver')
+ self.add_patch('cloudinit.net.device_devid', 'm_devid')
+
+ def test_extract_physdevs_looks_up_driver_v1(self):
+ driver = 'virtio'
+ self.m_driver.return_value = driver
+ physdevs = [
+ ['aa:bb:cc:dd:ee:ff', 'eth0', None, '0x1000'],
+ ]
+ netcfg = {
+ 'version': 1,
+ 'config': [_mk_v1_phys(*args) for args in physdevs],
+ }
+ # insert the driver value for verification
+ physdevs[0][2] = driver
+ self.assertEqual(sorted(physdevs),
+ sorted(net.extract_physdevs(netcfg)))
+ self.m_driver.assert_called_with('eth0')
+
+ def test_extract_physdevs_looks_up_driver_v2(self):
+ driver = 'virtio'
+ self.m_driver.return_value = driver
+ physdevs = [
+ ['aa:bb:cc:dd:ee:ff', 'eth0', None, '0x1000'],
+ ]
+ netcfg = {
+ 'version': 2,
+ 'ethernets': {args[1]: _mk_v2_phys(*args) for args in physdevs},
+ }
+ # insert the driver value for verification
+ physdevs[0][2] = driver
+ self.assertEqual(sorted(physdevs),
+ sorted(net.extract_physdevs(netcfg)))
+ self.m_driver.assert_called_with('eth0')
+
+ def test_extract_physdevs_looks_up_devid_v1(self):
+ devid = '0x1000'
+ self.m_devid.return_value = devid
+ physdevs = [
+ ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', None],
+ ]
+ netcfg = {
+ 'version': 1,
+ 'config': [_mk_v1_phys(*args) for args in physdevs],
+ }
+ # insert the driver value for verification
+ physdevs[0][3] = devid
+ self.assertEqual(sorted(physdevs),
+ sorted(net.extract_physdevs(netcfg)))
+ self.m_devid.assert_called_with('eth0')
+
+ def test_extract_physdevs_looks_up_devid_v2(self):
+ devid = '0x1000'
+ self.m_devid.return_value = devid
+ physdevs = [
+ ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', None],
+ ]
+ netcfg = {
+ 'version': 2,
+ 'ethernets': {args[1]: _mk_v2_phys(*args) for args in physdevs},
+ }
+ # insert the driver value for verification
+ physdevs[0][3] = devid
+ self.assertEqual(sorted(physdevs),
+ sorted(net.extract_physdevs(netcfg)))
+ self.m_devid.assert_called_with('eth0')
+
+ def test_get_v1_type_physical(self):
+ physdevs = [
+ ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', '0x1000'],
+ ['00:11:22:33:44:55', 'ens3', 'e1000', '0x1643'],
+ ['09:87:65:43:21:10', 'ens0p1', 'mlx4_core', '0:0:1000'],
+ ]
+ netcfg = {
+ 'version': 1,
+ 'config': [_mk_v1_phys(*args) for args in physdevs],
+ }
+ self.assertEqual(sorted(physdevs),
+ sorted(net.extract_physdevs(netcfg)))
+
+ def test_get_v2_type_physical(self):
+ physdevs = [
+ ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', '0x1000'],
+ ['00:11:22:33:44:55', 'ens3', 'e1000', '0x1643'],
+ ['09:87:65:43:21:10', 'ens0p1', 'mlx4_core', '0:0:1000'],
+ ]
+ netcfg = {
+ 'version': 2,
+ 'ethernets': {args[1]: _mk_v2_phys(*args) for args in physdevs},
+ }
+ self.assertEqual(sorted(physdevs),
+ sorted(net.extract_physdevs(netcfg)))
+
+ def test_get_v2_type_physical_skips_if_no_set_name(self):
+ netcfg = {
+ 'version': 2,
+ 'ethernets': {
+ 'ens3': {
+ 'match': {'macaddress': '00:11:22:33:44:55'},
+ }
+ }
+ }
+ self.assertEqual([], net.extract_physdevs(netcfg))
+
+ def test_runtime_error_on_unknown_netcfg_version(self):
+ with self.assertRaises(RuntimeError):
+ net.extract_physdevs({'version': 3, 'awesome_config': []})
+
+
+class TestWaitForPhysdevs(CiTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestWaitForPhysdevs, self).setUp()
+ self.add_patch('cloudinit.net.get_interfaces_by_mac',
+ 'm_get_iface_mac')
+ self.add_patch('cloudinit.util.udevadm_settle', 'm_udev_settle')
+
+ def test_wait_for_physdevs_skips_settle_if_all_present(self):
+ physdevs = [
+ ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', '0x1000'],
+ ['00:11:22:33:44:55', 'ens3', 'e1000', '0x1643'],
+ ]
+ netcfg = {
+ 'version': 2,
+ 'ethernets': {args[1]: _mk_v2_phys(*args)
+ for args in physdevs},
+ }
+ self.m_get_iface_mac.side_effect = iter([
+ {'aa:bb:cc:dd:ee:ff': 'eth0',
+ '00:11:22:33:44:55': 'ens3'},
+ ])
+ net.wait_for_physdevs(netcfg)
+ self.assertEqual(0, self.m_udev_settle.call_count)
+
+ def test_wait_for_physdevs_calls_udev_settle_on_missing(self):
+ physdevs = [
+ ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', '0x1000'],
+ ['00:11:22:33:44:55', 'ens3', 'e1000', '0x1643'],
+ ]
+ netcfg = {
+ 'version': 2,
+ 'ethernets': {args[1]: _mk_v2_phys(*args)
+ for args in physdevs},
+ }
+ self.m_get_iface_mac.side_effect = iter([
+ {'aa:bb:cc:dd:ee:ff': 'eth0'}, # first call ens3 is missing
+ {'aa:bb:cc:dd:ee:ff': 'eth0',
+ '00:11:22:33:44:55': 'ens3'}, # second call has both
+ ])
+ net.wait_for_physdevs(netcfg)
+ self.m_udev_settle.assert_called_with(exists=net.sys_dev_path('ens3'))
+
+ def test_wait_for_physdevs_raise_runtime_error_if_missing_and_strict(self):
+ physdevs = [
+ ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', '0x1000'],
+ ['00:11:22:33:44:55', 'ens3', 'e1000', '0x1643'],
+ ]
+ netcfg = {
+ 'version': 2,
+ 'ethernets': {args[1]: _mk_v2_phys(*args)
+ for args in physdevs},
+ }
+ self.m_get_iface_mac.return_value = {}
+ with self.assertRaises(RuntimeError):
+ net.wait_for_physdevs(netcfg)
+
+ self.assertEqual(5 * len(physdevs), self.m_udev_settle.call_count)
+
+ def test_wait_for_physdevs_no_raise_if_not_strict(self):
+ physdevs = [
+ ['aa:bb:cc:dd:ee:ff', 'eth0', 'virtio', '0x1000'],
+ ['00:11:22:33:44:55', 'ens3', 'e1000', '0x1643'],
+ ]
+ netcfg = {
+ 'version': 2,
+ 'ethernets': {args[1]: _mk_v2_phys(*args)
+ for args in physdevs},
+ }
+ self.m_get_iface_mac.return_value = {}
+ net.wait_for_physdevs(netcfg, strict=False)
+ self.assertEqual(5 * len(physdevs), self.m_udev_settle.call_count)
+
+
+class TestNetFailOver(CiTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestNetFailOver, self).setUp()
+ self.add_patch('cloudinit.net.util', 'm_util')
+ self.add_patch('cloudinit.net.read_sys_net', 'm_read_sys_net')
+ self.add_patch('cloudinit.net.device_driver', 'm_device_driver')
+
+ def test_get_dev_features(self):
+ devname = self.random_string()
+ features = self.random_string()
+ self.m_read_sys_net.return_value = features
+
+ self.assertEqual(features, net.get_dev_features(devname))
+ self.assertEqual(1, self.m_read_sys_net.call_count)
+ self.assertEqual(mock.call(devname, 'device/features'),
+ self.m_read_sys_net.call_args_list[0])
+
+ def test_get_dev_features_none_returns_empty_string(self):
+ devname = self.random_string()
+ self.m_read_sys_net.side_effect = Exception('error')
+ self.assertEqual('', net.get_dev_features(devname))
+ self.assertEqual(1, self.m_read_sys_net.call_count)
+ self.assertEqual(mock.call(devname, 'device/features'),
+ self.m_read_sys_net.call_args_list[0])
+
+ @mock.patch('cloudinit.net.get_dev_features')
+ def test_has_netfail_standby_feature(self, m_dev_features):
+ devname = self.random_string()
+ standby_features = ('0' * 62) + '1' + '0'
+ m_dev_features.return_value = standby_features
+ self.assertTrue(net.has_netfail_standby_feature(devname))
+
+ @mock.patch('cloudinit.net.get_dev_features')
+ def test_has_netfail_standby_feature_short_is_false(self, m_dev_features):
+ devname = self.random_string()
+ standby_features = self.random_string()
+ m_dev_features.return_value = standby_features
+ self.assertFalse(net.has_netfail_standby_feature(devname))
+
+ @mock.patch('cloudinit.net.get_dev_features')
+ def test_has_netfail_standby_feature_not_present_is_false(self,
+ m_dev_features):
+ devname = self.random_string()
+ standby_features = '0' * 64
+ m_dev_features.return_value = standby_features
+ self.assertFalse(net.has_netfail_standby_feature(devname))
+
+ @mock.patch('cloudinit.net.get_dev_features')
+ def test_has_netfail_standby_feature_no_features_is_false(self,
+ m_dev_features):
+ devname = self.random_string()
+ standby_features = None
+ m_dev_features.return_value = standby_features
+ self.assertFalse(net.has_netfail_standby_feature(devname))
+
+ @mock.patch('cloudinit.net.has_netfail_standby_feature')
+ @mock.patch('cloudinit.net.os.path.exists')
+ def test_is_netfail_master(self, m_exists, m_standby):
+ devname = self.random_string()
+ driver = 'virtio_net'
+ m_exists.return_value = False # no master sysfs attr
+ m_standby.return_value = True # has standby feature flag
+ self.assertTrue(net.is_netfail_master(devname, driver))
+
+ @mock.patch('cloudinit.net.sys_dev_path')
+ def test_is_netfail_master_checks_master_attr(self, m_sysdev):
+ devname = self.random_string()
+ driver = 'virtio_net'
+ m_sysdev.return_value = self.random_string()
+ self.assertFalse(net.is_netfail_master(devname, driver))
+ self.assertEqual(1, m_sysdev.call_count)
+ self.assertEqual(mock.call(devname, path='master'),
+ m_sysdev.call_args_list[0])
+
+ @mock.patch('cloudinit.net.has_netfail_standby_feature')
+ @mock.patch('cloudinit.net.os.path.exists')
+ def test_is_netfail_master_wrong_driver(self, m_exists, m_standby):
+ devname = self.random_string()
+ driver = self.random_string()
+ self.assertFalse(net.is_netfail_master(devname, driver))
+
+ @mock.patch('cloudinit.net.has_netfail_standby_feature')
+ @mock.patch('cloudinit.net.os.path.exists')
+ def test_is_netfail_master_has_master_attr(self, m_exists, m_standby):
+ devname = self.random_string()
+ driver = 'virtio_net'
+ m_exists.return_value = True # has master sysfs attr
+ self.assertFalse(net.is_netfail_master(devname, driver))
+
+ @mock.patch('cloudinit.net.has_netfail_standby_feature')
+ @mock.patch('cloudinit.net.os.path.exists')
+ def test_is_netfail_master_no_standby_feat(self, m_exists, m_standby):
+ devname = self.random_string()
+ driver = 'virtio_net'
+ m_exists.return_value = False # no master sysfs attr
+ m_standby.return_value = False # no standby feature flag
+ self.assertFalse(net.is_netfail_master(devname, driver))
+
+ @mock.patch('cloudinit.net.has_netfail_standby_feature')
+ @mock.patch('cloudinit.net.os.path.exists')
+ @mock.patch('cloudinit.net.sys_dev_path')
+ def test_is_netfail_primary(self, m_sysdev, m_exists, m_standby):
+ devname = self.random_string()
+ driver = self.random_string() # device not virtio_net
+ master_devname = self.random_string()
+ m_sysdev.return_value = "%s/%s" % (self.random_string(),
+ master_devname)
+ m_exists.return_value = True # has master sysfs attr
+ self.m_device_driver.return_value = 'virtio_net' # master virtio_net
+ m_standby.return_value = True # has standby feature flag
+ self.assertTrue(net.is_netfail_primary(devname, driver))
+ self.assertEqual(1, self.m_device_driver.call_count)
+ self.assertEqual(mock.call(master_devname),
+ self.m_device_driver.call_args_list[0])
+ self.assertEqual(1, m_standby.call_count)
+ self.assertEqual(mock.call(master_devname),
+ m_standby.call_args_list[0])
+
+ @mock.patch('cloudinit.net.has_netfail_standby_feature')
+ @mock.patch('cloudinit.net.os.path.exists')
+ @mock.patch('cloudinit.net.sys_dev_path')
+ def test_is_netfail_primary_wrong_driver(self, m_sysdev, m_exists,
+ m_standby):
+ devname = self.random_string()
+ driver = 'virtio_net'
+ self.assertFalse(net.is_netfail_primary(devname, driver))
+
+ @mock.patch('cloudinit.net.has_netfail_standby_feature')
+ @mock.patch('cloudinit.net.os.path.exists')
+ @mock.patch('cloudinit.net.sys_dev_path')
+ def test_is_netfail_primary_no_master(self, m_sysdev, m_exists, m_standby):
+ devname = self.random_string()
+ driver = self.random_string() # device not virtio_net
+ m_exists.return_value = False # no master sysfs attr
+ self.assertFalse(net.is_netfail_primary(devname, driver))
+
+ @mock.patch('cloudinit.net.has_netfail_standby_feature')
+ @mock.patch('cloudinit.net.os.path.exists')
+ @mock.patch('cloudinit.net.sys_dev_path')
+ def test_is_netfail_primary_bad_master(self, m_sysdev, m_exists,
+ m_standby):
+ devname = self.random_string()
+ driver = self.random_string() # device not virtio_net
+ master_devname = self.random_string()
+ m_sysdev.return_value = "%s/%s" % (self.random_string(),
+ master_devname)
+ m_exists.return_value = True # has master sysfs attr
+ self.m_device_driver.return_value = 'XXXX' # master not virtio_net
+ self.assertFalse(net.is_netfail_primary(devname, driver))
+
+ @mock.patch('cloudinit.net.has_netfail_standby_feature')
+ @mock.patch('cloudinit.net.os.path.exists')
+ @mock.patch('cloudinit.net.sys_dev_path')
+ def test_is_netfail_primary_no_standby(self, m_sysdev, m_exists,
+ m_standby):
+ devname = self.random_string()
+ driver = self.random_string() # device not virtio_net
+ master_devname = self.random_string()
+ m_sysdev.return_value = "%s/%s" % (self.random_string(),
+ master_devname)
+ m_exists.return_value = True # has master sysfs attr
+ self.m_device_driver.return_value = 'virtio_net' # master virtio_net
+ m_standby.return_value = False # master has no standby feature flag
+ self.assertFalse(net.is_netfail_primary(devname, driver))
+
+ @mock.patch('cloudinit.net.has_netfail_standby_feature')
+ @mock.patch('cloudinit.net.os.path.exists')
+ def test_is_netfail_standby(self, m_exists, m_standby):
+ devname = self.random_string()
+ driver = 'virtio_net'
+ m_exists.return_value = True # has master sysfs attr
+ m_standby.return_value = True # has standby feature flag
+ self.assertTrue(net.is_netfail_standby(devname, driver))
+
+ @mock.patch('cloudinit.net.has_netfail_standby_feature')
+ @mock.patch('cloudinit.net.os.path.exists')
+ def test_is_netfail_standby_wrong_driver(self, m_exists, m_standby):
+ devname = self.random_string()
+ driver = self.random_string()
+ self.assertFalse(net.is_netfail_standby(devname, driver))
+
+ @mock.patch('cloudinit.net.has_netfail_standby_feature')
+ @mock.patch('cloudinit.net.os.path.exists')
+ def test_is_netfail_standby_no_master(self, m_exists, m_standby):
+ devname = self.random_string()
+ driver = 'virtio_net'
+ m_exists.return_value = False # has master sysfs attr
+ self.assertFalse(net.is_netfail_standby(devname, driver))
+
+ @mock.patch('cloudinit.net.has_netfail_standby_feature')
+ @mock.patch('cloudinit.net.os.path.exists')
+ def test_is_netfail_standby_no_standby_feature(self, m_exists, m_standby):
+ devname = self.random_string()
+ driver = 'virtio_net'
+ m_exists.return_value = True # has master sysfs attr
+ m_standby.return_value = False # has standby feature flag
+ self.assertFalse(net.is_netfail_standby(devname, driver))
+
+ @mock.patch('cloudinit.net.is_netfail_standby')
+ @mock.patch('cloudinit.net.is_netfail_primary')
+ def test_is_netfailover_primary(self, m_primary, m_standby):
+ devname = self.random_string()
+ driver = self.random_string()
+ m_primary.return_value = True
+ m_standby.return_value = False
+ self.assertTrue(net.is_netfailover(devname, driver))
+
+ @mock.patch('cloudinit.net.is_netfail_standby')
+ @mock.patch('cloudinit.net.is_netfail_primary')
+ def test_is_netfailover_standby(self, m_primary, m_standby):
+ devname = self.random_string()
+ driver = self.random_string()
+ m_primary.return_value = False
+ m_standby.return_value = True
+ self.assertTrue(net.is_netfailover(devname, driver))
+
+ @mock.patch('cloudinit.net.is_netfail_standby')
+ @mock.patch('cloudinit.net.is_netfail_primary')
+ def test_is_netfailover_returns_false(self, m_primary, m_standby):
+ devname = self.random_string()
+ driver = self.random_string()
+ m_primary.return_value = False
+ m_standby.return_value = False
+ self.assertFalse(net.is_netfailover(devname, driver))
+
+# vi: ts=4 expandtab
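
For context on the netfailover tests above, a minimal sketch of the
standby-feature probe they exercise, assuming the virtio sysfs layout implied
by the fixtures (the real helpers live in cloudinit.net; the bit index
mirrors the ('0' * 62) + '1' feature string used in the tests):

    import os

    SYSFS_NET = '/sys/class/net'

    def read_dev_features(devname):
        # device/features is a flat string of '0'/'1' flags, one per bit
        path = os.path.join(SYSFS_NET, devname, 'device', 'features')
        try:
            with open(path) as f:
                return f.read().strip()
        except OSError:
            return ''

    def has_standby_feature(devname):
        # VIRTIO_NET_F_STANDBY is advertised at bit index 62; anything
        # shorter cannot carry the flag
        features = read_dev_features(devname)
        return len(features) > 62 and features[62] == '1'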
diff --git a/cloudinit/net/tests/test_network_state.py b/cloudinit/net/tests/test_network_state.py
new file mode 100644
index 00000000..55880852
--- /dev/null
+++ b/cloudinit/net/tests/test_network_state.py
@@ -0,0 +1,48 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from unittest import mock
+
+from cloudinit.net import network_state
+from cloudinit.tests.helpers import CiTestCase
+
+netstate_path = 'cloudinit.net.network_state'
+
+
+class TestNetworkStateParseConfig(CiTestCase):
+
+ def setUp(self):
+ super(TestNetworkStateParseConfig, self).setUp()
+ nsi_path = netstate_path + '.NetworkStateInterpreter'
+ self.add_patch(nsi_path, 'm_nsi')
+
+ def test_missing_version_returns_none(self):
+ ncfg = {}
+ self.assertEqual(None, network_state.parse_net_config_data(ncfg))
+
+ def test_unknown_versions_returns_none(self):
+ ncfg = {'version': 13.2}
+ self.assertEqual(None, network_state.parse_net_config_data(ncfg))
+
+ def test_version_2_passes_self_as_config(self):
+ ncfg = {'version': 2, 'otherconfig': {}, 'somemore': [1, 2, 3]}
+ network_state.parse_net_config_data(ncfg)
+ self.assertEqual([mock.call(version=2, config=ncfg)],
+ self.m_nsi.call_args_list)
+
+ def test_valid_config_gets_network_state(self):
+ ncfg = {'version': 2, 'otherconfig': {}, 'somemore': [1, 2, 3]}
+ result = network_state.parse_net_config_data(ncfg)
+ self.assertNotEqual(None, result)
+
+ def test_empty_v1_config_gets_network_state(self):
+ ncfg = {'version': 1, 'config': []}
+ result = network_state.parse_net_config_data(ncfg)
+ self.assertNotEqual(None, result)
+
+ def test_empty_v2_config_gets_network_state(self):
+ ncfg = {'version': 2}
+ result = network_state.parse_net_config_data(ncfg)
+ self.assertNotEqual(None, result)
+
+
+# vi: ts=4 expandtab
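
A usage sketch of the behaviour these tests pin down (configs are
hypothetical; parse_net_config_data returns None when the version key is
missing or unrecognized):

    from cloudinit.net import network_state

    v2 = {'version': 2, 'ethernets': {'eth0': {'dhcp4': True}}}
    assert network_state.parse_net_config_data(v2) is not None
    assert network_state.parse_net_config_data({}) is None
    assert network_state.parse_net_config_data({'version': 13.2}) is None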
diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py
index 9ff929c2..6ba21f4d 100644
--- a/cloudinit/netinfo.py
+++ b/cloudinit/netinfo.py
@@ -141,6 +141,9 @@ def _netdev_info_ifconfig(ifconfig_data):
res = re.match(r'.*<(\S+)>', toks[i + 1])
if res:
devs[curdev]['ipv6'][-1]['scope6'] = res.group(1)
+ else:
+ devs[curdev]['ipv6'][-1]['scope6'] = toks[i + 1]
+
return devs
@@ -355,18 +358,6 @@ def route_info():
return routes
-def getgateway():
- try:
- routes = route_info()
- except Exception:
- pass
- else:
- for r in routes.get('ipv4', []):
- if r['flags'].find("G") >= 0:
- return "%s[%s]" % (r['gateway'], r['iface'])
- return None
-
-
def netdev_pformat():
lines = []
empty = "."
@@ -389,8 +380,8 @@ def netdev_pformat():
addr.get('scope', empty), data["hwaddr"]))
for addr in data.get('ipv6'):
tbl.add_row(
- (dev, data["up"], addr["ip"], empty, addr["scope6"],
- data["hwaddr"]))
+ (dev, data["up"], addr["ip"], empty,
+ addr.get("scope6", empty), data["hwaddr"]))
if len(data.get('ipv6')) + len(data.get('ipv4')) == 0:
tbl.add_row((dev, data["up"], empty, empty, empty,
data["hwaddr"]))
diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py
index 6d23558e..946df7e0 100644..100755
--- a/cloudinit/reporting/handlers.py
+++ b/cloudinit/reporting/handlers.py
@@ -3,22 +3,17 @@
import abc
import fcntl
import json
-import six
import os
-import re
+import queue
import struct
import threading
import time
+import uuid
+from datetime import datetime
from cloudinit import log as logging
from cloudinit.registry import DictRegistry
from cloudinit import (url_helper, util)
-from datetime import datetime
-
-if six.PY2:
- from multiprocessing.queues import JoinableQueue as JQueue
-else:
- from queue import Queue as JQueue
LOG = logging.getLogger(__name__)
@@ -27,8 +22,7 @@ class ReportException(Exception):
pass
-@six.add_metaclass(abc.ABCMeta)
-class ReportingHandler(object):
+class ReportingHandler(metaclass=abc.ABCMeta):
"""Base class for report handlers.
Implement :meth:`~publish_event` for controlling what
@@ -129,24 +123,50 @@ class HyperVKvpReportingHandler(ReportingHandler):
DESC_IDX_KEY = 'msg_i'
JSON_SEPARATORS = (',', ':')
KVP_POOL_FILE_GUEST = '/var/lib/hyperv/.kvp_pool_1'
+ _already_truncated_pool_file = False
def __init__(self,
kvp_file_path=KVP_POOL_FILE_GUEST,
event_types=None):
super(HyperVKvpReportingHandler, self).__init__()
self._kvp_file_path = kvp_file_path
+ HyperVKvpReportingHandler._truncate_guest_pool_file(
+ self._kvp_file_path)
+
self._event_types = event_types
- self.q = JQueue()
- self.kvp_file = None
+ self.q = queue.Queue()
self.incarnation_no = self._get_incarnation_no()
self.event_key_prefix = u"{0}|{1}".format(self.EVENT_PREFIX,
self.incarnation_no)
- self._current_offset = 0
self.publish_thread = threading.Thread(
target=self._publish_event_routine)
self.publish_thread.daemon = True
self.publish_thread.start()
+ @classmethod
+ def _truncate_guest_pool_file(cls, kvp_file):
+ """
+ Truncate the pool file if it has not been truncated since boot.
+ This should be done exactly once for the file indicated by the
+ KVP_POOL_FILE_GUEST constant above. This method takes a filename
+ so that we can use an arbitrary file during unit testing.
+ Since KVP is a best-effort telemetry channel we only attempt to
+ truncate the file once and only if the file has not been modified
+ since boot. Additional truncation can lead to loss of existing
+ KVPs.
+ """
+ if cls._already_truncated_pool_file:
+ return
+ boot_time = time.time() - float(util.uptime())
+ try:
+ if os.path.getmtime(kvp_file) < boot_time:
+ with open(kvp_file, "w"):
+ pass
+ except (OSError, IOError) as e:
+ LOG.warning("failed to truncate kvp pool file, %s", e)
+ finally:
+ cls._already_truncated_pool_file = True
+
def _get_incarnation_no(self):
"""
use the time passed as the incarnation number.
@@ -162,28 +182,24 @@ class HyperVKvpReportingHandler(ReportingHandler):
def _iterate_kvps(self, offset):
"""iterate the kvp file from the current offset."""
- try:
- with open(self._kvp_file_path, 'rb+') as f:
- self.kvp_file = f
- fcntl.flock(f, fcntl.LOCK_EX)
- f.seek(offset)
+ with open(self._kvp_file_path, 'rb') as f:
+ fcntl.flock(f, fcntl.LOCK_EX)
+ f.seek(offset)
+ record_data = f.read(self.HV_KVP_RECORD_SIZE)
+ while len(record_data) == self.HV_KVP_RECORD_SIZE:
+ kvp_item = self._decode_kvp_item(record_data)
+ yield kvp_item
record_data = f.read(self.HV_KVP_RECORD_SIZE)
- while len(record_data) == self.HV_KVP_RECORD_SIZE:
- self._current_offset += self.HV_KVP_RECORD_SIZE
- kvp_item = self._decode_kvp_item(record_data)
- yield kvp_item
- record_data = f.read(self.HV_KVP_RECORD_SIZE)
- fcntl.flock(f, fcntl.LOCK_UN)
- finally:
- self.kvp_file = None
+ fcntl.flock(f, fcntl.LOCK_UN)
def _event_key(self, event):
"""
the event key format is:
- CLOUD_INIT|<incarnation number>|<event_type>|<event_name>
+ CLOUD_INIT|<incarnation number>|<event_type>|<event_name>|<time>
"""
- return u"{0}|{1}|{2}".format(self.event_key_prefix,
- event.event_type, event.name)
+ return u"{0}|{1}|{2}|{3}".format(self.event_key_prefix,
+ event.event_type, event.name,
+ uuid.uuid4())
def _encode_kvp_item(self, key, value):
data = (struct.pack("%ds%ds" % (
@@ -207,23 +223,13 @@ class HyperVKvpReportingHandler(ReportingHandler):
return {'key': k, 'value': v}
- def _update_kvp_item(self, record_data):
- if self.kvp_file is None:
- raise ReportException(
- "kvp file '{0}' not opened."
- .format(self._kvp_file_path))
- self.kvp_file.seek(-self.HV_KVP_RECORD_SIZE, 1)
- self.kvp_file.write(record_data)
-
def _append_kvp_item(self, record_data):
- with open(self._kvp_file_path, 'rb+') as f:
+ with open(self._kvp_file_path, 'ab') as f:
fcntl.flock(f, fcntl.LOCK_EX)
- # seek to end of the file
- f.seek(0, 2)
- f.write(record_data)
+ for data in record_data:
+ f.write(data)
f.flush()
fcntl.flock(f, fcntl.LOCK_UN)
- self._current_offset = f.tell()
def _break_down(self, key, meta_data, description):
del meta_data[self.MSG_KEY]
@@ -279,40 +285,26 @@ class HyperVKvpReportingHandler(ReportingHandler):
def _publish_event_routine(self):
while True:
+ items_from_queue = 0
try:
event = self.q.get(block=True)
- need_append = True
+ items_from_queue += 1
+ encoded_data = []
+ while event is not None:
+ encoded_data += self._encode_event(event)
+ try:
+ # get all the rest of the events in the queue
+ event = self.q.get(block=False)
+ items_from_queue += 1
+ except queue.Empty:
+ event = None
try:
- if not os.path.exists(self._kvp_file_path):
- LOG.warning(
- "skip writing events %s to %s. file not present.",
- event.as_string(),
- self._kvp_file_path)
- encoded_event = self._encode_event(event)
- # for each encoded_event
- for encoded_data in (encoded_event):
- for kvp in self._iterate_kvps(self._current_offset):
- match = (
- re.match(
- r"^{0}\|(\d+)\|.+"
- .format(self.EVENT_PREFIX),
- kvp['key']
- ))
- if match:
- match_groups = match.groups(0)
- if int(match_groups[0]) < self.incarnation_no:
- need_append = False
- self._update_kvp_item(encoded_data)
- continue
- if need_append:
- self._append_kvp_item(encoded_data)
- except IOError as e:
- LOG.warning(
- "failed posting event to kvp: %s e:%s",
- event.as_string(), e)
+ self._append_kvp_item(encoded_data)
+ except (OSError, IOError) as e:
+ LOG.warning("failed posting events to kvp, %s", e)
finally:
- self.q.task_done()
-
+ for _ in range(items_from_queue):
+ self.q.task_done()
# when main process exits, q.get() will throw EOFError
# indicating we should exit this thread.
except EOFError:
@@ -322,7 +314,7 @@ class HyperVKvpReportingHandler(ReportingHandler):
# if the kvp pool already contains a chunk of data,
# so defer it to another thread.
def publish_event(self, event):
- if (not self._event_types or event.event_type in self._event_types):
+ if not self._event_types or event.event_type in self._event_types:
self.q.put(event)
def flush(self):
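
The rewritten _publish_event_routine batches KVP writes: it blocks for the
first event, then drains whatever else is queued before a single append. A
standalone sketch of that drain pattern (names are illustrative):

    import queue

    def drain(q):
        """Block for the first item, then opportunistically take the rest."""
        items = [q.get(block=True)]
        while True:
            try:
                items.append(q.get(block=False))
            except queue.Empty:
                return items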
diff --git a/cloudinit/safeyaml.py b/cloudinit/safeyaml.py
index 7bcf9dd3..d6f5f95b 100644
--- a/cloudinit/safeyaml.py
+++ b/cloudinit/safeyaml.py
@@ -6,6 +6,8 @@
import yaml
+YAMLError = yaml.YAMLError
+
class _CustomSafeLoader(yaml.SafeLoader):
def construct_python_unicode(self, node):
@@ -17,7 +19,27 @@ _CustomSafeLoader.add_constructor(
_CustomSafeLoader.construct_python_unicode)
+class NoAliasSafeDumper(yaml.dumper.SafeDumper):
+ """A class which avoids constructing anchors/aliases on yaml dump"""
+
+ def ignore_aliases(self, data):
+ return True
+
+
def load(blob):
return(yaml.load(blob, Loader=_CustomSafeLoader))
+
+def dumps(obj, explicit_start=True, explicit_end=True, noalias=False):
+ """Return data in nicely formatted yaml."""
+
+ return yaml.dump(obj,
+ line_break="\n",
+ indent=4,
+ explicit_start=explicit_start,
+ explicit_end=explicit_end,
+ default_flow_style=False,
+ Dumper=(NoAliasSafeDumper
+ if noalias else yaml.dumper.Dumper))
+
# vi: ts=4 expandtab
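
A usage sketch of the new dumps() helper; with noalias=True a structure
referenced twice is emitted twice instead of as an &anchor/*alias pair:

    from cloudinit import safeyaml

    shared = {'a': 1}
    doc = {'first': shared, 'second': shared}
    print(safeyaml.dumps(doc, noalias=True))  # no '&id001' / '*id001'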
diff --git a/cloudinit/settings.py b/cloudinit/settings.py
index b1ebaade..ca4ffa8e 100644
--- a/cloudinit/settings.py
+++ b/cloudinit/settings.py
@@ -39,6 +39,8 @@ CFG_BUILTIN = {
'Hetzner',
'IBMCloud',
'Oracle',
+ 'Exoscale',
+ 'RbxCloud',
# At the end to act as a 'catch' when none of the above work...
'None',
],
diff --git a/cloudinit/signal_handler.py b/cloudinit/signal_handler.py
index 12fdfe6c..9272d22d 100644
--- a/cloudinit/signal_handler.py
+++ b/cloudinit/signal_handler.py
@@ -9,8 +9,7 @@
import inspect
import signal
import sys
-
-from six import StringIO
+from io import StringIO
from cloudinit import log as logging
from cloudinit import util
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index a06e6e1f..61ec522a 100644..100755
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -13,7 +13,6 @@ import os
import os.path
import re
from time import time
-from subprocess import call
from xml.dom import minidom
import xml.etree.ElementTree as ET
@@ -22,10 +21,20 @@ from cloudinit import net
from cloudinit.event import EventType
from cloudinit.net.dhcp import EphemeralDHCPv4
from cloudinit import sources
-from cloudinit.sources.helpers.azure import get_metadata_from_fabric
from cloudinit.sources.helpers import netlink
from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc
from cloudinit import util
+from cloudinit.reporting import events
+
+from cloudinit.sources.helpers.azure import (
+ azure_ds_reporter,
+ azure_ds_telemetry_reporter,
+ get_metadata_from_fabric,
+ get_boot_telemetry,
+ get_system_info,
+ report_diagnostic_event,
+ EphemeralDHCPv4WithReporting,
+ is_byte_swapped)
LOG = logging.getLogger(__name__)
@@ -54,8 +63,14 @@ AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77'
REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds"
REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready"
AGENT_SEED_DIR = '/var/lib/waagent'
+
+# In the event where the IMDS primary server is not
+# available, it takes 1s to fallback to the secondary one
+IMDS_TIMEOUT_IN_SECONDS = 2
IMDS_URL = "http://169.254.169.254/metadata/"
+PLATFORM_ENTROPY_SOURCE = "/sys/firmware/acpi/tables/OEM0"
+
# List of static scripts and network config artifacts created by
# stock Ubuntu supported images.
UBUNTU_EXTENDED_NETWORK_SCRIPTS = [
@@ -196,6 +211,8 @@ if util.is_FreeBSD():
RESOURCE_DISK_PATH = "/dev/" + res_disk
else:
LOG.debug("resource disk is None")
+ # TODO Find where platform entropy data is surfaced
+ PLATFORM_ENTROPY_SOURCE = None
BUILTIN_DS_CONFIG = {
'agent_command': AGENT_START_BUILTIN,
@@ -242,6 +259,7 @@ def set_hostname(hostname, hostname_command='hostname'):
util.subp([hostname_command, hostname])
+@azure_ds_telemetry_reporter
@contextlib.contextmanager
def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'):
"""
@@ -269,11 +287,6 @@ class DataSourceAzure(sources.DataSource):
dsname = 'Azure'
_negotiated = False
_metadata_imds = sources.UNSET
- process_name = 'dhclient'
-
- tmpps = os.popen("ps -Af").read()
- if process_name not in tmpps[:]:
- call(['/sbin/dhclient', DEFAULT_PRIMARY_NIC])
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -293,6 +306,7 @@ class DataSourceAzure(sources.DataSource):
root = sources.DataSource.__str__(self)
return "%s [seed=%s]" % (root, self.seed)
+ @azure_ds_telemetry_reporter
def bounce_network_with_azure_hostname(self):
# When using cloud-init to provision, we have to set the hostname from
# the metadata and "bounce" the network to force DDNS to update via
@@ -318,6 +332,7 @@ class DataSourceAzure(sources.DataSource):
util.logexc(LOG, "handling set_hostname failed")
return False
+ @azure_ds_telemetry_reporter
def get_metadata_from_agent(self):
temp_hostname = self.metadata.get('local-hostname')
agent_cmd = self.ds_cfg['agent_command']
@@ -340,22 +355,25 @@ class DataSourceAzure(sources.DataSource):
for pk in self.cfg.get('_pubkeys', []):
if pk.get('value', None):
key_value = pk['value']
- LOG.debug("ssh authentication: using value from fabric")
+ LOG.debug("SSH authentication: using value from fabric")
else:
bname = str(pk['fingerprint'] + ".crt")
fp_files += [os.path.join(ddir, bname)]
- LOG.debug("ssh authentication: "
- "using fingerprint from fabirc")
-
- # wait very long for public SSH keys to arrive
- # https://bugs.launchpad.net/cloud-init/+bug/1717611
- missing = util.log_time(logfunc=LOG.debug,
- msg="waiting for SSH public key files",
- func=util.wait_for_files,
- args=(fp_files, 900))
-
- if len(missing):
- LOG.warning("Did not find files, but going on: %s", missing)
+ LOG.debug("SSH authentication: "
+ "using fingerprint from fabric")
+
+ with events.ReportEventStack(
+ name="waiting-for-ssh-public-key",
+ description="wait for agents to retrieve SSH keys",
+ parent=azure_ds_reporter):
+ # wait very long for public SSH keys to arrive
+ # https://bugs.launchpad.net/cloud-init/+bug/1717611
+ missing = util.log_time(logfunc=LOG.debug,
+ msg="waiting for SSH public key files",
+ func=util.wait_for_files,
+ args=(fp_files, 900))
+ if len(missing):
+ LOG.warning("Did not find files, but going on: %s", missing)
metadata = {}
metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files)
@@ -369,6 +387,7 @@ class DataSourceAzure(sources.DataSource):
subplatform_type = 'seed-dir'
return '%s (%s)' % (subplatform_type, self.seed)
+ @azure_ds_telemetry_reporter
def crawl_metadata(self):
"""Walk all instance metadata sources returning a dict on success.
@@ -399,19 +418,24 @@ class DataSourceAzure(sources.DataSource):
elif cdev.startswith("/dev/"):
if util.is_FreeBSD():
ret = util.mount_cb(cdev, load_azure_ds_dir,
- mtype="udf", sync=False)
+ mtype="udf")
else:
ret = util.mount_cb(cdev, load_azure_ds_dir)
else:
ret = load_azure_ds_dir(cdev)
except NonAzureDataSource:
+ report_diagnostic_event(
+ "Did not find Azure data source in %s" % cdev)
continue
except BrokenAzureDataSource as exc:
msg = 'BrokenAzureDataSource: %s' % exc
+ report_diagnostic_event(msg)
raise sources.InvalidMetaDataException(msg)
except util.MountFailedError:
- LOG.warning("%s was not mountable", cdev)
+ msg = '%s was not mountable' % cdev
+ report_diagnostic_event(msg)
+ LOG.warning(msg)
continue
perform_reprovision = reprovision or self._should_reprovision(ret)
@@ -419,10 +443,11 @@ class DataSourceAzure(sources.DataSource):
if util.is_FreeBSD():
msg = "Free BSD is not supported for PPS VMs"
LOG.error(msg)
+ report_diagnostic_event(msg)
raise sources.InvalidMetaDataException(msg)
ret = self._reprovision()
imds_md = get_metadata_from_imds(
- self.fallback_interface, retries=3)
+ self.fallback_interface, retries=10)
(md, userdata_raw, cfg, files) = ret
self.seed = cdev
crawled_data.update({
@@ -437,7 +462,9 @@ class DataSourceAzure(sources.DataSource):
break
if not found:
- raise sources.InvalidMetaDataException('No Azure metadata found')
+ msg = 'No Azure metadata found'
+ report_diagnostic_event(msg)
+ raise sources.InvalidMetaDataException(msg)
if found == ddir:
LOG.debug("using files cached in %s", ddir)
@@ -445,8 +472,7 @@ class DataSourceAzure(sources.DataSource):
seed = _get_random_seed()
if seed:
crawled_data['metadata']['random_seed'] = seed
- crawled_data['metadata']['instance-id'] = util.read_dmi_data(
- 'system-uuid')
+ crawled_data['metadata']['instance-id'] = self._iid()
if perform_reprovision:
LOG.info("Reporting ready to Azure after getting ReprovisionData")
@@ -456,9 +482,14 @@ class DataSourceAzure(sources.DataSource):
self._report_ready(lease=self._ephemeral_dhcp_ctx.lease)
self._ephemeral_dhcp_ctx.clean_network() # Teardown ephemeral
else:
- with EphemeralDHCPv4() as lease:
- self._report_ready(lease=lease)
-
+ try:
+ with EphemeralDHCPv4WithReporting(
+ azure_ds_reporter) as lease:
+ self._report_ready(lease=lease)
+ except Exception as e:
+ report_diagnostic_event(
+ "exception while reporting ready: %s" % e)
+ raise
return crawled_data
def _is_platform_viable(self):
@@ -470,6 +501,7 @@ class DataSourceAzure(sources.DataSource):
super(DataSourceAzure, self).clear_cached_attrs(attr_defaults)
self._metadata_imds = sources.UNSET
+ @azure_ds_telemetry_reporter
def _get_data(self):
"""Crawl and process datasource metadata caching metadata as attrs.
@@ -479,6 +511,16 @@ class DataSourceAzure(sources.DataSource):
if not self._is_platform_viable():
return False
try:
+ get_boot_telemetry()
+ except Exception as e:
+ LOG.warning("Failed to get boot telemetry: %s", e)
+
+ try:
+ get_system_info()
+ except Exception as e:
+ LOG.warning("Failed to get system information: %s", e)
+
+ try:
crawled_data = util.log_time(
logfunc=LOG.debug, msg='Crawl of metadata service',
func=self.crawl_metadata)
@@ -516,6 +558,17 @@ class DataSourceAzure(sources.DataSource):
# quickly (local check only) if self.instance_id is still valid
return sources.instance_id_matches_system_uuid(self.get_instance_id())
+ def _iid(self, previous=None):
+ prev_iid_path = os.path.join(
+ self.paths.get_cpath('data'), 'instance-id')
+ iid = util.read_dmi_data('system-uuid')
+ if os.path.exists(prev_iid_path):
+ previous = util.load_file(prev_iid_path).strip()
+ if is_byte_swapped(previous, iid):
+ return previous
+ return iid
+
+ @azure_ds_telemetry_reporter
def setup(self, is_new_instance):
if self._negotiated is False:
LOG.debug("negotiating for %s (new_instance=%s)",
@@ -536,27 +589,55 @@ class DataSourceAzure(sources.DataSource):
headers = {"Metadata": "true"}
nl_sock = None
report_ready = bool(not os.path.isfile(REPORTED_READY_MARKER_FILE))
+ self.imds_logging_threshold = 1
+ self.imds_poll_counter = 1
+ dhcp_attempts = 0
+ vnet_switched = False
+ return_val = None
def exc_cb(msg, exception):
if isinstance(exception, UrlError) and exception.code == 404:
+ if self.imds_poll_counter == self.imds_logging_threshold:
+ # Reducing the logging frequency as we are polling IMDS
+ self.imds_logging_threshold *= 2
+ LOG.debug("Call to IMDS with arguments %s failed "
+ "with status code %s after %s retries",
+ msg, exception.code, self.imds_poll_counter)
+ LOG.debug("Backing off logging threshold for the same "
+ "exception to %d", self.imds_logging_threshold)
+ self.imds_poll_counter += 1
return True
+
# If we get an exception while trying to call IMDS, we
# call DHCP and setup the ephemeral network to acquire the new IP.
+ LOG.debug("Call to IMDS with arguments %s failed with "
+ "status code %s", msg, exception.code)
+ report_diagnostic_event("polling IMDS failed with exception %s"
+ % exception.code)
return False
LOG.debug("Wait for vnetswitch to happen")
while True:
try:
- # Save our EphemeralDHCPv4 context so we avoid repeated dhcp
- self._ephemeral_dhcp_ctx = EphemeralDHCPv4()
- lease = self._ephemeral_dhcp_ctx.obtain_lease()
+ # Save our EphemeralDHCPv4 context to avoid repeated dhcp
+ with events.ReportEventStack(
+ name="obtain-dhcp-lease",
+ description="obtain dhcp lease",
+ parent=azure_ds_reporter):
+ self._ephemeral_dhcp_ctx = EphemeralDHCPv4()
+ lease = self._ephemeral_dhcp_ctx.obtain_lease()
+
+ if vnet_switched:
+ dhcp_attempts += 1
if report_ready:
try:
nl_sock = netlink.create_bound_netlink_socket()
except netlink.NetlinkCreateSocketError as e:
+ report_diagnostic_event(e)
LOG.warning(e)
self._ephemeral_dhcp_ctx.clean_network()
- return
+ break
+
path = REPORTED_READY_MARKER_FILE
LOG.info(
"Creating a marker file to report ready: %s", path)
@@ -564,17 +645,33 @@ class DataSourceAzure(sources.DataSource):
pid=os.getpid(), time=time()))
self._report_ready(lease=lease)
report_ready = False
- try:
- netlink.wait_for_media_disconnect_connect(
- nl_sock, lease['interface'])
- except AssertionError as error:
- LOG.error(error)
- return
+
+ with events.ReportEventStack(
+ name="wait-for-media-disconnect-connect",
+ description="wait for vnet switch",
+ parent=azure_ds_reporter):
+ try:
+ netlink.wait_for_media_disconnect_connect(
+ nl_sock, lease['interface'])
+ except AssertionError as error:
+ report_diagnostic_event(error)
+ LOG.error(error)
+ break
+
+ vnet_switched = True
self._ephemeral_dhcp_ctx.clean_network()
else:
- return readurl(url, timeout=1, headers=headers,
- exception_cb=exc_cb, infinite=True,
- log_req_resp=False).contents
+ with events.ReportEventStack(
+ name="get-reprovision-data-from-imds",
+ description="get reprovision data from imds",
+ parent=azure_ds_reporter):
+ return_val = readurl(url,
+ timeout=IMDS_TIMEOUT_IN_SECONDS,
+ headers=headers,
+ exception_cb=exc_cb,
+ infinite=True,
+ log_req_resp=False).contents
+ break
except UrlError:
# Teardown our EphemeralDHCPv4 context on failure as we retry
self._ephemeral_dhcp_ctx.clean_network()
@@ -583,6 +680,15 @@ class DataSourceAzure(sources.DataSource):
if nl_sock:
nl_sock.close()
+ if vnet_switched:
+ report_diagnostic_event("attempted dhcp %d times after reuse" %
+ dhcp_attempts)
+ report_diagnostic_event("polled imds %d times after reuse" %
+ self.imds_poll_counter)
+
+ return return_val
+
+ @azure_ds_telemetry_reporter
def _report_ready(self, lease):
"""Tells the fabric provisioning has completed """
try:
@@ -620,9 +726,14 @@ class DataSourceAzure(sources.DataSource):
def _reprovision(self):
"""Initiate the reprovisioning workflow."""
contents = self._poll_imds()
- md, ud, cfg = read_azure_ovf(contents)
- return (md, ud, cfg, {'ovf-env.xml': contents})
-
+ with events.ReportEventStack(
+ name="reprovisioning-read-azure-ovf",
+ description="read azure ovf during reprovisioning",
+ parent=azure_ds_reporter):
+ md, ud, cfg = read_azure_ovf(contents)
+ return (md, ud, cfg, {'ovf-env.xml': contents})
+
+ @azure_ds_telemetry_reporter
def _negotiate(self):
"""Negotiate with fabric and return data from it.
@@ -633,9 +744,11 @@ class DataSourceAzure(sources.DataSource):
if self.ds_cfg['agent_command'] == AGENT_START_BUILTIN:
self.bounce_network_with_azure_hostname()
+ pubkey_info = self.cfg.get('_pubkeys', None)
metadata_func = partial(get_metadata_from_fabric,
fallback_lease_file=self.
- dhclient_lease_file)
+ dhclient_lease_file,
+ pubkey_info=pubkey_info)
else:
metadata_func = self.get_metadata_from_agent
@@ -643,15 +756,20 @@ class DataSourceAzure(sources.DataSource):
self.ds_cfg['agent_command'])
try:
fabric_data = metadata_func()
- except Exception:
+ except Exception as e:
+ report_diagnostic_event(
+ "Error communicating with Azure fabric; You may experience "
+ "connectivity issues: %s" % e)
LOG.warning(
- "Error communicating with Azure fabric; You may experience."
+ "Error communicating with Azure fabric; You may experience "
"connectivity issues.", exc_info=True)
return False
+
util.del_file(REPORTED_READY_MARKER_FILE)
util.del_file(REPROVISION_MARKER_FILE)
return fabric_data
+ @azure_ds_telemetry_reporter
def activate(self, cfg, is_new_instance):
address_ephemeral_resize(is_new_instance=is_new_instance,
preserve_ntfs=self.ds_cfg.get(
@@ -659,6 +777,11 @@ class DataSourceAzure(sources.DataSource):
return
@property
+ def availability_zone(self):
+ return self.metadata.get(
+ 'imds', {}).get('compute', {}).get('platformFaultDomain')
+
+ @property
def network_config(self):
"""Generate a network config like net.generate_fallback_network() with
the following exceptions.
@@ -668,7 +791,7 @@ class DataSourceAzure(sources.DataSource):
2. Generate a fallback network config that does not include any of
the blacklisted devices.
"""
- if not self._network_config:
+ if not self._network_config or self._network_config == sources.UNSET:
if self.ds_cfg.get('apply_network_config'):
nc_src = self._metadata_imds
else:
@@ -676,6 +799,10 @@ class DataSourceAzure(sources.DataSource):
self._network_config = parse_network_config(nc_src)
return self._network_config
+ @property
+ def region(self):
+ return self.metadata.get('imds', {}).get('compute', {}).get('location')
+
def _partitions_on_device(devpath, maxnum=16):
# return a list of tuples (ptnum, path) for each part on devpath
@@ -690,12 +817,14 @@ def _partitions_on_device(devpath, maxnum=16):
return []
+@azure_ds_telemetry_reporter
def _has_ntfs_filesystem(devpath):
ntfs_devices = util.find_devs_with("TYPE=ntfs", no_cache=True)
LOG.debug('ntfs_devices found = %s', ntfs_devices)
return os.path.realpath(devpath) in ntfs_devices
+@azure_ds_telemetry_reporter
def can_dev_be_reformatted(devpath, preserve_ntfs):
"""Determine if the ephemeral drive at devpath should be reformatted.
@@ -744,43 +873,59 @@ def can_dev_be_reformatted(devpath, preserve_ntfs):
(cand_part, cand_path, devpath))
return False, msg
+ @azure_ds_telemetry_reporter
def count_files(mp):
ignored = set(['dataloss_warning_readme.txt'])
return len([f for f in os.listdir(mp) if f.lower() not in ignored])
bmsg = ('partition %s (%s) on device %s was ntfs formatted' %
(cand_part, cand_path, devpath))
- try:
- file_count = util.mount_cb(cand_path, count_files, mtype="ntfs",
- update_env_for_mount={'LANG': 'C'})
- except util.MountFailedError as e:
- if "unknown filesystem type 'ntfs'" in str(e):
- return True, (bmsg + ' but this system cannot mount NTFS,'
- ' assuming there are no important files.'
- ' Formatting allowed.')
- return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e)
-
- if file_count != 0:
- LOG.warning("it looks like you're using NTFS on the ephemeral disk, "
- 'to ensure that filesystem does not get wiped, set '
- '%s.%s in config', '.'.join(DS_CFG_PATH),
- DS_CFG_KEY_PRESERVE_NTFS)
- return False, bmsg + ' but had %d files on it.' % file_count
+
+ with events.ReportEventStack(
+ name="mount-ntfs-and-count",
+ description="mount-ntfs-and-count",
+ parent=azure_ds_reporter) as evt:
+ try:
+ file_count = util.mount_cb(cand_path, count_files, mtype="ntfs",
+ update_env_for_mount={'LANG': 'C'})
+ except util.MountFailedError as e:
+ evt.description = "cannot mount ntfs"
+ if "unknown filesystem type 'ntfs'" in str(e):
+ return True, (bmsg + ' but this system cannot mount NTFS,'
+ ' assuming there are no important files.'
+ ' Formatting allowed.')
+ return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e)
+
+ if file_count != 0:
+ evt.description = "mounted and counted %d files" % file_count
+ LOG.warning("it looks like you're using NTFS on the ephemeral"
+ " disk, to ensure that filesystem does not get wiped,"
+ " set %s.%s in config", '.'.join(DS_CFG_PATH),
+ DS_CFG_KEY_PRESERVE_NTFS)
+ return False, bmsg + ' but had %d files on it.' % file_count
return True, bmsg + ' and had no important files. Safe for reformatting.'
+@azure_ds_telemetry_reporter
def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,
is_new_instance=False, preserve_ntfs=False):
# wait for ephemeral disk to come up
naplen = .2
- missing = util.wait_for_files([devpath], maxwait=maxwait, naplen=naplen,
- log_pre="Azure ephemeral disk: ")
-
- if missing:
- LOG.warning("ephemeral device '%s' did not appear after %d seconds.",
- devpath, maxwait)
- return
+ with events.ReportEventStack(
+ name="wait-for-ephemeral-disk",
+ description="wait for ephemeral disk",
+ parent=azure_ds_reporter):
+ missing = util.wait_for_files([devpath],
+ maxwait=maxwait,
+ naplen=naplen,
+ log_pre="Azure ephemeral disk: ")
+
+ if missing:
+ LOG.warning("ephemeral device '%s' did"
+ " not appear after %d seconds.",
+ devpath, maxwait)
+ return
result = False
msg = None
@@ -808,6 +953,7 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,
return
+@azure_ds_telemetry_reporter
def perform_hostname_bounce(hostname, cfg, prev_hostname):
# set the hostname to 'hostname' if it is not already set to that.
# then, if policy is not off, bounce the interface using command
@@ -843,6 +989,7 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname):
return True
+@azure_ds_telemetry_reporter
def crtfile_to_pubkey(fname, data=None):
pipeline = ('openssl x509 -noout -pubkey < "$0" |'
'ssh-keygen -i -m PKCS8 -f /dev/stdin')
@@ -851,6 +998,7 @@ def crtfile_to_pubkey(fname, data=None):
return out.rstrip()
+@azure_ds_telemetry_reporter
def pubkeys_from_crt_files(flist):
pubkeys = []
errors = []
@@ -866,6 +1014,7 @@ def pubkeys_from_crt_files(flist):
return pubkeys
+@azure_ds_telemetry_reporter
def write_files(datadir, files, dirmode=None):
def _redact_password(cnt, fname):
@@ -893,6 +1042,7 @@ def write_files(datadir, files, dirmode=None):
util.write_file(filename=fname, content=content, mode=0o600)
+@azure_ds_telemetry_reporter
def invoke_agent(cmd):
# this is a function itself to simplify patching it for test
if cmd:
@@ -912,16 +1062,19 @@ def find_child(node, filter_func):
return ret
+@azure_ds_telemetry_reporter
def load_azure_ovf_pubkeys(sshnode):
# This parses a 'SSH' node formatted like below, and returns
# an array of dicts.
- # [{'fp': '6BE7A7C3C8A8F4B123CCA5D0C2F1BE4CA7B63ED7',
- # 'path': 'where/to/go'}]
+ # [{'fingerprint': '6BE7A7C3C8A8F4B123CCA5D0C2F1BE4CA7B63ED7',
+ # 'path': '/where/to/go'}]
#
# <SSH><PublicKeys>
- # <PublicKey><Fingerprint>ABC</FingerPrint><Path>/ABC</Path>
+ # <PublicKey><Fingerprint>ABC</FingerPrint><Path>/x/y/z</Path>
# ...
# </PublicKeys></SSH>
+ # Under some circumstances, there may be a <Value> element along with the
+ # Fingerprint and Path. Pass those along if they appear.
results = find_child(sshnode, lambda n: n.localName == "PublicKeys")
if len(results) == 0:
return []
@@ -962,11 +1115,14 @@ def load_azure_ovf_pubkeys(sshnode):
return found
+@azure_ds_telemetry_reporter
def read_azure_ovf(contents):
try:
dom = minidom.parseString(contents)
except Exception as e:
- raise BrokenAzureDataSource("Invalid ovf-env.xml: %s" % e)
+ error_str = "Invalid ovf-env.xml: %s" % e
+ report_diagnostic_event(error_str)
+ raise BrokenAzureDataSource(error_str)
results = find_child(dom.documentElement,
lambda n: n.localName == "ProvisioningSection")
@@ -986,8 +1142,8 @@ def read_azure_ovf(contents):
raise NonAzureDataSource("No LinuxProvisioningConfigurationSet")
if len(lpcs_nodes) > 1:
raise BrokenAzureDataSource("found '%d' %ss" %
- ("LinuxProvisioningConfigurationSet",
- len(lpcs_nodes)))
+ (len(lpcs_nodes),
+ "LinuxProvisioningConfigurationSet"))
lpcs = lpcs_nodes[0]
if not lpcs.hasChildNodes():
@@ -1047,9 +1203,10 @@ def read_azure_ovf(contents):
defuser = {}
if username:
defuser['name'] = username
- if password and DEF_PASSWD_REDACTION != password:
- defuser['passwd'] = encrypt_pass(password)
+ if password:
defuser['lock_passwd'] = False
+ if DEF_PASSWD_REDACTION != password:
+ defuser['passwd'] = encrypt_pass(password)
if defuser:
cfg['system_info'] = {'default_user': defuser}
@@ -1062,6 +1219,7 @@ def read_azure_ovf(contents):
return (md, ud, cfg)
+@azure_ds_telemetry_reporter
def _extract_preprovisioned_vm_setting(dom):
"""Read the preprovision flag from the ovf. It should not
exist unless true."""
@@ -1090,6 +1248,7 @@ def encrypt_pass(password, salt_id="$6$"):
return crypt.crypt(password, salt_id + util.rand_str(strlen=16))
+@azure_ds_telemetry_reporter
def _check_freebsd_cdrom(cdrom_dev):
"""Return boolean indicating path to cdrom device has content."""
try:
@@ -1101,18 +1260,31 @@ def _check_freebsd_cdrom(cdrom_dev):
return False
-def _get_random_seed():
+@azure_ds_telemetry_reporter
+def _get_random_seed(source=PLATFORM_ENTROPY_SOURCE):
"""Return content random seed file if available, otherwise,
return None."""
# azure / hyper-v provides random data here
- # TODO. find the seed on FreeBSD platform
# now update ds_cfg to reflect contents pass in config
- if util.is_FreeBSD():
+ if source is None:
return None
- return util.load_file("/sys/firmware/acpi/tables/OEM0",
- quiet=True, decode=False)
+ seed = util.load_file(source, quiet=True, decode=False)
+
+ # The seed generally contains non-Unicode characters. load_file puts
+ # them into a str (in python 2) or bytes (in python 3). In python 2,
+ # bad octets in a str cause util.json_dumps() to throw an exception. In
+ # python 3, bytes is a non-serializable type, and the handler that
+ # load_file uses applies b64 encoding *again* to handle it. The simplest
+ # solution is to just b64encode the data and then decode it to a serializable
+ # string. Same number of bits of entropy, just with 25% more zeroes.
+ # There's no need to undo this base64-encoding when the random seed is
+ # actually used in cc_seed_random.py.
+ seed = base64.b64encode(seed).decode()
+ return seed
+
+@azure_ds_telemetry_reporter
def list_possible_azure_ds_devs():
devlist = []
if util.is_FreeBSD():
@@ -1127,6 +1299,7 @@ def list_possible_azure_ds_devs():
return devlist
+@azure_ds_telemetry_reporter
def load_azure_ds_dir(source_dir):
ovf_file = os.path.join(source_dir, "ovf-env.xml")
@@ -1149,47 +1322,62 @@ def parse_network_config(imds_metadata):
@param: imds_metadata: Dict of content read from IMDS network service.
@return: Dictionary containing network version 2 standard configuration.
"""
- if imds_metadata != sources.UNSET and imds_metadata:
- netconfig = {'version': 2, 'ethernets': {}}
- LOG.debug('Azure: generating network configuration from IMDS')
- network_metadata = imds_metadata['network']
- for idx, intf in enumerate(network_metadata['interface']):
- nicname = 'eth{idx}'.format(idx=idx)
- dev_config = {}
- for addr4 in intf['ipv4']['ipAddress']:
- privateIpv4 = addr4['privateIpAddress']
- if privateIpv4:
- if dev_config.get('dhcp4', False):
- # Append static address config for nic > 1
- netPrefix = intf['ipv4']['subnet'][0].get(
- 'prefix', '24')
+ with events.ReportEventStack(
+ name="parse_network_config",
+ description="",
+ parent=azure_ds_reporter) as evt:
+ if imds_metadata != sources.UNSET and imds_metadata:
+ netconfig = {'version': 2, 'ethernets': {}}
+ LOG.debug('Azure: generating network configuration from IMDS')
+ network_metadata = imds_metadata['network']
+ for idx, intf in enumerate(network_metadata['interface']):
+ # First IPv4 and/or IPv6 address will be obtained via DHCP.
+ # Any additional IPs of each type will be set as static
+ # addresses.
+ nicname = 'eth{idx}'.format(idx=idx)
+ dhcp_override = {'route-metric': (idx + 1) * 100}
+ dev_config = {'dhcp4': True, 'dhcp4-overrides': dhcp_override,
+ 'dhcp6': False}
+ for addr_type in ('ipv4', 'ipv6'):
+ addresses = intf.get(addr_type, {}).get('ipAddress', [])
+ if addr_type == 'ipv4':
+ default_prefix = '24'
+ else:
+ default_prefix = '128'
+ if addresses:
+ dev_config['dhcp6'] = True
+ # non-primary interfaces should have a higher
+ # route-metric (cost) so default routes prefer
+ # primary nic due to lower route-metric value
+ dev_config['dhcp6-overrides'] = dhcp_override
+ for addr in addresses[1:]:
+ # Append static address config for ip > 1
+ netPrefix = intf[addr_type]['subnet'][0].get(
+ 'prefix', default_prefix)
+ privateIp = addr['privateIpAddress']
if not dev_config.get('addresses'):
dev_config['addresses'] = []
dev_config['addresses'].append(
'{ip}/{prefix}'.format(
- ip=privateIpv4, prefix=netPrefix))
- else:
- dev_config['dhcp4'] = True
- for addr6 in intf['ipv6']['ipAddress']:
- privateIpv6 = addr6['privateIpAddress']
- if privateIpv6:
- dev_config['dhcp6'] = True
- break
- if dev_config:
- mac = ':'.join(re.findall(r'..', intf['macAddress']))
- dev_config.update(
- {'match': {'macaddress': mac.lower()},
- 'set-name': nicname})
- netconfig['ethernets'][nicname] = dev_config
- else:
- blacklist = ['mlx4_core']
- LOG.debug('Azure: generating fallback configuration')
- # generate a network config, blacklist picking mlx4_core devs
- netconfig = net.generate_fallback_config(
- blacklist_drivers=blacklist, config_driver=True)
- return netconfig
+ ip=privateIp, prefix=netPrefix))
+ if dev_config:
+ mac = ':'.join(re.findall(r'..', intf['macAddress']))
+ dev_config.update(
+ {'match': {'macaddress': mac.lower()},
+ 'set-name': nicname})
+ netconfig['ethernets'][nicname] = dev_config
+ evt.description = "network config from imds"
+ else:
+ blacklist = ['mlx4_core']
+ LOG.debug('Azure: generating fallback configuration')
+ # generate a network config, blacklist picking mlx4_core devs
+ netconfig = net.generate_fallback_config(
+ blacklist_drivers=blacklist, config_driver=True)
+ evt.description = "network config from fallback"
+ return netconfig
+@azure_ds_telemetry_reporter
def get_metadata_from_imds(fallback_nic, retries):
"""Query Azure's network metadata service, returning a dictionary.
@@ -1210,29 +1398,39 @@ def get_metadata_from_imds(fallback_nic, retries):
if net.is_up(fallback_nic):
return util.log_time(**kwargs)
else:
- with EphemeralDHCPv4(fallback_nic):
- return util.log_time(**kwargs)
+ try:
+ with EphemeralDHCPv4WithReporting(
+ azure_ds_reporter, fallback_nic):
+ return util.log_time(**kwargs)
+ except Exception as e:
+ report_diagnostic_event("exception while getting metadata: %s" % e)
+ raise
+@azure_ds_telemetry_reporter
def _get_metadata_from_imds(retries):
url = IMDS_URL + "instance?api-version=2017-12-01"
headers = {"Metadata": "true"}
try:
response = readurl(
- url, timeout=1, headers=headers, retries=retries,
- exception_cb=retry_on_url_exc)
+ url, timeout=IMDS_TIMEOUT_IN_SECONDS, headers=headers,
+ retries=retries, exception_cb=retry_on_url_exc)
except Exception as e:
- LOG.debug('Ignoring IMDS instance metadata: %s', e)
+ msg = 'Ignoring IMDS instance metadata: %s' % e
+ report_diagnostic_event(msg)
+ LOG.debug(msg)
return {}
try:
return util.load_json(str(response))
- except json.decoder.JSONDecodeError:
+ except json.decoder.JSONDecodeError as e:
+ report_diagnostic_event('non-json imds response: %s' % e)
LOG.warning(
'Ignoring non-json IMDS instance metadata: %s', str(response))
return {}
+@azure_ds_telemetry_reporter
def maybe_remove_ubuntu_network_config_scripts(paths=None):
"""Remove Azure-specific ubuntu network config for non-primary nics.
@@ -1270,14 +1468,22 @@ def maybe_remove_ubuntu_network_config_scripts(paths=None):
def _is_platform_viable(seed_dir):
- """Check platform environment to report if this datasource may run."""
- asset_tag = util.read_dmi_data('chassis-asset-tag')
- if asset_tag == AZURE_CHASSIS_ASSET_TAG:
- return True
- LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag)
- if os.path.exists(os.path.join(seed_dir, 'ovf-env.xml')):
- return True
- return False
+ """Check platform environment to report if this datasource may run."""
+ with events.ReportEventStack(
+ name="check-platform-viability",
+ description="found azure asset tag",
+ parent=azure_ds_reporter) as evt:
+
+ asset_tag = util.read_dmi_data('chassis-asset-tag')
+ if asset_tag == AZURE_CHASSIS_ASSET_TAG:
+ return True
+ msg = "Non-Azure DMI asset tag '%s' discovered." % asset_tag
+ LOG.debug(msg)
+ evt.description = msg
+ report_diagnostic_event(msg)
+ if os.path.exists(os.path.join(seed_dir, 'ovf-env.xml')):
+ return True
+ return False
class BrokenAzureDataSource(Exception):
diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py
index 2955d3f0..df88f677 100644
--- a/cloudinit/sources/DataSourceCloudSigma.py
+++ b/cloudinit/sources/DataSourceCloudSigma.py
@@ -42,12 +42,8 @@ class DataSourceCloudSigma(sources.DataSource):
if not sys_product_name:
LOG.debug("system-product-name not available in dmi data")
return False
- else:
- LOG.debug("detected hypervisor as %s", sys_product_name)
- return 'cloudsigma' in sys_product_name.lower()
-
- LOG.warning("failed to query dmi data for system product name")
- return False
+ LOG.debug("detected hypervisor as %s", sys_product_name)
+ return 'cloudsigma' in sys_product_name.lower()
def _get_data(self):
"""
diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
index d4b758f2..2013bed7 100644
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -13,7 +13,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
import os
-from socket import inet_ntoa
+from socket import inet_ntoa, getaddrinfo, gaierror
from struct import pack
import time
@@ -93,9 +93,9 @@ class DataSourceCloudStack(sources.DataSource):
urls = [uhelp.combine_url(self.metadata_address,
'latest/meta-data/instance-id')]
start_time = time.time()
- url = uhelp.wait_for_url(
+ url, _response = uhelp.wait_for_url(
urls=urls, max_wait=url_params.max_wait_seconds,
- timeout=url_params.timeout_seconds, status_cb=LOG.warn)
+ timeout=url_params.timeout_seconds, status_cb=LOG.warning)
if url:
LOG.debug("Using metadata source: '%s'", url)
@@ -156,6 +156,17 @@ class DataSourceCloudStack(sources.DataSource):
return self.metadata['availability-zone']
+def get_data_server():
+ # Return the metadata server IP resolved from the 'data-server.' DNS entry
+ try:
+ addrinfo = getaddrinfo("data-server.", 80)
+ except gaierror:
+ LOG.debug("DNS Entry data-server not found")
+ return None
+ else:
+ return addrinfo[0][4][0] # return IP
+
+
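For reference, socket.getaddrinfo() returns a list of 5-tuples (family, type, proto, canonname, sockaddr); for IPv4 the sockaddr is an (ip, port) pair, which is why addrinfo[0][4][0] above yields the first resolved address. A minimal standalone sketch of the same lookup ('data-server.' being the CloudStack naming convention relied on above):

from socket import getaddrinfo, gaierror

def resolve_first_ip(hostname='data-server.', port=80):
    # Each entry is (family, type, proto, canonname, sockaddr);
    # sockaddr[0] is the IP address string.
    try:
        addrinfo = getaddrinfo(hostname, port)
    except gaierror:
        return None
    return addrinfo[0][4][0]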
def get_default_gateway():
# Returns the default gateway ip address in the dotted format.
lines = util.load_file("/proc/net/route").splitlines()
@@ -218,7 +229,14 @@ def get_vr_address():
# If no virtual router is detected, fallback on default gateway.
# See http://docs.cloudstack.apache.org/projects/cloudstack-administration/en/4.8/virtual_machines/user-data.html # noqa
- # Try networkd first...
+ # Try data-server DNS entry first
+ latest_address = get_data_server()
+ if latest_address:
+ LOG.debug("Found metadata server '%s' via data-server DNS entry",
+ latest_address)
+ return latest_address
+
+ # Try networkd second...
latest_address = dhcp.networkd_get_option_from_leases('SERVER_ADDRESS')
if latest_address:
LOG.debug("Found SERVER_ADDRESS '%s' via networkd_leases",
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index 564e3eb3..f77923c2 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -72,15 +72,12 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
dslist = self.sys_cfg.get('datasource_list')
for dev in find_candidate_devs(dslist=dslist):
try:
- # Set mtype if freebsd and turn off sync
- if dev.startswith("/dev/cd"):
+ if util.is_FreeBSD() and dev.startswith("/dev/cd"):
mtype = "cd9660"
- sync = False
else:
mtype = None
- sync = True
results = util.mount_cb(dev, read_config_drive,
- mtype=mtype, sync=sync)
+ mtype=mtype)
found = dev
except openstack.NonReadable:
pass
@@ -166,10 +163,10 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
def _get_subplatform(self):
"""Return the subplatform metadata source details."""
- if self.seed_dir in self.source:
- subplatform_type = 'seed-dir'
- elif self.source.startswith('/dev'):
+ if self.source.startswith('/dev'):
subplatform_type = 'config-disk'
+ else:
+ subplatform_type = 'seed-dir'
return '%s (%s)' % (subplatform_type, self.source)
@@ -237,7 +234,7 @@ def find_candidate_devs(probe_optical=True, dslist=None):
config drive v2:
Disk should be:
- * either vfat or iso9660 formated
+ * either vfat or iso9660 formatted
* labeled with 'config-2' or 'CONFIG-2'
"""
if dslist is None:
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index b49a08db..0f2bfef4 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -10,7 +10,6 @@
import os
import time
-from subprocess import call
from cloudinit import ec2_utils as ec2
from cloudinit import log as logging
@@ -20,6 +19,7 @@ from cloudinit import sources
from cloudinit import url_helper as uhelp
from cloudinit import util
from cloudinit import warnings
+from cloudinit.event import EventType
LOG = logging.getLogger(__name__)
@@ -27,13 +27,21 @@ SKIP_METADATA_URL_CODES = frozenset([uhelp.NOT_FOUND])
STRICT_ID_PATH = ("datasource", "Ec2", "strict_id")
STRICT_ID_DEFAULT = "warn"
-DEFAULT_PRIMARY_NIC = 'eth0'
+
+API_TOKEN_ROUTE = 'latest/api/token'
+API_TOKEN_DISABLED = '_ec2_disable_api_token'
+AWS_TOKEN_TTL_SECONDS = '21600'
+AWS_TOKEN_PUT_HEADER = 'X-aws-ec2-metadata-token'
+AWS_TOKEN_REQ_HEADER = AWS_TOKEN_PUT_HEADER + '-ttl-seconds'
+AWS_TOKEN_REDACT = [AWS_TOKEN_PUT_HEADER, AWS_TOKEN_REQ_HEADER]
class CloudNames(object):
ALIYUN = "aliyun"
AWS = "aws"
BRIGHTBOX = "brightbox"
+ ZSTACK = "zstack"
+ E24CLOUD = "e24cloud"
# UNKNOWN indicates no positive id. If strict_id is 'warn' or 'false',
# then an attempt at the Ec2 Metadata service will be made.
UNKNOWN = "unknown"
@@ -45,12 +53,6 @@ class CloudNames(object):
class DataSourceEc2(sources.DataSource):
dsname = 'Ec2'
- process_name = 'dhclient'
-
- tmpps = os.popen("ps -Af").read()
- if process_name not in tmpps[:]:
- call(['/sbin/dhclient', DEFAULT_PRIMARY_NIC])
-
# Default metadata urls that will be used if none are provided
# They will be checked for 'resolveability' and some of the
# following may be discarded if they do not resolve
@@ -67,6 +69,7 @@ class DataSourceEc2(sources.DataSource):
url_max_wait = 120
url_timeout = 50
+ _api_token = None # API token for accessing the metadata service
_network_config = sources.UNSET # Used to cache calculated network cfg v1
# Whether we want to get network configuration from the metadata service.
@@ -115,6 +118,19 @@ class DataSourceEc2(sources.DataSource):
'dynamic', {}).get('instance-identity', {}).get('document', {})
return True
+ def is_classic_instance(self):
+ """Report if this instance type is Ec2 Classic (non-vpc)."""
+ if not self.metadata:
+ # Returning False when inconclusive is acceptable: we are also
+ # called from network_config, where metadata will be present.
+ # A secondary call site is the packaging postinst script.
+ return False
+ ifaces_md = self.metadata.get('network', {}).get('interfaces', {})
+ for _mac, mac_data in ifaces_md.get('macs', {}).items():
+ if 'vpc-id' in mac_data:
+ return False
+ return True
+
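A hedged illustration of the metadata shape is_classic_instance() walks; the key names follow the EC2 'network/interfaces/macs' tree used above, and the values are invented:

# A VPC instance carries 'vpc-id' under each MAC entry; a Classic
# instance does not, so the loop above returns True for it.
vpc_md = {'network': {'interfaces': {'macs': {
    '06:17:04:d7:26:09': {'vpc-id': 'vpc-0123456789abcdef0'}}}}}
classic_md = {'network': {'interfaces': {'macs': {
    '06:17:04:d7:26:0a': {'public-ipv4s': '203.0.113.10'}}}}}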
@property
def launch_index(self):
if not self.metadata:
@@ -140,11 +156,13 @@ class DataSourceEc2(sources.DataSource):
min_metadata_version.
"""
# Assumes metadata service is already up
+ url_tmpl = '{0}/{1}/meta-data/instance-id'
+ headers = self._get_headers()
for api_ver in self.extended_metadata_versions:
- url = '{0}/{1}/meta-data/instance-id'.format(
- self.metadata_address, api_ver)
+ url = url_tmpl.format(self.metadata_address, api_ver)
try:
- resp = uhelp.readurl(url=url)
+ resp = uhelp.readurl(url=url, headers=headers,
+ headers_redact=AWS_TOKEN_REDACT)
except uhelp.UrlError as e:
LOG.debug('url %s raised exception %s', url, e)
else:
@@ -164,12 +182,41 @@ class DataSourceEc2(sources.DataSource):
# setup self.identity. So we need to do that now.
api_version = self.get_metadata_api_version()
self.identity = ec2.get_instance_identity(
- api_version, self.metadata_address).get('document', {})
+ api_version, self.metadata_address,
+ headers_cb=self._get_headers,
+ headers_redact=AWS_TOKEN_REDACT,
+ exception_cb=self._refresh_stale_aws_token_cb).get(
+ 'document', {})
return self.identity.get(
'instanceId', self.metadata['instance-id'])
else:
return self.metadata['instance-id']
+ def _maybe_fetch_api_token(self, mdurls, timeout=None, max_wait=None):
+ if self.cloud_name != CloudNames.AWS:
+ return
+
+ urls = []
+ url2base = {}
+ url_path = API_TOKEN_ROUTE
+ request_method = 'PUT'
+ for url in mdurls:
+ cur = '{0}/{1}'.format(url, url_path)
+ urls.append(cur)
+ url2base[cur] = url
+
+ # self._status_cb watches for read timeouts, which mean the API
+ # token URL is unreachable and IMDSv2 must be disabled (IMDSv1)
+ LOG.debug('Fetching Ec2 IMDSv2 API Token')
+ url, response = uhelp.wait_for_url(
+ urls=urls, max_wait=1, timeout=1, status_cb=self._status_cb,
+ headers_cb=self._get_headers, request_method=request_method,
+ headers_redact=AWS_TOKEN_REDACT)
+
+ if url and response:
+ self._api_token = response
+ return url2base[url]
+
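The negotiation above boils down to one PUT against latest/api/token carrying a TTL request header; the response body is the session token sent on all later metadata reads. A standalone stdlib sketch of that exchange (header names and TTL match the constants at the top of this file; the link-local endpoint is the standard one):

import urllib.request

def fetch_imds_token(base='http://169.254.169.254', ttl='21600'):
    # PUT the token route with the ttl-seconds header; the body of
    # the response is the token itself.
    req = urllib.request.Request(
        '%s/latest/api/token' % base,
        headers={'X-aws-ec2-metadata-token-ttl-seconds': ttl},
        method='PUT')
    with urllib.request.urlopen(req, timeout=1) as resp:
        return resp.read().decode('utf-8')

# Subsequent GETs then send {'X-aws-ec2-metadata-token': token}.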
def wait_for_metadata_service(self):
mcfg = self.ds_cfg
@@ -191,27 +238,40 @@ class DataSourceEc2(sources.DataSource):
LOG.warning("Empty metadata url list! using default list")
mdurls = self.metadata_urls
- urls = []
- url2base = {}
- for url in mdurls:
- cur = '{0}/{1}/meta-data/instance-id'.format(
- url, self.min_metadata_version)
- urls.append(cur)
- url2base[cur] = url
-
- start_time = time.time()
- url = uhelp.wait_for_url(
- urls=urls, max_wait=url_params.max_wait_seconds,
- timeout=url_params.timeout_seconds, status_cb=LOG.warn)
-
- if url:
- self.metadata_address = url2base[url]
+ # try the api token path first
+ metadata_address = self._maybe_fetch_api_token(mdurls)
+ if not metadata_address:
+ if self._api_token == API_TOKEN_DISABLED:
+ LOG.warning('Retrying with IMDSv1')
+ # if we can't get a token, use instance-id path
+ urls = []
+ url2base = {}
+ url_path = '{ver}/meta-data/instance-id'.format(
+ ver=self.min_metadata_version)
+ request_method = 'GET'
+ for url in mdurls:
+ cur = '{0}/{1}'.format(url, url_path)
+ urls.append(cur)
+ url2base[cur] = url
+
+ start_time = time.time()
+ url, _ = uhelp.wait_for_url(
+ urls=urls, max_wait=url_params.max_wait_seconds,
+ timeout=url_params.timeout_seconds, status_cb=LOG.warning,
+ headers_redact=AWS_TOKEN_REDACT, headers_cb=self._get_headers,
+ request_method=request_method)
+
+ if url:
+ metadata_address = url2base[url]
+
+ if metadata_address:
+ self.metadata_address = metadata_address
LOG.debug("Using metadata source: '%s'", self.metadata_address)
else:
LOG.critical("Giving up on md from %s after %s seconds",
urls, int(time.time() - start_time))
- return bool(url)
+ return bool(metadata_address)
def device_name_to_device(self, name):
# Consult metadata service, that has
@@ -328,6 +388,17 @@ class DataSourceEc2(sources.DataSource):
if isinstance(net_md, dict):
result = convert_ec2_metadata_network_config(
net_md, macs_to_nics=macs_to_nics, fallback_nic=iface)
+
+ # RELEASE_BLOCKER: xenial should drop the below if statement,
+ # because the issue being addressed doesn't exist pre-netplan.
+ # (This datasource doesn't implement check_instance_id() so the
+ # datasource object is recreated every boot; this means we don't
+ # need to modify update_events on cloud-init upgrade.)
+
+ # Non-VPC (aka Classic) Ec2 instances need to rewrite the
+ # network config file every boot due to MAC address change.
+ if self.is_classic_instance():
+ self.update_events['network'].add(EventType.BOOT)
else:
LOG.warning("Metadata 'network' key not valid: %s.", net_md)
self._network_config = result
@@ -356,15 +427,27 @@ class DataSourceEc2(sources.DataSource):
if not self.wait_for_metadata_service():
return {}
api_version = self.get_metadata_api_version()
+ redact = AWS_TOKEN_REDACT
crawled_metadata = {}
+ if self.cloud_name == CloudNames.AWS:
+ exc_cb = self._refresh_stale_aws_token_cb
+ exc_cb_ud = self._skip_or_refresh_stale_aws_token_cb
+ else:
+ exc_cb = exc_cb_ud = None
try:
crawled_metadata['user-data'] = ec2.get_instance_userdata(
- api_version, self.metadata_address)
+ api_version, self.metadata_address,
+ headers_cb=self._get_headers, headers_redact=redact,
+ exception_cb=exc_cb_ud)
crawled_metadata['meta-data'] = ec2.get_instance_metadata(
- api_version, self.metadata_address)
+ api_version, self.metadata_address,
+ headers_cb=self._get_headers, headers_redact=redact,
+ exception_cb=exc_cb)
if self.cloud_name == CloudNames.AWS:
identity = ec2.get_instance_identity(
- api_version, self.metadata_address)
+ api_version, self.metadata_address,
+ headers_cb=self._get_headers, headers_redact=redact,
+ exception_cb=exc_cb)
crawled_metadata['dynamic'] = {'instance-identity': identity}
except Exception:
util.logexc(
@@ -374,6 +457,73 @@ class DataSourceEc2(sources.DataSource):
crawled_metadata['_metadata_api_version'] = api_version
return crawled_metadata
+ def _refresh_api_token(self, seconds=AWS_TOKEN_TTL_SECONDS):
+ """Request new metadata API token.
+ @param seconds: The lifetime of the token in seconds
+
+ @return: The API token or None if unavailable.
+ """
+ if self.cloud_name != CloudNames.AWS:
+ return None
+ LOG.debug("Refreshing Ec2 metadata API token")
+ request_header = {AWS_TOKEN_REQ_HEADER: seconds}
+ token_url = '{}/{}'.format(self.metadata_address, API_TOKEN_ROUTE)
+ try:
+ response = uhelp.readurl(token_url, headers=request_header,
+ headers_redact=AWS_TOKEN_REDACT,
+ request_method="PUT")
+ except uhelp.UrlError as e:
+ LOG.warning(
+ 'Unable to get API token: %s raised exception %s',
+ token_url, e)
+ return None
+ return response.contents
+
+ def _skip_or_refresh_stale_aws_token_cb(self, msg, exception):
+ """Callback will not retry on SKIP_USERDATA_CODES or if no token
+ is available."""
+ retry = ec2.skip_retry_on_codes(
+ ec2.SKIP_USERDATA_CODES, msg, exception)
+ if not retry:
+ return False # False raises exception
+ return self._refresh_stale_aws_token_cb(msg, exception)
+
+ def _refresh_stale_aws_token_cb(self, msg, exception):
+ """Exception handler for Ec2 to refresh token if token is stale."""
+ if isinstance(exception, uhelp.UrlError) and exception.code == 401:
+ # With _api_token as None, _get_headers will _refresh_api_token.
+ LOG.debug("Clearing cached Ec2 API token due to expiry")
+ self._api_token = None
+ return True # always retry
+
+ def _status_cb(self, msg, exc=None):
+ LOG.warning(msg)
+ if 'Read timed out' in msg:
+ LOG.warning('Cannot use Ec2 IMDSv2 API tokens, using IMDSv1')
+ self._api_token = API_TOKEN_DISABLED
+
+ def _get_headers(self, url=''):
+ """Return a dict of headers for accessing a url.
+
+ If _api_token is unset on AWS, attempt to refresh the token via a PUT
+ and then return the updated token header.
+ """
+ if self.cloud_name != CloudNames.AWS or (self._api_token ==
+ API_TOKEN_DISABLED):
+ return {}
+ # Request a 6 hour token if URL is API_TOKEN_ROUTE
+ request_token_header = {AWS_TOKEN_REQ_HEADER: AWS_TOKEN_TTL_SECONDS}
+ if API_TOKEN_ROUTE in url:
+ return request_token_header
+ if not self._api_token:
+ # If we don't yet have an API token, get one via a PUT against
+ # API_TOKEN_ROUTE. This _api_token may get unset by a 401 due
+ # to an invalid or expired token.
+ self._api_token = self._refresh_api_token()
+ if not self._api_token:
+ return {}
+ return {AWS_TOKEN_PUT_HEADER: self._api_token}
+
class DataSourceEc2Local(DataSourceEc2):
"""Datasource run at init-local which sets up network to query metadata.
@@ -450,20 +600,31 @@ def identify_aws(data):
if (data['uuid'].startswith('ec2') and
(data['uuid_source'] == 'hypervisor' or
data['uuid'] == data['serial'])):
- return CloudNames.AWS
+ return CloudNames.AWS
return None
def identify_brightbox(data):
- if data['serial'].endswith('brightbox.com'):
+ if data['serial'].endswith('.brightbox.com'):
return CloudNames.BRIGHTBOX
+def identify_zstack(data):
+ if data['asset_tag'].endswith('.zstack.io'):
+ return CloudNames.ZSTACK
+
+
+def identify_e24cloud(data):
+ if data['vendor'] == 'e24cloud':
+ return CloudNames.E24CLOUD
+
+
def identify_platform():
# identify the platform and return an entry in CloudNames.
data = _collect_platform_data()
- checks = (identify_aws, identify_brightbox, lambda x: CloudNames.UNKNOWN)
+ checks = (identify_aws, identify_brightbox, identify_zstack,
+ identify_e24cloud, lambda x: CloudNames.UNKNOWN)
for checker in checks:
try:
result = checker(data)
@@ -481,6 +642,8 @@ def _collect_platform_data():
uuid: system-uuid from dmi or /sys/hypervisor
uuid_source: 'hypervisor' (/sys/hypervisor/uuid) or 'dmi'
serial: dmi 'system-serial-number' (/sys/.../product_serial)
+ asset_tag: 'dmidecode -s chassis-asset-tag'
+ vendor: dmi 'system-manufacturer' (/sys/.../sys_vendor)
On Ec2 instances experimentation is that product_serial is upper case,
and product_uuid is lower case. This returns lower case values for both.
@@ -503,6 +666,15 @@ def _collect_platform_data():
data['serial'] = serial.lower()
+ asset_tag = util.read_dmi_data('chassis-asset-tag')
+ if asset_tag is None:
+ asset_tag = ''
+
+ data['asset_tag'] = asset_tag.lower()
+
+ vendor = util.read_dmi_data('system-manufacturer')
+ data['vendor'] = (vendor if vendor else '').lower()
+
return data
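A hedged example of the dict _collect_platform_data() now returns, every field lower-cased as guaranteed above (values invented):

sample = {
    'uuid': 'ec2e1916-9099-7caf-fd21-0123456789ab',
    'uuid_source': 'dmi',
    'serial': 'ec2e1916-9099-7caf-fd21-0123456789ab',
    'asset_tag': '0123456789.zstack.io',
    'vendor': 'e24cloud',
}
# identify_zstack() keys off the '.zstack.io' asset_tag suffix;
# identify_e24cloud() matches vendor == 'e24cloud'.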
diff --git a/cloudinit/sources/DataSourceExoscale.py b/cloudinit/sources/DataSourceExoscale.py
new file mode 100644
index 00000000..d59aefd1
--- /dev/null
+++ b/cloudinit/sources/DataSourceExoscale.py
@@ -0,0 +1,268 @@
+# Author: Mathieu Corbin <mathieu.corbin@exoscale.com>
+# Author: Christopher Glass <christopher.glass@exoscale.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit import ec2_utils as ec2
+from cloudinit import log as logging
+from cloudinit import sources
+from cloudinit import helpers
+from cloudinit import url_helper
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+
+METADATA_URL = "http://169.254.169.254"
+API_VERSION = "1.0"
+PASSWORD_SERVER_PORT = 8080
+
+URL_TIMEOUT = 10
+URL_RETRIES = 6
+
+EXOSCALE_DMI_NAME = "Exoscale"
+
+
+class DataSourceExoscale(sources.DataSource):
+
+ dsname = 'Exoscale'
+
+ url_max_wait = 120
+
+ def __init__(self, sys_cfg, distro, paths):
+ super(DataSourceExoscale, self).__init__(sys_cfg, distro, paths)
+ LOG.debug("Initializing the Exoscale datasource")
+
+ self.metadata_url = self.ds_cfg.get('metadata_url', METADATA_URL)
+ self.api_version = self.ds_cfg.get('api_version', API_VERSION)
+ self.password_server_port = int(
+ self.ds_cfg.get('password_server_port', PASSWORD_SERVER_PORT))
+ self.url_timeout = self.ds_cfg.get('timeout', URL_TIMEOUT)
+ self.url_retries = self.ds_cfg.get('retries', URL_RETRIES)
+ self.extra_config = {}
+
+ def activate(self, cfg, is_new_instance):
+ """Adjust set-passwords module to run 'always' during each boot"""
+ # We run the set password config module on every boot in order to
+ # enable resetting the instance's password via the exoscale console
+ # (and a subsequent instance reboot).
+ # Exoscale password server only provides set-passwords user-data if
+ # a user has triggered a password reset. So calling that password
+ # service generally results in no additional cloud-config.
+ # TODO(Create util functions for overriding merged sys_cfg module freq)
+ mod = 'set_passwords'
+ sem_path = self.paths.get_ipath_cur('sem')
+ sem_helper = helpers.FileSemaphores(sem_path)
+ if sem_helper.clear('config_' + mod, None):
+ LOG.debug('Overriding module set-passwords with frequency always')
+
+ def wait_for_metadata_service(self):
+ """Wait for the metadata service to be reachable."""
+
+ metadata_url = "{}/{}/meta-data/instance-id".format(
+ self.metadata_url, self.api_version)
+
+ url, _response = url_helper.wait_for_url(
+ urls=[metadata_url],
+ max_wait=self.url_max_wait,
+ timeout=self.url_timeout,
+ status_cb=LOG.critical)
+
+ return bool(url)
+
+ def crawl_metadata(self):
+ """
+ Crawl the metadata service when available.
+
+ @returns: Dictionary of crawled metadata content.
+ """
+ metadata_ready = util.log_time(
+ logfunc=LOG.info,
+ msg='waiting for the metadata service',
+ func=self.wait_for_metadata_service)
+
+ if not metadata_ready:
+ return {}
+
+ return read_metadata(self.metadata_url, self.api_version,
+ self.password_server_port, self.url_timeout,
+ self.url_retries)
+
+ def _get_data(self):
+ """Fetch the user data, the metadata and the VM password
+ from the metadata service.
+
+ Please refer to the datasource documentation for details on how the
+ metadata server and password server are crawled.
+ """
+ if not self._is_platform_viable():
+ return False
+
+ data = util.log_time(
+ logfunc=LOG.debug,
+ msg='Crawl of metadata service',
+ func=self.crawl_metadata)
+
+ if not data:
+ return False
+
+ self.userdata_raw = data['user-data']
+ self.metadata = data['meta-data']
+ password = data.get('password')
+
+ password_config = {}
+ if password:
+ # Since we have a password, enable ssh_pwauth so it can be used.
+ # The password module leaves this setting as-is by default, so
+ # password login stays disabled if no password is ever set, and
+ # stays enabled once we have enabled it here.
+ password_config = {
+ 'ssh_pwauth': True,
+ 'password': password,
+ 'chpasswd': {
+ 'expire': False,
+ },
+ }
+
+ # builtin extra_config overrides password_config
+ self.extra_config = util.mergemanydict(
+ [self.extra_config, password_config])
+
+ return True
+
+ def get_config_obj(self):
+ return self.extra_config
+
+ def _is_platform_viable(self):
+ # read_dmi_data can return None when DMI data is missing; guard
+ # so startswith() does not raise on non-Exoscale hardware.
+ return (util.read_dmi_data('system-product-name') or '').startswith(
+ EXOSCALE_DMI_NAME)
+
+
+# Used to match classes to dependencies
+datasources = [
+ (DataSourceExoscale, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+ return sources.list_from_depends(depends, datasources)
+
+
+def get_password(metadata_url=METADATA_URL,
+ api_version=API_VERSION,
+ password_server_port=PASSWORD_SERVER_PORT,
+ url_timeout=URL_TIMEOUT,
+ url_retries=URL_RETRIES):
+ """Obtain the VM's password if set.
+
+ Once fetched, the password is marked saved. Future calls to this
+ method may return an empty string or 'saved_password'."""
+ password_url = "{}:{}/{}/".format(metadata_url, password_server_port,
+ api_version)
+ response = url_helper.read_file_or_url(
+ password_url,
+ ssl_details=None,
+ headers={"DomU_Request": "send_my_password"},
+ timeout=url_timeout,
+ retries=url_retries)
+ password = response.contents.decode('utf-8')
+ # the password is empty or already saved
+ # Note: the original metadata server would answer an additional
+ # 'bad_request' status, but the Exoscale implementation does not.
+ if password in ['', 'saved_password']:
+ return None
+ # save the password
+ url_helper.read_file_or_url(
+ password_url,
+ ssl_details=None,
+ headers={"DomU_Request": "saved_password"},
+ timeout=url_timeout,
+ retries=url_retries)
+ return password
+
+
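The exchange in get_password() is two reads of the same URL: one with the DomU_Request: send_my_password header to fetch the password, then one with saved_password to acknowledge it. A rough stdlib equivalent, assuming the defaults defined above (port 8080, api version 1.0):

import urllib.request

def _password_request(action, base='http://169.254.169.254:8080/1.0/'):
    req = urllib.request.Request(base, headers={'DomU_Request': action})
    return urllib.request.urlopen(req, timeout=10).read().decode('utf-8')

# pw = _password_request('send_my_password')
# if pw not in ('', 'saved_password'):
#     _password_request('saved_password')  # mark the password as saved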
+def read_metadata(metadata_url=METADATA_URL,
+ api_version=API_VERSION,
+ password_server_port=PASSWORD_SERVER_PORT,
+ url_timeout=URL_TIMEOUT,
+ url_retries=URL_RETRIES):
+ """Query the metadata server and return the retrieved data."""
+ crawled_metadata = {}
+ crawled_metadata['_metadata_api_version'] = api_version
+ try:
+ crawled_metadata['user-data'] = ec2.get_instance_userdata(
+ api_version,
+ metadata_url,
+ timeout=url_timeout,
+ retries=url_retries)
+ crawled_metadata['meta-data'] = ec2.get_instance_metadata(
+ api_version,
+ metadata_url,
+ timeout=url_timeout,
+ retries=url_retries)
+ except Exception as e:
+ util.logexc(LOG, "failed reading from metadata url %s (%s)",
+ metadata_url, e)
+ return {}
+
+ try:
+ crawled_metadata['password'] = get_password(
+ api_version=api_version,
+ metadata_url=metadata_url,
+ password_server_port=password_server_port,
+ url_retries=url_retries,
+ url_timeout=url_timeout)
+ except Exception as e:
+ util.logexc(LOG, "failed to read from password server url %s:%s (%s)",
+ metadata_url, password_server_port, e)
+
+ return crawled_metadata
+
+
+if __name__ == "__main__":
+ import argparse
+
+ parser = argparse.ArgumentParser(description='Query Exoscale Metadata')
+ parser.add_argument(
+ "--endpoint",
+ metavar="URL",
+ help="The url of the metadata service.",
+ default=METADATA_URL)
+ parser.add_argument(
+ "--version",
+ metavar="VERSION",
+ help="The version of the metadata endpoint to query.",
+ default=API_VERSION)
+ parser.add_argument(
+ "--retries",
+ metavar="NUM",
+ type=int,
+ help="The number of retries querying the endpoint.",
+ default=URL_RETRIES)
+ parser.add_argument(
+ "--timeout",
+ metavar="NUM",
+ type=int,
+ help="The time in seconds to wait before timing out.",
+ default=URL_TIMEOUT)
+ parser.add_argument(
+ "--password-port",
+ metavar="PORT",
+ type=int,
+ help="The port on which the password endpoint listens",
+ default=PASSWORD_SERVER_PORT)
+
+ args = parser.parse_args()
+
+ data = read_metadata(
+ metadata_url=args.endpoint,
+ api_version=args.version,
+ password_server_port=args.password_port,
+ url_timeout=args.timeout,
+ url_retries=args.retries)
+
+ print(util.json_dumps(data))
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py
index f72d9836..6cbfbbac 100644
--- a/cloudinit/sources/DataSourceGCE.py
+++ b/cloudinit/sources/DataSourceGCE.py
@@ -2,10 +2,8 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-import os
import datetime
import json
-from subprocess import call
from base64 import b64decode
@@ -20,11 +18,13 @@ LOG = logging.getLogger(__name__)
MD_V1_URL = 'http://metadata.google.internal/computeMetadata/v1/'
BUILTIN_DS_CONFIG = {'metadata_url': MD_V1_URL}
REQUIRED_FIELDS = ('instance-id', 'availability-zone', 'local-hostname')
-DEFAULT_PRIMARY_NIC = 'eth0'
+GUEST_ATTRIBUTES_URL = ('http://metadata.google.internal/computeMetadata/'
+ 'v1/instance/guest-attributes')
+HOSTKEY_NAMESPACE = 'hostkeys'
+HEADERS = {'Metadata-Flavor': 'Google'}
class GoogleMetadataFetcher(object):
- headers = {'Metadata-Flavor': 'Google'}
def __init__(self, metadata_address):
self.metadata_address = metadata_address
@@ -35,7 +35,7 @@ class GoogleMetadataFetcher(object):
url = self.metadata_address + path
if is_recursive:
url += '/?recursive=True'
- resp = url_helper.readurl(url=url, headers=self.headers)
+ resp = url_helper.readurl(url=url, headers=HEADERS)
except url_helper.UrlError as exc:
msg = "url %s raised exception %s"
LOG.debug(msg, path, exc)
@@ -53,11 +53,6 @@ class GoogleMetadataFetcher(object):
class DataSourceGCE(sources.DataSource):
dsname = 'GCE'
- process_name = 'dhclient'
-
- tmpps = os.popen("ps -Af").read()
- if process_name not in tmpps[:]:
- call(['/sbin/dhclient', DEFAULT_PRIMARY_NIC])
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -98,6 +93,10 @@ class DataSourceGCE(sources.DataSource):
public_keys_data = self.metadata['public-keys-data']
return _parse_public_keys(public_keys_data, self.default_user)
+ def publish_host_keys(self, hostkeys):
+ for key in hostkeys:
+ _write_host_key_to_guest_attributes(*key)
+
def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False):
# GCE has long FDQN's and has asked for short hostnames.
return self.metadata['local-hostname'].split('.')[0]
@@ -111,6 +110,17 @@ class DataSourceGCE(sources.DataSource):
return self.availability_zone.rsplit('-', 1)[0]
+def _write_host_key_to_guest_attributes(key_type, key_value):
+ url = '%s/%s/%s' % (GUEST_ATTRIBUTES_URL, HOSTKEY_NAMESPACE, key_type)
+ key_value = key_value.encode('utf-8')
+ resp = url_helper.readurl(url=url, data=key_value, headers=HEADERS,
+ request_method='PUT', check_status=False)
+ if resp.ok():
+ LOG.debug('Wrote %s host key to guest attributes.', key_type)
+ else:
+ LOG.debug('Unable to write %s host key to guest attributes.', key_type)
+
+
def _has_expired(public_key):
# Check whether an SSH key is expired. Public key input is a single SSH
# public key in the GCE specific key format documented here:
diff --git a/cloudinit/sources/DataSourceHetzner.py b/cloudinit/sources/DataSourceHetzner.py
index 5c75b65b..50298330 100644
--- a/cloudinit/sources/DataSourceHetzner.py
+++ b/cloudinit/sources/DataSourceHetzner.py
@@ -28,6 +28,9 @@ MD_WAIT_RETRY = 2
class DataSourceHetzner(sources.DataSource):
+
+ dsname = 'Hetzner'
+
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.distro = distro
diff --git a/cloudinit/sources/DataSourceIBMCloud.py b/cloudinit/sources/DataSourceIBMCloud.py
index 21e6ae6b..e0c714e8 100644
--- a/cloudinit/sources/DataSourceIBMCloud.py
+++ b/cloudinit/sources/DataSourceIBMCloud.py
@@ -83,7 +83,7 @@ creates 6 boot scenarios.
There is no information available to identify this scenario.
- The user will be able to ssh in as as root with their public keys that
+ The user will be able to SSH in as root with their public keys that
have been installed into /root/.ssh/authorized_keys
during the provisioning stage.
diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index 61aa6d7e..517913aa 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -136,7 +136,7 @@ class DataSourceMAAS(sources.DataSource):
url = url[:-1]
check_url = "%s/%s/meta-data/instance-id" % (url, MD_VERSION)
urls = [check_url]
- url = self.oauth_helper.wait_for_url(
+ url, _response = self.oauth_helper.wait_for_url(
urls=urls, max_wait=max_wait, timeout=timeout)
if url:
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index 6860f0cc..ee748b41 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -35,6 +35,26 @@ class DataSourceNoCloud(sources.DataSource):
root = sources.DataSource.__str__(self)
return "%s [seed=%s][dsmode=%s]" % (root, self.seed, self.dsmode)
+ def _get_devices(self, label):
+ if util.is_FreeBSD():
+ devlist = [
+ p for p in ['/dev/msdosfs/' + label, '/dev/iso9660/' + label]
+ if os.path.exists(p)]
+ else:
+ # Query optical drive to get it in blkid cache for 2.6 kernels
+ util.find_devs_with(path="/dev/sr0")
+ util.find_devs_with(path="/dev/sr1")
+
+ fslist = util.find_devs_with("TYPE=vfat")
+ fslist.extend(util.find_devs_with("TYPE=iso9660"))
+
+ label_list = util.find_devs_with("LABEL=%s" % label.upper())
+ label_list.extend(util.find_devs_with("LABEL=%s" % label.lower()))
+
+ devlist = list(set(fslist) & set(label_list))
+ devlist.sort(reverse=True)
+ return devlist
+
def _get_data(self):
defaults = {
"instance-id": "nocloud",
@@ -99,18 +119,7 @@ class DataSourceNoCloud(sources.DataSource):
label = self.ds_cfg.get('fs_label', "cidata")
if label is not None:
- # Query optical drive to get it in blkid cache for 2.6 kernels
- util.find_devs_with(path="/dev/sr0")
- util.find_devs_with(path="/dev/sr1")
-
- fslist = util.find_devs_with("TYPE=vfat")
- fslist.extend(util.find_devs_with("TYPE=iso9660"))
-
- label_list = util.find_devs_with("LABEL=%s" % label)
- devlist = list(set(fslist) & set(label_list))
- devlist.sort(reverse=True)
-
- for dev in devlist:
+ for dev in self._get_devices(label):
try:
LOG.debug("Attempting to use data from %s", dev)
@@ -118,9 +127,8 @@ class DataSourceNoCloud(sources.DataSource):
seeded = util.mount_cb(dev, _pp2d_callback,
pp2d_kwargs)
except ValueError:
- if dev in label_list:
- LOG.warning("device %s with label=%s not a"
- "valid seed.", dev, label)
+ LOG.warning("device %s with label=%s not a "
+ "valid seed.", dev, label)
continue
mydata = _merge_new_seed(mydata, seeded)
@@ -258,7 +266,7 @@ def load_cmdline_data(fill, cmdline=None):
("ds=nocloud-net", sources.DSMODE_NETWORK)]
for idstr, dsmode in pairs:
if parse_cmdline_data(idstr, fill, cmdline):
- # if dsmode was explicitly in the commanad line, then
+ # if dsmode was explicitly in the command line, then
# prefer it to the dsmode based on the command line id
if 'dsmode' not in fill:
fill['dsmode'] = dsmode
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index 2c40cf97..7f55b5f8 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -8,17 +8,15 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from xml.dom import minidom
-
import base64
import os
import re
import time
+from xml.dom import minidom
from cloudinit import log as logging
from cloudinit import sources
from cloudinit import util
-
from cloudinit.sources.helpers.vmware.imc.config \
import Config
from cloudinit.sources.helpers.vmware.imc.config_custom_script \
@@ -38,11 +36,15 @@ from cloudinit.sources.helpers.vmware.imc.guestcust_state \
from cloudinit.sources.helpers.vmware.imc.guestcust_util import (
enable_nics,
get_nics_to_enable,
- set_customization_status
+ set_customization_status,
+ get_tools_config
)
LOG = logging.getLogger(__name__)
+CONFGROUPNAME_GUESTCUSTOMIZATION = "deployPkg"
+GUESTCUSTOMIZATION_ENABLE_CUST_SCRIPTS = "enable-custom-scripts"
+
class DataSourceOVF(sources.DataSource):
@@ -103,8 +105,7 @@ class DataSourceOVF(sources.DataSource):
plugin = "libdeployPkgPlugin.so"
deployPkgPluginPath = None
for path in search_paths:
- # Ignore deployPkgPluginPath for now.
- #deployPkgPluginPath = search_file(path, plugin)
+ deployPkgPluginPath = search_file(path, plugin)
if deployPkgPluginPath:
LOG.debug("Found the customization plugin at %s",
deployPkgPluginPath)
@@ -147,6 +148,24 @@ class DataSourceOVF(sources.DataSource):
product_marker, os.path.join(self.paths.cloud_dir, 'data'))
special_customization = product_marker and not hasmarkerfile
customscript = self._vmware_cust_conf.custom_script_name
+ custScriptConfig = get_tools_config(
+ CONFGROUPNAME_GUESTCUSTOMIZATION,
+ GUESTCUSTOMIZATION_ENABLE_CUST_SCRIPTS,
+ "false")
+ if custScriptConfig.lower() != "true":
+ # Update the customization status if a custom script
+ # is present but custom scripts are disabled
+ if special_customization and customscript:
+ msg = "Custom script is disabled by VM Administrator"
+ LOG.debug(msg)
+ set_customization_status(
+ GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
+ GuestCustErrorEnum.GUESTCUST_ERROR_SCRIPT_DISABLED)
+ raise RuntimeError(msg)
+
+ ccScriptsDir = os.path.join(
+ self.paths.get_cpath("scripts"),
+ "per-instance")
except Exception as e:
_raise_error_status(
"Error parsing the customization Config File",
@@ -200,7 +219,9 @@ class DataSourceOVF(sources.DataSource):
if customscript:
try:
- postcust = PostCustomScript(customscript, imcdirpath)
+ postcust = PostCustomScript(customscript,
+ imcdirpath,
+ ccScriptsDir)
postcust.execute()
except Exception as e:
_raise_error_status(
@@ -381,9 +402,7 @@ def read_vmware_imc(config):
if config.timezone:
cfg['timezone'] = config.timezone
- # Generate a unique instance-id so that re-customization will
- # happen in cloud-init
- md['instance-id'] = "iid-vmware-" + util.rand_str(strlen=8)
+ md['instance-id'] = "iid-vmware-imc"
return (md, ud, cfg)
@@ -436,7 +455,7 @@ def maybe_cdrom_device(devname):
"""
if not devname:
return False
- elif not isinstance(devname, util.string_types):
+ elif not isinstance(devname, str):
raise ValueError("Unexpected input for devname: %s" % devname)
# resolve '..' and multi '/' elements
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index e62e9729..02c9a7b8 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -337,7 +337,9 @@ def parse_shell_config(content, keylist=None, bash=None, asuser=None,
(output, _error) = util.subp(cmd, data=bcmd)
# exclude vars in bash that change on their own or that we used
- excluded = ("RANDOM", "LINENO", "SECONDS", "_", "__v")
+ excluded = (
+ "EPOCHREALTIME", "EPOCHSECONDS", "RANDOM", "LINENO", "SECONDS", "_",
+ "__v")
preset = {}
ret = {}
target = None
diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
index 4a015240..7a5e71b6 100644
--- a/cloudinit/sources/DataSourceOpenStack.py
+++ b/cloudinit/sources/DataSourceOpenStack.py
@@ -76,7 +76,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
url_params = self.get_url_params()
start_time = time.time()
- avail_url = url_helper.wait_for_url(
+ avail_url, _response = url_helper.wait_for_url(
urls=md_urls, max_wait=url_params.max_wait_seconds,
timeout=url_params.timeout_seconds)
if avail_url:
diff --git a/cloudinit/sources/DataSourceOracle.py b/cloudinit/sources/DataSourceOracle.py
index 70b9c58a..eec87403 100644
--- a/cloudinit/sources/DataSourceOracle.py
+++ b/cloudinit/sources/DataSourceOracle.py
@@ -16,7 +16,7 @@ Notes:
"""
from cloudinit.url_helper import combine_url, readurl, UrlError
-from cloudinit.net import dhcp
+from cloudinit.net import dhcp, get_interfaces_by_mac, is_netfail_master
from cloudinit import net
from cloudinit import sources
from cloudinit import util
@@ -28,8 +28,134 @@ import re
LOG = logging.getLogger(__name__)
+BUILTIN_DS_CONFIG = {
+ # Don't use IMDS to configure secondary NICs by default
+ 'configure_secondary_nics': False,
+}
CHASSIS_ASSET_TAG = "OracleCloud.com"
METADATA_ENDPOINT = "http://169.254.169.254/openstack/"
+VNIC_METADATA_URL = 'http://169.254.169.254/opc/v1/vnics/'
+# https://docs.cloud.oracle.com/iaas/Content/Network/Troubleshoot/connectionhang.htm#Overview,
+# indicates that an MTU of 9000 is used within OCI
+MTU = 9000
+
+
+def _add_network_config_from_opc_imds(network_config):
+ """
+ Fetch data from Oracle's IMDS, generate secondary NIC config, merge it.
+
+ The primary NIC configuration should not be modified based on the IMDS
+ values, as it should continue to be configured for DHCP. As such, this
+ takes an existing network_config dict which is expected to have the primary
+ NIC configuration already present. It will mutate the given dict to
+ include the secondary VNICs.
+
+ :param network_config:
+ A v1 or v2 network config dict with the primary NIC already configured.
+ This dict will be mutated.
+
+ :raises:
+ Exceptions are not handled within this function. Likely exceptions are
+ those raised by url_helper.readurl (if communicating with the IMDS
+ fails), ValueError/JSONDecodeError (if the IMDS returns invalid JSON),
+ and KeyError/IndexError (if the IMDS returns valid JSON with unexpected
+ contents).
+ """
+ resp = readurl(VNIC_METADATA_URL)
+ vnics = json.loads(str(resp))
+
+ if 'nicIndex' in vnics[0]:
+ # TODO: Once configure_secondary_nics defaults to True, lower the level
+ # of this log message. (Currently, if we're running this code at all,
+ # someone has explicitly opted-in to secondary VNIC configuration, so
+ # we should warn them that it didn't happen. Once it's default, this
+ # would be emitted on every Bare Metal Machine launch, which means INFO
+ # or DEBUG would be more appropriate.)
+ LOG.warning(
+ 'VNIC metadata indicates this is a bare metal machine; skipping'
+ ' secondary VNIC configuration.'
+ )
+ return
+
+ interfaces_by_mac = get_interfaces_by_mac()
+
+ for vnic_dict in vnics[1:]:
+ # We skip the first entry in the response because the primary interface
+ # is already configured by iSCSI boot; applying configuration from the
+ # IMDS is not required.
+ mac_address = vnic_dict['macAddr'].lower()
+ if mac_address not in interfaces_by_mac:
+ LOG.debug('Interface with MAC %s not found; skipping', mac_address)
+ continue
+ name = interfaces_by_mac[mac_address]
+
+ if network_config['version'] == 1:
+ subnet = {
+ 'type': 'static',
+ 'address': vnic_dict['privateIp'],
+ }
+ network_config['config'].append({
+ 'name': name,
+ 'type': 'physical',
+ 'mac_address': mac_address,
+ 'mtu': MTU,
+ 'subnets': [subnet],
+ })
+ elif network_config['version'] == 2:
+ network_config['ethernets'][name] = {
+ 'addresses': [vnic_dict['privateIp']],
+ 'mtu': MTU, 'dhcp4': False, 'dhcp6': False,
+ 'match': {'macaddress': mac_address}}
+
+
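For the v2 branch just above, a hedged sketch of the netplan-style entry a single secondary VNIC yields (interface name, MAC and IP invented):

# network_config['ethernets'] gains, per secondary VNIC:
example_entry = {
    'ens4': {
        'addresses': ['10.0.0.2'],
        'mtu': 9000,  # the OCI-documented MTU constant above
        'dhcp4': False,
        'dhcp6': False,
        'match': {'macaddress': '02:00:17:aa:bb:cc'},
    }
}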
+def _ensure_netfailover_safe(network_config):
+ """
+ Search network config physical interfaces to see if any of them are
+ a netfailover master. If found, we prevent matching by MAC as the other
+ failover devices have the same MAC but need to be ignored.
+
+ Note: we rely on cloudinit.net changes which prevent netfailover devices
+ from being present in the provided network config. For more details about
+ netfailover devices, refer to cloudinit.net module.
+
+ :param network_config:
+ A v1 or v2 network config dict with the primary NIC, and possibly
+ secondary nic configured. This dict will be mutated.
+
+ """
+ # ignore anything that's not an actual network-config
+ if 'version' not in network_config:
+ return
+
+ if network_config['version'] not in [1, 2]:
+ LOG.debug('Ignoring unknown network config version: %s',
+ network_config['version'])
+ return
+
+ mac_to_name = get_interfaces_by_mac()
+ if network_config['version'] == 1:
+ for cfg in [c for c in network_config['config'] if 'type' in c]:
+ if cfg['type'] == 'physical':
+ if 'mac_address' in cfg:
+ mac = cfg['mac_address']
+ cur_name = mac_to_name.get(mac)
+ if not cur_name:
+ continue
+ elif is_netfail_master(cur_name):
+ del cfg['mac_address']
+
+ elif network_config['version'] == 2:
+ for _, cfg in network_config.get('ethernets', {}).items():
+ if 'match' in cfg:
+ macaddr = cfg.get('match', {}).get('macaddress')
+ if macaddr:
+ cur_name = mac_to_name.get(macaddr)
+ if not cur_name:
+ continue
+ elif is_netfail_master(cur_name):
+ del cfg['match']['macaddress']
+ del cfg['set-name']
+ cfg['match']['name'] = cur_name
class DataSourceOracle(sources.DataSource):
@@ -37,8 +163,22 @@ class DataSourceOracle(sources.DataSource):
dsname = 'Oracle'
system_uuid = None
vendordata_pure = None
+ network_config_sources = (
+ sources.NetworkConfigSource.cmdline,
+ sources.NetworkConfigSource.ds,
+ sources.NetworkConfigSource.initramfs,
+ sources.NetworkConfigSource.system_cfg,
+ )
+
_network_config = sources.UNSET
+ def __init__(self, sys_cfg, *args, **kwargs):
+ super(DataSourceOracle, self).__init__(sys_cfg, *args, **kwargs)
+
+ self.ds_cfg = util.mergemanydict([
+ util.get_cfg_by_path(sys_cfg, ['datasource', self.dsname], {}),
+ BUILTIN_DS_CONFIG])
+
def _is_platform_viable(self):
"""Check platform environment to report if this datasource may run."""
return _is_platform_viable()
@@ -48,7 +188,7 @@ class DataSourceOracle(sources.DataSource):
return False
# network may be configured if iscsi root. If that is the case
- # then read_kernel_cmdline_config will return non-None.
+ # then read_initramfs_config will return non-None.
if _is_iscsi_root():
data = self.crawl_metadata()
else:
@@ -118,11 +258,27 @@ class DataSourceOracle(sources.DataSource):
We nonetheless return cmdline provided config if present
and fall back to the generated fallback config."""
if self._network_config == sources.UNSET:
- cmdline_cfg = cmdline.read_kernel_cmdline_config()
- if cmdline_cfg:
- self._network_config = cmdline_cfg
- else:
+ # this is v1
+ self._network_config = cmdline.read_initramfs_config()
+
+ if not self._network_config:
+ # this is now v2
self._network_config = self.distro.generate_fallback_config()
+
+ if self.ds_cfg.get('configure_secondary_nics'):
+ try:
+ # Mutate self._network_config to include secondary VNICs
+ _add_network_config_from_opc_imds(self._network_config)
+ except Exception:
+ util.logexc(
+ LOG,
+ "Failed to fetch secondary network configuration!")
+
+ # we need to verify that the nic selected is not a netfail over
+ # device and, if it is a netfail master, then we need to avoid
+ # emitting any match by mac
+ _ensure_netfailover_safe(self._network_config)
+
return self._network_config
@@ -137,7 +293,7 @@ def _is_platform_viable():
def _is_iscsi_root():
- return bool(cmdline.read_kernel_cmdline_config())
+ return bool(cmdline.read_initramfs_config())
def _load_index(content):
diff --git a/cloudinit/sources/DataSourceRbxCloud.py b/cloudinit/sources/DataSourceRbxCloud.py
new file mode 100644
index 00000000..c3cd5c79
--- /dev/null
+++ b/cloudinit/sources/DataSourceRbxCloud.py
@@ -0,0 +1,251 @@
+# Copyright (C) 2018 Warsaw Data Center
+#
+# Author: Malwina Leis <m.leis@rootbox.com>
+# Author: Grzegorz Brzeski <gregory@rootbox.io>
+# Author: Adam Dobrawy <a.dobrawy@hyperone.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+"""
+This file contains code used to gather the user data passed to an
+instance on rootbox / hyperone cloud platforms
+"""
+import errno
+import os
+import os.path
+
+from cloudinit import log as logging
+from cloudinit import sources
+from cloudinit import util
+from cloudinit.event import EventType
+
+LOG = logging.getLogger(__name__)
+ETC_HOSTS = '/etc/hosts'
+
+
+def get_manage_etc_hosts():
+ hosts = util.load_file(ETC_HOSTS, quiet=True)
+ if hosts:
+ LOG.debug('/etc/hosts exists - setting manage_etc_hosts to False')
+ return False
+ LOG.debug('/etc/hosts does not exist - setting manage_etc_hosts to True')
+ return True
+
+
+def ip2int(addr):
+ parts = addr.split('.')
+ return (int(parts[0]) << 24) + (int(parts[1]) << 16) + \
+ (int(parts[2]) << 8) + int(parts[3])
+
+
+def int2ip(addr):
+ return '.'.join([str(addr >> (i << 3) & 0xFF) for i in range(4)[::-1]])
+
+
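A quick worked check of the two converters (assuming the helpers above are in scope), including the gateway+2/gateway+3 arithmetic used for the gratuitous-ARP targets later in this file:

# '10.0.0.1' -> (10 << 24) + (0 << 16) + (0 << 8) + 1 = 167772161
assert ip2int('10.0.0.1') == 167772161
assert int2ip(167772161) == '10.0.0.1'
assert int2ip(ip2int('192.168.1.1') + 2) == '192.168.1.3'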
+def _sub_arp(cmd):
+ """
+ Runs arping via util.subp, cloud-init's preferred subprocess helper.
+ Split into a separate function so it can be mocked in unit tests.
+ """
+ return util.subp(['arping'] + cmd)
+
+
+def gratuitous_arp(items, distro):
+ source_param = '-S'
+ if distro.name in ['fedora', 'centos', 'rhel']:
+ source_param = '-s'
+ for item in items:
+ _sub_arp([
+ '-c', '2',
+ source_param, item['source'],
+ item['destination']
+ ])
+
+
+def get_md():
+ rbx_data = None
+ devices = [
+ dev
+ for dev, bdata in util.blkid().items()
+ if bdata.get('LABEL', '').upper() == 'CLOUDMD'
+ ]
+ for device in devices:
+ try:
+ rbx_data = util.mount_cb(
+ device=device,
+ callback=read_user_data_callback,
+ mtype=['vfat', 'fat']
+ )
+ if rbx_data:
+ break
+ except OSError as err:
+ if err.errno != errno.ENOENT:
+ raise
+ except util.MountFailedError:
+ util.logexc(LOG, "Failed to mount %s when looking for user "
+ "data", device)
+ if not rbx_data:
+ util.logexc(LOG, "Failed to load metadata and userdata")
+ return False
+ return rbx_data
+
+
+def generate_network_config(netadps):
+ """Generate network configuration
+
+ @param netadps: A list of network adapter settings
+
+ @returns: A dict containing network config
+ """
+ return {
+ 'version': 1,
+ 'config': [
+ {
+ 'type': 'physical',
+ 'name': 'eth{}'.format(str(i)),
+ 'mac_address': netadp['macaddress'].lower(),
+ 'subnets': [
+ {
+ 'type': 'static',
+ 'address': ip['address'],
+ 'netmask': netadp['network']['netmask'],
+ 'control': 'auto',
+ 'gateway': netadp['network']['gateway'],
+ 'dns_nameservers': netadp['network']['dns'][
+ 'nameservers']
+ } for ip in netadp['ip']
+ ],
+ } for i, netadp in enumerate(netadps)
+ ]
+ }
+
+
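A hedged example of one netadp entry and what generate_network_config() derives from it (field names taken from the code above, values invented):

netadps = [{
    'macaddress': '00:16:3E:AA:BB:CC',
    'network': {
        'netmask': '255.255.255.0',
        'gateway': '192.0.2.1',
        'dns': {'nameservers': ['192.0.2.53']},
    },
    'ip': [{'address': '192.0.2.10'}],
}]
# yields one physical device 'eth0' matched on 00:16:3e:aa:bb:cc with a
# single static subnet: 192.0.2.10/255.255.255.0 via gateway 192.0.2.1.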
+def read_user_data_callback(mount_dir):
+ """This callback will be applied by util.mount_cb() on the mounted
+ drive.
+
+ @param mount_dir: String representing path of directory where mounted drive
+ is available
+
+ @returns: A dict containing userdata, metadata and cfg based on metadata.
+ """
+ meta_data = util.load_json(
+ text=util.load_file(
+ fname=os.path.join(mount_dir, 'cloud.json'),
+ decode=False
+ )
+ )
+ user_data = util.load_file(
+ fname=os.path.join(mount_dir, 'user.data'),
+ quiet=True
+ )
+ if 'vm' not in meta_data or 'netadp' not in meta_data:
+ util.logexc(LOG, "Failed to load metadata. Invalid format.")
+ return None
+ username = meta_data.get('additionalMetadata', {}).get('username')
+ ssh_keys = meta_data.get('additionalMetadata', {}).get('sshKeys', [])
+
+ pwd_hash = None  # renamed from 'hash' to avoid shadowing the builtin
+ if meta_data.get('additionalMetadata', {}).get('password'):
+ pwd_hash = meta_data['additionalMetadata']['password']['sha512']
+
+ network = generate_network_config(meta_data['netadp'])
+
+ data = {
+ 'userdata': user_data,
+ 'metadata': {
+ 'instance-id': meta_data['vm']['_id'],
+ 'local-hostname': meta_data['vm']['name'],
+ 'public-keys': []
+ },
+ 'gratuitous_arp': [
+ {
+ "source": ip["address"],
+ "destination": target
+ }
+ for netadp in meta_data['netadp']
+ for ip in netadp['ip']
+ for target in [
+ netadp['network']["gateway"],
+ int2ip(ip2int(netadp['network']["gateway"]) + 2),
+ int2ip(ip2int(netadp['network']["gateway"]) + 3)
+ ]
+ ],
+ 'cfg': {
+ 'ssh_pwauth': True,
+ 'disable_root': True,
+ 'system_info': {
+ 'default_user': {
+ 'name': username,
+ 'gecos': username,
+ 'sudo': ['ALL=(ALL) NOPASSWD:ALL'],
'passwd': pwd_hash,
+ 'lock_passwd': False,
+ 'ssh_authorized_keys': ssh_keys,
+ 'shell': '/bin/bash'
+ }
+ },
+ 'network_config': network,
+ 'manage_etc_hosts': get_manage_etc_hosts(),
+ },
+ }
+
+ LOG.debug('returning DATA object: %s', data)
+
+ return data
+
+
+class DataSourceRbxCloud(sources.DataSource):
+ dsname = "RbxCloud"
+ update_events = {'network': [
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT
+ ]}
+
+ def __init__(self, sys_cfg, distro, paths):
+ sources.DataSource.__init__(self, sys_cfg, distro, paths)
+ self.seed = None
+
+ def __str__(self):
+ root = sources.DataSource.__str__(self)
+ return "%s [seed=%s]" % (root, self.seed)
+
+ def _get_data(self):
+ """
+ Metadata is passed to the launching instance which
+ is used to perform instance configuration.
+ """
+ rbx_data = get_md()
+ self.userdata_raw = rbx_data['userdata']
+ self.metadata = rbx_data['metadata']
+ self.gratuitous_arp = rbx_data['gratuitous_arp']
+ self.cfg = rbx_data['cfg']
+ return True
+
+ @property
+ def network_config(self):
+ return self.cfg['network_config']
+
+ def get_public_ssh_keys(self):
+ return self.metadata['public-keys']
+
+ def get_userdata_raw(self):
+ return self.userdata_raw
+
+ def get_config_obj(self):
+ return self.cfg
+
+ def activate(self, cfg, is_new_instance):
+ gratuitous_arp(self.gratuitous_arp, self.distro)
+
+
+# Used to match classes to dependencies
+datasources = [
+ (DataSourceRbxCloud, (sources.DEP_FILESYSTEM,)),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+ return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py
index 9dc4ab23..83c2bf65 100644
--- a/cloudinit/sources/DataSourceScaleway.py
+++ b/cloudinit/sources/DataSourceScaleway.py
@@ -188,7 +188,7 @@ class DataSourceScaleway(sources.DataSource):
self.retries = int(self.ds_cfg.get('retries', DEF_MD_RETRIES))
self.timeout = int(self.ds_cfg.get('timeout', DEF_MD_TIMEOUT))
self._fallback_interface = None
- self._network_config = None
+ self._network_config = sources.UNSET
def _crawl_metadata(self):
resp = url_helper.readurl(self.metadata_address,
@@ -227,7 +227,12 @@ class DataSourceScaleway(sources.DataSource):
Configure networking according to data received from the
metadata API.
"""
- if self._network_config:
+ if self._network_config is None:
+ LOG.warning('Found None as cached _network_config. '
+ 'Resetting to %s', sources.UNSET)
+ self._network_config = sources.UNSET
+
+ if self._network_config != sources.UNSET:
return self._network_config
if self._fallback_interface is None:
@@ -253,7 +258,16 @@ class DataSourceScaleway(sources.DataSource):
return self.metadata['id']
def get_public_ssh_keys(self):
- return [key['key'] for key in self.metadata['ssh_public_keys']]
+ ssh_keys = [key['key'] for key in self.metadata['ssh_public_keys']]
+
+ akeypre = "AUTHORIZED_KEY="
+ plen = len(akeypre)
+ for tag in self.metadata.get('tags', []):
+ if not tag.startswith(akeypre):
+ continue
+ # keep only the key payload after the AUTHORIZED_KEY= prefix
+ ssh_keys.append(tag[plen:].replace("_", " "))
+
+ return ssh_keys
def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False):
return self.metadata['hostname']
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index 32b57cdd..cf676504 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -1,5 +1,5 @@
# Copyright (C) 2013 Canonical Ltd.
-# Copyright (c) 2018, Joyent, Inc.
+# Copyright 2019 Joyent, Inc.
#
# Author: Ben Howard <ben.howard@canonical.com>
#
@@ -34,6 +34,7 @@ from cloudinit import log as logging
from cloudinit import serial
from cloudinit import sources
from cloudinit import util
+from cloudinit.event import EventType
LOG = logging.getLogger(__name__)
@@ -178,6 +179,7 @@ class DataSourceSmartOS(sources.DataSource):
self.metadata = {}
self.network_data = None
self._network_config = None
+ self.update_events['network'].add(EventType.BOOT)
self.script_base_d = os.path.join(self.paths.get_cpath("scripts"))
@@ -319,6 +321,10 @@ class DataSourceSmartOS(sources.DataSource):
@property
def network_config(self):
+ # sources.clear_cached_data() may set _network_config to '_unset'.
+ if self._network_config == sources.UNSET:
+ self._network_config = None
+
if self._network_config is None:
if self.network_data is not None:
self._network_config = (
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index e6966b31..dd93cfd8 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -9,21 +9,19 @@
# This file is part of cloud-init. See LICENSE file for license information.
import abc
-from collections import namedtuple
import copy
import json
import os
-import six
+from collections import namedtuple
-from cloudinit.atomic_helper import write_json
from cloudinit import importer
from cloudinit import log as logging
from cloudinit import net
-from cloudinit.event import EventType
from cloudinit import type_utils
from cloudinit import user_data as ud
from cloudinit import util
-
+from cloudinit.atomic_helper import write_json
+from cloudinit.event import EventType
from cloudinit.filters import launch_index
from cloudinit.reporting import events
@@ -66,6 +64,13 @@ CLOUD_ID_REGION_PREFIX_MAP = {
'china': ('azure-china', lambda c: c == 'azure'), # only change azure
}
+# NetworkConfigSource represents the canonical list of network config sources
+# that cloud-init knows about. (Python 2.7 lacks PEP 435, so use a singleton
+# namedtuple as an enum; see https://stackoverflow.com/a/6971002)
+_NETCFG_SOURCE_NAMES = ('cmdline', 'ds', 'system_cfg', 'fallback', 'initramfs')
+NetworkConfigSource = namedtuple('NetworkConfigSource',
+ _NETCFG_SOURCE_NAMES)(*_NETCFG_SOURCE_NAMES)
+
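The singleton-namedtuple trick gives an enum whose members are plain strings; a tiny sketch of the behavior relied on:

from collections import namedtuple

_NAMES = ('cmdline', 'ds', 'system_cfg', 'fallback', 'initramfs')
Source = namedtuple('Source', _NAMES)(*_NAMES)

assert Source.cmdline == 'cmdline'  # attribute access yields the string
assert Source.ds in Source          # membership tests work on the tuple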
class DataSourceNotFoundException(Exception):
pass
@@ -129,8 +134,7 @@ URLParams = namedtuple(
'URLParms', ['max_wait_seconds', 'timeout_seconds', 'num_retries'])
-@six.add_metaclass(abc.ABCMeta)
-class DataSource(object):
+class DataSource(metaclass=abc.ABCMeta):
dsmode = DSMODE_NETWORK
default_locale = 'en_US.UTF-8'
@@ -153,6 +157,16 @@ class DataSource(object):
# Track the discovered fallback nic for use in configuration generation.
_fallback_interface = None
+ # The network configuration sources that should be considered for this data
+ # source. (The first source in this list that provides network
+ # configuration will be used without considering any that follow.) This
+ # should always be a subset of the members of NetworkConfigSource with no
+ # duplicate entries.
+ network_config_sources = (NetworkConfigSource.cmdline,
+ NetworkConfigSource.initramfs,
+ NetworkConfigSource.system_cfg,
+ NetworkConfigSource.ds)
+
# read_url_params
url_max_wait = -1 # max_wait < 0 means do not wait
url_timeout = 10 # timeout for each metadata url read attempt
@@ -419,7 +433,7 @@ class DataSource(object):
return self._cloud_name
if self.metadata and self.metadata.get(METADATA_CLOUD_NAME_KEY):
cloud_name = self.metadata.get(METADATA_CLOUD_NAME_KEY)
- if isinstance(cloud_name, six.string_types):
+ if isinstance(cloud_name, str):
self._cloud_name = cloud_name.lower()
else:
self._cloud_name = self._get_cloud_name().lower()
@@ -474,6 +488,16 @@ class DataSource(object):
def get_public_ssh_keys(self):
return normalize_pubkey_data(self.metadata.get('public-keys'))
+ def publish_host_keys(self, hostkeys):
+ """Publish the public SSH host keys (found in /etc/ssh/*.pub).
+
+ @param hostkeys: List of host key tuples (key_type, key_value),
+ where key_type is the first field in the public key file
+ (e.g. 'ssh-rsa') and key_value is the key itself
+ (e.g. 'AAAAB3NzaC1y...').
+ """
+ pass
+
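# A sketch of how a caller might assemble the `hostkeys` argument; the
# helper name and key-file layout are illustrative, not from this patch.
import os

def collect_hostkeys(ssh_dir='/etc/ssh'):
    """Return [(key_type, key_value), ...] from the *.pub files in ssh_dir."""
    hostkeys = []
    for name in sorted(os.listdir(ssh_dir)):
        if not name.endswith('.pub'):
            continue
        with open(os.path.join(ssh_dir, name)) as stream:
            parts = stream.read().split()
        if len(parts) >= 2:  # e.g. 'ssh-rsa AAAAB3NzaC1y... root@host'
            hostkeys.append((parts[0], parts[1]))
    return hostkeys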
def _remap_device(self, short_name):
# LP: #611137
# the metadata service may believe that devices are named 'sda'
@@ -541,7 +565,7 @@ class DataSource(object):
defhost = "localhost"
domain = defdomain
- if not self.metadata or 'local-hostname' not in self.metadata:
+ if not self.metadata or not self.metadata.get('local-hostname'):
if metadata_only:
return None
# this is somewhat questionable really.
@@ -691,8 +715,8 @@ def normalize_pubkey_data(pubkey_data):
if not pubkey_data:
return keys
- if isinstance(pubkey_data, six.string_types):
- return str(pubkey_data).splitlines()
+ if isinstance(pubkey_data, str):
+ return pubkey_data.splitlines()
if isinstance(pubkey_data, (list, set)):
return list(pubkey_data)
@@ -702,7 +726,7 @@ def normalize_pubkey_data(pubkey_data):
# lp:506332 uec metadata service responds with
# data that makes boto populate a string for 'klist' rather
# than a list.
- if isinstance(klist, six.string_types):
+ if isinstance(klist, str):
klist = [klist]
if isinstance(klist, (list, set)):
for pkey in klist:
@@ -810,7 +834,7 @@ def convert_vendordata(data, recurse=True):
"""
if not data:
return None
- if isinstance(data, six.string_types):
+ if isinstance(data, str):
return data
if isinstance(data, list):
return copy.deepcopy(data)
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index e5696b1f..fc760581 100644..100755
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -7,6 +7,7 @@ import re
import socket
import struct
import time
+import textwrap
from cloudinit.net import dhcp
from cloudinit import stages
@@ -16,9 +17,162 @@ from xml.etree import ElementTree
from cloudinit import url_helper
from cloudinit import util
+from cloudinit import version
+from cloudinit import distros
+from cloudinit.reporting import events
+from cloudinit.net.dhcp import EphemeralDHCPv4
+from datetime import datetime
LOG = logging.getLogger(__name__)
+# This fallback value matches the encoded format used in DHCP lease files;
+# it is applied when no endpoint can be found in any lease file.
+DEFAULT_WIRESERVER_ENDPOINT = "a8:3f:81:10"
+
+BOOT_EVENT_TYPE = 'boot-telemetry'
+SYSTEMINFO_EVENT_TYPE = 'system-info'
+DIAGNOSTIC_EVENT_TYPE = 'diagnostic'
+
+azure_ds_reporter = events.ReportEventStack(
+ name="azure-ds",
+ description="initialize reporter for azure ds",
+ reporting_enabled=True)
+
+
+def azure_ds_telemetry_reporter(func):
+ def impl(*args, **kwargs):
+ with events.ReportEventStack(
+ name=func.__name__,
+ description=func.__name__,
+ parent=azure_ds_reporter):
+ return func(*args, **kwargs)
+ return impl
+
+
+def is_byte_swapped(previous_id, current_id):
+ """
+    Azure stores the instance ID with an incorrect byte ordering for the
+    first three parts. This corrects the byte order such that it is
+    consistent with that returned by the metadata service.
+ """
+ if previous_id == current_id:
+ return False
+
+ def swap_bytestring(s, width=2):
+        dd = textwrap.wrap(s, width)
+ dd.reverse()
+ return ''.join(dd)
+
+ parts = current_id.split('-')
+ swapped_id = '-'.join([
+ swap_bytestring(parts[0]),
+ swap_bytestring(parts[1]),
+ swap_bytestring(parts[2]),
+ parts[3],
+ parts[4]
+ ])
+
+ return previous_id == swapped_id
+
+
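# A worked example with hypothetical IDs: only the first three UUID groups
# are byte-swapped, so these two values identify the same instance.
previous_id = 'd0df4c54-4ecb-4a4b-9954-5bdf3ed5c3b8'  # cached instance-id
current_id = '544cdfd0-cb4e-4b4a-9954-5bdf3ed5c3b8'   # freshly read value
# '544cdfd0' -> 'd0df4c54', 'cb4e' -> '4ecb', '4b4a' -> '4a4b'
assert is_byte_swapped(previous_id, current_id)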
+@azure_ds_telemetry_reporter
+def get_boot_telemetry():
+ """Report timestamps related to kernel initialization and systemd
+ activation of cloud-init"""
+ if not distros.uses_systemd():
+ raise RuntimeError(
+ "distro not using systemd, skipping boot telemetry")
+
+ LOG.debug("Collecting boot telemetry")
+ try:
+ kernel_start = float(time.time()) - float(util.uptime())
+ except ValueError:
+ raise RuntimeError("Failed to determine kernel start timestamp")
+
+ try:
+ out, _ = util.subp(['/bin/systemctl',
+ 'show', '-p',
+ 'UserspaceTimestampMonotonic'],
+ capture=True)
+ tsm = None
+ if out and '=' in out:
+ tsm = out.split("=")[1]
+
+ if not tsm:
+ raise RuntimeError("Failed to parse "
+ "UserspaceTimestampMonotonic from systemd")
+
+ user_start = kernel_start + (float(tsm) / 1000000)
+ except util.ProcessExecutionError as e:
+ raise RuntimeError("Failed to get UserspaceTimestampMonotonic: %s"
+ % e)
+ except ValueError as e:
+ raise RuntimeError("Failed to parse "
+ "UserspaceTimestampMonotonic from systemd: %s"
+ % e)
+
+ try:
+ out, _ = util.subp(['/bin/systemctl', 'show',
+ 'cloud-init-local', '-p',
+ 'InactiveExitTimestampMonotonic'],
+ capture=True)
+ tsm = None
+ if out and '=' in out:
+ tsm = out.split("=")[1]
+ if not tsm:
+ raise RuntimeError("Failed to parse "
+ "InactiveExitTimestampMonotonic from systemd")
+
+ cloudinit_activation = kernel_start + (float(tsm) / 1000000)
+ except util.ProcessExecutionError as e:
+ raise RuntimeError("Failed to get InactiveExitTimestampMonotonic: %s"
+ % e)
+ except ValueError as e:
+ raise RuntimeError("Failed to parse "
+ "InactiveExitTimestampMonotonic from systemd: %s"
+ % e)
+
+ evt = events.ReportingEvent(
+ BOOT_EVENT_TYPE, 'boot-telemetry',
+ "kernel_start=%s user_start=%s cloudinit_activation=%s" %
+ (datetime.utcfromtimestamp(kernel_start).isoformat() + 'Z',
+ datetime.utcfromtimestamp(user_start).isoformat() + 'Z',
+ datetime.utcfromtimestamp(cloudinit_activation).isoformat() + 'Z'),
+ events.DEFAULT_EVENT_ORIGIN)
+ events.report_event(evt)
+
+    # return the event for unit testing purposes
+ return evt
+
+
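# The timestamp arithmetic above, reduced to a sketch with made-up values:
# systemd's *TimestampMonotonic properties are microseconds since kernel
# start, so adding them to kernel_start (the wall-clock time of boot)
# yields absolute timestamps.
kernel_start = 1560000000.0          # time.time() - util.uptime()
tsm = 8500000.0                      # UserspaceTimestampMonotonic=8500000
user_start = kernel_start + tsm / 1000000
assert user_start == 1560000008.5    # userspace began 8.5s after the kernel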
+@azure_ds_telemetry_reporter
+def get_system_info():
+ """Collect and report system information"""
+ info = util.system_info()
+ evt = events.ReportingEvent(
+ SYSTEMINFO_EVENT_TYPE, 'system information',
+ "cloudinit_version=%s, kernel_version=%s, variant=%s, "
+ "distro_name=%s, distro_version=%s, flavor=%s, "
+ "python_version=%s" %
+ (version.version_string(), info['release'], info['variant'],
+ info['dist'][0], info['dist'][1], info['dist'][2],
+ info['python']), events.DEFAULT_EVENT_ORIGIN)
+ events.report_event(evt)
+
+    # return the event for unit testing purposes
+ return evt
+
+
+def report_diagnostic_event(msg):
+    """Report a diagnostic event"""
+    evt = events.ReportingEvent(
+        DIAGNOSTIC_EVENT_TYPE, 'diagnostic message',
+        msg, events.DEFAULT_EVENT_ORIGIN)
+ events.report_event(evt)
+
+    # return the event for unit testing purposes
+ return evt
+
@contextmanager
def cd(newdir):
@@ -56,14 +210,16 @@ class AzureEndpointHttpClient(object):
if secure:
headers = self.headers.copy()
headers.update(self.extra_secure_headers)
- return url_helper.read_file_or_url(url, headers=headers)
+ return url_helper.read_file_or_url(url, headers=headers, timeout=5,
+ retries=10)
def post(self, url, data=None, extra_headers=None):
headers = self.headers
if extra_headers is not None:
headers = self.headers.copy()
headers.update(extra_headers)
- return url_helper.read_file_or_url(url, data=data, headers=headers)
+ return url_helper.read_file_or_url(url, data=data, headers=headers,
+ timeout=5, retries=10)
class GoalState(object):
@@ -119,6 +275,7 @@ class OpenSSLManager(object):
def clean_up(self):
util.del_dir(self.tmpdir)
+ @azure_ds_telemetry_reporter
def generate_certificate(self):
LOG.debug('Generating certificate for communication with fabric...')
if self.certificate is not None:
@@ -138,9 +295,40 @@ class OpenSSLManager(object):
self.certificate = certificate
LOG.debug('New certificate generated.')
- def parse_certificates(self, certificates_xml):
- tag = ElementTree.fromstring(certificates_xml).find(
- './/Data')
+ @staticmethod
+ @azure_ds_telemetry_reporter
+ def _run_x509_action(action, cert):
+ cmd = ['openssl', 'x509', '-noout', action]
+ result, _ = util.subp(cmd, data=cert)
+ return result
+
+ @azure_ds_telemetry_reporter
+ def _get_ssh_key_from_cert(self, certificate):
+ pub_key = self._run_x509_action('-pubkey', certificate)
+ keygen_cmd = ['ssh-keygen', '-i', '-m', 'PKCS8', '-f', '/dev/stdin']
+ ssh_key, _ = util.subp(keygen_cmd, data=pub_key)
+ return ssh_key
+
+ @azure_ds_telemetry_reporter
+ def _get_fingerprint_from_cert(self, certificate):
+        """openssl x509 formats fingerprints like so:
+ 'SHA1 Fingerprint=07:3E:19:D1:4D:1C:79:92:24:C6:A0:FD:8D:DA:\
+ B6:A8:BF:27:D4:73\n'
+
+        Azure control plane passes that fingerprint like so:
+ '073E19D14D1C799224C6A0FD8DDAB6A8BF27D473'
+ """
+ raw_fp = self._run_x509_action('-fingerprint', certificate)
+ eq = raw_fp.find('=')
+ octets = raw_fp[eq+1:-1].split(':')
+ return ''.join(octets)
+
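# A worked example of the normalization described in the docstring above
# (sample fingerprint only):
raw_fp = ('SHA1 Fingerprint=07:3E:19:D1:4D:1C:79:92:24:C6:A0:FD:'
          '8D:DA:B6:A8:BF:27:D4:73\n')
eq = raw_fp.find('=')
octets = raw_fp[eq + 1:-1].split(':')  # strip the label and trailing newline
assert ''.join(octets) == '073E19D14D1C799224C6A0FD8DDAB6A8BF27D473'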
+ @azure_ds_telemetry_reporter
+ def _decrypt_certs_from_xml(self, certificates_xml):
+        """Decrypt the certificates XML document using our private key;
+ return the list of certs and private keys contained in the doc.
+ """
+ tag = ElementTree.fromstring(certificates_xml).find('.//Data')
certificates_content = tag.text
lines = [
b'MIME-Version: 1.0',
@@ -151,32 +339,31 @@ class OpenSSLManager(object):
certificates_content.encode('utf-8'),
]
with cd(self.tmpdir):
- with open('Certificates.p7m', 'wb') as f:
- f.write(b'\n'.join(lines))
out, _ = util.subp(
- 'openssl cms -decrypt -in Certificates.p7m -inkey'
+ 'openssl cms -decrypt -in /dev/stdin -inkey'
' {private_key} -recip {certificate} | openssl pkcs12 -nodes'
' -password pass:'.format(**self.certificate_names),
- shell=True)
- private_keys, certificates = [], []
+ shell=True, data=b'\n'.join(lines))
+ return out
+
+ @azure_ds_telemetry_reporter
+ def parse_certificates(self, certificates_xml):
+ """Given the Certificates XML document, return a dictionary of
+ fingerprints and associated SSH keys derived from the certs."""
+ out = self._decrypt_certs_from_xml(certificates_xml)
current = []
+ keys = {}
for line in out.splitlines():
current.append(line)
if re.match(r'[-]+END .*?KEY[-]+$', line):
- private_keys.append('\n'.join(current))
+ # ignore private_keys
current = []
elif re.match(r'[-]+END .*?CERTIFICATE[-]+$', line):
- certificates.append('\n'.join(current))
+ certificate = '\n'.join(current)
+ ssh_key = self._get_ssh_key_from_cert(certificate)
+ fingerprint = self._get_fingerprint_from_cert(certificate)
+ keys[fingerprint] = ssh_key
current = []
- keys = []
- for certificate in certificates:
- with cd(self.tmpdir):
- public_key, _ = util.subp(
- 'openssl x509 -noout -pubkey |'
- 'ssh-keygen -i -m PKCS8 -f /dev/stdin',
- data=certificate,
- shell=True)
- keys.append(public_key)
return keys
@@ -206,7 +393,6 @@ class WALinuxAgentShim(object):
self.dhcpoptions = dhcp_options
self._endpoint = None
self.openssl_manager = None
- self.values = {}
self.lease_file = fallback_lease_file
def clean_up(self):
@@ -241,14 +427,21 @@ class WALinuxAgentShim(object):
return socket.inet_ntoa(packed_bytes)
@staticmethod
+ @azure_ds_telemetry_reporter
def _networkd_get_value_from_leases(leases_d=None):
return dhcp.networkd_get_option_from_leases(
'OPTION_245', leases_d=leases_d)
@staticmethod
+ @azure_ds_telemetry_reporter
def _get_value_from_leases_file(fallback_lease_file):
leases = []
- content = util.load_file(fallback_lease_file)
+ try:
+ content = util.load_file(fallback_lease_file)
+ except IOError as ex:
+ LOG.error("Failed to read %s: %s", fallback_lease_file, ex)
+ return None
+
LOG.debug("content is %s", content)
option_name = _get_dhcp_endpoint_option_name()
for line in content.splitlines():
@@ -263,6 +456,7 @@ class WALinuxAgentShim(object):
return leases[-1]
@staticmethod
+ @azure_ds_telemetry_reporter
def _load_dhclient_json():
dhcp_options = {}
hooks_dir = WALinuxAgentShim._get_hooks_dir()
@@ -281,6 +475,7 @@ class WALinuxAgentShim(object):
return dhcp_options
@staticmethod
+ @azure_ds_telemetry_reporter
def _get_value_from_dhcpoptions(dhcp_options):
if dhcp_options is None:
return None
@@ -294,22 +489,26 @@ class WALinuxAgentShim(object):
return _value
@staticmethod
+ @azure_ds_telemetry_reporter
def find_endpoint(fallback_lease_file=None, dhcp245=None):
value = None
if dhcp245 is not None:
value = dhcp245
LOG.debug("Using Azure Endpoint from dhcp options")
if value is None:
+ report_diagnostic_event("No Azure endpoint from dhcp options")
LOG.debug('Finding Azure endpoint from networkd...')
value = WALinuxAgentShim._networkd_get_value_from_leases()
if value is None:
# Option-245 stored in /run/cloud-init/dhclient.hooks/<ifc>.json
# a dhclient exit hook that calls cloud-init-dhclient-hook
+ report_diagnostic_event("No Azure endpoint from networkd")
LOG.debug('Finding Azure endpoint from hook json...')
dhcp_options = WALinuxAgentShim._load_dhclient_json()
value = WALinuxAgentShim._get_value_from_dhcpoptions(dhcp_options)
if value is None:
# Fallback and check the leases file if unsuccessful
+ report_diagnostic_event("No Azure endpoint from dhclient logs")
LOG.debug("Unable to find endpoint in dhclient logs. "
" Falling back to check lease files")
if fallback_lease_file is None:
@@ -320,16 +519,22 @@ class WALinuxAgentShim(object):
fallback_lease_file)
value = WALinuxAgentShim._get_value_from_leases_file(
fallback_lease_file)
-
if value is None:
- raise ValueError('No endpoint found.')
+ msg = "No lease found; using default endpoint"
+ report_diagnostic_event(msg)
+ LOG.warning(msg)
+ value = DEFAULT_WIRESERVER_ENDPOINT
endpoint_ip_address = WALinuxAgentShim.get_ip_from_lease_value(value)
- LOG.debug('Azure endpoint found at %s', endpoint_ip_address)
+ msg = 'Azure endpoint found at %s' % endpoint_ip_address
+ report_diagnostic_event(msg)
+ LOG.debug(msg)
return endpoint_ip_address
- def register_with_azure_and_fetch_data(self):
- self.openssl_manager = OpenSSLManager()
+ @azure_ds_telemetry_reporter
+ def register_with_azure_and_fetch_data(self, pubkey_info=None):
+ if self.openssl_manager is None:
+ self.openssl_manager = OpenSSLManager()
http_client = AzureEndpointHttpClient(self.openssl_manager.certificate)
LOG.info('Registering with Azure...')
attempts = 0
@@ -337,27 +542,52 @@ class WALinuxAgentShim(object):
try:
response = http_client.get(
'http://{0}/machine/?comp=goalstate'.format(self.endpoint))
- except Exception:
+ except Exception as e:
if attempts < 10:
time.sleep(attempts + 1)
else:
+ report_diagnostic_event(
+ "failed to register with Azure: %s" % e)
raise
else:
break
attempts += 1
LOG.debug('Successfully fetched GoalState XML.')
goal_state = GoalState(response.contents, http_client)
- public_keys = []
- if goal_state.certificates_xml is not None:
+ report_diagnostic_event("container_id %s" % goal_state.container_id)
+ ssh_keys = []
+ if goal_state.certificates_xml is not None and pubkey_info is not None:
LOG.debug('Certificate XML found; parsing out public keys.')
- public_keys = self.openssl_manager.parse_certificates(
+ keys_by_fingerprint = self.openssl_manager.parse_certificates(
goal_state.certificates_xml)
- data = {
- 'public-keys': public_keys,
- }
+ ssh_keys = self._filter_pubkeys(keys_by_fingerprint, pubkey_info)
self._report_ready(goal_state, http_client)
- return data
+ return {'public-keys': ssh_keys}
+
+ def _filter_pubkeys(self, keys_by_fingerprint, pubkey_info):
+ """cloud-init expects a straightforward array of keys to be dropped
+ into the user's authorized_keys file. Azure control plane exposes
+ multiple public keys to the VM via wireserver. Select just the
+ user's key(s) and return them, ignoring any other certs.
+ """
+ keys = []
+ for pubkey in pubkey_info:
+ if 'value' in pubkey and pubkey['value']:
+ keys.append(pubkey['value'])
+ elif 'fingerprint' in pubkey and pubkey['fingerprint']:
+ fingerprint = pubkey['fingerprint']
+ if fingerprint in keys_by_fingerprint:
+ keys.append(keys_by_fingerprint[fingerprint])
+ else:
+ LOG.warning("ovf-env.xml specified PublicKey fingerprint "
+ "%s not found in goalstate XML", fingerprint)
+ else:
+ LOG.warning("ovf-env.xml specified PublicKey with neither "
+ "value nor fingerprint: %s", pubkey)
+ return keys
+
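# A sketch with hypothetical data: the two shapes a pubkey_info entry can
# take, and how they resolve against the parsed certificates.
#
#   keys_by_fingerprint = {'073E19D14D1C79': 'ssh-rsa AAA...from-cert'}
#   pubkey_info = [
#       {'fingerprint': '073E19D14D1C79'},   # looked up via the certs
#       {'value': 'ssh-rsa AAA...literal'},  # used verbatim
#       {'path': '/unused'},                 # neither field: warned, skipped
#   ]
#
# _filter_pubkeys(keys_by_fingerprint, pubkey_info) would return
#   ['ssh-rsa AAA...from-cert', 'ssh-rsa AAA...literal']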
+ @azure_ds_telemetry_reporter
def _report_ready(self, goal_state, http_client):
LOG.debug('Reporting ready to Azure fabric.')
document = self.REPORT_READY_XML_TEMPLATE.format(
@@ -365,20 +595,49 @@ class WALinuxAgentShim(object):
container_id=goal_state.container_id,
instance_id=goal_state.instance_id,
)
- http_client.post(
- "http://{0}/machine?comp=health".format(self.endpoint),
- data=document,
- extra_headers={'Content-Type': 'text/xml; charset=utf-8'},
- )
+        # The host will collect KVPs when cloud-init reports ready, but
+        # some KVPs might still be in the queue. We yield the scheduler
+        # to make sure all KVPs up to this point are processed.
+ time.sleep(0)
+ try:
+ http_client.post(
+ "http://{0}/machine?comp=health".format(self.endpoint),
+ data=document,
+ extra_headers={'Content-Type': 'text/xml; charset=utf-8'},
+ )
+ except Exception as e:
+ report_diagnostic_event("exception while reporting ready: %s" % e)
+ raise
+
LOG.info('Reported ready to Azure fabric.')
-def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None):
+@azure_ds_telemetry_reporter
+def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None,
+ pubkey_info=None):
shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file,
dhcp_options=dhcp_opts)
try:
- return shim.register_with_azure_and_fetch_data()
+ return shim.register_with_azure_and_fetch_data(pubkey_info=pubkey_info)
finally:
shim.clean_up()
+
+class EphemeralDHCPv4WithReporting(object):
+ def __init__(self, reporter, nic=None):
+ self.reporter = reporter
+ self.ephemeralDHCPv4 = EphemeralDHCPv4(iface=nic)
+
+ def __enter__(self):
+ with events.ReportEventStack(
+ name="obtain-dhcp-lease",
+ description="obtain dhcp lease",
+ parent=self.reporter):
+ return self.ephemeralDHCPv4.__enter__()
+
+ def __exit__(self, excp_type, excp_value, excp_traceback):
+ self.ephemeralDHCPv4.__exit__(
+ excp_type, excp_value, excp_traceback)
+
+
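# A usage sketch (not from this patch; needs a DHCP-capable NIC at runtime).
# The wrapper makes lease acquisition appear in the telemetry event stack;
# 'unknown-245' is the lease key under which dhclient exposes the DHCP
# option carrying Azure's wireserver address.
def find_endpoint_with_reporting(nic=None):
    with EphemeralDHCPv4WithReporting(azure_ds_reporter, nic) as lease:
        return lease.get('unknown-245', DEFAULT_WIRESERVER_ENDPOINT)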
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
index 9c29ceac..441db506 100644
--- a/cloudinit/sources/helpers/openstack.py
+++ b/cloudinit/sources/helpers/openstack.py
@@ -12,15 +12,12 @@ import copy
import functools
import os
-import six
-
from cloudinit import ec2_utils
from cloudinit import log as logging
from cloudinit import net
from cloudinit import sources
from cloudinit import url_helper
from cloudinit import util
-
from cloudinit.sources import BrokenMetadata
# See https://docs.openstack.org/user-guide/cli-config-drive.html
@@ -67,7 +64,7 @@ OS_VERSIONS = (
OS_ROCKY,
)
-PHYSICAL_TYPES = (
+KNOWN_PHYSICAL_TYPES = (
None,
'bgpovs', # not present in OpenStack upstream but used on OVH cloud.
'bridge',
@@ -163,8 +160,7 @@ class SourceMixin(object):
return device
-@six.add_metaclass(abc.ABCMeta)
-class BaseReader(object):
+class BaseReader(metaclass=abc.ABCMeta):
def __init__(self, base_path):
self.base_path = base_path
@@ -227,7 +223,7 @@ class BaseReader(object):
"""
load_json_anytype = functools.partial(
- util.load_json, root_types=(dict, list) + six.string_types)
+ util.load_json, root_types=(dict, list, str))
def datafiles(version):
files = {}
@@ -584,25 +580,31 @@ def convert_net_json(network_json=None, known_macs=None):
if n['link'] == link['id']]:
subnet = dict((k, v) for k, v in network.items()
if k in valid_keys['subnet'])
- if 'dhcp' in network['type']:
- t = 'dhcp6' if network['type'].startswith('ipv6') else 'dhcp4'
- subnet.update({
- 'type': t,
- })
- else:
+
+ if network['type'] == 'ipv4_dhcp':
+ subnet.update({'type': 'dhcp4'})
+ elif network['type'] == 'ipv6_dhcp':
+ subnet.update({'type': 'dhcp6'})
+ elif network['type'] in ['ipv6_slaac', 'ipv6_dhcpv6-stateless',
+ 'ipv6_dhcpv6-stateful']:
+ subnet.update({'type': network['type']})
+ elif network['type'] in ['ipv4', 'ipv6']:
subnet.update({
'type': 'static',
'address': network.get('ip_address'),
})
+
+ # Enable accept_ra for stateful and legacy ipv6_dhcp types
+ if network['type'] in ['ipv6_dhcpv6-stateful', 'ipv6_dhcp']:
+ cfg.update({'accept-ra': True})
+
if network['type'] == 'ipv4':
subnet['ipv4'] = True
if network['type'] == 'ipv6':
subnet['ipv6'] = True
subnets.append(subnet)
cfg.update({'subnets': subnets})
- if link['type'] in PHYSICAL_TYPES:
- cfg.update({'type': 'physical', 'mac_address': link_mac_addr})
- elif link['type'] in ['bond']:
+ if link['type'] in ['bond']:
params = {}
if link_mac_addr:
params['mac_address'] = link_mac_addr
@@ -641,8 +643,10 @@ def convert_net_json(network_json=None, known_macs=None):
curinfo.update({'mac': link['vlan_mac_address'],
'name': name})
else:
- raise ValueError(
- 'Unknown network_data link type: %s' % link['type'])
+ if link['type'] not in KNOWN_PHYSICAL_TYPES:
+ LOG.warning('Unknown network_data link type (%s); treating as'
+ ' physical', link['type'])
+ cfg.update({'type': 'physical', 'mac_address': link_mac_addr})
config.append(cfg)
link_id_info[curinfo['id']] = curinfo
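# An illustrative trace of the two hunks above (hypothetical input): a
# network_json entry such as
#   {'type': 'ipv6_dhcpv6-stateful', 'link': 'tap0', 'id': 'network0'}
# now yields subnet {'type': 'ipv6_dhcpv6-stateful'} and additionally sets
# {'accept-ra': True} on the link config, while a link whose type is absent
# from KNOWN_PHYSICAL_TYPES logs a warning and is treated as physical
# instead of raising ValueError.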
diff --git a/cloudinit/sources/helpers/vmware/imc/config_custom_script.py b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py
index a7d4ad91..9f14770e 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_custom_script.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py
@@ -1,5 +1,5 @@
# Copyright (C) 2017 Canonical Ltd.
-# Copyright (C) 2017 VMware Inc.
+# Copyright (C) 2017-2019 VMware Inc.
#
# Author: Maitreyee Saikia <msaikia@vmware.com>
#
@@ -8,7 +8,6 @@
import logging
import os
import stat
-from textwrap import dedent
from cloudinit import util
@@ -20,12 +19,15 @@ class CustomScriptNotFound(Exception):
class CustomScriptConstant(object):
- RC_LOCAL = "/etc/rc.local"
- POST_CUST_TMP_DIR = "/root/.customization"
- POST_CUST_RUN_SCRIPT_NAME = "post-customize-guest.sh"
- POST_CUST_RUN_SCRIPT = os.path.join(POST_CUST_TMP_DIR,
- POST_CUST_RUN_SCRIPT_NAME)
- POST_REBOOT_PENDING_MARKER = "/.guest-customization-post-reboot-pending"
+ CUSTOM_TMP_DIR = "/root/.customization"
+
+ # The user defined custom script
+ CUSTOM_SCRIPT_NAME = "customize.sh"
+ CUSTOM_SCRIPT = os.path.join(CUSTOM_TMP_DIR,
+ CUSTOM_SCRIPT_NAME)
+ POST_CUSTOM_PENDING_MARKER = "/.guest-customization-post-reboot-pending"
+ # The cc_scripts_per_instance script to launch custom script
+ POST_CUSTOM_SCRIPT_NAME = "post-customize-guest.sh"
class RunCustomScript(object):
@@ -39,10 +41,19 @@ class RunCustomScript(object):
raise CustomScriptNotFound("Script %s not found!! "
"Cannot execute custom script!"
% self.scriptpath)
+
+ util.ensure_dir(CustomScriptConstant.CUSTOM_TMP_DIR)
+
+ LOG.debug("Copying custom script to %s",
+ CustomScriptConstant.CUSTOM_SCRIPT)
+ util.copy(self.scriptpath, CustomScriptConstant.CUSTOM_SCRIPT)
+
# Strip any CR characters from the decoded script
- util.load_file(self.scriptpath).replace("\r", "")
- st = os.stat(self.scriptpath)
- os.chmod(self.scriptpath, st.st_mode | stat.S_IEXEC)
+ content = util.load_file(
+ CustomScriptConstant.CUSTOM_SCRIPT).replace("\r", "")
+ util.write_file(CustomScriptConstant.CUSTOM_SCRIPT,
+ content,
+ mode=0o544)
class PreCustomScript(RunCustomScript):
@@ -50,104 +61,34 @@ class PreCustomScript(RunCustomScript):
"""Executing custom script with precustomization argument."""
LOG.debug("Executing pre-customization script")
self.prepare_script()
- util.subp(["/bin/sh", self.scriptpath, "precustomization"])
+ util.subp([CustomScriptConstant.CUSTOM_SCRIPT, "precustomization"])
class PostCustomScript(RunCustomScript):
- def __init__(self, scriptname, directory):
+ def __init__(self, scriptname, directory, ccScriptsDir):
super(PostCustomScript, self).__init__(scriptname, directory)
- # Determine when to run custom script. When postreboot is True,
- # the user uploaded script will run as part of rc.local after
- # the machine reboots. This is determined by presence of rclocal.
- # When postreboot is False, script will run as part of cloud-init.
- self.postreboot = False
-
- def _install_post_reboot_agent(self, rclocal):
- """
- Install post-reboot agent for running custom script after reboot.
- As part of this process, we are editing the rclocal file to run a
- VMware script, which in turn is resposible for handling the user
- script.
- @param: path to rc local.
- """
- LOG.debug("Installing post-reboot customization from %s to %s",
- self.directory, rclocal)
- if not self.has_previous_agent(rclocal):
- LOG.info("Adding post-reboot customization agent to rc.local")
- new_content = dedent("""
- # Run post-reboot guest customization
- /bin/sh %s
- exit 0
- """) % CustomScriptConstant.POST_CUST_RUN_SCRIPT
- existing_rclocal = util.load_file(rclocal).replace('exit 0\n', '')
- st = os.stat(rclocal)
- # "x" flag should be set
- mode = st.st_mode | stat.S_IEXEC
- util.write_file(rclocal, existing_rclocal + new_content, mode)
-
- else:
- # We don't need to update rclocal file everytime a customization
- # is requested. It just needs to be done for the first time.
- LOG.info("Post-reboot guest customization agent is already "
- "registered in rc.local")
- LOG.debug("Installing post-reboot customization agent finished: %s",
- self.postreboot)
-
- def has_previous_agent(self, rclocal):
- searchstring = "# Run post-reboot guest customization"
- if searchstring in open(rclocal).read():
- return True
- return False
-
- def find_rc_local(self):
- """
- Determine if rc local is present.
- """
- rclocal = ""
- if os.path.exists(CustomScriptConstant.RC_LOCAL):
- LOG.debug("rc.local detected.")
- # resolving in case of symlink
- rclocal = os.path.realpath(CustomScriptConstant.RC_LOCAL)
- LOG.debug("rc.local resolved to %s", rclocal)
- else:
- LOG.warning("Can't find rc.local, post-customization "
- "will be run before reboot")
- return rclocal
-
- def install_agent(self):
- rclocal = self.find_rc_local()
- if rclocal:
- self._install_post_reboot_agent(rclocal)
- self.postreboot = True
+ self.ccScriptsDir = ccScriptsDir
+ self.ccScriptPath = os.path.join(
+ ccScriptsDir,
+ CustomScriptConstant.POST_CUSTOM_SCRIPT_NAME)
def execute(self):
"""
- This method executes post-customization script before or after reboot
- based on the presence of rc local.
+        Copy the post-customization run script to the
+        cc_scripts_per_instance directory and let that module
+        run the post-customization script.
"""
self.prepare_script()
- self.install_agent()
- if not self.postreboot:
- LOG.warning("Executing post-customization script inline")
- util.subp(["/bin/sh", self.scriptpath, "postcustomization"])
- else:
- LOG.debug("Scheduling custom script to run post reboot")
- if not os.path.isdir(CustomScriptConstant.POST_CUST_TMP_DIR):
- os.mkdir(CustomScriptConstant.POST_CUST_TMP_DIR)
- # Script "post-customize-guest.sh" and user uploaded script are
- # are present in the same directory and needs to copied to a temp
- # directory to be executed post reboot. User uploaded script is
- # saved as customize.sh in the temp directory.
- # post-customize-guest.sh excutes customize.sh after reboot.
- LOG.debug("Copying post-customization script")
- util.copy(self.scriptpath,
- CustomScriptConstant.POST_CUST_TMP_DIR + "/customize.sh")
- LOG.debug("Copying script to run post-customization script")
- util.copy(
- os.path.join(self.directory,
- CustomScriptConstant.POST_CUST_RUN_SCRIPT_NAME),
- CustomScriptConstant.POST_CUST_RUN_SCRIPT)
- LOG.info("Creating post-reboot pending marker")
- util.ensure_file(CustomScriptConstant.POST_REBOOT_PENDING_MARKER)
+
+ LOG.debug("Copying post customize run script to %s",
+ self.ccScriptPath)
+ util.copy(
+ os.path.join(self.directory,
+ CustomScriptConstant.POST_CUSTOM_SCRIPT_NAME),
+ self.ccScriptPath)
+ st = os.stat(self.ccScriptPath)
+ os.chmod(self.ccScriptPath, st.st_mode | stat.S_IEXEC)
+ LOG.info("Creating post customization pending marker")
+ util.ensure_file(CustomScriptConstant.POST_CUSTOM_PENDING_MARKER)
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_error.py b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py
index db5a00dc..65ae7390 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_error.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py
@@ -10,5 +10,6 @@ class GuestCustErrorEnum(object):
"""Specifies different errors of Guest Customization engine"""
GUESTCUST_ERROR_SUCCESS = 0
+ GUESTCUST_ERROR_SCRIPT_DISABLED = 6
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
index a590f323..3d369d04 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
@@ -7,6 +7,7 @@
import logging
import os
+import re
import time
from cloudinit import util
@@ -117,4 +118,40 @@ def enable_nics(nics):
logger.warning("Can't connect network interfaces after %d attempts",
enableNicsWaitRetries)
+
+def get_tools_config(section, key, defaultVal):
+ """ Return the value of [section] key from VMTools configuration.
+
+    @param section: String of section to read from VMTools config
+    @param key: String of key to read from the given section
+    @param defaultVal: String value returned when the key cannot be read
+    @returns: String value from key in [section], or defaultVal if
+              [section] is not present or vmware-toolbox-cmd is
+              not installed.
+ """
+
+ if not util.which('vmware-toolbox-cmd'):
+ logger.debug(
+ 'vmware-toolbox-cmd not installed, returning default value')
+ return defaultVal
+
+ retValue = defaultVal
+ cmd = ['vmware-toolbox-cmd', 'config', 'get', section, key]
+
+ try:
+ (outText, _) = util.subp(cmd)
+ m = re.match(r'([^=]+)=(.*)', outText)
+ if m:
+ retValue = m.group(2).strip()
+ logger.debug("Get tools config: [%s] %s = %s",
+ section, key, retValue)
+ else:
+ logger.debug(
+                "Tools config: [%s] %s not found, returning default value: %s",
+ section, key, retValue)
+ except util.ProcessExecutionError as e:
+ logger.error("Failed running %s[%s]", cmd, e.exit_code)
+ logger.exception(e)
+
+ return retValue
+
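# A usage sketch; the section and key names here are assumptions for
# illustration, not taken from this hunk.
enabled = get_tools_config('deployPkg', 'enable-custom-scripts', 'false')
if enabled.lower() != 'true':
    logger.debug('Custom scripts are disabled in the VMTools configuration')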
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py
index 6378e98b..f73b37ed 100644
--- a/cloudinit/sources/tests/test_init.py
+++ b/cloudinit/sources/tests/test_init.py
@@ -3,7 +3,6 @@
import copy
import inspect
import os
-import six
import stat
from cloudinit.event import EventType
@@ -13,7 +12,7 @@ from cloudinit.sources import (
EXPERIMENTAL_TEXT, INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE,
METADATA_UNKNOWN, REDACT_SENSITIVE_VALUE, UNSET, DataSource,
canonical_cloud_id, redact_sensitive_keys)
-from cloudinit.tests.helpers import CiTestCase, skipIf, mock
+from cloudinit.tests.helpers import CiTestCase, mock
from cloudinit.user_data import UserDataProcessor
from cloudinit import util
@@ -422,7 +421,6 @@ class TestDataSource(CiTestCase):
{'network_json': 'is good'},
instance_data['ds']['network_json'])
- @skipIf(not six.PY3, "json serialization on <= py2.7 handles bytes")
def test_get_data_base64encodes_unserializable_bytes(self):
"""On py3, get_data base64encodes any unserializable content."""
tmp = self.tmp_dir()
@@ -440,37 +438,6 @@ class TestDataSource(CiTestCase):
{'key1': 'val1', 'key2': {'key2.1': 'EjM='}},
instance_json['ds']['meta_data'])
- @skipIf(not six.PY2, "json serialization on <= py2.7 handles bytes")
- def test_get_data_handles_bytes_values(self):
- """On py2 get_data handles bytes values without having to b64encode."""
- tmp = self.tmp_dir()
- datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
- custom_metadata={'key1': 'val1', 'key2': {'key2.1': b'\x123'}})
- self.assertTrue(datasource.get_data())
- json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
- content = util.load_file(json_file)
- instance_json = util.load_json(content)
- self.assertEqual([], instance_json['base64_encoded_keys'])
- self.assertEqual(
- {'key1': 'val1', 'key2': {'key2.1': '\x123'}},
- instance_json['ds']['meta_data'])
-
- @skipIf(not six.PY2, "Only python2 hits UnicodeDecodeErrors on non-utf8")
- def test_non_utf8_encoding_logs_warning(self):
- """When non-utf-8 values exist in py2 instance-data is not written."""
- tmp = self.tmp_dir()
- datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
- custom_metadata={'key1': 'val1', 'key2': {'key2.1': b'ab\xaadef'}})
- self.assertTrue(datasource.get_data())
- json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
- self.assertFalse(os.path.exists(json_file))
- self.assertIn(
- "WARNING: Error persisting instance-data.json: 'utf8' codec can't"
- " decode byte 0xaa in position 2: invalid start byte",
- self.logs.getvalue())
-
def test_get_hostname_subclass_support(self):
"""Validate get_hostname signature on all subclasses of DataSource."""
# Use inspect.getfullargspec when we drop py2.6 and py2.7
diff --git a/cloudinit/sources/tests/test_oracle.py b/cloudinit/sources/tests/test_oracle.py
index 97d62947..abf3d359 100644
--- a/cloudinit/sources/tests/test_oracle.py
+++ b/cloudinit/sources/tests/test_oracle.py
@@ -1,27 +1,69 @@
# This file is part of cloud-init. See LICENSE file for license information.
from cloudinit.sources import DataSourceOracle as oracle
-from cloudinit.sources import BrokenMetadata
+from cloudinit.sources import BrokenMetadata, NetworkConfigSource
from cloudinit import helpers
from cloudinit.tests import helpers as test_helpers
from textwrap import dedent
import argparse
+import copy
import httpretty
import json
-import mock
import os
-import six
import uuid
+from unittest import mock
DS_PATH = "cloudinit.sources.DataSourceOracle"
MD_VER = "2013-10-17"
+# `curl -L http://169.254.169.254/opc/v1/vnics/` on an Oracle Bare Metal Machine
+# with a secondary VNIC attached (vnicId truncated for Python line length)
+OPC_BM_SECONDARY_VNIC_RESPONSE = """\
+[ {
+ "vnicId" : "ocid1.vnic.oc1.phx.abyhqljtyvcucqkhdqmgjszebxe4hrb!!TRUNCATED||",
+ "privateIp" : "10.0.0.8",
+ "vlanTag" : 0,
+ "macAddr" : "90:e2:ba:d4:f1:68",
+ "virtualRouterIp" : "10.0.0.1",
+ "subnetCidrBlock" : "10.0.0.0/24",
+ "nicIndex" : 0
+}, {
+ "vnicId" : "ocid1.vnic.oc1.phx.abyhqljtfmkxjdy2sqidndiwrsg63zf!!TRUNCATED||",
+ "privateIp" : "10.0.4.5",
+ "vlanTag" : 1,
+ "macAddr" : "02:00:17:05:CF:51",
+ "virtualRouterIp" : "10.0.4.1",
+ "subnetCidrBlock" : "10.0.4.0/24",
+ "nicIndex" : 0
+} ]"""
+
+# `curl -L http://169.254.169.254/opc/v1/vnics/` on an Oracle Virtual Machine
+# with a secondary VNIC attached
+OPC_VM_SECONDARY_VNIC_RESPONSE = """\
+[ {
+ "vnicId" : "ocid1.vnic.oc1.phx.abyhqljtch72z5pd76cc2636qeqh7z_truncated",
+ "privateIp" : "10.0.0.230",
+ "vlanTag" : 1039,
+ "macAddr" : "02:00:17:05:D1:DB",
+ "virtualRouterIp" : "10.0.0.1",
+ "subnetCidrBlock" : "10.0.0.0/24"
+}, {
+ "vnicId" : "ocid1.vnic.oc1.phx.abyhqljt4iew3gwmvrwrhhf3bp5drj_truncated",
+ "privateIp" : "10.0.0.231",
+ "vlanTag" : 1041,
+ "macAddr" : "00:00:17:02:2B:B1",
+ "virtualRouterIp" : "10.0.0.1",
+ "subnetCidrBlock" : "10.0.0.0/24"
+} ]"""
+
class TestDataSourceOracle(test_helpers.CiTestCase):
"""Test datasource DataSourceOracle."""
+ with_logs = True
+
ds_class = oracle.DataSourceOracle
my_uuid = str(uuid.uuid4())
@@ -79,6 +121,16 @@ class TestDataSourceOracle(test_helpers.CiTestCase):
self.assertEqual(
'metadata (http://169.254.169.254/openstack/)', ds.subplatform)
+ def test_sys_cfg_can_enable_configure_secondary_nics(self):
+ # Confirm that behaviour is toggled by sys_cfg
+ ds, _mocks = self._get_ds()
+ self.assertFalse(ds.ds_cfg['configure_secondary_nics'])
+
+ sys_cfg = {
+ 'datasource': {'Oracle': {'configure_secondary_nics': True}}}
+ ds, _mocks = self._get_ds(sys_cfg=sys_cfg)
+ self.assertTrue(ds.ds_cfg['configure_secondary_nics'])
+
@mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
def test_without_userdata(self, m_is_iscsi_root):
"""If no user-data is provided, it should not be in return dict."""
@@ -133,9 +185,12 @@ class TestDataSourceOracle(test_helpers.CiTestCase):
self.assertEqual(self.my_md['uuid'], ds.get_instance_id())
self.assertEqual(my_userdata, ds.userdata_raw)
- @mock.patch(DS_PATH + ".cmdline.read_kernel_cmdline_config")
+ @mock.patch(DS_PATH + "._add_network_config_from_opc_imds",
+ side_effect=lambda network_config: network_config)
+ @mock.patch(DS_PATH + ".cmdline.read_initramfs_config")
@mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
- def test_network_cmdline(self, m_is_iscsi_root, m_cmdline_config):
+ def test_network_cmdline(self, m_is_iscsi_root, m_initramfs_config,
+ _m_add_network_config_from_opc_imds):
"""network_config should read kernel cmdline."""
distro = mock.MagicMock()
ds, _ = self._get_ds(distro=distro, patches={
@@ -145,15 +200,18 @@ class TestDataSourceOracle(test_helpers.CiTestCase):
MD_VER: {'system_uuid': self.my_uuid,
'meta_data': self.my_md}}}})
ncfg = {'version': 1, 'config': [{'a': 'b'}]}
- m_cmdline_config.return_value = ncfg
+ m_initramfs_config.return_value = ncfg
self.assertTrue(ds._get_data())
self.assertEqual(ncfg, ds.network_config)
- m_cmdline_config.assert_called_once_with()
+ self.assertEqual([mock.call()], m_initramfs_config.call_args_list)
self.assertFalse(distro.generate_fallback_config.called)
- @mock.patch(DS_PATH + ".cmdline.read_kernel_cmdline_config")
+ @mock.patch(DS_PATH + "._add_network_config_from_opc_imds",
+ side_effect=lambda network_config: network_config)
+ @mock.patch(DS_PATH + ".cmdline.read_initramfs_config")
@mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
- def test_network_fallback(self, m_is_iscsi_root, m_cmdline_config):
+ def test_network_fallback(self, m_is_iscsi_root, m_initramfs_config,
+ _m_add_network_config_from_opc_imds):
"""test that fallback network is generated if no kernel cmdline."""
distro = mock.MagicMock()
ds, _ = self._get_ds(distro=distro, patches={
@@ -163,18 +221,95 @@ class TestDataSourceOracle(test_helpers.CiTestCase):
MD_VER: {'system_uuid': self.my_uuid,
'meta_data': self.my_md}}}})
ncfg = {'version': 1, 'config': [{'a': 'b'}]}
- m_cmdline_config.return_value = None
+ m_initramfs_config.return_value = None
self.assertTrue(ds._get_data())
ncfg = {'version': 1, 'config': [{'distro1': 'value'}]}
distro.generate_fallback_config.return_value = ncfg
self.assertEqual(ncfg, ds.network_config)
- m_cmdline_config.assert_called_once_with()
+ self.assertEqual([mock.call()], m_initramfs_config.call_args_list)
distro.generate_fallback_config.assert_called_once_with()
- self.assertEqual(1, m_cmdline_config.call_count)
# test that the result got cached, and the methods not re-called.
self.assertEqual(ncfg, ds.network_config)
- self.assertEqual(1, m_cmdline_config.call_count)
+ self.assertEqual(1, m_initramfs_config.call_count)
+
+ @mock.patch(DS_PATH + "._add_network_config_from_opc_imds")
+ @mock.patch(DS_PATH + ".cmdline.read_initramfs_config",
+ return_value={'some': 'config'})
+ @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
+ def test_secondary_nics_added_to_network_config_if_enabled(
+ self, _m_is_iscsi_root, _m_initramfs_config,
+ m_add_network_config_from_opc_imds):
+
+ needle = object()
+
+ def network_config_side_effect(network_config):
+ network_config['secondary_added'] = needle
+
+ m_add_network_config_from_opc_imds.side_effect = (
+ network_config_side_effect)
+
+ distro = mock.MagicMock()
+ ds, _ = self._get_ds(distro=distro, patches={
+ '_is_platform_viable': {'return_value': True},
+ 'crawl_metadata': {
+ 'return_value': {
+ MD_VER: {'system_uuid': self.my_uuid,
+ 'meta_data': self.my_md}}}})
+ ds.ds_cfg['configure_secondary_nics'] = True
+ self.assertEqual(needle, ds.network_config['secondary_added'])
+
+ @mock.patch(DS_PATH + "._add_network_config_from_opc_imds")
+ @mock.patch(DS_PATH + ".cmdline.read_initramfs_config",
+ return_value={'some': 'config'})
+ @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
+ def test_secondary_nics_not_added_to_network_config_by_default(
+ self, _m_is_iscsi_root, _m_initramfs_config,
+ m_add_network_config_from_opc_imds):
+
+ def network_config_side_effect(network_config):
+ network_config['secondary_added'] = True
+
+ m_add_network_config_from_opc_imds.side_effect = (
+ network_config_side_effect)
+
+ distro = mock.MagicMock()
+ ds, _ = self._get_ds(distro=distro, patches={
+ '_is_platform_viable': {'return_value': True},
+ 'crawl_metadata': {
+ 'return_value': {
+ MD_VER: {'system_uuid': self.my_uuid,
+ 'meta_data': self.my_md}}}})
+ self.assertNotIn('secondary_added', ds.network_config)
+
+ @mock.patch(DS_PATH + "._add_network_config_from_opc_imds")
+ @mock.patch(DS_PATH + ".cmdline.read_initramfs_config")
+ @mock.patch(DS_PATH + "._is_iscsi_root", return_value=True)
+ def test_secondary_nic_failure_isnt_blocking(
+ self, _m_is_iscsi_root, m_initramfs_config,
+ m_add_network_config_from_opc_imds):
+
+ m_add_network_config_from_opc_imds.side_effect = Exception()
+
+ distro = mock.MagicMock()
+ ds, _ = self._get_ds(distro=distro, patches={
+ '_is_platform_viable': {'return_value': True},
+ 'crawl_metadata': {
+ 'return_value': {
+ MD_VER: {'system_uuid': self.my_uuid,
+ 'meta_data': self.my_md}}}})
+ ds.ds_cfg['configure_secondary_nics'] = True
+ self.assertEqual(ds.network_config, m_initramfs_config.return_value)
+ self.assertIn('Failed to fetch secondary network configuration',
+ self.logs.getvalue())
+
+ def test_ds_network_cfg_preferred_over_initramfs(self):
+ """Ensure that DS net config is preferred over initramfs config"""
+ network_config_sources = oracle.DataSourceOracle.network_config_sources
+ self.assertLess(
+ network_config_sources.index(NetworkConfigSource.ds),
+ network_config_sources.index(NetworkConfigSource.initramfs)
+ )
@mock.patch(DS_PATH + "._read_system_uuid", return_value=str(uuid.uuid4()))
@@ -198,7 +333,7 @@ class TestReadMetaData(test_helpers.HttprettyTestCase):
for k, v in data.items():
httpretty.register_uri(
httpretty.GET, self.mdurl + MD_VER + "/" + k,
- v if not isinstance(v, six.text_type) else v.encode('utf-8'))
+ v if not isinstance(v, str) else v.encode('utf-8'))
def test_broken_no_sys_uuid(self, m_read_system_uuid):
"""Datasource requires ability to read system_uuid and true return."""
@@ -336,4 +471,265 @@ class TestLoadIndex(test_helpers.CiTestCase):
oracle._load_index("\n".join(["meta_data.json", "user_data"])))
+class TestNetworkConfigFromOpcImds(test_helpers.CiTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestNetworkConfigFromOpcImds, self).setUp()
+ self.add_patch(DS_PATH + '.readurl', 'm_readurl')
+ self.add_patch(DS_PATH + '.get_interfaces_by_mac',
+ 'm_get_interfaces_by_mac')
+
+ def test_failure_to_readurl(self):
+ # readurl failures should just bubble out to the caller
+ self.m_readurl.side_effect = Exception('oh no')
+ with self.assertRaises(Exception) as excinfo:
+ oracle._add_network_config_from_opc_imds({})
+ self.assertEqual(str(excinfo.exception), 'oh no')
+
+ def test_empty_response(self):
+ # empty response error should just bubble out to the caller
+ self.m_readurl.return_value = ''
+ with self.assertRaises(Exception):
+ oracle._add_network_config_from_opc_imds([])
+
+ def test_invalid_json(self):
+ # invalid JSON error should just bubble out to the caller
+ self.m_readurl.return_value = '{'
+ with self.assertRaises(Exception):
+ oracle._add_network_config_from_opc_imds([])
+
+ def test_no_secondary_nics_does_not_mutate_input(self):
+ self.m_readurl.return_value = json.dumps([{}])
+ # We test this by passing in a non-dict to ensure that no dict
+ # operations are used; failure would be seen as exceptions
+ oracle._add_network_config_from_opc_imds(object())
+
+ def test_bare_metal_machine_skipped(self):
+ # nicIndex in the first entry indicates a bare metal machine
+ self.m_readurl.return_value = OPC_BM_SECONDARY_VNIC_RESPONSE
+ # We test this by passing in a non-dict to ensure that no dict
+ # operations are used
+ self.assertFalse(oracle._add_network_config_from_opc_imds(object()))
+ self.assertIn('bare metal machine', self.logs.getvalue())
+
+ def test_missing_mac_skipped(self):
+ self.m_readurl.return_value = OPC_VM_SECONDARY_VNIC_RESPONSE
+ self.m_get_interfaces_by_mac.return_value = {}
+
+ network_config = {'version': 1, 'config': [{'primary': 'nic'}]}
+ oracle._add_network_config_from_opc_imds(network_config)
+
+ self.assertEqual(1, len(network_config['config']))
+ self.assertIn(
+ 'Interface with MAC 00:00:17:02:2b:b1 not found; skipping',
+ self.logs.getvalue())
+
+ def test_missing_mac_skipped_v2(self):
+ self.m_readurl.return_value = OPC_VM_SECONDARY_VNIC_RESPONSE
+ self.m_get_interfaces_by_mac.return_value = {}
+
+ network_config = {'version': 2, 'ethernets': {'primary': {'nic': {}}}}
+ oracle._add_network_config_from_opc_imds(network_config)
+
+ self.assertEqual(1, len(network_config['ethernets']))
+ self.assertIn(
+ 'Interface with MAC 00:00:17:02:2b:b1 not found; skipping',
+ self.logs.getvalue())
+
+ def test_secondary_nic(self):
+ self.m_readurl.return_value = OPC_VM_SECONDARY_VNIC_RESPONSE
+ mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3'
+ self.m_get_interfaces_by_mac.return_value = {
+ mac_addr: nic_name,
+ }
+
+ network_config = {'version': 1, 'config': [{'primary': 'nic'}]}
+ oracle._add_network_config_from_opc_imds(network_config)
+
+ # The input is mutated
+ self.assertEqual(2, len(network_config['config']))
+
+ secondary_nic_cfg = network_config['config'][1]
+ self.assertEqual(nic_name, secondary_nic_cfg['name'])
+ self.assertEqual('physical', secondary_nic_cfg['type'])
+ self.assertEqual(mac_addr, secondary_nic_cfg['mac_address'])
+ self.assertEqual(9000, secondary_nic_cfg['mtu'])
+
+ self.assertEqual(1, len(secondary_nic_cfg['subnets']))
+ subnet_cfg = secondary_nic_cfg['subnets'][0]
+ # These values are hard-coded in OPC_VM_SECONDARY_VNIC_RESPONSE
+ self.assertEqual('10.0.0.231', subnet_cfg['address'])
+
+ def test_secondary_nic_v2(self):
+ self.m_readurl.return_value = OPC_VM_SECONDARY_VNIC_RESPONSE
+ mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3'
+ self.m_get_interfaces_by_mac.return_value = {
+ mac_addr: nic_name,
+ }
+
+ network_config = {'version': 2, 'ethernets': {'primary': {'nic': {}}}}
+ oracle._add_network_config_from_opc_imds(network_config)
+
+ # The input is mutated
+ self.assertEqual(2, len(network_config['ethernets']))
+
+ secondary_nic_cfg = network_config['ethernets']['ens3']
+ self.assertFalse(secondary_nic_cfg['dhcp4'])
+ self.assertFalse(secondary_nic_cfg['dhcp6'])
+ self.assertEqual(mac_addr, secondary_nic_cfg['match']['macaddress'])
+ self.assertEqual(9000, secondary_nic_cfg['mtu'])
+
+ self.assertEqual(1, len(secondary_nic_cfg['addresses']))
+ # These values are hard-coded in OPC_VM_SECONDARY_VNIC_RESPONSE
+ self.assertEqual('10.0.0.231', secondary_nic_cfg['addresses'][0])
+
+
+class TestNetworkConfigFiltersNetFailover(test_helpers.CiTestCase):
+
+ with_logs = True
+
+ def setUp(self):
+ super(TestNetworkConfigFiltersNetFailover, self).setUp()
+ self.add_patch(DS_PATH + '.get_interfaces_by_mac',
+ 'm_get_interfaces_by_mac')
+ self.add_patch(DS_PATH + '.is_netfail_master', 'm_netfail_master')
+
+ def test_ignore_bogus_network_config(self):
+ netcfg = {'something': 'here'}
+ passed_netcfg = copy.copy(netcfg)
+ oracle._ensure_netfailover_safe(passed_netcfg)
+ self.assertEqual(netcfg, passed_netcfg)
+
+ def test_ignore_network_config_unknown_versions(self):
+ netcfg = {'something': 'here', 'version': 3}
+ passed_netcfg = copy.copy(netcfg)
+ oracle._ensure_netfailover_safe(passed_netcfg)
+ self.assertEqual(netcfg, passed_netcfg)
+
+ def test_checks_v1_type_physical_interfaces(self):
+ mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3'
+ self.m_get_interfaces_by_mac.return_value = {
+ mac_addr: nic_name,
+ }
+ netcfg = {'version': 1, 'config': [
+ {'type': 'physical', 'name': nic_name, 'mac_address': mac_addr,
+ 'subnets': [{'type': 'dhcp4'}]}]}
+ passed_netcfg = copy.copy(netcfg)
+ self.m_netfail_master.return_value = False
+ oracle._ensure_netfailover_safe(passed_netcfg)
+ self.assertEqual(netcfg, passed_netcfg)
+ self.assertEqual([mock.call(nic_name)],
+ self.m_netfail_master.call_args_list)
+
+ def test_checks_v1_skips_non_phys_interfaces(self):
+ mac_addr, nic_name = '00:00:17:02:2b:b1', 'bond0'
+ self.m_get_interfaces_by_mac.return_value = {
+ mac_addr: nic_name,
+ }
+ netcfg = {'version': 1, 'config': [
+ {'type': 'bond', 'name': nic_name, 'mac_address': mac_addr,
+ 'subnets': [{'type': 'dhcp4'}]}]}
+ passed_netcfg = copy.copy(netcfg)
+ oracle._ensure_netfailover_safe(passed_netcfg)
+ self.assertEqual(netcfg, passed_netcfg)
+ self.assertEqual(0, self.m_netfail_master.call_count)
+
+ def test_removes_master_mac_property_v1(self):
+ nic_master, mac_master = 'ens3', self.random_string()
+ nic_other, mac_other = 'ens7', self.random_string()
+ nic_extra, mac_extra = 'enp0s1f2', self.random_string()
+ self.m_get_interfaces_by_mac.return_value = {
+ mac_master: nic_master,
+ mac_other: nic_other,
+ mac_extra: nic_extra,
+ }
+ netcfg = {'version': 1, 'config': [
+ {'type': 'physical', 'name': nic_master,
+ 'mac_address': mac_master},
+ {'type': 'physical', 'name': nic_other, 'mac_address': mac_other},
+ {'type': 'physical', 'name': nic_extra, 'mac_address': mac_extra},
+ ]}
+
+ def _is_netfail_master(iface):
+ if iface == 'ens3':
+ return True
+ return False
+ self.m_netfail_master.side_effect = _is_netfail_master
+ expected_cfg = {'version': 1, 'config': [
+ {'type': 'physical', 'name': nic_master},
+ {'type': 'physical', 'name': nic_other, 'mac_address': mac_other},
+ {'type': 'physical', 'name': nic_extra, 'mac_address': mac_extra},
+ ]}
+ oracle._ensure_netfailover_safe(netcfg)
+ self.assertEqual(expected_cfg, netcfg)
+
+ def test_checks_v2_type_ethernet_interfaces(self):
+ mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3'
+ self.m_get_interfaces_by_mac.return_value = {
+ mac_addr: nic_name,
+ }
+ netcfg = {'version': 2, 'ethernets': {
+ nic_name: {'dhcp4': True, 'critical': True, 'set-name': nic_name,
+ 'match': {'macaddress': mac_addr}}}}
+ passed_netcfg = copy.copy(netcfg)
+ self.m_netfail_master.return_value = False
+ oracle._ensure_netfailover_safe(passed_netcfg)
+ self.assertEqual(netcfg, passed_netcfg)
+ self.assertEqual([mock.call(nic_name)],
+ self.m_netfail_master.call_args_list)
+
+ def test_skips_v2_non_ethernet_interfaces(self):
+ mac_addr, nic_name = '00:00:17:02:2b:b1', 'wlps0'
+ self.m_get_interfaces_by_mac.return_value = {
+ mac_addr: nic_name,
+ }
+ netcfg = {'version': 2, 'wifis': {
+ nic_name: {'dhcp4': True, 'critical': True, 'set-name': nic_name,
+ 'match': {'macaddress': mac_addr}}}}
+ passed_netcfg = copy.copy(netcfg)
+ oracle._ensure_netfailover_safe(passed_netcfg)
+ self.assertEqual(netcfg, passed_netcfg)
+ self.assertEqual(0, self.m_netfail_master.call_count)
+
+ def test_removes_master_mac_property_v2(self):
+ nic_master, mac_master = 'ens3', self.random_string()
+ nic_other, mac_other = 'ens7', self.random_string()
+ nic_extra, mac_extra = 'enp0s1f2', self.random_string()
+ self.m_get_interfaces_by_mac.return_value = {
+ mac_master: nic_master,
+ mac_other: nic_other,
+ mac_extra: nic_extra,
+ }
+ netcfg = {'version': 2, 'ethernets': {
+ nic_extra: {'dhcp4': True, 'set-name': nic_extra,
+ 'match': {'macaddress': mac_extra}},
+ nic_other: {'dhcp4': True, 'set-name': nic_other,
+ 'match': {'macaddress': mac_other}},
+ nic_master: {'dhcp4': True, 'set-name': nic_master,
+ 'match': {'macaddress': mac_master}},
+ }}
+
+ def _is_netfail_master(iface):
+ if iface == 'ens3':
+ return True
+ return False
+ self.m_netfail_master.side_effect = _is_netfail_master
+
+ expected_cfg = {'version': 2, 'ethernets': {
+ nic_master: {'dhcp4': True, 'match': {'name': nic_master}},
+ nic_extra: {'dhcp4': True, 'set-name': nic_extra,
+ 'match': {'macaddress': mac_extra}},
+ nic_other: {'dhcp4': True, 'set-name': nic_other,
+ 'match': {'macaddress': mac_other}},
+ }}
+ oracle._ensure_netfailover_safe(netcfg)
+ self.assertEqual(expected_cfg, netcfg)
+
+
# vi: ts=4 expandtab
diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py
index 3f99b58c..c3a9b5b7 100644
--- a/cloudinit/ssh_util.py
+++ b/cloudinit/ssh_util.py
@@ -17,7 +17,7 @@ LOG = logging.getLogger(__name__)
# See: man sshd_config
DEF_SSHD_CFG = "/etc/ssh/sshd_config"
-# taken from openssh source openssh-7.3p1/sshkey.c:
+# taken from OpenSSH source openssh-7.3p1/sshkey.c:
# static const struct keytype keytypes[] = { ... }
VALID_KEY_TYPES = (
"dsa",
@@ -160,19 +160,19 @@ class AuthKeyLineParser(object):
comment=comment, options=options)
-def parse_authorized_keys(fname):
+def parse_authorized_keys(fnames):
lines = []
- try:
- if os.path.isfile(fname):
- lines = util.load_file(fname).splitlines()
- except (IOError, OSError):
- util.logexc(LOG, "Error reading lines from %s", fname)
- lines = []
-
parser = AuthKeyLineParser()
contents = []
- for line in lines:
- contents.append(parser.parse(line))
+ for fname in fnames:
+ try:
+ if os.path.isfile(fname):
+ lines = util.load_file(fname).splitlines()
+ for line in lines:
+ contents.append(parser.parse(line))
+ except (IOError, OSError):
+ util.logexc(LOG, "Error reading lines from %s", fname)
+
return contents
@@ -207,36 +207,50 @@ def update_authorized_keys(old_entries, keys):
def users_ssh_info(username):
pw_ent = pwd.getpwnam(username)
if not pw_ent or not pw_ent.pw_dir:
- raise RuntimeError("Unable to get ssh info for user %r" % (username))
+ raise RuntimeError("Unable to get SSH info for user %r" % (username))
return (os.path.join(pw_ent.pw_dir, '.ssh'), pw_ent)
-def extract_authorized_keys(username):
+def render_authorizedkeysfile_paths(value, homedir, username):
+ # The 'AuthorizedKeysFile' may contain tokens
+ # of the form %T which are substituted during connection set-up.
+ # The following tokens are defined: %% is replaced by a literal
+ # '%', %h is replaced by the home directory of the user being
+ # authenticated and %u is replaced by the username of that user.
+ macros = (("%h", homedir), ("%u", username), ("%%", "%"))
+ if not value:
+ value = "%h/.ssh/authorized_keys"
+ paths = value.split()
+ rendered = []
+ for path in paths:
+ for macro, field in macros:
+ path = path.replace(macro, field)
+ if not path.startswith("/"):
+ path = os.path.join(homedir, path)
+ rendered.append(path)
+ return rendered
+
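# A worked example with hypothetical sshd_config values:
paths = render_authorizedkeysfile_paths(
    '%h/.ssh/authorized_keys .ssh/authorized_keys2 /etc/ssh/keys/%u',
    '/home/bob', 'bob')
assert paths == ['/home/bob/.ssh/authorized_keys',
                 '/home/bob/.ssh/authorized_keys2',
                 '/etc/ssh/keys/bob']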
+
+def extract_authorized_keys(username, sshd_cfg_file=DEF_SSHD_CFG):
(ssh_dir, pw_ent) = users_ssh_info(username)
- auth_key_fn = None
+ default_authorizedkeys_file = os.path.join(ssh_dir, 'authorized_keys')
+ auth_key_fns = []
with util.SeLinuxGuard(ssh_dir, recursive=True):
try:
- # The 'AuthorizedKeysFile' may contain tokens
- # of the form %T which are substituted during connection set-up.
- # The following tokens are defined: %% is replaced by a literal
- # '%', %h is replaced by the home directory of the user being
- # authenticated and %u is replaced by the username of that user.
- ssh_cfg = parse_ssh_config_map(DEF_SSHD_CFG)
- auth_key_fn = ssh_cfg.get("authorizedkeysfile", '').strip()
- if not auth_key_fn:
- auth_key_fn = "%h/.ssh/authorized_keys"
- auth_key_fn = auth_key_fn.replace("%h", pw_ent.pw_dir)
- auth_key_fn = auth_key_fn.replace("%u", username)
- auth_key_fn = auth_key_fn.replace("%%", '%')
- if not auth_key_fn.startswith('/'):
- auth_key_fn = os.path.join(pw_ent.pw_dir, auth_key_fn)
+ ssh_cfg = parse_ssh_config_map(sshd_cfg_file)
+ auth_key_fns = render_authorizedkeysfile_paths(
+ ssh_cfg.get("authorizedkeysfile", "%h/.ssh/authorized_keys"),
+ pw_ent.pw_dir, username)
+
except (IOError, OSError):
# Give up and use a default key filename
- auth_key_fn = os.path.join(ssh_dir, 'authorized_keys')
- util.logexc(LOG, "Failed extracting 'AuthorizedKeysFile' in ssh "
+            auth_key_fns = [default_authorizedkeys_file]
+ util.logexc(LOG, "Failed extracting 'AuthorizedKeysFile' in SSH "
"config from %r, using 'AuthorizedKeysFile' file "
- "%r instead", DEF_SSHD_CFG, auth_key_fn)
- return (auth_key_fn, parse_authorized_keys(auth_key_fn))
+ "%r instead", DEF_SSHD_CFG, auth_key_fns[0])
+
+    # always store all the keys in the user's own authorized_keys file
+ return (default_authorizedkeys_file, parse_authorized_keys(auth_key_fns))
def setup_user_keys(keys, username, options=None):
@@ -335,7 +349,7 @@ def update_ssh_config(updates, fname=DEF_SSHD_CFG):
def update_ssh_config_lines(lines, updates):
- """Update the ssh config lines per updates.
+ """Update the SSH config lines per updates.
@param lines: array of SshdConfigLine. This array is updated in place.
@param updates: dictionary of desired values {Option: value}
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 8a064124..db8ba64c 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -6,11 +6,9 @@
import copy
import os
+import pickle
import sys
-import six
-from six.moves import cPickle as pickle
-
from cloudinit.settings import (
FREQUENCIES, CLOUD_CONFIG, PER_INSTANCE, RUN_CLOUD_CONFIG)
@@ -24,6 +22,7 @@ from cloudinit.handlers.shell_script import ShellScriptPartHandler
from cloudinit.handlers.upstart_job import UpstartJobPartHandler
from cloudinit.event import EventType
+from cloudinit.sources import NetworkConfigSource
from cloudinit import cloud
from cloudinit import config
@@ -500,7 +499,7 @@ class Init(object):
# Init the handlers first
for (_ctype, mod) in c_handlers.items():
if mod in c_handlers.initialized:
- # Avoid initing the same module twice (if said module
+ # Avoid initializing the same module twice (if said module
# is registered to more than one content-type).
continue
handlers.call_begin(mod, data, frequency)
@@ -548,11 +547,15 @@ class Init(object):
with events.ReportEventStack("consume-user-data",
"reading and applying user-data",
parent=self.reporter):
- self._consume_userdata(frequency)
+ if util.get_cfg_option_bool(self.cfg, 'allow_userdata', True):
+     self._consume_userdata(frequency)
+ else:
+     LOG.debug('allow_userdata = False: discarding user-data')
+
with events.ReportEventStack("consume-vendor-data",
"reading and applying vendor-data",
parent=self.reporter):
- self._consume_vendordata(frequency)
+ self._consume_vendordata(frequency)
# Perform post-consumption adjustments so that
# modules that run during the init stage reflect
@@ -630,32 +633,54 @@ class Init(object):
if os.path.exists(disable_file):
return (None, disable_file)
- cmdline_cfg = ('cmdline', cmdline.read_kernel_cmdline_config())
- dscfg = ('ds', None)
+ available_cfgs = {
+ NetworkConfigSource.cmdline: cmdline.read_kernel_cmdline_config(),
+ NetworkConfigSource.initramfs: cmdline.read_initramfs_config(),
+ NetworkConfigSource.ds: None,
+ NetworkConfigSource.system_cfg: self.cfg.get('network'),
+ }
+
if self.datasource and hasattr(self.datasource, 'network_config'):
- dscfg = ('ds', self.datasource.network_config)
- sys_cfg = ('system_cfg', self.cfg.get('network'))
+ available_cfgs[NetworkConfigSource.ds] = (
+ self.datasource.network_config)
- for loc, ncfg in (cmdline_cfg, sys_cfg, dscfg):
+ if self.datasource:
+ order = self.datasource.network_config_sources
+ else:
+ order = sources.DataSource.network_config_sources
+ for cfg_source in order:
+ if not hasattr(NetworkConfigSource, cfg_source):
+ LOG.warning('data source specifies an invalid network'
+ ' cfg_source: %s', cfg_source)
+ continue
+ if cfg_source not in available_cfgs:
+ LOG.warning('data source specifies an unavailable network'
+ ' cfg_source: %s', cfg_source)
+ continue
+ ncfg = available_cfgs[cfg_source]
if net.is_disabled_cfg(ncfg):
- LOG.debug("network config disabled by %s", loc)
- return (None, loc)
+ LOG.debug("network config disabled by %s", cfg_source)
+ return (None, cfg_source)
if ncfg:
- return (ncfg, loc)
- return (self.distro.generate_fallback_config(), "fallback")
-
- def apply_network_config(self, bring_up):
- netcfg, src = self._find_networking_config()
- if netcfg is None:
- LOG.info("network config is disabled by %s", src)
- return
+ return (ncfg, cfg_source)
+ return (self.distro.generate_fallback_config(),
+ NetworkConfigSource.fallback)
+ def _apply_netcfg_names(self, netcfg):
try:
LOG.debug("applying net config names for %s", netcfg)
self.distro.apply_network_config_names(netcfg)
except Exception as e:
LOG.warning("Failed to rename devices: %s", e)
+ def apply_network_config(self, bring_up):
+ # get a network config
+ netcfg, src = self._find_networking_config()
+ if netcfg is None:
+ LOG.info("network config is disabled by %s", src)
+ return
+
+ # request an update if needed/available
if self.datasource is not NULL_DATA_SOURCE:
if not self.is_new_instance():
if not self.datasource.update_metadata([EventType.BOOT]):
@@ -663,8 +688,20 @@ class Init(object):
"No network config applied. Neither a new instance"
" nor datasource network update on '%s' event",
EventType.BOOT)
+ # nothing new, but ensure proper names
+ self._apply_netcfg_names(netcfg)
return
+ else:
+ # refresh netcfg after update
+ netcfg, src = self._find_networking_config()
+
+ # ensure all physical devices in config are present
+ net.wait_for_physdevs(netcfg)
+
+ # apply renames from config
+ self._apply_netcfg_names(netcfg)
+ # render the network config
LOG.info("Applying network configuration from %s bringup=%s: %s",
src, bring_up, netcfg)
try:
@@ -719,7 +756,7 @@ class Modules(object):
for item in cfg_mods:
if not item:
continue
- if isinstance(item, six.string_types):
+ if isinstance(item, str):
module_list.append({
'mod': item.strip(),
})
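For readers following the new _find_networking_config flow: it walks the datasource-defined source order, skipping unknown or unavailable sources, honoring a 'disabled' config, and falling back only when nothing matches. A minimal sketch under assumed names (find_net_config, plain strings and dicts standing in for NetworkConfigSource and the real readers):

    def find_net_config(order, available, fallback):
        for src in order:
            if src not in available:
                continue  # the real code also logs a warning here
            cfg = available[src]
            if cfg == 'disabled':
                return (None, src)
            if cfg:
                return (cfg, src)
        return (fallback, 'fallback')

    available = {'cmdline': None, 'ds': {'version': 2}}
    assert find_net_config(['cmdline', 'ds'], available, {}) == (
        {'version': 2}, 'ds')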
diff --git a/cloudinit/temp_utils.py b/cloudinit/temp_utils.py
index c98a1b53..346276ec 100644
--- a/cloudinit/temp_utils.py
+++ b/cloudinit/temp_utils.py
@@ -81,7 +81,7 @@ def ExtendedTemporaryFile(**kwargs):
@contextlib.contextmanager
-def tempdir(**kwargs):
+def tempdir(rmtree_ignore_errors=False, **kwargs):
# This seems like it was only added in python 3.2
# Make it since it's useful...
# See: http://bugs.python.org/file12970/tempdir.patch
@@ -89,7 +89,7 @@ def tempdir(**kwargs):
try:
yield tdir
finally:
- shutil.rmtree(tdir)
+ shutil.rmtree(tdir, ignore_errors=rmtree_ignore_errors)
def mkdtemp(**kwargs):
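A self-contained sketch of the pattern being changed here: a tempdir context manager whose cleanup can be told to swallow errors, e.g. when the body already removed the directory:

    import contextlib
    import shutil
    import tempfile

    @contextlib.contextmanager
    def tempdir(rmtree_ignore_errors=False, **kwargs):
        tdir = tempfile.mkdtemp(**kwargs)
        try:
            yield tdir
        finally:
            # ignore_errors=True swallows the OSError raised when the
            # directory is already gone
            shutil.rmtree(tdir, ignore_errors=rmtree_ignore_errors)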
diff --git a/cloudinit/templater.py b/cloudinit/templater.py
index b668674b..e47cdeda 100644
--- a/cloudinit/templater.py
+++ b/cloudinit/templater.py
@@ -44,7 +44,7 @@ MISSING_JINJA_PREFIX = u'CI_MISSING_JINJA_VAR/'
@implements_to_string # Needed for python2.7. Otherwise cached super.__str__
class UndefinedJinjaVariable(JUndefined):
- """Class used to represent any undefined jinja template varible."""
+ """Class used to represent any undefined jinja template variable."""
def __str__(self):
return u'%s%s' % (MISSING_JINJA_PREFIX, self._undefined_name)
@@ -58,7 +58,7 @@ class UndefinedJinjaVariable(JUndefined):
def basic_render(content, params):
- """This does sumple replacement of bash variable like templates.
+ """This does simple replacement of bash variable like templates.
It identifies patterns like ${a} or $a and can also identify patterns like
${a.b} or $a.b which will look for a key 'b' in the dictionary rooted
diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py
index 2eb7b0cd..70f6bad7 100644
--- a/cloudinit/tests/helpers.py
+++ b/cloudinit/tests/helpers.py
@@ -4,15 +4,17 @@ from __future__ import print_function
import functools
import httpretty
+import io
import logging
import os
+import random
import shutil
+import string
import sys
import tempfile
import time
+from unittest import mock
-import mock
-import six
import unittest2
from unittest2.util import strclass
@@ -41,26 +43,6 @@ _real_subp = util.subp
SkipTest = unittest2.SkipTest
skipIf = unittest2.skipIf
-# Used for detecting different python versions
-PY2 = False
-PY26 = False
-PY27 = False
-PY3 = False
-
-_PY_VER = sys.version_info
-_PY_MAJOR, _PY_MINOR, _PY_MICRO = _PY_VER[0:3]
-if (_PY_MAJOR, _PY_MINOR) <= (2, 6):
- if (_PY_MAJOR, _PY_MINOR) == (2, 6):
- PY26 = True
- if (_PY_MAJOR, _PY_MINOR) >= (2, 0):
- PY2 = True
-else:
- if (_PY_MAJOR, _PY_MINOR) == (2, 7):
- PY27 = True
- PY2 = True
- if (_PY_MAJOR, _PY_MINOR) >= (3, 0):
- PY3 = True
-
# Makes the old path start
# with new base instead of whatever
@@ -90,7 +72,7 @@ def retarget_many_wrapper(new_base, am, old_func):
# Python 3 some of these now accept file-descriptors (integers).
# That breaks rebase_path() so in lieu of a better solution, just
# don't rebase if we get a fd.
- if isinstance(path, six.string_types):
+ if isinstance(path, str):
n_args[i] = rebase_path(path, new_base)
return old_func(*n_args, **kwds)
return wrapper
@@ -167,7 +149,7 @@ class CiTestCase(TestCase):
if self.with_logs:
# Create a log handler so unit tests can search expected logs.
self.logger = logging.getLogger()
- self.logs = six.StringIO()
+ self.logs = io.StringIO()
formatter = logging.Formatter('%(levelname)s: %(message)s')
handler = logging.StreamHandler(self.logs)
handler.setFormatter(formatter)
@@ -184,7 +166,7 @@ class CiTestCase(TestCase):
else:
cmd = args[0]
- if not isinstance(cmd, six.string_types):
+ if not isinstance(cmd, str):
cmd = cmd[0]
pass_through = False
if not isinstance(self.allowed_subp, (list, bool)):
@@ -207,6 +189,7 @@ class CiTestCase(TestCase):
if self.with_logs:
# Remove the handler we setup
logging.getLogger().handlers = self.old_handlers
+ logging.getLogger().level = None
util.subp = _real_subp
super(CiTestCase, self).tearDown()
@@ -217,7 +200,8 @@ class CiTestCase(TestCase):
prefix="ci-%s." % self.__class__.__name__)
else:
tmpd = tempfile.mkdtemp(dir=dir)
- self.addCleanup(functools.partial(shutil.rmtree, tmpd))
+ self.addCleanup(
+ functools.partial(shutil.rmtree, tmpd, ignore_errors=True))
return tmpd
def tmp_path(self, path, dir=None):
@@ -261,6 +245,12 @@ class CiTestCase(TestCase):
myds.metadata.update(metadata)
return cloud.Cloud(myds, self.paths, sys_cfg, mydist, None)
+ @classmethod
+ def random_string(cls, length=8):
+ """ return a random lowercase string with default length of 8"""
+ return ''.join(
+ random.choice(string.ascii_lowercase) for _ in range(length))
+
class ResourceUsingTestCase(CiTestCase):
@@ -356,8 +346,9 @@ class FilesystemMockingTestCase(ResourceUsingTestCase):
def patchOpen(self, new_root):
trap_func = retarget_many_wrapper(new_root, 1, open)
- name = 'builtins.open' if PY3 else '__builtin__.open'
- self.patched_funcs.enter_context(mock.patch(name, trap_func))
+ self.patched_funcs.enter_context(
+ mock.patch('builtins.open', trap_func)
+ )
def patchStdoutAndStderr(self, stdout=None, stderr=None):
if stdout is not None:
@@ -430,7 +421,7 @@ def populate_dir(path, files):
p = os.path.sep.join([path, name])
util.ensure_dir(os.path.dirname(p))
with open(p, "wb") as fp:
- if isinstance(content, six.binary_type):
+ if isinstance(content, bytes):
fp.write(content)
else:
fp.write(content.encode('utf-8'))
diff --git a/cloudinit/tests/test_dhclient_hook.py b/cloudinit/tests/test_dhclient_hook.py
index 7aab8dd5..eadae81c 100644
--- a/cloudinit/tests/test_dhclient_hook.py
+++ b/cloudinit/tests/test_dhclient_hook.py
@@ -7,8 +7,8 @@ from cloudinit.tests.helpers import CiTestCase, dir2dict, populate_dir
import argparse
import json
-import mock
import os
+from unittest import mock
class TestDhclientHook(CiTestCase):
diff --git a/cloudinit/tests/test_gpg.py b/cloudinit/tests/test_gpg.py
index 0562b966..8dd57137 100644
--- a/cloudinit/tests/test_gpg.py
+++ b/cloudinit/tests/test_gpg.py
@@ -1,12 +1,12 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""Test gpg module."""
+from unittest import mock
+
from cloudinit import gpg
from cloudinit import util
from cloudinit.tests.helpers import CiTestCase
-import mock
-
@mock.patch("cloudinit.gpg.time.sleep")
@mock.patch("cloudinit.gpg.util.subp")
diff --git a/cloudinit/tests/test_netinfo.py b/cloudinit/tests/test_netinfo.py
index d76e768e..1c8a791e 100644
--- a/cloudinit/tests/test_netinfo.py
+++ b/cloudinit/tests/test_netinfo.py
@@ -11,6 +11,7 @@ from cloudinit.tests.helpers import CiTestCase, mock, readResource
# Example ifconfig and route output
SAMPLE_OLD_IFCONFIG_OUT = readResource("netinfo/old-ifconfig-output")
SAMPLE_NEW_IFCONFIG_OUT = readResource("netinfo/new-ifconfig-output")
+SAMPLE_FREEBSD_IFCONFIG_OUT = readResource("netinfo/freebsd-ifconfig-output")
SAMPLE_IPADDRSHOW_OUT = readResource("netinfo/sample-ipaddrshow-output")
SAMPLE_ROUTE_OUT_V4 = readResource("netinfo/sample-route-output-v4")
SAMPLE_ROUTE_OUT_V6 = readResource("netinfo/sample-route-output-v6")
@@ -18,6 +19,7 @@ SAMPLE_IPROUTE_OUT_V4 = readResource("netinfo/sample-iproute-output-v4")
SAMPLE_IPROUTE_OUT_V6 = readResource("netinfo/sample-iproute-output-v6")
NETDEV_FORMATTED_OUT = readResource("netinfo/netdev-formatted-output")
ROUTE_FORMATTED_OUT = readResource("netinfo/route-formatted-output")
+FREEBSD_NETDEV_OUT = readResource("netinfo/freebsd-netdev-formatted-output")
class TestNetInfo(CiTestCase):
@@ -45,6 +47,18 @@ class TestNetInfo(CiTestCase):
@mock.patch('cloudinit.netinfo.util.which')
@mock.patch('cloudinit.netinfo.util.subp')
+ def test_netdev_freebsd_nettools_pformat(self, m_subp, m_which):
+ """netdev_pformat properly rendering netdev new nettools info."""
+ m_subp.return_value = (SAMPLE_FREEBSD_IFCONFIG_OUT, '')
+ m_which.side_effect = lambda x: x if x == 'ifconfig' else None
+ content = netdev_pformat()
+ self.assertEqual(FREEBSD_NETDEV_OUT, content)
+
+ @mock.patch('cloudinit.netinfo.util.which')
+ @mock.patch('cloudinit.netinfo.util.subp')
def test_netdev_iproute_pformat(self, m_subp, m_which):
"""netdev_pformat properly rendering ip route info."""
m_subp.return_value = (SAMPLE_IPADDRSHOW_OUT, '')
diff --git a/cloudinit/tests/test_stages.py b/cloudinit/tests/test_stages.py
index 94b6b255..d5c9c0e4 100644
--- a/cloudinit/tests/test_stages.py
+++ b/cloudinit/tests/test_stages.py
@@ -6,6 +6,7 @@ import os
from cloudinit import stages
from cloudinit import sources
+from cloudinit.sources import NetworkConfigSource
from cloudinit.event import EventType
from cloudinit.util import write_file
@@ -37,6 +38,7 @@ class FakeDataSource(sources.DataSource):
class TestInit(CiTestCase):
with_logs = True
+ allowed_subp = False
def setUp(self):
super(TestInit, self).setUp()
@@ -57,84 +59,189 @@ class TestInit(CiTestCase):
(None, disable_file),
self.init._find_networking_config())
+ @mock.patch('cloudinit.stages.cmdline.read_initramfs_config')
@mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
- def test_wb__find_networking_config_disabled_by_kernel(self, m_cmdline):
+ def test_wb__find_networking_config_disabled_by_kernel(
+ self, m_cmdline, m_initramfs):
"""find_networking_config returns when disabled by kernel cmdline."""
m_cmdline.return_value = {'config': 'disabled'}
+ m_initramfs.return_value = {'config': ['fake_initrd']}
self.assertEqual(
- (None, 'cmdline'),
+ (None, NetworkConfigSource.cmdline),
self.init._find_networking_config())
self.assertEqual('DEBUG: network config disabled by cmdline\n',
self.logs.getvalue())
+ @mock.patch('cloudinit.stages.cmdline.read_initramfs_config')
@mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
- def test_wb__find_networking_config_disabled_by_datasrc(self, m_cmdline):
+ def test_wb__find_networking_config_disabled_by_initrd(
+ self, m_cmdline, m_initramfs):
+ """find_networking_config returns when disabled by kernel cmdline."""
+ m_cmdline.return_value = {}
+ m_initramfs.return_value = {'config': 'disabled'}
+ self.assertEqual(
+ (None, NetworkConfigSource.initramfs),
+ self.init._find_networking_config())
+ self.assertEqual('DEBUG: network config disabled by initramfs\n',
+ self.logs.getvalue())
+
+ @mock.patch('cloudinit.stages.cmdline.read_initramfs_config')
+ @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
+ def test_wb__find_networking_config_disabled_by_datasrc(
+ self, m_cmdline, m_initramfs):
"""find_networking_config returns when disabled by datasource cfg."""
m_cmdline.return_value = {} # Kernel doesn't disable networking
+ m_initramfs.return_value = {} # initramfs doesn't disable networking
self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}},
'network': {}} # system config doesn't disable
self.init.datasource = FakeDataSource(
network_config={'config': 'disabled'})
self.assertEqual(
- (None, 'ds'),
+ (None, NetworkConfigSource.ds),
self.init._find_networking_config())
self.assertEqual('DEBUG: network config disabled by ds\n',
self.logs.getvalue())
+ @mock.patch('cloudinit.stages.cmdline.read_initramfs_config')
@mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
- def test_wb__find_networking_config_disabled_by_sysconfig(self, m_cmdline):
+ def test_wb__find_networking_config_disabled_by_sysconfig(
+ self, m_cmdline, m_initramfs):
"""find_networking_config returns when disabled by system config."""
m_cmdline.return_value = {} # Kernel doesn't disable networking
+ m_initramfs.return_value = {} # initramfs doesn't disable networking
self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}},
'network': {'config': 'disabled'}}
self.assertEqual(
- (None, 'system_cfg'),
+ (None, NetworkConfigSource.system_cfg),
self.init._find_networking_config())
self.assertEqual('DEBUG: network config disabled by system_cfg\n',
self.logs.getvalue())
+ @mock.patch('cloudinit.stages.cmdline.read_initramfs_config')
+ @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
+ def test__find_networking_config_uses_datasrc_order(
+ self, m_cmdline, m_initramfs):
+ """find_networking_config should check sources in DS defined order"""
+ # cmdline and initramfs, which would normally be preferred over other
+ # sources, disable networking; in this case, though, the DS moves them
+ # later so its own config is preferred
+ m_cmdline.return_value = {'config': 'disabled'}
+ m_initramfs.return_value = {'config': 'disabled'}
+
+ ds_net_cfg = {'config': {'needle': True}}
+ self.init.datasource = FakeDataSource(network_config=ds_net_cfg)
+ self.init.datasource.network_config_sources = [
+ NetworkConfigSource.ds, NetworkConfigSource.system_cfg,
+ NetworkConfigSource.cmdline, NetworkConfigSource.initramfs]
+
+ self.assertEqual(
+ (ds_net_cfg, NetworkConfigSource.ds),
+ self.init._find_networking_config())
+
+ @mock.patch('cloudinit.stages.cmdline.read_initramfs_config')
+ @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
+ def test__find_networking_config_warns_if_datasrc_uses_invalid_src(
+ self, m_cmdline, m_initramfs):
+ """find_networking_config should check sources in DS defined order"""
+ ds_net_cfg = {'config': {'needle': True}}
+ self.init.datasource = FakeDataSource(network_config=ds_net_cfg)
+ self.init.datasource.network_config_sources = [
+ 'invalid_src', NetworkConfigSource.ds]
+
+ self.assertEqual(
+ (ds_net_cfg, NetworkConfigSource.ds),
+ self.init._find_networking_config())
+ self.assertIn('WARNING: data source specifies an invalid network'
+ ' cfg_source: invalid_src',
+ self.logs.getvalue())
+
+ @mock.patch('cloudinit.stages.cmdline.read_initramfs_config')
@mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
- def test_wb__find_networking_config_returns_kernel(self, m_cmdline):
+ def test__find_networking_config_warns_if_datasrc_uses_unavailable_src(
+ self, m_cmdline, m_initramfs):
+ """find_networking_config should check sources in DS defined order"""
+ ds_net_cfg = {'config': {'needle': True}}
+ self.init.datasource = FakeDataSource(network_config=ds_net_cfg)
+ self.init.datasource.network_config_sources = [
+ NetworkConfigSource.fallback, NetworkConfigSource.ds]
+
+ self.assertEqual(
+ (ds_net_cfg, NetworkConfigSource.ds),
+ self.init._find_networking_config())
+ self.assertIn('WARNING: data source specifies an unavailable network'
+ ' cfg_source: fallback',
+ self.logs.getvalue())
+
+ @mock.patch('cloudinit.stages.cmdline.read_initramfs_config')
+ @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
+ def test_wb__find_networking_config_returns_kernel(
+ self, m_cmdline, m_initramfs):
"""find_networking_config returns kernel cmdline config if present."""
expected_cfg = {'config': ['fakekernel']}
m_cmdline.return_value = expected_cfg
+ m_initramfs.return_value = {'config': ['fake_initrd']}
self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}},
'network': {'config': ['fakesys_config']}}
self.init.datasource = FakeDataSource(
network_config={'config': ['fakedatasource']})
self.assertEqual(
- (expected_cfg, 'cmdline'),
+ (expected_cfg, NetworkConfigSource.cmdline),
self.init._find_networking_config())
+ @mock.patch('cloudinit.stages.cmdline.read_initramfs_config')
@mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
- def test_wb__find_networking_config_returns_system_cfg(self, m_cmdline):
+ def test_wb__find_networking_config_returns_initramfs(
+ self, m_cmdline, m_initramfs):
+ """find_networking_config returns kernel cmdline config if present."""
+ expected_cfg = {'config': ['fake_initrd']}
+ m_cmdline.return_value = {}
+ m_initramfs.return_value = expected_cfg
+ self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}},
+ 'network': {'config': ['fakesys_config']}}
+ self.init.datasource = FakeDataSource(
+ network_config={'config': ['fakedatasource']})
+ self.assertEqual(
+ (expected_cfg, NetworkConfigSource.initramfs),
+ self.init._find_networking_config())
+
+ @mock.patch('cloudinit.stages.cmdline.read_initramfs_config')
+ @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
+ def test_wb__find_networking_config_returns_system_cfg(
+ self, m_cmdline, m_initramfs):
"""find_networking_config returns system config when present."""
m_cmdline.return_value = {} # No kernel network config
+ m_initramfs.return_value = {} # no initramfs network config
expected_cfg = {'config': ['fakesys_config']}
self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}},
'network': expected_cfg}
self.init.datasource = FakeDataSource(
network_config={'config': ['fakedatasource']})
self.assertEqual(
- (expected_cfg, 'system_cfg'),
+ (expected_cfg, NetworkConfigSource.system_cfg),
self.init._find_networking_config())
+ @mock.patch('cloudinit.stages.cmdline.read_initramfs_config')
@mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
- def test_wb__find_networking_config_returns_datasrc_cfg(self, m_cmdline):
+ def test_wb__find_networking_config_returns_datasrc_cfg(
+ self, m_cmdline, m_initramfs):
"""find_networking_config returns datasource net config if present."""
m_cmdline.return_value = {} # No kernel network config
+ m_initramfs.return_value = {} # no initramfs network config
# No system config for network in setUp
expected_cfg = {'config': ['fakedatasource']}
self.init.datasource = FakeDataSource(network_config=expected_cfg)
self.assertEqual(
- (expected_cfg, 'ds'),
+ (expected_cfg, NetworkConfigSource.ds),
self.init._find_networking_config())
+ @mock.patch('cloudinit.stages.cmdline.read_initramfs_config')
@mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
- def test_wb__find_networking_config_returns_fallback(self, m_cmdline):
+ def test_wb__find_networking_config_returns_fallback(
+ self, m_cmdline, m_initramfs):
"""find_networking_config returns fallback config if not defined."""
m_cmdline.return_value = {} # Kernel doesn't disable networking
+ m_initramfs.return_value = {} # no initramfs network config
# Neither datasource nor system_info disable or provide network
fake_cfg = {'config': [{'type': 'physical', 'name': 'eth9'}],
@@ -147,7 +254,7 @@ class TestInit(CiTestCase):
distro = self.init.distro
distro.generate_fallback_config = fake_generate_fallback
self.assertEqual(
- (fake_cfg, 'fallback'),
+ (fake_cfg, NetworkConfigSource.fallback),
self.init._find_networking_config())
self.assertNotIn('network config disabled', self.logs.getvalue())
@@ -166,8 +273,9 @@ class TestInit(CiTestCase):
'INFO: network config is disabled by %s' % disable_file,
self.logs.getvalue())
+ @mock.patch('cloudinit.net.get_interfaces_by_mac')
@mock.patch('cloudinit.distros.ubuntu.Distro')
- def test_apply_network_on_new_instance(self, m_ubuntu):
+ def test_apply_network_on_new_instance(self, m_ubuntu, m_macs):
"""Call distro apply_network_config methods on is_new_instance."""
net_cfg = {
'version': 1, 'config': [
@@ -175,7 +283,9 @@ class TestInit(CiTestCase):
'name': 'eth9', 'mac_address': '42:42:42:42:42:42'}]}
def fake_network_config():
- return net_cfg, 'fallback'
+ return net_cfg, NetworkConfigSource.fallback
+
+ m_macs.return_value = {'42:42:42:42:42:42': 'eth9'}
self.init._find_networking_config = fake_network_config
self.init.apply_network_config(True)
@@ -195,7 +305,7 @@ class TestInit(CiTestCase):
'name': 'eth9', 'mac_address': '42:42:42:42:42:42'}]}
def fake_network_config():
- return net_cfg, 'fallback'
+ return net_cfg, NetworkConfigSource.fallback
self.init._find_networking_config = fake_network_config
self.init.apply_network_config(True)
@@ -206,8 +316,9 @@ class TestInit(CiTestCase):
" nor datasource network update on '%s' event" % EventType.BOOT,
self.logs.getvalue())
+ @mock.patch('cloudinit.net.get_interfaces_by_mac')
@mock.patch('cloudinit.distros.ubuntu.Distro')
- def test_apply_network_on_datasource_allowed_event(self, m_ubuntu):
+ def test_apply_network_on_datasource_allowed_event(self, m_ubuntu, m_macs):
"""Apply network if datasource.update_metadata permits BOOT event."""
old_instance_id = os.path.join(
self.init.paths.get_cpath('data'), 'instance-id')
@@ -218,7 +329,9 @@ class TestInit(CiTestCase):
'name': 'eth9', 'mac_address': '42:42:42:42:42:42'}]}
def fake_network_config():
- return net_cfg, 'fallback'
+ return net_cfg, NetworkConfigSource.fallback
+
+ m_macs.return_value = {'42:42:42:42:42:42': 'eth9'}
self.init._find_networking_config = fake_network_config
self.init.datasource = FakeDataSource(paths=self.init.paths)
diff --git a/cloudinit/tests/test_temp_utils.py b/cloudinit/tests/test_temp_utils.py
index ffbb92cd..4a52ef89 100644
--- a/cloudinit/tests/test_temp_utils.py
+++ b/cloudinit/tests/test_temp_utils.py
@@ -2,8 +2,9 @@
"""Tests for cloudinit.temp_utils"""
-from cloudinit.temp_utils import mkdtemp, mkstemp
+from cloudinit.temp_utils import mkdtemp, mkstemp, tempdir
from cloudinit.tests.helpers import CiTestCase, wrap_and_call
+import os
class TestTempUtils(CiTestCase):
@@ -98,4 +99,19 @@ class TestTempUtils(CiTestCase):
self.assertEqual('/fake/return/path', retval)
self.assertEqual([{'dir': '/run/cloud-init/tmp'}], calls)
+ def test_tempdir_error_suppression(self):
+ """test tempdir suppresses errors during directory removal."""
+
+ with self.assertRaises(OSError):
+ with tempdir(prefix='cloud-init-dhcp-') as tdir:
+ os.rmdir(tdir)
+ # As a result, the directory is already gone,
+ # so shutil.rmtree should raise OSError
+
+ with tempdir(rmtree_ignore_errors=True,
+ prefix='cloud-init-dhcp-') as tdir:
+ os.rmdir(tdir)
+ # Since the directory is already gone, shutil.rmtree would raise
+ # OSError, but we suppress that
+
# vi: ts=4 expandtab
diff --git a/cloudinit/tests/test_url_helper.py b/cloudinit/tests/test_url_helper.py
index aa9f3ec1..1674120f 100644
--- a/cloudinit/tests/test_url_helper.py
+++ b/cloudinit/tests/test_url_helper.py
@@ -4,6 +4,7 @@ from cloudinit.url_helper import (
NOT_FOUND, UrlError, oauth_headers, read_file_or_url, retry_on_url_exc)
from cloudinit.tests.helpers import CiTestCase, mock, skipIf
from cloudinit import util
+from cloudinit import version
import httpretty
import requests
@@ -17,6 +18,9 @@ except ImportError:
_missing_oauthlib_dep = True
+M_PATH = 'cloudinit.url_helper.'
+
+
class TestOAuthHeaders(CiTestCase):
def test_oauth_headers_raises_not_implemented_when_oathlib_missing(self):
@@ -67,6 +71,55 @@ class TestReadFileOrUrl(CiTestCase):
self.assertEqual(result.contents, data)
self.assertEqual(str(result), data.decode('utf-8'))
+ @mock.patch(M_PATH + 'readurl')
+ def test_read_file_or_url_passes_params_to_readurl(self, m_readurl):
+ """read_file_or_url passes all params through to readurl."""
+ url = 'http://hostname/path'
+ response = 'This is my url content\n'
+ m_readurl.return_value = response
+ params = {'url': url, 'timeout': 1, 'retries': 2,
+ 'headers': {'somehdr': 'val'},
+ 'data': 'data', 'sec_between': 1,
+ 'ssl_details': {'cert_file': '/path/cert.pem'},
+ 'headers_cb': 'headers_cb', 'exception_cb': 'exception_cb'}
+ self.assertEqual(response, read_file_or_url(**params))
+ params.pop('url') # url is passed in as a positional arg
+ self.assertEqual([mock.call(url, **params)], m_readurl.call_args_list)
+
+ def test_wb_read_url_defaults_honored_by_read_file_or_url_callers(self):
+ """Readurl param defaults used when unspecified by read_file_or_url
+
+ Param defaults tested are as follows:
+ retries: 0, no additional headers beyond the default, method: GET,
+ data: None, check_status: True and allow_redirects: True
+ """
+ url = 'http://hostname/path'
+
+ m_response = mock.MagicMock()
+
+ class FakeSession(requests.Session):
+ @classmethod
+ def request(cls, **kwargs):
+ self.assertEqual(
+ {'url': url, 'allow_redirects': True, 'method': 'GET',
+ 'headers': {
+ 'User-Agent': 'Cloud-Init/%s' % (
+ version.version_string())}},
+ kwargs)
+ return m_response
+
+ with mock.patch(M_PATH + 'requests.Session') as m_session:
+ error = requests.exceptions.HTTPError('broke')
+ m_session.side_effect = [error, FakeSession()]
+ # assert no retries and check_status == True
+ with self.assertRaises(UrlError) as context_manager:
+ response = read_file_or_url(url)
+ self.assertEqual('broke', str(context_manager.exception))
+ # assert default headers, method, url and allow_redirects True
+ # Success on 2nd call with FakeSession
+ response = read_file_or_url(url)
+ self.assertEqual(m_response, response._response)
+
class TestRetryOnUrlExc(CiTestCase):
diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py
index e3d2dbaa..11f37000 100644
--- a/cloudinit/tests/test_util.py
+++ b/cloudinit/tests/test_util.py
@@ -2,7 +2,9 @@
"""Tests for cloudinit.util"""
+import base64
import logging
+import json
import platform
import cloudinit.util as util
@@ -187,6 +189,21 @@ class TestUtil(CiTestCase):
self.assertEqual(is_rw, False)
+class TestUptime(CiTestCase):
+
+ @mock.patch('cloudinit.util.boottime')
+ @mock.patch('cloudinit.util.os.path.exists')
+ @mock.patch('cloudinit.util.time.time')
+ def test_uptime_non_linux_path(self, m_time, m_exists, m_boottime):
+ boottime = 1000.0
+ uptime = 10.0
+ m_boottime.return_value = boottime
+ m_time.return_value = boottime + uptime
+ m_exists.return_value = False
+ result = util.uptime()
+ self.assertEqual(str(uptime), result)
+
+
class TestShellify(CiTestCase):
def test_input_dict_raises_type_error(self):
@@ -385,6 +402,11 @@ class TestUdevadmSettle(CiTestCase):
@mock.patch('os.path.exists')
class TestGetLinuxDistro(CiTestCase):
+ def setUp(self):
+ # python2 has no lru_cache, and therefore, no cache_clear()
+ if hasattr(util.get_linux_distro, "cache_clear"):
+ util.get_linux_distro.cache_clear()
+
@classmethod
def os_release_exists(self, path):
"""Side effect function"""
@@ -397,6 +419,12 @@ class TestGetLinuxDistro(CiTestCase):
if path == '/etc/redhat-release':
return 1
+ @classmethod
+ def freebsd_version_exists(self, path):
+ """Side effect function """
+ if path == '/bin/freebsd-version':
+ return 1
+
@mock.patch('cloudinit.util.load_file')
def test_get_linux_distro_quoted_name(self, m_os_release, m_path_exists):
"""Verify we get the correct name if the os-release file has
@@ -415,6 +443,14 @@ class TestGetLinuxDistro(CiTestCase):
dist = util.get_linux_distro()
self.assertEqual(('ubuntu', '16.04', 'xenial'), dist)
+ @mock.patch('cloudinit.util.subp')
+ def test_get_linux_freebsd(self, m_subp, m_path_exists):
+ """Verify we get the correct name and release name on FreeBSD."""
+ m_path_exists.side_effect = TestGetLinuxDistro.freebsd_version_exists
+ m_subp.return_value = ("12.0-RELEASE-p10\n", '')
+ dist = util.get_linux_distro()
+ self.assertEqual(('freebsd', '12.0-RELEASE-p10', ''), dist)
+
@mock.patch('cloudinit.util.load_file')
def test_get_linux_centos6(self, m_os_release, m_path_exists):
"""Verify we get the correct name and release name on CentOS 6."""
@@ -502,7 +538,7 @@ class TestGetLinuxDistro(CiTestCase):
self.assertEqual(
('opensuse-tumbleweed', '20180920', platform.machine()), dist)
- @mock.patch('platform.dist')
+ @mock.patch('platform.dist', create=True)
def test_get_linux_distro_no_data(self, m_platform_dist, m_path_exists):
"""Verify we get no information if os-release does not exist"""
m_platform_dist.return_value = ('', '', '')
@@ -510,7 +546,7 @@ class TestGetLinuxDistro(CiTestCase):
dist = util.get_linux_distro()
self.assertEqual(('', '', ''), dist)
- @mock.patch('platform.dist')
+ @mock.patch('platform.dist', create=True)
def test_get_linux_distro_no_impl(self, m_platform_dist, m_path_exists):
"""Verify we get an empty tuple when no information exists and
Exceptions are not propagated"""
@@ -519,7 +555,7 @@ class TestGetLinuxDistro(CiTestCase):
dist = util.get_linux_distro()
self.assertEqual(('', '', ''), dist)
- @mock.patch('platform.dist')
+ @mock.patch('platform.dist', create=True)
def test_get_linux_distro_plat_data(self, m_platform_dist, m_path_exists):
"""Verify we get the correct platform information"""
m_platform_dist.return_value = ('foo', '1.1', 'aarch64')
@@ -528,6 +564,24 @@ class TestGetLinuxDistro(CiTestCase):
self.assertEqual(('foo', '1.1', 'aarch64'), dist)
+class TestJsonDumps(CiTestCase):
+ def test_is_str(self):
+ """json_dumps should return a string."""
+ self.assertTrue(isinstance(util.json_dumps({'abc': '123'}), str))
+
+ def test_utf8(self):
+ smiley = '\\ud83d\\ude03'
+ self.assertEqual(
+ {'smiley': smiley},
+ json.loads(util.json_dumps({'smiley': smiley})))
+
+ def test_non_utf8(self):
+ blob = b'\xba\x03Qx-#y\xea'
+ self.assertEqual(
+ {'blob': 'ci-b64:' + base64.b64encode(blob).decode('utf-8')},
+ json.loads(util.json_dumps({'blob': blob})))
+
+
@mock.patch('os.path.exists')
class TestIsLXD(CiTestCase):
diff --git a/cloudinit/tests/test_version.py b/cloudinit/tests/test_version.py
index a96c2a47..778a762c 100644
--- a/cloudinit/tests/test_version.py
+++ b/cloudinit/tests/test_version.py
@@ -1,10 +1,10 @@
# This file is part of cloud-init. See LICENSE file for license information.
+from unittest import mock
+
from cloudinit.tests.helpers import CiTestCase
from cloudinit import version
-import mock
-
class TestExportsFeatures(CiTestCase):
def test_has_network_config_v1(self):
diff --git a/cloudinit/type_utils.py b/cloudinit/type_utils.py
index 6132654b..2c1ae368 100644
--- a/cloudinit/type_utils.py
+++ b/cloudinit/type_utils.py
@@ -10,29 +10,18 @@
import types
-import six
-
-if six.PY3:
- _NAME_TYPES = (
- types.ModuleType,
- types.FunctionType,
- types.LambdaType,
- type,
- )
-else:
- _NAME_TYPES = (
- types.TypeType,
- types.ModuleType,
- types.FunctionType,
- types.LambdaType,
- types.ClassType,
- )
+_NAME_TYPES = (
+ types.ModuleType,
+ types.FunctionType,
+ types.LambdaType,
+ type,
+)
def obj_name(obj):
if isinstance(obj, _NAME_TYPES):
- return six.text_type(obj.__name__)
+ return str(obj.__name__)
else:
if not hasattr(obj, '__class__'):
return repr(obj)
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index 396d69ae..eeb27aa8 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -8,39 +8,31 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
+import copy
import json
import os
-import requests
-import six
import time
-
from email.utils import parsedate
from errno import ENOENT
from functools import partial
+from http.client import NOT_FOUND
from itertools import count
-from requests import exceptions
+from urllib.parse import urlparse, urlunparse, quote
-from six.moves.urllib.parse import (
- urlparse, urlunparse,
- quote as urlquote)
+import requests
+from requests import exceptions
from cloudinit import log as logging
from cloudinit import version
LOG = logging.getLogger(__name__)
-if six.PY2:
- import httplib
- NOT_FOUND = httplib.NOT_FOUND
-else:
- import http.client
- NOT_FOUND = http.client.NOT_FOUND
-
# Check if requests has ssl support (added in requests >= 0.8.8)
SSL_ENABLED = False
CONFIG_ENABLED = False # This was added in 0.7 (but taken out in >=1.0)
_REQ_VER = None
+REDACTED = 'REDACTED'
try:
from distutils.version import LooseVersion
import pkg_resources
@@ -71,7 +63,7 @@ def combine_url(base, *add_ons):
path = url_parsed[2]
if path and not path.endswith("/"):
path += "/"
- path += urlquote(str(add_on), safe="/:")
+ path += quote(str(add_on), safe="/:")
url_parsed[2] = path
return urlunparse(url_parsed)
@@ -81,14 +73,19 @@ def combine_url(base, *add_ons):
return url
-def read_file_or_url(url, timeout=5, retries=10,
- headers=None, data=None, sec_between=1, ssl_details=None,
- headers_cb=None, exception_cb=None):
+def read_file_or_url(url, **kwargs):
+ """Wrapper function around readurl to allow passing a file path as url.
+
+ When url is not a local file path, pass any kwargs through to readurl.
+
+ In that passthrough case, readurl's defaults apply to any parameters
+ left unspecified; see readurl's signature in this module for param docs.
+ """
url = url.lstrip()
if url.startswith("/"):
url = "file://%s" % url
if url.lower().startswith("file://"):
- if data:
+ if kwargs.get("data"):
LOG.warning("Unable to post data to file resource %s", url)
file_path = url[len("file://"):]
try:
@@ -101,10 +98,7 @@ def read_file_or_url(url, timeout=5, retries=10,
raise UrlError(cause=e, code=code, headers=None, url=url)
return FileResponse(file_path, contents=contents)
else:
- return readurl(url, timeout=timeout, retries=retries, headers=headers,
- headers_cb=headers_cb, data=data,
- sec_between=sec_between, ssl_details=ssl_details,
- exception_cb=exception_cb)
+ return readurl(url, **kwargs)
# Made to have same accessors as UrlResponse so that the
@@ -197,20 +191,53 @@ def _get_ssl_args(url, ssl_details):
def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
- headers=None, headers_cb=None, ssl_details=None,
- check_status=True, allow_redirects=True, exception_cb=None,
- session=None, infinite=False, log_req_resp=True):
+ headers=None, headers_cb=None, headers_redact=None,
+ ssl_details=None, check_status=True, allow_redirects=True,
+ exception_cb=None, session=None, infinite=False, log_req_resp=True,
+ request_method=None):
+ """Wrapper around requests.Session to read the url and retry if necessary
+
+ :param url: Mandatory url to request.
+ :param data: Optional form data to post the URL. Will set request_method
+ to 'POST' if present.
+ :param timeout: Timeout in seconds to wait for a response
+ :param retries: Number of times to retry on exception if exception_cb is
+ None or exception_cb returns True for the exception caught. Default is
+ to fail with 0 retries on exception.
+ :param sec_between: Default 1: number of seconds passed to time.sleep
+ between retries. None or -1 means don't sleep.
+ :param headers: Optional dict of headers to send during request
+ :param headers_cb: Optional callable returning a dict of values to send as
+ headers during request
+ :param headers_redact: Optional list of header names to redact from the log
+ :param ssl_details: Optional dict providing key_file, ca_certs, and
+ cert_file keys for use in ssl connections.
+ :param check_status: Optional boolean set True to raise when HTTPError
+ occurs. Default: True.
+ :param allow_redirects: Optional boolean passed straight to Session.request
+ as 'allow_redirects'. Default: True.
+ :param exception_cb: Optional callable which accepts the params
+ msg and exception and returns a boolean True if retries are permitted.
+ :param session: Optional existing requests.Session instance to reuse.
+ :param infinite: Bool, set True to retry indefinitely. Default: False.
+ :param log_req_resp: Set False to turn off verbose debug messages.
+ :param request_method: String passed as 'method' to Session.request.
+ Typically GET, or POST. Default: POST if data is provided, GET
+ otherwise.
+ """
url = _cleanurl(url)
req_args = {
'url': url,
}
req_args.update(_get_ssl_args(url, ssl_details))
req_args['allow_redirects'] = allow_redirects
- req_args['method'] = 'GET'
+ if not request_method:
+ request_method = 'POST' if data else 'GET'
+ req_args['method'] = request_method
if timeout is not None:
req_args['timeout'] = max(float(timeout), 0)
- if data:
- req_args['method'] = 'POST'
+ if headers_redact is None:
+ headers_redact = []
# It doesn't seem like config
# was added in older library versions (or newer ones either), thus we
# need to manually do the retries if it wasn't...
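The method-selection change below reduces to a small rule; a hedged sketch (pick_method is an illustrative name, not part of the diff):

    def pick_method(request_method=None, data=None):
        # POST when a body is supplied, GET otherwise, unless the caller
        # explicitly overrides the method.
        return request_method or ('POST' if data else 'GET')

    assert pick_method() == 'GET'
    assert pick_method(data=b'x') == 'POST'
    assert pick_method('PUT', data=b'x') == 'PUT'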
@@ -255,6 +282,12 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
if k == 'data':
continue
filtered_req_args[k] = v
+ if k == 'headers':
+ for hkey, _hval in v.items():
+ if hkey in headers_redact:
+ # deep-copy the headers dict so redaction cannot
+ # mutate the real request arguments
+ filtered_req_args[k] = copy.deepcopy(req_args[k])
+ filtered_req_args[k][hkey] = REDACTED
try:
if log_req_resp:
@@ -307,9 +340,9 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
return None # Should throw before this...
-def wait_for_url(urls, max_wait=None, timeout=None,
- status_cb=None, headers_cb=None, sleep_time=1,
- exception_cb=None, sleep_time_cb=None):
+def wait_for_url(urls, max_wait=None, timeout=None, status_cb=None,
+ headers_cb=None, headers_redact=None, sleep_time=1,
+ exception_cb=None, sleep_time_cb=None, request_method=None):
"""
urls: a list of urls to try
max_wait: roughly the maximum time to wait before giving up
@@ -320,15 +353,18 @@ def wait_for_url(urls, max_wait=None, timeout=None,
status_cb: call method with string message when a url is not available
headers_cb: call method with single argument of url to get headers
for request.
+ headers_redact: a list of header names to redact from the log
exception_cb: call method with 2 arguments 'msg' (per status_cb) and
'exception', the exception that occurred.
sleep_time_cb: call method with 2 arguments (response, loop_n) that
generates the next sleep time.
+ request_method: indicate the type of HTTP request, GET, PUT, or POST
+ returns: tuple of (url, response contents), on failure, (False, None)
- the idea of this routine is to wait for the EC2 metdata service to
+ the idea of this routine is to wait for the EC2 metadata service to
come up. On both Eucalyptus and EC2 we have seen the case where
the instance hit the MD before the MD service was up. EC2 seems
- to have permenantely fixed this, though.
+ to have permanently fixed this, though.
In openstack, the metadata service might be painfully slow, and
unable to avoid hitting a timeout of even up to 10 seconds or more
@@ -337,7 +373,7 @@ def wait_for_url(urls, max_wait=None, timeout=None,
Offset those needs with the need to not hang forever (and block boot)
on a system where cloud-init is configured to look for EC2 Metadata
service but is not going to find one. It is possible that the instance
- data host (169.254.169.254) may be firewalled off Entirely for a sytem,
+ data host (169.254.169.254) may be firewalled off entirely for a system,
meaning that the connection will block forever unless a timeout is set.
A value of None for max_wait will retry indefinitely.
@@ -380,8 +416,10 @@ def wait_for_url(urls, max_wait=None, timeout=None,
else:
headers = {}
- response = readurl(url, headers=headers, timeout=timeout,
- check_status=False)
+ response = readurl(
+ url, headers=headers, headers_redact=headers_redact,
+ timeout=timeout, check_status=False,
+ request_method=request_method)
if not response.contents:
reason = "empty response [%s]" % (response.code)
url_exc = UrlError(ValueError(reason), code=response.code,
@@ -391,7 +429,7 @@ def wait_for_url(urls, max_wait=None, timeout=None,
url_exc = UrlError(ValueError(reason), code=response.code,
headers=response.headers, url=url)
else:
- return url
+ return url, response.contents
except UrlError as e:
reason = "request error [%s]" % e
url_exc = e
@@ -420,7 +458,7 @@ def wait_for_url(urls, max_wait=None, timeout=None,
sleep_time)
time.sleep(sleep_time)
- return False
+ return False, None
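Callers of wait_for_url now unpack a pair in both outcomes. A toy model of the new contract (wait_for_first and fetch are stand-ins for the real readurl loop):

    def wait_for_first(urls, fetch):
        for url in urls:
            contents = fetch(url)
            if contents:
                return url, contents
        return False, None  # failure still unpacks into two names

    url, contents = wait_for_first(['a', 'b'],
                                   lambda u: b'data' if u == 'b' else b'')
    assert (url, contents) == ('b', b'data')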
class OauthUrlHelper(object):
@@ -521,7 +559,7 @@ class OauthUrlHelper(object):
if extra_exception_cb:
ret = extra_exception_cb(msg, exception)
finally:
- self.exception_cb(msg, exception)
+ self.exception_cb(msg, exception)
return ret
def _headers_cb(self, extra_headers_cb, url):
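How the headers_redact plumbing behaves end to end, as a standalone sketch (filter_for_log is a made-up name): only the logged copy is redacted; the request itself still carries the real value.

    import copy

    REDACTED = 'REDACTED'

    def filter_for_log(req_args, headers_redact):
        filtered = {}
        for k, v in req_args.items():
            if k == 'data':
                continue
            filtered[k] = v
            if k == 'headers' and any(h in v for h in headers_redact):
                filtered[k] = copy.deepcopy(v)  # never mutate the original
                for h in headers_redact:
                    if h in filtered[k]:
                        filtered[k][h] = REDACTED
        return filtered

    args = {'url': 'http://x', 'headers': {'X-md-token': 'secret'}}
    assert filter_for_log(args, ['X-md-token'])['headers']['X-md-token'] == REDACTED
    assert args['headers']['X-md-token'] == 'secret'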
diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index ed83d2d8..6f41b03a 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -9,14 +9,11 @@
# This file is part of cloud-init. See LICENSE file for license information.
import os
-
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.nonmultipart import MIMENonMultipart
from email.mime.text import MIMEText
-import six
-
from cloudinit import handlers
from cloudinit import log as logging
from cloudinit.url_helper import read_file_or_url, UrlError
@@ -224,7 +221,7 @@ class UserDataProcessor(object):
content = util.load_file(include_once_fn)
else:
try:
- resp = read_file_or_url(include_url,
+ resp = read_file_or_url(include_url, timeout=5, retries=10,
ssl_details=self.ssl_details)
if include_once_on and resp.ok():
util.write_file(include_once_fn, resp.contents,
@@ -259,7 +256,7 @@ class UserDataProcessor(object):
# filename and type not be present
# or
# scalar(payload)
- if isinstance(ent, six.string_types):
+ if isinstance(ent, str):
ent = {'content': ent}
if not isinstance(ent, (dict)):
# TODO(harlowja) raise?
@@ -269,13 +266,13 @@ class UserDataProcessor(object):
mtype = ent.get('type')
if not mtype:
default = ARCHIVE_UNDEF_TYPE
- if isinstance(content, six.binary_type):
+ if isinstance(content, bytes):
default = ARCHIVE_UNDEF_BINARY_TYPE
mtype = handlers.type_from_starts_with(content, default)
maintype, subtype = mtype.split('/', 1)
if maintype == "text":
- if isinstance(content, six.binary_type):
+ if isinstance(content, bytes):
content = content.decode()
msg = MIMEText(content, _subtype=subtype)
else:
@@ -348,7 +345,7 @@ def convert_string(raw_data, content_type=NOT_MULTIPART_TYPE):
msg.set_payload(data)
return msg
- if isinstance(raw_data, six.text_type):
+ if isinstance(raw_data, str):
bdata = raw_data.encode('utf-8')
else:
bdata = raw_data
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 7800f7bc..c02b3d9a 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -10,7 +10,6 @@
import contextlib
import copy as obj_copy
-import ctypes
import email
import glob
import grp
@@ -38,7 +37,6 @@ from base64 import b64decode, b64encode
from six.moves.urllib import parse as urlparse
import six
-import yaml
from cloudinit import importer
from cloudinit import log as logging
@@ -52,9 +50,14 @@ from cloudinit import version
from cloudinit.settings import (CFG_BUILTIN)
try:
- string_types = (basestring,)
-except NameError:
- string_types = (str,)
+ from functools import lru_cache
+except ImportError:
+ def lru_cache():
+ """pass-thru replace for Python3's lru_cache()"""
+ def wrapper(f):
+ return f
+ return wrapper
+
_DNS_REDIRECT_IP = None
LOG = logging.getLogger(__name__)
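Why the shim above works: on interpreters without functools.lru_cache the decorator degrades to a no-op, so decorated helpers merely lose caching instead of failing at import. Sketch:

    try:
        from functools import lru_cache
    except ImportError:
        def lru_cache():
            """No-op stand-in: return the function unchanged."""
            def wrapper(f):
                return f
            return wrapper

    @lru_cache()
    def answer():
        print("computed")  # with the real lru_cache this prints only once
        return 42

    answer()
    answer()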
@@ -74,19 +77,21 @@ CONTAINER_TESTS = (['systemd-detect-virt', '--quiet', '--container'],
['running-in-container'],
['lxc-is-container'])
-PROC_CMDLINE = None
-
-_LSB_RELEASE = {}
-PY26 = sys.version_info[0:2] == (2, 6)
+@lru_cache()
+def get_dpkg_architecture(target=None):
+ """Return the sanitized string output by `dpkg --print-architecture`.
-def get_architecture(target=None):
+ N.B. This function is wrapped in functools.lru_cache, so repeated calls
+ won't shell out every time.
+ """
out, _ = subp(['dpkg', '--print-architecture'], capture=True,
target=target)
return out.strip()
-def _lsb_release(target=None):
+@lru_cache()
+def lsb_release(target=None):
fmap = {'Codename': 'codename', 'Description': 'description',
'Distributor ID': 'id', 'Release': 'release'}
@@ -109,23 +114,11 @@ def _lsb_release(target=None):
return data
-def lsb_release(target=None):
- if target_path(target) != "/":
- # do not use or update cache if target is provided
- return _lsb_release(target)
-
- global _LSB_RELEASE
- if not _LSB_RELEASE:
- data = _lsb_release()
- _LSB_RELEASE.update(data)
- return _LSB_RELEASE
-
-
def target_path(target, path=None):
# return 'path' inside target, accepting target as None
if target in (None, ""):
target = "/"
- elif not isinstance(target, string_types):
+ elif not isinstance(target, six.string_types):
raise ValueError("Unexpected input for target: %s" % target)
else:
target = os.path.abspath(target)
@@ -404,9 +397,10 @@ def translate_bool(val, addons=None):
def rand_str(strlen=32, select_from=None):
+ r = random.SystemRandom()
if not select_from:
select_from = string.ascii_letters + string.digits
- return "".join([random.choice(select_from) for _x in range(0, strlen)])
+ return "".join([r.choice(select_from) for _x in range(0, strlen)])
def rand_dict_key(dictionary, postfix=None):
@@ -553,6 +547,7 @@ def is_ipv4(instr):
return len(toks) == 4
+@lru_cache()
def is_FreeBSD():
return system_info()['variant'] == "freebsd"
@@ -602,6 +597,7 @@ def _parse_redhat_release(release_file=None):
return {}
+@lru_cache()
def get_linux_distro():
distro_name = ''
distro_version = ''
@@ -629,11 +625,15 @@ def get_linux_distro():
flavor = match.groupdict()['codename']
if distro_name == 'rhel':
distro_name = 'redhat'
+ elif os.path.exists('/bin/freebsd-version'):
+ distro_name = 'freebsd'
+ distro_version, _ = subp(['uname', '-r'])
+ distro_version = distro_version.strip()
else:
dist = ('', '', '')
try:
- # Will be removed in 3.7
- dist = platform.dist() # pylint: disable=W1505
+ # Was removed in 3.8
+ dist = platform.dist() # pylint: disable=W1505,E1101
except Exception:
pass
finally:
@@ -649,6 +649,7 @@ def get_linux_distro():
return (distro_name, distro_version, flavor)
+@lru_cache()
def system_info():
info = {
'platform': platform.platform(),
@@ -662,7 +663,8 @@ def system_info():
var = 'unknown'
if system == "linux":
linux_dist = info['dist'][0].lower()
- if linux_dist in ('centos', 'debian', 'fedora', 'rhel', 'suse'):
+ if linux_dist in (
+ 'arch', 'centos', 'debian', 'fedora', 'rhel', 'suse'):
var = linux_dist
elif linux_dist in ('ubuntu', 'linuxmint', 'mint'):
var = 'ubuntu'
@@ -709,6 +711,21 @@ def get_cfg_option_list(yobj, key, default=None):
# get a cfg entry by its path array
# for f['a']['b']: get_cfg_by_path(mycfg,('a','b'))
def get_cfg_by_path(yobj, keyp, default=None):
+ """Return the value of the item at path C{keyp} in C{yobj}.
+
+ example:
+ get_cfg_by_path({'a': {'b': {'num': 4}}}, 'a/b/num') == 4
+ get_cfg_by_path({'a': {'b': {'num': 4}}}, 'c/d') == None
+
+ @param yobj: A dictionary.
+ @param keyp: A path inside yobj. It can be a '/' delimited string,
+ or an iterable.
+ @param default: The default to return if the path does not exist.
+ @return: The value of the item at keyp, or 'default' if the path
+ is not found."""
+
+ if isinstance(keyp, six.string_types):
+ keyp = keyp.split("/")
cur = yobj
for tok in keyp:
if tok not in cur:
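A worked example of the get_cfg_by_path semantics documented above, sketched as a standalone function with the same behavior (get_by_path is an illustrative name):

    def get_by_path(yobj, keyp, default=None):
        if isinstance(keyp, str):
            keyp = keyp.split("/")
        cur = yobj
        for tok in keyp:
            if not isinstance(cur, dict) or tok not in cur:
                return default
            cur = cur[tok]
        return cur

    assert get_by_path({'a': {'b': {'num': 4}}}, 'a/b/num') == 4
    assert get_by_path({'a': {'b': {'num': 4}}}, 'c/d') is None
    assert get_by_path({'a': 1}, ('a',)) == 1  # iterables work too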
@@ -948,7 +965,7 @@ def load_yaml(blob, default=None, allowed=(dict,)):
" but got %s instead") %
(allowed, type_utils.obj_name(converted)))
loaded = converted
- except (yaml.YAMLError, TypeError, ValueError) as e:
+ except (safeyaml.YAMLError, TypeError, ValueError) as e:
msg = 'Failed loading yaml blob'
mark = None
if hasattr(e, 'context_mark') and getattr(e, 'context_mark'):
@@ -966,13 +983,6 @@ def load_yaml(blob, default=None, allowed=(dict,)):
def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0):
- if base.startswith("/"):
- base = "file://%s" % base
-
- # default retries for file is 0. for network is 10
- if base.startswith("file://"):
- retries = file_retries
-
if base.find("%s") >= 0:
ud_url = base % ("user-data" + ext)
md_url = base % ("meta-data" + ext)
@@ -980,14 +990,14 @@ def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0):
ud_url = "%s%s%s" % (base, "user-data", ext)
md_url = "%s%s%s" % (base, "meta-data", ext)
- md_resp = url_helper.read_file_or_url(md_url, timeout, retries,
- file_retries)
+ md_resp = url_helper.read_file_or_url(md_url, timeout=timeout,
+ retries=retries)
md = None
if md_resp.ok():
md = load_yaml(decode_binary(md_resp.contents), default={})
- ud_resp = url_helper.read_file_or_url(ud_url, timeout, retries,
- file_retries)
+ ud_resp = url_helper.read_file_or_url(ud_url, timeout=timeout,
+ retries=retries)
ud = None
if ud_resp.ok():
ud = ud_resp.contents
@@ -1362,14 +1372,8 @@ def load_file(fname, read_cb=None, quiet=False, decode=True):
return contents
-def get_cmdline():
- if 'DEBUG_PROC_CMDLINE' in os.environ:
- return os.environ["DEBUG_PROC_CMDLINE"]
-
- global PROC_CMDLINE
- if PROC_CMDLINE is not None:
- return PROC_CMDLINE
-
+@lru_cache()
+def _get_cmdline():
if is_container():
try:
contents = load_file("/proc/1/cmdline")
@@ -1384,10 +1388,16 @@ def get_cmdline():
except Exception:
cmdline = ""
- PROC_CMDLINE = cmdline
return cmdline
+def get_cmdline():
+ if 'DEBUG_PROC_CMDLINE' in os.environ:
+ return os.environ["DEBUG_PROC_CMDLINE"]
+
+ return _get_cmdline()
+
+
def pipe_in_out(in_fh, out_fh, chunk_size=1024, chunk_cb=None):
bytes_piped = 0
while True:
@@ -1590,20 +1600,33 @@ def json_serialize_default(_obj):
return 'Warning: redacted unserializable type {0}'.format(type(_obj))
-def json_dumps(data):
- """Return data in nicely formatted json."""
- return json.dumps(data, indent=1, sort_keys=True,
- separators=(',', ': '), default=json_serialize_default)
+def json_preserialize_binary(data):
+ """Preserialize any discovered binary values to avoid json.dumps issues.
+ Used only on python 2.7 where default type handling is not honored for
+ failure to encode binary data. LP: #1801364.
+ TODO(Drop this function when py2.7 support is dropped from cloud-init)
+ """
+ data = obj_copy.deepcopy(data)
+ for key, value in data.items():
+ if isinstance(value, (dict)):
+ data[key] = json_preserialize_binary(value)
+ if isinstance(value, bytes):
+ data[key] = 'ci-b64:{0}'.format(b64e(value))
+ return data
-def yaml_dumps(obj, explicit_start=True, explicit_end=True):
- """Return data in nicely formatted yaml."""
- return yaml.safe_dump(obj,
- line_break="\n",
- indent=4,
- explicit_start=explicit_start,
- explicit_end=explicit_end,
- default_flow_style=False)
+
+def json_dumps(data):
+ """Return data in nicely formatted json."""
+ try:
+ return json.dumps(
+ data, indent=1, sort_keys=True, separators=(',', ': '),
+ default=json_serialize_default)
+ except UnicodeDecodeError:
+ if sys.version_info[:2] == (2, 7):
+ data = json_preserialize_binary(data)
+ return json.dumps(data)
+ raise
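The py2.7 fallback path in json_dumps relies on pre-encoding bytes; a minimal sketch of that transformation (preserialize is an assumed name mirroring json_preserialize_binary):

    import base64
    import json

    def preserialize(data):
        # Replace bytes values (recursively) with 'ci-b64:<base64>'
        # strings so json.dumps never sees raw binary.
        out = {}
        for key, value in data.items():
            if isinstance(value, dict):
                value = preserialize(value)
            elif isinstance(value, bytes):
                value = 'ci-b64:' + base64.b64encode(value).decode('utf-8')
            out[key] = value
        return out

    print(json.dumps(preserialize({'blob': b'\xba\x03'})))
    # {"blob": "ci-b64:ugM="}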
def ensure_dir(path, mode=None):
@@ -1667,7 +1690,7 @@ def mounts():
return mounted
-def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True,
+def mount_cb(device, callback, data=None, mtype=None,
update_env_for_mount=None):
"""
Mount the device, call method 'callback' passing the directory
@@ -1714,18 +1737,7 @@ def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True,
for mtype in mtypes:
mountpoint = None
try:
- mountcmd = ['mount']
- mountopts = []
- if rw:
- mountopts.append('rw')
- else:
- mountopts.append('ro')
- if sync:
- # This seems like the safe approach to do
- # (ie where this is on by default)
- mountopts.append("sync")
- if mountopts:
- mountcmd.extend(["-o", ",".join(mountopts)])
+ mountcmd = ['mount', '-o', 'ro']
if mtype:
mountcmd.extend(['-t', mtype])
mountcmd.append(device)
@@ -1792,6 +1804,33 @@ def time_rfc2822():
return ts
+def boottime():
+ """Use sysctlbyname(3) via ctypes to find kern.boottime
+
+ kern.boottime is of type struct timeval. Here we create a
+ private class to unpack it more easily.
+
+ @return boottime: float to be compatible with linux
+ """
+ import ctypes
+
+ NULL_BYTES = b"\x00"
+
+ class timeval(ctypes.Structure):
+ _fields_ = [
+ ("tv_sec", ctypes.c_int64),
+ ("tv_usec", ctypes.c_int64)
+ ]
+ libc = ctypes.CDLL('/lib/libc.so.7')
+ size = ctypes.c_size_t()
+ size.value = ctypes.sizeof(timeval)
+ buf = timeval()
+ if libc.sysctlbyname(b"kern.boottime" + NULL_BYTES, ctypes.byref(buf),
+ ctypes.byref(size), None, 0) != -1:
+ return buf.tv_sec + buf.tv_usec / 1000000.0
+ raise RuntimeError("Unable to retrieve kern.boottime on this system")
+
+
def uptime():
uptime_str = '??'
method = 'unknown'
@@ -1803,15 +1842,8 @@ def uptime():
uptime_str = contents.split()[0]
else:
method = 'ctypes'
- libc = ctypes.CDLL('/lib/libc.so.7')
- size = ctypes.c_size_t()
- buf = ctypes.c_int()
- size.value = ctypes.sizeof(buf)
- libc.sysctlbyname("kern.boottime", ctypes.byref(buf),
- ctypes.byref(size), None, 0)
- now = time.time()
- bootup = buf.value
- uptime_str = now - bootup
+ # This is the *BSD codepath
+ uptime_str = str(time.time() - boottime())
except Exception:
logexc(LOG, "Unable to read uptime using method: %s" % method)
@@ -2336,17 +2368,21 @@ def parse_mtab(path):
return None
-def find_freebsd_part(label_part):
- if label_part.startswith("/dev/label/"):
- target_label = label_part[5:]
- (label_part, _err) = subp(['glabel', 'status', '-s'])
- for labels in label_part.split("\n"):
+def find_freebsd_part(fs):
+ splitted = fs.split('/')
+ if len(splitted) == 3:
+ return splitted[2]
+ elif len(splitted) > 2 and splitted[2] in ['label', 'gpt', 'ufs']:
+ target_label = fs[5:]
+ (part, _err) = subp(['glabel', 'status', '-s'])
+ for labels in part.split("\n"):
items = labels.split()
- if len(items) > 0 and items[0].startswith(target_label):
- label_part = items[2]
+ if len(items) > 0 and items[0] == target_label:
+ part = items[2]
break
- label_part = str(label_part)
- return label_part
+ return str(part)
+ else:
+ LOG.warning("Unexpected input in find_freebsd_part: %s", fs)
def get_path_dev_freebsd(path, mnt_list):
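The branch logic in find_freebsd_part distinguishes plain device nodes from labeled ones; an illustration of the two input shapes the code expects:

    fs = '/dev/ada0p2'
    parts = fs.split('/')            # ['', 'dev', 'ada0p2']
    assert len(parts) == 3 and parts[2] == 'ada0p2'   # returned directly

    fs = '/dev/label/rootfs'
    parts = fs.split('/')            # ['', 'dev', 'label', 'rootfs']
    assert parts[2] == 'label'       # falls through to the glabel lookup
    assert fs[5:] == 'label/rootfs'  # the label searched in glabel output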
@@ -2665,8 +2701,8 @@ def _call_dmidecode(key, dmidecode_path):
try:
cmd = [dmidecode_path, "--string", key]
(result, _err) = subp(cmd)
- LOG.debug("dmidecode returned '%s' for '%s'", result, key)
result = result.strip()
+ LOG.debug("dmidecode returned '%s' for '%s'", result, key)
if result.replace(".", "") == "":
return ""
return result
@@ -2817,9 +2853,6 @@ def load_shell_content(content, add_empty=False, empty_val=None):
variables. Set their value to empty_val."""
def _shlex_split(blob):
- if PY26 and isinstance(blob, six.text_type):
- # Older versions don't support unicode input
- blob = blob.encode("utf8")
return shlex.split(blob, comments=True)
data = {}
@@ -2876,4 +2909,20 @@ def udevadm_settle(exists=None, timeout=None):
return subp(settle_cmd)
+def get_proc_ppid(pid):
+ """
+ Return the parent pid of a process.
+ """
+ ppid = 0
+ contents = ''
+ try:
+ contents = load_file("/proc/%s/stat" % pid, quiet=True)
+ except IOError as e:
+ LOG.warning('Failed to load /proc/%s/stat. %s', pid, e)
+ if contents:
+ parts = contents.split(" ", 4)
+ # man proc says
+ # ppid %d (4) The PID of the parent.
+ ppid = int(parts[3])
+ return ppid
+
# vi: ts=4 expandtab
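To see why parts[3] is the ppid in get_proc_ppid: /proc/<pid>/stat begins with 'pid (comm) state ppid ...', so four splits isolate the first four fields. A sample:

    stat_line = "1234 (cloud-init) S 1 1234 1234 0 -1 4194560"
    parts = stat_line.split(" ", 4)
    # ['1234', '(cloud-init)', 'S', '1', '1234 1234 0 -1 4194560']
    assert int(parts[3]) == 1
    # caveat: a comm containing spaces would break this simple split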
diff --git a/cloudinit/version.py b/cloudinit/version.py
index a2c5d43a..1bc1899c 100644
--- a/cloudinit/version.py
+++ b/cloudinit/version.py
@@ -4,7 +4,7 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-__VERSION__ = "18.5"
+__VERSION__ = "20.1"
_PACKAGED_VERSION = '@@PACKAGED_VERSION@@'
FEATURES = [