author:    James Falcon <james.falcon@canonical.com>  2021-12-15 20:16:38 -0600
committer: GitHub <noreply@github.com>                2021-12-15 19:16:38 -0700
commit:    bae9b11da9ed7dd0b16fe5adeaf4774b7cc628cf (patch)
tree:      1fbb3269fc87e39832e3286ef42eefd2b23fcd44 /cloudinit
parent:    2bcf4fa972fde686c2e3141c58e640640b44dd00 (diff)
download:  vyos-cloud-init-bae9b11da9ed7dd0b16fe5adeaf4774b7cc628cf.tar.gz
           vyos-cloud-init-bae9b11da9ed7dd0b16fe5adeaf4774b7cc628cf.zip
Adopt Black and isort (SC-700) (#1157)
Applied Black and isort, fixed the resulting linting issues, and updated tox.ini and CI.
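
The committed formatter settings live in the tox.ini and related config files updated by this change and are not reproduced on this page. As a rough, illustrative sketch only (the environment name, the 79-character line length, and the use of pyproject.toml are assumptions for illustration, not taken from this commit), a Black/isort setup consistent with the wrapping seen in the hunks below could look like:

    # pyproject.toml -- illustrative sketch, not the committed configuration
    [tool.black]
    line-length = 79

    [tool.isort]
    profile = "black"
    line_length = 79

    # tox.ini -- sketch of a formatting environment that CI could invoke
    [testenv:format]
    deps =
        black
        isort
    commands =
        black .
        isort .

Running the two tools over the tree (for example, black . followed by isort .) produces exactly the kind of mechanical rewrites shown in the diff below.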
Diffstat (limited to 'cloudinit')
-rw-r--r--cloudinit/analyze/__main__.py269
-rw-r--r--cloudinit/analyze/dump.py71
-rw-r--r--cloudinit/analyze/show.py192
-rw-r--r--cloudinit/apport.py153
-rw-r--r--cloudinit/atomic_helper.py25
-rw-r--r--cloudinit/cloud.py14
-rw-r--r--cloudinit/cmd/clean.py59
-rwxr-xr-xcloudinit/cmd/cloud_id.py68
-rw-r--r--cloudinit/cmd/devel/__init__.py3
-rw-r--r--cloudinit/cmd/devel/hotplug_hook.py138
-rw-r--r--cloudinit/cmd/devel/logs.py120
-rwxr-xr-xcloudinit/cmd/devel/make_mime.py76
-rwxr-xr-xcloudinit/cmd/devel/net_convert.py145
-rw-r--r--cloudinit/cmd/devel/parser.py48
-rwxr-xr-xcloudinit/cmd/devel/render.py54
-rw-r--r--cloudinit/cmd/main.py595
-rw-r--r--cloudinit/cmd/query.py170
-rw-r--r--cloudinit/cmd/status.py101
-rw-r--r--cloudinit/config/__init__.py20
-rw-r--r--cloudinit/config/cc_apk_configure.py195
-rw-r--r--cloudinit/config/cc_apt_configure.py618
-rw-r--r--cloudinit/config/cc_apt_pipelining.py13
-rw-r--r--cloudinit/config/cc_bootcmd.py65
-rwxr-xr-xcloudinit/config/cc_byobu.py27
-rw-r--r--cloudinit/config/cc_ca_certs.py84
-rw-r--r--cloudinit/config/cc_chef.py659
-rw-r--r--cloudinit/config/cc_debug.py21
-rw-r--r--cloudinit/config/cc_disable_ec2_metadata.py25
-rw-r--r--cloudinit/config/cc_disk_setup.py334
-rw-r--r--cloudinit/config/cc_emit_upstart.py24
-rw-r--r--cloudinit/config/cc_fan.py34
-rw-r--r--cloudinit/config/cc_final_message.py24
-rw-r--r--cloudinit/config/cc_foo.py1
-rw-r--r--cloudinit/config/cc_growpart.py134
-rw-r--r--cloudinit/config/cc_grub_dpkg.py54
-rw-r--r--cloudinit/config/cc_install_hotplug.py48
-rw-r--r--cloudinit/config/cc_keys_to_console.py36
-rw-r--r--cloudinit/config/cc_landscape.py24
-rw-r--r--cloudinit/config/cc_locale.py51
-rw-r--r--cloudinit/config/cc_lxd.py186
-rw-r--r--cloudinit/config/cc_mcollective.py50
-rw-r--r--cloudinit/config/cc_migrator.py25
-rw-r--r--cloudinit/config/cc_mounts.py172
-rw-r--r--cloudinit/config/cc_ntp.py546
-rw-r--r--cloudinit/config/cc_package_update_upgrade_install.py30
-rw-r--r--cloudinit/config/cc_phone_home.py98
-rw-r--r--cloudinit/config/cc_power_state_change.py58
-rw-r--r--cloudinit/config/cc_puppet.py194
-rw-r--r--cloudinit/config/cc_refresh_rmc_and_interface.py51
-rw-r--r--cloudinit/config/cc_reset_rmc.py43
-rw-r--r--cloudinit/config/cc_resizefs.py183
-rw-r--r--cloudinit/config/cc_resolv_conf.py41
-rw-r--r--cloudinit/config/cc_rh_subscription.py240
-rw-r--r--cloudinit/config/cc_rightscale_userdata.py31
-rw-r--r--cloudinit/config/cc_rsyslog.py86
-rw-r--r--cloudinit/config/cc_runcmd.py70
-rw-r--r--cloudinit/config/cc_salt_minion.py69
-rw-r--r--cloudinit/config/cc_scripts_per_boot.py14
-rw-r--r--cloudinit/config/cc_scripts_per_instance.py14
-rw-r--r--cloudinit/config/cc_scripts_per_once.py14
-rw-r--r--cloudinit/config/cc_scripts_user.py12
-rw-r--r--cloudinit/config/cc_scripts_vendor.py22
-rw-r--r--cloudinit/config/cc_seed_random.py41
-rw-r--r--cloudinit/config/cc_set_hostname.py30
-rwxr-xr-xcloudinit/config/cc_set_passwords.py65
-rw-r--r--cloudinit/config/cc_snap.py168
-rw-r--r--cloudinit/config/cc_spacewalk.py67
-rwxr-xr-xcloudinit/config/cc_ssh.py106
-rwxr-xr-xcloudinit/config/cc_ssh_authkey_fingerprints.py73
-rwxr-xr-xcloudinit/config/cc_ssh_import_id.py23
-rw-r--r--cloudinit/config/cc_timezone.py2
-rw-r--r--cloudinit/config/cc_ubuntu_advantage.py154
-rw-r--r--cloudinit/config/cc_ubuntu_drivers.py133
-rw-r--r--cloudinit/config/cc_update_etc_hosts.py42
-rw-r--r--cloudinit/config/cc_update_hostname.py25
-rw-r--r--cloudinit/config/cc_users_groups.py39
-rw-r--r--cloudinit/config/cc_write_files.py242
-rw-r--r--cloudinit/config/cc_write_files_deferred.py22
-rw-r--r--cloudinit/config/cc_yum_add_repo.py65
-rw-r--r--cloudinit/config/cc_zypper_add_repo.py159
-rw-r--r--cloudinit/config/schema.py239
-rw-r--r--cloudinit/cs_utils.py20
-rw-r--r--cloudinit/dhclient_hook.py21
-rwxr-xr-xcloudinit/distros/__init__.py420
-rw-r--r--cloudinit/distros/almalinux.py1
-rw-r--r--cloudinit/distros/alpine.py45
-rw-r--r--cloudinit/distros/amazon.py1
-rw-r--r--cloudinit/distros/arch.py147
-rw-r--r--cloudinit/distros/bsd.py66
-rw-r--r--cloudinit/distros/bsd_utils.py18
-rw-r--r--cloudinit/distros/centos.py1
-rw-r--r--cloudinit/distros/cloudlinux.py1
-rw-r--r--cloudinit/distros/debian.py168
-rw-r--r--cloudinit/distros/dragonflybsd.py2
-rw-r--r--cloudinit/distros/eurolinux.py1
-rw-r--r--cloudinit/distros/fedora.py1
-rw-r--r--cloudinit/distros/freebsd.py93
-rw-r--r--cloudinit/distros/gentoo.py140
-rw-r--r--cloudinit/distros/miraclelinux.py2
-rw-r--r--cloudinit/distros/net_util.py68
-rw-r--r--cloudinit/distros/netbsd.py85
-rw-r--r--cloudinit/distros/networking.py13
-rw-r--r--cloudinit/distros/openEuler.py1
-rw-r--r--cloudinit/distros/openbsd.py20
-rw-r--r--cloudinit/distros/opensuse.py119
-rw-r--r--cloudinit/distros/parsers/__init__.py3
-rw-r--r--cloudinit/distros/parsers/hostname.py24
-rw-r--r--cloudinit/distros/parsers/hosts.py24
-rw-r--r--cloudinit/distros/parsers/networkmanager_conf.py6
-rw-r--r--cloudinit/distros/parsers/resolv_conf.py73
-rw-r--r--cloudinit/distros/parsers/sys_conf.py38
-rw-r--r--cloudinit/distros/photon.py86
-rw-r--r--cloudinit/distros/rhel.py76
-rw-r--r--cloudinit/distros/rhel_util.py4
-rw-r--r--cloudinit/distros/rocky.py1
-rw-r--r--cloudinit/distros/sles.py1
-rw-r--r--cloudinit/distros/ubuntu.py33
-rwxr-xr-xcloudinit/distros/ug_util.py106
-rw-r--r--cloudinit/distros/virtuozzo.py1
-rw-r--r--cloudinit/dmi.py68
-rw-r--r--cloudinit/ec2_utils.py165
-rw-r--r--cloudinit/event.py8
-rw-r--r--cloudinit/filters/launch_index.py12
-rw-r--r--cloudinit/gpg.py48
-rw-r--r--cloudinit/handlers/__init__.py152
-rw-r--r--cloudinit/handlers/boot_hook.py21
-rw-r--r--cloudinit/handlers/cloud_config.py29
-rw-r--r--cloudinit/handlers/jinja_template.py87
-rw-r--r--cloudinit/handlers/shell_script.py15
-rw-r--r--cloudinit/handlers/upstart_job.py22
-rw-r--r--cloudinit/helpers.py111
-rw-r--r--cloudinit/importer.py3
-rw-r--r--cloudinit/log.py21
-rw-r--r--cloudinit/mergers/__init__.py43
-rw-r--r--cloudinit/mergers/m_dict.py34
-rw-r--r--cloudinit/mergers/m_list.py37
-rw-r--r--cloudinit/mergers/m_str.py5
-rw-r--r--cloudinit/net/__init__.py579
-rw-r--r--cloudinit/net/activators.py87
-rw-r--r--cloudinit/net/bsd.py112
-rwxr-xr-xcloudinit/net/cmdline.py97
-rw-r--r--cloudinit/net/dhcp.py194
-rw-r--r--cloudinit/net/eni.py454
-rw-r--r--cloudinit/net/freebsd.py44
-rw-r--r--cloudinit/net/netbsd.py27
-rw-r--r--cloudinit/net/netplan.py313
-rw-r--r--cloudinit/net/network_state.py734
-rw-r--r--cloudinit/net/networkd.py208
-rw-r--r--cloudinit/net/openbsd.py33
-rw-r--r--cloudinit/net/renderer.py31
-rw-r--r--cloudinit/net/renderers.py40
-rw-r--r--cloudinit/net/sysconfig.py886
-rw-r--r--cloudinit/net/udev.py23
-rw-r--r--cloudinit/netinfo.py403
-rw-r--r--cloudinit/patcher.py9
-rw-r--r--cloudinit/registry.py4
-rw-r--r--cloudinit/reporting/__init__.py9
-rw-r--r--cloudinit/reporting/events.py97
-rwxr-xr-xcloudinit/reporting/handlers.py128
-rw-r--r--cloudinit/safeyaml.py25
-rw-r--r--cloudinit/serial.py25
-rw-r--r--cloudinit/settings.py82
-rw-r--r--cloudinit/signal_handler.py12
-rw-r--r--cloudinit/simpletable.py26
-rw-r--r--cloudinit/sources/DataSourceAliYun.py18
-rw-r--r--cloudinit/sources/DataSourceAltCloud.py113
-rwxr-xr-xcloudinit/sources/DataSourceAzure.py1350
-rw-r--r--cloudinit/sources/DataSourceBigstep.py9
-rw-r--r--cloudinit/sources/DataSourceCloudSigma.py39
-rw-r--r--cloudinit/sources/DataSourceCloudStack.py135
-rw-r--r--cloudinit/sources/DataSourceConfigDrive.py117
-rw-r--r--cloudinit/sources/DataSourceDigitalOcean.py65
-rw-r--r--cloudinit/sources/DataSourceEc2.py461
-rw-r--r--cloudinit/sources/DataSourceExoscale.py171
-rw-r--r--cloudinit/sources/DataSourceGCE.py221
-rw-r--r--cloudinit/sources/DataSourceHetzner.py74
-rw-r--r--cloudinit/sources/DataSourceIBMCloud.py128
-rw-r--r--cloudinit/sources/DataSourceLXD.py61
-rw-r--r--cloudinit/sources/DataSourceMAAS.py180
-rw-r--r--cloudinit/sources/DataSourceNoCloud.py154
-rw-r--r--cloudinit/sources/DataSourceNone.py15
-rw-r--r--cloudinit/sources/DataSourceOVF.py311
-rw-r--r--cloudinit/sources/DataSourceOpenNebula.py190
-rw-r--r--cloudinit/sources/DataSourceOpenStack.py129
-rw-r--r--cloudinit/sources/DataSourceOracle.py125
-rw-r--r--cloudinit/sources/DataSourceRbxCloud.py194
-rw-r--r--cloudinit/sources/DataSourceScaleway.py131
-rw-r--r--cloudinit/sources/DataSourceSmartOS.py555
-rw-r--r--cloudinit/sources/DataSourceUpCloud.py7
-rw-r--r--cloudinit/sources/DataSourceVMware.py13
-rw-r--r--cloudinit/sources/DataSourceVultr.py86
-rw-r--r--cloudinit/sources/__init__.py385
-rwxr-xr-xcloudinit/sources/helpers/azure.py693
-rw-r--r--cloudinit/sources/helpers/digitalocean.py195
-rw-r--r--cloudinit/sources/helpers/hetzner.py15
-rw-r--r--cloudinit/sources/helpers/netlink.py187
-rw-r--r--cloudinit/sources/helpers/openstack.py438
-rw-r--r--cloudinit/sources/helpers/upcloud.py12
-rw-r--r--cloudinit/sources/helpers/vmware/imc/boot_proto.py5
-rw-r--r--cloudinit/sources/helpers/vmware/imc/config.py59
-rw-r--r--cloudinit/sources/helpers/vmware/imc/config_custom_script.py45
-rw-r--r--cloudinit/sources/helpers/vmware/imc/config_file.py7
-rw-r--r--cloudinit/sources/helpers/vmware/imc/config_namespace.py1
-rw-r--r--cloudinit/sources/helpers/vmware/imc/config_nic.py84
-rw-r--r--cloudinit/sources/helpers/vmware/imc/config_passwd.py38
-rw-r--r--cloudinit/sources/helpers/vmware/imc/config_source.py1
-rw-r--r--cloudinit/sources/helpers/vmware/imc/guestcust_error.py1
-rw-r--r--cloudinit/sources/helpers/vmware/imc/guestcust_event.py1
-rw-r--r--cloudinit/sources/helpers/vmware/imc/guestcust_state.py1
-rw-r--r--cloudinit/sources/helpers/vmware/imc/guestcust_util.py46
-rw-r--r--cloudinit/sources/helpers/vmware/imc/ipv4_mode.py11
-rw-r--r--cloudinit/sources/helpers/vmware/imc/nic.py33
-rw-r--r--cloudinit/sources/helpers/vmware/imc/nic_base.py29
-rw-r--r--cloudinit/sources/helpers/vultr.py172
-rw-r--r--cloudinit/ssh_util.py172
-rw-r--r--cloudinit/stages.py649
-rw-r--r--cloudinit/subp.py165
-rw-r--r--cloudinit/temp_utils.py20
-rw-r--r--cloudinit/templater.py96
-rw-r--r--cloudinit/type_utils.py4
-rw-r--r--cloudinit/url_helper.py273
-rw-r--r--cloudinit/user_data.py121
-rw-r--r--cloudinit/util.py873
-rw-r--r--cloudinit/version.py9
-rw-r--r--cloudinit/warnings.py21
225 files changed, 15022 insertions, 11121 deletions
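
Every hunk that follows is a mechanical reformatting rather than a behavior change: isort merges and orders imports, while Black normalizes string literals to double quotes and re-wraps long calls and literals at the configured line length. A small hypothetical before/after (not taken from this diff) illustrates the pattern:

    # Before formatting (hypothetical snippet):
    from cloudinit import util
    from cloudinit import subp

    def run(cmd):
        return subp.subp(['sh', '-c', cmd], capture=True)

    # After Black + isort: imports from the same module are combined and
    # sorted, quotes are normalized, and spacing follows Black's rules.
    from cloudinit import subp, util


    def run(cmd):
        return subp.subp(["sh", "-c", cmd], capture=True)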
diff --git a/cloudinit/analyze/__main__.py b/cloudinit/analyze/__main__.py
index 99e5c203..36a5be78 100644
--- a/cloudinit/analyze/__main__.py
+++ b/cloudinit/analyze/__main__.py
@@ -5,62 +5,111 @@
import argparse
import re
import sys
+from datetime import datetime
from cloudinit.util import json_dumps
-from datetime import datetime
-from . import dump
-from . import show
+
+from . import dump, show
def get_parser(parser=None):
if not parser:
parser = argparse.ArgumentParser(
- prog='cloudinit-analyze',
- description='Devel tool: Analyze cloud-init logs and data')
- subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand')
+ prog="cloudinit-analyze",
+ description="Devel tool: Analyze cloud-init logs and data",
+ )
+ subparsers = parser.add_subparsers(title="Subcommands", dest="subcommand")
subparsers.required = True
parser_blame = subparsers.add_parser(
- 'blame', help='Print list of executed stages ordered by time to init')
+ "blame", help="Print list of executed stages ordered by time to init"
+ )
parser_blame.add_argument(
- '-i', '--infile', action='store', dest='infile',
- default='/var/log/cloud-init.log',
- help='specify where to read input.')
+ "-i",
+ "--infile",
+ action="store",
+ dest="infile",
+ default="/var/log/cloud-init.log",
+ help="specify where to read input.",
+ )
parser_blame.add_argument(
- '-o', '--outfile', action='store', dest='outfile', default='-',
- help='specify where to write output. ')
- parser_blame.set_defaults(action=('blame', analyze_blame))
+ "-o",
+ "--outfile",
+ action="store",
+ dest="outfile",
+ default="-",
+ help="specify where to write output. ",
+ )
+ parser_blame.set_defaults(action=("blame", analyze_blame))
parser_show = subparsers.add_parser(
- 'show', help='Print list of in-order events during execution')
- parser_show.add_argument('-f', '--format', action='store',
- dest='print_format', default='%I%D @%Es +%ds',
- help='specify formatting of output.')
- parser_show.add_argument('-i', '--infile', action='store',
- dest='infile', default='/var/log/cloud-init.log',
- help='specify where to read input.')
- parser_show.add_argument('-o', '--outfile', action='store',
- dest='outfile', default='-',
- help='specify where to write output.')
- parser_show.set_defaults(action=('show', analyze_show))
+ "show", help="Print list of in-order events during execution"
+ )
+ parser_show.add_argument(
+ "-f",
+ "--format",
+ action="store",
+ dest="print_format",
+ default="%I%D @%Es +%ds",
+ help="specify formatting of output.",
+ )
+ parser_show.add_argument(
+ "-i",
+ "--infile",
+ action="store",
+ dest="infile",
+ default="/var/log/cloud-init.log",
+ help="specify where to read input.",
+ )
+ parser_show.add_argument(
+ "-o",
+ "--outfile",
+ action="store",
+ dest="outfile",
+ default="-",
+ help="specify where to write output.",
+ )
+ parser_show.set_defaults(action=("show", analyze_show))
parser_dump = subparsers.add_parser(
- 'dump', help='Dump cloud-init events in JSON format')
- parser_dump.add_argument('-i', '--infile', action='store',
- dest='infile', default='/var/log/cloud-init.log',
- help='specify where to read input. ')
- parser_dump.add_argument('-o', '--outfile', action='store',
- dest='outfile', default='-',
- help='specify where to write output. ')
- parser_dump.set_defaults(action=('dump', analyze_dump))
+ "dump", help="Dump cloud-init events in JSON format"
+ )
+ parser_dump.add_argument(
+ "-i",
+ "--infile",
+ action="store",
+ dest="infile",
+ default="/var/log/cloud-init.log",
+ help="specify where to read input. ",
+ )
+ parser_dump.add_argument(
+ "-o",
+ "--outfile",
+ action="store",
+ dest="outfile",
+ default="-",
+ help="specify where to write output. ",
+ )
+ parser_dump.set_defaults(action=("dump", analyze_dump))
parser_boot = subparsers.add_parser(
- 'boot', help='Print list of boot times for kernel and cloud-init')
- parser_boot.add_argument('-i', '--infile', action='store',
- dest='infile', default='/var/log/cloud-init.log',
- help='specify where to read input. ')
- parser_boot.add_argument('-o', '--outfile', action='store',
- dest='outfile', default='-',
- help='specify where to write output.')
- parser_boot.set_defaults(action=('boot', analyze_boot))
+ "boot", help="Print list of boot times for kernel and cloud-init"
+ )
+ parser_boot.add_argument(
+ "-i",
+ "--infile",
+ action="store",
+ dest="infile",
+ default="/var/log/cloud-init.log",
+ help="specify where to read input. ",
+ )
+ parser_boot.add_argument(
+ "-o",
+ "--outfile",
+ action="store",
+ dest="outfile",
+ default="-",
+ help="specify where to write output.",
+ )
+ parser_boot.set_defaults(action=("boot", analyze_boot))
return parser
@@ -78,61 +127,68 @@ def analyze_boot(name, args):
"""
infh, outfh = configure_io(args)
kernel_info = show.dist_check_timestamp()
- status_code, kernel_start, kernel_end, ci_sysd_start = \
- kernel_info
+ status_code, kernel_start, kernel_end, ci_sysd_start = kernel_info
kernel_start_timestamp = datetime.utcfromtimestamp(kernel_start)
kernel_end_timestamp = datetime.utcfromtimestamp(kernel_end)
ci_sysd_start_timestamp = datetime.utcfromtimestamp(ci_sysd_start)
try:
- last_init_local = \
- [e for e in _get_events(infh) if e['name'] == 'init-local' and
- 'starting search' in e['description']][-1]
- ci_start = datetime.utcfromtimestamp(last_init_local['timestamp'])
+ last_init_local = [
+ e
+ for e in _get_events(infh)
+ if e["name"] == "init-local"
+ and "starting search" in e["description"]
+ ][-1]
+ ci_start = datetime.utcfromtimestamp(last_init_local["timestamp"])
except IndexError:
- ci_start = 'Could not find init-local log-line in cloud-init.log'
+ ci_start = "Could not find init-local log-line in cloud-init.log"
status_code = show.FAIL_CODE
- FAILURE_MSG = 'Your Linux distro or container does not support this ' \
- 'functionality.\n' \
- 'You must be running a Kernel Telemetry supported ' \
- 'distro.\nPlease check ' \
- 'https://cloudinit.readthedocs.io/en/latest' \
- '/topics/analyze.html for more ' \
- 'information on supported distros.\n'
-
- SUCCESS_MSG = '-- Most Recent Boot Record --\n' \
- ' Kernel Started at: {k_s_t}\n' \
- ' Kernel ended boot at: {k_e_t}\n' \
- ' Kernel time to boot (seconds): {k_r}\n' \
- ' Cloud-init activated by systemd at: {ci_sysd_t}\n' \
- ' Time between Kernel end boot and Cloud-init ' \
- 'activation (seconds): {bt_r}\n' \
- ' Cloud-init start: {ci_start}\n'
-
- CONTAINER_MSG = '-- Most Recent Container Boot Record --\n' \
- ' Container started at: {k_s_t}\n' \
- ' Cloud-init activated by systemd at: {ci_sysd_t}\n' \
- ' Cloud-init start: {ci_start}\n' \
-
+ FAILURE_MSG = (
+ "Your Linux distro or container does not support this "
+ "functionality.\n"
+ "You must be running a Kernel Telemetry supported "
+ "distro.\nPlease check "
+ "https://cloudinit.readthedocs.io/en/latest"
+ "/topics/analyze.html for more "
+ "information on supported distros.\n"
+ )
+
+ SUCCESS_MSG = (
+ "-- Most Recent Boot Record --\n"
+ " Kernel Started at: {k_s_t}\n"
+ " Kernel ended boot at: {k_e_t}\n"
+ " Kernel time to boot (seconds): {k_r}\n"
+ " Cloud-init activated by systemd at: {ci_sysd_t}\n"
+ " Time between Kernel end boot and Cloud-init "
+ "activation (seconds): {bt_r}\n"
+ " Cloud-init start: {ci_start}\n"
+ )
+
+ CONTAINER_MSG = (
+ "-- Most Recent Container Boot Record --\n"
+ " Container started at: {k_s_t}\n"
+ " Cloud-init activated by systemd at: {ci_sysd_t}\n"
+ " Cloud-init start: {ci_start}\n"
+ )
status_map = {
show.FAIL_CODE: FAILURE_MSG,
show.CONTAINER_CODE: CONTAINER_MSG,
- show.SUCCESS_CODE: SUCCESS_MSG
+ show.SUCCESS_CODE: SUCCESS_MSG,
}
kernel_runtime = kernel_end - kernel_start
between_process_runtime = ci_sysd_start - kernel_end
kwargs = {
- 'k_s_t': kernel_start_timestamp,
- 'k_e_t': kernel_end_timestamp,
- 'k_r': kernel_runtime,
- 'bt_r': between_process_runtime,
- 'k_e': kernel_end,
- 'k_s': kernel_start,
- 'ci_sysd': ci_sysd_start,
- 'ci_sysd_t': ci_sysd_start_timestamp,
- 'ci_start': ci_start
+ "k_s_t": kernel_start_timestamp,
+ "k_e_t": kernel_end_timestamp,
+ "k_r": kernel_runtime,
+ "bt_r": between_process_runtime,
+ "k_e": kernel_end,
+ "k_s": kernel_start,
+ "ci_sysd": ci_sysd_start,
+ "ci_sysd_t": ci_sysd_start_timestamp,
+ "ci_start": ci_start,
}
outfh.write(status_map[status_code].format(**kwargs))
@@ -152,15 +208,16 @@ def analyze_blame(name, args):
and sorting by record data ('delta')
"""
(infh, outfh) = configure_io(args)
- blame_format = ' %ds (%n)'
- r = re.compile(r'(^\s+\d+\.\d+)', re.MULTILINE)
- for idx, record in enumerate(show.show_events(_get_events(infh),
- blame_format)):
+ blame_format = " %ds (%n)"
+ r = re.compile(r"(^\s+\d+\.\d+)", re.MULTILINE)
+ for idx, record in enumerate(
+ show.show_events(_get_events(infh), blame_format)
+ ):
srecs = sorted(filter(r.match, record), reverse=True)
- outfh.write('-- Boot Record %02d --\n' % (idx + 1))
- outfh.write('\n'.join(srecs) + '\n')
- outfh.write('\n')
- outfh.write('%d boot records analyzed\n' % (idx + 1))
+ outfh.write("-- Boot Record %02d --\n" % (idx + 1))
+ outfh.write("\n".join(srecs) + "\n")
+ outfh.write("\n")
+ outfh.write("%d boot records analyzed\n" % (idx + 1))
def analyze_show(name, args):
@@ -184,21 +241,25 @@ def analyze_show(name, args):
Finished stage: (modules-final) 0.NNN seconds
"""
(infh, outfh) = configure_io(args)
- for idx, record in enumerate(show.show_events(_get_events(infh),
- args.print_format)):
- outfh.write('-- Boot Record %02d --\n' % (idx + 1))
- outfh.write('The total time elapsed since completing an event is'
- ' printed after the "@" character.\n')
- outfh.write('The time the event takes is printed after the "+" '
- 'character.\n\n')
- outfh.write('\n'.join(record) + '\n')
- outfh.write('%d boot records analyzed\n' % (idx + 1))
+ for idx, record in enumerate(
+ show.show_events(_get_events(infh), args.print_format)
+ ):
+ outfh.write("-- Boot Record %02d --\n" % (idx + 1))
+ outfh.write(
+ "The total time elapsed since completing an event is"
+ ' printed after the "@" character.\n'
+ )
+ outfh.write(
+ 'The time the event takes is printed after the "+" character.\n\n'
+ )
+ outfh.write("\n".join(record) + "\n")
+ outfh.write("%d boot records analyzed\n" % (idx + 1))
def analyze_dump(name, args):
"""Dump cloud-init events in json format"""
(infh, outfh) = configure_io(args)
- outfh.write(json_dumps(_get_events(infh)) + '\n')
+ outfh.write(json_dumps(_get_events(infh)) + "\n")
def _get_events(infile):
@@ -211,28 +272,28 @@ def _get_events(infile):
def configure_io(args):
"""Common parsing and setup of input/output files"""
- if args.infile == '-':
+ if args.infile == "-":
infh = sys.stdin
else:
try:
- infh = open(args.infile, 'r')
+ infh = open(args.infile, "r")
except OSError:
- sys.stderr.write('Cannot open file %s\n' % args.infile)
+ sys.stderr.write("Cannot open file %s\n" % args.infile)
sys.exit(1)
- if args.outfile == '-':
+ if args.outfile == "-":
outfh = sys.stdout
else:
try:
- outfh = open(args.outfile, 'w')
+ outfh = open(args.outfile, "w")
except OSError:
- sys.stderr.write('Cannot open file %s\n' % args.outfile)
+ sys.stderr.write("Cannot open file %s\n" % args.outfile)
sys.exit(1)
return (infh, outfh)
-if __name__ == '__main__':
+if __name__ == "__main__":
parser = get_parser()
args = parser.parse_args()
(name, action_functor) = args.action
diff --git a/cloudinit/analyze/dump.py b/cloudinit/analyze/dump.py
index 62ad51fe..8e6e3c6a 100644
--- a/cloudinit/analyze/dump.py
+++ b/cloudinit/analyze/dump.py
@@ -1,21 +1,20 @@
# This file is part of cloud-init. See LICENSE file for license information.
import calendar
-from datetime import datetime
import sys
+from datetime import datetime
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
stage_to_description = {
- 'finished': 'finished running cloud-init',
- 'init-local': 'starting search for local datasources',
- 'init-network': 'searching for network datasources',
- 'init': 'searching for network datasources',
- 'modules-config': 'running config modules',
- 'modules-final': 'finalizing modules',
- 'modules': 'running modules for',
- 'single': 'running single module ',
+ "finished": "finished running cloud-init",
+ "init-local": "starting search for local datasources",
+ "init-network": "searching for network datasources",
+ "init": "searching for network datasources",
+ "modules-config": "running config modules",
+ "modules-final": "finalizing modules",
+ "modules": "running modules for",
+ "single": "running single module ",
}
# logger's asctime format
@@ -34,11 +33,11 @@ def parse_timestamp(timestampstr):
if timestampstr.split()[0] in months:
# Aug 29 22:55:26
FMT = DEFAULT_FMT
- if '.' in timestampstr:
+ if "." in timestampstr:
FMT = CLOUD_INIT_JOURNALCTL_FMT
- dt = datetime.strptime(timestampstr + " " +
- str(datetime.now().year),
- FMT)
+ dt = datetime.strptime(
+ timestampstr + " " + str(datetime.now().year), FMT
+ )
timestamp = dt.strftime("%s.%f")
elif "," in timestampstr:
# 2016-09-12 14:39:20,839
@@ -52,7 +51,7 @@ def parse_timestamp(timestampstr):
def parse_timestamp_from_date(timestampstr):
- out, _ = subp.subp(['date', '+%s.%3N', '-d', timestampstr])
+ out, _ = subp.subp(["date", "+%s.%3N", "-d", timestampstr])
timestamp = out.strip()
return float(timestamp)
@@ -79,8 +78,8 @@ def parse_ci_logline(line):
# Apr 30 19:39:11 cloud-init[2673]: handlers.py[DEBUG]: start: \
# init-local/check-cache: attempting to read from cache [check]
- amazon_linux_2_sep = ' cloud-init['
- separators = [' - ', ' [CLOUDINIT] ', amazon_linux_2_sep]
+ amazon_linux_2_sep = " cloud-init["
+ separators = [" - ", " [CLOUDINIT] ", amazon_linux_2_sep]
found = False
for sep in separators:
if sep in line:
@@ -99,7 +98,7 @@ def parse_ci_logline(line):
if "," in timehost:
timestampstr, extra = timehost.split(",")
timestampstr += ",%s" % extra.split()[0]
- if ' ' in extra:
+ if " " in extra:
hostname = extra.split()[-1]
else:
hostname = timehost.split()[-1]
@@ -111,11 +110,11 @@ def parse_ci_logline(line):
eventstr = eventstr.split(maxsplit=1)[1]
else:
timestampstr = timehost.split(hostname)[0].strip()
- if 'Cloud-init v.' in eventstr:
- event_type = 'start'
- if 'running' in eventstr:
- stage_and_timestamp = eventstr.split('running')[1].lstrip()
- event_name, _ = stage_and_timestamp.split(' at ')
+ if "Cloud-init v." in eventstr:
+ event_type = "start"
+ if "running" in eventstr:
+ stage_and_timestamp = eventstr.split("running")[1].lstrip()
+ event_name, _ = stage_and_timestamp.split(" at ")
event_name = event_name.replace("'", "").replace(":", "-")
if event_name == "init":
event_name = "init-network"
@@ -128,17 +127,17 @@ def parse_ci_logline(line):
event_description = eventstr.split(event_name)[1].strip()
event = {
- 'name': event_name.rstrip(":"),
- 'description': event_description,
- 'timestamp': parse_timestamp(timestampstr),
- 'origin': 'cloudinit',
- 'event_type': event_type.rstrip(":"),
+ "name": event_name.rstrip(":"),
+ "description": event_description,
+ "timestamp": parse_timestamp(timestampstr),
+ "origin": "cloudinit",
+ "event_type": event_type.rstrip(":"),
}
- if event['event_type'] == "finish":
+ if event["event_type"] == "finish":
result = event_description.split(":")[0]
- desc = event_description.split(result)[1].lstrip(':').strip()
- event['result'] = result
- event['description'] = desc.strip()
+ desc = event_description.split(result)[1].lstrip(":").strip()
+ event["result"] = result
+ event["description"] = desc.strip()
return event
@@ -146,10 +145,10 @@ def parse_ci_logline(line):
def dump_events(cisource=None, rawdata=None):
events = []
event = None
- CI_EVENT_MATCHES = ['start:', 'finish:', 'Cloud-init v.']
+ CI_EVENT_MATCHES = ["start:", "finish:", "Cloud-init v."]
if not any([cisource, rawdata]):
- raise ValueError('Either cisource or rawdata parameters are required')
+ raise ValueError("Either cisource or rawdata parameters are required")
if rawdata:
data = rawdata.splitlines()
@@ -162,7 +161,7 @@ def dump_events(cisource=None, rawdata=None):
try:
event = parse_ci_logline(line)
except ValueError:
- sys.stderr.write('Skipping invalid entry\n')
+ sys.stderr.write("Skipping invalid entry\n")
if event:
events.append(event)
diff --git a/cloudinit/analyze/show.py b/cloudinit/analyze/show.py
index 01a4d3e5..5fd9cdfd 100644
--- a/cloudinit/analyze/show.py
+++ b/cloudinit/analyze/show.py
@@ -8,11 +8,10 @@ import base64
import datetime
import json
import os
-import time
import sys
+import time
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
from cloudinit.distros import uses_systemd
# Example events:
@@ -35,24 +34,25 @@ from cloudinit.distros import uses_systemd
# }
format_key = {
- '%d': 'delta',
- '%D': 'description',
- '%E': 'elapsed',
- '%e': 'event_type',
- '%I': 'indent',
- '%l': 'level',
- '%n': 'name',
- '%o': 'origin',
- '%r': 'result',
- '%t': 'timestamp',
- '%T': 'total_time',
+ "%d": "delta",
+ "%D": "description",
+ "%E": "elapsed",
+ "%e": "event_type",
+ "%I": "indent",
+ "%l": "level",
+ "%n": "name",
+ "%o": "origin",
+ "%r": "result",
+ "%t": "timestamp",
+ "%T": "total_time",
}
-formatting_help = " ".join(["{0}: {1}".format(k.replace('%', '%%'), v)
- for k, v in format_key.items()])
-SUCCESS_CODE = 'successful'
-FAIL_CODE = 'failure'
-CONTAINER_CODE = 'container'
+formatting_help = " ".join(
+ ["{0}: {1}".format(k.replace("%", "%%"), v) for k, v in format_key.items()]
+)
+SUCCESS_CODE = "successful"
+FAIL_CODE = "failure"
+CONTAINER_CODE = "container"
TIMESTAMP_UNKNOWN = (FAIL_CODE, -1, -1, -1)
@@ -60,7 +60,7 @@ def format_record(msg, event):
for i, j in format_key.items():
if i in msg:
# ensure consistent formatting of time values
- if j in ['delta', 'elapsed', 'timestamp']:
+ if j in ["delta", "elapsed", "timestamp"]:
msg = msg.replace(i, "{%s:08.5f}" % j)
else:
msg = msg.replace(i, "{%s}" % j)
@@ -68,13 +68,13 @@ def format_record(msg, event):
def dump_event_files(event):
- content = dict((k, v) for k, v in event.items() if k not in ['content'])
- files = content['files']
+ content = dict((k, v) for k, v in event.items() if k not in ["content"])
+ files = content["files"]
saved = []
for f in files:
- fname = f['path']
+ fname = f["path"]
fn_local = os.path.basename(fname)
- fcontent = base64.b64decode(f['content']).decode('ascii')
+ fcontent = base64.b64decode(f["content"]).decode("ascii")
util.write_file(fn_local, fcontent)
saved.append(fn_local)
@@ -83,13 +83,13 @@ def dump_event_files(event):
def event_name(event):
if event:
- return event.get('name')
+ return event.get("name")
return None
def event_type(event):
if event:
- return event.get('event_type')
+ return event.get("event_type")
return None
@@ -100,7 +100,7 @@ def event_parent(event):
def event_timestamp(event):
- return float(event.get('timestamp'))
+ return float(event.get("timestamp"))
def event_datetime(event):
@@ -117,41 +117,44 @@ def event_duration(start, finish):
def event_record(start_time, start, finish):
record = finish.copy()
- record.update({
- 'delta': event_duration(start, finish),
- 'elapsed': delta_seconds(start_time, event_datetime(start)),
- 'indent': '|' + ' ' * (event_name(start).count('/') - 1) + '`->',
- })
+ record.update(
+ {
+ "delta": event_duration(start, finish),
+ "elapsed": delta_seconds(start_time, event_datetime(start)),
+ "indent": "|" + " " * (event_name(start).count("/") - 1) + "`->",
+ }
+ )
return record
def total_time_record(total_time):
- return 'Total Time: %3.5f seconds\n' % total_time
+ return "Total Time: %3.5f seconds\n" % total_time
class SystemctlReader(object):
- '''
+ """
Class for dealing with all systemctl subp calls in a consistent manner.
- '''
+ """
+
def __init__(self, property, parameter=None):
self.epoch = None
- self.args = ['/bin/systemctl', 'show']
+ self.args = ["/bin/systemctl", "show"]
if parameter:
self.args.append(parameter)
- self.args.extend(['-p', property])
+ self.args.extend(["-p", property])
# Don't want the init of our object to break. Instead of throwing
# an exception, set an error code that gets checked when data is
# requested from the object
self.failure = self.subp()
def subp(self):
- '''
+ """
Make a subp call based on set args and handle errors by setting
failure code
:return: whether the subp call failed or not
- '''
+ """
try:
value, err = subp.subp(self.args, capture=True)
if err:
@@ -162,41 +165,41 @@ class SystemctlReader(object):
return systemctl_fail
def parse_epoch_as_float(self):
- '''
+ """
If subp call succeeded, return the timestamp from subp as a float.
:return: timestamp as a float
- '''
+ """
# subp has 2 ways to fail: it either fails and throws an exception,
# or returns an error code. Raise an exception here in order to make
# sure both scenarios throw exceptions
if self.failure:
- raise RuntimeError('Subprocess call to systemctl has failed, '
- 'returning error code ({})'
- .format(self.failure))
+ raise RuntimeError(
+ "Subprocess call to systemctl has failed, "
+ "returning error code ({})".format(self.failure)
+ )
# Output from systemctl show has the format Property=Value.
# For example, UserspaceMonotonic=1929304
- timestamp = self.epoch.split('=')[1]
+ timestamp = self.epoch.split("=")[1]
# Timestamps reported by systemctl are in microseconds, converting
return float(timestamp) / 1000000
def dist_check_timestamp():
- '''
+ """
Determine which init system a particular linux distro is using.
Each init system (systemd, upstart, etc) has a different way of
providing timestamps.
:return: timestamps of kernelboot, kernelendboot, and cloud-initstart
or TIMESTAMP_UNKNOWN if the timestamps cannot be retrieved.
- '''
+ """
if uses_systemd():
return gather_timestamps_using_systemd()
# Use dmesg to get timestamps if the distro does not have systemd
- if util.is_FreeBSD() or 'gentoo' in \
- util.system_info()['system'].lower():
+ if util.is_FreeBSD() or "gentoo" in util.system_info()["system"].lower():
return gather_timestamps_using_dmesg()
# this distro doesn't fit anything that is supported by cloud-init. just
@@ -205,20 +208,20 @@ def dist_check_timestamp():
def gather_timestamps_using_dmesg():
- '''
+ """
Gather timestamps that corresponds to kernel begin initialization,
kernel finish initialization using dmesg as opposed to systemctl
:return: the two timestamps plus a dummy timestamp to keep consistency
with gather_timestamps_using_systemd
- '''
+ """
try:
- data, _ = subp.subp(['dmesg'], capture=True)
+ data, _ = subp.subp(["dmesg"], capture=True)
split_entries = data[0].splitlines()
for i in split_entries:
- if i.decode('UTF-8').find('user') != -1:
- splitup = i.decode('UTF-8').split()
- stripped = splitup[1].strip(']')
+ if i.decode("UTF-8").find("user") != -1:
+ splitup = i.decode("UTF-8").split()
+ stripped = splitup[1].strip("]")
# kernel timestamp from dmesg is equal to 0,
# with the userspace timestamp relative to it.
@@ -228,8 +231,7 @@ def gather_timestamps_using_dmesg():
# systemd wont start cloud-init in this case,
# so we cannot get that timestamp
- return SUCCESS_CODE, kernel_start, kernel_end, \
- kernel_end
+ return SUCCESS_CODE, kernel_start, kernel_end, kernel_end
except Exception:
pass
@@ -237,18 +239,20 @@ def gather_timestamps_using_dmesg():
def gather_timestamps_using_systemd():
- '''
+ """
Gather timestamps that corresponds to kernel begin initialization,
kernel finish initialization. and cloud-init systemd unit activation
:return: the three timestamps
- '''
+ """
kernel_start = float(time.time()) - float(util.uptime())
try:
- delta_k_end = SystemctlReader('UserspaceTimestampMonotonic')\
- .parse_epoch_as_float()
- delta_ci_s = SystemctlReader('InactiveExitTimestampMonotonic',
- 'cloud-init-local').parse_epoch_as_float()
+ delta_k_end = SystemctlReader(
+ "UserspaceTimestampMonotonic"
+ ).parse_epoch_as_float()
+ delta_ci_s = SystemctlReader(
+ "InactiveExitTimestampMonotonic", "cloud-init-local"
+ ).parse_epoch_as_float()
base_time = kernel_start
status = SUCCESS_CODE
# lxc based containers do not set their monotonic zero point to be when
@@ -262,12 +266,13 @@ def gather_timestamps_using_systemd():
# in containers when https://github.com/lxc/lxcfs/issues/292
# is fixed, util.uptime() should be used instead of stat on
try:
- file_stat = os.stat('/proc/1/cmdline')
+ file_stat = os.stat("/proc/1/cmdline")
kernel_start = file_stat.st_atime
except OSError as err:
- raise RuntimeError('Could not determine container boot '
- 'time from /proc/1/cmdline. ({})'
- .format(err)) from err
+ raise RuntimeError(
+ "Could not determine container boot "
+ "time from /proc/1/cmdline. ({})".format(err)
+ ) from err
status = CONTAINER_CODE
else:
status = FAIL_CODE
@@ -283,10 +288,14 @@ def gather_timestamps_using_systemd():
return status, kernel_start, kernel_end, cloudinit_sysd
-def generate_records(events, blame_sort=False,
- print_format="(%n) %d seconds in %I%D",
- dump_files=False, log_datafiles=False):
- '''
+def generate_records(
+ events,
+ blame_sort=False,
+ print_format="(%n) %d seconds in %I%D",
+ dump_files=False,
+ log_datafiles=False,
+):
+ """
Take in raw events and create parent-child dependencies between events
in order to order events in chronological order.
@@ -298,9 +307,9 @@ def generate_records(events, blame_sort=False,
:param log_datafiles: whether or not to log events generated
:return: boot records ordered chronologically
- '''
+ """
- sorted_events = sorted(events, key=lambda x: x['timestamp'])
+ sorted_events = sorted(events, key=lambda x: x["timestamp"])
records = []
start_time = None
total_time = 0.0
@@ -316,8 +325,8 @@ def generate_records(events, blame_sort=False,
except IndexError:
next_evt = None
- if event_type(event) == 'start':
- if event.get('name') in stages_seen:
+ if event_type(event) == "start":
+ if event.get("name") in stages_seen:
records.append(total_time_record(total_time))
boot_records.append(records)
records = []
@@ -331,25 +340,28 @@ def generate_records(events, blame_sort=False,
# see if we have a pair
if event_name(event) == event_name(next_evt):
- if event_type(next_evt) == 'finish':
- records.append(format_record(print_format,
- event_record(start_time,
- event,
- next_evt)))
+ if event_type(next_evt) == "finish":
+ records.append(
+ format_record(
+ print_format,
+ event_record(start_time, event, next_evt),
+ )
+ )
else:
# This is a parent event
- records.append("Starting stage: %s" % event.get('name'))
+ records.append("Starting stage: %s" % event.get("name"))
unprocessed.append(event)
- stages_seen.append(event.get('name'))
+ stages_seen.append(event.get("name"))
continue
else:
prev_evt = unprocessed.pop()
if event_name(event) == event_name(prev_evt):
record = event_record(start_time, prev_evt, event)
- records.append(format_record("Finished stage: "
- "(%n) %d seconds",
- record) + "\n")
- total_time += record.get('delta')
+ records.append(
+ format_record("Finished stage: (%n) %d seconds", record)
+ + "\n"
+ )
+ total_time += record.get("delta")
else:
# not a match, put it back
unprocessed.append(prev_evt)
@@ -360,7 +372,7 @@ def generate_records(events, blame_sort=False,
def show_events(events, print_format):
- '''
+ """
A passthrough method that makes it easier to call generate_records()
:param events: JSONs from dump that represents events taken from logs
@@ -368,18 +380,18 @@ def show_events(events, print_format):
and time taken by the event in one line
:return: boot records ordered chronologically
- '''
+ """
return generate_records(events, print_format=print_format)
def load_events_infile(infile):
- '''
+ """
Takes in a log file, read it, and convert to json.
:param infile: The Log file to be read
:return: json version of logfile, raw file
- '''
+ """
data = infile.read()
try:
return json.loads(data), data
diff --git a/cloudinit/apport.py b/cloudinit/apport.py
index aadc638f..92068aa9 100644
--- a/cloudinit/apport.py
+++ b/cloudinit/apport.py
@@ -2,127 +2,143 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-'''Cloud-init apport interface'''
+"""Cloud-init apport interface"""
try:
from apport.hookutils import (
- attach_file, attach_root_command_outputs, root_command_output)
+ attach_file,
+ attach_root_command_outputs,
+ root_command_output,
+ )
+
has_apport = True
except ImportError:
has_apport = False
KNOWN_CLOUD_NAMES = [
- 'AliYun',
- 'AltCloud',
- 'Amazon - Ec2',
- 'Azure',
- 'Bigstep',
- 'Brightbox',
- 'CloudSigma',
- 'CloudStack',
- 'DigitalOcean',
- 'E24Cloud',
- 'GCE - Google Compute Engine',
- 'Exoscale',
- 'Hetzner Cloud',
- 'IBM - (aka SoftLayer or BlueMix)',
- 'LXD',
- 'MAAS',
- 'NoCloud',
- 'OpenNebula',
- 'OpenStack',
- 'Oracle',
- 'OVF',
- 'RbxCloud - (HyperOne, Rootbox, Rubikon)',
- 'OpenTelekomCloud',
- 'SAP Converged Cloud',
- 'Scaleway',
- 'SmartOS',
- 'UpCloud',
- 'VMware',
- 'Vultr',
- 'ZStack',
- 'Other'
+ "AliYun",
+ "AltCloud",
+ "Amazon - Ec2",
+ "Azure",
+ "Bigstep",
+ "Brightbox",
+ "CloudSigma",
+ "CloudStack",
+ "DigitalOcean",
+ "E24Cloud",
+ "GCE - Google Compute Engine",
+ "Exoscale",
+ "Hetzner Cloud",
+ "IBM - (aka SoftLayer or BlueMix)",
+ "LXD",
+ "MAAS",
+ "NoCloud",
+ "OpenNebula",
+ "OpenStack",
+ "Oracle",
+ "OVF",
+ "RbxCloud - (HyperOne, Rootbox, Rubikon)",
+ "OpenTelekomCloud",
+ "SAP Converged Cloud",
+ "Scaleway",
+ "SmartOS",
+ "UpCloud",
+ "VMware",
+ "Vultr",
+ "ZStack",
+ "Other",
]
# Potentially clear text collected logs
-CLOUDINIT_LOG = '/var/log/cloud-init.log'
-CLOUDINIT_OUTPUT_LOG = '/var/log/cloud-init-output.log'
-USER_DATA_FILE = '/var/lib/cloud/instance/user-data.txt' # Optional
+CLOUDINIT_LOG = "/var/log/cloud-init.log"
+CLOUDINIT_OUTPUT_LOG = "/var/log/cloud-init-output.log"
+USER_DATA_FILE = "/var/lib/cloud/instance/user-data.txt" # Optional
def attach_cloud_init_logs(report, ui=None):
- '''Attach cloud-init logs and tarfile from 'cloud-init collect-logs'.'''
- attach_root_command_outputs(report, {
- 'cloud-init-log-warnings':
- 'egrep -i "warn|error" /var/log/cloud-init.log',
- 'cloud-init-output.log.txt': 'cat /var/log/cloud-init-output.log'})
+ """Attach cloud-init logs and tarfile from 'cloud-init collect-logs'."""
+ attach_root_command_outputs(
+ report,
+ {
+ "cloud-init-log-warnings": (
+ 'egrep -i "warn|error" /var/log/cloud-init.log'
+ ),
+ "cloud-init-output.log.txt": "cat /var/log/cloud-init-output.log",
+ },
+ )
root_command_output(
- ['cloud-init', 'collect-logs', '-t', '/tmp/cloud-init-logs.tgz'])
- attach_file(report, '/tmp/cloud-init-logs.tgz', 'logs.tgz')
+ ["cloud-init", "collect-logs", "-t", "/tmp/cloud-init-logs.tgz"]
+ )
+ attach_file(report, "/tmp/cloud-init-logs.tgz", "logs.tgz")
def attach_hwinfo(report, ui=None):
- '''Optionally attach hardware info from lshw.'''
+ """Optionally attach hardware info from lshw."""
prompt = (
- 'Your device details (lshw) may be useful to developers when'
- ' addressing this bug, but gathering it requires admin privileges.'
- ' Would you like to include this info?')
+ "Your device details (lshw) may be useful to developers when"
+ " addressing this bug, but gathering it requires admin privileges."
+ " Would you like to include this info?"
+ )
if ui and ui.yesno(prompt):
- attach_root_command_outputs(report, {'lshw.txt': 'lshw'})
+ attach_root_command_outputs(report, {"lshw.txt": "lshw"})
def attach_cloud_info(report, ui=None):
- '''Prompt for cloud details if available.'''
+ """Prompt for cloud details if available."""
if ui:
- prompt = 'Is this machine running in a cloud environment?'
+ prompt = "Is this machine running in a cloud environment?"
response = ui.yesno(prompt)
if response is None:
raise StopIteration # User cancelled
if response:
- prompt = ('Please select the cloud vendor or environment in which'
- ' this instance is running')
+ prompt = (
+ "Please select the cloud vendor or environment in which"
+ " this instance is running"
+ )
response = ui.choice(prompt, KNOWN_CLOUD_NAMES)
if response:
- report['CloudName'] = KNOWN_CLOUD_NAMES[response[0]]
+ report["CloudName"] = KNOWN_CLOUD_NAMES[response[0]]
else:
- report['CloudName'] = 'None'
+ report["CloudName"] = "None"
def attach_user_data(report, ui=None):
- '''Optionally provide user-data if desired.'''
+ """Optionally provide user-data if desired."""
if ui:
prompt = (
- 'Your user-data or cloud-config file can optionally be provided'
- ' from {0} and could be useful to developers when addressing this'
- ' bug. Do you wish to attach user-data to this bug?'.format(
- USER_DATA_FILE))
+ "Your user-data or cloud-config file can optionally be provided"
+ " from {0} and could be useful to developers when addressing this"
+ " bug. Do you wish to attach user-data to this bug?".format(
+ USER_DATA_FILE
+ )
+ )
response = ui.yesno(prompt)
if response is None:
raise StopIteration # User cancelled
if response:
- attach_file(report, USER_DATA_FILE, 'user_data.txt')
+ attach_file(report, USER_DATA_FILE, "user_data.txt")
def add_bug_tags(report):
- '''Add any appropriate tags to the bug.'''
- if 'JournalErrors' in report.keys():
- errors = report['JournalErrors']
- if 'Breaking ordering cycle' in errors:
- report['Tags'] = 'systemd-ordering'
+ """Add any appropriate tags to the bug."""
+ if "JournalErrors" in report.keys():
+ errors = report["JournalErrors"]
+ if "Breaking ordering cycle" in errors:
+ report["Tags"] = "systemd-ordering"
def add_info(report, ui):
- '''This is an entry point to run cloud-init's apport functionality.
+ """This is an entry point to run cloud-init's apport functionality.
Distros which want apport support will have a cloud-init package-hook at
/usr/share/apport/package-hooks/cloud-init.py which defines an add_info
function and returns the result of cloudinit.apport.add_info(report, ui).
- '''
+ """
if not has_apport:
raise RuntimeError(
- 'No apport imports discovered. Apport functionality disabled')
+ "No apport imports discovered. Apport functionality disabled"
+ )
attach_cloud_init_logs(report, ui)
attach_hwinfo(report, ui)
attach_cloud_info(report, ui)
@@ -130,4 +146,5 @@ def add_info(report, ui):
add_bug_tags(report)
return True
+
# vi: ts=4 expandtab
diff --git a/cloudinit/atomic_helper.py b/cloudinit/atomic_helper.py
index 485ff92f..ae117fad 100644
--- a/cloudinit/atomic_helper.py
+++ b/cloudinit/atomic_helper.py
@@ -10,8 +10,9 @@ _DEF_PERMS = 0o644
LOG = logging.getLogger(__name__)
-def write_file(filename, content, mode=_DEF_PERMS,
- omode="wb", preserve_mode=False):
+def write_file(
+ filename, content, mode=_DEF_PERMS, omode="wb", preserve_mode=False
+):
# open filename in mode 'omode', write content, set permissions to 'mode'
if preserve_mode:
@@ -23,12 +24,18 @@ def write_file(filename, content, mode=_DEF_PERMS,
tf = None
try:
- tf = tempfile.NamedTemporaryFile(dir=os.path.dirname(filename),
- delete=False, mode=omode)
+ tf = tempfile.NamedTemporaryFile(
+ dir=os.path.dirname(filename), delete=False, mode=omode
+ )
LOG.debug(
"Atomically writing to file %s (via temporary file %s) - %s: [%o]"
" %d bytes/chars",
- filename, tf.name, omode, mode, len(content))
+ filename,
+ tf.name,
+ omode,
+ mode,
+ len(content),
+ )
tf.write(content)
tf.close()
os.chmod(tf.name, mode)
@@ -42,7 +49,11 @@ def write_file(filename, content, mode=_DEF_PERMS,
def write_json(filename, data, mode=_DEF_PERMS):
# dump json representation of data to file filename.
return write_file(
- filename, json.dumps(data, indent=1, sort_keys=True) + "\n",
- omode="w", mode=mode)
+ filename,
+ json.dumps(data, indent=1, sort_keys=True) + "\n",
+ omode="w",
+ mode=mode,
+ )
+
# vi: ts=4 expandtab
diff --git a/cloudinit/cloud.py b/cloudinit/cloud.py
index 7ae98e1c..91e48103 100644
--- a/cloudinit/cloud.py
+++ b/cloudinit/cloud.py
@@ -35,7 +35,8 @@ class Cloud(object):
reporter = events.ReportEventStack(
name="unnamed-cloud-reporter",
description="unnamed-cloud-reporter",
- reporting_enabled=False)
+ reporting_enabled=False,
+ )
self.reporter = reporter
# If a 'user' manipulates logging or logging services
@@ -56,8 +57,11 @@ class Cloud(object):
def get_template_filename(self, name):
fn = self.paths.template_tpl % (name)
if not os.path.isfile(fn):
- LOG.warning("No template found in %s for template named %s",
- os.path.dirname(fn), name)
+ LOG.warning(
+ "No template found in %s for template named %s",
+ os.path.dirname(fn),
+ name,
+ )
return None
return fn
@@ -80,7 +84,8 @@ class Cloud(object):
def get_hostname(self, fqdn=False, metadata_only=False):
return self.datasource.get_hostname(
- fqdn=fqdn, metadata_only=metadata_only)
+ fqdn=fqdn, metadata_only=metadata_only
+ )
def device_name_to_device(self, name):
return self.datasource.device_name_to_device(name)
@@ -94,4 +99,5 @@ class Cloud(object):
def get_ipath(self, name=None):
return self.paths.get_ipath(name)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/cmd/clean.py b/cloudinit/cmd/clean.py
index 3502dd56..0e1db118 100644
--- a/cloudinit/cmd/clean.py
+++ b/cloudinit/cmd/clean.py
@@ -10,9 +10,13 @@ import os
import sys
from cloudinit.stages import Init
-from cloudinit.subp import (ProcessExecutionError, subp)
+from cloudinit.subp import ProcessExecutionError, subp
from cloudinit.util import (
- del_dir, del_file, get_config_logfiles, is_link, error
+ del_dir,
+ del_file,
+ error,
+ get_config_logfiles,
+ is_link,
)
@@ -27,18 +31,35 @@ def get_parser(parser=None):
"""
if not parser:
parser = argparse.ArgumentParser(
- prog='clean',
- description=('Remove logs and artifacts so cloud-init re-runs on '
- 'a clean system'))
+ prog="clean",
+ description=(
+ "Remove logs and artifacts so cloud-init re-runs on "
+ "a clean system"
+ ),
+ )
parser.add_argument(
- '-l', '--logs', action='store_true', default=False, dest='remove_logs',
- help='Remove cloud-init logs.')
+ "-l",
+ "--logs",
+ action="store_true",
+ default=False,
+ dest="remove_logs",
+ help="Remove cloud-init logs.",
+ )
parser.add_argument(
- '-r', '--reboot', action='store_true', default=False,
- help='Reboot system after logs are cleaned so cloud-init re-runs.')
+ "-r",
+ "--reboot",
+ action="store_true",
+ default=False,
+ help="Reboot system after logs are cleaned so cloud-init re-runs.",
+ )
parser.add_argument(
- '-s', '--seed', action='store_true', default=False, dest='remove_seed',
- help='Remove cloud-init seed directory /var/lib/cloud/seed.')
+ "-s",
+ "--seed",
+ action="store_true",
+ default=False,
+ dest="remove_seed",
+ help="Remove cloud-init seed directory /var/lib/cloud/seed.",
+ )
return parser
@@ -59,8 +80,8 @@ def remove_artifacts(remove_logs, remove_seed=False):
if not os.path.isdir(init.paths.cloud_dir):
return 0 # Artifacts dir already cleaned
- seed_path = os.path.join(init.paths.cloud_dir, 'seed')
- for path in glob.glob('%s/*' % init.paths.cloud_dir):
+ seed_path = os.path.join(init.paths.cloud_dir, "seed")
+ for path in glob.glob("%s/*" % init.paths.cloud_dir):
if path == seed_path and not remove_seed:
continue
try:
@@ -69,7 +90,7 @@ def remove_artifacts(remove_logs, remove_seed=False):
else:
del_file(path)
except OSError as e:
- error('Could not remove {0}: {1}'.format(path, str(e)))
+ error("Could not remove {0}: {1}".format(path, str(e)))
return 1
return 0
@@ -78,13 +99,15 @@ def handle_clean_args(name, args):
"""Handle calls to 'cloud-init clean' as a subcommand."""
exit_code = remove_artifacts(args.remove_logs, args.remove_seed)
if exit_code == 0 and args.reboot:
- cmd = ['shutdown', '-r', 'now']
+ cmd = ["shutdown", "-r", "now"]
try:
subp(cmd, capture=False)
except ProcessExecutionError as e:
error(
'Could not reboot this system using "{0}": {1}'.format(
- cmd, str(e)))
+ cmd, str(e)
+ )
+ )
exit_code = 1
return exit_code
@@ -92,10 +115,10 @@ def handle_clean_args(name, args):
def main():
"""Tool to collect and tar all cloud-init related logs."""
parser = get_parser()
- sys.exit(handle_clean_args('clean', parser.parse_args()))
+ sys.exit(handle_clean_args("clean", parser.parse_args()))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
# vi: ts=4 expandtab
diff --git a/cloudinit/cmd/cloud_id.py b/cloudinit/cmd/cloud_id.py
index 0cdc9675..b92b03a8 100755
--- a/cloudinit/cmd/cloud_id.py
+++ b/cloudinit/cmd/cloud_id.py
@@ -6,13 +6,16 @@ import argparse
import json
import sys
-from cloudinit.util import error
from cloudinit.sources import (
- INSTANCE_JSON_FILE, METADATA_UNKNOWN, canonical_cloud_id)
+ INSTANCE_JSON_FILE,
+ METADATA_UNKNOWN,
+ canonical_cloud_id,
+)
+from cloudinit.util import error
-DEFAULT_INSTANCE_JSON = '/run/cloud-init/%s' % INSTANCE_JSON_FILE
+DEFAULT_INSTANCE_JSON = "/run/cloud-init/%s" % INSTANCE_JSON_FILE
-NAME = 'cloud-id'
+NAME = "cloud-id"
def get_parser(parser=None):
@@ -27,17 +30,30 @@ def get_parser(parser=None):
if not parser:
parser = argparse.ArgumentParser(
prog=NAME,
- description='Report the canonical cloud-id for this instance')
+ description="Report the canonical cloud-id for this instance",
+ )
parser.add_argument(
- '-j', '--json', action='store_true', default=False,
- help='Report all standardized cloud-id information as json.')
+ "-j",
+ "--json",
+ action="store_true",
+ default=False,
+ help="Report all standardized cloud-id information as json.",
+ )
parser.add_argument(
- '-l', '--long', action='store_true', default=False,
- help='Report extended cloud-id information as tab-delimited string.')
+ "-l",
+ "--long",
+ action="store_true",
+ default=False,
+ help="Report extended cloud-id information as tab-delimited string.",
+ )
parser.add_argument(
- '-i', '--instance-data', type=str, default=DEFAULT_INSTANCE_JSON,
- help=('Path to instance-data.json file. Default is %s' %
- DEFAULT_INSTANCE_JSON))
+ "-i",
+ "--instance-data",
+ type=str,
+ default=DEFAULT_INSTANCE_JSON,
+ help="Path to instance-data.json file. Default is %s"
+ % DEFAULT_INSTANCE_JSON,
+ )
return parser
@@ -53,24 +69,28 @@ def handle_args(name, args):
except IOError:
return error(
"File not found '%s'. Provide a path to instance data json file"
- ' using --instance-data' % args.instance_data)
+ " using --instance-data" % args.instance_data
+ )
except ValueError as e:
return error(
- "File '%s' is not valid json. %s" % (args.instance_data, e))
- v1 = instance_data.get('v1', {})
+ "File '%s' is not valid json. %s" % (args.instance_data, e)
+ )
+ v1 = instance_data.get("v1", {})
cloud_id = canonical_cloud_id(
- v1.get('cloud_name', METADATA_UNKNOWN),
- v1.get('region', METADATA_UNKNOWN),
- v1.get('platform', METADATA_UNKNOWN))
+ v1.get("cloud_name", METADATA_UNKNOWN),
+ v1.get("region", METADATA_UNKNOWN),
+ v1.get("platform", METADATA_UNKNOWN),
+ )
if args.json:
- v1['cloud_id'] = cloud_id
- response = json.dumps( # Pretty, sorted json
- v1, indent=1, sort_keys=True, separators=(',', ': '))
+ v1["cloud_id"] = cloud_id
+ response = json.dumps( # Pretty, sorted json
+ v1, indent=1, sort_keys=True, separators=(",", ": ")
+ )
elif args.long:
- response = '%s\t%s' % (cloud_id, v1.get('region', METADATA_UNKNOWN))
+ response = "%s\t%s" % (cloud_id, v1.get("region", METADATA_UNKNOWN))
else:
response = cloud_id
- sys.stdout.write('%s\n' % response)
+ sys.stdout.write("%s\n" % response)
return 0
@@ -80,7 +100,7 @@ def main():
sys.exit(handle_args(NAME, parser.parse_args()))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
# vi: ts=4 expandtab
diff --git a/cloudinit/cmd/devel/__init__.py b/cloudinit/cmd/devel/__init__.py
index 3ae28b69..ead5f7a9 100644
--- a/cloudinit/cmd/devel/__init__.py
+++ b/cloudinit/cmd/devel/__init__.py
@@ -11,7 +11,7 @@ from cloudinit.stages import Init
def addLogHandlerCLI(logger, log_level):
"""Add a commandline logging handler to emit messages to stderr."""
- formatter = logging.Formatter('%(levelname)s: %(message)s')
+ formatter = logging.Formatter("%(levelname)s: %(message)s")
log.setupBasicLogging(log_level, formatter=formatter)
return logger
@@ -22,4 +22,5 @@ def read_cfg_paths():
init.read_cfg()
return init.paths
+
# vi: ts=4 expandtab
diff --git a/cloudinit/cmd/devel/hotplug_hook.py b/cloudinit/cmd/devel/hotplug_hook.py
index f6f36a00..a9be0379 100644
--- a/cloudinit/cmd/devel/hotplug_hook.py
+++ b/cloudinit/cmd/devel/hotplug_hook.py
@@ -6,20 +6,17 @@ import os
import sys
import time
-from cloudinit import log
-from cloudinit import reporting
-from cloudinit import stages
+from cloudinit import log, reporting, stages
from cloudinit.event import EventScope, EventType
from cloudinit.net import activators, read_sys_net_safe
from cloudinit.net.network_state import parse_net_config_data
from cloudinit.reporting import events
-from cloudinit.stages import Init
from cloudinit.sources import DataSource # noqa: F401
from cloudinit.sources import DataSourceNotFoundException
-
+from cloudinit.stages import Init
LOG = log.getLogger(__name__)
-NAME = 'hotplug-hook'
+NAME = "hotplug-hook"
def get_parser(parser=None):
@@ -35,33 +32,38 @@ def get_parser(parser=None):
parser.description = __doc__
parser.add_argument(
- "-s", "--subsystem", required=True,
+ "-s",
+ "--subsystem",
+ required=True,
help="subsystem to act on",
- choices=['net']
+ choices=["net"],
)
subparsers = parser.add_subparsers(
- title='Hotplug Action',
- dest='hotplug_action'
+ title="Hotplug Action", dest="hotplug_action"
)
subparsers.required = True
subparsers.add_parser(
- 'query',
- help='query if hotplug is enabled for given subsystem'
+ "query", help="query if hotplug is enabled for given subsystem"
)
parser_handle = subparsers.add_parser(
- 'handle', help='handle the hotplug event')
+ "handle", help="handle the hotplug event"
+ )
parser_handle.add_argument(
- "-d", "--devpath", required=True,
+ "-d",
+ "--devpath",
+ required=True,
metavar="PATH",
- help="sysfs path to hotplugged device"
+ help="sysfs path to hotplugged device",
)
parser_handle.add_argument(
- "-u", "--udevaction", required=True,
+ "-u",
+ "--udevaction",
+ required=True,
help="action to take",
- choices=['add', 'remove']
+ choices=["add", "remove"],
)
return parser
@@ -90,27 +92,29 @@ class UeventHandler(abc.ABC):
def detect_hotplugged_device(self):
detect_presence = None
- if self.action == 'add':
+ if self.action == "add":
detect_presence = True
- elif self.action == 'remove':
+ elif self.action == "remove":
detect_presence = False
else:
- raise ValueError('Unknown action: %s' % self.action)
+ raise ValueError("Unknown action: %s" % self.action)
if detect_presence != self.device_detected():
raise RuntimeError(
- 'Failed to detect %s in updated metadata' % self.id)
+ "Failed to detect %s in updated metadata" % self.id
+ )
def success(self):
return self.success_fn()
def update_metadata(self):
- result = self.datasource.update_metadata_if_supported([
- EventType.HOTPLUG])
+ result = self.datasource.update_metadata_if_supported(
+ [EventType.HOTPLUG]
+ )
if not result:
raise RuntimeError(
- 'Datasource %s not updated for '
- 'event %s' % (self.datasource, EventType.HOTPLUG)
+ "Datasource %s not updated for event %s"
+ % (self.datasource, EventType.HOTPLUG)
)
return result
@@ -118,7 +122,7 @@ class UeventHandler(abc.ABC):
class NetHandler(UeventHandler):
def __init__(self, datasource, devpath, action, success_fn):
# convert devpath to mac address
- id = read_sys_net_safe(os.path.basename(devpath), 'address')
+ id = read_sys_net_safe(os.path.basename(devpath), "address")
super().__init__(id, datasource, devpath, action, success_fn)
def apply(self):
@@ -128,14 +132,16 @@ class NetHandler(UeventHandler):
)
interface_name = os.path.basename(self.devpath)
activator = activators.select_activator()
- if self.action == 'add':
+ if self.action == "add":
if not activator.bring_up_interface(interface_name):
raise RuntimeError(
- 'Failed to bring up device: {}'.format(self.devpath))
- elif self.action == 'remove':
+ "Failed to bring up device: {}".format(self.devpath)
+ )
+ elif self.action == "remove":
if not activator.bring_down_interface(interface_name):
raise RuntimeError(
- 'Failed to bring down device: {}'.format(self.devpath))
+ "Failed to bring down device: {}".format(self.devpath)
+ )
@property
def config(self):
@@ -144,15 +150,16 @@ class NetHandler(UeventHandler):
def device_detected(self) -> bool:
netstate = parse_net_config_data(self.config)
found = [
- iface for iface in netstate.iter_interfaces()
- if iface.get('mac_address') == self.id
+ iface
+ for iface in netstate.iter_interfaces()
+ if iface.get("mac_address") == self.id
]
- LOG.debug('Ifaces with ID=%s : %s', self.id, found)
+ LOG.debug("Ifaces with ID=%s : %s", self.id, found)
return len(found) > 0
SUBSYSTEM_PROPERTES_MAP = {
- 'net': (NetHandler, EventScope.NETWORK),
+ "net": (NetHandler, EventScope.NETWORK),
}
@@ -161,66 +168,65 @@ def is_enabled(hotplug_init, subsystem):
scope = SUBSYSTEM_PROPERTES_MAP[subsystem][1]
except KeyError as e:
raise Exception(
- 'hotplug-hook: cannot handle events for subsystem: {}'.format(
- subsystem)
+ "hotplug-hook: cannot handle events for subsystem: {}".format(
+ subsystem
+ )
) from e
return stages.update_event_enabled(
datasource=hotplug_init.datasource,
cfg=hotplug_init.cfg,
event_source_type=EventType.HOTPLUG,
- scope=scope
+ scope=scope,
)
def initialize_datasource(hotplug_init, subsystem):
- LOG.debug('Fetching datasource')
+ LOG.debug("Fetching datasource")
datasource = hotplug_init.fetch(existing="trust")
if not datasource.get_supported_events([EventType.HOTPLUG]):
- LOG.debug('hotplug not supported for event of type %s', subsystem)
+ LOG.debug("hotplug not supported for event of type %s", subsystem)
return
if not is_enabled(hotplug_init, subsystem):
- LOG.debug('hotplug not enabled for event of type %s', subsystem)
+ LOG.debug("hotplug not enabled for event of type %s", subsystem)
return
return datasource
-def handle_hotplug(
- hotplug_init: Init, devpath, subsystem, udevaction
-):
+def handle_hotplug(hotplug_init: Init, devpath, subsystem, udevaction):
datasource = initialize_datasource(hotplug_init, subsystem)
if not datasource:
return
handler_cls = SUBSYSTEM_PROPERTES_MAP[subsystem][0]
- LOG.debug('Creating %s event handler', subsystem)
+ LOG.debug("Creating %s event handler", subsystem)
event_handler = handler_cls(
datasource=datasource,
devpath=devpath,
action=udevaction,
- success_fn=hotplug_init._write_to_cache
+ success_fn=hotplug_init._write_to_cache,
) # type: UeventHandler
wait_times = [1, 3, 5, 10, 30]
for attempt, wait in enumerate(wait_times):
LOG.debug(
- 'subsystem=%s update attempt %s/%s',
+ "subsystem=%s update attempt %s/%s",
subsystem,
attempt,
- len(wait_times)
+ len(wait_times),
)
try:
- LOG.debug('Refreshing metadata')
+ LOG.debug("Refreshing metadata")
event_handler.update_metadata()
- LOG.debug('Detecting device in updated metadata')
+ LOG.debug("Detecting device in updated metadata")
event_handler.detect_hotplugged_device()
- LOG.debug('Applying config change')
+ LOG.debug("Applying config change")
event_handler.apply()
- LOG.debug('Updating cache')
+ LOG.debug("Updating cache")
event_handler.success()
break
except Exception as e:
- LOG.debug('Exception while processing hotplug event. %s', e)
+ LOG.debug("Exception while processing hotplug event. %s", e)
time.sleep(wait)
last_exception = e
else:
@@ -238,31 +244,33 @@ def handle_args(name, args):
hotplug_init.read_cfg()
log.setupLogging(hotplug_init.cfg)
- if 'reporting' in hotplug_init.cfg:
- reporting.update_configuration(hotplug_init.cfg.get('reporting'))
+ if "reporting" in hotplug_init.cfg:
+ reporting.update_configuration(hotplug_init.cfg.get("reporting"))
# Logging isn't going to be setup until now
LOG.debug(
- '%s called with the following arguments: {'
- 'hotplug_action: %s, subsystem: %s, udevaction: %s, devpath: %s}',
+ "%s called with the following arguments: {"
+ "hotplug_action: %s, subsystem: %s, udevaction: %s, devpath: %s}",
name,
args.hotplug_action,
args.subsystem,
- args.udevaction if 'udevaction' in args else None,
- args.devpath if 'devpath' in args else None,
+ args.udevaction if "udevaction" in args else None,
+ args.devpath if "devpath" in args else None,
)
with hotplug_reporter:
try:
- if args.hotplug_action == 'query':
+ if args.hotplug_action == "query":
try:
datasource = initialize_datasource(
- hotplug_init, args.subsystem)
+ hotplug_init, args.subsystem
+ )
except DataSourceNotFoundException:
print(
"Unable to determine hotplug state. No datasource "
- "detected")
+ "detected"
+ )
sys.exit(1)
- print('enabled' if datasource else 'disabled')
+ print("enabled" if datasource else "disabled")
else:
handle_hotplug(
hotplug_init=hotplug_init,
@@ -271,13 +279,13 @@ def handle_args(name, args):
udevaction=args.udevaction,
)
except Exception:
- LOG.exception('Received fatal exception handling hotplug!')
+ LOG.exception("Received fatal exception handling hotplug!")
raise
- LOG.debug('Exiting hotplug handler')
+ LOG.debug("Exiting hotplug handler")
reporting.flush_events()
-if __name__ == '__main__':
+if __name__ == "__main__":
args = get_parser().parse_args()
handle_args(NAME, args)
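Note: the argparse rewrites in hotplug_hook.py show Black's handling of calls that exceed the line length: every argument moves to its own line and a trailing comma is added (the "magic trailing comma"), so later additions touch only one line. A self-contained sketch, echoing the options above:

    import argparse

    parser = argparse.ArgumentParser(prog="hotplug-example")  # illustrative parser
    # Fits within the line length: Black keeps it on one line.
    parser.add_argument("-s", "--subsystem", choices=["net"])
    # Too long for one line: Black splits it, one argument per line, trailing comma kept.
    parser.add_argument(
        "-d",
        "--devpath",
        required=True,
        metavar="PATH",
        help="sysfs path to the hotplugged device",
    )
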
diff --git a/cloudinit/cmd/devel/logs.py b/cloudinit/cmd/devel/logs.py
index 31ade73d..d54b809a 100644
--- a/cloudinit/cmd/devel/logs.py
+++ b/cloudinit/cmd/devel/logs.py
@@ -5,20 +5,19 @@
"""Define 'collect-logs' utility and handler to include in cloud-init cmd."""
import argparse
-from datetime import datetime
import os
import shutil
import sys
+from datetime import datetime
from cloudinit.sources import INSTANCE_JSON_SENSITIVE_FILE
+from cloudinit.subp import ProcessExecutionError, subp
from cloudinit.temp_utils import tempdir
-from cloudinit.subp import (ProcessExecutionError, subp)
-from cloudinit.util import (chdir, copy, ensure_dir, write_file)
+from cloudinit.util import chdir, copy, ensure_dir, write_file
-
-CLOUDINIT_LOGS = ['/var/log/cloud-init.log', '/var/log/cloud-init-output.log']
-CLOUDINIT_RUN_DIR = '/run/cloud-init'
-USER_DATA_FILE = '/var/lib/cloud/instance/user-data.txt' # Optional
+CLOUDINIT_LOGS = ["/var/log/cloud-init.log", "/var/log/cloud-init-output.log"]
+CLOUDINIT_RUN_DIR = "/run/cloud-init"
+USER_DATA_FILE = "/var/lib/cloud/instance/user-data.txt" # Optional
def get_parser(parser=None):
@@ -32,26 +31,44 @@ def get_parser(parser=None):
"""
if not parser:
parser = argparse.ArgumentParser(
- prog='collect-logs',
- description='Collect and tar all cloud-init debug info')
- parser.add_argument('--verbose', '-v', action='count', default=0,
- dest='verbosity', help="Be more verbose.")
+ prog="collect-logs",
+ description="Collect and tar all cloud-init debug info",
+ )
+ parser.add_argument(
+ "--verbose",
+ "-v",
+ action="count",
+ default=0,
+ dest="verbosity",
+ help="Be more verbose.",
+ )
parser.add_argument(
- "--tarfile", '-t', default='cloud-init.tar.gz',
- help=('The tarfile to create containing all collected logs.'
- ' Default: cloud-init.tar.gz'))
+ "--tarfile",
+ "-t",
+ default="cloud-init.tar.gz",
+ help=(
+ "The tarfile to create containing all collected logs."
+ " Default: cloud-init.tar.gz"
+ ),
+ )
parser.add_argument(
- "--include-userdata", '-u', default=False, action='store_true',
- dest='userdata', help=(
- 'Optionally include user-data from {0} which could contain'
- ' sensitive information.'.format(USER_DATA_FILE)))
+ "--include-userdata",
+ "-u",
+ default=False,
+ action="store_true",
+ dest="userdata",
+ help=(
+ "Optionally include user-data from {0} which could contain"
+ " sensitive information.".format(USER_DATA_FILE)
+ ),
+ )
return parser
def _copytree_rundir_ignore_files(curdir, files):
"""Return a list of files to ignore for /run/cloud-init directory"""
ignored_files = [
- 'hook-hotplug-cmd', # named pipe for hotplug
+ "hook-hotplug-cmd", # named pipe for hotplug
]
if os.getuid() != 0:
# Ignore root-permissioned files
@@ -94,52 +111,67 @@ def collect_logs(tarfile, include_userdata, verbosity=0):
if include_userdata and os.getuid() != 0:
sys.stderr.write(
"To include userdata, root user is required."
- " Try sudo cloud-init collect-logs\n")
+ " Try sudo cloud-init collect-logs\n"
+ )
return 1
tarfile = os.path.abspath(tarfile)
- date = datetime.utcnow().date().strftime('%Y-%m-%d')
- log_dir = 'cloud-init-logs-{0}'.format(date)
- with tempdir(dir='/tmp') as tmp_dir:
+ date = datetime.utcnow().date().strftime("%Y-%m-%d")
+ log_dir = "cloud-init-logs-{0}".format(date)
+ with tempdir(dir="/tmp") as tmp_dir:
log_dir = os.path.join(tmp_dir, log_dir)
version = _write_command_output_to_file(
- ['cloud-init', '--version'],
- os.path.join(log_dir, 'version'),
- "cloud-init --version", verbosity)
+ ["cloud-init", "--version"],
+ os.path.join(log_dir, "version"),
+ "cloud-init --version",
+ verbosity,
+ )
dpkg_ver = _write_command_output_to_file(
- ['dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'],
- os.path.join(log_dir, 'dpkg-version'),
- "dpkg version", verbosity)
+ ["dpkg-query", "--show", "-f=${Version}\n", "cloud-init"],
+ os.path.join(log_dir, "dpkg-version"),
+ "dpkg version",
+ verbosity,
+ )
if not version:
version = dpkg_ver if dpkg_ver else "not-available"
_debug("collected cloud-init version: %s\n" % version, 1, verbosity)
_write_command_output_to_file(
- ['dmesg'], os.path.join(log_dir, 'dmesg.txt'),
- "dmesg output", verbosity)
+ ["dmesg"],
+ os.path.join(log_dir, "dmesg.txt"),
+ "dmesg output",
+ verbosity,
+ )
_write_command_output_to_file(
- ['journalctl', '--boot=0', '-o', 'short-precise'],
- os.path.join(log_dir, 'journal.txt'),
- "systemd journal of current boot", verbosity)
+ ["journalctl", "--boot=0", "-o", "short-precise"],
+ os.path.join(log_dir, "journal.txt"),
+ "systemd journal of current boot",
+ verbosity,
+ )
for log in CLOUDINIT_LOGS:
_collect_file(log, log_dir, verbosity)
if include_userdata:
_collect_file(USER_DATA_FILE, log_dir, verbosity)
- run_dir = os.path.join(log_dir, 'run')
+ run_dir = os.path.join(log_dir, "run")
ensure_dir(run_dir)
if os.path.exists(CLOUDINIT_RUN_DIR):
try:
- shutil.copytree(CLOUDINIT_RUN_DIR,
- os.path.join(run_dir, 'cloud-init'),
- ignore=_copytree_rundir_ignore_files)
+ shutil.copytree(
+ CLOUDINIT_RUN_DIR,
+ os.path.join(run_dir, "cloud-init"),
+ ignore=_copytree_rundir_ignore_files,
+ )
except shutil.Error as e:
sys.stderr.write("Failed collecting file(s) due to error:\n")
- sys.stderr.write(str(e) + '\n')
+ sys.stderr.write(str(e) + "\n")
_debug("collected dir %s\n" % CLOUDINIT_RUN_DIR, 1, verbosity)
else:
- _debug("directory '%s' did not exist\n" % CLOUDINIT_RUN_DIR, 1,
- verbosity)
+ _debug(
+ "directory '%s' did not exist\n" % CLOUDINIT_RUN_DIR,
+ 1,
+ verbosity,
+ )
with chdir(tmp_dir):
- subp(['tar', 'czvf', tarfile, log_dir.replace(tmp_dir + '/', '')])
+ subp(["tar", "czvf", tarfile, log_dir.replace(tmp_dir + "/", "")])
sys.stderr.write("Wrote %s\n" % tarfile)
return 0
@@ -152,10 +184,10 @@ def handle_collect_logs_args(name, args):
def main():
"""Tool to collect and tar all cloud-init related logs."""
parser = get_parser()
- return handle_collect_logs_args('collect-logs', parser.parse_args())
+ return handle_collect_logs_args("collect-logs", parser.parse_args())
-if __name__ == '__main__':
+if __name__ == "__main__":
sys.exit(main())
# vi: ts=4 expandtab
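Note: the import churn in logs.py comes from isort rather than Black: imports are grouped (standard library, then third-party, then first-party), plain `import` lines precede `from` imports within a group, each group is alphabetized, and multiple names from one module are combined onto a single line. The resulting header of logs.py, as shown in the hunks above, looks like this:

    import argparse
    import os
    import shutil
    import sys
    from datetime import datetime

    from cloudinit.subp import ProcessExecutionError, subp
    from cloudinit.util import chdir, copy, ensure_dir, write_file
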
diff --git a/cloudinit/cmd/devel/make_mime.py b/cloudinit/cmd/devel/make_mime.py
index 4e6a5778..a7493c74 100755
--- a/cloudinit/cmd/devel/make_mime.py
+++ b/cloudinit/cmd/devel/make_mime.py
@@ -9,19 +9,22 @@ from email.mime.text import MIMEText
from cloudinit import log
from cloudinit.handlers import INCLUSION_TYPES_MAP
+
from . import addLogHandlerCLI
-NAME = 'make-mime'
+NAME = "make-mime"
LOG = log.getLogger(NAME)
-EPILOG = ("Example: make-mime -a config.yaml:cloud-config "
- "-a script.sh:x-shellscript > user-data")
+EPILOG = (
+ "Example: make-mime -a config.yaml:cloud-config "
+ "-a script.sh:x-shellscript > user-data"
+)
def file_content_type(text):
- """ Return file content type by reading the first line of the input. """
+ """Return file content type by reading the first line of the input."""
try:
filename, content_type = text.split(":", 1)
- return (open(filename, 'r'), filename, content_type.strip())
+ return (open(filename, "r"), filename, content_type.strip())
except ValueError as e:
raise argparse.ArgumentError(
text, "Invalid value for %r" % (text)
@@ -41,26 +44,43 @@ def get_parser(parser=None):
# update the parser's doc and add an epilog to show an example
parser.description = __doc__
parser.epilog = EPILOG
- parser.add_argument("-a", "--attach", dest="files", type=file_content_type,
- action='append', default=[],
- metavar="<file>:<content-type>",
- help=("attach the given file as the specified "
- "content-type"))
- parser.add_argument('-l', '--list-types', action='store_true',
- default=False,
- help='List support cloud-init content types.')
- parser.add_argument('-f', '--force', action='store_true',
- default=False,
- help='Ignore unknown content-type warnings')
+ parser.add_argument(
+ "-a",
+ "--attach",
+ dest="files",
+ type=file_content_type,
+ action="append",
+ default=[],
+ metavar="<file>:<content-type>",
+ help="attach the given file as the specified content-type",
+ )
+ parser.add_argument(
+ "-l",
+ "--list-types",
+ action="store_true",
+ default=False,
+ help="List support cloud-init content types.",
+ )
+ parser.add_argument(
+ "-f",
+ "--force",
+ action="store_true",
+ default=False,
+ help="Ignore unknown content-type warnings",
+ )
return parser
def get_content_types(strip_prefix=False):
- """ Return a list of cloud-init supported content types. Optionally
- strip out the leading 'text/' of the type if strip_prefix=True.
+ """Return a list of cloud-init supported content types. Optionally
+ strip out the leading 'text/' of the type if strip_prefix=True.
"""
- return sorted([ctype.replace("text/", "") if strip_prefix else ctype
- for ctype in INCLUSION_TYPES_MAP.values()])
+ return sorted(
+ [
+ ctype.replace("text/", "") if strip_prefix else ctype
+ for ctype in INCLUSION_TYPES_MAP.values()
+ ]
+ )
def handle_args(name, args):
@@ -82,14 +102,16 @@ def handle_args(name, args):
for i, (fh, filename, format_type) in enumerate(args.files):
contents = fh.read()
sub_message = MIMEText(contents, format_type, sys.getdefaultencoding())
- sub_message.add_header('Content-Disposition',
- 'attachment; filename="%s"' % (filename))
+ sub_message.add_header(
+ "Content-Disposition", 'attachment; filename="%s"' % (filename)
+ )
content_type = sub_message.get_content_type().lower()
if content_type not in get_content_types():
level = "WARNING" if args.force else "ERROR"
- msg = (level + ": content type %r for attachment %s "
- "may be incorrect!") % (content_type, i + 1)
- sys.stderr.write(msg + '\n')
+ msg = (
+ level + ": content type %r for attachment %s may be incorrect!"
+ ) % (content_type, i + 1)
+ sys.stderr.write(msg + "\n")
errors.append(msg)
sub_messages.append(sub_message)
if len(errors) and not args.force:
@@ -104,10 +126,10 @@ def handle_args(name, args):
def main():
args = get_parser().parse_args()
- return(handle_args(NAME, args))
+ return handle_args(NAME, args)
-if __name__ == '__main__':
+if __name__ == "__main__":
sys.exit(main())
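Note: two smaller normalizations appear in make_mime.py and are consistent with Black's behavior: redundant parentheses around a return value are dropped (`return(handle_args(NAME, args))` becomes `return handle_args(NAME, args)`), and stray spaces just inside docstring quotes are trimmed. A runnable sketch with an illustrative function:

    def first_word(text):
        """Return the first word of the given text."""  # was: """ Return the first word ... """
        return text.split()[0]  # was: return(text.split()[0])

    print(first_word("hello world"))
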
diff --git a/cloudinit/cmd/devel/net_convert.py b/cloudinit/cmd/devel/net_convert.py
index f4a98e5e..18b1e7ff 100755
--- a/cloudinit/cmd/devel/net_convert.py
+++ b/cloudinit/cmd/devel/net_convert.py
@@ -6,15 +6,13 @@ import json
import os
import sys
-from cloudinit.sources.helpers import openstack
+from cloudinit import distros, log, safeyaml
+from cloudinit.net import eni, netplan, network_state, networkd, sysconfig
from cloudinit.sources import DataSourceAzure as azure
from cloudinit.sources import DataSourceOVF as ovf
+from cloudinit.sources.helpers import openstack
-from cloudinit import distros, safeyaml
-from cloudinit.net import eni, netplan, networkd, network_state, sysconfig
-from cloudinit import log
-
-NAME = 'net-convert'
+NAME = "net-convert"
def get_parser(parser=None):
@@ -27,33 +25,59 @@ def get_parser(parser=None):
"""
if not parser:
parser = argparse.ArgumentParser(prog=NAME, description=__doc__)
- parser.add_argument("-p", "--network-data", type=open,
- metavar="PATH", required=True,
- help="The network configuration to read")
- parser.add_argument("-k", "--kind",
- choices=['eni', 'network_data.json', 'yaml',
- 'azure-imds', 'vmware-imc'],
- required=True,
- help="The format of the given network config")
- parser.add_argument("-d", "--directory",
- metavar="PATH",
- help="directory to place output in",
- required=True)
- parser.add_argument("-D", "--distro",
- choices=[item for sublist in
- distros.OSFAMILIES.values()
- for item in sublist],
- required=True)
- parser.add_argument("-m", "--mac",
- metavar="name,mac",
- action='append',
- help="interface name to mac mapping")
- parser.add_argument("--debug", action='store_true',
- help='enable debug logging to stderr.')
- parser.add_argument("-O", "--output-kind",
- choices=['eni', 'netplan', 'networkd', 'sysconfig'],
- required=True,
- help="The network config format to emit")
+ parser.add_argument(
+ "-p",
+ "--network-data",
+ type=open,
+ metavar="PATH",
+ required=True,
+ help="The network configuration to read",
+ )
+ parser.add_argument(
+ "-k",
+ "--kind",
+ choices=[
+ "eni",
+ "network_data.json",
+ "yaml",
+ "azure-imds",
+ "vmware-imc",
+ ],
+ required=True,
+ help="The format of the given network config",
+ )
+ parser.add_argument(
+ "-d",
+ "--directory",
+ metavar="PATH",
+ help="directory to place output in",
+ required=True,
+ )
+ parser.add_argument(
+ "-D",
+ "--distro",
+ choices=[
+ item for sublist in distros.OSFAMILIES.values() for item in sublist
+ ],
+ required=True,
+ )
+ parser.add_argument(
+ "-m",
+ "--mac",
+ metavar="name,mac",
+ action="append",
+ help="interface name to mac mapping",
+ )
+ parser.add_argument(
+ "--debug", action="store_true", help="enable debug logging to stderr."
+ )
+ parser.add_argument(
+ "-O",
+ "--output-kind",
+ choices=["eni", "netplan", "networkd", "sysconfig"],
+ required=True,
+ help="The network config format to emit",
+ )
return parser
@@ -81,59 +105,68 @@ def handle_args(name, args):
pre_ns = eni.convert_eni_data(net_data)
elif args.kind == "yaml":
pre_ns = safeyaml.load(net_data)
- if 'network' in pre_ns:
- pre_ns = pre_ns.get('network')
+ if "network" in pre_ns:
+ pre_ns = pre_ns.get("network")
if args.debug:
- sys.stderr.write('\n'.join(
- ["Input YAML", safeyaml.dumps(pre_ns), ""]))
- elif args.kind == 'network_data.json':
+ sys.stderr.write(
+ "\n".join(["Input YAML", safeyaml.dumps(pre_ns), ""])
+ )
+ elif args.kind == "network_data.json":
pre_ns = openstack.convert_net_json(
- json.loads(net_data), known_macs=known_macs)
- elif args.kind == 'azure-imds':
+ json.loads(net_data), known_macs=known_macs
+ )
+ elif args.kind == "azure-imds":
pre_ns = azure.parse_network_config(json.loads(net_data))
- elif args.kind == 'vmware-imc':
+ elif args.kind == "vmware-imc":
config = ovf.Config(ovf.ConfigFile(args.network_data.name))
pre_ns = ovf.get_network_config_from_conf(config, False)
ns = network_state.parse_net_config_data(pre_ns)
if args.debug:
- sys.stderr.write('\n'.join(
- ["", "Internal State", safeyaml.dumps(ns), ""]))
+ sys.stderr.write(
+ "\n".join(["", "Internal State", safeyaml.dumps(ns), ""])
+ )
distro_cls = distros.fetch(args.distro)
distro = distro_cls(args.distro, {}, None)
config = {}
if args.output_kind == "eni":
r_cls = eni.Renderer
- config = distro.renderer_configs.get('eni')
+ config = distro.renderer_configs.get("eni")
elif args.output_kind == "netplan":
r_cls = netplan.Renderer
- config = distro.renderer_configs.get('netplan')
+ config = distro.renderer_configs.get("netplan")
# don't run netplan generate/apply
- config['postcmds'] = False
+ config["postcmds"] = False
# trim leading slash
- config['netplan_path'] = config['netplan_path'][1:]
+ config["netplan_path"] = config["netplan_path"][1:]
# enable some netplan features
- config['features'] = ['dhcp-use-domains', 'ipv6-mtu']
+ config["features"] = ["dhcp-use-domains", "ipv6-mtu"]
elif args.output_kind == "networkd":
r_cls = networkd.Renderer
- config = distro.renderer_configs.get('networkd')
+ config = distro.renderer_configs.get("networkd")
elif args.output_kind == "sysconfig":
r_cls = sysconfig.Renderer
- config = distro.renderer_configs.get('sysconfig')
+ config = distro.renderer_configs.get("sysconfig")
else:
raise RuntimeError("Invalid output_kind")
r = r_cls(config=config)
- sys.stderr.write(''.join([
- "Read input format '%s' from '%s'.\n" % (
- args.kind, args.network_data.name),
- "Wrote output format '%s' to '%s'\n" % (
- args.output_kind, args.directory)]) + "\n")
+ sys.stderr.write(
+ "".join(
+ [
+ "Read input format '%s' from '%s'.\n"
+ % (args.kind, args.network_data.name),
+ "Wrote output format '%s' to '%s'\n"
+ % (args.output_kind, args.directory),
+ ]
+ )
+ + "\n"
+ )
r.render_network_state(network_state=ns, target=args.directory)
-if __name__ == '__main__':
+if __name__ == "__main__":
args = get_parser().parse_args()
handle_args(NAME, args)
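Note: net_convert.py shows both directions of Black's fit-or-split rule. A call or collection is joined onto one line when it fits the configured line length (apparently 79 columns here, judging by where the hunks above wrap), and otherwise split one element per line with a trailing comma, including nested collections such as the long `choices` lists. A standalone sketch under that 79-column assumption:

    import argparse

    parser = argparse.ArgumentParser(prog="net-convert-example")  # illustrative parser
    parser.add_argument("-d", "--directory", metavar="PATH", required=True)  # fits: one line
    parser.add_argument(
        "-k",
        "--kind",
        choices=[  # with a 79-column limit this nested list no longer fits, so it splits too
            "eni",
            "network_data.json",
            "yaml",
            "azure-imds",
            "vmware-imc",
        ],
        required=True,
    )
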
diff --git a/cloudinit/cmd/devel/parser.py b/cloudinit/cmd/devel/parser.py
index be304630..76b16c2e 100644
--- a/cloudinit/cmd/devel/parser.py
+++ b/cloudinit/cmd/devel/parser.py
@@ -5,33 +5,47 @@
"""Define 'devel' subcommand argument parsers to include in cloud-init cmd."""
import argparse
+
from cloudinit.config import schema
-from . import hotplug_hook
-from . import net_convert
-from . import render
-from . import make_mime
+from . import hotplug_hook, make_mime, net_convert, render
def get_parser(parser=None):
if not parser:
parser = argparse.ArgumentParser(
- prog='cloudinit-devel',
- description='Run development cloud-init tools')
- subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand')
+ prog="cloudinit-devel",
+ description="Run development cloud-init tools",
+ )
+ subparsers = parser.add_subparsers(title="Subcommands", dest="subcommand")
subparsers.required = True
subcmds = [
- (hotplug_hook.NAME, hotplug_hook.__doc__,
- hotplug_hook.get_parser, hotplug_hook.handle_args),
- ('schema', 'Validate cloud-config files for document schema',
- schema.get_parser, schema.handle_schema_args),
- (net_convert.NAME, net_convert.__doc__,
- net_convert.get_parser, net_convert.handle_args),
- (render.NAME, render.__doc__,
- render.get_parser, render.handle_args),
- (make_mime.NAME, make_mime.__doc__,
- make_mime.get_parser, make_mime.handle_args),
+ (
+ hotplug_hook.NAME,
+ hotplug_hook.__doc__,
+ hotplug_hook.get_parser,
+ hotplug_hook.handle_args,
+ ),
+ (
+ "schema",
+ "Validate cloud-config files for document schema",
+ schema.get_parser,
+ schema.handle_schema_args,
+ ),
+ (
+ net_convert.NAME,
+ net_convert.__doc__,
+ net_convert.get_parser,
+ net_convert.handle_args,
+ ),
+ (render.NAME, render.__doc__, render.get_parser, render.handle_args),
+ (
+ make_mime.NAME,
+ make_mime.__doc__,
+ make_mime.get_parser,
+ make_mime.handle_args,
+ ),
]
for (subcmd, helpmsg, get_parser, handler) in subcmds:
parser = subparsers.add_parser(subcmd, help=helpmsg)
diff --git a/cloudinit/cmd/devel/render.py b/cloudinit/cmd/devel/render.py
index 1090aa16..2f9a22a8 100755
--- a/cloudinit/cmd/devel/render.py
+++ b/cloudinit/cmd/devel/render.py
@@ -6,12 +6,13 @@ import argparse
import os
import sys
-from cloudinit.handlers.jinja_template import render_jinja_payload_from_file
from cloudinit import log
+from cloudinit.handlers.jinja_template import render_jinja_payload_from_file
from cloudinit.sources import INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE
+
from . import addLogHandlerCLI, read_cfg_paths
-NAME = 'render'
+NAME = "render"
LOG = log.getLogger(NAME)
@@ -27,13 +28,24 @@ def get_parser(parser=None):
if not parser:
parser = argparse.ArgumentParser(prog=NAME, description=__doc__)
parser.add_argument(
- 'user_data', type=str, help='Path to the user-data file to render')
+ "user_data", type=str, help="Path to the user-data file to render"
+ )
+ parser.add_argument(
+ "-i",
+ "--instance-data",
+ type=str,
+ help=(
+ "Optional path to instance-data.json file. Defaults to"
+ " /run/cloud-init/instance-data.json"
+ ),
+ )
parser.add_argument(
- '-i', '--instance-data', type=str,
- help=('Optional path to instance-data.json file. Defaults to'
- ' /run/cloud-init/instance-data.json'))
- parser.add_argument('-d', '--debug', action='store_true', default=False,
- help='Add verbose messages during template render')
+ "-d",
+ "--debug",
+ action="store_true",
+ default=False,
+ help="Add verbose messages during template render",
+ )
return parser
@@ -54,34 +66,38 @@ def handle_args(name, args):
redacted_data_fn = os.path.join(paths.run_dir, INSTANCE_JSON_FILE)
if uid == 0:
instance_data_fn = os.path.join(
- paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE)
+ paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE
+ )
if not os.path.exists(instance_data_fn):
LOG.warning(
- 'Missing root-readable %s. Using redacted %s instead.',
- instance_data_fn, redacted_data_fn
+ "Missing root-readable %s. Using redacted %s instead.",
+ instance_data_fn,
+ redacted_data_fn,
)
instance_data_fn = redacted_data_fn
else:
instance_data_fn = redacted_data_fn
if not os.path.exists(instance_data_fn):
- LOG.error('Missing instance-data.json file: %s', instance_data_fn)
+ LOG.error("Missing instance-data.json file: %s", instance_data_fn)
return 1
try:
with open(args.user_data) as stream:
user_data = stream.read()
except IOError:
- LOG.error('Missing user-data file: %s', args.user_data)
+ LOG.error("Missing user-data file: %s", args.user_data)
return 1
try:
rendered_payload = render_jinja_payload_from_file(
- payload=user_data, payload_fn=args.user_data,
+ payload=user_data,
+ payload_fn=args.user_data,
instance_data_file=instance_data_fn,
- debug=True if args.debug else False)
+ debug=True if args.debug else False,
+ )
except RuntimeError as e:
- LOG.error('Cannot render from instance data: %s', str(e))
+ LOG.error("Cannot render from instance data: %s", str(e))
return 1
if not rendered_payload:
- LOG.error('Unable to render user-data file: %s', args.user_data)
+ LOG.error("Unable to render user-data file: %s", args.user_data)
return 1
sys.stdout.write(rendered_payload)
return 0
@@ -89,10 +105,10 @@ def handle_args(name, args):
def main():
args = get_parser().parse_args()
- return(handle_args(NAME, args))
+ return handle_args(NAME, args)
-if __name__ == '__main__':
+if __name__ == "__main__":
sys.exit(main())
diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
index 63186d34..e67edbc3 100644
--- a/cloudinit/cmd/main.py
+++ b/cloudinit/cmd/main.py
@@ -19,6 +19,7 @@ import time
import traceback
from cloudinit import patcher
+
patcher.patch_logging()
from cloudinit import log as logging
@@ -34,8 +35,7 @@ from cloudinit import warnings
from cloudinit import reporting
from cloudinit.reporting import events
-from cloudinit.settings import (PER_INSTANCE, PER_ALWAYS, PER_ONCE,
- CLOUD_CONFIG)
+from cloudinit.settings import PER_INSTANCE, PER_ALWAYS, PER_ONCE, CLOUD_CONFIG
from cloudinit import atomic_helper
@@ -44,8 +44,10 @@ from cloudinit import dhclient_hook
# Welcome message template
-WELCOME_MSG_TPL = ("Cloud-init v. {version} running '{action}' at "
- "{timestamp}. Up {uptime} seconds.")
+WELCOME_MSG_TPL = (
+ "Cloud-init v. {version} running '{action}' at "
+ "{timestamp}. Up {uptime} seconds."
+)
# Module section template
MOD_SECTION_TPL = "cloud_%s_modules"
@@ -53,9 +55,9 @@ MOD_SECTION_TPL = "cloud_%s_modules"
# Frequency shortname to full name
# (so users don't have to remember the full name...)
FREQ_SHORT_NAMES = {
- 'instance': PER_INSTANCE,
- 'always': PER_ALWAYS,
- 'once': PER_ONCE,
+ "instance": PER_INSTANCE,
+ "always": PER_ALWAYS,
+ "once": PER_ONCE,
}
LOG = logging.getLogger()
@@ -63,21 +65,20 @@ LOG = logging.getLogger()
# Used for when a logger may not be active
# and we still want to print exceptions...
-def print_exc(msg=''):
+def print_exc(msg=""):
if msg:
sys.stderr.write("%s\n" % (msg))
- sys.stderr.write('-' * 60)
+ sys.stderr.write("-" * 60)
sys.stderr.write("\n")
traceback.print_exc(file=sys.stderr)
- sys.stderr.write('-' * 60)
+ sys.stderr.write("-" * 60)
sys.stderr.write("\n")
def welcome(action, msg=None):
if not msg:
msg = welcome_format(action)
- util.multi_log("%s\n" % (msg),
- console=False, stderr=True, log=LOG)
+ util.multi_log("%s\n" % (msg), console=False, stderr=True, log=LOG)
return msg
@@ -86,7 +87,8 @@ def welcome_format(action):
version=version.version_string(),
uptime=util.uptime(),
timestamp=util.time_rfc2822(),
- action=action)
+ action=action,
+ )
def extract_fns(args):
@@ -107,29 +109,31 @@ def run_module_section(mods, action_name, section):
(which_ran, failures) = mods.run_section(full_section_name)
total_attempted = len(which_ran) + len(failures)
if total_attempted == 0:
- msg = ("No '%s' modules to run"
- " under section '%s'") % (action_name, full_section_name)
+ msg = "No '%s' modules to run under section '%s'" % (
+ action_name,
+ full_section_name,
+ )
sys.stderr.write("%s\n" % (msg))
LOG.debug(msg)
return []
else:
- LOG.debug("Ran %s modules with %s failures",
- len(which_ran), len(failures))
+ LOG.debug(
+ "Ran %s modules with %s failures", len(which_ran), len(failures)
+ )
return failures
def apply_reporting_cfg(cfg):
- if cfg.get('reporting'):
- reporting.update_configuration(cfg.get('reporting'))
+ if cfg.get("reporting"):
+ reporting.update_configuration(cfg.get("reporting"))
-def parse_cmdline_url(cmdline, names=('cloud-config-url', 'url')):
+def parse_cmdline_url(cmdline, names=("cloud-config-url", "url")):
data = util.keyval_str_to_dict(cmdline)
for key in names:
if key in data:
return key, data[key]
- raise KeyError("No keys (%s) found in string '%s'" %
- (cmdline, names))
+ raise KeyError("No keys (%s) found in string '%s'" % (cmdline, names))
def attempt_cmdline_url(path, network=True, cmdline=None):
@@ -163,51 +167,60 @@ def attempt_cmdline_url(path, network=True, cmdline=None):
if path_is_local and os.path.exists(path):
if network:
- m = ("file '%s' existed, possibly from local stage download"
- " of command line url '%s'. Not re-writing." % (path, url))
+ m = (
+ "file '%s' existed, possibly from local stage download"
+ " of command line url '%s'. Not re-writing." % (path, url)
+ )
level = logging.INFO
if path_is_local:
level = logging.DEBUG
else:
- m = ("file '%s' existed, possibly from previous boot download"
- " of command line url '%s'. Not re-writing." % (path, url))
+ m = (
+ "file '%s' existed, possibly from previous boot download"
+ " of command line url '%s'. Not re-writing." % (path, url)
+ )
level = logging.WARN
return (level, m)
- kwargs = {'url': url, 'timeout': 10, 'retries': 2}
+ kwargs = {"url": url, "timeout": 10, "retries": 2}
if network or path_is_local:
level = logging.WARN
- kwargs['sec_between'] = 1
+ kwargs["sec_between"] = 1
else:
level = logging.DEBUG
- kwargs['sec_between'] = .1
+ kwargs["sec_between"] = 0.1
data = None
- header = b'#cloud-config'
+ header = b"#cloud-config"
try:
resp = url_helper.read_file_or_url(**kwargs)
if resp.ok():
data = resp.contents
if not resp.contents.startswith(header):
- if cmdline_name == 'cloud-config-url':
+ if cmdline_name == "cloud-config-url":
level = logging.WARN
else:
level = logging.INFO
return (
level,
- "contents of '%s' did not start with %s" % (url, header))
+ "contents of '%s' did not start with %s" % (url, header),
+ )
else:
- return (level,
- "url '%s' returned code %s. Ignoring." % (url, resp.code))
+ return (
+ level,
+ "url '%s' returned code %s. Ignoring." % (url, resp.code),
+ )
except url_helper.UrlError as e:
return (level, "retrieving url '%s' failed: %s" % (url, e))
util.write_file(path, data, mode=0o600)
- return (logging.INFO,
- "wrote cloud-config data from %s='%s' to %s" %
- (cmdline_name, url, path))
+ return (
+ logging.INFO,
+ "wrote cloud-config data from %s='%s' to %s"
+ % (cmdline_name, url, path),
+ )
def purge_cache_on_python_version_change(init):
@@ -216,31 +229,32 @@ def purge_cache_on_python_version_change(init):
There could be changes not represented in our cache (obj.pkl) after we
upgrade to a new version of python, so at that point clear the cache
"""
- current_python_version = '%d.%d' % (
- sys.version_info.major, sys.version_info.minor
+ current_python_version = "%d.%d" % (
+ sys.version_info.major,
+ sys.version_info.minor,
)
python_version_path = os.path.join(
- init.paths.get_cpath('data'), 'python-version'
+ init.paths.get_cpath("data"), "python-version"
)
if os.path.exists(python_version_path):
cached_python_version = open(python_version_path).read()
# The Python version has changed out from under us, anything that was
# pickled previously is likely useless due to API changes.
if cached_python_version != current_python_version:
- LOG.debug('Python version change detected. Purging cache')
+ LOG.debug("Python version change detected. Purging cache")
init.purge_cache(True)
util.write_file(python_version_path, current_python_version)
else:
- if os.path.exists(init.paths.get_ipath_cur('obj_pkl')):
+ if os.path.exists(init.paths.get_ipath_cur("obj_pkl")):
LOG.info(
- 'Writing python-version file. '
- 'Cache compatibility status is currently unknown.'
+ "Writing python-version file. "
+ "Cache compatibility status is currently unknown."
)
util.write_file(python_version_path, current_python_version)
def _should_bring_up_interfaces(init, args):
- if util.get_cfg_option_bool(init.cfg, 'disable_network_activation'):
+ if util.get_cfg_option_bool(init.cfg, "disable_network_activation"):
return False
return not args.local
@@ -250,10 +264,14 @@ def main_init(name, args):
if args.local:
deps = [sources.DEP_FILESYSTEM]
- early_logs = [attempt_cmdline_url(
- path=os.path.join("%s.d" % CLOUD_CONFIG,
- "91_kernel_cmdline_url.cfg"),
- network=not args.local)]
+ early_logs = [
+ attempt_cmdline_url(
+ path=os.path.join(
+ "%s.d" % CLOUD_CONFIG, "91_kernel_cmdline_url.cfg"
+ ),
+ network=not args.local,
+ )
+ ]
# Cloud-init 'init' stage is broken up into the following sub-stages
# 1. Ensure that the init object fetches its config without errors
@@ -289,8 +307,9 @@ def main_init(name, args):
early_logs.append((logging.WARN, msg))
if args.debug:
# Reset so that all the debug handlers are closed out
- LOG.debug(("Logging being reset, this logger may no"
- " longer be active shortly"))
+ LOG.debug(
+ "Logging being reset, this logger may no longer be active shortly"
+ )
logging.resetLogging()
logging.setupLogging(init.cfg)
apply_reporting_cfg(init.cfg)
@@ -317,9 +336,11 @@ def main_init(name, args):
if mode == sources.DSMODE_NETWORK:
existing = "trust"
sys.stderr.write("%s\n" % (netinfo.debug_info()))
- LOG.debug(("Checking to see if files that we need already"
- " exist from a previous run that would allow us"
- " to stop early."))
+ LOG.debug(
+ "Checking to see if files that we need already"
+ " exist from a previous run that would allow us"
+ " to stop early."
+ )
# no-net is written by upstart cloud-init-nonet when network failed
# to come up
stop_files = [
@@ -331,15 +352,18 @@ def main_init(name, args):
existing_files.append(fn)
if existing_files:
- LOG.debug("[%s] Exiting. stop file %s existed",
- mode, existing_files)
+ LOG.debug(
+ "[%s] Exiting. stop file %s existed", mode, existing_files
+ )
return (None, [])
else:
- LOG.debug("Execution continuing, no previous run detected that"
- " would allow us to stop early.")
+ LOG.debug(
+ "Execution continuing, no previous run detected that"
+ " would allow us to stop early."
+ )
else:
existing = "check"
- mcfg = util.get_cfg_option_bool(init.cfg, 'manual_cache_clean', False)
+ mcfg = util.get_cfg_option_bool(init.cfg, "manual_cache_clean", False)
if mcfg:
LOG.debug("manual cache clean set from config")
existing = "trust"
@@ -360,8 +384,11 @@ def main_init(name, args):
# if in network mode, and the datasource is local
# then work was done at that stage.
if mode == sources.DSMODE_NETWORK and init.datasource.dsmode != mode:
- LOG.debug("[%s] Exiting. datasource %s in local mode",
- mode, init.datasource)
+ LOG.debug(
+ "[%s] Exiting. datasource %s in local mode",
+ mode,
+ init.datasource,
+ )
return (None, [])
except sources.DataSourceNotFoundException:
# In the case of 'cloud-init init' without '--local' it is a bit
@@ -371,8 +398,9 @@ def main_init(name, args):
if mode == sources.DSMODE_LOCAL:
LOG.debug("No local datasource found")
else:
- util.logexc(LOG, ("No instance datasource found!"
- " Likely bad things to come!"))
+ util.logexc(
+ LOG, "No instance datasource found! Likely bad things to come!"
+ )
if not args.force:
init.apply_network_config(bring_up=bring_up_interfaces)
LOG.debug("[%s] Exiting without datasource", mode)
@@ -381,46 +409,60 @@ def main_init(name, args):
else:
return (None, ["No instance datasource found."])
else:
- LOG.debug("[%s] barreling on in force mode without datasource",
- mode)
+ LOG.debug(
+ "[%s] barreling on in force mode without datasource", mode
+ )
_maybe_persist_instance_data(init)
# Stage 6
iid = init.instancify()
- LOG.debug("[%s] %s will now be targeting instance id: %s. new=%s",
- mode, name, iid, init.is_new_instance())
+ LOG.debug(
+ "[%s] %s will now be targeting instance id: %s. new=%s",
+ mode,
+ name,
+ iid,
+ init.is_new_instance(),
+ )
if mode == sources.DSMODE_LOCAL:
# Before network comes up, set any configured hostname to allow
# dhcp clients to advertize this hostname to any DDNS services
# LP: #1746455.
- _maybe_set_hostname(init, stage='local', retry_stage='network')
+ _maybe_set_hostname(init, stage="local", retry_stage="network")
init.apply_network_config(bring_up=bring_up_interfaces)
if mode == sources.DSMODE_LOCAL:
if init.datasource.dsmode != mode:
- LOG.debug("[%s] Exiting. datasource %s not in local mode.",
- mode, init.datasource)
+ LOG.debug(
+ "[%s] Exiting. datasource %s not in local mode.",
+ mode,
+ init.datasource,
+ )
return (init.datasource, [])
else:
- LOG.debug("[%s] %s is in local mode, will apply init modules now.",
- mode, init.datasource)
+ LOG.debug(
+ "[%s] %s is in local mode, will apply init modules now.",
+ mode,
+ init.datasource,
+ )
# Give the datasource a chance to use network resources.
# This is used on Azure to communicate with the fabric over network.
init.setup_datasource()
# update fully realizes user-data (pulling in #include if necessary)
init.update()
- _maybe_set_hostname(init, stage='init-net', retry_stage='modules:config')
+ _maybe_set_hostname(init, stage="init-net", retry_stage="modules:config")
# Stage 7
try:
# Attempt to consume the data per instance.
# This may run user-data handlers and/or perform
# url downloads and such as needed.
- (ran, _results) = init.cloudify().run('consume_data',
- init.consume_data,
- args=[PER_INSTANCE],
- freq=PER_INSTANCE)
+ (ran, _results) = init.cloudify().run(
+ "consume_data",
+ init.consume_data,
+ args=[PER_INSTANCE],
+ freq=PER_INSTANCE,
+ )
if not ran:
# Just consume anything that is set to run per-always
# if nothing ran in the per-instance code
@@ -442,8 +484,7 @@ def main_init(name, args):
errfmt_orig = errfmt
(outfmt, errfmt) = util.get_output_cfg(mods.cfg, name)
if outfmt_orig != outfmt or errfmt_orig != errfmt:
- LOG.warning("Stdout, stderr changing to (%s, %s)",
- outfmt, errfmt)
+ LOG.warning("Stdout, stderr changing to (%s, %s)", outfmt, errfmt)
(outfmt, errfmt) = util.fixup_output(mods.cfg, name)
except Exception:
util.logexc(LOG, "Failed to re-adjust output redirection!")
@@ -459,11 +500,11 @@ def main_init(name, args):
def di_report_warn(datasource, cfg):
- if 'di_report' not in cfg:
+ if "di_report" not in cfg:
LOG.debug("no di_report found in config.")
return
- dicfg = cfg['di_report']
+ dicfg = cfg["di_report"]
if dicfg is None:
# ds-identify may write 'di_report:\n #comment\n'
# which reads as {'di_report': None}
@@ -474,7 +515,7 @@ def di_report_warn(datasource, cfg):
LOG.warning("di_report config not a dictionary: %s", dicfg)
return
- dslist = dicfg.get('datasource_list')
+ dslist = dicfg.get("datasource_list")
if dslist is None:
LOG.warning("no 'datasource_list' found in di_report.")
return
@@ -486,18 +527,26 @@ def di_report_warn(datasource, cfg):
# where Name is the thing that shows up in datasource_list.
modname = datasource.__module__.rpartition(".")[2]
if modname.startswith(sources.DS_PREFIX):
- modname = modname[len(sources.DS_PREFIX):]
+ modname = modname[len(sources.DS_PREFIX) :]
else:
- LOG.warning("Datasource '%s' came from unexpected module '%s'.",
- datasource, modname)
+ LOG.warning(
+ "Datasource '%s' came from unexpected module '%s'.",
+ datasource,
+ modname,
+ )
if modname in dslist:
- LOG.debug("used datasource '%s' from '%s' was in di_report's list: %s",
- datasource, modname, dslist)
+ LOG.debug(
+ "used datasource '%s' from '%s' was in di_report's list: %s",
+ datasource,
+ modname,
+ dslist,
+ )
return
- warnings.show_warning('dsid_missing_source', cfg,
- source=modname, dslist=str(dslist))
+ warnings.show_warning(
+ "dsid_missing_source", cfg, source=modname, dslist=str(dslist)
+ )
def main_modules(action_name, args):
@@ -521,8 +570,10 @@ def main_modules(action_name, args):
init.fetch(existing="trust")
except sources.DataSourceNotFoundException:
# There was no datasource found, theres nothing to do
- msg = ('Can not apply stage %s, no datasource found! Likely bad '
- 'things to come!' % name)
+ msg = (
+ "Can not apply stage %s, no datasource found! Likely bad "
+ "things to come!" % name
+ )
util.logexc(LOG, msg)
print_exc(msg)
if not args.force:
@@ -539,8 +590,9 @@ def main_modules(action_name, args):
util.logexc(LOG, "Failed to setup output redirection!")
if args.debug:
# Reset so that all the debug handlers are closed out
- LOG.debug(("Logging being reset, this logger may no"
- " longer be active shortly"))
+ LOG.debug(
+ "Logging being reset, this logger may no longer be active shortly"
+ )
logging.resetLogging()
logging.setupLogging(mods.cfg)
apply_reporting_cfg(init.cfg)
@@ -573,10 +625,12 @@ def main_single(name, args):
# There was no datasource found,
# that might be bad (or ok) depending on
# the module being ran (so continue on)
- util.logexc(LOG, ("Failed to fetch your datasource,"
- " likely bad things to come!"))
- print_exc(("Failed to fetch your datasource,"
- " likely bad things to come!"))
+ util.logexc(
+ LOG, "Failed to fetch your datasource, likely bad things to come!"
+ )
+ print_exc(
+ "Failed to fetch your datasource, likely bad things to come!"
+ )
if not args.force:
return 1
_maybe_persist_instance_data(init)
@@ -598,8 +652,9 @@ def main_single(name, args):
util.logexc(LOG, "Failed to setup output redirection!")
if args.debug:
# Reset so that all the debug handlers are closed out
- LOG.debug(("Logging being reset, this logger may no"
- " longer be active shortly"))
+ LOG.debug(
+ "Logging being reset, this logger may no longer be active shortly"
+ )
logging.resetLogging()
logging.setupLogging(mods.cfg)
apply_reporting_cfg(init.cfg)
@@ -608,9 +663,7 @@ def main_single(name, args):
welcome(name, msg=w_msg)
# Stage 5
- (which_ran, failures) = mods.run_single(mod_name,
- mod_args,
- mod_freq)
+ (which_ran, failures) = mods.run_single(mod_name, mod_args, mod_freq)
if failures:
LOG.warning("Ran %s but it failed!", mod_name)
return 1
@@ -633,7 +686,12 @@ def status_wrapper(name, args, data_d=None, link_d=None):
result_path = os.path.join(data_d, "result.json")
result_link = os.path.join(link_d, "result.json")
- util.ensure_dirs((data_d, link_d,))
+ util.ensure_dirs(
+ (
+ data_d,
+ link_d,
+ )
+ )
(_name, functor) = args.action
@@ -647,14 +705,20 @@ def status_wrapper(name, args, data_d=None, link_d=None):
else:
raise ValueError("unknown name: %s" % name)
- modes = ('init', 'init-local', 'modules-init', 'modules-config',
- 'modules-final')
+ modes = (
+ "init",
+ "init-local",
+ "modules-init",
+ "modules-config",
+ "modules-final",
+ )
if mode not in modes:
raise ValueError(
- "Invalid cloud init mode specified '{0}'".format(mode))
+ "Invalid cloud init mode specified '{0}'".format(mode)
+ )
status = None
- if mode == 'init-local':
+ if mode == "init-local":
for f in (status_link, result_link, status_path, result_path):
util.del_file(f)
else:
@@ -664,45 +728,46 @@ def status_wrapper(name, args, data_d=None, link_d=None):
pass
nullstatus = {
- 'errors': [],
- 'start': None,
- 'finished': None,
+ "errors": [],
+ "start": None,
+ "finished": None,
}
if status is None:
- status = {'v1': {}}
- status['v1']['datasource'] = None
+ status = {"v1": {}}
+ status["v1"]["datasource"] = None
for m in modes:
- if m not in status['v1']:
- status['v1'][m] = nullstatus.copy()
+ if m not in status["v1"]:
+ status["v1"][m] = nullstatus.copy()
- v1 = status['v1']
- v1['stage'] = mode
- v1[mode]['start'] = time.time()
+ v1 = status["v1"]
+ v1["stage"] = mode
+ v1[mode]["start"] = time.time()
atomic_helper.write_json(status_path, status)
- util.sym_link(os.path.relpath(status_path, link_d), status_link,
- force=True)
+ util.sym_link(
+ os.path.relpath(status_path, link_d), status_link, force=True
+ )
try:
ret = functor(name, args)
- if mode in ('init', 'init-local'):
+ if mode in ("init", "init-local"):
(datasource, errors) = ret
if datasource is not None:
- v1['datasource'] = str(datasource)
+ v1["datasource"] = str(datasource)
else:
errors = ret
- v1[mode]['errors'] = [str(e) for e in errors]
+ v1[mode]["errors"] = [str(e) for e in errors]
except Exception as e:
util.logexc(LOG, "failed stage %s", mode)
print_exc("failed run of stage %s" % mode)
- v1[mode]['errors'] = [str(e)]
+ v1[mode]["errors"] = [str(e)]
- v1[mode]['finished'] = time.time()
- v1['stage'] = None
+ v1[mode]["finished"] = time.time()
+ v1["stage"] = None
atomic_helper.write_json(status_path, status)
@@ -710,23 +775,26 @@ def status_wrapper(name, args, data_d=None, link_d=None):
# write the 'finished' file
errors = []
for m in modes:
- if v1[m]['errors']:
- errors.extend(v1[m].get('errors', []))
+ if v1[m]["errors"]:
+ errors.extend(v1[m].get("errors", []))
atomic_helper.write_json(
- result_path, {'v1': {'datasource': v1['datasource'],
- 'errors': errors}})
- util.sym_link(os.path.relpath(result_path, link_d), result_link,
- force=True)
+ result_path,
+ {"v1": {"datasource": v1["datasource"], "errors": errors}},
+ )
+ util.sym_link(
+ os.path.relpath(result_path, link_d), result_link, force=True
+ )
- return len(v1[mode]['errors'])
+ return len(v1[mode]["errors"])
def _maybe_persist_instance_data(init):
"""Write instance-data.json file if absent and datasource is restored."""
if init.ds_restored:
instance_data_file = os.path.join(
- init.paths.run_dir, sources.INSTANCE_JSON_FILE)
+ init.paths.run_dir, sources.INSTANCE_JSON_FILE
+ )
if not os.path.exists(instance_data_file):
init.datasource.persist_instance_data()
@@ -739,18 +807,23 @@ def _maybe_set_hostname(init, stage, retry_stage):
"""
cloud = init.cloudify()
(hostname, _fqdn) = util.get_hostname_fqdn(
- init.cfg, cloud, metadata_only=True)
+ init.cfg, cloud, metadata_only=True
+ )
if hostname: # meta-data or user-data hostname content
try:
- cc_set_hostname.handle('set-hostname', init.cfg, cloud, LOG, None)
+ cc_set_hostname.handle("set-hostname", init.cfg, cloud, LOG, None)
except cc_set_hostname.SetHostnameError as e:
LOG.debug(
- 'Failed setting hostname in %s stage. Will'
- ' retry in %s stage. Error: %s.', stage, retry_stage, str(e))
+ "Failed setting hostname in %s stage. Will"
+ " retry in %s stage. Error: %s.",
+ stage,
+ retry_stage,
+ str(e),
+ )
def main_features(name, args):
- sys.stdout.write('\n'.join(sorted(version.FEATURES)) + '\n')
+ sys.stdout.write("\n".join(sorted(version.FEATURES)) + "\n")
def main(sysv_args=None):
@@ -760,129 +833,182 @@ def main(sysv_args=None):
sysv_args = sysv_args[1:]
# Top level args
- parser.add_argument('--version', '-v', action='version',
- version='%(prog)s ' + (version.version_string()))
- parser.add_argument('--file', '-f', action='append',
- dest='files',
- help=('additional yaml configuration'
- ' files to use'),
- type=argparse.FileType('rb'))
- parser.add_argument('--debug', '-d', action='store_true',
- help=('show additional pre-action'
- ' logging (default: %(default)s)'),
- default=False)
- parser.add_argument('--force', action='store_true',
- help=('force running even if no datasource is'
- ' found (use at your own risk)'),
- dest='force',
- default=False)
+ parser.add_argument(
+ "--version",
+ "-v",
+ action="version",
+ version="%(prog)s " + (version.version_string()),
+ )
+ parser.add_argument(
+ "--file",
+ "-f",
+ action="append",
+ dest="files",
+ help="additional yaml configuration files to use",
+ type=argparse.FileType("rb"),
+ )
+ parser.add_argument(
+ "--debug",
+ "-d",
+ action="store_true",
+ help="show additional pre-action logging (default: %(default)s)",
+ default=False,
+ )
+ parser.add_argument(
+ "--force",
+ action="store_true",
+ help=(
+ "force running even if no datasource is"
+ " found (use at your own risk)"
+ ),
+ dest="force",
+ default=False,
+ )
parser.set_defaults(reporter=None)
- subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand')
+ subparsers = parser.add_subparsers(title="Subcommands", dest="subcommand")
subparsers.required = True
# Each action and its sub-options (if any)
- parser_init = subparsers.add_parser('init',
- help=('initializes cloud-init and'
- ' performs initial modules'))
- parser_init.add_argument("--local", '-l', action='store_true',
- help="start in local mode (default: %(default)s)",
- default=False)
+ parser_init = subparsers.add_parser(
+ "init", help="initializes cloud-init and performs initial modules"
+ )
+ parser_init.add_argument(
+ "--local",
+ "-l",
+ action="store_true",
+ help="start in local mode (default: %(default)s)",
+ default=False,
+ )
# This is used so that we can know which action is selected +
# the functor to use to run this subcommand
- parser_init.set_defaults(action=('init', main_init))
+ parser_init.set_defaults(action=("init", main_init))
# These settings are used for the 'config' and 'final' stages
- parser_mod = subparsers.add_parser('modules',
- help=('activates modules using '
- 'a given configuration key'))
- parser_mod.add_argument("--mode", '-m', action='store',
- help=("module configuration name "
- "to use (default: %(default)s)"),
- default='config',
- choices=('init', 'config', 'final'))
- parser_mod.set_defaults(action=('modules', main_modules))
+ parser_mod = subparsers.add_parser(
+ "modules", help="activates modules using a given configuration key"
+ )
+ parser_mod.add_argument(
+ "--mode",
+ "-m",
+ action="store",
+ help="module configuration name to use (default: %(default)s)",
+ default="config",
+ choices=("init", "config", "final"),
+ )
+ parser_mod.set_defaults(action=("modules", main_modules))
# This subcommand allows you to run a single module
- parser_single = subparsers.add_parser('single',
- help=('run a single module '))
- parser_single.add_argument("--name", '-n', action="store",
- help="module name to run",
- required=True)
- parser_single.add_argument("--frequency", action="store",
- help=("frequency of the module"),
- required=False,
- choices=list(FREQ_SHORT_NAMES.keys()))
- parser_single.add_argument("--report", action="store_true",
- help="enable reporting",
- required=False)
- parser_single.add_argument("module_args", nargs="*",
- metavar='argument',
- help=('any additional arguments to'
- ' pass to this module'))
- parser_single.set_defaults(action=('single', main_single))
+ parser_single = subparsers.add_parser(
+ "single", help="run a single module "
+ )
+ parser_single.add_argument(
+ "--name",
+ "-n",
+ action="store",
+ help="module name to run",
+ required=True,
+ )
+ parser_single.add_argument(
+ "--frequency",
+ action="store",
+ help="frequency of the module",
+ required=False,
+ choices=list(FREQ_SHORT_NAMES.keys()),
+ )
+ parser_single.add_argument(
+ "--report",
+ action="store_true",
+ help="enable reporting",
+ required=False,
+ )
+ parser_single.add_argument(
+ "module_args",
+ nargs="*",
+ metavar="argument",
+ help="any additional arguments to pass to this module",
+ )
+ parser_single.set_defaults(action=("single", main_single))
parser_query = subparsers.add_parser(
- 'query',
- help='Query standardized instance metadata from the command line.')
+ "query",
+ help="Query standardized instance metadata from the command line.",
+ )
parser_dhclient = subparsers.add_parser(
- dhclient_hook.NAME, help=dhclient_hook.__doc__)
+ dhclient_hook.NAME, help=dhclient_hook.__doc__
+ )
dhclient_hook.get_parser(parser_dhclient)
- parser_features = subparsers.add_parser('features',
- help=('list defined features'))
- parser_features.set_defaults(action=('features', main_features))
+ parser_features = subparsers.add_parser(
+ "features", help="list defined features"
+ )
+ parser_features.set_defaults(action=("features", main_features))
parser_analyze = subparsers.add_parser(
- 'analyze', help='Devel tool: Analyze cloud-init logs and data')
+ "analyze", help="Devel tool: Analyze cloud-init logs and data"
+ )
- parser_devel = subparsers.add_parser(
- 'devel', help='Run development tools')
+ parser_devel = subparsers.add_parser("devel", help="Run development tools")
parser_collect_logs = subparsers.add_parser(
- 'collect-logs', help='Collect and tar all cloud-init debug info')
+ "collect-logs", help="Collect and tar all cloud-init debug info"
+ )
parser_clean = subparsers.add_parser(
- 'clean', help='Remove logs and artifacts so cloud-init can re-run.')
+ "clean", help="Remove logs and artifacts so cloud-init can re-run."
+ )
parser_status = subparsers.add_parser(
- 'status', help='Report cloud-init status or wait on completion.')
+ "status", help="Report cloud-init status or wait on completion."
+ )
if sysv_args:
# Only load subparsers if subcommand is specified to avoid load cost
- if sysv_args[0] == 'analyze':
+ if sysv_args[0] == "analyze":
from cloudinit.analyze.__main__ import get_parser as analyze_parser
+
# Construct analyze subcommand parser
analyze_parser(parser_analyze)
- elif sysv_args[0] == 'devel':
+ elif sysv_args[0] == "devel":
from cloudinit.cmd.devel.parser import get_parser as devel_parser
+
# Construct devel subcommand parser
devel_parser(parser_devel)
- elif sysv_args[0] == 'collect-logs':
+ elif sysv_args[0] == "collect-logs":
from cloudinit.cmd.devel.logs import (
- get_parser as logs_parser, handle_collect_logs_args)
+ get_parser as logs_parser,
+ handle_collect_logs_args,
+ )
+
logs_parser(parser_collect_logs)
parser_collect_logs.set_defaults(
- action=('collect-logs', handle_collect_logs_args))
- elif sysv_args[0] == 'clean':
+ action=("collect-logs", handle_collect_logs_args)
+ )
+ elif sysv_args[0] == "clean":
from cloudinit.cmd.clean import (
- get_parser as clean_parser, handle_clean_args)
+ get_parser as clean_parser,
+ handle_clean_args,
+ )
+
clean_parser(parser_clean)
- parser_clean.set_defaults(
- action=('clean', handle_clean_args))
- elif sysv_args[0] == 'query':
+ parser_clean.set_defaults(action=("clean", handle_clean_args))
+ elif sysv_args[0] == "query":
from cloudinit.cmd.query import (
- get_parser as query_parser, handle_args as handle_query_args)
+ get_parser as query_parser,
+ handle_args as handle_query_args,
+ )
+
query_parser(parser_query)
- parser_query.set_defaults(
- action=('render', handle_query_args))
- elif sysv_args[0] == 'status':
+ parser_query.set_defaults(action=("render", handle_query_args))
+ elif sysv_args[0] == "status":
from cloudinit.cmd.status import (
- get_parser as status_parser, handle_status_args)
+ get_parser as status_parser,
+ handle_status_args,
+ )
+
status_parser(parser_status)
- parser_status.set_defaults(
- action=('status', handle_status_args))
+ parser_status.set_defaults(action=("status", handle_status_args))
args = parser.parse_args(args=sysv_args)
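The hunk above preserves main()'s lazy-loading pattern: every subparser is declared up front, but the module that implements a subcommand is imported only when sysv_args actually names it. A minimal, self-contained sketch of that idea follows; the "report" subcommand, its handler, and the json import standing in for an expensive module are all invented for illustration, not taken from cloud-init.

    import argparse
    import sys


    def build_parser(argv):
        parser = argparse.ArgumentParser(prog="tool")
        subparsers = parser.add_subparsers(dest="subcommand")
        parser_report = subparsers.add_parser("report", help="emit a report")

        # Pay the import cost of the heavy handler module only when its
        # subcommand was actually requested on the command line.
        if argv and argv[0] == "report":
            import json  # stand-in for an expensive handler import

            parser_report.add_argument("--pretty", action="store_true")
            parser_report.set_defaults(
                action=lambda args: print(json.dumps({"pretty": args.pretty}))
            )
        return parser


    if __name__ == "__main__":
        cli = sys.argv[1:]
        args = build_parser(cli).parse_args(cli)
        if getattr(args, "action", None):
            args.action(args)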
@@ -906,14 +1032,20 @@ def main(sysv_args=None):
if args.local:
rname, rdesc = ("init-local", "searching for local datasources")
else:
- rname, rdesc = ("init-network",
- "searching for network datasources")
+ rname, rdesc = (
+ "init-network",
+ "searching for network datasources",
+ )
elif name == "modules":
- rname, rdesc = ("modules-%s" % args.mode,
- "running modules for %s" % args.mode)
+ rname, rdesc = (
+ "modules-%s" % args.mode,
+ "running modules for %s" % args.mode,
+ )
elif name == "single":
- rname, rdesc = ("single/%s" % args.name,
- "running single module %s" % args.name)
+ rname, rdesc = (
+ "single/%s" % args.name,
+ "running single module %s" % args.name,
+ )
report_on = args.report
else:
rname = name
@@ -921,19 +1053,24 @@ def main(sysv_args=None):
report_on = False
args.reporter = events.ReportEventStack(
- rname, rdesc, reporting_enabled=report_on)
+ rname, rdesc, reporting_enabled=report_on
+ )
with args.reporter:
retval = util.log_time(
- logfunc=LOG.debug, msg="cloud-init mode '%s'" % name,
- get_uptime=True, func=functor, args=(name, args))
+ logfunc=LOG.debug,
+ msg="cloud-init mode '%s'" % name,
+ get_uptime=True,
+ func=functor,
+ args=(name, args),
+ )
reporting.flush_events()
return retval
-if __name__ == '__main__':
- if 'TZ' not in os.environ:
- os.environ['TZ'] = ":/etc/localtime"
+if __name__ == "__main__":
+ if "TZ" not in os.environ:
+ os.environ["TZ"] = ":/etc/localtime"
return_value = main(sys.argv)
if return_value:
sys.exit(return_value)
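main() wraps each stage in util.log_time so every mode logs how long it ran. A rough sketch of that kind of timing helper is shown below; it is written from scratch here and does not reproduce util.log_time's real signature (which also supports uptime reporting and keyword arguments).

    import logging
    import time

    LOG = logging.getLogger(__name__)


    def log_time(logfunc, msg, func, args=()):
        """Run func(*args), log how long it took, and return its result."""
        start = time.monotonic()
        try:
            return func(*args)
        finally:
            logfunc("%s took %.3f seconds", msg, time.monotonic() - start)


    # Hypothetical usage:
    # retval = log_time(LOG.debug, "cloud-init mode 'init'", run_stage, ("init",))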
diff --git a/cloudinit/cmd/query.py b/cloudinit/cmd/query.py
index e53cd855..46f17699 100644
--- a/cloudinit/cmd/query.py
+++ b/cloudinit/cmd/query.py
@@ -14,22 +14,24 @@ output; if this fails, they are treated as binary.
"""
import argparse
-from errno import EACCES
import os
import sys
+from errno import EACCES
+from cloudinit import log, util
+from cloudinit.cmd.devel import addLogHandlerCLI, read_cfg_paths
from cloudinit.handlers.jinja_template import (
convert_jinja_instance_data,
get_jinja_variable_alias,
- render_jinja_payload
+ render_jinja_payload,
)
-from cloudinit.cmd.devel import addLogHandlerCLI, read_cfg_paths
-from cloudinit import log
from cloudinit.sources import (
- INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE, REDACT_SENSITIVE_VALUE)
-from cloudinit import util
+ INSTANCE_JSON_FILE,
+ INSTANCE_JSON_SENSITIVE_FILE,
+ REDACT_SENSITIVE_VALUE,
+)
-NAME = 'query'
+NAME = "query"
LOG = log.getLogger(NAME)
@@ -43,41 +45,79 @@ def get_parser(parser=None):
@returns: ArgumentParser with proper argument configuration.
"""
if not parser:
- parser = argparse.ArgumentParser(
- prog=NAME, description=__doc__)
+ parser = argparse.ArgumentParser(prog=NAME, description=__doc__)
parser.add_argument(
- '-d', '--debug', action='store_true', default=False,
- help='Add verbose messages during template render')
+ "-d",
+ "--debug",
+ action="store_true",
+ default=False,
+ help="Add verbose messages during template render",
+ )
parser.add_argument(
- '-i', '--instance-data', type=str,
- help=('Path to instance-data.json file. Default is /run/cloud-init/%s'
- % INSTANCE_JSON_FILE))
+ "-i",
+ "--instance-data",
+ type=str,
+ help="Path to instance-data.json file. Default is /run/cloud-init/%s"
+ % INSTANCE_JSON_FILE,
+ )
parser.add_argument(
- '-l', '--list-keys', action='store_true', default=False,
- help=('List query keys available at the provided instance-data'
- ' <varname>.'))
+ "-l",
+ "--list-keys",
+ action="store_true",
+ default=False,
+ help=(
+ "List query keys available at the provided instance-data"
+ " <varname>."
+ ),
+ )
parser.add_argument(
- '-u', '--user-data', type=str,
- help=('Path to user-data file. Default is'
- ' /var/lib/cloud/instance/user-data.txt'))
+ "-u",
+ "--user-data",
+ type=str,
+ help=(
+ "Path to user-data file. Default is"
+ " /var/lib/cloud/instance/user-data.txt"
+ ),
+ )
parser.add_argument(
- '-v', '--vendor-data', type=str,
- help=('Path to vendor-data file. Default is'
- ' /var/lib/cloud/instance/vendor-data.txt'))
+ "-v",
+ "--vendor-data",
+ type=str,
+ help=(
+ "Path to vendor-data file. Default is"
+ " /var/lib/cloud/instance/vendor-data.txt"
+ ),
+ )
parser.add_argument(
- 'varname', type=str, nargs='?',
- help=('A dot-delimited specific variable to query from'
- ' instance-data. For example: v1.local_hostname. If the'
- ' value is not JSON serializable, it will be base64-encoded and'
- ' will contain the prefix "ci-b64:". '))
+ "varname",
+ type=str,
+ nargs="?",
+ help=(
+ "A dot-delimited specific variable to query from"
+ " instance-data. For example: v1.local_hostname. If the"
+ " value is not JSON serializable, it will be base64-encoded and"
+ ' will contain the prefix "ci-b64:". '
+ ),
+ )
parser.add_argument(
- '-a', '--all', action='store_true', default=False, dest='dump_all',
- help='Dump all available instance-data')
+ "-a",
+ "--all",
+ action="store_true",
+ default=False,
+ dest="dump_all",
+ help="Dump all available instance-data",
+ )
parser.add_argument(
- '-f', '--format', type=str, dest='format',
- help=('Optionally specify a custom output format string. Any'
- ' instance-data variable can be specified between double-curly'
- ' braces. For example -f "{{ v2.cloud_name }}"'))
+ "-f",
+ "--format",
+ type=str,
+ dest="format",
+ help=(
+ "Optionally specify a custom output format string. Any"
+ " instance-data variable can be specified between double-curly"
+ ' braces. For example -f "{{ v2.cloud_name }}"'
+ ),
+ )
return parser
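For reference, a stripped-down stand-in for the parser built above, showing how the optional positional varname coexists with the flags. Only a subset of the real options is reproduced, and the example values are made up.

    import argparse

    parser = argparse.ArgumentParser(prog="query")
    parser.add_argument("-f", "--format", type=str, dest="format")
    parser.add_argument("-l", "--list-keys", action="store_true", default=False)
    parser.add_argument("varname", type=str, nargs="?")

    args = parser.parse_args(["-f", "{{ v2.cloud_name }}", "v1.local_hostname"])
    print(args.format)     # {{ v2.cloud_name }}
    print(args.varname)    # v1.local_hostname
    print(args.list_keys)  # False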
@@ -91,7 +131,7 @@ def load_userdata(ud_file_path):
"""
bdata = util.load_file(ud_file_path, decode=False)
try:
- return bdata.decode('utf-8')
+ return bdata.decode("utf-8")
except UnicodeDecodeError:
return util.decomp_gzip(bdata, quiet=False, decode=True)
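load_userdata first attempts a UTF-8 decode and only falls back to decompression when that fails. A self-contained sketch of the same fallback using the standard library's gzip module; cloud-init's decomp_gzip has more options than this.

    import gzip


    def decode_maybe_gzipped(blob: bytes) -> str:
        """Return text from raw bytes, transparently handling gzip payloads."""
        try:
            return blob.decode("utf-8")
        except UnicodeDecodeError:
            return gzip.decompress(blob).decode("utf-8")


    print(decode_maybe_gzipped(b"#cloud-config\n"))
    print(decode_maybe_gzipped(gzip.compress(b"#cloud-config\n")))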
@@ -118,13 +158,15 @@ def _read_instance_data(instance_data, user_data, vendor_data) -> dict:
redacted_data_fn = os.path.join(paths.run_dir, INSTANCE_JSON_FILE)
if uid == 0:
sensitive_data_fn = os.path.join(
- paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE)
+ paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE
+ )
if os.path.exists(sensitive_data_fn):
instance_data_fn = sensitive_data_fn
else:
LOG.warning(
- 'Missing root-readable %s. Using redacted %s instead.',
- sensitive_data_fn, redacted_data_fn
+ "Missing root-readable %s. Using redacted %s instead.",
+ sensitive_data_fn,
+ redacted_data_fn,
)
instance_data_fn = redacted_data_fn
else:
@@ -132,11 +174,11 @@ def _read_instance_data(instance_data, user_data, vendor_data) -> dict:
if user_data:
user_data_fn = user_data
else:
- user_data_fn = os.path.join(paths.instance_link, 'user-data.txt')
+ user_data_fn = os.path.join(paths.instance_link, "user-data.txt")
if vendor_data:
vendor_data_fn = vendor_data
else:
- vendor_data_fn = os.path.join(paths.instance_link, 'vendor-data.txt')
+ vendor_data_fn = os.path.join(paths.instance_link, "vendor-data.txt")
try:
instance_json = util.load_file(instance_data_fn)
@@ -144,24 +186,30 @@ def _read_instance_data(instance_data, user_data, vendor_data) -> dict:
if e.errno == EACCES:
LOG.error("No read permission on '%s'. Try sudo", instance_data_fn)
else:
- LOG.error('Missing instance-data file: %s', instance_data_fn)
+ LOG.error("Missing instance-data file: %s", instance_data_fn)
raise
instance_data = util.load_json(instance_json)
if uid != 0:
- instance_data['userdata'] = (
- '<%s> file:%s' % (REDACT_SENSITIVE_VALUE, user_data_fn))
- instance_data['vendordata'] = (
- '<%s> file:%s' % (REDACT_SENSITIVE_VALUE, vendor_data_fn))
+ instance_data["userdata"] = "<%s> file:%s" % (
+ REDACT_SENSITIVE_VALUE,
+ user_data_fn,
+ )
+ instance_data["vendordata"] = "<%s> file:%s" % (
+ REDACT_SENSITIVE_VALUE,
+ vendor_data_fn,
+ )
else:
- instance_data['userdata'] = load_userdata(user_data_fn)
- instance_data['vendordata'] = load_userdata(vendor_data_fn)
+ instance_data["userdata"] = load_userdata(user_data_fn)
+ instance_data["vendordata"] = load_userdata(vendor_data_fn)
return instance_data
def _find_instance_data_leaf_by_varname_path(
- jinja_vars_without_aliases: dict, jinja_vars_with_aliases: dict,
- varname: str, list_keys: bool
+ jinja_vars_without_aliases: dict,
+ jinja_vars_with_aliases: dict,
+ varname: str,
+ list_keys: bool,
):
"""Return the value of the dot-delimited varname path in instance-data
@@ -174,7 +222,7 @@ def _find_instance_data_leaf_by_varname_path(
"""
walked_key_path = ""
response = jinja_vars_without_aliases
- for key_path_part in varname.split('.'):
+ for key_path_part in varname.split("."):
try:
# Walk key path using complete aliases dict, yet response
# should only contain jinja_without_aliases
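The loop in the hunk above walks a dot-delimited varname through nested instance-data. A minimal standalone version of that walk is sketched below, with simpler error handling than the function in the diff.

    def walk_key_path(data: dict, varname: str):
        """Follow 'a.b.c' style paths through nested dicts, raising KeyError on a miss."""
        walked = ""
        node = data
        for part in varname.split("."):
            walked += part if not walked else "." + part
            try:
                node = node[part]
            except (KeyError, TypeError) as e:
                raise KeyError("Undefined instance-data key '%s'" % walked) from e
        return node


    instance_data = {"v1": {"local_hostname": "myhost"}}
    print(walk_key_path(instance_data, "v1.local_hostname"))  # myhost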
@@ -205,8 +253,9 @@ def handle_args(name, args):
addLogHandlerCLI(LOG, log.DEBUG if args.debug else log.WARNING)
if not any([args.list_keys, args.varname, args.format, args.dump_all]):
LOG.error(
- 'Expected one of the options: --all, --format,'
- ' --list-keys or varname')
+ "Expected one of the options: --all, --format,"
+ " --list-keys or varname"
+ )
get_parser().print_help()
return 1
try:
@@ -216,11 +265,13 @@ def handle_args(name, args):
except (IOError, OSError):
return 1
if args.format:
- payload = '## template: jinja\n{fmt}'.format(fmt=args.format)
+ payload = "## template: jinja\n{fmt}".format(fmt=args.format)
rendered_payload = render_jinja_payload(
- payload=payload, payload_fn='query commandline',
+ payload=payload,
+ payload_fn="query commandline",
instance_data=instance_data,
- debug=True if args.debug else False)
+ debug=True if args.debug else False,
+ )
if rendered_payload:
print(rendered_payload)
return 0
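The --format branch above prefixes the user's string with "## template: jinja" and hands it to cloud-init's renderer. A rough equivalent using the jinja2 package directly is shown below; it assumes jinja2 is installed, and the instance-data values are invented. The real render_jinja_payload adds key aliasing and error handling on top of this.

    from jinja2 import Template

    instance_data = {"v2": {"cloud_name": "mycloud"}}

    fmt = "cloud: {{ v2.cloud_name }}"
    print(Template(fmt).render(**instance_data))  # cloud: mycloud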
@@ -240,7 +291,7 @@ def handle_args(name, args):
jinja_vars_without_aliases=response,
jinja_vars_with_aliases=jinja_vars_with_aliases,
varname=args.varname,
- list_keys=args.list_keys
+ list_keys=args.list_keys,
)
except (KeyError, ValueError) as e:
LOG.error(e)
@@ -248,11 +299,10 @@ def handle_args(name, args):
if args.list_keys:
if not isinstance(response, dict):
LOG.error(
- "--list-keys provided but '%s' is not a dict",
- args.varname
+ "--list-keys provided but '%s' is not a dict", args.varname
)
return 1
- response = '\n'.join(sorted(response.keys()))
+ response = "\n".join(sorted(response.keys()))
if not isinstance(response, str):
response = util.json_dumps(response)
print(response)
@@ -265,7 +315,7 @@ def main():
sys.exit(handle_args(NAME, parser.parse_args()))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
# vi: ts=4 expandtab
diff --git a/cloudinit/cmd/status.py b/cloudinit/cmd/status.py
index ea79a85b..cff16c34 100644
--- a/cloudinit/cmd/status.py
+++ b/cloudinit/cmd/status.py
@@ -7,20 +7,20 @@
import argparse
import os
import sys
-from time import gmtime, strftime, sleep
+from time import gmtime, sleep, strftime
from cloudinit.distros import uses_systemd
from cloudinit.stages import Init
from cloudinit.util import get_cmdline, load_file, load_json
-CLOUDINIT_DISABLED_FILE = '/etc/cloud/cloud-init.disabled'
+CLOUDINIT_DISABLED_FILE = "/etc/cloud/cloud-init.disabled"
# customer visible status messages
-STATUS_ENABLED_NOT_RUN = 'not run'
-STATUS_RUNNING = 'running'
-STATUS_DONE = 'done'
-STATUS_ERROR = 'error'
-STATUS_DISABLED = 'disabled'
+STATUS_ENABLED_NOT_RUN = "not run"
+STATUS_RUNNING = "running"
+STATUS_DONE = "done"
+STATUS_ERROR = "error"
+STATUS_DISABLED = "disabled"
def get_parser(parser=None):
@@ -34,15 +34,25 @@ def get_parser(parser=None):
"""
if not parser:
parser = argparse.ArgumentParser(
- prog='status',
- description='Report run status of cloud init')
+ prog="status", description="Report run status of cloud init"
+ )
parser.add_argument(
- '-l', '--long', action='store_true', default=False,
- help=('Report long format of statuses including run stage name and'
- ' error messages'))
+ "-l",
+ "--long",
+ action="store_true",
+ default=False,
+ help=(
+ "Report long format of statuses including run stage name and"
+ " error messages"
+ ),
+ )
parser.add_argument(
- '-w', '--wait', action='store_true', default=False,
- help='Block waiting on cloud-init to complete')
+ "-w",
+ "--wait",
+ action="store_true",
+ default=False,
+ help="Block waiting on cloud-init to complete",
+ )
return parser
@@ -55,18 +65,18 @@ def handle_status_args(name, args):
status, status_detail, time = _get_status_details(init.paths)
if args.wait:
while status in (STATUS_ENABLED_NOT_RUN, STATUS_RUNNING):
- sys.stdout.write('.')
+ sys.stdout.write(".")
sys.stdout.flush()
status, status_detail, time = _get_status_details(init.paths)
sleep(0.25)
- sys.stdout.write('\n')
+ sys.stdout.write("\n")
if args.long:
- print('status: {0}'.format(status))
+ print("status: {0}".format(status))
if time:
- print('time: {0}'.format(time))
- print('detail:\n{0}'.format(status_detail))
+ print("time: {0}".format(time))
+ print("detail:\n{0}".format(status_detail))
else:
- print('status: {0}'.format(status))
+ print("status: {0}".format(status))
return 1 if status == STATUS_ERROR else 0
@@ -81,20 +91,20 @@ def _is_cloudinit_disabled(disable_file, paths):
is_disabled = False
cmdline_parts = get_cmdline().split()
if not uses_systemd():
- reason = 'Cloud-init enabled on sysvinit'
- elif 'cloud-init=enabled' in cmdline_parts:
- reason = 'Cloud-init enabled by kernel command line cloud-init=enabled'
+ reason = "Cloud-init enabled on sysvinit"
+ elif "cloud-init=enabled" in cmdline_parts:
+ reason = "Cloud-init enabled by kernel command line cloud-init=enabled"
elif os.path.exists(disable_file):
is_disabled = True
- reason = 'Cloud-init disabled by {0}'.format(disable_file)
- elif 'cloud-init=disabled' in cmdline_parts:
+ reason = "Cloud-init disabled by {0}".format(disable_file)
+ elif "cloud-init=disabled" in cmdline_parts:
is_disabled = True
- reason = 'Cloud-init disabled by kernel parameter cloud-init=disabled'
- elif not os.path.exists(os.path.join(paths.run_dir, 'enabled')):
+ reason = "Cloud-init disabled by kernel parameter cloud-init=disabled"
+ elif not os.path.exists(os.path.join(paths.run_dir, "enabled")):
is_disabled = True
- reason = 'Cloud-init disabled by cloud-init-generator'
+ reason = "Cloud-init disabled by cloud-init-generator"
else:
- reason = 'Cloud-init enabled by systemd cloud-init-generator'
+ reason = "Cloud-init enabled by systemd cloud-init-generator"
return (is_disabled, reason)
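_is_cloudinit_disabled inspects the kernel command line and a couple of marker files. A hedged, standalone sketch of just the command-line part follows; it keeps the same precedence (an explicit enabled token wins) but ignores the file checks.

    def disabled_by_cmdline(cmdline: str) -> bool:
        """True when the kernel command line explicitly disables the service."""
        parts = cmdline.split()
        if "cloud-init=enabled" in parts:
            return False
        return "cloud-init=disabled" in parts


    print(disabled_by_cmdline("ro quiet cloud-init=disabled"))  # True
    print(disabled_by_cmdline("ro quiet cloud-init=enabled"))   # False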
@@ -106,34 +116,35 @@ def _get_status_details(paths):
Values are obtained from parsing paths.run_dir/status.json.
"""
status = STATUS_ENABLED_NOT_RUN
- status_detail = ''
+ status_detail = ""
status_v1 = {}
- status_file = os.path.join(paths.run_dir, 'status.json')
- result_file = os.path.join(paths.run_dir, 'result.json')
+ status_file = os.path.join(paths.run_dir, "status.json")
+ result_file = os.path.join(paths.run_dir, "result.json")
(is_disabled, reason) = _is_cloudinit_disabled(
- CLOUDINIT_DISABLED_FILE, paths)
+ CLOUDINIT_DISABLED_FILE, paths
+ )
if is_disabled:
status = STATUS_DISABLED
status_detail = reason
if os.path.exists(status_file):
if not os.path.exists(result_file):
status = STATUS_RUNNING
- status_v1 = load_json(load_file(status_file)).get('v1', {})
+ status_v1 = load_json(load_file(status_file)).get("v1", {})
errors = []
latest_event = 0
for key, value in sorted(status_v1.items()):
- if key == 'stage':
+ if key == "stage":
if value:
status = STATUS_RUNNING
- status_detail = 'Running in stage: {0}'.format(value)
- elif key == 'datasource':
+ status_detail = "Running in stage: {0}".format(value)
+ elif key == "datasource":
status_detail = value
elif isinstance(value, dict):
- errors.extend(value.get('errors', []))
- start = value.get('start') or 0
- finished = value.get('finished') or 0
+ errors.extend(value.get("errors", []))
+ start = value.get("start") or 0
+ finished = value.get("finished") or 0
if finished == 0 and start != 0:
status = STATUS_RUNNING
event_time = max(start, finished)
@@ -141,23 +152,23 @@ def _get_status_details(paths):
latest_event = event_time
if errors:
status = STATUS_ERROR
- status_detail = '\n'.join(errors)
+ status_detail = "\n".join(errors)
elif status == STATUS_ENABLED_NOT_RUN and latest_event > 0:
status = STATUS_DONE
if latest_event:
- time = strftime('%a, %d %b %Y %H:%M:%S %z', gmtime(latest_event))
+ time = strftime("%a, %d %b %Y %H:%M:%S %z", gmtime(latest_event))
else:
- time = ''
+ time = ""
return status, status_detail, time
def main():
"""Tool to report status of cloud-init."""
parser = get_parser()
- sys.exit(handle_status_args('status', parser.parse_args()))
+ sys.exit(handle_status_args("status", parser.parse_args()))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
# vi: ts=4 expandtab
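_get_status_details reduces status.json to one status string: any recorded errors mean "error", a stage with a start but no finish means "running", and otherwise a past event means "done". A condensed sketch of that decision, applied to a made-up status dict:

    def summarize(status_v1: dict) -> str:
        errors, latest = [], 0
        status = "not run"
        for key, value in sorted(status_v1.items()):
            if key == "stage" and value:
                status = "running"
            elif isinstance(value, dict):
                errors.extend(value.get("errors", []))
                start = value.get("start") or 0
                finished = value.get("finished") or 0
                if finished == 0 and start != 0:
                    status = "running"
                latest = max(latest, start, finished)
        if errors:
            return "error"
        if status == "not run" and latest > 0:
            return "done"
        return status


    example = {"stage": None, "init": {"start": 1.0, "finished": 2.0, "errors": []}}
    print(summarize(example))  # done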
diff --git a/cloudinit/config/__init__.py b/cloudinit/config/__init__.py
index 0ef9a748..ed124180 100644
--- a/cloudinit/config/__init__.py
+++ b/cloudinit/config/__init__.py
@@ -6,9 +6,8 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.settings import (PER_INSTANCE, FREQUENCIES)
-
from cloudinit import log as logging
+from cloudinit.settings import FREQUENCIES, PER_INSTANCE
LOG = logging.getLogger(__name__)
@@ -22,26 +21,27 @@ MOD_PREFIX = "cc_"
def form_module_name(name):
canon_name = name.replace("-", "_")
if canon_name.lower().endswith(".py"):
- canon_name = canon_name[0:(len(canon_name) - 3)]
+ canon_name = canon_name[0 : (len(canon_name) - 3)]
canon_name = canon_name.strip()
if not canon_name:
return None
if not canon_name.startswith(MOD_PREFIX):
- canon_name = '%s%s' % (MOD_PREFIX, canon_name)
+ canon_name = "%s%s" % (MOD_PREFIX, canon_name)
return canon_name
def fixup_module(mod, def_freq=PER_INSTANCE):
- if not hasattr(mod, 'frequency'):
- setattr(mod, 'frequency', def_freq)
+ if not hasattr(mod, "frequency"):
+ setattr(mod, "frequency", def_freq)
else:
freq = mod.frequency
if freq and freq not in FREQUENCIES:
LOG.warning("Module %s has an unknown frequency %s", mod, freq)
- if not hasattr(mod, 'distros'):
- setattr(mod, 'distros', [])
- if not hasattr(mod, 'osfamilies'):
- setattr(mod, 'osfamilies', [])
+ if not hasattr(mod, "distros"):
+ setattr(mod, "distros", [])
+ if not hasattr(mod, "osfamilies"):
+ setattr(mod, "osfamilies", [])
return mod
+
# vi: ts=4 expandtab
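form_module_name canonicalizes user-supplied module names: dashes become underscores, a trailing ".py" is dropped, and the "cc_" prefix is added when missing. The same rules restated as a quick standalone snippet:

    MOD_PREFIX = "cc_"


    def canonical_module_name(name: str):
        canon = name.replace("-", "_")
        if canon.lower().endswith(".py"):
            canon = canon[: len(canon) - 3]
        canon = canon.strip()
        if not canon:
            return None
        if not canon.startswith(MOD_PREFIX):
            canon = MOD_PREFIX + canon
        return canon


    print(canonical_module_name("apt-configure.py"))  # cc_apt_configure
    print(canonical_module_name("cc_ntp"))            # cc_ntp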
diff --git a/cloudinit/config/cc_apk_configure.py b/cloudinit/config/cc_apk_configure.py
index d227a58d..a615c814 100644
--- a/cloudinit/config/cc_apk_configure.py
+++ b/cloudinit/config/cc_apk_configure.py
@@ -9,9 +9,7 @@
from textwrap import dedent
from cloudinit import log as logging
-from cloudinit import temp_utils
-from cloudinit import templater
-from cloudinit import util
+from cloudinit import temp_utils, templater, util
from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema
from cloudinit.settings import PER_INSTANCE
@@ -54,34 +52,41 @@ REPOSITORIES_TEMPLATE = """\
frequency = PER_INSTANCE
-distros = ['alpine']
+distros = ["alpine"]
meta = {
- 'id': 'cc_apk_configure',
- 'name': 'APK Configure',
- 'title': 'Configure apk repositories file',
- 'description': dedent("""\
+ "id": "cc_apk_configure",
+ "name": "APK Configure",
+ "title": "Configure apk repositories file",
+ "description": dedent(
+ """\
This module handles configuration of the /etc/apk/repositories file.
.. note::
To ensure that apk configuration is valid yaml, any strings
containing special characters, especially ``:`` should be quoted.
- """),
- 'distros': distros,
- 'examples': [
- dedent("""\
+ """
+ ),
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
# Keep the existing /etc/apk/repositories file unaltered.
apk_repos:
preserve_repositories: true
- """),
- dedent("""\
+ """
+ ),
+ dedent(
+ """\
# Create repositories file for Alpine v3.12 main and community
# using default mirror site.
apk_repos:
alpine_repo:
community_enabled: true
version: 'v3.12'
- """),
- dedent("""\
+ """
+ ),
+ dedent(
+ """\
# Create repositories file for Alpine Edge main, community, and
# testing using a specified mirror site and also a local repo.
apk_repos:
@@ -91,21 +96,23 @@ meta = {
testing_enabled: true
version: 'edge'
local_repo_base_url: 'https://my-local-server/local-alpine'
- """),
+ """
+ ),
],
- 'frequency': frequency,
+ "frequency": frequency,
}
schema = {
- 'type': 'object',
- 'properties': {
- 'apk_repos': {
- 'type': 'object',
- 'properties': {
- 'preserve_repositories': {
- 'type': 'boolean',
- 'default': False,
- 'description': dedent("""\
+ "type": "object",
+ "properties": {
+ "apk_repos": {
+ "type": "object",
+ "properties": {
+ "preserve_repositories": {
+ "type": "boolean",
+ "default": False,
+ "description": dedent(
+ """\
By default, cloud-init will generate a new repositories
file ``/etc/apk/repositories`` based on any valid
configuration settings specified within a apk_repos
@@ -116,33 +123,41 @@ schema = {
The ``preserve_repositories`` option overrides
all other config keys that would alter
``/etc/apk/repositories``.
- """)
+ """
+ ),
},
- 'alpine_repo': {
- 'type': ['object', 'null'],
- 'properties': {
- 'base_url': {
- 'type': 'string',
- 'default': DEFAULT_MIRROR,
- 'description': dedent("""\
+ "alpine_repo": {
+ "type": ["object", "null"],
+ "properties": {
+ "base_url": {
+ "type": "string",
+ "default": DEFAULT_MIRROR,
+ "description": dedent(
+ """\
The base URL of an Alpine repository, or
mirror, to download official packages from.
If not specified then it defaults to ``{}``
- """.format(DEFAULT_MIRROR))
+ """.format(
+ DEFAULT_MIRROR
+ )
+ ),
},
- 'community_enabled': {
- 'type': 'boolean',
- 'default': False,
- 'description': dedent("""\
+ "community_enabled": {
+ "type": "boolean",
+ "default": False,
+ "description": dedent(
+ """\
Whether to add the Community repo to the
repositories file. By default the Community
repo is not included.
- """)
+ """
+ ),
},
- 'testing_enabled': {
- 'type': 'boolean',
- 'default': False,
- 'description': dedent("""\
+ "testing_enabled": {
+ "type": "boolean",
+ "default": False,
+ "description": dedent(
+ """\
Whether to add the Testing repo to the
repositories file. By default the Testing
repo is not included. It is only recommended
@@ -151,32 +166,37 @@ schema = {
                        installed from Testing may have dependencies
that conflict with those in non-Edge Main or
Community repos."
- """)
+ """
+ ),
},
- 'version': {
- 'type': 'string',
- 'description': dedent("""\
+ "version": {
+ "type": "string",
+ "description": dedent(
+ """\
The Alpine version to use (e.g. ``v3.12`` or
``edge``)
- """)
+ """
+ ),
},
},
- 'required': ['version'],
- 'minProperties': 1,
- 'additionalProperties': False,
+ "required": ["version"],
+ "minProperties": 1,
+ "additionalProperties": False,
},
- 'local_repo_base_url': {
- 'type': 'string',
- 'description': dedent("""\
+ "local_repo_base_url": {
+ "type": "string",
+ "description": dedent(
+ """\
The base URL of an Alpine repository containing
unofficial packages
- """)
- }
+ """
+ ),
+ },
},
- 'minProperties': 1, # Either preserve_repositories or alpine_repo
- 'additionalProperties': False,
+ "minProperties": 1, # Either preserve_repositories or alpine_repo
+ "additionalProperties": False,
}
- }
+ },
}
__doc__ = get_meta_doc(meta, schema)
@@ -195,38 +215,44 @@ def handle(name, cfg, cloud, log, _args):
# If there is no "apk_repos" section in the configuration
# then do nothing.
- apk_section = cfg.get('apk_repos')
+ apk_section = cfg.get("apk_repos")
if not apk_section:
- LOG.debug(("Skipping module named %s,"
- " no 'apk_repos' section found"), name)
+ LOG.debug(
+ "Skipping module named %s, no 'apk_repos' section found", name
+ )
return
validate_cloudconfig_schema(cfg, schema)
# If "preserve_repositories" is explicitly set to True in
# the configuration do nothing.
- if util.get_cfg_option_bool(apk_section, 'preserve_repositories', False):
- LOG.debug(("Skipping module named %s,"
- " 'preserve_repositories' is set"), name)
+ if util.get_cfg_option_bool(apk_section, "preserve_repositories", False):
+ LOG.debug(
+ "Skipping module named %s, 'preserve_repositories' is set", name
+ )
return
# If there is no "alpine_repo" subsection of "apk_repos" present in the
# configuration then do nothing, as at least "version" is required to
# create valid repositories entries.
- alpine_repo = apk_section.get('alpine_repo')
+ alpine_repo = apk_section.get("alpine_repo")
if not alpine_repo:
- LOG.debug(("Skipping module named %s,"
- " no 'alpine_repo' configuration found"), name)
+ LOG.debug(
+ "Skipping module named %s, no 'alpine_repo' configuration found",
+ name,
+ )
return
# If there is no "version" value present in configuration then do nothing.
- alpine_version = alpine_repo.get('version')
+ alpine_version = alpine_repo.get("version")
if not alpine_version:
- LOG.debug(("Skipping module named %s,"
- " 'version' not specified in alpine_repo"), name)
+ LOG.debug(
+ "Skipping module named %s, 'version' not specified in alpine_repo",
+ name,
+ )
return
- local_repo = apk_section.get('local_repo_base_url', '')
+ local_repo = apk_section.get("local_repo_base_url", "")
_write_repositories_file(alpine_repo, alpine_version, local_repo)
@@ -240,22 +266,23 @@ def _write_repositories_file(alpine_repo, alpine_version, local_repo):
@param local_repo: A string containing the base URL of a local repo.
"""
- repo_file = '/etc/apk/repositories'
+ repo_file = "/etc/apk/repositories"
- alpine_baseurl = alpine_repo.get('base_url', DEFAULT_MIRROR)
+ alpine_baseurl = alpine_repo.get("base_url", DEFAULT_MIRROR)
- params = {'alpine_baseurl': alpine_baseurl,
- 'alpine_version': alpine_version,
- 'community_enabled': alpine_repo.get('community_enabled'),
- 'testing_enabled': alpine_repo.get('testing_enabled'),
- 'local_repo': local_repo}
+ params = {
+ "alpine_baseurl": alpine_baseurl,
+ "alpine_version": alpine_version,
+ "community_enabled": alpine_repo.get("community_enabled"),
+ "testing_enabled": alpine_repo.get("testing_enabled"),
+ "local_repo": local_repo,
+ }
- tfile = temp_utils.mkstemp(prefix='template_name-', suffix=".tmpl")
+ tfile = temp_utils.mkstemp(prefix="template_name-", suffix=".tmpl")
template_fn = tfile[1] # Filepath is second item in tuple
util.write_file(template_fn, content=REPOSITORIES_TEMPLATE)
- LOG.debug('Generating Alpine repository configuration file: %s',
- repo_file)
+ LOG.debug("Generating Alpine repository configuration file: %s", repo_file)
templater.render_to_file(template_fn, repo_file, params)
# Clean up temporary template
util.del_file(template_fn)
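_write_repositories_file renders a small template to /etc/apk/repositories through cloud-init's templater. A rough stand-in using string.Template and a temporary file is sketched below to show the shape of the operation; the template body, mirror URL, and output path are invented, and the real templater supports richer syntax.

    import tempfile
    from string import Template

    REPO_TEMPLATE = Template(
        "$alpine_baseurl/$alpine_version/main\n"
        "$alpine_baseurl/$alpine_version/community\n"
    )

    params = {
        "alpine_baseurl": "https://alpine.global.ssl.fastly.net/alpine",
        "alpine_version": "v3.12",
    }

    with tempfile.NamedTemporaryFile("w", suffix=".repositories", delete=False) as f:
        f.write(REPO_TEMPLATE.substitute(params))
        print("wrote", f.name)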
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index 2e844c2c..b0728517 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -10,16 +10,14 @@
import glob
import os
-import re
import pathlib
+import re
from textwrap import dedent
-from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema
from cloudinit import gpg
from cloudinit import log as logging
-from cloudinit import subp
-from cloudinit import templater
-from cloudinit import util
+from cloudinit import subp, templater, util
+from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema
from cloudinit.settings import PER_INSTANCE
LOG = logging.getLogger(__name__)
@@ -27,59 +25,46 @@ LOG = logging.getLogger(__name__)
# this will match 'XXX:YYY' (ie, 'cloud-archive:foo' or 'ppa:bar')
ADD_APT_REPO_MATCH = r"^[\w-]+:\w"
-APT_LOCAL_KEYS = '/etc/apt/trusted.gpg'
-APT_TRUSTED_GPG_DIR = '/etc/apt/trusted.gpg.d/'
-CLOUD_INIT_GPG_DIR = '/etc/apt/cloud-init.gpg.d/'
+APT_LOCAL_KEYS = "/etc/apt/trusted.gpg"
+APT_TRUSTED_GPG_DIR = "/etc/apt/trusted.gpg.d/"
+CLOUD_INIT_GPG_DIR = "/etc/apt/cloud-init.gpg.d/"
frequency = PER_INSTANCE
distros = ["ubuntu", "debian"]
mirror_property = {
- 'type': 'array',
- 'items': {
- 'type': 'object',
- 'additionalProperties': False,
- 'required': ['arches'],
- 'properties': {
- 'arches': {
- 'type': 'array',
- 'items': {
- 'type': 'string'
- },
- 'minItems': 1
- },
- 'uri': {
- 'type': 'string',
- 'format': 'uri'
- },
- 'search': {
- 'type': 'array',
- 'items': {
- 'type': 'string',
- 'format': 'uri'
- },
- 'minItems': 1
- },
- 'search_dns': {
- 'type': 'boolean',
+ "type": "array",
+ "items": {
+ "type": "object",
+ "additionalProperties": False,
+ "required": ["arches"],
+ "properties": {
+ "arches": {
+ "type": "array",
+ "items": {"type": "string"},
+ "minItems": 1,
},
- 'keyid': {
- 'type': 'string'
+ "uri": {"type": "string", "format": "uri"},
+ "search": {
+ "type": "array",
+ "items": {"type": "string", "format": "uri"},
+ "minItems": 1,
},
- 'key': {
- 'type': 'string'
+ "search_dns": {
+ "type": "boolean",
},
- 'keyserver': {
- 'type': 'string'
- }
- }
- }
+ "keyid": {"type": "string"},
+ "key": {"type": "string"},
+ "keyserver": {"type": "string"},
+ },
+ },
}
meta = {
- 'id': 'cc_apt_configure',
- 'name': 'Apt Configure',
- 'title': 'Configure apt for the user',
- 'description': dedent("""\
+ "id": "cc_apt_configure",
+ "name": "Apt Configure",
+ "title": "Configure apt for the user",
+ "description": dedent(
+ """\
This module handles both configuration of apt options and adding
source lists. There are configuration options such as
``apt_get_wrapper`` and ``apt_get_command`` that control how
@@ -94,9 +79,12 @@ meta = {
.. note::
For more information about apt configuration, see the
- ``Additional apt configuration`` example."""),
- 'distros': distros,
- 'examples': [dedent("""\
+ ``Additional apt configuration`` example."""
+ ),
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
apt:
preserve_sources_list: false
disable_suites:
@@ -153,21 +141,24 @@ meta = {
key: |
------BEGIN PGP PUBLIC KEY BLOCK-------
<key data>
- ------END PGP PUBLIC KEY BLOCK-------""")],
- 'frequency': frequency,
+ ------END PGP PUBLIC KEY BLOCK-------"""
+ )
+ ],
+ "frequency": frequency,
}
schema = {
- 'type': 'object',
- 'properties': {
- 'apt': {
- 'type': 'object',
- 'additionalProperties': False,
- 'properties': {
- 'preserve_sources_list': {
- 'type': 'boolean',
- 'default': False,
- 'description': dedent("""\
+ "type": "object",
+ "properties": {
+ "apt": {
+ "type": "object",
+ "additionalProperties": False,
+ "properties": {
+ "preserve_sources_list": {
+ "type": "boolean",
+ "default": False,
+ "description": dedent(
+ """\
By default, cloud-init will generate a new sources
list in ``/etc/apt/sources.list.d`` based on any
changes specified in cloud config. To disable this
@@ -179,15 +170,15 @@ schema = {
all other config keys that would alter
``sources.list`` or ``sources.list.d``,
**except** for additional sources to be added
- to ``sources.list.d``.""")
+ to ``sources.list.d``."""
+ ),
},
- 'disable_suites': {
- 'type': 'array',
- 'items': {
- 'type': 'string'
- },
- 'uniqueItems': True,
- 'description': dedent("""\
+ "disable_suites": {
+ "type": "array",
+ "items": {"type": "string"},
+ "uniqueItems": True,
+ "description": dedent(
+ """\
Entries in the sources list can be disabled using
``disable_suites``, which takes a list of suites
to be disabled. If the string ``$RELEASE`` is
@@ -206,11 +197,13 @@ schema = {
When a suite is disabled using ``disable_suites``,
its entry in ``sources.list`` is not deleted; it
- is just commented out.""")
+ is just commented out."""
+ ),
},
- 'primary': {
+ "primary": {
**mirror_property,
- 'description': dedent("""\
+ "description": dedent(
+ """\
The primary and security archive mirrors can
be specified using the ``primary`` and
``security`` keys, respectively. Both the
@@ -264,27 +257,35 @@ schema = {
``http://archive.ubuntu.com/ubuntu``.
- ``security`` => \
``http://security.ubuntu.com/ubuntu``
- """)
+ """
+ ),
},
- 'security': {
+ "security": {
**mirror_property,
- 'description': dedent("""\
- Please refer to the primary config documentation""")
+ "description": dedent(
+ """\
+ Please refer to the primary config documentation"""
+ ),
},
- 'add_apt_repo_match': {
- 'type': 'string',
- 'default': ADD_APT_REPO_MATCH,
- 'description': dedent("""\
+ "add_apt_repo_match": {
+ "type": "string",
+ "default": ADD_APT_REPO_MATCH,
+ "description": dedent(
+ """\
All source entries in ``apt-sources`` that match
regex in ``add_apt_repo_match`` will be added to
the system using ``add-apt-repository``. If
``add_apt_repo_match`` is not specified, it
- defaults to ``{}``""".format(ADD_APT_REPO_MATCH))
+ defaults to ``{}``""".format(
+ ADD_APT_REPO_MATCH
+ )
+ ),
},
- 'debconf_selections': {
- 'type': 'object',
- 'items': {'type': 'string'},
- 'description': dedent("""\
+ "debconf_selections": {
+ "type": "object",
+ "items": {"type": "string"},
+ "description": dedent(
+ """\
Debconf additional configurations can be specified as a
dictionary under the ``debconf_selections`` config
key, with each key in the dict representing a
@@ -308,11 +309,13 @@ schema = {
For example: \
``ippackage ippackage/ip string 127.0.01``
- """)
+ """
+ ),
},
- 'sources_list': {
- 'type': 'string',
- 'description': dedent("""\
+ "sources_list": {
+ "type": "string",
+ "description": dedent(
+ """\
Specifies a custom template for rendering
``sources.list`` . If no ``sources_list`` template
is given, cloud-init will use sane default. Within
@@ -323,45 +326,55 @@ schema = {
- ``$RELEASE``
- ``$PRIMARY``
- ``$SECURITY``
- - ``$KEY_FILE``""")
+ - ``$KEY_FILE``"""
+ ),
},
- 'conf': {
- 'type': 'string',
- 'description': dedent("""\
+ "conf": {
+ "type": "string",
+ "description": dedent(
+ """\
Specify configuration for apt, such as proxy
configuration. This configuration is specified as a
string. For multiline apt configuration, make sure
- to follow yaml syntax.""")
+ to follow yaml syntax."""
+ ),
},
- 'https_proxy': {
- 'type': 'string',
- 'description': dedent("""\
+ "https_proxy": {
+ "type": "string",
+ "description": dedent(
+ """\
More convenient way to specify https apt proxy.
https proxy url is specified in the format
- ``https://[[user][:pass]@]host[:port]/``.""")
+ ``https://[[user][:pass]@]host[:port]/``."""
+ ),
},
- 'http_proxy': {
- 'type': 'string',
- 'description': dedent("""\
+ "http_proxy": {
+ "type": "string",
+ "description": dedent(
+ """\
More convenient way to specify http apt proxy.
http proxy url is specified in the format
- ``http://[[user][:pass]@]host[:port]/``.""")
+ ``http://[[user][:pass]@]host[:port]/``."""
+ ),
},
- 'proxy': {
- 'type': 'string',
- 'description': 'Alias for defining a http apt proxy.'
+ "proxy": {
+ "type": "string",
+ "description": "Alias for defining a http apt proxy.",
},
- 'ftp_proxy': {
- 'type': 'string',
- 'description': dedent("""\
+ "ftp_proxy": {
+ "type": "string",
+ "description": dedent(
+ """\
More convenient way to specify ftp apt proxy.
ftp proxy url is specified in the format
- ``ftp://[[user][:pass]@]host[:port]/``.""")
+ ``ftp://[[user][:pass]@]host[:port]/``."""
+ ),
},
- 'sources': {
- 'type': 'object',
- 'items': {'type': 'string'},
- 'description': dedent("""\
+ "sources": {
+ "type": "object",
+ "items": {"type": "string"},
+ "description": dedent(
+ """\
Source list entries can be specified as a
dictionary under the ``sources`` config key, with
each key in the dict representing a different source
@@ -394,11 +407,12 @@ schema = {
- ``$PRIMARY``
- ``$SECURITY``
- ``$RELEASE``
- - ``$KEY_FILE``""")
- }
- }
+ - ``$KEY_FILE``"""
+ ),
+ },
+ },
}
- }
+ },
}
__doc__ = get_meta_doc(meta, schema)
@@ -415,18 +429,22 @@ APT_PROXY_FN = "/etc/apt/apt.conf.d/90cloud-init-aptproxy"
DEFAULT_KEYSERVER = "keyserver.ubuntu.com"
# Default archive mirrors
-PRIMARY_ARCH_MIRRORS = {"PRIMARY": "http://archive.ubuntu.com/ubuntu/",
- "SECURITY": "http://security.ubuntu.com/ubuntu/"}
-PORTS_MIRRORS = {"PRIMARY": "http://ports.ubuntu.com/ubuntu-ports",
- "SECURITY": "http://ports.ubuntu.com/ubuntu-ports"}
-PRIMARY_ARCHES = ['amd64', 'i386']
-PORTS_ARCHES = ['s390x', 'arm64', 'armhf', 'powerpc', 'ppc64el', 'riscv64']
+PRIMARY_ARCH_MIRRORS = {
+ "PRIMARY": "http://archive.ubuntu.com/ubuntu/",
+ "SECURITY": "http://security.ubuntu.com/ubuntu/",
+}
+PORTS_MIRRORS = {
+ "PRIMARY": "http://ports.ubuntu.com/ubuntu-ports",
+ "SECURITY": "http://ports.ubuntu.com/ubuntu-ports",
+}
+PRIMARY_ARCHES = ["amd64", "i386"]
+PORTS_ARCHES = ["s390x", "arm64", "armhf", "powerpc", "ppc64el", "riscv64"]
def get_default_mirrors(arch=None, target=None):
"""returns the default mirrors for the target. These depend on the
- architecture, for more see:
- https://wiki.ubuntu.com/UbuntuDevelopment/PackageArchive#Ports"""
+ architecture, for more see:
+ https://wiki.ubuntu.com/UbuntuDevelopment/PackageArchive#Ports"""
if arch is None:
arch = util.get_dpkg_architecture(target)
if arch in PRIMARY_ARCHES:
@@ -438,8 +456,8 @@ def get_default_mirrors(arch=None, target=None):
def handle(name, ocfg, cloud, log, _):
"""process the config for apt_config. This can be called from
- curthooks if a global apt config was provided or via the "apt"
- standalone command."""
+ curthooks if a global apt config was provided or via the "apt"
+ standalone command."""
# keeping code close to curtin codebase via entry handler
target = None
if log is not None:
@@ -447,12 +465,14 @@ def handle(name, ocfg, cloud, log, _):
LOG = log
# feed back converted config, but only work on the subset under 'apt'
ocfg = convert_to_v3_apt_format(ocfg)
- cfg = ocfg.get('apt', {})
+ cfg = ocfg.get("apt", {})
if not isinstance(cfg, dict):
raise ValueError(
"Expected dictionary for 'apt' config, found {config_type}".format(
- config_type=type(cfg)))
+ config_type=type(cfg)
+ )
+ )
validate_cloudconfig_schema(cfg, schema)
apply_debconf_selections(cfg, target)
@@ -463,7 +483,7 @@ def _should_configure_on_empty_apt():
# if no config was provided, should apt configuration be done?
if util.system_is_snappy():
return False, "system is snappy."
- if not (subp.which('apt-get') or subp.which('apt')):
+ if not (subp.which("apt-get") or subp.which("apt")):
return False, "no apt commands."
return True, "Apt is available."
@@ -478,12 +498,12 @@ def apply_apt(cfg, cloud, target):
LOG.debug("handling apt config: %s", cfg)
- release = util.lsb_release(target=target)['codename']
+ release = util.lsb_release(target=target)["codename"]
arch = util.get_dpkg_architecture(target)
mirrors = find_apt_mirror_info(cfg, cloud, arch=arch)
LOG.debug("Apt Mirror info: %s", mirrors)
- if util.is_false(cfg.get('preserve_sources_list', False)):
+ if util.is_false(cfg.get("preserve_sources_list", False)):
add_mirror_keys(cfg, target)
generate_sources_list(cfg, release, mirrors, cloud)
rename_apt_lists(mirrors, target, arch)
@@ -494,25 +514,34 @@ def apply_apt(cfg, cloud, target):
LOG.exception("Failed to apply proxy or apt config info:")
# Process 'apt_source -> sources {dict}'
- if 'sources' in cfg:
+ if "sources" in cfg:
params = mirrors
- params['RELEASE'] = release
- params['MIRROR'] = mirrors["MIRROR"]
+ params["RELEASE"] = release
+ params["MIRROR"] = mirrors["MIRROR"]
matcher = None
- matchcfg = cfg.get('add_apt_repo_match', ADD_APT_REPO_MATCH)
+ matchcfg = cfg.get("add_apt_repo_match", ADD_APT_REPO_MATCH)
if matchcfg:
matcher = re.compile(matchcfg).search
- add_apt_sources(cfg['sources'], cloud, target=target,
- template_params=params, aa_repo_match=matcher)
+ add_apt_sources(
+ cfg["sources"],
+ cloud,
+ target=target,
+ template_params=params,
+ aa_repo_match=matcher,
+ )
def debconf_set_selections(selections, target=None):
- if not selections.endswith(b'\n'):
- selections += b'\n'
- subp.subp(['debconf-set-selections'], data=selections, target=target,
- capture=True)
+ if not selections.endswith(b"\n"):
+ selections += b"\n"
+ subp.subp(
+ ["debconf-set-selections"],
+ data=selections,
+ target=target,
+ capture=True,
+ )
def dpkg_reconfigure(packages, target=None):
@@ -532,12 +561,20 @@ def dpkg_reconfigure(packages, target=None):
unhandled.append(pkg)
if len(unhandled):
- LOG.warning("The following packages were installed and preseeded, "
- "but cannot be unconfigured: %s", unhandled)
+ LOG.warning(
+ "The following packages were installed and preseeded, "
+ "but cannot be unconfigured: %s",
+ unhandled,
+ )
if len(to_config):
- subp.subp(['dpkg-reconfigure', '--frontend=noninteractive'] +
- list(to_config), data=None, target=target, capture=True)
+ subp.subp(
+ ["dpkg-reconfigure", "--frontend=noninteractive"]
+ + list(to_config),
+ data=None,
+ target=target,
+ capture=True,
+ )
def apply_debconf_selections(cfg, target=None):
@@ -546,13 +583,12 @@ def apply_debconf_selections(cfg, target=None):
# set1: |
# cloud-init cloud-init/datasources multiselect MAAS
# set2: pkg pkg/value string bar
- selsets = cfg.get('debconf_selections')
+ selsets = cfg.get("debconf_selections")
if not selsets:
LOG.debug("debconf_selections was not set in config")
return
- selections = '\n'.join(
- [selsets[key] for key in sorted(selsets.keys())])
+ selections = "\n".join([selsets[key] for key in sorted(selsets.keys())])
debconf_set_selections(selections.encode(), target=target)
# get a complete list of packages listed in input
@@ -579,7 +615,8 @@ def apply_debconf_selections(cfg, target=None):
def clean_cloud_init(target):
"""clean out any local cloud-init config"""
flist = glob.glob(
- subp.target_path(target, "/etc/cloud/cloud.cfg.d/*dpkg*"))
+ subp.target_path(target, "/etc/cloud/cloud.cfg.d/*dpkg*")
+ )
LOG.debug("cleaning cloud-init config from: %s", flist)
for dpkg_cfg in flist:
@@ -588,18 +625,18 @@ def clean_cloud_init(target):
def mirrorurl_to_apt_fileprefix(mirror):
"""mirrorurl_to_apt_fileprefix
- Convert a mirror url to the file prefix used by apt on disk to
- store cache information for that mirror.
- To do so do:
- - take off ???://
- - drop tailing /
- - convert in string / to _"""
+ Convert a mirror url to the file prefix used by apt on disk to
+ store cache information for that mirror.
+ To do so do:
+ - take off ???://
+ - drop tailing /
+ - convert in string / to _"""
string = mirror
if string.endswith("/"):
string = string[0:-1]
pos = string.find("://")
if pos >= 0:
- string = string[pos + 3:]
+ string = string[pos + 3 :]
string = string.replace("/", "_")
return string
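The docstring above spells out the algorithm: strip the scheme, drop a trailing slash, and turn the remaining slashes into underscores. The same three steps as a standalone snippet:

    def mirror_to_prefix(mirror: str) -> str:
        s = mirror
        if s.endswith("/"):
            s = s[:-1]
        pos = s.find("://")
        if pos >= 0:
            s = s[pos + 3:]
        return s.replace("/", "_")


    print(mirror_to_prefix("http://archive.ubuntu.com/ubuntu/"))
    # archive.ubuntu.com_ubuntu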
@@ -631,8 +668,8 @@ def rename_apt_lists(new_mirrors, target, arch):
def mirror_to_placeholder(tmpl, mirror, placeholder):
"""mirror_to_placeholder
- replace the specified mirror in a template with a placeholder string
- Checks for existance of the expected mirror and warns if not found"""
+ replace the specified mirror in a template with a placeholder string
+ Checks for existance of the expected mirror and warns if not found"""
if mirror not in tmpl:
LOG.warning("Expected mirror '%s' not found in: %s", mirror, tmpl)
return tmpl.replace(mirror, placeholder)
@@ -640,13 +677,15 @@ def mirror_to_placeholder(tmpl, mirror, placeholder):
def map_known_suites(suite):
"""there are a few default names which will be auto-extended.
- This comes at the inability to use those names literally as suites,
- but on the other hand increases readability of the cfg quite a lot"""
- mapping = {'updates': '$RELEASE-updates',
- 'backports': '$RELEASE-backports',
- 'security': '$RELEASE-security',
- 'proposed': '$RELEASE-proposed',
- 'release': '$RELEASE'}
+ This comes at the inability to use those names literally as suites,
+ but on the other hand increases readability of the cfg quite a lot"""
+ mapping = {
+ "updates": "$RELEASE-updates",
+ "backports": "$RELEASE-backports",
+ "security": "$RELEASE-security",
+ "proposed": "$RELEASE-proposed",
+ "release": "$RELEASE",
+ }
try:
retsuite = mapping[suite]
except KeyError:
@@ -656,14 +695,14 @@ def map_known_suites(suite):
def disable_suites(disabled, src, release):
"""reads the config for suites to be disabled and removes those
- from the template"""
+ from the template"""
if not disabled:
return src
retsrc = src
for suite in disabled:
suite = map_known_suites(suite)
- releasesuite = templater.render_string(suite, {'RELEASE': release})
+ releasesuite = templater.render_string(suite, {"RELEASE": release})
LOG.debug("Disabling suite %s as %s", suite, releasesuite)
newsrc = ""
@@ -685,7 +724,7 @@ def disable_suites(disabled, src, release):
break
if cols[pcol] == releasesuite:
- line = '# suite disabled by cloud-init: %s' % line
+ line = "# suite disabled by cloud-init: %s" % line
newsrc += line
retsrc = newsrc
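disable_suites comments out, rather than deletes, any sources.list line whose suite column matches a disabled suite. A simplified standalone version of that pass is sketched below; the real function also expands $RELEASE and adjusts the column offset for deb-src and option fields.

    def disable_suites(disabled, src):
        """Comment out sources.list lines whose suite matches a disabled one."""
        out = []
        for line in src.splitlines(True):
            cols = line.split()
            if len(cols) >= 3 and cols[0] in ("deb", "deb-src") and cols[2] in disabled:
                line = "# suite disabled by cloud-init: " + line
            out.append(line)
        return "".join(out)


    src = (
        "deb http://archive.ubuntu.com/ubuntu focal main\n"
        "deb http://archive.ubuntu.com/ubuntu focal-updates main\n"
    )
    print(disable_suites(["focal-updates"], src), end="")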
@@ -694,36 +733,38 @@ def disable_suites(disabled, src, release):
def add_mirror_keys(cfg, target):
"""Adds any keys included in the primary/security mirror clauses"""
- for key in ('primary', 'security'):
+ for key in ("primary", "security"):
for mirror in cfg.get(key, []):
add_apt_key(mirror, target, file_name=key)
def generate_sources_list(cfg, release, mirrors, cloud):
"""generate_sources_list
- create a source.list file based on a custom or default template
- by replacing mirrors and release in the template"""
+ create a source.list file based on a custom or default template
+ by replacing mirrors and release in the template"""
aptsrc = "/etc/apt/sources.list"
- params = {'RELEASE': release, 'codename': release}
+ params = {"RELEASE": release, "codename": release}
for k in mirrors:
params[k] = mirrors[k]
params[k.lower()] = mirrors[k]
- tmpl = cfg.get('sources_list', None)
+ tmpl = cfg.get("sources_list", None)
if tmpl is None:
LOG.info("No custom template provided, fall back to builtin")
- template_fn = cloud.get_template_filename('sources.list.%s' %
- (cloud.distro.name))
+ template_fn = cloud.get_template_filename(
+ "sources.list.%s" % (cloud.distro.name)
+ )
if not template_fn:
- template_fn = cloud.get_template_filename('sources.list')
+ template_fn = cloud.get_template_filename("sources.list")
if not template_fn:
- LOG.warning("No template found, "
- "not rendering /etc/apt/sources.list")
+ LOG.warning(
+ "No template found, not rendering /etc/apt/sources.list"
+ )
return
tmpl = util.load_file(template_fn)
rendered = templater.render_string(tmpl, params)
- disabled = disable_suites(cfg.get('disable_suites'), rendered, release)
+ disabled = disable_suites(cfg.get("disable_suites"), rendered, release)
util.write_file(aptsrc, disabled, mode=0o644)
@@ -735,7 +776,7 @@ def add_apt_key_raw(key, file_name, hardened=False, target=None):
LOG.debug("Adding key:\n'%s'", key)
try:
name = pathlib.Path(file_name).stem
- return apt_key('add', output_file=name, data=key, hardened=hardened)
+ return apt_key("add", output_file=name, data=key, hardened=hardened)
except subp.ProcessExecutionError:
LOG.exception("failed to add apt GPG Key to apt keyring")
raise
@@ -747,26 +788,26 @@ def add_apt_key(ent, target=None, hardened=False, file_name=None):
Supports raw keys or keyid's
The latter will as a first step fetched to get the raw key
"""
- if 'keyid' in ent and 'key' not in ent:
+ if "keyid" in ent and "key" not in ent:
keyserver = DEFAULT_KEYSERVER
- if 'keyserver' in ent:
- keyserver = ent['keyserver']
+ if "keyserver" in ent:
+ keyserver = ent["keyserver"]
- ent['key'] = gpg.getkeybyid(ent['keyid'], keyserver)
+ ent["key"] = gpg.getkeybyid(ent["keyid"], keyserver)
- if 'key' in ent:
+ if "key" in ent:
return add_apt_key_raw(
- ent['key'],
- file_name or ent['filename'],
- hardened=hardened)
+ ent["key"], file_name or ent["filename"], hardened=hardened
+ )
def update_packages(cloud):
cloud.distro.update_package_sources()
-def add_apt_sources(srcdict, cloud, target=None, template_params=None,
- aa_repo_match=None):
+def add_apt_sources(
+ srcdict, cloud, target=None, template_params=None, aa_repo_match=None
+):
"""
install keys and repo source .list files defined in 'sources'
@@ -795,33 +836,34 @@ def add_apt_sources(srcdict, cloud, target=None, template_params=None,
template_params = {}
if aa_repo_match is None:
- raise ValueError('did not get a valid repo matcher')
+ raise ValueError("did not get a valid repo matcher")
if not isinstance(srcdict, dict):
- raise TypeError('unknown apt format: %s' % (srcdict))
+ raise TypeError("unknown apt format: %s" % (srcdict))
for filename in srcdict:
ent = srcdict[filename]
LOG.debug("adding source/key '%s'", ent)
- if 'filename' not in ent:
- ent['filename'] = filename
+ if "filename" not in ent:
+ ent["filename"] = filename
- if 'source' in ent and '$KEY_FILE' in ent['source']:
+ if "source" in ent and "$KEY_FILE" in ent["source"]:
key_file = add_apt_key(ent, target, hardened=True)
- template_params['KEY_FILE'] = key_file
+ template_params["KEY_FILE"] = key_file
else:
key_file = add_apt_key(ent, target)
- if 'source' not in ent:
+ if "source" not in ent:
continue
- source = ent['source']
+ source = ent["source"]
source = templater.render_string(source, template_params)
- if not ent['filename'].startswith("/"):
- ent['filename'] = os.path.join("/etc/apt/sources.list.d/",
- ent['filename'])
- if not ent['filename'].endswith(".list"):
- ent['filename'] += ".list"
+ if not ent["filename"].startswith("/"):
+ ent["filename"] = os.path.join(
+ "/etc/apt/sources.list.d/", ent["filename"]
+ )
+ if not ent["filename"].endswith(".list"):
+ ent["filename"] += ".list"
if aa_repo_match(source):
try:
@@ -831,7 +873,7 @@ def add_apt_sources(srcdict, cloud, target=None, template_params=None,
raise
continue
- sourcefn = subp.target_path(target, ent['filename'])
+ sourcefn = subp.target_path(target, ent["filename"])
try:
contents = "%s\n" % (source)
util.write_file(sourcefn, contents, omode="a")
@@ -850,14 +892,14 @@ def convert_v1_to_v2_apt_format(srclist):
if isinstance(srclist, list):
LOG.debug("apt config: convert V1 to V2 format (source list to dict)")
for srcent in srclist:
- if 'filename' not in srcent:
+ if "filename" not in srcent:
# file collides for multiple !filename cases for compatibility
# yet we need them all processed, so not same dictionary key
- srcent['filename'] = "cloud_config_sources.list"
+ srcent["filename"] = "cloud_config_sources.list"
key = util.rand_dict_key(srcdict, "cloud_config_sources.list")
else:
# all with filename use that as key (matching new format)
- key = srcent['filename']
+ key = srcent["filename"]
srcdict[key] = srcent
elif isinstance(srclist, dict):
srcdict = srclist
@@ -869,7 +911,7 @@ def convert_v1_to_v2_apt_format(srclist):
def convert_key(oldcfg, aptcfg, oldkey, newkey):
"""convert an old key to the new one if the old one exists
- returns true if a key was found and converted"""
+ returns true if a key was found and converted"""
if oldcfg.get(oldkey, None) is not None:
aptcfg[newkey] = oldcfg.get(oldkey)
del oldcfg[oldkey]
@@ -879,33 +921,37 @@ def convert_key(oldcfg, aptcfg, oldkey, newkey):
def convert_mirror(oldcfg, aptcfg):
"""convert old apt_mirror keys into the new more advanced mirror spec"""
- keymap = [('apt_mirror', 'uri'),
- ('apt_mirror_search', 'search'),
- ('apt_mirror_search_dns', 'search_dns')]
+ keymap = [
+ ("apt_mirror", "uri"),
+ ("apt_mirror_search", "search"),
+ ("apt_mirror_search_dns", "search_dns"),
+ ]
converted = False
- newmcfg = {'arches': ['default']}
+ newmcfg = {"arches": ["default"]}
for oldkey, newkey in keymap:
if convert_key(oldcfg, newmcfg, oldkey, newkey):
converted = True
# only insert new style config if anything was converted
if converted:
- aptcfg['primary'] = [newmcfg]
+ aptcfg["primary"] = [newmcfg]
def convert_v2_to_v3_apt_format(oldcfg):
"""convert old to new keys and adapt restructured mirror spec"""
- mapoldkeys = {'apt_sources': 'sources',
- 'apt_mirror': None,
- 'apt_mirror_search': None,
- 'apt_mirror_search_dns': None,
- 'apt_proxy': 'proxy',
- 'apt_http_proxy': 'http_proxy',
- 'apt_ftp_proxy': 'https_proxy',
- 'apt_https_proxy': 'ftp_proxy',
- 'apt_preserve_sources_list': 'preserve_sources_list',
- 'apt_custom_sources_list': 'sources_list',
- 'add_apt_repo_match': 'add_apt_repo_match'}
+ mapoldkeys = {
+ "apt_sources": "sources",
+ "apt_mirror": None,
+ "apt_mirror_search": None,
+ "apt_mirror_search_dns": None,
+ "apt_proxy": "proxy",
+ "apt_http_proxy": "http_proxy",
+ "apt_ftp_proxy": "https_proxy",
+ "apt_https_proxy": "ftp_proxy",
+ "apt_preserve_sources_list": "preserve_sources_list",
+ "apt_custom_sources_list": "sources_list",
+ "add_apt_repo_match": "add_apt_repo_match",
+ }
needtoconvert = []
for oldkey in mapoldkeys:
if oldkey in oldcfg:
@@ -917,11 +963,13 @@ def convert_v2_to_v3_apt_format(oldcfg):
# no old config, so no new one to be created
if not needtoconvert:
return oldcfg
- LOG.debug("apt config: convert V2 to V3 format for keys '%s'",
- ", ".join(needtoconvert))
+ LOG.debug(
+ "apt config: convert V2 to V3 format for keys '%s'",
+ ", ".join(needtoconvert),
+ )
# if old AND new config are provided, prefer the new one (LP #1616831)
- newaptcfg = oldcfg.get('apt', None)
+ newaptcfg = oldcfg.get("apt", None)
if newaptcfg is not None:
LOG.debug("apt config: V1/2 and V3 format specified, preferring V3")
for oldkey in needtoconvert:
@@ -932,10 +980,11 @@ def convert_v2_to_v3_apt_format(oldcfg):
# no simple mapping or no collision on this particular key
continue
if verify != newaptcfg[newkey]:
- raise ValueError("Old and New apt format defined with unequal "
- "values %s vs %s @ %s" % (verify,
- newaptcfg[newkey],
- oldkey))
+ raise ValueError(
+ "Old and New apt format defined with unequal "
+ "values %s vs %s @ %s"
+ % (verify, newaptcfg[newkey], oldkey)
+ )
# return conf after clearing conflicting V1/2 keys
return oldcfg
@@ -955,17 +1004,17 @@ def convert_v2_to_v3_apt_format(oldcfg):
raise ValueError("old apt key '%s' left after conversion" % oldkey)
# insert new format into config and return full cfg with only v3 content
- oldcfg['apt'] = aptcfg
+ oldcfg["apt"] = aptcfg
return oldcfg
def convert_to_v3_apt_format(cfg):
"""convert the old list based format to the new dict based one. After that
- convert the old dict keys/format to v3 a.k.a 'new apt config'"""
+ convert the old dict keys/format to v3 a.k.a 'new apt config'"""
# V1 -> V2, the apt_sources entry from list to dict
- apt_sources = cfg.get('apt_sources', None)
+ apt_sources = cfg.get("apt_sources", None)
if apt_sources is not None:
- cfg['apt_sources'] = convert_v1_to_v2_apt_format(apt_sources)
+ cfg["apt_sources"] = convert_v1_to_v2_apt_format(apt_sources)
# V2 -> V3, move all former globals under the "apt" key
# Restructure into new key names and mirror hierarchy
@@ -997,7 +1046,12 @@ def search_for_mirror_dns(configured, mirrortype, cfg, cloud):
if mydom:
doms.append(".%s" % mydom)
- doms.extend((".localdomain", "",))
+ doms.extend(
+ (
+ ".localdomain",
+ "",
+ )
+ )
mirror_list = []
distro = cloud.distro.name
@@ -1012,12 +1066,11 @@ def search_for_mirror_dns(configured, mirrortype, cfg, cloud):
def update_mirror_info(pmirror, smirror, arch, cloud):
"""sets security mirror to primary if not defined.
- returns defaults if no mirrors are defined"""
+ returns defaults if no mirrors are defined"""
if pmirror is not None:
if smirror is None:
smirror = pmirror
- return {'PRIMARY': pmirror,
- 'SECURITY': smirror}
+ return {"PRIMARY": pmirror, "SECURITY": smirror}
# None specified at all, get default mirrors from cloud
mirror_info = cloud.datasource.get_package_mirror_info()
@@ -1026,8 +1079,8 @@ def update_mirror_info(pmirror, smirror, arch, cloud):
# arbitrary key/value pairs including 'primary' and 'security' keys.
# caller expects dict with PRIMARY and SECURITY.
m = mirror_info.copy()
- m['PRIMARY'] = m['primary']
- m['SECURITY'] = m['security']
+ m["PRIMARY"] = m["primary"]
+ m["SECURITY"] = m["security"]
return m
@@ -1037,7 +1090,7 @@ def update_mirror_info(pmirror, smirror, arch, cloud):
def get_arch_mirrorconfig(cfg, mirrortype, arch):
"""out of a list of potential mirror configurations select
- and return the one matching the architecture (or default)"""
+ and return the one matching the architecture (or default)"""
# select the mirror specification (if-any)
mirror_cfg_list = cfg.get(mirrortype, None)
if mirror_cfg_list is None:
@@ -1056,8 +1109,8 @@ def get_arch_mirrorconfig(cfg, mirrortype, arch):
def get_mirror(cfg, mirrortype, arch, cloud):
"""pass the three potential stages of mirror specification
- returns None is neither of them found anything otherwise the first
- hit is returned"""
+ returns None is neither of them found anything otherwise the first
+ hit is returned"""
mcfg = get_arch_mirrorconfig(cfg, mirrortype, arch)
if mcfg is None:
return None
@@ -1073,18 +1126,19 @@ def get_mirror(cfg, mirrortype, arch, cloud):
# fallback to search_dns if specified
if mirror is None:
# list of mirrors to try to resolve
- mirror = search_for_mirror_dns(mcfg.get("search_dns", None),
- mirrortype, cfg, cloud)
+ mirror = search_for_mirror_dns(
+ mcfg.get("search_dns", None), mirrortype, cfg, cloud
+ )
return mirror
def find_apt_mirror_info(cfg, cloud, arch=None):
"""find_apt_mirror_info
- find an apt_mirror given the cfg provided.
- It can check for separate config of primary and security mirrors
- If only primary is given security is assumed to be equal to primary
- If the generic apt_mirror is given that is defining for both
+ find an apt_mirror given the cfg provided.
+ It can check for separate config of primary and security mirrors
+ If only primary is given security is assumed to be equal to primary
+ If the generic apt_mirror is given that is defining for both
"""
if arch is None:
@@ -1105,32 +1159,35 @@ def find_apt_mirror_info(cfg, cloud, arch=None):
def apply_apt_config(cfg, proxy_fname, config_fname):
"""apply_apt_config
- Applies any apt*proxy config from if specified
+ Applies any apt*proxy config from if specified
"""
# Set up any apt proxy
- cfgs = (('proxy', 'Acquire::http::Proxy "%s";'),
- ('http_proxy', 'Acquire::http::Proxy "%s";'),
- ('ftp_proxy', 'Acquire::ftp::Proxy "%s";'),
- ('https_proxy', 'Acquire::https::Proxy "%s";'))
+ cfgs = (
+ ("proxy", 'Acquire::http::Proxy "%s";'),
+ ("http_proxy", 'Acquire::http::Proxy "%s";'),
+ ("ftp_proxy", 'Acquire::ftp::Proxy "%s";'),
+ ("https_proxy", 'Acquire::https::Proxy "%s";'),
+ )
proxies = [fmt % cfg.get(name) for (name, fmt) in cfgs if cfg.get(name)]
if len(proxies):
LOG.debug("write apt proxy info to %s", proxy_fname)
- util.write_file(proxy_fname, '\n'.join(proxies) + '\n')
+ util.write_file(proxy_fname, "\n".join(proxies) + "\n")
elif os.path.isfile(proxy_fname):
util.del_file(proxy_fname)
LOG.debug("no apt proxy configured, removed %s", proxy_fname)
- if cfg.get('conf', None):
+ if cfg.get("conf", None):
LOG.debug("write apt config info to %s", config_fname)
- util.write_file(config_fname, cfg.get('conf'))
+ util.write_file(config_fname, cfg.get("conf"))
elif os.path.isfile(config_fname):
util.del_file(config_fname)
LOG.debug("no apt config configured, removed %s", config_fname)
-def apt_key(command, output_file=None, data=None, hardened=False,
- human_output=True):
+def apt_key(
+ command, output_file=None, data=None, hardened=False, human_output=True
+):
"""apt-key replacement
commands implemented: 'add', 'list', 'finger'
@@ -1153,32 +1210,36 @@ def apt_key(command, output_file=None, data=None, hardened=False,
key_files = [APT_LOCAL_KEYS] if os.path.isfile(APT_LOCAL_KEYS) else []
for file in os.listdir(APT_TRUSTED_GPG_DIR):
- if file.endswith('.gpg') or file.endswith('.asc'):
+ if file.endswith(".gpg") or file.endswith(".asc"):
key_files.append(APT_TRUSTED_GPG_DIR + file)
- return key_files if key_files else ''
+ return key_files if key_files else ""
def apt_key_add():
"""apt-key add <file>
returns filepath to new keyring, or '/dev/null' when an error occurs
"""
- file_name = '/dev/null'
+ file_name = "/dev/null"
if not output_file:
util.logexc(
- LOG, 'Unknown filename, failed to add key: "{}"'.format(data))
+ LOG, 'Unknown filename, failed to add key: "{}"'.format(data)
+ )
else:
try:
- key_dir = \
+ key_dir = (
CLOUD_INIT_GPG_DIR if hardened else APT_TRUSTED_GPG_DIR
+ )
stdout = gpg.dearmor(data)
- file_name = '{}{}.gpg'.format(key_dir, output_file)
+ file_name = "{}{}.gpg".format(key_dir, output_file)
util.write_file(file_name, stdout)
except subp.ProcessExecutionError:
- util.logexc(LOG, 'Gpg error, failed to add key: {}'.format(
- data))
+ util.logexc(
+ LOG, "Gpg error, failed to add key: {}".format(data)
+ )
except UnicodeDecodeError:
- util.logexc(LOG, 'Decode error, failed to add key: {}'.format(
- data))
+ util.logexc(
+ LOG, "Decode error, failed to add key: {}".format(data)
+ )
return file_name
def apt_key_list():
@@ -1193,19 +1254,20 @@ def apt_key(command, output_file=None, data=None, hardened=False,
key_list.append(gpg.list(key_file, human_output=human_output))
except subp.ProcessExecutionError as error:
LOG.warning('Failed to list key "%s": %s', key_file, error)
- return '\n'.join(key_list)
+ return "\n".join(key_list)
- if command == 'add':
+ if command == "add":
return apt_key_add()
- elif command == 'finger' or command == 'list':
+ elif command == "finger" or command == "list":
return apt_key_list()
else:
raise ValueError(
- 'apt_key() commands add, list, and finger are currently supported')
+ "apt_key() commands add, list, and finger are currently supported"
+ )
CONFIG_CLEANERS = {
- 'cloud-init': clean_cloud_init,
+ "cloud-init": clean_cloud_init,
}
# vi: ts=4 expandtab
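To make the proxy handling in apply_apt_config above concrete, here is a self-contained sketch of the line rendering it performs; the cfg values are invented for the example and nothing is written to disk.

cfg = {
    "proxy": "http://squid.internal:3128",  # hypothetical apt proxy settings
    "ftp_proxy": "ftp://proxy.internal:2121",
}
cfgs = (
    ("proxy", 'Acquire::http::Proxy "%s";'),
    ("http_proxy", 'Acquire::http::Proxy "%s";'),
    ("ftp_proxy", 'Acquire::ftp::Proxy "%s";'),
    ("https_proxy", 'Acquire::https::Proxy "%s";'),
)
# keep only the settings that are actually present in cfg
proxies = [fmt % cfg.get(name) for (name, fmt) in cfgs if cfg.get(name)]
print("\n".join(proxies))
# Acquire::http::Proxy "http://squid.internal:3128";
# Acquire::ftp::Proxy "ftp://proxy.internal:2121";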
diff --git a/cloudinit/config/cc_apt_pipelining.py b/cloudinit/config/cc_apt_pipelining.py
index aa186ce2..569849d1 100644
--- a/cloudinit/config/cc_apt_pipelining.py
+++ b/cloudinit/config/cc_apt_pipelining.py
@@ -29,17 +29,19 @@ not recommended.
apt_pipelining: <false/none/unchanged/os/number>
"""
-from cloudinit.settings import PER_INSTANCE
from cloudinit import util
+from cloudinit.settings import PER_INSTANCE
frequency = PER_INSTANCE
-distros = ['ubuntu', 'debian']
+distros = ["ubuntu", "debian"]
DEFAULT_FILE = "/etc/apt/apt.conf.d/90cloud-init-pipelining"
-APT_PIPE_TPL = ("//Written by cloud-init per 'apt_pipelining'\n"
- 'Acquire::http::Pipeline-Depth "%s";\n')
+APT_PIPE_TPL = (
+ "//Written by cloud-init per 'apt_pipelining'\n"
+ 'Acquire::http::Pipeline-Depth "%s";\n'
+)
# Acquire::http::Pipeline-Depth can be a value
# from 0 to 5 indicating how many outstanding requests APT should send.
@@ -49,7 +51,7 @@ APT_PIPE_TPL = ("//Written by cloud-init per 'apt_pipelining'\n"
def handle(_name, cfg, _cloud, log, _args):
- apt_pipe_value = util.get_cfg_option_str(cfg, "apt_pipelining", 'os')
+ apt_pipe_value = util.get_cfg_option_str(cfg, "apt_pipelining", "os")
apt_pipe_value_s = str(apt_pipe_value).lower().strip()
if apt_pipe_value_s == "false":
@@ -69,4 +71,5 @@ def write_apt_snippet(setting, log, f_name):
util.write_file(f_name, file_contents)
log.debug("Wrote %s with apt pipeline depth setting %s", f_name, setting)
+
# vi: ts=4 expandtab
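A quick sketch of the snippet that the APT_PIPE_TPL template above expands to; the depth value 3 is arbitrary and chosen only for illustration.

APT_PIPE_TPL = (
    "//Written by cloud-init per 'apt_pipelining'\n"
    'Acquire::http::Pipeline-Depth "%s";\n'
)
# render the apt.conf.d snippet for a numeric pipeline depth
print(APT_PIPE_TPL % 3, end="")
# //Written by cloud-init per 'apt_pipelining'
# Acquire::http::Pipeline-Depth "3";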
diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py
index 06f7a26e..bff11a24 100644
--- a/cloudinit/config/cc_bootcmd.py
+++ b/cloudinit/config/cc_bootcmd.py
@@ -12,11 +12,9 @@
import os
from textwrap import dedent
+from cloudinit import subp, temp_utils, util
from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema
from cloudinit.settings import PER_ALWAYS
-from cloudinit import temp_utils
-from cloudinit import subp
-from cloudinit import util
frequency = PER_ALWAYS
@@ -26,13 +24,14 @@ frequency = PER_ALWAYS
# configuration options before actually attempting to deploy with said
# configuration.
-distros = ['all']
+distros = ["all"]
meta = {
- 'id': 'cc_bootcmd',
- 'name': 'Bootcmd',
- 'title': 'Run arbitrary commands early in the boot process',
- 'description': dedent("""\
+ "id": "cc_bootcmd",
+ "name": "Bootcmd",
+ "title": "Run arbitrary commands early in the boot process",
+ "description": dedent(
+ """\
This module runs arbitrary commands very early in the boot process,
only slightly after a boothook would run. This is very similar to a
boothook, but more user friendly. The environment variable
@@ -48,31 +47,37 @@ meta = {
when writing files, do not use /tmp dir as it races with
systemd-tmpfiles-clean LP: #1707222. Use /run/somedir instead.
- """),
- 'distros': distros,
- 'examples': [dedent("""\
+ """
+ ),
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
bootcmd:
- echo 192.168.1.130 us.archive.ubuntu.com > /etc/hosts
- [ cloud-init-per, once, mymkfs, mkfs, /dev/vdb ]
- """)],
- 'frequency': PER_ALWAYS,
+ """
+ )
+ ],
+ "frequency": PER_ALWAYS,
}
schema = {
- 'type': 'object',
- 'properties': {
- 'bootcmd': {
- 'type': 'array',
- 'items': {
- 'oneOf': [
- {'type': 'array', 'items': {'type': 'string'}},
- {'type': 'string'}]
+ "type": "object",
+ "properties": {
+ "bootcmd": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "array", "items": {"type": "string"}},
+ {"type": "string"},
+ ]
},
- 'additionalItems': False, # Reject items of non-string non-list
- 'additionalProperties': False,
- 'minItems': 1,
+ "additionalItems": False, # Reject items of non-string non-list
+ "additionalProperties": False,
+ "minItems": 1,
}
- }
+ },
}
__doc__ = get_meta_doc(meta, schema) # Supplement python help()
@@ -81,8 +86,9 @@ __doc__ = get_meta_doc(meta, schema) # Supplement python help()
def handle(name, cfg, cloud, log, _args):
if "bootcmd" not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'bootcmd' key in configuration"), name)
+ log.debug(
+ "Skipping module named %s, no 'bootcmd' key in configuration", name
+ )
return
validate_cloudconfig_schema(cfg, schema)
@@ -99,11 +105,12 @@ def handle(name, cfg, cloud, log, _args):
env = os.environ.copy()
iid = cloud.get_instance_id()
if iid:
- env['INSTANCE_ID'] = str(iid)
- cmd = ['/bin/sh', tmpf.name]
+ env["INSTANCE_ID"] = str(iid)
+ cmd = ["/bin/sh", tmpf.name]
subp.subp(cmd, env=env, capture=False)
except Exception:
util.logexc(log, "Failed to run bootcmd module %s", name)
raise
+
# vi: ts=4 expandtab
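A minimal sketch of the execution pattern handle() uses above: write the commands to a temporary script and run it through /bin/sh with INSTANCE_ID exported. The instance id and the script body are made-up stand-ins, and a POSIX shell is assumed.

import os
import subprocess
import tempfile

env = os.environ.copy()
env["INSTANCE_ID"] = "i-0123456789abcdef"  # stand-in for cloud.get_instance_id()
with tempfile.NamedTemporaryFile("w", suffix=".sh", delete=False) as tmpf:
    tmpf.write('echo "bootcmd ran on $INSTANCE_ID"\n')
# run the rendered script in a shell, as the module does
subprocess.run(["/bin/sh", tmpf.name], env=env, check=True)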
diff --git a/cloudinit/config/cc_byobu.py b/cloudinit/config/cc_byobu.py
index 9fdaeba1..53b6d0c8 100755
--- a/cloudinit/config/cc_byobu.py
+++ b/cloudinit/config/cc_byobu.py
@@ -38,11 +38,10 @@ Valid configuration options for this module are:
byobu_by_default: <user/system>
"""
+from cloudinit import subp, util
from cloudinit.distros import ug_util
-from cloudinit import subp
-from cloudinit import util
-distros = ['ubuntu', 'debian']
+distros = ["ubuntu", "debian"]
def handle(name, cfg, cloud, log, args):
@@ -58,8 +57,14 @@ def handle(name, cfg, cloud, log, args):
if value == "user" or value == "system":
value = "enable-%s" % value
- valid = ("enable-user", "enable-system", "enable",
- "disable-user", "disable-system", "disable")
+ valid = (
+ "enable-user",
+ "enable-system",
+ "enable",
+ "disable-user",
+ "disable-system",
+ "disable",
+ )
if value not in valid:
log.warning("Unknown value %s for byobu_by_default", value)
@@ -81,13 +86,16 @@ def handle(name, cfg, cloud, log, args):
(users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
(user, _user_config) = ug_util.extract_default(users)
if not user:
- log.warning(("No default byobu user provided, "
- "can not launch %s for the default user"), bl_inst)
+ log.warning(
+ "No default byobu user provided, "
+ "can not launch %s for the default user",
+ bl_inst,
+ )
else:
- shcmd += " sudo -Hu \"%s\" byobu-launcher-%s" % (user, bl_inst)
+ shcmd += ' sudo -Hu "%s" byobu-launcher-%s' % (user, bl_inst)
shcmd += " || X=$(($X+1)); "
if mod_sys:
- shcmd += "echo \"%s\" | debconf-set-selections" % dc_val
+ shcmd += 'echo "%s" | debconf-set-selections' % dc_val
shcmd += " && dpkg-reconfigure byobu --frontend=noninteractive"
shcmd += " || X=$(($X+1)); "
@@ -96,4 +104,5 @@ def handle(name, cfg, cloud, log, args):
log.debug("Setting byobu to %s", value)
subp.subp(cmd, capture=False)
+
# vi: ts=4 expandtab
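The value normalization above can be tried in isolation; the "user" setting below is a hypothetical byobu_by_default value.

value = "user"  # hypothetical byobu_by_default setting
if value in ("user", "system"):
    value = "enable-%s" % value  # shorthand values expand to enable-<scope>
valid = (
    "enable-user",
    "enable-system",
    "enable",
    "disable-user",
    "disable-system",
    "disable",
)
print(value, value in valid)  # enable-user True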
diff --git a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py
index bd7bead9..9de065ab 100644
--- a/cloudinit/config/cc_ca_certs.py
+++ b/cloudinit/config/cc_ca_certs.py
@@ -41,28 +41,27 @@ can be removed from the system with the configuration option
import os
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
DEFAULT_CONFIG = {
- 'ca_cert_path': '/usr/share/ca-certificates/',
- 'ca_cert_filename': 'cloud-init-ca-certs.crt',
- 'ca_cert_config': '/etc/ca-certificates.conf',
- 'ca_cert_system_path': '/etc/ssl/certs/',
- 'ca_cert_update_cmd': ['update-ca-certificates']
+ "ca_cert_path": "/usr/share/ca-certificates/",
+ "ca_cert_filename": "cloud-init-ca-certs.crt",
+ "ca_cert_config": "/etc/ca-certificates.conf",
+ "ca_cert_system_path": "/etc/ssl/certs/",
+ "ca_cert_update_cmd": ["update-ca-certificates"],
}
DISTRO_OVERRIDES = {
- 'rhel': {
- 'ca_cert_path': '/usr/share/pki/ca-trust-source/',
- 'ca_cert_filename': 'anchors/cloud-init-ca-certs.crt',
- 'ca_cert_config': None,
- 'ca_cert_system_path': '/etc/pki/ca-trust/',
- 'ca_cert_update_cmd': ['update-ca-trust']
+ "rhel": {
+ "ca_cert_path": "/usr/share/pki/ca-trust-source/",
+ "ca_cert_filename": "anchors/cloud-init-ca-certs.crt",
+ "ca_cert_config": None,
+ "ca_cert_system_path": "/etc/pki/ca-trust/",
+ "ca_cert_update_cmd": ["update-ca-trust"],
}
}
-distros = ['alpine', 'debian', 'ubuntu', 'rhel']
+distros = ["alpine", "debian", "ubuntu", "rhel"]
def _distro_ca_certs_configs(distro_name):
@@ -72,8 +71,9 @@ def _distro_ca_certs_configs(distro_name):
@returns: Dict of distro configurations for ca-cert.
"""
cfg = DISTRO_OVERRIDES.get(distro_name, DEFAULT_CONFIG)
- cfg['ca_cert_full_path'] = os.path.join(cfg['ca_cert_path'],
- cfg['ca_cert_filename'])
+ cfg["ca_cert_full_path"] = os.path.join(
+ cfg["ca_cert_path"], cfg["ca_cert_filename"]
+ )
return cfg
@@ -83,7 +83,7 @@ def update_ca_certs(distro_cfg):
@param distro_cfg: A hash providing _distro_ca_certs_configs function.
"""
- subp.subp(distro_cfg['ca_cert_update_cmd'], capture=False)
+ subp.subp(distro_cfg["ca_cert_update_cmd"], capture=False)
def add_ca_certs(distro_cfg, certs):
@@ -98,9 +98,9 @@ def add_ca_certs(distro_cfg, certs):
return
# First ensure they are strings...
cert_file_contents = "\n".join([str(c) for c in certs])
- util.write_file(distro_cfg['ca_cert_full_path'],
- cert_file_contents,
- mode=0o644)
+ util.write_file(
+ distro_cfg["ca_cert_full_path"], cert_file_contents, mode=0o644
+ )
update_cert_config(distro_cfg)
@@ -110,23 +110,27 @@ def update_cert_config(distro_cfg):
@param distro_cfg: A hash providing _distro_ca_certs_configs function.
"""
- if distro_cfg['ca_cert_config'] is None:
+ if distro_cfg["ca_cert_config"] is None:
return
- if os.stat(distro_cfg['ca_cert_config']).st_size == 0:
+ if os.stat(distro_cfg["ca_cert_config"]).st_size == 0:
# If the CA_CERT_CONFIG file is empty (i.e. all existing
# CA certs have been deleted) then simply output a single
# line with the cloud-init cert filename.
- out = "%s\n" % distro_cfg['ca_cert_filename']
+ out = "%s\n" % distro_cfg["ca_cert_filename"]
else:
# Append cert filename to CA_CERT_CONFIG file.
# We have to strip the content because blank lines in the file
# causes subsequent entries to be ignored. (LP: #1077020)
- orig = util.load_file(distro_cfg['ca_cert_config'])
- cr_cont = '\n'.join([line for line in orig.splitlines()
- if line != distro_cfg['ca_cert_filename']])
- out = "%s\n%s\n" % (cr_cont.rstrip(),
- distro_cfg['ca_cert_filename'])
- util.write_file(distro_cfg['ca_cert_config'], out, omode="wb")
+ orig = util.load_file(distro_cfg["ca_cert_config"])
+ cr_cont = "\n".join(
+ [
+ line
+ for line in orig.splitlines()
+ if line != distro_cfg["ca_cert_filename"]
+ ]
+ )
+ out = "%s\n%s\n" % (cr_cont.rstrip(), distro_cfg["ca_cert_filename"])
+ util.write_file(distro_cfg["ca_cert_config"], out, omode="wb")
def remove_default_ca_certs(distro_name, distro_cfg):
@@ -137,14 +141,15 @@ def remove_default_ca_certs(distro_name, distro_cfg):
@param distro_name: String providing the distro class name.
@param distro_cfg: A hash providing _distro_ca_certs_configs function.
"""
- util.delete_dir_contents(distro_cfg['ca_cert_path'])
- util.delete_dir_contents(distro_cfg['ca_cert_system_path'])
- util.write_file(distro_cfg['ca_cert_config'], "", mode=0o644)
+ util.delete_dir_contents(distro_cfg["ca_cert_path"])
+ util.delete_dir_contents(distro_cfg["ca_cert_system_path"])
+ util.write_file(distro_cfg["ca_cert_config"], "", mode=0o644)
- if distro_name in ['debian', 'ubuntu']:
+ if distro_name in ["debian", "ubuntu"]:
debconf_sel = (
- "ca-certificates ca-certificates/trust_new_crts " + "select no")
- subp.subp(('debconf-set-selections', '-'), debconf_sel)
+ "ca-certificates ca-certificates/trust_new_crts " + "select no"
+ )
+ subp.subp(("debconf-set-selections", "-"), debconf_sel)
def handle(name, cfg, cloud, log, _args):
@@ -159,11 +164,13 @@ def handle(name, cfg, cloud, log, _args):
"""
# If there isn't a ca-certs section in the configuration don't do anything
if "ca-certs" not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'ca-certs' key in configuration"), name)
+ log.debug(
+ "Skipping module named %s, no 'ca-certs' key in configuration",
+ name,
+ )
return
- ca_cert_cfg = cfg['ca-certs']
+ ca_cert_cfg = cfg["ca-certs"]
distro_cfg = _distro_ca_certs_configs(cloud.distro.name)
# If there is a remove-defaults option set to true, remove the system
@@ -183,4 +190,5 @@ def handle(name, cfg, cloud, log, _args):
log.debug("Updating certificates")
update_ca_certs(distro_cfg)
+
# vi: ts=4 expandtab
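A standalone restatement of the append branch of update_cert_config shown above: any line matching the cloud-init cert filename is dropped, trailing whitespace is stripped, and the filename is appended. The filenames are illustrative only.

def updated_ca_cert_config(existing_text, cert_filename):
    # drop any stale entry for our filename, then re-append it at the end
    kept = "\n".join(
        line
        for line in existing_text.splitlines()
        if line != cert_filename
    )
    return "%s\n%s\n" % (kept.rstrip(), cert_filename)

print(
    updated_ca_cert_config("mozilla/SomeCA.crt\n\n", "cloud-init-ca-certs.crt"),
    end="",
)
# mozilla/SomeCA.crt
# cloud-init-ca-certs.crt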
diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py
index ed734d1c..67889683 100644
--- a/cloudinit/config/cc_chef.py
+++ b/cloudinit/config/cc_chef.py
@@ -13,87 +13,91 @@ import json
import os
from textwrap import dedent
-from cloudinit import subp
+from cloudinit import subp, temp_utils, templater, url_helper, util
from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema
-from cloudinit import templater
-from cloudinit import temp_utils
-from cloudinit import url_helper
-from cloudinit import util
from cloudinit.settings import PER_ALWAYS
-
RUBY_VERSION_DEFAULT = "1.8"
-CHEF_DIRS = tuple([
- '/etc/chef',
- '/var/log/chef',
- '/var/lib/chef',
- '/var/cache/chef',
- '/var/backups/chef',
- '/var/run/chef',
-])
-REQUIRED_CHEF_DIRS = tuple([
- '/etc/chef',
-])
+CHEF_DIRS = tuple(
+ [
+ "/etc/chef",
+ "/var/log/chef",
+ "/var/lib/chef",
+ "/var/cache/chef",
+ "/var/backups/chef",
+ "/var/run/chef",
+ ]
+)
+REQUIRED_CHEF_DIRS = tuple(
+ [
+ "/etc/chef",
+ ]
+)
# Used if fetching chef from a omnibus style package
OMNIBUS_URL = "https://www.chef.io/chef/install.sh"
OMNIBUS_URL_RETRIES = 5
-CHEF_VALIDATION_PEM_PATH = '/etc/chef/validation.pem'
-CHEF_ENCRYPTED_DATA_BAG_PATH = '/etc/chef/encrypted_data_bag_secret'
-CHEF_ENVIRONMENT = '_default'
-CHEF_FB_PATH = '/etc/chef/firstboot.json'
+CHEF_VALIDATION_PEM_PATH = "/etc/chef/validation.pem"
+CHEF_ENCRYPTED_DATA_BAG_PATH = "/etc/chef/encrypted_data_bag_secret"
+CHEF_ENVIRONMENT = "_default"
+CHEF_FB_PATH = "/etc/chef/firstboot.json"
CHEF_RB_TPL_DEFAULTS = {
# These are ruby symbols...
- 'ssl_verify_mode': ':verify_none',
- 'log_level': ':info',
+ "ssl_verify_mode": ":verify_none",
+ "log_level": ":info",
# These are not symbols...
- 'log_location': '/var/log/chef/client.log',
- 'validation_key': CHEF_VALIDATION_PEM_PATH,
- 'validation_cert': None,
- 'client_key': '/etc/chef/client.pem',
- 'json_attribs': CHEF_FB_PATH,
- 'file_cache_path': '/var/cache/chef',
- 'file_backup_path': '/var/backups/chef',
- 'pid_file': '/var/run/chef/client.pid',
- 'show_time': True,
- 'encrypted_data_bag_secret': None,
+ "log_location": "/var/log/chef/client.log",
+ "validation_key": CHEF_VALIDATION_PEM_PATH,
+ "validation_cert": None,
+ "client_key": "/etc/chef/client.pem",
+ "json_attribs": CHEF_FB_PATH,
+ "file_cache_path": "/var/cache/chef",
+ "file_backup_path": "/var/backups/chef",
+ "pid_file": "/var/run/chef/client.pid",
+ "show_time": True,
+ "encrypted_data_bag_secret": None,
}
-CHEF_RB_TPL_BOOL_KEYS = frozenset(['show_time'])
-CHEF_RB_TPL_PATH_KEYS = frozenset([
- 'log_location',
- 'validation_key',
- 'client_key',
- 'file_cache_path',
- 'json_attribs',
- 'pid_file',
- 'encrypted_data_bag_secret',
-])
+CHEF_RB_TPL_BOOL_KEYS = frozenset(["show_time"])
+CHEF_RB_TPL_PATH_KEYS = frozenset(
+ [
+ "log_location",
+ "validation_key",
+ "client_key",
+ "file_cache_path",
+ "json_attribs",
+ "pid_file",
+ "encrypted_data_bag_secret",
+ ]
+)
CHEF_RB_TPL_KEYS = list(CHEF_RB_TPL_DEFAULTS.keys())
CHEF_RB_TPL_KEYS.extend(CHEF_RB_TPL_BOOL_KEYS)
CHEF_RB_TPL_KEYS.extend(CHEF_RB_TPL_PATH_KEYS)
-CHEF_RB_TPL_KEYS.extend([
- 'server_url',
- 'node_name',
- 'environment',
- 'validation_name',
- 'chef_license',
-])
+CHEF_RB_TPL_KEYS.extend(
+ [
+ "server_url",
+ "node_name",
+ "environment",
+ "validation_name",
+ "chef_license",
+ ]
+)
CHEF_RB_TPL_KEYS = frozenset(CHEF_RB_TPL_KEYS)
-CHEF_RB_PATH = '/etc/chef/client.rb'
-CHEF_EXEC_PATH = '/usr/bin/chef-client'
-CHEF_EXEC_DEF_ARGS = tuple(['-d', '-i', '1800', '-s', '20'])
+CHEF_RB_PATH = "/etc/chef/client.rb"
+CHEF_EXEC_PATH = "/usr/bin/chef-client"
+CHEF_EXEC_DEF_ARGS = tuple(["-d", "-i", "1800", "-s", "20"])
frequency = PER_ALWAYS
distros = ["all"]
meta = {
- 'id': 'cc_chef',
- 'name': 'Chef',
- 'title': 'module that configures, starts and installs chef',
- 'description': dedent("""\
+ "id": "cc_chef",
+ "name": "Chef",
+ "title": "module that configures, starts and installs chef",
+ "description": dedent(
+ """\
This module enables chef to be installed (from packages,
gems, or from omnibus). Before this occurs, chef configuration is
written to disk (validation.pem, client.pem, firstboot.json,
@@ -101,9 +105,12 @@ meta = {
/var/log/chef and so-on). If configured, chef will be
installed and started in either daemon or non-daemon mode.
If run in non-daemon mode, post run actions are executed to do
- finishing activities such as removing validation.pem."""),
- 'distros': distros,
- 'examples': [dedent("""
+ finishing activities such as removing validation.pem."""
+ ),
+ "distros": distros,
+ "examples": [
+ dedent(
+ """
chef:
directories:
- /etc/chef
@@ -124,180 +131,237 @@ meta = {
omnibus_url_retries: 2
server_url: https://chef.yourorg.com:4000
ssl_verify_mode: :verify_peer
- validation_name: yourorg-validator""")],
- 'frequency': frequency,
+ validation_name: yourorg-validator"""
+ )
+ ],
+ "frequency": frequency,
}
schema = {
- 'type': 'object',
- 'properties': {
- 'chef': {
- 'type': 'object',
- 'additionalProperties': False,
- 'properties': {
- 'directories': {
- 'type': 'array',
- 'items': {
- 'type': 'string'
- },
- 'uniqueItems': True,
- 'description': dedent("""\
+ "type": "object",
+ "properties": {
+ "chef": {
+ "type": "object",
+ "additionalProperties": False,
+ "properties": {
+ "directories": {
+ "type": "array",
+ "items": {"type": "string"},
+ "uniqueItems": True,
+ "description": dedent(
+ """\
Create the necessary directories for chef to run. By
default, it creates the following directories:
- {chef_dirs}""").format(
+ {chef_dirs}"""
+ ).format(
chef_dirs="\n".join(
[" - ``{}``".format(d) for d in CHEF_DIRS]
)
- )
+ ),
},
- 'validation_cert': {
- 'type': 'string',
- 'description': dedent("""\
+ "validation_cert": {
+ "type": "string",
+ "description": dedent(
+ """\
Optional string to be written to file validation_key.
Special value ``system`` means set use existing file.
- """)
+ """
+ ),
},
- 'validation_key': {
- 'type': 'string',
- 'default': CHEF_VALIDATION_PEM_PATH,
- 'description': dedent("""\
+ "validation_key": {
+ "type": "string",
+ "default": CHEF_VALIDATION_PEM_PATH,
+ "description": dedent(
+ """\
Optional path for validation_cert. default to
- ``{}``.""".format(CHEF_VALIDATION_PEM_PATH))
+ ``{}``.""".format(
+ CHEF_VALIDATION_PEM_PATH
+ )
+ ),
},
- 'firstboot_path': {
- 'type': 'string',
- 'default': CHEF_FB_PATH,
- 'description': dedent("""\
+ "firstboot_path": {
+ "type": "string",
+ "default": CHEF_FB_PATH,
+ "description": dedent(
+ """\
Path to write run_list and initial_attributes keys that
should also be present in this configuration, defaults
- to ``{}``.""".format(CHEF_FB_PATH))
+ to ``{}``.""".format(
+ CHEF_FB_PATH
+ )
+ ),
},
- 'exec': {
- 'type': 'boolean',
- 'default': False,
- 'description': dedent("""\
+ "exec": {
+ "type": "boolean",
+ "default": False,
+ "description": dedent(
+ """\
define if we should run or not run chef (defaults to
false, unless a gem installed is requested where this
- will then default to true).""")
+ will then default to true)."""
+ ),
},
- 'client_key': {
- 'type': 'string',
- 'default': CHEF_RB_TPL_DEFAULTS['client_key'],
- 'description': dedent("""\
+ "client_key": {
+ "type": "string",
+ "default": CHEF_RB_TPL_DEFAULTS["client_key"],
+ "description": dedent(
+ """\
Optional path for client_cert. default to
- ``{}``.""".format(CHEF_RB_TPL_DEFAULTS['client_key']))
+ ``{}``.""".format(
+ CHEF_RB_TPL_DEFAULTS["client_key"]
+ )
+ ),
},
- 'encrypted_data_bag_secret': {
- 'type': 'string',
- 'default': None,
- 'description': dedent("""\
+ "encrypted_data_bag_secret": {
+ "type": "string",
+ "default": None,
+ "description": dedent(
+ """\
Specifies the location of the secret key used by chef
to encrypt data items. By default, this path is set
to None, meaning that chef will have to look at the
path ``{}`` for it.
- """.format(CHEF_ENCRYPTED_DATA_BAG_PATH))
+ """.format(
+ CHEF_ENCRYPTED_DATA_BAG_PATH
+ )
+ ),
},
- 'environment': {
- 'type': 'string',
- 'default': CHEF_ENVIRONMENT,
- 'description': dedent("""\
+ "environment": {
+ "type": "string",
+ "default": CHEF_ENVIRONMENT,
+ "description": dedent(
+ """\
Specifies which environment chef will use. By default,
it will use the ``{}`` configuration.
- """.format(CHEF_ENVIRONMENT))
+ """.format(
+ CHEF_ENVIRONMENT
+ )
+ ),
},
- 'file_backup_path': {
- 'type': 'string',
- 'default': CHEF_RB_TPL_DEFAULTS['file_backup_path'],
- 'description': dedent("""\
+ "file_backup_path": {
+ "type": "string",
+ "default": CHEF_RB_TPL_DEFAULTS["file_backup_path"],
+ "description": dedent(
+ """\
Specifies the location in which backup files are
stored. By default, it uses the
``{}`` location.""".format(
- CHEF_RB_TPL_DEFAULTS['file_backup_path']))
+ CHEF_RB_TPL_DEFAULTS["file_backup_path"]
+ )
+ ),
},
- 'file_cache_path': {
- 'type': 'string',
- 'default': CHEF_RB_TPL_DEFAULTS['file_cache_path'],
- 'description': dedent("""\
+ "file_cache_path": {
+ "type": "string",
+ "default": CHEF_RB_TPL_DEFAULTS["file_cache_path"],
+ "description": dedent(
+ """\
Specifies the location in which chef cache files will
be saved. By default, it uses the ``{}``
location.""".format(
- CHEF_RB_TPL_DEFAULTS['file_cache_path']))
+ CHEF_RB_TPL_DEFAULTS["file_cache_path"]
+ )
+ ),
},
- 'json_attribs': {
- 'type': 'string',
- 'default': CHEF_FB_PATH,
- 'description': dedent("""\
+ "json_attribs": {
+ "type": "string",
+ "default": CHEF_FB_PATH,
+ "description": dedent(
+ """\
Specifies the location in which some chef json data is
stored. By default, it uses the
- ``{}`` location.""".format(CHEF_FB_PATH))
+ ``{}`` location.""".format(
+ CHEF_FB_PATH
+ )
+ ),
},
- 'log_level': {
- 'type': 'string',
- 'default': CHEF_RB_TPL_DEFAULTS['log_level'],
- 'description': dedent("""\
+ "log_level": {
+ "type": "string",
+ "default": CHEF_RB_TPL_DEFAULTS["log_level"],
+ "description": dedent(
+ """\
Defines the level of logging to be stored in the log
file. By default this value is set to ``{}``.
- """.format(CHEF_RB_TPL_DEFAULTS['log_level']))
+ """.format(
+ CHEF_RB_TPL_DEFAULTS["log_level"]
+ )
+ ),
},
- 'log_location': {
- 'type': 'string',
- 'default': CHEF_RB_TPL_DEFAULTS['log_location'],
- 'description': dedent("""\
+ "log_location": {
+ "type": "string",
+ "default": CHEF_RB_TPL_DEFAULTS["log_location"],
+ "description": dedent(
+ """\
Specifies the location of the chef log file. By
default, the location is specified at
``{}``.""".format(
- CHEF_RB_TPL_DEFAULTS['log_location']))
+ CHEF_RB_TPL_DEFAULTS["log_location"]
+ )
+ ),
},
- 'node_name': {
- 'type': 'string',
- 'description': dedent("""\
+ "node_name": {
+ "type": "string",
+ "description": dedent(
+ """\
The name of the node to run. By default, we will
- use the instance id as the node name.""")
+ use the instance id as the node name."""
+ ),
},
- 'omnibus_url': {
- 'type': 'string',
- 'default': OMNIBUS_URL,
- 'description': dedent("""\
+ "omnibus_url": {
+ "type": "string",
+ "default": OMNIBUS_URL,
+ "description": dedent(
+ """\
Omnibus URL if chef should be installed through
Omnibus. By default, it uses the
- ``{}``.""".format(OMNIBUS_URL))
+ ``{}``.""".format(
+ OMNIBUS_URL
+ )
+ ),
},
- 'omnibus_url_retries': {
- 'type': 'integer',
- 'default': OMNIBUS_URL_RETRIES,
- 'description': dedent("""\
+ "omnibus_url_retries": {
+ "type": "integer",
+ "default": OMNIBUS_URL_RETRIES,
+ "description": dedent(
+ """\
The number of retries that will be attempted to reach
- the Omnibus URL""")
+ the Omnibus URL"""
+ ),
},
- 'omnibus_version': {
- 'type': 'string',
- 'description': dedent("""\
+ "omnibus_version": {
+ "type": "string",
+ "description": dedent(
+ """\
Optional version string to require for omnibus
- install.""")
+ install."""
+ ),
},
- 'pid_file': {
- 'type': 'string',
- 'default': CHEF_RB_TPL_DEFAULTS['pid_file'],
- 'description': dedent("""\
+ "pid_file": {
+ "type": "string",
+ "default": CHEF_RB_TPL_DEFAULTS["pid_file"],
+ "description": dedent(
+ """\
The location in which a process identification
number (pid) is saved. By default, it saves
in the ``{}`` location.""".format(
- CHEF_RB_TPL_DEFAULTS['pid_file']))
+ CHEF_RB_TPL_DEFAULTS["pid_file"]
+ )
+ ),
},
- 'server_url': {
- 'type': 'string',
- 'description': 'The URL for the chef server'
+ "server_url": {
+ "type": "string",
+ "description": "The URL for the chef server",
},
- 'show_time': {
- 'type': 'boolean',
- 'default': True,
- 'description': 'Show time in chef logs'
+ "show_time": {
+ "type": "boolean",
+ "default": True,
+ "description": "Show time in chef logs",
},
- 'ssl_verify_mode': {
- 'type': 'string',
- 'default': CHEF_RB_TPL_DEFAULTS['ssl_verify_mode'],
- 'description': dedent("""\
+ "ssl_verify_mode": {
+ "type": "string",
+ "default": CHEF_RB_TPL_DEFAULTS["ssl_verify_mode"],
+ "description": dedent(
+ """\
Set the verify mode for HTTPS requests. We can have
two possible values for this parameter:
@@ -306,67 +370,76 @@ schema = {
- ``:verify_peer``: Validate all SSL certificates.
By default, the parameter is set as ``{}``.
- """.format(CHEF_RB_TPL_DEFAULTS['ssl_verify_mode']))
+ """.format(
+ CHEF_RB_TPL_DEFAULTS["ssl_verify_mode"]
+ )
+ ),
},
- 'validation_name': {
- 'type': 'string',
- 'description': dedent("""\
+ "validation_name": {
+ "type": "string",
+ "description": dedent(
+ """\
The name of the chef-validator key that Chef Infra
Client uses to access the Chef Infra Server during
- the initial Chef Infra Client run.""")
+ the initial Chef Infra Client run."""
+ ),
},
- 'force_install': {
- 'type': 'boolean',
- 'default': False,
- 'description': dedent("""\
+ "force_install": {
+ "type": "boolean",
+ "default": False,
+ "description": dedent(
+ """\
If set to ``True``, forces chef installation, even
- if it is already installed.""")
+ if it is already installed."""
+ ),
},
- 'initial_attributes': {
- 'type': 'object',
- 'items': {
- 'type': 'string'
- },
- 'description': dedent("""\
+ "initial_attributes": {
+ "type": "object",
+ "items": {"type": "string"},
+ "description": dedent(
+ """\
Specify a list of initial attributes used by the
- cookbooks.""")
+ cookbooks."""
+ ),
},
- 'install_type': {
- 'type': 'string',
- 'default': 'packages',
- 'description': dedent("""\
+ "install_type": {
+ "type": "string",
+ "default": "packages",
+ "description": dedent(
+ """\
The type of installation for chef. It can be one of
the following values:
- ``packages``
- ``gems``
- - ``omnibus``""")
+ - ``omnibus``"""
+ ),
},
- 'run_list': {
- 'type': 'array',
- 'items': {
- 'type': 'string'
- },
- 'description': 'A run list for a first boot json.'
+ "run_list": {
+ "type": "array",
+ "items": {"type": "string"},
+ "description": "A run list for a first boot json.",
},
"chef_license": {
- 'type': 'string',
- 'description': dedent("""\
+ "type": "string",
+ "description": dedent(
+ """\
string that indicates if user accepts or not license
- related to some of chef products""")
- }
- }
+ related to some of chef products"""
+ ),
+ },
+ },
}
- }
+ },
}
__doc__ = get_meta_doc(meta, schema)
def post_run_chef(chef_cfg, log):
- delete_pem = util.get_cfg_option_bool(chef_cfg,
- 'delete_validation_post_exec',
- default=False)
+ delete_pem = util.get_cfg_option_bool(
+ chef_cfg, "delete_validation_post_exec", default=False
+ )
if delete_pem and os.path.isfile(CHEF_VALIDATION_PEM_PATH):
os.unlink(CHEF_VALIDATION_PEM_PATH)
@@ -389,16 +462,20 @@ def get_template_params(iid, chef_cfg, log):
else:
params[k] = util.get_cfg_option_str(chef_cfg, k)
# These ones are overwritten to be exact values...
- params.update({
- 'generated_by': util.make_header(),
- 'node_name': util.get_cfg_option_str(chef_cfg, 'node_name',
- default=iid),
- 'environment': util.get_cfg_option_str(chef_cfg, 'environment',
- default='_default'),
- # These two are mandatory...
- 'server_url': chef_cfg['server_url'],
- 'validation_name': chef_cfg['validation_name'],
- })
+ params.update(
+ {
+ "generated_by": util.make_header(),
+ "node_name": util.get_cfg_option_str(
+ chef_cfg, "node_name", default=iid
+ ),
+ "environment": util.get_cfg_option_str(
+ chef_cfg, "environment", default="_default"
+ ),
+ # These two are mandatory...
+ "server_url": chef_cfg["server_url"],
+ "validation_name": chef_cfg["validation_name"],
+ }
+ )
return params
@@ -406,35 +483,38 @@ def handle(name, cfg, cloud, log, _args):
"""Handler method activated by cloud-init."""
# If there isn't a chef key in the configuration don't do anything
- if 'chef' not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'chef' key in configuration"), name)
+ if "chef" not in cfg:
+ log.debug(
+ "Skipping module named %s, no 'chef' key in configuration", name
+ )
return
validate_cloudconfig_schema(cfg, schema)
- chef_cfg = cfg['chef']
+ chef_cfg = cfg["chef"]
# Ensure the chef directories we use exist
- chef_dirs = util.get_cfg_option_list(chef_cfg, 'directories')
+ chef_dirs = util.get_cfg_option_list(chef_cfg, "directories")
if not chef_dirs:
chef_dirs = list(CHEF_DIRS)
for d in itertools.chain(chef_dirs, REQUIRED_CHEF_DIRS):
util.ensure_dir(d)
- vkey_path = chef_cfg.get('validation_key', CHEF_VALIDATION_PEM_PATH)
- vcert = chef_cfg.get('validation_cert')
+ vkey_path = chef_cfg.get("validation_key", CHEF_VALIDATION_PEM_PATH)
+ vcert = chef_cfg.get("validation_cert")
# special value 'system' means do not overwrite the file
# but still render the template to contain 'validation_key'
if vcert:
if vcert != "system":
util.write_file(vkey_path, vcert)
elif not os.path.isfile(vkey_path):
- log.warning("chef validation_cert provided as 'system', but "
- "validation_key path '%s' does not exist.",
- vkey_path)
+ log.warning(
+ "chef validation_cert provided as 'system', but "
+ "validation_key path '%s' does not exist.",
+ vkey_path,
+ )
# Create the chef config from template
- template_fn = cloud.get_template_filename('chef_client.rb')
+ template_fn = cloud.get_template_filename("chef_client.rb")
if template_fn:
iid = str(cloud.datasource.get_instance_id())
params = get_template_params(iid, chef_cfg, log)
@@ -448,32 +528,33 @@ def handle(name, cfg, cloud, log, _args):
util.ensure_dirs(param_paths)
templater.render_to_file(template_fn, CHEF_RB_PATH, params)
else:
- log.warning("No template found, not rendering to %s",
- CHEF_RB_PATH)
+ log.warning("No template found, not rendering to %s", CHEF_RB_PATH)
# Set the firstboot json
- fb_filename = util.get_cfg_option_str(chef_cfg, 'firstboot_path',
- default=CHEF_FB_PATH)
+ fb_filename = util.get_cfg_option_str(
+ chef_cfg, "firstboot_path", default=CHEF_FB_PATH
+ )
if not fb_filename:
log.info("First boot path empty, not writing first boot json file")
else:
initial_json = {}
- if 'run_list' in chef_cfg:
- initial_json['run_list'] = chef_cfg['run_list']
- if 'initial_attributes' in chef_cfg:
- initial_attributes = chef_cfg['initial_attributes']
+ if "run_list" in chef_cfg:
+ initial_json["run_list"] = chef_cfg["run_list"]
+ if "initial_attributes" in chef_cfg:
+ initial_attributes = chef_cfg["initial_attributes"]
for k in list(initial_attributes.keys()):
initial_json[k] = initial_attributes[k]
util.write_file(fb_filename, json.dumps(initial_json))
# Try to install chef, if its not already installed...
- force_install = util.get_cfg_option_bool(chef_cfg,
- 'force_install', default=False)
+ force_install = util.get_cfg_option_bool(
+ chef_cfg, "force_install", default=False
+ )
installed = subp.is_exe(CHEF_EXEC_PATH)
if not installed or force_install:
run = install_chef(cloud, chef_cfg, log)
elif installed:
- run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False)
+ run = util.get_cfg_option_bool(chef_cfg, "exec", default=False)
else:
run = False
if run:
@@ -482,18 +563,21 @@ def handle(name, cfg, cloud, log, _args):
def run_chef(chef_cfg, log):
- log.debug('Running chef-client')
+ log.debug("Running chef-client")
cmd = [CHEF_EXEC_PATH]
- if 'exec_arguments' in chef_cfg:
- cmd_args = chef_cfg['exec_arguments']
+ if "exec_arguments" in chef_cfg:
+ cmd_args = chef_cfg["exec_arguments"]
if isinstance(cmd_args, (list, tuple)):
cmd.extend(cmd_args)
elif isinstance(cmd_args, str):
cmd.append(cmd_args)
else:
- log.warning("Unknown type %s provided for chef"
- " 'exec_arguments' expected list, tuple,"
- " or string", type(cmd_args))
+ log.warning(
+ "Unknown type %s provided for chef"
+ " 'exec_arguments' expected list, tuple,"
+ " or string",
+ type(cmd_args),
+ )
cmd.extend(CHEF_EXEC_DEF_ARGS)
else:
cmd.extend(CHEF_EXEC_DEF_ARGS)
@@ -507,16 +591,16 @@ def subp_blob_in_tempfile(blob, *args, **kwargs):
The 'args' argument to subp will be updated with the full path to the
filename as the first argument.
"""
- basename = kwargs.pop('basename', "subp_blob")
+ basename = kwargs.pop("basename", "subp_blob")
- if len(args) == 0 and 'args' not in kwargs:
+ if len(args) == 0 and "args" not in kwargs:
args = [tuple()]
# Use tmpdir over tmpfile to avoid 'text file busy' on execute
with temp_utils.tempdir(needs_exe=True) as tmpd:
tmpf = os.path.join(tmpd, basename)
- if 'args' in kwargs:
- kwargs['args'] = [tmpf] + list(kwargs['args'])
+ if "args" in kwargs:
+ kwargs["args"] = [tmpf] + list(kwargs["args"])
else:
args = list(args)
args[0] = [tmpf] + args[0]
@@ -543,36 +627,39 @@ def install_chef_from_omnibus(url=None, retries=None, omnibus_version=None):
if omnibus_version is None:
args = []
else:
- args = ['-v', omnibus_version]
+ args = ["-v", omnibus_version]
content = url_helper.readurl(url=url, retries=retries).contents
return subp_blob_in_tempfile(
- blob=content, args=args,
- basename='chef-omnibus-install', capture=False)
+ blob=content, args=args, basename="chef-omnibus-install", capture=False
+ )
def install_chef(cloud, chef_cfg, log):
# If chef is not installed, we install chef based on 'install_type'
- install_type = util.get_cfg_option_str(chef_cfg, 'install_type',
- 'packages')
- run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False)
+ install_type = util.get_cfg_option_str(
+ chef_cfg, "install_type", "packages"
+ )
+ run = util.get_cfg_option_bool(chef_cfg, "exec", default=False)
if install_type == "gems":
# This will install and run the chef-client from gems
- chef_version = util.get_cfg_option_str(chef_cfg, 'version', None)
- ruby_version = util.get_cfg_option_str(chef_cfg, 'ruby_version',
- RUBY_VERSION_DEFAULT)
+ chef_version = util.get_cfg_option_str(chef_cfg, "version", None)
+ ruby_version = util.get_cfg_option_str(
+ chef_cfg, "ruby_version", RUBY_VERSION_DEFAULT
+ )
install_chef_from_gems(ruby_version, chef_version, cloud.distro)
# Retain backwards compat, by preferring True instead of False
# when not provided/overriden...
- run = util.get_cfg_option_bool(chef_cfg, 'exec', default=True)
- elif install_type == 'packages':
+ run = util.get_cfg_option_bool(chef_cfg, "exec", default=True)
+ elif install_type == "packages":
# This will install and run the chef-client from packages
- cloud.distro.install_packages(('chef',))
- elif install_type == 'omnibus':
+ cloud.distro.install_packages(("chef",))
+ elif install_type == "omnibus":
omnibus_version = util.get_cfg_option_str(chef_cfg, "omnibus_version")
install_chef_from_omnibus(
url=util.get_cfg_option_str(chef_cfg, "omnibus_url"),
retries=util.get_cfg_option_int(chef_cfg, "omnibus_url_retries"),
- omnibus_version=omnibus_version)
+ omnibus_version=omnibus_version,
+ )
else:
log.warning("Unknown chef install type '%s'", install_type)
run = False
@@ -581,25 +668,47 @@ def install_chef(cloud, chef_cfg, log):
def get_ruby_packages(version):
# return a list of packages needed to install ruby at version
- pkgs = ['ruby%s' % version, 'ruby%s-dev' % version]
+ pkgs = ["ruby%s" % version, "ruby%s-dev" % version]
if version == "1.8":
- pkgs.extend(('libopenssl-ruby1.8', 'rubygems1.8'))
+ pkgs.extend(("libopenssl-ruby1.8", "rubygems1.8"))
return pkgs
def install_chef_from_gems(ruby_version, chef_version, distro):
distro.install_packages(get_ruby_packages(ruby_version))
- if not os.path.exists('/usr/bin/gem'):
- util.sym_link('/usr/bin/gem%s' % ruby_version, '/usr/bin/gem')
- if not os.path.exists('/usr/bin/ruby'):
- util.sym_link('/usr/bin/ruby%s' % ruby_version, '/usr/bin/ruby')
+ if not os.path.exists("/usr/bin/gem"):
+ util.sym_link("/usr/bin/gem%s" % ruby_version, "/usr/bin/gem")
+ if not os.path.exists("/usr/bin/ruby"):
+ util.sym_link("/usr/bin/ruby%s" % ruby_version, "/usr/bin/ruby")
if chef_version:
- subp.subp(['/usr/bin/gem', 'install', 'chef',
- '-v %s' % chef_version, '--no-ri',
- '--no-rdoc', '--bindir', '/usr/bin', '-q'], capture=False)
+ subp.subp(
+ [
+ "/usr/bin/gem",
+ "install",
+ "chef",
+ "-v %s" % chef_version,
+ "--no-ri",
+ "--no-rdoc",
+ "--bindir",
+ "/usr/bin",
+ "-q",
+ ],
+ capture=False,
+ )
else:
- subp.subp(['/usr/bin/gem', 'install', 'chef',
- '--no-ri', '--no-rdoc', '--bindir',
- '/usr/bin', '-q'], capture=False)
+ subp.subp(
+ [
+ "/usr/bin/gem",
+ "install",
+ "chef",
+ "--no-ri",
+ "--no-rdoc",
+ "--bindir",
+ "/usr/bin",
+ "-q",
+ ],
+ capture=False,
+ )
+
# vi: ts=4 expandtab
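To illustrate the firstboot.json assembly performed in handle() above, a self-contained sketch with an invented "chef" section; the run list and attributes are examples only.

import json

chef_cfg = {
    "run_list": ["recipe[apache2]", "role[webserver]"],
    "initial_attributes": {"apache": {"prefork": {"maxclients": 100}}},
}
initial_json = {}
if "run_list" in chef_cfg:
    initial_json["run_list"] = chef_cfg["run_list"]
if "initial_attributes" in chef_cfg:
    # top-level attribute keys are copied straight into the firstboot json
    for k, v in chef_cfg["initial_attributes"].items():
        initial_json[k] = v
print(json.dumps(initial_json, indent=2))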
diff --git a/cloudinit/config/cc_debug.py b/cloudinit/config/cc_debug.py
index 4d5a6aa2..d09fc129 100644
--- a/cloudinit/config/cc_debug.py
+++ b/cloudinit/config/cc_debug.py
@@ -30,18 +30,16 @@ location that this cloud-init has been configured with when running.
import copy
from io import StringIO
-from cloudinit import type_utils
-from cloudinit import util
-from cloudinit import safeyaml
+from cloudinit import safeyaml, type_utils, util
-SKIP_KEYS = frozenset(['log_cfgs'])
+SKIP_KEYS = frozenset(["log_cfgs"])
def _make_header(text):
header = StringIO()
header.write("-" * 80)
header.write("\n")
- header.write(text.center(80, ' '))
+ header.write(text.center(80, " "))
header.write("\n")
header.write("-" * 80)
header.write("\n")
@@ -56,17 +54,16 @@ def _dumps(obj):
def handle(name, cfg, cloud, log, args):
"""Handler method activated by cloud-init."""
- verbose = util.get_cfg_by_path(cfg, ('debug', 'verbose'), default=True)
+ verbose = util.get_cfg_by_path(cfg, ("debug", "verbose"), default=True)
if args:
# if args are provided (from cmdline) then explicitly set verbose
out_file = args[0]
verbose = True
else:
- out_file = util.get_cfg_by_path(cfg, ('debug', 'output'))
+ out_file = util.get_cfg_by_path(cfg, ("debug", "output"))
if not verbose:
- log.debug(("Skipping module named %s,"
- " verbose printing disabled"), name)
+ log.debug("Skipping module named %s, verbose printing disabled", name)
return
# Clean out some keys that we just don't care about showing...
dump_cfg = copy.deepcopy(cfg)
@@ -85,8 +82,9 @@ def handle(name, cfg, cloud, log, args):
to_print.write(_dumps(cloud.datasource.metadata))
to_print.write("\n")
to_print.write(_make_header("Misc"))
- to_print.write("Datasource: %s\n" %
- (type_utils.obj_name(cloud.datasource)))
+ to_print.write(
+ "Datasource: %s\n" % (type_utils.obj_name(cloud.datasource))
+ )
to_print.write("Distro: %s\n" % (type_utils.obj_name(cloud.distro)))
to_print.write("Hostname: %s\n" % (cloud.get_hostname(True)))
to_print.write("Instance ID: %s\n" % (cloud.get_instance_id()))
@@ -102,4 +100,5 @@ def handle(name, cfg, cloud, log, args):
else:
util.multi_log("".join(content_to_file), console=True, stderr=False)
+
# vi: ts=4 expandtab
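The _make_header helper reformatted above is easy to exercise on its own; "Config" is just a sample heading.

from io import StringIO

def make_header(text):
    # 80-column rule, centered title, 80-column rule
    header = StringIO()
    header.write("-" * 80)
    header.write("\n")
    header.write(text.center(80, " "))
    header.write("\n")
    header.write("-" * 80)
    header.write("\n")
    return header.getvalue()

print(make_header("Config"), end="")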
diff --git a/cloudinit/config/cc_disable_ec2_metadata.py b/cloudinit/config/cc_disable_ec2_metadata.py
index 61c769b3..5e528e81 100644
--- a/cloudinit/config/cc_disable_ec2_metadata.py
+++ b/cloudinit/config/cc_disable_ec2_metadata.py
@@ -26,32 +26,35 @@ by default.
disable_ec2_metadata: <true/false>
"""
-from cloudinit import subp
-from cloudinit import util
-
+from cloudinit import subp, util
from cloudinit.settings import PER_ALWAYS
frequency = PER_ALWAYS
-REJECT_CMD_IF = ['route', 'add', '-host', '169.254.169.254', 'reject']
-REJECT_CMD_IP = ['ip', 'route', 'add', 'prohibit', '169.254.169.254']
+REJECT_CMD_IF = ["route", "add", "-host", "169.254.169.254", "reject"]
+REJECT_CMD_IP = ["ip", "route", "add", "prohibit", "169.254.169.254"]
def handle(name, cfg, _cloud, log, _args):
disabled = util.get_cfg_option_bool(cfg, "disable_ec2_metadata", False)
if disabled:
reject_cmd = None
- if subp.which('ip'):
+ if subp.which("ip"):
reject_cmd = REJECT_CMD_IP
- elif subp.which('ifconfig'):
+ elif subp.which("ifconfig"):
reject_cmd = REJECT_CMD_IF
else:
- log.error(('Neither "route" nor "ip" command found, unable to '
- 'manipulate routing table'))
+ log.error(
+ 'Neither "route" nor "ip" command found, unable to '
+ "manipulate routing table"
+ )
return
subp.subp(reject_cmd, capture=False)
else:
- log.debug(("Skipping module named %s,"
- " disabling the ec2 route not enabled"), name)
+ log.debug(
+ "Skipping module named %s, disabling the ec2 route not enabled",
+ name,
+ )
+
# vi: ts=4 expandtab
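A dry-run sketch of the command selection shown above, using the standard library's shutil.which in place of cloud-init's subp.which; it only prints the command it would run and never touches the routing table.

import shutil

REJECT_CMD_IF = ["route", "add", "-host", "169.254.169.254", "reject"]
REJECT_CMD_IP = ["ip", "route", "add", "prohibit", "169.254.169.254"]

if shutil.which("ip"):
    reject_cmd = REJECT_CMD_IP  # prefer iproute2 when available
elif shutil.which("ifconfig"):
    reject_cmd = REJECT_CMD_IF  # fall back to net-tools
else:
    reject_cmd = None  # neither tool found; the module logs an error here
print("would run:", reject_cmd)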
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index 440f05f1..4d527c7a 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -100,13 +100,13 @@ A label can be specified for the filesystem using
replace_fs: <filesystem type>
"""
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import util
-from cloudinit import subp
import logging
import os
import shlex
+from cloudinit import subp, util
+from cloudinit.settings import PER_INSTANCE
+
frequency = PER_INSTANCE
# Define the commands to use
@@ -118,7 +118,7 @@ BLKDEV_CMD = subp.which("blockdev")
PARTPROBE_CMD = subp.which("partprobe")
WIPEFS_CMD = subp.which("wipefs")
-LANG_C_ENV = {'LANG': 'C'}
+LANG_C_ENV = {"LANG": "C"}
LOG = logging.getLogger(__name__)
@@ -145,9 +145,12 @@ def handle(_name, cfg, cloud, log, _args):
try:
log.debug("Creating new partition table/disk")
- util.log_time(logfunc=LOG.debug,
- msg="Creating partition on %s" % disk,
- func=mkpart, args=(disk, definition))
+ util.log_time(
+ logfunc=LOG.debug,
+ msg="Creating partition on %s" % disk,
+ func=mkpart,
+ args=(disk, definition),
+ )
except Exception as e:
util.logexc(LOG, "Failed partitioning operation\n%s" % e)
@@ -162,10 +165,13 @@ def handle(_name, cfg, cloud, log, _args):
try:
log.debug("Creating new filesystem.")
- device = definition.get('device')
- util.log_time(logfunc=LOG.debug,
- msg="Creating fs for %s" % device,
- func=mkfs, args=(definition,))
+ device = definition.get("device")
+ util.log_time(
+ logfunc=LOG.debug,
+ msg="Creating fs for %s" % device,
+ func=mkfs,
+ args=(definition,),
+ )
except Exception as e:
util.logexc(LOG, "Failed during filesystem operation\n%s" % e)
@@ -178,16 +184,22 @@ def update_disk_setup_devices(disk_setup, tformer):
if transformed is None or transformed == origname:
continue
if transformed in disk_setup:
- LOG.info("Replacing %s in disk_setup for translation of %s",
- origname, transformed)
+ LOG.info(
+ "Replacing %s in disk_setup for translation of %s",
+ origname,
+ transformed,
+ )
del disk_setup[transformed]
disk_setup[transformed] = disk_setup[origname]
if isinstance(disk_setup[transformed], dict):
- disk_setup[transformed]['_origname'] = origname
+ disk_setup[transformed]["_origname"] = origname
del disk_setup[origname]
- LOG.debug("updated disk_setup device entry '%s' to '%s'",
- origname, transformed)
+ LOG.debug(
+ "updated disk_setup device entry '%s' to '%s'",
+ origname,
+ transformed,
+ )
def update_fs_setup_devices(disk_setup, tformer):
@@ -198,7 +210,7 @@ def update_fs_setup_devices(disk_setup, tformer):
LOG.warning("entry in disk_setup not a dict: %s", definition)
continue
- origname = definition.get('device')
+ origname = definition.get("device")
if origname is None:
continue
@@ -208,19 +220,24 @@ def update_fs_setup_devices(disk_setup, tformer):
tformed = tformer(dev)
if tformed is not None:
dev = tformed
- LOG.debug("%s is mapped to disk=%s part=%s",
- origname, tformed, part)
- definition['_origname'] = origname
- definition['device'] = tformed
+ LOG.debug(
+ "%s is mapped to disk=%s part=%s", origname, tformed, part
+ )
+ definition["_origname"] = origname
+ definition["device"] = tformed
if part:
# In origname with <dev>.N, N overrides 'partition' key.
- if 'partition' in definition:
- LOG.warning("Partition '%s' from dotted device name '%s' "
- "overrides 'partition' key in %s", part, origname,
- definition)
- definition['_partition'] = definition['partition']
- definition['partition'] = part
+ if "partition" in definition:
+ LOG.warning(
+ "Partition '%s' from dotted device name '%s' "
+ "overrides 'partition' key in %s",
+ part,
+ origname,
+ definition,
+ )
+ definition["_partition"] = definition["partition"]
+ definition["partition"] = part
def value_splitter(values, start=None):
@@ -232,7 +249,7 @@ def value_splitter(values, start=None):
if start:
_values = _values[start:]
- for key, value in [x.split('=') for x in _values]:
+ for key, value in [x.split("=") for x in _values]:
yield key, value
@@ -251,11 +268,16 @@ def enumerate_disk(device, nodeps=False):
name: the device name, i.e. sda
"""
- lsblk_cmd = [LSBLK_CMD, '--pairs', '--output', 'NAME,TYPE,FSTYPE,LABEL',
- device]
+ lsblk_cmd = [
+ LSBLK_CMD,
+ "--pairs",
+ "--output",
+ "NAME,TYPE,FSTYPE,LABEL",
+ device,
+ ]
if nodeps:
- lsblk_cmd.append('--nodeps')
+ lsblk_cmd.append("--nodeps")
info = None
try:
@@ -269,10 +291,10 @@ def enumerate_disk(device, nodeps=False):
for part in parts:
d = {
- 'name': None,
- 'type': None,
- 'fstype': None,
- 'label': None,
+ "name": None,
+ "type": None,
+ "fstype": None,
+ "label": None,
}
for key, value in value_splitter(part):
@@ -303,9 +325,9 @@ def is_device_valid(name, partition=False):
LOG.warning("Query against device %s failed", name)
return False
- if partition and d_type == 'part':
+ if partition and d_type == "part":
return True
- elif not partition and d_type == 'disk':
+ elif not partition and d_type == "disk":
return True
return False
@@ -321,7 +343,7 @@ def check_fs(device):
"""
out, label, fs_type, uuid = None, None, None, None
- blkid_cmd = [BLKID_CMD, '-c', '/dev/null', device]
+ blkid_cmd = [BLKID_CMD, "-c", "/dev/null", device]
try:
out, _err = subp.subp(blkid_cmd, rcs=[0, 2])
except Exception as e:
@@ -332,11 +354,11 @@ def check_fs(device):
if out:
if len(out.splitlines()) == 1:
for key, value in value_splitter(out, start=1):
- if key.lower() == 'label':
+ if key.lower() == "label":
label = value
- elif key.lower() == 'type':
+ elif key.lower() == "type":
fs_type = value
- elif key.lower() == 'uuid':
+ elif key.lower() == "uuid":
uuid = value
return label, fs_type, uuid
@@ -350,8 +372,14 @@ def is_filesystem(device):
return fs_type
-def find_device_node(device, fs_type=None, label=None, valid_targets=None,
- label_match=True, replace_fs=None):
+def find_device_node(
+ device,
+ fs_type=None,
+ label=None,
+ valid_targets=None,
+ label_match=True,
+ replace_fs=None,
+):
"""
Find a device that is either matches the spec, or the first
@@ -366,31 +394,32 @@ def find_device_node(device, fs_type=None, label=None, valid_targets=None,
label = ""
if not valid_targets:
- valid_targets = ['disk', 'part']
+ valid_targets = ["disk", "part"]
raw_device_used = False
for d in enumerate_disk(device):
- if d['fstype'] == replace_fs and label_match is False:
+ if d["fstype"] == replace_fs and label_match is False:
# We found a device where we want to replace the FS
- return ('/dev/%s' % d['name'], False)
+ return ("/dev/%s" % d["name"], False)
- if (d['fstype'] == fs_type and
- ((label_match and d['label'] == label) or not label_match)):
+ if d["fstype"] == fs_type and (
+ (label_match and d["label"] == label) or not label_match
+ ):
# If we find a matching device, we return that
- return ('/dev/%s' % d['name'], True)
+ return ("/dev/%s" % d["name"], True)
- if d['type'] in valid_targets:
+ if d["type"] in valid_targets:
- if d['type'] != 'disk' or d['fstype']:
+ if d["type"] != "disk" or d["fstype"]:
raw_device_used = True
- if d['type'] == 'disk':
+ if d["type"] == "disk":
# Skip the raw disk, its the default
pass
- elif not d['fstype']:
- return ('/dev/%s' % d['name'], False)
+ elif not d["fstype"]:
+ return ("/dev/%s" % d["name"], False)
if not raw_device_used:
return (device, False)
@@ -433,7 +462,7 @@ def get_dyn_func(*args):
if len(args) < 2:
raise Exception("Unable to determine dynamic funcation name")
- func_name = (args[0] % args[1])
+ func_name = args[0] % args[1]
func_args = args[2:]
try:
@@ -448,8 +477,8 @@ def get_dyn_func(*args):
def get_hdd_size(device):
try:
- size_in_bytes, _ = subp.subp([BLKDEV_CMD, '--getsize64', device])
- sector_size, _ = subp.subp([BLKDEV_CMD, '--getss', device])
+ size_in_bytes, _ = subp.subp([BLKDEV_CMD, "--getsize64", device])
+ sector_size, _ = subp.subp([BLKDEV_CMD, "--getss", device])
except Exception as e:
raise Exception("Failed to get %s size\n%s" % (device, e)) from e
@@ -481,13 +510,13 @@ def check_partition_mbr_layout(device, layout):
if device in _line[0]:
# We don't understand extended partitions yet
- if _line[-1].lower() in ['extended', 'empty']:
+ if _line[-1].lower() in ["extended", "empty"]:
continue
# Find the partition types
type_label = None
for x in sorted(range(1, len(_line)), reverse=True):
- if _line[x].isdigit() and _line[x] != '/':
+ if _line[x].isdigit() and _line[x] != "/":
type_label = _line[x]
break
@@ -496,7 +525,7 @@ def check_partition_mbr_layout(device, layout):
def check_partition_gpt_layout(device, layout):
- prt_cmd = [SGDISK_CMD, '-p', device]
+ prt_cmd = [SGDISK_CMD, "-p", device]
try:
out, _err = subp.subp(prt_cmd, update_env=LANG_C_ENV)
except Exception as e:
@@ -522,7 +551,7 @@ def check_partition_gpt_layout(device, layout):
# Number Start (sector) End (sector) Size Code Name
# 1 2048 206847 100.0 MiB 0700 Microsoft basic data
for line in out_lines:
- if line.strip().startswith('Number'):
+ if line.strip().startswith("Number"):
break
codes = [line.strip().split()[5] for line in out_lines]
@@ -545,10 +574,16 @@ def check_partition_layout(table_type, device, layout):
function called check_partition_%s_layout
"""
found_layout = get_dyn_func(
- "check_partition_%s_layout", table_type, device, layout)
-
- LOG.debug("called check_partition_%s_layout(%s, %s), returned: %s",
- table_type, device, layout, found_layout)
+ "check_partition_%s_layout", table_type, device, layout
+ )
+
+ LOG.debug(
+ "called check_partition_%s_layout(%s, %s), returned: %s",
+ table_type,
+ device,
+ layout,
+ found_layout,
+ )
if isinstance(layout, bool):
# if we are using auto partitioning, or "True" be happy
# if a single partition exists.
@@ -559,10 +594,12 @@ def check_partition_layout(table_type, device, layout):
elif len(found_layout) == len(layout):
# This just makes sure that the number of requested
# partitions and the type labels are right
- layout_types = [str(x[1]) if isinstance(x, (tuple, list)) else None
- for x in layout]
- LOG.debug("Layout types=%s. Found types=%s",
- layout_types, found_layout)
+ layout_types = [
+ str(x[1]) if isinstance(x, (tuple, list)) else None for x in layout
+ ]
+ LOG.debug(
+ "Layout types=%s. Found types=%s", layout_types, found_layout
+ )
for itype, ftype in zip(layout_types, found_layout):
if itype is not None and str(ftype) != str(itype):
return False
@@ -588,8 +625,9 @@ def get_partition_mbr_layout(size, layout):
# Create a single partition
return "0,"
- if ((len(layout) == 0 and isinstance(layout, list)) or
- not isinstance(layout, list)):
+ if (len(layout) == 0 and isinstance(layout, list)) or not isinstance(
+ layout, list
+ ):
raise Exception("Partition layout is invalid")
last_part_num = len(layout)
@@ -617,8 +655,10 @@ def get_partition_mbr_layout(size, layout):
sfdisk_definition = "\n".join(part_definition)
if len(part_definition) > 4:
- raise Exception("Calculated partition definition is too big\n%s" %
- sfdisk_definition)
+ raise Exception(
+ "Calculated partition definition is too big\n%s"
+ % sfdisk_definition
+ )
return sfdisk_definition
@@ -632,14 +672,15 @@ def get_partition_gpt_layout(size, layout):
if isinstance(partition, list):
if len(partition) != 2:
raise Exception(
- "Partition was incorrectly defined: %s" % partition)
+ "Partition was incorrectly defined: %s" % partition
+ )
percent, partition_type = partition
else:
percent = partition
partition_type = None
part_size = int(float(size) * (float(percent) / 100))
- partition_specs.append((partition_type, [0, '+{}'.format(part_size)]))
+ partition_specs.append((partition_type, [0, "+{}".format(part_size)]))
# The last partition should use up all remaining space
partition_specs[-1][-1][-1] = 0
@@ -649,7 +690,7 @@ def get_partition_gpt_layout(size, layout):
def purge_disk_ptable(device):
# wipe the first and last megabyte of a disk (or file)
# gpt stores partition table both at front and at end.
- null = '\0'
+ null = "\0"
start_len = 1024 * 1024
end_len = 1024 * 1024
with open(device, "rb+") as fp:
@@ -668,14 +709,14 @@ def purge_disk(device):
# wipe any file systems first
for d in enumerate_disk(device):
- if d['type'] not in ["disk", "crypt"]:
- wipefs_cmd = [WIPEFS_CMD, "--all", "/dev/%s" % d['name']]
+ if d["type"] not in ["disk", "crypt"]:
+ wipefs_cmd = [WIPEFS_CMD, "--all", "/dev/%s" % d["name"]]
try:
- LOG.info("Purging filesystem on /dev/%s", d['name'])
+ LOG.info("Purging filesystem on /dev/%s", d["name"])
subp.subp(wipefs_cmd)
except Exception as e:
raise Exception(
- "Failed FS purge of /dev/%s" % d['name']
+ "Failed FS purge of /dev/%s" % d["name"]
) from e
purge_disk_ptable(device)
@@ -701,7 +742,7 @@ def read_parttbl(device):
if PARTPROBE_CMD is not None:
probe_cmd = [PARTPROBE_CMD, device]
else:
- probe_cmd = [BLKDEV_CMD, '--rereadpt', device]
+ probe_cmd = [BLKDEV_CMD, "--rereadpt", device]
util.udevadm_settle()
try:
subp.subp(probe_cmd)
@@ -730,17 +771,24 @@ def exec_mkpart_mbr(device, layout):
def exec_mkpart_gpt(device, layout):
try:
- subp.subp([SGDISK_CMD, '-Z', device])
+ subp.subp([SGDISK_CMD, "-Z", device])
for index, (partition_type, (start, end)) in enumerate(layout):
index += 1
- subp.subp([SGDISK_CMD,
- '-n', '{}:{}:{}'.format(index, start, end), device])
+ subp.subp(
+ [
+ SGDISK_CMD,
+ "-n",
+ "{}:{}:{}".format(index, start, end),
+ device,
+ ]
+ )
if partition_type is not None:
# convert to a 4 char (or more) string right padded with 0
# 82 -> 8200. 'Linux' -> 'Linux'
pinput = str(partition_type).ljust(4, "0")
subp.subp(
- [SGDISK_CMD, '-t', '{}:{}'.format(index, pinput), device])
+ [SGDISK_CMD, "-t", "{}:{}".format(index, pinput), device]
+ )
except Exception:
LOG.warning("Failed to partition device %s", device)
raise
@@ -766,8 +814,10 @@ def assert_and_settle_device(device):
if not os.path.exists(device):
util.udevadm_settle()
if not os.path.exists(device):
- raise RuntimeError("Device %s did not exist and was not created "
- "with a udevadm settle." % device)
+ raise RuntimeError(
+ "Device %s did not exist and was not created "
+ "with a udevadm settle." % device
+ )
# Whether or not the device existed above, it is possible that udev
# events that would populate udev database (for reading by lsdname) have
@@ -794,9 +844,9 @@ def mkpart(device, definition):
device = os.path.realpath(device)
LOG.debug("Checking values for %s definition", device)
- overwrite = definition.get('overwrite', False)
- layout = definition.get('layout', False)
- table_type = definition.get('table_type', 'mbr')
+ overwrite = definition.get("overwrite", False)
+ layout = definition.get("layout", False)
+ table_type = definition.get("table_type", "mbr")
# Check if the default device is a partition or not
LOG.debug("Checking against default devices")
@@ -809,7 +859,8 @@ def mkpart(device, definition):
LOG.debug("Checking if device %s is a valid device", device)
if not is_device_valid(device):
raise Exception(
- 'Device {device} is not a disk device!'.format(device=device))
+ "Device {device} is not a disk device!".format(device=device)
+ )
# Remove the partition table entries
if isinstance(layout, str) and layout.lower() == "remove":
@@ -845,21 +896,21 @@ def lookup_force_flag(fs):
    A force flag might be -F or -f; this looks it up
"""
flags = {
- 'ext': '-F',
- 'btrfs': '-f',
- 'xfs': '-f',
- 'reiserfs': '-f',
- 'swap': '-f',
+ "ext": "-F",
+ "btrfs": "-f",
+ "xfs": "-f",
+ "reiserfs": "-f",
+ "swap": "-f",
}
- if 'ext' in fs.lower():
- fs = 'ext'
+ if "ext" in fs.lower():
+ fs = "ext"
if fs.lower() in flags:
return flags[fs]
LOG.warning("Force flag for %s is unknown.", fs)
- return ''
+ return ""
def mkfs(fs_cfg):
@@ -883,14 +934,14 @@ def mkfs(fs_cfg):
When 'cmd' is provided then no other parameter is required.
"""
- label = fs_cfg.get('label')
- device = fs_cfg.get('device')
- partition = str(fs_cfg.get('partition', 'any'))
- fs_type = fs_cfg.get('filesystem')
- fs_cmd = fs_cfg.get('cmd', [])
- fs_opts = fs_cfg.get('extra_opts', [])
- fs_replace = fs_cfg.get('replace_fs', False)
- overwrite = fs_cfg.get('overwrite', False)
+ label = fs_cfg.get("label")
+ device = fs_cfg.get("device")
+ partition = str(fs_cfg.get("partition", "any"))
+ fs_type = fs_cfg.get("filesystem")
+ fs_cmd = fs_cfg.get("cmd", [])
+ fs_opts = fs_cfg.get("extra_opts", [])
+ fs_replace = fs_cfg.get("replace_fs", False)
+ overwrite = fs_cfg.get("overwrite", False)
# ensure that we get a real device rather than a symbolic link
assert_and_settle_device(device)
@@ -903,14 +954,19 @@ def mkfs(fs_cfg):
# Handle manual definition of partition
if partition.isdigit():
device = "%s%s" % (device, partition)
- LOG.debug("Manual request of partition %s for %s",
- partition, device)
+ LOG.debug(
+ "Manual request of partition %s for %s", partition, device
+ )
# Check to see if the fs already exists
LOG.debug("Checking device %s", device)
check_label, check_fstype, _ = check_fs(device)
- LOG.debug("Device '%s' has check_label='%s' check_fstype=%s",
- device, check_label, check_fstype)
+ LOG.debug(
+ "Device '%s' has check_label='%s' check_fstype=%s",
+ device,
+ check_label,
+ check_fstype,
+ )
if check_label == label and check_fstype == fs_type:
LOG.debug("Existing file system found at %s", device)
@@ -924,19 +980,23 @@ def mkfs(fs_cfg):
else:
LOG.debug("Device %s is cleared for formating", device)
- elif partition and str(partition).lower() in ('auto', 'any'):
+ elif partition and str(partition).lower() in ("auto", "any"):
# For auto devices, we match if the filesystem does exist
odevice = device
LOG.debug("Identifying device to create %s filesytem on", label)
# any mean pick the first match on the device with matching fs_type
label_match = True
- if partition.lower() == 'any':
+ if partition.lower() == "any":
label_match = False
- device, reuse = find_device_node(device, fs_type=fs_type, label=label,
- label_match=label_match,
- replace_fs=fs_replace)
+ device, reuse = find_device_node(
+ device,
+ fs_type=fs_type,
+ label=label,
+ label_match=label_match,
+ replace_fs=fs_replace,
+ )
LOG.debug("Automatic device for %s identified as %s", odevice, device)
if reuse:
@@ -947,18 +1007,25 @@ def mkfs(fs_cfg):
LOG.debug("Replacing file system on %s as instructed.", device)
if not device:
-        LOG.debug("No device available that matches request. "
-                  "Skipping fs creation for %s", fs_cfg)
+        LOG.debug(
+            "No device available that matches request. "
+ "Skipping fs creation for %s",
+ fs_cfg,
+ )
return
- elif not partition or str(partition).lower() == 'none':
+ elif not partition or str(partition).lower() == "none":
LOG.debug("Using the raw device to place filesystem %s on", label)
else:
LOG.debug("Error in device identification handling.")
return
- LOG.debug("File system type '%s' with label '%s' will be created on %s",
- fs_type, label, device)
+ LOG.debug(
+ "File system type '%s' with label '%s' will be created on %s",
+ fs_type,
+ label,
+ device,
+ )
# Make sure the device is defined
if not device:
@@ -969,26 +1036,29 @@ def mkfs(fs_cfg):
if not (fs_type or fs_cmd):
raise Exception(
"No way to create filesystem '{label}'. fs_type or fs_cmd "
- "must be set.".format(label=label))
+ "must be set.".format(label=label)
+ )
# Create the commands
shell = False
if fs_cmd:
- fs_cmd = fs_cfg['cmd'] % {
- 'label': label,
- 'filesystem': fs_type,
- 'device': device,
+ fs_cmd = fs_cfg["cmd"] % {
+ "label": label,
+ "filesystem": fs_type,
+ "device": device,
}
shell = True
if overwrite:
LOG.warning(
"fs_setup:overwrite ignored because cmd was specified: %s",
- fs_cmd)
+ fs_cmd,
+ )
if fs_opts:
LOG.warning(
"fs_setup:extra_opts ignored because cmd was specified: %s",
- fs_cmd)
+ fs_cmd,
+ )
else:
# Find the mkfs command
mkfs_cmd = subp.which("mkfs.%s" % fs_type)
@@ -996,8 +1066,11 @@ def mkfs(fs_cfg):
mkfs_cmd = subp.which("mk%s" % fs_type)
if not mkfs_cmd:
- LOG.warning("Cannot create fstype '%s'. No mkfs.%s command",
- fs_type, fs_type)
+ LOG.warning(
+ "Cannot create fstype '%s'. No mkfs.%s command",
+ fs_type,
+ fs_type,
+ )
return
fs_cmd = [mkfs_cmd, device]
@@ -1022,4 +1095,5 @@ def mkfs(fs_cfg):
except Exception as e:
raise Exception("Failed to exec of '%s':\n%s" % (fs_cmd, e)) from e
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_emit_upstart.py b/cloudinit/config/cc_emit_upstart.py
index 40eee052..a928082b 100644
--- a/cloudinit/config/cc_emit_upstart.py
+++ b/cloudinit/config/cc_emit_upstart.py
@@ -24,12 +24,12 @@ user configuration should be required.
import os
from cloudinit import log as logging
-from cloudinit.settings import PER_ALWAYS
from cloudinit import subp
+from cloudinit.settings import PER_ALWAYS
frequency = PER_ALWAYS
-distros = ['ubuntu', 'debian']
+distros = ["ubuntu", "debian"]
LOG = logging.getLogger(__name__)
@@ -39,15 +39,18 @@ def is_upstart_system():
return False
myenv = os.environ.copy()
- if 'UPSTART_SESSION' in myenv:
- del myenv['UPSTART_SESSION']
- check_cmd = ['initctl', 'version']
+ if "UPSTART_SESSION" in myenv:
+ del myenv["UPSTART_SESSION"]
+ check_cmd = ["initctl", "version"]
try:
(out, _err) = subp.subp(check_cmd, env=myenv)
- return 'upstart' in out
+ return "upstart" in out
except subp.ProcessExecutionError as e:
- LOG.debug("'%s' returned '%s', not using upstart",
- ' '.join(check_cmd), e.exit_code)
+ LOG.debug(
+ "'%s' returned '%s', not using upstart",
+ " ".join(check_cmd),
+ e.exit_code,
+ )
return False
@@ -56,7 +59,7 @@ def handle(name, _cfg, cloud, log, args):
if not event_names:
# Default to the 'cloud-config'
# event for backwards compat.
- event_names = ['cloud-config']
+ event_names = ["cloud-config"]
if not is_upstart_system():
log.debug("not upstart system, '%s' disabled", name)
@@ -64,11 +67,12 @@ def handle(name, _cfg, cloud, log, args):
cfgpath = cloud.paths.get_ipath_cur("cloud_config")
for n in event_names:
- cmd = ['initctl', 'emit', str(n), 'CLOUD_CFG=%s' % cfgpath]
+ cmd = ["initctl", "emit", str(n), "CLOUD_CFG=%s" % cfgpath]
try:
subp.subp(cmd)
except Exception as e:
# TODO(harlowja), use log exception from utils??
log.warning("Emission of upstart event %s failed due to: %s", n, e)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_fan.py b/cloudinit/config/cc_fan.py
index 91f50e22..50a81744 100644
--- a/cloudinit/config/cc_fan.py
+++ b/cloudinit/config/cc_fan.py
@@ -38,60 +38,62 @@ If cloud-init sees a ``fan`` entry in cloud-config it will:
"""
from cloudinit import log as logging
+from cloudinit import subp, util
from cloudinit.settings import PER_INSTANCE
-from cloudinit import subp
-from cloudinit import util
LOG = logging.getLogger(__name__)
frequency = PER_INSTANCE
BUILTIN_CFG = {
- 'config': None,
- 'config_path': '/etc/network/fan',
+ "config": None,
+ "config_path": "/etc/network/fan",
}
def stop_update_start(distro, service, config_file, content):
try:
- distro.manage_service('stop', service)
+ distro.manage_service("stop", service)
stop_failed = False
except subp.ProcessExecutionError as e:
stop_failed = True
LOG.warning("failed to stop %s: %s", service, e)
- if not content.endswith('\n'):
- content += '\n'
+ if not content.endswith("\n"):
+ content += "\n"
util.write_file(config_file, content, omode="w")
try:
- distro.manage_service('start', service)
+ distro.manage_service("start", service)
if stop_failed:
LOG.warning("success: %s started", service)
except subp.ProcessExecutionError as e:
LOG.warning("failed to start %s: %s", service, e)
- distro.manage_service('enable', service)
+ distro.manage_service("enable", service)
def handle(name, cfg, cloud, log, args):
- cfgin = cfg.get('fan')
+ cfgin = cfg.get("fan")
if not cfgin:
cfgin = {}
mycfg = util.mergemanydict([cfgin, BUILTIN_CFG])
- if not mycfg.get('config'):
+ if not mycfg.get("config"):
LOG.debug("%s: no 'fan' config entry. disabling", name)
return
- util.write_file(mycfg.get('config_path'), mycfg.get('config'), omode="w")
+ util.write_file(mycfg.get("config_path"), mycfg.get("config"), omode="w")
distro = cloud.distro
- if not subp.which('fanctl'):
- distro.install_packages(['ubuntu-fan'])
+ if not subp.which("fanctl"):
+ distro.install_packages(["ubuntu-fan"])
stop_update_start(
distro,
- service='ubuntu-fan', config_file=mycfg.get('config_path'),
- content=mycfg.get('config'))
+ service="ubuntu-fan",
+ config_file=mycfg.get("config_path"),
+ content=mycfg.get("config"),
+ )
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_final_message.py b/cloudinit/config/cc_final_message.py
index 4fa5297e..f443ccd8 100644
--- a/cloudinit/config/cc_final_message.py
+++ b/cloudinit/config/cc_final_message.py
@@ -31,10 +31,7 @@ specified as a jinja template with the following variables set:
"""
-from cloudinit import templater
-from cloudinit import util
-from cloudinit import version
-
+from cloudinit import templater, util, version
from cloudinit.settings import PER_ALWAYS
frequency = PER_ALWAYS
@@ -49,7 +46,7 @@ FINAL_MESSAGE_DEF = (
def handle(_name, cfg, cloud, log, args):
- msg_in = ''
+ msg_in = ""
if len(args) != 0:
msg_in = str(args[0])
else:
@@ -64,14 +61,18 @@ def handle(_name, cfg, cloud, log, args):
cver = version.version_string()
try:
subs = {
- 'uptime': uptime,
- 'timestamp': ts,
- 'version': cver,
- 'datasource': str(cloud.datasource),
+ "uptime": uptime,
+ "timestamp": ts,
+ "version": cver,
+ "datasource": str(cloud.datasource),
}
subs.update(dict([(k.upper(), v) for k, v in subs.items()]))
- util.multi_log("%s\n" % (templater.render_string(msg_in, subs)),
- console=False, stderr=True, log=log)
+ util.multi_log(
+ "%s\n" % (templater.render_string(msg_in, subs)),
+ console=False,
+ stderr=True,
+ log=log,
+ )
except Exception:
util.logexc(log, "Failed to render final message template")
@@ -85,4 +86,5 @@ def handle(_name, cfg, cloud, log, args):
if cloud.datasource.is_disconnected:
log.warning("Used fallback datasource")
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_foo.py b/cloudinit/config/cc_foo.py
index 924b967c..3c307153 100644
--- a/cloudinit/config/cc_foo.py
+++ b/cloudinit/config/cc_foo.py
@@ -53,4 +53,5 @@ frequency = PER_INSTANCE
def handle(name, _cfg, _cloud, log, _args):
log.debug("Hi from module %s", name)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py
index 1ddc9dc7..43334caa 100644
--- a/cloudinit/config/cc_growpart.py
+++ b/cloudinit/config/cc_growpart.py
@@ -70,17 +70,15 @@ import re
import stat
from cloudinit import log as logging
+from cloudinit import subp, temp_utils, util
from cloudinit.settings import PER_ALWAYS
-from cloudinit import subp
-from cloudinit import temp_utils
-from cloudinit import util
frequency = PER_ALWAYS
DEFAULT_CONFIG = {
- 'mode': 'auto',
- 'devices': ['/'],
- 'ignore_growroot_disabled': False,
+ "mode": "auto",
+ "devices": ["/"],
+ "ignore_growroot_disabled": False,
}
@@ -131,7 +129,7 @@ class ResizeFailedException(Exception):
class ResizeGrowPart(object):
def available(self):
myenv = os.environ.copy()
- myenv['LANG'] = 'C'
+ myenv["LANG"] = "C"
try:
(out, _err) = subp.subp(["growpart", "--help"], env=myenv)
@@ -144,7 +142,7 @@ class ResizeGrowPart(object):
def resize(self, diskdev, partnum, partdev):
myenv = os.environ.copy()
- myenv['LANG'] = 'C'
+ myenv["LANG"] = "C"
before = get_size(partdev)
# growpart uses tmp dir to store intermediate states
@@ -153,14 +151,19 @@ class ResizeGrowPart(object):
growpart_tmp = os.path.join(tmpd, "growpart")
if not os.path.exists(growpart_tmp):
os.mkdir(growpart_tmp, 0o700)
- myenv['TMPDIR'] = growpart_tmp
+ myenv["TMPDIR"] = growpart_tmp
try:
- subp.subp(["growpart", '--dry-run', diskdev, partnum],
- env=myenv)
+ subp.subp(
+ ["growpart", "--dry-run", diskdev, partnum], env=myenv
+ )
except subp.ProcessExecutionError as e:
if e.exit_code != 1:
- util.logexc(LOG, "Failed growpart --dry-run for (%s, %s)",
- diskdev, partnum)
+ util.logexc(
+ LOG,
+ "Failed growpart --dry-run for (%s, %s)",
+ diskdev,
+ partnum,
+ )
raise ResizeFailedException(e) from e
return (before, before)
@@ -176,7 +179,7 @@ class ResizeGrowPart(object):
class ResizeGpart(object):
def available(self):
myenv = os.environ.copy()
- myenv['LANG'] = 'C'
+ myenv["LANG"] = "C"
try:
(_out, err) = subp.subp(["gpart", "help"], env=myenv, rcs=[0, 1])
@@ -234,11 +237,11 @@ def device_part_info(devpath):
# the device, like /dev/vtbd0p2.
if util.is_FreeBSD():
freebsd_part = "/dev/" + util.find_freebsd_part(devpath)
- m = re.search('^(/dev/.+)p([0-9])$', freebsd_part)
+ m = re.search("^(/dev/.+)p([0-9])$", freebsd_part)
return (m.group(1), m.group(2))
elif util.is_DragonFlyBSD():
dragonflybsd_part = "/dev/" + util.find_dragonflybsd_part(devpath)
- m = re.search('^(/dev/.+)s([0-9])$', dragonflybsd_part)
+ m = re.search("^(/dev/.+)s([0-9])$", dragonflybsd_part)
return (m.group(1), m.group(2))
if not os.path.exists(syspath):
@@ -275,7 +278,7 @@ def devent2dev(devent):
container = util.is_container()
# Ensure the path is a block device.
- if (dev == "/dev/root" and not container):
+ if dev == "/dev/root" and not container:
dev = util.rootdev_from_cmdline(util.get_cmdline())
if dev is None:
if os.path.exists(dev):
@@ -293,65 +296,102 @@ def resize_devices(resizer, devices):
try:
blockdev = devent2dev(devent)
except ValueError as e:
- info.append((devent, RESIZE.SKIPPED,
- "unable to convert to device: %s" % e,))
+ info.append(
+ (
+ devent,
+ RESIZE.SKIPPED,
+ "unable to convert to device: %s" % e,
+ )
+ )
continue
try:
statret = os.stat(blockdev)
except OSError as e:
- info.append((devent, RESIZE.SKIPPED,
- "stat of '%s' failed: %s" % (blockdev, e),))
+ info.append(
+ (
+ devent,
+ RESIZE.SKIPPED,
+ "stat of '%s' failed: %s" % (blockdev, e),
+ )
+ )
continue
- if (not stat.S_ISBLK(statret.st_mode) and
- not stat.S_ISCHR(statret.st_mode)):
- info.append((devent, RESIZE.SKIPPED,
- "device '%s' not a block device" % blockdev,))
+ if not stat.S_ISBLK(statret.st_mode) and not stat.S_ISCHR(
+ statret.st_mode
+ ):
+ info.append(
+ (
+ devent,
+ RESIZE.SKIPPED,
+ "device '%s' not a block device" % blockdev,
+ )
+ )
continue
try:
(disk, ptnum) = device_part_info(blockdev)
except (TypeError, ValueError) as e:
- info.append((devent, RESIZE.SKIPPED,
- "device_part_info(%s) failed: %s" % (blockdev, e),))
+ info.append(
+ (
+ devent,
+ RESIZE.SKIPPED,
+ "device_part_info(%s) failed: %s" % (blockdev, e),
+ )
+ )
continue
try:
(old, new) = resizer.resize(disk, ptnum, blockdev)
if old == new:
- info.append((devent, RESIZE.NOCHANGE,
- "no change necessary (%s, %s)" % (disk, ptnum),))
+ info.append(
+ (
+ devent,
+ RESIZE.NOCHANGE,
+ "no change necessary (%s, %s)" % (disk, ptnum),
+ )
+ )
else:
- info.append((devent, RESIZE.CHANGED,
- "changed (%s, %s) from %s to %s" %
- (disk, ptnum, old, new),))
+ info.append(
+ (
+ devent,
+ RESIZE.CHANGED,
+ "changed (%s, %s) from %s to %s"
+ % (disk, ptnum, old, new),
+ )
+ )
except ResizeFailedException as e:
- info.append((devent, RESIZE.FAILED,
- "failed to resize: disk=%s, ptnum=%s: %s" %
- (disk, ptnum, e),))
+ info.append(
+ (
+ devent,
+ RESIZE.FAILED,
+ "failed to resize: disk=%s, ptnum=%s: %s"
+ % (disk, ptnum, e),
+ )
+ )
return info
def handle(_name, cfg, _cloud, log, _args):
- if 'growpart' not in cfg:
- log.debug("No 'growpart' entry in cfg. Using default: %s" %
- DEFAULT_CONFIG)
- cfg['growpart'] = DEFAULT_CONFIG
+ if "growpart" not in cfg:
+ log.debug(
+ "No 'growpart' entry in cfg. Using default: %s" % DEFAULT_CONFIG
+ )
+ cfg["growpart"] = DEFAULT_CONFIG
- mycfg = cfg.get('growpart')
+ mycfg = cfg.get("growpart")
if not isinstance(mycfg, dict):
log.warning("'growpart' in config was not a dict")
return
- mode = mycfg.get('mode', "auto")
+ mode = mycfg.get("mode", "auto")
if util.is_false(mode):
log.debug("growpart disabled: mode=%s" % mode)
return
- if util.is_false(mycfg.get('ignore_growroot_disabled', False)):
+ if util.is_false(mycfg.get("ignore_growroot_disabled", False)):
if os.path.isfile("/etc/growroot-disabled"):
log.debug("growpart disabled: /etc/growroot-disabled exists")
log.debug("use ignore_growroot_disabled to ignore")
@@ -370,8 +410,12 @@ def handle(_name, cfg, _cloud, log, _args):
raise e
return
- resized = util.log_time(logfunc=log.debug, msg="resize_devices",
- func=resize_devices, args=(resizer, devices))
+ resized = util.log_time(
+ logfunc=log.debug,
+ msg="resize_devices",
+ func=resize_devices,
+ args=(resizer, devices),
+ )
for (entry, action, msg) in resized:
if action == RESIZE.CHANGED:
log.info("'%s' resized: %s" % (entry, msg))
@@ -379,6 +423,6 @@ def handle(_name, cfg, _cloud, log, _args):
log.debug("'%s' %s: %s" % (entry, action, msg))
-RESIZERS = (('growpart', ResizeGrowPart), ('gpart', ResizeGpart))
+RESIZERS = (("growpart", ResizeGrowPart), ("gpart", ResizeGpart))
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_grub_dpkg.py b/cloudinit/config/cc_grub_dpkg.py
index eb03c664..ad7243d9 100644
--- a/cloudinit/config/cc_grub_dpkg.py
+++ b/cloudinit/config/cc_grub_dpkg.py
@@ -43,11 +43,10 @@ seeded with empty values, and install_devices_empty is set to true.
import os
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
from cloudinit.subp import ProcessExecutionError
-distros = ['ubuntu', 'debian']
+distros = ["ubuntu", "debian"]
def fetch_idevs(log):
@@ -60,8 +59,9 @@ def fetch_idevs(log):
try:
# get the root disk where the /boot directory resides.
- disk = subp.subp(['grub-probe', '-t', 'disk', '/boot'],
- capture=True)[0].strip()
+ disk = subp.subp(["grub-probe", "-t", "disk", "/boot"], capture=True)[
+ 0
+ ].strip()
except ProcessExecutionError as e:
# grub-common may not be installed, especially on containers
# FileNotFoundError is a nested exception of ProcessExecutionError
@@ -81,26 +81,30 @@ def fetch_idevs(log):
if not disk or not os.path.exists(disk):
# If we failed to detect a disk, we can return early
- return ''
+ return ""
try:
# check if disk exists and use udevadm to fetch symlinks
- devices = subp.subp(
- ['udevadm', 'info', '--root', '--query=symlink', disk],
- capture=True
- )[0].strip().split()
+ devices = (
+ subp.subp(
+ ["udevadm", "info", "--root", "--query=symlink", disk],
+ capture=True,
+ )[0]
+ .strip()
+ .split()
+ )
except Exception:
util.logexc(
log, "udevadm DEVLINKS symlink query failed for disk='%s'", disk
)
- log.debug('considering these device symlinks: %s', ','.join(devices))
+ log.debug("considering these device symlinks: %s", ",".join(devices))
# filter symlinks for /dev/disk/by-id entries
- devices = [dev for dev in devices if 'disk/by-id' in dev]
- log.debug('filtered to these disk/by-id symlinks: %s', ','.join(devices))
+ devices = [dev for dev in devices if "disk/by-id" in dev]
+ log.debug("filtered to these disk/by-id symlinks: %s", ",".join(devices))
# select first device if there is one, else fall back to plain name
idevs = sorted(devices)[0] if devices else disk
- log.debug('selected %s', idevs)
+ log.debug("selected %s", idevs)
return idevs
@@ -111,14 +115,15 @@ def handle(name, cfg, _cloud, log, _args):
if not mycfg:
mycfg = {}
- enabled = mycfg.get('enabled', True)
+ enabled = mycfg.get("enabled", True)
if util.is_false(enabled):
log.debug("%s disabled by config grub_dpkg/enabled=%s", name, enabled)
return
idevs = util.get_cfg_option_str(mycfg, "grub-pc/install_devices", None)
idevs_empty = util.get_cfg_option_str(
- mycfg, "grub-pc/install_devices_empty", None)
+ mycfg, "grub-pc/install_devices_empty", None
+ )
if idevs is None:
idevs = fetch_idevs(log)
@@ -128,16 +133,21 @@ def handle(name, cfg, _cloud, log, _args):
# now idevs and idevs_empty are set to determined values
# or, those set by user
- dconf_sel = (("grub-pc grub-pc/install_devices string %s\n"
- "grub-pc grub-pc/install_devices_empty boolean %s\n") %
- (idevs, idevs_empty))
+ dconf_sel = (
+ "grub-pc grub-pc/install_devices string %s\n"
+ "grub-pc grub-pc/install_devices_empty boolean %s\n"
+ % (idevs, idevs_empty)
+ )
- log.debug("Setting grub debconf-set-selections with '%s','%s'" %
- (idevs, idevs_empty))
+ log.debug(
+ "Setting grub debconf-set-selections with '%s','%s'"
+ % (idevs, idevs_empty)
+ )
try:
- subp.subp(['debconf-set-selections'], dconf_sel)
+ subp.subp(["debconf-set-selections"], dconf_sel)
except Exception:
util.logexc(log, "Failed to run debconf-set-selections for grub-dpkg")
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_install_hotplug.py b/cloudinit/config/cc_install_hotplug.py
index 9b4075cc..952d9f13 100644
--- a/cloudinit/config/cc_install_hotplug.py
+++ b/cloudinit/config/cc_install_hotplug.py
@@ -3,15 +3,12 @@
import os
from textwrap import dedent
-from cloudinit import util
-from cloudinit import subp
-from cloudinit import stages
+from cloudinit import stages, subp, util
from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema
from cloudinit.distros import ALL_DISTROS
-from cloudinit.event import EventType, EventScope
+from cloudinit.event import EventScope, EventType
from cloudinit.settings import PER_INSTANCE
-
frequency = PER_INSTANCE
distros = [ALL_DISTROS]
@@ -19,7 +16,8 @@ meta = {
"id": "cc_install_hotplug",
"name": "Install Hotplug",
"title": "Install hotplug if supported and enabled",
- "description": dedent("""\
+ "description": dedent(
+ """\
This module will install the udev rules to enable hotplug if
supported by the datasource and enabled in the userdata. The udev
rules will be installed as
@@ -32,21 +30,26 @@ meta = {
network configuration.
Currently supported datasources: Openstack, EC2
- """),
+ """
+ ),
"distros": distros,
"examples": [
- dedent("""\
+ dedent(
+ """\
# Enable hotplug of network devices
updates:
network:
when: ["hotplug"]
- """),
- dedent("""\
+ """
+ ),
+ dedent(
+ """\
# Enable network hotplug alongside boot event
updates:
network:
when: ["boot", "hotplug"]
- """),
+ """
+ ),
],
"frequency": frequency,
}
@@ -74,14 +77,14 @@ schema = {
"boot-legacy",
"boot",
"hotplug",
- ]
- }
+ ],
+ },
}
- }
+ },
}
- }
+ },
}
- }
+ },
}
__doc__ = get_meta_doc(meta, schema)
@@ -100,14 +103,15 @@ LABEL="cloudinit_end"
def handle(_name, cfg, cloud, log, _args):
validate_cloudconfig_schema(cfg, schema)
network_hotplug_enabled = (
- 'updates' in cfg and
- 'network' in cfg['updates'] and
- 'when' in cfg['updates']['network'] and
- 'hotplug' in cfg['updates']['network']['when']
+ "updates" in cfg
+ and "network" in cfg["updates"]
+ and "when" in cfg["updates"]["network"]
+ and "hotplug" in cfg["updates"]["network"]["when"]
)
hotplug_supported = EventType.HOTPLUG in (
- cloud.datasource.get_supported_events(
- [EventType.HOTPLUG]).get(EventScope.NETWORK, set())
+ cloud.datasource.get_supported_events([EventType.HOTPLUG]).get(
+ EventScope.NETWORK, set()
+ )
)
hotplug_enabled = stages.update_event_enabled(
datasource=cloud.datasource,
diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py
index d72b5244..ab35e136 100644
--- a/cloudinit/config/cc_keys_to_console.py
+++ b/cloudinit/config/cc_keys_to_console.py
@@ -38,49 +38,53 @@ host keys are not written to console.
import os
+from cloudinit import subp, util
from cloudinit.settings import PER_INSTANCE
-from cloudinit import subp
-from cloudinit import util
frequency = PER_INSTANCE
# This is a tool that cloud init provides
-HELPER_TOOL_TPL = '%s/cloud-init/write-ssh-key-fingerprints'
+HELPER_TOOL_TPL = "%s/cloud-init/write-ssh-key-fingerprints"
def _get_helper_tool_path(distro):
try:
base_lib = distro.usr_lib_exec
except AttributeError:
- base_lib = '/usr/lib'
+ base_lib = "/usr/lib"
return HELPER_TOOL_TPL % base_lib
def handle(name, cfg, cloud, log, _args):
if util.is_false(cfg.get("ssh", {}).get("emit_keys_to_console", True)):
- log.debug(("Skipping module named %s, "
- "logging of SSH host keys disabled"), name)
+ log.debug(
+ "Skipping module named %s, logging of SSH host keys disabled", name
+ )
return
helper_path = _get_helper_tool_path(cloud.distro)
if not os.path.exists(helper_path):
- log.warning(("Unable to activate module %s,"
- " helper tool not found at %s"), name, helper_path)
+ log.warning(
+ "Unable to activate module %s, helper tool not found at %s",
+ name,
+ helper_path,
+ )
return
- fp_blacklist = util.get_cfg_option_list(cfg,
- "ssh_fp_console_blacklist", [])
- key_blacklist = util.get_cfg_option_list(cfg,
- "ssh_key_console_blacklist",
- ["ssh-dss"])
+ fp_blacklist = util.get_cfg_option_list(
+ cfg, "ssh_fp_console_blacklist", []
+ )
+ key_blacklist = util.get_cfg_option_list(
+ cfg, "ssh_key_console_blacklist", ["ssh-dss"]
+ )
try:
- cmd = [helper_path, ','.join(fp_blacklist), ','.join(key_blacklist)]
+ cmd = [helper_path, ",".join(fp_blacklist), ",".join(key_blacklist)]
(stdout, _stderr) = subp.subp(cmd)
- util.multi_log("%s\n" % (stdout.strip()),
- stderr=False, console=True)
+ util.multi_log("%s\n" % (stdout.strip()), stderr=False, console=True)
except Exception:
log.warning("Writing keys to the system console failed!")
raise
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py
index 299c4d01..03ebf411 100644
--- a/cloudinit/config/cc_landscape.py
+++ b/cloudinit/config/cc_landscape.py
@@ -60,10 +60,7 @@ from io import BytesIO
from configobj import ConfigObj
-from cloudinit import type_utils
-from cloudinit import subp
-from cloudinit import util
-
+from cloudinit import subp, type_utils, util
from cloudinit.settings import PER_INSTANCE
frequency = PER_INSTANCE
@@ -71,15 +68,15 @@ frequency = PER_INSTANCE
LSC_CLIENT_CFG_FILE = "/etc/landscape/client.conf"
LS_DEFAULT_FILE = "/etc/default/landscape-client"
-distros = ['ubuntu']
+distros = ["ubuntu"]
# defaults taken from stock client.conf in landscape-client 11.07.1.1-0ubuntu2
LSC_BUILTIN_CFG = {
- 'client': {
- 'log_level': "info",
- 'url': "https://landscape.canonical.com/message-system",
- 'ping_url': "http://landscape.canonical.com/ping",
- 'data_path': "/var/lib/landscape/client",
+ "client": {
+ "log_level": "info",
+ "url": "https://landscape.canonical.com/message-system",
+ "ping_url": "http://landscape.canonical.com/ping",
+ "data_path": "/var/lib/landscape/client",
}
}
@@ -97,11 +94,13 @@ def handle(_name, cfg, cloud, log, _args):
raise RuntimeError(
"'landscape' key existed in config, but not a dictionary type,"
" is a {_type} instead".format(
- _type=type_utils.obj_name(ls_cloudcfg)))
+ _type=type_utils.obj_name(ls_cloudcfg)
+ )
+ )
if not ls_cloudcfg:
return
- cloud.distro.install_packages(('landscape-client',))
+ cloud.distro.install_packages(("landscape-client",))
merge_data = [
LSC_BUILTIN_CFG,
@@ -135,4 +134,5 @@ def merge_together(objs):
cfg.merge(ConfigObj(obj))
return cfg
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_locale.py b/cloudinit/config/cc_locale.py
index 7fed9abd..487f58f7 100644
--- a/cloudinit/config/cc_locale.py
+++ b/cloudinit/config/cc_locale.py
@@ -14,45 +14,48 @@ from cloudinit import util
from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema
from cloudinit.settings import PER_INSTANCE
-
frequency = PER_INSTANCE
-distros = ['all']
+distros = ["all"]
meta = {
- 'id': 'cc_locale',
- 'name': 'Locale',
- 'title': 'Set system locale',
- 'description': dedent(
+ "id": "cc_locale",
+ "name": "Locale",
+ "title": "Set system locale",
+ "description": dedent(
"""\
Configure the system locale and apply it system wide. By default use
the locale specified by the datasource."""
),
- 'distros': distros,
- 'examples': [
- dedent("""\
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
# Set the locale to ar_AE
locale: ar_AE
- """),
- dedent("""\
+ """
+ ),
+ dedent(
+ """\
# Set the locale to fr_CA in /etc/alternate_path/locale
locale: fr_CA
locale_configfile: /etc/alternate_path/locale
- """),
+ """
+ ),
],
- 'frequency': frequency,
+ "frequency": frequency,
}
schema = {
- 'type': 'object',
- 'properties': {
- 'locale': {
- 'type': 'string',
- 'description': (
+ "type": "object",
+ "properties": {
+ "locale": {
+ "type": "string",
+ "description": (
"The locale to set as the system's locale (e.g. ar_PS)"
),
},
- 'locale_configfile': {
- 'type': 'string',
- 'description': (
+ "locale_configfile": {
+ "type": "string",
+ "description": (
"The file in which to write the locale configuration (defaults"
" to the distro's default location)"
),
@@ -70,8 +73,9 @@ def handle(name, cfg, cloud, log, args):
locale = util.get_cfg_option_str(cfg, "locale", cloud.get_locale())
if util.is_false(locale):
- log.debug("Skipping module named %s, disabled by config: %s",
- name, locale)
+ log.debug(
+ "Skipping module named %s, disabled by config: %s", name, locale
+ )
return
validate_cloudconfig_schema(cfg, schema)
@@ -80,4 +84,5 @@ def handle(name, cfg, cloud, log, args):
locale_cfgfile = util.get_cfg_option_str(cfg, "locale_configfile")
cloud.distro.apply_locale(locale, locale_cfgfile)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py
index 486037d9..13ddcbe9 100644
--- a/cloudinit/config/cc_lxd.py
+++ b/cloudinit/config/cc_lxd.py
@@ -47,12 +47,12 @@ lxd-bridge will be configured accordingly.
domain: <domain>
"""
-from cloudinit import log as logging
-from cloudinit import subp
-from cloudinit import util
import os
-distros = ['ubuntu']
+from cloudinit import log as logging
+from cloudinit import subp, util
+
+distros = ["ubuntu"]
LOG = logging.getLogger(__name__)
@@ -61,36 +61,42 @@ _DEFAULT_NETWORK_NAME = "lxdbr0"
def handle(name, cfg, cloud, log, args):
# Get config
- lxd_cfg = cfg.get('lxd')
+ lxd_cfg = cfg.get("lxd")
if not lxd_cfg:
- log.debug("Skipping module named %s, not present or disabled by cfg",
- name)
+ log.debug(
+ "Skipping module named %s, not present or disabled by cfg", name
+ )
return
if not isinstance(lxd_cfg, dict):
- log.warning("lxd config must be a dictionary. found a '%s'",
- type(lxd_cfg))
+ log.warning(
+ "lxd config must be a dictionary. found a '%s'", type(lxd_cfg)
+ )
return
# Grab the configuration
- init_cfg = lxd_cfg.get('init')
+ init_cfg = lxd_cfg.get("init")
if not isinstance(init_cfg, dict):
- log.warning("lxd/init config must be a dictionary. found a '%s'",
- type(init_cfg))
+ log.warning(
+ "lxd/init config must be a dictionary. found a '%s'",
+ type(init_cfg),
+ )
init_cfg = {}
- bridge_cfg = lxd_cfg.get('bridge', {})
+ bridge_cfg = lxd_cfg.get("bridge", {})
if not isinstance(bridge_cfg, dict):
- log.warning("lxd/bridge config must be a dictionary. found a '%s'",
- type(bridge_cfg))
+ log.warning(
+ "lxd/bridge config must be a dictionary. found a '%s'",
+ type(bridge_cfg),
+ )
bridge_cfg = {}
# Install the needed packages
packages = []
if not subp.which("lxd"):
- packages.append('lxd')
+ packages.append("lxd")
- if init_cfg.get("storage_backend") == "zfs" and not subp.which('zfs'):
- packages.append('zfsutils-linux')
+ if init_cfg.get("storage_backend") == "zfs" and not subp.which("zfs"):
+ packages.append("zfsutils-linux")
if len(packages):
try:
@@ -102,23 +108,30 @@ def handle(name, cfg, cloud, log, args):
# Set up lxd if init config is given
if init_cfg:
init_keys = (
- 'network_address', 'network_port', 'storage_backend',
- 'storage_create_device', 'storage_create_loop',
- 'storage_pool', 'trust_password')
- subp.subp(['lxd', 'waitready', '--timeout=300'])
- cmd = ['lxd', 'init', '--auto']
+ "network_address",
+ "network_port",
+ "storage_backend",
+ "storage_create_device",
+ "storage_create_loop",
+ "storage_pool",
+ "trust_password",
+ )
+ subp.subp(["lxd", "waitready", "--timeout=300"])
+ cmd = ["lxd", "init", "--auto"]
for k in init_keys:
if init_cfg.get(k):
- cmd.extend(["--%s=%s" %
- (k.replace('_', '-'), str(init_cfg[k]))])
+ cmd.extend(
+ ["--%s=%s" % (k.replace("_", "-"), str(init_cfg[k]))]
+ )
subp.subp(cmd)
# Set up lxd-bridge if bridge config is given
dconf_comm = "debconf-communicate"
if bridge_cfg:
net_name = bridge_cfg.get("name", _DEFAULT_NETWORK_NAME)
- if os.path.exists("/etc/default/lxd-bridge") \
- and subp.which(dconf_comm):
+ if os.path.exists("/etc/default/lxd-bridge") and subp.which(
+ dconf_comm
+ ):
# Bridge configured through packaging
debconf = bridge_to_debconf(bridge_cfg)
@@ -126,39 +139,47 @@ def handle(name, cfg, cloud, log, args):
# Update debconf database
try:
log.debug("Setting lxd debconf via " + dconf_comm)
- data = "\n".join(["set %s %s" % (k, v)
- for k, v in debconf.items()]) + "\n"
- subp.subp(['debconf-communicate'], data)
+ data = (
+ "\n".join(
+ ["set %s %s" % (k, v) for k, v in debconf.items()]
+ )
+ + "\n"
+ )
+ subp.subp(["debconf-communicate"], data)
except Exception:
- util.logexc(log, "Failed to run '%s' for lxd with" %
- dconf_comm)
+ util.logexc(
+ log, "Failed to run '%s' for lxd with" % dconf_comm
+ )
# Remove the existing configuration file (forces re-generation)
util.del_file("/etc/default/lxd-bridge")
# Run reconfigure
log.debug("Running dpkg-reconfigure for lxd")
- subp.subp(['dpkg-reconfigure', 'lxd',
- '--frontend=noninteractive'])
+ subp.subp(["dpkg-reconfigure", "lxd", "--frontend=noninteractive"])
else:
# Built-in LXD bridge support
cmd_create, cmd_attach = bridge_to_cmd(bridge_cfg)
maybe_cleanup_default(
- net_name=net_name, did_init=bool(init_cfg),
- create=bool(cmd_create), attach=bool(cmd_attach))
+ net_name=net_name,
+ did_init=bool(init_cfg),
+ create=bool(cmd_create),
+ attach=bool(cmd_attach),
+ )
if cmd_create:
- log.debug("Creating lxd bridge: %s" %
- " ".join(cmd_create))
+ log.debug("Creating lxd bridge: %s" % " ".join(cmd_create))
_lxc(cmd_create)
if cmd_attach:
- log.debug("Setting up default lxd bridge: %s" %
- " ".join(cmd_attach))
+ log.debug(
+ "Setting up default lxd bridge: %s" % " ".join(cmd_attach)
+ )
_lxc(cmd_attach)
elif bridge_cfg:
raise RuntimeError(
- "Unable to configure lxd bridge without %s." + dconf_comm)
+ "Unable to configure lxd bridge without %s." + dconf_comm
+ )
def bridge_to_debconf(bridge_cfg):
@@ -180,33 +201,32 @@ def bridge_to_debconf(bridge_cfg):
if bridge_cfg.get("ipv4_address"):
debconf["lxd/bridge-ipv4"] = "true"
- debconf["lxd/bridge-ipv4-address"] = \
- bridge_cfg.get("ipv4_address")
- debconf["lxd/bridge-ipv4-netmask"] = \
- bridge_cfg.get("ipv4_netmask")
- debconf["lxd/bridge-ipv4-dhcp-first"] = \
- bridge_cfg.get("ipv4_dhcp_first")
- debconf["lxd/bridge-ipv4-dhcp-last"] = \
- bridge_cfg.get("ipv4_dhcp_last")
- debconf["lxd/bridge-ipv4-dhcp-leases"] = \
- bridge_cfg.get("ipv4_dhcp_leases")
- debconf["lxd/bridge-ipv4-nat"] = \
- bridge_cfg.get("ipv4_nat", "true")
+ debconf["lxd/bridge-ipv4-address"] = bridge_cfg.get("ipv4_address")
+ debconf["lxd/bridge-ipv4-netmask"] = bridge_cfg.get("ipv4_netmask")
+ debconf["lxd/bridge-ipv4-dhcp-first"] = bridge_cfg.get(
+ "ipv4_dhcp_first"
+ )
+ debconf["lxd/bridge-ipv4-dhcp-last"] = bridge_cfg.get(
+ "ipv4_dhcp_last"
+ )
+ debconf["lxd/bridge-ipv4-dhcp-leases"] = bridge_cfg.get(
+ "ipv4_dhcp_leases"
+ )
+ debconf["lxd/bridge-ipv4-nat"] = bridge_cfg.get("ipv4_nat", "true")
if bridge_cfg.get("ipv6_address"):
debconf["lxd/bridge-ipv6"] = "true"
- debconf["lxd/bridge-ipv6-address"] = \
- bridge_cfg.get("ipv6_address")
- debconf["lxd/bridge-ipv6-netmask"] = \
- bridge_cfg.get("ipv6_netmask")
- debconf["lxd/bridge-ipv6-nat"] = \
- bridge_cfg.get("ipv6_nat", "false")
+ debconf["lxd/bridge-ipv6-address"] = bridge_cfg.get("ipv6_address")
+ debconf["lxd/bridge-ipv6-netmask"] = bridge_cfg.get("ipv6_netmask")
+ debconf["lxd/bridge-ipv6-nat"] = bridge_cfg.get(
+ "ipv6_nat", "false"
+ )
if bridge_cfg.get("domain"):
debconf["lxd/bridge-domain"] = bridge_cfg.get("domain")
else:
- raise Exception("invalid bridge mode \"%s\"" % bridge_cfg.get("mode"))
+ raise Exception('invalid bridge mode "%s"' % bridge_cfg.get("mode"))
return debconf
@@ -217,37 +237,41 @@ def bridge_to_cmd(bridge_cfg):
bridge_name = bridge_cfg.get("name", _DEFAULT_NETWORK_NAME)
cmd_create = []
- cmd_attach = ["network", "attach-profile", bridge_name,
- "default", "eth0"]
+ cmd_attach = ["network", "attach-profile", bridge_name, "default", "eth0"]
if bridge_cfg.get("mode") == "existing":
return None, cmd_attach
if bridge_cfg.get("mode") != "new":
- raise Exception("invalid bridge mode \"%s\"" % bridge_cfg.get("mode"))
+ raise Exception('invalid bridge mode "%s"' % bridge_cfg.get("mode"))
cmd_create = ["network", "create", bridge_name]
if bridge_cfg.get("ipv4_address") and bridge_cfg.get("ipv4_netmask"):
- cmd_create.append("ipv4.address=%s/%s" %
- (bridge_cfg.get("ipv4_address"),
- bridge_cfg.get("ipv4_netmask")))
+ cmd_create.append(
+ "ipv4.address=%s/%s"
+ % (bridge_cfg.get("ipv4_address"), bridge_cfg.get("ipv4_netmask"))
+ )
if bridge_cfg.get("ipv4_nat", "true") == "true":
cmd_create.append("ipv4.nat=true")
- if bridge_cfg.get("ipv4_dhcp_first") and \
- bridge_cfg.get("ipv4_dhcp_last"):
- dhcp_range = "%s-%s" % (bridge_cfg.get("ipv4_dhcp_first"),
- bridge_cfg.get("ipv4_dhcp_last"))
+ if bridge_cfg.get("ipv4_dhcp_first") and bridge_cfg.get(
+ "ipv4_dhcp_last"
+ ):
+ dhcp_range = "%s-%s" % (
+ bridge_cfg.get("ipv4_dhcp_first"),
+ bridge_cfg.get("ipv4_dhcp_last"),
+ )
cmd_create.append("ipv4.dhcp.ranges=%s" % dhcp_range)
else:
cmd_create.append("ipv4.address=none")
if bridge_cfg.get("ipv6_address") and bridge_cfg.get("ipv6_netmask"):
- cmd_create.append("ipv6.address=%s/%s" %
- (bridge_cfg.get("ipv6_address"),
- bridge_cfg.get("ipv6_netmask")))
+ cmd_create.append(
+ "ipv6.address=%s/%s"
+ % (bridge_cfg.get("ipv6_address"), bridge_cfg.get("ipv6_netmask"))
+ )
if bridge_cfg.get("ipv6_nat", "false") == "true":
cmd_create.append("ipv6.nat=true")
@@ -262,14 +286,17 @@ def bridge_to_cmd(bridge_cfg):
def _lxc(cmd):
- env = {'LC_ALL': 'C',
- 'HOME': os.environ.get('HOME', '/root'),
- 'USER': os.environ.get('USER', 'root')}
- subp.subp(['lxc'] + list(cmd) + ["--force-local"], update_env=env)
+ env = {
+ "LC_ALL": "C",
+ "HOME": os.environ.get("HOME", "/root"),
+ "USER": os.environ.get("USER", "root"),
+ }
+ subp.subp(["lxc"] + list(cmd) + ["--force-local"], update_env=env)
-def maybe_cleanup_default(net_name, did_init, create, attach,
- profile="default", nic_name="eth0"):
+def maybe_cleanup_default(
+ net_name, did_init, create, attach, profile="default", nic_name="eth0"
+):
"""Newer versions of lxc (3.0.1+) create a lxdbr0 network when
'lxd init --auto' is run. Older versions did not.
@@ -306,4 +333,5 @@ def maybe_cleanup_default(net_name, did_init, create, attach,
raise e
LOG.debug(msg, nic_name, profile, fail_assume_enoent)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_mcollective.py b/cloudinit/config/cc_mcollective.py
index 41ea4fc9..1b0158ec 100644
--- a/cloudinit/config/cc_mcollective.py
+++ b/cloudinit/config/cc_mcollective.py
@@ -56,18 +56,21 @@ import io
from configobj import ConfigObj
from cloudinit import log as logging
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
PUBCERT_FILE = "/etc/mcollective/ssl/server-public.pem"
PRICERT_FILE = "/etc/mcollective/ssl/server-private.pem"
-SERVER_CFG = '/etc/mcollective/server.cfg'
+SERVER_CFG = "/etc/mcollective/server.cfg"
LOG = logging.getLogger(__name__)
-def configure(config, server_cfg=SERVER_CFG,
- pubcert_file=PUBCERT_FILE, pricert_file=PRICERT_FILE):
+def configure(
+ config,
+ server_cfg=SERVER_CFG,
+ pubcert_file=PUBCERT_FILE,
+ pricert_file=PRICERT_FILE,
+):
# Read server.cfg (if it exists) values from the
# original file in order to be able to mix the rest up.
try:
@@ -77,20 +80,20 @@ def configure(config, server_cfg=SERVER_CFG,
if e.errno != errno.ENOENT:
raise
else:
- LOG.debug("Did not find file %s (starting with an empty"
- " config)", server_cfg)
+ LOG.debug(
+ "Did not find file %s (starting with an empty config)",
+ server_cfg,
+ )
mcollective_config = ConfigObj()
for (cfg_name, cfg) in config.items():
- if cfg_name == 'public-cert':
+ if cfg_name == "public-cert":
util.write_file(pubcert_file, cfg, mode=0o644)
- mcollective_config[
- 'plugin.ssl_server_public'] = pubcert_file
- mcollective_config['securityprovider'] = 'ssl'
- elif cfg_name == 'private-cert':
+ mcollective_config["plugin.ssl_server_public"] = pubcert_file
+ mcollective_config["securityprovider"] = "ssl"
+ elif cfg_name == "private-cert":
util.write_file(pricert_file, cfg, mode=0o600)
- mcollective_config[
- 'plugin.ssl_server_private'] = pricert_file
- mcollective_config['securityprovider'] = 'ssl'
+ mcollective_config["plugin.ssl_server_private"] = pricert_file
+ mcollective_config["securityprovider"] = "ssl"
else:
if isinstance(cfg, str):
# Just set it in the 'main' section
@@ -126,21 +129,24 @@ def configure(config, server_cfg=SERVER_CFG,
def handle(name, cfg, cloud, log, _args):
# If there isn't a mcollective key in the configuration don't do anything
- if 'mcollective' not in cfg:
- log.debug(("Skipping module named %s, "
- "no 'mcollective' key in configuration"), name)
+ if "mcollective" not in cfg:
+ log.debug(
+ "Skipping module named %s, no 'mcollective' key in configuration",
+ name,
+ )
return
- mcollective_cfg = cfg['mcollective']
+ mcollective_cfg = cfg["mcollective"]
# Start by installing the mcollective package ...
cloud.distro.install_packages(("mcollective",))
# ... and then update the mcollective configuration
- if 'conf' in mcollective_cfg:
- configure(config=mcollective_cfg['conf'])
+ if "conf" in mcollective_cfg:
+ configure(config=mcollective_cfg["conf"])
# restart mcollective to handle updated config
- subp.subp(['service', 'mcollective', 'restart'], capture=False)
+ subp.subp(["service", "mcollective", "restart"], capture=False)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_migrator.py b/cloudinit/config/cc_migrator.py
index 79bcc27d..4fafb4af 100644
--- a/cloudinit/config/cc_migrator.py
+++ b/cloudinit/config/cc_migrator.py
@@ -29,16 +29,14 @@ false`` in config.
import os
import shutil
-from cloudinit import helpers
-from cloudinit import util
-
+from cloudinit import helpers, util
from cloudinit.settings import PER_ALWAYS
frequency = PER_ALWAYS
def _migrate_canon_sems(cloud):
- paths = (cloud.paths.get_ipath('sem'), cloud.paths.get_cpath('sem'))
+ paths = (cloud.paths.get_ipath("sem"), cloud.paths.get_cpath("sem"))
am_adjusted = 0
for sem_path in paths:
if not sem_path or not os.path.exists(sem_path):
@@ -57,12 +55,12 @@ def _migrate_canon_sems(cloud):
def _migrate_legacy_sems(cloud, log):
legacy_adjust = {
- 'apt-update-upgrade': [
- 'apt-configure',
- 'package-update-upgrade-install',
+ "apt-update-upgrade": [
+ "apt-configure",
+ "package-update-upgrade-install",
],
}
- paths = (cloud.paths.get_ipath('sem'), cloud.paths.get_cpath('sem'))
+ paths = (cloud.paths.get_ipath("sem"), cloud.paths.get_cpath("sem"))
for sem_path in paths:
if not sem_path or not os.path.exists(sem_path):
continue
@@ -78,8 +76,9 @@ def _migrate_legacy_sems(cloud, log):
util.del_file(os.path.join(sem_path, p))
(_name, freq) = os.path.splitext(p)
for m in migrate_to:
- log.debug("Migrating %s => %s with the same frequency",
- p, m)
+ log.debug(
+ "Migrating %s => %s with the same frequency", p, m
+ )
with sem_helper.lock(m, freq):
pass
@@ -90,8 +89,10 @@ def handle(name, cfg, cloud, log, _args):
log.debug("Skipping module named %s, migration disabled", name)
return
sems_moved = _migrate_canon_sems(cloud)
-    log.debug("Migrated %s semaphore files to their canonicalized names",
-              sems_moved)
+    log.debug(
+        "Migrated %s semaphore files to their canonicalized names", sems_moved
+ )
_migrate_legacy_sems(cloud, log)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index eeb008d2..ec2e46ff 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -62,15 +62,12 @@ swap file is created.
maxsize: <size in bytes>
"""
-from string import whitespace
-
import logging
import os
import re
+from string import whitespace
-from cloudinit import type_utils
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, type_utils, util
# Shortname matches 'sda', 'sda1', 'xvda', 'hda', 'sdb', xvdb, vda, vdd1, sr0
DEVICE_NAME_FILTER = r"^([x]{0,1}[shv]d[a-z][0-9]*|sr[0-9]+)$"
@@ -105,21 +102,25 @@ def is_network_device(name):
def _get_nth_partition_for_device(device_path, partition_number):
- potential_suffixes = [str(partition_number), 'p%s' % (partition_number,),
- '-part%s' % (partition_number,)]
+ potential_suffixes = [
+ str(partition_number),
+ "p%s" % (partition_number,),
+ "-part%s" % (partition_number,),
+ ]
for suffix in potential_suffixes:
- potential_partition_device = '%s%s' % (device_path, suffix)
+ potential_partition_device = "%s%s" % (device_path, suffix)
if os.path.exists(potential_partition_device):
return potential_partition_device
return None
def _is_block_device(device_path, partition_path=None):
- device_name = os.path.realpath(device_path).split('/')[-1]
- sys_path = os.path.join('/sys/block/', device_name)
+ device_name = os.path.realpath(device_path).split("/")[-1]
+ sys_path = os.path.join("/sys/block/", device_name)
if partition_path is not None:
sys_path = os.path.join(
- sys_path, os.path.realpath(partition_path).split('/')[-1])
+ sys_path, os.path.realpath(partition_path).split("/")[-1]
+ )
return os.path.exists(sys_path)
@@ -159,8 +160,9 @@ def sanitize_devname(startname, transformer, log, aliases=None):
if partition_number is None:
partition_path = _get_nth_partition_for_device(device_path, 1)
else:
- partition_path = _get_nth_partition_for_device(device_path,
- partition_number)
+ partition_path = _get_nth_partition_for_device(
+ device_path, partition_number
+ )
if partition_path is None:
return None
@@ -174,12 +176,12 @@ def sanitize_devname(startname, transformer, log, aliases=None):
def suggested_swapsize(memsize=None, maxsize=None, fsys=None):
# make a suggestion on the size of swap for this system.
if memsize is None:
- memsize = util.read_meminfo()['total']
+ memsize = util.read_meminfo()["total"]
GB = 2 ** 30
sugg_max = 8 * GB
- info = {'avail': 'na', 'max_in': maxsize, 'mem': memsize}
+ info = {"avail": "na", "max_in": maxsize, "mem": memsize}
if fsys is None and maxsize is None:
# set max to 8GB default if no filesystem given
@@ -187,18 +189,18 @@ def suggested_swapsize(memsize=None, maxsize=None, fsys=None):
elif fsys:
statvfs = os.statvfs(fsys)
avail = statvfs.f_frsize * statvfs.f_bfree
- info['avail'] = avail
+ info["avail"] = avail
if maxsize is None:
# set to 25% of filesystem space
maxsize = min(int(avail / 4), sugg_max)
- elif maxsize > ((avail * .9)):
+ elif maxsize > ((avail * 0.9)):
# set to 90% of available disk space
- maxsize = int(avail * .9)
+ maxsize = int(avail * 0.9)
elif maxsize is None:
maxsize = sugg_max
- info['max'] = maxsize
+ info["max"] = maxsize
formulas = [
# < 1G: swap = double memory
@@ -226,7 +228,7 @@ def suggested_swapsize(memsize=None, maxsize=None, fsys=None):
if size is not None:
size = maxsize
- info['size'] = size
+ info["size"] = size
MB = 2 ** 20
pinfo = {}
@@ -236,9 +238,14 @@ def suggested_swapsize(memsize=None, maxsize=None, fsys=None):
else:
pinfo[k] = v
- LOG.debug("suggest %s swap for %s memory with '%s'"
- " disk given max=%s [max=%s]'", pinfo['size'], pinfo['mem'],
- pinfo['avail'], pinfo['max_in'], pinfo['max'])
+ LOG.debug(
+ "suggest %s swap for %s memory with '%s' disk given max=%s [max=%s]'",
+ pinfo["size"],
+ pinfo["mem"],
+ pinfo["avail"],
+ pinfo["max_in"],
+ pinfo["max"],
+ )
return size
@@ -248,14 +255,23 @@ def create_swapfile(fname: str, size: str) -> None:
errmsg = "Failed to create swapfile '%s' of size %sMB via %s: %s"
def create_swap(fname, size, method):
- LOG.debug("Creating swapfile in '%s' on fstype '%s' using '%s'",
- fname, fstype, method)
+ LOG.debug(
+ "Creating swapfile in '%s' on fstype '%s' using '%s'",
+ fname,
+ fstype,
+ method,
+ )
if method == "fallocate":
- cmd = ['fallocate', '-l', '%sM' % size, fname]
+ cmd = ["fallocate", "-l", "%sM" % size, fname]
elif method == "dd":
- cmd = ['dd', 'if=/dev/zero', 'of=%s' % fname, 'bs=1M',
- 'count=%s' % size]
+ cmd = [
+ "dd",
+ "if=/dev/zero",
+ "of=%s" % fname,
+ "bs=1M",
+ "count=%s" % size,
+ ]
try:
subp.subp(cmd, capture=True)
@@ -269,8 +285,9 @@ def create_swapfile(fname: str, size: str) -> None:
fstype = util.get_mount_info(swap_dir)[1]
- if (fstype == "xfs" and
- util.kernel_version() < (4, 18)) or fstype == "btrfs":
+ if (
+ fstype == "xfs" and util.kernel_version() < (4, 18)
+ ) or fstype == "btrfs":
create_swap(fname, size, "dd")
else:
try:
@@ -282,7 +299,7 @@ def create_swapfile(fname: str, size: str) -> None:
if os.path.exists(fname):
util.chmod(fname, 0o600)
try:
- subp.subp(['mkswap', fname])
+ subp.subp(["mkswap", fname])
except subp.ProcessExecutionError:
util.del_file(fname)
raise
@@ -297,37 +314,42 @@ def setup_swapfile(fname, size=None, maxsize=None):
swap_dir = os.path.dirname(fname)
if str(size).lower() == "auto":
try:
- memsize = util.read_meminfo()['total']
+ memsize = util.read_meminfo()["total"]
except IOError:
LOG.debug("Not creating swap: failed to read meminfo")
return
util.ensure_dir(swap_dir)
- size = suggested_swapsize(fsys=swap_dir, maxsize=maxsize,
- memsize=memsize)
+ size = suggested_swapsize(
+ fsys=swap_dir, maxsize=maxsize, memsize=memsize
+ )
mibsize = str(int(size / (2 ** 20)))
if not size:
LOG.debug("Not creating swap: suggested size was 0")
return
- util.log_time(LOG.debug, msg="Setting up swap file", func=create_swapfile,
- args=[fname, mibsize])
+ util.log_time(
+ LOG.debug,
+ msg="Setting up swap file",
+ func=create_swapfile,
+ args=[fname, mibsize],
+ )
return fname
def handle_swapcfg(swapcfg):
"""handle the swap config, calling setup_swap if necessary.
- return None or (filename, size)
+ return None or (filename, size)
"""
if not isinstance(swapcfg, dict):
LOG.warning("input for swap config was not a dict.")
return None
- fname = swapcfg.get('filename', '/swap.img')
- size = swapcfg.get('size', 0)
- maxsize = swapcfg.get('maxsize', None)
+ fname = swapcfg.get("filename", "/swap.img")
+ size = swapcfg.get("size", 0)
+ maxsize = swapcfg.get("maxsize", None)
if not (size and fname):
LOG.debug("no need to setup swap")
@@ -335,8 +357,10 @@ def handle_swapcfg(swapcfg):
if os.path.exists(fname):
if not os.path.exists("/proc/swaps"):
- LOG.debug("swap file %s exists, but no /proc/swaps exists, "
- "being safe", fname)
+ LOG.debug(
+ "swap file %s exists, but no /proc/swaps exists, being safe",
+ fname,
+ )
return fname
try:
for line in util.load_file("/proc/swaps").splitlines():
@@ -345,8 +369,9 @@ def handle_swapcfg(swapcfg):
return fname
LOG.debug("swap file %s exists, but not in /proc/swaps", fname)
except Exception:
- LOG.warning("swap file %s exists. Error reading /proc/swaps",
- fname)
+ LOG.warning(
+ "swap file %s exists. Error reading /proc/swaps", fname
+ )
return fname
try:
@@ -373,8 +398,10 @@ def handle(_name, cfg, cloud, log, _args):
defvals = cfg.get("mount_default_fields", defvals)
# these are our default set of mounts
- defmnts = [["ephemeral0", "/mnt", "auto", defvals[3], "0", "2"],
- ["swap", "none", "swap", "sw", "0", "0"]]
+ defmnts = [
+ ["ephemeral0", "/mnt", "auto", defvals[3], "0", "2"],
+ ["swap", "none", "swap", "sw", "0", "0"],
+ ]
cfgmnt = []
if "mounts" in cfg:
@@ -404,13 +431,17 @@ def handle(_name, cfg, cloud, log, _args):
for i in range(len(cfgmnt)):
# skip something that wasn't a list
if not isinstance(cfgmnt[i], list):
- log.warning("Mount option %s not a list, got a %s instead",
- (i + 1), type_utils.obj_name(cfgmnt[i]))
+ log.warning(
+ "Mount option %s not a list, got a %s instead",
+ (i + 1),
+ type_utils.obj_name(cfgmnt[i]),
+ )
continue
start = str(cfgmnt[i][0])
- sanitized = sanitize_devname(start, cloud.device_name_to_device, log,
- aliases=device_aliases)
+ sanitized = sanitize_devname(
+ start, cloud.device_name_to_device, log, aliases=device_aliases
+ )
if sanitized != start:
log.debug("changed %s => %s" % (start, sanitized))
@@ -418,8 +449,11 @@ def handle(_name, cfg, cloud, log, _args):
log.debug("Ignoring nonexistent named mount %s", start)
continue
elif sanitized in fstab_devs:
- log.info("Device %s already defined in fstab: %s",
- sanitized, fstab_devs[sanitized])
+ log.info(
+ "Device %s already defined in fstab: %s",
+ sanitized,
+ fstab_devs[sanitized],
+ )
continue
cfgmnt[i][0] = sanitized
@@ -452,8 +486,9 @@ def handle(_name, cfg, cloud, log, _args):
# entry has the same device name
for defmnt in defmnts:
start = defmnt[0]
- sanitized = sanitize_devname(start, cloud.device_name_to_device, log,
- aliases=device_aliases)
+ sanitized = sanitize_devname(
+ start, cloud.device_name_to_device, log, aliases=device_aliases
+ )
if sanitized != start:
log.debug("changed default device %s => %s" % (start, sanitized))
@@ -461,8 +496,11 @@ def handle(_name, cfg, cloud, log, _args):
log.debug("Ignoring nonexistent default named mount %s", start)
continue
elif sanitized in fstab_devs:
- log.debug("Device %s already defined in fstab: %s",
- sanitized, fstab_devs[sanitized])
+ log.debug(
+ "Device %s already defined in fstab: %s",
+ sanitized,
+ fstab_devs[sanitized],
+ )
continue
defmnt[0] = sanitized
@@ -474,8 +512,7 @@ def handle(_name, cfg, cloud, log, _args):
break
if cfgmnt_has:
- log.debug(("Not including %s, already"
- " previously included"), start)
+ log.debug("Not including %s, already previously included", start)
continue
cfgmnt.append(defmnt)
@@ -488,7 +525,7 @@ def handle(_name, cfg, cloud, log, _args):
else:
actlist.append(x)
- swapret = handle_swapcfg(cfg.get('swap', {}))
+ swapret = handle_swapcfg(cfg.get("swap", {}))
if swapret:
actlist.append([swapret, "none", "swap", "sw", "0", "0"])
@@ -507,10 +544,11 @@ def handle(_name, cfg, cloud, log, _args):
needswap = True
if line[1].startswith("/"):
dirs.append(line[1])
- cc_lines.append('\t'.join(line))
+ cc_lines.append("\t".join(line))
- mount_points = [v['mountpoint'] for k, v in util.mounts().items()
- if 'mountpoint' in v]
+ mount_points = [
+ v["mountpoint"] for k, v in util.mounts().items() if "mountpoint" in v
+ ]
for d in dirs:
try:
util.ensure_dir(d)
@@ -525,11 +563,12 @@ def handle(_name, cfg, cloud, log, _args):
sadds = [WS.sub(" ", n) for n in cc_lines]
sdrops = [WS.sub(" ", n) for n in fstab_removed]
- sops = (["- " + drop for drop in sdrops if drop not in sadds] +
- ["+ " + add for add in sadds if add not in sdrops])
+ sops = ["- " + drop for drop in sdrops if drop not in sadds] + [
+ "+ " + add for add in sadds if add not in sdrops
+ ]
fstab_lines.extend(cc_lines)
- contents = "%s\n" % ('\n'.join(fstab_lines))
+ contents = "%s\n" % "\n".join(fstab_lines)
util.write_file(FSTAB_PATH, contents)
activate_cmds = []
@@ -549,7 +588,7 @@ def handle(_name, cfg, cloud, log, _args):
fmt = "Activating swap and mounts with: %s"
for cmd in activate_cmds:
- fmt = "Activate mounts: %s:" + ' '.join(cmd)
+ fmt = "Activate mounts: %s:" + " ".join(cmd)
try:
subp.subp(cmd)
log.debug(fmt, "PASS")
@@ -557,4 +596,5 @@ def handle(_name, cfg, cloud, log, _args):
log.warning(fmt, "FAIL")
util.logexc(log, fmt, "FAIL")
+
# vi: ts=4 expandtab
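The sops reflow above is purely cosmetic; the underlying logic stays a whitespace-normalised comparison of removed versus added fstab lines. A small standalone sketch of that comparison, using hypothetical fstab entries rather than the module's real inputs:

import re

WS = re.compile(r"[\s]+")  # same whitespace-collapsing pattern the module uses

# hypothetical before/after fstab entries, purely for illustration
fstab_removed = ["/dev/vdb  /mnt  auto  defaults,nofail  0  2"]
cc_lines = ["/dev/vdb\t/mnt\tauto\tdefaults,nofail,comment=cloudconfig\t0\t2"]

sadds = [WS.sub(" ", n) for n in cc_lines]
sdrops = [WS.sub(" ", n) for n in fstab_removed]
# dropped-only entries are reported with "-", new-only entries with "+"
sops = ["- " + drop for drop in sdrops if drop not in sadds] + [
    "+ " + add for add in sadds if add not in sdrops
]
print(sops)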
diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py
index c55d5d86..a31da9bb 100644
--- a/cloudinit/config/cc_ntp.py
+++ b/cloudinit/config/cc_ntp.py
@@ -11,124 +11,132 @@ import os
from textwrap import dedent
from cloudinit import log as logging
-from cloudinit import temp_utils
-from cloudinit import templater
-from cloudinit import type_utils
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, temp_utils, templater, type_utils, util
from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema
from cloudinit.settings import PER_INSTANCE
LOG = logging.getLogger(__name__)
frequency = PER_INSTANCE
-NTP_CONF = '/etc/ntp.conf'
+NTP_CONF = "/etc/ntp.conf"
NR_POOL_SERVERS = 4
-distros = ['almalinux', 'alpine', 'centos', 'cloudlinux', 'debian',
- 'eurolinux', 'fedora', 'miraclelinux', 'openEuler', 'opensuse',
- 'photon', 'rhel', 'rocky', 'sles', 'ubuntu', 'virtuozzo']
+distros = [
+ "almalinux",
+ "alpine",
+ "centos",
+ "cloudlinux",
+ "debian",
+ "eurolinux",
+ "fedora",
+ "miraclelinux",
+ "openEuler",
+ "opensuse",
+ "photon",
+ "rhel",
+ "rocky",
+ "sles",
+ "ubuntu",
+ "virtuozzo",
+]
NTP_CLIENT_CONFIG = {
- 'chrony': {
- 'check_exe': 'chronyd',
- 'confpath': '/etc/chrony.conf',
- 'packages': ['chrony'],
- 'service_name': 'chrony',
- 'template_name': 'chrony.conf.{distro}',
- 'template': None,
+ "chrony": {
+ "check_exe": "chronyd",
+ "confpath": "/etc/chrony.conf",
+ "packages": ["chrony"],
+ "service_name": "chrony",
+ "template_name": "chrony.conf.{distro}",
+ "template": None,
},
- 'ntp': {
- 'check_exe': 'ntpd',
- 'confpath': NTP_CONF,
- 'packages': ['ntp'],
- 'service_name': 'ntp',
- 'template_name': 'ntp.conf.{distro}',
- 'template': None,
+ "ntp": {
+ "check_exe": "ntpd",
+ "confpath": NTP_CONF,
+ "packages": ["ntp"],
+ "service_name": "ntp",
+ "template_name": "ntp.conf.{distro}",
+ "template": None,
},
- 'ntpdate': {
- 'check_exe': 'ntpdate',
- 'confpath': NTP_CONF,
- 'packages': ['ntpdate'],
- 'service_name': 'ntpdate',
- 'template_name': 'ntp.conf.{distro}',
- 'template': None,
+ "ntpdate": {
+ "check_exe": "ntpdate",
+ "confpath": NTP_CONF,
+ "packages": ["ntpdate"],
+ "service_name": "ntpdate",
+ "template_name": "ntp.conf.{distro}",
+ "template": None,
},
- 'systemd-timesyncd': {
- 'check_exe': '/lib/systemd/systemd-timesyncd',
- 'confpath': '/etc/systemd/timesyncd.conf.d/cloud-init.conf',
- 'packages': [],
- 'service_name': 'systemd-timesyncd',
- 'template_name': 'timesyncd.conf',
- 'template': None,
+ "systemd-timesyncd": {
+ "check_exe": "/lib/systemd/systemd-timesyncd",
+ "confpath": "/etc/systemd/timesyncd.conf.d/cloud-init.conf",
+ "packages": [],
+ "service_name": "systemd-timesyncd",
+ "template_name": "timesyncd.conf",
+ "template": None,
},
}
# These are distro-specific configuration overrides of the base config
DISTRO_CLIENT_CONFIG = {
- 'alpine': {
- 'chrony': {
- 'confpath': '/etc/chrony/chrony.conf',
- 'service_name': 'chronyd',
+ "alpine": {
+ "chrony": {
+ "confpath": "/etc/chrony/chrony.conf",
+ "service_name": "chronyd",
},
- 'ntp': {
- 'confpath': '/etc/ntp.conf',
- 'packages': [],
- 'service_name': 'ntpd',
+ "ntp": {
+ "confpath": "/etc/ntp.conf",
+ "packages": [],
+ "service_name": "ntpd",
},
},
- 'debian': {
- 'chrony': {
- 'confpath': '/etc/chrony/chrony.conf',
+ "debian": {
+ "chrony": {
+ "confpath": "/etc/chrony/chrony.conf",
},
},
- 'opensuse': {
- 'chrony': {
- 'service_name': 'chronyd',
+ "opensuse": {
+ "chrony": {
+ "service_name": "chronyd",
},
- 'ntp': {
- 'confpath': '/etc/ntp.conf',
- 'service_name': 'ntpd',
+ "ntp": {
+ "confpath": "/etc/ntp.conf",
+ "service_name": "ntpd",
},
- 'systemd-timesyncd': {
- 'check_exe': '/usr/lib/systemd/systemd-timesyncd',
+ "systemd-timesyncd": {
+ "check_exe": "/usr/lib/systemd/systemd-timesyncd",
},
},
- 'photon': {
- 'chrony': {
- 'service_name': 'chronyd',
+ "photon": {
+ "chrony": {
+ "service_name": "chronyd",
},
- 'ntp': {
- 'service_name': 'ntpd',
- 'confpath': '/etc/ntp.conf'
- },
- 'systemd-timesyncd': {
- 'check_exe': '/usr/lib/systemd/systemd-timesyncd',
- 'confpath': '/etc/systemd/timesyncd.conf',
+ "ntp": {"service_name": "ntpd", "confpath": "/etc/ntp.conf"},
+ "systemd-timesyncd": {
+ "check_exe": "/usr/lib/systemd/systemd-timesyncd",
+ "confpath": "/etc/systemd/timesyncd.conf",
},
},
- 'rhel': {
- 'ntp': {
- 'service_name': 'ntpd',
+ "rhel": {
+ "ntp": {
+ "service_name": "ntpd",
},
- 'chrony': {
- 'service_name': 'chronyd',
+ "chrony": {
+ "service_name": "chronyd",
},
},
- 'sles': {
- 'chrony': {
- 'service_name': 'chronyd',
+ "sles": {
+ "chrony": {
+ "service_name": "chronyd",
},
- 'ntp': {
- 'confpath': '/etc/ntp.conf',
- 'service_name': 'ntpd',
+ "ntp": {
+ "confpath": "/etc/ntp.conf",
+ "service_name": "ntpd",
},
- 'systemd-timesyncd': {
- 'check_exe': '/usr/lib/systemd/systemd-timesyncd',
+ "systemd-timesyncd": {
+ "check_exe": "/usr/lib/systemd/systemd-timesyncd",
},
},
- 'ubuntu': {
- 'chrony': {
- 'confpath': '/etc/chrony/chrony.conf',
+ "ubuntu": {
+ "chrony": {
+ "confpath": "/etc/chrony/chrony.conf",
},
},
}
@@ -141,10 +149,11 @@ DISTRO_CLIENT_CONFIG = {
# configuration.
meta = {
- 'id': 'cc_ntp',
- 'name': 'NTP',
- 'title': 'enable and configure ntp',
- 'description': dedent("""\
+ "id": "cc_ntp",
+ "name": "NTP",
+ "title": "enable and configure ntp",
+ "description": dedent(
+ """\
Handle ntp configuration. If ntp is not installed on the system and
ntp configuration is specified, ntp will be installed. If there is a
default ntp config file in the image or one is present in the
@@ -152,16 +161,20 @@ meta = {
appended to the filename before any changes are made. A list of ntp
pools and ntp servers can be provided under the ``ntp`` config key.
If no ntp ``servers`` or ``pools`` are provided, 4 pools will be used
- in the format ``{0-3}.{distro}.pool.ntp.org``."""),
- 'distros': distros,
- 'examples': [
- dedent("""\
+ in the format ``{0-3}.{distro}.pool.ntp.org``."""
+ ),
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
# Override ntp with chrony configuration on Ubuntu
ntp:
enabled: true
ntp_client: chrony # Uses cloud-init default chrony configuration
- """),
- dedent("""\
+ """
+ ),
+ dedent(
+ """\
# Provide a custom ntp client configuration
ntp:
enabled: true
@@ -188,120 +201,137 @@ meta = {
servers:
- ntp.server.local
- ntp.ubuntu.com
- - 192.168.23.2""")],
- 'frequency': PER_INSTANCE,
+ - 192.168.23.2"""
+ ),
+ ],
+ "frequency": PER_INSTANCE,
}
schema = {
- 'type': 'object',
- 'properties': {
- 'ntp': {
- 'type': ['object', 'null'],
- 'properties': {
- 'pools': {
- 'type': 'array',
- 'items': {
- 'type': 'string',
- 'format': 'hostname'
- },
- 'uniqueItems': True,
- 'description': dedent("""\
+ "type": "object",
+ "properties": {
+ "ntp": {
+ "type": ["object", "null"],
+ "properties": {
+ "pools": {
+ "type": "array",
+ "items": {"type": "string", "format": "hostname"},
+ "uniqueItems": True,
+ "description": dedent(
+ """\
List of ntp pools. If both pools and servers are
empty, 4 default pool servers will be provided of
the format ``{0-3}.{distro}.pool.ntp.org``. NOTE:
for Alpine Linux when using the Busybox NTP client
this setting will be ignored due to the limited
- functionality of Busybox's ntpd.""")
+ functionality of Busybox's ntpd."""
+ ),
},
- 'servers': {
- 'type': 'array',
- 'items': {
- 'type': 'string',
- 'format': 'hostname'
- },
- 'uniqueItems': True,
- 'description': dedent("""\
+ "servers": {
+ "type": "array",
+ "items": {"type": "string", "format": "hostname"},
+ "uniqueItems": True,
+ "description": dedent(
+ """\
List of ntp servers. If both pools and servers are
empty, 4 default pool servers will be provided with
- the format ``{0-3}.{distro}.pool.ntp.org``.""")
+ the format ``{0-3}.{distro}.pool.ntp.org``."""
+ ),
},
- 'ntp_client': {
- 'type': 'string',
- 'default': 'auto',
- 'description': dedent("""\
+ "ntp_client": {
+ "type": "string",
+ "default": "auto",
+ "description": dedent(
+ """\
Name of an NTP client to use to configure system NTP.
When unprovided or 'auto' the default client preferred
by the distribution will be used. The following
built-in client names can be used to override existing
configuration defaults: chrony, ntp, ntpdate,
- systemd-timesyncd."""),
+ systemd-timesyncd."""
+ ),
},
- 'enabled': {
- 'type': 'boolean',
- 'default': True,
- 'description': dedent("""\
+ "enabled": {
+ "type": "boolean",
+ "default": True,
+ "description": dedent(
+ """\
Attempt to enable ntp clients if set to True. If set
to False, ntp client will not be configured or
- installed"""),
+ installed"""
+ ),
},
- 'config': {
- 'description': dedent("""\
+ "config": {
+ "description": dedent(
+ """\
Configuration settings or overrides for the
- ``ntp_client`` specified."""),
- 'type': ['object'],
- 'properties': {
- 'confpath': {
- 'type': 'string',
- 'description': dedent("""\
+ ``ntp_client`` specified."""
+ ),
+ "type": ["object"],
+ "properties": {
+ "confpath": {
+ "type": "string",
+ "description": dedent(
+ """\
The path to where the ``ntp_client``
- configuration is written."""),
+ configuration is written."""
+ ),
},
- 'check_exe': {
- 'type': 'string',
- 'description': dedent("""\
+ "check_exe": {
+ "type": "string",
+ "description": dedent(
+ """\
The executable name for the ``ntp_client``.
For example, ntp service ``check_exe`` is
- 'ntpd' because it runs the ntpd binary."""),
+ 'ntpd' because it runs the ntpd binary."""
+ ),
},
- 'packages': {
- 'type': 'array',
- 'items': {
- 'type': 'string',
+ "packages": {
+ "type": "array",
+ "items": {
+ "type": "string",
},
- 'uniqueItems': True,
- 'description': dedent("""\
+ "uniqueItems": True,
+ "description": dedent(
+ """\
List of packages needed to be installed for the
- selected ``ntp_client``."""),
+ selected ``ntp_client``."""
+ ),
},
- 'service_name': {
- 'type': 'string',
- 'description': dedent("""\
+ "service_name": {
+ "type": "string",
+ "description": dedent(
+ """\
The systemd or sysvinit service name used to
start and stop the ``ntp_client``
- service."""),
+ service."""
+ ),
},
- 'template': {
- 'type': 'string',
- 'description': dedent("""\
+ "template": {
+ "type": "string",
+ "description": dedent(
+ """\
Inline template allowing users to define their
own ``ntp_client`` configuration template.
The value must start with '## template:jinja'
to enable use of templating support.
- """),
+ """
+ ),
},
},
# Don't use REQUIRED_NTP_CONFIG_KEYS to allow for override
# of builtin client values.
- 'minProperties': 1, # If we have config, define something
- 'additionalProperties': False
+ "minProperties": 1, # If we have config, define something
+ "additionalProperties": False,
},
},
- 'additionalProperties': False
+ "additionalProperties": False,
}
- }
+ },
}
-REQUIRED_NTP_CONFIG_KEYS = frozenset([
- 'check_exe', 'confpath', 'packages', 'service_name'])
+REQUIRED_NTP_CONFIG_KEYS = frozenset(
+ ["check_exe", "confpath", "packages", "service_name"]
+)
__doc__ = get_meta_doc(meta, schema) # Supplement python help()
@@ -334,21 +364,23 @@ def select_ntp_client(ntp_client, distro):
distro_cfg = distro_ntp_client_configs(distro.name)
# user specified client, return its config
- if ntp_client and ntp_client != 'auto':
- LOG.debug('Selected NTP client "%s" via user-data configuration',
- ntp_client)
+ if ntp_client and ntp_client != "auto":
+ LOG.debug(
+ 'Selected NTP client "%s" via user-data configuration', ntp_client
+ )
return distro_cfg.get(ntp_client, {})
# default to auto if unset in distro
- distro_ntp_client = distro.get_option('ntp_client', 'auto')
+ distro_ntp_client = distro.get_option("ntp_client", "auto")
clientcfg = {}
if distro_ntp_client == "auto":
for client in distro.preferred_ntp_clients:
cfg = distro_cfg.get(client)
- if subp.which(cfg.get('check_exe')):
- LOG.debug('Selected NTP client "%s", already installed',
- client)
+ if subp.which(cfg.get("check_exe")):
+ LOG.debug(
+ 'Selected NTP client "%s", already installed', client
+ )
clientcfg = cfg
break
@@ -356,11 +388,14 @@ def select_ntp_client(ntp_client, distro):
client = distro.preferred_ntp_clients[0]
LOG.debug(
'Selected distro preferred NTP client "%s", not yet installed',
- client)
+ client,
+ )
clientcfg = distro_cfg.get(client)
else:
- LOG.debug('Selected NTP client "%s" via distro system config',
- distro_ntp_client)
+ LOG.debug(
+ 'Selected NTP client "%s" via distro system config',
+ distro_ntp_client,
+ )
clientcfg = distro_cfg.get(distro_ntp_client, {})
return clientcfg
@@ -378,7 +413,7 @@ def install_ntp_client(install_func, packages=None, check_exe="ntpd"):
if subp.which(check_exe):
return
if packages is None:
- packages = ['ntp']
+ packages = ["ntp"]
install_func(packages)
@@ -403,25 +438,34 @@ def generate_server_names(distro):
names = []
pool_distro = distro
- if distro == 'sles':
+ if distro == "sles":
# For legal reasons x.pool.sles.ntp.org does not exist,
# use the opensuse pool
- pool_distro = 'opensuse'
- elif distro == 'alpine' or distro == 'eurolinux':
+ pool_distro = "opensuse"
+ elif distro == "alpine" or distro == "eurolinux":
# Alpine-specific pool (i.e. x.alpine.pool.ntp.org) does not exist
# so use general x.pool.ntp.org instead. The same applies to EuroLinux
- pool_distro = ''
+ pool_distro = ""
for x in range(0, NR_POOL_SERVERS):
- names.append(".".join(
- [n for n in [str(x)] + [pool_distro] + ['pool.ntp.org'] if n]))
+ names.append(
+ ".".join(
+ [n for n in [str(x)] + [pool_distro] + ["pool.ntp.org"] if n]
+ )
+ )
return names
-def write_ntp_config_template(distro_name, service_name=None, servers=None,
- pools=None, path=None, template_fn=None,
- template=None):
+def write_ntp_config_template(
+ distro_name,
+ service_name=None,
+ servers=None,
+ pools=None,
+ path=None,
+ template_fn=None,
+ template=None,
+):
"""Render a ntp client configuration for the specified client.
@param distro_name: string. The distro class name.
@@ -444,27 +488,30 @@ def write_ntp_config_template(distro_name, service_name=None, servers=None,
if not pools:
pools = []
- if (len(servers) == 0 and distro_name == 'alpine' and
- service_name == 'ntpd'):
+ if (
+ len(servers) == 0
+ and distro_name == "alpine"
+ and service_name == "ntpd"
+ ):
# Alpine's Busybox ntpd only understands "servers" configuration
# and not "pool" configuration.
servers = generate_server_names(distro_name)
- LOG.debug(
- 'Adding distro default ntp servers: %s', ','.join(servers))
+ LOG.debug("Adding distro default ntp servers: %s", ",".join(servers))
elif len(servers) == 0 and len(pools) == 0:
pools = generate_server_names(distro_name)
LOG.debug(
- 'Adding distro default ntp pool servers: %s', ','.join(pools))
+ "Adding distro default ntp pool servers: %s", ",".join(pools)
+ )
if not path:
- raise ValueError('Invalid value for path parameter')
+ raise ValueError("Invalid value for path parameter")
if not template_fn and not template:
- raise ValueError('Not template_fn or template provided')
+ raise ValueError("Not template_fn or template provided")
- params = {'servers': servers, 'pools': pools}
+ params = {"servers": servers, "pools": pools}
if template:
- tfile = temp_utils.mkstemp(prefix='template_name-', suffix=".tmpl")
+ tfile = temp_utils.mkstemp(prefix="template_name-", suffix=".tmpl")
template_fn = tfile[1] # filepath is second item in tuple
util.write_file(template_fn, content=template)
@@ -487,50 +534,62 @@ def supplemental_schema_validation(ntp_config):
errors = []
missing = REQUIRED_NTP_CONFIG_KEYS.difference(set(ntp_config.keys()))
if missing:
- keys = ', '.join(sorted(missing))
+ keys = ", ".join(sorted(missing))
errors.append(
- 'Missing required ntp:config keys: {keys}'.format(keys=keys))
- elif not any([ntp_config.get('template'),
- ntp_config.get('template_name')]):
+ "Missing required ntp:config keys: {keys}".format(keys=keys)
+ )
+ elif not any(
+ [ntp_config.get("template"), ntp_config.get("template_name")]
+ ):
errors.append(
- 'Either ntp:config:template or ntp:config:template_name values'
- ' are required')
+ "Either ntp:config:template or ntp:config:template_name values"
+ " are required"
+ )
for key, value in sorted(ntp_config.items()):
- keypath = 'ntp:config:' + key
- if key == 'confpath':
+ keypath = "ntp:config:" + key
+ if key == "confpath":
if not all([value, isinstance(value, str)]):
errors.append(
- 'Expected a config file path {keypath}.'
- ' Found ({value})'.format(keypath=keypath, value=value))
- elif key == 'packages':
+ "Expected a config file path {keypath}."
+ " Found ({value})".format(keypath=keypath, value=value)
+ )
+ elif key == "packages":
if not isinstance(value, list):
errors.append(
- 'Expected a list of required package names for {keypath}.'
- ' Found ({value})'.format(keypath=keypath, value=value))
- elif key in ('template', 'template_name'):
+ "Expected a list of required package names for {keypath}."
+ " Found ({value})".format(keypath=keypath, value=value)
+ )
+ elif key in ("template", "template_name"):
if value is None: # Either template or template_name can be none
continue
if not isinstance(value, str):
errors.append(
- 'Expected a string type for {keypath}.'
- ' Found ({value})'.format(keypath=keypath, value=value))
+ "Expected a string type for {keypath}."
+ " Found ({value})".format(keypath=keypath, value=value)
+ )
elif not isinstance(value, str):
errors.append(
- 'Expected a string type for {keypath}.'
- ' Found ({value})'.format(keypath=keypath, value=value))
+ "Expected a string type for {keypath}. Found ({value})".format(
+ keypath=keypath, value=value
+ )
+ )
if errors:
- raise ValueError(r'Invalid ntp configuration:\n{errors}'.format(
- errors='\n'.join(errors)))
+ raise ValueError(
+ r"Invalid ntp configuration:\n{errors}".format(
+ errors="\n".join(errors)
+ )
+ )
def handle(name, cfg, cloud, log, _args):
"""Enable and configure ntp."""
- if 'ntp' not in cfg:
+ if "ntp" not in cfg:
LOG.debug(
- "Skipping module named %s, not present or disabled by cfg", name)
+ "Skipping module named %s, not present or disabled by cfg", name
+ )
return
- ntp_cfg = cfg['ntp']
+ ntp_cfg = cfg["ntp"]
if ntp_cfg is None:
ntp_cfg = {} # Allow empty config which will install the package
@@ -538,52 +597,61 @@ def handle(name, cfg, cloud, log, _args):
if not isinstance(ntp_cfg, (dict)):
raise RuntimeError(
"'ntp' key existed in config, but not a dictionary type,"
- " is a {_type} instead".format(_type=type_utils.obj_name(ntp_cfg)))
+ " is a {_type} instead".format(_type=type_utils.obj_name(ntp_cfg))
+ )
validate_cloudconfig_schema(cfg, schema)
# Allow users to explicitly enable/disable
- enabled = ntp_cfg.get('enabled', True)
+ enabled = ntp_cfg.get("enabled", True)
if util.is_false(enabled):
LOG.debug("Skipping module named %s, disabled by cfg", name)
return
# Select which client is going to be used and get the configuration
- ntp_client_config = select_ntp_client(ntp_cfg.get('ntp_client'),
- cloud.distro)
+ ntp_client_config = select_ntp_client(
+ ntp_cfg.get("ntp_client"), cloud.distro
+ )
# Allow user ntp config to override distro configurations
ntp_client_config = util.mergemanydict(
- [ntp_client_config, ntp_cfg.get('config', {})], reverse=True)
+ [ntp_client_config, ntp_cfg.get("config", {})], reverse=True
+ )
supplemental_schema_validation(ntp_client_config)
- rename_ntp_conf(confpath=ntp_client_config.get('confpath'))
+ rename_ntp_conf(confpath=ntp_client_config.get("confpath"))
template_fn = None
- if not ntp_client_config.get('template'):
- template_name = (
- ntp_client_config.get('template_name').replace('{distro}',
- cloud.distro.name))
+ if not ntp_client_config.get("template"):
+ template_name = ntp_client_config.get("template_name").replace(
+ "{distro}", cloud.distro.name
+ )
template_fn = cloud.get_template_filename(template_name)
if not template_fn:
- msg = ('No template found, not rendering %s' %
- ntp_client_config.get('template_name'))
+ msg = (
+ "No template found, not rendering %s"
+ % ntp_client_config.get("template_name")
+ )
raise RuntimeError(msg)
- write_ntp_config_template(cloud.distro.name,
- service_name=ntp_client_config.get(
- 'service_name'),
- servers=ntp_cfg.get('servers', []),
- pools=ntp_cfg.get('pools', []),
- path=ntp_client_config.get('confpath'),
- template_fn=template_fn,
- template=ntp_client_config.get('template'))
-
- install_ntp_client(cloud.distro.install_packages,
- packages=ntp_client_config['packages'],
- check_exe=ntp_client_config['check_exe'])
+ write_ntp_config_template(
+ cloud.distro.name,
+ service_name=ntp_client_config.get("service_name"),
+ servers=ntp_cfg.get("servers", []),
+ pools=ntp_cfg.get("pools", []),
+ path=ntp_client_config.get("confpath"),
+ template_fn=template_fn,
+ template=ntp_client_config.get("template"),
+ )
+
+ install_ntp_client(
+ cloud.distro.install_packages,
+ packages=ntp_client_config["packages"],
+ check_exe=ntp_client_config["check_exe"],
+ )
try:
- cloud.distro.manage_service('reload',
- ntp_client_config.get('service_name'))
+ cloud.distro.manage_service(
+ "reload", ntp_client_config.get("service_name")
+ )
except subp.ProcessExecutionError as e:
LOG.exception("Failed to reload/start ntp service: %s", e)
raise
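The select_ntp_client reflow above keeps its selection order intact: an explicit ntp_client in user data wins, a non-"auto" distro-level ntp_client setting comes next, and only "auto" probes the distro's preferred clients for an installed check_exe, falling back to the first preferred client. A condensed sketch of that precedence, written as a standalone helper with made-up arguments rather than the cloud-init API:

def pick_ntp_client(user_client, distro_client, preferred, installed, configs):
    # explicit user-data selection wins outright
    if user_client and user_client != "auto":
        return configs.get(user_client, {})
    # a non-"auto" distro system setting is honoured next
    if distro_client != "auto":
        return configs.get(distro_client, {})
    # otherwise prefer the first client whose check_exe is already installed
    for client in preferred:
        if configs.get(client, {}).get("check_exe") in installed:
            return configs[client]
    # nothing installed: fall back to the distro's first preferred client
    return configs.get(preferred[0], {})

configs = {"chrony": {"check_exe": "chronyd"}, "ntp": {"check_exe": "ntpd"}}
print(pick_ntp_client(None, "auto", ["chrony", "ntp"], {"ntpd"}, configs))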
diff --git a/cloudinit/config/cc_package_update_upgrade_install.py b/cloudinit/config/cc_package_update_upgrade_install.py
index 036baf85..14cdfab8 100644
--- a/cloudinit/config/cc_package_update_upgrade_install.py
+++ b/cloudinit/config/cc_package_update_upgrade_install.py
@@ -43,8 +43,7 @@ import os
import time
from cloudinit import log as logging
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
REBOOT_FILE = "/var/run/reboot-required"
REBOOT_CMD = ["/sbin/reboot"]
@@ -68,17 +67,19 @@ def _fire_reboot(log, wait_attempts=6, initial_sleep=1, backoff=2):
log.debug("Rebooted, but still running after %s seconds", int(elapsed))
# If we got here, not good
elapsed = time.time() - start
- raise RuntimeError(("Reboot did not happen"
- " after %s seconds!") % (int(elapsed)))
+ raise RuntimeError(
+ "Reboot did not happen after %s seconds!" % (int(elapsed))
+ )
def handle(_name, cfg, cloud, log, _args):
# Handle the old style + new config names
- update = _multi_cfg_bool_get(cfg, 'apt_update', 'package_update')
- upgrade = _multi_cfg_bool_get(cfg, 'package_upgrade', 'apt_upgrade')
- reboot_if_required = _multi_cfg_bool_get(cfg, 'apt_reboot_if_required',
- 'package_reboot_if_required')
- pkglist = util.get_cfg_option_list(cfg, 'packages', [])
+ update = _multi_cfg_bool_get(cfg, "apt_update", "package_update")
+ upgrade = _multi_cfg_bool_get(cfg, "package_upgrade", "apt_upgrade")
+ reboot_if_required = _multi_cfg_bool_get(
+ cfg, "apt_reboot_if_required", "package_reboot_if_required"
+ )
+ pkglist = util.get_cfg_option_list(cfg, "packages", [])
errors = []
if update or len(pkglist) or upgrade:
@@ -109,8 +110,9 @@ def handle(_name, cfg, cloud, log, _args):
reboot_fn_exists = os.path.isfile(REBOOT_FILE)
if (upgrade or pkglist) and reboot_if_required and reboot_fn_exists:
try:
- log.warning("Rebooting after upgrade or install per "
- "%s", REBOOT_FILE)
+ log.warning(
+ "Rebooting after upgrade or install per %s", REBOOT_FILE
+ )
# Flush the above warning + anything else out...
logging.flushLoggers(log)
_fire_reboot(log)
@@ -119,8 +121,10 @@ def handle(_name, cfg, cloud, log, _args):
errors.append(e)
if len(errors):
- log.warning("%s failed with exceptions, re-raising the last one",
- len(errors))
+ log.warning(
+ "%s failed with exceptions, re-raising the last one", len(errors)
+ )
raise errors[-1]
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py
index 733c3910..cc1fe53e 100644
--- a/cloudinit/config/cc_phone_home.py
+++ b/cloudinit/config/cc_phone_home.py
@@ -41,22 +41,19 @@ keys to post. Available keys are:
tries: 10
"""
-from cloudinit import templater
-from cloudinit import url_helper
-from cloudinit import util
-
+from cloudinit import templater, url_helper, util
from cloudinit.settings import PER_INSTANCE
frequency = PER_INSTANCE
POST_LIST_ALL = [
- 'pub_key_dsa',
- 'pub_key_rsa',
- 'pub_key_ecdsa',
- 'pub_key_ed25519',
- 'instance_id',
- 'hostname',
- 'fqdn'
+ "pub_key_dsa",
+ "pub_key_rsa",
+ "pub_key_ecdsa",
+ "pub_key_ed25519",
+ "instance_id",
+ "hostname",
+ "fqdn",
]
@@ -74,48 +71,58 @@ def handle(name, cfg, cloud, log, args):
if len(args) != 0:
ph_cfg = util.read_conf(args[0])
else:
- if 'phone_home' not in cfg:
- log.debug(("Skipping module named %s, "
- "no 'phone_home' configuration found"), name)
+ if "phone_home" not in cfg:
+ log.debug(
+ "Skipping module named %s, "
+ "no 'phone_home' configuration found",
+ name,
+ )
return
- ph_cfg = cfg['phone_home']
-
- if 'url' not in ph_cfg:
- log.warning(("Skipping module named %s, "
- "no 'url' found in 'phone_home' configuration"), name)
+ ph_cfg = cfg["phone_home"]
+
+ if "url" not in ph_cfg:
+ log.warning(
+ "Skipping module named %s, "
+ "no 'url' found in 'phone_home' configuration",
+ name,
+ )
return
- url = ph_cfg['url']
- post_list = ph_cfg.get('post', 'all')
- tries = ph_cfg.get('tries')
+ url = ph_cfg["url"]
+ post_list = ph_cfg.get("post", "all")
+ tries = ph_cfg.get("tries")
try:
tries = int(tries)
except Exception:
tries = 10
- util.logexc(log, "Configuration entry 'tries' is not an integer, "
- "using %s instead", tries)
+ util.logexc(
+ log,
+ "Configuration entry 'tries' is not an integer, using %s instead",
+ tries,
+ )
if post_list == "all":
post_list = POST_LIST_ALL
all_keys = {}
- all_keys['instance_id'] = cloud.get_instance_id()
- all_keys['hostname'] = cloud.get_hostname()
- all_keys['fqdn'] = cloud.get_hostname(fqdn=True)
+ all_keys["instance_id"] = cloud.get_instance_id()
+ all_keys["hostname"] = cloud.get_hostname()
+ all_keys["fqdn"] = cloud.get_hostname(fqdn=True)
pubkeys = {
- 'pub_key_dsa': '/etc/ssh/ssh_host_dsa_key.pub',
- 'pub_key_rsa': '/etc/ssh/ssh_host_rsa_key.pub',
- 'pub_key_ecdsa': '/etc/ssh/ssh_host_ecdsa_key.pub',
- 'pub_key_ed25519': '/etc/ssh/ssh_host_ed25519_key.pub',
+ "pub_key_dsa": "/etc/ssh/ssh_host_dsa_key.pub",
+ "pub_key_rsa": "/etc/ssh/ssh_host_rsa_key.pub",
+ "pub_key_ecdsa": "/etc/ssh/ssh_host_ecdsa_key.pub",
+ "pub_key_ed25519": "/etc/ssh/ssh_host_ed25519_key.pub",
}
for (n, path) in pubkeys.items():
try:
all_keys[n] = util.load_file(path)
except Exception:
- util.logexc(log, "%s: failed to open, can not phone home that "
- "data!", path)
+ util.logexc(
+ log, "%s: failed to open, can not phone home that data!", path
+ )
submit_keys = {}
for k in post_list:
@@ -123,28 +130,37 @@ def handle(name, cfg, cloud, log, args):
submit_keys[k] = all_keys[k]
else:
submit_keys[k] = None
- log.warning(("Requested key %s from 'post'"
- " configuration list not available"), k)
+ log.warning(
+ "Requested key %s from 'post'"
+ " configuration list not available",
+ k,
+ )
    # Get them ready to be posted
real_submit_keys = {}
for (k, v) in submit_keys.items():
if v is None:
- real_submit_keys[k] = 'N/A'
+ real_submit_keys[k] = "N/A"
else:
real_submit_keys[k] = str(v)
    # In case the url is parameterized
url_params = {
- 'INSTANCE_ID': all_keys['instance_id'],
+ "INSTANCE_ID": all_keys["instance_id"],
}
url = templater.render_string(url, url_params)
try:
url_helper.read_file_or_url(
- url, data=real_submit_keys, retries=tries, sec_between=3,
- ssl_details=util.fetch_ssl_details(cloud.paths))
+ url,
+ data=real_submit_keys,
+ retries=tries,
+ sec_between=3,
+ ssl_details=util.fetch_ssl_details(cloud.paths),
+ )
except Exception:
- util.logexc(log, "Failed to post phone home data to %s in %s tries",
- url, tries)
+ util.logexc(
+ log, "Failed to post phone home data to %s in %s tries", url, tries
+ )
+
# vi: ts=4 expandtab
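The phone_home hunks above only re-wrap the key handling; keys that cannot be read are still posted as the literal string "N/A" rather than dropped. A minimal illustration with hypothetical values, not the module's real collection code:

post_list = ["instance_id", "hostname", "pub_key_rsa"]
all_keys = {"instance_id": "i-0123456789", "hostname": "vm01"}  # rsa key unreadable

submit_keys = {k: all_keys.get(k) for k in post_list}
# unavailable values are substituted, mirroring the real_submit_keys loop above
real_submit_keys = {
    k: "N/A" if v is None else str(v) for k, v in submit_keys.items()
}
print(real_submit_keys)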
diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py
index 5780a7e9..d4eb68c0 100644
--- a/cloudinit/config/cc_power_state_change.py
+++ b/cloudinit/config/cc_power_state_change.py
@@ -58,9 +58,8 @@ import re
import subprocess
import time
+from cloudinit import subp, util
from cloudinit.settings import PER_INSTANCE
-from cloudinit import subp
-from cloudinit import util
frequency = PER_INSTANCE
@@ -75,9 +74,9 @@ def givecmdline(pid):
# PID COMM ARGS
# 1 init /bin/init --
if util.is_FreeBSD():
- (output, _err) = subp.subp(['procstat', '-c', str(pid)])
+ (output, _err) = subp.subp(["procstat", "-c", str(pid)])
line = output.splitlines()[1]
- m = re.search(r'\d+ (\w|\.|-)+\s+(/\w.+)', line)
+ m = re.search(r"\d+ (\w|\.|-)+\s+(/\w.+)", line)
return m.group(2)
else:
return util.load_file("/proc/%s/cmdline" % pid)
@@ -106,8 +105,9 @@ def check_condition(cond, log=None):
return False
else:
if log:
- log.warning(pre + "unexpected exit %s. " % ret +
- "do not apply change.")
+ log.warning(
+ pre + "unexpected exit %s. " % ret + "do not apply change."
+ )
return False
except Exception as e:
if log:
@@ -138,16 +138,24 @@ def handle(_name, cfg, cloud, log, _args):
devnull_fp = open(os.devnull, "w")
- log.debug("After pid %s ends, will execute: %s" % (mypid, ' '.join(args)))
+ log.debug("After pid %s ends, will execute: %s" % (mypid, " ".join(args)))
- util.fork_cb(run_after_pid_gone, mypid, cmdline, timeout, log,
- condition, execmd, [args, devnull_fp])
+ util.fork_cb(
+ run_after_pid_gone,
+ mypid,
+ cmdline,
+ timeout,
+ log,
+ condition,
+ execmd,
+ [args, devnull_fp],
+ )
def load_power_state(cfg, distro):
# returns a tuple of shutdown_command, timeout
# shutdown_command is None if no config found
- pstate = cfg.get('power_state')
+ pstate = cfg.get("power_state")
if pstate is None:
return (None, None, None)
@@ -155,22 +163,25 @@ def load_power_state(cfg, distro):
if not isinstance(pstate, dict):
raise TypeError("power_state is not a dict.")
- modes_ok = ['halt', 'poweroff', 'reboot']
+ modes_ok = ["halt", "poweroff", "reboot"]
mode = pstate.get("mode")
if mode not in distro.shutdown_options_map:
raise TypeError(
- "power_state[mode] required, must be one of: %s. found: '%s'." %
- (','.join(modes_ok), mode))
+ "power_state[mode] required, must be one of: %s. found: '%s'."
+ % (",".join(modes_ok), mode)
+ )
- args = distro.shutdown_command(mode=mode,
- delay=pstate.get("delay", "now"),
- message=pstate.get("message"))
+ args = distro.shutdown_command(
+ mode=mode,
+ delay=pstate.get("delay", "now"),
+ message=pstate.get("message"),
+ )
try:
- timeout = float(pstate.get('timeout', 30.0))
+ timeout = float(pstate.get("timeout", 30.0))
except ValueError as e:
raise ValueError(
- "failed to convert timeout '%s' to float." % pstate['timeout']
+ "failed to convert timeout '%s' to float." % pstate["timeout"]
) from e
condition = pstate.get("condition", True)
@@ -186,8 +197,12 @@ def doexit(sysexit):
def execmd(exe_args, output=None, data_in=None):
ret = 1
try:
- proc = subprocess.Popen(exe_args, stdin=subprocess.PIPE,
- stdout=output, stderr=subprocess.STDOUT)
+ proc = subprocess.Popen(
+ exe_args,
+ stdin=subprocess.PIPE,
+ stdout=output,
+ stderr=subprocess.STDOUT,
+ )
proc.communicate(data_in)
ret = proc.returncode
except Exception:
@@ -230,7 +245,7 @@ def run_after_pid_gone(pid, pidcmdline, timeout, log, condition, func, args):
except Exception as e:
fatal("Unexpected Exception: %s" % e)
- time.sleep(.25)
+ time.sleep(0.25)
if not msg:
fatal("Unexpected error in run_after_pid_gone")
@@ -246,4 +261,5 @@ def run_after_pid_gone(pid, pidcmdline, timeout, log, condition, func, args):
func(*args)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py
index dc20fc44..f51f49bc 100644
--- a/cloudinit/config/cc_puppet.py
+++ b/cloudinit/config/cc_puppet.py
@@ -108,23 +108,20 @@ key (by default the agent will execute with the ``--test`` flag).
import os
import socket
-import yaml
from io import StringIO
-from cloudinit import helpers
-from cloudinit import subp
-from cloudinit import temp_utils
-from cloudinit import util
-from cloudinit import url_helper
+import yaml
-AIO_INSTALL_URL = 'https://raw.githubusercontent.com/puppetlabs/install-puppet/main/install.sh' # noqa: E501
-PUPPET_AGENT_DEFAULT_ARGS = ['--test']
+from cloudinit import helpers, subp, temp_utils, url_helper, util
+AIO_INSTALL_URL = "https://raw.githubusercontent.com/puppetlabs/install-puppet/main/install.sh" # noqa: E501
+PUPPET_AGENT_DEFAULT_ARGS = ["--test"]
-class PuppetConstants(object):
- def __init__(self, puppet_conf_file, puppet_ssl_dir,
- csr_attributes_path, log):
+class PuppetConstants(object):
+ def __init__(
+ self, puppet_conf_file, puppet_ssl_dir, csr_attributes_path, log
+ ):
self.conf_path = puppet_conf_file
self.ssl_dir = puppet_ssl_dir
self.ssl_cert_dir = os.path.join(puppet_ssl_dir, "certs")
@@ -134,18 +131,27 @@ class PuppetConstants(object):
def _autostart_puppet(log):
# Set puppet to automatically start
- if os.path.exists('/etc/default/puppet'):
- subp.subp(['sed', '-i',
- '-e', 's/^START=.*/START=yes/',
- '/etc/default/puppet'], capture=False)
- elif os.path.exists('/bin/systemctl'):
- subp.subp(['/bin/systemctl', 'enable', 'puppet.service'],
- capture=False)
- elif os.path.exists('/sbin/chkconfig'):
- subp.subp(['/sbin/chkconfig', 'puppet', 'on'], capture=False)
+ if os.path.exists("/etc/default/puppet"):
+ subp.subp(
+ [
+ "sed",
+ "-i",
+ "-e",
+ "s/^START=.*/START=yes/",
+ "/etc/default/puppet",
+ ],
+ capture=False,
+ )
+ elif os.path.exists("/bin/systemctl"):
+ subp.subp(
+ ["/bin/systemctl", "enable", "puppet.service"], capture=False
+ )
+ elif os.path.exists("/sbin/chkconfig"):
+ subp.subp(["/sbin/chkconfig", "puppet", "on"], capture=False)
else:
- log.warning(("Sorry we do not know how to enable"
- " puppet services on this system"))
+ log.warning(
+ "Sorry we do not know how to enable puppet services on this system"
+ )
def get_config_value(puppet_bin, setting):
@@ -153,12 +159,13 @@ def get_config_value(puppet_bin, setting):
:param puppet_bin: path to puppet binary
:param setting: setting to query
"""
- out, _ = subp.subp([puppet_bin, 'config', 'print', setting])
+ out, _ = subp.subp([puppet_bin, "config", "print", setting])
return out.rstrip()
-def install_puppet_aio(url=AIO_INSTALL_URL, version=None,
- collection=None, cleanup=True):
+def install_puppet_aio(
+ url=AIO_INSTALL_URL, version=None, collection=None, cleanup=True
+):
"""Install puppet-agent from the puppetlabs repositories using the one-shot
shell script
@@ -169,62 +176,70 @@ def install_puppet_aio(url=AIO_INSTALL_URL, version=None,
"""
args = []
if version is not None:
- args = ['-v', version]
+ args = ["-v", version]
if collection is not None:
- args += ['-c', collection]
+ args += ["-c", collection]
# Purge puppetlabs repos after installation
if cleanup:
- args += ['--cleanup']
+ args += ["--cleanup"]
content = url_helper.readurl(url=url, retries=5).contents
# Use tmpdir over tmpfile to avoid 'text file busy' on execute
with temp_utils.tempdir(needs_exe=True) as tmpd:
- tmpf = os.path.join(tmpd, 'puppet-install')
+ tmpf = os.path.join(tmpd, "puppet-install")
util.write_file(tmpf, content, mode=0o700)
return subp.subp([tmpf] + args, capture=False)
def handle(name, cfg, cloud, log, _args):
# If there isn't a puppet key in the configuration don't do anything
- if 'puppet' not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'puppet' configuration found"), name)
+ if "puppet" not in cfg:
+ log.debug(
+ "Skipping module named %s, no 'puppet' configuration found", name
+ )
return
- puppet_cfg = cfg['puppet']
+ puppet_cfg = cfg["puppet"]
# Start by installing the puppet package if necessary...
- install = util.get_cfg_option_bool(puppet_cfg, 'install', True)
- version = util.get_cfg_option_str(puppet_cfg, 'version', None)
- collection = util.get_cfg_option_str(puppet_cfg, 'collection', None)
+ install = util.get_cfg_option_bool(puppet_cfg, "install", True)
+ version = util.get_cfg_option_str(puppet_cfg, "version", None)
+ collection = util.get_cfg_option_str(puppet_cfg, "collection", None)
install_type = util.get_cfg_option_str(
- puppet_cfg, 'install_type', 'packages')
- cleanup = util.get_cfg_option_bool(puppet_cfg, 'cleanup', True)
- run = util.get_cfg_option_bool(puppet_cfg, 'exec', default=False)
- start_puppetd = util.get_cfg_option_bool(puppet_cfg,
- 'start_service',
- default=True)
+ puppet_cfg, "install_type", "packages"
+ )
+ cleanup = util.get_cfg_option_bool(puppet_cfg, "cleanup", True)
+ run = util.get_cfg_option_bool(puppet_cfg, "exec", default=False)
+ start_puppetd = util.get_cfg_option_bool(
+ puppet_cfg, "start_service", default=True
+ )
aio_install_url = util.get_cfg_option_str(
- puppet_cfg, 'aio_install_url', default=AIO_INSTALL_URL)
+ puppet_cfg, "aio_install_url", default=AIO_INSTALL_URL
+ )
# AIO and distro packages use different paths
- if install_type == 'aio':
- puppet_user = 'root'
- puppet_bin = '/opt/puppetlabs/bin/puppet'
- puppet_package = 'puppet-agent'
+ if install_type == "aio":
+ puppet_user = "root"
+ puppet_bin = "/opt/puppetlabs/bin/puppet"
+ puppet_package = "puppet-agent"
else: # default to 'packages'
- puppet_user = 'puppet'
- puppet_bin = 'puppet'
- puppet_package = 'puppet'
+ puppet_user = "puppet"
+ puppet_bin = "puppet"
+ puppet_package = "puppet"
package_name = util.get_cfg_option_str(
- puppet_cfg, 'package_name', puppet_package)
+ puppet_cfg, "package_name", puppet_package
+ )
if not install and version:
- log.warning(("Puppet install set to false but version supplied,"
- " doing nothing."))
+ log.warning(
+ "Puppet install set to false but version supplied, doing nothing."
+ )
elif install:
- log.debug(("Attempting to install puppet %s from %s"),
- version if version else 'latest', install_type)
+ log.debug(
+ "Attempting to install puppet %s from %s",
+ version if version else "latest",
+ install_type,
+ )
if install_type == "packages":
cloud.distro.install_packages((package_name, version))
@@ -235,17 +250,21 @@ def handle(name, cfg, cloud, log, _args):
run = False
conf_file = util.get_cfg_option_str(
- puppet_cfg, 'conf_file', get_config_value(puppet_bin, 'config'))
+ puppet_cfg, "conf_file", get_config_value(puppet_bin, "config")
+ )
ssl_dir = util.get_cfg_option_str(
- puppet_cfg, 'ssl_dir', get_config_value(puppet_bin, 'ssldir'))
+ puppet_cfg, "ssl_dir", get_config_value(puppet_bin, "ssldir")
+ )
csr_attributes_path = util.get_cfg_option_str(
- puppet_cfg, 'csr_attributes_path',
- get_config_value(puppet_bin, 'csr_attributes'))
+ puppet_cfg,
+ "csr_attributes_path",
+ get_config_value(puppet_bin, "csr_attributes"),
+ )
p_constants = PuppetConstants(conf_file, ssl_dir, csr_attributes_path, log)
# ... and then update the puppet configuration
- if 'conf' in puppet_cfg:
+ if "conf" in puppet_cfg:
# Add all sections from the conf object to puppet.conf
contents = util.load_file(p_constants.conf_path)
# Create object for reading puppet.conf values
@@ -254,30 +273,31 @@ def handle(name, cfg, cloud, log, _args):
# mix the rest up. First clean them up
# (TODO(harlowja) is this really needed??)
cleaned_lines = [i.lstrip() for i in contents.splitlines()]
- cleaned_contents = '\n'.join(cleaned_lines)
+ cleaned_contents = "\n".join(cleaned_lines)
# Move to puppet_config.read_file when dropping py2.7
puppet_config.read_file(
- StringIO(cleaned_contents),
- source=p_constants.conf_path)
- for (cfg_name, cfg) in puppet_cfg['conf'].items():
+ StringIO(cleaned_contents), source=p_constants.conf_path
+ )
+ for (cfg_name, cfg) in puppet_cfg["conf"].items():
# Cert configuration is a special case
# Dump the puppetserver ca certificate in the correct place
- if cfg_name == 'ca_cert':
+ if cfg_name == "ca_cert":
# Puppet ssl sub-directory isn't created yet
# Create it with the proper permissions and ownership
util.ensure_dir(p_constants.ssl_dir, 0o771)
- util.chownbyname(p_constants.ssl_dir, puppet_user, 'root')
+ util.chownbyname(p_constants.ssl_dir, puppet_user, "root")
util.ensure_dir(p_constants.ssl_cert_dir)
- util.chownbyname(p_constants.ssl_cert_dir, puppet_user, 'root')
+ util.chownbyname(p_constants.ssl_cert_dir, puppet_user, "root")
util.write_file(p_constants.ssl_cert_path, cfg)
- util.chownbyname(p_constants.ssl_cert_path,
- puppet_user, 'root')
+ util.chownbyname(
+ p_constants.ssl_cert_path, puppet_user, "root"
+ )
else:
# Iterate through the config items, we'll use ConfigParser.set
# to overwrite or create new items as needed
for (o, v) in cfg.items():
- if o == 'certname':
+ if o == "certname":
# Expand %f as the fqdn
# TODO(harlowja) should this use the cloud fqdn??
v = v.replace("%f", socket.getfqdn())
@@ -288,14 +308,16 @@ def handle(name, cfg, cloud, log, _args):
puppet_config.set(cfg_name, o, v)
# We got all our config as wanted we'll rename
# the previous puppet.conf and create our new one
- util.rename(p_constants.conf_path, "%s.old"
- % (p_constants.conf_path))
+ util.rename(
+ p_constants.conf_path, "%s.old" % (p_constants.conf_path)
+ )
util.write_file(p_constants.conf_path, puppet_config.stringify())
- if 'csr_attributes' in puppet_cfg:
- util.write_file(p_constants.csr_attributes_path,
- yaml.dump(puppet_cfg['csr_attributes'],
- default_flow_style=False))
+ if "csr_attributes" in puppet_cfg:
+ util.write_file(
+ p_constants.csr_attributes_path,
+ yaml.dump(puppet_cfg["csr_attributes"], default_flow_style=False),
+ )
# Set it up so it autostarts
if start_puppetd:
@@ -303,18 +325,21 @@ def handle(name, cfg, cloud, log, _args):
# Run the agent if needed
if run:
- log.debug('Running puppet-agent')
- cmd = [puppet_bin, 'agent']
- if 'exec_args' in puppet_cfg:
- cmd_args = puppet_cfg['exec_args']
+ log.debug("Running puppet-agent")
+ cmd = [puppet_bin, "agent"]
+ if "exec_args" in puppet_cfg:
+ cmd_args = puppet_cfg["exec_args"]
if isinstance(cmd_args, (list, tuple)):
cmd.extend(cmd_args)
elif isinstance(cmd_args, str):
cmd.extend(cmd_args.split())
else:
- log.warning("Unknown type %s provided for puppet"
- " 'exec_args' expected list, tuple,"
- " or string", type(cmd_args))
+ log.warning(
+ "Unknown type %s provided for puppet"
+ " 'exec_args' expected list, tuple,"
+ " or string",
+ type(cmd_args),
+ )
cmd.extend(PUPPET_AGENT_DEFAULT_ARGS)
else:
cmd.extend(PUPPET_AGENT_DEFAULT_ARGS)
@@ -322,6 +347,7 @@ def handle(name, cfg, cloud, log, _args):
if start_puppetd:
# Start puppetd
- subp.subp(['service', 'puppet', 'start'], capture=False)
+ subp.subp(["service", "puppet", "start"], capture=False)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_refresh_rmc_and_interface.py b/cloudinit/config/cc_refresh_rmc_and_interface.py
index d5e0ecb2..87be5348 100644
--- a/cloudinit/config/cc_refresh_rmc_and_interface.py
+++ b/cloudinit/config/cc_refresh_rmc_and_interface.py
@@ -34,20 +34,18 @@ This module handles
"""
+import errno
+
from cloudinit import log as logging
+from cloudinit import netinfo, subp, util
from cloudinit.settings import PER_ALWAYS
-from cloudinit import util
-from cloudinit import subp
-from cloudinit import netinfo
-
-import errno
frequency = PER_ALWAYS
LOG = logging.getLogger(__name__)
# Ensure that /opt/rsct/bin has been added to standard PATH of the
# distro. The symlink to rmcctrl is /usr/sbin/rsct/bin/rmcctrl .
-RMCCTRL = 'rmcctrl'
+RMCCTRL = "rmcctrl"
def handle(name, _cfg, _cloud, _log, _args):
@@ -56,10 +54,11 @@ def handle(name, _cfg, _cloud, _log, _args):
return
LOG.debug(
- 'Making the IPv6 up explicitly. '
- 'Ensuring IPv6 interface is not being handled by NetworkManager '
- 'and it is restarted to re-establish the communication with '
- 'the hypervisor')
+ "Making the IPv6 up explicitly. "
+ "Ensuring IPv6 interface is not being handled by NetworkManager "
+ "and it is restarted to re-establish the communication with "
+ "the hypervisor"
+ )
ifaces = find_ipv6_ifaces()
@@ -80,7 +79,7 @@ def find_ipv6_ifaces():
ifaces = []
for iface, data in info.items():
if iface == "lo":
- LOG.debug('Skipping localhost interface')
+ LOG.debug("Skipping localhost interface")
if len(data.get("ipv4", [])) != 0:
# skip this interface, as it has ipv4 addrs
continue
@@ -92,16 +91,16 @@ def refresh_ipv6(interface):
# IPv6 interface is explicitly brought up, subsequent to which the
# RMC services are restarted to re-establish the communication with
# the hypervisor.
- subp.subp(['ip', 'link', 'set', interface, 'down'])
- subp.subp(['ip', 'link', 'set', interface, 'up'])
+ subp.subp(["ip", "link", "set", interface, "down"])
+ subp.subp(["ip", "link", "set", interface, "up"])
def sysconfig_path(iface):
- return '/etc/sysconfig/network-scripts/ifcfg-' + iface
+ return "/etc/sysconfig/network-scripts/ifcfg-" + iface
def restart_network_manager():
- subp.subp(['systemctl', 'restart', 'NetworkManager'])
+ subp.subp(["systemctl", "restart", "NetworkManager"])
def disable_ipv6(iface_file):
@@ -113,12 +112,11 @@ def disable_ipv6(iface_file):
contents = util.load_file(iface_file)
except IOError as e:
if e.errno == errno.ENOENT:
- LOG.debug("IPv6 interface file %s does not exist\n",
- iface_file)
+ LOG.debug("IPv6 interface file %s does not exist\n", iface_file)
else:
raise e
- if 'IPV6INIT' not in contents:
+ if "IPV6INIT" not in contents:
LOG.debug("Interface file %s did not have IPV6INIT", iface_file)
return
@@ -135,11 +133,12 @@ def disable_ipv6(iface_file):
def search(contents):
# Search for any NM_CONTROLLED or IPV6 lines in IPv6 interface file.
- return(
- contents.startswith("IPV6ADDR") or
- contents.startswith("IPADDR6") or
- contents.startswith("IPV6INIT") or
- contents.startswith("NM_CONTROLLED"))
+ return (
+ contents.startswith("IPV6ADDR")
+ or contents.startswith("IPADDR6")
+ or contents.startswith("IPV6INIT")
+ or contents.startswith("NM_CONTROLLED")
+ )
def refresh_rmc():
@@ -152,8 +151,8 @@ def refresh_rmc():
# until the subsystem and all resource managers are stopped.
# -s : start Resource Monitoring & Control subsystem.
try:
- subp.subp([RMCCTRL, '-z'])
- subp.subp([RMCCTRL, '-s'])
+ subp.subp([RMCCTRL, "-z"])
+ subp.subp([RMCCTRL, "-s"])
except Exception:
- util.logexc(LOG, 'Failed to refresh the RMC subsystem.')
+ util.logexc(LOG, "Failed to refresh the RMC subsystem.")
raise
diff --git a/cloudinit/config/cc_reset_rmc.py b/cloudinit/config/cc_reset_rmc.py
index 1cd72774..3b929903 100644
--- a/cloudinit/config/cc_reset_rmc.py
+++ b/cloudinit/config/cc_reset_rmc.py
@@ -39,9 +39,8 @@ Prerequisite of using this module is to install RSCT packages.
import os
from cloudinit import log as logging
+from cloudinit import subp, util
from cloudinit.settings import PER_INSTANCE
-from cloudinit import util
-from cloudinit import subp
frequency = PER_INSTANCE
@@ -49,34 +48,34 @@ frequency = PER_INSTANCE
# The symlink for RMCCTRL and RECFGCT are
# /usr/sbin/rsct/bin/rmcctrl and
# /usr/sbin/rsct/install/bin/recfgct respectively.
-RSCT_PATH = '/opt/rsct/install/bin'
-RMCCTRL = 'rmcctrl'
-RECFGCT = 'recfgct'
+RSCT_PATH = "/opt/rsct/install/bin"
+RMCCTRL = "rmcctrl"
+RECFGCT = "recfgct"
LOG = logging.getLogger(__name__)
-NODE_ID_FILE = '/etc/ct_node_id'
+NODE_ID_FILE = "/etc/ct_node_id"
def handle(name, _cfg, cloud, _log, _args):
    # Ensure the node id is generated only once, during first boot
- if cloud.datasource.platform_type == 'none':
- LOG.debug('Skipping creation of new ct_node_id node')
+ if cloud.datasource.platform_type == "none":
+ LOG.debug("Skipping creation of new ct_node_id node")
return
if not os.path.isdir(RSCT_PATH):
LOG.debug("module disabled, RSCT_PATH not present")
return
- orig_path = os.environ.get('PATH')
+ orig_path = os.environ.get("PATH")
try:
add_path(orig_path)
reset_rmc()
finally:
if orig_path:
- os.environ['PATH'] = orig_path
+ os.environ["PATH"] = orig_path
else:
- del os.environ['PATH']
+ del os.environ["PATH"]
def reconfigure_rsct_subsystems():
@@ -88,17 +87,17 @@ def reconfigure_rsct_subsystems():
LOG.debug(out.strip())
return out
except subp.ProcessExecutionError:
- util.logexc(LOG, 'Failed to reconfigure the RSCT subsystems.')
+ util.logexc(LOG, "Failed to reconfigure the RSCT subsystems.")
raise
def get_node_id():
try:
fp = util.load_file(NODE_ID_FILE)
- node_id = fp.split('\n')[0]
+ node_id = fp.split("\n")[0]
return node_id
except Exception:
- util.logexc(LOG, 'Failed to get node ID from file %s.' % NODE_ID_FILE)
+ util.logexc(LOG, "Failed to get node ID from file %s." % NODE_ID_FILE)
raise
@@ -107,25 +106,25 @@ def add_path(orig_path):
    # So that cloud-init automatically finds and
    # runs RECFGCT to create a new node_id.
suff = ":" + orig_path if orig_path else ""
- os.environ['PATH'] = RSCT_PATH + suff
- return os.environ['PATH']
+ os.environ["PATH"] = RSCT_PATH + suff
+ return os.environ["PATH"]
def rmcctrl():
# Stop the RMC subsystem and all resource managers so that we can make
# some changes to it
try:
- return subp.subp([RMCCTRL, '-z'])
+ return subp.subp([RMCCTRL, "-z"])
except Exception:
- util.logexc(LOG, 'Failed to stop the RMC subsystem.')
+ util.logexc(LOG, "Failed to stop the RMC subsystem.")
raise
def reset_rmc():
- LOG.debug('Attempting to reset RMC.')
+ LOG.debug("Attempting to reset RMC.")
node_id_before = get_node_id()
- LOG.debug('Node ID at beginning of module: %s', node_id_before)
+ LOG.debug("Node ID at beginning of module: %s", node_id_before)
# Stop the RMC subsystem and all resource managers so that we can make
# some changes to it
@@ -133,11 +132,11 @@ def reset_rmc():
reconfigure_rsct_subsystems()
node_id_after = get_node_id()
- LOG.debug('Node ID at end of module: %s', node_id_after)
+ LOG.debug("Node ID at end of module: %s", node_id_after)
# Check if new node ID is generated or not
# by comparing old and new node ID
if node_id_after == node_id_before:
- msg = 'New node ID did not get generated.'
+ msg = "New node ID did not get generated."
LOG.error(msg)
raise Exception(msg)
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index 00bb7ae7..b009c392 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -13,21 +13,21 @@ import os
import stat
from textwrap import dedent
+from cloudinit import subp, util
from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema
from cloudinit.settings import PER_ALWAYS
-from cloudinit import subp
-from cloudinit import util
NOBLOCK = "noblock"
frequency = PER_ALWAYS
-distros = ['all']
+distros = ["all"]
meta = {
- 'id': 'cc_resizefs',
- 'name': 'Resizefs',
- 'title': 'Resize filesystem',
- 'description': dedent("""\
+ "id": "cc_resizefs",
+ "name": "Resizefs",
+ "title": "Resize filesystem",
+ "description": dedent(
+ """\
        Resize a filesystem to use all available space on the partition. This
module is useful along with ``cc_growpart`` and will ensure that if the
root partition has been resized the root filesystem will be resized
@@ -36,22 +36,26 @@ meta = {
running. Optionally, the resize operation can be performed in the
background while cloud-init continues running modules. This can be
enabled by setting ``resize_rootfs`` to ``true``. This module can be
- disabled altogether by setting ``resize_rootfs`` to ``false``."""),
- 'distros': distros,
- 'examples': [
- 'resize_rootfs: false # disable root filesystem resize operation'],
- 'frequency': PER_ALWAYS,
+ disabled altogether by setting ``resize_rootfs`` to ``false``."""
+ ),
+ "distros": distros,
+ "examples": [
+ "resize_rootfs: false # disable root filesystem resize operation"
+ ],
+ "frequency": PER_ALWAYS,
}
schema = {
- 'type': 'object',
- 'properties': {
- 'resize_rootfs': {
- 'enum': [True, False, NOBLOCK],
- 'description': dedent("""\
- Whether to resize the root partition. Default: 'true'""")
+ "type": "object",
+ "properties": {
+ "resize_rootfs": {
+ "enum": [True, False, NOBLOCK],
+ "description": dedent(
+ """\
+ Whether to resize the root partition. Default: 'true'"""
+ ),
}
- }
+ },
}
__doc__ = get_meta_doc(meta, schema) # Supplement python help()
@@ -63,32 +67,38 @@ def _resize_btrfs(mount_point, devpth):
# Use a subvolume that is not ro to trick the resize operation to do the
# "right" thing. The use of ".snapshot" is specific to "snapper" a generic
# solution would be walk the subvolumes and find a rw mounted subvolume.
- if (not util.mount_is_read_write(mount_point) and
- os.path.isdir("%s/.snapshots" % mount_point)):
- return ('btrfs', 'filesystem', 'resize', 'max',
- '%s/.snapshots' % mount_point)
+ if not util.mount_is_read_write(mount_point) and os.path.isdir(
+ "%s/.snapshots" % mount_point
+ ):
+ return (
+ "btrfs",
+ "filesystem",
+ "resize",
+ "max",
+ "%s/.snapshots" % mount_point,
+ )
else:
- return ('btrfs', 'filesystem', 'resize', 'max', mount_point)
+ return ("btrfs", "filesystem", "resize", "max", mount_point)
def _resize_ext(mount_point, devpth):
- return ('resize2fs', devpth)
+ return ("resize2fs", devpth)
def _resize_xfs(mount_point, devpth):
- return ('xfs_growfs', mount_point)
+ return ("xfs_growfs", mount_point)
def _resize_ufs(mount_point, devpth):
- return ('growfs', '-y', mount_point)
+ return ("growfs", "-y", mount_point)
def _resize_zfs(mount_point, devpth):
- return ('zpool', 'online', '-e', mount_point, devpth)
+ return ("zpool", "online", "-e", mount_point, devpth)
def _resize_hammer2(mount_point, devpth):
- return ('hammer2', 'growfs', mount_point)
+ return ("hammer2", "growfs", mount_point)
def _can_skip_resize_ufs(mount_point, devpth):
@@ -100,7 +110,7 @@ def _can_skip_resize_ufs(mount_point, devpth):
# growfs exits with 1 for almost all cases up to this one.
# This means we can't just use rcs=[0, 1] as subp parameter:
try:
- subp.subp(['growfs', '-N', devpth])
+ subp.subp(["growfs", "-N", devpth])
except subp.ProcessExecutionError as e:
if e.stderr.startswith(skip_start) and skip_contain in e.stderr:
# This FS is already at the desired size
@@ -114,17 +124,15 @@ def _can_skip_resize_ufs(mount_point, devpth):
# for multiple filesystem types if possible, e.g. one command for
# ext2, ext3 and ext4.
RESIZE_FS_PREFIXES_CMDS = [
- ('btrfs', _resize_btrfs),
- ('ext', _resize_ext),
- ('xfs', _resize_xfs),
- ('ufs', _resize_ufs),
- ('zfs', _resize_zfs),
- ('hammer2', _resize_hammer2),
+ ("btrfs", _resize_btrfs),
+ ("ext", _resize_ext),
+ ("xfs", _resize_xfs),
+ ("ufs", _resize_ufs),
+ ("zfs", _resize_zfs),
+ ("hammer2", _resize_hammer2),
]
-RESIZE_FS_PRECHECK_CMDS = {
- 'ufs': _can_skip_resize_ufs
-}
+RESIZE_FS_PRECHECK_CMDS = {"ufs": _can_skip_resize_ufs}
def can_skip_resize(fs_type, resize_what, devpth):
@@ -148,52 +156,66 @@ def maybe_get_writable_device_path(devpath, info, log):
container = util.is_container()
# Ensure the path is a block device.
- if (devpath == "/dev/root" and not os.path.exists(devpath) and
- not container):
+ if (
+ devpath == "/dev/root"
+ and not os.path.exists(devpath)
+ and not container
+ ):
devpath = util.rootdev_from_cmdline(util.get_cmdline())
if devpath is None:
log.warning("Unable to find device '/dev/root'")
return None
log.debug("Converted /dev/root to '%s' per kernel cmdline", devpath)
- if devpath == 'overlayroot':
+ if devpath == "overlayroot":
log.debug("Not attempting to resize devpath '%s': %s", devpath, info)
return None
# FreeBSD zpool can also just use gpt/<label>
# with that in mind we can not do an os.stat on "gpt/whatever"
# therefore return the devpath already here.
- if devpath.startswith('gpt/'):
- log.debug('We have a gpt label - just go ahead')
+ if devpath.startswith("gpt/"):
+ log.debug("We have a gpt label - just go ahead")
return devpath
# Alternatively, our device could simply be a name as returned by gpart,
# such as da0p3
- if not devpath.startswith('/dev/') and not os.path.exists(devpath):
- fulldevpath = '/dev/' + devpath.lstrip('/')
- log.debug("'%s' doesn't appear to be a valid device path. Trying '%s'",
- devpath, fulldevpath)
+ if not devpath.startswith("/dev/") and not os.path.exists(devpath):
+ fulldevpath = "/dev/" + devpath.lstrip("/")
+ log.debug(
+ "'%s' doesn't appear to be a valid device path. Trying '%s'",
+ devpath,
+ fulldevpath,
+ )
devpath = fulldevpath
try:
statret = os.stat(devpath)
except OSError as exc:
if container and exc.errno == errno.ENOENT:
- log.debug("Device '%s' did not exist in container. "
- "cannot resize: %s", devpath, info)
+ log.debug(
+ "Device '%s' did not exist in container. cannot resize: %s",
+ devpath,
+ info,
+ )
elif exc.errno == errno.ENOENT:
- log.warning("Device '%s' did not exist. cannot resize: %s",
- devpath, info)
+ log.warning(
+ "Device '%s' did not exist. cannot resize: %s", devpath, info
+ )
else:
raise exc
return None
if not stat.S_ISBLK(statret.st_mode) and not stat.S_ISCHR(statret.st_mode):
if container:
- log.debug("device '%s' not a block device in container."
- " cannot resize: %s" % (devpath, info))
+ log.debug(
+ "device '%s' not a block device in container."
+ " cannot resize: %s" % (devpath, info)
+ )
else:
- log.warning("device '%s' not a block device. cannot resize: %s" %
- (devpath, info))
+ log.warning(
+ "device '%s' not a block device. cannot resize: %s"
+ % (devpath, info)
+ )
return None
return devpath # The writable block devpath
@@ -222,8 +244,8 @@ def handle(name, cfg, _cloud, log, args):
# we will have to get the zpool name out of this
# and set the resize_what variable to the zpool
# so the _resize_zfs function gets the right attribute.
- if fs_type == 'zfs':
- zpool = devpth.split('/')[0]
+ if fs_type == "zfs":
+ zpool = devpth.split("/")[0]
devpth = util.get_device_info_from_zpool(zpool)
if not devpth:
return # could not find device from zpool
@@ -238,8 +260,9 @@ def handle(name, cfg, _cloud, log, args):
resizer = None
if can_skip_resize(fs_type, resize_what, devpth):
- log.debug("Skip resize filesystem type %s for %s",
- fs_type, resize_what)
+ log.debug(
+ "Skip resize filesystem type %s for %s", fs_type, resize_what
+ )
return
fstype_lc = fs_type.lower()
@@ -249,29 +272,42 @@ def handle(name, cfg, _cloud, log, args):
break
if not resizer:
- log.warning("Not resizing unknown filesystem type %s for %s",
- fs_type, resize_what)
+ log.warning(
+ "Not resizing unknown filesystem type %s for %s",
+ fs_type,
+ resize_what,
+ )
return
resize_cmd = resizer(resize_what, devpth)
- log.debug("Resizing %s (%s) using %s", resize_what, fs_type,
- ' '.join(resize_cmd))
+ log.debug(
+ "Resizing %s (%s) using %s", resize_what, fs_type, " ".join(resize_cmd)
+ )
if resize_root == NOBLOCK:
# Fork to a child that will run
# the resize command
util.fork_cb(
- util.log_time, logfunc=log.debug, msg="backgrounded Resizing",
- func=do_resize, args=(resize_cmd, log))
+ util.log_time,
+ logfunc=log.debug,
+ msg="backgrounded Resizing",
+ func=do_resize,
+ args=(resize_cmd, log),
+ )
else:
- util.log_time(logfunc=log.debug, msg="Resizing",
- func=do_resize, args=(resize_cmd, log))
-
- action = 'Resized'
+ util.log_time(
+ logfunc=log.debug,
+ msg="Resizing",
+ func=do_resize,
+ args=(resize_cmd, log),
+ )
+
+ action = "Resized"
if resize_root == NOBLOCK:
- action = 'Resizing (via forking)'
- log.debug("%s root filesystem (type=%s, val=%s)", action, fs_type,
- resize_root)
+ action = "Resizing (via forking)"
+ log.debug(
+ "%s root filesystem (type=%s, val=%s)", action, fs_type, resize_root
+ )
def do_resize(resize_cmd, log):
@@ -283,4 +319,5 @@ def do_resize(resize_cmd, log):
# TODO(harlowja): Should we add a fsck check after this to make
# sure we didn't corrupt anything?
+
# vi: ts=4 expandtab
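
For readers skimming the reformatted cc_resizefs hunks above, the core pattern is a prefix-keyed dispatch table that maps filesystem types to resize commands. The following standalone sketch is illustrative only (it is not the module itself, and the sample device path is made up):

def _resize_ext(mount_point, devpth):
    return ("resize2fs", devpth)

def _resize_xfs(mount_point, devpth):
    return ("xfs_growfs", mount_point)

# First matching prefix wins, mirroring the lookup loop in handle() above.
RESIZE_FS_PREFIXES_CMDS = [
    ("ext", _resize_ext),
    ("xfs", _resize_xfs),
]

def pick_resizer(fs_type):
    for prefix, resizer in RESIZE_FS_PREFIXES_CMDS:
        if fs_type.lower().startswith(prefix):
            return resizer
    return None

print(pick_resizer("ext4")("/", "/dev/vda1"))  # ('resize2fs', '/dev/vda1')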
diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py
index 648935e4..b2970d51 100644
--- a/cloudinit/config/cc_resolv_conf.py
+++ b/cloudinit/config/cc_resolv_conf.py
@@ -47,19 +47,18 @@ are configured correctly.
"""
from cloudinit import log as logging
-from cloudinit import templater
+from cloudinit import templater, util
from cloudinit.settings import PER_INSTANCE
-from cloudinit import util
LOG = logging.getLogger(__name__)
frequency = PER_INSTANCE
-distros = ['alpine', 'fedora', 'opensuse', 'photon', 'rhel', 'sles']
+distros = ["alpine", "fedora", "opensuse", "photon", "rhel", "sles"]
RESOLVE_CONFIG_TEMPLATE_MAP = {
- '/etc/resolv.conf': 'resolv.conf',
- '/etc/systemd/resolved.conf': 'systemd.resolved.conf',
+ "/etc/resolv.conf": "resolv.conf",
+ "/etc/systemd/resolved.conf": "systemd.resolved.conf",
}
@@ -67,8 +66,8 @@ def generate_resolv_conf(template_fn, params, target_fname):
flags = []
false_flags = []
- if 'options' in params:
- for key, val in params['options'].items():
+ if "options" in params:
+ for key, val in params["options"].items():
if isinstance(val, bool):
if val:
flags.append(key)
@@ -76,12 +75,12 @@ def generate_resolv_conf(template_fn, params, target_fname):
false_flags.append(key)
for flag in flags + false_flags:
- del params['options'][flag]
+ del params["options"][flag]
- if not params.get('options'):
- params['options'] = {}
+ if not params.get("options"):
+ params["options"] = {}
- params['flags'] = flags
+ params["flags"] = flags
LOG.debug("Writing resolv.conf from template %s", template_fn)
templater.render_to_file(template_fn, target_fname, params)
@@ -97,13 +96,19 @@ def handle(name, cfg, cloud, log, _args):
@param args: Any module arguments from cloud.cfg
"""
if "manage_resolv_conf" not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'manage_resolv_conf' key in configuration"), name)
+ log.debug(
+ "Skipping module named %s,"
+ " no 'manage_resolv_conf' key in configuration",
+ name,
+ )
return
if not util.get_cfg_option_bool(cfg, "manage_resolv_conf", False):
- log.debug(("Skipping module named %s,"
- " 'manage_resolv_conf' present but set to False"), name)
+ log.debug(
+ "Skipping module named %s,"
+ " 'manage_resolv_conf' present but set to False",
+ name,
+ )
return
if "resolv_conf" not in cfg:
@@ -112,7 +117,8 @@ def handle(name, cfg, cloud, log, _args):
try:
template_fn = cloud.get_template_filename(
- RESOLVE_CONFIG_TEMPLATE_MAP[cloud.distro.resolve_conf_fn])
+ RESOLVE_CONFIG_TEMPLATE_MAP[cloud.distro.resolve_conf_fn]
+ )
except KeyError:
log.warning("No template found, not rendering resolve configs")
return
@@ -120,8 +126,9 @@ def handle(name, cfg, cloud, log, _args):
generate_resolv_conf(
template_fn=template_fn,
params=cfg["resolv_conf"],
- target_fname=cloud.distro.resolve_conf_fn
+ target_fname=cloud.distro.resolve_conf_fn,
)
return
+
# vi: ts=4 expandtab
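
The generate_resolv_conf hunks above keep the same options-to-flags split; a minimal sketch of that transformation, using a hypothetical params dict, is:

# Boolean options become flags (true) or are dropped (false); everything else
# stays in params["options"] for template rendering.
params = {"options": {"rotate": True, "debug": False, "timeout": 1}}

flags, false_flags = [], []
for key, val in list(params["options"].items()):
    if isinstance(val, bool):
        (flags if val else false_flags).append(key)

for flag in flags + false_flags:
    del params["options"][flag]

params["flags"] = flags
print(params)  # {'options': {'timeout': 1}, 'flags': ['rotate']}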
diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py
index 693317c2..b81a7a9b 100644
--- a/cloudinit/config/cc_rh_subscription.py
+++ b/cloudinit/config/cc_rh_subscription.py
@@ -39,12 +39,11 @@ Subscription`` example config.
"""
from cloudinit import log as logging
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
LOG = logging.getLogger(__name__)
-distros = ['fedora', 'rhel']
+distros = ["fedora", "rhel"]
def handle(name, cfg, _cloud, log, _args):
@@ -60,8 +59,9 @@ def handle(name, cfg, _cloud, log, _args):
raise SubscriptionError(verify_msg)
cont = sm.rhn_register()
if not cont:
- raise SubscriptionError("Registration failed or did not "
- "run completely")
+ raise SubscriptionError(
+ "Registration failed or did not run completely"
+ )
# Splitting up the registration, auto-attach, and servicelevel
# commands because the error codes, messages from subman are not
@@ -70,8 +70,7 @@ def handle(name, cfg, _cloud, log, _args):
# Attempt to change the service level
if sm.auto_attach and sm.servicelevel is not None:
if not sm._set_service_level():
- raise SubscriptionError("Setting of service-level "
- "failed")
+ raise SubscriptionError("Setting of service-level failed")
else:
sm.log.debug("Completed auto-attach with service level")
elif sm.auto_attach:
@@ -87,8 +86,9 @@ def handle(name, cfg, _cloud, log, _args):
return_stat = sm.addPool(sm.pools)
if not return_stat:
- raise SubscriptionError("Unable to attach pools {0}"
- .format(sm.pools))
+ raise SubscriptionError(
+ "Unable to attach pools {0}".format(sm.pools)
+ )
return_stat = sm.update_repos()
if not return_stat:
raise SubscriptionError("Unable to add or remove repos")
@@ -105,72 +105,87 @@ class SubscriptionError(Exception):
class SubscriptionManager(object):
- valid_rh_keys = ['org', 'activation-key', 'username', 'password',
- 'disable-repo', 'enable-repo', 'add-pool',
- 'rhsm-baseurl', 'server-hostname',
- 'auto-attach', 'service-level']
+ valid_rh_keys = [
+ "org",
+ "activation-key",
+ "username",
+ "password",
+ "disable-repo",
+ "enable-repo",
+ "add-pool",
+ "rhsm-baseurl",
+ "server-hostname",
+ "auto-attach",
+ "service-level",
+ ]
def __init__(self, cfg, log=None):
if log is None:
log = LOG
self.log = log
self.cfg = cfg
- self.rhel_cfg = self.cfg.get('rh_subscription', {})
- self.rhsm_baseurl = self.rhel_cfg.get('rhsm-baseurl')
- self.server_hostname = self.rhel_cfg.get('server-hostname')
- self.pools = self.rhel_cfg.get('add-pool')
- self.activation_key = self.rhel_cfg.get('activation-key')
- self.org = self.rhel_cfg.get('org')
- self.userid = self.rhel_cfg.get('username')
- self.password = self.rhel_cfg.get('password')
- self.auto_attach = self.rhel_cfg.get('auto-attach')
- self.enable_repo = self.rhel_cfg.get('enable-repo')
- self.disable_repo = self.rhel_cfg.get('disable-repo')
- self.servicelevel = self.rhel_cfg.get('service-level')
+ self.rhel_cfg = self.cfg.get("rh_subscription", {})
+ self.rhsm_baseurl = self.rhel_cfg.get("rhsm-baseurl")
+ self.server_hostname = self.rhel_cfg.get("server-hostname")
+ self.pools = self.rhel_cfg.get("add-pool")
+ self.activation_key = self.rhel_cfg.get("activation-key")
+ self.org = self.rhel_cfg.get("org")
+ self.userid = self.rhel_cfg.get("username")
+ self.password = self.rhel_cfg.get("password")
+ self.auto_attach = self.rhel_cfg.get("auto-attach")
+ self.enable_repo = self.rhel_cfg.get("enable-repo")
+ self.disable_repo = self.rhel_cfg.get("disable-repo")
+ self.servicelevel = self.rhel_cfg.get("service-level")
def log_success(self, msg):
- '''Simple wrapper for logging info messages. Useful for unittests'''
+ """Simple wrapper for logging info messages. Useful for unittests"""
self.log.info(msg)
def log_warn(self, msg):
- '''Simple wrapper for logging warning messages. Useful for unittests'''
+ """Simple wrapper for logging warning messages. Useful for unittests"""
self.log.warning(msg)
def _verify_keys(self):
- '''
+ """
Checks that the keys in the rh_subscription dict from the user-data
are what we expect.
- '''
+ """
for k in self.rhel_cfg:
if k not in self.valid_rh_keys:
- bad_key = "{0} is not a valid key for rh_subscription. "\
- "Valid keys are: "\
- "{1}".format(k, ', '.join(self.valid_rh_keys))
+ bad_key = (
+ "{0} is not a valid key for rh_subscription. "
+ "Valid keys are: "
+ "{1}".format(k, ", ".join(self.valid_rh_keys))
+ )
return False, bad_key
# Check for bad auto-attach value
- if (self.auto_attach is not None) and \
- not (util.is_true(self.auto_attach) or
- util.is_false(self.auto_attach)):
- not_bool = "The key auto-attach must be a boolean value "\
- "(True/False "
+ if (self.auto_attach is not None) and not (
+ util.is_true(self.auto_attach) or util.is_false(self.auto_attach)
+ ):
+ not_bool = (
+ "The key auto-attach must be a boolean value (True/False "
+ )
return False, not_bool
- if (self.servicelevel is not None) and ((not self.auto_attach) or
- (util.is_false(str(self.auto_attach)))):
- no_auto = ("The service-level key must be used in conjunction "
- "with the auto-attach key. Please re-run with "
- "auto-attach: True")
+ if (self.servicelevel is not None) and (
+ (not self.auto_attach) or (util.is_false(str(self.auto_attach)))
+ ):
+ no_auto = (
+ "The service-level key must be used in conjunction "
+ "with the auto-attach key. Please re-run with "
+ "auto-attach: True"
+ )
return False, no_auto
return True, None
def is_registered(self):
- '''
+ """
Checks if the system is already registered and returns
True if so, else False
- '''
- cmd = ['identity']
+ """
+ cmd = ["identity"]
try:
_sub_man_cli(cmd)
@@ -180,15 +195,18 @@ class SubscriptionManager(object):
return True
def rhn_register(self):
- '''
+ """
Registers the system by userid and password or activation key
and org. Returns True when successful False when not.
- '''
+ """
if (self.activation_key is not None) and (self.org is not None):
# register by activation key
- cmd = ['register', '--activationkey={0}'.
- format(self.activation_key), '--org={0}'.format(self.org)]
+ cmd = [
+ "register",
+ "--activationkey={0}".format(self.activation_key),
+ "--org={0}".format(self.org),
+ ]
# If the baseurl and/or server url are passed in, we register
# with them.
@@ -203,14 +221,18 @@ class SubscriptionManager(object):
return_out = _sub_man_cli(cmd, logstring_val=True)[0]
except subp.ProcessExecutionError as e:
if e.stdout == "":
- self.log_warn("Registration failed due "
- "to: {0}".format(e.stderr))
+ self.log_warn(
+ "Registration failed due to: {0}".format(e.stderr)
+ )
return False
elif (self.userid is not None) and (self.password is not None):
# register by username and password
- cmd = ['register', '--username={0}'.format(self.userid),
- '--password={0}'.format(self.password)]
+ cmd = [
+ "register",
+ "--username={0}".format(self.userid),
+ "--password={0}".format(self.password),
+ ]
# If the baseurl and/or server url are passed in, we register
# with them.
@@ -226,15 +248,18 @@ class SubscriptionManager(object):
return_out = _sub_man_cli(cmd, logstring_val=True)[0]
except subp.ProcessExecutionError as e:
if e.stdout == "":
- self.log_warn("Registration failed due "
- "to: {0}".format(e.stderr))
+ self.log_warn(
+ "Registration failed due to: {0}".format(e.stderr)
+ )
return False
else:
- self.log_warn("Unable to register system due to incomplete "
- "information.")
- self.log_warn("Use either activationkey and org *or* userid "
- "and password")
+ self.log_warn(
+ "Unable to register system due to incomplete information."
+ )
+ self.log_warn(
+ "Use either activationkey and org *or* userid and password"
+ )
return False
reg_id = return_out.split("ID: ")[1].rstrip()
@@ -242,19 +267,25 @@ class SubscriptionManager(object):
return True
def _set_service_level(self):
- cmd = ['attach', '--auto', '--servicelevel={0}'
- .format(self.servicelevel)]
+ cmd = [
+ "attach",
+ "--auto",
+ "--servicelevel={0}".format(self.servicelevel),
+ ]
try:
return_out = _sub_man_cli(cmd)[0]
except subp.ProcessExecutionError as e:
- if e.stdout.rstrip() != '':
+ if e.stdout.rstrip() != "":
for line in e.stdout.split("\n"):
- if line != '':
+ if line != "":
self.log_warn(line)
else:
- self.log_warn("Setting the service level failed with: "
- "{0}".format(e.stderr.strip()))
+ self.log_warn(
+ "Setting the service level failed with: {0}".format(
+ e.stderr.strip()
+ )
+ )
return False
for line in return_out.split("\n"):
if line != "":
@@ -262,7 +293,7 @@ class SubscriptionManager(object):
return True
def _set_auto_attach(self):
- cmd = ['attach', '--auto']
+ cmd = ["attach", "--auto"]
try:
return_out = _sub_man_cli(cmd)[0]
except subp.ProcessExecutionError as e:
@@ -274,52 +305,52 @@ class SubscriptionManager(object):
return True
def _getPools(self):
- '''
+ """
Gets the list pools for the active subscription and returns them
in list form.
- '''
+ """
available = []
consumed = []
# Get all available pools
- cmd = ['list', '--available', '--pool-only']
+ cmd = ["list", "--available", "--pool-only"]
results = _sub_man_cli(cmd)[0]
available = (results.rstrip()).split("\n")
# Get all consumed pools
- cmd = ['list', '--consumed', '--pool-only']
+ cmd = ["list", "--consumed", "--pool-only"]
results = _sub_man_cli(cmd)[0]
consumed = (results.rstrip()).split("\n")
return available, consumed
def _getRepos(self):
- '''
+ """
Obtains the current list of active yum repositories and returns
them in list form.
- '''
+ """
- cmd = ['repos', '--list-enabled']
+ cmd = ["repos", "--list-enabled"]
return_out = _sub_man_cli(cmd)[0]
active_repos = []
for repo in return_out.split("\n"):
if "Repo ID:" in repo:
- active_repos.append((repo.split(':')[1]).strip())
+ active_repos.append((repo.split(":")[1]).strip())
- cmd = ['repos', '--list-disabled']
+ cmd = ["repos", "--list-disabled"]
return_out = _sub_man_cli(cmd)[0]
inactive_repos = []
for repo in return_out.split("\n"):
if "Repo ID:" in repo:
- inactive_repos.append((repo.split(':')[1]).strip())
+ inactive_repos.append((repo.split(":")[1]).strip())
return active_repos, inactive_repos
def addPool(self, pools):
- '''
+ """
Takes a list of subscription pools and "attaches" them to the
current subscription
- '''
+ """
# An empty list was passed
if len(pools) == 0:
@@ -328,31 +359,33 @@ class SubscriptionManager(object):
pool_available, pool_consumed = self._getPools()
pool_list = []
- cmd = ['attach']
+ cmd = ["attach"]
for pool in pools:
if (pool not in pool_consumed) and (pool in pool_available):
- pool_list.append('--pool={0}'.format(pool))
+ pool_list.append("--pool={0}".format(pool))
else:
self.log_warn("Pool {0} is not available".format(pool))
if len(pool_list) > 0:
cmd.extend(pool_list)
try:
_sub_man_cli(cmd)
- self.log.debug("Attached the following pools to your "
- "system: %s", (", ".join(pool_list))
- .replace('--pool=', ''))
+ self.log.debug(
+ "Attached the following pools to your system: %s",
+ (", ".join(pool_list)).replace("--pool=", ""),
+ )
return True
except subp.ProcessExecutionError as e:
- self.log_warn("Unable to attach pool {0} "
- "due to {1}".format(pool, e))
+ self.log_warn(
+ "Unable to attach pool {0} due to {1}".format(pool, e)
+ )
return False
def update_repos(self):
- '''
+ """
Takes a list of yum repo ids that need to be disabled or enabled; then
it verifies if they are already enabled or disabled and finally
executes the action to disable or enable
- '''
+ """
erepos = self.enable_repo
drepos = self.disable_repo
@@ -378,7 +411,7 @@ class SubscriptionManager(object):
enable_list = []
enable_list_fail = []
for repoid in erepos:
- if (repoid in inactive_repos):
+ if repoid in inactive_repos:
enable_list.append("--enable={0}".format(repoid))
else:
enable_list_fail.append(repoid)
@@ -399,14 +432,16 @@ class SubscriptionManager(object):
if fail in active_repos:
self.log.debug("Repo %s is already enabled", fail)
else:
- self.log_warn("Repo {0} does not appear to "
- "exist".format(fail))
+ self.log_warn(
+ "Repo {0} does not appear to exist".format(fail)
+ )
if len(disable_list_fail) > 0:
for fail in disable_list_fail:
- self.log.debug("Repo %s not disabled "
- "because it is not enabled", fail)
+ self.log.debug(
+ "Repo %s not disabled because it is not enabled", fail
+ )
- cmd = ['repos']
+ cmd = ["repos"]
if len(disable_list) > 0:
cmd.extend(disable_list)
@@ -420,11 +455,15 @@ class SubscriptionManager(object):
return False
if len(enable_list) > 0:
- self.log.debug("Enabled the following repos: %s",
- (", ".join(enable_list)).replace('--enable=', ''))
+ self.log.debug(
+ "Enabled the following repos: %s",
+ (", ".join(enable_list)).replace("--enable=", ""),
+ )
if len(disable_list) > 0:
- self.log.debug("Disabled the following repos: %s",
- (", ".join(disable_list)).replace('--disable=', ''))
+ self.log.debug(
+ "Disabled the following repos: %s",
+ (", ".join(disable_list)).replace("--disable=", ""),
+ )
return True
def is_configured(self):
@@ -432,13 +471,12 @@ class SubscriptionManager(object):
def _sub_man_cli(cmd, logstring_val=False):
- '''
+ """
    Uses the preferred cloud-init subprocess def of subp.subp
and runs subscription-manager. Breaking this to a
separate function for later use in mocking and unittests
- '''
- return subp.subp(['subscription-manager'] + cmd,
- logstring=logstring_val)
+ """
+ return subp.subp(["subscription-manager"] + cmd, logstring=logstring_val)
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_rightscale_userdata.py b/cloudinit/config/cc_rightscale_userdata.py
index c75dc57d..36a009a2 100644
--- a/cloudinit/config/cc_rightscale_userdata.py
+++ b/cloudinit/config/cc_rightscale_userdata.py
@@ -52,14 +52,14 @@ user scripts configuration directory, to be run later by ``cc_scripts_user``.
import os
from urllib.parse import parse_qs
-from cloudinit.settings import PER_INSTANCE
from cloudinit import url_helper as uhelp
from cloudinit import util
+from cloudinit.settings import PER_INSTANCE
frequency = PER_INSTANCE
MY_NAME = "cc_rightscale_userdata"
-MY_HOOKNAME = 'CLOUD_INIT_REMOTE_HOOK'
+MY_HOOKNAME = "CLOUD_INIT_REMOTE_HOOK"
def handle(name, _cfg, cloud, log, _args):
@@ -72,13 +72,16 @@ def handle(name, _cfg, cloud, log, _args):
try:
mdict = parse_qs(ud)
if not mdict or MY_HOOKNAME not in mdict:
- log.debug(("Skipping module %s, "
- "did not find %s in parsed"
- " raw userdata"), name, MY_HOOKNAME)
+ log.debug(
+ "Skipping module %s, did not find %s in parsed raw userdata",
+ name,
+ MY_HOOKNAME,
+ )
return
except Exception:
- util.logexc(log, "Failed to parse query string %s into a dictionary",
- ud)
+ util.logexc(
+ log, "Failed to parse query string %s into a dictionary", ud
+ )
raise
wrote_fns = []
@@ -87,7 +90,7 @@ def handle(name, _cfg, cloud, log, _args):
# These will eventually be then ran by the cc_scripts_user
# TODO(harlowja): maybe this should just be a new user data handler??
# Instead of a late module that acts like a user data handler?
- scripts_d = cloud.get_ipath_cur('scripts')
+ scripts_d = cloud.get_ipath_cur("scripts")
urls = mdict[MY_HOOKNAME]
for (i, url) in enumerate(urls):
fname = os.path.join(scripts_d, "rightscale-%02i" % (i))
@@ -99,8 +102,9 @@ def handle(name, _cfg, cloud, log, _args):
wrote_fns.append(fname)
except Exception as e:
captured_excps.append(e)
- util.logexc(log, "%s failed to read %s and write %s", MY_NAME, url,
- fname)
+ util.logexc(
+ log, "%s failed to read %s and write %s", MY_NAME, url, fname
+ )
if wrote_fns:
log.debug("Wrote out rightscale userdata to %s files", len(wrote_fns))
@@ -110,8 +114,11 @@ def handle(name, _cfg, cloud, log, _args):
log.debug("%s urls were skipped or failed", skipped)
if captured_excps:
- log.warning("%s failed with exceptions, re-raising the last one",
- len(captured_excps))
+ log.warning(
+ "%s failed with exceptions, re-raising the last one",
+ len(captured_excps),
+ )
raise captured_excps[-1]
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
index dd2bbd00..8dd4950f 100644
--- a/cloudinit/config/cc_rsyslog.py
+++ b/cloudinit/config/cc_rsyslog.py
@@ -182,45 +182,45 @@ import os
import re
from cloudinit import log as logging
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
DEF_FILENAME = "20-cloud-config.conf"
DEF_DIR = "/etc/rsyslog.d"
DEF_RELOAD = "auto"
DEF_REMOTES = {}
-KEYNAME_CONFIGS = 'configs'
-KEYNAME_FILENAME = 'config_filename'
-KEYNAME_DIR = 'config_dir'
-KEYNAME_RELOAD = 'service_reload_command'
-KEYNAME_LEGACY_FILENAME = 'rsyslog_filename'
-KEYNAME_LEGACY_DIR = 'rsyslog_dir'
-KEYNAME_REMOTES = 'remotes'
+KEYNAME_CONFIGS = "configs"
+KEYNAME_FILENAME = "config_filename"
+KEYNAME_DIR = "config_dir"
+KEYNAME_RELOAD = "service_reload_command"
+KEYNAME_LEGACY_FILENAME = "rsyslog_filename"
+KEYNAME_LEGACY_DIR = "rsyslog_dir"
+KEYNAME_REMOTES = "remotes"
LOG = logging.getLogger(__name__)
-COMMENT_RE = re.compile(r'[ ]*[#]+[ ]*')
+COMMENT_RE = re.compile(r"[ ]*[#]+[ ]*")
HOST_PORT_RE = re.compile(
- r'^(?P<proto>[@]{0,2})'
- r'(([\[](?P<bracket_addr>[^\]]*)[\]])|(?P<addr>[^:]*))'
- r'([:](?P<port>[0-9]+))?$')
+ r"^(?P<proto>[@]{0,2})"
+ r"(([\[](?P<bracket_addr>[^\]]*)[\]])|(?P<addr>[^:]*))"
+ r"([:](?P<port>[0-9]+))?$"
+)
def reload_syslog(distro, command=DEF_RELOAD):
if command == DEF_RELOAD:
- service = distro.get_option('rsyslog_svcname', 'rsyslog')
- return distro.manage_service('try-reload', service)
+ service = distro.get_option("rsyslog_svcname", "rsyslog")
+ return distro.manage_service("try-reload", service)
return subp.subp(command, capture=True)
def load_config(cfg):
# return an updated config with entries of the correct type
# support converting the old top level format into new format
- mycfg = cfg.get('rsyslog', {})
+ mycfg = cfg.get("rsyslog", {})
- if isinstance(cfg.get('rsyslog'), list):
- mycfg = {KEYNAME_CONFIGS: cfg.get('rsyslog')}
+ if isinstance(cfg.get("rsyslog"), list):
+ mycfg = {KEYNAME_CONFIGS: cfg.get("rsyslog")}
if KEYNAME_LEGACY_FILENAME in cfg:
mycfg[KEYNAME_FILENAME] = cfg[KEYNAME_LEGACY_FILENAME]
if KEYNAME_LEGACY_DIR in cfg:
@@ -231,7 +231,8 @@ def load_config(cfg):
(KEYNAME_DIR, DEF_DIR, str),
(KEYNAME_FILENAME, DEF_FILENAME, str),
(KEYNAME_RELOAD, DEF_RELOAD, (str, list)),
- (KEYNAME_REMOTES, DEF_REMOTES, dict))
+ (KEYNAME_REMOTES, DEF_REMOTES, dict),
+ )
for key, default, vtypes in fillup:
if key not in mycfg or not isinstance(mycfg[key], vtypes):
@@ -247,10 +248,11 @@ def apply_rsyslog_changes(configs, def_fname, cfg_dir):
for cur_pos, ent in enumerate(configs):
if isinstance(ent, dict):
if "content" not in ent:
- LOG.warning("No 'content' entry in config entry %s",
- cur_pos + 1)
+ LOG.warning(
+ "No 'content' entry in config entry %s", cur_pos + 1
+ )
continue
- content = ent['content']
+ content = ent["content"]
filename = ent.get("filename", def_fname)
else:
content = ent
@@ -301,9 +303,9 @@ def parse_remotes_line(line, name=None):
if not toks:
raise ValueError("Invalid host specification '%s'" % host_port)
- proto = toks.group('proto')
- addr = toks.group('addr') or toks.group('bracket_addr')
- port = toks.group('port')
+ proto = toks.group("proto")
+ addr = toks.group("addr") or toks.group("bracket_addr")
+ port = toks.group("port")
if addr.startswith("[") and not addr.endswith("]"):
raise ValueError("host spec had invalid brackets: %s" % addr)
@@ -311,15 +313,17 @@ def parse_remotes_line(line, name=None):
if comment and not name:
name = comment
- t = SyslogRemotesLine(name=name, match=match, proto=proto,
- addr=addr, port=port)
+ t = SyslogRemotesLine(
+ name=name, match=match, proto=proto, addr=addr, port=port
+ )
t.validate()
return t
class SyslogRemotesLine(object):
- def __init__(self, name=None, match=None, proto=None, addr=None,
- port=None):
+ def __init__(
+ self, name=None, match=None, proto=None, addr=None, port=None
+ ):
if not match:
match = "*.*"
self.name = name
@@ -352,7 +356,11 @@ class SyslogRemotesLine(object):
def __repr__(self):
return "[name=%s match=%s proto=%s address=%s port=%s]" % (
- self.name, self.match, self.proto, self.addr, self.port
+ self.name,
+ self.match,
+ self.proto,
+ self.addr,
+ self.port,
)
def __str__(self):
@@ -390,13 +398,14 @@ def remotes_to_rsyslog_cfg(remotes, header=None, footer=None):
LOG.warning("failed loading remote %s: %s [%s]", name, line, e)
if footer is not None:
lines.append(footer)
- return '\n'.join(lines) + "\n"
+ return "\n".join(lines) + "\n"
def handle(name, cfg, cloud, log, _args):
- if 'rsyslog' not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'rsyslog' key in configuration"), name)
+ if "rsyslog" not in cfg:
+ log.debug(
+ "Skipping module named %s, no 'rsyslog' key in configuration", name
+ )
return
mycfg = load_config(cfg)
@@ -408,16 +417,18 @@ def handle(name, cfg, cloud, log, _args):
mycfg[KEYNAME_REMOTES],
header="# begin remotes",
footer="# end remotes",
- ))
+ )
+ )
- if not mycfg['configs']:
+ if not mycfg["configs"]:
log.debug("Empty config rsyslog['configs'], nothing to do")
return
changes = apply_rsyslog_changes(
configs=mycfg[KEYNAME_CONFIGS],
def_fname=mycfg[KEYNAME_FILENAME],
- cfg_dir=mycfg[KEYNAME_DIR])
+ cfg_dir=mycfg[KEYNAME_DIR],
+ )
if not changes:
log.debug("restart of syslog not necessary, no changes made")
@@ -437,4 +448,5 @@ def handle(name, cfg, cloud, log, _args):
# the logging was setup to use it...
log.debug("%s configured %s files", name, changes)
+
# vi: ts=4 expandtab syntax=python
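
The HOST_PORT_RE rewrite above is quoting-only; for context, this small example runs the same regex against a hypothetical remotes entry to show the named groups it yields:

import re

# Regex as defined in cc_rsyslog.py above; the sample line below is made up.
HOST_PORT_RE = re.compile(
    r"^(?P<proto>[@]{0,2})"
    r"(([\[](?P<bracket_addr>[^\]]*)[\]])|(?P<addr>[^:]*))"
    r"([:](?P<port>[0-9]+))?$"
)

m = HOST_PORT_RE.match("@@[2001:db8::1]:514")
print(m.group("proto"), m.group("bracket_addr"), m.group("port"))
# @@ 2001:db8::1 514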
diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py
index 2f5e02cb..15cbaf1a 100644
--- a/cloudinit/config/cc_runcmd.py
+++ b/cloudinit/config/cc_runcmd.py
@@ -8,14 +8,13 @@
"""Runcmd: run arbitrary commands at rc.local with output to the console"""
-from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema
-from cloudinit.distros import ALL_DISTROS
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import util
-
import os
from textwrap import dedent
+from cloudinit import util
+from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.settings import PER_INSTANCE
# The schema definition for each cloud-config module is a strict contract for
# describing supported configuration parameters for each cloud-config section.
@@ -26,10 +25,11 @@ from textwrap import dedent
distros = [ALL_DISTROS]
meta = {
- 'id': 'cc_runcmd',
- 'name': 'Runcmd',
- 'title': 'Run arbitrary commands',
- 'description': dedent("""\
+ "id": "cc_runcmd",
+ "name": "Runcmd",
+ "title": "Run arbitrary commands",
+ "description": dedent(
+ """\
Run arbitrary commands at a rc.local like level with output to the
console. Each item can be either a list or a string. If the item is a
list, it will be properly executed as if passed to ``execve()`` (with
@@ -46,35 +46,41 @@ meta = {
when writing files, do not use /tmp dir as it races with
systemd-tmpfiles-clean LP: #1707222. Use /run/somedir instead.
- """),
- 'distros': distros,
- 'examples': [dedent("""\
+ """
+ ),
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
runcmd:
- [ ls, -l, / ]
- [ sh, -xc, "echo $(date) ': hello world!'" ]
- [ sh, -c, echo "=========hello world'=========" ]
- ls -l /root
- [ wget, "http://example.org", -O, /tmp/index.html ]
- """)],
- 'frequency': PER_INSTANCE,
+ """
+ )
+ ],
+ "frequency": PER_INSTANCE,
}
schema = {
- 'type': 'object',
- 'properties': {
- 'runcmd': {
- 'type': 'array',
- 'items': {
- 'oneOf': [
- {'type': 'array', 'items': {'type': 'string'}},
- {'type': 'string'},
- {'type': 'null'}]
+ "type": "object",
+ "properties": {
+ "runcmd": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {"type": "array", "items": {"type": "string"}},
+ {"type": "string"},
+ {"type": "null"},
+ ]
},
- 'additionalItems': False, # Reject items of non-string non-list
- 'additionalProperties': False,
- 'minItems': 1,
+ "additionalItems": False, # Reject items of non-string non-list
+ "additionalProperties": False,
+ "minItems": 1,
}
- }
+ },
}
__doc__ = get_meta_doc(meta, schema) # Supplement python help()
@@ -82,17 +88,19 @@ __doc__ = get_meta_doc(meta, schema) # Supplement python help()
def handle(name, cfg, cloud, log, _args):
if "runcmd" not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'runcmd' key in configuration"), name)
+ log.debug(
+ "Skipping module named %s, no 'runcmd' key in configuration", name
+ )
return
validate_cloudconfig_schema(cfg, schema)
- out_fn = os.path.join(cloud.get_ipath('scripts'), "runcmd")
+ out_fn = os.path.join(cloud.get_ipath("scripts"), "runcmd")
cmd = cfg["runcmd"]
try:
content = util.shellify(cmd)
util.write_file(out_fn, content, 0o700)
except Exception as e:
- raise type(e)('Failed to shellify {} into file {}'.format(cmd, out_fn))
+ raise type(e)("Failed to shellify {} into file {}".format(cmd, out_fn))
+
# vi: ts=4 expandtab
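
The runcmd handler above turns the configured commands into a script via util.shellify before writing it out; as a rough standalone approximation (not the real implementation), list entries are quoted per argument while string entries pass through verbatim:

import shlex

def shellify_sketch(cmds):
    # Approximation of the shellify idea: one shell line per runcmd entry.
    lines = ["#!/bin/sh"]
    for cmd in cmds:
        if isinstance(cmd, list):
            lines.append(" ".join(shlex.quote(c) for c in cmd))
        elif isinstance(cmd, str):
            lines.append(cmd)
    return "\n".join(lines) + "\n"

print(shellify_sketch([["ls", "-l", "/"], "echo 'hello world'"]))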
diff --git a/cloudinit/config/cc_salt_minion.py b/cloudinit/config/cc_salt_minion.py
index b61876aa..b2e5eefd 100644
--- a/cloudinit/config/cc_salt_minion.py
+++ b/cloudinit/config/cc_salt_minion.py
@@ -48,7 +48,6 @@ import os
from cloudinit import safeyaml, subp, util
from cloudinit.distros import rhel_util
-
# Note: see https://docs.saltstack.com/en/latest/topics/installation/
# Note: see https://docs.saltstack.com/en/latest/ref/configuration/
@@ -57,36 +56,40 @@ class SaltConstants(object):
"""
defines default distribution specific salt variables
"""
+
def __init__(self, cfg):
# constants tailored for FreeBSD
if util.is_FreeBSD():
- self.pkg_name = 'py36-salt'
- self.srv_name = 'salt_minion'
- self.conf_dir = '/usr/local/etc/salt'
+ self.pkg_name = "py36-salt"
+ self.srv_name = "salt_minion"
+ self.conf_dir = "/usr/local/etc/salt"
# constants for any other OS
else:
- self.pkg_name = 'salt-minion'
- self.srv_name = 'salt-minion'
- self.conf_dir = '/etc/salt'
+ self.pkg_name = "salt-minion"
+ self.srv_name = "salt-minion"
+ self.conf_dir = "/etc/salt"
# if there are constants given in cloud config use those
- self.pkg_name = util.get_cfg_option_str(cfg, 'pkg_name',
- self.pkg_name)
- self.conf_dir = util.get_cfg_option_str(cfg, 'config_dir',
- self.conf_dir)
- self.srv_name = util.get_cfg_option_str(cfg, 'service_name',
- self.srv_name)
+ self.pkg_name = util.get_cfg_option_str(cfg, "pkg_name", self.pkg_name)
+ self.conf_dir = util.get_cfg_option_str(
+ cfg, "config_dir", self.conf_dir
+ )
+ self.srv_name = util.get_cfg_option_str(
+ cfg, "service_name", self.srv_name
+ )
def handle(name, cfg, cloud, log, _args):
# If there isn't a salt key in the configuration don't do anything
- if 'salt_minion' not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'salt_minion' key in configuration"), name)
+ if "salt_minion" not in cfg:
+ log.debug(
+ "Skipping module named %s, no 'salt_minion' key in configuration",
+ name,
+ )
return
- s_cfg = cfg['salt_minion']
+ s_cfg = cfg["salt_minion"]
const = SaltConstants(cfg=s_cfg)
# Start by installing the salt package ...
@@ -96,40 +99,42 @@ def handle(name, cfg, cloud, log, _args):
util.ensure_dir(const.conf_dir)
# ... and then update the salt configuration
- if 'conf' in s_cfg:
+ if "conf" in s_cfg:
# Add all sections from the conf object to minion config file
- minion_config = os.path.join(const.conf_dir, 'minion')
- minion_data = safeyaml.dumps(s_cfg.get('conf'))
+ minion_config = os.path.join(const.conf_dir, "minion")
+ minion_data = safeyaml.dumps(s_cfg.get("conf"))
util.write_file(minion_config, minion_data)
- if 'grains' in s_cfg:
+ if "grains" in s_cfg:
# add grains to /etc/salt/grains
- grains_config = os.path.join(const.conf_dir, 'grains')
- grains_data = safeyaml.dumps(s_cfg.get('grains'))
+ grains_config = os.path.join(const.conf_dir, "grains")
+ grains_data = safeyaml.dumps(s_cfg.get("grains"))
util.write_file(grains_config, grains_data)
# ... copy the key pair if specified
- if 'public_key' in s_cfg and 'private_key' in s_cfg:
+ if "public_key" in s_cfg and "private_key" in s_cfg:
pki_dir_default = os.path.join(const.conf_dir, "pki/minion")
if not os.path.isdir(pki_dir_default):
pki_dir_default = os.path.join(const.conf_dir, "pki")
- pki_dir = s_cfg.get('pki_dir', pki_dir_default)
+ pki_dir = s_cfg.get("pki_dir", pki_dir_default)
with util.umask(0o77):
util.ensure_dir(pki_dir)
- pub_name = os.path.join(pki_dir, 'minion.pub')
- pem_name = os.path.join(pki_dir, 'minion.pem')
- util.write_file(pub_name, s_cfg['public_key'])
- util.write_file(pem_name, s_cfg['private_key'])
+ pub_name = os.path.join(pki_dir, "minion.pub")
+ pem_name = os.path.join(pki_dir, "minion.pem")
+ util.write_file(pub_name, s_cfg["public_key"])
+ util.write_file(pem_name, s_cfg["private_key"])
# we need to have the salt minion service enabled in rc in order to be
# able to start the service. this does only apply on FreeBSD servers.
- if cloud.distro.osfamily == 'freebsd':
+ if cloud.distro.osfamily == "freebsd":
rhel_util.update_sysconfig_file(
- '/etc/rc.conf', {'salt_minion_enable': 'YES'})
+ "/etc/rc.conf", {"salt_minion_enable": "YES"}
+ )
# restart salt-minion. 'service' will start even if not started. if it
# was started, it needs to be restarted for config change.
- subp.subp(['service', const.srv_name, 'restart'], capture=False)
+ subp.subp(["service", const.srv_name, "restart"], capture=False)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_scripts_per_boot.py b/cloudinit/config/cc_scripts_per_boot.py
index 1bf3f508..b7bfb7aa 100644
--- a/cloudinit/config/cc_scripts_per_boot.py
+++ b/cloudinit/config/cc_scripts_per_boot.py
@@ -25,23 +25,27 @@ module does not accept any config keys.
import os
from cloudinit import subp
-
from cloudinit.settings import PER_ALWAYS
frequency = PER_ALWAYS
-SCRIPT_SUBDIR = 'per-boot'
+SCRIPT_SUBDIR = "per-boot"
def handle(name, _cfg, cloud, log, _args):
# Comes from the following:
# https://forums.aws.amazon.com/thread.jspa?threadID=96918
- runparts_path = os.path.join(cloud.get_cpath(), 'scripts', SCRIPT_SUBDIR)
+ runparts_path = os.path.join(cloud.get_cpath(), "scripts", SCRIPT_SUBDIR)
try:
subp.runparts(runparts_path)
except Exception:
- log.warning("Failed to run module %s (%s in %s)",
- name, SCRIPT_SUBDIR, runparts_path)
+ log.warning(
+ "Failed to run module %s (%s in %s)",
+ name,
+ SCRIPT_SUBDIR,
+ runparts_path,
+ )
raise
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_scripts_per_instance.py b/cloudinit/config/cc_scripts_per_instance.py
index 5966fb9a..ef102b1c 100644
--- a/cloudinit/config/cc_scripts_per_instance.py
+++ b/cloudinit/config/cc_scripts_per_instance.py
@@ -28,23 +28,27 @@ the system. As a result per-instance scripts will run again.
import os
from cloudinit import subp
-
from cloudinit.settings import PER_INSTANCE
frequency = PER_INSTANCE
-SCRIPT_SUBDIR = 'per-instance'
+SCRIPT_SUBDIR = "per-instance"
def handle(name, _cfg, cloud, log, _args):
# Comes from the following:
# https://forums.aws.amazon.com/thread.jspa?threadID=96918
- runparts_path = os.path.join(cloud.get_cpath(), 'scripts', SCRIPT_SUBDIR)
+ runparts_path = os.path.join(cloud.get_cpath(), "scripts", SCRIPT_SUBDIR)
try:
subp.runparts(runparts_path)
except Exception:
- log.warning("Failed to run module %s (%s in %s)",
- name, SCRIPT_SUBDIR, runparts_path)
+ log.warning(
+ "Failed to run module %s (%s in %s)",
+ name,
+ SCRIPT_SUBDIR,
+ runparts_path,
+ )
raise
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_scripts_per_once.py b/cloudinit/config/cc_scripts_per_once.py
index bcca859e..bf4231e7 100644
--- a/cloudinit/config/cc_scripts_per_once.py
+++ b/cloudinit/config/cc_scripts_per_once.py
@@ -26,23 +26,27 @@ be run in alphabetical order. This module does not accept any config keys.
import os
from cloudinit import subp
-
from cloudinit.settings import PER_ONCE
frequency = PER_ONCE
-SCRIPT_SUBDIR = 'per-once'
+SCRIPT_SUBDIR = "per-once"
def handle(name, _cfg, cloud, log, _args):
# Comes from the following:
# https://forums.aws.amazon.com/thread.jspa?threadID=96918
- runparts_path = os.path.join(cloud.get_cpath(), 'scripts', SCRIPT_SUBDIR)
+ runparts_path = os.path.join(cloud.get_cpath(), "scripts", SCRIPT_SUBDIR)
try:
subp.runparts(runparts_path)
except Exception:
- log.warning("Failed to run module %s (%s in %s)",
- name, SCRIPT_SUBDIR, runparts_path)
+ log.warning(
+ "Failed to run module %s (%s in %s)",
+ name,
+ SCRIPT_SUBDIR,
+ runparts_path,
+ )
raise
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_scripts_user.py b/cloudinit/config/cc_scripts_user.py
index 215703ef..e0d6c560 100644
--- a/cloudinit/config/cc_scripts_user.py
+++ b/cloudinit/config/cc_scripts_user.py
@@ -28,12 +28,11 @@ This module does not accept any config keys.
import os
from cloudinit import subp
-
from cloudinit.settings import PER_INSTANCE
frequency = PER_INSTANCE
-SCRIPT_SUBDIR = 'scripts'
+SCRIPT_SUBDIR = "scripts"
def handle(name, _cfg, cloud, log, _args):
@@ -44,8 +43,13 @@ def handle(name, _cfg, cloud, log, _args):
try:
subp.runparts(runparts_path)
except Exception:
- log.warning("Failed to run module %s (%s in %s)",
- name, SCRIPT_SUBDIR, runparts_path)
+ log.warning(
+ "Failed to run module %s (%s in %s)",
+ name,
+ SCRIPT_SUBDIR,
+ runparts_path,
+ )
raise
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_scripts_vendor.py b/cloudinit/config/cc_scripts_vendor.py
index e0a4bfff..1b30fa1b 100644
--- a/cloudinit/config/cc_scripts_vendor.py
+++ b/cloudinit/config/cc_scripts_vendor.py
@@ -28,29 +28,33 @@ entry under the ``vendor_data`` config key.
import os
-from cloudinit import subp
-from cloudinit import util
-
+from cloudinit import subp, util
from cloudinit.settings import PER_INSTANCE
frequency = PER_INSTANCE
-SCRIPT_SUBDIR = 'vendor'
+SCRIPT_SUBDIR = "vendor"
def handle(name, cfg, cloud, log, _args):
# This is written to by the vendor data handlers
# any vendor data shell scripts get placed in runparts_path
- runparts_path = os.path.join(cloud.get_ipath_cur(), 'scripts',
- SCRIPT_SUBDIR)
+ runparts_path = os.path.join(
+ cloud.get_ipath_cur(), "scripts", SCRIPT_SUBDIR
+ )
- prefix = util.get_cfg_by_path(cfg, ('vendor_data', 'prefix'), [])
+ prefix = util.get_cfg_by_path(cfg, ("vendor_data", "prefix"), [])
try:
subp.runparts(runparts_path, exe_prefix=prefix)
except Exception:
- log.warning("Failed to run module %s (%s in %s)",
- name, SCRIPT_SUBDIR, runparts_path)
+ log.warning(
+ "Failed to run module %s (%s in %s)",
+ name,
+ SCRIPT_SUBDIR,
+ runparts_path,
+ )
raise
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py
index 911789c7..67ba8ef5 100644
--- a/cloudinit/config/cc_seed_random.py
+++ b/cloudinit/config/cc_seed_random.py
@@ -68,9 +68,8 @@ import os
from io import BytesIO
from cloudinit import log as logging
+from cloudinit import subp, util
from cloudinit.settings import PER_INSTANCE
-from cloudinit import subp
-from cloudinit import util
frequency = PER_INSTANCE
LOG = logging.getLogger(__name__)
@@ -78,12 +77,12 @@ LOG = logging.getLogger(__name__)
def _decode(data, encoding=None):
if not data:
- return b''
- if not encoding or encoding.lower() in ['raw']:
+ return b""
+ if not encoding or encoding.lower() in ["raw"]:
return util.encode_text(data)
- elif encoding.lower() in ['base64', 'b64']:
+ elif encoding.lower() in ["base64", "b64"]:
return base64.b64decode(data)
- elif encoding.lower() in ['gzip', 'gz']:
+ elif encoding.lower() in ["gzip", "gz"]:
return util.decomp_gzip(data, quiet=False, decode=None)
else:
raise IOError("Unknown random_seed encoding: %s" % (encoding))
@@ -100,7 +99,8 @@ def handle_random_seed_command(command, required, env=None):
if not subp.which(cmd):
if required:
raise ValueError(
- "command '{cmd}' not found but required=true".format(cmd=cmd))
+ "command '{cmd}' not found but required=true".format(cmd=cmd)
+ )
else:
LOG.debug("command '%s' not found for seed_command", cmd)
return
@@ -108,34 +108,39 @@ def handle_random_seed_command(command, required, env=None):
def handle(name, cfg, cloud, log, _args):
- mycfg = cfg.get('random_seed', {})
- seed_path = mycfg.get('file', '/dev/urandom')
- seed_data = mycfg.get('data', b'')
+ mycfg = cfg.get("random_seed", {})
+ seed_path = mycfg.get("file", "/dev/urandom")
+ seed_data = mycfg.get("data", b"")
seed_buf = BytesIO()
if seed_data:
- seed_buf.write(_decode(seed_data, encoding=mycfg.get('encoding')))
+ seed_buf.write(_decode(seed_data, encoding=mycfg.get("encoding")))
# 'random_seed' is set up by Azure datasource, and comes already in
# openstack meta_data.json
metadata = cloud.datasource.metadata
- if metadata and 'random_seed' in metadata:
- seed_buf.write(util.encode_text(metadata['random_seed']))
+ if metadata and "random_seed" in metadata:
+ seed_buf.write(util.encode_text(metadata["random_seed"]))
seed_data = seed_buf.getvalue()
if len(seed_data):
- log.debug("%s: adding %s bytes of random seed entropy to %s", name,
- len(seed_data), seed_path)
+ log.debug(
+ "%s: adding %s bytes of random seed entropy to %s",
+ name,
+ len(seed_data),
+ seed_path,
+ )
util.append_file(seed_path, seed_data)
- command = mycfg.get('command', None)
- req = mycfg.get('command_required', False)
+ command = mycfg.get("command", None)
+ req = mycfg.get("command_required", False)
try:
env = os.environ.copy()
- env['RANDOM_SEED_FILE'] = seed_path
+ env["RANDOM_SEED_FILE"] = seed_path
handle_random_seed_command(command=command, required=req, env=env)
except ValueError as e:
log.warning("handling random command [%s] failed: %s", command, e)
raise e
+
# vi: ts=4 expandtab
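
The _decode changes above only touch quoting; the encoding handling itself works roughly like this standalone sketch (gzip support omitted, sample data is arbitrary):

import base64

def decode_sketch(data, encoding=None):
    # raw passes through as bytes; base64/b64 is decoded; other encodings rejected.
    if not data:
        return b""
    if not encoding or encoding.lower() == "raw":
        return data.encode() if isinstance(data, str) else data
    if encoding.lower() in ("base64", "b64"):
        return base64.b64decode(data)
    raise IOError("Unknown random_seed encoding: %s" % encoding)

print(decode_sketch("c2VlZA==", "b64"))  # b'seed'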
diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py
index a96bcc18..eb0ca328 100644
--- a/cloudinit/config/cc_set_hostname.py
+++ b/cloudinit/config/cc_set_hostname.py
@@ -48,9 +48,8 @@ based on initial hostname.
import os
-
-from cloudinit.atomic_helper import write_json
from cloudinit import util
+from cloudinit.atomic_helper import write_json
class SetHostnameError(Exception):
@@ -63,16 +62,19 @@ class SetHostnameError(Exception):
def handle(name, cfg, cloud, log, _args):
if util.get_cfg_option_bool(cfg, "preserve_hostname", False):
- log.debug(("Configuration option 'preserve_hostname' is set,"
- " not setting the hostname in module %s"), name)
+ log.debug(
+ "Configuration option 'preserve_hostname' is set,"
+ " not setting the hostname in module %s",
+ name,
+ )
return
# Set prefer_fqdn_over_hostname value in distro
- hostname_fqdn = util.get_cfg_option_bool(cfg,
- "prefer_fqdn_over_hostname",
- None)
+ hostname_fqdn = util.get_cfg_option_bool(
+ cfg, "prefer_fqdn_over_hostname", None
+ )
if hostname_fqdn is not None:
- cloud.distro.set_option('prefer_fqdn_over_hostname', hostname_fqdn)
+ cloud.distro.set_option("prefer_fqdn_over_hostname", hostname_fqdn)
(hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
# Check for previous successful invocation of set-hostname
@@ -82,14 +84,15 @@ def handle(name, cfg, cloud, log, _args):
# previous-hostname file which only contains the base hostname.
# TODO consolidate previous-hostname and set-hostname artifact files and
# distro._read_hostname implementation so we only validate one artifact.
- prev_fn = os.path.join(cloud.get_cpath('data'), "set-hostname")
+ prev_fn = os.path.join(cloud.get_cpath("data"), "set-hostname")
prev_hostname = {}
if os.path.exists(prev_fn):
prev_hostname = util.load_json(util.load_file(prev_fn))
- hostname_changed = (hostname != prev_hostname.get('hostname') or
- fqdn != prev_hostname.get('fqdn'))
+ hostname_changed = hostname != prev_hostname.get(
+ "hostname"
+ ) or fqdn != prev_hostname.get("fqdn")
if not hostname_changed:
- log.debug('No hostname changes. Skipping set-hostname')
+ log.debug("No hostname changes. Skipping set-hostname")
return
log.debug("Setting the hostname to %s (%s)", fqdn, hostname)
try:
@@ -98,6 +101,7 @@ def handle(name, cfg, cloud, log, _args):
msg = "Failed to set the hostname to %s (%s)" % (fqdn, hostname)
util.logexc(log, msg)
raise SetHostnameError("%s: %s" % (msg, e)) from e
- write_json(prev_fn, {'hostname': hostname, 'fqdn': fqdn})
+ write_json(prev_fn, {"hostname": hostname, "fqdn": fqdn})
+
# vi: ts=4 expandtab
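
The set-hostname hunks above preserve the change-detection logic; a simplified sketch of that previous-artifact comparison (the file path and hostnames here are hypothetical) looks like:

import json
import os

prev_fn = "/tmp/example-set-hostname.json"  # stand-in for the instance data path
prev = {}
if os.path.exists(prev_fn):
    with open(prev_fn) as f:
        prev = json.load(f)

hostname, fqdn = "node1", "node1.example.com"
changed = hostname != prev.get("hostname") or fqdn != prev.get("fqdn")
if changed:
    # Persist the new values so the next boot can skip an unchanged hostname.
    with open(prev_fn, "w") as f:
        json.dump({"hostname": hostname, "fqdn": fqdn}, f)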
diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
index 3843aaf7..d8df8e23 100755
--- a/cloudinit/config/cc_set_passwords.py
+++ b/cloudinit/config/cc_set_passwords.py
@@ -78,20 +78,17 @@ password.
"""
import re
+from string import ascii_letters, digits
-from cloudinit.distros import ug_util
from cloudinit import log as logging
+from cloudinit import subp, util
+from cloudinit.distros import ug_util
from cloudinit.ssh_util import update_ssh_config
-from cloudinit import subp
-from cloudinit import util
-
-from string import ascii_letters, digits
LOG = logging.getLogger(__name__)
# We are removing certain 'painful' letters/numbers
-PW_SET = (''.join([x for x in ascii_letters + digits
- if x not in 'loLOI01']))
+PW_SET = "".join([x for x in ascii_letters + digits if x not in "loLOI01"])
def handle_ssh_pwauth(pw_auth, distro):
@@ -105,12 +102,12 @@ def handle_ssh_pwauth(pw_auth, distro):
cfg_name = "PasswordAuthentication"
if util.is_true(pw_auth):
- cfg_val = 'yes'
+ cfg_val = "yes"
elif util.is_false(pw_auth):
- cfg_val = 'no'
+ cfg_val = "no"
else:
bmsg = "Leaving SSH config '%s' unchanged." % cfg_name
- if pw_auth is None or pw_auth.lower() == 'unchanged':
+ if pw_auth is None or pw_auth.lower() == "unchanged":
LOG.debug("%s ssh_pwauth=%s", bmsg, pw_auth)
else:
LOG.warning("%s Unrecognized value: ssh_pwauth=%s", bmsg, pw_auth)
@@ -121,7 +118,7 @@ def handle_ssh_pwauth(pw_auth, distro):
LOG.debug("No need to restart SSH service, %s not updated.", cfg_name)
return
- distro.manage_service('restart', distro.get_option('ssh_svcname', 'ssh'))
+ distro.manage_service("restart", distro.get_option("ssh_svcname", "ssh"))
LOG.debug("Restarted the SSH daemon.")
@@ -129,27 +126,27 @@ def handle(_name, cfg, cloud, log, args):
if args:
# if run from command line, and give args, wipe the chpasswd['list']
password = args[0]
- if 'chpasswd' in cfg and 'list' in cfg['chpasswd']:
- del cfg['chpasswd']['list']
+ if "chpasswd" in cfg and "list" in cfg["chpasswd"]:
+ del cfg["chpasswd"]["list"]
else:
password = util.get_cfg_option_str(cfg, "password", None)
expire = True
plist = None
- if 'chpasswd' in cfg:
- chfg = cfg['chpasswd']
- if 'list' in chfg and chfg['list']:
- if isinstance(chfg['list'], list):
+ if "chpasswd" in cfg:
+ chfg = cfg["chpasswd"]
+ if "list" in chfg and chfg["list"]:
+ if isinstance(chfg["list"], list):
log.debug("Handling input for chpasswd as list.")
- plist = util.get_cfg_option_list(chfg, 'list', plist)
+ plist = util.get_cfg_option_list(chfg, "list", plist)
else:
log.debug("Handling input for chpasswd as multiline string.")
- plist = util.get_cfg_option_str(chfg, 'list', plist)
+ plist = util.get_cfg_option_str(chfg, "list", plist)
if plist:
plist = plist.splitlines()
- expire = util.get_cfg_option_bool(chfg, 'expire', expire)
+ expire = util.get_cfg_option_bool(chfg, "expire", expire)
if not plist and password:
(users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
@@ -168,9 +165,9 @@ def handle(_name, cfg, cloud, log, args):
users = []
# N.B. This regex is included in the documentation (i.e. the module
# docstring), so any changes to it should be reflected there.
- prog = re.compile(r'\$(1|2a|2y|5|6)(\$.+){2}')
+ prog = re.compile(r"\$(1|2a|2y|5|6)(\$.+){2}")
for line in plist:
- u, p = line.split(':', 1)
+ u, p = line.split(":", 1)
if prog.match(p) is not None and ":" not in p:
hashed_plist_in.append(line)
hashed_users.append(u)
@@ -182,7 +179,7 @@ def handle(_name, cfg, cloud, log, args):
randlist.append("%s:%s" % (u, p))
plist_in.append("%s:%s" % (u, p))
users.append(u)
- ch_in = '\n'.join(plist_in) + '\n'
+ ch_in = "\n".join(plist_in) + "\n"
if users:
try:
log.debug("Changing password for %s:", users)
@@ -190,9 +187,10 @@ def handle(_name, cfg, cloud, log, args):
except Exception as e:
errors.append(e)
util.logexc(
- log, "Failed to set passwords with chpasswd for %s", users)
+ log, "Failed to set passwords with chpasswd for %s", users
+ )
- hashed_ch_in = '\n'.join(hashed_plist_in) + '\n'
+ hashed_ch_in = "\n".join(hashed_plist_in) + "\n"
if hashed_users:
try:
log.debug("Setting hashed password for %s:", hashed_users)
@@ -200,12 +198,16 @@ def handle(_name, cfg, cloud, log, args):
except Exception as e:
errors.append(e)
util.logexc(
- log, "Failed to set hashed passwords with chpasswd for %s",
- hashed_users)
+ log,
+ "Failed to set hashed passwords with chpasswd for %s",
+ hashed_users,
+ )
if len(randlist):
- blurb = ("Set the following 'random' passwords\n",
- '\n'.join(randlist))
+ blurb = (
+ "Set the following 'random' passwords\n",
+ "\n".join(randlist),
+ )
util.multi_log(
"%s\n%s\n" % blurb, stderr=False, fallback_to_stdout=False
)
@@ -222,7 +224,7 @@ def handle(_name, cfg, cloud, log, args):
if expired_users:
log.debug("Expired passwords for: %s users", expired_users)
- handle_ssh_pwauth(cfg.get('ssh_pwauth'), cloud.distro)
+ handle_ssh_pwauth(cfg.get("ssh_pwauth"), cloud.distro)
if len(errors):
log.debug("%s errors occured, re-raising the last one", len(errors))
@@ -239,7 +241,8 @@ def chpasswd(distro, plist_in, hashed=False):
u, p = pentry.split(":")
distro.set_passwd(u, p, hashed=hashed)
else:
- cmd = ['chpasswd'] + (['-e'] if hashed else [])
+ cmd = ["chpasswd"] + (["-e"] if hashed else [])
subp.subp(cmd, plist_in)
+
# vi: ts=4 expandtab
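
The PW_SET expression reformatted above filters look-alike characters out of the random-password alphabet; a quick, illustrative check of what that leaves (the sampled password is throwaway output, not how the module generates one):

import random
from string import ascii_letters, digits

# Drop the easily confused characters l, o, L, O, I, 0, 1.
PW_SET = "".join(c for c in ascii_letters + digits if c not in "loLOI01")
print(len(PW_SET))                                   # 55 usable characters
print("".join(random.choice(PW_SET) for _ in range(9)))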
diff --git a/cloudinit/config/cc_snap.py b/cloudinit/config/cc_snap.py
index 21f30b57..9c38046c 100644
--- a/cloudinit/config/cc_snap.py
+++ b/cloudinit/config/cc_snap.py
@@ -8,23 +8,22 @@ import sys
from textwrap import dedent
from cloudinit import log as logging
+from cloudinit import subp, util
from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema
from cloudinit.settings import PER_INSTANCE
from cloudinit.subp import prepend_base_command
-from cloudinit import subp
-from cloudinit import util
-
-distros = ['ubuntu']
+distros = ["ubuntu"]
frequency = PER_INSTANCE
LOG = logging.getLogger(__name__)
meta = {
- 'id': 'cc_snap',
- 'name': 'Snap',
- 'title': 'Install, configure and manage snapd and snap packages',
- 'description': dedent("""\
+ "id": "cc_snap",
+ "name": "Snap",
+ "title": "Install, configure and manage snapd and snap packages",
+ "description": dedent(
+ """\
This module provides a simple configuration namespace in cloud-init to
both setup snapd and install snaps.
@@ -55,9 +54,12 @@ meta = {
**Development only**: The ``squashfuse_in_container`` boolean can be
set true to install squashfuse package when in a container to enable
snap installs. Default is false.
- """),
- 'distros': distros,
- 'examples': [dedent("""\
+ """
+ ),
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
snap:
assertions:
00: |
@@ -68,14 +70,20 @@ meta = {
00: snap create-user --sudoer --known <snap-user>@mydomain.com
01: snap install canonical-livepatch
02: canonical-livepatch enable <AUTH_TOKEN>
- """), dedent("""\
+ """
+ ),
+ dedent(
+ """\
# LXC-based containers require squashfuse before snaps can be installed
snap:
commands:
00: apt-get install squashfuse -y
11: snap install emoj
- """), dedent("""\
+ """
+ ),
+ dedent(
+ """\
# Convenience: the snap command can be omitted when specifying commands
# as a list and 'snap' will automatically be prepended.
# The following commands are equivalent:
@@ -85,7 +93,10 @@ meta = {
01: ['snap', 'install', 'vlc']
02: snap install vlc
03: 'snap install vlc'
- """), dedent("""\
+ """
+ ),
+ dedent(
+ """\
# You can use a list of commands
snap:
commands:
@@ -93,57 +104,61 @@ meta = {
- ['snap', 'install', 'vlc']
- snap install vlc
- 'snap install vlc'
- """), dedent("""\
+ """
+ ),
+ dedent(
+ """\
# You can use a list of assertions
snap:
assertions:
- signed_assertion_blob_here
- |
signed_assertion_blob_here
- """)],
- 'frequency': PER_INSTANCE,
+ """
+ ),
+ ],
+ "frequency": PER_INSTANCE,
}
schema = {
- 'type': 'object',
- 'properties': {
- 'snap': {
- 'type': 'object',
- 'properties': {
- 'assertions': {
- 'type': ['object', 'array'], # Array of strings or dict
- 'items': {'type': 'string'},
- 'additionalItems': False, # Reject items non-string
- 'minItems': 1,
- 'minProperties': 1,
- 'uniqueItems': True,
- 'additionalProperties': {'type': 'string'},
+ "type": "object",
+ "properties": {
+ "snap": {
+ "type": "object",
+ "properties": {
+ "assertions": {
+ "type": ["object", "array"], # Array of strings or dict
+ "items": {"type": "string"},
+ "additionalItems": False, # Reject items non-string
+ "minItems": 1,
+ "minProperties": 1,
+ "uniqueItems": True,
+ "additionalProperties": {"type": "string"},
},
- 'commands': {
- 'type': ['object', 'array'], # Array of strings or dict
- 'items': {
- 'oneOf': [
- {'type': 'array', 'items': {'type': 'string'}},
- {'type': 'string'}]
+ "commands": {
+ "type": ["object", "array"], # Array of strings or dict
+ "items": {
+ "oneOf": [
+ {"type": "array", "items": {"type": "string"}},
+ {"type": "string"},
+ ]
},
- 'additionalItems': False, # Reject non-string & non-list
- 'minItems': 1,
- 'minProperties': 1,
- 'additionalProperties': {
- 'oneOf': [
- {'type': 'string'},
- {'type': 'array', 'items': {'type': 'string'}},
+ "additionalItems": False, # Reject non-string & non-list
+ "minItems": 1,
+ "minProperties": 1,
+ "additionalProperties": {
+ "oneOf": [
+ {"type": "string"},
+ {"type": "array", "items": {"type": "string"}},
],
},
},
- 'squashfuse_in_container': {
- 'type': 'boolean'
- }
+ "squashfuse_in_container": {"type": "boolean"},
},
- 'additionalProperties': False, # Reject keys not in schema
- 'minProperties': 1
+ "additionalProperties": False, # Reject keys not in schema
+ "minProperties": 1,
}
- }
+ },
}
__doc__ = get_meta_doc(meta, schema) # Supplement python help()
@@ -162,45 +177,49 @@ def add_assertions(assertions):
"""
if not assertions:
return
- LOG.debug('Importing user-provided snap assertions')
+ LOG.debug("Importing user-provided snap assertions")
if isinstance(assertions, dict):
assertions = assertions.values()
elif not isinstance(assertions, list):
raise TypeError(
- 'assertion parameter was not a list or dict: {assertions}'.format(
- assertions=assertions))
+ "assertion parameter was not a list or dict: {assertions}".format(
+ assertions=assertions
+ )
+ )
- snap_cmd = [SNAP_CMD, 'ack']
+ snap_cmd = [SNAP_CMD, "ack"]
combined = "\n".join(assertions)
for asrt in assertions:
- LOG.debug('Snap acking: %s', asrt.split('\n')[0:2])
+ LOG.debug("Snap acking: %s", asrt.split("\n")[0:2])
- util.write_file(ASSERTIONS_FILE, combined.encode('utf-8'))
+ util.write_file(ASSERTIONS_FILE, combined.encode("utf-8"))
subp.subp(snap_cmd + [ASSERTIONS_FILE], capture=True)
def run_commands(commands):
"""Run the provided commands provided in snap:commands configuration.
- Commands are run individually. Any errors are collected and reported
- after attempting all commands.
+ Commands are run individually. Any errors are collected and reported
+ after attempting all commands.
- @param commands: A list or dict containing commands to run. Keys of a
- dict will be used to order the commands provided as dict values.
- """
+ @param commands: A list or dict containing commands to run. Keys of a
+ dict will be used to order the commands provided as dict values.
+ """
if not commands:
return
- LOG.debug('Running user-provided snap commands')
+ LOG.debug("Running user-provided snap commands")
if isinstance(commands, dict):
# Sort commands based on dictionary key
commands = [v for _, v in sorted(commands.items())]
elif not isinstance(commands, list):
raise TypeError(
- 'commands parameter was not a list or dict: {commands}'.format(
- commands=commands))
+ "commands parameter was not a list or dict: {commands}".format(
+ commands=commands
+ )
+ )
- fixed_snap_commands = prepend_base_command('snap', commands)
+ fixed_snap_commands = prepend_base_command("snap", commands)
cmd_failures = []
for command in fixed_snap_commands:
@@ -210,8 +229,9 @@ def run_commands(commands):
except subp.ProcessExecutionError as e:
cmd_failures.append(str(e))
if cmd_failures:
- msg = 'Failures running snap commands:\n{cmd_failures}'.format(
- cmd_failures=cmd_failures)
+ msg = "Failures running snap commands:\n{cmd_failures}".format(
+ cmd_failures=cmd_failures
+ )
util.logexc(LOG, msg)
raise RuntimeError(msg)
@@ -227,23 +247,25 @@ def maybe_install_squashfuse(cloud):
util.logexc(LOG, "Package update failed")
raise
try:
- cloud.distro.install_packages(['squashfuse'])
+ cloud.distro.install_packages(["squashfuse"])
except Exception:
util.logexc(LOG, "Failed to install squashfuse")
raise
def handle(name, cfg, cloud, log, args):
- cfgin = cfg.get('snap', {})
+ cfgin = cfg.get("snap", {})
if not cfgin:
- LOG.debug(("Skipping module named %s,"
- " no 'snap' key in configuration"), name)
+ LOG.debug(
+ "Skipping module named %s, no 'snap' key in configuration", name
+ )
return
validate_cloudconfig_schema(cfg, schema)
- if util.is_true(cfgin.get('squashfuse_in_container', False)):
+ if util.is_true(cfgin.get("squashfuse_in_container", False)):
maybe_install_squashfuse(cloud)
- add_assertions(cfgin.get('assertions', []))
- run_commands(cfgin.get('commands', []))
+ add_assertions(cfgin.get("assertions", []))
+ run_commands(cfgin.get("commands", []))
+
# vi: ts=4 expandtab
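A minimal standalone sketch of the command handling that the reformatted run_commands above describes: dict values run in the sort order of their keys, and list commands get "snap" prepended. prepend_snap below is a simplified stand-in for the prepend_base_command helper used in the hunk, not the real implementation.

# Sketch only: mirrors the ordering/prepending behaviour described in the
# cc_snap hunk above. prepend_snap is a hypothetical stand-in for
# prepend_base_command("snap", ...), which is not part of this hunk.
def prepend_snap(command):
    if isinstance(command, str):
        return command  # shell strings are run as-is
    if command and command[0] != "snap":
        return ["snap"] + command
    return command


def order_commands(commands):
    # Dict keys order the commands, as in run_commands above.
    if isinstance(commands, dict):
        commands = [v for _, v in sorted(commands.items())]
    return [prepend_snap(c) for c in commands]


print(order_commands({"02": "snap install vlc", "01": ["install", "vlc"]}))
# [['snap', 'install', 'vlc'], 'snap install vlc']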
diff --git a/cloudinit/config/cc_spacewalk.py b/cloudinit/config/cc_spacewalk.py
index 95083607..3fa6c388 100644
--- a/cloudinit/config/cc_spacewalk.py
+++ b/cloudinit/config/cc_spacewalk.py
@@ -29,9 +29,8 @@ For more information about spacewalk see: https://fedorahosted.org/spacewalk/
from cloudinit import subp
-
-distros = ['redhat', 'fedora']
-required_packages = ['rhn-setup']
+distros = ["redhat", "fedora"]
+required_packages = ["rhn-setup"]
def_ca_cert_path = "/usr/share/rhn/RHN-ORG-TRUSTED-SSL-CERT"
@@ -41,7 +40,7 @@ def is_registered():
# assume we aren't registered; which is sorta ghetto...
already_registered = False
try:
- subp.subp(['rhn-profile-sync', '--verbose'], capture=False)
+ subp.subp(["rhn-profile-sync", "--verbose"], capture=False)
already_registered = True
except subp.ProcessExecutionError as e:
if e.exit_code != 1:
@@ -49,42 +48,58 @@ def is_registered():
return already_registered
-def do_register(server, profile_name,
- ca_cert_path=def_ca_cert_path,
- proxy=None, log=None,
- activation_key=None):
+def do_register(
+ server,
+ profile_name,
+ ca_cert_path=def_ca_cert_path,
+ proxy=None,
+ log=None,
+ activation_key=None,
+):
if log is not None:
- log.info("Registering using `rhnreg_ks` profile '%s'"
- " into server '%s'", profile_name, server)
- cmd = ['rhnreg_ks']
- cmd.extend(['--serverUrl', 'https://%s/XMLRPC' % server])
- cmd.extend(['--profilename', str(profile_name)])
+ log.info(
+ "Registering using `rhnreg_ks` profile '%s' into server '%s'",
+ profile_name,
+ server,
+ )
+ cmd = ["rhnreg_ks"]
+ cmd.extend(["--serverUrl", "https://%s/XMLRPC" % server])
+ cmd.extend(["--profilename", str(profile_name)])
if proxy:
cmd.extend(["--proxy", str(proxy)])
if ca_cert_path:
- cmd.extend(['--sslCACert', str(ca_cert_path)])
+ cmd.extend(["--sslCACert", str(ca_cert_path)])
if activation_key:
- cmd.extend(['--activationkey', str(activation_key)])
+ cmd.extend(["--activationkey", str(activation_key)])
subp.subp(cmd, capture=False)
def handle(name, cfg, cloud, log, _args):
- if 'spacewalk' not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'spacewalk' key in configuration"), name)
+ if "spacewalk" not in cfg:
+ log.debug(
+ "Skipping module named %s, no 'spacewalk' key in configuration",
+ name,
+ )
return
- cfg = cfg['spacewalk']
- spacewalk_server = cfg.get('server')
+ cfg = cfg["spacewalk"]
+ spacewalk_server = cfg.get("server")
if spacewalk_server:
# Need to have this installed before further things will work.
cloud.distro.install_packages(required_packages)
if not is_registered():
- do_register(spacewalk_server,
- cloud.datasource.get_hostname(fqdn=True),
- proxy=cfg.get("proxy"), log=log,
- activation_key=cfg.get('activation_key'))
+ do_register(
+ spacewalk_server,
+ cloud.datasource.get_hostname(fqdn=True),
+ proxy=cfg.get("proxy"),
+ log=log,
+ activation_key=cfg.get("activation_key"),
+ )
else:
- log.debug("Skipping module named %s, 'spacewalk/server' key"
- " was not found in configuration", name)
+ log.debug(
+ "Skipping module named %s, 'spacewalk/server' key"
+ " was not found in configuration",
+ name,
+ )
+
# vi: ts=4 expandtab
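The reformatted do_register above still assembles a single rhnreg_ks invocation from its keyword arguments; a condensed standalone sketch of that command assembly, with placeholder values rather than anything taken from the module:

def build_rhnreg_cmd(
    server, profile_name, ca_cert_path=None, proxy=None, activation_key=None
):
    # Same assembly order as do_register above, minus logging and subp.subp().
    cmd = ["rhnreg_ks", "--serverUrl", "https://%s/XMLRPC" % server]
    cmd.extend(["--profilename", str(profile_name)])
    if proxy:
        cmd.extend(["--proxy", str(proxy)])
    if ca_cert_path:
        cmd.extend(["--sslCACert", str(ca_cert_path)])
    if activation_key:
        cmd.extend(["--activationkey", str(activation_key)])
    return cmd


print(build_rhnreg_cmd("spacewalk.example.com", "node01", proxy="proxy:3128"))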
diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index 1053ab67..a1f05176 100755
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -162,27 +162,26 @@ import glob
import os
import sys
+from cloudinit import ssh_util, subp, util
from cloudinit.distros import ug_util
-from cloudinit import ssh_util
-from cloudinit import subp
-from cloudinit import util
-
-GENERATE_KEY_NAMES = ['rsa', 'dsa', 'ecdsa', 'ed25519']
-KEY_FILE_TPL = '/etc/ssh/ssh_host_%s_key'
+GENERATE_KEY_NAMES = ["rsa", "dsa", "ecdsa", "ed25519"]
+KEY_FILE_TPL = "/etc/ssh/ssh_host_%s_key"
PUBLISH_HOST_KEYS = True
# Don't publish the dsa hostkey by default since OpenSSH recommends not using
# it.
-HOST_KEY_PUBLISH_BLACKLIST = ['dsa']
+HOST_KEY_PUBLISH_BLACKLIST = ["dsa"]
CONFIG_KEY_TO_FILE = {}
PRIV_TO_PUB = {}
for k in GENERATE_KEY_NAMES:
CONFIG_KEY_TO_FILE.update({"%s_private" % k: (KEY_FILE_TPL % k, 0o600)})
CONFIG_KEY_TO_FILE.update(
- {"%s_public" % k: (KEY_FILE_TPL % k + ".pub", 0o600)})
+ {"%s_public" % k: (KEY_FILE_TPL % k + ".pub", 0o600)}
+ )
CONFIG_KEY_TO_FILE.update(
- {"%s_certificate" % k: (KEY_FILE_TPL % k + "-cert.pub", 0o600)})
+ {"%s_certificate" % k: (KEY_FILE_TPL % k + "-cert.pub", 0o600)}
+ )
PRIV_TO_PUB["%s_private" % k] = "%s_public" % k
KEY_GEN_TPL = 'o=$(ssh-keygen -yf "%s") && echo "$o" root@localhost > "%s"'
@@ -209,43 +208,48 @@ def handle(_name, cfg, cloud, log, _args):
tgt_perms = CONFIG_KEY_TO_FILE[key][1]
util.write_file(tgt_fn, val, tgt_perms)
# set server to present the most recently identified certificate
- if '_certificate' in key:
- cert_config = {'HostCertificate': tgt_fn}
+ if "_certificate" in key:
+ cert_config = {"HostCertificate": tgt_fn}
ssh_util.update_ssh_config(cert_config)
for (priv, pub) in PRIV_TO_PUB.items():
- if pub in cfg['ssh_keys'] or priv not in cfg['ssh_keys']:
+ if pub in cfg["ssh_keys"] or priv not in cfg["ssh_keys"]:
continue
pair = (CONFIG_KEY_TO_FILE[priv][0], CONFIG_KEY_TO_FILE[pub][0])
- cmd = ['sh', '-xc', KEY_GEN_TPL % pair]
+ cmd = ["sh", "-xc", KEY_GEN_TPL % pair]
try:
# TODO(harlowja): Is this guard needed?
with util.SeLinuxGuard("/etc/ssh", recursive=True):
subp.subp(cmd, capture=False)
log.debug("Generated a key for %s from %s", pair[0], pair[1])
except Exception:
- util.logexc(log, "Failed generated a key for %s from %s",
- pair[0], pair[1])
+ util.logexc(
+ log,
+ "Failed generated a key for %s from %s",
+ pair[0],
+ pair[1],
+ )
else:
# if not, generate them
- genkeys = util.get_cfg_option_list(cfg,
- 'ssh_genkeytypes',
- GENERATE_KEY_NAMES)
+ genkeys = util.get_cfg_option_list(
+ cfg, "ssh_genkeytypes", GENERATE_KEY_NAMES
+ )
lang_c = os.environ.copy()
- lang_c['LANG'] = 'C'
+ lang_c["LANG"] = "C"
for keytype in genkeys:
keyfile = KEY_FILE_TPL % (keytype)
if os.path.exists(keyfile):
continue
util.ensure_dir(os.path.dirname(keyfile))
- cmd = ['ssh-keygen', '-t', keytype, '-N', '', '-f', keyfile]
+ cmd = ["ssh-keygen", "-t", keytype, "-N", "", "-f", keyfile]
# TODO(harlowja): Is this guard needed?
with util.SeLinuxGuard("/etc/ssh", recursive=True):
try:
out, err = subp.subp(cmd, capture=True, env=lang_c)
- if not util.get_cfg_option_bool(cfg, 'ssh_quiet_keygen',
- False):
+ if not util.get_cfg_option_bool(
+ cfg, "ssh_quiet_keygen", False
+ ):
sys.stdout.write(util.decode_binary(out))
gid = util.get_group_id("ssh_keys")
@@ -256,19 +260,27 @@ def handle(_name, cfg, cloud, log, _args):
os.chmod(keyfile + ".pub", 0o644)
except subp.ProcessExecutionError as e:
err = util.decode_binary(e.stderr).lower()
- if (e.exit_code == 1 and
- err.lower().startswith("unknown key")):
+ if e.exit_code == 1 and err.lower().startswith(
+ "unknown key"
+ ):
log.debug("ssh-keygen: unknown key type '%s'", keytype)
else:
- util.logexc(log, "Failed generating key type %s to "
- "file %s", keytype, keyfile)
+ util.logexc(
+ log,
+ "Failed generating key type %s to file %s",
+ keytype,
+ keyfile,
+ )
if "ssh_publish_hostkeys" in cfg:
host_key_blacklist = util.get_cfg_option_list(
- cfg["ssh_publish_hostkeys"], "blacklist",
- HOST_KEY_PUBLISH_BLACKLIST)
+ cfg["ssh_publish_hostkeys"],
+ "blacklist",
+ HOST_KEY_PUBLISH_BLACKLIST,
+ )
publish_hostkeys = util.get_cfg_option_bool(
- cfg["ssh_publish_hostkeys"], "enabled", PUBLISH_HOST_KEYS)
+ cfg["ssh_publish_hostkeys"], "enabled", PUBLISH_HOST_KEYS
+ )
else:
host_key_blacklist = HOST_KEY_PUBLISH_BLACKLIST
publish_hostkeys = PUBLISH_HOST_KEYS
@@ -284,15 +296,18 @@ def handle(_name, cfg, cloud, log, _args):
(users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
(user, _user_config) = ug_util.extract_default(users)
disable_root = util.get_cfg_option_bool(cfg, "disable_root", True)
- disable_root_opts = util.get_cfg_option_str(cfg, "disable_root_opts",
- ssh_util.DISABLE_USER_OPTS)
+ disable_root_opts = util.get_cfg_option_str(
+ cfg, "disable_root_opts", ssh_util.DISABLE_USER_OPTS
+ )
keys = []
- if util.get_cfg_option_bool(cfg, 'allow_public_ssh_keys', True):
+ if util.get_cfg_option_bool(cfg, "allow_public_ssh_keys", True):
keys = cloud.get_public_ssh_keys() or []
else:
- log.debug('Skipping import of publish SSH keys per '
- 'config setting: allow_public_ssh_keys=False')
+ log.debug(
+ "Skipping import of publish SSH keys per "
+ "config setting: allow_public_ssh_keys=False"
+ )
if "ssh_authorized_keys" in cfg:
cfgkeys = cfg["ssh_authorized_keys"]
@@ -312,12 +327,12 @@ def apply_credentials(keys, user, disable_root, disable_root_opts):
if disable_root:
if not user:
user = "NONE"
- key_prefix = disable_root_opts.replace('$USER', user)
- key_prefix = key_prefix.replace('$DISABLE_USER', 'root')
+ key_prefix = disable_root_opts.replace("$USER", user)
+ key_prefix = key_prefix.replace("$DISABLE_USER", "root")
else:
- key_prefix = ''
+ key_prefix = ""
- ssh_util.setup_user_keys(keys, 'root', options=key_prefix)
+ ssh_util.setup_user_keys(keys, "root", options=key_prefix)
def get_public_host_keys(blacklist=None):
@@ -327,18 +342,21 @@ def get_public_host_keys(blacklist=None):
@returns: List of keys, each formatted as a two-element tuple.
e.g. [('ssh-rsa', 'AAAAB3Nz...'), ('ssh-ed25519', 'AAAAC3Nx...')]
"""
- public_key_file_tmpl = '%s.pub' % (KEY_FILE_TPL,)
+ public_key_file_tmpl = "%s.pub" % (KEY_FILE_TPL,)
key_list = []
blacklist_files = []
if blacklist:
# Convert blacklist to filenames:
# 'dsa' -> '/etc/ssh/ssh_host_dsa_key.pub'
- blacklist_files = [public_key_file_tmpl % (key_type,)
- for key_type in blacklist]
+ blacklist_files = [
+ public_key_file_tmpl % (key_type,) for key_type in blacklist
+ ]
# Get list of public key files and filter out blacklisted files.
- file_list = [hostfile for hostfile
- in glob.glob(public_key_file_tmpl % ('*',))
- if hostfile not in blacklist_files]
+ file_list = [
+ hostfile
+ for hostfile in glob.glob(public_key_file_tmpl % ("*",))
+ if hostfile not in blacklist_files
+ ]
# Read host key files, retrieve first two fields as a tuple and
# append that tuple to key_list.
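The module-level loop near the top of the cc_ssh hunk expands GENERATE_KEY_NAMES into the private/public/certificate path map that handle() later consults; a standalone expansion of what that loop produces, written with plain dict assignment instead of the .update() calls shown above:

# Standalone check of the mapping built at import time in cc_ssh.py above.
KEY_FILE_TPL = "/etc/ssh/ssh_host_%s_key"
CONFIG_KEY_TO_FILE = {}
PRIV_TO_PUB = {}
for k in ["rsa", "dsa", "ecdsa", "ed25519"]:
    CONFIG_KEY_TO_FILE["%s_private" % k] = (KEY_FILE_TPL % k, 0o600)
    CONFIG_KEY_TO_FILE["%s_public" % k] = (KEY_FILE_TPL % k + ".pub", 0o600)
    CONFIG_KEY_TO_FILE["%s_certificate" % k] = (
        KEY_FILE_TPL % k + "-cert.pub",
        0o600,
    )
    PRIV_TO_PUB["%s_private" % k] = "%s_public" % k

print(CONFIG_KEY_TO_FILE["rsa_certificate"])
# ('/etc/ssh/ssh_host_rsa_key-cert.pub', 384)  -- 384 is 0o600 in decimal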
diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py
index 5323522c..020c3469 100755
--- a/cloudinit/config/cc_ssh_authkey_fingerprints.py
+++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py
@@ -28,23 +28,21 @@ the keys can be specified, but defaults to ``sha256``.
import base64
import hashlib
-from cloudinit.simpletable import SimpleTable
-
+from cloudinit import ssh_util, util
from cloudinit.distros import ug_util
-from cloudinit import ssh_util
-from cloudinit import util
+from cloudinit.simpletable import SimpleTable
def _split_hash(bin_hash):
split_up = []
for i in range(0, len(bin_hash), 2):
- split_up.append(bin_hash[i:i + 2])
+ split_up.append(bin_hash[i : i + 2])
return split_up
-def _gen_fingerprint(b64_text, hash_meth='sha256'):
+def _gen_fingerprint(b64_text, hash_meth="sha256"):
if not b64_text:
- return ''
+ return ""
# TBD(harlowja): Maybe we should feed this into 'ssh -lf'?
try:
hasher = hashlib.new(hash_meth)
@@ -54,58 +52,75 @@ def _gen_fingerprint(b64_text, hash_meth='sha256'):
# Raised when b64 not really b64...
# or when the hash type is not really
# a known/supported hash type...
- return '?'
+ return "?"
def _is_printable_key(entry):
if any([entry.keytype, entry.base64, entry.comment, entry.options]):
- if (entry.keytype and entry.keytype.lower().strip()
- in ssh_util.VALID_KEY_TYPES):
+ if (
+ entry.keytype
+ and entry.keytype.lower().strip() in ssh_util.VALID_KEY_TYPES
+ ):
return True
return False
-def _pprint_key_entries(user, key_fn, key_entries, hash_meth='sha256',
- prefix='ci-info: '):
+def _pprint_key_entries(
+ user, key_fn, key_entries, hash_meth="sha256", prefix="ci-info: "
+):
if not key_entries:
- message = ("%sno authorized SSH keys fingerprints found for user %s.\n"
- % (prefix, user))
+ message = (
+ "%sno authorized SSH keys fingerprints found for user %s.\n"
+ % (prefix, user)
+ )
util.multi_log(message, console=True, stderr=False)
return
- tbl_fields = ['Keytype', 'Fingerprint (%s)' % (hash_meth), 'Options',
- 'Comment']
+ tbl_fields = [
+ "Keytype",
+ "Fingerprint (%s)" % (hash_meth),
+ "Options",
+ "Comment",
+ ]
tbl = SimpleTable(tbl_fields)
for entry in key_entries:
if _is_printable_key(entry):
- row = [entry.keytype or '-',
- _gen_fingerprint(entry.base64, hash_meth) or '-',
- entry.options or '-',
- entry.comment or '-']
+ row = [
+ entry.keytype or "-",
+ _gen_fingerprint(entry.base64, hash_meth) or "-",
+ entry.options or "-",
+ entry.comment or "-",
+ ]
tbl.add_row(row)
authtbl_s = tbl.get_string()
authtbl_lines = authtbl_s.splitlines()
max_len = len(max(authtbl_lines, key=len))
lines = [
- util.center("Authorized keys from %s for user %s" %
- (key_fn, user), "+", max_len),
+ util.center(
+ "Authorized keys from %s for user %s" % (key_fn, user),
+ "+",
+ max_len,
+ ),
]
lines.extend(authtbl_lines)
for line in lines:
- util.multi_log(text="%s%s\n" % (prefix, line),
- stderr=False, console=True)
+ util.multi_log(
+ text="%s%s\n" % (prefix, line), stderr=False, console=True
+ )
def handle(name, cfg, cloud, log, _args):
- if util.is_true(cfg.get('no_ssh_fingerprints', False)):
- log.debug(("Skipping module named %s, "
- "logging of SSH fingerprints disabled"), name)
+ if util.is_true(cfg.get("no_ssh_fingerprints", False)):
+ log.debug(
+ "Skipping module named %s, logging of SSH fingerprints disabled",
+ name,
+ )
return
hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "sha256")
(users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
for (user_name, _cfg) in users.items():
(key_fn, key_entries) = ssh_util.extract_authorized_keys(user_name)
- _pprint_key_entries(user_name, key_fn,
- key_entries, hash_meth)
+ _pprint_key_entries(user_name, key_fn, key_entries, hash_meth)
+
# vi: ts=4 expandtab
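_split_hash and _gen_fingerprint above turn the base64 body of an authorized key into a hex digest grouped in pairs for the ci-info table; a standalone sketch of the same idea. The ':' join happens in the part of _gen_fingerprint this hunk does not show, so treat the exact output formatting here as an assumption.

import base64
import hashlib


def fingerprint(b64_text, hash_meth="sha256"):
    # Hash the decoded key body, then group the hex digest in pairs,
    # as _split_hash does above.
    if not b64_text:
        return ""
    hasher = hashlib.new(hash_meth)
    hasher.update(base64.b64decode(b64_text))
    digest = hasher.hexdigest()
    return ":".join(digest[i : i + 2] for i in range(0, len(digest), 2))


print(fingerprint(base64.b64encode(b"example-key-body").decode()))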
diff --git a/cloudinit/config/cc_ssh_import_id.py b/cloudinit/config/cc_ssh_import_id.py
index 856e5a9e..a9575c59 100755
--- a/cloudinit/config/cc_ssh_import_id.py
+++ b/cloudinit/config/cc_ssh_import_id.py
@@ -30,13 +30,13 @@ either ``lp:`` for launchpad or ``gh:`` for github to the username.
- lp:user
"""
-from cloudinit.distros import ug_util
-from cloudinit import subp
-from cloudinit import util
import pwd
+from cloudinit import subp, util
+from cloudinit.distros import ug_util
+
# https://launchpad.net/ssh-import-id
-distros = ['ubuntu', 'debian']
+distros = ["ubuntu", "debian"]
def handle(_name, cfg, cloud, log, args):
@@ -56,11 +56,11 @@ def handle(_name, cfg, cloud, log, args):
elist = []
for (user, user_cfg) in users.items():
import_ids = []
- if user_cfg['default']:
+ if user_cfg["default"]:
import_ids = util.get_cfg_option_list(cfg, "ssh_import_id", [])
else:
try:
- import_ids = user_cfg['ssh_import_id']
+ import_ids = user_cfg["ssh_import_id"]
except Exception:
log.debug("User %s is not configured for ssh_import_id", user)
continue
@@ -69,8 +69,9 @@ def handle(_name, cfg, cloud, log, args):
import_ids = util.uniq_merge(import_ids)
import_ids = [str(i) for i in import_ids]
except Exception:
- log.debug("User %s is not correctly configured for ssh_import_id",
- user)
+ log.debug(
+ "User %s is not correctly configured for ssh_import_id", user
+ )
continue
if not len(import_ids):
@@ -79,8 +80,9 @@ def handle(_name, cfg, cloud, log, args):
try:
import_ssh_ids(import_ids, user, log)
except Exception as exc:
- util.logexc(log, "ssh-import-id failed for: %s %s", user,
- import_ids)
+ util.logexc(
+ log, "ssh-import-id failed for: %s %s", user, import_ids
+ )
elist.append(exc)
if len(elist):
@@ -107,4 +109,5 @@ def import_ssh_ids(ids, user, log):
util.logexc(log, "Failed to run command to import %s SSH ids", user)
raise exc
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_timezone.py b/cloudinit/config/cc_timezone.py
index a9de8fac..24e6099e 100644
--- a/cloudinit/config/cc_timezone.py
+++ b/cloudinit/config/cc_timezone.py
@@ -27,7 +27,6 @@ the timezone from cloud config.
"""
from cloudinit import util
-
from cloudinit.settings import PER_INSTANCE
frequency = PER_INSTANCE
@@ -46,4 +45,5 @@ def handle(name, cfg, cloud, log, args):
# Let the distro handle settings its timezone
cloud.distro.set_timezone(timezone)
+
# vi: ts=4 expandtab
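The cc_timezone hunk above is pure housekeeping from this changeset: isort regroups the imports and Black reflows the rest. The same transformation can be reproduced locally; a sketch assuming the black and isort packages are installed (format_str and isort.code are their public entry points, and line_length=79 is inferred from the wrapped lines in these hunks rather than read from the project config):

import black
import isort

SRC = """\
from cloudinit import util
from cloudinit import subp

distros = ['opensuse', 'sles']
"""

# isort merges the duplicate from-imports; Black then normalizes the quotes.
print(black.format_str(isort.code(SRC), mode=black.Mode(line_length=79)))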
diff --git a/cloudinit/config/cc_ubuntu_advantage.py b/cloudinit/config/cc_ubuntu_advantage.py
index 831a92a2..413fd3c1 100644
--- a/cloudinit/config/cc_ubuntu_advantage.py
+++ b/cloudinit/config/cc_ubuntu_advantage.py
@@ -4,22 +4,21 @@
from textwrap import dedent
-from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema
from cloudinit import log as logging
+from cloudinit import subp, util
+from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema
from cloudinit.settings import PER_INSTANCE
-from cloudinit import subp
-from cloudinit import util
-
-UA_URL = 'https://ubuntu.com/advantage'
+UA_URL = "https://ubuntu.com/advantage"
-distros = ['ubuntu']
+distros = ["ubuntu"]
meta = {
- 'id': 'cc_ubuntu_advantage',
- 'name': 'Ubuntu Advantage',
- 'title': 'Configure Ubuntu Advantage support services',
- 'description': dedent("""\
+ "id": "cc_ubuntu_advantage",
+ "name": "Ubuntu Advantage",
+ "title": "Configure Ubuntu Advantage support services",
+ "description": dedent(
+ """\
Attach machine to an existing Ubuntu Advantage support contract and
enable or disable support services such as Livepatch, ESM,
FIPS and FIPS Updates. When attaching a machine to Ubuntu Advantage,
@@ -31,14 +30,21 @@ meta = {
a reboot to ensure the machine is running the FIPS-compliant kernel.
See :ref:`Power State Change` for information on how to configure
cloud-init to perform this reboot.
- """),
- 'distros': distros,
- 'examples': [dedent("""\
+ """
+ ),
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
# Attach the machine to an Ubuntu Advantage support contract with a
# UA contract token obtained from %s.
ubuntu_advantage:
token: <ua_contract_token>
- """ % UA_URL), dedent("""\
+ """
+ % UA_URL
+ ),
+ dedent(
+ """\
# Attach the machine to an Ubuntu Advantage support contract enabling
# only fips and esm services. Services will only be enabled if
# the environment supports said service. Otherwise warnings will
@@ -48,7 +54,10 @@ meta = {
enable:
- fips
- esm
- """), dedent("""\
+ """
+ ),
+ dedent(
+ """\
# Attach the machine to an Ubuntu Advantage support contract and enable
# the FIPS service. Perform a reboot once cloud-init has
# completed.
@@ -58,30 +67,32 @@ meta = {
token: <ua_contract_token>
enable:
- fips
- """)],
- 'frequency': PER_INSTANCE,
+ """
+ ),
+ ],
+ "frequency": PER_INSTANCE,
}
schema = {
- 'type': 'object',
- 'properties': {
- 'ubuntu_advantage': {
- 'type': 'object',
- 'properties': {
- 'enable': {
- 'type': 'array',
- 'items': {'type': 'string'},
+ "type": "object",
+ "properties": {
+ "ubuntu_advantage": {
+ "type": "object",
+ "properties": {
+ "enable": {
+ "type": "array",
+ "items": {"type": "string"},
+ },
+ "token": {
+ "type": "string",
+ "description": "A contract token obtained from %s."
+ % UA_URL,
},
- 'token': {
- 'type': 'string',
- 'description': (
- 'A contract token obtained from %s.' % UA_URL)
- }
},
- 'required': ['token'],
- 'additionalProperties': False
+ "required": ["token"],
+ "additionalProperties": False,
}
- }
+ },
}
__doc__ = get_meta_doc(meta, schema) # Supplement python help()
@@ -93,52 +104,61 @@ def configure_ua(token=None, enable=None):
"""Call ua commandline client to attach or enable services."""
error = None
if not token:
- error = ('ubuntu_advantage: token must be provided')
+ error = "ubuntu_advantage: token must be provided"
LOG.error(error)
raise RuntimeError(error)
if enable is None:
enable = []
elif isinstance(enable, str):
- LOG.warning('ubuntu_advantage: enable should be a list, not'
- ' a string; treating as a single enable')
+ LOG.warning(
+ "ubuntu_advantage: enable should be a list, not"
+ " a string; treating as a single enable"
+ )
enable = [enable]
elif not isinstance(enable, list):
- LOG.warning('ubuntu_advantage: enable should be a list, not'
- ' a %s; skipping enabling services',
- type(enable).__name__)
+ LOG.warning(
+ "ubuntu_advantage: enable should be a list, not"
+ " a %s; skipping enabling services",
+ type(enable).__name__,
+ )
enable = []
- attach_cmd = ['ua', 'attach', token]
- LOG.debug('Attaching to Ubuntu Advantage. %s', ' '.join(attach_cmd))
+ attach_cmd = ["ua", "attach", token]
+ LOG.debug("Attaching to Ubuntu Advantage. %s", " ".join(attach_cmd))
try:
subp.subp(attach_cmd)
except subp.ProcessExecutionError as e:
- msg = 'Failure attaching Ubuntu Advantage:\n{error}'.format(
- error=str(e))
+ msg = "Failure attaching Ubuntu Advantage:\n{error}".format(
+ error=str(e)
+ )
util.logexc(LOG, msg)
raise RuntimeError(msg) from e
enable_errors = []
for service in enable:
try:
- cmd = ['ua', 'enable', service]
+ cmd = ["ua", "enable", service]
subp.subp(cmd, capture=True)
except subp.ProcessExecutionError as e:
enable_errors.append((service, e))
if enable_errors:
for service, error in enable_errors:
msg = 'Failure enabling "{service}":\n{error}'.format(
- service=service, error=str(error))
+ service=service, error=str(error)
+ )
util.logexc(LOG, msg)
raise RuntimeError(
- 'Failure enabling Ubuntu Advantage service(s): {}'.format(
- ', '.join('"{}"'.format(service)
- for service, _ in enable_errors)))
+ "Failure enabling Ubuntu Advantage service(s): {}".format(
+ ", ".join(
+ '"{}"'.format(service) for service, _ in enable_errors
+ )
+ )
+ )
def maybe_install_ua_tools(cloud):
"""Install ubuntu-advantage-tools if not present."""
- if subp.which('ua'):
+ if subp.which("ua"):
return
try:
cloud.distro.update_package_sources()
@@ -146,7 +166,7 @@ def maybe_install_ua_tools(cloud):
util.logexc(LOG, "Package update failed")
raise
try:
- cloud.distro.install_packages(['ubuntu-advantage-tools'])
+ cloud.distro.install_packages(["ubuntu-advantage-tools"])
except Exception:
util.logexc(LOG, "Failed to install ubuntu-advantage-tools")
raise
@@ -154,27 +174,35 @@ def maybe_install_ua_tools(cloud):
def handle(name, cfg, cloud, log, args):
ua_section = None
- if 'ubuntu-advantage' in cfg:
- LOG.warning('Deprecated configuration key "ubuntu-advantage" provided.'
- ' Expected underscore delimited "ubuntu_advantage"; will'
- ' attempt to continue.')
- ua_section = cfg['ubuntu-advantage']
- if 'ubuntu_advantage' in cfg:
- ua_section = cfg['ubuntu_advantage']
+ if "ubuntu-advantage" in cfg:
+ LOG.warning(
+ 'Deprecated configuration key "ubuntu-advantage" provided.'
+ ' Expected underscore delimited "ubuntu_advantage"; will'
+ " attempt to continue."
+ )
+ ua_section = cfg["ubuntu-advantage"]
+ if "ubuntu_advantage" in cfg:
+ ua_section = cfg["ubuntu_advantage"]
if ua_section is None:
- LOG.debug("Skipping module named %s,"
- " no 'ubuntu_advantage' configuration found", name)
+ LOG.debug(
+ "Skipping module named %s,"
+ " no 'ubuntu_advantage' configuration found",
+ name,
+ )
return
validate_cloudconfig_schema(cfg, schema)
- if 'commands' in ua_section:
+ if "commands" in ua_section:
msg = (
'Deprecated configuration "ubuntu-advantage: commands" provided.'
- ' Expected "token"')
+ ' Expected "token"'
+ )
LOG.error(msg)
raise RuntimeError(msg)
maybe_install_ua_tools(cloud)
- configure_ua(token=ua_section.get('token'),
- enable=ua_section.get('enable'))
+ configure_ua(
+ token=ua_section.get("token"), enable=ua_section.get("enable")
+ )
+
# vi: ts=4 expandtab
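configure_ua above normalizes the enable value before running 'ua enable <service>' per entry; a condensed sketch of just that normalization, with the LOG.warning calls reduced to comments:

def normalize_enable(enable):
    # Mirrors the type handling in configure_ua above.
    if enable is None:
        return []
    if isinstance(enable, str):
        # warned as deprecated above, then treated as a single service
        return [enable]
    if not isinstance(enable, list):
        # unsupported type: skip enabling services
        return []
    return enable


assert normalize_enable("fips") == ["fips"]
assert normalize_enable(["fips", "esm"]) == ["fips", "esm"]
assert normalize_enable({"fips": True}) == []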
diff --git a/cloudinit/config/cc_ubuntu_drivers.py b/cloudinit/config/cc_ubuntu_drivers.py
index 7f617efe..6c8494c8 100644
--- a/cloudinit/config/cc_ubuntu_drivers.py
+++ b/cloudinit/config/cc_ubuntu_drivers.py
@@ -5,57 +5,62 @@
import os
from textwrap import dedent
-from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema
from cloudinit import log as logging
+from cloudinit import subp, temp_utils, type_utils, util
+from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema
from cloudinit.settings import PER_INSTANCE
-from cloudinit import subp
-from cloudinit import temp_utils
-from cloudinit import type_utils
-from cloudinit import util
LOG = logging.getLogger(__name__)
frequency = PER_INSTANCE
-distros = ['ubuntu']
+distros = ["ubuntu"]
meta = {
- 'id': 'cc_ubuntu_drivers',
- 'name': 'Ubuntu Drivers',
- 'title': 'Interact with third party drivers in Ubuntu.',
- 'description': dedent("""\
+ "id": "cc_ubuntu_drivers",
+ "name": "Ubuntu Drivers",
+ "title": "Interact with third party drivers in Ubuntu.",
+ "description": dedent(
+ """\
This module interacts with the 'ubuntu-drivers' command to install
- third party driver packages."""),
- 'distros': distros,
- 'examples': [dedent("""\
+ third party driver packages."""
+ ),
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
drivers:
nvidia:
license-accepted: true
- """)],
- 'frequency': frequency,
+ """
+ )
+ ],
+ "frequency": frequency,
}
schema = {
- 'type': 'object',
- 'properties': {
- 'drivers': {
- 'type': 'object',
- 'additionalProperties': False,
- 'properties': {
- 'nvidia': {
- 'type': 'object',
- 'additionalProperties': False,
- 'required': ['license-accepted'],
- 'properties': {
- 'license-accepted': {
- 'type': 'boolean',
- 'description': ("Do you accept the NVIDIA driver"
- " license?"),
+ "type": "object",
+ "properties": {
+ "drivers": {
+ "type": "object",
+ "additionalProperties": False,
+ "properties": {
+ "nvidia": {
+ "type": "object",
+ "additionalProperties": False,
+ "required": ["license-accepted"],
+ "properties": {
+ "license-accepted": {
+ "type": "boolean",
+ "description": (
+ "Do you accept the NVIDIA driver license?"
+ ),
},
- 'version': {
- 'type': 'string',
- 'description': (
- 'The version of the driver to install (e.g.'
+ "version": {
+ "type": "string",
+ "description": (
+ "The version of the driver to install (e.g."
' "390", "410"). Defaults to the latest'
- ' version.'),
+ " version."
+ ),
},
},
},
@@ -64,7 +69,8 @@ schema = {
},
}
OLD_UBUNTU_DRIVERS_STDERR_NEEDLE = (
- "ubuntu-drivers: error: argument <command>: invalid choice: 'install'")
+ "ubuntu-drivers: error: argument <command>: invalid choice: 'install'"
+)
__doc__ = get_meta_doc(meta, schema) # Supplement python help()
@@ -99,10 +105,11 @@ db_x_loadtemplatefile "$1" cloud-init
def install_drivers(cfg, pkg_install_func):
if not isinstance(cfg, dict):
raise TypeError(
- "'drivers' config expected dict, found '%s': %s" %
- (type_utils.obj_name(cfg), cfg))
+ "'drivers' config expected dict, found '%s': %s"
+ % (type_utils.obj_name(cfg), cfg)
+ )
- cfgpath = 'nvidia/license-accepted'
+ cfgpath = "nvidia/license-accepted"
# Call translate_bool to ensure that we treat string values like "yes" as
# acceptance and _don't_ treat string values like "nah" as acceptance
# because they're True-ish
@@ -111,46 +118,56 @@ def install_drivers(cfg, pkg_install_func):
LOG.debug("Not installing NVIDIA drivers. %s=%s", cfgpath, nv_acc)
return
- if not subp.which('ubuntu-drivers'):
- LOG.debug("'ubuntu-drivers' command not available. "
- "Installing ubuntu-drivers-common")
- pkg_install_func(['ubuntu-drivers-common'])
+ if not subp.which("ubuntu-drivers"):
+ LOG.debug(
+ "'ubuntu-drivers' command not available. "
+ "Installing ubuntu-drivers-common"
+ )
+ pkg_install_func(["ubuntu-drivers-common"])
- driver_arg = 'nvidia'
- version_cfg = util.get_cfg_by_path(cfg, 'nvidia/version')
+ driver_arg = "nvidia"
+ version_cfg = util.get_cfg_by_path(cfg, "nvidia/version")
if version_cfg:
- driver_arg += ':{}'.format(version_cfg)
+ driver_arg += ":{}".format(version_cfg)
- LOG.debug("Installing and activating NVIDIA drivers (%s=%s, version=%s)",
- cfgpath, nv_acc, version_cfg if version_cfg else 'latest')
+ LOG.debug(
+ "Installing and activating NVIDIA drivers (%s=%s, version=%s)",
+ cfgpath,
+ nv_acc,
+ version_cfg if version_cfg else "latest",
+ )
# Register and set debconf selection linux/nvidia/latelink = true
tdir = temp_utils.mkdtemp(needs_exe=True)
- debconf_file = os.path.join(tdir, 'nvidia.template')
- debconf_script = os.path.join(tdir, 'nvidia-debconf.sh')
+ debconf_file = os.path.join(tdir, "nvidia.template")
+ debconf_script = os.path.join(tdir, "nvidia-debconf.sh")
try:
util.write_file(debconf_file, NVIDIA_DEBCONF_CONTENT)
util.write_file(
debconf_script,
util.encode_text(NVIDIA_DRIVER_LATELINK_DEBCONF_SCRIPT),
- mode=0o755)
+ mode=0o755,
+ )
subp.subp([debconf_script, debconf_file])
except Exception as e:
util.logexc(
- LOG, "Failed to register NVIDIA debconf template: %s", str(e))
+ LOG, "Failed to register NVIDIA debconf template: %s", str(e)
+ )
raise
finally:
if os.path.isdir(tdir):
util.del_dir(tdir)
try:
- subp.subp(['ubuntu-drivers', 'install', '--gpgpu', driver_arg])
+ subp.subp(["ubuntu-drivers", "install", "--gpgpu", driver_arg])
except subp.ProcessExecutionError as exc:
if OLD_UBUNTU_DRIVERS_STDERR_NEEDLE in exc.stderr:
- LOG.warning('the available version of ubuntu-drivers is'
- ' too old to perform requested driver installation')
- elif 'No drivers found for installation.' in exc.stdout:
- LOG.warning('ubuntu-drivers found no drivers for installation')
+ LOG.warning(
+ "the available version of ubuntu-drivers is"
+ " too old to perform requested driver installation"
+ )
+ elif "No drivers found for installation." in exc.stdout:
+ LOG.warning("ubuntu-drivers found no drivers for installation")
raise
@@ -160,4 +177,4 @@ def handle(name, cfg, cloud, log, _args):
return
validate_cloudconfig_schema(cfg, schema)
- install_drivers(cfg['drivers'], cloud.distro.install_packages)
+ install_drivers(cfg["drivers"], cloud.distro.install_packages)
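install_drivers above appends any requested version to the ubuntu-drivers argument before calling subp; a small sketch of the resulting invocation:

def drivers_cmd(version_cfg=None):
    # Same argument construction as install_drivers above, without the
    # debconf registration or error handling.
    driver_arg = "nvidia"
    if version_cfg:
        driver_arg += ":{}".format(version_cfg)
    return ["ubuntu-drivers", "install", "--gpgpu", driver_arg]


print(drivers_cmd())       # ['ubuntu-drivers', 'install', '--gpgpu', 'nvidia']
print(drivers_cmd("390"))  # ['ubuntu-drivers', 'install', '--gpgpu', 'nvidia:390']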
diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py
index 32368bbb..f0aa9b0f 100644
--- a/cloudinit/config/cc_update_etc_hosts.py
+++ b/cloudinit/config/cc_update_etc_hosts.py
@@ -50,9 +50,7 @@ ping ``127.0.0.1`` or ``127.0.1.1`` or other ip).
hostname: <fqdn/hostname>
"""
-from cloudinit import templater
-from cloudinit import util
-
+from cloudinit import templater, util
from cloudinit.settings import PER_ALWAYS
frequency = PER_ALWAYS
@@ -63,35 +61,45 @@ def handle(name, cfg, cloud, log, _args):
hosts_fn = cloud.distro.hosts_fn
- if util.translate_bool(manage_hosts, addons=['template']):
+ if util.translate_bool(manage_hosts, addons=["template"]):
(hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
if not hostname:
- log.warning(("Option 'manage_etc_hosts' was set,"
- " but no hostname was found"))
+ log.warning(
+ "Option 'manage_etc_hosts' was set, but no hostname was found"
+ )
return
# Render from a template file
- tpl_fn_name = cloud.get_template_filename("hosts.%s" %
- (cloud.distro.osfamily))
+ tpl_fn_name = cloud.get_template_filename(
+ "hosts.%s" % (cloud.distro.osfamily)
+ )
if not tpl_fn_name:
- raise RuntimeError(("No hosts template could be"
- " found for distro %s") %
- (cloud.distro.osfamily))
+ raise RuntimeError(
+ "No hosts template could be found for distro %s"
+ % (cloud.distro.osfamily)
+ )
- templater.render_to_file(tpl_fn_name, hosts_fn,
- {'hostname': hostname, 'fqdn': fqdn})
+ templater.render_to_file(
+ tpl_fn_name, hosts_fn, {"hostname": hostname, "fqdn": fqdn}
+ )
elif manage_hosts == "localhost":
(hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
if not hostname:
- log.warning(("Option 'manage_etc_hosts' was set,"
- " but no hostname was found"))
+ log.warning(
+ "Option 'manage_etc_hosts' was set, but no hostname was found"
+ )
return
log.debug("Managing localhost in %s", hosts_fn)
cloud.distro.update_etc_hosts(hostname, fqdn)
else:
- log.debug(("Configuration option 'manage_etc_hosts' is not set,"
- " not managing %s in module %s"), hosts_fn, name)
+ log.debug(
+ "Configuration option 'manage_etc_hosts' is not set,"
+ " not managing %s in module %s",
+ hosts_fn,
+ name,
+ )
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_update_hostname.py b/cloudinit/config/cc_update_hostname.py
index 370de73a..09f6f6da 100644
--- a/cloudinit/config/cc_update_hostname.py
+++ b/cloudinit/config/cc_update_hostname.py
@@ -34,33 +34,38 @@ is set, then the hostname will not be altered.
import os
-from cloudinit.settings import PER_ALWAYS
from cloudinit import util
+from cloudinit.settings import PER_ALWAYS
frequency = PER_ALWAYS
def handle(name, cfg, cloud, log, _args):
if util.get_cfg_option_bool(cfg, "preserve_hostname", False):
- log.debug(("Configuration option 'preserve_hostname' is set,"
- " not updating the hostname in module %s"), name)
+ log.debug(
+ "Configuration option 'preserve_hostname' is set,"
+ " not updating the hostname in module %s",
+ name,
+ )
return
# Set prefer_fqdn_over_hostname value in distro
- hostname_fqdn = util.get_cfg_option_bool(cfg,
- "prefer_fqdn_over_hostname",
- None)
+ hostname_fqdn = util.get_cfg_option_bool(
+ cfg, "prefer_fqdn_over_hostname", None
+ )
if hostname_fqdn is not None:
- cloud.distro.set_option('prefer_fqdn_over_hostname', hostname_fqdn)
+ cloud.distro.set_option("prefer_fqdn_over_hostname", hostname_fqdn)
(hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
try:
- prev_fn = os.path.join(cloud.get_cpath('data'), "previous-hostname")
+ prev_fn = os.path.join(cloud.get_cpath("data"), "previous-hostname")
log.debug("Updating hostname to %s (%s)", fqdn, hostname)
cloud.distro.update_hostname(hostname, fqdn, prev_fn)
except Exception:
- util.logexc(log, "Failed to update the hostname to %s (%s)", fqdn,
- hostname)
+ util.logexc(
+ log, "Failed to update the hostname to %s (%s)", fqdn, hostname
+ )
raise
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py
index ac4a4410..ef77a799 100644
--- a/cloudinit/config/cc_users_groups.py
+++ b/cloudinit/config/cc_users_groups.py
@@ -127,12 +127,12 @@ config keys for an entry in ``users`` are as follows:
uid: <user id>
"""
+from cloudinit import log as logging
+
# Ensure this is aliased to a name not 'distros'
# since the module attribute 'distros'
# is a list of distros that are supported, not a sub-module
from cloudinit.distros import ug_util
-from cloudinit import log as logging
-
from cloudinit.settings import PER_INSTANCE
LOG = logging.getLogger(__name__)
@@ -149,26 +149,31 @@ def handle(name, cfg, cloud, _log, _args):
for (user, config) in users.items():
ssh_redirect_user = config.pop("ssh_redirect_user", False)
if ssh_redirect_user:
- if 'ssh_authorized_keys' in config or 'ssh_import_id' in config:
+ if "ssh_authorized_keys" in config or "ssh_import_id" in config:
raise ValueError(
- 'Not creating user %s. ssh_redirect_user cannot be'
- ' provided with ssh_import_id or ssh_authorized_keys' %
- user)
- if ssh_redirect_user not in (True, 'default'):
+ "Not creating user %s. ssh_redirect_user cannot be"
+ " provided with ssh_import_id or ssh_authorized_keys"
+ % user
+ )
+ if ssh_redirect_user not in (True, "default"):
raise ValueError(
- 'Not creating user %s. Invalid value of'
- ' ssh_redirect_user: %s. Expected values: true, default'
- ' or false.' % (user, ssh_redirect_user))
+ "Not creating user %s. Invalid value of"
+ " ssh_redirect_user: %s. Expected values: true, default"
+ " or false." % (user, ssh_redirect_user)
+ )
if default_user is None:
LOG.warning(
- 'Ignoring ssh_redirect_user: %s for %s.'
- ' No default_user defined.'
- ' Perhaps missing cloud configuration users: '
- ' [default, ..].',
- ssh_redirect_user, user)
+ "Ignoring ssh_redirect_user: %s for %s."
+ " No default_user defined."
+ " Perhaps missing cloud configuration users: "
+ " [default, ..].",
+ ssh_redirect_user,
+ user,
+ )
else:
- config['ssh_redirect_user'] = default_user
- config['cloud_public_ssh_keys'] = cloud_keys
+ config["ssh_redirect_user"] = default_user
+ config["cloud_public_ssh_keys"] = cloud_keys
cloud.distro.create_user(user, **config)
+
# vi: ts=4 expandtab
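The ssh_redirect_user handling above accepts only true or 'default' and refuses to combine redirection with explicit key options; a condensed sketch of those checks (the cloud_public_ssh_keys plumbing is omitted):

def check_ssh_redirect(user, config, default_user):
    # Mirrors the validation in handle() above; raises on the same conditions.
    ssh_redirect_user = config.pop("ssh_redirect_user", False)
    if not ssh_redirect_user:
        return config
    if "ssh_authorized_keys" in config or "ssh_import_id" in config:
        raise ValueError(
            "Not creating user %s. ssh_redirect_user cannot be provided"
            " with ssh_import_id or ssh_authorized_keys" % user
        )
    if ssh_redirect_user not in (True, "default"):
        raise ValueError(
            "Invalid value of ssh_redirect_user: %s" % ssh_redirect_user
        )
    if default_user is not None:
        config["ssh_redirect_user"] = default_user
    return config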
diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py
index 55f8c684..2c580328 100644
--- a/cloudinit/config/cc_write_files.py
+++ b/cloudinit/config/cc_write_files.py
@@ -10,22 +10,21 @@ import base64
import os
from textwrap import dedent
-from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema
from cloudinit import log as logging
-from cloudinit.settings import PER_INSTANCE
from cloudinit import util
-
+from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema
+from cloudinit.settings import PER_INSTANCE
frequency = PER_INSTANCE
DEFAULT_OWNER = "root:root"
DEFAULT_PERMS = 0o644
DEFAULT_DEFER = False
-UNKNOWN_ENC = 'text/plain'
+UNKNOWN_ENC = "text/plain"
LOG = logging.getLogger(__name__)
-distros = ['all']
+distros = ["all"]
# The schema definition for each cloud-config module is a strict contract for
# describing supported configuration parameters for each cloud-config section.
@@ -34,14 +33,22 @@ distros = ['all']
# configuration.
supported_encoding_types = [
- 'gz', 'gzip', 'gz+base64', 'gzip+base64', 'gz+b64', 'gzip+b64', 'b64',
- 'base64']
+ "gz",
+ "gzip",
+ "gz+base64",
+ "gzip+base64",
+ "gz+b64",
+ "gzip+b64",
+ "b64",
+ "base64",
+]
meta = {
- 'id': 'cc_write_files',
- 'name': 'Write Files',
- 'title': 'write arbitrary files',
- 'description': dedent("""\
+ "id": "cc_write_files",
+ "name": "Write Files",
+ "title": "write arbitrary files",
+ "description": dedent(
+ """\
Write out arbitrary content to files, optionally setting permissions.
Parent folders in the path are created if absent.
Content can be specified in plain text or binary. Data encoded with
@@ -57,10 +64,12 @@ meta = {
Do not write files under /tmp during boot because of a race with
systemd-tmpfiles-clean that can cause temp files to get cleaned during
the early boot process. Use /run/somedir instead to avoid race
- LP:1707222."""),
- 'distros': distros,
- 'examples': [
- dedent("""\
+ LP:1707222."""
+ ),
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
# Write out base64 encoded content to /etc/sysconfig/selinux
write_files:
- encoding: b64
@@ -68,16 +77,20 @@ meta = {
owner: root:root
path: /etc/sysconfig/selinux
permissions: '0644'
- """),
- dedent("""\
+ """
+ ),
+ dedent(
+ """\
# Appending content to an existing file
write_files:
- content: |
15 * * * * root ship_logs
path: /etc/crontab
append: true
- """),
- dedent("""\
+ """
+ ),
+ dedent(
+ """\
# Provide gziped binary content
write_files:
- encoding: gzip
@@ -85,13 +98,17 @@ meta = {
H4sIAIDb/U8C/1NW1E/KzNMvzuBKTc7IV8hIzcnJVyjPL8pJ4QIA6N+MVxsAAAA=
path: /usr/bin/hello
permissions: '0755'
- """),
- dedent("""\
+ """
+ ),
+ dedent(
+ """\
# Create an empty file on the system
write_files:
- path: /root/CLOUD_INIT_WAS_HERE
- """),
- dedent("""\
+ """
+ ),
+ dedent(
+ """\
# Defer writing the file until after the package (Nginx) is
# installed and its user is created alongside
write_files:
@@ -108,85 +125,109 @@ meta = {
owner: 'nginx:nginx'
permissions: '0640'
defer: true
- """)],
- 'frequency': frequency,
+ """
+ ),
+ ],
+ "frequency": frequency,
}
schema = {
- 'type': 'object',
- 'properties': {
- 'write_files': {
- 'type': 'array',
- 'items': {
- 'type': 'object',
- 'properties': {
- 'path': {
- 'type': 'string',
- 'description': dedent("""\
+ "type": "object",
+ "properties": {
+ "write_files": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "path": {
+ "type": "string",
+ "description": dedent(
+ """\
Path of the file to which ``content`` is decoded
and written
- """),
+ """
+ ),
},
- 'content': {
- 'type': 'string',
- 'default': '',
- 'description': dedent("""\
+ "content": {
+ "type": "string",
+ "default": "",
+ "description": dedent(
+ """\
Optional content to write to the provided ``path``.
When content is present and encoding is not '%s',
decode the content prior to writing. Default:
**''**
- """ % UNKNOWN_ENC),
+ """
+ % UNKNOWN_ENC
+ ),
},
- 'owner': {
- 'type': 'string',
- 'default': DEFAULT_OWNER,
- 'description': dedent("""\
+ "owner": {
+ "type": "string",
+ "default": DEFAULT_OWNER,
+ "description": dedent(
+ """\
Optional owner:group to chown on the file. Default:
**{owner}**
- """.format(owner=DEFAULT_OWNER)),
+ """.format(
+ owner=DEFAULT_OWNER
+ )
+ ),
},
- 'permissions': {
- 'type': 'string',
- 'default': oct(DEFAULT_PERMS).replace('o', ''),
- 'description': dedent("""\
+ "permissions": {
+ "type": "string",
+ "default": oct(DEFAULT_PERMS).replace("o", ""),
+ "description": dedent(
+ """\
Optional file permissions to set on ``path``
represented as an octal string '0###'. Default:
**'{perms}'**
- """.format(perms=oct(DEFAULT_PERMS).replace('o', ''))),
+ """.format(
+ perms=oct(DEFAULT_PERMS).replace("o", "")
+ )
+ ),
},
- 'encoding': {
- 'type': 'string',
- 'default': UNKNOWN_ENC,
- 'enum': supported_encoding_types,
- 'description': dedent("""\
+ "encoding": {
+ "type": "string",
+ "default": UNKNOWN_ENC,
+ "enum": supported_encoding_types,
+ "description": dedent(
+ """\
Optional encoding type of the content. Default is
**text/plain** and no content decoding is
performed. Supported encoding types are:
- %s.""" % ", ".join(supported_encoding_types)),
+ %s."""
+ % ", ".join(supported_encoding_types)
+ ),
},
- 'append': {
- 'type': 'boolean',
- 'default': False,
- 'description': dedent("""\
+ "append": {
+ "type": "boolean",
+ "default": False,
+ "description": dedent(
+ """\
Whether to append ``content`` to existing file if
``path`` exists. Default: **false**.
- """),
+ """
+ ),
},
- 'defer': {
- 'type': 'boolean',
- 'default': DEFAULT_DEFER,
- 'description': dedent("""\
+ "defer": {
+ "type": "boolean",
+ "default": DEFAULT_DEFER,
+ "description": dedent(
+ """\
Defer writing the file until 'final' stage, after
users were created, and packages were installed.
Default: **{defer}**.
- """.format(defer=DEFAULT_DEFER)),
+ """.format(
+ defer=DEFAULT_DEFER
+ )
+ ),
},
},
- 'required': ['path'],
- 'additionalProperties': False
+ "required": ["path"],
+ "additionalProperties": False,
},
}
- }
+ },
}
__doc__ = get_meta_doc(meta, schema) # Supplement python help()
@@ -194,36 +235,40 @@ __doc__ = get_meta_doc(meta, schema) # Supplement python help()
def handle(name, cfg, _cloud, log, _args):
validate_cloudconfig_schema(cfg, schema)
- file_list = cfg.get('write_files', [])
+ file_list = cfg.get("write_files", [])
filtered_files = [
- f for f in file_list if not util.get_cfg_option_bool(f,
- 'defer',
- DEFAULT_DEFER)
+ f
+ for f in file_list
+ if not util.get_cfg_option_bool(f, "defer", DEFAULT_DEFER)
]
if not filtered_files:
- log.debug(("Skipping module named %s,"
- " no/empty 'write_files' key in configuration"), name)
+ log.debug(
+ "Skipping module named %s,"
+ " no/empty 'write_files' key in configuration",
+ name,
+ )
return
write_files(name, filtered_files)
def canonicalize_extraction(encoding_type):
if not encoding_type:
- encoding_type = ''
+ encoding_type = ""
encoding_type = encoding_type.lower().strip()
- if encoding_type in ['gz', 'gzip']:
- return ['application/x-gzip']
- if encoding_type in ['gz+base64', 'gzip+base64', 'gz+b64', 'gzip+b64']:
- return ['application/base64', 'application/x-gzip']
+ if encoding_type in ["gz", "gzip"]:
+ return ["application/x-gzip"]
+ if encoding_type in ["gz+base64", "gzip+base64", "gz+b64", "gzip+b64"]:
+ return ["application/base64", "application/x-gzip"]
# Yaml already encodes binary data as base64 if it is given to the
# yaml file as binary, so those will be automatically decoded for you.
# But the above b64 is just for people that are more 'comfortable'
# specifing it manually (which might be a possiblity)
- if encoding_type in ['b64', 'base64']:
- return ['application/base64']
+ if encoding_type in ["b64", "base64"]:
+ return ["application/base64"]
if encoding_type:
- LOG.warning("Unknown encoding type %s, assuming %s",
- encoding_type, UNKNOWN_ENC)
+ LOG.warning(
+ "Unknown encoding type %s, assuming %s", encoding_type, UNKNOWN_ENC
+ )
return [UNKNOWN_ENC]
@@ -232,17 +277,20 @@ def write_files(name, files):
return
for (i, f_info) in enumerate(files):
- path = f_info.get('path')
+ path = f_info.get("path")
if not path:
- LOG.warning("No path provided to write for entry %s in module %s",
- i + 1, name)
+ LOG.warning(
+ "No path provided to write for entry %s in module %s",
+ i + 1,
+ name,
+ )
continue
path = os.path.abspath(path)
- extractions = canonicalize_extraction(f_info.get('encoding'))
- contents = extract_contents(f_info.get('content', ''), extractions)
- (u, g) = util.extract_usergroup(f_info.get('owner', DEFAULT_OWNER))
- perms = decode_perms(f_info.get('permissions'), DEFAULT_PERMS)
- omode = 'ab' if util.get_cfg_option_bool(f_info, 'append') else 'wb'
+ extractions = canonicalize_extraction(f_info.get("encoding"))
+ contents = extract_contents(f_info.get("content", ""), extractions)
+ (u, g) = util.extract_usergroup(f_info.get("owner", DEFAULT_OWNER))
+ perms = decode_perms(f_info.get("permissions"), DEFAULT_PERMS)
+ omode = "ab" if util.get_cfg_option_bool(f_info, "append") else "wb"
util.write_file(path, contents, omode=omode, mode=perms)
util.chownbyname(path, u, g)
@@ -264,20 +312,20 @@ def decode_perms(perm, default):
reps.append("%o" % r)
except TypeError:
reps.append("%r" % r)
- LOG.warning(
- "Undecodable permissions %s, returning default %s", *reps)
+ LOG.warning("Undecodable permissions %s, returning default %s", *reps)
return default
def extract_contents(contents, extraction_types):
result = contents
for t in extraction_types:
- if t == 'application/x-gzip':
+ if t == "application/x-gzip":
result = util.decomp_gzip(result, quiet=False, decode=False)
- elif t == 'application/base64':
+ elif t == "application/base64":
result = base64.b64decode(result)
elif t == UNKNOWN_ENC:
pass
return result
+
# vi: ts=4 expandtab
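canonicalize_extraction and extract_contents above form a small decode pipeline; a runnable sketch of the gz+b64 path using only the standard library (the real module routes gunzipping through util.decomp_gzip):

import base64
import gzip

payload = base64.b64encode(gzip.compress(b"#!/bin/sh\necho hello\n")).decode()

# 'gz+b64' canonicalizes to ['application/base64', 'application/x-gzip'] above,
# so the content is base64-decoded first and then gunzipped.
decoded = gzip.decompress(base64.b64decode(payload))
print(decoded.decode())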
diff --git a/cloudinit/config/cc_write_files_deferred.py b/cloudinit/config/cc_write_files_deferred.py
index 4fc8659c..1294628c 100644
--- a/cloudinit/config/cc_write_files_deferred.py
+++ b/cloudinit/config/cc_write_files_deferred.py
@@ -4,10 +4,11 @@
"""Defer writing certain files"""
-from cloudinit.config.schema import validate_cloudconfig_schema
from cloudinit import util
-from cloudinit.config.cc_write_files import (
- schema as write_files_schema, write_files, DEFAULT_DEFER)
+from cloudinit.config.cc_write_files import DEFAULT_DEFER
+from cloudinit.config.cc_write_files import schema as write_files_schema
+from cloudinit.config.cc_write_files import write_files
+from cloudinit.config.schema import validate_cloudconfig_schema
# meta is not used in this module, but it remains as code documentation
#
@@ -36,15 +37,18 @@ __doc__ = None
def handle(name, cfg, _cloud, log, _args):
validate_cloudconfig_schema(cfg, schema)
- file_list = cfg.get('write_files', [])
+ file_list = cfg.get("write_files", [])
filtered_files = [
- f for f in file_list if util.get_cfg_option_bool(f,
- 'defer',
- DEFAULT_DEFER)
+ f
+ for f in file_list
+ if util.get_cfg_option_bool(f, "defer", DEFAULT_DEFER)
]
if not filtered_files:
- log.debug(("Skipping module named %s,"
- " no deferred file defined in configuration"), name)
+ log.debug(
+ "Skipping module named %s,"
+ " no deferred file defined in configuration",
+ name,
+ )
return
write_files(name, filtered_files)
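cc_write_files and cc_write_files_deferred split one write_files list on the defer flag: the first module writes entries without it, this one writes the rest. A small sketch of that partition; the real code uses util.get_cfg_option_bool, which also accepts string booleans, where this sketch uses a plain .get():

DEFAULT_DEFER = False


def partition_by_defer(file_list):
    immediate = [f for f in file_list if not f.get("defer", DEFAULT_DEFER)]
    deferred = [f for f in file_list if f.get("defer", DEFAULT_DEFER)]
    return immediate, deferred


print(partition_by_defer([{"path": "/run/a"}, {"path": "/etc/b", "defer": True}]))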
diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py
index 046a2852..7a232689 100644
--- a/cloudinit/config/cc_yum_add_repo.py
+++ b/cloudinit/config/cc_yum_add_repo.py
@@ -37,8 +37,18 @@ from configparser import ConfigParser
from cloudinit import util
-distros = ['almalinux', 'centos', 'cloudlinux', 'eurolinux', 'fedora',
- 'openEuler', 'photon', 'rhel', 'rocky', 'virtuozzo']
+distros = [
+ "almalinux",
+ "centos",
+ "cloudlinux",
+ "eurolinux",
+ "fedora",
+ "openEuler",
+ "photon",
+ "rhel",
+ "rocky",
+ "virtuozzo",
+]
def _canonicalize_id(repo_id):
@@ -79,25 +89,34 @@ def _format_repository_config(repo_id, repo_config):
def handle(name, cfg, _cloud, log, _args):
- repos = cfg.get('yum_repos')
+ repos = cfg.get("yum_repos")
if not repos:
- log.debug(("Skipping module named %s,"
- " no 'yum_repos' configuration found"), name)
+ log.debug(
+ "Skipping module named %s, no 'yum_repos' configuration found",
+ name,
+ )
return
- repo_base_path = util.get_cfg_option_str(cfg, 'yum_repo_dir',
- '/etc/yum.repos.d/')
+ repo_base_path = util.get_cfg_option_str(
+ cfg, "yum_repo_dir", "/etc/yum.repos.d/"
+ )
repo_locations = {}
repo_configs = {}
for (repo_id, repo_config) in repos.items():
canon_repo_id = _canonicalize_id(repo_id)
repo_fn_pth = os.path.join(repo_base_path, "%s.repo" % (canon_repo_id))
if os.path.exists(repo_fn_pth):
- log.info("Skipping repo %s, file %s already exists!",
- repo_id, repo_fn_pth)
+ log.info(
+ "Skipping repo %s, file %s already exists!",
+ repo_id,
+ repo_fn_pth,
+ )
continue
elif canon_repo_id in repo_locations:
- log.info("Skipping repo %s, file %s already pending!",
- repo_id, repo_fn_pth)
+ log.info(
+ "Skipping repo %s, file %s already pending!",
+ repo_id,
+ repo_fn_pth,
+ )
continue
if not repo_config:
repo_config = {}
@@ -109,21 +128,29 @@ def handle(name, cfg, _cloud, log, _args):
n_repo_config[k] = v
repo_config = n_repo_config
missing_required = 0
- for req_field in ['baseurl']:
+ for req_field in ["baseurl"]:
if req_field not in repo_config:
- log.warning(("Repository %s does not contain a %s"
- " configuration 'required' entry"),
- repo_id, req_field)
+ log.warning(
+ "Repository %s does not contain a %s"
+ " configuration 'required' entry",
+ repo_id,
+ req_field,
+ )
missing_required += 1
if not missing_required:
repo_configs[canon_repo_id] = repo_config
repo_locations[canon_repo_id] = repo_fn_pth
else:
- log.warning("Repository %s is missing %s required fields, "
- "skipping!", repo_id, missing_required)
+ log.warning(
+ "Repository %s is missing %s required fields, skipping!",
+ repo_id,
+ missing_required,
+ )
for (c_repo_id, path) in repo_locations.items():
- repo_blob = _format_repository_config(c_repo_id,
- repo_configs.get(c_repo_id))
+ repo_blob = _format_repository_config(
+ c_repo_id, repo_configs.get(c_repo_id)
+ )
util.write_file(path, repo_blob)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_zypper_add_repo.py b/cloudinit/config/cc_zypper_add_repo.py
index bf1638fb..41605b97 100644
--- a/cloudinit/config/cc_zypper_add_repo.py
+++ b/cloudinit/config/cc_zypper_add_repo.py
@@ -5,22 +5,24 @@
"""zypper_add_repo: Add zyper repositories to the system"""
-import configobj
import os
from textwrap import dedent
-from cloudinit.config.schema import get_meta_doc
+import configobj
+
from cloudinit import log as logging
-from cloudinit.settings import PER_ALWAYS
from cloudinit import util
+from cloudinit.config.schema import get_meta_doc
+from cloudinit.settings import PER_ALWAYS
-distros = ['opensuse', 'sles']
+distros = ["opensuse", "sles"]
meta = {
- 'id': 'cc_zypper_add_repo',
- 'name': 'ZypperAddRepo',
- 'title': 'Configure zypper behavior and add zypper repositories',
- 'description': dedent("""\
+ "id": "cc_zypper_add_repo",
+ "name": "ZypperAddRepo",
+ "title": "Configure zypper behavior and add zypper repositories",
+ "description": dedent(
+ """\
Configure zypper behavior by modifying /etc/zypp/zypp.conf. The
configuration writer is "dumb" and will simply append the provided
configuration options to the configuration file. Option settings
@@ -28,9 +30,12 @@ meta = {
is parsed. The file is in INI format.
Add repositories to the system. No validation is performed on the
repository file entries, it is assumed the user is familiar with
- the zypper repository file format."""),
- 'distros': distros,
- 'examples': [dedent("""\
+ the zypper repository file format."""
+ ),
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
zypper:
repos:
- id: opensuse-oss
@@ -49,50 +54,56 @@ meta = {
servicesdir: /etc/zypp/services.d
download.use_deltarpm: true
# any setting in /etc/zypp/zypp.conf
- """)],
- 'frequency': PER_ALWAYS,
+ """
+ )
+ ],
+ "frequency": PER_ALWAYS,
}
schema = {
- 'type': 'object',
- 'properties': {
- 'zypper': {
- 'type': 'object',
- 'properties': {
- 'repos': {
- 'type': 'array',
- 'items': {
- 'type': 'object',
- 'properties': {
- 'id': {
- 'type': 'string',
- 'description': dedent("""\
+ "type": "object",
+ "properties": {
+ "zypper": {
+ "type": "object",
+ "properties": {
+ "repos": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": dedent(
+ """\
The unique id of the repo, used when
writing
- /etc/zypp/repos.d/<id>.repo.""")
+ /etc/zypp/repos.d/<id>.repo."""
+ ),
+ },
+ "baseurl": {
+ "type": "string",
+ "format": "uri", # built-in format type
+ "description": "The base repositoy URL",
},
- 'baseurl': {
- 'type': 'string',
- 'format': 'uri', # built-in format type
- 'description': 'The base repositoy URL'
- }
},
- 'required': ['id', 'baseurl'],
- 'additionalProperties': True
+ "required": ["id", "baseurl"],
+ "additionalProperties": True,
},
- 'minItems': 1
+ "minItems": 1,
},
- 'config': {
- 'type': 'object',
- 'description': dedent("""\
+ "config": {
+ "type": "object",
+ "description": dedent(
+ """\
Any supported zypo.conf key is written to
- /etc/zypp/zypp.conf'""")
- }
+ /etc/zypp/zypp.conf'"""
+ ),
+ },
},
- 'minProperties': 1, # Either config or repo must be provided
- 'additionalProperties': False, # only repos and config allowed
+ "minProperties": 1, # Either config or repo must be provided
+ "additionalProperties": False, # only repos and config allowed
}
- }
+ },
}
__doc__ = get_meta_doc(meta, schema) # Supplement python help()
@@ -141,34 +152,43 @@ def _write_repos(repos, repo_base_path):
valid_repos = {}
for index, user_repo_config in enumerate(repos):
# Skip on absent required keys
- missing_keys = set(['id', 'baseurl']).difference(set(user_repo_config))
+ missing_keys = set(["id", "baseurl"]).difference(set(user_repo_config))
if missing_keys:
LOG.warning(
"Repo config at index %d is missing required config keys: %s",
- index, ",".join(missing_keys))
+ index,
+ ",".join(missing_keys),
+ )
continue
- repo_id = user_repo_config.get('id')
+ repo_id = user_repo_config.get("id")
canon_repo_id = _canonicalize_id(repo_id)
repo_fn_pth = os.path.join(repo_base_path, "%s.repo" % (canon_repo_id))
if os.path.exists(repo_fn_pth):
- LOG.info("Skipping repo %s, file %s already exists!",
- repo_id, repo_fn_pth)
+ LOG.info(
+ "Skipping repo %s, file %s already exists!",
+ repo_id,
+ repo_fn_pth,
+ )
continue
elif repo_id in valid_repos:
- LOG.info("Skipping repo %s, file %s already pending!",
- repo_id, repo_fn_pth)
+ LOG.info(
+ "Skipping repo %s, file %s already pending!",
+ repo_id,
+ repo_fn_pth,
+ )
continue
# Do some basic key formatting
repo_config = dict(
(k.lower().strip().replace("-", "_"), v)
for k, v in user_repo_config.items()
- if k and k != 'id')
+ if k and k != "id"
+ )
# Set defaults if not present
- for field in ['enabled', 'autorefresh']:
+ for field in ["enabled", "autorefresh"]:
if field not in repo_config:
- repo_config[field] = '1'
+ repo_config[field] = "1"
valid_repos[repo_id] = (repo_fn_pth, repo_config)
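
Read on its own, the key handling in _write_repos above reduces to the following standalone sketch (the repo values here are hypothetical):

# Sketch of the repo-key normalization above: keys are lower-cased, dashes
# become underscores, the 'id' key is dropped, and 'enabled'/'autorefresh'
# default to "1" when the user did not set them.
user_repo_config = {
    "id": "opensuse-oss",
    "baseurl": "http://download.opensuse.org/tumbleweed/repo/oss/",
    "autorefresh": "0",
}
repo_config = dict(
    (k.lower().strip().replace("-", "_"), v)
    for k, v in user_repo_config.items()
    if k and k != "id"
)
for field in ["enabled", "autorefresh"]:
    if field not in repo_config:
        repo_config[field] = "1"
print(repo_config)
# {'baseurl': 'http://download.opensuse.org/...', 'autorefresh': '0', 'enabled': '1'}
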
@@ -181,39 +201,44 @@ def _write_zypp_config(zypper_config):
"""Write to the default zypp configuration file /etc/zypp/zypp.conf"""
if not zypper_config:
return
- zypp_config = '/etc/zypp/zypp.conf'
+ zypp_config = "/etc/zypp/zypp.conf"
zypp_conf_content = util.load_file(zypp_config)
- new_settings = ['# Added via cloud.cfg']
+ new_settings = ["# Added via cloud.cfg"]
for setting, value in zypper_config.items():
- if setting == 'configdir':
- msg = 'Changing the location of the zypper configuration is '
+ if setting == "configdir":
+ msg = "Changing the location of the zypper configuration is "
msg += 'not supported, skipping "configdir" setting'
LOG.warning(msg)
continue
if value:
- new_settings.append('%s=%s' % (setting, value))
+ new_settings.append("%s=%s" % (setting, value))
if len(new_settings) > 1:
- new_config = zypp_conf_content + '\n'.join(new_settings)
+ new_config = zypp_conf_content + "\n".join(new_settings)
else:
new_config = zypp_conf_content
util.write_file(zypp_config, new_config)
def handle(name, cfg, _cloud, log, _args):
- zypper_section = cfg.get('zypper')
+ zypper_section = cfg.get("zypper")
if not zypper_section:
- LOG.debug(("Skipping module named %s,"
- " no 'zypper' relevant configuration found"), name)
+ LOG.debug(
+ "Skipping module named %s,"
+ " no 'zypper' relevant configuration found",
+ name,
+ )
return
- repos = zypper_section.get('repos')
+ repos = zypper_section.get("repos")
if not repos:
- LOG.debug(("Skipping module named %s,"
- " no 'repos' configuration found"), name)
+ LOG.debug(
+ "Skipping module named %s, no 'repos' configuration found", name
+ )
return
- zypper_config = zypper_section.get('config', {})
- repo_base_path = zypper_config.get('reposdir', '/etc/zypp/repos.d/')
+ zypper_config = zypper_section.get("config", {})
+ repo_base_path = zypper_config.get("reposdir", "/etc/zypp/repos.d/")
_write_zypp_config(zypper_config)
_write_repos(repos, repo_base_path)
+
# vi: ts=4 expandtab
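
The append-only behaviour of _write_zypp_config above can be sketched in isolation like this (the setting values are hypothetical):

# Each non-empty setting becomes a "key=value" line, 'configdir' is refused,
# and the block is only appended when at least one real setting was collected.
zypper_config = {
    "download.use_deltarpm": "true",
    "configdir": "/custom/zypp",  # skipped: relocating the config is unsupported
    "servicesdir": "",            # empty values are ignored
}
new_settings = ["# Added via cloud.cfg"]
for setting, value in zypper_config.items():
    if setting == "configdir":
        continue
    if value:
        new_settings.append("%s=%s" % (setting, value))
if len(new_settings) > 1:
    print("\n".join(new_settings))
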
diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py
index d772b4f9..8ec4ab6a 100644
--- a/cloudinit/config/schema.py
+++ b/cloudinit/config/schema.py
@@ -1,26 +1,27 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""schema.py: Set of module functions for processing cloud-config schema."""
-from cloudinit.cmd.devel import read_cfg_paths
-from cloudinit import importer
-from cloudinit.importer import MetaSchema
-from cloudinit.util import find_modules, load_file, error
-
import argparse
-from collections import defaultdict
-from copy import deepcopy
-from functools import partial
import logging
import os
import re
import sys
+from collections import defaultdict
+from copy import deepcopy
+from functools import partial
+
import yaml
+from cloudinit import importer
+from cloudinit.cmd.devel import read_cfg_paths
+from cloudinit.importer import MetaSchema
+from cloudinit.util import error, find_modules, load_file
+
error = partial(error, sys_exit=True)
LOG = logging.getLogger(__name__)
-_YAML_MAP = {True: 'true', False: 'false', None: 'null'}
-CLOUD_CONFIG_HEADER = b'#cloud-config'
+_YAML_MAP = {True: "true", False: "false", None: "null"}
+CLOUD_CONFIG_HEADER = b"#cloud-config"
SCHEMA_DOC_TMPL = """
{name}
{title_underbar}
@@ -40,9 +41,10 @@ SCHEMA_DOC_TMPL = """
"""
SCHEMA_PROPERTY_TMPL = "{prefix}**{prop_name}:** ({prop_type}) {description}"
SCHEMA_LIST_ITEM_TMPL = (
- '{prefix}Each item in **{prop_name}** list supports the following keys:')
-SCHEMA_EXAMPLES_HEADER = '\n**Examples**::\n\n'
-SCHEMA_EXAMPLES_SPACER_TEMPLATE = '\n # --- Example{0} ---'
+ "{prefix}Each item in **{prop_name}** list supports the following keys:"
+)
+SCHEMA_EXAMPLES_HEADER = "\n**Examples**::\n\n"
+SCHEMA_EXAMPLES_SPACER_TEMPLATE = "\n # --- Example{0} ---"
class SchemaValidationError(ValueError):
@@ -56,10 +58,12 @@ class SchemaValidationError(ValueError):
"""
self.schema_errors = schema_errors
error_messages = [
- '{0}: {1}'.format(config_key, message)
- for config_key, message in schema_errors]
+ "{0}: {1}".format(config_key, message)
+ for config_key, message in schema_errors
+ ]
message = "Cloud config schema errors: {0}".format(
- ', '.join(error_messages))
+ ", ".join(error_messages)
+ )
super(SchemaValidationError, self).__init__(message)
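
The message assembled in SchemaValidationError.__init__ above can be reproduced standalone (the error tuples are hypothetical):

# Each schema error is a (config_key, message) pair; the exception text joins
# them as "key: message" entries behind a fixed prefix.
schema_errors = (
    ("ntp.pools", "'pool.ntp.org' is not of type 'array'"),
    ("users.0.name", "123 is not of type 'string'"),
)
error_messages = [
    "{0}: {1}".format(config_key, message)
    for config_key, message in schema_errors
]
print("Cloud config schema errors: {0}".format(", ".join(error_messages)))
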
@@ -72,8 +76,9 @@ def is_schema_byte_string(checker, instance):
from jsonschema import Draft4Validator
except ImportError:
return False
- return (Draft4Validator.TYPE_CHECKER.is_type(instance, "string") or
- isinstance(instance, (bytes,)))
+ return Draft4Validator.TYPE_CHECKER.is_type(
+ instance, "string"
+ ) or isinstance(instance, (bytes,))
def get_jsonschema_validator():
@@ -102,25 +107,28 @@ def get_jsonschema_validator():
# http://json-schema.org/understanding-json-schema/reference/object.html#pattern-properties
strict_metaschema["properties"]["label"] = {"type": "string"}
- if hasattr(Draft4Validator, 'TYPE_CHECKER'): # jsonschema 3.0+
+ if hasattr(Draft4Validator, "TYPE_CHECKER"): # jsonschema 3.0+
type_checker = Draft4Validator.TYPE_CHECKER.redefine(
- 'string', is_schema_byte_string)
+ "string", is_schema_byte_string
+ )
cloudinitValidator = create(
meta_schema=strict_metaschema,
validators=Draft4Validator.VALIDATORS,
version="draft4",
- type_checker=type_checker)
+ type_checker=type_checker,
+ )
else: # jsonschema 2.6 workaround
types = Draft4Validator.DEFAULT_TYPES
# Allow bytes as well as string (and disable a spurious unsupported
# assignment-operation pylint warning which appears because this
# code path isn't written against the latest jsonschema).
- types['string'] = (str, bytes) # pylint: disable=E1137
+ types["string"] = (str, bytes) # pylint: disable=E1137
cloudinitValidator = create(
meta_schema=strict_metaschema,
validators=Draft4Validator.VALIDATORS,
version="draft4",
- default_types=types)
+ default_types=types,
+ )
return (cloudinitValidator, FormatChecker)
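
A hedged sketch of the jsonschema 3.0+ branch above, run against the stock Draft4Validator type checker (requires jsonschema >= 3.0):

from jsonschema import Draft4Validator


def is_schema_byte_string(checker, instance):
    # Accept bytes wherever the draft-4 "string" type is expected.
    return Draft4Validator.TYPE_CHECKER.is_type(
        instance, "string"
    ) or isinstance(instance, bytes)


type_checker = Draft4Validator.TYPE_CHECKER.redefine(
    "string", is_schema_byte_string
)
print(type_checker.is_type(b"#cloud-config", "string"))                  # True
print(Draft4Validator.TYPE_CHECKER.is_type(b"#cloud-config", "string"))  # False
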
@@ -147,12 +155,14 @@ def validate_cloudconfig_metaschema(validator, schema: dict, throw=True):
if throw:
raise SchemaValidationError(
schema_errors=(
- ('.'.join([str(p) for p in err.path]), err.message),
+ (".".join([str(p) for p in err.path]), err.message),
)
) from err
LOG.warning(
"Meta-schema validation failed, attempting to validate config "
- "anyway: %s", err)
+ "anyway: %s",
+ err,
+ )
def validate_cloudconfig_schema(
@@ -176,7 +186,8 @@ def validate_cloudconfig_schema(
(cloudinitValidator, FormatChecker) = get_jsonschema_validator()
if strict_metaschema:
validate_cloudconfig_metaschema(
- cloudinitValidator, schema, throw=False)
+ cloudinitValidator, schema, throw=False
+ )
except ImportError:
LOG.debug("Ignoring schema validation. jsonschema is not present")
return
@@ -184,7 +195,7 @@ def validate_cloudconfig_schema(
validator = cloudinitValidator(schema, format_checker=FormatChecker())
errors = ()
for error in sorted(validator.iter_errors(config), key=lambda e: e.path):
- path = '.'.join([str(p) for p in error.path])
+ path = ".".join([str(p) for p in error.path])
errors += ((path, error.message),)
if errors:
if strict:
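
The error-collection loop above can be exercised with a plain Draft4Validator; the schema and config here are hypothetical (the real code builds a customised validator first):

from jsonschema import Draft4Validator

schema = {"type": "object", "properties": {"runcmd": {"type": "array"}}}
config = {"runcmd": "echo hi"}  # invalid on purpose

validator = Draft4Validator(schema)
errors = ()
for error in sorted(validator.iter_errors(config), key=lambda e: e.path):
    path = ".".join([str(p) for p in error.path])
    errors += ((path, error.message),)
print(errors)  # roughly: (('runcmd', "'echo hi' is not of type 'array'"),)
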
@@ -208,12 +219,13 @@ def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors):
schemapaths = {}
if cloudconfig:
schemapaths = _schemapath_for_cloudconfig(
- cloudconfig, original_content)
+ cloudconfig, original_content
+ )
errors_by_line = defaultdict(list)
error_footer = []
annotated_content = []
for path, msg in schema_errors:
- match = re.match(r'format-l(?P<line>\d+)\.c(?P<col>\d+).*', path)
+ match = re.match(r"format-l(?P<line>\d+)\.c(?P<col>\d+).*", path)
if match:
line, col = match.groups()
errors_by_line[int(line)].append(msg)
@@ -221,24 +233,26 @@ def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors):
col = None
errors_by_line[schemapaths[path]].append(msg)
if col is not None:
- msg = 'Line {line} column {col}: {msg}'.format(
- line=line, col=col, msg=msg)
- lines = original_content.decode().split('\n')
+ msg = "Line {line} column {col}: {msg}".format(
+ line=line, col=col, msg=msg
+ )
+ lines = original_content.decode().split("\n")
error_index = 1
for line_number, line in enumerate(lines, 1):
errors = errors_by_line[line_number]
if errors:
error_label = []
for error in errors:
- error_label.append('E{0}'.format(error_index))
- error_footer.append('# E{0}: {1}'.format(error_index, error))
+ error_label.append("E{0}".format(error_index))
+ error_footer.append("# E{0}: {1}".format(error_index, error))
error_index += 1
- annotated_content.append(line + '\t\t# ' + ','.join(error_label))
+ annotated_content.append(line + "\t\t# " + ",".join(error_label))
else:
annotated_content.append(line)
annotated_content.append(
- '# Errors: -------------\n{0}\n\n'.format('\n'.join(error_footer)))
- return '\n'.join(annotated_content)
+ "# Errors: -------------\n{0}\n\n".format("\n".join(error_footer))
+ )
+ return "\n".join(annotated_content)
def validate_cloudconfig_file(config_path, schema, annotate=False):
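
The "format-lN.cM" convention handled above encodes a line/column position instead of a schema path; a small sketch with a hypothetical error:

import re

path, msg = "format-l2.c7", "File is not valid yaml."  # hypothetical error
match = re.match(r"format-l(?P<line>\d+)\.c(?P<col>\d+).*", path)
if match:
    line, col = match.groups()
    msg = "Line {line} column {col}: {msg}".format(line=line, col=col, msg=msg)
print(msg)  # Line 2 column 7: File is not valid yaml.
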
@@ -266,15 +280,18 @@ def validate_cloudconfig_file(config_path, schema, annotate=False):
else:
if not os.path.exists(config_path):
raise RuntimeError(
- 'Configfile {0} does not exist'.format(
- config_path
- )
+ "Configfile {0} does not exist".format(config_path)
)
content = load_file(config_path, decode=False)
if not content.startswith(CLOUD_CONFIG_HEADER):
errors = (
- ('format-l1.c1', 'File {0} needs to begin with "{1}"'.format(
- config_path, CLOUD_CONFIG_HEADER.decode())),)
+ (
+ "format-l1.c1",
+ 'File {0} needs to begin with "{1}"'.format(
+ config_path, CLOUD_CONFIG_HEADER.decode()
+ ),
+ ),
+ )
error = SchemaValidationError(errors)
if annotate:
print(annotated_cloudconfig_file({}, content, error.schema_errors))
@@ -284,27 +301,32 @@ def validate_cloudconfig_file(config_path, schema, annotate=False):
except (yaml.YAMLError) as e:
line = column = 1
mark = None
- if hasattr(e, 'context_mark') and getattr(e, 'context_mark'):
- mark = getattr(e, 'context_mark')
- elif hasattr(e, 'problem_mark') and getattr(e, 'problem_mark'):
- mark = getattr(e, 'problem_mark')
+ if hasattr(e, "context_mark") and getattr(e, "context_mark"):
+ mark = getattr(e, "context_mark")
+ elif hasattr(e, "problem_mark") and getattr(e, "problem_mark"):
+ mark = getattr(e, "problem_mark")
if mark:
line = mark.line + 1
column = mark.column + 1
- errors = (('format-l{line}.c{col}'.format(line=line, col=column),
- 'File {0} is not valid yaml. {1}'.format(
- config_path, str(e))),)
+ errors = (
+ (
+ "format-l{line}.c{col}".format(line=line, col=column),
+ "File {0} is not valid yaml. {1}".format(config_path, str(e)),
+ ),
+ )
error = SchemaValidationError(errors)
if annotate:
print(annotated_cloudconfig_file({}, content, error.schema_errors))
raise error from e
try:
- validate_cloudconfig_schema(
- cloudconfig, schema, strict=True)
+ validate_cloudconfig_schema(cloudconfig, schema, strict=True)
except SchemaValidationError as e:
if annotate:
- print(annotated_cloudconfig_file(
- cloudconfig, content, e.schema_errors))
+ print(
+ annotated_cloudconfig_file(
+ cloudconfig, content, e.schema_errors
+ )
+ )
raise
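
A minimal sketch of the header check above; the user-data bytes are hypothetical:

import yaml

CLOUD_CONFIG_HEADER = b"#cloud-config"
content = b"#cloud-config\nruncmd:\n - [ls, -l]\n"

if not content.startswith(CLOUD_CONFIG_HEADER):
    # Reported as a "format-l1.c1" error before any YAML parsing happens.
    print('File needs to begin with "{0}"'.format(CLOUD_CONFIG_HEADER.decode()))
else:
    print(yaml.safe_load(content))  # {'runcmd': [['ls', '-l']]}
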
@@ -315,26 +337,26 @@ def _schemapath_for_cloudconfig(config, original_content):
@param original_content: The simple file content of the cloud-config file
"""
# FIXME Doesn't handle multi-line lists or multi-line strings
- content_lines = original_content.decode().split('\n')
+ content_lines = original_content.decode().split("\n")
schema_line_numbers = {}
list_index = 0
- RE_YAML_INDENT = r'^(\s*)'
+ RE_YAML_INDENT = r"^(\s*)"
scopes = []
for line_number, line in enumerate(content_lines, 1):
indent_depth = len(re.match(RE_YAML_INDENT, line).groups()[0])
line = line.strip()
- if not line or line.startswith('#'):
+ if not line or line.startswith("#"):
continue
if scopes:
previous_depth, path_prefix = scopes[-1]
else:
previous_depth = -1
- path_prefix = ''
- if line.startswith('- '):
+ path_prefix = ""
+ if line.startswith("- "):
# Process list items adding a list_index to the path prefix
- previous_list_idx = '.%d' % (list_index - 1)
+ previous_list_idx = ".%d" % (list_index - 1)
if path_prefix and path_prefix.endswith(previous_list_idx):
- path_prefix = path_prefix[:-len(previous_list_idx)]
+ path_prefix = path_prefix[: -len(previous_list_idx)]
key = str(list_index)
schema_line_numbers[key] = line_number
item_indent = len(re.match(RE_YAML_INDENT, line[1:]).groups()[0])
@@ -346,26 +368,26 @@ def _schemapath_for_cloudconfig(config, original_content):
else:
# Process non-list lines setting value if present
list_index = 0
- key, value = line.split(':', 1)
+ key, value = line.split(":", 1)
if path_prefix:
# Append any existing path_prefix for a fully-pathed key
- key = path_prefix + '.' + key
+ key = path_prefix + "." + key
while indent_depth <= previous_depth:
if scopes:
previous_depth, path_prefix = scopes.pop()
if list_index > 0 and indent_depth == previous_depth:
- path_prefix = '.'.join(path_prefix.split('.')[:-1])
+ path_prefix = ".".join(path_prefix.split(".")[:-1])
break
else:
previous_depth = -1
- path_prefix = ''
+ path_prefix = ""
scopes.append((indent_depth, key))
if value:
value = value.strip()
- if value.startswith('['):
- scopes.append((indent_depth + 2, key + '.0'))
+ if value.startswith("["):
+ scopes.append((indent_depth + 2, key + ".0"))
for inner_list_index in range(0, len(yaml.safe_load(value))):
- list_key = key + '.' + str(inner_list_index)
+ list_key = key + "." + str(inner_list_index)
schema_line_numbers[list_key] = line_number
schema_line_numbers[key] = line_number
return schema_line_numbers
@@ -381,14 +403,14 @@ def _get_property_type(property_dict: dict) -> str:
str(_YAML_MAP.get(k, k)) for k in property_dict["enum"]
]
if isinstance(property_type, list):
- property_type = '/'.join(property_type)
- items = property_dict.get('items', {})
- sub_property_type = items.get('type', '')
+ property_type = "/".join(property_type)
+ items = property_dict.get("items", {})
+ sub_property_type = items.get("type", "")
# Collect each item type
- for sub_item in items.get('oneOf', {}):
+ for sub_item in items.get("oneOf", {}):
if sub_property_type:
- sub_property_type += '/'
- sub_property_type += '(' + _get_property_type(sub_item) + ')'
+ sub_property_type += "/"
+ sub_property_type += "(" + _get_property_type(sub_item) + ")"
if sub_property_type:
return "{0} of {1}".format(property_type, sub_property_type)
return property_type or "UNDEFINED"
@@ -408,17 +430,17 @@ def _parse_description(description, prefix) -> str:
"""
list_paragraph = prefix * 3
description = re.sub(r"(\S)\n(\S)", r"\1 \2", description)
+ description = re.sub(r"\n\n", r"\n\n{}".format(prefix), description)
description = re.sub(
- r"\n\n", r"\n\n{}".format(prefix), description)
- description = re.sub(
- r"\n( +)-", r"\n{}-".format(list_paragraph), description)
+ r"\n( +)-", r"\n{}-".format(list_paragraph), description
+ )
return description
def _get_property_doc(schema: dict, prefix=" ") -> str:
"""Return restructured text describing the supported schema properties."""
- new_prefix = prefix + ' '
+ new_prefix = prefix + " "
properties = []
property_keys = [
schema.get("properties", {}),
@@ -473,16 +495,17 @@ def _get_examples(meta: MetaSchema) -> str:
"""Return restructured text describing the meta examples if present."""
examples = meta.get("examples")
if not examples:
- return ''
+ return ""
rst_content = SCHEMA_EXAMPLES_HEADER
for count, example in enumerate(examples):
# Python2.6 is missing textwrapper.indent
- lines = example.split('\n')
- indented_lines = [' {0}'.format(line) for line in lines]
+ lines = example.split("\n")
+ indented_lines = [" {0}".format(line) for line in lines]
if rst_content != SCHEMA_EXAMPLES_HEADER:
indented_lines.insert(
- 0, SCHEMA_EXAMPLES_SPACER_TEMPLATE.format(count + 1))
- rst_content += '\n'.join(indented_lines)
+ 0, SCHEMA_EXAMPLES_SPACER_TEMPLATE.format(count + 1)
+ )
+ rst_content += "\n".join(indented_lines)
return rst_content
@@ -552,7 +575,8 @@ def load_doc(requested_modules: list) -> str:
if invalid_docs:
error(
"Invalid --docs value {}. Must be one of: {}".format(
- list(invalid_docs), ", ".join(all_modules),
+ list(invalid_docs),
+ ", ".join(all_modules),
)
)
for mod_name in all_modules:
@@ -601,17 +625,35 @@ def get_parser(parser=None):
"""Return a parser for supported cmdline arguments."""
if not parser:
parser = argparse.ArgumentParser(
- prog='cloudconfig-schema',
- description='Validate cloud-config files or document schema')
- parser.add_argument('-c', '--config-file',
- help='Path of the cloud-config yaml file to validate')
- parser.add_argument('--system', action='store_true', default=False,
- help='Validate the system cloud-config userdata')
- parser.add_argument('-d', '--docs', nargs='+',
- help=('Print schema module docs. Choices: all or'
- ' space-delimited cc_names.'))
- parser.add_argument('--annotate', action="store_true", default=False,
- help='Annotate existing cloud-config file with errors')
+ prog="cloudconfig-schema",
+ description="Validate cloud-config files or document schema",
+ )
+ parser.add_argument(
+ "-c",
+ "--config-file",
+ help="Path of the cloud-config yaml file to validate",
+ )
+ parser.add_argument(
+ "--system",
+ action="store_true",
+ default=False,
+ help="Validate the system cloud-config userdata",
+ )
+ parser.add_argument(
+ "-d",
+ "--docs",
+ nargs="+",
+ help=(
+ "Print schema module docs. Choices: all or"
+ " space-delimited cc_names."
+ ),
+ )
+ parser.add_argument(
+ "--annotate",
+ action="store_true",
+ default=False,
+ help="Annotate existing cloud-config file with errors",
+ )
return parser
@@ -619,12 +661,13 @@ def handle_schema_args(name, args):
"""Handle provided schema args and perform the appropriate actions."""
exclusive_args = [args.config_file, args.docs, args.system]
if len([arg for arg in exclusive_args if arg]) != 1:
- error('Expected one of --config-file, --system or --docs arguments')
+ error("Expected one of --config-file, --system or --docs arguments")
full_schema = get_schema()
if args.config_file or args.system:
try:
validate_cloudconfig_file(
- args.config_file, full_schema, args.annotate)
+ args.config_file, full_schema, args.annotate
+ )
except SchemaValidationError as e:
if not args.annotate:
error(str(e))
@@ -643,11 +686,11 @@ def handle_schema_args(name, args):
def main():
"""Tool to validate schema of a cloud-config file or print schema docs."""
parser = get_parser()
- handle_schema_args('cloudconfig-schema', parser.parse_args())
+ handle_schema_args("cloudconfig-schema", parser.parse_args())
return 0
-if __name__ == '__main__':
+if __name__ == "__main__":
sys.exit(main())
# vi: ts=4 expandtab
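
Assuming the cloudinit package from this tree is importable, the parser defined above can be driven directly:

from cloudinit.config.schema import get_parser

parser = get_parser()
args = parser.parse_args(["--config-file", "/tmp/user-data.yaml", "--annotate"])
print(args.config_file, args.annotate, args.docs, args.system)
# /tmp/user-data.yaml True None False
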
diff --git a/cloudinit/cs_utils.py b/cloudinit/cs_utils.py
index 8bac9c44..6db7e117 100644
--- a/cloudinit/cs_utils.py
+++ b/cloudinit/cs_utils.py
@@ -24,14 +24,13 @@ import platform
from cloudinit import serial
-
# these high timeouts are necessary as read may read a lot of data.
READ_TIMEOUT = 60
WRITE_TIMEOUT = 10
-SERIAL_PORT = '/dev/ttyS1'
-if platform.system() == 'Windows':
- SERIAL_PORT = 'COM2'
+SERIAL_PORT = "/dev/ttyS1"
+if platform.system() == "Windows":
+ SERIAL_PORT = "COM2"
class Cepko(object):
@@ -39,6 +38,7 @@ class Cepko(object):
One instance of that object could be use for one or more
queries to the serial port.
"""
+
request_pattern = "<\n{}\n>"
def get(self, key="", request_pattern=None):
@@ -64,17 +64,18 @@ class CepkoResult(object):
as the instance is initialized and stores the result in both raw and
marshalled format.
"""
+
def __init__(self, request):
self.request = request
self.raw_result = self._execute()
self.result = self._marshal(self.raw_result)
def _execute(self):
- connection = serial.Serial(port=SERIAL_PORT,
- timeout=READ_TIMEOUT,
- writeTimeout=WRITE_TIMEOUT)
- connection.write(self.request.encode('ascii'))
- return connection.readline().strip(b'\x04\n').decode('ascii')
+ connection = serial.Serial(
+ port=SERIAL_PORT, timeout=READ_TIMEOUT, writeTimeout=WRITE_TIMEOUT
+ )
+ connection.write(self.request.encode("ascii"))
+ return connection.readline().strip(b"\x04\n").decode("ascii")
def _marshal(self, raw_result):
try:
@@ -94,4 +95,5 @@ class CepkoResult(object):
def __iter__(self):
return self.result.__iter__()
+
# vi: ts=4 expandtab
diff --git a/cloudinit/dhclient_hook.py b/cloudinit/dhclient_hook.py
index 72b51b6a..46b2e8d9 100644
--- a/cloudinit/dhclient_hook.py
+++ b/cloudinit/dhclient_hook.py
@@ -19,7 +19,7 @@ EVENTS = (UP, DOWN)
def _get_hooks_dir():
i = stages.Init()
- return os.path.join(i.paths.get_runpath(), 'dhclient.hooks')
+ return os.path.join(i.paths.get_runpath(), "dhclient.hooks")
def _filter_env_vals(info):
@@ -28,15 +28,16 @@ def _filter_env_vals(info):
new_info = {}
for k, v in info.items():
if k.startswith("DHCP4_") or k.startswith("new_"):
- key = (k.replace('DHCP4_', '').replace('new_', '')).lower()
+ key = (k.replace("DHCP4_", "").replace("new_", "")).lower()
new_info[key] = v
return new_info
def run_hook(interface, event, data_d=None, env=None):
if event not in EVENTS:
- raise ValueError("Unexpected event '%s'. Expected one of: %s" %
- (event, EVENTS))
+ raise ValueError(
+ "Unexpected event '%s'. Expected one of: %s" % (event, EVENTS)
+ )
if data_d is None:
data_d = _get_hooks_dir()
if env is None:
@@ -58,9 +59,11 @@ def get_parser(parser=None):
if parser is None:
parser = argparse.ArgumentParser(prog=NAME, description=__doc__)
parser.add_argument(
- "event", help='event taken on the interface', choices=EVENTS)
+ "event", help="event taken on the interface", choices=EVENTS
+ )
parser.add_argument(
- "interface", help='the network interface being acted upon')
+ "interface", help="the network interface being acted upon"
+ )
# cloud-init main uses 'action'
parser.set_defaults(action=(NAME, handle_args))
return parser
@@ -72,12 +75,14 @@ def handle_args(name, args, data_d=None):
return run_hook(interface=args.interface, event=args.event, data_d=data_d)
-if __name__ == '__main__':
+if __name__ == "__main__":
import sys
+
parser = get_parser()
args = parser.parse_args(args=sys.argv[1:])
return_value = handle_args(
- NAME, args, data_d=os.environ.get('_CI_DHCP_HOOK_DATA_D'))
+ NAME, args, data_d=os.environ.get("_CI_DHCP_HOOK_DATA_D")
+ )
if return_value:
sys.exit(return_value)
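
The environment filtering in _filter_env_vals above, run standalone on hypothetical dhclient variables:

info = {
    "DHCP4_IP_ADDRESS": "192.0.2.10",
    "new_subnet_mask": "255.255.255.0",
    "PATH": "/usr/bin",  # unrelated variables are dropped
}
new_info = {}
for k, v in info.items():
    if k.startswith("DHCP4_") or k.startswith("new_"):
        key = (k.replace("DHCP4_", "").replace("new_", "")).lower()
        new_info[key] = v
print(new_info)  # {'ip_address': '192.0.2.10', 'subnet_mask': '255.255.255.0'}
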
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 742804ea..de000b52 100755
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -20,49 +20,49 @@ from typing import Any, Mapping # noqa: F401
from cloudinit import importer
from cloudinit import log as logging
-from cloudinit import net
-from cloudinit.net import activators
-from cloudinit.net import eni
-from cloudinit.net import network_state
-from cloudinit.net import renderers
+from cloudinit import net, persistence, ssh_util, subp, type_utils, util
+from cloudinit.distros.parsers import hosts
+from cloudinit.features import ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES
+from cloudinit.net import activators, eni, network_state, renderers
from cloudinit.net.network_state import parse_net_config_data
-from cloudinit import persistence
-from cloudinit import ssh_util
-from cloudinit import type_utils
-from cloudinit import subp
-from cloudinit import util
-
-from cloudinit.features import \
- ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES
-from cloudinit.distros.parsers import hosts
from .networking import LinuxNetworking
-
# Used when a cloud-config module can be run on all cloud-init distributions.

# The value 'all' is surfaced in module documentation for distro support.
-ALL_DISTROS = 'all'
+ALL_DISTROS = "all"
OSFAMILIES = {
- 'alpine': ['alpine'],
- 'arch': ['arch'],
- 'debian': ['debian', 'ubuntu'],
- 'freebsd': ['freebsd'],
- 'gentoo': ['gentoo'],
- 'redhat': ['almalinux', 'amazon', 'centos', 'cloudlinux', 'eurolinux',
- 'fedora', 'miraclelinux', 'openEuler', 'photon', 'rhel',
- 'rocky', 'virtuozzo'],
- 'suse': ['opensuse', 'sles'],
+ "alpine": ["alpine"],
+ "arch": ["arch"],
+ "debian": ["debian", "ubuntu"],
+ "freebsd": ["freebsd"],
+ "gentoo": ["gentoo"],
+ "redhat": [
+ "almalinux",
+ "amazon",
+ "centos",
+ "cloudlinux",
+ "eurolinux",
+ "fedora",
+ "miraclelinux",
+ "openEuler",
+ "photon",
+ "rhel",
+ "rocky",
+ "virtuozzo",
+ ],
+ "suse": ["opensuse", "sles"],
}
LOG = logging.getLogger(__name__)
# This is a best guess regex, based on current EC2 AZs on 2017-12-11.
# It could break when Amazon adds new regions and new AZs.
-_EC2_AZ_RE = re.compile('^[a-z][a-z]-(?:[a-z]+-)+[0-9][a-z]$')
+_EC2_AZ_RE = re.compile("^[a-z][a-z]-(?:[a-z]+-)+[0-9][a-z]$")
# Default NTP Client Configurations
-PREFERRED_NTP_CLIENTS = ['chrony', 'systemd-timesyncd', 'ntp', 'ntpdate']
+PREFERRED_NTP_CLIENTS = ["chrony", "systemd-timesyncd", "ntp", "ntpdate"]
# Letters/Digits/Hyphen characters, for use in domain name validation
LDH_ASCII_CHARS = string.ascii_letters + string.digits + "-"
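
The availability-zone regex above in action (a best-guess pattern, as the comment notes; the zone names are illustrative):

import re

_EC2_AZ_RE = re.compile("^[a-z][a-z]-(?:[a-z]+-)+[0-9][a-z]$")
print(bool(_EC2_AZ_RE.match("us-east-1b")))       # True
print(bool(_EC2_AZ_RE.match("ap-southeast-2a")))  # True
print(bool(_EC2_AZ_RE.match("us-east-1")))        # False: a region, not an AZ
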
@@ -75,13 +75,13 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
ci_sudoers_fn = "/etc/sudoers.d/90-cloud-init-users"
hostname_conf_fn = "/etc/hostname"
tz_zone_dir = "/usr/share/zoneinfo"
- init_cmd = ['service'] # systemctl, service etc
+ init_cmd = ["service"] # systemctl, service etc
renderer_configs = {} # type: Mapping[str, Mapping[str, Any]]
_preferred_ntp_clients = None
networking_cls = LinuxNetworking
# This is used by self.shutdown_command(), and can be overridden in
# subclasses
- shutdown_options_map = {'halt': '-H', 'poweroff': '-P', 'reboot': '-r'}
+ shutdown_options_map = {"halt": "-H", "poweroff": "-P", "reboot": "-r"}
_ci_pkl_version = 1
prefer_fqdn = False
@@ -113,23 +113,27 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
"""Deprecated. Remove if/when arch and gentoo support renderers."""
raise NotImplementedError(
"Legacy function '_write_network' was called in distro '%s'.\n"
- "_write_network_config needs implementation.\n" % self.name)
+ "_write_network_config needs implementation.\n" % self.name
+ )
def _write_network_state(self, network_state):
priority = util.get_cfg_by_path(
- self._cfg, ('network', 'renderers'), None)
+ self._cfg, ("network", "renderers"), None
+ )
name, render_cls = renderers.select(priority=priority)
- LOG.debug("Selected renderer '%s' from priority list: %s",
- name, priority)
+ LOG.debug(
+ "Selected renderer '%s' from priority list: %s", name, priority
+ )
renderer = render_cls(config=self.renderer_configs.get(name))
renderer.render_network_state(network_state)
def _find_tz_file(self, tz):
tz_file = os.path.join(self.tz_zone_dir, str(tz))
if not os.path.isfile(tz_file):
- raise IOError(("Invalid timezone %s,"
- " no file found at %s") % (tz, tz_file))
+ raise IOError(
+ "Invalid timezone %s, no file found at %s" % (tz, tz_file)
+ )
return tz_file
def get_option(self, opt_name, default=None):
@@ -171,8 +175,9 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
# This resolves the package_mirrors config option
# down to a single dict of {mirror_name: mirror_url}
arch_info = self._get_arch_package_mirror_info(arch)
- return _get_package_mirror_info(data_source=data_source,
- mirror_info=arch_info)
+ return _get_package_mirror_info(
+ data_source=data_source, mirror_info=arch_info
+ )
def apply_network(self, settings, bring_up=True):
"""Deprecated. Remove if/when arch and gentoo support renderers."""
@@ -192,16 +197,21 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
def _apply_network_from_network_config(self, netconfig, bring_up=True):
"""Deprecated. Remove if/when arch and gentoo support renderers."""
distro = self.__class__
- LOG.warning("apply_network_config is not currently implemented "
- "for distribution '%s'. Attempting to use apply_network",
- distro)
- header = '\n'.join([
- "# Converted from network_config for distro %s" % distro,
- "# Implementation of _write_network_config is needed."
- ])
+ LOG.warning(
+ "apply_network_config is not currently implemented "
+ "for distribution '%s'. Attempting to use apply_network",
+ distro,
+ )
+ header = "\n".join(
+ [
+ "# Converted from network_config for distro %s" % distro,
+ "# Implementation of _write_network_config is needed.",
+ ]
+ )
ns = network_state.parse_net_config_data(netconfig)
contents = eni.network_state_to_eni(
- ns, header=header, render_hwaddress=True)
+ ns, header=header, render_hwaddress=True
+ )
return self.apply_network(contents, bring_up=bring_up)
def generate_fallback_config(self):
@@ -224,16 +234,19 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
except NotImplementedError:
# backwards compat until all distros have apply_network_config
return self._apply_network_from_network_config(
- netconfig, bring_up=bring_up)
+ netconfig, bring_up=bring_up
+ )
# Now try to bring them up
if bring_up:
- LOG.debug('Bringing up newly configured network interfaces')
+ LOG.debug("Bringing up newly configured network interfaces")
try:
network_activator = activators.select_activator()
except activators.NoActivatorException:
- LOG.warning("No network activator found, not bringing up "
- "network interfaces")
+ LOG.warning(
+ "No network activator found, not bringing up "
+ "network interfaces"
+ )
return True
network_activator.bring_up_all_interfaces(network_state)
else:
@@ -274,19 +287,27 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
# temporarily (until reboot so it should
# not be depended on). Use the write
# hostname functions for 'permanent' adjustments.
- LOG.debug("Non-persistently setting the system hostname to %s",
- hostname)
+ LOG.debug(
+ "Non-persistently setting the system hostname to %s", hostname
+ )
try:
- subp.subp(['hostname', hostname])
+ subp.subp(["hostname", hostname])
except subp.ProcessExecutionError:
- util.logexc(LOG, "Failed to non-persistently adjust the system "
- "hostname to %s", hostname)
+ util.logexc(
+ LOG,
+ "Failed to non-persistently adjust the system hostname to %s",
+ hostname,
+ )
def _select_hostname(self, hostname, fqdn):
# Prefer the short hostname over the long
# fully qualified domain name
- if util.get_cfg_option_bool(self._cfg, "prefer_fqdn_over_hostname",
- self.prefer_fqdn) and fqdn:
+ if (
+ util.get_cfg_option_bool(
+ self._cfg, "prefer_fqdn_over_hostname", self.prefer_fqdn
+ )
+ and fqdn
+ ):
return fqdn
if not hostname:
return fqdn
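
The hostname preference logic above reduces to this sketch (the real code reads prefer_fqdn_over_hostname from the distro configuration):

def select_hostname(hostname, fqdn, prefer_fqdn=False):
    # Prefer the short hostname unless configuration (or a missing short
    # name) forces the fully qualified domain name.
    if prefer_fqdn and fqdn:
        return fqdn
    if not hostname:
        return fqdn
    return hostname


print(select_hostname("node1", "node1.example.com"))                    # node1
print(select_hostname("node1", "node1.example.com", prefer_fqdn=True))  # node1.example.com
print(select_hostname("", "node1.example.com"))                         # node1.example.com
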
@@ -329,32 +350,39 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
# If the system hostname is different than the previous
# one or the desired one lets update it as well
- if ((not sys_hostname) or (sys_hostname == prev_hostname and
- sys_hostname != hostname)):
+ if (not sys_hostname) or (
+ sys_hostname == prev_hostname and sys_hostname != hostname
+ ):
update_files.append(sys_fn)
# If something else has changed the hostname after we set it
# initially, we should not overwrite those changes (we should
# only be setting the hostname once per instance)
- if (sys_hostname and prev_hostname and
- sys_hostname != prev_hostname):
- LOG.info("%s differs from %s, assuming user maintained hostname.",
- prev_hostname_fn, sys_fn)
+ if sys_hostname and prev_hostname and sys_hostname != prev_hostname:
+ LOG.info(
+ "%s differs from %s, assuming user maintained hostname.",
+ prev_hostname_fn,
+ sys_fn,
+ )
return
        # Remove duplicates (in case the previous config filename)
# is the same as the system config filename, don't bother
# doing it twice
update_files = set([f for f in update_files if f])
- LOG.debug("Attempting to update hostname to %s in %s files",
- hostname, len(update_files))
+ LOG.debug(
+ "Attempting to update hostname to %s in %s files",
+ hostname,
+ len(update_files),
+ )
for fn in update_files:
try:
self._write_hostname(hostname, fn)
except IOError:
- util.logexc(LOG, "Failed to write hostname %s to %s", hostname,
- fn)
+ util.logexc(
+ LOG, "Failed to write hostname %s to %s", hostname, fn
+ )
# If the system hostname file name was provided set the
# non-fqdn as the transient hostname.
@@ -362,11 +390,11 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
self._apply_hostname(applying_hostname)
def update_etc_hosts(self, hostname, fqdn):
- header = ''
+ header = ""
if os.path.exists(self.hosts_fn):
eh = hosts.HostsConf(util.load_file(self.hosts_fn))
else:
- eh = hosts.HostsConf('')
+ eh = hosts.HostsConf("")
header = util.make_header(base="added")
local_ip = self._get_localhost_ip()
prev_info = eh.get_entry(local_ip)
@@ -427,7 +455,7 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
return False
def get_default_user(self):
- return self.get_option('default_user')
+ return self.get_option("default_user")
def add_user(self, name, **kwargs):
"""
@@ -443,43 +471,43 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
LOG.info("User %s already exists, skipping.", name)
return
- if 'create_groups' in kwargs:
- create_groups = kwargs.pop('create_groups')
+ if "create_groups" in kwargs:
+ create_groups = kwargs.pop("create_groups")
else:
create_groups = True
- useradd_cmd = ['useradd', name]
- log_useradd_cmd = ['useradd', name]
+ useradd_cmd = ["useradd", name]
+ log_useradd_cmd = ["useradd", name]
if util.system_is_snappy():
- useradd_cmd.append('--extrausers')
- log_useradd_cmd.append('--extrausers')
+ useradd_cmd.append("--extrausers")
+ log_useradd_cmd.append("--extrausers")
# Since we are creating users, we want to carefully validate the
# inputs. If something goes wrong, we can end up with a system
# that nobody can login to.
useradd_opts = {
- "gecos": '--comment',
- "homedir": '--home',
- "primary_group": '--gid',
- "uid": '--uid',
- "groups": '--groups',
- "passwd": '--password',
- "shell": '--shell',
- "expiredate": '--expiredate',
- "inactive": '--inactive',
- "selinux_user": '--selinux-user',
+ "gecos": "--comment",
+ "homedir": "--home",
+ "primary_group": "--gid",
+ "uid": "--uid",
+ "groups": "--groups",
+ "passwd": "--password",
+ "shell": "--shell",
+ "expiredate": "--expiredate",
+ "inactive": "--inactive",
+ "selinux_user": "--selinux-user",
}
useradd_flags = {
- "no_user_group": '--no-user-group',
- "system": '--system',
- "no_log_init": '--no-log-init',
+ "no_user_group": "--no-user-group",
+ "system": "--system",
+ "no_log_init": "--no-log-init",
}
- redact_opts = ['passwd']
+ redact_opts = ["passwd"]
# support kwargs having groups=[list] or groups="g1,g2"
- groups = kwargs.get('groups')
+ groups = kwargs.get("groups")
if groups:
if isinstance(groups, str):
groups = groups.split(",")
@@ -490,9 +518,9 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
            # kwargs.items loop below wants a comma delimited string
# that can go right through to the command.
- kwargs['groups'] = ",".join(groups)
+ kwargs["groups"] = ",".join(groups)
- primary_group = kwargs.get('primary_group')
+ primary_group = kwargs.get("primary_group")
if primary_group:
groups.append(primary_group)
@@ -510,7 +538,7 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
# Redact certain fields from the logs
if key in redact_opts:
- log_useradd_cmd.extend([useradd_opts[key], 'REDACTED'])
+ log_useradd_cmd.extend([useradd_opts[key], "REDACTED"])
else:
log_useradd_cmd.extend([useradd_opts[key], val])
@@ -520,12 +548,12 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
# Don't create the home directory if directed so or if the user is a
# system user
- if kwargs.get('no_create_home') or kwargs.get('system'):
- useradd_cmd.append('-M')
- log_useradd_cmd.append('-M')
+ if kwargs.get("no_create_home") or kwargs.get("system"):
+ useradd_cmd.append("-M")
+ log_useradd_cmd.append("-M")
else:
- useradd_cmd.append('-m')
- log_useradd_cmd.append('-m')
+ useradd_cmd.append("-m")
+ log_useradd_cmd.append("-m")
# Run the command
LOG.debug("Adding user %s", name)
@@ -540,8 +568,8 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
Add a snappy user to the system using snappy tools
"""
- snapuser = kwargs.get('snapuser')
- known = kwargs.get('known', False)
+ snapuser = kwargs.get("snapuser")
+ known = kwargs.get("known", False)
create_user_cmd = ["snap", "create-user", "--sudoer", "--json"]
if known:
create_user_cmd.append("--known")
@@ -550,11 +578,12 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
# Run the command
LOG.debug("Adding snap user %s", name)
try:
- (out, err) = subp.subp(create_user_cmd, logstring=create_user_cmd,
- capture=True)
+ (out, err) = subp.subp(
+ create_user_cmd, logstring=create_user_cmd, capture=True
+ )
LOG.debug("snap create-user returned: %s:%s", out, err)
jobj = util.load_json(out)
- username = jobj.get('username', None)
+ username = jobj.get("username", None)
except Exception as e:
util.logexc(LOG, "Failed to create snap user %s", name)
raise e
@@ -582,60 +611,66 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
"""
# Add a snap user, if requested
- if 'snapuser' in kwargs:
+ if "snapuser" in kwargs:
return self.add_snap_user(name, **kwargs)
# Add the user
self.add_user(name, **kwargs)
# Set password if plain-text password provided and non-empty
- if 'plain_text_passwd' in kwargs and kwargs['plain_text_passwd']:
- self.set_passwd(name, kwargs['plain_text_passwd'])
+ if "plain_text_passwd" in kwargs and kwargs["plain_text_passwd"]:
+ self.set_passwd(name, kwargs["plain_text_passwd"])
# Set password if hashed password is provided and non-empty
- if 'hashed_passwd' in kwargs and kwargs['hashed_passwd']:
- self.set_passwd(name, kwargs['hashed_passwd'], hashed=True)
+ if "hashed_passwd" in kwargs and kwargs["hashed_passwd"]:
+ self.set_passwd(name, kwargs["hashed_passwd"], hashed=True)
# Default locking down the account. 'lock_passwd' defaults to True.
# lock account unless lock_password is False.
- if kwargs.get('lock_passwd', True):
+ if kwargs.get("lock_passwd", True):
self.lock_passwd(name)
# Configure sudo access
- if 'sudo' in kwargs and kwargs['sudo'] is not False:
- self.write_sudo_rules(name, kwargs['sudo'])
+ if "sudo" in kwargs and kwargs["sudo"] is not False:
+ self.write_sudo_rules(name, kwargs["sudo"])
# Import SSH keys
- if 'ssh_authorized_keys' in kwargs:
+ if "ssh_authorized_keys" in kwargs:
# Try to handle this in a smart manner.
- keys = kwargs['ssh_authorized_keys']
+ keys = kwargs["ssh_authorized_keys"]
if isinstance(keys, str):
keys = [keys]
elif isinstance(keys, dict):
keys = list(keys.values())
if keys is not None:
if not isinstance(keys, (tuple, list, set)):
- LOG.warning("Invalid type '%s' detected for"
- " 'ssh_authorized_keys', expected list,"
- " string, dict, or set.", type(keys))
+ LOG.warning(
+ "Invalid type '%s' detected for"
+ " 'ssh_authorized_keys', expected list,"
+ " string, dict, or set.",
+ type(keys),
+ )
keys = []
else:
keys = set(keys) or []
ssh_util.setup_user_keys(set(keys), name)
- if 'ssh_redirect_user' in kwargs:
- cloud_keys = kwargs.get('cloud_public_ssh_keys', [])
+ if "ssh_redirect_user" in kwargs:
+ cloud_keys = kwargs.get("cloud_public_ssh_keys", [])
if not cloud_keys:
LOG.warning(
- 'Unable to disable SSH logins for %s given'
- ' ssh_redirect_user: %s. No cloud public-keys present.',
- name, kwargs['ssh_redirect_user'])
+ "Unable to disable SSH logins for %s given"
+ " ssh_redirect_user: %s. No cloud public-keys present.",
+ name,
+ kwargs["ssh_redirect_user"],
+ )
else:
- redirect_user = kwargs['ssh_redirect_user']
+ redirect_user = kwargs["ssh_redirect_user"]
disable_option = ssh_util.DISABLE_USER_OPTS
- disable_option = disable_option.replace('$USER', redirect_user)
- disable_option = disable_option.replace('$DISABLE_USER', name)
+ disable_option = disable_option.replace("$USER", redirect_user)
+ disable_option = disable_option.replace("$DISABLE_USER", name)
ssh_util.setup_user_keys(
- set(cloud_keys), name, options=disable_option)
+ set(cloud_keys), name, options=disable_option
+ )
return True
def lock_passwd(self, name):
@@ -643,36 +678,36 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
Lock the password of a user, i.e., disable password logins
"""
# passwd must use short '-l' due to SLES11 lacking long form '--lock'
- lock_tools = (['passwd', '-l', name], ['usermod', '--lock', name])
+ lock_tools = (["passwd", "-l", name], ["usermod", "--lock", name])
try:
cmd = next(tool for tool in lock_tools if subp.which(tool[0]))
except StopIteration as e:
- raise RuntimeError((
+ raise RuntimeError(
"Unable to lock user account '%s'. No tools available. "
- " Tried: %s.") % (name, [c[0] for c in lock_tools])
+ " Tried: %s." % (name, [c[0] for c in lock_tools])
) from e
try:
subp.subp(cmd)
except Exception as e:
- util.logexc(LOG, 'Failed to disable password for user %s', name)
+ util.logexc(LOG, "Failed to disable password for user %s", name)
raise e
def expire_passwd(self, user):
try:
- subp.subp(['passwd', '--expire', user])
+ subp.subp(["passwd", "--expire", user])
except Exception as e:
util.logexc(LOG, "Failed to set 'expire' for %s", user)
raise e
def set_passwd(self, user, passwd, hashed=False):
- pass_string = '%s:%s' % (user, passwd)
- cmd = ['chpasswd']
+ pass_string = "%s:%s" % (user, passwd)
+ cmd = ["chpasswd"]
if hashed:
# Need to use the short option name '-e' instead of '--encrypted'
# (which would be more descriptive) since SLES 11 doesn't know
# about long names.
- cmd.append('-e')
+ cmd.append("-e")
try:
subp.subp(cmd, pass_string, logstring="chpasswd for %s" % user)
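
How the chpasswd invocation above is assembled ('-e' is the short form of '--encrypted', kept because SLES 11 lacks the long option); the values are hypothetical, and the user:password pair is passed on stdin rather than the command line:

def build_chpasswd(user, passwd, hashed=False):
    pass_string = "%s:%s" % (user, passwd)
    cmd = ["chpasswd"]
    if hashed:
        cmd.append("-e")
    return cmd, pass_string


print(build_chpasswd("ubuntu", "$6$exampleSaltedHash", hashed=True))
# (['chpasswd', '-e'], 'ubuntu:$6$exampleSaltedHash')
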
@@ -682,10 +717,10 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
return True
- def ensure_sudo_dir(self, path, sudo_base='/etc/sudoers'):
+ def ensure_sudo_dir(self, path, sudo_base="/etc/sudoers"):
# Ensure the dir is included and that
# it actually exists as a directory
- sudoers_contents = ''
+ sudoers_contents = ""
base_exists = False
if os.path.exists(sudo_base):
sudoers_contents = util.load_file(sudo_base)
@@ -706,15 +741,23 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
if not found_include:
try:
if not base_exists:
- lines = [('# See sudoers(5) for more information'
- ' on "#include" directives:'), '',
- util.make_header(base="added"),
- "#includedir %s" % (path), '']
+ lines = [
+ "# See sudoers(5) for more information"
+ ' on "#include" directives:',
+ "",
+ util.make_header(base="added"),
+ "#includedir %s" % (path),
+ "",
+ ]
sudoers_contents = "\n".join(lines)
util.write_file(sudo_base, sudoers_contents, 0o440)
else:
- lines = ['', util.make_header(base="added"),
- "#includedir %s" % (path), '']
+ lines = [
+ "",
+ util.make_header(base="added"),
+ "#includedir %s" % (path),
+ "",
+ ]
sudoers_contents = "\n".join(lines)
util.append_file(sudo_base, sudoers_contents)
LOG.debug("Added '#includedir %s' to %s", path, sudo_base)
@@ -728,7 +771,7 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
sudo_file = self.ci_sudoers_fn
lines = [
- '',
+ "",
"# User rules for %s" % user,
]
if isinstance(rules, (list, tuple)):
@@ -761,9 +804,9 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
raise e
def create_group(self, name, members=None):
- group_add_cmd = ['groupadd', name]
+ group_add_cmd = ["groupadd", name]
if util.system_is_snappy():
- group_add_cmd.append('--extrausers')
+ group_add_cmd.append("--extrausers")
if not members:
members = []
@@ -781,11 +824,15 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
if len(members) > 0:
for member in members:
if not util.is_user(member):
- LOG.warning("Unable to add group member '%s' to group '%s'"
- "; user does not exist.", member, name)
+ LOG.warning(
+ "Unable to add group member '%s' to group '%s'"
+ "; user does not exist.",
+ member,
+ name,
+ )
continue
- subp.subp(['usermod', '-a', '-G', name, member])
+ subp.subp(["usermod", "-a", "-G", name, member])
LOG.info("Added user '%s' to group '%s'", member, name)
def shutdown_command(self, *, mode, delay, message):
@@ -812,23 +859,25 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
May raise ProcessExecutionError
"""
init_cmd = self.init_cmd
- if self.uses_systemd() or 'systemctl' in init_cmd:
- init_cmd = ['systemctl']
- cmds = {'stop': ['stop', service],
- 'start': ['start', service],
- 'enable': ['enable', service],
- 'restart': ['restart', service],
- 'reload': ['reload-or-restart', service],
- 'try-reload': ['reload-or-try-restart', service],
- }
+ if self.uses_systemd() or "systemctl" in init_cmd:
+ init_cmd = ["systemctl"]
+ cmds = {
+ "stop": ["stop", service],
+ "start": ["start", service],
+ "enable": ["enable", service],
+ "restart": ["restart", service],
+ "reload": ["reload-or-restart", service],
+ "try-reload": ["reload-or-try-restart", service],
+ }
else:
- cmds = {'stop': [service, 'stop'],
- 'start': [service, 'start'],
- 'enable': [service, 'start'],
- 'restart': [service, 'restart'],
- 'reload': [service, 'restart'],
- 'try-reload': [service, 'restart'],
- }
+ cmds = {
+ "stop": [service, "stop"],
+ "start": [service, "start"],
+ "enable": [service, "start"],
+ "restart": [service, "restart"],
+ "reload": [service, "restart"],
+ "try-reload": [service, "restart"],
+ }
cmd = list(init_cmd) + list(cmds[action])
return subp.subp(cmd, capture=True)
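
The systemd branch of manage_service above, reduced to a standalone helper (the non-systemd branch instead builds ['<service>', '<action>'] style commands):

def systemd_service_cmd(action, service):
    cmds = {
        "stop": ["stop", service],
        "start": ["start", service],
        "enable": ["enable", service],
        "restart": ["restart", service],
        "reload": ["reload-or-restart", service],
        "try-reload": ["reload-or-try-restart", service],
    }
    return ["systemctl"] + cmds[action]


print(systemd_service_cmd("try-reload", "ssh"))
# ['systemctl', 'reload-or-try-restart', 'ssh']
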
@@ -919,27 +968,25 @@ def _sanitize_mirror_url(url: str):
# This is an IP address, not a hostname, so no need to apply the
# transformations
lambda hostname: None if net.is_ip_address(hostname) else hostname,
-
# Encode with IDNA to get the correct characters (as `bytes`), then
# decode with ASCII so we return a `str`
- lambda hostname: hostname.encode('idna').decode('ascii'),
-
+ lambda hostname: hostname.encode("idna").decode("ascii"),
# Replace any unacceptable characters with "-"
- lambda hostname: ''.join(
+ lambda hostname: "".join(
c if c in acceptable_chars else "-" for c in hostname
),
-
# Drop leading/trailing hyphens from each part of the hostname
- lambda hostname: '.'.join(
- part.strip('-') for part in hostname.split('.')
+ lambda hostname: ".".join(
+ part.strip("-") for part in hostname.split(".")
),
]
return _apply_hostname_transformations_to_url(url, transformations)
-def _get_package_mirror_info(mirror_info, data_source=None,
- mirror_filter=util.search_for_mirror):
+def _get_package_mirror_info(
+ mirror_info, data_source=None, mirror_filter=util.search_for_mirror
+):
    # given an arch specific 'mirror_info' entry (from package_mirrors)
# search through the 'search' entries, and fallback appropriately
# return a dict with only {name: mirror} entries.
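
Skipping the IDNA step, the last two hostname transformations in _sanitize_mirror_url above behave like this (acceptable_chars is assumed to be letters, digits, '-' and '.', and the hostname is hypothetical):

import string

acceptable_chars = string.ascii_letters + string.digits + "-."
hostname = "us_east.mirror-.example"  # junk characters on purpose

# Replace any unacceptable character with "-"
hostname = "".join(c if c in acceptable_chars else "-" for c in hostname)
# Drop leading/trailing hyphens from each label
hostname = ".".join(part.strip("-") for part in hostname.split("."))
print(hostname)  # us-east.mirror.example
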
@@ -948,7 +995,7 @@ def _get_package_mirror_info(mirror_info, data_source=None,
subst = {}
if data_source and data_source.availability_zone:
- subst['availability_zone'] = data_source.availability_zone
+ subst["availability_zone"] = data_source.availability_zone
# ec2 availability zones are named cc-direction-[0-9][a-d] (us-east-1b)
# the region is us-east-1. so region = az[0:-1]
@@ -956,18 +1003,18 @@ def _get_package_mirror_info(mirror_info, data_source=None,
ec2_region = data_source.availability_zone[0:-1]
if ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES:
- subst['ec2_region'] = "%s" % ec2_region
+ subst["ec2_region"] = "%s" % ec2_region
elif data_source.platform_type == "ec2":
- subst['ec2_region'] = "%s" % ec2_region
+ subst["ec2_region"] = "%s" % ec2_region
if data_source and data_source.region:
- subst['region'] = data_source.region
+ subst["region"] = data_source.region
results = {}
- for (name, mirror) in mirror_info.get('failsafe', {}).items():
+ for (name, mirror) in mirror_info.get("failsafe", {}).items():
results[name] = mirror
- for (name, searchlist) in mirror_info.get('search', {}).items():
+ for (name, searchlist) in mirror_info.get("search", {}).items():
mirrors = []
for tmpl in searchlist:
try:
@@ -1001,17 +1048,20 @@ def _get_arch_package_mirror_info(package_mirrors, arch):
def fetch(name):
- locs, looked_locs = importer.find_module(name, ['', __name__], ['Distro'])
+ locs, looked_locs = importer.find_module(name, ["", __name__], ["Distro"])
if not locs:
- raise ImportError("No distribution found for distro %s (searched %s)"
- % (name, looked_locs))
+ raise ImportError(
+ "No distribution found for distro %s (searched %s)"
+ % (name, looked_locs)
+ )
mod = importer.import_module(locs[0])
- cls = getattr(mod, 'Distro')
+ cls = getattr(mod, "Distro")
return cls
-def set_etc_timezone(tz, tz_file=None, tz_conf="/etc/timezone",
- tz_local="/etc/localtime"):
+def set_etc_timezone(
+ tz, tz_file=None, tz_conf="/etc/timezone", tz_local="/etc/localtime"
+):
util.write_file(tz_conf, str(tz).rstrip() + "\n")
# This ensures that the correct tz will be used for the system
if tz_local and tz_file:
@@ -1028,7 +1078,7 @@ def set_etc_timezone(tz, tz_file=None, tz_conf="/etc/timezone",
def uses_systemd():
try:
- res = os.lstat('/run/systemd/system')
+ res = os.lstat("/run/systemd/system")
return stat.S_ISDIR(res.st_mode)
except Exception:
return False
diff --git a/cloudinit/distros/almalinux.py b/cloudinit/distros/almalinux.py
index edb3165d..3dc0a342 100644
--- a/cloudinit/distros/almalinux.py
+++ b/cloudinit/distros/almalinux.py
@@ -6,4 +6,5 @@ from cloudinit.distros import rhel
class Distro(rhel.Distro):
pass
+
# vi: ts=4 expandtab
diff --git a/cloudinit/distros/alpine.py b/cloudinit/distros/alpine.py
index e82965fd..3d7d4891 100644
--- a/cloudinit/distros/alpine.py
+++ b/cloudinit/distros/alpine.py
@@ -6,13 +6,8 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import subp
-from cloudinit import util
-
+from cloudinit import distros, helpers, subp, util
from cloudinit.distros.parsers.hostname import HostnameConf
-
from cloudinit.settings import PER_INSTANCE
NETWORK_FILE_HEADER = """\
@@ -26,12 +21,11 @@ NETWORK_FILE_HEADER = """\
class Distro(distros.Distro):
- init_cmd = ['rc-service'] # init scripts
+ init_cmd = ["rc-service"] # init scripts
locale_conf_fn = "/etc/profile.d/locale.sh"
network_conf_fn = "/etc/network/interfaces"
renderer_configs = {
- "eni": {"eni_path": network_conf_fn,
- "eni_header": NETWORK_FILE_HEADER}
+ "eni": {"eni_path": network_conf_fn, "eni_header": NETWORK_FILE_HEADER}
}
def __init__(self, name, cfg, paths):
@@ -40,13 +34,13 @@ class Distro(distros.Distro):
        # calls from repeatedly happening (when they
# should only happen say once per instance...)
self._runner = helpers.Runners(paths)
- self.default_locale = 'C.UTF-8'
- self.osfamily = 'alpine'
- cfg['ssh_svcname'] = 'sshd'
+ self.default_locale = "C.UTF-8"
+ self.osfamily = "alpine"
+ cfg["ssh_svcname"] = "sshd"
def get_locale(self):
"""The default locale for Alpine Linux is different than
- cloud-init's DataSource default.
+ cloud-init's DataSource default.
"""
return self.default_locale
@@ -71,7 +65,7 @@ class Distro(distros.Distro):
def install_packages(self, pkglist):
self.update_package_sources()
- self.package_command('add', pkgs=pkglist)
+ self.package_command("add", pkgs=pkglist)
def _write_hostname(self, hostname, filename):
conf = None
@@ -82,7 +76,7 @@ class Distro(distros.Distro):
except IOError:
pass
if not conf:
- conf = HostnameConf('')
+ conf = HostnameConf("")
conf.set_hostname(hostname)
util.write_file(filename, str(conf), 0o644)
@@ -116,7 +110,7 @@ class Distro(distros.Distro):
if pkgs is None:
pkgs = []
- cmd = ['apk']
+ cmd = ["apk"]
# Redirect output
cmd.append("--quiet")
@@ -128,28 +122,32 @@ class Distro(distros.Distro):
if command:
cmd.append(command)
- if command == 'upgrade':
+ if command == "upgrade":
cmd.extend(["--update-cache", "--available"])
- pkglist = util.expand_package_list('%s-%s', pkgs)
+ pkglist = util.expand_package_list("%s-%s", pkgs)
cmd.extend(pkglist)
# Allow the output of this to flow outwards (ie not be captured)
subp.subp(cmd, capture=False)
def update_package_sources(self):
- self._runner.run("update-sources", self.package_command,
- ["update"], freq=PER_INSTANCE)
+ self._runner.run(
+ "update-sources",
+ self.package_command,
+ ["update"],
+ freq=PER_INSTANCE,
+ )
@property
def preferred_ntp_clients(self):
"""Allow distro to determine the preferred ntp client list"""
if not self._preferred_ntp_clients:
- self._preferred_ntp_clients = ['chrony', 'ntp']
+ self._preferred_ntp_clients = ["chrony", "ntp"]
return self._preferred_ntp_clients
- def shutdown_command(self, mode='poweroff', delay='now', message=None):
+ def shutdown_command(self, mode="poweroff", delay="now", message=None):
# called from cc_power_state_change.load_power_state
# Alpine has halt/poweroff/reboot, with the following specifics:
# - we use them rather than the generic "shutdown"
@@ -163,7 +161,7 @@ class Distro(distros.Distro):
# halt/poweroff/reboot commands take seconds rather than minutes.
if delay == "now":
# Alpine's commands do not understand "now".
- command += ['0']
+ command += ["0"]
else:
try:
command.append(str(int(delay) * 60))
@@ -175,4 +173,5 @@ class Distro(distros.Distro):
return command
+
# vi: ts=4 expandtab
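
The delay handling in the Alpine shutdown_command above, as a standalone sketch (halt/poweroff/reboot take seconds, while the generic shutdown used elsewhere takes minutes):

def alpine_delay_arg(delay):
    # Alpine's halt/poweroff/reboot do not understand "now" and expect seconds.
    if delay == "now":
        return "0"
    return str(int(delay) * 60)


print(alpine_delay_arg("now"))  # 0
print(alpine_delay_arg("5"))    # 300
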
diff --git a/cloudinit/distros/amazon.py b/cloudinit/distros/amazon.py
index 5fcec952..a3573547 100644
--- a/cloudinit/distros/amazon.py
+++ b/cloudinit/distros/amazon.py
@@ -14,7 +14,6 @@ from cloudinit.distros import rhel
class Distro(rhel.Distro):
-
def update_package_sources(self):
return None
diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py
index 3c5bbb38..0bdfef83 100644
--- a/cloudinit/distros/arch.py
+++ b/cloudinit/distros/arch.py
@@ -4,32 +4,29 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import log as logging
-from cloudinit import util
-from cloudinit import subp
+import os
+from cloudinit import distros, helpers
+from cloudinit import log as logging
+from cloudinit import subp, util
from cloudinit.distros import net_util
from cloudinit.distros.parsers.hostname import HostnameConf
-
from cloudinit.net.renderers import RendererNotFoundError
-
from cloudinit.settings import PER_INSTANCE
-import os
-
LOG = logging.getLogger(__name__)
class Distro(distros.Distro):
locale_gen_fn = "/etc/locale.gen"
network_conf_dir = "/etc/netctl"
- init_cmd = ['systemctl'] # init scripts
+ init_cmd = ["systemctl"] # init scripts
renderer_configs = {
- "netplan": {"netplan_path": "/etc/netplan/50-cloud-init.yaml",
- "netplan_header": "# generated by cloud-init\n",
- "postcmds": True}
+ "netplan": {
+ "netplan_path": "/etc/netplan/50-cloud-init.yaml",
+ "netplan_header": "# generated by cloud-init\n",
+ "postcmds": True,
+ }
}
def __init__(self, name, cfg, paths):
@@ -38,28 +35,31 @@ class Distro(distros.Distro):
        # calls from repeatedly happening (when they
# should only happen say once per instance...)
self._runner = helpers.Runners(paths)
- self.osfamily = 'arch'
- cfg['ssh_svcname'] = 'sshd'
+ self.osfamily = "arch"
+ cfg["ssh_svcname"] = "sshd"
def apply_locale(self, locale, out_fn=None):
if out_fn is not None and out_fn != "/etc/locale.conf":
- LOG.warning("Invalid locale_configfile %s, only supported "
- "value is /etc/locale.conf", out_fn)
+ LOG.warning(
+ "Invalid locale_configfile %s, only supported "
+ "value is /etc/locale.conf",
+ out_fn,
+ )
lines = [
util.make_header(),
# Hard-coding the charset isn't ideal, but there is no other way.
- '%s UTF-8' % (locale),
+ "%s UTF-8" % (locale),
"",
]
util.write_file(self.locale_gen_fn, "\n".join(lines))
- subp.subp(['locale-gen'], capture=False)
+ subp.subp(["locale-gen"], capture=False)
# In the future systemd can handle locale-gen stuff:
# https://github.com/systemd/systemd/pull/9864
- subp.subp(['localectl', 'set-locale', locale], capture=False)
+ subp.subp(["localectl", "set-locale", locale], capture=False)
def install_packages(self, pkglist):
self.update_package_sources()
- self.package_command('', pkgs=pkglist)
+ self.package_command("", pkgs=pkglist)
def _write_network_state(self, network_state):
try:
@@ -70,32 +70,42 @@ class Distro(distros.Distro):
def _write_network(self, settings):
entries = net_util.translate_network(settings)
- LOG.debug("Translated ubuntu style network settings %s into %s",
- settings, entries)
+ LOG.debug(
+ "Translated ubuntu style network settings %s into %s",
+ settings,
+ entries,
+ )
return _render_network(
- entries, resolv_conf=self.resolve_conf_fn,
+ entries,
+ resolv_conf=self.resolve_conf_fn,
conf_dir=self.network_conf_dir,
- enable_func=self._enable_interface)
+ enable_func=self._enable_interface,
+ )
def _enable_interface(self, device_name):
- cmd = ['netctl', 'reenable', device_name]
+ cmd = ["netctl", "reenable", device_name]
try:
(_out, err) = subp.subp(cmd)
if len(err):
- LOG.warning("Running %s resulted in stderr output: %s",
- cmd, err)
+ LOG.warning(
+ "Running %s resulted in stderr output: %s", cmd, err
+ )
except subp.ProcessExecutionError:
util.logexc(LOG, "Running interface command %s failed", cmd)
def _bring_up_interface(self, device_name):
- cmd = ['netctl', 'restart', device_name]
- LOG.debug("Attempting to run bring up interface %s using command %s",
- device_name, cmd)
+ cmd = ["netctl", "restart", device_name]
+ LOG.debug(
+ "Attempting to run bring up interface %s using command %s",
+ device_name,
+ cmd,
+ )
try:
(_out, err) = subp.subp(cmd)
if len(err):
- LOG.warning("Running %s resulted in stderr output: %s",
- cmd, err)
+ LOG.warning(
+ "Running %s resulted in stderr output: %s", cmd, err
+ )
return True
except subp.ProcessExecutionError:
util.logexc(LOG, "Running interface command %s failed", cmd)
@@ -110,7 +120,7 @@ class Distro(distros.Distro):
except IOError:
pass
if not conf:
- conf = HostnameConf('')
+ conf = HostnameConf("")
conf.set_hostname(hostname)
util.write_file(filename, str(conf), omode="w", mode=0o644)
@@ -137,13 +147,17 @@ class Distro(distros.Distro):
# hostname (inetutils) isn't installed per default on arch, so we use
# hostnamectl which is installed per default (systemd).
def _apply_hostname(self, hostname):
- LOG.debug("Non-persistently setting the system hostname to %s",
- hostname)
+ LOG.debug(
+ "Non-persistently setting the system hostname to %s", hostname
+ )
try:
- subp.subp(['hostnamectl', '--transient', 'set-hostname', hostname])
+ subp.subp(["hostnamectl", "--transient", "set-hostname", hostname])
except subp.ProcessExecutionError:
- util.logexc(LOG, "Failed to non-persistently adjust the system "
- "hostname to %s", hostname)
+ util.logexc(
+ LOG,
+ "Failed to non-persistently adjust the system hostname to %s",
+ hostname,
+ )
def set_timezone(self, tz):
distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz))
@@ -152,7 +166,7 @@ class Distro(distros.Distro):
if pkgs is None:
pkgs = []
- cmd = ['pacman', "-Sy", "--quiet", "--noconfirm"]
+ cmd = ["pacman", "-Sy", "--quiet", "--noconfirm"]
# Redirect output
if args and isinstance(args, str):
@@ -165,19 +179,25 @@ class Distro(distros.Distro):
if command:
cmd.append(command)
- pkglist = util.expand_package_list('%s-%s', pkgs)
+ pkglist = util.expand_package_list("%s-%s", pkgs)
cmd.extend(pkglist)
# Allow the output of this to flow outwards (ie not be captured)
subp.subp(cmd, capture=False)
def update_package_sources(self):
- self._runner.run("update-sources", self.package_command,
- ["-y"], freq=PER_INSTANCE)
-
-
-def _render_network(entries, target="/", conf_dir="etc/netctl",
- resolv_conf="etc/resolv.conf", enable_func=None):
+ self._runner.run(
+ "update-sources", self.package_command, ["-y"], freq=PER_INSTANCE
+ )
+
+
+def _render_network(
+ entries,
+ target="/",
+ conf_dir="etc/netctl",
+ resolv_conf="etc/resolv.conf",
+ enable_func=None,
+):
"""Render the translate_network format into netctl files in target.
Paths will be rendered under target.
"""
@@ -188,29 +208,27 @@ def _render_network(entries, target="/", conf_dir="etc/netctl",
conf_dir = subp.target_path(target, conf_dir)
for (dev, info) in entries.items():
- if dev == 'lo':
+ if dev == "lo":
# no configuration should be rendered for 'lo'
continue
devs.append(dev)
net_fn = os.path.join(conf_dir, dev)
net_cfg = {
- 'Connection': 'ethernet',
- 'Interface': dev,
- 'IP': info.get('bootproto'),
- 'Address': "%s/%s" % (info.get('address'),
- info.get('netmask')),
- 'Gateway': info.get('gateway'),
- 'DNS': info.get('dns-nameservers', []),
+ "Connection": "ethernet",
+ "Interface": dev,
+ "IP": info.get("bootproto"),
+ "Address": "%s/%s" % (info.get("address"), info.get("netmask")),
+ "Gateway": info.get("gateway"),
+ "DNS": info.get("dns-nameservers", []),
}
util.write_file(net_fn, convert_netctl(net_cfg))
- if enable_func and info.get('auto'):
+ if enable_func and info.get("auto"):
enable_func(dev)
- if 'dns-nameservers' in info:
- nameservers.extend(info['dns-nameservers'])
+ if "dns-nameservers" in info:
+ nameservers.extend(info["dns-nameservers"])
if nameservers:
- util.write_file(resolv_conf,
- convert_resolv_conf(nameservers))
+ util.write_file(resolv_conf, convert_resolv_conf(nameservers))
return devs
@@ -227,17 +245,18 @@ def convert_netctl(settings):
if val is None:
val = ""
elif isinstance(val, (tuple, list)):
- val = "(" + ' '.join("'%s'" % v for v in val) + ")"
+ val = "(" + " ".join("'%s'" % v for v in val) + ")"
result.append("%s=%s\n" % (key, val))
- return ''.join(result)
+ return "".join(result)
def convert_resolv_conf(settings):
"""Returns a settings string formatted for resolv.conf."""
- result = ''
+ result = ""
if isinstance(settings, list):
for ns in settings:
- result = result + 'nameserver %s\n' % ns
+ result = result + "nameserver %s\n" % ns
return result
+
# vi: ts=4 expandtab
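Note on the netctl rendering in arch.py above: _render_network writes one profile per device and convert_netctl serializes each profile dict into netctl's key=value syntax, with list values rendered as shell-style arrays. A minimal, standalone Python sketch of that serialization with a hypothetical eth0 profile (the helper in the diff is the authoritative version):

def render_netctl_profile(settings):
    # Emit key=value lines; list values become netctl array syntax,
    # e.g. DNS=('192.0.2.53'), matching the convert_netctl helper above.
    out = []
    for key, val in settings.items():
        if val is None:
            val = ""
        elif isinstance(val, (tuple, list)):
            val = "(" + " ".join("'%s'" % v for v in val) + ")"
        out.append("%s=%s\n" % (key, val))
    return "".join(out)

profile = {
    "Connection": "ethernet",
    "Interface": "eth0",                    # hypothetical device name
    "IP": "static",
    "Address": "192.0.2.10/255.255.255.0",  # address/netmask form
    "Gateway": "192.0.2.1",
    "DNS": ["192.0.2.53"],
}
print(render_netctl_profile(profile))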
diff --git a/cloudinit/distros/bsd.py b/cloudinit/distros/bsd.py
index c2fc1e0b..1b4498b3 100644
--- a/cloudinit/distros/bsd.py
+++ b/cloudinit/distros/bsd.py
@@ -1,12 +1,10 @@
import platform
-from cloudinit import distros
-from cloudinit.distros import bsd_utils
-from cloudinit import helpers
+from cloudinit import distros, helpers
from cloudinit import log as logging
-from cloudinit import net
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import net, subp, util
+from cloudinit.distros import bsd_utils
+
from .networking import BSDNetworking
LOG = logging.getLogger(__name__)
@@ -14,12 +12,12 @@ LOG = logging.getLogger(__name__)
class BSD(distros.Distro):
networking_cls = BSDNetworking
- hostname_conf_fn = '/etc/rc.conf'
+ hostname_conf_fn = "/etc/rc.conf"
rc_conf_fn = "/etc/rc.conf"
# This differs from the parent Distro class, which has -P for
# poweroff.
- shutdown_options_map = {'halt': '-H', 'poweroff': '-p', 'reboot': '-r'}
+ shutdown_options_map = {"halt": "-H", "poweroff": "-p", "reboot": "-r"}
# Set in BSD distro subclasses
group_add_cmd_prefix = []
@@ -35,7 +33,7 @@ class BSD(distros.Distro):
# calls from repeatly happening (when they
# should only happen say once per instance...)
self._runner = helpers.Runners(paths)
- cfg['ssh_svcname'] = 'sshd'
+ cfg["ssh_svcname"] = "sshd"
self.osfamily = platform.system().lower()
def _read_system_hostname(self):
@@ -43,13 +41,13 @@ class BSD(distros.Distro):
return (self.hostname_conf_fn, sys_hostname)
def _read_hostname(self, filename, default=None):
- return bsd_utils.get_rc_config_value('hostname')
+ return bsd_utils.get_rc_config_value("hostname")
def _get_add_member_to_group_cmd(self, member_name, group_name):
- raise NotImplementedError('Return list cmd to add member to group')
+ raise NotImplementedError("Return list cmd to add member to group")
def _write_hostname(self, hostname, filename):
- bsd_utils.set_rc_config_value('hostname', hostname, fn='/etc/rc.conf')
+ bsd_utils.set_rc_config_value("hostname", hostname, fn="/etc/rc.conf")
def create_group(self, name, members=None):
if util.is_group(name):
@@ -66,45 +64,55 @@ class BSD(distros.Distro):
members = []
for member in members:
if not util.is_user(member):
- LOG.warning("Unable to add group member '%s' to group '%s'"
- "; user does not exist.", member, name)
+ LOG.warning(
+ "Unable to add group member '%s' to group '%s'"
+ "; user does not exist.",
+ member,
+ name,
+ )
continue
try:
subp.subp(self._get_add_member_to_group_cmd(member, name))
LOG.info("Added user '%s' to group '%s'", member, name)
except Exception:
- util.logexc(LOG, "Failed to add user '%s' to group '%s'",
- member, name)
+ util.logexc(
+ LOG, "Failed to add user '%s' to group '%s'", member, name
+ )
def generate_fallback_config(self):
- nconf = {'config': [], 'version': 1}
+ nconf = {"config": [], "version": 1}
for mac, name in net.get_interfaces_by_mac().items():
- nconf['config'].append(
- {'type': 'physical', 'name': name,
- 'mac_address': mac, 'subnets': [{'type': 'dhcp'}]})
+ nconf["config"].append(
+ {
+ "type": "physical",
+ "name": name,
+ "mac_address": mac,
+ "subnets": [{"type": "dhcp"}],
+ }
+ )
return nconf
def install_packages(self, pkglist):
self.update_package_sources()
- self.package_command('install', pkgs=pkglist)
+ self.package_command("install", pkgs=pkglist)
def _get_pkg_cmd_environ(self):
"""Return environment vars used in *BSD package_command operations"""
- raise NotImplementedError('BSD subclasses return a dict of env vars')
+ raise NotImplementedError("BSD subclasses return a dict of env vars")
def package_command(self, command, args=None, pkgs=None):
if pkgs is None:
pkgs = []
- if command == 'install':
+ if command == "install":
cmd = self.pkg_cmd_install_prefix
- elif command == 'remove':
+ elif command == "remove":
cmd = self.pkg_cmd_remove_prefix
- elif command == 'update':
+ elif command == "update":
if not self.pkg_cmd_update_prefix:
return
cmd = self.pkg_cmd_update_prefix
- elif command == 'upgrade':
+ elif command == "upgrade":
if not self.pkg_cmd_upgrade_prefix:
return
cmd = self.pkg_cmd_upgrade_prefix
@@ -114,7 +122,7 @@ class BSD(distros.Distro):
elif args and isinstance(args, list):
cmd.extend(args)
- pkglist = util.expand_package_list('%s-%s', pkgs)
+ pkglist = util.expand_package_list("%s-%s", pkgs)
cmd.extend(pkglist)
# Allow the output of this to flow outwards (ie not be captured)
@@ -124,7 +132,7 @@ class BSD(distros.Distro):
distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz))
def apply_locale(self, locale, out_fn=None):
- LOG.debug('Cannot set the locale.')
+ LOG.debug("Cannot set the locale.")
def apply_network_config_names(self, netconfig):
- LOG.debug('Cannot rename network interface.')
+ LOG.debug("Cannot rename network interface.")
diff --git a/cloudinit/distros/bsd_utils.py b/cloudinit/distros/bsd_utils.py
index 079d0d53..00cd0662 100644
--- a/cloudinit/distros/bsd_utils.py
+++ b/cloudinit/distros/bsd_utils.py
@@ -18,31 +18,31 @@ def _unquote(value):
return value
-def get_rc_config_value(key, fn='/etc/rc.conf'):
- key_prefix = '{}='.format(key)
+def get_rc_config_value(key, fn="/etc/rc.conf"):
+ key_prefix = "{}=".format(key)
for line in util.load_file(fn).splitlines():
if line.startswith(key_prefix):
- value = line.replace(key_prefix, '')
+ value = line.replace(key_prefix, "")
return _unquote(value)
-def set_rc_config_value(key, value, fn='/etc/rc.conf'):
+def set_rc_config_value(key, value, fn="/etc/rc.conf"):
lines = []
done = False
value = shlex.quote(value)
original_content = util.load_file(fn)
for line in original_content.splitlines():
- if '=' in line:
- k, v = line.split('=', 1)
+ if "=" in line:
+ k, v = line.split("=", 1)
if k == key:
v = value
done = True
- lines.append('='.join([k, v]))
+ lines.append("=".join([k, v]))
else:
lines.append(line)
if not done:
- lines.append('='.join([key, value]))
- new_content = '\n'.join(lines) + '\n'
+ lines.append("=".join([key, value]))
+ new_content = "\n".join(lines) + "\n"
if new_content != original_content:
util.write_file(fn, new_content)
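Note on bsd_utils.py above: get_rc_config_value and set_rc_config_value treat /etc/rc.conf as simple key=value lines and only rewrite the file when the content actually changes. A self-contained sketch of the same update logic operating on an in-memory string (file reading, writing, and the fn path are left out):

import shlex

def set_rc_value(content, key, value):
    # Replace key=value when the key exists, otherwise append it;
    # shlex.quote mirrors the quoting used by set_rc_config_value.
    value = shlex.quote(value)
    lines, done = [], False
    for line in content.splitlines():
        if "=" in line:
            k, v = line.split("=", 1)
            if k == key:
                v, done = value, True
            lines.append("=".join([k, v]))
        else:
            lines.append(line)
    if not done:
        lines.append("=".join([key, value]))
    return "\n".join(lines) + "\n"

print(set_rc_value('hostname="old"\nsshd_enable="YES"', "hostname", "new-host"))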
diff --git a/cloudinit/distros/centos.py b/cloudinit/distros/centos.py
index edb3165d..3dc0a342 100644
--- a/cloudinit/distros/centos.py
+++ b/cloudinit/distros/centos.py
@@ -6,4 +6,5 @@ from cloudinit.distros import rhel
class Distro(rhel.Distro):
pass
+
# vi: ts=4 expandtab
diff --git a/cloudinit/distros/cloudlinux.py b/cloudinit/distros/cloudlinux.py
index edb3165d..3dc0a342 100644
--- a/cloudinit/distros/cloudlinux.py
+++ b/cloudinit/distros/cloudlinux.py
@@ -6,4 +6,5 @@ from cloudinit.distros import rhel
class Distro(rhel.Distro):
pass
+
# vi: ts=4 expandtab
diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py
index b2af0866..0105a383 100644
--- a/cloudinit/distros/debian.py
+++ b/cloudinit/distros/debian.py
@@ -11,25 +11,25 @@ import fcntl
import os
import time
-from cloudinit import distros
-from cloudinit import helpers
+from cloudinit import distros, helpers
from cloudinit import log as logging
-from cloudinit import subp
-from cloudinit import util
-
+from cloudinit import subp, util
from cloudinit.distros.parsers.hostname import HostnameConf
-
from cloudinit.settings import PER_INSTANCE
LOG = logging.getLogger(__name__)
APT_LOCK_WAIT_TIMEOUT = 30
-APT_GET_COMMAND = ('apt-get', '--option=Dpkg::Options::=--force-confold',
- '--option=Dpkg::options::=--force-unsafe-io',
- '--assume-yes', '--quiet')
+APT_GET_COMMAND = (
+ "apt-get",
+ "--option=Dpkg::Options::=--force-confold",
+ "--option=Dpkg::options::=--force-unsafe-io",
+ "--assume-yes",
+ "--quiet",
+)
APT_GET_WRAPPER = {
- 'command': 'eatmydata',
- 'enabled': 'auto',
+ "command": "eatmydata",
+ "enabled": "auto",
}
NETWORK_FILE_HEADER = """\
@@ -50,10 +50,10 @@ LOCALE_CONF_FN = "/etc/default/locale"
# More context:
# https://github.com/canonical/cloud-init/pull/1034#issuecomment-986971376
APT_LOCK_FILES = [
- '/var/lib/dpkg/lock-frontend',
- '/var/lib/dpkg/lock',
- '/var/cache/apt/archives/lock',
- '/var/lib/apt/lists/lock',
+ "/var/lib/dpkg/lock-frontend",
+ "/var/lib/dpkg/lock",
+ "/var/cache/apt/archives/lock",
+ "/var/lib/apt/lists/lock",
]
@@ -61,14 +61,18 @@ class Distro(distros.Distro):
hostname_conf_fn = "/etc/hostname"
network_conf_fn = {
"eni": "/etc/network/interfaces.d/50-cloud-init",
- "netplan": "/etc/netplan/50-cloud-init.yaml"
+ "netplan": "/etc/netplan/50-cloud-init.yaml",
}
renderer_configs = {
- "eni": {"eni_path": network_conf_fn["eni"],
- "eni_header": NETWORK_FILE_HEADER},
- "netplan": {"netplan_path": network_conf_fn["netplan"],
- "netplan_header": NETWORK_FILE_HEADER,
- "postcmds": True}
+ "eni": {
+ "eni_path": network_conf_fn["eni"],
+ "eni_header": NETWORK_FILE_HEADER,
+ },
+ "netplan": {
+ "netplan_path": network_conf_fn["netplan"],
+ "netplan_header": NETWORK_FILE_HEADER,
+ "postcmds": True,
+ },
}
def __init__(self, name, cfg, paths):
@@ -77,8 +81,8 @@ class Distro(distros.Distro):
# calls from repeatly happening (when they
# should only happen say once per instance...)
self._runner = helpers.Runners(paths)
- self.osfamily = 'debian'
- self.default_locale = 'en_US.UTF-8'
+ self.osfamily = "debian"
+ self.default_locale = "en_US.UTF-8"
self.system_locale = None
def get_locale(self):
@@ -89,25 +93,29 @@ class Distro(distros.Distro):
self.system_locale = read_system_locale()
# Return system_locale setting if valid, else use default locale
- return (self.system_locale if self.system_locale else
- self.default_locale)
+ return (
+ self.system_locale if self.system_locale else self.default_locale
+ )
- def apply_locale(self, locale, out_fn=None, keyname='LANG'):
+ def apply_locale(self, locale, out_fn=None, keyname="LANG"):
"""Apply specified locale to system, regenerate if specified locale
- differs from system default."""
+ differs from system default."""
if not out_fn:
out_fn = LOCALE_CONF_FN
if not locale:
- raise ValueError('Failed to provide locale value.')
+ raise ValueError("Failed to provide locale value.")
# Only call locale regeneration if needed
# Update system locale config with specified locale if needed
distro_locale = self.get_locale()
conf_fn_exists = os.path.exists(out_fn)
sys_locale_unset = False if self.system_locale else True
- need_regen = (locale.lower() != distro_locale.lower() or
- not conf_fn_exists or sys_locale_unset)
+ need_regen = (
+ locale.lower() != distro_locale.lower()
+ or not conf_fn_exists
+ or sys_locale_unset
+ )
need_conf = not conf_fn_exists or need_regen or sys_locale_unset
if need_regen:
@@ -115,7 +123,10 @@ class Distro(distros.Distro):
else:
LOG.debug(
"System has '%s=%s' requested '%s', skipping regeneration.",
- keyname, self.system_locale, locale)
+ keyname,
+ self.system_locale,
+ locale,
+ )
if need_conf:
update_locale_conf(locale, out_fn, keyname=keyname)
@@ -124,7 +135,7 @@ class Distro(distros.Distro):
def install_packages(self, pkglist):
self.update_package_sources()
- self.package_command('install', pkgs=pkglist)
+ self.package_command("install", pkgs=pkglist)
def _write_network_state(self, network_state):
_maybe_remove_legacy_eth0()
@@ -139,7 +150,7 @@ class Distro(distros.Distro):
except IOError:
pass
if not conf:
- conf = HostnameConf('')
+ conf = HostnameConf("")
conf.set_hostname(hostname)
util.write_file(filename, str(conf), 0o644)
@@ -181,7 +192,7 @@ class Distro(distros.Distro):
if not os.path.exists(lock):
# Only wait for lock files that already exist
continue
- with open(lock, 'w') as handle:
+ with open(lock, "w") as handle:
try:
fcntl.lockf(handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
except OSError:
@@ -197,17 +208,17 @@ class Distro(distros.Distro):
subp_kwargs: kwargs to pass to subp
"""
start_time = time.time()
- LOG.debug('Waiting for apt lock')
+ LOG.debug("Waiting for apt lock")
while time.time() - start_time < timeout:
if not self._apt_lock_available():
time.sleep(1)
continue
- LOG.debug('apt lock available')
+ LOG.debug("apt lock available")
try:
# Allow the output of this to flow outwards (not be captured)
log_msg = "apt-%s [%s]" % (
short_cmd,
- ' '.join(subp_kwargs['args'])
+ " ".join(subp_kwargs["args"]),
)
return util.log_time(
logfunc=LOG.debug,
@@ -228,9 +239,9 @@ class Distro(distros.Distro):
# error received. If the lock is unavailable, just keep waiting
if self._apt_lock_available():
raise
- LOG.debug('Another process holds apt lock. Waiting...')
+ LOG.debug("Another process holds apt lock. Waiting...")
time.sleep(1)
- raise TimeoutError('Could not get apt lock')
+ raise TimeoutError("Could not get apt lock")
def package_command(self, command, args=None, pkgs=None):
"""Run the given package command.
@@ -247,12 +258,13 @@ class Distro(distros.Distro):
e = os.environ.copy()
# See: http://manpages.ubuntu.com/manpages/xenial/man7/debconf.7.html
- e['DEBIAN_FRONTEND'] = 'noninteractive'
+ e["DEBIAN_FRONTEND"] = "noninteractive"
wcfg = self.get_option("apt_get_wrapper", APT_GET_WRAPPER)
cmd = _get_wrapper_prefix(
- wcfg.get('command', APT_GET_WRAPPER['command']),
- wcfg.get('enabled', APT_GET_WRAPPER['enabled']))
+ wcfg.get("command", APT_GET_WRAPPER["command"]),
+ wcfg.get("enabled", APT_GET_WRAPPER["enabled"]),
+ )
cmd.extend(list(self.get_option("apt_get_command", APT_GET_COMMAND)))
@@ -263,22 +275,27 @@ class Distro(distros.Distro):
subcmd = command
if command == "upgrade":
- subcmd = self.get_option("apt_get_upgrade_subcommand",
- "dist-upgrade")
+ subcmd = self.get_option(
+ "apt_get_upgrade_subcommand", "dist-upgrade"
+ )
cmd.append(subcmd)
- pkglist = util.expand_package_list('%s=%s', pkgs)
+ pkglist = util.expand_package_list("%s=%s", pkgs)
cmd.extend(pkglist)
self._wait_for_apt_command(
short_cmd=command,
- subp_kwargs={'args': cmd, 'env': e, 'capture': False}
+ subp_kwargs={"args": cmd, "env": e, "capture": False},
)
def update_package_sources(self):
- self._runner.run("update-sources", self.package_command,
- ["update"], freq=PER_INSTANCE)
+ self._runner.run(
+ "update-sources",
+ self.package_command,
+ ["update"],
+ freq=PER_INSTANCE,
+ )
def get_primary_arch(self):
return util.get_dpkg_architecture()
@@ -288,9 +305,9 @@ def _get_wrapper_prefix(cmd, mode):
if isinstance(cmd, str):
cmd = [str(cmd)]
- if (util.is_true(mode) or
- (str(mode).lower() == "auto" and cmd[0] and
- subp.which(cmd[0]))):
+ if util.is_true(mode) or (
+ str(mode).lower() == "auto" and cmd[0] and subp.which(cmd[0])
+ ):
return cmd
else:
return []
@@ -298,13 +315,13 @@ def _get_wrapper_prefix(cmd, mode):
def _maybe_remove_legacy_eth0(path="/etc/network/interfaces.d/eth0.cfg"):
"""Ubuntu cloud images previously included a 'eth0.cfg' that had
- hard coded content. That file would interfere with the rendered
- configuration if it was present.
+ hard coded content. That file would interfere with the rendered
+ configuration if it was present.
- if the file does not exist do nothing.
- If the file exists:
- - with known content, remove it and warn
- - with unknown content, leave it and warn
+ if the file does not exist do nothing.
+ If the file exists:
+ - with known content, remove it and warn
+ - with unknown content, leave it and warn
"""
if not os.path.exists(path):
@@ -314,24 +331,25 @@ def _maybe_remove_legacy_eth0(path="/etc/network/interfaces.d/eth0.cfg"):
try:
contents = util.load_file(path)
known_contents = ["auto eth0", "iface eth0 inet dhcp"]
- lines = [f.strip() for f in contents.splitlines()
- if not f.startswith("#")]
+ lines = [
+ f.strip() for f in contents.splitlines() if not f.startswith("#")
+ ]
if lines == known_contents:
util.del_file(path)
msg = "removed %s with known contents" % path
else:
- msg = (bmsg + " '%s' exists with user configured content." % path)
+ msg = bmsg + " '%s' exists with user configured content." % path
except Exception:
msg = bmsg + " %s exists, but could not be read." % path
LOG.warning(msg)
-def read_system_locale(sys_path=LOCALE_CONF_FN, keyname='LANG'):
+def read_system_locale(sys_path=LOCALE_CONF_FN, keyname="LANG"):
"""Read system default locale setting, if present"""
sys_val = ""
if not sys_path:
- raise ValueError('Invalid path: %s' % sys_path)
+ raise ValueError("Invalid path: %s" % sys_path)
if os.path.exists(sys_path):
locale_content = util.load_file(sys_path)
@@ -341,16 +359,22 @@ def read_system_locale(sys_path=LOCALE_CONF_FN, keyname='LANG'):
return sys_val
-def update_locale_conf(locale, sys_path, keyname='LANG'):
+def update_locale_conf(locale, sys_path, keyname="LANG"):
"""Update system locale config"""
- LOG.debug('Updating %s with locale setting %s=%s',
- sys_path, keyname, locale)
+ LOG.debug(
+ "Updating %s with locale setting %s=%s", sys_path, keyname, locale
+ )
subp.subp(
- ['update-locale', '--locale-file=' + sys_path,
- '%s=%s' % (keyname, locale)], capture=False)
+ [
+ "update-locale",
+ "--locale-file=" + sys_path,
+ "%s=%s" % (keyname, locale),
+ ],
+ capture=False,
+ )
-def regenerate_locale(locale, sys_path, keyname='LANG'):
+def regenerate_locale(locale, sys_path, keyname="LANG"):
"""
Run locale-gen for the provided locale and set the default
system variable `keyname` appropriately in the provided `sys_path`.
@@ -361,13 +385,13 @@ def regenerate_locale(locale, sys_path, keyname='LANG'):
# C
# C.UTF-8
# POSIX
- if locale.lower() in ['c', 'c.utf-8', 'posix']:
- LOG.debug('%s=%s does not require rengeneration', keyname, locale)
+ if locale.lower() in ["c", "c.utf-8", "posix"]:
+ LOG.debug("%s=%s does not require rengeneration", keyname, locale)
return
# finally, trigger regeneration
- LOG.debug('Generating locales for %s', locale)
- subp.subp(['locale-gen', locale], capture=False)
+ LOG.debug("Generating locales for %s", locale)
+ subp.subp(["locale-gen", locale], capture=False)
# vi: ts=4 expandtab
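Note on the apt lock handling in debian.py above: _apt_lock_available probes each dpkg/apt lock file with a non-blocking flock, and _wait_for_apt_command polls once per second until the locks are free or APT_LOCK_WAIT_TIMEOUT expires. A condensed, standalone sketch of that wait loop (Linux-only, needs root to open the lock files for writing, timeout value as in the diff):

import fcntl
import os
import time

APT_LOCK_FILES = [
    "/var/lib/dpkg/lock-frontend",
    "/var/lib/dpkg/lock",
    "/var/cache/apt/archives/lock",
    "/var/lib/apt/lists/lock",
]

def apt_lock_available(lock_files=APT_LOCK_FILES):
    # A failed non-blocking exclusive lock means another process
    # currently holds the apt/dpkg lock.
    for lock in lock_files:
        if not os.path.exists(lock):
            continue  # only wait for lock files that already exist
        with open(lock, "w") as handle:
            try:
                fcntl.lockf(handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
            except OSError:
                return False
    return True

def wait_for_apt_lock(timeout=30):
    # Poll once per second until the locks are free or we time out.
    start = time.time()
    while time.time() - start < timeout:
        if apt_lock_available():
            return True
        time.sleep(1)
    return False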
diff --git a/cloudinit/distros/dragonflybsd.py b/cloudinit/distros/dragonflybsd.py
index 2d825518..0d02bee0 100644
--- a/cloudinit/distros/dragonflybsd.py
+++ b/cloudinit/distros/dragonflybsd.py
@@ -6,7 +6,7 @@ import cloudinit.distros.freebsd
class Distro(cloudinit.distros.freebsd.Distro):
- home_dir = '/home'
+ home_dir = "/home"
# vi: ts=4 expandtab
diff --git a/cloudinit/distros/eurolinux.py b/cloudinit/distros/eurolinux.py
index edb3165d..3dc0a342 100644
--- a/cloudinit/distros/eurolinux.py
+++ b/cloudinit/distros/eurolinux.py
@@ -6,4 +6,5 @@ from cloudinit.distros import rhel
class Distro(rhel.Distro):
pass
+
# vi: ts=4 expandtab
diff --git a/cloudinit/distros/fedora.py b/cloudinit/distros/fedora.py
index 0fe1fbca..39203225 100644
--- a/cloudinit/distros/fedora.py
+++ b/cloudinit/distros/fedora.py
@@ -14,4 +14,5 @@ from cloudinit.distros import rhel
class Distro(rhel.Distro):
pass
+
# vi: ts=4 expandtab
diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py
index d94a52b8..513abdc2 100644
--- a/cloudinit/distros/freebsd.py
+++ b/cloudinit/distros/freebsd.py
@@ -10,8 +10,7 @@ from io import StringIO
import cloudinit.distros.bsd
from cloudinit import log as logging
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
from cloudinit.settings import PER_INSTANCE
LOG = logging.getLogger(__name__)
@@ -24,41 +23,41 @@ class Distro(cloudinit.distros.bsd.BSD):
(N.B. DragonFlyBSD inherits from this class.)
"""
- usr_lib_exec = '/usr/local/lib'
- login_conf_fn = '/etc/login.conf'
- login_conf_fn_bak = '/etc/login.conf.orig'
- ci_sudoers_fn = '/usr/local/etc/sudoers.d/90-cloud-init-users'
- group_add_cmd_prefix = ['pw', 'group', 'add']
+ usr_lib_exec = "/usr/local/lib"
+ login_conf_fn = "/etc/login.conf"
+ login_conf_fn_bak = "/etc/login.conf.orig"
+ ci_sudoers_fn = "/usr/local/etc/sudoers.d/90-cloud-init-users"
+ group_add_cmd_prefix = ["pw", "group", "add"]
pkg_cmd_install_prefix = ["pkg", "install"]
pkg_cmd_remove_prefix = ["pkg", "remove"]
pkg_cmd_update_prefix = ["pkg", "update"]
pkg_cmd_upgrade_prefix = ["pkg", "upgrade"]
prefer_fqdn = True # See rc.conf(5) in FreeBSD
- home_dir = '/usr/home'
+ home_dir = "/usr/home"
def _get_add_member_to_group_cmd(self, member_name, group_name):
- return ['pw', 'usermod', '-n', member_name, '-G', group_name]
+ return ["pw", "usermod", "-n", member_name, "-G", group_name]
def add_user(self, name, **kwargs):
if util.is_user(name):
LOG.info("User %s already exists, skipping.", name)
return False
- pw_useradd_cmd = ['pw', 'useradd', '-n', name]
- log_pw_useradd_cmd = ['pw', 'useradd', '-n', name]
+ pw_useradd_cmd = ["pw", "useradd", "-n", name]
+ log_pw_useradd_cmd = ["pw", "useradd", "-n", name]
pw_useradd_opts = {
- "homedir": '-d',
- "gecos": '-c',
- "primary_group": '-g',
- "groups": '-G',
- "shell": '-s',
- "inactive": '-E',
+ "homedir": "-d",
+ "gecos": "-c",
+ "primary_group": "-g",
+ "groups": "-G",
+ "shell": "-s",
+ "inactive": "-E",
}
pw_useradd_flags = {
- "no_user_group": '--no-user-group',
- "system": '--system',
- "no_log_init": '--no-log-init',
+ "no_user_group": "--no-user-group",
+ "system": "--system",
+ "no_log_init": "--no-log-init",
}
for key, val in kwargs.items():
@@ -69,17 +68,19 @@ class Distro(cloudinit.distros.bsd.BSD):
pw_useradd_cmd.append(pw_useradd_flags[key])
log_pw_useradd_cmd.append(pw_useradd_flags[key])
- if 'no_create_home' in kwargs or 'system' in kwargs:
- pw_useradd_cmd.append('-d/nonexistent')
- log_pw_useradd_cmd.append('-d/nonexistent')
+ if "no_create_home" in kwargs or "system" in kwargs:
+ pw_useradd_cmd.append("-d/nonexistent")
+ log_pw_useradd_cmd.append("-d/nonexistent")
else:
- pw_useradd_cmd.append('-d{home_dir}/{name}'.format(
- home_dir=self.home_dir, name=name))
- pw_useradd_cmd.append('-m')
- log_pw_useradd_cmd.append('-d{home_dir}/{name}'.format(
- home_dir=self.home_dir, name=name))
+ pw_useradd_cmd.append(
+ "-d{home_dir}/{name}".format(home_dir=self.home_dir, name=name)
+ )
+ pw_useradd_cmd.append("-m")
+ log_pw_useradd_cmd.append(
+ "-d{home_dir}/{name}".format(home_dir=self.home_dir, name=name)
+ )
- log_pw_useradd_cmd.append('-m')
+ log_pw_useradd_cmd.append("-m")
# Run the command
LOG.info("Adding user %s", name)
@@ -90,13 +91,13 @@ class Distro(cloudinit.distros.bsd.BSD):
raise
# Set the password if it is provided
# For security consideration, only hashed passwd is assumed
- passwd_val = kwargs.get('passwd', None)
+ passwd_val = kwargs.get("passwd", None)
if passwd_val is not None:
self.set_passwd(name, passwd_val, hashed=True)
def expire_passwd(self, user):
try:
- subp.subp(['pw', 'usermod', user, '-p', '01-Jan-1970'])
+ subp.subp(["pw", "usermod", user, "-p", "01-Jan-1970"])
except Exception:
util.logexc(LOG, "Failed to set pw expiration for %s", user)
raise
@@ -108,15 +109,18 @@ class Distro(cloudinit.distros.bsd.BSD):
hash_opt = "-h"
try:
- subp.subp(['pw', 'usermod', user, hash_opt, '0'],
- data=passwd, logstring="chpasswd for %s" % user)
+ subp.subp(
+ ["pw", "usermod", user, hash_opt, "0"],
+ data=passwd,
+ logstring="chpasswd for %s" % user,
+ )
except Exception:
util.logexc(LOG, "Failed to set password for %s", user)
raise
def lock_passwd(self, name):
try:
- subp.subp(['pw', 'usermod', name, '-h', '-'])
+ subp.subp(["pw", "usermod", name, "-h", "-"])
except Exception:
util.logexc(LOG, "Failed to lock user %s", name)
raise
@@ -125,8 +129,9 @@ class Distro(cloudinit.distros.bsd.BSD):
# Adjust the locales value to the new value
newconf = StringIO()
for line in util.load_file(self.login_conf_fn).splitlines():
- newconf.write(re.sub(r'^default:',
- r'default:lang=%s:' % locale, line))
+ newconf.write(
+ re.sub(r"^default:", r"default:lang=%s:" % locale, line)
+ )
newconf.write("\n")
# Make a backup of login.conf.
@@ -137,15 +142,16 @@ class Distro(cloudinit.distros.bsd.BSD):
try:
LOG.debug("Running cap_mkdb for %s", locale)
- subp.subp(['cap_mkdb', self.login_conf_fn])
+ subp.subp(["cap_mkdb", self.login_conf_fn])
except subp.ProcessExecutionError:
# cap_mkdb failed, so restore the backup.
util.logexc(LOG, "Failed to apply locale %s", locale)
try:
util.copy(self.login_conf_fn_bak, self.login_conf_fn)
except IOError:
- util.logexc(LOG, "Failed to restore %s backup",
- self.login_conf_fn)
+ util.logexc(
+ LOG, "Failed to restore %s backup", self.login_conf_fn
+ )
def apply_network_config_names(self, netconfig):
# This is handled by the freebsd network renderer. It writes in
@@ -157,13 +163,16 @@ class Distro(cloudinit.distros.bsd.BSD):
def _get_pkg_cmd_environ(self):
"""Return environment vars used in *BSD package_command operations"""
e = os.environ.copy()
- e['ASSUME_ALWAYS_YES'] = 'YES'
+ e["ASSUME_ALWAYS_YES"] = "YES"
return e
def update_package_sources(self):
self._runner.run(
- "update-sources", self.package_command,
- ["update"], freq=PER_INSTANCE)
+ "update-sources",
+ self.package_command,
+ ["update"],
+ freq=PER_INSTANCE,
+ )
# vi: ts=4 expandtab
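Note on add_user in freebsd.py above: user options are mapped onto pw(8) flags through the pw_useradd_opts table, and the home directory is forced to /nonexistent for system users. A small illustrative sketch that only builds the command list (execution via subp is omitted; the option values shown are hypothetical):

def build_pw_useradd(name, home_dir="/usr/home", **kwargs):
    # Map cloud-init user options onto pw(8) flags, following the
    # option table reformatted above.
    opts = {"homedir": "-d", "gecos": "-c", "primary_group": "-g",
            "groups": "-G", "shell": "-s", "inactive": "-E"}
    cmd = ["pw", "useradd", "-n", name]
    for key, val in kwargs.items():
        if key in opts and val and isinstance(val, str):
            cmd.extend([opts[key], val])
    if "no_create_home" in kwargs or "system" in kwargs:
        cmd.append("-d/nonexistent")
    else:
        cmd.append("-d{}/{}".format(home_dir, name))
        cmd.append("-m")
    return cmd

print(build_pw_useradd("alice", shell="/bin/sh", groups="wheel"))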
diff --git a/cloudinit/distros/gentoo.py b/cloudinit/distros/gentoo.py
index 1be76dc8..1384a682 100644
--- a/cloudinit/distros/gentoo.py
+++ b/cloudinit/distros/gentoo.py
@@ -6,25 +6,21 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import distros
-from cloudinit import helpers
+from cloudinit import distros, helpers
from cloudinit import log as logging
-from cloudinit import subp
-from cloudinit import util
-
+from cloudinit import subp, util
from cloudinit.distros import net_util
from cloudinit.distros.parsers.hostname import HostnameConf
-
from cloudinit.settings import PER_INSTANCE
LOG = logging.getLogger(__name__)
class Distro(distros.Distro):
- locale_conf_fn = '/etc/locale.gen'
- network_conf_fn = '/etc/conf.d/net'
- hostname_conf_fn = '/etc/conf.d/hostname'
- init_cmd = ['rc-service'] # init scripts
+ locale_conf_fn = "/etc/locale.gen"
+ network_conf_fn = "/etc/conf.d/net"
+ hostname_conf_fn = "/etc/conf.d/hostname"
+ init_cmd = ["rc-service"] # init scripts
def __init__(self, name, cfg, paths):
distros.Distro.__init__(self, name, cfg, paths)
@@ -32,14 +28,14 @@ class Distro(distros.Distro):
# calls from repeatly happening (when they
# should only happen say once per instance...)
self._runner = helpers.Runners(paths)
- self.osfamily = 'gentoo'
+ self.osfamily = "gentoo"
# Fix sshd restarts
- cfg['ssh_svcname'] = '/etc/init.d/sshd'
+ cfg["ssh_svcname"] = "/etc/init.d/sshd"
def apply_locale(self, locale, out_fn=None):
if not out_fn:
out_fn = self.locale_conf_fn
- subp.subp(['locale-gen', '-G', locale], capture=False)
+ subp.subp(["locale-gen", "-G", locale], capture=False)
# "" provides trailing newline during join
lines = [
util.make_header(),
@@ -50,79 +46,97 @@ class Distro(distros.Distro):
def install_packages(self, pkglist):
self.update_package_sources()
- self.package_command('', pkgs=pkglist)
+ self.package_command("", pkgs=pkglist)
def _write_network(self, settings):
entries = net_util.translate_network(settings)
- LOG.debug("Translated ubuntu style network settings %s into %s",
- settings, entries)
+ LOG.debug(
+ "Translated ubuntu style network settings %s into %s",
+ settings,
+ entries,
+ )
dev_names = entries.keys()
nameservers = []
for (dev, info) in entries.items():
- if 'dns-nameservers' in info:
- nameservers.extend(info['dns-nameservers'])
- if dev == 'lo':
+ if "dns-nameservers" in info:
+ nameservers.extend(info["dns-nameservers"])
+ if dev == "lo":
continue
- net_fn = self.network_conf_fn + '.' + dev
- dns_nameservers = info.get('dns-nameservers')
+ net_fn = self.network_conf_fn + "." + dev
+ dns_nameservers = info.get("dns-nameservers")
if isinstance(dns_nameservers, (list, tuple)):
- dns_nameservers = str(tuple(dns_nameservers)).replace(',', '')
+ dns_nameservers = str(tuple(dns_nameservers)).replace(",", "")
# eth0, {'auto': True, 'ipv6': {}, 'bootproto': 'dhcp'}
# lo, {'dns-nameservers': ['10.0.1.3'], 'ipv6': {}, 'auto': True}
- results = ''
- if info.get('bootproto') == 'dhcp':
+ results = ""
+ if info.get("bootproto") == "dhcp":
results += 'config_{name}="dhcp"'.format(name=dev)
else:
results += (
'config_{name}="{ip_address} netmask {netmask}"\n'
'mac_{name}="{hwaddr}"\n'
- ).format(name=dev, ip_address=info.get('address'),
- netmask=info.get('netmask'),
- hwaddr=info.get('hwaddress'))
- results += 'routes_{name}="default via {gateway}"\n'.format(
+ ).format(
name=dev,
- gateway=info.get('gateway')
+ ip_address=info.get("address"),
+ netmask=info.get("netmask"),
+ hwaddr=info.get("hwaddress"),
+ )
+ results += 'routes_{name}="default via {gateway}"\n'.format(
+ name=dev, gateway=info.get("gateway")
)
- if info.get('dns-nameservers'):
+ if info.get("dns-nameservers"):
results += 'dns_servers_{name}="{dnsservers}"\n'.format(
- name=dev,
- dnsservers=dns_nameservers)
+ name=dev, dnsservers=dns_nameservers
+ )
util.write_file(net_fn, results)
self._create_network_symlink(dev)
- if info.get('auto'):
- cmd = ['rc-update', 'add', 'net.{name}'.format(name=dev),
- 'default']
+ if info.get("auto"):
+ cmd = [
+ "rc-update",
+ "add",
+ "net.{name}".format(name=dev),
+ "default",
+ ]
try:
(_out, err) = subp.subp(cmd)
if len(err):
- LOG.warning("Running %s resulted in stderr output: %s",
- cmd, err)
+ LOG.warning(
+ "Running %s resulted in stderr output: %s",
+ cmd,
+ err,
+ )
except subp.ProcessExecutionError:
- util.logexc(LOG, "Running interface command %s failed",
- cmd)
+ util.logexc(
+ LOG, "Running interface command %s failed", cmd
+ )
if nameservers:
- util.write_file(self.resolve_conf_fn,
- convert_resolv_conf(nameservers))
+ util.write_file(
+ self.resolve_conf_fn, convert_resolv_conf(nameservers)
+ )
return dev_names
@staticmethod
def _create_network_symlink(interface_name):
- file_path = '/etc/init.d/net.{name}'.format(name=interface_name)
+ file_path = "/etc/init.d/net.{name}".format(name=interface_name)
if not util.is_link(file_path):
- util.sym_link('/etc/init.d/net.lo', file_path)
+ util.sym_link("/etc/init.d/net.lo", file_path)
def _bring_up_interface(self, device_name):
- cmd = ['/etc/init.d/net.%s' % device_name, 'restart']
- LOG.debug("Attempting to run bring up interface %s using command %s",
- device_name, cmd)
+ cmd = ["/etc/init.d/net.%s" % device_name, "restart"]
+ LOG.debug(
+ "Attempting to run bring up interface %s using command %s",
+ device_name,
+ cmd,
+ )
try:
(_out, err) = subp.subp(cmd)
if len(err):
- LOG.warning("Running %s resulted in stderr output: %s",
- cmd, err)
+ LOG.warning(
+ "Running %s resulted in stderr output: %s", cmd, err
+ )
return True
except subp.ProcessExecutionError:
util.logexc(LOG, "Running interface command %s failed", cmd)
@@ -131,20 +145,21 @@ class Distro(distros.Distro):
def _bring_up_interfaces(self, device_names):
use_all = False
for d in device_names:
- if d == 'all':
+ if d == "all":
use_all = True
if use_all:
# Grab device names from init scripts
- cmd = ['ls', '/etc/init.d/net.*']
+ cmd = ["ls", "/etc/init.d/net.*"]
try:
(_out, err) = subp.subp(cmd)
if len(err):
- LOG.warning("Running %s resulted in stderr output: %s",
- cmd, err)
+ LOG.warning(
+ "Running %s resulted in stderr output: %s", cmd, err
+ )
except subp.ProcessExecutionError:
util.logexc(LOG, "Running interface command %s failed", cmd)
return False
- devices = [x.split('.')[2] for x in _out.split(' ')]
+ devices = [x.split(".")[2] for x in _out.split(" ")]
return distros.Distro._bring_up_interfaces(self, devices)
else:
return distros.Distro._bring_up_interfaces(self, device_names)
@@ -158,7 +173,7 @@ class Distro(distros.Distro):
except IOError:
pass
if not conf:
- conf = HostnameConf('')
+ conf = HostnameConf("")
# Many distro's format is the hostname by itself, and that is the
# way HostnameConf works but gentoo expects it to be in
@@ -194,7 +209,7 @@ class Distro(distros.Distro):
if pkgs is None:
pkgs = []
- cmd = list('emerge')
+ cmd = list("emerge")
# Redirect output
cmd.append("--quiet")
@@ -206,23 +221,28 @@ class Distro(distros.Distro):
if command:
cmd.append(command)
- pkglist = util.expand_package_list('%s-%s', pkgs)
+ pkglist = util.expand_package_list("%s-%s", pkgs)
cmd.extend(pkglist)
# Allow the output of this to flow outwards (ie not be captured)
subp.subp(cmd, capture=False)
def update_package_sources(self):
- self._runner.run("update-sources", self.package_command,
- ["-u", "world"], freq=PER_INSTANCE)
+ self._runner.run(
+ "update-sources",
+ self.package_command,
+ ["-u", "world"],
+ freq=PER_INSTANCE,
+ )
def convert_resolv_conf(settings):
"""Returns a settings string formatted for resolv.conf."""
- result = ''
+ result = ""
if isinstance(settings, list):
for ns in settings:
- result += 'nameserver %s\n' % ns
+ result += "nameserver %s\n" % ns
return result
+
# vi: ts=4 expandtab
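Note on _write_network in gentoo.py above: each device gets an /etc/conf.d/net.<name> fragment, either a one-line dhcp config or static address, mac, and default-route lines. A standalone sketch of that string assembly with illustrative values (the version in the diff also handles DNS servers, init-script symlinks, and rc-update):

def render_gentoo_net(dev, info):
    # Build the conf.d/net fragment the way _write_network does;
    # `info` uses the translate_network dictionary shape.
    if info.get("bootproto") == "dhcp":
        return 'config_{name}="dhcp"'.format(name=dev)
    out = (
        'config_{name}="{ip} netmask {mask}"\n'
        'mac_{name}="{hwaddr}"\n'
    ).format(name=dev, ip=info.get("address"),
             mask=info.get("netmask"), hwaddr=info.get("hwaddress"))
    out += 'routes_{name}="default via {gw}"\n'.format(
        name=dev, gw=info.get("gateway"))
    return out

print(render_gentoo_net("eth0", {
    "address": "192.0.2.10", "netmask": "255.255.255.0",
    "hwaddress": "00:16:3e:00:00:01", "gateway": "192.0.2.1",
}))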
diff --git a/cloudinit/distros/miraclelinux.py b/cloudinit/distros/miraclelinux.py
index c7753387..3dc0a342 100644
--- a/cloudinit/distros/miraclelinux.py
+++ b/cloudinit/distros/miraclelinux.py
@@ -5,4 +5,6 @@ from cloudinit.distros import rhel
class Distro(rhel.Distro):
pass
+
+
# vi: ts=4 expandtab
diff --git a/cloudinit/distros/net_util.py b/cloudinit/distros/net_util.py
index edfcd99d..e37fb19b 100644
--- a/cloudinit/distros/net_util.py
+++ b/cloudinit/distros/net_util.py
@@ -68,7 +68,9 @@
# }
from cloudinit.net.network_state import (
- net_prefix_to_ipv4_mask, mask_and_ipv4_to_bcast_addr)
+ mask_and_ipv4_to_bcast_addr,
+ net_prefix_to_ipv4_mask,
+)
def translate_network(settings):
@@ -86,7 +88,7 @@ def translate_network(settings):
ifaces = []
consume = {}
for (cmd, args) in entries:
- if cmd == 'iface':
+ if cmd == "iface":
if consume:
ifaces.append(consume)
consume = {}
@@ -96,19 +98,19 @@ def translate_network(settings):
# Check if anything left over to consume
absorb = False
for (cmd, args) in consume.items():
- if cmd == 'iface':
+ if cmd == "iface":
absorb = True
if absorb:
ifaces.append(consume)
# Now translate
real_ifaces = {}
for info in ifaces:
- if 'iface' not in info:
+ if "iface" not in info:
continue
- iface_details = info['iface'].split(None)
+ iface_details = info["iface"].split(None)
# Check if current device *may* have an ipv6 IP
use_ipv6 = False
- if 'inet6' in iface_details:
+ if "inet6" in iface_details:
use_ipv6 = True
dev_name = None
if len(iface_details) >= 1:
@@ -118,55 +120,54 @@ def translate_network(settings):
if not dev_name:
continue
iface_info = {}
- iface_info['ipv6'] = {}
+ iface_info["ipv6"] = {}
if len(iface_details) >= 3:
proto_type = iface_details[2].strip().lower()
# Seems like this can be 'loopback' which we don't
# really care about
- if proto_type in ['dhcp', 'static']:
- iface_info['bootproto'] = proto_type
+ if proto_type in ["dhcp", "static"]:
+ iface_info["bootproto"] = proto_type
# These can just be copied over
if use_ipv6:
- for k in ['address', 'gateway']:
+ for k in ["address", "gateway"]:
if k in info:
val = info[k].strip().lower()
if val:
- iface_info['ipv6'][k] = val
+ iface_info["ipv6"][k] = val
else:
- for k in ['netmask', 'address', 'gateway', 'broadcast']:
+ for k in ["netmask", "address", "gateway", "broadcast"]:
if k in info:
val = info[k].strip().lower()
if val:
iface_info[k] = val
# handle static ip configurations using
# ipaddress/prefix-length format
- if 'address' in iface_info:
- if 'netmask' not in iface_info:
+ if "address" in iface_info:
+ if "netmask" not in iface_info:
# check if the address has a network prefix
- addr, _, prefix = iface_info['address'].partition('/')
+ addr, _, prefix = iface_info["address"].partition("/")
if prefix:
- iface_info['netmask'] = (
- net_prefix_to_ipv4_mask(prefix))
- iface_info['address'] = addr
+ iface_info["netmask"] = net_prefix_to_ipv4_mask(prefix)
+ iface_info["address"] = addr
# if we set the netmask, we also can set the broadcast
- iface_info['broadcast'] = (
- mask_and_ipv4_to_bcast_addr(
- iface_info['netmask'], addr))
+ iface_info["broadcast"] = mask_and_ipv4_to_bcast_addr(
+ iface_info["netmask"], addr
+ )
# Name server info provided??
- if 'dns-nameservers' in info:
- iface_info['dns-nameservers'] = info['dns-nameservers'].split()
+ if "dns-nameservers" in info:
+ iface_info["dns-nameservers"] = info["dns-nameservers"].split()
# Name server search info provided??
- if 'dns-search' in info:
- iface_info['dns-search'] = info['dns-search'].split()
+ if "dns-search" in info:
+ iface_info["dns-search"] = info["dns-search"].split()
# Is any mac address spoofing going on??
- if 'hwaddress' in info:
- hw_info = info['hwaddress'].lower().strip()
+ if "hwaddress" in info:
+ hw_info = info["hwaddress"].lower().strip()
hw_split = hw_info.split(None, 1)
- if len(hw_split) == 2 and hw_split[0].startswith('ether'):
+ if len(hw_split) == 2 and hw_split[0].startswith("ether"):
hw_addr = hw_split[1]
if hw_addr:
- iface_info['hwaddress'] = hw_addr
+ iface_info["hwaddress"] = hw_addr
# If ipv6 is enabled, device will have multiple IPs, so we need to
# update the dictionary instead of overwriting it...
if dev_name in real_ifaces:
@@ -179,13 +180,14 @@ def translate_network(settings):
if not args:
continue
dev_name = args[0].strip().lower()
- if cmd == 'auto':
+ if cmd == "auto":
# Seems like auto can be like 'auto eth0 eth0:1' so just get the
# first part out as the device name
if dev_name in real_ifaces:
- real_ifaces[dev_name]['auto'] = True
- if cmd == 'iface' and 'inet6' in args:
- real_ifaces[dev_name]['inet6'] = True
+ real_ifaces[dev_name]["auto"] = True
+ if cmd == "iface" and "inet6" in args:
+ real_ifaces[dev_name]["inet6"] = True
return real_ifaces
+
# vi: ts=4 expandtab
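Note on translate_network in net_util.py above: when a static interface carries an address in address/prefix form, the netmask and broadcast are derived from the prefix. A stdlib-only sketch of that step using ipaddress instead of the cloudinit.net.network_state helpers imported in the diff:

import ipaddress

def split_address(iface_info):
    # If 'address' carries a /prefix and no netmask was given,
    # derive the netmask and broadcast from the prefix length.
    addr, _, prefix = iface_info["address"].partition("/")
    if prefix and "netmask" not in iface_info:
        net = ipaddress.ip_network("%s/%s" % (addr, prefix), strict=False)
        iface_info["address"] = addr
        iface_info["netmask"] = str(net.netmask)
        iface_info["broadcast"] = str(net.broadcast_address)
    return iface_info

print(split_address({"address": "192.0.2.10/24"}))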
diff --git a/cloudinit/distros/netbsd.py b/cloudinit/distros/netbsd.py
index f1a9b182..9c38ae51 100644
--- a/cloudinit/distros/netbsd.py
+++ b/cloudinit/distros/netbsd.py
@@ -8,8 +8,7 @@ import platform
import cloudinit.distros.bsd
from cloudinit import log as logging
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
LOG = logging.getLogger(__name__)
@@ -21,42 +20,42 @@ class NetBSD(cloudinit.distros.bsd.BSD):
(N.B. OpenBSD inherits from this class.)
"""
- ci_sudoers_fn = '/usr/pkg/etc/sudoers.d/90-cloud-init-users'
+ ci_sudoers_fn = "/usr/pkg/etc/sudoers.d/90-cloud-init-users"
group_add_cmd_prefix = ["groupadd"]
def __init__(self, name, cfg, paths):
super().__init__(name, cfg, paths)
if os.path.exists("/usr/pkg/bin/pkgin"):
- self.pkg_cmd_install_prefix = ['pkgin', '-y', 'install']
- self.pkg_cmd_remove_prefix = ['pkgin', '-y', 'remove']
- self.pkg_cmd_update_prefix = ['pkgin', '-y', 'update']
- self.pkg_cmd_upgrade_prefix = ['pkgin', '-y', 'full-upgrade']
+ self.pkg_cmd_install_prefix = ["pkgin", "-y", "install"]
+ self.pkg_cmd_remove_prefix = ["pkgin", "-y", "remove"]
+ self.pkg_cmd_update_prefix = ["pkgin", "-y", "update"]
+ self.pkg_cmd_upgrade_prefix = ["pkgin", "-y", "full-upgrade"]
else:
- self.pkg_cmd_install_prefix = ['pkg_add', '-U']
- self.pkg_cmd_remove_prefix = ['pkg_delete']
+ self.pkg_cmd_install_prefix = ["pkg_add", "-U"]
+ self.pkg_cmd_remove_prefix = ["pkg_delete"]
def _get_add_member_to_group_cmd(self, member_name, group_name):
- return ['usermod', '-G', group_name, member_name]
+ return ["usermod", "-G", group_name, member_name]
def add_user(self, name, **kwargs):
if util.is_user(name):
LOG.info("User %s already exists, skipping.", name)
return False
- adduser_cmd = ['useradd']
- log_adduser_cmd = ['useradd']
+ adduser_cmd = ["useradd"]
+ log_adduser_cmd = ["useradd"]
adduser_opts = {
- "homedir": '-d',
- "gecos": '-c',
- "primary_group": '-g',
- "groups": '-G',
- "shell": '-s',
+ "homedir": "-d",
+ "gecos": "-c",
+ "primary_group": "-g",
+ "groups": "-G",
+ "shell": "-s",
}
adduser_flags = {
- "no_user_group": '--no-user-group',
- "system": '--system',
- "no_log_init": '--no-log-init',
+ "no_user_group": "--no-user-group",
+ "system": "--system",
+ "no_log_init": "--no-log-init",
}
for key, val in kwargs.items():
@@ -67,9 +66,9 @@ class NetBSD(cloudinit.distros.bsd.BSD):
adduser_cmd.append(adduser_flags[key])
log_adduser_cmd.append(adduser_flags[key])
- if 'no_create_home' not in kwargs or 'system' not in kwargs:
- adduser_cmd += ['-m']
- log_adduser_cmd += ['-m']
+ if "no_create_home" not in kwargs or "system" not in kwargs:
+ adduser_cmd += ["-m"]
+ log_adduser_cmd += ["-m"]
adduser_cmd += [name]
log_adduser_cmd += [name]
@@ -83,29 +82,28 @@ class NetBSD(cloudinit.distros.bsd.BSD):
raise
# Set the password if it is provided
# For security consideration, only hashed passwd is assumed
- passwd_val = kwargs.get('passwd', None)
+ passwd_val = kwargs.get("passwd", None)
if passwd_val is not None:
self.set_passwd(name, passwd_val, hashed=True)
def set_passwd(self, user, passwd, hashed=False):
if hashed:
hashed_pw = passwd
- elif not hasattr(crypt, 'METHOD_BLOWFISH'):
+ elif not hasattr(crypt, "METHOD_BLOWFISH"):
# crypt.METHOD_BLOWFISH comes with Python 3.7 which is available
# on NetBSD 7 and 8.
- LOG.error((
- 'Cannot set non-encrypted password for user %s. '
- 'Python >= 3.7 is required.'), user)
+ LOG.error(
+ "Cannot set non-encrypted password for user %s. "
+ "Python >= 3.7 is required.",
+ user,
+ )
return
else:
method = crypt.METHOD_BLOWFISH # pylint: disable=E1101
- hashed_pw = crypt.crypt(
- passwd,
- crypt.mksalt(method)
- )
+ hashed_pw = crypt.crypt(passwd, crypt.mksalt(method))
try:
- subp.subp(['usermod', '-p', hashed_pw, user])
+ subp.subp(["usermod", "-p", hashed_pw, user])
except Exception:
util.logexc(LOG, "Failed to set password for %s", user)
raise
@@ -113,40 +111,42 @@ class NetBSD(cloudinit.distros.bsd.BSD):
def force_passwd_change(self, user):
try:
- subp.subp(['usermod', '-F', user])
+ subp.subp(["usermod", "-F", user])
except Exception:
util.logexc(LOG, "Failed to set pw expiration for %s", user)
raise
def lock_passwd(self, name):
try:
- subp.subp(['usermod', '-C', 'yes', name])
+ subp.subp(["usermod", "-C", "yes", name])
except Exception:
util.logexc(LOG, "Failed to lock user %s", name)
raise
def unlock_passwd(self, name):
try:
- subp.subp(['usermod', '-C', 'no', name])
+ subp.subp(["usermod", "-C", "no", name])
except Exception:
util.logexc(LOG, "Failed to unlock user %s", name)
raise
def apply_locale(self, locale, out_fn=None):
- LOG.debug('Cannot set the locale.')
+ LOG.debug("Cannot set the locale.")
def apply_network_config_names(self, netconfig):
- LOG.debug('NetBSD cannot rename network interface.')
+ LOG.debug("NetBSD cannot rename network interface.")
def _get_pkg_cmd_environ(self):
"""Return env vars used in NetBSD package_command operations"""
os_release = platform.release()
os_arch = platform.machine()
e = os.environ.copy()
- e['PKG_PATH'] = (
- 'http://cdn.netbsd.org/pub/pkgsrc/'
- 'packages/NetBSD/%s/%s/All'
- ) % (os_arch, os_release)
+ e[
+ "PKG_PATH"
+ ] = "http://cdn.netbsd.org/pub/pkgsrc/packages/NetBSD/%s/%s/All" % (
+ os_arch,
+ os_release,
+ )
return e
def update_package_sources(self):
@@ -156,4 +156,5 @@ class NetBSD(cloudinit.distros.bsd.BSD):
class Distro(NetBSD):
pass
+
# vi: ts=4 expandtab
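Note on set_passwd in netbsd.py above: plaintext passwords are hashed with Blowfish via the stdlib crypt module, and the operation is refused when crypt.METHOD_BLOWFISH is unavailable (it requires Python >= 3.7 on NetBSD 7/8). A minimal sketch of that hashing step; crypt is POSIX-only and has been removed from the stdlib in newer Python releases, so treat this as illustrative:

import crypt

def hash_password_blowfish(plaintext):
    # Refuse rather than store a weaker hash when the platform's
    # crypt(3) does not expose Blowfish, as the code above does.
    if not hasattr(crypt, "METHOD_BLOWFISH"):
        raise RuntimeError("crypt.METHOD_BLOWFISH is not available here")
    return crypt.crypt(plaintext, crypt.mksalt(crypt.METHOD_BLOWFISH))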
diff --git a/cloudinit/distros/networking.py b/cloudinit/distros/networking.py
index c291196a..e18a48ca 100644
--- a/cloudinit/distros/networking.py
+++ b/cloudinit/distros/networking.py
@@ -2,9 +2,7 @@ import abc
import logging
import os
-from cloudinit import subp
-from cloudinit import net, util
-
+from cloudinit import net, subp, util
LOG = logging.getLogger(__name__)
@@ -73,7 +71,8 @@ class Networking(metaclass=abc.ABCMeta):
def get_interfaces_by_mac(self) -> dict:
return net.get_interfaces_by_mac(
- blacklist_drivers=self.blacklist_drivers)
+ blacklist_drivers=self.blacklist_drivers
+ )
def get_master(self, devname: DeviceName):
return net.get_master(devname)
@@ -225,7 +224,7 @@ class LinuxNetworking(Networking):
def try_set_link_up(self, devname: DeviceName) -> bool:
"""Try setting the link to up explicitly and return if it is up.
- Not guaranteed to bring the interface up. The caller is expected to
- add wait times before retrying."""
- subp.subp(['ip', 'link', 'set', devname, 'up'])
+ Not guaranteed to bring the interface up. The caller is expected to
+ add wait times before retrying."""
+ subp.subp(["ip", "link", "set", devname, "up"])
return self.is_up(devname)
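Note on LinuxNetworking.try_set_link_up above: the method issues `ip link set <dev> up` once and reports the resulting state, so callers are expected to add their own wait times between retries. A hypothetical caller-side sketch (the networking object, retry count, and delay are assumptions, not part of the commit):

import time

def bring_up_with_retries(networking, devname, retries=5, delay=1.0):
    # try_set_link_up gives no guarantee the link comes up, so poll
    # it a few times with a pause between attempts.
    for _ in range(retries):
        if networking.try_set_link_up(devname):
            return True
        time.sleep(delay)
    return False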
diff --git a/cloudinit/distros/openEuler.py b/cloudinit/distros/openEuler.py
index edb3165d..3dc0a342 100644
--- a/cloudinit/distros/openEuler.py
+++ b/cloudinit/distros/openEuler.py
@@ -6,4 +6,5 @@ from cloudinit.distros import rhel
class Distro(rhel.Distro):
pass
+
# vi: ts=4 expandtab
diff --git a/cloudinit/distros/openbsd.py b/cloudinit/distros/openbsd.py
index 720c9cf3..ccdb8799 100644
--- a/cloudinit/distros/openbsd.py
+++ b/cloudinit/distros/openbsd.py
@@ -7,28 +7,27 @@ import platform
import cloudinit.distros.netbsd
from cloudinit import log as logging
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
LOG = logging.getLogger(__name__)
class Distro(cloudinit.distros.netbsd.NetBSD):
- hostname_conf_fn = '/etc/myname'
+ hostname_conf_fn = "/etc/myname"
def _read_hostname(self, filename, default=None):
return util.load_file(self.hostname_conf_fn)
def _write_hostname(self, hostname, filename):
- content = hostname + '\n'
+ content = hostname + "\n"
util.write_file(self.hostname_conf_fn, content)
def _get_add_member_to_group_cmd(self, member_name, group_name):
- return ['usermod', '-G', group_name, member_name]
+ return ["usermod", "-G", group_name, member_name]
def lock_passwd(self, name):
try:
- subp.subp(['usermod', '-p', '*', name])
+ subp.subp(["usermod", "-p", "*", name])
except Exception:
util.logexc(LOG, "Failed to lock user %s", name)
raise
@@ -41,11 +40,10 @@ class Distro(cloudinit.distros.netbsd.NetBSD):
os_release = platform.release()
os_arch = platform.machine()
e = os.environ.copy()
- e['PKG_PATH'] = (
- 'ftp://ftp.openbsd.org/pub/OpenBSD/{os_release}/'
- 'packages/{os_arch}/').format(
- os_arch=os_arch, os_release=os_release
- )
+ e["PKG_PATH"] = (
+ "ftp://ftp.openbsd.org/pub/OpenBSD/{os_release}/"
+ "packages/{os_arch}/"
+ ).format(os_arch=os_arch, os_release=os_release)
return e
diff --git a/cloudinit/distros/opensuse.py b/cloudinit/distros/opensuse.py
index 2a7497cc..00ed1514 100644
--- a/cloudinit/distros/opensuse.py
+++ b/cloudinit/distros/opensuse.py
@@ -8,68 +8,61 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import distros
-
-from cloudinit.distros.parsers.hostname import HostnameConf
-
-from cloudinit import helpers
-from cloudinit import subp
-from cloudinit import util
-
+from cloudinit import distros, helpers, subp, util
from cloudinit.distros import rhel_util as rhutil
+from cloudinit.distros.parsers.hostname import HostnameConf
from cloudinit.settings import PER_INSTANCE
class Distro(distros.Distro):
- clock_conf_fn = '/etc/sysconfig/clock'
- hostname_conf_fn = '/etc/HOSTNAME'
- init_cmd = ['service']
- locale_conf_fn = '/etc/sysconfig/language'
- network_conf_fn = '/etc/sysconfig/network/config'
- network_script_tpl = '/etc/sysconfig/network/ifcfg-%s'
- route_conf_tpl = '/etc/sysconfig/network/ifroute-%s'
- systemd_hostname_conf_fn = '/etc/hostname'
- systemd_locale_conf_fn = '/etc/locale.conf'
- tz_local_fn = '/etc/localtime'
+ clock_conf_fn = "/etc/sysconfig/clock"
+ hostname_conf_fn = "/etc/HOSTNAME"
+ init_cmd = ["service"]
+ locale_conf_fn = "/etc/sysconfig/language"
+ network_conf_fn = "/etc/sysconfig/network/config"
+ network_script_tpl = "/etc/sysconfig/network/ifcfg-%s"
+ route_conf_tpl = "/etc/sysconfig/network/ifroute-%s"
+ systemd_hostname_conf_fn = "/etc/hostname"
+ systemd_locale_conf_fn = "/etc/locale.conf"
+ tz_local_fn = "/etc/localtime"
renderer_configs = {
- 'sysconfig': {
- 'control': 'etc/sysconfig/network/config',
- 'flavor': 'suse',
- 'iface_templates': '%(base)s/network/ifcfg-%(name)s',
- 'netrules_path': (
- 'etc/udev/rules.d/85-persistent-net-cloud-init.rules'),
- 'route_templates': {
- 'ipv4': '%(base)s/network/ifroute-%(name)s',
- 'ipv6': '%(base)s/network/ifroute-%(name)s',
- }
+ "sysconfig": {
+ "control": "etc/sysconfig/network/config",
+ "flavor": "suse",
+ "iface_templates": "%(base)s/network/ifcfg-%(name)s",
+ "netrules_path": (
+ "etc/udev/rules.d/85-persistent-net-cloud-init.rules"
+ ),
+ "route_templates": {
+ "ipv4": "%(base)s/network/ifroute-%(name)s",
+ "ipv6": "%(base)s/network/ifroute-%(name)s",
+ },
}
}
def __init__(self, name, cfg, paths):
distros.Distro.__init__(self, name, cfg, paths)
self._runner = helpers.Runners(paths)
- self.osfamily = 'suse'
- cfg['ssh_svcname'] = 'sshd'
+ self.osfamily = "suse"
+ cfg["ssh_svcname"] = "sshd"
if self.uses_systemd():
- self.init_cmd = ['systemctl']
- cfg['ssh_svcname'] = 'sshd.service'
+ self.init_cmd = ["systemctl"]
+ cfg["ssh_svcname"] = "sshd.service"
def apply_locale(self, locale, out_fn=None):
if self.uses_systemd():
if not out_fn:
out_fn = self.systemd_locale_conf_fn
- locale_cfg = {'LANG': locale}
+ locale_cfg = {"LANG": locale}
else:
if not out_fn:
out_fn = self.locale_conf_fn
- locale_cfg = {'RC_LANG': locale}
+ locale_cfg = {"RC_LANG": locale}
rhutil.update_sysconfig_file(out_fn, locale_cfg)
def install_packages(self, pkglist):
self.package_command(
- 'install',
- args='--auto-agree-with-licenses',
- pkgs=pkglist
+ "install", args="--auto-agree-with-licenses", pkgs=pkglist
)
def package_command(self, command, args=None, pkgs=None):
@@ -77,11 +70,11 @@ class Distro(distros.Distro):
pkgs = []
# No user interaction possible, enable non-interactive mode
- cmd = ['zypper', '--non-interactive']
+ cmd = ["zypper", "--non-interactive"]
# Command is the operation, such as install
- if command == 'upgrade':
- command = 'update'
+ if command == "upgrade":
+ command = "update"
cmd.append(command)
# args are the arguments to the command, not global options
@@ -90,7 +83,7 @@ class Distro(distros.Distro):
elif args and isinstance(args, list):
cmd.extend(args)
- pkglist = util.expand_package_list('%s-%s', pkgs)
+ pkglist = util.expand_package_list("%s-%s", pkgs)
cmd.extend(pkglist)
# Allow the output of this to flow outwards (ie not be captured)
@@ -106,21 +99,25 @@ class Distro(distros.Distro):
else:
# Adjust the sysconfig clock zone setting
clock_cfg = {
- 'TIMEZONE': str(tz),
+ "TIMEZONE": str(tz),
}
rhutil.update_sysconfig_file(self.clock_conf_fn, clock_cfg)
# This ensures that the correct tz will be used for the system
util.copy(tz_file, self.tz_local_fn)
def update_package_sources(self):
- self._runner.run("update-sources", self.package_command,
- ['refresh'], freq=PER_INSTANCE)
+ self._runner.run(
+ "update-sources",
+ self.package_command,
+ ["refresh"],
+ freq=PER_INSTANCE,
+ )
def _read_hostname(self, filename, default=None):
- if self.uses_systemd() and filename.endswith('/previous-hostname'):
+ if self.uses_systemd() and filename.endswith("/previous-hostname"):
return util.load_file(filename).strip()
elif self.uses_systemd():
- (out, _err) = subp.subp(['hostname'])
+ (out, _err) = subp.subp(["hostname"])
if len(out):
return out
else:
@@ -151,10 +148,10 @@ class Distro(distros.Distro):
return (host_fn, self._read_hostname(host_fn))
def _write_hostname(self, hostname, filename):
- if self.uses_systemd() and filename.endswith('/previous-hostname'):
+ if self.uses_systemd() and filename.endswith("/previous-hostname"):
util.write_file(filename, hostname)
elif self.uses_systemd():
- subp.subp(['hostnamectl', 'set-hostname', str(hostname)])
+ subp.subp(["hostnamectl", "set-hostname", str(hostname)])
else:
conf = None
try:
@@ -164,7 +161,7 @@ class Distro(distros.Distro):
except IOError:
pass
if not conf:
- conf = HostnameConf('')
+ conf = HostnameConf("")
conf.set_hostname(hostname)
util.write_file(filename, str(conf), 0o644)
@@ -174,22 +171,28 @@ class Distro(distros.Distro):
# Allow distro to determine the preferred ntp client list
if not self._preferred_ntp_clients:
- distro_info = util.system_info()['dist']
+ distro_info = util.system_info()["dist"]
name = distro_info[0]
- major_ver = int(distro_info[1].split('.')[0])
+ major_ver = int(distro_info[1].split(".")[0])
# This is horribly complicated because of a case of
# "we do not care if versions should be increasing syndrome"
- if (
- (major_ver >= 15 and 'openSUSE' not in name) or
- (major_ver >= 15 and 'openSUSE' in name and major_ver != 42)
+ if (major_ver >= 15 and "openSUSE" not in name) or (
+ major_ver >= 15 and "openSUSE" in name and major_ver != 42
):
- self._preferred_ntp_clients = ['chrony',
- 'systemd-timesyncd', 'ntp']
+ self._preferred_ntp_clients = [
+ "chrony",
+ "systemd-timesyncd",
+ "ntp",
+ ]
else:
- self._preferred_ntp_clients = ['ntp',
- 'systemd-timesyncd', 'chrony']
+ self._preferred_ntp_clients = [
+ "ntp",
+ "systemd-timesyncd",
+ "chrony",
+ ]
return self._preferred_ntp_clients
+
# vi: ts=4 expandtab
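Note: the hunk above only re-wraps the condition; the SUSE NTP-client ordering itself is unchanged. A standalone sketch of that rule (the function name and sample calls are illustrative, not part of the patch):

    def preferred_ntp_clients(name, version):
        """Mirror the SUSE ordering shown above: 15+ prefers chrony, older prefers ntp."""
        major_ver = int(version.split(".")[0])
        if (major_ver >= 15 and "openSUSE" not in name) or (
            major_ver >= 15 and "openSUSE" in name and major_ver != 42
        ):
            return ["chrony", "systemd-timesyncd", "ntp"]
        return ["ntp", "systemd-timesyncd", "chrony"]

    preferred_ntp_clients("openSUSE Leap", "15.3")  # -> ['chrony', 'systemd-timesyncd', 'ntp']
    preferred_ntp_clients("openSUSE Leap", "42.3")  # -> ['ntp', 'systemd-timesyncd', 'chrony']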
diff --git a/cloudinit/distros/parsers/__init__.py b/cloudinit/distros/parsers/__init__.py
index 6b5b6dde..5bea2ae1 100644
--- a/cloudinit/distros/parsers/__init__.py
+++ b/cloudinit/distros/parsers/__init__.py
@@ -9,10 +9,11 @@ def chop_comment(text, comment_chars):
comment_locations = [text.find(c) for c in comment_chars]
comment_locations = [c for c in comment_locations if c != -1]
if not comment_locations:
- return (text, '')
+ return (text, "")
min_comment = min(comment_locations)
before_comment = text[0:min_comment]
comment = text[min_comment:]
return (before_comment, comment)
+
# vi: ts=4 expandtab
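Note: chop_comment is the helper shared by the hostname, hosts and resolv.conf parsers touched below; the only change here is the quote style on the empty-comment return. For reference, a self-contained copy with a sample call (the sample line is made up):

    def chop_comment(text, comment_chars):
        """Split a line at the first comment character, keeping the comment tail."""
        comment_locations = [text.find(c) for c in comment_chars]
        comment_locations = [c for c in comment_locations if c != -1]
        if not comment_locations:
            return (text, "")
        min_comment = min(comment_locations)
        return (text[0:min_comment], text[min_comment:])

    chop_comment("nameserver 1.1.1.1  # primary", "#")
    # -> ("nameserver 1.1.1.1  ", "# primary")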
diff --git a/cloudinit/distros/parsers/hostname.py b/cloudinit/distros/parsers/hostname.py
index e74c083c..61674082 100644
--- a/cloudinit/distros/parsers/hostname.py
+++ b/cloudinit/distros/parsers/hostname.py
@@ -23,11 +23,11 @@ class HostnameConf(object):
self.parse()
contents = StringIO()
for (line_type, components) in self._contents:
- if line_type == 'blank':
+ if line_type == "blank":
contents.write("%s\n" % (components[0]))
- elif line_type == 'all_comment':
+ elif line_type == "all_comment":
contents.write("%s\n" % (components[0]))
- elif line_type == 'hostname':
+ elif line_type == "hostname":
(hostname, tail) = components
contents.write("%s%s\n" % (hostname, tail))
# Ensure trailing newline
@@ -40,7 +40,7 @@ class HostnameConf(object):
def hostname(self):
self.parse()
for (line_type, components) in self._contents:
- if line_type == 'hostname':
+ if line_type == "hostname":
return components[0]
return None
@@ -51,28 +51,28 @@ class HostnameConf(object):
self.parse()
replaced = False
for (line_type, components) in self._contents:
- if line_type == 'hostname':
+ if line_type == "hostname":
components[0] = str(your_hostname)
replaced = True
if not replaced:
- self._contents.append(('hostname', [str(your_hostname), '']))
+ self._contents.append(("hostname", [str(your_hostname), ""]))
def _parse(self, contents):
entries = []
hostnames_found = set()
for line in contents.splitlines():
if not len(line.strip()):
- entries.append(('blank', [line]))
+ entries.append(("blank", [line]))
continue
- (head, tail) = chop_comment(line.strip(), '#')
+ (head, tail) = chop_comment(line.strip(), "#")
if not len(head):
- entries.append(('all_comment', [line]))
+ entries.append(("all_comment", [line]))
continue
- entries.append(('hostname', [head, tail]))
+ entries.append(("hostname", [head, tail]))
hostnames_found.add(head)
if len(hostnames_found) > 1:
- raise IOError("Multiple hostnames (%s) found!"
- % (hostnames_found))
+ raise IOError("Multiple hostnames (%s) found!" % (hostnames_found))
return entries
+
# vi: ts=4 expandtab
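Note: besides quoting, the only change in this hunk is re-wrapping the "multiple hostnames" IOError; the parse rules stay the same: blank lines, comment-only lines, and at most one hostname line. A compact standalone version of that classification (str.partition stands in for chop_comment here):

    def classify_hostname_lines(contents):
        """Tag lines as blank, all_comment or hostname, as HostnameConf._parse does."""
        entries, hostnames = [], set()
        for line in contents.splitlines():
            if not line.strip():
                entries.append(("blank", [line]))
                continue
            head, _, tail = line.strip().partition("#")
            if not head:
                entries.append(("all_comment", [line]))
                continue
            entries.append(("hostname", [head.strip(), "#" + tail if tail else ""]))
            hostnames.add(head.strip())
        if len(hostnames) > 1:
            raise IOError("Multiple hostnames (%s) found!" % hostnames)
        return entries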
diff --git a/cloudinit/distros/parsers/hosts.py b/cloudinit/distros/parsers/hosts.py
index 54e4e934..e43880af 100644
--- a/cloudinit/distros/parsers/hosts.py
+++ b/cloudinit/distros/parsers/hosts.py
@@ -25,7 +25,7 @@ class HostsConf(object):
self.parse()
options = []
for (line_type, components) in self._contents:
- if line_type == 'option':
+ if line_type == "option":
(pieces, _tail) = components
if len(pieces) and pieces[0] == ip:
options.append(pieces[1:])
@@ -35,7 +35,7 @@ class HostsConf(object):
self.parse()
n_entries = []
for (line_type, components) in self._contents:
- if line_type != 'option':
+ if line_type != "option":
n_entries.append((line_type, components))
continue
else:
@@ -48,35 +48,37 @@ class HostsConf(object):
def add_entry(self, ip, canonical_hostname, *aliases):
self.parse()
- self._contents.append(('option',
- ([ip, canonical_hostname] + list(aliases), '')))
+ self._contents.append(
+ ("option", ([ip, canonical_hostname] + list(aliases), ""))
+ )
def _parse(self, contents):
entries = []
for line in contents.splitlines():
if not len(line.strip()):
- entries.append(('blank', [line]))
+ entries.append(("blank", [line]))
continue
- (head, tail) = chop_comment(line.strip(), '#')
+ (head, tail) = chop_comment(line.strip(), "#")
if not len(head):
- entries.append(('all_comment', [line]))
+ entries.append(("all_comment", [line]))
continue
- entries.append(('option', [head.split(None), tail]))
+ entries.append(("option", [head.split(None), tail]))
return entries
def __str__(self):
self.parse()
contents = StringIO()
for (line_type, components) in self._contents:
- if line_type == 'blank':
+ if line_type == "blank":
contents.write("%s\n" % (components[0]))
- elif line_type == 'all_comment':
+ elif line_type == "all_comment":
contents.write("%s\n" % (components[0]))
- elif line_type == 'option':
+ elif line_type == "option":
(pieces, tail) = components
pieces = [str(p) for p in pieces]
pieces = "\t".join(pieces)
contents.write("%s%s\n" % (pieces, tail))
return contents.getvalue()
+
# vi: ts=4 expandtab
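Note: HostsConf stores each /etc/hosts line as (line_type, components) and, as the __str__ hunk shows, joins the pieces of an "option" entry with tabs. A small standalone renderer with a made-up entry:

    from io import StringIO

    def render_hosts(entries):
        """Render (line_type, components) tuples the way HostsConf.__str__ does."""
        out = StringIO()
        for line_type, components in entries:
            if line_type in ("blank", "all_comment"):
                out.write("%s\n" % components[0])
            elif line_type == "option":
                pieces, tail = components
                out.write("%s%s\n" % ("\t".join(str(p) for p in pieces), tail))
        return out.getvalue()

    render_hosts([("option", (["127.0.1.1", "myhost", "myhost.local"], ""))])
    # "127.0.1.1\tmyhost\tmyhost.local\n"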
diff --git a/cloudinit/distros/parsers/networkmanager_conf.py b/cloudinit/distros/parsers/networkmanager_conf.py
index ac51f122..4b669b0f 100644
--- a/cloudinit/distros/parsers/networkmanager_conf.py
+++ b/cloudinit/distros/parsers/networkmanager_conf.py
@@ -13,9 +13,9 @@ import configobj
class NetworkManagerConf(configobj.ConfigObj):
def __init__(self, contents):
- configobj.ConfigObj.__init__(self, contents,
- interpolation=False,
- write_empty_values=False)
+ configobj.ConfigObj.__init__(
+ self, contents, interpolation=False, write_empty_values=False
+ )
def set_section_keypair(self, section_name, key, value):
if section_name not in self.sections:
diff --git a/cloudinit/distros/parsers/resolv_conf.py b/cloudinit/distros/parsers/resolv_conf.py
index 62929d03..0ef4e147 100644
--- a/cloudinit/distros/parsers/resolv_conf.py
+++ b/cloudinit/distros/parsers/resolv_conf.py
@@ -6,9 +6,9 @@
from io import StringIO
-from cloudinit.distros.parsers import chop_comment
from cloudinit import log as logging
from cloudinit import util
+from cloudinit.distros.parsers import chop_comment
LOG = logging.getLogger(__name__)
@@ -26,12 +26,12 @@ class ResolvConf(object):
@property
def nameservers(self):
self.parse()
- return self._retr_option('nameserver')
+ return self._retr_option("nameserver")
@property
def local_domain(self):
self.parse()
- dm = self._retr_option('domain')
+ dm = self._retr_option("domain")
if dm:
return dm[0]
return None
@@ -39,7 +39,7 @@ class ResolvConf(object):
@property
def search_domains(self):
self.parse()
- current_sds = self._retr_option('search')
+ current_sds = self._retr_option("search")
flat_sds = []
for sdlist in current_sds:
for sd in sdlist.split(None):
@@ -51,11 +51,11 @@ class ResolvConf(object):
self.parse()
contents = StringIO()
for (line_type, components) in self._contents:
- if line_type == 'blank':
+ if line_type == "blank":
contents.write("\n")
- elif line_type == 'all_comment':
+ elif line_type == "all_comment":
contents.write("%s\n" % (components[0]))
- elif line_type == 'option':
+ elif line_type == "option":
(cfg_opt, cfg_value, comment_tail) = components
line = "%s %s" % (cfg_opt, cfg_value)
if len(comment_tail):
@@ -66,7 +66,7 @@ class ResolvConf(object):
def _retr_option(self, opt_name):
found = []
for (line_type, components) in self._contents:
- if line_type == 'option':
+ if line_type == "option":
(cfg_opt, cfg_value, _comment_tail) = components
if cfg_opt == opt_name:
found.append(cfg_value)
@@ -74,27 +74,29 @@ class ResolvConf(object):
def add_nameserver(self, ns):
self.parse()
- current_ns = self._retr_option('nameserver')
+ current_ns = self._retr_option("nameserver")
new_ns = list(current_ns)
new_ns.append(str(ns))
new_ns = util.uniq_list(new_ns)
if len(new_ns) == len(current_ns):
return current_ns
if len(current_ns) >= 3:
- LOG.warning("ignoring nameserver %r: adding would "
- "exceed the maximum of "
- "'3' name servers (see resolv.conf(5))", ns)
+ LOG.warning(
+ "ignoring nameserver %r: adding would "
+ "exceed the maximum of "
+ "'3' name servers (see resolv.conf(5))",
+ ns,
+ )
return current_ns[:3]
- self._remove_option('nameserver')
+ self._remove_option("nameserver")
for n in new_ns:
- self._contents.append(('option', ['nameserver', n, '']))
+ self._contents.append(("option", ["nameserver", n, ""]))
return new_ns
def _remove_option(self, opt_name):
-
def remove_opt(item):
line_type, components = item
- if line_type != 'option':
+ if line_type != "option":
return False
(cfg_opt, _cfg_value, _comment_tail) = components
if cfg_opt != opt_name:
@@ -116,23 +118,26 @@ class ResolvConf(object):
return new_sds
if len(flat_sds) >= 6:
# Hard restriction on only 6 search domains
- raise ValueError(("Adding %r would go beyond the "
- "'6' maximum search domains") % (search_domain))
+ raise ValueError(
+ "Adding %r would go beyond the '6' maximum search domains"
+ % (search_domain)
+ )
s_list = " ".join(new_sds)
if len(s_list) > 256:
# Some hard limit on 256 chars total
- raise ValueError(("Adding %r would go beyond the "
- "256 maximum search list character limit")
- % (search_domain))
- self._remove_option('search')
- self._contents.append(('option', ['search', s_list, '']))
+ raise ValueError(
+ "Adding %r would go beyond the "
+ "256 maximum search list character limit" % (search_domain)
+ )
+ self._remove_option("search")
+ self._contents.append(("option", ["search", s_list, ""]))
return flat_sds
@local_domain.setter
def local_domain(self, domain):
self.parse()
- self._remove_option('domain')
- self._contents.append(('option', ['domain', str(domain), '']))
+ self._remove_option("domain")
+ self._contents.append(("option", ["domain", str(domain), ""]))
return domain
def _parse(self, contents):
@@ -140,24 +145,30 @@ class ResolvConf(object):
for (i, line) in enumerate(contents.splitlines()):
sline = line.strip()
if not sline:
- entries.append(('blank', [line]))
+ entries.append(("blank", [line]))
continue
- (head, tail) = chop_comment(line, ';#')
+ (head, tail) = chop_comment(line, ";#")
if not len(head.strip()):
- entries.append(('all_comment', [line]))
+ entries.append(("all_comment", [line]))
continue
if not tail:
- tail = ''
+ tail = ""
try:
(cfg_opt, cfg_values) = head.split(None, 1)
except (IndexError, ValueError) as e:
raise IOError(
"Incorrectly formatted resolv.conf line %s" % (i + 1)
) from e
- if cfg_opt not in ['nameserver', 'domain',
- 'search', 'sortlist', 'options']:
+ if cfg_opt not in [
+ "nameserver",
+ "domain",
+ "search",
+ "sortlist",
+ "options",
+ ]:
raise IOError("Unexpected resolv.conf option %s" % (cfg_opt))
entries.append(("option", [cfg_opt, cfg_values, tail]))
return entries
+
# vi: ts=4 expandtab
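Note: the ResolvConf limits are untouched by the reformat: at most 3 nameservers are honoured, and search domains are capped at 6 entries and 256 characters total. A standalone sketch of the search-domain guard (the de-duplication at the top of add_search_domain sits partly outside this hunk, so this is an approximation):

    def add_search_domain(current, new_domain):
        """Apply the resolv.conf(5)-style limits used by ResolvConf.add_search_domain."""
        if new_domain in current:
            return list(current)
        if len(current) >= 6:
            raise ValueError(
                "Adding %r would go beyond the '6' maximum search domains" % new_domain
            )
        updated = list(current) + [new_domain]
        if len(" ".join(updated)) > 256:
            raise ValueError(
                "Adding %r would go beyond the 256 maximum search list character limit"
                % new_domain
            )
        return updated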
diff --git a/cloudinit/distros/parsers/sys_conf.py b/cloudinit/distros/parsers/sys_conf.py
index dee4c551..4132734c 100644
--- a/cloudinit/distros/parsers/sys_conf.py
+++ b/cloudinit/distros/parsers/sys_conf.py
@@ -20,7 +20,7 @@ import configobj
# See: http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap08.html
# or look at the 'param_expand()' function in the subst.c file in the bash
# source tarball...
-SHELL_VAR_RULE = r'[a-zA-Z_]+[a-zA-Z0-9_]*'
+SHELL_VAR_RULE = r"[a-zA-Z_]+[a-zA-Z0-9_]*"
SHELL_VAR_REGEXES = [
# Basic variables
re.compile(r"\$" + SHELL_VAR_RULE),
@@ -48,10 +48,11 @@ class SysConf(configobj.ConfigObj):
``configobj.ConfigObj.__init__`` (i.e. "a filename, file like object,
or list of lines").
"""
+
def __init__(self, contents):
- configobj.ConfigObj.__init__(self, contents,
- interpolation=False,
- write_empty_values=True)
+ configobj.ConfigObj.__init__(
+ self, contents, interpolation=False, write_empty_values=True
+ )
def __str__(self):
contents = self.write()
@@ -66,11 +67,13 @@ class SysConf(configobj.ConfigObj):
if not isinstance(value, str):
raise ValueError('Value "%s" is not a string' % (value))
if len(value) == 0:
- return ''
+ return ""
quot_func = None
if value[0] in ['"', "'"] and value[-1] in ['"', "'"]:
if len(value) == 1:
- quot_func = (lambda x: self._get_single_quote(x) % x)
+ quot_func = (
+ lambda x: self._get_single_quote(x) % x
+ ) # noqa: E731
else:
# Quote whitespace if it isn't the start + end of a shell command
if value.strip().startswith("$(") and value.strip().endswith(")"):
@@ -82,11 +85,13 @@ class SysConf(configobj.ConfigObj):
# leave it alone since the pipes.quote function likes
# to use single quotes which won't get expanded...
if re.search(r"[\n\"']", value):
- quot_func = (lambda x:
- self._get_triple_quote(x) % x)
+ quot_func = (
+ lambda x: self._get_triple_quote(x) % x
+ ) # noqa: E731
else:
- quot_func = (lambda x:
- self._get_single_quote(x) % x)
+ quot_func = (
+ lambda x: self._get_single_quote(x) % x
+ ) # noqa: E731
else:
quot_func = pipes.quote
if not quot_func:
@@ -99,10 +104,13 @@ class SysConf(configobj.ConfigObj):
val = self._decode_element(self._quote(this_entry))
key = self._decode_element(self._quote(entry))
cmnt = self._decode_element(comment)
- return '%s%s%s%s%s' % (indent_string,
- key,
- self._a_to_u('='),
- val,
- cmnt)
+ return "%s%s%s%s%s" % (
+ indent_string,
+ key,
+ self._a_to_u("="),
+ val,
+ cmnt,
+ )
+
# vi: ts=4 expandtab
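Note: SysConf._write_line still produces indent + key + "=" + quoted value + comment; only the string join was re-wrapped. A toy version of that assembly (shlex.quote stands in for the fuller _quote logic above, which also special-cases $() expressions and already-quoted values):

    from shlex import quote

    def write_sysconfig_line(key, value, comment="", indent=""):
        """Compose one sysconfig KEY=value line in the shape SysConf._write_line assembles."""
        return "%s%s%s%s%s" % (indent, key, "=", quote(str(value)), comment)

    write_sysconfig_line("NETWORKING", "yes")     # 'NETWORKING=yes'
    write_sysconfig_line("GATEWAY_DEV", "eth 0")  # "GATEWAY_DEV='eth 0'"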
diff --git a/cloudinit/distros/photon.py b/cloudinit/distros/photon.py
index 4ff90ea6..14cefe90 100644
--- a/cloudinit/distros/photon.py
+++ b/cloudinit/distros/photon.py
@@ -5,28 +5,25 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import net
-from cloudinit import util
-from cloudinit import subp
-from cloudinit import distros
-from cloudinit import helpers
+from cloudinit import distros, helpers
from cloudinit import log as logging
-from cloudinit.settings import PER_INSTANCE
+from cloudinit import net, subp, util
from cloudinit.distros import rhel_util as rhutil
+from cloudinit.settings import PER_INSTANCE
LOG = logging.getLogger(__name__)
class Distro(distros.Distro):
- systemd_hostname_conf_fn = '/etc/hostname'
- network_conf_dir = '/etc/systemd/network/'
- systemd_locale_conf_fn = '/etc/locale.conf'
- resolve_conf_fn = '/etc/systemd/resolved.conf'
+ systemd_hostname_conf_fn = "/etc/hostname"
+ network_conf_dir = "/etc/systemd/network/"
+ systemd_locale_conf_fn = "/etc/locale.conf"
+ resolve_conf_fn = "/etc/systemd/resolved.conf"
renderer_configs = {
- 'networkd': {
- 'resolv_conf_fn': resolve_conf_fn,
- 'network_conf_dir': network_conf_dir,
+ "networkd": {
+ "resolv_conf_fn": resolve_conf_fn,
+ "network_conf_dir": network_conf_dir,
}
}
@@ -39,33 +36,34 @@ class Distro(distros.Distro):
# calls from repeatly happening (when they
# should only happen say once per instance...)
self._runner = helpers.Runners(paths)
- self.osfamily = 'photon'
- self.init_cmd = ['systemctl']
+ self.osfamily = "photon"
+ self.init_cmd = ["systemctl"]
def exec_cmd(self, cmd, capture=True):
- LOG.debug('Attempting to run: %s', cmd)
+ LOG.debug("Attempting to run: %s", cmd)
try:
(out, err) = subp.subp(cmd, capture=capture)
if err:
- LOG.warning('Running %s resulted in stderr output: %s',
- cmd, err)
+ LOG.warning(
+ "Running %s resulted in stderr output: %s", cmd, err
+ )
return True, out, err
return False, out, err
except subp.ProcessExecutionError:
- util.logexc(LOG, 'Command %s failed', cmd)
+ util.logexc(LOG, "Command %s failed", cmd)
return True, None, None
def generate_fallback_config(self):
- key = 'disable_fallback_netcfg'
+ key = "disable_fallback_netcfg"
disable_fallback_netcfg = self._cfg.get(key, True)
- LOG.debug('%s value is: %s', key, disable_fallback_netcfg)
+ LOG.debug("%s value is: %s", key, disable_fallback_netcfg)
if not disable_fallback_netcfg:
return net.generate_fallback_config()
LOG.info(
- 'Skipping generate_fallback_config. Rely on PhotonOS default '
- 'network config'
+ "Skipping generate_fallback_config. Rely on PhotonOS default "
+ "network config"
)
return None
@@ -76,7 +74,7 @@ class Distro(distros.Distro):
out_fn = self.systemd_locale_conf_fn
locale_cfg = {
- 'LANG': locale,
+ "LANG": locale,
}
rhutil.update_sysconfig_file(out_fn, locale_cfg)
@@ -84,36 +82,42 @@ class Distro(distros.Distro):
# rhutil will modify /etc/locale.conf
# For locale change to take effect, reboot is needed or we can restart
# systemd-localed. This is equivalent of localectl
- cmd = ['systemctl', 'restart', 'systemd-localed']
+ cmd = ["systemctl", "restart", "systemd-localed"]
self.exec_cmd(cmd)
def install_packages(self, pkglist):
# self.update_package_sources()
- self.package_command('install', pkgs=pkglist)
+ self.package_command("install", pkgs=pkglist)
def _write_hostname(self, hostname, filename):
- if filename and filename.endswith('/previous-hostname'):
+ if filename and filename.endswith("/previous-hostname"):
util.write_file(filename, hostname)
else:
- ret, _out, err = self.exec_cmd(['hostnamectl', 'set-hostname',
- str(hostname)])
+ ret, _out, err = self.exec_cmd(
+ ["hostnamectl", "set-hostname", str(hostname)]
+ )
if ret:
- LOG.warning(('Error while setting hostname: %s\n'
- 'Given hostname: %s', err, hostname))
+ LOG.warning(
+ (
+ "Error while setting hostname: %s\nGiven hostname: %s",
+ err,
+ hostname,
+ )
+ )
def _read_system_hostname(self):
sys_hostname = self._read_hostname(self.systemd_hostname_conf_fn)
return (self.systemd_hostname_conf_fn, sys_hostname)
def _read_hostname(self, filename, default=None):
- if filename and filename.endswith('/previous-hostname'):
+ if filename and filename.endswith("/previous-hostname"):
return util.load_file(filename).strip()
- _ret, out, _err = self.exec_cmd(['hostname', '-f'])
+ _ret, out, _err = self.exec_cmd(["hostname", "-f"])
return out.strip() if out else default
def _get_localhost_ip(self):
- return '127.0.1.1'
+ return "127.0.1.1"
def set_timezone(self, tz):
distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz))
@@ -122,7 +126,7 @@ class Distro(distros.Distro):
if not pkgs:
pkgs = []
- cmd = ['tdnf', '-y']
+ cmd = ["tdnf", "-y"]
if args and isinstance(args, str):
cmd.append(args)
elif args and isinstance(args, list):
@@ -130,13 +134,17 @@ class Distro(distros.Distro):
cmd.append(command)
- pkglist = util.expand_package_list('%s-%s', pkgs)
+ pkglist = util.expand_package_list("%s-%s", pkgs)
cmd.extend(pkglist)
ret, _out, err = self.exec_cmd(cmd)
if ret:
- LOG.error('Error while installing packages: %s', err)
+ LOG.error("Error while installing packages: %s", err)
def update_package_sources(self):
- self._runner.run('update-sources', self.package_command,
- ['makecache'], freq=PER_INSTANCE)
+ self._runner.run(
+ "update-sources",
+ self.package_command,
+ ["makecache"],
+ freq=PER_INSTANCE,
+ )
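Note: photon's package_command still builds the same tdnf invocation; only literals and wrapping changed. A standalone sketch of that construction (the package-list expansion below is a simplified stand-in for util.expand_package_list):

    def build_tdnf_cmd(command, args=None, pkgs=None):
        """Assemble a tdnf command line in the shape photon.Distro.package_command uses."""
        pkgs = pkgs or []
        cmd = ["tdnf", "-y"]
        if isinstance(args, str):
            cmd.append(args)
        elif isinstance(args, list):
            cmd.extend(args)
        cmd.append(command)
        for pkg in pkgs:
            # (name, version) pairs become "name-version"; plain names pass through.
            cmd.append("%s-%s" % tuple(pkg) if isinstance(pkg, (list, tuple)) else pkg)
        return cmd

    build_tdnf_cmd("install", pkgs=["vim", ("openssh", "8.8")])
    # ['tdnf', '-y', 'install', 'vim', 'openssh-8.8']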
diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py
index c9ee2747..84744ece 100644
--- a/cloudinit/distros/rhel.py
+++ b/cloudinit/distros/rhel.py
@@ -8,12 +8,9 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import distros
-from cloudinit import helpers
+from cloudinit import distros, helpers
from cloudinit import log as logging
-from cloudinit import subp
-from cloudinit import util
-
+from cloudinit import subp, util
from cloudinit.distros import rhel_util
from cloudinit.settings import PER_INSTANCE
@@ -22,30 +19,30 @@ LOG = logging.getLogger(__name__)
def _make_sysconfig_bool(val):
if val:
- return 'yes'
+ return "yes"
else:
- return 'no'
+ return "no"
class Distro(distros.Distro):
# See: https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/7/html/Networking_Guide/sec-Network_Configuration_Using_sysconfig_Files.html # noqa
clock_conf_fn = "/etc/sysconfig/clock"
- locale_conf_fn = '/etc/sysconfig/i18n'
- systemd_locale_conf_fn = '/etc/locale.conf'
+ locale_conf_fn = "/etc/sysconfig/i18n"
+ systemd_locale_conf_fn = "/etc/locale.conf"
network_conf_fn = "/etc/sysconfig/network"
hostname_conf_fn = "/etc/sysconfig/network"
systemd_hostname_conf_fn = "/etc/hostname"
- network_script_tpl = '/etc/sysconfig/network-scripts/ifcfg-%s'
+ network_script_tpl = "/etc/sysconfig/network-scripts/ifcfg-%s"
tz_local_fn = "/etc/localtime"
usr_lib_exec = "/usr/libexec"
renderer_configs = {
- 'sysconfig': {
- 'control': 'etc/sysconfig/network',
- 'iface_templates': '%(base)s/network-scripts/ifcfg-%(name)s',
- 'route_templates': {
- 'ipv4': '%(base)s/network-scripts/route-%(name)s',
- 'ipv6': '%(base)s/network-scripts/route6-%(name)s'
- }
+ "sysconfig": {
+ "control": "etc/sysconfig/network",
+ "iface_templates": "%(base)s/network-scripts/ifcfg-%(name)s",
+ "route_templates": {
+ "ipv4": "%(base)s/network-scripts/route-%(name)s",
+ "ipv6": "%(base)s/network-scripts/route6-%(name)s",
+ },
}
}
@@ -59,11 +56,11 @@ class Distro(distros.Distro):
# calls from repeatly happening (when they
# should only happen say once per instance...)
self._runner = helpers.Runners(paths)
- self.osfamily = 'redhat'
- cfg['ssh_svcname'] = 'sshd'
+ self.osfamily = "redhat"
+ cfg["ssh_svcname"] = "sshd"
def install_packages(self, pkglist):
- self.package_command('install', pkgs=pkglist)
+ self.package_command("install", pkgs=pkglist)
def apply_locale(self, locale, out_fn=None):
if self.uses_systemd():
@@ -74,20 +71,20 @@ class Distro(distros.Distro):
if not out_fn:
out_fn = self.locale_conf_fn
locale_cfg = {
- 'LANG': locale,
+ "LANG": locale,
}
rhel_util.update_sysconfig_file(out_fn, locale_cfg)
def _write_hostname(self, hostname, filename):
# systemd will never update previous-hostname for us, so
# we need to do it ourselves
- if self.uses_systemd() and filename.endswith('/previous-hostname'):
+ if self.uses_systemd() and filename.endswith("/previous-hostname"):
util.write_file(filename, hostname)
elif self.uses_systemd():
- subp.subp(['hostnamectl', 'set-hostname', str(hostname)])
+ subp.subp(["hostnamectl", "set-hostname", str(hostname)])
else:
host_cfg = {
- 'HOSTNAME': hostname,
+ "HOSTNAME": hostname,
}
rhel_util.update_sysconfig_file(filename, host_cfg)
@@ -99,18 +96,18 @@ class Distro(distros.Distro):
return (host_fn, self._read_hostname(host_fn))
def _read_hostname(self, filename, default=None):
- if self.uses_systemd() and filename.endswith('/previous-hostname'):
+ if self.uses_systemd() and filename.endswith("/previous-hostname"):
return util.load_file(filename).strip()
elif self.uses_systemd():
- (out, _err) = subp.subp(['hostname'])
+ (out, _err) = subp.subp(["hostname"])
if len(out):
return out
else:
return default
else:
(_exists, contents) = rhel_util.read_sysconfig_file(filename)
- if 'HOSTNAME' in contents:
- return contents['HOSTNAME']
+ if "HOSTNAME" in contents:
+ return contents["HOSTNAME"]
else:
return default
@@ -124,7 +121,7 @@ class Distro(distros.Distro):
else:
# Adjust the sysconfig clock zone setting
clock_cfg = {
- 'ZONE': str(tz),
+ "ZONE": str(tz),
}
rhel_util.update_sysconfig_file(self.clock_conf_fn, clock_cfg)
# This ensures that the correct tz will be used for the system
@@ -134,18 +131,18 @@ class Distro(distros.Distro):
if pkgs is None:
pkgs = []
- if subp.which('dnf'):
- LOG.debug('Using DNF for package management')
- cmd = ['dnf']
+ if subp.which("dnf"):
+ LOG.debug("Using DNF for package management")
+ cmd = ["dnf"]
else:
- LOG.debug('Using YUM for package management')
+ LOG.debug("Using YUM for package management")
# the '-t' argument makes yum tolerant of errors on the command
# line with regard to packages.
#
# For example: if you request to install foo, bar and baz and baz
# is installed; yum won't error out complaining that baz is already
# installed.
- cmd = ['yum', '-t']
+ cmd = ["yum", "-t"]
# Determines whether or not yum prompts for confirmation
# of critical actions. We don't want to prompt...
cmd.append("-y")
@@ -157,14 +154,19 @@ class Distro(distros.Distro):
cmd.append(command)
- pkglist = util.expand_package_list('%s-%s', pkgs)
+ pkglist = util.expand_package_list("%s-%s", pkgs)
cmd.extend(pkglist)
# Allow the output of this to flow outwards (ie not be captured)
subp.subp(cmd, capture=False)
def update_package_sources(self):
- self._runner.run("update-sources", self.package_command,
- ["makecache"], freq=PER_INSTANCE)
+ self._runner.run(
+ "update-sources",
+ self.package_command,
+ ["makecache"],
+ freq=PER_INSTANCE,
+ )
+
# vi: ts=4 expandtab
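Note: the dnf-then-yum fallback in rhel.py is unchanged by the reformat. The same selection, standalone, with shutil.which standing in for subp.which:

    import shutil

    def pick_pkg_tool():
        """Prefer dnf when present, else fall back to error-tolerant yum, as rhel.py does."""
        if shutil.which("dnf"):
            cmd = ["dnf"]
        else:
            # '-t' keeps yum tolerant of already-installed packages on the command line.
            cmd = ["yum", "-t"]
        cmd.append("-y")  # never prompt for confirmation of critical actions
        return cmd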
diff --git a/cloudinit/distros/rhel_util.py b/cloudinit/distros/rhel_util.py
index d71394b4..c96f93b5 100644
--- a/cloudinit/distros/rhel_util.py
+++ b/cloudinit/distros/rhel_util.py
@@ -8,10 +8,9 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.distros.parsers.sys_conf import SysConf
-
from cloudinit import log as logging
from cloudinit import util
+from cloudinit.distros.parsers.sys_conf import SysConf
LOG = logging.getLogger(__name__)
@@ -49,4 +48,5 @@ def read_sysconfig_file(fn):
contents = []
return (exists, SysConf(contents))
+
# vi: ts=4 expandtab
diff --git a/cloudinit/distros/rocky.py b/cloudinit/distros/rocky.py
index edb3165d..3dc0a342 100644
--- a/cloudinit/distros/rocky.py
+++ b/cloudinit/distros/rocky.py
@@ -6,4 +6,5 @@ from cloudinit.distros import rhel
class Distro(rhel.Distro):
pass
+
# vi: ts=4 expandtab
diff --git a/cloudinit/distros/sles.py b/cloudinit/distros/sles.py
index f3bfb9c2..484214e7 100644
--- a/cloudinit/distros/sles.py
+++ b/cloudinit/distros/sles.py
@@ -10,4 +10,5 @@ from cloudinit.distros import opensuse
class Distro(opensuse.Distro):
pass
+
# vi: ts=4 expandtab
diff --git a/cloudinit/distros/ubuntu.py b/cloudinit/distros/ubuntu.py
index 2a1f93d9..ec6470a9 100644
--- a/cloudinit/distros/ubuntu.py
+++ b/cloudinit/distros/ubuntu.py
@@ -9,41 +9,44 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.distros import debian
-from cloudinit.distros import PREFERRED_NTP_CLIENTS
-from cloudinit import util
-
import copy
+from cloudinit import util
+from cloudinit.distros import PREFERRED_NTP_CLIENTS, debian
-class Distro(debian.Distro):
+class Distro(debian.Distro):
def __init__(self, name, cfg, paths):
super(Distro, self).__init__(name, cfg, paths)
# Ubuntu specific network cfg locations
self.network_conf_fn = {
"eni": "/etc/network/interfaces.d/50-cloud-init.cfg",
- "netplan": "/etc/netplan/50-cloud-init.yaml"
+ "netplan": "/etc/netplan/50-cloud-init.yaml",
}
self.renderer_configs = {
- "eni": {"eni_path": self.network_conf_fn["eni"],
- "eni_header": debian.NETWORK_FILE_HEADER},
- "netplan": {"netplan_path": self.network_conf_fn["netplan"],
- "netplan_header": debian.NETWORK_FILE_HEADER,
- "postcmds": True}
+ "eni": {
+ "eni_path": self.network_conf_fn["eni"],
+ "eni_header": debian.NETWORK_FILE_HEADER,
+ },
+ "netplan": {
+ "netplan_path": self.network_conf_fn["netplan"],
+ "netplan_header": debian.NETWORK_FILE_HEADER,
+ "postcmds": True,
+ },
}
@property
def preferred_ntp_clients(self):
"""The preferred ntp client is dependent on the version."""
if not self._preferred_ntp_clients:
- (_name, _version, codename) = util.system_info()['dist']
+ (_name, _version, codename) = util.system_info()["dist"]
# Xenial cloud-init only installed ntp, UbuntuCore has timesyncd.
if codename == "xenial" and not util.system_is_snappy():
- self._preferred_ntp_clients = ['ntp']
+ self._preferred_ntp_clients = ["ntp"]
else:
- self._preferred_ntp_clients = (
- copy.deepcopy(PREFERRED_NTP_CLIENTS))
+ self._preferred_ntp_clients = copy.deepcopy(
+ PREFERRED_NTP_CLIENTS
+ )
return self._preferred_ntp_clients
diff --git a/cloudinit/distros/ug_util.py b/cloudinit/distros/ug_util.py
index 600b743f..72766392 100755
--- a/cloudinit/distros/ug_util.py
+++ b/cloudinit/distros/ug_util.py
@@ -10,8 +10,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
from cloudinit import log as logging
-from cloudinit import type_utils
-from cloudinit import util
+from cloudinit import type_utils, util
LOG = logging.getLogger(__name__)
@@ -23,7 +22,7 @@ LOG = logging.getLogger(__name__)
# standard form used in the rest of cloud-init
def _normalize_groups(grp_cfg):
if isinstance(grp_cfg, str):
- grp_cfg = grp_cfg.strip().split(',')
+ grp_cfg = grp_cfg.strip().split(",")
if isinstance(grp_cfg, list):
c_grp_cfg = {}
@@ -31,8 +30,10 @@ def _normalize_groups(grp_cfg):
if isinstance(i, dict):
for k, v in i.items():
if not isinstance(v, (list, str)):
- raise TypeError('Bad group member type %s'
- % (type_utils.obj_name(v)))
+ raise TypeError(
+ "Bad group member type %s"
+ % (type_utils.obj_name(v))
+ )
if isinstance(v, list):
c_grp_cfg.setdefault(k, []).extend(v)
@@ -42,8 +43,9 @@ def _normalize_groups(grp_cfg):
if i not in c_grp_cfg:
c_grp_cfg[i] = []
else:
- raise TypeError('Unknown group name type %s'
- % (type_utils.obj_name(i)))
+ raise TypeError(
+ "Unknown group name type %s" % (type_utils.obj_name(i))
+ )
grp_cfg = c_grp_cfg
groups = {}
@@ -51,8 +53,10 @@ def _normalize_groups(grp_cfg):
for grp_name, grp_members in grp_cfg.items():
groups[grp_name] = util.uniq_merge_sorted(grp_members)
else:
- raise TypeError(('Group config must be list, dict or string type only '
- 'but found %s') % (type_utils.obj_name(grp_cfg)))
+ raise TypeError(
+ "Group config must be list, dict or string type only but found %s"
+ % (type_utils.obj_name(grp_cfg))
+ )
return groups
@@ -73,11 +77,13 @@ def _normalize_users(u_cfg, def_user_cfg=None):
if util.is_true(v):
ad_ucfg.append(str(k))
elif isinstance(v, dict):
- v['name'] = k
+ v["name"] = k
ad_ucfg.append(v)
else:
- raise TypeError(('Unmappable user value type %s for key %s')
- % (type_utils.obj_name(v), k))
+ raise TypeError(
+ "Unmappable user value type %s for key %s"
+ % (type_utils.obj_name(v), k)
+ )
u_cfg = ad_ucfg
elif isinstance(u_cfg, str):
u_cfg = util.uniq_merge_sorted(u_cfg)
@@ -89,13 +95,14 @@ def _normalize_users(u_cfg, def_user_cfg=None):
if u and u not in users:
users[u] = {}
elif isinstance(user_config, dict):
- n = user_config.pop('name', 'default')
+ n = user_config.pop("name", "default")
prev_config = users.get(n) or {}
users[n] = util.mergemanydict([prev_config, user_config])
else:
- raise TypeError(('User config must be dictionary/list or string '
- ' types only and not %s')
- % (type_utils.obj_name(user_config)))
+ raise TypeError(
+ "User config must be dictionary/list or string "
+ " types only and not %s" % (type_utils.obj_name(user_config))
+ )
# Ensure user options are in the right python friendly format
if users:
@@ -103,7 +110,7 @@ def _normalize_users(u_cfg, def_user_cfg=None):
for uname, uconfig in users.items():
c_uconfig = {}
for k, v in uconfig.items():
- k = k.replace('-', '_').strip()
+ k = k.replace("-", "_").strip()
if k:
c_uconfig[k] = v
c_users[uname] = c_uconfig
@@ -111,33 +118,34 @@ def _normalize_users(u_cfg, def_user_cfg=None):
# Fix the default user into the actual default user name and replace it.
def_user = None
- if users and 'default' in users:
- def_config = users.pop('default')
+ if users and "default" in users:
+ def_config = users.pop("default")
if def_user_cfg:
# Pickup what the default 'real name' is and any groups that are
# provided by the default config
def_user_cfg = def_user_cfg.copy()
- def_user = def_user_cfg.pop('name')
- def_groups = def_user_cfg.pop('groups', [])
+ def_user = def_user_cfg.pop("name")
+ def_groups = def_user_cfg.pop("groups", [])
# Pick any config + groups for the user name that we may have
# extracted previously
parsed_config = users.pop(def_user, {})
- parsed_groups = parsed_config.get('groups', [])
+ parsed_groups = parsed_config.get("groups", [])
# Now merge the extracted groups with the default config provided
users_groups = util.uniq_merge_sorted(parsed_groups, def_groups)
- parsed_config['groups'] = ','.join(users_groups)
+ parsed_config["groups"] = ",".join(users_groups)
# The real config for the default user is the combination of the
# default user config provided by the distro, the default user
# config provided by the above merging for the user 'default' and
# then the parsed config from the user's 'real name' which does not
# have to be 'default' (but could be)
- users[def_user] = util.mergemanydict([def_user_cfg, def_config,
- parsed_config])
+ users[def_user] = util.mergemanydict(
+ [def_user_cfg, def_config, parsed_config]
+ )
# Ensure that only the default user that we found (if any) is actually
# marked as the default user
for uname, uconfig in users.items():
- uconfig['default'] = (uname == def_user if def_user else False)
+ uconfig["default"] = uname == def_user if def_user else False
return users
@@ -161,14 +169,17 @@ def normalize_users_groups(cfg, distro):
# overrides the concept of the default user if provided in the user: XYZ
# format.
old_user = {}
- if 'user' in cfg and cfg['user']:
- old_user = cfg['user']
+ if "user" in cfg and cfg["user"]:
+ old_user = cfg["user"]
# Translate it into a format that will be more useful going forward
if isinstance(old_user, str):
- old_user = {'name': old_user}
+ old_user = {"name": old_user}
elif not isinstance(old_user, dict):
- LOG.warning(("Format for 'user' key must be a string or dictionary"
- " and not %s"), type_utils.obj_name(old_user))
+ LOG.warning(
+ "Format for 'user' key must be a string or dictionary"
+ " and not %s",
+ type_utils.obj_name(old_user),
+ )
old_user = {}
# If no old user format, then assume the distro provides what the 'default'
@@ -179,33 +190,37 @@ def normalize_users_groups(cfg, distro):
try:
distro_user_config = distro.get_default_user()
except NotImplementedError:
- LOG.warning(('Distro has not implemented default user access. No '
- 'distribution provided default user will be normalized.'))
+ LOG.warning(
+ "Distro has not implemented default user access. No "
+ "distribution provided default user will be normalized."
+ )
# Merge the old user (which may just be an empty dict when not present)
# with the distro provided default user configuration so that the old user
# style picks up all the distribution specific attributes (if any)
default_user_config = util.mergemanydict([old_user, distro_user_config])
- base_users = cfg.get('users', [])
+ base_users = cfg.get("users", [])
if not isinstance(base_users, (list, dict, str)):
- LOG.warning(("Format for 'users' key must be a comma separated string"
- " or a dictionary or a list but found %s"),
- type_utils.obj_name(base_users))
+ LOG.warning(
+ "Format for 'users' key must be a comma separated string"
+ " or a dictionary or a list but found %s",
+ type_utils.obj_name(base_users),
+ )
base_users = []
if old_user:
# When 'user:' is provided, it should be made as the default user
if isinstance(base_users, list):
- base_users.append({'name': 'default'})
+ base_users.append({"name": "default"})
elif isinstance(base_users, dict):
- base_users['default'] = dict(base_users).get('default', True)
+ base_users["default"] = dict(base_users).get("default", True)
elif isinstance(base_users, str):
- base_users += ',default'
+ base_users += ",default"
groups = {}
- if 'groups' in cfg:
- groups = _normalize_groups(cfg['groups'])
+ if "groups" in cfg:
+ groups = _normalize_groups(cfg["groups"])
users = _normalize_users(base_users, default_user_config)
return (users, groups)
@@ -219,9 +234,9 @@ def extract_default(users, default_name=None, default_config=None):
def safe_find(entry):
config = entry[1]
- if not config or 'default' not in config:
+ if not config or "default" not in config:
return False
- return config['default']
+ return config["default"]
tmp_users = dict(filter(safe_find, users.items()))
if not tmp_users:
@@ -229,7 +244,8 @@ def extract_default(users, default_name=None, default_config=None):
name = list(tmp_users)[0]
config = tmp_users[name]
- config.pop('default', None)
+ config.pop("default", None)
return (name, config)
+
# vi: ts=4 expandtab
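Note: _normalize_groups still accepts the 'groups' key as a comma-separated string, a list (of names or single-key dicts), or a dict, and always returns {group: [members]}. A condensed standalone version of that coercion (error handling and util.uniq_merge_sorted are simplified away):

    def normalize_groups(grp_cfg):
        """Coerce the 'groups' cloud-config key into {name: [members]}."""
        if isinstance(grp_cfg, str):
            grp_cfg = [g.strip() for g in grp_cfg.split(",") if g.strip()]
        if isinstance(grp_cfg, list):
            as_dict = {}
            for item in grp_cfg:
                if isinstance(item, dict):
                    for name, members in item.items():
                        as_dict.setdefault(name, []).extend(
                            members if isinstance(members, list) else [members]
                        )
                else:
                    as_dict.setdefault(str(item), [])
            grp_cfg = as_dict
        return {name: sorted(set(members)) for name, members in grp_cfg.items()}

    normalize_groups("admin, docker")                # {'admin': [], 'docker': []}
    normalize_groups([{"admin": ["alice", "bob"]}])  # {'admin': ['alice', 'bob']}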
diff --git a/cloudinit/distros/virtuozzo.py b/cloudinit/distros/virtuozzo.py
index edb3165d..3dc0a342 100644
--- a/cloudinit/distros/virtuozzo.py
+++ b/cloudinit/distros/virtuozzo.py
@@ -6,4 +6,5 @@ from cloudinit.distros import rhel
class Distro(rhel.Distro):
pass
+
# vi: ts=4 expandtab
diff --git a/cloudinit/dmi.py b/cloudinit/dmi.py
index bba3daf2..3a999d41 100644
--- a/cloudinit/dmi.py
+++ b/cloudinit/dmi.py
@@ -1,17 +1,17 @@
# This file is part of cloud-init. See LICENSE file for license information.
+import os
+from collections import namedtuple
+
from cloudinit import log as logging
from cloudinit import subp
from cloudinit.util import is_container, is_FreeBSD
-from collections import namedtuple
-import os
-
LOG = logging.getLogger(__name__)
# Path for DMI Data
DMI_SYS_PATH = "/sys/class/dmi/id"
-kdmi = namedtuple('KernelNames', ['linux', 'freebsd'])
+kdmi = namedtuple("KernelNames", ["linux", "freebsd"])
kdmi.__new__.defaults__ = (None, None)
# FreeBSD's kenv(1) and Linux /sys/class/dmi/id/* both use different names from
@@ -20,23 +20,23 @@ kdmi.__new__.defaults__ = (None, None)
# This is our canonical translation table. If we add more tools on other
# platforms to find dmidecode's values, their keys need to be put in here.
DMIDECODE_TO_KERNEL = {
- 'baseboard-asset-tag': kdmi('board_asset_tag', 'smbios.planar.tag'),
- 'baseboard-manufacturer': kdmi('board_vendor', 'smbios.planar.maker'),
- 'baseboard-product-name': kdmi('board_name', 'smbios.planar.product'),
- 'baseboard-serial-number': kdmi('board_serial', 'smbios.planar.serial'),
- 'baseboard-version': kdmi('board_version', 'smbios.planar.version'),
- 'bios-release-date': kdmi('bios_date', 'smbios.bios.reldate'),
- 'bios-vendor': kdmi('bios_vendor', 'smbios.bios.vendor'),
- 'bios-version': kdmi('bios_version', 'smbios.bios.version'),
- 'chassis-asset-tag': kdmi('chassis_asset_tag', 'smbios.chassis.tag'),
- 'chassis-manufacturer': kdmi('chassis_vendor', 'smbios.chassis.maker'),
- 'chassis-serial-number': kdmi('chassis_serial', 'smbios.chassis.serial'),
- 'chassis-version': kdmi('chassis_version', 'smbios.chassis.version'),
- 'system-manufacturer': kdmi('sys_vendor', 'smbios.system.maker'),
- 'system-product-name': kdmi('product_name', 'smbios.system.product'),
- 'system-serial-number': kdmi('product_serial', 'smbios.system.serial'),
- 'system-uuid': kdmi('product_uuid', 'smbios.system.uuid'),
- 'system-version': kdmi('product_version', 'smbios.system.version'),
+ "baseboard-asset-tag": kdmi("board_asset_tag", "smbios.planar.tag"),
+ "baseboard-manufacturer": kdmi("board_vendor", "smbios.planar.maker"),
+ "baseboard-product-name": kdmi("board_name", "smbios.planar.product"),
+ "baseboard-serial-number": kdmi("board_serial", "smbios.planar.serial"),
+ "baseboard-version": kdmi("board_version", "smbios.planar.version"),
+ "bios-release-date": kdmi("bios_date", "smbios.bios.reldate"),
+ "bios-vendor": kdmi("bios_vendor", "smbios.bios.vendor"),
+ "bios-version": kdmi("bios_version", "smbios.bios.version"),
+ "chassis-asset-tag": kdmi("chassis_asset_tag", "smbios.chassis.tag"),
+ "chassis-manufacturer": kdmi("chassis_vendor", "smbios.chassis.maker"),
+ "chassis-serial-number": kdmi("chassis_serial", "smbios.chassis.serial"),
+ "chassis-version": kdmi("chassis_version", "smbios.chassis.version"),
+ "system-manufacturer": kdmi("sys_vendor", "smbios.system.maker"),
+ "system-product-name": kdmi("product_name", "smbios.system.product"),
+ "system-serial-number": kdmi("product_serial", "smbios.system.serial"),
+ "system-uuid": kdmi("product_uuid", "smbios.system.uuid"),
+ "system-version": kdmi("product_version", "smbios.system.version"),
}
@@ -62,14 +62,18 @@ def _read_dmi_syspath(key):
# uninitialized dmi values show as all \xff and /sys appends a '\n'.
# in that event, return empty string.
- if key_data == b'\xff' * (len(key_data) - 1) + b'\n':
+ if key_data == b"\xff" * (len(key_data) - 1) + b"\n":
key_data = b""
try:
- return key_data.decode('utf8').strip()
+ return key_data.decode("utf8").strip()
except UnicodeDecodeError as e:
- LOG.error("utf-8 decode of content (%s) in %s failed: %s",
- dmi_key_path, key_data, e)
+ LOG.error(
+ "utf-8 decode of content (%s) in %s failed: %s",
+ dmi_key_path,
+ key_data,
+ e,
+ )
return None
@@ -91,7 +95,7 @@ def _read_kenv(key):
LOG.debug("kenv returned '%s' for '%s'", result, kmap.freebsd)
return result
except subp.ProcessExecutionError as e:
- LOG.debug('failed kenv cmd: %s\n%s', cmd, e)
+ LOG.debug("failed kenv cmd: %s\n%s", cmd, e)
return None
return None
@@ -111,7 +115,7 @@ def _call_dmidecode(key, dmidecode_path):
return ""
return result
except subp.ProcessExecutionError as e:
- LOG.debug('failed dmidecode cmd: %s\n%s', cmd, e)
+ LOG.debug("failed dmidecode cmd: %s\n%s", cmd, e)
return None
@@ -144,20 +148,20 @@ def read_dmi_data(key):
return syspath_value
def is_x86(arch):
- return (arch == 'x86_64' or (arch[0] == 'i' and arch[2:] == '86'))
+ return arch == "x86_64" or (arch[0] == "i" and arch[2:] == "86")
# running dmidecode can be problematic on some arches (LP: #1243287)
uname_arch = os.uname()[4]
- if not (is_x86(uname_arch) or uname_arch in ('aarch64', 'amd64')):
+ if not (is_x86(uname_arch) or uname_arch in ("aarch64", "amd64")):
LOG.debug("dmidata is not supported on %s", uname_arch)
return None
- dmidecode_path = subp.which('dmidecode')
+ dmidecode_path = subp.which("dmidecode")
if dmidecode_path:
return _call_dmidecode(key, dmidecode_path)
- LOG.debug("did not find either path %s or dmidecode command",
- DMI_SYS_PATH)
+ LOG.debug("did not find either path %s or dmidecode command", DMI_SYS_PATH)
return None
+
# vi: ts=4 expandtab
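Note: the DMIDECODE_TO_KERNEL table content is untouched; only quoting changed. For orientation, a minimal standalone reader for the Linux side of one mapping (the path and the all-0xff "uninitialized" check come from the hunk above; the decode-error handling is simplified):

    import os

    DMI_SYS_PATH = "/sys/class/dmi/id"

    def read_dmi_syspath(kernel_name):
        """Read one DMI value from sysfs, e.g. 'sys_vendor' for system-manufacturer."""
        path = os.path.join(DMI_SYS_PATH, kernel_name)
        if not os.path.isfile(path):
            return None
        with open(path, "rb") as fh:
            data = fh.read()
        # Uninitialized firmware fields read back as all 0xff plus a trailing newline.
        if data == b"\xff" * (len(data) - 1) + b"\n":
            return ""
        return data.decode("utf8", errors="replace").strip()

    # read_dmi_syspath("product_uuid")  # typically requires root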
diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py
index 34acfe84..d4019557 100644
--- a/cloudinit/ec2_utils.py
+++ b/cloudinit/ec2_utils.py
@@ -10,8 +10,7 @@ import functools
import json
from cloudinit import log as logging
-from cloudinit import url_helper
-from cloudinit import util
+from cloudinit import url_helper, util
LOG = logging.getLogger(__name__)
SKIP_USERDATA_CODES = frozenset([url_helper.NOT_FOUND])
@@ -30,7 +29,7 @@ class MetadataLeafDecoder(object):
def __call__(self, field, blob):
if not blob:
- return ''
+ return ""
try:
blob = util.decode_binary(blob)
except UnicodeDecodeError:
@@ -40,8 +39,11 @@ class MetadataLeafDecoder(object):
# Assume it's json, unless it fails parsing...
return json.loads(blob)
except (ValueError, TypeError) as e:
- LOG.warning("Field %s looked like a json object, but it"
- " was not: %s", field, e)
+ LOG.warning(
+ "Field %s looked like a json object, but it was not: %s",
+ field,
+ e,
+ )
if blob.find("\n") != -1:
return blob.splitlines()
return blob
@@ -85,7 +87,7 @@ class MetadataMaterializer(object):
if not field or not field_name:
continue
# Don't materialize credentials
- if field_name == 'security-credentials':
+ if field_name == "security-credentials":
continue
if has_children(field):
if field_name not in children:
@@ -127,8 +129,7 @@ class MetadataMaterializer(object):
joined.update(child_contents)
for field in leaf_contents.keys():
if field in joined:
- LOG.warning("Duplicate key found in results from %s",
- base_url)
+ LOG.warning("Duplicate key found in results from %s", base_url)
else:
joined[field] = leaf_contents[field]
return joined
@@ -139,25 +140,36 @@ def skip_retry_on_codes(status_codes, _request_args, cause):
return cause.code not in status_codes
-def get_instance_userdata(api_version='latest',
- metadata_address='http://169.254.169.254',
- ssl_details=None, timeout=5, retries=5,
- headers_cb=None, headers_redact=None,
- exception_cb=None):
+def get_instance_userdata(
+ api_version="latest",
+ metadata_address="http://169.254.169.254",
+ ssl_details=None,
+ timeout=5,
+ retries=5,
+ headers_cb=None,
+ headers_redact=None,
+ exception_cb=None,
+):
ud_url = url_helper.combine_url(metadata_address, api_version)
- ud_url = url_helper.combine_url(ud_url, 'user-data')
- user_data = ''
+ ud_url = url_helper.combine_url(ud_url, "user-data")
+ user_data = ""
try:
if not exception_cb:
# It is ok for userdata to not exist (thats why we are stopping if
# NOT_FOUND occurs) and just in that case returning an empty
# string.
- exception_cb = functools.partial(skip_retry_on_codes,
- SKIP_USERDATA_CODES)
+ exception_cb = functools.partial(
+ skip_retry_on_codes, SKIP_USERDATA_CODES
+ )
response = url_helper.read_file_or_url(
- ud_url, ssl_details=ssl_details, timeout=timeout,
- retries=retries, exception_cb=exception_cb, headers_cb=headers_cb,
- headers_redact=headers_redact)
+ ud_url,
+ ssl_details=ssl_details,
+ timeout=timeout,
+ retries=retries,
+ exception_cb=exception_cb,
+ headers_cb=headers_cb,
+ headers_redact=headers_redact,
+ )
user_data = response.contents
except url_helper.UrlError as e:
if e.code not in SKIP_USERDATA_CODES:
@@ -167,27 +179,37 @@ def get_instance_userdata(api_version='latest',
return user_data
-def _get_instance_metadata(tree, api_version='latest',
- metadata_address='http://169.254.169.254',
- ssl_details=None, timeout=5, retries=5,
- leaf_decoder=None, headers_cb=None,
- headers_redact=None,
- exception_cb=None):
+def _get_instance_metadata(
+ tree,
+ api_version="latest",
+ metadata_address="http://169.254.169.254",
+ ssl_details=None,
+ timeout=5,
+ retries=5,
+ leaf_decoder=None,
+ headers_cb=None,
+ headers_redact=None,
+ exception_cb=None,
+):
md_url = url_helper.combine_url(metadata_address, api_version, tree)
caller = functools.partial(
- url_helper.read_file_or_url, ssl_details=ssl_details,
- timeout=timeout, retries=retries, headers_cb=headers_cb,
+ url_helper.read_file_or_url,
+ ssl_details=ssl_details,
+ timeout=timeout,
+ retries=retries,
+ headers_cb=headers_cb,
headers_redact=headers_redact,
- exception_cb=exception_cb)
+ exception_cb=exception_cb,
+ )
def mcaller(url):
return caller(url).contents
try:
response = caller(md_url)
- materializer = MetadataMaterializer(response.contents,
- md_url, mcaller,
- leaf_decoder=leaf_decoder)
+ materializer = MetadataMaterializer(
+ response.contents, md_url, mcaller, leaf_decoder=leaf_decoder
+ )
md = materializer.materialize()
if not isinstance(md, (dict)):
md = {}
@@ -197,35 +219,56 @@ def _get_instance_metadata(tree, api_version='latest',
return {}
-def get_instance_metadata(api_version='latest',
- metadata_address='http://169.254.169.254',
- ssl_details=None, timeout=5, retries=5,
- leaf_decoder=None, headers_cb=None,
- headers_redact=None,
- exception_cb=None):
+def get_instance_metadata(
+ api_version="latest",
+ metadata_address="http://169.254.169.254",
+ ssl_details=None,
+ timeout=5,
+ retries=5,
+ leaf_decoder=None,
+ headers_cb=None,
+ headers_redact=None,
+ exception_cb=None,
+):
# Note, 'meta-data' explicitly has trailing /.
# this is required for CloudStack (LP: #1356855)
- return _get_instance_metadata(tree='meta-data/', api_version=api_version,
- metadata_address=metadata_address,
- ssl_details=ssl_details, timeout=timeout,
- retries=retries, leaf_decoder=leaf_decoder,
- headers_redact=headers_redact,
- headers_cb=headers_cb,
- exception_cb=exception_cb)
-
-
-def get_instance_identity(api_version='latest',
- metadata_address='http://169.254.169.254',
- ssl_details=None, timeout=5, retries=5,
- leaf_decoder=None, headers_cb=None,
- headers_redact=None,
- exception_cb=None):
- return _get_instance_metadata(tree='dynamic/instance-identity',
- api_version=api_version,
- metadata_address=metadata_address,
- ssl_details=ssl_details, timeout=timeout,
- retries=retries, leaf_decoder=leaf_decoder,
- headers_redact=headers_redact,
- headers_cb=headers_cb,
- exception_cb=exception_cb)
+ return _get_instance_metadata(
+ tree="meta-data/",
+ api_version=api_version,
+ metadata_address=metadata_address,
+ ssl_details=ssl_details,
+ timeout=timeout,
+ retries=retries,
+ leaf_decoder=leaf_decoder,
+ headers_redact=headers_redact,
+ headers_cb=headers_cb,
+ exception_cb=exception_cb,
+ )
+
+
+def get_instance_identity(
+ api_version="latest",
+ metadata_address="http://169.254.169.254",
+ ssl_details=None,
+ timeout=5,
+ retries=5,
+ leaf_decoder=None,
+ headers_cb=None,
+ headers_redact=None,
+ exception_cb=None,
+):
+ return _get_instance_metadata(
+ tree="dynamic/instance-identity",
+ api_version=api_version,
+ metadata_address=metadata_address,
+ ssl_details=ssl_details,
+ timeout=timeout,
+ retries=retries,
+ leaf_decoder=leaf_decoder,
+ headers_redact=headers_redact,
+ headers_cb=headers_cb,
+ exception_cb=exception_cb,
+ )
+
+
# vi: ts=4 expandtab
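Note: this hunk only re-lays-out keyword arguments; the defaults (api_version="latest", metadata address http://169.254.169.254, and the trailing slash on 'meta-data/' needed for CloudStack) are unchanged. For orientation, a bare-bones user-data fetch using urllib instead of cloudinit.url_helper (no retries, no header redaction, no IMDSv2 token handling; purely illustrative):

    from urllib.request import urlopen

    def fetch_userdata(api_version="latest", address="http://169.254.169.254", timeout=5):
        """Fetch EC2-style user-data; returns b'' when the endpoint has none."""
        url = "%s/%s/user-data" % (address.rstrip("/"), api_version)
        try:
            with urlopen(url, timeout=timeout) as resp:
                return resp.read()
        except OSError:  # covers HTTP 404 (no user-data), URL errors and timeouts
            return b""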
diff --git a/cloudinit/event.py b/cloudinit/event.py
index 53ad4c25..eaf8bd0b 100644
--- a/cloudinit/event.py
+++ b/cloudinit/event.py
@@ -13,7 +13,7 @@ class EventScope(Enum):
# NETWORK is currently the only scope, but we want to leave room to
# grow other scopes (e.g., STORAGE) without having to make breaking
# changes to the user config
- NETWORK = 'network'
+ NETWORK = "network"
def __str__(self): # pylint: disable=invalid-str-returned
return self.value
@@ -21,6 +21,7 @@ class EventScope(Enum):
class EventType(Enum):
"""Event types which can generate maintenance requests for cloud-init."""
+
# Cloud-init should grow support for the follow event types:
# HOTPLUG
# METADATA_CHANGE
@@ -29,7 +30,7 @@ class EventType(Enum):
BOOT = "boot"
BOOT_NEW_INSTANCE = "boot-new-instance"
BOOT_LEGACY = "boot-legacy"
- HOTPLUG = 'hotplug'
+ HOTPLUG = "hotplug"
def __str__(self): # pylint: disable=invalid-str-returned
return self.value
@@ -58,7 +59,7 @@ def userdata_to_events(user_config: dict) -> Dict[EventScope, Set[EventType]]:
)
continue
try:
- new_values = [EventType(x) for x in scope_list['when']]
+ new_values = [EventType(x) for x in scope_list["when"]]
except ValueError as e:
LOG.warning(
"%s! Update data will be ignored for '%s' scope",
@@ -70,4 +71,5 @@ def userdata_to_events(user_config: dict) -> Dict[EventScope, Set[EventType]]:
return update_config
+
# vi: ts=4 expandtab
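Note: the enum string values are untouched ('network' for the scope; 'boot', 'boot-new-instance', 'boot-legacy', 'hotplug' for event types). Based on the hunk above, the dict passed to userdata_to_events maps a scope name to {"when": [...]}; a hand-written example of valid input and the expected shape of the result:

    user_config = {
        "network": {"when": ["boot-new-instance", "hotplug"]},
    }
    # userdata_to_events(user_config) would return something like
    # {EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE, EventType.HOTPLUG}}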
diff --git a/cloudinit/filters/launch_index.py b/cloudinit/filters/launch_index.py
index 5c8bcffb..5aeb0a17 100644
--- a/cloudinit/filters/launch_index.py
+++ b/cloudinit/filters/launch_index.py
@@ -23,7 +23,7 @@ class Filter(object):
self.allow_none = allow_none
def _select(self, message):
- msg_idx = message.get('Launch-Index', None)
+ msg_idx = message.get("Launch-Index", None)
if self.allow_none and msg_idx is None:
return True
msg_idx = util.safe_int(msg_idx)
@@ -47,9 +47,12 @@ class Filter(object):
new_msgs.append(m)
else:
discarded += 1
- LOG.debug(("Discarding %s multipart messages "
- "which do not match launch index %s"),
- discarded, self.wanted_idx)
+ LOG.debug(
+ "Discarding %s multipart messages "
+ "which do not match launch index %s",
+ discarded,
+ self.wanted_idx,
+ )
new_message = copy.copy(message)
new_message.set_payload(new_msgs)
new_message[ud.ATTACHMENT_FIELD] = str(len(new_msgs))
@@ -62,4 +65,5 @@ class Filter(object):
return root_message
return self._do_filter(root_message)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/gpg.py b/cloudinit/gpg.py
index 07d682d2..8daa5e37 100644
--- a/cloudinit/gpg.py
+++ b/cloudinit/gpg.py
@@ -7,22 +7,28 @@
"""gpg.py - Collection of gpg key related functions"""
+import time
+
from cloudinit import log as logging
from cloudinit import subp
-import time
-
LOG = logging.getLogger(__name__)
-GPG_LIST = ['gpg', '--with-fingerprint', '--no-default-keyring', '--list-keys',
- '--keyring']
+GPG_LIST = [
+ "gpg",
+ "--with-fingerprint",
+ "--no-default-keyring",
+ "--list-keys",
+ "--keyring",
+]
def export_armour(key):
"""Export gpg key, armoured key gets returned"""
try:
- (armour, _) = subp.subp(["gpg", "--export", "--armour", key],
- capture=True)
+ (armour, _) = subp.subp(
+ ["gpg", "--export", "--armour", key], capture=True
+ )
except subp.ProcessExecutionError as error:
# debug, since it happens for any key not on the system initially
LOG.debug('Failed to export armoured key "%s": %s', key, error)
@@ -33,7 +39,7 @@ def export_armour(key):
def dearmor(key):
"""Dearmor gpg key, dearmored key gets returned
- note: man gpg(1) makes no mention of an --armour spelling, only --armor
+ note: man gpg(1) makes no mention of an --armour spelling, only --armor
"""
return subp.subp(["gpg", "--dearmor"], data=key, decode=False)[0]
@@ -48,7 +54,7 @@ def list(key_file, human_output=False):
cmd = []
cmd.extend(GPG_LIST)
if not human_output:
- cmd.append('--with-colons')
+ cmd.append("--with-colons")
cmd.append(key_file)
(stdout, stderr) = subp.subp(cmd, capture=True)
@@ -82,8 +88,12 @@ def recv_key(key, keyserver, retries=(1, 1)):
trynum += 1
try:
subp.subp(cmd, capture=True)
- LOG.debug("Imported key '%s' from keyserver '%s' on try %d",
- key, keyserver, trynum)
+ LOG.debug(
+ "Imported key '%s' from keyserver '%s' on try %d",
+ key,
+ keyserver,
+ trynum,
+ )
return
except subp.ProcessExecutionError as e:
error = e
@@ -91,25 +101,28 @@ def recv_key(key, keyserver, retries=(1, 1)):
naplen = next(sleeps)
LOG.debug(
"Import failed with exit code %d, will try again in %ss",
- error.exit_code, naplen)
+ error.exit_code,
+ naplen,
+ )
time.sleep(naplen)
except StopIteration as e:
raise ValueError(
- ("Failed to import key '%s' from keyserver '%s' "
- "after %d tries: %s") % (key, keyserver, trynum, error)
+ "Failed to import key '%s' from keyserver '%s' "
+ "after %d tries: %s" % (key, keyserver, trynum, error)
) from e
def delete_key(key):
"""Delete the specified key from the local gpg ring"""
try:
- subp.subp(["gpg", "--batch", "--yes", "--delete-keys", key],
- capture=True)
+ subp.subp(
+ ["gpg", "--batch", "--yes", "--delete-keys", key], capture=True
+ )
except subp.ProcessExecutionError as error:
LOG.warning('Failed delete key "%s": %s', key, error)
-def getkeybyid(keyid, keyserver='keyserver.ubuntu.com'):
+def getkeybyid(keyid, keyserver="keyserver.ubuntu.com"):
"""get gpg keyid from keyserver"""
armour = export_armour(keyid)
if not armour:
@@ -117,7 +130,7 @@ def getkeybyid(keyid, keyserver='keyserver.ubuntu.com'):
recv_key(keyid, keyserver=keyserver)
armour = export_armour(keyid)
except ValueError:
- LOG.exception('Failed to obtain gpg key %s', keyid)
+ LOG.exception("Failed to obtain gpg key %s", keyid)
raise
finally:
# delete just imported key to leave environment as it was before
@@ -125,4 +138,5 @@ def getkeybyid(keyid, keyserver='keyserver.ubuntu.com'):
return armour
+
# vi: ts=4 expandtab
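Note: recv_key's retry behaviour is unchanged: it naps between attempts according to the 'retries' tuple and raises ValueError once the schedule is exhausted. A generic standalone helper with the same shape (the attempt callable is a placeholder, not a gpg call):

    import time

    def retry_with_naps(attempt, naps=(1, 1)):
        """Call attempt() until it succeeds or the nap schedule runs out, like gpg.recv_key."""
        sleeps = iter(naps)
        trynum = 0
        while True:
            trynum += 1
            try:
                return attempt()
            except Exception as error:
                try:
                    naplen = next(sleeps)
                    time.sleep(naplen)
                except StopIteration as e:
                    raise ValueError(
                        "failed after %d tries: %s" % (trynum, error)
                    ) from e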
diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py
index a409ff8a..62c1fd26 100644
--- a/cloudinit/handlers/__init__.py
+++ b/cloudinit/handlers/__init__.py
@@ -13,9 +13,8 @@ import os
from cloudinit import importer
from cloudinit import log as logging
-from cloudinit import type_utils
-from cloudinit import util
-from cloudinit.settings import (PER_ALWAYS, PER_INSTANCE, FREQUENCIES)
+from cloudinit import type_utils, util
+from cloudinit.settings import FREQUENCIES, PER_ALWAYS, PER_INSTANCE
LOG = logging.getLogger(__name__)
@@ -24,7 +23,7 @@ LOG = logging.getLogger(__name__)
NOT_MULTIPART_TYPE = "text/x-not-multipart"
# When none is assigned this gets used
-OCTET_TYPE = 'application/octet-stream'
+OCTET_TYPE = "application/octet-stream"
# Special content types that signal the start and end of processing
CONTENT_END = "__end__"
@@ -34,32 +33,32 @@ CONTENT_SIGNALS = [CONTENT_START, CONTENT_END]
# Used when a part-handler type is encountered
# to allow for registration of new types.
PART_CONTENT_TYPES = ["text/part-handler"]
-PART_HANDLER_FN_TMPL = 'part-handler-%03d'
+PART_HANDLER_FN_TMPL = "part-handler-%03d"
# For parts without filenames
-PART_FN_TPL = 'part-%03d'
+PART_FN_TPL = "part-%03d"
# Different file beginnings to their content type
INCLUSION_TYPES_MAP = {
- '#include': 'text/x-include-url',
- '#include-once': 'text/x-include-once-url',
- '#!': 'text/x-shellscript',
- '#cloud-config': 'text/cloud-config',
- '#upstart-job': 'text/upstart-job',
- '#part-handler': 'text/part-handler',
- '#cloud-boothook': 'text/cloud-boothook',
- '#cloud-config-archive': 'text/cloud-config-archive',
- '#cloud-config-jsonp': 'text/cloud-config-jsonp',
- '## template: jinja': 'text/jinja2',
+ "#include": "text/x-include-url",
+ "#include-once": "text/x-include-once-url",
+ "#!": "text/x-shellscript",
+ "#cloud-config": "text/cloud-config",
+ "#upstart-job": "text/upstart-job",
+ "#part-handler": "text/part-handler",
+ "#cloud-boothook": "text/cloud-boothook",
+ "#cloud-config-archive": "text/cloud-config-archive",
+ "#cloud-config-jsonp": "text/cloud-config-jsonp",
+ "## template: jinja": "text/jinja2",
}
# Sorted longest first
-INCLUSION_SRCH = sorted(list(INCLUSION_TYPES_MAP.keys()),
- key=(lambda e: 0 - len(e)))
+INCLUSION_SRCH = sorted(
+ list(INCLUSION_TYPES_MAP.keys()), key=(lambda e: 0 - len(e))
+)
class Handler(metaclass=abc.ABCMeta):
-
def __init__(self, frequency, version=2):
self.handler_version = version
self.frequency = frequency
@@ -69,11 +68,13 @@ class Handler(metaclass=abc.ABCMeta):
def list_types(self):
# Each subclass must define the supported content prefixes it handles.
- if not hasattr(self, 'prefixes'):
- raise NotImplementedError('Missing prefixes subclass attribute')
+ if not hasattr(self, "prefixes"):
+ raise NotImplementedError("Missing prefixes subclass attribute")
else:
- return [INCLUSION_TYPES_MAP[prefix]
- for prefix in getattr(self, 'prefixes')]
+ return [
+ INCLUSION_TYPES_MAP[prefix]
+ for prefix in getattr(self, "prefixes")
+ ]
@abc.abstractmethod
def handle_part(self, *args, **kwargs):
@@ -82,8 +83,10 @@ class Handler(metaclass=abc.ABCMeta):
def run_part(mod, data, filename, payload, frequency, headers):
mod_freq = mod.frequency
- if not (mod_freq == PER_ALWAYS or
- (frequency == PER_INSTANCE and mod_freq == PER_INSTANCE)):
+ if not (
+ mod_freq == PER_ALWAYS
+ or (frequency == PER_INSTANCE and mod_freq == PER_INSTANCE)
+ ):
return
# Sanity checks on version (should be an int convertable)
try:
@@ -91,33 +94,45 @@ def run_part(mod, data, filename, payload, frequency, headers):
mod_ver = int(mod_ver)
except (TypeError, ValueError, AttributeError):
mod_ver = 1
- content_type = headers['Content-Type']
+ content_type = headers["Content-Type"]
try:
- LOG.debug("Calling handler %s (%s, %s, %s) with frequency %s",
- mod, content_type, filename, mod_ver, frequency)
+ LOG.debug(
+ "Calling handler %s (%s, %s, %s) with frequency %s",
+ mod,
+ content_type,
+ filename,
+ mod_ver,
+ frequency,
+ )
if mod_ver == 3:
# Treat as v. 3 which does get a frequency + headers
- mod.handle_part(data, content_type, filename,
- payload, frequency, headers)
+ mod.handle_part(
+ data, content_type, filename, payload, frequency, headers
+ )
elif mod_ver == 2:
# Treat as v. 2 which does get a frequency
- mod.handle_part(data, content_type, filename,
- payload, frequency)
+ mod.handle_part(data, content_type, filename, payload, frequency)
elif mod_ver == 1:
# Treat as v. 1 which gets no frequency
mod.handle_part(data, content_type, filename, payload)
else:
raise ValueError("Unknown module version %s" % (mod_ver))
except Exception:
- util.logexc(LOG, "Failed calling handler %s (%s, %s, %s) with "
- "frequency %s", mod, content_type, filename, mod_ver,
- frequency)
+ util.logexc(
+ LOG,
+ "Failed calling handler %s (%s, %s, %s) with frequency %s",
+ mod,
+ content_type,
+ filename,
+ mod_ver,
+ frequency,
+ )
def call_begin(mod, data, frequency):
# Create a fake header set
headers = {
- 'Content-Type': CONTENT_START,
+ "Content-Type": CONTENT_START,
}
run_part(mod, data, None, None, frequency, headers)
@@ -125,31 +140,35 @@ def call_begin(mod, data, frequency):
def call_end(mod, data, frequency):
# Create a fake header set
headers = {
- 'Content-Type': CONTENT_END,
+ "Content-Type": CONTENT_END,
}
run_part(mod, data, None, None, frequency, headers)
def walker_handle_handler(pdata, _ctype, _filename, payload):
- curcount = pdata['handlercount']
+ curcount = pdata["handlercount"]
modname = PART_HANDLER_FN_TMPL % (curcount)
- frequency = pdata['frequency']
- modfname = os.path.join(pdata['handlerdir'], "%s" % (modname))
+ frequency = pdata["frequency"]
+ modfname = os.path.join(pdata["handlerdir"], "%s" % (modname))
if not modfname.endswith(".py"):
modfname = "%s.py" % (modfname)
# TODO(harlowja): Check if path exists??
util.write_file(modfname, payload, 0o600)
- handlers = pdata['handlers']
+ handlers = pdata["handlers"]
try:
mod = fixup_handler(importer.import_module(modname))
- call_begin(mod, pdata['data'], frequency)
+ call_begin(mod, pdata["data"], frequency)
# Only register and increment after the above have worked, so we don't
# register if it fails starting.
handlers.register(mod, initialized=True)
- pdata['handlercount'] = curcount + 1
+ pdata["handlercount"] = curcount + 1
except Exception:
- util.logexc(LOG, "Failed at registering python file: %s (part "
- "handler %s)", modfname, curcount)
+ util.logexc(
+ LOG,
+ "Failed at registering python file: %s (part handler %s)",
+ modfname,
+ curcount,
+ )
def _extract_first_or_bytes(blob, size):
@@ -161,7 +180,7 @@ def _extract_first_or_bytes(blob, size):
else:
# We want to avoid decoding the whole blob (it might be huge)
# By taking 4*size bytes we guarantee to decode size utf8 chars
- start = blob[:4 * size].decode(errors='ignore').split("\n", 1)[0]
+ start = blob[: 4 * size].decode(errors="ignore").split("\n", 1)[0]
if len(start) >= size:
start = start[:size]
except UnicodeDecodeError:
@@ -176,7 +195,7 @@ def _escape_string(text):
except (LookupError, TypeError):
try:
# Unicode (and Python 3's str) doesn't support string_escape...
- return text.encode('unicode_escape')
+ return text.encode("unicode_escape")
except TypeError:
# Give up...
pass
@@ -189,28 +208,40 @@ def _escape_string(text):
def walker_callback(data, filename, payload, headers):
- content_type = headers['Content-Type']
- if content_type in data.get('excluded'):
+ content_type = headers["Content-Type"]
+ if content_type in data.get("excluded"):
LOG.debug('content_type "%s" is excluded', content_type)
return
if content_type in PART_CONTENT_TYPES:
walker_handle_handler(data, content_type, filename, payload)
return
- handlers = data['handlers']
+ handlers = data["handlers"]
if content_type in handlers:
- run_part(handlers[content_type], data['data'], filename,
- payload, data['frequency'], headers)
+ run_part(
+ handlers[content_type],
+ data["data"],
+ filename,
+ payload,
+ data["frequency"],
+ headers,
+ )
elif payload:
# Extract the first line or 24 bytes for displaying in the log
start = _extract_first_or_bytes(payload, 24)
details = "'%s...'" % (_escape_string(start))
if content_type == NOT_MULTIPART_TYPE:
- LOG.warning("Unhandled non-multipart (%s) userdata: %s",
- content_type, details)
+ LOG.warning(
+ "Unhandled non-multipart (%s) userdata: %s",
+ content_type,
+ details,
+ )
else:
- LOG.warning("Unhandled unknown content-type (%s) userdata: %s",
- content_type, details)
+ LOG.warning(
+ "Unhandled unknown content-type (%s) userdata: %s",
+ content_type,
+ details,
+ )
else:
LOG.debug("Empty payload of type %s", content_type)
@@ -221,7 +252,7 @@ def walk(msg, callback, data):
partnum = 0
for part in msg.walk():
# multipart/* are just containers
- if part.get_content_maintype() == 'multipart':
+ if part.get_content_maintype() == "multipart":
continue
ctype = part.get_content_type()
@@ -234,7 +265,7 @@ def walk(msg, callback, data):
headers = dict(part)
LOG.debug(headers)
- headers['Content-Type'] = ctype
+ headers["Content-Type"] = ctype
payload = util.fully_decoded_payload(part)
callback(data, filename, payload, headers)
partnum = partnum + 1
@@ -243,8 +274,8 @@ def walk(msg, callback, data):
def fixup_handler(mod, def_freq=PER_INSTANCE):
if not hasattr(mod, "handler_version"):
setattr(mod, "handler_version", 1)
- if not hasattr(mod, 'frequency'):
- setattr(mod, 'frequency', def_freq)
+ if not hasattr(mod, "frequency"):
+ setattr(mod, "frequency", def_freq)
else:
freq = mod.frequency
if freq and freq not in FREQUENCIES:
@@ -263,4 +294,5 @@ def type_from_starts_with(payload, default=None):
return INCLUSION_TYPES_MAP[text]
return default
+
# vi: ts=4 expandtab
diff --git a/cloudinit/handlers/boot_hook.py b/cloudinit/handlers/boot_hook.py
index c6205097..602800ed 100644
--- a/cloudinit/handlers/boot_hook.py
+++ b/cloudinit/handlers/boot_hook.py
@@ -12,10 +12,8 @@ import os
from cloudinit import handlers
from cloudinit import log as logging
-from cloudinit import subp
-from cloudinit import util
-
-from cloudinit.settings import (PER_ALWAYS)
+from cloudinit import subp, util
+from cloudinit.settings import PER_ALWAYS
LOG = logging.getLogger(__name__)
@@ -23,7 +21,7 @@ LOG = logging.getLogger(__name__)
class BootHookPartHandler(handlers.Handler):
# The content prefixes this handler understands.
- prefixes = ['#cloud-boothook']
+ prefixes = ["#cloud-boothook"]
def __init__(self, paths, datasource, **_kwargs):
handlers.Handler.__init__(self, PER_ALWAYS)
@@ -35,8 +33,9 @@ class BootHookPartHandler(handlers.Handler):
def _write_part(self, payload, filename):
filename = util.clean_filename(filename)
filepath = os.path.join(self.boothook_dir, filename)
- contents = util.strip_prefix_suffix(util.dos2unix(payload),
- prefix=self.prefixes[0])
+ contents = util.strip_prefix_suffix(
+ util.dos2unix(payload), prefix=self.prefixes[0]
+ )
util.write_file(filepath, contents.lstrip(), 0o700)
return filepath
@@ -48,12 +47,14 @@ class BootHookPartHandler(handlers.Handler):
try:
env = os.environ.copy()
if self.instance_id is not None:
- env['INSTANCE_ID'] = str(self.instance_id)
+ env["INSTANCE_ID"] = str(self.instance_id)
subp.subp([filepath], env=env)
except subp.ProcessExecutionError:
util.logexc(LOG, "Boothooks script %s execution error", filepath)
except Exception:
- util.logexc(LOG, "Boothooks unknown error when running %s",
- filepath)
+ util.logexc(
+ LOG, "Boothooks unknown error when running %s", filepath
+ )
+
# vi: ts=4 expandtab
diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py
index 2a307364..2e694671 100644
--- a/cloudinit/handlers/cloud_config.py
+++ b/cloudinit/handlers/cloud_config.py
@@ -12,15 +12,12 @@ import jsonpatch
from cloudinit import handlers
from cloudinit import log as logging
-from cloudinit import mergers
-from cloudinit import util
-from cloudinit import safeyaml
-
-from cloudinit.settings import (PER_ALWAYS)
+from cloudinit import mergers, safeyaml, util
+from cloudinit.settings import PER_ALWAYS
LOG = logging.getLogger(__name__)
-MERGE_HEADER = 'Merge-Type'
+MERGE_HEADER = "Merge-Type"
# Due to the way the loading of yaml configuration was done previously,
# where previously each cloud config part was appended to a larger yaml
@@ -39,7 +36,7 @@ MERGE_HEADER = 'Merge-Type'
# a: 22
#
# This gets loaded into yaml with final result {'a': 22}
-DEF_MERGERS = mergers.string_extract_mergers('dict(replace)+list()+str()')
+DEF_MERGERS = mergers.string_extract_mergers("dict(replace)+list()+str()")
CLOUD_PREFIX = "#cloud-config"
JSONP_PREFIX = "#cloud-config-jsonp"
@@ -53,7 +50,7 @@ class CloudConfigPartHandler(handlers.Handler):
handlers.Handler.__init__(self, PER_ALWAYS, version=3)
self.cloud_buf = None
self.cloud_fn = paths.get_ipath("cloud_config")
- if 'cloud_config_path' in _kwargs:
+ if "cloud_config_path" in _kwargs:
self.cloud_fn = paths.get_ipath(_kwargs["cloud_config_path"])
self.file_names = []
@@ -66,14 +63,14 @@ class CloudConfigPartHandler(handlers.Handler):
file_lines.append("# from %s files" % (len(self.file_names)))
for fn in self.file_names:
if not fn:
- fn = '?'
+ fn = "?"
file_lines.append("# %s" % (fn))
file_lines.append("")
if self.cloud_buf is not None:
# Something was actually gathered....
lines = [
CLOUD_PREFIX,
- '',
+ "",
]
lines.extend(file_lines)
lines.append(safeyaml.dumps(self.cloud_buf))
@@ -82,9 +79,9 @@ class CloudConfigPartHandler(handlers.Handler):
util.write_file(self.cloud_fn, "\n".join(lines), 0o600)
def _extract_mergers(self, payload, headers):
- merge_header_headers = ''
- for h in [MERGE_HEADER, 'X-%s' % (MERGE_HEADER)]:
- tmp_h = headers.get(h, '')
+ merge_header_headers = ""
+ for h in [MERGE_HEADER, "X-%s" % (MERGE_HEADER)]:
+ tmp_h = headers.get(h, "")
if tmp_h:
merge_header_headers = tmp_h
break
@@ -143,7 +140,9 @@ class CloudConfigPartHandler(handlers.Handler):
filename = filename.replace(i, " ")
self.file_names.append(filename.strip())
except Exception:
- util.logexc(LOG, "Failed at merging in cloud config part from %s",
- filename)
+ util.logexc(
+ LOG, "Failed at merging in cloud config part from %s", filename
+ )
+
# vi: ts=4 expandtab
diff --git a/cloudinit/handlers/jinja_template.py b/cloudinit/handlers/jinja_template.py
index de88a5ea..1f9caa64 100644
--- a/cloudinit/handlers/jinja_template.py
+++ b/cloudinit/handlers/jinja_template.py
@@ -1,9 +1,9 @@
# This file is part of cloud-init. See LICENSE file for license information.
import copy
-from errno import EACCES
import os
import re
+from errno import EACCES
from typing import Optional
try:
@@ -12,28 +12,27 @@ try:
except ImportError:
# No jinja2 dependency
JUndefinedError = Exception
- operator_re = re.compile(r'[-.]')
+ operator_re = re.compile(r"[-.]")
from cloudinit import handlers
from cloudinit import log as logging
-from cloudinit.sources import INSTANCE_JSON_SENSITIVE_FILE
-from cloudinit.templater import render_string, MISSING_JINJA_PREFIX
-from cloudinit.util import b64d, load_file, load_json, json_dumps
-
from cloudinit.settings import PER_ALWAYS
+from cloudinit.sources import INSTANCE_JSON_SENSITIVE_FILE
+from cloudinit.templater import MISSING_JINJA_PREFIX, render_string
+from cloudinit.util import b64d, json_dumps, load_file, load_json
LOG = logging.getLogger(__name__)
class JinjaTemplatePartHandler(handlers.Handler):
- prefixes = ['## template: jinja']
+ prefixes = ["## template: jinja"]
def __init__(self, paths, **_kwargs):
handlers.Handler.__init__(self, PER_ALWAYS, version=3)
self.paths = paths
self.sub_handlers = {}
- for handler in _kwargs.get('sub_handlers', []):
+ for handler in _kwargs.get("sub_handlers", []):
for ctype in handler.list_types():
self.sub_handlers[ctype] = handler
@@ -41,28 +40,36 @@ class JinjaTemplatePartHandler(handlers.Handler):
if ctype in handlers.CONTENT_SIGNALS:
return
jinja_json_file = os.path.join(
- self.paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE)
+ self.paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE
+ )
rendered_payload = render_jinja_payload_from_file(
- payload, filename, jinja_json_file)
+ payload, filename, jinja_json_file
+ )
if not rendered_payload:
return
subtype = handlers.type_from_starts_with(rendered_payload)
sub_handler = self.sub_handlers.get(subtype)
if not sub_handler:
LOG.warning(
- 'Ignoring jinja template for %s. Could not find supported'
- ' sub-handler for type %s', filename, subtype)
+ "Ignoring jinja template for %s. Could not find supported"
+ " sub-handler for type %s",
+ filename,
+ subtype,
+ )
return
if sub_handler.handler_version == 3:
sub_handler.handle_part(
- data, ctype, filename, rendered_payload, frequency, headers)
+ data, ctype, filename, rendered_payload, frequency, headers
+ )
elif sub_handler.handler_version == 2:
sub_handler.handle_part(
- data, ctype, filename, rendered_payload, frequency)
+ data, ctype, filename, rendered_payload, frequency
+ )
def render_jinja_payload_from_file(
- payload, payload_fn, instance_data_file, debug=False):
+ payload, payload_fn, instance_data_file, debug=False
+):
"""Render a jinja template payload sourcing variables from jinja_vars_path.
@param payload: String of jinja template content. Should begin with
@@ -80,19 +87,21 @@ def render_jinja_payload_from_file(
rendered_payload = None
if not os.path.exists(instance_data_file):
raise RuntimeError(
- 'Cannot render jinja template vars. Instance data not yet'
- ' present at %s' % instance_data_file)
+ "Cannot render jinja template vars. Instance data not yet"
+ " present at %s" % instance_data_file
+ )
try:
instance_data = load_json(load_file(instance_data_file))
except (IOError, OSError) as e:
if e.errno == EACCES:
raise RuntimeError(
- 'Cannot render jinja template vars. No read permission on'
+ "Cannot render jinja template vars. No read permission on"
" '%s'. Try sudo" % instance_data_file
) from e
rendered_payload = render_jinja_payload(
- payload, payload_fn, instance_data, debug)
+ payload, payload_fn, instance_data, debug
+ )
if not rendered_payload:
return None
return rendered_payload
@@ -101,26 +110,30 @@ def render_jinja_payload_from_file(
def render_jinja_payload(payload, payload_fn, instance_data, debug=False):
instance_jinja_vars = convert_jinja_instance_data(
instance_data,
- decode_paths=instance_data.get('base64-encoded-keys', []),
- include_key_aliases=True
+ decode_paths=instance_data.get("base64-encoded-keys", []),
+ include_key_aliases=True,
)
if debug:
- LOG.debug('Converted jinja variables\n%s',
- json_dumps(instance_jinja_vars))
+ LOG.debug(
+ "Converted jinja variables\n%s", json_dumps(instance_jinja_vars)
+ )
try:
rendered_payload = render_string(payload, instance_jinja_vars)
except (TypeError, JUndefinedError) as e:
- LOG.warning(
- 'Ignoring jinja template for %s: %s', payload_fn, str(e))
+ LOG.warning("Ignoring jinja template for %s: %s", payload_fn, str(e))
return None
warnings = [
- "'%s'" % var.replace(MISSING_JINJA_PREFIX, '')
+ "'%s'" % var.replace(MISSING_JINJA_PREFIX, "")
for var in re.findall(
- r'%s[^\s]+' % MISSING_JINJA_PREFIX, rendered_payload)]
+ r"%s[^\s]+" % MISSING_JINJA_PREFIX, rendered_payload
+ )
+ ]
if warnings:
LOG.warning(
"Could not render jinja template variables in file '%s': %s",
- payload_fn, ', '.join(warnings))
+ payload_fn,
+ ", ".join(warnings),
+ )
return rendered_payload
@@ -139,14 +152,14 @@ def get_jinja_variable_alias(orig_name: str) -> Optional[str]:
:return: A string with any jinja operators replaced if needed. Otherwise,
none if no alias required.
"""
- alias_name = re.sub(operator_re, '_', orig_name)
+ alias_name = re.sub(operator_re, "_", orig_name)
if alias_name != orig_name:
return alias_name
return None
def convert_jinja_instance_data(
- data, prefix='', sep='/', decode_paths=(), include_key_aliases=False
+ data, prefix="", sep="/", decode_paths=(), include_key_aliases=False
):
"""Process instance-data.json dict for use in jinja templates.
@@ -154,17 +167,20 @@ def convert_jinja_instance_data(
base64_encoded_keys.
"""
result = {}
- decode_paths = [path.replace('-', '_') for path in decode_paths]
+ decode_paths = [path.replace("-", "_") for path in decode_paths]
for key, value in sorted(data.items()):
- key_path = '{0}{1}{2}'.format(prefix, sep, key) if prefix else key
+ key_path = "{0}{1}{2}".format(prefix, sep, key) if prefix else key
if key_path in decode_paths:
value = b64d(value)
if isinstance(value, dict):
result[key] = convert_jinja_instance_data(
- value, key_path, sep=sep, decode_paths=decode_paths,
- include_key_aliases=include_key_aliases
+ value,
+ key_path,
+ sep=sep,
+ decode_paths=decode_paths,
+ include_key_aliases=include_key_aliases,
)
- if re.match(r'v\d+$', key):
+ if re.match(r"v\d+$", key):
# Copy values to top-level aliases
for subkey, subvalue in result[key].items():
result[subkey] = copy.deepcopy(subvalue)
@@ -176,4 +192,5 @@ def convert_jinja_instance_data(
result[alias_name] = copy.deepcopy(result[key])
return result
+
# vi: ts=4 expandtab
diff --git a/cloudinit/handlers/shell_script.py b/cloudinit/handlers/shell_script.py
index 9917f551..44061838 100644
--- a/cloudinit/handlers/shell_script.py
+++ b/cloudinit/handlers/shell_script.py
@@ -10,21 +10,19 @@
import os
-from cloudinit import handlers
-from cloudinit import util
-
-from cloudinit.settings import (PER_ALWAYS)
+from cloudinit import handlers, util
+from cloudinit.settings import PER_ALWAYS
class ShellScriptPartHandler(handlers.Handler):
- prefixes = ['#!']
+ prefixes = ["#!"]
def __init__(self, paths, **_kwargs):
handlers.Handler.__init__(self, PER_ALWAYS)
- self.script_dir = paths.get_ipath_cur('scripts')
- if 'script_path' in _kwargs:
- self.script_dir = paths.get_ipath_cur(_kwargs['script_path'])
+ self.script_dir = paths.get_ipath_cur("scripts")
+ if "script_path" in _kwargs:
+ self.script_dir = paths.get_ipath_cur(_kwargs["script_path"])
def handle_part(self, data, ctype, filename, payload, frequency):
if ctype in handlers.CONTENT_SIGNALS:
@@ -36,4 +34,5 @@ class ShellScriptPartHandler(handlers.Handler):
path = os.path.join(self.script_dir, filename)
util.write_file(path, payload, 0o700)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py
index a9d29537..4bc95f97 100644
--- a/cloudinit/handlers/upstart_job.py
+++ b/cloudinit/handlers/upstart_job.py
@@ -13,17 +13,15 @@ import re
from cloudinit import handlers
from cloudinit import log as logging
-from cloudinit import subp
-from cloudinit import util
-
-from cloudinit.settings import (PER_INSTANCE)
+from cloudinit import subp, util
+from cloudinit.settings import PER_INSTANCE
LOG = logging.getLogger(__name__)
class UpstartJobPartHandler(handlers.Handler):
- prefixes = ['#upstart-job']
+ prefixes = ["#upstart-job"]
def __init__(self, paths, **_kwargs):
handlers.Handler.__init__(self, PER_INSTANCE)
@@ -43,7 +41,7 @@ class UpstartJobPartHandler(handlers.Handler):
filename = util.clean_filename(filename)
(_name, ext) = os.path.splitext(filename)
if not ext:
- ext = ''
+ ext = ""
ext = ext.lower()
if ext != ".conf":
filename = filename + ".conf"
@@ -78,9 +76,10 @@ def _has_suitable_upstart():
if not os.path.exists("/usr/bin/dpkg-query"):
return False
try:
- (dpkg_ver, _err) = subp.subp(["dpkg-query",
- "--showformat=${Version}",
- "--show", "upstart"], rcs=[0, 1])
+ (dpkg_ver, _err) = subp.subp(
+ ["dpkg-query", "--showformat=${Version}", "--show", "upstart"],
+ rcs=[0, 1],
+ )
except Exception:
util.logexc(LOG, "dpkg-query failed")
return False
@@ -93,8 +92,9 @@ def _has_suitable_upstart():
if e.exit_code == 1:
pass
else:
- util.logexc(LOG, "dpkg --compare-versions failed [%s]",
- e.exit_code)
+ util.logexc(
+ LOG, "dpkg --compare-versions failed [%s]", e.exit_code
+ )
except Exception:
util.logexc(LOG, "dpkg --compare-versions failed")
return False
diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py
index b8f9d2c3..c2c9e584 100644
--- a/cloudinit/helpers.py
+++ b/cloudinit/helpers.py
@@ -8,20 +8,15 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from time import time
-
import contextlib
import os
-from configparser import NoSectionError, NoOptionError, RawConfigParser
+from configparser import NoOptionError, NoSectionError, RawConfigParser
from io import StringIO
-
-from cloudinit.settings import (PER_INSTANCE, PER_ALWAYS, PER_ONCE,
- CFG_ENV_NAME)
+from time import time
from cloudinit import log as logging
-from cloudinit import type_utils
-from cloudinit import persistence
-from cloudinit import util
+from cloudinit import persistence, type_utils, util
+from cloudinit.settings import CFG_ENV_NAME, PER_ALWAYS, PER_INSTANCE, PER_ONCE
LOG = logging.getLogger(__name__)
@@ -92,8 +87,9 @@ class FileSemaphores(object):
try:
util.del_dir(self.sem_path)
except (IOError, OSError):
- util.logexc(LOG, "Failed deleting semaphore directory %s",
- self.sem_path)
+ util.logexc(
+ LOG, "Failed deleting semaphore directory %s", self.sem_path
+ )
def _acquire(self, name, freq):
# Check again if its been already gotten
@@ -125,11 +121,14 @@ class FileSemaphores(object):
# this case could happen if the migrator module hadn't run yet
# but the item had run before we did canon_sem_name.
if cname != name and os.path.exists(self._get_path(name, freq)):
- LOG.warning("%s has run without canonicalized name [%s].\n"
- "likely the migrator has not yet run. "
- "It will run next boot.\n"
- "run manually with: cloud-init single --name=migrator",
- name, cname)
+ LOG.warning(
+ "%s has run without canonicalized name [%s].\n"
+ "likely the migrator has not yet run. "
+ "It will run next boot.\n"
+ "run manually with: cloud-init single --name=migrator",
+ name,
+ cname,
+ )
return True
return False
@@ -188,9 +187,14 @@ class Runners(object):
class ConfigMerger(object):
- def __init__(self, paths=None, datasource=None,
- additional_fns=None, base_cfg=None,
- include_vendor=True):
+ def __init__(
+ self,
+ paths=None,
+ datasource=None,
+ additional_fns=None,
+ base_cfg=None,
+ include_vendor=True,
+ ):
self._paths = paths
self._ds = datasource
self._fns = additional_fns
@@ -207,8 +211,11 @@ class ConfigMerger(object):
if ds_cfg and isinstance(ds_cfg, (dict)):
d_cfgs.append(ds_cfg)
except Exception:
- util.logexc(LOG, "Failed loading of datasource config object "
- "from %s", self._ds)
+ util.logexc(
+ LOG,
+ "Failed loading of datasource config object from %s",
+ self._ds,
+ )
return d_cfgs
def _get_env_configs(self):
@@ -218,8 +225,7 @@ class ConfigMerger(object):
try:
e_cfgs.append(util.read_conf(e_fn))
except Exception:
- util.logexc(LOG, 'Failed loading of env. config from %s',
- e_fn)
+ util.logexc(LOG, "Failed loading of env. config from %s", e_fn)
return e_cfgs
def _get_instance_configs(self):
@@ -229,13 +235,13 @@ class ConfigMerger(object):
if not self._paths:
return i_cfgs
- cc_paths = ['cloud_config']
+ cc_paths = ["cloud_config"]
if self._include_vendor:
# the order is important here: we want vendor2
# (dynamic vendor data from OpenStack)
# to override vendor (static data from OpenStack)
- cc_paths.append('vendor2_cloud_config')
- cc_paths.append('vendor_cloud_config')
+ cc_paths.append("vendor2_cloud_config")
+ cc_paths.append("vendor_cloud_config")
for cc_p in cc_paths:
cc_fn = self._paths.get_ipath_cur(cc_p)
@@ -244,11 +250,14 @@ class ConfigMerger(object):
i_cfgs.append(util.read_conf(cc_fn))
except PermissionError:
LOG.debug(
- 'Skipped loading cloud-config from %s due to'
- ' non-root.', cc_fn)
+ "Skipped loading cloud-config from %s due to"
+ " non-root.",
+ cc_fn,
+ )
except Exception:
- util.logexc(LOG, 'Failed loading of cloud-config from %s',
- cc_fn)
+ util.logexc(
+ LOG, "Failed loading of cloud-config from %s", cc_fn
+ )
return i_cfgs
def _read_cfg(self):
@@ -264,8 +273,9 @@ class ConfigMerger(object):
try:
cfgs.append(util.read_conf(c_fn))
except Exception:
- util.logexc(LOG, "Failed loading of configuration from %s",
- c_fn)
+ util.logexc(
+ LOG, "Failed loading of configuration from %s", c_fn
+ )
cfgs.extend(self._get_env_configs())
cfgs.extend(self._get_instance_configs())
@@ -283,7 +293,6 @@ class ConfigMerger(object):
class ContentHandlers(object):
-
def __init__(self):
self.registered = {}
self.initialized = []
@@ -324,15 +333,15 @@ class Paths(persistence.CloudInitPickleMixin):
def __init__(self, path_cfgs, ds=None):
self.cfgs = path_cfgs
# Populate all the initial paths
- self.cloud_dir = path_cfgs.get('cloud_dir', '/var/lib/cloud')
- self.run_dir = path_cfgs.get('run_dir', '/run/cloud-init')
- self.instance_link = os.path.join(self.cloud_dir, 'instance')
+ self.cloud_dir = path_cfgs.get("cloud_dir", "/var/lib/cloud")
+ self.run_dir = path_cfgs.get("run_dir", "/run/cloud-init")
+ self.instance_link = os.path.join(self.cloud_dir, "instance")
self.boot_finished = os.path.join(self.instance_link, "boot-finished")
- self.upstart_conf_d = path_cfgs.get('upstart_dir')
- self.seed_dir = os.path.join(self.cloud_dir, 'seed')
+ self.upstart_conf_d = path_cfgs.get("upstart_dir")
+ self.seed_dir = os.path.join(self.cloud_dir, "seed")
# This one isn't joined, since it should just be read-only
- template_dir = path_cfgs.get('templates_dir', '/etc/cloud/templates/')
- self.template_tpl = os.path.join(template_dir, '%s.tmpl')
+ template_dir = path_cfgs.get("templates_dir", "/etc/cloud/templates/")
+ self.template_tpl = os.path.join(template_dir, "%s.tmpl")
self.lookups = {
"handlers": "handlers",
"scripts": "scripts",
@@ -366,8 +375,8 @@ class Paths(persistence.CloudInitPickleMixin):
# we will rely on this attribute. To fix that, we are now
# manually adding that attribute here.
self.run_dir = Paths(
- path_cfgs=self.cfgs,
- ds=self.datasource).run_dir
+ path_cfgs=self.cfgs, ds=self.datasource
+ ).run_dir
# get_ipath_cur: get the current instance path for an item
def get_ipath_cur(self, name=None):
@@ -386,8 +395,8 @@ class Paths(persistence.CloudInitPickleMixin):
iid = self.datasource.get_instance_id()
if iid is None:
return None
- path_safe_iid = str(iid).replace(os.sep, '_')
- ipath = os.path.join(self.cloud_dir, 'instances', path_safe_iid)
+ path_safe_iid = str(iid).replace(os.sep, "_")
+ ipath = os.path.join(self.cloud_dir, "instances", path_safe_iid)
add_on = self.lookups.get(name)
if add_on:
ipath = os.path.join(ipath, add_on)
@@ -399,8 +408,10 @@ class Paths(persistence.CloudInitPickleMixin):
def get_ipath(self, name=None):
ipath = self._get_ipath(name)
if not ipath:
- LOG.warning(("No per instance data available, "
- "is there an datasource/iid set?"))
+ LOG.warning(
+ "No per instance data available, "
+ "is there an datasource/iid set?"
+ )
return None
else:
return ipath
@@ -423,6 +434,7 @@ class Paths(persistence.CloudInitPickleMixin):
# you can avoid catching exceptions that you typically don't
# care about...
+
class DefaultingConfigParser(RawConfigParser):
DEF_INT = 0
DEF_FLOAT = 0.0
@@ -440,7 +452,7 @@ class DefaultingConfigParser(RawConfigParser):
return value
def set(self, section, option, value=None):
- if not self.has_section(section) and section.lower() != 'default':
+ if not self.has_section(section) and section.lower() != "default":
self.add_section(section)
RawConfigParser.set(self, section, option, value)
@@ -464,13 +476,14 @@ class DefaultingConfigParser(RawConfigParser):
return RawConfigParser.getint(self, section, option)
def stringify(self, header=None):
- contents = ''
+ contents = ""
outputstream = StringIO()
self.write(outputstream)
outputstream.flush()
contents = outputstream.getvalue()
if header:
- contents = '\n'.join([header, contents, ''])
+ contents = "\n".join([header, contents, ""])
return contents
+
# vi: ts=4 expandtab
diff --git a/cloudinit/importer.py b/cloudinit/importer.py
index 4e677af3..f84ff4da 100644
--- a/cloudinit/importer.py
+++ b/cloudinit/importer.py
@@ -48,7 +48,7 @@ def find_module(base_name: str, search_paths, required_attrs=None) -> tuple:
if path:
real_path.extend(path.split("."))
real_path.append(base_name)
- full_path = '.'.join(real_path)
+ full_path = ".".join(real_path)
lookup_paths.append(full_path)
found_paths = []
for full_path in lookup_paths:
@@ -67,4 +67,5 @@ def find_module(base_name: str, search_paths, required_attrs=None) -> tuple:
found_paths.append(full_path)
return (found_paths, lookup_paths)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/log.py b/cloudinit/log.py
index 10149907..f40201bb 100644
--- a/cloudinit/log.py
+++ b/cloudinit/log.py
@@ -28,7 +28,7 @@ DEBUG = logging.DEBUG
NOTSET = logging.NOTSET
# Default basic format
-DEF_CON_FORMAT = '%(asctime)s - %(filename)s[%(levelname)s]: %(message)s'
+DEF_CON_FORMAT = "%(asctime)s - %(filename)s[%(levelname)s]: %(message)s"
# Always format logging timestamps as UTC time
logging.Formatter.converter = time.gmtime
@@ -39,8 +39,8 @@ def setupBasicLogging(level=DEBUG, formatter=None):
formatter = logging.Formatter(DEF_CON_FORMAT)
root = logging.getLogger()
for handler in root.handlers:
- if hasattr(handler, 'stream') and hasattr(handler.stream, 'name'):
- if handler.stream.name == '<stderr>':
+ if hasattr(handler, "stream") and hasattr(handler.stream, "name"):
+ if handler.stream.name == "<stderr>":
handler.setLevel(level)
return
# Didn't have an existing stderr handler; create a new handler
@@ -69,18 +69,18 @@ def setupLogging(cfg=None):
cfg = {}
log_cfgs = []
- log_cfg = cfg.get('logcfg')
+ log_cfg = cfg.get("logcfg")
if log_cfg and isinstance(log_cfg, str):
# If there is a 'logcfg' entry in the config,
# respect it, it is the old keyname
log_cfgs.append(str(log_cfg))
elif "log_cfgs" in cfg:
- for a_cfg in cfg['log_cfgs']:
+ for a_cfg in cfg["log_cfgs"]:
if isinstance(a_cfg, str):
log_cfgs.append(a_cfg)
elif isinstance(a_cfg, (collections.abc.Iterable)):
cfg_str = [str(c) for c in a_cfg]
- log_cfgs.append('\n'.join(cfg_str))
+ log_cfgs.append("\n".join(cfg_str))
else:
log_cfgs.append(str(a_cfg))
@@ -109,16 +109,17 @@ def setupLogging(cfg=None):
pass
# If it didn't work, at least setup a basic logger (if desired)
- basic_enabled = cfg.get('log_basic', True)
+ basic_enabled = cfg.get("log_basic", True)
- sys.stderr.write(("WARN: no logging configured!"
- " (tried %s configs)\n") % (am_tried))
+ sys.stderr.write(
+ "WARN: no logging configured! (tried %s configs)\n" % (am_tried)
+ )
if basic_enabled:
sys.stderr.write("Setting up basic logging...\n")
setupBasicLogging()
-def getLogger(name='cloudinit'):
+def getLogger(name="cloudinit"):
return logging.getLogger(name)
diff --git a/cloudinit/mergers/__init__.py b/cloudinit/mergers/__init__.py
index 7fa493a6..a7a6a47f 100644
--- a/cloudinit/mergers/__init__.py
+++ b/cloudinit/mergers/__init__.py
@@ -6,14 +6,13 @@
import re
-from cloudinit import importer
-from cloudinit import type_utils
+from cloudinit import importer, type_utils
NAME_MTCH = re.compile(r"(^[a-zA-Z_][A-Za-z0-9_]*)\((.*?)\)$")
DEF_MERGE_TYPE = "list()+dict()+str()"
-MERGER_PREFIX = 'm_'
-MERGER_ATTR = 'Merger'
+MERGER_PREFIX = "m_"
+MERGER_ATTR = "Merger"
class UnknownMerger(object):
@@ -53,7 +52,7 @@ class LookupMerger(UnknownMerger):
self._lookups = lookups
def __str__(self):
- return 'LookupMerger: (%s)' % (len(self._lookups))
+ return "LookupMerger: (%s)" % (len(self._lookups))
# For items which can not be merged by the parent this object
# will lookup in a internally maintained set of objects and
@@ -69,25 +68,26 @@ class LookupMerger(UnknownMerger):
meth = getattr(merger, meth_wanted)
break
if not meth:
- return UnknownMerger._handle_unknown(self, meth_wanted,
- value, merge_with)
+ return UnknownMerger._handle_unknown(
+ self, meth_wanted, value, merge_with
+ )
return meth(value, merge_with)
def dict_extract_mergers(config):
parsed_mergers = []
- raw_mergers = config.pop('merge_how', None)
+ raw_mergers = config.pop("merge_how", None)
if raw_mergers is None:
- raw_mergers = config.pop('merge_type', None)
+ raw_mergers = config.pop("merge_type", None)
if raw_mergers is None:
return parsed_mergers
if isinstance(raw_mergers, str):
return string_extract_mergers(raw_mergers)
for m in raw_mergers:
if isinstance(m, (dict)):
- name = m['name']
+ name = m["name"]
name = name.replace("-", "_").strip()
- opts = m['settings']
+ opts = m["settings"]
else:
name = m[0]
if len(m) >= 2:
@@ -110,8 +110,9 @@ def string_extract_mergers(merge_how):
continue
match = NAME_MTCH.match(m_name)
if not match:
- msg = ("Matcher identifer '%s' is not in the right format" %
- (m_name))
+ msg = "Matcher identifer '%s' is not in the right format" % (
+ m_name
+ )
raise ValueError(msg)
(m_name, m_ops) = match.groups()
m_ops = m_ops.strip().split(",")
@@ -129,14 +130,15 @@ def construct(parsed_mergers):
for (m_name, m_ops) in parsed_mergers:
if not m_name.startswith(MERGER_PREFIX):
m_name = MERGER_PREFIX + str(m_name)
- merger_locs, looked_locs = importer.find_module(m_name,
- [__name__],
- [MERGER_ATTR])
+ merger_locs, looked_locs = importer.find_module(
+ m_name, [__name__], [MERGER_ATTR]
+ )
if not merger_locs:
- msg = ("Could not find merger module named '%s' "
- "with attribute '%s' (searched %s)") % (m_name,
- MERGER_ATTR,
- looked_locs)
+ msg = (
+ "Could not find merger module named '%s' "
+ "with attribute '%s' (searched %s)"
+ % (m_name, MERGER_ATTR, looked_locs)
+ )
raise ImportError(msg)
else:
mod = importer.import_module(merger_locs[0])
@@ -149,4 +151,5 @@ def construct(parsed_mergers):
mergers.append(attr(root, opts))
return root
+
# vi: ts=4 expandtab
diff --git a/cloudinit/mergers/m_dict.py b/cloudinit/mergers/m_dict.py
index 93472f13..274ccafc 100644
--- a/cloudinit/mergers/m_dict.py
+++ b/cloudinit/mergers/m_dict.py
@@ -4,8 +4,11 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-DEF_MERGE_TYPE = 'no_replace'
-MERGE_TYPES = ('replace', DEF_MERGE_TYPE,)
+DEF_MERGE_TYPE = "no_replace"
+MERGE_TYPES = (
+ "replace",
+ DEF_MERGE_TYPE,
+)
def _has_any(what, *keys):
@@ -25,21 +28,27 @@ class Merger(object):
self._method = m
break
# Affect how recursive merging is done on other primitives.
- self._recurse_str = 'recurse_str' in opts
- self._recurse_array = _has_any(opts, 'recurse_array', 'recurse_list')
- self._allow_delete = 'allow_delete' in opts
+ self._recurse_str = "recurse_str" in opts
+ self._recurse_array = _has_any(opts, "recurse_array", "recurse_list")
+ self._allow_delete = "allow_delete" in opts
# Backwards compat require this to be on.
self._recurse_dict = True
def __str__(self):
- s = ('DictMerger: (method=%s,recurse_str=%s,'
- 'recurse_dict=%s,recurse_array=%s,allow_delete=%s)')
- s = s % (self._method, self._recurse_str,
- self._recurse_dict, self._recurse_array, self._allow_delete)
+ s = (
+ "DictMerger: (method=%s,recurse_str=%s,"
+ "recurse_dict=%s,recurse_array=%s,allow_delete=%s)"
+ )
+ s = s % (
+ self._method,
+ self._recurse_str,
+ self._recurse_dict,
+ self._recurse_array,
+ self._allow_delete,
+ )
return s
def _do_dict_replace(self, value, merge_with, do_replace):
-
def merge_same_key(old_v, new_v):
if do_replace:
return new_v
@@ -65,12 +74,13 @@ class Merger(object):
def _on_dict(self, value, merge_with):
if not isinstance(merge_with, (dict)):
return value
- if self._method == 'replace':
+ if self._method == "replace":
merged = self._do_dict_replace(dict(value), merge_with, True)
- elif self._method == 'no_replace':
+ elif self._method == "no_replace":
merged = self._do_dict_replace(dict(value), merge_with, False)
else:
raise NotImplementedError("Unknown merge type %s" % (self._method))
return merged
+
# vi: ts=4 expandtab
diff --git a/cloudinit/mergers/m_list.py b/cloudinit/mergers/m_list.py
index 19f32771..9dfae8cd 100644
--- a/cloudinit/mergers/m_list.py
+++ b/cloudinit/mergers/m_list.py
@@ -4,8 +4,8 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-DEF_MERGE_TYPE = 'replace'
-MERGE_TYPES = ('append', 'prepend', DEF_MERGE_TYPE, 'no_replace')
+DEF_MERGE_TYPE = "replace"
+MERGE_TYPES = ("append", "prepend", DEF_MERGE_TYPE, "no_replace")
def _has_any(what, *keys):
@@ -25,38 +25,44 @@ class Merger(object):
self._method = m
break
# Affect how recursive merging is done on other primitives
- self._recurse_str = _has_any(opts, 'recurse_str')
- self._recurse_dict = _has_any(opts, 'recurse_dict')
- self._recurse_array = _has_any(opts, 'recurse_array', 'recurse_list')
+ self._recurse_str = _has_any(opts, "recurse_str")
+ self._recurse_dict = _has_any(opts, "recurse_dict")
+ self._recurse_array = _has_any(opts, "recurse_array", "recurse_list")
def __str__(self):
- return ('ListMerger: (method=%s,recurse_str=%s,'
- 'recurse_dict=%s,recurse_array=%s)') % (self._method,
- self._recurse_str,
- self._recurse_dict,
- self._recurse_array)
+ return (
+ "ListMerger: (method=%s,recurse_str=%s,"
+ "recurse_dict=%s,recurse_array=%s)"
+ % (
+ self._method,
+ self._recurse_str,
+ self._recurse_dict,
+ self._recurse_array,
+ )
+ )
def _on_tuple(self, value, merge_with):
return tuple(self._on_list(list(value), merge_with))
def _on_list(self, value, merge_with):
- if (self._method == 'replace' and
- not isinstance(merge_with, (tuple, list))):
+ if self._method == "replace" and not isinstance(
+ merge_with, (tuple, list)
+ ):
return merge_with
# Ok we now know that what we are merging with is a list or tuple.
merged_list = []
- if self._method == 'prepend':
+ if self._method == "prepend":
merged_list.extend(merge_with)
merged_list.extend(value)
return merged_list
- elif self._method == 'append':
+ elif self._method == "append":
merged_list.extend(value)
merged_list.extend(merge_with)
return merged_list
def merge_same_index(old_v, new_v):
- if self._method == 'no_replace':
+ if self._method == "no_replace":
# Leave it be...
return old_v
if isinstance(new_v, (list, tuple)) and self._recurse_array:
@@ -74,4 +80,5 @@ class Merger(object):
merged_list[i] = merge_same_index(merged_list[i], merge_with[i])
return merged_list
+
# vi: ts=4 expandtab
diff --git a/cloudinit/mergers/m_str.py b/cloudinit/mergers/m_str.py
index 539e3e29..a96bae5e 100644
--- a/cloudinit/mergers/m_str.py
+++ b/cloudinit/mergers/m_str.py
@@ -7,10 +7,10 @@
class Merger(object):
def __init__(self, _merger, opts):
- self._append = 'append' in opts
+ self._append = "append" in opts
def __str__(self):
- return 'StringMerger: (append=%s)' % (self._append)
+ return "StringMerger: (append=%s)" % (self._append)
# On encountering a unicode object to merge value with
# we will for now just proxy into the string method to let it handle it.
@@ -27,4 +27,5 @@ class Merger(object):
return merge_with
return value + merge_with
+
# vi: ts=4 expandtab
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index f81f3a7b..1a738dbc 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -13,14 +13,13 @@ import os
import re
from typing import Any, Dict
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
from cloudinit.net.network_state import mask_to_net_prefix
from cloudinit.url_helper import UrlError, readurl
LOG = logging.getLogger(__name__)
SYS_CLASS_NET = "/sys/class/net/"
-DEFAULT_PRIMARY_INTERFACE = 'eth0'
+DEFAULT_PRIMARY_INTERFACE = "eth0"
OVS_INTERNAL_INTERFACE_LOOKUP_CMD = [
"ovs-vsctl",
"--format",
@@ -36,15 +35,17 @@ OVS_INTERNAL_INTERFACE_LOOKUP_CMD = [
]
-def natural_sort_key(s, _nsre=re.compile('([0-9]+)')):
+def natural_sort_key(s, _nsre=re.compile("([0-9]+)")):
"""Sorting for Humans: natural sort order. Can be use as the key to sort
functions.
This will sort ['eth0', 'ens3', 'ens10', 'ens12', 'ens8', 'ens0'] as
['ens0', 'ens3', 'ens8', 'ens10', 'ens12', 'eth0'] instead of the simple
python way which will produce ['ens0', 'ens10', 'ens12', 'ens3', 'ens8',
'eth0']."""
- return [int(text) if text.isdigit() else text.lower()
- for text in re.split(_nsre, s)]
+ return [
+ int(text) if text.isdigit() else text.lower()
+ for text in re.split(_nsre, s)
+ ]
def get_sys_class_path():
@@ -56,14 +57,19 @@ def sys_dev_path(devname, path=""):
return get_sys_class_path() + devname + "/" + path
-def read_sys_net(devname, path, translate=None,
- on_enoent=None, on_keyerror=None,
- on_einval=None):
+def read_sys_net(
+ devname,
+ path,
+ translate=None,
+ on_enoent=None,
+ on_keyerror=None,
+ on_einval=None,
+):
dev_path = sys_dev_path(devname, path)
try:
contents = util.load_file(dev_path)
except (OSError, IOError) as e:
- e_errno = getattr(e, 'errno', None)
+ e_errno = getattr(e, "errno", None)
if e_errno in (errno.ENOENT, errno.ENOTDIR):
if on_enoent is not None:
return on_enoent(e)
@@ -80,19 +86,26 @@ def read_sys_net(devname, path, translate=None,
if on_keyerror is not None:
return on_keyerror(e)
else:
- LOG.debug("Found unexpected (not translatable) value"
- " '%s' in '%s", contents, dev_path)
+ LOG.debug(
+ "Found unexpected (not translatable) value '%s' in '%s",
+ contents,
+ dev_path,
+ )
raise
def read_sys_net_safe(iface, field, translate=None):
def on_excp_false(e):
return False
- return read_sys_net(iface, field,
- on_keyerror=on_excp_false,
- on_enoent=on_excp_false,
- on_einval=on_excp_false,
- translate=translate)
+
+ return read_sys_net(
+ iface,
+ field,
+ on_keyerror=on_excp_false,
+ on_enoent=on_excp_false,
+ on_einval=on_excp_false,
+ translate=translate,
+ )
def read_sys_net_int(iface, field):
@@ -109,7 +122,7 @@ def is_up(devname):
# The linux kernel says to consider devices in 'unknown'
# operstate as up for the purposes of network configuration. See
# Documentation/networking/operstates.txt in the kernel source.
- translate = {'up': True, 'unknown': True, 'down': False}
+ translate = {"up": True, "unknown": True, "down": False}
return read_sys_net_safe(devname, "operstate", translate=translate)
@@ -136,7 +149,7 @@ def master_is_bridge_or_bond(devname):
return False
bonding_path = os.path.join(master_path, "bonding")
bridge_path = os.path.join(master_path, "bridge")
- return (os.path.exists(bonding_path) or os.path.exists(bridge_path))
+ return os.path.exists(bonding_path) or os.path.exists(bridge_path)
def master_is_openvswitch(devname):
@@ -195,23 +208,24 @@ def is_openvswitch_internal_interface(devname: str) -> bool:
def is_netfailover(devname, driver=None):
- """ netfailover driver uses 3 nics, master, primary and standby.
- this returns True if the device is either the primary or standby
- as these devices are to be ignored.
+ """netfailover driver uses 3 nics, master, primary and standby.
+ this returns True if the device is either the primary or standby
+ as these devices are to be ignored.
"""
if driver is None:
driver = device_driver(devname)
- if is_netfail_primary(devname, driver) or is_netfail_standby(devname,
- driver):
+ if is_netfail_primary(devname, driver) or is_netfail_standby(
+ devname, driver
+ ):
return True
return False
def get_dev_features(devname):
- """ Returns a str from reading /sys/class/net/<devname>/device/features."""
- features = ''
+ """Returns a str from reading /sys/class/net/<devname>/device/features."""
+ features = ""
try:
- features = read_sys_net(devname, 'device/features')
+ features = read_sys_net(devname, "device/features")
except Exception:
pass
return features
@@ -231,13 +245,13 @@ def has_netfail_standby_feature(devname):
def is_netfail_master(devname, driver=None):
- """ A device is a "netfail master" device if:
+ """A device is a "netfail master" device if:
- - The device does NOT have the 'master' sysfs attribute
- - The device driver is 'virtio_net'
- - The device has the standby feature bit set
+ - The device does NOT have the 'master' sysfs attribute
+ - The device driver is 'virtio_net'
+ - The device has the standby feature bit set
- Return True if all of the above is True.
+ Return True if all of the above is True.
"""
if get_master(devname) is not None:
return False
@@ -255,17 +269,17 @@ def is_netfail_master(devname, driver=None):
def is_netfail_primary(devname, driver=None):
- """ A device is a "netfail primary" device if:
+ """A device is a "netfail primary" device if:
- - the device has a 'master' sysfs file
- - the device driver is not 'virtio_net'
- - the 'master' sysfs file points to device with virtio_net driver
- - the 'master' device has the 'standby' feature bit set
+ - the device has a 'master' sysfs file
+ - the device driver is not 'virtio_net'
+ - the 'master' sysfs file points to device with virtio_net driver
+ - the 'master' device has the 'standby' feature bit set
- Return True if all of the above is True.
+ Return True if all of the above is True.
"""
# /sys/class/net/<devname>/master -> ../../<master devname>
- master_sysfs_path = sys_dev_path(devname, path='master')
+ master_sysfs_path = sys_dev_path(devname, path="master")
if not os.path.exists(master_sysfs_path):
return False
@@ -288,13 +302,13 @@ def is_netfail_primary(devname, driver=None):
def is_netfail_standby(devname, driver=None):
- """ A device is a "netfail standby" device if:
+ """A device is a "netfail standby" device if:
- - The device has a 'master' sysfs attribute
- - The device driver is 'virtio_net'
- - The device has the standby feature bit set
+ - The device has a 'master' sysfs attribute
+ - The device driver is 'virtio_net'
+ - The device has the standby feature bit set
- Return True if all of the above is True.
+ Return True if all of the above is True.
"""
if get_master(devname) is None:
return False
@@ -320,15 +334,15 @@ def is_renamed(devname):
#define NET_NAME_USER 3 /* provided by user-space */
#define NET_NAME_RENAMED 4 /* renamed by user-space */
"""
- name_assign_type = read_sys_net_safe(devname, 'name_assign_type')
- if name_assign_type and name_assign_type in ['3', '4']:
+ name_assign_type = read_sys_net_safe(devname, "name_assign_type")
+ if name_assign_type and name_assign_type in ["3", "4"]:
return True
return False
def is_vlan(devname):
uevent = str(read_sys_net_safe(devname, "uevent"))
- return 'DEVTYPE=vlan' in uevent.splitlines()
+ return "DEVTYPE=vlan" in uevent.splitlines()
def device_driver(devname):
@@ -372,7 +386,7 @@ class ParserError(Exception):
def is_disabled_cfg(cfg):
if not cfg or not isinstance(cfg, dict):
return False
- return cfg.get('config') == "disabled"
+ return cfg.get("config") == "disabled"
def find_fallback_nic(blacklist_drivers=None):
@@ -386,9 +400,9 @@ def find_fallback_nic(blacklist_drivers=None):
def find_fallback_nic_on_netbsd_or_openbsd(blacklist_drivers=None):
- values = list(sorted(
- get_interfaces_by_mac().values(),
- key=natural_sort_key))
+ values = list(
+ sorted(get_interfaces_by_mac().values(), key=natural_sort_key)
+ )
if values:
return values[0]
@@ -402,7 +416,7 @@ def find_fallback_nic_on_freebsd(blacklist_drivers=None):
we'll use the first interface from ``ifconfig -l -u ether``
"""
- stdout, _stderr = subp.subp(['ifconfig', '-l', '-u', 'ether'])
+ stdout, _stderr = subp.subp(["ifconfig", "-l", "-u", "ether"])
values = stdout.split()
if values:
return values[0]
@@ -419,22 +433,31 @@ def find_fallback_nic_on_linux(blacklist_drivers=None):
if not blacklist_drivers:
blacklist_drivers = []
- if 'net.ifnames=0' in util.get_cmdline():
- LOG.debug('Stable ifnames disabled by net.ifnames=0 in /proc/cmdline')
+ if "net.ifnames=0" in util.get_cmdline():
+ LOG.debug("Stable ifnames disabled by net.ifnames=0 in /proc/cmdline")
else:
- unstable = [device for device in get_devicelist()
- if device != 'lo' and not is_renamed(device)]
+ unstable = [
+ device
+ for device in get_devicelist()
+ if device != "lo" and not is_renamed(device)
+ ]
if len(unstable):
- LOG.debug('Found unstable nic names: %s; calling udevadm settle',
- unstable)
- msg = 'Waiting for udev events to settle'
+ LOG.debug(
+ "Found unstable nic names: %s; calling udevadm settle",
+ unstable,
+ )
+ msg = "Waiting for udev events to settle"
util.log_time(LOG.debug, msg, func=util.udevadm_settle)
# get list of interfaces that could have connections
- invalid_interfaces = set(['lo'])
- potential_interfaces = set([device for device in get_devicelist()
- if device_driver(device) not in
- blacklist_drivers])
+ invalid_interfaces = set(["lo"])
+ potential_interfaces = set(
+ [
+ device
+ for device in get_devicelist()
+ if device_driver(device) not in blacklist_drivers
+ ]
+ )
potential_interfaces = potential_interfaces.difference(invalid_interfaces)
# sort into interfaces with carrier, interfaces which could have carrier,
# and ignore interfaces that are definitely disconnected
@@ -452,19 +475,19 @@ def find_fallback_nic_on_linux(blacklist_drivers=None):
if is_netfailover(interface):
# ignore netfailover primary/standby interfaces
continue
- carrier = read_sys_net_int(interface, 'carrier')
+ carrier = read_sys_net_int(interface, "carrier")
if carrier:
connected.append(interface)
continue
# check if nic is dormant or down, as this may make a nick appear to
# not have a carrier even though it could acquire one when brought
# online by dhclient
- dormant = read_sys_net_int(interface, 'dormant')
+ dormant = read_sys_net_int(interface, "dormant")
if dormant:
possibly_connected.append(interface)
continue
- operstate = read_sys_net_safe(interface, 'operstate')
- if operstate in ['dormant', 'down', 'lowerlayerdown', 'unknown']:
+ operstate = read_sys_net_safe(interface, "operstate")
+ if operstate in ["dormant", "down", "lowerlayerdown", "unknown"]:
possibly_connected.append(interface)
continue
@@ -484,7 +507,7 @@ def find_fallback_nic_on_linux(blacklist_drivers=None):
# pick the first that has a mac-address
for name in names:
- if read_sys_net_safe(name, 'address'):
+ if read_sys_net_safe(name, "address"):
return name
return None
@@ -501,32 +524,32 @@ def generate_fallback_config(blacklist_drivers=None, config_driver=None):
# netfail cannot use mac for matching, they have duplicate macs
if is_netfail_master(target_name):
- match = {'name': target_name}
+ match = {"name": target_name}
else:
match = {
- 'macaddress': read_sys_net_safe(target_name, 'address').lower()}
- cfg = {'dhcp4': True, 'set-name': target_name, 'match': match}
+ "macaddress": read_sys_net_safe(target_name, "address").lower()
+ }
+ cfg = {"dhcp4": True, "set-name": target_name, "match": match}
if config_driver:
driver = device_driver(target_name)
if driver:
- cfg['match']['driver'] = driver
- nconf = {'ethernets': {target_name: cfg}, 'version': 2}
+ cfg["match"]["driver"] = driver
+ nconf = {"ethernets": {target_name: cfg}, "version": 2}
return nconf
def extract_physdevs(netcfg):
-
def _version_1(netcfg):
physdevs = []
- for ent in netcfg.get('config', {}):
- if ent.get('type') != 'physical':
+ for ent in netcfg.get("config", {}):
+ if ent.get("type") != "physical":
continue
- mac = ent.get('mac_address')
+ mac = ent.get("mac_address")
if not mac:
continue
- name = ent.get('name')
- driver = ent.get('params', {}).get('driver')
- device_id = ent.get('params', {}).get('device_id')
+ name = ent.get("name")
+ driver = ent.get("params", {}).get("driver")
+ device_id = ent.get("params", {}).get("device_id")
if not driver:
driver = device_driver(name)
if not device_id:
@@ -536,17 +559,17 @@ def extract_physdevs(netcfg):
def _version_2(netcfg):
physdevs = []
- for ent in netcfg.get('ethernets', {}).values():
+ for ent in netcfg.get("ethernets", {}).values():
# only rename if configured to do so
- name = ent.get('set-name')
+ name = ent.get("set-name")
if not name:
continue
# cloud-init requires macaddress for renaming
- mac = ent.get('match', {}).get('macaddress')
+ mac = ent.get("match", {}).get("macaddress")
if not mac:
continue
- driver = ent.get('match', {}).get('driver')
- device_id = ent.get('match', {}).get('device_id')
+ driver = ent.get("match", {}).get("driver")
+ device_id = ent.get("match", {}).get("device_id")
if not driver:
driver = device_driver(name)
if not device_id:
@@ -554,13 +577,13 @@ def extract_physdevs(netcfg):
physdevs.append([mac, name, driver, device_id])
return physdevs
- version = netcfg.get('version')
+ version = netcfg.get("version")
if version == 1:
return _version_1(netcfg)
elif version == 2:
return _version_2(netcfg)
- raise RuntimeError('Unknown network config version: %s' % version)
+ raise RuntimeError("Unknown network config version: %s" % version)
def apply_network_config_names(netcfg, strict_present=True, strict_busy=True):
@@ -577,7 +600,7 @@ def apply_network_config_names(netcfg, strict_present=True, strict_busy=True):
_rename_interfaces(extract_physdevs(netcfg))
except RuntimeError as e:
raise RuntimeError(
- 'Failed to apply network config names: %s' % e
+ "Failed to apply network config names: %s" % e
) from e
@@ -619,33 +642,37 @@ def _get_current_rename_info(check_downable=True):
cur_info = {}
for (name, mac, driver, device_id) in get_interfaces():
cur_info[name] = {
- 'downable': None,
- 'device_id': device_id,
- 'driver': driver,
- 'mac': mac.lower(),
- 'name': name,
- 'up': is_up(name),
+ "downable": None,
+ "device_id": device_id,
+ "driver": driver,
+ "mac": mac.lower(),
+ "name": name,
+ "up": is_up(name),
}
if check_downable:
nmatch = re.compile(r"[0-9]+:\s+(\w+)[@:]")
- ipv6, _err = subp.subp(['ip', '-6', 'addr', 'show', 'permanent',
- 'scope', 'global'], capture=True)
- ipv4, _err = subp.subp(['ip', '-4', 'addr', 'show'], capture=True)
+ ipv6, _err = subp.subp(
+ ["ip", "-6", "addr", "show", "permanent", "scope", "global"],
+ capture=True,
+ )
+ ipv4, _err = subp.subp(["ip", "-4", "addr", "show"], capture=True)
nics_with_addresses = set()
for bytes_out in (ipv6, ipv4):
nics_with_addresses.update(nmatch.findall(bytes_out))
for d in cur_info.values():
- d['downable'] = (d['up'] is False or
- d['name'] not in nics_with_addresses)
+ d["downable"] = (
+ d["up"] is False or d["name"] not in nics_with_addresses
+ )
return cur_info
-def _rename_interfaces(renames, strict_present=True, strict_busy=True,
- current_info=None):
+def _rename_interfaces(
+ renames, strict_present=True, strict_busy=True, current_info=None
+):
if not len(renames):
LOG.debug("no interfaces to rename")
@@ -657,16 +684,15 @@ def _rename_interfaces(renames, strict_present=True, strict_busy=True,
cur_info = {}
for name, data in current_info.items():
cur = data.copy()
- if cur.get('mac'):
- cur['mac'] = cur['mac'].lower()
- cur['name'] = name
+ if cur.get("mac"):
+ cur["mac"] = cur["mac"].lower()
+ cur["name"] = name
cur_info[name] = cur
LOG.debug("Detected interfaces %s", cur_info)
def update_byname(bymac):
- return dict((data['name'], data)
- for data in cur_info.values())
+ return dict((data["name"], data) for data in cur_info.values())
def rename(cur, new):
subp.subp(["ip", "link", "set", cur, "name", new], capture=True)
@@ -687,25 +713,31 @@ def _rename_interfaces(renames, strict_present=True, strict_busy=True,
def entry_match(data, mac, driver, device_id):
"""match if set and in data"""
if mac and driver and device_id:
- return (data['mac'] == mac and
- data['driver'] == driver and
- data['device_id'] == device_id)
+ return (
+ data["mac"] == mac
+ and data["driver"] == driver
+ and data["device_id"] == device_id
+ )
elif mac and driver:
- return (data['mac'] == mac and
- data['driver'] == driver)
+ return data["mac"] == mac and data["driver"] == driver
elif mac:
- return (data['mac'] == mac)
+ return data["mac"] == mac
return False
def find_entry(mac, driver, device_id):
- match = [data for data in cur_info.values()
- if entry_match(data, mac, driver, device_id)]
+ match = [
+ data
+ for data in cur_info.values()
+ if entry_match(data, mac, driver, device_id)
+ ]
if len(match):
if len(match) > 1:
- msg = ('Failed to match a single device. Matched devices "%s"'
- ' with search values "(mac:%s driver:%s device_id:%s)"'
- % (match, mac, driver, device_id))
+ msg = (
+ 'Failed to match a single device. Matched devices "%s"'
+ ' with search values "(mac:%s driver:%s device_id:%s)"'
+ % (match, mac, driver, device_id)
+ )
raise ValueError(msg)
return match[0]
@@ -720,10 +752,11 @@ def _rename_interfaces(renames, strict_present=True, strict_busy=True,
if strict_present:
errors.append(
"[nic not present] Cannot rename mac=%s to %s"
- ", not available." % (mac, new_name))
+ ", not available." % (mac, new_name)
+ )
continue
- cur_name = cur.get('name')
+ cur_name = cur.get("name")
if cur_name == new_name:
# nothing to do
continue
@@ -732,24 +765,25 @@ def _rename_interfaces(renames, strict_present=True, strict_busy=True,
if strict_present:
errors.append(
"[nic not present] Cannot rename mac=%s to %s"
- ", not available." % (mac, new_name))
+ ", not available." % (mac, new_name)
+ )
continue
- if cur['up']:
+ if cur["up"]:
msg = "[busy] Error renaming mac=%s from %s to %s"
- if not cur['downable']:
+ if not cur["downable"]:
if strict_busy:
errors.append(msg % (mac, cur_name, new_name))
continue
- cur['up'] = False
+ cur["up"] = False
cur_ops.append(("down", mac, new_name, (cur_name,)))
ups.append(("up", mac, new_name, (new_name,)))
if new_name in cur_byname:
target = cur_byname[new_name]
- if target['up']:
+ if target["up"]:
msg = "[busy-target] Error renaming mac=%s from %s to %s."
- if not target['downable']:
+ if not target["downable"]:
if strict_busy:
errors.append(msg % (mac, cur_name, new_name))
continue
@@ -762,17 +796,17 @@ def _rename_interfaces(renames, strict_present=True, strict_busy=True,
tmp_name = tmpname_fmt % tmpi
cur_ops.append(("rename", mac, new_name, (new_name, tmp_name)))
- target['name'] = tmp_name
+ target["name"] = tmp_name
cur_byname = update_byname(cur_info)
- if target['up']:
+ if target["up"]:
ups.append(("up", mac, new_name, (tmp_name,)))
- cur_ops.append(("rename", mac, new_name, (cur['name'], new_name)))
- cur['name'] = new_name
+ cur_ops.append(("rename", mac, new_name, (cur["name"], new_name)))
+ cur["name"] = new_name
cur_byname = update_byname(cur_info)
ops += cur_ops
- opmap = {'rename': rename, 'down': down, 'up': up}
+ opmap = {"rename": rename, "down": down, "up": up}
if len(ops) + len(ups) == 0:
if len(errors):
@@ -787,11 +821,12 @@ def _rename_interfaces(renames, strict_present=True, strict_busy=True,
opmap.get(op)(*params)
except Exception as e:
errors.append(
- "[unknown] Error performing %s%s for %s, %s: %s" %
- (op, params, mac, new_name, e))
+ "[unknown] Error performing %s%s for %s, %s: %s"
+ % (op, params, mac, new_name, e)
+ )
if len(errors):
- raise Exception('\n'.join(errors))
+ raise Exception("\n".join(errors))
def get_interface_mac(ifname):
@@ -810,7 +845,7 @@ def get_ib_interface_hwaddr(ifname, ethernet_format):
representation of the address will be returned.
"""
# Type 32 is Infiniband.
- if read_sys_net_safe(ifname, 'type') == '32':
+ if read_sys_net_safe(ifname, "type") == "32":
mac = get_interface_mac(ifname)
if mac and ethernet_format:
# Use bytes 13-15 and 18-20 of the hardware address.
@@ -821,26 +856,30 @@ def get_ib_interface_hwaddr(ifname, ethernet_format):
def get_interfaces_by_mac(blacklist_drivers=None) -> dict:
if util.is_FreeBSD() or util.is_DragonFlyBSD():
return get_interfaces_by_mac_on_freebsd(
- blacklist_drivers=blacklist_drivers)
+ blacklist_drivers=blacklist_drivers
+ )
elif util.is_NetBSD():
return get_interfaces_by_mac_on_netbsd(
- blacklist_drivers=blacklist_drivers)
+ blacklist_drivers=blacklist_drivers
+ )
elif util.is_OpenBSD():
return get_interfaces_by_mac_on_openbsd(
- blacklist_drivers=blacklist_drivers)
+ blacklist_drivers=blacklist_drivers
+ )
else:
return get_interfaces_by_mac_on_linux(
- blacklist_drivers=blacklist_drivers)
+ blacklist_drivers=blacklist_drivers
+ )
def get_interfaces_by_mac_on_freebsd(blacklist_drivers=None) -> dict():
- (out, _) = subp.subp(['ifconfig', '-a', 'ether'])
+ (out, _) = subp.subp(["ifconfig", "-a", "ether"])
# flatten each interface block in a single line
def flatten(out):
- curr_block = ''
- for line in out.split('\n'):
- if line.startswith('\t'):
+ curr_block = ""
+ for line in out.split("\n"):
+ if line.startswith("\t"):
curr_block += line
else:
if curr_block:
@@ -852,10 +891,11 @@ def get_interfaces_by_mac_on_freebsd(blacklist_drivers=None) -> dict():
def find_mac(flat_list):
for block in flat_list:
m = re.search(
- r"^(?P<ifname>\S*): .*ether\s(?P<mac>[\da-f:]{17}).*",
- block)
+ r"^(?P<ifname>\S*): .*ether\s(?P<mac>[\da-f:]{17}).*", block
+ )
if m:
- yield (m.group('mac'), m.group('ifname'))
+ yield (m.group("mac"), m.group("ifname"))
+
results = {mac: ifname for mac, ifname in find_mac(flatten(out))}
return results
@@ -866,13 +906,13 @@ def get_interfaces_by_mac_on_netbsd(blacklist_drivers=None) -> dict():
r"(?P<ifname>\w+).*address:\s"
r"(?P<mac>([\da-f]{2}[:-]){5}([\da-f]{2})).*"
)
- (out, _) = subp.subp(['ifconfig', '-a'])
- if_lines = re.sub(r'\n\s+', ' ', out).splitlines()
+ (out, _) = subp.subp(["ifconfig", "-a"])
+ if_lines = re.sub(r"\n\s+", " ", out).splitlines()
for line in if_lines:
m = re.match(re_field_match, line)
if m:
fields = m.groupdict()
- ret[fields['mac']] = fields['ifname']
+ ret[fields["mac"]] = fields["ifname"]
return ret
@@ -880,14 +920,15 @@ def get_interfaces_by_mac_on_openbsd(blacklist_drivers=None) -> dict():
ret = {}
re_field_match = (
r"(?P<ifname>\w+).*lladdr\s"
- r"(?P<mac>([\da-f]{2}[:-]){5}([\da-f]{2})).*")
- (out, _) = subp.subp(['ifconfig', '-a'])
- if_lines = re.sub(r'\n\s+', ' ', out).splitlines()
+ r"(?P<mac>([\da-f]{2}[:-]){5}([\da-f]{2})).*"
+ )
+ (out, _) = subp.subp(["ifconfig", "-a"])
+ if_lines = re.sub(r"\n\s+", " ", out).splitlines()
for line in if_lines:
m = re.match(re_field_match, line)
if m:
fields = m.groupdict()
- ret[fields['mac']] = fields['ifname']
+ ret[fields["mac"]] = fields["ifname"]
return ret
@@ -897,11 +938,13 @@ def get_interfaces_by_mac_on_linux(blacklist_drivers=None) -> dict:
Bridges and any devices that have a 'stolen' mac are excluded."""
ret = {}
for name, mac, _driver, _devid in get_interfaces(
- blacklist_drivers=blacklist_drivers):
+ blacklist_drivers=blacklist_drivers
+ ):
if mac in ret:
raise RuntimeError(
- "duplicate mac found! both '%s' and '%s' have mac '%s'" %
- (name, ret[mac], mac))
+ "duplicate mac found! both '%s' and '%s' have mac '%s'"
+ % (name, ret[mac], mac)
+ )
ret[mac] = name
# Try to get an Infiniband hardware address (in 6 byte Ethernet format)
# for the interface.
@@ -909,8 +952,9 @@ def get_interfaces_by_mac_on_linux(blacklist_drivers=None) -> dict:
if ib_mac:
if ib_mac in ret:
raise RuntimeError(
- "duplicate mac found! both '%s' and '%s' have mac '%s'" %
- (name, ret[ib_mac], ib_mac))
+ "duplicate mac found! both '%s' and '%s' have mac '%s'"
+ % (name, ret[ib_mac], ib_mac)
+ )
ret[ib_mac] = name
return ret
@@ -924,7 +968,7 @@ def get_interfaces(blacklist_drivers=None) -> list:
blacklist_drivers = []
devs = get_devicelist()
# 16 somewhat arbitrarily chosen. Normally a mac is 6 '00:' tokens.
- zero_mac = ':'.join(('00',) * 16)
+ zero_mac = ":".join(("00",) * 16)
for name in devs:
if not interface_has_own_mac(name):
continue
@@ -935,8 +979,9 @@ def get_interfaces(blacklist_drivers=None) -> list:
if is_bond(name):
continue
if get_master(name) is not None:
- if (not master_is_bridge_or_bond(name) and
- not master_is_openvswitch(name)):
+ if not master_is_bridge_or_bond(
+ name
+ ) and not master_is_openvswitch(name):
continue
if is_netfailover(name):
continue
@@ -945,7 +990,7 @@ def get_interfaces(blacklist_drivers=None) -> list:
if not mac:
continue
# skip nics that have no mac (00:00....)
- if name != 'lo' and mac == zero_mac[:len(mac)]:
+ if name != "lo" and mac == zero_mac[: len(mac)]:
continue
if is_openvswitch_internal_interface(name):
continue
@@ -966,8 +1011,9 @@ def get_ib_hwaddrs_by_interface():
if ib_mac:
if ib_mac in ret:
raise RuntimeError(
- "duplicate mac found! both '%s' and '%s' have mac '%s'" %
- (name, ret[ib_mac], ib_mac))
+ "duplicate mac found! both '%s' and '%s' have mac '%s'"
+ % (name, ret[ib_mac], ib_mac)
+ )
ret[name] = ib_mac
return ret
@@ -985,18 +1031,21 @@ def has_url_connectivity(url_data: Dict[str, Any]) -> bool:
"timeout": 10
})
"""
- if 'url' not in url_data:
+ if "url" not in url_data:
LOG.warning(
- "Ignoring connectivity check. No 'url' to check in %s", url_data)
+ "Ignoring connectivity check. No 'url' to check in %s", url_data
+ )
return False
- url = url_data['url']
- if not any([url.startswith('http://'), url.startswith('https://')]):
+ url = url_data["url"]
+ if not any([url.startswith("http://"), url.startswith("https://")]):
LOG.warning(
"Ignoring connectivity check. Expected URL beginning with http*://"
- " received '%s'", url)
+ " received '%s'",
+ url,
+ )
return False
- if 'timeout' not in url_data:
- url_data['timeout'] = 5
+ if "timeout" not in url_data:
+ url_data["timeout"] = 5
try:
readurl(**url_data)
except UrlError:
@@ -1047,9 +1096,16 @@ class EphemeralIPv4Network(object):
context exit, clean up the interface leaving no configuration behind.
"""
- def __init__(self, interface, ip, prefix_or_mask, broadcast, router=None,
- connectivity_url_data: Dict[str, Any] = None,
- static_routes=None):
+ def __init__(
+ self,
+ interface,
+ ip,
+ prefix_or_mask,
+ broadcast,
+ router=None,
+ connectivity_url_data: Dict[str, Any] = None,
+ static_routes=None,
+ ):
"""Setup context manager and validate call signature.
@param interface: Name of the network interface to bring up.
@@ -1064,14 +1120,14 @@ class EphemeralIPv4Network(object):
"""
if not all([interface, ip, prefix_or_mask, broadcast]):
raise ValueError(
- 'Cannot init network on {0} with {1}/{2} and bcast {3}'.format(
- interface, ip, prefix_or_mask, broadcast))
+ "Cannot init network on {0} with {1}/{2} and bcast {3}".format(
+ interface, ip, prefix_or_mask, broadcast
+ )
+ )
try:
self.prefix = mask_to_net_prefix(prefix_or_mask)
except ValueError as e:
- raise ValueError(
- 'Cannot setup network: {0}'.format(e)
- ) from e
+ raise ValueError("Cannot setup network: {0}".format(e)) from e
self.connectivity_url_data = connectivity_url_data
self.interface = interface
@@ -1086,8 +1142,10 @@ class EphemeralIPv4Network(object):
if self.connectivity_url_data:
if has_url_connectivity(self.connectivity_url_data):
LOG.debug(
- 'Skip ephemeral network setup, instance has connectivity'
- ' to %s', self.connectivity_url_data['url'])
+ "Skip ephemeral network setup, instance has connectivity"
+ " to %s",
+ self.connectivity_url_data["url"],
+ )
return
self._bringup_device()
@@ -1116,38 +1174,92 @@ class EphemeralIPv4Network(object):
def _delete_address(self, address, prefix):
"""Perform the ip command to remove the specified address."""
subp.subp(
- ['ip', '-family', 'inet', 'addr', 'del',
- '%s/%s' % (address, prefix), 'dev', self.interface],
- capture=True)
+ [
+ "ip",
+ "-family",
+ "inet",
+ "addr",
+ "del",
+ "%s/%s" % (address, prefix),
+ "dev",
+ self.interface,
+ ],
+ capture=True,
+ )
def _bringup_device(self):
"""Perform the ip comands to fully setup the device."""
- cidr = '{0}/{1}'.format(self.ip, self.prefix)
+ cidr = "{0}/{1}".format(self.ip, self.prefix)
LOG.debug(
- 'Attempting setup of ephemeral network on %s with %s brd %s',
- self.interface, cidr, self.broadcast)
+ "Attempting setup of ephemeral network on %s with %s brd %s",
+ self.interface,
+ cidr,
+ self.broadcast,
+ )
try:
subp.subp(
- ['ip', '-family', 'inet', 'addr', 'add', cidr, 'broadcast',
- self.broadcast, 'dev', self.interface],
- capture=True, update_env={'LANG': 'C'})
+ [
+ "ip",
+ "-family",
+ "inet",
+ "addr",
+ "add",
+ cidr,
+ "broadcast",
+ self.broadcast,
+ "dev",
+ self.interface,
+ ],
+ capture=True,
+ update_env={"LANG": "C"},
+ )
except subp.ProcessExecutionError as e:
if "File exists" not in e.stderr:
raise
LOG.debug(
- 'Skip ephemeral network setup, %s already has address %s',
- self.interface, self.ip)
+ "Skip ephemeral network setup, %s already has address %s",
+ self.interface,
+ self.ip,
+ )
else:
# Address creation success, bring up device and queue cleanup
subp.subp(
- ['ip', '-family', 'inet', 'link', 'set', 'dev', self.interface,
- 'up'], capture=True)
+ [
+ "ip",
+ "-family",
+ "inet",
+ "link",
+ "set",
+ "dev",
+ self.interface,
+ "up",
+ ],
+ capture=True,
+ )
self.cleanup_cmds.append(
- ['ip', '-family', 'inet', 'link', 'set', 'dev', self.interface,
- 'down'])
+ [
+ "ip",
+ "-family",
+ "inet",
+ "link",
+ "set",
+ "dev",
+ self.interface,
+ "down",
+ ]
+ )
self.cleanup_cmds.append(
- ['ip', '-family', 'inet', 'addr', 'del', cidr, 'dev',
- self.interface])
+ [
+ "ip",
+ "-family",
+ "inet",
+ "addr",
+ "del",
+ cidr,
+ "dev",
+ self.interface,
+ ]
+ )
def _bringup_static_routes(self):
# static_routes = [("169.254.169.254/32", "130.56.248.255"),
@@ -1155,35 +1267,76 @@ class EphemeralIPv4Network(object):
for net_address, gateway in self.static_routes:
via_arg = []
if gateway != "0.0.0.0":
- via_arg = ['via', gateway]
+ via_arg = ["via", gateway]
subp.subp(
- ['ip', '-4', 'route', 'append', net_address] + via_arg +
- ['dev', self.interface], capture=True)
+ ["ip", "-4", "route", "append", net_address]
+ + via_arg
+ + ["dev", self.interface],
+ capture=True,
+ )
self.cleanup_cmds.insert(
- 0, ['ip', '-4', 'route', 'del', net_address] + via_arg +
- ['dev', self.interface])
+ 0,
+ ["ip", "-4", "route", "del", net_address]
+ + via_arg
+ + ["dev", self.interface],
+ )
def _bringup_router(self):
"""Perform the ip commands to fully setup the router if needed."""
# Check if a default route exists and exit if it does
- out, _ = subp.subp(['ip', 'route', 'show', '0.0.0.0/0'], capture=True)
- if 'default' in out:
+ out, _ = subp.subp(["ip", "route", "show", "0.0.0.0/0"], capture=True)
+ if "default" in out:
LOG.debug(
- 'Skip ephemeral route setup. %s already has default route: %s',
- self.interface, out.strip())
+ "Skip ephemeral route setup. %s already has default route: %s",
+ self.interface,
+ out.strip(),
+ )
return
subp.subp(
- ['ip', '-4', 'route', 'add', self.router, 'dev', self.interface,
- 'src', self.ip], capture=True)
+ [
+ "ip",
+ "-4",
+ "route",
+ "add",
+ self.router,
+ "dev",
+ self.interface,
+ "src",
+ self.ip,
+ ],
+ capture=True,
+ )
self.cleanup_cmds.insert(
0,
- ['ip', '-4', 'route', 'del', self.router, 'dev', self.interface,
- 'src', self.ip])
+ [
+ "ip",
+ "-4",
+ "route",
+ "del",
+ self.router,
+ "dev",
+ self.interface,
+ "src",
+ self.ip,
+ ],
+ )
subp.subp(
- ['ip', '-4', 'route', 'add', 'default', 'via', self.router,
- 'dev', self.interface], capture=True)
+ [
+ "ip",
+ "-4",
+ "route",
+ "add",
+ "default",
+ "via",
+ self.router,
+ "dev",
+ self.interface,
+ ],
+ capture=True,
+ )
self.cleanup_cmds.insert(
- 0, ['ip', '-4', 'route', 'del', 'default', 'dev', self.interface])
+ 0, ["ip", "-4", "route", "del", "default", "dev", self.interface]
+ )
class RendererNotFoundError(RuntimeError):
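
For context, a minimal usage sketch of the EphemeralIPv4Network context manager reformatted above; the interface name and addresses here are illustrative and not taken from the diff:

    from cloudinit.net import EphemeralIPv4Network

    # Configure a temporary address on a NIC, do some work (for example,
    # fetch metadata), and let context exit remove the configuration again.
    with EphemeralIPv4Network(
        interface="eth0",                 # illustrative NIC name
        ip="192.168.1.10",
        prefix_or_mask="255.255.255.0",   # netmask (or prefix length)
        broadcast="192.168.1.255",
        router="192.168.1.1",
    ):
        pass  # the ephemeral network is configured only inside this block
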
diff --git a/cloudinit/net/activators.py b/cloudinit/net/activators.py
index 137338d8..e80c26df 100644
--- a/cloudinit/net/activators.py
+++ b/cloudinit/net/activators.py
@@ -4,15 +4,13 @@ import os
from abc import ABC, abstractmethod
from typing import Iterable, List, Type
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
from cloudinit.net.eni import available as eni_available
from cloudinit.net.netplan import available as netplan_available
-from cloudinit.net.networkd import available as networkd_available
from cloudinit.net.network_state import NetworkState
+from cloudinit.net.networkd import available as networkd_available
from cloudinit.net.sysconfig import NM_CFG_FILE
-
LOG = logging.getLogger(__name__)
@@ -25,8 +23,7 @@ def _alter_interface(cmd, device_name) -> bool:
try:
(_out, err) = subp.subp(cmd)
if len(err):
- LOG.warning("Running %s resulted in stderr output: %s",
- cmd, err)
+ LOG.warning("Running %s resulted in stderr output: %s", cmd, err)
return True
except subp.ProcessExecutionError:
util.logexc(LOG, "Running interface command %s failed", cmd)
@@ -73,7 +70,7 @@ class NetworkActivator(ABC):
Return True if successful, otherwise return False
"""
return cls.bring_up_interfaces(
- [i['name'] for i in network_state.iter_interfaces()]
+ [i["name"] for i in network_state.iter_interfaces()]
)
@classmethod
@@ -91,7 +88,7 @@ class NetworkActivator(ABC):
Return True if successful, otherwise return False
"""
return cls.bring_down_interfaces(
- [i['name'] for i in network_state.iter_interfaces()]
+ [i["name"] for i in network_state.iter_interfaces()]
)
@@ -111,7 +108,7 @@ class IfUpDownActivator(NetworkActivator):
Return True if successful, otherwise return False
"""
- cmd = ['ifup', device_name]
+ cmd = ["ifup", device_name]
return _alter_interface(cmd, device_name)
@staticmethod
@@ -120,18 +117,18 @@ class IfUpDownActivator(NetworkActivator):
Return True if successful, otherwise return False
"""
- cmd = ['ifdown', device_name]
+ cmd = ["ifdown", device_name]
return _alter_interface(cmd, device_name)
class NetworkManagerActivator(NetworkActivator):
@staticmethod
def available(target=None) -> bool:
- """ Return true if network manager can be used on this system."""
+ """Return true if network manager can be used on this system."""
config_present = os.path.isfile(
subp.target_path(target, path=NM_CFG_FILE)
)
- nmcli_present = subp.which('nmcli', target=target)
+ nmcli_present = subp.which("nmcli", target=target)
return config_present and bool(nmcli_present)
@staticmethod
@@ -140,7 +137,7 @@ class NetworkManagerActivator(NetworkActivator):
Return True if successful, otherwise return False
"""
- cmd = ['nmcli', 'connection', 'up', 'ifname', device_name]
+ cmd = ["nmcli", "connection", "up", "ifname", device_name]
return _alter_interface(cmd, device_name)
@staticmethod
@@ -149,16 +146,16 @@ class NetworkManagerActivator(NetworkActivator):
Return True if successful, otherwise return False
"""
- cmd = ['nmcli', 'connection', 'down', device_name]
+ cmd = ["nmcli", "connection", "down", device_name]
return _alter_interface(cmd, device_name)
class NetplanActivator(NetworkActivator):
- NETPLAN_CMD = ['netplan', 'apply']
+ NETPLAN_CMD = ["netplan", "apply"]
@staticmethod
def available(target=None) -> bool:
- """ Return true if netplan can be used on this system."""
+ """Return true if netplan can be used on this system."""
return netplan_available(target=target)
@staticmethod
@@ -167,9 +164,11 @@ class NetplanActivator(NetworkActivator):
Return True if successful, otherwise return False
"""
- LOG.debug("Calling 'netplan apply' rather than "
- "altering individual interfaces")
- return _alter_interface(NetplanActivator.NETPLAN_CMD, 'all')
+ LOG.debug(
+ "Calling 'netplan apply' rather than "
+ "altering individual interfaces"
+ )
+ return _alter_interface(NetplanActivator.NETPLAN_CMD, "all")
@staticmethod
def bring_up_interfaces(device_names: Iterable[str]) -> bool:
@@ -177,9 +176,11 @@ class NetplanActivator(NetworkActivator):
Return True if successful, otherwise return False
"""
- LOG.debug("Calling 'netplan apply' rather than "
- "altering individual interfaces")
- return _alter_interface(NetplanActivator.NETPLAN_CMD, 'all')
+ LOG.debug(
+ "Calling 'netplan apply' rather than "
+ "altering individual interfaces"
+ )
+ return _alter_interface(NetplanActivator.NETPLAN_CMD, "all")
@staticmethod
def bring_up_all_interfaces(network_state: NetworkState) -> bool:
@@ -187,7 +188,7 @@ class NetplanActivator(NetworkActivator):
Return True if successful, otherwise return False
"""
- return _alter_interface(NetplanActivator.NETPLAN_CMD, 'all')
+ return _alter_interface(NetplanActivator.NETPLAN_CMD, "all")
@staticmethod
def bring_down_interface(device_name: str) -> bool:
@@ -195,9 +196,11 @@ class NetplanActivator(NetworkActivator):
Return True if successful, otherwise return False
"""
- LOG.debug("Calling 'netplan apply' rather than "
- "altering individual interfaces")
- return _alter_interface(NetplanActivator.NETPLAN_CMD, 'all')
+ LOG.debug(
+ "Calling 'netplan apply' rather than "
+ "altering individual interfaces"
+ )
+ return _alter_interface(NetplanActivator.NETPLAN_CMD, "all")
@staticmethod
def bring_down_interfaces(device_names: Iterable[str]) -> bool:
@@ -205,9 +208,11 @@ class NetplanActivator(NetworkActivator):
Return True if successful, otherwise return False
"""
- LOG.debug("Calling 'netplan apply' rather than "
- "altering individual interfaces")
- return _alter_interface(NetplanActivator.NETPLAN_CMD, 'all')
+ LOG.debug(
+ "Calling 'netplan apply' rather than "
+ "altering individual interfaces"
+ )
+ return _alter_interface(NetplanActivator.NETPLAN_CMD, "all")
@staticmethod
def bring_down_all_interfaces(network_state: NetworkState) -> bool:
@@ -215,7 +220,7 @@ class NetplanActivator(NetworkActivator):
Return True if successful, otherwise return False
"""
- return _alter_interface(NetplanActivator.NETPLAN_CMD, 'all')
+ return _alter_interface(NetplanActivator.NETPLAN_CMD, "all")
class NetworkdActivator(NetworkActivator):
@@ -226,20 +231,20 @@ class NetworkdActivator(NetworkActivator):
@staticmethod
def bring_up_interface(device_name: str) -> bool:
- """ Return True is successful, otherwise return False """
- cmd = ['ip', 'link', 'set', 'up', device_name]
+ """Return True is successful, otherwise return False"""
+ cmd = ["ip", "link", "set", "up", device_name]
return _alter_interface(cmd, device_name)
@staticmethod
def bring_up_all_interfaces(network_state: NetworkState) -> bool:
- """ Return True is successful, otherwise return False """
- cmd = ['systemctl', 'restart', 'systemd-networkd', 'systemd-resolved']
- return _alter_interface(cmd, 'all')
+ """Return True is successful, otherwise return False"""
+ cmd = ["systemctl", "restart", "systemd-networkd", "systemd-resolved"]
+ return _alter_interface(cmd, "all")
@staticmethod
def bring_down_interface(device_name: str) -> bool:
- """ Return True is successful, otherwise return False """
- cmd = ['ip', 'link', 'set', 'down', device_name]
+ """Return True is successful, otherwise return False"""
+ cmd = ["ip", "link", "set", "down", device_name]
return _alter_interface(cmd, device_name)
@@ -262,7 +267,8 @@ def search_activator(
unknown = [i for i in priority if i not in DEFAULT_PRIORITY]
if unknown:
raise ValueError(
- "Unknown activators provided in priority list: %s" % unknown)
+ "Unknown activators provided in priority list: %s" % unknown
+ )
return [activator for activator in priority if activator.available(target)]
@@ -277,7 +283,8 @@ def select_activator(priority=None, target=None) -> Type[NetworkActivator]:
tmsg = " in target=%s" % target
raise NoActivatorException(
"No available network activators found%s. Searched "
- "through list: %s" % (tmsg, priority))
+ "through list: %s" % (tmsg, priority)
+ )
selected = found[0]
- LOG.debug('Using selected activator: %s', selected)
+ LOG.debug("Using selected activator: %s", selected)
return selected
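
A short sketch of how the activator API reformatted above is meant to be driven; the device name is illustrative:

    from cloudinit.net.activators import select_activator

    # select_activator() returns the first activator class from the default
    # priority list whose available() check passes on this system.
    activator = select_activator()
    if not activator.bring_up_interface("eth0"):  # returns True on success
        raise RuntimeError("failed to bring up interface")
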
diff --git a/cloudinit/net/bsd.py b/cloudinit/net/bsd.py
index 916cea32..dc322582 100644
--- a/cloudinit/net/bsd.py
+++ b/cloudinit/net/bsd.py
@@ -3,11 +3,9 @@
import re
from cloudinit import log as logging
-from cloudinit import net
-from cloudinit import util
-from cloudinit import subp
-from cloudinit.distros.parsers.resolv_conf import ResolvConf
+from cloudinit import net, subp, util
from cloudinit.distros import bsd_utils
+from cloudinit.distros.parsers.resolv_conf import ResolvConf
from . import renderer
@@ -15,8 +13,8 @@ LOG = logging.getLogger(__name__)
class BSDRenderer(renderer.Renderer):
- resolv_conf_fn = 'etc/resolv.conf'
- rc_conf_fn = 'etc/rc.conf'
+ resolv_conf_fn = "etc/resolv.conf"
+ rc_conf_fn = "etc/rc.conf"
def get_rc_config_value(self, key):
fn = subp.target_path(self.target, self.rc_conf_fn)
@@ -31,52 +29,59 @@ class BSDRenderer(renderer.Renderer):
config = {}
self.target = None
self.interface_configurations = {}
- self._postcmds = config.get('postcmds', True)
+ self._postcmds = config.get("postcmds", True)
def _ifconfig_entries(self, settings):
ifname_by_mac = net.get_interfaces_by_mac()
for interface in settings.iter_interfaces():
device_name = interface.get("name")
device_mac = interface.get("mac_address")
- if device_name and re.match(r'^lo\d+$', device_name):
+ if device_name and re.match(r"^lo\d+$", device_name):
continue
if device_mac not in ifname_by_mac:
- LOG.info('Cannot find any device with MAC %s', device_mac)
+ LOG.info("Cannot find any device with MAC %s", device_mac)
elif device_mac and device_name:
cur_name = ifname_by_mac[device_mac]
if cur_name != device_name:
- LOG.info('netif service will rename interface %s to %s',
- cur_name, device_name)
+ LOG.info(
+ "netif service will rename interface %s to %s",
+ cur_name,
+ device_name,
+ )
try:
self.rename_interface(cur_name, device_name)
except NotImplementedError:
- LOG.error((
- 'Interface renaming is '
- 'not supported on this OS'))
+ LOG.error(
+ "Interface renaming is not supported on this OS"
+ )
device_name = cur_name
else:
device_name = ifname_by_mac[device_mac]
- LOG.info('Configuring interface %s', device_name)
+ LOG.info("Configuring interface %s", device_name)
- self.interface_configurations[device_name] = 'DHCP'
+ self.interface_configurations[device_name] = "DHCP"
for subnet in interface.get("subnets", []):
- if subnet.get('type') == 'static':
- if not subnet.get('netmask'):
+ if subnet.get("type") == "static":
+ if not subnet.get("netmask"):
LOG.debug(
- 'Skipping IP %s, because there is no netmask',
- subnet.get('address')
+ "Skipping IP %s, because there is no netmask",
+ subnet.get("address"),
)
continue
- LOG.debug('Configuring dev %s with %s / %s', device_name,
- subnet.get('address'), subnet.get('netmask'))
+ LOG.debug(
+ "Configuring dev %s with %s / %s",
+ device_name,
+ subnet.get("address"),
+ subnet.get("netmask"),
+ )
self.interface_configurations[device_name] = {
- 'address': subnet.get('address'),
- 'netmask': subnet.get('netmask'),
- 'mtu': subnet.get('mtu') or interface.get('mtu'),
+ "address": subnet.get("address"),
+ "netmask": subnet.get("netmask"),
+ "mtu": subnet.get("mtu") or interface.get("mtu"),
}
def _route_entries(self, settings):
@@ -84,22 +89,25 @@ class BSDRenderer(renderer.Renderer):
for interface in settings.iter_interfaces():
subnets = interface.get("subnets", [])
for subnet in subnets:
- if subnet.get('type') != 'static':
+ if subnet.get("type") != "static":
continue
- gateway = subnet.get('gateway')
- if gateway and len(gateway.split('.')) == 4:
- routes.append({
- 'network': '0.0.0.0',
- 'netmask': '0.0.0.0',
- 'gateway': gateway})
- routes += subnet.get('routes', [])
+ gateway = subnet.get("gateway")
+ if gateway and len(gateway.split(".")) == 4:
+ routes.append(
+ {
+ "network": "0.0.0.0",
+ "netmask": "0.0.0.0",
+ "gateway": gateway,
+ }
+ )
+ routes += subnet.get("routes", [])
for route in routes:
- network = route.get('network')
+ network = route.get("network")
if not network:
- LOG.debug('Skipping a bad route entry')
+ LOG.debug("Skipping a bad route entry")
continue
- netmask = route.get('netmask')
- gateway = route.get('gateway')
+ netmask = route.get("netmask")
+ gateway = route.get("gateway")
self.set_route(network, netmask, gateway)
def _resolve_conf(self, settings):
@@ -107,20 +115,26 @@ class BSDRenderer(renderer.Renderer):
searchdomains = settings.dns_searchdomains
for interface in settings.iter_interfaces():
for subnet in interface.get("subnets", []):
- if 'dns_nameservers' in subnet:
- nameservers.extend(subnet['dns_nameservers'])
- if 'dns_search' in subnet:
- searchdomains.extend(subnet['dns_search'])
+ if "dns_nameservers" in subnet:
+ nameservers.extend(subnet["dns_nameservers"])
+ if "dns_search" in subnet:
+ searchdomains.extend(subnet["dns_search"])
# Try to read the /etc/resolv.conf or just start from scratch if that
# fails.
try:
- resolvconf = ResolvConf(util.load_file(subp.target_path(
- self.target, self.resolv_conf_fn)))
+ resolvconf = ResolvConf(
+ util.load_file(
+ subp.target_path(self.target, self.resolv_conf_fn)
+ )
+ )
resolvconf.parse()
except IOError:
- util.logexc(LOG, "Failed to parse %s, use new empty file",
- subp.target_path(self.target, self.resolv_conf_fn))
- resolvconf = ResolvConf('')
+ util.logexc(
+ LOG,
+ "Failed to parse %s, use new empty file",
+ subp.target_path(self.target, self.resolv_conf_fn),
+ )
+ resolvconf = ResolvConf("")
resolvconf.parse()
# Add some nameservers
@@ -138,7 +152,9 @@ class BSDRenderer(renderer.Renderer):
util.logexc(LOG, "Failed to add search domain %s", domain)
util.write_file(
subp.target_path(self.target, self.resolv_conf_fn),
- str(resolvconf), 0o644)
+ str(resolvconf),
+ 0o644,
+ )
def render_network_state(self, network_state, templates=None, target=None):
if target:
@@ -152,7 +168,7 @@ class BSDRenderer(renderer.Renderer):
def dhcp_interfaces(self):
ic = self.interface_configurations.items
- return [k for k, v in ic() if v == 'DHCP']
+ return [k for k, v in ic() if v == "DHCP"]
def start_services(self, run=False):
raise NotImplementedError()
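
To make the data shape above concrete: after _ifconfig_entries() runs, interface_configurations maps a device name either to the string "DHCP" or to a small dict, e.g. (values invented for illustration):

    interface_configurations = {
        "vtnet0": "DHCP",
        "vtnet1": {
            "address": "10.0.0.5",
            "netmask": "255.255.255.0",
            "mtu": 1500,
        },
    }
    # dhcp_interfaces() would then return ["vtnet0"]
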
diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py
index 7cdd428d..eab86d9f 100755
--- a/cloudinit/net/cmdline.py
+++ b/cloudinit/net/cmdline.py
@@ -16,8 +16,7 @@ import shlex
from cloudinit import util
-from . import get_devicelist
-from . import read_sys_net_safe
+from . import get_devicelist, read_sys_net_safe
_OPEN_ISCSI_INTERFACE_FILE = "/run/initramfs/open-iscsi.interface"
@@ -58,7 +57,7 @@ class KlibcNetworkConfigSource(InitramfsNetworkConfigSource):
if self._mac_addrs is None:
self._mac_addrs = {}
for k in get_devicelist():
- mac_addr = read_sys_net_safe(k, 'address')
+ mac_addr = read_sys_net_safe(k, "address")
if mac_addr:
self._mac_addrs[k] = mac_addr
@@ -74,7 +73,7 @@ class KlibcNetworkConfigSource(InitramfsNetworkConfigSource):
"""
if self._files:
for item in shlex.split(self._cmdline):
- if item.startswith('ip=') or item.startswith('ip6='):
+ if item.startswith("ip=") or item.startswith("ip6="):
return True
if os.path.exists(_OPEN_ISCSI_INTERFACE_FILE):
# iBft can configure networking without ip=
@@ -83,7 +82,8 @@ class KlibcNetworkConfigSource(InitramfsNetworkConfigSource):
def render_config(self) -> dict:
return config_from_klibc_net_cfg(
- files=self._files, mac_addrs=self._mac_addrs,
+ files=self._files,
+ mac_addrs=self._mac_addrs,
)
@@ -113,78 +113,78 @@ def _klibc_to_config_entry(content, mac_addrs=None):
data = util.load_shell_content(content)
try:
- name = data['DEVICE'] if 'DEVICE' in data else data['DEVICE6']
+ name = data["DEVICE"] if "DEVICE" in data else data["DEVICE6"]
except KeyError as e:
raise ValueError("no 'DEVICE' or 'DEVICE6' entry in data") from e
# ipconfig on precise does not write PROTO
# IPv6 config gives us IPV6PROTO, not PROTO.
- proto = data.get('PROTO', data.get('IPV6PROTO'))
+ proto = data.get("PROTO", data.get("IPV6PROTO"))
if not proto:
- if data.get('filename'):
- proto = 'dhcp'
+ if data.get("filename"):
+ proto = "dhcp"
else:
- proto = 'none'
+ proto = "none"
- if proto not in ('none', 'dhcp', 'dhcp6'):
+ if proto not in ("none", "dhcp", "dhcp6"):
raise ValueError("Unexpected value for PROTO: %s" % proto)
iface = {
- 'type': 'physical',
- 'name': name,
- 'subnets': [],
+ "type": "physical",
+ "name": name,
+ "subnets": [],
}
if name in mac_addrs:
- iface['mac_address'] = mac_addrs[name]
+ iface["mac_address"] = mac_addrs[name]
# Handle both IPv4 and IPv6 values
- for pre in ('IPV4', 'IPV6'):
+ for pre in ("IPV4", "IPV6"):
# if no IPV4ADDR or IPV6ADDR, then go on.
if pre + "ADDR" not in data:
continue
# PROTO for ipv4, IPV6PROTO for ipv6
- cur_proto = data.get(pre + 'PROTO', proto)
+ cur_proto = data.get(pre + "PROTO", proto)
# ipconfig's 'none' is called 'static'
- if cur_proto == 'none':
- cur_proto = 'static'
- subnet = {'type': cur_proto, 'control': 'manual'}
+ if cur_proto == "none":
+ cur_proto = "static"
+ subnet = {"type": cur_proto, "control": "manual"}
# only populate address for static types. While the rendered config
# may have an address for dhcp, that is not really expected.
- if cur_proto == 'static':
- subnet['address'] = data[pre + 'ADDR']
+ if cur_proto == "static":
+ subnet["address"] = data[pre + "ADDR"]
# these fields go right on the subnet
- for key in ('NETMASK', 'BROADCAST', 'GATEWAY'):
+ for key in ("NETMASK", "BROADCAST", "GATEWAY"):
if pre + key in data:
subnet[key.lower()] = data[pre + key]
dns = []
# handle IPV4DNS0 or IPV6DNS0
- for nskey in ('DNS0', 'DNS1'):
+ for nskey in ("DNS0", "DNS1"):
ns = data.get(pre + nskey)
# verify it has something other than 0.0.0.0 (or ipv6)
if ns and len(ns.strip(":.0")):
dns.append(data[pre + nskey])
if dns:
- subnet['dns_nameservers'] = dns
+ subnet["dns_nameservers"] = dns
# add search to both ipv4 and ipv6, as it has no namespace
- search = data.get('DOMAINSEARCH')
+ search = data.get("DOMAINSEARCH")
if search:
- if ',' in search:
- subnet['dns_search'] = search.split(",")
+ if "," in search:
+ subnet["dns_search"] = search.split(",")
else:
- subnet['dns_search'] = search.split()
+ subnet["dns_search"] = search.split()
- iface['subnets'].append(subnet)
+ iface["subnets"].append(subnet)
return name, iface
def _get_klibc_net_cfg_files():
- return glob.glob('/run/net-*.conf') + glob.glob('/run/net6-*.conf')
+ return glob.glob("/run/net-*.conf") + glob.glob("/run/net6-*.conf")
def config_from_klibc_net_cfg(files=None, mac_addrs=None):
@@ -194,24 +194,28 @@ def config_from_klibc_net_cfg(files=None, mac_addrs=None):
entries = []
names = {}
for cfg_file in files:
- name, entry = _klibc_to_config_entry(util.load_file(cfg_file),
- mac_addrs=mac_addrs)
+ name, entry = _klibc_to_config_entry(
+ util.load_file(cfg_file), mac_addrs=mac_addrs
+ )
if name in names:
- prev = names[name]['entry']
- if prev.get('mac_address') != entry.get('mac_address'):
+ prev = names[name]["entry"]
+ if prev.get("mac_address") != entry.get("mac_address"):
raise ValueError(
"device '{name}' was defined multiple times ({files})"
" but had differing mac addresses: {old} -> {new}.".format(
- name=name, files=' '.join(names[name]['files']),
- old=prev.get('mac_address'),
- new=entry.get('mac_address')))
- prev['subnets'].extend(entry['subnets'])
- names[name]['files'].append(cfg_file)
+ name=name,
+ files=" ".join(names[name]["files"]),
+ old=prev.get("mac_address"),
+ new=entry.get("mac_address"),
+ )
+ )
+ prev["subnets"].extend(entry["subnets"])
+ names[name]["files"].append(cfg_file)
else:
- names[name] = {'files': [cfg_file], 'entry': entry}
+ names[name] = {"files": [cfg_file], "entry": entry}
entries.append(entry)
- return {'config': entries, 'version': 1}
+ return {"config": entries, "version": 1}
def read_initramfs_config():
@@ -257,8 +261,10 @@ def _b64dgz(data):
except (TypeError, ValueError):
logging.error(
"Expected base64 encoded kernel commandline parameter"
- " network-config. Ignoring network-config=%s.", data)
- return ''
+ " network-config. Ignoring network-config=%s.",
+ data,
+ )
+ return ""
return _decomp_gzip(blob)
@@ -267,7 +273,7 @@ def read_kernel_cmdline_config(cmdline=None):
if cmdline is None:
cmdline = util.get_cmdline()
- if 'network-config=' in cmdline:
+ if "network-config=" in cmdline:
data64 = None
for tok in cmdline.split():
if tok.startswith("network-config="):
@@ -279,4 +285,5 @@ def read_kernel_cmdline_config(cmdline=None):
return None
+
# vi: ts=4 expandtab
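
For reference, _klibc_to_config_entry above turns the shell-style files written by klibc ipconfig into version-1 network config entries. A rough sketch, with illustrative values, of a static /run/net-eth0.conf and the (name, iface) pair it would produce:

    #   DEVICE='eth0'
    #   PROTO='none'
    #   IPV4ADDR='10.0.0.5'
    #   IPV4NETMASK='255.255.255.0'
    #   IPV4GATEWAY='10.0.0.1'
    #
    # _klibc_to_config_entry(content) would return roughly:
    ("eth0", {
        "type": "physical",
        "name": "eth0",
        "subnets": [{
            "type": "static",          # klibc 'none' becomes 'static'
            "control": "manual",
            "address": "10.0.0.5",
            "netmask": "255.255.255.0",
            "gateway": "10.0.0.1",
        }],
    })
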
diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py
index 3f4b0418..f9af18cf 100644
--- a/cloudinit/net/dhcp.py
+++ b/cloudinit/net/dhcp.py
@@ -4,26 +4,28 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from typing import Dict, Any
-import configobj
import logging
import os
import re
import signal
import time
from io import StringIO
+from typing import Any, Dict
+
+import configobj
+from cloudinit import subp, temp_utils, util
from cloudinit.net import (
- EphemeralIPv4Network, find_fallback_nic, get_devicelist,
- has_url_connectivity)
+ EphemeralIPv4Network,
+ find_fallback_nic,
+ get_devicelist,
+ has_url_connectivity,
+)
from cloudinit.net.network_state import mask_and_ipv4_to_bcast_addr as bcip
-from cloudinit import temp_utils
-from cloudinit import subp
-from cloudinit import util
LOG = logging.getLogger(__name__)
-NETWORKD_LEASES_DIR = '/run/systemd/netif/leases'
+NETWORKD_LEASES_DIR = "/run/systemd/netif/leases"
class InvalidDHCPLeaseFileError(Exception):
@@ -43,7 +45,7 @@ class EphemeralDHCPv4(object):
self,
iface=None,
connectivity_url_data: Dict[str, Any] = None,
- dhcp_log_func=None
+ dhcp_log_func=None,
):
self.iface = iface
self._ephipv4 = None
@@ -57,8 +59,10 @@ class EphemeralDHCPv4(object):
if self.connectivity_url_data:
if has_url_connectivity(self.connectivity_url_data):
LOG.debug(
- 'Skip ephemeral DHCP setup, instance has connectivity'
- ' to %s', self.connectivity_url_data)
+ "Skip ephemeral DHCP setup, instance has connectivity"
+ " to %s",
+ self.connectivity_url_data,
+ )
return
return self.obtain_lease()
@@ -87,31 +91,39 @@ class EphemeralDHCPv4(object):
return self.lease
try:
leases = maybe_perform_dhcp_discovery(
- self.iface, self.dhcp_log_func)
+ self.iface, self.dhcp_log_func
+ )
except InvalidDHCPLeaseFileError as e:
raise NoDHCPLeaseError() from e
if not leases:
raise NoDHCPLeaseError()
self.lease = leases[-1]
- LOG.debug("Received dhcp lease on %s for %s/%s",
- self.lease['interface'], self.lease['fixed-address'],
- self.lease['subnet-mask'])
- nmap = {'interface': 'interface', 'ip': 'fixed-address',
- 'prefix_or_mask': 'subnet-mask',
- 'broadcast': 'broadcast-address',
- 'static_routes': [
- 'rfc3442-classless-static-routes',
- 'classless-static-routes'
- ],
- 'router': 'routers'}
+ LOG.debug(
+ "Received dhcp lease on %s for %s/%s",
+ self.lease["interface"],
+ self.lease["fixed-address"],
+ self.lease["subnet-mask"],
+ )
+ nmap = {
+ "interface": "interface",
+ "ip": "fixed-address",
+ "prefix_or_mask": "subnet-mask",
+ "broadcast": "broadcast-address",
+ "static_routes": [
+ "rfc3442-classless-static-routes",
+ "classless-static-routes",
+ ],
+ "router": "routers",
+ }
kwargs = self.extract_dhcp_options_mapping(nmap)
- if not kwargs['broadcast']:
- kwargs['broadcast'] = bcip(kwargs['prefix_or_mask'], kwargs['ip'])
- if kwargs['static_routes']:
- kwargs['static_routes'] = (
- parse_static_routes(kwargs['static_routes']))
+ if not kwargs["broadcast"]:
+ kwargs["broadcast"] = bcip(kwargs["prefix_or_mask"], kwargs["ip"])
+ if kwargs["static_routes"]:
+ kwargs["static_routes"] = parse_static_routes(
+ kwargs["static_routes"]
+ )
if self.connectivity_url_data:
- kwargs['connectivity_url_data'] = self.connectivity_url_data
+ kwargs["connectivity_url_data"] = self.connectivity_url_data
ephipv4 = EphemeralIPv4Network(**kwargs)
ephipv4.__enter__()
self._ephipv4 = ephipv4
@@ -122,16 +134,15 @@ class EphemeralDHCPv4(object):
for internal_reference, lease_option_names in nmap.items():
if isinstance(lease_option_names, list):
self.get_first_option_value(
- internal_reference,
- lease_option_names,
- result
+ internal_reference, lease_option_names, result
)
else:
result[internal_reference] = self.lease.get(lease_option_names)
return result
- def get_first_option_value(self, internal_mapping,
- lease_option_names, result):
+ def get_first_option_value(
+ self, internal_mapping, lease_option_names, result
+ ):
for different_names in lease_option_names:
if not result.get(internal_mapping):
result[internal_mapping] = self.lease.get(different_names)
@@ -153,19 +164,20 @@ def maybe_perform_dhcp_discovery(nic=None, dhcp_log_func=None):
if nic is None:
nic = find_fallback_nic()
if nic is None:
- LOG.debug('Skip dhcp_discovery: Unable to find fallback nic.')
+ LOG.debug("Skip dhcp_discovery: Unable to find fallback nic.")
return []
elif nic not in get_devicelist():
LOG.debug(
- 'Skip dhcp_discovery: nic %s not found in get_devicelist.', nic)
+ "Skip dhcp_discovery: nic %s not found in get_devicelist.", nic
+ )
return []
- dhclient_path = subp.which('dhclient')
+ dhclient_path = subp.which("dhclient")
if not dhclient_path:
- LOG.debug('Skip dhclient configuration: No dhclient command found.')
+ LOG.debug("Skip dhclient configuration: No dhclient command found.")
return []
- with temp_utils.tempdir(rmtree_ignore_errors=True,
- prefix='cloud-init-dhcp-',
- needs_exe=True) as tdir:
+ with temp_utils.tempdir(
+ rmtree_ignore_errors=True, prefix="cloud-init-dhcp-", needs_exe=True
+ ) as tdir:
# Use /var/tmp because /run/cloud-init/tmp is mounted noexec
return dhcp_discovery(dhclient_path, nic, tdir, dhcp_log_func)
@@ -184,20 +196,23 @@ def parse_dhcp_lease_file(lease_file):
lease_content = util.load_file(lease_file)
if len(lease_content) == 0:
raise InvalidDHCPLeaseFileError(
- 'Cannot parse empty dhcp lease file {0}'.format(lease_file))
+ "Cannot parse empty dhcp lease file {0}".format(lease_file)
+ )
for lease in lease_regex.findall(lease_content):
lease_options = []
- for line in lease.split(';'):
+ for line in lease.split(";"):
# Strip newlines, double-quotes and option prefix
- line = line.strip().replace('"', '').replace('option ', '')
+ line = line.strip().replace('"', "").replace("option ", "")
if not line:
continue
- lease_options.append(line.split(' ', 1))
+ lease_options.append(line.split(" ", 1))
dhcp_leases.append(dict(lease_options))
if not dhcp_leases:
raise InvalidDHCPLeaseFileError(
- 'Cannot parse dhcp lease file {0}. No leases found'.format(
- lease_file))
+ "Cannot parse dhcp lease file {0}. No leases found".format(
+ lease_file
+ )
+ )
return dhcp_leases
@@ -214,17 +229,17 @@ def dhcp_discovery(dhclient_cmd_path, interface, cleandir, dhcp_log_func=None):
@return: A list of dicts of representing the dhcp leases parsed from the
dhcp.leases file or empty list.
"""
- LOG.debug('Performing a dhcp discovery on %s', interface)
+ LOG.debug("Performing a dhcp discovery on %s", interface)
# XXX We copy dhclient out of /sbin/dhclient to avoid dealing with strict
# app armor profiles which disallow running dhclient -sf <our-script-file>.
# We want to avoid running /sbin/dhclient-script because of side-effects in
# /etc/resolv.conf and any other vendor specific scripts in
# /etc/dhcp/dhclient*hooks.d.
- sandbox_dhclient_cmd = os.path.join(cleandir, 'dhclient')
+ sandbox_dhclient_cmd = os.path.join(cleandir, "dhclient")
util.copy(dhclient_cmd_path, sandbox_dhclient_cmd)
- pid_file = os.path.join(cleandir, 'dhclient.pid')
- lease_file = os.path.join(cleandir, 'dhcp.leases')
+ pid_file = os.path.join(cleandir, "dhclient.pid")
+ lease_file = os.path.join(cleandir, "dhcp.leases")
# In some cases files in /var/tmp may not be executable, launching dhclient
# from there will certainly raise 'Permission denied' error. Try launching
@@ -236,9 +251,19 @@ def dhcp_discovery(dhclient_cmd_path, interface, cleandir, dhcp_log_func=None):
# Generally dhclient relies on dhclient-script PREINIT action to bring the
# link up before attempting discovery. Since we are using -sf /bin/true,
# we need to do that "link up" ourselves first.
- subp.subp(['ip', 'link', 'set', 'dev', interface, 'up'], capture=True)
- cmd = [sandbox_dhclient_cmd, '-1', '-v', '-lf', lease_file,
- '-pf', pid_file, interface, '-sf', '/bin/true']
+ subp.subp(["ip", "link", "set", "dev", interface, "up"], capture=True)
+ cmd = [
+ sandbox_dhclient_cmd,
+ "-1",
+ "-v",
+ "-lf",
+ lease_file,
+ "-pf",
+ pid_file,
+ interface,
+ "-sf",
+ "/bin/true",
+ ]
out, err = subp.subp(cmd, capture=True)
# Wait for pid file and lease file to appear, and for the process
@@ -249,13 +274,16 @@ def dhcp_discovery(dhclient_cmd_path, interface, cleandir, dhcp_log_func=None):
# kill the correct process, thus freeing cleandir to be deleted back
# up the callstack.
missing = util.wait_for_files(
- [pid_file, lease_file], maxwait=5, naplen=0.01)
+ [pid_file, lease_file], maxwait=5, naplen=0.01
+ )
if missing:
- LOG.warning("dhclient did not produce expected files: %s",
- ', '.join(os.path.basename(f) for f in missing))
+ LOG.warning(
+ "dhclient did not produce expected files: %s",
+ ", ".join(os.path.basename(f) for f in missing),
+ )
return []
- ppid = 'unknown'
+ ppid = "unknown"
daemonized = False
for _ in range(0, 1000):
pid_content = util.load_file(pid_file).strip()
@@ -266,7 +294,7 @@ def dhcp_discovery(dhclient_cmd_path, interface, cleandir, dhcp_log_func=None):
else:
ppid = util.get_proc_ppid(pid)
if ppid == 1:
- LOG.debug('killing dhclient with pid=%s', pid)
+ LOG.debug("killing dhclient with pid=%s", pid)
os.kill(pid, signal.SIGKILL)
daemonized = True
break
@@ -274,8 +302,11 @@ def dhcp_discovery(dhclient_cmd_path, interface, cleandir, dhcp_log_func=None):
if not daemonized:
LOG.error(
- 'dhclient(pid=%s, parentpid=%s) failed to daemonize after %s '
- 'seconds', pid_content, ppid, 0.01 * 1000
+ "dhclient(pid=%s, parentpid=%s) failed to daemonize after %s "
+ "seconds",
+ pid_content,
+ ppid,
+ 0.01 * 1000,
)
if dhcp_log_func is not None:
dhcp_log_func(out, err)
@@ -307,7 +338,8 @@ def networkd_load_leases(leases_d=None):
return ret
for lfile in os.listdir(leases_d):
ret[lfile] = networkd_parse_lease(
- util.load_file(os.path.join(leases_d, lfile)))
+ util.load_file(os.path.join(leases_d, lfile))
+ )
return ret
@@ -322,7 +354,7 @@ def networkd_get_option_from_leases(keyname, leases_d=None):
def parse_static_routes(rfc3442):
- """ parse rfc3442 format and return a list containing tuple of strings.
+ """parse rfc3442 format and return a list containing tuple of strings.
The tuple is composed of the network_address (including net length) and
gateway for a parsed static route. It can parse two formats of rfc3442,
@@ -352,10 +384,12 @@ def parse_static_routes(rfc3442):
static_routes = []
def _trunc_error(cidr, required, remain):
- msg = ("RFC3442 string malformed. Current route has CIDR of %s "
- "and requires %s significant octets, but only %s remain. "
- "Verify DHCP rfc3442-classless-static-routes value: %s"
- % (cidr, required, remain, rfc3442))
+ msg = (
+ "RFC3442 string malformed. Current route has CIDR of %s "
+ "and requires %s significant octets, but only %s remain. "
+ "Verify DHCP rfc3442-classless-static-routes value: %s"
+ % (cidr, required, remain, rfc3442)
+ )
LOG.error(msg)
current_idx = 0
@@ -368,32 +402,32 @@ def parse_static_routes(rfc3442):
if len(tokens[idx:]) < req_toks:
_trunc_error(net_length, req_toks, len(tokens[idx:]))
return static_routes
- net_address = ".".join(tokens[idx+1:idx+5])
- gateway = ".".join(tokens[idx+5:idx+req_toks])
+ net_address = ".".join(tokens[idx + 1 : idx + 5])
+ gateway = ".".join(tokens[idx + 5 : idx + req_toks])
current_idx = idx + req_toks
elif net_length in range(17, 25):
req_toks = 8
if len(tokens[idx:]) < req_toks:
_trunc_error(net_length, req_toks, len(tokens[idx:]))
return static_routes
- net_address = ".".join(tokens[idx+1:idx+4] + ["0"])
- gateway = ".".join(tokens[idx+4:idx+req_toks])
+ net_address = ".".join(tokens[idx + 1 : idx + 4] + ["0"])
+ gateway = ".".join(tokens[idx + 4 : idx + req_toks])
current_idx = idx + req_toks
elif net_length in range(9, 17):
req_toks = 7
if len(tokens[idx:]) < req_toks:
_trunc_error(net_length, req_toks, len(tokens[idx:]))
return static_routes
- net_address = ".".join(tokens[idx+1:idx+3] + ["0", "0"])
- gateway = ".".join(tokens[idx+3:idx+req_toks])
+ net_address = ".".join(tokens[idx + 1 : idx + 3] + ["0", "0"])
+ gateway = ".".join(tokens[idx + 3 : idx + req_toks])
current_idx = idx + req_toks
elif net_length in range(1, 9):
req_toks = 6
if len(tokens[idx:]) < req_toks:
_trunc_error(net_length, req_toks, len(tokens[idx:]))
return static_routes
- net_address = ".".join(tokens[idx+1:idx+2] + ["0", "0", "0"])
- gateway = ".".join(tokens[idx+2:idx+req_toks])
+ net_address = ".".join(tokens[idx + 1 : idx + 2] + ["0", "0", "0"])
+ gateway = ".".join(tokens[idx + 2 : idx + req_toks])
current_idx = idx + req_toks
elif net_length == 0:
req_toks = 5
@@ -401,15 +435,19 @@ def parse_static_routes(rfc3442):
_trunc_error(net_length, req_toks, len(tokens[idx:]))
return static_routes
net_address = "0.0.0.0"
- gateway = ".".join(tokens[idx+1:idx+req_toks])
+ gateway = ".".join(tokens[idx + 1 : idx + req_toks])
current_idx = idx + req_toks
else:
- LOG.error('Parsed invalid net length "%s". Verify DHCP '
- 'rfc3442-classless-static-routes value.', net_length)
+ LOG.error(
+ 'Parsed invalid net length "%s". Verify DHCP '
+ "rfc3442-classless-static-routes value.",
+ net_length,
+ )
return static_routes
static_routes.append(("%s/%s" % (net_address, net_length), gateway))
return static_routes
+
# vi: ts=4 expandtab
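
As a concrete illustration of the rfc3442 parsing above, using the comma-separated form that dhclient emits (lease values are illustrative):

    from cloudinit.net.dhcp import parse_static_routes

    # A /32 host route followed by a default route.
    routes = parse_static_routes(
        "32,169,254,169,254,130,56,248,255,0,130,56,240,1"
    )
    # routes == [("169.254.169.254/32", "130.56.248.255"),
    #            ("0.0.0.0/0", "130.56.240.1")]
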
diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py
index a89e5ad2..99e3fbb0 100644
--- a/cloudinit/net/eni.py
+++ b/cloudinit/net/eni.py
@@ -5,32 +5,58 @@ import glob
import os
import re
-from . import ParserError
-
-from . import renderer
-from .network_state import subnet_is_ipv6
-
from cloudinit import log as logging
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
+from . import ParserError, renderer
+from .network_state import subnet_is_ipv6
LOG = logging.getLogger(__name__)
NET_CONFIG_COMMANDS = [
- "pre-up", "up", "post-up", "down", "pre-down", "post-down",
+ "pre-up",
+ "up",
+ "post-up",
+ "down",
+ "pre-down",
+ "post-down",
]
NET_CONFIG_BRIDGE_OPTIONS = [
- "bridge_ageing", "bridge_bridgeprio", "bridge_fd", "bridge_gcinit",
- "bridge_hello", "bridge_maxage", "bridge_maxwait", "bridge_stp",
+ "bridge_ageing",
+ "bridge_bridgeprio",
+ "bridge_fd",
+ "bridge_gcinit",
+ "bridge_hello",
+ "bridge_maxage",
+ "bridge_maxwait",
+ "bridge_stp",
]
NET_CONFIG_OPTIONS = [
- "address", "netmask", "broadcast", "network", "metric", "gateway",
- "pointtopoint", "media", "mtu", "hostname", "leasehours", "leasetime",
- "vendor", "client", "bootfile", "server", "hwaddr", "provider", "frame",
- "netnum", "endpoint", "local", "ttl",
+ "address",
+ "netmask",
+ "broadcast",
+ "network",
+ "metric",
+ "gateway",
+ "pointtopoint",
+ "media",
+ "mtu",
+ "hostname",
+ "leasehours",
+ "leasetime",
+ "vendor",
+ "client",
+ "bootfile",
+ "server",
+ "hwaddr",
+ "provider",
+ "frame",
+ "netnum",
+ "endpoint",
+ "local",
+ "ttl",
]
@@ -38,27 +64,27 @@ NET_CONFIG_OPTIONS = [
def _iface_add_subnet(iface, subnet):
content = []
valid_map = [
- 'address',
- 'netmask',
- 'broadcast',
- 'metric',
- 'gateway',
- 'pointopoint',
- 'mtu',
- 'scope',
- 'dns_search',
- 'dns_nameservers',
+ "address",
+ "netmask",
+ "broadcast",
+ "metric",
+ "gateway",
+ "pointopoint",
+ "mtu",
+ "scope",
+ "dns_search",
+ "dns_nameservers",
]
for key, value in subnet.items():
- if key == 'netmask':
+ if key == "netmask":
continue
- if key == 'address':
- value = "%s/%s" % (subnet['address'], subnet['prefix'])
+ if key == "address":
+ value = "%s/%s" % (subnet["address"], subnet["prefix"])
if value and key in valid_map:
if type(value) == list:
value = " ".join(value)
- if '_' in key:
- key = key.replace('_', '-')
+ if "_" in key:
+ key = key.replace("_", "-")
content.append(" {0} {1}".format(key, value))
return sorted(content)
@@ -75,41 +101,44 @@ def _iface_add_attrs(iface, index, ipv4_subnet_mtu):
return []
content = []
ignore_map = [
- 'control',
- 'device_id',
- 'driver',
- 'index',
- 'inet',
- 'mode',
- 'name',
- 'subnets',
- 'type',
+ "control",
+ "device_id",
+ "driver",
+ "index",
+ "inet",
+ "mode",
+ "name",
+ "subnets",
+ "type",
]
# The following parameters require repetitive entries of the key for
# each of the values
multiline_keys = [
- 'bridge_pathcost',
- 'bridge_portprio',
- 'bridge_waitport',
+ "bridge_pathcost",
+ "bridge_portprio",
+ "bridge_waitport",
]
- renames = {'mac_address': 'hwaddress'}
- if iface['type'] not in ['bond', 'bridge', 'infiniband', 'vlan']:
- ignore_map.append('mac_address')
+ renames = {"mac_address": "hwaddress"}
+ if iface["type"] not in ["bond", "bridge", "infiniband", "vlan"]:
+ ignore_map.append("mac_address")
for key, value in iface.items():
# convert bool to string for eni
if type(value) == bool:
- value = 'on' if iface[key] else 'off'
+ value = "on" if iface[key] else "off"
if not value or key in ignore_map:
continue
- if key == 'mtu' and ipv4_subnet_mtu:
+ if key == "mtu" and ipv4_subnet_mtu:
if value != ipv4_subnet_mtu:
LOG.warning(
"Network config: ignoring %s device-level mtu:%s because"
" ipv4 subnet-level mtu:%s provided.",
- iface['name'], value, ipv4_subnet_mtu)
+ iface["name"],
+ value,
+ ipv4_subnet_mtu,
+ )
continue
if key in multiline_keys:
for v in value:
@@ -123,9 +152,9 @@ def _iface_add_attrs(iface, index, ipv4_subnet_mtu):
def _iface_start_entry(iface, index, render_hwaddress=False):
- fullname = iface['name']
+ fullname = iface["name"]
- control = iface['control']
+ control = iface["control"]
if control == "auto":
cverb = "auto"
elif control in ("hotplug",):
@@ -134,12 +163,13 @@ def _iface_start_entry(iface, index, render_hwaddress=False):
cverb = "# control-" + control
subst = iface.copy()
- subst.update({'fullname': fullname, 'cverb': cverb})
+ subst.update({"fullname": fullname, "cverb": cverb})
lines = [
"{cverb} {fullname}".format(**subst),
- "iface {fullname} {inet} {mode}".format(**subst)]
- if render_hwaddress and iface.get('mac_address'):
+ "iface {fullname} {inet} {mode}".format(**subst),
+ ]
+ if render_hwaddress and iface.get("mac_address"):
lines.append(" hwaddress {mac_address}".format(**subst))
return lines
@@ -159,9 +189,9 @@ def _parse_deb_config_data(ifaces, contents, src_dir, src_path):
currif = None
for line in contents.splitlines():
line = line.strip()
- if line.startswith('#'):
+ if line.startswith("#"):
continue
- split = line.split(' ')
+ split = line.split(" ")
option = split[0]
if option == "source-directory":
parsed_src_dir = split[1]
@@ -172,16 +202,18 @@ def _parse_deb_config_data(ifaces, contents, src_dir, src_path):
dir_contents = [
os.path.join(expanded_path, path)
for path in dir_contents
- if (os.path.isfile(os.path.join(expanded_path, path)) and
- re.match("^[a-zA-Z0-9_-]+$", path) is not None)
+ if (
+ os.path.isfile(os.path.join(expanded_path, path))
+ and re.match("^[a-zA-Z0-9_-]+$", path) is not None
+ )
]
for entry in dir_contents:
with open(entry, "r") as fp:
src_data = fp.read().strip()
abs_entry = os.path.abspath(entry)
_parse_deb_config_data(
- ifaces, src_data,
- os.path.dirname(abs_entry), abs_entry)
+ ifaces, src_data, os.path.dirname(abs_entry), abs_entry
+ )
elif option == "source":
new_src_path = split[1]
if not new_src_path.startswith("/"):
@@ -191,8 +223,8 @@ def _parse_deb_config_data(ifaces, contents, src_dir, src_path):
src_data = fp.read().strip()
abs_path = os.path.abspath(expanded_path)
_parse_deb_config_data(
- ifaces, src_data,
- os.path.dirname(abs_path), abs_path)
+ ifaces, src_data, os.path.dirname(abs_path), abs_path
+ )
elif option == "auto":
for iface in split[1:]:
if iface not in ifaces:
@@ -200,7 +232,7 @@ def _parse_deb_config_data(ifaces, contents, src_dir, src_path):
# Include the source path this interface was found in.
"_source_path": src_path
}
- ifaces[iface]['auto'] = True
+ ifaces[iface]["auto"] = True
elif option == "iface":
iface, family, method = split[1:4]
if iface not in ifaces:
@@ -208,71 +240,72 @@ def _parse_deb_config_data(ifaces, contents, src_dir, src_path):
# Include the source path this interface was found in.
"_source_path": src_path
}
- elif 'family' in ifaces[iface]:
+ elif "family" in ifaces[iface]:
raise ParserError(
"Interface %s can only be defined once. "
- "Re-defined in '%s'." % (iface, src_path))
- ifaces[iface]['family'] = family
- ifaces[iface]['method'] = method
+ "Re-defined in '%s'." % (iface, src_path)
+ )
+ ifaces[iface]["family"] = family
+ ifaces[iface]["method"] = method
currif = iface
elif option == "hwaddress":
if split[1] == "ether":
val = split[2]
else:
val = split[1]
- ifaces[currif]['hwaddress'] = val
+ ifaces[currif]["hwaddress"] = val
elif option in NET_CONFIG_OPTIONS:
ifaces[currif][option] = split[1]
elif option in NET_CONFIG_COMMANDS:
if option not in ifaces[currif]:
ifaces[currif][option] = []
- ifaces[currif][option].append(' '.join(split[1:]))
- elif option.startswith('dns-'):
- if 'dns' not in ifaces[currif]:
- ifaces[currif]['dns'] = {}
- if option == 'dns-search':
- ifaces[currif]['dns']['search'] = []
+ ifaces[currif][option].append(" ".join(split[1:]))
+ elif option.startswith("dns-"):
+ if "dns" not in ifaces[currif]:
+ ifaces[currif]["dns"] = {}
+ if option == "dns-search":
+ ifaces[currif]["dns"]["search"] = []
for domain in split[1:]:
- ifaces[currif]['dns']['search'].append(domain)
- elif option == 'dns-nameservers':
- ifaces[currif]['dns']['nameservers'] = []
+ ifaces[currif]["dns"]["search"].append(domain)
+ elif option == "dns-nameservers":
+ ifaces[currif]["dns"]["nameservers"] = []
for server in split[1:]:
- ifaces[currif]['dns']['nameservers'].append(server)
- elif option.startswith('bridge_'):
- if 'bridge' not in ifaces[currif]:
- ifaces[currif]['bridge'] = {}
+ ifaces[currif]["dns"]["nameservers"].append(server)
+ elif option.startswith("bridge_"):
+ if "bridge" not in ifaces[currif]:
+ ifaces[currif]["bridge"] = {}
if option in NET_CONFIG_BRIDGE_OPTIONS:
- bridge_option = option.replace('bridge_', '', 1)
- ifaces[currif]['bridge'][bridge_option] = split[1]
+ bridge_option = option.replace("bridge_", "", 1)
+ ifaces[currif]["bridge"][bridge_option] = split[1]
elif option == "bridge_ports":
- ifaces[currif]['bridge']['ports'] = []
+ ifaces[currif]["bridge"]["ports"] = []
for iface in split[1:]:
- ifaces[currif]['bridge']['ports'].append(iface)
+ ifaces[currif]["bridge"]["ports"].append(iface)
elif option == "bridge_hw":
# doc is confusing and thus some may put literal 'MAC'
# bridge_hw MAC <address>
# but correct is:
# bridge_hw <address>
if split[1].lower() == "mac":
- ifaces[currif]['bridge']['mac'] = split[2]
+ ifaces[currif]["bridge"]["mac"] = split[2]
else:
- ifaces[currif]['bridge']['mac'] = split[1]
+ ifaces[currif]["bridge"]["mac"] = split[1]
elif option == "bridge_pathcost":
- if 'pathcost' not in ifaces[currif]['bridge']:
- ifaces[currif]['bridge']['pathcost'] = {}
- ifaces[currif]['bridge']['pathcost'][split[1]] = split[2]
+ if "pathcost" not in ifaces[currif]["bridge"]:
+ ifaces[currif]["bridge"]["pathcost"] = {}
+ ifaces[currif]["bridge"]["pathcost"][split[1]] = split[2]
elif option == "bridge_portprio":
- if 'portprio' not in ifaces[currif]['bridge']:
- ifaces[currif]['bridge']['portprio'] = {}
- ifaces[currif]['bridge']['portprio'][split[1]] = split[2]
- elif option.startswith('bond-'):
- if 'bond' not in ifaces[currif]:
- ifaces[currif]['bond'] = {}
- bond_option = option.replace('bond-', '', 1)
- ifaces[currif]['bond'][bond_option] = split[1]
+ if "portprio" not in ifaces[currif]["bridge"]:
+ ifaces[currif]["bridge"]["portprio"] = {}
+ ifaces[currif]["bridge"]["portprio"][split[1]] = split[2]
+ elif option.startswith("bond-"):
+ if "bond" not in ifaces[currif]:
+ ifaces[currif]["bond"] = {}
+ bond_option = option.replace("bond-", "", 1)
+ ifaces[currif]["bond"][bond_option] = split[1]
for iface in ifaces.keys():
- if 'auto' not in ifaces[iface]:
- ifaces[iface]['auto'] = False
+ if "auto" not in ifaces[iface]:
+ ifaces[iface]["auto"] = False
def parse_deb_config(path):
@@ -282,8 +315,8 @@ def parse_deb_config(path):
contents = fp.read().strip()
abs_path = os.path.abspath(path)
_parse_deb_config_data(
- ifaces, contents,
- os.path.dirname(abs_path), abs_path)
+ ifaces, contents, os.path.dirname(abs_path), abs_path
+ )
return ifaces
@@ -308,32 +341,31 @@ def _ifaces_to_net_config_data(ifaces):
dtype = "loopback"
else:
dtype = "physical"
- devs[devname] = {'type': dtype, 'name': devname, 'subnets': []}
+ devs[devname] = {"type": dtype, "name": devname, "subnets": []}
        # this isn't strictly correct, but some might specify
# hwaddress on a nic for matching / declaring name.
- if 'hwaddress' in data:
- devs[devname]['mac_address'] = data['hwaddress']
- subnet = {'_orig_eni_name': name, 'type': data['method']}
- if data.get('auto'):
- subnet['control'] = 'auto'
+ if "hwaddress" in data:
+ devs[devname]["mac_address"] = data["hwaddress"]
+ subnet = {"_orig_eni_name": name, "type": data["method"]}
+ if data.get("auto"):
+ subnet["control"] = "auto"
else:
- subnet['control'] = 'manual'
+ subnet["control"] = "manual"
- if data.get('method') == 'static':
- subnet['address'] = data['address']
+ if data.get("method") == "static":
+ subnet["address"] = data["address"]
- for copy_key in ('netmask', 'gateway', 'broadcast'):
+ for copy_key in ("netmask", "gateway", "broadcast"):
if copy_key in data:
subnet[copy_key] = data[copy_key]
- if 'dns' in data:
- for n in ('nameservers', 'search'):
- if n in data['dns'] and data['dns'][n]:
- subnet['dns_' + n] = data['dns'][n]
- devs[devname]['subnets'].append(subnet)
+ if "dns" in data:
+ for n in ("nameservers", "search"):
+ if n in data["dns"] and data["dns"][n]:
+ subnet["dns_" + n] = data["dns"][n]
+ devs[devname]["subnets"].append(subnet)
- return {'version': 1,
- 'config': [devs[d] for d in sorted(devs)]}
+ return {"version": 1, "config": [devs[d] for d in sorted(devs)]}
class Renderer(renderer.Renderer):
@@ -342,10 +374,11 @@ class Renderer(renderer.Renderer):
def __init__(self, config=None):
if not config:
config = {}
- self.eni_path = config.get('eni_path', 'etc/network/interfaces')
- self.eni_header = config.get('eni_header', None)
+ self.eni_path = config.get("eni_path", "etc/network/interfaces")
+ self.eni_header = config.get("eni_header", None)
self.netrules_path = config.get(
- 'netrules_path', 'etc/udev/rules.d/70-persistent-net.rules')
+ "netrules_path", "etc/udev/rules.d/70-persistent-net.rules"
+ )
def _render_route(self, route, indent=""):
"""When rendering routes for an iface, in some cases applying a route
@@ -367,153 +400,166 @@ class Renderer(renderer.Renderer):
down = indent + "pre-down route del"
or_true = " || true"
mapping = {
- 'gateway': 'gw',
- 'metric': 'metric',
+ "gateway": "gw",
+ "metric": "metric",
}
- default_gw = ''
- if route['network'] == '0.0.0.0' and route['netmask'] == '0.0.0.0':
- default_gw = ' default'
- elif route['network'] == '::' and route['prefix'] == 0:
- default_gw = ' -A inet6 default'
+ default_gw = ""
+ if route["network"] == "0.0.0.0" and route["netmask"] == "0.0.0.0":
+ default_gw = " default"
+ elif route["network"] == "::" and route["prefix"] == 0:
+ default_gw = " -A inet6 default"
- route_line = ''
- for k in ['network', 'gateway', 'metric']:
- if default_gw and k == 'network':
+ route_line = ""
+ for k in ["network", "gateway", "metric"]:
+ if default_gw and k == "network":
continue
- if k == 'gateway':
- route_line += '%s %s %s' % (default_gw, mapping[k], route[k])
+ if k == "gateway":
+ route_line += "%s %s %s" % (default_gw, mapping[k], route[k])
elif k in route:
- if k == 'network':
- if ':' in route[k]:
- route_line += ' -A inet6'
- elif route.get('prefix') == 32:
- route_line += ' -host'
+ if k == "network":
+ if ":" in route[k]:
+ route_line += " -A inet6"
+ elif route.get("prefix") == 32:
+ route_line += " -host"
else:
- route_line += ' -net'
- if 'prefix' in route:
- route_line += ' %s/%s' % (route[k], route['prefix'])
+ route_line += " -net"
+ if "prefix" in route:
+ route_line += " %s/%s" % (route[k], route["prefix"])
else:
- route_line += ' %s %s' % (mapping[k], route[k])
+ route_line += " %s %s" % (mapping[k], route[k])
content.append(up + route_line + or_true)
content.append(down + route_line + or_true)
return content
def _render_iface(self, iface, render_hwaddress=False):
sections = []
- subnets = iface.get('subnets', {})
- accept_ra = iface.pop('accept-ra', None)
- ethernet_wol = iface.pop('wakeonlan', None)
+ subnets = iface.get("subnets", {})
+ accept_ra = iface.pop("accept-ra", None)
+ ethernet_wol = iface.pop("wakeonlan", None)
if ethernet_wol:
# Specify WOL setting 'g' for using "Magic Packet"
- iface['ethernet-wol'] = 'g'
+ iface["ethernet-wol"] = "g"
if subnets:
for index, subnet in enumerate(subnets):
ipv4_subnet_mtu = None
- iface['index'] = index
- iface['mode'] = subnet['type']
- iface['control'] = subnet.get('control', 'auto')
- subnet_inet = 'inet'
+ iface["index"] = index
+ iface["mode"] = subnet["type"]
+ iface["control"] = subnet.get("control", "auto")
+ subnet_inet = "inet"
if subnet_is_ipv6(subnet):
- subnet_inet += '6'
+ subnet_inet += "6"
else:
- ipv4_subnet_mtu = subnet.get('mtu')
- iface['inet'] = subnet_inet
- if (subnet['type'] == 'dhcp4' or subnet['type'] == 'dhcp6' or
- subnet['type'] == 'ipv6_dhcpv6-stateful'):
+ ipv4_subnet_mtu = subnet.get("mtu")
+ iface["inet"] = subnet_inet
+ if (
+ subnet["type"] == "dhcp4"
+ or subnet["type"] == "dhcp6"
+ or subnet["type"] == "ipv6_dhcpv6-stateful"
+ ):
# Configure network settings using DHCP or DHCPv6
- iface['mode'] = 'dhcp'
+ iface["mode"] = "dhcp"
if accept_ra is not None:
# Accept router advertisements (0=off, 1=on)
- iface['accept_ra'] = '1' if accept_ra else '0'
- elif subnet['type'] == 'ipv6_dhcpv6-stateless':
+ iface["accept_ra"] = "1" if accept_ra else "0"
+ elif subnet["type"] == "ipv6_dhcpv6-stateless":
# Configure network settings using SLAAC from RAs
- iface['mode'] = 'auto'
+ iface["mode"] = "auto"
# Use stateless DHCPv6 (0=off, 1=on)
- iface['dhcp'] = '1'
- elif subnet['type'] == 'ipv6_slaac':
+ iface["dhcp"] = "1"
+ elif subnet["type"] == "ipv6_slaac":
# Configure network settings using SLAAC from RAs
- iface['mode'] = 'auto'
+ iface["mode"] = "auto"
# Use stateless DHCPv6 (0=off, 1=on)
- iface['dhcp'] = '0'
+ iface["dhcp"] = "0"
elif subnet_is_ipv6(subnet):
# mode might be static6, eni uses 'static'
- iface['mode'] = 'static'
+ iface["mode"] = "static"
if accept_ra is not None:
# Accept router advertisements (0=off, 1=on)
- iface['accept_ra'] = '1' if accept_ra else '0'
+ iface["accept_ra"] = "1" if accept_ra else "0"
# do not emit multiple 'auto $IFACE' lines as older (precise)
# ifupdown complains
- if True in ["auto %s" % (iface['name']) in line
- for line in sections]:
- iface['control'] = 'alias'
+ if True in [
+ "auto %s" % (iface["name"]) in line for line in sections
+ ]:
+ iface["control"] = "alias"
lines = list(
_iface_start_entry(
- iface, index, render_hwaddress=render_hwaddress) +
- _iface_add_subnet(iface, subnet) +
- _iface_add_attrs(iface, index, ipv4_subnet_mtu)
+ iface, index, render_hwaddress=render_hwaddress
+ )
+ + _iface_add_subnet(iface, subnet)
+ + _iface_add_attrs(iface, index, ipv4_subnet_mtu)
)
- for route in subnet.get('routes', []):
+ for route in subnet.get("routes", []):
lines.extend(self._render_route(route, indent=" "))
sections.append(lines)
else:
# ifenslave docs say to auto the slave devices
lines = []
- if 'bond-master' in iface or 'bond-slaves' in iface:
+ if "bond-master" in iface or "bond-slaves" in iface:
lines.append("auto {name}".format(**iface))
lines.append("iface {name} {inet} {mode}".format(**iface))
lines.extend(
- _iface_add_attrs(iface, index=0, ipv4_subnet_mtu=None))
+ _iface_add_attrs(iface, index=0, ipv4_subnet_mtu=None)
+ )
sections.append(lines)
return sections
def _render_interfaces(self, network_state, render_hwaddress=False):
- '''Given state, emit etc/network/interfaces content.'''
+ """Given state, emit etc/network/interfaces content."""
# handle 'lo' specifically as we need to insert the global dns entries
# there (as that is the only interface that will be always up).
- lo = {'name': 'lo', 'type': 'physical', 'inet': 'inet',
- 'subnets': [{'type': 'loopback', 'control': 'auto'}]}
+ lo = {
+ "name": "lo",
+ "type": "physical",
+ "inet": "inet",
+ "subnets": [{"type": "loopback", "control": "auto"}],
+ }
for iface in network_state.iter_interfaces():
- if iface.get('name') == "lo":
+ if iface.get("name") == "lo":
lo = copy.deepcopy(iface)
nameservers = network_state.dns_nameservers
if nameservers:
- lo['subnets'][0]["dns_nameservers"] = (" ".join(nameservers))
+ lo["subnets"][0]["dns_nameservers"] = " ".join(nameservers)
searchdomains = network_state.dns_searchdomains
if searchdomains:
- lo['subnets'][0]["dns_search"] = (" ".join(searchdomains))
+ lo["subnets"][0]["dns_search"] = " ".join(searchdomains)
# Apply a sort order to ensure that we write out the physical
# interfaces first; this is critical for bonding
order = {
- 'loopback': 0,
- 'physical': 1,
- 'infiniband': 2,
- 'bond': 3,
- 'bridge': 4,
- 'vlan': 5,
+ "loopback": 0,
+ "physical": 1,
+ "infiniband": 2,
+ "bond": 3,
+ "bridge": 4,
+ "vlan": 5,
}
sections = []
sections.extend(self._render_iface(lo))
- for iface in sorted(network_state.iter_interfaces(),
- key=lambda k: (order[k['type']], k['name'])):
+ for iface in sorted(
+ network_state.iter_interfaces(),
+ key=lambda k: (order[k["type"]], k["name"]),
+ ):
- if iface.get('name') == "lo":
+ if iface.get("name") == "lo":
continue
sections.extend(
- self._render_iface(iface, render_hwaddress=render_hwaddress))
+ self._render_iface(iface, render_hwaddress=render_hwaddress)
+ )
for route in network_state.iter_routes():
sections.append(self._render_route(route))
- return '\n\n'.join(['\n'.join(s) for s in sections]) + "\n"
+ return "\n\n".join(["\n".join(s) for s in sections]) + "\n"
def render_network_state(self, network_state, templates=None, target=None):
fpeni = subp.target_path(target, self.eni_path)
@@ -524,34 +570,38 @@ class Renderer(renderer.Renderer):
if self.netrules_path:
netrules = subp.target_path(target, self.netrules_path)
util.ensure_dir(os.path.dirname(netrules))
- util.write_file(netrules,
- self._render_persistent_net(network_state))
+ util.write_file(
+ netrules, self._render_persistent_net(network_state)
+ )
def network_state_to_eni(network_state, header=None, render_hwaddress=False):
# render the provided network state, return a string of equivalent eni
- eni_path = 'etc/network/interfaces'
- renderer = Renderer(config={
- 'eni_path': eni_path,
- 'eni_header': header,
- 'netrules_path': None,
- })
+ eni_path = "etc/network/interfaces"
+ renderer = Renderer(
+ config={
+ "eni_path": eni_path,
+ "eni_header": header,
+ "netrules_path": None,
+ }
+ )
if not header:
header = ""
if not header.endswith("\n"):
header += "\n"
contents = renderer._render_interfaces(
- network_state, render_hwaddress=render_hwaddress)
+ network_state, render_hwaddress=render_hwaddress
+ )
return header + contents
def available(target=None):
- expected = ['ifquery', 'ifup', 'ifdown']
- search = ['/sbin', '/usr/sbin']
+ expected = ["ifquery", "ifup", "ifdown"]
+ search = ["/sbin", "/usr/sbin"]
for p in expected:
if not subp.which(p, search=search, target=target):
return False
- eni = subp.target_path(target, 'etc/network/interfaces')
+ eni = subp.target_path(target, "etc/network/interfaces")
if not os.path.isfile(eni):
return False
diff --git a/cloudinit/net/freebsd.py b/cloudinit/net/freebsd.py
index f8faf240..ec42b60c 100644
--- a/cloudinit/net/freebsd.py
+++ b/cloudinit/net/freebsd.py
@@ -1,31 +1,29 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import log as logging
import cloudinit.net.bsd
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import log as logging
+from cloudinit import subp, util
LOG = logging.getLogger(__name__)
class Renderer(cloudinit.net.bsd.BSDRenderer):
-
def __init__(self, config=None):
self._route_cpt = 0
super(Renderer, self).__init__()
def rename_interface(self, cur_name, device_name):
- self.set_rc_config_value('ifconfig_%s_name' % cur_name, device_name)
+ self.set_rc_config_value("ifconfig_%s_name" % cur_name, device_name)
def write_config(self):
for device_name, v in self.interface_configurations.items():
- net_config = 'DHCP'
+ net_config = "DHCP"
if isinstance(v, dict):
- net_config = v.get('address') + ' netmask ' + v.get('netmask')
- mtu = v.get('mtu')
+ net_config = v.get("address") + " netmask " + v.get("netmask")
+ mtu = v.get("mtu")
if mtu:
- net_config += (' mtu %d' % mtu)
- self.set_rc_config_value('ifconfig_' + device_name, net_config)
+ net_config += " mtu %d" % mtu
+ self.set_rc_config_value("ifconfig_" + device_name, net_config)
def start_services(self, run=False):
if not run:
@@ -35,29 +33,33 @@ class Renderer(cloudinit.net.bsd.BSDRenderer):
for dhcp_interface in self.dhcp_interfaces():
# Observed on DragonFlyBSD 6. If we use the "restart" parameter,
# the routes are not recreated.
- subp.subp(['service', 'dhclient', 'stop', dhcp_interface],
- rcs=[0, 1],
- capture=True)
+ subp.subp(
+ ["service", "dhclient", "stop", dhcp_interface],
+ rcs=[0, 1],
+ capture=True,
+ )
- subp.subp(['service', 'netif', 'restart'], capture=True)
+ subp.subp(["service", "netif", "restart"], capture=True)
# On FreeBSD 10, the restart of routing and dhclient is likely to fail
# because
# - routing: it cannot remove the loopback route, but it will still set
# up the default route as expected.
# - dhclient: it cannot stop the dhclient started by the netif service.
    # In both cases, the situation is ok, and we can proceed.
- subp.subp(['service', 'routing', 'restart'], capture=True, rcs=[0, 1])
+ subp.subp(["service", "routing", "restart"], capture=True, rcs=[0, 1])
for dhcp_interface in self.dhcp_interfaces():
- subp.subp(['service', 'dhclient', 'start', dhcp_interface],
- rcs=[0, 1],
- capture=True)
+ subp.subp(
+ ["service", "dhclient", "start", dhcp_interface],
+ rcs=[0, 1],
+ capture=True,
+ )
def set_route(self, network, netmask, gateway):
- if network == '0.0.0.0':
- self.set_rc_config_value('defaultrouter', gateway)
+ if network == "0.0.0.0":
+ self.set_rc_config_value("defaultrouter", gateway)
else:
- route_name = 'route_net%d' % self._route_cpt
+ route_name = "route_net%d" % self._route_cpt
route_cmd = "-route %s/%s %s" % (network, netmask, gateway)
self.set_rc_config_value(route_name, route_cmd)
self._route_cpt += 1
diff --git a/cloudinit/net/netbsd.py b/cloudinit/net/netbsd.py
index 5f8881a5..3d6b85b7 100644
--- a/cloudinit/net/netbsd.py
+++ b/cloudinit/net/netbsd.py
@@ -1,45 +1,42 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import log as logging
-from cloudinit import subp
-from cloudinit import util
import cloudinit.net.bsd
+from cloudinit import log as logging
+from cloudinit import subp, util
LOG = logging.getLogger(__name__)
class Renderer(cloudinit.net.bsd.BSDRenderer):
-
def __init__(self, config=None):
super(Renderer, self).__init__()
def write_config(self):
if self.dhcp_interfaces():
- self.set_rc_config_value('dhcpcd', 'YES')
+ self.set_rc_config_value("dhcpcd", "YES")
self.set_rc_config_value(
- 'dhcpcd_flags',
- ' '.join(self.dhcp_interfaces())
+ "dhcpcd_flags", " ".join(self.dhcp_interfaces())
)
for device_name, v in self.interface_configurations.items():
if isinstance(v, dict):
- net_config = v.get('address') + ' netmask ' + v.get('netmask')
- mtu = v.get('mtu')
+ net_config = v.get("address") + " netmask " + v.get("netmask")
+ mtu = v.get("mtu")
if mtu:
- net_config += (' mtu %d' % mtu)
- self.set_rc_config_value('ifconfig_' + device_name, net_config)
+ net_config += " mtu %d" % mtu
+ self.set_rc_config_value("ifconfig_" + device_name, net_config)
def start_services(self, run=False):
if not run:
LOG.debug("netbsd generate postcmd disabled")
return
- subp.subp(['service', 'network', 'restart'], capture=True)
+ subp.subp(["service", "network", "restart"], capture=True)
if self.dhcp_interfaces():
- subp.subp(['service', 'dhcpcd', 'restart'], capture=True)
+ subp.subp(["service", "dhcpcd", "restart"], capture=True)
def set_route(self, network, netmask, gateway):
- if network == '0.0.0.0':
- self.set_rc_config_value('defaultroute', gateway)
+ if network == "0.0.0.0":
+ self.set_rc_config_value("defaultroute", gateway)
def available(target=None):
diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py
index 41acf963..57ba2d9a 100644
--- a/cloudinit/net/netplan.py
+++ b/cloudinit/net/netplan.py
@@ -3,20 +3,18 @@
import copy
import os
+from cloudinit import log as logging
+from cloudinit import safeyaml, subp, util
+from cloudinit.net import SYS_CLASS_NET, get_devicelist
+
from . import renderer
from .network_state import (
+ IPV6_DYNAMIC_TYPES,
+ NET_CONFIG_TO_V2,
NetworkState,
subnet_is_ipv6,
- NET_CONFIG_TO_V2,
- IPV6_DYNAMIC_TYPES,
)
-from cloudinit import log as logging
-from cloudinit import util
-from cloudinit import subp
-from cloudinit import safeyaml
-from cloudinit.net import SYS_CLASS_NET, get_devicelist
-
KNOWN_SNAPD_CONFIG = b"""\
# This is the initial network config.
# It can be overwritten by cloud-init or console-conf.
@@ -37,8 +35,11 @@ LOG = logging.getLogger(__name__)
def _get_params_dict_by_match(config, match):
- return dict((key, value) for (key, value) in config.items()
- if key.startswith(match))
+ return dict(
+ (key, value)
+ for (key, value) in config.items()
+ if key.startswith(match)
+ )
def _extract_addresses(config, entry, ifname, features=None):
@@ -78,14 +79,16 @@ def _extract_addresses(config, entry, ifname, features=None):
"""
- def _listify(obj, token=' '):
+ def _listify(obj, token=" "):
"Helper to convert strings to list of strings, handle single string"
if not obj or type(obj) not in [str]:
return obj
if token in obj:
return obj.split(token)
else:
- return [obj, ]
+ return [
+ obj,
+ ]
if features is None:
features = []
@@ -93,78 +96,85 @@ def _extract_addresses(config, entry, ifname, features=None):
routes = []
nameservers = []
searchdomains = []
- subnets = config.get('subnets', [])
+ subnets = config.get("subnets", [])
if subnets is None:
subnets = []
for subnet in subnets:
- sn_type = subnet.get('type')
- if sn_type.startswith('dhcp'):
- if sn_type == 'dhcp':
- sn_type += '4'
+ sn_type = subnet.get("type")
+ if sn_type.startswith("dhcp"):
+ if sn_type == "dhcp":
+ sn_type += "4"
entry.update({sn_type: True})
elif sn_type in IPV6_DYNAMIC_TYPES:
- entry.update({'dhcp6': True})
- elif sn_type in ['static', 'static6']:
- addr = "%s" % subnet.get('address')
- if 'prefix' in subnet:
- addr += "/%d" % subnet.get('prefix')
- if 'gateway' in subnet and subnet.get('gateway'):
- gateway = subnet.get('gateway')
+ entry.update({"dhcp6": True})
+ elif sn_type in ["static", "static6"]:
+ addr = "%s" % subnet.get("address")
+ if "prefix" in subnet:
+ addr += "/%d" % subnet.get("prefix")
+ if "gateway" in subnet and subnet.get("gateway"):
+ gateway = subnet.get("gateway")
if ":" in gateway:
- entry.update({'gateway6': gateway})
+ entry.update({"gateway6": gateway})
else:
- entry.update({'gateway4': gateway})
- if 'dns_nameservers' in subnet:
- nameservers += _listify(subnet.get('dns_nameservers', []))
- if 'dns_search' in subnet:
- searchdomains += _listify(subnet.get('dns_search', []))
- if 'mtu' in subnet:
- mtukey = 'mtu'
- if subnet_is_ipv6(subnet) and 'ipv6-mtu' in features:
- mtukey = 'ipv6-mtu'
- entry.update({mtukey: subnet.get('mtu')})
- for route in subnet.get('routes', []):
- to_net = "%s/%s" % (route.get('network'),
- route.get('prefix'))
+ entry.update({"gateway4": gateway})
+ if "dns_nameservers" in subnet:
+ nameservers += _listify(subnet.get("dns_nameservers", []))
+ if "dns_search" in subnet:
+ searchdomains += _listify(subnet.get("dns_search", []))
+ if "mtu" in subnet:
+ mtukey = "mtu"
+ if subnet_is_ipv6(subnet) and "ipv6-mtu" in features:
+ mtukey = "ipv6-mtu"
+ entry.update({mtukey: subnet.get("mtu")})
+ for route in subnet.get("routes", []):
+ to_net = "%s/%s" % (route.get("network"), route.get("prefix"))
new_route = {
- 'via': route.get('gateway'),
- 'to': to_net,
+ "via": route.get("gateway"),
+ "to": to_net,
}
- if 'metric' in route:
- new_route.update({'metric': route.get('metric', 100)})
+ if "metric" in route:
+ new_route.update({"metric": route.get("metric", 100)})
routes.append(new_route)
addresses.append(addr)
- if 'mtu' in config:
- entry_mtu = entry.get('mtu')
- if entry_mtu and config['mtu'] != entry_mtu:
+ if "mtu" in config:
+ entry_mtu = entry.get("mtu")
+ if entry_mtu and config["mtu"] != entry_mtu:
LOG.warning(
"Network config: ignoring %s device-level mtu:%s because"
" ipv4 subnet-level mtu:%s provided.",
- ifname, config['mtu'], entry_mtu)
+ ifname,
+ config["mtu"],
+ entry_mtu,
+ )
else:
- entry['mtu'] = config['mtu']
+ entry["mtu"] = config["mtu"]
if len(addresses) > 0:
- entry.update({'addresses': addresses})
+ entry.update({"addresses": addresses})
if len(routes) > 0:
- entry.update({'routes': routes})
+ entry.update({"routes": routes})
if len(nameservers) > 0:
- ns = {'addresses': nameservers}
- entry.update({'nameservers': ns})
+ ns = {"addresses": nameservers}
+ entry.update({"nameservers": ns})
if len(searchdomains) > 0:
- ns = entry.get('nameservers', {})
- ns.update({'search': searchdomains})
- entry.update({'nameservers': ns})
- if 'accept-ra' in config and config['accept-ra'] is not None:
- entry.update({'accept-ra': util.is_true(config.get('accept-ra'))})
+ ns = entry.get("nameservers", {})
+ ns.update({"search": searchdomains})
+ entry.update({"nameservers": ns})
+ if "accept-ra" in config and config["accept-ra"] is not None:
+ entry.update({"accept-ra": util.is_true(config.get("accept-ra"))})
def _extract_bond_slaves_by_name(interfaces, entry, bond_master):
- bond_slave_names = sorted([name for (name, cfg) in interfaces.items()
- if cfg.get('bond-master', None) == bond_master])
+ bond_slave_names = sorted(
+ [
+ name
+ for (name, cfg) in interfaces.items()
+ if cfg.get("bond-master", None) == bond_master
+ ]
+ )
if len(bond_slave_names) > 0:
- entry.update({'interfaces': bond_slave_names})
+ entry.update({"interfaces": bond_slave_names})
def _clean_default(target=None):
@@ -177,13 +187,20 @@ def _clean_default(target=None):
if content != KNOWN_SNAPD_CONFIG:
return
- derived = [subp.target_path(target, f) for f in (
- 'run/systemd/network/10-netplan-all-en.network',
- 'run/systemd/network/10-netplan-all-eth.network',
- 'run/systemd/generator/netplan.stamp')]
+ derived = [
+ subp.target_path(target, f)
+ for f in (
+ "run/systemd/network/10-netplan-all-en.network",
+ "run/systemd/network/10-netplan-all-eth.network",
+ "run/systemd/generator/netplan.stamp",
+ )
+ ]
existing = [f for f in derived if os.path.isfile(f)]
- LOG.debug("removing known config '%s' and derived existing files: %s",
- tpath, existing)
+ LOG.debug(
+ "removing known config '%s' and derived existing files: %s",
+ tpath,
+ existing,
+ )
for f in [tpath] + existing:
os.unlink(f)
@@ -192,18 +209,19 @@ def _clean_default(target=None):
class Renderer(renderer.Renderer):
"""Renders network information in a /etc/netplan/network.yaml format."""
- NETPLAN_GENERATE = ['netplan', 'generate']
- NETPLAN_INFO = ['netplan', 'info']
+ NETPLAN_GENERATE = ["netplan", "generate"]
+ NETPLAN_INFO = ["netplan", "info"]
def __init__(self, config=None):
if not config:
config = {}
- self.netplan_path = config.get('netplan_path',
- 'etc/netplan/50-cloud-init.yaml')
- self.netplan_header = config.get('netplan_header', None)
- self._postcmds = config.get('postcmds', False)
- self.clean_default = config.get('clean_default', True)
- self._features = config.get('features', None)
+ self.netplan_path = config.get(
+ "netplan_path", "etc/netplan/50-cloud-init.yaml"
+ )
+ self.netplan_header = config.get("netplan_header", None)
+ self._postcmds = config.get("postcmds", False)
+ self.clean_default = config.get("clean_default", True)
+ self._features = config.get("features", None)
@property
def features(self):
@@ -211,13 +229,13 @@ class Renderer(renderer.Renderer):
try:
info_blob, _err = subp.subp(self.NETPLAN_INFO, capture=True)
info = util.load_yaml(info_blob)
- self._features = info['netplan.io']['features']
+ self._features = info["netplan.io"]["features"]
except subp.ProcessExecutionError:
# if the info subcommand is not present then we don't have any
# new features
pass
except (TypeError, KeyError) as e:
- LOG.debug('Failed to list features from netplan info: %s', e)
+ LOG.debug("Failed to list features from netplan info: %s", e)
return self._features
def render_network_state(self, network_state, templates=None, target=None):
@@ -249,26 +267,30 @@ class Renderer(renderer.Renderer):
def _net_setup_link(self, run=False):
"""To ensure device link properties are applied, we poke
- udev to re-evaluate networkd .link files and call
- the setup_link udev builtin command
+ udev to re-evaluate networkd .link files and call
+ the setup_link udev builtin command
"""
if not run:
LOG.debug("netplan net_setup_link postcmd disabled")
return
- setup_lnk = ['udevadm', 'test-builtin', 'net_setup_link']
- for cmd in [setup_lnk + [SYS_CLASS_NET + iface]
- for iface in get_devicelist() if
- os.path.islink(SYS_CLASS_NET + iface)]:
+ setup_lnk = ["udevadm", "test-builtin", "net_setup_link"]
+ for cmd in [
+ setup_lnk + [SYS_CLASS_NET + iface]
+ for iface in get_devicelist()
+ if os.path.islink(SYS_CLASS_NET + iface)
+ ]:
subp.subp(cmd, capture=True)
def _render_content(self, network_state: NetworkState):
# if content already in netplan format, pass it back
if network_state.version == 2:
- LOG.debug('V2 to V2 passthrough')
- return safeyaml.dumps({'network': network_state.config},
- explicit_start=False,
- explicit_end=False)
+ LOG.debug("V2 to V2 passthrough")
+ return safeyaml.dumps(
+ {"network": network_state.config},
+ explicit_start=False,
+ explicit_end=False,
+ )
ethernets = {}
wifis = {}
@@ -277,80 +299,83 @@ class Renderer(renderer.Renderer):
vlans = {}
content = []
- interfaces = network_state._network_state.get('interfaces', [])
+ interfaces = network_state._network_state.get("interfaces", [])
nameservers = network_state.dns_nameservers
searchdomains = network_state.dns_searchdomains
for config in network_state.iter_interfaces():
- ifname = config.get('name')
+ ifname = config.get("name")
# filter None (but not False) entries up front
- ifcfg = dict((key, value) for (key, value) in config.items()
- if value is not None)
-
- if_type = ifcfg.get('type')
- if if_type == 'physical':
+ ifcfg = dict(
+ (key, value)
+ for (key, value) in config.items()
+ if value is not None
+ )
+
+ if_type = ifcfg.get("type")
+ if if_type == "physical":
# required_keys = ['name', 'mac_address']
eth = {
- 'set-name': ifname,
- 'match': ifcfg.get('match', None),
+ "set-name": ifname,
+ "match": ifcfg.get("match", None),
}
- if eth['match'] is None:
- macaddr = ifcfg.get('mac_address', None)
+ if eth["match"] is None:
+ macaddr = ifcfg.get("mac_address", None)
if macaddr is not None:
- eth['match'] = {'macaddress': macaddr.lower()}
+ eth["match"] = {"macaddress": macaddr.lower()}
else:
- del eth['match']
- del eth['set-name']
+ del eth["match"]
+ del eth["set-name"]
_extract_addresses(ifcfg, eth, ifname, self.features)
ethernets.update({ifname: eth})
- elif if_type == 'bond':
+ elif if_type == "bond":
# required_keys = ['name', 'bond_interfaces']
bond = {}
bond_config = {}
# extract bond params and drop the bond_ prefix as it's
                # redundant in v2 yaml format
- v2_bond_map = NET_CONFIG_TO_V2.get('bond')
- for match in ['bond_', 'bond-']:
+ v2_bond_map = NET_CONFIG_TO_V2.get("bond")
+ for match in ["bond_", "bond-"]:
bond_params = _get_params_dict_by_match(ifcfg, match)
for (param, value) in bond_params.items():
- newname = v2_bond_map.get(param.replace('_', '-'))
+ newname = v2_bond_map.get(param.replace("_", "-"))
if newname is None:
continue
bond_config.update({newname: value})
if len(bond_config) > 0:
- bond.update({'parameters': bond_config})
- if ifcfg.get('mac_address'):
- bond['macaddress'] = ifcfg.get('mac_address').lower()
- slave_interfaces = ifcfg.get('bond-slaves')
- if slave_interfaces == 'none':
+ bond.update({"parameters": bond_config})
+ if ifcfg.get("mac_address"):
+ bond["macaddress"] = ifcfg.get("mac_address").lower()
+ slave_interfaces = ifcfg.get("bond-slaves")
+ if slave_interfaces == "none":
_extract_bond_slaves_by_name(interfaces, bond, ifname)
_extract_addresses(ifcfg, bond, ifname, self.features)
bonds.update({ifname: bond})
- elif if_type == 'bridge':
+ elif if_type == "bridge":
# required_keys = ['name', 'bridge_ports']
- ports = sorted(copy.copy(ifcfg.get('bridge_ports')))
+ ports = sorted(copy.copy(ifcfg.get("bridge_ports")))
bridge = {
- 'interfaces': ports,
+ "interfaces": ports,
}
# extract bridge params and drop the bridge prefix as it's
            # redundant in v2 yaml format
- match_prefix = 'bridge_'
+ match_prefix = "bridge_"
params = _get_params_dict_by_match(ifcfg, match_prefix)
br_config = {}
# v2 yaml uses different names for the keys
# and at least one value format change
- v2_bridge_map = NET_CONFIG_TO_V2.get('bridge')
+ v2_bridge_map = NET_CONFIG_TO_V2.get("bridge")
for (param, value) in params.items():
newname = v2_bridge_map.get(param)
if newname is None:
continue
br_config.update({newname: value})
- if newname in ['path-cost', 'port-priority']:
+ if newname in ["path-cost", "port-priority"]:
# <interface> <value> -> <interface>: int(<value>)
newvalue = {}
for val in value:
@@ -359,58 +384,60 @@ class Renderer(renderer.Renderer):
br_config.update({newname: newvalue})
if len(br_config) > 0:
- bridge.update({'parameters': br_config})
- if ifcfg.get('mac_address'):
- bridge['macaddress'] = ifcfg.get('mac_address').lower()
+ bridge.update({"parameters": br_config})
+ if ifcfg.get("mac_address"):
+ bridge["macaddress"] = ifcfg.get("mac_address").lower()
_extract_addresses(ifcfg, bridge, ifname, self.features)
bridges.update({ifname: bridge})
- elif if_type == 'vlan':
+ elif if_type == "vlan":
# required_keys = ['name', 'vlan_id', 'vlan-raw-device']
vlan = {
- 'id': ifcfg.get('vlan_id'),
- 'link': ifcfg.get('vlan-raw-device')
+ "id": ifcfg.get("vlan_id"),
+ "link": ifcfg.get("vlan-raw-device"),
}
- macaddr = ifcfg.get('mac_address', None)
+ macaddr = ifcfg.get("mac_address", None)
if macaddr is not None:
- vlan['macaddress'] = macaddr.lower()
+ vlan["macaddress"] = macaddr.lower()
_extract_addresses(ifcfg, vlan, ifname, self.features)
vlans.update({ifname: vlan})
        # inject global nameserver values under each interface which
        # has addresses and does not already have a DNS configuration
if nameservers or searchdomains:
- nscfg = {'addresses': nameservers, 'search': searchdomains}
+ nscfg = {"addresses": nameservers, "search": searchdomains}
for section in [ethernets, wifis, bonds, bridges, vlans]:
for _name, cfg in section.items():
- if 'nameservers' in cfg or 'addresses' not in cfg:
+ if "nameservers" in cfg or "addresses" not in cfg:
continue
- cfg.update({'nameservers': nscfg})
+ cfg.update({"nameservers": nscfg})
# workaround yaml dictionary key sorting when dumping
def _render_section(name, section):
if section:
- dump = safeyaml.dumps({name: section},
- explicit_start=False,
- explicit_end=False,
- noalias=True)
- txt = util.indent(dump, ' ' * 4)
+ dump = safeyaml.dumps(
+ {name: section},
+ explicit_start=False,
+ explicit_end=False,
+ noalias=True,
+ )
+ txt = util.indent(dump, " " * 4)
return [txt]
return []
content.append("network:\n version: 2\n")
- content += _render_section('ethernets', ethernets)
- content += _render_section('wifis', wifis)
- content += _render_section('bonds', bonds)
- content += _render_section('bridges', bridges)
- content += _render_section('vlans', vlans)
+ content += _render_section("ethernets", ethernets)
+ content += _render_section("wifis", wifis)
+ content += _render_section("bonds", bonds)
+ content += _render_section("bridges", bridges)
+ content += _render_section("vlans", vlans)
return "".join(content)
def available(target=None):
- expected = ['netplan']
- search = ['/usr/sbin', '/sbin']
+ expected = ["netplan"]
+ search = ["/usr/sbin", "/sbin"]
for p in expected:
if not subp.which(p, search=search, target=target):
return False
@@ -419,11 +446,13 @@ def available(target=None):
def network_state_to_netplan(network_state, header=None):
# render the provided network state, return a string of equivalent eni
- netplan_path = 'etc/network/50-cloud-init.yaml'
- renderer = Renderer({
- 'netplan_path': netplan_path,
- 'netplan_header': header,
- })
+ netplan_path = "etc/network/50-cloud-init.yaml"
+ renderer = Renderer(
+ {
+ "netplan_path": netplan_path,
+ "netplan_header": header,
+ }
+ )
if not header:
header = ""
if not header.endswith("\n"):
diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
index 4862bf91..d7c9144f 100644
--- a/cloudinit/net/network_state.py
+++ b/cloudinit/net/network_state.py
@@ -10,52 +10,70 @@ import logging
import socket
import struct
-from cloudinit import safeyaml
-from cloudinit import util
+from cloudinit import safeyaml, util
LOG = logging.getLogger(__name__)
NETWORK_STATE_VERSION = 1
-IPV6_DYNAMIC_TYPES = ['dhcp6',
- 'ipv6_slaac',
- 'ipv6_dhcpv6-stateless',
- 'ipv6_dhcpv6-stateful']
+IPV6_DYNAMIC_TYPES = [
+ "dhcp6",
+ "ipv6_slaac",
+ "ipv6_dhcpv6-stateless",
+ "ipv6_dhcpv6-stateful",
+]
NETWORK_STATE_REQUIRED_KEYS = {
- 1: ['version', 'config', 'network_state'],
+ 1: ["version", "config", "network_state"],
}
NETWORK_V2_KEY_FILTER = [
- 'addresses', 'dhcp4', 'dhcp4-overrides', 'dhcp6', 'dhcp6-overrides',
- 'gateway4', 'gateway6', 'interfaces', 'match', 'mtu', 'nameservers',
- 'renderer', 'set-name', 'wakeonlan', 'accept-ra'
+ "addresses",
+ "dhcp4",
+ "dhcp4-overrides",
+ "dhcp6",
+ "dhcp6-overrides",
+ "gateway4",
+ "gateway6",
+ "interfaces",
+ "match",
+ "mtu",
+ "nameservers",
+ "renderer",
+ "set-name",
+ "wakeonlan",
+ "accept-ra",
]
NET_CONFIG_TO_V2 = {
- 'bond': {'bond-ad-select': 'ad-select',
- 'bond-arp-interval': 'arp-interval',
- 'bond-arp-ip-target': 'arp-ip-target',
- 'bond-arp-validate': 'arp-validate',
- 'bond-downdelay': 'down-delay',
- 'bond-fail-over-mac': 'fail-over-mac-policy',
- 'bond-lacp-rate': 'lacp-rate',
- 'bond-miimon': 'mii-monitor-interval',
- 'bond-min-links': 'min-links',
- 'bond-mode': 'mode',
- 'bond-num-grat-arp': 'gratuitious-arp',
- 'bond-primary': 'primary',
- 'bond-primary-reselect': 'primary-reselect-policy',
- 'bond-updelay': 'up-delay',
- 'bond-xmit-hash-policy': 'transmit-hash-policy'},
- 'bridge': {'bridge_ageing': 'ageing-time',
- 'bridge_bridgeprio': 'priority',
- 'bridge_fd': 'forward-delay',
- 'bridge_gcint': None,
- 'bridge_hello': 'hello-time',
- 'bridge_maxage': 'max-age',
- 'bridge_maxwait': None,
- 'bridge_pathcost': 'path-cost',
- 'bridge_portprio': 'port-priority',
- 'bridge_stp': 'stp',
- 'bridge_waitport': None}}
+ "bond": {
+ "bond-ad-select": "ad-select",
+ "bond-arp-interval": "arp-interval",
+ "bond-arp-ip-target": "arp-ip-target",
+ "bond-arp-validate": "arp-validate",
+ "bond-downdelay": "down-delay",
+ "bond-fail-over-mac": "fail-over-mac-policy",
+ "bond-lacp-rate": "lacp-rate",
+ "bond-miimon": "mii-monitor-interval",
+ "bond-min-links": "min-links",
+ "bond-mode": "mode",
+ "bond-num-grat-arp": "gratuitious-arp",
+ "bond-primary": "primary",
+ "bond-primary-reselect": "primary-reselect-policy",
+ "bond-updelay": "up-delay",
+ "bond-xmit-hash-policy": "transmit-hash-policy",
+ },
+ "bridge": {
+ "bridge_ageing": "ageing-time",
+ "bridge_bridgeprio": "priority",
+ "bridge_fd": "forward-delay",
+ "bridge_gcint": None,
+ "bridge_hello": "hello-time",
+ "bridge_maxage": "max-age",
+ "bridge_maxwait": None,
+ "bridge_pathcost": "path-cost",
+ "bridge_portprio": "port-priority",
+ "bridge_stp": "stp",
+ "bridge_waitport": None,
+ },
+}
def from_state_file(state_file):
@@ -77,17 +95,16 @@ class InvalidCommand(Exception):
def ensure_command_keys(required_keys):
-
def wrapper(func):
-
@functools.wraps(func)
def decorator(self, command, *args, **kwargs):
if required_keys:
missing_keys = diff_keys(required_keys, command)
if missing_keys:
- raise InvalidCommand("Command missing %s of required"
- " keys %s" % (missing_keys,
- required_keys))
+ raise InvalidCommand(
+ "Command missing %s of required keys %s"
+ % (missing_keys, required_keys)
+ )
return func(self, command, *args, **kwargs)
return decorator
@@ -102,29 +119,28 @@ class CommandHandlerMeta(type):
'handle_' and on finding those will populate a class attribute mapping
so that those methods can be quickly located and called.
"""
+
def __new__(cls, name, parents, dct):
command_handlers = {}
for attr_name, attr in dct.items():
- if callable(attr) and attr_name.startswith('handle_'):
- handles_what = attr_name[len('handle_'):]
+ if callable(attr) and attr_name.startswith("handle_"):
+ handles_what = attr_name[len("handle_") :]
if handles_what:
command_handlers[handles_what] = attr
- dct['command_handlers'] = command_handlers
- return super(CommandHandlerMeta, cls).__new__(cls, name,
- parents, dct)
+ dct["command_handlers"] = command_handlers
+ return super(CommandHandlerMeta, cls).__new__(cls, name, parents, dct)
class NetworkState(object):
-
def __init__(self, network_state, version=NETWORK_STATE_VERSION):
self._network_state = copy.deepcopy(network_state)
self._version = version
- self.use_ipv6 = network_state.get('use_ipv6', False)
+ self.use_ipv6 = network_state.get("use_ipv6", False)
self._has_default_route = None
@property
def config(self):
- return self._network_state['config']
+ return self._network_state["config"]
@property
def version(self):
@@ -133,14 +149,14 @@ class NetworkState(object):
@property
def dns_nameservers(self):
try:
- return self._network_state['dns']['nameservers']
+ return self._network_state["dns"]["nameservers"]
except KeyError:
return []
@property
def dns_searchdomains(self):
try:
- return self._network_state['dns']['search']
+ return self._network_state["dns"]["search"]
except KeyError:
return []
@@ -151,7 +167,7 @@ class NetworkState(object):
return self._has_default_route
def iter_interfaces(self, filter_func=None):
- ifaces = self._network_state.get('interfaces', {})
+ ifaces = self._network_state.get("interfaces", {})
for iface in ifaces.values():
if filter_func is None:
yield iface
@@ -160,7 +176,7 @@ class NetworkState(object):
yield iface
def iter_routes(self, filter_func=None):
- for route in self._network_state.get('routes', []):
+ for route in self._network_state.get("routes", []):
if filter_func is not None:
if filter_func(route):
yield route
@@ -172,38 +188,37 @@ class NetworkState(object):
if self._is_default_route(route):
return True
for iface in self.iter_interfaces():
- for subnet in iface.get('subnets', []):
- for route in subnet.get('routes', []):
+ for subnet in iface.get("subnets", []):
+ for route in subnet.get("routes", []):
if self._is_default_route(route):
return True
return False
def _is_default_route(self, route):
- default_nets = ('::', '0.0.0.0')
+ default_nets = ("::", "0.0.0.0")
return (
- route.get('prefix') == 0
- and route.get('network') in default_nets
+ route.get("prefix") == 0 and route.get("network") in default_nets
)
class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
initial_network_state = {
- 'interfaces': {},
- 'routes': [],
- 'dns': {
- 'nameservers': [],
- 'search': [],
+ "interfaces": {},
+ "routes": [],
+ "dns": {
+ "nameservers": [],
+ "search": [],
},
- 'use_ipv6': False,
- 'config': None,
+ "use_ipv6": False,
+ "config": None,
}
def __init__(self, version=NETWORK_STATE_VERSION, config=None):
self._version = version
self._config = config
self._network_state = copy.deepcopy(self.initial_network_state)
- self._network_state['config'] = config
+ self._network_state["config"] = config
self._parsed = False
self._interface_dns_map = {}
@@ -213,41 +228,41 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
@property
def use_ipv6(self):
- return self._network_state.get('use_ipv6')
+ return self._network_state.get("use_ipv6")
@use_ipv6.setter
def use_ipv6(self, val):
- self._network_state.update({'use_ipv6': val})
+ self._network_state.update({"use_ipv6": val})
def dump(self):
state = {
- 'version': self._version,
- 'config': self._config,
- 'network_state': self._network_state,
+ "version": self._version,
+ "config": self._config,
+ "network_state": self._network_state,
}
return safeyaml.dumps(state)
def load(self, state):
- if 'version' not in state:
- LOG.error('Invalid state, missing version field')
- raise ValueError('Invalid state, missing version field')
+ if "version" not in state:
+ LOG.error("Invalid state, missing version field")
+ raise ValueError("Invalid state, missing version field")
- required_keys = NETWORK_STATE_REQUIRED_KEYS[state['version']]
+ required_keys = NETWORK_STATE_REQUIRED_KEYS[state["version"]]
missing_keys = diff_keys(required_keys, state)
if missing_keys:
- msg = 'Invalid state, missing keys: %s' % (missing_keys)
+ msg = "Invalid state, missing keys: %s" % (missing_keys)
LOG.error(msg)
raise ValueError(msg)
# v1 - direct attr mapping, except version
- for key in [k for k in required_keys if k not in ['version']]:
+ for key in [k for k in required_keys if k not in ["version"]]:
setattr(self, key, state[key])
def dump_network_state(self):
return safeyaml.dumps(self._network_state)
def as_dict(self):
- return {'version': self._version, 'config': self._config}
+ return {"version": self._version, "config": self._config}
def get_network_state(self):
ns = self.network_state
@@ -263,7 +278,7 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
def parse_config_v1(self, skip_broken=True):
for command in self._config:
- command_type = command['type']
+ command_type = command["type"]
try:
handler = self.command_handlers[command_type]
except KeyError as e:
@@ -276,28 +291,29 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
if not skip_broken:
raise
else:
- LOG.warning("Skipping invalid command: %s", command,
- exc_info=True)
+ LOG.warning(
+ "Skipping invalid command: %s", command, exc_info=True
+ )
LOG.debug(self.dump_network_state())
for interface, dns in self._interface_dns_map.items():
iface = None
try:
- iface = self._network_state['interfaces'][interface]
+ iface = self._network_state["interfaces"][interface]
except KeyError as e:
raise ValueError(
- 'Nameserver specified for interface {0}, '
- 'but interface {0} does not exist!'.format(interface)
+ "Nameserver specified for interface {0}, "
+ "but interface {0} does not exist!".format(interface)
) from e
if iface:
nameservers, search = dns
- iface['dns'] = {
- 'addresses': nameservers,
- 'search': search,
+ iface["dns"] = {
+ "addresses": nameservers,
+ "search": search,
}
def parse_config_v2(self, skip_broken=True):
for command_type, command in self._config.items():
- if command_type in ['version', 'renderer']:
+ if command_type in ["version", "renderer"]:
continue
try:
handler = self.command_handlers[command_type]
@@ -312,17 +328,18 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
if not skip_broken:
raise
else:
- LOG.warning("Skipping invalid command: %s", command,
- exc_info=True)
+ LOG.warning(
+ "Skipping invalid command: %s", command, exc_info=True
+ )
LOG.debug(self.dump_network_state())
- @ensure_command_keys(['name'])
+ @ensure_command_keys(["name"])
def handle_loopback(self, command):
return self.handle_physical(command)
- @ensure_command_keys(['name'])
+ @ensure_command_keys(["name"])
def handle_physical(self, command):
- '''
+ """
command = {
'type': 'physical',
'mac_address': 'c0:d6:9f:2c:e8:80',
@@ -332,119 +349,122 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
],
'accept-ra': 'true'
}
- '''
+ """
- interfaces = self._network_state.get('interfaces', {})
- iface = interfaces.get(command['name'], {})
- for param, val in command.get('params', {}).items():
+ interfaces = self._network_state.get("interfaces", {})
+ iface = interfaces.get(command["name"], {})
+ for param, val in command.get("params", {}).items():
iface.update({param: val})
# convert subnet ipv6 netmask to cidr as needed
- subnets = _normalize_subnets(command.get('subnets'))
+ subnets = _normalize_subnets(command.get("subnets"))
# automatically set 'use_ipv6' if any addresses are ipv6
if not self.use_ipv6:
for subnet in subnets:
- if (subnet.get('type').endswith('6') or
- is_ipv6_addr(subnet.get('address'))):
+ if subnet.get("type").endswith("6") or is_ipv6_addr(
+ subnet.get("address")
+ ):
self.use_ipv6 = True
break
- accept_ra = command.get('accept-ra', None)
+ accept_ra = command.get("accept-ra", None)
if accept_ra is not None:
accept_ra = util.is_true(accept_ra)
- wakeonlan = command.get('wakeonlan', None)
+ wakeonlan = command.get("wakeonlan", None)
if wakeonlan is not None:
wakeonlan = util.is_true(wakeonlan)
- iface.update({
- 'name': command.get('name'),
- 'type': command.get('type'),
- 'mac_address': command.get('mac_address'),
- 'inet': 'inet',
- 'mode': 'manual',
- 'mtu': command.get('mtu'),
- 'address': None,
- 'gateway': None,
- 'subnets': subnets,
- 'accept-ra': accept_ra,
- 'wakeonlan': wakeonlan,
- })
- self._network_state['interfaces'].update({command.get('name'): iface})
+ iface.update(
+ {
+ "name": command.get("name"),
+ "type": command.get("type"),
+ "mac_address": command.get("mac_address"),
+ "inet": "inet",
+ "mode": "manual",
+ "mtu": command.get("mtu"),
+ "address": None,
+ "gateway": None,
+ "subnets": subnets,
+ "accept-ra": accept_ra,
+ "wakeonlan": wakeonlan,
+ }
+ )
+ self._network_state["interfaces"].update({command.get("name"): iface})
self.dump_network_state()
- @ensure_command_keys(['name', 'vlan_id', 'vlan_link'])
+ @ensure_command_keys(["name", "vlan_id", "vlan_link"])
def handle_vlan(self, command):
- '''
- auto eth0.222
- iface eth0.222 inet static
- address 10.10.10.1
- netmask 255.255.255.0
- hwaddress ether BC:76:4E:06:96:B3
- vlan-raw-device eth0
- '''
- interfaces = self._network_state.get('interfaces', {})
+ """
+ auto eth0.222
+ iface eth0.222 inet static
+ address 10.10.10.1
+ netmask 255.255.255.0
+ hwaddress ether BC:76:4E:06:96:B3
+ vlan-raw-device eth0
+ """
+ interfaces = self._network_state.get("interfaces", {})
self.handle_physical(command)
- iface = interfaces.get(command.get('name'), {})
- iface['vlan-raw-device'] = command.get('vlan_link')
- iface['vlan_id'] = command.get('vlan_id')
- interfaces.update({iface['name']: iface})
+ iface = interfaces.get(command.get("name"), {})
+ iface["vlan-raw-device"] = command.get("vlan_link")
+ iface["vlan_id"] = command.get("vlan_id")
+ interfaces.update({iface["name"]: iface})
- @ensure_command_keys(['name', 'bond_interfaces', 'params'])
+ @ensure_command_keys(["name", "bond_interfaces", "params"])
def handle_bond(self, command):
- '''
- #/etc/network/interfaces
- auto eth0
- iface eth0 inet manual
- bond-master bond0
- bond-mode 802.3ad
-
- auto eth1
- iface eth1 inet manual
- bond-master bond0
- bond-mode 802.3ad
-
- auto bond0
- iface bond0 inet static
- address 192.168.0.10
- gateway 192.168.0.1
- netmask 255.255.255.0
- bond-slaves none
- bond-mode 802.3ad
- bond-miimon 100
- bond-downdelay 200
- bond-updelay 200
- bond-lacp-rate 4
- '''
+ """
+ #/etc/network/interfaces
+ auto eth0
+ iface eth0 inet manual
+ bond-master bond0
+ bond-mode 802.3ad
+
+ auto eth1
+ iface eth1 inet manual
+ bond-master bond0
+ bond-mode 802.3ad
+
+ auto bond0
+ iface bond0 inet static
+ address 192.168.0.10
+ gateway 192.168.0.1
+ netmask 255.255.255.0
+ bond-slaves none
+ bond-mode 802.3ad
+ bond-miimon 100
+ bond-downdelay 200
+ bond-updelay 200
+ bond-lacp-rate 4
+ """
self.handle_physical(command)
- interfaces = self._network_state.get('interfaces')
- iface = interfaces.get(command.get('name'), {})
- for param, val in command.get('params').items():
+ interfaces = self._network_state.get("interfaces")
+ iface = interfaces.get(command.get("name"), {})
+ for param, val in command.get("params").items():
iface.update({param: val})
- iface.update({'bond-slaves': 'none'})
- self._network_state['interfaces'].update({iface['name']: iface})
+ iface.update({"bond-slaves": "none"})
+ self._network_state["interfaces"].update({iface["name"]: iface})
# handle bond slaves
- for ifname in command.get('bond_interfaces'):
+ for ifname in command.get("bond_interfaces"):
if ifname not in interfaces:
cmd = {
- 'name': ifname,
- 'type': 'bond',
+ "name": ifname,
+ "type": "bond",
}
# inject placeholder
self.handle_physical(cmd)
- interfaces = self._network_state.get('interfaces', {})
+ interfaces = self._network_state.get("interfaces", {})
bond_if = interfaces.get(ifname)
- bond_if['bond-master'] = command.get('name')
+ bond_if["bond-master"] = command.get("name")
# copy in bond config into slave
- for param, val in command.get('params').items():
+ for param, val in command.get("params").items():
bond_if.update({param: val})
- self._network_state['interfaces'].update({ifname: bond_if})
+ self._network_state["interfaces"].update({ifname: bond_if})
- @ensure_command_keys(['name', 'bridge_interfaces'])
+ @ensure_command_keys(["name", "bridge_interfaces"])
def handle_bridge(self, command):
- '''
+ """
auto br0
iface br0 inet static
address 10.10.10.1
@@ -469,89 +489,91 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
"bridge_stp",
"bridge_waitport",
]
- '''
+ """
# find one of the bridge port ifaces to get mac_addr
# handle bridge_slaves
- interfaces = self._network_state.get('interfaces', {})
- for ifname in command.get('bridge_interfaces'):
+ interfaces = self._network_state.get("interfaces", {})
+ for ifname in command.get("bridge_interfaces"):
if ifname in interfaces:
continue
cmd = {
- 'name': ifname,
+ "name": ifname,
}
# inject placeholder
self.handle_physical(cmd)
- interfaces = self._network_state.get('interfaces', {})
+ interfaces = self._network_state.get("interfaces", {})
self.handle_physical(command)
- iface = interfaces.get(command.get('name'), {})
- iface['bridge_ports'] = command['bridge_interfaces']
- for param, val in command.get('params', {}).items():
+ iface = interfaces.get(command.get("name"), {})
+ iface["bridge_ports"] = command["bridge_interfaces"]
+ for param, val in command.get("params", {}).items():
iface.update({param: val})
# convert value to boolean
- bridge_stp = iface.get('bridge_stp')
+ bridge_stp = iface.get("bridge_stp")
if bridge_stp is not None and type(bridge_stp) != bool:
- if bridge_stp in ['on', '1', 1]:
+ if bridge_stp in ["on", "1", 1]:
bridge_stp = True
- elif bridge_stp in ['off', '0', 0]:
+ elif bridge_stp in ["off", "0", 0]:
bridge_stp = False
else:
raise ValueError(
- 'Cannot convert bridge_stp value ({stp}) to'
- ' boolean'.format(stp=bridge_stp))
- iface.update({'bridge_stp': bridge_stp})
+ "Cannot convert bridge_stp value ({stp}) to"
+ " boolean".format(stp=bridge_stp)
+ )
+ iface.update({"bridge_stp": bridge_stp})
- interfaces.update({iface['name']: iface})
+ interfaces.update({iface["name"]: iface})
- @ensure_command_keys(['name'])
+ @ensure_command_keys(["name"])
def handle_infiniband(self, command):
self.handle_physical(command)
def _parse_dns(self, command):
nameservers = []
search = []
- if 'address' in command:
- addrs = command['address']
+ if "address" in command:
+ addrs = command["address"]
if not type(addrs) == list:
addrs = [addrs]
for addr in addrs:
nameservers.append(addr)
- if 'search' in command:
- paths = command['search']
+ if "search" in command:
+ paths = command["search"]
if not isinstance(paths, list):
paths = [paths]
for path in paths:
search.append(path)
return nameservers, search
- @ensure_command_keys(['address'])
+ @ensure_command_keys(["address"])
def handle_nameserver(self, command):
- dns = self._network_state.get('dns')
+ dns = self._network_state.get("dns")
nameservers, search = self._parse_dns(command)
- if 'interface' in command:
- self._interface_dns_map[command['interface']] = (
- nameservers, search
+ if "interface" in command:
+ self._interface_dns_map[command["interface"]] = (
+ nameservers,
+ search,
)
else:
- dns['nameservers'].extend(nameservers)
- dns['search'].extend(search)
+ dns["nameservers"].extend(nameservers)
+ dns["search"].extend(search)
- @ensure_command_keys(['address'])
+ @ensure_command_keys(["address"])
def _handle_individual_nameserver(self, command, iface):
- _iface = self._network_state.get('interfaces')
+ _iface = self._network_state.get("interfaces")
nameservers, search = self._parse_dns(command)
- _iface[iface]['dns'] = {'nameservers': nameservers, 'search': search}
+ _iface[iface]["dns"] = {"nameservers": nameservers, "search": search}
- @ensure_command_keys(['destination'])
+ @ensure_command_keys(["destination"])
def handle_route(self, command):
- self._network_state['routes'].append(_normalize_route(command))
+ self._network_state["routes"].append(_normalize_route(command))
# V2 handlers
def handle_bonds(self, command):
- '''
+ """
v2_command = {
bond0: {
'interfaces': ['interface0', 'interface1'],
@@ -578,12 +600,12 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
}
}
- '''
- self._handle_bond_bridge(command, cmd_type='bond')
+ """
+ self._handle_bond_bridge(command, cmd_type="bond")
def handle_bridges(self, command):
- '''
+ """
v2_command = {
br0: {
'interfaces': ['interface0', 'interface1'],
@@ -604,11 +626,11 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
}
}
- '''
- self._handle_bond_bridge(command, cmd_type='bridge')
+ """
+ self._handle_bond_bridge(command, cmd_type="bridge")
def handle_ethernets(self, command):
- '''
+ """
ethernets:
eno1:
match:
@@ -644,34 +666,38 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
{'type': 'dhcp4'}
]
}
- '''
+ """
for eth, cfg in command.items():
phy_cmd = {
- 'type': 'physical',
- 'name': cfg.get('set-name', eth),
+ "type": "physical",
+ "name": cfg.get("set-name", eth),
}
- match = cfg.get('match', {})
- mac_address = match.get('macaddress', None)
+ match = cfg.get("match", {})
+ mac_address = match.get("macaddress", None)
if not mac_address:
- LOG.debug('NetworkState Version2: missing "macaddress" info '
- 'in config entry: %s: %s', eth, str(cfg))
- phy_cmd['mac_address'] = mac_address
- driver = match.get('driver', None)
+ LOG.debug(
+ 'NetworkState Version2: missing "macaddress" info '
+ "in config entry: %s: %s",
+ eth,
+ str(cfg),
+ )
+ phy_cmd["mac_address"] = mac_address
+ driver = match.get("driver", None)
if driver:
- phy_cmd['params'] = {'driver': driver}
- for key in ['mtu', 'match', 'wakeonlan', 'accept-ra']:
+ phy_cmd["params"] = {"driver": driver}
+ for key in ["mtu", "match", "wakeonlan", "accept-ra"]:
if key in cfg:
phy_cmd[key] = cfg[key]
subnets = self._v2_to_v1_ipcfg(cfg)
if len(subnets) > 0:
- phy_cmd.update({'subnets': subnets})
+ phy_cmd.update({"subnets": subnets})
- LOG.debug('v2(ethernets) -> v1(physical):\n%s', phy_cmd)
+ LOG.debug("v2(ethernets) -> v1(physical):\n%s", phy_cmd)
self.handle_physical(phy_cmd)
def handle_vlans(self, command):
- '''
+ """
v2_vlans = {
'eth0.123': {
'id': 123,
@@ -687,41 +713,43 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
'vlan_id': 123,
'subnets': [{'type': 'dhcp4'}],
}
- '''
+ """
for vlan, cfg in command.items():
vlan_cmd = {
- 'type': 'vlan',
- 'name': vlan,
- 'vlan_id': cfg.get('id'),
- 'vlan_link': cfg.get('link'),
+ "type": "vlan",
+ "name": vlan,
+ "vlan_id": cfg.get("id"),
+ "vlan_link": cfg.get("link"),
}
- if 'mtu' in cfg:
- vlan_cmd['mtu'] = cfg['mtu']
+ if "mtu" in cfg:
+ vlan_cmd["mtu"] = cfg["mtu"]
subnets = self._v2_to_v1_ipcfg(cfg)
if len(subnets) > 0:
- vlan_cmd.update({'subnets': subnets})
- LOG.debug('v2(vlans) -> v1(vlan):\n%s', vlan_cmd)
+ vlan_cmd.update({"subnets": subnets})
+ LOG.debug("v2(vlans) -> v1(vlan):\n%s", vlan_cmd)
self.handle_vlan(vlan_cmd)
def handle_wifis(self, command):
- LOG.warning('Wifi configuration is only available to distros with'
- ' netplan rendering support.')
+ LOG.warning(
+ "Wifi configuration is only available to distros with"
+ " netplan rendering support."
+ )
def _v2_common(self, cfg):
- LOG.debug('v2_common: handling config:\n%s', cfg)
+ LOG.debug("v2_common: handling config:\n%s", cfg)
for iface, dev_cfg in cfg.items():
- if 'set-name' in dev_cfg:
- set_name_iface = dev_cfg.get('set-name')
+ if "set-name" in dev_cfg:
+ set_name_iface = dev_cfg.get("set-name")
if set_name_iface:
iface = set_name_iface
- if 'nameservers' in dev_cfg:
- search = dev_cfg.get('nameservers').get('search', [])
- dns = dev_cfg.get('nameservers').get('addresses', [])
- name_cmd = {'type': 'nameserver'}
+ if "nameservers" in dev_cfg:
+ search = dev_cfg.get("nameservers").get("search", [])
+ dns = dev_cfg.get("nameservers").get("addresses", [])
+ name_cmd = {"type": "nameserver"}
if len(search) > 0:
- name_cmd.update({'search': search})
+ name_cmd.update({"search": search})
if len(dns) > 0:
- name_cmd.update({'address': dns})
+ name_cmd.update({"address": dns})
self.handle_nameserver(name_cmd)
self._handle_individual_nameserver(name_cmd, iface)
@@ -729,98 +757,110 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
"""Common handler for bond and bridge types"""
# inverse mapping for v2 keynames to v1 keynames
- v2key_to_v1 = dict((v, k) for k, v in
- NET_CONFIG_TO_V2.get(cmd_type).items())
+ v2key_to_v1 = dict(
+ (v, k) for k, v in NET_CONFIG_TO_V2.get(cmd_type).items()
+ )
for item_name, item_cfg in command.items():
- item_params = dict((key, value) for (key, value) in
- item_cfg.items() if key not in
- NETWORK_V2_KEY_FILTER)
+ item_params = dict(
+ (key, value)
+ for (key, value) in item_cfg.items()
+ if key not in NETWORK_V2_KEY_FILTER
+ )
# we accept the fixed spelling, but write the old for compatibility
# Xenial does not have an updated netplan which supports the
# correct spelling. LP: #1756701
- params = item_params.get('parameters', {})
- grat_value = params.pop('gratuitous-arp', None)
+ params = item_params.get("parameters", {})
+ grat_value = params.pop("gratuitous-arp", None)
if grat_value:
- params['gratuitious-arp'] = grat_value
+ params["gratuitious-arp"] = grat_value
v1_cmd = {
- 'type': cmd_type,
- 'name': item_name,
- cmd_type + '_interfaces': item_cfg.get('interfaces'),
- 'params': dict((v2key_to_v1[k], v) for k, v in params.items())
+ "type": cmd_type,
+ "name": item_name,
+ cmd_type + "_interfaces": item_cfg.get("interfaces"),
+ "params": dict((v2key_to_v1[k], v) for k, v in params.items()),
}
- if 'mtu' in item_cfg:
- v1_cmd['mtu'] = item_cfg['mtu']
+ if "mtu" in item_cfg:
+ v1_cmd["mtu"] = item_cfg["mtu"]
subnets = self._v2_to_v1_ipcfg(item_cfg)
if len(subnets) > 0:
- v1_cmd.update({'subnets': subnets})
+ v1_cmd.update({"subnets": subnets})
- LOG.debug('v2(%s) -> v1(%s):\n%s', cmd_type, cmd_type, v1_cmd)
+ LOG.debug("v2(%s) -> v1(%s):\n%s", cmd_type, cmd_type, v1_cmd)
if cmd_type == "bridge":
self.handle_bridge(v1_cmd)
elif cmd_type == "bond":
self.handle_bond(v1_cmd)
else:
- raise ValueError('Unknown command type: {cmd_type}'.format(
- cmd_type=cmd_type))
+ raise ValueError(
+ "Unknown command type: {cmd_type}".format(
+ cmd_type=cmd_type
+ )
+ )
def _v2_to_v1_ipcfg(self, cfg):
"""Common ipconfig extraction from v2 to v1 subnets array."""
def _add_dhcp_overrides(overrides, subnet):
- if 'route-metric' in overrides:
- subnet['metric'] = overrides['route-metric']
+ if "route-metric" in overrides:
+ subnet["metric"] = overrides["route-metric"]
subnets = []
- if cfg.get('dhcp4'):
- subnet = {'type': 'dhcp4'}
- _add_dhcp_overrides(cfg.get('dhcp4-overrides', {}), subnet)
+ if cfg.get("dhcp4"):
+ subnet = {"type": "dhcp4"}
+ _add_dhcp_overrides(cfg.get("dhcp4-overrides", {}), subnet)
subnets.append(subnet)
- if cfg.get('dhcp6'):
- subnet = {'type': 'dhcp6'}
+ if cfg.get("dhcp6"):
+ subnet = {"type": "dhcp6"}
self.use_ipv6 = True
- _add_dhcp_overrides(cfg.get('dhcp6-overrides', {}), subnet)
+ _add_dhcp_overrides(cfg.get("dhcp6-overrides", {}), subnet)
subnets.append(subnet)
gateway4 = None
gateway6 = None
nameservers = {}
- for address in cfg.get('addresses', []):
+ for address in cfg.get("addresses", []):
subnet = {
- 'type': 'static',
- 'address': address,
+ "type": "static",
+ "address": address,
}
if ":" in address:
- if 'gateway6' in cfg and gateway6 is None:
- gateway6 = cfg.get('gateway6')
- subnet.update({'gateway': gateway6})
+ if "gateway6" in cfg and gateway6 is None:
+ gateway6 = cfg.get("gateway6")
+ subnet.update({"gateway": gateway6})
else:
- if 'gateway4' in cfg and gateway4 is None:
- gateway4 = cfg.get('gateway4')
- subnet.update({'gateway': gateway4})
+ if "gateway4" in cfg and gateway4 is None:
+ gateway4 = cfg.get("gateway4")
+ subnet.update({"gateway": gateway4})
- if 'nameservers' in cfg and not nameservers:
- addresses = cfg.get('nameservers').get('addresses')
+ if "nameservers" in cfg and not nameservers:
+ addresses = cfg.get("nameservers").get("addresses")
if addresses:
- nameservers['dns_nameservers'] = addresses
- search = cfg.get('nameservers').get('search')
+ nameservers["dns_nameservers"] = addresses
+ search = cfg.get("nameservers").get("search")
if search:
- nameservers['dns_search'] = search
+ nameservers["dns_search"] = search
subnet.update(nameservers)
subnets.append(subnet)
routes = []
- for route in cfg.get('routes', []):
- routes.append(_normalize_route(
- {'destination': route.get('to'), 'gateway': route.get('via')}))
+ for route in cfg.get("routes", []):
+ routes.append(
+ _normalize_route(
+ {
+ "destination": route.get("to"),
+ "gateway": route.get("via"),
+ }
+ )
+ )
# v2 routes are bound to the interface, in v1 we add them under
# the first subnet since there isn't an equivalent interface level.
if len(subnets) and len(routes):
- subnets[0]['routes'] = routes
+ subnets[0]["routes"] = routes
return subnets
@@ -830,18 +870,25 @@ def _normalize_subnet(subnet):
subnet = copy.deepcopy(subnet)
normal_subnet = dict((k, v) for k, v in subnet.items() if v)
- if subnet.get('type') in ('static', 'static6'):
+ if subnet.get("type") in ("static", "static6"):
normal_subnet.update(
- _normalize_net_keys(normal_subnet, address_keys=(
- 'address', 'ip_address',)))
- normal_subnet['routes'] = [_normalize_route(r)
- for r in subnet.get('routes', [])]
+ _normalize_net_keys(
+ normal_subnet,
+ address_keys=(
+ "address",
+ "ip_address",
+ ),
+ )
+ )
+ normal_subnet["routes"] = [
+ _normalize_route(r) for r in subnet.get("routes", [])
+ ]
def listify(snet, name):
if name in snet and not isinstance(snet[name], list):
snet[name] = snet[name].split()
- for k in ('dns_search', 'dns_nameservers'):
+ for k in ("dns_search", "dns_nameservers"):
listify(normal_subnet, k)
return normal_subnet
@@ -865,15 +912,16 @@ def _normalize_net_keys(network, address_keys=()):
addr_key = key
break
if not addr_key:
- message = (
- 'No config network address keys [%s] found in %s' %
- (','.join(address_keys), network))
+ message = "No config network address keys [%s] found in %s" % (
+ ",".join(address_keys),
+ network,
+ )
LOG.error(message)
raise ValueError(message)
addr = net.get(addr_key)
ipv6 = is_ipv6_addr(addr)
- netmask = net.get('netmask')
+ netmask = net.get("netmask")
if "/" in addr:
addr_part, _, maybe_prefix = addr.partition("/")
net[addr_key] = addr_part
@@ -884,23 +932,26 @@ def _normalize_net_keys(network, address_keys=()):
prefix = mask_to_net_prefix(maybe_prefix)
elif netmask:
prefix = mask_to_net_prefix(netmask)
- elif 'prefix' in net:
- prefix = int(net['prefix'])
+ elif "prefix" in net:
+ prefix = int(net["prefix"])
else:
prefix = 64 if ipv6 else 24
- if 'prefix' in net and str(net['prefix']) != str(prefix):
- LOG.warning("Overwriting existing 'prefix' with '%s' in "
- "network info: %s", prefix, net)
- net['prefix'] = prefix
+ if "prefix" in net and str(net["prefix"]) != str(prefix):
+ LOG.warning(
+ "Overwriting existing 'prefix' with '%s' in network info: %s",
+ prefix,
+ net,
+ )
+ net["prefix"] = prefix
if ipv6:
# TODO: we could/maybe should add this back with the very uncommon
# 'netmask' for ipv6. We need a 'net_prefix_to_ipv6_mask' for that.
- if 'netmask' in net:
- del net['netmask']
+ if "netmask" in net:
+ del net["netmask"]
else:
- net['netmask'] = net_prefix_to_ipv4_mask(net['prefix'])
+ net["netmask"] = net_prefix_to_ipv4_mask(net["prefix"])
return net
@@ -913,25 +964,28 @@ def _normalize_route(route):
'prefix': the network prefix for address as an integer.
'metric': integer metric (only if present in input).
'netmask': netmask (string) equivalent to prefix iff network is ipv4.
- """
+ """
# Prune None-value keys. Specifically allow 0 (a valid metric).
- normal_route = dict((k, v) for k, v in route.items()
- if v not in ("", None))
- if 'destination' in normal_route:
- normal_route['network'] = normal_route['destination']
- del normal_route['destination']
+ normal_route = dict(
+ (k, v) for k, v in route.items() if v not in ("", None)
+ )
+ if "destination" in normal_route:
+ normal_route["network"] = normal_route["destination"]
+ del normal_route["destination"]
normal_route.update(
_normalize_net_keys(
- normal_route, address_keys=('network', 'destination')))
+ normal_route, address_keys=("network", "destination")
+ )
+ )
- metric = normal_route.get('metric')
+ metric = normal_route.get("metric")
if metric:
try:
- normal_route['metric'] = int(metric)
+ normal_route["metric"] = int(metric)
except ValueError as e:
raise TypeError(
- 'Route config metric {} is not an integer'.format(metric)
+ "Route config metric {} is not an integer".format(metric)
) from e
return normal_route
@@ -952,10 +1006,10 @@ def subnet_is_ipv6(subnet):
"""Common helper for checking network_state subnets for ipv6."""
# 'static6', 'dhcp6', 'ipv6_dhcpv6-stateful', 'ipv6_dhcpv6-stateless' or
# 'ipv6_slaac'
- if subnet['type'].endswith('6') or subnet['type'] in IPV6_DYNAMIC_TYPES:
+ if subnet["type"].endswith("6") or subnet["type"] in IPV6_DYNAMIC_TYPES:
# This is a request either static6 type or DHCPv6.
return True
- elif subnet['type'] == 'static' and is_ipv6_addr(subnet.get('address')):
+ elif subnet["type"] == "static" and is_ipv6_addr(subnet.get("address")):
return True
return False
@@ -967,7 +1021,8 @@ def net_prefix_to_ipv4_mask(prefix):
24 -> "255.255.255.0"
Also supports input as a string."""
mask = socket.inet_ntoa(
- struct.pack(">I", (0xffffffff << (32 - int(prefix)) & 0xffffffff)))
+ struct.pack(">I", (0xFFFFFFFF << (32 - int(prefix)) & 0xFFFFFFFF))
+ )
return mask
@@ -990,14 +1045,14 @@ def ipv4_mask_to_net_prefix(mask):
else:
raise TypeError("mask '%s' is not a string or int")
- if '.' not in mask:
+ if "." not in mask:
raise ValueError("netmask '%s' does not contain a '.'" % mask)
toks = mask.split(".")
if len(toks) != 4:
raise ValueError("netmask '%s' had only %d parts" % (mask, len(toks)))
- return sum([bin(int(x)).count('1') for x in toks])
+ return sum([bin(int(x)).count("1") for x in toks])
def ipv6_mask_to_net_prefix(mask):
@@ -1017,14 +1072,30 @@ def ipv6_mask_to_net_prefix(mask):
else:
raise TypeError("mask '%s' is not a string or int")
- if ':' not in mask:
+ if ":" not in mask:
raise ValueError("mask '%s' does not have a ':'")
- bitCount = [0, 0x8000, 0xc000, 0xe000, 0xf000, 0xf800, 0xfc00, 0xfe00,
- 0xff00, 0xff80, 0xffc0, 0xffe0, 0xfff0, 0xfff8, 0xfffc,
- 0xfffe, 0xffff]
+ bitCount = [
+ 0,
+ 0x8000,
+ 0xC000,
+ 0xE000,
+ 0xF000,
+ 0xF800,
+ 0xFC00,
+ 0xFE00,
+ 0xFF00,
+ 0xFF80,
+ 0xFFC0,
+ 0xFFE0,
+ 0xFFF0,
+ 0xFFF8,
+ 0xFFFC,
+ 0xFFFE,
+ 0xFFFF,
+ ]
prefix = 0
- for word in mask.split(':'):
+ for word in mask.split(":"):
if not word or int(word, 16) == 0:
break
prefix += bitCount.index(int(word, 16))
@@ -1052,11 +1123,12 @@ def mask_and_ipv4_to_bcast_addr(mask, ip):
"""Calculate the broadcast address from the subnet mask and ip addr.
Supports ipv4 only."""
- ip_bin = int(''.join([bin(int(x) + 256)[3:] for x in ip.split('.')]), 2)
+ ip_bin = int("".join([bin(int(x) + 256)[3:] for x in ip.split(".")]), 2)
mask_dec = ipv4_mask_to_net_prefix(mask)
- bcast_bin = ip_bin | (2**(32 - mask_dec) - 1)
- bcast_str = '.'.join([str(bcast_bin >> (i << 3) & 0xFF)
- for i in range(4)[::-1]])
+ bcast_bin = ip_bin | (2 ** (32 - mask_dec) - 1)
+ bcast_str = ".".join(
+ [str(bcast_bin >> (i << 3) & 0xFF) for i in range(4)[::-1]]
+ )
return bcast_str
@@ -1066,8 +1138,8 @@ def parse_net_config_data(net_config, skip_broken=True) -> NetworkState:
:param net_config: curtin network config dict
"""
state = None
- version = net_config.get('version')
- config = net_config.get('config')
+ version = net_config.get("version")
+ config = net_config.get("config")
if version == 2:
# v2 does not have explicit 'config' key so we
# pass the whole net-config as-is
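The _v2_to_v1_ipcfg and _normalize_route code reformatted in this file translates netplan v2 route entries ('to'/'via') into v1 route dicts ('network'/'gateway'). A minimal standalone sketch of that mapping, simplified to ignore the prefix normalization, metric coercion and DHCP-override handling the real functions perform:

def v2_routes_to_v1(v2_cfg):
    """Illustrative only: map v2 'routes' entries onto v1-style route dicts."""
    routes = []
    for route in v2_cfg.get("routes", []):
        # v2 names the destination 'to' and the gateway 'via'; v1 calls them
        # 'network' and 'gateway' (the real code builds a 'destination' key
        # first and renames it inside _normalize_route).
        routes.append({"network": route.get("to"), "gateway": route.get("via")})
    return routes

print(v2_routes_to_v1({"routes": [{"to": "0.0.0.0/0", "via": "10.0.0.1"}]}))
# -> [{'network': '0.0.0.0/0', 'gateway': '10.0.0.1'}]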
diff --git a/cloudinit/net/networkd.py b/cloudinit/net/networkd.py
index c97c18f6..3bbeb284 100644
--- a/cloudinit/net/networkd.py
+++ b/cloudinit/net/networkd.py
@@ -8,56 +8,57 @@
# This file is part of cloud-init. See LICENSE file for license information.
import os
+from collections import OrderedDict
+from cloudinit import log as logging
+from cloudinit import subp, util
from . import renderer
-from cloudinit import util
-from cloudinit import subp
-from cloudinit import log as logging
-from collections import OrderedDict
LOG = logging.getLogger(__name__)
class CfgParser:
def __init__(self):
- self.conf_dict = OrderedDict({
- 'Match': [],
- 'Link': [],
- 'Network': [],
- 'DHCPv4': [],
- 'DHCPv6': [],
- 'Address': [],
- 'Route': [],
- })
+ self.conf_dict = OrderedDict(
+ {
+ "Match": [],
+ "Link": [],
+ "Network": [],
+ "DHCPv4": [],
+ "DHCPv6": [],
+ "Address": [],
+ "Route": [],
+ }
+ )
def update_section(self, sec, key, val):
for k in self.conf_dict.keys():
if k == sec:
- self.conf_dict[k].append(key+'='+str(val))
+ self.conf_dict[k].append(key + "=" + str(val))
# remove duplicates from list
self.conf_dict[k] = list(dict.fromkeys(self.conf_dict[k]))
self.conf_dict[k].sort()
def get_final_conf(self):
- contents = ''
+ contents = ""
for k, v in sorted(self.conf_dict.items()):
if not v:
continue
- contents += '['+k+']\n'
+ contents += "[" + k + "]\n"
for e in sorted(v):
- contents += e + '\n'
- contents += '\n'
+ contents += e + "\n"
+ contents += "\n"
return contents
def dump_data(self, target_fn):
if not target_fn:
- LOG.warning('Target file not given')
+ LOG.warning("Target file not given")
return
contents = self.get_final_conf()
- LOG.debug('Final content: %s', contents)
+ LOG.debug("Final content: %s", contents)
util.write_file(target_fn, contents)
@@ -72,17 +73,19 @@ class Renderer(renderer.Renderer):
def __init__(self, config=None):
if not config:
config = {}
- self.resolve_conf_fn = config.get('resolve_conf_fn',
- '/etc/systemd/resolved.conf')
- self.network_conf_dir = config.get('network_conf_dir',
- '/etc/systemd/network/')
+ self.resolve_conf_fn = config.get(
+ "resolve_conf_fn", "/etc/systemd/resolved.conf"
+ )
+ self.network_conf_dir = config.get(
+ "network_conf_dir", "/etc/systemd/network/"
+ )
def generate_match_section(self, iface, cfg):
- sec = 'Match'
+ sec = "Match"
match_dict = {
- 'name': 'Name',
- 'driver': 'Driver',
- 'mac_address': 'MACAddress'
+ "name": "Name",
+ "driver": "Driver",
+ "mac_address": "MACAddress",
}
if not iface:
@@ -92,125 +95,126 @@ class Renderer(renderer.Renderer):
if k in iface and iface[k]:
cfg.update_section(sec, v, iface[k])
- return iface['name']
+ return iface["name"]
def generate_link_section(self, iface, cfg):
- sec = 'Link'
+ sec = "Link"
if not iface:
return
- if 'mtu' in iface and iface['mtu']:
- cfg.update_section(sec, 'MTUBytes', iface['mtu'])
+ if "mtu" in iface and iface["mtu"]:
+ cfg.update_section(sec, "MTUBytes", iface["mtu"])
def parse_routes(self, conf, cfg):
- sec = 'Route'
+ sec = "Route"
route_cfg_map = {
- 'gateway': 'Gateway',
- 'network': 'Destination',
- 'metric': 'Metric',
+ "gateway": "Gateway",
+ "network": "Destination",
+ "metric": "Metric",
}
# prefix is derived using netmask by network_state
- prefix = ''
- if 'prefix' in conf:
- prefix = '/' + str(conf['prefix'])
+ prefix = ""
+ if "prefix" in conf:
+ prefix = "/" + str(conf["prefix"])
for k, v in conf.items():
if k not in route_cfg_map:
continue
- if k == 'network':
+ if k == "network":
v += prefix
cfg.update_section(sec, route_cfg_map[k], v)
def parse_subnets(self, iface, cfg):
- dhcp = 'no'
- sec = 'Network'
- for e in iface.get('subnets', []):
- t = e['type']
- if t == 'dhcp4' or t == 'dhcp':
- if dhcp == 'no':
- dhcp = 'ipv4'
- elif dhcp == 'ipv6':
- dhcp = 'yes'
- elif t == 'dhcp6':
- if dhcp == 'no':
- dhcp = 'ipv6'
- elif dhcp == 'ipv4':
- dhcp = 'yes'
- if 'routes' in e and e['routes']:
- for i in e['routes']:
+ dhcp = "no"
+ sec = "Network"
+ for e in iface.get("subnets", []):
+ t = e["type"]
+ if t == "dhcp4" or t == "dhcp":
+ if dhcp == "no":
+ dhcp = "ipv4"
+ elif dhcp == "ipv6":
+ dhcp = "yes"
+ elif t == "dhcp6":
+ if dhcp == "no":
+ dhcp = "ipv6"
+ elif dhcp == "ipv4":
+ dhcp = "yes"
+ if "routes" in e and e["routes"]:
+ for i in e["routes"]:
self.parse_routes(i, cfg)
- if 'address' in e:
+ if "address" in e:
subnet_cfg_map = {
- 'address': 'Address',
- 'gateway': 'Gateway',
- 'dns_nameservers': 'DNS',
- 'dns_search': 'Domains',
+ "address": "Address",
+ "gateway": "Gateway",
+ "dns_nameservers": "DNS",
+ "dns_search": "Domains",
}
for k, v in e.items():
- if k == 'address':
- if 'prefix' in e:
- v += '/' + str(e['prefix'])
- cfg.update_section('Address', subnet_cfg_map[k], v)
- elif k == 'gateway':
- cfg.update_section('Route', subnet_cfg_map[k], v)
- elif k == 'dns_nameservers' or k == 'dns_search':
- cfg.update_section(sec, subnet_cfg_map[k], ' '.join(v))
-
- cfg.update_section(sec, 'DHCP', dhcp)
-
- if (dhcp in ['ipv6', 'yes'] and
- isinstance(iface.get('accept-ra', ''), bool)):
- cfg.update_section(sec, 'IPv6AcceptRA', iface['accept-ra'])
+ if k == "address":
+ if "prefix" in e:
+ v += "/" + str(e["prefix"])
+ cfg.update_section("Address", subnet_cfg_map[k], v)
+ elif k == "gateway":
+ cfg.update_section("Route", subnet_cfg_map[k], v)
+ elif k == "dns_nameservers" or k == "dns_search":
+ cfg.update_section(sec, subnet_cfg_map[k], " ".join(v))
+
+ cfg.update_section(sec, "DHCP", dhcp)
+
+ if dhcp in ["ipv6", "yes"] and isinstance(
+ iface.get("accept-ra", ""), bool
+ ):
+ cfg.update_section(sec, "IPv6AcceptRA", iface["accept-ra"])
# This is to accommodate extra keys present in VMware config
def dhcp_domain(self, d, cfg):
- for item in ['dhcp4domain', 'dhcp6domain']:
+ for item in ["dhcp4domain", "dhcp6domain"]:
if item not in d:
continue
ret = str(d[item]).casefold()
try:
ret = util.translate_bool(ret)
- ret = 'yes' if ret else 'no'
+ ret = "yes" if ret else "no"
except ValueError:
- if ret != 'route':
- LOG.warning('Invalid dhcp4domain value - %s', ret)
- ret = 'no'
- if item == 'dhcp4domain':
- section = 'DHCPv4'
+ if ret != "route":
+ LOG.warning("Invalid dhcp4domain value - %s", ret)
+ ret = "no"
+ if item == "dhcp4domain":
+ section = "DHCPv4"
else:
- section = 'DHCPv6'
- cfg.update_section(section, 'UseDomains', ret)
+ section = "DHCPv6"
+ cfg.update_section(section, "UseDomains", ret)
def parse_dns(self, iface, cfg, ns):
- sec = 'Network'
+ sec = "Network"
dns_cfg_map = {
- 'search': 'Domains',
- 'nameservers': 'DNS',
- 'addresses': 'DNS',
+ "search": "Domains",
+ "nameservers": "DNS",
+ "addresses": "DNS",
}
- dns = iface.get('dns')
+ dns = iface.get("dns")
if not dns and ns.version == 1:
dns = {
- 'search': ns.dns_searchdomains,
- 'nameservers': ns.dns_nameservers,
+ "search": ns.dns_searchdomains,
+ "nameservers": ns.dns_nameservers,
}
elif not dns and ns.version == 2:
return
for k, v in dns_cfg_map.items():
if k in dns and dns[k]:
- cfg.update_section(sec, v, ' '.join(dns[k]))
+ cfg.update_section(sec, v, " ".join(dns[k]))
def create_network_file(self, link, conf, nwk_dir):
- net_fn_owner = 'systemd-network'
+ net_fn_owner = "systemd-network"
- LOG.debug('Setting Networking Config for %s', link)
+ LOG.debug("Setting Networking Config for %s", link)
- net_fn = nwk_dir + '10-cloud-init-' + link + '.network'
+ net_fn = nwk_dir + "10-cloud-init-" + link + ".network"
util.write_file(net_fn, conf)
util.chownbyname(net_fn, net_fn_owner, net_fn_owner)
@@ -239,7 +243,7 @@ class Renderer(renderer.Renderer):
self.parse_routes(route, cfg)
if ns.version == 2:
- name = iface['name']
+ name = iface["name"]
# network state doesn't give dhcp domain info
# using ns.config as a workaround here
@@ -249,13 +253,13 @@ class Renderer(renderer.Renderer):
# set-name value that matches the current name, then update the
# current name to the device's name. That will be the value in
# the ns.config['ethernets'] dict below.
- for dev_name, dev_cfg in ns.config['ethernets'].items():
- if 'set-name' in dev_cfg:
- if dev_cfg.get('set-name') == name:
+ for dev_name, dev_cfg in ns.config["ethernets"].items():
+ if "set-name" in dev_cfg:
+ if dev_cfg.get("set-name") == name:
name = dev_name
break
- self.dhcp_domain(ns.config['ethernets'][name], cfg)
+ self.dhcp_domain(ns.config["ethernets"][name], cfg)
ret_dict.update({link: cfg.get_final_conf()})
@@ -263,8 +267,8 @@ class Renderer(renderer.Renderer):
def available(target=None):
- expected = ['ip', 'systemctl']
- search = ['/usr/sbin', '/bin']
+ expected = ["ip", "systemctl"]
+ search = ["/usr/sbin", "/bin"]
for p in expected:
if not subp.which(p, search=search, target=target):
return False
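The CfgParser helper in this file accumulates systemd-networkd settings per section and then renders them as INI-style text. A rough standalone sketch of that rendering step (simplified: no de-duplication, and sections are emitted in insertion order rather than sorted as get_final_conf does):

from collections import OrderedDict

conf = OrderedDict([("Match", []), ("Network", [])])
conf["Match"].append("Name=eth0")
conf["Network"].append("DHCP=ipv4")

contents = ""
for section, entries in conf.items():
    if not entries:
        continue
    contents += "[" + section + "]\n"   # section header, e.g. [Match]
    for entry in sorted(entries):
        contents += entry + "\n"        # key=value lines
    contents += "\n"

print(contents)
# [Match]
# Name=eth0
#
# [Network]
# DHCP=ipv4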
diff --git a/cloudinit/net/openbsd.py b/cloudinit/net/openbsd.py
index d87d8a4f..da50d2ba 100644
--- a/cloudinit/net/openbsd.py
+++ b/cloudinit/net/openbsd.py
@@ -1,34 +1,31 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import log as logging
-from cloudinit import subp
-from cloudinit import util
import cloudinit.net.bsd
+from cloudinit import log as logging
+from cloudinit import subp, util
LOG = logging.getLogger(__name__)
class Renderer(cloudinit.net.bsd.BSDRenderer):
-
def write_config(self):
for device_name, v in self.interface_configurations.items():
- if_file = 'etc/hostname.{}'.format(device_name)
+ if_file = "etc/hostname.{}".format(device_name)
fn = subp.target_path(self.target, if_file)
if device_name in self.dhcp_interfaces():
- content = 'dhcp\n'
+ content = "dhcp\n"
elif isinstance(v, dict):
try:
content = "inet {address} {netmask}".format(
- address=v['address'],
- netmask=v['netmask']
+ address=v["address"], netmask=v["netmask"]
)
except KeyError:
LOG.error(
- "Invalid static configuration for %s",
- device_name)
+ "Invalid static configuration for %s", device_name
+ )
mtu = v.get("mtu")
if mtu:
- content += (' mtu %d' % mtu)
+ content += " mtu %d" % mtu
content += "\n"
util.write_file(fn, content)
@@ -36,16 +33,16 @@ class Renderer(cloudinit.net.bsd.BSDRenderer):
if not self._postcmds:
LOG.debug("openbsd generate postcmd disabled")
return
- subp.subp(['pkill', 'dhclient'], capture=True, rcs=[0, 1])
- subp.subp(['route', 'del', 'default'], capture=True, rcs=[0, 1])
- subp.subp(['route', 'flush', 'default'], capture=True, rcs=[0, 1])
- subp.subp(['sh', '/etc/netstart'], capture=True)
+ subp.subp(["pkill", "dhclient"], capture=True, rcs=[0, 1])
+ subp.subp(["route", "del", "default"], capture=True, rcs=[0, 1])
+ subp.subp(["route", "flush", "default"], capture=True, rcs=[0, 1])
+ subp.subp(["sh", "/etc/netstart"], capture=True)
def set_route(self, network, netmask, gateway):
- if network == '0.0.0.0':
- if_file = 'etc/mygate'
+ if network == "0.0.0.0":
+ if_file = "etc/mygate"
fn = subp.target_path(self.target, if_file)
- content = gateway + '\n'
+ content = gateway + "\n"
util.write_file(fn, content)
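For OpenBSD, write_config emits one /etc/hostname.<interface> file per device: the literal word dhcp for DHCP-managed interfaces, or an inet line built from the static address and netmask. A small illustrative helper (the helper name is made up; the real renderer also appends an mtu suffix and resolves paths with subp.target_path):

def hostname_if_content(v, use_dhcp):
    # Hypothetical helper mirroring the content choices made in write_config.
    if use_dhcp:
        return "dhcp\n"
    return "inet {address} {netmask}\n".format(
        address=v["address"], netmask=v["netmask"]
    )

print(hostname_if_content({"address": "192.0.2.10", "netmask": "255.255.255.0"}, False))
# inet 192.0.2.10 255.255.255.0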
diff --git a/cloudinit/net/renderer.py b/cloudinit/net/renderer.py
index 54a83b51..34b74b80 100644
--- a/cloudinit/net/renderer.py
+++ b/cloudinit/net/renderer.py
@@ -13,18 +13,18 @@ from cloudinit.net.udev import generate_udev_rule
def filter_by_type(match_type):
- return lambda iface: match_type == iface['type']
+ return lambda iface: match_type == iface["type"]
def filter_by_name(match_name):
- return lambda iface: match_name == iface['name']
+ return lambda iface: match_name == iface["name"]
def filter_by_attr(match_name):
return lambda iface: (match_name in iface and iface[match_name])
-filter_by_physical = filter_by_type('physical')
+filter_by_physical = filter_by_type("physical")
class Renderer(object):
@@ -39,22 +39,27 @@ class Renderer(object):
content = io.StringIO()
for iface in network_state.iter_interfaces(filter_by_physical):
# for physical interfaces write out a persist net udev rule
- if 'name' in iface and iface.get('mac_address'):
- driver = iface.get('driver', None)
- content.write(generate_udev_rule(iface['name'],
- iface['mac_address'],
- driver=driver))
+ if "name" in iface and iface.get("mac_address"):
+ driver = iface.get("driver", None)
+ content.write(
+ generate_udev_rule(
+ iface["name"], iface["mac_address"], driver=driver
+ )
+ )
return content.getvalue()
@abc.abstractmethod
- def render_network_state(self, network_state, templates=None,
- target=None):
+ def render_network_state(self, network_state, templates=None, target=None):
"""Render network state."""
- def render_network_config(self, network_config, templates=None,
- target=None):
+ def render_network_config(
+ self, network_config, templates=None, target=None
+ ):
return self.render_network_state(
network_state=parse_net_config_data(network_config),
- templates=templates, target=target)
+ templates=templates,
+ target=target,
+ )
+
# vi: ts=4 expandtab
diff --git a/cloudinit/net/renderers.py b/cloudinit/net/renderers.py
index 822b45de..c755f04c 100644
--- a/cloudinit/net/renderers.py
+++ b/cloudinit/net/renderers.py
@@ -2,15 +2,17 @@
from typing import List, Tuple, Type
-from . import eni
-from . import freebsd
-from . import netbsd
-from . import netplan
-from . import networkd
-from . import renderer
-from . import RendererNotFoundError
-from . import openbsd
-from . import sysconfig
+from . import (
+ RendererNotFoundError,
+ eni,
+ freebsd,
+ netbsd,
+ netplan,
+ networkd,
+ openbsd,
+ renderer,
+ sysconfig,
+)
NAME_TO_RENDERER = {
"eni": eni,
@@ -22,8 +24,15 @@ NAME_TO_RENDERER = {
"sysconfig": sysconfig,
}
-DEFAULT_PRIORITY = ["eni", "sysconfig", "netplan", "freebsd",
- "netbsd", "openbsd", "networkd"]
+DEFAULT_PRIORITY = [
+ "eni",
+ "sysconfig",
+ "netplan",
+ "freebsd",
+ "netbsd",
+ "openbsd",
+ "networkd",
+]
def search(
@@ -37,7 +46,8 @@ def search(
unknown = [i for i in priority if i not in available]
if unknown:
raise ValueError(
- "Unknown renderers provided in priority list: %s" % unknown)
+ "Unknown renderers provided in priority list: %s" % unknown
+ )
found = []
for name in priority:
@@ -60,8 +70,10 @@ def select(priority=None, target=None) -> Tuple[str, Type[renderer.Renderer]]:
if target and target != "/":
tmsg = " in target=%s" % target
raise RendererNotFoundError(
- "No available network renderers found%s. Searched "
- "through list: %s" % (tmsg, priority))
+ "No available network renderers found%s. Searched through list: %s"
+ % (tmsg, priority)
+ )
return found[0]
+
# vi: ts=4 expandtab
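renderers.py keeps a fixed DEFAULT_PRIORITY list and picks the first renderer whose module reports itself as available on the target. A condensed sketch of that selection (assumption: is_available stands in for each module's available() check, and a plain RuntimeError replaces RendererNotFoundError):

DEFAULT_PRIORITY = [
    "eni", "sysconfig", "netplan", "freebsd", "netbsd", "openbsd", "networkd",
]

def select_first_available(priority, is_available):
    found = [name for name in priority if is_available(name)]
    if not found:
        raise RuntimeError(
            "No available network renderers found. Searched through list: %s"
            % priority
        )
    return found[0]

print(select_first_available(DEFAULT_PRIORITY, lambda name: name == "netplan"))
# netplan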
diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
index 85342219..997907bb 100644
--- a/cloudinit/net/sysconfig.py
+++ b/cloudinit/net/sysconfig.py
@@ -8,23 +8,35 @@ import re
from configobj import ConfigObj
from cloudinit import log as logging
-from cloudinit import util
-from cloudinit import subp
-from cloudinit.distros.parsers import networkmanager_conf
-from cloudinit.distros.parsers import resolv_conf
+from cloudinit import subp, util
+from cloudinit.distros.parsers import networkmanager_conf, resolv_conf
from . import renderer
from .network_state import (
- is_ipv6_addr, net_prefix_to_ipv4_mask, subnet_is_ipv6, IPV6_DYNAMIC_TYPES)
+ IPV6_DYNAMIC_TYPES,
+ is_ipv6_addr,
+ net_prefix_to_ipv4_mask,
+ subnet_is_ipv6,
+)
LOG = logging.getLogger(__name__)
-KNOWN_DISTROS = ['almalinux', 'centos', 'cloudlinux', 'eurolinux', 'fedora',
- 'miraclelinux', 'openEuler', 'rhel', 'rocky', 'suse',
- 'virtuozzo']
+KNOWN_DISTROS = [
+ "almalinux",
+ "centos",
+ "cloudlinux",
+ "eurolinux",
+ "fedora",
+ "miraclelinux",
+ "openEuler",
+ "rhel",
+ "rocky",
+ "suse",
+ "virtuozzo",
+]
NM_CFG_FILE = "/etc/NetworkManager/NetworkManager.conf"
-def _make_header(sep='#'):
+def _make_header(sep="#"):
lines = [
"Created by cloud-init on instance boot automatically, do not edit.",
"",
@@ -38,8 +50,8 @@ def _make_header(sep='#'):
def _is_default_route(route):
- default_nets = ('::', '0.0.0.0')
- return route['prefix'] == 0 and route['network'] in default_nets
+ default_nets = ("::", "0.0.0.0")
+ return route["prefix"] == 0 and route["network"] in default_nets
def _quote_value(value):
@@ -56,19 +68,19 @@ def _quote_value(value):
def enable_ifcfg_rh(path):
"""Add ifcfg-rh to NetworkManager.cfg plugins if main section is present"""
config = ConfigObj(path)
- if 'main' in config:
- if 'plugins' in config['main']:
- if 'ifcfg-rh' in config['main']['plugins']:
+ if "main" in config:
+ if "plugins" in config["main"]:
+ if "ifcfg-rh" in config["main"]["plugins"]:
return
else:
- config['main']['plugins'] = []
+ config["main"]["plugins"] = []
- if isinstance(config['main']['plugins'], list):
- config['main']['plugins'].append('ifcfg-rh')
+ if isinstance(config["main"]["plugins"], list):
+ config["main"]["plugins"].append("ifcfg-rh")
else:
- config['main']['plugins'] = [config['main']['plugins'], 'ifcfg-rh']
+ config["main"]["plugins"] = [config["main"]["plugins"], "ifcfg-rh"]
config.write()
- LOG.debug('Enabled ifcfg-rh NetworkManager plugins')
+ LOG.debug("Enabled ifcfg-rh NetworkManager plugins")
class ConfigMap(object):
@@ -76,8 +88,8 @@ class ConfigMap(object):
# Why does redhat prefer yes/no to true/false??
_bool_map = {
- True: 'yes',
- False: 'no',
+ True: "yes",
+ False: "no",
}
def __init__(self):
@@ -128,8 +140,7 @@ class ConfigMap(object):
class Route(ConfigMap):
"""Represents a route configuration."""
- def __init__(self, route_name, base_sysconf_dir,
- ipv4_tpl, ipv6_tpl):
+ def __init__(self, route_name, base_sysconf_dir, ipv4_tpl, ipv6_tpl):
super(Route, self).__init__()
self.last_idx = 1
self.has_set_default_ipv4 = False
@@ -140,8 +151,12 @@ class Route(ConfigMap):
self.route_fn_tpl_ipv6 = ipv6_tpl
def copy(self):
- r = Route(self._route_name, self._base_sysconf_dir,
- self.route_fn_tpl_ipv4, self.route_fn_tpl_ipv6)
+ r = Route(
+ self._route_name,
+ self._base_sysconf_dir,
+ self.route_fn_tpl_ipv4,
+ self.route_fn_tpl_ipv6,
+ )
r._conf = self._conf.copy()
r.last_idx = self.last_idx
r.has_set_default_ipv4 = self.has_set_default_ipv4
@@ -150,20 +165,22 @@ class Route(ConfigMap):
@property
def path_ipv4(self):
- return self.route_fn_tpl_ipv4 % ({'base': self._base_sysconf_dir,
- 'name': self._route_name})
+ return self.route_fn_tpl_ipv4 % (
+ {"base": self._base_sysconf_dir, "name": self._route_name}
+ )
@property
def path_ipv6(self):
- return self.route_fn_tpl_ipv6 % ({'base': self._base_sysconf_dir,
- 'name': self._route_name})
+ return self.route_fn_tpl_ipv6 % (
+ {"base": self._base_sysconf_dir, "name": self._route_name}
+ )
def is_ipv6_route(self, address):
- return ':' in address
+ return ":" in address
def to_string(self, proto="ipv4"):
# only accept ipv4 and ipv6
- if proto not in ['ipv4', 'ipv6']:
+ if proto not in ["ipv4", "ipv6"]:
raise ValueError("Unknown protocol '%s'" % (str(proto)))
buf = io.StringIO()
buf.write(_make_header())
@@ -173,8 +190,8 @@ class Route(ConfigMap):
# (because Route can contain a mix of IPv4 and IPv6)
reindex = -1
for key in sorted(self._conf.keys()):
- if 'ADDRESS' in key:
- index = key.replace('ADDRESS', '')
+ if "ADDRESS" in key:
+ index = key.replace("ADDRESS", "")
address_value = str(self._conf[key])
# only accept combinations:
# if proto ipv6 only display ipv6 routes
@@ -183,33 +200,59 @@ class Route(ConfigMap):
# do not add ipv4 routes if proto is ipv6
# (this array will contain a mix of ipv4 and ipv6)
if proto == "ipv4" and not self.is_ipv6_route(address_value):
- netmask_value = str(self._conf['NETMASK' + index])
- gateway_value = str(self._conf['GATEWAY' + index])
+ netmask_value = str(self._conf["NETMASK" + index])
+ gateway_value = str(self._conf["GATEWAY" + index])
# increase IPv4 index
reindex = reindex + 1
- buf.write("%s=%s\n" % ('ADDRESS' + str(reindex),
- _quote_value(address_value)))
- buf.write("%s=%s\n" % ('GATEWAY' + str(reindex),
- _quote_value(gateway_value)))
- buf.write("%s=%s\n" % ('NETMASK' + str(reindex),
- _quote_value(netmask_value)))
- metric_key = 'METRIC' + index
+ buf.write(
+ "%s=%s\n"
+ % (
+ "ADDRESS" + str(reindex),
+ _quote_value(address_value),
+ )
+ )
+ buf.write(
+ "%s=%s\n"
+ % (
+ "GATEWAY" + str(reindex),
+ _quote_value(gateway_value),
+ )
+ )
+ buf.write(
+ "%s=%s\n"
+ % (
+ "NETMASK" + str(reindex),
+ _quote_value(netmask_value),
+ )
+ )
+ metric_key = "METRIC" + index
if metric_key in self._conf:
- metric_value = str(self._conf['METRIC' + index])
- buf.write("%s=%s\n" % ('METRIC' + str(reindex),
- _quote_value(metric_value)))
+ metric_value = str(self._conf["METRIC" + index])
+ buf.write(
+ "%s=%s\n"
+ % (
+ "METRIC" + str(reindex),
+ _quote_value(metric_value),
+ )
+ )
elif proto == "ipv6" and self.is_ipv6_route(address_value):
- netmask_value = str(self._conf['NETMASK' + index])
- gateway_value = str(self._conf['GATEWAY' + index])
+ netmask_value = str(self._conf["NETMASK" + index])
+ gateway_value = str(self._conf["GATEWAY" + index])
metric_value = (
- 'metric ' + str(self._conf['METRIC' + index])
- if 'METRIC' + index in self._conf else '')
+ "metric " + str(self._conf["METRIC" + index])
+ if "METRIC" + index in self._conf
+ else ""
+ )
buf.write(
- "%s/%s via %s %s dev %s\n" % (address_value,
- netmask_value,
- gateway_value,
- metric_value,
- self._route_name))
+ "%s/%s via %s %s dev %s\n"
+ % (
+ address_value,
+ netmask_value,
+ gateway_value,
+ metric_value,
+ self._route_name,
+ )
+ )
return buf.getvalue()
@@ -218,27 +261,31 @@ class NetInterface(ConfigMap):
"""Represents a sysconfig/networking-script (and its config + children)."""
iface_types = {
- 'ethernet': 'Ethernet',
- 'bond': 'Bond',
- 'bridge': 'Bridge',
- 'infiniband': 'InfiniBand',
- 'vlan': 'Vlan',
+ "ethernet": "Ethernet",
+ "bond": "Bond",
+ "bridge": "Bridge",
+ "infiniband": "InfiniBand",
+ "vlan": "Vlan",
}
- def __init__(self, iface_name, base_sysconf_dir, templates,
- kind='ethernet'):
+ def __init__(
+ self, iface_name, base_sysconf_dir, templates, kind="ethernet"
+ ):
super(NetInterface, self).__init__()
self.children = []
self.templates = templates
- route_tpl = self.templates.get('route_templates')
- self.routes = Route(iface_name, base_sysconf_dir,
- ipv4_tpl=route_tpl.get('ipv4'),
- ipv6_tpl=route_tpl.get('ipv6'))
- self.iface_fn_tpl = self.templates.get('iface_templates')
+ route_tpl = self.templates.get("route_templates")
+ self.routes = Route(
+ iface_name,
+ base_sysconf_dir,
+ ipv4_tpl=route_tpl.get("ipv4"),
+ ipv6_tpl=route_tpl.get("ipv6"),
+ )
+ self.iface_fn_tpl = self.templates.get("iface_templates")
self.kind = kind
self._iface_name = iface_name
- self._conf['DEVICE'] = iface_name
+ self._conf["DEVICE"] = iface_name
self._base_sysconf_dir = base_sysconf_dir
@property
@@ -248,7 +295,7 @@ class NetInterface(ConfigMap):
@name.setter
def name(self, iface_name):
self._iface_name = iface_name
- self._conf['DEVICE'] = iface_name
+ self._conf["DEVICE"] = iface_name
@property
def kind(self):
@@ -259,16 +306,18 @@ class NetInterface(ConfigMap):
if kind not in self.iface_types:
raise ValueError(kind)
self._kind = kind
- self._conf['TYPE'] = self.iface_types[kind]
+ self._conf["TYPE"] = self.iface_types[kind]
@property
def path(self):
- return self.iface_fn_tpl % ({'base': self._base_sysconf_dir,
- 'name': self.name})
+ return self.iface_fn_tpl % (
+ {"base": self._base_sysconf_dir, "name": self.name}
+ )
def copy(self, copy_children=False, copy_routes=False):
- c = NetInterface(self.name, self._base_sysconf_dir,
- self.templates, kind=self._kind)
+ c = NetInterface(
+ self.name, self._base_sysconf_dir, self.templates, kind=self._kind
+ )
c._conf = self._conf.copy()
if copy_children:
c.children = list(self.children)
@@ -277,7 +326,7 @@ class NetInterface(ConfigMap):
return c
def skip_key_value(self, key, val):
- if key == 'TYPE' and val == 'Vlan':
+ if key == "TYPE" and val == "Vlan":
return True
return False
@@ -291,166 +340,180 @@ class Renderer(renderer.Renderer):
# details about this)
iface_defaults = {
- 'rhel': {'ONBOOT': True, 'USERCTL': False, 'NM_CONTROLLED': False,
- 'BOOTPROTO': 'none'},
- 'suse': {'BOOTPROTO': 'static', 'STARTMODE': 'auto'},
+ "rhel": {
+ "ONBOOT": True,
+ "USERCTL": False,
+ "NM_CONTROLLED": False,
+ "BOOTPROTO": "none",
+ },
+ "suse": {"BOOTPROTO": "static", "STARTMODE": "auto"},
}
cfg_key_maps = {
- 'rhel': {
- 'accept-ra': 'IPV6_FORCE_ACCEPT_RA',
- 'bridge_stp': 'STP',
- 'bridge_ageing': 'AGEING',
- 'bridge_bridgeprio': 'PRIO',
- 'mac_address': 'HWADDR',
- 'mtu': 'MTU',
+ "rhel": {
+ "accept-ra": "IPV6_FORCE_ACCEPT_RA",
+ "bridge_stp": "STP",
+ "bridge_ageing": "AGEING",
+ "bridge_bridgeprio": "PRIO",
+ "mac_address": "HWADDR",
+ "mtu": "MTU",
},
- 'suse': {
- 'bridge_stp': 'BRIDGE_STP',
- 'bridge_ageing': 'BRIDGE_AGEINGTIME',
- 'bridge_bridgeprio': 'BRIDGE_PRIORITY',
- 'mac_address': 'LLADDR',
- 'mtu': 'MTU',
+ "suse": {
+ "bridge_stp": "BRIDGE_STP",
+ "bridge_ageing": "BRIDGE_AGEINGTIME",
+ "bridge_bridgeprio": "BRIDGE_PRIORITY",
+ "mac_address": "LLADDR",
+ "mtu": "MTU",
},
}
# If these keys exist, then their values will be used to form
# a BONDING_OPTS / BONDING_MODULE_OPTS grouping; otherwise no
# grouping will be set.
- bond_tpl_opts = tuple([
- ('bond_mode', "mode=%s"),
- ('bond_xmit_hash_policy', "xmit_hash_policy=%s"),
- ('bond_miimon', "miimon=%s"),
- ('bond_min_links', "min_links=%s"),
- ('bond_arp_interval', "arp_interval=%s"),
- ('bond_arp_ip_target', "arp_ip_target=%s"),
- ('bond_arp_validate', "arp_validate=%s"),
- ('bond_ad_select', "ad_select=%s"),
- ('bond_num_grat_arp', "num_grat_arp=%s"),
- ('bond_downdelay', "downdelay=%s"),
- ('bond_updelay', "updelay=%s"),
- ('bond_lacp_rate', "lacp_rate=%s"),
- ('bond_fail_over_mac', "fail_over_mac=%s"),
- ('bond_primary', "primary=%s"),
- ('bond_primary_reselect', "primary_reselect=%s"),
- ])
+ bond_tpl_opts = tuple(
+ [
+ ("bond_mode", "mode=%s"),
+ ("bond_xmit_hash_policy", "xmit_hash_policy=%s"),
+ ("bond_miimon", "miimon=%s"),
+ ("bond_min_links", "min_links=%s"),
+ ("bond_arp_interval", "arp_interval=%s"),
+ ("bond_arp_ip_target", "arp_ip_target=%s"),
+ ("bond_arp_validate", "arp_validate=%s"),
+ ("bond_ad_select", "ad_select=%s"),
+ ("bond_num_grat_arp", "num_grat_arp=%s"),
+ ("bond_downdelay", "downdelay=%s"),
+ ("bond_updelay", "updelay=%s"),
+ ("bond_lacp_rate", "lacp_rate=%s"),
+ ("bond_fail_over_mac", "fail_over_mac=%s"),
+ ("bond_primary", "primary=%s"),
+ ("bond_primary_reselect", "primary_reselect=%s"),
+ ]
+ )
templates = {}
def __init__(self, config=None):
if not config:
config = {}
- self.sysconf_dir = config.get('sysconf_dir', 'etc/sysconfig')
+ self.sysconf_dir = config.get("sysconf_dir", "etc/sysconfig")
self.netrules_path = config.get(
- 'netrules_path', 'etc/udev/rules.d/70-persistent-net.rules')
- self.dns_path = config.get('dns_path', 'etc/resolv.conf')
- nm_conf_path = 'etc/NetworkManager/conf.d/99-cloud-init.conf'
- self.networkmanager_conf_path = config.get('networkmanager_conf_path',
- nm_conf_path)
+ "netrules_path", "etc/udev/rules.d/70-persistent-net.rules"
+ )
+ self.dns_path = config.get("dns_path", "etc/resolv.conf")
+ nm_conf_path = "etc/NetworkManager/conf.d/99-cloud-init.conf"
+ self.networkmanager_conf_path = config.get(
+ "networkmanager_conf_path", nm_conf_path
+ )
self.templates = {
- 'control': config.get('control'),
- 'iface_templates': config.get('iface_templates'),
- 'route_templates': config.get('route_templates'),
+ "control": config.get("control"),
+ "iface_templates": config.get("iface_templates"),
+ "route_templates": config.get("route_templates"),
}
- self.flavor = config.get('flavor', 'rhel')
+ self.flavor = config.get("flavor", "rhel")
@classmethod
def _render_iface_shared(cls, iface, iface_cfg, flavor):
flavor_defaults = copy.deepcopy(cls.iface_defaults.get(flavor, {}))
iface_cfg.update(flavor_defaults)
- for old_key in ('mac_address', 'mtu', 'accept-ra'):
+ for old_key in ("mac_address", "mtu", "accept-ra"):
old_value = iface.get(old_key)
if old_value is not None:
# only set HWADDR on physical interfaces
- if (old_key == 'mac_address' and
- iface['type'] not in ['physical', 'infiniband']):
+ if old_key == "mac_address" and iface["type"] not in [
+ "physical",
+ "infiniband",
+ ]:
continue
new_key = cls.cfg_key_maps[flavor].get(old_key)
if new_key:
iface_cfg[new_key] = old_value
# only set WakeOnLan for physical interfaces
- if ('wakeonlan' in iface and iface['wakeonlan'] and
- iface['type'] == 'physical'):
- iface_cfg['ETHTOOL_OPTS'] = 'wol g'
+ if (
+ "wakeonlan" in iface
+ and iface["wakeonlan"]
+ and iface["type"] == "physical"
+ ):
+ iface_cfg["ETHTOOL_OPTS"] = "wol g"
@classmethod
def _render_subnets(cls, iface_cfg, subnets, has_default_route, flavor):
# setting base values
- if flavor == 'suse':
- iface_cfg['BOOTPROTO'] = 'static'
- if 'BRIDGE' in iface_cfg:
- iface_cfg['BOOTPROTO'] = 'dhcp'
- iface_cfg.drop('BRIDGE')
+ if flavor == "suse":
+ iface_cfg["BOOTPROTO"] = "static"
+ if "BRIDGE" in iface_cfg:
+ iface_cfg["BOOTPROTO"] = "dhcp"
+ iface_cfg.drop("BRIDGE")
else:
- iface_cfg['BOOTPROTO'] = 'none'
+ iface_cfg["BOOTPROTO"] = "none"
# modifying base values according to subnets
for i, subnet in enumerate(subnets, start=len(iface_cfg.children)):
- mtu_key = 'MTU'
- subnet_type = subnet.get('type')
- if subnet_type == 'dhcp6' or subnet_type == 'ipv6_dhcpv6-stateful':
- if flavor == 'suse':
+ mtu_key = "MTU"
+ subnet_type = subnet.get("type")
+ if subnet_type == "dhcp6" or subnet_type == "ipv6_dhcpv6-stateful":
+ if flavor == "suse":
# User wants dhcp for both protocols
- if iface_cfg['BOOTPROTO'] == 'dhcp4':
- iface_cfg['BOOTPROTO'] = 'dhcp'
+ if iface_cfg["BOOTPROTO"] == "dhcp4":
+ iface_cfg["BOOTPROTO"] = "dhcp"
else:
# Only IPv6 is DHCP, IPv4 may be static
- iface_cfg['BOOTPROTO'] = 'dhcp6'
- iface_cfg['DHCLIENT6_MODE'] = 'managed'
+ iface_cfg["BOOTPROTO"] = "dhcp6"
+ iface_cfg["DHCLIENT6_MODE"] = "managed"
# only if rhel AND dhcpv6 stateful
- elif (flavor == 'rhel' and
- subnet_type == 'ipv6_dhcpv6-stateful'):
- iface_cfg['BOOTPROTO'] = 'dhcp'
- iface_cfg['DHCPV6C'] = True
- iface_cfg['IPV6INIT'] = True
- iface_cfg['IPV6_AUTOCONF'] = False
+ elif (
+ flavor == "rhel" and subnet_type == "ipv6_dhcpv6-stateful"
+ ):
+ iface_cfg["BOOTPROTO"] = "dhcp"
+ iface_cfg["DHCPV6C"] = True
+ iface_cfg["IPV6INIT"] = True
+ iface_cfg["IPV6_AUTOCONF"] = False
else:
- iface_cfg['IPV6INIT'] = True
+ iface_cfg["IPV6INIT"] = True
# Configure network settings using DHCPv6
- iface_cfg['DHCPV6C'] = True
- elif subnet_type == 'ipv6_dhcpv6-stateless':
- if flavor == 'suse':
+ iface_cfg["DHCPV6C"] = True
+ elif subnet_type == "ipv6_dhcpv6-stateless":
+ if flavor == "suse":
# User wants dhcp for both protocols
- if iface_cfg['BOOTPROTO'] == 'dhcp4':
- iface_cfg['BOOTPROTO'] = 'dhcp'
+ if iface_cfg["BOOTPROTO"] == "dhcp4":
+ iface_cfg["BOOTPROTO"] = "dhcp"
else:
# Only IPv6 is DHCP, IPv4 may be static
- iface_cfg['BOOTPROTO'] = 'dhcp6'
- iface_cfg['DHCLIENT6_MODE'] = 'info'
+ iface_cfg["BOOTPROTO"] = "dhcp6"
+ iface_cfg["DHCLIENT6_MODE"] = "info"
else:
- iface_cfg['IPV6INIT'] = True
+ iface_cfg["IPV6INIT"] = True
# Configure network settings using SLAAC from RAs and
# optional info from dhcp server using DHCPv6
- iface_cfg['IPV6_AUTOCONF'] = True
- iface_cfg['DHCPV6C'] = True
+ iface_cfg["IPV6_AUTOCONF"] = True
+ iface_cfg["DHCPV6C"] = True
# Use Information-request to get only stateless
# configuration parameters (i.e., without address).
- iface_cfg['DHCPV6C_OPTIONS'] = '-S'
- elif subnet_type == 'ipv6_slaac':
- if flavor == 'suse':
+ iface_cfg["DHCPV6C_OPTIONS"] = "-S"
+ elif subnet_type == "ipv6_slaac":
+ if flavor == "suse":
# User wants dhcp for both protocols
- if iface_cfg['BOOTPROTO'] == 'dhcp4':
- iface_cfg['BOOTPROTO'] = 'dhcp'
+ if iface_cfg["BOOTPROTO"] == "dhcp4":
+ iface_cfg["BOOTPROTO"] = "dhcp"
else:
# Only IPv6 is DHCP, IPv4 may be static
- iface_cfg['BOOTPROTO'] = 'dhcp6'
- iface_cfg['DHCLIENT6_MODE'] = 'info'
+ iface_cfg["BOOTPROTO"] = "dhcp6"
+ iface_cfg["DHCLIENT6_MODE"] = "info"
else:
- iface_cfg['IPV6INIT'] = True
+ iface_cfg["IPV6INIT"] = True
# Configure network settings using SLAAC from RAs
- iface_cfg['IPV6_AUTOCONF'] = True
- elif subnet_type in ['dhcp4', 'dhcp']:
- bootproto_in = iface_cfg['BOOTPROTO']
- iface_cfg['BOOTPROTO'] = 'dhcp'
- if flavor == 'suse' and subnet_type == 'dhcp4':
+ iface_cfg["IPV6_AUTOCONF"] = True
+ elif subnet_type in ["dhcp4", "dhcp"]:
+ bootproto_in = iface_cfg["BOOTPROTO"]
+ iface_cfg["BOOTPROTO"] = "dhcp"
+ if flavor == "suse" and subnet_type == "dhcp4":
# If dhcp6 is already specified the user wants dhcp
# for both protocols
- if bootproto_in != 'dhcp6':
+ if bootproto_in != "dhcp6":
# Only IPv4 is DHCP, IPv6 may be static
- iface_cfg['BOOTPROTO'] = 'dhcp4'
- elif subnet_type in ['static', 'static6']:
+ iface_cfg["BOOTPROTO"] = "dhcp4"
+ elif subnet_type in ["static", "static6"]:
# RH info
# grep BOOTPROTO sysconfig.txt -A2 | head -3
# BOOTPROTO=none|bootp|dhcp
@@ -458,169 +521,184 @@ class Renderer(renderer.Renderer):
# to run on the device. Any other
# value causes any static configuration
# in the file to be applied.
- if subnet_is_ipv6(subnet) and flavor != 'suse':
- mtu_key = 'IPV6_MTU'
- iface_cfg['IPV6INIT'] = True
- if 'mtu' in subnet:
- mtu_mismatch = bool(mtu_key in iface_cfg and
- subnet['mtu'] != iface_cfg[mtu_key])
+ if subnet_is_ipv6(subnet) and flavor != "suse":
+ mtu_key = "IPV6_MTU"
+ iface_cfg["IPV6INIT"] = True
+ if "mtu" in subnet:
+ mtu_mismatch = bool(
+ mtu_key in iface_cfg
+ and subnet["mtu"] != iface_cfg[mtu_key]
+ )
if mtu_mismatch:
LOG.warning(
- 'Network config: ignoring %s device-level mtu:%s'
- ' because ipv4 subnet-level mtu:%s provided.',
- iface_cfg.name, iface_cfg[mtu_key], subnet['mtu'])
+ "Network config: ignoring %s device-level mtu:%s"
+ " because ipv4 subnet-level mtu:%s provided.",
+ iface_cfg.name,
+ iface_cfg[mtu_key],
+ subnet["mtu"],
+ )
if subnet_is_ipv6(subnet):
- if flavor == 'suse':
+ if flavor == "suse":
# TODO(rjschwei) write mtu setting to
# /etc/sysctl.d/
pass
else:
- iface_cfg[mtu_key] = subnet['mtu']
+ iface_cfg[mtu_key] = subnet["mtu"]
else:
- iface_cfg[mtu_key] = subnet['mtu']
+ iface_cfg[mtu_key] = subnet["mtu"]
- if subnet_is_ipv6(subnet) and flavor == 'rhel':
- iface_cfg['IPV6_FORCE_ACCEPT_RA'] = False
- iface_cfg['IPV6_AUTOCONF'] = False
- elif subnet_type == 'manual':
- if flavor == 'suse':
+ if subnet_is_ipv6(subnet) and flavor == "rhel":
+ iface_cfg["IPV6_FORCE_ACCEPT_RA"] = False
+ iface_cfg["IPV6_AUTOCONF"] = False
+ elif subnet_type == "manual":
+ if flavor == "suse":
LOG.debug('Unknown subnet type setting "%s"', subnet_type)
else:
# If the subnet has an MTU setting, then ONBOOT=True
# to apply the setting
- iface_cfg['ONBOOT'] = mtu_key in iface_cfg
+ iface_cfg["ONBOOT"] = mtu_key in iface_cfg
else:
- raise ValueError("Unknown subnet type '%s' found"
- " for interface '%s'" % (subnet_type,
- iface_cfg.name))
- if subnet.get('control') == 'manual':
- if flavor == 'suse':
- iface_cfg['STARTMODE'] = 'manual'
+ raise ValueError(
+ "Unknown subnet type '%s' found for interface '%s'"
+ % (subnet_type, iface_cfg.name)
+ )
+ if subnet.get("control") == "manual":
+ if flavor == "suse":
+ iface_cfg["STARTMODE"] = "manual"
else:
- iface_cfg['ONBOOT'] = False
+ iface_cfg["ONBOOT"] = False
# set IPv4 and IPv6 static addresses
ipv4_index = -1
ipv6_index = -1
for i, subnet in enumerate(subnets, start=len(iface_cfg.children)):
- subnet_type = subnet.get('type')
+ subnet_type = subnet.get("type")
# metric may apply to both dhcp and static config
- if 'metric' in subnet:
- if flavor != 'suse':
- iface_cfg['METRIC'] = subnet['metric']
- if subnet_type in ['dhcp', 'dhcp4']:
+ if "metric" in subnet:
+ if flavor != "suse":
+ iface_cfg["METRIC"] = subnet["metric"]
+ if subnet_type in ["dhcp", "dhcp4"]:
# On SUSE distros 'DHCLIENT_SET_DEFAULT_ROUTE' is a global
# setting in /etc/sysconfig/network/dhcp
- if flavor != 'suse':
- if has_default_route and iface_cfg['BOOTPROTO'] != 'none':
- iface_cfg['DHCLIENT_SET_DEFAULT_ROUTE'] = False
+ if flavor != "suse":
+ if has_default_route and iface_cfg["BOOTPROTO"] != "none":
+ iface_cfg["DHCLIENT_SET_DEFAULT_ROUTE"] = False
continue
elif subnet_type in IPV6_DYNAMIC_TYPES:
continue
- elif subnet_type in ['static', 'static6']:
+ elif subnet_type in ["static", "static6"]:
if subnet_is_ipv6(subnet):
ipv6_index = ipv6_index + 1
- ipv6_cidr = "%s/%s" % (subnet['address'], subnet['prefix'])
+ ipv6_cidr = "%s/%s" % (subnet["address"], subnet["prefix"])
if ipv6_index == 0:
- if flavor == 'suse':
- iface_cfg['IPADDR6'] = ipv6_cidr
+ if flavor == "suse":
+ iface_cfg["IPADDR6"] = ipv6_cidr
else:
- iface_cfg['IPV6ADDR'] = ipv6_cidr
+ iface_cfg["IPV6ADDR"] = ipv6_cidr
elif ipv6_index == 1:
- if flavor == 'suse':
- iface_cfg['IPADDR6_1'] = ipv6_cidr
+ if flavor == "suse":
+ iface_cfg["IPADDR6_1"] = ipv6_cidr
else:
- iface_cfg['IPV6ADDR_SECONDARIES'] = ipv6_cidr
+ iface_cfg["IPV6ADDR_SECONDARIES"] = ipv6_cidr
else:
- if flavor == 'suse':
- iface_cfg['IPADDR6_%d' % ipv6_index] = ipv6_cidr
+ if flavor == "suse":
+ iface_cfg["IPADDR6_%d" % ipv6_index] = ipv6_cidr
else:
- iface_cfg['IPV6ADDR_SECONDARIES'] += \
+ iface_cfg["IPV6ADDR_SECONDARIES"] += (
" " + ipv6_cidr
+ )
else:
ipv4_index = ipv4_index + 1
suff = "" if ipv4_index == 0 else str(ipv4_index)
- iface_cfg['IPADDR' + suff] = subnet['address']
- iface_cfg['NETMASK' + suff] = \
- net_prefix_to_ipv4_mask(subnet['prefix'])
-
- if 'gateway' in subnet and flavor != 'suse':
- iface_cfg['DEFROUTE'] = True
- if is_ipv6_addr(subnet['gateway']):
- iface_cfg['IPV6_DEFAULTGW'] = subnet['gateway']
+ iface_cfg["IPADDR" + suff] = subnet["address"]
+ iface_cfg["NETMASK" + suff] = net_prefix_to_ipv4_mask(
+ subnet["prefix"]
+ )
+
+ if "gateway" in subnet and flavor != "suse":
+ iface_cfg["DEFROUTE"] = True
+ if is_ipv6_addr(subnet["gateway"]):
+ iface_cfg["IPV6_DEFAULTGW"] = subnet["gateway"]
else:
- iface_cfg['GATEWAY'] = subnet['gateway']
+ iface_cfg["GATEWAY"] = subnet["gateway"]
- if 'dns_search' in subnet and flavor != 'suse':
- iface_cfg['DOMAIN'] = ' '.join(subnet['dns_search'])
+ if "dns_search" in subnet and flavor != "suse":
+ iface_cfg["DOMAIN"] = " ".join(subnet["dns_search"])
- if 'dns_nameservers' in subnet and flavor != 'suse':
- if len(subnet['dns_nameservers']) > 3:
+ if "dns_nameservers" in subnet and flavor != "suse":
+ if len(subnet["dns_nameservers"]) > 3:
# per resolv.conf(5) MAXNS sets this to 3.
- LOG.debug("%s has %d entries in dns_nameservers. "
- "Only 3 are used.", iface_cfg.name,
- len(subnet['dns_nameservers']))
- for i, k in enumerate(subnet['dns_nameservers'][:3], 1):
- iface_cfg['DNS' + str(i)] = k
+ LOG.debug(
+ "%s has %d entries in dns_nameservers. "
+ "Only 3 are used.",
+ iface_cfg.name,
+ len(subnet["dns_nameservers"]),
+ )
+ for i, k in enumerate(subnet["dns_nameservers"][:3], 1):
+ iface_cfg["DNS" + str(i)] = k
@classmethod
def _render_subnet_routes(cls, iface_cfg, route_cfg, subnets, flavor):
# TODO(rjschwei): route configuration on SUSE distro happens via
# ifroute-* files, see lp#1812117. SUSE currently carries a local
# patch in their package.
- if flavor == 'suse':
+ if flavor == "suse":
return
for _, subnet in enumerate(subnets, start=len(iface_cfg.children)):
- subnet_type = subnet.get('type')
- for route in subnet.get('routes', []):
- is_ipv6 = subnet.get('ipv6') or is_ipv6_addr(route['gateway'])
+ subnet_type = subnet.get("type")
+ for route in subnet.get("routes", []):
+ is_ipv6 = subnet.get("ipv6") or is_ipv6_addr(route["gateway"])
# Any dynamic configuration method, slaac, dhcpv6-stateful/
# stateless should get router information from router RA's.
- if (_is_default_route(route) and subnet_type not in
- IPV6_DYNAMIC_TYPES):
+ if (
+ _is_default_route(route)
+ and subnet_type not in IPV6_DYNAMIC_TYPES
+ ):
if (
- (subnet.get('ipv4') and
- route_cfg.has_set_default_ipv4) or
- (subnet.get('ipv6') and
- route_cfg.has_set_default_ipv6)
+ subnet.get("ipv4") and route_cfg.has_set_default_ipv4
+ ) or (
+ subnet.get("ipv6") and route_cfg.has_set_default_ipv6
):
- raise ValueError("Duplicate declaration of default "
- "route found for interface '%s'"
- % (iface_cfg.name))
+ raise ValueError(
+ "Duplicate declaration of default "
+ "route found for interface '%s'" % (iface_cfg.name)
+ )
# NOTE(harlowja): ipv6 and ipv4 default gateways
- gw_key = 'GATEWAY0'
- nm_key = 'NETMASK0'
- addr_key = 'ADDRESS0'
+ gw_key = "GATEWAY0"
+ nm_key = "NETMASK0"
+ addr_key = "ADDRESS0"
# The owning interface provides the default route.
#
# TODO(harlowja): add validation that no other iface has
# also provided the default route?
- iface_cfg['DEFROUTE'] = True
- if iface_cfg['BOOTPROTO'] in ('dhcp', 'dhcp4'):
- iface_cfg['DHCLIENT_SET_DEFAULT_ROUTE'] = True
- if 'gateway' in route:
+ iface_cfg["DEFROUTE"] = True
+ if iface_cfg["BOOTPROTO"] in ("dhcp", "dhcp4"):
+ iface_cfg["DHCLIENT_SET_DEFAULT_ROUTE"] = True
+ if "gateway" in route:
if is_ipv6:
- iface_cfg['IPV6_DEFAULTGW'] = route['gateway']
+ iface_cfg["IPV6_DEFAULTGW"] = route["gateway"]
route_cfg.has_set_default_ipv6 = True
else:
- iface_cfg['GATEWAY'] = route['gateway']
+ iface_cfg["GATEWAY"] = route["gateway"]
route_cfg.has_set_default_ipv4 = True
- if 'metric' in route:
- iface_cfg['METRIC'] = route['metric']
+ if "metric" in route:
+ iface_cfg["METRIC"] = route["metric"]
else:
- gw_key = 'GATEWAY%s' % route_cfg.last_idx
- nm_key = 'NETMASK%s' % route_cfg.last_idx
- addr_key = 'ADDRESS%s' % route_cfg.last_idx
- metric_key = 'METRIC%s' % route_cfg.last_idx
+ gw_key = "GATEWAY%s" % route_cfg.last_idx
+ nm_key = "NETMASK%s" % route_cfg.last_idx
+ addr_key = "ADDRESS%s" % route_cfg.last_idx
+ metric_key = "METRIC%s" % route_cfg.last_idx
route_cfg.last_idx += 1
# add default routes only to ifcfg files, not
# to route-* or route6-*
- for (old_key, new_key) in [('gateway', gw_key),
- ('metric', metric_key),
- ('netmask', nm_key),
- ('network', addr_key)]:
+ for (old_key, new_key) in [
+ ("gateway", gw_key),
+ ("metric", metric_key),
+ ("netmask", nm_key),
+ ("network", addr_key),
+ ]:
if old_key in route:
route_cfg[new_key] = route[old_key]
@@ -638,33 +716,35 @@ class Renderer(renderer.Renderer):
bond_opts.append(value_tpl % (bond_value))
break
if bond_opts:
- if flavor == 'suse':
+ if flavor == "suse":
# suse uses the sysconfig support which requires
# BONDING_MODULE_OPTS see
# https://www.kernel.org/doc/Documentation/networking/bonding.txt
# 3.1 Configuration with Sysconfig Support
- iface_cfg['BONDING_MODULE_OPTS'] = " ".join(bond_opts)
+ iface_cfg["BONDING_MODULE_OPTS"] = " ".join(bond_opts)
else:
# rhel uses initscript support and thus requires BONDING_OPTS
# this is also the old default see
# https://www.kernel.org/doc/Documentation/networking/bonding.txt
# 3.2 Configuration with Initscripts Support
- iface_cfg['BONDING_OPTS'] = " ".join(bond_opts)
+ iface_cfg["BONDING_OPTS"] = " ".join(bond_opts)
@classmethod
def _render_physical_interfaces(
- cls, network_state, iface_contents, flavor
+ cls, network_state, iface_contents, flavor
):
physical_filter = renderer.filter_by_physical
for iface in network_state.iter_interfaces(physical_filter):
- iface_name = iface['name']
+ iface_name = iface["name"]
iface_subnets = iface.get("subnets", [])
iface_cfg = iface_contents[iface_name]
route_cfg = iface_cfg.routes
cls._render_subnets(
- iface_cfg, iface_subnets, network_state.has_default_route,
- flavor
+ iface_cfg,
+ iface_subnets,
+ network_state.has_default_route,
+ flavor,
)
cls._render_subnet_routes(
iface_cfg, route_cfg, iface_subnets, flavor
@@ -672,10 +752,10 @@ class Renderer(renderer.Renderer):
@classmethod
def _render_bond_interfaces(cls, network_state, iface_contents, flavor):
- bond_filter = renderer.filter_by_type('bond')
- slave_filter = renderer.filter_by_attr('bond-master')
+ bond_filter = renderer.filter_by_type("bond")
+ slave_filter = renderer.filter_by_attr("bond-master")
for iface in network_state.iter_interfaces(bond_filter):
- iface_name = iface['name']
+ iface_name = iface["name"]
iface_cfg = iface_contents[iface_name]
cls._render_bonding_opts(iface_cfg, iface, flavor)
@@ -684,21 +764,23 @@ class Renderer(renderer.Renderer):
master_cfgs = [iface_cfg]
master_cfgs.extend(iface_cfg.children)
for master_cfg in master_cfgs:
- master_cfg['BONDING_MASTER'] = True
- if flavor != 'suse':
- master_cfg.kind = 'bond'
+ master_cfg["BONDING_MASTER"] = True
+ if flavor != "suse":
+ master_cfg.kind = "bond"
- if iface.get('mac_address'):
- if flavor == 'suse':
- iface_cfg['LLADDR'] = iface.get('mac_address')
+ if iface.get("mac_address"):
+ if flavor == "suse":
+ iface_cfg["LLADDR"] = iface.get("mac_address")
else:
- iface_cfg['MACADDR'] = iface.get('mac_address')
+ iface_cfg["MACADDR"] = iface.get("mac_address")
iface_subnets = iface.get("subnets", [])
route_cfg = iface_cfg.routes
cls._render_subnets(
- iface_cfg, iface_subnets, network_state.has_default_route,
- flavor
+ iface_cfg,
+ iface_subnets,
+ network_state.has_default_route,
+ flavor,
)
cls._render_subnet_routes(
iface_cfg, route_cfg, iface_subnets, flavor
@@ -707,54 +789,64 @@ class Renderer(renderer.Renderer):
            # iter_interfaces on network-state is not sorted; to produce
            # consistent numbers we need to sort.
bond_slaves = sorted(
- [slave_iface['name'] for slave_iface in
- network_state.iter_interfaces(slave_filter)
- if slave_iface['bond-master'] == iface_name])
+ [
+ slave_iface["name"]
+ for slave_iface in network_state.iter_interfaces(
+ slave_filter
+ )
+ if slave_iface["bond-master"] == iface_name
+ ]
+ )
for index, bond_slave in enumerate(bond_slaves):
- if flavor == 'suse':
- slavestr = 'BONDING_SLAVE_%s' % index
+ if flavor == "suse":
+ slavestr = "BONDING_SLAVE_%s" % index
else:
- slavestr = 'BONDING_SLAVE%s' % index
+ slavestr = "BONDING_SLAVE%s" % index
iface_cfg[slavestr] = bond_slave
slave_cfg = iface_contents[bond_slave]
- if flavor == 'suse':
- slave_cfg['BOOTPROTO'] = 'none'
- slave_cfg['STARTMODE'] = 'hotplug'
+ if flavor == "suse":
+ slave_cfg["BOOTPROTO"] = "none"
+ slave_cfg["STARTMODE"] = "hotplug"
else:
- slave_cfg['MASTER'] = iface_name
- slave_cfg['SLAVE'] = True
+ slave_cfg["MASTER"] = iface_name
+ slave_cfg["SLAVE"] = True
@classmethod
def _render_vlan_interfaces(cls, network_state, iface_contents, flavor):
- vlan_filter = renderer.filter_by_type('vlan')
+ vlan_filter = renderer.filter_by_type("vlan")
for iface in network_state.iter_interfaces(vlan_filter):
- iface_name = iface['name']
+ iface_name = iface["name"]
iface_cfg = iface_contents[iface_name]
- if flavor == 'suse':
- vlan_id = iface.get('vlan_id')
+ if flavor == "suse":
+ vlan_id = iface.get("vlan_id")
if vlan_id:
- iface_cfg['VLAN_ID'] = vlan_id
- iface_cfg['ETHERDEVICE'] = iface_name[:iface_name.rfind('.')]
+ iface_cfg["VLAN_ID"] = vlan_id
+ iface_cfg["ETHERDEVICE"] = iface_name[: iface_name.rfind(".")]
else:
- iface_cfg['VLAN'] = True
- iface_cfg.kind = 'vlan'
+ iface_cfg["VLAN"] = True
+ iface_cfg.kind = "vlan"
- rdev = iface['vlan-raw-device']
- supported = _supported_vlan_names(rdev, iface['vlan_id'])
+ rdev = iface["vlan-raw-device"]
+ supported = _supported_vlan_names(rdev, iface["vlan_id"])
if iface_name not in supported:
LOG.info(
"Name '%s' for vlan '%s' is not officially supported"
"by RHEL. Supported: %s",
- iface_name, rdev, ' '.join(supported))
- iface_cfg['PHYSDEV'] = rdev
+ iface_name,
+ rdev,
+ " ".join(supported),
+ )
+ iface_cfg["PHYSDEV"] = rdev
iface_subnets = iface.get("subnets", [])
route_cfg = iface_cfg.routes
cls._render_subnets(
- iface_cfg, iface_subnets, network_state.has_default_route,
- flavor
+ iface_cfg,
+ iface_subnets,
+ network_state.has_default_route,
+ flavor,
)
cls._render_subnet_routes(
iface_cfg, route_cfg, iface_subnets, flavor
@@ -763,8 +855,12 @@ class Renderer(renderer.Renderer):
@staticmethod
def _render_dns(network_state, existing_dns_path=None):
# skip writing resolv.conf if network_state doesn't include any input.
- if not any([len(network_state.dns_nameservers),
- len(network_state.dns_searchdomains)]):
+ if not any(
+ [
+ len(network_state.dns_nameservers),
+ len(network_state.dns_searchdomains),
+ ]
+ ):
return None
content = resolv_conf.ResolvConf("")
if existing_dns_path and os.path.isfile(existing_dns_path):
@@ -773,10 +869,10 @@ class Renderer(renderer.Renderer):
content.add_nameserver(nameserver)
for searchdomain in network_state.dns_searchdomains:
content.add_search_domain(searchdomain)
- header = _make_header(';')
+ header = _make_header(";")
content_str = str(content)
if not content_str.startswith(header):
- content_str = header + '\n' + content_str
+ content_str = header + "\n" + content_str
return content_str
@staticmethod
@@ -787,7 +883,7 @@ class Renderer(renderer.Renderer):
# NetworkManager to not manage dns, so that /etc/resolv.conf
# does not get clobbered.
if network_state.dns_nameservers:
- content.set_section_keypair('main', 'dns', 'none')
+ content.set_section_keypair("main", "dns", "none")
if len(content) == 0:
return None
@@ -797,39 +893,41 @@ class Renderer(renderer.Renderer):
@classmethod
def _render_bridge_interfaces(cls, network_state, iface_contents, flavor):
bridge_key_map = {
- old_k: new_k for old_k, new_k in cls.cfg_key_maps[flavor].items()
- if old_k.startswith('bridge')}
- bridge_filter = renderer.filter_by_type('bridge')
+ old_k: new_k
+ for old_k, new_k in cls.cfg_key_maps[flavor].items()
+ if old_k.startswith("bridge")
+ }
+ bridge_filter = renderer.filter_by_type("bridge")
for iface in network_state.iter_interfaces(bridge_filter):
- iface_name = iface['name']
+ iface_name = iface["name"]
iface_cfg = iface_contents[iface_name]
- if flavor != 'suse':
- iface_cfg.kind = 'bridge'
+ if flavor != "suse":
+ iface_cfg.kind = "bridge"
for old_key, new_key in bridge_key_map.items():
if old_key in iface:
iface_cfg[new_key] = iface[old_key]
- if flavor == 'suse':
- if 'BRIDGE_STP' in iface_cfg:
- if iface_cfg.get('BRIDGE_STP'):
- iface_cfg['BRIDGE_STP'] = 'on'
+ if flavor == "suse":
+ if "BRIDGE_STP" in iface_cfg:
+ if iface_cfg.get("BRIDGE_STP"):
+ iface_cfg["BRIDGE_STP"] = "on"
else:
- iface_cfg['BRIDGE_STP'] = 'off'
-
- if iface.get('mac_address'):
- key = 'MACADDR'
- if flavor == 'suse':
- key = 'LLADDRESS'
- iface_cfg[key] = iface.get('mac_address')
-
- if flavor == 'suse':
- if iface.get('bridge_ports', []):
- iface_cfg['BRIDGE_PORTS'] = '%s' % " ".join(
- iface.get('bridge_ports')
+ iface_cfg["BRIDGE_STP"] = "off"
+
+ if iface.get("mac_address"):
+ key = "MACADDR"
+ if flavor == "suse":
+ key = "LLADDRESS"
+ iface_cfg[key] = iface.get("mac_address")
+
+ if flavor == "suse":
+ if iface.get("bridge_ports", []):
+ iface_cfg["BRIDGE_PORTS"] = "%s" % " ".join(
+ iface.get("bridge_ports")
)
# Is this the right key to get all the connected interfaces?
- for bridged_iface_name in iface.get('bridge_ports', []):
+ for bridged_iface_name in iface.get("bridge_ports", []):
# Ensure all bridged interfaces are correctly tagged
# as being bridged to this interface.
bridged_cfg = iface_contents[bridged_iface_name]
@@ -837,15 +935,17 @@ class Renderer(renderer.Renderer):
bridged_cfgs.extend(bridged_cfg.children)
for bridge_cfg in bridged_cfgs:
bridge_value = iface_name
- if flavor == 'suse':
- bridge_value = 'yes'
- bridge_cfg['BRIDGE'] = bridge_value
+ if flavor == "suse":
+ bridge_value = "yes"
+ bridge_cfg["BRIDGE"] = bridge_value
iface_subnets = iface.get("subnets", [])
route_cfg = iface_cfg.routes
cls._render_subnets(
- iface_cfg, iface_subnets, network_state.has_default_route,
- flavor
+ iface_cfg,
+ iface_subnets,
+ network_state.has_default_route,
+ flavor,
)
cls._render_subnet_routes(
iface_cfg, route_cfg, iface_subnets, flavor
@@ -853,37 +953,40 @@ class Renderer(renderer.Renderer):
@classmethod
def _render_ib_interfaces(cls, network_state, iface_contents, flavor):
- ib_filter = renderer.filter_by_type('infiniband')
+ ib_filter = renderer.filter_by_type("infiniband")
for iface in network_state.iter_interfaces(ib_filter):
- iface_name = iface['name']
+ iface_name = iface["name"]
iface_cfg = iface_contents[iface_name]
- iface_cfg.kind = 'infiniband'
+ iface_cfg.kind = "infiniband"
iface_subnets = iface.get("subnets", [])
route_cfg = iface_cfg.routes
cls._render_subnets(
- iface_cfg, iface_subnets, network_state.has_default_route,
- flavor
+ iface_cfg,
+ iface_subnets,
+ network_state.has_default_route,
+ flavor,
)
cls._render_subnet_routes(
iface_cfg, route_cfg, iface_subnets, flavor
)
@classmethod
- def _render_sysconfig(cls, base_sysconf_dir, network_state, flavor,
- templates=None):
- '''Given state, return /etc/sysconfig files + contents'''
+ def _render_sysconfig(
+ cls, base_sysconf_dir, network_state, flavor, templates=None
+ ):
+ """Given state, return /etc/sysconfig files + contents"""
if not templates:
templates = cls.templates
iface_contents = {}
for iface in network_state.iter_interfaces():
- if iface['type'] == "loopback":
+ if iface["type"] == "loopback":
continue
- iface_name = iface['name']
+ iface_name = iface["name"]
iface_cfg = NetInterface(iface_name, base_sysconf_dir, templates)
- if flavor == 'suse':
- iface_cfg.drop('DEVICE')
+ if flavor == "suse":
+ iface_cfg.drop("DEVICE")
# If type detection fails it is considered a bug in SUSE
- iface_cfg.drop('TYPE')
+ iface_cfg.drop("TYPE")
cls._render_iface_shared(iface, iface_cfg, flavor)
iface_contents[iface_name] = iface_cfg
cls._render_physical_interfaces(network_state, iface_contents, flavor)
@@ -899,9 +1002,10 @@ class Renderer(renderer.Renderer):
if iface_cfg:
contents[iface_cfg.path] = iface_cfg.to_string()
if iface_cfg.routes:
- for cpath, proto in zip([iface_cfg.routes.path_ipv4,
- iface_cfg.routes.path_ipv6],
- ["ipv4", "ipv6"]):
+ for cpath, proto in zip(
+ [iface_cfg.routes.path_ipv4, iface_cfg.routes.path_ipv6],
+ ["ipv4", "ipv6"],
+ ):
if cpath not in contents:
contents[cpath] = iface_cfg.routes.to_string(proto)
return contents
@@ -911,21 +1015,24 @@ class Renderer(renderer.Renderer):
templates = self.templates
file_mode = 0o644
base_sysconf_dir = subp.target_path(target, self.sysconf_dir)
- for path, data in self._render_sysconfig(base_sysconf_dir,
- network_state, self.flavor,
- templates=templates).items():
+ for path, data in self._render_sysconfig(
+ base_sysconf_dir, network_state, self.flavor, templates=templates
+ ).items():
util.write_file(path, data, file_mode)
if self.dns_path:
dns_path = subp.target_path(target, self.dns_path)
- resolv_content = self._render_dns(network_state,
- existing_dns_path=dns_path)
+ resolv_content = self._render_dns(
+ network_state, existing_dns_path=dns_path
+ )
if resolv_content:
util.write_file(dns_path, resolv_content, file_mode)
if self.networkmanager_conf_path:
- nm_conf_path = subp.target_path(target,
- self.networkmanager_conf_path)
- nm_conf_content = self._render_networkmanager_conf(network_state,
- templates)
+ nm_conf_path = subp.target_path(
+ target, self.networkmanager_conf_path
+ )
+ nm_conf_content = self._render_networkmanager_conf(
+ network_state, templates
+ )
if nm_conf_content:
util.write_file(nm_conf_path, nm_conf_content, file_mode)
if self.netrules_path:
@@ -933,20 +1040,19 @@ class Renderer(renderer.Renderer):
netrules_path = subp.target_path(target, self.netrules_path)
util.write_file(netrules_path, netrules_content, file_mode)
if available_nm(target=target):
- enable_ifcfg_rh(subp.target_path(
- target, path=NM_CFG_FILE
- ))
+ enable_ifcfg_rh(subp.target_path(target, path=NM_CFG_FILE))
- sysconfig_path = subp.target_path(target, templates.get('control'))
+ sysconfig_path = subp.target_path(target, templates.get("control"))
# Distros configuring /etc/sysconfig/network as a file e.g. Centos
- if sysconfig_path.endswith('network'):
+ if sysconfig_path.endswith("network"):
util.ensure_dir(os.path.dirname(sysconfig_path))
- netcfg = [_make_header(), 'NETWORKING=yes']
+ netcfg = [_make_header(), "NETWORKING=yes"]
if network_state.use_ipv6:
- netcfg.append('NETWORKING_IPV6=yes')
- netcfg.append('IPV6_AUTOCONF=no')
- util.write_file(sysconfig_path,
- "\n".join(netcfg) + "\n", file_mode)
+ netcfg.append("NETWORKING_IPV6=yes")
+ netcfg.append("IPV6_AUTOCONF=no")
+ util.write_file(
+ sysconfig_path, "\n".join(netcfg) + "\n", file_mode
+ )
def _supported_vlan_names(rdev, vid):
@@ -954,27 +1060,34 @@ def _supported_vlan_names(rdev, vid):
11.5. Naming Scheme for VLAN Interfaces."""
return [
v.format(rdev=rdev, vid=int(vid))
- for v in ("{rdev}{vid:04}", "{rdev}{vid}",
- "{rdev}.{vid:04}", "{rdev}.{vid}")]
+ for v in (
+ "{rdev}{vid:04}",
+ "{rdev}{vid}",
+ "{rdev}.{vid:04}",
+ "{rdev}.{vid}",
+ )
+ ]
def available(target=None):
sysconfig = available_sysconfig(target=target)
nm = available_nm(target=target)
- return (util.system_info()['variant'] in KNOWN_DISTROS
- and any([nm, sysconfig]))
+ return util.system_info()["variant"] in KNOWN_DISTROS and any(
+ [nm, sysconfig]
+ )
def available_sysconfig(target=None):
- expected = ['ifup', 'ifdown']
- search = ['/sbin', '/usr/sbin']
+ expected = ["ifup", "ifdown"]
+ search = ["/sbin", "/usr/sbin"]
for p in expected:
if not subp.which(p, search=search, target=target):
return False
expected_paths = [
- 'etc/sysconfig/network-scripts/network-functions',
- 'etc/sysconfig/config']
+ "etc/sysconfig/network-scripts/network-functions",
+ "etc/sysconfig/config",
+ ]
for p in expected_paths:
if os.path.isfile(subp.target_path(target, p)):
return True
@@ -982,10 +1095,7 @@ def available_sysconfig(target=None):
def available_nm(target=None):
- if not os.path.isfile(subp.target_path(
- target,
- path=NM_CFG_FILE
- )):
+ if not os.path.isfile(subp.target_path(target, path=NM_CFG_FILE)):
return False
return True
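
A minimal usage sketch of the VLAN naming helper in the hunk above; the device name and VLAN id are invented, and the expected list follows directly from the four format strings shown:

# Sketch only: exercises _supported_vlan_names() as reformatted above.
names = _supported_vlan_names("eth0", 5)
# "{vid:04}" zero-pads the id, so padded and unpadded forms both appear.
assert names == ["eth00005", "eth05", "eth0.0005", "eth0.5"]
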
diff --git a/cloudinit/net/udev.py b/cloudinit/net/udev.py
index 58c0a708..b79e4426 100644
--- a/cloudinit/net/udev.py
+++ b/cloudinit/net/udev.py
@@ -32,15 +32,18 @@ def generate_udev_rule(interface, mac, driver=None):
ATTR{address}="ff:ee:dd:cc:bb:aa", NAME="eth0"
"""
if not driver:
- driver = '?*'
-
- rule = ', '.join([
- compose_udev_equality('SUBSYSTEM', 'net'),
- compose_udev_equality('ACTION', 'add'),
- compose_udev_equality('DRIVERS', driver),
- compose_udev_attr_equality('address', mac),
- compose_udev_setting('NAME', interface),
- ])
- return '%s\n' % rule
+ driver = "?*"
+
+ rule = ", ".join(
+ [
+ compose_udev_equality("SUBSYSTEM", "net"),
+ compose_udev_equality("ACTION", "add"),
+ compose_udev_equality("DRIVERS", driver),
+ compose_udev_attr_equality("address", mac),
+ compose_udev_setting("NAME", interface),
+ ]
+ )
+ return "%s\n" % rule
+
# vi: ts=4 expandtab syntax=python
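
A hedged sketch of calling the reformatted generate_udev_rule(); the compose_* helpers it joins are defined earlier in udev.py and fall outside this hunk, so only the call and the general shape of the result are described (interface name and MAC are placeholders):

# Sketch only: returns a single comma-separated udev rule ending in a
# newline, matching on SUBSYSTEM, ACTION, DRIVERS and the MAC address and
# then assigning NAME, much like the docstring example above.
rule = generate_udev_rule("eth0", "ff:ee:dd:cc:bb:aa")
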
diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py
index 628e2908..74e6b35a 100644
--- a/cloudinit/netinfo.py
+++ b/cloudinit/netinfo.py
@@ -8,25 +8,18 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from copy import copy, deepcopy
import re
+from copy import copy, deepcopy
from cloudinit import log as logging
+from cloudinit import subp, util
from cloudinit.net.network_state import net_prefix_to_ipv4_mask
-from cloudinit import subp
-from cloudinit import util
-
from cloudinit.simpletable import SimpleTable
LOG = logging.getLogger()
-DEFAULT_NETDEV_INFO = {
- "ipv4": [],
- "ipv6": [],
- "hwaddr": "",
- "up": False
-}
+DEFAULT_NETDEV_INFO = {"ipv4": [], "ipv6": [], "hwaddr": "", "up": False}
def _netdev_info_iproute(ipaddr_out):
@@ -42,51 +35,63 @@ def _netdev_info_iproute(ipaddr_out):
devs = {}
dev_name = None
for num, line in enumerate(ipaddr_out.splitlines()):
- m = re.match(r'^\d+:\s(?P<dev>[^:]+):\s+<(?P<flags>\S+)>\s+.*', line)
+ m = re.match(r"^\d+:\s(?P<dev>[^:]+):\s+<(?P<flags>\S+)>\s+.*", line)
if m:
- dev_name = m.group('dev').lower().split('@')[0]
- flags = m.group('flags').split(',')
+ dev_name = m.group("dev").lower().split("@")[0]
+ flags = m.group("flags").split(",")
devs[dev_name] = {
- 'ipv4': [], 'ipv6': [], 'hwaddr': '',
- 'up': bool('UP' in flags and 'LOWER_UP' in flags),
+ "ipv4": [],
+ "ipv6": [],
+ "hwaddr": "",
+ "up": bool("UP" in flags and "LOWER_UP" in flags),
}
- elif 'inet6' in line:
+ elif "inet6" in line:
m = re.match(
- r'\s+inet6\s(?P<ip>\S+)\sscope\s(?P<scope6>\S+).*', line)
+ r"\s+inet6\s(?P<ip>\S+)\sscope\s(?P<scope6>\S+).*", line
+ )
if not m:
LOG.warning(
- 'Could not parse ip addr show: (line:%d) %s', num, line)
+ "Could not parse ip addr show: (line:%d) %s", num, line
+ )
continue
- devs[dev_name]['ipv6'].append(m.groupdict())
- elif 'inet' in line:
+ devs[dev_name]["ipv6"].append(m.groupdict())
+ elif "inet" in line:
m = re.match(
- r'\s+inet\s(?P<cidr4>\S+)(\sbrd\s(?P<bcast>\S+))?\sscope\s'
- r'(?P<scope>\S+).*', line)
+ r"\s+inet\s(?P<cidr4>\S+)(\sbrd\s(?P<bcast>\S+))?\sscope\s"
+ r"(?P<scope>\S+).*",
+ line,
+ )
if not m:
LOG.warning(
- 'Could not parse ip addr show: (line:%d) %s', num, line)
+ "Could not parse ip addr show: (line:%d) %s", num, line
+ )
continue
match = m.groupdict()
- cidr4 = match.pop('cidr4')
- addr, _, prefix = cidr4.partition('/')
+ cidr4 = match.pop("cidr4")
+ addr, _, prefix = cidr4.partition("/")
if not prefix:
- prefix = '32'
- devs[dev_name]['ipv4'].append({
- 'ip': addr,
- 'bcast': match['bcast'] if match['bcast'] else '',
- 'mask': net_prefix_to_ipv4_mask(prefix),
- 'scope': match['scope']})
- elif 'link' in line:
+ prefix = "32"
+ devs[dev_name]["ipv4"].append(
+ {
+ "ip": addr,
+ "bcast": match["bcast"] if match["bcast"] else "",
+ "mask": net_prefix_to_ipv4_mask(prefix),
+ "scope": match["scope"],
+ }
+ )
+ elif "link" in line:
m = re.match(
- r'\s+link/(?P<link_type>\S+)\s(?P<hwaddr>\S+).*', line)
+ r"\s+link/(?P<link_type>\S+)\s(?P<hwaddr>\S+).*", line
+ )
if not m:
LOG.warning(
- 'Could not parse ip addr show: (line:%d) %s', num, line)
+ "Could not parse ip addr show: (line:%d) %s", num, line
+ )
continue
- if m.group('link_type') == 'ether':
- devs[dev_name]['hwaddr'] = m.group('hwaddr')
+ if m.group("link_type") == "ether":
+ devs[dev_name]["hwaddr"] = m.group("hwaddr")
else:
- devs[dev_name]['hwaddr'] = ''
+ devs[dev_name]["hwaddr"] = ""
else:
continue
return devs
@@ -101,40 +106,41 @@ def _netdev_info_ifconfig_netbsd(ifconfig_data):
if line[0] not in ("\t", " "):
curdev = line.split()[0]
# current ifconfig pops a ':' on the end of the device
- if curdev.endswith(':'):
+ if curdev.endswith(":"):
curdev = curdev[:-1]
if curdev not in devs:
devs[curdev] = deepcopy(DEFAULT_NETDEV_INFO)
toks = line.lower().strip().split()
if len(toks) > 1:
if re.search(r"flags=[x\d]+<up.*>", toks[1]):
- devs[curdev]['up'] = True
+ devs[curdev]["up"] = True
for i in range(len(toks)):
if toks[i] == "inet": # Create new ipv4 addr entry
- network, net_bits = toks[i + 1].split('/')
- devs[curdev]['ipv4'].append(
- {'ip': network, 'mask': net_prefix_to_ipv4_mask(net_bits)})
+ network, net_bits = toks[i + 1].split("/")
+ devs[curdev]["ipv4"].append(
+ {"ip": network, "mask": net_prefix_to_ipv4_mask(net_bits)}
+ )
elif toks[i] == "broadcast":
- devs[curdev]['ipv4'][-1]['bcast'] = toks[i + 1]
+ devs[curdev]["ipv4"][-1]["bcast"] = toks[i + 1]
elif toks[i] == "address:":
- devs[curdev]['hwaddr'] = toks[i + 1]
+ devs[curdev]["hwaddr"] = toks[i + 1]
elif toks[i] == "inet6":
if toks[i + 1] == "addr:":
- devs[curdev]['ipv6'].append({'ip': toks[i + 2]})
+ devs[curdev]["ipv6"].append({"ip": toks[i + 2]})
else:
- devs[curdev]['ipv6'].append({'ip': toks[i + 1]})
+ devs[curdev]["ipv6"].append({"ip": toks[i + 1]})
elif toks[i] == "prefixlen": # Add prefix to current ipv6 value
- addr6 = devs[curdev]['ipv6'][-1]['ip'] + "/" + toks[i + 1]
- devs[curdev]['ipv6'][-1]['ip'] = addr6
+ addr6 = devs[curdev]["ipv6"][-1]["ip"] + "/" + toks[i + 1]
+ devs[curdev]["ipv6"][-1]["ip"] = addr6
elif toks[i].startswith("scope:"):
- devs[curdev]['ipv6'][-1]['scope6'] = toks[i].lstrip("scope:")
+ devs[curdev]["ipv6"][-1]["scope6"] = toks[i].lstrip("scope:")
elif toks[i] == "scopeid":
- res = re.match(r'.*<(\S+)>', toks[i + 1])
+ res = re.match(r".*<(\S+)>", toks[i + 1])
if res:
- devs[curdev]['ipv6'][-1]['scope6'] = res.group(1)
+ devs[curdev]["ipv6"][-1]["scope6"] = res.group(1)
else:
- devs[curdev]['ipv6'][-1]['scope6'] = toks[i + 1]
+ devs[curdev]["ipv6"][-1]["scope6"] = toks[i + 1]
return devs
@@ -148,49 +154,50 @@ def _netdev_info_ifconfig(ifconfig_data):
if line[0] not in ("\t", " "):
curdev = line.split()[0]
# current ifconfig pops a ':' on the end of the device
- if curdev.endswith(':'):
+ if curdev.endswith(":"):
curdev = curdev[:-1]
if curdev not in devs:
devs[curdev] = deepcopy(DEFAULT_NETDEV_INFO)
toks = line.lower().strip().split()
if toks[0] == "up":
- devs[curdev]['up'] = True
+ devs[curdev]["up"] = True
# If the output of ifconfig doesn't contain the required info in the
# obvious place, use a regex filter to be sure.
elif len(toks) > 1:
if re.search(r"flags=\d+<up,", toks[1]):
- devs[curdev]['up'] = True
+ devs[curdev]["up"] = True
for i in range(len(toks)):
if toks[i] == "inet": # Create new ipv4 addr entry
- devs[curdev]['ipv4'].append(
- {'ip': toks[i + 1].lstrip("addr:")})
+ devs[curdev]["ipv4"].append(
+ {"ip": toks[i + 1].lstrip("addr:")}
+ )
elif toks[i].startswith("bcast:"):
- devs[curdev]['ipv4'][-1]['bcast'] = toks[i].lstrip("bcast:")
+ devs[curdev]["ipv4"][-1]["bcast"] = toks[i].lstrip("bcast:")
elif toks[i] == "broadcast":
- devs[curdev]['ipv4'][-1]['bcast'] = toks[i + 1]
+ devs[curdev]["ipv4"][-1]["bcast"] = toks[i + 1]
elif toks[i].startswith("mask:"):
- devs[curdev]['ipv4'][-1]['mask'] = toks[i].lstrip("mask:")
+ devs[curdev]["ipv4"][-1]["mask"] = toks[i].lstrip("mask:")
elif toks[i] == "netmask":
- devs[curdev]['ipv4'][-1]['mask'] = toks[i + 1]
+ devs[curdev]["ipv4"][-1]["mask"] = toks[i + 1]
elif toks[i] == "hwaddr" or toks[i] == "ether":
- devs[curdev]['hwaddr'] = toks[i + 1]
+ devs[curdev]["hwaddr"] = toks[i + 1]
elif toks[i] == "inet6":
if toks[i + 1] == "addr:":
- devs[curdev]['ipv6'].append({'ip': toks[i + 2]})
+ devs[curdev]["ipv6"].append({"ip": toks[i + 2]})
else:
- devs[curdev]['ipv6'].append({'ip': toks[i + 1]})
+ devs[curdev]["ipv6"].append({"ip": toks[i + 1]})
elif toks[i] == "prefixlen": # Add prefix to current ipv6 value
- addr6 = devs[curdev]['ipv6'][-1]['ip'] + "/" + toks[i + 1]
- devs[curdev]['ipv6'][-1]['ip'] = addr6
+ addr6 = devs[curdev]["ipv6"][-1]["ip"] + "/" + toks[i + 1]
+ devs[curdev]["ipv6"][-1]["ip"] = addr6
elif toks[i].startswith("scope:"):
- devs[curdev]['ipv6'][-1]['scope6'] = toks[i].lstrip("scope:")
+ devs[curdev]["ipv6"][-1]["scope6"] = toks[i].lstrip("scope:")
elif toks[i] == "scopeid":
- res = re.match(r'.*<(\S+)>', toks[i + 1])
+ res = re.match(r".*<(\S+)>", toks[i + 1])
if res:
- devs[curdev]['ipv6'][-1]['scope6'] = res.group(1)
+ devs[curdev]["ipv6"][-1]["scope6"] = res.group(1)
else:
- devs[curdev]['ipv6'][-1]['scope6'] = toks[i + 1]
+ devs[curdev]["ipv6"][-1]["scope6"] = toks[i + 1]
return devs
@@ -200,17 +207,18 @@ def netdev_info(empty=""):
if util.is_NetBSD():
(ifcfg_out, _err) = subp.subp(["ifconfig", "-a"], rcs=[0, 1])
devs = _netdev_info_ifconfig_netbsd(ifcfg_out)
- elif subp.which('ip'):
+ elif subp.which("ip"):
# Try iproute first of all
(ipaddr_out, _err) = subp.subp(["ip", "addr", "show"])
devs = _netdev_info_iproute(ipaddr_out)
- elif subp.which('ifconfig'):
+ elif subp.which("ifconfig"):
# Fall back to net-tools if iproute2 is not present
(ifcfg_out, _err) = subp.subp(["ifconfig", "-a"], rcs=[0, 1])
devs = _netdev_info_ifconfig(ifcfg_out)
else:
LOG.warning(
- "Could not print networks: missing 'ip' and 'ifconfig' commands")
+ "Could not print networks: missing 'ip' and 'ifconfig' commands"
+ )
if empty == "":
return devs
@@ -219,7 +227,7 @@ def netdev_info(empty=""):
def fill(data, new_val="", empty_vals=("", b"")):
"""Recursively replace 'empty_vals' in data (dict, tuple, list)
- with new_val"""
+ with new_val"""
if isinstance(data, dict):
myiter = data.items()
elif isinstance(data, (tuple, list)):
@@ -249,46 +257,52 @@ def _netdev_route_info_iproute(iproute_data):
"""
routes = {}
- routes['ipv4'] = []
- routes['ipv6'] = []
+ routes["ipv4"] = []
+ routes["ipv6"] = []
entries = iproute_data.splitlines()
default_route_entry = {
- 'destination': '', 'flags': '', 'gateway': '', 'genmask': '',
- 'iface': '', 'metric': ''}
+ "destination": "",
+ "flags": "",
+ "gateway": "",
+ "genmask": "",
+ "iface": "",
+ "metric": "",
+ }
for line in entries:
entry = copy(default_route_entry)
if not line:
continue
toks = line.split()
- flags = ['U']
+ flags = ["U"]
if toks[0] == "default":
- entry['destination'] = "0.0.0.0"
- entry['genmask'] = "0.0.0.0"
+ entry["destination"] = "0.0.0.0"
+ entry["genmask"] = "0.0.0.0"
else:
- if '/' in toks[0]:
+ if "/" in toks[0]:
(addr, cidr) = toks[0].split("/")
else:
addr = toks[0]
- cidr = '32'
+ cidr = "32"
flags.append("H")
- entry['genmask'] = net_prefix_to_ipv4_mask(cidr)
- entry['destination'] = addr
- entry['genmask'] = net_prefix_to_ipv4_mask(cidr)
- entry['gateway'] = "0.0.0.0"
+ entry["genmask"] = net_prefix_to_ipv4_mask(cidr)
+ entry["destination"] = addr
+ entry["genmask"] = net_prefix_to_ipv4_mask(cidr)
+ entry["gateway"] = "0.0.0.0"
for i in range(len(toks)):
if toks[i] == "via":
- entry['gateway'] = toks[i + 1]
+ entry["gateway"] = toks[i + 1]
flags.insert(1, "G")
if toks[i] == "dev":
entry["iface"] = toks[i + 1]
if toks[i] == "metric":
- entry['metric'] = toks[i + 1]
- entry['flags'] = ''.join(flags)
- routes['ipv4'].append(entry)
+ entry["metric"] = toks[i + 1]
+ entry["flags"] = "".join(flags)
+ routes["ipv4"].append(entry)
try:
(iproute_data6, _err6) = subp.subp(
["ip", "--oneline", "-6", "route", "list", "table", "all"],
- rcs=[0, 1])
+ rcs=[0, 1],
+ )
except subp.ProcessExecutionError:
pass
else:
@@ -299,30 +313,30 @@ def _netdev_route_info_iproute(iproute_data):
continue
toks = line.split()
if toks[0] == "default":
- entry['destination'] = "::/0"
- entry['flags'] = "UG"
+ entry["destination"] = "::/0"
+ entry["flags"] = "UG"
else:
- entry['destination'] = toks[0]
- entry['gateway'] = "::"
- entry['flags'] = "U"
+ entry["destination"] = toks[0]
+ entry["gateway"] = "::"
+ entry["flags"] = "U"
for i in range(len(toks)):
if toks[i] == "via":
- entry['gateway'] = toks[i + 1]
- entry['flags'] = "UG"
+ entry["gateway"] = toks[i + 1]
+ entry["flags"] = "UG"
if toks[i] == "dev":
entry["iface"] = toks[i + 1]
if toks[i] == "metric":
- entry['metric'] = toks[i + 1]
+ entry["metric"] = toks[i + 1]
if toks[i] == "expires":
- entry['flags'] = entry['flags'] + 'e'
- routes['ipv6'].append(entry)
+ entry["flags"] = entry["flags"] + "e"
+ routes["ipv6"].append(entry)
return routes
def _netdev_route_info_netstat(route_data):
routes = {}
- routes['ipv4'] = []
- routes['ipv6'] = []
+ routes["ipv4"] = []
+ routes["ipv6"] = []
entries = route_data.splitlines()
for line in entries:
@@ -336,9 +350,14 @@ def _netdev_route_info_netstat(route_data):
# Linux netstat shows 2 more:
# Destination Gateway Genmask Flags Metric Ref Use Iface
# 0.0.0.0 10.65.0.1 0.0.0.0 UG 0 0 0 eth0
- if (len(toks) < 6 or toks[0] == "Kernel" or
- toks[0] == "Destination" or toks[0] == "Internet" or
- toks[0] == "Internet6" or toks[0] == "Routing"):
+ if (
+ len(toks) < 6
+ or toks[0] == "Kernel"
+ or toks[0] == "Destination"
+ or toks[0] == "Internet"
+ or toks[0] == "Internet6"
+ or toks[0] == "Routing"
+ ):
continue
if len(toks) < 8:
toks.append("-")
@@ -346,20 +365,21 @@ def _netdev_route_info_netstat(route_data):
toks[7] = toks[5]
toks[5] = "-"
entry = {
- 'destination': toks[0],
- 'gateway': toks[1],
- 'genmask': toks[2],
- 'flags': toks[3],
- 'metric': toks[4],
- 'ref': toks[5],
- 'use': toks[6],
- 'iface': toks[7],
+ "destination": toks[0],
+ "gateway": toks[1],
+ "genmask": toks[2],
+ "flags": toks[3],
+ "metric": toks[4],
+ "ref": toks[5],
+ "use": toks[6],
+ "iface": toks[7],
}
- routes['ipv4'].append(entry)
+ routes["ipv4"].append(entry)
try:
(route_data6, _err6) = subp.subp(
- ["netstat", "-A", "inet6", "--route", "--numeric"], rcs=[0, 1])
+ ["netstat", "-A", "inet6", "--route", "--numeric"], rcs=[0, 1]
+ )
except subp.ProcessExecutionError:
pass
else:
@@ -368,44 +388,52 @@ def _netdev_route_info_netstat(route_data):
if not line:
continue
toks = line.split()
- if (len(toks) < 7 or toks[0] == "Kernel" or
- toks[0] == "Destination" or toks[0] == "Internet" or
- toks[0] == "Proto" or toks[0] == "Active"):
+ if (
+ len(toks) < 7
+ or toks[0] == "Kernel"
+ or toks[0] == "Destination"
+ or toks[0] == "Internet"
+ or toks[0] == "Proto"
+ or toks[0] == "Active"
+ ):
continue
entry = {
- 'destination': toks[0],
- 'gateway': toks[1],
- 'flags': toks[2],
- 'metric': toks[3],
- 'ref': toks[4],
- 'use': toks[5],
- 'iface': toks[6],
+ "destination": toks[0],
+ "gateway": toks[1],
+ "flags": toks[2],
+ "metric": toks[3],
+ "ref": toks[4],
+ "use": toks[5],
+ "iface": toks[6],
}
# skip lo interface on ipv6
- if entry['iface'] == "lo":
+ if entry["iface"] == "lo":
continue
# strip /128 from address if it's included
- if entry['destination'].endswith('/128'):
- entry['destination'] = re.sub(
- r'\/128$', '', entry['destination'])
- routes['ipv6'].append(entry)
+ if entry["destination"].endswith("/128"):
+ entry["destination"] = re.sub(
+ r"\/128$", "", entry["destination"]
+ )
+ routes["ipv6"].append(entry)
return routes
def route_info():
routes = {}
- if subp.which('ip'):
+ if subp.which("ip"):
# Try iproute first of all
(iproute_out, _err) = subp.subp(["ip", "-o", "route", "list"])
routes = _netdev_route_info_iproute(iproute_out)
- elif subp.which('netstat'):
+ elif subp.which("netstat"):
# Fall back to net-tools if iproute2 is not present
(route_out, _err) = subp.subp(
- ["netstat", "--route", "--numeric", "--extend"], rcs=[0, 1])
+ ["netstat", "--route", "--numeric", "--extend"], rcs=[0, 1]
+ )
routes = _netdev_route_info_netstat(route_out)
else:
LOG.warning(
- "Could not print routes: missing 'ip' and 'netstat' commands")
+ "Could not print routes: missing 'ip' and 'netstat' commands"
+ )
return routes
@@ -418,24 +446,42 @@ def netdev_pformat():
lines.append(
util.center(
"Net device info failed ({error})".format(error=str(e)),
- '!', 80))
+ "!",
+ 80,
+ )
+ )
else:
if not netdev:
- return '\n'
- fields = ['Device', 'Up', 'Address', 'Mask', 'Scope', 'Hw-Address']
+ return "\n"
+ fields = ["Device", "Up", "Address", "Mask", "Scope", "Hw-Address"]
tbl = SimpleTable(fields)
for (dev, data) in sorted(netdev.items()):
- for addr in data.get('ipv4'):
+ for addr in data.get("ipv4"):
tbl.add_row(
- (dev, data["up"], addr["ip"], addr["mask"],
- addr.get('scope', empty), data["hwaddr"]))
- for addr in data.get('ipv6'):
+ (
+ dev,
+ data["up"],
+ addr["ip"],
+ addr["mask"],
+ addr.get("scope", empty),
+ data["hwaddr"],
+ )
+ )
+ for addr in data.get("ipv6"):
tbl.add_row(
- (dev, data["up"], addr["ip"], empty,
- addr.get("scope6", empty), data["hwaddr"]))
- if len(data.get('ipv6')) + len(data.get('ipv4')) == 0:
- tbl.add_row((dev, data["up"], empty, empty, empty,
- data["hwaddr"]))
+ (
+ dev,
+ data["up"],
+ addr["ip"],
+ empty,
+ addr.get("scope6", empty),
+ data["hwaddr"],
+ )
+ )
+ if len(data.get("ipv6")) + len(data.get("ipv4")) == 0:
+ tbl.add_row(
+ (dev, data["up"], empty, empty, empty, data["hwaddr"])
+ )
netdev_s = tbl.get_string()
max_len = len(max(netdev_s.splitlines(), key=len))
header = util.center("Net device info", "+", max_len)
@@ -450,33 +496,59 @@ def route_pformat():
except Exception as e:
lines.append(
util.center(
- 'Route info failed ({error})'.format(error=str(e)),
- '!', 80))
+ "Route info failed ({error})".format(error=str(e)), "!", 80
+ )
+ )
util.logexc(LOG, "Route info failed: %s" % e)
else:
- if routes.get('ipv4'):
- fields_v4 = ['Route', 'Destination', 'Gateway',
- 'Genmask', 'Interface', 'Flags']
+ if routes.get("ipv4"):
+ fields_v4 = [
+ "Route",
+ "Destination",
+ "Gateway",
+ "Genmask",
+ "Interface",
+ "Flags",
+ ]
tbl_v4 = SimpleTable(fields_v4)
- for (n, r) in enumerate(routes.get('ipv4')):
+ for (n, r) in enumerate(routes.get("ipv4")):
route_id = str(n)
- tbl_v4.add_row([route_id, r['destination'],
- r['gateway'], r['genmask'],
- r['iface'], r['flags']])
+ tbl_v4.add_row(
+ [
+ route_id,
+ r["destination"],
+ r["gateway"],
+ r["genmask"],
+ r["iface"],
+ r["flags"],
+ ]
+ )
route_s = tbl_v4.get_string()
max_len = len(max(route_s.splitlines(), key=len))
header = util.center("Route IPv4 info", "+", max_len)
lines.extend([header, route_s])
- if routes.get('ipv6'):
- fields_v6 = ['Route', 'Destination', 'Gateway', 'Interface',
- 'Flags']
+ if routes.get("ipv6"):
+ fields_v6 = [
+ "Route",
+ "Destination",
+ "Gateway",
+ "Interface",
+ "Flags",
+ ]
tbl_v6 = SimpleTable(fields_v6)
- for (n, r) in enumerate(routes.get('ipv6')):
+ for (n, r) in enumerate(routes.get("ipv6")):
route_id = str(n)
- if r['iface'] == 'lo':
+ if r["iface"] == "lo":
continue
- tbl_v6.add_row([route_id, r['destination'],
- r['gateway'], r['iface'], r['flags']])
+ tbl_v6.add_row(
+ [
+ route_id,
+ r["destination"],
+ r["gateway"],
+ r["iface"],
+ r["flags"],
+ ]
+ )
route_s = tbl_v6.get_string()
max_len = len(max(route_s.splitlines(), key=len))
header = util.center("Route IPv6 info", "+", max_len)
@@ -484,7 +556,7 @@ def route_pformat():
return "\n".join(lines) + "\n"
-def debug_info(prefix='ci-info: '):
+def debug_info(prefix="ci-info: "):
lines = []
netdev_lines = netdev_pformat().splitlines()
if prefix:
@@ -500,4 +572,5 @@ def debug_info(prefix='ci-info: '):
lines.extend(route_lines)
return "\n".join(lines)
+
# vi: ts=4 expandtab
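
For orientation, a sketch of the per-device structure the netinfo parsers above build; the interface, addresses and MAC below are invented, while the field names come from DEFAULT_NETDEV_INFO and the iproute/ifconfig parsers:

# Sketch of netdev_info()-style output for one imaginary NIC.
example_devs = {
    "eth0": {
        "up": True,
        "hwaddr": "ff:ee:dd:cc:bb:aa",
        "ipv4": [
            {
                "ip": "10.0.0.2",
                "bcast": "10.0.0.255",
                "mask": "255.255.255.0",
                "scope": "global",
            }
        ],
        "ipv6": [],
    }
}
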
diff --git a/cloudinit/patcher.py b/cloudinit/patcher.py
index 186d8ad8..516be22c 100644
--- a/cloudinit/patcher.py
+++ b/cloudinit/patcher.py
@@ -10,8 +10,9 @@ import logging
import sys
# Default fallback format
-FALL_FORMAT = ('FALLBACK: %(asctime)s - %(filename)s[%(levelname)s]: ' +
- '%(message)s')
+FALL_FORMAT = (
+ "FALLBACK: %(asctime)s - %(filename)s[%(levelname)s]: " + "%(message)s"
+)
class QuietStreamHandler(logging.StreamHandler):
@@ -34,6 +35,8 @@ def patch_logging():
fallback_handler.flush()
except IOError:
pass
- setattr(logging.Handler, 'handleError', handleError)
+
+ setattr(logging.Handler, "handleError", handleError)
+
# vi: ts=4 expandtab
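
A small sketch of what the FALL_FORMAT string above renders to; the timestamp and message are illustrative:

import logging

fallback_formatter = logging.Formatter(FALL_FORMAT)
# A WARNING record emitted from util.py would render roughly as:
#   FALLBACK: 2021-12-15 20:16:38,000 - util.py[WARNING]: something went wrong
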
diff --git a/cloudinit/registry.py b/cloudinit/registry.py
index 8e495641..5044e760 100644
--- a/cloudinit/registry.py
+++ b/cloudinit/registry.py
@@ -18,7 +18,8 @@ class DictRegistry(object):
"""Add item to the registry."""
if key in self._items:
raise ValueError(
- 'Item already registered with key {0}'.format(key))
+ "Item already registered with key {0}".format(key)
+ )
self._items[key] = item
def unregister_item(self, key, force=True):
@@ -36,4 +37,5 @@ class DictRegistry(object):
"""
return copy.copy(self._items)
+
# vi: ts=4 expandtab
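
A brief usage sketch of DictRegistry as reformatted above; the key and the placeholder item are illustrative:

registry = DictRegistry()
registry.register_item("log", object())
try:
    registry.register_item("log", object())  # duplicate key
except ValueError:
    pass  # "Item already registered with key log"
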
diff --git a/cloudinit/reporting/__init__.py b/cloudinit/reporting/__init__.py
index ed5c7038..06b5b49f 100644
--- a/cloudinit/reporting/__init__.py
+++ b/cloudinit/reporting/__init__.py
@@ -13,7 +13,7 @@ from ..registry import DictRegistry
from .handlers import available_handlers
DEFAULT_CONFIG = {
- 'logging': {'type': 'log'},
+ "logging": {"type": "log"},
}
@@ -28,10 +28,11 @@ def update_configuration(config):
for handler_name, handler_config in config.items():
if not handler_config:
instantiated_handler_registry.unregister_item(
- handler_name, force=True)
+ handler_name, force=True
+ )
continue
handler_config = handler_config.copy()
- cls = available_handlers.registered_items[handler_config.pop('type')]
+ cls = available_handlers.registered_items[handler_config.pop("type")]
instantiated_handler_registry.unregister_item(handler_name)
instance = cls(**handler_config)
instantiated_handler_registry.register_item(handler_name, instance)
@@ -39,7 +40,7 @@ def update_configuration(config):
def flush_events():
for _, handler in instantiated_handler_registry.registered_items.items():
- if hasattr(handler, 'flush'):
+ if hasattr(handler, "flush"):
handler.flush()
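
A hedged sketch of the configuration shape update_configuration() expects: each key names a handler instance, "type" selects a class registered in available_handlers, the remaining keys become constructor arguments, and a falsy value unregisters an existing handler. The handler names and endpoint URL below are purely illustrative:

update_configuration(
    {
        "webhook_01": {
            "type": "webhook",
            "endpoint": "http://203.0.113.1/cloud-init-events",
        },
        "logging": None,  # falsy: unregister this handler if present
    }
)
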
diff --git a/cloudinit/reporting/events.py b/cloudinit/reporting/events.py
index 9afad747..e53186a3 100644
--- a/cloudinit/reporting/events.py
+++ b/cloudinit/reporting/events.py
@@ -12,12 +12,12 @@ import base64
import os.path
import time
-from . import instantiated_handler_registry, available_handlers
+from . import available_handlers, instantiated_handler_registry
-FINISH_EVENT_TYPE = 'finish'
-START_EVENT_TYPE = 'start'
+FINISH_EVENT_TYPE = "finish"
+START_EVENT_TYPE = "start"
-DEFAULT_EVENT_ORIGIN = 'cloudinit'
+DEFAULT_EVENT_ORIGIN = "cloudinit"
class _nameset(set):
@@ -33,8 +33,14 @@ status = _nameset(("SUCCESS", "WARN", "FAIL"))
class ReportingEvent(object):
"""Encapsulation of event formatting."""
- def __init__(self, event_type, name, description,
- origin=DEFAULT_EVENT_ORIGIN, timestamp=None):
+ def __init__(
+ self,
+ event_type,
+ name,
+ description,
+ origin=DEFAULT_EVENT_ORIGIN,
+ timestamp=None,
+ ):
self.event_type = event_type
self.name = name
self.description = description
@@ -45,22 +51,28 @@ class ReportingEvent(object):
def as_string(self):
"""The event represented as a string."""
- return '{0}: {1}: {2}'.format(
- self.event_type, self.name, self.description)
+ return "{0}: {1}: {2}".format(
+ self.event_type, self.name, self.description
+ )
def as_dict(self):
"""The event represented as a dictionary."""
- return {'name': self.name, 'description': self.description,
- 'event_type': self.event_type, 'origin': self.origin,
- 'timestamp': self.timestamp}
+ return {
+ "name": self.name,
+ "description": self.description,
+ "event_type": self.event_type,
+ "origin": self.origin,
+ "timestamp": self.timestamp,
+ }
class FinishReportingEvent(ReportingEvent):
-
- def __init__(self, name, description, result=status.SUCCESS,
- post_files=None):
+ def __init__(
+ self, name, description, result=status.SUCCESS, post_files=None
+ ):
super(FinishReportingEvent, self).__init__(
- FINISH_EVENT_TYPE, name, description)
+ FINISH_EVENT_TYPE, name, description
+ )
self.result = result
if post_files is None:
post_files = []
@@ -69,15 +81,16 @@ class FinishReportingEvent(ReportingEvent):
raise ValueError("Invalid result: %s" % result)
def as_string(self):
- return '{0}: {1}: {2}: {3}'.format(
- self.event_type, self.name, self.result, self.description)
+ return "{0}: {1}: {2}: {3}".format(
+ self.event_type, self.name, self.result, self.description
+ )
def as_dict(self):
"""The event represented as json friendly."""
data = super(FinishReportingEvent, self).as_dict()
- data['result'] = self.result
+ data["result"] = self.result
if self.post_files:
- data['files'] = _collect_file_info(self.post_files)
+ data["files"] = _collect_file_info(self.post_files)
return data
@@ -110,14 +123,16 @@ def report_event(event, excluded_handler_types=None):
handler.publish_event(event)
-def report_finish_event(event_name, event_description,
- result=status.SUCCESS, post_files=None):
+def report_finish_event(
+ event_name, event_description, result=status.SUCCESS, post_files=None
+):
"""Report a "finish" event.
See :py:func:`.report_event` for parameter details.
"""
- event = FinishReportingEvent(event_name, event_description, result,
- post_files=post_files)
+ event = FinishReportingEvent(
+ event_name, event_description, result, post_files=post_files
+ )
return report_event(event)
@@ -174,9 +189,16 @@ class ReportEventStack(object):
Default value, if None, is an empty list.
"""
- def __init__(self, name, description, message=None, parent=None,
- reporting_enabled=None, result_on_exception=status.FAIL,
- post_files=None):
+ def __init__(
+ self,
+ name,
+ description,
+ message=None,
+ parent=None,
+ reporting_enabled=None,
+ result_on_exception=status.FAIL,
+ post_files=None,
+ ):
self.parent = parent
self.name = name
self.description = description
@@ -196,14 +218,22 @@ class ReportEventStack(object):
self.reporting_enabled = reporting_enabled
if parent:
- self.fullname = '/'.join((parent.fullname, name,))
+ self.fullname = "/".join(
+ (
+ parent.fullname,
+ name,
+ )
+ )
else:
self.fullname = self.name
self.children = {}
def __repr__(self):
- return ("ReportEventStack(%s, %s, reporting_enabled=%s)" %
- (self.name, self.description, self.reporting_enabled))
+ return "ReportEventStack(%s, %s, reporting_enabled=%s)" % (
+ self.name,
+ self.description,
+ self.reporting_enabled,
+ )
def __enter__(self):
self.result = status.SUCCESS
@@ -251,8 +281,9 @@ class ReportEventStack(object):
if self.parent:
self.parent.children[self.name] = (result, msg)
if self.reporting_enabled:
- report_finish_event(self.fullname, msg, result,
- post_files=self.post_files)
+ report_finish_event(
+ self.fullname, msg, result, post_files=self.post_files
+ )
def _collect_file_info(files):
@@ -265,8 +296,8 @@ def _collect_file_info(files):
else:
with open(fname, "rb") as fp:
content = base64.b64encode(fp.read()).decode()
- ret.append({'path': fname, 'content': content,
- 'encoding': 'base64'})
+ ret.append({"path": fname, "content": content, "encoding": "base64"})
return ret
+
# vi: ts=4 expandtab
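
A usage sketch of ReportEventStack from the hunk above: used as a context manager it brackets a unit of work with start and finish events when reporting is enabled (the name and description are invented):

with ReportEventStack(
    name="check-cache", description="attempting to read from cache"
):
    pass  # work performed here is reported between start and finish events
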
diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py
index e32739ef..e163e168 100755
--- a/cloudinit/reporting/handlers.py
+++ b/cloudinit/reporting/handlers.py
@@ -12,8 +12,8 @@ import uuid
from datetime import datetime
from cloudinit import log as logging
+from cloudinit import url_helper, util
from cloudinit.registry import DictRegistry
-from cloudinit import (url_helper, util)
LOG = logging.getLogger(__name__)
@@ -55,7 +55,8 @@ class LogHandler(ReportingHandler):
def publish_event(self, event):
logger = logging.getLogger(
- '.'.join(['cloudinit', 'reporting', event.event_type, event.name]))
+ ".".join(["cloudinit", "reporting", event.event_type, event.name])
+ )
logger.log(self.level, event.as_string())
@@ -67,15 +68,25 @@ class PrintHandler(ReportingHandler):
class WebHookHandler(ReportingHandler):
- def __init__(self, endpoint, consumer_key=None, token_key=None,
- token_secret=None, consumer_secret=None, timeout=None,
- retries=None):
+ def __init__(
+ self,
+ endpoint,
+ consumer_key=None,
+ token_key=None,
+ token_secret=None,
+ consumer_secret=None,
+ timeout=None,
+ retries=None,
+ ):
super(WebHookHandler, self).__init__()
if any([consumer_key, token_key, token_secret, consumer_secret]):
self.oauth_helper = url_helper.OauthUrlHelper(
- consumer_key=consumer_key, token_key=token_key,
- token_secret=token_secret, consumer_secret=consumer_secret)
+ consumer_key=consumer_key,
+ token_key=token_key,
+ token_secret=token_secret,
+ consumer_secret=consumer_secret,
+ )
else:
self.oauth_helper = None
self.endpoint = endpoint
@@ -90,9 +101,12 @@ class WebHookHandler(ReportingHandler):
readurl = url_helper.readurl
try:
return readurl(
- self.endpoint, data=json.dumps(event.as_dict()),
+ self.endpoint,
+ data=json.dumps(event.as_dict()),
timeout=self.timeout,
- retries=self.retries, ssl_details=self.ssl_details)
+ retries=self.retries,
+ ssl_details=self.ssl_details,
+ )
except Exception:
LOG.warning("failed posting event: %s", event.as_string())
@@ -112,33 +126,35 @@ class HyperVKvpReportingHandler(ReportingHandler):
For more information, see
https://technet.microsoft.com/en-us/library/dn798287.aspx#Linux%20guests
"""
+
HV_KVP_EXCHANGE_MAX_VALUE_SIZE = 2048
# The maximum value size expected in Azure
HV_KVP_AZURE_MAX_VALUE_SIZE = 1024
HV_KVP_EXCHANGE_MAX_KEY_SIZE = 512
- HV_KVP_RECORD_SIZE = (HV_KVP_EXCHANGE_MAX_KEY_SIZE +
- HV_KVP_EXCHANGE_MAX_VALUE_SIZE)
- EVENT_PREFIX = 'CLOUD_INIT'
- MSG_KEY = 'msg'
- RESULT_KEY = 'result'
- DESC_IDX_KEY = 'msg_i'
- JSON_SEPARATORS = (',', ':')
- KVP_POOL_FILE_GUEST = '/var/lib/hyperv/.kvp_pool_1'
+ HV_KVP_RECORD_SIZE = (
+ HV_KVP_EXCHANGE_MAX_KEY_SIZE + HV_KVP_EXCHANGE_MAX_VALUE_SIZE
+ )
+ EVENT_PREFIX = "CLOUD_INIT"
+ MSG_KEY = "msg"
+ RESULT_KEY = "result"
+ DESC_IDX_KEY = "msg_i"
+ JSON_SEPARATORS = (",", ":")
+ KVP_POOL_FILE_GUEST = "/var/lib/hyperv/.kvp_pool_1"
_already_truncated_pool_file = False
- def __init__(self,
- kvp_file_path=KVP_POOL_FILE_GUEST,
- event_types=None):
+ def __init__(self, kvp_file_path=KVP_POOL_FILE_GUEST, event_types=None):
super(HyperVKvpReportingHandler, self).__init__()
self._kvp_file_path = kvp_file_path
HyperVKvpReportingHandler._truncate_guest_pool_file(
- self._kvp_file_path)
+ self._kvp_file_path
+ )
self._event_types = event_types
self.q = queue.Queue()
self.incarnation_no = self._get_incarnation_no()
- self.event_key_prefix = "{0}|{1}".format(self.EVENT_PREFIX,
- self.incarnation_no)
+ self.event_key_prefix = "{0}|{1}".format(
+ self.EVENT_PREFIX, self.incarnation_no
+ )
self.publish_thread = threading.Thread(
target=self._publish_event_routine
)
@@ -184,7 +200,7 @@ class HyperVKvpReportingHandler(ReportingHandler):
def _iterate_kvps(self, offset):
"""iterate the kvp file from the current offset."""
- with open(self._kvp_file_path, 'rb') as f:
+ with open(self._kvp_file_path, "rb") as f:
fcntl.flock(f, fcntl.LOCK_EX)
f.seek(offset)
record_data = f.read(self.HV_KVP_RECORD_SIZE)
@@ -200,9 +216,9 @@ class HyperVKvpReportingHandler(ReportingHandler):
CLOUD_INIT|<incarnation number>|<event_type>|<event_name>|<uuid>
[|subevent_index]
"""
- return "{0}|{1}|{2}|{3}".format(self.event_key_prefix,
- event.event_type, event.name,
- uuid.uuid4())
+ return "{0}|{1}|{2}|{3}".format(
+ self.event_key_prefix, event.event_type, event.name, uuid.uuid4()
+ )
def _encode_kvp_item(self, key, value):
data = struct.pack(
@@ -220,19 +236,27 @@ class HyperVKvpReportingHandler(ReportingHandler):
record_data_len = len(record_data)
if record_data_len != self.HV_KVP_RECORD_SIZE:
raise ReportException(
- "record_data len not correct {0} {1}."
- .format(record_data_len, self.HV_KVP_RECORD_SIZE))
- k = (record_data[0:self.HV_KVP_EXCHANGE_MAX_KEY_SIZE].decode('utf-8')
- .strip('\x00'))
+ "record_data len not correct {0} {1}.".format(
+ record_data_len, self.HV_KVP_RECORD_SIZE
+ )
+ )
+ k = (
+ record_data[0 : self.HV_KVP_EXCHANGE_MAX_KEY_SIZE]
+ .decode("utf-8")
+ .strip("\x00")
+ )
v = (
record_data[
- self.HV_KVP_EXCHANGE_MAX_KEY_SIZE:self.HV_KVP_RECORD_SIZE
- ].decode('utf-8').strip('\x00'))
+ self.HV_KVP_EXCHANGE_MAX_KEY_SIZE : self.HV_KVP_RECORD_SIZE
+ ]
+ .decode("utf-8")
+ .strip("\x00")
+ )
- return {'key': k, 'value': v}
+ return {"key": k, "value": v}
def _append_kvp_item(self, record_data):
- with open(self._kvp_file_path, 'ab') as f:
+ with open(self._kvp_file_path, "ab") as f:
fcntl.flock(f, fcntl.LOCK_EX)
for data in record_data:
f.write(data)
@@ -242,22 +266,25 @@ class HyperVKvpReportingHandler(ReportingHandler):
def _break_down(self, key, meta_data, description):
del meta_data[self.MSG_KEY]
des_in_json = json.dumps(description)
- des_in_json = des_in_json[1:(len(des_in_json) - 1)]
+ des_in_json = des_in_json[1 : (len(des_in_json) - 1)]
i = 0
result_array = []
- message_place_holder = "\"" + self.MSG_KEY + "\":\"\""
+ message_place_holder = '"' + self.MSG_KEY + '":""'
while True:
meta_data[self.DESC_IDX_KEY] = i
- meta_data[self.MSG_KEY] = ''
- data_without_desc = json.dumps(meta_data,
- separators=self.JSON_SEPARATORS)
+ meta_data[self.MSG_KEY] = ""
+ data_without_desc = json.dumps(
+ meta_data, separators=self.JSON_SEPARATORS
+ )
room_for_desc = (
- self.HV_KVP_AZURE_MAX_VALUE_SIZE -
- len(data_without_desc) - 8)
+ self.HV_KVP_AZURE_MAX_VALUE_SIZE - len(data_without_desc) - 8
+ )
value = data_without_desc.replace(
message_place_holder,
'"{key}":"{desc}"'.format(
- key=self.MSG_KEY, desc=des_in_json[:room_for_desc]))
+ key=self.MSG_KEY, desc=des_in_json[:room_for_desc]
+ ),
+ )
subkey = "{}|{}".format(key, i)
result_array.append(self._encode_kvp_item(subkey, value))
i += 1
@@ -276,8 +303,9 @@ class HyperVKvpReportingHandler(ReportingHandler):
meta_data = {
"name": event.name,
"type": event.event_type,
- "ts": (datetime.utcfromtimestamp(event.timestamp)
- .isoformat() + 'Z'),
+ "ts": (
+ datetime.utcfromtimestamp(event.timestamp).isoformat() + "Z"
+ ),
}
if hasattr(event, self.RESULT_KEY):
meta_data[self.RESULT_KEY] = event.result
@@ -327,14 +355,14 @@ class HyperVKvpReportingHandler(ReportingHandler):
self.q.put(event)
def flush(self):
- LOG.debug('HyperVReportingHandler flushing remaining events')
+ LOG.debug("HyperVReportingHandler flushing remaining events")
self.q.join()
available_handlers = DictRegistry()
-available_handlers.register_item('log', LogHandler)
-available_handlers.register_item('print', PrintHandler)
-available_handlers.register_item('webhook', WebHookHandler)
-available_handlers.register_item('hyperv', HyperVKvpReportingHandler)
+available_handlers.register_item("log", LogHandler)
+available_handlers.register_item("print", PrintHandler)
+available_handlers.register_item("webhook", WebHookHandler)
+available_handlers.register_item("hyperv", HyperVKvpReportingHandler)
# vi: ts=4 expandtab
diff --git a/cloudinit/safeyaml.py b/cloudinit/safeyaml.py
index b95df27d..ba0e88c8 100644
--- a/cloudinit/safeyaml.py
+++ b/cloudinit/safeyaml.py
@@ -15,8 +15,9 @@ class _CustomSafeLoader(yaml.SafeLoader):
_CustomSafeLoader.add_constructor(
- 'tag:yaml.org,2002:python/unicode',
- _CustomSafeLoader.construct_python_unicode)
+ "tag:yaml.org,2002:python/unicode",
+ _CustomSafeLoader.construct_python_unicode,
+)
class NoAliasSafeDumper(yaml.dumper.SafeDumper):
@@ -27,19 +28,21 @@ class NoAliasSafeDumper(yaml.dumper.SafeDumper):
def load(blob):
- return(yaml.load(blob, Loader=_CustomSafeLoader))
+ return yaml.load(blob, Loader=_CustomSafeLoader)
def dumps(obj, explicit_start=True, explicit_end=True, noalias=False):
"""Return data in nicely formatted yaml."""
- return yaml.dump(obj,
- line_break="\n",
- indent=4,
- explicit_start=explicit_start,
- explicit_end=explicit_end,
- default_flow_style=False,
- Dumper=(NoAliasSafeDumper
- if noalias else yaml.dumper.Dumper))
+ return yaml.dump(
+ obj,
+ line_break="\n",
+ indent=4,
+ explicit_start=explicit_start,
+ explicit_end=explicit_end,
+ default_flow_style=False,
+ Dumper=(NoAliasSafeDumper if noalias else yaml.dumper.Dumper),
+ )
+
# vi: ts=4 expandtab
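
A small sketch of the dumps() defaults shown above, which produce explicit document start and end markers in block style (the mapping is illustrative):

text = dumps({"a": 1})
# With the defaults above, text resembles:
#   ---
#   a: 1
#   ...
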
diff --git a/cloudinit/serial.py b/cloudinit/serial.py
index 67486e09..a6f710ef 100644
--- a/cloudinit/serial.py
+++ b/cloudinit/serial.py
@@ -16,22 +16,31 @@ except ImportError:
@staticmethod
def write(data):
- raise IOError("Unable to perform serial `write` operation,"
- " pyserial not installed.")
+ raise IOError(
+ "Unable to perform serial `write` operation,"
+ " pyserial not installed."
+ )
@staticmethod
def readline():
- raise IOError("Unable to perform serial `readline` operation,"
- " pyserial not installed.")
+ raise IOError(
+ "Unable to perform serial `readline` operation,"
+ " pyserial not installed."
+ )
@staticmethod
def flush():
- raise IOError("Unable to perform serial `flush` operation,"
- " pyserial not installed.")
+ raise IOError(
+ "Unable to perform serial `flush` operation,"
+ " pyserial not installed."
+ )
@staticmethod
def read(size=1):
- raise IOError("Unable to perform serial `read` operation,"
- " pyserial not installed.")
+ raise IOError(
+ "Unable to perform serial `read` operation,"
+ " pyserial not installed."
+ )
+
# vi: ts=4 expandtab
diff --git a/cloudinit/settings.py b/cloudinit/settings.py
index 43c8fa24..ecc1403b 100644
--- a/cloudinit/settings.py
+++ b/cloudinit/settings.py
@@ -12,55 +12,55 @@
CFG_ENV_NAME = "CLOUD_CFG"
# This is expected to be a yaml formatted file
-CLOUD_CONFIG = '/etc/cloud/cloud.cfg'
+CLOUD_CONFIG = "/etc/cloud/cloud.cfg"
-RUN_CLOUD_CONFIG = '/run/cloud-init/cloud.cfg'
+RUN_CLOUD_CONFIG = "/run/cloud-init/cloud.cfg"
# What you get if no config is provided
CFG_BUILTIN = {
- 'datasource_list': [
- 'NoCloud',
- 'ConfigDrive',
- 'LXD',
- 'OpenNebula',
- 'DigitalOcean',
- 'Azure',
- 'AltCloud',
- 'OVF',
- 'MAAS',
- 'GCE',
- 'OpenStack',
- 'AliYun',
- 'Vultr',
- 'Ec2',
- 'CloudSigma',
- 'CloudStack',
- 'SmartOS',
- 'Bigstep',
- 'Scaleway',
- 'Hetzner',
- 'IBMCloud',
- 'Oracle',
- 'Exoscale',
- 'RbxCloud',
- 'UpCloud',
- 'VMware',
+ "datasource_list": [
+ "NoCloud",
+ "ConfigDrive",
+ "LXD",
+ "OpenNebula",
+ "DigitalOcean",
+ "Azure",
+ "AltCloud",
+ "OVF",
+ "MAAS",
+ "GCE",
+ "OpenStack",
+ "AliYun",
+ "Vultr",
+ "Ec2",
+ "CloudSigma",
+ "CloudStack",
+ "SmartOS",
+ "Bigstep",
+ "Scaleway",
+ "Hetzner",
+ "IBMCloud",
+ "Oracle",
+ "Exoscale",
+ "RbxCloud",
+ "UpCloud",
+ "VMware",
# At the end to act as a 'catch' when none of the above work...
- 'None',
+ "None",
],
- 'def_log_file': '/var/log/cloud-init.log',
- 'log_cfgs': [],
- 'syslog_fix_perms': ['syslog:adm', 'root:adm', 'root:wheel', 'root:root'],
- 'system_info': {
- 'paths': {
- 'cloud_dir': '/var/lib/cloud',
- 'templates_dir': '/etc/cloud/templates/',
+ "def_log_file": "/var/log/cloud-init.log",
+ "log_cfgs": [],
+ "syslog_fix_perms": ["syslog:adm", "root:adm", "root:wheel", "root:root"],
+ "system_info": {
+ "paths": {
+ "cloud_dir": "/var/lib/cloud",
+ "templates_dir": "/etc/cloud/templates/",
},
- 'distro': 'ubuntu',
- 'network': {'renderers': None},
+ "distro": "ubuntu",
+ "network": {"renderers": None},
},
- 'vendor_data': {'enabled': True, 'prefix': []},
- 'vendor_data2': {'enabled': True, 'prefix': []},
+ "vendor_data": {"enabled": True, "prefix": []},
+ "vendor_data2": {"enabled": True, "prefix": []},
}
# Valid frequencies of handlers/modules
diff --git a/cloudinit/signal_handler.py b/cloudinit/signal_handler.py
index 9272d22d..382c4616 100644
--- a/cloudinit/signal_handler.py
+++ b/cloudinit/signal_handler.py
@@ -20,11 +20,11 @@ LOG = logging.getLogger(__name__)
BACK_FRAME_TRACE_DEPTH = 3
EXIT_FOR = {
- signal.SIGINT: ('Cloud-init %(version)s received SIGINT, exiting...', 1),
- signal.SIGTERM: ('Cloud-init %(version)s received SIGTERM, exiting...', 1),
+ signal.SIGINT: ("Cloud-init %(version)s received SIGINT, exiting...", 1),
+ signal.SIGTERM: ("Cloud-init %(version)s received SIGTERM, exiting...", 1),
# Can't be caught...
# signal.SIGKILL: ('Cloud-init killed, exiting...', 1),
- signal.SIGABRT: ('Cloud-init %(version)s received SIGABRT, exiting...', 1),
+ signal.SIGABRT: ("Cloud-init %(version)s received SIGABRT, exiting...", 1),
}
@@ -41,12 +41,11 @@ def _pprint_frame(frame, depth, max_depth, contents):
def _handle_exit(signum, frame):
(msg, rc) = EXIT_FOR[signum]
- msg = msg % ({'version': vr.version_string()})
+ msg = msg % ({"version": vr.version_string()})
contents = StringIO()
contents.write("%s\n" % (msg))
_pprint_frame(frame, 1, BACK_FRAME_TRACE_DEPTH, contents)
- util.multi_log(contents.getvalue(),
- console=True, stderr=False, log=LOG)
+ util.multi_log(contents.getvalue(), console=True, stderr=False, log=LOG)
sys.exit(rc)
@@ -57,4 +56,5 @@ def attach_handlers():
sigs_attached += len(EXIT_FOR)
return sigs_attached
+
# vi: ts=4 expandtab
diff --git a/cloudinit/simpletable.py b/cloudinit/simpletable.py
index ca663cce..90281e06 100644
--- a/cloudinit/simpletable.py
+++ b/cloudinit/simpletable.py
@@ -22,27 +22,33 @@ class SimpleTable(object):
def update_column_widths(self, values):
for i, value in enumerate(values):
- self.column_widths[i] = max(
- len(value),
- self.column_widths[i])
+ self.column_widths[i] = max(len(value), self.column_widths[i])
def add_row(self, values):
if len(values) > len(self.fields):
- raise TypeError('too many values')
+ raise TypeError("too many values")
values = [str(value) for value in values]
self.rows.append(values)
self.update_column_widths(values)
def _hdiv(self):
"""Returns a horizontal divider for the table."""
- return '+' + '+'.join(
- ['-' * (w + 2) for w in self.column_widths]) + '+'
+ return (
+ "+" + "+".join(["-" * (w + 2) for w in self.column_widths]) + "+"
+ )
def _row(self, row):
"""Returns a formatted row."""
- return '|' + '|'.join(
- [col.center(self.column_widths[i] + 2)
- for i, col in enumerate(row)]) + '|'
+ return (
+ "|"
+ + "|".join(
+ [
+ col.center(self.column_widths[i] + 2)
+ for i, col in enumerate(row)
+ ]
+ )
+ + "|"
+ )
def __str__(self):
"""Returns a string representation of the table with lines around.
@@ -56,7 +62,7 @@ class SimpleTable(object):
"""
lines = [self._hdiv(), self._row(self.fields), self._hdiv()]
lines += [self._row(r) for r in self.rows] + [self._hdiv()]
- return '\n'.join(lines)
+ return "\n".join(lines)
def get_string(self):
return self.__str__()
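
A minimal usage sketch of SimpleTable as reformatted above: column widths grow to the widest cell and rows are centred between the '+', '-' and '|' borders (the field names and row are invented):

tbl = SimpleTable(["Device", "Up"])
tbl.add_row(["eth0", True])
print(tbl.get_string())
# +--------+------+
# | Device |  Up  |
# +--------+------+
# |  eth0  | True |
# +--------+------+
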
diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py
index 09052873..37f512e3 100644
--- a/cloudinit/sources/DataSourceAliYun.py
+++ b/cloudinit/sources/DataSourceAliYun.py
@@ -1,7 +1,6 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import dmi
-from cloudinit import sources
+from cloudinit import dmi, sources
from cloudinit.sources import DataSourceEc2 as EC2
ALIYUN_PRODUCT = "Alibaba Cloud ECS"
@@ -9,18 +8,18 @@ ALIYUN_PRODUCT = "Alibaba Cloud ECS"
class DataSourceAliYun(EC2.DataSourceEc2):
- dsname = 'AliYun'
- metadata_urls = ['http://100.100.100.200']
+ dsname = "AliYun"
+ metadata_urls = ["http://100.100.100.200"]
# The minimum supported metadata_version from the ec2 metadata apis
- min_metadata_version = '2016-01-01'
+ min_metadata_version = "2016-01-01"
extended_metadata_versions = []
def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False):
- return self.metadata.get('hostname', 'localhost.localdomain')
+ return self.metadata.get("hostname", "localhost.localdomain")
def get_public_ssh_keys(self):
- return parse_public_keys(self.metadata.get('public-keys', {}))
+ return parse_public_keys(self.metadata.get("public-keys", {}))
def _get_cloud_name(self):
if _is_aliyun():
@@ -30,7 +29,7 @@ class DataSourceAliYun(EC2.DataSourceEc2):
def _is_aliyun():
- return dmi.read_dmi_data('system-product-name') == ALIYUN_PRODUCT
+ return dmi.read_dmi_data("system-product-name") == ALIYUN_PRODUCT
def parse_public_keys(public_keys):
@@ -41,7 +40,7 @@ def parse_public_keys(public_keys):
elif isinstance(key_body, list):
keys.extend(key_body)
elif isinstance(key_body, dict):
- key = key_body.get('openssh-key', [])
+ key = key_body.get("openssh-key", [])
if isinstance(key, str):
keys.append(key.strip())
elif isinstance(key, list):
@@ -59,4 +58,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
index cd93412a..9029b535 100644
--- a/cloudinit/sources/DataSourceAltCloud.py
+++ b/cloudinit/sources/DataSourceAltCloud.py
@@ -7,10 +7,10 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-'''
+"""
This file contains code used to gather the user data passed to an
instance on RHEVm and vSphere.
-'''
+"""
import errno
import os
@@ -18,29 +18,26 @@ import os.path
from cloudinit import dmi
from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import subp
-from cloudinit import util
-
+from cloudinit import sources, subp, util
LOG = logging.getLogger(__name__)
# Needed file paths
-CLOUD_INFO_FILE = '/etc/sysconfig/cloud-info'
+CLOUD_INFO_FILE = "/etc/sysconfig/cloud-info"
# Shell command lists
-CMD_PROBE_FLOPPY = ['modprobe', 'floppy']
+CMD_PROBE_FLOPPY = ["modprobe", "floppy"]
META_DATA_NOT_SUPPORTED = {
- 'block-device-mapping': {},
- 'instance-id': 455,
- 'local-hostname': 'localhost',
- 'placement': {},
+ "block-device-mapping": {},
+ "instance-id": 455,
+ "local-hostname": "localhost",
+ "placement": {},
}
def read_user_data_callback(mount_dir):
- '''
+ """
Description:
This callback will be applied by util.mount_cb() on the mounted
file.
@@ -55,10 +52,10 @@ def read_user_data_callback(mount_dir):
Returns:
User Data
- '''
+ """
- deltacloud_user_data_file = mount_dir + '/deltacloud-user-data.txt'
- user_data_file = mount_dir + '/user-data.txt'
+ deltacloud_user_data_file = mount_dir + "/deltacloud-user-data.txt"
+ user_data_file = mount_dir + "/user-data.txt"
# First try deltacloud_user_data_file. On failure try user_data_file.
try:
@@ -67,7 +64,7 @@ def read_user_data_callback(mount_dir):
try:
user_data = util.load_file(user_data_file).strip()
except IOError:
- util.logexc(LOG, 'Failed accessing user data file.')
+ util.logexc(LOG, "Failed accessing user data file.")
return None
return user_data
@@ -75,7 +72,7 @@ def read_user_data_callback(mount_dir):
class DataSourceAltCloud(sources.DataSource):
- dsname = 'AltCloud'
+ dsname = "AltCloud"
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -87,7 +84,7 @@ class DataSourceAltCloud(sources.DataSource):
return "%s [seed=%s]" % (root, self.seed)
def get_cloud_type(self):
- '''
+ """
Description:
Get the type for the cloud back end this instance is running on
by examining the string returned by reading either:
@@ -101,31 +98,34 @@ class DataSourceAltCloud(sources.DataSource):
One of the following strings:
'RHEV', 'VSPHERE' or 'UNKNOWN'
- '''
+ """
if os.path.exists(CLOUD_INFO_FILE):
try:
cloud_type = util.load_file(CLOUD_INFO_FILE).strip().upper()
except IOError:
- util.logexc(LOG, 'Unable to access cloud info file at %s.',
- CLOUD_INFO_FILE)
- return 'UNKNOWN'
+ util.logexc(
+ LOG,
+ "Unable to access cloud info file at %s.",
+ CLOUD_INFO_FILE,
+ )
+ return "UNKNOWN"
return cloud_type
system_name = dmi.read_dmi_data("system-product-name")
if not system_name:
- return 'UNKNOWN'
+ return "UNKNOWN"
sys_name = system_name.upper()
- if sys_name.startswith('RHEV'):
- return 'RHEV'
+ if sys_name.startswith("RHEV"):
+ return "RHEV"
- if sys_name.startswith('VMWARE'):
- return 'VSPHERE'
+ if sys_name.startswith("VMWARE"):
+ return "VSPHERE"
- return 'UNKNOWN'
+ return "UNKNOWN"
def _get_data(self):
- '''
+ """
Description:
User Data is passed to the launching instance which
is used to perform instance configuration.
@@ -140,18 +140,18 @@ class DataSourceAltCloud(sources.DataSource):
Images not built with Imagefactory will try to
determine what the cloud provider is based on system
information.
- '''
+ """
- LOG.debug('Invoked get_data()')
+ LOG.debug("Invoked get_data()")
cloud_type = self.get_cloud_type()
- LOG.debug('cloud_type: %s', str(cloud_type))
+ LOG.debug("cloud_type: %s", str(cloud_type))
- if 'RHEV' in cloud_type:
+ if "RHEV" in cloud_type:
if self.user_data_rhevm():
return True
- elif 'VSPHERE' in cloud_type:
+ elif "VSPHERE" in cloud_type:
if self.user_data_vsphere():
return True
else:
@@ -160,20 +160,20 @@ class DataSourceAltCloud(sources.DataSource):
return False
# No user data found
- util.logexc(LOG, 'Failed accessing user data.')
+ util.logexc(LOG, "Failed accessing user data.")
return False
def _get_subplatform(self):
"""Return the subplatform metadata details."""
cloud_type = self.get_cloud_type()
- if not hasattr(self, 'source'):
+ if not hasattr(self, "source"):
self.source = sources.METADATA_UNKNOWN
- if cloud_type == 'RHEV':
- self.source = '/dev/fd0'
- return '%s (%s)' % (cloud_type.lower(), self.source)
+ if cloud_type == "RHEV":
+ self.source = "/dev/fd0"
+ return "%s (%s)" % (cloud_type.lower(), self.source)
def user_data_rhevm(self):
- '''
+ """
RHEVM specific userdata read
If on RHEV-M the user data will be contained on the
@@ -186,7 +186,7 @@ class DataSourceAltCloud(sources.DataSource):
mount /dev/fd0 <tmp mount dir>
The call back passed to util.mount_cb will do:
read <tmp mount dir>/<user_data_file>
- '''
+ """
return_str = None
@@ -194,16 +194,16 @@ class DataSourceAltCloud(sources.DataSource):
try:
modprobe_floppy()
except subp.ProcessExecutionError as e:
- util.logexc(LOG, 'Failed modprobe: %s', e)
+ util.logexc(LOG, "Failed modprobe: %s", e)
return False
- floppy_dev = '/dev/fd0'
+ floppy_dev = "/dev/fd0"
# udevadm settle for floppy device
try:
util.udevadm_settle(exists=floppy_dev, timeout=5)
except (subp.ProcessExecutionError, OSError) as e:
- util.logexc(LOG, 'Failed udevadm_settle: %s\n', e)
+ util.logexc(LOG, "Failed udevadm_settle: %s\n", e)
return False
try:
@@ -212,8 +212,11 @@ class DataSourceAltCloud(sources.DataSource):
if err.errno != errno.ENOENT:
raise
except util.MountFailedError:
- util.logexc(LOG, "Failed to mount %s when looking for user data",
- floppy_dev)
+ util.logexc(
+ LOG,
+ "Failed to mount %s when looking for user data",
+ floppy_dev,
+ )
self.userdata_raw = return_str
self.metadata = META_DATA_NOT_SUPPORTED
@@ -224,7 +227,7 @@ class DataSourceAltCloud(sources.DataSource):
return False
def user_data_vsphere(self):
- '''
+ """
vSphere specific userdata read
If on vSphere the user data will be contained on the
@@ -235,10 +238,10 @@ class DataSourceAltCloud(sources.DataSource):
mount /dev/fd0 <tmp mount dir>
The call back passed to util.mount_cb will do:
read <tmp mount dir>/<user_data_file>
- '''
+ """
return_str = None
- cdrom_list = util.find_devs_with('LABEL=CDROM')
+ cdrom_list = util.find_devs_with("LABEL=CDROM")
for cdrom_dev in cdrom_list:
try:
return_str = util.mount_cb(cdrom_dev, read_user_data_callback)
@@ -249,8 +252,11 @@ class DataSourceAltCloud(sources.DataSource):
if err.errno != errno.ENOENT:
raise
except util.MountFailedError:
- util.logexc(LOG, "Failed to mount %s when looking for user "
- "data", cdrom_dev)
+ util.logexc(
+ LOG,
+ "Failed to mount %s when looking for user data",
+ cdrom_dev,
+ )
self.userdata_raw = return_str
self.metadata = META_DATA_NOT_SUPPORTED
@@ -263,7 +269,7 @@ class DataSourceAltCloud(sources.DataSource):
def modprobe_floppy():
out, _err = subp.subp(CMD_PROBE_FLOPPY)
- LOG.debug('Command: %s\nOutput%s', ' '.join(CMD_PROBE_FLOPPY), out)
+ LOG.debug("Command: %s\nOutput%s", " ".join(CMD_PROBE_FLOPPY), out)
# Used to match classes to dependencies
@@ -279,4 +285,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index eee98fa8..a8b403e8 100755
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -5,66 +5,62 @@
# This file is part of cloud-init. See LICENSE file for license information.
import base64
-from collections import namedtuple
import crypt
-from functools import partial
import os
import os.path
import re
-from time import time
-from time import sleep
-from xml.dom import minidom
import xml.etree.ElementTree as ET
+from collections import namedtuple
from enum import Enum
+from functools import partial
+from time import sleep, time
+from xml.dom import minidom
+
import requests
from cloudinit import dmi
from cloudinit import log as logging
-from cloudinit import net
+from cloudinit import net, sources, ssh_util, subp, util
from cloudinit.event import EventScope, EventType
from cloudinit.net import device_driver
from cloudinit.net.dhcp import EphemeralDHCPv4
-from cloudinit import sources
-from cloudinit.sources.helpers import netlink
-from cloudinit import ssh_util
-from cloudinit import subp
-from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc
-from cloudinit import util
from cloudinit.reporting import events
-
+from cloudinit.sources.helpers import netlink
from cloudinit.sources.helpers.azure import (
DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE,
+ EphemeralDHCPv4WithReporting,
azure_ds_reporter,
azure_ds_telemetry_reporter,
- get_metadata_from_fabric,
+ build_minimal_ovf,
+ dhcp_log_cb,
get_boot_telemetry,
+ get_metadata_from_fabric,
get_system_info,
- report_diagnostic_event,
- EphemeralDHCPv4WithReporting,
is_byte_swapped,
- dhcp_log_cb,
push_log_to_kvp,
+ report_diagnostic_event,
report_failure_to_fabric,
- build_minimal_ovf)
+)
+from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc
LOG = logging.getLogger(__name__)
-DS_NAME = 'Azure'
+DS_NAME = "Azure"
DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"}
# azure systems will always have a resource disk, and 66-azure-ephemeral.rules
# ensures that it gets linked to this path.
-RESOURCE_DISK_PATH = '/dev/disk/cloud/azure_resource'
-LEASE_FILE = '/var/lib/dhcp/dhclient.eth0.leases'
-DEFAULT_FS = 'ext4'
+RESOURCE_DISK_PATH = "/dev/disk/cloud/azure_resource"
+LEASE_FILE = "/var/lib/dhcp/dhclient.eth0.leases"
+DEFAULT_FS = "ext4"
# DMI chassis-asset-tag is set static for all azure instances
-AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77'
+AZURE_CHASSIS_ASSET_TAG = "7783-7084-3265-9085-8269-3286-77"
REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds"
REPROVISION_NIC_ATTACH_MARKER_FILE = "/var/lib/cloud/data/wait_for_nic_attach"
REPROVISION_NIC_DETACHED_MARKER_FILE = "/var/lib/cloud/data/nic_detached"
REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready"
-AGENT_SEED_DIR = '/var/lib/waagent'
-DEFAULT_PROVISIONING_ISO_DEV = '/dev/sr0'
+AGENT_SEED_DIR = "/var/lib/waagent"
+DEFAULT_PROVISIONING_ISO_DEV = "/dev/sr0"
# In the event where the IMDS primary server is not
# available, it takes 1s to fallback to the secondary one
@@ -90,10 +86,10 @@ PLATFORM_ENTROPY_SOURCE = "/sys/firmware/acpi/tables/OEM0"
# List of static scripts and network config artifacts created by
# stock ubuntu suported images.
UBUNTU_EXTENDED_NETWORK_SCRIPTS = [
- '/etc/netplan/90-hotplug-azure.yaml',
- '/usr/local/sbin/ephemeral_eth.sh',
- '/etc/udev/rules.d/10-net-device-added.rules',
- '/run/network/interfaces.ephemeral.d',
+ "/etc/netplan/90-hotplug-azure.yaml",
+ "/usr/local/sbin/ephemeral_eth.sh",
+ "/etc/udev/rules.d/10-net-device-added.rules",
+ "/run/network/interfaces.ephemeral.d",
]
# This list is used to blacklist devices that will be considered
@@ -113,7 +109,7 @@ UBUNTU_EXTENDED_NETWORK_SCRIPTS = [
# https://docs.microsoft.com/en-us/azure/virtual-machines/dv2-dsv2-series
# https://docs.microsoft.com/en-us/azure/virtual-machines/dv3-dsv3-series
# https://docs.microsoft.com/en-us/azure/virtual-machines/ev3-esv3-series
-BLACKLIST_DRIVERS = ['mlx4_core', 'mlx5_core']
+BLACKLIST_DRIVERS = ["mlx4_core", "mlx5_core"]
def find_storvscid_from_sysctl_pnpinfo(sysctl_out, deviceid):
@@ -127,11 +123,13 @@ def find_storvscid_from_sysctl_pnpinfo(sysctl_out, deviceid):
if re.search(r"pnpinfo", line):
fields = line.split()
if len(fields) >= 3:
- columns = fields[2].split('=')
- if (len(columns) >= 2 and
- columns[0] == "deviceid" and
- columns[1].startswith(deviceid)):
- comps = fields[0].split('.')
+ columns = fields[2].split("=")
+ if (
+ len(columns) >= 2
+ and columns[0] == "deviceid"
+ and columns[1].startswith(deviceid)
+ ):
+ comps = fields[0].split(".")
return comps[2]
return None
@@ -165,9 +163,9 @@ def find_dev_from_busdev(camcontrol_out, busdev):
"""
for line in camcontrol_out.splitlines():
if re.search(busdev, line):
- items = line.split('(')
+ items = line.split("(")
if len(items) == 2:
- dev_pass = items[1].split(',')
+ dev_pass = items[1].split(",")
return dev_pass[0]
return None
@@ -176,7 +174,7 @@ def execute_or_debug(cmd, fail_ret=None):
try:
return subp.subp(cmd)[0]
except subp.ProcessExecutionError:
- LOG.debug("Failed to execute: %s", ' '.join(cmd))
+ LOG.debug("Failed to execute: %s", " ".join(cmd))
return fail_ret
@@ -185,11 +183,11 @@ def get_dev_storvsc_sysctl():
def get_camcontrol_dev_bus():
- return execute_or_debug(['camcontrol', 'devlist', '-b'])
+ return execute_or_debug(["camcontrol", "devlist", "-b"])
def get_camcontrol_dev():
- return execute_or_debug(['camcontrol', 'devlist'])
+ return execute_or_debug(["camcontrol", "devlist"])
def get_resource_disk_on_freebsd(port_id):
@@ -236,8 +234,8 @@ def get_resource_disk_on_freebsd(port_id):
# update the FreeBSD specific information
if util.is_FreeBSD():
- LEASE_FILE = '/var/db/dhclient.leases.hn0'
- DEFAULT_FS = 'freebsd-ufs'
+ LEASE_FILE = "/var/db/dhclient.leases.hn0"
+ DEFAULT_FS = "freebsd-ufs"
res_disk = get_resource_disk_on_freebsd(1)
if res_disk is not None:
LOG.debug("resource disk is not None")
@@ -248,52 +246,55 @@ if util.is_FreeBSD():
PLATFORM_ENTROPY_SOURCE = None
BUILTIN_DS_CONFIG = {
- 'data_dir': AGENT_SEED_DIR,
- 'disk_aliases': {'ephemeral0': RESOURCE_DISK_PATH},
- 'dhclient_lease_file': LEASE_FILE,
- 'apply_network_config': True, # Use IMDS published network configuration
+ "data_dir": AGENT_SEED_DIR,
+ "disk_aliases": {"ephemeral0": RESOURCE_DISK_PATH},
+ "dhclient_lease_file": LEASE_FILE,
+ "apply_network_config": True, # Use IMDS published network configuration
}
# RELEASE_BLOCKER: Xenial and earlier apply_network_config default is False
BUILTIN_CLOUD_EPHEMERAL_DISK_CONFIG = {
- 'disk_setup': {
- 'ephemeral0': {'table_type': 'gpt',
- 'layout': [100],
- 'overwrite': True},
+ "disk_setup": {
+ "ephemeral0": {
+ "table_type": "gpt",
+ "layout": [100],
+ "overwrite": True,
+ },
},
- 'fs_setup': [{'filesystem': DEFAULT_FS,
- 'device': 'ephemeral0.1'}],
+ "fs_setup": [{"filesystem": DEFAULT_FS, "device": "ephemeral0.1"}],
}
-DS_CFG_PATH = ['datasource', DS_NAME]
-DS_CFG_KEY_PRESERVE_NTFS = 'never_destroy_ntfs'
-DEF_EPHEMERAL_LABEL = 'Temporary Storage'
+DS_CFG_PATH = ["datasource", DS_NAME]
+DS_CFG_KEY_PRESERVE_NTFS = "never_destroy_ntfs"
+DEF_EPHEMERAL_LABEL = "Temporary Storage"
# The redacted password fails to meet password complexity requirements
# so we can safely use this to mask/redact the password in the ovf-env.xml
-DEF_PASSWD_REDACTION = 'REDACTED'
+DEF_PASSWD_REDACTION = "REDACTED"
class DataSourceAzure(sources.DataSource):
- dsname = 'Azure'
- default_update_events = {EventScope.NETWORK: {
- EventType.BOOT_NEW_INSTANCE,
- EventType.BOOT,
- }}
+ dsname = "Azure"
+ default_update_events = {
+ EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ }
+ }
_negotiated = False
_metadata_imds = sources.UNSET
_ci_pkl_version = 1
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.seed_dir = os.path.join(paths.seed_dir, 'azure')
+ self.seed_dir = os.path.join(paths.seed_dir, "azure")
self.cfg = {}
self.seed = None
- self.ds_cfg = util.mergemanydict([
- util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
- BUILTIN_DS_CONFIG])
- self.dhclient_lease_file = self.ds_cfg.get('dhclient_lease_file')
+ self.ds_cfg = util.mergemanydict(
+ [util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}), BUILTIN_DS_CONFIG]
+ )
+ self.dhclient_lease_file = self.ds_cfg.get("dhclient_lease_file")
self._network_config = None
self._ephemeral_dhcp_ctx = None
self.failed_desired_api_version = False
@@ -312,13 +313,13 @@ class DataSourceAzure(sources.DataSource):
def _get_subplatform(self):
"""Return the subplatform metadata source details."""
- if self.seed.startswith('/dev'):
- subplatform_type = 'config-disk'
- elif self.seed.lower() == 'imds':
- subplatform_type = 'imds'
+ if self.seed.startswith("/dev"):
+ subplatform_type = "config-disk"
+ elif self.seed.lower() == "imds":
+ subplatform_type = "imds"
else:
- subplatform_type = 'seed-dir'
- return '%s (%s)' % (subplatform_type, self.seed)
+ subplatform_type = "seed-dir"
+ return "%s (%s)" % (subplatform_type, self.seed)
@azure_ds_telemetry_reporter
def crawl_metadata(self):
@@ -332,7 +333,7 @@ class DataSourceAzure(sources.DataSource):
# azure removes/ejects the cdrom containing the ovf-env.xml
# file on reboot. So, in order to successfully reboot we
# need to look in the datadir and consider that valid
- ddir = self.ds_cfg['data_dir']
+ ddir = self.ds_cfg["data_dir"]
# The order in which the candidates are inserted matters here, because
# it determines the value of ret. More specifically, the first one in
@@ -346,25 +347,28 @@ class DataSourceAzure(sources.DataSource):
if os.path.isfile(REPROVISION_MARKER_FILE):
reprovision = True
metadata_source = "IMDS"
- report_diagnostic_event("Reprovision marker file already present "
- "before crawling Azure metadata: %s" %
- REPROVISION_MARKER_FILE,
- logger_func=LOG.debug)
+ report_diagnostic_event(
+ "Reprovision marker file already present "
+ "before crawling Azure metadata: %s" % REPROVISION_MARKER_FILE,
+ logger_func=LOG.debug,
+ )
elif os.path.isfile(REPROVISION_NIC_ATTACH_MARKER_FILE):
reprovision_after_nic_attach = True
metadata_source = "NIC_ATTACH_MARKER_PRESENT"
- report_diagnostic_event("Reprovision nic attach marker file "
- "already present before crawling Azure "
- "metadata: %s" %
- REPROVISION_NIC_ATTACH_MARKER_FILE,
- logger_func=LOG.debug)
+ report_diagnostic_event(
+ "Reprovision nic attach marker file "
+ "already present before crawling Azure "
+ "metadata: %s" % REPROVISION_NIC_ATTACH_MARKER_FILE,
+ logger_func=LOG.debug,
+ )
else:
for src in list_possible_azure_ds(self.seed_dir, ddir):
try:
if src.startswith("/dev/"):
if util.is_FreeBSD():
- ret = util.mount_cb(src, load_azure_ds_dir,
- mtype="udf")
+ ret = util.mount_cb(
+ src, load_azure_ds_dir, mtype="udf"
+ )
else:
ret = util.mount_cb(src, load_azure_ds_dir)
# save the device for ejection later
@@ -377,36 +381,33 @@ class DataSourceAzure(sources.DataSource):
except NonAzureDataSource:
report_diagnostic_event(
"Did not find Azure data source in %s" % src,
- logger_func=LOG.debug)
+ logger_func=LOG.debug,
+ )
continue
except util.MountFailedError:
report_diagnostic_event(
- '%s was not mountable' % src,
- logger_func=LOG.debug)
+ "%s was not mountable" % src, logger_func=LOG.debug
+ )
ovf_is_accessible = False
- empty_md = {'local-hostname': ''}
+ empty_md = {"local-hostname": ""}
empty_cfg = dict(
- system_info=dict(
- default_user=dict(
- name=''
- )
- )
+ system_info=dict(default_user=dict(name=""))
)
- ret = (empty_md, '', empty_cfg, {})
- metadata_source = 'IMDS'
+ ret = (empty_md, "", empty_cfg, {})
+ metadata_source = "IMDS"
continue
except BrokenAzureDataSource as exc:
- msg = 'BrokenAzureDataSource: %s' % exc
+ msg = "BrokenAzureDataSource: %s" % exc
report_diagnostic_event(msg, logger_func=LOG.error)
raise sources.InvalidMetaDataException(msg)
report_diagnostic_event(
"Found provisioning metadata in %s" % metadata_source,
- logger_func=LOG.debug)
+ logger_func=LOG.debug,
+ )
imds_md = self.get_imds_data_with_api_fallback(
- self.fallback_interface,
- retries=10
+ self.fallback_interface, retries=10
)
# reset _fallback_interface so that if the code enters reprovisioning
@@ -414,16 +415,17 @@ class DataSourceAzure(sources.DataSource):
self._fallback_interface = None
if not imds_md and not ovf_is_accessible:
- msg = 'No OVF or IMDS available'
+ msg = "No OVF or IMDS available"
report_diagnostic_event(msg)
raise sources.InvalidMetaDataException(msg)
- perform_reprovision = (
- reprovision or
- self._should_reprovision(ret, imds_md))
+ perform_reprovision = reprovision or self._should_reprovision(
+ ret, imds_md
+ )
perform_reprovision_after_nic_attach = (
- reprovision_after_nic_attach or
- self._should_reprovision_after_nic_attach(ret, imds_md))
+ reprovision_after_nic_attach
+ or self._should_reprovision_after_nic_attach(ret, imds_md)
+ )
if perform_reprovision or perform_reprovision_after_nic_attach:
if util.is_FreeBSD():
@@ -435,45 +437,50 @@ class DataSourceAzure(sources.DataSource):
ret = self._reprovision()
# fetch metadata again as it has changed after reprovisioning
imds_md = self.get_imds_data_with_api_fallback(
- self.fallback_interface,
- retries=10
+ self.fallback_interface, retries=10
)
(md, userdata_raw, cfg, files) = ret
self.seed = metadata_source
- crawled_data.update({
- 'cfg': cfg,
- 'files': files,
- 'metadata': util.mergemanydict(
- [md, {'imds': imds_md}]),
- 'userdata_raw': userdata_raw})
+ crawled_data.update(
+ {
+ "cfg": cfg,
+ "files": files,
+ "metadata": util.mergemanydict([md, {"imds": imds_md}]),
+ "userdata_raw": userdata_raw,
+ }
+ )
imds_username = _username_from_imds(imds_md)
imds_hostname = _hostname_from_imds(imds_md)
imds_disable_password = _disable_password_from_imds(imds_md)
if imds_username:
- LOG.debug('Username retrieved from IMDS: %s', imds_username)
- cfg['system_info']['default_user']['name'] = imds_username
+ LOG.debug("Username retrieved from IMDS: %s", imds_username)
+ cfg["system_info"]["default_user"]["name"] = imds_username
if imds_hostname:
- LOG.debug('Hostname retrieved from IMDS: %s', imds_hostname)
- crawled_data['metadata']['local-hostname'] = imds_hostname
+ LOG.debug("Hostname retrieved from IMDS: %s", imds_hostname)
+ crawled_data["metadata"]["local-hostname"] = imds_hostname
if imds_disable_password:
LOG.debug(
- 'Disable password retrieved from IMDS: %s',
- imds_disable_password
+ "Disable password retrieved from IMDS: %s",
+ imds_disable_password,
)
- crawled_data['metadata']['disable_password'] = imds_disable_password # noqa: E501
+ crawled_data["metadata"][
+ "disable_password"
+ ] = imds_disable_password
- if metadata_source == 'IMDS' and not crawled_data['files']:
+ if metadata_source == "IMDS" and not crawled_data["files"]:
try:
contents = build_minimal_ovf(
username=imds_username,
hostname=imds_hostname,
- disableSshPwd=imds_disable_password)
- crawled_data['files'] = {'ovf-env.xml': contents}
+ disableSshPwd=imds_disable_password,
+ )
+ crawled_data["files"] = {"ovf-env.xml": contents}
except Exception as e:
report_diagnostic_event(
"Failed to construct OVF from IMDS data %s" % e,
- logger_func=LOG.debug)
+ logger_func=LOG.debug,
+ )
# only use userdata from imds if OVF did not provide custom data
# userdata provided by IMDS is always base64 encoded
@@ -482,48 +489,53 @@ class DataSourceAzure(sources.DataSource):
if imds_userdata:
LOG.debug("Retrieved userdata from IMDS")
try:
- crawled_data['userdata_raw'] = base64.b64decode(
- ''.join(imds_userdata.split()))
+ crawled_data["userdata_raw"] = base64.b64decode(
+ "".join(imds_userdata.split())
+ )
except Exception:
report_diagnostic_event(
- "Bad userdata in IMDS",
- logger_func=LOG.warning)
+ "Bad userdata in IMDS", logger_func=LOG.warning
+ )
if not metadata_source:
- msg = 'No Azure metadata found'
+ msg = "No Azure metadata found"
report_diagnostic_event(msg, logger_func=LOG.error)
raise sources.InvalidMetaDataException(msg)
else:
report_diagnostic_event(
- 'found datasource in %s' % metadata_source,
- logger_func=LOG.debug)
+ "found datasource in %s" % metadata_source,
+ logger_func=LOG.debug,
+ )
if metadata_source == ddir:
report_diagnostic_event(
- "using files cached in %s" % ddir, logger_func=LOG.debug)
+ "using files cached in %s" % ddir, logger_func=LOG.debug
+ )
seed = _get_random_seed()
if seed:
- crawled_data['metadata']['random_seed'] = seed
- crawled_data['metadata']['instance-id'] = self._iid()
+ crawled_data["metadata"]["random_seed"] = seed
+ crawled_data["metadata"]["instance-id"] = self._iid()
if perform_reprovision or perform_reprovision_after_nic_attach:
LOG.info("Reporting ready to Azure after getting ReprovisionData")
- use_cached_ephemeral = (
- self.distro.networking.is_up(self.fallback_interface) and
- getattr(self, '_ephemeral_dhcp_ctx', None))
+ use_cached_ephemeral = self.distro.networking.is_up(
+ self.fallback_interface
+ ) and getattr(self, "_ephemeral_dhcp_ctx", None)
if use_cached_ephemeral:
self._report_ready(lease=self._ephemeral_dhcp_ctx.lease)
self._ephemeral_dhcp_ctx.clean_network() # Teardown ephemeral
else:
try:
with EphemeralDHCPv4WithReporting(
- azure_ds_reporter) as lease:
+ azure_ds_reporter
+ ) as lease:
self._report_ready(lease=lease)
except Exception as e:
report_diagnostic_event(
"exception while reporting ready: %s" % e,
- logger_func=LOG.error)
+ logger_func=LOG.error,
+ )
raise
return crawled_data
@@ -559,19 +571,24 @@ class DataSourceAzure(sources.DataSource):
try:
crawled_data = util.log_time(
- logfunc=LOG.debug, msg='Crawl of metadata service',
- func=self.crawl_metadata
+ logfunc=LOG.debug,
+ msg="Crawl of metadata service",
+ func=self.crawl_metadata,
)
except Exception as e:
report_diagnostic_event(
- 'Could not crawl Azure metadata: %s' % e,
- logger_func=LOG.error)
+ "Could not crawl Azure metadata: %s" % e, logger_func=LOG.error
+ )
self._report_failure(
- description=DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE)
+ description=DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE
+ )
return False
- if (self.distro and self.distro.name == 'ubuntu' and
- self.ds_cfg.get('apply_network_config')):
+ if (
+ self.distro
+ and self.distro.name == "ubuntu"
+ and self.ds_cfg.get("apply_network_config")
+ ):
maybe_remove_ubuntu_network_config_scripts()
# Process crawled data and augment with various config defaults
@@ -584,21 +601,25 @@ class DataSourceAzure(sources.DataSource):
"Ephemeral resource disk '%s' exists. "
"Merging default Azure cloud ephemeral disk configs."
% devpath,
- logger_func=LOG.debug)
+ logger_func=LOG.debug,
+ )
self.cfg = util.mergemanydict(
- [crawled_data['cfg'], BUILTIN_CLOUD_EPHEMERAL_DISK_CONFIG])
+ [crawled_data["cfg"], BUILTIN_CLOUD_EPHEMERAL_DISK_CONFIG]
+ )
else:
report_diagnostic_event(
"Ephemeral resource disk '%s' does not exist. "
"Not merging default Azure cloud ephemeral disk configs."
% devpath,
- logger_func=LOG.debug)
- self.cfg = crawled_data['cfg']
+ logger_func=LOG.debug,
+ )
+ self.cfg = crawled_data["cfg"]
- self._metadata_imds = crawled_data['metadata']['imds']
+ self._metadata_imds = crawled_data["metadata"]["imds"]
self.metadata = util.mergemanydict(
- [crawled_data['metadata'], DEFAULT_METADATA])
- self.userdata_raw = crawled_data['userdata_raw']
+ [crawled_data["metadata"], DEFAULT_METADATA]
+ )
+ self.userdata_raw = crawled_data["userdata_raw"]
user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])
@@ -606,17 +627,19 @@ class DataSourceAzure(sources.DataSource):
# walinux agent writes files world readable, but expects
# the directory to be protected.
write_files(
- self.ds_cfg['data_dir'], crawled_data['files'], dirmode=0o700)
+ self.ds_cfg["data_dir"], crawled_data["files"], dirmode=0o700
+ )
return True
@azure_ds_telemetry_reporter
def get_imds_data_with_api_fallback(
- self,
- fallback_nic,
- retries,
- md_type=metadata_type.all,
- exc_cb=retry_on_url_exc,
- infinite=False):
+ self,
+ fallback_nic,
+ retries,
+ md_type=metadata_type.all,
+ exc_cb=retry_on_url_exc,
+ infinite=False,
+ ):
"""
Wrapper for get_metadata_from_imds so that we can have flexibility
in which IMDS api-version we use. If a particular instance of IMDS
@@ -628,30 +651,23 @@ class DataSourceAzure(sources.DataSource):
if not self.failed_desired_api_version:
for _ in range(retries):
try:
- LOG.info(
- "Attempting IMDS api-version: %s",
- IMDS_VER_WANT
- )
+ LOG.info("Attempting IMDS api-version: %s", IMDS_VER_WANT)
return get_metadata_from_imds(
fallback_nic=fallback_nic,
retries=0,
md_type=md_type,
api_version=IMDS_VER_WANT,
- exc_cb=exc_cb
+ exc_cb=exc_cb,
)
except UrlError as err:
LOG.info(
- "UrlError with IMDS api-version: %s",
- IMDS_VER_WANT
+ "UrlError with IMDS api-version: %s", IMDS_VER_WANT
)
if err.code == 400:
log_msg = "Fall back to IMDS api-version: {}".format(
IMDS_VER_MIN
)
- report_diagnostic_event(
- log_msg,
- logger_func=LOG.info
- )
+ report_diagnostic_event(log_msg, logger_func=LOG.info)
self.failed_desired_api_version = True
break
@@ -662,11 +678,11 @@ class DataSourceAzure(sources.DataSource):
md_type=md_type,
api_version=IMDS_VER_MIN,
exc_cb=exc_cb,
- infinite=infinite
+ infinite=infinite,
)
def device_name_to_device(self, name):
- return self.ds_cfg['disk_aliases'].get(name)
+ return self.ds_cfg["disk_aliases"].get(name)
@azure_ds_telemetry_reporter
def get_public_ssh_keys(self):
@@ -687,15 +703,16 @@ class DataSourceAzure(sources.DataSource):
OVF as a second option for environments that don't have IMDS.
"""
- LOG.debug('Retrieving public SSH keys')
+ LOG.debug("Retrieving public SSH keys")
ssh_keys = []
keys_from_imds = True
- LOG.debug('Attempting to get SSH keys from IMDS')
+ LOG.debug("Attempting to get SSH keys from IMDS")
try:
ssh_keys = [
- public_key['keyData']
- for public_key
- in self.metadata['imds']['compute']['publicKeys']
+ public_key["keyData"]
+ for public_key in self.metadata["imds"]["compute"][
+ "publicKeys"
+ ]
]
for key in ssh_keys:
if not _key_is_openssh_formatted(key=key):
@@ -703,33 +720,28 @@ class DataSourceAzure(sources.DataSource):
break
if not keys_from_imds:
- log_msg = 'Keys not in OpenSSH format, using OVF'
+ log_msg = "Keys not in OpenSSH format, using OVF"
else:
- log_msg = 'Retrieved {} keys from IMDS'.format(
- len(ssh_keys)
- if ssh_keys is not None
- else 0
+ log_msg = "Retrieved {} keys from IMDS".format(
+ len(ssh_keys) if ssh_keys is not None else 0
)
except KeyError:
- log_msg = 'Unable to get keys from IMDS, falling back to OVF'
+ log_msg = "Unable to get keys from IMDS, falling back to OVF"
keys_from_imds = False
finally:
report_diagnostic_event(log_msg, logger_func=LOG.debug)
if not keys_from_imds:
- LOG.debug('Attempting to get SSH keys from OVF')
+ LOG.debug("Attempting to get SSH keys from OVF")
try:
- ssh_keys = self.metadata['public-keys']
- log_msg = 'Retrieved {} keys from OVF'.format(len(ssh_keys))
+ ssh_keys = self.metadata["public-keys"]
+ log_msg = "Retrieved {} keys from OVF".format(len(ssh_keys))
except KeyError:
- log_msg = 'No keys available from OVF'
+ log_msg = "No keys available from OVF"
finally:
report_diagnostic_event(log_msg, logger_func=LOG.debug)
- return SSHKeys(
- keys_from_imds=keys_from_imds,
- ssh_keys=ssh_keys
- )
+ return SSHKeys(keys_from_imds=keys_from_imds, ssh_keys=ssh_keys)
def get_config_obj(self):
return self.cfg
@@ -740,12 +752,13 @@ class DataSourceAzure(sources.DataSource):
def _iid(self, previous=None):
prev_iid_path = os.path.join(
- self.paths.get_cpath('data'), 'instance-id')
+ self.paths.get_cpath("data"), "instance-id"
+ )
# Older kernels than 4.15 will have UPPERCASE product_uuid.
# We don't want Azure to react to an UPPER/lower difference as a new
# instance id as it rewrites SSH host keys.
# LP: #1835584
- iid = dmi.read_dmi_data('system-uuid').lower()
+ iid = dmi.read_dmi_data("system-uuid").lower()
if os.path.exists(prev_iid_path):
previous = util.load_file(prev_iid_path).strip()
if previous.lower() == iid:
@@ -759,22 +772,26 @@ class DataSourceAzure(sources.DataSource):
@azure_ds_telemetry_reporter
def setup(self, is_new_instance):
if self._negotiated is False:
- LOG.debug("negotiating for %s (new_instance=%s)",
- self.get_instance_id(), is_new_instance)
+ LOG.debug(
+ "negotiating for %s (new_instance=%s)",
+ self.get_instance_id(),
+ is_new_instance,
+ )
fabric_data = self._negotiate()
LOG.debug("negotiating returned %s", fabric_data)
if fabric_data:
self.metadata.update(fabric_data)
self._negotiated = True
else:
- LOG.debug("negotiating already done for %s",
- self.get_instance_id())
+ LOG.debug(
+ "negotiating already done for %s", self.get_instance_id()
+ )
@azure_ds_telemetry_reporter
def _wait_for_nic_detach(self, nl_sock):
"""Use the netlink socket provided to wait for nic detach event.
- NOTE: The function doesn't close the socket. The caller owns closing
- the socket and disposing it safely.
+ NOTE: The function doesn't close the socket. The caller owns closing
+ the socket and disposing it safely.
"""
try:
ifname = None
@@ -782,21 +799,27 @@ class DataSourceAzure(sources.DataSource):
# Preprovisioned VM will only have one NIC, and it gets
# detached immediately after deployment.
with events.ReportEventStack(
- name="wait-for-nic-detach",
- description=("wait for nic detach"),
- parent=azure_ds_reporter):
+ name="wait-for-nic-detach",
+ description="wait for nic detach",
+ parent=azure_ds_reporter,
+ ):
ifname = netlink.wait_for_nic_detach_event(nl_sock)
if ifname is None:
- msg = ("Preprovisioned nic not detached as expected. "
- "Proceeding without failing.")
+ msg = (
+ "Preprovisioned nic not detached as expected. "
+ "Proceeding without failing."
+ )
report_diagnostic_event(msg, logger_func=LOG.warning)
else:
- report_diagnostic_event("The preprovisioned nic %s is detached"
- % ifname, logger_func=LOG.warning)
+ report_diagnostic_event(
+ "The preprovisioned nic %s is detached" % ifname,
+ logger_func=LOG.warning,
+ )
path = REPROVISION_NIC_DETACHED_MARKER_FILE
LOG.info("Creating a marker file for nic detached: %s", path)
- util.write_file(path, "{pid}: {time}\n".format(
- pid=os.getpid(), time=time()))
+ util.write_file(
+ path, "{pid}: {time}\n".format(pid=os.getpid(), time=time())
+ )
except AssertionError as error:
report_diagnostic_event(error, logger_func=LOG.error)
raise
@@ -804,14 +827,15 @@ class DataSourceAzure(sources.DataSource):
@azure_ds_telemetry_reporter
def wait_for_link_up(self, ifname):
"""In cases where the link state is still showing down after a nic is
- hot-attached, we can attempt to bring it up by forcing the hv_netvsc
- drivers to query the link state by unbinding and then binding the
- device. This function attempts infinitely until the link is up,
- because we cannot proceed further until we have a stable link."""
+ hot-attached, we can attempt to bring it up by forcing the hv_netvsc
+ drivers to query the link state by unbinding and then binding the
+ device. This function attempts infinitely until the link is up,
+ because we cannot proceed further until we have a stable link."""
if self.distro.networking.try_set_link_up(ifname):
- report_diagnostic_event("The link %s is already up." % ifname,
- logger_func=LOG.info)
+ report_diagnostic_event(
+ "The link %s is already up." % ifname, logger_func=LOG.info
+ )
return
LOG.debug("Attempting to bring %s up", ifname)
@@ -820,22 +844,27 @@ class DataSourceAzure(sources.DataSource):
LOG.info("Unbinding and binding the interface %s", ifname)
while True:
- devicename = net.read_sys_net(ifname,
- 'device/device_id').strip('{}')
- util.write_file('/sys/bus/vmbus/drivers/hv_netvsc/unbind',
- devicename)
- util.write_file('/sys/bus/vmbus/drivers/hv_netvsc/bind',
- devicename)
+ devicename = net.read_sys_net(ifname, "device/device_id").strip(
+ "{}"
+ )
+ util.write_file(
+ "/sys/bus/vmbus/drivers/hv_netvsc/unbind", devicename
+ )
+ util.write_file(
+ "/sys/bus/vmbus/drivers/hv_netvsc/bind", devicename
+ )
attempts = attempts + 1
if self.distro.networking.try_set_link_up(ifname):
- msg = "The link %s is up after %s attempts" % (ifname,
- attempts)
+ msg = "The link %s is up after %s attempts" % (
+ ifname,
+ attempts,
+ )
report_diagnostic_event(msg, logger_func=LOG.info)
return
if attempts % 10 == 0:
- msg = ("Link is not up after %d attempts to rebind" % attempts)
+ msg = "Link is not up after %d attempts to rebind" % attempts
report_diagnostic_event(msg, logger_func=LOG.info)
LOG.info(msg)
@@ -844,13 +873,17 @@ class DataSourceAzure(sources.DataSource):
# again.
sleep_duration = 0.5
max_status_polls = 20
- LOG.debug("Polling %d seconds for primary NIC link up after "
- "rebind.", sleep_duration * max_status_polls)
+ LOG.debug(
+ "Polling %d seconds for primary NIC link up after rebind.",
+ sleep_duration * max_status_polls,
+ )
for i in range(0, max_status_polls):
if self.distro.networking.is_up(ifname):
- msg = ("After %d attempts to rebind, link is up after "
- "polling the link status %d times" % (attempts, i))
+ msg = (
+ "After %d attempts to rebind, link is up after "
+ "polling the link status %d times" % (attempts, i)
+ )
report_diagnostic_event(msg, logger_func=LOG.info)
LOG.debug(msg)
return
@@ -860,40 +893,47 @@ class DataSourceAzure(sources.DataSource):
@azure_ds_telemetry_reporter
def _create_report_ready_marker(self):
path = REPORTED_READY_MARKER_FILE
- LOG.info(
- "Creating a marker file to report ready: %s", path)
- util.write_file(path, "{pid}: {time}\n".format(
- pid=os.getpid(), time=time()))
+ LOG.info("Creating a marker file to report ready: %s", path)
+ util.write_file(
+ path, "{pid}: {time}\n".format(pid=os.getpid(), time=time())
+ )
report_diagnostic_event(
- 'Successfully created reported ready marker file '
- 'while in the preprovisioning pool.',
- logger_func=LOG.debug)
+ "Successfully created reported ready marker file "
+ "while in the preprovisioning pool.",
+ logger_func=LOG.debug,
+ )
@azure_ds_telemetry_reporter
def _report_ready_if_needed(self):
"""Report ready to the platform if the marker file is not present,
and create the marker file.
"""
- have_not_reported_ready = (
- not os.path.isfile(REPORTED_READY_MARKER_FILE))
+ have_not_reported_ready = not os.path.isfile(
+ REPORTED_READY_MARKER_FILE
+ )
if have_not_reported_ready:
- report_diagnostic_event("Reporting ready before nic detach",
- logger_func=LOG.info)
+ report_diagnostic_event(
+ "Reporting ready before nic detach", logger_func=LOG.info
+ )
try:
with EphemeralDHCPv4WithReporting(azure_ds_reporter) as lease:
self._report_ready(lease=lease)
except Exception as e:
- report_diagnostic_event("Exception reporting ready during "
- "preprovisioning before nic detach: %s"
- % e, logger_func=LOG.error)
+ report_diagnostic_event(
+ "Exception reporting ready during "
+ "preprovisioning before nic detach: %s" % e,
+ logger_func=LOG.error,
+ )
raise
self._create_report_ready_marker()
else:
- report_diagnostic_event("Already reported ready before nic detach."
- " The marker file already exists: %s" %
- REPORTED_READY_MARKER_FILE,
- logger_func=LOG.error)
+ report_diagnostic_event(
+ "Already reported ready before nic detach."
+ " The marker file already exists: %s"
+ % REPORTED_READY_MARKER_FILE,
+ logger_func=LOG.error,
+ )
@azure_ds_telemetry_reporter
def _check_if_nic_is_primary(self, ifname):
@@ -915,20 +955,26 @@ class DataSourceAzure(sources.DataSource):
# the primary NIC.
try:
with events.ReportEventStack(
- name="obtain-dhcp-lease",
- description=("obtain dhcp lease for %s when attempting to "
- "determine primary NIC during reprovision of "
- "a pre-provisioned VM" % ifname),
- parent=azure_ds_reporter):
+ name="obtain-dhcp-lease",
+ description=(
+ "obtain dhcp lease for %s when attempting to "
+ "determine primary NIC during reprovision of "
+ "a pre-provisioned VM"
+ )
+ % ifname,
+ parent=azure_ds_reporter,
+ ):
dhcp_ctx = EphemeralDHCPv4(
- iface=ifname,
- dhcp_log_func=dhcp_log_cb)
+ iface=ifname, dhcp_log_func=dhcp_log_cb
+ )
dhcp_ctx.obtain_lease()
except Exception as e:
- report_diagnostic_event("Giving up. Failed to obtain dhcp lease "
- "for %s when attempting to determine "
- "primary NIC during reprovision due to %s"
- % (ifname, e), logger_func=LOG.error)
+ report_diagnostic_event(
+ "Giving up. Failed to obtain dhcp lease "
+ "for %s when attempting to determine "
+ "primary NIC during reprovision due to %s" % (ifname, e),
+ logger_func=LOG.error,
+ )
raise
# Retry polling network metadata for a limited duration only when the
@@ -953,13 +999,15 @@ class DataSourceAzure(sources.DataSource):
report_diagnostic_event(
"Ran into exception when attempting to reach %s "
"after %d polls." % (msg, metadata_poll_count),
- logger_func=LOG.error)
+ logger_func=LOG.error,
+ )
if isinstance(exc, UrlError):
- report_diagnostic_event("poll IMDS with %s failed. "
- "Exception: %s and code: %s" %
- (msg, exc.cause, exc.code),
- logger_func=LOG.error)
+ report_diagnostic_event(
+ "poll IMDS with %s failed. Exception: %s and code: %s"
+ % (msg, exc.cause, exc.code),
+ logger_func=LOG.error,
+ )
# Retry up to a certain limit for both timeout and network
# unreachable errors.
@@ -967,7 +1015,7 @@ class DataSourceAzure(sources.DataSource):
exc.cause, (requests.Timeout, requests.ConnectionError)
):
expected_errors_count = expected_errors_count + 1
- return (expected_errors_count <= 10)
+ return expected_errors_count <= 10
return True
# Primary nic detection will be optimized in the future. The fact that
@@ -975,17 +1023,16 @@ class DataSourceAzure(sources.DataSource):
# could add several seconds of delay.
try:
imds_md = self.get_imds_data_with_api_fallback(
- ifname,
- 0,
- metadata_type.network,
- network_metadata_exc_cb,
- True
+ ifname, 0, metadata_type.network, network_metadata_exc_cb, True
)
except Exception as e:
LOG.warning(
"Failed to get network metadata using nic %s. Attempt to "
"contact IMDS failed with error %s. Assuming this is not the "
- "primary nic.", ifname, e)
+ "primary nic.",
+ ifname,
+ e,
+ )
finally:
# If we are not the primary nic, then clean the dhcp context.
if imds_md is None:
@@ -1000,10 +1047,11 @@ class DataSourceAzure(sources.DataSource):
self._ephemeral_dhcp_ctx = dhcp_ctx
# Set the expected nic count based on the response received.
- expected_nic_count = len(
- imds_md['interface'])
- report_diagnostic_event("Expected nic count: %d" %
- expected_nic_count, logger_func=LOG.info)
+ expected_nic_count = len(imds_md["interface"])
+ report_diagnostic_event(
+ "Expected nic count: %d" % expected_nic_count,
+ logger_func=LOG.info,
+ )
return is_primary, expected_nic_count
@@ -1028,17 +1076,22 @@ class DataSourceAzure(sources.DataSource):
while True:
ifname = None
with events.ReportEventStack(
- name="wait-for-nic-attach",
- description=("wait for nic attach after %d nics have "
- "been attached" % len(nics_found)),
- parent=azure_ds_reporter):
- ifname = netlink.wait_for_nic_attach_event(nl_sock,
- nics_found)
+ name="wait-for-nic-attach",
+ description=(
+ "wait for nic attach after %d nics have been attached"
+ % len(nics_found)
+ ),
+ parent=azure_ds_reporter,
+ ):
+ ifname = netlink.wait_for_nic_attach_event(
+ nl_sock, nics_found
+ )
# wait_for_nic_attach_event guarantees that ifname it not None
nics_found.append(ifname)
- report_diagnostic_event("Detected nic %s attached." % ifname,
- logger_func=LOG.info)
+ report_diagnostic_event(
+ "Detected nic %s attached." % ifname, logger_func=LOG.info
+ )
# Attempt to bring the interface's operating state to
# UP in case it is not already.
@@ -1048,14 +1101,17 @@ class DataSourceAzure(sources.DataSource):
# platform will attach the primary nic first so we
# won't be in primary_nic_found = false state for long.
if not primary_nic_found:
- LOG.info("Checking if %s is the primary nic",
- ifname)
- (primary_nic_found, expected_nic_count) = (
- self._check_if_nic_is_primary(ifname))
+ LOG.info("Checking if %s is the primary nic", ifname)
+ (
+ primary_nic_found,
+ expected_nic_count,
+ ) = self._check_if_nic_is_primary(ifname)
# Exit criteria: check if we've discovered all nics
- if (expected_nic_count != -1
- and len(nics_found) >= expected_nic_count):
+ if (
+ expected_nic_count != -1
+ and len(nics_found) >= expected_nic_count
+ ):
LOG.info("Found all the nics for this VM.")
break
@@ -1065,9 +1121,9 @@ class DataSourceAzure(sources.DataSource):
@azure_ds_telemetry_reporter
def _wait_for_all_nics_ready(self):
"""Wait for nic(s) to be hot-attached. There may be multiple nics
- depending on the customer request.
- But only primary nic would be able to communicate with wireserver
- and IMDS. So we detect and save the primary nic to be used later.
+ depending on the customer request.
+ But only primary nic would be able to communicate with wireserver
+ and IMDS. So we detect and save the primary nic to be used later.
"""
nl_sock = None
@@ -1075,7 +1131,8 @@ class DataSourceAzure(sources.DataSource):
nl_sock = netlink.create_bound_netlink_socket()
report_ready_marker_present = bool(
- os.path.isfile(REPORTED_READY_MARKER_FILE))
+ os.path.isfile(REPORTED_READY_MARKER_FILE)
+ )
# Report ready if the marker file is not already present.
# The nic of the preprovisioned vm gets hot-detached as soon as
@@ -1083,7 +1140,8 @@ class DataSourceAzure(sources.DataSource):
self._report_ready_if_needed()
has_nic_been_detached = bool(
- os.path.isfile(REPROVISION_NIC_DETACHED_MARKER_FILE))
+ os.path.isfile(REPROVISION_NIC_DETACHED_MARKER_FILE)
+ )
if not has_nic_been_detached:
LOG.info("NIC has not been detached yet.")
@@ -1097,12 +1155,14 @@ class DataSourceAzure(sources.DataSource):
if not self.fallback_interface:
self._wait_for_hot_attached_nics(nl_sock)
else:
- report_diagnostic_event("Skipping waiting for nic attach "
- "because we already have a fallback "
- "interface. Report Ready marker "
- "present before detaching nics: %s" %
- report_ready_marker_present,
- logger_func=LOG.info)
+ report_diagnostic_event(
+ "Skipping waiting for nic attach "
+ "because we already have a fallback "
+ "interface. Report Ready marker "
+ "present before detaching nics: %s"
+ % report_ready_marker_present,
+ logger_func=LOG.info,
+ )
except netlink.NetlinkCreateSocketError as e:
report_diagnostic_event(e, logger_func=LOG.warning)
raise
@@ -1115,8 +1175,7 @@ class DataSourceAzure(sources.DataSource):
"""Poll IMDS for the new provisioning data until we get a valid
response. Then return the returned JSON object."""
url = "{}?api-version={}".format(
- metadata_type.reprovisiondata.value,
- IMDS_VER_MIN
+ metadata_type.reprovisiondata.value, IMDS_VER_MIN
)
headers = {"Metadata": "true"}
nl_sock = None
@@ -1133,38 +1192,44 @@ class DataSourceAzure(sources.DataSource):
if self.imds_poll_counter == self.imds_logging_threshold:
# Reducing the logging frequency as we are polling IMDS
self.imds_logging_threshold *= 2
- LOG.debug("Backing off logging threshold for the same "
- "exception to %d",
- self.imds_logging_threshold)
- report_diagnostic_event("poll IMDS with %s failed. "
- "Exception: %s and code: %s" %
- (msg, exception.cause,
- exception.code),
- logger_func=LOG.debug)
+ LOG.debug(
+ "Backing off logging threshold for the same "
+ "exception to %d",
+ self.imds_logging_threshold,
+ )
+ report_diagnostic_event(
+ "poll IMDS with %s failed. "
+ "Exception: %s and code: %s"
+ % (msg, exception.cause, exception.code),
+ logger_func=LOG.debug,
+ )
self.imds_poll_counter += 1
return True
else:
# If we get an exception while trying to call IMDS, we call
# DHCP and setup the ephemeral network to acquire a new IP.
- report_diagnostic_event("poll IMDS with %s failed. "
- "Exception: %s and code: %s" %
- (msg, exception.cause,
- exception.code),
- logger_func=LOG.warning)
+ report_diagnostic_event(
+ "poll IMDS with %s failed. Exception: %s and code: %s"
+ % (msg, exception.cause, exception.code),
+ logger_func=LOG.warning,
+ )
return False
report_diagnostic_event(
- "poll IMDS failed with an "
- "unexpected exception: %s" % exception,
- logger_func=LOG.warning)
+ "poll IMDS failed with an unexpected exception: %s"
+ % exception,
+ logger_func=LOG.warning,
+ )
return False
# When the interface is hot-attached, we would have already
# done dhcp and set the dhcp context. In that case, skip
# the attempt to do dhcp.
is_ephemeral_ctx_present = self._ephemeral_dhcp_ctx is not None
- msg = ("Unexpected error. Dhcp context is not expected to be already "
- "set when we need to wait for vnet switch")
+ msg = (
+ "Unexpected error. Dhcp context is not expected to be already "
+ "set when we need to wait for vnet switch"
+ )
if is_ephemeral_ctx_present and report_ready:
report_diagnostic_event(msg, logger_func=LOG.error)
raise RuntimeError(msg)
@@ -1178,11 +1243,13 @@ class DataSourceAzure(sources.DataSource):
# Save our EphemeralDHCPv4 context to avoid repeated dhcp
# later when we report ready
with events.ReportEventStack(
- name="obtain-dhcp-lease",
- description="obtain dhcp lease",
- parent=azure_ds_reporter):
+ name="obtain-dhcp-lease",
+ description="obtain dhcp lease",
+ parent=azure_ds_reporter,
+ ):
self._ephemeral_dhcp_ctx = EphemeralDHCPv4(
- dhcp_log_func=dhcp_log_cb)
+ dhcp_log_func=dhcp_log_cb
+ )
lease = self._ephemeral_dhcp_ctx.obtain_lease()
if vnet_switched:
@@ -1192,15 +1259,18 @@ class DataSourceAzure(sources.DataSource):
nl_sock = netlink.create_bound_netlink_socket()
except netlink.NetlinkCreateSocketError as e:
report_diagnostic_event(
- 'Failed to create bound netlink socket: %s' % e,
- logger_func=LOG.warning)
+ "Failed to create bound netlink socket: %s" % e,
+ logger_func=LOG.warning,
+ )
self._ephemeral_dhcp_ctx.clean_network()
break
report_ready_succeeded = self._report_ready(lease=lease)
if not report_ready_succeeded:
- msg = ('Failed reporting ready while in '
- 'the preprovisioning pool.')
+ msg = (
+ "Failed reporting ready while in "
+ "the preprovisioning pool."
+ )
report_diagnostic_event(msg, logger_func=LOG.error)
self._ephemeral_dhcp_ctx.clean_network()
raise sources.InvalidMetaDataException(msg)
@@ -1210,31 +1280,37 @@ class DataSourceAzure(sources.DataSource):
LOG.debug("Wait for vnetswitch to happen")
with events.ReportEventStack(
- name="wait-for-media-disconnect-connect",
- description="wait for vnet switch",
- parent=azure_ds_reporter):
+ name="wait-for-media-disconnect-connect",
+ description="wait for vnet switch",
+ parent=azure_ds_reporter,
+ ):
try:
netlink.wait_for_media_disconnect_connect(
- nl_sock, lease['interface'])
+ nl_sock, lease["interface"]
+ )
except AssertionError as e:
report_diagnostic_event(
- 'Error while waiting for vnet switch: %s' % e,
- logger_func=LOG.error)
+ "Error while waiting for vnet switch: %s" % e,
+ logger_func=LOG.error,
+ )
break
vnet_switched = True
self._ephemeral_dhcp_ctx.clean_network()
else:
with events.ReportEventStack(
- name="get-reprovision-data-from-imds",
- description="get reprovision data from imds",
- parent=azure_ds_reporter):
- return_val = readurl(url,
- timeout=IMDS_TIMEOUT_IN_SECONDS,
- headers=headers,
- exception_cb=exc_cb,
- infinite=True,
- log_req_resp=False).contents
+ name="get-reprovision-data-from-imds",
+ description="get reprovision data from imds",
+ parent=azure_ds_reporter,
+ ):
+ return_val = readurl(
+ url,
+ timeout=IMDS_TIMEOUT_IN_SECONDS,
+ headers=headers,
+ exception_cb=exc_cb,
+ infinite=True,
+ log_req_resp=False,
+ ).contents
break
except UrlError:
# Teardown our EphemeralDHCPv4 context on failure as we retry
@@ -1248,12 +1324,14 @@ class DataSourceAzure(sources.DataSource):
nl_sock.close()
if vnet_switched:
- report_diagnostic_event("attempted dhcp %d times after reuse" %
- dhcp_attempts,
- logger_func=LOG.debug)
- report_diagnostic_event("polled imds %d times after reuse" %
- self.imds_poll_counter,
- logger_func=LOG.debug)
+ report_diagnostic_event(
+ "attempted dhcp %d times after reuse" % dhcp_attempts,
+ logger_func=LOG.debug,
+ )
+ report_diagnostic_event(
+ "polled imds %d times after reuse" % self.imds_poll_counter,
+ logger_func=LOG.debug,
+ )
return return_val
@@ -1264,52 +1342,63 @@ class DataSourceAzure(sources.DataSource):
@param description: A description of the error encountered.
@return: The success status of sending the failure signal.
"""
- unknown_245_key = 'unknown-245'
+ unknown_245_key = "unknown-245"
try:
- if (self.distro.networking.is_up(self.fallback_interface) and
- getattr(self, '_ephemeral_dhcp_ctx', None) and
- getattr(self._ephemeral_dhcp_ctx, 'lease', None) and
- unknown_245_key in self._ephemeral_dhcp_ctx.lease):
+ if (
+ self.distro.networking.is_up(self.fallback_interface)
+ and getattr(self, "_ephemeral_dhcp_ctx", None)
+ and getattr(self._ephemeral_dhcp_ctx, "lease", None)
+ and unknown_245_key in self._ephemeral_dhcp_ctx.lease
+ ):
report_diagnostic_event(
- 'Using cached ephemeral dhcp context '
- 'to report failure to Azure', logger_func=LOG.debug)
+ "Using cached ephemeral dhcp context "
+ "to report failure to Azure",
+ logger_func=LOG.debug,
+ )
report_failure_to_fabric(
dhcp_opts=self._ephemeral_dhcp_ctx.lease[unknown_245_key],
- description=description)
+ description=description,
+ )
self._ephemeral_dhcp_ctx.clean_network() # Teardown ephemeral
return True
except Exception as e:
report_diagnostic_event(
- 'Failed to report failure using '
- 'cached ephemeral dhcp context: %s' % e,
- logger_func=LOG.error)
+ "Failed to report failure using "
+ "cached ephemeral dhcp context: %s" % e,
+ logger_func=LOG.error,
+ )
try:
report_diagnostic_event(
- 'Using new ephemeral dhcp to report failure to Azure',
- logger_func=LOG.debug)
+ "Using new ephemeral dhcp to report failure to Azure",
+ logger_func=LOG.debug,
+ )
with EphemeralDHCPv4WithReporting(azure_ds_reporter) as lease:
report_failure_to_fabric(
- dhcp_opts=lease[unknown_245_key],
- description=description)
+ dhcp_opts=lease[unknown_245_key], description=description
+ )
return True
except Exception as e:
report_diagnostic_event(
- 'Failed to report failure using new ephemeral dhcp: %s' % e,
- logger_func=LOG.debug)
+ "Failed to report failure using new ephemeral dhcp: %s" % e,
+ logger_func=LOG.debug,
+ )
try:
report_diagnostic_event(
- 'Using fallback lease to report failure to Azure')
+ "Using fallback lease to report failure to Azure"
+ )
report_failure_to_fabric(
fallback_lease_file=self.dhclient_lease_file,
- description=description)
+ description=description,
+ )
return True
except Exception as e:
report_diagnostic_event(
- 'Failed to report failure using fallback lease: %s' % e,
- logger_func=LOG.debug)
+ "Failed to report failure using fallback lease: %s" % e,
+ logger_func=LOG.debug,
+ )
return False
@@ -1320,27 +1409,33 @@ class DataSourceAzure(sources.DataSource):
@return: The success status of sending the ready signal.
"""
try:
- get_metadata_from_fabric(fallback_lease_file=None,
- dhcp_opts=lease['unknown-245'],
- iso_dev=self.iso_dev)
+ get_metadata_from_fabric(
+ fallback_lease_file=None,
+ dhcp_opts=lease["unknown-245"],
+ iso_dev=self.iso_dev,
+ )
return True
except Exception as e:
report_diagnostic_event(
"Error communicating with Azure fabric; You may experience "
- "connectivity issues: %s" % e, logger_func=LOG.warning)
+ "connectivity issues: %s" % e,
+ logger_func=LOG.warning,
+ )
return False
def _ppstype_from_imds(self, imds_md: dict = None) -> str:
try:
- return imds_md['extended']['compute']['ppsType']
+ return imds_md["extended"]["compute"]["ppsType"]
except Exception as e:
report_diagnostic_event(
- "Could not retrieve pps configuration from IMDS: %s" %
- e, logger_func=LOG.debug)
+ "Could not retrieve pps configuration from IMDS: %s" % e,
+ logger_func=LOG.debug,
+ )
return None
def _should_reprovision_after_nic_attach(
- self, ovf_md, imds_md=None) -> bool:
+ self, ovf_md, imds_md=None
+ ) -> bool:
"""Whether or not we should wait for nic attach and then poll
IMDS for reprovisioning data. Also sets a marker file to poll IMDS.
@@ -1360,14 +1455,19 @@ class DataSourceAzure(sources.DataSource):
return False
(_md, _userdata_raw, cfg, _files) = ovf_md
path = REPROVISION_NIC_ATTACH_MARKER_FILE
- if (cfg.get('PreprovisionedVMType', None) == "Savable" or
- self._ppstype_from_imds(imds_md) == "Savable" or
- os.path.isfile(path)):
+ if (
+ cfg.get("PreprovisionedVMType", None) == "Savable"
+ or self._ppstype_from_imds(imds_md) == "Savable"
+ or os.path.isfile(path)
+ ):
if not os.path.isfile(path):
- LOG.info("Creating a marker file to wait for nic attach: %s",
- path)
- util.write_file(path, "{pid}: {time}\n".format(
- pid=os.getpid(), time=time()))
+ LOG.info(
+ "Creating a marker file to wait for nic attach: %s", path
+ )
+ util.write_file(
+ path,
+ "{pid}: {time}\n".format(pid=os.getpid(), time=time()),
+ )
return True
return False
@@ -1386,15 +1486,18 @@ class DataSourceAzure(sources.DataSource):
return False
(_md, _userdata_raw, cfg, _files) = ovf_md
path = REPROVISION_MARKER_FILE
- if (cfg.get('PreprovisionedVm') is True or
- cfg.get('PreprovisionedVMType', None) == 'Running' or
- self._ppstype_from_imds(imds_md) == "Running" or
- os.path.isfile(path)):
+ if (
+ cfg.get("PreprovisionedVm") is True
+ or cfg.get("PreprovisionedVMType", None) == "Running"
+ or self._ppstype_from_imds(imds_md) == "Running"
+ or os.path.isfile(path)
+ ):
if not os.path.isfile(path):
- LOG.info("Creating a marker file to poll imds: %s",
- path)
- util.write_file(path, "{pid}: {time}\n".format(
- pid=os.getpid(), time=time()))
+ LOG.info("Creating a marker file to poll imds: %s", path)
+ util.write_file(
+ path,
+ "{pid}: {time}\n".format(pid=os.getpid(), time=time()),
+ )
return True
return False
@@ -1402,35 +1505,35 @@ class DataSourceAzure(sources.DataSource):
"""Initiate the reprovisioning workflow."""
contents = self._poll_imds()
with events.ReportEventStack(
- name="reprovisioning-read-azure-ovf",
- description="read azure ovf during reprovisioning",
- parent=azure_ds_reporter):
+ name="reprovisioning-read-azure-ovf",
+ description="read azure ovf during reprovisioning",
+ parent=azure_ds_reporter,
+ ):
md, ud, cfg = read_azure_ovf(contents)
- return (md, ud, cfg, {'ovf-env.xml': contents})
+ return (md, ud, cfg, {"ovf-env.xml": contents})
@azure_ds_telemetry_reporter
def _negotiate(self):
"""Negotiate with fabric and return data from it.
- On success, returns a dictionary including 'public_keys'.
- On failure, returns False.
+ On success, returns a dictionary including 'public_keys'.
+ On failure, returns False.
"""
pubkey_info = None
ssh_keys_and_source = self._get_public_ssh_keys_and_source()
if not ssh_keys_and_source.keys_from_imds:
- pubkey_info = self.cfg.get('_pubkeys', None)
- log_msg = 'Retrieved {} fingerprints from OVF'.format(
- len(pubkey_info)
- if pubkey_info is not None
- else 0
+ pubkey_info = self.cfg.get("_pubkeys", None)
+ log_msg = "Retrieved {} fingerprints from OVF".format(
+ len(pubkey_info) if pubkey_info is not None else 0
)
report_diagnostic_event(log_msg, logger_func=LOG.debug)
- metadata_func = partial(get_metadata_from_fabric,
- fallback_lease_file=self.
- dhclient_lease_file,
- pubkey_info=pubkey_info)
+ metadata_func = partial(
+ get_metadata_from_fabric,
+ fallback_lease_file=self.dhclient_lease_file,
+ pubkey_info=pubkey_info,
+ )
LOG.debug("negotiating with fabric")
try:
@@ -1438,7 +1541,9 @@ class DataSourceAzure(sources.DataSource):
except Exception as e:
report_diagnostic_event(
"Error communicating with Azure fabric; You may experience "
- "connectivity issues: %s" % e, logger_func=LOG.warning)
+ "connectivity issues: %s" % e,
+ logger_func=LOG.warning,
+ )
return False
util.del_file(REPORTED_READY_MARKER_FILE)
@@ -1450,30 +1555,34 @@ class DataSourceAzure(sources.DataSource):
@azure_ds_telemetry_reporter
def activate(self, cfg, is_new_instance):
try:
- address_ephemeral_resize(is_new_instance=is_new_instance,
- preserve_ntfs=self.ds_cfg.get(
- DS_CFG_KEY_PRESERVE_NTFS, False))
+ address_ephemeral_resize(
+ is_new_instance=is_new_instance,
+ preserve_ntfs=self.ds_cfg.get(DS_CFG_KEY_PRESERVE_NTFS, False),
+ )
finally:
- push_log_to_kvp(self.sys_cfg['def_log_file'])
+ push_log_to_kvp(self.sys_cfg["def_log_file"])
return
@property
def availability_zone(self):
- return self.metadata.get(
- 'imds', {}).get('compute', {}).get('platformFaultDomain')
+ return (
+ self.metadata.get("imds", {})
+ .get("compute", {})
+ .get("platformFaultDomain")
+ )
@property
def network_config(self):
"""Generate a network config like net.generate_fallback_network() with
- the following exceptions.
+ the following exceptions.
- 1. Probe the drivers of the net-devices present and inject them in
- the network configuration under params: driver: <driver> value
- 2. Generate a fallback network config that does not include any of
- the blacklisted devices.
+ 1. Probe the drivers of the net-devices present and inject them in
+ the network configuration under params: driver: <driver> value
+ 2. Generate a fallback network config that does not include any of
+ the blacklisted devices.
"""
if not self._network_config or self._network_config == sources.UNSET:
- if self.ds_cfg.get('apply_network_config'):
+ if self.ds_cfg.get("apply_network_config"):
nc_src = self._metadata_imds
else:
nc_src = None
@@ -1482,33 +1591,36 @@ class DataSourceAzure(sources.DataSource):
@property
def region(self):
- return self.metadata.get('imds', {}).get('compute', {}).get('location')
+ return self.metadata.get("imds", {}).get("compute", {}).get("location")
def _username_from_imds(imds_data):
try:
- return imds_data['compute']['osProfile']['adminUsername']
+ return imds_data["compute"]["osProfile"]["adminUsername"]
except KeyError:
return None
def _userdata_from_imds(imds_data):
try:
- return imds_data['compute']['userData']
+ return imds_data["compute"]["userData"]
except KeyError:
return None
def _hostname_from_imds(imds_data):
try:
- return imds_data['compute']['osProfile']['computerName']
+ return imds_data["compute"]["osProfile"]["computerName"]
except KeyError:
return None
def _disable_password_from_imds(imds_data):
try:
- return imds_data['compute']['osProfile']['disablePasswordAuthentication'] == 'true' # noqa: E501
+ return (
+ imds_data["compute"]["osProfile"]["disablePasswordAuthentication"]
+ == "true"
+ )
except KeyError:
return None
@@ -1518,7 +1630,7 @@ def _key_is_openssh_formatted(key):
Validate whether or not the key is OpenSSH-formatted.
"""
# See https://bugs.launchpad.net/cloud-init/+bug/1910835
- if '\r\n' in key.strip():
+ if "\r\n" in key.strip():
return False
parser = ssh_util.AuthKeyLineParser()
@@ -1546,7 +1658,7 @@ def _partitions_on_device(devpath, maxnum=16):
@azure_ds_telemetry_reporter
def _has_ntfs_filesystem(devpath):
ntfs_devices = util.find_devs_with("TYPE=ntfs", no_cache=True)
- LOG.debug('ntfs_devices found = %s', ntfs_devices)
+ LOG.debug("ntfs_devices found = %s", ntfs_devices)
return os.path.realpath(devpath) in ntfs_devices
@@ -1570,24 +1682,29 @@ def can_dev_be_reformatted(devpath, preserve_ntfs):
If cloud-init cannot mount the disk to check for data, destruction
will be allowed, unless the dscfg key is set."""
if preserve_ntfs:
- msg = ('config says to never destroy NTFS (%s.%s), skipping checks' %
- (".".join(DS_CFG_PATH), DS_CFG_KEY_PRESERVE_NTFS))
+ msg = "config says to never destroy NTFS (%s.%s), skipping checks" % (
+ ".".join(DS_CFG_PATH),
+ DS_CFG_KEY_PRESERVE_NTFS,
+ )
return False, msg
if not os.path.exists(devpath):
- return False, 'device %s does not exist' % devpath
+ return False, "device %s does not exist" % devpath
- LOG.debug('Resolving realpath of %s -> %s', devpath,
- os.path.realpath(devpath))
+ LOG.debug(
+ "Resolving realpath of %s -> %s", devpath, os.path.realpath(devpath)
+ )
# devpath of /dev/sd[a-z] or /dev/disk/cloud/azure_resource
# where partitions are "<devpath>1" or "<devpath>-part1" or "<devpath>p1"
partitions = _partitions_on_device(devpath)
if len(partitions) == 0:
- return False, 'device %s was not partitioned' % devpath
+ return False, "device %s was not partitioned" % devpath
elif len(partitions) > 2:
- msg = ('device %s had 3 or more partitions: %s' %
- (devpath, ' '.join([p[1] for p in partitions])))
+ msg = "device %s had 3 or more partitions: %s" % (
+ devpath,
+ " ".join([p[1] for p in partitions]),
+ )
return False, msg
elif len(partitions) == 2:
cand_part, cand_path = partitions[1]
@@ -1595,57 +1712,78 @@ def can_dev_be_reformatted(devpath, preserve_ntfs):
cand_part, cand_path = partitions[0]
if not _has_ntfs_filesystem(cand_path):
- msg = ('partition %s (%s) on device %s was not ntfs formatted' %
- (cand_part, cand_path, devpath))
+ msg = "partition %s (%s) on device %s was not ntfs formatted" % (
+ cand_part,
+ cand_path,
+ devpath,
+ )
return False, msg
@azure_ds_telemetry_reporter
def count_files(mp):
- ignored = set(['dataloss_warning_readme.txt'])
+ ignored = set(["dataloss_warning_readme.txt"])
return len([f for f in os.listdir(mp) if f.lower() not in ignored])
- bmsg = ('partition %s (%s) on device %s was ntfs formatted' %
- (cand_part, cand_path, devpath))
+ bmsg = "partition %s (%s) on device %s was ntfs formatted" % (
+ cand_part,
+ cand_path,
+ devpath,
+ )
with events.ReportEventStack(
name="mount-ntfs-and-count",
description="mount-ntfs-and-count",
- parent=azure_ds_reporter
+ parent=azure_ds_reporter,
) as evt:
try:
- file_count = util.mount_cb(cand_path, count_files, mtype="ntfs",
- update_env_for_mount={'LANG': 'C'})
+ file_count = util.mount_cb(
+ cand_path,
+ count_files,
+ mtype="ntfs",
+ update_env_for_mount={"LANG": "C"},
+ )
except util.MountFailedError as e:
evt.description = "cannot mount ntfs"
if "unknown filesystem type 'ntfs'" in str(e):
- return True, (bmsg + ' but this system cannot mount NTFS,'
- ' assuming there are no important files.'
- ' Formatting allowed.')
- return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e)
+ return (
+ True,
+ (
+ bmsg + " but this system cannot mount NTFS,"
+ " assuming there are no important files."
+ " Formatting allowed."
+ ),
+ )
+ return False, bmsg + " but mount of %s failed: %s" % (cand_part, e)
if file_count != 0:
evt.description = "mounted and counted %d files" % file_count
- LOG.warning("it looks like you're using NTFS on the ephemeral"
- " disk, to ensure that filesystem does not get wiped,"
- " set %s.%s in config", '.'.join(DS_CFG_PATH),
- DS_CFG_KEY_PRESERVE_NTFS)
- return False, bmsg + ' but had %d files on it.' % file_count
+ LOG.warning(
+ "it looks like you're using NTFS on the ephemeral"
+ " disk, to ensure that filesystem does not get wiped,"
+ " set %s.%s in config",
+ ".".join(DS_CFG_PATH),
+ DS_CFG_KEY_PRESERVE_NTFS,
+ )
+ return False, bmsg + " but had %d files on it." % file_count
- return True, bmsg + ' and had no important files. Safe for reformatting.'
+ return True, bmsg + " and had no important files. Safe for reformatting."
@azure_ds_telemetry_reporter
-def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH,
- is_new_instance=False, preserve_ntfs=False):
+def address_ephemeral_resize(
+ devpath=RESOURCE_DISK_PATH, is_new_instance=False, preserve_ntfs=False
+):
if not os.path.exists(devpath):
report_diagnostic_event(
"Ephemeral resource disk '%s' does not exist." % devpath,
- logger_func=LOG.debug)
+ logger_func=LOG.debug,
+ )
return
else:
report_diagnostic_event(
"Ephemeral resource disk '%s' exists." % devpath,
- logger_func=LOG.debug)
+ logger_func=LOG.debug,
+ )
result = False
msg = None
@@ -1658,31 +1796,32 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH,
if not result:
return
- for mod in ['disk_setup', 'mounts']:
- sempath = '/var/lib/cloud/instance/sem/config_' + mod
+ for mod in ["disk_setup", "mounts"]:
+ sempath = "/var/lib/cloud/instance/sem/config_" + mod
bmsg = 'Marker "%s" for module "%s"' % (sempath, mod)
if os.path.exists(sempath):
try:
os.unlink(sempath)
- LOG.debug('%s removed.', bmsg)
+ LOG.debug("%s removed.", bmsg)
except Exception as e:
# python3 throws FileNotFoundError, python2 throws OSError
- LOG.warning('%s: remove failed! (%s)', bmsg, e)
+ LOG.warning("%s: remove failed! (%s)", bmsg, e)
else:
- LOG.debug('%s did not exist.', bmsg)
+ LOG.debug("%s did not exist.", bmsg)
return
@azure_ds_telemetry_reporter
def write_files(datadir, files, dirmode=None):
-
def _redact_password(cnt, fname):
"""Azure provides the UserPassword in plain text. So we redact it"""
try:
root = ET.fromstring(cnt)
for elem in root.iter():
- if ('UserPassword' in elem.tag and
- elem.text != DEF_PASSWD_REDACTION):
+ if (
+ "UserPassword" in elem.tag
+ and elem.text != DEF_PASSWD_REDACTION
+ ):
elem.text = DEF_PASSWD_REDACTION
return ET.tostring(root)
except Exception:
@@ -1696,7 +1835,7 @@ def write_files(datadir, files, dirmode=None):
util.ensure_dir(datadir, dirmode)
for (name, content) in files.items():
fname = os.path.join(datadir, name)
- if 'ovf-env.xml' in name:
+ if "ovf-env.xml" in name:
content = _redact_password(content, fname)
util.write_file(filename=fname, content=content, mode=0o600)
@@ -1728,8 +1867,9 @@ def load_azure_ovf_pubkeys(sshnode):
if len(results) == 0:
return []
if len(results) > 1:
- raise BrokenAzureDataSource("Multiple 'PublicKeys'(%s) in SSH node" %
- len(results))
+ raise BrokenAzureDataSource(
+ "Multiple 'PublicKeys'(%s) in SSH node" % len(results)
+ )
pubkeys_node = results[0]
pubkeys = find_child(pubkeys_node, lambda n: n.localName == "PublicKey")
@@ -1744,7 +1884,7 @@ def load_azure_ovf_pubkeys(sshnode):
if not pk_node.hasChildNodes():
continue
- cur = {'fingerprint': "", 'path': "", 'value': ""}
+ cur = {"fingerprint": "", "path": "", "value": ""}
for child in pk_node.childNodes:
if child.nodeType == text_node or not child.localName:
continue
@@ -1754,8 +1894,10 @@ def load_azure_ovf_pubkeys(sshnode):
if name not in cur.keys():
continue
- if (len(child.childNodes) != 1 or
- child.childNodes[0].nodeType != text_node):
+ if (
+ len(child.childNodes) != 1
+ or child.childNodes[0].nodeType != text_node
+ ):
continue
cur[name] = child.childNodes[0].wholeText.strip()
@@ -1773,33 +1915,37 @@ def read_azure_ovf(contents):
report_diagnostic_event(error_str, logger_func=LOG.warning)
raise BrokenAzureDataSource(error_str) from e
- results = find_child(dom.documentElement,
- lambda n: n.localName == "ProvisioningSection")
+ results = find_child(
+ dom.documentElement, lambda n: n.localName == "ProvisioningSection"
+ )
if len(results) == 0:
raise NonAzureDataSource("No ProvisioningSection")
if len(results) > 1:
- raise BrokenAzureDataSource("found '%d' ProvisioningSection items" %
- len(results))
+ raise BrokenAzureDataSource(
+ "found '%d' ProvisioningSection items" % len(results)
+ )
provSection = results[0]
- lpcs_nodes = find_child(provSection,
- lambda n:
- n.localName == "LinuxProvisioningConfigurationSet")
+ lpcs_nodes = find_child(
+ provSection,
+ lambda n: n.localName == "LinuxProvisioningConfigurationSet",
+ )
if len(lpcs_nodes) == 0:
raise NonAzureDataSource("No LinuxProvisioningConfigurationSet")
if len(lpcs_nodes) > 1:
- raise BrokenAzureDataSource("found '%d' %ss" %
- (len(lpcs_nodes),
- "LinuxProvisioningConfigurationSet"))
+ raise BrokenAzureDataSource(
+ "found '%d' %ss"
+ % (len(lpcs_nodes), "LinuxProvisioningConfigurationSet")
+ )
lpcs = lpcs_nodes[0]
if not lpcs.hasChildNodes():
raise BrokenAzureDataSource("no child nodes of configuration set")
- md_props = 'seedfrom'
- md = {'azure_data': {}}
+ md_props = "seedfrom"
+ md = {"azure_data": {}}
cfg = {}
ud = ""
password = None
@@ -1813,8 +1959,10 @@ def read_azure_ovf(contents):
simple = False
value = ""
- if (len(child.childNodes) == 1 and
- child.childNodes[0].nodeType == dom.TEXT_NODE):
+ if (
+ len(child.childNodes) == 1
+ and child.childNodes[0].nodeType == dom.TEXT_NODE
+ ):
simple = True
value = child.childNodes[0].wholeText
@@ -1823,8 +1971,8 @@ def read_azure_ovf(contents):
# we accept either UserData or CustomData. If both are present
# then behavior is undefined.
if name == "userdata" or name == "customdata":
- if attrs.get('encoding') in (None, "base64"):
- ud = base64.b64decode(''.join(value.split()))
+ if attrs.get("encoding") in (None, "base64"):
+ ud = base64.b64decode("".join(value.split()))
else:
ud = value
elif name == "username":
@@ -1832,36 +1980,36 @@ def read_azure_ovf(contents):
elif name == "userpassword":
password = value
elif name == "hostname":
- md['local-hostname'] = value
+ md["local-hostname"] = value
elif name == "dscfg":
- if attrs.get('encoding') in (None, "base64"):
- dscfg = base64.b64decode(''.join(value.split()))
+ if attrs.get("encoding") in (None, "base64"):
+ dscfg = base64.b64decode("".join(value.split()))
else:
dscfg = value
- cfg['datasource'] = {DS_NAME: util.load_yaml(dscfg, default={})}
+ cfg["datasource"] = {DS_NAME: util.load_yaml(dscfg, default={})}
elif name == "ssh":
- cfg['_pubkeys'] = load_azure_ovf_pubkeys(child)
+ cfg["_pubkeys"] = load_azure_ovf_pubkeys(child)
elif name == "disablesshpasswordauthentication":
- cfg['ssh_pwauth'] = util.is_false(value)
+ cfg["ssh_pwauth"] = util.is_false(value)
elif simple:
if name in md_props:
md[name] = value
else:
- md['azure_data'][name] = value
+ md["azure_data"][name] = value
defuser = {}
if username:
- defuser['name'] = username
+ defuser["name"] = username
if password:
- defuser['lock_passwd'] = False
+ defuser["lock_passwd"] = False
if DEF_PASSWD_REDACTION != password:
- defuser['passwd'] = cfg['password'] = encrypt_pass(password)
+ defuser["passwd"] = cfg["password"] = encrypt_pass(password)
if defuser:
- cfg['system_info'] = {'default_user': defuser}
+ cfg["system_info"] = {"default_user": defuser}
- if 'ssh_pwauth' not in cfg and password:
- cfg['ssh_pwauth'] = True
+ if "ssh_pwauth" not in cfg and password:
+ cfg["ssh_pwauth"] = True
preprovisioning_cfg = _get_preprovisioning_cfgs(dom)
cfg = util.mergemanydict([cfg, preprovisioning_cfg])
@@ -1887,20 +2035,18 @@ def _get_preprovisioning_cfgs(dom):
More specifically, this will never happen:
- PreprovisionedVm=True and PreprovisionedVMType=Savable
"""
- cfg = {
- "PreprovisionedVm": False,
- "PreprovisionedVMType": None
- }
+ cfg = {"PreprovisionedVm": False, "PreprovisionedVMType": None}
platform_settings_section = find_child(
- dom.documentElement,
- lambda n: n.localName == "PlatformSettingsSection")
+ dom.documentElement, lambda n: n.localName == "PlatformSettingsSection"
+ )
if not platform_settings_section or len(platform_settings_section) == 0:
LOG.debug("PlatformSettingsSection not found")
return cfg
platform_settings = find_child(
platform_settings_section[0],
- lambda n: n.localName == "PlatformSettings")
+ lambda n: n.localName == "PlatformSettings",
+ )
if not platform_settings or len(platform_settings) == 0:
LOG.debug("PlatformSettings not found")
return cfg
@@ -1909,10 +2055,12 @@ def _get_preprovisioning_cfgs(dom):
# platform has removed PreprovisionedVm and only surfaces
# PreprovisionedVMType.
cfg["PreprovisionedVm"] = _get_preprovisionedvm_cfg_value(
- platform_settings)
+ platform_settings
+ )
cfg["PreprovisionedVMType"] = _get_preprovisionedvmtype_cfg_value(
- platform_settings)
+ platform_settings
+ )
return cfg
@@ -1924,16 +2072,18 @@ def _get_preprovisionedvm_cfg_value(platform_settings):
# platform has removed PreprovisionedVm and only surfaces
# PreprovisionedVMType.
preprovisionedVmVal = find_child(
- platform_settings[0],
- lambda n: n.localName == "PreprovisionedVm")
+ platform_settings[0], lambda n: n.localName == "PreprovisionedVm"
+ )
if not preprovisionedVmVal or len(preprovisionedVmVal) == 0:
LOG.debug("PreprovisionedVm not found")
return preprovisionedVm
preprovisionedVm = util.translate_bool(
- preprovisionedVmVal[0].firstChild.nodeValue)
+ preprovisionedVmVal[0].firstChild.nodeValue
+ )
report_diagnostic_event(
- "PreprovisionedVm: %s" % preprovisionedVm, logger_func=LOG.info)
+ "PreprovisionedVm: %s" % preprovisionedVm, logger_func=LOG.info
+ )
return preprovisionedVm
@@ -1952,18 +2102,21 @@ def _get_preprovisionedvmtype_cfg_value(platform_settings):
# Once assigned to customer, the customer-requested nics are
# hot-attached to it and reprovision happens like today.
preprovisionedVMTypeVal = find_child(
- platform_settings[0],
- lambda n: n.localName == "PreprovisionedVMType")
- if (not preprovisionedVMTypeVal or len(preprovisionedVMTypeVal) == 0 or
- preprovisionedVMTypeVal[0].firstChild is None):
+ platform_settings[0], lambda n: n.localName == "PreprovisionedVMType"
+ )
+ if (
+ not preprovisionedVMTypeVal
+ or len(preprovisionedVMTypeVal) == 0
+ or preprovisionedVMTypeVal[0].firstChild is None
+ ):
LOG.debug("PreprovisionedVMType not found")
return preprovisionedVMType
preprovisionedVMType = preprovisionedVMTypeVal[0].firstChild.nodeValue
report_diagnostic_event(
- "PreprovisionedVMType: %s" % preprovisionedVMType,
- logger_func=LOG.info)
+ "PreprovisionedVMType: %s" % preprovisionedVMType, logger_func=LOG.info
+ )
return preprovisionedVMType
@@ -1987,7 +2140,7 @@ def _check_freebsd_cdrom(cdrom_dev):
@azure_ds_telemetry_reporter
def _get_random_seed(source=PLATFORM_ENTROPY_SOURCE):
"""Return content random seed file if available, otherwise,
- return None."""
+ return None."""
# azure / hyper-v provides random data here
# now update ds_cfg to reflect contents pass in config
if source is None:
@@ -2034,7 +2187,7 @@ def load_azure_ds_dir(source_dir):
contents = fp.read()
md, ud, cfg = read_azure_ovf(contents)
- return (md, ud, cfg, {'ovf-env.xml': contents})
+ return (md, ud, cfg, {"ovf-env.xml": contents})
@azure_ds_telemetry_reporter
@@ -2051,12 +2204,14 @@ def parse_network_config(imds_metadata) -> dict:
return _generate_network_config_from_imds_metadata(imds_metadata)
except Exception as e:
LOG.error(
- 'Failed generating network config '
- 'from IMDS network metadata: %s', str(e))
+ "Failed generating network config "
+ "from IMDS network metadata: %s",
+ str(e),
+ )
try:
return _generate_network_config_from_fallback_config()
except Exception as e:
- LOG.error('Failed generating fallback network config: %s', str(e))
+ LOG.error("Failed generating fallback network config: %s", str(e))
return {}
@@ -2068,57 +2223,60 @@ def _generate_network_config_from_imds_metadata(imds_metadata) -> dict:
@param: imds_metadata: Dict of content read from IMDS network service.
@return: Dictionary containing network version 2 standard configuration.
"""
- netconfig = {'version': 2, 'ethernets': {}}
- network_metadata = imds_metadata['network']
- for idx, intf in enumerate(network_metadata['interface']):
+ netconfig = {"version": 2, "ethernets": {}}
+ network_metadata = imds_metadata["network"]
+ for idx, intf in enumerate(network_metadata["interface"]):
has_ip_address = False
# First IPv4 and/or IPv6 address will be obtained via DHCP.
# Any additional IPs of each type will be set as static
# addresses.
- nicname = 'eth{idx}'.format(idx=idx)
- dhcp_override = {'route-metric': (idx + 1) * 100}
- dev_config = {'dhcp4': True, 'dhcp4-overrides': dhcp_override,
- 'dhcp6': False}
- for addr_type in ('ipv4', 'ipv6'):
- addresses = intf.get(addr_type, {}).get('ipAddress', [])
+ nicname = "eth{idx}".format(idx=idx)
+ dhcp_override = {"route-metric": (idx + 1) * 100}
+ dev_config = {
+ "dhcp4": True,
+ "dhcp4-overrides": dhcp_override,
+ "dhcp6": False,
+ }
+ for addr_type in ("ipv4", "ipv6"):
+ addresses = intf.get(addr_type, {}).get("ipAddress", [])
# If there are no available IP addresses, then we don't
# want to add this interface to the generated config.
if not addresses:
continue
has_ip_address = True
- if addr_type == 'ipv4':
- default_prefix = '24'
+ if addr_type == "ipv4":
+ default_prefix = "24"
else:
- default_prefix = '128'
+ default_prefix = "128"
if addresses:
- dev_config['dhcp6'] = True
+ dev_config["dhcp6"] = True
# non-primary interfaces should have a higher
# route-metric (cost) so default routes prefer
# primary nic due to lower route-metric value
- dev_config['dhcp6-overrides'] = dhcp_override
+ dev_config["dhcp6-overrides"] = dhcp_override
for addr in addresses[1:]:
# Append static address config for ip > 1
- netPrefix = intf[addr_type]['subnet'][0].get(
- 'prefix', default_prefix)
- privateIp = addr['privateIpAddress']
- if not dev_config.get('addresses'):
- dev_config['addresses'] = []
- dev_config['addresses'].append(
- '{ip}/{prefix}'.format(
- ip=privateIp, prefix=netPrefix))
+ netPrefix = intf[addr_type]["subnet"][0].get(
+ "prefix", default_prefix
+ )
+ privateIp = addr["privateIpAddress"]
+ if not dev_config.get("addresses"):
+ dev_config["addresses"] = []
+ dev_config["addresses"].append(
+ "{ip}/{prefix}".format(ip=privateIp, prefix=netPrefix)
+ )
if dev_config and has_ip_address:
- mac = ':'.join(re.findall(r'..', intf['macAddress']))
- dev_config.update({
- 'match': {'macaddress': mac.lower()},
- 'set-name': nicname
- })
+ mac = ":".join(re.findall(r"..", intf["macAddress"]))
+ dev_config.update(
+ {"match": {"macaddress": mac.lower()}, "set-name": nicname}
+ )
# With netvsc, we can get two interfaces that
# share the same MAC, so we need to make sure
# our match condition also contains the driver
driver = device_driver(nicname)
- if driver and driver == 'hv_netvsc':
- dev_config['match']['driver'] = driver
- netconfig['ethernets'][nicname] = dev_config
+ if driver and driver == "hv_netvsc":
+ dev_config["match"]["driver"] = driver
+ netconfig["ethernets"][nicname] = dev_config
return netconfig
@@ -2129,16 +2287,19 @@ def _generate_network_config_from_fallback_config() -> dict:
@return: Dictionary containing network version 2 standard configuration.
"""
return net.generate_fallback_config(
- blacklist_drivers=BLACKLIST_DRIVERS, config_driver=True)
+ blacklist_drivers=BLACKLIST_DRIVERS, config_driver=True
+ )
@azure_ds_telemetry_reporter
-def get_metadata_from_imds(fallback_nic,
- retries,
- md_type=metadata_type.all,
- api_version=IMDS_VER_MIN,
- exc_cb=retry_on_url_exc,
- infinite=False):
+def get_metadata_from_imds(
+ fallback_nic,
+ retries,
+ md_type=metadata_type.all,
+ api_version=IMDS_VER_MIN,
+ exc_cb=retry_on_url_exc,
+ infinite=False,
+):
"""Query Azure's instance metadata service, returning a dictionary.
If network is not up, setup ephemeral dhcp on fallback_nic to talk to the
@@ -2154,31 +2315,34 @@ def get_metadata_from_imds(fallback_nic,
@return: A dict of instance metadata containing compute and network
info.
"""
- kwargs = {'logfunc': LOG.debug,
- 'msg': 'Crawl of Azure Instance Metadata Service (IMDS)',
- 'func': _get_metadata_from_imds,
- 'args': (retries, exc_cb, md_type, api_version, infinite)}
+ kwargs = {
+ "logfunc": LOG.debug,
+ "msg": "Crawl of Azure Instance Metadata Service (IMDS)",
+ "func": _get_metadata_from_imds,
+ "args": (retries, exc_cb, md_type, api_version, infinite),
+ }
if net.is_up(fallback_nic):
return util.log_time(**kwargs)
else:
try:
- with EphemeralDHCPv4WithReporting(
- azure_ds_reporter, fallback_nic):
+ with EphemeralDHCPv4WithReporting(azure_ds_reporter, fallback_nic):
return util.log_time(**kwargs)
except Exception as e:
report_diagnostic_event(
"exception while getting metadata: %s" % e,
- logger_func=LOG.warning)
+ logger_func=LOG.warning,
+ )
raise
@azure_ds_telemetry_reporter
def _get_metadata_from_imds(
- retries,
- exc_cb,
- md_type=metadata_type.all,
- api_version=IMDS_VER_MIN,
- infinite=False):
+ retries,
+ exc_cb,
+ md_type=metadata_type.all,
+ api_version=IMDS_VER_MIN,
+ infinite=False,
+):
url = "{}?api-version={}".format(md_type.value, api_version)
headers = {"Metadata": "true"}
@@ -2188,20 +2352,27 @@ def _get_metadata_from_imds(
try:
response = readurl(
- url, timeout=IMDS_TIMEOUT_IN_SECONDS, headers=headers,
- retries=retries, exception_cb=exc_cb, infinite=infinite)
+ url,
+ timeout=IMDS_TIMEOUT_IN_SECONDS,
+ headers=headers,
+ retries=retries,
+ exception_cb=exc_cb,
+ infinite=infinite,
+ )
except Exception as e:
# pylint:disable=no-member
if isinstance(e, UrlError) and e.code == 400:
raise
else:
report_diagnostic_event(
- 'Ignoring IMDS instance metadata. '
- 'Get metadata from IMDS failed: %s' % e,
- logger_func=LOG.warning)
+ "Ignoring IMDS instance metadata. "
+ "Get metadata from IMDS failed: %s" % e,
+ logger_func=LOG.warning,
+ )
return {}
try:
from json.decoder import JSONDecodeError
+
json_decode_error = JSONDecodeError
except ImportError:
json_decode_error = ValueError
@@ -2210,9 +2381,10 @@ def _get_metadata_from_imds(
return util.load_json(str(response))
except json_decode_error as e:
report_diagnostic_event(
- 'Ignoring non-json IMDS instance metadata response: %s. '
- 'Loading non-json IMDS response failed: %s' % (str(response), e),
- logger_func=LOG.warning)
+ "Ignoring non-json IMDS instance metadata response: %s. "
+ "Loading non-json IMDS response failed: %s" % (str(response), e),
+ logger_func=LOG.warning,
+ )
return {}
@@ -2242,10 +2414,11 @@ def maybe_remove_ubuntu_network_config_scripts(paths=None):
if os.path.exists(path):
if not logged:
LOG.info(
- 'Removing Ubuntu extended network scripts because'
- ' cloud-init updates Azure network configuration on the'
- ' following events: %s.',
- [EventType.BOOT.value, EventType.BOOT_LEGACY.value])
+ "Removing Ubuntu extended network scripts because"
+ " cloud-init updates Azure network configuration on the"
+ " following events: %s.",
+ [EventType.BOOT.value, EventType.BOOT_LEGACY.value],
+ )
logged = True
if os.path.isdir(path):
util.del_dir(path)
@@ -2258,15 +2431,15 @@ def _is_platform_viable(seed_dir):
with events.ReportEventStack(
name="check-platform-viability",
description="found azure asset tag",
- parent=azure_ds_reporter
+ parent=azure_ds_reporter,
) as evt:
- asset_tag = dmi.read_dmi_data('chassis-asset-tag')
+ asset_tag = dmi.read_dmi_data("chassis-asset-tag")
if asset_tag == AZURE_CHASSIS_ASSET_TAG:
return True
msg = "Non-Azure DMI asset tag '%s' discovered." % asset_tag
evt.description = msg
report_diagnostic_event(msg, logger_func=LOG.debug)
- if os.path.exists(os.path.join(seed_dir, 'ovf-env.xml')):
+ if os.path.exists(os.path.join(seed_dir, "ovf-env.xml")):
return True
return False
@@ -2284,7 +2457,7 @@ DataSourceAzureNet = DataSourceAzure
# Used to match classes to dependencies
datasources = [
- (DataSourceAzure, (sources.DEP_FILESYSTEM, )),
+ (DataSourceAzure, (sources.DEP_FILESYSTEM,)),
]
@@ -2292,4 +2465,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
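The Azure hunks above rework `_generate_network_config_from_imds_metadata`, which turns each IMDS interface entry into a netplan v2 stanza: DHCP covers the first address of each family, any extra addresses become static entries, a `route-metric` of `(idx + 1) * 100` keeps default routes preferring the primary NIC, and the device is matched by MAC (with `driver: hv_netvsc` added when two interfaces share a MAC). The following is a minimal, self-contained sketch of that shape; `sample_interface` is invented for illustration and `netplan_for` is a simplified stand-in, not the datasource's function.

import re

# Invented IMDS-style snippet, shaped like the "interface" entries iterated
# above; the addresses and MAC are illustrative only.
sample_interface = {
    "macAddress": "000D3A123456",
    "ipv4": {
        "ipAddress": [
            {"privateIpAddress": "10.0.0.4"},
            {"privateIpAddress": "10.0.0.5"},
        ],
        "subnet": [{"prefix": "24"}],
    },
    "ipv6": {"ipAddress": []},
}


def netplan_for(intf, idx):
    """Build a netplan v2 stanza for interface number idx (simplified)."""
    dhcp_override = {"route-metric": (idx + 1) * 100}  # lower metric wins, so the primary NIC is preferred
    dev = {"dhcp4": True, "dhcp4-overrides": dhcp_override, "dhcp6": False}
    for family, default_prefix in (("ipv4", "24"), ("ipv6", "128")):
        addresses = intf.get(family, {}).get("ipAddress", [])
        if not addresses:
            continue
        if family == "ipv6":
            dev["dhcp6"] = True
            dev["dhcp6-overrides"] = dhcp_override
        # The first address per family is left to DHCP; extras become static.
        for addr in addresses[1:]:
            prefix = intf[family]["subnet"][0].get("prefix", default_prefix)
            dev.setdefault("addresses", []).append(
                "{}/{}".format(addr["privateIpAddress"], prefix)
            )
    mac = ":".join(re.findall(r"..", intf["macAddress"])).lower()
    dev["match"] = {"macaddress": mac}
    dev["set-name"] = "eth{}".format(idx)
    return dev


print({"version": 2, "ethernets": {"eth0": netplan_for(sample_interface, 0)}})

Running this prints a single `eth0` stanza with `route-metric: 100` and one static `10.0.0.5/24` entry, which mirrors the shape the code above emits for a first interface (driver probing and the has-address guard are omitted here).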
diff --git a/cloudinit/sources/DataSourceBigstep.py b/cloudinit/sources/DataSourceBigstep.py
index 63435279..426a762e 100644
--- a/cloudinit/sources/DataSourceBigstep.py
+++ b/cloudinit/sources/DataSourceBigstep.py
@@ -7,14 +7,12 @@
import errno
import json
-from cloudinit import sources
-from cloudinit import url_helper
-from cloudinit import util
+from cloudinit import sources, url_helper, util
class DataSourceBigstep(sources.DataSource):
- dsname = 'Bigstep'
+ dsname = "Bigstep"
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -35,7 +33,7 @@ class DataSourceBigstep(sources.DataSource):
def _get_subplatform(self):
"""Return the subplatform metadata source details."""
- return 'metadata (%s)' % get_url_from_file()
+ return "metadata (%s)" % get_url_from_file()
def get_url_from_file():
@@ -61,4 +59,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py
index f63baf74..de71c3e9 100644
--- a/cloudinit/sources/DataSourceCloudSigma.py
+++ b/cloudinit/sources/DataSourceCloudSigma.py
@@ -4,14 +4,13 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from base64 import b64decode
import re
-
-from cloudinit.cs_utils import Cepko, SERIAL_PORT
+from base64 import b64decode
from cloudinit import dmi
from cloudinit import log as logging
from cloudinit import sources
+from cloudinit.cs_utils import SERIAL_PORT, Cepko
LOG = logging.getLogger(__name__)
@@ -24,11 +23,11 @@ class DataSourceCloudSigma(sources.DataSource):
http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html
"""
- dsname = 'CloudSigma'
+ dsname = "CloudSigma"
def __init__(self, sys_cfg, distro, paths):
self.cepko = Cepko()
- self.ssh_public_key = ''
+ self.ssh_public_key = ""
sources.DataSource.__init__(self, sys_cfg, distro, paths)
def is_running_in_cloudsigma(self):
@@ -43,7 +42,7 @@ class DataSourceCloudSigma(sources.DataSource):
LOG.debug("system-product-name not available in dmi data")
return False
LOG.debug("detected hypervisor as %s", sys_product_name)
- return 'cloudsigma' in sys_product_name.lower()
+ return "cloudsigma" in sys_product_name.lower()
def _get_data(self):
"""
@@ -56,7 +55,7 @@ class DataSourceCloudSigma(sources.DataSource):
try:
server_context = self.cepko.all().result
- server_meta = server_context['meta']
+ server_meta = server_context["meta"]
except Exception:
# TODO: check for explicit "config on", and then warn
# but since no explicit config is available now, just debug.
@@ -64,41 +63,42 @@ class DataSourceCloudSigma(sources.DataSource):
return False
self.dsmode = self._determine_dsmode(
- [server_meta.get('cloudinit-dsmode')])
+ [server_meta.get("cloudinit-dsmode")]
+ )
if self.dsmode == sources.DSMODE_DISABLED:
return False
- base64_fields = server_meta.get('base64_fields', '').split(',')
- self.userdata_raw = server_meta.get('cloudinit-user-data', "")
- if 'cloudinit-user-data' in base64_fields:
+ base64_fields = server_meta.get("base64_fields", "").split(",")
+ self.userdata_raw = server_meta.get("cloudinit-user-data", "")
+ if "cloudinit-user-data" in base64_fields:
self.userdata_raw = b64decode(self.userdata_raw)
- if 'cloudinit' in server_context.get('vendor_data', {}):
+ if "cloudinit" in server_context.get("vendor_data", {}):
self.vendordata_raw = server_context["vendor_data"]["cloudinit"]
self.metadata = server_context
- self.ssh_public_key = server_meta['ssh_public_key']
+ self.ssh_public_key = server_meta["ssh_public_key"]
return True
def _get_subplatform(self):
"""Return the subplatform metadata source details."""
- return 'cepko (%s)' % SERIAL_PORT
+ return "cepko (%s)" % SERIAL_PORT
def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False):
"""
Cleans up and uses the server's name if the latter is set. Otherwise
the first part of the uuid is used.
"""
- if re.match(r'^[A-Za-z0-9 -_\.]+$', self.metadata['name']):
- return self.metadata['name'][:61]
+ if re.match(r"^[A-Za-z0-9 -_\.]+$", self.metadata["name"]):
+ return self.metadata["name"][:61]
else:
- return self.metadata['uuid'].split('-')[0]
+ return self.metadata["uuid"].split("-")[0]
def get_public_ssh_keys(self):
return [self.ssh_public_key]
def get_instance_id(self):
- return self.metadata['uuid']
+ return self.metadata["uuid"]
# Legacy: Must be present in case we load an old pkl object
@@ -107,7 +107,7 @@ DataSourceCloudSigmaNet = DataSourceCloudSigma
# Used to match classes to dependencies. Since this datasource uses the serial
# port network is not really required, so it's okay to load without it, too.
datasources = [
- (DataSourceCloudSigma, (sources.DEP_FILESYSTEM, )),
+ (DataSourceCloudSigma, (sources.DEP_FILESYSTEM,)),
]
@@ -117,4 +117,5 @@ def get_datasource_list(depends):
"""
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
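In the CloudSigma changes above, `_get_data` reads the server context over the serial port and base64-decodes `cloudinit-user-data` only when the server lists that field in `meta["base64_fields"]`. A small sketch of just that decode step, using an invented `server_meta` dict in place of a real Cepko result:

from base64 import b64decode, b64encode

# Invented server context for illustration; in the datasource this dict comes
# from Cepko reads over the CloudSigma serial port.
server_meta = {
    "base64_fields": "cloudinit-user-data",
    "cloudinit-user-data": b64encode(b"#cloud-config\nhostname: demo\n").decode(),
}

base64_fields = server_meta.get("base64_fields", "").split(",")
userdata = server_meta.get("cloudinit-user-data", "")
if "cloudinit-user-data" in base64_fields:
    # Decode only when the server marks the field as base64-encoded.
    userdata = b64decode(userdata)

print(userdata)  # b'#cloud-config\nhostname: demo\n'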
diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
index 8cb0d5a7..a742a5e6 100644
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -13,17 +13,16 @@
# This file is part of cloud-init. See LICENSE file for license information.
import os
-from socket import inet_ntoa, getaddrinfo, gaierror
-from struct import pack
import time
+from socket import gaierror, getaddrinfo, inet_ntoa
+from struct import pack
from cloudinit import ec2_utils as ec2
from cloudinit import log as logging
-from cloudinit.net import dhcp
-from cloudinit import sources
+from cloudinit import sources, subp
from cloudinit import url_helper as uhelp
-from cloudinit import subp
from cloudinit import util
+from cloudinit.net import dhcp
LOG = logging.getLogger(__name__)
@@ -47,27 +46,36 @@ class CloudStackPasswordServerClient(object):
# The password server was in the past, a broken HTTP server, but is now
# fixed. wget handles this seamlessly, so it's easier to shell out to
# that rather than write our own handling code.
- output, _ = subp.subp([
- 'wget', '--quiet', '--tries', '3', '--timeout', '20',
- '--output-document', '-', '--header',
- 'DomU_Request: {0}'.format(domu_request),
- '{0}:8080'.format(self.virtual_router_address)
- ])
+ output, _ = subp.subp(
+ [
+ "wget",
+ "--quiet",
+ "--tries",
+ "3",
+ "--timeout",
+ "20",
+ "--output-document",
+ "-",
+ "--header",
+ "DomU_Request: {0}".format(domu_request),
+ "{0}:8080".format(self.virtual_router_address),
+ ]
+ )
return output.strip()
def get_password(self):
- password = self._do_request('send_my_password')
- if password in ['', 'saved_password']:
+ password = self._do_request("send_my_password")
+ if password in ["", "saved_password"]:
return None
- if password == 'bad_request':
- raise RuntimeError('Error when attempting to fetch root password.')
- self._do_request('saved_password')
+ if password == "bad_request":
+ raise RuntimeError("Error when attempting to fetch root password.")
+ self._do_request("saved_password")
return password
class DataSourceCloudStack(sources.DataSource):
- dsname = 'CloudStack'
+ dsname = "CloudStack"
# Setup read_url parameters per get_url_params.
url_max_wait = 120
@@ -75,10 +83,10 @@ class DataSourceCloudStack(sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.seed_dir = os.path.join(paths.seed_dir, 'cs')
+ self.seed_dir = os.path.join(paths.seed_dir, "cs")
# Cloudstack has its metadata/userdata URLs located at
# http://<virtual-router-ip>/latest/
- self.api_ver = 'latest'
+ self.api_ver = "latest"
self.vr_addr = get_vr_address()
if not self.vr_addr:
raise RuntimeError("No virtual router found!")
@@ -91,19 +99,28 @@ class DataSourceCloudStack(sources.DataSource):
if url_params.max_wait_seconds <= 0:
return False
- urls = [uhelp.combine_url(self.metadata_address,
- 'latest/meta-data/instance-id')]
+ urls = [
+ uhelp.combine_url(
+ self.metadata_address, "latest/meta-data/instance-id"
+ )
+ ]
start_time = time.time()
url, _response = uhelp.wait_for_url(
- urls=urls, max_wait=url_params.max_wait_seconds,
- timeout=url_params.timeout_seconds, status_cb=LOG.warning)
+ urls=urls,
+ max_wait=url_params.max_wait_seconds,
+ timeout=url_params.timeout_seconds,
+ status_cb=LOG.warning,
+ )
if url:
LOG.debug("Using metadata source: '%s'", url)
else:
- LOG.critical(("Giving up on waiting for the metadata from %s"
- " after %s seconds"),
- urls, int(time.time() - start_time))
+ LOG.critical(
+ "Giving up on waiting for the metadata from %s"
+ " after %s seconds",
+ urls,
+ int(time.time() - start_time),
+ )
return bool(url)
@@ -113,8 +130,8 @@ class DataSourceCloudStack(sources.DataSource):
def _get_data(self):
seed_ret = {}
if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")):
- self.userdata_raw = seed_ret['user-data']
- self.metadata = seed_ret['meta-data']
+ self.userdata_raw = seed_ret["user-data"]
+ self.metadata = seed_ret["meta-data"]
LOG.debug("Using seeded cloudstack data from: %s", self.seed_dir)
return True
try:
@@ -122,39 +139,48 @@ class DataSourceCloudStack(sources.DataSource):
return False
start_time = time.time()
self.userdata_raw = ec2.get_instance_userdata(
- self.api_ver, self.metadata_address)
- self.metadata = ec2.get_instance_metadata(self.api_ver,
- self.metadata_address)
- LOG.debug("Crawl of metadata service took %s seconds",
- int(time.time() - start_time))
+ self.api_ver, self.metadata_address
+ )
+ self.metadata = ec2.get_instance_metadata(
+ self.api_ver, self.metadata_address
+ )
+ LOG.debug(
+ "Crawl of metadata service took %s seconds",
+ int(time.time() - start_time),
+ )
password_client = CloudStackPasswordServerClient(self.vr_addr)
try:
set_password = password_client.get_password()
except Exception:
- util.logexc(LOG,
- 'Failed to fetch password from virtual router %s',
- self.vr_addr)
+ util.logexc(
+ LOG,
+ "Failed to fetch password from virtual router %s",
+ self.vr_addr,
+ )
else:
if set_password:
self.cfg = {
- 'ssh_pwauth': True,
- 'password': set_password,
- 'chpasswd': {
- 'expire': False,
+ "ssh_pwauth": True,
+ "password": set_password,
+ "chpasswd": {
+ "expire": False,
},
}
return True
except Exception:
- util.logexc(LOG, 'Failed fetching from metadata service %s',
- self.metadata_address)
+ util.logexc(
+ LOG,
+ "Failed fetching from metadata service %s",
+ self.metadata_address,
+ )
return False
def get_instance_id(self):
- return self.metadata['instance-id']
+ return self.metadata["instance-id"]
@property
def availability_zone(self):
- return self.metadata['availability-zone']
+ return self.metadata["availability-zone"]
def get_data_server():
@@ -183,8 +209,11 @@ def get_default_gateway():
def get_dhclient_d():
# find lease files directory
- supported_dirs = ["/var/lib/dhclient", "/var/lib/dhcp",
- "/var/lib/NetworkManager"]
+ supported_dirs = [
+ "/var/lib/dhclient",
+ "/var/lib/dhcp",
+ "/var/lib/NetworkManager",
+ ]
for d in supported_dirs:
if os.path.exists(d) and len(os.listdir(d)) > 0:
LOG.debug("Using %s lease directory", d)
@@ -233,15 +262,18 @@ def get_vr_address():
# Try data-server DNS entry first
latest_address = get_data_server()
if latest_address:
- LOG.debug("Found metadata server '%s' via data-server DNS entry",
- latest_address)
+ LOG.debug(
+ "Found metadata server '%s' via data-server DNS entry",
+ latest_address,
+ )
return latest_address
# Try networkd second...
- latest_address = dhcp.networkd_get_option_from_leases('SERVER_ADDRESS')
+ latest_address = dhcp.networkd_get_option_from_leases("SERVER_ADDRESS")
if latest_address:
- LOG.debug("Found SERVER_ADDRESS '%s' via networkd_leases",
- latest_address)
+ LOG.debug(
+ "Found SERVER_ADDRESS '%s' via networkd_leases", latest_address
+ )
return latest_address
# Try dhcp lease files next...
@@ -275,4 +307,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
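The CloudStack portion above ends with `get_vr_address`, which locates the virtual router (and with it the metadata and password servers) by trying a fixed sequence of sources: the data-server DNS entry, then the `SERVER_ADDRESS` option from systemd-networkd leases, then dhclient lease files. A toy sketch of that first-hit-wins ordering; the lambdas are placeholders for the real lookups, not functions from the module:

# Toy illustration of the first-hit-wins ordering above. The lookups are
# placeholders, not the module's helpers.
def first_address(lookups):
    """Return the first lookup result that yields an address."""
    for name, lookup in lookups:
        address = lookup()
        if address:
            print("Found metadata server '%s' via %s" % (address, name))
            return address
    return None


vr_address = first_address([
    ("data-server DNS entry", lambda: None),       # e.g. resolving "data-server."
    ("networkd leases", lambda: None),             # SERVER_ADDRESS lease option
    ("dhclient lease files", lambda: "10.1.1.1"),  # latest dhcp-server-identifier
])
print(vr_address)  # 10.1.1.1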
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index 19c8d126..f7c58b12 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -9,9 +9,7 @@
import os
from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import sources, subp, util
from cloudinit.event import EventScope, EventType
from cloudinit.net import eni
from cloudinit.sources.DataSourceIBMCloud import get_ibm_platform
@@ -21,32 +19,35 @@ LOG = logging.getLogger(__name__)
# Various defaults/constants...
DEFAULT_IID = "iid-dsconfigdrive"
-DEFAULT_MODE = 'pass'
+DEFAULT_MODE = "pass"
DEFAULT_METADATA = {
"instance-id": DEFAULT_IID,
}
-FS_TYPES = ('vfat', 'iso9660')
-LABEL_TYPES = ('config-2', 'CONFIG-2')
-POSSIBLE_MOUNTS = ('sr', 'cd')
-OPTICAL_DEVICES = tuple(('/dev/%s%s' % (z, i) for z in POSSIBLE_MOUNTS
- for i in range(0, 2)))
+FS_TYPES = ("vfat", "iso9660")
+LABEL_TYPES = ("config-2", "CONFIG-2")
+POSSIBLE_MOUNTS = ("sr", "cd")
+OPTICAL_DEVICES = tuple(
+ ("/dev/%s%s" % (z, i) for z in POSSIBLE_MOUNTS for i in range(0, 2))
+)
class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
- dsname = 'ConfigDrive'
+ dsname = "ConfigDrive"
- supported_update_events = {EventScope.NETWORK: {
- EventType.BOOT_NEW_INSTANCE,
- EventType.BOOT,
- EventType.BOOT_LEGACY,
- EventType.HOTPLUG,
- }}
+ supported_update_events = {
+ EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ EventType.BOOT_LEGACY,
+ EventType.HOTPLUG,
+ }
+ }
def __init__(self, sys_cfg, distro, paths):
super(DataSourceConfigDrive, self).__init__(sys_cfg, distro, paths)
self.source = None
- self.seed_dir = os.path.join(paths.seed_dir, 'config_drive')
+ self.seed_dir = os.path.join(paths.seed_dir, "config_drive")
self.version = None
self.ec2_metadata = None
self._network_config = None
@@ -76,15 +77,16 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
util.logexc(LOG, "Failed reading config drive from %s", sdir)
if not found:
- dslist = self.sys_cfg.get('datasource_list')
+ dslist = self.sys_cfg.get("datasource_list")
for dev in find_candidate_devs(dslist=dslist):
mtype = None
if util.is_BSD():
if dev.startswith("/dev/cd"):
mtype = "cd9660"
try:
- results = util.mount_cb(dev, read_config_drive,
- mtype=mtype)
+ results = util.mount_cb(
+ dev, read_config_drive, mtype=mtype
+ )
found = dev
except openstack.NonReadable:
pass
@@ -97,41 +99,49 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
if not found:
return False
- md = results.get('metadata', {})
+ md = results.get("metadata", {})
md = util.mergemanydict([md, DEFAULT_METADATA])
self.dsmode = self._determine_dsmode(
- [results.get('dsmode'), self.ds_cfg.get('dsmode'),
- sources.DSMODE_PASS if results['version'] == 1 else None])
+ [
+ results.get("dsmode"),
+ self.ds_cfg.get("dsmode"),
+ sources.DSMODE_PASS if results["version"] == 1 else None,
+ ]
+ )
if self.dsmode == sources.DSMODE_DISABLED:
return False
prev_iid = get_previous_iid(self.paths)
- cur_iid = md['instance-id']
+ cur_iid = md["instance-id"]
if prev_iid != cur_iid:
# better would be to handle this centrally, allowing
# the datasource to do something on new instance id
# note, networking is only rendered here if dsmode is DSMODE_PASS
# which means "DISABLED, but render files and networking"
- on_first_boot(results, distro=self.distro,
- network=self.dsmode == sources.DSMODE_PASS)
+ on_first_boot(
+ results,
+ distro=self.distro,
+ network=self.dsmode == sources.DSMODE_PASS,
+ )
# This is legacy and sneaky. If dsmode is 'pass' then do not claim
# the datasource was used, even though we did run on_first_boot above.
if self.dsmode == sources.DSMODE_PASS:
- LOG.debug("%s: not claiming datasource, dsmode=%s", self,
- self.dsmode)
+ LOG.debug(
+ "%s: not claiming datasource, dsmode=%s", self, self.dsmode
+ )
return False
self.source = found
self.metadata = md
- self.ec2_metadata = results.get('ec2-metadata')
- self.userdata_raw = results.get('userdata')
- self.version = results['version']
- self.files.update(results.get('files', {}))
+ self.ec2_metadata = results.get("ec2-metadata")
+ self.userdata_raw = results.get("userdata")
+ self.version = results["version"]
+ self.files.update(results.get("files", {}))
- vd = results.get('vendordata')
+ vd = results.get("vendordata")
self.vendordata_pure = vd
try:
self.vendordata_raw = sources.convert_vendordata(vd)
@@ -143,7 +153,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
# obsolete compared to networkdata (from network_data.json) but both
# might be present.
self.network_eni = results.get("network_config")
- self.network_json = results.get('networkdata')
+ self.network_json = results.get("networkdata")
return True
def check_instance_id(self, sys_cfg):
@@ -156,7 +166,8 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
if self.network_json not in (None, sources.UNSET):
LOG.debug("network config provided via network_json")
self._network_config = openstack.convert_net_json(
- self.network_json, known_macs=self.known_macs)
+ self.network_json, known_macs=self.known_macs
+ )
elif self.network_eni is not None:
self._network_config = eni.convert_eni_data(self.network_eni)
LOG.debug("network config provided via converted eni data")
@@ -166,15 +177,15 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
@property
def platform(self):
- return 'openstack'
+ return "openstack"
def _get_subplatform(self):
"""Return the subplatform metadata source details."""
- if self.source.startswith('/dev'):
- subplatform_type = 'config-disk'
+ if self.source.startswith("/dev"):
+ subplatform_type = "config-disk"
else:
- subplatform_type = 'seed-dir'
- return '%s (%s)' % (subplatform_type, self.source)
+ subplatform_type = "seed-dir"
+ return "%s (%s)" % (subplatform_type, self.source)
def read_config_drive(source_dir):
@@ -196,7 +207,7 @@ def get_previous_iid(paths):
# interestingly, for this purpose the "previous" instance-id is the current
# instance-id. cloud-init hasn't moved them over yet as this datasource
# hasn't declared itself found.
- fname = os.path.join(paths.get_cpath('data'), 'instance-id')
+ fname = os.path.join(paths.get_cpath("data"), "instance-id")
try:
return util.load_file(fname).rstrip("\n")
except IOError:
@@ -206,14 +217,15 @@ def get_previous_iid(paths):
def on_first_boot(data, distro=None, network=True):
"""Performs any first-boot actions using data read from a config-drive."""
if not isinstance(data, dict):
- raise TypeError("Config-drive data expected to be a dict; not %s"
- % (type(data)))
+ raise TypeError(
+ "Config-drive data expected to be a dict; not %s" % (type(data))
+ )
if network:
- net_conf = data.get("network_config", '')
+ net_conf = data.get("network_config", "")
if net_conf and distro:
LOG.warning("Updating network interfaces from config drive")
distro.apply_network_config(eni.convert_eni_data(net_conf))
- write_injected_files(data.get('files'))
+ write_injected_files(data.get("files"))
def write_injected_files(files):
@@ -270,12 +282,13 @@ def find_candidate_devs(probe_optical=True, dslist=None):
# combine list of items by putting by-label items first
# followed by fstype items, but with dupes removed
- candidates = (by_label + [d for d in by_fstype if d not in by_label])
+ candidates = by_label + [d for d in by_fstype if d not in by_label]
# We are looking for a block device or partition with necessary label or
# an unpartitioned block device (ex sda, not sda1)
- devices = [d for d in candidates
- if d in by_label or not util.is_partition(d)]
+ devices = [
+ d for d in candidates if d in by_label or not util.is_partition(d)
+ ]
LOG.debug("devices=%s dslist=%s", devices, dslist)
if devices and "IBMCloud" in dslist:
@@ -283,8 +296,11 @@ def find_candidate_devs(probe_optical=True, dslist=None):
ibm_platform, ibm_path = get_ibm_platform()
if ibm_path in devices:
devices.remove(ibm_path)
- LOG.debug("IBMCloud device '%s' (%s) removed from candidate list",
- ibm_path, ibm_platform)
+ LOG.debug(
+ "IBMCloud device '%s' (%s) removed from candidate list",
+ ibm_path,
+ ibm_platform,
+ )
return devices
@@ -302,4 +318,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
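`find_candidate_devs` in the ConfigDrive hunk above orders candidate devices by listing label matches first, then filesystem-type matches with duplicates dropped, and keeps partitions only when they matched by label. A short sketch of that ordering on invented device lists (`is_partition` here is a crude stand-in for `util.is_partition`):

# Invented device lists for illustration; the real ones come from
# util.find_devs_with() lookups by LABEL= and TYPE=.
by_label = ["/dev/sr0", "/dev/vdb1"]
by_fstype = ["/dev/sr0", "/dev/vda", "/dev/vda1"]


def is_partition(dev):
    # Stand-in for util.is_partition(): treat a trailing digit as a partition.
    return dev[-1].isdigit()


# by-label entries first, then fstype matches with duplicates removed...
candidates = by_label + [d for d in by_fstype if d not in by_label]
# ...keeping whole disks, plus partitions only when they matched by label.
devices = [d for d in candidates if d in by_label or not is_partition(d)]

print(candidates)  # ['/dev/sr0', '/dev/vdb1', '/dev/vda', '/dev/vda1']
print(devices)     # ['/dev/sr0', '/dev/vdb1', '/dev/vda']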
diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py
index 08805d99..52d3ad26 100644
--- a/cloudinit/sources/DataSourceDigitalOcean.py
+++ b/cloudinit/sources/DataSourceDigitalOcean.py
@@ -6,16 +6,14 @@
# DigitalOcean Droplet API:
# https://developers.digitalocean.com/documentation/metadata/
-from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import util
-
import cloudinit.sources.helpers.digitalocean as do_helper
+from cloudinit import log as logging
+from cloudinit import sources, util
LOG = logging.getLogger(__name__)
BUILTIN_DS_CONFIG = {
- 'metadata_url': 'http://169.254.169.254/metadata/v1.json',
+ "metadata_url": "http://169.254.169.254/metadata/v1.json",
}
# Wait for up to a minute, retrying the meta-data server
@@ -28,20 +26,25 @@ MD_USE_IPV4LL = True
class DataSourceDigitalOcean(sources.DataSource):
- dsname = 'DigitalOcean'
+ dsname = "DigitalOcean"
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.distro = distro
self.metadata = dict()
- self.ds_cfg = util.mergemanydict([
- util.get_cfg_by_path(sys_cfg, ["datasource", "DigitalOcean"], {}),
- BUILTIN_DS_CONFIG])
- self.metadata_address = self.ds_cfg['metadata_url']
- self.retries = self.ds_cfg.get('retries', MD_RETRIES)
- self.timeout = self.ds_cfg.get('timeout', MD_TIMEOUT)
- self.use_ip4LL = self.ds_cfg.get('use_ip4LL', MD_USE_IPV4LL)
- self.wait_retry = self.ds_cfg.get('wait_retry', MD_WAIT_RETRY)
+ self.ds_cfg = util.mergemanydict(
+ [
+ util.get_cfg_by_path(
+ sys_cfg, ["datasource", "DigitalOcean"], {}
+ ),
+ BUILTIN_DS_CONFIG,
+ ]
+ )
+ self.metadata_address = self.ds_cfg["metadata_url"]
+ self.retries = self.ds_cfg.get("retries", MD_RETRIES)
+ self.timeout = self.ds_cfg.get("timeout", MD_TIMEOUT)
+ self.use_ip4LL = self.ds_cfg.get("use_ip4LL", MD_USE_IPV4LL)
+ self.wait_retry = self.ds_cfg.get("wait_retry", MD_WAIT_RETRY)
self._network_config = None
def _get_sysinfo(self):
@@ -61,15 +64,18 @@ class DataSourceDigitalOcean(sources.DataSource):
ipv4LL_nic = do_helper.assign_ipv4_link_local(self.distro)
md = do_helper.read_metadata(
- self.metadata_address, timeout=self.timeout,
- sec_between=self.wait_retry, retries=self.retries)
+ self.metadata_address,
+ timeout=self.timeout,
+ sec_between=self.wait_retry,
+ retries=self.retries,
+ )
self.metadata_full = md
- self.metadata['instance-id'] = md.get('droplet_id', droplet_id)
- self.metadata['local-hostname'] = md.get('hostname', droplet_id)
- self.metadata['interfaces'] = md.get('interfaces')
- self.metadata['public-keys'] = md.get('public_keys')
- self.metadata['availability_zone'] = md.get('region', 'default')
+ self.metadata["instance-id"] = md.get("droplet_id", droplet_id)
+ self.metadata["local-hostname"] = md.get("hostname", droplet_id)
+ self.metadata["interfaces"] = md.get("interfaces")
+ self.metadata["public-keys"] = md.get("public_keys")
+ self.metadata["availability_zone"] = md.get("region", "default")
self.vendordata_raw = md.get("vendor_data", None)
self.userdata_raw = md.get("user_data", None)
@@ -80,32 +86,34 @@ class DataSourceDigitalOcean(sources.DataSource):
def check_instance_id(self, sys_cfg):
return sources.instance_id_matches_system_uuid(
- self.get_instance_id(), 'system-serial-number')
+ self.get_instance_id(), "system-serial-number"
+ )
@property
def network_config(self):
"""Configure the networking. This needs to be done each boot, since
- the IP information may have changed due to snapshot and/or
- migration.
+ the IP information may have changed due to snapshot and/or
+ migration.
"""
if self._network_config:
return self._network_config
- interfaces = self.metadata.get('interfaces')
+ interfaces = self.metadata.get("interfaces")
LOG.debug(interfaces)
if not interfaces:
raise Exception("Unable to get meta-data from server....")
- nameservers = self.metadata_full['dns']['nameservers']
+ nameservers = self.metadata_full["dns"]["nameservers"]
self._network_config = do_helper.convert_network_configuration(
- interfaces, nameservers)
+ interfaces, nameservers
+ )
return self._network_config
# Used to match classes to dependencies
datasources = [
- (DataSourceDigitalOcean, (sources.DEP_FILESYSTEM, )),
+ (DataSourceDigitalOcean, (sources.DEP_FILESYSTEM,)),
]
@@ -113,4 +121,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
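The DigitalOcean constructor above builds `ds_cfg` by passing the operator's `datasource.DigitalOcean` section and `BUILTIN_DS_CONFIG` to `util.mergemanydict`; the operator's dict is listed first, so its keys appear to take precedence over the built-in defaults. A rough first-wins approximation of that merge (the real helper performs a deeper merge, so treat this only as an illustration):

# Rough, first-wins approximation of the merge above; util.mergemanydict in
# cloud-init performs a deeper merge, so this is illustrative only.
BUILTIN_DS_CONFIG = {
    "metadata_url": "http://169.254.169.254/metadata/v1.json",
}

# Invented operator override, e.g. from a file under /etc/cloud/cloud.cfg.d/.
user_ds_cfg = {"retries": 10}


def merge_first_wins(dicts):
    merged = {}
    for d in dicts:
        for key, value in d.items():
            merged.setdefault(key, value)  # keys from earlier dicts win
    return merged


ds_cfg = merge_first_wins([user_ds_cfg, BUILTIN_DS_CONFIG])
print(ds_cfg)
# {'retries': 10, 'metadata_url': 'http://169.254.169.254/metadata/v1.json'}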
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 700437b0..03b3870c 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -15,13 +15,11 @@ import time
from cloudinit import dmi
from cloudinit import ec2_utils as ec2
from cloudinit import log as logging
-from cloudinit import net
-from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
-from cloudinit import sources
+from cloudinit import net, sources
from cloudinit import url_helper as uhelp
-from cloudinit import util
-from cloudinit import warnings
+from cloudinit import util, warnings
from cloudinit.event import EventScope, EventType
+from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
LOG = logging.getLogger(__name__)
@@ -30,10 +28,10 @@ SKIP_METADATA_URL_CODES = frozenset([uhelp.NOT_FOUND])
STRICT_ID_PATH = ("datasource", "Ec2", "strict_id")
STRICT_ID_DEFAULT = "warn"
-API_TOKEN_ROUTE = 'latest/api/token'
-AWS_TOKEN_TTL_SECONDS = '21600'
-AWS_TOKEN_PUT_HEADER = 'X-aws-ec2-metadata-token'
-AWS_TOKEN_REQ_HEADER = AWS_TOKEN_PUT_HEADER + '-ttl-seconds'
+API_TOKEN_ROUTE = "latest/api/token"
+AWS_TOKEN_TTL_SECONDS = "21600"
+AWS_TOKEN_PUT_HEADER = "X-aws-ec2-metadata-token"
+AWS_TOKEN_REQ_HEADER = AWS_TOKEN_PUT_HEADER + "-ttl-seconds"
AWS_TOKEN_REDACT = [AWS_TOKEN_PUT_HEADER, AWS_TOKEN_REQ_HEADER]
@@ -53,18 +51,18 @@ class CloudNames(object):
class DataSourceEc2(sources.DataSource):
- dsname = 'Ec2'
+ dsname = "Ec2"
# Default metadata urls that will be used if none are provided
# They will be checked for 'resolveability' and some of the
# following may be discarded if they do not resolve
metadata_urls = ["http://169.254.169.254", "http://instance-data.:8773"]
# The minimum supported metadata_version from the ec2 metadata apis
- min_metadata_version = '2009-04-04'
+ min_metadata_version = "2009-04-04"
# Priority ordered list of additional metadata versions which will be tried
# for extended metadata content. IPv6 support comes in 2016-09-02
- extended_metadata_versions = ['2018-09-24', '2016-09-02']
+ extended_metadata_versions = ["2018-09-24", "2016-09-02"]
# Setup read_url parameters per get_url_params.
url_max_wait = 120
@@ -76,12 +74,14 @@ class DataSourceEc2(sources.DataSource):
# Whether we want to get network configuration from the metadata service.
perform_dhcp_setup = False
- supported_update_events = {EventScope.NETWORK: {
- EventType.BOOT_NEW_INSTANCE,
- EventType.BOOT,
- EventType.BOOT_LEGACY,
- EventType.HOTPLUG,
- }}
+ supported_update_events = {
+ EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ EventType.BOOT_LEGACY,
+ EventType.HOTPLUG,
+ }
+ }
def __init__(self, sys_cfg, distro, paths):
super(DataSourceEc2, self).__init__(sys_cfg, distro, paths)
@@ -93,11 +93,18 @@ class DataSourceEc2(sources.DataSource):
def _get_data(self):
strict_mode, _sleep = read_strict_mode(
- util.get_cfg_by_path(self.sys_cfg, STRICT_ID_PATH,
- STRICT_ID_DEFAULT), ("warn", None))
-
- LOG.debug("strict_mode: %s, cloud_name=%s cloud_platform=%s",
- strict_mode, self.cloud_name, self.platform)
+ util.get_cfg_by_path(
+ self.sys_cfg, STRICT_ID_PATH, STRICT_ID_DEFAULT
+ ),
+ ("warn", None),
+ )
+
+ LOG.debug(
+ "strict_mode: %s, cloud_name=%s cloud_platform=%s",
+ strict_mode,
+ self.cloud_name,
+ self.platform,
+ )
if strict_mode == "true" and self.cloud_name == CloudNames.UNKNOWN:
return False
elif self.cloud_name == CloudNames.NO_EC2_METADATA:
@@ -110,20 +117,27 @@ class DataSourceEc2(sources.DataSource):
try:
with EphemeralDHCPv4(self.fallback_interface):
self._crawled_metadata = util.log_time(
- logfunc=LOG.debug, msg='Crawl of metadata service',
- func=self.crawl_metadata)
+ logfunc=LOG.debug,
+ msg="Crawl of metadata service",
+ func=self.crawl_metadata,
+ )
except NoDHCPLeaseError:
return False
else:
self._crawled_metadata = util.log_time(
- logfunc=LOG.debug, msg='Crawl of metadata service',
- func=self.crawl_metadata)
+ logfunc=LOG.debug,
+ msg="Crawl of metadata service",
+ func=self.crawl_metadata,
+ )
if not self._crawled_metadata:
return False
- self.metadata = self._crawled_metadata.get('meta-data', None)
- self.userdata_raw = self._crawled_metadata.get('user-data', None)
- self.identity = self._crawled_metadata.get(
- 'dynamic', {}).get('instance-identity', {}).get('document', {})
+ self.metadata = self._crawled_metadata.get("meta-data", None)
+ self.userdata_raw = self._crawled_metadata.get("user-data", None)
+ self.identity = (
+ self._crawled_metadata.get("dynamic", {})
+ .get("instance-identity", {})
+ .get("document", {})
+ )
return True
def is_classic_instance(self):
@@ -133,9 +147,9 @@ class DataSourceEc2(sources.DataSource):
# network_config where metadata will be present.
# Secondary call site is in packaging postinst script.
return False
- ifaces_md = self.metadata.get('network', {}).get('interfaces', {})
- for _mac, mac_data in ifaces_md.get('macs', {}).items():
- if 'vpc-id' in mac_data:
+ ifaces_md = self.metadata.get("network", {}).get("interfaces", {})
+ for _mac, mac_data in ifaces_md.get("macs", {}).items():
+ if "vpc-id" in mac_data:
return False
return True
@@ -143,12 +157,12 @@ class DataSourceEc2(sources.DataSource):
def launch_index(self):
if not self.metadata:
return None
- return self.metadata.get('ami-launch-index')
+ return self.metadata.get("ami-launch-index")
@property
def platform(self):
# Handle upgrade path of pickled ds
- if not hasattr(self, '_platform_type'):
+ if not hasattr(self, "_platform_type"):
self._platform_type = DataSourceEc2.dsname.lower()
if not self._platform_type:
self._platform_type = DataSourceEc2.dsname.lower()
@@ -164,44 +178,47 @@ class DataSourceEc2(sources.DataSource):
min_metadata_version.
"""
# Assumes metadata service is already up
- url_tmpl = '{0}/{1}/meta-data/instance-id'
+ url_tmpl = "{0}/{1}/meta-data/instance-id"
headers = self._get_headers()
for api_ver in self.extended_metadata_versions:
url = url_tmpl.format(self.metadata_address, api_ver)
try:
- resp = uhelp.readurl(url=url, headers=headers,
- headers_redact=AWS_TOKEN_REDACT)
+ resp = uhelp.readurl(
+ url=url, headers=headers, headers_redact=AWS_TOKEN_REDACT
+ )
except uhelp.UrlError as e:
- LOG.debug('url %s raised exception %s', url, e)
+ LOG.debug("url %s raised exception %s", url, e)
else:
if resp.code == 200:
- LOG.debug('Found preferred metadata version %s', api_ver)
+ LOG.debug("Found preferred metadata version %s", api_ver)
return api_ver
elif resp.code == 404:
- msg = 'Metadata api version %s not present. Headers: %s'
+ msg = "Metadata api version %s not present. Headers: %s"
LOG.debug(msg, api_ver, resp.headers)
return self.min_metadata_version
def get_instance_id(self):
if self.cloud_name == CloudNames.AWS:
# Prefer the ID from the instance identity document, but fall back
- if not getattr(self, 'identity', None):
+ if not getattr(self, "identity", None):
# If re-using a cached datasource, its get_data run didn't
# set up self.identity. So we need to do that now.
api_version = self.get_metadata_api_version()
self.identity = ec2.get_instance_identity(
- api_version, self.metadata_address,
+ api_version,
+ self.metadata_address,
headers_cb=self._get_headers,
headers_redact=AWS_TOKEN_REDACT,
- exception_cb=self._refresh_stale_aws_token_cb).get(
- 'document', {})
+ exception_cb=self._refresh_stale_aws_token_cb,
+ ).get("document", {})
return self.identity.get(
- 'instanceId', self.metadata['instance-id'])
+ "instanceId", self.metadata["instance-id"]
+ )
else:
- return self.metadata['instance-id']
+ return self.metadata["instance-id"]
def _maybe_fetch_api_token(self, mdurls, timeout=None, max_wait=None):
- """ Get an API token for EC2 Instance Metadata Service.
+ """Get an API token for EC2 Instance Metadata Service.
On EC2, IMDS will always answer with an API token, unless
the instance owner has disabled the IMDS HTTP endpoint or
@@ -213,26 +230,29 @@ class DataSourceEc2(sources.DataSource):
urls = []
url2base = {}
url_path = API_TOKEN_ROUTE
- request_method = 'PUT'
+ request_method = "PUT"
for url in mdurls:
- cur = '{0}/{1}'.format(url, url_path)
+ cur = "{0}/{1}".format(url, url_path)
urls.append(cur)
url2base[cur] = url
# use the self._imds_exception_cb to check for Read errors
- LOG.debug('Fetching Ec2 IMDSv2 API Token')
+ LOG.debug("Fetching Ec2 IMDSv2 API Token")
response = None
url = None
url_params = self.get_url_params()
try:
url, response = uhelp.wait_for_url(
- urls=urls, max_wait=url_params.max_wait_seconds,
- timeout=url_params.timeout_seconds, status_cb=LOG.warning,
+ urls=urls,
+ max_wait=url_params.max_wait_seconds,
+ timeout=url_params.timeout_seconds,
+ status_cb=LOG.warning,
headers_cb=self._get_headers,
exception_cb=self._imds_exception_cb,
request_method=request_method,
- headers_redact=AWS_TOKEN_REDACT)
+ headers_redact=AWS_TOKEN_REDACT,
+ )
except uhelp.UrlError:
# We use the raised exception to interrupt the retry loop.
# Nothing else to do here.
@@ -258,8 +278,10 @@ class DataSourceEc2(sources.DataSource):
filtered = [x for x in mdurls if util.is_resolvable_url(x)]
if set(filtered) != set(mdurls):
- LOG.debug("Removed the following from metadata urls: %s",
- list((set(mdurls) - set(filtered))))
+ LOG.debug(
+ "Removed the following from metadata urls: %s",
+ list((set(mdurls) - set(filtered))),
+ )
if len(filtered):
mdurls = filtered
@@ -277,20 +299,25 @@ class DataSourceEc2(sources.DataSource):
# if we can't get a token, use instance-id path
urls = []
url2base = {}
- url_path = '{ver}/meta-data/instance-id'.format(
- ver=self.min_metadata_version)
- request_method = 'GET'
+ url_path = "{ver}/meta-data/instance-id".format(
+ ver=self.min_metadata_version
+ )
+ request_method = "GET"
for url in mdurls:
- cur = '{0}/{1}'.format(url, url_path)
+ cur = "{0}/{1}".format(url, url_path)
urls.append(cur)
url2base[cur] = url
start_time = time.time()
url, _ = uhelp.wait_for_url(
- urls=urls, max_wait=url_params.max_wait_seconds,
- timeout=url_params.timeout_seconds, status_cb=LOG.warning,
- headers_redact=AWS_TOKEN_REDACT, headers_cb=self._get_headers,
- request_method=request_method)
+ urls=urls,
+ max_wait=url_params.max_wait_seconds,
+ timeout=url_params.timeout_seconds,
+ status_cb=LOG.warning,
+ headers_redact=AWS_TOKEN_REDACT,
+ headers_cb=self._get_headers,
+ request_method=request_method,
+ )
if url:
metadata_address = url2base[url]
@@ -301,8 +328,11 @@ class DataSourceEc2(sources.DataSource):
elif self.cloud_name == CloudNames.AWS:
LOG.warning("IMDS's HTTP endpoint is probably disabled")
else:
- LOG.critical("Giving up on md from %s after %s seconds",
- urls, int(time.time() - start_time))
+ LOG.critical(
+ "Giving up on md from %s after %s seconds",
+ urls,
+ int(time.time() - start_time),
+ )
return bool(metadata_address)
@@ -310,7 +340,7 @@ class DataSourceEc2(sources.DataSource):
# Consult metadata service, that has
# ephemeral0: sdb
# and return 'sdb' for input 'ephemeral0'
- if 'block-device-mapping' not in self.metadata:
+ if "block-device-mapping" not in self.metadata:
return None
# Example:
@@ -319,7 +349,7 @@ class DataSourceEc2(sources.DataSource):
# 'ephemeral0': '/dev/sdb',
# 'root': '/dev/sda1'}
found = None
- bdm = self.metadata['block-device-mapping']
+ bdm = self.metadata["block-device-mapping"]
if not isinstance(bdm, dict):
LOG.debug("block-device-mapping not a dictionary: '%s'", bdm)
return None
@@ -362,17 +392,18 @@ class DataSourceEc2(sources.DataSource):
try:
if self.cloud_name == CloudNames.AWS:
return self.identity.get(
- 'availabilityZone',
- self.metadata['placement']['availability-zone'])
+ "availabilityZone",
+ self.metadata["placement"]["availability-zone"],
+ )
else:
- return self.metadata['placement']['availability-zone']
+ return self.metadata["placement"]["availability-zone"]
except KeyError:
return None
@property
def region(self):
if self.cloud_name == CloudNames.AWS:
- region = self.identity.get('region')
+ region = self.identity.get("region")
# Fallback to trimming the availability zone if region is missing
if self.availability_zone and not region:
region = self.availability_zone[:-1]
@@ -389,7 +420,8 @@ class DataSourceEc2(sources.DataSource):
if self.cloud_name == CloudNames.UNKNOWN:
warn_if_necessary(
util.get_cfg_by_path(cfg, STRICT_ID_PATH, STRICT_ID_DEFAULT),
- cfg)
+ cfg,
+ )
@property
def network_config(self):
@@ -400,30 +432,39 @@ class DataSourceEc2(sources.DataSource):
if self.metadata is None:
# this would happen if get_data hadn't been called. leave as UNSET
LOG.warning(
- "Unexpected call to network_config when metadata is None.")
+ "Unexpected call to network_config when metadata is None."
+ )
return None
result = None
no_network_metadata_on_aws = bool(
- 'network' not in self.metadata and
- self.cloud_name == CloudNames.AWS)
+ "network" not in self.metadata
+ and self.cloud_name == CloudNames.AWS
+ )
if no_network_metadata_on_aws:
- LOG.debug("Metadata 'network' not present:"
- " Refreshing stale metadata from prior to upgrade.")
+ LOG.debug(
+ "Metadata 'network' not present:"
+ " Refreshing stale metadata from prior to upgrade."
+ )
util.log_time(
- logfunc=LOG.debug, msg='Re-crawl of metadata service',
- func=self.get_data)
+ logfunc=LOG.debug,
+ msg="Re-crawl of metadata service",
+ func=self.get_data,
+ )
iface = self.fallback_interface
- net_md = self.metadata.get('network')
+ net_md = self.metadata.get("network")
if isinstance(net_md, dict):
# SRU_BLOCKER: xenial, bionic and eoan should default
# apply_full_imds_network_config to False to retain original
# behavior on those releases.
result = convert_ec2_metadata_network_config(
- net_md, fallback_nic=iface,
+ net_md,
+ fallback_nic=iface,
full_network_config=util.get_cfg_option_bool(
- self.ds_cfg, 'apply_full_imds_network_config', True))
+ self.ds_cfg, "apply_full_imds_network_config", True
+ ),
+ )
# RELEASE_BLOCKER: xenial should drop the below if statement,
# because the issue being addressed doesn't exist pre-netplan.
@@ -435,11 +476,14 @@ class DataSourceEc2(sources.DataSource):
# network config file every boot due to MAC address change.
if self.is_classic_instance():
self.default_update_events = copy.deepcopy(
- self.default_update_events)
+ self.default_update_events
+ )
self.default_update_events[EventScope.NETWORK].add(
- EventType.BOOT)
+ EventType.BOOT
+ )
self.default_update_events[EventScope.NETWORK].add(
- EventType.BOOT_LEGACY)
+ EventType.BOOT_LEGACY
+ )
else:
LOG.warning("Metadata 'network' key not valid: %s.", net_md)
self._network_config = result
@@ -451,7 +495,7 @@ class DataSourceEc2(sources.DataSource):
if self._fallback_interface is None:
# fallback_nic was used at one point, so restored objects may
# have an attribute there. respect that if found.
- _legacy_fbnic = getattr(self, 'fallback_nic', None)
+ _legacy_fbnic = getattr(self, "fallback_nic", None)
if _legacy_fbnic:
self._fallback_interface = _legacy_fbnic
self.fallback_nic = None
@@ -476,26 +520,37 @@ class DataSourceEc2(sources.DataSource):
else:
exc_cb = exc_cb_ud = None
try:
- crawled_metadata['user-data'] = ec2.get_instance_userdata(
- api_version, self.metadata_address,
- headers_cb=self._get_headers, headers_redact=redact,
- exception_cb=exc_cb_ud)
- crawled_metadata['meta-data'] = ec2.get_instance_metadata(
- api_version, self.metadata_address,
- headers_cb=self._get_headers, headers_redact=redact,
- exception_cb=exc_cb)
+ crawled_metadata["user-data"] = ec2.get_instance_userdata(
+ api_version,
+ self.metadata_address,
+ headers_cb=self._get_headers,
+ headers_redact=redact,
+ exception_cb=exc_cb_ud,
+ )
+ crawled_metadata["meta-data"] = ec2.get_instance_metadata(
+ api_version,
+ self.metadata_address,
+ headers_cb=self._get_headers,
+ headers_redact=redact,
+ exception_cb=exc_cb,
+ )
if self.cloud_name == CloudNames.AWS:
identity = ec2.get_instance_identity(
- api_version, self.metadata_address,
- headers_cb=self._get_headers, headers_redact=redact,
- exception_cb=exc_cb)
- crawled_metadata['dynamic'] = {'instance-identity': identity}
+ api_version,
+ self.metadata_address,
+ headers_cb=self._get_headers,
+ headers_redact=redact,
+ exception_cb=exc_cb,
+ )
+ crawled_metadata["dynamic"] = {"instance-identity": identity}
except Exception:
util.logexc(
- LOG, "Failed reading from metadata address %s",
- self.metadata_address)
+ LOG,
+ "Failed reading from metadata address %s",
+ self.metadata_address,
+ )
return {}
- crawled_metadata['_metadata_api_version'] = api_version
+ crawled_metadata["_metadata_api_version"] = api_version
return crawled_metadata
def _refresh_api_token(self, seconds=AWS_TOKEN_TTL_SECONDS):
@@ -508,23 +563,27 @@ class DataSourceEc2(sources.DataSource):
return None
LOG.debug("Refreshing Ec2 metadata API token")
request_header = {AWS_TOKEN_REQ_HEADER: seconds}
- token_url = '{}/{}'.format(self.metadata_address, API_TOKEN_ROUTE)
+ token_url = "{}/{}".format(self.metadata_address, API_TOKEN_ROUTE)
try:
- response = uhelp.readurl(token_url, headers=request_header,
- headers_redact=AWS_TOKEN_REDACT,
- request_method="PUT")
+ response = uhelp.readurl(
+ token_url,
+ headers=request_header,
+ headers_redact=AWS_TOKEN_REDACT,
+ request_method="PUT",
+ )
except uhelp.UrlError as e:
LOG.warning(
- 'Unable to get API token: %s raised exception %s',
- token_url, e)
+ "Unable to get API token: %s raised exception %s", token_url, e
+ )
return None
return response.contents
def _skip_or_refresh_stale_aws_token_cb(self, msg, exception):
"""Callback will not retry on SKIP_USERDATA_CODES or if no token
- is available."""
+ is available."""
retry = ec2.skip_retry_on_codes(
- ec2.SKIP_USERDATA_CODES, msg, exception)
+ ec2.SKIP_USERDATA_CODES, msg, exception
+ )
if not retry:
return False # False raises exception
return self._refresh_stale_aws_token_cb(msg, exception)
@@ -554,14 +613,17 @@ class DataSourceEc2(sources.DataSource):
# requests.ConnectionError will have exception.code == None
if exception.code and exception.code >= 400:
if exception.code == 403:
- LOG.warning('Ec2 IMDS endpoint returned a 403 error. '
- 'HTTP endpoint is disabled. Aborting.')
+ LOG.warning(
+ "Ec2 IMDS endpoint returned a 403 error. "
+ "HTTP endpoint is disabled. Aborting."
+ )
else:
- LOG.warning('Fatal error while requesting '
- 'Ec2 IMDSv2 API tokens')
+ LOG.warning(
+ "Fatal error while requesting Ec2 IMDSv2 API tokens"
+ )
raise exception
- def _get_headers(self, url=''):
+ def _get_headers(self, url=""):
"""Return a dict of headers for accessing a url.
If _api_token is unset on AWS, attempt to refresh the token via a PUT
@@ -591,13 +653,17 @@ class DataSourceEc2Local(DataSourceEc2):
metadata service. If the metadata service provides network configuration
then render the network configuration for that instance based on metadata.
"""
+
perform_dhcp_setup = True # Use dhcp before querying metadata
def get_data(self):
supported_platforms = (CloudNames.AWS,)
if self.cloud_name not in supported_platforms:
- LOG.debug("Local Ec2 mode only supported on %s, not %s",
- supported_platforms, self.cloud_name)
+ LOG.debug(
+ "Local Ec2 mode only supported on %s, not %s",
+ supported_platforms,
+ self.cloud_name,
+ )
return False
return super(DataSourceEc2Local, self).get_data()
@@ -615,18 +681,19 @@ def parse_strict_mode(cfgval):
# true, false, warn,[sleep]
# return tuple with string mode (true|false|warn) and sleep.
if cfgval is True:
- return 'true', None
+ return "true", None
if cfgval is False:
- return 'false', None
+ return "false", None
if not cfgval:
- return 'warn', 0
+ return "warn", 0
mode, _, sleep = cfgval.partition(",")
- if mode not in ('true', 'false', 'warn'):
+ if mode not in ("true", "false", "warn"):
raise ValueError(
"Invalid mode '%s' in strict_id setting '%s': "
- "Expected one of 'true', 'false', 'warn'." % (mode, cfgval))
+ "Expected one of 'true', 'false', 'warn'." % (mode, cfgval)
+ )
if sleep:
try:
@@ -652,47 +719,53 @@ def warn_if_necessary(cfgval, cfg):
if mode == "false":
return
- warnings.show_warning('non_ec2_md', cfg, mode=True, sleep=sleep)
+ warnings.show_warning("non_ec2_md", cfg, mode=True, sleep=sleep)
def identify_aws(data):
# data is a dictionary returned by _collect_platform_data.
- if (data['uuid'].startswith('ec2') and
- (data['uuid_source'] == 'hypervisor' or
- data['uuid'] == data['serial'])):
+ if data["uuid"].startswith("ec2") and (
+ data["uuid_source"] == "hypervisor" or data["uuid"] == data["serial"]
+ ):
return CloudNames.AWS
return None
def identify_brightbox(data):
- if data['serial'].endswith('.brightbox.com'):
+ if data["serial"].endswith(".brightbox.com"):
return CloudNames.BRIGHTBOX
def identify_zstack(data):
- if data['asset_tag'].endswith('.zstack.io'):
+ if data["asset_tag"].endswith(".zstack.io"):
return CloudNames.ZSTACK
def identify_e24cloud(data):
- if data['vendor'] == 'e24cloud':
+ if data["vendor"] == "e24cloud":
return CloudNames.E24CLOUD
def identify_platform():
# identify the platform and return an entry in CloudNames.
data = _collect_platform_data()
- checks = (identify_aws, identify_brightbox, identify_zstack,
- identify_e24cloud, lambda x: CloudNames.UNKNOWN)
+ checks = (
+ identify_aws,
+ identify_brightbox,
+ identify_zstack,
+ identify_e24cloud,
+ lambda x: CloudNames.UNKNOWN,
+ )
for checker in checks:
try:
result = checker(data)
if result:
return result
except Exception as e:
- LOG.warning("calling %s with %s raised exception: %s",
- checker, data, e)
+ LOG.warning(
+ "calling %s with %s raised exception: %s", checker, data, e
+ )
def _collect_platform_data():
@@ -711,36 +784,36 @@ def _collect_platform_data():
data = {}
try:
uuid = util.load_file("/sys/hypervisor/uuid").strip()
- data['uuid_source'] = 'hypervisor'
+ data["uuid_source"] = "hypervisor"
except Exception:
- uuid = dmi.read_dmi_data('system-uuid')
- data['uuid_source'] = 'dmi'
+ uuid = dmi.read_dmi_data("system-uuid")
+ data["uuid_source"] = "dmi"
if uuid is None:
- uuid = ''
- data['uuid'] = uuid.lower()
+ uuid = ""
+ data["uuid"] = uuid.lower()
- serial = dmi.read_dmi_data('system-serial-number')
+ serial = dmi.read_dmi_data("system-serial-number")
if serial is None:
- serial = ''
+ serial = ""
- data['serial'] = serial.lower()
+ data["serial"] = serial.lower()
- asset_tag = dmi.read_dmi_data('chassis-asset-tag')
+ asset_tag = dmi.read_dmi_data("chassis-asset-tag")
if asset_tag is None:
- asset_tag = ''
+ asset_tag = ""
- data['asset_tag'] = asset_tag.lower()
+ data["asset_tag"] = asset_tag.lower()
- vendor = dmi.read_dmi_data('system-manufacturer')
- data['vendor'] = (vendor if vendor else '').lower()
+ vendor = dmi.read_dmi_data("system-manufacturer")
+ data["vendor"] = (vendor if vendor else "").lower()
return data
def convert_ec2_metadata_network_config(
- network_md, macs_to_nics=None, fallback_nic=None,
- full_network_config=True):
+ network_md, macs_to_nics=None, fallback_nic=None, full_network_config=True
+):
"""Convert ec2 metadata to network config version 2 data dict.
@param: network_md: 'network' portion of EC2 metadata.
@@ -759,23 +832,25 @@ def convert_ec2_metadata_network_config(
@return A dict of network config version 2 based on the metadata and macs.
"""
- netcfg = {'version': 2, 'ethernets': {}}
+ netcfg = {"version": 2, "ethernets": {}}
if not macs_to_nics:
macs_to_nics = net.get_interfaces_by_mac()
- macs_metadata = network_md['interfaces']['macs']
+ macs_metadata = network_md["interfaces"]["macs"]
if not full_network_config:
for mac, nic_name in macs_to_nics.items():
if nic_name == fallback_nic:
break
- dev_config = {'dhcp4': True,
- 'dhcp6': False,
- 'match': {'macaddress': mac.lower()},
- 'set-name': nic_name}
+ dev_config = {
+ "dhcp4": True,
+ "dhcp6": False,
+ "match": {"macaddress": mac.lower()},
+ "set-name": nic_name,
+ }
nic_metadata = macs_metadata.get(mac)
- if nic_metadata.get('ipv6s'): # Any IPv6 addresses configured
- dev_config['dhcp6'] = True
- netcfg['ethernets'][nic_name] = dev_config
+ if nic_metadata.get("ipv6s"): # Any IPv6 addresses configured
+ dev_config["dhcp6"] = True
+ netcfg["ethernets"][nic_name] = dev_config
return netcfg
# Apply network config for all nics and any secondary IPv4/v6 addresses
nic_idx = 0
@@ -785,24 +860,27 @@ def convert_ec2_metadata_network_config(
continue # Not a physical nic represented in metadata
# device-number is zero-indexed, we want it 1-indexed for the
# multiplication on the following line
- nic_idx = int(nic_metadata.get('device-number', nic_idx)) + 1
- dhcp_override = {'route-metric': nic_idx * 100}
- dev_config = {'dhcp4': True, 'dhcp4-overrides': dhcp_override,
- 'dhcp6': False,
- 'match': {'macaddress': mac.lower()},
- 'set-name': nic_name}
- if nic_metadata.get('ipv6s'): # Any IPv6 addresses configured
- dev_config['dhcp6'] = True
- dev_config['dhcp6-overrides'] = dhcp_override
- dev_config['addresses'] = get_secondary_addresses(nic_metadata, mac)
- if not dev_config['addresses']:
- dev_config.pop('addresses') # Since we found none configured
- netcfg['ethernets'][nic_name] = dev_config
+ nic_idx = int(nic_metadata.get("device-number", nic_idx)) + 1
+ dhcp_override = {"route-metric": nic_idx * 100}
+ dev_config = {
+ "dhcp4": True,
+ "dhcp4-overrides": dhcp_override,
+ "dhcp6": False,
+ "match": {"macaddress": mac.lower()},
+ "set-name": nic_name,
+ }
+ if nic_metadata.get("ipv6s"): # Any IPv6 addresses configured
+ dev_config["dhcp6"] = True
+ dev_config["dhcp6-overrides"] = dhcp_override
+ dev_config["addresses"] = get_secondary_addresses(nic_metadata, mac)
+ if not dev_config["addresses"]:
+ dev_config.pop("addresses") # Since we found none configured
+ netcfg["ethernets"][nic_name] = dev_config
# Remove route-metric dhcp overrides if only one nic configured
- if len(netcfg['ethernets']) == 1:
- for nic_name in netcfg['ethernets'].keys():
- netcfg['ethernets'][nic_name].pop('dhcp4-overrides')
- netcfg['ethernets'][nic_name].pop('dhcp6-overrides', None)
+ if len(netcfg["ethernets"]) == 1:
+ for nic_name in netcfg["ethernets"].keys():
+ netcfg["ethernets"][nic_name].pop("dhcp4-overrides")
+ netcfg["ethernets"][nic_name].pop("dhcp6-overrides", None)
return netcfg
@@ -812,18 +890,22 @@ def get_secondary_addresses(nic_metadata, mac):
:return: List of secondary IPv4 or IPv6 addresses to configure on the
interface
"""
- ipv4s = nic_metadata.get('local-ipv4s')
- ipv6s = nic_metadata.get('ipv6s')
+ ipv4s = nic_metadata.get("local-ipv4s")
+ ipv6s = nic_metadata.get("ipv6s")
addresses = []
# In version < 2018-09-24 local_ipv4s or ipv6s is a str with one IP
if bool(isinstance(ipv4s, list) and len(ipv4s) > 1):
addresses.extend(
_get_secondary_addresses(
- nic_metadata, 'subnet-ipv4-cidr-block', mac, ipv4s, '24'))
+ nic_metadata, "subnet-ipv4-cidr-block", mac, ipv4s, "24"
+ )
+ )
if bool(isinstance(ipv6s, list) and len(ipv6s) > 1):
addresses.extend(
_get_secondary_addresses(
- nic_metadata, 'subnet-ipv6-cidr-block', mac, ipv6s, '128'))
+ nic_metadata, "subnet-ipv6-cidr-block", mac, ipv6s, "128"
+ )
+ )
return sorted(addresses)
@@ -836,18 +918,22 @@ def _get_secondary_addresses(nic_metadata, cidr_key, mac, ips, default_prefix):
addresses = []
cidr = nic_metadata.get(cidr_key)
prefix = default_prefix
- if not cidr or len(cidr.split('/')) != 2:
- ip_type = 'ipv4' if 'ipv4' in cidr_key else 'ipv6'
+ if not cidr or len(cidr.split("/")) != 2:
+ ip_type = "ipv4" if "ipv4" in cidr_key else "ipv6"
LOG.warning(
- 'Could not parse %s %s for mac %s. %s network'
- ' config prefix defaults to /%s',
- cidr_key, cidr, mac, ip_type, prefix)
+ "Could not parse %s %s for mac %s. %s network"
+ " config prefix defaults to /%s",
+ cidr_key,
+ cidr,
+ mac,
+ ip_type,
+ prefix,
+ )
else:
- prefix = cidr.split('/')[1]
+ prefix = cidr.split("/")[1]
# We know we have > 1 IPs in metadata for this IP type
for ip in ips[1:]:
- addresses.append(
- '{ip}/{prefix}'.format(ip=ip, prefix=prefix))
+ addresses.append("{ip}/{prefix}".format(ip=ip, prefix=prefix))
return addresses
@@ -862,4 +948,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
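
The API_TOKEN_ROUTE and AWS_TOKEN_* constants reformatted above describe the IMDSv2 handshake: a PUT to latest/api/token with a TTL header yields a session token, and every later metadata read must present it. A standalone sketch of that exchange using only the standard library; it assumes it runs on an EC2 instance and skips the retries, header redaction and error callbacks that cloud-init layers on via url_helper:

    # Sketch of the IMDSv2 token handshake; only meaningful on an EC2 instance.
    import urllib.request

    IMDS = "http://169.254.169.254"

    # Step 1: PUT to latest/api/token, asking for a 6-hour session token.
    token_req = urllib.request.Request(
        IMDS + "/latest/api/token",
        method="PUT",
        headers={"X-aws-ec2-metadata-token-ttl-seconds": "21600"},
    )
    with urllib.request.urlopen(token_req, timeout=2) as resp:
        token = resp.read().decode()

    # Step 2: present the token on every subsequent metadata read.
    md_req = urllib.request.Request(
        IMDS + "/2009-04-04/meta-data/instance-id",
        headers={"X-aws-ec2-metadata-token": token},
    )
    with urllib.request.urlopen(md_req, timeout=2) as resp:
        print(resp.read().decode())
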
diff --git a/cloudinit/sources/DataSourceExoscale.py b/cloudinit/sources/DataSourceExoscale.py
index adee6d79..cc5136d7 100644
--- a/cloudinit/sources/DataSourceExoscale.py
+++ b/cloudinit/sources/DataSourceExoscale.py
@@ -5,11 +5,9 @@
from cloudinit import dmi
from cloudinit import ec2_utils as ec2
-from cloudinit import log as logging
-from cloudinit import sources
from cloudinit import helpers
-from cloudinit import url_helper
-from cloudinit import util
+from cloudinit import log as logging
+from cloudinit import sources, url_helper, util
LOG = logging.getLogger(__name__)
@@ -25,7 +23,7 @@ EXOSCALE_DMI_NAME = "Exoscale"
class DataSourceExoscale(sources.DataSource):
- dsname = 'Exoscale'
+ dsname = "Exoscale"
url_max_wait = 120
@@ -33,12 +31,13 @@ class DataSourceExoscale(sources.DataSource):
super(DataSourceExoscale, self).__init__(sys_cfg, distro, paths)
LOG.debug("Initializing the Exoscale datasource")
- self.metadata_url = self.ds_cfg.get('metadata_url', METADATA_URL)
- self.api_version = self.ds_cfg.get('api_version', API_VERSION)
+ self.metadata_url = self.ds_cfg.get("metadata_url", METADATA_URL)
+ self.api_version = self.ds_cfg.get("api_version", API_VERSION)
self.password_server_port = int(
- self.ds_cfg.get('password_server_port', PASSWORD_SERVER_PORT))
- self.url_timeout = self.ds_cfg.get('timeout', URL_TIMEOUT)
- self.url_retries = self.ds_cfg.get('retries', URL_RETRIES)
+ self.ds_cfg.get("password_server_port", PASSWORD_SERVER_PORT)
+ )
+ self.url_timeout = self.ds_cfg.get("timeout", URL_TIMEOUT)
+ self.url_retries = self.ds_cfg.get("retries", URL_RETRIES)
self.extra_config = {}
def activate(self, cfg, is_new_instance):
@@ -50,23 +49,25 @@ class DataSourceExoscale(sources.DataSource):
# a user has triggered a password reset. So calling that password
# service generally results in no additional cloud-config.
# TODO(Create util functions for overriding merged sys_cfg module freq)
- mod = 'set_passwords'
- sem_path = self.paths.get_ipath_cur('sem')
+ mod = "set_passwords"
+ sem_path = self.paths.get_ipath_cur("sem")
sem_helper = helpers.FileSemaphores(sem_path)
- if sem_helper.clear('config_' + mod, None):
- LOG.debug('Overriding module set-passwords with frequency always')
+ if sem_helper.clear("config_" + mod, None):
+ LOG.debug("Overriding module set-passwords with frequency always")
def wait_for_metadata_service(self):
"""Wait for the metadata service to be reachable."""
metadata_url = "{}/{}/meta-data/instance-id".format(
- self.metadata_url, self.api_version)
+ self.metadata_url, self.api_version
+ )
url, _response = url_helper.wait_for_url(
urls=[metadata_url],
max_wait=self.url_max_wait,
timeout=self.url_timeout,
- status_cb=LOG.critical)
+ status_cb=LOG.critical,
+ )
return bool(url)
@@ -78,15 +79,20 @@ class DataSourceExoscale(sources.DataSource):
"""
metadata_ready = util.log_time(
logfunc=LOG.info,
- msg='waiting for the metadata service',
- func=self.wait_for_metadata_service)
+ msg="waiting for the metadata service",
+ func=self.wait_for_metadata_service,
+ )
if not metadata_ready:
return {}
- return read_metadata(self.metadata_url, self.api_version,
- self.password_server_port, self.url_timeout,
- self.url_retries)
+ return read_metadata(
+ self.metadata_url,
+ self.api_version,
+ self.password_server_port,
+ self.url_timeout,
+ self.url_retries,
+ )
def _get_data(self):
"""Fetch the user data, the metadata and the VM password
@@ -100,15 +106,16 @@ class DataSourceExoscale(sources.DataSource):
data = util.log_time(
logfunc=LOG.debug,
- msg='Crawl of metadata service',
- func=self.crawl_metadata)
+ msg="Crawl of metadata service",
+ func=self.crawl_metadata,
+ )
if not data:
return False
- self.userdata_raw = data['user-data']
- self.metadata = data['meta-data']
- password = data.get('password')
+ self.userdata_raw = data["user-data"]
+ self.metadata = data["meta-data"]
+ password = data.get("password")
password_config = {}
if password:
@@ -119,16 +126,17 @@ class DataSourceExoscale(sources.DataSource):
# leave the password always disabled if no password is ever set, or
# leave the password login enabled if we set it once.
password_config = {
- 'ssh_pwauth': True,
- 'password': password,
- 'chpasswd': {
- 'expire': False,
+ "ssh_pwauth": True,
+ "password": password,
+ "chpasswd": {
+ "expire": False,
},
}
# builtin extra_config overrides password_config
self.extra_config = util.mergemanydict(
- [self.extra_config, password_config])
+ [self.extra_config, password_config]
+ )
return True
@@ -136,8 +144,9 @@ class DataSourceExoscale(sources.DataSource):
return self.extra_config
def _is_platform_viable(self):
- return dmi.read_dmi_data('system-product-name').startswith(
- EXOSCALE_DMI_NAME)
+ return dmi.read_dmi_data("system-product-name").startswith(
+ EXOSCALE_DMI_NAME
+ )
# Used to match classes to dependencies
@@ -151,28 +160,32 @@ def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
-def get_password(metadata_url=METADATA_URL,
- api_version=API_VERSION,
- password_server_port=PASSWORD_SERVER_PORT,
- url_timeout=URL_TIMEOUT,
- url_retries=URL_RETRIES):
+def get_password(
+ metadata_url=METADATA_URL,
+ api_version=API_VERSION,
+ password_server_port=PASSWORD_SERVER_PORT,
+ url_timeout=URL_TIMEOUT,
+ url_retries=URL_RETRIES,
+):
"""Obtain the VM's password if set.
Once fetched the password is marked saved. Future calls to this method may
return empty string or 'saved_password'."""
- password_url = "{}:{}/{}/".format(metadata_url, password_server_port,
- api_version)
+ password_url = "{}:{}/{}/".format(
+ metadata_url, password_server_port, api_version
+ )
response = url_helper.read_file_or_url(
password_url,
ssl_details=None,
headers={"DomU_Request": "send_my_password"},
timeout=url_timeout,
- retries=url_retries)
- password = response.contents.decode('utf-8')
+ retries=url_retries,
+ )
+ password = response.contents.decode("utf-8")
# the password is empty or already saved
# Note: the original metadata server would answer an additional
# 'bad_request' status, but the Exoscale implementation does not.
- if password in ['', 'saved_password']:
+ if password in ["", "saved_password"]:
return None
# save the password
url_helper.read_file_or_url(
@@ -180,44 +193,50 @@ def get_password(metadata_url=METADATA_URL,
ssl_details=None,
headers={"DomU_Request": "saved_password"},
timeout=url_timeout,
- retries=url_retries)
+ retries=url_retries,
+ )
return password
-def read_metadata(metadata_url=METADATA_URL,
- api_version=API_VERSION,
- password_server_port=PASSWORD_SERVER_PORT,
- url_timeout=URL_TIMEOUT,
- url_retries=URL_RETRIES):
+def read_metadata(
+ metadata_url=METADATA_URL,
+ api_version=API_VERSION,
+ password_server_port=PASSWORD_SERVER_PORT,
+ url_timeout=URL_TIMEOUT,
+ url_retries=URL_RETRIES,
+):
"""Query the metadata server and return the retrieved data."""
crawled_metadata = {}
- crawled_metadata['_metadata_api_version'] = api_version
+ crawled_metadata["_metadata_api_version"] = api_version
try:
- crawled_metadata['user-data'] = ec2.get_instance_userdata(
- api_version,
- metadata_url,
- timeout=url_timeout,
- retries=url_retries)
- crawled_metadata['meta-data'] = ec2.get_instance_metadata(
- api_version,
- metadata_url,
- timeout=url_timeout,
- retries=url_retries)
+ crawled_metadata["user-data"] = ec2.get_instance_userdata(
+ api_version, metadata_url, timeout=url_timeout, retries=url_retries
+ )
+ crawled_metadata["meta-data"] = ec2.get_instance_metadata(
+ api_version, metadata_url, timeout=url_timeout, retries=url_retries
+ )
except Exception as e:
- util.logexc(LOG, "failed reading from metadata url %s (%s)",
- metadata_url, e)
+ util.logexc(
+ LOG, "failed reading from metadata url %s (%s)", metadata_url, e
+ )
return {}
try:
- crawled_metadata['password'] = get_password(
+ crawled_metadata["password"] = get_password(
api_version=api_version,
metadata_url=metadata_url,
password_server_port=password_server_port,
url_retries=url_retries,
- url_timeout=url_timeout)
+ url_timeout=url_timeout,
+ )
except Exception as e:
- util.logexc(LOG, "failed to read from password server url %s:%s (%s)",
- metadata_url, password_server_port, e)
+ util.logexc(
+ LOG,
+ "failed to read from password server url %s:%s (%s)",
+ metadata_url,
+ password_server_port,
+ e,
+ )
return crawled_metadata
@@ -225,35 +244,40 @@ def read_metadata(metadata_url=METADATA_URL,
if __name__ == "__main__":
import argparse
- parser = argparse.ArgumentParser(description='Query Exoscale Metadata')
+ parser = argparse.ArgumentParser(description="Query Exoscale Metadata")
parser.add_argument(
"--endpoint",
metavar="URL",
help="The url of the metadata service.",
- default=METADATA_URL)
+ default=METADATA_URL,
+ )
parser.add_argument(
"--version",
metavar="VERSION",
help="The version of the metadata endpoint to query.",
- default=API_VERSION)
+ default=API_VERSION,
+ )
parser.add_argument(
"--retries",
metavar="NUM",
type=int,
help="The number of retries querying the endpoint.",
- default=URL_RETRIES)
+ default=URL_RETRIES,
+ )
parser.add_argument(
"--timeout",
metavar="NUM",
type=int,
help="The time in seconds to wait before timing out.",
- default=URL_TIMEOUT)
+ default=URL_TIMEOUT,
+ )
parser.add_argument(
"--password-port",
metavar="PORT",
type=int,
help="The port on which the password endpoint listens",
- default=PASSWORD_SERVER_PORT)
+ default=PASSWORD_SERVER_PORT,
+ )
args = parser.parse_args()
@@ -262,7 +286,8 @@ if __name__ == "__main__":
api_version=args.version,
password_server_port=args.password_port,
url_timeout=args.timeout,
- url_retries=args.retries)
+ url_retries=args.retries,
+ )
print(util.json_dumps(data))
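
get_password() above queries the Exoscale password server with a plain GET carrying a DomU_Request header: 'send_my_password' fetches the one-time password, and a follow-up 'saved_password' request marks it consumed. A minimal sketch of that exchange with the standard library; the address, port and API version below are illustrative defaults rather than values quoted from these hunks:

    # Sketch of the Exoscale password-server exchange; only useful from inside
    # an Exoscale instance, and the URL below is an assumed default.
    import urllib.request

    PASSWORD_URL = "http://169.254.169.254:8080/1.0/"

    def fetch_and_mark_password(url=PASSWORD_URL, timeout=10):
        # Ask the password server for the one-time password.
        req = urllib.request.Request(
            url, headers={"DomU_Request": "send_my_password"}
        )
        with urllib.request.urlopen(req, timeout=timeout) as resp:
            password = resp.read().decode("utf-8")
        # Empty string or 'saved_password' means there is nothing to set.
        if password in ("", "saved_password"):
            return None
        # Tell the server the password was consumed so it is not replayed.
        req = urllib.request.Request(
            url, headers={"DomU_Request": "saved_password"}
        )
        urllib.request.urlopen(req, timeout=timeout).close()
        return password

    if __name__ == "__main__":
        print(fetch_and_mark_password())
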
diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py
index b82fa410..c470bea8 100644
--- a/cloudinit/sources/DataSourceGCE.py
+++ b/cloudinit/sources/DataSourceGCE.py
@@ -4,31 +4,29 @@
import datetime
import json
-from contextlib import suppress as noop
-
from base64 import b64decode
+from contextlib import suppress as noop
from cloudinit import dmi
-from cloudinit.distros import ug_util
from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import url_helper
-from cloudinit import util
+from cloudinit import sources, url_helper, util
+from cloudinit.distros import ug_util
from cloudinit.net.dhcp import EphemeralDHCPv4
LOG = logging.getLogger(__name__)
-MD_V1_URL = 'http://metadata.google.internal/computeMetadata/v1/'
-BUILTIN_DS_CONFIG = {'metadata_url': MD_V1_URL}
-REQUIRED_FIELDS = ('instance-id', 'availability-zone', 'local-hostname')
-GUEST_ATTRIBUTES_URL = ('http://metadata.google.internal/computeMetadata/'
- 'v1/instance/guest-attributes')
-HOSTKEY_NAMESPACE = 'hostkeys'
-HEADERS = {'Metadata-Flavor': 'Google'}
+MD_V1_URL = "http://metadata.google.internal/computeMetadata/v1/"
+BUILTIN_DS_CONFIG = {"metadata_url": MD_V1_URL}
+REQUIRED_FIELDS = ("instance-id", "availability-zone", "local-hostname")
+GUEST_ATTRIBUTES_URL = (
+ "http://metadata.google.internal/computeMetadata/"
+ "v1/instance/guest-attributes"
+)
+HOSTKEY_NAMESPACE = "hostkeys"
+HEADERS = {"Metadata-Flavor": "Google"}
class GoogleMetadataFetcher(object):
-
def __init__(self, metadata_address, num_retries, sec_between_retries):
self.metadata_address = metadata_address
self.num_retries = num_retries
@@ -39,10 +37,13 @@ class GoogleMetadataFetcher(object):
try:
url = self.metadata_address + path
if is_recursive:
- url += '/?recursive=True'
- resp = url_helper.readurl(url=url, headers=HEADERS,
- retries=self.num_retries,
- sec_between=self.sec_between_retries)
+ url += "/?recursive=True"
+ resp = url_helper.readurl(
+ url=url,
+ headers=HEADERS,
+ retries=self.num_retries,
+ sec_between=self.sec_between_retries,
+ )
except url_helper.UrlError as exc:
msg = "url %s raised exception %s"
LOG.debug(msg, path, exc)
@@ -51,7 +52,7 @@ class GoogleMetadataFetcher(object):
if is_text:
value = util.decode_binary(resp.contents)
else:
- value = resp.contents.decode('utf-8')
+ value = resp.contents.decode("utf-8")
else:
LOG.debug("url %s returned code %s", path, resp.code)
return value
@@ -59,7 +60,7 @@ class GoogleMetadataFetcher(object):
class DataSourceGCE(sources.DataSource):
- dsname = 'GCE'
+ dsname = "GCE"
perform_dhcp_setup = False
def __init__(self, sys_cfg, distro, paths):
@@ -69,10 +70,13 @@ class DataSourceGCE(sources.DataSource):
(users, _groups) = ug_util.normalize_users_groups(sys_cfg, distro)
(self.default_user, _user_config) = ug_util.extract_default(users)
self.metadata = dict()
- self.ds_cfg = util.mergemanydict([
- util.get_cfg_by_path(sys_cfg, ["datasource", "GCE"], {}),
- BUILTIN_DS_CONFIG])
- self.metadata_address = self.ds_cfg['metadata_url']
+ self.ds_cfg = util.mergemanydict(
+ [
+ util.get_cfg_by_path(sys_cfg, ["datasource", "GCE"], {}),
+ BUILTIN_DS_CONFIG,
+ ]
+ )
+ self.metadata_address = self.ds_cfg["metadata_url"]
def _get_data(self):
url_params = self.get_url_params()
@@ -90,14 +94,14 @@ class DataSourceGCE(sources.DataSource):
},
)
- if not ret['success']:
- if ret['platform_reports_gce']:
- LOG.warning(ret['reason'])
+ if not ret["success"]:
+ if ret["platform_reports_gce"]:
+ LOG.warning(ret["reason"])
else:
- LOG.debug(ret['reason'])
+ LOG.debug(ret["reason"])
return False
- self.metadata = ret['meta-data']
- self.userdata_raw = ret['user-data']
+ self.metadata = ret["meta-data"]
+ self.userdata_raw = ret["user-data"]
return True
@property
@@ -106,10 +110,10 @@ class DataSourceGCE(sources.DataSource):
return None
def get_instance_id(self):
- return self.metadata['instance-id']
+ return self.metadata["instance-id"]
def get_public_ssh_keys(self):
- public_keys_data = self.metadata['public-keys-data']
+ public_keys_data = self.metadata["public-keys-data"]
return _parse_public_keys(public_keys_data, self.default_user)
def publish_host_keys(self, hostkeys):
@@ -118,15 +122,15 @@ class DataSourceGCE(sources.DataSource):
def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False):
# GCE has long FQDNs and has asked for short hostnames.
- return self.metadata['local-hostname'].split('.')[0]
+ return self.metadata["local-hostname"].split(".")[0]
@property
def availability_zone(self):
- return self.metadata['availability-zone']
+ return self.metadata["availability-zone"]
@property
def region(self):
- return self.availability_zone.rsplit('-', 1)[0]
+ return self.availability_zone.rsplit("-", 1)[0]
class DataSourceGCELocal(DataSourceGCE):
@@ -134,14 +138,19 @@ class DataSourceGCELocal(DataSourceGCE):
def _write_host_key_to_guest_attributes(key_type, key_value):
- url = '%s/%s/%s' % (GUEST_ATTRIBUTES_URL, HOSTKEY_NAMESPACE, key_type)
- key_value = key_value.encode('utf-8')
- resp = url_helper.readurl(url=url, data=key_value, headers=HEADERS,
- request_method='PUT', check_status=False)
+ url = "%s/%s/%s" % (GUEST_ATTRIBUTES_URL, HOSTKEY_NAMESPACE, key_type)
+ key_value = key_value.encode("utf-8")
+ resp = url_helper.readurl(
+ url=url,
+ data=key_value,
+ headers=HEADERS,
+ request_method="PUT",
+ check_status=False,
+ )
if resp.ok():
- LOG.debug('Wrote %s host key to guest attributes.', key_type)
+ LOG.debug("Wrote %s host key to guest attributes.", key_type)
else:
- LOG.debug('Unable to write %s host key to guest attributes.', key_type)
+ LOG.debug("Unable to write %s host key to guest attributes.", key_type)
def _has_expired(public_key):
@@ -155,7 +164,7 @@ def _has_expired(public_key):
return False
# Do not expire keys if they do not have the expected schema identifier.
- if schema != 'google-ssh':
+ if schema != "google-ssh":
return False
try:
@@ -164,11 +173,11 @@ def _has_expired(public_key):
return False
# Do not expire keys if there is no expiration timestamp.
- if 'expireOn' not in json_obj:
+ if "expireOn" not in json_obj:
return False
- expire_str = json_obj['expireOn']
- format_str = '%Y-%m-%dT%H:%M:%S+0000'
+ expire_str = json_obj["expireOn"]
+ format_str = "%Y-%m-%dT%H:%M:%S+0000"
try:
expire_time = datetime.datetime.strptime(expire_str, format_str)
except ValueError:
@@ -189,11 +198,11 @@ def _parse_public_keys(public_keys_data, default_user=None):
for public_key in public_keys_data:
if not public_key or not all(ord(c) < 128 for c in public_key):
continue
- split_public_key = public_key.split(':', 1)
+ split_public_key = public_key.split(":", 1)
if len(split_public_key) != 2:
continue
user, key = split_public_key
- if user in ('cloudinit', default_user) and not _has_expired(key):
+ if user in ("cloudinit", default_user) and not _has_expired(key):
public_keys.append(key)
return public_keys
@@ -203,31 +212,35 @@ def read_md(address=None, url_params=None, platform_check=True):
if address is None:
address = MD_V1_URL
- ret = {'meta-data': None, 'user-data': None,
- 'success': False, 'reason': None}
- ret['platform_reports_gce'] = platform_reports_gce()
+ ret = {
+ "meta-data": None,
+ "user-data": None,
+ "success": False,
+ "reason": None,
+ }
+ ret["platform_reports_gce"] = platform_reports_gce()
- if platform_check and not ret['platform_reports_gce']:
- ret['reason'] = "Not running on GCE."
+ if platform_check and not ret["platform_reports_gce"]:
+ ret["reason"] = "Not running on GCE."
return ret
# If we cannot resolve the metadata server, then no point in trying.
if not util.is_resolvable_url(address):
LOG.debug("%s is not resolvable", address)
- ret['reason'] = 'address "%s" is not resolvable' % address
+ ret["reason"] = 'address "%s" is not resolvable' % address
return ret
# url_map: (our-key, path, required, is_text, is_recursive)
url_map = [
- ('instance-id', ('instance/id',), True, True, False),
- ('availability-zone', ('instance/zone',), True, True, False),
- ('local-hostname', ('instance/hostname',), True, True, False),
- ('instance-data', ('instance/attributes',), False, False, True),
- ('project-data', ('project/attributes',), False, False, True),
+ ("instance-id", ("instance/id",), True, True, False),
+ ("availability-zone", ("instance/zone",), True, True, False),
+ ("local-hostname", ("instance/hostname",), True, True, False),
+ ("instance-data", ("instance/attributes",), False, False, True),
+ ("project-data", ("project/attributes",), False, False, True),
]
- metadata_fetcher = GoogleMetadataFetcher(address,
- url_params.num_retries,
- url_params.sec_between_retries)
+ metadata_fetcher = GoogleMetadataFetcher(
+ address, url_params.num_retries, url_params.sec_between_retries
+ )
md = {}
# Iterate over url_map keys to get metadata items.
for (mkey, paths, required, is_text, is_recursive) in url_map:
@@ -238,51 +251,52 @@ def read_md(address=None, url_params=None, platform_check=True):
value = new_value
if required and value is None:
msg = "required key %s returned nothing. not GCE"
- ret['reason'] = msg % mkey
+ ret["reason"] = msg % mkey
return ret
md[mkey] = value
- instance_data = json.loads(md['instance-data'] or '{}')
- project_data = json.loads(md['project-data'] or '{}')
- valid_keys = [instance_data.get('sshKeys'), instance_data.get('ssh-keys')]
- block_project = instance_data.get('block-project-ssh-keys', '').lower()
- if block_project != 'true' and not instance_data.get('sshKeys'):
- valid_keys.append(project_data.get('ssh-keys'))
- valid_keys.append(project_data.get('sshKeys'))
- public_keys_data = '\n'.join([key for key in valid_keys if key])
- md['public-keys-data'] = public_keys_data.splitlines()
+ instance_data = json.loads(md["instance-data"] or "{}")
+ project_data = json.loads(md["project-data"] or "{}")
+ valid_keys = [instance_data.get("sshKeys"), instance_data.get("ssh-keys")]
+ block_project = instance_data.get("block-project-ssh-keys", "").lower()
+ if block_project != "true" and not instance_data.get("sshKeys"):
+ valid_keys.append(project_data.get("ssh-keys"))
+ valid_keys.append(project_data.get("sshKeys"))
+ public_keys_data = "\n".join([key for key in valid_keys if key])
+ md["public-keys-data"] = public_keys_data.splitlines()
- if md['availability-zone']:
- md['availability-zone'] = md['availability-zone'].split('/')[-1]
+ if md["availability-zone"]:
+ md["availability-zone"] = md["availability-zone"].split("/")[-1]
- if 'user-data' in instance_data:
+ if "user-data" in instance_data:
# instance_data was json, so values are all utf-8 strings.
- ud = instance_data['user-data'].encode("utf-8")
- encoding = instance_data.get('user-data-encoding')
- if encoding == 'base64':
+ ud = instance_data["user-data"].encode("utf-8")
+ encoding = instance_data.get("user-data-encoding")
+ if encoding == "base64":
ud = b64decode(ud)
elif encoding:
- LOG.warning('unknown user-data-encoding: %s, ignoring', encoding)
- ret['user-data'] = ud
+ LOG.warning("unknown user-data-encoding: %s, ignoring", encoding)
+ ret["user-data"] = ud
- ret['meta-data'] = md
- ret['success'] = True
+ ret["meta-data"] = md
+ ret["success"] = True
return ret
def platform_reports_gce():
- pname = dmi.read_dmi_data('system-product-name') or "N/A"
+ pname = dmi.read_dmi_data("system-product-name") or "N/A"
if pname == "Google Compute Engine" or pname == "Google":
return True
# system-product-name is not always guaranteed (LP: #1674861)
- serial = dmi.read_dmi_data('system-serial-number') or "N/A"
+ serial = dmi.read_dmi_data("system-serial-number") or "N/A"
if serial.startswith("GoogleCloud-"):
return True
- LOG.debug("Not running on google cloud. product-name=%s serial=%s",
- pname, serial)
+ LOG.debug(
+ "Not running on google cloud. product-name=%s serial=%s", pname, serial
+ )
return False
@@ -301,31 +315,38 @@ def get_datasource_list(depends):
if __name__ == "__main__":
import argparse
import sys
-
from base64 import b64encode
- parser = argparse.ArgumentParser(description='Query GCE Metadata Service')
- parser.add_argument("--endpoint", metavar="URL",
- help="The url of the metadata service.",
- default=MD_V1_URL)
- parser.add_argument("--no-platform-check", dest="platform_check",
- help="Ignore smbios platform check",
- action='store_false', default=True)
+ parser = argparse.ArgumentParser(description="Query GCE Metadata Service")
+ parser.add_argument(
+ "--endpoint",
+ metavar="URL",
+ help="The url of the metadata service.",
+ default=MD_V1_URL,
+ )
+ parser.add_argument(
+ "--no-platform-check",
+ dest="platform_check",
+ help="Ignore smbios platform check",
+ action="store_false",
+ default=True,
+ )
args = parser.parse_args()
data = read_md(address=args.endpoint, platform_check=args.platform_check)
- if 'user-data' in data:
+ if "user-data" in data:
# user-data is bytes not string like other things. Handle it specially.
# If it can be represented as utf-8 then do so. Otherwise print base64
# encoded value in the key user-data-b64.
try:
- data['user-data'] = data['user-data'].decode()
+ data["user-data"] = data["user-data"].decode()
except UnicodeDecodeError:
- sys.stderr.write("User-data cannot be decoded. "
- "Writing as base64\n")
- del data['user-data']
+ sys.stderr.write(
+ "User-data cannot be decoded. Writing as base64\n"
+ )
+ del data["user-data"]
# b64encode returns a bytes value. Decode to get the string.
- data['user-data-b64'] = b64encode(data['user-data']).decode()
+ data["user-data-b64"] = b64encode(data["user-data"]).decode()
- print(json.dumps(data, indent=1, sort_keys=True, separators=(',', ': ')))
+ print(json.dumps(data, indent=1, sort_keys=True, separators=(",", ": ")))
# vi: ts=4 expandtab
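
_has_expired() above only expires keys that carry the 'google-ssh' schema marker followed by JSON with an expireOn timestamp in '%Y-%m-%dT%H:%M:%S+0000' form. A self-contained sketch of that check; the key material in the example is fabricated for illustration:

    # Sketch of the google-ssh key expiry check; the example key is fabricated.
    import datetime
    import json

    def has_expired(public_key):
        # Keys look like: "<type> <key-data> google-ssh {...json with expireOn...}"
        try:
            schema, json_str = public_key.split(None, 3)[2:]
        except ValueError:
            return False
        if schema != "google-ssh":
            return False
        try:
            json_obj = json.loads(json_str)
        except ValueError:
            return False
        if "expireOn" not in json_obj:
            return False
        fmt = "%Y-%m-%dT%H:%M:%S+0000"
        try:
            expire_time = datetime.datetime.strptime(json_obj["expireOn"], fmt)
        except ValueError:
            return False
        return datetime.datetime.utcnow() > expire_time

    example = (
        "ssh-ed25519 AAAAC3Example google-ssh "
        '{"userName": "demo@example.com", "expireOn": "2021-01-01T00:00:00+0000"}'
    )
    print(has_expired(example))  # True: the timestamp is in the past
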
diff --git a/cloudinit/sources/DataSourceHetzner.py b/cloudinit/sources/DataSourceHetzner.py
index c7c88dd7..50324cc4 100644
--- a/cloudinit/sources/DataSourceHetzner.py
+++ b/cloudinit/sources/DataSourceHetzner.py
@@ -6,21 +6,19 @@
"""Hetzner Cloud API Documentation
https://docs.hetzner.cloud/"""
+import cloudinit.sources.helpers.hetzner as hc_helper
from cloudinit import dmi
from cloudinit import log as logging
from cloudinit import net as cloudnet
-from cloudinit import sources
-from cloudinit import util
-
-import cloudinit.sources.helpers.hetzner as hc_helper
+from cloudinit import sources, util
LOG = logging.getLogger(__name__)
-BASE_URL_V1 = 'http://169.254.169.254/hetzner/v1'
+BASE_URL_V1 = "http://169.254.169.254/hetzner/v1"
BUILTIN_DS_CONFIG = {
- 'metadata_url': BASE_URL_V1 + '/metadata',
- 'userdata_url': BASE_URL_V1 + '/userdata',
+ "metadata_url": BASE_URL_V1 + "/metadata",
+ "userdata_url": BASE_URL_V1 + "/userdata",
}
MD_RETRIES = 60
@@ -30,20 +28,23 @@ MD_WAIT_RETRY = 2
class DataSourceHetzner(sources.DataSource):
- dsname = 'Hetzner'
+ dsname = "Hetzner"
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.distro = distro
self.metadata = dict()
- self.ds_cfg = util.mergemanydict([
- util.get_cfg_by_path(sys_cfg, ["datasource", "Hetzner"], {}),
- BUILTIN_DS_CONFIG])
- self.metadata_address = self.ds_cfg['metadata_url']
- self.userdata_address = self.ds_cfg['userdata_url']
- self.retries = self.ds_cfg.get('retries', MD_RETRIES)
- self.timeout = self.ds_cfg.get('timeout', MD_TIMEOUT)
- self.wait_retry = self.ds_cfg.get('wait_retry', MD_WAIT_RETRY)
+ self.ds_cfg = util.mergemanydict(
+ [
+ util.get_cfg_by_path(sys_cfg, ["datasource", "Hetzner"], {}),
+ BUILTIN_DS_CONFIG,
+ ]
+ )
+ self.metadata_address = self.ds_cfg["metadata_url"]
+ self.userdata_address = self.ds_cfg["userdata_url"]
+ self.retries = self.ds_cfg.get("retries", MD_RETRIES)
+ self.timeout = self.ds_cfg.get("timeout", MD_TIMEOUT)
+ self.wait_retry = self.ds_cfg.get("wait_retry", MD_WAIT_RETRY)
self._network_config = None
self.dsmode = sources.DSMODE_NETWORK
@@ -54,14 +55,21 @@ class DataSourceHetzner(sources.DataSource):
return False
nic = cloudnet.find_fallback_nic()
- with cloudnet.EphemeralIPv4Network(nic, "169.254.0.1", 16,
- "169.254.255.255"):
+ with cloudnet.EphemeralIPv4Network(
+ nic, "169.254.0.1", 16, "169.254.255.255"
+ ):
md = hc_helper.read_metadata(
- self.metadata_address, timeout=self.timeout,
- sec_between=self.wait_retry, retries=self.retries)
+ self.metadata_address,
+ timeout=self.timeout,
+ sec_between=self.wait_retry,
+ retries=self.retries,
+ )
ud = hc_helper.read_userdata(
- self.userdata_address, timeout=self.timeout,
- sec_between=self.wait_retry, retries=self.retries)
+ self.userdata_address,
+ timeout=self.timeout,
+ sec_between=self.wait_retry,
+ retries=self.retries,
+ )
# Hetzner cloud does not support binary user-data. So here, do a
# base64 decode of the data if we can. The end result being that a
@@ -76,10 +84,10 @@ class DataSourceHetzner(sources.DataSource):
# hostname is name provided by user at launch. The API enforces it is
# a valid hostname, but it is not guaranteed to be resolvable in dns or
# fully qualified.
- self.metadata['instance-id'] = md['instance-id']
- self.metadata['local-hostname'] = md['hostname']
- self.metadata['network-config'] = md.get('network-config', None)
- self.metadata['public-keys'] = md.get('public-keys', None)
+ self.metadata["instance-id"] = md["instance-id"]
+ self.metadata["local-hostname"] = md["hostname"]
+ self.metadata["network-config"] = md.get("network-config", None)
+ self.metadata["public-keys"] = md.get("public-keys", None)
self.vendordata_raw = md.get("vendor_data", None)
# instance-id and serial from SMBIOS should be identical
@@ -92,19 +100,20 @@ class DataSourceHetzner(sources.DataSource):
def check_instance_id(self, sys_cfg):
return sources.instance_id_matches_system_uuid(
- self.get_instance_id(), 'system-serial-number')
+ self.get_instance_id(), "system-serial-number"
+ )
@property
def network_config(self):
"""Configure the networking. This needs to be done each boot, since
- the IP information may have changed due to snapshot and/or
- migration.
+ the IP information may have changed due to snapshot and/or
+ migration.
"""
if self._network_config:
return self._network_config
- _net_config = self.metadata['network-config']
+ _net_config = self.metadata["network-config"]
if not _net_config:
raise Exception("Unable to get meta-data from server....")
@@ -114,7 +123,7 @@ class DataSourceHetzner(sources.DataSource):
def get_hcloud_data():
- vendor_name = dmi.read_dmi_data('system-manufacturer')
+ vendor_name = dmi.read_dmi_data("system-manufacturer")
if vendor_name != "Hetzner":
return (False, None)
@@ -129,7 +138,7 @@ def get_hcloud_data():
# Used to match classes to dependencies
datasources = [
- (DataSourceHetzner, (sources.DEP_FILESYSTEM, )),
+ (DataSourceHetzner, (sources.DEP_FILESYSTEM,)),
]
@@ -137,4 +146,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
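
The Hetzner hunk above notes that the cloud does not accept binary user-data, so cloud-init base64-decodes the payload when it can and otherwise passes it through untouched. A minimal sketch of that decode-if-possible behaviour (cloud-init's own helper for this lives outside the lines shown here):

    # Sketch of "base64 decode the user-data if we can, else leave it alone".
    import base64
    import binascii

    def maybe_b64decode(data):
        # Valid base64 gets decoded; anything else is returned unchanged.
        try:
            return base64.b64decode(data, validate=True)
        except (binascii.Error, ValueError):
            return data

    print(maybe_b64decode(b"SGVsbG8sIEhldHpuZXIh"))  # b'Hello, Hetzner!'
    print(maybe_b64decode(b"#cloud-config\n"))       # not base64, returned as-is
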
diff --git a/cloudinit/sources/DataSourceIBMCloud.py b/cloudinit/sources/DataSourceIBMCloud.py
index 8d196185..18c3848f 100644
--- a/cloudinit/sources/DataSourceIBMCloud.py
+++ b/cloudinit/sources/DataSourceIBMCloud.py
@@ -97,10 +97,8 @@ import json
import os
from cloudinit import log as logging
-from cloudinit import sources
+from cloudinit import sources, subp, util
from cloudinit.sources.helpers import openstack
-from cloudinit import subp
-from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -117,12 +115,13 @@ class Platforms(object):
PROVISIONING = (
Platforms.TEMPLATE_PROVISIONING_METADATA,
- Platforms.TEMPLATE_PROVISIONING_NODATA)
+ Platforms.TEMPLATE_PROVISIONING_NODATA,
+)
class DataSourceIBMCloud(sources.DataSource):
- dsname = 'IBMCloud'
+ dsname = "IBMCloud"
system_uuid = None
def __init__(self, sys_cfg, distro, paths):
@@ -142,14 +141,14 @@ class DataSourceIBMCloud(sources.DataSource):
if results is None:
return False
- self.source = results['source']
- self.platform = results['platform']
- self.metadata = results['metadata']
- self.userdata_raw = results.get('userdata')
- self.network_json = results.get('networkdata')
- vd = results.get('vendordata')
+ self.source = results["source"]
+ self.platform = results["platform"]
+ self.metadata = results["metadata"]
+ self.userdata_raw = results.get("userdata")
+ self.network_json = results.get("networkdata")
+ vd = results.get("vendordata")
self.vendordata_pure = vd
- self.system_uuid = results['system-uuid']
+ self.system_uuid = results["system-uuid"]
try:
self.vendordata_raw = sources.convert_vendordata(vd)
except ValueError as e:
@@ -160,7 +159,7 @@ class DataSourceIBMCloud(sources.DataSource):
def _get_subplatform(self):
"""Return the subplatform metadata source details."""
- return '%s (%s)' % (self.platform, self.source)
+ return "%s (%s)" % (self.platform, self.source)
def check_instance_id(self, sys_cfg):
"""quickly (local check only) if self.instance_id is still valid
@@ -177,12 +176,13 @@ class DataSourceIBMCloud(sources.DataSource):
if self.platform != Platforms.OS_CODE:
# If deployed from template, an agent in the provisioning
# environment handles networking configuration. Not cloud-init.
- return {'config': 'disabled', 'version': 1}
+ return {"config": "disabled", "version": 1}
if self._network_config is None:
if self.network_json is not None:
LOG.debug("network config provided via network_json")
self._network_config = openstack.convert_net_json(
- self.network_json, known_macs=None)
+ self.network_json, known_macs=None
+ )
else:
LOG.debug("no network configuration available.")
return self._network_config
@@ -200,22 +200,28 @@ def _is_xen():
def _is_ibm_provisioning(
- prov_cfg="/root/provisioningConfiguration.cfg",
- inst_log="/root/swinstall.log",
- boot_ref="/proc/1/environ"):
+ prov_cfg="/root/provisioningConfiguration.cfg",
+ inst_log="/root/swinstall.log",
+ boot_ref="/proc/1/environ",
+):
"""Return boolean indicating if this boot is ibm provisioning boot."""
if os.path.exists(prov_cfg):
msg = "config '%s' exists." % prov_cfg
result = True
if os.path.exists(inst_log):
if os.path.exists(boot_ref):
- result = (os.stat(inst_log).st_mtime >
- os.stat(boot_ref).st_mtime)
- msg += (" log '%s' from %s boot." %
- (inst_log, "current" if result else "previous"))
+ result = (
+ os.stat(inst_log).st_mtime > os.stat(boot_ref).st_mtime
+ )
+ msg += " log '%s' from %s boot." % (
+ inst_log,
+ "current" if result else "previous",
+ )
else:
- msg += (" log '%s' existed, but no reference file '%s'." %
- (inst_log, boot_ref))
+ msg += " log '%s' existed, but no reference file '%s'." % (
+ inst_log,
+ boot_ref,
+ )
result = False
else:
msg += " log '%s' did not exist." % inst_log
@@ -252,17 +258,26 @@ def get_ibm_platform():
if label not in (label_mdata, label_cfg2):
continue
if label in fslabels:
- LOG.warning("Duplicate fslabel '%s'. existing=%s current=%s",
- label, fslabels[label], data)
+ LOG.warning(
+ "Duplicate fslabel '%s'. existing=%s current=%s",
+ label,
+ fslabels[label],
+ data,
+ )
continue
if label == label_cfg2 and uuid != IBM_CONFIG_UUID:
- LOG.debug("Skipping %s with LABEL=%s due to uuid != %s: %s",
- dev, label, uuid, data)
+ LOG.debug(
+ "Skipping %s with LABEL=%s due to uuid != %s: %s",
+ dev,
+ label,
+ uuid,
+ data,
+ )
continue
fslabels[label] = data
- metadata_path = fslabels.get(label_mdata, {}).get('DEVNAME')
- cfg2_path = fslabels.get(label_cfg2, {}).get('DEVNAME')
+ metadata_path = fslabels.get(label_mdata, {}).get("DEVNAME")
+ cfg2_path = fslabels.get(label_cfg2, {}).get("DEVNAME")
if cfg2_path:
return (Platforms.OS_CODE, cfg2_path)
@@ -288,12 +303,14 @@ def read_md():
LOG.debug("This is not an IBMCloud platform.")
return None
elif platform in PROVISIONING:
- LOG.debug("Cloud-init is disabled during provisioning: %s.",
- platform)
+ LOG.debug("Cloud-init is disabled during provisioning: %s.", platform)
return None
- ret = {'platform': platform, 'source': path,
- 'system-uuid': _read_system_uuid()}
+ ret = {
+ "platform": platform,
+ "source": path,
+ "system-uuid": _read_system_uuid(),
+ }
try:
if os.path.isdir(path):
@@ -302,8 +319,8 @@ def read_md():
results = util.mount_cb(path, metadata_from_dir)
except sources.BrokenMetadata as e:
raise RuntimeError(
- "Failed reading IBM config disk (platform=%s path=%s): %s" %
- (platform, path, e)
+ "Failed reading IBM config disk (platform=%s path=%s): %s"
+ % (platform, path, e)
) from e
ret.update(results)
@@ -329,14 +346,14 @@ def metadata_from_dir(source_dir):
return os.path.join("openstack", "latest", fname)
def load_json_bytes(blob):
- return json.loads(blob.decode('utf-8'))
+ return json.loads(blob.decode("utf-8"))
files = [
# tuples of (results_name, path, translator)
- ('metadata_raw', opath('meta_data.json'), load_json_bytes),
- ('userdata', opath('user_data'), None),
- ('vendordata', opath('vendor_data.json'), load_json_bytes),
- ('networkdata', opath('network_data.json'), load_json_bytes),
+ ("metadata_raw", opath("meta_data.json"), load_json_bytes),
+ ("userdata", opath("user_data"), None),
+ ("vendordata", opath("vendor_data.json"), load_json_bytes),
+ ("networkdata", opath("network_data.json"), load_json_bytes),
]
results = {}
@@ -355,28 +372,33 @@ def metadata_from_dir(source_dir):
data = transl(raw)
except Exception as e:
raise sources.BrokenMetadata(
- "Failed decoding %s: %s" % (path, e))
+ "Failed decoding %s: %s" % (path, e)
+ )
results[name] = data
- if results.get('metadata_raw') is None:
+ if results.get("metadata_raw") is None:
raise sources.BrokenMetadata(
- "%s missing required file 'meta_data.json'" % source_dir)
+ "%s missing required file 'meta_data.json'" % source_dir
+ )
- results['metadata'] = {}
+ results["metadata"] = {}
- md_raw = results['metadata_raw']
- md = results['metadata']
- if 'random_seed' in md_raw:
+ md_raw = results["metadata_raw"]
+ md = results["metadata"]
+ if "random_seed" in md_raw:
try:
- md['random_seed'] = base64.b64decode(md_raw['random_seed'])
+ md["random_seed"] = base64.b64decode(md_raw["random_seed"])
except (ValueError, TypeError) as e:
raise sources.BrokenMetadata(
- "Badly formatted metadata random_seed entry: %s" % e)
+ "Badly formatted metadata random_seed entry: %s" % e
+ )
renames = (
- ('public_keys', 'public-keys'), ('hostname', 'local-hostname'),
- ('uuid', 'instance-id'))
+ ("public_keys", "public-keys"),
+ ("hostname", "local-hostname"),
+ ("uuid", "instance-id"),
+ )
for mdname, newname in renames:
if mdname in md_raw:
md[newname] = md_raw[mdname]
@@ -398,7 +420,7 @@ def get_datasource_list(depends):
if __name__ == "__main__":
import argparse
- parser = argparse.ArgumentParser(description='Query IBM Cloud Metadata')
+ parser = argparse.ArgumentParser(description="Query IBM Cloud Metadata")
args = parser.parse_args()
data = read_md()
print(util.json_dumps(data))
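
The IBMCloud hunks above are representative of the whole Black pass: single quotes become double quotes, and any literal or call that Black wraps gets one element per line with a trailing comma. A minimal standalone sketch of the same conventions (the values below are placeholders, not real IBM Cloud metadata):

    platform = "ibmcloud-os-code"  # placeholder platform name
    path = "/dev/xvdh"  # placeholder config-disk device

    # Black's layout: double quotes, one key per line, trailing comma on the
    # last element of any wrapped literal.
    ret = {
        "platform": platform,
        "source": path,
        "system-uuid": "00000000-0000-0000-0000-000000000000",  # placeholder
    }
    print("%s (%s)" % (ret["platform"], ret["source"]))
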
diff --git a/cloudinit/sources/DataSourceLXD.py b/cloudinit/sources/DataSourceLXD.py
index 469707d2..071ea87c 100644
--- a/cloudinit/sources/DataSourceLXD.py
+++ b/cloudinit/sources/DataSourceLXD.py
@@ -1,4 +1,3 @@
-
"""Datasource for LXD, reads /dev/lxd/sock representaton of instance data.
Notes:
@@ -10,8 +9,10 @@ Notes:
* TODO( Hotplug support using websockets API 1.0/events )
"""
-from json.decoder import JSONDecodeError
import os
+import socket
+import stat
+from json.decoder import JSONDecodeError
import requests
from requests.adapters import HTTPAdapter
@@ -29,9 +30,6 @@ from requests.adapters import HTTPAdapter
from requests.packages.urllib3.connection import HTTPConnection
from requests.packages.urllib3.connectionpool import HTTPConnectionPool
-import socket
-import stat
-
from cloudinit import log as logging
from cloudinit import sources, subp, util
@@ -47,7 +45,7 @@ CONFIG_KEY_ALIASES = {
"cloud-init.vendor-data": "vendor-data",
"user.user-data": "user-data",
"user.network-config": "network-config",
- "user.vendor-data": "vendor-data"
+ "user.vendor-data": "vendor-data",
}
@@ -57,18 +55,20 @@ def generate_fallback_network_config() -> dict:
"version": 1,
"config": [
{
- "type": "physical", "name": "eth0",
- "subnets": [{"type": "dhcp", "control": "auto"}]
+ "type": "physical",
+ "name": "eth0",
+ "subnets": [{"type": "dhcp", "control": "auto"}],
}
- ]
+ ],
}
if subp.which("systemd-detect-virt"):
try:
- virt_type, _ = subp.subp(['systemd-detect-virt'])
+ virt_type, _ = subp.subp(["systemd-detect-virt"])
except subp.ProcessExecutionError as err:
LOG.warning(
"Unable to run systemd-detect-virt: %s."
- " Rendering default network config.", err
+ " Rendering default network config.",
+ err,
)
return network_v1
if virt_type.strip() == "kvm": # instance.type VIRTUAL-MACHINE
@@ -84,7 +84,7 @@ def generate_fallback_network_config() -> dict:
class SocketHTTPConnection(HTTPConnection):
def __init__(self, socket_path):
- super().__init__('localhost')
+ super().__init__("localhost")
self.socket_path = socket_path
def connect(self):
@@ -95,7 +95,7 @@ class SocketHTTPConnection(HTTPConnection):
class SocketConnectionPool(HTTPConnectionPool):
def __init__(self, socket_path):
self.socket_path = socket_path
- super().__init__('localhost')
+ super().__init__("localhost")
def _new_conn(self):
return SocketHTTPConnection(self.socket_path)
@@ -118,16 +118,16 @@ def _maybe_remove_top_network(cfg):
if "network" not in cfg:
return cfg
network_val = cfg["network"]
- bmsg = 'Top level network key in network-config %s: %s'
+ bmsg = "Top level network key in network-config %s: %s"
if not isinstance(network_val, dict):
LOG.debug(bmsg, "was not a dict", cfg)
return cfg
if len(list(cfg.keys())) != 1:
LOG.debug(bmsg, "had multiple top level keys", cfg)
return cfg
- if network_val.get('config') == "disabled":
+ if network_val.get("config") == "disabled":
LOG.debug(bmsg, "was config/disabled", cfg)
- elif not all(('config' in network_val, 'version' in network_val)):
+ elif not all(("config" in network_val, "version" in network_val)):
LOG.debug(bmsg, "but missing 'config' or 'version'", cfg)
return cfg
LOG.debug(bmsg, "fixed by removing shifting network.", cfg)
@@ -165,13 +165,16 @@ def _raw_instance_data_to_dict(metadata_type: str, metadata_value) -> dict:
class DataSourceLXD(sources.DataSource):
- dsname = 'LXD'
+ dsname = "LXD"
_network_config = sources.UNSET
_crawled_metadata = sources.UNSET
sensitive_metadata_keys = (
- 'merged_cfg', 'user.meta-data', 'user.vendor-data', 'user.user-data',
+ "merged_cfg",
+ "user.meta-data",
+ "user.vendor-data",
+ "user.user-data",
)
def _is_platform_viable(self) -> bool:
@@ -185,8 +188,10 @@ class DataSourceLXD(sources.DataSource):
return False
self._crawled_metadata = util.log_time(
- logfunc=LOG.debug, msg='Crawl of metadata service',
- func=read_metadata)
+ logfunc=LOG.debug,
+ msg="Crawl of metadata service",
+ func=read_metadata,
+ )
self.metadata = _raw_instance_data_to_dict(
"meta-data", self._crawled_metadata.get("meta-data")
)
@@ -293,7 +298,7 @@ def read_metadata(
"Invalid HTTP response [{code}] from {route}: {resp}".format(
code=response.status_code,
route=md_route,
- resp=response.text
+ resp=response.text,
)
)
@@ -304,7 +309,7 @@ def read_metadata(
md = {
"_metadata_api_version": api_version, # Document API version read
"config": {},
- "meta-data": md["meta-data"]
+ "meta-data": md["meta-data"],
}
config_url = version_url + "config"
@@ -317,7 +322,7 @@ def read_metadata(
"Invalid HTTP response [{code}] from {route}: {resp}".format(
code=response.status_code,
route=config_url,
- resp=response.text
+ resp=response.text,
)
)
try:
@@ -326,8 +331,7 @@ def read_metadata(
raise sources.InvalidMetaDataException(
"Unable to determine cloud-init config from {route}."
" Expected JSON but found: {resp}".format(
- route=config_url,
- resp=response.text
+ route=config_url, resp=response.text
)
) from exc
@@ -354,12 +358,15 @@ def read_metadata(
else:
LOG.warning(
"Ignoring LXD config %s in favor of %s value.",
- cfg_key, cfg_key.replace("user", "cloud-init", 1)
+ cfg_key,
+ cfg_key.replace("user", "cloud-init", 1),
)
else:
LOG.debug(
"Skipping %s on [HTTP:%d]:%s",
- url, response.status_code, response.text
+ url,
+ response.status_code,
+ response.text,
)
return md
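
For DataSourceLXD.py the structurally interesting piece is the fallback network config dict, which the hunk above only reflows. A standalone sketch of that version-1 structure in the new Black layout, runnable on its own:

    # Shape of the fallback config built by generate_fallback_network_config(),
    # as reflowed by Black (one key per line, trailing commas on wrapped literals).
    network_v1 = {
        "version": 1,
        "config": [
            {
                "type": "physical",
                "name": "eth0",
                "subnets": [{"type": "dhcp", "control": "auto"}],
            }
        ],
    }
    print(network_v1["config"][0]["name"])
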
diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index 9156925f..d554db0d 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -11,20 +11,18 @@ import os
import time
from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import url_helper
-from cloudinit import util
+from cloudinit import sources, url_helper, util
LOG = logging.getLogger(__name__)
MD_VERSION = "2012-03-01"
DS_FIELDS = [
# remote path, location in dictionary, binary data?, optional?
- ("meta-data/instance-id", 'meta-data/instance-id', False, False),
- ("meta-data/local-hostname", 'meta-data/local-hostname', False, False),
- ("meta-data/public-keys", 'meta-data/public-keys', False, True),
- ('meta-data/vendor-data', 'vendor-data', True, True),
- ('user-data', 'user-data', True, True),
+ ("meta-data/instance-id", "meta-data/instance-id", False, False),
+ ("meta-data/local-hostname", "meta-data/local-hostname", False, False),
+ ("meta-data/public-keys", "meta-data/public-keys", False, True),
+ ("meta-data/vendor-data", "vendor-data", True, True),
+ ("user-data", "user-data", True, True),
]
@@ -46,7 +44,7 @@ class DataSourceMAAS(sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.base_url = None
- self.seed_dir = os.path.join(paths.seed_dir, 'maas')
+ self.seed_dir = os.path.join(paths.seed_dir, "maas")
self.id_hash = get_id_from_ds_cfg(self.ds_cfg)
@property
@@ -72,7 +70,7 @@ class DataSourceMAAS(sources.DataSource):
raise
# If there is no metadata_url, then we're not configured
- url = mcfg.get('metadata_url', None)
+ url = mcfg.get("metadata_url", None)
if not url:
return False
@@ -85,9 +83,14 @@ class DataSourceMAAS(sources.DataSource):
return False
self._set_data(
- url, read_maas_seed_url(
- url, read_file_or_url=self.oauth_helper.readurl,
- paths=self.paths, retries=1))
+ url,
+ read_maas_seed_url(
+ url,
+ read_file_or_url=self.oauth_helper.readurl,
+ paths=self.paths,
+ retries=1,
+ ),
+ )
return True
except Exception:
util.logexc(LOG, "Failed fetching metadata from url %s", url)
@@ -109,7 +112,7 @@ class DataSourceMAAS(sources.DataSource):
def _get_subplatform(self):
"""Return the subplatform metadata source details."""
- return 'seed-dir (%s)' % self.base_url
+ return "seed-dir (%s)" % self.base_url
def wait_for_metadata_service(self, url):
mcfg = self.ds_cfg
@@ -135,13 +138,17 @@ class DataSourceMAAS(sources.DataSource):
check_url = "%s/%s/meta-data/instance-id" % (url, MD_VERSION)
urls = [check_url]
url, _response = self.oauth_helper.wait_for_url(
- urls=urls, max_wait=max_wait, timeout=timeout)
+ urls=urls, max_wait=max_wait, timeout=timeout
+ )
if url:
LOG.debug("Using metadata source: '%s'", url)
else:
- LOG.critical("Giving up on md from %s after %i seconds",
- urls, int(time.time() - starttime))
+ LOG.critical(
+ "Giving up on md from %s after %i seconds",
+ urls,
+ int(time.time() - starttime),
+ )
return bool(url)
@@ -154,26 +161,26 @@ class DataSourceMAAS(sources.DataSource):
if self.id_hash is None:
return False
ncfg = util.get_cfg_by_path(sys_cfg, ("datasource", self.dsname), {})
- return (self.id_hash == get_id_from_ds_cfg(ncfg))
+ return self.id_hash == get_id_from_ds_cfg(ncfg)
def get_oauth_helper(cfg):
"""Return an oauth helper instance for values in cfg.
- @raises ValueError from OauthUrlHelper if some required fields have
- true-ish values but others do not."""
- keys = ('consumer_key', 'consumer_secret', 'token_key', 'token_secret')
+ @raises ValueError from OauthUrlHelper if some required fields have
+ true-ish values but others do not."""
+ keys = ("consumer_key", "consumer_secret", "token_key", "token_secret")
kwargs = dict([(r, cfg.get(r)) for r in keys])
return url_helper.OauthUrlHelper(**kwargs)
def get_id_from_ds_cfg(ds_cfg):
"""Given a config, generate a unique identifier for this node."""
- fields = ('consumer_key', 'token_key', 'token_secret')
- idstr = '\0'.join([ds_cfg.get(f, "") for f in fields])
+ fields = ("consumer_key", "token_key", "token_secret")
+ idstr = "\0".join([ds_cfg.get(f, "") for f in fields])
# store the encoding version as part of the hash in the event
# that it ever changed we can compute older versions.
- return 'v1:' + hashlib.sha256(idstr.encode('utf-8')).hexdigest()
+ return "v1:" + hashlib.sha256(idstr.encode("utf-8")).hexdigest()
def read_maas_seed_dir(seed_d):
@@ -186,8 +193,14 @@ def read_maas_seed_dir(seed_d):
return read_maas_seed_url("file://%s" % seed_d, version=None)
-def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None,
- version=MD_VERSION, paths=None, retries=None):
+def read_maas_seed_url(
+ seed_url,
+ read_file_or_url=None,
+ timeout=None,
+ version=MD_VERSION,
+ paths=None,
+ retries=None,
+):
"""
Read the maas datasource at seed_url.
read_file_or_url is a method that should provide an interface
@@ -213,16 +226,20 @@ def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None,
url = "%s/%s/%s" % (seed_url, version, path)
try:
ssl_details = util.fetch_ssl_details(paths)
- resp = read_file_or_url(url, retries=retries, timeout=timeout,
- ssl_details=ssl_details)
+ resp = read_file_or_url(
+ url, retries=retries, timeout=timeout, ssl_details=ssl_details
+ )
if resp.ok():
if binary:
md[path] = resp.contents
else:
md[path] = util.decode_binary(resp.contents)
else:
- LOG.warning(("Fetching from %s resulted in"
- " an invalid http code %s"), url, resp.code)
+ LOG.warning(
+ "Fetching from %s resulted in an invalid http code %s",
+ url,
+ resp.code,
+ )
except url_helper.UrlError as e:
if e.code == 404 and not optional:
raise MAASSeedDirMalformed(
@@ -236,8 +253,8 @@ def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None,
def check_seed_contents(content, seed):
"""Validate if dictionary content valid as a return for a datasource.
- Either return a (userdata, metadata, vendordata) tuple or
- Raise MAASSeedDirMalformed or MAASSeedDirNone
+ Either return a (userdata, metadata, vendordata) tuple or
+ Raise MAASSeedDirMalformed or MAASSeedDirNone
"""
ret = {}
missing = []
@@ -262,14 +279,15 @@ def check_seed_contents(content, seed):
raise MAASSeedDirMalformed("%s: missing files %s" % (seed, missing))
vd_data = None
- if ret.get('vendor-data'):
+ if ret.get("vendor-data"):
err = object()
- vd_data = util.load_yaml(ret.get('vendor-data'), default=err,
- allowed=(object))
+ vd_data = util.load_yaml(
+ ret.get("vendor-data"), default=err, allowed=(object)
+ )
if vd_data is err:
raise MAASSeedDirMalformed("vendor-data was not loadable as yaml.")
- return ret.get('user-data'), ret.get('meta-data'), vd_data
+ return ret.get("user-data"), ret.get("meta-data"), vd_data
class MAASSeedDirNone(Exception):
@@ -292,6 +310,7 @@ def get_datasource_list(depends):
if __name__ == "__main__":
+
def main():
"""
Call with single argument of directory or http or https url.
@@ -302,36 +321,66 @@ if __name__ == "__main__":
import pprint
import sys
- parser = argparse.ArgumentParser(description='Interact with MAAS DS')
- parser.add_argument("--config", metavar="file",
- help="specify DS config file", default=None)
- parser.add_argument("--ckey", metavar="key",
- help="the consumer key to auth with", default=None)
- parser.add_argument("--tkey", metavar="key",
- help="the token key to auth with", default=None)
- parser.add_argument("--csec", metavar="secret",
- help="the consumer secret (likely '')", default="")
- parser.add_argument("--tsec", metavar="secret",
- help="the token secret to auth with", default=None)
- parser.add_argument("--apiver", metavar="version",
- help="the apiver to use ("" can be used)",
- default=MD_VERSION)
+ parser = argparse.ArgumentParser(description="Interact with MAAS DS")
+ parser.add_argument(
+ "--config",
+ metavar="file",
+ help="specify DS config file",
+ default=None,
+ )
+ parser.add_argument(
+ "--ckey",
+ metavar="key",
+ help="the consumer key to auth with",
+ default=None,
+ )
+ parser.add_argument(
+ "--tkey",
+ metavar="key",
+ help="the token key to auth with",
+ default=None,
+ )
+ parser.add_argument(
+ "--csec",
+ metavar="secret",
+ help="the consumer secret (likely '')",
+ default="",
+ )
+ parser.add_argument(
+ "--tsec",
+ metavar="secret",
+ help="the token secret to auth with",
+ default=None,
+ )
+ parser.add_argument(
+ "--apiver",
+ metavar="version",
+ help="the apiver to use ( can be used)",
+ default=MD_VERSION,
+ )
subcmds = parser.add_subparsers(title="subcommands", dest="subcmd")
- for (name, help) in (('crawl', 'crawl the datasource'),
- ('get', 'do a single GET of provided url'),
- ('check-seed', 'read and verify seed at url')):
+ for (name, help) in (
+ ("crawl", "crawl the datasource"),
+ ("get", "do a single GET of provided url"),
+ ("check-seed", "read and verify seed at url"),
+ ):
p = subcmds.add_parser(name, help=help)
- p.add_argument("url", help="the datasource url", nargs='?',
- default=None)
+ p.add_argument(
+ "url", help="the datasource url", nargs="?", default=None
+ )
args = parser.parse_args()
- creds = {'consumer_key': args.ckey, 'token_key': args.tkey,
- 'token_secret': args.tsec, 'consumer_secret': args.csec}
+ creds = {
+ "consumer_key": args.ckey,
+ "token_key": args.tkey,
+ "token_secret": args.tsec,
+ "consumer_secret": args.csec,
+ }
if args.config is None:
- for fname in ('91_kernel_cmdline_url', '90_dpkg_maas'):
+ for fname in ("91_kernel_cmdline_url", "90_dpkg_maas"):
fpath = "/etc/cloud/cloud.cfg.d/" + fname + ".cfg"
if os.path.exists(fpath) and os.access(fpath, os.R_OK):
sys.stderr.write("Used config in %s.\n" % fpath)
@@ -339,13 +388,13 @@ if __name__ == "__main__":
if args.config:
cfg = util.read_conf(args.config)
- if 'datasource' in cfg:
- cfg = cfg['datasource']['MAAS']
+ if "datasource" in cfg:
+ cfg = cfg["datasource"]["MAAS"]
for key in creds.keys():
if key in cfg and creds[key] is None:
creds[key] = cfg[key]
- if args.url is None and 'metadata_url' in cfg:
- args.url = cfg['metadata_url']
+ if args.url is None and "metadata_url" in cfg:
+ args.url = cfg["metadata_url"]
if args.url is None:
sys.stderr.write("Must provide a url or a config with url.\n")
@@ -380,8 +429,11 @@ if __name__ == "__main__":
(userdata, metadata, vd) = read_maas_seed_dir(args.url)
else:
(userdata, metadata, vd) = read_maas_seed_url(
- args.url, version=args.apiver, read_file_or_url=readurl,
- retries=2)
+ args.url,
+ version=args.apiver,
+ read_file_or_url=readurl,
+ retries=2,
+ )
print("=== user-data ===")
print("N/A" if userdata is None else userdata.decode())
print("=== meta-data ===")
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index 2d9e86b4..56559630 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -13,9 +13,8 @@ import os
from cloudinit import dmi
from cloudinit import log as logging
+from cloudinit import sources, util
from cloudinit.net import eni
-from cloudinit import sources
-from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -27,8 +26,10 @@ class DataSourceNoCloud(sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.seed = None
- self.seed_dirs = [os.path.join(paths.seed_dir, 'nocloud'),
- os.path.join(paths.seed_dir, 'nocloud-net')]
+ self.seed_dirs = [
+ os.path.join(paths.seed_dir, "nocloud"),
+ os.path.join(paths.seed_dir, "nocloud-net"),
+ ]
self.seed_dir = None
self.supported_seed_starts = ("/", "file://")
@@ -55,17 +56,21 @@ class DataSourceNoCloud(sources.DataSource):
}
found = []
- mydata = {'meta-data': {}, 'user-data': "", 'vendor-data': "",
- 'network-config': None}
+ mydata = {
+ "meta-data": {},
+ "user-data": "",
+ "vendor-data": "",
+ "network-config": None,
+ }
try:
# Parse the system serial label from dmi. If not empty, try parsing
# like the commandline
md = {}
- serial = dmi.read_dmi_data('system-serial-number')
+ serial = dmi.read_dmi_data("system-serial-number")
if serial and load_cmdline_data(md, serial):
found.append("dmi")
- mydata = _merge_new_seed(mydata, {'meta-data': md})
+ mydata = _merge_new_seed(mydata, {"meta-data": md})
except Exception:
util.logexc(LOG, "Unable to parse dmi data")
return False
@@ -75,14 +80,16 @@ class DataSourceNoCloud(sources.DataSource):
md = {}
if load_cmdline_data(md):
found.append("cmdline")
- mydata = _merge_new_seed(mydata, {'meta-data': md})
+ mydata = _merge_new_seed(mydata, {"meta-data": md})
except Exception:
util.logexc(LOG, "Unable to parse command line data")
return False
# Check to see if the seed dir has data.
- pp2d_kwargs = {'required': ['user-data', 'meta-data'],
- 'optional': ['vendor-data', 'network-config']}
+ pp2d_kwargs = {
+ "required": ["user-data", "meta-data"],
+ "optional": ["vendor-data", "network-config"],
+ }
for path in self.seed_dirs:
try:
@@ -97,31 +104,35 @@ class DataSourceNoCloud(sources.DataSource):
# If the datasource config had a 'seedfrom' entry, then that takes
# precedence over a 'seedfrom' that was found in a filesystem
# but not over external media
- if self.ds_cfg.get('seedfrom'):
+ if self.ds_cfg.get("seedfrom"):
found.append("ds_config_seedfrom")
- mydata['meta-data']["seedfrom"] = self.ds_cfg['seedfrom']
+ mydata["meta-data"]["seedfrom"] = self.ds_cfg["seedfrom"]
# fields appropriately named can also just come from the datasource
# config (ie, 'user-data', 'meta-data', 'vendor-data' there)
- if 'user-data' in self.ds_cfg and 'meta-data' in self.ds_cfg:
+ if "user-data" in self.ds_cfg and "meta-data" in self.ds_cfg:
mydata = _merge_new_seed(mydata, self.ds_cfg)
found.append("ds_config")
def _pp2d_callback(mp, data):
return util.pathprefix2dict(mp, **data)
- label = self.ds_cfg.get('fs_label', "cidata")
+ label = self.ds_cfg.get("fs_label", "cidata")
if label is not None:
for dev in self._get_devices(label):
try:
LOG.debug("Attempting to use data from %s", dev)
try:
- seeded = util.mount_cb(dev, _pp2d_callback,
- pp2d_kwargs)
+ seeded = util.mount_cb(
+ dev, _pp2d_callback, pp2d_kwargs
+ )
except ValueError:
- LOG.warning("device %s with label=%s not a "
- "valid seed.", dev, label)
+ LOG.warning(
+ "device %s with label=%s not a valid seed.",
+ dev,
+ label,
+ )
continue
mydata = _merge_new_seed(mydata, seeded)
@@ -133,8 +144,9 @@ class DataSourceNoCloud(sources.DataSource):
if e.errno != errno.ENOENT:
raise
except util.MountFailedError:
- util.logexc(LOG, "Failed to mount %s when looking for "
- "data", dev)
+ util.logexc(
+ LOG, "Failed to mount %s when looking for data", dev
+ )
# There was no indication on kernel cmdline or data
# in the seeddir suggesting this handler should be used.
@@ -145,8 +157,8 @@ class DataSourceNoCloud(sources.DataSource):
# attempt to seed the userdata / metadata from its value
         # its primary value is in allowing the user to type less
# on the command line, ie: ds=nocloud;s=http://bit.ly/abcdefg
- if "seedfrom" in mydata['meta-data']:
- seedfrom = mydata['meta-data']["seedfrom"]
+ if "seedfrom" in mydata["meta-data"]:
+ seedfrom = mydata["meta-data"]["seedfrom"]
seedfound = False
for proto in self.supported_seed_starts:
if seedfrom.startswith(proto):
@@ -162,39 +174,43 @@ class DataSourceNoCloud(sources.DataSource):
LOG.debug("Using seeded cache data from %s", seedfrom)
# Values in the command line override those from the seed
- mydata['meta-data'] = util.mergemanydict([mydata['meta-data'],
- md_seed])
- mydata['user-data'] = ud
- mydata['vendor-data'] = vd
+ mydata["meta-data"] = util.mergemanydict(
+ [mydata["meta-data"], md_seed]
+ )
+ mydata["user-data"] = ud
+ mydata["vendor-data"] = vd
found.append(seedfrom)
# Now that we have exhausted any other places merge in the defaults
- mydata['meta-data'] = util.mergemanydict([mydata['meta-data'],
- defaults])
+ mydata["meta-data"] = util.mergemanydict(
+ [mydata["meta-data"], defaults]
+ )
self.dsmode = self._determine_dsmode(
- [mydata['meta-data'].get('dsmode')])
+ [mydata["meta-data"].get("dsmode")]
+ )
if self.dsmode == sources.DSMODE_DISABLED:
- LOG.debug("%s: not claiming datasource, dsmode=%s", self,
- self.dsmode)
+ LOG.debug(
+ "%s: not claiming datasource, dsmode=%s", self, self.dsmode
+ )
return False
self.seed = ",".join(found)
- self.metadata = mydata['meta-data']
- self.userdata_raw = mydata['user-data']
- self.vendordata_raw = mydata['vendor-data']
- self._network_config = mydata['network-config']
- self._network_eni = mydata['meta-data'].get('network-interfaces')
+ self.metadata = mydata["meta-data"]
+ self.userdata_raw = mydata["user-data"]
+ self.vendordata_raw = mydata["vendor-data"]
+ self._network_config = mydata["network-config"]
+ self._network_eni = mydata["meta-data"].get("network-interfaces")
return True
@property
def platform_type(self):
# Handle upgrade path of pickled ds
- if not hasattr(self, '_platform_type'):
+ if not hasattr(self, "_platform_type"):
self._platform_type = None
if not self._platform_type:
- self._platform_type = 'lxd' if util.is_lxd() else 'nocloud'
+ self._platform_type = "lxd" if util.is_lxd() else "nocloud"
return self._platform_type
def _get_cloud_name(self):
@@ -203,11 +219,11 @@ class DataSourceNoCloud(sources.DataSource):
def _get_subplatform(self):
"""Return the subplatform metadata source details."""
- if self.seed.startswith('/dev'):
- subplatform_type = 'config-disk'
+ if self.seed.startswith("/dev"):
+ subplatform_type = "config-disk"
else:
- subplatform_type = 'seed-dir'
- return '%s (%s)' % (subplatform_type, self.seed)
+ subplatform_type = "seed-dir"
+ return "%s (%s)" % (subplatform_type, self.seed)
def check_instance_id(self, sys_cfg):
         # quickly check (local check only) if self.instance_id is still valid
@@ -218,7 +234,7 @@ class DataSourceNoCloud(sources.DataSource):
# LP: #1568150 need getattr in the case that an old class object
# has been loaded from a pickled file and now executing new source.
- dirs = getattr(self, 'seed_dirs', [self.seed_dir])
+ dirs = getattr(self, "seed_dirs", [self.seed_dir])
quick_id = _quick_read_instance_id(dirs=dirs)
if not quick_id:
return None
@@ -236,7 +252,7 @@ def _quick_read_instance_id(dirs=None):
if dirs is None:
dirs = []
- iid_key = 'instance-id'
+ iid_key = "instance-id"
fill = {}
if load_cmdline_data(fill) and iid_key in fill:
return fill[iid_key]
@@ -245,8 +261,8 @@ def _quick_read_instance_id(dirs=None):
if d is None:
continue
try:
- data = util.pathprefix2dict(d, required=['meta-data'])
- md = util.load_yaml(data['meta-data'])
+ data = util.pathprefix2dict(d, required=["meta-data"])
+ md = util.load_yaml(data["meta-data"])
if md and iid_key in md:
return md[iid_key]
except ValueError:
@@ -256,14 +272,16 @@ def _quick_read_instance_id(dirs=None):
def load_cmdline_data(fill, cmdline=None):
- pairs = [("ds=nocloud", sources.DSMODE_LOCAL),
- ("ds=nocloud-net", sources.DSMODE_NETWORK)]
+ pairs = [
+ ("ds=nocloud", sources.DSMODE_LOCAL),
+ ("ds=nocloud-net", sources.DSMODE_NETWORK),
+ ]
for idstr, dsmode in pairs:
if parse_cmdline_data(idstr, fill, cmdline):
# if dsmode was explicitly in the command line, then
# prefer it to the dsmode based on the command line id
- if 'dsmode' not in fill:
- fill['dsmode'] = dsmode
+ if "dsmode" not in fill:
+ fill["dsmode"] = dsmode
return True
return False
@@ -323,19 +341,19 @@ def _maybe_remove_top_network(cfg):
Return the original value if no change or the updated value if changed."""
nullval = object()
- network_val = cfg.get('network', nullval)
+ network_val = cfg.get("network", nullval)
if network_val is nullval:
return cfg
- bmsg = 'Top level network key in network-config %s: %s'
+ bmsg = "Top level network key in network-config %s: %s"
if not isinstance(network_val, dict):
LOG.debug(bmsg, "was not a dict", cfg)
return cfg
if len(list(cfg.keys())) != 1:
LOG.debug(bmsg, "had multiple top level keys", cfg)
return cfg
- if network_val.get('config') == "disabled":
+ if network_val.get("config") == "disabled":
LOG.debug(bmsg, "was config/disabled", cfg)
- elif not all(('config' in network_val, 'version' in network_val)):
+ elif not all(("config" in network_val, "version" in network_val)):
LOG.debug(bmsg, "but missing 'config' or 'version'", cfg)
return cfg
LOG.debug(bmsg, "fixed by removing shifting network.", cfg)
@@ -345,19 +363,20 @@ def _maybe_remove_top_network(cfg):
def _merge_new_seed(cur, seeded):
ret = cur.copy()
- newmd = seeded.get('meta-data', {})
- if not isinstance(seeded['meta-data'], dict):
- newmd = util.load_yaml(seeded['meta-data'])
- ret['meta-data'] = util.mergemanydict([cur['meta-data'], newmd])
+ newmd = seeded.get("meta-data", {})
+ if not isinstance(seeded["meta-data"], dict):
+ newmd = util.load_yaml(seeded["meta-data"])
+ ret["meta-data"] = util.mergemanydict([cur["meta-data"], newmd])
- if seeded.get('network-config'):
- ret['network-config'] = _maybe_remove_top_network(
- util.load_yaml(seeded.get('network-config')))
+ if seeded.get("network-config"):
+ ret["network-config"] = _maybe_remove_top_network(
+ util.load_yaml(seeded.get("network-config"))
+ )
- if 'user-data' in seeded:
- ret['user-data'] = seeded['user-data']
- if 'vendor-data' in seeded:
- ret['vendor-data'] = seeded['vendor-data']
+ if "user-data" in seeded:
+ ret["user-data"] = seeded["user-data"]
+ if "vendor-data" in seeded:
+ ret["vendor-data"] = seeded["vendor-data"]
return ret
@@ -369,7 +388,7 @@ class DataSourceNoCloudNet(DataSourceNoCloud):
# Used to match classes to dependencies
datasources = [
- (DataSourceNoCloud, (sources.DEP_FILESYSTEM, )),
+ (DataSourceNoCloud, (sources.DEP_FILESYSTEM,)),
(DataSourceNoCloudNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]
@@ -378,4 +397,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
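
One easy-to-miss change in the NoCloud datasources list: Black rewrites the one-element tuple (sources.DEP_FILESYSTEM, ) as (sources.DEP_FILESYSTEM,), dropping the space but keeping the comma that makes it a tuple at all. A quick standalone illustration (DEP_FILESYSTEM here is a stand-in string, not the real constant):

    DEP_FILESYSTEM = "FILESYSTEM"  # stand-in for sources.DEP_FILESYSTEM

    not_a_tuple = (DEP_FILESYSTEM)  # parentheses alone: still just a string
    one_tuple = (DEP_FILESYSTEM,)  # trailing comma: a one-element tuple

    print(type(not_a_tuple).__name__, type(one_tuple).__name__)  # str tuple
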
diff --git a/cloudinit/sources/DataSourceNone.py b/cloudinit/sources/DataSourceNone.py
index b7656ac5..036d00b2 100644
--- a/cloudinit/sources/DataSourceNone.py
+++ b/cloudinit/sources/DataSourceNone.py
@@ -14,23 +14,23 @@ class DataSourceNone(sources.DataSource):
def __init__(self, sys_cfg, distro, paths, ud_proc=None):
sources.DataSource.__init__(self, sys_cfg, distro, paths, ud_proc)
self.metadata = {}
- self.userdata_raw = ''
+ self.userdata_raw = ""
def _get_data(self):
# If the datasource config has any provided 'fallback'
# userdata or metadata, use it...
- if 'userdata_raw' in self.ds_cfg:
- self.userdata_raw = self.ds_cfg['userdata_raw']
- if 'metadata' in self.ds_cfg:
- self.metadata = self.ds_cfg['metadata']
+ if "userdata_raw" in self.ds_cfg:
+ self.userdata_raw = self.ds_cfg["userdata_raw"]
+ if "metadata" in self.ds_cfg:
+ self.metadata = self.ds_cfg["metadata"]
return True
def _get_subplatform(self):
"""Return the subplatform metadata source details."""
- return 'config'
+ return "config"
def get_instance_id(self):
- return 'iid-datasource-none'
+ return "iid-datasource-none"
@property
def is_disconnected(self):
@@ -48,4 +48,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index 5257a534..0df39824 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -16,32 +16,32 @@ from xml.dom import minidom
from cloudinit import dmi
from cloudinit import log as logging
-from cloudinit import safeyaml
-from cloudinit import sources
-from cloudinit import subp
-from cloudinit import util
-from cloudinit.sources.helpers.vmware.imc.config \
- import Config
-from cloudinit.sources.helpers.vmware.imc.config_custom_script \
- import PreCustomScript, PostCustomScript
-from cloudinit.sources.helpers.vmware.imc.config_file \
- import ConfigFile
-from cloudinit.sources.helpers.vmware.imc.config_nic \
- import NicConfigurator
-from cloudinit.sources.helpers.vmware.imc.config_passwd \
- import PasswordConfigurator
-from cloudinit.sources.helpers.vmware.imc.guestcust_error \
- import GuestCustErrorEnum
-from cloudinit.sources.helpers.vmware.imc.guestcust_event \
- import GuestCustEventEnum as GuestCustEvent
-from cloudinit.sources.helpers.vmware.imc.guestcust_state \
- import GuestCustStateEnum
+from cloudinit import safeyaml, sources, subp, util
+from cloudinit.sources.helpers.vmware.imc.config import Config
+from cloudinit.sources.helpers.vmware.imc.config_custom_script import (
+ PostCustomScript,
+ PreCustomScript,
+)
+from cloudinit.sources.helpers.vmware.imc.config_file import ConfigFile
+from cloudinit.sources.helpers.vmware.imc.config_nic import NicConfigurator
+from cloudinit.sources.helpers.vmware.imc.config_passwd import (
+ PasswordConfigurator,
+)
+from cloudinit.sources.helpers.vmware.imc.guestcust_error import (
+ GuestCustErrorEnum,
+)
+from cloudinit.sources.helpers.vmware.imc.guestcust_event import (
+ GuestCustEventEnum as GuestCustEvent,
+)
+from cloudinit.sources.helpers.vmware.imc.guestcust_state import (
+ GuestCustStateEnum,
+)
from cloudinit.sources.helpers.vmware.imc.guestcust_util import (
enable_nics,
get_nics_to_enable,
- set_customization_status,
get_tools_config,
- set_gc_status
+ set_customization_status,
+ set_gc_status,
)
LOG = logging.getLogger(__name__)
@@ -58,7 +58,7 @@ class DataSourceOVF(sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.seed = None
- self.seed_dir = os.path.join(paths.seed_dir, 'ovf')
+ self.seed_dir = os.path.join(paths.seed_dir, "ovf")
self.environment = None
self.cfg = {}
self.supported_seed_starts = ("/", "file://")
@@ -96,36 +96,43 @@ class DataSourceOVF(sources.DataSource):
(md, ud, cfg) = read_ovf_environment(contents)
self.environment = contents
found.append(seed)
- elif system_type and 'vmware' in system_type.lower():
+ elif system_type and "vmware" in system_type.lower():
LOG.debug("VMware Virtualization Platform found")
allow_vmware_cust = False
allow_raw_data = False
if not self.vmware_customization_supported:
- LOG.debug("Skipping the check for "
- "VMware Customization support")
+ LOG.debug(
+ "Skipping the check for VMware Customization support"
+ )
else:
allow_vmware_cust = not util.get_cfg_option_bool(
- self.sys_cfg, "disable_vmware_customization", True)
+ self.sys_cfg, "disable_vmware_customization", True
+ )
allow_raw_data = util.get_cfg_option_bool(
- self.ds_cfg, "allow_raw_data", True)
+ self.ds_cfg, "allow_raw_data", True
+ )
if not (allow_vmware_cust or allow_raw_data):
- LOG.debug(
- "Customization for VMware platform is disabled.")
+ LOG.debug("Customization for VMware platform is disabled.")
else:
search_paths = (
- "/usr/lib/vmware-tools", "/usr/lib64/vmware-tools",
- "/usr/lib/open-vm-tools", "/usr/lib64/open-vm-tools",
+ "/usr/lib/vmware-tools",
+ "/usr/lib64/vmware-tools",
+ "/usr/lib/open-vm-tools",
+ "/usr/lib64/open-vm-tools",
"/usr/lib/x86_64-linux-gnu/open-vm-tools",
- "/usr/lib/aarch64-linux-gnu/open-vm-tools")
+ "/usr/lib/aarch64-linux-gnu/open-vm-tools",
+ )
plugin = "libdeployPkgPlugin.so"
deployPkgPluginPath = None
for path in search_paths:
deployPkgPluginPath = search_file(path, plugin)
if deployPkgPluginPath:
- LOG.debug("Found the customization plugin at %s",
- deployPkgPluginPath)
+ LOG.debug(
+ "Found the customization plugin at %s",
+ deployPkgPluginPath,
+ )
break
if deployPkgPluginPath:
@@ -140,7 +147,8 @@ class DataSourceOVF(sources.DataSource):
logfunc=LOG.debug,
msg="waiting for configuration file",
func=wait_for_imc_cfg_file,
- args=("cust.cfg", max_wait))
+ args=("cust.cfg", max_wait),
+ )
else:
LOG.debug("Did not find the customization plugin.")
@@ -149,30 +157,34 @@ class DataSourceOVF(sources.DataSource):
imcdirpath = os.path.dirname(vmwareImcConfigFilePath)
cf = ConfigFile(vmwareImcConfigFilePath)
self._vmware_cust_conf = Config(cf)
- LOG.debug("Found VMware Customization Config File at %s",
- vmwareImcConfigFilePath)
+ LOG.debug(
+ "Found VMware Customization Config File at %s",
+ vmwareImcConfigFilePath,
+ )
try:
(md_path, ud_path, nicspath) = collect_imc_file_paths(
- self._vmware_cust_conf)
+ self._vmware_cust_conf
+ )
except FileNotFoundError as e:
_raise_error_status(
"File(s) missing in directory",
e,
GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
vmwareImcConfigFilePath,
- self._vmware_cust_conf)
+ self._vmware_cust_conf,
+ )
# Don't handle the customization for below 2 cases:
# 1. meta data is found, allow_raw_data is False.
# 2. no meta data is found, allow_vmware_cust is False.
if md_path and not allow_raw_data:
- LOG.debug(
- "Customization using raw data is disabled.")
+ LOG.debug("Customization using raw data is disabled.")
# reset vmwareImcConfigFilePath to None to avoid
# customization for VMware platform
vmwareImcConfigFilePath = None
if md_path is None and not allow_vmware_cust:
LOG.debug(
- "Customization using VMware config is disabled.")
+ "Customization using VMware config is disabled."
+ )
vmwareImcConfigFilePath = None
else:
LOG.debug("Did not find VMware Customization Config File")
@@ -197,22 +209,25 @@ class DataSourceOVF(sources.DataSource):
e,
GuestCustErrorEnum.GUESTCUST_ERROR_WRONG_META_FORMAT,
vmwareImcConfigFilePath,
- self._vmware_cust_conf)
+ self._vmware_cust_conf,
+ )
except Exception as e:
_raise_error_status(
"Error loading cloud-init configuration",
e,
GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
vmwareImcConfigFilePath,
- self._vmware_cust_conf)
+ self._vmware_cust_conf,
+ )
self._vmware_cust_found = True
- found.append('vmware-tools')
+ found.append("vmware-tools")
util.del_dir(imcdirpath)
set_customization_status(
GuestCustStateEnum.GUESTCUST_STATE_DONE,
- GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS)
+ GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS,
+ )
set_gc_status(self._vmware_cust_conf, "Successful")
elif vmwareImcConfigFilePath:
@@ -225,7 +240,8 @@ class DataSourceOVF(sources.DataSource):
self._vmware_nics_to_enable = get_nics_to_enable(nicspath)
product_marker = self._vmware_cust_conf.marker_id
hasmarkerfile = check_marker_exists(
- product_marker, os.path.join(self.paths.cloud_dir, 'data'))
+ product_marker, os.path.join(self.paths.cloud_dir, "data")
+ )
special_customization = product_marker and not hasmarkerfile
customscript = self._vmware_cust_conf.custom_script_name
@@ -243,7 +259,8 @@ class DataSourceOVF(sources.DataSource):
custScriptConfig = get_tools_config(
CONFGROUPNAME_GUESTCUSTOMIZATION,
GUESTCUSTOMIZATION_ENABLE_CUST_SCRIPTS,
- defVal)
+ defVal,
+ )
if custScriptConfig.lower() != "true":
# Update the customization status if custom script
# is disabled
@@ -251,19 +268,21 @@ class DataSourceOVF(sources.DataSource):
LOG.debug(msg)
set_customization_status(
GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
- GuestCustErrorEnum.GUESTCUST_ERROR_SCRIPT_DISABLED)
+ GuestCustErrorEnum.GUESTCUST_ERROR_SCRIPT_DISABLED,
+ )
raise RuntimeError(msg)
ccScriptsDir = os.path.join(
- self.paths.get_cpath("scripts"),
- "per-instance")
+ self.paths.get_cpath("scripts"), "per-instance"
+ )
except Exception as e:
_raise_error_status(
"Error parsing the customization Config File",
e,
GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
vmwareImcConfigFilePath,
- self._vmware_cust_conf)
+ self._vmware_cust_conf,
+ )
if special_customization:
if customscript:
@@ -276,22 +295,22 @@ class DataSourceOVF(sources.DataSource):
e,
GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
vmwareImcConfigFilePath,
- self._vmware_cust_conf)
+ self._vmware_cust_conf,
+ )
try:
LOG.debug("Preparing the Network configuration")
self._network_config = get_network_config_from_conf(
- self._vmware_cust_conf,
- True,
- True,
- self.distro.osfamily)
+ self._vmware_cust_conf, True, True, self.distro.osfamily
+ )
except Exception as e:
_raise_error_status(
"Error preparing Network Configuration",
e,
GuestCustEvent.GUESTCUST_EVENT_NETWORK_SETUP_FAILED,
vmwareImcConfigFilePath,
- self._vmware_cust_conf)
+ self._vmware_cust_conf,
+ )
if special_customization:
LOG.debug("Applying password customization")
@@ -300,8 +319,9 @@ class DataSourceOVF(sources.DataSource):
try:
resetpwd = self._vmware_cust_conf.reset_password
if adminpwd or resetpwd:
- pwdConfigurator.configure(adminpwd, resetpwd,
- self.distro)
+ pwdConfigurator.configure(
+ adminpwd, resetpwd, self.distro
+ )
else:
LOG.debug("Changing password is not needed")
except Exception as e:
@@ -310,13 +330,14 @@ class DataSourceOVF(sources.DataSource):
e,
GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
vmwareImcConfigFilePath,
- self._vmware_cust_conf)
+ self._vmware_cust_conf,
+ )
if customscript:
try:
- postcust = PostCustomScript(customscript,
- imcdirpath,
- ccScriptsDir)
+ postcust = PostCustomScript(
+ customscript, imcdirpath, ccScriptsDir
+ )
postcust.execute()
except Exception as e:
_raise_error_status(
@@ -324,23 +345,26 @@ class DataSourceOVF(sources.DataSource):
e,
GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
vmwareImcConfigFilePath,
- self._vmware_cust_conf)
+ self._vmware_cust_conf,
+ )
if product_marker:
try:
setup_marker_files(
product_marker,
- os.path.join(self.paths.cloud_dir, 'data'))
+ os.path.join(self.paths.cloud_dir, "data"),
+ )
except Exception as e:
_raise_error_status(
"Error creating marker files",
e,
GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
vmwareImcConfigFilePath,
- self._vmware_cust_conf)
+ self._vmware_cust_conf,
+ )
self._vmware_cust_found = True
- found.append('vmware-tools')
+ found.append("vmware-tools")
# TODO: Need to set the status to DONE only when the
# customization is done successfully.
@@ -348,12 +372,15 @@ class DataSourceOVF(sources.DataSource):
enable_nics(self._vmware_nics_to_enable)
set_customization_status(
GuestCustStateEnum.GUESTCUST_STATE_DONE,
- GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS)
+ GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS,
+ )
set_gc_status(self._vmware_cust_conf, "Successful")
else:
- np = [('com.vmware.guestInfo', transport_vmware_guestinfo),
- ('iso', transport_iso9660)]
+ np = [
+ ("com.vmware.guestInfo", transport_vmware_guestinfo),
+ ("iso", transport_iso9660),
+ ]
name = None
for name, transfunc in np:
contents = transfunc()
@@ -362,24 +389,23 @@ class DataSourceOVF(sources.DataSource):
if contents:
(md, ud, cfg) = read_ovf_environment(contents, True)
self.environment = contents
- if 'network-config' in md and md['network-config']:
- self._network_config = md['network-config']
+ if "network-config" in md and md["network-config"]:
+ self._network_config = md["network-config"]
found.append(name)
# There was no OVF transports found
if len(found) == 0:
return False
- if 'seedfrom' in md and md['seedfrom']:
- seedfrom = md['seedfrom']
+ if "seedfrom" in md and md["seedfrom"]:
+ seedfrom = md["seedfrom"]
seedfound = False
for proto in self.supported_seed_starts:
if seedfrom.startswith(proto):
seedfound = proto
break
if not seedfound:
- LOG.debug("Seed from %s not supported by %s",
- seedfrom, self)
+ LOG.debug("Seed from %s not supported by %s", seedfrom, self)
return False
(md_seed, ud, vd) = util.read_seeded(seedfrom, timeout=None)
@@ -400,14 +426,14 @@ class DataSourceOVF(sources.DataSource):
def _get_subplatform(self):
system_type = dmi.read_dmi_data("system-product-name").lower()
- if system_type == 'vmware':
- return 'vmware (%s)' % self.seed
- return 'ovf (%s)' % self.seed
+ if system_type == "vmware":
+ return "vmware (%s)" % self.seed
+ return "ovf (%s)" % self.seed
def get_public_ssh_keys(self):
- if 'public-keys' not in self.metadata:
+ if "public-keys" not in self.metadata:
return []
- pks = self.metadata['public-keys']
+ pks = self.metadata["public-keys"]
if isinstance(pks, (list)):
return pks
else:
@@ -427,14 +453,14 @@ class DataSourceOVF(sources.DataSource):
class DataSourceOVFNet(DataSourceOVF):
def __init__(self, sys_cfg, distro, paths):
DataSourceOVF.__init__(self, sys_cfg, distro, paths)
- self.seed_dir = os.path.join(paths.seed_dir, 'ovf-net')
+ self.seed_dir = os.path.join(paths.seed_dir, "ovf-net")
self.supported_seed_starts = ("http://", "https://")
self.vmware_customization_supported = False
def get_max_wait_from_cfg(cfg):
default_max_wait = 15
- max_wait_cfg_option = 'vmware_cust_file_max_wait'
+ max_wait_cfg_option = "vmware_cust_file_max_wait"
max_wait = default_max_wait
if not cfg:
@@ -443,19 +469,27 @@ def get_max_wait_from_cfg(cfg):
try:
max_wait = int(cfg.get(max_wait_cfg_option, default_max_wait))
except ValueError:
- LOG.warning("Failed to get '%s', using %s",
- max_wait_cfg_option, default_max_wait)
+ LOG.warning(
+ "Failed to get '%s', using %s",
+ max_wait_cfg_option,
+ default_max_wait,
+ )
if max_wait < 0:
- LOG.warning("Invalid value '%s' for '%s', using '%s' instead",
- max_wait, max_wait_cfg_option, default_max_wait)
+ LOG.warning(
+ "Invalid value '%s' for '%s', using '%s' instead",
+ max_wait,
+ max_wait_cfg_option,
+ default_max_wait,
+ )
max_wait = default_max_wait
return max_wait
-def wait_for_imc_cfg_file(filename, maxwait=180, naplen=5,
- dirpath="/var/run/vmware-imc"):
+def wait_for_imc_cfg_file(
+ filename, maxwait=180, naplen=5, dirpath="/var/run/vmware-imc"
+):
waited = 0
if maxwait <= naplen:
naplen = 1
@@ -470,24 +504,26 @@ def wait_for_imc_cfg_file(filename, maxwait=180, naplen=5,
return None
-def get_network_config_from_conf(config, use_system_devices=True,
- configure=False, osfamily=None):
+def get_network_config_from_conf(
+ config, use_system_devices=True, configure=False, osfamily=None
+):
nicConfigurator = NicConfigurator(config.nics, use_system_devices)
nics_cfg_list = nicConfigurator.generate(configure, osfamily)
- return get_network_config(nics_cfg_list,
- config.name_servers,
- config.dns_suffixes)
+ return get_network_config(
+ nics_cfg_list, config.name_servers, config.dns_suffixes
+ )
def get_network_config(nics=None, nameservers=None, search=None):
config_list = nics
if nameservers or search:
- config_list.append({'type': 'nameserver', 'address': nameservers,
- 'search': search})
+ config_list.append(
+ {"type": "nameserver", "address": nameservers, "search": search}
+ )
- return {'version': 1, 'config': config_list}
+ return {"version": 1, "config": config_list}
# This will return a dict with some content
@@ -498,14 +534,14 @@ def read_vmware_imc(config):
ud = None
if config.host_name:
if config.domain_name:
- md['local-hostname'] = config.host_name + "." + config.domain_name
+ md["local-hostname"] = config.host_name + "." + config.domain_name
else:
- md['local-hostname'] = config.host_name
+ md["local-hostname"] = config.host_name
if config.timezone:
- cfg['timezone'] = config.timezone
+ cfg["timezone"] = config.timezone
- md['instance-id'] = "iid-vmware-imc"
+ md["instance-id"] = "iid-vmware-imc"
return (md, ud, cfg)
@@ -516,11 +552,11 @@ def read_ovf_environment(contents, read_network=False):
md = {}
cfg = {}
ud = None
- cfg_props = ['password']
- md_props = ['seedfrom', 'local-hostname', 'public-keys', 'instance-id']
- network_props = ['network-config']
+ cfg_props = ["password"]
+ md_props = ["seedfrom", "local-hostname", "public-keys", "instance-id"]
+ network_props = ["network-config"]
for (prop, val) in props.items():
- if prop == 'hostname':
+ if prop == "hostname":
prop = "local-hostname"
if prop in md_props:
md[prop] = val
@@ -529,7 +565,7 @@ def read_ovf_environment(contents, read_network=False):
elif prop in network_props and read_network:
try:
network_config = base64.b64decode(val.encode())
- md[prop] = safeload_yaml_or_dict(network_config).get('network')
+ md[prop] = safeload_yaml_or_dict(network_config).get("network")
except Exception:
LOG.debug("Ignore network-config in wrong format")
elif prop == "user-data":
@@ -601,12 +637,12 @@ def transport_iso9660(require_iso=True):
# Go through mounts to see if it was already mounted
mounts = util.mounts()
for (dev, info) in mounts.items():
- fstype = info['fstype']
+ fstype = info["fstype"]
if fstype != "iso9660" and require_iso:
continue
if not maybe_cdrom_device(dev):
continue
- mp = info['mountpoint']
+ mp = info["mountpoint"]
(_fname, contents) = get_ovf_env(mp)
if contents is not False:
return contents
@@ -617,9 +653,11 @@ def transport_iso9660(require_iso=True):
mtype = None
# generate a list of devices with mtype filesystem, filter by regex
- devs = [dev for dev in
- util.find_devs_with("TYPE=%s" % mtype if mtype else None)
- if maybe_cdrom_device(dev)]
+ devs = [
+ dev
+ for dev in util.find_devs_with("TYPE=%s" % mtype if mtype else None)
+ if maybe_cdrom_device(dev)
+ ]
for dev in devs:
try:
(_fname, contents) = util.mount_cb(dev, get_ovf_env, mtype=mtype)
@@ -674,15 +712,17 @@ def get_properties(contents):
# could also check here that elem.namespaceURI ==
# "http://schemas.dmtf.org/ovf/environment/1"
- propSections = find_child(dom.documentElement,
- lambda n: n.localName == "PropertySection")
+ propSections = find_child(
+ dom.documentElement, lambda n: n.localName == "PropertySection"
+ )
if len(propSections) == 0:
raise XmlError("No 'PropertySection's")
props = {}
- propElems = find_child(propSections[0],
- (lambda n: n.localName == "Property"))
+ propElems = find_child(
+ propSections[0], (lambda n: n.localName == "Property")
+ )
for elem in propElems:
key = elem.attributes.getNamedItemNS(envNsURI, "key").value
@@ -709,7 +749,7 @@ class XmlError(Exception):
# Used to match classes to dependencies
datasources = (
- (DataSourceOVF, (sources.DEP_FILESYSTEM, )),
+ (DataSourceOVF, (sources.DEP_FILESYSTEM,)),
(DataSourceOVFNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
)
@@ -753,7 +793,7 @@ def setup_marker_files(markerid, marker_dir):
for fname in os.listdir(marker_dir):
if fname.startswith(".markerfile"):
util.del_file(os.path.join(marker_dir, fname))
- open(markerfile, 'w').close()
+ open(markerfile, "w").close()
def _raise_error_status(prefix, error, event, config_file, conf):
@@ -761,10 +801,8 @@ def _raise_error_status(prefix, error, event, config_file, conf):
Raise error and send customization status to the underlying VMware
Virtualization Platform. Also, cleanup the imc directory.
"""
- LOG.debug('%s: %s', prefix, error)
- set_customization_status(
- GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
- event)
+ LOG.debug("%s: %s", prefix, error)
+ set_customization_status(GuestCustStateEnum.GUESTCUST_STATE_RUNNING, event)
set_gc_status(conf, prefix)
util.del_dir(os.path.dirname(config_file))
raise error
@@ -780,16 +818,15 @@ def load_cloudinit_data(md_path, ud_path):
@raises: FileNotFoundError if md_path or ud_path are absent
"""
- LOG.debug('load meta data from: %s: user data from: %s',
- md_path, ud_path)
+ LOG.debug("load meta data from: %s: user data from: %s", md_path, ud_path)
md = {}
ud = None
network = None
md = safeload_yaml_or_dict(util.load_file(md_path))
- if 'network' in md:
- network = md['network']
+ if "network" in md:
+ network = md["network"]
if ud_path:
ud = util.load_file(ud_path).replace("\r", "")
@@ -797,18 +834,18 @@ def load_cloudinit_data(md_path, ud_path):
def safeload_yaml_or_dict(data):
- '''
+ """
The meta data could be JSON or YAML. Since YAML is a strict superset of
JSON, we will unmarshal the data as YAML. If data is None then a new
dictionary is returned.
- '''
+ """
if not data:
return {}
return safeyaml.load(data)
def collect_imc_file_paths(cust_conf):
- '''
+ """
collect all the other imc files.
metadata is preferred to nics.txt configuration data.
@@ -822,7 +859,7 @@ def collect_imc_file_paths(cust_conf):
2. user provided metadata (md_path, None, None)
3. user-provided network config (None, None, nics_path)
4. No config found (None, None, None)
- '''
+ """
md_path = None
ud_path = None
nics_path = None
@@ -830,19 +867,21 @@ def collect_imc_file_paths(cust_conf):
if md_file:
md_path = os.path.join(VMWARE_IMC_DIR, md_file)
if not os.path.exists(md_path):
- raise FileNotFoundError("meta data file is not found: %s"
- % md_path)
+ raise FileNotFoundError(
+ "meta data file is not found: %s" % md_path
+ )
ud_file = cust_conf.user_data_name
if ud_file:
ud_path = os.path.join(VMWARE_IMC_DIR, ud_file)
if not os.path.exists(ud_path):
- raise FileNotFoundError("user data file is not found: %s"
- % ud_path)
+ raise FileNotFoundError(
+ "user data file is not found: %s" % ud_path
+ )
else:
nics_path = os.path.join(VMWARE_IMC_DIR, "nics.txt")
if not os.path.exists(nics_path):
- LOG.debug('%s does not exist.', nics_path)
+ LOG.debug("%s does not exist.", nics_path)
nics_path = None
return md_path, ud_path, nics_path
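
The import block at the top of DataSourceOVF.py shows isort's side of the change: backslash-continued "from ... import X" statements become one parenthesized import per module, with one name per line and a trailing comma once the import wraps. The same layout with a standard-library module (xml.dom.minidom, which this datasource already uses to parse the OVF environment), since the cloudinit.sources.helpers modules are not importable outside the tree:

    from xml.dom.minidom import (
        Document,
        parseString,
    )

    doc = parseString("<Environment/>")
    print(doc.documentElement.localName, isinstance(doc, Document))
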
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index 21603fbd..9734d1a8 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -20,16 +20,12 @@ import re
import string
from cloudinit import log as logging
-from cloudinit import net
-from cloudinit import sources
-from cloudinit import subp
-from cloudinit import util
-
+from cloudinit import net, sources, subp, util
LOG = logging.getLogger(__name__)
DEFAULT_IID = "iid-dsopennebula"
-DEFAULT_PARSEUSER = 'nobody'
+DEFAULT_PARSEUSER = "nobody"
CONTEXT_DISK_FILES = ["context.sh"]
@@ -40,7 +36,7 @@ class DataSourceOpenNebula(sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.seed = None
- self.seed_dir = os.path.join(paths.seed_dir, 'opennebula')
+ self.seed_dir = os.path.join(paths.seed_dir, "opennebula")
def __str__(self):
root = sources.DataSource.__str__(self)
@@ -53,8 +49,8 @@ class DataSourceOpenNebula(sources.DataSource):
# decide parseuser for context.sh shell reader
parseuser = DEFAULT_PARSEUSER
- if 'parseuser' in self.ds_cfg:
- parseuser = self.ds_cfg.get('parseuser')
+ if "parseuser" in self.ds_cfg:
+ parseuser = self.ds_cfg.get("parseuser")
candidates = [self.seed_dir]
candidates.extend(find_candidate_devs())
@@ -90,29 +86,30 @@ class DataSourceOpenNebula(sources.DataSource):
return False
# merge fetched metadata with datasource defaults
- md = results['metadata']
+ md = results["metadata"]
md = util.mergemanydict([md, defaults])
# check for valid user specified dsmode
self.dsmode = self._determine_dsmode(
- [results.get('DSMODE'), self.ds_cfg.get('dsmode')])
+ [results.get("DSMODE"), self.ds_cfg.get("dsmode")]
+ )
if self.dsmode == sources.DSMODE_DISABLED:
return False
self.seed = seed
- self.network = results.get('network-interfaces')
+ self.network = results.get("network-interfaces")
self.metadata = md
- self.userdata_raw = results.get('userdata')
+ self.userdata_raw = results.get("userdata")
return True
def _get_subplatform(self):
"""Return the subplatform metadata source details."""
if self.seed_dir in self.seed:
- subplatform_type = 'seed-dir'
+ subplatform_type = "seed-dir"
else:
- subplatform_type = 'config-disk'
- return '%s (%s)' % (subplatform_type, self.seed)
+ subplatform_type = "config-disk"
+ return "%s (%s)" % (subplatform_type, self.seed)
@property
def network_config(self):
@@ -144,19 +141,25 @@ class OpenNebulaNetwork(object):
if system_nics_by_mac is None:
system_nics_by_mac = get_physical_nics_by_mac(distro)
self.ifaces = collections.OrderedDict(
- [k for k in sorted(system_nics_by_mac.items(),
- key=lambda k: net.natural_sort_key(k[1]))])
+ [
+ k
+ for k in sorted(
+ system_nics_by_mac.items(),
+ key=lambda k: net.natural_sort_key(k[1]),
+ )
+ ]
+ )
# OpenNebula 4.14+ provide macaddr for ETHX in variable ETH_MAC.
# context_devname provides {mac.lower():ETHX, mac2.lower():ETHX}
self.context_devname = {}
for k, v in context.items():
- m = re.match(r'^(.+)_MAC$', k)
+ m = re.match(r"^(.+)_MAC$", k)
if m:
self.context_devname[v.lower()] = m.group(1)
def mac2ip(self, mac):
- return '.'.join([str(int(c, 16)) for c in mac.split(':')[2:]])
+ return ".".join([str(int(c, 16)) for c in mac.split(":")[2:]])
def mac2network(self, mac):
return self.mac2ip(mac).rpartition(".")[0] + ".0"
@@ -164,12 +167,12 @@ class OpenNebulaNetwork(object):
def get_nameservers(self, dev):
nameservers = {}
dns = self.get_field(dev, "dns", "").split()
- dns.extend(self.context.get('DNS', "").split())
+ dns.extend(self.context.get("DNS", "").split())
if dns:
- nameservers['addresses'] = dns
+ nameservers["addresses"] = dns
search_domain = self.get_field(dev, "search_domain", "").split()
if search_domain:
- nameservers['search'] = search_domain
+ nameservers["search"] = search_domain
return nameservers
def get_mtu(self, dev):
@@ -198,8 +201,9 @@ class OpenNebulaNetwork(object):
# OpenNebula 6.1.80 introduced new context parameter ETHx_IP6_GATEWAY
# to replace old ETHx_GATEWAY6. Old ETHx_GATEWAY6 will be removed in
# OpenNebula 6.4.0 (https://github.com/OpenNebula/one/issues/5536).
- return self.get_field(dev, "ip6_gateway",
- self.get_field(dev, "gateway6"))
+ return self.get_field(
+ dev, "ip6_gateway", self.get_field(dev, "gateway6")
+ )
def get_mask(self, dev):
return self.get_field(dev, "mask", "255.255.255.0")
@@ -212,14 +216,21 @@ class OpenNebulaNetwork(object):
context stores <dev>_<NAME> (example: eth0_DOMAIN).
an empty string for value will return default."""
- val = self.context.get('_'.join((dev, name,)).upper())
+ val = self.context.get(
+ "_".join(
+ (
+ dev,
+ name,
+ )
+ ).upper()
+ )
# allow empty string to return the default.
return default if val in (None, "") else val
def gen_conf(self):
netconf = {}
- netconf['version'] = 2
- netconf['ethernets'] = {}
+ netconf["version"] = 2
+ netconf["ethernets"] = {}
ethernets = {}
for mac, dev in self.ifaces.items():
@@ -232,46 +243,46 @@ class OpenNebulaNetwork(object):
devconf = {}
# Set MAC address
- devconf['match'] = {'macaddress': mac}
+ devconf["match"] = {"macaddress": mac}
# Set IPv4 address
- devconf['addresses'] = []
+ devconf["addresses"] = []
mask = self.get_mask(c_dev)
prefix = str(net.mask_to_net_prefix(mask))
- devconf['addresses'].append(
- self.get_ip(c_dev, mac) + '/' + prefix)
+ devconf["addresses"].append(self.get_ip(c_dev, mac) + "/" + prefix)
# Set IPv6 Global and ULA address
addresses6 = self.get_ip6(c_dev)
if addresses6:
prefix6 = self.get_ip6_prefix(c_dev)
- devconf['addresses'].extend(
- [i + '/' + prefix6 for i in addresses6])
+ devconf["addresses"].extend(
+ [i + "/" + prefix6 for i in addresses6]
+ )
# Set IPv4 default gateway
gateway = self.get_gateway(c_dev)
if gateway:
- devconf['gateway4'] = gateway
+ devconf["gateway4"] = gateway
# Set IPv6 default gateway
gateway6 = self.get_gateway6(c_dev)
if gateway6:
- devconf['gateway6'] = gateway6
+ devconf["gateway6"] = gateway6
# Set DNS servers and search domains
nameservers = self.get_nameservers(c_dev)
if nameservers:
- devconf['nameservers'] = nameservers
+ devconf["nameservers"] = nameservers
# Set MTU size
mtu = self.get_mtu(c_dev)
if mtu:
- devconf['mtu'] = mtu
+ devconf["mtu"] = mtu
ethernets[dev] = devconf
- netconf['ethernets'] = ethernets
- return(netconf)
+ netconf["ethernets"] = ethernets
+ return netconf
def find_candidate_devs():
@@ -279,7 +290,7 @@ def find_candidate_devs():
Return a list of devices that may contain the context disk.
"""
combined = []
- for f in ('LABEL=CONTEXT', 'LABEL=CDROM', 'TYPE=iso9660'):
+ for f in ("LABEL=CONTEXT", "LABEL=CDROM", "TYPE=iso9660"):
devs = util.find_devs_with(f)
devs.sort()
for d in devs:
@@ -290,16 +301,17 @@ def find_candidate_devs():
def switch_user_cmd(user):
- return ['sudo', '-u', user]
+ return ["sudo", "-u", user]
-def parse_shell_config(content, keylist=None, bash=None, asuser=None,
- switch_user_cb=None):
+def parse_shell_config(
+ content, keylist=None, bash=None, asuser=None, switch_user_cb=None
+):
if isinstance(bash, str):
bash = [bash]
elif bash is None:
- bash = ['bash', '-e']
+ bash = ["bash", "-e"]
if switch_user_cb is None:
switch_user_cb = switch_user_cmd
@@ -313,17 +325,24 @@ def parse_shell_config(content, keylist=None, bash=None, asuser=None,
keylist = allvars
keylist_in = []
- setup = '\n'.join(('__v="";', '',))
+ setup = "\n".join(
+ (
+ '__v="";',
+ "",
+ )
+ )
def varprinter(vlist):
# output '\0'.join(['_start_', key=value NULL for vars in vlist]
- return '\n'.join((
- 'printf "%s\\0" _start_',
- 'for __v in %s; do' % ' '.join(vlist),
- ' printf "%s=%s\\0" "$__v" "${!__v}";',
- 'done',
- ''
- ))
+ return "\n".join(
+ (
+ 'printf "%s\\0" _start_',
+ "for __v in %s; do" % " ".join(vlist),
+ ' printf "%s=%s\\0" "$__v" "${!__v}";',
+ "done",
+ "",
+ )
+ )
# the rendered 'bcmd' is bash syntax that does
# setup: declare variables we use (so they show up in 'all')
@@ -336,12 +355,15 @@ def parse_shell_config(content, keylist=None, bash=None, asuser=None,
# key=value (for each preset variable)
# literal '_start_'
# key=value (for each post set variable)
- bcmd = ('unset IFS\n' +
- setup +
- varprinter(allvars) +
- '{\n%s\n\n:\n} > /dev/null\n' % content +
- 'unset IFS\n' +
- varprinter(keylist) + "\n")
+ bcmd = (
+ "unset IFS\n"
+ + setup
+ + varprinter(allvars)
+ + "{\n%s\n\n:\n} > /dev/null\n" % content
+ + "unset IFS\n"
+ + varprinter(keylist)
+ + "\n"
+ )
cmd = []
if asuser is not None:
@@ -353,8 +375,14 @@ def parse_shell_config(content, keylist=None, bash=None, asuser=None,
# exclude vars in bash that change on their own or that we used
excluded = (
- "EPOCHREALTIME", "EPOCHSECONDS", "RANDOM", "LINENO", "SECONDS", "_",
- "SRANDOM", "__v",
+ "EPOCHREALTIME",
+ "EPOCHSECONDS",
+ "RANDOM",
+ "LINENO",
+ "SECONDS",
+ "_",
+ "SRANDOM",
+ "__v",
)
preset = {}
ret = {}
@@ -368,8 +396,9 @@ def parse_shell_config(content, keylist=None, bash=None, asuser=None,
(key, val) = line.split("=", 1)
if target is preset:
preset[key] = val
- elif (key not in excluded and
- (key in keylist_in or preset.get(key) != val)):
+ elif key not in excluded and (
+ key in keylist_in or preset.get(key) != val
+ ):
ret[key] = val
except ValueError:
if line != "_start_":
@@ -398,7 +427,7 @@ def read_context_disk_dir(source_dir, distro, asuser=None):
raise NonContextDiskDir("%s: %s" % (source_dir, "no files found"))
context = {}
- results = {'userdata': None, 'metadata': {}}
+ results = {"userdata": None, "metadata": {}}
if "context.sh" in found:
if asuser is not None:
@@ -407,10 +436,11 @@ def read_context_disk_dir(source_dir, distro, asuser=None):
except KeyError as e:
raise BrokenContextDiskDir(
"configured user '{user}' does not exist".format(
- user=asuser)
+ user=asuser
+ )
) from e
try:
- path = os.path.join(source_dir, 'context.sh')
+ path = os.path.join(source_dir, "context.sh")
content = util.load_file(path)
context = parse_shell_config(content, asuser=asuser)
except subp.ProcessExecutionError as e:
@@ -427,7 +457,7 @@ def read_context_disk_dir(source_dir, distro, asuser=None):
if not context:
return results
- results['metadata'] = context
+ results["metadata"] = context
# process single or multiple SSH keys
ssh_key_var = None
@@ -438,40 +468,41 @@ def read_context_disk_dir(source_dir, distro, asuser=None):
if ssh_key_var:
lines = context.get(ssh_key_var).splitlines()
- results['metadata']['public-keys'] = [
+ results["metadata"]["public-keys"] = [
line for line in lines if len(line) and not line.startswith("#")
]
# custom hostname -- try hostname or leave cloud-init
# itself create hostname from IP address later
- for k in ('SET_HOSTNAME', 'HOSTNAME', 'PUBLIC_IP', 'IP_PUBLIC', 'ETH0_IP'):
+ for k in ("SET_HOSTNAME", "HOSTNAME", "PUBLIC_IP", "IP_PUBLIC", "ETH0_IP"):
if k in context:
- results['metadata']['local-hostname'] = context[k]
+ results["metadata"]["local-hostname"] = context[k]
break
# raw user data
if "USER_DATA" in context:
- results['userdata'] = context["USER_DATA"]
+ results["userdata"] = context["USER_DATA"]
elif "USERDATA" in context:
- results['userdata'] = context["USERDATA"]
+ results["userdata"] = context["USERDATA"]
# b64decode user data if necessary (default)
- if 'userdata' in results:
- encoding = context.get('USERDATA_ENCODING',
- context.get('USER_DATA_ENCODING'))
+ if "userdata" in results:
+ encoding = context.get(
+ "USERDATA_ENCODING", context.get("USER_DATA_ENCODING")
+ )
if encoding == "base64":
try:
- results['userdata'] = util.b64d(results['userdata'])
+ results["userdata"] = util.b64d(results["userdata"])
except TypeError:
LOG.warning("Failed base64 decoding of userdata")
# generate Network Configuration v2
# only if there are any required context variables
# http://docs.opennebula.org/5.4/operation/references/template.html#context-section
- ipaddr_keys = [k for k in context if re.match(r'^ETH\d+_IP.*$', k)]
+ ipaddr_keys = [k for k in context if re.match(r"^ETH\d+_IP.*$", k)]
if ipaddr_keys:
onet = OpenNebulaNetwork(context, distro)
- results['network-interfaces'] = onet.gen_conf()
+ results["network-interfaces"] = onet.gen_conf()
return results
@@ -488,7 +519,7 @@ DataSourceOpenNebulaNet = DataSourceOpenNebula
# Used to match classes to dependencies
datasources = [
- (DataSourceOpenNebula, (sources.DEP_FILESYSTEM, )),
+ (DataSourceOpenNebula, (sources.DEP_FILESYSTEM,)),
]
@@ -496,4 +527,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
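Note on the OpenNebula address derivation touched above: mac2ip() treats the last four octets of a NIC's MAC as the IPv4 address, and mac2network() derives the network address from it. A minimal standalone sketch of that logic (the MAC value is a made-up example):

def mac2ip(mac):
    # last four MAC octets, each parsed as hex, joined as a dotted quad
    return ".".join(str(int(octet, 16)) for octet in mac.split(":")[2:])

def mac2network(mac):
    # drop the final octet and substitute .0 to get the network address
    return mac2ip(mac).rpartition(".")[0] + ".0"

assert mac2ip("02:00:0a:00:01:05") == "10.0.1.5"
assert mac2network("02:00:0a:00:01:05") == "10.0.1.0"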
diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
index a85b71d7..6878528d 100644
--- a/cloudinit/sources/DataSourceOpenStack.py
+++ b/cloudinit/sources/DataSourceOpenStack.py
@@ -8,13 +8,11 @@ import time
from cloudinit import dmi
from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import url_helper
-from cloudinit import util
+from cloudinit import sources, url_helper, util
from cloudinit.event import EventScope, EventType
from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
-from cloudinit.sources.helpers import openstack
from cloudinit.sources import DataSourceOracle as oracle
+from cloudinit.sources.helpers import openstack
LOG = logging.getLogger(__name__)
@@ -26,13 +24,13 @@ DEFAULT_METADATA = {
}
# OpenStack DMI constants
-DMI_PRODUCT_NOVA = 'OpenStack Nova'
-DMI_PRODUCT_COMPUTE = 'OpenStack Compute'
+DMI_PRODUCT_NOVA = "OpenStack Nova"
+DMI_PRODUCT_COMPUTE = "OpenStack Compute"
VALID_DMI_PRODUCT_NAMES = [DMI_PRODUCT_NOVA, DMI_PRODUCT_COMPUTE]
-DMI_ASSET_TAG_OPENTELEKOM = 'OpenTelekomCloud'
+DMI_ASSET_TAG_OPENTELEKOM = "OpenTelekomCloud"
# See github.com/sapcc/helm-charts/blob/master/openstack/nova/values.yaml
# -> compute.defaults.vmware.smbios_asset_tag for this value
-DMI_ASSET_TAG_SAPCCLOUD = 'SAP CCloud VM'
+DMI_ASSET_TAG_SAPCCLOUD = "SAP CCloud VM"
VALID_DMI_ASSET_TAGS = VALID_DMI_PRODUCT_NAMES
VALID_DMI_ASSET_TAGS += [DMI_ASSET_TAG_OPENTELEKOM, DMI_ASSET_TAG_SAPCCLOUD]
@@ -46,12 +44,14 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
# Whether we want to get network configuration from the metadata service.
perform_dhcp_setup = False
- supported_update_events = {EventScope.NETWORK: {
- EventType.BOOT_NEW_INSTANCE,
- EventType.BOOT,
- EventType.BOOT_LEGACY,
- EventType.HOTPLUG
- }}
+ supported_update_events = {
+ EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ EventType.BOOT_LEGACY,
+ EventType.HOTPLUG,
+ }
+ }
def __init__(self, sys_cfg, distro, paths):
super(DataSourceOpenStack, self).__init__(sys_cfg, distro, paths)
@@ -71,8 +71,10 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
urls = self.ds_cfg.get("metadata_urls", [DEF_MD_URL])
filtered = [x for x in urls if util.is_resolvable_url(x)]
if set(filtered) != set(urls):
- LOG.debug("Removed the following from metadata urls: %s",
- list((set(urls) - set(filtered))))
+ LOG.debug(
+ "Removed the following from metadata urls: %s",
+ list((set(urls) - set(filtered))),
+ )
if len(filtered):
urls = filtered
else:
@@ -82,20 +84,25 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
md_urls = []
url2base = {}
for url in urls:
- md_url = url_helper.combine_url(url, 'openstack')
+ md_url = url_helper.combine_url(url, "openstack")
md_urls.append(md_url)
url2base[md_url] = url
url_params = self.get_url_params()
start_time = time.time()
avail_url, _response = url_helper.wait_for_url(
- urls=md_urls, max_wait=url_params.max_wait_seconds,
- timeout=url_params.timeout_seconds)
+ urls=md_urls,
+ max_wait=url_params.max_wait_seconds,
+ timeout=url_params.timeout_seconds,
+ )
if avail_url:
LOG.debug("Using metadata source: '%s'", url2base[avail_url])
else:
- LOG.debug("Giving up on OpenStack md from %s after %s seconds",
- md_urls, int(time.time() - start_time))
+ LOG.debug(
+ "Giving up on OpenStack md from %s after %s seconds",
+ md_urls,
+ int(time.time() - start_time),
+ )
self.metadata_address = url2base.get(avail_url)
return bool(avail_url)
@@ -113,18 +120,20 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
# RELEASE_BLOCKER: SRU to Xenial and Artful SRU should not provide
# network_config by default unless configured in /etc/cloud/cloud.cfg*.
# Patch Xenial and Artful before release to default to False.
- if util.is_false(self.ds_cfg.get('apply_network_config', True)):
+ if util.is_false(self.ds_cfg.get("apply_network_config", True)):
self._network_config = None
return self._network_config
if self.network_json == sources.UNSET:
# this would happen if get_data hadn't been called. leave as UNSET
LOG.warning(
- 'Unexpected call to network_config when network_json is None.')
+ "Unexpected call to network_config when network_json is None."
+ )
return None
- LOG.debug('network config provided via network_json')
+ LOG.debug("network config provided via network_json")
self._network_config = openstack.convert_net_json(
- self.network_json, known_macs=None)
+ self.network_json, known_macs=None
+ )
return self._network_config
def _get_data(self):
@@ -134,7 +143,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
False when unable to contact metadata service or when metadata
format is invalid or disabled.
"""
- oracle_considered = 'Oracle' in self.sys_cfg.get('datasource_list')
+ oracle_considered = "Oracle" in self.sys_cfg.get("datasource_list")
if not detect_openstack(accept_oracle=not oracle_considered):
return False
@@ -142,8 +151,10 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
try:
with EphemeralDHCPv4(self.fallback_interface):
results = util.log_time(
- logfunc=LOG.debug, msg='Crawl of metadata service',
- func=self._crawl_metadata)
+ logfunc=LOG.debug,
+ msg="Crawl of metadata service",
+ func=self._crawl_metadata,
+ )
except (NoDHCPLeaseError, sources.InvalidMetaDataException) as e:
util.logexc(LOG, str(e))
return False
@@ -154,19 +165,19 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
util.logexc(LOG, str(e))
return False
- self.dsmode = self._determine_dsmode([results.get('dsmode')])
+ self.dsmode = self._determine_dsmode([results.get("dsmode")])
if self.dsmode == sources.DSMODE_DISABLED:
return False
- md = results.get('metadata', {})
+ md = results.get("metadata", {})
md = util.mergemanydict([md, DEFAULT_METADATA])
self.metadata = md
- self.ec2_metadata = results.get('ec2-metadata')
- self.network_json = results.get('networkdata')
- self.userdata_raw = results.get('userdata')
- self.version = results['version']
- self.files.update(results.get('files', {}))
+ self.ec2_metadata = results.get("ec2-metadata")
+ self.network_json = results.get("networkdata")
+ self.userdata_raw = results.get("userdata")
+ self.version = results["version"]
+ self.files.update(results.get("files", {}))
- vd = results.get('vendordata')
+ vd = results.get("vendordata")
self.vendordata_pure = vd
try:
self.vendordata_raw = sources.convert_vendordata(vd)
@@ -174,7 +185,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
LOG.warning("Invalid content in vendor-data: %s", e)
self.vendordata_raw = None
- vd2 = results.get('vendordata2')
+ vd2 = results.get("vendordata2")
self.vendordata2_pure = vd2
try:
self.vendordata2_raw = sources.convert_vendordata(vd2)
@@ -194,26 +205,35 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
try:
if not self.wait_for_metadata_service():
raise sources.InvalidMetaDataException(
- 'No active metadata service found')
+ "No active metadata service found"
+ )
except IOError as e:
raise sources.InvalidMetaDataException(
- 'IOError contacting metadata service: {error}'.format(
- error=str(e)))
+ "IOError contacting metadata service: {error}".format(
+ error=str(e)
+ )
+ )
url_params = self.get_url_params()
try:
result = util.log_time(
- LOG.debug, 'Crawl of openstack metadata service',
- read_metadata_service, args=[self.metadata_address],
- kwargs={'ssl_details': self.ssl_details,
- 'retries': url_params.num_retries,
- 'timeout': url_params.timeout_seconds})
+ LOG.debug,
+ "Crawl of openstack metadata service",
+ read_metadata_service,
+ args=[self.metadata_address],
+ kwargs={
+ "ssl_details": self.ssl_details,
+ "retries": url_params.num_retries,
+ "timeout": url_params.timeout_seconds,
+ },
+ )
except openstack.NonReadable as e:
raise sources.InvalidMetaDataException(str(e))
except (openstack.BrokenMetadata, IOError) as e:
- msg = 'Broken metadata address {addr}'.format(
- addr=self.metadata_address)
+ msg = "Broken metadata address {addr}".format(
+ addr=self.metadata_address
+ )
raise sources.InvalidMetaDataException(msg) from e
return result
@@ -230,10 +250,10 @@ class DataSourceOpenStackLocal(DataSourceOpenStack):
perform_dhcp_setup = True # Get metadata network config if present
-def read_metadata_service(base_url, ssl_details=None,
- timeout=5, retries=5):
- reader = openstack.MetadataReader(base_url, ssl_details=ssl_details,
- timeout=timeout, retries=retries)
+def read_metadata_service(base_url, ssl_details=None, timeout=5, retries=5):
+ reader = openstack.MetadataReader(
+ base_url, ssl_details=ssl_details, timeout=timeout, retries=retries
+ )
return reader.read_v2()
@@ -241,14 +261,14 @@ def detect_openstack(accept_oracle=False):
"""Return True when a potential OpenStack platform is detected."""
if not util.is_x86():
return True # Non-Intel cpus don't properly report dmi product names
- product_name = dmi.read_dmi_data('system-product-name')
+ product_name = dmi.read_dmi_data("system-product-name")
if product_name in VALID_DMI_PRODUCT_NAMES:
return True
- elif dmi.read_dmi_data('chassis-asset-tag') in VALID_DMI_ASSET_TAGS:
+ elif dmi.read_dmi_data("chassis-asset-tag") in VALID_DMI_ASSET_TAGS:
return True
elif accept_oracle and oracle._is_platform_viable():
return True
- elif util.get_proc_env(1).get('product_name') == DMI_PRODUCT_NOVA:
+ elif util.get_proc_env(1).get("product_name") == DMI_PRODUCT_NOVA:
return True
return False
@@ -264,4 +284,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
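Note on the URL handling reformatted in wait_for_metadata_service() above: each configured base URL gets an "openstack" path component appended, and the first child URL that responds is mapped back to its base address. A rough usage sketch, assuming cloud-init's url_helper is importable; the second base URL is an example value:

from cloudinit import url_helper

urls = ["http://169.254.169.254", "http://10.0.0.2:8775"]  # example values
md_urls = []
url2base = {}
for url in urls:
    md_url = url_helper.combine_url(url, "openstack")
    md_urls.append(md_url)
    url2base[md_url] = url
# url_helper.wait_for_url(urls=md_urls, ...) returns the first responsive
# child URL; url2base[avail_url] then becomes the metadata_address.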
diff --git a/cloudinit/sources/DataSourceOracle.py b/cloudinit/sources/DataSourceOracle.py
index fbb5312a..3f918672 100644
--- a/cloudinit/sources/DataSourceOracle.py
+++ b/cloudinit/sources/DataSourceOracle.py
@@ -32,7 +32,7 @@ LOG = logging.getLogger(__name__)
BUILTIN_DS_CONFIG = {
# Don't use IMDS to configure secondary NICs by default
- 'configure_secondary_nics': False,
+ "configure_secondary_nics": False,
}
CHASSIS_ASSET_TAG = "OracleCloud.com"
METADATA_ROOT = "http://169.254.169.254/opc/v{version}/"
@@ -61,43 +61,45 @@ def _ensure_netfailover_safe(network_config):
"""
# ignore anything that's not an actual network-config
- if 'version' not in network_config:
+ if "version" not in network_config:
return
- if network_config['version'] not in [1, 2]:
- LOG.debug('Ignoring unknown network config version: %s',
- network_config['version'])
+ if network_config["version"] not in [1, 2]:
+ LOG.debug(
+ "Ignoring unknown network config version: %s",
+ network_config["version"],
+ )
return
mac_to_name = get_interfaces_by_mac()
- if network_config['version'] == 1:
- for cfg in [c for c in network_config['config'] if 'type' in c]:
- if cfg['type'] == 'physical':
- if 'mac_address' in cfg:
- mac = cfg['mac_address']
+ if network_config["version"] == 1:
+ for cfg in [c for c in network_config["config"] if "type" in c]:
+ if cfg["type"] == "physical":
+ if "mac_address" in cfg:
+ mac = cfg["mac_address"]
cur_name = mac_to_name.get(mac)
if not cur_name:
continue
elif is_netfail_master(cur_name):
- del cfg['mac_address']
+ del cfg["mac_address"]
- elif network_config['version'] == 2:
- for _, cfg in network_config.get('ethernets', {}).items():
- if 'match' in cfg:
- macaddr = cfg.get('match', {}).get('macaddress')
+ elif network_config["version"] == 2:
+ for _, cfg in network_config.get("ethernets", {}).items():
+ if "match" in cfg:
+ macaddr = cfg.get("match", {}).get("macaddress")
if macaddr:
cur_name = mac_to_name.get(macaddr)
if not cur_name:
continue
elif is_netfail_master(cur_name):
- del cfg['match']['macaddress']
- del cfg['set-name']
- cfg['match']['name'] = cur_name
+ del cfg["match"]["macaddress"]
+ del cfg["set-name"]
+ cfg["match"]["name"] = cur_name
class DataSourceOracle(sources.DataSource):
- dsname = 'Oracle'
+ dsname = "Oracle"
system_uuid = None
vendordata_pure = None
network_config_sources = (
@@ -113,9 +115,12 @@ class DataSourceOracle(sources.DataSource):
super(DataSourceOracle, self).__init__(sys_cfg, *args, **kwargs)
self._vnics_data = None
- self.ds_cfg = util.mergemanydict([
- util.get_cfg_by_path(sys_cfg, ['datasource', self.dsname], {}),
- BUILTIN_DS_CONFIG])
+ self.ds_cfg = util.mergemanydict(
+ [
+ util.get_cfg_by_path(sys_cfg, ["datasource", self.dsname], {}),
+ BUILTIN_DS_CONFIG,
+ ]
+ )
def _is_platform_viable(self):
"""Check platform environment to report if this datasource may run."""
@@ -130,8 +135,8 @@ class DataSourceOracle(sources.DataSource):
# network may be configured if iscsi root. If that is the case
# then read_initramfs_config will return non-None.
fetch_vnics_data = self.ds_cfg.get(
- 'configure_secondary_nics',
- BUILTIN_DS_CONFIG["configure_secondary_nics"]
+ "configure_secondary_nics",
+ BUILTIN_DS_CONFIG["configure_secondary_nics"],
)
network_context = noop()
if not _is_iscsi_root():
@@ -140,7 +145,7 @@ class DataSourceOracle(sources.DataSource):
connectivity_url_data={
"url": METADATA_PATTERN.format(version=2, path="instance"),
"headers": V2_HEADERS,
- }
+ },
)
with network_context:
fetched_metadata = read_opc_metadata(
@@ -179,7 +184,7 @@ class DataSourceOracle(sources.DataSource):
return sources.instance_id_matches_system_uuid(self.system_uuid)
def get_public_ssh_keys(self):
- return sources.normalize_pubkey_data(self.metadata.get('public_keys'))
+ return sources.normalize_pubkey_data(self.metadata.get("public_keys"))
@property
def network_config(self):
@@ -196,8 +201,8 @@ class DataSourceOracle(sources.DataSource):
self._network_config = self.distro.generate_fallback_config()
if self.ds_cfg.get(
- 'configure_secondary_nics',
- BUILTIN_DS_CONFIG["configure_secondary_nics"]
+ "configure_secondary_nics",
+ BUILTIN_DS_CONFIG["configure_secondary_nics"],
):
try:
# Mutate self._network_config to include secondary
@@ -205,8 +210,8 @@ class DataSourceOracle(sources.DataSource):
self._add_network_config_from_opc_imds()
except Exception:
util.logexc(
- LOG,
- "Failed to parse secondary network configuration!")
+ LOG, "Failed to parse secondary network configuration!"
+ )
# we need to verify that the nic selected is not a netfail over
# device and, if it is a netfail master, then we need to avoid
@@ -230,11 +235,10 @@ class DataSourceOracle(sources.DataSource):
(if the IMDS returns valid JSON with unexpected contents).
"""
if self._vnics_data is None:
- LOG.warning(
- "Secondary NIC data is UNSET but should not be")
+ LOG.warning("Secondary NIC data is UNSET but should not be")
return
- if 'nicIndex' in self._vnics_data[0]:
+ if "nicIndex" in self._vnics_data[0]:
# TODO: Once configure_secondary_nics defaults to True, lower the
# level of this log message. (Currently, if we're running this
# code at all, someone has explicitly opted-in to secondary
@@ -243,8 +247,8 @@ class DataSourceOracle(sources.DataSource):
# Metal Machine launch, which means INFO or DEBUG would be more
# appropriate.)
LOG.warning(
- 'VNIC metadata indicates this is a bare metal machine; '
- 'skipping secondary VNIC configuration.'
+ "VNIC metadata indicates this is a bare metal machine; "
+ "skipping secondary VNIC configuration."
)
return
@@ -254,39 +258,45 @@ class DataSourceOracle(sources.DataSource):
# We skip the first entry in the response because the primary
# interface is already configured by iSCSI boot; applying
# configuration from the IMDS is not required.
- mac_address = vnic_dict['macAddr'].lower()
+ mac_address = vnic_dict["macAddr"].lower()
if mac_address not in interfaces_by_mac:
- LOG.debug('Interface with MAC %s not found; skipping',
- mac_address)
+ LOG.debug(
+ "Interface with MAC %s not found; skipping", mac_address
+ )
continue
name = interfaces_by_mac[mac_address]
- if self._network_config['version'] == 1:
+ if self._network_config["version"] == 1:
subnet = {
- 'type': 'static',
- 'address': vnic_dict['privateIp'],
+ "type": "static",
+ "address": vnic_dict["privateIp"],
+ }
+ self._network_config["config"].append(
+ {
+ "name": name,
+ "type": "physical",
+ "mac_address": mac_address,
+ "mtu": MTU,
+ "subnets": [subnet],
+ }
+ )
+ elif self._network_config["version"] == 2:
+ self._network_config["ethernets"][name] = {
+ "addresses": [vnic_dict["privateIp"]],
+ "mtu": MTU,
+ "dhcp4": False,
+ "dhcp6": False,
+ "match": {"macaddress": mac_address},
}
- self._network_config['config'].append({
- 'name': name,
- 'type': 'physical',
- 'mac_address': mac_address,
- 'mtu': MTU,
- 'subnets': [subnet],
- })
- elif self._network_config['version'] == 2:
- self._network_config['ethernets'][name] = {
- 'addresses': [vnic_dict['privateIp']],
- 'mtu': MTU, 'dhcp4': False, 'dhcp6': False,
- 'match': {'macaddress': mac_address}}
def _read_system_uuid():
- sys_uuid = dmi.read_dmi_data('system-uuid')
+ sys_uuid = dmi.read_dmi_data("system-uuid")
return None if sys_uuid is None else sys_uuid.lower()
def _is_platform_viable():
- asset_tag = dmi.read_dmi_data('chassis-asset-tag')
+ asset_tag = dmi.read_dmi_data("chassis-asset-tag")
return asset_tag == CHASSIS_ASSET_TAG
@@ -329,8 +339,9 @@ def read_opc_metadata(*, fetch_vnics_data: bool = False):
try:
vnics_data = _fetch(metadata_version, path="vnics")
except UrlError:
- util.logexc(LOG,
- "Failed to fetch secondary network configuration!")
+ util.logexc(
+ LOG, "Failed to fetch secondary network configuration!"
+ )
return OpcMetadata(metadata_version, instance_data, vnics_data)
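Note on the _ensure_netfailover_safe() rewrite above: for a version-2 config whose MAC belongs to a netfail master interface, the MAC-based match and set-name are dropped and the current kernel name is matched instead, so the failover device is left untouched. A minimal sketch of that transformation on a single ethernet entry (interface name and MAC are hypothetical):

def strip_netfail_match(cfg, cur_name):
    # mirror of the version-2 branch: match by current name, not by MAC
    del cfg["match"]["macaddress"]
    del cfg["set-name"]
    cfg["match"]["name"] = cur_name

iface = {"match": {"macaddress": "02:00:17:aa:bb:cc"}, "set-name": "ens3"}
strip_netfail_match(iface, "ens3")
assert iface == {"match": {"name": "ens3"}}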
diff --git a/cloudinit/sources/DataSourceRbxCloud.py b/cloudinit/sources/DataSourceRbxCloud.py
index bb69e998..14ac77e4 100644
--- a/cloudinit/sources/DataSourceRbxCloud.py
+++ b/cloudinit/sources/DataSourceRbxCloud.py
@@ -14,32 +14,34 @@ import os
import os.path
from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import sources, subp, util
from cloudinit.event import EventScope, EventType
LOG = logging.getLogger(__name__)
-ETC_HOSTS = '/etc/hosts'
+ETC_HOSTS = "/etc/hosts"
def get_manage_etc_hosts():
hosts = util.load_file(ETC_HOSTS, quiet=True)
if hosts:
- LOG.debug('/etc/hosts exists - setting manage_etc_hosts to False')
+ LOG.debug("/etc/hosts exists - setting manage_etc_hosts to False")
return False
- LOG.debug('/etc/hosts does not exists - setting manage_etc_hosts to True')
+ LOG.debug("/etc/hosts does not exists - setting manage_etc_hosts to True")
return True
def ip2int(addr):
- parts = addr.split('.')
- return (int(parts[0]) << 24) + (int(parts[1]) << 16) + \
- (int(parts[2]) << 8) + int(parts[3])
+ parts = addr.split(".")
+ return (
+ (int(parts[0]) << 24)
+ + (int(parts[1]) << 16)
+ + (int(parts[2]) << 8)
+ + int(parts[3])
+ )
def int2ip(addr):
- return '.'.join([str(addr >> (i << 3) & 0xFF) for i in range(4)[::-1]])
+ return ".".join([str(addr >> (i << 3) & 0xFF) for i in range(4)[::-1]])
def _sub_arp(cmd):
@@ -48,33 +50,35 @@ def _sub_arp(cmd):
and runs arping. Breaking this to a separate function
for later use in mocking and unittests
"""
- return subp.subp(['arping'] + cmd)
+ return subp.subp(["arping"] + cmd)
def gratuitous_arp(items, distro):
- source_param = '-S'
- if distro.name in ['fedora', 'centos', 'rhel']:
- source_param = '-s'
+ source_param = "-S"
+ if distro.name in ["fedora", "centos", "rhel"]:
+ source_param = "-s"
for item in items:
try:
- _sub_arp([
- '-c', '2',
- source_param, item['source'],
- item['destination']
- ])
+ _sub_arp(
+ ["-c", "2", source_param, item["source"], item["destination"]]
+ )
except subp.ProcessExecutionError as error:
# warning, because the system is able to function properly
# despite no success - some ARP table may be waiting for
# expiration, but the system may continue
- LOG.warning('Failed to arping from "%s" to "%s": %s',
- item['source'], item['destination'], error)
+ LOG.warning(
+ 'Failed to arping from "%s" to "%s": %s',
+ item["source"],
+ item["destination"],
+ error,
+ )
def get_md():
"""Returns False (not found or error) or a dictionary with metadata."""
devices = set(
- util.find_devs_with('LABEL=CLOUDMD') +
- util.find_devs_with('LABEL=cloudmd')
+ util.find_devs_with("LABEL=CLOUDMD")
+ + util.find_devs_with("LABEL=cloudmd")
)
if not devices:
return False
@@ -83,7 +87,7 @@ def get_md():
rbx_data = util.mount_cb(
device=device,
callback=read_user_data_callback,
- mtype=['vfat', 'fat', 'msdosfs']
+ mtype=["vfat", "fat", "msdosfs"],
)
if rbx_data:
return rbx_data
@@ -91,11 +95,13 @@ def get_md():
if err.errno != errno.ENOENT:
raise
except util.MountFailedError:
- util.logexc(LOG, "Failed to mount %s when looking for user "
- "data", device)
+ util.logexc(
+ LOG, "Failed to mount %s when looking for user data", device
+ )
- LOG.debug("Did not find RbxCloud data, searched devices: %s",
- ",".join(devices))
+ LOG.debug(
+ "Did not find RbxCloud data, searched devices: %s", ",".join(devices)
+ )
return False
@@ -107,25 +113,28 @@ def generate_network_config(netadps):
@returns: A dict containing network config
"""
return {
- 'version': 1,
- 'config': [
+ "version": 1,
+ "config": [
{
- 'type': 'physical',
- 'name': 'eth{}'.format(str(i)),
- 'mac_address': netadp['macaddress'].lower(),
- 'subnets': [
+ "type": "physical",
+ "name": "eth{}".format(str(i)),
+ "mac_address": netadp["macaddress"].lower(),
+ "subnets": [
{
- 'type': 'static',
- 'address': ip['address'],
- 'netmask': netadp['network']['netmask'],
- 'control': 'auto',
- 'gateway': netadp['network']['gateway'],
- 'dns_nameservers': netadp['network']['dns'][
- 'nameservers']
- } for ip in netadp['ip']
+ "type": "static",
+ "address": ip["address"],
+ "netmask": netadp["network"]["netmask"],
+ "control": "auto",
+ "gateway": netadp["network"]["gateway"],
+ "dns_nameservers": netadp["network"]["dns"][
+ "nameservers"
+ ],
+ }
+ for ip in netadp["ip"]
],
- } for i, netadp in enumerate(netadps)
- ]
+ }
+ for i, netadp in enumerate(netadps)
+ ],
}
@@ -140,65 +149,60 @@ def read_user_data_callback(mount_dir):
"""
meta_data = util.load_json(
text=util.load_file(
- fname=os.path.join(mount_dir, 'cloud.json'),
- decode=False
+ fname=os.path.join(mount_dir, "cloud.json"), decode=False
)
)
user_data = util.load_file(
- fname=os.path.join(mount_dir, 'user.data'),
- quiet=True
+ fname=os.path.join(mount_dir, "user.data"), quiet=True
)
- if 'vm' not in meta_data or 'netadp' not in meta_data:
+ if "vm" not in meta_data or "netadp" not in meta_data:
util.logexc(LOG, "Failed to load metadata. Invalid format.")
return None
- username = meta_data.get('additionalMetadata', {}).get('username')
- ssh_keys = meta_data.get('additionalMetadata', {}).get('sshKeys', [])
+ username = meta_data.get("additionalMetadata", {}).get("username")
+ ssh_keys = meta_data.get("additionalMetadata", {}).get("sshKeys", [])
hash = None
- if meta_data.get('additionalMetadata', {}).get('password'):
- hash = meta_data['additionalMetadata']['password']['sha512']
+ if meta_data.get("additionalMetadata", {}).get("password"):
+ hash = meta_data["additionalMetadata"]["password"]["sha512"]
- network = generate_network_config(meta_data['netadp'])
+ network = generate_network_config(meta_data["netadp"])
data = {
- 'userdata': user_data,
- 'metadata': {
- 'instance-id': meta_data['vm']['_id'],
- 'local-hostname': meta_data['vm']['name'],
- 'public-keys': []
+ "userdata": user_data,
+ "metadata": {
+ "instance-id": meta_data["vm"]["_id"],
+ "local-hostname": meta_data["vm"]["name"],
+ "public-keys": [],
},
- 'gratuitous_arp': [
- {
- "source": ip["address"],
- "destination": target
- }
- for netadp in meta_data['netadp']
- for ip in netadp['ip']
+ "gratuitous_arp": [
+ {"source": ip["address"], "destination": target}
+ for netadp in meta_data["netadp"]
+ for ip in netadp["ip"]
for target in [
- netadp['network']["gateway"],
- int2ip(ip2int(netadp['network']["gateway"]) + 2),
- int2ip(ip2int(netadp['network']["gateway"]) + 3)
+ netadp["network"]["gateway"],
+ int2ip(ip2int(netadp["network"]["gateway"]) + 2),
+ int2ip(ip2int(netadp["network"]["gateway"]) + 3),
]
],
- 'cfg': {
- 'ssh_pwauth': True,
- 'disable_root': True,
- 'system_info': {
- 'default_user': {
- 'name': username,
- 'gecos': username,
- 'sudo': ['ALL=(ALL) NOPASSWD:ALL'],
- 'passwd': hash,
- 'lock_passwd': False,
- 'ssh_authorized_keys': ssh_keys,
+ "cfg": {
+ "ssh_pwauth": True,
+ "disable_root": True,
+ "system_info": {
+ "default_user": {
+ "name": username,
+ "gecos": username,
+ "sudo": ["ALL=(ALL) NOPASSWD:ALL"],
+ "passwd": hash,
+ "lock_passwd": False,
+ "ssh_authorized_keys": ssh_keys,
}
},
- 'network_config': network,
- 'manage_etc_hosts': get_manage_etc_hosts(),
+ "network_config": network,
+ "manage_etc_hosts": get_manage_etc_hosts(),
},
}
- LOG.debug('returning DATA object:')
+ LOG.debug("returning DATA object:")
LOG.debug(data)
return data
@@ -206,11 +210,13 @@ def read_user_data_callback(mount_dir):
class DataSourceRbxCloud(sources.DataSource):
dsname = "RbxCloud"
- default_update_events = {EventScope.NETWORK: {
- EventType.BOOT_NEW_INSTANCE,
- EventType.BOOT,
- EventType.BOOT_LEGACY
- }}
+ default_update_events = {
+ EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ EventType.BOOT_LEGACY,
+ }
+ }
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -228,18 +234,18 @@ class DataSourceRbxCloud(sources.DataSource):
rbx_data = get_md()
if rbx_data is False:
return False
- self.userdata_raw = rbx_data['userdata']
- self.metadata = rbx_data['metadata']
- self.gratuitous_arp = rbx_data['gratuitous_arp']
- self.cfg = rbx_data['cfg']
+ self.userdata_raw = rbx_data["userdata"]
+ self.metadata = rbx_data["metadata"]
+ self.gratuitous_arp = rbx_data["gratuitous_arp"]
+ self.cfg = rbx_data["cfg"]
return True
@property
def network_config(self):
- return self.cfg['network_config']
+ return self.cfg["network_config"]
def get_public_ssh_keys(self):
- return self.metadata['public-keys']
+ return self.metadata["public-keys"]
def get_userdata_raw(self):
return self.userdata_raw
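Note on the address arithmetic reformatted above: ip2int()/int2ip() convert between dotted-quad strings and 32-bit integers, which is how the gratuitous-ARP list targets the gateway plus the gateway+2 and gateway+3 addresses. A standalone sketch with a made-up gateway:

def ip2int(addr):
    a, b, c, d = (int(p) for p in addr.split("."))
    return (a << 24) + (b << 16) + (c << 8) + d

def int2ip(addr):
    return ".".join(str((addr >> (i << 3)) & 0xFF) for i in range(4)[::-1])

gateway = "10.0.0.1"  # hypothetical value
targets = [gateway, int2ip(ip2int(gateway) + 2), int2ip(ip2int(gateway) + 3)]
assert targets == ["10.0.0.1", "10.0.0.3", "10.0.0.4"]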
diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py
index 7b8974a2..8e5dd82c 100644
--- a/cloudinit/sources/DataSourceScaleway.py
+++ b/cloudinit/sources/DataSourceScaleway.py
@@ -27,21 +27,18 @@ from requests.packages.urllib3.poolmanager import PoolManager
from cloudinit import dmi
from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import url_helper
-from cloudinit import util
-from cloudinit import net
+from cloudinit import net, sources, url_helper, util
from cloudinit.event import EventScope, EventType
from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
LOG = logging.getLogger(__name__)
-DS_BASE_URL = 'http://169.254.42.42'
+DS_BASE_URL = "http://169.254.42.42"
BUILTIN_DS_CONFIG = {
- 'metadata_url': DS_BASE_URL + '/conf?format=json',
- 'userdata_url': DS_BASE_URL + '/user_data/cloud-init',
- 'vendordata_url': DS_BASE_URL + '/vendor_data/cloud-init'
+ "metadata_url": DS_BASE_URL + "/conf?format=json",
+ "userdata_url": DS_BASE_URL + "/user_data/cloud-init",
+ "vendordata_url": DS_BASE_URL + "/vendor_data/cloud-init",
}
DEF_MD_RETRIES = 5
@@ -57,15 +54,15 @@ def on_scaleway():
* the initrd created the file /var/run/scaleway.
* "scaleway" is in the kernel cmdline.
"""
- vendor_name = dmi.read_dmi_data('system-manufacturer')
- if vendor_name == 'Scaleway':
+ vendor_name = dmi.read_dmi_data("system-manufacturer")
+ if vendor_name == "Scaleway":
return True
- if os.path.exists('/var/run/scaleway'):
+ if os.path.exists("/var/run/scaleway"):
return True
cmdline = util.get_cmdline()
- if 'scaleway' in cmdline:
+ if "scaleway" in cmdline:
return True
return False
@@ -75,6 +72,7 @@ class SourceAddressAdapter(requests.adapters.HTTPAdapter):
"""
Adapter for requests to choose the local address to bind to.
"""
+
def __init__(self, source_address, **kwargs):
self.source_address = source_address
super(SourceAddressAdapter, self).__init__(**kwargs)
@@ -83,11 +81,13 @@ class SourceAddressAdapter(requests.adapters.HTTPAdapter):
socket_options = HTTPConnection.default_socket_options + [
(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
]
- self.poolmanager = PoolManager(num_pools=connections,
- maxsize=maxsize,
- block=block,
- source_address=self.source_address,
- socket_options=socket_options)
+ self.poolmanager = PoolManager(
+ num_pools=connections,
+ maxsize=maxsize,
+ block=block,
+ source_address=self.source_address,
+ socket_options=socket_options,
+ )
def query_data_api_once(api_address, timeout, requests_session):
@@ -117,9 +117,10 @@ def query_data_api_once(api_address, timeout, requests_session):
session=requests_session,
# If the error is a HTTP/404 or a ConnectionError, go into raise
# block below and don't bother retrying.
- exception_cb=lambda _, exc: exc.code != 404 and (
+ exception_cb=lambda _, exc: exc.code != 404
+ and (
not isinstance(exc.cause, requests.exceptions.ConnectionError)
- )
+ ),
)
return util.decode_binary(resp.contents)
except url_helper.UrlError as exc:
@@ -143,25 +144,22 @@ def query_data_api(api_type, api_address, retries, timeout):
for port in range(1, max(retries, 2)):
try:
LOG.debug(
- 'Trying to get %s data (bind on port %d)...',
- api_type, port
+ "Trying to get %s data (bind on port %d)...", api_type, port
)
requests_session = requests.Session()
requests_session.mount(
- 'http://',
- SourceAddressAdapter(source_address=('0.0.0.0', port))
+ "http://",
+ SourceAddressAdapter(source_address=("0.0.0.0", port)),
)
data = query_data_api_once(
- api_address,
- timeout=timeout,
- requests_session=requests_session
+ api_address, timeout=timeout, requests_session=requests_session
)
- LOG.debug('%s-data downloaded', api_type)
+ LOG.debug("%s-data downloaded", api_type)
return data
except url_helper.UrlError as exc:
# Local port already in use or HTTP/429.
- LOG.warning('Error while trying to get %s data: %s', api_type, exc)
+ LOG.warning("Error while trying to get %s data: %s", api_type, exc)
time.sleep(5)
last_exc = exc
continue
@@ -176,40 +174,40 @@ class DataSourceScaleway(sources.DataSource):
EventScope.NETWORK: {
EventType.BOOT_NEW_INSTANCE,
EventType.BOOT,
- EventType.BOOT_LEGACY
+ EventType.BOOT_LEGACY,
}
}
def __init__(self, sys_cfg, distro, paths):
super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths)
- self.ds_cfg = util.mergemanydict([
- util.get_cfg_by_path(sys_cfg, ["datasource", "Scaleway"], {}),
- BUILTIN_DS_CONFIG
- ])
+ self.ds_cfg = util.mergemanydict(
+ [
+ util.get_cfg_by_path(sys_cfg, ["datasource", "Scaleway"], {}),
+ BUILTIN_DS_CONFIG,
+ ]
+ )
- self.metadata_address = self.ds_cfg['metadata_url']
- self.userdata_address = self.ds_cfg['userdata_url']
- self.vendordata_address = self.ds_cfg['vendordata_url']
+ self.metadata_address = self.ds_cfg["metadata_url"]
+ self.userdata_address = self.ds_cfg["userdata_url"]
+ self.vendordata_address = self.ds_cfg["vendordata_url"]
- self.retries = int(self.ds_cfg.get('retries', DEF_MD_RETRIES))
- self.timeout = int(self.ds_cfg.get('timeout', DEF_MD_TIMEOUT))
+ self.retries = int(self.ds_cfg.get("retries", DEF_MD_RETRIES))
+ self.timeout = int(self.ds_cfg.get("timeout", DEF_MD_TIMEOUT))
self._fallback_interface = None
self._network_config = sources.UNSET
def _crawl_metadata(self):
- resp = url_helper.readurl(self.metadata_address,
- timeout=self.timeout,
- retries=self.retries)
+ resp = url_helper.readurl(
+ self.metadata_address, timeout=self.timeout, retries=self.retries
+ )
self.metadata = json.loads(util.decode_binary(resp.contents))
self.userdata_raw = query_data_api(
- 'user-data', self.userdata_address,
- self.retries, self.timeout
+ "user-data", self.userdata_address, self.retries, self.timeout
)
self.vendordata_raw = query_data_api(
- 'vendor-data', self.vendordata_address,
- self.retries, self.timeout
+ "vendor-data", self.vendordata_address, self.retries, self.timeout
)
def _get_data(self):
@@ -221,8 +219,10 @@ class DataSourceScaleway(sources.DataSource):
try:
with EphemeralDHCPv4(self._fallback_interface):
util.log_time(
- logfunc=LOG.debug, msg='Crawl of metadata service',
- func=self._crawl_metadata)
+ logfunc=LOG.debug,
+ msg="Crawl of metadata service",
+ func=self._crawl_metadata,
+ )
except (NoDHCPLeaseError) as e:
util.logexc(LOG, str(e))
return False
@@ -235,8 +235,10 @@ class DataSourceScaleway(sources.DataSource):
metadata API.
"""
if self._network_config is None:
- LOG.warning('Found None as cached _network_config. '
- 'Resetting to %s', sources.UNSET)
+ LOG.warning(
+ "Found None as cached _network_config. Resetting to %s",
+ sources.UNSET,
+ )
self._network_config = sources.UNSET
if self._network_config != sources.UNSET:
@@ -245,16 +247,19 @@ class DataSourceScaleway(sources.DataSource):
if self._fallback_interface is None:
self._fallback_interface = net.find_fallback_nic()
- netcfg = {'type': 'physical', 'name': '%s' % self._fallback_interface}
- subnets = [{'type': 'dhcp4'}]
- if self.metadata['ipv6']:
- subnets += [{'type': 'static',
- 'address': '%s' % self.metadata['ipv6']['address'],
- 'gateway': '%s' % self.metadata['ipv6']['gateway'],
- 'netmask': '%s' % self.metadata['ipv6']['netmask'],
- }]
- netcfg['subnets'] = subnets
- self._network_config = {'version': 1, 'config': [netcfg]}
+ netcfg = {"type": "physical", "name": "%s" % self._fallback_interface}
+ subnets = [{"type": "dhcp4"}]
+ if self.metadata["ipv6"]:
+ subnets += [
+ {
+ "type": "static",
+ "address": "%s" % self.metadata["ipv6"]["address"],
+ "gateway": "%s" % self.metadata["ipv6"]["gateway"],
+ "netmask": "%s" % self.metadata["ipv6"]["netmask"],
+ }
+ ]
+ netcfg["subnets"] = subnets
+ self._network_config = {"version": 1, "config": [netcfg]}
return self._network_config
@property
@@ -262,14 +267,14 @@ class DataSourceScaleway(sources.DataSource):
return None
def get_instance_id(self):
- return self.metadata['id']
+ return self.metadata["id"]
def get_public_ssh_keys(self):
- ssh_keys = [key['key'] for key in self.metadata['ssh_public_keys']]
+ ssh_keys = [key["key"] for key in self.metadata["ssh_public_keys"]]
akeypre = "AUTHORIZED_KEY="
plen = len(akeypre)
- for tag in self.metadata.get('tags', []):
+ for tag in self.metadata.get("tags", []):
if not tag.startswith(akeypre):
continue
ssh_keys.append(tag[:plen].replace("_", " "))
@@ -277,7 +282,7 @@ class DataSourceScaleway(sources.DataSource):
return ssh_keys
def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False):
- return self.metadata['hostname']
+ return self.metadata["hostname"]
@property
def availability_zone(self):
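Note on the Scaleway retry loop reformatted above: the metadata API distinguishes callers by their local source port, so each attempt mounts a SourceAddressAdapter bound to a specific port on a fresh requests session. A rough usage sketch, assuming the adapter and query_data_api_once() from this module are importable; the port and endpoint below are example values taken from the defaults above:

import requests

from cloudinit.sources.DataSourceScaleway import (
    SourceAddressAdapter,
    query_data_api_once,
)

session = requests.Session()
session.mount(
    "http://",
    SourceAddressAdapter(source_address=("0.0.0.0", 1)),
)
data = query_data_api_once(
    "http://169.254.42.42/user_data/cloud-init",
    timeout=10,
    requests_session=session,
)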
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index 9b16bf8d..40f915fa 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -32,55 +32,51 @@ import socket
from cloudinit import dmi
from cloudinit import log as logging
-from cloudinit import serial
-from cloudinit import sources
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import serial, sources, subp, util
from cloudinit.event import EventScope, EventType
LOG = logging.getLogger(__name__)
SMARTOS_ATTRIB_MAP = {
# Cloud-init Key : (SmartOS Key, Strip line endings)
- 'instance-id': ('sdc:uuid', True),
- 'local-hostname': ('hostname', True),
- 'public-keys': ('root_authorized_keys', True),
- 'user-script': ('user-script', False),
- 'legacy-user-data': ('user-data', False),
- 'user-data': ('cloud-init:user-data', False),
- 'iptables_disable': ('iptables_disable', True),
- 'motd_sys_info': ('motd_sys_info', True),
- 'availability_zone': ('sdc:datacenter_name', True),
- 'vendor-data': ('sdc:vendor-data', False),
- 'operator-script': ('sdc:operator-script', False),
- 'hostname': ('sdc:hostname', True),
- 'dns_domain': ('sdc:dns_domain', True),
+ "instance-id": ("sdc:uuid", True),
+ "local-hostname": ("hostname", True),
+ "public-keys": ("root_authorized_keys", True),
+ "user-script": ("user-script", False),
+ "legacy-user-data": ("user-data", False),
+ "user-data": ("cloud-init:user-data", False),
+ "iptables_disable": ("iptables_disable", True),
+ "motd_sys_info": ("motd_sys_info", True),
+ "availability_zone": ("sdc:datacenter_name", True),
+ "vendor-data": ("sdc:vendor-data", False),
+ "operator-script": ("sdc:operator-script", False),
+ "hostname": ("sdc:hostname", True),
+ "dns_domain": ("sdc:dns_domain", True),
}
SMARTOS_ATTRIB_JSON = {
# Cloud-init Key : (SmartOS Key known JSON)
- 'network-data': 'sdc:nics',
- 'dns_servers': 'sdc:resolvers',
- 'routes': 'sdc:routes',
+ "network-data": "sdc:nics",
+ "dns_servers": "sdc:resolvers",
+ "routes": "sdc:routes",
}
SMARTOS_ENV_LX_BRAND = "lx-brand"
SMARTOS_ENV_KVM = "kvm"
-DS_NAME = 'SmartOS'
-DS_CFG_PATH = ['datasource', DS_NAME]
+DS_NAME = "SmartOS"
+DS_CFG_PATH = ["datasource", DS_NAME]
NO_BASE64_DECODE = [
- 'iptables_disable',
- 'motd_sys_info',
- 'root_authorized_keys',
- 'sdc:datacenter_name',
- 'sdc:uuid'
- 'user-data',
- 'user-script',
+ "iptables_disable",
+ "motd_sys_info",
+ "root_authorized_keys",
+ "sdc:datacenter_name",
+ "sdc:uuiduser-data",
+ "user-script",
]
-METADATA_SOCKFILE = '/native/.zonecontrol/metadata.sock'
-SERIAL_DEVICE = '/dev/ttyS1'
+METADATA_SOCKFILE = "/native/.zonecontrol/metadata.sock"
+SERIAL_DEVICE = "/dev/ttyS1"
SERIAL_TIMEOUT = 60
# BUILT-IN DATASOURCE CONFIGURATION
@@ -98,24 +94,26 @@ SERIAL_TIMEOUT = 60
# fs_setup: describes how to format the ephemeral drive
#
BUILTIN_DS_CONFIG = {
- 'serial_device': SERIAL_DEVICE,
- 'serial_timeout': SERIAL_TIMEOUT,
- 'metadata_sockfile': METADATA_SOCKFILE,
- 'no_base64_decode': NO_BASE64_DECODE,
- 'base64_keys': [],
- 'base64_all': False,
- 'disk_aliases': {'ephemeral0': '/dev/vdb'},
+ "serial_device": SERIAL_DEVICE,
+ "serial_timeout": SERIAL_TIMEOUT,
+ "metadata_sockfile": METADATA_SOCKFILE,
+ "no_base64_decode": NO_BASE64_DECODE,
+ "base64_keys": [],
+ "base64_all": False,
+ "disk_aliases": {"ephemeral0": "/dev/vdb"},
}
BUILTIN_CLOUD_CONFIG = {
- 'disk_setup': {
- 'ephemeral0': {'table_type': 'mbr',
- 'layout': False,
- 'overwrite': False}
+ "disk_setup": {
+ "ephemeral0": {
+ "table_type": "mbr",
+ "layout": False,
+ "overwrite": False,
+ }
},
- 'fs_setup': [{'label': 'ephemeral0',
- 'filesystem': 'ext4',
- 'device': 'ephemeral0'}],
+ "fs_setup": [
+ {"label": "ephemeral0", "filesystem": "ext4", "device": "ephemeral0"}
+ ],
}
# builtin vendor-data is a boothook that writes a script into
@@ -170,18 +168,23 @@ class DataSourceSmartOS(sources.DataSource):
smartos_type = sources.UNSET
md_client = sources.UNSET
- default_update_events = {EventScope.NETWORK: {
- EventType.BOOT_NEW_INSTANCE,
- EventType.BOOT,
- EventType.BOOT_LEGACY
- }}
+ default_update_events = {
+ EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ EventType.BOOT_LEGACY,
+ }
+ }
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.ds_cfg = util.mergemanydict([
- self.ds_cfg,
- util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
- BUILTIN_DS_CONFIG])
+ self.ds_cfg = util.mergemanydict(
+ [
+ self.ds_cfg,
+ util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
+ BUILTIN_DS_CONFIG,
+ ]
+ )
self.metadata = {}
self.network_data = None
@@ -204,25 +207,28 @@ class DataSourceSmartOS(sources.DataSource):
if self.md_client == sources.UNSET:
self.md_client = jmc_client_factory(
smartos_type=self.smartos_type,
- metadata_sockfile=self.ds_cfg['metadata_sockfile'],
- serial_device=self.ds_cfg['serial_device'],
- serial_timeout=self.ds_cfg['serial_timeout'])
+ metadata_sockfile=self.ds_cfg["metadata_sockfile"],
+ serial_device=self.ds_cfg["serial_device"],
+ serial_timeout=self.ds_cfg["serial_timeout"],
+ )
def _set_provisioned(self):
- '''Mark the instance provisioning state as successful.
+ """Mark the instance provisioning state as successful.
When run in a zone, the host OS will look for /var/svc/provisioning
to be renamed as /var/svc/provision_success. This should be done
after meta-data is successfully retrieved and from this point
the host considers the provision of the zone to be a success and
keeps the zone running.
- '''
+ """
- LOG.debug('Instance provisioning state set as successful')
- svc_path = '/var/svc'
- if os.path.exists('/'.join([svc_path, 'provisioning'])):
- os.rename('/'.join([svc_path, 'provisioning']),
- '/'.join([svc_path, 'provision_success']))
+ LOG.debug("Instance provisioning state set as successful")
+ svc_path = "/var/svc"
+ if os.path.exists("/".join([svc_path, "provisioning"])):
+ os.rename(
+ "/".join([svc_path, "provisioning"]),
+ "/".join([svc_path, "provision_success"]),
+ )
def _get_data(self):
self._init()
@@ -235,8 +241,10 @@ class DataSourceSmartOS(sources.DataSource):
return False
if not self.md_client.exists():
- LOG.debug("No metadata device '%r' found for SmartOS datasource",
- self.md_client)
+ LOG.debug(
+ "No metadata device '%r' found for SmartOS datasource",
+ self.md_client,
+ )
return False
# Open once for many requests, rather than once for each request
@@ -259,24 +267,33 @@ class DataSourceSmartOS(sources.DataSource):
# We write 'user-script' and 'operator-script' into the
# instance/data directory. The default vendor-data then handles
# executing them later.
- data_d = os.path.join(self.paths.get_cpath(), 'instances',
- md['instance-id'], 'data')
- user_script = os.path.join(data_d, 'user-script')
+ data_d = os.path.join(
+ self.paths.get_cpath(), "instances", md["instance-id"], "data"
+ )
+ user_script = os.path.join(data_d, "user-script")
u_script_l = "%s/user-script" % LEGACY_USER_D
- write_boot_content(md.get('user-script'), content_f=user_script,
- link=u_script_l, shebang=True, mode=0o700)
-
- operator_script = os.path.join(data_d, 'operator-script')
- write_boot_content(md.get('operator-script'),
- content_f=operator_script, shebang=False,
- mode=0o700)
+ write_boot_content(
+ md.get("user-script"),
+ content_f=user_script,
+ link=u_script_l,
+ shebang=True,
+ mode=0o700,
+ )
+
+ operator_script = os.path.join(data_d, "operator-script")
+ write_boot_content(
+ md.get("operator-script"),
+ content_f=operator_script,
+ shebang=False,
+ mode=0o700,
+ )
# @datadictionary: This key has no defined format, but its value
# is written to the file /var/db/mdata-user-data on each boot prior
# to the phase that runs user-script. This file is not to be executed.
# This allows a configuration file of some kind to be injected into
# the machine to be consumed by the user-script when it runs.
- u_data = md.get('legacy-user-data')
+ u_data = md.get("legacy-user-data")
u_data_f = "%s/mdata-user-data" % LEGACY_USER_D
write_boot_content(u_data, u_data_f)
@@ -284,38 +301,39 @@ class DataSourceSmartOS(sources.DataSource):
# The hostname may or may not be qualified with the local domain name.
# This follows section 3.14 of RFC 2132.
- if not md['local-hostname']:
- if md['hostname']:
- md['local-hostname'] = md['hostname']
+ if not md["local-hostname"]:
+ if md["hostname"]:
+ md["local-hostname"] = md["hostname"]
else:
- md['local-hostname'] = md['instance-id']
+ md["local-hostname"] = md["instance-id"]
ud = None
- if md['user-data']:
- ud = md['user-data']
-
- if not md['vendor-data']:
- md['vendor-data'] = BUILTIN_VENDOR_DATA % {
- 'user_script': user_script,
- 'operator_script': operator_script,
- 'per_boot_d': os.path.join(self.paths.get_cpath("scripts"),
- 'per-boot'),
+ if md["user-data"]:
+ ud = md["user-data"]
+
+ if not md["vendor-data"]:
+ md["vendor-data"] = BUILTIN_VENDOR_DATA % {
+ "user_script": user_script,
+ "operator_script": operator_script,
+ "per_boot_d": os.path.join(
+ self.paths.get_cpath("scripts"), "per-boot"
+ ),
}
self.metadata = util.mergemanydict([md, self.metadata])
self.userdata_raw = ud
- self.vendordata_raw = md['vendor-data']
- self.network_data = md['network-data']
- self.routes_data = md['routes']
+ self.vendordata_raw = md["vendor-data"]
+ self.network_data = md["network-data"]
+ self.routes_data = md["routes"]
self._set_provisioned()
return True
def _get_subplatform(self):
- return 'serial (%s)' % SERIAL_DEVICE
+ return "serial (%s)" % SERIAL_DEVICE
def device_name_to_device(self, name):
- return self.ds_cfg['disk_aliases'].get(name)
+ return self.ds_cfg["disk_aliases"].get(name)
def get_config_obj(self):
if self.smartos_type == SMARTOS_ENV_KVM:
@@ -323,7 +341,7 @@ class DataSourceSmartOS(sources.DataSource):
return {}
def get_instance_id(self):
- return self.metadata['instance-id']
+ return self.metadata["instance-id"]
@property
def network_config(self):
@@ -333,12 +351,12 @@ class DataSourceSmartOS(sources.DataSource):
if self._network_config is None:
if self.network_data is not None:
- self._network_config = (
- convert_smartos_network_data(
- network_data=self.network_data,
- dns_servers=self.metadata['dns_servers'],
- dns_domain=self.metadata['dns_domain'],
- routes=self.routes_data))
+ self._network_config = convert_smartos_network_data(
+ network_data=self.network_data,
+ dns_servers=self.metadata["dns_servers"],
+ dns_domain=self.metadata["dns_domain"],
+ routes=self.routes_data,
+ )
return self._network_config
@@ -357,10 +375,12 @@ class JoyentMetadataClient(object):
The full specification can be found at
http://eng.joyent.com/mdata/protocol.html
"""
+
line_regex = re.compile(
- r'V2 (?P<length>\d+) (?P<checksum>[0-9a-f]+)'
- r' (?P<body>(?P<request_id>[0-9a-f]+) (?P<status>SUCCESS|NOTFOUND)'
- r'( (?P<payload>.+))?)')
+ r"V2 (?P<length>\d+) (?P<checksum>[0-9a-f]+)"
+ r" (?P<body>(?P<request_id>[0-9a-f]+) (?P<status>SUCCESS|NOTFOUND)"
+ r"( (?P<payload>.+))?)"
+ )
def __init__(self, smartos_type=None, fp=None):
if smartos_type is None:
@@ -369,43 +389,50 @@ class JoyentMetadataClient(object):
self.fp = fp
def _checksum(self, body):
- return '{0:08x}'.format(
- binascii.crc32(body.encode('utf-8')) & 0xffffffff)
+ return "{0:08x}".format(
+ binascii.crc32(body.encode("utf-8")) & 0xFFFFFFFF
+ )
def _get_value_from_frame(self, expected_request_id, frame):
frame_data = self.line_regex.match(frame).groupdict()
- if int(frame_data['length']) != len(frame_data['body']):
+ if int(frame_data["length"]) != len(frame_data["body"]):
raise JoyentMetadataFetchException(
- 'Incorrect frame length given ({0} != {1}).'.format(
- frame_data['length'], len(frame_data['body'])))
- expected_checksum = self._checksum(frame_data['body'])
- if frame_data['checksum'] != expected_checksum:
+ "Incorrect frame length given ({0} != {1}).".format(
+ frame_data["length"], len(frame_data["body"])
+ )
+ )
+ expected_checksum = self._checksum(frame_data["body"])
+ if frame_data["checksum"] != expected_checksum:
raise JoyentMetadataFetchException(
- 'Invalid checksum (expected: {0}; got {1}).'.format(
- expected_checksum, frame_data['checksum']))
- if frame_data['request_id'] != expected_request_id:
+ "Invalid checksum (expected: {0}; got {1}).".format(
+ expected_checksum, frame_data["checksum"]
+ )
+ )
+ if frame_data["request_id"] != expected_request_id:
raise JoyentMetadataFetchException(
- 'Request ID mismatch (expected: {0}; got {1}).'.format(
- expected_request_id, frame_data['request_id']))
- if not frame_data.get('payload', None):
- LOG.debug('No value found.')
+ "Request ID mismatch (expected: {0}; got {1}).".format(
+ expected_request_id, frame_data["request_id"]
+ )
+ )
+ if not frame_data.get("payload", None):
+ LOG.debug("No value found.")
return None
- value = util.b64d(frame_data['payload'])
+ value = util.b64d(frame_data["payload"])
LOG.debug('Value "%s" found.', value)
return value
def _readline(self):
"""
- Reads a line a byte at a time until \n is encountered. Returns an
- ascii string with the trailing newline removed.
+ Reads a line a byte at a time until \n is encountered. Returns an
+ ascii string with the trailing newline removed.
- If a timeout (per-byte) is set and it expires, a
- JoyentMetadataFetchException will be thrown.
+ If a timeout (per-byte) is set and it expires, a
+ JoyentMetadataFetchException will be thrown.
"""
response = []
def as_ascii():
- return b''.join(response).decode('ascii')
+ return b"".join(response).decode("ascii")
msg = "Partial response: '%s'"
while True:
@@ -413,7 +440,7 @@ class JoyentMetadataClient(object):
byte = self.fp.read(1)
if len(byte) == 0:
raise JoyentMetadataTimeoutException(msg % as_ascii())
- if byte == b'\n':
+ if byte == b"\n":
return as_ascii()
response.append(byte)
except OSError as exc:
@@ -424,26 +451,33 @@ class JoyentMetadataClient(object):
raise
def _write(self, msg):
- self.fp.write(msg.encode('ascii'))
+ self.fp.write(msg.encode("ascii"))
self.fp.flush()
def _negotiate(self):
- LOG.debug('Negotiating protocol V2')
- self._write('NEGOTIATE V2\n')
+ LOG.debug("Negotiating protocol V2")
+ self._write("NEGOTIATE V2\n")
response = self._readline()
LOG.debug('read "%s"', response)
- if response != 'V2_OK':
+ if response != "V2_OK":
raise JoyentMetadataFetchException(
- 'Invalid response "%s" to "NEGOTIATE V2"' % response)
- LOG.debug('Negotiation complete')
+ 'Invalid response "%s" to "NEGOTIATE V2"' % response
+ )
+ LOG.debug("Negotiation complete")
def request(self, rtype, param=None):
- request_id = '{0:08x}'.format(random.randint(0, 0xffffffff))
- message_body = ' '.join((request_id, rtype,))
+ request_id = "{0:08x}".format(random.randint(0, 0xFFFFFFFF))
+ message_body = " ".join(
+ (
+ request_id,
+ rtype,
+ )
+ )
if param:
- message_body += ' ' + base64.b64encode(param.encode()).decode()
- msg = 'V2 {0} {1} {2}\n'.format(
- len(message_body), self._checksum(message_body), message_body)
+ message_body += " " + base64.b64encode(param.encode()).decode()
+ msg = "V2 {0} {1} {2}\n".format(
+ len(message_body), self._checksum(message_body), message_body
+ )
LOG.debug('Writing "%s" to metadata transport.', msg)
need_close = False
@@ -458,14 +492,14 @@ class JoyentMetadataClient(object):
LOG.debug('Read "%s" from metadata transport.', response)
- if 'SUCCESS' not in response:
+ if "SUCCESS" not in response:
return None
value = self._get_value_from_frame(request_id, response)
return value
def get(self, key, default=None, strip=False):
- result = self.request(rtype='GET', param=key)
+ result = self.request(rtype="GET", param=key)
if result is None:
return default
if result and strip:
@@ -479,18 +513,19 @@ class JoyentMetadataClient(object):
return json.loads(result)
def list(self):
- result = self.request(rtype='KEYS')
+ result = self.request(rtype="KEYS")
if not result:
return []
- return result.split('\n')
+ return result.split("\n")
def put(self, key, val):
- param = b' '.join([base64.b64encode(i.encode())
- for i in (key, val)]).decode()
- return self.request(rtype='PUT', param=param)
+ param = b" ".join(
+ [base64.b64encode(i.encode()) for i in (key, val)]
+ ).decode()
+ return self.request(rtype="PUT", param=param)
def delete(self, key):
- return self.request(rtype='DELETE', param=key)
+ return self.request(rtype="DELETE", param=key)
def close_transport(self):
if self.fp:
@@ -519,7 +554,7 @@ class JoyentMetadataSocketClient(JoyentMetadataClient):
def open_transport(self):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(self.socketpath)
- self.fp = sock.makefile('rwb')
+ self.fp = sock.makefile("rwb")
self._negotiate()
def exists(self):
@@ -530,8 +565,9 @@ class JoyentMetadataSocketClient(JoyentMetadataClient):
class JoyentMetadataSerialClient(JoyentMetadataClient):
- def __init__(self, device, timeout=10, smartos_type=SMARTOS_ENV_KVM,
- fp=None):
+ def __init__(
+ self, device, timeout=10, smartos_type=SMARTOS_ENV_KVM, fp=None
+ ):
super(JoyentMetadataSerialClient, self).__init__(smartos_type, fp)
self.device = device
self.timeout = timeout
@@ -550,7 +586,7 @@ class JoyentMetadataSerialClient(JoyentMetadataClient):
self._negotiate()
def _flush(self):
- LOG.debug('Flushing input')
+ LOG.debug("Flushing input")
# Read any pending data
timeout = self.fp.timeout
self.fp.timeout = 0.1
@@ -559,7 +595,7 @@ class JoyentMetadataSerialClient(JoyentMetadataClient):
self._readline()
except JoyentMetadataTimeoutException:
break
- LOG.debug('Input empty')
+ LOG.debug("Input empty")
# Send a newline and expect "invalid command". Keep trying until
# successful. Retry rather frequently so that the "Is the host
@@ -571,24 +607,29 @@ class JoyentMetadataSerialClient(JoyentMetadataClient):
self.fp.timeout = timeout
while True:
LOG.debug('Writing newline, expecting "invalid command"')
- self._write('\n')
+ self._write("\n")
try:
response = self._readline()
- if response == 'invalid command':
+ if response == "invalid command":
break
- if response == 'FAILURE':
+ if response == "FAILURE":
LOG.debug('Got "FAILURE". Retrying.')
continue
LOG.warning('Unexpected response "%s" during flush', response)
except JoyentMetadataTimeoutException:
- LOG.warning('Timeout while initializing metadata client. '
- 'Is the host metadata service running?')
+ LOG.warning(
+ "Timeout while initializing metadata client. "
+ "Is the host metadata service running?"
+ )
LOG.debug('Got "invalid command". Flush complete.')
self.fp.timeout = timeout
def __repr__(self):
return "%s(device=%s, timeout=%s)" % (
- self.__class__.__name__, self.device, self.timeout)
+ self.__class__.__name__,
+ self.device,
+ self.timeout,
+ )
class JoyentMetadataLegacySerialClient(JoyentMetadataSerialClient):
@@ -620,7 +661,7 @@ class JoyentMetadataLegacySerialClient(JoyentMetadataSerialClient):
keys = None
if self.base64_all is None:
keys = self.list()
- if 'base64_all' in keys:
+ if "base64_all" in keys:
self.base64_all = util.is_true(self._get("base64_all"))
else:
self.base64_all = False
@@ -633,7 +674,7 @@ class JoyentMetadataLegacySerialClient(JoyentMetadataSerialClient):
if keys is None:
keys = self.list()
b64_keys = set()
- if 'base64_keys' in keys:
+ if "base64_keys" in keys:
b64_keys = set(self._get("base64_keys").split(","))
# now add any b64-<keyname> that has a true value
@@ -647,8 +688,9 @@ class JoyentMetadataLegacySerialClient(JoyentMetadataSerialClient):
self.base64_keys = b64_keys
def _get(self, key, default=None, strip=False):
- return (super(JoyentMetadataLegacySerialClient, self).
- get(key, default=default, strip=strip))
+ return super(JoyentMetadataLegacySerialClient, self).get(
+ key, default=default, strip=strip
+ )
def is_b64_encoded(self, key, reset=False):
if key in NO_BASE64_DECODE:
@@ -680,9 +722,12 @@ class JoyentMetadataLegacySerialClient(JoyentMetadataSerialClient):
def jmc_client_factory(
- smartos_type=None, metadata_sockfile=METADATA_SOCKFILE,
- serial_device=SERIAL_DEVICE, serial_timeout=SERIAL_TIMEOUT,
- uname_version=None):
+ smartos_type=None,
+ metadata_sockfile=METADATA_SOCKFILE,
+ serial_device=SERIAL_DEVICE,
+ serial_timeout=SERIAL_TIMEOUT,
+ uname_version=None,
+):
if smartos_type is None:
smartos_type = get_smartos_environ(uname_version)
@@ -691,11 +736,14 @@ def jmc_client_factory(
return None
elif smartos_type == SMARTOS_ENV_KVM:
return JoyentMetadataLegacySerialClient(
- device=serial_device, timeout=serial_timeout,
- smartos_type=smartos_type)
+ device=serial_device,
+ timeout=serial_timeout,
+ smartos_type=smartos_type,
+ )
elif smartos_type == SMARTOS_ENV_LX_BRAND:
- return JoyentMetadataSocketClient(socketpath=metadata_sockfile,
- smartos_type=smartos_type)
+ return JoyentMetadataSocketClient(
+ socketpath=metadata_sockfile, smartos_type=smartos_type
+ )
raise ValueError("Unknown value for smartos_type: %s" % smartos_type)
@@ -708,12 +756,14 @@ def identify_file(content_f):
LOG.debug("script %s mime type is %s", content_f, f_type)
except subp.ProcessExecutionError as e:
util.logexc(
- LOG, ("Failed to identify script type for %s" % content_f, e))
+ LOG, ("Failed to identify script type for %s" % content_f, e)
+ )
return None if f_type is None else f_type.strip()
-def write_boot_content(content, content_f, link=None, shebang=False,
- mode=0o400):
+def write_boot_content(
+ content, content_f, link=None, shebang=False, mode=0o400
+):
"""
Write the content to content_f. Under the following rules:
1. If no content, remove the file
@@ -747,7 +797,8 @@ def write_boot_content(content, content_f, link=None, shebang=False,
f_type = identify_file(content_f)
if f_type == "text/plain":
util.write_file(
- content_f, "\n".join(["#!/bin/bash", content]), mode=mode)
+ content_f, "\n".join(["#!/bin/bash", content]), mode=mode
+ )
LOG.debug("added shebang to file %s", content_f)
if link:
@@ -768,7 +819,7 @@ def get_smartos_environ(uname_version=None, product_name=None):
# report 'BrandZ virtual linux' as the kernel version
if uname_version is None:
uname_version = uname[3]
- if uname_version == 'BrandZ virtual linux':
+ if uname_version == "BrandZ virtual linux":
return SMARTOS_ENV_LX_BRAND
if product_name is None:
@@ -776,16 +827,16 @@ def get_smartos_environ(uname_version=None, product_name=None):
else:
system_type = product_name
- if system_type and system_type.startswith('SmartDC'):
+ if system_type and system_type.startswith("SmartDC"):
return SMARTOS_ENV_KVM
return None
# Convert SMARTOS 'sdc:nics' data to network_config yaml
-def convert_smartos_network_data(network_data=None,
- dns_servers=None, dns_domain=None,
- routes=None):
+def convert_smartos_network_data(
+ network_data=None, dns_servers=None, dns_domain=None, routes=None
+):
"""Return a dictionary of network_config by parsing provided
SMARTOS sdc:nics configuration data
@@ -810,28 +861,28 @@ def convert_smartos_network_data(network_data=None,
"""
valid_keys = {
- 'physical': [
- 'mac_address',
- 'mtu',
- 'name',
- 'params',
- 'subnets',
- 'type',
+ "physical": [
+ "mac_address",
+ "mtu",
+ "name",
+ "params",
+ "subnets",
+ "type",
],
- 'subnet': [
- 'address',
- 'broadcast',
- 'dns_nameservers',
- 'dns_search',
- 'metric',
- 'pointopoint',
- 'routes',
- 'scope',
- 'type',
+ "subnet": [
+ "address",
+ "broadcast",
+ "dns_nameservers",
+ "dns_search",
+ "metric",
+ "pointopoint",
+ "routes",
+ "scope",
+ "type",
],
- 'route': [
- 'network',
- 'gateway',
+ "route": [
+ "network",
+ "gateway",
],
}
@@ -851,56 +902,64 @@ def convert_smartos_network_data(network_data=None,
routes = []
def is_valid_ipv4(addr):
- return '.' in addr
+ return "." in addr
def is_valid_ipv6(addr):
- return ':' in addr
+ return ":" in addr
pgws = {
- 'ipv4': {'match': is_valid_ipv4, 'gw': None},
- 'ipv6': {'match': is_valid_ipv6, 'gw': None},
+ "ipv4": {"match": is_valid_ipv4, "gw": None},
+ "ipv6": {"match": is_valid_ipv6, "gw": None},
}
config = []
for nic in network_data:
- cfg = dict((k, v) for k, v in nic.items()
- if k in valid_keys['physical'])
- cfg.update({
- 'type': 'physical',
- 'name': nic['interface']})
- if 'mac' in nic:
- cfg.update({'mac_address': nic['mac']})
+ cfg = dict(
+ (k, v) for k, v in nic.items() if k in valid_keys["physical"]
+ )
+ cfg.update({"type": "physical", "name": nic["interface"]})
+ if "mac" in nic:
+ cfg.update({"mac_address": nic["mac"]})
subnets = []
- for ip in nic.get('ips', []):
+ for ip in nic.get("ips", []):
if ip == "dhcp":
- subnet = {'type': 'dhcp4'}
+ subnet = {"type": "dhcp4"}
else:
routeents = []
- subnet = dict((k, v) for k, v in nic.items()
- if k in valid_keys['subnet'])
- subnet.update({
- 'type': 'static',
- 'address': ip,
- })
-
- proto = 'ipv4' if is_valid_ipv4(ip) else 'ipv6'
+ subnet = dict(
+ (k, v) for k, v in nic.items() if k in valid_keys["subnet"]
+ )
+ subnet.update(
+ {
+ "type": "static",
+ "address": ip,
+ }
+ )
+
+ proto = "ipv4" if is_valid_ipv4(ip) else "ipv6"
# Only use gateways for 'primary' nics
- if 'primary' in nic and nic.get('primary', False):
+ if "primary" in nic and nic.get("primary", False):
# the ips and gateways list may be N to M, here
# we map the ip index into the gateways list,
# and handle the case that we could have more ips
# than gateways. we only consume the first gateway
- if not pgws[proto]['gw']:
- gateways = [gw for gw in nic.get('gateways', [])
- if pgws[proto]['match'](gw)]
+ if not pgws[proto]["gw"]:
+ gateways = [
+ gw
+ for gw in nic.get("gateways", [])
+ if pgws[proto]["match"](gw)
+ ]
if len(gateways):
- pgws[proto]['gw'] = gateways[0]
- subnet.update({'gateway': pgws[proto]['gw']})
+ pgws[proto]["gw"] = gateways[0]
+ subnet.update({"gateway": pgws[proto]["gw"]})
for route in routes:
- rcfg = dict((k, v) for k, v in route.items()
- if k in valid_keys['route'])
+ rcfg = dict(
+ (k, v)
+ for k, v in route.items()
+ if k in valid_keys["route"]
+ )
# Linux uses the value of 'gateway' to determine
# automatically if the route is a forward/next-hop
# (non-local IP for gateway) or an interface/resolver
@@ -913,25 +972,29 @@ def convert_smartos_network_data(network_data=None,
# to see if it's in the prefix. We can then smartly
# add or not-add this route. But for now,
# when in doubt, use brute force! Routes for everyone!
- rcfg.update({'network': route['dst']})
+ rcfg.update({"network": route["dst"]})
routeents.append(rcfg)
- subnet.update({'routes': routeents})
+ subnet.update({"routes": routeents})
subnets.append(subnet)
- cfg.update({'subnets': subnets})
+ cfg.update({"subnets": subnets})
config.append(cfg)
if dns_servers:
config.append(
- {'type': 'nameserver', 'address': dns_servers,
- 'search': dns_domain})
+ {
+ "type": "nameserver",
+ "address": dns_servers,
+ "search": dns_domain,
+ }
+ )
- return {'version': 1, 'config': config}
+ return {"version": 1, "config": config}
# Used to match classes to dependencies
datasources = [
- (DataSourceSmartOS, (sources.DEP_FILESYSTEM, )),
+ (DataSourceSmartOS, (sources.DEP_FILESYSTEM,)),
]
@@ -942,13 +1005,17 @@ def get_datasource_list(depends):
if __name__ == "__main__":
import sys
+
jmc = jmc_client_factory()
if jmc is None:
print("Do not appear to be on smartos.")
sys.exit(1)
if len(sys.argv) == 1:
- keys = (list(SMARTOS_ATTRIB_JSON.keys()) +
- list(SMARTOS_ATTRIB_MAP.keys()) + ['network_config'])
+ keys = (
+ list(SMARTOS_ATTRIB_JSON.keys())
+ + list(SMARTOS_ATTRIB_MAP.keys())
+ + ["network_config"]
+ )
else:
keys = sys.argv[1:]
@@ -960,14 +1027,19 @@ if __name__ == "__main__":
keyname = SMARTOS_ATTRIB_JSON[key]
data[key] = client.get_json(keyname)
elif key == "network_config":
- for depkey in ('network-data', 'dns_servers', 'dns_domain',
- 'routes'):
+ for depkey in (
+ "network-data",
+ "dns_servers",
+ "dns_domain",
+ "routes",
+ ):
load_key(client, depkey, data)
data[key] = convert_smartos_network_data(
- network_data=data['network-data'],
- dns_servers=data['dns_servers'],
- dns_domain=data['dns_domain'],
- routes=data['routes'])
+ network_data=data["network-data"],
+ dns_servers=data["dns_servers"],
+ dns_domain=data["dns_domain"],
+ routes=data["routes"],
+ )
else:
if key in SMARTOS_ATTRIB_MAP:
keyname, strip = SMARTOS_ATTRIB_MAP[key]
@@ -981,7 +1053,6 @@ if __name__ == "__main__":
for key in keys:
load_key(client=jmc, key=key, data=data)
- print(json.dumps(data, indent=1, sort_keys=True,
- separators=(',', ': ')))
+ print(json.dumps(data, indent=1, sort_keys=True, separators=(",", ": ")))
# vi: ts=4 expandtab
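For readers skimming the reformatted JoyentMetadataClient hunks above, here is a minimal standalone sketch of how a V2 request line is framed. The checksum formula and line layout come straight from the code above; the fixed request id and the "sdc:uuid" key are illustrative stand-ins (the real client draws the id from random.randint(0, 0xFFFFFFFF) and takes the key from the caller):

    import base64
    import binascii

    def v2_checksum(body: str) -> str:
        # 8-hex-digit CRC32 of the body, as in JoyentMetadataClient._checksum
        return "{0:08x}".format(binascii.crc32(body.encode("utf-8")) & 0xFFFFFFFF)

    request_id = "0000abcd"  # illustrative; normally random per request
    body = " ".join((request_id, "GET")) + " " + base64.b64encode(b"sdc:uuid").decode()
    print("V2 {0} {1} {2}".format(len(body), v2_checksum(body), body))

The reply is validated the same way in _get_value_from_frame above: length, checksum, and request id must all match before the payload is base64-decoded.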
diff --git a/cloudinit/sources/DataSourceUpCloud.py b/cloudinit/sources/DataSourceUpCloud.py
index 209b9672..f4b78da5 100644
--- a/cloudinit/sources/DataSourceUpCloud.py
+++ b/cloudinit/sources/DataSourceUpCloud.py
@@ -6,12 +6,9 @@
# https://developers.upcloud.com/1.3/8-servers/#metadata-service
from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import util
from cloudinit import net as cloudnet
+from cloudinit import sources, util
from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
-
-
from cloudinit.sources.helpers import upcloud as uc_helper
LOG = logging.getLogger(__name__)
@@ -152,7 +149,7 @@ class DataSourceUpCloudLocal(DataSourceUpCloud):
# Used to match classes to dependencies
datasources = [
- (DataSourceUpCloudLocal, (sources.DEP_FILESYSTEM, )),
+ (DataSourceUpCloudLocal, (sources.DEP_FILESYSTEM,)),
(DataSourceUpCloud, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]
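A small aside on the datasources entries touched here and in the other files: the only change Black makes is dropping the space inside a one-element tuple, and the trailing comma is what makes it a tuple at all. A quick plain-Python illustration (not cloud-init code):

    deps = ("FILESYSTEM",)         # one-element tuple
    not_a_tuple = ("FILESYSTEM")   # just a parenthesized string
    print(type(deps).__name__, type(not_a_tuple).__name__)  # -> tuple str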
diff --git a/cloudinit/sources/DataSourceVMware.py b/cloudinit/sources/DataSourceVMware.py
index 22ca63de..ed7f487a 100644
--- a/cloudinit/sources/DataSourceVMware.py
+++ b/cloudinit/sources/DataSourceVMware.py
@@ -63,20 +63,19 @@ Netifaces (https://github.com/al45tair/netifaces)
import collections
import copy
-from distutils.spawn import find_executable
import ipaddress
import json
import os
import socket
import time
-
-from cloudinit import dmi, log as logging
-from cloudinit import sources
-from cloudinit import util
-from cloudinit.subp import subp, ProcessExecutionError
+from distutils.spawn import find_executable
import netifaces
+from cloudinit import dmi
+from cloudinit import log as logging
+from cloudinit import sources, util
+from cloudinit.subp import ProcessExecutionError, subp
PRODUCT_UUID_FILE_PATH = "/sys/class/dmi/id/product_uuid"
@@ -460,7 +459,7 @@ def guestinfo_set_value(key, value, vmware_rpctool=VMWARE_RPCTOOL):
subp(
[
vmware_rpctool,
- ("info-set %s %s" % (get_guestinfo_key_name(key), value)),
+ "info-set %s %s" % (get_guestinfo_key_name(key), value),
]
)
return True
diff --git a/cloudinit/sources/DataSourceVultr.py b/cloudinit/sources/DataSourceVultr.py
index abeefbc5..13f7c24d 100644
--- a/cloudinit/sources/DataSourceVultr.py
+++ b/cloudinit/sources/DataSourceVultr.py
@@ -5,35 +5,37 @@
# Vultr Metadata API:
# https://www.vultr.com/metadata/
-from cloudinit import log as log
-from cloudinit import sources
-from cloudinit import util
-from cloudinit import version
-
import cloudinit.sources.helpers.vultr as vultr
+from cloudinit import log as log
+from cloudinit import sources, util, version
LOG = log.getLogger(__name__)
BUILTIN_DS_CONFIG = {
- 'url': 'http://169.254.169.254',
- 'retries': 30,
- 'timeout': 10,
- 'wait': 5,
- 'user-agent': 'Cloud-Init/%s - OS: %s Variant: %s' %
- (version.version_string(),
- util.system_info()['system'],
- util.system_info()['variant'])
+ "url": "http://169.254.169.254",
+ "retries": 30,
+ "timeout": 10,
+ "wait": 5,
+ "user-agent": "Cloud-Init/%s - OS: %s Variant: %s"
+ % (
+ version.version_string(),
+ util.system_info()["system"],
+ util.system_info()["variant"],
+ ),
}
class DataSourceVultr(sources.DataSource):
- dsname = 'Vultr'
+ dsname = "Vultr"
def __init__(self, sys_cfg, distro, paths):
super(DataSourceVultr, self).__init__(sys_cfg, distro, paths)
- self.ds_cfg = util.mergemanydict([
- util.get_cfg_by_path(sys_cfg, ["datasource", "Vultr"], {}),
- BUILTIN_DS_CONFIG])
+ self.ds_cfg = util.mergemanydict(
+ [
+ util.get_cfg_by_path(sys_cfg, ["datasource", "Vultr"], {}),
+ BUILTIN_DS_CONFIG,
+ ]
+ )
# Initiate data and check if Vultr
def _get_data(self):
@@ -46,8 +48,8 @@ class DataSourceVultr(sources.DataSource):
# Fetch metadata
self.metadata = self.get_metadata()
- self.metadata['instance-id'] = self.metadata['instanceid']
- self.metadata['local-hostname'] = self.metadata['hostname']
+ self.metadata["instance-id"] = self.metadata["instanceid"]
+ self.metadata["local-hostname"] = self.metadata["hostname"]
self.userdata_raw = self.metadata["user-data"]
# Generate config and process data
@@ -55,9 +57,9 @@ class DataSourceVultr(sources.DataSource):
# Dump some data so diagnosing failures is manageable
LOG.debug("Vultr Vendor Config:")
- LOG.debug(util.json_dumps(self.metadata['vendor-data']))
- LOG.debug("SUBID: %s", self.metadata['instance-id'])
- LOG.debug("Hostname: %s", self.metadata['local-hostname'])
+ LOG.debug(util.json_dumps(self.metadata["vendor-data"]))
+ LOG.debug("SUBID: %s", self.metadata["instance-id"])
+ LOG.debug("Hostname: %s", self.metadata["local-hostname"])
if self.userdata_raw is not None:
LOG.debug("User-Data:")
LOG.debug(self.userdata_raw)
@@ -70,16 +72,16 @@ class DataSourceVultr(sources.DataSource):
if "cloud_interfaces" in md:
# In the future we will just drop pre-configured
# network configs into the array. They need names though.
- self.netcfg = vultr.add_interface_names(md['cloud_interfaces'])
+ self.netcfg = vultr.add_interface_names(md["cloud_interfaces"])
else:
- self.netcfg = vultr.generate_network_config(md['interfaces'])
+ self.netcfg = vultr.generate_network_config(md["interfaces"])
# Grab vendordata
- self.vendordata_raw = md['vendor-data']
+ self.vendordata_raw = md["vendor-data"]
# Default hostname is "guest" for whitelabel
- if self.metadata['local-hostname'] == "":
- self.metadata['local-hostname'] = "guest"
+ if self.metadata["local-hostname"] == "":
+ self.metadata["local-hostname"] = "guest"
self.userdata_raw = md["user-data"]
if self.userdata_raw == "":
@@ -87,11 +89,13 @@ class DataSourceVultr(sources.DataSource):
# Get the metadata by flag
def get_metadata(self):
- return vultr.get_metadata(self.ds_cfg['url'],
- self.ds_cfg['timeout'],
- self.ds_cfg['retries'],
- self.ds_cfg['wait'],
- self.ds_cfg['user-agent'])
+ return vultr.get_metadata(
+ self.ds_cfg["url"],
+ self.ds_cfg["timeout"],
+ self.ds_cfg["retries"],
+ self.ds_cfg["wait"],
+ self.ds_cfg["user-agent"],
+ )
# Compare subid as instance id
def check_instance_id(self, sys_cfg):
@@ -102,7 +106,7 @@ class DataSourceVultr(sources.DataSource):
if vultr.is_baremetal():
return False
- subid = vultr.get_sysinfo()['subid']
+ subid = vultr.get_sysinfo()["subid"]
return sources.instance_id_matches_system_uuid(subid)
# Currently unsupported
@@ -117,7 +121,7 @@ class DataSourceVultr(sources.DataSource):
# Used to match classes to dependencies
datasources = [
- (DataSourceVultr, (sources.DEP_FILESYSTEM, )),
+ (DataSourceVultr, (sources.DEP_FILESYSTEM,)),
]
@@ -133,12 +137,14 @@ if __name__ == "__main__":
print("Machine is not a Vultr instance")
sys.exit(1)
- md = vultr.get_metadata(BUILTIN_DS_CONFIG['url'],
- BUILTIN_DS_CONFIG['timeout'],
- BUILTIN_DS_CONFIG['retries'],
- BUILTIN_DS_CONFIG['wait'],
- BUILTIN_DS_CONFIG['user-agent'])
- config = md['vendor-data']
+ md = vultr.get_metadata(
+ BUILTIN_DS_CONFIG["url"],
+ BUILTIN_DS_CONFIG["timeout"],
+ BUILTIN_DS_CONFIG["retries"],
+ BUILTIN_DS_CONFIG["wait"],
+ BUILTIN_DS_CONFIG["user-agent"],
+ )
+ config = md["vendor-data"]
sysinfo = vultr.get_sysinfo()
print(util.json_dumps(sysinfo))
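One Black idiom worth noting from the Vultr hunks above: when a %-formatted string and its arguments no longer fit on one line, the format string stays put and the % operator plus the argument tuple move to the following lines, as in the user-agent entry of BUILTIN_DS_CONFIG. A minimal sketch with placeholder values (the version, system, and variant strings here are hypothetical, not read from version.version_string() or util.system_info()):

    user_agent = "Cloud-Init/%s - OS: %s Variant: %s" % (
        "21.4",    # placeholder for version.version_string()
        "Linux",   # placeholder for util.system_info()["system"]
        "ubuntu",  # placeholder for util.system_info()["variant"]
    )
    print(user_agent)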
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index f2f2343c..9083f399 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -15,11 +15,9 @@ import os
from collections import namedtuple
from typing import Dict, List # noqa: F401
-from cloudinit import dmi
-from cloudinit import importer
+from cloudinit import dmi, importer
from cloudinit import log as logging
-from cloudinit import net
-from cloudinit import type_utils
+from cloudinit import net, type_utils
from cloudinit import user_data as ud
from cloudinit import util
from cloudinit.atomic_helper import write_json
@@ -38,42 +36,44 @@ VALID_DSMODES = [DSMODE_DISABLED, DSMODE_LOCAL, DSMODE_NETWORK]
DEP_FILESYSTEM = "FILESYSTEM"
DEP_NETWORK = "NETWORK"
-DS_PREFIX = 'DataSource'
+DS_PREFIX = "DataSource"
EXPERIMENTAL_TEXT = (
"EXPERIMENTAL: The structure and format of content scoped under the 'ds'"
- " key may change in subsequent releases of cloud-init.")
+ " key may change in subsequent releases of cloud-init."
+)
# File in which public available instance meta-data is written
# security-sensitive key values are redacted from this world-readable file
-INSTANCE_JSON_FILE = 'instance-data.json'
+INSTANCE_JSON_FILE = "instance-data.json"
# security-sensitive key values are present in this root-readable file
-INSTANCE_JSON_SENSITIVE_FILE = 'instance-data-sensitive.json'
-REDACT_SENSITIVE_VALUE = 'redacted for non-root user'
+INSTANCE_JSON_SENSITIVE_FILE = "instance-data-sensitive.json"
+REDACT_SENSITIVE_VALUE = "redacted for non-root user"
# Key which can be provide a cloud's official product name to cloud-init
-METADATA_CLOUD_NAME_KEY = 'cloud-name'
+METADATA_CLOUD_NAME_KEY = "cloud-name"
UNSET = "_unset"
-METADATA_UNKNOWN = 'unknown'
+METADATA_UNKNOWN = "unknown"
LOG = logging.getLogger(__name__)
# CLOUD_ID_REGION_PREFIX_MAP format is:
# <region-match-prefix>: (<new-cloud-id>: <test_allowed_cloud_callable>)
CLOUD_ID_REGION_PREFIX_MAP = {
- 'cn-': ('aws-china', lambda c: c == 'aws'), # only change aws regions
- 'us-gov-': ('aws-gov', lambda c: c == 'aws'), # only change aws regions
- 'china': ('azure-china', lambda c: c == 'azure'), # only change azure
+ "cn-": ("aws-china", lambda c: c == "aws"), # only change aws regions
+ "us-gov-": ("aws-gov", lambda c: c == "aws"), # only change aws regions
+ "china": ("azure-china", lambda c: c == "azure"), # only change azure
}
# NetworkConfigSource represents the canonical list of network config sources
# that cloud-init knows about. (Python 2.7 lacks PEP 435, so use a singleton
# namedtuple as an enum; see https://stackoverflow.com/a/6971002)
-_NETCFG_SOURCE_NAMES = ('cmdline', 'ds', 'system_cfg', 'fallback', 'initramfs')
-NetworkConfigSource = namedtuple('NetworkConfigSource',
- _NETCFG_SOURCE_NAMES)(*_NETCFG_SOURCE_NAMES)
+_NETCFG_SOURCE_NAMES = ("cmdline", "ds", "system_cfg", "fallback", "initramfs")
+NetworkConfigSource = namedtuple("NetworkConfigSource", _NETCFG_SOURCE_NAMES)(
+ *_NETCFG_SOURCE_NAMES
+)
class DatasourceUnpickleUserDataError(Exception):
@@ -88,7 +88,7 @@ class InvalidMetaDataException(Exception):
"""Raised when metadata is broken, unavailable or disabled."""
-def process_instance_metadata(metadata, key_path='', sensitive_keys=()):
+def process_instance_metadata(metadata, key_path="", sensitive_keys=()):
"""Process all instance metadata cleaning it up for persisting as json.
Strip ci-b64 prefix and catalog any 'base64_encoded_keys' as a list
@@ -100,22 +100,23 @@ def process_instance_metadata(metadata, key_path='', sensitive_keys=()):
sens_keys = []
for key, val in metadata.items():
if key_path:
- sub_key_path = key_path + '/' + key
+ sub_key_path = key_path + "/" + key
else:
sub_key_path = key
if key in sensitive_keys or sub_key_path in sensitive_keys:
sens_keys.append(sub_key_path)
- if isinstance(val, str) and val.startswith('ci-b64:'):
+ if isinstance(val, str) and val.startswith("ci-b64:"):
base64_encoded_keys.append(sub_key_path)
- md_copy[key] = val.replace('ci-b64:', '')
+ md_copy[key] = val.replace("ci-b64:", "")
if isinstance(val, dict):
return_val = process_instance_metadata(
- val, sub_key_path, sensitive_keys)
- base64_encoded_keys.extend(return_val.pop('base64_encoded_keys'))
- sens_keys.extend(return_val.pop('sensitive_keys'))
+ val, sub_key_path, sensitive_keys
+ )
+ base64_encoded_keys.extend(return_val.pop("base64_encoded_keys"))
+ sens_keys.extend(return_val.pop("sensitive_keys"))
md_copy[key] = return_val
- md_copy['base64_encoded_keys'] = sorted(base64_encoded_keys)
- md_copy['sensitive_keys'] = sorted(sens_keys)
+ md_copy["base64_encoded_keys"] = sorted(base64_encoded_keys)
+ md_copy["sensitive_keys"] = sorted(sens_keys)
return md_copy
@@ -124,11 +125,11 @@ def redact_sensitive_keys(metadata, redact_value=REDACT_SENSITIVE_VALUE):
Replace any keys values listed in 'sensitive_keys' with redact_value.
"""
- if not metadata.get('sensitive_keys', []):
+ if not metadata.get("sensitive_keys", []):
return metadata
md_copy = copy.deepcopy(metadata)
- for key_path in metadata.get('sensitive_keys'):
- path_parts = key_path.split('/')
+ for key_path in metadata.get("sensitive_keys"):
+ path_parts = key_path.split("/")
obj = md_copy
for path in path_parts:
if isinstance(obj[path], dict) and path != path_parts[-1]:
@@ -138,18 +139,24 @@ def redact_sensitive_keys(metadata, redact_value=REDACT_SENSITIVE_VALUE):
URLParams = namedtuple(
- 'URLParms', ['max_wait_seconds', 'timeout_seconds',
- 'num_retries', 'sec_between_retries'])
+ "URLParms",
+ [
+ "max_wait_seconds",
+ "timeout_seconds",
+ "num_retries",
+ "sec_between_retries",
+ ],
+)
class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
dsmode = DSMODE_NETWORK
- default_locale = 'en_US.UTF-8'
+ default_locale = "en_US.UTF-8"
# Datasource name needs to be set by subclasses to determine which
# cloud-config datasource key is loaded
- dsname = '_undef'
+ dsname = "_undef"
# Cached cloud_name as determined by _get_cloud_name
_cloud_name = None
@@ -170,15 +177,17 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
# configuration will be used without considering any that follow.) This
# should always be a subset of the members of NetworkConfigSource with no
# duplicate entries.
- network_config_sources = (NetworkConfigSource.cmdline,
- NetworkConfigSource.initramfs,
- NetworkConfigSource.system_cfg,
- NetworkConfigSource.ds)
+ network_config_sources = (
+ NetworkConfigSource.cmdline,
+ NetworkConfigSource.initramfs,
+ NetworkConfigSource.system_cfg,
+ NetworkConfigSource.ds,
+ )
# read_url_params
- url_max_wait = -1 # max_wait < 0 means do not wait
- url_timeout = 10 # timeout for each metadata url read attempt
- url_retries = 5 # number of times to retry url upon 404
+ url_max_wait = -1 # max_wait < 0 means do not wait
+ url_timeout = 10 # timeout for each metadata url read attempt
+ url_retries = 5 # number of times to retry url upon 404
url_sec_between_retries = 1 # amount of seconds to wait between retries
# The datasource defines a set of supported EventTypes during which
@@ -192,30 +201,43 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
# would call default_update_events['network'].add(EventType.BOOT).
# Default: generate network config on new instance id (first boot).
- supported_update_events = {EventScope.NETWORK: {
- EventType.BOOT_NEW_INSTANCE,
- EventType.BOOT,
- EventType.BOOT_LEGACY,
- EventType.HOTPLUG,
- }}
- default_update_events = {EventScope.NETWORK: {
- EventType.BOOT_NEW_INSTANCE,
- }}
+ supported_update_events = {
+ EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ EventType.BOOT_LEGACY,
+ EventType.HOTPLUG,
+ }
+ }
+ default_update_events = {
+ EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ }
+ }
# N-tuple listing default values for any metadata-related class
    # attributes cached on an instance by a process_data run. These attribute
# values are reset via clear_cached_attrs during any update_metadata call.
cached_attr_defaults = (
- ('ec2_metadata', UNSET), ('network_json', UNSET),
- ('metadata', {}), ('userdata', None), ('userdata_raw', None),
- ('vendordata', None), ('vendordata_raw', None),
- ('vendordata2', None), ('vendordata2_raw', None))
+ ("ec2_metadata", UNSET),
+ ("network_json", UNSET),
+ ("metadata", {}),
+ ("userdata", None),
+ ("userdata_raw", None),
+ ("vendordata", None),
+ ("vendordata_raw", None),
+ ("vendordata2", None),
+ ("vendordata2_raw", None),
+ )
_dirty_cache = False
    # N-tuple of keypaths or keynames to redact from instance-data.json for
# non-root users
- sensitive_metadata_keys = ('merged_cfg', 'security-credentials',)
+ sensitive_metadata_keys = (
+ "merged_cfg",
+ "security-credentials",
+ )
_ci_pkl_version = 1
@@ -232,7 +254,8 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
self.vendordata2_raw = None
self.ds_cfg = util.get_cfg_by_path(
- self.sys_cfg, ("datasource", self.dsname), {})
+ self.sys_cfg, ("datasource", self.dsname), {}
+ )
if not self.ds_cfg:
self.ds_cfg = {}
@@ -243,11 +266,11 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
def _unpickle(self, ci_pkl_version: int) -> None:
"""Perform deserialization fixes for Paths."""
- if not hasattr(self, 'vendordata2'):
+ if not hasattr(self, "vendordata2"):
self.vendordata2 = None
- if not hasattr(self, 'vendordata2_raw'):
+ if not hasattr(self, "vendordata2_raw"):
self.vendordata2_raw = None
- if hasattr(self, 'userdata') and self.userdata is not None:
+ if hasattr(self, "userdata") and self.userdata is not None:
# If userdata stores MIME data, on < python3.6 it will be
# missing the 'policy' attribute that exists on >=python3.6.
# Calling str() on the userdata will attempt to access this
@@ -258,7 +281,8 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
except AttributeError as e:
LOG.debug(
"Unable to unpickle datasource: %s."
- " Ignoring current cache.", e
+ " Ignoring current cache.",
+ e,
)
raise DatasourceUnpickleUserDataError() from e
@@ -275,28 +299,30 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
# metadata to discover that content
sysinfo = instance_data["sys_info"]
return {
- 'v1': {
- '_beta_keys': ['subplatform'],
- 'availability-zone': availability_zone,
- 'availability_zone': availability_zone,
- 'cloud-name': self.cloud_name,
- 'cloud_name': self.cloud_name,
- 'distro': sysinfo["dist"][0],
- 'distro_version': sysinfo["dist"][1],
- 'distro_release': sysinfo["dist"][2],
- 'platform': self.platform_type,
- 'public_ssh_keys': self.get_public_ssh_keys(),
- 'python_version': sysinfo["python"],
- 'instance-id': instance_id,
- 'instance_id': instance_id,
- 'kernel_release': sysinfo["uname"][2],
- 'local-hostname': local_hostname,
- 'local_hostname': local_hostname,
- 'machine': sysinfo["uname"][4],
- 'region': self.region,
- 'subplatform': self.subplatform,
- 'system_platform': sysinfo["platform"],
- 'variant': sysinfo["variant"]}}
+ "v1": {
+ "_beta_keys": ["subplatform"],
+ "availability-zone": availability_zone,
+ "availability_zone": availability_zone,
+ "cloud-name": self.cloud_name,
+ "cloud_name": self.cloud_name,
+ "distro": sysinfo["dist"][0],
+ "distro_version": sysinfo["dist"][1],
+ "distro_release": sysinfo["dist"][2],
+ "platform": self.platform_type,
+ "public_ssh_keys": self.get_public_ssh_keys(),
+ "python_version": sysinfo["python"],
+ "instance-id": instance_id,
+ "instance_id": instance_id,
+ "kernel_release": sysinfo["uname"][2],
+ "local-hostname": local_hostname,
+ "local_hostname": local_hostname,
+ "machine": sysinfo["uname"][4],
+ "region": self.region,
+ "subplatform": self.subplatform,
+ "system_platform": sysinfo["platform"],
+ "variant": sysinfo["variant"],
+ }
+ }
def clear_cached_attrs(self, attr_defaults=()):
"""Reset any cached metadata attributes to datasource defaults.
@@ -337,48 +363,51 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
@return True on successful write, False otherwise.
"""
- if hasattr(self, '_crawled_metadata'):
+ if hasattr(self, "_crawled_metadata"):
# Any datasource with _crawled_metadata will best represent
# most recent, 'raw' metadata
crawled_metadata = copy.deepcopy(
- getattr(self, '_crawled_metadata'))
- crawled_metadata.pop('user-data', None)
- crawled_metadata.pop('vendor-data', None)
- instance_data = {'ds': crawled_metadata}
+ getattr(self, "_crawled_metadata")
+ )
+ crawled_metadata.pop("user-data", None)
+ crawled_metadata.pop("vendor-data", None)
+ instance_data = {"ds": crawled_metadata}
else:
- instance_data = {'ds': {'meta_data': self.metadata}}
- if hasattr(self, 'network_json'):
- network_json = getattr(self, 'network_json')
+ instance_data = {"ds": {"meta_data": self.metadata}}
+ if hasattr(self, "network_json"):
+ network_json = getattr(self, "network_json")
if network_json != UNSET:
- instance_data['ds']['network_json'] = network_json
- if hasattr(self, 'ec2_metadata'):
- ec2_metadata = getattr(self, 'ec2_metadata')
+ instance_data["ds"]["network_json"] = network_json
+ if hasattr(self, "ec2_metadata"):
+ ec2_metadata = getattr(self, "ec2_metadata")
if ec2_metadata != UNSET:
- instance_data['ds']['ec2_metadata'] = ec2_metadata
- instance_data['ds']['_doc'] = EXPERIMENTAL_TEXT
+ instance_data["ds"]["ec2_metadata"] = ec2_metadata
+ instance_data["ds"]["_doc"] = EXPERIMENTAL_TEXT
# Add merged cloud.cfg and sys info for jinja templates and cli query
- instance_data['merged_cfg'] = copy.deepcopy(self.sys_cfg)
- instance_data['merged_cfg']['_doc'] = (
- 'Merged cloud-init system config from /etc/cloud/cloud.cfg and'
- ' /etc/cloud/cloud.cfg.d/')
- instance_data['sys_info'] = util.system_info()
- instance_data.update(
- self._get_standardized_metadata(instance_data))
+ instance_data["merged_cfg"] = copy.deepcopy(self.sys_cfg)
+ instance_data["merged_cfg"]["_doc"] = (
+ "Merged cloud-init system config from /etc/cloud/cloud.cfg and"
+ " /etc/cloud/cloud.cfg.d/"
+ )
+ instance_data["sys_info"] = util.system_info()
+ instance_data.update(self._get_standardized_metadata(instance_data))
try:
# Process content base64encoding unserializable values
content = util.json_dumps(instance_data)
# Strip base64: prefix and set base64_encoded_keys list.
processed_data = process_instance_metadata(
json.loads(content),
- sensitive_keys=self.sensitive_metadata_keys)
+ sensitive_keys=self.sensitive_metadata_keys,
+ )
except TypeError as e:
- LOG.warning('Error persisting instance-data.json: %s', str(e))
+ LOG.warning("Error persisting instance-data.json: %s", str(e))
return False
except UnicodeDecodeError as e:
- LOG.warning('Error persisting instance-data.json: %s', str(e))
+ LOG.warning("Error persisting instance-data.json: %s", str(e))
return False
- json_sensitive_file = os.path.join(self.paths.run_dir,
- INSTANCE_JSON_SENSITIVE_FILE)
+ json_sensitive_file = os.path.join(
+ self.paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE
+ )
write_json(json_sensitive_file, processed_data, mode=0o600)
json_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_FILE)
# World readable
@@ -388,8 +417,9 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
def _get_data(self):
"""Walk metadata sources, process crawled data and save attributes."""
raise NotImplementedError(
- 'Subclasses of DataSource must implement _get_data which'
- ' sets self.metadata, vendordata_raw and userdata_raw.')
+ "Subclasses of DataSource must implement _get_data which"
+ " sets self.metadata, vendordata_raw and userdata_raw."
+ )
def get_url_params(self):
"""Return the Datasource's prefered url_read parameters.
@@ -404,37 +434,50 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
max_wait = int(self.ds_cfg.get("max_wait", self.url_max_wait))
except ValueError:
util.logexc(
- LOG, "Config max_wait '%s' is not an int, using default '%s'",
- self.ds_cfg.get("max_wait"), max_wait)
+ LOG,
+ "Config max_wait '%s' is not an int, using default '%s'",
+ self.ds_cfg.get("max_wait"),
+ max_wait,
+ )
timeout = self.url_timeout
try:
- timeout = max(
- 0, int(self.ds_cfg.get("timeout", self.url_timeout)))
+ timeout = max(0, int(self.ds_cfg.get("timeout", self.url_timeout)))
except ValueError:
timeout = self.url_timeout
util.logexc(
- LOG, "Config timeout '%s' is not an int, using default '%s'",
- self.ds_cfg.get('timeout'), timeout)
+ LOG,
+ "Config timeout '%s' is not an int, using default '%s'",
+ self.ds_cfg.get("timeout"),
+ timeout,
+ )
retries = self.url_retries
try:
retries = int(self.ds_cfg.get("retries", self.url_retries))
except Exception:
util.logexc(
- LOG, "Config retries '%s' is not an int, using default '%s'",
- self.ds_cfg.get('retries'), retries)
+ LOG,
+ "Config retries '%s' is not an int, using default '%s'",
+ self.ds_cfg.get("retries"),
+ retries,
+ )
sec_between_retries = self.url_sec_between_retries
try:
- sec_between_retries = int(self.ds_cfg.get(
- "sec_between_retries",
- self.url_sec_between_retries))
+ sec_between_retries = int(
+ self.ds_cfg.get(
+ "sec_between_retries", self.url_sec_between_retries
+ )
+ )
except Exception:
util.logexc(
- LOG, "Config sec_between_retries '%s' is not an int,"
- " using default '%s'",
- self.ds_cfg.get("sec_between_retries"), sec_between_retries)
+ LOG,
+ "Config sec_between_retries '%s' is not an int,"
+ " using default '%s'",
+ self.ds_cfg.get("sec_between_retries"),
+ sec_between_retries,
+ )
return URLParams(max_wait, timeout, retries, sec_between_retries)
@@ -462,13 +505,13 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
self._fallback_interface = net.find_fallback_nic()
if self._fallback_interface is None:
LOG.warning(
- "Did not find a fallback interface on %s.",
- self.cloud_name)
+ "Did not find a fallback interface on %s.", self.cloud_name
+ )
return self._fallback_interface
@property
def platform_type(self):
- if not hasattr(self, '_platform_type'):
+ if not hasattr(self, "_platform_type"):
# Handle upgrade path where pickled datasource has no _platform.
self._platform_type = self.dsname.lower()
if not self._platform_type:
@@ -487,7 +530,7 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
nocloud: seed-dir (/seed/dir/path)
lxd: nocloud (/seed/dir/path)
"""
- if not hasattr(self, '_subplatform'):
+ if not hasattr(self, "_subplatform"):
# Handle upgrade path where pickled datasource has no _platform.
self._subplatform = self._get_subplatform()
if not self._subplatform:
@@ -496,8 +539,8 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
def _get_subplatform(self):
"""Subclasses should implement to return a "slug (detail)" string."""
- if hasattr(self, 'metadata_address'):
- return 'metadata (%s)' % getattr(self, 'metadata_address')
+ if hasattr(self, "metadata_address"):
+ return "metadata (%s)" % getattr(self, "metadata_address")
return METADATA_UNKNOWN
@property
@@ -516,8 +559,10 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
else:
self._cloud_name = self._get_cloud_name().lower()
LOG.debug(
- 'Ignoring metadata provided key %s: non-string type %s',
- METADATA_CLOUD_NAME_KEY, type(cloud_name))
+ "Ignoring metadata provided key %s: non-string type %s",
+ METADATA_CLOUD_NAME_KEY,
+ type(cloud_name),
+ )
else:
self._cloud_name = self._get_cloud_name().lower()
return self._cloud_name
@@ -534,8 +579,8 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
def launch_index(self):
if not self.metadata:
return None
- if 'launch-index' in self.metadata:
- return self.metadata['launch-index']
+ if "launch-index" in self.metadata:
+ return self.metadata["launch-index"]
return None
def _filter_xdata(self, processed_ud):
@@ -567,7 +612,7 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
return {}
def get_public_ssh_keys(self):
- return normalize_pubkey_data(self.metadata.get('public-keys'))
+ return normalize_pubkey_data(self.metadata.get("public-keys"))
def publish_host_keys(self, hostkeys):
"""Publish the public SSH host keys (found in /etc/ssh/*.pub).
@@ -589,7 +634,7 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
if not short_name.startswith(nfrom):
continue
for nto in tlist:
- cand = "/dev/%s%s" % (nto, short_name[len(nfrom):])
+ cand = "/dev/%s%s" % (nto, short_name[len(nfrom) :])
if os.path.exists(cand):
return cand
return None
@@ -614,20 +659,21 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
@property
def availability_zone(self):
top_level_az = self.metadata.get(
- 'availability-zone', self.metadata.get('availability_zone'))
+ "availability-zone", self.metadata.get("availability_zone")
+ )
if top_level_az:
return top_level_az
- return self.metadata.get('placement', {}).get('availability-zone')
+ return self.metadata.get("placement", {}).get("availability-zone")
@property
def region(self):
- return self.metadata.get('region')
+ return self.metadata.get("region")
def get_instance_id(self):
- if not self.metadata or 'instance-id' not in self.metadata:
+ if not self.metadata or "instance-id" not in self.metadata:
# Return a magic not really instance id string
return "iid-datasource"
- return str(self.metadata['instance-id'])
+ return str(self.metadata["instance-id"])
def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False):
"""Get hostname or fqdn from the datasource. Look it up if desired.
@@ -645,7 +691,7 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
defhost = "localhost"
domain = defdomain
- if not self.metadata or not self.metadata.get('local-hostname'):
+ if not self.metadata or not self.metadata.get("local-hostname"):
if metadata_only:
return None
# this is somewhat questionable really.
@@ -666,14 +712,14 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
else:
# if there is an ipv4 address in 'local-hostname', then
# make up a hostname (LP: #475354) in format ip-xx.xx.xx.xx
- lhost = self.metadata['local-hostname']
+ lhost = self.metadata["local-hostname"]
if net.is_ipv4_address(lhost):
toks = []
if resolve_ip:
toks = util.gethostbyaddr(lhost)
if toks:
- toks = str(toks).split('.')
+ toks = str(toks).split(".")
else:
toks = ["ip-%s" % lhost.replace(".", "-")]
else:
@@ -681,7 +727,7 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
if len(toks) > 1:
hostname = toks[0]
- domain = '.'.join(toks[1:])
+ domain = ".".join(toks[1:])
else:
hostname = toks[0]
@@ -696,7 +742,10 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
def get_supported_events(self, source_event_types: List[EventType]):
supported_events = {} # type: Dict[EventScope, set]
for event in source_event_types:
- for update_scope, update_events in self.supported_update_events.items(): # noqa: E501
+ for (
+ update_scope,
+ update_events,
+ ) in self.supported_update_events.items():
if event in update_events:
if not supported_events.get(update_scope):
supported_events[update_scope] = set()
@@ -723,18 +772,22 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
LOG.debug(
"Update datasource metadata and %s config due to events: %s",
scope.value,
- ', '.join([event.value for event in matched_events]))
+ ", ".join([event.value for event in matched_events]),
+ )
# Each datasource has a cached config property which needs clearing
# Once cleared that config property will be regenerated from
# current metadata.
- self.clear_cached_attrs((('_%s_config' % scope, UNSET),))
+ self.clear_cached_attrs((("_%s_config" % scope, UNSET),))
if supported_events:
self.clear_cached_attrs()
result = self.get_data()
if result:
return True
- LOG.debug("Datasource %s not updated for events: %s", self,
- ', '.join([event.value for event in source_event_types]))
+ LOG.debug(
+ "Datasource %s not updated for events: %s",
+ self,
+ ", ".join([event.value for event in source_event_types]),
+ )
return False
def check_instance_id(self, sys_cfg):
@@ -756,8 +809,9 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
if candidate in valid:
return candidate
else:
- LOG.warning("invalid dsmode '%s', using default=%s",
- candidate, default)
+ LOG.warning(
+ "invalid dsmode '%s', using default=%s", candidate, default
+ )
return default
return default
@@ -836,7 +890,8 @@ def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter):
name="search-%s" % name.replace("DataSource", ""),
description="searching for %s data from %s" % (mode, name),
message="no %s data found from %s" % (mode, name),
- parent=reporter)
+ parent=reporter,
+ )
try:
with myrep:
LOG.debug("Seeing if we can get any data from %s", cls)
@@ -849,8 +904,9 @@ def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter):
except Exception:
util.logexc(LOG, "Getting data from %s failed", cls)
- msg = ("Did not find any data source,"
- " searched classes: (%s)") % (", ".join(ds_names))
+ msg = "Did not find any data source, searched classes: (%s)" % ", ".join(
+ ds_names
+ )
raise DataSourceNotFoundException(msg)
@@ -860,15 +916,19 @@ def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter):
# Return an ordered list of classes that match (if any)
def list_sources(cfg_list, depends, pkg_list):
src_list = []
- LOG.debug(("Looking for data source in: %s,"
- " via packages %s that matches dependencies %s"),
- cfg_list, pkg_list, depends)
+ LOG.debug(
+ "Looking for data source in: %s,"
+ " via packages %s that matches dependencies %s",
+ cfg_list,
+ pkg_list,
+ depends,
+ )
for ds_name in cfg_list:
if not ds_name.startswith(DS_PREFIX):
- ds_name = '%s%s' % (DS_PREFIX, ds_name)
- m_locs, _looked_locs = importer.find_module(ds_name,
- pkg_list,
- ['get_datasource_list'])
+ ds_name = "%s%s" % (DS_PREFIX, ds_name)
+ m_locs, _looked_locs = importer.find_module(
+ ds_name, pkg_list, ["get_datasource_list"]
+ )
for m_loc in m_locs:
mod = importer.import_module(m_loc)
lister = getattr(mod, "get_datasource_list")
@@ -879,7 +939,7 @@ def list_sources(cfg_list, depends, pkg_list):
return src_list
-def instance_id_matches_system_uuid(instance_id, field='system-uuid'):
+def instance_id_matches_system_uuid(instance_id, field="system-uuid"):
    # quickly determine (local check only) if self.instance_id is still valid
# we check kernel command line or files.
if not instance_id:
@@ -929,8 +989,7 @@ def convert_vendordata(data, recurse=True):
return copy.deepcopy(data)
if isinstance(data, dict):
if recurse is True:
- return convert_vendordata(data.get('cloud-init'),
- recurse=False)
+ return convert_vendordata(data.get("cloud-init"), recurse=False)
raise ValueError("vendordata['cloud-init'] cannot be dict")
raise ValueError("Unknown data type for vendordata: %s" % type(data))
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index a5ac1d57..50058fe0 100755
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -6,27 +6,28 @@ import os
import re
import socket
import struct
-import time
import textwrap
+import time
import zlib
-from errno import ENOENT
-
-from cloudinit.settings import CFG_BUILTIN
-from cloudinit.net import dhcp
-from cloudinit import stages
-from cloudinit import temp_utils
from contextlib import contextmanager
+from datetime import datetime
+from errno import ENOENT
from xml.etree import ElementTree
from xml.sax.saxutils import escape
-from cloudinit import subp
-from cloudinit import url_helper
-from cloudinit import util
-from cloudinit import version
-from cloudinit import distros
-from cloudinit.reporting import events
+from cloudinit import (
+ distros,
+ stages,
+ subp,
+ temp_utils,
+ url_helper,
+ util,
+ version,
+)
+from cloudinit.net import dhcp
from cloudinit.net.dhcp import EphemeralDHCPv4
-from datetime import datetime
+from cloudinit.reporting import events
+from cloudinit.settings import CFG_BUILTIN
LOG = logging.getLogger(__name__)
@@ -34,10 +35,10 @@ LOG = logging.getLogger(__name__)
# value is applied if the endpoint can't be found within a lease file
DEFAULT_WIRESERVER_ENDPOINT = "a8:3f:81:10"
-BOOT_EVENT_TYPE = 'boot-telemetry'
-SYSTEMINFO_EVENT_TYPE = 'system-info'
-DIAGNOSTIC_EVENT_TYPE = 'diagnostic'
-COMPRESSED_EVENT_TYPE = 'compressed'
+BOOT_EVENT_TYPE = "boot-telemetry"
+SYSTEMINFO_EVENT_TYPE = "system-info"
+DIAGNOSTIC_EVENT_TYPE = "diagnostic"
+COMPRESSED_EVENT_TYPE = "compressed"
# Maximum number of bytes of the cloud-init.log file that can be dumped to KVP
# at once. This number is based on the analysis done on a large sample of
# cloud-init.log files where the P95 of the file sizes was 537KB and the time
@@ -45,25 +46,29 @@ COMPRESSED_EVENT_TYPE = 'compressed'
MAX_LOG_TO_KVP_LENGTH = 512000
# File to store the last byte of cloud-init.log that was pushed to KVP. This
# file will be deleted with every VM reboot.
-LOG_PUSHED_TO_KVP_INDEX_FILE = '/run/cloud-init/log_pushed_to_kvp_index'
+LOG_PUSHED_TO_KVP_INDEX_FILE = "/run/cloud-init/log_pushed_to_kvp_index"
azure_ds_reporter = events.ReportEventStack(
name="azure-ds",
description="initialize reporter for azure ds",
- reporting_enabled=True)
+ reporting_enabled=True,
+)
DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE = (
- 'The VM encountered an error during deployment. '
- 'Please visit https://aka.ms/linuxprovisioningerror '
- 'for more information on remediation.')
+ "The VM encountered an error during deployment. "
+ "Please visit https://aka.ms/linuxprovisioningerror "
+ "for more information on remediation."
+)
def azure_ds_telemetry_reporter(func):
def impl(*args, **kwargs):
with events.ReportEventStack(
- name=func.__name__,
- description=func.__name__,
- parent=azure_ds_reporter):
+ name=func.__name__,
+ description=func.__name__,
+ parent=azure_ds_reporter,
+ ):
return func(*args, **kwargs)
+
return impl
@@ -79,16 +84,16 @@ def is_byte_swapped(previous_id, current_id):
def swap_bytestring(s, width=2):
dd = [byte for byte in textwrap.wrap(s, 2)]
dd.reverse()
- return ''.join(dd)
+ return "".join(dd)
- parts = current_id.split('-')
- swapped_id = '-'.join(
+ parts = current_id.split("-")
+ swapped_id = "-".join(
[
swap_bytestring(parts[0]),
swap_bytestring(parts[1]),
swap_bytestring(parts[2]),
parts[3],
- parts[4]
+ parts[4],
]
)
@@ -98,31 +103,29 @@ def is_byte_swapped(previous_id, current_id):
@azure_ds_telemetry_reporter
def get_boot_telemetry():
"""Report timestamps related to kernel initialization and systemd
- activation of cloud-init"""
+ activation of cloud-init"""
if not distros.uses_systemd():
- raise RuntimeError(
- "distro not using systemd, skipping boot telemetry")
+ raise RuntimeError("distro not using systemd, skipping boot telemetry")
LOG.debug("Collecting boot telemetry")
try:
kernel_start = float(time.time()) - float(util.uptime())
except ValueError as e:
- raise RuntimeError(
- "Failed to determine kernel start timestamp"
- ) from e
+ raise RuntimeError("Failed to determine kernel start timestamp") from e
try:
- out, _ = subp.subp(['/bin/systemctl',
- 'show', '-p',
- 'UserspaceTimestampMonotonic'],
- capture=True)
+ out, _ = subp.subp(
+ ["/bin/systemctl", "show", "-p", "UserspaceTimestampMonotonic"],
+ capture=True,
+ )
tsm = None
- if out and '=' in out:
+ if out and "=" in out:
tsm = out.split("=")[1]
if not tsm:
- raise RuntimeError("Failed to parse "
- "UserspaceTimestampMonotonic from systemd")
+ raise RuntimeError(
+ "Failed to parse UserspaceTimestampMonotonic from systemd"
+ )
user_start = kernel_start + (float(tsm) / 1000000)
except subp.ProcessExecutionError as e:
@@ -135,16 +138,23 @@ def get_boot_telemetry():
) from e
try:
- out, _ = subp.subp(['/bin/systemctl', 'show',
- 'cloud-init-local', '-p',
- 'InactiveExitTimestampMonotonic'],
- capture=True)
+ out, _ = subp.subp(
+ [
+ "/bin/systemctl",
+ "show",
+ "cloud-init-local",
+ "-p",
+ "InactiveExitTimestampMonotonic",
+ ],
+ capture=True,
+ )
tsm = None
- if out and '=' in out:
+ if out and "=" in out:
tsm = out.split("=")[1]
if not tsm:
- raise RuntimeError("Failed to parse "
- "InactiveExitTimestampMonotonic from systemd")
+ raise RuntimeError(
+ "Failed to parse InactiveExitTimestampMonotonic from systemd"
+ )
cloudinit_activation = kernel_start + (float(tsm) / 1000000)
except subp.ProcessExecutionError as e:
@@ -158,12 +168,16 @@ def get_boot_telemetry():
) from e
evt = events.ReportingEvent(
- BOOT_EVENT_TYPE, 'boot-telemetry',
- "kernel_start=%s user_start=%s cloudinit_activation=%s" %
- (datetime.utcfromtimestamp(kernel_start).isoformat() + 'Z',
- datetime.utcfromtimestamp(user_start).isoformat() + 'Z',
- datetime.utcfromtimestamp(cloudinit_activation).isoformat() + 'Z'),
- events.DEFAULT_EVENT_ORIGIN)
+ BOOT_EVENT_TYPE,
+ "boot-telemetry",
+ "kernel_start=%s user_start=%s cloudinit_activation=%s"
+ % (
+ datetime.utcfromtimestamp(kernel_start).isoformat() + "Z",
+ datetime.utcfromtimestamp(user_start).isoformat() + "Z",
+ datetime.utcfromtimestamp(cloudinit_activation).isoformat() + "Z",
+ ),
+ events.DEFAULT_EVENT_ORIGIN,
+ )
events.report_event(evt)
# return the event for unit testing purpose
@@ -175,13 +189,22 @@ def get_system_info():
"""Collect and report system information"""
info = util.system_info()
evt = events.ReportingEvent(
- SYSTEMINFO_EVENT_TYPE, 'system information',
+ SYSTEMINFO_EVENT_TYPE,
+ "system information",
"cloudinit_version=%s, kernel_version=%s, variant=%s, "
"distro_name=%s, distro_version=%s, flavor=%s, "
- "python_version=%s" %
- (version.version_string(), info['release'], info['variant'],
- info['dist'][0], info['dist'][1], info['dist'][2],
- info['python']), events.DEFAULT_EVENT_ORIGIN)
+ "python_version=%s"
+ % (
+ version.version_string(),
+ info["release"],
+ info["variant"],
+ info["dist"][0],
+ info["dist"][1],
+ info["dist"][2],
+ info["python"],
+ ),
+ events.DEFAULT_EVENT_ORIGIN,
+ )
events.report_event(evt)
# return the event for unit testing purpose
@@ -189,13 +212,17 @@ def get_system_info():
def report_diagnostic_event(
- msg: str, *, logger_func=None) -> events.ReportingEvent:
+ msg: str, *, logger_func=None
+) -> events.ReportingEvent:
"""Report a diagnostic event"""
if callable(logger_func):
logger_func(msg)
evt = events.ReportingEvent(
- DIAGNOSTIC_EVENT_TYPE, 'diagnostic message',
- msg, events.DEFAULT_EVENT_ORIGIN)
+ DIAGNOSTIC_EVENT_TYPE,
+ "diagnostic message",
+ msg,
+ events.DEFAULT_EVENT_ORIGIN,
+ )
events.report_event(evt, excluded_handler_types={"log"})
# return the event for unit testing purpose
@@ -205,21 +232,26 @@ def report_diagnostic_event(
def report_compressed_event(event_name, event_content):
"""Report a compressed event"""
compressed_data = base64.encodebytes(zlib.compress(event_content))
- event_data = {"encoding": "gz+b64",
- "data": compressed_data.decode('ascii')}
+ event_data = {
+ "encoding": "gz+b64",
+ "data": compressed_data.decode("ascii"),
+ }
evt = events.ReportingEvent(
- COMPRESSED_EVENT_TYPE, event_name,
+ COMPRESSED_EVENT_TYPE,
+ event_name,
json.dumps(event_data),
- events.DEFAULT_EVENT_ORIGIN)
- events.report_event(evt,
- excluded_handler_types={"log", "print", "webhook"})
+ events.DEFAULT_EVENT_ORIGIN,
+ )
+ events.report_event(
+ evt, excluded_handler_types={"log", "print", "webhook"}
+ )
# return the event for unit testing purpose
return evt
@azure_ds_telemetry_reporter
-def push_log_to_kvp(file_name=CFG_BUILTIN['def_log_file']):
+def push_log_to_kvp(file_name=CFG_BUILTIN["def_log_file"]):
"""Push a portion of cloud-init.log file or the whole file to KVP
based on the file size.
The first time this function is called after VM boot, it will push the last
@@ -237,23 +269,26 @@ def push_log_to_kvp(file_name=CFG_BUILTIN['def_log_file']):
report_diagnostic_event(
"Dumping last {0} bytes of cloud-init.log file to KVP starting"
" from index: {1}".format(f.tell() - seek_index, seek_index),
- logger_func=LOG.debug)
+ logger_func=LOG.debug,
+ )
f.seek(seek_index, os.SEEK_SET)
report_compressed_event("cloud-init.log", f.read())
util.write_file(LOG_PUSHED_TO_KVP_INDEX_FILE, str(f.tell()))
except Exception as ex:
report_diagnostic_event(
"Exception when dumping log file: %s" % repr(ex),
- logger_func=LOG.warning)
+ logger_func=LOG.warning,
+ )
LOG.debug("Dumping dmesg log to KVP")
try:
- out, _ = subp.subp(['dmesg'], decode=False, capture=True)
+ out, _ = subp.subp(["dmesg"], decode=False, capture=True)
report_compressed_event("dmesg", out)
except Exception as ex:
report_diagnostic_event(
"Exception when dumping dmesg log: %s" % repr(ex),
- logger_func=LOG.warning)
+ logger_func=LOG.warning,
+ )
@azure_ds_telemetry_reporter
@@ -263,16 +298,20 @@ def get_last_log_byte_pushed_to_kvp_index():
return int(f.read())
except IOError as e:
if e.errno != ENOENT:
- report_diagnostic_event("Reading LOG_PUSHED_TO_KVP_INDEX_FILE"
- " failed: %s." % repr(e),
- logger_func=LOG.warning)
+ report_diagnostic_event(
+ "Reading LOG_PUSHED_TO_KVP_INDEX_FILE failed: %s." % repr(e),
+ logger_func=LOG.warning,
+ )
except ValueError as e:
- report_diagnostic_event("Invalid value in LOG_PUSHED_TO_KVP_INDEX_FILE"
- ": %s." % repr(e),
- logger_func=LOG.warning)
+ report_diagnostic_event(
+ "Invalid value in LOG_PUSHED_TO_KVP_INDEX_FILE: %s." % repr(e),
+ logger_func=LOG.warning,
+ )
except Exception as e:
- report_diagnostic_event("Failed to get the last log byte pushed to KVP"
- ": %s." % repr(e), logger_func=LOG.warning)
+ report_diagnostic_event(
+ "Failed to get the last log byte pushed to KVP: %s." % repr(e),
+ logger_func=LOG.warning,
+ )
return 0
@@ -306,28 +345,31 @@ def http_with_retries(url, **kwargs) -> str:
sleep_duration_between_retries = 5
periodic_logging_attempts = 12
- if 'timeout' not in kwargs:
- kwargs['timeout'] = default_readurl_timeout
+ if "timeout" not in kwargs:
+ kwargs["timeout"] = default_readurl_timeout
# remove kwargs that cause url_helper.readurl to retry,
# since we are already implementing our own retry logic.
- if kwargs.pop('retries', None):
+ if kwargs.pop("retries", None):
LOG.warning(
- 'Ignoring retries kwarg passed in for '
- 'communication with Azure endpoint.')
- if kwargs.pop('infinite', None):
+ "Ignoring retries kwarg passed in for "
+ "communication with Azure endpoint."
+ )
+ if kwargs.pop("infinite", None):
LOG.warning(
- 'Ignoring infinite kwarg passed in for communication '
- 'with Azure endpoint.')
+ "Ignoring infinite kwarg passed in for communication "
+ "with Azure endpoint."
+ )
for attempt in range(1, max_readurl_attempts + 1):
try:
ret = url_helper.readurl(url, **kwargs)
report_diagnostic_event(
- 'Successful HTTP request with Azure endpoint %s after '
- '%d attempts' % (url, attempt),
- logger_func=LOG.debug)
+ "Successful HTTP request with Azure endpoint %s after "
+ "%d attempts" % (url, attempt),
+ logger_func=LOG.debug,
+ )
return ret
@@ -335,20 +377,20 @@ def http_with_retries(url, **kwargs) -> str:
exc = e
if attempt % periodic_logging_attempts == 0:
report_diagnostic_event(
- 'Failed HTTP request with Azure endpoint %s during '
- 'attempt %d with exception: %s' %
- (url, attempt, e),
- logger_func=LOG.debug)
+ "Failed HTTP request with Azure endpoint %s during "
+ "attempt %d with exception: %s" % (url, attempt, e),
+ logger_func=LOG.debug,
+ )
time.sleep(sleep_duration_between_retries)
raise exc
def build_minimal_ovf(
- username: str,
- hostname: str,
- disableSshPwd: str) -> bytes:
- OVF_ENV_TEMPLATE = textwrap.dedent('''\
+ username: str, hostname: str, disableSshPwd: str
+) -> bytes:
+ OVF_ENV_TEMPLATE = textwrap.dedent(
+ """\
<ns0:Environment xmlns:ns0="http://schemas.dmtf.org/ovf/environment/1"
xmlns:ns1="http://schemas.microsoft.com/windowsazure"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
@@ -370,19 +412,19 @@ def build_minimal_ovf(
</ns1:PlatformSettings>
</ns1:PlatformSettingsSection>
</ns0:Environment>
- ''')
+ """
+ )
ret = OVF_ENV_TEMPLATE.format(
- username=username,
- hostname=hostname,
- disableSshPwd=disableSshPwd)
- return ret.encode('utf-8')
+ username=username, hostname=hostname, disableSshPwd=disableSshPwd
+ )
+ return ret.encode("utf-8")
class AzureEndpointHttpClient:
headers = {
- 'x-ms-agent-name': 'WALinuxAgent',
- 'x-ms-version': '2012-11-30',
+ "x-ms-agent-name": "WALinuxAgent",
+ "x-ms-version": "2012-11-30",
}
def __init__(self, certificate):
@@ -403,8 +445,7 @@ class AzureEndpointHttpClient:
if extra_headers is not None:
headers = self.headers.copy()
headers.update(extra_headers)
- return http_with_retries(
- url, data=data, headers=headers)
+ return http_with_retries(url, data=data, headers=headers)
class InvalidGoalStateXMLException(Exception):
@@ -412,12 +453,12 @@ class InvalidGoalStateXMLException(Exception):
class GoalState:
-
def __init__(
- self,
- unparsed_xml: str,
- azure_endpoint_client: AzureEndpointHttpClient,
- need_certificate: bool = True) -> None:
+ self,
+ unparsed_xml: str,
+ azure_endpoint_client: AzureEndpointHttpClient,
+ need_certificate: bool = True,
+ ) -> None:
"""Parses a GoalState XML string and returns a GoalState object.
@param unparsed_xml: string representing a GoalState XML.
@@ -431,36 +472,41 @@ class GoalState:
self.root = ElementTree.fromstring(unparsed_xml)
except ElementTree.ParseError as e:
report_diagnostic_event(
- 'Failed to parse GoalState XML: %s' % e,
- logger_func=LOG.warning)
+ "Failed to parse GoalState XML: %s" % e,
+ logger_func=LOG.warning,
+ )
raise
- self.container_id = self._text_from_xpath('./Container/ContainerId')
+ self.container_id = self._text_from_xpath("./Container/ContainerId")
self.instance_id = self._text_from_xpath(
- './Container/RoleInstanceList/RoleInstance/InstanceId')
- self.incarnation = self._text_from_xpath('./Incarnation')
+ "./Container/RoleInstanceList/RoleInstance/InstanceId"
+ )
+ self.incarnation = self._text_from_xpath("./Incarnation")
for attr in ("container_id", "instance_id", "incarnation"):
if getattr(self, attr) is None:
- msg = 'Missing %s in GoalState XML' % attr
+ msg = "Missing %s in GoalState XML" % attr
report_diagnostic_event(msg, logger_func=LOG.warning)
raise InvalidGoalStateXMLException(msg)
self.certificates_xml = None
url = self._text_from_xpath(
- './Container/RoleInstanceList/RoleInstance'
- '/Configuration/Certificates')
+ "./Container/RoleInstanceList/RoleInstance"
+ "/Configuration/Certificates"
+ )
if url is not None and need_certificate:
with events.ReportEventStack(
- name="get-certificates-xml",
- description="get certificates xml",
- parent=azure_ds_reporter):
- self.certificates_xml = \
- self.azure_endpoint_client.get(
- url, secure=True).contents
+ name="get-certificates-xml",
+ description="get certificates xml",
+ parent=azure_ds_reporter,
+ ):
+ self.certificates_xml = self.azure_endpoint_client.get(
+ url, secure=True
+ ).contents
if self.certificates_xml is None:
raise InvalidGoalStateXMLException(
- 'Azure endpoint returned empty certificates xml.')
+ "Azure endpoint returned empty certificates xml."
+ )
def _text_from_xpath(self, xpath):
element = self.root.find(xpath)
@@ -472,8 +518,8 @@ class GoalState:
class OpenSSLManager:
certificate_names = {
- 'private_key': 'TransportPrivate.pem',
- 'certificate': 'TransportCert.pem',
+ "private_key": "TransportPrivate.pem",
+ "certificate": "TransportCert.pem",
}
def __init__(self):
@@ -494,35 +540,47 @@ class OpenSSLManager:
@azure_ds_telemetry_reporter
def generate_certificate(self):
- LOG.debug('Generating certificate for communication with fabric...')
+ LOG.debug("Generating certificate for communication with fabric...")
if self.certificate is not None:
- LOG.debug('Certificate already generated.')
+ LOG.debug("Certificate already generated.")
return
with cd(self.tmpdir):
- subp.subp([
- 'openssl', 'req', '-x509', '-nodes', '-subj',
- '/CN=LinuxTransport', '-days', '32768', '-newkey', 'rsa:2048',
- '-keyout', self.certificate_names['private_key'],
- '-out', self.certificate_names['certificate'],
- ])
- certificate = ''
- for line in open(self.certificate_names['certificate']):
+ subp.subp(
+ [
+ "openssl",
+ "req",
+ "-x509",
+ "-nodes",
+ "-subj",
+ "/CN=LinuxTransport",
+ "-days",
+ "32768",
+ "-newkey",
+ "rsa:2048",
+ "-keyout",
+ self.certificate_names["private_key"],
+ "-out",
+ self.certificate_names["certificate"],
+ ]
+ )
+ certificate = ""
+ for line in open(self.certificate_names["certificate"]):
if "CERTIFICATE" not in line:
certificate += line.rstrip()
self.certificate = certificate
- LOG.debug('New certificate generated.')
+ LOG.debug("New certificate generated.")
@staticmethod
@azure_ds_telemetry_reporter
def _run_x509_action(action, cert):
- cmd = ['openssl', 'x509', '-noout', action]
+ cmd = ["openssl", "x509", "-noout", action]
result, _ = subp.subp(cmd, data=cert)
return result
@azure_ds_telemetry_reporter
def _get_ssh_key_from_cert(self, certificate):
- pub_key = self._run_x509_action('-pubkey', certificate)
- keygen_cmd = ['ssh-keygen', '-i', '-m', 'PKCS8', '-f', '/dev/stdin']
+ pub_key = self._run_x509_action("-pubkey", certificate)
+ keygen_cmd = ["ssh-keygen", "-i", "-m", "PKCS8", "-f", "/dev/stdin"]
ssh_key, _ = subp.subp(keygen_cmd, data=pub_key)
return ssh_key
@@ -535,48 +593,50 @@ class OpenSSLManager:
Azure control plane passes that fingerprint as so:
'073E19D14D1C799224C6A0FD8DDAB6A8BF27D473'
"""
- raw_fp = self._run_x509_action('-fingerprint', certificate)
- eq = raw_fp.find('=')
- octets = raw_fp[eq+1:-1].split(':')
- return ''.join(octets)
+ raw_fp = self._run_x509_action("-fingerprint", certificate)
+ eq = raw_fp.find("=")
+ octets = raw_fp[eq + 1 : -1].split(":")
+ return "".join(octets)
@azure_ds_telemetry_reporter
def _decrypt_certs_from_xml(self, certificates_xml):
"""Decrypt the certificates XML document using the our private key;
- return the list of certs and private keys contained in the doc.
+ return the list of certs and private keys contained in the doc.
"""
- tag = ElementTree.fromstring(certificates_xml).find('.//Data')
+ tag = ElementTree.fromstring(certificates_xml).find(".//Data")
certificates_content = tag.text
lines = [
- b'MIME-Version: 1.0',
+ b"MIME-Version: 1.0",
b'Content-Disposition: attachment; filename="Certificates.p7m"',
b'Content-Type: application/x-pkcs7-mime; name="Certificates.p7m"',
- b'Content-Transfer-Encoding: base64',
- b'',
- certificates_content.encode('utf-8'),
+ b"Content-Transfer-Encoding: base64",
+ b"",
+ certificates_content.encode("utf-8"),
]
with cd(self.tmpdir):
out, _ = subp.subp(
- 'openssl cms -decrypt -in /dev/stdin -inkey'
- ' {private_key} -recip {certificate} | openssl pkcs12 -nodes'
- ' -password pass:'.format(**self.certificate_names),
- shell=True, data=b'\n'.join(lines))
+ "openssl cms -decrypt -in /dev/stdin -inkey"
+ " {private_key} -recip {certificate} | openssl pkcs12 -nodes"
+ " -password pass:".format(**self.certificate_names),
+ shell=True,
+ data=b"\n".join(lines),
+ )
return out
@azure_ds_telemetry_reporter
def parse_certificates(self, certificates_xml):
"""Given the Certificates XML document, return a dictionary of
- fingerprints and associated SSH keys derived from the certs."""
+ fingerprints and associated SSH keys derived from the certs."""
out = self._decrypt_certs_from_xml(certificates_xml)
current = []
keys = {}
for line in out.splitlines():
current.append(line)
- if re.match(r'[-]+END .*?KEY[-]+$', line):
+ if re.match(r"[-]+END .*?KEY[-]+$", line):
# ignore private_keys
current = []
- elif re.match(r'[-]+END .*?CERTIFICATE[-]+$', line):
- certificate = '\n'.join(current)
+ elif re.match(r"[-]+END .*?CERTIFICATE[-]+$", line):
+ certificate = "\n".join(current)
ssh_key = self._get_ssh_key_from_cert(certificate)
fingerprint = self._get_fingerprint_from_cert(certificate)
keys[fingerprint] = ssh_key
@@ -586,7 +646,8 @@ class OpenSSLManager:
class GoalStateHealthReporter:
- HEALTH_REPORT_XML_TEMPLATE = textwrap.dedent('''\
+ HEALTH_REPORT_XML_TEMPLATE = textwrap.dedent(
+ """\
<?xml version="1.0" encoding="utf-8"?>
<Health xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xsd="http://www.w3.org/2001/XMLSchema">
@@ -604,25 +665,30 @@ class GoalStateHealthReporter:
</RoleInstanceList>
</Container>
</Health>
- ''')
+ """
+ )
- HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE = textwrap.dedent('''\
+ HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE = textwrap.dedent(
+ """\
<Details>
<SubStatus>{health_substatus}</SubStatus>
<Description>{health_description}</Description>
</Details>
- ''')
+ """
+ )
- PROVISIONING_SUCCESS_STATUS = 'Ready'
- PROVISIONING_NOT_READY_STATUS = 'NotReady'
- PROVISIONING_FAILURE_SUBSTATUS = 'ProvisioningFailed'
+ PROVISIONING_SUCCESS_STATUS = "Ready"
+ PROVISIONING_NOT_READY_STATUS = "NotReady"
+ PROVISIONING_FAILURE_SUBSTATUS = "ProvisioningFailed"
HEALTH_REPORT_DESCRIPTION_TRIM_LEN = 512
def __init__(
- self, goal_state: GoalState,
- azure_endpoint_client: AzureEndpointHttpClient,
- endpoint: str) -> None:
+ self,
+ goal_state: GoalState,
+ azure_endpoint_client: AzureEndpointHttpClient,
+ endpoint: str,
+ ) -> None:
"""Creates instance that will report provisioning status to an endpoint
@param goal_state: An instance of class GoalState that contains
@@ -644,17 +710,19 @@ class GoalStateHealthReporter:
incarnation=self._goal_state.incarnation,
container_id=self._goal_state.container_id,
instance_id=self._goal_state.instance_id,
- status=self.PROVISIONING_SUCCESS_STATUS)
- LOG.debug('Reporting ready to Azure fabric.')
+ status=self.PROVISIONING_SUCCESS_STATUS,
+ )
+ LOG.debug("Reporting ready to Azure fabric.")
try:
self._post_health_report(document=document)
except Exception as e:
report_diagnostic_event(
"exception while reporting ready: %s" % e,
- logger_func=LOG.error)
+ logger_func=LOG.error,
+ )
raise
- LOG.info('Reported ready to Azure fabric.')
+ LOG.info("Reported ready to Azure fabric.")
@azure_ds_telemetry_reporter
def send_failure_signal(self, description: str) -> None:
@@ -664,7 +732,8 @@ class GoalStateHealthReporter:
instance_id=self._goal_state.instance_id,
status=self.PROVISIONING_NOT_READY_STATUS,
substatus=self.PROVISIONING_FAILURE_SUBSTATUS,
- description=description)
+ description=description,
+ )
try:
self._post_health_report(document=document)
except Exception as e:
@@ -672,24 +741,33 @@ class GoalStateHealthReporter:
report_diagnostic_event(msg, logger_func=LOG.error)
raise
- LOG.warning('Reported failure to Azure fabric.')
+ LOG.warning("Reported failure to Azure fabric.")
def build_report(
- self, incarnation: str, container_id: str, instance_id: str,
- status: str, substatus=None, description=None) -> str:
- health_detail = ''
+ self,
+ incarnation: str,
+ container_id: str,
+ instance_id: str,
+ status: str,
+ substatus=None,
+ description=None,
+ ) -> str:
+ health_detail = ""
if substatus is not None:
health_detail = self.HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE.format(
health_substatus=escape(substatus),
health_description=escape(
- description[:self.HEALTH_REPORT_DESCRIPTION_TRIM_LEN]))
+ description[: self.HEALTH_REPORT_DESCRIPTION_TRIM_LEN]
+ ),
+ )
health_report = self.HEALTH_REPORT_XML_TEMPLATE.format(
incarnation=escape(str(incarnation)),
container_id=escape(container_id),
instance_id=escape(instance_id),
health_status=escape(status),
- health_detail_subsection=health_detail)
+ health_detail_subsection=health_detail,
+ )
return health_report
@@ -717,20 +795,22 @@ class GoalStateHealthReporter:
# reporting handler that writes to the special KVP files.
time.sleep(0)
- LOG.debug('Sending health report to Azure fabric.')
+ LOG.debug("Sending health report to Azure fabric.")
url = "http://{}/machine?comp=health".format(self._endpoint)
self._azure_endpoint_client.post(
url,
data=document,
- extra_headers={'Content-Type': 'text/xml; charset=utf-8'})
- LOG.debug('Successfully sent health report to Azure fabric')
+ extra_headers={"Content-Type": "text/xml; charset=utf-8"},
+ )
+ LOG.debug("Successfully sent health report to Azure fabric")
class WALinuxAgentShim:
-
def __init__(self, fallback_lease_file=None, dhcp_options=None):
- LOG.debug('WALinuxAgentShim instantiated, fallback_lease_file=%s',
- fallback_lease_file)
+ LOG.debug(
+ "WALinuxAgentShim instantiated, fallback_lease_file=%s",
+ fallback_lease_file,
+ )
self.dhcpoptions = dhcp_options
self._endpoint = None
self.openssl_manager = None
@@ -749,30 +829,33 @@ class WALinuxAgentShim:
@property
def endpoint(self):
if self._endpoint is None:
- self._endpoint = self.find_endpoint(self.lease_file,
- self.dhcpoptions)
+ self._endpoint = self.find_endpoint(
+ self.lease_file, self.dhcpoptions
+ )
return self._endpoint
@staticmethod
def get_ip_from_lease_value(fallback_lease_value):
- unescaped_value = fallback_lease_value.replace('\\', '')
+ unescaped_value = fallback_lease_value.replace("\\", "")
if len(unescaped_value) > 4:
- hex_string = ''
- for hex_pair in unescaped_value.split(':'):
+ hex_string = ""
+ for hex_pair in unescaped_value.split(":"):
if len(hex_pair) == 1:
- hex_pair = '0' + hex_pair
+ hex_pair = "0" + hex_pair
hex_string += hex_pair
packed_bytes = struct.pack(
- '>L', int(hex_string.replace(':', ''), 16))
+ ">L", int(hex_string.replace(":", ""), 16)
+ )
else:
- packed_bytes = unescaped_value.encode('utf-8')
+ packed_bytes = unescaped_value.encode("utf-8")
return socket.inet_ntoa(packed_bytes)
@staticmethod
@azure_ds_telemetry_reporter
def _networkd_get_value_from_leases(leases_d=None):
return dhcp.networkd_get_option_from_leases(
- 'OPTION_245', leases_d=leases_d)
+ "OPTION_245", leases_d=leases_d
+ )
@staticmethod
@azure_ds_telemetry_reporter
@@ -790,7 +873,7 @@ class WALinuxAgentShim:
if option_name in line:
# Example line from Ubuntu
# option unknown-245 a8:3f:81:10;
- leases.append(line.strip(' ').split(' ', 2)[-1].strip(';\n"'))
+ leases.append(line.strip(" ").split(" ", 2)[-1].strip(';\n"'))
# Return the "most recent" one in the list
if len(leases) < 1:
return None
@@ -805,15 +888,16 @@ class WALinuxAgentShim:
if not os.path.exists(hooks_dir):
LOG.debug("%s not found.", hooks_dir)
return None
- hook_files = [os.path.join(hooks_dir, x)
- for x in os.listdir(hooks_dir)]
+ hook_files = [
+ os.path.join(hooks_dir, x) for x in os.listdir(hooks_dir)
+ ]
for hook_file in hook_files:
try:
- name = os.path.basename(hook_file).replace('.json', '')
+ name = os.path.basename(hook_file).replace(".json", "")
dhcp_options[name] = json.loads(util.load_file((hook_file)))
except ValueError as e:
raise ValueError(
- '{_file} is not valid JSON data'.format(_file=hook_file)
+ "{_file} is not valid JSON data".format(_file=hook_file)
) from e
return dhcp_options
@@ -825,7 +909,7 @@ class WALinuxAgentShim:
# the MS endpoint server is given to us as DHCP option 245
_value = None
for interface in dhcp_options:
- _value = dhcp_options[interface].get('unknown_245', None)
+ _value = dhcp_options[interface].get("unknown_245", None)
if _value is not None:
LOG.debug("Endpoint server found in dhclient options")
break
@@ -855,63 +939,73 @@ class WALinuxAgentShim:
LOG.debug("Using Azure Endpoint from dhcp options")
if value is None:
report_diagnostic_event(
- 'No Azure endpoint from dhcp options. '
- 'Finding Azure endpoint from networkd...',
- logger_func=LOG.debug)
+ "No Azure endpoint from dhcp options. "
+ "Finding Azure endpoint from networkd...",
+ logger_func=LOG.debug,
+ )
value = WALinuxAgentShim._networkd_get_value_from_leases()
if value is None:
# Option-245 stored in /run/cloud-init/dhclient.hooks/<ifc>.json
# a dhclient exit hook that calls cloud-init-dhclient-hook
report_diagnostic_event(
- 'No Azure endpoint from networkd. '
- 'Finding Azure endpoint from hook json...',
- logger_func=LOG.debug)
+ "No Azure endpoint from networkd. "
+ "Finding Azure endpoint from hook json...",
+ logger_func=LOG.debug,
+ )
dhcp_options = WALinuxAgentShim._load_dhclient_json()
value = WALinuxAgentShim._get_value_from_dhcpoptions(dhcp_options)
if value is None:
# Fallback and check the leases file if unsuccessful
report_diagnostic_event(
- 'No Azure endpoint from dhclient logs. '
- 'Unable to find endpoint in dhclient logs. '
- 'Falling back to check lease files',
- logger_func=LOG.debug)
+ "No Azure endpoint from dhclient logs. "
+ "Unable to find endpoint in dhclient logs. "
+ "Falling back to check lease files",
+ logger_func=LOG.debug,
+ )
if fallback_lease_file is None:
report_diagnostic_event(
- 'No fallback lease file was specified.',
- logger_func=LOG.warning)
+ "No fallback lease file was specified.",
+ logger_func=LOG.warning,
+ )
value = None
else:
report_diagnostic_event(
- 'Looking for endpoint in lease file %s'
- % fallback_lease_file, logger_func=LOG.debug)
+ "Looking for endpoint in lease file %s"
+ % fallback_lease_file,
+ logger_func=LOG.debug,
+ )
value = WALinuxAgentShim._get_value_from_leases_file(
- fallback_lease_file)
+ fallback_lease_file
+ )
if value is None:
value = DEFAULT_WIRESERVER_ENDPOINT
report_diagnostic_event(
- 'No lease found; using default endpoint: %s' % value,
- logger_func=LOG.warning)
+ "No lease found; using default endpoint: %s" % value,
+ logger_func=LOG.warning,
+ )
endpoint_ip_address = WALinuxAgentShim.get_ip_from_lease_value(value)
report_diagnostic_event(
- 'Azure endpoint found at %s' % endpoint_ip_address,
- logger_func=LOG.debug)
+ "Azure endpoint found at %s" % endpoint_ip_address,
+ logger_func=LOG.debug,
+ )
return endpoint_ip_address
@azure_ds_telemetry_reporter
def eject_iso(self, iso_dev) -> None:
try:
LOG.debug("Ejecting the provisioning iso")
- subp.subp(['eject', iso_dev])
+ subp.subp(["eject", iso_dev])
except Exception as e:
report_diagnostic_event(
"Failed ejecting the provisioning iso: %s" % e,
- logger_func=LOG.debug)
+ logger_func=LOG.debug,
+ )
@azure_ds_telemetry_reporter
- def register_with_azure_and_fetch_data(self,
- pubkey_info=None,
- iso_dev=None) -> dict:
+ def register_with_azure_and_fetch_data(
+ self, pubkey_info=None, iso_dev=None
+ ) -> dict:
"""Gets the VM's GoalState from Azure, uses the GoalState information
to report ready/send the ready signal/provisioning complete signal to
Azure, and then uses pubkey_info to filter and obtain the user's
@@ -928,7 +1022,8 @@ class WALinuxAgentShim:
http_client_certificate = self.openssl_manager.certificate
if self.azure_endpoint_client is None:
self.azure_endpoint_client = AzureEndpointHttpClient(
- http_client_certificate)
+ http_client_certificate
+ )
goal_state = self._fetch_goal_state_from_azure(
need_certificate=http_client_certificate is not None
)
@@ -936,13 +1031,14 @@ class WALinuxAgentShim:
if pubkey_info is not None:
ssh_keys = self._get_user_pubkeys(goal_state, pubkey_info)
health_reporter = GoalStateHealthReporter(
- goal_state, self.azure_endpoint_client, self.endpoint)
+ goal_state, self.azure_endpoint_client, self.endpoint
+ )
if iso_dev is not None:
self.eject_iso(iso_dev)
health_reporter.send_ready_signal()
- return {'public-keys': ssh_keys}
+ return {"public-keys": ssh_keys}
@azure_ds_telemetry_reporter
def register_with_azure_and_report_failure(self, description: str) -> None:
@@ -955,13 +1051,14 @@ class WALinuxAgentShim:
self.azure_endpoint_client = AzureEndpointHttpClient(None)
goal_state = self._fetch_goal_state_from_azure(need_certificate=False)
health_reporter = GoalStateHealthReporter(
- goal_state, self.azure_endpoint_client, self.endpoint)
+ goal_state, self.azure_endpoint_client, self.endpoint
+ )
health_reporter.send_failure_signal(description=description)
@azure_ds_telemetry_reporter
def _fetch_goal_state_from_azure(
- self,
- need_certificate: bool) -> GoalState:
+ self, need_certificate: bool
+ ) -> GoalState:
"""Fetches the GoalState XML from the Azure endpoint, parses the XML,
and returns a GoalState object.
@@ -970,8 +1067,7 @@ class WALinuxAgentShim:
"""
unparsed_goal_state_xml = self._get_raw_goal_state_xml_from_azure()
return self._parse_raw_goal_state_xml(
- unparsed_goal_state_xml,
- need_certificate
+ unparsed_goal_state_xml, need_certificate
)
@azure_ds_telemetry_reporter
@@ -982,27 +1078,29 @@ class WALinuxAgentShim:
@return: GoalState XML string
"""
- LOG.info('Registering with Azure...')
- url = 'http://{}/machine/?comp=goalstate'.format(self.endpoint)
+ LOG.info("Registering with Azure...")
+ url = "http://{}/machine/?comp=goalstate".format(self.endpoint)
try:
with events.ReportEventStack(
- name="goalstate-retrieval",
- description="retrieve goalstate",
- parent=azure_ds_reporter):
+ name="goalstate-retrieval",
+ description="retrieve goalstate",
+ parent=azure_ds_reporter,
+ ):
response = self.azure_endpoint_client.get(url)
except Exception as e:
report_diagnostic_event(
- 'failed to register with Azure and fetch GoalState XML: %s'
- % e, logger_func=LOG.warning)
+ "failed to register with Azure and fetch GoalState XML: %s"
+ % e,
+ logger_func=LOG.warning,
+ )
raise
- LOG.debug('Successfully fetched GoalState XML.')
+ LOG.debug("Successfully fetched GoalState XML.")
return response.contents
@azure_ds_telemetry_reporter
def _parse_raw_goal_state_xml(
- self,
- unparsed_goal_state_xml: str,
- need_certificate: bool) -> GoalState:
+ self, unparsed_goal_state_xml: str, need_certificate: bool
+ ) -> GoalState:
"""Parses a GoalState XML string and returns a GoalState object.
@param unparsed_goal_state_xml: GoalState XML string
@@ -1013,23 +1111,28 @@ class WALinuxAgentShim:
goal_state = GoalState(
unparsed_goal_state_xml,
self.azure_endpoint_client,
- need_certificate
+ need_certificate,
)
except Exception as e:
report_diagnostic_event(
- 'Error processing GoalState XML: %s' % e,
- logger_func=LOG.warning)
+ "Error processing GoalState XML: %s" % e,
+ logger_func=LOG.warning,
+ )
raise
- msg = ', '.join([
- 'GoalState XML container id: %s' % goal_state.container_id,
- 'GoalState XML instance id: %s' % goal_state.instance_id,
- 'GoalState XML incarnation: %s' % goal_state.incarnation])
+ msg = ", ".join(
+ [
+ "GoalState XML container id: %s" % goal_state.container_id,
+ "GoalState XML instance id: %s" % goal_state.instance_id,
+ "GoalState XML incarnation: %s" % goal_state.incarnation,
+ ]
+ )
report_diagnostic_event(msg, logger_func=LOG.debug)
return goal_state
@azure_ds_telemetry_reporter
def _get_user_pubkeys(
- self, goal_state: GoalState, pubkey_info: list) -> list:
+ self, goal_state: GoalState, pubkey_info: list
+ ) -> list:
"""Gets and filters the VM admin user's authorized pubkeys.
The admin user in this case is the username specified as "admin"
@@ -1057,15 +1160,16 @@ class WALinuxAgentShim:
"""
ssh_keys = []
if goal_state.certificates_xml is not None and pubkey_info is not None:
- LOG.debug('Certificate XML found; parsing out public keys.')
+ LOG.debug("Certificate XML found; parsing out public keys.")
keys_by_fingerprint = self.openssl_manager.parse_certificates(
- goal_state.certificates_xml)
+ goal_state.certificates_xml
+ )
ssh_keys = self._filter_pubkeys(keys_by_fingerprint, pubkey_info)
return ssh_keys
@staticmethod
def _filter_pubkeys(keys_by_fingerprint: dict, pubkey_info: list) -> list:
- """ Filter and return only the user's actual pubkeys.
+ """Filter and return only the user's actual pubkeys.
@param keys_by_fingerprint: pubkey fingerprint -> pubkey value dict
that was obtained from GoalState Certificates XML. May contain
@@ -1078,71 +1182,84 @@ class WALinuxAgentShim:
"""
keys = []
for pubkey in pubkey_info:
- if 'value' in pubkey and pubkey['value']:
- keys.append(pubkey['value'])
- elif 'fingerprint' in pubkey and pubkey['fingerprint']:
- fingerprint = pubkey['fingerprint']
+ if "value" in pubkey and pubkey["value"]:
+ keys.append(pubkey["value"])
+ elif "fingerprint" in pubkey and pubkey["fingerprint"]:
+ fingerprint = pubkey["fingerprint"]
if fingerprint in keys_by_fingerprint:
keys.append(keys_by_fingerprint[fingerprint])
else:
- LOG.warning("ovf-env.xml specified PublicKey fingerprint "
- "%s not found in goalstate XML", fingerprint)
+ LOG.warning(
+ "ovf-env.xml specified PublicKey fingerprint "
+ "%s not found in goalstate XML",
+ fingerprint,
+ )
else:
- LOG.warning("ovf-env.xml specified PublicKey with neither "
- "value nor fingerprint: %s", pubkey)
+ LOG.warning(
+ "ovf-env.xml specified PublicKey with neither "
+ "value nor fingerprint: %s",
+ pubkey,
+ )
return keys
@azure_ds_telemetry_reporter
-def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None,
- pubkey_info=None, iso_dev=None):
- shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file,
- dhcp_options=dhcp_opts)
+def get_metadata_from_fabric(
+ fallback_lease_file=None, dhcp_opts=None, pubkey_info=None, iso_dev=None
+):
+ shim = WALinuxAgentShim(
+ fallback_lease_file=fallback_lease_file, dhcp_options=dhcp_opts
+ )
try:
return shim.register_with_azure_and_fetch_data(
- pubkey_info=pubkey_info, iso_dev=iso_dev)
+ pubkey_info=pubkey_info, iso_dev=iso_dev
+ )
finally:
shim.clean_up()
@azure_ds_telemetry_reporter
-def report_failure_to_fabric(fallback_lease_file=None, dhcp_opts=None,
- description=None):
- shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file,
- dhcp_options=dhcp_opts)
+def report_failure_to_fabric(
+ fallback_lease_file=None, dhcp_opts=None, description=None
+):
+ shim = WALinuxAgentShim(
+ fallback_lease_file=fallback_lease_file, dhcp_options=dhcp_opts
+ )
if not description:
description = DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE
try:
- shim.register_with_azure_and_report_failure(
- description=description)
+ shim.register_with_azure_and_report_failure(description=description)
finally:
shim.clean_up()
def dhcp_log_cb(out, err):
report_diagnostic_event(
- "dhclient output stream: %s" % out, logger_func=LOG.debug)
+ "dhclient output stream: %s" % out, logger_func=LOG.debug
+ )
report_diagnostic_event(
- "dhclient error stream: %s" % err, logger_func=LOG.debug)
+ "dhclient error stream: %s" % err, logger_func=LOG.debug
+ )
class EphemeralDHCPv4WithReporting:
def __init__(self, reporter, nic=None):
self.reporter = reporter
self.ephemeralDHCPv4 = EphemeralDHCPv4(
- iface=nic, dhcp_log_func=dhcp_log_cb)
+ iface=nic, dhcp_log_func=dhcp_log_cb
+ )
def __enter__(self):
with events.ReportEventStack(
- name="obtain-dhcp-lease",
- description="obtain dhcp lease",
- parent=self.reporter):
+ name="obtain-dhcp-lease",
+ description="obtain dhcp lease",
+ parent=self.reporter,
+ ):
return self.ephemeralDHCPv4.__enter__()
def __exit__(self, excp_type, excp_value, excp_traceback):
- self.ephemeralDHCPv4.__exit__(
- excp_type, excp_value, excp_traceback)
+ self.ephemeralDHCPv4.__exit__(excp_type, excp_value, excp_traceback)
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/digitalocean.py b/cloudinit/sources/helpers/digitalocean.py
index f9be4ecb..72515caf 100644
--- a/cloudinit/sources/helpers/digitalocean.py
+++ b/cloudinit/sources/helpers/digitalocean.py
@@ -8,20 +8,18 @@ import random
from cloudinit import dmi
from cloudinit import log as logging
from cloudinit import net as cloudnet
-from cloudinit import url_helper
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, url_helper, util
-NIC_MAP = {'public': 'eth0', 'private': 'eth1'}
+NIC_MAP = {"public": "eth0", "private": "eth1"}
LOG = logging.getLogger(__name__)
def assign_ipv4_link_local(distro, nic=None):
- """Bring up NIC using an address using link-local (ip4LL) IPs. On
- DigitalOcean, the link-local domain is per-droplet routed, so there
- is no risk of collisions. However, to be more safe, the ip4LL
- address is random.
+ """Bring up NIC using an address using link-local (ip4LL) IPs.
+ On DigitalOcean, the link-local domain is per-droplet routed, so there
+ is no risk of collisions. However, to be more safe, the ip4LL
+ address is random.
"""
if not nic:
@@ -29,18 +27,22 @@ def assign_ipv4_link_local(distro, nic=None):
LOG.debug("selected interface '%s' for reading metadata", nic)
if not nic:
- raise RuntimeError("unable to find interfaces to access the"
- "meta-data server. This droplet is broken.")
+ raise RuntimeError(
+ "unable to find interfaces to access the"
+ "meta-data server. This droplet is broken."
+ )
- addr = "169.254.{0}.{1}/16".format(random.randint(1, 168),
- random.randint(0, 255))
+ addr = "169.254.{0}.{1}/16".format(
+ random.randint(1, 168), random.randint(0, 255)
+ )
- ip_addr_cmd = ['ip', 'addr', 'add', addr, 'dev', nic]
- ip_link_cmd = ['ip', 'link', 'set', 'dev', nic, 'up']
+ ip_addr_cmd = ["ip", "addr", "add", addr, "dev", nic]
+ ip_link_cmd = ["ip", "link", "set", "dev", nic, "up"]
- if not subp.which('ip'):
- raise RuntimeError("No 'ip' command available to configure ip4LL "
- "address")
+ if not subp.which("ip"):
+ raise RuntimeError(
+ "No 'ip' command available to configure ip4LL address"
+ )
try:
subp.subp(ip_addr_cmd)
@@ -48,8 +50,13 @@ def assign_ipv4_link_local(distro, nic=None):
subp.subp(ip_link_cmd)
LOG.debug("brought device '%s' up", nic)
except Exception:
- util.logexc(LOG, "ip4LL address assignment of '%s' to '%s' failed."
- " Droplet networking will be broken", addr, nic)
+ util.logexc(
+ LOG,
+ "ip4LL address assignment of '%s' to '%s' failed."
+ " Droplet networking will be broken",
+ addr,
+ nic,
+ )
raise
return nic
@@ -63,21 +70,23 @@ def get_link_local_nic(distro):
]
if not nics:
return None
- return min(nics, key=lambda d: cloudnet.read_sys_net_int(d, 'ifindex'))
+ return min(nics, key=lambda d: cloudnet.read_sys_net_int(d, "ifindex"))
def del_ipv4_link_local(nic=None):
"""Remove the ip4LL address. While this is not necessary, the ip4LL
- address is extraneous and confusing to users.
+ address is extraneous and confusing to users.
"""
if not nic:
- LOG.debug("no link_local address interface defined, skipping link "
- "local address cleanup")
+ LOG.debug(
+ "no link_local address interface defined, skipping link "
+ "local address cleanup"
+ )
return
LOG.debug("cleaning up ipv4LL address")
- ip_addr_cmd = ['ip', 'addr', 'flush', 'dev', nic]
+ ip_addr_cmd = ["ip", "addr", "flush", "dev", nic]
try:
subp.subp(ip_addr_cmd)
@@ -89,44 +98,47 @@ def del_ipv4_link_local(nic=None):
def convert_network_configuration(config, dns_servers):
"""Convert the DigitalOcean Network description into Cloud-init's netconfig
- format.
-
- Example JSON:
- {'public': [
- {'mac': '04:01:58:27:7f:01',
- 'ipv4': {'gateway': '45.55.32.1',
- 'netmask': '255.255.224.0',
- 'ip_address': '45.55.50.93'},
- 'anchor_ipv4': {
- 'gateway': '10.17.0.1',
- 'netmask': '255.255.0.0',
- 'ip_address': '10.17.0.9'},
- 'type': 'public',
- 'ipv6': {'gateway': '....',
- 'ip_address': '....',
- 'cidr': 64}}
- ],
- 'private': [
- {'mac': '04:01:58:27:7f:02',
- 'ipv4': {'gateway': '10.132.0.1',
- 'netmask': '255.255.0.0',
- 'ip_address': '10.132.75.35'},
- 'type': 'private'}
- ]
- }
+ format.
+
+ Example JSON:
+ {'public': [
+ {'mac': '04:01:58:27:7f:01',
+ 'ipv4': {'gateway': '45.55.32.1',
+ 'netmask': '255.255.224.0',
+ 'ip_address': '45.55.50.93'},
+ 'anchor_ipv4': {
+ 'gateway': '10.17.0.1',
+ 'netmask': '255.255.0.0',
+ 'ip_address': '10.17.0.9'},
+ 'type': 'public',
+ 'ipv6': {'gateway': '....',
+ 'ip_address': '....',
+ 'cidr': 64}}
+ ],
+ 'private': [
+ {'mac': '04:01:58:27:7f:02',
+ 'ipv4': {'gateway': '10.132.0.1',
+ 'netmask': '255.255.0.0',
+ 'ip_address': '10.132.75.35'},
+ 'type': 'private'}
+ ]
+ }
"""
def _get_subnet_part(pcfg):
- subpart = {'type': 'static',
- 'control': 'auto',
- 'address': pcfg.get('ip_address'),
- 'gateway': pcfg.get('gateway')}
-
- if ":" in pcfg.get('ip_address'):
- subpart['address'] = "{0}/{1}".format(pcfg.get('ip_address'),
- pcfg.get('cidr'))
+ subpart = {
+ "type": "static",
+ "control": "auto",
+ "address": pcfg.get("ip_address"),
+ "gateway": pcfg.get("gateway"),
+ }
+
+ if ":" in pcfg.get("ip_address"):
+ subpart["address"] = "{0}/{1}".format(
+ pcfg.get("ip_address"), pcfg.get("cidr")
+ )
else:
- subpart['netmask'] = pcfg.get('netmask')
+ subpart["netmask"] = pcfg.get("netmask")
return subpart
@@ -138,54 +150,66 @@ def convert_network_configuration(config, dns_servers):
nic = config[n][0]
LOG.debug("considering %s", nic)
- mac_address = nic.get('mac')
+ mac_address = nic.get("mac")
if mac_address not in macs_to_nics:
- raise RuntimeError("Did not find network interface on system "
- "with mac '%s'. Cannot apply configuration: %s"
- % (mac_address, nic))
+ raise RuntimeError(
+ "Did not find network interface on system "
+ "with mac '%s'. Cannot apply configuration: %s"
+ % (mac_address, nic)
+ )
sysfs_name = macs_to_nics.get(mac_address)
- nic_type = nic.get('type', 'unknown')
+ nic_type = nic.get("type", "unknown")
if_name = NIC_MAP.get(nic_type, sysfs_name)
if if_name != sysfs_name:
- LOG.debug("Found %s interface '%s' on '%s', assigned name of '%s'",
- nic_type, mac_address, sysfs_name, if_name)
+ LOG.debug(
+ "Found %s interface '%s' on '%s', assigned name of '%s'",
+ nic_type,
+ mac_address,
+ sysfs_name,
+ if_name,
+ )
else:
- msg = ("Found interface '%s' on '%s', which is not a public "
- "or private interface. Using default system naming.")
+ msg = (
+ "Found interface '%s' on '%s', which is not a public "
+ "or private interface. Using default system naming."
+ )
LOG.debug(msg, mac_address, sysfs_name)
- ncfg = {'type': 'physical',
- 'mac_address': mac_address,
- 'name': if_name}
+ ncfg = {
+ "type": "physical",
+ "mac_address": mac_address,
+ "name": if_name,
+ }
subnets = []
- for netdef in ('ipv4', 'ipv6', 'anchor_ipv4', 'anchor_ipv6'):
+ for netdef in ("ipv4", "ipv6", "anchor_ipv4", "anchor_ipv6"):
raw_subnet = nic.get(netdef, None)
if not raw_subnet:
continue
sub_part = _get_subnet_part(raw_subnet)
if nic_type != "public" or "anchor" in netdef:
- del sub_part['gateway']
+ del sub_part["gateway"]
subnets.append(sub_part)
- ncfg['subnets'] = subnets
+ ncfg["subnets"] = subnets
nic_configs.append(ncfg)
LOG.debug("nic '%s' configuration: %s", if_name, ncfg)
if dns_servers:
LOG.debug("added dns servers: %s", dns_servers)
- nic_configs.append({'type': 'nameserver', 'address': dns_servers})
+ nic_configs.append({"type": "nameserver", "address": dns_servers})
- return {'version': 1, 'config': nic_configs}
+ return {"version": 1, "config": nic_configs}
def read_metadata(url, timeout=2, sec_between=2, retries=30):
- response = url_helper.readurl(url, timeout=timeout,
- sec_between=sec_between, retries=retries)
+ response = url_helper.readurl(
+ url, timeout=timeout, sec_between=sec_between, retries=retries
+ )
if not response.ok():
raise RuntimeError("unable to read metadata at %s" % url)
return json.loads(response.contents.decode())
@@ -202,16 +226,21 @@ def read_sysinfo():
droplet_id = dmi.read_dmi_data("system-serial-number")
if droplet_id:
- LOG.debug("system identified via SMBIOS as DigitalOcean Droplet: %s",
- droplet_id)
+ LOG.debug(
+ "system identified via SMBIOS as DigitalOcean Droplet: %s",
+ droplet_id,
+ )
else:
- msg = ("system identified via SMBIOS as a DigitalOcean "
- "Droplet, but did not provide an ID. Please file a "
- "support ticket at: "
- "https://cloud.digitalocean.com/support/tickets/new")
+ msg = (
+ "system identified via SMBIOS as a DigitalOcean "
+ "Droplet, but did not provide an ID. Please file a "
+ "support ticket at: "
+ "https://cloud.digitalocean.com/support/tickets/new"
+ )
LOG.critical(msg)
raise RuntimeError(msg)
return (True, droplet_id)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/hetzner.py b/cloudinit/sources/helpers/hetzner.py
index 33dc4c53..592ae80b 100644
--- a/cloudinit/sources/helpers/hetzner.py
+++ b/cloudinit/sources/helpers/hetzner.py
@@ -3,24 +3,25 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import url_helper
-from cloudinit import util
-
import base64
import binascii
+from cloudinit import url_helper, util
+
def read_metadata(url, timeout=2, sec_between=2, retries=30):
- response = url_helper.readurl(url, timeout=timeout,
- sec_between=sec_between, retries=retries)
+ response = url_helper.readurl(
+ url, timeout=timeout, sec_between=sec_between, retries=retries
+ )
if not response.ok():
raise RuntimeError("unable to read metadata at %s" % url)
return util.load_yaml(response.contents.decode())
def read_userdata(url, timeout=2, sec_between=2, retries=30):
- response = url_helper.readurl(url, timeout=timeout,
- sec_between=sec_between, retries=retries)
+ response = url_helper.readurl(
+ url, timeout=timeout, sec_between=sec_between, retries=retries
+ )
if not response.ok():
raise RuntimeError("unable to read userdata at %s" % url)
return response.contents
diff --git a/cloudinit/sources/helpers/netlink.py b/cloudinit/sources/helpers/netlink.py
index e13d6834..2953e858 100644
--- a/cloudinit/sources/helpers/netlink.py
+++ b/cloudinit/sources/helpers/netlink.py
@@ -2,14 +2,14 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import log as logging
-from cloudinit import util
-from collections import namedtuple
-
import os
import select
import socket
import struct
+from collections import namedtuple
+
+from cloudinit import log as logging
+from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -47,29 +47,30 @@ OPER_TESTING = 4
OPER_DORMANT = 5
OPER_UP = 6
-RTAAttr = namedtuple('RTAAttr', ['length', 'rta_type', 'data'])
-InterfaceOperstate = namedtuple('InterfaceOperstate', ['ifname', 'operstate'])
-NetlinkHeader = namedtuple('NetlinkHeader', ['length', 'type', 'flags', 'seq',
- 'pid'])
+RTAAttr = namedtuple("RTAAttr", ["length", "rta_type", "data"])
+InterfaceOperstate = namedtuple("InterfaceOperstate", ["ifname", "operstate"])
+NetlinkHeader = namedtuple(
+ "NetlinkHeader", ["length", "type", "flags", "seq", "pid"]
+)
class NetlinkCreateSocketError(RuntimeError):
- '''Raised if netlink socket fails during create or bind.'''
+ """Raised if netlink socket fails during create or bind."""
def create_bound_netlink_socket():
- '''Creates netlink socket and bind on netlink group to catch interface
+ """Creates netlink socket and bind on netlink group to catch interface
down/up events. The socket will bound only on RTMGRP_LINK (which only
includes RTM_NEWLINK/RTM_DELLINK/RTM_GETLINK events). The socket is set to
non-blocking mode since we're only receiving messages.
:returns: netlink socket in non-blocking mode
:raises: NetlinkCreateSocketError
- '''
+ """
try:
- netlink_socket = socket.socket(socket.AF_NETLINK,
- socket.SOCK_RAW,
- socket.NETLINK_ROUTE)
+ netlink_socket = socket.socket(
+ socket.AF_NETLINK, socket.SOCK_RAW, socket.NETLINK_ROUTE
+ )
netlink_socket.bind((os.getpid(), RTMGRP_LINK))
netlink_socket.setblocking(0)
except socket.error as e:
@@ -80,7 +81,7 @@ def create_bound_netlink_socket():
def get_netlink_msg_header(data):
- '''Gets netlink message type and length
+ """Gets netlink message type and length
:param: data read from netlink socket
:returns: netlink message type
@@ -92,18 +93,20 @@ def get_netlink_msg_header(data):
__u32 nlmsg_seq; /* Sequence number */
__u32 nlmsg_pid; /* Sender port ID */
};
- '''
- assert (data is not None), ("data is none")
- assert (len(data) >= NLMSGHDR_SIZE), (
- "data is smaller than netlink message header")
- msg_len, msg_type, flags, seq, pid = struct.unpack(NLMSGHDR_FMT,
- data[:MSG_TYPE_OFFSET])
+ """
+ assert data is not None, "data is none"
+ assert (
+ len(data) >= NLMSGHDR_SIZE
+ ), "data is smaller than netlink message header"
+ msg_len, msg_type, flags, seq, pid = struct.unpack(
+ NLMSGHDR_FMT, data[:MSG_TYPE_OFFSET]
+ )
LOG.debug("Got netlink msg of type %d", msg_type)
return NetlinkHeader(msg_len, msg_type, flags, seq, pid)
def read_netlink_socket(netlink_socket, timeout=None):
- '''Select and read from the netlink socket if ready.
+ """Select and read from the netlink socket if ready.
:param: netlink_socket: specify which socket object to read from
:param: timeout: specify a timeout value (integer) to wait while reading,
@@ -111,8 +114,8 @@ def read_netlink_socket(netlink_socket, timeout=None):
:returns: string of data read (max length = <MAX_SIZE>) from socket,
if no data read, returns None
:raises: AssertionError if netlink_socket is None
- '''
- assert (netlink_socket is not None), ("netlink socket is none")
+ """
+ assert netlink_socket is not None, "netlink socket is none"
read_set, _, _ = select.select([netlink_socket], [], [], timeout)
# In case of timeout, read_set doesn't contain netlink socket.
# just return from this function
@@ -126,32 +129,33 @@ def read_netlink_socket(netlink_socket, timeout=None):
def unpack_rta_attr(data, offset):
- '''Unpack a single rta attribute.
+ """Unpack a single rta attribute.
:param: data: string of data read from netlink socket
:param: offset: starting offset of RTA Attribute
:return: RTAAttr object with length, type and data. On error, return None.
:raises: AssertionError if data is None or offset is not integer.
- '''
- assert (data is not None), ("data is none")
- assert (type(offset) == int), ("offset is not integer")
- assert (offset >= RTATTR_START_OFFSET), (
- "rta offset is less than expected length")
+ """
+ assert data is not None, "data is none"
+ assert type(offset) == int, "offset is not integer"
+ assert (
+ offset >= RTATTR_START_OFFSET
+ ), "rta offset is less than expected length"
length = rta_type = 0
attr_data = None
try:
length = struct.unpack_from("H", data, offset=offset)[0]
- rta_type = struct.unpack_from("H", data, offset=offset+2)[0]
+ rta_type = struct.unpack_from("H", data, offset=offset + 2)[0]
except struct.error:
return None # Should mean our offset is >= remaining data
# Unpack just the attribute's data. Offset by 4 to skip length/type header
- attr_data = data[offset+RTA_DATA_START_OFFSET:offset+length]
+ attr_data = data[offset + RTA_DATA_START_OFFSET : offset + length]
return RTAAttr(length, rta_type, attr_data)
def read_rta_oper_state(data):
- '''Reads Interface name and operational state from RTA Data.
+ """Reads Interface name and operational state from RTA Data.
:param: data: string of data read from netlink socket
:returns: InterfaceOperstate object containing if_name and oper_state.
@@ -159,10 +163,11 @@ def read_rta_oper_state(data):
IFLA_IFNAME messages.
:raises: AssertionError if data is None or length of data is
smaller than RTATTR_START_OFFSET.
- '''
- assert (data is not None), ("data is none")
- assert (len(data) > RTATTR_START_OFFSET), (
- "length of data is smaller than RTATTR_START_OFFSET")
+ """
+ assert data is not None, "data is none"
+ assert (
+ len(data) > RTATTR_START_OFFSET
+ ), "length of data is smaller than RTATTR_START_OFFSET"
ifname = operstate = None
offset = RTATTR_START_OFFSET
while offset <= len(data):
@@ -170,15 +175,16 @@ def read_rta_oper_state(data):
if not attr or attr.length == 0:
break
# Each attribute is 4-byte aligned. Determine pad length.
- padlen = (PAD_ALIGNMENT -
- (attr.length % PAD_ALIGNMENT)) % PAD_ALIGNMENT
+ padlen = (
+ PAD_ALIGNMENT - (attr.length % PAD_ALIGNMENT)
+ ) % PAD_ALIGNMENT
offset += attr.length + padlen
if attr.rta_type == IFLA_OPERSTATE:
operstate = ord(attr.data)
elif attr.rta_type == IFLA_IFNAME:
- interface_name = util.decode_binary(attr.data, 'utf-8')
- ifname = interface_name.strip('\0')
+ interface_name = util.decode_binary(attr.data, "utf-8")
+ ifname = interface_name.strip("\0")
if not ifname or operstate is None:
return None
LOG.debug("rta attrs: ifname %s operstate %d", ifname, operstate)
@@ -186,12 +192,12 @@ def read_rta_oper_state(data):
def wait_for_nic_attach_event(netlink_socket, existing_nics):
- '''Block until a single nic is attached.
+ """Block until a single nic is attached.
:param: netlink_socket: netlink_socket to receive events
:param: existing_nics: List of existing nics so that we can skip them.
:raises: AssertionError if netlink_socket is none.
- '''
+ """
LOG.debug("Preparing to wait for nic attach.")
ifname = None
@@ -204,19 +210,21 @@ def wait_for_nic_attach_event(netlink_socket, existing_nics):
# We can return even if the operational state of the new nic is DOWN
# because we set it to UP before doing dhcp.
- read_netlink_messages(netlink_socket,
- None,
- [RTM_NEWLINK],
- [OPER_UP, OPER_DOWN],
- should_continue_cb)
+ read_netlink_messages(
+ netlink_socket,
+ None,
+ [RTM_NEWLINK],
+ [OPER_UP, OPER_DOWN],
+ should_continue_cb,
+ )
return ifname
def wait_for_nic_detach_event(netlink_socket):
- '''Block until a single nic is detached and its operational state is down.
+ """Block until a single nic is detached and its operational state is down.
:param: netlink_socket: netlink_socket to receive events.
- '''
+ """
LOG.debug("Preparing to wait for nic detach.")
ifname = None
@@ -225,16 +233,14 @@ def wait_for_nic_detach_event(netlink_socket):
ifname = iname
return False
- read_netlink_messages(netlink_socket,
- None,
- [RTM_DELLINK],
- [OPER_DOWN],
- should_continue_cb)
+ read_netlink_messages(
+ netlink_socket, None, [RTM_DELLINK], [OPER_DOWN], should_continue_cb
+ )
return ifname
def wait_for_media_disconnect_connect(netlink_socket, ifname):
- '''Block until media disconnect and connect has happened on an interface.
+ """Block until media disconnect and connect has happened on an interface.
Listens on netlink socket to receive netlink events and when the carrier
changes from 0 to 1, it considers event has happened and
return from this function
@@ -242,10 +248,10 @@ def wait_for_media_disconnect_connect(netlink_socket, ifname):
:param: netlink_socket: netlink_socket to receive events
:param: ifname: Interface name to lookout for netlink events
:raises: AssertionError if netlink_socket is None or ifname is None.
- '''
- assert (netlink_socket is not None), ("netlink socket is none")
- assert (ifname is not None), ("interface name is none")
- assert (len(ifname) > 0), ("interface name cannot be empty")
+ """
+ assert netlink_socket is not None, "netlink socket is none"
+ assert ifname is not None, "interface name is none"
+ assert len(ifname) > 0, "interface name cannot be empty"
def should_continue_cb(iname, carrier, prevCarrier):
# check for carrier down, up sequence
@@ -256,19 +262,23 @@ def wait_for_media_disconnect_connect(netlink_socket, ifname):
return True
LOG.debug("Wait for media disconnect and reconnect to happen")
- read_netlink_messages(netlink_socket,
- ifname,
- [RTM_NEWLINK, RTM_DELLINK],
- [OPER_UP, OPER_DOWN],
- should_continue_cb)
-
-
-def read_netlink_messages(netlink_socket,
- ifname_filter,
- rtm_types,
- operstates,
- should_continue_callback):
- ''' Reads from the netlink socket until the condition specified by
+ read_netlink_messages(
+ netlink_socket,
+ ifname,
+ [RTM_NEWLINK, RTM_DELLINK],
+ [OPER_UP, OPER_DOWN],
+ should_continue_cb,
+ )
+
+
+def read_netlink_messages(
+ netlink_socket,
+ ifname_filter,
+ rtm_types,
+ operstates,
+ should_continue_callback,
+):
+ """Reads from the netlink socket until the condition specified by
the continuation callback is met.
:param: netlink_socket: netlink_socket to receive events.
@@ -276,7 +286,7 @@ def read_netlink_messages(netlink_socket,
:param: rtm_types: Type of netlink events to listen for.
:param: operstates: Operational states to listen.
:param: should_continue_callback: Specifies when to stop listening.
- '''
+ """
if netlink_socket is None:
raise RuntimeError("Netlink socket is none")
data = bytes()
@@ -286,9 +296,9 @@ def read_netlink_messages(netlink_socket,
recv_data = read_netlink_socket(netlink_socket, SELECT_TIMEOUT)
if recv_data is None:
continue
- LOG.debug('read %d bytes from socket', len(recv_data))
+ LOG.debug("read %d bytes from socket", len(recv_data))
data += recv_data
- LOG.debug('Length of data after concat %d', len(data))
+ LOG.debug("Length of data after concat %d", len(data))
offset = 0
datalen = len(data)
while offset < datalen:
@@ -300,30 +310,37 @@ def read_netlink_messages(netlink_socket,
if len(nl_msg) < nlheader.length:
LOG.debug("Partial data. Smaller than netlink message")
break
- padlen = (nlheader.length+PAD_ALIGNMENT-1) & ~(PAD_ALIGNMENT-1)
+ padlen = (nlheader.length + PAD_ALIGNMENT - 1) & ~(
+ PAD_ALIGNMENT - 1
+ )
offset = offset + padlen
- LOG.debug('offset to next netlink message: %d', offset)
+ LOG.debug("offset to next netlink message: %d", offset)
# Continue if we are not interested in this message.
if nlheader.type not in rtm_types:
continue
interface_state = read_rta_oper_state(nl_msg)
if interface_state is None:
- LOG.debug('Failed to read rta attributes: %s', interface_state)
+ LOG.debug("Failed to read rta attributes: %s", interface_state)
continue
- if (ifname_filter is not None and
- interface_state.ifname != ifname_filter):
+ if (
+ ifname_filter is not None
+ and interface_state.ifname != ifname_filter
+ ):
LOG.debug(
"Ignored netlink event on interface %s. Waiting for %s.",
- interface_state.ifname, ifname_filter)
+ interface_state.ifname,
+ ifname_filter,
+ )
continue
if interface_state.operstate not in operstates:
continue
prevCarrier = carrier
carrier = interface_state.operstate
- if not should_continue_callback(interface_state.ifname,
- carrier,
- prevCarrier):
+ if not should_continue_callback(
+ interface_state.ifname, carrier, prevCarrier
+ ):
return
data = data[offset:]
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
index 4f566e64..a42543e4 100644
--- a/cloudinit/sources/helpers/openstack.py
+++ b/cloudinit/sources/helpers/openstack.py
@@ -14,11 +14,7 @@ import os
from cloudinit import ec2_utils
from cloudinit import log as logging
-from cloudinit import net
-from cloudinit import sources
-from cloudinit import subp
-from cloudinit import url_helper
-from cloudinit import util
+from cloudinit import net, sources, subp, url_helper, util
from cloudinit.sources import BrokenMetadata
# See https://docs.openstack.org/user-guide/cli-config-drive.html
@@ -27,30 +23,30 @@ LOG = logging.getLogger(__name__)
FILES_V1 = {
# Path <-> (metadata key name, translator function, default value)
- 'etc/network/interfaces': ('network_config', lambda x: x, ''),
- 'meta.js': ('meta_js', util.load_json, {}),
- "root/.ssh/authorized_keys": ('authorized_keys', lambda x: x, ''),
+ "etc/network/interfaces": ("network_config", lambda x: x, ""),
+ "meta.js": ("meta_js", util.load_json, {}),
+ "root/.ssh/authorized_keys": ("authorized_keys", lambda x: x, ""),
}
KEY_COPIES = (
# Cloud-init metadata names <-> (metadata key, is required)
- ('local-hostname', 'hostname', False),
- ('instance-id', 'uuid', True),
+ ("local-hostname", "hostname", False),
+ ("instance-id", "uuid", True),
)
# Versions and names taken from nova source nova/api/metadata/base.py
-OS_LATEST = 'latest'
-OS_FOLSOM = '2012-08-10'
-OS_GRIZZLY = '2013-04-04'
-OS_HAVANA = '2013-10-17'
-OS_LIBERTY = '2015-10-15'
+OS_LATEST = "latest"
+OS_FOLSOM = "2012-08-10"
+OS_GRIZZLY = "2013-04-04"
+OS_HAVANA = "2013-10-17"
+OS_LIBERTY = "2015-10-15"
# NEWTON_ONE adds 'devices' to md (sriov-pf-passthrough-neutron-port-vlan)
-OS_NEWTON_ONE = '2016-06-30'
+OS_NEWTON_ONE = "2016-06-30"
# NEWTON_TWO adds vendor_data2.json (vendordata-reboot)
-OS_NEWTON_TWO = '2016-10-06'
+OS_NEWTON_TWO = "2016-10-06"
# OS_OCATA adds 'vif' field to devices (sriov-pf-passthrough-neutron-port-vlan)
-OS_OCATA = '2017-02-22'
+OS_OCATA = "2017-02-22"
# OS_ROCKY adds a vf_trusted field to devices (sriov-trusted-vfs)
-OS_ROCKY = '2018-08-27'
+OS_ROCKY = "2018-08-27"
# keep this in chronological order. new supported versions go at the end.
@@ -67,18 +63,18 @@ OS_VERSIONS = (
KNOWN_PHYSICAL_TYPES = (
None,
- 'bgpovs', # not present in OpenStack upstream but used on OVH cloud.
- 'bridge',
- 'cascading', # not present in OpenStack upstream, used on OpenTelekomCloud
- 'dvs',
- 'ethernet',
- 'hw_veb',
- 'hyperv',
- 'ovs',
- 'phy',
- 'tap',
- 'vhostuser',
- 'vif',
+ "bgpovs", # not present in OpenStack upstream but used on OVH cloud.
+ "bridge",
+ "cascading", # not present in OpenStack upstream, used on OpenTelekomCloud
+ "dvs",
+ "ethernet",
+ "hw_veb",
+ "hyperv",
+ "ovs",
+ "phy",
+ "tap",
+ "vhostuser",
+ "vif",
)
@@ -90,7 +86,7 @@ class SourceMixin(object):
def _ec2_name_to_device(self, name):
if not self.ec2_metadata:
return None
- bdm = self.ec2_metadata.get('block-device-mapping', {})
+ bdm = self.ec2_metadata.get("block-device-mapping", {})
for (ent_name, device) in bdm.items():
if name == ent_name:
return device
@@ -105,9 +101,9 @@ class SourceMixin(object):
def _os_name_to_device(self, name):
device = None
try:
- criteria = 'LABEL=%s' % (name)
- if name == 'swap':
- criteria = 'TYPE=%s' % (name)
+ criteria = "LABEL=%s" % (name)
+ if name == "swap":
+ criteria = "TYPE=%s" % (name)
dev_entries = util.find_devs_with(criteria)
if dev_entries:
device = dev_entries[0]
@@ -135,10 +131,10 @@ class SourceMixin(object):
return None
# Try the ec2 mapping first
names = [name]
- if name == 'root':
- names.insert(0, 'ami')
- if name == 'ami':
- names.append('root')
+ if name == "root":
+ names.insert(0, "ami")
+ if name == "ami":
+ names.append("root")
device = None
LOG.debug("Using ec2 style lookup to find device %s", names)
for n in names:
@@ -163,7 +159,6 @@ class SourceMixin(object):
class BaseReader(metaclass=abc.ABCMeta):
-
def __init__(self, base_path):
self.base_path = base_path
@@ -187,8 +182,11 @@ class BaseReader(metaclass=abc.ABCMeta):
try:
versions_available = self._fetch_available_versions()
except Exception as e:
- LOG.debug("Unable to read openstack versions from %s due to: %s",
- self.base_path, e)
+ LOG.debug(
+ "Unable to read openstack versions from %s due to: %s",
+ self.base_path,
+ e,
+ )
versions_available = []
# openstack.OS_VERSIONS is stored in chronological order, so
@@ -202,12 +200,15 @@ class BaseReader(metaclass=abc.ABCMeta):
selected_version = potential_version
break
- LOG.debug("Selected version '%s' from %s", selected_version,
- versions_available)
+ LOG.debug(
+ "Selected version '%s' from %s",
+ selected_version,
+ versions_available,
+ )
return selected_version
def _read_content_path(self, item, decode=False):
- path = item.get('content_path', '').lstrip("/")
+ path = item.get("content_path", "").lstrip("/")
path_pieces = path.split("/")
valid_pieces = [p for p in path_pieces if len(p)]
if not valid_pieces:
@@ -225,43 +226,44 @@ class BaseReader(metaclass=abc.ABCMeta):
"""
load_json_anytype = functools.partial(
- util.load_json, root_types=(dict, list, str))
+ util.load_json, root_types=(dict, list, str)
+ )
def datafiles(version):
files = {}
- files['metadata'] = (
+ files["metadata"] = (
# File path to read
- self._path_join("openstack", version, 'meta_data.json'),
+ self._path_join("openstack", version, "meta_data.json"),
# Is it required?
True,
# Translator function (applied after loading)
util.load_json,
)
- files['userdata'] = (
- self._path_join("openstack", version, 'user_data'),
+ files["userdata"] = (
+ self._path_join("openstack", version, "user_data"),
False,
lambda x: x,
)
- files['vendordata'] = (
- self._path_join("openstack", version, 'vendor_data.json'),
+ files["vendordata"] = (
+ self._path_join("openstack", version, "vendor_data.json"),
False,
load_json_anytype,
)
- files['vendordata2'] = (
- self._path_join("openstack", version, 'vendor_data2.json'),
+ files["vendordata2"] = (
+ self._path_join("openstack", version, "vendor_data2.json"),
False,
load_json_anytype,
)
- files['networkdata'] = (
- self._path_join("openstack", version, 'network_data.json'),
+ files["networkdata"] = (
+ self._path_join("openstack", version, "network_data.json"),
False,
load_json_anytype,
)
return files
results = {
- 'userdata': '',
- 'version': 2,
+ "userdata": "",
+ "version": 2,
}
data = datafiles(self._find_working_version())
for (name, (path, required, translator)) in data.items():
@@ -272,11 +274,13 @@ class BaseReader(metaclass=abc.ABCMeta):
data = self._path_read(path)
except IOError as e:
if not required:
- LOG.debug("Failed reading optional path %s due"
- " to: %s", path, e)
+ LOG.debug(
+ "Failed reading optional path %s due to: %s", path, e
+ )
else:
- LOG.debug("Failed reading mandatory path %s due"
- " to: %s", path, e)
+ LOG.debug(
+ "Failed reading mandatory path %s due to: %s", path, e
+ )
else:
found = True
if required and not found:
@@ -291,11 +295,11 @@ class BaseReader(metaclass=abc.ABCMeta):
if found:
results[name] = data
- metadata = results['metadata']
- if 'random_seed' in metadata:
- random_seed = metadata['random_seed']
+ metadata = results["metadata"]
+ if "random_seed" in metadata:
+ random_seed = metadata["random_seed"]
try:
- metadata['random_seed'] = base64.b64decode(random_seed)
+ metadata["random_seed"] = base64.b64decode(random_seed)
except (ValueError, TypeError) as e:
raise BrokenMetadata(
"Badly formatted metadata random_seed entry: %s" % e
@@ -303,18 +307,18 @@ class BaseReader(metaclass=abc.ABCMeta):
# load any files that were provided
files = {}
- metadata_files = metadata.get('files', [])
+ metadata_files = metadata.get("files", [])
for item in metadata_files:
- if 'path' not in item:
+ if "path" not in item:
continue
- path = item['path']
+ path = item["path"]
try:
files[path] = self._read_content_path(item)
except Exception as e:
raise BrokenMetadata(
"Failed to read provided file %s: %s" % (path, e)
) from e
- results['files'] = files
+ results["files"] = files
# The 'network_config' item in metadata is a content pointer
# to the network config that should be applied. It is just a
@@ -323,7 +327,7 @@ class BaseReader(metaclass=abc.ABCMeta):
if net_item:
try:
content = self._read_content_path(net_item, decode=True)
- results['network_config'] = content
+ results["network_config"] = content
except IOError as e:
raise BrokenMetadata(
"Failed to read network configuration: %s" % (e)
@@ -334,12 +338,12 @@ class BaseReader(metaclass=abc.ABCMeta):
# if they specify 'dsmode' they're indicating the mode that they intend
# for this datasource to operate in.
try:
- results['dsmode'] = metadata['meta']['dsmode']
+ results["dsmode"] = metadata["meta"]["dsmode"]
except KeyError:
pass
# Read any ec2-metadata (if applicable)
- results['ec2-metadata'] = self._read_ec2_metadata()
+ results["ec2-metadata"] = self._read_ec2_metadata()
# Perform some misc. metadata key renames...
for (target_key, source_key, is_required) in KEY_COPIES:
@@ -364,15 +368,19 @@ class ConfigDriveReader(BaseReader):
def _fetch_available_versions(self):
if self._versions is None:
- path = self._path_join(self.base_path, 'openstack')
- found = [d for d in os.listdir(path)
- if os.path.isdir(os.path.join(path))]
+ path = self._path_join(self.base_path, "openstack")
+ found = [
+ d
+ for d in os.listdir(path)
+ if os.path.isdir(os.path.join(path))
+ ]
self._versions = sorted(found)
return self._versions
def _read_ec2_metadata(self):
- path = self._path_join(self.base_path,
- 'ec2', 'latest', 'meta-data.json')
+ path = self._path_join(
+ self.base_path, "ec2", "latest", "meta-data.json"
+ )
if not os.path.exists(path):
return {}
else:
@@ -419,14 +427,14 @@ class ConfigDriveReader(BaseReader):
else:
md[key] = copy.deepcopy(default)
- keydata = md['authorized_keys']
- meta_js = md['meta_js']
+ keydata = md["authorized_keys"]
+ meta_js = md["meta_js"]
# keydata in meta_js is preferred over "injected"
- keydata = meta_js.get('public-keys', keydata)
+ keydata = meta_js.get("public-keys", keydata)
if keydata:
lines = keydata.splitlines()
- md['public-keys'] = [
+ md["public-keys"] = [
line
for line in lines
if len(line) and not line.startswith("#")
@@ -434,25 +442,25 @@ class ConfigDriveReader(BaseReader):
# config-drive-v1 has no way for openstack to provide the instance-id
# so we copy that into metadata from the user input
- if 'instance-id' in meta_js:
- md['instance-id'] = meta_js['instance-id']
+ if "instance-id" in meta_js:
+ md["instance-id"] = meta_js["instance-id"]
results = {
- 'version': 1,
- 'metadata': md,
+ "version": 1,
+ "metadata": md,
}
# allow the user to specify 'dsmode' in a meta tag
- if 'dsmode' in meta_js:
- results['dsmode'] = meta_js['dsmode']
+ if "dsmode" in meta_js:
+ results["dsmode"] = meta_js["dsmode"]
# config-drive-v1 has no way of specifying user-data, so the user has
# to cheat and stuff it in a meta tag also.
- results['userdata'] = meta_js.get('user-data', '')
+ results["userdata"] = meta_js.get("user-data", "")
# this implementation does not support files other than
# network/interfaces and authorized_keys...
- results['files'] = {}
+ results["files"] = {}
return results
@@ -481,7 +489,6 @@ class MetadataReader(BaseReader):
return self._versions
def _path_read(self, path, decode=False):
-
def should_retry_cb(_request_args, cause):
try:
code = int(cause.code)
@@ -492,11 +499,13 @@ class MetadataReader(BaseReader):
pass
return True
- response = url_helper.readurl(path,
- retries=self.retries,
- ssl_details=self.ssl_details,
- timeout=self.timeout,
- exception_cb=should_retry_cb)
+ response = url_helper.readurl(
+ path,
+ retries=self.retries,
+ ssl_details=self.ssl_details,
+ timeout=self.timeout,
+ exception_cb=should_retry_cb,
+ )
if decode:
return response.contents.decode()
else:
@@ -506,9 +515,11 @@ class MetadataReader(BaseReader):
return url_helper.combine_url(base, *add_ons)
def _read_ec2_metadata(self):
- return ec2_utils.get_instance_metadata(ssl_details=self.ssl_details,
- timeout=self.timeout,
- retries=self.retries)
+ return ec2_utils.get_instance_metadata(
+ ssl_details=self.ssl_details,
+ timeout=self.timeout,
+ retries=self.retries,
+ )
# Convert OpenStack ConfigDrive NetworkData json to network_config yaml
@@ -544,32 +555,32 @@ def convert_net_json(network_json=None, known_macs=None):
# dict of network_config key for filtering network_json
valid_keys = {
- 'physical': [
- 'name',
- 'type',
- 'mac_address',
- 'subnets',
- 'params',
- 'mtu',
+ "physical": [
+ "name",
+ "type",
+ "mac_address",
+ "subnets",
+ "params",
+ "mtu",
],
- 'subnet': [
- 'type',
- 'address',
- 'netmask',
- 'broadcast',
- 'metric',
- 'gateway',
- 'pointopoint',
- 'scope',
- 'dns_nameservers',
- 'dns_search',
- 'routes',
+ "subnet": [
+ "type",
+ "address",
+ "netmask",
+ "broadcast",
+ "metric",
+ "gateway",
+ "pointopoint",
+ "scope",
+ "dns_nameservers",
+ "dns_search",
+ "routes",
],
}
- links = network_json.get('links', [])
- networks = network_json.get('networks', [])
- services = network_json.get('services', [])
+ links = network_json.get("links", [])
+ networks = network_json.get("networks", [])
+ services = network_json.get("services", [])
link_updates = []
link_id_info = {}
@@ -578,65 +589,77 @@ def convert_net_json(network_json=None, known_macs=None):
config = []
for link in links:
subnets = []
- cfg = dict((k, v) for k, v in link.items()
- if k in valid_keys['physical'])
+ cfg = dict(
+ (k, v) for k, v in link.items() if k in valid_keys["physical"]
+ )
# 'name' is not in openstack spec yet, but we will support it if it is
# present. The 'id' in the spec is currently implemented as the host
# nic's name, meaning something like 'tap-adfasdffd'. We do not want
# to name guest devices with such ugly names.
- if 'name' in link:
- cfg['name'] = link['name']
+ if "name" in link:
+ cfg["name"] = link["name"]
link_mac_addr = None
- if link.get('ethernet_mac_address'):
- link_mac_addr = link.get('ethernet_mac_address').lower()
- link_id_info[link['id']] = link_mac_addr
-
- curinfo = {'name': cfg.get('name'), 'mac': link_mac_addr,
- 'id': link['id'], 'type': link['type']}
-
- for network in [n for n in networks
- if n['link'] == link['id']]:
- subnet = dict((k, v) for k, v in network.items()
- if k in valid_keys['subnet'])
-
- if network['type'] == 'ipv4_dhcp':
- subnet.update({'type': 'dhcp4'})
- elif network['type'] == 'ipv6_dhcp':
- subnet.update({'type': 'dhcp6'})
- elif network['type'] in ['ipv6_slaac', 'ipv6_dhcpv6-stateless',
- 'ipv6_dhcpv6-stateful']:
- subnet.update({'type': network['type']})
- elif network['type'] in ['ipv4', 'static']:
- subnet.update({
- 'type': 'static',
- 'address': network.get('ip_address'),
- })
- elif network['type'] in ['ipv6', 'static6']:
- cfg.update({'accept-ra': False})
- subnet.update({
- 'type': 'static6',
- 'address': network.get('ip_address'),
- })
+ if link.get("ethernet_mac_address"):
+ link_mac_addr = link.get("ethernet_mac_address").lower()
+ link_id_info[link["id"]] = link_mac_addr
+
+ curinfo = {
+ "name": cfg.get("name"),
+ "mac": link_mac_addr,
+ "id": link["id"],
+ "type": link["type"],
+ }
+
+ for network in [n for n in networks if n["link"] == link["id"]]:
+ subnet = dict(
+ (k, v) for k, v in network.items() if k in valid_keys["subnet"]
+ )
+
+ if network["type"] == "ipv4_dhcp":
+ subnet.update({"type": "dhcp4"})
+ elif network["type"] == "ipv6_dhcp":
+ subnet.update({"type": "dhcp6"})
+ elif network["type"] in [
+ "ipv6_slaac",
+ "ipv6_dhcpv6-stateless",
+ "ipv6_dhcpv6-stateful",
+ ]:
+ subnet.update({"type": network["type"]})
+ elif network["type"] in ["ipv4", "static"]:
+ subnet.update(
+ {
+ "type": "static",
+ "address": network.get("ip_address"),
+ }
+ )
+ elif network["type"] in ["ipv6", "static6"]:
+ cfg.update({"accept-ra": False})
+ subnet.update(
+ {
+ "type": "static6",
+ "address": network.get("ip_address"),
+ }
+ )
# Enable accept_ra for stateful and legacy ipv6_dhcp types
- if network['type'] in ['ipv6_dhcpv6-stateful', 'ipv6_dhcp']:
- cfg.update({'accept-ra': True})
+ if network["type"] in ["ipv6_dhcpv6-stateful", "ipv6_dhcp"]:
+ cfg.update({"accept-ra": True})
- if network['type'] == 'ipv4':
- subnet['ipv4'] = True
- if network['type'] == 'ipv6':
- subnet['ipv6'] = True
+ if network["type"] == "ipv4":
+ subnet["ipv4"] = True
+ if network["type"] == "ipv6":
+ subnet["ipv6"] = True
subnets.append(subnet)
- cfg.update({'subnets': subnets})
- if link['type'] in ['bond']:
+ cfg.update({"subnets": subnets})
+ if link["type"] in ["bond"]:
params = {}
if link_mac_addr:
- params['mac_address'] = link_mac_addr
+ params["mac_address"] = link_mac_addr
for k, v in link.items():
- if k == 'bond_links':
+ if k == "bond_links":
continue
- elif k.startswith('bond'):
+ elif k.startswith("bond"):
params.update({k: v})
# openstack does not provide a name for the bond.
@@ -649,35 +672,45 @@ def convert_net_json(network_json=None, known_macs=None):
# to the network config by their nic name.
# store that in bond_links_needed, and update these later.
link_updates.append(
- (cfg, 'bond_interfaces', '%s',
- copy.deepcopy(link['bond_links']))
+ (
+ cfg,
+ "bond_interfaces",
+ "%s",
+ copy.deepcopy(link["bond_links"]),
+ )
+ )
+ cfg.update({"params": params, "name": link_name})
+
+ curinfo["name"] = link_name
+ elif link["type"] in ["vlan"]:
+ name = "%s.%s" % (link["vlan_link"], link["vlan_id"])
+ cfg.update(
+ {
+ "name": name,
+ "vlan_id": link["vlan_id"],
+ "mac_address": link["vlan_mac_address"],
+ }
+ )
+ link_updates.append((cfg, "vlan_link", "%s", link["vlan_link"]))
+ link_updates.append(
+ (cfg, "name", "%%s.%s" % link["vlan_id"], link["vlan_link"])
)
- cfg.update({'params': params, 'name': link_name})
-
- curinfo['name'] = link_name
- elif link['type'] in ['vlan']:
- name = "%s.%s" % (link['vlan_link'], link['vlan_id'])
- cfg.update({
- 'name': name,
- 'vlan_id': link['vlan_id'],
- 'mac_address': link['vlan_mac_address'],
- })
- link_updates.append((cfg, 'vlan_link', '%s', link['vlan_link']))
- link_updates.append((cfg, 'name', "%%s.%s" % link['vlan_id'],
- link['vlan_link']))
- curinfo.update({'mac': link['vlan_mac_address'],
- 'name': name})
+ curinfo.update({"mac": link["vlan_mac_address"], "name": name})
else:
- if link['type'] not in KNOWN_PHYSICAL_TYPES:
- LOG.warning('Unknown network_data link type (%s); treating as'
- ' physical', link['type'])
- cfg.update({'type': 'physical', 'mac_address': link_mac_addr})
+ if link["type"] not in KNOWN_PHYSICAL_TYPES:
+ LOG.warning(
+ "Unknown network_data link type (%s); treating as"
+ " physical",
+ link["type"],
+ )
+ cfg.update({"type": "physical", "mac_address": link_mac_addr})
config.append(cfg)
- link_id_info[curinfo['id']] = curinfo
+ link_id_info[curinfo["id"]] = curinfo
- need_names = [d for d in config
- if d.get('type') == 'physical' and 'name' not in d]
+ need_names = [
+ d for d in config if d.get("type") == "physical" and "name" not in d
+ ]
if need_names or link_updates:
if known_macs is None:
@@ -685,26 +718,26 @@ def convert_net_json(network_json=None, known_macs=None):
# go through and fill out the link_id_info with names
for _link_id, info in link_id_info.items():
- if info.get('name'):
+ if info.get("name"):
continue
- if info.get('mac') in known_macs:
- info['name'] = known_macs[info['mac']]
+ if info.get("mac") in known_macs:
+ info["name"] = known_macs[info["mac"]]
for d in need_names:
- mac = d.get('mac_address')
+ mac = d.get("mac_address")
if not mac:
raise ValueError("No mac_address or name entry for %s" % d)
if mac not in known_macs:
raise ValueError("Unable to find a system nic for %s" % d)
- d['name'] = known_macs[mac]
+ d["name"] = known_macs[mac]
for cfg, key, fmt, targets in link_updates:
if isinstance(targets, (list, tuple)):
cfg[key] = [
- fmt % link_id_info[target]['name'] for target in targets
+ fmt % link_id_info[target]["name"] for target in targets
]
else:
- cfg[key] = fmt % link_id_info[targets]['name']
+ cfg[key] = fmt % link_id_info[targets]["name"]
# Infiniband interfaces may be referenced in network_data.json by a 6 byte
# Ethernet MAC-style address, and we use that address to look up the
@@ -713,15 +746,16 @@ def convert_net_json(network_json=None, known_macs=None):
ib_known_hwaddrs = net.get_ib_hwaddrs_by_interface()
if ib_known_hwaddrs:
for cfg in config:
- if cfg['name'] in ib_known_hwaddrs:
- cfg['mac_address'] = ib_known_hwaddrs[cfg['name']]
- cfg['type'] = 'infiniband'
+ if cfg["name"] in ib_known_hwaddrs:
+ cfg["mac_address"] = ib_known_hwaddrs[cfg["name"]]
+ cfg["type"] = "infiniband"
for service in services:
cfg = service
- cfg.update({'type': 'nameserver'})
+ cfg.update({"type": "nameserver"})
config.append(cfg)
- return {'version': 1, 'config': config}
+ return {"version": 1, "config": config}
+
# vi: ts=4 expandtab
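
For orientation, the snippet below is a minimal, self-contained sketch of the subnet-type translation that convert_net_json() performs on an OpenStack network_data.json payload, mirroring the mapping visible in the hunk above; the input values are hypothetical and the helper is a simplification for illustration, not the cloud-init implementation.

    # Simplified illustration of the link/network translation shown above;
    # the network_data.json-style input below is hypothetical.
    def sketch_subnet(network):
        if network["type"] == "ipv4_dhcp":
            return {"type": "dhcp4"}
        if network["type"] == "ipv6_dhcp":
            return {"type": "dhcp6"}
        if network["type"] in ("ipv4", "static"):
            return {"type": "static", "address": network.get("ip_address")}
        if network["type"] in ("ipv6", "static6"):
            return {"type": "static6", "address": network.get("ip_address")}
        # ipv6_slaac / ipv6_dhcpv6-stateless / ipv6_dhcpv6-stateful pass through
        return {"type": network["type"]}

    network_json = {
        "links": [{"id": "tap-1234", "type": "phy",
                   "ethernet_mac_address": "52:54:00:12:34:56"}],
        "networks": [{"link": "tap-1234", "type": "ipv4",
                      "ip_address": "10.0.0.5"}],
        "services": [{"type": "dns", "address": "8.8.8.8"}],
    }
    config = []
    for link in network_json["links"]:
        config.append({
            "type": "physical",
            "mac_address": link["ethernet_mac_address"].lower(),
            "subnets": [sketch_subnet(n) for n in network_json["networks"]
                        if n["link"] == link["id"]],
        })
    for service in network_json["services"]:
        config.append(dict(service, type="nameserver"))
    print({"version": 1, "config": config})
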
diff --git a/cloudinit/sources/helpers/upcloud.py b/cloudinit/sources/helpers/upcloud.py
index 199baa58..e7b95a5e 100644
--- a/cloudinit/sources/helpers/upcloud.py
+++ b/cloudinit/sources/helpers/upcloud.py
@@ -169,7 +169,7 @@ def convert_to_network_config_v1(config):
interface = {
"type": "physical",
"name": sysfs_name,
- "mac_address": mac_address
+ "mac_address": mac_address,
}
subnets = []
@@ -182,10 +182,9 @@ def convert_to_network_config_v1(config):
if config.get("dns"):
LOG.debug("Setting DNS nameservers to %s", config.get("dns"))
- nic_configs.append({
- "type": "nameserver",
- "address": config.get("dns")
- })
+ nic_configs.append(
+ {"type": "nameserver", "address": config.get("dns")}
+ )
return {"version": 1, "config": nic_configs}
@@ -216,8 +215,7 @@ def read_sysinfo():
server_uuid = dmi.read_dmi_data("system-uuid")
if server_uuid:
LOG.debug(
- "system identified via SMBIOS as UpCloud server: %s",
- server_uuid
+ "system identified via SMBIOS as UpCloud server: %s", server_uuid
)
else:
msg = (
diff --git a/cloudinit/sources/helpers/vmware/imc/boot_proto.py b/cloudinit/sources/helpers/vmware/imc/boot_proto.py
index 9a07eafa..a5c67bb7 100644
--- a/cloudinit/sources/helpers/vmware/imc/boot_proto.py
+++ b/cloudinit/sources/helpers/vmware/imc/boot_proto.py
@@ -9,7 +9,8 @@
class BootProtoEnum(object):
"""Specifies the NIC Boot Settings."""
- DHCP = 'dhcp'
- STATIC = 'static'
+ DHCP = "dhcp"
+ STATIC = "static"
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/config.py b/cloudinit/sources/helpers/vmware/imc/config.py
index bdfab5a0..39dacee0 100644
--- a/cloudinit/sources/helpers/vmware/imc/config.py
+++ b/cloudinit/sources/helpers/vmware/imc/config.py
@@ -15,20 +15,20 @@ class Config(object):
Specification file.
"""
- CUSTOM_SCRIPT = 'CUSTOM-SCRIPT|SCRIPT-NAME'
- DNS = 'DNS|NAMESERVER|'
- DOMAINNAME = 'NETWORK|DOMAINNAME'
- HOSTNAME = 'NETWORK|HOSTNAME'
- MARKERID = 'MISC|MARKER-ID'
- PASS = 'PASSWORD|-PASS'
- RESETPASS = 'PASSWORD|RESET'
- SUFFIX = 'DNS|SUFFIX|'
- TIMEZONE = 'DATETIME|TIMEZONE'
- UTC = 'DATETIME|UTC'
- POST_GC_STATUS = 'MISC|POST-GC-STATUS'
- DEFAULT_RUN_POST_SCRIPT = 'MISC|DEFAULT-RUN-POST-CUST-SCRIPT'
- CLOUDINIT_META_DATA = 'CLOUDINIT|METADATA'
- CLOUDINIT_USER_DATA = 'CLOUDINIT|USERDATA'
+ CUSTOM_SCRIPT = "CUSTOM-SCRIPT|SCRIPT-NAME"
+ DNS = "DNS|NAMESERVER|"
+ DOMAINNAME = "NETWORK|DOMAINNAME"
+ HOSTNAME = "NETWORK|HOSTNAME"
+ MARKERID = "MISC|MARKER-ID"
+ PASS = "PASSWORD|-PASS"
+ RESETPASS = "PASSWORD|RESET"
+ SUFFIX = "DNS|SUFFIX|"
+ TIMEZONE = "DATETIME|TIMEZONE"
+ UTC = "DATETIME|UTC"
+ POST_GC_STATUS = "MISC|POST-GC-STATUS"
+ DEFAULT_RUN_POST_SCRIPT = "MISC|DEFAULT-RUN-POST-CUST-SCRIPT"
+ CLOUDINIT_META_DATA = "CLOUDINIT|METADATA"
+ CLOUDINIT_USER_DATA = "CLOUDINIT|USERDATA"
def __init__(self, configFile):
self._configFile = configFile
@@ -84,8 +84,8 @@ class Config(object):
def nics(self):
"""Return the list of associated NICs."""
res = []
- nics = self._configFile['NIC-CONFIG|NICS']
- for nic in nics.split(','):
+ nics = self._configFile["NIC-CONFIG|NICS"]
+ for nic in nics.split(","):
res.append(Nic(nic, self._configFile))
return res
@@ -93,11 +93,11 @@ class Config(object):
@property
def reset_password(self):
"""Retreives if the root password needs to be reset."""
- resetPass = self._configFile.get(Config.RESETPASS, 'no')
+ resetPass = self._configFile.get(Config.RESETPASS, "no")
resetPass = resetPass.lower()
- if resetPass not in ('yes', 'no'):
- raise ValueError('ResetPassword value should be yes/no')
- return resetPass == 'yes'
+ if resetPass not in ("yes", "no"):
+ raise ValueError("ResetPassword value should be yes/no")
+ return resetPass == "yes"
@property
def marker_id(self):
@@ -112,11 +112,11 @@ class Config(object):
@property
def post_gc_status(self):
"""Return whether to post guestinfo.gc.status VMX property."""
- postGcStatus = self._configFile.get(Config.POST_GC_STATUS, 'no')
+ postGcStatus = self._configFile.get(Config.POST_GC_STATUS, "no")
postGcStatus = postGcStatus.lower()
- if postGcStatus not in ('yes', 'no'):
- raise ValueError('PostGcStatus value should be yes/no')
- return postGcStatus == 'yes'
+ if postGcStatus not in ("yes", "no"):
+ raise ValueError("PostGcStatus value should be yes/no")
+ return postGcStatus == "yes"
@property
def default_run_post_script(self):
@@ -125,12 +125,12 @@ class Config(object):
is absent in VM Tools configuration
"""
defaultRunPostScript = self._configFile.get(
- Config.DEFAULT_RUN_POST_SCRIPT,
- 'no')
+ Config.DEFAULT_RUN_POST_SCRIPT, "no"
+ )
defaultRunPostScript = defaultRunPostScript.lower()
- if defaultRunPostScript not in ('yes', 'no'):
- raise ValueError('defaultRunPostScript value should be yes/no')
- return defaultRunPostScript == 'yes'
+ if defaultRunPostScript not in ("yes", "no"):
+ raise ValueError("defaultRunPostScript value should be yes/no")
+ return defaultRunPostScript == "yes"
@property
def meta_data_name(self):
@@ -142,4 +142,5 @@ class Config(object):
"""Return the name of cloud-init user data."""
return self._configFile.get(Config.CLOUDINIT_USER_DATA, None)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/config_custom_script.py b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py
index 2ab22de9..8240ea8f 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_custom_script.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py
@@ -9,8 +9,7 @@ import logging
import os
import stat
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
LOG = logging.getLogger(__name__)
@@ -24,8 +23,7 @@ class CustomScriptConstant(object):
# The user defined custom script
CUSTOM_SCRIPT_NAME = "customize.sh"
- CUSTOM_SCRIPT = os.path.join(CUSTOM_TMP_DIR,
- CUSTOM_SCRIPT_NAME)
+ CUSTOM_SCRIPT = os.path.join(CUSTOM_TMP_DIR, CUSTOM_SCRIPT_NAME)
POST_CUSTOM_PENDING_MARKER = "/.guest-customization-post-reboot-pending"
# The cc_scripts_per_instance script to launch custom script
POST_CUSTOM_SCRIPT_NAME = "post-customize-guest.sh"
@@ -39,22 +37,25 @@ class RunCustomScript(object):
def prepare_script(self):
if not os.path.exists(self.scriptpath):
- raise CustomScriptNotFound("Script %s not found!! "
- "Cannot execute custom script!"
- % self.scriptpath)
+ raise CustomScriptNotFound(
+ "Script %s not found!! Cannot execute custom script!"
+ % self.scriptpath
+ )
util.ensure_dir(CustomScriptConstant.CUSTOM_TMP_DIR)
- LOG.debug("Copying custom script to %s",
- CustomScriptConstant.CUSTOM_SCRIPT)
+ LOG.debug(
+ "Copying custom script to %s", CustomScriptConstant.CUSTOM_SCRIPT
+ )
util.copy(self.scriptpath, CustomScriptConstant.CUSTOM_SCRIPT)
# Strip any CR characters from the decoded script
- content = util.load_file(
- CustomScriptConstant.CUSTOM_SCRIPT).replace("\r", "")
- util.write_file(CustomScriptConstant.CUSTOM_SCRIPT,
- content,
- mode=0o544)
+ content = util.load_file(CustomScriptConstant.CUSTOM_SCRIPT).replace(
+ "\r", ""
+ )
+ util.write_file(
+ CustomScriptConstant.CUSTOM_SCRIPT, content, mode=0o544
+ )
class PreCustomScript(RunCustomScript):
@@ -70,8 +71,8 @@ class PostCustomScript(RunCustomScript):
super(PostCustomScript, self).__init__(scriptname, directory)
self.ccScriptsDir = ccScriptsDir
self.ccScriptPath = os.path.join(
- ccScriptsDir,
- CustomScriptConstant.POST_CUSTOM_SCRIPT_NAME)
+ ccScriptsDir, CustomScriptConstant.POST_CUSTOM_SCRIPT_NAME
+ )
def execute(self):
"""
@@ -81,15 +82,17 @@ class PostCustomScript(RunCustomScript):
"""
self.prepare_script()
- LOG.debug("Copying post customize run script to %s",
- self.ccScriptPath)
+ LOG.debug("Copying post customize run script to %s", self.ccScriptPath)
util.copy(
- os.path.join(self.directory,
- CustomScriptConstant.POST_CUSTOM_SCRIPT_NAME),
- self.ccScriptPath)
+ os.path.join(
+ self.directory, CustomScriptConstant.POST_CUSTOM_SCRIPT_NAME
+ ),
+ self.ccScriptPath,
+ )
st = os.stat(self.ccScriptPath)
os.chmod(self.ccScriptPath, st.st_mode | stat.S_IEXEC)
LOG.info("Creating post customization pending marker")
util.ensure_file(CustomScriptConstant.POST_CUSTOM_PENDING_MARKER)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/config_file.py b/cloudinit/sources/helpers/vmware/imc/config_file.py
index fc034c95..845294ec 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_file.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_file.py
@@ -35,7 +35,7 @@ class ConfigFile(ConfigSource, dict):
key = key.strip()
val = val.strip()
- if key.startswith('-') or '|-' in key:
+ if key.startswith("-") or "|-" in key:
canLog = False
else:
canLog = True
@@ -59,7 +59,7 @@ class ConfigFile(ConfigSource, dict):
Keyword arguments:
filename - The full path to the config file.
"""
- logger.info('Parsing the config file %s.', filename)
+ logger.info("Parsing the config file %s.", filename)
config = configparser.ConfigParser()
config.optionxform = str
@@ -71,7 +71,7 @@ class ConfigFile(ConfigSource, dict):
logger.debug("FOUND CATEGORY = '%s'", category)
for (key, value) in config.items(category):
- self._insertKey(category + '|' + key, value)
+ self._insertKey(category + "|" + key, value)
def should_keep_current_value(self, key):
"""
@@ -115,4 +115,5 @@ class ConfigFile(ConfigSource, dict):
"""
return len([key for key in self if key.startswith(prefix)])
+
# vi: ts=4 expandtab
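
As a side note, the flattened "CATEGORY|key" convention that ConfigFile builds here (and that Config and Nic look up) can be sketched in a few lines of standalone Python; the cust.cfg-style content below is hypothetical.

    import configparser

    # Hypothetical customization-spec content; ConfigFile._insertKey() above
    # stores each entry under "CATEGORY|key" in exactly this flattened form.
    sample = """
    [NETWORK]
    HOSTNAME = demo-vm
    DOMAINNAME = example.invalid
    [NIC-CONFIG]
    NICS = NIC1
    """

    parser = configparser.ConfigParser()
    parser.optionxform = str  # keep keys case-sensitive, as the helper does
    parser.read_string(sample)

    flat = {}
    for category in parser.sections():
        for key, value in parser.items(category):
            flat[category + "|" + key] = value

    print(flat["NETWORK|HOSTNAME"])   # -> demo-vm
    print(flat["NIC-CONFIG|NICS"])    # -> NIC1
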
diff --git a/cloudinit/sources/helpers/vmware/imc/config_namespace.py b/cloudinit/sources/helpers/vmware/imc/config_namespace.py
index 5899d8f7..3b3b2d5a 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_namespace.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_namespace.py
@@ -11,4 +11,5 @@ from .config_source import ConfigSource
class ConfigNamespace(ConfigSource):
"""Specifies the Config Namespace."""
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py
index f5a0ebe4..df621f20 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_nic.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py
@@ -9,9 +9,8 @@ import logging
import os
import re
+from cloudinit import subp, util
from cloudinit.net.network_state import mask_to_net_prefix
-from cloudinit import subp
-from cloudinit import util
logger = logging.getLogger(__name__)
@@ -63,8 +62,10 @@ class NicConfigurator(object):
if not primary_nics:
return None
elif len(primary_nics) > 1:
- raise Exception('There can only be one primary nic',
- [nic.mac for nic in primary_nics])
+ raise Exception(
+ "There can only be one primary nic",
+ [nic.mac for nic in primary_nics],
+ )
else:
return primary_nics[0]
@@ -73,17 +74,17 @@ class NicConfigurator(object):
Create the mac2Name dictionary
The mac address(es) are in the lower case
"""
- cmd = ['ip', 'addr', 'show']
+ cmd = ["ip", "addr", "show"]
output, _err = subp.subp(cmd)
- sections = re.split(r'\n\d+: ', '\n' + output)[1:]
+ sections = re.split(r"\n\d+: ", "\n" + output)[1:]
- macPat = r'link/ether (([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2}))'
+ macPat = r"link/ether (([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2}))"
for section in sections:
match = re.search(macPat, section)
if not match: # Only keep info about nics
continue
mac = match.group(1).lower()
- name = section.split(':', 1)[0]
+ name = section.split(":", 1)[0]
self.mac2Name[mac] = name
def gen_one_nic(self, nic):
@@ -95,11 +96,11 @@ class NicConfigurator(object):
mac = nic.mac.lower()
name = self.mac2Name.get(mac)
if not name:
- raise ValueError('No known device has MACADDR: %s' % nic.mac)
+ raise ValueError("No known device has MACADDR: %s" % nic.mac)
nics_cfg_list = []
- cfg = {'type': 'physical', 'name': name, 'mac_address': mac}
+ cfg = {"type": "physical", "name": name, "mac_address": mac}
subnet_list = []
route_list = []
@@ -114,7 +115,7 @@ class NicConfigurator(object):
subnet_list.extend(subnets)
route_list.extend(routes)
- cfg.update({'subnets': subnet_list})
+ cfg.update({"subnets": subnet_list})
nics_cfg_list.append(cfg)
if route_list:
@@ -135,17 +136,17 @@ class NicConfigurator(object):
route_list = []
if nic.onboot:
- subnet.update({'control': 'auto'})
+ subnet.update({"control": "auto"})
bootproto = nic.bootProto.lower()
- if nic.ipv4_mode.lower() == 'disabled':
- bootproto = 'manual'
+ if nic.ipv4_mode.lower() == "disabled":
+ bootproto = "manual"
- if bootproto != 'static':
- subnet.update({'type': 'dhcp'})
+ if bootproto != "static":
+ subnet.update({"type": "dhcp"})
return ([subnet], route_list)
else:
- subnet.update({'type': 'static'})
+ subnet.update({"type": "static"})
# Static Ipv4
addrs = nic.staticIpv4
@@ -154,20 +155,21 @@ class NicConfigurator(object):
v4 = addrs[0]
if v4.ip:
- subnet.update({'address': v4.ip})
+ subnet.update({"address": v4.ip})
if v4.netmask:
- subnet.update({'netmask': v4.netmask})
+ subnet.update({"netmask": v4.netmask})
# Add the primary gateway
if nic.primary and v4.gateways:
self.ipv4PrimaryGateway = v4.gateways[0]
- subnet.update({'gateway': self.ipv4PrimaryGateway})
+ subnet.update({"gateway": self.ipv4PrimaryGateway})
return ([subnet], route_list)
# Add routes if there is no primary nic
if not self._primaryNic and v4.gateways:
subnet.update(
- {'routes': self.gen_ipv4_route(nic, v4.gateways, v4.netmask)})
+ {"routes": self.gen_ipv4_route(nic, v4.gateways, v4.netmask)}
+ )
return ([subnet], route_list)
@@ -184,10 +186,14 @@ class NicConfigurator(object):
for gateway in gateways:
destination = "%s/%d" % (gen_subnet(gateway, netmask), cidr)
- route_list.append({'destination': destination,
- 'type': 'route',
- 'gateway': gateway,
- 'metric': 10000})
+ route_list.append(
+ {
+ "destination": destination,
+ "type": "route",
+ "gateway": gateway,
+ "metric": 10000,
+ }
+ )
return route_list
@@ -208,9 +214,11 @@ class NicConfigurator(object):
addrs = nic.staticIpv6
for addr in addrs:
- subnet = {'type': 'static6',
- 'address': addr.ip,
- 'netmask': addr.netmask}
+ subnet = {
+ "type": "static6",
+ "address": addr.ip,
+ "netmask": addr.netmask,
+ }
subnet_list.append(subnet)
# TODO: Add the primary gateway
@@ -226,9 +234,9 @@ class NicConfigurator(object):
route_list = []
for addr in addrs:
- route_list.append({'type': 'route',
- 'gateway': addr.gateway,
- 'metric': 10000})
+ route_list.append(
+ {"type": "route", "gateway": addr.gateway, "metric": 10000}
+ )
return route_list
@@ -246,7 +254,7 @@ class NicConfigurator(object):
return nics_cfg_list
def clear_dhcp(self):
- logger.info('Clearing DHCP leases')
+ logger.info("Clearing DHCP leases")
# Ignore the return code 1.
subp.subp(["pkill", "dhclient"], rcs=[0, 1])
@@ -262,11 +270,12 @@ class NicConfigurator(object):
logger.info("Debian OS not detected. Skipping the configure step")
return
- containingDir = '/etc/network'
+ containingDir = "/etc/network"
- interfaceFile = os.path.join(containingDir, 'interfaces')
- originalFile = os.path.join(containingDir,
- 'interfaces.before_vmware_customization')
+ interfaceFile = os.path.join(containingDir, "interfaces")
+ originalFile = os.path.join(
+ containingDir, "interfaces.before_vmware_customization"
+ )
if not os.path.exists(originalFile) and os.path.exists(interfaceFile):
os.rename(interfaceFile, originalFile)
@@ -278,8 +287,9 @@ class NicConfigurator(object):
"source-directory /etc/network/interfaces.d",
]
- util.write_file(interfaceFile, content='\n'.join(lines))
+ util.write_file(interfaceFile, content="\n".join(lines))
self.clear_dhcp()
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/config_passwd.py b/cloudinit/sources/helpers/vmware/imc/config_passwd.py
index d16a7690..4d3967a1 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_passwd.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_passwd.py
@@ -9,8 +9,7 @@
import logging
import os
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
LOG = logging.getLogger(__name__)
@@ -20,6 +19,7 @@ class PasswordConfigurator(object):
Class for changing configurations related to passwords in a VM. Includes
setting and expiring passwords.
"""
+
def configure(self, passwd, resetPasswd, distro):
"""
Main method to perform all functionalities based on configuration file
@@ -28,25 +28,25 @@ class PasswordConfigurator(object):
@param resetPasswd: boolean to determine if password needs to be reset.
@return cfg: dict to be used by cloud-init set_passwd code.
"""
- LOG.info('Starting password configuration')
+ LOG.info("Starting password configuration")
if passwd:
passwd = util.b64d(passwd)
allRootUsers = []
- for line in open('/etc/passwd', 'r'):
- if line.split(':')[2] == '0':
- allRootUsers.append(line.split(':')[0])
+ for line in open("/etc/passwd", "r"):
+ if line.split(":")[2] == "0":
+ allRootUsers.append(line.split(":")[0])
# read shadow file and check for each user, if its uid0 or root.
uidUsersList = []
- for line in open('/etc/shadow', 'r'):
- user = line.split(':')[0]
+ for line in open("/etc/shadow", "r"):
+ user = line.split(":")[0]
if user in allRootUsers:
uidUsersList.append(user)
if passwd:
- LOG.info('Setting admin password')
- distro.set_passwd('root', passwd)
+ LOG.info("Setting admin password")
+ distro.set_passwd("root", passwd)
if resetPasswd:
self.reset_password(uidUsersList)
- LOG.info('Configure Password completed!')
+ LOG.info("Configure Password completed!")
def reset_password(self, uidUserList):
"""
@@ -54,15 +54,19 @@ class PasswordConfigurator(object):
not succeeded using passwd command. Log failure message otherwise.
@param: list of users for which to expire password.
"""
- LOG.info('Expiring password.')
+ LOG.info("Expiring password.")
for user in uidUserList:
try:
- subp.subp(['passwd', '--expire', user])
+ subp.subp(["passwd", "--expire", user])
except subp.ProcessExecutionError as e:
- if os.path.exists('/usr/bin/chage'):
- subp.subp(['chage', '-d', '0', user])
+ if os.path.exists("/usr/bin/chage"):
+ subp.subp(["chage", "-d", "0", user])
else:
- LOG.warning('Failed to expire password for %s with error: '
- '%s', user, e)
+ LOG.warning(
+ "Failed to expire password for %s with error: %s",
+ user,
+ e,
+ )
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/config_source.py b/cloudinit/sources/helpers/vmware/imc/config_source.py
index 7ec06a9c..e99f9b43 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_source.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_source.py
@@ -9,4 +9,5 @@
class ConfigSource(object):
"""Specifies a source for the Config Content."""
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_error.py b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py
index 96d839b8..eda84cfb 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_error.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py
@@ -13,4 +13,5 @@ class GuestCustErrorEnum(object):
GUESTCUST_ERROR_SCRIPT_DISABLED = 6
GUESTCUST_ERROR_WRONG_META_FORMAT = 9
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_event.py b/cloudinit/sources/helpers/vmware/imc/guestcust_event.py
index e84c1cb0..33169a7e 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_event.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_event.py
@@ -14,4 +14,5 @@ class GuestCustEventEnum(object):
GUESTCUST_EVENT_ENABLE_NICS = 103
GUESTCUST_EVENT_QUERY_NICS = 104
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_state.py b/cloudinit/sources/helpers/vmware/imc/guestcust_state.py
index a8211dea..c74fbc8b 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_state.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_state.py
@@ -12,4 +12,5 @@ class GuestCustStateEnum(object):
GUESTCUST_STATE_RUNNING = 4
GUESTCUST_STATE_DONE = 5
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
index d919f693..08763e62 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
@@ -73,7 +73,7 @@ def get_nics_to_enable(nicsfilepath):
if not os.path.exists(nicsfilepath):
return None
- with open(nicsfilepath, 'r') as fp:
+ with open(nicsfilepath, "r") as fp:
nics = fp.read(NICS_SIZE)
return nics
@@ -95,7 +95,8 @@ def enable_nics(nics):
(out, _err) = set_customization_status(
GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
GuestCustEventEnum.GUESTCUST_EVENT_ENABLE_NICS,
- nics)
+ nics,
+ )
if not out:
time.sleep(enableNicsWaitCount * enableNicsWaitSeconds)
continue
@@ -108,32 +109,36 @@ def enable_nics(nics):
(out, _err) = set_customization_status(
GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
GuestCustEventEnum.GUESTCUST_EVENT_QUERY_NICS,
- nics)
+ nics,
+ )
if out and out == NICS_STATUS_CONNECTED:
logger.info("NICS are connected on %d second", count)
return
time.sleep(enableNicsWaitSeconds)
- logger.warning("Can't connect network interfaces after %d attempts",
- enableNicsWaitRetries)
+ logger.warning(
+ "Can't connect network interfaces after %d attempts",
+ enableNicsWaitRetries,
+ )
def get_tools_config(section, key, defaultVal):
- """ Return the value of [section] key from VMTools configuration.
+ """Return the value of [section] key from VMTools configuration.
- @param section: String of section to read from VMTools config
- @returns: String value from key in [section] or defaultVal if
- [section] is not present or vmware-toolbox-cmd is
- not installed.
+ @param section: String of section to read from VMTools config
+ @returns: String value from key in [section] or defaultVal if
+ [section] is not present or vmware-toolbox-cmd is
+ not installed.
"""
- if not subp.which('vmware-toolbox-cmd'):
+ if not subp.which("vmware-toolbox-cmd"):
logger.debug(
- 'vmware-toolbox-cmd not installed, returning default value')
+ "vmware-toolbox-cmd not installed, returning default value"
+ )
return defaultVal
- cmd = ['vmware-toolbox-cmd', 'config', 'get', section, key]
+ cmd = ["vmware-toolbox-cmd", "config", "get", section, key]
try:
(outText, _) = subp.subp(cmd)
@@ -141,22 +146,27 @@ def get_tools_config(section, key, defaultVal):
if e.exit_code == 69:
logger.debug(
"vmware-toolbox-cmd returned 69 (unavailable) for cmd: %s."
- " Return default value: %s", " ".join(cmd), defaultVal)
+ " Return default value: %s",
+ " ".join(cmd),
+ defaultVal,
+ )
else:
logger.error("Failed running %s[%s]", cmd, e.exit_code)
logger.exception(e)
return defaultVal
retValue = defaultVal
- m = re.match(r'([^=]+)=(.*)', outText)
+ m = re.match(r"([^=]+)=(.*)", outText)
if m:
retValue = m.group(2).strip()
- logger.debug("Get tools config: [%s] %s = %s",
- section, key, retValue)
+ logger.debug("Get tools config: [%s] %s = %s", section, key, retValue)
else:
logger.debug(
"Tools config: [%s] %s is not found, return default value: %s",
- section, key, retValue)
+ section,
+ key,
+ retValue,
+ )
return retValue
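
For clarity, the key=value extraction in get_tools_config() above boils down to a single regex; a minimal standalone example follows, with hypothetical vmware-toolbox-cmd output.

    import re

    # Hypothetical `vmware-toolbox-cmd config get <section> <key>` output;
    # get_tools_config() above pulls the value out with this same pattern.
    outText = "enable-custom-scripts = true"
    m = re.match(r"([^=]+)=(.*)", outText)
    retValue = m.group(2).strip() if m else "defaultVal"
    print(retValue)  # -> true
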
diff --git a/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py b/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py
index d793bdeb..673204a0 100644
--- a/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py
+++ b/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py
@@ -18,18 +18,19 @@ class Ipv4ModeEnum(object):
# The legacy mode which only allows dhcp/static based on whether IPv4
# addresses list is empty or not
- IPV4_MODE_BACKWARDS_COMPATIBLE = 'BACKWARDS_COMPATIBLE'
+ IPV4_MODE_BACKWARDS_COMPATIBLE = "BACKWARDS_COMPATIBLE"
# IPv4 must use static address. Reserved for future use
- IPV4_MODE_STATIC = 'STATIC'
+ IPV4_MODE_STATIC = "STATIC"
# IPv4 must use DHCPv4. Reserved for future use
- IPV4_MODE_DHCP = 'DHCP'
+ IPV4_MODE_DHCP = "DHCP"
# IPv4 must be disabled
- IPV4_MODE_DISABLED = 'DISABLED'
+ IPV4_MODE_DISABLED = "DISABLED"
# IPv4 settings should be left untouched. Reserved for future use
- IPV4_MODE_AS_IS = 'AS_IS'
+ IPV4_MODE_AS_IS = "AS_IS"
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/nic.py b/cloudinit/sources/helpers/vmware/imc/nic.py
index ef8f87f7..7b742d0f 100644
--- a/cloudinit/sources/helpers/vmware/imc/nic.py
+++ b/cloudinit/sources/helpers/vmware/imc/nic.py
@@ -20,7 +20,7 @@ class Nic(NicBase):
self._configFile = configFile
def _get(self, what):
- return self._configFile.get(self.name + '|' + what, None)
+ return self._configFile.get(self.name + "|" + what, None)
def _get_count_with_prefix(self, prefix):
return self._configFile.get_count_with_prefix(self.name + prefix)
@@ -31,29 +31,29 @@ class Nic(NicBase):
@property
def mac(self):
- return self._get('MACADDR').lower()
+ return self._get("MACADDR").lower()
@property
def primary(self):
- value = self._get('PRIMARY')
+ value = self._get("PRIMARY")
if value:
value = value.lower()
- return value == 'yes' or value == 'true'
+ return value == "yes" or value == "true"
else:
return False
@property
def onboot(self):
- value = self._get('ONBOOT')
+ value = self._get("ONBOOT")
if value:
value = value.lower()
- return value == 'yes' or value == 'true'
+ return value == "yes" or value == "true"
else:
return False
@property
def bootProto(self):
- value = self._get('BOOTPROTO')
+ value = self._get("BOOTPROTO")
if value:
return value.lower()
else:
@@ -61,7 +61,7 @@ class Nic(NicBase):
@property
def ipv4_mode(self):
- value = self._get('IPv4_MODE')
+ value = self._get("IPv4_MODE")
if value:
return value.lower()
else:
@@ -80,7 +80,7 @@ class Nic(NicBase):
@property
def staticIpv6(self):
- cnt = self._get_count_with_prefix('|IPv6ADDR|')
+ cnt = self._get_count_with_prefix("|IPv6ADDR|")
if not cnt:
return None
@@ -100,17 +100,17 @@ class StaticIpv4Addr(StaticIpv4Base):
@property
def ip(self):
- return self._nic._get('IPADDR')
+ return self._nic._get("IPADDR")
@property
def netmask(self):
- return self._nic._get('NETMASK')
+ return self._nic._get("NETMASK")
@property
def gateways(self):
- value = self._nic._get('GATEWAY')
+ value = self._nic._get("GATEWAY")
if value:
- return [x.strip() for x in value.split(',')]
+ return [x.strip() for x in value.split(",")]
else:
return None
@@ -124,14 +124,15 @@ class StaticIpv6Addr(StaticIpv6Base):
@property
def ip(self):
- return self._nic._get('IPv6ADDR|' + str(self._index))
+ return self._nic._get("IPv6ADDR|" + str(self._index))
@property
def netmask(self):
- return self._nic._get('IPv6NETMASK|' + str(self._index))
+ return self._nic._get("IPv6NETMASK|" + str(self._index))
@property
def gateway(self):
- return self._nic._get('IPv6GATEWAY|' + str(self._index))
+ return self._nic._get("IPv6GATEWAY|" + str(self._index))
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/nic_base.py b/cloudinit/sources/helpers/vmware/imc/nic_base.py
index de7b866d..37d9602f 100644
--- a/cloudinit/sources/helpers/vmware/imc/nic_base.py
+++ b/cloudinit/sources/helpers/vmware/imc/nic_base.py
@@ -18,7 +18,7 @@ class NicBase(object):
Retrieves the mac address of the nic
@return (str) : the MACADDR setting
"""
- raise NotImplementedError('MACADDR')
+ raise NotImplementedError("MACADDR")
@property
def primary(self):
@@ -29,7 +29,7 @@ class NicBase(object):
be set.
@return (bool): the PRIMARY setting
"""
- raise NotImplementedError('PRIMARY')
+ raise NotImplementedError("PRIMARY")
@property
def onboot(self):
@@ -37,7 +37,7 @@ class NicBase(object):
Retrieves whether the nic should be up at the boot time
@return (bool) : the ONBOOT setting
"""
- raise NotImplementedError('ONBOOT')
+ raise NotImplementedError("ONBOOT")
@property
def bootProto(self):
@@ -45,7 +45,7 @@ class NicBase(object):
Retrieves the boot protocol of the nic
@return (str): the BOOTPROTO setting, valid values: dhcp and static.
"""
- raise NotImplementedError('BOOTPROTO')
+ raise NotImplementedError("BOOTPROTO")
@property
def ipv4_mode(self):
@@ -54,7 +54,7 @@ class NicBase(object):
@return (str): the IPv4_MODE setting, valid values:
backwards_compatible, static, dhcp, disabled, as_is
"""
- raise NotImplementedError('IPv4_MODE')
+ raise NotImplementedError("IPv4_MODE")
@property
def staticIpv4(self):
@@ -62,7 +62,7 @@ class NicBase(object):
Retrieves the static IPv4 configuration of the nic
@return (StaticIpv4Base list): the static ipv4 setting
"""
- raise NotImplementedError('Static IPv4')
+ raise NotImplementedError("Static IPv4")
@property
def staticIpv6(self):
@@ -70,7 +70,7 @@ class NicBase(object):
Retrieves the IPv6 configuration of the nic
@return (StaticIpv6Base list): the static ipv6 setting
"""
- raise NotImplementedError('Static Ipv6')
+ raise NotImplementedError("Static Ipv6")
def validate(self):
"""
@@ -78,7 +78,7 @@ class NicBase(object):
For example, the staticIpv4 property is required and should not be
empty when ipv4Mode is STATIC
"""
- raise NotImplementedError('Check constraints on properties')
+ raise NotImplementedError("Check constraints on properties")
class StaticIpv4Base(object):
@@ -93,7 +93,7 @@ class StaticIpv4Base(object):
Retrieves the Ipv4 address
@return (str): the IPADDR setting
"""
- raise NotImplementedError('Ipv4 Address')
+ raise NotImplementedError("Ipv4 Address")
@property
def netmask(self):
@@ -101,7 +101,7 @@ class StaticIpv4Base(object):
Retrieves the Ipv4 NETMASK setting
@return (str): the NETMASK setting
"""
- raise NotImplementedError('Ipv4 NETMASK')
+ raise NotImplementedError("Ipv4 NETMASK")
@property
def gateways(self):
@@ -109,7 +109,7 @@ class StaticIpv4Base(object):
Retrieves the gateways on this Ipv4 subnet
@return (str list): the GATEWAY setting
"""
- raise NotImplementedError('Ipv4 GATEWAY')
+ raise NotImplementedError("Ipv4 GATEWAY")
class StaticIpv6Base(object):
@@ -123,7 +123,7 @@ class StaticIpv6Base(object):
Retrieves the Ipv6 address
@return (str): the IPv6ADDR setting
"""
- raise NotImplementedError('Ipv6 Address')
+ raise NotImplementedError("Ipv6 Address")
@property
def netmask(self):
@@ -131,7 +131,7 @@ class StaticIpv6Base(object):
Retrieves the Ipv6 NETMASK setting
@return (str): the IPv6NETMASK setting
"""
- raise NotImplementedError('Ipv6 NETMASK')
+ raise NotImplementedError("Ipv6 NETMASK")
@property
def gateway(self):
@@ -139,6 +139,7 @@ class StaticIpv6Base(object):
Retrieves the Ipv6 GATEWAY setting
@return (str): the IPv6GATEWAY setting
"""
- raise NotImplementedError('Ipv6 GATEWAY')
+ raise NotImplementedError("Ipv6 GATEWAY")
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vultr.py b/cloudinit/sources/helpers/vultr.py
index ad347bea..eb504eba 100644
--- a/cloudinit/sources/helpers/vultr.py
+++ b/cloudinit/sources/helpers/vultr.py
@@ -3,16 +3,12 @@
# This file is part of cloud-init. See LICENSE file for license information.
import json
+from functools import lru_cache
-from cloudinit import log as log
-from cloudinit import url_helper
from cloudinit import dmi
-from cloudinit import util
-from cloudinit import net
-from cloudinit import netinfo
-from cloudinit import subp
+from cloudinit import log as log
+from cloudinit import net, netinfo, subp, url_helper, util
from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
-from functools import lru_cache
# Get LOG
LOG = log.getLogger(__name__)
@@ -41,21 +37,21 @@ def set_route():
routes = netinfo.route_info()
# If no tools exist and empty dict is returned
- if 'ipv4' not in routes:
+ if "ipv4" not in routes:
return
# We only care about IPv4
- routes = routes['ipv4']
+ routes = routes["ipv4"]
# Searchable list
dests = []
# Parse each route into a more searchable format
for route in routes:
- dests.append(route['destination'])
+ dests.append(route["destination"])
- gw_present = '100.64.0.0' in dests or '100.64.0.0/10' in dests
- dest_present = '169.254.169.254' in dests
+ gw_present = "100.64.0.0" in dests or "100.64.0.0/10" in dests
+ dest_present = "169.254.169.254" in dests
# If not IPv6 only (No link local)
# or the route is already present
@@ -63,36 +59,32 @@ def set_route():
return
# Set metadata route
- if subp.which('ip'):
- subp.subp([
- 'ip',
- 'route',
- 'add',
- '169.254.169.254/32',
- 'dev',
- net.find_fallback_nic()
- ])
- elif subp.which('route'):
- subp.subp([
- 'route',
- 'add',
- '-net',
- '169.254.169.254/32',
- '100.64.0.1'
- ])
+ if subp.which("ip"):
+ subp.subp(
+ [
+ "ip",
+ "route",
+ "add",
+ "169.254.169.254/32",
+ "dev",
+ net.find_fallback_nic(),
+ ]
+ )
+ elif subp.which("route"):
+ subp.subp(["route", "add", "-net", "169.254.169.254/32", "100.64.0.1"])
# Read the system information from SMBIOS
def get_sysinfo():
return {
- 'manufacturer': dmi.read_dmi_data("system-manufacturer"),
- 'subid': dmi.read_dmi_data("system-serial-number")
+ "manufacturer": dmi.read_dmi_data("system-manufacturer"),
+ "subid": dmi.read_dmi_data("system-serial-number"),
}
# Assumes is Vultr is already checked
def is_baremetal():
- if get_sysinfo()['manufacturer'] != "Vultr":
+ if get_sysinfo()["manufacturer"] != "Vultr":
return True
return False
@@ -102,7 +94,7 @@ def is_vultr():
# VC2, VDC, and HFC use DMI
sysinfo = get_sysinfo()
- if sysinfo['manufacturer'] == "Vultr":
+ if sysinfo["manufacturer"] == "Vultr":
return True
# Baremetal requires a kernel parameter
@@ -118,20 +110,20 @@ def read_metadata(url, timeout, retries, sec_between, agent):
# Announce os details so we can handle non Vultr origin
# images and provide correct vendordata generation.
- headers = {
- 'Metadata-Token': 'cloudinit',
- 'User-Agent': agent
- }
+ headers = {"Metadata-Token": "cloudinit", "User-Agent": agent}
- response = url_helper.readurl(url,
- timeout=timeout,
- retries=retries,
- headers=headers,
- sec_between=sec_between)
+ response = url_helper.readurl(
+ url,
+ timeout=timeout,
+ retries=retries,
+ headers=headers,
+ sec_between=sec_between,
+ )
if not response.ok():
- raise RuntimeError("Failed to connect to %s: Code: %s" %
- url, response.code)
+ raise RuntimeError(
+ "Failed to connect to %s: Code: %s" % url, response.code
+ )
return response.contents.decode()
@@ -156,95 +148,82 @@ def get_interface_name(mac):
def generate_network_config(interfaces):
network = {
"version": 1,
- "config": [
- {
- "type": "nameserver",
- "address": [
- "108.61.10.10"
- ]
- }
- ]
+ "config": [{"type": "nameserver", "address": ["108.61.10.10"]}],
}
# Prepare interface 0, public
if len(interfaces) > 0:
public = generate_public_network_interface(interfaces[0])
- network['config'].append(public)
+ network["config"].append(public)
# Prepare additional interfaces, private
for i in range(1, len(interfaces)):
private = generate_private_network_interface(interfaces[i])
- network['config'].append(private)
+ network["config"].append(private)
return network
# Input Metadata and generate public network config part
def generate_public_network_interface(interface):
- interface_name = get_interface_name(interface['mac'])
+ interface_name = get_interface_name(interface["mac"])
if not interface_name:
raise RuntimeError(
- "Interface: %s could not be found on the system" %
- interface['mac'])
+ "Interface: %s could not be found on the system" % interface["mac"]
+ )
netcfg = {
"name": interface_name,
"type": "physical",
- "mac_address": interface['mac'],
+ "mac_address": interface["mac"],
"accept-ra": 1,
"subnets": [
- {
- "type": "dhcp",
- "control": "auto"
- },
- {
- "type": "ipv6_slaac",
- "control": "auto"
- },
- ]
+ {"type": "dhcp", "control": "auto"},
+ {"type": "ipv6_slaac", "control": "auto"},
+ ],
}
# Options that may or may not be used
if "mtu" in interface:
- netcfg['mtu'] = interface['mtu']
+ netcfg["mtu"] = interface["mtu"]
if "accept-ra" in interface:
- netcfg['accept-ra'] = interface['accept-ra']
+ netcfg["accept-ra"] = interface["accept-ra"]
if "routes" in interface:
- netcfg['subnets'][0]['routes'] = interface['routes']
+ netcfg["subnets"][0]["routes"] = interface["routes"]
# Check for additional IP's
- additional_count = len(interface['ipv4']['additional'])
+ additional_count = len(interface["ipv4"]["additional"])
if "ipv4" in interface and additional_count > 0:
- for additional in interface['ipv4']['additional']:
+ for additional in interface["ipv4"]["additional"]:
add = {
"type": "static",
"control": "auto",
- "address": additional['address'],
- "netmask": additional['netmask']
+ "address": additional["address"],
+ "netmask": additional["netmask"],
}
if "routes" in additional:
- add['routes'] = additional['routes']
+ add["routes"] = additional["routes"]
- netcfg['subnets'].append(add)
+ netcfg["subnets"].append(add)
# Check for additional IPv6's
- additional_count = len(interface['ipv6']['additional'])
+ additional_count = len(interface["ipv6"]["additional"])
if "ipv6" in interface and additional_count > 0:
- for additional in interface['ipv6']['additional']:
+ for additional in interface["ipv6"]["additional"]:
add = {
"type": "static6",
"control": "auto",
- "address": additional['address'],
- "netmask": additional['netmask']
+ "address": additional["address"],
+ "netmask": additional["netmask"],
}
if "routes" in additional:
- add['routes'] = additional['routes']
+ add["routes"] = additional["routes"]
- netcfg['subnets'].append(add)
+ netcfg["subnets"].append(add)
# Add config to template
return netcfg
@@ -252,35 +231,35 @@ def generate_public_network_interface(interface):
# Input Metadata and generate private network config part
def generate_private_network_interface(interface):
- interface_name = get_interface_name(interface['mac'])
+ interface_name = get_interface_name(interface["mac"])
if not interface_name:
raise RuntimeError(
- "Interface: %s could not be found on the system" %
- interface['mac'])
+ "Interface: %s could not be found on the system" % interface["mac"]
+ )
netcfg = {
"name": interface_name,
"type": "physical",
- "mac_address": interface['mac'],
+ "mac_address": interface["mac"],
"subnets": [
{
"type": "static",
"control": "auto",
- "address": interface['ipv4']['address'],
- "netmask": interface['ipv4']['netmask']
+ "address": interface["ipv4"]["address"],
+ "netmask": interface["ipv4"]["netmask"],
}
- ]
+ ],
}
# Options that may or may not be used
if "mtu" in interface:
- netcfg['mtu'] = interface['mtu']
+ netcfg["mtu"] = interface["mtu"]
if "accept-ra" in interface:
- netcfg['accept-ra'] = interface['accept-ra']
+ netcfg["accept-ra"] = interface["accept-ra"]
if "routes" in interface:
- netcfg['subnets'][0]['routes'] = interface['routes']
+ netcfg["subnets"][0]["routes"] = interface["routes"]
return netcfg
@@ -288,12 +267,13 @@ def generate_private_network_interface(interface):
# Make required adjustments to the network configs provided
def add_interface_names(interfaces):
for interface in interfaces:
- interface_name = get_interface_name(interface['mac'])
+ interface_name = get_interface_name(interface["mac"])
if not interface_name:
raise RuntimeError(
- "Interface: %s could not be found on the system" %
- interface['mac'])
- interface['name'] = interface_name
+ "Interface: %s could not be found on the system"
+ % interface["mac"]
+ )
+ interface["name"] = interface_name
return interfaces
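
To make the shape of the Vultr metadata concrete, here is a self-contained sketch of the version-1 config that generate_network_config() assembles for a single public interface; the MAC, addresses, and interface name below are hypothetical (the real helper resolves the name from the MAC via get_interface_name()).

    # Hypothetical Vultr-style interface metadata; the real code resolves the
    # interface name from the MAC via get_interface_name() instead of "ens3".
    interfaces = [
        {
            "mac": "56:00:00:00:00:01",
            "ipv4": {"address": "203.0.113.10", "netmask": "255.255.255.0",
                     "additional": []},
            "ipv6": {"additional": []},
        }
    ]

    network = {
        "version": 1,
        "config": [{"type": "nameserver", "address": ["108.61.10.10"]}],
    }
    # Interface 0 is public: DHCP for IPv4 plus SLAAC for IPv6, as above.
    network["config"].append({
        "name": "ens3",
        "type": "physical",
        "mac_address": interfaces[0]["mac"],
        "accept-ra": 1,
        "subnets": [
            {"type": "dhcp", "control": "auto"},
            {"type": "ipv6_slaac", "control": "auto"},
        ],
    })
    print(network)
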
diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py
index 33679dcc..ab4c63aa 100644
--- a/cloudinit/ssh_util.py
+++ b/cloudinit/ssh_util.py
@@ -60,14 +60,16 @@ _DISABLE_USER_SSH_EXIT = 142
DISABLE_USER_OPTS = (
"no-port-forwarding,no-agent-forwarding,"
- "no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\""
- " rather than the user \\\"$DISABLE_USER\\\".\';echo;sleep 10;"
- "exit " + str(_DISABLE_USER_SSH_EXIT) + "\"")
+ 'no-X11-forwarding,command="echo \'Please login as the user \\"$USER\\"'
+ ' rather than the user \\"$DISABLE_USER\\".\';echo;sleep 10;'
+ "exit " + str(_DISABLE_USER_SSH_EXIT) + '"'
+)
class AuthKeyLine(object):
- def __init__(self, source, keytype=None, base64=None,
- comment=None, options=None):
+ def __init__(
+ self, source, keytype=None, base64=None, comment=None, options=None
+ ):
self.base64 = base64
self.comment = comment
self.options = options
@@ -75,7 +77,7 @@ class AuthKeyLine(object):
self.source = source
def valid(self):
- return (self.base64 and self.keytype)
+ return self.base64 and self.keytype
def __str__(self):
toks = []
@@ -90,7 +92,7 @@ class AuthKeyLine(object):
if not toks:
return self.source
else:
- return ' '.join(toks)
+ return " ".join(toks)
class AuthKeyLineParser(object):
@@ -121,8 +123,7 @@ class AuthKeyLineParser(object):
"""
quoted = False
i = 0
- while (i < len(ent) and
- ((quoted) or (ent[i] not in (" ", "\t")))):
+ while i < len(ent) and ((quoted) or (ent[i] not in (" ", "\t"))):
curc = ent[i]
if i + 1 >= len(ent):
i = i + 1
@@ -143,7 +144,7 @@ class AuthKeyLineParser(object):
def parse(self, src_line, options=None):
# modeled after opensshes auth2-pubkey.c:user_key_allowed2
line = src_line.rstrip("\r\n")
- if line.startswith("#") or line.strip() == '':
+ if line.startswith("#") or line.strip() == "":
return AuthKeyLine(src_line)
def parse_ssh_key(ent):
@@ -174,8 +175,13 @@ class AuthKeyLineParser(object):
except TypeError:
return AuthKeyLine(src_line)
- return AuthKeyLine(src_line, keytype=keytype, base64=base64,
- comment=comment, options=options)
+ return AuthKeyLine(
+ src_line,
+ keytype=keytype,
+ base64=base64,
+ comment=comment,
+ options=options,
+ )
def parse_authorized_keys(fnames):
@@ -218,15 +224,15 @@ def update_authorized_keys(old_entries, keys):
lines = [str(b) for b in old_entries]
# Ensure it ends with a newline
- lines.append('')
- return '\n'.join(lines)
+ lines.append("")
+ return "\n".join(lines)
def users_ssh_info(username):
pw_ent = pwd.getpwnam(username)
if not pw_ent or not pw_ent.pw_dir:
raise RuntimeError("Unable to get SSH info for user %r" % (username))
- return (os.path.join(pw_ent.pw_dir, '.ssh'), pw_ent)
+ return (os.path.join(pw_ent.pw_dir, ".ssh"), pw_ent)
def render_authorizedkeysfile_paths(value, homedir, username):
@@ -269,9 +275,14 @@ def check_permissions(username, current_path, full_path, is_file, strictmodes):
# 1. owner must be either root or the user itself
owner = util.get_owner(current_path)
if strictmodes and owner != username and owner != "root":
- LOG.debug("Path %s in %s must be own by user %s or"
- " by root, but instead is own by %s. Ignoring key.",
- current_path, full_path, username, owner)
+ LOG.debug(
+ "Path %s in %s must be own by user %s or"
+ " by root, but instead is own by %s. Ignoring key.",
+ current_path,
+ full_path,
+ username,
+ owner,
+ )
return False
parent_permission = util.get_permissions(current_path)
@@ -291,17 +302,24 @@ def check_permissions(username, current_path, full_path, is_file, strictmodes):
minimal_permissions &= 0o007
if parent_permission & minimal_permissions == 0:
- LOG.debug("Path %s in %s must be accessible by user %s,"
- " check its permissions",
- current_path, full_path, username)
+ LOG.debug(
+ "Path %s in %s must be accessible by user %s,"
+ " check its permissions",
+ current_path,
+ full_path,
+ username,
+ )
return False
# 3. no write permission (w) is given to group and world users (022)
# Group and world user can still have +rx.
if strictmodes and parent_permission & 0o022 != 0:
- LOG.debug("Path %s in %s must not give write"
- "permission to group or world users. Ignoring key.",
- current_path, full_path)
+ LOG.debug(
+ "Path %s in %s must not give write"
+ "permission to group or world users. Ignoring key.",
+ current_path,
+ full_path,
+ )
return False
return True
@@ -326,17 +344,20 @@ def check_create_path(username, filename, strictmodes):
if os.path.islink(parent_folder):
LOG.debug(
"Invalid directory. Symlink exists in path: %s",
- parent_folder)
+ parent_folder,
+ )
return False
if os.path.isfile(parent_folder):
LOG.debug(
- "Invalid directory. File exists in path: %s",
- parent_folder)
+ "Invalid directory. File exists in path: %s", parent_folder
+ )
return False
- if (home_folder.startswith(parent_folder) or
- parent_folder == user_pwent.pw_dir):
+ if (
+ home_folder.startswith(parent_folder)
+ or parent_folder == user_pwent.pw_dir
+ ):
continue
if not os.path.exists(parent_folder):
@@ -354,8 +375,9 @@ def check_create_path(username, filename, strictmodes):
os.makedirs(parent_folder, mode=mode, exist_ok=True)
util.chownbyid(parent_folder, uid, gid)
- permissions = check_permissions(username, parent_folder,
- filename, False, strictmodes)
+ permissions = check_permissions(
+ username, parent_folder, filename, False, strictmodes
+ )
if not permissions:
return False
@@ -367,11 +389,12 @@ def check_create_path(username, filename, strictmodes):
if not os.path.exists(filename):
# if file does not exist: we need to create it, since the
# folders at this point exist and have right permissions
- util.write_file(filename, '', mode=0o600, ensure_dir_exists=True)
+ util.write_file(filename, "", mode=0o600, ensure_dir_exists=True)
util.chownbyid(filename, user_pwent.pw_uid, user_pwent.pw_gid)
- permissions = check_permissions(username, filename,
- filename, True, strictmodes)
+ permissions = check_permissions(
+ username, filename, filename, True, strictmodes
+ )
if not permissions:
return False
except (IOError, OSError) as e:
@@ -383,34 +406,44 @@ def check_create_path(username, filename, strictmodes):
def extract_authorized_keys(username, sshd_cfg_file=DEF_SSHD_CFG):
(ssh_dir, pw_ent) = users_ssh_info(username)
- default_authorizedkeys_file = os.path.join(ssh_dir, 'authorized_keys')
+ default_authorizedkeys_file = os.path.join(ssh_dir, "authorized_keys")
user_authorizedkeys_file = default_authorizedkeys_file
auth_key_fns = []
with util.SeLinuxGuard(ssh_dir, recursive=True):
try:
ssh_cfg = parse_ssh_config_map(sshd_cfg_file)
- key_paths = ssh_cfg.get("authorizedkeysfile",
- "%h/.ssh/authorized_keys")
+ key_paths = ssh_cfg.get(
+ "authorizedkeysfile", "%h/.ssh/authorized_keys"
+ )
strictmodes = ssh_cfg.get("strictmodes", "yes")
auth_key_fns = render_authorizedkeysfile_paths(
- key_paths, pw_ent.pw_dir, username)
+ key_paths, pw_ent.pw_dir, username
+ )
except (IOError, OSError):
# Give up and use a default key filename
auth_key_fns[0] = default_authorizedkeys_file
- util.logexc(LOG, "Failed extracting 'AuthorizedKeysFile' in SSH "
- "config from %r, using 'AuthorizedKeysFile' file "
- "%r instead", DEF_SSHD_CFG, auth_key_fns[0])
+ util.logexc(
+ LOG,
+ "Failed extracting 'AuthorizedKeysFile' in SSH "
+ "config from %r, using 'AuthorizedKeysFile' file "
+ "%r instead",
+ DEF_SSHD_CFG,
+ auth_key_fns[0],
+ )
# check if one of the keys is the user's one and has the right permissions
for key_path, auth_key_fn in zip(key_paths.split(), auth_key_fns):
- if any([
- '%u' in key_path,
- '%h' in key_path,
- auth_key_fn.startswith('{}/'.format(pw_ent.pw_dir))
- ]):
- permissions_ok = check_create_path(username, auth_key_fn,
- strictmodes == "yes")
+ if any(
+ [
+ "%u" in key_path,
+ "%h" in key_path,
+ auth_key_fn.startswith("{}/".format(pw_ent.pw_dir)),
+ ]
+ ):
+ permissions_ok = check_create_path(
+ username, auth_key_fn, strictmodes == "yes"
+ )
if permissions_ok:
user_authorizedkeys_file = auth_key_fn
break
@@ -418,11 +451,13 @@ def extract_authorized_keys(username, sshd_cfg_file=DEF_SSHD_CFG):
if user_authorizedkeys_file != default_authorizedkeys_file:
LOG.debug(
"AuthorizedKeysFile has an user-specific authorized_keys, "
- "using %s", user_authorizedkeys_file)
+ "using %s",
+ user_authorizedkeys_file,
+ )
return (
user_authorizedkeys_file,
- parse_authorized_keys([user_authorizedkeys_file])
+ parse_authorized_keys([user_authorizedkeys_file]),
)
@@ -485,11 +520,13 @@ def parse_ssh_config_lines(lines):
key, val = line.split(None, 1)
except ValueError:
try:
- key, val = line.split('=', 1)
+ key, val = line.split("=", 1)
except ValueError:
LOG.debug(
- "sshd_config: option \"%s\" has no key/value pair,"
- " skipping it", line)
+ 'sshd_config: option "%s" has no key/value pair,'
+ " skipping it",
+ line,
+ )
continue
ret.append(SshdConfigLine(line, key, val))
return ret
@@ -516,9 +553,10 @@ def update_ssh_config(updates, fname=DEF_SSHD_CFG):
changed = update_ssh_config_lines(lines=lines, updates=updates)
if changed:
util.write_file(
- fname, "\n".join(
- [str(line) for line in lines]
- ) + "\n", preserve_mode=True)
+ fname,
+ "\n".join([str(line) for line in lines]) + "\n",
+ preserve_mode=True,
+ )
return len(changed) != 0
@@ -542,12 +580,18 @@ def update_ssh_config_lines(lines, updates):
value = updates[key]
found.add(key)
if line.value == value:
- LOG.debug("line %d: option %s already set to %s",
- i, key, value)
+ LOG.debug(
+ "line %d: option %s already set to %s", i, key, value
+ )
else:
changed.append(key)
- LOG.debug("line %d: option %s updated %s -> %s", i,
- key, line.value, value)
+ LOG.debug(
+ "line %d: option %s updated %s -> %s",
+ i,
+ key,
+ line.value,
+ value,
+ )
line.value = value
if len(found) != len(updates):
@@ -555,9 +599,11 @@ def update_ssh_config_lines(lines, updates):
if key in found:
continue
changed.append(key)
- lines.append(SshdConfigLine('', key, value))
- LOG.debug("line %d: option %s added with %s",
- len(lines), key, value)
+ lines.append(SshdConfigLine("", key, value))
+ LOG.debug(
+ "line %d: option %s added with %s", len(lines), key, value
+ )
return changed
+
# vi: ts=4 expandtab
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 731b2982..b1a6bc49 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -11,10 +11,10 @@ import sys
from collections import namedtuple
from typing import Dict, Set # noqa: F401
-from cloudinit.settings import (
- FREQUENCIES, CLOUD_CONFIG, PER_INSTANCE, PER_ONCE, RUN_CLOUD_CONFIG)
-
-from cloudinit import handlers
+from cloudinit import cloud, config, distros, handlers, helpers, importer
+from cloudinit import log as logging
+from cloudinit import net, sources, type_utils, util
+from cloudinit.event import EventScope, EventType, userdata_to_events
# Default handlers (used if not overridden)
from cloudinit.handlers.boot_hook import BootHookPartHandler
@@ -22,26 +22,16 @@ from cloudinit.handlers.cloud_config import CloudConfigPartHandler
from cloudinit.handlers.jinja_template import JinjaTemplatePartHandler
from cloudinit.handlers.shell_script import ShellScriptPartHandler
from cloudinit.handlers.upstart_job import UpstartJobPartHandler
-
-from cloudinit.event import (
- EventScope,
- EventType,
- userdata_to_events,
-)
-from cloudinit.sources import NetworkConfigSource
-
-from cloudinit import cloud
-from cloudinit import config
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import importer
-from cloudinit import log as logging
-from cloudinit import net
from cloudinit.net import cmdline
from cloudinit.reporting import events
-from cloudinit import sources
-from cloudinit import type_utils
-from cloudinit import util
+from cloudinit.settings import (
+ CLOUD_CONFIG,
+ FREQUENCIES,
+ PER_INSTANCE,
+ PER_ONCE,
+ RUN_CLOUD_CONFIG,
+)
+from cloudinit.sources import NetworkConfigSource
LOG = logging.getLogger(__name__)
@@ -53,7 +43,7 @@ def update_event_enabled(
datasource: sources.DataSource,
cfg: dict,
event_source_type: EventType,
- scope: EventScope = None
+ scope: EventScope = None,
) -> bool:
"""Determine if a particular EventType is enabled.
@@ -67,14 +57,20 @@ def update_event_enabled(
case, we only have the data source's `default_update_events`,
so an event that should be enabled in userdata may be denied.
"""
- default_events = datasource.default_update_events # type: Dict[EventScope, Set[EventType]] # noqa: E501
- user_events = userdata_to_events(cfg.get('updates', {})) # type: Dict[EventScope, Set[EventType]] # noqa: E501
+ default_events = (
+ datasource.default_update_events
+ ) # type: Dict[EventScope, Set[EventType]]
+ user_events = userdata_to_events(
+ cfg.get("updates", {})
+ ) # type: Dict[EventScope, Set[EventType]]
# A value in the first will override a value in the second
- allowed = util.mergemanydict([
- copy.deepcopy(user_events),
- copy.deepcopy(default_events),
- ])
- LOG.debug('Allowed events: %s', allowed)
+ allowed = util.mergemanydict(
+ [
+ copy.deepcopy(user_events),
+ copy.deepcopy(default_events),
+ ]
+ )
+ LOG.debug("Allowed events: %s", allowed)
if not scope:
scopes = allowed.keys()
@@ -85,14 +81,14 @@ def update_event_enabled(
for evt_scope in scopes:
if event_source_type in allowed.get(evt_scope, []):
LOG.debug(
- 'Event Allowed: scope=%s EventType=%s',
- evt_scope.value, event_source_type
+ "Event Allowed: scope=%s EventType=%s",
+ evt_scope.value,
+ event_source_type,
)
return True
LOG.debug(
- 'Event Denied: scopes=%s EventType=%s',
- scope_values, event_source_type
+ "Event Denied: scopes=%s EventType=%s", scope_values, event_source_type
)
return False
@@ -114,8 +110,10 @@ class Init(object):
if reporter is None:
reporter = events.ReportEventStack(
- name="init-reporter", description="init-desc",
- reporting_enabled=False)
+ name="init-reporter",
+ description="init-desc",
+ reporting_enabled=False,
+ )
self.reporter = reporter
def _reset(self, reset_ds=False):
@@ -131,8 +129,8 @@ class Init(object):
def distro(self):
if not self._distro:
# Try to find the right class to use
- system_config = self._extract_cfg('system')
- distro_name = system_config.pop('distro', 'ubuntu')
+ system_config = self._extract_cfg("system")
+ distro_name = system_config.pop("distro", "ubuntu")
distro_cls = distros.fetch(distro_name)
LOG.debug("Using distro class %s", distro_cls)
self._distro = distro_cls(distro_name, system_config, self.paths)
@@ -146,19 +144,19 @@ class Init(object):
@property
def cfg(self):
- return self._extract_cfg('restricted')
+ return self._extract_cfg("restricted")
def _extract_cfg(self, restriction):
# Ensure actually read
self.read_cfg()
# Nobody gets the real config
ocfg = copy.deepcopy(self._cfg)
- if restriction == 'restricted':
- ocfg.pop('system_info', None)
- elif restriction == 'system':
- ocfg = util.get_cfg_by_path(ocfg, ('system_info',), {})
- elif restriction == 'paths':
- ocfg = util.get_cfg_by_path(ocfg, ('system_info', 'paths'), {})
+ if restriction == "restricted":
+ ocfg.pop("system_info", None)
+ elif restriction == "system":
+ ocfg = util.get_cfg_by_path(ocfg, ("system_info",), {})
+ elif restriction == "paths":
+ ocfg = util.get_cfg_by_path(ocfg, ("system_info", "paths"), {})
if not isinstance(ocfg, (dict)):
ocfg = {}
return ocfg
@@ -166,7 +164,7 @@ class Init(object):
@property
def paths(self):
if not self._paths:
- path_info = self._extract_cfg('paths')
+ path_info = self._extract_cfg("paths")
self._paths = helpers.Paths(path_info, self.datasource)
return self._paths
@@ -175,17 +173,17 @@ class Init(object):
run_dir = self.paths.run_dir
initial_dirs = [
c_dir,
- os.path.join(c_dir, 'scripts'),
- os.path.join(c_dir, 'scripts', 'per-instance'),
- os.path.join(c_dir, 'scripts', 'per-once'),
- os.path.join(c_dir, 'scripts', 'per-boot'),
- os.path.join(c_dir, 'scripts', 'vendor'),
- os.path.join(c_dir, 'seed'),
- os.path.join(c_dir, 'instances'),
- os.path.join(c_dir, 'handlers'),
- os.path.join(c_dir, 'sem'),
- os.path.join(c_dir, 'data'),
- os.path.join(run_dir, 'sem'),
+ os.path.join(c_dir, "scripts"),
+ os.path.join(c_dir, "scripts", "per-instance"),
+ os.path.join(c_dir, "scripts", "per-once"),
+ os.path.join(c_dir, "scripts", "per-boot"),
+ os.path.join(c_dir, "scripts", "vendor"),
+ os.path.join(c_dir, "seed"),
+ os.path.join(c_dir, "instances"),
+ os.path.join(c_dir, "handlers"),
+ os.path.join(c_dir, "sem"),
+ os.path.join(c_dir, "data"),
+ os.path.join(run_dir, "sem"),
]
return initial_dirs
@@ -202,10 +200,10 @@ class Init(object):
def _initialize_filesystem(self):
util.ensure_dirs(self._initial_subdirs())
- log_file = util.get_cfg_option_str(self.cfg, 'def_log_file')
+ log_file = util.get_cfg_option_str(self.cfg, "def_log_file")
if log_file:
util.ensure_file(log_file, mode=0o640, preserve_mode=True)
- perms = self.cfg.get('syslog_fix_perms')
+ perms = self.cfg.get("syslog_fix_perms")
if not perms:
perms = {}
if not isinstance(perms, list):
@@ -220,8 +218,12 @@ class Init(object):
except OSError as e:
error = e
- LOG.warning("Failed changing perms on '%s'. tried: %s. %s",
- log_file, ','.join(perms), error)
+ LOG.warning(
+ "Failed changing perms on '%s'. tried: %s. %s",
+ log_file,
+ ",".join(perms),
+ error,
+ )
def read_cfg(self, extra_fns=None):
# None check so that we don't keep on re-loading if empty
@@ -231,37 +233,41 @@ class Init(object):
def _read_cfg(self, extra_fns):
no_cfg_paths = helpers.Paths({}, self.datasource)
- merger = helpers.ConfigMerger(paths=no_cfg_paths,
- datasource=self.datasource,
- additional_fns=extra_fns,
- base_cfg=fetch_base_config())
+ merger = helpers.ConfigMerger(
+ paths=no_cfg_paths,
+ datasource=self.datasource,
+ additional_fns=extra_fns,
+ base_cfg=fetch_base_config(),
+ )
return merger.cfg
def _restore_from_cache(self):
# We try to restore from a current link and static path
# by using the instance link, if purge_cache was called
# the file wont exist.
- return _pkl_load(self.paths.get_ipath_cur('obj_pkl'))
+ return _pkl_load(self.paths.get_ipath_cur("obj_pkl"))
def _write_to_cache(self):
if self.datasource is NULL_DATA_SOURCE:
return False
- if util.get_cfg_option_bool(self.cfg, 'manual_cache_clean', False):
+ if util.get_cfg_option_bool(self.cfg, "manual_cache_clean", False):
# The empty file in instance/ dir indicates manual cleaning,
# and can be read by ds-identify.
util.write_file(
self.paths.get_ipath_cur("manual_clean_marker"),
- omode="w", content="")
+ omode="w",
+ content="",
+ )
return _pkl_store(self.datasource, self.paths.get_ipath_cur("obj_pkl"))
def _get_datasources(self):
# Any config provided???
- pkg_list = self.cfg.get('datasource_pkg_list') or []
+ pkg_list = self.cfg.get("datasource_pkg_list") or []
# Add the defaults at the end
- for n in ['', type_utils.obj_name(sources)]:
+ for n in ["", type_utils.obj_name(sources)]:
if n not in pkg_list:
pkg_list.append(n)
- cfg_list = self.cfg.get('datasource_list') or []
+ cfg_list = self.cfg.get("datasource_list") or []
return (cfg_list, pkg_list)
def _restore_from_checked_cache(self, existing):
@@ -272,7 +278,7 @@ class Init(object):
if not ds:
return (None, "no cache found")
- run_iid_fn = self.paths.get_runpath('instance_id')
+ run_iid_fn = self.paths.get_runpath("instance_id")
if os.path.exists(run_iid_fn):
run_iid = util.load_file(run_iid_fn).strip()
else:
@@ -283,8 +289,9 @@ class Init(object):
elif existing == "trust":
return (ds, "restored from cache: %s" % ds)
else:
- if (hasattr(ds, 'check_instance_id') and
- ds.check_instance_id(self.cfg)):
+ if hasattr(ds, "check_instance_id") and ds.check_instance_id(
+ self.cfg
+ ):
return (ds, "restored from checked cache: %s" % ds)
else:
return (None, "cache invalid in datasource: %s" % ds)
@@ -294,9 +301,10 @@ class Init(object):
return self.datasource
with events.ReportEventStack(
- name="check-cache",
- description="attempting to read from cache [%s]" % existing,
- parent=self.reporter) as myrep:
+ name="check-cache",
+ description="attempting to read from cache [%s]" % existing,
+ parent=self.reporter,
+ ) as myrep:
ds, desc = self._restore_from_checked_cache(existing)
myrep.description = desc
@@ -308,12 +316,15 @@ class Init(object):
(cfg_list, pkg_list) = self._get_datasources()
# Deep copy so that user-data handlers can not modify
# (which will affect user-data handlers down the line...)
- (ds, dsname) = sources.find_source(self.cfg,
- self.distro,
- self.paths,
- copy.deepcopy(self.ds_deps),
- cfg_list,
- pkg_list, self.reporter)
+ (ds, dsname) = sources.find_source(
+ self.cfg,
+ self.distro,
+ self.paths,
+ copy.deepcopy(self.ds_deps),
+ cfg_list,
+ pkg_list,
+ self.reporter,
+ )
LOG.info("Loaded datasource %s - %s", dsname, ds)
self.datasource = ds # type: sources.DataSource
# Ensure we adjust our path members datasource
@@ -322,7 +333,7 @@ class Init(object):
return ds
def _get_instance_subdirs(self):
- return ['handlers', 'scripts', 'sem']
+ return ["handlers", "scripts", "sem"]
def _get_ipath(self, subname=None):
# Force a check to see if anything
@@ -330,8 +341,10 @@ class Init(object):
# then a datasource has not been assigned...
instance_dir = self.paths.get_ipath(subname)
if not instance_dir:
- raise RuntimeError(("No instance directory is available."
- " Has a datasource been fetched??"))
+ raise RuntimeError(
+ "No instance directory is available."
+ " Has a datasource been fetched??"
+ )
return instance_dir
def _reflect_cur_instance(self):
@@ -349,12 +362,12 @@ class Init(object):
# Write out information on what is being used for the current instance
# and what may have been used for a previous instance...
- dp = self.paths.get_cpath('data')
+ dp = self.paths.get_cpath("data")
# Write what the datasource was and is..
ds = "%s: %s" % (type_utils.obj_name(self.datasource), self.datasource)
previous_ds = None
- ds_fn = os.path.join(idir, 'datasource')
+ ds_fn = os.path.join(idir, "datasource")
try:
previous_ds = util.load_file(ds_fn).strip()
except Exception:
@@ -362,18 +375,20 @@ class Init(object):
if not previous_ds:
previous_ds = ds
util.write_file(ds_fn, "%s\n" % ds)
- util.write_file(os.path.join(dp, 'previous-datasource'),
- "%s\n" % (previous_ds))
+ util.write_file(
+ os.path.join(dp, "previous-datasource"), "%s\n" % (previous_ds)
+ )
# What the instance id was and is...
iid = self.datasource.get_instance_id()
- iid_fn = os.path.join(dp, 'instance-id')
+ iid_fn = os.path.join(dp, "instance-id")
previous_iid = self.previous_iid()
util.write_file(iid_fn, "%s\n" % iid)
- util.write_file(self.paths.get_runpath('instance_id'), "%s\n" % iid)
- util.write_file(os.path.join(dp, 'previous-instance-id'),
- "%s\n" % (previous_iid))
+ util.write_file(self.paths.get_runpath("instance_id"), "%s\n" % iid)
+ util.write_file(
+ os.path.join(dp, "previous-instance-id"), "%s\n" % (previous_iid)
+ )
self._write_to_cache()
# Ensure needed components are regenerated
@@ -386,8 +401,8 @@ class Init(object):
if self._previous_iid is not None:
return self._previous_iid
- dp = self.paths.get_cpath('data')
- iid_fn = os.path.join(dp, 'instance-id')
+ dp = self.paths.get_cpath("data")
+ iid_fn = os.path.join(dp, "instance-id")
try:
self._previous_iid = util.load_file(iid_fn).strip()
except Exception:
@@ -403,8 +418,10 @@ class Init(object):
even on first boot.
"""
previous = self.previous_iid()
- ret = (previous == NO_PREVIOUS_INSTANCE_ID or
- previous != self.datasource.get_instance_id())
+ ret = (
+ previous == NO_PREVIOUS_INSTANCE_ID
+ or previous != self.datasource.get_instance_id()
+ )
return ret
def fetch(self, existing="check"):
@@ -415,54 +432,64 @@ class Init(object):
def cloudify(self):
# Form the needed options to cloudify our members
- return cloud.Cloud(self.datasource,
- self.paths, self.cfg,
- self.distro, helpers.Runners(self.paths),
- reporter=self.reporter)
+ return cloud.Cloud(
+ self.datasource,
+ self.paths,
+ self.cfg,
+ self.distro,
+ helpers.Runners(self.paths),
+ reporter=self.reporter,
+ )
def update(self):
- self._store_rawdata(self.datasource.get_userdata_raw(),
- 'userdata')
- self._store_processeddata(self.datasource.get_userdata(),
- 'userdata')
- self._store_raw_vendordata(self.datasource.get_vendordata_raw(),
- 'vendordata')
- self._store_processeddata(self.datasource.get_vendordata(),
- 'vendordata')
- self._store_raw_vendordata(self.datasource.get_vendordata2_raw(),
- 'vendordata2')
- self._store_processeddata(self.datasource.get_vendordata2(),
- 'vendordata2')
+ self._store_rawdata(self.datasource.get_userdata_raw(), "userdata")
+ self._store_processeddata(self.datasource.get_userdata(), "userdata")
+ self._store_raw_vendordata(
+ self.datasource.get_vendordata_raw(), "vendordata"
+ )
+ self._store_processeddata(
+ self.datasource.get_vendordata(), "vendordata"
+ )
+ self._store_raw_vendordata(
+ self.datasource.get_vendordata2_raw(), "vendordata2"
+ )
+ self._store_processeddata(
+ self.datasource.get_vendordata2(), "vendordata2"
+ )
def setup_datasource(self):
- with events.ReportEventStack("setup-datasource",
- "setting up datasource",
- parent=self.reporter):
+ with events.ReportEventStack(
+ "setup-datasource", "setting up datasource", parent=self.reporter
+ ):
if self.datasource is None:
raise RuntimeError("Datasource is None, cannot setup.")
self.datasource.setup(is_new_instance=self.is_new_instance())
def activate_datasource(self):
- with events.ReportEventStack("activate-datasource",
- "activating datasource",
- parent=self.reporter):
+ with events.ReportEventStack(
+ "activate-datasource",
+ "activating datasource",
+ parent=self.reporter,
+ ):
if self.datasource is None:
raise RuntimeError("Datasource is None, cannot activate.")
- self.datasource.activate(cfg=self.cfg,
- is_new_instance=self.is_new_instance())
+ self.datasource.activate(
+ cfg=self.cfg, is_new_instance=self.is_new_instance()
+ )
self._write_to_cache()
def _store_rawdata(self, data, datasource):
# Raw data is bytes, not a string
if data is None:
- data = b''
- util.write_file(self._get_ipath('%s_raw' % datasource), data, 0o600)
+ data = b""
+ util.write_file(self._get_ipath("%s_raw" % datasource), data, 0o600)
def _store_raw_vendordata(self, data, datasource):
# Only these data types
if data is not None and type(data) not in [bytes, str, list]:
- raise TypeError("vendordata_raw is unsupported type '%s'" %
- str(type(data)))
+ raise TypeError(
+ "vendordata_raw is unsupported type '%s'" % str(type(data))
+ )
# This data may be a list, convert it to a string if so
if isinstance(data, list):
data = util.json_dumps(data)
@@ -471,18 +498,21 @@ class Init(object):
def _store_processeddata(self, processed_data, datasource):
# processed is a Mime message, so write as string.
if processed_data is None:
- processed_data = ''
- util.write_file(self._get_ipath(datasource),
- str(processed_data), 0o600)
+ processed_data = ""
+ util.write_file(
+ self._get_ipath(datasource), str(processed_data), 0o600
+ )
def _default_handlers(self, opts=None):
if opts is None:
opts = {}
- opts.update({
- 'paths': self.paths,
- 'datasource': self.datasource,
- })
+ opts.update(
+ {
+ "paths": self.paths,
+ "datasource": self.datasource,
+ }
+ )
# TODO(harlowja) Hmmm, should we dynamically import these??
cloudconfig_handler = CloudConfigPartHandler(**opts)
shellscript_handler = ShellScriptPartHandler(**opts)
@@ -493,7 +523,8 @@ class Init(object):
UpstartJobPartHandler(**opts),
]
opts.update(
- {'sub_handlers': [cloudconfig_handler, shellscript_handler]})
+ {"sub_handlers": [cloudconfig_handler, shellscript_handler]}
+ )
def_handlers.append(JinjaTemplatePartHandler(**opts))
return def_handlers
@@ -502,16 +533,23 @@ class Init(object):
def _default_vendordata_handlers(self):
return self._default_handlers(
- opts={'script_path': 'vendor_scripts',
- 'cloud_config_path': 'vendor_cloud_config'})
+ opts={
+ "script_path": "vendor_scripts",
+ "cloud_config_path": "vendor_cloud_config",
+ }
+ )
def _default_vendordata2_handlers(self):
return self._default_handlers(
- opts={'script_path': 'vendor_scripts',
- 'cloud_config_path': 'vendor2_cloud_config'})
+ opts={
+ "script_path": "vendor_scripts",
+ "cloud_config_path": "vendor2_cloud_config",
+ }
+ )
- def _do_handlers(self, data_msg, c_handlers_list, frequency,
- excluded=None):
+ def _do_handlers(
+ self, data_msg, c_handlers_list, frequency, excluded=None
+ ):
"""
Generalized handlers suitable for use with either vendordata
or userdata
@@ -538,21 +576,31 @@ class Init(object):
for (fname, mod_name) in potential_handlers.items():
try:
mod_locs, looked_locs = importer.find_module(
- mod_name, [''], ['list_types', 'handle_part'])
+ mod_name, [""], ["list_types", "handle_part"]
+ )
if not mod_locs:
- LOG.warning("Could not find a valid user-data handler"
- " named %s in file %s (searched %s)",
- mod_name, fname, looked_locs)
+ LOG.warning(
+ "Could not find a valid user-data handler"
+ " named %s in file %s (searched %s)",
+ mod_name,
+ fname,
+ looked_locs,
+ )
continue
mod = importer.import_module(mod_locs[0])
mod = handlers.fixup_handler(mod)
types = c_handlers.register(mod)
if types:
- LOG.debug("Added custom handler for %s [%s] from %s",
- types, mod, fname)
+ LOG.debug(
+ "Added custom handler for %s [%s] from %s",
+ types,
+ mod,
+ fname,
+ )
except Exception:
- util.logexc(LOG, "Failed to register handler from %s",
- fname)
+ util.logexc(
+ LOG, "Failed to register handler from %s", fname
+ )
# This keeps track of all the active handlers
c_handlers = helpers.ContentHandlers()
@@ -584,17 +632,17 @@ class Init(object):
def walk_handlers(excluded):
# Walk the user data
part_data = {
- 'handlers': c_handlers,
+ "handlers": c_handlers,
# Any new handlers that are encountered get writen here
- 'handlerdir': idir,
- 'data': data,
+ "handlerdir": idir,
+ "data": data,
# The default frequency if handlers don't have one
- 'frequency': frequency,
+ "frequency": frequency,
# This will be used when new handlers are found
# to help write their contents to files with numbered
# names...
- 'handlercount': 0,
- 'excluded': excluded,
+ "handlercount": 0,
+ "excluded": excluded,
}
handlers.walk(data_msg, handlers.walker_callback, data=part_data)
@@ -620,22 +668,28 @@ class Init(object):
def consume_data(self, frequency=PER_INSTANCE):
# Consume the userdata first, because we need want to let the part
# handlers run first (for merging stuff)
- with events.ReportEventStack("consume-user-data",
- "reading and applying user-data",
- parent=self.reporter):
- if util.get_cfg_option_bool(self.cfg, 'allow_userdata', True):
+ with events.ReportEventStack(
+ "consume-user-data",
+ "reading and applying user-data",
+ parent=self.reporter,
+ ):
+ if util.get_cfg_option_bool(self.cfg, "allow_userdata", True):
self._consume_userdata(frequency)
else:
- LOG.debug('allow_userdata = False: discarding user-data')
+ LOG.debug("allow_userdata = False: discarding user-data")
- with events.ReportEventStack("consume-vendor-data",
- "reading and applying vendor-data",
- parent=self.reporter):
+ with events.ReportEventStack(
+ "consume-vendor-data",
+ "reading and applying vendor-data",
+ parent=self.reporter,
+ ):
self._consume_vendordata("vendordata", frequency)
- with events.ReportEventStack("consume-vendor-data2",
- "reading and applying vendor-data2",
- parent=self.reporter):
+ with events.ReportEventStack(
+ "consume-vendor-data2",
+ "reading and applying vendor-data2",
+ parent=self.reporter,
+ ):
self._consume_vendordata("vendordata2", frequency)
# Perform post-consumption adjustments so that
@@ -658,48 +712,56 @@ class Init(object):
# So we merge the other available cloud-configs (everything except
# vendor provided), and check whether or not we should consume
# vendor data at all. That gives user or system a chance to override.
- if vendor_source == 'vendordata':
+ if vendor_source == "vendordata":
if not self.datasource.get_vendordata_raw():
LOG.debug("no vendordata from datasource")
return
- cfg_name = 'vendor_data'
- elif vendor_source == 'vendordata2':
+ cfg_name = "vendor_data"
+ elif vendor_source == "vendordata2":
if not self.datasource.get_vendordata2_raw():
LOG.debug("no vendordata2 from datasource")
return
- cfg_name = 'vendor_data2'
+ cfg_name = "vendor_data2"
else:
- raise RuntimeError("vendor_source arg must be either 'vendordata'"
- " or 'vendordata2'")
-
- _cc_merger = helpers.ConfigMerger(paths=self._paths,
- datasource=self.datasource,
- additional_fns=[],
- base_cfg=self.cfg,
- include_vendor=False)
+ raise RuntimeError(
+ "vendor_source arg must be either 'vendordata'"
+ " or 'vendordata2'"
+ )
+
+ _cc_merger = helpers.ConfigMerger(
+ paths=self._paths,
+ datasource=self.datasource,
+ additional_fns=[],
+ base_cfg=self.cfg,
+ include_vendor=False,
+ )
vdcfg = _cc_merger.cfg.get(cfg_name, {})
if not isinstance(vdcfg, dict):
- vdcfg = {'enabled': False}
- LOG.warning("invalid %s setting. resetting to: %s",
- cfg_name, vdcfg)
+ vdcfg = {"enabled": False}
+ LOG.warning(
+ "invalid %s setting. resetting to: %s", cfg_name, vdcfg
+ )
- enabled = vdcfg.get('enabled')
- no_handlers = vdcfg.get('disabled_handlers', None)
+ enabled = vdcfg.get("enabled")
+ no_handlers = vdcfg.get("disabled_handlers", None)
if not util.is_true(enabled):
LOG.debug("%s consumption is disabled.", vendor_source)
return
- LOG.debug("%s will be consumed. disabled_handlers=%s",
- vendor_source, no_handlers)
+ LOG.debug(
+ "%s will be consumed. disabled_handlers=%s",
+ vendor_source,
+ no_handlers,
+ )
# Ensure vendordata source fetched before activation (just in case.)
# c_handlers_list keeps track of all the active handlers, while
# excluding what the users doesn't want run, i.e. boot_hook,
# cloud_config, shell_script
- if vendor_source == 'vendordata':
+ if vendor_source == "vendordata":
vendor_data_msg = self.datasource.get_vendordata()
c_handlers_list = self._default_vendordata_handlers()
else:
@@ -707,8 +769,9 @@ class Init(object):
c_handlers_list = self._default_vendordata2_handlers()
# Run the handlers
- self._do_handlers(vendor_data_msg, c_handlers_list, frequency,
- excluded=no_handlers)
+ self._do_handlers(
+ vendor_data_msg, c_handlers_list, frequency, excluded=no_handlers
+ )
def _consume_userdata(self, frequency=PER_INSTANCE):
"""
@@ -726,7 +789,8 @@ class Init(object):
def _find_networking_config(self):
disable_file = os.path.join(
- self.paths.get_cpath('data'), 'upgraded-network')
+ self.paths.get_cpath("data"), "upgraded-network"
+ )
if os.path.exists(disable_file):
return (None, disable_file)
@@ -734,12 +798,13 @@ class Init(object):
NetworkConfigSource.cmdline: cmdline.read_kernel_cmdline_config(),
NetworkConfigSource.initramfs: cmdline.read_initramfs_config(),
NetworkConfigSource.ds: None,
- NetworkConfigSource.system_cfg: self.cfg.get('network'),
+ NetworkConfigSource.system_cfg: self.cfg.get("network"),
}
- if self.datasource and hasattr(self.datasource, 'network_config'):
- available_cfgs[NetworkConfigSource.ds] = (
- self.datasource.network_config)
+ if self.datasource and hasattr(self.datasource, "network_config"):
+ available_cfgs[
+ NetworkConfigSource.ds
+ ] = self.datasource.network_config
if self.datasource:
order = self.datasource.network_config_sources
@@ -747,12 +812,17 @@ class Init(object):
order = sources.DataSource.network_config_sources
for cfg_source in order:
if not hasattr(NetworkConfigSource, cfg_source):
- LOG.warning('data source specifies an invalid network'
- ' cfg_source: %s', cfg_source)
+ LOG.warning(
+ "data source specifies an invalid network cfg_source: %s",
+ cfg_source,
+ )
continue
if cfg_source not in available_cfgs:
- LOG.warning('data source specifies an unavailable network'
- ' cfg_source: %s', cfg_source)
+ LOG.warning(
+ "data source specifies an unavailable network"
+ " cfg_source: %s",
+ cfg_source,
+ )
continue
ncfg = available_cfgs[cfg_source]
if net.is_disabled_cfg(ncfg):
@@ -760,8 +830,10 @@ class Init(object):
return (None, cfg_source)
if ncfg:
return (ncfg, cfg_source)
- return (self.distro.generate_fallback_config(),
- NetworkConfigSource.fallback)
+ return (
+ self.distro.generate_fallback_config(),
+ NetworkConfigSource.fallback,
+ )
def _apply_netcfg_names(self, netcfg):
try:
@@ -771,9 +843,9 @@ class Init(object):
LOG.warning("Failed to rename devices: %s", e)
def _get_per_boot_network_semaphore(self):
- return namedtuple('Semaphore', 'semaphore args')(
- helpers.FileSemaphores(self.paths.get_runpath('sem')),
- ('apply_network_config', PER_ONCE)
+ return namedtuple("Semaphore", "semaphore args")(
+ helpers.FileSemaphores(self.paths.get_runpath("sem")),
+ ("apply_network_config", PER_ONCE),
)
def _network_already_configured(self) -> bool:
@@ -792,26 +864,32 @@ class Init(object):
return
def event_enabled_and_metadata_updated(event_type):
- return update_event_enabled(
- datasource=self.datasource,
- cfg=self.cfg,
- event_source_type=event_type,
- scope=EventScope.NETWORK
- ) and self.datasource.update_metadata_if_supported([event_type])
+ return (
+ update_event_enabled(
+ datasource=self.datasource,
+ cfg=self.cfg,
+ event_source_type=event_type,
+ scope=EventScope.NETWORK,
+ )
+ and self.datasource.update_metadata_if_supported([event_type])
+ )
def should_run_on_boot_event():
- return (not self._network_already_configured() and
- event_enabled_and_metadata_updated(EventType.BOOT))
+ return (
+ not self._network_already_configured()
+ and event_enabled_and_metadata_updated(EventType.BOOT)
+ )
if (
- self.datasource is not NULL_DATA_SOURCE and
- not self.is_new_instance() and
- not should_run_on_boot_event() and
- not event_enabled_and_metadata_updated(EventType.BOOT_LEGACY)
+ self.datasource is not NULL_DATA_SOURCE
+ and not self.is_new_instance()
+ and not should_run_on_boot_event()
+ and not event_enabled_and_metadata_updated(EventType.BOOT_LEGACY)
):
LOG.debug(
"No network config applied. Neither a new instance"
- " nor datasource network update allowed")
+ " nor datasource network update allowed"
+ )
# nothing new, but ensure proper names
self._apply_netcfg_names(netcfg)
return
@@ -826,22 +904,32 @@ class Init(object):
self._apply_netcfg_names(netcfg)
# rendering config
- LOG.info("Applying network configuration from %s bringup=%s: %s",
- src, bring_up, netcfg)
+ LOG.info(
+ "Applying network configuration from %s bringup=%s: %s",
+ src,
+ bring_up,
+ netcfg,
+ )
sem = self._get_per_boot_network_semaphore()
try:
with sem.semaphore.lock(*sem.args):
return self.distro.apply_network_config(
- netcfg, bring_up=bring_up)
+ netcfg, bring_up=bring_up
+ )
except net.RendererNotFoundError as e:
- LOG.error("Unable to render networking. Network config is "
- "likely broken: %s", e)
+ LOG.error(
+ "Unable to render networking. Network config is "
+ "likely broken: %s",
+ e,
+ )
return
except NotImplementedError:
- LOG.warning("distro '%s' does not implement apply_network_config. "
- "networking may not be configured properly.",
- self.distro)
+ LOG.warning(
+ "distro '%s' does not implement apply_network_config. "
+ "networking may not be configured properly.",
+ self.distro,
+ )
return
@@ -853,18 +941,22 @@ class Modules(object):
self._cached_cfg = None
if reporter is None:
reporter = events.ReportEventStack(
- name="module-reporter", description="module-desc",
- reporting_enabled=False)
+ name="module-reporter",
+ description="module-desc",
+ reporting_enabled=False,
+ )
self.reporter = reporter
@property
def cfg(self):
# None check to avoid empty case causing re-reading
if self._cached_cfg is None:
- merger = helpers.ConfigMerger(paths=self.init.paths,
- datasource=self.init.datasource,
- additional_fns=self.cfg_files,
- base_cfg=self.init.cfg)
+ merger = helpers.ConfigMerger(
+ paths=self.init.paths,
+ datasource=self.init.datasource,
+ additional_fns=self.cfg_files,
+ base_cfg=self.init.cfg,
+ )
self._cached_cfg = merger.cfg
# LOG.debug("Loading 'module' config %s", self._cached_cfg)
# Only give out a copy so that others can't modify this...
@@ -885,57 +977,67 @@ class Modules(object):
if not item:
continue
if isinstance(item, str):
- module_list.append({
- 'mod': item.strip(),
- })
+ module_list.append(
+ {
+ "mod": item.strip(),
+ }
+ )
elif isinstance(item, (list)):
contents = {}
# Meant to fall through...
if len(item) >= 1:
- contents['mod'] = item[0].strip()
+ contents["mod"] = item[0].strip()
if len(item) >= 2:
- contents['freq'] = item[1].strip()
+ contents["freq"] = item[1].strip()
if len(item) >= 3:
- contents['args'] = item[2:]
+ contents["args"] = item[2:]
if contents:
module_list.append(contents)
elif isinstance(item, (dict)):
contents = {}
valid = False
- if 'name' in item:
- contents['mod'] = item['name'].strip()
+ if "name" in item:
+ contents["mod"] = item["name"].strip()
valid = True
- if 'frequency' in item:
- contents['freq'] = item['frequency'].strip()
- if 'args' in item:
- contents['args'] = item['args'] or []
+ if "frequency" in item:
+ contents["freq"] = item["frequency"].strip()
+ if "args" in item:
+ contents["args"] = item["args"] or []
if contents and valid:
module_list.append(contents)
else:
- raise TypeError(("Failed to read '%s' item in config,"
- " unknown type %s") %
- (item, type_utils.obj_name(item)))
+ raise TypeError(
+ "Failed to read '%s' item in config, unknown type %s"
+ % (item, type_utils.obj_name(item))
+ )
return module_list
def _fixup_modules(self, raw_mods):
mostly_mods = []
for raw_mod in raw_mods:
- raw_name = raw_mod['mod']
- freq = raw_mod.get('freq')
- run_args = raw_mod.get('args') or []
+ raw_name = raw_mod["mod"]
+ freq = raw_mod.get("freq")
+ run_args = raw_mod.get("args") or []
mod_name = config.form_module_name(raw_name)
if not mod_name:
continue
if freq and freq not in FREQUENCIES:
- LOG.warning(("Config specified module %s"
- " has an unknown frequency %s"), raw_name, freq)
+ LOG.warning(
+ "Config specified module %s has an unknown frequency %s",
+ raw_name,
+ freq,
+ )
# Reset it so when ran it will get set to a known value
freq = None
mod_locs, looked_locs = importer.find_module(
- mod_name, ['', type_utils.obj_name(config)], ['handle'])
+ mod_name, ["", type_utils.obj_name(config)], ["handle"]
+ )
if not mod_locs:
- LOG.warning("Could not find module named %s (searched %s)",
- mod_name, looked_locs)
+ LOG.warning(
+ "Could not find module named %s (searched %s)",
+ mod_name,
+ looked_locs,
+ )
continue
mod = config.fixup_module(importer.import_module(mod_locs[0]))
mostly_mods.append([mod, raw_name, freq, run_args])
@@ -954,15 +1056,15 @@ class Modules(object):
freq = mod.frequency
if freq not in FREQUENCIES:
freq = PER_INSTANCE
- LOG.debug("Running module %s (%s) with frequency %s",
- name, mod, freq)
+ LOG.debug(
+ "Running module %s (%s) with frequency %s", name, mod, freq
+ )
# Use the configs logger and not our own
# TODO(harlowja): possibly check the module
# for having a LOG attr and just give it back
# its own logger?
- func_args = [name, self.cfg,
- cc, config.LOG, args]
+ func_args = [name, self.cfg, cc, config.LOG, args]
# Mark it as having started running
which_ran.append(name)
# This name will affect the semaphore name created
@@ -970,11 +1072,13 @@ class Modules(object):
desc = "running %s with frequency %s" % (run_name, freq)
myrep = events.ReportEventStack(
- name=run_name, description=desc, parent=self.reporter)
+ name=run_name, description=desc, parent=self.reporter
+ )
with myrep:
- ran, _r = cc.run(run_name, mod.handle, func_args,
- freq=freq)
+ ran, _r = cc.run(
+ run_name, mod.handle, func_args, freq=freq
+ )
if ran:
myrep.message = "%s ran successfully" % run_name
else:
@@ -988,9 +1092,9 @@ class Modules(object):
def run_single(self, mod_name, args=None, freq=None):
# Form the users module 'specs'
mod_to_be = {
- 'mod': mod_name,
- 'args': args,
- 'freq': freq,
+ "mod": mod_name,
+ "args": args,
+ "freq": freq,
}
# Now resume doing the normal fixups and running
raw_mods = [mod_to_be]
@@ -1004,13 +1108,14 @@ class Modules(object):
skipped = []
forced = []
- overridden = self.cfg.get('unverified_modules', [])
+ overridden = self.cfg.get("unverified_modules", [])
active_mods = []
all_distros = set([distros.ALL_DISTROS])
for (mod, name, _freq, _args) in mostly_mods:
worked_distros = set(mod.distros) # Minimally [] per fixup_modules
worked_distros.update(
- distros.Distro.expand_osfamily(mod.osfamilies))
+ distros.Distro.expand_osfamily(mod.osfamilies)
+ )
# Skip only when the following conditions are all met:
# - distros are defined in the module != ALL_DISTROS
@@ -1026,12 +1131,15 @@ class Modules(object):
active_mods.append([mod, name, _freq, _args])
if skipped:
- LOG.info("Skipping modules '%s' because they are not verified "
- "on distro '%s'. To run anyway, add them to "
- "'unverified_modules' in config.",
- ','.join(skipped), d_name)
+ LOG.info(
+ "Skipping modules '%s' because they are not verified "
+ "on distro '%s'. To run anyway, add them to "
+ "'unverified_modules' in config.",
+ ",".join(skipped),
+ d_name,
+ )
if forced:
- LOG.info("running unverified_modules: '%s'", ', '.join(forced))
+ LOG.info("running unverified_modules: '%s'", ", ".join(forced))
return self._run_modules(active_mods)
@@ -1051,7 +1159,9 @@ def fetch_base_config():
read_runtime_config(),
# Kernel/cmdline parameters override system config
util.read_conf_from_cmdline(),
- ], reverse=True)
+ ],
+ reverse=True,
+ )
def _pkl_store(obj, fname):
@@ -1087,4 +1197,5 @@ def _pkl_load(fname):
util.logexc(LOG, "Failed loading pickled blob from %s", fname)
return None
+
# vi: ts=4 expandtab
diff --git a/cloudinit/subp.py b/cloudinit/subp.py
index 024e1a98..7693601d 100644
--- a/cloudinit/subp.py
+++ b/cloudinit/subp.py
@@ -4,7 +4,6 @@
import logging
import os
import subprocess
-
from errno import ENOEXEC
LOG = logging.getLogger(__name__)
@@ -37,7 +36,7 @@ def prepend_base_command(base_command, commands):
elif command[0] != base_command: # Automatically prepend
command.insert(0, base_command)
elif isinstance(command, str):
- if not command.startswith('%s ' % base_command):
+ if not command.startswith("%s " % base_command):
warnings.append(command)
else:
errors.append(str(command))
@@ -46,30 +45,43 @@ def prepend_base_command(base_command, commands):
if warnings:
LOG.warning(
- 'Non-%s commands in %s config:\n%s',
- base_command, base_command, '\n'.join(warnings))
+ "Non-%s commands in %s config:\n%s",
+ base_command,
+ base_command,
+ "\n".join(warnings),
+ )
if errors:
raise TypeError(
- 'Invalid {name} config.'
- ' These commands are not a string or list:\n{errors}'.format(
- name=base_command, errors='\n'.join(errors)))
+ "Invalid {name} config."
+ " These commands are not a string or list:\n{errors}".format(
+ name=base_command, errors="\n".join(errors)
+ )
+ )
return fixed_commands
class ProcessExecutionError(IOError):
- MESSAGE_TMPL = ('%(description)s\n'
- 'Command: %(cmd)s\n'
- 'Exit code: %(exit_code)s\n'
- 'Reason: %(reason)s\n'
- 'Stdout: %(stdout)s\n'
- 'Stderr: %(stderr)s')
- empty_attr = '-'
-
- def __init__(self, stdout=None, stderr=None,
- exit_code=None, cmd=None,
- description=None, reason=None,
- errno=None):
+ MESSAGE_TMPL = (
+ "%(description)s\n"
+ "Command: %(cmd)s\n"
+ "Exit code: %(exit_code)s\n"
+ "Reason: %(reason)s\n"
+ "Stdout: %(stdout)s\n"
+ "Stderr: %(stderr)s"
+ )
+ empty_attr = "-"
+
+ def __init__(
+ self,
+ stdout=None,
+ stderr=None,
+ exit_code=None,
+ cmd=None,
+ description=None,
+ reason=None,
+ errno=None,
+ ):
if not cmd:
self.cmd = self.empty_attr
else:
@@ -77,9 +89,9 @@ class ProcessExecutionError(IOError):
if not description:
if not exit_code and errno == ENOEXEC:
- self.description = 'Exec format error. Missing #! in script?'
+ self.description = "Exec format error. Missing #! in script?"
else:
- self.description = 'Unexpected error while running command.'
+ self.description = "Unexpected error while running command."
else:
self.description = description
@@ -111,12 +123,12 @@ class ProcessExecutionError(IOError):
self.errno = errno
message = self.MESSAGE_TMPL % {
- 'description': self._ensure_string(self.description),
- 'cmd': self._ensure_string(self.cmd),
- 'exit_code': self._ensure_string(self.exit_code),
- 'stdout': self._ensure_string(self.stdout),
- 'stderr': self._ensure_string(self.stderr),
- 'reason': self._ensure_string(self.reason),
+ "description": self._ensure_string(self.description),
+ "cmd": self._ensure_string(self.cmd),
+ "exit_code": self._ensure_string(self.exit_code),
+ "stdout": self._ensure_string(self.stdout),
+ "stderr": self._ensure_string(self.stderr),
+ "reason": self._ensure_string(self.reason),
}
IOError.__init__(self, message)
@@ -130,8 +142,8 @@ class ProcessExecutionError(IOError):
"""
indent text on all but the first line, allowing for easy to read output
"""
- cr = '\n'
- indent = ' ' * indent_level
+ cr = "\n"
+ indent = " " * indent_level
# if input is bytes, return bytes
if isinstance(text, bytes):
cr = cr.encode()
@@ -141,10 +153,21 @@ class ProcessExecutionError(IOError):
return text.rstrip(cr).replace(cr, cr + indent)
-def subp(args, data=None, rcs=None, env=None, capture=True,
- combine_capture=False, shell=False,
- logstring=False, decode="replace", target=None, update_env=None,
- status_cb=None, cwd=None):
+def subp(
+ args,
+ data=None,
+ rcs=None,
+ env=None,
+ capture=True,
+ combine_capture=False,
+ shell=False,
+ logstring=False,
+ decode="replace",
+ target=None,
+ update_env=None,
+ status_cb=None,
+ cwd=None,
+):
"""Run a subprocess.
:param args: command to run in a list. [cmd, arg1, arg2...]
@@ -210,18 +233,26 @@ def subp(args, data=None, rcs=None, env=None, capture=True,
env.update(update_env)
if target_path(target) != "/":
- args = ['chroot', target] + list(args)
+ args = ["chroot", target] + list(args)
if status_cb:
- command = ' '.join(args) if isinstance(args, list) else args
- status_cb('Begin run command: {command}\n'.format(command=command))
+ command = " ".join(args) if isinstance(args, list) else args
+ status_cb("Begin run command: {command}\n".format(command=command))
if not logstring:
- LOG.debug(("Running command %s with allowed return codes %s"
- " (shell=%s, capture=%s)"),
- args, rcs, shell, 'combine' if combine_capture else capture)
+ LOG.debug(
+ "Running command %s with allowed return codes %s"
+ " (shell=%s, capture=%s)",
+ args,
+ rcs,
+ shell,
+ "combine" if combine_capture else capture,
+ )
else:
- LOG.debug(("Running hidden command to protect sensitive "
- "input/output logstring: %s"), logstring)
+ LOG.debug(
+ "Running hidden command to protect sensitive "
+ "input/output logstring: %s",
+ logstring,
+ )
stdin = None
stdout = None
@@ -251,20 +282,28 @@ def subp(args, data=None, rcs=None, env=None, capture=True,
bytes_args = args.encode("utf-8")
else:
bytes_args = [
- x if isinstance(x, bytes) else x.encode("utf-8")
- for x in args]
+ x if isinstance(x, bytes) else x.encode("utf-8") for x in args
+ ]
try:
- sp = subprocess.Popen(bytes_args, stdout=stdout,
- stderr=stderr, stdin=stdin,
- env=env, shell=shell, cwd=cwd)
+ sp = subprocess.Popen(
+ bytes_args,
+ stdout=stdout,
+ stderr=stderr,
+ stdin=stdin,
+ env=env,
+ shell=shell,
+ cwd=cwd,
+ )
(out, err) = sp.communicate(data)
except OSError as e:
if status_cb:
- status_cb('ERROR: End run command: invalid command provided\n')
+ status_cb("ERROR: End run command: invalid command provided\n")
raise ProcessExecutionError(
- cmd=args, reason=e, errno=e.errno,
+ cmd=args,
+ reason=e,
+ errno=e.errno,
stdout="-" if decode else b"-",
- stderr="-" if decode else b"-"
+ stderr="-" if decode else b"-",
) from e
finally:
if devnull_fp:
@@ -273,11 +312,12 @@ def subp(args, data=None, rcs=None, env=None, capture=True,
# Just ensure blank instead of none.
if capture or combine_capture:
if not out:
- out = b''
+ out = b""
if not err:
- err = b''
+ err = b""
if decode:
- def ldecode(data, m='utf-8'):
+
+ def ldecode(data, m="utf-8"):
if not isinstance(data, bytes):
return data
return data.decode(m, decode)
@@ -288,13 +328,12 @@ def subp(args, data=None, rcs=None, env=None, capture=True,
rc = sp.returncode
if rc not in rcs:
if status_cb:
- status_cb(
- 'ERROR: End run command: exit({code})\n'.format(code=rc))
- raise ProcessExecutionError(stdout=out, stderr=err,
- exit_code=rc,
- cmd=args)
+ status_cb("ERROR: End run command: exit({code})\n".format(code=rc))
+ raise ProcessExecutionError(
+ stdout=out, stderr=err, exit_code=rc, cmd=args
+ )
if status_cb:
- status_cb('End run command: exit({code})\n'.format(code=rc))
+ status_cb("End run command: exit({code})\n".format(code=rc))
return (out, err)
@@ -331,8 +370,9 @@ def which(program, search=None, target=None):
return program
if search is None:
- paths = [p.strip('"') for p in
- os.environ.get("PATH", "").split(os.pathsep)]
+ paths = [
+ p.strip('"') for p in os.environ.get("PATH", "").split(os.pathsep)
+ ]
if target == "/":
search = paths
else:
@@ -382,8 +422,9 @@ def runparts(dirp, skip_no_exist=True, exe_prefix=None):
if failed and attempted:
raise RuntimeError(
- 'Runparts: %s failures (%s) in %s attempted commands' %
- (len(failed), ",".join(failed), len(attempted)))
+ "Runparts: %s failures (%s) in %s attempted commands"
+ % (len(failed), ",".join(failed), len(attempted))
+ )
# vi: ts=4 expandtab
diff --git a/cloudinit/temp_utils.py b/cloudinit/temp_utils.py
index 346276ec..e23b6599 100644
--- a/cloudinit/temp_utils.py
+++ b/cloudinit/temp_utils.py
@@ -42,7 +42,7 @@ def _tempfile_dir_arg(odir=None, needs_exe=False):
if os.getuid() == 0:
tdir = _ROOT_TMPDIR
else:
- tdir = os.environ.get('TMPDIR', '/tmp')
+ tdir = os.environ.get("TMPDIR", "/tmp")
if not os.path.isdir(tdir):
os.makedirs(tdir)
os.chmod(tdir, 0o1777)
@@ -52,8 +52,9 @@ def _tempfile_dir_arg(odir=None, needs_exe=False):
def ExtendedTemporaryFile(**kwargs):
- kwargs['dir'] = _tempfile_dir_arg(
- kwargs.pop('dir', None), kwargs.pop('needs_exe', False))
+ kwargs["dir"] = _tempfile_dir_arg(
+ kwargs.pop("dir", None), kwargs.pop("needs_exe", False)
+ )
fh = tempfile.NamedTemporaryFile(**kwargs)
# Replace its unlink with a quiet version
# that does not raise errors when the
@@ -76,7 +77,7 @@ def ExtendedTemporaryFile(**kwargs):
def unlink_now():
fh.unlink(fh.name)
- setattr(fh, 'unlink_now', unlink_now)
+ setattr(fh, "unlink_now", unlink_now)
return fh
@@ -93,14 +94,17 @@ def tempdir(rmtree_ignore_errors=False, **kwargs):
def mkdtemp(**kwargs):
- kwargs['dir'] = _tempfile_dir_arg(
- kwargs.pop('dir', None), kwargs.pop('needs_exe', False))
+ kwargs["dir"] = _tempfile_dir_arg(
+ kwargs.pop("dir", None), kwargs.pop("needs_exe", False)
+ )
return tempfile.mkdtemp(**kwargs)
def mkstemp(**kwargs):
- kwargs['dir'] = _tempfile_dir_arg(
- kwargs.pop('dir', None), kwargs.pop('needs_exe', False))
+ kwargs["dir"] = _tempfile_dir_arg(
+ kwargs.pop("dir", None), kwargs.pop("needs_exe", False)
+ )
return tempfile.mkstemp(**kwargs)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/templater.py b/cloudinit/templater.py
index 009bed32..c215bbbb 100644
--- a/cloudinit/templater.py
+++ b/cloudinit/templater.py
@@ -13,16 +13,17 @@
import collections
import re
-
try:
from Cheetah.Template import Template as CTemplate
+
CHEETAH_AVAILABLE = True
except (ImportError, AttributeError):
CHEETAH_AVAILABLE = False
try:
- from jinja2 import Template as JTemplate
from jinja2 import DebugUndefined as JUndefined
+ from jinja2 import Template as JTemplate
+
JINJA_AVAILABLE = True
except (ImportError, AttributeError):
JINJA_AVAILABLE = False
@@ -32,25 +33,26 @@ from cloudinit import log as logging
from cloudinit import type_utils as tu
from cloudinit import util
-
LOG = logging.getLogger(__name__)
TYPE_MATCHER = re.compile(r"##\s*template:(.*)", re.I)
-BASIC_MATCHER = re.compile(r'\$\{([A-Za-z0-9_.]+)\}|\$([A-Za-z0-9_.]+)')
-MISSING_JINJA_PREFIX = 'CI_MISSING_JINJA_VAR/'
+BASIC_MATCHER = re.compile(r"\$\{([A-Za-z0-9_.]+)\}|\$([A-Za-z0-9_.]+)")
+MISSING_JINJA_PREFIX = "CI_MISSING_JINJA_VAR/"
class UndefinedJinjaVariable(JUndefined):
"""Class used to represent any undefined jinja template variable."""
def __str__(self):
- return '%s%s' % (MISSING_JINJA_PREFIX, self._undefined_name)
+ return "%s%s" % (MISSING_JINJA_PREFIX, self._undefined_name)
def __sub__(self, other):
- other = str(other).replace(MISSING_JINJA_PREFIX, '')
+ other = str(other).replace(MISSING_JINJA_PREFIX, "")
raise TypeError(
'Undefined jinja variable: "{this}-{other}". Jinja tried'
' subtraction. Perhaps you meant "{this}_{other}"?'.format(
- this=self._undefined_name, other=other))
+ this=self._undefined_name, other=other
+ )
+ )
def basic_render(content, params):
@@ -73,67 +75,75 @@ def basic_render(content, params):
while len(path) > 1:
key = path.popleft()
if not isinstance(selected_params, dict):
- raise TypeError("Can not traverse into"
- " non-dictionary '%s' of type %s while"
- " looking for subkey '%s'"
- % (selected_params,
- tu.obj_name(selected_params),
- key))
+ raise TypeError(
+ "Can not traverse into"
+ " non-dictionary '%s' of type %s while"
+ " looking for subkey '%s'"
+ % (selected_params, tu.obj_name(selected_params), key)
+ )
selected_params = selected_params[key]
key = path.popleft()
if not isinstance(selected_params, dict):
- raise TypeError("Can not extract key '%s' from non-dictionary"
- " '%s' of type %s"
- % (key, selected_params,
- tu.obj_name(selected_params)))
+ raise TypeError(
+ "Can not extract key '%s' from non-dictionary '%s' of type %s"
+ % (key, selected_params, tu.obj_name(selected_params))
+ )
return str(selected_params[key])
return BASIC_MATCHER.sub(replacer, content)
def detect_template(text):
-
def cheetah_render(content, params):
return CTemplate(content, searchList=[params]).respond()
def jinja_render(content, params):
# keep_trailing_newline is in jinja2 2.7+, not 2.6
add = "\n" if content.endswith("\n") else ""
- return JTemplate(content,
- undefined=UndefinedJinjaVariable,
- trim_blocks=True).render(**params) + add
+ return (
+ JTemplate(
+ content, undefined=UndefinedJinjaVariable, trim_blocks=True
+ ).render(**params)
+ + add
+ )
if text.find("\n") != -1:
ident, rest = text.split("\n", 1)
else:
ident = text
- rest = ''
+ rest = ""
type_match = TYPE_MATCHER.match(ident)
if not type_match:
if CHEETAH_AVAILABLE:
LOG.debug("Using Cheetah as the renderer for unknown template.")
- return ('cheetah', cheetah_render, text)
+ return ("cheetah", cheetah_render, text)
else:
- return ('basic', basic_render, text)
+ return ("basic", basic_render, text)
else:
template_type = type_match.group(1).lower().strip()
- if template_type not in ('jinja', 'cheetah', 'basic'):
- raise ValueError("Unknown template rendering type '%s' requested"
- % template_type)
- if template_type == 'jinja' and not JINJA_AVAILABLE:
- LOG.warning("Jinja not available as the selected renderer for"
- " desired template, reverting to the basic renderer.")
- return ('basic', basic_render, rest)
- elif template_type == 'jinja' and JINJA_AVAILABLE:
- return ('jinja', jinja_render, rest)
- if template_type == 'cheetah' and not CHEETAH_AVAILABLE:
- LOG.warning("Cheetah not available as the selected renderer for"
- " desired template, reverting to the basic renderer.")
- return ('basic', basic_render, rest)
- elif template_type == 'cheetah' and CHEETAH_AVAILABLE:
- return ('cheetah', cheetah_render, rest)
+ if template_type not in ("jinja", "cheetah", "basic"):
+ raise ValueError(
+ "Unknown template rendering type '%s' requested"
+ % template_type
+ )
+ if template_type == "jinja" and not JINJA_AVAILABLE:
+ LOG.warning(
+ "Jinja not available as the selected renderer for"
+ " desired template, reverting to the basic renderer."
+ )
+ return ("basic", basic_render, rest)
+ elif template_type == "jinja" and JINJA_AVAILABLE:
+ return ("jinja", jinja_render, rest)
+ if template_type == "cheetah" and not CHEETAH_AVAILABLE:
+ LOG.warning(
+ "Cheetah not available as the selected renderer for"
+ " desired template, reverting to the basic renderer."
+ )
+ return ("basic", basic_render, rest)
+ elif template_type == "cheetah" and CHEETAH_AVAILABLE:
+ return ("cheetah", cheetah_render, rest)
# Only thing left over is the basic renderer (it is always available).
- return ('basic', basic_render, rest)
+ return ("basic", basic_render, rest)
def render_from_file(fn, params):
@@ -143,7 +153,8 @@ def render_from_file(fn, params):
# If it is given a str that has non-ascii then it will raise a
# UnicodeDecodeError. So we explicitly convert to unicode type here.
template_type, renderer, content = detect_template(
- util.load_file(fn, decode=False).decode('utf-8'))
+ util.load_file(fn, decode=False).decode("utf-8")
+ )
LOG.debug("Rendering content of '%s' using renderer %s", fn, template_type)
return renderer(content, params)
@@ -168,4 +179,5 @@ def render_string(content, params):
_template_type, renderer, content = detect_template(content)
return renderer(content, params)
+
# vi: ts=4 expandtab
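Illustrative behavior of the renderer selection reformatted above (a minimal sketch, not part of this commit, assuming jinja2 is installed; without it detect_template falls back to the basic renderer, as the warning above describes): a "## template: jinja" header selects the jinja renderer, and undefined variables are emitted with the MISSING_JINJA_PREFIX marker instead of raising.

    from cloudinit.templater import render_string

    content = "## template: jinja\nhost: {{ hostname }}\nmissing: {{ not_there }}\n"
    print(render_string(content, {"hostname": "node-1"}))
    # host: node-1
    # missing: CI_MISSING_JINJA_VAR/not_there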
diff --git a/cloudinit/type_utils.py b/cloudinit/type_utils.py
index 2c1ae368..d971b278 100644
--- a/cloudinit/type_utils.py
+++ b/cloudinit/type_utils.py
@@ -10,7 +10,6 @@
import types
-
_NAME_TYPES = (
types.ModuleType,
types.FunctionType,
@@ -23,9 +22,10 @@ def obj_name(obj):
if isinstance(obj, _NAME_TYPES):
return str(obj.__name__)
else:
- if not hasattr(obj, '__class__'):
+ if not hasattr(obj, "__class__"):
return repr(obj)
else:
return obj_name(obj.__class__)
+
# vi: ts=4 expandtab
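A small sketch of what obj_name resolves to (illustrative only, not part of this commit; the last example assumes classes are included in the _NAME_TYPES tuple, which is truncated in the hunk above):

    from cloudinit import type_utils

    class Widget:
        pass

    print(type_utils.obj_name(Widget))    # "Widget" -- matched directly via _NAME_TYPES
    print(type_utils.obj_name(Widget()))  # "Widget" -- instances recurse through __class__
    print(type_utils.obj_name(42))        # "int"    -- same recursion, assuming type is in _NAME_TYPES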
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index caa88435..847e5379 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -17,7 +17,7 @@ from errno import ENOENT
from functools import partial
from http.client import NOT_FOUND
from itertools import count
-from urllib.parse import urlparse, urlunparse, quote
+from urllib.parse import quote, urlparse, urlunparse
import requests
from requests import exceptions
@@ -32,32 +32,33 @@ LOG = logging.getLogger(__name__)
SSL_ENABLED = False
CONFIG_ENABLED = False # This was added in 0.7 (but taken out in >=1.0)
_REQ_VER = None
-REDACTED = 'REDACTED'
+REDACTED = "REDACTED"
try:
from distutils.version import LooseVersion
+
import pkg_resources
- _REQ = pkg_resources.get_distribution('requests')
+
+ _REQ = pkg_resources.get_distribution("requests")
_REQ_VER = LooseVersion(_REQ.version) # pylint: disable=no-member
- if _REQ_VER >= LooseVersion('0.8.8'):
+ if _REQ_VER >= LooseVersion("0.8.8"):
SSL_ENABLED = True
- if LooseVersion('0.7.0') <= _REQ_VER < LooseVersion('1.0.0'):
+ if LooseVersion("0.7.0") <= _REQ_VER < LooseVersion("1.0.0"):
CONFIG_ENABLED = True
except ImportError:
pass
def _cleanurl(url):
- parsed_url = list(urlparse(url, scheme='http'))
+ parsed_url = list(urlparse(url, scheme="http"))
if not parsed_url[1] and parsed_url[2]:
# Swap these since this seems to be a common
# occurrence when given urls like 'www.google.com'
parsed_url[1] = parsed_url[2]
- parsed_url[2] = ''
+ parsed_url[2] = ""
return urlunparse(parsed_url)
def combine_url(base, *add_ons):
-
def combine_single(url, add_on):
url_parsed = list(urlparse(url))
path = url_parsed[2]
@@ -87,7 +88,7 @@ def read_file_or_url(url, **kwargs):
if url.lower().startswith("file://"):
if kwargs.get("data"):
LOG.warning("Unable to post data to file resource %s", url)
- file_path = url[len("file://"):]
+ file_path = url[len("file://") :]
try:
with open(file_path, "rb") as fp:
contents = fp.read()
@@ -117,7 +118,7 @@ class StringResponse(object):
return True
def __str__(self):
- return self.contents.decode('utf-8')
+ return self.contents.decode("utf-8")
class FileResponse(StringResponse):
@@ -173,28 +174,46 @@ class UrlError(IOError):
def _get_ssl_args(url, ssl_details):
ssl_args = {}
scheme = urlparse(url).scheme
- if scheme == 'https' and ssl_details:
+ if scheme == "https" and ssl_details:
if not SSL_ENABLED:
- LOG.warning("SSL is not supported in requests v%s, "
- "cert. verification can not occur!", _REQ_VER)
+ LOG.warning(
+ "SSL is not supported in requests v%s, "
+ "cert. verification can not occur!",
+ _REQ_VER,
+ )
else:
- if 'ca_certs' in ssl_details and ssl_details['ca_certs']:
- ssl_args['verify'] = ssl_details['ca_certs']
+ if "ca_certs" in ssl_details and ssl_details["ca_certs"]:
+ ssl_args["verify"] = ssl_details["ca_certs"]
else:
- ssl_args['verify'] = True
- if 'cert_file' in ssl_details and 'key_file' in ssl_details:
- ssl_args['cert'] = [ssl_details['cert_file'],
- ssl_details['key_file']]
- elif 'cert_file' in ssl_details:
- ssl_args['cert'] = str(ssl_details['cert_file'])
+ ssl_args["verify"] = True
+ if "cert_file" in ssl_details and "key_file" in ssl_details:
+ ssl_args["cert"] = [
+ ssl_details["cert_file"],
+ ssl_details["key_file"],
+ ]
+ elif "cert_file" in ssl_details:
+ ssl_args["cert"] = str(ssl_details["cert_file"])
return ssl_args
-def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
- headers=None, headers_cb=None, headers_redact=None,
- ssl_details=None, check_status=True, allow_redirects=True,
- exception_cb=None, session=None, infinite=False, log_req_resp=True,
- request_method=None):
+def readurl(
+ url,
+ data=None,
+ timeout=None,
+ retries=0,
+ sec_between=1,
+ headers=None,
+ headers_cb=None,
+ headers_redact=None,
+ ssl_details=None,
+ check_status=True,
+ allow_redirects=True,
+ exception_cb=None,
+ session=None,
+ infinite=False,
+ log_req_resp=True,
+ request_method=None,
+):
"""Wrapper around requests.Session to read the url and retry if necessary
:param url: Mandatory url to request.
@@ -227,15 +246,15 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
"""
url = _cleanurl(url)
req_args = {
- 'url': url,
+ "url": url,
}
req_args.update(_get_ssl_args(url, ssl_details))
- req_args['allow_redirects'] = allow_redirects
+ req_args["allow_redirects"] = allow_redirects
if not request_method:
- request_method = 'POST' if data else 'GET'
- req_args['method'] = request_method
+ request_method = "POST" if data else "GET"
+ req_args["method"] = request_method
if timeout is not None:
- req_args['timeout'] = max(float(timeout), 0)
+ req_args["timeout"] = max(float(timeout), 0)
if headers_redact is None:
headers_redact = []
# It doesn't seem like config
@@ -243,31 +262,33 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
# need to manually do the retries if it wasn't...
if CONFIG_ENABLED:
req_config = {
- 'store_cookies': False,
+ "store_cookies": False,
}
# Don't use the retry support built-in
# since it doesn't allow for 'sleep_times'
# in between tries....
# if retries:
# req_config['max_retries'] = max(int(retries), 0)
- req_args['config'] = req_config
+ req_args["config"] = req_config
manual_tries = 1
if retries:
manual_tries = max(int(retries) + 1, 1)
def_headers = {
- 'User-Agent': 'Cloud-Init/%s' % (version.version_string()),
+ "User-Agent": "Cloud-Init/%s" % (version.version_string()),
}
if headers:
def_headers.update(headers)
headers = def_headers
if not headers_cb:
+
def _cb(url):
return headers
+
headers_cb = _cb
if data:
- req_args['data'] = data
+ req_args["data"] = data
if sec_between is None:
sec_between = -1
@@ -276,12 +297,12 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
# doesn't handle sleeping between tries...
# Infinitely retry if infinite is True
for i in count() if infinite else range(0, manual_tries):
- req_args['headers'] = headers_cb(url)
+ req_args["headers"] = headers_cb(url)
filtered_req_args = {}
for (k, v) in req_args.items():
- if k == 'data':
+ if k == "data":
continue
- if k == 'headers' and headers_redact:
+ if k == "headers" and headers_redact:
matched_headers = [k for k in headers_redact if v.get(k)]
if matched_headers:
filtered_req_args[k] = copy.deepcopy(v)
@@ -292,9 +313,13 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
try:
if log_req_resp:
- LOG.debug("[%s/%s] open '%s' with %s configuration", i,
- "infinite" if infinite else manual_tries, url,
- filtered_req_args)
+ LOG.debug(
+ "[%s/%s] open '%s' with %s configuration",
+ i,
+ "infinite" if infinite else manual_tries,
+ url,
+ filtered_req_args,
+ )
if session is None:
session = requests.Session()
@@ -304,19 +329,33 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
if check_status:
r.raise_for_status()
- LOG.debug("Read from %s (%s, %sb) after %s attempts", url,
- r.status_code, len(r.content), (i + 1))
+ LOG.debug(
+ "Read from %s (%s, %sb) after %s attempts",
+ url,
+ r.status_code,
+ len(r.content),
+ (i + 1),
+ )
# Doesn't seem like we can make it use a different
# subclass for responses, so add our own backward-compat
# attrs
return UrlResponse(r)
except exceptions.RequestException as e:
- if (isinstance(e, (exceptions.HTTPError)) and
- hasattr(e, 'response') and # This appeared in v 0.10.8
- hasattr(e.response, 'status_code')):
- excps.append(UrlError(e, code=e.response.status_code,
- headers=e.response.headers,
- url=url))
+ if (
+ isinstance(e, (exceptions.HTTPError))
+ and hasattr(e, "response")
+ and hasattr( # This appeared in v 0.10.8
+ e.response, "status_code"
+ )
+ ):
+ excps.append(
+ UrlError(
+ e,
+ code=e.response.status_code,
+ headers=e.response.headers,
+ url=url,
+ )
+ )
else:
excps.append(UrlError(e, url=url))
if SSL_ENABLED and isinstance(e, exceptions.SSLError):
@@ -328,22 +367,33 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
# to continue retrying and False to break and re-raise the
# exception
break
- if (infinite and sec_between > 0) or \
- (i + 1 < manual_tries and sec_between > 0):
+ if (infinite and sec_between > 0) or (
+ i + 1 < manual_tries and sec_between > 0
+ ):
if log_req_resp:
LOG.debug(
"Please wait %s seconds while we wait to try again",
- sec_between)
+ sec_between,
+ )
time.sleep(sec_between)
if excps:
raise excps[-1]
return None # Should throw before this...
-def wait_for_url(urls, max_wait=None, timeout=None, status_cb=None,
- headers_cb=None, headers_redact=None, sleep_time=1,
- exception_cb=None, sleep_time_cb=None, request_method=None):
+def wait_for_url(
+ urls,
+ max_wait=None,
+ timeout=None,
+ status_cb=None,
+ headers_cb=None,
+ headers_redact=None,
+ sleep_time=1,
+ exception_cb=None,
+ sleep_time_cb=None,
+ request_method=None,
+):
"""
urls: a list of urls to try
max_wait: roughly the maximum time to wait before giving up
@@ -388,9 +438,9 @@ def wait_for_url(urls, max_wait=None, timeout=None, status_cb=None,
status_cb = log_status_cb
def timeup(max_wait, start_time):
- if (max_wait is None):
+ if max_wait is None:
return False
- return ((max_wait <= 0) or (time.time() - start_time > max_wait))
+ return (max_wait <= 0) or (time.time() - start_time > max_wait)
loop_n = 0
response = None
@@ -404,8 +454,11 @@ def wait_for_url(urls, max_wait=None, timeout=None, status_cb=None,
if loop_n != 0:
if timeup(max_wait, start_time):
break
- if (max_wait is not None and
- timeout and (now + timeout > (start_time + max_wait))):
+ if (
+ max_wait is not None
+ and timeout
+ and (now + timeout > (start_time + max_wait))
+ ):
# shorten timeout to not run way over max_time
timeout = int((start_time + max_wait) - now)
@@ -418,17 +471,29 @@ def wait_for_url(urls, max_wait=None, timeout=None, status_cb=None,
headers = {}
response = readurl(
- url, headers=headers, headers_redact=headers_redact,
- timeout=timeout, check_status=False,
- request_method=request_method)
+ url,
+ headers=headers,
+ headers_redact=headers_redact,
+ timeout=timeout,
+ check_status=False,
+ request_method=request_method,
+ )
if not response.contents:
reason = "empty response [%s]" % (response.code)
- url_exc = UrlError(ValueError(reason), code=response.code,
- headers=response.headers, url=url)
+ url_exc = UrlError(
+ ValueError(reason),
+ code=response.code,
+ headers=response.headers,
+ url=url,
+ )
elif not response.ok():
reason = "bad status code [%s]" % (response.code)
- url_exc = UrlError(ValueError(reason), code=response.code,
- headers=response.headers, url=url)
+ url_exc = UrlError(
+ ValueError(reason),
+ code=response.code,
+ headers=response.headers,
+ url=url,
+ )
else:
return url, response.contents
except UrlError as e:
@@ -440,10 +505,12 @@ def wait_for_url(urls, max_wait=None, timeout=None, status_cb=None,
time_taken = int(time.time() - start_time)
max_wait_str = "%ss" % max_wait if max_wait else "unlimited"
- status_msg = "Calling '%s' failed [%s/%s]: %s" % (url,
- time_taken,
- max_wait_str,
- reason)
+ status_msg = "Calling '%s' failed [%s/%s]: %s" % (
+ url,
+ time_taken,
+ max_wait_str,
+ reason,
+ )
status_cb(status_msg)
if exception_cb:
# This can be used to alter the headers that will be sent
@@ -455,17 +522,23 @@ def wait_for_url(urls, max_wait=None, timeout=None, status_cb=None,
break
loop_n = loop_n + 1
- LOG.debug("Please wait %s seconds while we wait to try again",
- sleep_time)
+ LOG.debug(
+ "Please wait %s seconds while we wait to try again", sleep_time
+ )
time.sleep(sleep_time)
return False, None
class OauthUrlHelper(object):
- def __init__(self, consumer_key=None, token_key=None,
- token_secret=None, consumer_secret=None,
- skew_data_file="/run/oauth_skew.json"):
+ def __init__(
+ self,
+ consumer_key=None,
+ token_key=None,
+ token_secret=None,
+ consumer_secret=None,
+ skew_data_file="/run/oauth_skew.json",
+ ):
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret or ""
self.token_key = token_key
@@ -477,8 +550,10 @@ class OauthUrlHelper(object):
if not any(required):
self._do_oauth = False
elif not all(required):
- raise ValueError("all or none of token_key, token_secret, or "
- "consumer_key can be set")
+ raise ValueError(
+ "all or none of token_key, token_secret, or "
+ "consumer_key can be set"
+ )
old = self.read_skew_file()
self.skew_data = old or {}
@@ -501,16 +576,17 @@ class OauthUrlHelper(object):
fp.write(json.dumps(cur))
def exception_cb(self, msg, exception):
- if not (isinstance(exception, UrlError) and
- (exception.code == 403 or exception.code == 401)):
+ if not (
+ isinstance(exception, UrlError)
+ and (exception.code == 403 or exception.code == 401)
+ ):
return
- if 'date' not in exception.headers:
- LOG.warning("Missing header 'date' in %s response",
- exception.code)
+ if "date" not in exception.headers:
+ LOG.warning("Missing header 'date' in %s response", exception.code)
return
- date = exception.headers['date']
+ date = exception.headers["date"]
try:
remote_time = time.mktime(parsedate(date))
except Exception as e:
@@ -537,15 +613,21 @@ class OauthUrlHelper(object):
timestamp = int(time.time()) + self.skew_data[host]
return oauth_headers(
- url=url, consumer_key=self.consumer_key,
- token_key=self.token_key, token_secret=self.token_secret,
- consumer_secret=self.consumer_secret, timestamp=timestamp)
+ url=url,
+ consumer_key=self.consumer_key,
+ token_key=self.token_key,
+ token_secret=self.token_secret,
+ consumer_secret=self.consumer_secret,
+ timestamp=timestamp,
+ )
def _wrapped(self, wrapped_func, args, kwargs):
- kwargs['headers_cb'] = partial(
- self._headers_cb, kwargs.get('headers_cb'))
- kwargs['exception_cb'] = partial(
- self._exception_cb, kwargs.get('exception_cb'))
+ kwargs["headers_cb"] = partial(
+ self._headers_cb, kwargs.get("headers_cb")
+ )
+ kwargs["exception_cb"] = partial(
+ self._exception_cb, kwargs.get("exception_cb")
+ )
return wrapped_func(*args, **kwargs)
def wait_for_url(self, *args, **kwargs):
@@ -571,12 +653,13 @@ class OauthUrlHelper(object):
return headers
-def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret,
- timestamp=None):
+def oauth_headers(
+ url, consumer_key, token_key, token_secret, consumer_secret, timestamp=None
+):
try:
import oauthlib.oauth1 as oauth1
except ImportError as e:
- raise NotImplementedError('oauth support is not available') from e
+ raise NotImplementedError("oauth support is not available") from e
if timestamp:
timestamp = str(timestamp)
@@ -589,7 +672,8 @@ def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret,
resource_owner_key=token_key,
resource_owner_secret=token_secret,
signature_method=oauth1.SIGNATURE_PLAINTEXT,
- timestamp=timestamp)
+ timestamp=timestamp,
+ )
_uri, signed_headers, _body = client.sign(url)
return signed_headers
@@ -607,4 +691,5 @@ def retry_on_url_exc(msg, exc):
return True
return False
+
# vi: ts=4 expandtab
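Illustrative use of the readurl signature reformatted above (a minimal sketch, not part of this commit; the metadata URL is only an example and the call assumes it is reachable): retries controls how many extra attempts are made, sec_between the sleep between them, and header names listed in headers_redact are shown as REDACTED in the request debug log.

    from cloudinit.url_helper import UrlError, readurl

    try:
        resp = readurl(
            "http://169.254.169.254/latest/meta-data/instance-id",  # hypothetical endpoint
            timeout=2,
            retries=3,
            sec_between=1,
            headers={"Authorization": "Bearer secret"},
            headers_redact=["Authorization"],  # logged as REDACTED, never in plain text
        )
        print(resp.code, resp.contents.decode("utf-8"))
    except UrlError as e:
        print("gave up after retries:", e)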
diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index 1317e063..05c66741 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -14,11 +14,10 @@ from email.mime.multipart import MIMEMultipart
from email.mime.nonmultipart import MIMENonMultipart
from email.mime.text import MIMEText
-from cloudinit import handlers
+from cloudinit import features, handlers
from cloudinit import log as logging
-from cloudinit import features
-from cloudinit.url_helper import read_file_or_url, UrlError
from cloudinit import util
+from cloudinit.url_helper import UrlError, read_file_or_url
LOG = logging.getLogger(__name__)
@@ -28,11 +27,11 @@ PART_FN_TPL = handlers.PART_FN_TPL
OCTET_TYPE = handlers.OCTET_TYPE
# Saves typing errors
-CONTENT_TYPE = 'Content-Type'
+CONTENT_TYPE = "Content-Type"
# Various special content types that cause special actions
TYPE_NEEDED = ["text/plain", "text/x-not-multipart"]
-INCLUDE_TYPES = ['text/x-include-url', 'text/x-include-once-url']
+INCLUDE_TYPES = ["text/x-include-url", "text/x-include-once-url"]
ARCHIVE_TYPES = ["text/cloud-config-archive"]
UNDEF_TYPE = "text/plain"
ARCHIVE_UNDEF_TYPE = "text/cloud-config"
@@ -40,18 +39,18 @@ ARCHIVE_UNDEF_BINARY_TYPE = "application/octet-stream"
# This seems to hit most of the gzip possible content types.
DECOMP_TYPES = [
- 'application/gzip',
- 'application/gzip-compressed',
- 'application/gzipped',
- 'application/x-compress',
- 'application/x-compressed',
- 'application/x-gunzip',
- 'application/x-gzip',
- 'application/x-gzip-compressed',
+ "application/gzip",
+ "application/gzip-compressed",
+ "application/gzipped",
+ "application/x-compress",
+ "application/x-compressed",
+ "application/x-gunzip",
+ "application/x-gzip",
+ "application/x-gzip-compressed",
]
# Msg header used to track attachments
-ATTACHMENT_FIELD = 'Number-Attachments'
+ATTACHMENT_FIELD = "Number-Attachments"
# Only the following content types can have there launch index examined
# in there payload, evey other content type can still provide a header
@@ -64,9 +63,8 @@ def _replace_header(msg, key, value):
def _set_filename(msg, filename):
- del msg['Content-Disposition']
- msg.add_header('Content-Disposition',
- 'attachment', filename=str(filename))
+ del msg["Content-Disposition"]
+ msg.add_header("Content-Disposition", "attachment", filename=str(filename))
def _handle_error(error_message, source_exception=None):
@@ -91,7 +89,6 @@ class UserDataProcessor(object):
return accumulating_msg
def _process_msg(self, base_msg, append_msg):
-
def find_ctype(payload):
return handlers.type_from_starts_with(payload)
@@ -118,7 +115,9 @@ class UserDataProcessor(object):
error_message = (
"Failed decompressing payload from {} of"
" length {} due to: {}".format(
- ctype_orig, len(payload), e))
+ ctype_orig, len(payload), e
+ )
+ )
_handle_error(error_message, e)
continue
@@ -130,7 +129,7 @@ class UserDataProcessor(object):
# to check the true MIME type for x-shellscript type since all
# shellscript payloads must have a #! header. The other MIME types
# that cloud-init supports do not have the same guarantee.
- if ctype_orig in TYPE_NEEDED + ['text/x-shellscript']:
+ if ctype_orig in TYPE_NEEDED + ["text/x-shellscript"]:
ctype = find_ctype(payload)
if ctype is None:
ctype = ctype_orig
@@ -148,7 +147,7 @@ class UserDataProcessor(object):
# after decoding and decompression.
if part.get_filename():
_set_filename(n_part, part.get_filename())
- for h in ('Launch-Index',):
+ for h in ("Launch-Index",):
if h in part:
_replace_header(n_part, h, str(part[h]))
part = n_part
@@ -171,7 +170,7 @@ class UserDataProcessor(object):
self._attach_part(append_msg, part)
def _attach_launch_index(self, msg):
- header_idx = msg.get('Launch-Index', None)
+ header_idx = msg.get("Launch-Index", None)
payload_idx = None
if msg.get_content_type() in EXAMINE_FOR_LAUNCH_INDEX:
try:
@@ -179,7 +178,7 @@ class UserDataProcessor(object):
# that might affect the final header
payload = util.load_yaml(msg.get_payload(decode=True))
if payload:
- payload_idx = payload.get('launch-index')
+ payload_idx = payload.get("launch-index")
except Exception:
pass
# Header overrides contents, for now (?) or the other way around?
@@ -190,14 +189,15 @@ class UserDataProcessor(object):
payload_idx = header_idx
if payload_idx is not None:
try:
- msg.add_header('Launch-Index', str(int(payload_idx)))
+ msg.add_header("Launch-Index", str(int(payload_idx)))
except (ValueError, TypeError):
pass
def _get_include_once_filename(self, entry):
- entry_fn = util.hash_blob(entry, 'md5', 64)
- return os.path.join(self.paths.get_ipath_cur('data'),
- 'urlcache', entry_fn)
+ entry_fn = util.hash_blob(entry, "md5", 64)
+ return os.path.join(
+ self.paths.get_ipath_cur("data"), "urlcache", entry_fn
+ )
def _process_before_attach(self, msg, attached_id):
if not msg.get_filename():
@@ -212,13 +212,13 @@ class UserDataProcessor(object):
for line in content.splitlines():
lc_line = line.lower()
if lc_line.startswith("#include-once"):
- line = line[len("#include-once"):].lstrip()
+ line = line[len("#include-once") :].lstrip()
# Every following include will now
# not be refetched.... but will be
# re-read from a local urlcache (if it worked)
include_once_on = True
elif lc_line.startswith("#include"):
- line = line[len("#include"):].lstrip()
+ line = line[len("#include") :].lstrip()
# Disable the include once if it was on
# if it wasn't, then this has no effect.
include_once_on = False
@@ -236,29 +236,37 @@ class UserDataProcessor(object):
content = util.load_file(include_once_fn)
else:
try:
- resp = read_file_or_url(include_url, timeout=5, retries=10,
- ssl_details=self.ssl_details)
+ resp = read_file_or_url(
+ include_url,
+ timeout=5,
+ retries=10,
+ ssl_details=self.ssl_details,
+ )
if include_once_on and resp.ok():
- util.write_file(include_once_fn, resp.contents,
- mode=0o600)
+ util.write_file(
+ include_once_fn, resp.contents, mode=0o600
+ )
if resp.ok():
content = resp.contents
else:
error_message = (
"Fetching from {} resulted in"
" a invalid http code of {}".format(
- include_url, resp.code))
+ include_url, resp.code
+ )
+ )
_handle_error(error_message)
except UrlError as urle:
message = str(urle)
# Older versions of requests.exceptions.HTTPError may not
# include the errant url. Append it for clarity in logs.
if include_url not in message:
- message += ' for url: {0}'.format(include_url)
+ message += " for url: {0}".format(include_url)
_handle_error(message, urle)
except IOError as ioe:
error_message = "Fetching from {} resulted in {}".format(
- include_url, ioe)
+ include_url, ioe
+ )
_handle_error(error_message, ioe)
if content is not None:
@@ -275,20 +283,20 @@ class UserDataProcessor(object):
# or
# scalar(payload)
if isinstance(ent, str):
- ent = {'content': ent}
+ ent = {"content": ent}
if not isinstance(ent, (dict)):
# TODO(harlowja) raise?
continue
- content = ent.get('content', '')
- mtype = ent.get('type')
+ content = ent.get("content", "")
+ mtype = ent.get("type")
if not mtype:
default = ARCHIVE_UNDEF_TYPE
if isinstance(content, bytes):
default = ARCHIVE_UNDEF_BINARY_TYPE
mtype = handlers.type_from_starts_with(content, default)
- maintype, subtype = mtype.split('/', 1)
+ maintype, subtype = mtype.split("/", 1)
if maintype == "text":
if isinstance(content, bytes):
content = content.decode()
@@ -297,16 +305,21 @@ class UserDataProcessor(object):
msg = MIMEBase(maintype, subtype)
msg.set_payload(content)
- if 'filename' in ent:
- _set_filename(msg, ent['filename'])
- if 'launch-index' in ent:
- msg.add_header('Launch-Index', str(ent['launch-index']))
+ if "filename" in ent:
+ _set_filename(msg, ent["filename"])
+ if "launch-index" in ent:
+ msg.add_header("Launch-Index", str(ent["launch-index"]))
for header in list(ent.keys()):
- if header.lower() in ('content', 'filename', 'type',
- 'launch-index', 'content-disposition',
- ATTACHMENT_FIELD.lower(),
- CONTENT_TYPE.lower()):
+ if header.lower() in (
+ "content",
+ "filename",
+ "type",
+ "launch-index",
+ "content-disposition",
+ ATTACHMENT_FIELD.lower(),
+ CONTENT_TYPE.lower(),
+ ):
continue
msg.add_header(header, ent[header])
@@ -318,7 +331,7 @@ class UserDataProcessor(object):
at its 'Number-Attachments' header.
"""
if ATTACHMENT_FIELD not in outer_msg:
- outer_msg[ATTACHMENT_FIELD] = '0'
+ outer_msg[ATTACHMENT_FIELD] = "0"
if new_count is not None:
_replace_header(outer_msg, ATTACHMENT_FIELD, str(new_count))
@@ -344,8 +357,8 @@ class UserDataProcessor(object):
def is_skippable(part):
# multipart/* are just containers
- part_maintype = part.get_content_maintype() or ''
- if part_maintype.lower() == 'multipart':
+ part_maintype = part.get_content_maintype() or ""
+ if part_maintype.lower() == "multipart":
return True
return False
@@ -355,7 +368,7 @@ def convert_string(raw_data, content_type=NOT_MULTIPART_TYPE):
"""convert a string (more likely bytes) or a message into
a mime message."""
if not raw_data:
- raw_data = b''
+ raw_data = b""
def create_binmsg(data, content_type):
maintype, subtype = content_type.split("/", 1)
@@ -364,12 +377,12 @@ def convert_string(raw_data, content_type=NOT_MULTIPART_TYPE):
return msg
if isinstance(raw_data, str):
- bdata = raw_data.encode('utf-8')
+ bdata = raw_data.encode("utf-8")
else:
bdata = raw_data
bdata = util.decomp_gzip(bdata, decode=False)
if b"mime-version:" in bdata[0:4096].lower():
- msg = util.message_from_string(bdata.decode('utf-8'))
+ msg = util.message_from_string(bdata.decode("utf-8"))
else:
msg = create_binmsg(bdata, content_type)
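Illustrative use of convert_string above (a minimal sketch, not part of this commit): raw user-data that is not already a MIME document is wrapped in a single part of the default NOT_MULTIPART_TYPE, whereas input containing a "mime-version:" header is parsed as-is; gzip-compressed bytes are transparently decompressed first via util.decomp_gzip.

    from cloudinit.user_data import convert_string

    # Plain user-data is wrapped in a single non-multipart message.
    msg = convert_string(b"#cloud-config\npackage_update: true\n")
    print(msg.get_content_type())   # text/x-not-multipart

    # Data that already carries MIME headers is parsed instead of wrapped.
    mime = convert_string(
        "MIME-Version: 1.0\nContent-Type: text/cloud-config\n\nruncmd: [ls]\n"
    )
    print(mime.get_content_type())  # text/cloud-config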
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 27821de5..569fc215 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -34,15 +34,15 @@ import time
from base64 import b64decode, b64encode
from errno import ENOENT
from functools import lru_cache
-from urllib import parse
from typing import List
+from urllib import parse
from cloudinit import importer
from cloudinit import log as logging
-from cloudinit import subp
from cloudinit import (
mergers,
safeyaml,
+ subp,
temp_utils,
type_utils,
url_helper,
@@ -55,16 +55,16 @@ LOG = logging.getLogger(__name__)
# Helps cleanup filenames to ensure they aren't FS incompatible
FN_REPLACEMENTS = {
- os.sep: '_',
+ os.sep: "_",
}
-FN_ALLOWED = ('_-.()' + string.digits + string.ascii_letters)
+FN_ALLOWED = "_-.()" + string.digits + string.ascii_letters
-TRUE_STRINGS = ('true', '1', 'on', 'yes')
-FALSE_STRINGS = ('off', '0', 'no', 'false')
+TRUE_STRINGS = ("true", "1", "on", "yes")
+FALSE_STRINGS = ("off", "0", "no", "false")
def kernel_version():
- return tuple(map(int, os.uname().release.split('.')[:2]))
+ return tuple(map(int, os.uname().release.split(".")[:2]))
@lru_cache()
@@ -74,28 +74,36 @@ def get_dpkg_architecture(target=None):
N.B. This function is wrapped in functools.lru_cache, so repeated calls
won't shell out every time.
"""
- out, _ = subp.subp(['dpkg', '--print-architecture'], capture=True,
- target=target)
+ out, _ = subp.subp(
+ ["dpkg", "--print-architecture"], capture=True, target=target
+ )
return out.strip()
@lru_cache()
def lsb_release(target=None):
- fmap = {'Codename': 'codename', 'Description': 'description',
- 'Distributor ID': 'id', 'Release': 'release'}
+ fmap = {
+ "Codename": "codename",
+ "Description": "description",
+ "Distributor ID": "id",
+ "Release": "release",
+ }
data = {}
try:
- out, _ = subp.subp(['lsb_release', '--all'], capture=True,
- target=target)
+ out, _ = subp.subp(
+ ["lsb_release", "--all"], capture=True, target=target
+ )
for line in out.splitlines():
fname, _, val = line.partition(":")
if fname in fmap:
data[fmap[fname]] = val.strip()
missing = [k for k in fmap.values() if k not in data]
if len(missing):
- LOG.warning("Missing fields in lsb_release --all output: %s",
- ','.join(missing))
+ LOG.warning(
+ "Missing fields in lsb_release --all output: %s",
+ ",".join(missing),
+ )
except subp.ProcessExecutionError as err:
LOG.warning("Unable to get lsb_release --all: %s", err)
@@ -104,14 +112,14 @@ def lsb_release(target=None):
return data
-def decode_binary(blob, encoding='utf-8'):
+def decode_binary(blob, encoding="utf-8"):
# Converts a binary type into a text type using given encoding.
if isinstance(blob, str):
return blob
return blob.decode(encoding)
-def encode_text(text, encoding='utf-8'):
+def encode_text(text, encoding="utf-8"):
# Converts a text string into a binary type using given encoding.
if isinstance(text, bytes):
return text
@@ -123,7 +131,7 @@ def b64d(source):
# str/unicode if the result is utf-8 compatible, otherwise returning bytes.
decoded = b64decode(source)
try:
- return decoded.decode('utf-8')
+ return decoded.decode("utf-8")
except UnicodeDecodeError:
return decoded
@@ -132,8 +140,8 @@ def b64e(source):
# Base64 encode some data, accepting bytes or unicode/str, and returning
# str/unicode if the result is utf-8 compatible, otherwise returning bytes.
if not isinstance(source, bytes):
- source = source.encode('utf-8')
- return b64encode(source).decode('utf-8')
+ source = source.encode("utf-8")
+ return b64encode(source).decode("utf-8")
def fully_decoded_payload(part):
@@ -143,14 +151,15 @@ def fully_decoded_payload(part):
# bytes, first try to decode to str via CT charset, and failing that, try
# utf-8 using surrogate escapes.
cte_payload = part.get_payload(decode=True)
- if (part.get_content_maintype() == 'text' and
- isinstance(cte_payload, bytes)):
+ if part.get_content_maintype() == "text" and isinstance(
+ cte_payload, bytes
+ ):
charset = part.get_charset()
if charset and charset.input_codec:
encoding = charset.input_codec
else:
- encoding = 'utf-8'
- return cte_payload.decode(encoding, 'surrogateescape')
+ encoding = "utf-8"
+ return cte_payload.decode(encoding, "surrogateescape")
return cte_payload
@@ -159,7 +168,7 @@ class SeLinuxGuard(object):
# Late import since it might not always
# be possible to use this
try:
- self.selinux = importer.import_module('selinux')
+ self.selinux = importer.import_module("selinux")
except ImportError:
self.selinux = None
self.path = path
@@ -184,13 +193,20 @@ class SeLinuxGuard(object):
except OSError:
return
- LOG.debug("Restoring selinux mode for %s (recursive=%s)",
- path, self.recursive)
+ LOG.debug(
+ "Restoring selinux mode for %s (recursive=%s)",
+ path,
+ self.recursive,
+ )
try:
self.selinux.restorecon(path, recursive=self.recursive)
except OSError as e:
- LOG.warning('restorecon failed on %s,%s maybe badness? %s',
- path, self.recursive, e)
+ LOG.warning(
+ "restorecon failed on %s,%s maybe badness? %s",
+ path,
+ self.recursive,
+ e,
+ )
class MountFailedError(Exception):
@@ -208,12 +224,18 @@ def fork_cb(child_cb, *args, **kwargs):
child_cb(*args, **kwargs)
os._exit(0)
except Exception:
- logexc(LOG, "Failed forking and calling callback %s",
- type_utils.obj_name(child_cb))
+ logexc(
+ LOG,
+ "Failed forking and calling callback %s",
+ type_utils.obj_name(child_cb),
+ )
os._exit(1)
else:
- LOG.debug("Forked child %s who will run callback %s",
- fid, type_utils.obj_name(child_cb))
+ LOG.debug(
+ "Forked child %s who will run callback %s",
+ fid,
+ type_utils.obj_name(child_cb),
+ )
def is_true(val, addons=None):
@@ -310,7 +332,7 @@ def clean_filename(fn):
if k not in FN_ALLOWED:
removals.append(k)
for k in removals:
- fn = fn.replace(k, '')
+ fn = fn.replace(k, "")
fn = fn.strip()
return fn
@@ -334,7 +356,7 @@ def decomp_gzip(data, quiet=True, decode=True):
def extract_usergroup(ug_pair):
if not ug_pair:
return (None, None)
- ug_parted = ug_pair.split(':', 1)
+ ug_parted = ug_pair.split(":", 1)
u = ug_parted[0].strip()
if len(ug_parted) == 2:
g = ug_parted[1].strip()
@@ -359,14 +381,20 @@ def find_modules(root_dir) -> dict:
return entries
-def multi_log(text, console=True, stderr=True,
- log=None, log_level=logging.DEBUG, fallback_to_stdout=True):
+def multi_log(
+ text,
+ console=True,
+ stderr=True,
+ log=None,
+ log_level=logging.DEBUG,
+ fallback_to_stdout=True,
+):
if stderr:
sys.stderr.write(text)
if console:
conpath = "/dev/console"
if os.path.exists(conpath):
- with open(conpath, 'w') as wfh:
+ with open(conpath, "w") as wfh:
wfh.write(text)
wfh.flush()
elif fallback_to_stdout:
@@ -388,36 +416,36 @@ def multi_log(text, console=True, stderr=True,
@lru_cache()
def is_Linux():
- return 'Linux' in platform.system()
+ return "Linux" in platform.system()
@lru_cache()
def is_BSD():
- if 'BSD' in platform.system():
+ if "BSD" in platform.system():
return True
- if platform.system() == 'DragonFly':
+ if platform.system() == "DragonFly":
return True
return False
@lru_cache()
def is_FreeBSD():
- return system_info()['variant'] == "freebsd"
+ return system_info()["variant"] == "freebsd"
@lru_cache()
def is_DragonFlyBSD():
- return system_info()['variant'] == "dragonfly"
+ return system_info()["variant"] == "dragonfly"
@lru_cache()
def is_NetBSD():
- return system_info()['variant'] == "netbsd"
+ return system_info()["variant"] == "netbsd"
@lru_cache()
def is_OpenBSD():
- return system_info()['variant'] == "openbsd"
+ return system_info()["variant"] == "openbsd"
def get_cfg_option_bool(yobj, key, default=False):
@@ -447,74 +475,80 @@ def _parse_redhat_release(release_file=None):
"""
if not release_file:
- release_file = '/etc/redhat-release'
+ release_file = "/etc/redhat-release"
if not os.path.exists(release_file):
return {}
redhat_release = load_file(release_file)
redhat_regex = (
- r'(?P<name>.+) release (?P<version>[\d\.]+) '
- r'\((?P<codename>[^)]+)\)')
+ r"(?P<name>.+) release (?P<version>[\d\.]+) "
+ r"\((?P<codename>[^)]+)\)"
+ )
# Virtuozzo deviates here
if "Virtuozzo" in redhat_release:
- redhat_regex = r'(?P<name>.+) release (?P<version>[\d\.]+)'
+ redhat_regex = r"(?P<name>.+) release (?P<version>[\d\.]+)"
match = re.match(redhat_regex, redhat_release)
if match:
group = match.groupdict()
# Virtuozzo has no codename in this file
- if "Virtuozzo" in group['name']:
- group['codename'] = group['name']
-
- group['name'] = group['name'].lower().partition(' linux')[0]
- if group['name'] == 'red hat enterprise':
- group['name'] = 'redhat'
- return {'ID': group['name'], 'VERSION_ID': group['version'],
- 'VERSION_CODENAME': group['codename']}
+ if "Virtuozzo" in group["name"]:
+ group["codename"] = group["name"]
+
+ group["name"] = group["name"].lower().partition(" linux")[0]
+ if group["name"] == "red hat enterprise":
+ group["name"] = "redhat"
+ return {
+ "ID": group["name"],
+ "VERSION_ID": group["version"],
+ "VERSION_CODENAME": group["codename"],
+ }
return {}
@lru_cache()
def get_linux_distro():
- distro_name = ''
- distro_version = ''
- flavor = ''
+ distro_name = ""
+ distro_version = ""
+ flavor = ""
os_release = {}
os_release_rhel = False
- if os.path.exists('/etc/os-release'):
- os_release = load_shell_content(load_file('/etc/os-release'))
+ if os.path.exists("/etc/os-release"):
+ os_release = load_shell_content(load_file("/etc/os-release"))
if not os_release:
os_release_rhel = True
os_release = _parse_redhat_release()
if os_release:
- distro_name = os_release.get('ID', '')
- distro_version = os_release.get('VERSION_ID', '')
- if 'sles' in distro_name or 'suse' in distro_name:
+ distro_name = os_release.get("ID", "")
+ distro_version = os_release.get("VERSION_ID", "")
+ if "sles" in distro_name or "suse" in distro_name:
# RELEASE_BLOCKER: We will drop this sles divergent behavior in
# the future so that get_linux_distro returns a named tuple
# which will include both version codename and architecture
# on all distributions.
flavor = platform.machine()
- elif distro_name == 'photon':
- flavor = os_release.get('PRETTY_NAME', '')
- elif distro_name == 'virtuozzo' and not os_release_rhel:
+ elif distro_name == "photon":
+ flavor = os_release.get("PRETTY_NAME", "")
+ elif distro_name == "virtuozzo" and not os_release_rhel:
# Only use this if the redhat file is not parsed
- flavor = os_release.get('PRETTY_NAME', '')
+ flavor = os_release.get("PRETTY_NAME", "")
else:
- flavor = os_release.get('VERSION_CODENAME', '')
+ flavor = os_release.get("VERSION_CODENAME", "")
if not flavor:
- match = re.match(r'[^ ]+ \((?P<codename>[^)]+)\)',
- os_release.get('VERSION', ''))
+ match = re.match(
+ r"[^ ]+ \((?P<codename>[^)]+)\)",
+ os_release.get("VERSION", ""),
+ )
if match:
- flavor = match.groupdict()['codename']
- if distro_name == 'rhel':
- distro_name = 'redhat'
+ flavor = match.groupdict()["codename"]
+ if distro_name == "rhel":
+ distro_name = "redhat"
elif is_BSD():
distro_name = platform.system().lower()
distro_version = platform.release()
else:
- dist = ('', '', '')
+ dist = ("", "", "")
try:
# Was removed in 3.8
dist = platform.dist() # pylint: disable=W1505,E1101
@@ -526,36 +560,60 @@ def get_linux_distro():
if entry:
found = 1
if not found:
- LOG.warning('Unable to determine distribution, template '
- 'expansion may have unexpected results')
+ LOG.warning(
+ "Unable to determine distribution, template "
+ "expansion may have unexpected results"
+ )
return dist
return (distro_name, distro_version, flavor)
def _get_variant(info):
- system = info['system'].lower()
- variant = 'unknown'
+ system = info["system"].lower()
+ variant = "unknown"
if system == "linux":
- linux_dist = info['dist'][0].lower()
+ linux_dist = info["dist"][0].lower()
if linux_dist in (
- 'almalinux', 'alpine', 'arch', 'centos', 'cloudlinux',
- 'debian', 'eurolinux', 'fedora', 'miraclelinux', 'openeuler',
- 'photon', 'rhel', 'rocky', 'suse', 'virtuozzo'):
+ "almalinux",
+ "alpine",
+ "arch",
+ "centos",
+ "cloudlinux",
+ "debian",
+ "eurolinux",
+ "fedora",
+ "miraclelinux",
+ "openeuler",
+ "photon",
+ "rhel",
+ "rocky",
+ "suse",
+ "virtuozzo",
+ ):
variant = linux_dist
- elif linux_dist in ('ubuntu', 'linuxmint', 'mint'):
- variant = 'ubuntu'
- elif linux_dist == 'redhat':
- variant = 'rhel'
+ elif linux_dist in ("ubuntu", "linuxmint", "mint"):
+ variant = "ubuntu"
+ elif linux_dist == "redhat":
+ variant = "rhel"
elif linux_dist in (
- 'opensuse', 'opensuse-tumbleweed', 'opensuse-leap',
- 'sles', 'sle_hpc'):
- variant = 'suse'
+ "opensuse",
+ "opensuse-tumbleweed",
+ "opensuse-leap",
+ "sles",
+ "sle_hpc",
+ ):
+ variant = "suse"
else:
- variant = 'linux'
+ variant = "linux"
elif system in (
- 'windows', 'darwin', "freebsd", "netbsd",
- "openbsd", "dragonfly"):
+ "windows",
+ "darwin",
+ "freebsd",
+ "netbsd",
+ "openbsd",
+ "dragonfly",
+ ):
variant = system
return variant
@@ -564,14 +622,14 @@ def _get_variant(info):
@lru_cache()
def system_info():
info = {
- 'platform': platform.platform(),
- 'system': platform.system(),
- 'release': platform.release(),
- 'python': platform.python_version(),
- 'uname': list(platform.uname()),
- 'dist': get_linux_distro()
+ "platform": platform.platform(),
+ "system": platform.system(),
+ "release": platform.release(),
+ "python": platform.python_version(),
+ "uname": list(platform.uname()),
+ "dist": get_linux_distro(),
}
- info['variant'] = _get_variant(info)
+ info["variant"] = _get_variant(info)
return info
@@ -726,23 +784,24 @@ def redirect_output(outfmt, errfmt, o_out=None, o_err=None):
os.dup2(new_fp.fileno(), o_err.fileno())
-def make_url(scheme, host, port=None,
- path='', params='', query='', fragment=''):
+def make_url(
+ scheme, host, port=None, path="", params="", query="", fragment=""
+):
- pieces = [scheme or '']
+ pieces = [scheme or ""]
- netloc = ''
+ netloc = ""
if host:
netloc = str(host)
if port is not None:
netloc += ":" + "%s" % (port)
- pieces.append(netloc or '')
- pieces.append(path or '')
- pieces.append(params or '')
- pieces.append(query or '')
- pieces.append(fragment or '')
+ pieces.append(netloc or "")
+ pieces.append(path or "")
+ pieces.append(params or "")
+ pieces.append(query or "")
+ pieces.append(fragment or "")
return parse.urlunparse(pieces)
@@ -782,8 +841,9 @@ def umask(n_msk):
def center(text, fill, max_len):
- return '{0:{fill}{align}{size}}'.format(text, fill=fill,
- align="^", size=max_len)
+ return "{0:{fill}{align}{size}}".format(
+ text, fill=fill, align="^", size=max_len
+ )
def del_dir(path):
@@ -798,9 +858,9 @@ def del_dir(path):
def read_optional_seed(fill, base="", ext="", timeout=5):
try:
(md, ud, vd) = read_seeded(base, ext, timeout)
- fill['user-data'] = ud
- fill['vendor-data'] = vd
- fill['meta-data'] = md
+ fill["user-data"] = ud
+ fill["vendor-data"] = vd
+ fill["meta-data"] = md
return True
except url_helper.UrlError as e:
if e.code == url_helper.NOT_FOUND:
@@ -812,31 +872,33 @@ def fetch_ssl_details(paths=None):
ssl_details = {}
# Lookup in these locations for ssl key/cert files
ssl_cert_paths = [
- '/var/lib/cloud/data/ssl',
- '/var/lib/cloud/instance/data/ssl',
+ "/var/lib/cloud/data/ssl",
+ "/var/lib/cloud/instance/data/ssl",
]
if paths:
- ssl_cert_paths.extend([
- os.path.join(paths.get_ipath_cur('data'), 'ssl'),
- os.path.join(paths.get_cpath('data'), 'ssl'),
- ])
+ ssl_cert_paths.extend(
+ [
+ os.path.join(paths.get_ipath_cur("data"), "ssl"),
+ os.path.join(paths.get_cpath("data"), "ssl"),
+ ]
+ )
ssl_cert_paths = uniq_merge(ssl_cert_paths)
ssl_cert_paths = [d for d in ssl_cert_paths if d and os.path.isdir(d)]
cert_file = None
for d in ssl_cert_paths:
- if os.path.isfile(os.path.join(d, 'cert.pem')):
- cert_file = os.path.join(d, 'cert.pem')
+ if os.path.isfile(os.path.join(d, "cert.pem")):
+ cert_file = os.path.join(d, "cert.pem")
break
key_file = None
for d in ssl_cert_paths:
- if os.path.isfile(os.path.join(d, 'key.pem')):
- key_file = os.path.join(d, 'key.pem')
+ if os.path.isfile(os.path.join(d, "key.pem")):
+ key_file = os.path.join(d, "key.pem")
break
if cert_file and key_file:
- ssl_details['cert_file'] = cert_file
- ssl_details['key_file'] = key_file
+ ssl_details["cert_file"] = cert_file
+ ssl_details["key_file"] = key_file
elif cert_file:
- ssl_details['cert_file'] = cert_file
+ ssl_details["cert_file"] = cert_file
return ssl_details
@@ -844,32 +906,38 @@ def load_yaml(blob, default=None, allowed=(dict,)):
loaded = default
blob = decode_binary(blob)
try:
- LOG.debug("Attempting to load yaml from string "
- "of length %s with allowed root types %s",
- len(blob), allowed)
+ LOG.debug(
+ "Attempting to load yaml from string "
+ "of length %s with allowed root types %s",
+ len(blob),
+ allowed,
+ )
converted = safeyaml.load(blob)
if converted is None:
LOG.debug("loaded blob returned None, returning default.")
converted = default
elif not isinstance(converted, allowed):
# Yes this will just be caught, but thats ok for now...
- raise TypeError(("Yaml load allows %s root types,"
- " but got %s instead") %
- (allowed, type_utils.obj_name(converted)))
+ raise TypeError(
+ "Yaml load allows %s root types, but got %s instead"
+ % (allowed, type_utils.obj_name(converted))
+ )
loaded = converted
except (safeyaml.YAMLError, TypeError, ValueError) as e:
- msg = 'Failed loading yaml blob'
+ msg = "Failed loading yaml blob"
mark = None
- if hasattr(e, 'context_mark') and getattr(e, 'context_mark'):
- mark = getattr(e, 'context_mark')
- elif hasattr(e, 'problem_mark') and getattr(e, 'problem_mark'):
- mark = getattr(e, 'problem_mark')
+ if hasattr(e, "context_mark") and getattr(e, "context_mark"):
+ mark = getattr(e, "context_mark")
+ elif hasattr(e, "problem_mark") and getattr(e, "problem_mark"):
+ mark = getattr(e, "problem_mark")
if mark:
msg += (
'. Invalid format at line {line} column {col}: "{err}"'.format(
- line=mark.line + 1, col=mark.column + 1, err=e))
+ line=mark.line + 1, col=mark.column + 1, err=e
+ )
+ )
else:
- msg += '. {err}'.format(err=e)
+ msg += ". {err}".format(err=e)
LOG.warning(msg)
return loaded
@@ -884,22 +952,25 @@ def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0):
vd_url = "%s%s%s" % (base, "vendor-data", ext)
md_url = "%s%s%s" % (base, "meta-data", ext)
- md_resp = url_helper.read_file_or_url(md_url, timeout=timeout,
- retries=retries)
+ md_resp = url_helper.read_file_or_url(
+ md_url, timeout=timeout, retries=retries
+ )
md = None
if md_resp.ok():
md = load_yaml(decode_binary(md_resp.contents), default={})
- ud_resp = url_helper.read_file_or_url(ud_url, timeout=timeout,
- retries=retries)
+ ud_resp = url_helper.read_file_or_url(
+ ud_url, timeout=timeout, retries=retries
+ )
ud = None
if ud_resp.ok():
ud = ud_resp.contents
vd = None
try:
- vd_resp = url_helper.read_file_or_url(vd_url, timeout=timeout,
- retries=retries)
+ vd_resp = url_helper.read_file_or_url(
+ vd_url, timeout=timeout, retries=retries
+ )
except url_helper.UrlError as e:
LOG.debug("Error in vendor-data response: %s", e)
else:
@@ -919,8 +990,7 @@ def read_conf_d(confd):
confs = [f for f in confs if f.endswith(".cfg")]
# Remove anything not a file
- confs = [f for f in confs
- if os.path.isfile(os.path.join(confd, f))]
+ confs = [f for f in confs if os.path.isfile(os.path.join(confd, f))]
# Load them all so that they can be merged
cfgs = []
@@ -935,12 +1005,13 @@ def read_conf_with_confd(cfgfile):
confd = False
if "conf_d" in cfg:
- confd = cfg['conf_d']
+ confd = cfg["conf_d"]
if confd:
if not isinstance(confd, str):
- raise TypeError(("Config file %s contains 'conf_d' "
- "with non-string type %s") %
- (cfgfile, type_utils.obj_name(confd)))
+ raise TypeError(
+ "Config file %s contains 'conf_d' with non-string type %s"
+ % (cfgfile, type_utils.obj_name(confd))
+ )
else:
confd = str(confd).strip()
elif os.path.isdir("%s.d" % cfgfile):
@@ -984,19 +1055,21 @@ def read_cc_from_cmdline(cmdline=None):
if end < 0:
end = clen
tokens.append(
- parse.unquote(
- cmdline[begin + begin_l:end].lstrip()).replace("\\n", "\n"))
+ parse.unquote(cmdline[begin + begin_l : end].lstrip()).replace(
+ "\\n", "\n"
+ )
+ )
begin = cmdline.find(tag_begin, end + end_l)
- return '\n'.join(tokens)
+ return "\n".join(tokens)
def dos2unix(contents):
# find first end of line
- pos = contents.find('\n')
- if pos <= 0 or contents[pos - 1] != '\r':
+ pos = contents.find("\n")
+ if pos <= 0 or contents[pos - 1] != "\r":
return contents
- return contents.replace('\r\n', '\n')
+ return contents.replace("\r\n", "\n")
def get_hostname_fqdn(cfg, cloud, metadata_only=False):
@@ -1011,20 +1084,20 @@ def get_hostname_fqdn(cfg, cloud, metadata_only=False):
"""
if "fqdn" in cfg:
# user specified a fqdn. Default hostname then is based off that
- fqdn = cfg['fqdn']
- hostname = get_cfg_option_str(cfg, "hostname", fqdn.split('.')[0])
+ fqdn = cfg["fqdn"]
+ hostname = get_cfg_option_str(cfg, "hostname", fqdn.split(".")[0])
else:
- if "hostname" in cfg and cfg['hostname'].find('.') > 0:
+ if "hostname" in cfg and cfg["hostname"].find(".") > 0:
# user specified hostname, and it had '.' in it
# be nice to them. set fqdn and hostname from that
- fqdn = cfg['hostname']
- hostname = cfg['hostname'][:fqdn.find('.')]
+ fqdn = cfg["hostname"]
+ hostname = cfg["hostname"][: fqdn.find(".")]
else:
# no fqdn set, get fqdn from cloud.
# get hostname from cfg if available otherwise cloud
fqdn = cloud.get_hostname(fqdn=True, metadata_only=metadata_only)
if "hostname" in cfg:
- hostname = cfg['hostname']
+ hostname = cfg["hostname"]
else:
hostname = cloud.get_hostname(metadata_only=metadata_only)
return (hostname, fqdn)
@@ -1085,14 +1158,17 @@ def is_resolvable(name):
global _DNS_REDIRECT_IP
if _DNS_REDIRECT_IP is None:
badips = set()
- badnames = ("does-not-exist.example.com.", "example.invalid.",
- "__cloud_init_expected_not_found__")
+ badnames = (
+ "does-not-exist.example.com.",
+ "example.invalid.",
+ "__cloud_init_expected_not_found__",
+ )
badresults = {}
for iname in badnames:
try:
- result = socket.getaddrinfo(iname, None, 0, 0,
- socket.SOCK_STREAM,
- socket.AI_CANONNAME)
+ result = socket.getaddrinfo(
+ iname, None, 0, 0, socket.SOCK_STREAM, socket.AI_CANONNAME
+ )
badresults[iname] = []
for (_fam, _stype, _proto, cname, sockaddr) in result:
badresults[iname].append("%s: %s" % (cname, sockaddr[0]))
@@ -1128,9 +1204,12 @@ def gethostbyaddr(ip):
def is_resolvable_url(url):
"""determine if this url is resolvable (existing or ip)."""
- return log_time(logfunc=LOG.debug, msg="Resolving URL: " + url,
- func=is_resolvable,
- args=(parse.urlparse(url).hostname,))
+ return log_time(
+ logfunc=LOG.debug,
+ msg="Resolving URL: " + url,
+ func=is_resolvable,
+ args=(parse.urlparse(url).hostname,),
+ )
def search_for_mirror(candidates):
@@ -1166,16 +1245,19 @@ def close_stdin():
os.dup2(fp.fileno(), sys.stdin.fileno())
-def find_devs_with_freebsd(criteria=None, oformat='device',
- tag=None, no_cache=False, path=None):
+def find_devs_with_freebsd(
+ criteria=None, oformat="device", tag=None, no_cache=False, path=None
+):
devlist = []
if not criteria:
return glob.glob("/dev/msdosfs/*") + glob.glob("/dev/iso9660/*")
if criteria.startswith("LABEL="):
label = criteria.lstrip("LABEL=")
devlist = [
- p for p in ['/dev/msdosfs/' + label, '/dev/iso9660/' + label]
- if os.path.exists(p)]
+ p
+ for p in ["/dev/msdosfs/" + label, "/dev/iso9660/" + label]
+ if os.path.exists(p)
+ ]
elif criteria == "TYPE=vfat":
devlist = glob.glob("/dev/msdosfs/*")
elif criteria == "TYPE=iso9660":
@@ -1183,8 +1265,9 @@ def find_devs_with_freebsd(criteria=None, oformat='device',
return devlist
-def find_devs_with_netbsd(criteria=None, oformat='device',
- tag=None, no_cache=False, path=None):
+def find_devs_with_netbsd(
+ criteria=None, oformat="device", tag=None, no_cache=False, path=None
+):
devlist = []
label = None
_type = None
@@ -1193,55 +1276,65 @@ def find_devs_with_netbsd(criteria=None, oformat='device',
label = criteria.lstrip("LABEL=")
if criteria.startswith("TYPE="):
_type = criteria.lstrip("TYPE=")
- out, _err = subp.subp(['sysctl', '-n', 'hw.disknames'], rcs=[0])
+ out, _err = subp.subp(["sysctl", "-n", "hw.disknames"], rcs=[0])
for dev in out.split():
if label or _type:
- mscdlabel_out, _ = subp.subp(['mscdlabel', dev], rcs=[0, 1])
+ mscdlabel_out, _ = subp.subp(["mscdlabel", dev], rcs=[0, 1])
if label and not ('label "%s"' % label) in mscdlabel_out:
continue
if _type == "iso9660" and "ISO filesystem" not in mscdlabel_out:
continue
if _type == "vfat" and "ISO filesystem" in mscdlabel_out:
continue
- devlist.append('/dev/' + dev)
+ devlist.append("/dev/" + dev)
return devlist
-def find_devs_with_openbsd(criteria=None, oformat='device',
- tag=None, no_cache=False, path=None):
- out, _err = subp.subp(['sysctl', '-n', 'hw.disknames'], rcs=[0])
+def find_devs_with_openbsd(
+ criteria=None, oformat="device", tag=None, no_cache=False, path=None
+):
+ out, _err = subp.subp(["sysctl", "-n", "hw.disknames"], rcs=[0])
devlist = []
- for entry in out.rstrip().split(','):
- if not entry.endswith(':'):
+ for entry in out.rstrip().split(","):
+ if not entry.endswith(":"):
# ffs partition with a serial, not a config-drive
continue
- if entry == 'fd0:':
+ if entry == "fd0:":
continue
- devlist.append(entry[:-1] + 'a')
- if not entry.startswith('cd'):
- devlist.append(entry[:-1] + 'i')
- return ['/dev/' + i for i in devlist]
+ devlist.append(entry[:-1] + "a")
+ if not entry.startswith("cd"):
+ devlist.append(entry[:-1] + "i")
+ return ["/dev/" + i for i in devlist]
-def find_devs_with_dragonflybsd(criteria=None, oformat='device',
- tag=None, no_cache=False, path=None):
- out, _err = subp.subp(['sysctl', '-n', 'kern.disks'], rcs=[0])
- devlist = [i for i in sorted(out.split(), reverse=True)
- if not i.startswith("md") and not i.startswith("vn")]
+def find_devs_with_dragonflybsd(
+ criteria=None, oformat="device", tag=None, no_cache=False, path=None
+):
+ out, _err = subp.subp(["sysctl", "-n", "kern.disks"], rcs=[0])
+ devlist = [
+ i
+ for i in sorted(out.split(), reverse=True)
+ if not i.startswith("md") and not i.startswith("vn")
+ ]
if criteria == "TYPE=iso9660":
- devlist = [i for i in devlist
- if i.startswith('cd') or i.startswith('acd')]
+ devlist = [
+ i for i in devlist if i.startswith("cd") or i.startswith("acd")
+ ]
elif criteria in ["LABEL=CONFIG-2", "TYPE=vfat"]:
- devlist = [i for i in devlist
- if not (i.startswith('cd') or i.startswith('acd'))]
+ devlist = [
+ i
+ for i in devlist
+ if not (i.startswith("cd") or i.startswith("acd"))
+ ]
elif criteria:
LOG.debug("Unexpected criteria: %s", criteria)
- return ['/dev/' + i for i in devlist]
+ return ["/dev/" + i for i in devlist]
-def find_devs_with(criteria=None, oformat='device',
- tag=None, no_cache=False, path=None):
+def find_devs_with(
+ criteria=None, oformat="device", tag=None, no_cache=False, path=None
+):
"""
find devices matching given criteria (via blkid)
criteria can be *one* of:
@@ -1250,19 +1343,17 @@ def find_devs_with(criteria=None, oformat='device',
UUID=<uuid>
"""
if is_FreeBSD():
- return find_devs_with_freebsd(criteria, oformat,
- tag, no_cache, path)
+ return find_devs_with_freebsd(criteria, oformat, tag, no_cache, path)
elif is_NetBSD():
- return find_devs_with_netbsd(criteria, oformat,
- tag, no_cache, path)
+ return find_devs_with_netbsd(criteria, oformat, tag, no_cache, path)
elif is_OpenBSD():
- return find_devs_with_openbsd(criteria, oformat,
- tag, no_cache, path)
+ return find_devs_with_openbsd(criteria, oformat, tag, no_cache, path)
elif is_DragonFlyBSD():
- return find_devs_with_dragonflybsd(criteria, oformat,
- tag, no_cache, path)
+ return find_devs_with_dragonflybsd(
+ criteria, oformat, tag, no_cache, path
+ )
- blk_id_cmd = ['blkid']
+ blk_id_cmd = ["blkid"]
options = []
if criteria:
# Search for block devices with tokens named NAME that
@@ -1284,7 +1375,7 @@ def find_devs_with(criteria=None, oformat='device',
# Display blkid's output using the specified format.
# The format parameter may be:
# full, value, list, device, udev, export
- options.append('-o%s' % (oformat))
+ options.append("-o%s" % (oformat))
if path:
options.append(path)
cmd = blk_id_cmd + options
@@ -1318,9 +1409,9 @@ def blkid(devs=None, disable_cache=False):
else:
devs = list(devs)
- cmd = ['blkid', '-o', 'full']
+ cmd = ["blkid", "-o", "full"]
if disable_cache:
- cmd.extend(['-c', '/dev/null'])
+ cmd.extend(["-c", "/dev/null"])
cmd.extend(devs)
# we have to decode with 'replace' as shelx.split (called by
@@ -1338,7 +1429,7 @@ def blkid(devs=None, disable_cache=False):
def peek_file(fname, max_bytes):
LOG.debug("Peeking at %s (max_bytes=%s)", fname, max_bytes)
- with open(fname, 'rb') as ifh:
+ with open(fname, "rb") as ifh:
return ifh.read(max_bytes)
@@ -1356,7 +1447,7 @@ def load_file(fname, read_cb=None, quiet=False, decode=True):
LOG.debug("Reading from %s (quiet=%s)", fname, quiet)
ofh = io.BytesIO()
try:
- with open(fname, 'rb') as ifh:
+ with open(fname, "rb") as ifh:
pipe_in_out(ifh, ofh, chunk_cb=read_cb)
except IOError as e:
if not quiet:
@@ -1391,7 +1482,7 @@ def _get_cmdline():
def get_cmdline():
- if 'DEBUG_PROC_CMDLINE' in os.environ:
+ if "DEBUG_PROC_CMDLINE" in os.environ:
return os.environ["DEBUG_PROC_CMDLINE"]
return _get_cmdline()
@@ -1444,18 +1535,18 @@ def chownbyname(fname, user=None, group=None):
# this returns the specific 'mode' entry, cleanly formatted, with value
def get_output_cfg(cfg, mode):
ret = [None, None]
- if not cfg or 'output' not in cfg:
+ if not cfg or "output" not in cfg:
return ret
- outcfg = cfg['output']
+ outcfg = cfg["output"]
if mode in outcfg:
modecfg = outcfg[mode]
else:
- if 'all' not in outcfg:
+ if "all" not in outcfg:
return ret
        # if there is an 'all' item in the output config
        # then it applies to all modes (init, config, final)
- modecfg = outcfg['all']
+ modecfg = outcfg["all"]
# if value is a string, it specifies stdout and stderr
if isinstance(modecfg, str):
@@ -1471,10 +1562,10 @@ def get_output_cfg(cfg, mode):
    # if it is a dictionary, expect 'output' and 'error'
    # items, which indicate stdout and stderr
if isinstance(modecfg, dict):
- if 'output' in modecfg:
- ret[0] = modecfg['output']
- if 'error' in modecfg:
- ret[1] = modecfg['error']
+ if "output" in modecfg:
+ ret[0] = modecfg["output"]
+ if "error" in modecfg:
+ ret[1] = modecfg["error"]
# if err's entry == "&1", then make it same as stdout
# as in shell syntax of "echo foo >/dev/null 2>&1"
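# Illustrative sketch (assumption, not part of this patch): the 'output'
# config shapes handled above.  A plain string applies to both stdout and
# stderr; a dict separates them; an "error" of "&1" means "same as stdout".
example_string_form = {
    "output": {"all": "| tee -a /var/log/cloud-init-output.log"}
}
example_dict_form = {
    "output": {"init": {"output": "> /var/log/cloud-init.out", "error": "&1"}}
}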
@@ -1489,7 +1580,7 @@ def get_output_cfg(cfg, mode):
found = False
for s in swlist:
if val.startswith(s):
- val = "%s %s" % (s, val[len(s):].strip())
+ val = "%s %s" % (s, val[len(s) :].strip())
found = True
break
if not found:
@@ -1508,20 +1599,20 @@ def get_config_logfiles(cfg):
logs = []
if not cfg or not isinstance(cfg, dict):
return logs
- default_log = cfg.get('def_log_file')
+ default_log = cfg.get("def_log_file")
if default_log:
logs.append(default_log)
for fmt in get_output_cfg(cfg, None):
if not fmt:
continue
- match = re.match(r'(?P<type>\||>+)\s*(?P<target>.*)', fmt)
+ match = re.match(r"(?P<type>\||>+)\s*(?P<target>.*)", fmt)
if not match:
continue
- target = match.group('target')
+ target = match.group("target")
parts = target.split()
if len(parts) == 1:
logs.append(target)
- elif ['tee', '-a'] == parts[:2]:
+ elif ["tee", "-a"] == parts[:2]:
logs.append(parts[2])
return list(set(logs))
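# Illustrative sketch (not part of this patch): how the regex above extracts
# the log target from the default "tee" pipeline style of output spec.
import re

m = re.match(
    r"(?P<type>\||>+)\s*(?P<target>.*)",
    "| tee -a /var/log/cloud-init-output.log",
)
parts = m.group("target").split()
assert parts[:2] == ["tee", "-a"]
assert parts[2] == "/var/log/cloud-init-output.log"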
@@ -1586,17 +1677,19 @@ def load_json(text, root_types=(dict,)):
decoded = json.loads(decode_binary(text))
if not isinstance(decoded, tuple(root_types)):
expected_types = ", ".join([str(t) for t in root_types])
- raise TypeError("(%s) root types expected, got %s instead"
- % (expected_types, type(decoded)))
+ raise TypeError(
+ "(%s) root types expected, got %s instead"
+ % (expected_types, type(decoded))
+ )
return decoded
def json_serialize_default(_obj):
"""Handler for types which aren't json serializable."""
try:
- return 'ci-b64:{0}'.format(b64e(_obj))
+ return "ci-b64:{0}".format(b64e(_obj))
except AttributeError:
- return 'Warning: redacted unserializable type {0}'.format(type(_obj))
+ return "Warning: redacted unserializable type {0}".format(type(_obj))
def json_preserialize_binary(data):
@@ -1611,7 +1704,7 @@ def json_preserialize_binary(data):
if isinstance(value, (dict)):
data[key] = json_preserialize_binary(value)
if isinstance(value, bytes):
- data[key] = 'ci-b64:{0}'.format(b64e(value))
+ data[key] = "ci-b64:{0}".format(b64e(value))
return data
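# Illustrative sketch (assumption: b64e() base64-encodes and returns str,
# matching its use above).  Bytes that json cannot serialize are replaced by
# a "ci-b64:"-prefixed base64 string.
import base64

value = b"\x00\xff"
encoded = "ci-b64:{0}".format(base64.b64encode(value).decode("utf-8"))
assert encoded == "ci-b64:AP8="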
@@ -1619,8 +1712,12 @@ def json_dumps(data):
"""Return data in nicely formatted json."""
try:
return json.dumps(
- data, indent=1, sort_keys=True, separators=(',', ': '),
- default=json_serialize_default)
+ data,
+ indent=1,
+ sort_keys=True,
+ separators=(",", ": "),
+ default=json_serialize_default,
+ )
except UnicodeDecodeError:
if sys.version_info[:2] == (2, 7):
data = json_preserialize_binary(data)
@@ -1655,17 +1752,17 @@ def mounts():
# Go through mounts to see what is already mounted
if os.path.exists("/proc/mounts"):
mount_locs = load_file("/proc/mounts").splitlines()
- method = 'proc'
+ method = "proc"
else:
(mountoutput, _err) = subp.subp("mount")
mount_locs = mountoutput.splitlines()
- method = 'mount'
- mountre = r'^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$'
+ method = "mount"
+ mountre = r"^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$"
for mpline in mount_locs:
# Linux: /dev/sda1 on /boot type ext4 (rw,relatime,data=ordered)
# FreeBSD: /dev/vtbd0p2 on / (ufs, local, journaled soft-updates)
try:
- if method == 'proc':
+ if method == "proc":
(dev, mp, fstype, opts, _freq, _passno) = mpline.split()
else:
m = re.search(mountre, mpline)
@@ -1679,9 +1776,9 @@ def mounts():
# can be escaped as '\040', so undo that..
mp = mp.replace("\\040", " ")
mounted[dev] = {
- 'fstype': fstype,
- 'mountpoint': mp,
- 'opts': opts,
+ "fstype": fstype,
+ "mountpoint": mp,
+ "opts": opts,
}
LOG.debug("Fetched %s mounts from %s", mounted, method)
except (IOError, OSError):
@@ -1689,8 +1786,9 @@ def mounts():
return mounted
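# Illustrative sketch (not part of this patch): the 'proc' branch above splits
# a /proc/mounts line into six fields and un-escapes '\040' in mount points.
line = r"/dev/sda1 /mnt/my\040disk ext4 rw,relatime 0 0"
dev, mp, fstype, _opts, _freq, _passno = line.split()
mp = mp.replace("\\040", " ")
assert (dev, mp, fstype) == ("/dev/sda1", "/mnt/my disk", "ext4")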
-def mount_cb(device, callback, data=None, mtype=None,
- update_env_for_mount=None):
+def mount_cb(
+ device, callback, data=None, mtype=None, update_env_for_mount=None
+):
"""
Mount the device, call method 'callback' passing the directory
in which it was mounted, then unmount. Return whatever 'callback'
@@ -1708,8 +1806,10 @@ def mount_cb(device, callback, data=None, mtype=None,
mtypes = None
else:
raise TypeError(
- 'Unsupported type provided for mtype parameter: {_type}'.format(
- _type=type(mtype)))
+ "Unsupported type provided for mtype parameter: {_type}".format(
+ _type=type(mtype)
+ )
+ )
# clean up 'mtype' input a bit based on platform.
if is_Linux():
@@ -1717,7 +1817,7 @@ def mount_cb(device, callback, data=None, mtype=None,
mtypes = ["auto"]
elif is_BSD():
if mtypes is None:
- mtypes = ['ufs', 'cd9660', 'msdos']
+ mtypes = ["ufs", "cd9660", "msdos"]
for index, mtype in enumerate(mtypes):
if mtype == "iso9660":
mtypes[index] = "cd9660"
@@ -1725,21 +1825,21 @@ def mount_cb(device, callback, data=None, mtype=None,
mtypes[index] = "msdos"
else:
# we cannot do a smart "auto", so just call 'mount' once with no -t
- mtypes = ['']
+ mtypes = [""]
mounted = mounts()
with temp_utils.tempdir() as tmpd:
umount = False
if os.path.realpath(device) in mounted:
- mountpoint = mounted[os.path.realpath(device)]['mountpoint']
+ mountpoint = mounted[os.path.realpath(device)]["mountpoint"]
else:
failure_reason = None
for mtype in mtypes:
mountpoint = None
try:
- mountcmd = ['mount', '-o', 'ro']
+ mountcmd = ["mount", "-o", "ro"]
if mtype:
- mountcmd.extend(['-t', mtype])
+ mountcmd.extend(["-t", mtype])
mountcmd.append(device)
mountcmd.append(tmpd)
subp.subp(mountcmd, update_env=update_env_for_mount)
@@ -1747,14 +1847,21 @@ def mount_cb(device, callback, data=None, mtype=None,
mountpoint = tmpd
break
except (IOError, OSError) as exc:
- LOG.debug("Failed to mount device: '%s' with type: '%s' "
- "using mount command: '%s', "
- "which caused exception: %s",
- device, mtype, ' '.join(mountcmd), exc)
+ LOG.debug(
+ "Failed to mount device: '%s' with type: '%s' "
+ "using mount command: '%s', "
+ "which caused exception: %s",
+ device,
+ mtype,
+ " ".join(mountcmd),
+ exc,
+ )
failure_reason = exc
if not mountpoint:
- raise MountFailedError("Failed mounting %s to %s due to: %s" %
- (device, tmpd, failure_reason))
+ raise MountFailedError(
+ "Failed mounting %s to %s due to: %s"
+ % (device, tmpd, failure_reason)
+ )
# Be nice and ensure it ends with a slash
if not mountpoint.endswith("/"):
@@ -1821,31 +1928,37 @@ def boottime():
NULL_BYTES = b"\x00"
class timeval(ctypes.Structure):
- _fields_ = [
- ("tv_sec", ctypes.c_int64),
- ("tv_usec", ctypes.c_int64)
- ]
- libc = ctypes.CDLL(ctypes.util.find_library('c'))
+ _fields_ = [("tv_sec", ctypes.c_int64), ("tv_usec", ctypes.c_int64)]
+
+ libc = ctypes.CDLL(ctypes.util.find_library("c"))
size = ctypes.c_size_t()
size.value = ctypes.sizeof(timeval)
buf = timeval()
- if libc.sysctlbyname(b"kern.boottime" + NULL_BYTES, ctypes.byref(buf),
- ctypes.byref(size), None, 0) != -1:
+ if (
+ libc.sysctlbyname(
+ b"kern.boottime" + NULL_BYTES,
+ ctypes.byref(buf),
+ ctypes.byref(size),
+ None,
+ 0,
+ )
+ != -1
+ ):
return buf.tv_sec + buf.tv_usec / 1000000.0
raise RuntimeError("Unable to retrieve kern.boottime on this system")
def uptime():
- uptime_str = '??'
- method = 'unknown'
+ uptime_str = "??"
+ method = "unknown"
try:
if os.path.exists("/proc/uptime"):
- method = '/proc/uptime'
+ method = "/proc/uptime"
contents = load_file("/proc/uptime")
if contents:
uptime_str = contents.split()[0]
else:
- method = 'ctypes'
+ method = "ctypes"
# This is the *BSD codepath
uptime_str = str(time.time() - boottime())
@@ -1862,7 +1975,7 @@ def ensure_file(
path, mode: int = 0o644, *, preserve_mode: bool = False
) -> None:
write_file(
- path, content='', omode="ab", mode=mode, preserve_mode=preserve_mode
+ path, content="", omode="ab", mode=mode, preserve_mode=preserve_mode
)
@@ -1973,18 +2086,24 @@ def write_file(
if ensure_dir_exists:
ensure_dir(os.path.dirname(filename))
- if 'b' in omode.lower():
+ if "b" in omode.lower():
content = encode_text(content)
- write_type = 'bytes'
+ write_type = "bytes"
else:
content = decode_binary(content)
- write_type = 'characters'
+ write_type = "characters"
try:
mode_r = "%o" % mode
except TypeError:
mode_r = "%r" % mode
- LOG.debug("Writing to %s - %s: [%s] %s %s",
- filename, omode, mode_r, len(content), write_type)
+ LOG.debug(
+ "Writing to %s - %s: [%s] %s %s",
+ filename,
+ omode,
+ mode_r,
+ len(content),
+ write_type,
+ )
with SeLinuxGuard(path=filename):
with open(filename, omode) as fh:
fh.write(content)
@@ -2006,7 +2125,7 @@ def delete_dir_contents(dirname):
del_file(node_fullpath)
-def make_header(comment_char="#", base='created'):
+def make_header(comment_char="#", base="created"):
ci_ver = version.version_string()
header = str(comment_char)
header += " %s by cloud-init v. %s" % (base.title(), ci_ver)
@@ -2025,13 +2144,14 @@ def abs_join(base, *paths):
def shellify(cmdlist, add_header=True):
if not isinstance(cmdlist, (tuple, list)):
raise TypeError(
- "Input to shellify was type '%s'. Expected list or tuple." %
- (type_utils.obj_name(cmdlist)))
+ "Input to shellify was type '%s'. Expected list or tuple."
+ % (type_utils.obj_name(cmdlist))
+ )
- content = ''
+ content = ""
if add_header:
content += "#!/bin/sh\n"
- escaped = "%s%s%s%s" % ("'", '\\', "'", "'")
+ escaped = "%s%s%s%s" % ("'", "\\", "'", "'")
cmds_made = 0
for args in cmdlist:
# If the item is a list, wrap all items in single tick.
@@ -2040,7 +2160,7 @@ def shellify(cmdlist, add_header=True):
fixed = []
for f in args:
fixed.append("'%s'" % (str(f).replace("'", escaped)))
- content = "%s%s\n" % (content, ' '.join(fixed))
+ content = "%s%s\n" % (content, " ".join(fixed))
cmds_made += 1
elif isinstance(args, str):
content = "%s%s\n" % (content, args)
@@ -2051,7 +2171,8 @@ def shellify(cmdlist, add_header=True):
else:
raise TypeError(
"Unable to shellify type '%s'. Expected list, string, tuple. "
- "Got: %s" % (type_utils.obj_name(args), args))
+ "Got: %s" % (type_utils.obj_name(args), args)
+ )
LOG.debug("Shellified %s commands.", cmds_made)
return content
@@ -2059,9 +2180,9 @@ def shellify(cmdlist, add_header=True):
def strip_prefix_suffix(line, prefix=None, suffix=None):
if prefix and line.startswith(prefix):
- line = line[len(prefix):]
+ line = line[len(prefix) :]
if suffix and line.endswith(suffix):
- line = line[:-len(suffix)]
+ line = line[: -len(suffix)]
return line
@@ -2106,7 +2227,8 @@ def is_container():
_is_container_systemd,
_is_container_freebsd,
_is_container_upstart,
- _is_container_old_lxc)
+ _is_container_old_lxc,
+ )
for helper in checks:
if helper():
@@ -2145,10 +2267,10 @@ def is_container():
def is_lxd():
"""Check to see if we are running in a lxd container."""
- return os.path.exists('/dev/lxd/sock')
+ return os.path.exists("/dev/lxd/sock")
-def get_proc_env(pid, encoding='utf-8', errors='replace'):
+def get_proc_env(pid, encoding="utf-8", errors="replace"):
"""
Return the environment in a dict that a given process id was started with.
@@ -2228,7 +2350,7 @@ def parse_mount_info(path, mountinfo_lines, log=LOG, get_mnt_opts=False):
"""Return the mount information for PATH given the lines from
/proc/$$/mountinfo."""
- path_elements = [e for e in path.split('/') if e]
+ path_elements = [e for e in path.split("/") if e]
devpth = None
fs_type = None
match_mount_point = None
@@ -2243,12 +2365,13 @@ def parse_mount_info(path, mountinfo_lines, log=LOG, get_mnt_opts=False):
# The minimum number of elements in a valid line is 10.
if len(parts) < 10:
- log.debug("Line %d has two few columns (%d): %s",
- i + 1, len(parts), line)
+ log.debug(
+ "Line %d has two few columns (%d): %s", i + 1, len(parts), line
+ )
return None
mount_point = parts[4]
- mount_point_elements = [e for e in mount_point.split('/') if e]
+ mount_point_elements = [e for e in mount_point.split("/") if e]
# Ignore mounts deeper than the path in question.
if len(mount_point_elements) > len(path_elements):
@@ -2261,18 +2384,20 @@ def parse_mount_info(path, mountinfo_lines, log=LOG, get_mnt_opts=False):
# Ignore mount points higher than an already seen mount
# point.
- if (match_mount_point_elements is not None and
- len(match_mount_point_elements) > len(mount_point_elements)):
+ if match_mount_point_elements is not None and len(
+ match_mount_point_elements
+ ) > len(mount_point_elements):
continue
# Find the '-' which terminates a list of optional columns to
# find the filesystem type and the path to the device. See
# man 5 proc for the format of this file.
try:
- i = parts.index('-')
+ i = parts.index("-")
except ValueError:
- log.debug("Did not find column named '-' in line %d: %s",
- i + 1, line)
+ log.debug(
+ "Did not find column named '-' in line %d: %s", i + 1, line
+ )
return None
# Get the path to the device.
@@ -2280,8 +2405,9 @@ def parse_mount_info(path, mountinfo_lines, log=LOG, get_mnt_opts=False):
fs_type = parts[i + 1]
devpth = parts[i + 2]
except IndexError:
- log.debug("Too few columns after '-' column in line %d: %s",
- i + 1, line)
+ log.debug(
+ "Too few columns after '-' column in line %d: %s", i + 1, line
+ )
return None
match_mount_point = mount_point
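# Illustrative sketch (not part of this patch): locating the '-' separator in
# a /proc/<pid>/mountinfo line (sample line from man 5 proc) to read the
# filesystem type and device path, as the code above does.
line = (
    "36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root "
    "rw,errors=continue"
)
parts = line.split()
i = parts.index("-")
assert (parts[i + 1], parts[i + 2]) == ("ext3", "/dev/root")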
@@ -2308,12 +2434,12 @@ def parse_mtab(path):
def find_freebsd_part(fs):
- splitted = fs.split('/')
+ splitted = fs.split("/")
if len(splitted) == 3:
return splitted[2]
- elif splitted[2] in ['label', 'gpt', 'ufs']:
+ elif splitted[2] in ["label", "gpt", "ufs"]:
target_label = fs[5:]
- (part, _err) = subp.subp(['glabel', 'status', '-s'])
+ (part, _err) = subp.subp(["glabel", "status", "-s"])
for labels in part.split("\n"):
items = labels.split()
if len(items) > 0 and items[0] == target_label:
@@ -2325,8 +2451,8 @@ def find_freebsd_part(fs):
def find_dragonflybsd_part(fs):
- splitted = fs.split('/')
- if len(splitted) == 3 and splitted[1] == 'dev':
+ splitted = fs.split("/")
+ if len(splitted) == 3 and splitted[1] == "dev":
return splitted[2]
else:
LOG.warning("Unexpected input in find_dragonflybsd_part: %s", fs)
@@ -2336,19 +2462,19 @@ def get_path_dev_freebsd(path, mnt_list):
path_found = None
for line in mnt_list.split("\n"):
items = line.split()
- if (len(items) > 2 and os.path.exists(items[1] + path)):
+ if len(items) > 2 and os.path.exists(items[1] + path):
path_found = line
break
return path_found
def get_mount_info_freebsd(path):
- (result, err) = subp.subp(['mount', '-p', path], rcs=[0, 1])
+ (result, err) = subp.subp(["mount", "-p", path], rcs=[0, 1])
if len(err):
        # find a path if the input is not a mount point
- (mnt_list, err) = subp.subp(['mount', '-p'])
+ (mnt_list, err) = subp.subp(["mount", "-p"])
path_found = get_path_dev_freebsd(path, mnt_list)
- if (path_found is None):
+ if path_found is None:
return None
result = path_found
ret = result.split()
@@ -2358,17 +2484,17 @@ def get_mount_info_freebsd(path):
def get_device_info_from_zpool(zpool):
# zpool has 10 second timeout waiting for /dev/zfs LP: #1760173
- if not os.path.exists('/dev/zfs'):
- LOG.debug('Cannot get zpool info, no /dev/zfs')
+ if not os.path.exists("/dev/zfs"):
+ LOG.debug("Cannot get zpool info, no /dev/zfs")
return None
try:
- (zpoolstatus, err) = subp.subp(['zpool', 'status', zpool])
+ (zpoolstatus, err) = subp.subp(["zpool", "status", zpool])
except subp.ProcessExecutionError as err:
LOG.warning("Unable to get zpool status of %s: %s", zpool, err)
return None
if len(err):
return None
- r = r'.*(ONLINE).*'
+ r = r".*(ONLINE).*"
for line in zpoolstatus.split("\n"):
if re.search(r, line) and zpool not in line and "state" not in line:
disk = line.split()[0]
@@ -2377,17 +2503,21 @@ def get_device_info_from_zpool(zpool):
def parse_mount(path):
- (mountoutput, _err) = subp.subp(['mount'])
+ (mountoutput, _err) = subp.subp(["mount"])
mount_locs = mountoutput.splitlines()
    # there are 2 types of mount output we have to parse, therefore
    # the regex is a bit complex. To better understand this regex see:
# https://regex101.com/r/2F6c1k/1
# https://regex101.com/r/T2en7a/1
- regex = (r'^(/dev/[\S]+|.*zroot\S*?) on (/[\S]*) '
- r'(?=(?:type)[\s]+([\S]+)|\(([^,]*))')
+ regex = (
+ r"^(/dev/[\S]+|.*zroot\S*?) on (/[\S]*) "
+ r"(?=(?:type)[\s]+([\S]+)|\(([^,]*))"
+ )
if is_DragonFlyBSD():
- regex = (r'^(/dev/[\S]+|\S*?) on (/[\S]*) '
- r'(?=(?:type)[\s]+([\S]+)|\(([^,]*))')
+ regex = (
+ r"^(/dev/[\S]+|\S*?) on (/[\S]*) "
+ r"(?=(?:type)[\s]+([\S]+)|\(([^,]*))"
+ )
for line in mount_locs:
m = re.search(regex, line)
if not m:
@@ -2399,15 +2529,19 @@ def parse_mount(path):
fs_type = m.group(3)
if fs_type is None:
fs_type = m.group(4)
- LOG.debug('found line in mount -> devpth: %s, mount_point: %s, '
- 'fs_type: %s', devpth, mount_point, fs_type)
+ LOG.debug(
+ "found line in mount -> devpth: %s, mount_point: %s, fs_type: %s",
+ devpth,
+ mount_point,
+ fs_type,
+ )
# check whether the dev refers to a label on FreeBSD
# for example, if dev is '/dev/label/rootfs', we should
# continue finding the real device like '/dev/da0'.
# this is only valid for non zfs file systems as a zpool
# can have gpt labels as disk.
- devm = re.search('^(/dev/.+)p([0-9])$', devpth)
- if not devm and is_FreeBSD() and fs_type != 'zfs':
+ devm = re.search("^(/dev/.+)p([0-9])$", devpth)
+ if not devm and is_FreeBSD() and fs_type != "zfs":
return get_mount_info_freebsd(path)
elif mount_point == path:
return devpth, fs_type, mount_point
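# Illustrative sketch (not part of this patch): the regex above against the
# two mount output styles it handles; the filesystem type lands in group 3
# for the Linux "type" style and group 4 for the BSD parenthesised style.
import re

regex = (
    r"^(/dev/[\S]+|.*zroot\S*?) on (/[\S]*) "
    r"(?=(?:type)[\s]+([\S]+)|\(([^,]*))"
)
linux = re.search(regex, "/dev/sda1 on /boot type ext4 (rw,relatime,data=ordered)")
bsd = re.search(regex, "/dev/vtbd0p2 on / (ufs, local, journaled soft-updates)")
assert (linux.group(2), linux.group(3)) == ("/boot", "ext4")
assert (bsd.group(2), bsd.group(4)) == ("/", "ufs")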
@@ -2443,7 +2577,7 @@ def get_mount_info(path, log=LOG, get_mnt_opts=False):
#
# So use /proc/$$/mountinfo to find the device underlying the
# input path.
- mountinfo_path = '/proc/%s/mountinfo' % os.getpid()
+ mountinfo_path = "/proc/%s/mountinfo" % os.getpid()
if os.path.exists(mountinfo_path):
lines = load_file(mountinfo_path).splitlines()
return parse_mount_info(path, lines, log, get_mnt_opts)
@@ -2521,7 +2655,8 @@ def pathprefix2dict(base, required=None, optional=None, delim=os.path.sep):
if len(missing):
raise ValueError(
- 'Missing required files: {files}'.format(files=','.join(missing)))
+ "Missing required files: {files}".format(files=",".join(missing))
+ )
return ret
@@ -2529,16 +2664,19 @@ def pathprefix2dict(base, required=None, optional=None, delim=os.path.sep):
def read_meminfo(meminfo="/proc/meminfo", raw=False):
# read a /proc/meminfo style file and return
# a dict with 'total', 'free', and 'available'
- mpliers = {'kB': 2 ** 10, 'mB': 2 ** 20, 'B': 1, 'gB': 2 ** 30}
- kmap = {'MemTotal:': 'total', 'MemFree:': 'free',
- 'MemAvailable:': 'available'}
+ mpliers = {"kB": 2 ** 10, "mB": 2 ** 20, "B": 1, "gB": 2 ** 30}
+ kmap = {
+ "MemTotal:": "total",
+ "MemFree:": "free",
+ "MemAvailable:": "available",
+ }
ret = {}
for line in load_file(meminfo).splitlines():
try:
key, value, unit = line.split()
except ValueError:
key, value = line.split()
- unit = 'B'
+ unit = "B"
if raw:
ret[key] = int(value) * mpliers[unit]
elif key in kmap:
@@ -2549,21 +2687,21 @@ def read_meminfo(meminfo="/proc/meminfo", raw=False):
def human2bytes(size):
"""Convert human string or integer to size in bytes
- 10M => 10485760
- .5G => 536870912
+ 10M => 10485760
+ .5G => 536870912
"""
size_in = size
if size.endswith("B"):
size = size[:-1]
- mpliers = {'B': 1, 'K': 2 ** 10, 'M': 2 ** 20, 'G': 2 ** 30, 'T': 2 ** 40}
+ mpliers = {"B": 1, "K": 2 ** 10, "M": 2 ** 20, "G": 2 ** 30, "T": 2 ** 40}
num = size
- mplier = 'B'
+ mplier = "B"
for m in mpliers:
if size.endswith(m):
mplier = m
- num = size[0:-len(m)]
+ num = size[0 : -len(m)]
try:
num = float(num)
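# Illustrative sketch (not part of this patch): the docstring examples above,
# worked through with the multiplier table used by human2bytes().
mpliers = {"B": 1, "K": 2 ** 10, "M": 2 ** 20, "G": 2 ** 30, "T": 2 ** 40}
assert int(10 * mpliers["M"]) == 10485760      # "10M"
assert int(0.5 * mpliers["G"]) == 536870912    # ".5G"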
@@ -2580,9 +2718,9 @@ def is_x86(uname_arch=None):
"""Return True if platform is x86-based"""
if uname_arch is None:
uname_arch = os.uname()[4]
- x86_arch_match = (
- uname_arch == 'x86_64' or
- (uname_arch[0] == 'i' and uname_arch[2:] == '86'))
+ x86_arch_match = uname_arch == "x86_64" or (
+ uname_arch[0] == "i" and uname_arch[2:] == "86"
+ )
return x86_arch_match
@@ -2593,7 +2731,7 @@ def message_from_string(string):
def get_installed_packages(target=None):
- (out, _) = subp.subp(['dpkg-query', '--list'], target=target, capture=True)
+ (out, _) = subp.subp(["dpkg-query", "--list"], target=target, capture=True)
pkgs_inst = set()
for line in out.splitlines():
@@ -2614,17 +2752,17 @@ def system_is_snappy():
orpath = "/etc/os-release"
try:
orinfo = load_shell_content(load_file(orpath, quiet=True))
- if orinfo.get('ID', '').lower() == "ubuntu-core":
+ if orinfo.get("ID", "").lower() == "ubuntu-core":
return True
except ValueError as e:
LOG.warning("Unexpected error loading '%s': %s", orpath, e)
cmdline = get_cmdline()
- if 'snap_core=' in cmdline:
+ if "snap_core=" in cmdline:
return True
content = load_file("/etc/system-image/channel.ini", quiet=True)
- if 'ubuntu-core' in content.lower():
+ if "ubuntu-core" in content.lower():
return True
if os.path.isdir("/etc/system-image/config.d/"):
return True
@@ -2636,7 +2774,7 @@ def indent(text, prefix):
lines = []
for line in text.splitlines(True):
lines.append(prefix + line)
- return ''.join(lines)
+ return "".join(lines)
def rootdev_from_cmdline(cmdline):
@@ -2651,12 +2789,13 @@ def rootdev_from_cmdline(cmdline):
if found.startswith("/dev/"):
return found
if found.startswith("LABEL="):
- return "/dev/disk/by-label/" + found[len("LABEL="):]
+ return "/dev/disk/by-label/" + found[len("LABEL=") :]
if found.startswith("UUID="):
- return "/dev/disk/by-uuid/" + found[len("UUID="):].lower()
+ return "/dev/disk/by-uuid/" + found[len("UUID=") :].lower()
if found.startswith("PARTUUID="):
- disks_path = ("/dev/disk/by-partuuid/" +
- found[len("PARTUUID="):].lower())
+ disks_path = (
+ "/dev/disk/by-partuuid/" + found[len("PARTUUID=") :].lower()
+ )
if os.path.exists(disks_path):
return disks_path
results = find_devs_with(found)
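# Illustrative sketch (not part of this patch): how the root= prefixes handled
# above map to /dev/disk paths (UUID values are lower-cased, as in the code).
found = "LABEL=cloudimg-rootfs"
assert "/dev/disk/by-label/" + found[len("LABEL=") :] == (
    "/dev/disk/by-label/cloudimg-rootfs"
)
found = "UUID=6A0B-1C2D"
assert "/dev/disk/by-uuid/" + found[len("UUID=") :].lower() == (
    "/dev/disk/by-uuid/6a0b-1c2d"
)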
@@ -2671,9 +2810,9 @@ def rootdev_from_cmdline(cmdline):
def load_shell_content(content, add_empty=False, empty_val=None):
"""Given shell like syntax (key=value\nkey2=value2\n) in content
- return the data in dictionary form. If 'add_empty' is True
- then add entries in to the returned dictionary for 'VAR='
- variables. Set their value to empty_val."""
+ return the data in dictionary form. If 'add_empty' is True
+    then add entries into the returned dictionary for 'VAR='
+ variables. Set their value to empty_val."""
def _shlex_split(blob):
return shlex.split(blob, comments=True)
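# Illustrative sketch (not part of this patch): _shlex_split() above discards
# '#' comments, leaving key=value tokens that are split into a dict.
import shlex

content = "NAME=ubuntu\n# a comment line\nID_LIKE=debian\n"
tokens = shlex.split(content, comments=True)
assert tokens == ["NAME=ubuntu", "ID_LIKE=debian"]
assert dict(t.split("=", 1) for t in tokens) == {
    "NAME": "ubuntu",
    "ID_LIKE": "debian",
}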
@@ -2689,33 +2828,42 @@ def load_shell_content(content, add_empty=False, empty_val=None):
return data
-def wait_for_files(flist, maxwait, naplen=.5, log_pre=""):
+def wait_for_files(flist, maxwait, naplen=0.5, log_pre=""):
need = set(flist)
waited = 0
while True:
need -= set([f for f in need if os.path.exists(f)])
if len(need) == 0:
- LOG.debug("%sAll files appeared after %s seconds: %s",
- log_pre, waited, flist)
+ LOG.debug(
+ "%sAll files appeared after %s seconds: %s",
+ log_pre,
+ waited,
+ flist,
+ )
return []
if waited == 0:
- LOG.debug("%sWaiting up to %s seconds for the following files: %s",
- log_pre, maxwait, flist)
+ LOG.debug(
+ "%sWaiting up to %s seconds for the following files: %s",
+ log_pre,
+ maxwait,
+ flist,
+ )
if waited + naplen > maxwait:
break
time.sleep(naplen)
waited += naplen
- LOG.debug("%sStill missing files after %s seconds: %s",
- log_pre, maxwait, need)
+ LOG.debug(
+ "%sStill missing files after %s seconds: %s", log_pre, maxwait, need
+ )
return need
def mount_is_read_write(mount_point):
"""Check whether the given mount point is mounted rw"""
result = get_mount_info(mount_point, get_mnt_opts=True)
- mount_opts = result[-1].split(',')
- return mount_opts[0] == 'rw'
+ mount_opts = result[-1].split(",")
+ return mount_opts[0] == "rw"
def udevadm_settle(exists=None, timeout=None):
@@ -2725,9 +2873,9 @@ def udevadm_settle(exists=None, timeout=None):
# skip the settle if the requested path already exists
if os.path.exists(exists):
return
- settle_cmd.extend(['--exit-if-exists=%s' % exists])
+ settle_cmd.extend(["--exit-if-exists=%s" % exists])
if timeout:
- settle_cmd.extend(['--timeout=%s' % timeout])
+ settle_cmd.extend(["--timeout=%s" % timeout])
return subp.subp(settle_cmd)
@@ -2740,7 +2888,7 @@ def get_proc_ppid(pid):
try:
contents = load_file("/proc/%s/stat" % pid, quiet=True)
except IOError as e:
- LOG.warning('Failed to load /proc/%s/stat. %s', pid, e)
+ LOG.warning("Failed to load /proc/%s/stat. %s", pid, e)
if contents:
parts = contents.split(" ", 4)
# man proc says
@@ -2749,7 +2897,7 @@ def get_proc_ppid(pid):
return ppid
-def error(msg, rc=1, fmt='Error:\n{}', sys_exit=False):
+def error(msg, rc=1, fmt="Error:\n{}", sys_exit=False):
"""
Print error to stderr and return or exit
@@ -2763,4 +2911,5 @@ def error(msg, rc=1, fmt='Error:\n{}', sys_exit=False):
sys.exit(rc)
return rc
+
# vi: ts=4 expandtab
diff --git a/cloudinit/version.py b/cloudinit/version.py
index ab93f902..6ad90bd3 100644
--- a/cloudinit/version.py
+++ b/cloudinit/version.py
@@ -5,20 +5,21 @@
# This file is part of cloud-init. See LICENSE file for license information.
__VERSION__ = "21.4"
-_PACKAGED_VERSION = '@@PACKAGED_VERSION@@'
+_PACKAGED_VERSION = "@@PACKAGED_VERSION@@"
FEATURES = [
# supports network config version 1
- 'NETWORK_CONFIG_V1',
+ "NETWORK_CONFIG_V1",
# supports network config version 2 (netplan)
- 'NETWORK_CONFIG_V2',
+ "NETWORK_CONFIG_V2",
]
def version_string():
"""Extract a version string from cloud-init."""
- if not _PACKAGED_VERSION.startswith('@@'):
+ if not _PACKAGED_VERSION.startswith("@@"):
return _PACKAGED_VERSION
return __VERSION__
+
# vi: ts=4 expandtab
diff --git a/cloudinit/warnings.py b/cloudinit/warnings.py
index 1da90c40..7ddd2f8d 100644
--- a/cloudinit/warnings.py
+++ b/cloudinit/warnings.py
@@ -1,16 +1,16 @@
# This file is part of cloud-init. See LICENSE file for license information.
+import os
+import time
+
from cloudinit import helpers
from cloudinit import log as logging
from cloudinit import util
-import os
-import time
-
LOG = logging.getLogger()
WARNINGS = {
- 'non_ec2_md': """
+ "non_ec2_md": """
This system is using the EC2 Metadata Service, but does not appear to
be running on Amazon EC2 or one of cloud-init's known platforms that
provide an EC2 Metadata service. In the future, cloud-init may stop
@@ -35,7 +35,7 @@ putting that content into
datasource:
Ec2:
strict_id: false""",
- 'dsid_missing_source': """
+ "dsid_missing_source": """
A new feature in cloud-init identified possible datasources for
this system as:
{dslist}
@@ -64,8 +64,9 @@ warnings:
def _get_warn_dir(cfg):
paths = helpers.Paths(
- path_cfgs=cfg.get('system_info', {}).get('paths', {}))
- return paths.get_ipath_cur('warnings')
+ path_cfgs=cfg.get("system_info", {}).get("paths", {})
+ )
+ return paths.get_ipath_cur("warnings")
def _load_warn_cfg(cfg, name, mode=True, sleep=None):
@@ -77,7 +78,7 @@ def _load_warn_cfg(cfg, name, mode=True, sleep=None):
if not cfg or not isinstance(cfg, dict):
return default
- ncfg = util.get_cfg_by_path(cfg, ('warnings', name))
+ ncfg = util.get_cfg_by_path(cfg, ("warnings", name))
if ncfg is None:
return default
@@ -128,7 +129,8 @@ def show_warning(name, cfg=None, sleep=None, mode=True, **kwargs):
util.write_file(
os.path.join(_get_warn_dir(cfg), name),
- topline + "\n".join(fmtlines) + "\n" + topline)
+ topline + "\n".join(fmtlines) + "\n" + topline,
+ )
LOG.warning("%s%s\n%s", topline, "\n".join(fmtlines), closeline)
@@ -136,4 +138,5 @@ def show_warning(name, cfg=None, sleep=None, mode=True, **kwargs):
LOG.debug("sleeping %d seconds for warning '%s'", sleep, name)
time.sleep(sleep)
+
# vi: ts=4 expandtab