Diffstat (limited to 'cloudinit')
-rw-r--r--  cloudinit/cmd/main.py  166
-rw-r--r--  cloudinit/config/cc_set_hostname.py  2
-rw-r--r--  cloudinit/distros/rhel.py  19
-rw-r--r--  cloudinit/ec2_utils.py  5
-rw-r--r--  cloudinit/helpers.py  2
-rw-r--r--  cloudinit/net/eni.py  33
-rw-r--r--  cloudinit/net/sysconfig.py  8
-rw-r--r--  cloudinit/settings.py  2
-rw-r--r--  cloudinit/sources/DataSourceAliYun.py  4
-rw-r--r--  cloudinit/sources/DataSourceEc2.py  147
-rw-r--r--  cloudinit/sources/DataSourceOVF.py  37
-rw-r--r--  cloudinit/sources/DataSourceOpenStack.py  15
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_nic.py  24
-rw-r--r--  cloudinit/ssh_util.py  3
-rw-r--r--  cloudinit/stages.py  15
-rw-r--r--  cloudinit/util.py  44
-rw-r--r--  cloudinit/warnings.py  139
17 files changed, 550 insertions, 115 deletions
diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
index c83496c5..6ff4e1c0 100644
--- a/cloudinit/cmd/main.py
+++ b/cloudinit/cmd/main.py
@@ -26,8 +26,10 @@ from cloudinit import signal_handler
from cloudinit import sources
from cloudinit import stages
from cloudinit import templater
+from cloudinit import url_helper
from cloudinit import util
from cloudinit import version
+from cloudinit import warnings
from cloudinit import reporting
from cloudinit.reporting import events
@@ -129,23 +131,104 @@ def apply_reporting_cfg(cfg):
reporting.update_configuration(cfg.get('reporting'))
+def parse_cmdline_url(cmdline, names=('cloud-config-url', 'url')):
+ data = util.keyval_str_to_dict(cmdline)
+ for key in names:
+ if key in data:
+ return key, data[key]
+ raise KeyError("No keys (%s) found in string '%s'" %
+ (names, cmdline))
+
+
+def attempt_cmdline_url(path, network=True, cmdline=None):
+ """Write data from url referenced in command line to path.
+
+ path: a file to write content to if downloaded.
+ network: should network access be assumed.
+ cmdline: the cmdline to parse for cloud-config-url.
+
+ This is used by the MAAS datasource in an "ephemeral" (read-only root)
+ environment where the instance netboots to an iscsi ro root,
+ and the entity that controls the pxe config has to configure
+ the MAAS datasource.
+
+ An attempt is made on network urls even in the local datasource
+ for the case of a network set up in the initramfs.
+
+ Return value is a tuple of a log level (e.g. logging.DEBUG)
+ and a message indicating what happened.
+ """
+
+ if cmdline is None:
+ cmdline = util.get_cmdline()
+
+ try:
+ cmdline_name, url = parse_cmdline_url(cmdline)
+ except KeyError:
+ return (logging.DEBUG, "No kernel command line url found.")
+
+ path_is_local = url.startswith("file://") or url.startswith("/")
+
+ if path_is_local and os.path.exists(path):
+ if network:
+ m = ("file '%s' existed, possibly from local stage download"
+ " of command line url '%s'. Not re-writing." % (path, url))
+ level = logging.INFO
+ if path_is_local:
+ level = logging.DEBUG
+ else:
+ m = ("file '%s' existed, possibly from previous boot download"
+ " of command line url '%s'. Not re-writing." % (path, url))
+ level = logging.WARN
+
+ return (level, m)
+
+ kwargs = {'url': url, 'timeout': 10, 'retries': 2}
+ if network or path_is_local:
+ level = logging.WARN
+ kwargs['sec_between'] = 1
+ else:
+ level = logging.DEBUG
+ kwargs['sec_between'] = .1
+
+ data = None
+ header = b'#cloud-config'
+ try:
+ resp = util.read_file_or_url(**kwargs)
+ if resp.ok():
+ data = resp.contents
+ if not resp.contents.startswith(header):
+ if cmdline_name == 'cloud-config-url':
+ level = logging.WARN
+ else:
+ level = logging.INFO
+ return (
+ level,
+ "contents of '%s' did not start with %s" % (url, header))
+ else:
+ return (level,
+ "url '%s' returned code %s. Ignoring." % (url, resp.code))
+
+ except url_helper.UrlError as e:
+ return (level, "retrieving url '%s' failed: %s" % (url, e))
+
+ util.write_file(path, data, mode=0o600)
+ return (logging.INFO,
+ "wrote cloud-config data from %s='%s' to %s" %
+ (cmdline_name, url, path))
+
+
def main_init(name, args):
deps = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK]
if args.local:
deps = [sources.DEP_FILESYSTEM]
- if not args.local:
- # See doc/kernel-cmdline.txt
- #
- # This is used in maas datasource, in "ephemeral" (read-only root)
- # environment where the instance netboots to iscsi ro root.
- # and the entity that controls the pxe config has to configure
- # the maas datasource.
- #
- # Could be used elsewhere, only works on network based (not local).
- root_name = "%s.d" % (CLOUD_CONFIG)
- target_fn = os.path.join(root_name, "91_kernel_cmdline_url.cfg")
- util.read_write_cmdline_url(target_fn)
+ early_logs = []
+ early_logs.append(
+ attempt_cmdline_url(
+ path=os.path.join("%s.d" % CLOUD_CONFIG,
+ "91_kernel_cmdline_url.cfg"),
+ network=not args.local))
# Cloud-init 'init' stage is broken up into the following sub-stages
# 1. Ensure that the init object fetches its config without errors
@@ -171,12 +254,14 @@ def main_init(name, args):
outfmt = None
errfmt = None
try:
- LOG.debug("Closing stdin")
+ early_logs.append((logging.DEBUG, "Closing stdin."))
util.close_stdin()
(outfmt, errfmt) = util.fixup_output(init.cfg, name)
except Exception:
- util.logexc(LOG, "Failed to setup output redirection!")
- print_exc("Failed to setup output redirection!")
+ msg = "Failed to setup output redirection!"
+ util.logexc(LOG, msg)
+ print_exc(msg)
+ early_logs.append((logging.WARN, msg))
if args.debug:
# Reset so that all the debug handlers are closed out
LOG.debug(("Logging being reset, this logger may no"
@@ -190,6 +275,10 @@ def main_init(name, args):
# been redirected and log now configured.
welcome(name, msg=w_msg)
+ # re-play early log messages before logging was setup
+ for lvl, msg in early_logs:
+ LOG.log(lvl, msg)
+
# Stage 3
try:
init.initialize()
@@ -224,8 +313,15 @@ def main_init(name, args):
" would allow us to stop early.")
else:
existing = "check"
- if util.get_cfg_option_bool(init.cfg, 'manual_cache_clean', False):
+ mcfg = util.get_cfg_option_bool(init.cfg, 'manual_cache_clean', False)
+ if mcfg:
+ LOG.debug("manual cache clean set from config")
existing = "trust"
+ else:
+ mfile = path_helper.get_ipath_cur("manual_clean_marker")
+ if os.path.exists(mfile):
+ LOG.debug("manual cache clean found from marker: %s", mfile)
+ existing = "trust"
init.purge_cache()
# Delete the non-net file as well
@@ -318,10 +414,48 @@ def main_init(name, args):
# give the activated datasource a chance to adjust
init.activate_datasource()
+ di_report_warn(datasource=init.datasource, cfg=init.cfg)
+
# Stage 10
return (init.datasource, run_module_section(mods, name, name))
+def di_report_warn(datasource, cfg):
+ if 'di_report' not in cfg:
+ LOG.debug("no di_report found in config.")
+ return
+
+ dicfg = cfg.get('di_report', {})
+ if not isinstance(dicfg, dict):
+ LOG.warn("di_report config not a dictionary: %s", dicfg)
+ return
+
+ dslist = dicfg.get('datasource_list')
+ if dslist is None:
+ LOG.warn("no 'datasource_list' found in di_report.")
+ return
+ elif not isinstance(dslist, list):
+ LOG.warn("di_report/datasource_list not a list: %s", dslist)
+ return
+
+ # ds.__module__ is like cloudinit.sources.DataSourceName
+ # where Name is the thing that shows up in datasource_list.
+ modname = datasource.__module__.rpartition(".")[2]
+ if modname.startswith(sources.DS_PREFIX):
+ modname = modname[len(sources.DS_PREFIX):]
+ else:
+ LOG.warn("Datasource '%s' came from unexpected module '%s'.",
+ datasource, modname)
+
+ if modname in dslist:
+ LOG.debug("used datasource '%s' from '%s' was in di_report's list: %s",
+ datasource, modname, dslist)
+ return
+
+ warnings.show_warning('dsid_missing_source', cfg,
+ source=modname, dslist=str(dslist))
+
+
def main_modules(action_name, args):
name = args.mode
# Cloud-init 'modules' stages are broken up into the following sub-stages
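The new attempt_cmdline_url() replaces util.read_write_cmdline_url(); it returns a (log level, message) tuple rather than logging directly, so main_init() can buffer messages in early_logs until output redirection and logging are configured. A rough usage sketch based only on the signatures above (the command line and target path are illustrative):

    # Sketch only: mirrors how main_init() buffers early log messages.
    import logging

    early_logs = [attempt_cmdline_url(
        path='/etc/cloud/cloud.cfg.d/91_kernel_cmdline_url.cfg',
        network=True,
        cmdline='root=/dev/sda1 cloud-config-url=http://10.0.0.1/seed.cfg')]

    # ... once logging is configured, replay the buffered messages ...
    for lvl, msg in early_logs:
        logging.getLogger().log(lvl, msg)
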
diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py
index e42799f9..aa3dfe5f 100644
--- a/cloudinit/config/cc_set_hostname.py
+++ b/cloudinit/config/cc_set_hostname.py
@@ -27,7 +27,7 @@ will be used.
**Config keys**::
- perserve_hostname: <true/false>
+ preserve_hostname: <true/false>
fqdn: <fqdn>
hostname: <fqdn/hostname>
"""
diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py
index aa558381..7498c63a 100644
--- a/cloudinit/distros/rhel.py
+++ b/cloudinit/distros/rhel.py
@@ -190,13 +190,18 @@ class Distro(distros.Distro):
if pkgs is None:
pkgs = []
- cmd = ['yum']
- # If enabled, then yum will be tolerant of errors on the command line
- # with regard to packages.
- # For example: if you request to install foo, bar and baz and baz is
- # installed; yum won't error out complaining that baz is already
- # installed.
- cmd.append("-t")
+ if util.which('dnf'):
+ LOG.debug('Using DNF for package management')
+ cmd = ['dnf']
+ else:
+ LOG.debug('Using YUM for package management')
+ # the '-t' argument makes yum tolerant of errors on the command
+ # line with regard to packages.
+ #
+ # For example: if you request to install foo, bar and baz and baz
+ # is installed; yum won't error out complaining that baz is already
+ # installed.
+ cmd = ['yum', '-t']
# Determines whether or not yum prompts for confirmation
# of critical actions. We don't want to prompt...
cmd.append("-y")
diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py
index c656ef14..13691549 100644
--- a/cloudinit/ec2_utils.py
+++ b/cloudinit/ec2_utils.py
@@ -28,7 +28,7 @@ class MetadataLeafDecoder(object):
def __call__(self, field, blob):
if not blob:
- return blob
+ return ''
try:
blob = util.decode_binary(blob)
except UnicodeDecodeError:
@@ -82,6 +82,9 @@ class MetadataMaterializer(object):
field_name = get_name(field)
if not field or not field_name:
continue
+ # Don't materialize credentials
+ if field_name == 'security-credentials':
+ continue
if has_children(field):
if field_name not in children:
children.append(field_name)
diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py
index 4528fb01..7435d58d 100644
--- a/cloudinit/helpers.py
+++ b/cloudinit/helpers.py
@@ -339,6 +339,8 @@ class Paths(object):
"vendordata_raw": "vendor-data.txt",
"vendordata": "vendor-data.txt.i",
"instance_id": ".instance-id",
+ "manual_clean_marker": "manual-clean",
+ "warnings": "warnings",
}
# Set when a datasource becomes active
self.datasource = ds
diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py
index b06ffac9..5b249f1f 100644
--- a/cloudinit/net/eni.py
+++ b/cloudinit/net/eni.py
@@ -90,8 +90,6 @@ def _iface_add_attrs(iface, index):
def _iface_start_entry(iface, index, render_hwaddress=False):
fullname = iface['name']
- if index != 0:
- fullname += ":%s" % index
control = iface['control']
if control == "auto":
@@ -113,6 +111,16 @@ def _iface_start_entry(iface, index, render_hwaddress=False):
return lines
+def _subnet_is_ipv6(subnet):
+ # 'static6' or 'dhcp6'
+ if subnet['type'].endswith('6'):
+ # This is a 'dhcp6' or 'static6' type subnet.
+ return True
+ elif subnet['type'] == 'static' and ":" in subnet['address']:
+ return True
+ return False
+
+
def _parse_deb_config_data(ifaces, contents, src_dir, src_path):
"""Parses the file contents, placing result into ifaces.
@@ -354,21 +362,23 @@ class Renderer(renderer.Renderer):
sections = []
subnets = iface.get('subnets', {})
if subnets:
- for index, subnet in zip(range(0, len(subnets)), subnets):
+ for index, subnet in enumerate(subnets):
iface['index'] = index
iface['mode'] = subnet['type']
iface['control'] = subnet.get('control', 'auto')
subnet_inet = 'inet'
- if iface['mode'].endswith('6'):
- # This is a request for DHCPv6.
- subnet_inet += '6'
- elif iface['mode'] == 'static' and ":" in subnet['address']:
- # This is a static IPv6 address.
+ if _subnet_is_ipv6(subnet):
subnet_inet += '6'
iface['inet'] = subnet_inet
- if iface['mode'].startswith('dhcp'):
+ if subnet['type'].startswith('dhcp'):
iface['mode'] = 'dhcp'
+ # do not emit multiple 'auto $IFACE' lines as older (precise)
+ # ifupdown complains
+ if True in ["auto %s" % (iface['name']) in line
+ for line in sections]:
+ iface['control'] = 'alias'
+
lines = list(
_iface_start_entry(
iface, index, render_hwaddress=render_hwaddress) +
@@ -378,11 +388,6 @@ class Renderer(renderer.Renderer):
for route in subnet.get('routes', []):
lines.extend(self._render_route(route, indent=" "))
- if len(subnets) > 1 and index == 0:
- tmpl = " post-up ifup %s:%s\n"
- for i in range(1, len(subnets)):
- lines.append(tmpl % (iface['name'], i))
-
sections.append(lines)
else:
# ifenslave docs say to auto the slave devices
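The helper _subnet_is_ipv6() centralizes the inet vs. inet6 decision that was previously inlined in the subnet loop. A minimal sketch of its behaviour, with made-up subnet dicts of the shape the renderer passes in:

    # Hypothetical subnet dicts; only 'type' and 'address' matter here.
    assert _subnet_is_ipv6({'type': 'dhcp6'})
    assert _subnet_is_ipv6({'type': 'static6', 'address': 'fd00::2/64'})
    assert _subnet_is_ipv6({'type': 'static', 'address': '2001:db8::10/64'})
    assert not _subnet_is_ipv6({'type': 'static', 'address': '192.168.1.10/24'})
    assert not _subnet_is_ipv6({'type': 'dhcp'})
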
diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
index 9be74070..6e7739fb 100644
--- a/cloudinit/net/sysconfig.py
+++ b/cloudinit/net/sysconfig.py
@@ -282,12 +282,12 @@ class Renderer(renderer.Renderer):
if len(iface_subnets) == 1:
cls._render_subnet(iface_cfg, route_cfg, iface_subnets[0])
elif len(iface_subnets) > 1:
- for i, iface_subnet in enumerate(iface_subnets,
- start=len(iface.children)):
+ for i, isubnet in enumerate(iface_subnets,
+ start=len(iface_cfg.children)):
iface_sub_cfg = iface_cfg.copy()
iface_sub_cfg.name = "%s:%s" % (iface_name, i)
- iface.children.append(iface_sub_cfg)
- cls._render_subnet(iface_sub_cfg, route_cfg, iface_subnet)
+ iface_cfg.children.append(iface_sub_cfg)
+ cls._render_subnet(iface_sub_cfg, route_cfg, isubnet)
@classmethod
def _render_bond_interfaces(cls, network_state, iface_contents):
diff --git a/cloudinit/settings.py b/cloudinit/settings.py
index b1fdd31f..692ff5e5 100644
--- a/cloudinit/settings.py
+++ b/cloudinit/settings.py
@@ -14,6 +14,8 @@ CFG_ENV_NAME = "CLOUD_CFG"
# This is expected to be a yaml formatted file
CLOUD_CONFIG = '/etc/cloud/cloud.cfg'
+RUN_CLOUD_CONFIG = '/run/cloud-init/cloud.cfg'
+
# What u get if no config is provided
CFG_BUILTIN = {
'datasource_list': [
diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py
index 2d00255c..9debe947 100644
--- a/cloudinit/sources/DataSourceAliYun.py
+++ b/cloudinit/sources/DataSourceAliYun.py
@@ -22,6 +22,10 @@ class DataSourceAliYun(EC2.DataSourceEc2):
def get_public_ssh_keys(self):
return parse_public_keys(self.metadata.get('public-keys', {}))
+ @property
+ def cloud_platform(self):
+ return EC2.Platforms.ALIYUN
+
def parse_public_keys(public_keys):
keys = []
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index c657fd09..6f01a139 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -16,18 +16,31 @@ from cloudinit import log as logging
from cloudinit import sources
from cloudinit import url_helper as uhelp
from cloudinit import util
+from cloudinit import warnings
LOG = logging.getLogger(__name__)
# Which version we are requesting of the ec2 metadata apis
DEF_MD_VERSION = '2009-04-04'
+STRICT_ID_PATH = ("datasource", "Ec2", "strict_id")
+STRICT_ID_DEFAULT = "warn"
+
+
+class Platforms(object):
+ ALIYUN = "AliYun"
+ AWS = "AWS"
+ BRIGHTBOX = "Brightbox"
+ SEEDED = "Seeded"
+ UNKNOWN = "Unknown"
+
class DataSourceEc2(sources.DataSource):
# Default metadata urls that will be used if none are provided
# They will be checked for 'resolveability' and some of the
# following may be discarded if they do not resolve
metadata_urls = ["http://169.254.169.254", "http://instance-data.:8773"]
+ _cloud_platform = None
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -41,8 +54,18 @@ class DataSourceEc2(sources.DataSource):
self.userdata_raw = seed_ret['user-data']
self.metadata = seed_ret['meta-data']
LOG.debug("Using seeded ec2 data from %s", self.seed_dir)
+ self._cloud_platform = Platforms.SEEDED
return True
+ strict_mode, _sleep = read_strict_mode(
+ util.get_cfg_by_path(self.sys_cfg, STRICT_ID_PATH,
+ STRICT_ID_DEFAULT), ("warn", None))
+
+ LOG.debug("strict_mode: %s, cloud_platform=%s",
+ strict_mode, self.cloud_platform)
+ if strict_mode == "true" and self.cloud_platform == Platforms.UNKNOWN:
+ return False
+
try:
if not self.wait_for_metadata_service():
return False
@@ -51,8 +74,8 @@ class DataSourceEc2(sources.DataSource):
ec2.get_instance_userdata(self.api_ver, self.metadata_address)
self.metadata = ec2.get_instance_metadata(self.api_ver,
self.metadata_address)
- LOG.debug("Crawl of metadata service took %s seconds",
- int(time.time() - start_time))
+ LOG.debug("Crawl of metadata service took %.3f seconds",
+ time.time() - start_time)
return True
except Exception:
util.logexc(LOG, "Failed reading from metadata address %s",
@@ -190,6 +213,126 @@ class DataSourceEc2(sources.DataSource):
return az[:-1]
return None
+ @property
+ def cloud_platform(self):
+ if self._cloud_platform is None:
+ self._cloud_platform = identify_platform()
+ return self._cloud_platform
+
+ def activate(self, cfg, is_new_instance):
+ if not is_new_instance:
+ return
+ if self.cloud_platform == Platforms.UNKNOWN:
+ warn_if_necessary(
+ util.get_cfg_by_path(cfg, STRICT_ID_PATH, STRICT_ID_DEFAULT),
+ cfg)
+
+
+def read_strict_mode(cfgval, default):
+ try:
+ return parse_strict_mode(cfgval)
+ except ValueError as e:
+ LOG.warn(e)
+ return default
+
+
+def parse_strict_mode(cfgval):
+ # given a mode like:
+ # true, false, warn,[sleep]
+ # return tuple with string mode (true|false|warn) and sleep.
+ if cfgval is True:
+ return 'true', None
+ if cfgval is False:
+ return 'false', None
+
+ if not cfgval:
+ return 'warn', 0
+
+ mode, _, sleep = cfgval.partition(",")
+ if mode not in ('true', 'false', 'warn'):
+ raise ValueError(
+ "Invalid mode '%s' in strict_id setting '%s': "
+ "Expected one of 'true', 'false', 'warn'." % (mode, cfgval))
+
+ if sleep:
+ try:
+ sleep = int(sleep)
+ except ValueError:
+ raise ValueError("Invalid sleep '%s' in strict_id setting '%s': "
+ "not an integer" % (sleep, cfgval))
+ else:
+ sleep = None
+
+ return mode, sleep
+
+
+def warn_if_necessary(cfgval, cfg):
+ try:
+ mode, sleep = parse_strict_mode(cfgval)
+ except ValueError as e:
+ LOG.warn(e)
+ return
+
+ if mode == "false":
+ return
+
+ warnings.show_warning('non_ec2_md', cfg, mode=True, sleep=sleep)
+
+
+def identify_aws(data):
+ # data is a dictionary returned by _collect_platform_data.
+ if (data['uuid'].startswith('ec2') and
+ (data['uuid_source'] == 'hypervisor' or
+ data['uuid'] == data['serial'])):
+ return Platforms.AWS
+
+ return None
+
+
+def identify_brightbox(data):
+ if data['serial'].endswith('brightbox.com'):
+ return Platforms.BRIGHTBOX
+
+
+def identify_platform():
+ # identify the platform and return an entry in Platforms.
+ data = _collect_platform_data()
+ checks = (identify_aws, identify_brightbox, lambda x: Platforms.UNKNOWN)
+ for checker in checks:
+ try:
+ result = checker(data)
+ if result:
+ return result
+ except Exception as e:
+ LOG.warn("calling %s with %s raised exception: %s",
+ checker, data, e)
+
+
+def _collect_platform_data():
+ # returns a dictionary with all lower case values:
+ # uuid: system-uuid from dmi or /sys/hypervisor
+ # uuid_source: 'hypervisor' (/sys/hypervisor/uuid) or 'dmi'
+ # serial: dmi 'system-serial-number' (/sys/.../product_serial)
+ data = {}
+ try:
+ uuid = util.load_file("/sys/hypervisor/uuid").strip()
+ data['uuid_source'] = 'hypervisor'
+ except Exception:
+ uuid = util.read_dmi_data('system-uuid')
+ data['uuid_source'] = 'dmi'
+
+ if uuid is None:
+ uuid = ''
+ data['uuid'] = uuid.lower()
+
+ serial = util.read_dmi_data('system-serial-number')
+ if serial is None:
+ serial = ''
+
+ data['serial'] = serial.lower()
+
+ return data
+
# Used to match classes to dependencies
datasources = [
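parse_strict_mode() accepts either a boolean or a 'mode[,sleep]' string, and read_strict_mode() falls back to the caller-supplied default when parsing fails. A small sketch of the accepted values, derived from the code above:

    # Behaviour implied by parse_strict_mode(); values are illustrative.
    assert parse_strict_mode(True) == ('true', None)
    assert parse_strict_mode(False) == ('false', None)
    assert parse_strict_mode('warn') == ('warn', None)
    assert parse_strict_mode('warn,300') == ('warn', 300)
    # An invalid mode raises ValueError; read_strict_mode() logs it and
    # returns the supplied default instead.
    assert read_strict_mode('bogus,10', ('warn', None)) == ('warn', None)
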
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index 78928c77..d70784ac 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -48,6 +48,7 @@ class DataSourceOVF(sources.DataSource):
self.environment = None
self.cfg = {}
self.supported_seed_starts = ("/", "file://")
+ self.vmware_customization_supported = True
def __str__(self):
root = sources.DataSource.__str__(self)
@@ -78,7 +79,10 @@ class DataSourceOVF(sources.DataSource):
found.append(seed)
elif system_type and 'vmware' in system_type.lower():
LOG.debug("VMware Virtualization Platform found")
- if not util.get_cfg_option_bool(
+ if not self.vmware_customization_supported:
+ LOG.debug("Skipping the check for "
+ "VMware Customization support")
+ elif not util.get_cfg_option_bool(
self.sys_cfg, "disable_vmware_customization", True):
deployPkgPluginPath = search_file("/usr/lib/vmware-tools",
"libdeployPkgPlugin.so")
@@ -90,17 +94,18 @@ class DataSourceOVF(sources.DataSource):
# copies the customization specification file to
# /var/run/vmware-imc directory. cloud-init code needs
# to search for the file in that directory.
+ max_wait = get_max_wait_from_cfg(self.ds_cfg)
vmwareImcConfigFilePath = util.log_time(
logfunc=LOG.debug,
msg="waiting for configuration file",
func=wait_for_imc_cfg_file,
- args=("/var/run/vmware-imc", "cust.cfg"))
+ args=("/var/run/vmware-imc", "cust.cfg", max_wait))
if vmwareImcConfigFilePath:
- LOG.debug("Found VMware DeployPkg Config File at %s" %
+ LOG.debug("Found VMware Customization Config File at %s",
vmwareImcConfigFilePath)
else:
- LOG.debug("Did not find VMware DeployPkg Config File Path")
+ LOG.debug("Did not find VMware Customization Config File")
else:
LOG.debug("Customization for VMware platform is disabled.")
@@ -206,6 +211,29 @@ class DataSourceOVFNet(DataSourceOVF):
DataSourceOVF.__init__(self, sys_cfg, distro, paths)
self.seed_dir = os.path.join(paths.seed_dir, 'ovf-net')
self.supported_seed_starts = ("http://", "https://", "ftp://")
+ self.vmware_customization_supported = False
+
+
+def get_max_wait_from_cfg(cfg):
+ default_max_wait = 90
+ max_wait_cfg_option = 'vmware_cust_file_max_wait'
+ max_wait = default_max_wait
+
+ if not cfg:
+ return max_wait
+
+ try:
+ max_wait = int(cfg.get(max_wait_cfg_option, default_max_wait))
+ except ValueError:
+ LOG.warn("Failed to get '%s', using %s",
+ max_wait_cfg_option, default_max_wait)
+
+ if max_wait <= 0:
+ LOG.warn("Invalid value '%s' for '%s', using '%s' instead",
+ max_wait, max_wait_cfg_option, default_max_wait)
+ max_wait = default_max_wait
+
+ return max_wait
def wait_for_imc_cfg_file(dirpath, filename, maxwait=180, naplen=5):
@@ -215,6 +243,7 @@ def wait_for_imc_cfg_file(dirpath, filename, maxwait=180, naplen=5):
fileFullPath = search_file(dirpath, filename)
if fileFullPath:
return fileFullPath
+ LOG.debug("Waiting for VMware Customization Config File")
time.sleep(naplen)
waited += naplen
return None
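get_max_wait_from_cfg() reads the new 'vmware_cust_file_max_wait' datasource option and falls back to the 90 second default for missing, non-integer, or non-positive values. A sketch of the expected results (the config dicts are illustrative):

    assert get_max_wait_from_cfg(None) == 90
    assert get_max_wait_from_cfg({}) == 90
    assert get_max_wait_from_cfg({'vmware_cust_file_max_wait': 120}) == 120
    assert get_max_wait_from_cfg({'vmware_cust_file_max_wait': 'abc'}) == 90
    assert get_max_wait_from_cfg({'vmware_cust_file_max_wait': 0}) == 90
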
diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
index 2a58f1cd..e1ea21f8 100644
--- a/cloudinit/sources/DataSourceOpenStack.py
+++ b/cloudinit/sources/DataSourceOpenStack.py
@@ -45,6 +45,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
# max_wait < 0 indicates do not wait
max_wait = -1
timeout = 10
+ retries = 5
try:
max_wait = int(self.ds_cfg.get("max_wait", max_wait))
@@ -55,7 +56,13 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
timeout = max(0, int(self.ds_cfg.get("timeout", timeout)))
except Exception:
util.logexc(LOG, "Failed to get timeout, using %s", timeout)
- return (max_wait, timeout)
+
+ try:
+ retries = int(self.ds_cfg.get("retries", retries))
+ except Exception:
+ util.logexc(LOG, "Failed to get retries, using %s", retries)
+
+ return (max_wait, timeout, retries)
def wait_for_metadata_service(self):
urls = self.ds_cfg.get("metadata_urls", [DEF_MD_URL])
@@ -76,7 +83,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
md_urls.append(md_url)
url2base[md_url] = url
- (max_wait, timeout) = self._get_url_settings()
+ (max_wait, timeout, retries) = self._get_url_settings()
start_time = time.time()
avail_url = url_helper.wait_for_url(urls=md_urls, max_wait=max_wait,
timeout=timeout)
@@ -89,13 +96,15 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
self.metadata_address = url2base.get(avail_url)
return bool(avail_url)
- def get_data(self, retries=5, timeout=5):
+ def get_data(self):
try:
if not self.wait_for_metadata_service():
return False
except IOError:
return False
+ (max_wait, timeout, retries) = self._get_url_settings()
+
try:
results = util.log_time(LOG.debug,
'Crawl of openstack metadata service',
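_get_url_settings() now also reads 'retries' from the datasource config and returns a three-tuple, so get_data() no longer takes retries/timeout parameters. The keys it consults, shown here as an illustrative ds_cfg dict (values are made up):

    ds_cfg = {
        'metadata_urls': ['http://169.254.169.254'],  # urls probed by wait_for_metadata_service()
        'max_wait': 120,   # seconds to wait for a metadata url; < 0 means do not wait
        'timeout': 10,     # per-request timeout
        'retries': 5,      # new: retry count used when crawling metadata
    }
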
diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py
index d5a7c346..67ac21db 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_nic.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py
@@ -101,7 +101,11 @@ class NicConfigurator(object):
return lines
# Static Ipv4
- v4 = nic.staticIpv4
+ addrs = nic.staticIpv4
+ if not addrs:
+ return lines
+
+ v4 = addrs[0]
if v4.ip:
lines.append(' address %s' % v4.ip)
if v4.netmask:
@@ -197,22 +201,6 @@ class NicConfigurator(object):
util.subp(["pkill", "dhclient"], rcs=[0, 1])
util.subp(["rm", "-f", "/var/lib/dhcp/*"])
- def if_down_up(self):
- names = []
- for nic in self.nics:
- name = self.mac2Name.get(nic.mac.lower())
- names.append(name)
-
- for name in names:
- logger.info('Bring down interface %s' % name)
- util.subp(["ifdown", "%s" % name])
-
- self.clear_dhcp()
-
- for name in names:
- logger.info('Bring up interface %s' % name)
- util.subp(["ifup", "%s" % name])
-
def configure(self):
"""
Configure the /etc/network/interfaces
@@ -232,6 +220,6 @@ class NicConfigurator(object):
for line in lines:
fp.write('%s\n' % line)
- self.if_down_up()
+ self.clear_dhcp()
# vi: ts=4 expandtab
diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py
index be8a49e8..b95b956f 100644
--- a/cloudinit/ssh_util.py
+++ b/cloudinit/ssh_util.py
@@ -22,8 +22,11 @@ DEF_SSHD_CFG = "/etc/ssh/sshd_config"
VALID_KEY_TYPES = (
"dsa",
"ecdsa",
+ "ecdsa-sha2-nistp256",
"ecdsa-sha2-nistp256-cert-v01@openssh.com",
+ "ecdsa-sha2-nistp384",
"ecdsa-sha2-nistp384-cert-v01@openssh.com",
+ "ecdsa-sha2-nistp521",
"ecdsa-sha2-nistp521-cert-v01@openssh.com",
"ed25519",
"rsa",
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index b0552dde..5bed9032 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -11,7 +11,8 @@ import sys
import six
from six.moves import cPickle as pickle
-from cloudinit.settings import (PER_INSTANCE, FREQUENCIES, CLOUD_CONFIG)
+from cloudinit.settings import (
+ FREQUENCIES, CLOUD_CONFIG, PER_INSTANCE, RUN_CLOUD_CONFIG)
from cloudinit import handlers
@@ -188,6 +189,12 @@ class Init(object):
def _write_to_cache(self):
if self.datasource is NULL_DATA_SOURCE:
return False
+ if util.get_cfg_option_bool(self.cfg, 'manual_cache_clean', False):
+ # The empty file in instance/ dir indicates manual cleaning,
+ # and can be read by ds-identify.
+ util.write_file(
+ self.paths.get_ipath_cur("manual_clean_marker"),
+ omode="w", content="")
return _pkl_store(self.datasource, self.paths.get_ipath_cur("obj_pkl"))
def _get_datasources(self):
@@ -828,6 +835,10 @@ class Modules(object):
return self._run_modules(mostly_mods)
+def read_runtime_config():
+ return util.read_conf(RUN_CLOUD_CONFIG)
+
+
def fetch_base_config():
return util.mergemanydict(
[
@@ -835,6 +846,8 @@ def fetch_base_config():
util.get_builtin_cfg(),
# Anything in your conf.d or 'default' cloud.cfg location.
util.read_conf_with_confd(CLOUD_CONFIG),
+ # runtime config
+ read_runtime_config(),
# Kernel/cmdline parameters override system config
util.read_conf_from_cmdline(),
], reverse=True)
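fetch_base_config() now merges four sources; given the comments above and the reverse=True merge, later entries in the list take precedence. A sketch of the resulting precedence (highest first), under that assumption:

    # 1. kernel command line  (util.read_conf_from_cmdline())
    # 2. runtime config       (/run/cloud-init/cloud.cfg, new in this change)
    # 3. system config        (/etc/cloud/cloud.cfg plus its conf.d)
    # 4. built-in defaults    (util.get_builtin_cfg())
    base_cfg = stages.fetch_base_config()
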
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 5725129e..7196a7ca 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -1089,31 +1089,6 @@ def get_fqdn_from_hosts(hostname, filename="/etc/hosts"):
return fqdn
-def get_cmdline_url(names=('cloud-config-url', 'url'),
- starts=b"#cloud-config", cmdline=None):
- if cmdline is None:
- cmdline = get_cmdline()
-
- data = keyval_str_to_dict(cmdline)
- url = None
- key = None
- for key in names:
- if key in data:
- url = data[key]
- break
-
- if not url:
- return (None, None, None)
-
- resp = read_file_or_url(url)
- # allow callers to pass starts as text when comparing to bytes contents
- starts = encode_text(starts)
- if resp.ok() and resp.contents.startswith(starts):
- return (key, url, resp.contents)
-
- return (key, url, None)
-
-
def is_resolvable(name):
"""determine if a url is resolvable, return a boolean
This also attempts to be resilent against dns redirection.
@@ -1475,25 +1450,6 @@ def ensure_dirs(dirlist, mode=0o755):
ensure_dir(d, mode)
-def read_write_cmdline_url(target_fn):
- if not os.path.exists(target_fn):
- try:
- (key, url, content) = get_cmdline_url()
- except Exception:
- logexc(LOG, "Failed fetching command line url")
- return
- try:
- if key and content:
- write_file(target_fn, content, mode=0o600)
- LOG.debug(("Wrote to %s with contents of command line"
- " url %s (len=%s)"), target_fn, url, len(content))
- elif key and not content:
- LOG.debug(("Command line key %s with url"
- " %s had no contents"), key, url)
- except Exception:
- logexc(LOG, "Failed writing url content to %s", target_fn)
-
-
def yaml_dumps(obj, explicit_start=True, explicit_end=True):
return yaml.safe_dump(obj,
line_break="\n",
diff --git a/cloudinit/warnings.py b/cloudinit/warnings.py
new file mode 100644
index 00000000..3206d4e9
--- /dev/null
+++ b/cloudinit/warnings.py
@@ -0,0 +1,139 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit import helpers
+from cloudinit import log as logging
+from cloudinit import util
+
+import os
+import time
+
+LOG = logging.getLogger()
+
+WARNINGS = {
+ 'non_ec2_md': """
+This system is using the EC2 Metadata Service, but does not appear to
+be running on Amazon EC2 or one of cloud-init's known platforms that
+provide an EC2 Metadata service. In the future, cloud-init may stop
+reading metadata from the EC2 Metadata Service unless the platform can
+be identified.
+
+If you are seeing this message, please file a bug against
+cloud-init at
+ https://bugs.launchpad.net/cloud-init/+filebug?field.tags=dsid
+Make sure to include the cloud provider your instance is
+running on.
+
+For more information see
+ https://bugs.launchpad.net/bugs/1660385
+
+After you have filed a bug, you can disable this warning by
+launching your instance with the cloud-config below, or
+putting that content into
+ /etc/cloud/cloud.cfg.d/99-ec2-datasource.cfg
+
+#cloud-config
+datasource:
+ Ec2:
+ strict_id: false""",
+ 'dsid_missing_source': """
+A new feature in cloud-init identified possible datasources for
+this system as:
+ {dslist}
+However, the datasource used was: {source}
+
+In the future, cloud-init will only attempt to use datasources that
+are identified or specifically configured.
+For more information see
+ https://bugs.launchpad.net/bugs/1669675
+
+If you are seeing this message, please file a bug against
+cloud-init at
+ https://bugs.launchpad.net/cloud-init/+filebug?field.tags=dsid
+Make sure to include the cloud provider your instance is
+running on.
+
+After you have filed a bug, you can disable this warning by launching
+your instance with the cloud-config below, or putting that content
+into /etc/cloud/cloud.cfg.d/99-warnings.cfg
+
+#cloud-config
+warnings:
+ dsid_missing_source: off""",
+}
+
+
+def _get_warn_dir(cfg):
+ paths = helpers.Paths(
+ path_cfgs=cfg.get('system_info', {}).get('paths', {}))
+ return paths.get_ipath_cur('warnings')
+
+
+def _load_warn_cfg(cfg, name, mode=True, sleep=None):
+ # parse cfg['warnings']['name'] returning boolean, sleep
+ # expected value is form of:
+ # (on|off|true|false|sleep)[,sleeptime]
+ # boolean True == on, False == off
+ default = (mode, sleep)
+ if not cfg or not isinstance(cfg, dict):
+ return default
+
+ ncfg = util.get_cfg_by_path(cfg, ('warnings', name))
+ if ncfg is None:
+ return default
+
+ if ncfg in ("on", "true", True):
+ return True, None
+
+ if ncfg in ("off", "false", False):
+ return False, None
+
+ mode, _, csleep = ncfg.partition(",")
+ if mode != "sleep":
+ return default
+
+ if csleep:
+ try:
+ sleep = int(csleep)
+ except ValueError:
+ return default
+
+ return True, sleep
+
+
+def show_warning(name, cfg=None, sleep=None, mode=True, **kwargs):
+ # kwargs are used for .format of the message.
+ # sleep and mode are default values used if
+ # cfg['warnings']['name'] is not present.
+ if cfg is None:
+ cfg = {}
+
+ mode, sleep = _load_warn_cfg(cfg, name, mode=mode, sleep=sleep)
+ if not mode:
+ return
+
+ msg = WARNINGS[name].format(**kwargs)
+ msgwidth = 70
+ linewidth = msgwidth + 4
+
+ fmt = "# %%-%ds #" % msgwidth
+ topline = "*" * linewidth + "\n"
+ fmtlines = []
+ for line in msg.strip("\n").splitlines():
+ fmtlines.append(fmt % line)
+
+ closeline = topline
+ if sleep:
+ sleepmsg = " [sleeping for %d seconds] " % sleep
+ closeline = sleepmsg.center(linewidth, "*") + "\n"
+
+ util.write_file(
+ os.path.join(_get_warn_dir(cfg), name),
+ topline + "\n".join(fmtlines) + "\n" + topline)
+
+ LOG.warn(topline + "\n".join(fmtlines) + "\n" + closeline)
+
+ if sleep:
+ LOG.debug("sleeping %d seconds for warning '%s'" % (sleep, name))
+ time.sleep(sleep)
+
+# vi: ts=4 expandtab
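show_warning() formats the named template from WARNINGS with the given keyword arguments, writes the boxed text into the instance's 'warnings' path, logs it, and optionally sleeps. A sketch of the call made from cmd/main.py:di_report_warn() (the cfg and values are illustrative):

    cfg = {}  # empty config: warning enabled by default (mode=True, no sleep)
    show_warning('dsid_missing_source', cfg,
                 source='Ec2', dslist="['OpenStack', 'None']")
    # Users can silence it via cloud-config:
    #   warnings:
    #     dsid_missing_source: off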