summaryrefslogtreecommitdiff
path: root/cloudinit/sources
diff options
context:
space:
mode:
authorChad Smith <chad.smith@canonical.com>2017-10-06 13:22:54 -0600
committerChad Smith <chad.smith@canonical.com>2017-10-06 13:22:54 -0600
commit9fd022780ae516df3499b17b2d69b72fc502917c (patch)
treebc33ac6296f374414ccb15dce233a4293b8633d3 /cloudinit/sources
parent89630a6658c099d59f2766493a35c2ad266a8f42 (diff)
parent45d361cb0b7f5e4e7d79522bd285871898358623 (diff)
downloadvyos-cloud-init-9fd022780ae516df3499b17b2d69b72fc502917c.tar.gz
vyos-cloud-init-9fd022780ae516df3499b17b2d69b72fc502917c.zip
merge from master at 17.1-17-g45d361cb
Diffstat (limited to 'cloudinit/sources')
-rw-r--r--cloudinit/sources/DataSourceAliYun.py9
-rw-r--r--cloudinit/sources/DataSourceAltCloud.py4
-rw-r--r--cloudinit/sources/DataSourceAzure.py10
-rw-r--r--cloudinit/sources/DataSourceCloudStack.py51
-rw-r--r--cloudinit/sources/DataSourceEc2.py186
-rw-r--r--cloudinit/sources/DataSourceGCE.py198
-rw-r--r--cloudinit/sources/DataSourceOVF.py220
-rw-r--r--cloudinit/sources/__init__.py9
-rw-r--r--cloudinit/sources/helpers/azure.py24
-rw-r--r--cloudinit/sources/helpers/vmware/imc/config.py24
-rw-r--r--cloudinit/sources/helpers/vmware/imc/config_nic.py201
-rw-r--r--cloudinit/sources/helpers/vmware/imc/config_passwd.py67
-rw-r--r--cloudinit/sources/helpers/vmware/imc/guestcust_util.py12
13 files changed, 760 insertions, 255 deletions
diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py
index 380e27cb..43a7e42c 100644
--- a/cloudinit/sources/DataSourceAliYun.py
+++ b/cloudinit/sources/DataSourceAliYun.py
@@ -6,17 +6,20 @@ from cloudinit import sources
from cloudinit.sources import DataSourceEc2 as EC2
from cloudinit import util
-DEF_MD_VERSION = "2016-01-01"
ALIYUN_PRODUCT = "Alibaba Cloud ECS"
class DataSourceAliYun(EC2.DataSourceEc2):
- metadata_urls = ["http://100.100.100.200"]
+
+ metadata_urls = ['http://100.100.100.200']
+
+ # The minimum supported metadata_version from the ec2 metadata apis
+ min_metadata_version = '2016-01-01'
+ extended_metadata_versions = []
def __init__(self, sys_cfg, distro, paths):
super(DataSourceAliYun, self).__init__(sys_cfg, distro, paths)
self.seed_dir = os.path.join(paths.seed_dir, "AliYun")
- self.api_ver = DEF_MD_VERSION
def get_hostname(self, fqdn=False, _resolve_ip=False):
return self.metadata.get('hostname', 'localhost.localdomain')
diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
index ed1d691a..c78ad9eb 100644
--- a/cloudinit/sources/DataSourceAltCloud.py
+++ b/cloudinit/sources/DataSourceAltCloud.py
@@ -28,8 +28,8 @@ LOG = logging.getLogger(__name__)
CLOUD_INFO_FILE = '/etc/sysconfig/cloud-info'
# Shell command lists
-CMD_PROBE_FLOPPY = ['/sbin/modprobe', 'floppy']
-CMD_UDEVADM_SETTLE = ['/sbin/udevadm', 'settle', '--timeout=5']
+CMD_PROBE_FLOPPY = ['modprobe', 'floppy']
+CMD_UDEVADM_SETTLE = ['udevadm', 'settle', '--timeout=5']
META_DATA_NOT_SUPPORTED = {
'block-device-mapping': {},
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index b5a95a1f..80c2bd12 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -317,9 +317,13 @@ class DataSourceAzure(sources.DataSource):
LOG.debug("ssh authentication: "
"using fingerprint from fabirc")
- missing = util.log_time(logfunc=LOG.debug, msg="waiting for files",
+ # wait very long for public SSH keys to arrive
+ # https://bugs.launchpad.net/cloud-init/+bug/1717611
+ missing = util.log_time(logfunc=LOG.debug,
+ msg="waiting for SSH public key files",
func=wait_for_files,
- args=(fp_files,))
+ args=(fp_files, 900))
+
if len(missing):
LOG.warning("Did not find files, but going on: %s", missing)
@@ -656,7 +660,7 @@ def pubkeys_from_crt_files(flist):
return pubkeys
-def wait_for_files(flist, maxwait=60, naplen=.5, log_pre=""):
+def wait_for_files(flist, maxwait, naplen=.5, log_pre=""):
need = set(flist)
waited = 0
while True:
diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
index 0188d894..9dc473fc 100644
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -19,6 +19,7 @@ import time
from cloudinit import ec2_utils as ec2
from cloudinit import log as logging
+from cloudinit.net import dhcp
from cloudinit import sources
from cloudinit import url_helper as uhelp
from cloudinit import util
@@ -187,22 +188,36 @@ def get_dhclient_d():
return None
-def get_latest_lease():
+def get_latest_lease(lease_d=None):
# find latest lease file
- lease_d = get_dhclient_d()
+ if lease_d is None:
+ lease_d = get_dhclient_d()
if not lease_d:
return None
lease_files = os.listdir(lease_d)
latest_mtime = -1
latest_file = None
- for file_name in lease_files:
- if file_name.startswith("dhclient.") and \
- (file_name.endswith(".lease") or file_name.endswith(".leases")):
- abs_path = os.path.join(lease_d, file_name)
- mtime = os.path.getmtime(abs_path)
- if mtime > latest_mtime:
- latest_mtime = mtime
- latest_file = abs_path
+
+ # lease files are named inconsistently across distros.
+ # We assume that 'dhclient6' indicates ipv6 and ignore it.
+ # ubuntu:
+ # dhclient.<iface>.leases, dhclient.leases, dhclient6.leases
+ # centos6:
+ # dhclient-<iface>.leases, dhclient6.leases
+ # centos7: ('--' is not a typo)
+ # dhclient--<iface>.lease, dhclient6.leases
+ for fname in lease_files:
+ if fname.startswith("dhclient6"):
+ # avoid files that start with dhclient6 assuming dhcpv6.
+ continue
+ if not (fname.endswith(".lease") or fname.endswith(".leases")):
+ continue
+
+ abs_path = os.path.join(lease_d, fname)
+ mtime = os.path.getmtime(abs_path)
+ if mtime > latest_mtime:
+ latest_mtime = mtime
+ latest_file = abs_path
return latest_file
@@ -210,20 +225,28 @@ def get_vr_address():
# Get the address of the virtual router via dhcp leases
# If no virtual router is detected, fallback on default gateway.
# See http://docs.cloudstack.apache.org/projects/cloudstack-administration/en/4.8/virtual_machines/user-data.html # noqa
+
+ # Try networkd first...
+ latest_address = dhcp.networkd_get_option_from_leases('SERVER_ADDRESS')
+ if latest_address:
+ LOG.debug("Found SERVER_ADDRESS '%s' via networkd_leases",
+ latest_address)
+ return latest_address
+
+ # Try dhcp lease files next...
lease_file = get_latest_lease()
if not lease_file:
LOG.debug("No lease file found, using default gateway")
return get_default_gateway()
- latest_address = None
with open(lease_file, "r") as fd:
for line in fd:
if "dhcp-server-identifier" in line:
words = line.strip(" ;\r\n").split(" ")
if len(words) > 2:
- dhcp = words[2]
- LOG.debug("Found DHCP identifier %s", dhcp)
- latest_address = dhcp
+ dhcptok = words[2]
+ LOG.debug("Found DHCP identifier %s", dhcptok)
+ latest_address = dhcptok
if not latest_address:
# No virtual router found, fallback on default gateway
LOG.debug("No DHCP found, using default gateway")
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 4ec9592f..41367a8b 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -13,6 +13,8 @@ import time
from cloudinit import ec2_utils as ec2
from cloudinit import log as logging
+from cloudinit import net
+from cloudinit.net import dhcp
from cloudinit import sources
from cloudinit import url_helper as uhelp
from cloudinit import util
@@ -20,12 +22,13 @@ from cloudinit import warnings
LOG = logging.getLogger(__name__)
-# Which version we are requesting of the ec2 metadata apis
-DEF_MD_VERSION = '2009-04-04'
+SKIP_METADATA_URL_CODES = frozenset([uhelp.NOT_FOUND])
STRICT_ID_PATH = ("datasource", "Ec2", "strict_id")
STRICT_ID_DEFAULT = "warn"
+_unset = "_unset"
+
class Platforms(object):
ALIYUN = "AliYun"
@@ -41,17 +44,30 @@ class Platforms(object):
class DataSourceEc2(sources.DataSource):
+
# Default metadata urls that will be used if none are provided
# They will be checked for 'resolveability' and some of the
# following may be discarded if they do not resolve
metadata_urls = ["http://169.254.169.254", "http://instance-data.:8773"]
+
+ # The minimum supported metadata_version from the ec2 metadata apis
+ min_metadata_version = '2009-04-04'
+
+ # Priority ordered list of additional metadata versions which will be tried
+ # for extended metadata content. IPv6 support comes in 2016-09-02
+ extended_metadata_versions = ['2016-09-02']
+
_cloud_platform = None
+ _network_config = _unset # Used for caching calculated network config v1
+
+ # Whether we want to get network configuration from the metadata service.
+ get_network_metadata = False
+
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.metadata_address = None
self.seed_dir = os.path.join(paths.seed_dir, "ec2")
- self.api_ver = DEF_MD_VERSION
def get_data(self):
seed_ret = {}
@@ -73,21 +89,27 @@ class DataSourceEc2(sources.DataSource):
elif self.cloud_platform == Platforms.NO_EC2_METADATA:
return False
- try:
- if not self.wait_for_metadata_service():
+ if self.get_network_metadata: # Setup networking in init-local stage.
+ if util.is_FreeBSD():
+ LOG.debug("FreeBSD doesn't support running dhclient with -sf")
return False
- start_time = time.time()
- self.userdata_raw = \
- ec2.get_instance_userdata(self.api_ver, self.metadata_address)
- self.metadata = ec2.get_instance_metadata(self.api_ver,
- self.metadata_address)
- LOG.debug("Crawl of metadata service took %.3f seconds",
- time.time() - start_time)
- return True
- except Exception:
- util.logexc(LOG, "Failed reading from metadata address %s",
- self.metadata_address)
- return False
+ dhcp_leases = dhcp.maybe_perform_dhcp_discovery()
+ if not dhcp_leases:
+ # DataSourceEc2Local failed in init-local stage. DataSourceEc2
+ # will still run in init-network stage.
+ return False
+ dhcp_opts = dhcp_leases[-1]
+ net_params = {'interface': dhcp_opts.get('interface'),
+ 'ip': dhcp_opts.get('fixed-address'),
+ 'prefix_or_mask': dhcp_opts.get('subnet-mask'),
+ 'broadcast': dhcp_opts.get('broadcast-address'),
+ 'router': dhcp_opts.get('routers')}
+ with net.EphemeralIPv4Network(**net_params):
+ return util.log_time(
+ logfunc=LOG.debug, msg='Crawl of metadata service',
+ func=self._crawl_metadata)
+ else:
+ return self._crawl_metadata()
@property
def launch_index(self):
@@ -95,6 +117,32 @@ class DataSourceEc2(sources.DataSource):
return None
return self.metadata.get('ami-launch-index')
+ def get_metadata_api_version(self):
+ """Get the best supported api version from the metadata service.
+
+ Loop through all extended support metadata versions in order and
+ return the most-fully featured metadata api version discovered.
+
+ If extended_metadata_versions aren't present, return the datasource's
+ min_metadata_version.
+ """
+ # Assumes metadata service is already up
+ for api_ver in self.extended_metadata_versions:
+ url = '{0}/{1}/meta-data/instance-id'.format(
+ self.metadata_address, api_ver)
+ try:
+ resp = uhelp.readurl(url=url)
+ except uhelp.UrlError as e:
+ LOG.debug('url %s raised exception %s', url, e)
+ else:
+ if resp.code == 200:
+ LOG.debug('Found preferred metadata version %s', api_ver)
+ return api_ver
+ elif resp.code == 404:
+ msg = 'Metadata api version %s not present. Headers: %s'
+ LOG.debug(msg, api_ver, resp.headers)
+ return self.min_metadata_version
+
def get_instance_id(self):
return self.metadata['instance-id']
@@ -138,21 +186,22 @@ class DataSourceEc2(sources.DataSource):
urls = []
url2base = {}
for url in mdurls:
- cur = "%s/%s/meta-data/instance-id" % (url, self.api_ver)
+ cur = '{0}/{1}/meta-data/instance-id'.format(
+ url, self.min_metadata_version)
urls.append(cur)
url2base[cur] = url
start_time = time.time()
- url = uhelp.wait_for_url(urls=urls, max_wait=max_wait,
- timeout=timeout, status_cb=LOG.warn)
+ url = uhelp.wait_for_url(
+ urls=urls, max_wait=max_wait, timeout=timeout, status_cb=LOG.warn)
if url:
- LOG.debug("Using metadata source: '%s'", url2base[url])
+ self.metadata_address = url2base[url]
+ LOG.debug("Using metadata source: '%s'", self.metadata_address)
else:
LOG.critical("Giving up on md from %s after %s seconds",
urls, int(time.time() - start_time))
- self.metadata_address = url2base.get(url)
return bool(url)
def device_name_to_device(self, name):
@@ -234,6 +283,68 @@ class DataSourceEc2(sources.DataSource):
util.get_cfg_by_path(cfg, STRICT_ID_PATH, STRICT_ID_DEFAULT),
cfg)
+ @property
+ def network_config(self):
+ """Return a network config dict for rendering ENI or netplan files."""
+ if self._network_config != _unset:
+ return self._network_config
+
+ if self.metadata is None:
+ # this would happen if get_data hadn't been called. leave as _unset
+ LOG.warning(
+ "Unexpected call to network_config when metadata is None.")
+ return None
+
+ result = None
+ net_md = self.metadata.get('network')
+ if isinstance(net_md, dict):
+ result = convert_ec2_metadata_network_config(net_md)
+ else:
+ LOG.warning("unexpected metadata 'network' key not valid: %s",
+ net_md)
+ self._network_config = result
+
+ return self._network_config
+
+ def _crawl_metadata(self):
+ """Crawl metadata service when available.
+
+ @returns: True on success, False otherwise.
+ """
+ if not self.wait_for_metadata_service():
+ return False
+ api_version = self.get_metadata_api_version()
+ try:
+ self.userdata_raw = ec2.get_instance_userdata(
+ api_version, self.metadata_address)
+ self.metadata = ec2.get_instance_metadata(
+ api_version, self.metadata_address)
+ except Exception:
+ util.logexc(
+ LOG, "Failed reading from metadata address %s",
+ self.metadata_address)
+ return False
+ return True
+
+
+class DataSourceEc2Local(DataSourceEc2):
+ """Datasource run at init-local which sets up network to query metadata.
+
+ In init-local, no network is available. This subclass sets up minimal
+ networking with dhclient on a viable nic so that it can talk to the
+ metadata service. If the metadata service provides network configuration
+ then render the network configuration for that instance based on metadata.
+ """
+ get_network_metadata = True # Get metadata network config if present
+
+ def get_data(self):
+ supported_platforms = (Platforms.AWS,)
+ if self.cloud_platform not in supported_platforms:
+ LOG.debug("Local Ec2 mode only supported on %s, not %s",
+ supported_platforms, self.cloud_platform)
+ return False
+ return super(DataSourceEc2Local, self).get_data()
+
def read_strict_mode(cfgval, default):
try:
@@ -347,8 +458,39 @@ def _collect_platform_data():
return data
+def convert_ec2_metadata_network_config(network_md, macs_to_nics=None):
+ """Convert ec2 metadata to network config version 1 data dict.
+
+ @param: network_md: 'network' portion of EC2 metadata.
+ generally formed as {"interfaces": {"macs": {}} where
+ 'macs' is a dictionary with mac address as key and contents like:
+ {"device-number": "0", "interface-id": "...", "local-ipv4s": ...}
+ @param: macs_to_name: Optional dict mac addresses and the nic name. If
+ not provided, get_interfaces_by_mac is called to get it from the OS.
+
+ @return A dict of network config version 1 based on the metadata and macs.
+ """
+ netcfg = {'version': 1, 'config': []}
+ if not macs_to_nics:
+ macs_to_nics = net.get_interfaces_by_mac()
+ macs_metadata = network_md['interfaces']['macs']
+ for mac, nic_name in macs_to_nics.items():
+ nic_metadata = macs_metadata.get(mac)
+ if not nic_metadata:
+ continue # Not a physical nic represented in metadata
+ nic_cfg = {'type': 'physical', 'name': nic_name, 'subnets': []}
+ nic_cfg['mac_address'] = mac
+ if nic_metadata.get('public-ipv4s'):
+ nic_cfg['subnets'].append({'type': 'dhcp4'})
+ if nic_metadata.get('ipv6s'):
+ nic_cfg['subnets'].append({'type': 'dhcp6'})
+ netcfg['config'].append(nic_cfg)
+ return netcfg
+
+
# Used to match classes to dependencies
datasources = [
+ (DataSourceEc2Local, (sources.DEP_FILESYSTEM,)), # Run at init-local
(DataSourceEc2, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]
diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py
index 684eac86..ccae4200 100644
--- a/cloudinit/sources/DataSourceGCE.py
+++ b/cloudinit/sources/DataSourceGCE.py
@@ -11,9 +11,8 @@ from cloudinit import util
LOG = logging.getLogger(__name__)
-BUILTIN_DS_CONFIG = {
- 'metadata_url': 'http://metadata.google.internal/computeMetadata/v1/'
-}
+MD_V1_URL = 'http://metadata.google.internal/computeMetadata/v1/'
+BUILTIN_DS_CONFIG = {'metadata_url': MD_V1_URL}
REQUIRED_FIELDS = ('instance-id', 'availability-zone', 'local-hostname')
@@ -51,75 +50,20 @@ class DataSourceGCE(sources.DataSource):
BUILTIN_DS_CONFIG])
self.metadata_address = self.ds_cfg['metadata_url']
- # GCE takes sshKeys attribute in the format of '<user>:<public_key>'
- # so we have to trim each key to remove the username part
- def _trim_key(self, public_key):
- try:
- index = public_key.index(':')
- if index > 0:
- return public_key[(index + 1):]
- except Exception:
- return public_key
-
def get_data(self):
- if not platform_reports_gce():
- return False
-
- # url_map: (our-key, path, required, is_text)
- url_map = [
- ('instance-id', ('instance/id',), True, True),
- ('availability-zone', ('instance/zone',), True, True),
- ('local-hostname', ('instance/hostname',), True, True),
- ('public-keys', ('project/attributes/sshKeys',
- 'instance/attributes/ssh-keys'), False, True),
- ('user-data', ('instance/attributes/user-data',), False, False),
- ('user-data-encoding', ('instance/attributes/user-data-encoding',),
- False, True),
- ]
-
- # if we cannot resolve the metadata server, then no point in trying
- if not util.is_resolvable_url(self.metadata_address):
- LOG.debug("%s is not resolvable", self.metadata_address)
- return False
+ ret = util.log_time(
+ LOG.debug, 'Crawl of GCE metadata service',
+ read_md, kwargs={'address': self.metadata_address})
- metadata_fetcher = GoogleMetadataFetcher(self.metadata_address)
- # iterate over url_map keys to get metadata items
- running_on_gce = False
- for (mkey, paths, required, is_text) in url_map:
- value = None
- for path in paths:
- new_value = metadata_fetcher.get_value(path, is_text)
- if new_value is not None:
- value = new_value
- if value:
- running_on_gce = True
- if required and value is None:
- msg = "required key %s returned nothing. not GCE"
- if not running_on_gce:
- LOG.debug(msg, mkey)
- else:
- LOG.warning(msg, mkey)
- return False
- self.metadata[mkey] = value
-
- if self.metadata['public-keys']:
- lines = self.metadata['public-keys'].splitlines()
- self.metadata['public-keys'] = [self._trim_key(k) for k in lines]
-
- if self.metadata['availability-zone']:
- self.metadata['availability-zone'] = self.metadata[
- 'availability-zone'].split('/')[-1]
-
- encoding = self.metadata.get('user-data-encoding')
- if encoding:
- if encoding == 'base64':
- self.metadata['user-data'] = b64decode(
- self.metadata['user-data'])
+ if not ret['success']:
+ if ret['platform_reports_gce']:
+ LOG.warning(ret['reason'])
else:
- LOG.warning('unknown user-data-encoding: %s, ignoring',
- encoding)
-
- return running_on_gce
+ LOG.debug(ret['reason'])
+ return False
+ self.metadata = ret['meta-data']
+ self.userdata_raw = ret['user-data']
+ return True
@property
def launch_index(self):
@@ -136,9 +80,6 @@ class DataSourceGCE(sources.DataSource):
# GCE has long FDQN's and has asked for short hostnames
return self.metadata['local-hostname'].split('.')[0]
- def get_userdata_raw(self):
- return self.metadata['user-data']
-
@property
def availability_zone(self):
return self.metadata['availability-zone']
@@ -148,6 +89,87 @@ class DataSourceGCE(sources.DataSource):
return self.availability_zone.rsplit('-', 1)[0]
+def _trim_key(public_key):
+ # GCE takes sshKeys attribute in the format of '<user>:<public_key>'
+ # so we have to trim each key to remove the username part
+ try:
+ index = public_key.index(':')
+ if index > 0:
+ return public_key[(index + 1):]
+ except Exception:
+ return public_key
+
+
+def read_md(address=None, platform_check=True):
+
+ if address is None:
+ address = MD_V1_URL
+
+ ret = {'meta-data': None, 'user-data': None,
+ 'success': False, 'reason': None}
+ ret['platform_reports_gce'] = platform_reports_gce()
+
+ if platform_check and not ret['platform_reports_gce']:
+ ret['reason'] = "Not running on GCE."
+ return ret
+
+ # if we cannot resolve the metadata server, then no point in trying
+ if not util.is_resolvable_url(address):
+ LOG.debug("%s is not resolvable", address)
+ ret['reason'] = 'address "%s" is not resolvable' % address
+ return ret
+
+ # url_map: (our-key, path, required, is_text)
+ url_map = [
+ ('instance-id', ('instance/id',), True, True),
+ ('availability-zone', ('instance/zone',), True, True),
+ ('local-hostname', ('instance/hostname',), True, True),
+ ('public-keys', ('project/attributes/sshKeys',
+ 'instance/attributes/ssh-keys'), False, True),
+ ('user-data', ('instance/attributes/user-data',), False, False),
+ ('user-data-encoding', ('instance/attributes/user-data-encoding',),
+ False, True),
+ ]
+
+ metadata_fetcher = GoogleMetadataFetcher(address)
+ md = {}
+ # iterate over url_map keys to get metadata items
+ for (mkey, paths, required, is_text) in url_map:
+ value = None
+ for path in paths:
+ new_value = metadata_fetcher.get_value(path, is_text)
+ if new_value is not None:
+ value = new_value
+ if required and value is None:
+ msg = "required key %s returned nothing. not GCE"
+ ret['reason'] = msg % mkey
+ return ret
+ md[mkey] = value
+
+ if md['public-keys']:
+ lines = md['public-keys'].splitlines()
+ md['public-keys'] = [_trim_key(k) for k in lines]
+
+ if md['availability-zone']:
+ md['availability-zone'] = md['availability-zone'].split('/')[-1]
+
+ encoding = md.get('user-data-encoding')
+ if encoding:
+ if encoding == 'base64':
+ md['user-data'] = b64decode(md['user-data'])
+ else:
+ LOG.warning('unknown user-data-encoding: %s, ignoring', encoding)
+
+ if 'user-data' in md:
+ ret['user-data'] = md['user-data']
+ del md['user-data']
+
+ ret['meta-data'] = md
+ ret['success'] = True
+
+ return ret
+
+
def platform_reports_gce():
pname = util.read_dmi_data('system-product-name') or "N/A"
if pname == "Google Compute Engine":
@@ -173,4 +195,36 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
+if __name__ == "__main__":
+ import argparse
+ import json
+ import sys
+
+ from base64 import b64encode
+
+ parser = argparse.ArgumentParser(description='Query GCE Metadata Service')
+ parser.add_argument("--endpoint", metavar="URL",
+ help="The url of the metadata service.",
+ default=MD_V1_URL)
+ parser.add_argument("--no-platform-check", dest="platform_check",
+ help="Ignore smbios platform check",
+ action='store_false', default=True)
+ args = parser.parse_args()
+ data = read_md(address=args.endpoint, platform_check=args.platform_check)
+ if 'user-data' in data:
+ # user-data is bytes not string like other things. Handle it specially.
+ # if it can be represented as utf-8 then do so. Otherwise print base64
+ # encoded value in the key user-data-b64.
+ try:
+ data['user-data'] = data['user-data'].decode()
+ except UnicodeDecodeError:
+ sys.stderr.write("User-data cannot be decoded. "
+ "Writing as base64\n")
+ del data['user-data']
+ # b64encode returns a bytes value. decode to get the string.
+ data['user-data-b64'] = b64encode(data['user-data']).decode()
+
+ print(json.dumps(data, indent=1, sort_keys=True, separators=(',', ': ')))
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index f20c9a65..ccebf11a 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -25,6 +25,8 @@ from cloudinit.sources.helpers.vmware.imc.config_file \
import ConfigFile
from cloudinit.sources.helpers.vmware.imc.config_nic \
import NicConfigurator
+from cloudinit.sources.helpers.vmware.imc.config_passwd \
+ import PasswordConfigurator
from cloudinit.sources.helpers.vmware.imc.guestcust_error \
import GuestCustErrorEnum
from cloudinit.sources.helpers.vmware.imc.guestcust_event \
@@ -49,6 +51,10 @@ class DataSourceOVF(sources.DataSource):
self.cfg = {}
self.supported_seed_starts = ("/", "file://")
self.vmware_customization_supported = True
+ self._network_config = None
+ self._vmware_nics_to_enable = None
+ self._vmware_cust_conf = None
+ self._vmware_cust_found = False
def __str__(self):
root = sources.DataSource.__str__(self)
@@ -58,8 +64,8 @@ class DataSourceOVF(sources.DataSource):
found = []
md = {}
ud = ""
- vmwarePlatformFound = False
- vmwareImcConfigFilePath = ''
+ vmwareImcConfigFilePath = None
+ nicspath = None
defaults = {
"instance-id": "iid-dsovf",
@@ -99,53 +105,88 @@ class DataSourceOVF(sources.DataSource):
logfunc=LOG.debug,
msg="waiting for configuration file",
func=wait_for_imc_cfg_file,
- args=("/var/run/vmware-imc", "cust.cfg", max_wait))
+ args=("cust.cfg", max_wait))
if vmwareImcConfigFilePath:
LOG.debug("Found VMware Customization Config File at %s",
vmwareImcConfigFilePath)
+ nicspath = wait_for_imc_cfg_file(
+ filename="nics.txt", maxwait=10, naplen=5)
else:
LOG.debug("Did not find VMware Customization Config File")
else:
LOG.debug("Customization for VMware platform is disabled.")
if vmwareImcConfigFilePath:
- nics = ""
+ self._vmware_nics_to_enable = ""
try:
cf = ConfigFile(vmwareImcConfigFilePath)
- conf = Config(cf)
- (md, ud, cfg) = read_vmware_imc(conf)
- dirpath = os.path.dirname(vmwareImcConfigFilePath)
- nics = get_nics_to_enable(dirpath)
+ self._vmware_cust_conf = Config(cf)
+ (md, ud, cfg) = read_vmware_imc(self._vmware_cust_conf)
+ self._vmware_nics_to_enable = get_nics_to_enable(nicspath)
+ markerid = self._vmware_cust_conf.marker_id
+ markerexists = check_marker_exists(markerid)
except Exception as e:
LOG.debug("Error parsing the customization Config File")
LOG.exception(e)
set_customization_status(
GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED)
- enable_nics(nics)
- return False
+ raise e
finally:
util.del_dir(os.path.dirname(vmwareImcConfigFilePath))
-
try:
- LOG.debug("Applying the Network customization")
- nicConfigurator = NicConfigurator(conf.nics)
- nicConfigurator.configure()
+ LOG.debug("Preparing the Network configuration")
+ self._network_config = get_network_config_from_conf(
+ self._vmware_cust_conf,
+ True,
+ True,
+ self.distro.osfamily)
except Exception as e:
- LOG.debug("Error applying the Network Configuration")
LOG.exception(e)
set_customization_status(
GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
GuestCustEventEnum.GUESTCUST_EVENT_NETWORK_SETUP_FAILED)
- enable_nics(nics)
- return False
-
- vmwarePlatformFound = True
+ raise e
+
+ if markerid and not markerexists:
+ LOG.debug("Applying password customization")
+ pwdConfigurator = PasswordConfigurator()
+ adminpwd = self._vmware_cust_conf.admin_password
+ try:
+ resetpwd = self._vmware_cust_conf.reset_password
+ if adminpwd or resetpwd:
+ pwdConfigurator.configure(adminpwd, resetpwd,
+ self.distro)
+ else:
+ LOG.debug("Changing password is not needed")
+ except Exception as e:
+ LOG.debug("Error applying Password Configuration: %s", e)
+ set_customization_status(
+ GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
+ GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED)
+ return False
+ if markerid:
+ LOG.debug("Handle marker creation")
+ try:
+ setup_marker_files(markerid)
+ except Exception as e:
+ LOG.debug("Error creating marker files: %s", e)
+ set_customization_status(
+ GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
+ GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED)
+ return False
+
+ self._vmware_cust_found = True
+ found.append('vmware-tools')
+
+ # TODO: Need to set the status to DONE only when the
+ # customization is done successfully.
+ enable_nics(self._vmware_nics_to_enable)
set_customization_status(
GuestCustStateEnum.GUESTCUST_STATE_DONE,
GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS)
- enable_nics(nics)
+
else:
np = {'iso': transport_iso9660,
'vmware-guestd': transport_vmware_guestd, }
@@ -160,7 +201,7 @@ class DataSourceOVF(sources.DataSource):
found.append(name)
# There was no OVF transports found
- if len(found) == 0 and not vmwarePlatformFound:
+ if len(found) == 0:
return False
if 'seedfrom' in md and md['seedfrom']:
@@ -205,6 +246,10 @@ class DataSourceOVF(sources.DataSource):
def get_config_obj(self):
return self.cfg
+ @property
+ def network_config(self):
+ return self._network_config
+
class DataSourceOVFNet(DataSourceOVF):
def __init__(self, sys_cfg, distro, paths):
@@ -236,12 +281,13 @@ def get_max_wait_from_cfg(cfg):
return max_wait
-def wait_for_imc_cfg_file(dirpath, filename, maxwait=180, naplen=5):
+def wait_for_imc_cfg_file(filename, maxwait=180, naplen=5,
+ dirpath="/var/run/vmware-imc"):
waited = 0
while waited < maxwait:
- fileFullPath = search_file(dirpath, filename)
- if fileFullPath:
+ fileFullPath = os.path.join(dirpath, filename)
+ if os.path.isfile(fileFullPath):
return fileFullPath
LOG.debug("Waiting for VMware Customization Config File")
time.sleep(naplen)
@@ -249,6 +295,26 @@ def wait_for_imc_cfg_file(dirpath, filename, maxwait=180, naplen=5):
return None
+def get_network_config_from_conf(config, use_system_devices=True,
+ configure=False, osfamily=None):
+ nicConfigurator = NicConfigurator(config.nics, use_system_devices)
+ nics_cfg_list = nicConfigurator.generate(configure, osfamily)
+
+ return get_network_config(nics_cfg_list,
+ config.name_servers,
+ config.dns_suffixes)
+
+
+def get_network_config(nics=None, nameservers=None, search=None):
+ config_list = nics
+
+ if nameservers or search:
+ config_list.append({'type': 'nameserver', 'address': nameservers,
+ 'search': search})
+
+ return {'version': 1, 'config': config_list}
+
+
# This will return a dict with some content
# meta-data, user-data, some config
def read_vmware_imc(config):
@@ -264,6 +330,9 @@ def read_vmware_imc(config):
if config.timezone:
cfg['timezone'] = config.timezone
+ # Generate a unique instance-id so that re-customization will
+ # happen in cloud-init
+ md['instance-id'] = "iid-vmware-" + util.rand_str(strlen=8)
return (md, ud, cfg)
@@ -306,26 +375,56 @@ def get_ovf_env(dirname):
return (None, False)
-# Transport functions take no input and return
-# a 3 tuple of content, path, filename
-def transport_iso9660(require_iso=True):
+def maybe_cdrom_device(devname):
+ """Test if devname matches known list of devices which may contain iso9660
+ filesystems.
- # default_regex matches values in
- # /lib/udev/rules.d/60-cdrom_id.rules
- # KERNEL!="sr[0-9]*|hd[a-z]|xvd*", GOTO="cdrom_end"
- envname = "CLOUD_INIT_CDROM_DEV_REGEX"
- default_regex = "^(sr[0-9]+|hd[a-z]|xvd.*)"
+ Be helpful in accepting either knames (with no leading /dev/) or full path
+ names, but do not allow paths outside of /dev/, like /dev/foo/bar/xxx.
+ """
+ if not devname:
+ return False
+ elif not isinstance(devname, util.string_types):
+ raise ValueError("Unexpected input for devname: %s" % devname)
+
+ # resolve '..' and multi '/' elements
+ devname = os.path.normpath(devname)
+
+ # drop leading '/dev/'
+ if devname.startswith("/dev/"):
+ # partition returns tuple (before, partition, after)
+ devname = devname.partition("/dev/")[-1]
- devname_regex = os.environ.get(envname, default_regex)
+ # ignore leading slash (/sr0), else fail on / in name (foo/bar/xvdc)
+ if devname.startswith("/"):
+ devname = devname.split("/")[-1]
+ elif devname.count("/") > 0:
+ return False
+
+ # if empty string
+ if not devname:
+ return False
+
+ # default_regex matches values in /lib/udev/rules.d/60-cdrom_id.rules
+ # KERNEL!="sr[0-9]*|hd[a-z]|xvd*", GOTO="cdrom_end"
+ default_regex = r"^(sr[0-9]+|hd[a-z]|xvd.*)"
+ devname_regex = os.environ.get("CLOUD_INIT_CDROM_DEV_REGEX", default_regex)
cdmatch = re.compile(devname_regex)
+ return cdmatch.match(devname) is not None
+
+
+# Transport functions take no input and return
+# a 3 tuple of content, path, filename
+def transport_iso9660(require_iso=True):
+
# Go through mounts to see if it was already mounted
mounts = util.mounts()
for (dev, info) in mounts.items():
fstype = info['fstype']
if fstype != "iso9660" and require_iso:
continue
- if cdmatch.match(dev[5:]) is None: # take off '/dev/'
+ if not maybe_cdrom_device(dev):
continue
mp = info['mountpoint']
(fname, contents) = get_ovf_env(mp)
@@ -337,29 +436,19 @@ def transport_iso9660(require_iso=True):
else:
mtype = None
- devs = os.listdir("/dev/")
- devs.sort()
+ # generate a list of devices with mtype filesystem, filter by regex
+ devs = [dev for dev in
+ util.find_devs_with("TYPE=%s" % mtype if mtype else None)
+ if maybe_cdrom_device(dev)]
for dev in devs:
- fullp = os.path.join("/dev/", dev)
-
- if (fullp in mounts or
- not cdmatch.match(dev) or os.path.isdir(fullp)):
- continue
-
- try:
- # See if we can read anything at all...??
- util.peek_file(fullp, 512)
- except IOError:
- continue
-
try:
- (fname, contents) = util.mount_cb(fullp, get_ovf_env, mtype=mtype)
+ (fname, contents) = util.mount_cb(dev, get_ovf_env, mtype=mtype)
except util.MountFailedError:
- LOG.debug("%s not mountable as iso9660", fullp)
+ LOG.debug("%s not mountable as iso9660", dev)
continue
if contents is not False:
- return (contents, fullp, fname)
+ return (contents, dev, fname)
return (False, None, None)
@@ -445,4 +534,33 @@ datasources = (
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
+# To check if marker file exists
+def check_marker_exists(markerid):
+ """
+ Check the existence of a marker file.
+ Presence of marker file determines whether a certain code path is to be
+ executed. It is needed for partial guest customization in VMware.
+ """
+ if not markerid:
+ return False
+ markerfile = "/.markerfile-" + markerid
+ if os.path.exists(markerfile):
+ return True
+ return False
+
+
+# Create a marker file
+def setup_marker_files(markerid):
+ """
+ Create a new marker file.
+ Marker files are unique to a full customization workflow in VMware
+ environment.
+ """
+ if not markerid:
+ return
+ markerfile = "/.markerfile-" + markerid
+ util.del_file("/.markerfile-*.txt")
+ open(markerfile, 'w').close()
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 952caf35..9a43fbee 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -44,6 +44,7 @@ class DataSourceNotFoundException(Exception):
class DataSource(object):
dsmode = DSMODE_NETWORK
+ default_locale = 'en_US.UTF-8'
def __init__(self, sys_cfg, distro, paths, ud_proc=None):
self.sys_cfg = sys_cfg
@@ -150,7 +151,13 @@ class DataSource(object):
return None
def get_locale(self):
- return 'en_US.UTF-8'
+ """Default locale is en_US.UTF-8, but allow distros to override"""
+ locale = self.default_locale
+ try:
+ locale = self.distro.get_locale()
+ except NotImplementedError:
+ pass
+ return locale
@property
def availability_zone(self):
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index e22409d1..959b1bda 100644
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -6,16 +6,16 @@ import os
import re
import socket
import struct
-import tempfile
import time
+from cloudinit.net import dhcp
from cloudinit import stages
+from cloudinit import temp_utils
from contextlib import contextmanager
from xml.etree import ElementTree
from cloudinit import util
-
LOG = logging.getLogger(__name__)
@@ -111,7 +111,7 @@ class OpenSSLManager(object):
}
def __init__(self):
- self.tmpdir = tempfile.mkdtemp()
+ self.tmpdir = temp_utils.mkdtemp()
self.certificate = None
self.generate_certificate()
@@ -239,6 +239,11 @@ class WALinuxAgentShim(object):
return socket.inet_ntoa(packed_bytes)
@staticmethod
+ def _networkd_get_value_from_leases(leases_d=None):
+ return dhcp.networkd_get_option_from_leases(
+ 'OPTION_245', leases_d=leases_d)
+
+ @staticmethod
def _get_value_from_leases_file(fallback_lease_file):
leases = []
content = util.load_file(fallback_lease_file)
@@ -287,12 +292,15 @@ class WALinuxAgentShim(object):
@staticmethod
def find_endpoint(fallback_lease_file=None):
- LOG.debug('Finding Azure endpoint...')
value = None
- # Option-245 stored in /run/cloud-init/dhclient.hooks/<ifc>.json
- # a dhclient exit hook that calls cloud-init-dhclient-hook
- dhcp_options = WALinuxAgentShim._load_dhclient_json()
- value = WALinuxAgentShim._get_value_from_dhcpoptions(dhcp_options)
+ LOG.debug('Finding Azure endpoint from networkd...')
+ value = WALinuxAgentShim._networkd_get_value_from_leases()
+ if value is None:
+ # Option-245 stored in /run/cloud-init/dhclient.hooks/<ifc>.json
+ # a dhclient exit hook that calls cloud-init-dhclient-hook
+ LOG.debug('Finding Azure endpoint from hook json...')
+ dhcp_options = WALinuxAgentShim._load_dhclient_json()
+ value = WALinuxAgentShim._get_value_from_dhcpoptions(dhcp_options)
if value is None:
# Fallback and check the leases file if unsuccessful
LOG.debug("Unable to find endpoint in dhclient logs. "
diff --git a/cloudinit/sources/helpers/vmware/imc/config.py b/cloudinit/sources/helpers/vmware/imc/config.py
index 9a5e3a8a..49d441db 100644
--- a/cloudinit/sources/helpers/vmware/imc/config.py
+++ b/cloudinit/sources/helpers/vmware/imc/config.py
@@ -5,6 +5,7 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
+
from .nic import Nic
@@ -14,13 +15,16 @@ class Config(object):
Specification file.
"""
+ CUSTOM_SCRIPT = 'CUSTOM-SCRIPT|SCRIPT-NAME'
DNS = 'DNS|NAMESERVER|'
- SUFFIX = 'DNS|SUFFIX|'
+ DOMAINNAME = 'NETWORK|DOMAINNAME'
+ HOSTNAME = 'NETWORK|HOSTNAME'
+ MARKERID = 'MISC|MARKER-ID'
PASS = 'PASSWORD|-PASS'
+ RESETPASS = 'PASSWORD|RESET'
+ SUFFIX = 'DNS|SUFFIX|'
TIMEZONE = 'DATETIME|TIMEZONE'
UTC = 'DATETIME|UTC'
- HOSTNAME = 'NETWORK|HOSTNAME'
- DOMAINNAME = 'NETWORK|DOMAINNAME'
def __init__(self, configFile):
self._configFile = configFile
@@ -82,4 +86,18 @@ class Config(object):
return res
+ @property
+ def reset_password(self):
+ """Retreives if the root password needs to be reset."""
+ resetPass = self._configFile.get(Config.RESETPASS, 'no')
+ resetPass = resetPass.lower()
+ if resetPass not in ('yes', 'no'):
+ raise ValueError('ResetPassword value should be yes/no')
+ return resetPass == 'yes'
+
+ @property
+ def marker_id(self):
+ """Returns marker id."""
+ return self._configFile.get(Config.MARKERID, None)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py
index 67ac21db..2fb07c59 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_nic.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py
@@ -9,22 +9,48 @@ import logging
import os
import re
+from cloudinit.net.network_state import mask_to_net_prefix
from cloudinit import util
logger = logging.getLogger(__name__)
+def gen_subnet(ip, netmask):
+ """
+ Return the subnet for a given ip address and a netmask
+ @return (str): the subnet
+ @param ip: ip address
+ @param netmask: netmask
+ """
+ ip_array = ip.split(".")
+ mask_array = netmask.split(".")
+ result = []
+ for index in list(range(4)):
+ result.append(int(ip_array[index]) & int(mask_array[index]))
+
+ return ".".join([str(x) for x in result])
+
+
class NicConfigurator(object):
- def __init__(self, nics):
+ def __init__(self, nics, use_system_devices=True):
"""
Initialize the Nic Configurator
@param nics (list) an array of nics to configure
+ @param use_system_devices (Bool) Get the MAC names from the system
+ if this is True. If False, then mac names will be retrieved from
+ the specified nics.
"""
self.nics = nics
self.mac2Name = {}
self.ipv4PrimaryGateway = None
self.ipv6PrimaryGateway = None
- self.find_devices()
+
+ if use_system_devices:
+ self.find_devices()
+ else:
+ for nic in self.nics:
+ self.mac2Name[nic.mac.lower()] = nic.name
+
self._primaryNic = self.get_primary_nic()
def get_primary_nic(self):
@@ -61,138 +87,163 @@ class NicConfigurator(object):
def gen_one_nic(self, nic):
"""
- Return the lines needed to configure a nic
- @return (str list): the string list to configure the nic
+ Return the config list needed to configure a nic
+ @return (list): the subnets and routes list to configure the nic
@param nic (NicBase): the nic to configure
"""
- lines = []
- name = self.mac2Name.get(nic.mac.lower())
+ mac = nic.mac.lower()
+ name = self.mac2Name.get(mac)
if not name:
raise ValueError('No known device has MACADDR: %s' % nic.mac)
- if nic.onboot:
- lines.append('auto %s' % name)
+ nics_cfg_list = []
+
+ cfg = {'type': 'physical', 'name': name, 'mac_address': mac}
+
+ subnet_list = []
+ route_list = []
# Customize IPv4
- lines.extend(self.gen_ipv4(name, nic))
+ (subnets, routes) = self.gen_ipv4(name, nic)
+ subnet_list.extend(subnets)
+ route_list.extend(routes)
# Customize IPv6
- lines.extend(self.gen_ipv6(name, nic))
+ (subnets, routes) = self.gen_ipv6(name, nic)
+ subnet_list.extend(subnets)
+ route_list.extend(routes)
+
+ cfg.update({'subnets': subnet_list})
- lines.append('')
+ nics_cfg_list.append(cfg)
+ if route_list:
+ nics_cfg_list.extend(route_list)
- return lines
+ return nics_cfg_list
def gen_ipv4(self, name, nic):
"""
- Return the lines needed to configure the IPv4 setting of a nic
- @return (str list): the string list to configure the gateways
- @param name (str): name of the nic
+ Return the set of subnets and routes needed to configure the
+ IPv4 settings of a nic
+ @return (set): the set of subnet and routes to configure the gateways
+ @param name (str): name of the nic
@param nic (NicBase): the nic to configure
"""
- lines = []
+
+ subnet = {}
+ route_list = []
+
+ if nic.onboot:
+ subnet.update({'control': 'auto'})
bootproto = nic.bootProto.lower()
if nic.ipv4_mode.lower() == 'disabled':
bootproto = 'manual'
- lines.append('iface %s inet %s' % (name, bootproto))
if bootproto != 'static':
- return lines
+ subnet.update({'type': 'dhcp'})
+ return ([subnet], route_list)
+ else:
+ subnet.update({'type': 'static'})
# Static Ipv4
addrs = nic.staticIpv4
if not addrs:
- return lines
+ return ([subnet], route_list)
v4 = addrs[0]
if v4.ip:
- lines.append(' address %s' % v4.ip)
+ subnet.update({'address': v4.ip})
if v4.netmask:
- lines.append(' netmask %s' % v4.netmask)
+ subnet.update({'netmask': v4.netmask})
# Add the primary gateway
if nic.primary and v4.gateways:
self.ipv4PrimaryGateway = v4.gateways[0]
- lines.append(' gateway %s metric 0' % self.ipv4PrimaryGateway)
- return lines
+ subnet.update({'gateway': self.ipv4PrimaryGateway})
+ return [subnet]
# Add routes if there is no primary nic
if not self._primaryNic:
- lines.extend(self.gen_ipv4_route(nic, v4.gateways))
+ route_list.extend(self.gen_ipv4_route(nic,
+ v4.gateways,
+ v4.netmask))
- return lines
+ return ([subnet], route_list)
- def gen_ipv4_route(self, nic, gateways):
+ def gen_ipv4_route(self, nic, gateways, netmask):
"""
- Return the lines needed to configure additional Ipv4 route
- @return (str list): the string list to configure the gateways
+ Return the routes list needed to configure additional Ipv4 route
+ @return (list): the route list to configure the gateways
@param nic (NicBase): the nic to configure
@param gateways (str list): the list of gateways
"""
- lines = []
+ route_list = []
+
+ cidr = mask_to_net_prefix(netmask)
for gateway in gateways:
- lines.append(' up route add default gw %s metric 10000' %
- gateway)
+ destination = "%s/%d" % (gen_subnet(gateway, netmask), cidr)
+ route_list.append({'destination': destination,
+ 'type': 'route',
+ 'gateway': gateway,
+ 'metric': 10000})
- return lines
+ return route_list
def gen_ipv6(self, name, nic):
"""
- Return the lines needed to configure the gateways for a nic
- @return (str list): the string list to configure the gateways
+ Return the set of subnets and routes needed to configure the
+ gateways for a nic
+ @return (set): the set of subnets and routes to configure the gateways
@param name (str): name of the nic
@param nic (NicBase): the nic to configure
"""
- lines = []
if not nic.staticIpv6:
- return lines
+ return ([], [])
+ subnet_list = []
# Static Ipv6
addrs = nic.staticIpv6
- lines.append('iface %s inet6 static' % name)
- lines.append(' address %s' % addrs[0].ip)
- lines.append(' netmask %s' % addrs[0].netmask)
- for addr in addrs[1:]:
- lines.append(' up ifconfig %s inet6 add %s/%s' % (name, addr.ip,
- addr.netmask))
- # Add the primary gateway
- if nic.primary:
- for addr in addrs:
- if addr.gateway:
- self.ipv6PrimaryGateway = addr.gateway
- lines.append(' gateway %s' % self.ipv6PrimaryGateway)
- return lines
+ for addr in addrs:
+ subnet = {'type': 'static6',
+ 'address': addr.ip,
+ 'netmask': addr.netmask}
+ subnet_list.append(subnet)
- # Add routes if there is no primary nic
- if not self._primaryNic:
- lines.extend(self._genIpv6Route(name, nic, addrs))
+ # TODO: Add the primary gateway
+
+ route_list = []
+ # TODO: Add routes if there is no primary nic
+ # if not self._primaryNic:
+ # route_list.extend(self._genIpv6Route(name, nic, addrs))
- return lines
+ return (subnet_list, route_list)
def _genIpv6Route(self, name, nic, addrs):
- lines = []
+ route_list = []
for addr in addrs:
- lines.append(' up route -A inet6 add default gw '
- '%s metric 10000' % addr.gateway)
+ route_list.append({'type': 'route',
+ 'gateway': addr.gateway,
+ 'metric': 10000})
+
+ return route_list
- return lines
+ def generate(self, configure=False, osfamily=None):
+ """Return the config elements that are needed to configure the nics"""
+ if configure:
+ logger.info("Configuring the interfaces file")
+ self.configure(osfamily)
- def generate(self):
- """Return the lines that is needed to configure the nics"""
- lines = []
- lines.append('iface lo inet loopback')
- lines.append('auto lo')
- lines.append('')
+ nics_cfg_list = []
for nic in self.nics:
- lines.extend(self.gen_one_nic(nic))
+ nics_cfg_list.extend(self.gen_one_nic(nic))
- return lines
+ return nics_cfg_list
def clear_dhcp(self):
logger.info('Clearing DHCP leases')
@@ -201,11 +252,16 @@ class NicConfigurator(object):
util.subp(["pkill", "dhclient"], rcs=[0, 1])
util.subp(["rm", "-f", "/var/lib/dhcp/*"])
- def configure(self):
+ def configure(self, osfamily=None):
"""
- Configure the /etc/network/intefaces
+ Configure the /etc/network/interfaces
Make a back up of the original
"""
+
+ if not osfamily or osfamily != "debian":
+ logger.info("Debian OS not detected. Skipping the configure step")
+ return
+
containingDir = '/etc/network'
interfaceFile = os.path.join(containingDir, 'interfaces')
@@ -215,10 +271,13 @@ class NicConfigurator(object):
if not os.path.exists(originalFile) and os.path.exists(interfaceFile):
os.rename(interfaceFile, originalFile)
- lines = self.generate()
- with open(interfaceFile, 'w') as fp:
- for line in lines:
- fp.write('%s\n' % line)
+ lines = [
+ "# DO NOT EDIT THIS FILE BY HAND --"
+ " AUTOMATICALLY GENERATED BY cloud-init",
+ "source /etc/network/interfaces.d/*.cfg",
+ ]
+
+ util.write_file(interfaceFile, content='\n'.join(lines))
self.clear_dhcp()
diff --git a/cloudinit/sources/helpers/vmware/imc/config_passwd.py b/cloudinit/sources/helpers/vmware/imc/config_passwd.py
new file mode 100644
index 00000000..75cfbaaf
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/config_passwd.py
@@ -0,0 +1,67 @@
+# Copyright (C) 2016 Canonical Ltd.
+# Copyright (C) 2016 VMware INC.
+#
+# Author: Maitreyee Saikia <msaikia@vmware.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+
+import logging
+import os
+
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+
+
+class PasswordConfigurator(object):
+ """
+ Class for changing configurations related to passwords in a VM. Includes
+ setting and expiring passwords.
+ """
+ def configure(self, passwd, resetPasswd, distro):
+ """
+ Main method to perform all functionalities based on configuration file
+ inputs.
+ @param passwd: encoded admin password.
+ @param resetPasswd: boolean to determine if password needs to be reset.
+ @param distro: distro object used to set the password.
+ """
+ LOG.info('Starting password configuration')
+ if passwd:
+ passwd = util.b64d(passwd)
+ allRootUsers = []
+ for line in open('/etc/passwd', 'r'):
+ if line.split(':')[2] == '0':
+ allRootUsers.append(line.split(':')[0])
+ # read shadow file and check for each user, if its uid0 or root.
+ uidUsersList = []
+ for line in open('/etc/shadow', 'r'):
+ user = line.split(':')[0]
+ if user in allRootUsers:
+ uidUsersList.append(user)
+ if passwd:
+ LOG.info('Setting admin password')
+ distro.set_passwd('root', passwd)
+ if resetPasswd:
+ self.reset_password(uidUsersList)
+ LOG.info('Configure Password completed!')
+
+ def reset_password(self, uidUserList):
+ """
+ Reset passwords by expiring them with 'passwd --expire'. If that
+ fails, fall back to 'chage -d 0'; log a warning when neither works.
+ @param uidUserList: list of users whose passwords should be expired.
+ """
+ LOG.info('Expiring password.')
+ for user in uidUserList:
+ try:
+ out, err = util.subp(['passwd', '--expire', user])
+ except util.ProcessExecutionError as e:
+ if os.path.exists('/usr/bin/chage'):
+ out, e = util.subp(['chage', '-d', '0', user])
+ else:
+ LOG.warning('Failed to expire password for %s with error: '
+ '%s', user, e)
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
index 1ab6bd41..44075255 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
@@ -59,14 +59,16 @@ def set_customization_status(custstate, custerror, errormessage=None):
return (out, err)
-# This will read the file nics.txt in the specified directory
-# and return the content
-def get_nics_to_enable(dirpath):
- if not dirpath:
+def get_nics_to_enable(nicsfilepath):
+ """Reads the NICS from the specified file path and returns the content
+
+ @param nicsfilepath: Absolute file path to the NICS.txt file.
+ """
+
+ if not nicsfilepath:
return None
NICS_SIZE = 1024
- nicsfilepath = os.path.join(dirpath, "nics.txt")
if not os.path.exists(nicsfilepath):
return None