Diffstat (limited to 'cloudinit/sources')
-rw-r--r--  cloudinit/sources/DataSourceAzure.py          2
-rw-r--r--  cloudinit/sources/DataSourceConfigDrive.py  135
-rw-r--r--  cloudinit/sources/DataSourceNoCloud.py       89
-rw-r--r--  cloudinit/sources/DataSourceOpenStack.py      2
-rw-r--r--  cloudinit/sources/__init__.py                 6
-rw-r--r--  cloudinit/sources/helpers/openstack.py        9
6 files changed, 206 insertions, 37 deletions
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 832b3063..698f4cac 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -254,7 +254,7 @@ class DataSourceAzureNet(sources.DataSource):
     def get_config_obj(self):
         return self.cfg
 
-    def check_instance_id(self):
+    def check_instance_id(self, sys_cfg):
         # quickly (local check only) if self.instance_id is still valid
         return sources.instance_id_matches_system_uuid(self.get_instance_id())
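The only change to the Azure datasource is the new sys_cfg argument on check_instance_id(); the body still delegates to the shared helper called above. A rough usage sketch follows, with a made-up cached id (the helper's internals are not shown in this diff):

from cloudinit import sources

# local-only check: compares a cached instance-id against this machine's
# system uuid; the value below is purely illustrative
still_valid = sources.instance_id_matches_system_uuid('adf43acf-5f83-4c2e-b4b7-example')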
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index 6fc9e05b..3fa62ef3 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -18,6 +18,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
+import copy
 import os
 
 from cloudinit import log as logging
@@ -50,6 +51,8 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
         self.seed_dir = os.path.join(paths.seed_dir, 'config_drive')
         self.version = None
         self.ec2_metadata = None
+        self._network_config = None
+        self.network_json = None
         self.files = {}
 
     def __str__(self):
@@ -144,12 +147,25 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
             LOG.warn("Invalid content in vendor-data: %s", e)
             self.vendordata_raw = None
 
+        try:
+            self.network_json = results.get('networkdata')
+        except ValueError as e:
+            LOG.warn("Invalid content in network-data: %s", e)
+            self.network_json = None
+
         return True
 
     def check_instance_id(self):
         # quickly (local check only) if self.instance_id is still valid
         return sources.instance_id_matches_system_uuid(self.get_instance_id())
 
+    @property
+    def network_config(self):
+        if self._network_config is None:
+            if self.network_json is not None:
+                self._network_config = convert_network_data(self.network_json)
+        return self._network_config
+
 
 class DataSourceConfigDriveNet(DataSourceConfigDrive):
     def __init__(self, sys_cfg, distro, paths):
@@ -287,3 +303,122 @@ datasources = [
 # Return a list of data sources that match this set of dependencies
 def get_datasource_list(depends):
     return sources.list_from_depends(depends, datasources)
+
+
+# Convert OpenStack ConfigDrive NetworkData json to network_config yaml
+def convert_network_data(network_json=None):
+    """Return a dictionary of network_config by parsing provided
+       OpenStack ConfigDrive NetworkData json format
+
+    OpenStack network_data.json provides a 3 element dictionary
+      - "links" (links are network devices, physical or virtual)
+      - "networks" (networks are ip network configurations for one or more
+         links)
+      - services (non-ip services, like dns)
+
+    networks and links are combined via network items referencing specific
+    links via a 'link_id' which maps to a link's 'id' field.
+
+    To convert this format to network_config yaml, we first iterate over the
+    links and then walk the network list to determine if any of the networks
+    utilize the current link; if so we generate a subnet entry for the device
+
+    We also need to map network_data.json fields to network_config fields. For
+    example, the network_data links 'id' field is equivalent to network_config
+    'name' field for devices. We apply more of this mapping to the various
+    link types that we encounter.
+
+    There are additional fields that are populated in the network_data.json
+    from OpenStack that are not relevant to network_config yaml, so we
+    enumerate a dictionary of valid keys for network_yaml and apply filtering
+    to drop these superfluous keys from the network_config yaml.
+    """
+    if network_json is None:
+        return None
+
+    # dict of network_config key for filtering network_json
+    valid_keys = {
+        'physical': [
+            'name',
+            'type',
+            'mac_address',
+            'subnets',
+            'params',
+        ],
+        'subnet': [
+            'type',
+            'address',
+            'netmask',
+            'broadcast',
+            'metric',
+            'gateway',
+            'pointopoint',
+            'mtu',
+            'scope',
+            'dns_nameservers',
+            'dns_search',
+            'routes',
+        ],
+    }
+
+    links = network_json.get('links', [])
+    networks = network_json.get('networks', [])
+    services = network_json.get('services', [])
+
+    config = []
+    for link in links:
+        subnets = []
+        cfg = {k: v for k, v in link.items()
+               if k in valid_keys['physical']}
+        cfg.update({'name': link['id']})
+        for network in [net for net in networks
+                        if net['link'] == link['id']]:
+            subnet = {k: v for k, v in network.items()
+                      if k in valid_keys['subnet']}
+            if 'dhcp' in network['type']:
+                t = 'dhcp6' if network['type'].startswith('ipv6') else 'dhcp4'
+                subnet.update({
+                    'type': t,
+                })
+            else:
+                subnet.update({
+                    'type': 'static',
+                    'address': network.get('ip_address'),
+                })
+            subnets.append(subnet)
+        cfg.update({'subnets': subnets})
+        if link['type'] in ['ethernet', 'vif', 'ovs', 'phy']:
+            cfg.update({
+                'type': 'physical',
+                'mac_address': link['ethernet_mac_address']})
+        elif link['type'] in ['bond']:
+            params = {}
+            for k, v in link.items():
+                if k == 'bond_links':
+                    continue
+                elif k.startswith('bond'):
+                    params.update({k: v})
+            cfg.update({
+                'bond_interfaces': copy.deepcopy(link['bond_links']),
+                'params': params,
+            })
+        elif link['type'] in ['vlan']:
+            cfg.update({
+                'name': "%s.%s" % (link['vlan_link'],
+                                   link['vlan_id']),
+                'vlan_link': link['vlan_link'],
+                'vlan_id': link['vlan_id'],
+                'mac_address': link['vlan_mac_address'],
+            })
+        else:
+            raise ValueError(
+                'Unknown network_data link type: %s' % link['type'])
+
+        config.append(cfg)
+
+    for service in services:
+        cfg = service
+        cfg.update({'type': 'nameserver'})
+        config.append(cfg)
+
+    return {'version': 1, 'config': config}
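To make the mapping above concrete, here is a minimal sketch of the conversion for a single-NIC drive. All ids, MAC and IP values below are invented for illustration; only the shape of the input and output follows the code above:

network_json = {
    'links': [
        {'id': 'tap-example0', 'type': 'ethernet',
         'ethernet_mac_address': 'fa:16:3e:00:11:22'},
    ],
    'networks': [
        {'link': 'tap-example0', 'type': 'ipv4',
         'ip_address': '203.0.113.10', 'netmask': '255.255.255.0',
         'routes': []},
    ],
    'services': [
        {'type': 'dns', 'address': '203.0.113.1'},
    ],
}

# convert_network_data(network_json) then returns roughly:
# {'version': 1,
#  'config': [
#      {'type': 'physical', 'name': 'tap-example0',
#       'mac_address': 'fa:16:3e:00:11:22',
#       'subnets': [{'type': 'static', 'address': '203.0.113.10',
#                    'netmask': '255.255.255.0', 'routes': []}]},
#      {'type': 'nameserver', 'address': '203.0.113.1'},
#  ]}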
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index d07e6f84..c2fba4d2 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -36,7 +36,9 @@ class DataSourceNoCloud(sources.DataSource):
         self.dsmode = 'local'
         self.seed = None
         self.cmdline_id = "ds=nocloud"
-        self.seed_dir = os.path.join(paths.seed_dir, 'nocloud')
+        self.seed_dirs = [os.path.join(paths.seed_dir, 'nocloud'),
+                          os.path.join(paths.seed_dir, 'nocloud-net')]
+        self.seed_dir = None
         self.supported_seed_starts = ("/", "file://")
 
     def __str__(self):
@@ -50,31 +52,32 @@
         }
 
         found = []
-        mydata = {'meta-data': {}, 'user-data': "", 'vendor-data': ""}
+        mydata = {'meta-data': {}, 'user-data': "", 'vendor-data': "",
+                  'network-config': {}}
 
         try:
             # Parse the kernel command line, getting data passed in
             md = {}
             if parse_cmdline_data(self.cmdline_id, md):
                 found.append("cmdline")
-                mydata['meta-data'].update(md)
+                mydata = _merge_new_seed(mydata, {'meta-data': md})
         except:
             util.logexc(LOG, "Unable to parse command line data")
             return False
 
         # Check to see if the seed dir has data.
         pp2d_kwargs = {'required': ['user-data', 'meta-data'],
-                       'optional': ['vendor-data']}
-
-        try:
-            seeded = util.pathprefix2dict(self.seed_dir, **pp2d_kwargs)
-            found.append(self.seed_dir)
-            LOG.debug("Using seeded data from %s", self.seed_dir)
-        except ValueError as e:
-            pass
-
-        if self.seed_dir in found:
-            mydata = _merge_new_seed(mydata, seeded)
+                       'optional': ['vendor-data', 'network-config']}
+
+        for path in self.seed_dirs:
+            try:
+                seeded = util.pathprefix2dict(path, **pp2d_kwargs)
+                found.append(path)
+                LOG.debug("Using seeded data from %s", path)
+                mydata = _merge_new_seed(mydata, seeded)
+                break
+            except ValueError as e:
+                pass
 
         # If the datasource config had a 'seedfrom' entry, then that takes
         # precedence over a 'seedfrom' that was found in a filesystem
@@ -141,8 +144,7 @@ class DataSourceNoCloud(sources.DataSource):
         if len(found) == 0:
             return False
 
-        seeded_interfaces = None
-
+        seeded_network = None
         # The special argument "seedfrom" indicates we should
         # attempt to seed the userdata / metadata from its value
         # its primarily value is in allowing the user to type less
@@ -158,8 +160,9 @@ class DataSourceNoCloud(sources.DataSource):
             LOG.debug("Seed from %s not supported by %s", seedfrom, self)
             return False
 
-        if 'network-interfaces' in mydata['meta-data']:
-            seeded_interfaces = self.dsmode
+        if (mydata['meta-data'].get('network-interfaces') or
+                mydata.get('network-config')):
+            seeded_network = self.dsmode
 
         # This could throw errors, but the user told us to do it
         # so if errors are raised, let them raise
@@ -176,28 +179,37 @@ class DataSourceNoCloud(sources.DataSource):
         mydata['meta-data'] = util.mergemanydict([mydata['meta-data'],
                                                   defaults])
 
-        # Update the network-interfaces if metadata had 'network-interfaces'
-        # entry and this is the local datasource, or 'seedfrom' was used
-        # and the source of the seed was self.dsmode
-        # ('local' for NoCloud, 'net' for NoCloudNet')
-        if ('network-interfaces' in mydata['meta-data'] and
-                (self.dsmode in ("local", seeded_interfaces))):
-            LOG.debug("Updating network interfaces from %s", self)
-            self.distro.apply_network(
-                mydata['meta-data']['network-interfaces'])
+        netdata = {'format': None, 'data': None}
+        if mydata['meta-data'].get('network-interfaces'):
+            netdata['format'] = 'interfaces'
+            netdata['data'] = mydata['meta-data']['network-interfaces']
+        elif mydata.get('network-config'):
+            netdata['format'] = 'network-config'
+            netdata['data'] = mydata['network-config']
+
+        # if this is the local datasource or 'seedfrom' was used
+        # and the source of the seed was self.dsmode.
+        # Then see if there is network config to apply.
+        # note this is obsolete network-interfaces style seeding.
+        if self.dsmode in ("local", seeded_network):
+            if mydata['meta-data'].get('network-interfaces'):
+                LOG.debug("Updating network interfaces from %s", self)
+                self.distro.apply_network(
+                    mydata['meta-data']['network-interfaces'])
 
         if mydata['meta-data']['dsmode'] == self.dsmode:
             self.seed = ",".join(found)
             self.metadata = mydata['meta-data']
             self.userdata_raw = mydata['user-data']
             self.vendordata_raw = mydata['vendor-data']
+            self._network_config = mydata['network-config']
             return True
 
         LOG.debug("%s: not claiming datasource, dsmode=%s", self,
                   mydata['meta-data']['dsmode'])
         return False
 
-    def check_instance_id(self):
+    def check_instance_id(self, sys_cfg):
         # quickly (local check only) if self.instance_id is still valid
         # we check kernel command line or files.
         current = self.get_instance_id()
@@ -205,11 +217,15 @@ class DataSourceNoCloud(sources.DataSource):
             return None
         quick_id = _quick_read_instance_id(cmdline_id=self.cmdline_id,
-                                           dirs=[self.seed_dir])
+                                           dirs=self.seed_dirs)
         if not quick_id:
             return None
         return quick_id == current
 
+    @property
+    def network_config(self):
+        return self._network_config
+
 
 def _quick_read_instance_id(cmdline_id, dirs=None):
     if dirs is None:
@@ -279,9 +295,17 @@ def parse_cmdline_data(ds_id, fill, cmdline=None):
 
 def _merge_new_seed(cur, seeded):
     ret = cur.copy()
-    ret['meta-data'] = util.mergemanydict([cur['meta-data'],
-                                           util.load_yaml(seeded['meta-data'])])
-    ret['user-data'] = seeded['user-data']
+
+    newmd = seeded.get('meta-data', {})
+    if not isinstance(seeded['meta-data'], dict):
+        newmd = util.load_yaml(seeded['meta-data'])
+    ret['meta-data'] = util.mergemanydict([cur['meta-data'], newmd])
+
+    if seeded.get('network-config'):
+        ret['network-config'] = util.load_yaml(seeded['network-config'])
+
+    if 'user-data' in seeded:
+        ret['user-data'] = seeded['user-data']
     if 'vendor-data' in seeded:
         ret['vendor-data'] = seeded['vendor-data']
     return ret
@@ -292,7 +316,6 @@ class DataSourceNoCloudNet(DataSourceNoCloud):
         DataSourceNoCloud.__init__(self, sys_cfg, distro, paths)
         self.cmdline_id = "ds=nocloud-net"
         self.supported_seed_starts = ("http://", "https://", "ftp://")
-        self.seed_dir = os.path.join(paths.seed_dir, 'nocloud-net')
         self.dsmode = "net"
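As a usage sketch for the NoCloud changes above: 'network-config' becomes an optional seed file next to meta-data and user-data in either seed directory, and _merge_new_seed() YAML-loads it into mydata, which the new network_config property later returns. The file names match the pathprefix2dict() keys; the contents below are invented examples:

# e.g. /var/lib/cloud/seed/nocloud/{meta-data,user-data,network-config}
seeded = {
    'meta-data': "instance-id: nocloud-example\n",
    'user-data': "#cloud-config\n{}\n",
    'network-config': ("version: 1\n"
                       "config:\n"
                       "  - type: physical\n"
                       "    name: eth0\n"
                       "    subnets:\n"
                       "      - type: dhcp4\n"),
}
mydata = {'meta-data': {}, 'user-data': "", 'vendor-data': "", 'network-config': {}}
merged = _merge_new_seed(mydata, seeded)
# merged['meta-data']      -> {'instance-id': 'nocloud-example'}
# merged['network-config'] -> {'version': 1, 'config': [{'type': 'physical', 'name': 'eth0',
#                              'subnets': [{'type': 'dhcp4'}]}]}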
diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
index 79bb9d63..f7f4590b 100644
--- a/cloudinit/sources/DataSourceOpenStack.py
+++ b/cloudinit/sources/DataSourceOpenStack.py
@@ -150,7 +150,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
         return True
 
-    def check_instance_id(self):
+    def check_instance_id(self, sys_cfg):
         # quickly (local check only) if self.instance_id is still valid
         return sources.instance_id_matches_system_uuid(self.get_instance_id())
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 28540a7b..82cd3553 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -217,10 +217,14 @@ class DataSource(object):
     def get_package_mirror_info(self):
         return self.distro.get_package_mirror_info(data_source=self)
 
-    def check_instance_id(self):
+    def check_instance_id(self, sys_cfg):
         # quickly (local check only) if self.instance_id is still
         return False
 
+    @property
+    def network_config(self):
+        return None
+
 
 def normalize_pubkey_data(pubkey_data):
     keys = []
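For orientation, here is a minimal sketch (not part of the patch) of how a datasource subclass is expected to use the two hooks added to the base class above; the class name and return values are invented:

class DataSourceExample(DataSource):
    def __init__(self, sys_cfg, distro, paths):
        DataSource.__init__(self, sys_cfg, distro, paths)
        self._network_config = None

    def check_instance_id(self, sys_cfg):
        # local-only check of whether the cached instance-id still applies
        # to this machine; the base class default is simply False
        return self.get_instance_id() == 'i-example-1234'

    @property
    def network_config(self):
        # a {'version': 1, 'config': [...]} dict, or None for "no opinion"
        return self._network_config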
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
index bd93d22f..1aa6bbae 100644
--- a/cloudinit/sources/helpers/openstack.py
+++ b/cloudinit/sources/helpers/openstack.py
@@ -51,11 +51,13 @@
 OS_LATEST = 'latest'
 OS_FOLSOM = '2012-08-10'
 OS_GRIZZLY = '2013-04-04'
 OS_HAVANA = '2013-10-17'
+OS_LIBERTY = '2015-10-15'
 
 # keep this in chronological order. new supported versions go at the end.
 OS_VERSIONS = (
     OS_FOLSOM,
     OS_GRIZZLY,
     OS_HAVANA,
+    OS_LIBERTY,
 )
@@ -229,6 +231,11 @@ class BaseReader(object):
                 False,
                 load_json_anytype,
             )
+            files['networkdata'] = (
+                self._path_join("openstack", version, 'network_data.json'),
+                False,
+                load_json_anytype,
+            )
             return files
 
         results = {
@@ -334,7 +341,7 @@ class ConfigDriveReader(BaseReader):
         path = self._path_join(self.base_path, 'openstack')
         found = [d for d in os.listdir(path)
                  if os.path.isdir(os.path.join(path))]
-        self._versions = found
+        self._versions = sorted(found)
         return self._versions
 
     def _read_ec2_metadata(self):
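Finally, an illustrative view of what the helpers/openstack.py changes amount to on a config drive: network_data.json is looked up per API version next to meta_data.json, and sorting the version directories gives a stable ordering when choosing the newest supported one. The layout and selection below are examples, not code from the patch:

# Example config-drive layout (directory names only):
#   openstack/2013-10-17/meta_data.json
#   openstack/2015-10-15/meta_data.json
#   openstack/2015-10-15/network_data.json
#   openstack/latest/meta_data.json

found = sorted(['latest', '2015-10-15', '2013-10-17'])  # as the reader now returns them
supported = [v for v in found if v in OS_VERSIONS]      # OS_VERSIONS from the hunk above
newest = supported[-1] if supported else OS_LATEST      # -> '2015-10-15' (OS_LIBERTY)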