From a2249942522d140a063acdc007aced991d4f0588 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 5 May 2016 14:57:48 -0700 Subject: Work on refactoring (and adding) network conversion tests --- cloudinit/sources/DataSourceConfigDrive.py | 127 ++--------------------------- cloudinit/sources/helpers/openstack.py | 116 ++++++++++++++++++++++++++ 2 files changed, 121 insertions(+), 122 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 52a9f543..70373b43 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -61,7 +61,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): mstr += "[source=%s]" % (self.source) return mstr - def get_data(self): + def get_data(self, skip_first_boot=False): found = None md = {} results = {} @@ -119,7 +119,8 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): # instance-id prev_iid = get_previous_iid(self.paths) cur_iid = md['instance-id'] - if prev_iid != cur_iid and self.dsmode == "local": + if prev_iid != cur_iid and \ + self.dsmode == "local" and not skip_first_boot: on_first_boot(results, distro=self.distro) # dsmode != self.dsmode here if: @@ -163,7 +164,8 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): def network_config(self): if self._network_config is None: if self.network_json is not None: - self._network_config = convert_network_data(self.network_json) + self._network_config = openstack.convert_net_json( + self.network_json) return self._network_config @@ -303,122 +305,3 @@ datasources = [ # Return a list of data sources that match this set of dependencies def get_datasource_list(depends): return sources.list_from_depends(depends, datasources) - - -# Convert OpenStack ConfigDrive NetworkData json to network_config yaml -def convert_network_data(network_json=None): - """Return a dictionary of network_config by parsing provided - OpenStack ConfigDrive NetworkData json format - - OpenStack network_data.json provides a 3 element dictionary - - "links" (links are network devices, physical or virtual) - - "networks" (networks are ip network configurations for one or more - links) - - services (non-ip services, like dns) - - networks and links are combined via network items referencing specific - links via a 'link_id' which maps to a links 'id' field. - - To convert this format to network_config yaml, we first iterate over the - links and then walk the network list to determine if any of the networks - utilize the current link; if so we generate a subnet entry for the device - - We also need to map network_data.json fields to network_config fields. For - example, the network_data links 'id' field is equivalent to network_config - 'name' field for devices. We apply more of this mapping to the various - link types that we encounter. - - There are additional fields that are populated in the network_data.json - from OpenStack that are not relevant to network_config yaml, so we - enumerate a dictionary of valid keys for network_yaml and apply filtering - to drop these superflous keys from the network_config yaml. 
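As an aside for readers following the docstring above: the mapping it describes is easiest to see with a small worked example. The device name, MAC and addresses below are invented for illustration and are not taken from this patch; the commented output is roughly what the conversion logic shown in this series produces for such an input, give or take key ordering.

    # Hypothetical network_data.json content (input)
    network_json = {
        'links': [{'id': 'tap0', 'type': 'phy',
                   'ethernet_mac_address': 'fa:16:3e:00:00:01'}],
        'networks': [{'id': 'network0', 'link': 'tap0', 'type': 'ipv4',
                      'ip_address': '10.0.0.5',
                      'netmask': '255.255.255.0', 'routes': []}],
        'services': [{'type': 'dns', 'address': '10.0.0.2'}],
    }

    # Roughly the network_config (version 1) that comes back out:
    # {'version': 1,
    #  'config': [{'type': 'physical', 'name': 'tap0',
    #              'mac_address': 'fa:16:3e:00:00:01',
    #              'subnets': [{'type': 'static', 'address': '10.0.0.5',
    #                           'netmask': '255.255.255.0', 'routes': []}]},
    #             {'type': 'nameserver', 'address': '10.0.0.2'}]}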
- """ - if network_json is None: - return None - - # dict of network_config key for filtering network_json - valid_keys = { - 'physical': [ - 'name', - 'type', - 'mac_address', - 'subnets', - 'params', - ], - 'subnet': [ - 'type', - 'address', - 'netmask', - 'broadcast', - 'metric', - 'gateway', - 'pointopoint', - 'mtu', - 'scope', - 'dns_nameservers', - 'dns_search', - 'routes', - ], - } - - links = network_json.get('links', []) - networks = network_json.get('networks', []) - services = network_json.get('services', []) - - config = [] - for link in links: - subnets = [] - cfg = {k: v for k, v in link.items() - if k in valid_keys['physical']} - cfg.update({'name': link['id']}) - for network in [net for net in networks - if net['link'] == link['id']]: - subnet = {k: v for k, v in network.items() - if k in valid_keys['subnet']} - if 'dhcp' in network['type']: - t = 'dhcp6' if network['type'].startswith('ipv6') else 'dhcp4' - subnet.update({ - 'type': t, - }) - else: - subnet.update({ - 'type': 'static', - 'address': network.get('ip_address'), - }) - subnets.append(subnet) - cfg.update({'subnets': subnets}) - if link['type'] in ['ethernet', 'vif', 'ovs', 'phy']: - cfg.update({ - 'type': 'physical', - 'mac_address': link['ethernet_mac_address']}) - elif link['type'] in ['bond']: - params = {} - for k, v in link.items(): - if k == 'bond_links': - continue - elif k.startswith('bond'): - params.update({k: v}) - cfg.update({ - 'bond_interfaces': copy.deepcopy(link['bond_links']), - 'params': params, - }) - elif link['type'] in ['vlan']: - cfg.update({ - 'name': "%s.%s" % (link['vlan_link'], - link['vlan_id']), - 'vlan_link': link['vlan_link'], - 'vlan_id': link['vlan_id'], - 'mac_address': link['vlan_mac_address'], - }) - else: - raise ValueError( - 'Unknown network_data link type: %s' % link['type']) - - config.append(cfg) - - for service in services: - cfg = service - cfg.update({'type': 'nameserver'}) - config.append(cfg) - - return {'version': 1, 'config': config} diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index 1aa6bbae..475ccab3 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -474,6 +474,122 @@ class MetadataReader(BaseReader): retries=self.retries) +def convert_net_json(network_json): + """Return a dictionary of network_config by parsing provided + OpenStack ConfigDrive NetworkData json format + + OpenStack network_data.json provides a 3 element dictionary + - "links" (links are network devices, physical or virtual) + - "networks" (networks are ip network configurations for one or more + links) + - services (non-ip services, like dns) + + networks and links are combined via network items referencing specific + links via a 'link_id' which maps to a links 'id' field. + + To convert this format to network_config yaml, we first iterate over the + links and then walk the network list to determine if any of the networks + utilize the current link; if so we generate a subnet entry for the device + + We also need to map network_data.json fields to network_config fields. For + example, the network_data links 'id' field is equivalent to network_config + 'name' field for devices. We apply more of this mapping to the various + link types that we encounter. 
+ + There are additional fields that are populated in the network_data.json + from OpenStack that are not relevant to network_config yaml, so we + enumerate a dictionary of valid keys for network_yaml and apply filtering + to drop these superflous keys from the network_config yaml. + """ + + # Dict of network_config key for filtering network_json + valid_keys = { + 'physical': [ + 'name', + 'type', + 'mac_address', + 'subnets', + 'params', + ], + 'subnet': [ + 'type', + 'address', + 'netmask', + 'broadcast', + 'metric', + 'gateway', + 'pointopoint', + 'mtu', + 'scope', + 'dns_nameservers', + 'dns_search', + 'routes', + ], + } + + links = network_json.get('links', []) + networks = network_json.get('networks', []) + services = network_json.get('services', []) + + config = [] + for link in links: + subnets = [] + cfg = {k: v for k, v in link.items() + if k in valid_keys['physical']} + cfg.update({'name': link['id']}) + for network in [net for net in networks + if net['link'] == link['id']]: + subnet = {k: v for k, v in network.items() + if k in valid_keys['subnet']} + if 'dhcp' in network['type']: + t = 'dhcp6' if network['type'].startswith('ipv6') else 'dhcp4' + subnet.update({ + 'type': t, + }) + else: + subnet.update({ + 'type': 'static', + 'address': network.get('ip_address'), + }) + subnets.append(subnet) + cfg.update({'subnets': subnets}) + if link['type'] in ['ethernet', 'vif', 'ovs', 'phy']: + cfg.update({ + 'type': 'physical', + 'mac_address': link['ethernet_mac_address']}) + elif link['type'] in ['bond']: + params = {} + for k, v in link.items(): + if k == 'bond_links': + continue + elif k.startswith('bond'): + params.update({k: v}) + cfg.update({ + 'bond_interfaces': copy.deepcopy(link['bond_links']), + 'params': params, + }) + elif link['type'] in ['vlan']: + cfg.update({ + 'name': "%s.%s" % (link['vlan_link'], + link['vlan_id']), + 'vlan_link': link['vlan_link'], + 'vlan_id': link['vlan_id'], + 'mac_address': link['vlan_mac_address'], + }) + else: + raise ValueError( + 'Unknown network_data link type: %s' % link['type']) + + config.append(cfg) + + for service in services: + cfg = copy.deepcopy(service) + cfg.update({'type': 'nameserver'}) + config.append(cfg) + + return {'version': 1, 'config': config} + + def convert_vendordata_json(data, recurse=True): """ data: a loaded json *object* (strings, arrays, dicts). return something suitable for cloudinit vendordata_raw. -- cgit v1.2.3 From db9d958a0e76c2c59298041a1355aba97fda000f Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 5 May 2016 16:38:53 -0700 Subject: Add the bridge net type --- cloudinit/net/__init__.py | 2 +- cloudinit/net/network_state.py | 39 +++++++++++++++++++++------------- cloudinit/sources/helpers/openstack.py | 5 +++++ 3 files changed, 30 insertions(+), 16 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index 31544fd8..cc154c57 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -262,7 +262,7 @@ def parse_deb_config(path): def parse_net_config_data(net_config): - """Parses the config, returns NetworkState dictionary + """Parses the config, returns NetworkState object :param net_config: curtin network config dict """ diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index d08e94fe..27d35256 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -15,6 +15,8 @@ # You should have received a copy of the GNU Affero General Public License # along with Curtin. 
If not, see . +import six + from cloudinit import log as logging from cloudinit import util from cloudinit.util import yaml_dumps as dump_config @@ -32,11 +34,31 @@ def from_state_file(state_file): state = util.read_conf(state_file) network_state = NetworkState() network_state.load(state) - return network_state -class NetworkState: +class CommandHandlerMeta(type): + """Metaclass that dynamically creates a 'command_handlers' attribute. + + This will scan the to-be-created class for methods that start with + 'handle_' and on finding those will populate a class attribute mapping + so that those methods can be quickly located and called. + """ + def __new__(cls, name, parents, dct): + command_handlers = {} + for attr_name, attr in six.iteritems(dct): + if six.callable(attr) and attr_name.startswith('handle_'): + handles_what = attr_name[len('handle_'):] + if handles_what: + command_handlers[handles_what] = attr + dct['command_handlers'] = command_handlers + return super(CommandHandlerMeta, cls).__new__(cls, name, + parents, dct) + + +@six.add_metaclass(CommandHandlerMeta) +class NetworkState(object): + def __init__(self, version=NETWORK_STATE_VERSION, config=None): self.version = version self.config = config @@ -48,18 +70,6 @@ class NetworkState: 'search': [], } } - self.command_handlers = self.get_command_handlers() - - def get_command_handlers(self): - METHOD_PREFIX = 'handle_' - methods = filter(lambda x: callable(getattr(self, x)) and - x.startswith(METHOD_PREFIX), dir(self)) - handlers = {} - for m in methods: - key = m.replace(METHOD_PREFIX, '') - handlers[key] = getattr(self, m) - - return handlers def dump(self): state = { @@ -83,7 +93,6 @@ class NetworkState: # v1 - direct attr mapping, except version for key in [k for k in required_keys if k not in ['version']]: setattr(self, key, state[key]) - self.command_handlers = self.get_command_handlers() def dump_network_state(self): return dump_config(self.network_state) diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index 475ccab3..845ea971 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -576,6 +576,11 @@ def convert_net_json(network_json): 'vlan_id': link['vlan_id'], 'mac_address': link['vlan_mac_address'], }) + elif link['type'] in ['bridge']: + cfg.update({ + 'type': 'bridge', + 'mac_address': link['ethernet_mac_address'], + 'mtu': link['mtu']}) else: raise ValueError( 'Unknown network_data link type: %s' % link['type']) -- cgit v1.2.3 From 26ea813d293467921ab6b1e32abd2ab8fcefa3bd Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 11 May 2016 14:18:02 -0700 Subject: Fix py26 for rhel (and older versions of python) --- cloudinit/net/cmdline.py | 26 ++++--- cloudinit/sources/DataSourceAzure.py | 2 +- cloudinit/sources/helpers/openstack.py | 8 +- cloudinit/util.py | 25 ++++++- requirements.txt | 8 +- setup.py | 1 - test-requirements.txt | 1 + tests/unittests/helpers.py | 86 +++------------------- tests/unittests/test__init__.py | 18 ++--- tests/unittests/test_cli.py | 7 +- tests/unittests/test_cs_util.py | 42 ++++------- tests/unittests/test_datasource/test_azure.py | 12 +-- .../unittests/test_datasource/test_azure_helper.py | 12 +-- tests/unittests/test_datasource/test_cloudsigma.py | 26 +++++-- tests/unittests/test_datasource/test_cloudstack.py | 11 +-- .../unittests/test_datasource/test_configdrive.py | 11 +-- tests/unittests/test_datasource/test_nocloud.py | 14 +--- tests/unittests/test_datasource/test_smartos.py | 21 ++++-- 
tests/unittests/test_net.py | 7 +- tests/unittests/test_reporting.py | 4 +- tests/unittests/test_rh_subscription.py | 23 ++++-- tox.ini | 36 ++++----- 22 files changed, 163 insertions(+), 238 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py index 958c264b..21bc35d9 100644 --- a/cloudinit/net/cmdline.py +++ b/cloudinit/net/cmdline.py @@ -20,7 +20,6 @@ import base64 import glob import gzip import io -import shlex from cloudinit.net import get_devicelist from cloudinit.net import sys_netdev_info @@ -34,13 +33,17 @@ def _load_shell_content(content, add_empty=False, empty_val=None): then add entries in to the returned dictionary for 'VAR=' variables. Set their value to empty_val.""" data = {} - for line in shlex.split(content): - key, value = line.split("=", 1) - if not value: - value = empty_val - if add_empty or value: - data[key] = value - + for line in util.shlex_split(content): + try: + key, value = line.split("=", 1) + except ValueError: + # Unsplittable line, skip it... + pass + else: + if not value: + value = empty_val + if add_empty or value: + data[key] = value return data @@ -60,6 +63,9 @@ def _klibc_to_config_entry(content, mac_addrs=None): if mac_addrs is None: mac_addrs = {} + print("Reading content") + print(content) + data = _load_shell_content(content) try: name = data['DEVICE'] @@ -185,7 +191,7 @@ def read_kernel_cmdline_config(files=None, mac_addrs=None, cmdline=None): return None if mac_addrs is None: - mac_addrs = {k: sys_netdev_info(k, 'address') - for k in get_devicelist()} + mac_addrs = dict((k, sys_netdev_info(k, 'address')) + for k in get_devicelist()) return config_from_klibc_net_cfg(files=files, mac_addrs=mac_addrs) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 698f4cac..66c8ced8 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -421,7 +421,7 @@ def write_files(datadir, files, dirmode=None): elem.text = DEF_PASSWD_REDACTION return ET.tostring(root) except Exception: - LOG.critical("failed to redact userpassword in {}".format(fname)) + LOG.critical("failed to redact userpassword in %s", fname) return cnt if not datadir: diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index 845ea971..b2acc648 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -534,13 +534,13 @@ def convert_net_json(network_json): config = [] for link in links: subnets = [] - cfg = {k: v for k, v in link.items() - if k in valid_keys['physical']} + cfg = dict((k, v) for k, v in link.items() + if k in valid_keys['physical']) cfg.update({'name': link['id']}) for network in [net for net in networks if net['link'] == link['id']]: - subnet = {k: v for k, v in network.items() - if k in valid_keys['subnet']} + subnet = dict((k, v) for k, v in network.items() + if k in valid_keys['subnet']) if 'dhcp' in network['type']: t = 'dhcp6' if network['type'].startswith('ipv6') else 'dhcp4' subnet.update({ diff --git a/cloudinit/util.py b/cloudinit/util.py index 0d21e11b..7562b97a 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -37,6 +37,7 @@ import pwd import random import re import shutil +import shlex import socket import stat import string @@ -81,6 +82,7 @@ CONTAINER_TESTS = (['systemd-detect-virt', '--quiet', '--container'], ['lxc-is-container']) PROC_CMDLINE = None +PY26 = sys.version_info[0:2] == (2, 6) def decode_binary(blob, encoding='utf-8'): @@ -171,7 
+173,8 @@ class ProcessExecutionError(IOError): def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None, - description=None, reason=None): + description=None, reason=None, + errno=None): if not cmd: self.cmd = '-' else: @@ -202,6 +205,7 @@ class ProcessExecutionError(IOError): else: self.reason = '-' + self.errno = errno message = self.MESSAGE_TMPL % { 'description': self.description, 'cmd': self.cmd, @@ -1147,7 +1151,14 @@ def find_devs_with(criteria=None, oformat='device', options.append(path) cmd = blk_id_cmd + options # See man blkid for why 2 is added - (out, _err) = subp(cmd, rcs=[0, 2]) + try: + (out, _err) = subp(cmd, rcs=[0, 2]) + except ProcessExecutionError as e: + if e.errno == errno.ENOENT: + # blkid not found... + out = "" + else: + raise entries = [] for line in out.splitlines(): line = line.strip() @@ -1191,6 +1202,13 @@ def load_file(fname, read_cb=None, quiet=False, decode=True): return contents +def shlex_split(blob): + if PY26 and isinstance(blob, six.text_type): + # Older versions don't support unicode input + blob = blob.encode("utf8") + return shlex.split(blob) + + def get_cmdline(): if 'DEBUG_PROC_CMDLINE' in os.environ: return os.environ["DEBUG_PROC_CMDLINE"] @@ -1696,7 +1714,8 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False, sp = subprocess.Popen(args, **kws) (out, err) = sp.communicate(data) except OSError as e: - raise ProcessExecutionError(cmd=args, reason=e) + raise ProcessExecutionError(cmd=args, reason=e, + errno=e.errno) rc = sp.returncode if rc not in rcs: raise ProcessExecutionError(stdout=out, stderr=err, diff --git a/requirements.txt b/requirements.txt index 19c88857..cc1dc05f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -11,8 +11,12 @@ PrettyTable oauthlib # This one is currently used only by the CloudSigma and SmartOS datasources. -# If these datasources are removed, this is no longer needed -pyserial +# If these datasources are removed, this is no longer needed. +# +# This will not work in py2.6 so it is only optionally installed on +# python 2.7 and later. +# +# pyserial # This is only needed for places where we need to support configs in a manner # that the built-in config parser is not sufficent (ie diff --git a/setup.py b/setup.py index f86727b2..6b4fc031 100755 --- a/setup.py +++ b/setup.py @@ -197,7 +197,6 @@ requirements = read_requires() if sys.version_info < (3,): requirements.append('cheetah') - setuptools.setup( name='cloud-init', version=get_version(), diff --git a/test-requirements.txt b/test-requirements.txt index 9b3d07c5..d9c757e6 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -5,3 +5,4 @@ pep8==1.5.7 pyflakes contextlib2 setuptools +unittest2 diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py index fb9c83a7..33f89254 100644 --- a/tests/unittests/helpers.py +++ b/tests/unittests/helpers.py @@ -7,12 +7,10 @@ import shutil import tempfile import unittest +import mock import six +import unittest2 -try: - from unittest import mock -except ImportError: - import mock try: from contextlib import ExitStack except ImportError: @@ -21,6 +19,9 @@ except ImportError: from cloudinit import helpers as ch from cloudinit import util +# Used for skipping tests +SkipTest = unittest2.SkipTest + # Used for detecting different python versions PY2 = False PY26 = False @@ -44,79 +45,6 @@ else: if _PY_MINOR == 4 and _PY_MICRO < 3: FIX_HTTPRETTY = True -if PY26: - # For now add these on, taken from python 2.7 + slightly adjusted. 
Drop - # all this once Python 2.6 is dropped as a minimum requirement. - class TestCase(unittest.TestCase): - def setUp(self): - super(TestCase, self).setUp() - self.__all_cleanups = ExitStack() - - def tearDown(self): - self.__all_cleanups.close() - unittest.TestCase.tearDown(self) - - def addCleanup(self, function, *args, **kws): - self.__all_cleanups.callback(function, *args, **kws) - - def assertIs(self, expr1, expr2, msg=None): - if expr1 is not expr2: - standardMsg = '%r is not %r' % (expr1, expr2) - self.fail(self._formatMessage(msg, standardMsg)) - - def assertIn(self, member, container, msg=None): - if member not in container: - standardMsg = '%r not found in %r' % (member, container) - self.fail(self._formatMessage(msg, standardMsg)) - - def assertNotIn(self, member, container, msg=None): - if member in container: - standardMsg = '%r unexpectedly found in %r' - standardMsg = standardMsg % (member, container) - self.fail(self._formatMessage(msg, standardMsg)) - - def assertIsNone(self, value, msg=None): - if value is not None: - standardMsg = '%r is not None' - standardMsg = standardMsg % (value) - self.fail(self._formatMessage(msg, standardMsg)) - - def assertIsInstance(self, obj, cls, msg=None): - """Same as self.assertTrue(isinstance(obj, cls)), with a nicer - default message.""" - if not isinstance(obj, cls): - standardMsg = '%s is not an instance of %r' % (repr(obj), cls) - self.fail(self._formatMessage(msg, standardMsg)) - - def assertDictContainsSubset(self, expected, actual, msg=None): - missing = [] - mismatched = [] - for k, v in expected.items(): - if k not in actual: - missing.append(k) - elif actual[k] != v: - mismatched.append('%r, expected: %r, actual: %r' - % (k, v, actual[k])) - - if len(missing) == 0 and len(mismatched) == 0: - return - - standardMsg = '' - if missing: - standardMsg = 'Missing: %r' % ','.join(m for m in missing) - if mismatched: - if standardMsg: - standardMsg += '; ' - standardMsg += 'Mismatched values: %s' % ','.join(mismatched) - - self.fail(self._formatMessage(msg, standardMsg)) - - -else: - class TestCase(unittest.TestCase): - pass - - # Makes the old path start # with new base instead of whatever # it previously had @@ -151,6 +79,10 @@ def retarget_many_wrapper(new_base, am, old_func): return wrapper +class TestCase(unittest2.TestCase): + pass + + class ResourceUsingTestCase(TestCase): def setUp(self): super(ResourceUsingTestCase, self).setUp() diff --git a/tests/unittests/test__init__.py b/tests/unittests/test__init__.py index 153f1658..a9b35afe 100644 --- a/tests/unittests/test__init__.py +++ b/tests/unittests/test__init__.py @@ -1,16 +1,7 @@ import os import shutil import tempfile -import unittest - -try: - from unittest import mock -except ImportError: - import mock -try: - from contextlib import ExitStack -except ImportError: - from contextlib2 import ExitStack +import unittest2 from cloudinit import handlers from cloudinit import helpers @@ -18,7 +9,7 @@ from cloudinit import settings from cloudinit import url_helper from cloudinit import util -from .helpers import TestCase +from .helpers import TestCase, ExitStack, mock class FakeModule(handlers.Handler): @@ -99,9 +90,10 @@ class TestWalkerHandleHandler(TestCase): self.assertEqual(self.data['handlercount'], 0) -class TestHandlerHandlePart(unittest.TestCase): +class TestHandlerHandlePart(TestCase): def setUp(self): + super(TestHandlerHandlePart, self).setUp() self.data = "fake data" self.ctype = "fake ctype" self.filename = "fake filename" @@ -177,7 +169,7 @@ class 
TestHandlerHandlePart(unittest.TestCase): self.data, self.ctype, self.filename, self.payload) -class TestCmdlineUrl(unittest.TestCase): +class TestCmdlineUrl(TestCase): def test_invalid_content(self): url = "http://example.com/foo" key = "mykey" diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py index ed863399..f8fe7c9b 100644 --- a/tests/unittests/test_cli.py +++ b/tests/unittests/test_cli.py @@ -4,12 +4,7 @@ import sys import six from . import helpers as test_helpers - -try: - from unittest import mock -except ImportError: - import mock - +mock = test_helpers.mock BIN_CLOUDINIT = "bin/cloud-init" diff --git a/tests/unittests/test_cs_util.py b/tests/unittests/test_cs_util.py index d7273035..8c9ac0cd 100644 --- a/tests/unittests/test_cs_util.py +++ b/tests/unittests/test_cs_util.py @@ -1,20 +1,14 @@ from __future__ import print_function -import sys -import unittest +from . import helpers as test_helpers -from cloudinit.cs_utils import Cepko +import unittest2 try: - skip = unittest.skip -except AttributeError: - # Python 2.6. Doesn't have to be high fidelity. - def skip(reason): - def decorator(func): - def wrapper(*args, **kws): - print(reason, file=sys.stderr) - return wrapper - return decorator + from cloudinit.cs_utils import Cepko + WILL_WORK = True +except ImportError: + WILL_WORK = False SERVER_CONTEXT = { @@ -32,29 +26,21 @@ SERVER_CONTEXT = { } -class CepkoMock(Cepko): - def all(self): - return SERVER_CONTEXT +if WILL_WORK: + class CepkoMock(Cepko): + def all(self): + return SERVER_CONTEXT - def get(self, key="", request_pattern=None): - return SERVER_CONTEXT['tags'] + def get(self, key="", request_pattern=None): + return SERVER_CONTEXT['tags'] # 2015-01-22 BAW: This test is completely useless because it only ever tests # the CepkoMock object. Even in its original form, I don't think it ever # touched the underlying Cepko class methods. 
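Stepping back from the individual test hunks for a moment: much of the mechanical churn in this commit exists because dict comprehensions were only added in Python 2.7, so each occurrence is rewritten as dict() over a generator expression (see the cloudinit/sources/helpers/openstack.py hunk earlier in this commit). A self-contained illustration of the two equivalent spellings, with made-up values:

    link = {'id': 'tap0', 'type': 'phy', 'vif_id': 'ignored',
            'ethernet_mac_address': 'fa:16:3e:00:00:01'}
    valid = ['name', 'type', 'mac_address', 'subnets', 'params']

    # Python 2.7 and later only:
    cfg = {k: v for k, v in link.items() if k in valid}

    # Equivalent, and accepted by Python 2.6 as well:
    cfg = dict((k, v) for k, v in link.items() if k in valid)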
-@skip('This test is completely useless') -class CepkoResultTests(unittest.TestCase): +class CepkoResultTests(test_helpers.TestCase): def setUp(self): - pass - # self.mocked = self.mocker.replace("cloudinit.cs_utils.Cepko", - # spec=CepkoMock, - # count=False, - # passthrough=False) - # self.mocked() - # self.mocker.result(CepkoMock()) - # self.mocker.replay() - # self.c = Cepko() + raise unittest2.SkipTest('This test is completely useless') def test_getitem(self): result = self.c.all() diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 444e2799..aafe1bc2 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -1,16 +1,8 @@ from cloudinit import helpers from cloudinit.util import b64e, decode_binary, load_file from cloudinit.sources import DataSourceAzure -from ..helpers import TestCase, populate_dir -try: - from unittest import mock -except ImportError: - import mock -try: - from contextlib import ExitStack -except ImportError: - from contextlib2 import ExitStack +from ..helpers import TestCase, populate_dir, mock, ExitStack, PY26, SkipTest import crypt import os @@ -83,6 +75,8 @@ class TestAzureDataSource(TestCase): def setUp(self): super(TestAzureDataSource, self).setUp() + if PY26: + raise SkipTest("Does not work on python 2.6") self.tmp = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, self.tmp) diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py index 1134199b..736f4463 100644 --- a/tests/unittests/test_datasource/test_azure_helper.py +++ b/tests/unittests/test_datasource/test_azure_helper.py @@ -1,17 +1,11 @@ import os from cloudinit.sources.helpers import azure as azure_helper -from ..helpers import TestCase -try: - from unittest import mock -except ImportError: - import mock +from ..helpers import ExitStack +from ..helpers import TestCase -try: - from contextlib import ExitStack -except ImportError: - from contextlib2 import ExitStack +from ..helpers import mock GOAL_STATE_TEMPLATE = """\ diff --git a/tests/unittests/test_datasource/test_cloudsigma.py b/tests/unittests/test_datasource/test_cloudsigma.py index 772d189a..11968796 100644 --- a/tests/unittests/test_datasource/test_cloudsigma.py +++ b/tests/unittests/test_datasource/test_cloudsigma.py @@ -1,11 +1,18 @@ # coding: utf-8 + import copy -from cloudinit.cs_utils import Cepko -from cloudinit.sources import DataSourceCloudSigma +try: + # Serial does not work on py2.6 (anymore) + import pyserial + from cloudinit.cs_utils import Cepko + from cloudinit.sources import DataSourceCloudSigma + WILL_WORK = True +except ImportError: + WILL_WORK = False from .. 
import helpers as test_helpers - +from ..helpers import SkipTest SERVER_CONTEXT = { "cpu": 1000, @@ -29,17 +36,20 @@ SERVER_CONTEXT = { } -class CepkoMock(Cepko): - def __init__(self, mocked_context): - self.result = mocked_context +if WILL_WORK: + class CepkoMock(Cepko): + def __init__(self, mocked_context): + self.result = mocked_context - def all(self): - return self + def all(self): + return self class DataSourceCloudSigmaTest(test_helpers.TestCase): def setUp(self): super(DataSourceCloudSigmaTest, self).setUp() + if not WILL_WORK: + raise SkipTest("Datasource testing not supported") self.datasource = DataSourceCloudSigma.DataSourceCloudSigma("", "", "") self.datasource.is_running_in_cloudsigma = lambda: True self.datasource.cepko = CepkoMock(SERVER_CONTEXT) diff --git a/tests/unittests/test_datasource/test_cloudstack.py b/tests/unittests/test_datasource/test_cloudstack.py index 656d80d1..4d6b47b4 100644 --- a/tests/unittests/test_datasource/test_cloudstack.py +++ b/tests/unittests/test_datasource/test_cloudstack.py @@ -1,15 +1,6 @@ from cloudinit import helpers from cloudinit.sources.DataSourceCloudStack import DataSourceCloudStack -from ..helpers import TestCase - -try: - from unittest import mock -except ImportError: - import mock -try: - from contextlib import ExitStack -except ImportError: - from contextlib2 import ExitStack +from ..helpers import TestCase, mock, ExitStack class TestCloudStackPasswordFetching(TestCase): diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index 8beaf95e..14cc8266 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -5,22 +5,13 @@ import shutil import six import tempfile -try: - from unittest import mock -except ImportError: - import mock -try: - from contextlib import ExitStack -except ImportError: - from contextlib2 import ExitStack - from cloudinit import helpers from cloudinit import settings from cloudinit.sources import DataSourceConfigDrive as ds from cloudinit.sources.helpers import openstack from cloudinit import util -from ..helpers import TestCase +from ..helpers import TestCase, ExitStack, mock PUBKEY = u'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n' diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py index 2d5fc37c..a92dd3b3 100644 --- a/tests/unittests/test_datasource/test_nocloud.py +++ b/tests/unittests/test_datasource/test_nocloud.py @@ -1,22 +1,12 @@ from cloudinit import helpers from cloudinit.sources import DataSourceNoCloud from cloudinit import util -from ..helpers import TestCase, populate_dir +from ..helpers import TestCase, populate_dir, mock, ExitStack import os import yaml import shutil import tempfile -import unittest - -try: - from unittest import mock -except ImportError: - import mock -try: - from contextlib import ExitStack -except ImportError: - from contextlib2 import ExitStack class TestNoCloudDataSource(TestCase): @@ -139,7 +129,7 @@ class TestNoCloudDataSource(TestCase): self.assertTrue(ret) -class TestParseCommandLineData(unittest.TestCase): +class TestParseCommandLineData(TestCase): def test_parse_cmdline_data_valid(self): ds_id = "ds=nocloud" diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py index 5c49966a..6b628276 100644 --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ 
-33,19 +33,21 @@ import tempfile import uuid from binascii import crc32 -import serial +try: + # Serial does not work on py2.6 (anymore) + import serial + from cloudinit.sources import DataSourceSmartOS + WILL_WORK = True +except ImportError: + WILL_WORK = False + import six from cloudinit import helpers as c_helpers -from cloudinit.sources import DataSourceSmartOS from cloudinit.util import b64e from .. import helpers - -try: - from unittest import mock -except ImportError: - import mock +from ..helpers import mock, SkipTest MOCK_RETURNS = { 'hostname': 'test-host', @@ -79,7 +81,8 @@ def get_mock_client(mockdata): class TestSmartOSDataSource(helpers.FilesystemMockingTestCase): def setUp(self): super(TestSmartOSDataSource, self).setUp() - + if not WILL_WORK: + raise SkipTest("This test will not work") self.tmp = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, self.tmp) self.legacy_user_d = tempfile.mkdtemp() @@ -445,6 +448,8 @@ class TestJoyentMetadataClient(helpers.FilesystemMockingTestCase): def setUp(self): super(TestJoyentMetadataClient, self).setUp() + if not WILL_WORK: + raise SkipTest("This test will not work") self.serial = mock.MagicMock(spec=serial.Serial) self.request_id = 0xabcdef12 self.metadata_value = 'value' diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 005957de..ed2c6d0f 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -78,8 +78,9 @@ class TestEniNetRendering(TestCase): @mock.patch("cloudinit.net.sys_dev_path") @mock.patch("cloudinit.net.sys_netdev_info") @mock.patch("cloudinit.net.get_devicelist") - def test_generation(self, mock_get_devicelist, mock_sys_netdev_info, - mock_sys_dev_path): + def test_default_generation(self, mock_get_devicelist, + mock_sys_netdev_info, + mock_sys_dev_path): mock_get_devicelist.return_value = ['eth1000', 'lo'] dev_characteristics = { @@ -138,7 +139,7 @@ iface eth1000 inet dhcp self.assertEqual(expected.lstrip(), contents.lstrip()) -class TestNetConfigParsing(TestCase): +class TestCmdlineConfigParsing(TestCase): simple_cfg = { 'config': [{"type": "physical", "name": "eth0", "mac_address": "c0:d6:9f:2c:e8:80", diff --git a/tests/unittests/test_reporting.py b/tests/unittests/test_reporting.py index 32356ef9..493bb261 100644 --- a/tests/unittests/test_reporting.py +++ b/tests/unittests/test_reporting.py @@ -7,7 +7,9 @@ from cloudinit import reporting from cloudinit.reporting import handlers from cloudinit.reporting import events -from .helpers import (mock, TestCase) +import mock + +from .helpers import TestCase def _fake_registry(): diff --git a/tests/unittests/test_rh_subscription.py b/tests/unittests/test_rh_subscription.py index 8c586ad7..13045aaf 100644 --- a/tests/unittests/test_rh_subscription.py +++ b/tests/unittests/test_rh_subscription.py @@ -1,11 +1,24 @@ +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +import logging + from cloudinit import util from cloudinit.config import cc_rh_subscription -import logging -import mock -import unittest + +from .helpers import TestCase, mock -class GoodTests(unittest.TestCase): +class GoodTests(TestCase): def setUp(self): super(GoodTests, self).setUp() self.name = "cc_rh_subscription" @@ -92,7 +105,7 @@ class GoodTests(unittest.TestCase): self.assertEqual(self.SM._sub_man_cli.call_count, 9) -class TestBadInput(unittest.TestCase): +class TestBadInput(TestCase): name = "cc_rh_subscription" cloud_init = None log = logging.getLogger("bad_tests") diff --git a/tox.ini b/tox.ini index bd7c27dd..3210b0ee 100644 --- a/tox.ini +++ b/tox.ini @@ -1,32 +1,32 @@ [tox] -envlist = py27,py3,pyflakes +envlist = py27,py26,py3,pyflakes recreate = True +usedevelop = True [testenv] commands = python -m nose {posargs:tests} deps = -r{toxinidir}/test-requirements.txt - -r{toxinidir}/requirements.txt - -[testenv:py3] -basepython = python3 + -r{toxinidir}/requirements.txt +setenv = + LC_ALL = en_US.utf-8 [testenv:pyflakes] basepython = python3 commands = {envpython} -m pyflakes {posargs:cloudinit/ tests/ tools/} - {envpython} -m pep8 {posargs:cloudinit/ tests/ tools/} + {envpython} -m pep8 {posargs:cloudinit/ tests/ tools/} -# https://github.com/gabrielfalcao/HTTPretty/issues/223 -setenv = - LC_ALL = en_US.utf-8 +[testenv:py3] +basepython = python3 +deps = -r{toxinidir}/test-requirements.txt + -r{toxinidir}/requirements.txt + pyserial + +[testenv:py27] +deps = -r{toxinidir}/test-requirements.txt + -r{toxinidir}/requirements.txt + pyserial [testenv:py26] commands = nosetests {posargs:tests} -deps = - contextlib2 - httpretty>=0.7.1 - mock - nose - pep8==1.5.7 - pyflakes -setenv = - LC_ALL = C +deps = -r{toxinidir}/test-requirements.txt + -r{toxinidir}/requirements.txt -- cgit v1.2.3 From 12d7ee2cb6589b866ab26b508b15c65326481d6c Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 11 May 2016 16:47:50 -0700 Subject: Use a fake serial module that will allow tests to contine Instead of aborting all serial using tests instead just create a serial module in cloudinit that will create a fake and broken serial class when pyserial is not actually installed. This allows for using the datasource and tests that exist in a more functional and tested manner (even when pyserial is not found). --- cloudinit/cs_utils.py | 3 +- cloudinit/serial.py | 50 ++++++++++++++++++++++ cloudinit/sources/DataSourceSmartOS.py | 4 +- tests/unittests/test_cs_util.py | 21 +++------ tests/unittests/test_datasource/test_cloudsigma.py | 23 +++------- tests/unittests/test_datasource/test_smartos.py | 15 ++----- 6 files changed, 71 insertions(+), 45 deletions(-) create mode 100644 cloudinit/serial.py (limited to 'cloudinit/sources') diff --git a/cloudinit/cs_utils.py b/cloudinit/cs_utils.py index 83ac1a0e..412431f2 100644 --- a/cloudinit/cs_utils.py +++ b/cloudinit/cs_utils.py @@ -33,7 +33,8 @@ API Docs: http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html import json import platform -import serial +from cloudinit import serial + # these high timeouts are necessary as read may read a lot of data. READ_TIMEOUT = 60 diff --git a/cloudinit/serial.py b/cloudinit/serial.py new file mode 100644 index 00000000..af45c13e --- /dev/null +++ b/cloudinit/serial.py @@ -0,0 +1,50 @@ +# vi: ts=4 expandtab +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + +from __future__ import absolute_import + +try: + from serial import Serial +except ImportError: + # For older versions of python (ie 2.6) pyserial may not exist and/or + # work and/or be installed, so make a dummy/fake serial that blows up + # when used... + class Serial(object): + def __init__(self, *args, **kwargs): + pass + + @staticmethod + def isOpen(): + return False + + @staticmethod + def write(data): + raise IOError("Unable to perform serial `write` operation," + " pyserial not installed.") + + @staticmethod + def readline(): + raise IOError("Unable to perform serial `readline` operation," + " pyserial not installed.") + + @staticmethod + def flush(): + raise IOError("Unable to perform serial `flush` operation," + " pyserial not installed.") + + @staticmethod + def read(size=1): + raise IOError("Unable to perform serial `read` operation," + " pyserial not installed.") diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index 6cbd8dfa..c7641eb3 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -40,13 +40,11 @@ import re import socket import stat -import serial - from cloudinit import log as logging +from cloudinit import serial from cloudinit import sources from cloudinit import util - LOG = logging.getLogger(__name__) SMARTOS_ATTRIB_MAP = { diff --git a/tests/unittests/test_cs_util.py b/tests/unittests/test_cs_util.py index 8c9ac0cd..56c9ce9e 100644 --- a/tests/unittests/test_cs_util.py +++ b/tests/unittests/test_cs_util.py @@ -2,13 +2,7 @@ from __future__ import print_function from . import helpers as test_helpers -import unittest2 - -try: - from cloudinit.cs_utils import Cepko - WILL_WORK = True -except ImportError: - WILL_WORK = False +from cloudinit.cs_utils import Cepko SERVER_CONTEXT = { @@ -26,13 +20,12 @@ SERVER_CONTEXT = { } -if WILL_WORK: - class CepkoMock(Cepko): - def all(self): - return SERVER_CONTEXT +class CepkoMock(Cepko): + def all(self): + return SERVER_CONTEXT - def get(self, key="", request_pattern=None): - return SERVER_CONTEXT['tags'] + def get(self, key="", request_pattern=None): + return SERVER_CONTEXT['tags'] # 2015-01-22 BAW: This test is completely useless because it only ever tests @@ -40,7 +33,7 @@ if WILL_WORK: # touched the underlying Cepko class methods. class CepkoResultTests(test_helpers.TestCase): def setUp(self): - raise unittest2.SkipTest('This test is completely useless') + raise test_helpers.SkipTest('This test is completely useless') def test_getitem(self): result = self.c.all() diff --git a/tests/unittests/test_datasource/test_cloudsigma.py b/tests/unittests/test_datasource/test_cloudsigma.py index 11968796..7950fc52 100644 --- a/tests/unittests/test_datasource/test_cloudsigma.py +++ b/tests/unittests/test_datasource/test_cloudsigma.py @@ -2,14 +2,8 @@ import copy -try: - # Serial does not work on py2.6 (anymore) - import pyserial - from cloudinit.cs_utils import Cepko - from cloudinit.sources import DataSourceCloudSigma - WILL_WORK = True -except ImportError: - WILL_WORK = False +from cloudinit.cs_utils import Cepko +from cloudinit.sources import DataSourceCloudSigma from .. 
import helpers as test_helpers from ..helpers import SkipTest @@ -36,20 +30,17 @@ SERVER_CONTEXT = { } -if WILL_WORK: - class CepkoMock(Cepko): - def __init__(self, mocked_context): - self.result = mocked_context +class CepkoMock(Cepko): + def __init__(self, mocked_context): + self.result = mocked_context - def all(self): - return self + def all(self): + return self class DataSourceCloudSigmaTest(test_helpers.TestCase): def setUp(self): super(DataSourceCloudSigmaTest, self).setUp() - if not WILL_WORK: - raise SkipTest("Datasource testing not supported") self.datasource = DataSourceCloudSigma.DataSourceCloudSigma("", "", "") self.datasource.is_running_in_cloudsigma = lambda: True self.datasource.cepko = CepkoMock(SERVER_CONTEXT) diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py index 6b628276..f536ef4f 100644 --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ -33,13 +33,8 @@ import tempfile import uuid from binascii import crc32 -try: - # Serial does not work on py2.6 (anymore) - import serial - from cloudinit.sources import DataSourceSmartOS - WILL_WORK = True -except ImportError: - WILL_WORK = False +from cloudinit import serial +from cloudinit.sources import DataSourceSmartOS import six @@ -81,8 +76,7 @@ def get_mock_client(mockdata): class TestSmartOSDataSource(helpers.FilesystemMockingTestCase): def setUp(self): super(TestSmartOSDataSource, self).setUp() - if not WILL_WORK: - raise SkipTest("This test will not work") + self.tmp = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, self.tmp) self.legacy_user_d = tempfile.mkdtemp() @@ -448,8 +442,7 @@ class TestJoyentMetadataClient(helpers.FilesystemMockingTestCase): def setUp(self): super(TestJoyentMetadataClient, self).setUp() - if not WILL_WORK: - raise SkipTest("This test will not work") + self.serial = mock.MagicMock(spec=serial.Serial) self.request_id = 0xabcdef12 self.metadata_value = 'value' -- cgit v1.2.3 From 880d9fc2f9c62abf19b1506595aa81e5417dea45 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 19 May 2016 14:13:07 -0700 Subject: Adjust net module to be more isolated This allows it to be used outside of cloudinit more easily in the future. --- cloudinit/net/__init__.py | 108 ++++++++++++++++++--------------- cloudinit/net/cmdline.py | 15 ++++- cloudinit/net/eni.py | 35 +++++------ cloudinit/net/network_state.py | 38 +++++++++--- cloudinit/sources/helpers/openstack.py | 4 ++ cloudinit/util.py | 9 --- tests/unittests/test_net.py | 7 ++- 7 files changed, 129 insertions(+), 87 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index 0202cbd8..07e7307e 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -17,44 +17,81 @@ # along with Curtin. If not, see . import errno +import logging import os - -from cloudinit import log as logging -from cloudinit.net import network_state -from cloudinit import util - +import six +import yaml LOG = logging.getLogger(__name__) SYS_CLASS_NET = "/sys/class/net/" LINKS_FNAME_PREFIX = "etc/systemd/network/50-cloud-init-" DEFAULT_PRIMARY_INTERFACE = 'eth0' +# NOTE(harlowja): some of these are similar to what is in cloudinit main +# source or utils tree/module but the reason that is done is so that this +# whole module can be easily extracted and placed into other +# code-bases (curtin for example). 
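To make the note above concrete: once the rest of this commit is applied, a hypothetical consumer outside of cloud-init (curtin, or a one-off script) can parse and render a network config without touching cloudinit.util or cloudinit.log. This is only a sketch; the config values are borrowed from the test config used later in this series, and the output directory is arbitrary.

    from cloudinit.net import eni
    from cloudinit.net import network_state

    cfg = {'version': 1,
           'config': [{'type': 'physical', 'name': 'eth0',
                       'mac_address': 'c0:d6:9f:2c:e8:80',
                       'subnets': [{'type': 'dhcp'}]}]}
    # Build the network_state dict from the v1 config...
    ns = network_state.parse_net_config_data(cfg, skip_broken=False)
    # ...and render it as a Debian-style interfaces file.
    eni.Renderer().render_network_state('/tmp/render', ns,
                                        eni='interfaces',
                                        links_prefix=None, netrules=None)
    # /tmp/render/interfaces now contains an 'iface eth0 inet dhcp' stanza.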
-def sys_dev_path(devname, path=""): - return SYS_CLASS_NET + devname + "/" + path +def write_file(path, content): + """Simple writing a file helper.""" + base_path = os.path.dirname(path) + if not os.path.isdir(base_path): + os.makedirs(base_path) + with open(path, "wb+") as fh: + if isinstance(content, six.text_type): + content = content.encode("utf8") + fh.write(content) -def read_sys_net(devname, path, translate=None, enoent=None, keyerror=None): +def read_file(path, decode='utf8', enoent=None): try: - contents = "" - with open(sys_dev_path(devname, path), "r") as fp: - contents = fp.read().strip() - if translate is None: - return contents - - try: - return translate.get(contents) - except KeyError: - LOG.debug("found unexpected value '%s' in '%s/%s'", contents, - devname, path) - if keyerror is not None: - return keyerror - raise + with open(path, "rb") as fh: + contents = fh.load() except OSError as e: if e.errno == errno.ENOENT and enoent is not None: return enoent raise + if decode: + return contents.decode(decode) + return contents + + +def dump_yaml(obj): + return yaml.safe_dump(obj, + line_break="\n", + indent=4, + explicit_start=True, + explicit_end=True, + default_flow_style=False) + + +def read_yaml_file(path): + val = yaml.safe_load(read_file(path)) + if not isinstance(val, dict): + gotten_type_name = type(val).__name__ + raise TypeError("Expected dict to be loaded from %s, got" + " '%s' instead" % (path, gotten_type_name)) + return val + + +def sys_dev_path(devname, path=""): + return SYS_CLASS_NET + devname + "/" + path + + +def read_sys_net(devname, path, translate=None, enoent=None, keyerror=None): + contents = read_file(sys_dev_path(devname, path), enoent=enoent) + contents = contents.strip() + if translate is None: + return contents + try: + return translate.get(contents) + except KeyError: + LOG.debug("found unexpected value '%s' in '%s/%s'", contents, + devname, path) + if keyerror is not None: + return keyerror + raise def is_up(devname): @@ -107,31 +144,6 @@ class ParserError(Exception): """Raised when parser has issue parsing the interfaces file.""" -def parse_net_config_data(net_config, skip_broken=True): - """Parses the config, returns NetworkState object - - :param net_config: curtin network config dict - """ - state = None - if 'version' in net_config and 'config' in net_config: - ns = network_state.NetworkState(version=net_config.get('version'), - config=net_config.get('config')) - ns.parse_config(skip_broken=skip_broken) - state = ns.network_state - return state - - -def parse_net_config(path, skip_broken=True): - """Parses a curtin network configuration file and - return network state""" - ns = None - net_config = util.read_conf(path) - if 'network' in net_config: - ns = parse_net_config_data(net_config.get('network'), - skip_broken=skip_broken) - return ns - - def is_disabled_cfg(cfg): if not cfg or not isinstance(cfg, dict): return False @@ -146,7 +158,7 @@ def sys_netdev_info(name, field): fname = os.path.join(SYS_CLASS_NET, name, field) if not os.path.exists(fname): raise OSError("%s: could not find sysfs entry: %s" % (name, fname)) - data = util.load_file(fname) + data = read_file(fname) if data[-1] == '\n': data = data[:-1] return data diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py index f5712533..b85d4b0a 100644 --- a/cloudinit/net/cmdline.py +++ b/cloudinit/net/cmdline.py @@ -20,12 +20,25 @@ import base64 import glob import gzip import io +import shlex +import sys + +import six from cloudinit.net import get_devicelist from 
cloudinit.net import sys_netdev_info from cloudinit import util +PY26 = sys.version_info[0:2] == (2, 6) + + +def _shlex_split(blob): + if PY26 and isinstance(blob, six.text_type): + # Older versions don't support unicode input + blob = blob.encode("utf8") + return shlex.split(blob) + def _load_shell_content(content, add_empty=False, empty_val=None): """Given shell like syntax (key=value\nkey2=value2\n) in content @@ -33,7 +46,7 @@ def _load_shell_content(content, add_empty=False, empty_val=None): then add entries in to the returned dictionary for 'VAR=' variables. Set their value to empty_val.""" data = {} - for line in util.shlex_split(content): + for line in _shlex_split(content): try: key, value = line.split("=", 1) except ValueError: diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py index b1bdac24..adb31c22 100644 --- a/cloudinit/net/eni.py +++ b/cloudinit/net/eni.py @@ -16,10 +16,11 @@ import glob import os import re +from cloudinit import net + from cloudinit.net import LINKS_FNAME_PREFIX from cloudinit.net import ParserError from cloudinit.net.udev import generate_udev_rule -from cloudinit import util NET_CONFIG_COMMANDS = [ @@ -363,16 +364,13 @@ class Renderer(object): links_prefix=LINKS_FNAME_PREFIX, netrules='etc/udev/rules.d/70-persistent-net.rules'): - fpeni = os.path.sep.join((target, eni,)) - util.ensure_dir(os.path.dirname(fpeni)) - with open(fpeni, 'w+') as f: - f.write(self._render_interfaces(network_state)) + fpeni = os.path.join(target, eni) + net.write_file(fpeni, self._render_interfaces(network_state)) if netrules: - netrules = os.path.sep.join((target, netrules,)) - util.ensure_dir(os.path.dirname(netrules)) - with open(netrules, 'w+') as f: - f.write(self._render_persistent_net(network_state)) + netrules = os.path.join(target, netrules) + net.write_file(netrules, + self._render_persistent_net(network_state)) if links_prefix: self._render_systemd_links(target, network_state, links_prefix) @@ -382,18 +380,17 @@ class Renderer(object): fp_prefix = os.path.sep.join((target, links_prefix)) for f in glob.glob(fp_prefix + "*"): os.unlink(f) - interfaces = network_state.get('interfaces') for iface in interfaces.values(): if (iface['type'] == 'physical' and 'name' in iface and iface.get('mac_address')): fname = fp_prefix + iface['name'] + ".link" - with open(fname, "w") as fp: - fp.write("\n".join([ - "[Match]", - "MACAddress=" + iface['mac_address'], - "", - "[Link]", - "Name=" + iface['name'], - "" - ])) + content = "\n".join([ + "[Match]", + "MACAddress=" + iface['mac_address'], + "", + "[Link]", + "Name=" + iface['name'], + "" + ]) + net.write_file(fname, content) diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index 2feffa71..c5aeadb5 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -16,12 +16,11 @@ # along with Curtin. If not, see . 
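A side note on the cmdline.py changes above: _load_shell_content (whose full body appears earlier in this series) is what turns the klibc-style key=value files from the initramfs into a dictionary, and the new module-local _shlex_split keeps that working on Python 2.6. A hypothetical input and its result, with invented values:

    from cloudinit.net.cmdline import _load_shell_content

    content = 'DEVICE=eth0\nPROTO=dhcp\nIPV4ADDR=10.0.0.5\nHOSTNAME=\n'
    _load_shell_content(content)
    # => {'DEVICE': 'eth0', 'PROTO': 'dhcp', 'IPV4ADDR': '10.0.0.5'}
    # HOSTNAME is dropped because its value is empty and add_empty
    # defaults to False; passing add_empty=True keeps it as empty_val.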
import copy +import logging import six -from cloudinit import log as logging -from cloudinit import util -from cloudinit.util import yaml_dumps as dump_config +from cloudinit import net LOG = logging.getLogger(__name__) @@ -31,9 +30,34 @@ NETWORK_STATE_REQUIRED_KEYS = { } +def parse_net_config_data(net_config, skip_broken=True): + """Parses the config, returns NetworkState object + + :param net_config: curtin network config dict + """ + state = None + if 'version' in net_config and 'config' in net_config: + ns = NetworkState(version=net_config.get('version'), + config=net_config.get('config')) + ns.parse_config(skip_broken=skip_broken) + state = ns.network_state + return state + + +def parse_net_config(path, skip_broken=True): + """Parses a curtin network configuration file and + return network state""" + ns = None + net_config = net.read_yaml_file(path) + if 'network' in net_config: + ns = parse_net_config_data(net_config.get('network'), + skip_broken=skip_broken) + return ns + + def from_state_file(state_file): network_state = None - state = util.read_conf(state_file) + state = net.read_yaml_file(state_file) network_state = NetworkState() network_state.load(state) return network_state @@ -111,7 +135,7 @@ class NetworkState(object): 'config': self.config, 'network_state': self.network_state, } - return dump_config(state) + return net.dump_yaml(state) def load(self, state): if 'version' not in state: @@ -121,7 +145,7 @@ class NetworkState(object): required_keys = NETWORK_STATE_REQUIRED_KEYS[state['version']] missing_keys = diff_keys(required_keys, state) if missing_keys: - msg = 'Invalid state, missing keys: %s'.format(missing_keys) + msg = 'Invalid state, missing keys: %s' % (missing_keys) LOG.error(msg) raise ValueError(msg) @@ -130,7 +154,7 @@ class NetworkState(object): setattr(self, key, state[key]) def dump_network_state(self): - return dump_config(self.network_state) + return net.dump_yaml(self.network_state) def parse_config(self, skip_broken=True): # rebuild network state diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index b2acc648..f85fd864 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -551,6 +551,10 @@ def convert_net_json(network_json): 'type': 'static', 'address': network.get('ip_address'), }) + if network['type'] == 'ipv6': + subnet['ipv6'] = True + else: + subnet['ipv4'] = True subnets.append(subnet) cfg.update({'subnets': subnets}) if link['type'] in ['ethernet', 'vif', 'ovs', 'phy']: diff --git a/cloudinit/util.py b/cloudinit/util.py index 7562b97a..2bec476e 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -37,7 +37,6 @@ import pwd import random import re import shutil -import shlex import socket import stat import string @@ -82,7 +81,6 @@ CONTAINER_TESTS = (['systemd-detect-virt', '--quiet', '--container'], ['lxc-is-container']) PROC_CMDLINE = None -PY26 = sys.version_info[0:2] == (2, 6) def decode_binary(blob, encoding='utf-8'): @@ -1202,13 +1200,6 @@ def load_file(fname, read_cb=None, quiet=False, decode=True): return contents -def shlex_split(blob): - if PY26 and isinstance(blob, six.text_type): - # Older versions don't support unicode input - blob = blob.encode("utf8") - return shlex.split(blob) - - def get_cmdline(): if 'DEBUG_PROC_CMDLINE' in os.environ: return os.environ["DEBUG_PROC_CMDLINE"] diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index ed2c6d0f..75c433f6 100644 --- a/tests/unittests/test_net.py +++ 
b/tests/unittests/test_net.py @@ -2,6 +2,7 @@ from cloudinit import util from cloudinit import net from cloudinit.net import cmdline from cloudinit.net import eni +from cloudinit.net import network_state from .helpers import TestCase from .helpers import mock @@ -112,14 +113,14 @@ class TestEniNetRendering(TestCase): mock_sys_dev_path.side_effect = sys_dev_path network_cfg = net.generate_fallback_config() - network_state = net.parse_net_config_data(network_cfg, - skip_broken=False) + ns = network_state.parse_net_config_data(network_cfg, + skip_broken=False) render_dir = os.path.join(tmp_dir, "render") os.makedirs(render_dir) renderer = eni.Renderer() - renderer.render_network_state(render_dir, network_state, + renderer.render_network_state(render_dir, ns, eni="interfaces", links_prefix=None, netrules=None) -- cgit v1.2.3 From 2fb5af62229b8975910bf0ef63731047bd8d7e63 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 19 May 2016 15:33:15 -0700 Subject: Fix up tests and flake8 warnings --- cloudinit/net/__init__.py | 2 +- cloudinit/net/eni.py | 16 ++++----- cloudinit/sources/DataSourceConfigDrive.py | 1 - cloudinit/stages.py | 2 +- tests/unittests/helpers.py | 1 + tests/unittests/test__init__.py | 1 - tests/unittests/test_datasource/test_cloudsigma.py | 1 - .../unittests/test_datasource/test_configdrive.py | 41 ++++++++++++---------- tests/unittests/test_datasource/test_nocloud.py | 2 ++ tests/unittests/test_datasource/test_smartos.py | 2 +- tests/unittests/test_net.py | 6 ++-- tests/unittests/test_rh_subscription.py | 5 +-- tox.ini | 4 +-- 13 files changed, 41 insertions(+), 43 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index ad44911b..ba0e39ae 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -49,8 +49,8 @@ DEFAULT_PRIMARY_INTERFACE = 'eth0' # whole module can be easily extracted and placed into other # code-bases (curtin for example). + def write_file(path, content): - """Simple writing a file helper.""" base_path = os.path.dirname(path) if not os.path.isdir(base_path): os.makedirs(base_path) diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py index adb31c22..18bae97a 100644 --- a/cloudinit/net/eni.py +++ b/cloudinit/net/eni.py @@ -258,7 +258,7 @@ class Renderer(object): return content def _render_route(self, route, indent=""): - """ When rendering routes for an iface, in some cases applying a route + """When rendering routes for an iface, in some cases applying a route may result in the route command returning non-zero which produces some confusing output for users manually using ifup/ifdown[1]. 
To that end, we will optionally include an '|| true' postfix to each @@ -302,7 +302,7 @@ class Renderer(object): return content def _render_interfaces(self, network_state): - ''' Given state, emit etc/network/interfaces content ''' + '''Given state, emit etc/network/interfaces content''' content = "" interfaces = network_state.get('interfaces') @@ -336,8 +336,8 @@ class Renderer(object): iface['control'] = subnet.get('control', 'auto') if iface['mode'].endswith('6'): iface['inet'] += '6' - elif iface['mode'] == 'static' \ - and ":" in subnet['address']: + elif (iface['mode'] == 'static' + and ":" in subnet['address']): iface['inet'] += '6' if iface['mode'].startswith('dhcp'): iface['mode'] = 'dhcp' @@ -359,10 +359,10 @@ class Renderer(object): content = content.replace('mac_address', 'hwaddress') return content - def render_network_state(self, - target, network_state, eni="etc/network/interfaces", - links_prefix=LINKS_FNAME_PREFIX, - netrules='etc/udev/rules.d/70-persistent-net.rules'): + def render_network_state( + self, target, network_state, + eni="etc/network/interfaces", links_prefix=LINKS_FNAME_PREFIX, + netrules='etc/udev/rules.d/70-persistent-net.rules'): fpeni = os.path.join(target, eni) net.write_file(fpeni, self._render_interfaces(network_state)) diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 70373b43..4478c4e2 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -18,7 +18,6 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -import copy import os from cloudinit import log as logging diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 5dd31539..b837009a 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -44,8 +44,8 @@ from cloudinit import helpers from cloudinit import importer from cloudinit import log as logging from cloudinit import net -from cloudinit.reporting import events from cloudinit.net import cmdline +from cloudinit.reporting import events from cloudinit import sources from cloudinit import type_utils from cloudinit import util diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py index 7b4d44e8..8d46a8bf 100644 --- a/tests/unittests/helpers.py +++ b/tests/unittests/helpers.py @@ -45,6 +45,7 @@ else: if _PY_MINOR == 4 and _PY_MICRO < 3: FIX_HTTPRETTY = True + # Makes the old path start # with new base instead of whatever # it previously had diff --git a/tests/unittests/test__init__.py b/tests/unittests/test__init__.py index a9b35afe..0154784a 100644 --- a/tests/unittests/test__init__.py +++ b/tests/unittests/test__init__.py @@ -1,7 +1,6 @@ import os import shutil import tempfile -import unittest2 from cloudinit import handlers from cloudinit import helpers diff --git a/tests/unittests/test_datasource/test_cloudsigma.py b/tests/unittests/test_datasource/test_cloudsigma.py index 7950fc52..2a42ce0c 100644 --- a/tests/unittests/test_datasource/test_cloudsigma.py +++ b/tests/unittests/test_datasource/test_cloudsigma.py @@ -6,7 +6,6 @@ from cloudinit.cs_utils import Cepko from cloudinit.sources import DataSourceCloudSigma from .. 
import helpers as test_helpers -from ..helpers import SkipTest SERVER_CONTEXT = { "cpu": 1000, diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index 1db50798..5395e544 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -368,30 +368,32 @@ class TestNetJson(TestCase): self.assertEqual(myds.network_config, network_config) def test_network_config_conversions(self): - """Tests a bunch of input network json and checks the expected conversions.""" + """Tests a bunch of input network json and checks the + expected conversions.""" in_datas = [ NETWORK_DATA, { 'services': [{'type': 'dns', 'address': '172.19.0.12'}], - 'networks': [ - {'network_id': 'dacd568d-5be6-4786-91fe-750c374b78b4', - 'type': 'ipv4', 'netmask': '255.255.252.0', - 'link': 'tap1a81968a-79', - 'routes': [ - { - 'netmask': '0.0.0.0', - 'network': '0.0.0.0', - 'gateway': '172.19.3.254' - }, - ], - 'ip_address': '172.19.1.34', - 'id': 'network0', + 'networks': [{ + 'network_id': 'dacd568d-5be6-4786-91fe-750c374b78b4', + 'type': 'ipv4', + 'netmask': '255.255.252.0', + 'link': 'tap1a81968a-79', + 'routes': [{ + 'netmask': '0.0.0.0', + 'network': '0.0.0.0', + 'gateway': '172.19.3.254', + }], + 'ip_address': '172.19.1.34', + 'id': 'network0', + }], + 'links': [{ + 'type': 'bridge', + 'vif_id': '1a81968a-797a-400f-8a80-567f997eb93f', + 'ethernet_mac_address': 'fa:16:3e:ed:9a:59', + 'id': 'tap1a81968a-79', + 'mtu': None, }], - 'links': [ - {'type': 'bridge', - 'vif_id': '1a81968a-797a-400f-8a80-567f997eb93f', - 'ethernet_mac_address': 'fa:16:3e:ed:9a:59', - 'id': 'tap1a81968a-79', 'mtu': None}] }, ] out_datas = [ @@ -440,6 +442,7 @@ class TestNetJson(TestCase): 'address': '172.19.1.34', 'netmask': '255.255.252.0', 'type': 'static', + 'ipv4': True, 'routes': [{ 'gateway': '172.19.3.254', 'netmask': '0.0.0.0', diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py index 077603b4..b0fa1130 100644 --- a/tests/unittests/test_datasource/test_nocloud.py +++ b/tests/unittests/test_datasource/test_nocloud.py @@ -7,6 +7,8 @@ import os import shutil import tempfile +import yaml + class TestNoCloudDataSource(TestCase): diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py index 2f159ac4..28f56039 100644 --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ -42,7 +42,7 @@ from cloudinit import helpers as c_helpers from cloudinit.util import b64e from .. 
import helpers -from ..helpers import mock, SkipTest +from ..helpers import mock MOCK_RETURNS = { 'hostname': 'test-host', diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index faf0f0fb..7998111a 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -1,13 +1,11 @@ -from cloudinit import net -from cloudinit import util - from cloudinit import net from cloudinit.net import cmdline from cloudinit.net import eni from cloudinit.net import network_state +from cloudinit import util -from .helpers import TestCase from .helpers import mock +from .helpers import TestCase import base64 import copy diff --git a/tests/unittests/test_rh_subscription.py b/tests/unittests/test_rh_subscription.py index e4dcc58b..891dbe77 100644 --- a/tests/unittests/test_rh_subscription.py +++ b/tests/unittests/test_rh_subscription.py @@ -10,13 +10,10 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . - import logging -import mock -import unittest - from cloudinit.config import cc_rh_subscription +from cloudinit import util from .helpers import TestCase, mock diff --git a/tox.ini b/tox.ini index b92ebac1..7802a291 100644 --- a/tox.ini +++ b/tox.ini @@ -1,11 +1,11 @@ [tox] -envlist = py27,py3,flake8 +envlist = py26,py27,py3,flake8 recreate = True usedevelop = True [testenv] -commands = python -m nose {posargs:tests} +commands = nosetests {posargs:tests} deps = -r{toxinidir}/test-requirements.txt -r{toxinidir}/requirements.txt setenv = -- cgit v1.2.3 From 42a7d2b6d44be5fd6e41734902e08897b709015d Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 3 Jun 2016 15:06:55 -0400 Subject: config drive conversion: recognize 'bridge' as a physical type, fix mtu the network json in openstack provides a type of 'bridge' when the underlying (host) type is a bridge. Silly, but we need to consider that a physical device as it will be for us. 
also, the 'mtu' will appear on the link, not on the route --- cloudinit/sources/DataSourceConfigDrive.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 3cc9155d..c87f57fd 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -293,6 +293,7 @@ def convert_network_data(network_json=None, known_macs=None): 'mac_address', 'subnets', 'params', + 'mtu', ], 'subnet': [ 'type', @@ -302,7 +303,6 @@ def convert_network_data(network_json=None, known_macs=None): 'metric', 'gateway', 'pointopoint', - 'mtu', 'scope', 'dns_nameservers', 'dns_search', @@ -342,7 +342,7 @@ def convert_network_data(network_json=None, known_macs=None): }) subnets.append(subnet) cfg.update({'subnets': subnets}) - if link['type'] in ['ethernet', 'vif', 'ovs', 'phy']: + if link['type'] in ['ethernet', 'vif', 'ovs', 'phy', 'bridge']: cfg.update({ 'type': 'physical', 'mac_address': link['ethernet_mac_address']}) -- cgit v1.2.3 From 6ada153ebfd9483d76f904dafaa1fc80f61c9205 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 10 Jun 2016 14:16:51 -0700 Subject: Just mock 'on_first_boot' vs special argument --- cloudinit/sources/DataSourceConfigDrive.py | 5 ++--- tests/unittests/test_datasource/test_configdrive.py | 11 +++++++---- 2 files changed, 9 insertions(+), 7 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index cea08de2..3130e618 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -62,7 +62,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): mstr += "[source=%s]" % (self.source) return mstr - def get_data(self, skip_first_boot=False): + def get_data(self): found = None md = {} results = {} @@ -111,8 +111,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): # 'injected files' and apply legacy ENI network format. 
prev_iid = get_previous_iid(self.paths) cur_iid = md['instance-id'] - if (prev_iid != cur_iid and self.dsmode == sources.DSMODE_PASS - and not skip_first_boot): + if prev_iid != cur_iid and self.dsmode == sources.DSMODE_PASS: on_first_boot(results, distro=self.distro) LOG.debug("%s: not claiming datasource, dsmode=%s", self, self.dsmode) diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index b898e2bd..18551b92 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -370,7 +370,8 @@ class TestConfigDriveDataSource(TestCase): util.find_devs_with = orig_find_devs_with util.is_partition = orig_is_partition - def test_pubkeys_v2(self): + @mock.patch('cloudinit.sources.DataSourceConfigDrive.on_first_boot') + def test_pubkeys_v2(self, on_first_boot): """Verify that public-keys work in config-drive-v2.""" populate_dir(self.tmp, CFG_DRIVE_FILES_V2) myds = cfg_ds_from_dir(self.tmp) @@ -385,13 +386,15 @@ class TestNetJson(TestCase): self.addCleanup(shutil.rmtree, self.tmp) self.maxDiff = None - def test_network_data_is_found(self): + @mock.patch('cloudinit.sources.DataSourceConfigDrive.on_first_boot') + def test_network_data_is_found(self, on_first_boot): """Verify that network_data is present in ds in config-drive-v2.""" populate_dir(self.tmp, CFG_DRIVE_FILES_V2) myds = cfg_ds_from_dir(self.tmp) self.assertIsNotNone(myds.network_json) - def test_network_config_is_converted(self): + @mock.patch('cloudinit.sources.DataSourceConfigDrive.on_first_boot') + def test_network_config_is_converted(self, on_first_boot): """Verify that network_data is converted and present on ds object.""" populate_dir(self.tmp, CFG_DRIVE_FILES_V2) myds = cfg_ds_from_dir(self.tmp) @@ -558,7 +561,7 @@ def cfg_ds_from_dir(seed_d): helpers.Paths({})) cfg_ds.seed_dir = seed_d cfg_ds.known_macs = KNOWN_MACS.copy() - if not cfg_ds.get_data(skip_first_boot=True): + if not cfg_ds.get_data(): raise RuntimeError("Data source did not extract itself from" " seed directory %s" % seed_d) return cfg_ds -- cgit v1.2.3 From df583f83ac9d4869e0b3df5a904d13890d2f0986 Mon Sep 17 00:00:00 2001 From: Phil Roche Date: Mon, 13 Jun 2016 09:52:52 +0100 Subject: Removes trailing dot in metadata.google.internal GCE metadata lookup. A bug was reported (lp:1581200) where if there is no DNS server configured or it is not running then the metadata lookup on GCE will fail as it contains a trailing dot 'metadata.google.internal.'. As there is no DNS configured or running it will use the /etc/hosts file but the hosts file does not contain an entry with the trailing dot. One solution is to add an entry to the /etc/hosts file with the trailing dot but according to the manpage, /etc/hosts entries must end with an alphanumeric character and cannot end with a dot. The trailing dot was added to avoid MIM by dns search but we should probably assume the instance being started has no DNS and as such when querying metadata should use a URL that will resolve using /etc/hosts. 
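(Illustration only; the address below is the conventional GCE metadata address and is not introduced by this change. With no DNS available, resolution has to come from an /etc/hosts entry along the lines of

    169.254.169.254    metadata.google.internal metadata

which can answer a lookup of 'metadata.google.internal' but never the absolute form 'metadata.google.internal.', since hosts(5) entries cannot end in a dot.)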
LP: #1581200 --- cloudinit/sources/DataSourceGCE.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py index 9234d1f8..c660a350 100644 --- a/cloudinit/sources/DataSourceGCE.py +++ b/cloudinit/sources/DataSourceGCE.py @@ -25,7 +25,7 @@ from cloudinit import util LOG = logging.getLogger(__name__) BUILTIN_DS_CONFIG = { - 'metadata_url': 'http://metadata.google.internal./computeMetadata/v1/' + 'metadata_url': 'http://metadata.google.internal/computeMetadata/v1/' } REQUIRED_FIELDS = ('instance-id', 'availability-zone', 'local-hostname') -- cgit v1.2.3 From 780f88f4dd8ddd36c54e217ea5af3b18b7339758 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 14 Jun 2016 15:08:24 -0400 Subject: [Revert] Remove trailing dot from GCE metadata URL This change broke tox tests. --- ChangeLog | 1 - cloudinit/sources/DataSourceGCE.py | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) (limited to 'cloudinit/sources') diff --git a/ChangeLog b/ChangeLog index 66539792..e2869522 100644 --- a/ChangeLog +++ b/ChangeLog @@ -122,7 +122,6 @@ - ConfigDrive: improved support for networking information from a network_data.json or older interfaces formated network_config. - Change missing Cheetah log warning to debug [Andrew Jorgensen] - - Remove trailing dot from GCE metadata URL (LP: #1581200) [Phil Roche] 0.7.6: - open 0.7.6 diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py index c660a350..9234d1f8 100644 --- a/cloudinit/sources/DataSourceGCE.py +++ b/cloudinit/sources/DataSourceGCE.py @@ -25,7 +25,7 @@ from cloudinit import util LOG = logging.getLogger(__name__) BUILTIN_DS_CONFIG = { - 'metadata_url': 'http://metadata.google.internal/computeMetadata/v1/' + 'metadata_url': 'http://metadata.google.internal./computeMetadata/v1/' } REQUIRED_FIELDS = ('instance-id', 'availability-zone', 'local-hostname') -- cgit v1.2.3 From 946b0f5542ffb43149f60b5dd50b2fb450d23713 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Wed, 15 Jun 2016 10:31:44 +0100 Subject: Re-apply "Remove trailing dot from GCE metadata URL (LP: #1581200) [Phil Roche]" This commit includes the content of that commit, plus a fix for the tests (provided by Phil). --- ChangeLog | 1 + cloudinit/sources/DataSourceGCE.py | 2 +- tests/unittests/test_datasource/test_gce.py | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) (limited to 'cloudinit/sources') diff --git a/ChangeLog b/ChangeLog index e2869522..66539792 100644 --- a/ChangeLog +++ b/ChangeLog @@ -122,6 +122,7 @@ - ConfigDrive: improved support for networking information from a network_data.json or older interfaces formated network_config. 
- Change missing Cheetah log warning to debug [Andrew Jorgensen] + - Remove trailing dot from GCE metadata URL (LP: #1581200) [Phil Roche] 0.7.6: - open 0.7.6 diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py index 9234d1f8..c660a350 100644 --- a/cloudinit/sources/DataSourceGCE.py +++ b/cloudinit/sources/DataSourceGCE.py @@ -25,7 +25,7 @@ from cloudinit import util LOG = logging.getLogger(__name__) BUILTIN_DS_CONFIG = { - 'metadata_url': 'http://metadata.google.internal./computeMetadata/v1/' + 'metadata_url': 'http://metadata.google.internal/computeMetadata/v1/' } REQUIRED_FIELDS = ('instance-id', 'availability-zone', 'local-hostname') diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py index 1f7eb99e..6e62a4d2 100644 --- a/tests/unittests/test_datasource/test_gce.py +++ b/tests/unittests/test_datasource/test_gce.py @@ -52,7 +52,7 @@ GCE_META_ENCODING = { HEADERS = {'X-Google-Metadata-Request': 'True'} MD_URL_RE = re.compile( - r'http://metadata.google.internal./computeMetadata/v1/.*') + r'http://metadata.google.internal/computeMetadata/v1/.*') def _set_mock_metadata(gce_meta=None): -- cgit v1.2.3 From 4f989cc595775ab6b06fac604218d77910f3e4f4 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 15 Jun 2016 21:08:04 -0400 Subject: fis some Datasourcenocloud issues LP: #1592505 --- cloudinit/sources/DataSourceNoCloud.py | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index 7e30118c..19d63950 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -24,7 +24,7 @@ import errno import os from cloudinit import log as logging -from cloudinit import net +from cloudinit.net import eni from cloudinit import sources from cloudinit import util @@ -194,8 +194,7 @@ class DataSourceNoCloud(sources.DataSource): # LP: #1568150 need getattr in the case that an old class object # has been loaded from a pickled file and now executing new source. dirs = getattr(self, 'seed_dirs', [self.seed_dir]) - quick_id = _quick_read_instance_id(cmdline_id=self.cmdline_id, - dirs=dirs) + quick_id = _quick_read_instance_id(dirs=dirs) if not quick_id: return None return quick_id == current @@ -203,20 +202,19 @@ class DataSourceNoCloud(sources.DataSource): @property def network_config(self): if self._network_config is None: - if self.network_eni is not None: - self._network_config = net.convert_eni_data(self.network_eni) + if self._network_eni is not None: + self._network_config = eni.convert_eni_data(self._network_eni) return self._network_config -def _quick_read_instance_id(cmdline_id, dirs=None): +def _quick_read_instance_id(dirs=None): if dirs is None: dirs = [] iid_key = 'instance-id' - if cmdline_id is None: - fill = {} - if parse_cmdline_data(cmdline_id, fill) and iid_key in fill: - return fill[iid_key] + fill = {} + if load_cmdline_data(fill) and iid_key in fill: + return fill[iid_key] for d in dirs: if d is None: -- cgit v1.2.3 From d4b587ebf500ddc2259fffc94a3c69c199c9a427 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 15 Jun 2016 22:50:12 -0400 Subject: fix some errors reported by pylint pylint --errors-only found several errors. Some of the changes here represent real errors, others just code that pylint did not like. 
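For illustration, a run along the lines of the one below (the invocation is an assumption; only "pylint --errors-only" is named above) turns up the class of problem this commit addresses. The simplified, standalone sketch that follows mirrors the real bug corrected in cloudinit/distros/arch.py further down, where the loop walked the builtin 'list' type instead of the 'settings' argument:

    # Assumed invocation; the commit only names "pylint --errors-only".
    #   python -m pylint --errors-only cloudinit/ tests/

    # Simplified, standalone sketch of the helper touched in arch.py.
    # Before the fix:
    def convert_resolv_conf(settings):
        result = ''
        if isinstance(settings, list):
            for ns in list:           # bug: iterates the builtin type 'list'
                result = result + 'nameserver %s\n' % ns
        return result

    # After the fix, iterating the actual argument:
    def convert_resolv_conf(settings):
        result = ''
        if isinstance(settings, list):
            for ns in settings:
                result = result + 'nameserver %s\n' % ns
        return result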
--- cloudinit/config/cc_apt_configure.py | 3 ++- cloudinit/config/cc_growpart.py | 10 +++++----- cloudinit/distros/__init__.py | 2 +- cloudinit/distros/arch.py | 4 ++-- cloudinit/sources/DataSourceOVF.py | 10 +++++----- cloudinit/sources/DataSourceOpenNebula.py | 2 +- cloudinit/sources/helpers/openstack.py | 2 +- 7 files changed, 17 insertions(+), 16 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index 96c4a43d..05ad4b03 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -208,8 +208,9 @@ def add_apt_sources(srclist, template_params=None, aa_repo_match=None): template_params = {} if aa_repo_match is None: - def aa_repo_match(x): + def _aa_repo_match(x): return False + aa_repo_match = _aa_repo_match errorlist = [] srcdict = convert_to_new_format(srclist) diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index 859d69f1..40560f11 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -36,13 +36,13 @@ DEFAULT_CONFIG = { } -def enum(**enums): - return type('Enum', (), enums) +class RESIZE(object): + SKIPPED = "SKIPPED" + CHANGED = "CHANGED" + NOCHANGE = "NOCHANGE" + FAILED = "FAILED" -RESIZE = enum(SKIPPED="SKIPPED", CHANGED="CHANGED", NOCHANGE="NOCHANGE", - FAILED="FAILED") - LOG = logging.getLogger(__name__) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 5c29c804..14b500f8 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -454,7 +454,7 @@ class Distro(object): keys = kwargs['ssh_authorized_keys'] if isinstance(keys, six.string_types): keys = [keys] - if isinstance(keys, dict): + elif isinstance(keys, dict): keys = list(keys.values()) if keys is not None: if not isinstance(keys, (tuple, list, set)): diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py index 93a2e008..66209f22 100644 --- a/cloudinit/distros/arch.py +++ b/cloudinit/distros/arch.py @@ -196,6 +196,6 @@ def convert_resolv_conf(settings): """Returns a settings string formatted for resolv.conf.""" result = '' if isinstance(settings, list): - for ns in list: + for ns in settings: result = result + 'nameserver %s\n' % ns - return result + return result diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index f2bb9366..43347cfb 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -37,16 +37,16 @@ from cloudinit.sources.helpers.vmware.imc.config_file \ import ConfigFile from cloudinit.sources.helpers.vmware.imc.config_nic \ import NicConfigurator +from cloudinit.sources.helpers.vmware.imc.guestcust_error \ + import GuestCustErrorEnum from cloudinit.sources.helpers.vmware.imc.guestcust_event \ import GuestCustEventEnum from cloudinit.sources.helpers.vmware.imc.guestcust_state \ import GuestCustStateEnum -from cloudinit.sourceshelpers.vmware.imc.guestcust_error \ - import GuestCustErrorEnum -from cloudinit.sourceshelpers.vmware.imc.guestcust_util import ( - set_customization_status, +from cloudinit.sources.helpers.vmware.imc.guestcust_util import ( + enable_nics, get_nics_to_enable, - enable_nics + set_customization_status ) LOG = logging.getLogger(__name__) diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py index 8f85b115..7b3a76b9 100644 --- a/cloudinit/sources/DataSourceOpenNebula.py +++ b/cloudinit/sources/DataSourceOpenNebula.py @@ -104,7 +104,7 @@ 
class DataSourceOpenNebula(sources.DataSource): def get_hostname(self, fqdn=False, resolve_ip=None): if resolve_ip is None: - if self.dsmode == sources.DSMODE_NET: + if self.dsmode == sources.DSMODE_NETWORK: resolve_ip = True else: resolve_ip = False diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index 494335b3..d52cb56a 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -157,7 +157,7 @@ class BaseReader(object): pass @abc.abstractmethod - def _path_read(self, path): + def _path_read(self, path, decode=False): pass @abc.abstractmethod -- cgit v1.2.3 From b2fd5b65dc89c3fd5dbc1414968505367fc7126a Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 15 Jun 2016 23:03:11 -0400 Subject: fix usage of OSError.message that will not work in python3 python3's OSError does not have a .message attribute. --- cloudinit/sources/DataSourceAltCloud.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py index cd61df31..a3529609 100644 --- a/cloudinit/sources/DataSourceAltCloud.py +++ b/cloudinit/sources/DataSourceAltCloud.py @@ -205,8 +205,7 @@ class DataSourceAltCloud(sources.DataSource): _err.message) return False except OSError as _err: - util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), - _err.message) + util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err) return False floppy_dev = '/dev/fd0' -- cgit v1.2.3 From 8e5f63cb2f11ce08f7de1df23b8937305b91b707 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 15 Jun 2016 23:16:35 -0400 Subject: remove declaration of dsmode as local in DataSourceNoCloud this would cause the datasource to operate in local mode by default. --- cloudinit/sources/DataSourceNoCloud.py | 1 - 1 file changed, 1 deletion(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index 19d63950..cdc9eef5 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -34,7 +34,6 @@ LOG = logging.getLogger(__name__) class DataSourceNoCloud(sources.DataSource): def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) - self.dsmode = 'local' self.seed = None self.seed_dirs = [os.path.join(paths.seed_dir, 'nocloud'), os.path.join(paths.seed_dir, 'nocloud-net')] -- cgit v1.2.3
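As a closing aid to reading the network conversion changes above, here is a self-contained sketch of the input and output shapes involved. It reuses the sample values from the config-drive tests earlier in this series; the expected dictionary is an approximation of the converted form rather than a verbatim test fixture, and the exact set of keys retained varies between the revisions shown above.

    # network_data.json fragment (values taken from the test data above):
    network_json = {
        'links': [{
            'id': 'tap1a81968a-79',
            'type': 'bridge',        # now treated as a physical device type
            'ethernet_mac_address': 'fa:16:3e:ed:9a:59',
            'vif_id': '1a81968a-797a-400f-8a80-567f997eb93f',
            'mtu': None,
        }],
        'networks': [{
            'id': 'network0',
            'link': 'tap1a81968a-79',
            'type': 'ipv4',
            'ip_address': '172.19.1.34',
            'netmask': '255.255.252.0',
            'routes': [{'network': '0.0.0.0',
                        'netmask': '0.0.0.0',
                        'gateway': '172.19.3.254'}],
        }],
        'services': [{'type': 'dns', 'address': '172.19.0.12'}],
    }

    # Approximate network_config (version 1) produced by the conversion:
    expected = {
        'version': 1,
        'config': [
            {'type': 'physical',
             'name': 'tap1a81968a-79',
             'mac_address': 'fa:16:3e:ed:9a:59',
             'subnets': [{'type': 'static',
                          'address': '172.19.1.34',
                          'netmask': '255.255.252.0',
                          'ipv4': True,      # flag added for static subnets
                          'routes': [{'network': '0.0.0.0',
                                      'netmask': '0.0.0.0',
                                      'gateway': '172.19.3.254'}]}]},
            {'type': 'nameserver', 'address': '172.19.0.12'},
        ],
    }

    # With the helper refactored in this series, a check would look roughly like:
    #   from cloudinit.sources.helpers import openstack
    #   assert openstack.convert_net_json(network_json) == expected
    # (elsewhere in the series the config-drive converter also grows a
    #  'known_macs' argument, so treat this as a sketch, not an exact test.)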