From 4a60af54957634920e84a928aa22b4fc9a6dfd11 Mon Sep 17 00:00:00 2001 From: Junjie Wang <jingni.wjj@alibaba-inc.com> Date: Fri, 21 Apr 2017 20:06:09 +0800 Subject: AliYun: Enable platform identification and enable by default. AliYun cloud platform is now identifying themselves by setting the dmi product id to the well known value "Alibaba Cloud ECS". The changes here identify that properly in tools/ds-identify and in the DataSourceAliYun. Since the 'get_data' for AliYun now identifies itself correctly, we can enable AliYun by default. LP: #1638931 --- tools/ds-identify | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'tools') diff --git a/tools/ds-identify b/tools/ds-identify index 74d26537..5fc500b9 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -110,7 +110,8 @@ DI_DSNAME="" # this has to match the builtin list in cloud-init, it is what will # be searched if there is no setting found in config. DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \ -CloudSigma CloudStack DigitalOcean Ec2 GCE OpenNebula OpenStack OVF SmartOS" +CloudSigma CloudStack DigitalOcean AliYun Ec2 GCE OpenNebula OpenStack \ +OVF SmartOS" DI_DSLIST="" DI_MODE="" DI_ON_FOUND="" @@ -821,10 +822,11 @@ dscheck_OpenStack() { } dscheck_AliYun() { - # aliyun is not enabled by default (LP: #1638931) - # so if we are here, it is because the datasource_list was - # set to include it. Thus, 'maybe'. - return $DS_MAYBE + check_seed_dir "AliYun" meta-data user-data && return ${DS_FOUND} + if dmi_product_name_is "Alibaba Cloud ECS"; then + return $DS_FOUND + fi + return $DS_NOT_FOUND } dscheck_AltCloud() { -- cgit v1.2.3 From 0a448dd034883c07f85091dbfc9117de7227eb8d Mon Sep 17 00:00:00 2001 From: Chad Smith <chad.smith@canonical.com> Date: Thu, 25 May 2017 11:04:55 -0600 Subject: ntp: Add schema definition and passive schema validation. cloud-config files are very flexible and permissive. This adds a jsonsschema definition to the cc_ntp module and validation functions in cloudinit/config/schema which will log warnings about invalid configuration values in the ntp section. A cmdline tools/cloudconfig-schema is added which can be used in our dev environments to quickly attempt to exercise the ntp schema. It is also exposed as a main in cloudinit.config.schema. 
(python3 -m cloudinit.config.schema) LP: #1692916 --- cloudinit/config/cc_ntp.py | 69 ++++++- cloudinit/config/schema.py | 222 +++++++++++++++++++++++ requirements.txt | 3 + tests/unittests/helpers.py | 6 +- tests/unittests/test_handler/test_handler_ntp.py | 109 +++++++++++ tests/unittests/test_handler/test_schema.py | 220 ++++++++++++++++++++++ tools/cloudconfig-schema | 35 ++++ 7 files changed, 657 insertions(+), 7 deletions(-) create mode 100644 cloudinit/config/schema.py create mode 100644 tests/unittests/test_handler/test_schema.py create mode 100755 tools/cloudconfig-schema (limited to 'tools') diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py index 5cc54536..31ed64e3 100644 --- a/cloudinit/config/cc_ntp.py +++ b/cloudinit/config/cc_ntp.py @@ -36,6 +36,7 @@ servers or pools are provided, 4 pools will be used in the format - 192.168.23.2 """ +from cloudinit.config.schema import validate_cloudconfig_schema from cloudinit import log as logging from cloudinit.settings import PER_INSTANCE from cloudinit import templater @@ -43,6 +44,7 @@ from cloudinit import type_utils from cloudinit import util import os +from textwrap import dedent LOG = logging.getLogger(__name__) @@ -52,21 +54,84 @@ NR_POOL_SERVERS = 4 distros = ['centos', 'debian', 'fedora', 'opensuse', 'ubuntu'] +# The schema definition for each cloud-config module is a strict contract for +# describing supported configuration parameters for each cloud-config section. +# It allows cloud-config to validate and alert users to invalid or ignored +# configuration options before actually attempting to deploy with said +# configuration. + +schema = { + 'id': 'cc_ntp', + 'name': 'NTP', + 'title': 'enable and configure ntp', + 'description': dedent("""\ + Handle ntp configuration. If ntp is not installed on the system and + ntp configuration is specified, ntp will be installed. If there is a + default ntp config file in the image or one is present in the + distro's ntp package, it will be copied to ``/etc/ntp.conf.dist`` + before any changes are made. A list of ntp pools and ntp servers can + be provided under the ``ntp`` config key. If no ntp ``servers`` or + ``pools`` are provided, 4 pools will be used in the format + ``{0-3}.{distro}.pool.ntp.org``."""), + 'distros': distros, + 'examples': [ + {'ntp': {'pools': ['0.company.pool.ntp.org', '1.company.pool.ntp.org', + 'ntp.myorg.org'], + 'servers': ['my.ntp.server.local', 'ntp.ubuntu.com', + '192.168.23.2']}}], + 'frequency': PER_INSTANCE, + 'type': 'object', + 'properties': { + 'ntp': { + 'type': ['object', 'null'], + 'properties': { + 'pools': { + 'type': 'array', + 'items': { + 'type': 'string', + 'format': 'hostname' + }, + 'uniqueItems': True, + 'description': dedent("""\ + List of ntp pools. If both pools and servers are + empty, 4 default pool servers will be provided of + the format ``{0-3}.{distro}.pool.ntp.org``.""") + }, + 'servers': { + 'type': 'array', + 'items': { + 'type': 'string', + 'format': 'hostname' + }, + 'uniqueItems': True, + 'description': dedent("""\ + List of ntp servers. 
If both pools and servers are + empty, 4 default pool servers will be provided with + the format ``{0-3}.{distro}.pool.ntp.org``.""") + } + }, + 'required': [], + 'additionalProperties': False + } + } +} + + def handle(name, cfg, cloud, log, _args): """Enable and configure ntp.""" - if 'ntp' not in cfg: LOG.debug( "Skipping module named %s, not present or disabled by cfg", name) return - ntp_cfg = cfg.get('ntp', {}) + # TODO drop this when validate_cloudconfig_schema is strict=True if not isinstance(ntp_cfg, (dict)): raise RuntimeError(("'ntp' key existed in config," " but not a dictionary type," " is a %s %instead"), type_utils.obj_name(ntp_cfg)) + validate_cloudconfig_schema(cfg, schema) rename_ntp_conf() # ensure when ntp is installed it has a configuration file # to use instead of starting up with packaged defaults diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py new file mode 100644 index 00000000..6400f005 --- /dev/null +++ b/cloudinit/config/schema.py @@ -0,0 +1,222 @@ +# This file is part of cloud-init. See LICENSE file for license information. +"""schema.py: Set of module functions for processing cloud-config schema.""" + +from __future__ import print_function + +from cloudinit.util import read_file_or_url + +import argparse +import logging +import os +import sys +import yaml + +SCHEMA_UNDEFINED = b'UNDEFINED' +CLOUD_CONFIG_HEADER = b'#cloud-config' +SCHEMA_DOC_TMPL = """ +{name} +--- +**Summary:** {title} + +{description} + +**Internal name:** ``{id}`` + +**Module frequency:** {frequency} + +**Supported distros:** {distros} + +**Config schema**: +{property_doc} +{examples} +""" +SCHEMA_PROPERTY_TMPL = '{prefix}**{prop_name}:** ({type}) {description}' + + +class SchemaValidationError(ValueError): + """Raised when validating a cloud-config file against a schema.""" + + def __init__(self, schema_errors=()): + """Init the exception an n-tuple of schema errors. + + @param schema_errors: An n-tuple of the format: + ((flat.config.key, msg),) + """ + self.schema_errors = schema_errors + error_messages = [ + '{0}: {1}'.format(config_key, message) + for config_key, message in schema_errors] + message = "Cloud config schema errors: {0}".format( + ', '.join(error_messages)) + super(SchemaValidationError, self).__init__(message) + + +def validate_cloudconfig_schema(config, schema, strict=False): + """Validate provided config meets the schema definition. + + @param config: Dict of cloud configuration settings validated against + schema. + @param schema: jsonschema dict describing the supported schema definition + for the cloud config module (config.cc_*). + @param strict: Boolean, when True raise SchemaValidationErrors instead of + logging warnings. + + @raises: SchemaValidationError when provided config does not validate + against the provided schema. + """ + try: + from jsonschema import Draft4Validator, FormatChecker + except ImportError: + logging.warning( + 'Ignoring schema validation. 
python-jsonschema is not present') + return + validator = Draft4Validator(schema, format_checker=FormatChecker()) + errors = () + for error in sorted(validator.iter_errors(config), key=lambda e: e.path): + path = '.'.join([str(p) for p in error.path]) + errors += ((path, error.message),) + if errors: + if strict: + raise SchemaValidationError(errors) + else: + messages = ['{0}: {1}'.format(k, msg) for k, msg in errors] + logging.warning('Invalid config:\n%s', '\n'.join(messages)) + + +def validate_cloudconfig_file(config_path, schema): + """Validate cloudconfig file adheres to a specific jsonschema. + + @param config_path: Path to the yaml cloud-config file to parse. + @param schema: Dict describing a valid jsonschema to validate against. + + @raises SchemaValidationError containing any of schema_errors encountered. + @raises RuntimeError when config_path does not exist. + """ + if not os.path.exists(config_path): + raise RuntimeError('Configfile {0} does not exist'.format(config_path)) + content = read_file_or_url('file://{0}'.format(config_path)).contents + if not content.startswith(CLOUD_CONFIG_HEADER): + errors = ( + ('header', 'File {0} needs to begin with "{1}"'.format( + config_path, CLOUD_CONFIG_HEADER.decode())),) + raise SchemaValidationError(errors) + + try: + cloudconfig = yaml.safe_load(content) + except yaml.parser.ParserError as e: + errors = ( + ('format', 'File {0} is not valid yaml. {1}'.format( + config_path, str(e))),) + raise SchemaValidationError(errors) + validate_cloudconfig_schema( + cloudconfig, schema, strict=True) + + +def _get_property_type(property_dict): + """Return a string representing a property type from a given jsonschema.""" + property_type = property_dict.get('type', SCHEMA_UNDEFINED) + if isinstance(property_type, list): + property_type = '/'.join(property_type) + item_type = property_dict.get('items', {}).get('type') + if item_type: + property_type = '{0} of {1}'.format(property_type, item_type) + return property_type + + +def _get_property_doc(schema, prefix=' '): + """Return restructured text describing the supported schema properties.""" + new_prefix = prefix + ' ' + properties = [] + for prop_key, prop_config in schema.get('properties', {}).items(): + # Define prop_name and dscription for SCHEMA_PROPERTY_TMPL + description = prop_config.get('description', '') + properties.append(SCHEMA_PROPERTY_TMPL.format( + prefix=prefix, + prop_name=prop_key, + type=_get_property_type(prop_config), + description=description.replace('\n', ''))) + if 'properties' in prop_config: + properties.append( + _get_property_doc(prop_config, prefix=new_prefix)) + return '\n\n'.join(properties) + + +def _get_schema_examples(schema, prefix=''): + """Return restructured text describing the schema examples if present.""" + examples = schema.get('examples') + if not examples: + return '' + rst_content = '\n**Examples**::\n\n' + for example in examples: + example_yaml = yaml.dump(example, default_flow_style=False) + # Python2.6 is missing textwrapper.indent + lines = example_yaml.split('\n') + indented_lines = [' {0}'.format(line) for line in lines] + rst_content += '\n'.join(indented_lines) + return rst_content + + +def get_schema_doc(schema): + """Return reStructured text rendering the provided jsonschema. + + @param schema: Dict of jsonschema to render. + @raise KeyError: If schema lacks an expected key. 
+ """ + schema['property_doc'] = _get_property_doc(schema) + schema['examples'] = _get_schema_examples(schema) + schema['distros'] = ', '.join(schema['distros']) + return SCHEMA_DOC_TMPL.format(**schema) + + +def get_schema(section_key=None): + """Return a dict of jsonschema defined in any cc_* module. + + @param: section_key: Optionally limit schema to a specific top-level key. + """ + # TODO use util.find_modules in subsequent branch + from cloudinit.config.cc_ntp import schema + return schema + + +def error(message): + print(message, file=sys.stderr) + return 1 + + +def get_parser(): + """Return a parser for supported cmdline arguments.""" + parser = argparse.ArgumentParser() + parser.add_argument('-c', '--config-file', + help='Path of the cloud-config yaml file to validate') + parser.add_argument('-d', '--doc', action="store_true", default=False, + help='Print schema documentation') + parser.add_argument('-k', '--key', + help='Limit validation or docs to a section key') + return parser + + +def main(): + """Tool to validate schema of a cloud-config file or print schema docs.""" + parser = get_parser() + args = parser.parse_args() + exclusive_args = [args.config_file, args.doc] + if not any(exclusive_args) or all(exclusive_args): + return error('Expected either --config-file argument or --doc') + + schema = get_schema() + if args.config_file: + try: + validate_cloudconfig_file(args.config_file, schema) + except (SchemaValidationError, RuntimeError) as e: + return error(str(e)) + print("Valid cloud-config file {0}".format(args.config_file)) + if args.doc: + print(get_schema_doc(schema)) + return 0 + + +if __name__ == '__main__': + sys.exit(main()) + + +# vi: ts=4 expandtab diff --git a/requirements.txt b/requirements.txt index 0c4951f5..60abab16 100644 --- a/requirements.txt +++ b/requirements.txt @@ -36,5 +36,8 @@ requests # For patching pieces of cloud-config together jsonpatch +# For validating cloud-config sections per schema definitions +jsonschema + # For Python 2/3 compatibility six diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py index 9ff15993..e78abce2 100644 --- a/tests/unittests/helpers.py +++ b/tests/unittests/helpers.py @@ -19,10 +19,6 @@ try: from contextlib import ExitStack except ImportError: from contextlib2 import ExitStack -try: - from cStringIO import StringIO -except ImportError: - from io import StringIO from cloudinit import helpers as ch from cloudinit import util @@ -102,7 +98,7 @@ class CiTestCase(TestCase): if self.with_logs: # Create a log handler so unit tests can search expected logs. 
logger = logging.getLogger() - self.logs = StringIO() + self.logs = six.StringIO() handler = logging.StreamHandler(self.logs) self.old_handlers = logger.handlers logger.handlers = [handler] diff --git a/tests/unittests/test_handler/test_handler_ntp.py b/tests/unittests/test_handler/test_handler_ntp.py index bc4277b7..6cafa63d 100644 --- a/tests/unittests/test_handler/test_handler_ntp.py +++ b/tests/unittests/test_handler/test_handler_ntp.py @@ -212,4 +212,113 @@ class TestNtp(FilesystemMockingTestCase): 'Skipping module named cc_ntp, not present or disabled by cfg\n', self.logs.getvalue()) + def test_ntp_handler_schema_validation_allows_empty_ntp_config(self): + """Ntp schema validation allows for an empty ntp: configuration.""" + invalid_config = {'ntp': {}} + distro = 'ubuntu' + cc = self._get_cloud(distro) + ntp_conf = os.path.join(self.new_root, 'ntp.conf') + with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: + stream.write(NTP_TEMPLATE) + with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): + cc_ntp.handle('cc_ntp', invalid_config, cc, None, []) + self.assertNotIn('Invalid config:', self.logs.getvalue()) + with open(ntp_conf) as stream: + content = stream.read() + default_pools = [ + "{0}.{1}.pool.ntp.org".format(x, distro) + for x in range(0, cc_ntp.NR_POOL_SERVERS)] + self.assertEqual( + "servers []\npools {0}\n".format(default_pools), + content) + + def test_ntp_handler_schema_validation_warns_non_string_item_type(self): + """Ntp schema validation warns of non-strings in pools or servers. + + Schema validation is not strict, so ntp config is still be rendered. + """ + invalid_config = {'ntp': {'pools': [123], 'servers': ['valid', None]}} + cc = self._get_cloud('ubuntu') + ntp_conf = os.path.join(self.new_root, 'ntp.conf') + with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: + stream.write(NTP_TEMPLATE) + with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): + cc_ntp.handle('cc_ntp', invalid_config, cc, None, []) + self.assertIn( + "Invalid config:\nntp.pools.0: 123 is not of type 'string'\n" + "ntp.servers.1: None is not of type 'string'", + self.logs.getvalue()) + with open(ntp_conf) as stream: + content = stream.read() + self.assertEqual("servers ['valid', None]\npools [123]\n", content) + + def test_ntp_handler_schema_validation_warns_of_non_array_type(self): + """Ntp schema validation warns of non-array pools or servers types. + + Schema validation is not strict, so ntp config is still be rendered. + """ + invalid_config = {'ntp': {'pools': 123, 'servers': 'non-array'}} + cc = self._get_cloud('ubuntu') + ntp_conf = os.path.join(self.new_root, 'ntp.conf') + with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: + stream.write(NTP_TEMPLATE) + with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): + cc_ntp.handle('cc_ntp', invalid_config, cc, None, []) + self.assertIn( + "Invalid config:\nntp.pools: 123 is not of type 'array'\n" + "ntp.servers: 'non-array' is not of type 'array'", + self.logs.getvalue()) + with open(ntp_conf) as stream: + content = stream.read() + self.assertEqual("servers non-array\npools 123\n", content) + + def test_ntp_handler_schema_validation_warns_invalid_key_present(self): + """Ntp schema validation warns of invalid keys present in ntp config. + + Schema validation is not strict, so ntp config is still be rendered. 
+ """ + invalid_config = { + 'ntp': {'invalidkey': 1, 'pools': ['0.mycompany.pool.ntp.org']}} + cc = self._get_cloud('ubuntu') + ntp_conf = os.path.join(self.new_root, 'ntp.conf') + with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: + stream.write(NTP_TEMPLATE) + with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): + cc_ntp.handle('cc_ntp', invalid_config, cc, None, []) + self.assertIn( + "Invalid config:\nntp: Additional properties are not allowed " + "('invalidkey' was unexpected)", + self.logs.getvalue()) + with open(ntp_conf) as stream: + content = stream.read() + self.assertEqual( + "servers []\npools ['0.mycompany.pool.ntp.org']\n", + content) + + def test_ntp_handler_schema_validation_warns_of_duplicates(self): + """Ntp schema validation warns of duplicates in servers or pools. + + Schema validation is not strict, so ntp config is still be rendered. + """ + invalid_config = { + 'ntp': {'pools': ['0.mypool.org', '0.mypool.org'], + 'servers': ['10.0.0.1', '10.0.0.1']}} + cc = self._get_cloud('ubuntu') + ntp_conf = os.path.join(self.new_root, 'ntp.conf') + with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: + stream.write(NTP_TEMPLATE) + with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): + cc_ntp.handle('cc_ntp', invalid_config, cc, None, []) + self.assertIn( + "Invalid config:\nntp.pools: ['0.mypool.org', '0.mypool.org'] has " + "non-unique elements\nntp.servers: ['10.0.0.1', '10.0.0.1'] has " + "non-unique elements", + self.logs.getvalue()) + with open(ntp_conf) as stream: + content = stream.read() + self.assertEqual( + "servers ['10.0.0.1', '10.0.0.1']\n" + "pools ['0.mypool.org', '0.mypool.org']\n", + content) + # vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_schema.py b/tests/unittests/test_handler/test_schema.py new file mode 100644 index 00000000..3239e326 --- /dev/null +++ b/tests/unittests/test_handler/test_schema.py @@ -0,0 +1,220 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+ +from cloudinit.config.schema import ( + CLOUD_CONFIG_HEADER, SchemaValidationError, get_schema_doc, + validate_cloudconfig_file, validate_cloudconfig_schema, + main) +from cloudinit.util import write_file + +from ..helpers import CiTestCase, mock + +from copy import copy +from six import StringIO +from textwrap import dedent + + +class SchemaValidationErrorTest(CiTestCase): + """Test validate_cloudconfig_schema""" + + def test_schema_validation_error_expects_schema_errors(self): + """SchemaValidationError is initialized from schema_errors.""" + errors = (('key.path', 'unexpected key "junk"'), + ('key2.path', '"-123" is not a valid "hostname" format')) + exception = SchemaValidationError(schema_errors=errors) + self.assertIsInstance(exception, Exception) + self.assertEqual(exception.schema_errors, errors) + self.assertEqual( + 'Cloud config schema errors: key.path: unexpected key "junk", ' + 'key2.path: "-123" is not a valid "hostname" format', + str(exception)) + self.assertTrue(isinstance(exception, ValueError)) + + +class ValidateCloudConfigSchemaTest(CiTestCase): + """Tests for validate_cloudconfig_schema.""" + + with_logs = True + + def test_validateconfig_schema_non_strict_emits_warnings(self): + """When strict is False validate_cloudconfig_schema emits warnings.""" + schema = {'properties': {'p1': {'type': 'string'}}} + validate_cloudconfig_schema({'p1': -1}, schema, strict=False) + self.assertIn( + "Invalid config:\np1: -1 is not of type 'string'\n", + self.logs.getvalue()) + + def test_validateconfig_schema_emits_warning_on_missing_jsonschema(self): + """Warning from validate_cloudconfig_schema when missing jsonschema.""" + schema = {'properties': {'p1': {'type': 'string'}}} + with mock.patch.dict('sys.modules', **{'jsonschema': ImportError()}): + validate_cloudconfig_schema({'p1': -1}, schema, strict=True) + self.assertIn( + 'Ignoring schema validation. python-jsonschema is not present', + self.logs.getvalue()) + + def test_validateconfig_schema_strict_raises_errors(self): + """When strict is True validate_cloudconfig_schema raises errors.""" + schema = {'properties': {'p1': {'type': 'string'}}} + with self.assertRaises(SchemaValidationError) as context_mgr: + validate_cloudconfig_schema({'p1': -1}, schema, strict=True) + self.assertEqual( + "Cloud config schema errors: p1: -1 is not of type 'string'", + str(context_mgr.exception)) + + def test_validateconfig_schema_honors_formats(self): + """When strict is True validate_cloudconfig_schema raises errors.""" + schema = { + 'properties': {'p1': {'type': 'string', 'format': 'hostname'}}} + with self.assertRaises(SchemaValidationError) as context_mgr: + validate_cloudconfig_schema({'p1': '-1'}, schema, strict=True) + self.assertEqual( + "Cloud config schema errors: p1: '-1' is not a 'hostname'", + str(context_mgr.exception)) + + +class ValidateCloudConfigFileTest(CiTestCase): + """Tests for validate_cloudconfig_file.""" + + def setUp(self): + super(ValidateCloudConfigFileTest, self).setUp() + self.config_file = self.tmp_path('cloudcfg.yaml') + + def test_validateconfig_file_error_on_absent_file(self): + """On absent config_path, validate_cloudconfig_file errors.""" + with self.assertRaises(RuntimeError) as context_mgr: + validate_cloudconfig_file('/not/here', {}) + self.assertEqual( + 'Configfile /not/here does not exist', + str(context_mgr.exception)) + + def test_validateconfig_file_error_on_invalid_header(self): + """On invalid header, validate_cloudconfig_file errors. 
+ + A SchemaValidationError is raised when the file doesn't begin with + CLOUD_CONFIG_HEADER. + """ + write_file(self.config_file, '#junk') + with self.assertRaises(SchemaValidationError) as context_mgr: + validate_cloudconfig_file(self.config_file, {}) + self.assertEqual( + 'Cloud config schema errors: header: File {0} needs to begin with ' + '"{1}"'.format(self.config_file, CLOUD_CONFIG_HEADER.decode()), + str(context_mgr.exception)) + + def test_validateconfig_file_error_on_non_yaml_format(self): + """On non-yaml format, validate_cloudconfig_file errors.""" + write_file(self.config_file, '#cloud-config\n{}}') + with self.assertRaises(SchemaValidationError) as context_mgr: + validate_cloudconfig_file(self.config_file, {}) + self.assertIn( + 'schema errors: format: File {0} is not valid yaml.'.format( + self.config_file), + str(context_mgr.exception)) + + def test_validateconfig_file_sctricty_validates_schema(self): + """validate_cloudconfig_file raises errors on invalid schema.""" + schema = { + 'properties': {'p1': {'type': 'string', 'format': 'hostname'}}} + write_file(self.config_file, '#cloud-config\np1: "-1"') + with self.assertRaises(SchemaValidationError) as context_mgr: + validate_cloudconfig_file(self.config_file, schema) + self.assertEqual( + "Cloud config schema errors: p1: '-1' is not a 'hostname'", + str(context_mgr.exception)) + + +class GetSchemaDocTest(CiTestCase): + """Tests for get_schema_doc.""" + + def setUp(self): + super(GetSchemaDocTest, self).setUp() + self.required_schema = { + 'title': 'title', 'description': 'description', 'id': 'id', + 'name': 'name', 'frequency': 'frequency', + 'distros': ['debian', 'rhel']} + + def test_get_schema_doc_returns_restructured_text(self): + """get_schema_doc returns restructured text for a cloudinit schema.""" + full_schema = copy(self.required_schema) + full_schema.update( + {'properties': { + 'prop1': {'type': 'array', 'description': 'prop-description', + 'items': {'type': 'int'}}}}) + self.assertEqual( + dedent(""" + name + --- + **Summary:** title + + description + + **Internal name:** ``id`` + + **Module frequency:** frequency + + **Supported distros:** debian, rhel + + **Config schema**: + **prop1:** (array of int) prop-description\n\n"""), + get_schema_doc(full_schema)) + + def test_get_schema_doc_returns_restructured_text_with_examples(self): + """get_schema_doc returns indented examples when present in schema.""" + full_schema = copy(self.required_schema) + full_schema.update( + {'examples': {'ex1': [1, 2, 3]}, + 'properties': { + 'prop1': {'type': 'array', 'description': 'prop-description', + 'items': {'type': 'int'}}}}) + self.assertIn( + dedent(""" + **Config schema**: + **prop1:** (array of int) prop-description + + **Examples**:: + + ex1"""), + get_schema_doc(full_schema)) + + def test_get_schema_doc_raises_key_errors(self): + """get_schema_doc raises KeyErrors on missing keys.""" + for key in self.required_schema: + invalid_schema = copy(self.required_schema) + invalid_schema.pop(key) + with self.assertRaises(KeyError) as context_mgr: + get_schema_doc(invalid_schema) + self.assertEqual("'{0}'".format(key), str(context_mgr.exception)) + + +class MainTest(CiTestCase): + + def test_main_missing_args(self): + """Main exits non-zero and reports an error on missing parameters.""" + with mock.patch('sys.argv', ['mycmd']): + with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr: + self.assertEqual(1, main(), 'Expected non-zero exit code') + self.assertEqual( + 'Expected either --config-file argument or --doc\n', 
+ m_stderr.getvalue()) + + def test_main_prints_docs(self): + """When --doc parameter is provided, main generates documentation.""" + myargs = ['mycmd', '--doc'] + with mock.patch('sys.argv', myargs): + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + self.assertEqual(0, main(), 'Expected 0 exit code') + self.assertIn('\nNTP\n---\n', m_stdout.getvalue()) + + def test_main_validates_config_file(self): + """When --config-file parameter is provided, main validates schema.""" + myyaml = self.tmp_path('my.yaml') + myargs = ['mycmd', '--config-file', myyaml] + with open(myyaml, 'wb') as stream: + stream.write(b'#cloud-config\nntp:') # shortest ntp schema + with mock.patch('sys.argv', myargs): + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + self.assertEqual(0, main(), 'Expected 0 exit code') + self.assertIn( + 'Valid cloud-config file {0}'.format(myyaml), m_stdout.getvalue()) + +# vi: ts=4 expandtab syntax=python diff --git a/tools/cloudconfig-schema b/tools/cloudconfig-schema new file mode 100755 index 00000000..32f0d61e --- /dev/null +++ b/tools/cloudconfig-schema @@ -0,0 +1,35 @@ +#!/usr/bin/env python3 +# This file is part of cloud-init. See LICENSE file for license information. + +"""cloudconfig-schema + +Validate existing files against cloud-config schema or provide supported schema +documentation. +""" + +import os +import sys + + +def call_entry_point(name): + (istr, dot, ent) = name.rpartition('.') + try: + __import__(istr) + except ImportError: + # if that import failed, check dirname(__file__/..) + # to support ./bin/program with modules in . + _tdir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) + sys.path.insert(0, _tdir) + try: + __import__(istr) + except ImportError as e: + sys.stderr.write("Unable to find %s: %s\n" % (name, e)) + sys.exit(2) + + sys.exit(getattr(sys.modules[istr], ent)()) + + +if __name__ == '__main__': + call_entry_point("cloudinit.config.schema.main") + +# vi: ts=4 expandtab syntax=python -- cgit v1.2.3 From 79236a629f1e0e61b260d0cb995b6299a5c7aac1 Mon Sep 17 00:00:00 2001 From: Scott Moser <smoser@brickies.net> Date: Thu, 1 Jun 2017 10:39:34 -0400 Subject: tools/net-convert.py: support old cloudinit versions by using kwargs. Older cloud-init versions have a bug in the signature of the render_network_state method for netplan (bug 1685944). The old had: render_network_state(target, network_state) The fix was to change netplan's so it had the correct signature: render_network_state(network_state, target) This just changes our caller to use kwargs style when invoking that method so that it works with either the broken form or correct form. --- tools/net-convert.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/net-convert.py b/tools/net-convert.py index b2db8adf..68559cbf 100755 --- a/tools/net-convert.py +++ b/tools/net-convert.py @@ -75,7 +75,7 @@ def main(): r_cls = sysconfig.Renderer r = r_cls() - r.render_network_state(ns, target=args.directory) + r.render_network_state(network_state=ns, target=args.directory) if __name__ == '__main__': -- cgit v1.2.3 From 5fb49bacf7441d8d20a7b4e0e7008ca586f5ebab Mon Sep 17 00:00:00 2001 From: Chad Smith <chad.smith@canonical.com> Date: Tue, 30 May 2017 10:28:05 -0600 Subject: azure: identify platform by well known value in chassis asset tag. Azure sets a known chassis asset tag to 7783-7084-3265-9085-8269-3286-77. We can inspect this in both ds-identify and DataSource.get_data to determine whether we are on Azure. 
Added unit tests to cover these changes and some minor tweaks to Exception error message content to give more context on malformed or missing ovf-env.xml files. LP: #1693939 --- cloudinit/sources/DataSourceAzure.py | 9 +++- tests/unittests/test_datasource/test_azure.py | 66 +++++++++++++++++++++++++-- tests/unittests/test_ds_identify.py | 39 ++++++++++++++++ tools/ds-identify | 35 +++++++++----- 4 files changed, 134 insertions(+), 15 deletions(-) (limited to 'tools') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index b9458ffa..314848e4 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -36,6 +36,8 @@ RESOURCE_DISK_PATH = '/dev/disk/cloud/azure_resource' DEFAULT_PRIMARY_NIC = 'eth0' LEASE_FILE = '/var/lib/dhcp/dhclient.eth0.leases' DEFAULT_FS = 'ext4' +# DMI chassis-asset-tag is set static for all azure instances +AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77' def find_storvscid_from_sysctl_pnpinfo(sysctl_out, deviceid): @@ -320,6 +322,11 @@ class DataSourceAzureNet(sources.DataSource): # azure removes/ejects the cdrom containing the ovf-env.xml # file on reboot. So, in order to successfully reboot we # need to look in the datadir and consider that valid + asset_tag = util.read_dmi_data('chassis-asset-tag') + if asset_tag != AZURE_CHASSIS_ASSET_TAG: + LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag) + return False + asset_tag = util.read_dmi_data('chassis-asset-tag') ddir = self.ds_cfg['data_dir'] candidates = [self.seed_dir] @@ -694,7 +701,7 @@ def read_azure_ovf(contents): try: dom = minidom.parseString(contents) except Exception as e: - raise BrokenAzureDataSource("invalid xml: %s" % e) + raise BrokenAzureDataSource("Invalid ovf-env.xml: %s" % e) results = find_child(dom.documentElement, lambda n: n.localName == "ProvisioningSection") diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 852ec703..42f49e06 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -76,7 +76,9 @@ def construct_valid_ovf_env(data=None, pubkeys=None, userdata=None): return content -class TestAzureDataSource(TestCase): +class TestAzureDataSource(CiTestCase): + + with_logs = True def setUp(self): super(TestAzureDataSource, self).setUp() @@ -160,6 +162,12 @@ scbus-1 on xpt0 bus 0 self.instance_id = 'test-instance-id' + def _dmi_mocks(key): + if key == 'system-uuid': + return self.instance_id + elif key == 'chassis-asset-tag': + return '7783-7084-3265-9085-8269-3286-77' + self.apply_patches([ (dsaz, 'list_possible_azure_ds_devs', dsdevs), (dsaz, 'invoke_agent', _invoke_agent), @@ -170,7 +178,7 @@ scbus-1 on xpt0 bus 0 (dsaz, 'set_hostname', mock.MagicMock()), (dsaz, 'get_metadata_from_fabric', self.get_metadata_from_fabric), (dsaz.util, 'read_dmi_data', mock.MagicMock( - return_value=self.instance_id)), + side_effect=_dmi_mocks)), ]) dsrc = dsaz.DataSourceAzureNet( @@ -241,6 +249,23 @@ fdescfs /dev/fd fdescfs rw 0 0 res = get_path_dev_freebsd('/etc', mnt_list) self.assertIsNotNone(res) + @mock.patch('cloudinit.sources.DataSourceAzure.util.read_dmi_data') + def test_non_azure_dmi_chassis_asset_tag(self, m_read_dmi_data): + """Report non-azure when DMI's chassis asset tag doesn't match. + + Return False when the asset tag doesn't match Azure's static + AZURE_CHASSIS_ASSET_TAG. 
+ """ + # Return a non-matching asset tag value + nonazure_tag = dsaz.AZURE_CHASSIS_ASSET_TAG + 'X' + m_read_dmi_data.return_value = nonazure_tag + dsrc = dsaz.DataSourceAzureNet( + {}, distro=None, paths=self.paths) + self.assertFalse(dsrc.get_data()) + self.assertEqual( + "Non-Azure DMI asset tag '{0}' discovered.\n".format(nonazure_tag), + self.logs.getvalue()) + def test_basic_seed_dir(self): odata = {'HostName': "myhost", 'UserName': "myuser"} data = {'ovfcontent': construct_valid_ovf_env(data=odata), @@ -531,9 +556,17 @@ class TestAzureBounce(TestCase): self.patches.enter_context( mock.patch.object(dsaz, 'get_metadata_from_fabric', mock.MagicMock(return_value={}))) + + def _dmi_mocks(key): + if key == 'system-uuid': + return 'test-instance-id' + elif key == 'chassis-asset-tag': + return '7783-7084-3265-9085-8269-3286-77' + raise RuntimeError('should not get here') + self.patches.enter_context( mock.patch.object(dsaz.util, 'read_dmi_data', - mock.MagicMock(return_value='test-instance-id'))) + mock.MagicMock(side_effect=_dmi_mocks))) def setUp(self): super(TestAzureBounce, self).setUp() @@ -696,6 +729,33 @@ class TestAzureBounce(TestCase): self.assertEqual(0, self.set_hostname.call_count) +class TestLoadAzureDsDir(CiTestCase): + """Tests for load_azure_ds_dir.""" + + def setUp(self): + self.source_dir = self.tmp_dir() + super(TestLoadAzureDsDir, self).setUp() + + def test_missing_ovf_env_xml_raises_non_azure_datasource_error(self): + """load_azure_ds_dir raises an error When ovf-env.xml doesn't exit.""" + with self.assertRaises(dsaz.NonAzureDataSource) as context_manager: + dsaz.load_azure_ds_dir(self.source_dir) + self.assertEqual( + 'No ovf-env file found', + str(context_manager.exception)) + + def test_wb_invalid_ovf_env_xml_calls_read_azure_ovf(self): + """load_azure_ds_dir calls read_azure_ovf to parse the xml.""" + ovf_path = os.path.join(self.source_dir, 'ovf-env.xml') + with open(ovf_path, 'wb') as stream: + stream.write(b'invalid xml') + with self.assertRaises(dsaz.BrokenAzureDataSource) as context_manager: + dsaz.load_azure_ds_dir(self.source_dir) + self.assertEqual( + 'Invalid ovf-env.xml: syntax error: line 1, column 0', + str(context_manager.exception)) + + class TestReadAzureOvf(TestCase): def test_invalid_xml_raises_non_azure_ds(self): invalid_xml = "<foo>" + construct_valid_ovf_env(data={}) diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 5c26e65f..8ccfe55c 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -39,9 +39,11 @@ RC_FOUND = 0 RC_NOT_FOUND = 1 DS_NONE = 'None' +P_CHASSIS_ASSET_TAG = "sys/class/dmi/id/chassis_asset_tag" P_PRODUCT_NAME = "sys/class/dmi/id/product_name" P_PRODUCT_SERIAL = "sys/class/dmi/id/product_serial" P_PRODUCT_UUID = "sys/class/dmi/id/product_uuid" +P_SEED_DIR = "var/lib/cloud/seed" P_DSID_CFG = "etc/cloud/ds-identify.cfg" MOCK_VIRT_IS_KVM = {'name': 'detect_virt', 'RET': 'kvm', 'ret': 0} @@ -160,6 +162,30 @@ class TestDsIdentify(CiTestCase): _print_run_output(rc, out, err, cfg, files) return rc, out, err, cfg, files + def test_wb_print_variables(self): + """_print_info reports an array of discovered variables to stderr.""" + data = VALID_CFG['Azure-dmi-detection'] + _, _, err, _, _ = self._call_via_dict(data) + expected_vars = [ + 'DMI_PRODUCT_NAME', 'DMI_SYS_VENDOR', 'DMI_PRODUCT_SERIAL', + 'DMI_PRODUCT_UUID', 'PID_1_PRODUCT_NAME', 'DMI_CHASSIS_ASSET_TAG', + 'FS_LABELS', 'KERNEL_CMDLINE', 'VIRT', 'UNAME_KERNEL_NAME', + 'UNAME_KERNEL_RELEASE', 
'UNAME_KERNEL_VERSION', 'UNAME_MACHINE', + 'UNAME_NODENAME', 'UNAME_OPERATING_SYSTEM', 'DSNAME', 'DSLIST', + 'MODE', 'ON_FOUND', 'ON_MAYBE', 'ON_NOTFOUND'] + for var in expected_vars: + self.assertIn('{0}='.format(var), err) + + def test_azure_dmi_detection_from_chassis_asset_tag(self): + """Azure datasource is detected from DMI chassis-asset-tag""" + self._test_ds_found('Azure-dmi-detection') + + def test_azure_seed_file_detection(self): + """Azure datasource is detected due to presence of a seed file. + + The seed file tested is /var/lib/cloud/seed/azure/ovf-env.xml.""" + self._test_ds_found('Azure-seed-detection') + def test_aws_ec2_hvm(self): """EC2: hvm instances use dmi serial and uuid starting with 'ec2'.""" self._test_ds_found('Ec2-hvm') @@ -272,6 +298,19 @@ VALID_CFG = { 'ds': 'AliYun', 'files': {P_PRODUCT_NAME: 'Alibaba Cloud ECS\n'}, }, + 'Azure-dmi-detection': { + 'ds': 'Azure', + 'files': { + P_CHASSIS_ASSET_TAG: '7783-7084-3265-9085-8269-3286-77\n', + } + }, + 'Azure-seed-detection': { + 'ds': 'Azure', + 'files': { + P_CHASSIS_ASSET_TAG: 'No-match\n', + os.path.join(P_SEED_DIR, 'azure', 'ovf-env.xml'): 'present\n', + } + }, 'Ec2-hvm': { 'ds': 'Ec2', 'mocks': [{'name': 'detect_virt', 'RET': 'kvm', 'ret': 0}], diff --git a/tools/ds-identify b/tools/ds-identify index 5fc500b9..546e0f59 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -85,6 +85,7 @@ DI_MAIN=${DI_MAIN:-main} DI_DEFAULT_POLICY="search,found=all,maybe=all,notfound=${DI_DISABLED}" DI_DEFAULT_POLICY_NO_DMI="search,found=all,maybe=all,notfound=${DI_ENABLED}" +DI_DMI_CHASSIS_ASSET_TAG="" DI_DMI_PRODUCT_NAME="" DI_DMI_SYS_VENDOR="" DI_DMI_PRODUCT_SERIAL="" @@ -259,6 +260,12 @@ read_kernel_cmdline() { DI_KERNEL_CMDLINE="$cmdline" } +read_dmi_chassis_asset_tag() { + cached "${DI_DMI_CHASSIS_ASSET_TAG}" && return + get_dmi_field chassis_asset_tag + DI_DMI_CHASSIS_ASSET_TAG="$_RET" +} + read_dmi_sys_vendor() { cached "${DI_DMI_SYS_VENDOR}" && return get_dmi_field sys_vendor @@ -386,6 +393,14 @@ read_pid1_product_name() { DI_PID_1_PRODUCT_NAME="$product_name" } +dmi_chassis_asset_tag_matches() { + is_container && return 1 + case "${DI_DMI_CHASSIS_ASSET_TAG}" in + $1) return 0;; + esac + return 1 +} + dmi_product_name_matches() { is_container && return 1 case "${DI_DMI_PRODUCT_NAME}" in @@ -402,11 +417,6 @@ dmi_product_serial_matches() { return 1 } -dmi_product_name_is() { - is_container && return 1 - [ "${DI_DMI_PRODUCT_NAME}" = "$1" ] -} - dmi_sys_vendor_is() { is_container && return 1 [ "${DI_DMI_SYS_VENDOR}" = "$1" ] @@ -478,7 +488,7 @@ dscheck_CloudStack() { dscheck_CloudSigma() { # http://paste.ubuntu.com/23624795/ - dmi_product_name_is "CloudSigma" && return $DS_FOUND + dmi_product_name_matches "CloudSigma" && return $DS_FOUND return $DS_NOT_FOUND } @@ -654,6 +664,8 @@ dscheck_Azure() { # UUID="112D211272645f72" LABEL="rd_rdfe_stable.161212-1209" # TYPE="udf">/dev/sr0</device> # + local azure_chassis="7783-7084-3265-9085-8269-3286-77" + dmi_chassis_asset_tag_matches "${azure_chassis}" && return $DS_FOUND check_seed_dir azure ovf-env.xml && return ${DS_FOUND} [ "${DI_VIRT}" = "microsoft" ] || return ${DS_NOT_FOUND} @@ -786,7 +798,7 @@ dscheck_Ec2() { } dscheck_GCE() { - if dmi_product_name_is "Google Compute Engine"; then + if dmi_product_name_matches "Google Compute Engine"; then return ${DS_FOUND} fi # product name is not guaranteed (LP: #1674861) @@ -807,10 +819,10 @@ dscheck_OpenStack() { return ${DS_NOT_FOUND} fi local nova="OpenStack Nova" compute="OpenStack Compute" - if dmi_product_name_is "$nova"; then 
+ if dmi_product_name_matches "$nova"; then return ${DS_FOUND} fi - if dmi_product_name_is "$compute"; then + if dmi_product_name_matches "$compute"; then # RDO installed nova (LP: #1675349). return ${DS_FOUND} fi @@ -823,7 +835,7 @@ dscheck_OpenStack() { dscheck_AliYun() { check_seed_dir "AliYun" meta-data user-data && return ${DS_FOUND} - if dmi_product_name_is "Alibaba Cloud ECS"; then + if dmi_product_name_matches "Alibaba Cloud ECS"; then return $DS_FOUND fi return $DS_NOT_FOUND @@ -889,6 +901,7 @@ collect_info() { read_config read_datasource_list read_dmi_sys_vendor + read_dmi_chassis_asset_tag read_dmi_product_name read_dmi_product_serial read_dmi_product_uuid @@ -903,7 +916,7 @@ print_info() { _print_info() { local n="" v="" vars="" vars="DMI_PRODUCT_NAME DMI_SYS_VENDOR DMI_PRODUCT_SERIAL" - vars="$vars DMI_PRODUCT_UUID PID_1_PRODUCT_NAME" + vars="$vars DMI_PRODUCT_UUID PID_1_PRODUCT_NAME DMI_CHASSIS_ASSET_TAG" vars="$vars FS_LABELS KERNEL_CMDLINE VIRT" vars="$vars UNAME_KERNEL_NAME UNAME_KERNEL_RELEASE UNAME_KERNEL_VERSION" vars="$vars UNAME_MACHINE UNAME_NODENAME UNAME_OPERATING_SYSTEM" -- cgit v1.2.3 From 802e7cb2da8e2d0225525160e6edd6b58b275b8c Mon Sep 17 00:00:00 2001 From: Vladimir Pouzanov <farcaller@google.com> Date: Tue, 2 May 2017 16:08:34 +0100 Subject: NoCloud: support seed of nocloud from smbios information This allows the user to seed NoCloud in a trivial way from qemu/libvirt, by using a stock image and passing a single command line flag. No custom command line, no filesystem modification, no bootstrap disk image. This is particularly handy now that Ec2 backend is discouraged from use under bug 1660385. LP: #1691772 --- cloudinit/sources/DataSourceNoCloud.py | 12 ++++++++++++ doc/rtd/topics/datasources/nocloud.rst | 22 ++++++++++++++++++++++ tools/ds-identify | 3 +++ 3 files changed, 37 insertions(+) (limited to 'tools') diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index c68f6b8c..e641244d 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -42,6 +42,18 @@ class DataSourceNoCloud(sources.DataSource): mydata = {'meta-data': {}, 'user-data': "", 'vendor-data': "", 'network-config': None} + try: + # Parse the system serial label from dmi. If not empty, try parsing + # like the commandline + md = {} + serial = util.read_dmi_data('system-serial-number') + if serial and load_cmdline_data(md, serial): + found.append("dmi") + mydata = _merge_new_seed(mydata, {'meta-data': md}) + except Exception: + util.logexc(LOG, "Unable to parse dmi data") + return False + try: # Parse the kernel command line, getting data passed in md = {} diff --git a/doc/rtd/topics/datasources/nocloud.rst b/doc/rtd/topics/datasources/nocloud.rst index 0159e853..665057f3 100644 --- a/doc/rtd/topics/datasources/nocloud.rst +++ b/doc/rtd/topics/datasources/nocloud.rst @@ -11,6 +11,28 @@ You can provide meta-data and user-data to a local vm boot via files on a `vfat`_ or `iso9660`_ filesystem. The filesystem volume label must be ``cidata``. +Alternatively, you can provide meta-data via kernel command line or SMBIOS +"serial number" option. The data must be passed in the form of a string: + +:: + + ds=nocloud[;key=val;key=val] + +or + +:: + + ds=nocloud-net[;key=val;key=val] + +e.g. 
you can pass this option to QEMU: + +:: + + -smbios type=1,serial=ds=nocloud-net;s=http://10.10.0.1:8000/ + +to cause NoCloud to fetch the full meta-data from http://10.10.0.1:8000/meta-data +after the network initialization is complete. + These user-data and meta-data files are expected to be in the following format. :: diff --git a/tools/ds-identify b/tools/ds-identify index 546e0f59..7c8b144b 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -555,6 +555,9 @@ dscheck_NoCloud() { case " ${DI_KERNEL_CMDLINE} " in *\ ds=nocloud*) return ${DS_FOUND};; esac + case " ${DI_DMI_PRODUCT_SERIAL} " in + *\ ds=nocloud*) return ${DS_FOUND};; + esac for d in nocloud nocloud-net; do check_seed_dir "$d" meta-data user-data && return ${DS_FOUND} done -- cgit v1.2.3 From 41d46bfb85929c79dabcec3cf21c8d71401fd2b8 Mon Sep 17 00:00:00 2001 From: Scott Moser <smoser@ubuntu.com> Date: Wed, 28 Sep 2016 13:20:55 -0700 Subject: cloud.cfg: move to a template. setup.py changes along the way. Here we move the config/cloud.cfg to be rendered as a template. That allows us to maintain deltas between distros in one place. Currently we use 'variant' variable to make decisions. A tools/render-cloudcfg is provided to render the file. There were changes to setup.py, MANIFEST.in to allow us to put all files into a virtual env installation and to render the cloud-config file in 'install' or 'bdist' targets. We have also included some config changes that were found in the redhat distro spec. * include some config changes from the redhat distro spec. The rendered cloud.cfg has some differences. Ubuntu: white space and comment changes only. Freebsd: - whitespace changes and comment changes - datasource_list definition moved to be closer to 'datasource'. - enable modules: migrator, write_files - move package-update-upgrade-install to final. The initial work was done by Josh Harlow. --- MANIFEST.in | 11 ++- Makefile | 3 + cloudinit/util.py | 29 ++++++- config/cloud.cfg | 117 ---------------------------- config/cloud.cfg-freebsd | 88 --------------------- config/cloud.cfg.tmpl | 194 +++++++++++++++++++++++++++++++++++++++++++++++ setup.py | 168 ++++++++++++++++++++++++---------------- tools/render-cloudcfg | 43 +++++++++++ 8 files changed, 379 insertions(+), 274 deletions(-) delete mode 100644 config/cloud.cfg delete mode 100644 config/cloud.cfg-freebsd create mode 100644 config/cloud.cfg.tmpl create mode 100755 tools/render-cloudcfg (limited to 'tools') diff --git a/MANIFEST.in b/MANIFEST.in index 94264640..1a4d7711 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,6 +1,15 @@ -include *.py MANIFEST.in ChangeLog +include *.py MANIFEST.in LICENSE* ChangeLog global-include *.txt *.rst *.ini *.in *.conf *.cfg *.sh +graft config +graft doc +graft packages +graft systemd +graft sysvinit +graft templates +graft tests graft tools +graft udev +graft upstart prune build prune dist prune .tox diff --git a/Makefile b/Makefile index 66d1dcad..a3bfaf79 100644 --- a/Makefile +++ b/Makefile @@ -69,6 +69,9 @@ check_version: "not equal to code version '$(CODE_VERSION)'"; exit 2; \ else true; fi +config/cloud.cfg: + $(PYVER) ./tools/render-cloudcfg config/cloud.cfg.tmpl config/cloud.cfg + clean_pyc: @find . 
-type f -name "*.pyc" -delete diff --git a/cloudinit/util.py b/cloudinit/util.py index 135e4608..b8c3e4ee 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -592,13 +592,40 @@ def get_cfg_option_int(yobj, key, default=0): def system_info(): - return { + info = { 'platform': platform.platform(), 'release': platform.release(), 'python': platform.python_version(), 'uname': platform.uname(), 'dist': platform.linux_distribution(), # pylint: disable=W1505 } + plat = info['platform'].lower() + # Try to get more info about what it actually is, in a format + # that we can easily use across linux and variants... + if plat.startswith('darwin'): + info['variant'] = 'darwin' + elif plat.endswith("bsd"): + info['variant'] = 'bsd' + elif plat.startswith('win'): + info['variant'] = 'windows' + elif 'linux' in plat: + # Try to get a single string out of these... + linux_dist, _version, _id = info['dist'] + linux_dist = linux_dist.lower() + if linux_dist in ('ubuntu', 'linuxmint', 'mint'): + info['variant'] = 'ubuntu' + else: + for prefix, variant in [('redhat', 'rhel'), + ('centos', 'centos'), + ('fedora', 'fedora'), + ('debian', 'debian')]: + if linux_dist.startswith(prefix): + info['variant'] = variant + if 'variant' not in info: + info['variant'] = 'linux' + if 'variant' not in info: + info['variant'] = 'unknown' + return info def get_cfg_option_list(yobj, key, default=None): diff --git a/config/cloud.cfg b/config/cloud.cfg deleted file mode 100644 index 1b93e7f9..00000000 --- a/config/cloud.cfg +++ /dev/null @@ -1,117 +0,0 @@ -# The top level settings are used as module -# and system configuration. - -# A set of users which may be applied and/or used by various modules -# when a 'default' entry is found it will reference the 'default_user' -# from the distro configuration specified below -users: - - default - -# If this is set, 'root' will not be able to ssh in and they -# will get a message to login instead as the above $user (ubuntu) -disable_root: true - -# This will cause the set+update hostname module to not operate (if true) -preserve_hostname: false - -# Example datasource config -# datasource: -# Ec2: -# metadata_urls: [ 'blah.com' ] -# timeout: 5 # (defaults to 50 seconds) -# max_wait: 10 # (defaults to 120 seconds) - -# The modules that run in the 'init' stage -cloud_init_modules: - - migrator - - ubuntu-init-switch - - seed_random - - bootcmd - - write-files - - growpart - - resizefs - - disk_setup - - mounts - - set_hostname - - update_hostname - - update_etc_hosts - - ca-certs - - rsyslog - - users-groups - - ssh - -# The modules that run in the 'config' stage -cloud_config_modules: -# Emit the cloud config ready event -# this can be used by upstart jobs for 'start on cloud-config'. 
- - emit_upstart - - snap_config - - ssh-import-id - - locale - - set-passwords - - grub-dpkg - - apt-pipelining - - apt-configure - - ntp - - timezone - - disable-ec2-metadata - - runcmd - - byobu - -# The modules that run in the 'final' stage -cloud_final_modules: - - snappy - - package-update-upgrade-install - - fan - - landscape - - lxd - - puppet - - chef - - salt-minion - - mcollective - - rightscale_userdata - - scripts-vendor - - scripts-per-once - - scripts-per-boot - - scripts-per-instance - - scripts-user - - ssh-authkey-fingerprints - - keys-to-console - - phone-home - - final-message - - power-state-change - -# System and/or distro specific settings -# (not accessible to handlers/transforms) -system_info: - # This will affect which distro class gets used - distro: ubuntu - # Default user name + that default users groups (if added/used) - default_user: - name: ubuntu - lock_passwd: True - gecos: Ubuntu - groups: [adm, audio, cdrom, dialout, dip, floppy, lxd, netdev, plugdev, sudo, video] - sudo: ["ALL=(ALL) NOPASSWD:ALL"] - shell: /bin/bash - # Other config here will be given to the distro class and/or path classes - paths: - cloud_dir: /var/lib/cloud/ - templates_dir: /etc/cloud/templates/ - upstart_dir: /etc/init/ - package_mirrors: - - arches: [i386, amd64] - failsafe: - primary: http://archive.ubuntu.com/ubuntu - security: http://security.ubuntu.com/ubuntu - search: - primary: - - http://%(ec2_region)s.ec2.archive.ubuntu.com/ubuntu/ - - http://%(availability_zone)s.clouds.archive.ubuntu.com/ubuntu/ - - http://%(region)s.clouds.archive.ubuntu.com/ubuntu/ - security: [] - - arches: [armhf, armel, default] - failsafe: - primary: http://ports.ubuntu.com/ubuntu-ports - security: http://ports.ubuntu.com/ubuntu-ports - ssh_svcname: ssh diff --git a/config/cloud.cfg-freebsd b/config/cloud.cfg-freebsd deleted file mode 100644 index d666c397..00000000 --- a/config/cloud.cfg-freebsd +++ /dev/null @@ -1,88 +0,0 @@ -# The top level settings are used as module -# and system configuration. - -syslog_fix_perms: root:wheel - -# This should not be required, but leave it in place until the real cause of -# not beeing able to find -any- datasources is resolved. 
-datasource_list: ['ConfigDrive', 'Azure', 'OpenStack', 'Ec2'] - -# A set of users which may be applied and/or used by various modules -# when a 'default' entry is found it will reference the 'default_user' -# from the distro configuration specified below -users: - - default - -# If this is set, 'root' will not be able to ssh in and they -# will get a message to login instead as the above $user (ubuntu) -disable_root: false - -# This will cause the set+update hostname module to not operate (if true) -preserve_hostname: false - -# Example datasource config -# datasource: -# Ec2: -# metadata_urls: [ 'blah.com' ] -# timeout: 5 # (defaults to 50 seconds) -# max_wait: 10 # (defaults to 120 seconds) - -# The modules that run in the 'init' stage -cloud_init_modules: -# - migrator - - seed_random - - bootcmd -# - write-files - - growpart - - resizefs - - set_hostname - - update_hostname -# - update_etc_hosts -# - ca-certs -# - rsyslog - - users-groups - - ssh - -# The modules that run in the 'config' stage -cloud_config_modules: -# - disk_setup -# - mounts - - ssh-import-id - - locale - - set-passwords - - package-update-upgrade-install -# - landscape - - timezone -# - puppet -# - chef -# - salt-minion -# - mcollective - - disable-ec2-metadata - - runcmd -# - byobu - -# The modules that run in the 'final' stage -cloud_final_modules: - - rightscale_userdata - - scripts-vendor - - scripts-per-once - - scripts-per-boot - - scripts-per-instance - - scripts-user - - ssh-authkey-fingerprints - - keys-to-console - - phone-home - - final-message - - power-state-change - -# System and/or distro specific settings -# (not accessible to handlers/transforms) -system_info: - distro: freebsd - default_user: - name: freebsd - lock_passwd: True - gecos: FreeBSD - groups: [wheel] - sudo: ["ALL=(ALL) NOPASSWD:ALL"] - shell: /bin/tcsh diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl new file mode 100644 index 00000000..5af2a88f --- /dev/null +++ b/config/cloud.cfg.tmpl @@ -0,0 +1,194 @@ +## template:jinja +# The top level settings are used as module +# and system configuration. + +{% if variant in ["bsd"] %} +syslog_fix_perms: root:wheel +{% endif %} +# A set of users which may be applied and/or used by various modules +# when a 'default' entry is found it will reference the 'default_user' +# from the distro configuration specified below +users: + - default + +# If this is set, 'root' will not be able to ssh in and they +# will get a message to login instead as the default $user +{% if variant in ["bsd"] %} +disable_root: false +{% else %} +disable_root: true +{% endif %} + +{% if variant in ["centos", "fedora", "rhel"] %} +mount_default_fields: [~, ~, 'auto', 'defaults,nofail', '0', '2'] +resize_rootfs_tmp: /dev +ssh_deletekeys: 0 +ssh_genkeytypes: ~ +ssh_pwauth: 0 + +{% endif %} +# This will cause the set+update hostname module to not operate (if true) +preserve_hostname: false + +{% if variant in ["bsd"] %} +# This should not be required, but leave it in place until the real cause of +# not beeing able to find -any- datasources is resolved. 
+datasource_list: ['ConfigDrive', 'Azure', 'OpenStack', 'Ec2'] +{% endif %} +# Example datasource config +# datasource: +# Ec2: +# metadata_urls: [ 'blah.com' ] +# timeout: 5 # (defaults to 50 seconds) +# max_wait: 10 # (defaults to 120 seconds) + +# The modules that run in the 'init' stage +cloud_init_modules: + - migrator +{% if variant in ["ubuntu", "unknown", "debian"] %} + - ubuntu-init-switch +{% endif %} + - seed_random + - bootcmd + - write-files + - growpart + - resizefs +{% if variant not in ["bsd"] %} + - disk_setup + - mounts +{% endif %} + - set_hostname + - update_hostname +{% if variant not in ["bsd"] %} + - update_etc_hosts + - ca-certs + - rsyslog +{% endif %} + - users-groups + - ssh + +# The modules that run in the 'config' stage +cloud_config_modules: +{% if variant in ["ubuntu", "unknown", "debian"] %} +# Emit the cloud config ready event +# this can be used by upstart jobs for 'start on cloud-config'. + - emit_upstart + - snap_config +{% endif %} + - ssh-import-id + - locale + - set-passwords +{% if variant in ["rhel", "fedora"] %} + - spacewalk + - yum-add-repo +{% endif %} +{% if variant in ["ubuntu", "unknown", "debian"] %} + - grub-dpkg + - apt-pipelining + - apt-configure +{% endif %} +{% if variant not in ["bsd"] %} + - ntp +{% endif %} + - timezone + - disable-ec2-metadata + - runcmd +{% if variant in ["ubuntu", "unknown", "debian"] %} + - byobu +{% endif %} + +# The modules that run in the 'final' stage +cloud_final_modules: +{% if variant in ["ubuntu", "unknown", "debian"] %} + - snappy +{% endif %} + - package-update-upgrade-install +{% if variant in ["ubuntu", "unknown", "debian"] %} + - fan + - landscape + - lxd +{% endif %} +{% if variant not in ["bsd"] %} + - puppet + - chef + - salt-minion + - mcollective +{% endif %} + - rightscale_userdata + - scripts-vendor + - scripts-per-once + - scripts-per-boot + - scripts-per-instance + - scripts-user + - ssh-authkey-fingerprints + - keys-to-console + - phone-home + - final-message + - power-state-change + +# System and/or distro specific settings +# (not accessible to handlers/transforms) +system_info: + # This will affect which distro class gets used +{% if variant in ["centos", "debian", "fedora", "rhel", "ubuntu"] %} + distro: {{ variant }} +{% elif variant in ["bsd"] %} + distro: freebsd +{% else %} + # Unknown/fallback distro. 
+ distro: ubuntu +{% endif %} +{% if variant in ["ubuntu", "unknown", "debian"] %} + # Default user name + that default users groups (if added/used) + default_user: + name: ubuntu + lock_passwd: True + gecos: Ubuntu + groups: [adm, audio, cdrom, dialout, dip, floppy, lxd, netdev, plugdev, sudo, video] + sudo: ["ALL=(ALL) NOPASSWD:ALL"] + shell: /bin/bash + # Other config here will be given to the distro class and/or path classes + paths: + cloud_dir: /var/lib/cloud/ + templates_dir: /etc/cloud/templates/ + upstart_dir: /etc/init/ + package_mirrors: + - arches: [i386, amd64] + failsafe: + primary: http://archive.ubuntu.com/ubuntu + security: http://security.ubuntu.com/ubuntu + search: + primary: + - http://%(ec2_region)s.ec2.archive.ubuntu.com/ubuntu/ + - http://%(availability_zone)s.clouds.archive.ubuntu.com/ubuntu/ + - http://%(region)s.clouds.archive.ubuntu.com/ubuntu/ + security: [] + - arches: [armhf, armel, default] + failsafe: + primary: http://ports.ubuntu.com/ubuntu-ports + security: http://ports.ubuntu.com/ubuntu-ports + ssh_svcname: ssh +{% elif variant in ["centos", "rhel", "fedora"] %} + # Default user name + that default users groups (if added/used) + default_user: + name: {{ variant }} + lock_passwd: True + gecos: {{ variant }} Cloud User + groups: [wheel, adm, systemd-journal] + sudo: ["ALL=(ALL) NOPASSWD:ALL"] + shell: /bin/bash + # Other config here will be given to the distro class and/or path classes + paths: + cloud_dir: /var/lib/cloud/ + templates_dir: /etc/cloud/templates/ + ssh_svcname: sshd +{% elif variant in ["bsd"] %} + # Default user name + that default users groups (if added/used) + default_user: + name: freebsd + lock_passwd: True + gecos: FreeBSD + groups: [wheel] + sudo: ["ALL=(ALL) NOPASSWD:ALL"] + shell: /bin/tcsh +{% endif %} diff --git a/setup.py b/setup.py index 4616599b..d5223285 100755 --- a/setup.py +++ b/setup.py @@ -10,8 +10,11 @@ from glob import glob +import atexit import os +import shutil import sys +import tempfile import setuptools from setuptools.command.install import install @@ -53,47 +56,15 @@ def pkg_config_read(library, var): cmd = ['pkg-config', '--variable=%s' % var, library] try: (path, err) = tiny_p(cmd) + path = path.strip() except Exception: - return fallbacks[library][var] - return str(path).strip() + path = fallbacks[library][var] + if path.startswith("/"): + path = path[1:] + return path -INITSYS_FILES = { - 'sysvinit': [f for f in glob('sysvinit/redhat/*') if is_f(f)], - 'sysvinit_freebsd': [f for f in glob('sysvinit/freebsd/*') if is_f(f)], - 'sysvinit_deb': [f for f in glob('sysvinit/debian/*') if is_f(f)], - 'sysvinit_openrc': [f for f in glob('sysvinit/gentoo/*') if is_f(f)], - 'systemd': [f for f in (glob('systemd/*.service') + - glob('systemd/*.target')) if is_f(f)], - 'systemd.generators': [f for f in glob('systemd/*-generator') if is_f(f)], - 'upstart': [f for f in glob('upstart/*') if is_f(f)], -} -INITSYS_ROOTS = { - 'sysvinit': '/etc/rc.d/init.d', - 'sysvinit_freebsd': '/usr/local/etc/rc.d', - 'sysvinit_deb': '/etc/init.d', - 'sysvinit_openrc': '/etc/init.d', - 'systemd': pkg_config_read('systemd', 'systemdsystemunitdir'), - 'systemd.generators': pkg_config_read('systemd', - 'systemdsystemgeneratordir'), - 'upstart': '/etc/init/', -} -INITSYS_TYPES = sorted([f.partition(".")[0] for f in INITSYS_ROOTS.keys()]) - -# Install everything in the right location and take care of Linux (default) and -# FreeBSD systems. 
-USR = "/usr" -ETC = "/etc" -USR_LIB_EXEC = "/usr/lib" -LIB = "/lib" -if os.uname()[0] == 'FreeBSD': - USR = "/usr/local" - USR_LIB_EXEC = "/usr/local/lib" -elif os.path.isfile('/etc/redhat-release'): - USR_LIB_EXEC = "/usr/libexec" - -# Avoid having datafiles installed in a virtualenv... def in_virtualenv(): try: if sys.real_prefix == sys.prefix: @@ -116,6 +87,66 @@ def read_requires(): return str(deps).splitlines() +def render_cloud_cfg(): + """render cloud.cfg into a tmpdir under same dir as setup.py + + This is rendered to a temporary directory under the top level + directory with the name 'cloud.cfg'. The reason for not just rendering + to config/cloud.cfg is for a.) don't want to write over contents + in that file if user had something there. b.) debuild will complain + that files are different outside of the debian directory.""" + + # older versions of tox use bdist (xenial), and then install from there. + # newer versions just use install. + if not (sys.argv[1] == 'install' or sys.argv[1].startswith('bdist*')): + return 'config/cloud.cfg.tmpl' + topdir = os.path.dirname(sys.argv[0]) + tmpd = tempfile.mkdtemp(dir=topdir) + atexit.register(shutil.rmtree, tmpd) + fpath = os.path.join(tmpd, 'cloud.cfg') + tiny_p([sys.executable, './tools/render-cloudcfg', + 'config/cloud.cfg.tmpl', fpath]) + # relpath is relative to setup.py + relpath = os.path.join(os.path.basename(tmpd), 'cloud.cfg') + return relpath + + +INITSYS_FILES = { + 'sysvinit': [f for f in glob('sysvinit/redhat/*') if is_f(f)], + 'sysvinit_freebsd': [f for f in glob('sysvinit/freebsd/*') if is_f(f)], + 'sysvinit_deb': [f for f in glob('sysvinit/debian/*') if is_f(f)], + 'sysvinit_openrc': [f for f in glob('sysvinit/gentoo/*') if is_f(f)], + 'systemd': [f for f in (glob('systemd/*.service') + + glob('systemd/*.target')) if is_f(f)], + 'systemd.generators': [f for f in glob('systemd/*-generator') if is_f(f)], + 'upstart': [f for f in glob('upstart/*') if is_f(f)], +} +INITSYS_ROOTS = { + 'sysvinit': 'etc/rc.d/init.d', + 'sysvinit_freebsd': 'usr/local/etc/rc.d', + 'sysvinit_deb': 'etc/init.d', + 'sysvinit_openrc': 'etc/init.d', + 'systemd': pkg_config_read('systemd', 'systemdsystemunitdir'), + 'systemd.generators': pkg_config_read('systemd', + 'systemdsystemgeneratordir'), + 'upstart': 'etc/init/', +} +INITSYS_TYPES = sorted([f.partition(".")[0] for f in INITSYS_ROOTS.keys()]) + + +# Install everything in the right location and take care of Linux (default) and +# FreeBSD systems. +USR = "usr" +ETC = "etc" +USR_LIB_EXEC = "usr/lib" +LIB = "lib" +if os.uname()[0] == 'FreeBSD': + USR = "usr/local" + USR_LIB_EXEC = "usr/local/lib" +elif os.path.isfile('/etc/redhat-release'): + USR_LIB_EXEC = "usr/libexec" + + # TODO: Is there a better way to do this?? 
class InitsysInstallData(install): init_system = None @@ -155,36 +186,39 @@ class InitsysInstallData(install): self.distribution.reinitialize_command('install_data', True) -if in_virtualenv(): - data_files = [] - cmdclass = {} -else: - data_files = [ - (ETC + '/cloud', glob('config/*.cfg')), - (ETC + '/cloud/cloud.cfg.d', glob('config/cloud.cfg.d/*')), - (ETC + '/cloud/templates', glob('templates/*')), - (USR_LIB_EXEC + '/cloud-init', ['tools/ds-identify', - 'tools/uncloud-init', - 'tools/write-ssh-key-fingerprints']), - (USR + '/share/doc/cloud-init', [f for f in glob('doc/*') if is_f(f)]), - (USR + '/share/doc/cloud-init/examples', - [f for f in glob('doc/examples/*') if is_f(f)]), - (USR + '/share/doc/cloud-init/examples/seed', - [f for f in glob('doc/examples/seed/*') if is_f(f)]), - ] - if os.uname()[0] != 'FreeBSD': - data_files.extend([ - (ETC + '/NetworkManager/dispatcher.d/', - ['tools/hook-network-manager']), - (ETC + '/dhcp/dhclient-exit-hooks.d/', ['tools/hook-dhclient']), - (LIB + '/udev/rules.d', [f for f in glob('udev/*.rules')]) - ]) - # Use a subclass for install that handles - # adding on the right init system configuration files - cmdclass = { - 'install': InitsysInstallData, - } - +if not in_virtualenv(): + USR = "/" + USR + ETC = "/" + ETC + USR_LIB_EXEC = "/" + USR_LIB_EXEC + LIB = "/" + LIB + for k in INITSYS_ROOTS.keys(): + INITSYS_ROOTS[k] = "/" + INITSYS_ROOTS[k] + +data_files = [ + (ETC + '/cloud', [render_cloud_cfg()]), + (ETC + '/cloud/cloud.cfg.d', glob('config/cloud.cfg.d/*')), + (ETC + '/cloud/templates', glob('templates/*')), + (USR_LIB_EXEC + '/cloud-init', ['tools/ds-identify', + 'tools/uncloud-init', + 'tools/write-ssh-key-fingerprints']), + (USR + '/share/doc/cloud-init', [f for f in glob('doc/*') if is_f(f)]), + (USR + '/share/doc/cloud-init/examples', + [f for f in glob('doc/examples/*') if is_f(f)]), + (USR + '/share/doc/cloud-init/examples/seed', + [f for f in glob('doc/examples/seed/*') if is_f(f)]), +] +if os.uname()[0] != 'FreeBSD': + data_files.extend([ + (ETC + '/NetworkManager/dispatcher.d/', + ['tools/hook-network-manager']), + (ETC + '/dhcp/dhclient-exit-hooks.d/', ['tools/hook-dhclient']), + (LIB + '/udev/rules.d', [f for f in glob('udev/*.rules')]) + ]) +# Use a subclass for install that handles +# adding on the right init system configuration files +cmdclass = { + 'install': InitsysInstallData, +} requirements = read_requires() if sys.version_info < (3,): diff --git a/tools/render-cloudcfg b/tools/render-cloudcfg new file mode 100755 index 00000000..e624541a --- /dev/null +++ b/tools/render-cloudcfg @@ -0,0 +1,43 @@ +#!/usr/bin/env python3 + +import argparse +import os +import sys + +if "avoid-pep8-E402-import-not-top-of-file": + _tdir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) + sys.path.insert(0, _tdir) + from cloudinit import templater + from cloudinit import util + from cloudinit.atomic_helper import write_file + + +def main(): + parser = argparse.ArgumentParser() + variants = ["bsd", "centos", "fedora", "rhel", "ubuntu", "unknown"] + platform = util.system_info() + parser.add_argument( + "--variant", default=platform['variant'], action="store", + help="define the variant.", choices=variants) + parser.add_argument( + "template", nargs="?", action="store", + default='./config/cloud.cfg.tmpl', + help="Path to the cloud.cfg template") + parser.add_argument( + "output", nargs="?", action="store", default="-", + help="Output file. 
Use '-' to write to stdout") + + args = parser.parse_args() + + with open(args.template, 'r') as fh: + contents = fh.read() + tpl_params = {'variant': args.variant} + contents = (templater.render_string(contents, tpl_params)).rstrip() + "\n" + util.load_yaml(contents) + if args.output == "-": + sys.stdout.write(contents) + else: + write_file(args.output, contents, omode="w") + +if __name__ == '__main__': + main() -- cgit v1.2.3 From ee324391bcb436b1d3a1c44951aa1aa673005cf6 Mon Sep 17 00:00:00 2001 From: Joshua Powers <josh.powers@canonical.com> Date: Thu, 1 Jun 2017 16:39:50 -0700 Subject: tools: add centos scripts to build and test The added 'run-centos' does: - Creates centos 6 or 7 lxd container * Sets http_proxy variable for yum if set locally * Creates centos user - Push local tree * Tar's up working directory * Pushes to container and untars - Installs pip and yum dependencies - As user centos it can then based on flags: * runs unittests * run ./packages/brpm * run ./packages/brpm --srpm * artifact the built *.rpm --- tools/run-centos | 218 +++++++++++++++++++++++++++++++++++++++++++++++++++++ tools/setup-centos | 49 ++++++++++++ 2 files changed, 267 insertions(+) create mode 100755 tools/run-centos create mode 100755 tools/setup-centos (limited to 'tools') diff --git a/tools/run-centos b/tools/run-centos new file mode 100755 index 00000000..de21d756 --- /dev/null +++ b/tools/run-centos @@ -0,0 +1,218 @@ +#!/bin/bash +# This file is part of cloud-init. See LICENSE file for license information. + +set -u + +VERBOSITY=0 +TEMP_D="" +KEEP=false +CONTAINER="" + +error() { echo "$@" 1>&2; } +fail() { [ $# -eq 0 ] || error "$@"; exit 1; } +errorrc() { local r=$?; error "$@" "ret=$r"; return $r; } + +Usage() { + cat <<EOF +Usage: ${0##*/} [ options ] CentOS version + + This utility can makes it easier to run tests, build rpm and source rpm + generation inside a LXC of the specified version of CentOS. + + options: + -a | --artifact keep .rpm artifacts + -k | --keep keep container after tests + -r | --rpm build .rpm + -s | --srpm build .src.rpm + -u | --unittest run unit tests +EOF +} + +bad_Usage() { Usage 1>&2; [ $# -eq 0 ] || error "$@"; return 1; } +cleanup() { + if [ -n "$CONTAINER" -a "$KEEP" = "false" ]; then + delete_container "$CONTAINER" + fi + [ -z "${TEMP_D}" -o ! -d "${TEMP_D}" ] || rm -Rf "${TEMP_D}" +} + +debug() { + local level=${1}; shift; + [ "${level}" -gt "${VERBOSITY}" ] && return + error "${@}" +} + + +inside_as() { + # inside_as(container_name, user, cmd[, args]) + # executes cmd with args inside container as user in users home dir. + local name="$1" user="$2" + shift 2 + local stuffed="" b64="" + stuffed=$(getopt --shell sh --options "" -- -- "$@") + stuffed=${stuffed# -- } + b64=$(printf "%s\n" "$stuffed" | base64 --wrap=0) + inside "$name" su "$user" -c \ + 'cd; eval set -- "$(echo '$b64' | base64 --decode)" && exec "$@"' +} + +inside() { + local name="$1" + shift + lxc exec "$name" -- "$@" +} + +inject_cloud_init(){ + local name="$1" + tarball_name='cloud-init.tar.gz' + top_d=$(git rev-parse --show-toplevel) || + fail "failed to get top level" + cd "$top_d" || + fail "failed to cd to git top dir" + tar_folder=${PWD##*/} + cd .. 
+ tar -czf "$TEMP_D/$tarball_name" "$tar_folder" || + fail "failed: creating tarball_name" + cd "$tar_folder" || + fail "failed: changing directory" + + user='centos' + tarball="/home/$user/$tarball_name" + inside "$name" useradd "$user" + lxc file push "$TEMP_D/$tarball_name" "$name/home/$user"/ + inside "$name" chown "$user:$user" "$tarball" + inside_as "$name" "$user" tar -C "/home/$user" -xzf "$tarball" || + fail "failed: extracting tarball" +} + +start_container() { + local src="$1" name="$2" + debug 1 "starting container $name from '$src'" + lxc launch "$src" "$name" || { + errorrc "Failed to start container '$name' from '$src'"; + return + } + CONTAINER=$name + + local out="" ret="" + debug 1 "waiting for networking" + out=$(inside "$name" sh -c ' + i=0 + while [ $i -lt 60 ]; do + getent hosts mirrorlist.centos.org && exit 0 + sleep 2 + done' 2>&1) + ret=$? + if [ $ret -ne 0 ]; then + error "Waiting for network in container '$name' failed. [$ret]" + error "$out" + return $ret + fi + + if [ ! -z "${http_proxy-}" ]; then + debug 1 "configuring proxy ${http_proxy}" + inside "$name" sh -c "echo proxy=$http_proxy >> /etc/yum.conf" + fi +} + +delete_container() { + debug 1 "removing container $1 [--keep to keep]" + lxc delete --force "$1" +} + +main() { + local short_opts="ahkrsuv:" + local long_opts="artifact,help,keep,rpm,srpm,unittest,verbose:" + local getopt_out="" + getopt_out=$(getopt --name "${0##*/}" \ + --options "${short_opts}" --long "${long_opts}" -- "$@") && + eval set -- "${getopt_out}" || + { bad_Usage; return; } + + local cur="" next="" + local artifact="" keep="" rpm="" srpm="" unittest="" version="" + + while [ $# -ne 0 ]; do + cur="${1:-}"; next="${2:-}"; + case "$cur" in + -a|--artifact) artifact=1;; + -h|--help) Usage ; exit 0;; + -k|--keep) KEEP=true;; + -r|--rpm) rpm=1;; + -s|--srpm) srpm=1;; + -u|--unittest) unittest=1;; + -v|--verbose) VERBOSITY=$((${VERBOSITY}+1));; + --) shift; break;; + esac + shift; + done + + [ $# -eq 1 ] || { bad_Usage "ERROR: Must provide version!"; return; } + version="$1" + + TEMP_D=$(mktemp -d "${TMPDIR:-/tmp}/${0##*/}.XXXXXX") || + fail "failed to make tempdir" + trap cleanup EXIT + + # program starts here + local uuid="" name="" + uuid=$(uuidgen -t) || { error "no uuidgen"; return 1; } + name="cloud-init-centos-${uuid%%-*}" + + start_container "images:centos/$version" "$name" + # CentOS 6 does not come with tar + if [ "$version" = "6" ]; then + inside "$name" yum install --assumeyes tar || { + errorrc "FAIL: yum install tar failed"; + } + fi + + debug 1 "inserting cloud-init" + inject_cloud_init "$name" || { + errorrc "FAIL: injecting cloud-init into $name failed." + return + } + + # install dependencies + debug 1 "installing dependencies" + inside "$name" /bin/sh <tools/setup-centos || + fail "failed: setting up container $name" + + local errors=0 do_cd="cd $tar_folder" + inside_as "$name" "$user" sh -ec "$do_cd; git checkout .; git status" || + { errorrc "git checkout failed."; errors=$(($errors+1)); } + + if [ -n "$unittest" ]; then + debug 1 "running unit tests." + inside_as "$name" "$user" sh -ec "$do_cd; nosetests tests/unittests" || + { errorrc "nosetests failed."; errors=$(($errors+1)); } + fi + + if [ -n "$srpm" ]; then + debug 1 "building srpm." + inside_as "$name" "$user" sh -ec "$do_cd; ./packages/brpm --srpm" || + { errorrc "brpm --srpm."; errors=$(($errors+1)); } + fi + + if [ -n "$rpm" ]; then + debug 1 "building rpm." 
+ inside_as "$name" "$user" sh -ec "$do_cd; ./packages/brpm" || + { errorrc "brpm failed."; errors=$(($errors+1)); } + fi + + if [ -n "$artifact" ]; then + cmd="ls /home/$user/$tar_folder/*.rpm" + for built_rpm in $(lxc exec "$name" -- sh -c "$cmd"); do + lxc file pull "$name/$built_rpm" . + done + fi + + if [ "$errors" != "0" ]; then + error "there were $errors errors." + return 1 + fi + return 0 +} + +main "$@" +# vi: ts=4 expandtab diff --git a/tools/setup-centos b/tools/setup-centos new file mode 100755 index 00000000..bc5da8a7 --- /dev/null +++ b/tools/setup-centos @@ -0,0 +1,49 @@ +#!/bin/sh +# This file is part of cloud-init. See LICENSE file for license information. +set -fux +export LANG=C + +packages=" + file + git + pyserial + python-argparse + python-cheetah + python-configobj + python-devel + python-jinja2 + python-jsonpatch + python-oauthlib + python-pip + python-prettytable + python-requests + python-six + PyYAML + rpm-build +" + +pips=" + contextlib2 + httpretty + mock + nose + pep8 + unittest2 +" + +error() { echo "$@" 1>&2; } +fail() { [ $# -eq 0 ] || error "$@"; exit 1; } +info() { echo "$@"; } + +pips=$(for p in $pips; do echo "$p"; done | sort -u) +packages=$(for p in $packages; do echo "$p"; done | sort -u) + +if ! rpm -q epel-release >/dev/null; then + yum install --assumeyes epel-release || + fail "failed: yum install epel-release" +fi +yum install --assumeyes $packages || + fail "failed: yum install" "$packages" + +pip install --upgrade $pips || + fail "failed: pip install $pips" -- cgit v1.2.3 From 744e648eaf6325758282ef23bffcc4194faa6bac Mon Sep 17 00:00:00 2001 From: Chad Smith <chad.smith@canonical.com> Date: Wed, 7 Jun 2017 17:26:52 -0600 Subject: pkg build ci: Add make ci-deps-<distro> target to install pkgs This change adds a couple of makefile targets for ci environments to install all necessary dependencies for package builds and test runs. It adds a number of arguments to ./tools/read-dependencies to facilitate reading pip dependencies, translating pip deps to system package names and optionally installing needed system-package dependencies on the local system. This relocates all package dependency and translation logic into ./tools/read-dependencies instead of duplication found in packages/brpm and packages/bddeb. In this branch, we also define buildrequires as including all runtime requires when rendering cloud-init.spec.in and debian/control files because our package build infrastructure will also be running all unit test during the package build process so we need runtime deps at build time. Additionally, this branch converts packages/(redhat|suse)/cloud-init.spec.in from cheetah templates to jinja to allow building python3 envs. 
--- Makefile | 8 ++ packages/bddeb | 43 ++------ packages/brpm | 45 +++----- packages/debian/control.in | 11 +- packages/pkg-deps.json | 88 ++++++++++++++++ packages/redhat/cloud-init.spec.in | 78 ++++++-------- packages/suse/cloud-init.spec.in | 52 ++++------ tools/read-dependencies | 204 ++++++++++++++++++++++++++++++++----- 8 files changed, 354 insertions(+), 175 deletions(-) create mode 100644 packages/pkg-deps.json (limited to 'tools') diff --git a/Makefile b/Makefile index a3bfaf79..c752530c 100644 --- a/Makefile +++ b/Makefile @@ -53,6 +53,14 @@ unittest: clean_pyc unittest3: clean_pyc nosetests3 $(noseopts) tests/unittests +ci-deps-ubuntu: + @$(PYVER) $(CWD)/tools/read-dependencies --distro ubuntu --install --python-version 3 + @$(PYVER) $(CWD)/tools/read-dependencies --distro ubuntu --requirements-file test-requirements.txt --install --python-version 3 + +ci-deps-centos: + @$(PYVER) $(CWD)/tools/read-dependencies --distro centos --install + @$(PYVER) $(CWD)/tools/read-dependencies --distro centos --requirements-file test-requirements.txt --install + pip-requirements: @echo "Installing cloud-init dependencies..." $(PIP_INSTALL) -r "$@.txt" -q diff --git a/packages/bddeb b/packages/bddeb index f415209f..e45af6ee 100755 --- a/packages/bddeb +++ b/packages/bddeb @@ -24,19 +24,6 @@ if "avoid-pep8-E402-import-not-top-of-file": from cloudinit import templater from cloudinit import util -# Package names that will showup in requires which have unique package names. -# Format is '<pypi-name>': {'<python_major_version>': <pkg_name_or_none>, ...}. -NONSTD_NAMED_PACKAGES = { - 'argparse': {'2': 'python-argparse', '3': None}, - 'contextlib2': {'2': 'python-contextlib2', '3': None}, - 'cheetah': {'2': 'python-cheetah', '3': None}, - 'pyserial': {'2': 'python-serial', '3': 'python3-serial'}, - 'pyyaml': {'2': 'python-yaml', '3': 'python3-yaml'}, - 'six': {'2': 'python-six', '3': 'python3-six'}, - 'pep8': {'2': 'pep8', '3': 'python3-pep8'}, - 'pyflakes': {'2': 'pyflakes', '3': 'pyflakes'}, -} - DEBUILD_ARGS = ["-S", "-d"] @@ -59,7 +46,6 @@ def write_debian_folder(root, templ_data, is_python2, cloud_util_deps): else: pyver = "3" python = "python3" - pkgfmt = "{}-{}" deb_dir = util.abs_join(root, 'debian') @@ -74,30 +60,23 @@ def write_debian_folder(root, templ_data, is_python2, cloud_util_deps): params=templ_data) # Write out the control file template - reqs = run_helper('read-dependencies').splitlines() + reqs_output = run_helper( + 'read-dependencies', + args=['--distro', 'debian', '--python-version', pyver]) + reqs = reqs_output.splitlines() test_reqs = run_helper( - 'read-dependencies', ['test-requirements.txt']).splitlines() - - pypi_pkgs = [p.lower().strip() for p in reqs] - pypi_test_pkgs = [p.lower().strip() for p in test_reqs] + 'read-dependencies', + ['--requirements-file', 'test-requirements.txt', + '--system-pkg-names', '--python-version', pyver]).splitlines() - # Map to known packages requires = ['cloud-utils | cloud-guest-utils'] if cloud_util_deps else [] - test_requires = [] - lists = ((pypi_pkgs, requires), (pypi_test_pkgs, test_requires)) - for pypilist, target in lists: - for p in pypilist: - if p in NONSTD_NAMED_PACKAGES: - if NONSTD_NAMED_PACKAGES[p][pyver]: - target.append(NONSTD_NAMED_PACKAGES[p][pyver]) - else: # Then standard package prefix - target.append(pkgfmt.format(python, p)) - + # We consolidate all deps as Build-Depends as our package build runs all + # tests so we need all runtime dependencies anyway. 
+ requires.extend(reqs + test_reqs + [python]) templater.render_to_file(util.abs_join(find_root(), 'packages', 'debian', 'control.in'), util.abs_join(deb_dir, 'control'), - params={'requires': ','.join(requires), - 'test_requires': ','.join(test_requires), + params={'build_depends': ','.join(requires), 'python': python}) templater.render_to_file(util.abs_join(find_root(), diff --git a/packages/brpm b/packages/brpm index 89696ab8..3439cf35 100755 --- a/packages/brpm +++ b/packages/brpm @@ -27,17 +27,6 @@ if "avoid-pep8-E402-import-not-top-of-file": from cloudinit import templater from cloudinit import util -# Map python requirements to package names. If a match isn't found -# here, we assume 'python-<pypi_name>'. -PACKAGE_MAP = { - 'redhat': { - 'pyserial': 'pyserial', - 'pyyaml': 'PyYAML', - }, - 'suse': { - 'pyyaml': 'python-yaml', - } -} # Subdirectories of the ~/rpmbuild dir RPM_BUILD_SUBDIRS = ['BUILD', 'RPMS', 'SOURCES', 'SPECS', 'SRPMS'] @@ -53,23 +42,18 @@ def run_helper(helper, args=None, strip=True): return stdout -def read_dependencies(): - '''Returns the Python depedencies from requirements.txt. This explicitly - removes 'argparse' from the list of requirements for python >= 2.7, - because with 2.7 argparse became part of the standard library.''' - stdout = run_helper('read-dependencies') - return [p.lower().strip() for p in stdout.splitlines() - if p != 'argparse' or (p == 'argparse' and - sys.version_info[0:2] < (2, 7))] +def read_dependencies(requirements_file='requirements.txt'): + """Returns the Python package depedencies from requirements.txt files. - -def translate_dependencies(deps, distro): - '''Maps python requirements into package names. We assume - python-<pypi_name> for packages not listed explicitly in - PACKAGE_MAP.''' - return [PACKAGE_MAP[distro][req] - if req in PACKAGE_MAP[distro] else 'python-%s' % req - for req in deps] + @returns a tuple of (requirements, test_requirements) + """ + pkg_deps = run_helper( + 'read-dependencies', args=['--distro', 'redhat']).splitlines() + test_deps = run_helper( + 'read-dependencies', args=[ + '--requirements-file', 'test-requirements.txt', + '--system-pkg-names']).splitlines() + return (pkg_deps, test_deps) def read_version(): @@ -99,10 +83,9 @@ def generate_spec_contents(args, version_data, tmpl_fn, top_dir, arc_fn): rpm_upstream_version = version_data['version'] subs['rpm_upstream_version'] = rpm_upstream_version - # Map to known packages - python_deps = read_dependencies() - package_deps = translate_dependencies(python_deps, args.distro) - subs['requires'] = package_deps + deps, test_deps = read_dependencies() + subs['buildrequires'] = deps + test_deps + subs['requires'] = deps if args.boot == 'sysvinit': subs['sysvinit'] = True diff --git a/packages/debian/control.in b/packages/debian/control.in index 6c39d531..265b261f 100644 --- a/packages/debian/control.in +++ b/packages/debian/control.in @@ -3,20 +3,13 @@ Source: cloud-init Section: admin Priority: optional Maintainer: Scott Moser <smoser@ubuntu.com> -Build-Depends: debhelper (>= 9), - dh-python, - dh-systemd, - ${python}, - ${test_requires}, - ${requires} +Build-Depends: ${build_depends} XS-Python-Version: all Standards-Version: 3.9.6 Package: cloud-init Architecture: all -Depends: procps, - ${python}, - ${misc:Depends}, +Depends: ${misc:Depends}, ${${python}:Depends} Recommends: eatmydata, sudo, software-properties-common, gdisk XB-Python-Version: ${python:Versions} diff --git a/packages/pkg-deps.json b/packages/pkg-deps.json new file mode 100644 index 
00000000..8b8f3c37 --- /dev/null +++ b/packages/pkg-deps.json @@ -0,0 +1,88 @@ +{ + "debian" : { + "build-requires" : [ + "debhelper", + "dh-python", + "dh-systemd" + ], + "renames" : { + "pyyaml" : { + "2" : "python-yaml", + "3" : "python3-yaml" + }, + "contextlib2" : { + "2" : "python-contextlib2" + }, + "pyserial" : { + "2" : "python-serial", + "3" : "python3-serial" + } + }, + "requires" : [ + "procps" + ] + }, + "redhat" : { + "build-requires" : [ + "python-devel", + "python-setuptools" + ], + "renames" : { + "jinja2" : { + "3" : "python34-jinja2" + }, + "jsonschema" : { + "3" : "python34-jsonschema" + }, + "prettytable" : { + "3" : "python34-prettytable" + }, + "pyflakes" : { + "2" : "pyflakes", + "3" : "python34-pyflakes" + }, + "pyyaml" : { + "2" : "PyYAML", + "3" : "python34-PyYAML" + }, + "pyserial" : { + "2" : "pyserial" + }, + "requests" : { + "3" : "python34-requests" + }, + "six" : { + "3" : "python34-six" + } + }, + "requires" : [ + "e2fsprogs", + "iproute", + "net-tools", + "procps", + "rsyslog", + "shadow-utils", + "sudo >= 1.7.2p2-3" + ] + }, + "suse" : { + "renames" : { + "pyyaml" : { + "2" : "python-yaml" + } + }, + "build-requires" : [ + "fdupes", + "filesystem", + "python-devel", + "python-setuptools" + ], + "requires" : [ + "iproute2", + "e2fsprogs", + "net-tools", + "procps", + "sudo" + ] + } +} diff --git a/packages/redhat/cloud-init.spec.in b/packages/redhat/cloud-init.spec.in index 3e92c98f..9f75c4b8 100644 --- a/packages/redhat/cloud-init.spec.in +++ b/packages/redhat/cloud-init.spec.in @@ -1,4 +1,4 @@ -## template: cheetah +## template: jinja %{!?python_sitelib: %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")} %define use_systemd (0%{?fedora} && 0%{?fedora} >= 18) || (0%{?rhel} && 0%{?rhel} >= 7) @@ -14,20 +14,18 @@ # Or: http://www.rpm.org/max-rpm/ch-rpm-inside.html Name: cloud-init -Version: ${rpm_upstream_version} -Release: 1${subrelease}%{?dist} +Version: {{rpm_upstream_version}} +Release: 1{{subrelease}}%{?dist} Summary: Cloud instance init scripts Group: System Environment/Base License: Dual-licesed GPLv3 or Apache 2.0 URL: http://launchpad.net/cloud-init -Source0: ${archive_name} +Source0: {{archive_name}} BuildArch: noarch BuildRoot: %{_tmppath} -BuildRequires: python-devel -BuildRequires: python-setuptools %if "%{?el6}" == "1" BuildRequires: python-argparse %endif @@ -46,40 +44,30 @@ Requires(preun): chkconfig # These are runtime dependencies, but declared as BuildRequires so that # - tests can be run here. # - parts of cloud-init such (setup.py) use these dependencies. 
-#for $r in $requires -BuildRequires: ${r} -#end for +{% for r in requires %} +BuildRequires: {{r}} +{% endfor %} # System util packages needed %ifarch %{?ix86} x86_64 ia64 Requires: dmidecode %endif -Requires: shadow-utils -Requires: rsyslog -Requires: iproute -Requires: e2fsprogs -Requires: net-tools -Requires: procps -Requires: shadow-utils -Requires: sudo >= 1.7.2p2-3 - -Requires: python-setuptools + # python2.6 needs argparse %if "%{?el6}" == "1" Requires: python-argparse %endif -# Install pypi 'dynamic' requirements -#for $r in $requires -Requires: ${r} -#end for + +# Install 'dynamic' runtime reqs from *requirements.txt and pkg-deps.json +{% for r in requires %} +Requires: {{r}} +{% endfor %} # Custom patches -#set $size = 0 -#for $p in $patches -Patch${size}: $p -#set $size += 1 -#end for +{% for p in patches %} +Patch{{loop.index0}}: {{p}} +{% endfor %} %if "%{init_system}" == "systemd" Requires(post): systemd @@ -98,14 +86,12 @@ need special scripts to run during initialization to retrieve and install ssh keys and to let the user run various scripts. %prep -%setup -q -n ${source_name} +%setup -q -n {{source_name}} # Custom patches activation -#set $size = 0 -#for $p in $patches -%patch${size} -p1 -#set $size += 1 -#end for +{% for p in patches %} +%patch{{loop.index0}} -p1 +{% endfor %} %build %{__python} setup.py build @@ -113,34 +99,34 @@ ssh keys and to let the user run various scripts. %install %{__python} setup.py install -O1 \ - --skip-build --root \$RPM_BUILD_ROOT \ + --skip-build --root $RPM_BUILD_ROOT \ --init-system=%{init_system} # Note that /etc/rsyslog.d didn't exist by default until F15. # el6 request: https://bugzilla.redhat.com/show_bug.cgi?id=740420 -mkdir -p \$RPM_BUILD_ROOT/%{_sysconfdir}/rsyslog.d +mkdir -p $RPM_BUILD_ROOT/%{_sysconfdir}/rsyslog.d cp -p tools/21-cloudinit.conf \ - \$RPM_BUILD_ROOT/%{_sysconfdir}/rsyslog.d/21-cloudinit.conf + $RPM_BUILD_ROOT/%{_sysconfdir}/rsyslog.d/21-cloudinit.conf # Remove the tests -rm -rf \$RPM_BUILD_ROOT%{python_sitelib}/tests +rm -rf $RPM_BUILD_ROOT%{python_sitelib}/tests # Required dirs... 
-mkdir -p \$RPM_BUILD_ROOT/%{_sharedstatedir}/cloud -mkdir -p \$RPM_BUILD_ROOT/%{_libexecdir}/%{name} +mkdir -p $RPM_BUILD_ROOT/%{_sharedstatedir}/cloud +mkdir -p $RPM_BUILD_ROOT/%{_libexecdir}/%{name} %if "%{init_system}" == "systemd" -mkdir -p \$RPM_BUILD_ROOT/%{_unitdir} -cp -p systemd/* \$RPM_BUILD_ROOT/%{_unitdir} +mkdir -p $RPM_BUILD_ROOT/%{_unitdir} +cp -p systemd/* $RPM_BUILD_ROOT/%{_unitdir} %endif %clean -rm -rf \$RPM_BUILD_ROOT +rm -rf $RPM_BUILD_ROOT %post %if "%{init_system}" == "systemd" -if [ \$1 -eq 1 ] +if [ $1 -eq 1 ] then /bin/systemctl enable cloud-config.service >/dev/null 2>&1 || : /bin/systemctl enable cloud-final.service >/dev/null 2>&1 || : @@ -157,7 +143,7 @@ fi %preun %if "%{init_system}" == "systemd" -if [ \$1 -eq 0 ] +if [ $1 -eq 0 ] then /bin/systemctl --no-reload disable cloud-config.service >/dev/null 2>&1 || : /bin/systemctl --no-reload disable cloud-final.service >/dev/null 2>&1 || : @@ -165,7 +151,7 @@ then /bin/systemctl --no-reload disable cloud-init-local.service >/dev/null 2>&1 || : fi %else -if [ \$1 -eq 0 ] +if [ $1 -eq 0 ] then /sbin/service cloud-init stop >/dev/null 2>&1 || : /sbin/chkconfig --del cloud-init || : diff --git a/packages/suse/cloud-init.spec.in b/packages/suse/cloud-init.spec.in index 6ce0be8c..86e18b1b 100644 --- a/packages/suse/cloud-init.spec.in +++ b/packages/suse/cloud-init.spec.in @@ -1,19 +1,19 @@ -## template: cheetah +## template: jinja # See: http://www.zarb.org/~jasonc/macros.php # Or: http://fedoraproject.org/wiki/Packaging:ScriptletSnippets # Or: http://www.rpm.org/max-rpm/ch-rpm-inside.html Name: cloud-init -Version: ${version} -Release: 1${subrelease}%{?dist} +Version: {{version}} +Release: 1{{subrelease}}%{?dist} Summary: Cloud instance init scripts Group: System/Management License: Dual licensed GPLv3 or Apache 2.0 URL: http://launchpad.net/cloud-init -Source0: ${archive_name} +Source0: {{archive_name}} BuildRoot: %{_tmppath}/%{name}-%{version}-build %if 0%{?suse_version} && 0%{?suse_version} <= 1110 @@ -22,11 +22,9 @@ BuildRoot: %{_tmppath}/%{name}-%{version}-build BuildArch: noarch %endif -BuildRequires: fdupes -BuildRequires: filesystem -BuildRequires: python-devel -BuildRequires: python-setuptools -BuildRequires: python-cheetah +{% for r in buildrequires %} +BuildRequires: {{r}} +{% endfor %} %if 0%{?suse_version} && 0%{?suse_version} <= 1210 %define initsys sysvinit @@ -34,24 +32,15 @@ BuildRequires: python-cheetah %define initsys systemd %endif -# System util packages needed -Requires: iproute2 -Requires: e2fsprogs -Requires: net-tools -Requires: procps -Requires: sudo - # Install pypi 'dynamic' requirements -#for $r in $requires -Requires: ${r} -#end for +{% for r in requires %} +Requires: {{r}} +{% endfor %} # Custom patches -#set $size = 0 -#for $p in $patches -Patch${size}: $p -#set $size += 1 -#end for +{% for p in patches %} +Patch{{loop.index0}: {{p}} +{% endfor %} %description Cloud-init is a set of init scripts for cloud instances. Cloud instances @@ -59,14 +48,13 @@ need special scripts to run during initialization to retrieve and install ssh keys and to let the user run various scripts. 
%prep -%setup -q -n ${source_name} +%setup -q -n {{source_name}} # Custom patches activation -#set $size = 0 -#for $p in $patches -%patch${size} -p1 -#set $size += 1 -#end for +{% for p in patches %} +%patch{{loop.index0}} -p1 +end for +{% endfor %} %build %{__python} setup.py build @@ -95,7 +83,7 @@ rm -r %{buildroot}/%{python_sitelib}/tests mkdir -p %{buildroot}/%{_sbindir} pushd %{buildroot}/%{_initddir} for file in * ; do - ln -s %{_initddir}/\${file} %{buildroot}/%{_sbindir}/rc\${file} + ln -s %{_initddir}/${file} %{buildroot}/%{_sbindir}/rc${file} done popd %endif @@ -104,7 +92,7 @@ rm -r %{buildroot}/%{python_sitelib}/tests mkdir -p %{buildroot}/%{_defaultdocdir} mv %{buildroot}/usr/share/doc/cloud-init %{buildroot}/%{_defaultdocdir} for doc in TODO LICENSE ChangeLog requirements.txt; do - cp \${doc} %{buildroot}/%{_defaultdocdir}/cloud-init + cp ${doc} %{buildroot}/%{_defaultdocdir}/cloud-init done # Remove duplicate files diff --git a/tools/read-dependencies b/tools/read-dependencies index f4349055..4ba2c1bc 100755 --- a/tools/read-dependencies +++ b/tools/read-dependencies @@ -1,43 +1,197 @@ #!/usr/bin/env python +"""List pip dependencies or system package dependencies for cloud-init.""" # You might be tempted to rewrite this as a shell script, but you # would be surprised to discover that things like 'egrep' or 'sed' may # differ between Linux and *BSD. +try: + from argparse import ArgumentParser +except ImportError: + raise RuntimeError( + 'Could not import python-argparse. Please install python-argparse ' + 'package to continue') + +import json import os import re -import sys import subprocess +import sys -if 'CLOUD_INIT_TOP_D' in os.environ: - topd = os.path.realpath(os.environ.get('CLOUD_INIT_TOP_D')) -else: - topd = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) -for fname in ("setup.py", "requirements.txt"): - if not os.path.isfile(os.path.join(topd, fname)): - sys.stderr.write("Unable to locate '%s' file that should " - "exist in cloud-init root directory." 
% fname) - sys.exit(1) +# Map the appropriate package dir needed for each distro choice +DISTRO_PKG_TYPE_MAP = { + 'centos': 'redhat', + 'redhat': 'redhat', + 'debian': 'debian', + 'ubuntu': 'debian', + 'opensuse': 'suse', + 'suse': 'suse' +} + +DISTRO_INSTALL_PKG_CMD = { + 'centos': ['yum', 'install', '--assumeyes'], + 'redhat': ['yum', 'install', '--assumeyes'], + 'debian': ['apt', 'install', '-y'], + 'ubuntu': ['apt', 'install', '-y'], + 'opensuse': ['zypper', 'install'], + 'suse': ['zypper', 'install'] +} + + +# List of base system packages required to start using make +EXTRA_SYSTEM_BASE_PKGS = ['make', 'sudo', 'tar'] + + +# JSON definition of distro-specific package dependencies +DISTRO_PKG_DEPS_PATH = "packages/pkg-deps.json" + + +def get_parser(): + """Return an argument parser for this command.""" + parser = ArgumentParser(description=__doc__) + parser.add_argument( + '-r', '--requirements-file', type=str, dest='req_file', + default='requirements.txt', help='The pip-style requirements file') + parser.add_argument( + '-d', '--distro', type=str, choices=DISTRO_PKG_TYPE_MAP.keys(), + help='The name of the distro to generate package deps for.') + parser.add_argument( + '--dry-run', action='store_true', default=False, dest='dry_run', + help='Dry run the install, making no package changes.') + parser.add_argument( + '-s', '--system-pkg-names', action='store_true', default=False, + dest='system_pkg_names', + help='The name of the distro to generate package deps for.') + parser.add_argument( + '-i', '--install', action='store_true', default=False, + dest='install', + help='When specified, install the required system packages.') + parser.add_argument( + '-v', '--python-version', type=str, dest='python_version', default="2", + choices=["2", "3"], + help='The version of python we want to generate system package ' + 'dependencies for.') + return parser + -if len(sys.argv) > 1: - reqfile = sys.argv[1] -else: - reqfile = "requirements.txt" +def get_package_deps_from_json(topdir, distro): + """Get a dict of build and runtime package requirements for a distro. + + @param topdir: The root directory in which to search for the + DISTRO_PKG_DEPS_PATH json blob of package requirements information. + @param distro: The specific distribution shortname to pull dependencies + for. + @return: Dict containing "requires", "build-requires" and "rename" lists + for a given distribution. + """ + with open(os.path.join(topdir, DISTRO_PKG_DEPS_PATH), 'r') as stream: + deps = json.loads(stream.read()) + if distro is None: + return {} + return deps[DISTRO_PKG_TYPE_MAP[distro]] + + +def parse_pip_requirements(requirements_path): + """Return the pip requirement names from pip-style requirements_path.""" + dep_names = [] + with open(requirements_path, "r") as fp: + for line in fp: + line = line.strip() + if not line or line.startswith("#"): + continue + + # remove pip-style markers + dep = line.split(';')[0] + + # remove version requirements + if re.search('[>=.<]+', dep): + dep_names.append(re.split(r'[>=.<]+', dep)[0].strip()) + else: + dep_names.append(dep) + return dep_names + + +def translate_pip_to_system_pkg(pip_requires, renames, python_ver="2"): + """Translate pip package names to distro-specific package names. + + @param pip_requires: List of versionless pip package names to translate. + @param renames: Dict containg special case renames from pip name to system + package name for the distro. 
+ """ + if python_ver == "2": + prefix = "python-" + else: + prefix = "python3-" + standard_pkg_name = "{0}{1}" + translated_names = [] + for pip_name in pip_requires: + pip_name = pip_name.lower() + # Find a rename if present for the distro package and python version + rename = renames.get(pip_name, {}).get(python_ver, None) + if rename: + translated_names.append(rename) + else: + translated_names.append( + standard_pkg_name.format(prefix, pip_name)) + return translated_names + + +def main(distro): + parser = get_parser() + args = parser.parse_args() + if 'CLOUD_INIT_TOP_D' in os.environ: + topd = os.path.realpath(os.environ.get('CLOUD_INIT_TOP_D')) + else: + topd = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + req_path = os.path.join(topd, args.req_file) + if not os.path.isfile(req_path): + sys.stderr.write("Unable to locate '%s' file that should " + "exist in cloud-init root directory." % req_path) + return 1 + pip_pkg_names = parse_pip_requirements(req_path) + deps_from_json = get_package_deps_from_json(topd, args.distro) + renames = deps_from_json.get('renames', {}) + translated_pip_names = translate_pip_to_system_pkg( + pip_pkg_names, renames, args.python_version) + all_deps = [] + if args.distro: + all_deps.extend( + translated_pip_names + deps_from_json['requires'] + + deps_from_json['build-requires']) + else: + if args.system_pkg_names: + all_deps = translated_pip_names + else: + all_deps = pip_pkg_names + if args.install: + pkg_install(all_deps, args.distro, args.dry_run) + else: + print('\n'.join(all_deps)) -with open(os.path.join(topd, reqfile), "r") as fp: - for line in fp: - line = line.strip() - if not line or line.startswith("#"): - continue - # remove pip-style markers - dep = line.split(';')[0] +def pkg_install(pkg_list, distro, dry_run=False): + """Install a list of packages using the DISTRO_INSTALL_PKG_CMD.""" + print('Installing deps: {0}{1}'.format( + '(dryrun)' if dry_run else '', ' '.join(pkg_list))) + pkg_list.extend(EXTRA_SYSTEM_BASE_PKGS) + install_cmd = [] + if dry_run: + install_cmd.append('echo') + if os.geteuid() != 0: + install_cmd.append('sudo') + install_cmd.extend(DISTRO_INSTALL_PKG_CMD[distro]) + if distro in ['centos', 'redhat']: + # CentOS and Redhat need epel-release to access oauthlib and jsonschema + subprocess.check_call(install_cmd + ['epel-release']) + if distro in ['suse', 'opensuse', 'redhat', 'centos']: + pkg_list.append('rpm-build') + subprocess.check_call(install_cmd + pkg_list) - # remove version requirements - dep = re.split("[>=.<]*", dep)[0].strip() - print(dep) -sys.exit(0) +if __name__ == "__main__": + parser = get_parser() + args = parser.parse_args() + sys.exit(main(args.distro)) # vi: ts=4 expandtab -- cgit v1.2.3 From 55a006afca73633c607c537dee62097e85011443 Mon Sep 17 00:00:00 2001 From: Scott Moser <smoser@brickies.net> Date: Wed, 14 Jun 2017 09:33:54 -0400 Subject: tools/run-centos: cleanups and move to using read-dependencies These changes are all in an effort to get tools/run-centos using read-dependencies rather than the 'setup-centos' script with a separate set of dependencies listed. - tools/read-dependencies: support taking multiple --requirements options. This allows run-centos to get both test and build dependencies. Ultimately, I think it might be nicer for read-dependencies to take a list of "goals" (build, test, run or test-tox) rather than having the caller need to know to provide multiple --requirements. - packages/pkg-deps.json: drop the version on the sudo package. 
centos 6 has newer (1.8.6p3) version than listed, so its not a problem. - test_handler_disk_setup.py: a test case here was using assertLogs which is not present in the version of unittest2 that is available in centos 6 epel. We just adjust it to use with_logs = True. - tools/run-cents: - improve usage with example - add 'inside_as_cd' to provide the dir you want to cd first to. - avoid the intermediate tarball on disk in the container. - add 'prep' subcommand and use it to install pre-dependencies. - use read-dependencies. --- packages/pkg-deps.json | 2 +- .../test_handler/test_handler_disk_setup.py | 32 ++--- tools/read-dependencies | 32 +++-- tools/run-centos | 142 ++++++++++++++------- tools/setup-centos | 49 ------- 5 files changed, 139 insertions(+), 118 deletions(-) delete mode 100755 tools/setup-centos (limited to 'tools') diff --git a/packages/pkg-deps.json b/packages/pkg-deps.json index 8b8f3c37..822d29d9 100644 --- a/packages/pkg-deps.json +++ b/packages/pkg-deps.json @@ -62,7 +62,7 @@ "procps", "rsyslog", "shadow-utils", - "sudo >= 1.7.2p2-3" + "sudo" ] }, "suse" : { diff --git a/tests/unittests/test_handler/test_handler_disk_setup.py b/tests/unittests/test_handler/test_handler_disk_setup.py index 916a0d7a..8a6d49ed 100644 --- a/tests/unittests/test_handler/test_handler_disk_setup.py +++ b/tests/unittests/test_handler/test_handler_disk_setup.py @@ -3,7 +3,7 @@ import random from cloudinit.config import cc_disk_setup -from ..helpers import ExitStack, mock, TestCase +from ..helpers import CiTestCase, ExitStack, mock, TestCase class TestIsDiskUsed(TestCase): @@ -174,32 +174,32 @@ class TestUpdateFsSetupDevices(TestCase): return_value=('/dev/xdb1', False)) @mock.patch('cloudinit.config.cc_disk_setup.device_type', return_value=None) @mock.patch('cloudinit.config.cc_disk_setup.util.subp', return_value=('', '')) -class TestMkfsCommandHandling(TestCase): +class TestMkfsCommandHandling(CiTestCase): + + with_logs = True def test_with_cmd(self, subp, *args): """mkfs honors cmd and logs warnings when extra_opts or overwrite are provided.""" - with self.assertLogs( - 'cloudinit.config.cc_disk_setup') as logs: - cc_disk_setup.mkfs({ - 'cmd': 'mkfs -t %(filesystem)s -L %(label)s %(device)s', - 'filesystem': 'ext4', - 'device': '/dev/xdb1', - 'label': 'with_cmd', - 'extra_opts': ['should', 'generate', 'warning'], - 'overwrite': 'should generate warning too' - }) + cc_disk_setup.mkfs({ + 'cmd': 'mkfs -t %(filesystem)s -L %(label)s %(device)s', + 'filesystem': 'ext4', + 'device': '/dev/xdb1', + 'label': 'with_cmd', + 'extra_opts': ['should', 'generate', 'warning'], + 'overwrite': 'should generate warning too' + }) self.assertIn( - 'WARNING:cloudinit.config.cc_disk_setup:fs_setup:extra_opts ' + + 'extra_opts ' + 'ignored because cmd was specified: mkfs -t ext4 -L with_cmd ' + '/dev/xdb1', - logs.output) + self.logs.getvalue()) self.assertIn( - 'WARNING:cloudinit.config.cc_disk_setup:fs_setup:overwrite ' + + 'overwrite ' + 'ignored because cmd was specified: mkfs -t ext4 -L with_cmd ' + '/dev/xdb1', - logs.output) + self.logs.getvalue()) subp.assert_called_once_with( 'mkfs -t ext4 -L with_cmd /dev/xdb1', shell=True) diff --git a/tools/read-dependencies b/tools/read-dependencies index 4ba2c1bc..8a585343 100755 --- a/tools/read-dependencies +++ b/tools/read-dependencies @@ -18,6 +18,7 @@ import re import subprocess import sys +DEFAULT_REQUIREMENTS = 'requirements.txt' # Map the appropriate package dir needed for each distro choice DISTRO_PKG_TYPE_MAP = { @@ -51,8 +52,9 @@ def get_parser(): """Return 
an argument parser for this command.""" parser = ArgumentParser(description=__doc__) parser.add_argument( - '-r', '--requirements-file', type=str, dest='req_file', - default='requirements.txt', help='The pip-style requirements file') + '-r', '--requirements-file', type=str, dest='req_files', + action='append', default=None, + help='pip-style requirements file [default=%s]' % DEFAULT_REQUIREMENTS) parser.add_argument( '-d', '--distro', type=str, choices=DISTRO_PKG_TYPE_MAP.keys(), help='The name of the distro to generate package deps for.') @@ -144,12 +146,24 @@ def main(distro): topd = os.path.realpath(os.environ.get('CLOUD_INIT_TOP_D')) else: topd = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) - req_path = os.path.join(topd, args.req_file) - if not os.path.isfile(req_path): - sys.stderr.write("Unable to locate '%s' file that should " - "exist in cloud-init root directory." % req_path) - return 1 - pip_pkg_names = parse_pip_requirements(req_path) + + if args.req_files is None: + args.req_files = [os.path.join(topd, DEFAULT_REQUIREMENTS)] + if not os.path.isfile(args.req_files[0]): + sys.stderr.write("Unable to locate '%s' file that should " + "exist in cloud-init root directory." % + args.req_files[0]) + sys.exit(1) + + bad_files = [r for r in args.req_files if not os.path.isfile(r)] + if bad_files: + sys.stderr.write( + "Unable to find requirements files: %s\n" % ','.join(bad_files)) + sys.exit(1) + + pip_pkg_names = set() + for req_path in args.req_files: + pip_pkg_names.update(set(parse_pip_requirements(req_path))) deps_from_json = get_package_deps_from_json(topd, args.distro) renames = deps_from_json.get('renames', {}) translated_pip_names = translate_pip_to_system_pkg( @@ -174,7 +188,7 @@ def pkg_install(pkg_list, distro, dry_run=False): """Install a list of packages using the DISTRO_INSTALL_PKG_CMD.""" print('Installing deps: {0}{1}'.format( '(dryrun)' if dry_run else '', ' '.join(pkg_list))) - pkg_list.extend(EXTRA_SYSTEM_BASE_PKGS) + pkg_list = list(pkg_list) + EXTRA_SYSTEM_BASE_PKGS install_cmd = [] if dry_run: install_cmd.append('echo') diff --git a/tools/run-centos b/tools/run-centos index de21d756..99ba6be0 100755 --- a/tools/run-centos +++ b/tools/run-centos @@ -14,17 +14,22 @@ errorrc() { local r=$?; error "$@" "ret=$r"; return $r; } Usage() { cat <<EOF -Usage: ${0##*/} [ options ] CentOS version +Usage: ${0##*/} [ options ] version This utility can makes it easier to run tests, build rpm and source rpm generation inside a LXC of the specified version of CentOS. + version is major release number (6 or 7) + options: -a | --artifact keep .rpm artifacts -k | --keep keep container after tests -r | --rpm build .rpm -s | --srpm build .src.rpm -u | --unittest run unit tests + + Example: + * ${0##*/} --rpm --srpm --unittest 6 EOF } @@ -48,6 +53,10 @@ inside_as() { # executes cmd with args inside container as user in users home dir. 
local name="$1" user="$2" shift 2 + if [ "$user" = "root" ]; then + inside "$name" "$@" + return + fi local stuffed="" b64="" stuffed=$(getopt --shell sh --options "" -- -- "$@") stuffed=${stuffed# -- } @@ -56,6 +65,12 @@ inside_as() { 'cd; eval set -- "$(echo '$b64' | base64 --decode)" && exec "$@"' } +inside_as_cd() { + local name="$1" user="$2" dir="$3" + shift 3 + inside_as "$name" "$user" sh -c 'cd "$0" && exec "$@"' "$dir" "$@" +} + inside() { local name="$1" shift @@ -63,26 +78,52 @@ inside() { } inject_cloud_init(){ - local name="$1" - tarball_name='cloud-init.tar.gz' - top_d=$(git rev-parse --show-toplevel) || - fail "failed to get top level" - cd "$top_d" || - fail "failed to cd to git top dir" - tar_folder=${PWD##*/} - cd .. - tar -czf "$TEMP_D/$tarball_name" "$tar_folder" || - fail "failed: creating tarball_name" - cd "$tar_folder" || - fail "failed: changing directory" - - user='centos' - tarball="/home/$user/$tarball_name" - inside "$name" useradd "$user" - lxc file push "$TEMP_D/$tarball_name" "$name/home/$user"/ - inside "$name" chown "$user:$user" "$tarball" - inside_as "$name" "$user" tar -C "/home/$user" -xzf "$tarball" || - fail "failed: extracting tarball" + # take current cloud-init git dir and put it inside $name at + # ~$user/cloud-init. + local name="$1" user="$2" top_d="" dname="" pstat="" + top_d=$(git rev-parse --show-toplevel) || { + errorrc "Failed to get git top level in $PWD"; + return + } + dname=$(basename "${top_d}") || return + debug 1 "collecting ${top_d} ($dname) into user $user in $name." + tar -C "${top_d}/.." -cpf - "$dname" | + inside_as "$name" "$user" sh -ec ' + dname=$1 + rm -Rf "$dname" + tar -xpf - + [ "$dname" = "cloud-init" ] || mv "$dname" cloud-init' \ + extract "$dname" + [ "${PIPESTATUS[*]}" = "0 0" ] || { + error "Failed to push tarball of '$top_d' into $name" \ + " for user $user (dname=$dname)" + return 1 + } + return 0 +} + +prep() { + # we need some very basic things not present in the container. + # - git + # - tar (CentOS 6 lxc container does not have it) + # - python-argparse (or python3) + local needed="" pair="" pkg="" cmd="" needed="" + for pair in tar:tar git:git; do + pkg=${pair#*:} + cmd=${pair%%:*} + command -v $cmd >/dev/null 2>&1 || needed="${needed} $pkg" + done + if ! 
command -v python3; then + python -c "import argparse" >/dev/null 2>&1 || + needed="${needed} python-argparse" + fi + needed=${needed# } + if [ -z "$needed" ]; then + error "No prep packages needed" + return 0 + fi + error "Installing prep packages: ${needed}" + yum install --assumeyes ${needed} } start_container() { @@ -121,8 +162,8 @@ delete_container() { } main() { - local short_opts="ahkrsuv:" - local long_opts="artifact,help,keep,rpm,srpm,unittest,verbose:" + local short_opts="ahkrsuv" + local long_opts="artifact,help,keep,rpm,srpm,unittest,verbose" local getopt_out="" getopt_out=$(getopt --name "${0##*/}" \ --options "${short_opts}" --long "${long_opts}" -- "$@") && @@ -149,60 +190,70 @@ main() { [ $# -eq 1 ] || { bad_Usage "ERROR: Must provide version!"; return; } version="$1" + case "$version" in + 6|7) :;; + *) error "Expected version of 6 or 7, not '$version'"; return;; + esac TEMP_D=$(mktemp -d "${TMPDIR:-/tmp}/${0##*/}.XXXXXX") || fail "failed to make tempdir" trap cleanup EXIT # program starts here - local uuid="" name="" + local uuid="" name="" user="ci-test" cdir="" + cdir="/home/$user/cloud-init" uuid=$(uuidgen -t) || { error "no uuidgen"; return 1; } name="cloud-init-centos-${uuid%%-*}" start_container "images:centos/$version" "$name" - # CentOS 6 does not come with tar - if [ "$version" = "6" ]; then - inside "$name" yum install --assumeyes tar || { - errorrc "FAIL: yum install tar failed"; - } - fi + + # prep the container (install very basic dependencies) + inside "$name" bash -s prep <"$0" || + { errorrc "Failed to prep container $name"; return; } + + # add the user + inside "$name" useradd "$user" debug 1 "inserting cloud-init" - inject_cloud_init "$name" || { + inject_cloud_init "$name" "$user" || { errorrc "FAIL: injecting cloud-init into $name failed." return } - # install dependencies - debug 1 "installing dependencies" - inside "$name" /bin/sh <tools/setup-centos || - fail "failed: setting up container $name" + inside_as_cd "$name" root "$cdir" \ + ./tools/read-dependencies \ + --requirements-file=requirements.txt \ + --requirements-file=test-requirements.txt \ + --distro=centos --install || { + errorrc "FAIL: failed to install dependencies with read-dependencies" + return + } - local errors=0 do_cd="cd $tar_folder" - inside_as "$name" "$user" sh -ec "$do_cd; git checkout .; git status" || + local errors=0 + inside_as_cd "$name" "$user" "$cdir" \ + sh -ec "git checkout .; git status" || { errorrc "git checkout failed."; errors=$(($errors+1)); } if [ -n "$unittest" ]; then debug 1 "running unit tests." - inside_as "$name" "$user" sh -ec "$do_cd; nosetests tests/unittests" || + inside_as_cd "$name" "$user" "$cdir" nosetests tests/unittests || { errorrc "nosetests failed."; errors=$(($errors+1)); } fi if [ -n "$srpm" ]; then debug 1 "building srpm." - inside_as "$name" "$user" sh -ec "$do_cd; ./packages/brpm --srpm" || + inside_as_cd "$name" "$user" "$cdir" ./packages/brpm --srpm || { errorrc "brpm --srpm."; errors=$(($errors+1)); } fi if [ -n "$rpm" ]; then debug 1 "building rpm." - inside_as "$name" "$user" sh -ec "$do_cd; ./packages/brpm" || + inside_as_cd "$name" "$user" "$cdir" ./packages/brpm || { errorrc "brpm failed."; errors=$(($errors+1)); } fi if [ -n "$artifact" ]; then - cmd="ls /home/$user/$tar_folder/*.rpm" - for built_rpm in $(lxc exec "$name" -- sh -c "$cmd"); do + for built_rpm in $(inside "$name" sh -c "echo $cdir/*.rpm"); do lxc file pull "$name/$built_rpm" . 
done fi @@ -214,5 +265,10 @@ main() { return 0 } -main "$@" +if [ "$1" = "prep" ]; then + shift + prep "$@" +else + main "$@" +fi # vi: ts=4 expandtab diff --git a/tools/setup-centos b/tools/setup-centos deleted file mode 100755 index bc5da8a7..00000000 --- a/tools/setup-centos +++ /dev/null @@ -1,49 +0,0 @@ -#!/bin/sh -# This file is part of cloud-init. See LICENSE file for license information. -set -fux -export LANG=C - -packages=" - file - git - pyserial - python-argparse - python-cheetah - python-configobj - python-devel - python-jinja2 - python-jsonpatch - python-oauthlib - python-pip - python-prettytable - python-requests - python-six - PyYAML - rpm-build -" - -pips=" - contextlib2 - httpretty - mock - nose - pep8 - unittest2 -" - -error() { echo "$@" 1>&2; } -fail() { [ $# -eq 0 ] || error "$@"; exit 1; } -info() { echo "$@"; } - -pips=$(for p in $pips; do echo "$p"; done | sort -u) -packages=$(for p in $packages; do echo "$p"; done | sort -u) - -if ! rpm -q epel-release >/dev/null; then - yum install --assumeyes epel-release || - fail "failed: yum install epel-release" -fi -yum install --assumeyes $packages || - fail "failed: yum install" "$packages" - -pip install --upgrade $pips || - fail "failed: pip install $pips" -- cgit v1.2.3 From b23d9d7c5c112612dbaaf8c8371c9e735500b2eb Mon Sep 17 00:00:00 2001 From: Chad Smith <chad.smith@canonical.com> Date: Wed, 14 Jun 2017 17:11:43 -0600 Subject: ci deps: Add --test-distro to read-dependencies to install all deps read-dependencies now takes --test-distro param to indicate we want to install all system package depenencies to allow for testing and building for our continous integration environment. It allows us to install all needed deps on a fresh system with: python3 ./tools/read-dependencies --distro ubuntu --test-distro [--dry-run]. Additionally read-dependencies now looks at what version of python is running the script (py2 vs p3) and opts to install python 2 or 3 system deps respectively. This behavior can still be overridden with python3 ./tools/read-dependencies ... --python-version 2. There are also some distro-specific packaging and test dependencies, like devscripts, tox and libssl-dev on debian or ubuntu. Those pkg dependencies have now been broken out from common pkg deps to avoid trying to install them on centos/redhat/suse. --- Makefile | 6 ++---- packages/bddeb | 4 +++- tools/read-dependencies | 46 +++++++++++++++++++++++++++++++++++++--------- tools/run-centos | 5 +---- 4 files changed, 43 insertions(+), 18 deletions(-) (limited to 'tools') diff --git a/Makefile b/Makefile index c752530c..e9f54982 100644 --- a/Makefile +++ b/Makefile @@ -54,12 +54,10 @@ unittest3: clean_pyc nosetests3 $(noseopts) tests/unittests ci-deps-ubuntu: - @$(PYVER) $(CWD)/tools/read-dependencies --distro ubuntu --install --python-version 3 - @$(PYVER) $(CWD)/tools/read-dependencies --distro ubuntu --requirements-file test-requirements.txt --install --python-version 3 + @$(PYVER) $(CWD)/tools/read-dependencies --distro-ubuntu --test-distro ci-deps-centos: - @$(PYVER) $(CWD)/tools/read-dependencies --distro centos --install - @$(PYVER) $(CWD)/tools/read-dependencies --distro centos --requirements-file test-requirements.txt --install + @$(PYVER) $(CWD)/tools/read-dependencies --distro centos --test-distro pip-requirements: @echo "Installing cloud-init dependencies..." 
diff --git a/packages/bddeb b/packages/bddeb
index e45af6ee..609a94fb 100755
--- a/packages/bddeb
+++ b/packages/bddeb
@@ -72,7 +72,9 @@ def write_debian_folder(root, templ_data, is_python2, cloud_util_deps):
     requires = ['cloud-utils | cloud-guest-utils'] if cloud_util_deps else []
     # We consolidate all deps as Build-Depends as our package build runs all
     # tests so we need all runtime dependencies anyway.
-    requires.extend(reqs + test_reqs + [python])
+    # NOTE: python package was moved to the front after debuild -S would fail
+    # with 'Please add appropriate interpreter' errors (as in debian bug 861132)
+    requires.extend([python] + reqs + test_reqs)
     templater.render_to_file(util.abs_join(find_root(),
                              'packages', 'debian', 'control.in'),
                              util.abs_join(deb_dir, 'control'),
diff --git a/tools/read-dependencies b/tools/read-dependencies
index 8a585343..2a648680 100755
--- a/tools/read-dependencies
+++ b/tools/read-dependencies
@@ -40,8 +40,13 @@ DISTRO_INSTALL_PKG_CMD = {
 }
 
 
-# List of base system packages required to start using make
-EXTRA_SYSTEM_BASE_PKGS = ['make', 'sudo', 'tar']
+# List of base system packages required to enable ci automation
+CI_SYSTEM_BASE_PKGS = {
+    'common': ['make', 'sudo', 'tar'],
+    'redhat': ['python-tox'],
+    'centos': ['python-tox'],
+    'ubuntu': ['devscripts', 'python3-dev', 'libssl-dev', 'tox', 'sbuild'],
+    'debian': ['devscripts', 'python3-dev', 'libssl-dev', 'tox', 'sbuild']}
 
 
 # JSON definition of distro-specific package dependencies
@@ -70,10 +75,16 @@ def get_parser():
         dest='install',
         help='When specified, install the required system packages.')
     parser.add_argument(
-        '-v', '--python-version', type=str, dest='python_version', default="2",
+        '-t', '--test-distro', action='store_true', default=False,
+        dest='test_distro',
+        help='Additionally install continuous integration system packages '
+             'required for build and test automation.')
+    parser.add_argument(
+        '-v', '--python-version', type=str, dest='python_version', default=None,
         choices=["2", "3"],
-        help='The version of python we want to generate system package '
-             'dependencies for.')
+        help='Override the version of python we want to generate system '
+             'package dependencies for. Defaults to the version of python '
+             'this script is called with.')
     return parser
 
 
@@ -114,13 +125,17 @@ def parse_pip_requirements(requirements_path):
     return dep_names
 
 
-def translate_pip_to_system_pkg(pip_requires, renames, python_ver="2"):
+def translate_pip_to_system_pkg(pip_requires, renames, python_ver):
     """Translate pip package names to distro-specific package names.
 
     @param pip_requires: List of versionless pip package names to translate.
    @param renames: Dict containing special case renames from pip name to
        system package name for the distro.
+    @param python_ver: Optional python version string "2" or "3". When None,
+        use the python version that is calling this script via
+        sys.version_info.
     """
+    if python_ver is None:
+        python_ver = str(sys.version_info[0])
     if python_ver == "2":
         prefix = "python-"
     else:
@@ -147,6 +162,16 @@ def main(distro):
     else:
         topd = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
 
+    if args.test_distro:
+        # Give us all the system deps we need for continuous integration
+        if args.req_files:
+            sys.stderr.write(
+                "Parameter --test-distro overrides --requirements-file. Use "
+                "one or the other.\n")
+            sys.exit(1)
+        args.req_files = [os.path.join(topd, DEFAULT_REQUIREMENTS),
+                          os.path.join(topd, 'test-' + DEFAULT_REQUIREMENTS)]
+        args.install = True
     if args.req_files is None:
         args.req_files = [os.path.join(topd, DEFAULT_REQUIREMENTS)]
         if not os.path.isfile(args.req_files[0]):
@@ -179,16 +204,19 @@ def main(distro):
     else:
         all_deps = pip_pkg_names
     if args.install:
-        pkg_install(all_deps, args.distro, args.dry_run)
+        pkg_install(all_deps, args.distro, args.test_distro, args.dry_run)
     else:
         print('\n'.join(all_deps))
 
 
-def pkg_install(pkg_list, distro, dry_run=False):
+def pkg_install(pkg_list, distro, test_distro=False, dry_run=False):
     """Install a list of packages using the DISTRO_INSTALL_PKG_CMD."""
+    if test_distro:
+        pkg_list = list(pkg_list) + CI_SYSTEM_BASE_PKGS['common']
+        distro_base_pkgs = CI_SYSTEM_BASE_PKGS.get(distro, [])
+        pkg_list += distro_base_pkgs
     print('Installing deps: {0}{1}'.format(
         '(dryrun)' if dry_run else '', ' '.join(pkg_list)))
-    pkg_list = list(pkg_list) + EXTRA_SYSTEM_BASE_PKGS
     install_cmd = []
     if dry_run:
         install_cmd.append('echo')
diff --git a/tools/run-centos b/tools/run-centos
index 99ba6be0..b10e3bc4 100755
--- a/tools/run-centos
+++ b/tools/run-centos
@@ -221,10 +221,7 @@ main() {
     }
 
     inside_as_cd "$name" root "$cdir" \
-        ./tools/read-dependencies \
-            --requirements-file=requirements.txt \
-            --requirements-file=test-requirements.txt \
-            --distro=centos --install || {
+        ./tools/read-dependencies --distro=centos --test-distro || {
         errorrc "FAIL: failed to install dependencies with read-dependencies"
         return
     }
-- 
cgit v1.2.3


From ecb408afa1104fe49ce6eb1dc5708be56abd5cb2 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@brickies.net>
Date: Thu, 15 Jun 2017 10:03:45 -0400
Subject: FreeBSD: Make freebsd a variant, fix unittests and
 tools/build-on-freebsd.

- Simplify the logic of 'variant' in util.system_info, basing much of the
  data on https://github.com/hpcugent/easybuild/wiki/OS_flavor_name_version
- Fix get_resource_disk_on_freebsd when running on a system without an
  Azure resource disk.
- Fix tools/build-on-freebsd to replace oauth with oauthlib, and add bash,
  which is a dependency for tests.
- Update a few places that were checking for freebsd but not using
  util.is_FreeBSD().
---
 cloudinit/config/cc_growpart.py                  |  2 +-
 cloudinit/config/cc_power_state_change.py        |  2 +-
 cloudinit/sources/DataSourceAzure.py             |  2 +-
 cloudinit/util.py                                | 46 ++++++++++--------------
 config/cloud.cfg.tmpl                            | 20 +++++------
 tests/unittests/test_handler/test_handler_ntp.py |  2 +-
 tests/unittests/test_util.py                     |  9 +++--
 tools/build-on-freebsd                           |  6 ++--
 8 files changed, 40 insertions(+), 49 deletions(-)

(limited to 'tools')

diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py
index d2bc6e6c..bafca9d8 100644
--- a/cloudinit/config/cc_growpart.py
+++ b/cloudinit/config/cc_growpart.py
@@ -214,7 +214,7 @@ def device_part_info(devpath):
 
     # FreeBSD doesn't know of sysfs so just get everything we need from
     # the device, like /dev/vtbd0p2.
- if util.system_info()["platform"].startswith('FreeBSD'): + if util.is_FreeBSD(): m = re.search('^(/dev/.+)p([0-9])$', devpath) return (m.group(1), m.group(2)) diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py index c1c6fe7e..eba58b02 100644 --- a/cloudinit/config/cc_power_state_change.py +++ b/cloudinit/config/cc_power_state_change.py @@ -71,7 +71,7 @@ def givecmdline(pid): # Example output from procstat -c 1 # PID COMM ARGS # 1 init /bin/init -- - if util.system_info()["platform"].startswith('FreeBSD'): + if util.is_FreeBSD(): (output, _err) = util.subp(['procstat', '-c', str(pid)]) line = output.splitlines()[1] m = re.search('\d+ (\w|\.|-)+\s+(/\w.+)', line) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 71e7c55c..4fe0d635 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -101,7 +101,7 @@ def get_dev_storvsc_sysctl(): sysctl_out, err = util.subp(['sysctl', 'dev.storvsc']) except util.ProcessExecutionError: LOG.debug("Fail to execute sysctl dev.storvsc") - return None + sysctl_out = "" return sysctl_out diff --git a/cloudinit/util.py b/cloudinit/util.py index ec68925e..c93b6d7e 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -573,7 +573,7 @@ def is_ipv4(instr): def is_FreeBSD(): - return system_info()['platform'].startswith('FreeBSD') + return system_info()['variant'] == "freebsd" def get_cfg_option_bool(yobj, key, default=False): @@ -598,37 +598,29 @@ def get_cfg_option_int(yobj, key, default=0): def system_info(): info = { 'platform': platform.platform(), + 'system': platform.system(), 'release': platform.release(), 'python': platform.python_version(), 'uname': platform.uname(), - 'dist': platform.linux_distribution(), # pylint: disable=W1505 + 'dist': platform.dist(), # pylint: disable=W1505 } - plat = info['platform'].lower() - # Try to get more info about what it actually is, in a format - # that we can easily use across linux and variants... - if plat.startswith('darwin'): - info['variant'] = 'darwin' - elif plat.endswith("bsd"): - info['variant'] = 'bsd' - elif plat.startswith('win'): - info['variant'] = 'windows' - elif 'linux' in plat: - # Try to get a single string out of these... - linux_dist, _version, _id = info['dist'] - linux_dist = linux_dist.lower() - if linux_dist in ('ubuntu', 'linuxmint', 'mint'): - info['variant'] = 'ubuntu' + system = info['system'].lower() + var = 'unknown' + if system == "linux": + linux_dist = info['dist'][0].lower() + if linux_dist in ('centos', 'fedora', 'debian'): + var = linux_dist + elif linux_dist in ('ubuntu', 'linuxmint', 'mint'): + var = 'ubuntu' + elif linux_dist == 'redhat': + var = 'rhel' else: - for prefix, variant in [('redhat', 'rhel'), - ('centos', 'centos'), - ('fedora', 'fedora'), - ('debian', 'debian')]: - if linux_dist.startswith(prefix): - info['variant'] = variant - if 'variant' not in info: - info['variant'] = 'linux' - if 'variant' not in info: - info['variant'] = 'unknown' + var = 'linux' + elif system in ('windows', 'darwin', "freebsd"): + var = system + + info['variant'] = var + return info diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl index 5af2a88f..f4b9069b 100644 --- a/config/cloud.cfg.tmpl +++ b/config/cloud.cfg.tmpl @@ -2,7 +2,7 @@ # The top level settings are used as module # and system configuration. 
-{% if variant in ["bsd"] %}
+{% if variant in ["freebsd"] %}
 syslog_fix_perms: root:wheel
 {% endif %}
 # A set of users which may be applied and/or used by various modules
@@ -13,7 +13,7 @@ users:
 
 # If this is set, 'root' will not be able to ssh in and they
 # will get a message to login instead as the default $user
-{% if variant in ["bsd"] %}
+{% if variant in ["freebsd"] %}
 disable_root: false
 {% else %}
 disable_root: true
@@ -30,7 +30,7 @@ ssh_pwauth:   0
 # This will cause the set+update hostname module to not operate (if true)
 preserve_hostname: false
 
-{% if variant in ["bsd"] %}
+{% if variant in ["freebsd"] %}
 # This should not be required, but leave it in place until the real cause of
 # not being able to find -any- datasources is resolved.
 datasource_list: ['ConfigDrive', 'Azure', 'OpenStack', 'Ec2']
@@ -53,13 +53,13 @@ cloud_init_modules:
 - write-files
 - growpart
 - resizefs
-{% if variant not in ["bsd"] %}
+{% if variant not in ["freebsd"] %}
 - disk_setup
 - mounts
 {% endif %}
 - set_hostname
 - update_hostname
-{% if variant not in ["bsd"] %}
+{% if variant not in ["freebsd"] %}
 - update_etc_hosts
 - ca-certs
 - rsyslog
@@ -87,7 +87,7 @@ cloud_config_modules:
 - apt-pipelining
 - apt-configure
 {% endif %}
-{% if variant not in ["bsd"] %}
+{% if variant not in ["freebsd"] %}
 - ntp
 {% endif %}
 - timezone
@@ -108,7 +108,7 @@ cloud_final_modules:
 - landscape
 - lxd
 {% endif %}
-{% if variant not in ["bsd"] %}
+{% if variant not in ["freebsd"] %}
 - puppet
 - chef
 - salt-minion
@@ -130,10 +130,8 @@ cloud_final_modules:
 # (not accessible to handlers/transforms)
 system_info:
    # This will affect which distro class gets used
-{% if variant in ["centos", "debian", "fedora", "rhel", "ubuntu"] %}
+{% if variant in ["centos", "debian", "fedora", "rhel", "ubuntu", "freebsd"] %}
    distro: {{ variant }}
-{% elif variant in ["bsd"] %}
-   distro: freebsd
 {% else %}
    # Unknown/fallback distro.
distro: ubuntu @@ -182,7 +180,7 @@ system_info: cloud_dir: /var/lib/cloud/ templates_dir: /etc/cloud/templates/ ssh_svcname: sshd -{% elif variant in ["bsd"] %} +{% elif variant in ["freebsd"] %} # Default user name + that default users groups (if added/used) default_user: name: freebsd diff --git a/tests/unittests/test_handler/test_handler_ntp.py b/tests/unittests/test_handler/test_handler_ntp.py index c4299d94..7f278646 100644 --- a/tests/unittests/test_handler/test_handler_ntp.py +++ b/tests/unittests/test_handler/test_handler_ntp.py @@ -62,7 +62,7 @@ class TestNtp(FilesystemMockingTestCase): def test_ntp_rename_ntp_conf(self): """When NTP_CONF exists, rename_ntp moves it.""" ntpconf = self.tmp_path("ntp.conf", self.new_root) - os.mknod(ntpconf) + util.write_file(ntpconf, "") with mock.patch("cloudinit.config.cc_ntp.NTP_CONF", ntpconf): cc_ntp.rename_ntp_conf() self.assertFalse(os.path.exists(ntpconf)) diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 014aa6a3..a73fd26a 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -20,6 +20,9 @@ except ImportError: import mock +BASH = util.which('bash') + + class FakeSelinux(object): def __init__(self, match_what): @@ -544,17 +547,17 @@ class TestReadSeeded(helpers.TestCase): class TestSubp(helpers.TestCase): - stdin2err = ['bash', '-c', 'cat >&2'] + stdin2err = [BASH, '-c', 'cat >&2'] stdin2out = ['cat'] utf8_invalid = b'ab\xaadef' utf8_valid = b'start \xc3\xa9 end' utf8_valid_2 = b'd\xc3\xa9j\xc8\xa7' - printenv = ['bash', '-c', 'for n in "$@"; do echo "$n=${!n}"; done', '--'] + printenv = [BASH, '-c', 'for n in "$@"; do echo "$n=${!n}"; done', '--'] def printf_cmd(self, *args): # bash's printf supports \xaa. So does /usr/bin/printf # but by using bash, we remove dependency on another program. - return(['bash', '-c', 'printf "$@"', 'printf'] + list(args)) + return([BASH, '-c', 'printf "$@"', 'printf'] + list(args)) def test_subp_handles_utf8(self): # The given bytes contain utf-8 accented characters as seen in e.g. diff --git a/tools/build-on-freebsd b/tools/build-on-freebsd index ccc10b40..ff9153ad 100755 --- a/tools/build-on-freebsd +++ b/tools/build-on-freebsd @@ -8,6 +8,7 @@ fail() { echo "FAILED:" "$@" 1>&2; exit 1; } # Check dependencies: depschecked=/tmp/c-i.dependencieschecked pkgs=" + bash dmidecode e2fsprogs py27-Jinja2 @@ -16,7 +17,7 @@ pkgs=" py27-configobj py27-jsonpatch py27-jsonpointer - py27-oauth + py27-oauthlib py27-prettytable py27-requests py27-serial @@ -35,9 +36,6 @@ touch $depschecked python setup.py build python setup.py install -O1 --skip-build --prefix /usr/local/ --init-system sysvinit_freebsd -# Install the correct config file: -cp config/cloud.cfg-freebsd /etc/cloud/cloud.cfg - # Enable cloud-init in /etc/rc.conf: sed -i.bak -e "/cloudinit_enable=.*/d" /etc/rc.conf echo 'cloudinit_enable="YES"' >> /etc/rc.conf -- cgit v1.2.3 From b6c478e07008819ea1a69a666f3a97c89f457e19 Mon Sep 17 00:00:00 2001 From: Joonas Kylmälä <joonas.kylmala@iki.fi> Date: Wed, 12 Jul 2017 11:23:04 +0000 Subject: tools: Fix exception handling. We should be expecting IndexError instead of KeyError because we are using a list (key_ids) and not a dictionary. Also, thanks to Emmanuel Kasper for pointing out the wrong response code. 
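A quick illustration of the reasoning above (illustrative only, not part
of the patch): indexing past the end of a Python list raises IndexError,
never KeyError, so the old handler could not catch the failing lookup:

    $ python3 -c 'key_ids = ["ssh-key-0"]; key_ids[2]'
    ...
    IndexError: list index out of range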
LP: #1701527
---
 tools/mock-meta.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'tools')

diff --git a/tools/mock-meta.py b/tools/mock-meta.py
index f185dbf2..a5d14ab7 100755
--- a/tools/mock-meta.py
+++ b/tools/mock-meta.py
@@ -262,8 +262,8 @@ class MetaDataHandler(object):
             except ValueError:
                 raise WebException(hclient.BAD_REQUEST,
                                    "%s: not an integer" % mybe_key)
-            except KeyError:
-                raise WebException(hclient.BAD_REQUEST,
+            except IndexError:
+                raise WebException(hclient.NOT_FOUND,
                                    "Unknown key id %r" % mybe_key)
             # Extract the possible sub-params
             result = traverse(nparams[1:], {
-- 
cgit v1.2.3


From e80517ae6aea49c9ab3bd622a33fee44014f485f Mon Sep 17 00:00:00 2001
From: Julien Castets <castets.j@gmail.com>
Date: Tue, 25 Apr 2017 09:06:13 +0000
Subject: Scaleway: add datasource with user and vendor data for Scaleway.

Here we add and enable by default a datasource for the Scaleway cloud.
The datasource quickly exits unless one of three things is true:
  a.) 'Scaleway' is found as the system vendor
  b.) 'scaleway' is found on the kernel command line.
  c.) the directory /var/run/scaleway exists (this is currently created
      by the scaleway initramfs module).

One interesting bit of this particular datasource is that it requires
the source port of the http request to be < 1024.
---
 cloudinit/settings.py                            |   1 +
 cloudinit/sources/DataSourceScaleway.py          | 234 ++++++++++++++++++++
 cloudinit/url_helper.py                          |  10 +-
 tests/unittests/test_datasource/test_common.py   |   2 +
 tests/unittests/test_datasource/test_scaleway.py | 262 +++++++++++++++++++++++
 tools/ds-identify                                |  18 +-
 6 files changed, 524 insertions(+), 3 deletions(-)
 create mode 100644 cloudinit/sources/DataSourceScaleway.py
 create mode 100644 tests/unittests/test_datasource/test_scaleway.py

(limited to 'tools')

diff --git a/cloudinit/settings.py b/cloudinit/settings.py
index 0abd8a4a..c120498f 100644
--- a/cloudinit/settings.py
+++ b/cloudinit/settings.py
@@ -35,6 +35,7 @@ CFG_BUILTIN = {
         'CloudStack',
         'SmartOS',
         'Bigstep',
+        'Scaleway',
         # At the end to act as a 'catch' when none of the above work...
         'None',
     ],
diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py
new file mode 100644
index 00000000..3a8a8e8f
--- /dev/null
+++ b/cloudinit/sources/DataSourceScaleway.py
@@ -0,0 +1,234 @@
+# Author: Julien Castets <castets.j@gmail.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+# Scaleway API:
+# https://developer.scaleway.com/#metadata
+
+import json
+import os
+import socket
+import time
+
+import requests
+
+# pylint fails to import the two modules below.
+# These are imported via requests.packages rather than urllib3 because:
+# a.) the provider of the requests package should ensure that urllib3
+#     contained in it is consistent/correct.
+# b.) cloud-init does not specifically have a dependency on urllib3
+#
+# For future reference, see:
+#   https://github.com/kennethreitz/requests/pull/2375
+#   https://github.com/requests/requests/issues/4104
+# pylint: disable=E0401
+from requests.packages.urllib3.connection import HTTPConnection
+from requests.packages.urllib3.poolmanager import PoolManager
+
+from cloudinit import log as logging
+from cloudinit import sources
+from cloudinit import url_helper
+from cloudinit import util
+
+
+LOG = logging.getLogger(__name__)
+
+DS_BASE_URL = 'http://169.254.42.42'
+
+BUILTIN_DS_CONFIG = {
+    'metadata_url': DS_BASE_URL + '/conf?format=json',
+    'userdata_url': DS_BASE_URL + '/user_data/cloud-init',
+    'vendordata_url': DS_BASE_URL + '/vendor_data/cloud-init'
+}
+
+DEF_MD_RETRIES = 5
+DEF_MD_TIMEOUT = 10
+
+
+def on_scaleway():
+    """
+    There are three ways to detect if you are on Scaleway:
+
+    * check DMI data: not yet implemented by Scaleway, but the check is
+      made to be future-proof.
+    * the initrd created the file /var/run/scaleway.
+    * "scaleway" is in the kernel cmdline.
+    """
+    vendor_name = util.read_dmi_data('system-manufacturer')
+    if vendor_name == 'Scaleway':
+        return True
+
+    if os.path.exists('/var/run/scaleway'):
+        return True
+
+    cmdline = util.get_cmdline()
+    if 'scaleway' in cmdline:
+        return True
+
+    return False
+
+
+class SourceAddressAdapter(requests.adapters.HTTPAdapter):
+    """
+    Adapter for requests to choose the local address to bind to.
+    """
+    def __init__(self, source_address, **kwargs):
+        self.source_address = source_address
+        super(SourceAddressAdapter, self).__init__(**kwargs)
+
+    def init_poolmanager(self, connections, maxsize, block=False):
+        socket_options = HTTPConnection.default_socket_options + [
+            (socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
+        ]
+        self.poolmanager = PoolManager(num_pools=connections,
+                                       maxsize=maxsize,
+                                       block=block,
+                                       source_address=self.source_address,
+                                       socket_options=socket_options)
+
+
+def query_data_api_once(api_address, timeout, requests_session):
+    """
+    Retrieve user data or vendor data.
+
+    Scaleway user/vendor data API returns HTTP/404 if user/vendor data is
+    not set.
+
+    This function calls `url_helper.readurl` but instead of considering
+    HTTP/404 as an error that requires a retry, it considers it as empty
+    user/vendor data.
+
+    Also, be aware the user/vendor data API requires the source port to be
+    below 1024 to ensure the client is root (since non-root users can't
+    bind ports below 1024). If requests raises ConnectionError
+    (EADDRINUSE), the caller should retry this function on another port.
+    """
+    try:
+        resp = url_helper.readurl(
+            api_address,
+            data=None,
+            timeout=timeout,
+            # It's the caller's responsibility to call this function again
+            # in case of exception. Don't let url_helper.readurl() retry by
+            # itself.
+            retries=0,
+            session=requests_session,
+            # If the error is a HTTP/404 or a ConnectionError, go into raise
+            # block below.
+            exception_cb=lambda _, exc: exc.code == 404 or (
+                isinstance(exc.cause, requests.exceptions.ConnectionError)
+            )
+        )
+        return util.decode_binary(resp.contents)
+    except url_helper.UrlError as exc:
+        # Empty user data.
+        if exc.code == 404:
+            return None
+        raise
+
+
+def query_data_api(api_type, api_address, retries, timeout):
+    """Get user or vendor data.
+
+    Handle the retrying logic in case the source port is already in use.
+
+    Scaleway metadata service requires the source port of the client to
+    be a privileged port (<1024).
This is done to ensure that only a + privileged user on the system can access the metadata service. + """ + # Query user/vendor data. Try to make a request on the first privileged + # port available. + for port in range(1, max(retries, 2)): + try: + LOG.debug( + 'Trying to get %s data (bind on port %d)...', + api_type, port + ) + requests_session = requests.Session() + requests_session.mount( + 'http://', + SourceAddressAdapter(source_address=('0.0.0.0', port)) + ) + data = query_data_api_once( + api_address, + timeout=timeout, + requests_session=requests_session + ) + LOG.debug('%s-data downloaded', api_type) + return data + + except url_helper.UrlError as exc: + # Local port already in use or HTTP/429. + LOG.warning('Error while trying to get %s data: %s', api_type, exc) + time.sleep(5) + last_exc = exc + continue + + # Max number of retries reached. + raise last_exc + + +class DataSourceScaleway(sources.DataSource): + + def __init__(self, sys_cfg, distro, paths): + super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths) + + self.ds_cfg = util.mergemanydict([ + util.get_cfg_by_path(sys_cfg, ["datasource", "Scaleway"], {}), + BUILTIN_DS_CONFIG + ]) + + self.metadata_address = self.ds_cfg['metadata_url'] + self.userdata_address = self.ds_cfg['userdata_url'] + self.vendordata_address = self.ds_cfg['vendordata_url'] + + self.retries = int(self.ds_cfg.get('retries', DEF_MD_RETRIES)) + self.timeout = int(self.ds_cfg.get('timeout', DEF_MD_TIMEOUT)) + + def get_data(self): + if not on_scaleway(): + return False + + resp = url_helper.readurl(self.metadata_address, + timeout=self.timeout, + retries=self.retries) + self.metadata = json.loads(util.decode_binary(resp.contents)) + + self.userdata_raw = query_data_api( + 'user-data', self.userdata_address, + self.retries, self.timeout + ) + self.vendordata_raw = query_data_api( + 'vendor-data', self.vendordata_address, + self.retries, self.timeout + ) + return True + + @property + def launch_index(self): + return None + + def get_instance_id(self): + return self.metadata['id'] + + def get_public_ssh_keys(self): + return [key['key'] for key in self.metadata['ssh_public_keys']] + + def get_hostname(self, fqdn=False, resolve_ip=False): + return self.metadata['hostname'] + + @property + def availability_zone(self): + return None + + @property + def region(self): + return None + + +datasources = [ + (DataSourceScaleway, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), +] + + +def get_datasource_list(depends): + return sources.list_from_depends(depends, datasources) diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index d2b92e6a..7cf76aae 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -172,7 +172,8 @@ def _get_ssl_args(url, ssl_details): def readurl(url, data=None, timeout=None, retries=0, sec_between=1, headers=None, headers_cb=None, ssl_details=None, - check_status=True, allow_redirects=True, exception_cb=None): + check_status=True, allow_redirects=True, exception_cb=None, + session=None): url = _cleanurl(url) req_args = { 'url': url, @@ -231,7 +232,12 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, LOG.debug("[%s/%s] open '%s' with %s configuration", i, manual_tries, url, filtered_req_args) - r = requests.request(**req_args) + if session is None: + session = requests.Session() + + with session as sess: + r = sess.request(**req_args) + if check_status: r.raise_for_status() LOG.debug("Read from %s (%s, %sb) after %s attempts", url, diff --git a/tests/unittests/test_datasource/test_common.py 
b/tests/unittests/test_datasource/test_common.py index 2ff1d9df..413e87ac 100644 --- a/tests/unittests/test_datasource/test_common.py +++ b/tests/unittests/test_datasource/test_common.py @@ -19,6 +19,7 @@ from cloudinit.sources import ( DataSourceOpenNebula as OpenNebula, DataSourceOpenStack as OpenStack, DataSourceOVF as OVF, + DataSourceScaleway as Scaleway, DataSourceSmartOS as SmartOS, ) from cloudinit.sources import DataSourceNone as DSNone @@ -48,6 +49,7 @@ DEFAULT_NETWORK = [ NoCloud.DataSourceNoCloudNet, OpenStack.DataSourceOpenStack, OVF.DataSourceOVFNet, + Scaleway.DataSourceScaleway, ] diff --git a/tests/unittests/test_datasource/test_scaleway.py b/tests/unittests/test_datasource/test_scaleway.py new file mode 100644 index 00000000..65d83ad7 --- /dev/null +++ b/tests/unittests/test_datasource/test_scaleway.py @@ -0,0 +1,262 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +import json + +import httpretty +import requests + +from cloudinit import helpers +from cloudinit import settings +from cloudinit.sources import DataSourceScaleway + +from ..helpers import mock, HttprettyTestCase, TestCase + + +class DataResponses(object): + """ + Possible responses of the API endpoint + 169.254.42.42/user_data/cloud-init and + 169.254.42.42/vendor_data/cloud-init. + """ + + FAKE_USER_DATA = '#!/bin/bash\necho "user-data"' + + @staticmethod + def rate_limited(method, uri, headers): + return 429, headers, '' + + @staticmethod + def api_error(method, uri, headers): + return 500, headers, '' + + @classmethod + def get_ok(cls, method, uri, headers): + return 200, headers, cls.FAKE_USER_DATA + + @staticmethod + def empty(method, uri, headers): + """ + No user data for this server. + """ + return 404, headers, '' + + +class MetadataResponses(object): + """ + Possible responses of the metadata API. + """ + + FAKE_METADATA = { + 'id': '00000000-0000-0000-0000-000000000000', + 'hostname': 'scaleway.host', + 'ssh_public_keys': [{ + 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', + 'fingerprint': '2048 06:ae:... login (RSA)' + }, { + 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', + 'fingerprint': '2048 06:ff:... login2 (RSA)' + }] + } + + @classmethod + def get_ok(cls, method, uri, headers): + return 200, headers, json.dumps(cls.FAKE_METADATA) + + +class TestOnScaleway(TestCase): + + def install_mocks(self, fake_dmi, fake_file_exists, fake_cmdline): + mock, faked = fake_dmi + mock.return_value = 'Scaleway' if faked else 'Whatever' + + mock, faked = fake_file_exists + mock.return_value = faked + + mock, faked = fake_cmdline + mock.return_value = \ + 'initrd=initrd showopts scaleway nousb' if faked \ + else 'BOOT_IMAGE=/vmlinuz-3.11.0-26-generic' + + @mock.patch('cloudinit.util.get_cmdline') + @mock.patch('os.path.exists') + @mock.patch('cloudinit.util.read_dmi_data') + def test_not_on_scaleway(self, m_read_dmi_data, m_file_exists, + m_get_cmdline): + self.install_mocks( + fake_dmi=(m_read_dmi_data, False), + fake_file_exists=(m_file_exists, False), + fake_cmdline=(m_get_cmdline, False) + ) + self.assertFalse(DataSourceScaleway.on_scaleway()) + + # When not on Scaleway, get_data() returns False. 
+        datasource = DataSourceScaleway.DataSourceScaleway(
+            settings.CFG_BUILTIN, None, helpers.Paths({})
+        )
+        self.assertFalse(datasource.get_data())
+
+    @mock.patch('cloudinit.util.get_cmdline')
+    @mock.patch('os.path.exists')
+    @mock.patch('cloudinit.util.read_dmi_data')
+    def test_on_scaleway_dmi(self, m_read_dmi_data, m_file_exists,
+                             m_get_cmdline):
+        """
+        dmidecode returns "Scaleway".
+        """
+        # dmidecode returns "Scaleway"
+        self.install_mocks(
+            fake_dmi=(m_read_dmi_data, True),
+            fake_file_exists=(m_file_exists, False),
+            fake_cmdline=(m_get_cmdline, False)
+        )
+        self.assertTrue(DataSourceScaleway.on_scaleway())
+
+    @mock.patch('cloudinit.util.get_cmdline')
+    @mock.patch('os.path.exists')
+    @mock.patch('cloudinit.util.read_dmi_data')
+    def test_on_scaleway_var_run_scaleway(self, m_read_dmi_data,
+                                          m_file_exists, m_get_cmdline):
+        """
+        /var/run/scaleway exists.
+        """
+        self.install_mocks(
+            fake_dmi=(m_read_dmi_data, False),
+            fake_file_exists=(m_file_exists, True),
+            fake_cmdline=(m_get_cmdline, False)
+        )
+        self.assertTrue(DataSourceScaleway.on_scaleway())
+
+    @mock.patch('cloudinit.util.get_cmdline')
+    @mock.patch('os.path.exists')
+    @mock.patch('cloudinit.util.read_dmi_data')
+    def test_on_scaleway_cmdline(self, m_read_dmi_data, m_file_exists,
+                                 m_get_cmdline):
+        """
+        "scaleway" in /proc/cmdline.
+        """
+        self.install_mocks(
+            fake_dmi=(m_read_dmi_data, False),
+            fake_file_exists=(m_file_exists, False),
+            fake_cmdline=(m_get_cmdline, True)
+        )
+        self.assertTrue(DataSourceScaleway.on_scaleway())
+
+
+def get_source_address_adapter(*args, **kwargs):
+    """
+    The Scaleway user/vendor data API must be called from a privileged
+    source port.
+
+    If the unittests are run as non-root, the user doesn't have permission
+    to bind on ports below 1024.
+
+    This function removes the bind on a privileged address, since the HTTP
+    call is mocked by httpretty anyway.
+    """
+    kwargs.pop('source_address')
+    return requests.adapters.HTTPAdapter(*args, **kwargs)
+
+
+class TestDataSourceScaleway(HttprettyTestCase):
+
+    def setUp(self):
+        self.datasource = DataSourceScaleway.DataSourceScaleway(
+            settings.CFG_BUILTIN, None, helpers.Paths({})
+        )
+        super(TestDataSourceScaleway, self).setUp()
+
+        self.metadata_url = \
+            DataSourceScaleway.BUILTIN_DS_CONFIG['metadata_url']
+        self.userdata_url = \
+            DataSourceScaleway.BUILTIN_DS_CONFIG['userdata_url']
+        self.vendordata_url = \
+            DataSourceScaleway.BUILTIN_DS_CONFIG['vendordata_url']
+
+    @httpretty.activate
+    @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter',
+                get_source_address_adapter)
+    @mock.patch('cloudinit.util.get_cmdline')
+    @mock.patch('time.sleep', return_value=None)
+    def test_metadata_ok(self, sleep, m_get_cmdline):
+        """
+        get_data() returns metadata, user data and vendor data.
+ """ + m_get_cmdline.return_value = 'scaleway' + + # Make user data API return a valid response + httpretty.register_uri(httpretty.GET, self.metadata_url, + body=MetadataResponses.get_ok) + httpretty.register_uri(httpretty.GET, self.userdata_url, + body=DataResponses.get_ok) + httpretty.register_uri(httpretty.GET, self.vendordata_url, + body=DataResponses.get_ok) + self.datasource.get_data() + + self.assertEqual(self.datasource.get_instance_id(), + MetadataResponses.FAKE_METADATA['id']) + self.assertEqual(self.datasource.get_public_ssh_keys(), [ + elem['key'] for elem in + MetadataResponses.FAKE_METADATA['ssh_public_keys'] + ]) + self.assertEqual(self.datasource.get_hostname(), + MetadataResponses.FAKE_METADATA['hostname']) + self.assertEqual(self.datasource.get_userdata_raw(), + DataResponses.FAKE_USER_DATA) + self.assertEqual(self.datasource.get_vendordata_raw(), + DataResponses.FAKE_USER_DATA) + self.assertIsNone(self.datasource.availability_zone) + self.assertIsNone(self.datasource.region) + self.assertEqual(sleep.call_count, 0) + + @httpretty.activate + @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter', + get_source_address_adapter) + @mock.patch('cloudinit.util.get_cmdline') + @mock.patch('time.sleep', return_value=None) + def test_metadata_404(self, sleep, m_get_cmdline): + """ + get_data() returns metadata, but no user data nor vendor data. + """ + m_get_cmdline.return_value = 'scaleway' + + # Make user and vendor data APIs return HTTP/404, which means there is + # no user / vendor data for the server. + httpretty.register_uri(httpretty.GET, self.metadata_url, + body=MetadataResponses.get_ok) + httpretty.register_uri(httpretty.GET, self.userdata_url, + body=DataResponses.empty) + httpretty.register_uri(httpretty.GET, self.vendordata_url, + body=DataResponses.empty) + self.datasource.get_data() + self.assertIsNone(self.datasource.get_userdata_raw()) + self.assertIsNone(self.datasource.get_vendordata_raw()) + self.assertEqual(sleep.call_count, 0) + + @httpretty.activate + @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter', + get_source_address_adapter) + @mock.patch('cloudinit.util.get_cmdline') + @mock.patch('time.sleep', return_value=None) + def test_metadata_rate_limit(self, sleep, m_get_cmdline): + """ + get_data() is rate limited two times by the metadata API when fetching + user data. + """ + m_get_cmdline.return_value = 'scaleway' + + httpretty.register_uri(httpretty.GET, self.metadata_url, + body=MetadataResponses.get_ok) + httpretty.register_uri(httpretty.GET, self.vendordata_url, + body=DataResponses.empty) + + httpretty.register_uri( + httpretty.GET, self.userdata_url, + responses=[ + httpretty.Response(body=DataResponses.rate_limited), + httpretty.Response(body=DataResponses.rate_limited), + httpretty.Response(body=DataResponses.get_ok), + ] + ) + self.datasource.get_data() + self.assertEqual(self.datasource.get_userdata_raw(), + DataResponses.FAKE_USER_DATA) + self.assertEqual(sleep.call_count, 2) diff --git a/tools/ds-identify b/tools/ds-identify index 7c8b144b..33bd2991 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -112,7 +112,7 @@ DI_DSNAME="" # be searched if there is no setting found in config. 
DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \ CloudSigma CloudStack DigitalOcean AliYun Ec2 GCE OpenNebula OpenStack \ -OVF SmartOS" +OVF SmartOS Scaleway" DI_DSLIST="" DI_MODE="" DI_ON_FOUND="" @@ -896,6 +896,22 @@ dscheck_None() { return ${DS_NOT_FOUND} } +dscheck_Scaleway() { + if [ "${DI_DMI_SYS_VENDOR}" = "Scaleway" ]; then + return $DS_FOUND + fi + + case " ${DI_KERNEL_CMDLINE} " in + *\ scaleway\ *) return ${DS_FOUND};; + esac + + if [ -f ${PATH_ROOT}/var/run/scaleway ]; then + return ${DS_FOUND} + fi + + return ${DS_NOT_FOUND} +} + collect_info() { read_virt read_pid1_product_name -- cgit v1.2.3 From 33d573907d3ffc790e28102ecac15c3be6a85462 Mon Sep 17 00:00:00 2001 From: Scott Moser <smoser@brickies.net> Date: Fri, 21 Jul 2017 13:34:05 -0400 Subject: tools/run-centos: make running with no argument show help. If you ran tools/run-centos without an argument it would fail due to 'set -u' like: ./tools/run-centos: line 266: 1: unbound variable --- tools/run-centos | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/run-centos b/tools/run-centos index b10e3bc4..d44d5145 100755 --- a/tools/run-centos +++ b/tools/run-centos @@ -262,7 +262,7 @@ main() { return 0 } -if [ "$1" = "prep" ]; then +if [ "${1:-}" = "prep" ]; then shift prep "$@" else -- cgit v1.2.3
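A minimal sketch of the failure mode fixed here (illustrative only, using
plain sh): under 'set -u', expanding an unset $1 aborts the script before
Usage can be shown, while the ${1:-} form expands to an empty string and
falls through to main:

    $ sh -uc 'if [ "$1" = "prep" ]; then echo prep; else echo main; fi'
    sh: 1: 1: unbound variable    (exact wording varies by shell)
    $ sh -uc 'if [ "${1:-}" = "prep" ]; then echo prep; else echo main; fi'
    main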