summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJoshua Powers <josh.powers@canonical.com>2017-12-07 12:54:46 -0800
committerScott Moser <smoser@ubuntu.com>2018-01-05 20:30:33 -0500
commit34595e9b4abacc10ac599aad97c95861af34ea54 (patch)
tree36461e98bad6e31737188a000c796d0ce73458c9
parent0b5bacb1761aefa74adb79bd1683d614bdf8c998 (diff)
downloadvyos-cloud-init-34595e9b4abacc10ac599aad97c95861af34ea54.tar.gz
vyos-cloud-init-34595e9b4abacc10ac599aad97c95861af34ea54.zip
tests: Enable AWS EC2 Integration Testing
This enables integration tests to utilize AWS EC2 as a testing platform by utilizing the boto3 Python library. Usage will create and delete a custom VPC for every run. All resources will be tagged with the ec2 tag, 'cii', and the date (e.g. cii-20171220-102452). The VPC is setup with both IPv4 and IPv6 capabilities, but will only hand out IPv4 addresses by default. Instances will have complete Internet access and have full ingress and egress access (i.e. no firewall). SSH keys are generated with each run of the integration tests with the key getting uploaded to AWS at the start of tests and deleted on exit. To enable creation when the platform is setup the SSH generation code is moved to be completed by the platform setup and not during image setup. The nocloud-kvm platform was updated with this change. Creating a custom image will utilize the same clean script, boot_clean_script, that the LXD platform uses as well. The custom AMI is generated, used, and de-registered after a test run. The default instance type is set to t2.micro. This is one of the smallest instance types and is free tier eligible. The default timeout for ec2 was increased to 300 from 120 as many tests hit up against the 2 minute timeout and depending on region load can go over. Documentation for the AWS platform was added with the expected configuration files for the platform to be used. There are some additional whitespace changes included as well. pylint exception was added for paramiko and simplestreams. In the past these were not already flagged due to no __init__.py in the subdirectories of files that used these. boto3 was added to the list of dependencies in the tox ci-test runner. In order to grab console logs on EC2 the harness will now shut down an instance before terminating and before collecting the console log. This is to address a behavior of EC2 where the console log is refreshed very infrequently, but one point when it is refreshed is after shutdown.
-rw-r--r--.pylintrc2
-rw-r--r--doc/rtd/topics/tests.rst38
-rw-r--r--tests/cloud_tests/collect.py19
-rw-r--r--tests/cloud_tests/platforms.yaml11
-rw-r--r--tests/cloud_tests/platforms/__init__.py2
-rw-r--r--tests/cloud_tests/platforms/ec2/image.py109
-rw-r--r--tests/cloud_tests/platforms/ec2/instance.py126
-rw-r--r--tests/cloud_tests/platforms/ec2/platform.py231
-rw-r--r--tests/cloud_tests/platforms/ec2/snapshot.py66
-rw-r--r--tests/cloud_tests/platforms/instances.py70
-rw-r--r--tests/cloud_tests/platforms/nocloudkvm/instance.py88
-rw-r--r--tests/cloud_tests/platforms/nocloudkvm/platform.py4
-rw-r--r--tests/cloud_tests/platforms/platforms.py69
-rw-r--r--tests/cloud_tests/releases.yaml8
-rw-r--r--tests/cloud_tests/setup_image.py18
-rw-r--r--tests/cloud_tests/util.py17
-rw-r--r--tox.ini1
17 files changed, 784 insertions, 95 deletions
diff --git a/.pylintrc b/.pylintrc
index 3ad36924..05a086d9 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -46,7 +46,7 @@ reports=no
# (useful for modules/projects where namespaces are manipulated during runtime
# and thus existing member attributes cannot be deduced by static analysis. It
# supports qualified module names, as well as Unix pattern matching.
-ignored-modules=six.moves,pkg_resources,httplib,http.client
+ignored-modules=six.moves,pkg_resources,httplib,http.client,paramiko,simplestreams
# List of class names for which member attributes should not be checked (useful
# for classes with dynamically set attributes). This supports the use of
diff --git a/doc/rtd/topics/tests.rst b/doc/rtd/topics/tests.rst
index d668e3f4..bf04bb3c 100644
--- a/doc/rtd/topics/tests.rst
+++ b/doc/rtd/topics/tests.rst
@@ -118,19 +118,19 @@ TreeRun and TreeCollect
If working on a cloud-init feature or resolving a bug, it may be useful to
run the current copy of cloud-init in the integration testing environment.
-The integration testing suite can automatically build a deb based on the
+The integration testing suite can automatically build a deb based on the
current working tree of cloud-init and run the test suite using this deb.
The ``tree_run`` and ``tree_collect`` commands take the same arguments as
-the ``run`` and ``collect`` commands. These commands will build a deb and
-write it into a temporary file, then start the test suite and pass that deb
+the ``run`` and ``collect`` commands. These commands will build a deb and
+write it into a temporary file, then start the test suite and pass that deb
in. To build a deb only, and not run the test suite, the ``bddeb`` command
can be used.
Note that code in the cloud-init working tree that has not been committed
when the cloud-init deb is built will still be included. To build a
cloud-init deb from or use the ``tree_run`` command using a copy of
-cloud-init located in a different directory, use the option ``--cloud-init
+cloud-init located in a different directory, use the option ``--cloud-init
/path/to/cloud-init``.
.. code-block:: bash
@@ -383,7 +383,7 @@ Development Checklist
* Valid unit tests validating output collected
* Passes pylint & pep8 checks
* Placed in the appropriate sub-folder in the test cases directory
-* Tested by running the test:
+* Tested by running the test:
.. code-block:: bash
@@ -392,6 +392,32 @@ Development Checklist
--test modules/your_test.yaml \
[--deb <build of cloud-init>]
+
+Platforms
+=========
+
+EC2
+---
+To run on the EC2 platform it is required that the user has an AWS credentials
+configuration file specifying his or her access keys and a default region.
+These configuration files are the standard that the AWS cli and other AWS
+tools utilize for interacting directly with AWS itself and are normally
+generated when running ``aws configure``:
+
+.. code-block:: bash
+
+ $ cat $HOME/.aws/credentials
+ [default]
+ aws_access_key_id = <KEY HERE>
+ aws_secret_access_key = <KEY HERE>
+
+.. code-block:: bash
+
+ $ cat $HOME/.aws/config
+ [default]
+ region = us-west-2
+
+
Architecture
============
@@ -455,7 +481,7 @@ replace the default. If the data is a dictionary then the value will be the
result of merging that dictionary from the default config and that
dictionary from the overrides.
-Merging is done using the function
+Merging is done using the function
``tests.cloud_tests.config.merge_config``, which can be examined for more
detail on config merging behavior.
diff --git a/tests/cloud_tests/collect.py b/tests/cloud_tests/collect.py
index bb722457..33acbb1e 100644
--- a/tests/cloud_tests/collect.py
+++ b/tests/cloud_tests/collect.py
@@ -28,12 +28,18 @@ def collect_script(instance, base_dir, script, script_name):
def collect_console(instance, base_dir):
- LOG.debug('getting console log')
+ """Collect instance console log.
+
+ @param instance: instance to get console log for
+ @param base_dir: directory to write console log to
+ """
+ logfile = os.path.join(base_dir, 'console.log')
+ LOG.debug('getting console log for %s to %s', instance, logfile)
try:
data = instance.console_log()
except NotImplementedError:
data = b'instance.console_log: not implemented'
- with open(os.path.join(base_dir, 'console.log'), "wb") as fp:
+ with open(logfile, "wb") as fp:
fp.write(data)
@@ -89,12 +95,11 @@ def collect_test_data(args, snapshot, os_name, test_name):
test_output_dir, script, script_name))
for script_name, script in test_scripts.items()]
- console_log = partial(
- run_single, 'collect console',
- partial(collect_console, instance, test_output_dir))
-
res = run_stage('collect for test: {}'.format(test_name),
- [start_call] + collect_calls + [console_log])
+ [start_call] + collect_calls)
+
+ instance.shutdown()
+ collect_console(instance, test_output_dir)
return res
diff --git a/tests/cloud_tests/platforms.yaml b/tests/cloud_tests/platforms.yaml
index fa4f845e..cb1c904b 100644
--- a/tests/cloud_tests/platforms.yaml
+++ b/tests/cloud_tests/platforms.yaml
@@ -6,8 +6,13 @@ default_platform_config:
get_image_timeout: 300
# maximum time to create instance (before waiting for cloud-init)
create_instance_timeout: 60
-
+ private_key: id_rsa
+ public_key: id_rsa.pub
platforms:
+ ec2:
+ enabled: true
+ instance-type: t2.micro
+ tag: cii
lxd:
enabled: true
# overrides for image templates
@@ -61,9 +66,5 @@ platforms:
{{ config_get("user.vendor-data", properties.default) }}
nocloud-kvm:
enabled: true
- private_key: id_rsa
- public_key: id_rsa.pub
- ec2: {}
- azure: {}
# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/__init__.py b/tests/cloud_tests/platforms/__init__.py
index 92ed1627..a01e51ac 100644
--- a/tests/cloud_tests/platforms/__init__.py
+++ b/tests/cloud_tests/platforms/__init__.py
@@ -2,10 +2,12 @@
"""Main init."""
+from .ec2 import platform as ec2
from .lxd import platform as lxd
from .nocloudkvm import platform as nocloudkvm
PLATFORMS = {
+ 'ec2': ec2.EC2Platform,
'nocloud-kvm': nocloudkvm.NoCloudKVMPlatform,
'lxd': lxd.LXDPlatform,
}
diff --git a/tests/cloud_tests/platforms/ec2/image.py b/tests/cloud_tests/platforms/ec2/image.py
new file mode 100644
index 00000000..53706b1d
--- /dev/null
+++ b/tests/cloud_tests/platforms/ec2/image.py
@@ -0,0 +1,109 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""EC2 Image Base Class."""
+
+from ..images import Image
+from .snapshot import EC2Snapshot
+from tests.cloud_tests import LOG
+
+
+class EC2Image(Image):
+ """EC2 backed image."""
+
+ platform_name = 'ec2'
+
+ def __init__(self, platform, config, image_ami):
+ """Set up image.
+
+ @param platform: platform object
+ @param config: image configuration
+ @param image_ami: string of image ami ID
+ """
+ super(EC2Image, self).__init__(platform, config)
+ self._img_instance = None
+ self.image_ami = image_ami
+
+ @property
+ def _instance(self):
+ """Internal use only, returns a running instance"""
+ if not self._img_instance:
+ self._img_instance = self.platform.create_instance(
+ self.properties, self.config, self.features,
+ self.image_ami, user_data=None)
+ self._img_instance.start(wait=True, wait_for_cloud_init=True)
+ return self._img_instance
+
+ @property
+ def properties(self):
+ """Dictionary containing: 'arch', 'os', 'version', 'release'."""
+ return {
+ 'arch': self.config['arch'],
+ 'os': self.config['family'],
+ 'release': self.config['release'],
+ 'version': self.config['version'],
+ }
+
+ def destroy(self):
+ """Delete the instance used to create a custom image."""
+ if self._img_instance:
+ LOG.debug('terminating backing instance %s',
+ self._img_instance.instance.instance_id)
+ self._img_instance.instance.terminate()
+ self._img_instance.instance.wait_until_terminated()
+
+ super(EC2Image, self).destroy()
+
+ def _execute(self, *args, **kwargs):
+ """Execute command in image, modifying image."""
+ self._instance.start(wait=True)
+ return self._instance._execute(*args, **kwargs)
+
+ def push_file(self, local_path, remote_path):
+ """Copy file at 'local_path' to instance at 'remote_path'."""
+ self._instance.start(wait=True)
+ return self._instance.push_file(local_path, remote_path)
+
+ def run_script(self, *args, **kwargs):
+ """Run script in image, modifying image.
+
+ @return_value: script output
+ """
+ self._instance.start(wait=True)
+ return self._instance.run_script(*args, **kwargs)
+
+ def snapshot(self):
+ """Create snapshot of image, block until done.
+
+ Will return base image_ami if no instance has been booted, otherwise
+ will run the clean script, shutdown the instance, create a custom
+ AMI, and use that AMI once available.
+ """
+ if not self._img_instance:
+ return EC2Snapshot(self.platform, self.properties, self.config,
+ self.features, self.image_ami,
+ delete_on_destroy=False)
+
+ if self.config.get('boot_clean_script'):
+ self._img_instance.run_script(self.config.get('boot_clean_script'))
+
+ self._img_instance.shutdown(wait=True)
+
+ LOG.debug('creating custom ami from instance %s',
+ self._img_instance.instance.instance_id)
+ response = self.platform.ec2_client.create_image(
+ Name='%s-%s' % (self.platform.tag, self.image_ami),
+ InstanceId=self._img_instance.instance.instance_id
+ )
+ image_ami_edited = response['ImageId']
+
+ # Create image and wait until it is in the 'available' state
+ image = self.platform.ec2_resource.Image(image_ami_edited)
+ image.wait_until_exists()
+ waiter = self.platform.ec2_client.get_waiter('image_available')
+ waiter.wait(ImageIds=[image.id])
+ image.reload()
+
+ return EC2Snapshot(self.platform, self.properties, self.config,
+ self.features, image_ami_edited)
+
+# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/ec2/instance.py b/tests/cloud_tests/platforms/ec2/instance.py
new file mode 100644
index 00000000..4ba737ab
--- /dev/null
+++ b/tests/cloud_tests/platforms/ec2/instance.py
@@ -0,0 +1,126 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Base EC2 instance."""
+import os
+
+import botocore
+
+from ..instances import Instance
+from tests.cloud_tests import LOG, util
+
+
+class EC2Instance(Instance):
+ """EC2 backed instance."""
+
+ platform_name = "ec2"
+ _ssh_client = None
+
+ def __init__(self, platform, properties, config, features,
+ image_ami, user_data=None):
+ """Set up instance.
+
+ @param platform: platform object
+ @param properties: dictionary of properties
+ @param config: dictionary of configuration values
+ @param features: dictionary of supported feature flags
+ @param image_ami: AWS AMI ID for image to use
+ @param user_data: test user-data to pass to instance
+ """
+ super(EC2Instance, self).__init__(
+ platform, image_ami, properties, config, features)
+
+ self.image_ami = image_ami
+ self.instance = None
+ self.user_data = user_data
+ self.ssh_ip = None
+ self.ssh_port = 22
+ self.ssh_key_file = os.path.join(
+ platform.config['data_dir'], platform.config['private_key'])
+ self.ssh_pubkey_file = os.path.join(
+ platform.config['data_dir'], platform.config['public_key'])
+
+ def console_log(self):
+ """Collect console log from instance.
+
+ The console log is buffered and not always present, therefore
+ may return empty string.
+ """
+ try:
+ return self.instance.console_output()['Output'].encode()
+ except KeyError:
+ return b''
+
+ def destroy(self):
+ """Clean up instance."""
+ if self.instance:
+ LOG.debug('destroying instance %s', self.instance.id)
+ self.instance.terminate()
+ self.instance.wait_until_terminated()
+
+ self._ssh_close()
+
+ super(EC2Instance, self).destroy()
+
+ def _execute(self, command, stdin=None, env=None):
+ """Execute command on instance."""
+ env_args = []
+ if env:
+            env_args = ['env'] + ["%s=%s" % (k, v) for k, v in env.items()]
+
+ return self._ssh(['sudo'] + env_args + list(command), stdin=stdin)
+
+ def start(self, wait=True, wait_for_cloud_init=False):
+        """Start instance on EC2 with the platform's VPC."""
+ if self.instance:
+ if self.instance.state['Name'] == 'running':
+ return
+
+ LOG.debug('starting instance %s', self.instance.id)
+ self.instance.start()
+ else:
+ LOG.debug('launching instance')
+
+ args = {
+ 'ImageId': self.image_ami,
+ 'InstanceType': self.platform.instance_type,
+ 'KeyName': self.platform.key_name,
+ 'MaxCount': 1,
+ 'MinCount': 1,
+ 'SecurityGroupIds': [self.platform.security_group.id],
+ 'SubnetId': self.platform.subnet.id,
+ 'TagSpecifications': [{
+ 'ResourceType': 'instance',
+ 'Tags': [{
+ 'Key': 'Name', 'Value': self.platform.tag
+ }]
+ }],
+ }
+
+ if self.user_data:
+ args['UserData'] = self.user_data
+
+ try:
+ instances = self.platform.ec2_resource.create_instances(**args)
+ except botocore.exceptions.ClientError as error:
+ error_msg = error.response['Error']['Message']
+ raise util.PlatformError('start', error_msg)
+
+ self.instance = instances[0]
+
+ LOG.debug('instance id: %s', self.instance.id)
+ if wait:
+ self.instance.wait_until_running()
+ self.instance.reload()
+ self.ssh_ip = self.instance.public_ip_address
+ self._wait_for_system(wait_for_cloud_init)
+
+ def shutdown(self, wait=True):
+ """Shutdown instance."""
+ LOG.debug('stopping instance %s', self.instance.id)
+ self.instance.stop()
+
+ if wait:
+ self.instance.wait_until_stopped()
+ self.instance.reload()
+
+# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/ec2/platform.py b/tests/cloud_tests/platforms/ec2/platform.py
new file mode 100644
index 00000000..fdb17ba0
--- /dev/null
+++ b/tests/cloud_tests/platforms/ec2/platform.py
@@ -0,0 +1,231 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Base EC2 platform."""
+from datetime import datetime
+import os
+
+import boto3
+import botocore
+
+from ..platforms import Platform
+from .image import EC2Image
+from .instance import EC2Instance
+from tests.cloud_tests import LOG
+
+
+class EC2Platform(Platform):
+ """EC2 test platform."""
+
+ platform_name = 'ec2'
+ ipv4_cidr = '192.168.1.0/20'
+
+ def __init__(self, config):
+ """Set up platform."""
+ super(EC2Platform, self).__init__(config)
+ # Used for unique VPC, SSH key, and custom AMI generation naming
+ self.tag = '%s-%s' % (
+ config['tag'], datetime.now().strftime('%Y%m%d%H%M%S'))
+ self.instance_type = config['instance-type']
+
+ try:
+ self.ec2_client = boto3.client('ec2')
+ self.ec2_resource = boto3.resource('ec2')
+ self.ec2_region = boto3.Session().region_name
+ self.key_name = self._upload_public_key(config)
+ except botocore.exceptions.NoRegionError:
+ raise RuntimeError(
+ 'Please configure default region in $HOME/.aws/config')
+ except botocore.exceptions.NoCredentialsError:
+ raise RuntimeError(
+ 'Please configure ec2 credentials in $HOME/.aws/credentials')
+
+ self.vpc = self._create_vpc()
+ self.internet_gateway = self._create_internet_gateway()
+ self.subnet = self._create_subnet()
+ self.routing_table = self._create_routing_table()
+ self.security_group = self._create_security_group()
+
+ def create_instance(self, properties, config, features,
+ image_ami, user_data=None):
+ """Create an instance
+
+ @param src_img_path: image path to launch from
+ @param properties: image properties
+ @param config: image configuration
+ @param features: image features
+ @param image_ami: string of image ami ID
+ @param user_data: test user-data to pass to instance
+ @return_value: cloud_tests.instances instance
+ """
+ return EC2Instance(self, properties, config, features,
+ image_ami, user_data)
+
+ def destroy(self):
+ """Delete SSH keys, terminate all instances, and delete VPC."""
+ for instance in self.vpc.instances.all():
+ LOG.debug('waiting for instance %s termination', instance.id)
+ instance.terminate()
+ instance.wait_until_terminated()
+
+ if self.key_name:
+ LOG.debug('deleting SSH key %s', self.key_name)
+ self.ec2_client.delete_key_pair(KeyName=self.key_name)
+
+ if self.security_group:
+ LOG.debug('deleting security group %s', self.security_group.id)
+ self.security_group.delete()
+
+ if self.subnet:
+ LOG.debug('deleting subnet %s', self.subnet.id)
+ self.subnet.delete()
+
+ if self.routing_table:
+ LOG.debug('deleting routing table %s', self.routing_table.id)
+ self.routing_table.delete()
+
+ if self.internet_gateway:
+ LOG.debug('deleting internet gateway %s', self.internet_gateway.id)
+ self.internet_gateway.detach_from_vpc(VpcId=self.vpc.id)
+ self.internet_gateway.delete()
+
+ if self.vpc:
+ LOG.debug('deleting vpc %s', self.vpc.id)
+ self.vpc.delete()
+
+ def get_image(self, img_conf):
+ """Get image using specified image configuration.
+
+ Hard coded for 'amd64' based images.
+
+ @param img_conf: configuration for image
+ @return_value: cloud_tests.images instance
+ """
+ if img_conf['root-store'] == 'ebs':
+ root_store = 'ssd'
+ elif img_conf['root-store'] == 'instance-store':
+ root_store = 'instance'
+ else:
+ raise RuntimeError('Unknown root-store type: %s' %
+ (img_conf['root-store']))
+
+ filters = [
+ 'arch=%s' % 'amd64',
+ 'endpoint=https://ec2.%s.amazonaws.com' % self.ec2_region,
+ 'region=%s' % self.ec2_region,
+ 'release=%s' % img_conf['release'],
+ 'root_store=%s' % root_store,
+ 'virt=hvm',
+ ]
+
+ LOG.debug('finding image using streams')
+ image = self._query_streams(img_conf, filters)
+
+ try:
+ image_ami = image['id']
+ except KeyError:
+ raise RuntimeError('No images found for %s!' % img_conf['release'])
+
+ LOG.debug('found image: %s', image_ami)
+ image = EC2Image(self, img_conf, image_ami)
+ return image
+
+ def _create_internet_gateway(self):
+ """Create Internet Gateway and assign to VPC."""
+ LOG.debug('creating internet gateway')
+ internet_gateway = self.ec2_resource.create_internet_gateway()
+ internet_gateway.attach_to_vpc(VpcId=self.vpc.id)
+ self._tag_resource(internet_gateway)
+
+ return internet_gateway
+
+ def _create_routing_table(self):
+ """Update default routing table with internet gateway.
+
+ This sets up internet access between the VPC via the internet gateway
+ by configuring routing tables for IPv4 and IPv6.
+ """
+ LOG.debug('creating routing table')
+ route_table = self.vpc.create_route_table()
+ route_table.create_route(DestinationCidrBlock='0.0.0.0/0',
+ GatewayId=self.internet_gateway.id)
+ route_table.create_route(DestinationIpv6CidrBlock='::/0',
+ GatewayId=self.internet_gateway.id)
+ route_table.associate_with_subnet(SubnetId=self.subnet.id)
+ self._tag_resource(route_table)
+
+ return route_table
+
+ def _create_security_group(self):
+        """Enable ingress to default VPC security group."""
+ LOG.debug('creating security group')
+ security_group = self.vpc.create_security_group(
+ GroupName=self.tag, Description='integration test security group')
+ security_group.authorize_ingress(
+ IpProtocol='-1', FromPort=-1, ToPort=-1, CidrIp='0.0.0.0/0')
+ self._tag_resource(security_group)
+
+ return security_group
+
+ def _create_subnet(self):
+ """Generate IPv4 and IPv6 subnets for use."""
+ ipv6_cidr = self.vpc.ipv6_cidr_block_association_set[0][
+ 'Ipv6CidrBlock'][:-2] + '64'
+
+ LOG.debug('creating subnet with following ranges:')
+ LOG.debug('ipv4: %s', self.ipv4_cidr)
+ LOG.debug('ipv6: %s', ipv6_cidr)
+ subnet = self.vpc.create_subnet(CidrBlock=self.ipv4_cidr,
+ Ipv6CidrBlock=ipv6_cidr)
+ modify_subnet = subnet.meta.client.modify_subnet_attribute
+ modify_subnet(SubnetId=subnet.id,
+ MapPublicIpOnLaunch={'Value': True})
+ self._tag_resource(subnet)
+
+ return subnet
+
+ def _create_vpc(self):
+ """Setup AWS EC2 VPC or return existing VPC."""
+ LOG.debug('creating new vpc')
+ try:
+ vpc = self.ec2_resource.create_vpc(
+ CidrBlock=self.ipv4_cidr,
+ AmazonProvidedIpv6CidrBlock=True)
+ except botocore.exceptions.ClientError as e:
+ raise RuntimeError(e)
+
+ vpc.wait_until_available()
+ self._tag_resource(vpc)
+
+ return vpc
+
+ def _tag_resource(self, resource):
+ """Tag a resource with the specified tag.
+
+ This makes finding and deleting resources specific to this testing
+        much easier.
+
+ @param resource: resource to tag
+ """
+ tag = {
+ 'Key': 'Name',
+ 'Value': self.tag
+ }
+ resource.create_tags(Tags=[tag])
+
+ def _upload_public_key(self, config):
+ """Generate random name and upload SSH key with that name.
+
+ @param config: platform config
+ @return: string of ssh key name
+ """
+ key_file = os.path.join(config['data_dir'], config['public_key'])
+ with open(key_file, 'r') as file:
+ public_key = file.read().strip('\n')
+
+ LOG.debug('uploading SSH key %s', self.tag)
+ self.ec2_client.import_key_pair(KeyName=self.tag,
+ PublicKeyMaterial=public_key)
+
+ return self.tag
+
+# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/ec2/snapshot.py b/tests/cloud_tests/platforms/ec2/snapshot.py
new file mode 100644
index 00000000..2c48cb54
--- /dev/null
+++ b/tests/cloud_tests/platforms/ec2/snapshot.py
@@ -0,0 +1,66 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Base EC2 snapshot."""
+
+from ..snapshots import Snapshot
+from tests.cloud_tests import LOG
+
+
+class EC2Snapshot(Snapshot):
+ """EC2 image copy backed snapshot."""
+
+ platform_name = 'ec2'
+
+ def __init__(self, platform, properties, config, features, image_ami,
+ delete_on_destroy=True):
+ """Set up snapshot.
+
+ @param platform: platform object
+ @param properties: image properties
+ @param config: image config
+ @param features: supported feature flags
+ @param image_ami: string of image ami ID
+ @param delete_on_destroy: boolean to delete on destroy
+ """
+ super(EC2Snapshot, self).__init__(
+ platform, properties, config, features)
+
+ self.image_ami = image_ami
+ self.delete_on_destroy = delete_on_destroy
+
+ def destroy(self):
+ """Deregister the backing AMI."""
+ if self.delete_on_destroy:
+ image = self.platform.ec2_resource.Image(self.image_ami)
+ snapshot_id = image.block_device_mappings[0]['Ebs']['SnapshotId']
+
+ LOG.debug('removing custom ami %s', self.image_ami)
+ self.platform.ec2_client.deregister_image(ImageId=self.image_ami)
+
+ LOG.debug('removing custom snapshot %s', snapshot_id)
+ self.platform.ec2_client.delete_snapshot(SnapshotId=snapshot_id)
+
+ def launch(self, user_data, meta_data=None, block=True, start=True,
+ use_desc=None):
+ """Launch instance.
+
+ @param user_data: user-data for the instance
+ @param meta_data: meta_data for the instance
+ @param block: wait until instance is created
+ @param start: start instance and wait until fully started
+ @param use_desc: string of test name
+ @return_value: an Instance
+ """
+ if meta_data is not None:
+ raise ValueError("metadata not supported on Ec2")
+
+ instance = self.platform.create_instance(
+ self.properties, self.config, self.features,
+ self.image_ami, user_data)
+
+ if start:
+ instance.start()
+
+ return instance
+
+# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/instances.py b/tests/cloud_tests/platforms/instances.py
index 8c59d62c..3bad021f 100644
--- a/tests/cloud_tests/platforms/instances.py
+++ b/tests/cloud_tests/platforms/instances.py
@@ -1,14 +1,21 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""Base instance."""
+import time
+
+import paramiko
+from paramiko.ssh_exception import (
+ BadHostKeyException, AuthenticationException, SSHException)
from ..util import TargetBase
+from tests.cloud_tests import LOG, util
class Instance(TargetBase):
"""Base instance object."""
platform_name = None
+ _ssh_client = None
def __init__(self, platform, name, properties, config, features):
"""Set up instance.
@@ -26,6 +33,11 @@ class Instance(TargetBase):
self.features = features
self._tmp_count = 0
+ self.ssh_ip = None
+ self.ssh_port = None
+ self.ssh_key_file = None
+ self.ssh_username = 'ubuntu'
+
def console_log(self):
"""Instance console.
@@ -47,7 +59,63 @@ class Instance(TargetBase):
def destroy(self):
"""Clean up instance."""
- pass
+ self._ssh_close()
+
+ def _ssh(self, command, stdin=None):
+ """Run a command via SSH."""
+ client = self._ssh_connect()
+
+ cmd = util.shell_pack(command)
+ fp_in, fp_out, fp_err = client.exec_command(cmd)
+ channel = fp_in.channel
+
+ if stdin is not None:
+ fp_in.write(stdin)
+ fp_in.close()
+
+ channel.shutdown_write()
+ rc = channel.recv_exit_status()
+
+ return (fp_out.read(), fp_err.read(), rc)
+
+ def _ssh_close(self):
+ if self._ssh_client:
+ try:
+ self._ssh_client.close()
+ except SSHException:
+ LOG.warning('Failed to close SSH connection.')
+ self._ssh_client = None
+
+ def _ssh_connect(self):
+ """Connect via SSH."""
+ if self._ssh_client:
+ return self._ssh_client
+
+ if not self.ssh_ip or not self.ssh_port:
+ raise ValueError
+
+ client = paramiko.SSHClient()
+ client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ private_key = paramiko.RSAKey.from_private_key_file(self.ssh_key_file)
+
+ retries = 30
+ while retries:
+ try:
+ client.connect(username=self.ssh_username,
+ hostname=self.ssh_ip, port=self.ssh_port,
+ pkey=private_key, banner_timeout=30)
+ self._ssh_client = client
+ return client
+ except (ConnectionRefusedError, AuthenticationException,
+ BadHostKeyException, ConnectionResetError, SSHException,
+ OSError) as e:
+ retries -= 1
+ time.sleep(10)
+
+ ssh_cmd = 'Failed ssh connection to %s@%s:%s after 300 seconds' % (
+ self.ssh_username, self.ssh_ip, self.ssh_port
+ )
+ raise util.InTargetExecuteError(b'', b'', 1, ssh_cmd, 'ssh')
def _wait_for_system(self, wait_for_cloud_init):
"""Wait until system has fully booted and cloud-init has finished.
diff --git a/tests/cloud_tests/platforms/nocloudkvm/instance.py b/tests/cloud_tests/platforms/nocloudkvm/instance.py
index 9bb24256..932dc0fa 100644
--- a/tests/cloud_tests/platforms/nocloudkvm/instance.py
+++ b/tests/cloud_tests/platforms/nocloudkvm/instance.py
@@ -4,7 +4,6 @@
import copy
import os
-import paramiko
import socket
import subprocess
import time
@@ -13,7 +12,7 @@ import uuid
from ..instances import Instance
from cloudinit.atomic_helper import write_json
from cloudinit import util as c_util
-from tests.cloud_tests import util
+from tests.cloud_tests import LOG, util
# This domain contains reverse lookups for hostnames that are used.
# The primary reason is so sudo will return quickly when it attempts
@@ -26,7 +25,6 @@ class NoCloudKVMInstance(Instance):
"""NoCloud KVM backed instance."""
platform_name = "nocloud-kvm"
- _ssh_client = None
def __init__(self, platform, name, image_path, properties, config,
features, user_data, meta_data):
@@ -39,6 +37,10 @@ class NoCloudKVMInstance(Instance):
@param config: dictionary of configuration values
@param features: dictionary of supported feature flags
"""
+ super(NoCloudKVMInstance, self).__init__(
+ platform, name, properties, config, features
+ )
+
self.user_data = user_data
if meta_data:
meta_data = copy.deepcopy(meta_data)
@@ -66,6 +68,7 @@ class NoCloudKVMInstance(Instance):
meta_data['public-keys'] = []
meta_data['public-keys'].append(self.ssh_pubkey)
+ self.ssh_ip = '127.0.0.1'
self.ssh_port = None
self.pid = None
self.pid_file = None
@@ -73,8 +76,33 @@ class NoCloudKVMInstance(Instance):
self.disk = image_path
self.meta_data = meta_data
- super(NoCloudKVMInstance, self).__init__(
- platform, name, properties, config, features)
+ def shutdown(self, wait=True):
+ """Shutdown instance."""
+
+ if self.pid:
+ # This relies on _execute which uses sudo over ssh. The ssh
+ # connection would get killed before sudo exited, so ignore errors.
+ cmd = ['shutdown', 'now']
+ try:
+ self._execute(cmd)
+ except util.InTargetExecuteError:
+ pass
+ self._ssh_close()
+
+ if wait:
+ LOG.debug("Executed shutdown. waiting on pid %s to end",
+ self.pid)
+ time_for_shutdown = 120
+ give_up_at = time.time() + time_for_shutdown
+ pid_file_path = '/proc/%s' % self.pid
+ msg = ("pid %s did not exit in %s seconds after shutdown." %
+ (self.pid, time_for_shutdown))
+ while True:
+ if not os.path.exists(pid_file_path):
+ break
+ if time.time() > give_up_at:
+ raise util.PlatformError("shutdown", msg)
+ self.pid = None
def destroy(self):
"""Clean up instance."""
@@ -88,9 +116,7 @@ class NoCloudKVMInstance(Instance):
os.remove(self.pid_file)
self.pid = None
- if self._ssh_client:
- self._ssh_client.close()
- self._ssh_client = None
+ self._ssh_close()
super(NoCloudKVMInstance, self).destroy()
@@ -99,7 +125,7 @@ class NoCloudKVMInstance(Instance):
if env:
env_args = ['env'] + ["%s=%s" for k, v in env.items()]
- return self.ssh(['sudo'] + env_args + list(command), stdin=stdin)
+ return self._ssh(['sudo'] + env_args + list(command), stdin=stdin)
def generate_seed(self, tmpdir):
"""Generate nocloud seed from user-data"""
@@ -125,50 +151,6 @@ class NoCloudKVMInstance(Instance):
s.close()
return num
- def ssh(self, command, stdin=None):
- """Run a command via SSH."""
- client = self._ssh_connect()
-
- cmd = util.shell_pack(command)
- try:
- fp_in, fp_out, fp_err = client.exec_command(cmd)
- channel = fp_in.channel
- if stdin is not None:
- fp_in.write(stdin)
- fp_in.close()
-
- channel.shutdown_write()
- rc = channel.recv_exit_status()
- return (fp_out.read(), fp_err.read(), rc)
- except paramiko.SSHException as e:
- raise util.InTargetExecuteError(
- b'', b'', -1, command, self.name, reason=e)
-
- def _ssh_connect(self, hostname='localhost', username='ubuntu',
- banner_timeout=120, retry_attempts=30):
- """Connect via SSH."""
- if self._ssh_client:
- return self._ssh_client
-
- private_key = paramiko.RSAKey.from_private_key_file(self.ssh_key_file)
- client = paramiko.SSHClient()
- client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
- while retry_attempts:
- try:
- client.connect(hostname=hostname, username=username,
- port=self.ssh_port, pkey=private_key,
- banner_timeout=banner_timeout)
- self._ssh_client = client
- return client
- except (paramiko.SSHException, TypeError):
- time.sleep(1)
- retry_attempts = retry_attempts - 1
-
- error_desc = 'Failed command to: %s@%s:%s' % (username, hostname,
- self.ssh_port)
- raise util.InTargetExecuteError('', '', -1, 'ssh connect',
- self.name, error_desc)
-
def start(self, wait=True, wait_for_cloud_init=False):
"""Start instance."""
tmpdir = self.platform.config['data_dir']
diff --git a/tests/cloud_tests/platforms/nocloudkvm/platform.py b/tests/cloud_tests/platforms/nocloudkvm/platform.py
index 85933463..a7e6f5de 100644
--- a/tests/cloud_tests/platforms/nocloudkvm/platform.py
+++ b/tests/cloud_tests/platforms/nocloudkvm/platform.py
@@ -21,6 +21,10 @@ class NoCloudKVMPlatform(Platform):
platform_name = 'nocloud-kvm'
+ def __init__(self, config):
+ """Set up platform."""
+ super(NoCloudKVMPlatform, self).__init__(config)
+
def get_image(self, img_conf):
"""Get image using specified image configuration.
diff --git a/tests/cloud_tests/platforms/platforms.py b/tests/cloud_tests/platforms/platforms.py
index 28975368..d4e5c561 100644
--- a/tests/cloud_tests/platforms/platforms.py
+++ b/tests/cloud_tests/platforms/platforms.py
@@ -1,6 +1,12 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""Base platform class."""
+import os
+
+from simplestreams import filters, mirrors
+from simplestreams import util as s_util
+
+from cloudinit import util as c_util
class Platform(object):
@@ -11,6 +17,7 @@ class Platform(object):
def __init__(self, config):
"""Set up platform."""
self.config = config
+ self._generate_ssh_keys(config['data_dir'])
def get_image(self, img_conf):
"""Get image using specified image configuration.
@@ -24,4 +31,66 @@ class Platform(object):
"""Clean up platform data."""
pass
+ def _generate_ssh_keys(self, data_dir):
+ """Generate SSH keys to be used with image."""
+ filename = os.path.join(data_dir, 'id_rsa')
+
+ if os.path.exists(filename):
+ c_util.del_file(filename)
+
+ c_util.subp(['ssh-keygen', '-t', 'rsa', '-b', '4096',
+ '-f', filename, '-P', '',
+ '-C', 'ubuntu@cloud_test'],
+ capture=True)
+
+ @staticmethod
+ def _query_streams(img_conf, img_filter):
+ """Query streams for latest image given a specific filter.
+
+ @param img_conf: configuration for image
+ @param img_filter: array of filters as strings in the format 'key=value'
+ @return: dictionary with latest image information or empty
+ """
+ def policy(content, path):
+ return s_util.read_signed(content, keyring=img_conf['keyring'])
+
+ (url, path) = s_util.path_from_mirror_url(img_conf['mirror_url'], None)
+ smirror = mirrors.UrlMirrorReader(url, policy=policy)
+
+ config = {'max_items': 1, 'filters': filters.get_filters(img_filter)}
+ tmirror = FilterMirror(config)
+ tmirror.sync(smirror, path)
+
+ try:
+ return tmirror.json_entries[0]
+ except IndexError:
+ raise RuntimeError('no images found with filter: %s' % img_filter)
+
+
+class FilterMirror(mirrors.BasicMirrorWriter):
+ """Taken from sstream-query to return query result as json array."""
+
+ def __init__(self, config=None):
+ super(FilterMirror, self).__init__(config=config)
+ if config is None:
+ config = {}
+ self.config = config
+ self.filters = config.get('filters', [])
+ self.json_entries = []
+
+ def load_products(self, path=None, content_id=None):
+ return {'content_id': content_id, 'products': {}}
+
+ def filter_item(self, data, src, target, pedigree):
+ return filters.filter_item(self.filters, data, src, pedigree)
+
+ def insert_item(self, data, src, target, pedigree, contentsource):
+ # src and target are top level products:1.0
+ # data is src['products'][ped[0]]['versions'][ped[1]]['items'][ped[2]]
+ # contentsource is a ContentSource if 'path' exists in data or None
+ data = s_util.products_exdata(src, pedigree)
+ if 'path' in data:
+ data.update({'item_url': contentsource.url})
+ self.json_entries.append(data)
+
# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/releases.yaml b/tests/cloud_tests/releases.yaml
index e5933802..48f903b8 100644
--- a/tests/cloud_tests/releases.yaml
+++ b/tests/cloud_tests/releases.yaml
@@ -27,10 +27,14 @@ default_release_config:
# features groups and additional feature settings
feature_groups: []
features: {}
- nocloud-kvm:
mirror_url: https://cloud-images.ubuntu.com/daily
- mirror_dir: '/srv/citest/nocloud-kvm'
+ mirror_dir: '/srv/citest/images'
keyring: /usr/share/keyrings/ubuntu-cloudimage-keyring.gpg
+ ec2:
+ # Choose from: [ebs, instance-store]
+ root-store: ebs
+ boot_timeout: 300
+ nocloud-kvm:
setup_overrides: null
override_templates: false
# lxd specific default configuration options
diff --git a/tests/cloud_tests/setup_image.py b/tests/cloud_tests/setup_image.py
index 179f40db..6d242115 100644
--- a/tests/cloud_tests/setup_image.py
+++ b/tests/cloud_tests/setup_image.py
@@ -5,7 +5,6 @@
from functools import partial
import os
-from cloudinit import util as c_util
from tests.cloud_tests import LOG
from tests.cloud_tests import stage, util
@@ -192,20 +191,6 @@ def enable_repo(args, image):
image.execute(cmd, description=msg)
-def generate_ssh_keys(data_dir):
- """Generate SSH keys to be used with image."""
- LOG.info('generating SSH keys')
- filename = os.path.join(data_dir, 'id_rsa')
-
- if os.path.exists(filename):
- c_util.del_file(filename)
-
- c_util.subp(['ssh-keygen', '-t', 'rsa', '-b', '4096',
- '-f', filename, '-P', '',
- '-C', 'ubuntu@cloud_test'],
- capture=True)
-
-
def setup_image(args, image):
"""Set up image as specified in args.
@@ -239,9 +224,6 @@ def setup_image(args, image):
LOG.info('setting up %s', image)
res = stage.run_stage(
'set up for {}'.format(image), calls, continue_after_error=False)
- LOG.debug('after setup complete, installed cloud-init version is: %s',
- installed_package_version(image, 'cloud-init'))
- generate_ssh_keys(args.data_dir)
return res
# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/util.py b/tests/cloud_tests/util.py
index 2aedcd0d..6ff285e7 100644
--- a/tests/cloud_tests/util.py
+++ b/tests/cloud_tests/util.py
@@ -321,9 +321,9 @@ class TargetBase(object):
rcs = (0,)
if description:
- LOG.debug('Executing "%s"', description)
+ LOG.debug('executing "%s"', description)
else:
- LOG.debug("Executing command: %s", shell_quote(command))
+ LOG.debug("executing command: %s", shell_quote(command))
out, err, rc = self._execute(command=command, stdin=stdin, env=env)
@@ -447,6 +447,19 @@ class InTargetExecuteError(c_util.ProcessExecutionError):
reason=reason)
+class PlatformError(IOError):
+ """Error type for platform errors."""
+
+ default_desc = 'unexpected error in platform.'
+
+ def __init__(self, operation, description=None):
+ """Init error and parent error class."""
+ description = description if description else self.default_desc
+
+ message = '%s: %s' % (operation, description)
+ IOError.__init__(self, message)
+
+
class TempDir(object):
"""Configurable temporary directory like tempfile.TemporaryDirectory."""
diff --git a/tox.ini b/tox.ini
index fdc8a665..88b82dc3 100644
--- a/tox.ini
+++ b/tox.ini
@@ -134,4 +134,5 @@ passenv = HOME
deps =
pylxd==2.2.4
paramiko==2.3.1
+ boto3==1.4.8
bzr+lp:simplestreams