From 2796dab3cdd96306be80d6b78b8eec3a23f4d9f4 Mon Sep 17 00:00:00 2001
From: Joshua Powers
Date: Wed, 26 Apr 2017 09:19:08 -0600
Subject: tests: Enable artful

---
 tests/cloud_tests/releases.yaml | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/tests/cloud_tests/releases.yaml b/tests/cloud_tests/releases.yaml
index 3ffa68f0..183f78c1 100644
--- a/tests/cloud_tests/releases.yaml
+++ b/tests/cloud_tests/releases.yaml
@@ -49,6 +49,13 @@ releases:
       #alias: ubuntu/zesty/default
       alias: z
     sstreams_server: https://cloud-images.ubuntu.com/daily
+  artful:
+    enabled: true
+    platform_ident:
+      lxd:
+        #alias: ubuntu/artful/default
+        alias: a
+    sstreams_server: https://cloud-images.ubuntu.com/daily
   jessie:
     platform_ident:
       lxd:
-- 
cgit v1.2.3


From 653fda98a56c63d11d65ef08c4991d0425fc3f91 Mon Sep 17 00:00:00 2001
From: Felix Dreissig
Date: Sat, 29 Apr 2017 16:28:57 +0200
Subject: Fix name of "uri" key in docs for "cc_apt_configure" module

The key is called "uri", not "url". This is what's used throughout the
examples and also what works in practice (verified on Ubuntu 16.10).

This also slightly improves formatting of the key names in the related
documentation.
---
 cloudinit/config/cc_apt_configure.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index 7e751776..f6885d7c 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -65,12 +65,12 @@ take a list of configs, allowing mirrors to be specified on a per-architecture
 basis. Each config is a dictionary which must have an entry for ``arches``,
 specifying which architectures that config entry is for. The keyword
 ``default`` applies to any architecture not explicitly listed. The mirror url
-can be specified with the ``url`` key, or a list of mirrors to check can be
+can be specified with the ``uri`` key, or a list of mirrors to check can be
 provided in order, with the first mirror that can be resolved being selected.
 This allows the same configuration to be used in different environments, with
-different hosts used for a local apt mirror. If no mirror is provided by uri or
-search, ``search_dns`` may be used to search for dns names in the format
-``<distro>-mirror`` in each of the following:
+different hosts used for a local apt mirror. If no mirror is provided by
+``uri`` or ``search``, ``search_dns`` may be used to search for dns names in
+the format ``<distro>-mirror`` in each of the following:

 - fqdn of this host per cloud metadata
 - localdomain
-- 
cgit v1.2.3


From 99faf3ece1badc566e7e75e769ff374250196197 Mon Sep 17 00:00:00 2001
From: Ryan Harper
Date: Wed, 29 Mar 2017 22:03:04 -0500
Subject: doc: document network configuration defaults policy and formats.

Add documentation for cloud-init networking configuration formats,
default behavior, policy and other specific details about how network
config is consumed and utilized.
--- doc/rtd/index.rst | 1 + doc/rtd/topics/datasources/altcloud.rst | 2 + doc/rtd/topics/datasources/azure.rst | 2 + doc/rtd/topics/datasources/cloudsigma.rst | 2 + doc/rtd/topics/datasources/cloudstack.rst | 2 + doc/rtd/topics/datasources/configdrive.rst | 2 + doc/rtd/topics/datasources/digitalocean.rst | 2 + doc/rtd/topics/datasources/ec2.rst | 2 + doc/rtd/topics/datasources/fallback.rst | 2 + doc/rtd/topics/datasources/maas.rst | 2 + doc/rtd/topics/datasources/nocloud.rst | 39 ++ doc/rtd/topics/datasources/opennebula.rst | 2 + doc/rtd/topics/datasources/openstack.rst | 2 + doc/rtd/topics/datasources/ovf.rst | 2 + doc/rtd/topics/datasources/smartos.rst | 2 + doc/rtd/topics/network-config-format-eni.rst | 20 + doc/rtd/topics/network-config-format-v1.rst | 563 +++++++++++++++++++++++++++ doc/rtd/topics/network-config-format-v2.rst | 503 ++++++++++++++++++++++++ doc/rtd/topics/network-config.rst | 254 ++++++++++++ 19 files changed, 1406 insertions(+) create mode 100644 doc/rtd/topics/network-config-format-eni.rst create mode 100644 doc/rtd/topics/network-config-format-v1.rst create mode 100644 doc/rtd/topics/network-config-format-v2.rst create mode 100644 doc/rtd/topics/network-config.rst diff --git a/doc/rtd/index.rst b/doc/rtd/index.rst index 8f163b6a..a691103e 100644 --- a/doc/rtd/index.rst +++ b/doc/rtd/index.rst @@ -38,6 +38,7 @@ initialization of a cloud instance. topics/logging.rst topics/modules.rst topics/merging.rst + topics/network-config.rst topics/vendordata.rst topics/moreinfo.rst topics/hacking.rst diff --git a/doc/rtd/topics/datasources/altcloud.rst b/doc/rtd/topics/datasources/altcloud.rst index 202b0a4a..eeb197f2 100644 --- a/doc/rtd/topics/datasources/altcloud.rst +++ b/doc/rtd/topics/datasources/altcloud.rst @@ -1,3 +1,5 @@ +.. _datasource_alt_cloud: + Alt Cloud ========= diff --git a/doc/rtd/topics/datasources/azure.rst b/doc/rtd/topics/datasources/azure.rst index 18d7c506..4a3735b5 100644 --- a/doc/rtd/topics/datasources/azure.rst +++ b/doc/rtd/topics/datasources/azure.rst @@ -1,3 +1,5 @@ +.. _datasource_azure: + Azure ===== diff --git a/doc/rtd/topics/datasources/cloudsigma.rst b/doc/rtd/topics/datasources/cloudsigma.rst index 54963f61..86b834c8 100644 --- a/doc/rtd/topics/datasources/cloudsigma.rst +++ b/doc/rtd/topics/datasources/cloudsigma.rst @@ -1,3 +1,5 @@ +.. _datasource_cloudsigma: + CloudSigma ========== diff --git a/doc/rtd/topics/datasources/cloudstack.rst b/doc/rtd/topics/datasources/cloudstack.rst index 04603d9c..225093a1 100644 --- a/doc/rtd/topics/datasources/cloudstack.rst +++ b/doc/rtd/topics/datasources/cloudstack.rst @@ -1,3 +1,5 @@ +.. _datasource_cloudstack: + CloudStack ========== diff --git a/doc/rtd/topics/datasources/configdrive.rst b/doc/rtd/topics/datasources/configdrive.rst index 11dd52ab..f1a488a2 100644 --- a/doc/rtd/topics/datasources/configdrive.rst +++ b/doc/rtd/topics/datasources/configdrive.rst @@ -1,3 +1,5 @@ +.. _datasource_config_drive: + Config Drive ============ diff --git a/doc/rtd/topics/datasources/digitalocean.rst b/doc/rtd/topics/datasources/digitalocean.rst index c6f5bc74..938ede89 100644 --- a/doc/rtd/topics/datasources/digitalocean.rst +++ b/doc/rtd/topics/datasources/digitalocean.rst @@ -1,3 +1,5 @@ +.. _datasource_digital_ocean: + Digital Ocean ============= diff --git a/doc/rtd/topics/datasources/ec2.rst b/doc/rtd/topics/datasources/ec2.rst index 4810c984..3bc66e17 100644 --- a/doc/rtd/topics/datasources/ec2.rst +++ b/doc/rtd/topics/datasources/ec2.rst @@ -1,3 +1,5 @@ +.. 
_datasource_ec2:
+
 Amazon EC2
 ==========

diff --git a/doc/rtd/topics/datasources/fallback.rst b/doc/rtd/topics/datasources/fallback.rst
index 1eb01dd0..2b133fcd 100644
--- a/doc/rtd/topics/datasources/fallback.rst
+++ b/doc/rtd/topics/datasources/fallback.rst
@@ -1,3 +1,5 @@
+.. _datasource_fallback:
+
 Fallback/None
 =============

diff --git a/doc/rtd/topics/datasources/maas.rst b/doc/rtd/topics/datasources/maas.rst
index f495dd4b..85c853e9 100644
--- a/doc/rtd/topics/datasources/maas.rst
+++ b/doc/rtd/topics/datasources/maas.rst
@@ -1,3 +1,5 @@
+.. _datasource_maas:
+
 MAAS
 ====

diff --git a/doc/rtd/topics/datasources/nocloud.rst b/doc/rtd/topics/datasources/nocloud.rst
index b9ab5f11..0159e853 100644
--- a/doc/rtd/topics/datasources/nocloud.rst
+++ b/doc/rtd/topics/datasources/nocloud.rst
@@ -1,3 +1,5 @@
+.. _datasource_nocloud:
+
 NoCloud
 =======

@@ -70,6 +72,43 @@ Example metadata:
     gateway 192.168.1.254
   hostname: myhost

+
+Network configuration can also be provided to cloud-init in either
+:ref:`network_config_v1` or :ref:`network_config_v2` format by providing
+that yaml formatted data in a file named ``network-config``. If found,
+this file will override a ``network-interfaces`` file.
+
+See the examples below. Note specifically that this file does not
+have a top level ``network`` key as it is already assumed to
+be network configuration based on the filename.
+
+.. code:: yaml
+
+   version: 1
+   config:
+     - type: physical
+       name: interface0
+       mac_address: "52:54:00:12:34:00"
+       subnets:
+         - type: static
+           address: 192.168.1.10
+           netmask: 255.255.255.0
+           gateway: 192.168.1.254
+
+
+.. code:: yaml
+
+   version: 2
+   ethernets:
+     interface0:
+       match:
+         mac_address: "52:54:00:12:34:00"
+       set-name: interface0
+       addresses:
+         - 192.168.1.10/24
+       gateway4: 192.168.1.254
+
+
 .. _iso9660: https://en.wikipedia.org/wiki/ISO_9660
 .. _vfat: https://en.wikipedia.org/wiki/File_Allocation_Table
 .. vi: textwidth=78

diff --git a/doc/rtd/topics/datasources/opennebula.rst b/doc/rtd/topics/datasources/opennebula.rst
index 1b90a9c7..7c0367c4 100644
--- a/doc/rtd/topics/datasources/opennebula.rst
+++ b/doc/rtd/topics/datasources/opennebula.rst
@@ -1,3 +1,5 @@
+.. _datasource_opennebula:
+
 OpenNebula
 ==========

diff --git a/doc/rtd/topics/datasources/openstack.rst b/doc/rtd/topics/datasources/openstack.rst
index 164b0e0c..607b70f3 100644
--- a/doc/rtd/topics/datasources/openstack.rst
+++ b/doc/rtd/topics/datasources/openstack.rst
@@ -1,3 +1,5 @@
+.. _datasource_openstack:
+
 OpenStack
 =========

diff --git a/doc/rtd/topics/datasources/ovf.rst b/doc/rtd/topics/datasources/ovf.rst
index a0770332..c312617f 100644
--- a/doc/rtd/topics/datasources/ovf.rst
+++ b/doc/rtd/topics/datasources/ovf.rst
@@ -1,3 +1,5 @@
+.. _datasource_ovf:
+
 OVF
 ===

diff --git a/doc/rtd/topics/datasources/smartos.rst b/doc/rtd/topics/datasources/smartos.rst
index a1e1542b..cb9a128e 100644
--- a/doc/rtd/topics/datasources/smartos.rst
+++ b/doc/rtd/topics/datasources/smartos.rst
@@ -1,3 +1,5 @@
+.. _datasource_smartos:
+
 SmartOS Datasource
 ==================

diff --git a/doc/rtd/topics/network-config-format-eni.rst b/doc/rtd/topics/network-config-format-eni.rst
new file mode 100644
index 00000000..b0904952
--- /dev/null
+++ b/doc/rtd/topics/network-config-format-eni.rst
@@ -0,0 +1,20 @@
+.. _network_config_eni:
+
+Network Configuration ENI (Legacy)
+----------------------------------
+
+`Cloud-init`_ supports reading and writing network config in the ``ENI``
+format which is consumed by the ``ifupdown`` tool to parse and apply network
+configuration.
+
+As an input format this is **legacy**. In cases where ENI format is available
+and another format is also available, `Cloud-init`_ will prefer to use the
+other format. This can happen in either the :ref:`datasource_nocloud` or
+:ref:`datasource_openstack` datasources.
+
+Please reference existing `documentation`_ for the
+``/etc/network/interfaces(5)`` format.
+
+.. _Cloud-init: https://launchpad.net/cloud-init
+.. _documentation: http://manpages.ubuntu.com/manpages/trusty/en/man5/interfaces.5.html
+.. vi: textwidth=78

diff --git a/doc/rtd/topics/network-config-format-v1.rst b/doc/rtd/topics/network-config-format-v1.rst
new file mode 100644
index 00000000..36326b59
--- /dev/null
+++ b/doc/rtd/topics/network-config-format-v1.rst
@@ -0,0 +1,563 @@
+.. _network_config_v1:
+
+Networking Config Version 1
+===========================
+
+This network configuration format lets users customize their instance's
+networking interfaces by assigning subnet configuration, virtual device
+creation (bonds, bridges, vlans), routes and DNS configuration.
+
+Required elements of a Network Config Version 1 are ``config`` and
+``version``.
+
+Cloud-init will read this format from system config.
+For example the following could be present in
+``/etc/cloud/cloud.cfg.d/custom-networking.cfg``:
+
+.. code-block:: yaml
+
+   network:
+     version: 1
+     config:
+       - type: physical
+         name: eth0
+         subnets:
+           - type: dhcp
+
+The :ref:`datasource_nocloud` datasource can also provide cloud-init
+networking configuration in this format.
+
+Configuration Types
+-------------------
+Within the network ``config`` portion, users include a list of configuration
+types. The current list of supported ``type`` values is as follows:
+
+- Physical (``physical``)
+- Bond (``bond``)
+- Bridge (``bridge``)
+- VLAN (``vlan``)
+- Nameserver (``nameserver``)
+- Route (``route``)
+
+Physical, Bond, Bridge and VLAN types may also include IP configuration under
+the key ``subnets``.
+
+- Subnet/IP (``subnets``)
+
+
+Physical
+~~~~~~~~
+The ``physical`` type configuration represents a "physical" network device,
+typically Ethernet-based. At least one of these entries is required for
+external network connectivity. Type ``physical`` requires only one key:
+``name``. A ``physical`` device may contain some or all of the following
+keys:
+
+**name**: *<desired name>*
+
+A device's name must be less than 15 characters. Names exceeding the maximum
+will be truncated. This is a limitation of the Linux kernel network-device
+structure.
+
+**mac_address**: *<MAC Address>*
+
+The MAC Address is a device unique identifier that most Ethernet-based network
+devices possess. Specifying a MAC Address is optional.
+
+
+.. note::
+
+  Cloud-init will handle the persistent mapping between a
+  device's ``name`` and the ``mac_address``.
+
+**mtu**: *<MTU SizeBytes>*
+
+The MTU key represents a device's Maximum Transmission Unit, the largest size
+packet or frame, specified in octets (eight-bit bytes), that can be sent in a
+packet- or frame-based network. Specifying ``mtu`` is optional.
+
+.. note::
+
+  The possible supported values of a device's MTU are not available at
+  configuration time. It's possible to specify a value too large or too
+  small for a device and may be ignored by the device.
+
+
+**Physical Example**::
+
+  network:
+    version: 1
+    config:
+      # Simple network adapter
+      - type: physical
+        name: interface0
+        mac_address: 00:11:22:33:44:55
+      # Second nic with Jumbo frames
+      - type: physical
+        name: jumbo0
+        mac_address: aa:11:22:33:44:55
+        mtu: 9000
+      # 10G pair
+      - type: physical
+        name: gbe0
+        mac_address: cd:11:22:33:44:00
+      - type: physical
+        name: gbe1
+        mac_address: cd:11:22:33:44:02
+
+Bond
+~~~~
+A ``bond`` type will configure a Linux software Bond with one or more network
+devices. A ``bond`` type requires the following keys:
+
+**name**: *<desired name>*
+
+A device's name must be less than 15 characters. Names exceeding the maximum
+will be truncated. This is a limitation of the Linux kernel network-device
+structure.
+
+**mac_address**: *<MAC Address>*
+
+When specifying MAC Address on a bond this value will be assigned to the bond
+device and may be different from the MAC address of any of the underlying
+bond interfaces. Specifying a MAC Address is optional. If ``mac_address`` is
+not present, then the bond will use one of the MAC Address values from one of
+the bond interfaces.
+
+
+**bond_interfaces**: *<List of network device names>*
+
+The ``bond_interfaces`` key accepts a list of network device ``name`` values
+from the configuration. This list may be empty.
+
+**params**: *<Dictionary of key: value bonding parameter pairs>*
+
+The ``params`` key in a bond holds a dictionary of bonding parameters.
+This dictionary may be empty. For more details on what the various bonding
+parameters mean, please read the Linux kernel's ``bonding.txt``
+documentation.
+
+Valid ``params`` keys are:
+
+  - ``active_slave``: Set bond attribute
+  - ``ad_actor_key``: Set bond attribute
+  - ``ad_actor_sys_prio``: Set bond attribute
+  - ``ad_actor_system``: Set bond attribute
+  - ``ad_aggregator``: Set bond attribute
+  - ``ad_num_ports``: Set bond attribute
+  - ``ad_partner_key``: Set bond attribute
+  - ``ad_partner_mac``: Set bond attribute
+  - ``ad_select``: Set bond attribute
+  - ``ad_user_port_key``: Set bond attribute
+  - ``all_slaves_active``: Set bond attribute
+  - ``arp_all_targets``: Set bond attribute
+  - ``arp_interval``: Set bond attribute
+  - ``arp_ip_target``: Set bond attribute
+  - ``arp_validate``: Set bond attribute
+  - ``downdelay``: Set bond attribute
+  - ``fail_over_mac``: Set bond attribute
+  - ``lacp_rate``: Set bond attribute
+  - ``lp_interval``: Set bond attribute
+  - ``miimon``: Set bond attribute
+  - ``mii_status``: Set bond attribute
+  - ``min_links``: Set bond attribute
+  - ``mode``: Set bond attribute
+  - ``num_grat_arp``: Set bond attribute
+  - ``num_unsol_na``: Set bond attribute
+  - ``packets_per_slave``: Set bond attribute
+  - ``primary``: Set bond attribute
+  - ``primary_reselect``: Set bond attribute
+  - ``queue_id``: Set bond attribute
+  - ``resend_igmp``: Set bond attribute
+  - ``slaves``: Set bond attribute
+  - ``tlb_dynamic_lb``: Set bond attribute
+  - ``updelay``: Set bond attribute
+  - ``use_carrier``: Set bond attribute
+  - ``xmit_hash_policy``: Set bond attribute
+
+**Bond Example**::
+
+  network:
+    version: 1
+    config:
+      # Simple network adapter
+      - type: physical
+        name: interface0
+        mac_address: 00:11:22:33:44:55
+      # 10G pair
+      - type: physical
+        name: gbe0
+        mac_address: cd:11:22:33:44:00
+      - type: physical
+        name: gbe1
+        mac_address: cd:11:22:33:44:02
+      - type: bond
+        name: bond0
+        bond_interfaces:
+          - gbe0
+          - gbe1
+        params:
+          bond-mode: active-backup
+
+Bridge
+~~~~~~
+Type ``bridge`` requires the following keys:
+
+- ``name``: Set the name of the bridge.
+- ``bridge_interfaces``: Specify the ports of a bridge via their ``name``.
+  This list may be empty.
+- ``params``: A list of bridge params. For more details, please read the
+  bridge-utils-interfaces manpage.
+
+Valid keys are:
+
+  - ``bridge_ageing``: Set the bridge's ageing value.
+  - ``bridge_bridgeprio``: Set the bridge device network priority.
+  - ``bridge_fd``: Set the bridge's forward delay.
+  - ``bridge_hello``: Set the bridge's hello value.
+  - ``bridge_hw``: Set the bridge's MAC address.
+  - ``bridge_maxage``: Set the bridge's maxage value.
+  - ``bridge_maxwait``: Set how long network scripts should wait for the
+    bridge to be up.
+  - ``bridge_pathcost``: Set the cost of a specific port on the bridge.
+  - ``bridge_portprio``: Set the priority of a specific port on the bridge.
+  - ``bridge_ports``: List of devices that are part of the bridge.
+  - ``bridge_stp``: Set spanning tree protocol on or off.
+  - ``bridge_waitport``: Set amount of time in seconds to wait on specific
+    ports to become available.
+
+
+**Bridge Example**::
+
+  network:
+    version: 1
+    config:
+      # Simple network adapter
+      - type: physical
+        name: interface0
+        mac_address: 00:11:22:33:44:55
+      # Second nic with Jumbo frames
+      - type: physical
+        name: jumbo0
+        mac_address: aa:11:22:33:44:55
+        mtu: 9000
+      - type: bridge
+        name: br0
+        bridge_interfaces:
+          - jumbo0
+        params:
+          bridge_ageing: 250
+          bridge_bridgeprio: 22
+          bridge_fd: 1
+          bridge_hello: 1
+          bridge_maxage: 10
+          bridge_maxwait: 0
+          bridge_pathcost:
+            - jumbo0 75
+          bridge_portprio:
+            - jumbo0 28
+          bridge_stp: 'off'
+          bridge_waitport:
+            - jumbo0 0
+
+
+VLAN
+~~~~
+Type ``vlan`` requires the following keys:
+
+- ``name``: Set the name of the VLAN.
+- ``vlan_link``: Specify the underlying link via its ``name``.
+- ``vlan_id``: Specify the VLAN numeric id.
+
+**VLAN Example**::
+
+  network:
+    version: 1
+    config:
+      # Physical interfaces.
+      - type: physical
+        name: eth0
+        mac_address: "c0:d6:9f:2c:e8:80"
+      # VLAN interface.
+      - type: vlan
+        name: eth0.101
+        vlan_link: eth0
+        vlan_id: 101
+        mtu: 1500
+
+Nameserver
+~~~~~~~~~~
+
+Users can specify a ``nameserver`` type. Nameserver dictionaries include
+the following keys:
+
+- ``address``: List of IPv4 or IPv6 addresses of nameservers.
+- ``search``: List of hostnames to include in the resolv.conf search path.
+
+**Nameserver Example**::
+
+  network:
+    version: 1
+    config:
+      - type: physical
+        name: interface0
+        mac_address: 00:11:22:33:44:55
+        subnets:
+          - type: static
+            address: 192.168.23.14/27
+            gateway: 192.168.23.1
+      - type: nameserver
+        address:
+          - 192.168.23.2
+          - 8.8.8.8
+        search:
+          - exemplary
+
+
+
+Route
+~~~~~
+
+Users can include static routing information as well. A ``route`` dictionary
+has the following keys:
+
+- ``destination``: IPv4 network address with CIDR netmask notation.
+- ``gateway``: IPv4 gateway address with CIDR netmask notation.
+- ``metric``: Integer which sets the network metric value for this route.
+
+**Route Example**::
+
+  network:
+    version: 1
+    config:
+      - type: physical
+        name: interface0
+        mac_address: 00:11:22:33:44:55
+        subnets:
+          - type: static
+            address: 192.168.23.14/24
+            gateway: 192.168.23.1
+      - type: route
+        destination: 192.168.24.0/24
+        gateway: 192.168.24.1
+        metric: 3
+
+Subnet/IP
+~~~~~~~~~
+
+For any network device (one of the Config Types) users can define a list of
+``subnets`` which contain ip configuration dictionaries. Multiple subnet
+entries will create interface aliases, allowing a single interface to use
+different ip configurations.
+
+Valid keys for ``subnets`` include the following:
+
+- ``type``: Specify the subnet type.
+- ``control``: Specify manual, auto or hotplug. Indicates how the interface
+  will be handled during boot.
+- ``address``: IPv4 or IPv6 address. It may include CIDR netmask notation.
+- ``netmask``: IPv4 subnet mask in dotted format or CIDR notation.
+- ``gateway``: IPv4 address of the default gateway for this subnet.
+- ``dns_nameservers``: Specify a list of IPv4 dns server IPs to end up in
+  resolv.conf.
+- ``dns_search``: Specify a list of search paths to be included in
+  resolv.conf.
+- ``routes``: Specify a list of routes for a given interface.
+
+
+Subnet types are one of the following:
+
+- ``dhcp4``: Configure this interface with IPv4 dhcp.
+- ``dhcp``: Alias for ``dhcp4``.
+- ``dhcp6``: Configure this interface with IPv6 dhcp.
+- ``static``: Configure this interface with a static IPv4 address.
+- ``static6``: Configure this interface with a static IPv6 address.
+
+When making use of ``dhcp`` types, no additional configuration is needed in
+the subnet dictionary.
+
+
+**Subnet DHCP Example**::
+
+  network:
+    version: 1
+    config:
+      - type: physical
+        name: interface0
+        mac_address: 00:11:22:33:44:55
+        subnets:
+          - type: dhcp
+
+
+**Subnet Static Example**::
+
+  network:
+    version: 1
+    config:
+      - type: physical
+        name: interface0
+        mac_address: 00:11:22:33:44:55
+        subnets:
+          - type: static
+            address: 192.168.23.14/27
+            gateway: 192.168.23.1
+            dns_nameservers:
+              - 192.168.23.2
+              - 8.8.8.8
+            dns_search:
+              - exemplary.maas
+
+The following will result in an ``interface0`` using DHCP and ``interface0:1``
+using the static subnet configuration.
+
+**Multiple subnet Example**::
+
+  network:
+    version: 1
+    config:
+      - type: physical
+        name: interface0
+        mac_address: 00:11:22:33:44:55
+        subnets:
+          - type: dhcp
+          - type: static
+            address: 192.168.23.14/27
+            gateway: 192.168.23.1
+            dns_nameservers:
+              - 192.168.23.2
+              - 8.8.8.8
+            dns_search:
+              - exemplary
+
+**Subnet with routes Example**::
+
+  network:
+    version: 1
+    config:
+      - type: physical
+        name: interface0
+        mac_address: 00:11:22:33:44:55
+        subnets:
+          - type: dhcp
+          - type: static
+            address: 10.184.225.122
+            netmask: 255.255.255.252
+            routes:
+              - gateway: 10.184.225.121
+                netmask: 255.240.0.0
+                network: 10.176.0.0
+              - gateway: 10.184.225.121
+                netmask: 255.240.0.0
+                network: 10.208.0.0
+
+
+Multi-layered configurations
+----------------------------
+
+Complex networking sometimes uses layers of configuration. The syntax allows
+users to build those layers one at a time. All of the virtual network devices
+supported allow specifying an underlying device by their ``name`` value.
+
+**Bonded VLAN Example**::
+
+  network:
+    version: 1
+    config:
+      # 10G pair
+      - type: physical
+        name: gbe0
+        mac_address: cd:11:22:33:44:00
+      - type: physical
+        name: gbe1
+        mac_address: cd:11:22:33:44:02
+      # Bond.
+      - type: bond
+        name: bond0
+        bond_interfaces:
+          - gbe0
+          - gbe1
+        params:
+          bond-mode: 802.3ad
+          bond-lacp-rate: fast
+      # A Bond VLAN.
+      - type: vlan
+        name: bond0.200
+        vlan_link: bond0
+        vlan_id: 200
+        subnets:
+          - type: dhcp4
+
+More Examples
+-------------
+Some more examples to explore the various options available.
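+
+As a first sketch (illustrative only, reusing the addresses and keys from
+the examples above), a bond carrying a static subnet with DNS settings
+combines the ``bond`` and ``subnets`` types described earlier::
+
+  network:
+    version: 1
+    config:
+      - type: physical
+        name: gbe0
+        mac_address: cd:11:22:33:44:00
+      - type: physical
+        name: gbe1
+        mac_address: cd:11:22:33:44:02
+      - type: bond
+        name: bond0
+        bond_interfaces:
+          - gbe0
+          - gbe1
+        params:
+          bond-mode: active-backup
+        subnets:
+          - type: static
+            address: 192.168.23.14/27
+            gateway: 192.168.23.1
+            dns_nameservers:
+              - 192.168.23.2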
+
+**Multiple VLAN example**::
+
+  network:
+    version: 1
+    config:
+      - id: eth0
+        mac_address: d4:be:d9:a8:49:13
+        mtu: 1500
+        name: eth0
+        subnets:
+          - address: 10.245.168.16/21
+            dns_nameservers:
+              - 10.245.168.2
+            gateway: 10.245.168.1
+            type: static
+        type: physical
+      - id: eth1
+        mac_address: d4:be:d9:a8:49:15
+        mtu: 1500
+        name: eth1
+        subnets:
+          - address: 10.245.188.2/24
+            dns_nameservers: []
+            type: static
+        type: physical
+      - id: eth1.2667
+        mtu: 1500
+        name: eth1.2667
+        subnets:
+          - address: 10.245.184.2/24
+            dns_nameservers: []
+            type: static
+        type: vlan
+        vlan_id: 2667
+        vlan_link: eth1
+      - id: eth1.2668
+        mtu: 1500
+        name: eth1.2668
+        subnets:
+          - address: 10.245.185.1/24
+            dns_nameservers: []
+            type: static
+        type: vlan
+        vlan_id: 2668
+        vlan_link: eth1
+      - id: eth1.2669
+        mtu: 1500
+        name: eth1.2669
+        subnets:
+          - address: 10.245.186.1/24
+            dns_nameservers: []
+            type: static
+        type: vlan
+        vlan_id: 2669
+        vlan_link: eth1
+      - id: eth1.2670
+        mtu: 1500
+        name: eth1.2670
+        subnets:
+          - address: 10.245.187.2/24
+            dns_nameservers: []
+            type: static
+        type: vlan
+        vlan_id: 2670
+        vlan_link: eth1
+      - address: 10.245.168.2
+        search:
+          - dellstack
+        type: nameserver
+
+.. vi: textwidth=78

diff --git a/doc/rtd/topics/network-config-format-v2.rst b/doc/rtd/topics/network-config-format-v2.rst
new file mode 100644
index 00000000..335d236a
--- /dev/null
+++ b/doc/rtd/topics/network-config-format-v2.rst
@@ -0,0 +1,503 @@
+.. _network_config_v2:
+
+Networking Config Version 2
+===========================
+
+Cloud-init's support for Version 2 network config is a subset of the
+version 2 format defined for the `netplan`_ tool. Cloud-init supports
+both reading and writing of Version 2; the latter support requires a
+distro with `netplan`_ present.
+
+The ``network`` key has two required elements: it must include
+``version: 2`` and one or more of the possible device ``types``.
+
+Cloud-init will read this format from system config.
+For example the following could be present in
+``/etc/cloud/cloud.cfg.d/custom-networking.cfg``::
+
+  network:
+    version: 2
+    ethernets: []
+
+It may also be provided in other locations including the
+:ref:`datasource_nocloud` datasource; see :ref:`default_behavior` for
+other places.
+
+Supported device ``types`` values are as follows:
+
+- Ethernets (``ethernets``)
+- Bonds (``bonds``)
+- Bridges (``bridges``)
+- VLANs (``vlans``)
+
+Each type block contains device definitions as a map whose keys are called
+"configuration IDs". Each entry under a type may include IP and/or
+device configuration.
+
+Cloud-init does not currently support the ``wifis`` type that is present in
+native `netplan`_.
+
+
+Device configuration IDs
+------------------------
+
+The key names below the per-device-type definition maps (like ``ethernets:``)
+are called "ID"s. They must be unique throughout the entire set of
+configuration files. Their primary purpose is to serve as anchor names for
+composite devices, for example to enumerate the members of a bridge that is
+currently being defined.
+
+There are two physically/structurally different classes of device definitions,
+and the ID field has a different interpretation for each:
+
+Physical devices
+
+: (Examples: ethernet, wifi) These can dynamically come and go between
+  reboots and even during runtime (hotplugging). In the generic case, they
+  can be selected by ``match:`` rules on desired properties, such as name/name
+  pattern, MAC address, driver, or device paths. In general these will match
+  any number of devices (unless they refer to properties which are unique
+  such as the full path or MAC address), so without further knowledge about
+  the hardware these will always be considered as a group.
+
+  It is valid to specify no match rules at all, in which case the ID field is
+  simply the interface name to be matched. This is mostly useful if you want
+  to keep simple cases simple, and it's how network device configuration has
+  been done for a long time.
+
+  If there are ``match:`` rules, then the ID field is a purely opaque name
+  which is only being used for references from definitions of compound
+  devices in the config.
+
+Virtual devices
+
+: (Examples: veth, bridge, bond) These are fully under the control of the
+  config file(s) and the network stack. I.e. these devices are being created
+  instead of matched. Thus ``match:`` and ``set-name:`` are not applicable for
+  these, and the ID field is the name of the created virtual device.
+
+Common properties for physical device types
+-------------------------------------------
+
+**match**: *<(mapping)>*
+
+This selects a subset of available physical devices by various hardware
+properties. The following configuration will then apply to all matching
+devices, as soon as they appear. *All* specified properties must match.
+The following properties for creating matches are supported:
+
+**name**: *<(scalar)>*
+
+Current interface name. Globs are supported; matching on names is the
+primary use case for globs, as selecting one fixed name can be more easily
+achieved with having no ``match:`` at all and just using the ID (see
+above). Note that currently only networkd supports globbing,
+NetworkManager does not.
+
+**macaddress**: *<(scalar)>*
+
+Device's MAC address in the form "XX:XX:XX:XX:XX:XX". Globs are not allowed.
+
+**driver**: *<(scalar)>*
+
+Kernel driver name, corresponding to the ``DRIVER`` udev property. Globs are
+supported. Matching on driver is *only* supported with networkd.
+
+**Examples**::
+
+  # all cards on second PCI bus
+  match:
+    name: enp2*
+
+  # fixed MAC address
+  match:
+    macaddress: 11:22:33:AA:BB:FF
+
+  # first card of driver ``ixgbe``
+  match:
+    driver: ixgbe
+    name: en*s0
+
+**set-name**: *<(scalar)>*
+
+When matching on unique properties such as path or MAC, or with additional
+assumptions such as "there will only ever be one wifi device",
+match rules can be written so that they only match one device. Then this
+property can be used to give that device a more specific/desirable/nicer
+name than the default from udev's ifnames. Any additional device that
+satisfies the match rules will then fail to get renamed and keep the
+original kernel name (and dmesg will show an error).
+
+**wakeonlan**: *<(bool)>*
+
+Enable wake on LAN. Off by default.
+
+
+Common properties for all device types
+--------------------------------------
+
+**renderer**: *<(scalar)>*
+
+Use the given networking backend for this definition. Currently supported are
+``networkd`` and ``NetworkManager``. This property can be specified globally
+in ``networks:``, for a device type (in e.g. ``ethernets:``) or
+for a particular device definition. Default is ``networkd``.
+
+.. note::
+
+  Cloud-init only supports the networkd backend when rendering version 2
+  config to the instance.
+
+**dhcp4**: *<(bool)>*
+
+Enable DHCP for IPv4. Off by default.
+
+**dhcp6**: *<(bool)>*
+
+Enable DHCP for IPv6. Off by default.
+
+**addresses**: *<(sequence of scalars)>*
+
+Add static addresses to the interface in addition to the ones received
+through DHCP or RA. Each sequence entry is in CIDR notation, i.e. of the
+form ``addr/prefixlen``. ``addr`` is an IPv4 or IPv6 address as recognized
+by ``inet_pton(3)`` and ``prefixlen`` the number of bits of the subnet.
+
+Example: ``addresses: [192.168.14.2/24, 2001:1::1/64]``
+
+**gateway4**: or **gateway6**: *<(scalar)>*
+
+Set default gateway for IPv4/6, for manual address configuration. This
+requires setting ``addresses`` too. Gateway IPs must be in a form
+recognized by ``inet_pton(3)``.
+
+Example for IPv4: ``gateway4: 172.16.0.1``
+Example for IPv6: ``gateway6: 2001:4::1``
+
+**nameservers**: *<(mapping)>*
+
+Set DNS servers and search domains, for manual address configuration. There
+are two supported fields: ``addresses:`` is a list of IPv4 or IPv6 addresses
+similar to ``gateway*``, and ``search:`` is a list of search domains.
+
+Example: ::
+
+  nameservers:
+    search: [lab, home]
+    addresses: [8.8.8.8, FEDC::1]
+
+**routes**: *<(sequence of mapping)>*
+
+Add device specific routes. Each mapping must include ``to`` and ``via``
+keys, each with an IPv4 or IPv6 address as value. ``metric`` is an optional
+value.
+
+Example: ::
+
+  routes:
+   - to: 0.0.0.0/0
+     via: 10.23.2.1
+     metric: 3
+
+Ethernets
+~~~~~~~~~
+Ethernet device definitions do not support any specific properties beyond the
+common ones described above.
+
+Bonds
+~~~~~
+
+**interfaces**: *<(sequence of scalars)>*
+
+All devices matching this ID list will be added to the bond.
+
+Example: ::
+
+  ethernets:
+    switchports:
+      match: {name: "enp2*"}
+    [...]
+  bonds:
+    bond0:
+      interfaces: [switchports]
+
+**parameters**: *<(mapping)>*
+
+Customization parameters for special bonding options. Time values are
+specified in seconds unless otherwise stated.
+
+**mode**: *<(scalar)>*
+
+Set the bonding mode used for the interfaces. The default is
+``balance-rr`` (round robin). Possible values are ``balance-rr``,
+``active-backup``, ``balance-xor``, ``broadcast``, ``802.3ad``,
+``balance-tlb``, and ``balance-alb``.
+
+**lacp-rate**: *<(scalar)>*
+
+Set the rate at which LACPDUs are transmitted. This is only useful
+in 802.3ad mode. Possible values are ``slow`` (30 seconds, default),
+and ``fast`` (every second).
+
+**mii-monitor-interval**: *<(scalar)>*
+
+Specifies the interval for MII monitoring (verifying if an interface
+of the bond has carrier). The default is ``0``, which disables MII
+monitoring.
+
+**min-links**: *<(scalar)>*
+
+The minimum number of links up in a bond to consider the bond
+interface to be up.
+
+**transmit-hash-policy**: *<(scalar)>*
+
+Specifies the transmit hash policy for the selection of slaves. This
+is only useful in balance-xor, 802.3ad and balance-tlb modes.
+Possible values are ``layer2``, ``layer3+4``, ``layer2+3``,
+``encap2+3``, and ``encap3+4``.
+
+**ad-select**: *<(scalar)>*
+
+Set the aggregation selection mode. Possible values are ``stable``,
+``bandwidth``, and ``count``. This option is only used in 802.3ad mode.
+
+**all-slaves-active**: *<(bool)>*
+
+If the bond should drop duplicate frames received on inactive ports,
+set this option to ``false``. If they should be delivered, set this
+option to ``true``. The default value is ``false``, and is the desirable
+behavior in most situations.
+
+**arp-interval**: *<(scalar)>*
+
+Set the interval value for how frequently ARP link monitoring should
+happen. The default value is ``0``, which disables ARP monitoring.
+
+**arp-ip-targets**: *<(sequence of scalars)>*
+
+IPs of other hosts on the link which should be sent ARP requests in
+order to validate that a slave is up. This option is only used when
+``arp-interval`` is set to a value other than ``0``. At least one IP
+address must be given for ARP link monitoring to function. Only IPv4
+addresses are supported. You can specify up to 16 IP addresses. The
+default value is an empty list.
+
+**arp-validate**: *<(scalar)>*
+
+Configure how ARP replies are to be validated when using ARP link
+monitoring. Possible values are ``none``, ``active``, ``backup``,
+and ``all``.
+
+**arp-all-targets**: *<(scalar)>*
+
+Specify whether to use any ARP IP target being up as sufficient for
+a slave to be considered up, or if all the targets must be up. This
+is only used for ``active-backup`` mode when ``arp-validate`` is
+enabled. Possible values are ``any`` and ``all``.
+
+**up-delay**: *<(scalar)>*
+
+Specify the delay before enabling a link once the link is physically
+up. The default value is ``0``.
+
+**down-delay**: *<(scalar)>*
+
+Specify the delay before disabling a link once the link has been
+lost. The default value is ``0``.
+
+**fail-over-mac-policy**: *<(scalar)>*
+
+Set whether to set all slaves to the same MAC address when adding
+them to the bond, or how else the system should handle MAC addresses.
+The possible values are ``none``, ``active``, and ``follow``.
+
+**gratuitious-arp**: *<(scalar)>*
+
+Specify how many ARP packets to send after failover. Once a link is
+up on a new slave, a notification is sent and possibly repeated if
+this value is set to a number greater than ``1``. The default value
+is ``1`` and valid values are between ``1`` and ``255``. This only
+affects ``active-backup`` mode.
+
+**packets-per-slave**: *<(scalar)>*
+
+In ``balance-rr`` mode, specifies the number of packets to transmit
+on a slave before switching to the next. When this value is set to
+``0``, slaves are chosen at random. Allowable values are between
+``0`` and ``65535``. The default value is ``1``. This setting is
+only used in ``balance-rr`` mode.
+
+**primary-reselect-policy**: *<(scalar)>*
+
+Set the reselection policy for the primary slave. On failure of the
+active slave, the system will use this policy to decide how the new
+active slave will be chosen and how recovery will be handled. The
+possible values are ``always``, ``better``, and ``failure``.
+
+**learn-packet-interval**: *<(scalar)>*
+
+Specify the interval between sending learning packets to each slave.
+The value range is between ``1`` and ``0x7fffffff``. The default
+value is ``1``. This option only affects ``balance-tlb`` and
+``balance-alb`` modes.
+
+
+Bridges
+~~~~~~~
+
+**interfaces**: *<(sequence of scalars)>*
+
+All devices matching this ID list will be added to the bridge.
+
+Example: ::
+
+  ethernets:
+    switchports:
+      match: {name: "enp2*"}
+    [...]
+  bridges:
+    br0:
+      interfaces: [switchports]
+
+**parameters**: *<(mapping)>*
+
+Customization parameters for special bridging options. Time values are
+specified in seconds unless otherwise stated.
+
+**ageing-time**: *<(scalar)>*
+
+Set the period of time to keep a MAC address in the forwarding
+database after a packet is received.
+
+**priority**: *<(scalar)>*
+
+Set the priority value for the bridge. This value should be a
+number between ``0`` and ``65535``. Lower values mean higher
+priority. The bridge with the higher priority will be elected as
+the root bridge.
+
+**forward-delay**: *<(scalar)>*
+
+Specify the period of time the bridge will remain in Listening and
+Learning states before getting to the Forwarding state. This value
+should be set in seconds for the systemd backend, and in milliseconds
+for the NetworkManager backend.
+
+**hello-time**: *<(scalar)>*
+
+Specify the interval between two hello packets being sent out from
+the root and designated bridges. Hello packets communicate
+information about the network topology.
+
+**max-age**: *<(scalar)>*
+
+Set the maximum age of a hello packet. If the last hello packet is
+older than that value, the bridge will attempt to become the root
+bridge.
+
+**path-cost**: *<(scalar)>*
+
+Set the cost of a path on the bridge. Faster interfaces should have
+a lower cost. This allows a finer control on the network topology
+so that the fastest paths are available whenever possible.
+
+**stp**: *<(bool)>*
+
+Define whether the bridge should use Spanning Tree Protocol. The
+default value is ``true``, which means that Spanning Tree should be
+used.
+
+
+VLANs
+~~~~~
+
+**id**: *<(scalar)>*
+
+VLAN ID, a number between 0 and 4094.
+
+**link**: *<(scalar)>*
+
+ID of the underlying device definition on which this VLAN gets
+created.
+
+Example: ::
+
+  ethernets:
+    eno1: {...}
+  vlans:
+    en-intra:
+      id: 1
+      link: eno1
+      dhcp4: yes
+    en-vpn:
+      id: 2
+      link: eno1
+      address: ...
+
+
+Examples
+--------
+Configure an ethernet device with networkd, identified by its name, and enable
+DHCP: ::
+
+  network:
+    version: 2
+    ethernets:
+      eno1:
+        dhcp4: true
+
+This is a complex example which shows most available features: ::
+
+  network:
+    version: 2
+    ethernets:
+      # opaque ID for physical interfaces, only referred to by other stanzas
+      id0:
+        match:
+          macaddress: 00:11:22:33:44:55
+        wakeonlan: true
+        dhcp4: true
+        addresses:
+          - 192.168.14.2/24
+          - 2001:1::1/64
+        gateway4: 192.168.14.1
+        gateway6: 2001:1::2
+        nameservers:
+          search: [foo.local, bar.local]
+          addresses: [8.8.8.8]
+      lom:
+        match:
+          driver: ixgbe
+        # you are responsible for setting tight enough match rules
+        # that only match one device if you use set-name
+        set-name: lom1
+        dhcp6: true
+      switchports:
+        # all cards on second PCI bus; unconfigured by themselves, will be
+        # added to br0 below
+        match:
+          name: enp2*
+        mtu: 1280
+    bonds:
+      bond0:
+        interfaces: [id0, lom]
+    bridges:
+      # the key name is the name for virtual (created) interfaces; no match:
+      # and set-name: allowed
+      br0:
+        # IDs of the components; switchports expands into multiple interfaces
+        interfaces: [wlp1s0, switchports]
+        dhcp4: true
+    vlans:
+      en-intra:
+        id: 1
+        link: id0
+        dhcp4: yes
+        # static routes
+        routes:
+          - to: 0.0.0.0/0
+            via: 11.0.0.1
+            metric: 3
+
+.. _netplan: https://launchpad.net/netplan
+.. vi: textwidth=78

diff --git a/doc/rtd/topics/network-config.rst b/doc/rtd/topics/network-config.rst
new file mode 100644
index 00000000..109c86f5
--- /dev/null
+++ b/doc/rtd/topics/network-config.rst
@@ -0,0 +1,254 @@
+*********************
+Network Configuration
+*********************
+
+- Default Behavior
+- Disabling Network Configuration
+- Fallback Networking
+- Network Configuration Sources
+- Network Configuration Outputs
+- Network Output Policy
+- Network Configuration Tools
+- Examples
+
+.. _default_behavior:
+
+Default Behavior
+================
+
+`Cloud-init`_ searches for network configuration in order of increasing
+precedence; each item overriding the previous.
+
+**Datasource**
+
+For example, OpenStack may provide network config in the MetaData Service.
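+
+As an illustrative sketch (not tied to any one cloud), a datasource may
+hand cloud-init a v1 configuration like the following, which cloud-init
+then renders for the instance::
+
+  network:
+    version: 1
+    config:
+      - type: physical
+        name: interface0
+        subnets:
+          - type: dhcp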
+
+**System Config**
+
+A ``network:`` entry in /etc/cloud/cloud.cfg.d/* configuration files.
+
+**Kernel Command Line**
+
+``ip=`` or ``network-config=<YAML config string>``
+
+User-data cannot change an instance's network configuration. In the absence
+of network configuration in any of the above sources, `Cloud-init`_ will
+write out a network configuration that will issue a DHCP request on a "first"
+network interface.
+
+
+Disabling Network Configuration
+===============================
+
+Users may disable `Cloud-init`_ 's network configuration capability and rely
+on other methods, such as embedded configuration or other customizations.
+
+`Cloud-init`_ supports the following methods for disabling cloud-init.
+
+
+**Kernel Command Line**
+
+`Cloud-init`_ will check for the parameter ``network-config``, whose value
+is expected to be a YAML string in the :ref:`network_config_v1` format.
+The YAML string may optionally be ``Base64`` encoded, and optionally
+compressed with ``gzip``.
+
+Example disabling kernel command line entry: ::
+
+  network-config={config: disabled}
+
+
+**cloud config**
+
+In the combined cloud-init configuration dictionary. ::
+
+  network:
+    config: disabled
+
+If `Cloud-init`_ 's networking config has not been disabled, and
+no other network information is found, then it will proceed
+to generate a fallback networking configuration.
+
+
+Fallback Network Configuration
+==============================
+
+`Cloud-init`_ will attempt to determine which of any attached network devices
+is most likely to have a connection and then generate a network
+configuration to issue a DHCP request on that interface.
+
+`Cloud-init`_ runs during early boot and does not expect composed network
+devices (such as Bridges) to be available. `Cloud-init`_ does not consider
+the following interface devices as likely 'first' network interfaces for
+fallback configuration; they are filtered out from being selected.
+
+- **loopback**: ``name=lo``
+- **Virtual Ethernet**: ``name=veth*``
+- **Software Bridges**: ``type=bridge``
+- **Software VLANs**: ``type=vlan``
+
+
+`Cloud-init`_ will prefer network interfaces that indicate they are connected
+via the Linux ``carrier`` flag being set. If no interfaces are marked
+connected, then all unfiltered interfaces are potential connections.
+
+Of the potential interfaces, `Cloud-init`_ will attempt to pick the "right"
+interface given the information it has available.
+
+Finally, after selecting the "right" interface, a configuration is
+generated and applied to the system.
+
+
+Network Configuration Sources
+=============================
+
+`Cloud-init`_ accepts a number of different network configuration formats in
+support of different cloud substrates. The Datasource for these clouds in
+`Cloud-init`_ will detect and consume Datasource-specific network
+configuration formats for use when writing an instance's network
+configuration.
+
+The following Datasources optionally provide network configuration:
+
+- :ref:`datasource_config_drive`
+
+  - `OpenStack Metadata Service Network`_
+  - :ref:`network_config_eni`
+
+- :ref:`datasource_digital_ocean`
+
+  - `DigitalOcean JSON metadata`_
+
+- :ref:`datasource_nocloud`
+
+  - :ref:`network_config_v1`
+  - :ref:`network_config_v2`
+  - :ref:`network_config_eni`
+
+- :ref:`datasource_opennebula`
+
+  - :ref:`network_config_eni`
+
+- :ref:`datasource_openstack`
+
+  - :ref:`network_config_eni`
+  - `OpenStack Metadata Service Network`_
+
+- :ref:`datasource_smartos`
+
+  - `SmartOS JSON Metadata`_
+
+For more information on network configuration formats:
+
+.. toctree::
+   :maxdepth: 1
+
+   network-config-format-eni.rst
+   network-config-format-v1.rst
+   network-config-format-v2.rst
+
+
+Network Configuration Outputs
+=============================
+
+`Cloud-init`_ converts various forms of user supplied or automatically
+generated configuration into an internal network configuration state. From
+this state `Cloud-init`_ delegates rendering of the configuration to Distro
+supported formats. The following ``renderers`` are supported in cloud-init:
+
+- **ENI**
+
+/etc/network/interfaces or ``ENI`` is supported by the ``ifupdown`` package
+found in Ubuntu and Debian.
+
+- **Netplan**
+
+Since Ubuntu 16.10, codename Yakkety, the ``netplan`` project has been an
+optional network configuration tool which consumes :ref:`network_config_v2`
+input and renders network configuration for supported backends such as
+``systemd-networkd`` and ``NetworkManager``.
+
+- **Sysconfig**
+
+Sysconfig format is used by RHEL, CentOS, Fedora and other derivatives.
+
+
+Network Output Policy
+=====================
+
+The default policy for selecting a network ``renderer`` in order of preference
+is as follows:
+
+- ENI
+- Sysconfig
+- Netplan
+
+When applying the policy, `Cloud-init`_ checks if the current instance has the
+correct binaries and paths to support the renderer. The first renderer that
+can be used is selected. Users may override the network renderer policy by
+supplying an updated configuration in cloud-config. ::
+
+  system_info:
+    network:
+      renderers: ['netplan', 'eni', 'sysconfig']
+
+
+Network Configuration Tools
+===========================
+
+`Cloud-init`_ contains one tool used to test input/output conversion between
+formats. The ``tools/net-convert.py`` in the `Cloud-init`_ source repository
+is helpful for examining expected output for a given input format.
+
+CLI Interface:
+
+.. code-block:: bash
+
+  % tools/net-convert.py --help
+  usage: net-convert.py [-h] --network-data PATH --kind
+                        {eni,network_data.json,yaml} -d PATH [-m name,mac]
+                        --output-kind {eni,netplan,sysconfig}
+
+  optional arguments:
+    -h, --help            show this help message and exit
+    --network-data PATH, -p PATH
+    --kind {eni,network_data.json,yaml}, -k {eni,network_data.json,yaml}
+    -d PATH, --directory PATH
+                          directory to place output in
+    -m name,mac, --mac name,mac
+                          interface name to mac mapping
+    --output-kind {eni,netplan,sysconfig}, -ok {eni,netplan,sysconfig}
+
+
+Example output converting V2 to sysconfig:
+
+.. code-block:: bash
+
+  % tools/net-convert.py --network-data v2.yaml --kind yaml \
+      --output-kind sysconfig -d target
+  % cat target/etc/sysconfig/network-scripts/ifcfg-eth*
+  # Created by cloud-init on instance boot automatically, do not edit.
+ # + BOOTPROTO=static + DEVICE=eth7 + IPADDR=192.168.1.5/255.255.255.0 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + # Created by cloud-init on instance boot automatically, do not edit. + # + BOOTPROTO=dhcp + DEVICE=eth9 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + + +.. _Cloud-init: https://launchpad.net/cloud-init +.. _DigitalOcean JSON metadata: https://developers.digitalocean.com/documentation/metadata/#network-interfaces-index +.. _OpenStack Metadata Service Network: https://specs.openstack.org/openstack/nova-specs/specs/liberty/implemented/metadata-service-network-info.html +.. _SmartOS JSON Metadata: https://eng.joyent.com/mdata/datadict.html + +.. vi: textwidth=78 -- cgit v1.2.3 From 4f0f171c29bb9abb5cbb6f9adbe68015089aeed9 Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Tue, 2 May 2017 22:26:50 +0000 Subject: fs_setup: if cmd is specified, use shell interpretation. If 'cmd' is provided to a fs_setup entry, then cloud-init was trying to execute the rendered string as a single name, rather than splitting the string. The change here will pass the string to shell for interpretation so that it is split there. Also fix some documentation errors and warn when fs_opts or overwrite is provided along with 'cmd'. LP: #1687712 --- cloudinit/config/cc_disk_setup.py | 21 +++++++-- doc/examples/cloud-config-disk-setup.txt | 6 +-- .../test_handler/test_handler_disk_setup.py | 55 ++++++++++++++++++++++ 3 files changed, 74 insertions(+), 8 deletions(-) diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py index f49386e3..6f827ddc 100644 --- a/cloudinit/config/cc_disk_setup.py +++ b/cloudinit/config/cc_disk_setup.py @@ -910,12 +910,23 @@ def mkfs(fs_cfg): "must be set.", label) # Create the commands + shell = False if fs_cmd: fs_cmd = fs_cfg['cmd'] % { 'label': label, 'filesystem': fs_type, 'device': device, } + shell = True + + if overwrite: + LOG.warning( + "fs_setup:overwrite ignored because cmd was specified: %s", + fs_cmd) + if fs_opts: + LOG.warning( + "fs_setup:extra_opts ignored because cmd was specified: %s", + fs_cmd) else: # Find the mkfs command mkfs_cmd = util.which("mkfs.%s" % fs_type) @@ -936,14 +947,14 @@ def mkfs(fs_cfg): if overwrite or device_type(device) == "disk": fs_cmd.append(lookup_force_flag(fs_type)) - # Add the extends FS options - if fs_opts: - fs_cmd.extend(fs_opts) + # Add the extends FS options + if fs_opts: + fs_cmd.extend(fs_opts) LOG.debug("Creating file system %s on %s", label, device) - LOG.debug(" Using cmd: %s", " ".join(fs_cmd)) + LOG.debug(" Using cmd: %s", str(fs_cmd)) try: - util.subp(fs_cmd) + util.subp(fs_cmd, shell=shell) except Exception as e: raise Exception("Failed to exec of '%s':\n%s" % (fs_cmd, e)) diff --git a/doc/examples/cloud-config-disk-setup.txt b/doc/examples/cloud-config-disk-setup.txt index 3e46a22e..38ad0528 100644 --- a/doc/examples/cloud-config-disk-setup.txt +++ b/doc/examples/cloud-config-disk-setup.txt @@ -155,11 +155,11 @@ fs_setup: filesystem: 'ext3' device: 'ephemeral0' partition: 'auto' - - label: mylabl2 + - label: mylabl2 filesystem: 'ext4' device: '/dev/xvda1' - - special: - cmd: mkfs -t %(FILESYSTEM)s -L %(LABEL)s %(DEVICE)s + - cmd: mkfs -t %(filesystem)s -L %(label)s %(device)s + label: mylabl3 filesystem: 'btrfs' device: '/dev/xvdh' diff --git a/tests/unittests/test_handler/test_handler_disk_setup.py b/tests/unittests/test_handler/test_handler_disk_setup.py index 7ff39225..9f00d46a 100644 --- a/tests/unittests/test_handler/test_handler_disk_setup.py +++ 
b/tests/unittests/test_handler/test_handler_disk_setup.py
@@ -17,6 +17,10 @@ class TestIsDiskUsed(TestCase):
         self.check_fs = self.patches.enter_context(
             mock.patch('{0}.check_fs'.format(mod_name)))

+    def tearDown(self):
+        super(TestIsDiskUsed, self).tearDown()
+        self.patches.close()
+
     def test_multiple_child_nodes_returns_true(self):
         self.enumerate_disk.return_value = (mock.MagicMock() for _ in range(2))
         self.check_fs.return_value = (mock.MagicMock(), None, mock.MagicMock())
@@ -147,4 +151,55 @@ class TestUpdateFsSetupDevices(TestCase):
             'filesystem': 'xfs'
         }, fs_setup)

+
+@mock.patch('cloudinit.config.cc_disk_setup.find_device_node',
+            return_value=('/dev/xdb1', False))
+@mock.patch('cloudinit.config.cc_disk_setup.device_type', return_value=None)
+@mock.patch('cloudinit.config.cc_disk_setup.util.subp', return_value=('', ''))
+class TestMkfsCommandHandling(TestCase):
+
+    def test_with_cmd(self, subp, *args):
+        """mkfs honors cmd and logs warnings when extra_opts or overwrite are
+        provided."""
+        with self.assertLogs(
+                'cloudinit.config.cc_disk_setup') as logs:
+            cc_disk_setup.mkfs({
+                'cmd': 'mkfs -t %(filesystem)s -L %(label)s %(device)s',
+                'filesystem': 'ext4',
+                'device': '/dev/xdb1',
+                'label': 'with_cmd',
+                'extra_opts': ['should', 'generate', 'warning'],
+                'overwrite': 'should generate warning too'
+            })
+
+        self.assertIn(
+            'WARNING:cloudinit.config.cc_disk_setup:fs_setup:extra_opts ' +
+            'ignored because cmd was specified: mkfs -t ext4 -L with_cmd ' +
+            '/dev/xdb1',
+            logs.output)
+        self.assertIn(
+            'WARNING:cloudinit.config.cc_disk_setup:fs_setup:overwrite ' +
+            'ignored because cmd was specified: mkfs -t ext4 -L with_cmd ' +
+            '/dev/xdb1',
+            logs.output)
+
+        subp.assert_called_once_with(
+            'mkfs -t ext4 -L with_cmd /dev/xdb1', shell=True)
+
+    def test_overwrite_and_extra_opts_without_cmd(self, subp, *args):
+        """mkfs observes extra_opts and overwrite settings when cmd is not
+        present."""
+        cc_disk_setup.mkfs({
+            'filesystem': 'ext4',
+            'device': '/dev/xdb1',
+            'label': 'without_cmd',
+            'extra_opts': ['are', 'added'],
+            'overwrite': True
+        })
+
+        subp.assert_called_once_with(
+            ['/sbin/mkfs.ext4', '/dev/xdb1',
+             '-L', 'without_cmd', '-F', 'are', 'added'],
+            shell=False)
+
 # vi: ts=4 expandtab
-- 
cgit v1.2.3


From 370a04e8d7b530c1ef8280e15eb628ff6880c736 Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Fri, 10 Feb 2017 22:08:03 -0500
Subject: Add unit tests for ds-identify, fix Ec2 bug found.

This adds several unit tests for ds-identify, and fixes a bug in Ec2
detection that I found while writing these tests.

The method of testing is to use the ds-identify code as a shell
library. The TestDsIdentify:call basically does:
 * populate a (temp) directory with files that represent what
   ds-identify would see in /sys or other locations it reads.
 * create a file '_shwrap' that replaces the 3 programs that are
   executed in the ds-identify code path. It supports setting their
   stdout, stderr, and exit code. (See the sketch below for what a
   generated mock looks like.)
 * set the default policies explicitly (DI_DEFAULT_POLICY) so we
   can support testing different builtins. This is necessary because
   the Ubuntu branches patch the builtin value. If we did not
   explicitly set it, then testing there would fail.
 * execute sh to source the script and call its main.
--- tests/unittests/helpers.py | 7 + tests/unittests/test_ds_identify.py | 290 ++++++++++++++++++++++++++++++++++++ tools/ds-identify | 24 ++- 3 files changed, 313 insertions(+), 8 deletions(-) create mode 100644 tests/unittests/test_ds_identify.py diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py index 90e2431f..a711404c 100644 --- a/tests/unittests/helpers.py +++ b/tests/unittests/helpers.py @@ -3,6 +3,7 @@ from __future__ import print_function import functools +import json import os import shutil import sys @@ -280,6 +281,12 @@ def dir2dict(startdir, prefix=None): return flist +def json_dumps(data): + # print data in nicely formatted json. + return json.dumps(data, indent=1, sort_keys=True, + separators=(',', ': ')) + + def wrap_and_call(prefix, mocks, func, *args, **kwargs): """ call func(args, **kwargs) with mocks applied, then unapplies mocks diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py new file mode 100644 index 00000000..9e148852 --- /dev/null +++ b/tests/unittests/test_ds_identify.py @@ -0,0 +1,290 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +import copy +import os +from uuid import uuid4 + +from cloudinit import safeyaml +from cloudinit import util +from .helpers import CiTestCase, dir2dict, json_dumps, populate_dir + +UNAME_MYSYS = ("Linux bart 4.4.0-62-generic #83-Ubuntu " + "SMP Wed Jan 18 14:10:15 UTC 2017 x86_64 GNU/Linux") +BLKID_EFI_ROOT = """ +DEVNAME=/dev/sda1 +UUID=8B36-5390 +TYPE=vfat +PARTUUID=30d7c715-a6ae-46ee-b050-afc6467fc452 + +DEVNAME=/dev/sda2 +UUID=19ac97d5-6973-4193-9a09-2e6bbfa38262 +TYPE=ext4 +PARTUUID=30c65c77-e07d-4039-b2fb-88b1fb5fa1fc +""" + +DI_DEFAULT_POLICY = "search,found=all,maybe=all,notfound=enabled" +DI_DEFAULT_POLICY_NO_DMI = "search,found=all,maybe=all,notfound=disabled" + +SHELL_MOCK_TMPL = """\ +%(name)s() { + local out='%(out)s' err='%(err)s' r='%(ret)s' RET='%(RET)s' + [ "$out" = "_unset" ] || echo "$out" + [ "$err" = "_unset" ] || echo "$err" 2>&1 + [ "$RET" = "_unset" ] || _RET="$RET" + return $r +} +""" + +RC_FOUND = 0 +RC_NOT_FOUND = 1 +DS_NONE = 'None' + +P_PRODUCT_NAME = "sys/class/dmi/id/product_name" +P_PRODUCT_SERIAL = "sys/class/dmi/id/product_serial" +P_PRODUCT_UUID = "sys/class/dmi/id/product_uuid" +P_DSID_CFG = "etc/cloud/ds-identify.cfg" + +MOCK_VIRT_IS_KVM = {'name': 'detect_virt', 'RET': 'kvm', 'ret': 0} + + +class TestDsIdentify(CiTestCase): + dsid_path = os.path.realpath('tools/ds-identify') + + def call(self, rootd=None, mocks=None, args=None, files=None, + policy_dmi=DI_DEFAULT_POLICY, + policy_nodmi=DI_DEFAULT_POLICY_NO_DMI): + if args is None: + args = [] + if mocks is None: + mocks = [] + + if files is None: + files = {} + + if rootd is None: + rootd = self.tmp_dir() + + unset = '_unset' + wrap = self.tmp_path(path="_shwrap", dir=rootd) + populate_dir(rootd, files) + + # DI_DEFAULT_POLICY* are declared always as to not rely + # on the default in the code. This is because SRU releases change + # the value in the code, and thus tests would fail there. + head = [ + "DI_MAIN=noop", + "DEBUG_LEVEL=2", + "DI_LOG=stderr", + "PATH_ROOT='%s'" % rootd, + ". 
" + self.dsid_path, + 'DI_DEFAULT_POLICY="%s"' % policy_dmi, + 'DI_DEFAULT_POLICY_NO_DMI="%s"' % policy_nodmi, + "" + ] + + def write_mock(data): + ddata = {'out': None, 'err': None, 'ret': 0, 'RET': None} + ddata.update(data) + for k in ddata: + if ddata[k] is None: + ddata[k] = unset + return SHELL_MOCK_TMPL % ddata + + mocklines = [] + defaults = [ + {'name': 'detect_virt', 'RET': 'none', 'ret': 1}, + {'name': 'uname', 'out': UNAME_MYSYS}, + {'name': 'blkid', 'out': BLKID_EFI_ROOT}, + ] + + written = [d['name'] for d in mocks] + for data in mocks: + mocklines.append(write_mock(data)) + for d in defaults: + if d['name'] not in written: + mocklines.append(write_mock(d)) + + endlines = [ + 'main %s' % ' '.join(['"%s"' % s for s in args]) + ] + + with open(wrap, "w") as fp: + fp.write('\n'.join(head + mocklines + endlines) + "\n") + + rc = 0 + try: + out, err = util.subp(['sh', '-c', '. %s' % wrap], capture=True) + except util.ProcessExecutionError as e: + rc = e.exit_code + out = e.stdout + err = e.stderr + + cfg = None + cfg_out = os.path.join(rootd, 'run/cloud-init/cloud.cfg') + if os.path.exists(cfg_out): + contents = util.load_file(cfg_out) + try: + cfg = safeyaml.load(contents) + except Exception as e: + cfg = {"_INVALID_YAML": contents, + "_EXCEPTION": str(e)} + + return rc, out, err, cfg, dir2dict(rootd) + + def _call_via_dict(self, data, rootd=None, **kwargs): + # return output of self.call with a dict input like VALID_CFG[item] + xwargs = {'rootd': rootd} + for k in ('mocks', 'args', 'policy_dmi', 'policy_nodmi', 'files'): + if k in data: + xwargs[k] = data[k] + if k in kwargs: + xwargs[k] = kwargs[k] + + return self.call(**xwargs) + + def _test_ds_found(self, name): + data = copy.deepcopy(VALID_CFG[name]) + return self._check_via_dict( + data, RC_FOUND, dslist=[data.get('ds'), DS_NONE]) + + def _check_via_dict(self, data, rc, dslist=None, **kwargs): + found_rc, out, err, cfg, files = self._call_via_dict(data, **kwargs) + good = False + try: + self.assertEqual(rc, found_rc) + if dslist is not None: + self.assertEqual(dslist, cfg['datasource_list']) + good = True + finally: + if not good: + _print_run_output(rc, out, err, cfg, files) + return rc, out, err, cfg, files + + def test_aws_ec2_hvm(self): + """EC2: hvm instances use dmi serial and uuid starting with 'ec2'.""" + self._test_ds_found('Ec2-hvm') + + def test_aws_ec2_xen(self): + """EC2: sys/hypervisor/uuid starts with ec2.""" + self._test_ds_found('Ec2-xen') + + def test_brightbox_is_ec2(self): + """EC2: product_serial ends with 'brightbox.com'""" + self._test_ds_found('Ec2-brightbox') + + def test_gce_by_product_name(self): + """GCE identifies itself with product_name.""" + self._test_ds_found('GCE') + + def test_gce_by_serial(self): + """Older gce compute instances must be identified by serial.""" + self._test_ds_found('GCE-serial') + + def test_config_drive(self): + """ConfigDrive datasource has a disk with LABEL=config-2.""" + self._test_ds_found('ConfigDrive') + return + + def test_policy_disabled(self): + """A Builtin policy of 'disabled' should return not found. 
+ + Even though a search would find something, the builtin policy of + disabled should cause the return of not found.""" + mydata = copy.deepcopy(VALID_CFG['Ec2-hvm']) + self._check_via_dict(mydata, rc=RC_NOT_FOUND, policy_dmi="disabled") + + def test_policy_config_disable_overrides_builtin(self): + """explicit policy: disabled in config file should cause not found.""" + mydata = copy.deepcopy(VALID_CFG['Ec2-hvm']) + mydata['files'][P_DSID_CFG] = '\n'.join(['policy: disabled', '']) + self._check_via_dict(mydata, rc=RC_NOT_FOUND) + + def test_single_entry_defines_datasource(self): + """If config has a single entry in datasource_list, that is used. + + Test the valid Ec2-hvm, but provide a config file that specifies + a single entry in datasource_list. The configured value should + be used.""" + mydata = copy.deepcopy(VALID_CFG['Ec2-hvm']) + cfgpath = 'etc/cloud/cloud.cfg.d/myds.cfg' + mydata['files'][cfgpath] = 'datasource_list: ["NoCloud"]\n' + self._check_via_dict(mydata, rc=RC_FOUND, dslist=['NoCloud', DS_NONE]) + + +def blkid_out(disks=None): + """Convert a list of disk dictionaries into blkid content.""" + if disks is None: + disks = [] + lines = [] + for disk in disks: + if not disk["DEVNAME"].startswith("/dev/"): + disk["DEVNAME"] = "/dev/" + disk["DEVNAME"] + for key in disk: + lines.append("%s=%s" % (key, disk[key])) + lines.append("") + return '\n'.join(lines) + + +def _print_run_output(rc, out, err, cfg, files): + """A helper to print return of TestDsIdentify. + + _print_run_output(self.call())""" + print('\n'.join([ + '-- rc = %s --' % rc, + '-- out --', str(out), + '-- err --', str(err), + '-- cfg --', json_dumps(cfg)])) + print('-- files --') + for k, v in files.items(): + if "/_shwrap" in k: + continue + print(' === %s ===' % k) + for line in v.splitlines(): + print(" " + line) + + +VALID_CFG = { + 'Ec2-hvm': { + 'ds': 'Ec2', + 'mocks': [{'name': 'detect_virt', 'RET': 'kvm', 'ret': 0}], + 'files': { + P_PRODUCT_SERIAL: 'ec23aef5-54be-4843-8d24-8c819f88453e\n', + P_PRODUCT_UUID: 'EC23AEF5-54BE-4843-8D24-8C819F88453E\n', + } + }, + 'Ec2-xen': { + 'ds': 'Ec2', + 'mocks': [{'name': 'detect_virt', 'RET': 'xen', 'ret': 0}], + 'files': { + 'sys/hypervisor/uuid': 'ec2c6e2f-5fac-4fc7-9c82-74127ec14bbb\n' + }, + }, + 'Ec2-brightbox': { + 'ds': 'Ec2', + 'files': {P_PRODUCT_SERIAL: 'facc6e2f.brightbox.com\n'}, + }, + 'GCE': { + 'ds': 'GCE', + 'files': {P_PRODUCT_NAME: 'Google Compute Engine\n'}, + 'mocks': [MOCK_VIRT_IS_KVM], + }, + 'GCE-serial': { + 'ds': 'GCE', + 'files': {P_PRODUCT_SERIAL: 'GoogleCloud-8f2e88f\n'}, + 'mocks': [MOCK_VIRT_IS_KVM], + }, + 'ConfigDrive': { + 'ds': 'ConfigDrive', + 'mocks': [ + {'name': 'blkid', 'ret': 0, + 'out': blkid_out( + [{'DEVNAME': 'vda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()}, + {'DEVNAME': 'vda2', 'TYPE': 'ext4', + 'LABEL': 'cloudimg-rootfs', 'PARTUUID': uuid4()}, + {'DEVNAME': 'vdb', 'TYPE': 'vfat', 'LABEL': 'config-2'}]) + }, + ], + }, +} + +# vi: ts=4 expandtab diff --git a/tools/ds-identify b/tools/ds-identify index a43b1291..aff26eb6 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -216,9 +216,8 @@ has_cdrom() { [ -e "${PATH_ROOT}/dev/cdrom" ] } -read_virt() { - cached "$DI_VIRT" && return 0 - local out="" r="" virt="${UNAVAILABLE}" +detect_virt() { + local virt="${UNAVAILABLE}" r="" out="" if [ -d /run/systemd ]; then out=$(systemd-detect-virt 2>&1) r=$? 
@@ -226,7 +225,13 @@ read_virt() {
             virt="$out"
         fi
     fi
-    DI_VIRT=$virt
+    _RET="$virt"
+}
+
+read_virt() {
+    cached "$DI_VIRT" && return 0
+    detect_virt
+    DI_VIRT=${_RET}
 }
 
 is_container() {
@@ -318,8 +323,11 @@ parse_yaml_array() {
     # ['1'] or [1]
     # '1', '2'
     local val="$1" oifs="$IFS" ret="" tok=""
-    val=${val#[}
-    val=${val%]}
+    # i386/14.04 (dash=0.5.7-4ubuntu1): the following outputs "[foo"
+    #    sh -c 'n="$1"; echo ${n#[}' -- "[foo"
+    # the fix was to quote the open bracket (val=${val#"["}) (LP: #1689648)
+    val=${val#"["}
+    val=${val%"]"}
     IFS=","; set -- $val; IFS="$oifs"
     for tok in "$@"; do
         trim "$tok"
@@ -714,7 +722,7 @@ ec2_identify_platform() {
     # AWS http://docs.aws.amazon.com/AWSEC2/
     #     latest/UserGuide/identify_ec2_instances.html
-    local uuid="" hvuuid="$PATH_ROOT/sys/hypervisor/uuid"
+    local uuid="" hvuuid="${PATH_SYS_HYPERVISOR}/uuid"
     # if the (basically) xen specific /sys/hypervisor/uuid starts with 'ec2'
     if [ -r "$hvuuid" ] && read uuid < "$hvuuid" &&
         [ "${uuid#ec2}" != "$uuid" ]; then
@@ -725,7 +733,7 @@ ec2_identify_platform() {
     # product uuid and product serial start with case insensitive
     local uuid="${DI_DMI_PRODUCT_UUID}"
     case "$uuid:$serial" in
-        [Ee][Cc]2*:[Ee][Cc]2)
+        [Ee][Cc]2*:[Ee][Cc]2*)
             # both start with ec2, now check for case insenstive equal
             nocase_equal "$uuid" "$serial" &&
                 { _RET="AWS"; return 0; };;
-- cgit v1.2.3


From 0a71d5a870b416f2c86c8bc196004bb3fc0768a0 Mon Sep 17 00:00:00 2001
From: Hongjiang Zhang
Date: Fri, 13 Jan 2017 15:08:22 +0800
Subject: FreeBSD: improvements and fixes for use on Azure

This patch aims to make FreeBSD 10.3 or 11 work on Azure.

The modifications abide by two rules:
 * make as few modifications as possible
 * delegate to the distro or datasource where possible

The main modifications are:
1. network configuration improvements, and movement into the distro path.
2. Fix setting of password. Password setting through "pw" only works
   through a pipe.
3. Add 'root:wheel' to syslog_fix_perms field.
4. Support resizing the default file system (ufs).
5. Copy cloud.cfg for freebsd to /etc/cloud/cloud.cfg rather than
   /usr/local/etc/cloud/cloud.cfg.
6. Azure specific changes:
   a. When reading the azure endpoint, search in a different path and
      read a different option name (option-245 vs. unknown-245), so the
      lease file path should be generated according to the platform.
   b. adjust the handling of ephemeral mounts for ufs filesystem and
      for finding the ephemeral device.
   c.
fix mounting of cdrom LP: #1636345 --- cloudinit/config/cc_resizefs.py | 95 +++++++ cloudinit/distros/__init__.py | 3 + cloudinit/distros/freebsd.py | 277 +++++++++++++++++++-- cloudinit/settings.py | 2 +- cloudinit/sources/DataSourceAzure.py | 180 ++++++++++++- cloudinit/sources/helpers/azure.py | 11 +- cloudinit/stages.py | 2 +- cloudinit/util.py | 52 +++- config/cloud.cfg-freebsd | 2 +- setup.py | 11 +- sysvinit/freebsd/cloudconfig | 10 - sysvinit/freebsd/cloudfinal | 10 - sysvinit/freebsd/cloudinit | 10 - sysvinit/freebsd/cloudinitlocal | 12 +- tests/unittests/test_datasource/test_azure.py | 65 +++++ .../unittests/test_datasource/test_azure_helper.py | 4 +- tests/unittests/test_datasource/test_cloudstack.py | 5 + tests/unittests/test_distros/test_netconfig.py | 47 +++- .../test_handler/test_handler_resizefs.py | 59 +++++ tests/unittests/test_net.py | 6 +- tests/unittests/test_util.py | 3 +- tools/build-on-freebsd | 4 +- 22 files changed, 783 insertions(+), 87 deletions(-) create mode 100644 tests/unittests/test_handler/test_handler_resizefs.py diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index 60e3ab53..ceee952b 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -33,7 +33,10 @@ disabled altogether by setting ``resize_rootfs`` to ``false``. """ import errno +import getopt import os +import re +import shlex import stat from cloudinit.settings import PER_ALWAYS @@ -58,6 +61,62 @@ def _resize_ufs(mount_point, devpth): return ('growfs', devpth) +def _get_dumpfs_output(mount_point): + dumpfs_res, err = util.subp(['dumpfs', '-m', mount_point]) + return dumpfs_res + + +def _get_gpart_output(part): + gpart_res, err = util.subp(['gpart', 'show', part]) + return gpart_res + + +def _can_skip_resize_ufs(mount_point, devpth): + # extract the current fs sector size + """ + # dumpfs -m / + # newfs command for / (/dev/label/rootfs) + newfs -O 2 -U -a 4 -b 32768 -d 32768 -e 4096 -f 4096 -g 16384 + -h 64 -i 8192 -j -k 6408 -m 8 -o time -s 58719232 /dev/label/rootf + """ + cur_fs_sz = None + frag_sz = None + dumpfs_res = _get_dumpfs_output(mount_point) + for line in dumpfs_res.splitlines(): + if not line.startswith('#'): + newfs_cmd = shlex.split(line) + opt_value = 'O:Ua:s:b:d:e:f:g:h:i:jk:m:o:' + optlist, args = getopt.getopt(newfs_cmd[1:], opt_value) + for o, a in optlist: + if o == "-s": + cur_fs_sz = int(a) + if o == "-f": + frag_sz = int(a) + # check the current partition size + """ + # gpart show /dev/da0 +=> 40 62914480 da0 GPT (30G) + 40 1024 1 freebsd-boot (512K) + 1064 58719232 2 freebsd-ufs (28G) + 58720296 3145728 3 freebsd-swap (1.5G) + 61866024 1048496 - free - (512M) + """ + expect_sz = None + m = re.search('^(/dev/.+)p([0-9])$', devpth) + gpart_res = _get_gpart_output(m.group(1)) + for line in gpart_res.splitlines(): + if re.search(r"freebsd-ufs", line): + fields = line.split() + expect_sz = int(fields[1]) + # Normalize the gpart sector size, + # because the size is not exactly the same as fs size. + normal_expect_sz = (expect_sz - expect_sz % (frag_sz / 512)) + if normal_expect_sz == cur_fs_sz: + return True + else: + return False + + # Do not use a dictionary as these commands should be able to be used # for multiple filesystem types if possible, e.g. one command for # ext2, ext3 and ext4. 
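# Illustrative aside, not from the patch: the skip check above, run
# standalone against the sample dumpfs/gpart output quoted in
# _can_skip_resize_ufs. newfs's "-s" option carries the filesystem size
# and "-f" the fragment size; gpart's second column is the partition size
# in 512-byte sectors, rounded down to a fragment boundary for comparison.
import getopt
import shlex

newfs_line = ("newfs -O 2 -U -a 4 -b 32768 -d 32768 -e 4096 -f 4096 "
              "-g 16384 -h 64 -i 8192 -j -k 6408 -m 8 -o time "
              "-s 58719232 /dev/label/rootfs")
optlist, _args = getopt.getopt(shlex.split(newfs_line)[1:],
                               'O:Ua:s:b:d:e:f:g:h:i:jk:m:o:')
opts = dict(optlist)
cur_fs_sz, frag_sz = int(opts['-s']), int(opts['-f'])
expect_sz = 58719232  # freebsd-ufs sector count from 'gpart show'
# 58719232 % (4096 // 512) == 0, so the sizes agree and resize is skipped.
assert expect_sz - expect_sz % (frag_sz // 512) == cur_fs_sz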
@@ -68,9 +127,40 @@ RESIZE_FS_PREFIXES_CMDS = [ ('ufs', _resize_ufs), ] +RESIZE_FS_PRECHECK_CMDS = { + 'ufs': _can_skip_resize_ufs +} + NOBLOCK = "noblock" +def rootdev_from_cmdline(cmdline): + found = None + for tok in cmdline.split(): + if tok.startswith("root="): + found = tok[5:] + break + if found is None: + return None + + if found.startswith("/dev/"): + return found + if found.startswith("LABEL="): + return "/dev/disk/by-label/" + found[len("LABEL="):] + if found.startswith("UUID="): + return "/dev/disk/by-uuid/" + found[len("UUID="):] + + return "/dev/" + found + + +def can_skip_resize(fs_type, resize_what, devpth): + fstype_lc = fs_type.lower() + for i, func in RESIZE_FS_PRECHECK_CMDS.items(): + if fstype_lc.startswith(i): + return func(resize_what, devpth) + return False + + def handle(name, cfg, _cloud, log, args): if len(args) != 0: resize_root = args[0] @@ -139,6 +229,11 @@ def handle(name, cfg, _cloud, log, args): return resizer = None + if can_skip_resize(fs_type, resize_what, devpth): + log.debug("Skip resize filesystem type %s for %s", + fs_type, resize_what) + return + fstype_lc = fs_type.lower() for (pfix, root_cmd) in RESIZE_FS_PREFIXES_CMDS: if fstype_lc.startswith(pfix): diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 28650b88..f56c0cf7 100755 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -155,6 +155,9 @@ class Distro(object): ns, header=header, render_hwaddress=True) return self.apply_network(contents, bring_up=bring_up) + def generate_fallback_config(self): + return net.generate_fallback_config() + def apply_network_config(self, netconfig, bring_up=False): # apply network config netconfig # This method is preferred to apply_network which only takes diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py index 183e4452..bad112fe 100644 --- a/cloudinit/distros/freebsd.py +++ b/cloudinit/distros/freebsd.py @@ -30,6 +30,7 @@ class Distro(distros.Distro): login_conf_fn_bak = '/etc/login.conf.orig' resolv_conf_fn = '/etc/resolv.conf' ci_sudoers_fn = '/usr/local/etc/sudoers.d/90-cloud-init-users' + default_primary_nic = 'hn0' def __init__(self, name, cfg, paths): distros.Distro.__init__(self, name, cfg, paths) @@ -38,6 +39,8 @@ class Distro(distros.Distro): # should only happen say once per instance...) self._runner = helpers.Runners(paths) self.osfamily = 'freebsd' + self.ipv4_pat = re.compile(r"\s+inet\s+\d+[.]\d+[.]\d+[.]\d+") + cfg['ssh_svcname'] = 'sshd' # Updates a key in /etc/rc.conf. 
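    # (Illustrative, hypothetical values: FreeBSD persists rc settings as
    # rc.conf variables, so updatercconf('ifconfig_hn0', 'SYNCDHCP')
    # rewrites the ifconfig_hn0="..." line in /etc/rc.conf in place.)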
def updatercconf(self, key, value): @@ -183,7 +186,6 @@ class Distro(distros.Distro): "gecos": '-c', "primary_group": '-g', "groups": '-G', - "passwd": '-h', "shell": '-s', "inactive": '-E', } @@ -193,19 +195,11 @@ class Distro(distros.Distro): "no_log_init": '--no-log-init', } - redact_opts = ['passwd'] - for key, val in kwargs.items(): if (key in adduser_opts and val and isinstance(val, six.string_types)): adduser_cmd.extend([adduser_opts[key], val]) - # Redact certain fields from the logs - if key in redact_opts: - log_adduser_cmd.extend([adduser_opts[key], 'REDACTED']) - else: - log_adduser_cmd.extend([adduser_opts[key], val]) - elif key in adduser_flags and val: adduser_cmd.append(adduser_flags[key]) log_adduser_cmd.append(adduser_flags[key]) @@ -226,19 +220,21 @@ class Distro(distros.Distro): except Exception as e: util.logexc(LOG, "Failed to create user %s", name) raise e + # Set the password if it is provided + # For security consideration, only hashed passwd is assumed + passwd_val = kwargs.get('passwd', None) + if passwd_val is not None: + self.set_passwd(name, passwd_val, hashed=True) def set_passwd(self, user, passwd, hashed=False): - cmd = ['pw', 'usermod', user] - if hashed: - cmd.append('-H') + hash_opt = "-H" else: - cmd.append('-h') - - cmd.append('0') + hash_opt = "-h" try: - util.subp(cmd, passwd, logstring="chpasswd for %s" % user) + util.subp(['pw', 'usermod', user, hash_opt, '0'], + data=passwd, logstring="chpasswd for %s" % user) except Exception as e: util.logexc(LOG, "Failed to set password for %s", user) raise e @@ -271,6 +267,255 @@ class Distro(distros.Distro): keys = set(kwargs['ssh_authorized_keys']) or [] ssh_util.setup_user_keys(keys, name, options=None) + @staticmethod + def get_ifconfig_list(): + cmd = ['ifconfig', '-l'] + (nics, err) = util.subp(cmd, rcs=[0, 1]) + if len(err): + LOG.warning("Error running %s: %s", cmd, err) + return None + return nics + + @staticmethod + def get_ifconfig_ifname_out(ifname): + cmd = ['ifconfig', ifname] + (if_result, err) = util.subp(cmd, rcs=[0, 1]) + if len(err): + LOG.warning("Error running %s: %s", cmd, err) + return None + return if_result + + @staticmethod + def get_ifconfig_ether(): + cmd = ['ifconfig', '-l', 'ether'] + (nics, err) = util.subp(cmd, rcs=[0, 1]) + if len(err): + LOG.warning("Error running %s: %s", cmd, err) + return None + return nics + + @staticmethod + def get_interface_mac(ifname): + if_result = Distro.get_ifconfig_ifname_out(ifname) + for item in if_result.splitlines(): + if item.find('ether ') != -1: + mac = str(item.split()[1]) + if mac: + return mac + + @staticmethod + def get_devicelist(): + nics = Distro.get_ifconfig_list() + return nics.split() + + @staticmethod + def get_ipv6(): + ipv6 = [] + nics = Distro.get_devicelist() + for nic in nics: + if_result = Distro.get_ifconfig_ifname_out(nic) + for item in if_result.splitlines(): + if item.find("inet6 ") != -1 and item.find("scopeid") == -1: + ipv6.append(nic) + return ipv6 + + def get_ipv4(self): + ipv4 = [] + nics = Distro.get_devicelist() + for nic in nics: + if_result = Distro.get_ifconfig_ifname_out(nic) + for item in if_result.splitlines(): + print(item) + if self.ipv4_pat.match(item): + ipv4.append(nic) + return ipv4 + + def is_up(self, ifname): + if_result = Distro.get_ifconfig_ifname_out(ifname) + pat = "^" + ifname + for item in if_result.splitlines(): + if re.match(pat, item): + flags = item.split('<')[1].split('>')[0] + if flags.find("UP") != -1: + return True + + def _get_current_rename_info(self, check_downable=True): + 
"""Collect information necessary for rename_interfaces.""" + names = Distro.get_devicelist() + bymac = {} + for n in names: + bymac[Distro.get_interface_mac(n)] = { + 'name': n, 'up': self.is_up(n), 'downable': None} + + if check_downable: + nics_with_addresses = set() + ipv6 = self.get_ipv6() + ipv4 = self.get_ipv4() + for bytes_out in (ipv6, ipv4): + for i in ipv6: + nics_with_addresses.update(i) + for i in ipv4: + nics_with_addresses.update(i) + + for d in bymac.values(): + d['downable'] = (d['up'] is False or + d['name'] not in nics_with_addresses) + + return bymac + + def _rename_interfaces(self, renames): + if not len(renames): + LOG.debug("no interfaces to rename") + return + + current_info = self._get_current_rename_info() + + cur_bymac = {} + for mac, data in current_info.items(): + cur = data.copy() + cur['mac'] = mac + cur_bymac[mac] = cur + + def update_byname(bymac): + return dict((data['name'], data) + for data in bymac.values()) + + def rename(cur, new): + util.subp(["ifconfig", cur, "name", new], capture=True) + + def down(name): + util.subp(["ifconfig", name, "down"], capture=True) + + def up(name): + util.subp(["ifconfig", name, "up"], capture=True) + + ops = [] + errors = [] + ups = [] + cur_byname = update_byname(cur_bymac) + tmpname_fmt = "cirename%d" + tmpi = -1 + + for mac, new_name in renames: + cur = cur_bymac.get(mac, {}) + cur_name = cur.get('name') + cur_ops = [] + if cur_name == new_name: + # nothing to do + continue + + if not cur_name: + errors.append("[nic not present] Cannot rename mac=%s to %s" + ", not available." % (mac, new_name)) + continue + + if cur['up']: + msg = "[busy] Error renaming mac=%s from %s to %s" + if not cur['downable']: + errors.append(msg % (mac, cur_name, new_name)) + continue + cur['up'] = False + cur_ops.append(("down", mac, new_name, (cur_name,))) + ups.append(("up", mac, new_name, (new_name,))) + + if new_name in cur_byname: + target = cur_byname[new_name] + if target['up']: + msg = "[busy-target] Error renaming mac=%s from %s to %s." 
+ if not target['downable']: + errors.append(msg % (mac, cur_name, new_name)) + continue + else: + cur_ops.append(("down", mac, new_name, (new_name,))) + + tmp_name = None + while tmp_name is None or tmp_name in cur_byname: + tmpi += 1 + tmp_name = tmpname_fmt % tmpi + + cur_ops.append(("rename", mac, new_name, (new_name, tmp_name))) + target['name'] = tmp_name + cur_byname = update_byname(cur_bymac) + if target['up']: + ups.append(("up", mac, new_name, (tmp_name,))) + + cur_ops.append(("rename", mac, new_name, (cur['name'], new_name))) + cur['name'] = new_name + cur_byname = update_byname(cur_bymac) + ops += cur_ops + + opmap = {'rename': rename, 'down': down, 'up': up} + if len(ops) + len(ups) == 0: + if len(errors): + LOG.debug("unable to do any work for renaming of %s", renames) + else: + LOG.debug("no work necessary for renaming of %s", renames) + else: + LOG.debug("achieving renaming of %s with ops %s", + renames, ops + ups) + + for op, mac, new_name, params in ops + ups: + try: + opmap.get(op)(*params) + except Exception as e: + errors.append( + "[unknown] Error performing %s%s for %s, %s: %s" % + (op, params, mac, new_name, e)) + if len(errors): + raise Exception('\n'.join(errors)) + + def apply_network_config_names(self, netcfg): + renames = [] + for ent in netcfg.get('config', {}): + if ent.get('type') != 'physical': + continue + mac = ent.get('mac_address') + name = ent.get('name') + if not mac: + continue + renames.append([mac, name]) + return self._rename_interfaces(renames) + + @classmethod + def generate_fallback_config(self): + nics = Distro.get_ifconfig_ether() + if nics is None: + LOG.debug("Fail to get network interfaces") + return None + potential_interfaces = nics.split() + connected = [] + for nic in potential_interfaces: + pat = "^" + nic + if_result = Distro.get_ifconfig_ifname_out(nic) + for item in if_result.split("\n"): + if re.match(pat, item): + flags = item.split('<')[1].split('>')[0] + if flags.find("RUNNING") != -1: + connected.append(nic) + if connected: + potential_interfaces = connected + names = list(sorted(potential_interfaces)) + default_pri_nic = Distro.default_primary_nic + if default_pri_nic in names: + names.remove(default_pri_nic) + names.insert(0, default_pri_nic) + target_name = None + target_mac = None + for name in names: + mac = Distro.get_interface_mac(name) + if mac: + target_name = name + target_mac = mac + break + if target_mac and target_name: + nconf = {'config': [], 'version': 1} + nconf['config'].append( + {'type': 'physical', 'name': target_name, + 'mac_address': target_mac, 'subnets': [{'type': 'dhcp'}]}) + return nconf + else: + return None + def _write_network(self, settings): entries = net_util.translate_network(settings) nameservers = [] diff --git a/cloudinit/settings.py b/cloudinit/settings.py index dbafead5..411960d8 100644 --- a/cloudinit/settings.py +++ b/cloudinit/settings.py @@ -39,7 +39,7 @@ CFG_BUILTIN = { ], 'def_log_file': '/var/log/cloud-init.log', 'log_cfgs': [], - 'syslog_fix_perms': ['syslog:adm', 'root:adm'], + 'syslog_fix_perms': ['syslog:adm', 'root:adm', 'root:wheel'], 'system_info': { 'paths': { 'cloud_dir': '/var/lib/cloud', diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 04358b73..5254e18a 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -10,6 +10,7 @@ import crypt from functools import partial import os import os.path +import re import time from xml.dom import minidom import xml.etree.ElementTree as ET @@ -32,19 
+33,160 @@ BOUNCE_COMMAND = [ # azure systems will always have a resource disk, and 66-azure-ephemeral.rules # ensures that it gets linked to this path. RESOURCE_DISK_PATH = '/dev/disk/cloud/azure_resource' +DEFAULT_PRIMARY_NIC = 'eth0' +LEASE_FILE = '/var/lib/dhcp/dhclient.eth0.leases' +DEFAULT_FS = 'ext4' + + +def find_storvscid_from_sysctl_pnpinfo(sysctl_out, deviceid): + # extract the 'X' from dev.storvsc.X. if deviceid matches + """ + dev.storvsc.1.%pnpinfo: + classid=32412632-86cb-44a2-9b5c-50d1417354f5 + deviceid=00000000-0001-8899-0000-000000000000 + """ + for line in sysctl_out.splitlines(): + if re.search(r"pnpinfo", line): + fields = line.split() + if len(fields) >= 3: + columns = fields[2].split('=') + if (len(columns) >= 2 and + columns[0] == "deviceid" and + columns[1].startswith(deviceid)): + comps = fields[0].split('.') + return comps[2] + return None + + +def find_busdev_from_disk(camcontrol_out, disk_drv): + # find the scbusX from 'camcontrol devlist -b' output + # if disk_drv matches the specified disk driver, i.e. blkvsc1 + """ + scbus0 on ata0 bus 0 + scbus1 on ata1 bus 0 + scbus2 on blkvsc0 bus 0 + scbus3 on blkvsc1 bus 0 + scbus4 on storvsc2 bus 0 + scbus5 on storvsc3 bus 0 + scbus-1 on xpt0 bus 0 + """ + for line in camcontrol_out.splitlines(): + if re.search(disk_drv, line): + items = line.split() + return items[0] + return None + + +def find_dev_from_busdev(camcontrol_out, busdev): + # find the daX from 'camcontrol devlist' output + # if busdev matches the specified value, i.e. 'scbus2' + """ + at scbus1 target 0 lun 0 (cd0,pass0) + at scbus2 target 0 lun 0 (da0,pass1) + at scbus3 target 1 lun 0 (da1,pass2) + """ + for line in camcontrol_out.splitlines(): + if re.search(busdev, line): + items = line.split('(') + if len(items) == 2: + dev_pass = items[1].split(',') + return dev_pass[0] + return None + + +def get_dev_storvsc_sysctl(): + try: + sysctl_out, err = util.subp(['sysctl', 'dev.storvsc']) + except util.ProcessExecutionError: + LOG.debug("Fail to execute sysctl dev.storvsc") + return None + return sysctl_out + + +def get_camcontrol_dev_bus(): + try: + camcontrol_b_out, err = util.subp(['camcontrol', 'devlist', '-b']) + except util.ProcessExecutionError: + LOG.debug("Fail to execute camcontrol devlist -b") + return None + return camcontrol_b_out + + +def get_camcontrol_dev(): + try: + camcontrol_out, err = util.subp(['camcontrol', 'devlist']) + except util.ProcessExecutionError: + LOG.debug("Fail to execute camcontrol devlist") + return None + return camcontrol_out + + +def get_resource_disk_on_freebsd(port_id): + g0 = "00000000" + if port_id > 1: + g0 = "00000001" + port_id = port_id - 2 + g1 = "000" + str(port_id) + g0g1 = "{0}-{1}".format(g0, g1) + """ + search 'X' from + 'dev.storvsc.X.%pnpinfo: + classid=32412632-86cb-44a2-9b5c-50d1417354f5 + deviceid=00000000-0001-8899-0000-000000000000' + """ + sysctl_out = get_dev_storvsc_sysctl() + + storvscid = find_storvscid_from_sysctl_pnpinfo(sysctl_out, g0g1) + if not storvscid: + LOG.debug("Fail to find storvsc id from sysctl") + return None + + camcontrol_b_out = get_camcontrol_dev_bus() + camcontrol_out = get_camcontrol_dev() + # try to find /dev/XX from 'blkvsc' device + blkvsc = "blkvsc{0}".format(storvscid) + scbusx = find_busdev_from_disk(camcontrol_b_out, blkvsc) + if scbusx: + devname = find_dev_from_busdev(camcontrol_out, scbusx) + if devname is None: + LOG.debug("Fail to find /dev/daX") + return None + return devname + # try to find /dev/XX from 'storvsc' device + storvsc = 
"storvsc{0}".format(storvscid) + scbusx = find_busdev_from_disk(camcontrol_b_out, storvsc) + if scbusx: + devname = find_dev_from_busdev(camcontrol_out, scbusx) + if devname is None: + LOG.debug("Fail to find /dev/daX") + return None + return devname + return None + +# update the FreeBSD specific information +if util.is_FreeBSD(): + DEFAULT_PRIMARY_NIC = 'hn0' + LEASE_FILE = '/var/db/dhclient.leases.hn0' + DEFAULT_FS = 'freebsd-ufs' + res_disk = get_resource_disk_on_freebsd(1) + if res_disk is not None: + LOG.debug("resource disk is not None") + RESOURCE_DISK_PATH = "/dev/" + res_disk + else: + LOG.debug("resource disk is None") BUILTIN_DS_CONFIG = { 'agent_command': AGENT_START_BUILTIN, 'data_dir': "/var/lib/waagent", 'set_hostname': True, 'hostname_bounce': { - 'interface': 'eth0', + 'interface': DEFAULT_PRIMARY_NIC, 'policy': True, 'command': BOUNCE_COMMAND, 'hostname_command': 'hostname', }, 'disk_aliases': {'ephemeral0': RESOURCE_DISK_PATH}, - 'dhclient_lease_file': '/var/lib/dhcp/dhclient.eth0.leases', + 'dhclient_lease_file': LEASE_FILE, } BUILTIN_CLOUD_CONFIG = { @@ -53,7 +195,7 @@ BUILTIN_CLOUD_CONFIG = { 'layout': [100], 'overwrite': True}, }, - 'fs_setup': [{'filesystem': 'ext4', + 'fs_setup': [{'filesystem': DEFAULT_FS, 'device': 'ephemeral0.1', 'replace_fs': 'ntfs'}], } @@ -190,7 +332,11 @@ class DataSourceAzureNet(sources.DataSource): for cdev in candidates: try: if cdev.startswith("/dev/"): - ret = util.mount_cb(cdev, load_azure_ds_dir) + if util.is_FreeBSD(): + ret = util.mount_cb(cdev, load_azure_ds_dir, + mtype="udf", sync=False) + else: + ret = util.mount_cb(cdev, load_azure_ds_dir) else: ret = load_azure_ds_dir(cdev) @@ -218,11 +364,12 @@ class DataSourceAzureNet(sources.DataSource): LOG.debug("using files cached in %s", ddir) # azure / hyper-v provides random data here - seed = util.load_file("/sys/firmware/acpi/tables/OEM0", - quiet=True, decode=False) - if seed: - self.metadata['random_seed'] = seed - + if not util.is_FreeBSD(): + seed = util.load_file("/sys/firmware/acpi/tables/OEM0", + quiet=True, decode=False) + if seed: + self.metadata['random_seed'] = seed + # TODO. 
find the seed on FreeBSD platform # now update ds_cfg to reflect contents pass in config user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {}) self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg]) @@ -633,8 +780,19 @@ def encrypt_pass(password, salt_id="$6$"): def list_possible_azure_ds_devs(): # return a sorted list of devices that might have a azure datasource devlist = [] - for fstype in ("iso9660", "udf"): - devlist.extend(util.find_devs_with("TYPE=%s" % fstype)) + if util.is_FreeBSD(): + cdrom_dev = "/dev/cd0" + try: + util.subp(["mount", "-o", "ro", "-t", "udf", cdrom_dev, + "/mnt/cdrom/secure"]) + except util.ProcessExecutionError: + LOG.debug("Fail to mount cd") + return devlist + util.subp(["umount", "/mnt/cdrom/secure"]) + devlist.append(cdrom_dev) + else: + for fstype in ("iso9660", "udf"): + devlist.extend(util.find_devs_with("TYPE=%s" % fstype)) devlist.sort(reverse=True) return devlist diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index 6e01aa47..e22409d1 100644 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -29,6 +29,14 @@ def cd(newdir): os.chdir(prevdir) +def _get_dhcp_endpoint_option_name(): + if util.is_FreeBSD(): + azure_endpoint = "option-245" + else: + azure_endpoint = "unknown-245" + return azure_endpoint + + class AzureEndpointHttpClient(object): headers = { @@ -235,8 +243,9 @@ class WALinuxAgentShim(object): leases = [] content = util.load_file(fallback_lease_file) LOG.debug("content is %s", content) + option_name = _get_dhcp_endpoint_option_name() for line in content.splitlines(): - if 'unknown-245' in line: + if option_name in line: # Example line from Ubuntu # option unknown-245 a8:3f:81:10; leases.append(line.strip(' ').split(' ', 2)[-1].strip(';\n"')) diff --git a/cloudinit/stages.py b/cloudinit/stages.py index f7191b09..ad557827 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -624,7 +624,7 @@ class Init(object): return (None, loc) if ncfg: return (ncfg, loc) - return (net.generate_fallback_config(), "fallback") + return (self.distro.generate_fallback_config(), "fallback") def apply_network_config(self, bring_up): netcfg, src = self._find_networking_config() diff --git a/cloudinit/util.py b/cloudinit/util.py index 22af99dd..27a98330 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -565,6 +565,10 @@ def is_ipv4(instr): return len(toks) == 4 +def is_FreeBSD(): + return system_info()['platform'].startswith('FreeBSD') + + def get_cfg_option_bool(yobj, key, default=False): if key not in yobj: return default @@ -2055,11 +2059,56 @@ def parse_mtab(path): return None +def find_freebsd_part(label_part): + if label_part.startswith("/dev/label/"): + target_label = label_part[5:] + (label_part, err) = subp(['glabel', 'status', '-s']) + for labels in label_part.split("\n"): + items = labels.split() + if len(items) > 0 and items[0].startswith(target_label): + label_part = items[2] + break + label_part = str(label_part) + return label_part + + +def get_path_dev_freebsd(path, mnt_list): + path_found = None + for line in mnt_list.split("\n"): + items = line.split() + if (len(items) > 2 and os.path.exists(items[1] + path)): + path_found = line + break + return path_found + + +def get_mount_info_freebsd(path, log=LOG): + (result, err) = subp(['mount', '-p', path], rcs=[0, 1]) + if len(err): + # find a path if the input is not a mounting point + (mnt_list, err) = subp(['mount', '-p']) + path_found = get_path_dev_freebsd(path, mnt_list) + if (path_found is None): + 
return None + result = path_found + ret = result.split() + label_part = find_freebsd_part(ret[0]) + return "/dev/" + label_part, ret[2], ret[1] + + def parse_mount(path): (mountoutput, _err) = subp("mount") mount_locs = mountoutput.splitlines() for line in mount_locs: m = re.search(r'^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$', line) + if not m: + continue + # check whether the dev refers to a label on FreeBSD + # for example, if dev is '/dev/label/rootfs', we should + # continue finding the real device like '/dev/da0'. + devm = re.search('^(/dev/.+)p([0-9])$', m.group(1)) + if (not devm and is_FreeBSD()): + return get_mount_info_freebsd(path) devpth = m.group(1) mount_point = m.group(2) fs_type = m.group(3) @@ -2336,7 +2385,8 @@ def read_dmi_data(key): uname_arch = os.uname()[4] if not (uname_arch == "x86_64" or (uname_arch.startswith("i") and uname_arch[2:] == "86") or - uname_arch == 'aarch64'): + uname_arch == 'aarch64' or + uname_arch == 'amd64'): LOG.debug("dmidata is not supported on %s", uname_arch) return None diff --git a/config/cloud.cfg-freebsd b/config/cloud.cfg-freebsd index be664f5d..d666c397 100644 --- a/config/cloud.cfg-freebsd +++ b/config/cloud.cfg-freebsd @@ -5,7 +5,7 @@ syslog_fix_perms: root:wheel # This should not be required, but leave it in place until the real cause of # not beeing able to find -any- datasources is resolved. -datasource_list: ['ConfigDrive', 'OpenStack', 'Ec2'] +datasource_list: ['ConfigDrive', 'Azure', 'OpenStack', 'Ec2'] # A set of users which may be applied and/or used by various modules # when a 'default' entry is found it will reference the 'default_user' diff --git a/setup.py b/setup.py index 32a44d94..4616599b 100755 --- a/setup.py +++ b/setup.py @@ -89,7 +89,6 @@ LIB = "/lib" if os.uname()[0] == 'FreeBSD': USR = "/usr/local" USR_LIB_EXEC = "/usr/local/lib" - ETC = "/usr/local/etc" elif os.path.isfile('/etc/redhat-release'): USR_LIB_EXEC = "/usr/libexec" @@ -164,8 +163,6 @@ else: (ETC + '/cloud', glob('config/*.cfg')), (ETC + '/cloud/cloud.cfg.d', glob('config/cloud.cfg.d/*')), (ETC + '/cloud/templates', glob('templates/*')), - (ETC + '/NetworkManager/dispatcher.d/', ['tools/hook-network-manager']), - (ETC + '/dhcp/dhclient-exit-hooks.d/', ['tools/hook-dhclient']), (USR_LIB_EXEC + '/cloud-init', ['tools/ds-identify', 'tools/uncloud-init', 'tools/write-ssh-key-fingerprints']), @@ -174,8 +171,14 @@ else: [f for f in glob('doc/examples/*') if is_f(f)]), (USR + '/share/doc/cloud-init/examples/seed', [f for f in glob('doc/examples/seed/*') if is_f(f)]), - (LIB + '/udev/rules.d', [f for f in glob('udev/*.rules')]), ] + if os.uname()[0] != 'FreeBSD': + data_files.extend([ + (ETC + '/NetworkManager/dispatcher.d/', + ['tools/hook-network-manager']), + (ETC + '/dhcp/dhclient-exit-hooks.d/', ['tools/hook-dhclient']), + (LIB + '/udev/rules.d', [f for f in glob('udev/*.rules')]) + ]) # Use a subclass for install that handles # adding on the right init system configuration files cmdclass = { diff --git a/sysvinit/freebsd/cloudconfig b/sysvinit/freebsd/cloudconfig index 01bc061e..e4064fa3 100755 --- a/sysvinit/freebsd/cloudconfig +++ b/sysvinit/freebsd/cloudconfig @@ -7,24 +7,14 @@ . 
/etc/rc.subr PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" -export CLOUD_CFG=/usr/local/etc/cloud/cloud.cfg name="cloudconfig" command="/usr/local/bin/cloud-init" start_cmd="cloudconfig_start" stop_cmd=":" rcvar="cloudinit_enable" -start_precmd="cloudinit_override" start_cmd="cloudconfig_start" -cloudinit_override() -{ - # If there exist sysconfig/defaults variable override files use it... - if [ -f /etc/defaults/cloud-init ]; then - . /etc/defaults/cloud-init - fi -} - cloudconfig_start() { echo "${command} starting" diff --git a/sysvinit/freebsd/cloudfinal b/sysvinit/freebsd/cloudfinal index 1b487aa0..b6894c39 100755 --- a/sysvinit/freebsd/cloudfinal +++ b/sysvinit/freebsd/cloudfinal @@ -7,24 +7,14 @@ . /etc/rc.subr PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" -export CLOUD_CFG=/usr/local/etc/cloud/cloud.cfg name="cloudfinal" command="/usr/local/bin/cloud-init" start_cmd="cloudfinal_start" stop_cmd=":" rcvar="cloudinit_enable" -start_precmd="cloudinit_override" start_cmd="cloudfinal_start" -cloudinit_override() -{ - # If there exist sysconfig/defaults variable override files use it... - if [ -f /etc/defaults/cloud-init ]; then - . /etc/defaults/cloud-init - fi -} - cloudfinal_start() { echo -n "${command} starting" diff --git a/sysvinit/freebsd/cloudinit b/sysvinit/freebsd/cloudinit index 862eeab4..33263009 100755 --- a/sysvinit/freebsd/cloudinit +++ b/sysvinit/freebsd/cloudinit @@ -7,24 +7,14 @@ . /etc/rc.subr PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" -export CLOUD_CFG=/usr/local/etc/cloud/cloud.cfg name="cloudinit" command="/usr/local/bin/cloud-init" start_cmd="cloudinit_start" stop_cmd=":" rcvar="cloudinit_enable" -start_precmd="cloudinit_override" start_cmd="cloudinit_start" -cloudinit_override() -{ - # If there exist sysconfig/defaults variable override files use it... - if [ -f /etc/defaults/cloud-init ]; then - . /etc/defaults/cloud-init - fi -} - cloudinit_start() { echo -n "${command} starting" diff --git a/sysvinit/freebsd/cloudinitlocal b/sysvinit/freebsd/cloudinitlocal index fb342a0f..11a5eb1c 100755 --- a/sysvinit/freebsd/cloudinitlocal +++ b/sysvinit/freebsd/cloudinitlocal @@ -1,30 +1,20 @@ #!/bin/sh # PROVIDE: cloudinitlocal -# REQUIRE: mountcritlocal +# REQUIRE: ldconfig mountcritlocal # BEFORE: NETWORKING FILESYSTEMS cloudinit cloudconfig cloudfinal . /etc/rc.subr PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" -export CLOUD_CFG=/usr/local/etc/cloud/cloud.cfg name="cloudinitlocal" command="/usr/local/bin/cloud-init" start_cmd="cloudlocal_start" stop_cmd=":" rcvar="cloudinit_enable" -start_precmd="cloudinit_override" start_cmd="cloudlocal_start" -cloudinit_override() -{ - # If there exist sysconfig/defaults variable override files use it... - if [ -f /etc/defaults/cloud-init ]; then - . 
/etc/defaults/cloud-init - fi -} - cloudlocal_start() { echo -n "${command} starting" diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 8d22bb59..e6b0dcb4 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -3,6 +3,8 @@ from cloudinit import helpers from cloudinit.util import b64e, decode_binary, load_file from cloudinit.sources import DataSourceAzure +from cloudinit.util import find_freebsd_part +from cloudinit.util import get_path_dev_freebsd from ..helpers import TestCase, populate_dir, mock, ExitStack, PY26, SkipTest @@ -95,6 +97,41 @@ class TestAzureDataSource(TestCase): for module, name, new in patches: self.patches.enter_context(mock.patch.object(module, name, new)) + def _get_mockds(self): + mod = DataSourceAzure + sysctl_out = "dev.storvsc.3.%pnpinfo: "\ + "classid=ba6163d9-04a1-4d29-b605-72e2ffb1dc7f "\ + "deviceid=f8b3781b-1e82-4818-a1c3-63d806ec15bb\n" + sysctl_out += "dev.storvsc.2.%pnpinfo: "\ + "classid=ba6163d9-04a1-4d29-b605-72e2ffb1dc7f "\ + "deviceid=f8b3781a-1e82-4818-a1c3-63d806ec15bb\n" + sysctl_out += "dev.storvsc.1.%pnpinfo: "\ + "classid=32412632-86cb-44a2-9b5c-50d1417354f5 "\ + "deviceid=00000000-0001-8899-0000-000000000000\n" + camctl_devbus = """ +scbus0 on ata0 bus 0 +scbus1 on ata1 bus 0 +scbus2 on blkvsc0 bus 0 +scbus3 on blkvsc1 bus 0 +scbus4 on storvsc2 bus 0 +scbus5 on storvsc3 bus 0 +scbus-1 on xpt0 bus 0 + """ + camctl_dev = """ + at scbus1 target 0 lun 0 (cd0,pass0) + at scbus2 target 0 lun 0 (da0,pass1) + at scbus3 target 1 lun 0 (da1,pass2) + """ + self.apply_patches([ + (mod, 'get_dev_storvsc_sysctl', mock.MagicMock( + return_value=sysctl_out)), + (mod, 'get_camcontrol_dev_bus', mock.MagicMock( + return_value=camctl_devbus)), + (mod, 'get_camcontrol_dev', mock.MagicMock( + return_value=camctl_dev)) + ]) + return mod + def _get_ds(self, data, agent_command=None): def dsdevs(): @@ -177,6 +214,34 @@ class TestAzureDataSource(TestCase): return raise AssertionError("XML is the same") + def test_get_resource_disk(self): + ds = self._get_mockds() + dev = ds.get_resource_disk_on_freebsd(1) + self.assertEqual("da1", dev) + + @mock.patch('cloudinit.util.subp') + def test_find_freebsd_part_on_Azure(self, mock_subp): + glabel_out = ''' +gptid/fa52d426-c337-11e6-8911-00155d4c5e47 N/A da0p1 + label/rootfs N/A da0p2 + label/swap N/A da0p3 +''' + mock_subp.return_value = (glabel_out, "") + res = find_freebsd_part("/dev/label/rootfs") + self.assertEqual("da0p2", res) + + def test_get_path_dev_freebsd_on_Azure(self): + mnt_list = ''' +/dev/label/rootfs / ufs rw 1 1 +devfs /dev devfs rw,multilabel 0 0 +fdescfs /dev/fd fdescfs rw 0 0 +/dev/da1s1 /mnt/resource ufs rw 2 2 +''' + with mock.patch.object(os.path, 'exists', + return_value=True): + res = get_path_dev_freebsd('/etc', mnt_list) + self.assertNotEqual(res, None) + def test_basic_seed_dir(self): odata = {'HostName': "myhost", 'UserName': "myuser"} data = {'ovfcontent': construct_valid_ovf_env(data=odata), diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py index aafdebd7..b2d2971b 100644 --- a/tests/unittests/test_datasource/test_azure_helper.py +++ b/tests/unittests/test_datasource/test_azure_helper.py @@ -3,7 +3,6 @@ import os from cloudinit.sources.helpers import azure as azure_helper - from ..helpers import ExitStack, mock, TestCase @@ -72,10 +71,11 @@ class TestFindEndpoint(TestCase): @staticmethod def 
_build_lease_content(encoded_address): + endpoint = azure_helper._get_dhcp_endpoint_option_name() return '\n'.join([ 'lease {', ' interface "eth0";', - ' option unknown-245 {0};'.format(encoded_address), + ' option {0} {1};'.format(endpoint, encoded_address), '}']) def test_from_dhcp_client(self): diff --git a/tests/unittests/test_datasource/test_cloudstack.py b/tests/unittests/test_datasource/test_cloudstack.py index e93d28de..1d3d2f19 100644 --- a/tests/unittests/test_datasource/test_cloudstack.py +++ b/tests/unittests/test_datasource/test_cloudstack.py @@ -15,6 +15,11 @@ class TestCloudStackPasswordFetching(TestCase): mod_name = 'cloudinit.sources.DataSourceCloudStack' self.patches.enter_context(mock.patch('{0}.ec2'.format(mod_name))) self.patches.enter_context(mock.patch('{0}.uhelp'.format(mod_name))) + default_gw = "192.201.20.0" + mod_name = 'cloudinit.sources.DataSourceCloudStack.get_default_gateway' + get_default_gw = mock.MagicMock(return_value=default_gw) + self.patches.enter_context( + mock.patch(mod_name, get_default_gw)) def _set_password_server_response(self, response_string): subp = mock.MagicMock(return_value=(response_string, '')) diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py index 88370669..1e10a33d 100644 --- a/tests/unittests/test_distros/test_netconfig.py +++ b/tests/unittests/test_distros/test_netconfig.py @@ -178,6 +178,20 @@ class WriteBuffer(object): class TestNetCfgDistro(TestCase): + frbsd_ifout = """\ +hn0: flags=8843 metric 0 mtu 1500 + options=51b + ether 00:15:5d:4c:73:00 + inet6 fe80::215:5dff:fe4c:7300%hn0 prefixlen 64 scopeid 0x2 + inet 10.156.76.127 netmask 0xfffffc00 broadcast 10.156.79.255 + nd6 options=23 + media: Ethernet autoselect (10Gbase-T ) + status: active +""" + + def setUp(self): + super(TestNetCfgDistro, self).setUp() + def _get_distro(self, dname, renderers=None): cls = distros.fetch(dname) cfg = settings.CFG_BUILTIN @@ -251,6 +265,7 @@ class TestNetCfgDistro(TestCase): def test_apply_network_config_v1_to_netplan_ub(self): renderers = ['netplan'] + devlist = ['eth0', 'lo'] ub_distro = self._get_distro('ubuntu', renderers=renderers) with ExitStack() as mocks: write_bufs = {} @@ -272,6 +287,9 @@ class TestNetCfgDistro(TestCase): mock.patch.object(util, 'subp', return_value=(0, 0))) mocks.enter_context( mock.patch.object(os.path, 'isfile', return_value=False)) + mocks.enter_context( + mock.patch("cloudinit.net.netplan.get_devicelist", + return_value=devlist)) ub_distro.apply_network_config(V1_NET_CFG, False) @@ -285,6 +303,7 @@ class TestNetCfgDistro(TestCase): def test_apply_network_config_v2_passthrough_ub(self): renderers = ['netplan'] + devlist = ['eth0', 'lo'] ub_distro = self._get_distro('ubuntu', renderers=renderers) with ExitStack() as mocks: write_bufs = {} @@ -306,7 +325,10 @@ class TestNetCfgDistro(TestCase): mock.patch.object(util, 'subp', return_value=(0, 0))) mocks.enter_context( mock.patch.object(os.path, 'isfile', return_value=False)) - + # FreeBSD does not have '/sys/class/net' file, + # so we need mock here. 
+ mocks.enter_context( + mock.patch.object(os, 'listdir', return_value=devlist)) ub_distro.apply_network_config(V2_NET_CFG, False) self.assertEqual(len(write_bufs), 1) @@ -328,6 +350,29 @@ class TestNetCfgDistro(TestCase): for (k, v) in b1.items(): self.assertEqual(v, b2[k]) + @mock.patch('cloudinit.distros.freebsd.Distro.get_ifconfig_list') + @mock.patch('cloudinit.distros.freebsd.Distro.get_ifconfig_ifname_out') + def test_get_ip_nic_freebsd(self, ifname_out, iflist): + frbsd_distro = self._get_distro('freebsd') + iflist.return_value = "lo0 hn0" + ifname_out.return_value = self.frbsd_ifout + res = frbsd_distro.get_ipv4() + self.assertEqual(res, ['lo0', 'hn0']) + res = frbsd_distro.get_ipv6() + self.assertEqual(res, []) + + @mock.patch('cloudinit.distros.freebsd.Distro.get_ifconfig_ether') + @mock.patch('cloudinit.distros.freebsd.Distro.get_ifconfig_ifname_out') + @mock.patch('cloudinit.distros.freebsd.Distro.get_interface_mac') + def test_generate_fallback_config_freebsd(self, mac, ifname_out, if_ether): + frbsd_distro = self._get_distro('freebsd') + + if_ether.return_value = 'hn0' + ifname_out.return_value = self.frbsd_ifout + mac.return_value = '00:15:5d:4c:73:00' + res = frbsd_distro.generate_fallback_config() + self.assertIsNotNone(res) + def test_simple_write_rh(self): rh_distro = self._get_distro('rhel') diff --git a/tests/unittests/test_handler/test_handler_resizefs.py b/tests/unittests/test_handler/test_handler_resizefs.py new file mode 100644 index 00000000..52591b8b --- /dev/null +++ b/tests/unittests/test_handler/test_handler_resizefs.py @@ -0,0 +1,59 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +from cloudinit.config import cc_resizefs + +import textwrap +import unittest + +try: + from unittest import mock +except ImportError: + import mock + + +class TestResizefs(unittest.TestCase): + def setUp(self): + super(TestResizefs, self).setUp() + self.name = "resizefs" + + @mock.patch('cloudinit.config.cc_resizefs._get_dumpfs_output') + @mock.patch('cloudinit.config.cc_resizefs._get_gpart_output') + def test_skip_ufs_resize(self, gpart_out, dumpfs_out): + fs_type = "ufs" + resize_what = "/" + devpth = "/dev/da0p2" + dumpfs_out.return_value = ( + "# newfs command for / (/dev/label/rootfs)\n" + "newfs -O 2 -U -a 4 -b 32768 -d 32768 -e 4096 " + "-f 4096 -g 16384 -h 64 -i 8192 -j -k 6408 -m 8 " + "-o time -s 58719232 /dev/label/rootfs\n") + gpart_out.return_value = textwrap.dedent("""\ + => 40 62914480 da0 GPT (30G) + 40 1024 1 freebsd-boot (512K) + 1064 58719232 2 freebsd-ufs (28G) + 58720296 3145728 3 freebsd-swap (1.5G) + 61866024 1048496 - free - (512M) + """) + res = cc_resizefs.can_skip_resize(fs_type, resize_what, devpth) + self.assertTrue(res) + + @mock.patch('cloudinit.config.cc_resizefs._get_dumpfs_output') + @mock.patch('cloudinit.config.cc_resizefs._get_gpart_output') + def test_skip_ufs_resize_roundup(self, gpart_out, dumpfs_out): + fs_type = "ufs" + resize_what = "/" + devpth = "/dev/da0p2" + dumpfs_out.return_value = ( + "# newfs command for / (/dev/label/rootfs)\n" + "newfs -O 2 -U -a 4 -b 32768 -d 32768 -e 4096 " + "-f 4096 -g 16384 -h 64 -i 8192 -j -k 368 -m 8 " + "-o time -s 297080 /dev/label/rootfs\n") + gpart_out.return_value = textwrap.dedent("""\ + => 34 297086 da0 GPT (145M) + 34 297086 1 freebsd-ufs (145M) + """) + res = cc_resizefs.can_skip_resize(fs_type, resize_what, devpth) + self.assertTrue(res) + + +# vi: ts=4 expandtab diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 89e75369..5d2dd031 
100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -1120,14 +1120,14 @@ class TestNetplanPostcommands(CiTestCase): render_target = 'netplan.yaml' renderer = netplan.Renderer( {'netplan_path': render_target, 'postcmds': True}) - renderer.render_network_state(render_dir, ns) - expected = [ mock.call(['netplan', 'generate'], capture=True), mock.call(['udevadm', 'test-builtin', 'net_setup_link', '/sys/class/net/lo'], capture=True), ] - mock_subp.assert_has_calls(expected) + with mock.patch.object(os.path, 'islink', return_value=True): + renderer.render_network_state(render_dir, ns) + mock_subp.assert_has_calls(expected) class TestEniNetworkStateToEni(CiTestCase): diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 5d21b4b7..189caca8 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -596,7 +596,8 @@ class TestSubp(helpers.TestCase): def test_subp_capture_stderr(self): data = b'hello world' (out, err) = util.subp(self.stdin2err, capture=True, - decode=False, data=data) + decode=False, data=data, + update_env={'LC_ALL': 'C'}) self.assertEqual(err, data) self.assertEqual(out, b'') diff --git a/tools/build-on-freebsd b/tools/build-on-freebsd index 8436498e..ccc10b40 100755 --- a/tools/build-on-freebsd +++ b/tools/build-on-freebsd @@ -10,9 +10,7 @@ depschecked=/tmp/c-i.dependencieschecked pkgs=" dmidecode e2fsprogs - gpart py27-Jinja2 - py27-argparse py27-boto py27-cheetah py27-configobj @@ -38,7 +36,7 @@ python setup.py build python setup.py install -O1 --skip-build --prefix /usr/local/ --init-system sysvinit_freebsd # Install the correct config file: -cp config/cloud.cfg-freebsd /usr/local/etc/cloud/cloud.cfg +cp config/cloud.cfg-freebsd /etc/cloud/cloud.cfg # Enable cloud-init in /etc/rc.conf: sed -i.bak -e "/cloudinit_enable=.*/d" /etc/rc.conf -- cgit v1.2.3 From dd03bb411c9a6f10854a3bbc3223b204c3d4d174 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Tue, 9 May 2017 20:23:05 -0600 Subject: sysconfig: Raise ValueError when multiple default gateways are present. Fixed setting Route.has_set_default_ipv6 or *_ipv4 to track whether a route already has a default gateway defined. The code was setting Route.has_set_default which wasn't checked when raising "duplicate gateway" ValueErrors. Added unit tests to exercise this expected raised ValueError. Also moved is_ipv6 = subnet.get('ipv6') logic out of a for loop because we don't need to recalculate the same value every route iteration. LP: #1687485 --- cloudinit/net/sysconfig.py | 14 ++++----- tests/unittests/test_net.py | 76 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 83 insertions(+), 7 deletions(-) diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 504e4d02..d981277a 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -232,12 +232,8 @@ class Renderer(renderer.Renderer): iface_cfg.name)) if 'netmask' in subnet: iface_cfg['NETMASK'] = subnet['netmask'] + is_ipv6 = subnet.get('ipv6') for route in subnet.get('routes', []): - if subnet.get('ipv6'): - gw_cfg = 'IPV6_DEFAULTGW' - else: - gw_cfg = 'GATEWAY' - if _is_default_route(route): if ( (subnet.get('ipv4') and @@ -258,8 +254,12 @@ class Renderer(renderer.Renderer): # also provided the default route? 
iface_cfg['DEFROUTE'] = True if 'gateway' in route: - iface_cfg[gw_cfg] = route['gateway'] - route_cfg.has_set_default = True + if is_ipv6: + iface_cfg['IPV6_DEFAULTGW'] = route['gateway'] + route_cfg.has_set_default_ipv6 = True + else: + iface_cfg['GATEWAY'] = route['gateway'] + route_cfg.has_set_default_ipv4 = True else: gw_key = 'GATEWAY%s' % route_cfg.last_idx nm_key = 'NETMASK%s' % route_cfg.last_idx diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 5d2dd031..7c5dc4ef 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -880,6 +880,82 @@ USERCTL=no """.lstrip() self.assertEqual(expected_content, content) + def test_multiple_ipv4_default_gateways(self): + """ValueError is raised when duplicate ipv4 gateways exist.""" + net_json = { + "services": [{"type": "dns", "address": "172.19.0.12"}], + "networks": [{ + "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4", + "type": "ipv4", "netmask": "255.255.252.0", + "link": "tap1a81968a-79", + "routes": [{ + "netmask": "0.0.0.0", + "network": "0.0.0.0", + "gateway": "172.19.3.254", + }, { + "netmask": "0.0.0.0", # A second default gateway + "network": "0.0.0.0", + "gateway": "172.20.3.254", + }], + "ip_address": "172.19.1.34", "id": "network0" + }], + "links": [ + { + "ethernet_mac_address": "fa:16:3e:ed:9a:59", + "mtu": None, "type": "bridge", "id": + "tap1a81968a-79", + "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f" + }, + ], + } + macs = {'fa:16:3e:ed:9a:59': 'eth0'} + render_dir = self.tmp_dir() + network_cfg = openstack.convert_net_json(net_json, known_macs=macs) + ns = network_state.parse_net_config_data(network_cfg, + skip_broken=False) + renderer = sysconfig.Renderer() + with self.assertRaises(ValueError): + renderer.render_network_state(ns, render_dir) + self.assertEqual([], os.listdir(render_dir)) + + def test_multiple_ipv6_default_gateways(self): + """ValueError is raised when duplicate ipv6 gateways exist.""" + net_json = { + "services": [{"type": "dns", "address": "172.19.0.12"}], + "networks": [{ + "network_id": "public-ipv6", + "type": "ipv6", "netmask": "", + "link": "tap1a81968a-79", + "routes": [{ + "gateway": "2001:DB8::1", + "netmask": "::", + "network": "::" + }, { + "gateway": "2001:DB9::1", + "netmask": "::", + "network": "::" + }], + "ip_address": "2001:DB8::10", "id": "network1" + }], + "links": [ + { + "ethernet_mac_address": "fa:16:3e:ed:9a:59", + "mtu": None, "type": "bridge", "id": + "tap1a81968a-79", + "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f" + }, + ], + } + macs = {'fa:16:3e:ed:9a:59': 'eth0'} + render_dir = self.tmp_dir() + network_cfg = openstack.convert_net_json(net_json, known_macs=macs) + ns = network_state.parse_net_config_data(network_cfg, + skip_broken=False) + renderer = sysconfig.Renderer() + with self.assertRaises(ValueError): + renderer.render_network_state(ns, render_dir) + self.assertEqual([], os.listdir(render_dir)) + def test_openstack_rendering_samples(self): for os_sample in OS_SAMPLES: render_dir = self.tmp_dir() -- cgit v1.2.3 From 9d437489b8ce1f8cd9d34cd9ff4994ca18bd2d78 Mon Sep 17 00:00:00 2001 From: Julien Castets Date: Tue, 16 May 2017 09:19:11 +0000 Subject: Add address to config entry generated by _klibc_to_config_entry. If /run/net-.cfg contains an IPV4ADDR or an IPV6ADDR, the config file generated by _klibc_to_config_entry now contains the "address". 
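For illustration (a sketch with made-up values, not part of the change):
given klibc-written shell content describing a static IPv4 device, the
parsed subnet entry now carries the address alongside the other keys
copied from the file.

    # Hypothetical content of a klibc-written net cfg file for eth0:
    content = ('DEVICE=eth0\nPROTO=static\n'
               'IPV4ADDR=10.0.0.2\nIPV4NETMASK=255.255.255.0\n'
               'IPV4GATEWAY=10.0.0.1\n')
    # _klibc_to_config_entry(content) now yields a subnet roughly like:
    # {'type': 'static', 'control': 'manual', 'address': '10.0.0.2',
    #  'netmask': '255.255.255.0', 'gateway': '10.0.0.1'}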
LP: #1691135 --- cloudinit/net/cmdline.py | 5 +++++ tests/unittests/test_net.py | 3 ++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py index 7c5d11a7..61e23697 100755 --- a/cloudinit/net/cmdline.py +++ b/cloudinit/net/cmdline.py @@ -100,6 +100,11 @@ def _klibc_to_config_entry(content, mac_addrs=None): cur_proto = data.get(pre + 'PROTO', proto) subnet = {'type': cur_proto, 'control': 'manual'} + # only populate address for static types. While the rendered config + # may have an address for dhcp, that is not really expected. + if cur_proto == 'static': + subnet['address'] = data[pre + 'ADDR'] + # these fields go right on the subnet for key in ('NETMASK', 'BROADCAST', 'GATEWAY'): if pre + key in data: diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 7c5dc4ef..052c4016 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -100,7 +100,8 @@ STATIC_EXPECTED_1 = { 'gateway': '10.0.0.1', 'dns_search': ['foo.com'], 'type': 'static', 'netmask': '255.255.255.0', - 'dns_nameservers': ['10.0.1.1']}], + 'dns_nameservers': ['10.0.1.1'], + 'address': '10.0.0.2'}], } # Examples (and expected outputs for various renderers). -- cgit v1.2.3 From 4bcc947301bedc5ebf430cfaf6e4597bfb174aa7 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 16 May 2017 13:43:55 -0400 Subject: Improve detection of snappy to include os-release and kernel cmdline. Recent core snap images (edge channel revision 1886) do not contain the previously known files used to detect that a system is ubuntu core. The changes here are to look in 2 additional locations to determine if a system is snappy. LP: #1689944 --- cloudinit/net/cmdline.py | 31 +------------------- cloudinit/util.py | 37 ++++++++++++++++++++++++ tests/unittests/helpers.py | 7 ++--- tests/unittests/test_util.py | 69 ++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 110 insertions(+), 34 deletions(-) diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py index 61e23697..38b27a52 100755 --- a/cloudinit/net/cmdline.py +++ b/cloudinit/net/cmdline.py @@ -9,41 +9,12 @@ import base64 import glob import gzip import io -import shlex -import sys - -import six from . import get_devicelist from . import read_sys_net_safe from cloudinit import util -PY26 = sys.version_info[0:2] == (2, 6) - - -def _shlex_split(blob): - if PY26 and isinstance(blob, six.text_type): - # Older versions don't support unicode input - blob = blob.encode("utf8") - return shlex.split(blob) - - -def _load_shell_content(content, add_empty=False, empty_val=None): - """Given shell like syntax (key=value\nkey2=value2\n) in content - return the data in dictionary form. If 'add_empty' is True - then add entries in to the returned dictionary for 'VAR=' - variables. 
Set their value to empty_val.""" - data = {} - for line in _shlex_split(content): - key, value = line.split("=", 1) - if not value: - value = empty_val - if add_empty or value: - data[key] = value - - return data - def _klibc_to_config_entry(content, mac_addrs=None): """Convert a klibc written shell content file to a 'config' entry @@ -63,7 +34,7 @@ def _klibc_to_config_entry(content, mac_addrs=None): if mac_addrs is None: mac_addrs = {} - data = _load_shell_content(content) + data = util.load_shell_content(content) try: name = data['DEVICE'] if 'DEVICE' in data else data['DEVICE6'] except KeyError: diff --git a/cloudinit/util.py b/cloudinit/util.py index 27a98330..67ff7ba3 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -24,6 +24,7 @@ import platform import pwd import random import re +import shlex import shutil import socket import stat @@ -75,6 +76,7 @@ CONTAINER_TESTS = (['systemd-detect-virt', '--quiet', '--container'], PROC_CMDLINE = None _LSB_RELEASE = {} +PY26 = sys.version_info[0:2] == (2, 6) def get_architecture(target=None): @@ -2424,6 +2426,18 @@ def system_is_snappy(): # channel.ini is configparser loadable. # snappy will move to using /etc/system-image/config.d/*.ini # this is certainly not a perfect test, but good enough for now. + orpath = "/etc/os-release" + try: + orinfo = load_shell_content(load_file(orpath, quiet=True)) + if orinfo.get('ID', '').lower() == "ubuntu-core": + return True + except ValueError as e: + LOG.warning("Unexpected error loading '%s': %s", orpath, e) + + cmdline = get_cmdline() + if 'snap_core=' in cmdline: + return True + content = load_file("/etc/system-image/channel.ini", quiet=True) if 'ubuntu-core' in content.lower(): return True @@ -2470,4 +2484,27 @@ def rootdev_from_cmdline(cmdline): return "/dev/" + found +def load_shell_content(content, add_empty=False, empty_val=None): + """Given shell like syntax (key=value\nkey2=value2\n) in content + return the data in dictionary form. If 'add_empty' is True + then add entries in to the returned dictionary for 'VAR=' + variables. 
Set their value to empty_val.""" + + def _shlex_split(blob): + if PY26 and isinstance(blob, six.text_type): + # Older versions don't support unicode input + blob = blob.encode("utf8") + return shlex.split(blob) + + data = {} + for line in _shlex_split(content): + key, value = line.split("=", 1) + if not value: + value = empty_val + if add_empty or value: + data[key] = value + + return data + + # vi: ts=4 expandtab diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py index a711404c..d24f817d 100644 --- a/tests/unittests/helpers.py +++ b/tests/unittests/helpers.py @@ -106,7 +106,7 @@ class CiTestCase(TestCase): return os.path.normpath(os.path.abspath(os.path.join(dir, path))) -class ResourceUsingTestCase(TestCase): +class ResourceUsingTestCase(CiTestCase): def setUp(self): super(ResourceUsingTestCase, self).setUp() self.resource_path = None @@ -229,8 +229,7 @@ class FilesystemMockingTestCase(ResourceUsingTestCase): def reRoot(self, root=None): if root is None: - root = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, root) + root = self.tmp_dir() self.patchUtils(root) self.patchOS(root) return root @@ -256,7 +255,7 @@ def populate_dir(path, files): os.makedirs(path) ret = [] for (name, content) in files.items(): - p = os.path.join(path, name) + p = os.path.sep.join([path, name]) util.ensure_dir(os.path.dirname(p)) with open(p, "wb") as fp: if isinstance(content, six.binary_type): diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 189caca8..490760d1 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -712,4 +712,73 @@ class TestProcessExecutionError(helpers.TestCase): )).format(description=self.empty_description, empty_attr=self.empty_attr)) + +class TestSystemIsSnappy(helpers.FilesystemMockingTestCase): + def test_id_in_os_release_quoted(self): + """os-release containing ID="ubuntu-core" is snappy.""" + orcontent = '\n'.join(['ID="ubuntu-core"', '']) + root_d = self.tmp_dir() + helpers.populate_dir(root_d, {'etc/os-release': orcontent}) + self.reRoot(root_d) + self.assertTrue(util.system_is_snappy()) + + def test_id_in_os_release(self): + """os-release containing ID=ubuntu-core is snappy.""" + orcontent = '\n'.join(['ID=ubuntu-core', '']) + root_d = self.tmp_dir() + helpers.populate_dir(root_d, {'etc/os-release': orcontent}) + self.reRoot(root_d) + self.assertTrue(util.system_is_snappy()) + + @mock.patch('cloudinit.util.get_cmdline') + def test_bad_content_in_os_release_no_effect(self, m_cmdline): + """malformed os-release should not raise exception.""" + m_cmdline.return_value = 'root=/dev/sda' + orcontent = '\n'.join(['IDubuntu-core', '']) + root_d = self.tmp_dir() + helpers.populate_dir(root_d, {'etc/os-release': orcontent}) + self.reRoot() + self.assertFalse(util.system_is_snappy()) + + @mock.patch('cloudinit.util.get_cmdline') + def test_snap_core_in_cmdline_is_snappy(self, m_cmdline): + """The string snap_core= in kernel cmdline indicates snappy.""" + cmdline = ( + "BOOT_IMAGE=(loop)/kernel.img root=LABEL=writable " + "snap_core=core_x1.snap snap_kernel=pc-kernel_x1.snap ro " + "net.ifnames=0 init=/lib/systemd/systemd console=tty1 " + "console=ttyS0 panic=-1") + m_cmdline.return_value = cmdline + self.assertTrue(util.system_is_snappy()) + self.assertTrue(m_cmdline.call_count > 0) + + @mock.patch('cloudinit.util.get_cmdline') + def test_nothing_found_is_not_snappy(self, m_cmdline): + """If no positive identification, then not snappy.""" + m_cmdline.return_value = 'root=/dev/sda' + self.reRoot() + 
self.assertFalse(util.system_is_snappy()) + self.assertTrue(m_cmdline.call_count > 0) + + @mock.patch('cloudinit.util.get_cmdline') + def test_channel_ini_with_snappy_is_snappy(self, m_cmdline): + """A Channel.ini file with 'ubuntu-core' indicates snappy.""" + m_cmdline.return_value = 'root=/dev/sda' + root_d = self.tmp_dir() + content = '\n'.join(["[Foo]", "source = 'ubuntu-core'", ""]) + helpers.populate_dir( + root_d, {'etc/system-image/channel.ini': content}) + self.reRoot(root_d) + self.assertTrue(util.system_is_snappy()) + + @mock.patch('cloudinit.util.get_cmdline') + def test_system_image_config_dir_is_snappy(self, m_cmdline): + """Existence of /etc/system-image/config.d indicates snappy.""" + m_cmdline.return_value = 'root=/dev/sda' + root_d = self.tmp_dir() + helpers.populate_dir( + root_d, {'etc/system-image/config.d/my.file': "_unused"}) + self.reRoot(root_d) + self.assertTrue(util.system_is_snappy()) + # vi: ts=4 expandtab -- cgit v1.2.3 From 6edf0c72ce11e64627d3db6a0c4fd74a81039ed4 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Thu, 27 Apr 2017 14:31:48 -0600 Subject: unittests: fix unittests run on centos Apt related tests were broken when running on centos because apt is not available. This fixes the unit test, with a small re-work of apt_configure. Also in 'tox -e centos6' only run nose on tests/unittests as tests/ also contains integration tests that should not be run. --- cloudinit/config/cc_apt_configure.py | 19 +++-- .../test_handler_apt_configure_sources_list_v3.py | 94 +++++++++++++++++----- .../test_handler/test_handler_yum_add_repo.py | 4 +- tox.ini | 2 +- 4 files changed, 88 insertions(+), 31 deletions(-) diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index f6885d7c..177cbcf7 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -282,16 +282,21 @@ def handle(name, ocfg, cloud, log, _): apply_apt(cfg, cloud, target) +def _should_configure_on_empty_apt(): + # if no config was provided, should apt configuration be done? + if util.system_is_snappy(): + return False, "system is snappy." + if not (util.which('apt-get') or util.which('apt')): + return False, "no apt commands." + return True, "Apt is available." + + def apply_apt(cfg, cloud, target): # cfg is the 'apt' top level dictionary already in 'v3' format. if not cfg: - # no config was provided. If apt configuration does not seem - necessary on this system, then return.
- if util.system_is_snappy(): - LOG.debug("Nothing to do: No apt config and running on snappy") - return - if not (util.which('apt-get') or util.which('apt')): - LOG.debug("Nothing to do: No apt config and no apt commands") + should_config, msg = _should_configure_on_empty_apt() + if not should_config: + LOG.debug("Nothing to do: No apt config and %s", msg) return LOG.debug("handling apt config: %s", cfg) diff --git a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py b/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py index 24e45233..1ca915b4 100644 --- a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py +++ b/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py @@ -121,39 +121,82 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase): myds.metadata.update(metadata) return cloud.Cloud(myds, paths, {}, mydist, None) - def _apt_source_list(self, cfg, expected, distro): - "_apt_source_list - Test rendering from template (generic)" - + def _apt_source_list(self, distro, cfg, cfg_on_empty=False): + """_apt_source_list - Test rendering from template (generic)""" # entry at top level now, wrap in 'apt' key cfg = {'apt': cfg} mycloud = self._get_cloud(distro) - with mock.patch.object(util, 'write_file') as mockwf: + + with mock.patch.object(util, 'write_file') as mock_writefile: with mock.patch.object(util, 'load_file', - return_value=MOCKED_APT_SRC_LIST) as mocklf: + return_value=MOCKED_APT_SRC_LIST + ) as mock_loadfile: with mock.patch.object(os.path, 'isfile', - return_value=True) as mockisfile: - with mock.patch.object(util, 'rename'): - cc_apt_configure.handle("test", cfg, mycloud, - LOG, None) - - # check if it would have loaded the distro template - mockisfile.assert_any_call( - ('/etc/cloud/templates/sources.list.%s.tmpl' % distro)) - mocklf.assert_any_call( - ('/etc/cloud/templates/sources.list.%s.tmpl' % distro)) - # check expected content in result - mockwf.assert_called_once_with('/etc/apt/sources.list', expected, - mode=0o644) + return_value=True) as mock_isfile: + cfg_func = ('cloudinit.config.cc_apt_configure.' 
+ + '_should_configure_on_empty_apt') + with mock.patch(cfg_func, + return_value=(cfg_on_empty, "test") + ) as mock_shouldcfg: + cc_apt_configure.handle("test", cfg, mycloud, LOG, + None) + + return mock_writefile, mock_loadfile, mock_isfile, mock_shouldcfg def test_apt_v3_source_list_debian(self): """test_apt_v3_source_list_debian - without custom sources or parms""" cfg = {} - self._apt_source_list(cfg, EXPECTED_BASE_CONTENT, 'debian') + distro = 'debian' + expected = EXPECTED_BASE_CONTENT + + mock_writefile, mock_load_file, mock_isfile, mock_shouldcfg = ( + self._apt_source_list(distro, cfg, cfg_on_empty=True)) + + template = '/etc/cloud/templates/sources.list.%s.tmpl' % distro + mock_writefile.assert_called_once_with('/etc/apt/sources.list', + expected, mode=0o644) + mock_load_file.assert_called_with(template) + mock_isfile.assert_any_call(template) + self.assertEqual(1, mock_shouldcfg.call_count) def test_apt_v3_source_list_ubuntu(self): """test_apt_v3_source_list_ubuntu - without custom sources or parms""" cfg = {} - self._apt_source_list(cfg, EXPECTED_BASE_CONTENT, 'ubuntu') + distro = 'ubuntu' + expected = EXPECTED_BASE_CONTENT + + mock_writefile, mock_load_file, mock_isfile, mock_shouldcfg = ( + self._apt_source_list(distro, cfg, cfg_on_empty=True)) + + template = '/etc/cloud/templates/sources.list.%s.tmpl' % distro + mock_writefile.assert_called_once_with('/etc/apt/sources.list', + expected, mode=0o644) + mock_load_file.assert_called_with(template) + mock_isfile.assert_any_call(template) + self.assertEqual(1, mock_shouldcfg.call_count) + + def test_apt_v3_source_list_ubuntu_snappy(self): + """test_apt_v3_source_list_ubuntu_snappy - without custom sources or + parms""" + cfg = {'apt': {}} + mycloud = self._get_cloud('ubuntu') + + with mock.patch.object(util, 'write_file') as mock_writefile: + with mock.patch.object(util, 'system_is_snappy', + return_value=True) as mock_issnappy: + cc_apt_configure.handle("test", cfg, mycloud, LOG, None) + + self.assertEqual(0, mock_writefile.call_count) + self.assertEqual(1, mock_issnappy.call_count) + + def test_apt_v3_source_list_centos(self): + """test_apt_v3_source_list_centos - without custom sources or parms""" + cfg = {} + distro = 'rhel' + + mock_writefile, _, _, _ = self._apt_source_list(distro, cfg) + + self.assertEqual(0, mock_writefile.call_count) def test_apt_v3_source_list_psm(self): """test_apt_v3_source_list_psm - Test specifying prim+sec mirrors""" @@ -164,8 +207,17 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase): 'uri': pm}], 'security': [{'arches': ["default"], 'uri': sm}]} + distro = 'ubuntu' + expected = EXPECTED_PRIMSEC_CONTENT + + mock_writefile, mock_load_file, mock_isfile, _ = ( + self._apt_source_list(distro, cfg, cfg_on_empty=True)) - self._apt_source_list(cfg, EXPECTED_PRIMSEC_CONTENT, 'ubuntu') + template = '/etc/cloud/templates/sources.list.%s.tmpl' % distro + mock_writefile.assert_called_once_with('/etc/apt/sources.list', + expected, mode=0o644) + mock_load_file.assert_called_with(template) + mock_isfile.assert_any_call(template) def test_apt_v3_srcl_custom(self): """test_apt_v3_srcl_custom - Test rendering a custom source template""" diff --git a/tests/unittests/test_handler/test_handler_yum_add_repo.py b/tests/unittests/test_handler/test_handler_yum_add_repo.py index 4815bdb6..c4396df5 100644 --- a/tests/unittests/test_handler/test_handler_yum_add_repo.py +++ b/tests/unittests/test_handler/test_handler_yum_add_repo.py @@ -72,7 +72,7 @@ class TestConfig(helpers.FilesystemMockingTestCase): } 
for section in expected: self.assertTrue(parser.has_section(section), - "Contains section {}".format(section)) + "Contains section {0}".format(section)) for k, v in expected[section].items(): self.assertEqual(parser.get(section, k), v) @@ -109,7 +109,7 @@ class TestConfig(helpers.FilesystemMockingTestCase): } for section in expected: self.assertTrue(parser.has_section(section), - "Contains section {}".format(section)) + "Contains section {0}".format(section)) for k, v in expected[section].items(): self.assertEqual(parser.get(section, k), v) diff --git a/tox.ini b/tox.ini index bf9046af..826f554e 100644 --- a/tox.ini +++ b/tox.ini @@ -70,7 +70,7 @@ deps = [testenv:centos6] basepython = python2.6 -commands = nosetests {posargs:tests} +commands = nosetests {posargs:tests/unittests} deps = # requirements argparse==1.2.1 -- cgit v1.2.3 From 9a178611b89ca198145c200718ab8f205ece398c Mon Sep 17 00:00:00 2001 From: Lars Kellogg-Stedman Date: Thu, 11 May 2017 21:23:50 -0400 Subject: openstack: fix log message copy/paste typo in _get_url_settings There was a copy/paste error in _get_url_settings such that the error message would complain about max wait when in fact it was talking about retries. --- cloudinit/sources/DataSourceOpenStack.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py index f0a6bfce..b64a7f24 100644 --- a/cloudinit/sources/DataSourceOpenStack.py +++ b/cloudinit/sources/DataSourceOpenStack.py @@ -60,7 +60,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): try: retries = int(self.ds_cfg.get("retries", retries)) except Exception: - util.logexc(LOG, "Failed to get max wait. using %s", retries) + util.logexc(LOG, "Failed to get retries. using %s", retries) return (max_wait, timeout, retries) -- cgit v1.2.3 From 64a3df16d9c63db470a3ba55d9c5cc8e05d050d7 Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Mon, 24 Apr 2017 16:40:35 -0500 Subject: tools/net-convert: fix argument order for render_network_state We're calling Renderer.render_network_state() with incorrect args. % PYTHONPATH=`pwd` ./tools/net-convert.py \ --network-data=simple-v2.yaml --kind=yaml \ --output-kind netplan --directory ./target Traceback (most recent call last): File "./tools/net-convert.py", line 82, in <module> main() File "./tools/net-convert.py", line 78, in main r.render_network_state(ns, target=args.directory) TypeError: render_network_state() got multiple values for argument 'target' The method signature requires passing <network_state>, <target>. This patch fixes the call order. LP: #1685944 --- tools/net-convert.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/net-convert.py b/tools/net-convert.py index 870da639..ec4ee645 100755 --- a/tools/net-convert.py +++ b/tools/net-convert.py @@ -75,7 +75,7 @@ def main(): r_cls = sysconfig.Renderer r = r_cls() - r.render_network_state(ns, target=args.directory) + r.render_network_state(args.directory, ns) if __name__ == '__main__': -- cgit v1.2.3 From 8f176479c0282e564a87036704c515f746aab3a9 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Mon, 24 Apr 2017 17:57:04 +0000 Subject: make deb: Add devscripts dependency for make deb. Cleanup packages/bddeb. Add a simple dependency check to "make deb" target for devscripts. Rework a bit of the logic in packages/bddeb to drop superfluous STD_NAMED_PACKAGES to avoid duplication of requirements already listed in (test-)?requirements.txt.
All "standard" packages can be assumed to have either python3- or python- prefix if not listed in NONSTD_NAMED_PACKAGES. This branch also moves logic inside write_debian_folder which is unneeded up in main. LP: #1685935 --- Makefile | 4 +++ packages/bddeb | 100 +++++++++++++++++++++------------------------------------ 2 files changed, 40 insertions(+), 64 deletions(-) diff --git a/Makefile b/Makefile index 5940ed7e..09cd1479 100644 --- a/Makefile +++ b/Makefile @@ -82,6 +82,10 @@ rpm: ./packages/brpm --distro $(distro) deb: + @which debuild || \ + { echo "Missing devscripts dependency. Install with:"; \ + echo sudo apt-get install devscripts; exit 1; } + ./packages/bddeb .PHONY: test pyflakes pyflakes3 clean pep8 rpm deb yaml check_version diff --git a/packages/bddeb b/packages/bddeb index 79ac9768..f415209f 100755 --- a/packages/bddeb +++ b/packages/bddeb @@ -24,35 +24,17 @@ if "avoid-pep8-E402-import-not-top-of-file": from cloudinit import templater from cloudinit import util -# Package names that will showup in requires to what we can actually -# use in our debian 'control' file, this is a translation of the 'requires' -# file pypi package name to a debian/ubuntu package name. -STD_NAMED_PACKAGES = [ - 'configobj', - 'coverage', - 'jinja2', - 'jsonpatch', - 'oauthlib', - 'prettytable', - 'requests', - 'six', - 'httpretty', - 'mock', - 'nose', - 'setuptools', - 'flake8', - 'hacking', - 'unittest2', -] +# Package names that will showup in requires which have unique package names. +# Format is '': {'': , ...}. NONSTD_NAMED_PACKAGES = { - 'argparse': ('python-argparse', None), - 'contextlib2': ('python-contextlib2', None), - 'cheetah': ('python-cheetah', None), - 'pyserial': ('python-serial', 'python3-serial'), - 'pyyaml': ('python-yaml', 'python3-yaml'), - 'six': ('python-six', 'python3-six'), - 'pep8': ('pep8', 'python3-pep8'), - 'pyflakes': ('pyflakes', 'pyflakes'), + 'argparse': {'2': 'python-argparse', '3': None}, + 'contextlib2': {'2': 'python-contextlib2', '3': None}, + 'cheetah': {'2': 'python-cheetah', '3': None}, + 'pyserial': {'2': 'python-serial', '3': 'python3-serial'}, + 'pyyaml': {'2': 'python-yaml', '3': 'python3-yaml'}, + 'six': {'2': 'python-six', '3': 'python3-six'}, + 'pep8': {'2': 'pep8', '3': 'python3-pep8'}, + 'pyflakes': {'2': 'pyflakes', '3': 'pyflakes'}, } DEBUILD_ARGS = ["-S", "-d"] @@ -68,8 +50,17 @@ def run_helper(helper, args=None, strip=True): return stdout -def write_debian_folder(root, templ_data, pkgmap, pyver="3", - append_requires=[]): +def write_debian_folder(root, templ_data, is_python2, cloud_util_deps): + """Create a debian package directory with all rendered template files.""" + print("Creating a debian/ folder in %r" % (root)) + if is_python2: + pyver = "2" + python = "python" + else: + pyver = "3" + python = "python3" + pkgfmt = "{}-{}" + deb_dir = util.abs_join(root, 'debian') # Just copy debian/ dir and then update files @@ -91,21 +82,16 @@ def write_debian_folder(root, templ_data, pkgmap, pyver="3", pypi_test_pkgs = [p.lower().strip() for p in test_reqs] # Map to known packages - requires = append_requires + requires = ['cloud-utils | cloud-guest-utils'] if cloud_util_deps else [] test_requires = [] lists = ((pypi_pkgs, requires), (pypi_test_pkgs, test_requires)) for pypilist, target in lists: for p in pypilist: - if p not in pkgmap: - raise RuntimeError(("Do not know how to translate pypi " - "dependency %r to a known package") % (p)) - elif pkgmap[p]: - target.append(pkgmap[p]) - - if pyver == "3": - python = "python3" - else: - python = 
"python" + if p in NONSTD_NAMED_PACKAGES: + if NONSTD_NAMED_PACKAGES[p][pyver]: + target.append(NONSTD_NAMED_PACKAGES[p][pyver]) + else: # Then standard package prefix + target.append(pkgfmt.format(python, p)) templater.render_to_file(util.abs_join(find_root(), 'packages', 'debian', 'control.in'), @@ -124,8 +110,8 @@ def read_version(): return json.loads(run_helper('read-version', ['--json'])) -def main(): - +def get_parser(): + """Setup and return an argument parser for bdeb tool.""" parser = argparse.ArgumentParser() parser.add_argument("-v", "--verbose", dest="verbose", help=("run verbosely" @@ -162,7 +148,11 @@ def main(): parser.add_argument("--signuser", default=False, action='store', help="user to sign, see man dpkg-genchanges") + return parser + +def main(): + parser = get_parser() args = parser.parse_args() if not args.sign: @@ -177,18 +167,6 @@ def main(): if args.verbose: capture = False - pkgmap = {} - for p in NONSTD_NAMED_PACKAGES: - pkgmap[p] = NONSTD_NAMED_PACKAGES[p][int(not args.python2)] - - for p in STD_NAMED_PACKAGES: - if args.python2: - pkgmap[p] = "python-" + p - pyver = "2" - else: - pkgmap[p] = "python3-" + p - pyver = "3" - templ_data = {'debian_release': args.release} with util.tempdir() as tdir: @@ -208,16 +186,10 @@ def main(): util.subp(cmd, capture=capture) xdir = util.abs_join(tdir, "cloud-init-%s" % ver_data['version_long']) - - print("Creating a debian/ folder in %r" % (xdir)) - if args.cloud_utils: - append_requires = ['cloud-utils | cloud-guest-utils'] - else: - append_requires = [] - templ_data.update(ver_data) - write_debian_folder(xdir, templ_data, pkgmap, - pyver=pyver, append_requires=append_requires) + + write_debian_folder(xdir, templ_data, is_python2=args.python2, + cloud_util_deps=args.cloud_utils) print("Running 'debuild %s' in %r" % (' '.join(args.debuild_args), xdir)) -- cgit v1.2.3 From f4d3ca43d8a8b1da136c9c07fa9cd0a08c5e3dba Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 16 May 2017 16:51:25 -0400 Subject: Revert "tools/net-convert: fix argument order for render_network_state" This reverts commit 64a3df16d9c63db470a3ba55d9c5cc8e05d050d7. Patch shouldn't have been pulled. The issue was inside the netplan renderer. --- tools/net-convert.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/net-convert.py b/tools/net-convert.py index ec4ee645..870da639 100755 --- a/tools/net-convert.py +++ b/tools/net-convert.py @@ -75,7 +75,7 @@ def main(): r_cls = sysconfig.Renderer r = r_cls() - r.render_network_state(args.directory, ns) + r.render_network_state(ns, target=args.directory) if __name__ == '__main__': -- cgit v1.2.3 From 31b6f173280fcc8e9be2732ae2e9b6f6c89679d4 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 28 Apr 2017 09:23:25 -0400 Subject: Azure: fix reformatting of ephemeral disks on resize to large types. Large instance types have a different disk format on the newly partitioned ephemeral drive. So we have to adjust the logic in the Azure datasource to recognize that a disk with 2 partitions and an empty ntfs filesystem on the second one is acceptable. This also adjusts the datasources's builtin fs_setup config to remove the 'replace_fs' entry. This entry was previously ignored, and confusing. I've clarified the doc on that also. 
LP: #1686514 --- cloudinit/config/cc_disk_setup.py | 19 +- cloudinit/sources/DataSourceAzure.py | 84 ++++--- tests/unittests/test_datasource/test_azure.py | 265 ++++++++++++++++++--- .../test_handler/test_handler_disk_setup.py | 16 ++ 4 files changed, 307 insertions(+), 77 deletions(-) diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py index 6f827ddc..29eb5dd8 100644 --- a/cloudinit/config/cc_disk_setup.py +++ b/cloudinit/config/cc_disk_setup.py @@ -68,6 +68,9 @@ specified using ``filesystem``. Using ``overwrite: true`` for filesystems is dangerous and can lead to data loss, so double check the entry in ``fs_setup``. +.. note:: + ``replace_fs`` is ignored unless ``partition`` is ``auto`` or ``any``. + **Internal name:** ``cc_disk_setup`` **Module frequency:** per instance @@ -127,7 +130,7 @@ def handle(_name, cfg, cloud, log, _args): log.debug("Partitioning disks: %s", str(disk_setup)) for disk, definition in disk_setup.items(): if not isinstance(definition, dict): - log.warn("Invalid disk definition for %s" % disk) + log.warning("Invalid disk definition for %s" % disk) continue try: @@ -144,7 +147,7 @@ def handle(_name, cfg, cloud, log, _args): update_fs_setup_devices(fs_setup, cloud.device_name_to_device) for definition in fs_setup: if not isinstance(definition, dict): - log.warn("Invalid file system definition: %s" % definition) + log.warning("Invalid file system definition: %s" % definition) continue try: @@ -199,8 +202,13 @@ def update_fs_setup_devices(disk_setup, tformer): definition['_origname'] = origname definition['device'] = tformed - if part and 'partition' in definition: - definition['_partition'] = definition['partition'] + if part: + # In origname with .N, N overrides 'partition' key. + if 'partition' in definition: + LOG.warning("Partition '%s' from dotted device name '%s' " + "overrides 'partition' key in %s", part, origname, + definition) + definition['_partition'] = definition['partition'] definition['partition'] = part @@ -849,7 +857,8 @@ def mkfs(fs_cfg): # Check to see if the fs already exists LOG.debug("Checking device %s", device) check_label, check_fstype, _ = check_fs(device) - LOG.debug("Device %s has %s %s", device, check_label, check_fstype) + LOG.debug("Device '%s' has check_label='%s' check_fstype=%s", + device, check_label, check_fstype) if check_label == label and check_fstype == fs_type: LOG.debug("Existing file system found at %s", device) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 5254e18a..44857c09 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -196,8 +196,7 @@ BUILTIN_CLOUD_CONFIG = { 'overwrite': True}, }, 'fs_setup': [{'filesystem': DEFAULT_FS, - 'device': 'ephemeral0.1', - 'replace_fs': 'ntfs'}], + 'device': 'ephemeral0.1'}], } DS_CFG_PATH = ['datasource', DS_NAME] @@ -413,56 +412,71 @@ class DataSourceAzureNet(sources.DataSource): return +def _partitions_on_device(devpath, maxnum=16): + # return a list of tuples (ptnum, path) for each part on devpath + for suff in ("-part", "p", ""): + found = [] + for pnum in range(1, maxnum): + ppath = devpath + suff + str(pnum) + if os.path.exists(ppath): + found.append((pnum, os.path.realpath(ppath))) + if found: + return found + return [] + + +def _has_ntfs_filesystem(devpath): + ntfs_devices = util.find_devs_with("TYPE=ntfs", no_cache=True) + LOG.debug('ntfs_devices found = %s', ntfs_devices) + return os.path.realpath(devpath) in ntfs_devices + + def 
can_dev_be_reformatted(devpath): - # determine if the ephemeral block device path devpath - # is newly formatted after a resize. + """Determine if block device devpath is newly formatted ephemeral. + + A newly formatted disk will: + a.) have a partition table (dos or gpt) + b.) have 1 partition that is ntfs formatted, or + have 2 partitions with the second partition ntfs formatted. + (larger instances with >2TB ephemeral disk have gpt, and will + have a microsoft reserved partition as part 1. LP: #1686514) + c.) the ntfs partition will have no files other than possibly + 'dataloss_warning_readme.txt'""" if not os.path.exists(devpath): return False, 'device %s does not exist' % devpath - realpath = os.path.realpath(devpath) - LOG.debug('Resolving realpath of %s -> %s', devpath, realpath) - - # it is possible that the block device might exist, but the kernel - # have not yet read the partition table and sent events. we udevadm settle - # to hope to resolve that. Better here would probably be to test and see, - # and then settle if we didn't find anything and try again. - if util.which("udevadm"): - util.subp(["udevadm", "settle"]) + LOG.debug('Resolving realpath of %s -> %s', devpath, + os.path.realpath(devpath)) # devpath of /dev/sd[a-z] or /dev/disk/cloud/azure_resource # where partitions are "1" or "-part1" or "p1" - part1path = None - for suff in ("-part", "p", ""): - cand = devpath + suff + "1" - if os.path.exists(cand): - if os.path.exists(devpath + suff + "2"): - msg = ('device %s had more than 1 partition: %s, %s' % - devpath, cand, devpath + suff + "2") - return False, msg - part1path = cand - break - - if part1path is None: + partitions = _partitions_on_device(devpath) + if len(partitions) == 0: return False, 'device %s was not partitioned' % devpath + elif len(partitions) > 2: + msg = ('device %s had 3 or more partitions: %s' % + (devpath, ' '.join([p[1] for p in partitions]))) + return False, msg + elif len(partitions) == 2: + cand_part, cand_path = partitions[1] + else: + cand_part, cand_path = partitions[0] - real_part1path = os.path.realpath(part1path) - ntfs_devices = util.find_devs_with("TYPE=ntfs", no_cache=True) - LOG.debug('ntfs_devices found = %s', ntfs_devices) - if real_part1path not in ntfs_devices: - msg = ('partition 1 (%s -> %s) on device %s was not ntfs formatted' % - (part1path, real_part1path, devpath)) + if not _has_ntfs_filesystem(cand_path): + msg = ('partition %s (%s) on device %s was not ntfs formatted' % + (cand_part, cand_path, devpath)) return False, msg def count_files(mp): ignored = set(['dataloss_warning_readme.txt']) return len([f for f in os.listdir(mp) if f.lower() not in ignored]) - bmsg = ('partition 1 (%s -> %s) on device %s was ntfs formatted' % - (part1path, real_part1path, devpath)) + bmsg = ('partition %s (%s) on device %s was ntfs formatted' % + (cand_part, cand_path, devpath)) try: - file_count = util.mount_cb(part1path, count_files) + file_count = util.mount_cb(cand_path, count_files) except util.MountFailedError as e: - return False, bmsg + ' but mount of %s failed: %s' % (part1path, e) + return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e) if file_count != 0: return False, bmsg + ' but had %d files on it.' % file_count diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index e6b0dcb4..67cddeb9 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -1,12 +1,13 @@ # This file is part of cloud-init. 
See LICENSE file for license information. from cloudinit import helpers -from cloudinit.util import b64e, decode_binary, load_file -from cloudinit.sources import DataSourceAzure +from cloudinit.util import b64e, decode_binary, load_file, write_file +from cloudinit.sources import DataSourceAzure as dsaz from cloudinit.util import find_freebsd_part from cloudinit.util import get_path_dev_freebsd -from ..helpers import TestCase, populate_dir, mock, ExitStack, PY26, SkipTest +from ..helpers import (CiTestCase, TestCase, populate_dir, mock, + ExitStack, PY26, SkipTest) import crypt import os @@ -98,7 +99,6 @@ class TestAzureDataSource(TestCase): self.patches.enter_context(mock.patch.object(module, name, new)) def _get_mockds(self): - mod = DataSourceAzure sysctl_out = "dev.storvsc.3.%pnpinfo: "\ "classid=ba6163d9-04a1-4d29-b605-72e2ffb1dc7f "\ "deviceid=f8b3781b-1e82-4818-a1c3-63d806ec15bb\n" @@ -123,14 +123,14 @@ scbus-1 on xpt0 bus 0 at scbus3 target 1 lun 0 (da1,pass2) """ self.apply_patches([ - (mod, 'get_dev_storvsc_sysctl', mock.MagicMock( + (dsaz, 'get_dev_storvsc_sysctl', mock.MagicMock( return_value=sysctl_out)), - (mod, 'get_camcontrol_dev_bus', mock.MagicMock( + (dsaz, 'get_camcontrol_dev_bus', mock.MagicMock( return_value=camctl_devbus)), - (mod, 'get_camcontrol_dev', mock.MagicMock( + (dsaz, 'get_camcontrol_dev', mock.MagicMock( return_value=camctl_dev)) ]) - return mod + return dsaz def _get_ds(self, data, agent_command=None): @@ -152,8 +152,7 @@ scbus-1 on xpt0 bus 0 populate_dir(os.path.join(self.paths.seed_dir, "azure"), {'ovf-env.xml': data['ovfcontent']}) - mod = DataSourceAzure - mod.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d + dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d self.get_metadata_from_fabric = mock.MagicMock(return_value={ 'public-keys': [], @@ -162,19 +161,19 @@ scbus-1 on xpt0 bus 0 self.instance_id = 'test-instance-id' self.apply_patches([ - (mod, 'list_possible_azure_ds_devs', dsdevs), - (mod, 'invoke_agent', _invoke_agent), - (mod, 'wait_for_files', _wait_for_files), - (mod, 'pubkeys_from_crt_files', _pubkeys_from_crt_files), - (mod, 'perform_hostname_bounce', mock.MagicMock()), - (mod, 'get_hostname', mock.MagicMock()), - (mod, 'set_hostname', mock.MagicMock()), - (mod, 'get_metadata_from_fabric', self.get_metadata_from_fabric), - (mod.util, 'read_dmi_data', mock.MagicMock( + (dsaz, 'list_possible_azure_ds_devs', dsdevs), + (dsaz, 'invoke_agent', _invoke_agent), + (dsaz, 'wait_for_files', _wait_for_files), + (dsaz, 'pubkeys_from_crt_files', _pubkeys_from_crt_files), + (dsaz, 'perform_hostname_bounce', mock.MagicMock()), + (dsaz, 'get_hostname', mock.MagicMock()), + (dsaz, 'set_hostname', mock.MagicMock()), + (dsaz, 'get_metadata_from_fabric', self.get_metadata_from_fabric), + (dsaz.util, 'read_dmi_data', mock.MagicMock( return_value=self.instance_id)), ]) - dsrc = mod.DataSourceAzureNet( + dsrc = dsaz.DataSourceAzureNet( data.get('sys_cfg', {}), distro=None, paths=self.paths) if agent_command is not None: dsrc.ds_cfg['agent_command'] = agent_command @@ -418,7 +417,7 @@ fdescfs /dev/fd fdescfs rw 0 0 cfg = dsrc.get_config_obj() self.assertEqual(dsrc.device_name_to_device("ephemeral0"), - DataSourceAzure.RESOURCE_DISK_PATH) + dsaz.RESOURCE_DISK_PATH) assert 'disk_setup' in cfg assert 'fs_setup' in cfg self.assertIsInstance(cfg['disk_setup'], dict) @@ -468,14 +467,13 @@ fdescfs /dev/fd fdescfs rw 0 0 # Make sure that the redacted password on disk is not used by CI self.assertNotEqual(dsrc.cfg.get('password'), - DataSourceAzure.DEF_PASSWD_REDACTION) + 
dsaz.DEF_PASSWD_REDACTION) # Make sure that the password was really encrypted et = ET.fromstring(on_disk_ovf) for elem in et.iter(): if 'UserPassword' in elem.tag: - self.assertEqual(DataSourceAzure.DEF_PASSWD_REDACTION, - elem.text) + self.assertEqual(dsaz.DEF_PASSWD_REDACTION, elem.text) def test_ovf_env_arrives_in_waagent_dir(self): xml = construct_valid_ovf_env(data={}, userdata="FOODATA") @@ -524,17 +522,17 @@ class TestAzureBounce(TestCase): def mock_out_azure_moving_parts(self): self.patches.enter_context( - mock.patch.object(DataSourceAzure, 'invoke_agent')) + mock.patch.object(dsaz, 'invoke_agent')) self.patches.enter_context( - mock.patch.object(DataSourceAzure, 'wait_for_files')) + mock.patch.object(dsaz, 'wait_for_files')) self.patches.enter_context( - mock.patch.object(DataSourceAzure, 'list_possible_azure_ds_devs', + mock.patch.object(dsaz, 'list_possible_azure_ds_devs', mock.MagicMock(return_value=[]))) self.patches.enter_context( - mock.patch.object(DataSourceAzure, 'get_metadata_from_fabric', + mock.patch.object(dsaz, 'get_metadata_from_fabric', mock.MagicMock(return_value={}))) self.patches.enter_context( - mock.patch.object(DataSourceAzure.util, 'read_dmi_data', + mock.patch.object(dsaz.util, 'read_dmi_data', mock.MagicMock(return_value='test-instance-id'))) def setUp(self): @@ -543,13 +541,13 @@ class TestAzureBounce(TestCase): self.waagent_d = os.path.join(self.tmp, 'var', 'lib', 'waagent') self.paths = helpers.Paths({'cloud_dir': self.tmp}) self.addCleanup(shutil.rmtree, self.tmp) - DataSourceAzure.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d + dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d self.patches = ExitStack() self.mock_out_azure_moving_parts() self.get_hostname = self.patches.enter_context( - mock.patch.object(DataSourceAzure, 'get_hostname')) + mock.patch.object(dsaz, 'get_hostname')) self.set_hostname = self.patches.enter_context( - mock.patch.object(DataSourceAzure, 'set_hostname')) + mock.patch.object(dsaz, 'set_hostname')) self.subp = self.patches.enter_context( mock.patch('cloudinit.sources.DataSourceAzure.util.subp')) @@ -560,7 +558,7 @@ class TestAzureBounce(TestCase): if ovfcontent is not None: populate_dir(os.path.join(self.paths.seed_dir, "azure"), {'ovf-env.xml': ovfcontent}) - dsrc = DataSourceAzure.DataSourceAzureNet( + dsrc = dsaz.DataSourceAzureNet( {}, distro=None, paths=self.paths) if agent_command is not None: dsrc.ds_cfg['agent_command'] = agent_command @@ -673,7 +671,7 @@ class TestAzureBounce(TestCase): def test_default_bounce_command_used_by_default(self): cmd = 'default-bounce-command' - DataSourceAzure.BUILTIN_DS_CONFIG['hostname_bounce']['command'] = cmd + dsaz.BUILTIN_DS_CONFIG['hostname_bounce']['command'] = cmd cfg = {'hostname_bounce': {'policy': 'force'}} data = self.get_ovf_env_with_dscfg('some-hostname', cfg) self._get_ds(data, agent_command=['not', '__builtin__']).get_data() @@ -701,15 +699,208 @@ class TestAzureBounce(TestCase): class TestReadAzureOvf(TestCase): def test_invalid_xml_raises_non_azure_ds(self): invalid_xml = "" + construct_valid_ovf_env(data={}) - self.assertRaises(DataSourceAzure.BrokenAzureDataSource, - DataSourceAzure.read_azure_ovf, invalid_xml) + self.assertRaises(dsaz.BrokenAzureDataSource, + dsaz.read_azure_ovf, invalid_xml) def test_load_with_pubkeys(self): mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': ''}] pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist] content = construct_valid_ovf_env(pubkeys=pubkeys) - (_md, _ud, cfg) = 
DataSourceAzure.read_azure_ovf(content) + (_md, _ud, cfg) = dsaz.read_azure_ovf(content) for mypk in mypklist: self.assertIn(mypk, cfg['_pubkeys']) + +class TestCanDevBeReformatted(CiTestCase): + warning_file = 'dataloss_warning_readme.txt' + + def _domock(self, mockpath, sattr=None): + patcher = mock.patch(mockpath) + setattr(self, sattr, patcher.start()) + self.addCleanup(patcher.stop) + + def setUp(self): + super(TestCanDevBeReformatted, self).setUp() + + def patchup(self, devs): + bypath = {} + for path, data in devs.items(): + bypath[path] = data + if 'realpath' in data: + bypath[data['realpath']] = data + for ppath, pdata in data.get('partitions', {}).items(): + bypath[ppath] = pdata + if 'realpath' in data: + bypath[pdata['realpath']] = pdata + + def realpath(d): + return bypath[d].get('realpath', d) + + def partitions_on_device(devpath): + parts = bypath.get(devpath, {}).get('partitions', {}) + ret = [] + for path, data in parts.items(): + ret.append((data.get('num'), realpath(path))) + # return sorted by partition number + return sorted(ret, key=lambda d: d[0]) + + def mount_cb(device, callback): + p = self.tmp_dir() + for f in bypath.get(device).get('files', []): + write_file(os.path.join(p, f), content=f) + return callback(p) + + def has_ntfs_fs(device): + return bypath.get(device, {}).get('fs') == 'ntfs' + + p = 'cloudinit.sources.DataSourceAzure' + self._domock(p + "._partitions_on_device", 'm_partitions_on_device') + self._domock(p + "._has_ntfs_filesystem", 'm_has_ntfs_filesystem') + self._domock(p + ".util.mount_cb", 'm_mount_cb') + self._domock(p + ".os.path.realpath", 'm_realpath') + self._domock(p + ".os.path.exists", 'm_exists') + + self.m_exists.side_effect = lambda p: p in bypath + self.m_realpath.side_effect = realpath + self.m_has_ntfs_filesystem.side_effect = has_ntfs_fs + self.m_mount_cb.side_effect = mount_cb + self.m_partitions_on_device.side_effect = partitions_on_device + + def test_three_partitions_is_false(self): + """A disk with 3 partitions can not be formatted.""" + self.patchup({ + '/dev/sda': { + 'partitions': { + '/dev/sda1': {'num': 1}, + '/dev/sda2': {'num': 2}, + '/dev/sda3': {'num': 3}, + }}}) + value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + self.assertFalse(False, value) + self.assertIn("3 or more", msg.lower()) + + def test_no_partitions_is_false(self): + """A disk with no partitions can not be formatted.""" + self.patchup({'/dev/sda': {}}) + value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + self.assertEqual(False, value) + self.assertIn("not partitioned", msg.lower()) + + def test_two_partitions_not_ntfs_false(self): + """2 partitions and 2nd not ntfs can not be formatted.""" + self.patchup({ + '/dev/sda': { + 'partitions': { + '/dev/sda1': {'num': 1}, + '/dev/sda2': {'num': 2, 'fs': 'ext4', 'files': []}, + }}}) + value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + self.assertFalse(False, value) + self.assertIn("not ntfs", msg.lower()) + + def test_two_partitions_ntfs_populated_false(self): + """2 partitions and populated ntfs fs on 2nd can not be formatted.""" + self.patchup({ + '/dev/sda': { + 'partitions': { + '/dev/sda1': {'num': 1}, + '/dev/sda2': {'num': 2, 'fs': 'ntfs', + 'files': ['secret.txt']}, + }}}) + value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + self.assertFalse(False, value) + self.assertIn("files on it", msg.lower()) + + def test_two_partitions_ntfs_empty_is_true(self): + """2 partitions and empty ntfs fs on 2nd can be formatted.""" + self.patchup({ + '/dev/sda': { + 'partitions': { + '/dev/sda1': {'num': 
1}, + '/dev/sda2': {'num': 2, 'fs': 'ntfs', 'files': []}, + }}}) + value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + self.assertEqual(True, value) + self.assertIn("safe for", msg.lower()) + + def test_one_partition_not_ntfs_false(self): + """1 partition witih fs other than ntfs can not be formatted.""" + self.patchup({ + '/dev/sda': { + 'partitions': { + '/dev/sda1': {'num': 1, 'fs': 'zfs'}, + }}}) + value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + self.assertEqual(False, value) + self.assertIn("not ntfs", msg.lower()) + + def test_one_partition_ntfs_populated_false(self): + """1 mountable ntfs partition with many files can not be formatted.""" + self.patchup({ + '/dev/sda': { + 'partitions': { + '/dev/sda1': {'num': 1, 'fs': 'ntfs', + 'files': ['file1.txt', 'file2.exe']}, + }}}) + value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + self.assertEqual(False, value) + self.assertIn("files on it", msg.lower()) + + def test_one_partition_ntfs_empty_is_true(self): + """1 mountable ntfs partition and no files can be formatted.""" + self.patchup({ + '/dev/sda': { + 'partitions': { + '/dev/sda1': {'num': 1, 'fs': 'ntfs', 'files': []} + }}}) + value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + self.assertEqual(True, value) + self.assertIn("safe for", msg.lower()) + + def test_one_partition_ntfs_empty_with_dataloss_file_is_true(self): + """1 mountable ntfs partition and only warn file can be formatted.""" + self.patchup({ + '/dev/sda': { + 'partitions': { + '/dev/sda1': {'num': 1, 'fs': 'ntfs', + 'files': ['dataloss_warning_readme.txt']} + }}}) + value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + self.assertEqual(True, value) + self.assertIn("safe for", msg.lower()) + + def test_one_partition_through_realpath_is_true(self): + """A symlink to a device with 1 ntfs partition can be formatted.""" + epath = '/dev/disk/cloud/azure_resource' + self.patchup({ + epath: { + 'realpath': '/dev/sdb', + 'partitions': { + epath + '-part1': { + 'num': 1, 'fs': 'ntfs', 'files': [self.warning_file], + 'realpath': '/dev/sdb1'} + }}}) + value, msg = dsaz.can_dev_be_reformatted(epath) + self.assertEqual(True, value) + self.assertIn("safe for", msg.lower()) + + def test_three_partition_through_realpath_is_false(self): + """A symlink to a device with 3 partitions can not be formatted.""" + epath = '/dev/disk/cloud/azure_resource' + self.patchup({ + epath: { + 'realpath': '/dev/sdb', + 'partitions': { + epath + '-part1': { + 'num': 1, 'fs': 'ntfs', 'files': [self.warning_file], + 'realpath': '/dev/sdb1'}, + epath + '-part2': {'num': 2, 'fs': 'ext3', + 'realpath': '/dev/sdb2'}, + epath + '-part3': {'num': 3, 'fs': 'ext', + 'realpath': '/dev/sdb3'} + }}}) + value, msg = dsaz.can_dev_be_reformatted(epath) + self.assertEqual(False, value) + self.assertIn("3 or more", msg.lower()) + # vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_disk_setup.py b/tests/unittests/test_handler/test_handler_disk_setup.py index 9f00d46a..68fc6aae 100644 --- a/tests/unittests/test_handler/test_handler_disk_setup.py +++ b/tests/unittests/test_handler/test_handler_disk_setup.py @@ -151,6 +151,22 @@ class TestUpdateFsSetupDevices(TestCase): 'filesystem': 'xfs' }, fs_setup) + def test_dotted_devname_populates_partition(self): + fs_setup = { + 'device': 'ephemeral0.1', + 'label': 'test2', + 'filesystem': 'xfs' + } + cc_disk_setup.update_fs_setup_devices([fs_setup], + lambda device: device) + self.assertEqual({ + '_origname': 'ephemeral0.1', + 'device': 'ephemeral0', + 'partition': '1', + 'label': 'test2', + 
'filesystem': 'xfs' + }, fs_setup) + @mock.patch('cloudinit.config.cc_disk_setup.find_device_node', return_value=('/dev/xdb1', False)) -- cgit v1.2.3 From a6572d9415e592cbf9821b769bbee9e7fdf029d5 Mon Sep 17 00:00:00 2001 From: Dimitri John Ledkov Date: Sat, 13 May 2017 01:28:14 +0100 Subject: netplan: fix netplan render_network_state signature. tools/net-convert fails to output netplan config, because the positional arguments of render_network_state are the wrong way around for that function w.r.t. other renders. Fix the netplan renderer to have the correct signature. LP: #1685944 --- cloudinit/net/netplan.py | 2 +- tests/unittests/test_net.py | 16 ++++++++-------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py index 825fe831..56b41be4 100644 --- a/cloudinit/net/netplan.py +++ b/cloudinit/net/netplan.py @@ -205,7 +205,7 @@ class Renderer(renderer.Renderer): self._postcmds = config.get('postcmds', False) self.clean_default = config.get('clean_default', True) - def render_network_state(self, target, network_state): + def render_network_state(self, network_state, target): # check network state for version # if v2, then extract network_state.config # else render_v2_from_state diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 052c4016..d36d0e76 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -1073,7 +1073,7 @@ class TestNetplanNetRendering(CiTestCase): render_target = 'netplan.yaml' renderer = netplan.Renderer( {'netplan_path': render_target, 'postcmds': False}) - renderer.render_network_state(render_dir, ns) + renderer.render_network_state(ns, render_dir) self.assertTrue(os.path.exists(os.path.join(render_dir, render_target))) @@ -1178,7 +1178,7 @@ class TestNetplanPostcommands(CiTestCase): render_target = 'netplan.yaml' renderer = netplan.Renderer( {'netplan_path': render_target, 'postcmds': True}) - renderer.render_network_state(render_dir, ns) + renderer.render_network_state(ns, render_dir) mock_netplan_generate.assert_called_with(run=True) mock_net_setup_link.assert_called_with(run=True) @@ -1203,7 +1203,7 @@ class TestNetplanPostcommands(CiTestCase): '/sys/class/net/lo'], capture=True), ] with mock.patch.object(os.path, 'islink', return_value=True): - renderer.render_network_state(render_dir, ns) + renderer.render_network_state(ns, render_dir) mock_subp.assert_has_calls(expected) @@ -1354,9 +1354,9 @@ class TestCmdlineReadKernelConfig(CiTestCase): class TestNetplanRoundTrip(CiTestCase): def _render_and_read(self, network_config=None, state=None, - netplan_path=None, dir=None): - if dir is None: - dir = self.tmp_dir() + netplan_path=None, target=None): + if target is None: + target = self.tmp_dir() if network_config: ns = network_state.parse_net_config_data(network_config) @@ -1371,8 +1371,8 @@ class TestNetplanRoundTrip(CiTestCase): renderer = netplan.Renderer( config={'netplan_path': netplan_path}) - renderer.render_network_state(dir, ns) - return dir2dict(dir) + renderer.render_network_state(ns, target) + return dir2dict(target) def testsimple_render_small_netplan(self): entry = NETWORK_CONFIGS['small'] -- cgit v1.2.3 From 66e46d8ec290737fd74f50eb8c7672d627d9b516 Mon Sep 17 00:00:00 2001 From: Chris Brinker Date: Fri, 28 Apr 2017 10:53:07 -0700 Subject: Actually skip warnings when .skip file is present. This change allows the presence of "/var/lib/cloud/instance/warnings/.skip" to actually skip warnings as documented in Z99-cloudinit-warnings.sh. 
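As a sketch, creating either of the two skip files the script checks suppresses the warnings (the instance-wide path requires root):

    import os
    # instance-wide skip file, checked as $warndir/.skip
    open('/var/lib/cloud/instance/warnings/.skip', 'w').close()
    # per-user skip file, checked as $HOME/.cloud-warnings.skip
    open(os.path.expanduser('~/.cloud-warnings.skip'), 'w').close()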
LP: #1691551 --- tools/Z99-cloudinit-warnings.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/Z99-cloudinit-warnings.sh b/tools/Z99-cloudinit-warnings.sh index b237786b..1d413374 100644 --- a/tools/Z99-cloudinit-warnings.sh +++ b/tools/Z99-cloudinit-warnings.sh @@ -4,12 +4,12 @@ # Purpose: show user warnings on login. cloud_init_warnings() { - local skipf="" warning="" idir="/var/lib/cloud/instance" n=0 + local warning="" idir="/var/lib/cloud/instance" n=0 local warndir="$idir/warnings" local ufile="$HOME/.cloud-warnings.skip" sfile="$warndir/.skip" [ -d "$warndir" ] || return 0 [ ! -f "$ufile" ] || return 0 - [ ! -f "$skipf" ] || return 0 + [ ! -f "$sfile" ] || return 0 for warning in "$warndir"/*; do [ -f "$warning" ] || continue -- cgit v1.2.3 From 951863c211ab0f8c43a9443d080dbbe0f6b454a6 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Wed, 17 May 2017 10:33:00 -0700 Subject: tests: fix hardcoded path to mkfs.ext4 A recent merge that added a mkfs.ext4 tests has a hard coded location for the binary of mkfs.ext4. On CentOS 7 the test failed because the command in a different location than Ubuntu. LP: #1691517 --- tests/unittests/test_handler/test_handler_disk_setup.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/unittests/test_handler/test_handler_disk_setup.py b/tests/unittests/test_handler/test_handler_disk_setup.py index 68fc6aae..35dff3ee 100644 --- a/tests/unittests/test_handler/test_handler_disk_setup.py +++ b/tests/unittests/test_handler/test_handler_disk_setup.py @@ -202,9 +202,11 @@ class TestMkfsCommandHandling(TestCase): subp.assert_called_once_with( 'mkfs -t ext4 -L with_cmd /dev/xdb1', shell=True) - def test_overwrite_and_extra_opts_without_cmd(self, subp, *args): + @mock.patch('cloudinit.config.cc_disk_setup.util.which') + def test_overwrite_and_extra_opts_without_cmd(self, m_which, subp, *args): """mkfs observes extra_opts and overwrite settings when cmd is not present.""" + m_which.side_effect = lambda p: {'mkfs.ext4': '/sbin/mkfs.ext4'}[p] cc_disk_setup.mkfs({ 'filesystem': 'ext4', 'device': '/dev/xdb1', -- cgit v1.2.3 From afdddf8eea34866b43d1fc92624f9ac175802f36 Mon Sep 17 00:00:00 2001 From: Lars Kellogg-Stedman Date: Thu, 18 May 2017 15:07:55 -0400 Subject: cloudstack: fix tests to avoid accessing /var/lib/NetworkManager on centos/fedora/rhel, /var/lib/NetworkManager has mode 700, causing the cloudstack unit tests to fail when run as a non-root user. This mocks out get_latest_lease so that we no longer try to read dhcp lease information during the unit tests. 
--- tests/unittests/test_datasource/test_cloudstack.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/tests/unittests/test_datasource/test_cloudstack.py b/tests/unittests/test_datasource/test_cloudstack.py index 1d3d2f19..e94aad61 100644 --- a/tests/unittests/test_datasource/test_cloudstack.py +++ b/tests/unittests/test_datasource/test_cloudstack.py @@ -16,10 +16,15 @@ class TestCloudStackPasswordFetching(TestCase): self.patches.enter_context(mock.patch('{0}.ec2'.format(mod_name))) self.patches.enter_context(mock.patch('{0}.uhelp'.format(mod_name))) default_gw = "192.201.20.0" - mod_name = 'cloudinit.sources.DataSourceCloudStack.get_default_gateway' + get_latest_lease = mock.MagicMock(return_value=None) + self.patches.enter_context(mock.patch( + 'cloudinit.sources.DataSourceCloudStack.get_latest_lease', + get_latest_lease)) + get_default_gw = mock.MagicMock(return_value=default_gw) - self.patches.enter_context( - mock.patch(mod_name, get_default_gw)) + self.patches.enter_context(mock.patch( + 'cloudinit.sources.DataSourceCloudStack.get_default_gateway', + get_default_gw)) def _set_password_server_response(self, response_string): subp = mock.MagicMock(return_value=(response_string, '')) -- cgit v1.2.3 From e11d3899d47ec5fcb545e0c7820af9d3995cb574 Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Fri, 19 May 2017 16:49:11 -0400 Subject: cc_ntp: write template before installing and add service restart On systems which installed ntp and specified servers or pools in the config ntpd didn't notice the updated configuration file and didn't use the correct configuration. Resolve this by rendering the template first which allows the package install to use the existing configuration. Additionally add a service restart to handle the case where ntp does not need to be installed but it may not have started. Add an integration test to confirm that cc_ntp enables ntp to use the specific servers and pools in the cloud-config. 
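A minimal sketch of the config shape this module consumes (the addresses are the illustrative values from the test configs below; the cloud object and LOG are assumed from the caller's context):

    cfg = {'ntp': {'servers': ['172.16.15.14'],
                   'pools': ['0.cloud-init.mypool']}}
    # handle() now renders /etc/ntp.conf from cfg before installing the
    # ntp package, then reload_ntp() runs 'systemctl reload-or-restart
    # ntp' (or 'service ntp restart' without systemd) so an already
    # running ntpd picks up the new servers and pools.
    cc_ntp.handle('cc_ntp', cfg, cloud, LOG, None)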
LP: #1645644 --- cloudinit/config/cc_ntp.py | 24 ++++++- tests/cloud_tests/configs/modules/ntp_pools.yaml | 10 +-- tests/cloud_tests/configs/modules/ntp_servers.yaml | 16 +++-- tests/cloud_tests/testcases/modules/ntp.py | 4 +- tests/cloud_tests/testcases/modules/ntp_pools.py | 18 +++-- tests/cloud_tests/testcases/modules/ntp_servers.py | 19 ++++-- tests/unittests/test_handler/test_handler_ntp.py | 78 +++++++++++++++++++++- 7 files changed, 142 insertions(+), 27 deletions(-) diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py index e33032fd..225f898a 100644 --- a/cloudinit/config/cc_ntp.py +++ b/cloudinit/config/cc_ntp.py @@ -74,10 +74,19 @@ def handle(name, cfg, cloud, log, _args): "not present or disabled by cfg", name) return True - install_ntp(cloud.distro.install_packages, packages=['ntp'], - check_exe="ntpd") rename_ntp_conf() + # ensure when ntp is installed it has a configuration file + # to use instead of starting up with packaged defaults write_ntp_config_template(ntp_cfg, cloud) + install_ntp(cloud.distro.install_packages, packages=['ntp'], + check_exe="ntpd") + + # if ntp was already installed, it may not have started + try: + reload_ntp(systemd=cloud.distro.uses_systemd()) + except util.ProcessExecutionError as e: + LOG.exception("Failed to reload/start ntp service: %s", e) + raise def install_ntp(install_func, packages=None, check_exe="ntpd"): @@ -90,6 +99,7 @@ def install_ntp(install_func, packages=None, check_exe="ntpd"): def rename_ntp_conf(config=NTP_CONF): + """Rename any existing ntp.conf file and render from template""" if os.path.exists(config): util.rename(config, config + ".dist") @@ -125,4 +135,14 @@ def write_ntp_config_template(cfg, cloud): templater.render_to_file(template_fn, NTP_CONF, params) + +def reload_ntp(systemd=False): + service = 'ntp' + if systemd: + cmd = ['systemctl', 'reload-or-restart', service] + else: + cmd = ['service', service, 'restart'] + util.subp(cmd, capture=True) + + # vi: ts=4 expandtab diff --git a/tests/cloud_tests/configs/modules/ntp_pools.yaml b/tests/cloud_tests/configs/modules/ntp_pools.yaml index bd0ac292..e040cc32 100644 --- a/tests/cloud_tests/configs/modules/ntp_pools.yaml +++ b/tests/cloud_tests/configs/modules/ntp_pools.yaml @@ -5,10 +5,9 @@ cloud_config: | #cloud-config ntp: pools: - - 0.pool.ntp.org - - 1.pool.ntp.org - - 2.pool.ntp.org - - 3.pool.ntp.org + - 0.cloud-init.mypool + - 1.cloud-init.mypool + - 172.16.15.14 collect_scripts: ntp_installed_pools: | #!/bin/bash @@ -19,5 +18,8 @@ collect_scripts: ntp_conf_pools: | #!/bin/bash grep '^pool' /etc/ntp.conf + ntpq_servers: | + #!/bin/sh + ntpq -p -w # vi: ts=4 expandtab diff --git a/tests/cloud_tests/configs/modules/ntp_servers.yaml b/tests/cloud_tests/configs/modules/ntp_servers.yaml index 934b9c5d..e0564a03 100644 --- a/tests/cloud_tests/configs/modules/ntp_servers.yaml +++ b/tests/cloud_tests/configs/modules/ntp_servers.yaml @@ -5,16 +5,20 @@ cloud_config: | #cloud-config ntp: servers: - - pool.ntp.org + - 172.16.15.14 + - 172.16.17.18 collect_scripts: ntp_installed_servers: | - #!/bin/bash - dpkg -l | grep ntp | wc -l + #!/bin/sh + dpkg -l | grep -c ntp ntp_conf_dist_servers: | - #!/bin/bash - ls /etc/ntp.conf.dist | wc -l + #!/bin/sh + cat /etc/ntp.conf.dist | wc -l ntp_conf_servers: | - #!/bin/bash + #!/bin/sh grep '^server' /etc/ntp.conf + ntpq_servers: | + #!/bin/sh + ntpq -p -w # vi: ts=4 expandtab diff --git a/tests/cloud_tests/testcases/modules/ntp.py b/tests/cloud_tests/testcases/modules/ntp.py index b1119257..82d32880 100644 --- 
a/tests/cloud_tests/testcases/modules/ntp.py +++ b/tests/cloud_tests/testcases/modules/ntp.py @@ -13,9 +13,9 @@ class TestNtp(base.CloudTestCase): self.assertEqual(1, int(out)) def test_ntp_dist_entries(self): - """Test dist config file has one entry""" + """Test dist config file is empty""" out = self.get_data_file('ntp_conf_dist_empty') - self.assertEqual(1, int(out)) + self.assertEqual(0, int(out)) def test_ntp_entires(self): """Test config entries""" diff --git a/tests/cloud_tests/testcases/modules/ntp_pools.py b/tests/cloud_tests/testcases/modules/ntp_pools.py index d80cb673..ff6d8fa4 100644 --- a/tests/cloud_tests/testcases/modules/ntp_pools.py +++ b/tests/cloud_tests/testcases/modules/ntp_pools.py @@ -13,16 +13,22 @@ class TestNtpPools(base.CloudTestCase): self.assertEqual(1, int(out)) def test_ntp_dist_entries(self): - """Test dist config file has one entry""" + """Test dist config file is empty""" out = self.get_data_file('ntp_conf_dist_pools') - self.assertEqual(1, int(out)) + self.assertEqual(0, int(out)) def test_ntp_entires(self): """Test config entries""" out = self.get_data_file('ntp_conf_pools') - self.assertIn('pool 0.pool.ntp.org iburst', out) - self.assertIn('pool 1.pool.ntp.org iburst', out) - self.assertIn('pool 2.pool.ntp.org iburst', out) - self.assertIn('pool 3.pool.ntp.org iburst', out) + pools = self.cloud_config.get('ntp').get('pools') + for pool in pools: + self.assertIn('pool %s iburst' % pool, out) + + def test_ntpq_servers(self): + """Test ntpq output has configured servers""" + out = self.get_data_file('ntpq_servers') + pools = self.cloud_config.get('ntp').get('pools') + for pool in pools: + self.assertIn(pool, out) # vi: ts=4 expandtab diff --git a/tests/cloud_tests/testcases/modules/ntp_servers.py b/tests/cloud_tests/testcases/modules/ntp_servers.py index 4879bb6f..9ef270ee 100644 --- a/tests/cloud_tests/testcases/modules/ntp_servers.py +++ b/tests/cloud_tests/testcases/modules/ntp_servers.py @@ -13,13 +13,22 @@ class TestNtpServers(base.CloudTestCase): self.assertEqual(1, int(out)) def test_ntp_dist_entries(self): - """Test dist config file has one entry""" + """Test dist config file is empty""" out = self.get_data_file('ntp_conf_dist_servers') - self.assertEqual(1, int(out)) + self.assertEqual(0, int(out)) def test_ntp_entires(self): - """Test config entries""" - out = self.get_data_file('ntp_conf_servers') - self.assertIn('server pool.ntp.org iburst', out) + """Test config pools entries""" + out = self.get_data_file('ntp_conf_pools') + servers = self.cloud_config.get('ntp').get('servers') + for server in servers: + self.assertIn('server %s iburst' % server, out) + + def test_ntpq_servers(self): + """Test ntpq output has configured servers""" + out = self.get_data_file('ntpq_servers') + servers = self.cloud_config.get('ntp').get('servers') + for server in servers: + self.assertIn(server, out) # vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_ntp.py b/tests/unittests/test_handler/test_handler_ntp.py index ec600077..02bd3046 100644 --- a/tests/unittests/test_handler/test_handler_ntp.py +++ b/tests/unittests/test_handler/test_handler_ntp.py @@ -249,6 +249,8 @@ class TestNtp(FilesystemMockingTestCase): } } mycloud = self._get_cloud('ubuntu') + mycloud.distro = mock.MagicMock() + mycloud.distro.uses_systemd.return_value = True side_effect = [NTP_TEMPLATE.lstrip()] with mock.patch.object(util, 'which', return_value=None): @@ -259,13 +261,70 @@ class TestNtp(FilesystemMockingTestCase): with mock.patch.object(os.path, 'isfile', 
return_value=True): with mock.patch.object(util, 'rename'): - cc_ntp.handle("notimportant", cfg, - mycloud, LOG, None) + with mock.patch.object(util, 'subp') as msubp: + cc_ntp.handle("notimportant", cfg, + mycloud, LOG, None) mockwrite.assert_called_once_with( '/etc/ntp.conf', NTP_EXPECTED_UBUNTU, mode=420) + msubp.assert_any_call(['systemctl', 'reload-or-restart', 'ntp'], + capture=True) + + @mock.patch("cloudinit.config.cc_ntp.install_ntp") + @mock.patch("cloudinit.config.cc_ntp.write_ntp_config_template") + @mock.patch("cloudinit.config.cc_ntp.rename_ntp_conf") + def test_write_config_before_install(self, mock_ntp_rename, + mock_ntp_write_config, + mock_install_ntp): + pools = ['0.mycompany.pool.ntp.org'] + servers = ['192.168.23.3'] + cfg = { + 'ntp': { + 'pools': pools, + 'servers': servers, + } + } + cc = self._get_cloud('ubuntu') + cc.distro = mock.MagicMock() + mock_parent = mock.MagicMock() + mock_parent.attach_mock(mock_ntp_rename, 'mock_ntp_rename') + mock_parent.attach_mock(mock_ntp_write_config, 'mock_ntp_write_config') + mock_parent.attach_mock(mock_install_ntp, 'mock_install_ntp') + + cc_ntp.handle('cc_ntp', cfg, cc, LOG, None) + + """Check call order""" + mock_parent.assert_has_calls([ + mock.call.mock_ntp_rename(), + mock.call.mock_ntp_write_config(cfg.get('ntp'), cc), + mock.call.mock_install_ntp(cc.distro.install_packages, + packages=['ntp'], check_exe="ntpd")]) + + @mock.patch("cloudinit.config.cc_ntp.reload_ntp") + @mock.patch("cloudinit.config.cc_ntp.install_ntp") + @mock.patch("cloudinit.config.cc_ntp.write_ntp_config_template") + @mock.patch("cloudinit.config.cc_ntp.rename_ntp_conf") + def test_reload_ntp_fail_raises_exception(self, mock_rename, + mock_write_conf, + mock_install, + mock_reload): + pools = ['0.mycompany.pool.ntp.org'] + servers = ['192.168.23.3'] + cfg = { + 'ntp': { + 'pools': pools, + 'servers': servers, + } + } + cc = self._get_cloud('ubuntu') + cc.distro = mock.MagicMock() + + mock_reload.side_effect = [util.ProcessExecutionError] + self.assertRaises(util.ProcessExecutionError, + cc_ntp.handle, 'cc_ntp', + cfg, cc, LOG, None) @mock.patch("cloudinit.config.cc_ntp.util") def test_no_ntpcfg_does_nothing(self, mock_util): @@ -275,4 +334,19 @@ class TestNtp(FilesystemMockingTestCase): self.assertFalse(cc.distro.install_packages.called) self.assertFalse(mock_util.subp.called) + @mock.patch("cloudinit.config.cc_ntp.util") + def test_reload_ntp_systemd(self, mock_util): + cc_ntp.reload_ntp(systemd=True) + self.assertTrue(mock_util.subp.called) + mock_util.subp.assert_called_with( + ['systemctl', 'reload-or-restart', 'ntp'], capture=True) + + @mock.patch("cloudinit.config.cc_ntp.util") + def test_reload_ntp_service(self, mock_util): + cc_ntp.reload_ntp(systemd=False) + self.assertTrue(mock_util.subp.called) + mock_util.subp.assert_called_with( + ['service', 'ntp', 'restart'], capture=True) + + # vi: ts=4 expandtab -- cgit v1.2.3 From d91f03177c264e1c889a31f078a6915b7cb75291 Mon Sep 17 00:00:00 2001 From: Felix Dreissig Date: Fri, 28 Apr 2017 11:06:11 +0200 Subject: doc: various improvements for the docs on cc_users_groups. It fixes an annoying typo/inconsistency ("ssh-autorized-keys" vs. "ssh-authorized-keys"), adds descriptions of previously missing config keys and improves overall consistency within the docs section. 
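For illustration, a single concrete 'users' entry as cc_users_groups receives it once the cloud-config YAML is parsed; every value below is hypothetical:

    cfg = {
        'users': [
            'default',
            {'name': 'jdoe',                   # hypothetical login name
             'gecos': 'Jane Doe,Example Corp',
             'groups': ['adm', 'sudo'],
             'shell': '/bin/bash',
             'lock_passwd': True,              # disable password login
             'ssh-authorized-keys': ['ssh-rsa AAAAB3Nza... jdoe@example'],
             'sudo': 'ALL=(ALL) NOPASSWD:ALL'},
        ],
    }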
--- cloudinit/config/cc_users_groups.py | 59 +++++++++++++++++++++++++------------ 1 file changed, 40 insertions(+), 19 deletions(-) diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py index 59649800..b80d1d36 100644 --- a/cloudinit/config/cc_users_groups.py +++ b/cloudinit/config/cc_users_groups.py @@ -25,28 +25,39 @@ entry of the ``users`` list. Each entry in the ``users`` list, other than a config keys for an entry in ``users`` are as follows: - ``name``: The user's login name - - ``homedir``: Optional. Home dir for user. Default is ``/home/`` - - ``primary-group``: Optional. Primary group for user. Default to new group - named after user. + - ``expiredate``: Optional. Date on which the user's login will be + disabled. Default: none + - ``gecos``: Optional. Comment about the user, usually a comma-separated + string of real name and contact information. Default: none - ``groups``: Optional. Additional groups to add the user to. Default: none - - ``selinux-user``: Optional. SELinux user for user's login. Default to - default SELinux user. - - ``lock_passwd``: Optional. Disable password login. Default: true + - ``homedir``: Optional. Home dir for user. Default is ``/home/`` - ``inactive``: Optional. Mark user inactive. Default: false - - ``passwd``: Hash of user password + - ``lock_passwd``: Optional. Disable password login. Default: true - ``no-create-home``: Optional. Do not create home directory. Default: false - - ``no-user-group``: Optional. Do not create group named after user. - Default: false - ``no-log-init``: Optional. Do not initialize lastlog and faillog for user. Default: false - - ``ssh-import-id``: Optional. SSH id to import for user. Default: none - - ``ssh-autorized-keys``: Optional. List of ssh keys to add to user's + - ``no-user-group``: Optional. Do not create group named after user. + Default: false + - ``passwd``: Hash of user password + - ``primary-group``: Optional. Primary group for user. Default to new group + named after user. + - ``selinux-user``: Optional. SELinux user for user's login. Default to + default SELinux user. + - ``shell``: Optional. The user's login shell. The default is to set no + shell, which results in a system-specific default being used. + - ``snapuser``: Optional. Specify an email address to create the user as + a Snappy user through ``snap create-user``. If an Ubuntu SSO account is + associated with the address, username and SSH keys will be requested from + there. Default: none + - ``ssh-authorized-keys``: Optional. List of ssh keys to add to user's authkeys file. Default: none + - ``ssh-import-id``: Optional. SSH id to import for user. Default: none - ``sudo``: Optional. Sudo rule to use, or list of sudo rules to use. Default: none. - ``system``: Optional. Create user as system user with no home directory. Default: false + - ``uid``: Optional. The user's ID. Default: The next available value. .. 
note:: Specifying a hash of a user's password with ``passwd`` is a security risk @@ -65,23 +76,33 @@ config keys for an entry in ``users`` are as follows: **Config keys**:: groups: - - ubuntu: [foo, bar] - - cloud-users + - : [, ] + - users: - default - name: - gecos: - primary-group: - groups: - selinux-user: expiredate: - ssh-import-id: + gecos: + groups: + homedir: + inactive: lock_passwd: + no-create-home: + no-log-init: + no-user-group: passwd: + primary-group: + selinux-user: + shell: + snapuser: + ssh-authorized-keys: + - + - + ssh-import-id: sudo: - inactive: system: + uid: """ # Ensure this is aliased to a name not 'distros' -- cgit v1.2.3 From d059d480c3a5bbeb3bb2e8ff2350f85d64721c11 Mon Sep 17 00:00:00 2001 From: Dimitri John Ledkov Date: Sat, 13 May 2017 16:15:37 +0100 Subject: netplan: pass macaddress, when specified, for vlans When vlan mac address is specified in config, render it for netplan and for ENI. LP: #1690388 --- cloudinit/net/netplan.py | 4 +++- tests/unittests/test_net.py | 3 +++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py index 56b41be4..9b71de97 100644 --- a/cloudinit/net/netplan.py +++ b/cloudinit/net/netplan.py @@ -345,7 +345,9 @@ class Renderer(renderer.Renderer): 'id': ifcfg.get('vlan_id'), 'link': ifcfg.get('vlan-raw-device') } - + macaddr = ifcfg.get('mac_address', None) + if macaddr is not None: + vlan['macaddress'] = macaddr.lower() _extract_addresses(ifcfg, vlan) vlans.update({ifname: vlan}) diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index d36d0e76..68a0157a 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -558,6 +558,7 @@ iface eth0.101 inet static dns-nameservers 192.168.0.10 10.23.23.134 dns-search barley.maas sacchromyces.maas brettanomyces.maas gateway 192.168.0.1 + hwaddress aa:bb:cc:dd:ee:11 mtu 1500 vlan-raw-device eth0 vlan_id 101 @@ -680,6 +681,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true gateway4: 192.168.0.1 id: 101 link: eth0 + macaddress: aa:bb:cc:dd:ee:11 nameservers: addresses: - 192.168.0.10 @@ -723,6 +725,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true name: eth0.101 vlan_link: eth0 vlan_id: 101 + mac_address: aa:bb:cc:dd:ee:11 mtu: 1500 subnets: - type: static -- cgit v1.2.3 From 3d97b29bd71b9de5fb14d8bd320c20545b88a81b Mon Sep 17 00:00:00 2001 From: Ben Howard Date: Wed, 26 Apr 2017 14:07:53 -0600 Subject: DigitalOcean: remove routes except for the public interface. Previously, the datasource for DigitalOcean allowed for a gateway on each NIC. As a result, on Ubuntu 16.04, networking.service was broken. For 17.04 and later, Ubuntu _replaces_ the default gateway with the second gateway on 'ifup' after reboot. DigitalOcean is looking at changing the meta-data, however, this will result in another version of the meta-data JSON. LP: #1681531. 
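The new rule reduces to a small predicate (keep_gateway is an illustrative name; the actual change is the inline check in convert_network_configuration below):

    def keep_gateway(nic_type, netdef):
        # keep a subnet's gateway only on the public NIC, and never for the
        # 'anchor' metadata addresses, so a secondary gateway cannot replace
        # the primary default route after 'ifup'
        return nic_type == "public" and "anchor" not in netdef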
--- cloudinit/sources/helpers/digitalocean.py | 2 +- .../unittests/test_datasource/test_digitalocean.py | 25 ++++++++++++++++++++++ 2 files changed, 26 insertions(+), 1 deletion(-) diff --git a/cloudinit/sources/helpers/digitalocean.py b/cloudinit/sources/helpers/digitalocean.py index 257989e8..693f8d5c 100644 --- a/cloudinit/sources/helpers/digitalocean.py +++ b/cloudinit/sources/helpers/digitalocean.py @@ -162,7 +162,7 @@ def convert_network_configuration(config, dns_servers): continue sub_part = _get_subnet_part(raw_subnet) - if netdef in ('private', 'anchor_ipv4', 'anchor_ipv6'): + if nic_type != "public" or "anchor" in netdef: del sub_part['gateway'] subnets.append(sub_part) diff --git a/tests/unittests/test_datasource/test_digitalocean.py b/tests/unittests/test_datasource/test_digitalocean.py index a11166a9..e97a679a 100644 --- a/tests/unittests/test_datasource/test_digitalocean.py +++ b/tests/unittests/test_datasource/test_digitalocean.py @@ -1,6 +1,8 @@ # Copyright (C) 2014 Neal Shrader # # Author: Neal Shrader +# Author: Ben Howard +# Author: Scott Moser # # This file is part of cloud-init. See LICENSE file for license information. @@ -262,6 +264,29 @@ class TestNetworkConvert(TestCase): print(json.dumps(subn, indent=3)) return subn + def test_correct_gateways_defined(self): + """test to make sure the eth0 ipv4 and ipv6 gateways are defined""" + netcfg = self._get_networking() + gateways = [] + for nic_def in netcfg.get('config'): + if nic_def.get('type') != 'physical': + continue + for subn in nic_def.get('subnets'): + if 'gateway' in subn: + gateways.append(subn.get('gateway')) + + # we should have two gateways, one ipv4 and ipv6 + self.assertEqual(len(gateways), 2) + + # make that the ipv6 gateway is there + (nic_def, meta_def) = self._get_nic_definition('public', 'eth0') + ipv4_def = meta_def.get('ipv4') + self.assertIn(ipv4_def.get('gateway'), gateways) + + # make sure the the ipv6 gateway is there + ipv6_def = meta_def.get('ipv6') + self.assertIn(ipv6_def.get('gateway'), gateways) + def test_public_interface_defined(self): """test that the public interface is defined as eth0""" (nic_def, meta_def) = self._get_nic_definition('public', 'eth0') -- cgit v1.2.3 From 2c0655feb9a194b5fbdfe90a5f847c16f1e15409 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 19 May 2017 13:12:54 -0400 Subject: Fix get_interfaces_by_mac for empty macs Some interfaces (greptap0 in the bug) have a mac address of '00:00:00:00:00:00'. That was causing a duplicate mac detection as the 'lo' device also has that mac. The change here is to just ignore macs other than 'lo' that have that. LP: #1692028 --- cloudinit/net/__init__.py | 3 +++ tests/unittests/test_net.py | 27 +++++++++++++++++++-------- 2 files changed, 22 insertions(+), 8 deletions(-) diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index a072a8d6..8c6cd057 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -393,6 +393,7 @@ def get_interfaces_by_mac(): else: raise ret = {} + empty_mac = '00:00:00:00:00:00' for name in devs: if not interface_has_own_mac(name): continue @@ -404,6 +405,8 @@ def get_interfaces_by_mac(): # some devices may not have a mac (tun0) if not mac: continue + if mac == empty_mac and name != 'lo': + continue if mac in ret: raise RuntimeError( "duplicate mac found! 
both '%s' and '%s' have mac '%s'" % diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 68a0157a..8bd3f433 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -1542,24 +1542,24 @@ class TestNetRenderers(CiTestCase): class TestGetInterfacesByMac(CiTestCase): - _data = {'devices': ['enp0s1', 'enp0s2', 'bond1', 'bridge1', - 'bridge1-nic', 'tun0', 'bond1.101'], - 'bonds': ['bond1'], + _data = {'bonds': ['bond1'], 'bridges': ['bridge1'], 'vlans': ['bond1.101'], 'own_macs': ['enp0s1', 'enp0s2', 'bridge1-nic', 'bridge1', - 'bond1.101'], + 'bond1.101', 'lo'], 'macs': {'enp0s1': 'aa:aa:aa:aa:aa:01', 'enp0s2': 'aa:aa:aa:aa:aa:02', 'bond1': 'aa:aa:aa:aa:aa:01', 'bond1.101': 'aa:aa:aa:aa:aa:01', 'bridge1': 'aa:aa:aa:aa:aa:03', 'bridge1-nic': 'aa:aa:aa:aa:aa:03', + 'lo': '00:00:00:00:00:00', + 'greptap0': '00:00:00:00:00:00', 'tun0': None}} data = {} def _se_get_devicelist(self): - return self.data['devices'] + return list(self.data['devices']) def _se_get_interface_mac(self, name): return self.data['macs'][name] @@ -1575,6 +1575,7 @@ class TestGetInterfacesByMac(CiTestCase): def _mock_setup(self): self.data = copy.deepcopy(self._data) + self.data['devices'] = set(list(self.data['macs'].keys())) mocks = ('get_devicelist', 'get_interface_mac', 'is_bridge', 'interface_has_own_mac', 'is_vlan') self.mocks = {} @@ -1602,7 +1603,7 @@ class TestGetInterfacesByMac(CiTestCase): [mock.call('enp0s1'), mock.call('bond1')], any_order=True) self.assertEqual( {'aa:aa:aa:aa:aa:01': 'enp0s1', 'aa:aa:aa:aa:aa:02': 'enp0s2', - 'aa:aa:aa:aa:aa:03': 'bridge1-nic'}, + 'aa:aa:aa:aa:aa:03': 'bridge1-nic', '00:00:00:00:00:00': 'lo'}, ret) def test_excludes_bridges(self): @@ -1611,7 +1612,7 @@ class TestGetInterfacesByMac(CiTestCase): # set everything other than 'b1' to be a bridge. # then expect b1 is the only thing left. self.data['macs']['b1'] = 'aa:aa:aa:aa:aa:b1' - self.data['devices'].append('b1') + self.data['devices'].add('b1') self.data['bonds'] = [] self.data['own_macs'] = self.data['devices'] self.data['bridges'] = [f for f in self.data['devices'] if f != "b1"] @@ -1628,7 +1629,7 @@ class TestGetInterfacesByMac(CiTestCase): # set everything other than 'b1' to be a vlan. # then expect b1 is the only thing left. self.data['macs']['b1'] = 'aa:aa:aa:aa:aa:b1' - self.data['devices'].append('b1') + self.data['devices'].add('b1') self.data['bonds'] = [] self.data['bridges'] = [] self.data['own_macs'] = self.data['devices'] @@ -1640,6 +1641,16 @@ class TestGetInterfacesByMac(CiTestCase): mock.call('b1')], any_order=True) + def test_duplicates_of_empty_mac_are_ok(self): + """Duplicate macs of 00:00:00:00:00:00 should be skipped.""" + self._mock_setup() + empty_mac = "00:00:00:00:00:00" + addnics = ('greptap1', 'lo', 'greptap2') + self.data['macs'].update(dict((k, empty_mac) for k in addnics)) + self.data['devices'].update(set(addnics)) + ret = net.get_interfaces_by_mac() + self.assertEqual('lo', ret[empty_mac]) + def _gzip_data(data): with io.BytesIO() as iobuf: -- cgit v1.2.3 From 9c33cb24b8a834aa8034db4c989725901a0814c6 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 22 May 2017 10:17:38 -0400 Subject: tox: move pylint target to 1.7.1 The motivation for this is to make tip-pylint target green. It does 2 things: a.) silence a warning that is generated in pylint 1.7.1, but not other versions of pylint. This bug in pylint is filed at https://github.com/PyCQA/pylint/issues/1444 b.) 
move tox -e pylint to use pylint 1.7.1 --- cloudinit/util.py | 5 +++-- tox.ini | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/cloudinit/util.py b/cloudinit/util.py index 67ff7ba3..135e4608 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -478,10 +478,11 @@ def decomp_gzip(data, quiet=True, decode=True): try: buf = six.BytesIO(encode_text(data)) with contextlib.closing(gzip.GzipFile(None, "rb", 1, buf)) as gh: + # E1101 is https://github.com/PyCQA/pylint/issues/1444 if decode: - return decode_binary(gh.read()) + return decode_binary(gh.read()) # pylint: disable=E1101 else: - return gh.read() + return gh.read() # pylint: disable=E1101 except Exception as e: if quiet: return data diff --git a/tox.ini b/tox.ini index 826f554e..fce07740 100644 --- a/tox.ini +++ b/tox.ini @@ -18,7 +18,7 @@ setenv = LC_ALL = en_US.utf-8 [testenv:pylint] -deps = pylint==1.6.5 +deps = pylint==1.7.1 commands = {envpython} -m pylint {posargs:cloudinit} [testenv:py3] -- cgit v1.2.3 From 5375d9da344e84f496c54b253b42454e0888d101 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Mon, 22 May 2017 11:58:06 -0700 Subject: Fixing wrong file name regression. --- tests/cloud_tests/testcases/modules/ntp_servers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/cloud_tests/testcases/modules/ntp_servers.py b/tests/cloud_tests/testcases/modules/ntp_servers.py index 9ef270ee..419c8270 100644 --- a/tests/cloud_tests/testcases/modules/ntp_servers.py +++ b/tests/cloud_tests/testcases/modules/ntp_servers.py @@ -19,7 +19,7 @@ class TestNtpServers(base.CloudTestCase): def test_ntp_entires(self): """Test config pools entries""" - out = self.get_data_file('ntp_conf_pools') + out = self.get_data_file('ntp_conf_servers') servers = self.cloud_config.get('ntp').get('servers') for server in servers: self.assertIn('server %s iburst' % server, out) -- cgit v1.2.3 From 9fa17d4bc1ef2564e30ab655bf6de462296aecad Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Mon, 22 May 2017 12:43:31 -0700 Subject: function spelling & docstring update --- tests/cloud_tests/testcases/modules/ntp_servers.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/cloud_tests/testcases/modules/ntp_servers.py b/tests/cloud_tests/testcases/modules/ntp_servers.py index 419c8270..4010cf80 100644 --- a/tests/cloud_tests/testcases/modules/ntp_servers.py +++ b/tests/cloud_tests/testcases/modules/ntp_servers.py @@ -17,8 +17,8 @@ class TestNtpServers(base.CloudTestCase): out = self.get_data_file('ntp_conf_dist_servers') self.assertEqual(0, int(out)) - def test_ntp_entires(self): - """Test config pools entries""" + def test_ntp_entries(self): + """Test config server entries""" out = self.get_data_file('ntp_conf_servers') servers = self.cloud_config.get('ntp').get('servers') for server in servers: -- cgit v1.2.3 From 3507b59eaa4914ba041f9c7ae987a2cfb036d8b5 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 19 May 2017 14:57:04 -0400 Subject: disk_setup: fix several issues with gpt disk partitions. This fixes several shortcomings of disk_setup with gpt disks. * 'sgdisk -p' was being used to determine the size of a disk. this can fail if it believes there is a bad gpt partition table. Instead we just use blockdev now for both mbr or gpt disks. * parsing of sgdisk -p output assumed that the 'name' of the partition type would not have any spaces (Microsoft basic data) * interaction with sgdisk did not realize that sgdisk wants input of '8300' rather than '83' and will output the same. 
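The type-code handling can be sketched in isolation (to_sgdisk and from_sgdisk are illustrative names; the real logic lives in exec_mkpart_gpt and check_partition_gpt_layout below):

    def to_sgdisk(partition_type):
        # sgdisk -t wants at least four characters: 82 -> '8200'
        return str(partition_type).ljust(4, "0")

    def from_sgdisk(code):
        # map sgdisk -p output back to the short form: '8300' -> '83'
        if len(code) == 4 and code.endswith("00"):
            return code[0:2]
        return code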
LP: #1692087 --- cloudinit/config/cc_disk_setup.py | 80 ++++++++++++---------- .../test_handler/test_handler_disk_setup.py | 2 +- 2 files changed, 46 insertions(+), 36 deletions(-) diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py index 29eb5dd8..e1505b39 100644 --- a/cloudinit/config/cc_disk_setup.py +++ b/cloudinit/config/cc_disk_setup.py @@ -431,7 +431,7 @@ def get_dyn_func(*args): raise Exception("No such function %s to call!" % func_name) -def get_mbr_hdd_size(device): +def get_hdd_size(device): try: size_in_bytes, _ = util.subp([BLKDEV_CMD, '--getsize64', device]) sector_size, _ = util.subp([BLKDEV_CMD, '--getss', device]) @@ -441,22 +441,6 @@ def get_mbr_hdd_size(device): return int(size_in_bytes) / int(sector_size) -def get_gpt_hdd_size(device): - out, _ = util.subp([SGDISK_CMD, '-p', device], update_env=LANG_C_ENV) - for line in out.splitlines(): - if line.startswith("Disk"): - return line.split()[2] - raise Exception("Failed to get %s size from sgdisk" % (device)) - - -def get_hdd_size(table_type, device): - """ - Returns the hard disk size. - This works with any disk type, including GPT. - """ - return get_dyn_func("get_%s_hdd_size", table_type, device) - - def check_partition_mbr_layout(device, layout): """ Returns true if the partition layout matches the one on the disk @@ -504,12 +488,35 @@ def check_partition_gpt_layout(device, layout): device, e)) out_lines = iter(out.splitlines()) - # Skip header + # Skip header. Output looks like: + # *************************************************************** + # Found invalid GPT and valid MBR; converting MBR to GPT format + # in memory. + # *************************************************************** + # + # Disk /dev/vdb: 83886080 sectors, 40.0 GiB + # Logical sector size: 512 bytes + # Disk identifier (GUID): 8A7F11AD-3953-491B-8051-077E01C8E9A7 + # Partition table holds up to 128 entries + # First usable sector is 34, last usable sector is 83886046 + # Partitions will be aligned on 2048-sector boundaries + # Total free space is 83476413 sectors (39.8 GiB) + # + # Number Start (sector) End (sector) Size Code Name + # 1 2048 206847 100.0 MiB 0700 Microsoft basic data for line in out_lines: if line.strip().startswith('Number'): break - return [line.strip().split()[-1] for line in out_lines] + codes = [line.strip().split()[5] for line in out_lines] + cleaned = [] + + # user would expect a code '83' to be Linux, but sgdisk outputs 8300. + for code in codes: + if len(code) == 4 and code.endswith("00"): + code = code[0:2] + cleaned.append(code) + return cleaned def check_partition_layout(table_type, device, layout): @@ -523,6 +530,8 @@ def check_partition_layout(table_type, device, layout): found_layout = get_dyn_func( "check_partition_%s_layout", table_type, device, layout) + LOG.debug("called check_partition_%s_layout(%s, %s), returned: %s", + table_type, device, layout, found_layout) if isinstance(layout, bool): # if we are using auto partitioning, or "True" be happy # if a single partition exists. 
@@ -530,18 +539,17 @@ def check_partition_layout(table_type, device, layout): return True return False - else: - if len(found_layout) != len(layout): - return False - else: - # This just makes sure that the number of requested - # partitions and the type labels are right - for x in range(1, len(layout) + 1): - if isinstance(layout[x - 1], tuple): - _, part_type = layout[x] - if int(found_layout[x]) != int(part_type): - return False - return True + elif len(found_layout) == len(layout): + # This just makes sure that the number of requested + # partitions and the type labels are right + layout_types = [str(x[1]) if isinstance(x, (tuple, list)) else None + for x in layout] + LOG.debug("Layout types=%s. Found types=%s", + layout_types, found_layout) + for itype, ftype in zip(layout_types, found_layout): + if itype is not None and str(ftype) != str(itype): + return False + return True return False @@ -704,9 +712,11 @@ def exec_mkpart_gpt(device, layout): util.subp([SGDISK_CMD, '-n', '{}:{}:{}'.format(index, start, end), device]) if partition_type is not None: + # convert to a 4 char (or more) string right padded with 0 + # 82 -> 8200. 'Linux' -> 'Linux' + pinput = str(partition_type).ljust(4, "0") util.subp( - [SGDISK_CMD, - '-t', '{}:{}'.format(index, partition_type), device]) + [SGDISK_CMD, '-t', '{}:{}'.format(index, pinput), device]) except Exception: LOG.warning("Failed to partition device %s", device) raise @@ -777,8 +787,8 @@ def mkpart(device, definition): LOG.debug("Skipping partitioning on configured device %s", device) return - LOG.debug("Checking for device size") - device_size = get_hdd_size(table_type, device) + LOG.debug("Checking for device size of %s", device) + device_size = get_hdd_size(device) LOG.debug("Calculating partition layout") part_definition = get_partition_layout(table_type, device_size, layout) diff --git a/tests/unittests/test_handler/test_handler_disk_setup.py b/tests/unittests/test_handler/test_handler_disk_setup.py index 35dff3ee..e3226978 100644 --- a/tests/unittests/test_handler/test_handler_disk_setup.py +++ b/tests/unittests/test_handler/test_handler_disk_setup.py @@ -66,7 +66,7 @@ class TestGetMbrHddSize(TestCase): size_in_sectors = size_in_bytes / sector_size self._configure_subp_mock(size_in_bytes, sector_size) self.assertEqual(size_in_sectors, - cc_disk_setup.get_mbr_hdd_size('/dev/sda1')) + cc_disk_setup.get_hdd_size('/dev/sda1')) def test_size_for_512_byte_sectors(self): self._test_for_sector_size(512) -- cgit v1.2.3 From f38fa41317602908139aa96e930b634f65e39555 Mon Sep 17 00:00:00 2001 From: Andreas Karis Date: Fri, 21 Apr 2017 20:35:39 -0400 Subject: RHEL/CentOS: Fix dual stack IPv4/IPv6 configuration. Dual stack IPv4/IPv6 configuration via config drive is broken for RHEL7. This patch fixes several scenarios for IPv4/IPv6/dual-stack with multiple IP assignment. Removes usage of unpopular IPv4 alias files and invalid IPv6 alias files. Also fix associated unit tests. 
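As a sketch of the new split rendering (addresses are illustrative; Route is the class changed below):

    from cloudinit.net.sysconfig import Route

    route = Route('eth0', '/etc/sysconfig')
    route['ADDRESS0'], route['NETMASK0'], route['GATEWAY0'] = (
        '10.0.0.0', '255.0.0.0', '172.19.3.254')    # an IPv4 route
    route['ADDRESS1'], route['NETMASK1'], route['GATEWAY1'] = (
        '2001:db8::', '64', '2001:db8::1')          # an IPv6 route
    # route-eth0 gets only the reindexed ADDRESS0/GATEWAY0/NETMASK0 keys:
    print(route.to_string('ipv4'))
    # route6-eth0 gets '2001:db8::/64 via 2001:db8::1':
    print(route.to_string('ipv6'))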
LP: #1679817 LP: #1685534 LP: #1685532 --- cloudinit/net/sysconfig.py | 244 ++++++++++++++++++------- tests/unittests/test_distros/test_netconfig.py | 8 +- tests/unittests/test_net.py | 79 +++----- 3 files changed, 199 insertions(+), 132 deletions(-) diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index d981277a..58c5713f 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -59,6 +59,9 @@ class ConfigMap(object): def __setitem__(self, key, value): self._conf[key] = value + def __getitem__(self, key): + return self._conf[key] + def drop(self, key): self._conf.pop(key, None) @@ -83,7 +86,8 @@ class ConfigMap(object): class Route(ConfigMap): """Represents a route configuration.""" - route_fn_tpl = '%(base)s/network-scripts/route-%(name)s' + route_fn_tpl_ipv4 = '%(base)s/network-scripts/route-%(name)s' + route_fn_tpl_ipv6 = '%(base)s/network-scripts/route6-%(name)s' def __init__(self, route_name, base_sysconf_dir): super(Route, self).__init__() @@ -102,9 +106,58 @@ class Route(ConfigMap): return r @property - def path(self): - return self.route_fn_tpl % ({'base': self._base_sysconf_dir, - 'name': self._route_name}) + def path_ipv4(self): + return self.route_fn_tpl_ipv4 % ({'base': self._base_sysconf_dir, + 'name': self._route_name}) + + @property + def path_ipv6(self): + return self.route_fn_tpl_ipv6 % ({'base': self._base_sysconf_dir, + 'name': self._route_name}) + + def is_ipv6_route(self, address): + return ':' in address + + def to_string(self, proto="ipv4"): + # only accept ipv4 and ipv6 + if proto not in ['ipv4', 'ipv6']: + raise ValueError("Unknown protocol '%s'" % (str(proto))) + buf = six.StringIO() + buf.write(_make_header()) + if self._conf: + buf.write("\n") + # need to reindex IPv4 addresses + # (because Route can contain a mix of IPv4 and IPv6) + reindex = -1 + for key in sorted(self._conf.keys()): + if 'ADDRESS' in key: + index = key.replace('ADDRESS', '') + address_value = str(self._conf[key]) + # only accept combinations: + # if proto ipv6 only display ipv6 routes + # if proto ipv4 only display ipv4 routes + # do not add ipv6 routes if proto is ipv4 + # do not add ipv4 routes if proto is ipv6 + # (this array will contain a mix of ipv4 and ipv6) + if proto == "ipv4" and not self.is_ipv6_route(address_value): + netmask_value = str(self._conf['NETMASK' + index]) + gateway_value = str(self._conf['GATEWAY' + index]) + # increase IPv4 index + reindex = reindex + 1 + buf.write("%s=%s\n" % ('ADDRESS' + str(reindex), + _quote_value(address_value))) + buf.write("%s=%s\n" % ('GATEWAY' + str(reindex), + _quote_value(gateway_value))) + buf.write("%s=%s\n" % ('NETMASK' + str(reindex), + _quote_value(netmask_value))) + elif proto == "ipv6" and self.is_ipv6_route(address_value): + netmask_value = str(self._conf['NETMASK' + index]) + gateway_value = str(self._conf['GATEWAY' + index]) + buf.write("%s/%s via %s\n" % (address_value, + netmask_value, + gateway_value)) + + return buf.getvalue() class NetInterface(ConfigMap): @@ -211,65 +264,119 @@ class Renderer(renderer.Renderer): iface_cfg[new_key] = old_value @classmethod - def _render_subnet(cls, iface_cfg, route_cfg, subnet): - subnet_type = subnet.get('type') - if subnet_type == 'dhcp6': - iface_cfg['DHCPV6C'] = True - iface_cfg['IPV6INIT'] = True - iface_cfg['BOOTPROTO'] = 'dhcp' - elif subnet_type in ['dhcp4', 'dhcp']: - iface_cfg['BOOTPROTO'] = 'dhcp' - elif subnet_type == 'static': - iface_cfg['BOOTPROTO'] = 'static' - if subnet_is_ipv6(subnet): - iface_cfg['IPV6ADDR'] = subnet['address'] + def 
_render_subnets(cls, iface_cfg, subnets): + # setting base values + iface_cfg['BOOTPROTO'] = 'none' + + # modifying base values according to subnets + for i, subnet in enumerate(subnets, start=len(iface_cfg.children)): + subnet_type = subnet.get('type') + if subnet_type == 'dhcp6': iface_cfg['IPV6INIT'] = True + iface_cfg['DHCPV6C'] = True + iface_cfg['BOOTPROTO'] = 'dhcp' + elif subnet_type in ['dhcp4', 'dhcp']: + iface_cfg['BOOTPROTO'] = 'dhcp' + elif subnet_type == 'static': + # grep BOOTPROTO sysconfig.txt -A2 | head -3 + # BOOTPROTO=none|bootp|dhcp + # 'bootp' or 'dhcp' cause a DHCP client + # to run on the device. Any other + # value causes any static configuration + # in the file to be applied. + # ==> the following should not be set to 'static' + # but should remain 'none' + # if iface_cfg['BOOTPROTO'] == 'none': + # iface_cfg['BOOTPROTO'] = 'static' + if subnet_is_ipv6(subnet): + iface_cfg['IPV6INIT'] = True else: - iface_cfg['IPADDR'] = subnet['address'] - else: - raise ValueError("Unknown subnet type '%s' found" - " for interface '%s'" % (subnet_type, - iface_cfg.name)) - if 'netmask' in subnet: - iface_cfg['NETMASK'] = subnet['netmask'] - is_ipv6 = subnet.get('ipv6') - for route in subnet.get('routes', []): - if _is_default_route(route): - if ( - (subnet.get('ipv4') and - route_cfg.has_set_default_ipv4) or - (subnet.get('ipv6') and - route_cfg.has_set_default_ipv6) - ): - raise ValueError("Duplicate declaration of default " - "route found for interface '%s'" - % (iface_cfg.name)) - # NOTE(harlowja): ipv6 and ipv4 default gateways - gw_key = 'GATEWAY0' - nm_key = 'NETMASK0' - addr_key = 'ADDRESS0' - # The owning interface provides the default route. - # - # TODO(harlowja): add validation that no other iface has - # also provided the default route? 
- iface_cfg['DEFROUTE'] = True - if 'gateway' in route: - if is_ipv6: - iface_cfg['IPV6_DEFAULTGW'] = route['gateway'] - route_cfg.has_set_default_ipv6 = True + raise ValueError("Unknown subnet type '%s' found" + " for interface '%s'" % (subnet_type, + iface_cfg.name)) + + # set IPv4 and IPv6 static addresses + ipv4_index = -1 + ipv6_index = -1 + for i, subnet in enumerate(subnets, start=len(iface_cfg.children)): + subnet_type = subnet.get('type') + if subnet_type == 'dhcp6': + continue + elif subnet_type in ['dhcp4', 'dhcp']: + continue + elif subnet_type == 'static': + if subnet_is_ipv6(subnet): + ipv6_index = ipv6_index + 1 + if 'netmask' in subnet and str(subnet['netmask']) != "": + ipv6_cidr = (subnet['address'] + + '/' + + str(subnet['netmask'])) else: - iface_cfg['GATEWAY'] = route['gateway'] - route_cfg.has_set_default_ipv4 = True - else: - gw_key = 'GATEWAY%s' % route_cfg.last_idx - nm_key = 'NETMASK%s' % route_cfg.last_idx - addr_key = 'ADDRESS%s' % route_cfg.last_idx - route_cfg.last_idx += 1 - for (old_key, new_key) in [('gateway', gw_key), - ('netmask', nm_key), - ('network', addr_key)]: - if old_key in route: - route_cfg[new_key] = route[old_key] + ipv6_cidr = subnet['address'] + if ipv6_index == 0: + iface_cfg['IPV6ADDR'] = ipv6_cidr + elif ipv6_index == 1: + iface_cfg['IPV6ADDR_SECONDARIES'] = ipv6_cidr + else: + iface_cfg['IPV6ADDR_SECONDARIES'] = ( + iface_cfg['IPV6ADDR_SECONDARIES'] + + " " + ipv6_cidr) + else: + ipv4_index = ipv4_index + 1 + if ipv4_index == 0: + iface_cfg['IPADDR'] = subnet['address'] + if 'netmask' in subnet: + iface_cfg['NETMASK'] = subnet['netmask'] + else: + iface_cfg['IPADDR' + str(ipv4_index)] = \ + subnet['address'] + if 'netmask' in subnet: + iface_cfg['NETMASK' + str(ipv4_index)] = \ + subnet['netmask'] + + @classmethod + def _render_subnet_routes(cls, iface_cfg, route_cfg, subnets): + for i, subnet in enumerate(subnets, start=len(iface_cfg.children)): + for route in subnet.get('routes', []): + is_ipv6 = subnet.get('ipv6') + + if _is_default_route(route): + if ( + (subnet.get('ipv4') and + route_cfg.has_set_default_ipv4) or + (subnet.get('ipv6') and + route_cfg.has_set_default_ipv6) + ): + raise ValueError("Duplicate declaration of default " + "route found for interface '%s'" + % (iface_cfg.name)) + # NOTE(harlowja): ipv6 and ipv4 default gateways + gw_key = 'GATEWAY0' + nm_key = 'NETMASK0' + addr_key = 'ADDRESS0' + # The owning interface provides the default route. + # + # TODO(harlowja): add validation that no other iface has + # also provided the default route? 
+ iface_cfg['DEFROUTE'] = True + if 'gateway' in route: + if is_ipv6: + iface_cfg['IPV6_DEFAULTGW'] = route['gateway'] + route_cfg.has_set_default_ipv6 = True + else: + iface_cfg['GATEWAY'] = route['gateway'] + route_cfg.has_set_default_ipv4 = True + + else: + gw_key = 'GATEWAY%s' % route_cfg.last_idx + nm_key = 'NETMASK%s' % route_cfg.last_idx + addr_key = 'ADDRESS%s' % route_cfg.last_idx + route_cfg.last_idx += 1 + for (old_key, new_key) in [('gateway', gw_key), + ('netmask', nm_key), + ('network', addr_key)]: + if old_key in route: + route_cfg[new_key] = route[old_key] @classmethod def _render_bonding_opts(cls, iface_cfg, iface): @@ -295,15 +402,9 @@ class Renderer(renderer.Renderer): iface_subnets = iface.get("subnets", []) iface_cfg = iface_contents[iface_name] route_cfg = iface_cfg.routes - if len(iface_subnets) == 1: - cls._render_subnet(iface_cfg, route_cfg, iface_subnets[0]) - elif len(iface_subnets) > 1: - for i, isubnet in enumerate(iface_subnets, - start=len(iface_cfg.children)): - iface_sub_cfg = iface_cfg.copy() - iface_sub_cfg.name = "%s:%s" % (iface_name, i) - iface_cfg.children.append(iface_sub_cfg) - cls._render_subnet(iface_sub_cfg, route_cfg, isubnet) + + cls._render_subnets(iface_cfg, iface_subnets) + cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets) @classmethod def _render_bond_interfaces(cls, network_state, iface_contents): @@ -387,7 +488,10 @@ class Renderer(renderer.Renderer): if iface_cfg: contents[iface_cfg.path] = iface_cfg.to_string() if iface_cfg.routes: - contents[iface_cfg.routes.path] = iface_cfg.routes.to_string() + contents[iface_cfg.routes.path_ipv4] = \ + iface_cfg.routes.to_string("ipv4") + contents[iface_cfg.routes.path_ipv6] = \ + iface_cfg.routes.to_string("ipv6") return contents def render_network_state(self, network_state, target=None): diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py index 1e10a33d..fd7c051f 100644 --- a/tests/unittests/test_distros/test_netconfig.py +++ b/tests/unittests/test_distros/test_netconfig.py @@ -476,7 +476,7 @@ NETWORKING=yes expected_buf = ''' # Created by cloud-init on instance boot automatically, do not edit. # -BOOTPROTO=static +BOOTPROTO=none DEVICE=eth0 IPADDR=192.168.1.5 NETMASK=255.255.255.0 @@ -533,7 +533,6 @@ NETWORKING=yes mock.patch.object(util, 'load_file', return_value='')) mocks.enter_context( mock.patch.object(os.path, 'isfile', return_value=False)) - rh_distro.apply_network(BASE_NET_CFG_IPV6, False) self.assertEqual(len(write_bufs), 4) @@ -626,11 +625,10 @@ IPV6_AUTOCONF=no expected_buf = ''' # Created by cloud-init on instance boot automatically, do not edit. # -BOOTPROTO=static +BOOTPROTO=none DEVICE=eth0 -IPV6ADDR=2607:f0d0:1002:0011::2 +IPV6ADDR=2607:f0d0:1002:0011::2/64 IPV6INIT=yes -NETMASK=64 NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 8bd3f433..feeab908 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -137,7 +137,7 @@ OS_SAMPLES = [ """ # Created by cloud-init on instance boot automatically, do not edit. # -BOOTPROTO=static +BOOTPROTO=none DEFROUTE=yes DEVICE=eth0 GATEWAY=172.19.3.254 @@ -205,38 +205,14 @@ nameserver 172.19.0.12 # Created by cloud-init on instance boot automatically, do not edit. 
# BOOTPROTO=none -DEVICE=eth0 -HWADDR=fa:16:3e:ed:9a:59 -NM_CONTROLLED=no -ONBOOT=yes -TYPE=Ethernet -USERCTL=no -""".lstrip()), - ('etc/sysconfig/network-scripts/ifcfg-eth0:0', - """ -# Created by cloud-init on instance boot automatically, do not edit. -# -BOOTPROTO=static DEFROUTE=yes -DEVICE=eth0:0 +DEVICE=eth0 GATEWAY=172.19.3.254 HWADDR=fa:16:3e:ed:9a:59 IPADDR=172.19.1.34 +IPADDR1=10.0.0.10 NETMASK=255.255.252.0 -NM_CONTROLLED=no -ONBOOT=yes -TYPE=Ethernet -USERCTL=no -""".lstrip()), - ('etc/sysconfig/network-scripts/ifcfg-eth0:1', - """ -# Created by cloud-init on instance boot automatically, do not edit. -# -BOOTPROTO=static -DEVICE=eth0:1 -HWADDR=fa:16:3e:ed:9a:59 -IPADDR=10.0.0.10 -NETMASK=255.255.255.0 +NETMASK1=255.255.255.0 NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet @@ -266,7 +242,7 @@ nameserver 172.19.0.12 }], "ip_address": "172.19.1.34", "id": "network0" }, { - "network_id": "public-ipv6", + "network_id": "public-ipv6-a", "type": "ipv6", "netmask": "", "link": "tap1a81968a-79", "routes": [ @@ -277,6 +253,20 @@ nameserver 172.19.0.12 } ], "ip_address": "2001:DB8::10", "id": "network1" + }, { + "network_id": "public-ipv6-b", + "type": "ipv6", "netmask": "64", + "link": "tap1a81968a-79", + "routes": [ + ], + "ip_address": "2001:DB9::10", "id": "network2" + }, { + "network_id": "public-ipv6-c", + "type": "ipv6", "netmask": "64", + "link": "tap1a81968a-79", + "routes": [ + ], + "ip_address": "2001:DB10::10", "id": "network3" }], "links": [ { @@ -296,41 +286,16 @@ nameserver 172.19.0.12 # Created by cloud-init on instance boot automatically, do not edit. # BOOTPROTO=none -DEVICE=eth0 -HWADDR=fa:16:3e:ed:9a:59 -NM_CONTROLLED=no -ONBOOT=yes -TYPE=Ethernet -USERCTL=no -""".lstrip()), - ('etc/sysconfig/network-scripts/ifcfg-eth0:0', - """ -# Created by cloud-init on instance boot automatically, do not edit. -# -BOOTPROTO=static DEFROUTE=yes -DEVICE=eth0:0 +DEVICE=eth0 GATEWAY=172.19.3.254 HWADDR=fa:16:3e:ed:9a:59 IPADDR=172.19.1.34 -NETMASK=255.255.252.0 -NM_CONTROLLED=no -ONBOOT=yes -TYPE=Ethernet -USERCTL=no -""".lstrip()), - ('etc/sysconfig/network-scripts/ifcfg-eth0:1', - """ -# Created by cloud-init on instance boot automatically, do not edit. -# -BOOTPROTO=static -DEFROUTE=yes -DEVICE=eth0:1 -HWADDR=fa:16:3e:ed:9a:59 IPV6ADDR=2001:DB8::10 +IPV6ADDR_SECONDARIES="2001:DB9::10/64 2001:DB10::10/64" IPV6INIT=yes IPV6_DEFAULTGW=2001:DB8::1 -NETMASK= +NETMASK=255.255.252.0 NM_CONTROLLED=no ONBOOT=yes TYPE=Ethernet -- cgit v1.2.3 From 06a7f0afc4db3f8ba2a6b3b521274ee45a028ef2 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Mon, 22 May 2017 12:13:55 -0700 Subject: tests: Apply workaround for snapd bug in test case. Snapd does not start on artful or on the versions in proposed. This changes the behavior of the test to confirm that snapd is installed, not that it is started, while the snap team fixes the issue (LP: ##1690880). 
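The equivalent manual check, as a sketch (assumes a Debian-based guest with dpkg, as the collect script does):

    import subprocess

    # assert the snapd package is installed rather than probing the snapd
    # service, which does not start on artful until the bug is fixed
    out = subprocess.check_output(['dpkg', '-s', 'snapd']).decode()
    assert 'Status: install ok installed' in out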
--- tests/cloud_tests/configs/modules/snappy.yaml | 4 ++-- tests/cloud_tests/testcases/modules/snappy.py | 7 ++----- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/tests/cloud_tests/configs/modules/snappy.yaml b/tests/cloud_tests/configs/modules/snappy.yaml index 923bfe12..0e7dc852 100644 --- a/tests/cloud_tests/configs/modules/snappy.yaml +++ b/tests/cloud_tests/configs/modules/snappy.yaml @@ -6,8 +6,8 @@ cloud_config: | snappy: system_snappy: auto collect_scripts: - snap_version: | + snapd: | #!/bin/bash - snap --version + dpkg -s snapd # vi: ts=4 expandtab diff --git a/tests/cloud_tests/testcases/modules/snappy.py b/tests/cloud_tests/testcases/modules/snappy.py index 3e2f5924..b92271c1 100644 --- a/tests/cloud_tests/testcases/modules/snappy.py +++ b/tests/cloud_tests/testcases/modules/snappy.py @@ -9,10 +9,7 @@ class TestSnappy(base.CloudTestCase): def test_snappy_version(self): """Test snappy version output""" - out = self.get_data_file('snap_version') - self.assertIn('snap ', out) - self.assertIn('snapd ', out) - self.assertIn('series ', out) - self.assertIn('ubuntu ', out) + out = self.get_data_file('snapd') + self.assertIn('Status: install ok installed', out) # vi: ts=4 expandtab -- cgit v1.2.3 From 2825a917e5fa130818c0d77219f32961b99a057f Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 23 May 2017 13:09:26 -0400 Subject: flake8: move the pinned version of flake8 up to 3.3.0 This just moves flake8 and related tools up to newer versions and fixes the complaints associated with that. We added to the list of flake8 ignores: H102: do not put vim info in source files H304: no relative imports Also updates and pins the following in the flake8 environment: pep8: 1.7.0 => drop (although hacking still pulls it in). pyflakes 1.1.0 => 1.5.0 hacking 0.10.2 => 0.13.0 flake8 2.5.4 => 3.3.0 pycodestyle none => 2.3.1 --- cloudinit/sources/DataSourceAzure.py | 1 + test-requirements.txt | 8 ++++---- tests/unittests/test_datasource/test_altcloud.py | 2 +- tests/unittests/test_datasource/test_azure.py | 2 +- tests/unittests/test_datasource/test_maas.py | 2 +- tests/unittests/test_datasource/test_opennebula.py | 4 ++-- tests/unittests/test_datasource/test_openstack.py | 4 ++-- tests/unittests/test_datasource/test_ovf.py | 2 +- tests/unittests/test_distros/test_resolv.py | 2 +- tests/unittests/test_handler/test_handler_power_state.py | 4 ++-- tests/unittests/test_handler/test_handler_snappy.py | 4 ++-- tests/unittests/test_helpers.py | 2 +- tests/unittests/test_net.py | 2 +- tests/unittests/test_util.py | 10 +++++----- tools/hacking.py | 5 +++-- tools/mock-meta.py | 2 +- tools/net-convert.py | 2 +- tox.ini | 2 +- 18 files changed, 31 insertions(+), 29 deletions(-) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 44857c09..b9458ffa 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -163,6 +163,7 @@ def get_resource_disk_on_freebsd(port_id): return devname return None + # update the FreeBSD specific information if util.is_FreeBSD(): DEFAULT_PRIMARY_NIC = 'hn0' diff --git a/test-requirements.txt b/test-requirements.txt index 0e7fc8fb..1b39ea5c 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -13,7 +13,7 @@ contextlib2 setuptools # Used for syle checking -pep8==1.7.0 -pyflakes==1.1.0 -flake8==2.5.4 -hacking==0.10.2 +pycodestyle==2.3.1 +pyflakes==1.5.0 +flake8==3.3.0 +hacking==0.13.0 diff --git a/tests/unittests/test_datasource/test_altcloud.py 
b/tests/unittests/test_datasource/test_altcloud.py index 63a2b04d..b6d4a453 100644 --- a/tests/unittests/test_datasource/test_altcloud.py +++ b/tests/unittests/test_datasource/test_altcloud.py @@ -419,7 +419,7 @@ class TestReadUserDataCallback(TestCase): '''Test read_user_data_callback() no files are found.''' _remove_user_data_files(self.mount_dir) - self.assertEqual(None, dsac.read_user_data_callback(self.mount_dir)) + self.assertIsNone(dsac.read_user_data_callback(self.mount_dir)) def force_arch(arch=None): diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 67cddeb9..852ec703 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -239,7 +239,7 @@ fdescfs /dev/fd fdescfs rw 0 0 with mock.patch.object(os.path, 'exists', return_value=True): res = get_path_dev_freebsd('/etc', mnt_list) - self.assertNotEqual(res, None) + self.assertIsNotNone(res) def test_basic_seed_dir(self): odata = {'HostName': "myhost", 'UserName': "myuser"} diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py index 693882d2..c1911bf4 100644 --- a/tests/unittests/test_datasource/test_maas.py +++ b/tests/unittests/test_datasource/test_maas.py @@ -44,7 +44,7 @@ class TestMAASDataSource(TestCase): # verify that 'userdata' is not returned as part of the metadata self.assertFalse(('user-data' in md)) - self.assertEqual(vd, None) + self.assertIsNone(vd) def test_seed_dir_valid_extra(self): """Verify extra files do not affect seed_dir validity.""" diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/test_datasource/test_opennebula.py index bce66125..b0f8e435 100644 --- a/tests/unittests/test_datasource/test_opennebula.py +++ b/tests/unittests/test_datasource/test_opennebula.py @@ -126,14 +126,14 @@ class TestOpenNebulaDataSource(TestCase): populate_dir(self.seed_dir, {'context.sh': ''}) results = ds.read_context_disk_dir(self.seed_dir) - self.assertEqual(results['userdata'], None) + self.assertIsNone(results['userdata']) self.assertEqual(results['metadata'], {}) def test_seed_dir_empty2_context(self): populate_context_dir(self.seed_dir, {}) results = ds.read_context_disk_dir(self.seed_dir) - self.assertEqual(results['userdata'], None) + self.assertIsNone(results['userdata']) self.assertEqual(results['metadata'], {}) def test_seed_dir_broken_context(self): diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py index 7bf55084..c2905d1a 100644 --- a/tests/unittests/test_datasource/test_openstack.py +++ b/tests/unittests/test_datasource/test_openstack.py @@ -242,7 +242,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): self.assertEqual(USER_DATA, ds_os.userdata_raw) self.assertEqual(2, len(ds_os.files)) self.assertEqual(VENDOR_DATA, ds_os.vendordata_pure) - self.assertEqual(ds_os.vendordata_raw, None) + self.assertIsNone(ds_os.vendordata_raw) @hp.activate def test_bad_datasource_meta(self): @@ -318,7 +318,7 @@ class TestVendorDataLoading(test_helpers.TestCase): self.assertEqual(self.cvj(data), data) def test_vd_load_dict_no_ci(self): - self.assertEqual(self.cvj({'foo': 'bar'}), None) + self.assertIsNone(self.cvj({'foo': 'bar'})) def test_vd_load_dict_ci_dict(self): self.assertRaises(ValueError, self.cvj, diff --git a/tests/unittests/test_datasource/test_ovf.py b/tests/unittests/test_datasource/test_ovf.py index 3e09510c..477cf8ed 100644 --- 
a/tests/unittests/test_datasource/test_ovf.py +++ b/tests/unittests/test_datasource/test_ovf.py @@ -68,6 +68,6 @@ class TestReadOvfEnv(test_helpers.TestCase): md, ud, cfg = dsovf.read_ovf_environment(env) self.assertEqual({"instance-id": "inst-001"}, md) self.assertEqual({'password': "passw0rd"}, cfg) - self.assertEqual(None, ud) + self.assertIsNone(ud) # vi: ts=4 expandtab diff --git a/tests/unittests/test_distros/test_resolv.py b/tests/unittests/test_distros/test_resolv.py index c9d03475..97168cf9 100644 --- a/tests/unittests/test_distros/test_resolv.py +++ b/tests/unittests/test_distros/test_resolv.py @@ -30,7 +30,7 @@ class TestResolvHelper(TestCase): def test_local_domain(self): rp = resolv_conf.ResolvConf(BASE_RESOLVE) - self.assertEqual(None, rp.local_domain) + self.assertIsNone(rp.local_domain) rp.local_domain = "bob" self.assertEqual('bob', rp.local_domain) diff --git a/tests/unittests/test_handler/test_handler_power_state.py b/tests/unittests/test_handler/test_handler_power_state.py index 3fd0069d..e382210d 100644 --- a/tests/unittests/test_handler/test_handler_power_state.py +++ b/tests/unittests/test_handler/test_handler_power_state.py @@ -15,12 +15,12 @@ class TestLoadPowerState(t_help.TestCase): def test_no_config(self): # completely empty config should mean do nothing (cmd, _timeout, _condition) = psc.load_power_state({}) - self.assertEqual(cmd, None) + self.assertIsNone(cmd) def test_irrelevant_config(self): # no power_state field in config should return None for cmd (cmd, _timeout, _condition) = psc.load_power_state({'foo': 'bar'}) - self.assertEqual(cmd, None) + self.assertIsNone(cmd) def test_invalid_mode(self): cfg = {'power_state': {'mode': 'gibberish'}} diff --git a/tests/unittests/test_handler/test_handler_snappy.py b/tests/unittests/test_handler/test_handler_snappy.py index edb73d6d..e4d07622 100644 --- a/tests/unittests/test_handler/test_handler_snappy.py +++ b/tests/unittests/test_handler/test_handler_snappy.py @@ -419,7 +419,7 @@ class TestSnapConfig(FilesystemMockingTestCase): def test_snap_config_add_snap_user_no_config(self): usercfg = add_snap_user(cfg=None) - self.assertEqual(usercfg, None) + self.assertIsNone(usercfg) def test_snap_config_add_snap_user_not_dict(self): cfg = ['foobar'] @@ -428,7 +428,7 @@ class TestSnapConfig(FilesystemMockingTestCase): def test_snap_config_add_snap_user_no_email(self): cfg = {'assertions': [], 'known': True} usercfg = add_snap_user(cfg=cfg) - self.assertEqual(usercfg, None) + self.assertIsNone(usercfg) @mock.patch('cloudinit.config.cc_snap_config.util') def test_snap_config_add_snap_user_email_only(self, mock_util): diff --git a/tests/unittests/test_helpers.py b/tests/unittests/test_helpers.py index 955f8dfa..f1979e89 100644 --- a/tests/unittests/test_helpers.py +++ b/tests/unittests/test_helpers.py @@ -32,6 +32,6 @@ class TestPaths(test_helpers.ResourceUsingTestCase): myds._instance_id = None mypaths = self.getCloudPaths(myds) - self.assertEqual(None, mypaths.get_ipath()) + self.assertIsNone(mypaths.get_ipath()) # vi: ts=4 expandtab diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index feeab908..7104d00e 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -1301,7 +1301,7 @@ class TestCmdlineReadKernelConfig(CiTestCase): files = sorted(populate_dir(self.tmp_dir(), content)) found = cmdline.read_kernel_cmdline_config( files=files, cmdline='foo root=/dev/sda', mac_addrs=self.macs) - self.assertEqual(found, None) + self.assertIsNone(found) def test_ip_cmdline_both_ip_ip6(self): 
content = {'net-eth0.conf': DHCP_CONTENT_1, diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 490760d1..014aa6a3 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -44,7 +44,7 @@ class TestGetCfgOptionListOrStr(helpers.TestCase): """None is returned if key is not found and no default given.""" config = {} result = util.get_cfg_option_list(config, "key") - self.assertEqual(None, result) + self.assertIsNone(result) def test_not_found_with_default(self): """Default is returned if key is not found.""" @@ -432,13 +432,13 @@ class TestReadDMIData(helpers.FilesystemMockingTestCase): def test_none_returned_if_neither_source_has_data(self): self.patch_mapping({}) self._configure_dmidecode_return('key', 'value') - self.assertEqual(None, util.read_dmi_data('expect-fail')) + self.assertIsNone(util.read_dmi_data('expect-fail')) def test_none_returned_if_dmidecode_not_in_path(self): self.patched_funcs.enter_context( mock.patch.object(util, 'which', lambda _: False)) self.patch_mapping({}) - self.assertEqual(None, util.read_dmi_data('expect-fail')) + self.assertIsNone(util.read_dmi_data('expect-fail')) def test_dots_returned_instead_of_foxfox(self): # uninitialized dmi values show as \xff, return those as . @@ -626,8 +626,8 @@ class TestSubp(helpers.TestCase): def test_returns_none_if_no_capture(self): (out, err) = util.subp(self.stdin2out, data=b'', capture=False) - self.assertEqual(err, None) - self.assertEqual(out, None) + self.assertIsNone(err) + self.assertIsNone(out) def test_bunch_of_slashes_in_path(self): self.assertEqual("/target/my/path/", diff --git a/tools/hacking.py b/tools/hacking.py index 6c320935..e6a05136 100755 --- a/tools/hacking.py +++ b/tools/hacking.py @@ -165,7 +165,8 @@ if __name__ == "__main__": pep8._main() finally: if len(_missingImport) > 0: - print >> sys.stderr, ("%i imports missing in this test environment" - % len(_missingImport)) + sys.stderr.write( + "%i imports missing in this test environment\n" % + len(_missingImport)) # vi: ts=4 expandtab diff --git a/tools/mock-meta.py b/tools/mock-meta.py index 82816e8a..f185dbf2 100755 --- a/tools/mock-meta.py +++ b/tools/mock-meta.py @@ -21,8 +21,8 @@ import functools import json import logging import os -import socket import random +import socket import string import sys import yaml diff --git a/tools/net-convert.py b/tools/net-convert.py index 870da639..b2db8adf 100755 --- a/tools/net-convert.py +++ b/tools/net-convert.py @@ -9,8 +9,8 @@ import yaml from cloudinit.sources.helpers import openstack from cloudinit.net import eni -from cloudinit.net import network_state from cloudinit.net import netplan +from cloudinit.net import network_state from cloudinit.net import sysconfig diff --git a/tox.ini b/tox.ini index fce07740..03bb5f19 100644 --- a/tox.ini +++ b/tox.ini @@ -34,7 +34,7 @@ setenv = [flake8] #H102 Apache 2.0 license header not found -ignore=H404,H405,H105,H301,H104,H403,H101,H102 +ignore=H404,H405,H105,H301,H104,H403,H101,H102,H106,H304 exclude = .venv,.tox,dist,doc,*egg,.git,build,tools [testenv:doc] -- cgit v1.2.3 From fd0c88cf8e8aa015eadb5ab842e872cb627197ec Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Mon, 22 May 2017 23:11:42 -0600 Subject: cc_ntp: Restructure cc_ntp unit tests. Any CiTestCase subclass can now set a class attribute with_logs = True and tests can now make assertions on self.logs.getvalue(). This branch restructures a bit of cc_ntp module to get better test coverage of the module. 
It also restructures the handler_cc_ntp unit tests to avoid nested mocks where possible. Deeply nested mocks cause a couple of issues: - greater risk: mocks are permanent within the scope, so multiple call-sites could be affected by package mocks - less legible tests: each mock doesn't advertise the actual call-site - tight coupling: the unit test logic is too tightly bound to the actual implementation in remote (unrelated) modules, which makes it more costly to maintain code - false success: we should be testing the expected behavior, not specific remote method names, as we want to know if that underlying behavior changes and breaks us. LP: #1692794 --- cloudinit/config/cc_ntp.py | 25 +- tests/unittests/helpers.py | 26 ++ tests/unittests/test_handler/test_handler_ntp.py | 441 ++++++++--------------- 3 files changed, 188 insertions(+), 304 deletions(-) diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py index 225f898a..5cc54536 100644 --- a/cloudinit/config/cc_ntp.py +++ b/cloudinit/config/cc_ntp.py @@ -53,14 +53,12 @@ distros = ['centos', 'debian', 'fedora', 'opensuse', 'ubuntu'] def handle(name, cfg, cloud, log, _args): - """ - Enable and configure ntp + """Enable and configure ntp.""" - ntp: - pools: ['0.{{distro}}.pool.ntp.org', '1.{{distro}}.pool.ntp.org'] - servers: ['192.168.2.1'] - - """ + if 'ntp' not in cfg: + LOG.debug( + "Skipping module named %s, not present or disabled by cfg", name) + return ntp_cfg = cfg.get('ntp', {}) @@ -69,18 +67,12 @@ def handle(name, cfg, cloud, log, _args): " but not a dictionary type," " is a %s %instead"), type_utils.obj_name(ntp_cfg)) - if 'ntp' not in cfg: - LOG.debug("Skipping module named %s," - "not present or disabled by cfg", name) - return True - rename_ntp_conf() # ensure when ntp is installed it has a configuration file # to use instead of starting up with packaged defaults write_ntp_config_template(ntp_cfg, cloud) install_ntp(cloud.distro.install_packages, packages=['ntp'], check_exe="ntpd") - # if ntp was already installed, it may not have started try: reload_ntp(systemd=cloud.distro.uses_systemd()) @@ -98,8 +90,10 @@ def install_ntp(install_func, packages=None, check_exe="ntpd"): install_func(packages) -def rename_ntp_conf(config=NTP_CONF): +def rename_ntp_conf(config=None): """Rename any existing ntp.conf file and render from template""" + if config is None: # For testing + config = NTP_CONF if os.path.exists(config): util.rename(config, config + ".dist") @@ -117,8 +111,9 @@ def write_ntp_config_template(cfg, cloud): pools = cfg.get('pools', []) if len(servers) == 0 and len(pools) == 0: - LOG.debug('Adding distro default ntp pool servers') pools = generate_server_names(cloud.distro.name) + LOG.debug( + 'Adding distro default ntp pool servers: %s', ','.join(pools)) params = { 'servers': servers, diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py index d24f817d..9ff15993 100644 --- a/tests/unittests/helpers.py +++ b/tests/unittests/helpers.py @@ -4,6 +4,7 @@ from __future__ import print_function import functools import json +import logging import os import shutil import sys @@ -18,6 +19,10 @@ try: from contextlib import ExitStack except ImportError: from contextlib2 import ExitStack +try: + from cStringIO import StringIO +except ImportError: + from io import StringIO from cloudinit import helpers as ch from cloudinit import util @@ -87,6 +92,27 @@ class TestCase(unittest2.TestCase): class CiTestCase(TestCase): """This is the preferred test case base class unless user needs other test case classes below.""" + +
# Subclass overrides for specific test behavior + # Whether or not a unit test needs logfile setup + with_logs = False + + def setUp(self): + super(CiTestCase, self).setUp() + if self.with_logs: + # Create a log handler so unit tests can search expected logs. + logger = logging.getLogger() + self.logs = StringIO() + handler = logging.StreamHandler(self.logs) + self.old_handlers = logger.handlers + logger.handlers = [handler] + + def tearDown(self): + if self.with_logs: + # Remove the handler we setup + logging.getLogger().handlers = self.old_handlers + super(CiTestCase, self).tearDown() + def tmp_dir(self, dir=None, cleanup=True): # return a full path to a temporary directory that will be cleaned up. if dir is None: diff --git a/tests/unittests/test_handler/test_handler_ntp.py b/tests/unittests/test_handler/test_handler_ntp.py index 02bd3046..21f2ab19 100644 --- a/tests/unittests/test_handler/test_handler_ntp.py +++ b/tests/unittests/test_handler/test_handler_ntp.py @@ -2,351 +2,214 @@ from cloudinit.config import cc_ntp from cloudinit.sources import DataSourceNone -from cloudinit import templater from cloudinit import (distros, helpers, cloud, util) from ..helpers import FilesystemMockingTestCase, mock -import logging + import os +from os.path import dirname import shutil -import tempfile - -LOG = logging.getLogger(__name__) -NTP_TEMPLATE = """ +NTP_TEMPLATE = b"""\ ## template: jinja - -{% if pools %}# pools -{% endif %} -{% for pool in pools -%} -pool {{pool}} iburst -{% endfor %} -{%- if servers %}# servers -{% endif %} -{% for server in servers -%} -server {{server}} iburst -{% endfor %} - -""" - - -NTP_EXPECTED_UBUNTU = """ -# pools -pool 0.mycompany.pool.ntp.org iburst -# servers -server 192.168.23.3 iburst - +servers {{servers}} +pools {{pools}} """ class TestNtp(FilesystemMockingTestCase): + with_logs = True + def setUp(self): super(TestNtp, self).setUp() self.subp = util.subp - self.new_root = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.new_root) + self.new_root = self.tmp_dir() - def _get_cloud(self, distro, metadata=None): + def _get_cloud(self, distro): self.patchUtils(self.new_root) - paths = helpers.Paths({}) + paths = helpers.Paths({'templates_dir': self.new_root}) cls = distros.fetch(distro) mydist = cls(distro, {}, paths) myds = DataSourceNone.DataSourceNone({}, mydist, paths) - if metadata: - myds.metadata.update(metadata) return cloud.Cloud(myds, paths, {}, mydist, None) @mock.patch("cloudinit.config.cc_ntp.util") def test_ntp_install(self, mock_util): - cc = self._get_cloud('ubuntu') - cc.distro = mock.MagicMock() - cc.distro.name = 'ubuntu' - mock_util.which.return_value = None + """ntp_install installs via install_func when check_exe is absent.""" + mock_util.which.return_value = None # check_exe not found. 
install_func = mock.MagicMock() - cc_ntp.install_ntp(install_func, packages=['ntpx'], check_exe='ntpdx') - self.assertTrue(install_func.called) mock_util.which.assert_called_with('ntpdx') - install_pkg = install_func.call_args_list[0][0][0] - self.assertEqual(sorted(install_pkg), ['ntpx']) + install_func.assert_called_once_with(['ntpx']) @mock.patch("cloudinit.config.cc_ntp.util") def test_ntp_install_not_needed(self, mock_util): - cc = self._get_cloud('ubuntu') - cc.distro = mock.MagicMock() - cc.distro.name = 'ubuntu' - mock_util.which.return_value = ["/usr/sbin/ntpd"] - cc_ntp.install_ntp(cc) - self.assertFalse(cc.distro.install_packages.called) + """ntp_install doesn't attempt install when check_exe is found.""" + mock_util.which.return_value = ["/usr/sbin/ntpd"] # check_exe found. + install_func = mock.MagicMock() + cc_ntp.install_ntp(install_func, packages=['ntp'], check_exe='ntpd') + install_func.assert_not_called() def test_ntp_rename_ntp_conf(self): - with mock.patch.object(os.path, 'exists', - return_value=True) as mockpath: - with mock.patch.object(util, 'rename') as mockrename: - cc_ntp.rename_ntp_conf() - - mockpath.assert_called_with('/etc/ntp.conf') - mockrename.assert_called_with('/etc/ntp.conf', '/etc/ntp.conf.dist') + """When NTP_CONF exists, rename_ntp moves it.""" + ntpconf = self.tmp_path("ntp.conf", self.new_root) + os.mknod(ntpconf) + with mock.patch("cloudinit.config.cc_ntp.NTP_CONF", ntpconf): + cc_ntp.rename_ntp_conf() + self.assertFalse(os.path.exists(ntpconf)) + self.assertTrue(os.path.exists("{}.dist".format(ntpconf))) def test_ntp_rename_ntp_conf_skip_missing(self): - with mock.patch.object(os.path, 'exists', - return_value=False) as mockpath: - with mock.patch.object(util, 'rename') as mockrename: - cc_ntp.rename_ntp_conf() - - mockpath.assert_called_with('/etc/ntp.conf') - mockrename.assert_not_called() - - def ntp_conf_render(self, distro): - """ntp_conf_render - Test rendering of a ntp.conf from template for a given distro + """When NTP_CONF doesn't exist rename_ntp doesn't create a file.""" + ntpconf = self.tmp_path("ntp.conf", self.new_root) + self.assertFalse(os.path.exists(ntpconf)) + with mock.patch("cloudinit.config.cc_ntp.NTP_CONF", ntpconf): + cc_ntp.rename_ntp_conf() + self.assertFalse(os.path.exists("{}.dist".format(ntpconf))) + self.assertFalse(os.path.exists(ntpconf)) + + def test_write_ntp_config_template_from_ntp_conf_tmpl_with_servers(self): + """write_ntp_config_template reads content from ntp.conf.tmpl. + + It reads ntp.conf.tmpl if present and renders the value from servers + key. When no pools key is defined, template is rendered using an empty + list for pools. 
""" - - cfg = {'ntp': {}} - mycloud = self._get_cloud(distro) - distro_names = cc_ntp.generate_server_names(distro) - - with mock.patch.object(templater, 'render_to_file') as mocktmpl: - with mock.patch.object(os.path, 'isfile', return_value=True): - with mock.patch.object(util, 'rename'): - cc_ntp.write_ntp_config_template(cfg, mycloud) - - mocktmpl.assert_called_once_with( - ('/etc/cloud/templates/ntp.conf.%s.tmpl' % distro), - '/etc/ntp.conf', - {'servers': [], 'pools': distro_names}) - - def test_ntp_conf_render_rhel(self): - """Test templater.render_to_file() for rhel""" - self.ntp_conf_render('rhel') - - def test_ntp_conf_render_debian(self): - """Test templater.render_to_file() for debian""" - self.ntp_conf_render('debian') - - def test_ntp_conf_render_fedora(self): - """Test templater.render_to_file() for fedora""" - self.ntp_conf_render('fedora') - - def test_ntp_conf_render_sles(self): - """Test templater.render_to_file() for sles""" - self.ntp_conf_render('sles') - - def test_ntp_conf_render_ubuntu(self): - """Test templater.render_to_file() for ubuntu""" - self.ntp_conf_render('ubuntu') - - def test_ntp_conf_servers_no_pools(self): distro = 'ubuntu' - pools = [] - servers = ['192.168.2.1'] cfg = { - 'ntp': { - 'pools': pools, - 'servers': servers, - } + 'servers': ['192.168.2.1', '192.168.2.2'] } mycloud = self._get_cloud(distro) - - with mock.patch.object(templater, 'render_to_file') as mocktmpl: - with mock.patch.object(os.path, 'isfile', return_value=True): - with mock.patch.object(util, 'rename'): - cc_ntp.write_ntp_config_template(cfg.get('ntp'), mycloud) - - mocktmpl.assert_called_once_with( - ('/etc/cloud/templates/ntp.conf.%s.tmpl' % distro), - '/etc/ntp.conf', - {'servers': servers, 'pools': pools}) - - def test_ntp_conf_custom_pools_no_server(self): + ntp_conf = self.tmp_path("ntp.conf", self.new_root) # Doesn't exist + # Create ntp.conf.tmpl + with open('{}.tmpl'.format(ntp_conf), 'wb') as stream: + stream.write(NTP_TEMPLATE) + with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): + cc_ntp.write_ntp_config_template(cfg, mycloud) + content = util.read_file_or_url('file://' + ntp_conf).contents + self.assertEqual( + "servers ['192.168.2.1', '192.168.2.2']\npools []\n", + content.decode()) + + def test_write_ntp_config_template_uses_ntp_conf_distro_no_servers(self): + """write_ntp_config_template reads content from ntp.conf.distro.tmpl. + + It reads ntp.conf..tmpl before attempting ntp.conf.tmpl. It + renders the value from the keys servers and pools. When no + servers value is present, template is rendered using an empty list. + """ distro = 'ubuntu' - pools = ['0.mycompany.pool.ntp.org'] - servers = [] cfg = { - 'ntp': { - 'pools': pools, - 'servers': servers, - } + 'pools': ['10.0.0.1', '10.0.0.2'] } mycloud = self._get_cloud(distro) - - with mock.patch.object(templater, 'render_to_file') as mocktmpl: - with mock.patch.object(os.path, 'isfile', return_value=True): - with mock.patch.object(util, 'rename'): - cc_ntp.write_ntp_config_template(cfg.get('ntp'), mycloud) - - mocktmpl.assert_called_once_with( - ('/etc/cloud/templates/ntp.conf.%s.tmpl' % distro), - '/etc/ntp.conf', - {'servers': servers, 'pools': pools}) - - def test_ntp_conf_custom_pools_and_server(self): + ntp_conf = self.tmp_path('ntp.conf', self.new_root) # Doesn't exist + # Create ntp.conf.tmpl which isn't read + with open('{}.tmpl'.format(ntp_conf), 'wb') as stream: + stream.write(b'NOT READ: ntp.conf..tmpl is primary') + # Create ntp.conf.tmpl. 
+ with open('{}.{}.tmpl'.format(ntp_conf, distro), 'wb') as stream: + stream.write(NTP_TEMPLATE) + with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): + cc_ntp.write_ntp_config_template(cfg, mycloud) + content = util.read_file_or_url('file://' + ntp_conf).contents + self.assertEqual( + "servers []\npools ['10.0.0.1', '10.0.0.2']\n", + content.decode()) + + def test_write_ntp_config_template_defaults_pools_when_empty_lists(self): + """write_ntp_config_template defaults pools servers upon empty config. + + When both pools and servers are empty, default NR_POOL_SERVERS get + configured. + """ distro = 'ubuntu' - pools = ['0.mycompany.pool.ntp.org'] - servers = ['192.168.23.3'] - cfg = { - 'ntp': { - 'pools': pools, - 'servers': servers, - } - } mycloud = self._get_cloud(distro) - - with mock.patch.object(templater, 'render_to_file') as mocktmpl: - with mock.patch.object(os.path, 'isfile', return_value=True): - with mock.patch.object(util, 'rename'): - cc_ntp.write_ntp_config_template(cfg.get('ntp'), mycloud) - - mocktmpl.assert_called_once_with( - ('/etc/cloud/templates/ntp.conf.%s.tmpl' % distro), - '/etc/ntp.conf', - {'servers': servers, 'pools': pools}) - - def test_ntp_conf_contents_match(self): - """Test rendered contents of /etc/ntp.conf for ubuntu""" - pools = ['0.mycompany.pool.ntp.org'] - servers = ['192.168.23.3'] + ntp_conf = self.tmp_path('ntp.conf', self.new_root) # Doesn't exist + # Create ntp.conf.tmpl + with open('{}.tmpl'.format(ntp_conf), 'wb') as stream: + stream.write(NTP_TEMPLATE) + with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): + cc_ntp.write_ntp_config_template({}, mycloud) + content = util.read_file_or_url('file://' + ntp_conf).contents + default_pools = [ + "{}.{}.pool.ntp.org".format(x, distro) + for x in range(0, cc_ntp.NR_POOL_SERVERS)] + self.assertEqual( + "servers []\npools {}\n".format(default_pools), + content.decode()) + self.assertIn( + "Adding distro default ntp pool servers: {}".format( + ",".join(default_pools)), + self.logs.getvalue()) + + def test_ntp_handler_mocked_template(self): + """Test ntp handler renders ubuntu ntp.conf template.""" + pools = ['0.mycompany.pool.ntp.org', '3.mycompany.pool.ntp.org'] + servers = ['192.168.23.3', '192.168.23.4'] cfg = { 'ntp': { 'pools': pools, - 'servers': servers, + 'servers': servers } } mycloud = self._get_cloud('ubuntu') - side_effect = [NTP_TEMPLATE.lstrip()] - - # work backwards from util.write_file and mock out call path - # write_ntp_config_template() - # cloud.get_template_filename() - # os.path.isfile() - # templater.render_to_file() - # templater.render_from_file() - # util.load_file() - # util.write_file() - # - with mock.patch.object(util, 'write_file') as mockwrite: - with mock.patch.object(util, 'load_file', side_effect=side_effect): - with mock.patch.object(os.path, 'isfile', return_value=True): - with mock.patch.object(util, 'rename'): - cc_ntp.write_ntp_config_template(cfg.get('ntp'), - mycloud) - - mockwrite.assert_called_once_with( - '/etc/ntp.conf', - NTP_EXPECTED_UBUNTU, - mode=420) - - def test_ntp_handler(self): - """Test ntp handler renders ubuntu ntp.conf template""" - pools = ['0.mycompany.pool.ntp.org'] - servers = ['192.168.23.3'] + ntp_conf = self.tmp_path('ntp.conf', self.new_root) # Doesn't exist + # Create ntp.conf.tmpl + with open('{}.tmpl'.format(ntp_conf), 'wb') as stream: + stream.write(NTP_TEMPLATE) + with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): + with mock.patch.object(util, 'which', return_value=None): + 
cc_ntp.handle('notimportant', cfg, mycloud, None, None) + + content = util.read_file_or_url('file://' + ntp_conf).contents + self.assertEqual( + 'servers {}\npools {}\n'.format(servers, pools), + content.decode()) + + def test_ntp_handler_real_distro_templates(self): + """Test ntp handler renders the shipped distro ntp.conf templates.""" + pools = ['0.mycompany.pool.ntp.org', '3.mycompany.pool.ntp.org'] + servers = ['192.168.23.3', '192.168.23.4'] cfg = { 'ntp': { 'pools': pools, - 'servers': servers, + 'servers': servers } } - mycloud = self._get_cloud('ubuntu') - mycloud.distro = mock.MagicMock() - mycloud.distro.uses_systemd.return_value = True - side_effect = [NTP_TEMPLATE.lstrip()] - - with mock.patch.object(util, 'which', return_value=None): - with mock.patch.object(os.path, 'exists'): - with mock.patch.object(util, 'write_file') as mockwrite: - with mock.patch.object(util, 'load_file', - side_effect=side_effect): - with mock.patch.object(os.path, 'isfile', - return_value=True): - with mock.patch.object(util, 'rename'): - with mock.patch.object(util, 'subp') as msubp: - cc_ntp.handle("notimportant", cfg, - mycloud, LOG, None) - - mockwrite.assert_called_once_with( - '/etc/ntp.conf', - NTP_EXPECTED_UBUNTU, - mode=420) - msubp.assert_any_call(['systemctl', 'reload-or-restart', 'ntp'], - capture=True) - - @mock.patch("cloudinit.config.cc_ntp.install_ntp") - @mock.patch("cloudinit.config.cc_ntp.write_ntp_config_template") - @mock.patch("cloudinit.config.cc_ntp.rename_ntp_conf") - def test_write_config_before_install(self, mock_ntp_rename, - mock_ntp_write_config, - mock_install_ntp): - pools = ['0.mycompany.pool.ntp.org'] - servers = ['192.168.23.3'] - cfg = { - 'ntp': { - 'pools': pools, - 'servers': servers, - } - } - cc = self._get_cloud('ubuntu') - cc.distro = mock.MagicMock() - mock_parent = mock.MagicMock() - mock_parent.attach_mock(mock_ntp_rename, 'mock_ntp_rename') - mock_parent.attach_mock(mock_ntp_write_config, 'mock_ntp_write_config') - mock_parent.attach_mock(mock_install_ntp, 'mock_install_ntp') - - cc_ntp.handle('cc_ntp', cfg, cc, LOG, None) - - """Check call order""" - mock_parent.assert_has_calls([ - mock.call.mock_ntp_rename(), - mock.call.mock_ntp_write_config(cfg.get('ntp'), cc), - mock.call.mock_install_ntp(cc.distro.install_packages, - packages=['ntp'], check_exe="ntpd")]) - - @mock.patch("cloudinit.config.cc_ntp.reload_ntp") - @mock.patch("cloudinit.config.cc_ntp.install_ntp") - @mock.patch("cloudinit.config.cc_ntp.write_ntp_config_template") - @mock.patch("cloudinit.config.cc_ntp.rename_ntp_conf") - def test_reload_ntp_fail_raises_exception(self, mock_rename, - mock_write_conf, - mock_install, - mock_reload): - pools = ['0.mycompany.pool.ntp.org'] - servers = ['192.168.23.3'] - cfg = { - 'ntp': { - 'pools': pools, - 'servers': servers, - } - } - cc = self._get_cloud('ubuntu') - cc.distro = mock.MagicMock() - - mock_reload.side_effect = [util.ProcessExecutionError] - self.assertRaises(util.ProcessExecutionError, - cc_ntp.handle, 'cc_ntp', - cfg, cc, LOG, None) - - @mock.patch("cloudinit.config.cc_ntp.util") - def test_no_ntpcfg_does_nothing(self, mock_util): - cc = self._get_cloud('ubuntu') - cc.distro = mock.MagicMock() - cc_ntp.handle('cc_ntp', {}, cc, LOG, []) - self.assertFalse(cc.distro.install_packages.called) - self.assertFalse(mock_util.subp.called) - - @mock.patch("cloudinit.config.cc_ntp.util") - def test_reload_ntp_systemd(self, mock_util): - cc_ntp.reload_ntp(systemd=True) - self.assertTrue(mock_util.subp.called) - mock_util.subp.assert_called_with( - 
['systemctl', 'reload-or-restart', 'ntp'], capture=True) - - @mock.patch("cloudinit.config.cc_ntp.util") - def test_reload_ntp_service(self, mock_util): - cc_ntp.reload_ntp(systemd=False) - self.assertTrue(mock_util.subp.called) - mock_util.subp.assert_called_with( - ['service', 'ntp', 'restart'], capture=True) - + ntp_conf = self.tmp_path('ntp.conf', self.new_root) # Doesn't exist + for distro in ('debian', 'ubuntu', 'fedora', 'rhel', 'sles'): + mycloud = self._get_cloud(distro) + root_dir = dirname(dirname(os.path.realpath(util.__file__))) + tmpl_file = os.path.join( + '{}/templates/ntp.conf.{}.tmpl'.format(root_dir, distro)) + # Create a copy in our tmp_dir + shutil.copy( + tmpl_file, + os.path.join(self.new_root, 'ntp.conf.%s.tmpl' % distro)) + with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): + with mock.patch.object(util, 'which', return_value=[True]): + cc_ntp.handle('notimportant', cfg, mycloud, None, None) + + content = util.read_file_or_url('file://' + ntp_conf).contents + expected_servers = '\n'.join([ + 'server {} iburst'.format(server) for server in servers]) + self.assertIn( + expected_servers, content.decode(), + 'failed to render ntp.conf for distro:{}'.format(distro)) + expected_pools = '\n'.join([ + 'pool {} iburst'.format(pool) for pool in pools]) + self.assertIn( + expected_pools, content.decode(), + 'failed to render ntp.conf for distro:{}'.format(distro)) + + def test_no_ntpcfg_does_nothing(self): + """When no ntp section is defined handler logs a warning and noops.""" + cc_ntp.handle('cc_ntp', {}, None, None, []) + self.assertEqual( + 'Skipping module named cc_ntp, not present or disabled by cfg\n', + self.logs.getvalue()) # vi: ts=4 expandtab -- cgit v1.2.3 From 6b5369be71054a677b89fa8080f7ddd029ce4986 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 24 May 2017 09:36:57 -0400 Subject: tox/build: do not package depend on style requirements. When the style/checking dependencies were updated in test-requirements.txt, the debian package build dependencies created by ./packages/bddeb were also updated. Pycodestyle was added to the list in order to pin its version. That broke the package build for 16.04. The reason for this is simply that python3-pycodestyle is not available in 16.04. The change here is to remove style dependencies from test-requirements, and add them to the tox environments directly. We had previously changed the package build process to not run pep8 or flake8 simply to avoid having to code to N different versions of style checkers (3bcb72c593f). The link between package build and test-requirements still exists, though. So future breakage can occur if any package is added to test-requirements.txt (or requirements.txt) if the target distro release does not have a python3-<package> in its archive. There is also a bit of a tox.ini cleanup here, in that we do not have to explicitly list '-rrequirements.txt' as the setup.py pulls those in. And lastly, we drop the -rtest-requirements.txt from the base 'testenv', and add these test requirements only to environments that need to run tests. Finally, a change to packages/debian/control.in to drop the build dependencies that were listed for style checking, and also a dependency on iproute2, which was needed only by a bad unit test that has previously been fixed.
--- packages/debian/control.in | 4 ---- test-requirements.txt | 6 ------ tox.ini | 20 ++++++++++++-------- 3 files changed, 12 insertions(+), 18 deletions(-) diff --git a/packages/debian/control.in b/packages/debian/control.in index b58561e7..6c39d531 100644 --- a/packages/debian/control.in +++ b/packages/debian/control.in @@ -6,10 +6,6 @@ Maintainer: Scott Moser Build-Depends: debhelper (>= 9), dh-python, dh-systemd, - iproute2, - pep8, - pyflakes, - python3-pyflakes | pyflakes (<< 1.1.0-2), ${python}, ${test_requires}, ${requires} diff --git a/test-requirements.txt b/test-requirements.txt index 1b39ea5c..d9d41b57 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -11,9 +11,3 @@ coverage # Only really needed on older versions of python contextlib2 setuptools - -# Used for syle checking -pycodestyle==2.3.1 -pyflakes==1.5.0 -flake8==3.3.0 -hacking==0.13.0 diff --git a/tox.ini b/tox.ini index 03bb5f19..6276662d 100644 --- a/tox.ini +++ b/tox.ini @@ -4,13 +4,16 @@ recreate = True [testenv] commands = python -m nose {posargs:tests/unittests} -deps = -r{toxinidir}/test-requirements.txt - -r{toxinidir}/requirements.txt setenv = LC_ALL = en_US.utf-8 [testenv:flake8] basepython = python3 +deps = + pycodestyle==2.3.1 + pyflakes==1.5.0 + flake8==3.3.0 + hacking==0.13.0 commands = {envpython} -m flake8 {posargs:cloudinit/ tests/ tools/} # https://github.com/gabrielfalcao/HTTPretty/issues/223 @@ -23,11 +26,17 @@ commands = {envpython} -m pylint {posargs:cloudinit} [testenv:py3] basepython = python3 +deps = -r{toxinidir}/test-requirements.txt commands = {envpython} -m nose {posargs:--with-coverage \ --cover-erase --cover-branches --cover-inclusive \ --cover-package=cloudinit tests/unittests} +[testenv:py27] +basepython = python2.7 +deps = -r{toxinidir}/test-requirements.txt + [testenv:py26] +deps = -r{toxinidir}/test-requirements.txt commands = nosetests {posargs:tests/unittests} setenv = LC_ALL = C @@ -39,8 +48,7 @@ exclude = .venv,.tox,dist,doc,*egg,.git,build,tools [testenv:doc] basepython = python3 -deps = {[testenv]deps} - sphinx +deps = sphinx commands = {envpython} -m sphinx {posargs:doc/rtd doc/rtd_html} [testenv:xenial] @@ -63,10 +71,6 @@ deps = nose==1.3.7 unittest2==1.1.0 contextlib2==0.5.1 - pep8==1.7.0 - pyflakes==1.1.0 - flake8==2.5.4 - hacking==0.10.2 [testenv:centos6] basepython = python2.6 -- cgit v1.2.3 From 13673f8d2b815452aa22f8e3811e04481558a731 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 18 May 2017 13:19:14 -0400 Subject: fix tools/ds-identify to not write None twice. If the user configured: datasource_list: ["Ec2", "None"] then ds-identify would write datasource_list: ["Ec2", "None", "None"] which would break the logic to avoid warning. 
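ds-identify itself is a shell script, but the intended behavior of the fix below can be sketched in Python for clarity (the function name here is illustrative only):

    def append_none_fallback(datasource_list):
        # Append the 'None' fallback datasource only when it is absent.
        if 'None' not in datasource_list:
            datasource_list.append('None')
        return datasource_list

    append_none_fallback(['Ec2'])          # -> ['Ec2', 'None']
    append_none_fallback(['Ec2', 'None'])  # unchanged -> ['Ec2', 'None']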
--- tests/unittests/test_ds_identify.py | 7 +++++++ tools/ds-identify | 9 +++++---- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 9e148852..8559e1fe 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -210,6 +210,13 @@ class TestDsIdentify(CiTestCase): mydata['files'][cfgpath] = 'datasource_list: ["NoCloud"]\n' self._check_via_dict(mydata, rc=RC_FOUND, dslist=['NoCloud', DS_NONE]) + def test_configured_list_with_none(self): + """If user set a datasource_list, that should be used.""" + mydata = copy.deepcopy(VALID_CFG['GCE']) + cfgpath = 'etc/cloud/cloud.cfg.d/myds.cfg' + mydata['files'][cfgpath] = 'datasource_list: ["Ec2", "None"]\n' + self._check_via_dict(mydata, rc=RC_FOUND, dslist=['Ec2', DS_NONE]) + def blkid_out(disks=None): """Convert a list of disk dictionaries into blkid content.""" diff --git a/tools/ds-identify b/tools/ds-identify index aff26eb6..74d26537 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -963,10 +963,11 @@ found() { # do not pass an empty line through. shift fi - # always write the None datasource last. - if [ "$list" != "None" ]; then - list="${list:+${list}, }None" - fi + # if None is not already in the list, then add it last. + case " $list " in + *\ None,\ *|*\ None\ ) :;; + *) list=${list:+${list}, None};; + esac write_result "datasource_list: [ $list ]" "$@" return } -- cgit v1.2.3 From 9e01cca6a90a30e1b8cf454467c002b40f22dff9 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 23 May 2017 14:58:10 -0400 Subject: test: update docstring on test_configured_list_with_none Simply improve the docstring on a test added in last commit. --- tests/unittests/test_ds_identify.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 8559e1fe..f5694b26 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -211,7 +211,10 @@ class TestDsIdentify(CiTestCase): self._check_via_dict(mydata, rc=RC_FOUND, dslist=['NoCloud', DS_NONE]) def test_configured_list_with_none(self): - """If user set a datasource_list, that should be used.""" + """When datasource_list already contains None, None is not added. + + The explicitly configured datasource_list has 'None' in it. That + should not have None automatically added.""" mydata = copy.deepcopy(VALID_CFG['GCE']) cfgpath = 'etc/cloud/cloud.cfg.d/myds.cfg' mydata['files'][cfgpath] = 'datasource_list: ["Ec2", "None"]\n' -- cgit v1.2.3 From e5b2c011440aefe036c71a8c5e8ec547cc80f270 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 24 May 2017 21:33:30 -0400 Subject: python2.6: fix unit tests usage of assertNone and format. python2.6 unittest.TestCase does not have the assertIsNone or assertIsNotNone. We just have to explicitly use the unittest2 version, which we get from helpers. The desire to use assertIsNone comes from flake8 (through hacking, I believe). Also, fix "{}.format('foo')" which is not valid in python2.6. 
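For reference, Python 2.6's str.format requires explicit positional field names; auto-numbered '{}' fields only arrived in Python 2.7:

    # Works on Python 2.6, 2.7 and 3.x:
    '{0}.dist'.format('/etc/ntp.conf')

    # Raises ValueError ("zero length field name in format") on Python 2.6:
    '{}.dist'.format('/etc/ntp.conf')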
--- tests/unittests/test_datasource/test_altcloud.py | 3 ++- tests/unittests/test_handler/test_handler_ntp.py | 32 ++++++++++++------------ 2 files changed, 18 insertions(+), 17 deletions(-) diff --git a/tests/unittests/test_datasource/test_altcloud.py b/tests/unittests/test_datasource/test_altcloud.py index b6d4a453..9c46abc1 100644 --- a/tests/unittests/test_datasource/test_altcloud.py +++ b/tests/unittests/test_datasource/test_altcloud.py @@ -17,7 +17,8 @@ import tempfile from cloudinit import helpers from cloudinit import util -from unittest import TestCase + +from ..helpers import TestCase import cloudinit.sources.DataSourceAltCloud as dsac diff --git a/tests/unittests/test_handler/test_handler_ntp.py b/tests/unittests/test_handler/test_handler_ntp.py index 21f2ab19..bc4277b7 100644 --- a/tests/unittests/test_handler/test_handler_ntp.py +++ b/tests/unittests/test_handler/test_handler_ntp.py @@ -59,7 +59,7 @@ class TestNtp(FilesystemMockingTestCase): with mock.patch("cloudinit.config.cc_ntp.NTP_CONF", ntpconf): cc_ntp.rename_ntp_conf() self.assertFalse(os.path.exists(ntpconf)) - self.assertTrue(os.path.exists("{}.dist".format(ntpconf))) + self.assertTrue(os.path.exists("{0}.dist".format(ntpconf))) def test_ntp_rename_ntp_conf_skip_missing(self): """When NTP_CONF doesn't exist rename_ntp doesn't create a file.""" @@ -67,7 +67,7 @@ class TestNtp(FilesystemMockingTestCase): self.assertFalse(os.path.exists(ntpconf)) with mock.patch("cloudinit.config.cc_ntp.NTP_CONF", ntpconf): cc_ntp.rename_ntp_conf() - self.assertFalse(os.path.exists("{}.dist".format(ntpconf))) + self.assertFalse(os.path.exists("{0}.dist".format(ntpconf))) self.assertFalse(os.path.exists(ntpconf)) def test_write_ntp_config_template_from_ntp_conf_tmpl_with_servers(self): @@ -84,7 +84,7 @@ class TestNtp(FilesystemMockingTestCase): mycloud = self._get_cloud(distro) ntp_conf = self.tmp_path("ntp.conf", self.new_root) # Doesn't exist # Create ntp.conf.tmpl - with open('{}.tmpl'.format(ntp_conf), 'wb') as stream: + with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: stream.write(NTP_TEMPLATE) with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): cc_ntp.write_ntp_config_template(cfg, mycloud) @@ -107,10 +107,10 @@ class TestNtp(FilesystemMockingTestCase): mycloud = self._get_cloud(distro) ntp_conf = self.tmp_path('ntp.conf', self.new_root) # Doesn't exist # Create ntp.conf.tmpl which isn't read - with open('{}.tmpl'.format(ntp_conf), 'wb') as stream: + with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: stream.write(b'NOT READ: ntp.conf.<distro>.tmpl is primary') # Create ntp.conf.<distro>.tmpl.
- with open('{}.{}.tmpl'.format(ntp_conf, distro), 'wb') as stream: + with open('{0}.{1}.tmpl'.format(ntp_conf, distro), 'wb') as stream: stream.write(NTP_TEMPLATE) with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): cc_ntp.write_ntp_config_template(cfg, mycloud) @@ -129,19 +129,19 @@ class TestNtp(FilesystemMockingTestCase): mycloud = self._get_cloud(distro) ntp_conf = self.tmp_path('ntp.conf', self.new_root) # Doesn't exist # Create ntp.conf.tmpl - with open('{}.tmpl'.format(ntp_conf), 'wb') as stream: + with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: stream.write(NTP_TEMPLATE) with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): cc_ntp.write_ntp_config_template({}, mycloud) content = util.read_file_or_url('file://' + ntp_conf).contents default_pools = [ - "{}.{}.pool.ntp.org".format(x, distro) + "{0}.{1}.pool.ntp.org".format(x, distro) for x in range(0, cc_ntp.NR_POOL_SERVERS)] self.assertEqual( - "servers []\npools {}\n".format(default_pools), + "servers []\npools {0}\n".format(default_pools), content.decode()) self.assertIn( - "Adding distro default ntp pool servers: {}".format( + "Adding distro default ntp pool servers: {0}".format( ",".join(default_pools)), self.logs.getvalue()) @@ -158,7 +158,7 @@ class TestNtp(FilesystemMockingTestCase): mycloud = self._get_cloud('ubuntu') ntp_conf = self.tmp_path('ntp.conf', self.new_root) # Doesn't exist # Create ntp.conf.tmpl - with open('{}.tmpl'.format(ntp_conf), 'wb') as stream: + with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: stream.write(NTP_TEMPLATE) with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): with mock.patch.object(util, 'which', return_value=None): @@ -166,7 +166,7 @@ class TestNtp(FilesystemMockingTestCase): content = util.read_file_or_url('file://' + ntp_conf).contents self.assertEqual( - 'servers {}\npools {}\n'.format(servers, pools), + 'servers {0}\npools {1}\n'.format(servers, pools), content.decode()) def test_ntp_handler_real_distro_templates(self): @@ -184,7 +184,7 @@ class TestNtp(FilesystemMockingTestCase): mycloud = self._get_cloud(distro) root_dir = dirname(dirname(os.path.realpath(util.__file__))) tmpl_file = os.path.join( - '{}/templates/ntp.conf.{}.tmpl'.format(root_dir, distro)) + '{0}/templates/ntp.conf.{1}.tmpl'.format(root_dir, distro)) # Create a copy in our tmp_dir shutil.copy( tmpl_file, @@ -195,15 +195,15 @@ class TestNtp(FilesystemMockingTestCase): content = util.read_file_or_url('file://' + ntp_conf).contents expected_servers = '\n'.join([ - 'server {} iburst'.format(server) for server in servers]) + 'server {0} iburst'.format(server) for server in servers]) self.assertIn( expected_servers, content.decode(), - 'failed to render ntp.conf for distro:{}'.format(distro)) + 'failed to render ntp.conf for distro:{0}'.format(distro)) expected_pools = '\n'.join([ - 'pool {} iburst'.format(pool) for pool in pools]) + 'pool {0} iburst'.format(pool) for pool in pools]) self.assertIn( expected_pools, content.decode(), - 'failed to render ntp.conf for distro:{}'.format(distro)) + 'failed to render ntp.conf for distro:{0}'.format(distro)) def test_no_ntpcfg_does_nothing(self): """When no ntp section is defined handler logs a warning and noops.""" -- cgit v1.2.3 From 910ed46124e992eb20e49ea156b7127cd3ebbe9d Mon Sep 17 00:00:00 2001 From: Dimitri John Ledkov Date: Sat, 13 May 2017 01:45:23 +0100 Subject: nplan: For bonds, allow dashed or underscore names of keys. As some of the bond parameters are passed in as dashed, or underscored, depending on the input source.
Also correct transmit-hash-policy netplan target key. LP: #1690480 --- cloudinit/net/netplan.py | 4 ++-- tests/unittests/test_net.py | 10 ++++++++++ 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py index 9b71de97..d7ddf0c3 100644 --- a/cloudinit/net/netplan.py +++ b/cloudinit/net/netplan.py @@ -41,7 +41,7 @@ NET_CONFIG_TO_V2 = { 'bond-num-grat-arp': 'gratuitious-arp', 'bond-primary-reselect': 'primary-reselect-policy', 'bond-updelay': 'up-delay', - 'bond-xmit_hash_policy': 'transmit_hash_policy'}, + 'bond-xmit-hash-policy': 'transmit-hash-policy'}, 'bridge': {'bridge_ageing': 'ageing-time', 'bridge_bridgeprio': 'priority', 'bridge_fd': 'forward-delay', @@ -294,7 +294,7 @@ class Renderer(renderer.Renderer): for match in ['bond_', 'bond-']: bond_params = _get_params_dict_by_match(ifcfg, match) for (param, value) in bond_params.items(): - newname = v2_bond_map.get(param) + newname = v2_bond_map.get(param.replace('_', '-')) if newname is None: continue bond_config.update({newname: value}) diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 7104d00e..5169821a 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -483,11 +483,15 @@ auto eth1 iface eth1 inet manual bond-master bond0 bond-mode active-backup + bond-xmit-hash-policy layer3+4 + bond_miimon 100 auto eth2 iface eth2 inet manual bond-master bond0 bond-mode active-backup + bond-xmit-hash-policy layer3+4 + bond_miimon 100 iface eth3 inet manual @@ -500,6 +504,8 @@ auto bond0 iface bond0 inet6 dhcp bond-mode active-backup bond-slaves none + bond-xmit-hash-policy layer3+4 + bond_miimon 100 hwaddress aa:bb:cc:dd:ee:ff auto br0 @@ -625,7 +631,9 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true - eth1 - eth2 parameters: + mii-monitor-interval: 100 mode: active-backup + transmit-hash-policy: layer3+4 bridges: br0: addresses: @@ -716,6 +724,8 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true - eth2 params: bond-mode: active-backup + bond_miimon: 100 + bond-xmit-hash-policy: "layer3+4" subnets: - type: dhcp6 # A Bond VLAN. -- cgit v1.2.3 From d27c49391df343d25bd2e24045d2be6bf39c30d2 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Thu, 25 May 2017 10:21:21 -0800 Subject: GCE: Update the attribute used to find instance SSH keys. Per the documentation at https://cloud.google.com/compute/docs/storing-retrieving-metadata The instance-level SSH key was named 'sshKeys' and now is 'ssh-keys'. The project-level SSH key attribute has not changed so is intentionally not changed here. 
LP: #1693582 --- cloudinit/sources/DataSourceGCE.py | 2 +- tests/unittests/test_datasource/test_gce.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py index e9afda9c..684eac86 100644 --- a/cloudinit/sources/DataSourceGCE.py +++ b/cloudinit/sources/DataSourceGCE.py @@ -71,7 +71,7 @@ class DataSourceGCE(sources.DataSource): ('availability-zone', ('instance/zone',), True, True), ('local-hostname', ('instance/hostname',), True, True), ('public-keys', ('project/attributes/sshKeys', - 'instance/attributes/sshKeys'), False, True), + 'instance/attributes/ssh-keys'), False, True), ('user-data', ('instance/attributes/user-data',), False, False), ('user-data-encoding', ('instance/attributes/user-data-encoding',), False, True), diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py index 3eaa58e3..6fd1341d 100644 --- a/tests/unittests/test_datasource/test_gce.py +++ b/tests/unittests/test_datasource/test_gce.py @@ -140,7 +140,7 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase): def test_instance_level_ssh_keys_are_used(self): key_content = 'ssh-rsa JustAUser root@server' meta = GCE_META.copy() - meta['instance/attributes/sshKeys'] = 'user:{0}'.format(key_content) + meta['instance/attributes/ssh-keys'] = 'user:{0}'.format(key_content) _set_mock_metadata(meta) self.ds.get_data() @@ -150,7 +150,7 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase): def test_instance_level_keys_replace_project_level_keys(self): key_content = 'ssh-rsa JustAUser root@server' meta = GCE_META.copy() - meta['instance/attributes/sshKeys'] = 'user:{0}'.format(key_content) + meta['instance/attributes/ssh-keys'] = 'user:{0}'.format(key_content) _set_mock_metadata(meta) self.ds.get_data() -- cgit v1.2.3 From 1815c6d801933c47a01f1a94a8e689824f6797b4 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 25 May 2017 13:06:08 -0400 Subject: disk_setup: udev settle before attempting partitioning or fs creation. This attempts to use udevadm settle to wait until devices have been fully "realized". If a device exists, there may still be events in the udev queue that would create its partition table entries. We need to wait until those have been processed also. LP: #1692093 --- cloudinit/config/cc_disk_setup.py | 26 +++++++++++++++++++--- .../test_handler/test_handler_disk_setup.py | 2 ++ 2 files changed, 25 insertions(+), 3 deletions(-) diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py index e1505b39..c2b83aea 100644 --- a/cloudinit/config/cc_disk_setup.py +++ b/cloudinit/config/cc_disk_setup.py @@ -680,14 +680,14 @@ def read_parttbl(device): reliable way to probe the partition table. 
""" blkdev_cmd = [BLKDEV_CMD, '--rereadpt', device] - udev_cmd = [UDEVADM_CMD, 'settle'] + udevadm_settle() try: - util.subp(udev_cmd) util.subp(blkdev_cmd) - util.subp(udev_cmd) except Exception as e: util.logexc(LOG, "Failed reading the partition table %s" % e) + udevadm_settle() + def exec_mkpart_mbr(device, layout): """ @@ -737,6 +737,24 @@ def exec_mkpart(table_type, device, layout): return get_dyn_func("exec_mkpart_%s", table_type, device, layout) +def udevadm_settle(): + util.subp(['udevadm', 'settle']) + + +def assert_and_settle_device(device): + """Assert that device exists and settle so it is fully recognized.""" + if not os.path.exists(device): + udevadm_settle() + if not os.path.exists(device): + raise RuntimeError("Device %s did not exist and was not created " + "with a udevamd settle." % device) + + # Whether or not the device existed above, it is possible that udev + # events that would populate udev database (for reading by lsdname) have + # not yet finished. So settle again. + udevadm_settle() + + def mkpart(device, definition): """ Creates the partition table. @@ -752,6 +770,7 @@ def mkpart(device, definition): device: the device to work on. """ # ensure that we get a real device rather than a symbolic link + assert_and_settle_device(device) device = os.path.realpath(device) LOG.debug("Checking values for %s definition", device) @@ -852,6 +871,7 @@ def mkfs(fs_cfg): overwrite = fs_cfg.get('overwrite', False) # ensure that we get a real device rather than a symbolic link + assert_and_settle_device(device) device = os.path.realpath(device) # This allows you to define the default ephemeral or swap diff --git a/tests/unittests/test_handler/test_handler_disk_setup.py b/tests/unittests/test_handler/test_handler_disk_setup.py index e3226978..916a0d7a 100644 --- a/tests/unittests/test_handler/test_handler_disk_setup.py +++ b/tests/unittests/test_handler/test_handler_disk_setup.py @@ -168,6 +168,8 @@ class TestUpdateFsSetupDevices(TestCase): }, fs_setup) +@mock.patch('cloudinit.config.cc_disk_setup.assert_and_settle_device', + return_value=None) @mock.patch('cloudinit.config.cc_disk_setup.find_device_node', return_value=('/dev/xdb1', False)) @mock.patch('cloudinit.config.cc_disk_setup.device_type', return_value=None) -- cgit v1.2.3 From 16a7302f6acb69adb0aee75eaf12392fa3688853 Mon Sep 17 00:00:00 2001 From: Dimitri John Ledkov Date: Tue, 16 May 2017 14:18:25 +0100 Subject: net: fix reading and rendering addresses in cidr format. Input (specifically OpenStack) that had: "ip_address" : "104.130.20.155", "netmask" : "255.255.255.0" Was being rendered to netplan as '104.130.20.155/255.255.255.0'. That is now fixed to '104.130.20.155/24' Also fixed is reading of a route that had a network prefix integer in the 'netmask' rather than a netmask. LP: #1689346 LP: #1684349 --- cloudinit/net/netplan.py | 14 ++++++++------ cloudinit/net/network_state.py | 4 ++-- tests/unittests/test_distros/test_netconfig.py | 2 +- tests/unittests/test_net.py | 2 +- 4 files changed, 12 insertions(+), 10 deletions(-) diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py index d7ddf0c3..a715f3b0 100644 --- a/cloudinit/net/netplan.py +++ b/cloudinit/net/netplan.py @@ -4,7 +4,7 @@ import copy import os from . 
import renderer -from .network_state import subnet_is_ipv6 +from .network_state import mask2cidr, subnet_is_ipv6 from cloudinit import log as logging from cloudinit import util @@ -118,9 +118,10 @@ def _extract_addresses(config, entry): sn_type += '4' entry.update({sn_type: True}) elif sn_type in ['static']: - addr = "%s" % subnet.get('address') - if 'netmask' in subnet: - addr += "/%s" % subnet.get('netmask') + addr = '%s' % subnet.get('address') + netmask = subnet.get('netmask') + if netmask and '/' not in addr: + addr += '/%s' % mask2cidr(netmask) if 'gateway' in subnet and subnet.get('gateway'): gateway = subnet.get('gateway') if ":" in gateway: @@ -137,8 +138,9 @@ def _extract_addresses(config, entry): mtukey += '6' entry.update({mtukey: subnet.get('mtu')}) for route in subnet.get('routes', []): - to_net = "%s/%s" % (route.get('network'), - route.get('netmask')) + network = route.get('network') + netmask = route.get('netmask') + to_net = '%s/%s' % (network, mask2cidr(netmask)) route = { 'via': route.get('gateway'), 'to': to_net, diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index db3c3579..9e9c05a0 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -734,9 +734,9 @@ def ipv6mask2cidr(mask): def mask2cidr(mask): - if ':' in mask: + if ':' in str(mask): return ipv6mask2cidr(mask) - elif '.' in mask: + elif '.' in str(mask): return ipv4mask2cidr(mask) else: return mask diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py index fd7c051f..be9a8318 100644 --- a/tests/unittests/test_distros/test_netconfig.py +++ b/tests/unittests/test_distros/test_netconfig.py @@ -127,7 +127,7 @@ network: ethernets: eth0: addresses: - - 192.168.1.5/255.255.255.0 + - 192.168.1.5/24 gateway4: 192.168.1.254 eth1: dhcp4: true diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 5169821a..167ed01e 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -406,7 +406,7 @@ NETWORK_CONFIGS = { - sach.maas - wark.maas routes: - - to: 0.0.0.0/0.0.0.0 + - to: 0.0.0.0/0 via: 65.61.151.37 set-name: eth99 """).rstrip(' '), -- cgit v1.2.3
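For reference, the dotted-quad netmask to prefix-length conversion that the patch routes through mask2cidr amounts to counting set bits; a minimal sketch of the idea (not the actual mask2cidr implementation, which also accepts IPv6 masks and integer prefixes):

    def dotted_netmask_to_prefix(mask):
        # '255.255.255.0' -> 24: count the 1-bits across all four octets.
        return sum(bin(int(octet)).count('1') for octet in mask.split('.'))

    # '104.130.20.155' with netmask '255.255.255.0' now renders as:
    print('104.130.20.155/%d' % dotted_netmask_to_prefix('255.255.255.0'))
    # -> 104.130.20.155/24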