commit 8152452bad55109768a3efd16e65a3e837bf5d4f
Author: overweight <5324761+overweight@user.noreply.gitee.com>
Date:   Mon Sep 30 10:35:22 2019 -0400

    Package init

diff --git a/EC2-Fix-bug-using-fallback_nic-and-metadata-when-res.patch b/EC2-Fix-bug-using-fallback_nic-and-metadata-when-res.patch
new file mode 100644
index 0000000..ee4d478
--- /dev/null
+++ b/EC2-Fix-bug-using-fallback_nic-and-metadata-when-res.patch
@@ -0,0 +1,175 @@
+From 281a82181716183d526e76f4e0415e0a6f680cbe Mon Sep 17 00:00:00 2001
+From: Scott Moser
+Date: Mon, 20 Nov 2017 15:56:40 -0500
+Subject: [PATCH 040/354] EC2: Fix bug using fallback_nic and metadata when
+ restoring from cache.
+
+If user upgraded to new cloud-init and attempted to run 'cloud-init init'
+without rebooting, cloud-init restores the datasource object from pickle.
+The older version pickled datasource object had no value for
+_network_config or fallback_nic. This caused the Ec2 datasource to attempt
+to reconfigure networking with a None fallback_nic. The pickled object
+also cached an older version of ec2 metadata which didn't contain network
+information.
+
+This branch does two things:
+ - Add a fallback_interface property to DatasourceEC2 to support reading the
+   old .fallback_nic attribute if it was set. New versions will
+   call net.find_fallback_nic() if there has not been one found.
+ - Re-crawl metadata if we are on Ec2 and don't have a 'network' key in
+   metadata
+
+LP: #1732917
+---
+ cloudinit/net/dhcp.py                       |  3 +-
+ cloudinit/sources/DataSourceEc2.py          | 44 +++++++++++++++++++++--------
+ tests/unittests/test_datasource/test_ec2.py | 33 ++++++++++++++++++++++
+ 3 files changed, 67 insertions(+), 13 deletions(-)
+
+diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py
+index f3a412a..d8624d8 100644
+--- a/cloudinit/net/dhcp.py
++++ b/cloudinit/net/dhcp.py
+@@ -42,8 +42,7 @@ def maybe_perform_dhcp_discovery(nic=None):
+     if nic is None:
+         nic = find_fallback_nic()
+         if nic is None:
+-            LOG.debug(
+-                'Skip dhcp_discovery: Unable to find fallback nic.')
++            LOG.debug('Skip dhcp_discovery: Unable to find fallback nic.')
+             return {}
+     elif nic not in get_devicelist():
+         LOG.debug(
+diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
+index 0ef2217..7bbbfb6 100644
+--- a/cloudinit/sources/DataSourceEc2.py
++++ b/cloudinit/sources/DataSourceEc2.py
+@@ -65,7 +65,7 @@ class DataSourceEc2(sources.DataSource):
+     get_network_metadata = False
+ 
+     # Track the discovered fallback nic for use in configuration generation.
+-    fallback_nic = None
++    _fallback_interface = None
+ 
+     def __init__(self, sys_cfg, distro, paths):
+         sources.DataSource.__init__(self, sys_cfg, distro, paths)
+@@ -92,18 +92,17 @@ class DataSourceEc2(sources.DataSource):
+         elif self.cloud_platform == Platforms.NO_EC2_METADATA:
+             return False
+ 
+-        self.fallback_nic = net.find_fallback_nic()
+         if self.get_network_metadata:  # Setup networking in init-local stage.
+             if util.is_FreeBSD():
+                 LOG.debug("FreeBSD doesn't support running dhclient with -sf")
+                 return False
+-            dhcp_leases = dhcp.maybe_perform_dhcp_discovery(self.fallback_nic)
++            dhcp_leases = dhcp.maybe_perform_dhcp_discovery(
++                self.fallback_interface)
+             if not dhcp_leases:
+                 # DataSourceEc2Local failed in init-local stage. DataSourceEc2
+                 # will still run in init-network stage.
+                 return False
+             dhcp_opts = dhcp_leases[-1]
+-            self.fallback_nic = dhcp_opts.get('interface')
+             net_params = {'interface': dhcp_opts.get('interface'),
+                           'ip': dhcp_opts.get('fixed-address'),
+                           'prefix_or_mask': dhcp_opts.get('subnet-mask'),
+@@ -301,21 +300,44 @@ class DataSourceEc2(sources.DataSource):
+             return None
+ 
+         result = None
+-        net_md = self.metadata.get('network')
++        no_network_metadata_on_aws = bool(
++            'network' not in self.metadata and
++            self.cloud_platform == Platforms.AWS)
++        if no_network_metadata_on_aws:
++            LOG.debug("Metadata 'network' not present:"
++                      " Refreshing stale metadata from prior to upgrade.")
++            util.log_time(
++                logfunc=LOG.debug, msg='Re-crawl of metadata service',
++                func=self._crawl_metadata)
++
+         # Limit network configuration to only the primary/fallback nic
+-        macs_to_nics = {
+-            net.get_interface_mac(self.fallback_nic): self.fallback_nic}
++        iface = self.fallback_interface
++        macs_to_nics = {net.get_interface_mac(iface): iface}
++        net_md = self.metadata.get('network')
+         if isinstance(net_md, dict):
+             result = convert_ec2_metadata_network_config(
+-                net_md, macs_to_nics=macs_to_nics,
+-                fallback_nic=self.fallback_nic)
++                net_md, macs_to_nics=macs_to_nics, fallback_nic=iface)
+         else:
+-            LOG.warning("unexpected metadata 'network' key not valid: %s",
+-                        net_md)
++            LOG.warning("Metadata 'network' key not valid: %s.", net_md)
+         self._network_config = result
+ 
+         return self._network_config
+ 
++    @property
++    def fallback_interface(self):
++        if self._fallback_interface is None:
++            # fallback_nic was used at one point, so restored objects may
++            # have an attribute there. respect that if found.
++            _legacy_fbnic = getattr(self, 'fallback_nic', None)
++            if _legacy_fbnic:
++                self._fallback_interface = _legacy_fbnic
++                self.fallback_nic = None
++            else:
++                self._fallback_interface = net.find_fallback_nic()
++                if self._fallback_interface is None:
++                    LOG.warning("Did not find a fallback interface on EC2.")
++        return self._fallback_interface
++
+     def _crawl_metadata(self):
+         """Crawl metadata service when available.
+ 
+diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py
+index 6af699a..ba328ee 100644
+--- a/tests/unittests/test_datasource/test_ec2.py
++++ b/tests/unittests/test_datasource/test_ec2.py
+@@ -307,6 +307,39 @@ class TestEc2(test_helpers.HttprettyTestCase):
+ 
+     @httpretty.activate
+     @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
++    def test_network_config_cached_property_refreshed_on_upgrade(self, m_dhcp):
++        """Refresh the network_config Ec2 cache if network key is absent.
++
++        This catches an upgrade issue where obj.pkl contained stale metadata
++        which lacked newly required network key.
++        """
++        old_metadata = copy.deepcopy(DEFAULT_METADATA)
++        old_metadata.pop('network')
++        ds = self._setup_ds(
++            platform_data=self.valid_platform_data,
++            sys_cfg={'datasource': {'Ec2': {'strict_id': True}}},
++            md=old_metadata)
++        self.assertTrue(ds.get_data())
++        # Provide new revision of metadata that contains network data
++        register_mock_metaserver(
++            'http://169.254.169.254/2009-04-04/meta-data/', DEFAULT_METADATA)
++        mac1 = '06:17:04:d7:26:09'  # Defined in DEFAULT_METADATA
++        get_interface_mac_path = (
++            'cloudinit.sources.DataSourceEc2.net.get_interface_mac')
++        ds.fallback_nic = 'eth9'
++        with mock.patch(get_interface_mac_path) as m_get_interface_mac:
++            m_get_interface_mac.return_value = mac1
++            ds.network_config  # Will re-crawl network metadata
++            self.assertIn('Re-crawl of metadata service', self.logs.getvalue())
++        expected = {'version': 1, 'config': [
++            {'mac_address': '06:17:04:d7:26:09',
++             'name': 'eth9',
++             'subnets': [{'type': 'dhcp4'}, {'type': 'dhcp6'}],
++             'type': 'physical'}]}
++        self.assertEqual(expected, ds.network_config)
++
++    @httpretty.activate
++    @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
+     def test_valid_platform_with_strict_true(self, m_dhcp):
+         """Valid platform data should return true with strict_id true."""
+         ds = self._setup_ds(
+-- 
+1.7.12.4
+
diff --git a/Fix-ssh-keys-validation-in-ssh_util.patch b/Fix-ssh-keys-validation-in-ssh_util.patch
new file mode 100644
index 0000000..f16c053
--- /dev/null
+++ b/Fix-ssh-keys-validation-in-ssh_util.patch
@@ -0,0 +1,89 @@
+From 45289a00bf8c043c5783c527c4ea720e67e0524b Mon Sep 17 00:00:00 2001
+From: Tatiana Kholkina
+Date: Thu, 1 Feb 2018 18:08:15 +0300
+Subject: [PATCH 092/354] Fix ssh keys validation in ssh_util
+
+This fixes a bug where invalid keys would sneak into authorized_keys.
+--- + cloudinit/ssh_util.py | 5 +---- + tests/unittests/test_sshutil.py | 42 +++++++++++++++++++++++++++++++++++++++++ + 2 files changed, 43 insertions(+), 4 deletions(-) + +diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py +index b95b956..882517f 100644 +--- a/cloudinit/ssh_util.py ++++ b/cloudinit/ssh_util.py +@@ -171,16 +171,13 @@ def parse_authorized_keys(fname): + + + def update_authorized_keys(old_entries, keys): +- to_add = list(keys) +- ++ to_add = list([k for k in keys if k.valid()]) + for i in range(0, len(old_entries)): + ent = old_entries[i] + if not ent.valid(): + continue + # Replace those with the same base64 + for k in keys: +- if not ent.valid(): +- continue + if k.base64 == ent.base64: + # Replace it with our better one + ent = k +diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py +index 2a8e6ab..4c62c8b 100644 +--- a/tests/unittests/test_sshutil.py ++++ b/tests/unittests/test_sshutil.py +@@ -126,6 +126,48 @@ class TestAuthKeyLineParser(test_helpers.TestCase): + self.assertFalse(key.valid()) + + ++class TestUpdateAuthorizedKeys(test_helpers.TestCase): ++ ++ def test_new_keys_replace(self): ++ """new entries with the same base64 should replace old.""" ++ orig_entries = [ ++ ' '.join(('rsa', VALID_CONTENT['rsa'], 'orig_comment1')), ++ ' '.join(('dsa', VALID_CONTENT['dsa'], 'orig_comment2'))] ++ ++ new_entries = [ ++ ' '.join(('rsa', VALID_CONTENT['rsa'], 'new_comment1')), ] ++ ++ expected = '\n'.join([new_entries[0], orig_entries[1]]) + '\n' ++ ++ parser = ssh_util.AuthKeyLineParser() ++ found = ssh_util.update_authorized_keys( ++ [parser.parse(p) for p in orig_entries], ++ [parser.parse(p) for p in new_entries]) ++ ++ self.assertEqual(expected, found) ++ ++ def test_new_invalid_keys_are_ignored(self): ++ """new entries that are invalid should be skipped.""" ++ orig_entries = [ ++ ' '.join(('rsa', VALID_CONTENT['rsa'], 'orig_comment1')), ++ ' '.join(('dsa', VALID_CONTENT['dsa'], 'orig_comment2'))] ++ ++ new_entries = [ ++ ' '.join(('rsa', VALID_CONTENT['rsa'], 'new_comment1')), ++ 'xxx-invalid-thing1', ++ 'xxx-invalid-blob2' ++ ] ++ ++ expected = '\n'.join([new_entries[0], orig_entries[1]]) + '\n' ++ ++ parser = ssh_util.AuthKeyLineParser() ++ found = ssh_util.update_authorized_keys( ++ [parser.parse(p) for p in orig_entries], ++ [parser.parse(p) for p in new_entries]) ++ ++ self.assertEqual(expected, found) ++ ++ + class TestParseSSHConfig(test_helpers.TestCase): + + def setUp(self): +-- +1.7.12.4 + diff --git a/add-variable-to-forbid-tmp-dir.patch b/add-variable-to-forbid-tmp-dir.patch new file mode 100644 index 0000000..5990566 --- /dev/null +++ b/add-variable-to-forbid-tmp-dir.patch @@ -0,0 +1,58 @@ +From 224da46b2331b582577b86c3eb707f67d57800fb Mon Sep 17 00:00:00 2001 +From: chengquan +Date: Thu, 8 Aug 2019 16:15:31 +0800 +Subject: [PATCH] cloud-init: add variable to forbid tmp dir + +reason: add variable to forbid temporary directory + +Signed-off-by: chengquan +--- + cloud-init-17.1/setup.py | 14 +++++++++++--- + 1 file changed, 11 insertions(+), 3 deletions(-) + +diff -Nur cloud-init-17.1/setup.py cloud-init-17.1_bak/setup.py +--- cloud-init-17.1/setup.py 2019-04-12 19:00:20.782000000 +0800 ++++ cloud-init-17.1_bak/setup.py 2019-04-12 19:48:04.246000000 +0800 +@@ -86,6 +86,8 @@ + (deps, _e) = tiny_p(cmd) + return str(deps).splitlines() + ++# add variable to forbid tmp dir ++num = 0 + + def render_tmpl(template): + """render template into a tmpdir under same dir as setup.py +@@ -107,7 +109,10 @@ + return template + + topdir = 
os.path.dirname(sys.argv[0]) +- tmpd = tempfile.mkdtemp(dir=topdir) ++ global num ++ os.mkdir(topdir + str(num)) ++ tmpd = os.path.abspath(topdir + str(num)) ++ num = num + 1 + atexit.register(shutil.rmtree, tmpd) + bname = os.path.basename(template).rstrip(tmpl_ext) + fpath = os.path.join(tmpd, bname) +@@ -115,6 +120,9 @@ + # return path relative to setup.py + return os.path.join(os.path.basename(tmpd), bname) + ++def sort_files(file_list): ++ file_list.sort() ++ return file_list + + INITSYS_FILES = { + 'sysvinit': [f for f in glob('sysvinit/redhat/*') if is_f(f)], +@@ -123,9 +131,9 @@ + 'sysvinit_openrc': [f for f in glob('sysvinit/gentoo/*') if is_f(f)], + 'sysvinit_suse': [f for f in glob('sysvinit/suse/*') if is_f(f)], + 'systemd': [render_tmpl(f) +- for f in (glob('systemd/*.tmpl') + ++ for f in sort_files((glob('systemd/*.tmpl') + + glob('systemd/*.service') + +- glob('systemd/*.target')) if is_f(f)], ++ glob('systemd/*.target'))) if is_f(f)], + 'systemd.generators': [f for f in glob('systemd/*-generator') if is_f(f)], + 'upstart': [f for f in glob('upstart/*') if is_f(f)], + } diff --git a/bugfix-cloud-init-add-euleros-os.patch b/bugfix-cloud-init-add-euleros-os.patch new file mode 100644 index 0000000..96e345a --- /dev/null +++ b/bugfix-cloud-init-add-euleros-os.patch @@ -0,0 +1,227 @@ +From 098429a75ea00df1d8a5670e45df6babfc37f327 Mon Sep 17 00:00:00 2001 +From: chengquan +Date: Thu, 8 Aug 2019 16:11:58 +0800 +Subject: [PATCH] cloud-init: cloud-init add euleros os + +reason: add euleros into distros + +Signed-off-by: chengquan +--- + .../0001-cloud-init-Update-patch-information.patch | 68 ++++++++++++++++++++++ + cloud-init-17.1/cloudinit/config/cc_ntp.py | 2 +- + cloud-init-17.1/cloudinit/config/cc_resolv_conf.py | 2 +- + .../cloudinit/config/cc_rh_subscription.py | 2 +- + cloud-init-17.1/cloudinit/config/cc_spacewalk.py | 2 +- + .../cloudinit/config/cc_yum_add_repo.py | 2 +- + cloud-init-17.1/cloudinit/distros/__init__.py | 2 +- + cloud-init-17.1/cloudinit/distros/euleros.py | 12 ++++ + cloud-init-17.1/cloudinit/util.py | 2 +- + cloud-init-17.1/config/cloud.cfg.tmpl | 8 +-- + cloud-init-17.1/systemd/cloud-init.service.tmpl | 2 +- + cloud-init-17.1/tests/cloud_tests/util.py | 2 +- + .../unittests/test_handler/test_handler_ntp.py | 2 +- + cloud-init-17.1/tools/render-cloudcfg | 2 +- + 14 files changed, 95 insertions(+), 15 deletions(-) + create mode 100644 cloud-init-17.1/cloudinit/distros/euleros.py + +diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py +index d43d060..4f14c10 100644 +--- a/cloudinit/config/cc_ntp.py ++++ b/cloudinit/config/cc_ntp.py +@@ -23,7 +23,7 @@ frequency = PER_INSTANCE + NTP_CONF = '/etc/ntp.conf' + TIMESYNCD_CONF = '/etc/systemd/timesyncd.conf.d/cloud-init.conf' + NR_POOL_SERVERS = 4 +-distros = ['centos', 'debian', 'fedora', 'opensuse', 'ubuntu'] ++distros = ['centos', 'debian', 'fedora', 'opensuse', 'ubuntu', 'euleros'] + + + # The schema definition for each cloud-config module is a strict contract for +diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py +index 9812562..973fe2e 100644 +--- a/cloudinit/config/cc_resolv_conf.py ++++ b/cloudinit/config/cc_resolv_conf.py +@@ -55,7 +55,7 @@ LOG = logging.getLogger(__name__) + + frequency = PER_INSTANCE + +-distros = ['fedora', 'opensuse', 'rhel', 'sles'] ++distros = ['fedora', 'opensuse', 'rhel', 'sles', 'euleros'] + + + def generate_resolv_conf(template_fn, params, target_fname="/etc/resolv.conf"): +diff --git a/cloudinit/config/cc_rh_subscription.py 
b/cloudinit/config/cc_rh_subscription.py +index 7f36cf8..23f3a5a 100644 +--- a/cloudinit/config/cc_rh_subscription.py ++++ b/cloudinit/config/cc_rh_subscription.py +@@ -40,7 +40,7 @@ Subscription`` example config. + + from cloudinit import util + +-distros = ['fedora', 'rhel'] ++distros = ['fedora', 'rhel', 'euleros'] + + + def handle(name, cfg, _cloud, log, _args): +diff --git a/cloudinit/config/cc_spacewalk.py b/cloudinit/config/cc_spacewalk.py +index 1020e94..63e9d3b 100644 +--- a/cloudinit/config/cc_spacewalk.py ++++ b/cloudinit/config/cc_spacewalk.py +@@ -30,7 +30,7 @@ For more information about spacewalk see: https://fedorahosted.org/spacewalk/ + from cloudinit import util + + +-distros = ['redhat', 'fedora'] ++distros = ['redhat', 'fedora', 'euleros'] + required_packages = ['rhn-setup'] + def_ca_cert_path = "/usr/share/rhn/RHN-ORG-TRUSTED-SSL-CERT" + +diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py +index 6a42f49..9b2d1bd 100644 +--- a/cloudinit/config/cc_yum_add_repo.py ++++ b/cloudinit/config/cc_yum_add_repo.py +@@ -40,7 +40,7 @@ import six + + from cloudinit import util + +-distros = ['fedora', 'rhel'] ++distros = ['fedora', 'rhel', 'euleros'] + + + def _canonicalize_id(repo_id): +diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py +index d5becd1..f6eb899 100755 +--- a/cloudinit/distros/__init__.py ++++ b/cloudinit/distros/__init__.py +@@ -36,7 +36,7 @@ ALL_DISTROS = 'all' + + OSFAMILIES = { + 'debian': ['debian', 'ubuntu'], +- 'redhat': ['centos', 'fedora', 'rhel'], ++ 'redhat': ['centos', 'fedora', 'rhel', 'euleros'], + 'gentoo': ['gentoo'], + 'freebsd': ['freebsd'], + 'suse': ['opensuse', 'sles'], +diff --git a/cloudinit/util.py b/cloudinit/util.py +index e1290aa..d85daf0 100644 +--- a/cloudinit/util.py ++++ b/cloudinit/util.py +@@ -592,7 +592,7 @@ def system_info(): + var = 'unknown' + if system == "linux": + linux_dist = info['dist'][0].lower() +- if linux_dist in ('centos', 'fedora', 'debian'): ++ if linux_dist in ('centos', 'fedora', 'debian', 'euleros'): + var = linux_dist + elif linux_dist in ('ubuntu', 'linuxmint', 'mint'): + var = 'ubuntu' +diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl +index 50e3bd8..e3816f2 100644 +--- a/config/cloud.cfg.tmpl ++++ b/config/cloud.cfg.tmpl +@@ -19,7 +19,7 @@ disable_root: false + disable_root: true + {% endif %} + +-{% if variant in ["centos", "fedora", "rhel"] %} ++{% if variant in ["centos", "fedora", "rhel", "euleros"] %} + mount_default_fields: [~, ~, 'auto', 'defaults,nofail', '0', '2'] + resize_rootfs_tmp: /dev + ssh_deletekeys: 0 +@@ -75,7 +75,7 @@ cloud_config_modules: + - ssh-import-id + - locale + - set-passwords +-{% if variant in ["rhel", "fedora"] %} ++{% if variant in ["rhel", "fedora", "euleros"] %} + - spacewalk + - yum-add-repo + {% endif %} +@@ -127,7 +127,7 @@ cloud_final_modules: + # (not accessible to handlers/transforms) + system_info: + # This will affect which distro class gets used +-{% if variant in ["centos", "debian", "fedora", "rhel", "suse", "ubuntu", "freebsd"] %} ++{% if variant in ["centos", "debian", "fedora", "rhel", "suse", "ubuntu", "freebsd", "euleros"] %} + distro: {{ variant }} + {% else %} + # Unknown/fallback distro. 
+@@ -163,7 +163,7 @@ system_info: + primary: http://ports.ubuntu.com/ubuntu-ports + security: http://ports.ubuntu.com/ubuntu-ports + ssh_svcname: ssh +-{% elif variant in ["centos", "rhel", "fedora", "suse"] %} ++{% elif variant in ["centos", "rhel", "fedora", "suse", "euleros"] %} + # Default user name + that default users groups (if added/used) + default_user: + name: {{ variant }} +diff --git a/systemd/cloud-init.service.tmpl b/systemd/cloud-init.service.tmpl +index b92e8ab..f59d4fd 100644 +--- a/systemd/cloud-init.service.tmpl ++++ b/systemd/cloud-init.service.tmpl +@@ -10,7 +10,7 @@ After=systemd-networkd-wait-online.service + {% if variant in ["ubuntu", "unknown", "debian"] %} + After=networking.service + {% endif %} +-{% if variant in ["centos", "fedora", "redhat"] %} ++{% if variant in ["centos", "fedora", "redhat", "euleros"] %} + After=network.service + {% endif %} + {% if variant in ["suse"] %} +diff --git a/tests/cloud_tests/util.py b/tests/cloud_tests/util.py +index 4357fbb..7d3034d 100644 +--- a/tests/cloud_tests/util.py ++++ b/tests/cloud_tests/util.py +@@ -18,7 +18,7 @@ from tests.cloud_tests import LOG + + OS_FAMILY_MAPPING = { + 'debian': ['debian', 'ubuntu'], +- 'redhat': ['centos', 'rhel', 'fedora'], ++ 'redhat': ['centos', 'rhel', 'fedora', 'euleros'], + 'gentoo': ['gentoo'], + 'freebsd': ['freebsd'], + 'suse': ['sles'], +diff --git a/tests/unittests/test_handler/test_handler_ntp.py b/tests/unittests/test_handler/test_handler_ntp.py +index 3abe578..78548cc 100644 +--- a/tests/unittests/test_handler/test_handler_ntp.py ++++ b/tests/unittests/test_handler/test_handler_ntp.py +@@ -258,7 +258,7 @@ class TestNtp(FilesystemMockingTestCase): + } + } + ntp_conf = self.tmp_path('ntp.conf', self.new_root) # Doesn't exist +- for distro in ('debian', 'ubuntu', 'fedora', 'rhel', 'sles'): ++ for distro in ('debian', 'ubuntu', 'fedora', 'rhel', 'sles', 'euleros'): + mycloud = self._get_cloud(distro) + root_dir = dirname(dirname(os.path.realpath(util.__file__))) + tmpl_file = os.path.join( +diff --git a/tools/render-cloudcfg b/tools/render-cloudcfg +index 91d074b..7a8a2c4 100755 +--- a/tools/render-cloudcfg ++++ b/tools/render-cloudcfg +@@ -4,7 +4,7 @@ import argparse + import os + import sys + +-VARIANTS = ["bsd", "centos", "fedora", "rhel", "suse", "ubuntu", "unknown"] ++VARIANTS = ["bsd", "centos", "fedora", "rhel", "suse", "ubuntu", "unknown", "euleros"] + + if "avoid-pep8-E402-import-not-top-of-file": + _tdir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) +diff --git a/cloudinit/distros/euleros.py b/cloudinit/distros/euleros.py +new file mode 100644 +index 0000000..5ac4700 +--- /dev/null ++++ b/cloudinit/distros/euleros.py +@@ -0,0 +1,12 @@ ++# Copyright (c) Huawei Technologies Co., Ltd. 2019-2019. All rights reserved. ++# This file is part of cloud-init. See LICENSE file for license information. 
++ ++from cloudinit.distros import rhel ++from cloudinit import log as logging ++ ++LOG = logging.getLogger(__name__) ++ ++class Distro(rhel.Distro): ++ pass ++ ++# vi: ts=4 expandtab diff --git a/bugfix-sort-requirements.patch b/bugfix-sort-requirements.patch new file mode 100644 index 0000000..a0a46b2 --- /dev/null +++ b/bugfix-sort-requirements.patch @@ -0,0 +1,24 @@ +From ef3f0c73fd940accf732f94cace4d53fc1604142 Mon Sep 17 00:00:00 2001 +From: chengquan +Date: Thu, 8 Aug 2019 16:14:06 +0800 +Subject: [PATCH] sort requirements + +reason: sort requirements in setup + +Signed-off-by: chengquan +--- + cloud-init-17.1/setup.py | 1 + + 1 file changed, 1 insertion(+) + +diff -Nur cloud-init-17.1_bak/setup.py cloud-init-17.1/setup.py +--- cloud-init-17.1_bak/setup.py 2019-04-11 20:27:41.526622810 +0800 ++++ cloud-init-17.1/setup.py 2019-04-11 20:28:21.734622815 +0800 +@@ -232,6 +232,7 @@ + } + + requirements = read_requires() ++requirements.sort() + + setuptools.setup( + name='cloud-init', + diff --git a/cloud-init-17.1-disable-lxd-tests.patch b/cloud-init-17.1-disable-lxd-tests.patch new file mode 100644 index 0000000..956265a --- /dev/null +++ b/cloud-init-17.1-disable-lxd-tests.patch @@ -0,0 +1,17 @@ +Index: cloud-init-17.1/tests/cloud_tests/platforms/__init__.py +=================================================================== +--- cloud-init-17.1.orig/tests/cloud_tests/platforms/__init__.py ++++ cloud-init-17.1/tests/cloud_tests/platforms/__init__.py +@@ -2,12 +2,10 @@ + + """Main init.""" + +-from tests.cloud_tests.platforms import lxd + from tests.cloud_tests.platforms import nocloudkvm + + PLATFORMS = { + 'nocloud-kvm': nocloudkvm.NoCloudKVMPlatform, +- 'lxd': lxd.LXDPlatform, + } + + diff --git a/cloud-init-17.1-fix-local-ipv4-only.patch b/cloud-init-17.1-fix-local-ipv4-only.patch new file mode 100644 index 0000000..c60be12 --- /dev/null +++ b/cloud-init-17.1-fix-local-ipv4-only.patch @@ -0,0 +1,298 @@ +From a16fb4e1e1379db61a1ee40513f2ad10c9b38ef9 Mon Sep 17 00:00:00 2001 +From: Chad Smith +Date: Tue, 31 Oct 2017 12:42:15 -0600 +Subject: [PATCH 4/4] EC2: Limit network config to fallback nic, fix local-ipv4 + only instances. + +VPC instances have the option to specific local only IPv4 addresses. Allow +Ec2Datasource to enable dhcp4 on instances even if local-ipv4s is +configured on an instance. + +Also limit network_configuration to only the primary (fallback) nic. + +LP: #1728152 +(cherry picked from commit eb292c18c3d83b9f7e5d1fd81b0e8aefaab0cc2d) +--- + cloudinit/sources/DataSourceEc2.py | 24 ++++- + tests/unittests/test_datasource/test_ec2.py | 136 ++++++++++++++++++++++++++-- + 2 files changed, 149 insertions(+), 11 deletions(-) + +diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py +index 41367a8b..0ef22174 100644 +--- a/cloudinit/sources/DataSourceEc2.py ++++ b/cloudinit/sources/DataSourceEc2.py +@@ -64,6 +64,9 @@ class DataSourceEc2(sources.DataSource): + # Whether we want to get network configuration from the metadata service. + get_network_metadata = False + ++ # Track the discovered fallback nic for use in configuration generation. 
++ fallback_nic = None ++ + def __init__(self, sys_cfg, distro, paths): + sources.DataSource.__init__(self, sys_cfg, distro, paths) + self.metadata_address = None +@@ -89,16 +92,18 @@ class DataSourceEc2(sources.DataSource): + elif self.cloud_platform == Platforms.NO_EC2_METADATA: + return False + ++ self.fallback_nic = net.find_fallback_nic() + if self.get_network_metadata: # Setup networking in init-local stage. + if util.is_FreeBSD(): + LOG.debug("FreeBSD doesn't support running dhclient with -sf") + return False +- dhcp_leases = dhcp.maybe_perform_dhcp_discovery() ++ dhcp_leases = dhcp.maybe_perform_dhcp_discovery(self.fallback_nic) + if not dhcp_leases: + # DataSourceEc2Local failed in init-local stage. DataSourceEc2 + # will still run in init-network stage. + return False + dhcp_opts = dhcp_leases[-1] ++ self.fallback_nic = dhcp_opts.get('interface') + net_params = {'interface': dhcp_opts.get('interface'), + 'ip': dhcp_opts.get('fixed-address'), + 'prefix_or_mask': dhcp_opts.get('subnet-mask'), +@@ -297,8 +302,13 @@ class DataSourceEc2(sources.DataSource): + + result = None + net_md = self.metadata.get('network') ++ # Limit network configuration to only the primary/fallback nic ++ macs_to_nics = { ++ net.get_interface_mac(self.fallback_nic): self.fallback_nic} + if isinstance(net_md, dict): +- result = convert_ec2_metadata_network_config(net_md) ++ result = convert_ec2_metadata_network_config( ++ net_md, macs_to_nics=macs_to_nics, ++ fallback_nic=self.fallback_nic) + else: + LOG.warning("unexpected metadata 'network' key not valid: %s", + net_md) +@@ -458,15 +468,18 @@ def _collect_platform_data(): + return data + + +-def convert_ec2_metadata_network_config(network_md, macs_to_nics=None): ++def convert_ec2_metadata_network_config(network_md, macs_to_nics=None, ++ fallback_nic=None): + """Convert ec2 metadata to network config version 1 data dict. + + @param: network_md: 'network' portion of EC2 metadata. + generally formed as {"interfaces": {"macs": {}} where + 'macs' is a dictionary with mac address as key and contents like: + {"device-number": "0", "interface-id": "...", "local-ipv4s": ...} +- @param: macs_to_name: Optional dict mac addresses and the nic name. If ++ @param: macs_to_nics: Optional dict of mac addresses and nic names. If + not provided, get_interfaces_by_mac is called to get it from the OS. ++ @param: fallback_nic: Optionally provide the primary nic interface name. ++ This nic will be guaranteed to minimally have a dhcp4 configuration. + + @return A dict of network config version 1 based on the metadata and macs. 
+ """ +@@ -480,7 +493,8 @@ def convert_ec2_metadata_network_config(network_md, macs_to_nics=None): + continue # Not a physical nic represented in metadata + nic_cfg = {'type': 'physical', 'name': nic_name, 'subnets': []} + nic_cfg['mac_address'] = mac +- if nic_metadata.get('public-ipv4s'): ++ if (nic_name == fallback_nic or nic_metadata.get('public-ipv4s') or ++ nic_metadata.get('local-ipv4s')): + nic_cfg['subnets'].append({'type': 'dhcp4'}) + if nic_metadata.get('ipv6s'): + nic_cfg['subnets'].append({'type': 'dhcp6'}) +diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py +index a7301dbf..6af699a6 100644 +--- a/tests/unittests/test_datasource/test_ec2.py ++++ b/tests/unittests/test_datasource/test_ec2.py +@@ -51,6 +51,29 @@ DEFAULT_METADATA = { + "vpc-ipv4-cidr-block": "172.31.0.0/16", + "vpc-ipv4-cidr-blocks": "172.31.0.0/16", + "vpc-ipv6-cidr-blocks": "2600:1f16:aeb:b200::/56" ++ }, ++ "06:17:04:d7:26:0A": { ++ "device-number": "1", # Only IPv4 local config ++ "interface-id": "eni-e44ef49f", ++ "ipv4-associations": {"": "172.3.3.16"}, ++ "ipv6s": "", # No IPv6 config ++ "local-hostname": ("ip-172-3-3-16.us-east-2." ++ "compute.internal"), ++ "local-ipv4s": "172.3.3.16", ++ "mac": "06:17:04:d7:26:0A", ++ "owner-id": "950047163771", ++ "public-hostname": ("ec2-172-3-3-16.us-east-2." ++ "compute.amazonaws.com"), ++ "public-ipv4s": "", # No public ipv4 config ++ "security-group-ids": "sg-5a61d333", ++ "security-groups": "wide-open", ++ "subnet-id": "subnet-20b8565b", ++ "subnet-ipv4-cidr-block": "172.31.16.0/20", ++ "subnet-ipv6-cidr-blocks": "", ++ "vpc-id": "vpc-87e72bee", ++ "vpc-ipv4-cidr-block": "172.31.0.0/16", ++ "vpc-ipv4-cidr-blocks": "172.31.0.0/16", ++ "vpc-ipv6-cidr-blocks": "" + } + } + } +@@ -209,12 +232,20 @@ class TestEc2(test_helpers.HttprettyTestCase): + + @httpretty.activate + def test_network_config_property_returns_version_1_network_data(self): +- """network_config property returns network version 1 for metadata.""" ++ """network_config property returns network version 1 for metadata. ++ ++ Only one device is configured even when multiple exist in metadata. ++ """ + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, + md=DEFAULT_METADATA) +- ds.get_data() ++ find_fallback_path = ( ++ 'cloudinit.sources.DataSourceEc2.net.find_fallback_nic') ++ with mock.patch(find_fallback_path) as m_find_fallback: ++ m_find_fallback.return_value = 'eth9' ++ ds.get_data() ++ + mac1 = '06:17:04:d7:26:09' # Defined in DEFAULT_METADATA + expected = {'version': 1, 'config': [ + {'mac_address': '06:17:04:d7:26:09', 'name': 'eth9', +@@ -222,9 +253,48 @@ class TestEc2(test_helpers.HttprettyTestCase): + 'type': 'physical'}]} + patch_path = ( + 'cloudinit.sources.DataSourceEc2.net.get_interfaces_by_mac') ++ get_interface_mac_path = ( ++ 'cloudinit.sources.DataSourceEc2.net.get_interface_mac') ++ with mock.patch(patch_path) as m_get_interfaces_by_mac: ++ with mock.patch(find_fallback_path) as m_find_fallback: ++ with mock.patch(get_interface_mac_path) as m_get_mac: ++ m_get_interfaces_by_mac.return_value = {mac1: 'eth9'} ++ m_find_fallback.return_value = 'eth9' ++ m_get_mac.return_value = mac1 ++ self.assertEqual(expected, ds.network_config) ++ ++ @httpretty.activate ++ def test_network_config_property_set_dhcp4_on_private_ipv4(self): ++ """network_config property configures dhcp4 on private ipv4 nics. ++ ++ Only one device is configured even when multiple exist in metadata. 
++ """ ++ ds = self._setup_ds( ++ platform_data=self.valid_platform_data, ++ sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, ++ md=DEFAULT_METADATA) ++ find_fallback_path = ( ++ 'cloudinit.sources.DataSourceEc2.net.find_fallback_nic') ++ with mock.patch(find_fallback_path) as m_find_fallback: ++ m_find_fallback.return_value = 'eth9' ++ ds.get_data() ++ ++ mac1 = '06:17:04:d7:26:0A' # IPv4 only in DEFAULT_METADATA ++ expected = {'version': 1, 'config': [ ++ {'mac_address': '06:17:04:d7:26:0A', 'name': 'eth9', ++ 'subnets': [{'type': 'dhcp4'}], ++ 'type': 'physical'}]} ++ patch_path = ( ++ 'cloudinit.sources.DataSourceEc2.net.get_interfaces_by_mac') ++ get_interface_mac_path = ( ++ 'cloudinit.sources.DataSourceEc2.net.get_interface_mac') + with mock.patch(patch_path) as m_get_interfaces_by_mac: +- m_get_interfaces_by_mac.return_value = {mac1: 'eth9'} +- self.assertEqual(expected, ds.network_config) ++ with mock.patch(find_fallback_path) as m_find_fallback: ++ with mock.patch(get_interface_mac_path) as m_get_mac: ++ m_get_interfaces_by_mac.return_value = {mac1: 'eth9'} ++ m_find_fallback.return_value = 'eth9' ++ m_get_mac.return_value = mac1 ++ self.assertEqual(expected, ds.network_config) + + def test_network_config_property_is_cached_in_datasource(self): + """network_config property is cached in DataSourceEc2.""" +@@ -321,9 +391,11 @@ class TestEc2(test_helpers.HttprettyTestCase): + + @httpretty.activate + @mock.patch('cloudinit.net.EphemeralIPv4Network') ++ @mock.patch('cloudinit.net.find_fallback_nic') + @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') + @mock.patch('cloudinit.sources.DataSourceEc2.util.is_FreeBSD') +- def test_ec2_local_performs_dhcp_on_non_bsd(self, m_is_bsd, m_dhcp, m_net): ++ def test_ec2_local_performs_dhcp_on_non_bsd(self, m_is_bsd, m_dhcp, ++ m_fallback_nic, m_net): + """Ec2Local returns True for valid platform data on non-BSD with dhcp. + + DataSourceEc2Local will setup initial IPv4 network via dhcp discovery. +@@ -331,6 +403,7 @@ class TestEc2(test_helpers.HttprettyTestCase): + When the platform data is valid, return True. 
+ """ + ++ m_fallback_nic.return_value = 'eth9' + m_is_bsd.return_value = False + m_dhcp.return_value = [{ + 'interface': 'eth9', 'fixed-address': '192.168.2.9', +@@ -344,7 +417,7 @@ class TestEc2(test_helpers.HttprettyTestCase): + + ret = ds.get_data() + self.assertTrue(ret) +- m_dhcp.assert_called_once_with() ++ m_dhcp.assert_called_once_with('eth9') + m_net.assert_called_once_with( + broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9', + prefix_or_mask='255.255.255.0', router='192.168.2.1') +@@ -389,6 +462,57 @@ class TestConvertEc2MetadataNetworkConfig(test_helpers.CiTestCase): + ec2.convert_ec2_metadata_network_config( + network_metadata_ipv6, macs_to_nics)) + ++ def test_convert_ec2_metadata_network_config_handles_local_dhcp4(self): ++ """Config dhcp4 when there are no public addresses in public-ipv4s.""" ++ macs_to_nics = {self.mac1: 'eth9'} ++ network_metadata_ipv6 = copy.deepcopy(self.network_metadata) ++ nic1_metadata = ( ++ network_metadata_ipv6['interfaces']['macs'][self.mac1]) ++ nic1_metadata['local-ipv4s'] = '172.3.3.15' ++ nic1_metadata.pop('public-ipv4s') ++ expected = {'version': 1, 'config': [ ++ {'mac_address': self.mac1, 'type': 'physical', ++ 'name': 'eth9', 'subnets': [{'type': 'dhcp4'}]}]} ++ self.assertEqual( ++ expected, ++ ec2.convert_ec2_metadata_network_config( ++ network_metadata_ipv6, macs_to_nics)) ++ ++ def test_convert_ec2_metadata_network_config_handles_absent_dhcp4(self): ++ """Config dhcp4 on fallback_nic when there are no ipv4 addresses.""" ++ macs_to_nics = {self.mac1: 'eth9'} ++ network_metadata_ipv6 = copy.deepcopy(self.network_metadata) ++ nic1_metadata = ( ++ network_metadata_ipv6['interfaces']['macs'][self.mac1]) ++ nic1_metadata['public-ipv4s'] = '' ++ ++ # When no ipv4 or ipv6 content but fallback_nic set, set dhcp4 config. 
++ expected = {'version': 1, 'config': [ ++ {'mac_address': self.mac1, 'type': 'physical', ++ 'name': 'eth9', 'subnets': [{'type': 'dhcp4'}]}]} ++ self.assertEqual( ++ expected, ++ ec2.convert_ec2_metadata_network_config( ++ network_metadata_ipv6, macs_to_nics, fallback_nic='eth9')) ++ ++ def test_convert_ec2_metadata_network_config_handles_local_v4_and_v6(self): ++ """When dhcp6 is public and dhcp4 is set to local enable both.""" ++ macs_to_nics = {self.mac1: 'eth9'} ++ network_metadata_both = copy.deepcopy(self.network_metadata) ++ nic1_metadata = ( ++ network_metadata_both['interfaces']['macs'][self.mac1]) ++ nic1_metadata['ipv6s'] = '2620:0:1009:fd00:e442:c88d:c04d:dc85/64' ++ nic1_metadata.pop('public-ipv4s') ++ nic1_metadata['local-ipv4s'] = '10.0.0.42' # Local ipv4 only on vpc ++ expected = {'version': 1, 'config': [ ++ {'mac_address': self.mac1, 'type': 'physical', ++ 'name': 'eth9', ++ 'subnets': [{'type': 'dhcp4'}, {'type': 'dhcp6'}]}]} ++ self.assertEqual( ++ expected, ++ ec2.convert_ec2_metadata_network_config( ++ network_metadata_both, macs_to_nics)) ++ + def test_convert_ec2_metadata_network_config_handles_dhcp4_and_dhcp6(self): + """Config both dhcp4 and dhcp6 when both vpc-ipv6 and ipv4 exists.""" + macs_to_nics = {self.mac1: 'eth9'} +-- +2.14.3 + diff --git a/cloud-init-17.1-nm-controlled.patch b/cloud-init-17.1-nm-controlled.patch new file mode 100644 index 0000000..668d63e --- /dev/null +++ b/cloud-init-17.1-nm-controlled.patch @@ -0,0 +1,291 @@ +Index: cloud-init-17.1/cloudinit/net/sysconfig.py +=================================================================== +--- cloud-init-17.1.orig/cloudinit/net/sysconfig.py ++++ cloud-init-17.1/cloudinit/net/sysconfig.py +@@ -230,7 +230,6 @@ class Renderer(renderer.Renderer): + iface_defaults = tuple([ + ('ONBOOT', True), + ('USERCTL', False), +- ('NM_CONTROLLED', False), + ('BOOTPROTO', 'none'), + ]) + +Index: cloud-init-17.1/tests/unittests/test_net.py +=================================================================== +--- cloud-init-17.1.orig/tests/unittests/test_net.py ++++ cloud-init-17.1/tests/unittests/test_net.py +@@ -146,7 +146,6 @@ GATEWAY=172.19.3.254 + HWADDR=fa:16:3e:ed:9a:59 + IPADDR=172.19.1.34 + NETMASK=255.255.252.0 +-NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no +@@ -214,7 +213,6 @@ IPADDR=172.19.1.34 + IPADDR1=10.0.0.10 + NETMASK=255.255.252.0 + NETMASK1=255.255.255.0 +-NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no +@@ -304,7 +302,6 @@ IPV6ADDR_SECONDARIES="2001:DB9::10/64 20 + IPV6INIT=yes + IPV6_DEFAULTGW=2001:DB8::1 + NETMASK=255.255.252.0 +-NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no +@@ -428,7 +425,6 @@ NETWORK_CONFIGS = { + BOOTPROTO=none + DEVICE=eth1 + HWADDR=cf:d6:af:48:e8:80 +- NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no"""), +@@ -440,7 +436,6 @@ NETWORK_CONFIGS = { + HWADDR=c0:d6:9f:2c:e8:80 + IPADDR=192.168.21.3 + NETMASK=255.255.255.0 +- NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no"""), +@@ -552,7 +547,6 @@ NETWORK_CONFIGS = { + IPV6ADDR=2001:1::1/64 + IPV6INIT=yes + NETMASK=255.255.255.0 +- NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no +@@ -794,14 +788,12 @@ pre-down route del -net 10.0.0.0 netmask + DHCPV6C=yes + IPV6INIT=yes + MACADDR=aa:bb:cc:dd:ee:ff +- NM_CONTROLLED=no + ONBOOT=yes + TYPE=Bond + USERCTL=no"""), + 'ifcfg-bond0.200': textwrap.dedent("""\ + BOOTPROTO=dhcp + DEVICE=bond0.200 +- NM_CONTROLLED=no + ONBOOT=yes + PHYSDEV=bond0 + TYPE=Ethernet +@@ -817,7 +809,6 @@ pre-down route del -net 10.0.0.0 
netmask + IPV6INIT=yes + IPV6_DEFAULTGW=2001:4800:78ff:1b::1 + NETMASK=255.255.255.0 +- NM_CONTROLLED=no + ONBOOT=yes + PRIO=22 + STP=off +@@ -827,7 +818,6 @@ pre-down route del -net 10.0.0.0 netmask + BOOTPROTO=none + DEVICE=eth0 + HWADDR=c0:d6:9f:2c:e8:80 +- NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no"""), +@@ -841,7 +831,6 @@ pre-down route del -net 10.0.0.0 netmask + MTU=1500 + NETMASK=255.255.255.0 + NETMASK1=255.255.255.0 +- NM_CONTROLLED=no + ONBOOT=yes + PHYSDEV=eth0 + TYPE=Ethernet +@@ -852,7 +841,6 @@ pre-down route del -net 10.0.0.0 netmask + DEVICE=eth1 + HWADDR=aa:d6:9f:2c:e8:80 + MASTER=bond0 +- NM_CONTROLLED=no + ONBOOT=yes + SLAVE=yes + TYPE=Ethernet +@@ -862,7 +850,6 @@ pre-down route del -net 10.0.0.0 netmask + DEVICE=eth2 + HWADDR=c0:bb:9f:2c:e8:80 + MASTER=bond0 +- NM_CONTROLLED=no + ONBOOT=yes + SLAVE=yes + TYPE=Ethernet +@@ -872,7 +859,6 @@ pre-down route del -net 10.0.0.0 netmask + BRIDGE=br0 + DEVICE=eth3 + HWADDR=66:bb:9f:2c:e8:80 +- NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no"""), +@@ -881,7 +867,6 @@ pre-down route del -net 10.0.0.0 netmask + BRIDGE=br0 + DEVICE=eth4 + HWADDR=98:bb:9f:2c:e8:80 +- NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no"""), +@@ -889,7 +874,6 @@ pre-down route del -net 10.0.0.0 netmask + BOOTPROTO=dhcp + DEVICE=eth5 + HWADDR=98:bb:9f:2c:e8:8a +- NM_CONTROLLED=no + ONBOOT=no + TYPE=Ethernet + USERCTL=no""") +@@ -1171,7 +1155,6 @@ pre-down route del -net 10.0.0.0 netmask + IPV6INIT=yes + NETMASK=255.255.255.0 + NETMASK1=255.255.255.0 +- NM_CONTROLLED=no + ONBOOT=yes + TYPE=Bond + USERCTL=no +@@ -1181,7 +1164,6 @@ pre-down route del -net 10.0.0.0 netmask + DEVICE=bond0s0 + HWADDR=aa:bb:cc:dd:e8:00 + MASTER=bond0 +- NM_CONTROLLED=no + ONBOOT=yes + SLAVE=yes + TYPE=Ethernet +@@ -1199,7 +1181,6 @@ pre-down route del -net 10.0.0.0 netmask + DEVICE=bond0s1 + HWADDR=aa:bb:cc:dd:e8:01 + MASTER=bond0 +- NM_CONTROLLED=no + ONBOOT=yes + SLAVE=yes + TYPE=Ethernet +@@ -1236,7 +1217,6 @@ pre-down route del -net 10.0.0.0 netmask + BOOTPROTO=none + DEVICE=en0 + HWADDR=aa:bb:cc:dd:e8:00 +- NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no"""), +@@ -1252,7 +1232,6 @@ pre-down route del -net 10.0.0.0 netmask + IPV6_DEFAULTGW=2001:1::1 + NETMASK=255.255.255.0 + NETMASK1=255.255.255.0 +- NM_CONTROLLED=no + ONBOOT=yes + PHYSDEV=en0 + TYPE=Ethernet +@@ -1293,7 +1272,6 @@ pre-down route del -net 10.0.0.0 netmask + DEVICE=br0 + IPADDR=192.168.2.2 + NETMASK=255.255.255.0 +- NM_CONTROLLED=no + ONBOOT=yes + PRIO=22 + STP=off +@@ -1307,7 +1285,6 @@ pre-down route del -net 10.0.0.0 netmask + HWADDR=52:54:00:12:34:00 + IPV6ADDR=2001:1::100/96 + IPV6INIT=yes +- NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no +@@ -1319,7 +1296,6 @@ pre-down route del -net 10.0.0.0 netmask + HWADDR=52:54:00:12:34:01 + IPV6ADDR=2001:1::101/96 + IPV6INIT=yes +- NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no +@@ -1393,7 +1369,6 @@ pre-down route del -net 10.0.0.0 netmask + HWADDR=52:54:00:12:34:00 + IPADDR=192.168.1.2 + NETMASK=255.255.255.0 +- NM_CONTROLLED=no + ONBOOT=no + TYPE=Ethernet + USERCTL=no +@@ -1403,7 +1378,6 @@ pre-down route del -net 10.0.0.0 netmask + DEVICE=eth1 + HWADDR=52:54:00:12:34:aa + MTU=1480 +- NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no +@@ -1412,7 +1386,6 @@ pre-down route del -net 10.0.0.0 netmask + BOOTPROTO=none + DEVICE=eth2 + HWADDR=52:54:00:12:34:ff +- NM_CONTROLLED=no + ONBOOT=no + TYPE=Ethernet + USERCTL=no +@@ -1685,7 +1658,6 @@ class TestSysConfigRendering(CiTestCase) + 
BOOTPROTO=dhcp + DEVICE=eth1000 + HWADDR=07-1C-C6-75-A4-BE +-NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no +@@ -1805,7 +1777,6 @@ GATEWAY=10.0.2.2 + HWADDR=52:54:00:12:34:00 + IPADDR=10.0.2.15 + NETMASK=255.255.255.0 +-NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no +@@ -1826,7 +1797,6 @@ USERCTL=no + # + BOOTPROTO=dhcp + DEVICE=eth0 +-NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no +Index: cloud-init-17.1/tests/unittests/test_distros/test_netconfig.py +=================================================================== +--- cloud-init-17.1.orig/tests/unittests/test_distros/test_netconfig.py ++++ cloud-init-17.1/tests/unittests/test_distros/test_netconfig.py +@@ -481,7 +481,6 @@ DEVICE=eth0 + GATEWAY=192.168.1.254 + IPADDR=192.168.1.5 + NETMASK=255.255.255.0 +-NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no +@@ -498,7 +497,6 @@ USERCTL=no + # + BOOTPROTO=dhcp + DEVICE=eth1 +-NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no +@@ -632,7 +630,6 @@ DEVICE=eth0 + IPV6ADDR=2607:f0d0:1002:0011::2/64 + IPV6INIT=yes + IPV6_DEFAULTGW=2607:f0d0:1002:0011::1 +-NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no +@@ -647,7 +644,6 @@ USERCTL=no + # + BOOTPROTO=dhcp + DEVICE=eth1 +-NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no diff --git a/cloud-init-17.1-no-override-default-network.patch b/cloud-init-17.1-no-override-default-network.patch new file mode 100644 index 0000000..7c21c83 --- /dev/null +++ b/cloud-init-17.1-no-override-default-network.patch @@ -0,0 +1,200 @@ +diff -rup cloud-init-17.1.orig/cloudinit/net/sysconfig.py cloud-init-17.1/cloudinit/net/sysconfig.py +--- cloud-init-17.1.orig/cloudinit/net/sysconfig.py 2018-03-26 19:22:35.693111559 +0200 ++++ cloud-init-17.1/cloudinit/net/sysconfig.py 2018-03-26 23:47:41.424803588 +0200 +@@ -586,7 +586,17 @@ class Renderer(renderer.Renderer): + + # always write /etc/sysconfig/network configuration + sysconfig_path = util.target_path(target, "etc/sysconfig/network") +- netcfg = [_make_header(), 'NETWORKING=yes'] ++ # Make sure that existing lines, other than overriding ones, remain ++ netcfg = [] ++ for line in util.load_file(sysconfig_path, quiet=True).split('\n'): ++ if 'cloud-init' in line: ++ break ++ if not line.startswith(('NETWORKING=', ++ 'IPV6_AUTOCONF=', ++ 'NETWORKING_IPV6=')): ++ netcfg.append(line) ++ # Now generate the cloud-init portion of sysconfig/network ++ netcfg.extend([_make_header(), 'NETWORKING=yes']) + if network_state.use_ipv6: + netcfg.append('NETWORKING_IPV6=yes') + netcfg.append('IPV6_AUTOCONF=no') +diff -rup cloud-init-17.1.orig/tests/unittests/test_distros/test_netconfig.py cloud-init-17.1/tests/unittests/test_distros/test_netconfig.py +--- cloud-init-17.1.orig/tests/unittests/test_distros/test_netconfig.py 2018-03-26 19:22:35.717111557 +0200 ++++ cloud-init-17.1/tests/unittests/test_distros/test_netconfig.py 2018-03-26 22:08:25.008717651 +0200 +@@ -384,6 +384,82 @@ hn0: flags=8843= 0.8.14-2 +BuildRequires: python3-jinja2 python3-jsonpatch python3-jsonschema +BuildRequires: python3-mock python3-nose python3-oauthlib python3-prettytable +BuildRequires: python3-pyserial python3-PyYAML python3-requests python3-six +BuildRequires: python3-unittest2 dnf + +Requires: e2fsprogs iproute python3-libselinux net-tools python3-policycoreutils +Requires: procps python3-configobj python3-jinja2 python3-jsonpatch xfsprogs +Requires: python3-jsonschema python3-oauthlib python3-prettytable util-linux +Requires: python3-pyserial python3-pyyaml python3-requests 
python3-six shadow +%{?systemd_requires} + +%description +Cloud-init is the defacto multi-distribution package that handles early +initialization of a cloud instance. + +%package_help + +%prep +%autosetup -n %{name}-%{version} -p1 +sed -i -e 's|#!/usr/bin/env python|#!/usr/bin/env python3|' \ + -e 's|#!/usr/bin/python|#!/usr/bin/python3|' tools/* cloudinit/ssh_util.py + +%build +%py3_build + +%install +%py3_install -- --init-system=systemd +python3 tools/render-cloudcfg --variant euleros > %{buildroot}/%{_sysconfdir}/cloud/cloud.cfg +install -d %{buildroot}/var/lib/cloud +install -d %{buildroot}/run/cloud-init +install -D -m 0644 %{SOURCE1} %{buildroot}/%{_tmpfilesdir}/%{name}.conf +install -D -m 0644 tools/21-cloudinit.conf %{buildroot}/%{_sysconfdir}/rsyslog.d/21-cloudinit.conf + +%check +#remove test_handler_ntp.py +rm -f $RPM_BUILD_DIR/%{name}-%{version}/tests/unittests/test_handler/test_handler_ntp.py + +nosetests-%{python3_version} tests/unittests/ + +%preun +%systemd_preun cloud-config.service cloud-config.target cloud-final.service cloud-init.service cloud-init.target cloud-init-local.service + +%post +if [ $1 -eq 1 ] ; then + /bin/systemctl enable cloud-config.service >/dev/null 2>&1 || : + /bin/systemctl enable cloud-final.service >/dev/null 2>&1 || : + /bin/systemctl enable cloud-init.service >/dev/null 2>&1 || : + /bin/systemctl enable cloud-init-local.service >/dev/null 2>&1 || : +elif [ $1 -eq 2 ]; then + /bin/systemctl is-enabled cloud-config.service >/dev/null 2>&1 && + /bin/systemctl reenable cloud-config.service >/dev/null 2>&1 || : + /bin/systemctl is-enabled cloud-final.service >/dev/null 2>&1 && + /bin/systemctl reenable cloud-final.service >/dev/null 2>&1 || : + /bin/systemctl is-enabled cloud-init.service >/dev/null 2>&1 && + /bin/systemctl reenable cloud-init.service >/dev/null 2>&1 || : + /bin/systemctl is-enabled cloud-init-local.service >/dev/null 2>&1 && + /bin/systemctl reenable cloud-init-local.service >/dev/null 2>&1 || : +fi + +%postun +%systemd_postun + +%files +%doc ChangeLog +%license LICENSE LICENSE-Apache2.0 LICENSE-GPLv3 +%config(noreplace) %{_sysconfdir}/cloud/cloud.cfg +%dir %{_sysconfdir}/cloud/cloud.cfg.d +%config(noreplace) %{_sysconfdir}/cloud/cloud.cfg.d/*.cfg +%doc %{_sysconfdir}/cloud/cloud.cfg.d/README +%dir %{_sysconfdir}/rsyslog.d +%config(noreplace) %{_sysconfdir}/rsyslog.d/21-cloudinit.conf +%{_sysconfdir}/NetworkManager/dispatcher.d/hook-network-manager +%{_sysconfdir}/dhcp/dhclient-exit-hooks.d/hook-dhclient +/lib/udev/rules.d/66-azure-ephemeral.rules +%{_unitdir}/cloud-config.service +%{_unitdir}/cloud-final.service +%{_unitdir}/cloud-init.service +%{_unitdir}/cloud-init-local.service +%{_unitdir}/cloud-config.target +%{_unitdir}/cloud-init.target +/usr/lib/systemd/system-generators/cloud-init-generator +%{_tmpfilesdir}/%{name}.conf +%{_libexecdir}/%{name} +%{_bindir}/cloud-init* +%{python3_sitelib}/* +%dir /run/cloud-init +%dir /var/lib/cloud + +%files help +%doc doc/* +%dir %{_sysconfdir}/cloud/templates +%config(noreplace) %{_sysconfdir}/cloud/templates/* +%exclude /usr/share/doc/* + +%changelog +* Tue Sep 17 2019 openEuler Buildteam - 17.1-8 +- Package init diff --git a/hosts-Fix-openSUSE-and-SLES-setup-for-etc-hosts-and-.patch b/hosts-Fix-openSUSE-and-SLES-setup-for-etc-hosts-and-.patch new file mode 100644 index 0000000..d9eb27d --- /dev/null +++ b/hosts-Fix-openSUSE-and-SLES-setup-for-etc-hosts-and-.patch @@ -0,0 +1,172 @@ +From 22a14a6a6d45ae55d2c2307d7b097eef9863bb0c Mon Sep 17 00:00:00 2001 +From: Robert Schweikert +Date: 
Wed, 8 Nov 2017 15:45:53 -0500 +Subject: [PATCH 035/354] hosts: Fix openSUSE and SLES setup for /etc/hosts + and clarify docs. + +The etc/hosts file is was not properly setup for openSUSE or SLES +when manage_etc_hosts is set in the config file. + +Improve the doc to address the fact that the 'localhost' ip is +distribution dependent (not always 127.0.0.1). + +LP: #1731022 +--- + cloudinit/config/cc_update_etc_hosts.py | 4 +- + templates/hosts.opensuse.tmpl | 26 -------- + templates/hosts.suse.tmpl | 10 +++- + .../test_handler/test_handler_etc_hosts.py | 69 ++++++++++++++++++++++ + 4 files changed, 79 insertions(+), 30 deletions(-) + delete mode 100644 templates/hosts.opensuse.tmpl + create mode 100644 tests/unittests/test_handler/test_handler_etc_hosts.py + +diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py +index b394784..c96eede 100644 +--- a/cloudinit/config/cc_update_etc_hosts.py ++++ b/cloudinit/config/cc_update_etc_hosts.py +@@ -23,8 +23,8 @@ using the template located in ``/etc/cloud/templates/hosts.tmpl``. In the + + If ``manage_etc_hosts`` is set to ``localhost``, then cloud-init will not + rewrite ``/etc/hosts`` entirely, but rather will ensure that a entry for the +-fqdn with ip ``127.0.1.1`` is present in ``/etc/hosts`` (i.e. +-``ping `` will ping ``127.0.1.1``). ++fqdn with a distribution dependent ip is present in ``/etc/hosts`` (i.e. ++``ping `` will ping ``127.0.0.1`` or ``127.0.1.1`` or other ip). + + .. note:: + if ``manage_etc_hosts`` is set ``true`` or ``template``, the contents +diff --git a/templates/hosts.opensuse.tmpl b/templates/hosts.opensuse.tmpl +deleted file mode 100644 +index 655da3f..0000000 +--- a/templates/hosts.opensuse.tmpl ++++ /dev/null +@@ -1,26 +0,0 @@ +-* +- This file /etc/cloud/templates/hosts.opensuse.tmpl is only utilized +- if enabled in cloud-config. Specifically, in order to enable it +- you need to add the following to config: +- manage_etc_hosts: True +-*# +-# Your system has configured 'manage_etc_hosts' as True. +-# As a result, if you wish for changes to this file to persist +-# then you will need to either +-# a.) make changes to the master file in +-# /etc/cloud/templates/hosts.opensuse.tmpl +-# b.) 
change or remove the value of 'manage_etc_hosts' in +-# /etc/cloud/cloud.cfg or cloud-config from user-data +-# +-# The following lines are desirable for IPv4 capable hosts +-127.0.0.1 localhost +- +-# The following lines are desirable for IPv6 capable hosts +-::1 localhost ipv6-localhost ipv6-loopback +-fe00::0 ipv6-localnet +- +-ff00::0 ipv6-mcastprefix +-ff02::1 ipv6-allnodes +-ff02::2 ipv6-allrouters +-ff02::3 ipv6-allhosts +- +diff --git a/templates/hosts.suse.tmpl b/templates/hosts.suse.tmpl +index b608269..8e664db 100644 +--- a/templates/hosts.suse.tmpl ++++ b/templates/hosts.suse.tmpl +@@ -13,12 +13,18 @@ you need to add the following to config: + # /etc/cloud/cloud.cfg or cloud-config from user-data + # + # The following lines are desirable for IPv4 capable hosts +-127.0.0.1 localhost ++127.0.0.1 {{fqdn}} {{hostname}} ++127.0.0.1 localhost.localdomain localhost ++127.0.0.1 localhost4.localdomain4 localhost4 + + # The following lines are desirable for IPv6 capable hosts ++::1 {{fqdn}} {{hostname}} ++::1 localhost.localdomain localhost ++::1 localhost6.localdomain6 localhost6 + ::1 localhost ipv6-localhost ipv6-loopback +-fe00::0 ipv6-localnet + ++ ++fe00::0 ipv6-localnet + ff00::0 ipv6-mcastprefix + ff02::1 ipv6-allnodes + ff02::2 ipv6-allrouters +diff --git a/tests/unittests/test_handler/test_handler_etc_hosts.py b/tests/unittests/test_handler/test_handler_etc_hosts.py +new file mode 100644 +index 0000000..ced05a8 +--- /dev/null ++++ b/tests/unittests/test_handler/test_handler_etc_hosts.py +@@ -0,0 +1,69 @@ ++# This file is part of cloud-init. See LICENSE file for license information. ++ ++from cloudinit.config import cc_update_etc_hosts ++ ++from cloudinit import cloud ++from cloudinit import distros ++from cloudinit import helpers ++from cloudinit import util ++ ++from cloudinit.tests import helpers as t_help ++ ++import logging ++import os ++import shutil ++ ++LOG = logging.getLogger(__name__) ++ ++ ++class TestHostsFile(t_help.FilesystemMockingTestCase): ++ def setUp(self): ++ super(TestHostsFile, self).setUp() ++ self.tmp = self.tmp_dir() ++ ++ def _fetch_distro(self, kind): ++ cls = distros.fetch(kind) ++ paths = helpers.Paths({}) ++ return cls(kind, {}, paths) ++ ++ def test_write_etc_hosts_suse_localhost(self): ++ cfg = { ++ 'manage_etc_hosts': 'localhost', ++ 'hostname': 'cloud-init.test.us' ++ } ++ os.makedirs('%s/etc/' % self.tmp) ++ hosts_content = '192.168.1.1 blah.blah.us blah\n' ++ fout = open('%s/etc/hosts' % self.tmp, 'w') ++ fout.write(hosts_content) ++ fout.close() ++ distro = self._fetch_distro('sles') ++ distro.hosts_fn = '%s/etc/hosts' % self.tmp ++ paths = helpers.Paths({}) ++ ds = None ++ cc = cloud.Cloud(ds, paths, {}, distro, None) ++ self.patchUtils(self.tmp) ++ cc_update_etc_hosts.handle('test', cfg, cc, LOG, []) ++ contents = util.load_file('%s/etc/hosts' % self.tmp) ++ if '127.0.0.1\tcloud-init.test.us\tcloud-init' not in contents: ++ self.assertIsNone('No entry for 127.0.0.1 in etc/hosts') ++ if '192.168.1.1\tblah.blah.us\tblah' not in contents: ++ self.assertIsNone('Default etc/hosts content modified') ++ ++ def test_write_etc_hosts_suse_template(self): ++ cfg = { ++ 'manage_etc_hosts': 'template', ++ 'hostname': 'cloud-init.test.us' ++ } ++ shutil.copytree('templates', '%s/etc/cloud/templates' % self.tmp) ++ distro = self._fetch_distro('sles') ++ paths = helpers.Paths({}) ++ paths.template_tpl = '%s' % self.tmp + '/etc/cloud/templates/%s.tmpl' ++ ds = None ++ cc = cloud.Cloud(ds, paths, {}, distro, None) ++ self.patchUtils(self.tmp) ++ 
cc_update_etc_hosts.handle('test', cfg, cc, LOG, []) ++ contents = util.load_file('%s/etc/hosts' % self.tmp) ++ if '127.0.0.1 cloud-init.test.us cloud-init' not in contents: ++ self.assertIsNone('No entry for 127.0.0.1 in etc/hosts') ++ if '::1 cloud-init.test.us cloud-init' not in contents: ++ self.assertIsNone('No entry for 127.0.0.1 in etc/hosts') +-- +1.7.12.4 + diff --git a/ntp-fix-config-module-schema-to-allow-empty-ntp-conf.patch b/ntp-fix-config-module-schema-to-allow-empty-ntp-conf.patch new file mode 100644 index 0000000..b2d4aa9 --- /dev/null +++ b/ntp-fix-config-module-schema-to-allow-empty-ntp-conf.patch @@ -0,0 +1,163 @@ +From 6bc504e41666329631cdfd5b947ed5b0e2529a76 Mon Sep 17 00:00:00 2001 +From: Chad Smith +Date: Fri, 20 Oct 2017 13:24:22 -0600 +Subject: [PATCH 021/354] ntp: fix config module schema to allow empty ntp + config + +Fix three things related to the ntp module: + 1. Fix invalid cloud-config schema in the integration test which + provided empty dicts instead of emptylists for pools and servers + 2. Correct logic in the ntp module to allow support for the minimal + cloud-config 'ntp:' without raising a RuntimeError. Docs and schema + definitions already describe that cloud-config's ntp can be empty. + An ntp configuration with neither pools nor servers will be + configured with a default set of ntp pools. As such, the ntp module + now officially allows the following ntp cloud-configs: + - ntp: + - ntp: {} + - ntp: + servers: [] + pools: [] + 3. Add a simple unit test which validates all cloud-config provided to + our integration tests to ensure it adheres to any defined module + schema so as more jsonschema definitions are added, we validate our + integration test configs. + +LP: #1724951 +--- + cloudinit/config/cc_ntp.py | 4 ++- + tests/cloud_tests/testcases/modules/ntp.yaml | 4 +-- + tests/unittests/test_handler/test_handler_ntp.py | 23 ++++++++------- + tests/unittests/test_handler/test_schema.py | 37 +++++++++++++++++++++++- + 4 files changed, 53 insertions(+), 15 deletions(-) + +diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py +index 15ae1ec..d43d060 100644 +--- a/cloudinit/config/cc_ntp.py ++++ b/cloudinit/config/cc_ntp.py +@@ -100,7 +100,9 @@ def handle(name, cfg, cloud, log, _args): + LOG.debug( + "Skipping module named %s, not present or disabled by cfg", name) + return +- ntp_cfg = cfg.get('ntp', {}) ++ ntp_cfg = cfg['ntp'] ++ if ntp_cfg is None: ++ ntp_cfg = {} # Allow empty config which will install the package + + # TODO drop this when validate_cloudconfig_schema is strict=True + if not isinstance(ntp_cfg, (dict)): +diff --git a/tests/cloud_tests/configs/modules/ntp.yaml b/tests/cloud_tests/configs/modules/ntp.yaml +index fbef431..2530d72 100644 +--- a/tests/cloud_tests/configs/modules/ntp.yaml ++++ b/tests/cloud_tests/configs/modules/ntp.yaml +@@ -4,8 +4,8 @@ + cloud_config: | + #cloud-config + ntp: +- pools: {} +- servers: {} ++ pools: [] ++ servers: [] + collect_scripts: + ntp_installed: | + #!/bin/bash +diff --git a/tests/unittests/test_handler/test_handler_ntp.py b/tests/unittests/test_handler/test_handler_ntp.py +index 4f29124..3abe578 100644 +--- a/tests/unittests/test_handler/test_handler_ntp.py ++++ b/tests/unittests/test_handler/test_handler_ntp.py +@@ -293,23 +293,24 @@ class TestNtp(FilesystemMockingTestCase): + + def test_ntp_handler_schema_validation_allows_empty_ntp_config(self): + """Ntp schema validation allows for an empty ntp: configuration.""" +- invalid_config = {'ntp': {}} ++ valid_empty_configs = [{'ntp': 
{}}, {'ntp': None}] + distro = 'ubuntu' + cc = self._get_cloud(distro) + ntp_conf = os.path.join(self.new_root, 'ntp.conf') + with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: + stream.write(NTP_TEMPLATE) +- with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): +- cc_ntp.handle('cc_ntp', invalid_config, cc, None, []) ++ for valid_empty_config in valid_empty_configs: ++ with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): ++ cc_ntp.handle('cc_ntp', valid_empty_config, cc, None, []) ++ with open(ntp_conf) as stream: ++ content = stream.read() ++ default_pools = [ ++ "{0}.{1}.pool.ntp.org".format(x, distro) ++ for x in range(0, cc_ntp.NR_POOL_SERVERS)] ++ self.assertEqual( ++ "servers []\npools {0}\n".format(default_pools), ++ content) + self.assertNotIn('Invalid config:', self.logs.getvalue()) +- with open(ntp_conf) as stream: +- content = stream.read() +- default_pools = [ +- "{0}.{1}.pool.ntp.org".format(x, distro) +- for x in range(0, cc_ntp.NR_POOL_SERVERS)] +- self.assertEqual( +- "servers []\npools {0}\n".format(default_pools), +- content) + + @skipIf(_missing_jsonschema_dep, "No python-jsonschema dependency") + def test_ntp_handler_schema_validation_warns_non_string_item_type(self): +diff --git a/tests/unittests/test_handler/test_schema.py b/tests/unittests/test_handler/test_schema.py +index b8fc893..648573f 100644 +--- a/tests/unittests/test_handler/test_schema.py ++++ b/tests/unittests/test_handler/test_schema.py +@@ -4,11 +4,12 @@ from cloudinit.config.schema import ( + CLOUD_CONFIG_HEADER, SchemaValidationError, annotated_cloudconfig_file, + get_schema_doc, get_schema, validate_cloudconfig_file, + validate_cloudconfig_schema, main) +-from cloudinit.util import write_file ++from cloudinit.util import subp, write_file + + from cloudinit.tests.helpers import CiTestCase, mock, skipIf + + from copy import copy ++import os + from six import StringIO + from textwrap import dedent + from yaml import safe_load +@@ -364,4 +365,38 @@ class MainTest(CiTestCase): + self.assertIn( + 'Valid cloud-config file {0}'.format(myyaml), m_stdout.getvalue()) + ++ ++class CloudTestsIntegrationTest(CiTestCase): ++ """Validate all cloud-config yaml schema provided in integration tests. ++ ++ It is less expensive to have unittests validate schema of all cloud-config ++ yaml provided to integration tests, than to run an integration test which ++ raises Warnings or errors on invalid cloud-config schema. 
++ """ ++ ++ @skipIf(_missing_jsonschema_dep, "No python-jsonschema dependency") ++ def test_all_integration_test_cloud_config_schema(self): ++ """Validate schema of cloud_tests yaml files looking for warnings.""" ++ schema = get_schema() ++ testsdir = os.path.dirname(os.path.dirname(os.path.dirname(__file__))) ++ integration_testdir = os.path.sep.join( ++ [testsdir, 'cloud_tests', 'testcases']) ++ errors = [] ++ out, _ = subp(['find', integration_testdir, '-name', '*yaml']) ++ for filename in out.splitlines(): ++ test_cfg = safe_load(open(filename)) ++ cloud_config = test_cfg.get('cloud_config') ++ if cloud_config: ++ cloud_config = safe_load( ++ cloud_config.replace("#cloud-config\n", "")) ++ try: ++ validate_cloudconfig_schema( ++ cloud_config, schema, strict=True) ++ except SchemaValidationError as e: ++ errors.append( ++ '{0}: {1}'.format( ++ filename, e)) ++ if errors: ++ raise AssertionError(', '.join(errors)) ++ + # vi: ts=4 expandtab syntax=python +-- +1.7.12.4 + diff --git a/resizefs-Fix-regression-when-system-booted-with-root.patch b/resizefs-Fix-regression-when-system-booted-with-root.patch new file mode 100644 index 0000000..e12b2a4 --- /dev/null +++ b/resizefs-Fix-regression-when-system-booted-with-root.patch @@ -0,0 +1,322 @@ +From 17a15f9e0ae78e4fc4e24fab0caebdf78f06ef66 Mon Sep 17 00:00:00 2001 +From: Chad Smith +Date: Mon, 23 Oct 2017 14:34:23 -0600 +Subject: [PATCH 025/354] resizefs: Fix regression when system booted with + root=PARTUUID= + +A recent cleanup of the resizefs module broke resizing when a system was +booted with root=PARTUUID= and the device /dev/root does not exist. +This path is exposed with the Ubuntu 16.04 but not with Ubuntu 17.10. A +recreate exists under bug 1684869. + +LP: #1725067 +--- + cloudinit/config/cc_resizefs.py | 43 ++++------ + .../test_handler/test_handler_resizefs.py | 91 ++++++++++++++-------- + 2 files changed, 70 insertions(+), 64 deletions(-) + +diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py +index f774baa..0d282e6 100644 +--- a/cloudinit/config/cc_resizefs.py ++++ b/cloudinit/config/cc_resizefs.py +@@ -145,25 +145,6 @@ RESIZE_FS_PRECHECK_CMDS = { + } + + +-def rootdev_from_cmdline(cmdline): +- found = None +- for tok in cmdline.split(): +- if tok.startswith("root="): +- found = tok[5:] +- break +- if found is None: +- return None +- +- if found.startswith("/dev/"): +- return found +- if found.startswith("LABEL="): +- return "/dev/disk/by-label/" + found[len("LABEL="):] +- if found.startswith("UUID="): +- return "/dev/disk/by-uuid/" + found[len("UUID="):] +- +- return "/dev/" + found +- +- + def can_skip_resize(fs_type, resize_what, devpth): + fstype_lc = fs_type.lower() + for i, func in RESIZE_FS_PRECHECK_CMDS.items(): +@@ -172,14 +153,15 @@ def can_skip_resize(fs_type, resize_what, devpth): + return False + + +-def is_device_path_writable_block(devpath, info, log): +- """Return True if devpath is a writable block device. ++def maybe_get_writable_device_path(devpath, info, log): ++ """Return updated devpath if the devpath is a writable block device. + +- @param devpath: Path to the root device we want to resize. ++ @param devpath: Requested path to the root device we want to resize. + @param info: String representing information about the requested device. + @param log: Logger to which logs will be added upon error. 
+ +- @returns Boolean True if block device is writable ++ @returns devpath or updated devpath per kernel commandline if the device ++ path is a writable block device, returns None otherwise. + """ + container = util.is_container() + +@@ -189,12 +171,12 @@ def is_device_path_writable_block(devpath, info, log): + devpath = util.rootdev_from_cmdline(util.get_cmdline()) + if devpath is None: + log.warn("Unable to find device '/dev/root'") +- return False ++ return None + log.debug("Converted /dev/root to '%s' per kernel cmdline", devpath) + + if devpath == 'overlayroot': + log.debug("Not attempting to resize devpath '%s': %s", devpath, info) +- return False ++ return None + + try: + statret = os.stat(devpath) +@@ -207,7 +189,7 @@ def is_device_path_writable_block(devpath, info, log): + devpath, info) + else: + raise exc +- return False ++ return None + + if not stat.S_ISBLK(statret.st_mode) and not stat.S_ISCHR(statret.st_mode): + if container: +@@ -216,8 +198,8 @@ def is_device_path_writable_block(devpath, info, log): + else: + log.warn("device '%s' not a block device. cannot resize: %s" % + (devpath, info)) +- return False +- return True ++ return None ++ return devpath # The writable block devpath + + + def handle(name, cfg, _cloud, log, args): +@@ -242,8 +224,9 @@ def handle(name, cfg, _cloud, log, args): + info = "dev=%s mnt_point=%s path=%s" % (devpth, mount_point, resize_what) + log.debug("resize_info: %s" % info) + +- if not is_device_path_writable_block(devpth, info, log): +- return ++ devpth = maybe_get_writable_device_path(devpth, info, log) ++ if not devpth: ++ return # devpath was not a writable block device + + resizer = None + if can_skip_resize(fs_type, resize_what, devpth): +diff --git a/tests/unittests/test_handler/test_handler_resizefs.py b/tests/unittests/test_handler/test_handler_resizefs.py +index 3e5d436..29d5574 100644 +--- a/tests/unittests/test_handler/test_handler_resizefs.py ++++ b/tests/unittests/test_handler/test_handler_resizefs.py +@@ -1,9 +1,9 @@ + # This file is part of cloud-init. See LICENSE file for license information. 
+ + from cloudinit.config.cc_resizefs import ( +- can_skip_resize, handle, is_device_path_writable_block, +- rootdev_from_cmdline) ++ can_skip_resize, handle, maybe_get_writable_device_path) + ++from collections import namedtuple + import logging + import textwrap + +@@ -138,47 +138,48 @@ class TestRootDevFromCmdline(CiTestCase): + invalid_cases = [ + 'BOOT_IMAGE=/adsf asdfa werasef root adf', 'BOOT_IMAGE=/adsf', ''] + for case in invalid_cases: +- self.assertIsNone(rootdev_from_cmdline(case)) ++ self.assertIsNone(util.rootdev_from_cmdline(case)) + + def test_rootdev_from_cmdline_with_root_startswith_dev(self): + """Return the cmdline root when the path starts with /dev.""" + self.assertEqual( +- '/dev/this', rootdev_from_cmdline('asdf root=/dev/this')) ++ '/dev/this', util.rootdev_from_cmdline('asdf root=/dev/this')) + + def test_rootdev_from_cmdline_with_root_without_dev_prefix(self): + """Add /dev prefix to cmdline root when the path lacks the prefix.""" +- self.assertEqual('/dev/this', rootdev_from_cmdline('asdf root=this')) ++ self.assertEqual( ++ '/dev/this', util.rootdev_from_cmdline('asdf root=this')) + + def test_rootdev_from_cmdline_with_root_with_label(self): + """When cmdline root contains a LABEL, our root is disk/by-label.""" + self.assertEqual( + '/dev/disk/by-label/unique', +- rootdev_from_cmdline('asdf root=LABEL=unique')) ++ util.rootdev_from_cmdline('asdf root=LABEL=unique')) + + def test_rootdev_from_cmdline_with_root_with_uuid(self): + """When cmdline root contains a UUID, our root is disk/by-uuid.""" + self.assertEqual( + '/dev/disk/by-uuid/adsfdsaf-adsf', +- rootdev_from_cmdline('asdf root=UUID=adsfdsaf-adsf')) ++ util.rootdev_from_cmdline('asdf root=UUID=adsfdsaf-adsf')) + + +-class TestIsDevicePathWritableBlock(CiTestCase): ++class TestMaybeGetDevicePathAsWritableBlock(CiTestCase): + + with_logs = True + +- def test_is_device_path_writable_block_false_on_overlayroot(self): ++ def test_maybe_get_writable_device_path_none_on_overlayroot(self): + """When devpath is overlayroot (on MAAS), is_dev_writable is False.""" + info = 'does not matter' +- is_writable = wrap_and_call( ++ devpath = wrap_and_call( + 'cloudinit.config.cc_resizefs.util', + {'is_container': {'return_value': False}}, +- is_device_path_writable_block, 'overlayroot', info, LOG) +- self.assertFalse(is_writable) ++ maybe_get_writable_device_path, 'overlayroot', info, LOG) ++ self.assertIsNone(devpath) + self.assertIn( + "Not attempting to resize devpath 'overlayroot'", + self.logs.getvalue()) + +- def test_is_device_path_writable_block_warns_missing_cmdline_root(self): ++ def test_maybe_get_writable_device_path_warns_missing_cmdline_root(self): + """When root does not exist isn't in the cmdline, log warning.""" + info = 'does not matter' + +@@ -190,43 +191,43 @@ class TestIsDevicePathWritableBlock(CiTestCase): + exists_mock_path = 'cloudinit.config.cc_resizefs.os.path.exists' + with mock.patch(exists_mock_path) as m_exists: + m_exists.return_value = False +- is_writable = wrap_and_call( ++ devpath = wrap_and_call( + 'cloudinit.config.cc_resizefs.util', + {'is_container': {'return_value': False}, + 'get_mount_info': {'side_effect': fake_mount_info}, + 'get_cmdline': {'return_value': 'BOOT_IMAGE=/vmlinuz.efi'}}, +- is_device_path_writable_block, '/dev/root', info, LOG) +- self.assertFalse(is_writable) ++ maybe_get_writable_device_path, '/dev/root', info, LOG) ++ self.assertIsNone(devpath) + logs = self.logs.getvalue() + self.assertIn("WARNING: Unable to find device '/dev/root'", logs) + +- def 
test_is_device_path_writable_block_does_not_exist(self): ++ def test_maybe_get_writable_device_path_does_not_exist(self): + """When devpath does not exist, a warning is logged.""" + info = 'dev=/I/dont/exist mnt_point=/ path=/dev/none' +- is_writable = wrap_and_call( ++ devpath = wrap_and_call( + 'cloudinit.config.cc_resizefs.util', + {'is_container': {'return_value': False}}, +- is_device_path_writable_block, '/I/dont/exist', info, LOG) +- self.assertFalse(is_writable) ++ maybe_get_writable_device_path, '/I/dont/exist', info, LOG) ++ self.assertIsNone(devpath) + self.assertIn( + "WARNING: Device '/I/dont/exist' did not exist." + ' cannot resize: %s' % info, + self.logs.getvalue()) + +- def test_is_device_path_writable_block_does_not_exist_in_container(self): ++ def test_maybe_get_writable_device_path_does_not_exist_in_container(self): + """When devpath does not exist in a container, log a debug message.""" + info = 'dev=/I/dont/exist mnt_point=/ path=/dev/none' +- is_writable = wrap_and_call( ++ devpath = wrap_and_call( + 'cloudinit.config.cc_resizefs.util', + {'is_container': {'return_value': True}}, +- is_device_path_writable_block, '/I/dont/exist', info, LOG) +- self.assertFalse(is_writable) ++ maybe_get_writable_device_path, '/I/dont/exist', info, LOG) ++ self.assertIsNone(devpath) + self.assertIn( + "DEBUG: Device '/I/dont/exist' did not exist in container." + ' cannot resize: %s' % info, + self.logs.getvalue()) + +- def test_is_device_path_writable_block_raises_oserror(self): ++ def test_maybe_get_writable_device_path_raises_oserror(self): + """When unexpected OSError is raises by os.stat it is reraised.""" + info = 'dev=/I/dont/exist mnt_point=/ path=/dev/none' + with self.assertRaises(OSError) as context_manager: +@@ -234,41 +235,63 @@ class TestIsDevicePathWritableBlock(CiTestCase): + 'cloudinit.config.cc_resizefs', + {'util.is_container': {'return_value': True}, + 'os.stat': {'side_effect': OSError('Something unexpected')}}, +- is_device_path_writable_block, '/I/dont/exist', info, LOG) ++ maybe_get_writable_device_path, '/I/dont/exist', info, LOG) + self.assertEqual( + 'Something unexpected', str(context_manager.exception)) + +- def test_is_device_path_writable_block_non_block(self): ++ def test_maybe_get_writable_device_path_non_block(self): + """When device is not a block device, emit warning return False.""" + fake_devpath = self.tmp_path('dev/readwrite') + util.write_file(fake_devpath, '', mode=0o600) # read-write + info = 'dev=/dev/root mnt_point=/ path={0}'.format(fake_devpath) + +- is_writable = wrap_and_call( ++ devpath = wrap_and_call( + 'cloudinit.config.cc_resizefs.util', + {'is_container': {'return_value': False}}, +- is_device_path_writable_block, fake_devpath, info, LOG) +- self.assertFalse(is_writable) ++ maybe_get_writable_device_path, fake_devpath, info, LOG) ++ self.assertIsNone(devpath) + self.assertIn( + "WARNING: device '{0}' not a block device. 
cannot resize".format( + fake_devpath), + self.logs.getvalue()) + +- def test_is_device_path_writable_block_non_block_on_container(self): ++ def test_maybe_get_writable_device_path_non_block_on_container(self): + """When device is non-block device in container, emit debug log.""" + fake_devpath = self.tmp_path('dev/readwrite') + util.write_file(fake_devpath, '', mode=0o600) # read-write + info = 'dev=/dev/root mnt_point=/ path={0}'.format(fake_devpath) + +- is_writable = wrap_and_call( ++ devpath = wrap_and_call( + 'cloudinit.config.cc_resizefs.util', + {'is_container': {'return_value': True}}, +- is_device_path_writable_block, fake_devpath, info, LOG) +- self.assertFalse(is_writable) ++ maybe_get_writable_device_path, fake_devpath, info, LOG) ++ self.assertIsNone(devpath) + self.assertIn( + "DEBUG: device '{0}' not a block device in container." + ' cannot resize'.format(fake_devpath), + self.logs.getvalue()) + ++ def test_maybe_get_writable_device_path_returns_cmdline_root(self): ++ """When root device is UUID in kernel commandline, update devpath.""" ++ # XXX Long-term we want to use FilesystemMocking test to avoid ++ # touching os.stat. ++ FakeStat = namedtuple( ++ 'FakeStat', ['st_mode', 'st_size', 'st_mtime']) # minimal def. ++ info = 'dev=/dev/root mnt_point=/ path=/does/not/matter' ++ devpath = wrap_and_call( ++ 'cloudinit.config.cc_resizefs', ++ {'util.get_cmdline': {'return_value': 'asdf root=UUID=my-uuid'}, ++ 'util.is_container': False, ++ 'os.path.exists': False, # /dev/root doesn't exist ++ 'os.stat': { ++ 'return_value': FakeStat(25008, 0, 1)} # char block device ++ }, ++ maybe_get_writable_device_path, '/dev/root', info, LOG) ++ self.assertEqual('/dev/disk/by-uuid/my-uuid', devpath) ++ self.assertIn( ++ "DEBUG: Converted /dev/root to '/dev/disk/by-uuid/my-uuid'" ++ " per kernel cmdline", ++ self.logs.getvalue()) ++ + + # vi: ts=4 expandtab +-- +1.7.12.4 + diff --git a/stages-Fix-bug-causing-datasource-to-have-incorrect-.patch b/stages-Fix-bug-causing-datasource-to-have-incorrect-.patch new file mode 100644 index 0000000..e5b3583 --- /dev/null +++ b/stages-Fix-bug-causing-datasource-to-have-incorrect-.patch @@ -0,0 +1,33 @@ +From f0ff194054da90b7b49620b5658342e52156d68e Mon Sep 17 00:00:00 2001 +From: Scott Moser +Date: Thu, 20 Sep 2018 12:45:00 +0000 +Subject: [PATCH 275/354] stages: Fix bug causing datasource to have incorrect + sys_cfg. + +The Init object had a bug/odd side effect where when retrieving a distro +object it would update the datasources's sys_cfg. That was probably +intended to refresh the possibly stale config stored there. Unfortunately +what it actually did limit the config there to the 'system_info' top level +key where initially it had the whole config. + +LP: #1787459 +--- + cloudinit/stages.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/cloudinit/stages.py b/cloudinit/stages.py +index ef5c699..8a06412 100644 +--- a/cloudinit/stages.py ++++ b/cloudinit/stages.py +@@ -88,7 +88,7 @@ class Init(object): + # from whatever it was to a new set... 
+ if self.datasource is not NULL_DATA_SOURCE: + self.datasource.distro = self._distro +- self.datasource.sys_cfg = system_config ++ self.datasource.sys_cfg = self.cfg + return self._distro + + @property +-- +1.7.12.4 + diff --git a/stages-fix-tracebacks-if-a-module-stage-is-undefined.patch b/stages-fix-tracebacks-if-a-module-stage-is-undefined.patch new file mode 100644 index 0000000..b969e3c --- /dev/null +++ b/stages-fix-tracebacks-if-a-module-stage-is-undefined.patch @@ -0,0 +1,94 @@ +From fef2616b9876d3d354b0de1a8e753361e52e77b0 Mon Sep 17 00:00:00 2001 +From: Robert Schweikert +Date: Fri, 15 Jun 2018 13:41:21 -0600 +Subject: [PATCH 219/354] stages: fix tracebacks if a module stage is + undefined or empty + +In /etc/cloud/cloud.cfg, users and imagees can configure which modules run +during a specific cloud-init stage by modifying one of the following +lists: cloud_init_modules, cloud_init_modules, cloud_init_final_modules. + +If any of the configured module lists are absent or empty, cloud-init will +emit the same message it already does for existing lists that only contain +modules which are not unsupported on that platform: + +No 'config' modules to run under section 'cloud_config_modules' + +LP: #1770462 +--- + cloudinit/stages.py | 4 +++- + tests/unittests/test_runs/test_simple_run.py | 32 ++++++++++++++++++++++++++-- + 2 files changed, 33 insertions(+), 3 deletions(-) + +diff --git a/cloudinit/stages.py b/cloudinit/stages.py +index 3998cf6..286607b 100644 +--- a/cloudinit/stages.py ++++ b/cloudinit/stages.py +@@ -697,7 +697,9 @@ class Modules(object): + module_list = [] + if name not in self.cfg: + return module_list +- cfg_mods = self.cfg[name] ++ cfg_mods = self.cfg.get(name) ++ if not cfg_mods: ++ return module_list + # Create 'module_list', an array of hashes + # Where hash['mod'] = module name + # hash['freq'] = frequency +diff --git a/tests/unittests/test_runs/test_simple_run.py b/tests/unittests/test_runs/test_simple_run.py +index 762974e..d67c422 100644 +--- a/tests/unittests/test_runs/test_simple_run.py ++++ b/tests/unittests/test_runs/test_simple_run.py +@@ -1,5 +1,6 @@ + # This file is part of cloud-init. See LICENSE file for license information. 
+ ++import copy + import os + + from cloudinit.tests import helpers +@@ -127,8 +128,9 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase): + """run_section forced skipped modules by using unverified_modules.""" + + # re-write cloud.cfg with unverified_modules override +- self.cfg['unverified_modules'] = ['spacewalk'] # Would have skipped +- cloud_cfg = util.yaml_dumps(self.cfg) ++ cfg = copy.deepcopy(self.cfg) ++ cfg['unverified_modules'] = ['spacewalk'] # Would have skipped ++ cloud_cfg = util.yaml_dumps(cfg) + util.ensure_dir(os.path.join(self.new_root, 'etc', 'cloud')) + util.write_file(os.path.join(self.new_root, 'etc', + 'cloud', 'cloud.cfg'), cloud_cfg) +@@ -150,4 +152,30 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase): + "running unverified_modules: 'spacewalk'", + self.logs.getvalue()) + ++ def test_none_ds_run_with_no_config_modules(self): ++ """run_section will report no modules run when none are configured.""" ++ ++ # re-write cloud.cfg with unverified_modules override ++ cfg = copy.deepcopy(self.cfg) ++ # Represent empty configuration in /etc/cloud/cloud.cfg ++ cfg['cloud_init_modules'] = None ++ cloud_cfg = util.yaml_dumps(cfg) ++ util.ensure_dir(os.path.join(self.new_root, 'etc', 'cloud')) ++ util.write_file(os.path.join(self.new_root, 'etc', ++ 'cloud', 'cloud.cfg'), cloud_cfg) ++ ++ initer = stages.Init() ++ initer.read_cfg() ++ initer.initialize() ++ initer.fetch() ++ initer.instancify() ++ initer.update() ++ initer.cloudify().run('consume_data', initer.consume_data, ++ args=[PER_INSTANCE], freq=PER_INSTANCE) ++ ++ mods = stages.Modules(initer) ++ (which_ran, failures) = mods.run_section('cloud_init_modules') ++ self.assertTrue(len(failures) == 0) ++ self.assertEqual([], which_ran) ++ + # vi: ts=4 expandtab +-- +1.7.12.4 +