diff --git a/.github/workflows/check_format.yml b/.github/workflows/check_format.yml index 6e9b0bbc..3b77b237 100644 --- a/.github/workflows/check_format.yml +++ b/.github/workflows/check_format.yml @@ -19,7 +19,7 @@ jobs: matrix: env: [ruff, mypy, pylint, black, isort] name: Check ${{ matrix.env }} - runs-on: ubuntu-24.04 + runs-on: ubuntu-latest steps: - name: "Checkout #1" uses: actions/checkout@v4 diff --git a/.github/workflows/shellcheck-debian-scripts.yml b/.github/workflows/shellcheck-debian-scripts.yml new file mode 100644 index 00000000..442f71d5 --- /dev/null +++ b/.github/workflows/shellcheck-debian-scripts.yml @@ -0,0 +1,60 @@ +name: ShellCheck Debian package scripts + +env: + # This is a space separated string for multiple globs + # Do not use curly braces as they will be treated as literal string in `git ls-files ${GLOBS_TO_SHELLCHECK}` + # If you decide to use globstar, make sure to use the bash shell and to `shopt -s globstar` + # Assumption in this workflow: the resolved filepaths do not contain spaces. + GLOBS_TO_SHELLCHECK: "debian/cherry-pick debian/*.config debian/*.postinst debian/*.postrm debian/*.preinst debian/*.prerm packages/debian/*.postrm" + +on: + pull_request: + # There is a known bug in Github but it will most probably not affect out use case + # https://github.com/orgs/community/discussions/118623#discussioncomment-9087833 + # When there are 2 PRs using the same source branch (actually the same head SHA to be more specific), with the base branch in one PR matching + # on.pull_request.branches and thee base branch in the second PR not matching this key, + # then the second PR will show these checks that were triggered by the first PR but not the second PR. 
+ branches: + - 'ubuntu/**' + - main + +concurrency: + group: 'ci-${{ github.workflow }}-${{ github.ref }}' + cancel-in-progress: true + +# Note: No need to specify the shell option in the shellcheck command +# as shellcheck reads and uses the shebang at the top of the linted scripts. +jobs: + shellcheck-on-matching-and-changed-files: + name: ShellCheck on matching files that have changed + runs-on: ubuntu-24.04 + steps: + - name: Repository checkout + uses: actions/checkout@v4 + + - name: Get all matching changed files + id: matching-changed-files + # For security, make sure to use a SHA not a version + uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 + with: + files: ${{ env.GLOBS_TO_SHELLCHECK }} + files_separator: " " + + - name: Run shellcheck on the matching changed files + env: + ALL_CHANGED_FILES: ${{ steps.matching-changed-files.outputs.all_changed_and_modified_files }} + run: | + if [ -z "${ALL_CHANGED_FILES}" ] + then + echo "There are no changed files in the repo which match the glob pattern \'${GLOBS_TO_SHELLCHECK}\' so shellcheck will not run" + else + RETAINED_CHANGED_FILES=$(git ls-files ${ALL_CHANGED_FILES} | tr '\n' ' ') #filter out deleted files + if [ -z "${RETAINED_CHANGED_FILES}" ] + then + echo "There are no changed files remaining in the repo which match the glob pattern \'${GLOBS_TO_SHELLCHECK}\' so shellcheck will not run" + else + echo "shellcheck will run on the remaining changed files: ${RETAINED_CHANGED_FILES}" + shellcheck ${RETAINED_CHANGED_FILES} + echo "shellcheck succeeded running on the remaining changed files" + fi + fi diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 44f13d1a..dff15895 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -18,7 +18,7 @@ jobs: stale-pr-message: | Hello! Thank you for this proposed change to cloud-init. This pull request is now marked as stale as it has not seen any activity in 14 days. 
If no activity occurs within the next 7 days, this pull request will automatically close. - If you are waiting for code review and you are seeing this message, apologies! Please reply, tagging TheRealFalcon, and he will ensure that someone takes a look soon. + If you are waiting for code review and you are seeing this message, apologies! Please reply, tagging blackboxsw, and he will ensure that someone takes a look soon. - (If the pull request is closed and you would like to continue working on it, please do tag TheRealFalcon to reopen it.) + (If the pull request is closed and you would like to continue working on it, please do tag blackboxsw to reopen it.) stale-pr-label: 'stale-pr' diff --git a/.gitignore b/.gitignore index 7d671fb9..13032535 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,7 @@ *.asc *.build *.buildinfo +*.builddir *.changes *.cover *.deb diff --git a/.pc/applied-patches b/.pc/applied-patches index 6de088d4..95bd7033 100644 --- a/.pc/applied-patches +++ b/.pc/applied-patches @@ -4,3 +4,4 @@ no-nocloud-network.patch grub-dpkg-support.patch no-remove-networkd-online.patch strip-invalid-mtu.patch +retain-setuptools.patch diff --git a/.pc/grub-dpkg-support.patch/cloudinit/config/schemas/schema-cloud-config-v1.json b/.pc/grub-dpkg-support.patch/cloudinit/config/schemas/schema-cloud-config-v1.json index e2501c53..a8c3af41 100644 --- a/.pc/grub-dpkg-support.patch/cloudinit/config/schemas/schema-cloud-config-v1.json +++ b/.pc/grub-dpkg-support.patch/cloudinit/config/schemas/schema-cloud-config-v1.json @@ -666,6 +666,47 @@ }, "minProperties": 1 }, + "rh_subscription_activation_key": { + "type": "string", + "description": "The activation key to use. Must be used with **org**. Should not be used with **username** or **password**." + }, + "rh_subscription_auto_attach": { + "type": "boolean", + "description": "Whether to attach subscriptions automatically." 
+ }, + "rh_subscription_service_level": { + "type": "string", + "description": "The service level to use when subscribing to RH repositories. ``auto_attach`` must be true for this to be used." + }, + "rh_subscription_add_pool": { + "type": "array", + "description": "A list of pool IDs add to the subscription.", + "items": { + "type": "string" + } + }, + "rh_subscription_enable_repo": { + "type": "array", + "description": "A list of repositories to enable.", + "items": { + "type": "string" + } + }, + "rh_subscription_disable_repo": { + "type": "array", + "description": "A list of repositories to disable.", + "items": { + "type": "string" + } + }, + "rh_subscription_rhsm_baseurl": { + "type": "string", + "description": "Sets the baseurl in ``/etc/rhsm/rhsm.conf``." + }, + "rh_subscription_server_hostname": { + "type": "string", + "description": "Sets the serverurl in ``/etc/rhsm/rhsm.conf``." + }, "modules_definition": { "type": "array", "items": { @@ -2580,18 +2621,23 @@ "properties": { "username": { "type": "string", - "description": "The username to use. Must be used with password. Should not be used with **activation-key** or **org**." + "description": "The username to use. Must be used with password. Should not be used with **activation_key** or **org**." }, "password": { "type": "string", - "description": "The password to use. Must be used with username. Should not be used with **activation-key** or **org**." + "description": "The password to use. Must be used with username. Should not be used with **activation_key** or **org**." + }, + "activation_key": { + "$ref": "#/$defs/rh_subscription_activation_key" }, "activation-key": { - "type": "string", - "description": "The activation key to use. Must be used with **org**. Should not be used with **username** or **password**." + "$ref": "#/$defs/rh_subscription_activation_key", + "deprecated": true, + "deprecated_version": "25.3", + "deprecated_description": "Use **activation_key** instead." 
}, "org": { - "description": "The organization to use. Must be used with **activation-key**. Should not be used with **username** or **password**.", + "description": "The organization to use. Must be used with **activation_key**. Should not be used with **username** or **password**.", "oneOf": [ { "type": "string" @@ -2604,44 +2650,140 @@ } ] }, + "auto_attach": { + "$ref": "#/$defs/rh_subscription_auto_attach" + }, "auto-attach": { - "type": "boolean", - "description": "Whether to attach subscriptions automatically." + "$ref": "#/$defs/rh_subscription_auto_attach", + "deprecated": true, + "deprecated_version": "25.3", + "deprecated_description": "Use **auto_attach** instead." + }, + "service_level": { + "$ref": "#/$defs/rh_subscription_service_level" }, "service-level": { - "type": "string", - "description": "The service level to use when subscribing to RH repositories. ``auto-attach`` must be true for this to be used." + "$ref": "#/$defs/rh_subscription_service_level", + "deprecated": true, + "deprecated_version": "25.3", + "deprecated_description": "Use **service_level** instead." + }, + "add_pool": { + "$ref": "#/$defs/rh_subscription_add_pool" }, "add-pool": { - "type": "array", - "description": "A list of pools ids add to the subscription.", - "items": { - "type": "string" - } + "$ref": "#/$defs/rh_subscription_add_pool", + "deprecated": true, + "deprecated_version": "25.3", + "deprecated_description": "Use **add_pool** instead." + }, + "enable_repo": { + "$ref": "#/$defs/rh_subscription_enable_repo" }, "enable-repo": { - "type": "array", - "description": "A list of repositories to enable.", - "items": { - "type": "string" - } + "$ref": "#/$defs/rh_subscription_enable_repo", + "deprecated": true, + "deprecated_version": "25.3", + "deprecated_description": "Use **enable_repo** instead." 
+ }, + "disable_repo": { + "$ref": "#/$defs/rh_subscription_disable_repo" }, "disable-repo": { - "type": "array", - "description": "A list of repositories to disable.", - "items": { - "type": "string" - } + "$ref": "#/$defs/rh_subscription_disable_repo", + "deprecated": true, + "deprecated_version": "25.3", + "deprecated_description": "Use **disable_repo** instead." }, - "rhsm-baseurl": { + "release_version": { "type": "string", - "description": "Sets the baseurl in ``/etc/rhsm/rhsm.conf``." + "description": "Sets the release_version via``subscription-manager release --set=`` then deletes the package manager cache ``/var/cache/{dnf,yum}`` . These steps are applied after any pool attachment and/or enabling/disabling repos. For more information about this key, check https://access.redhat.com/solutions/238533 ." + }, + "rhsm_baseurl": { + "$ref": "#/$defs/rh_subscription_rhsm_baseurl" + }, + "rhsm-baseurl": { + "$ref": "#/$defs/rh_subscription_rhsm_baseurl", + "deprecated": true, + "deprecated_version": "25.3", + "deprecated_description": "Use **rhsm_baseurl** instead." + }, + "server_hostname": { + "$ref": "#/$defs/rh_subscription_server_hostname" }, "server-hostname": { - "type": "string", - "description": "Sets the serverurl in ``/etc/rhsm/rhsm.conf``." + "$ref": "#/$defs/rh_subscription_server_hostname", + "deprecated": true, + "deprecated_version": "25.3", + "deprecated_description": "Use **server_hostname** instead." 
} - } + }, + "allOf": [ + { + "not": { + "required": [ + "activation_key", + "activation-key" + ] + } + }, + { + "not": { + "required": [ + "auto_attach", + "auto-attach" + ] + } + }, + { + "not": { + "required": [ + "service_level", + "service-level" + ] + } + }, + { + "not": { + "required": [ + "add_pool", + "add-pool" + ] + } + }, + { + "not": { + "required": [ + "enable_repo", + "enable-repo" + ] + } + }, + { + "not": { + "required": [ + "disable_repo", + "disable-repo" + ] + } + }, + { + "not": { + "required": [ + "rhsm_baseurl", + "rhsm-baseurl" + ] + } + }, + { + "not": { + "required": [ + "server_hostname", + "server-hostname" + ] + } + } + ] } } }, @@ -2692,11 +2834,6 @@ "type": "boolean", "description": "Enable 1-Wire interface. Default: ``false``.", "default": false - }, - "remote_gpio": { - "type": "boolean", - "description": "Enable remote GPIO interface. Default: ``false``.", - "default": false } } }, @@ -3328,7 +3465,7 @@ "properties": { "manage_etc_hosts": { "default": false, - "description": "Whether to manage ``/etc/hosts`` on the system. If ``true``, render the hosts file using ``/etc/cloud/templates/hosts.tmpl`` replacing ``$hostname`` and ``$fdqn``. If ``localhost``, append a ``127.0.1.1`` entry that resolves from FQDN and hostname every boot. Default: ``false``.", + "description": "Whether to manage ``/etc/hosts`` on the system. If ``true``, render the hosts file using ``/etc/cloud/templates/hosts.tmpl`` replacing ``$hostname`` and ``$fqdn``. If ``localhost``, append a ``127.0.1.1`` entry that resolves from FQDN and hostname every boot. 
Default: ``false``.", "oneOf": [ { "enum": [ diff --git a/.pc/no-nocloud-network.patch/cloudinit/util.py b/.pc/no-nocloud-network.patch/cloudinit/util.py index 348cd863..3e65c7ef 100644 --- a/.pc/no-nocloud-network.patch/cloudinit/util.py +++ b/.pc/no-nocloud-network.patch/cloudinit/util.py @@ -914,8 +914,18 @@ def center(text, fill, max_len): def del_dir(path): + ''' + Deletes a directory and all its contents by calling shutil.rmtree + Will ignore FileNotFoundError + + @param path: The path of the directory. + """ + ''' LOG.debug("Recursively deleting %s", path) - shutil.rmtree(path) + try: + shutil.rmtree(path) + except FileNotFoundError: + pass def read_optional_seed(fill, base="", ext="", timeout=5): diff --git a/.pc/no-nocloud-network.patch/tests/unittests/test_util.py b/.pc/no-nocloud-network.patch/tests/unittests/test_util.py index 3d681939..2651e2ff 100644 --- a/.pc/no-nocloud-network.patch/tests/unittests/test_util.py +++ b/.pc/no-nocloud-network.patch/tests/unittests/test_util.py @@ -13,6 +13,7 @@ import stat import tempfile from collections import deque +from contextlib import nullcontext as does_not_raise from pathlib import Path from textwrap import dedent from unittest import mock @@ -36,7 +37,7 @@ from cloudinit.sources import DataSourceHostname from cloudinit.subp import SubpResult from tests.unittests import helpers -from tests.unittests.helpers import CiTestCase, skipIf, skipUnlessJinja +from tests.unittests.helpers import random_string, skipIf, skipUnlessJinja LOG = logging.getLogger(__name__) M_PATH = "cloudinit.util." 
@@ -638,56 +639,52 @@ def test_fetch_ssl_details( assert 2 == m_isdir.call_count == m_isfile.call_count -class TestSymlink(CiTestCase): - def test_sym_link_simple(self): - tmpd = self.tmp_dir() - link = self.tmp_path("link", tmpd) - target = self.tmp_path("target", tmpd) +class TestSymlink: + def test_sym_link_simple(self, tmp_path): + link = str(tmp_path / "link") + target = str(tmp_path / "target") util.write_file(target, "hello") util.sym_link(target, link) - self.assertTrue(os.path.exists(link)) - self.assertTrue(os.path.islink(link)) - - def test_sym_link_source_exists(self): - tmpd = self.tmp_dir() - link = self.tmp_path("link", tmpd) - target = self.tmp_path("target", tmpd) - target2 = self.tmp_path("target2", tmpd) + assert os.path.exists(link) + assert os.path.islink(link) + + def test_sym_link_source_exists(self, tmp_path): + link = str(tmp_path / "link") + target = str(tmp_path / "target") + target2 = str(tmp_path / "target2") util.write_file(target, "hello") util.write_file(target2, "hello2") util.sym_link(target, link) - self.assertTrue(os.path.exists(link)) + assert os.path.exists(link) util.sym_link(target2, link, force=True) - self.assertTrue(os.path.exists(link)) - self.assertEqual("hello2", util.load_text_file(link)) + assert os.path.exists(link) + assert "hello2" == util.load_text_file(link) - def test_sym_link_dangling_link(self): - tmpd = self.tmp_dir() - link = self.tmp_path("link", tmpd) - target = self.tmp_path("target", tmpd) + def test_sym_link_dangling_link(self, tmp_path): + link = str(tmp_path / "link") + target = str(tmp_path / "target") util.sym_link(target, link) - self.assertTrue(os.path.islink(link)) - self.assertFalse(os.path.exists(link)) + assert os.path.islink(link) + assert not os.path.exists(link) util.sym_link(target, link, force=True) - self.assertTrue(os.path.islink(link)) - self.assertFalse(os.path.exists(link)) + assert os.path.islink(link) + assert not os.path.exists(link) - def test_sym_link_create_dangling(self): - 
tmpd = self.tmp_dir() - link = self.tmp_path("link", tmpd) - target = self.tmp_path("target", tmpd) + def test_sym_link_create_dangling(self, tmp_path): + link = str(tmp_path / "link") + target = str(tmp_path / "target") util.sym_link(target, link) - self.assertTrue(os.path.islink(link)) - self.assertFalse(os.path.exists(link)) + assert os.path.islink(link) + assert not os.path.exists(link) -class TestUptime(CiTestCase): +class TestUptime: @mock.patch(M_PATH + "boottime") @mock.patch(M_PATH + "os.path.exists") @mock.patch(M_PATH + "time.time") @@ -698,75 +695,67 @@ def test_uptime_non_linux_path(self, m_time, m_exists, m_boottime): m_time.return_value = boottime + uptime m_exists.return_value = False result = util.uptime() - self.assertEqual(str(uptime), result) + assert str(uptime) == result -class TestShellify(CiTestCase): +class TestShellify: def test_input_dict_raises_type_error(self): - self.assertRaisesRegex( - TypeError, - "Input.*was.*dict.*xpected", - util.shellify, - {"mykey": "myval"}, - ) + with pytest.raises(TypeError, match="Input.*was.*dict.*xpected"): + util.shellify( + {"mykey": "myval"}, + ) def test_input_str_raises_type_error(self): - self.assertRaisesRegex( - TypeError, "Input.*was.*str.*xpected", util.shellify, "foobar" - ) + with pytest.raises(TypeError, match="Input.*was.*str.*xpected"): + util.shellify("foobar") def test_value_with_int_raises_type_error(self): - self.assertRaisesRegex( - TypeError, "shellify.*int", util.shellify, ["foo", 1] - ) + with pytest.raises(TypeError, match="shellify.*int"): + util.shellify(["foo", 1]) def test_supports_strings_and_lists(self): - self.assertEqual( - "\n".join( - [ - "#!/bin/sh", - "echo hi mom", - "'echo' 'hi dad'", - "'echo' 'hi' 'sis'", - "", - ] - ), - util.shellify( - ["echo hi mom", ["echo", "hi dad"], ("echo", "hi", "sis")] - ), + assert "\n".join( + [ + "#!/bin/sh", + "echo hi mom", + "'echo' 'hi dad'", + "'echo' 'hi' 'sis'", + "", + ] + ) == util.shellify( + ["echo hi mom", ["echo", "hi 
dad"], ("echo", "hi", "sis")] ) def test_supports_comments(self): - self.assertEqual( - "\n".join(["#!/bin/sh", "echo start", "echo end", ""]), - util.shellify(["echo start", None, "echo end"]), - ) + assert "\n".join( + ["#!/bin/sh", "echo start", "echo end", ""] + ) == util.shellify(["echo start", None, "echo end"]) -class TestGetHostnameFqdn(CiTestCase): +class TestGetHostnameFqdn: def test_get_hostname_fqdn_from_only_cfg_fqdn(self): """When cfg only has the fqdn key, derive hostname and fqdn from it.""" hostname, fqdn, _ = util.get_hostname_fqdn( cfg={"fqdn": "myhost.domain.com"}, cloud=None ) - self.assertEqual("myhost", hostname) - self.assertEqual("myhost.domain.com", fqdn) + assert "myhost" == hostname + assert "myhost.domain.com" == fqdn def test_get_hostname_fqdn_from_cfg_fqdn_and_hostname(self): """When cfg has both fqdn and hostname keys, return them.""" hostname, fqdn, _ = util.get_hostname_fqdn( cfg={"fqdn": "myhost.domain.com", "hostname": "other"}, cloud=None ) - self.assertEqual("other", hostname) - self.assertEqual("myhost.domain.com", fqdn) + assert "other" == hostname + assert "myhost.domain.com" == fqdn def test_get_hostname_fqdn_from_cfg_hostname_with_domain(self): """When cfg has only hostname key which represents a fqdn, use that.""" hostname, fqdn, _ = util.get_hostname_fqdn( cfg={"hostname": "myhost.domain.com"}, cloud=None ) - self.assertEqual("myhost", hostname) - self.assertEqual("myhost.domain.com", fqdn) + assert "myhost" == hostname + assert "myhost.domain.com" == fqdn def test_get_hostname_fqdn_from_cfg_hostname_without_domain(self): """When cfg has a hostname without a '.' 
query cloud.get_hostname.""" @@ -777,8 +766,8 @@ def test_get_hostname_fqdn_from_cfg_hostname_without_domain(self): hostname, fqdn, _ = util.get_hostname_fqdn( cfg={"hostname": "myhost"}, cloud=cloud ) - self.assertEqual("myhost", hostname) - self.assertEqual("cloudhost.mycloud.com", fqdn) + assert "myhost" == hostname + assert "cloudhost.mycloud.com" == fqdn assert [ mock.call(fqdn=True, metadata_only=False) ] == cloud.get_hostname.call_args_list @@ -791,8 +780,8 @@ def test_get_hostname_fqdn_from_without_fqdn_or_hostname(self): DataSourceHostname("cloudhost", False), ) hostname, fqdn, _ = util.get_hostname_fqdn(cfg={}, cloud=cloud) - self.assertEqual("cloudhost", hostname) - self.assertEqual("cloudhost.mycloud.com", fqdn) + assert "cloudhost" == hostname + assert "cloudhost.mycloud.com" == fqdn assert [ mock.call(fqdn=True, metadata_only=False), mock.call(metadata_only=False), @@ -803,16 +792,16 @@ def test_get_hostname_fqdn_from_numeric_fqdn(self): hostname, fqdn, _ = util.get_hostname_fqdn( cfg={"fqdn": 12345}, cloud=None ) - self.assertEqual("12345", hostname) - self.assertEqual("12345", fqdn) + assert "12345" == hostname + assert "12345" == fqdn def test_get_hostname_fqdn_from_numeric_fqdn_with_domain(self): """When cfg fqdn is numeric with a domain, ensure correct parsing.""" hostname, fqdn, _ = util.get_hostname_fqdn( cfg={"fqdn": "12345.example.com"}, cloud=None ) - self.assertEqual("12345", hostname) - self.assertEqual("12345.example.com", fqdn) + assert "12345" == hostname + assert "12345.example.com" == fqdn def test_get_hostname_fqdn_from_passes_metadata_only_to_cloud(self): """Calls to cloud.get_hostname pass the metadata_only parameter.""" @@ -830,7 +819,7 @@ def test_get_hostname_fqdn_from_passes_metadata_only_to_cloud(self): ] == cloud.get_hostname.call_args_list -class TestBlkid(CiTestCase): +class TestBlkid: ids = { "id01": "1111-1111", "id02": "22222222-2222", @@ -858,8 +847,6 @@ class TestBlkid(CiTestCase): """ ) - maxDiff = None - def 
_get_expected(self): return { "/dev/loop0": {"DEVNAME": "/dev/loop0", "TYPE": "squashfs"}, @@ -898,7 +885,7 @@ def _get_expected(self): @mock.patch("cloudinit.subp.subp") def test_functional_blkid(self, m_subp): m_subp.return_value = SubpResult(self.blkid_out.format(**self.ids), "") - self.assertEqual(self._get_expected(), util.blkid()) + assert self._get_expected() == util.blkid() m_subp.assert_called_with( ["blkid", "-o", "full"], capture=True, decode="replace" ) @@ -907,7 +894,7 @@ def test_functional_blkid(self, m_subp): def test_blkid_no_cache_uses_no_cache(self, m_subp): """blkid should turn off cache if disable_cache is true.""" m_subp.return_value = SubpResult(self.blkid_out.format(**self.ids), "") - self.assertEqual(self._get_expected(), util.blkid(disable_cache=True)) + assert self._get_expected() == util.blkid(disable_cache=True) m_subp.assert_called_with( ["blkid", "-o", "full", "-c", "/dev/null"], capture=True, @@ -917,7 +904,7 @@ def test_blkid_no_cache_uses_no_cache(self, m_subp): @mock.patch("cloudinit.util.subp.which") @mock.patch("cloudinit.util.subp.subp") -class TestUdevadmSettle(CiTestCase): +class TestUdevadmSettle: def test_with_no_params(self, m_subp, m_which): """called with no parameters.""" m_which.side_effect = lambda m: m in ("udevadm",) @@ -931,20 +918,20 @@ def test_udevadm_not_present(self, m_subp, m_which): m_which.assert_called_once_with("udevadm") m_subp.assert_not_called() - def test_with_exists_and_not_exists(self, m_subp, m_which): + def test_with_exists_and_not_exists(self, m_subp, m_which, tmp_path): """with exists=file where file does not exist should invoke subp.""" m_which.side_effect = lambda m: m in ("udevadm",) - mydev = self.tmp_path("mydev") + mydev = str(tmp_path / "mydev") util.udevadm_settle(exists=mydev) m_subp.assert_called_once_with( ["udevadm", "settle", "--exit-if-exists=%s" % mydev] ) - def test_with_exists_and_file_exists(self, m_subp, m_which): + def test_with_exists_and_file_exists(self, m_subp, m_which, 
tmp_path): """with exists=file where file does exist should only invoke subp once for 'which' call.""" m_which.side_effect = lambda m: m in ("udevadm",) - mydev = self.tmp_path("mydev") + mydev = str(tmp_path / "mydev") util.write_file(mydev, "foo\n") util.udevadm_settle(exists=mydev) m_which.assert_called_once_with("udevadm") @@ -968,10 +955,10 @@ def test_with_timeout_string(self, m_subp, m_which): ["udevadm", "settle", "--timeout=%s" % timeout] ) - def test_with_exists_and_timeout(self, m_subp, m_which): + def test_with_exists_and_timeout(self, m_subp, m_which, tmp_path): """test call with both exists and timeout.""" m_which.side_effect = lambda m: m in ("udevadm",) - mydev = self.tmp_path("mydev") + mydev = str(tmp_path / "mydev") timeout = "3" util.udevadm_settle(exists=mydev, timeout=timeout) m_subp.assert_called_once_with( @@ -986,12 +973,14 @@ def test_with_exists_and_timeout(self, m_subp, m_which): def test_subp_exception_raises_to_caller(self, m_subp, m_which): m_which.side_effect = lambda m: m in ("udevadm",) m_subp.side_effect = subp.ProcessExecutionError("BOOM") - self.assertRaises(subp.ProcessExecutionError, util.udevadm_settle) + with pytest.raises(subp.ProcessExecutionError): + util.udevadm_settle() @mock.patch("os.path.exists") -class TestGetLinuxDistro(CiTestCase): - def setUp(self): +class TestGetLinuxDistro: + @pytest.fixture(autouse=True) + def fixtures(self): util.get_linux_distro.cache_clear() @classmethod @@ -1013,7 +1002,7 @@ def test_get_linux_distro_quoted_name(self, m_os_release, m_path_exists): m_os_release.return_value = OS_RELEASE_SLES m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual(("sles", "12.3", platform.machine()), dist) + assert ("sles", "12.3", platform.machine()) == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_distro_bare_name(self, m_os_release, m_path_exists): @@ -1022,7 +1011,7 @@ def test_get_linux_distro_bare_name(self, m_os_release, 
m_path_exists): m_os_release.return_value = OS_RELEASE_UBUNTU m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual(("ubuntu", "16.04", "xenial"), dist) + assert ("ubuntu", "16.04", "xenial") == dist @mock.patch("platform.system") @mock.patch("platform.release") @@ -1041,7 +1030,7 @@ def test_get_linux_freebsd( m_parse_redhat_release.return_value = {} util.is_BSD.cache_clear() dist = util.get_linux_distro() - self.assertEqual(("freebsd", "12.0-RELEASE-p10", ""), dist) + assert ("freebsd", "12.0-RELEASE-p10", "") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_centos6(self, m_os_release, m_path_exists): @@ -1049,7 +1038,7 @@ def test_get_linux_centos6(self, m_os_release, m_path_exists): m_os_release.return_value = REDHAT_RELEASE_CENTOS_6 m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists dist = util.get_linux_distro() - self.assertEqual(("centos", "6.10", "Final"), dist) + assert ("centos", "6.10", "Final") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_centos7_redhat_release(self, m_os_release, m_exists): @@ -1057,7 +1046,7 @@ def test_get_linux_centos7_redhat_release(self, m_os_release, m_exists): m_os_release.return_value = REDHAT_RELEASE_CENTOS_7 m_exists.side_effect = TestGetLinuxDistro.redhat_release_exists dist = util.get_linux_distro() - self.assertEqual(("centos", "7.5.1804", "Core"), dist) + assert ("centos", "7.5.1804", "Core") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_redhat7_osrelease(self, m_os_release, m_path_exists): @@ -1065,7 +1054,7 @@ def test_get_linux_redhat7_osrelease(self, m_os_release, m_path_exists): m_os_release.return_value = OS_RELEASE_REDHAT_7 m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual(("redhat", "7.5", "Maipo"), dist) + assert ("redhat", "7.5", "Maipo") == dist @mock.patch(M_PATH + "load_text_file") def 
test_get_linux_redhat7_rhrelease(self, m_os_release, m_path_exists): @@ -1073,7 +1062,7 @@ def test_get_linux_redhat7_rhrelease(self, m_os_release, m_path_exists): m_os_release.return_value = REDHAT_RELEASE_REDHAT_7 m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists dist = util.get_linux_distro() - self.assertEqual(("redhat", "7.5", "Maipo"), dist) + assert ("redhat", "7.5", "Maipo") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_redhat6_rhrelease(self, m_os_release, m_path_exists): @@ -1081,7 +1070,7 @@ def test_get_linux_redhat6_rhrelease(self, m_os_release, m_path_exists): m_os_release.return_value = REDHAT_RELEASE_REDHAT_6 m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists dist = util.get_linux_distro() - self.assertEqual(("redhat", "6.10", "Santiago"), dist) + assert ("redhat", "6.10", "Santiago") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_copr_centos(self, m_os_release, m_path_exists): @@ -1089,7 +1078,7 @@ def test_get_linux_copr_centos(self, m_os_release, m_path_exists): m_os_release.return_value = OS_RELEASE_CENTOS m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual(("centos", "7", "Core"), dist) + assert ("centos", "7", "Core") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_almalinux8_rhrelease(self, m_os_release, m_path_exists): @@ -1097,7 +1086,7 @@ def test_get_linux_almalinux8_rhrelease(self, m_os_release, m_path_exists): m_os_release.return_value = REDHAT_RELEASE_ALMALINUX_8 m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists dist = util.get_linux_distro() - self.assertEqual(("almalinux", "8.3", "Purple Manul"), dist) + assert ("almalinux", "8.3", "Purple Manul") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_almalinux8_osrelease(self, m_os_release, m_path_exists): @@ -1105,7 +1094,7 @@ def test_get_linux_almalinux8_osrelease(self, m_os_release, 
m_path_exists): m_os_release.return_value = OS_RELEASE_ALMALINUX_8 m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual(("almalinux", "8.3", "Purple Manul"), dist) + assert ("almalinux", "8.3", "Purple Manul") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_eurolinux7_rhrelease(self, m_os_release, m_path_exists): @@ -1113,7 +1102,7 @@ def test_get_linux_eurolinux7_rhrelease(self, m_os_release, m_path_exists): m_os_release.return_value = REDHAT_RELEASE_EUROLINUX_7 m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists dist = util.get_linux_distro() - self.assertEqual(("eurolinux", "7.9", "Minsk"), dist) + assert ("eurolinux", "7.9", "Minsk") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_eurolinux7_osrelease(self, m_os_release, m_path_exists): @@ -1121,7 +1110,7 @@ def test_get_linux_eurolinux7_osrelease(self, m_os_release, m_path_exists): m_os_release.return_value = OS_RELEASE_EUROLINUX_7 m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual(("eurolinux", "7.9", "Minsk"), dist) + assert ("eurolinux", "7.9", "Minsk") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_eurolinux8_rhrelease(self, m_os_release, m_path_exists): @@ -1129,7 +1118,7 @@ def test_get_linux_eurolinux8_rhrelease(self, m_os_release, m_path_exists): m_os_release.return_value = REDHAT_RELEASE_EUROLINUX_8 m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists dist = util.get_linux_distro() - self.assertEqual(("eurolinux", "8.4", "Vaduz"), dist) + assert ("eurolinux", "8.4", "Vaduz") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_eurolinux8_osrelease(self, m_os_release, m_path_exists): @@ -1137,7 +1126,7 @@ def test_get_linux_eurolinux8_osrelease(self, m_os_release, m_path_exists): m_os_release.return_value = OS_RELEASE_EUROLINUX_8 m_path_exists.side_effect = 
TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual(("eurolinux", "8.4", "Vaduz"), dist) + assert ("eurolinux", "8.4", "Vaduz") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_miraclelinux8_rhrelease( @@ -1147,7 +1136,7 @@ def test_get_linux_miraclelinux8_rhrelease( m_os_release.return_value = REDHAT_RELEASE_MIRACLELINUX_8 m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists dist = util.get_linux_distro() - self.assertEqual(("miracle", "8.4", "Peony"), dist) + assert ("miracle", "8.4", "Peony") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_miraclelinux8_osrelease( @@ -1157,7 +1146,7 @@ def test_get_linux_miraclelinux8_osrelease( m_os_release.return_value = OS_RELEASE_MIRACLELINUX_8 m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual(("miraclelinux", "8", "Peony"), dist) + assert ("miraclelinux", "8", "Peony") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_rocky8_rhrelease(self, m_os_release, m_path_exists): @@ -1165,7 +1154,7 @@ def test_get_linux_rocky8_rhrelease(self, m_os_release, m_path_exists): m_os_release.return_value = REDHAT_RELEASE_ROCKY_8 m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists dist = util.get_linux_distro() - self.assertEqual(("rocky", "8.3", "Green Obsidian"), dist) + assert ("rocky", "8.3", "Green Obsidian") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_rocky8_osrelease(self, m_os_release, m_path_exists): @@ -1173,7 +1162,7 @@ def test_get_linux_rocky8_osrelease(self, m_os_release, m_path_exists): m_os_release.return_value = OS_RELEASE_ROCKY_8 m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual(("rocky", "8.3", "Green Obsidian"), dist) + assert ("rocky", "8.3", "Green Obsidian") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_virtuozzo8_rhrelease(self, 
m_os_release, m_path_exists): @@ -1181,7 +1170,7 @@ def test_get_linux_virtuozzo8_rhrelease(self, m_os_release, m_path_exists): m_os_release.return_value = REDHAT_RELEASE_VIRTUOZZO_8 m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists dist = util.get_linux_distro() - self.assertEqual(("virtuozzo", "8", "Virtuozzo Linux"), dist) + assert ("virtuozzo", "8", "Virtuozzo Linux") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_virtuozzo8_osrelease(self, m_os_release, m_path_exists): @@ -1189,7 +1178,7 @@ def test_get_linux_virtuozzo8_osrelease(self, m_os_release, m_path_exists): m_os_release.return_value = OS_RELEASE_VIRTUOZZO_8 m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual(("virtuozzo", "8", "Virtuozzo Linux"), dist) + assert ("virtuozzo", "8", "Virtuozzo Linux") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_cloud8_rhrelease(self, m_os_release, m_path_exists): @@ -1197,7 +1186,7 @@ def test_get_linux_cloud8_rhrelease(self, m_os_release, m_path_exists): m_os_release.return_value = REDHAT_RELEASE_CLOUDLINUX_8 m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists dist = util.get_linux_distro() - self.assertEqual(("cloudlinux", "8.4", "Valery Rozhdestvensky"), dist) + assert ("cloudlinux", "8.4", "Valery Rozhdestvensky") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_cloud8_osrelease(self, m_os_release, m_path_exists): @@ -1205,7 +1194,7 @@ def test_get_linux_cloud8_osrelease(self, m_os_release, m_path_exists): m_os_release.return_value = OS_RELEASE_CLOUDLINUX_8 m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual(("cloudlinux", "8.4", "Valery Rozhdestvensky"), dist) + assert ("cloudlinux", "8.4", "Valery Rozhdestvensky") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_debian(self, m_os_release, m_path_exists): @@ -1213,7 +1202,7 @@ def 
test_get_linux_debian(self, m_os_release, m_path_exists): m_os_release.return_value = OS_RELEASE_DEBIAN m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual(("debian", "9", "stretch"), dist) + assert ("debian", "9", "stretch") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_openeuler(self, m_os_release, m_path_exists): @@ -1221,7 +1210,7 @@ def test_get_linux_openeuler(self, m_os_release, m_path_exists): m_os_release.return_value = OS_RELEASE_OPENEULER_20 m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual(("openEuler", "20.03", "LTS-SP2"), dist) + assert ("openEuler", "20.03", "LTS-SP2") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_opencloudos(self, m_os_release, m_path_exists): @@ -1229,7 +1218,7 @@ def test_get_linux_opencloudos(self, m_os_release, m_path_exists): m_os_release.return_value = OS_RELEASE_OPENCLOUDOS_8 m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual(("OpenCloudOS", "8.6", ""), dist) + assert ("OpenCloudOS", "8.6", "") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_tencentos(self, m_os_release, m_path_exists): @@ -1237,7 +1226,7 @@ def test_get_linux_tencentos(self, m_os_release, m_path_exists): m_os_release.return_value = OS_RELEASE_TENCENTOS_3 m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual(("TencentOS", "3.1", ""), dist) + assert ("TencentOS", "3.1", "") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_opensuse(self, m_os_release, m_path_exists): @@ -1247,7 +1236,7 @@ def test_get_linux_opensuse(self, m_os_release, m_path_exists): m_os_release.return_value = OS_RELEASE_OPENSUSE m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual(("opensuse", "42.3", 
platform.machine()), dist) + assert ("opensuse", "42.3", platform.machine()) == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_opensuse_l15(self, m_os_release, m_path_exists): @@ -1257,7 +1246,7 @@ def test_get_linux_opensuse_l15(self, m_os_release, m_path_exists): m_os_release.return_value = OS_RELEASE_OPENSUSE_L15 m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual(("opensuse-leap", "15.0", platform.machine()), dist) + assert ("opensuse-leap", "15.0", platform.machine()) == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_opensuse_tw(self, m_os_release, m_path_exists): @@ -1267,9 +1256,7 @@ def test_get_linux_opensuse_tw(self, m_os_release, m_path_exists): m_os_release.return_value = OS_RELEASE_OPENSUSE_TW m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual( - ("opensuse-tumbleweed", "20180920", platform.machine()), dist - ) + assert ("opensuse-tumbleweed", "20180920", platform.machine()) == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_photon_os_release(self, m_os_release, m_path_exists): @@ -1277,7 +1264,7 @@ def test_get_linux_photon_os_release(self, m_os_release, m_path_exists): m_os_release.return_value = OS_RELEASE_PHOTON m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual(("photon", "4.0", "VMware Photon OS/Linux"), dist) + assert ("photon", "4.0", "VMware Photon OS/Linux") == dist @mock.patch("cloudinit.util.load_text_file") def test_get_linux_mariner_os_release(self, m_os_release, m_path_exists): @@ -1285,7 +1272,7 @@ def test_get_linux_mariner_os_release(self, m_os_release, m_path_exists): m_os_release.return_value = OS_RELEASE_MARINER m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual(("mariner", "2.0", ""), dist) + assert ("mariner", "2.0", "") == dist 
@mock.patch("cloudinit.util.load_text_file") def test_get_linux_azurelinux_os_release( @@ -1295,7 +1282,7 @@ def test_get_linux_azurelinux_os_release( m_os_release.return_value = OS_RELEASE_AZURELINUX m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual(("azurelinux", "3.0", ""), dist) + assert ("azurelinux", "3.0", "") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_openmandriva(self, m_os_release, m_path_exists): @@ -1303,7 +1290,7 @@ def test_get_linux_openmandriva(self, m_os_release, m_path_exists): m_os_release.return_value = OS_RELEASE_OPENMANDRIVA m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual(("openmandriva", "4.90", "nickel"), dist) + assert ("openmandriva", "4.90", "nickel") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_cos(self, m_os_release, m_path_exists): @@ -1311,7 +1298,7 @@ def test_get_linux_cos(self, m_os_release, m_path_exists): m_os_release.return_value = OS_RELEASE_COS m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual(("cos", "93", ""), dist) + assert ("cos", "93", "") == dist @mock.patch("platform.system") @mock.patch("platform.dist", create=True) @@ -1323,7 +1310,7 @@ def test_get_linux_distro_no_data( m_platform_system.return_value = "Linux" m_path_exists.return_value = 0 dist = util.get_linux_distro() - self.assertEqual(("", "", ""), dist) + assert ("", "", "") == dist @mock.patch("platform.system") @mock.patch("platform.dist", create=True) @@ -1336,7 +1323,7 @@ def test_get_linux_distro_no_impl( m_platform_system.return_value = "Linux" m_path_exists.return_value = 0 dist = util.get_linux_distro() - self.assertEqual(("", "", ""), dist) + assert ("", "", "") == dist @mock.patch("platform.system") @mock.patch("platform.dist", create=True) @@ -1348,7 +1335,7 @@ def test_get_linux_distro_plat_data( 
m_platform_system.return_value = "Linux" m_path_exists.return_value = 0 dist = util.get_linux_distro() - self.assertEqual(("foo", "1.1", "aarch64"), dist) + assert ("foo", "1.1", "aarch64") == dist class TestGetVariant: @@ -1396,40 +1383,36 @@ def test_get_variant(self, info, expected_variant): assert util._get_variant(info) == expected_variant -class TestJsonDumps(CiTestCase): +class TestJsonDumps: def test_is_str(self): """json_dumps should return a string.""" - self.assertTrue( - isinstance(atomic_helper.json_dumps({"abc": "123"}), str) - ) + assert isinstance(atomic_helper.json_dumps({"abc": "123"}), str) def test_utf8(self): smiley = "\\ud83d\\ude03" - self.assertEqual( - {"smiley": smiley}, - json.loads(atomic_helper.json_dumps({"smiley": smiley})), + assert {"smiley": smiley} == json.loads( + atomic_helper.json_dumps({"smiley": smiley}) ) def test_non_utf8(self): blob = b"\xba\x03Qx-#y\xea" - self.assertEqual( - {"blob": "ci-b64:" + base64.b64encode(blob).decode("utf-8")}, - json.loads(atomic_helper.json_dumps({"blob": blob})), - ) + assert { + "blob": "ci-b64:" + base64.b64encode(blob).decode("utf-8") + } == json.loads(atomic_helper.json_dumps({"blob": blob})) @mock.patch("os.path.exists") -class TestIsLXD(CiTestCase): +class TestIsLXD: def test_is_lxd_true_on_sock_device(self, m_exists): """When lxd's /dev/lxd/sock exists, is_lxd returns true.""" m_exists.return_value = True - self.assertTrue(util.is_lxd()) + assert util.is_lxd() is True m_exists.assert_called_once_with("/dev/lxd/sock") def test_is_lxd_false_when_sock_device_absent(self, m_exists): """When lxd's /dev/lxd/sock is absent, is_lxd returns false.""" m_exists.return_value = False - self.assertFalse(util.is_lxd()) + assert not util.is_lxd() m_exists.assert_called_once_with("/dev/lxd/sock") @@ -1438,7 +1421,7 @@ class TestReadCcFromCmdline: "cmdline,expected_cfg", [ # Return None if cmdline has no cc:end_cc content. 
- pytest.param(CiTestCase.random_string(), None, id="random_string"), + pytest.param(random_string(), None, id="random_string"), # Return None if YAML content is empty string. ("foo cc: end_cc bar", None), # Return expected dictionary without trailing end_cc marker. @@ -1813,31 +1796,31 @@ def test_not_found_no_default(self): """None is returned if key is not found and no default given.""" config = {} result = util.get_cfg_option_list(config, "key") - self.assertIsNone(result) + assert result is None def test_not_found_with_default(self): """Default is returned if key is not found.""" config = {} result = util.get_cfg_option_list(config, "key", default=["DEFAULT"]) - self.assertEqual(["DEFAULT"], result) + assert ["DEFAULT"] == result def test_found_with_default(self): """Default is not returned if key is found.""" config = {"key": ["value1"]} result = util.get_cfg_option_list(config, "key", default=["DEFAULT"]) - self.assertEqual(["value1"], result) + assert ["value1"] == result def test_found_convert_to_list(self): """Single string is converted to one element list.""" config = {"key": "value1"} result = util.get_cfg_option_list(config, "key") - self.assertEqual(["value1"], result) + assert ["value1"] == result def test_value_is_none(self): """If value is None empty list is returned.""" config = {"key": None} result = util.get_cfg_option_list(config, "key") - self.assertEqual([], result) + assert [] == result class TestWriteFile(helpers.TestCase): @@ -1853,13 +1836,13 @@ def test_basic_usage(self): util.write_file(path, contents) - self.assertTrue(os.path.exists(path)) - self.assertTrue(os.path.isfile(path)) + assert os.path.exists(path) + assert os.path.isfile(path) with open(path) as f: create_contents = f.read() - self.assertEqual(contents, create_contents) + assert contents == create_contents file_stat = os.stat(path) - self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode)) + assert 0o644 == stat.S_IMODE(file_stat.st_mode) def 
test_dir_is_created_if_required(self): """Verifiy that directories are created is required.""" @@ -1869,8 +1852,8 @@ def test_dir_is_created_if_required(self): util.write_file(path, contents) - self.assertTrue(os.path.isdir(dirname)) - self.assertTrue(os.path.isfile(path)) + assert os.path.isdir(dirname) + assert os.path.isfile(path) def test_dir_ownership(self): """Verifiy that directories is created with appropriate ownership.""" @@ -1897,10 +1880,10 @@ def test_dir_is_not_created_if_ensure_dir_false(self): path = os.path.join(dirname, "NewFile.txt") contents = "Hey there" - with self.assertRaises(FileNotFoundError): + with pytest.raises(FileNotFoundError): util.write_file(path, contents, ensure_dir_exists=False) - self.assertFalse(os.path.isdir(dirname)) + assert not os.path.isdir(dirname) def test_explicit_mode(self): """Verify explicit file mode works properly.""" @@ -1909,10 +1892,10 @@ def test_explicit_mode(self): util.write_file(path, contents, mode=0o666) - self.assertTrue(os.path.exists(path)) - self.assertTrue(os.path.isfile(path)) + assert os.path.exists(path) + assert os.path.isfile(path) file_stat = os.stat(path) - self.assertEqual(0o666, stat.S_IMODE(file_stat.st_mode)) + assert 0o666 == stat.S_IMODE(file_stat.st_mode) def test_preserve_mode_no_existing(self): """Verify that file is created with mode 0o644 if preserve_mode @@ -1922,10 +1905,10 @@ def test_preserve_mode_no_existing(self): util.write_file(path, contents, preserve_mode=True) - self.assertTrue(os.path.exists(path)) - self.assertTrue(os.path.isfile(path)) + assert os.path.exists(path) + assert os.path.isfile(path) file_stat = os.stat(path) - self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode)) + assert 0o644 == stat.S_IMODE(file_stat.st_mode) def test_preserve_mode_with_existing(self): """Verify that file is created using mode of existing file @@ -1938,10 +1921,10 @@ def test_preserve_mode_with_existing(self): util.write_file(path, contents, preserve_mode=True) - 
self.assertTrue(os.path.exists(path)) - self.assertTrue(os.path.isfile(path)) + assert os.path.exists(path) + assert os.path.isfile(path) file_stat = os.stat(path) - self.assertEqual(0o666, stat.S_IMODE(file_stat.st_mode)) + assert 0o666 == stat.S_IMODE(file_stat.st_mode) def test_custom_omode(self): """Verify custom omode works properly.""" @@ -1953,11 +1936,11 @@ def test_custom_omode(self): f.write(b"LINE1\n") util.write_file(path, contents, omode="a") - self.assertTrue(os.path.exists(path)) - self.assertTrue(os.path.isfile(path)) + assert os.path.exists(path) + assert os.path.isfile(path) with open(path) as f: create_contents = f.read() - self.assertEqual("LINE1\nHey there", create_contents) + assert "LINE1\nHey there" == create_contents def test_restorecon_if_possible_is_called(self): """Make sure the selinux guard is called correctly.""" @@ -1971,10 +1954,10 @@ def test_restorecon_if_possible_is_called(self): importer, "import_module", return_value=fake_se ) as mockobj: with util.SeLinuxGuard(my_file) as is_on: - self.assertTrue(is_on) + assert is_on - self.assertEqual(1, len(fake_se.restored)) - self.assertEqual(my_file, fake_se.restored[0]) + assert 1 == len(fake_se.restored) + assert my_file == fake_se.restored[0] mockobj.assert_called_once_with("selinux") @@ -1986,13 +1969,13 @@ def setUp(self): self.addCleanup(shutil.rmtree, self.tmp) def assertDirEmpty(self, dirname): - self.assertEqual([], os.listdir(dirname)) + assert [] == os.listdir(dirname) def test_does_not_delete_dir(self): """Ensure directory itself is not deleted.""" util.delete_dir_contents(self.tmp) - self.assertTrue(os.path.isdir(self.tmp)) + assert os.path.isdir(self.tmp) self.assertDirEmpty(self.tmp) def test_deletes_files(self): @@ -2045,11 +2028,50 @@ def test_deletes_symlinks(self): self.assertDirEmpty(self.tmp) +class TestDelDir: + """ + Test the del_dir function + """ + + def test_del_dir_existing_directory(self, tmpdir): + """ + An existing directory can be deleted without issues + 
""" + assert os.path.exists(tmpdir) + with does_not_raise(): + util.del_dir(tmpdir) + assert not os.path.exists(tmpdir) + + def test_del_dir_file_not_found(self): + """ + Should not raise FileNotFoundError + """ + non_existing_dir = "/blabla" + assert not os.path.exists(non_existing_dir) + with does_not_raise(): + util.del_dir(non_existing_dir) + assert not os.path.exists(non_existing_dir) + + def test_del_dir_generic_errors(self, mocker): + """ + If shutil.rmtree raises a non-FileNotFoundError , del_dir should + raise this error + """ + mocked_side_effect = PermissionError + mock_rmtree = mocker.patch( + "shutil.rmtree", + side_effect=mocked_side_effect, + ) + with pytest.raises(mocked_side_effect): + util.del_dir("somedir") + assert mock_rmtree.call_count == 1 + + class TestKeyValStrings(helpers.TestCase): def test_keyval_str_to_dict(self): expected = {"1": "one", "2": "one+one", "ro": True} cmdline = "1=one ro 2=one+one" - self.assertEqual(expected, util.keyval_str_to_dict(cmdline)) + assert expected == util.keyval_str_to_dict(cmdline) class TestGetCmdline(helpers.TestCase): @@ -2058,7 +2080,7 @@ def test_cmdline_reads_debug_env(self): "os.environ", values={"DEBUG_PROC_CMDLINE": "abcd 123"} ): ret = util.get_cmdline() - self.assertEqual("abcd 123", ret) + assert "abcd 123" == ret class TestFipsEnabled: @@ -2086,57 +2108,49 @@ def fake_load_file(path): assert expected is util.fips_enabled() -class TestLoadYaml(helpers.CiTestCase): +class TestLoadYaml: mydefault = "7b03a8ebace993d806255121073fed52" - with_logs = True def test_simple(self): mydata = {"1": "one", "2": "two"} - self.assertEqual(util.load_yaml(yaml.dump(mydata)), mydata) + assert util.load_yaml(yaml.dump(mydata)) == mydata - def test_nonallowed_returns_default(self): + def test_nonallowed_returns_default(self, caplog): """Any unallowed types result in returning default; log the issue.""" # for now, anything not in the allowed list just returns the default. 
myyaml = yaml.dump({"1": "one"}) - self.assertEqual( - util.load_yaml( - blob=myyaml, default=self.mydefault, allowed=(str,) - ), - self.mydefault, + assert ( + util.load_yaml(blob=myyaml, default=self.mydefault, allowed=(str,)) + == self.mydefault ) regex = re.compile( r"Yaml load allows \(<(class|type) \'str\'>,\) root types, but" r" got dict" ) - self.assertTrue( - regex.search(self.logs.getvalue()), - msg="Missing expected yaml load error", - ) + assert regex.search(caplog.text), "Missing expected yaml load error" - def test_bogus_scan_error_returns_default(self): + def test_bogus_scan_error_returns_default(self, caplog): """On Yaml scan error, load_yaml returns the default and logs issue.""" badyaml = "1\n 2:" - self.assertEqual( - util.load_yaml(blob=badyaml, default=self.mydefault), - self.mydefault, + assert ( + util.load_yaml(blob=badyaml, default=self.mydefault) + == self.mydefault ) - self.assertIn( + assert ( "Failed loading yaml blob. Invalid format at line 2 column 3:" - ' "mapping values are not allowed here', - self.logs.getvalue(), + ' "mapping values are not allowed here' in caplog.text ) - def test_bogus_parse_error_returns_default(self): + def test_bogus_parse_error_returns_default(self, caplog): """On Yaml parse error, load_yaml returns default and logs issue.""" badyaml = "{}}" - self.assertEqual( - util.load_yaml(blob=badyaml, default=self.mydefault), - self.mydefault, + assert ( + util.load_yaml(blob=badyaml, default=self.mydefault) + == self.mydefault ) - self.assertIn( + assert ( "Failed loading yaml blob. 
Invalid format at line 1 column 3:" - " \"expected '', but found '}'", - self.logs.getvalue(), + " \"expected '', but found '}'" in caplog.text ) def test_unsafe_types(self): @@ -2148,27 +2162,24 @@ def test_unsafe_types(self): 3, ) ) - self.assertEqual( - util.load_yaml(blob=unsafe_yaml, default=self.mydefault), - self.mydefault, + assert ( + util.load_yaml(blob=unsafe_yaml, default=self.mydefault) + == self.mydefault ) def test_python_unicode(self): # complex type of python/unicode is explicitly allowed myobj = {"1": "FOOBAR"} safe_yaml = yaml.dump(myobj) - self.assertEqual( - util.load_yaml(blob=safe_yaml, default=self.mydefault), myobj - ) + assert util.load_yaml(blob=safe_yaml, default=self.mydefault) == myobj def test_none_returns_default(self): """If yaml.load returns None, then default should be returned.""" blobs = ("", " ", "# foo\n", "#") mdef = self.mydefault - self.assertEqual( - [(b, self.mydefault) for b in blobs], - [(b, util.load_yaml(blob=b, default=mdef)) for b in blobs], - ) + assert [(b, self.mydefault) for b in blobs] == [ + (b, util.load_yaml(blob=b, default=mdef)) for b in blobs + ] class TestMountinfoParsing: @@ -2264,106 +2275,91 @@ def test_parse_mount_with_zfs(self, mount_out): assert ("vmzroot/var/tmp", "zfs", "/var/tmp") == ret -class TestIsX86(helpers.CiTestCase): +class TestIsX86: def test_is_x86_matches_x86_types(self): """is_x86 returns True if CPU architecture matches.""" matched_arches = ["x86_64", "i386", "i586", "i686"] for arch in matched_arches: - self.assertTrue( - util.is_x86(arch), 'Expected is_x86 for arch "%s"' % arch - ) + assert util.is_x86(arch), 'Expected is_x86 for arch "%s"' % arch def test_is_x86_unmatched_types(self): """is_x86 returns Fale on non-intel x86 architectures.""" unmatched_arches = ["ia64", "9000/800", "arm64v71"] for arch in unmatched_arches: - self.assertFalse( - util.is_x86(arch), 'Expected not is_x86 for arch "%s"' % arch + assert not util.is_x86(arch), ( + 'Expected not is_x86 for arch "%s"' % 
arch ) @mock.patch(M_PATH + "os.uname") def test_is_x86_calls_uname_for_architecture(self, m_uname): """is_x86 returns True if platform from uname matches.""" m_uname.return_value = [0, 1, 2, 3, "x86_64"] - self.assertTrue(util.is_x86()) + assert util.is_x86() -class TestGetConfigLogfiles(helpers.CiTestCase): +class TestGetConfigLogfiles: def test_empty_cfg_returns_empty_list(self): """An empty config passed to get_config_logfiles returns empty list.""" - self.assertEqual([], util.get_config_logfiles(None)) - self.assertEqual([], util.get_config_logfiles({})) + assert [] == util.get_config_logfiles(None) + assert [] == util.get_config_logfiles({}) def test_default_log_file_present(self): """When default_log_file is set get_config_logfiles finds it.""" - self.assertEqual( - ["/my.log"], util.get_config_logfiles({"def_log_file": "/my.log"}) + assert ["/my.log"] == util.get_config_logfiles( + {"def_log_file": "/my.log"} ) def test_output_logs_parsed_when_teeing_files(self): """When output configuration is parsed when teeing files.""" - self.assertEqual( - ["/himom.log", "/my.log"], - sorted( - util.get_config_logfiles( - { - "def_log_file": "/my.log", - "output": {"all": "|tee -a /himom.log"}, - } - ) - ), + assert ["/himom.log", "/my.log"] == sorted( + util.get_config_logfiles( + { + "def_log_file": "/my.log", + "output": {"all": "|tee -a /himom.log"}, + } + ) ) def test_output_logs_parsed_when_redirecting(self): """When output configuration is parsed when redirecting to a file.""" - self.assertEqual( - ["/my.log", "/test.log"], - sorted( - util.get_config_logfiles( - { - "def_log_file": "/my.log", - "output": {"all": ">/test.log"}, - } - ) - ), + assert ["/my.log", "/test.log"] == sorted( + util.get_config_logfiles( + { + "def_log_file": "/my.log", + "output": {"all": ">/test.log"}, + } + ) ) def test_output_logs_parsed_when_appending(self): """When output configuration is parsed when appending to a file.""" - self.assertEqual( - ["/my.log", "/test.log"], - sorted( 
- util.get_config_logfiles( - { - "def_log_file": "/my.log", - "output": {"all": ">> /test.log"}, - } - ) - ), + assert ["/my.log", "/test.log"] == sorted( + util.get_config_logfiles( + { + "def_log_file": "/my.log", + "output": {"all": ">> /test.log"}, + } + ) ) - def test_output_logs_parsed_when_teeing_files_and_rotated(self): + def test_output_logs_parsed_when_teeing_files_and_rotated(self, tmp_path): """When output configuration is parsed when teeing files and rotated log files are present.""" - tmpd = self.tmp_dir() - log1 = self.tmp_path("my.log", tmpd) - log1_rotated = self.tmp_path("my.log.1.gz", tmpd) - log2 = self.tmp_path("himom.log", tmpd) - log2_rotated = self.tmp_path("himom.log.1.gz", tmpd) + log1 = str(tmp_path / "my.log") + log1_rotated = str(tmp_path / "my.log.1.gz") + log2 = str(tmp_path / "himom.log") + log2_rotated = str(tmp_path / "himom.log.1.gz") util.write_file(log1_rotated, "hello") util.write_file(log2_rotated, "hello") - self.assertEqual( - [log2, log2_rotated, log1, log1_rotated], - sorted( - util.get_config_logfiles( - { - "def_log_file": str(log1), - "output": {"all": f"|tee -a {log2}"}, - } - ) - ), + assert [log2, log2_rotated, log1, log1_rotated] == sorted( + util.get_config_logfiles( + { + "def_log_file": str(log1), + "output": {"all": f"|tee -a {log2}"}, + } + ) ) @@ -2461,7 +2457,7 @@ def test_given_log_level_used(self): class TestMessageFromString(helpers.TestCase): def test_unicode_not_messed_up(self): roundtripped = util.message_from_string("\n").as_string() - self.assertNotIn("\x00", roundtripped) + assert "\x00" not in roundtripped class TestReadOptionalSeed: @@ -2663,10 +2659,10 @@ def test_unicode_not_messed_up(self): sdir = self.tmp + os.path.sep found_md, found_ud, found_vd, found_network = util.read_seeded(sdir) - self.assertEqual(found_md, {"key1": "val1"}) - self.assertEqual(found_ud, ud) - self.assertEqual(found_vd, vd) - self.assertEqual(found_network, {"test": "true"}) + assert found_md == {"key1": "val1"} + 
assert found_ud == ud + assert found_vd == vd + assert found_network == {"test": "true"} class TestEncode(helpers.TestCase): @@ -2675,7 +2671,7 @@ class TestEncode(helpers.TestCase): def test_decode_binary_plain_text_with_hex(self): blob = "BOOTABLE_FLAG=\x80init=/bin/systemd" text = util.decode_binary(blob) - self.assertEqual(text, blob) + assert text == blob class TestProcessExecutionError(helpers.TestCase): @@ -2694,37 +2690,30 @@ def test_pexec_error_indent_text(self): error = subp.ProcessExecutionError() msg = "abc\ndef" formatted = "abc\n{0}def".format(" " * 4) - self.assertEqual(error._indent_text(msg, indent_level=4), formatted) - self.assertEqual( - error._indent_text(msg.encode(), indent_level=4), - formatted.encode(), - ) - self.assertIsInstance( - error._indent_text(msg.encode()), type(msg.encode()) + assert error._indent_text(msg, indent_level=4) == formatted + assert ( + error._indent_text(msg.encode(), indent_level=4) + == formatted.encode() ) + assert isinstance(error._indent_text(msg.encode()), type(msg.encode())) def test_pexec_error_type(self): - self.assertIsInstance(subp.ProcessExecutionError(), IOError) + assert isinstance(subp.ProcessExecutionError(), IOError) def test_pexec_error_empty_msgs(self): error = subp.ProcessExecutionError() - self.assertTrue( - all( - attr == self.empty_attr - for attr in (error.stderr, error.stdout, error.reason) - ) + assert all( + attr == self.empty_attr + for attr in (error.stderr, error.stdout, error.reason) ) - self.assertEqual(error.description, self.empty_description) - self.assertEqual( - str(error), - self.template.format( - description=self.empty_description, - exit_code=self.empty_attr, - reason=self.empty_attr, - stdout=self.empty_attr, - stderr=self.empty_attr, - cmd=self.empty_attr, - ), + assert error.description == self.empty_description + assert str(error) == self.template.format( + description=self.empty_description, + exit_code=self.empty_attr, + reason=self.empty_attr, + stdout=self.empty_attr, 
+ stderr=self.empty_attr, + cmd=self.empty_attr, ) def test_pexec_error_single_line_msgs(self): @@ -2735,16 +2724,13 @@ def test_pexec_error_single_line_msgs(self): error = subp.ProcessExecutionError( stdout=stdout_msg, stderr=stderr_msg, exit_code=3, cmd=cmd ) - self.assertEqual( - str(error), - self.template.format( - description=self.empty_description, - stdout=stdout_msg, - stderr=stderr_msg, - exit_code=str(exit_code), - reason=self.empty_attr, - cmd=cmd, - ), + assert str(error) == self.template.format( + description=self.empty_description, + stdout=stdout_msg, + stderr=stderr_msg, + exit_code=str(exit_code), + reason=self.empty_attr, + cmd=cmd, ) def test_pexec_error_multi_line_msgs(self): @@ -2754,24 +2740,21 @@ def test_pexec_error_multi_line_msgs(self): error = subp.ProcessExecutionError( stdout=stdout_msg, stderr=stderr_msg ) - self.assertEqual( - str(error), - "\n".join( - ( - "{description}", - "Command: {empty_attr}", - "Exit code: {empty_attr}", - "Reason: {empty_attr}", - "Stdout: multi", - " line", - " output message", - "Stderr: multi", - " line", - " error message", - ) - ).format( - description=self.empty_description, empty_attr=self.empty_attr - ), + assert str(error) == "\n".join( + ( + "{description}", + "Command: {empty_attr}", + "Exit code: {empty_attr}", + "Reason: {empty_attr}", + "Stdout: multi", + " line", + " output message", + "Stderr: multi", + " line", + " error message", + ) + ).format( + description=self.empty_description, empty_attr=self.empty_attr ) @@ -2835,20 +2818,21 @@ def test_system_image_config_dir_is_snappy(self, mocker): class TestLoadShellContent(helpers.TestCase): def test_comments_handled_correctly(self): """Shell comments should be allowed in the content.""" - self.assertEqual( - {"key1": "val1", "key2": "val2", "key3": "val3 #tricky"}, - util.load_shell_content( - "\n".join( - [ - "#top of file comment", - "key1=val1 #this is a comment", - "# second comment", - 'key2="val2" # inlin comment#badkey=wark', - 
'key3="val3 #tricky"', - "", - ] - ) - ), + assert { + "key1": "val1", + "key2": "val2", + "key3": "val3 #tricky", + } == util.load_shell_content( + "\n".join( + [ + "#top of file comment", + "key1=val1 #this is a comment", + "# second comment", + 'key2="val2" # inlin comment#badkey=wark', + 'key3="val3 #tricky"', + "", + ] + ) ) @@ -2876,34 +2860,29 @@ def test_non_utf8_in_environment(self, m_load_file): ) m_load_file.return_value = content - self.assertEqual( - { - "BOOTABLE_FLAG": self._val_decoded(self.bootflag), - "HOME": "/", - "PATH": "/bin:/sbin", - "MIXED": self._val_decoded(self.mixed), - }, - util.get_proc_env(1), - ) - self.assertEqual(1, m_load_file.call_count) + assert { + "BOOTABLE_FLAG": self._val_decoded(self.bootflag), + "HOME": "/", + "PATH": "/bin:/sbin", + "MIXED": self._val_decoded(self.mixed), + } == util.get_proc_env(1) + assert 1 == m_load_file.call_count @mock.patch(M_PATH + "load_binary_file") def test_all_utf8_encoded(self, m_load_file): """common path where only utf-8 decodable content.""" content = self.null.join((self.simple1, self.simple2)) m_load_file.return_value = content - self.assertEqual( - {"HOME": "/", "PATH": "/bin:/sbin"}, util.get_proc_env(1) - ) - self.assertEqual(1, m_load_file.call_count) + assert {"HOME": "/", "PATH": "/bin:/sbin"} == util.get_proc_env(1) + assert 1 == m_load_file.call_count @mock.patch(M_PATH + "load_binary_file") def test_non_existing_file_returns_empty_dict(self, m_load_file): """as implemented, a non-existing pid returns empty dict. 
This is how it was originally implemented.""" m_load_file.side_effect = OSError("File does not exist.") - self.assertEqual({}, util.get_proc_env(1)) - self.assertEqual(1, m_load_file.call_count) + assert {} == util.get_proc_env(1) + assert 1 == m_load_file.call_count class TestGetProcPpid(helpers.TestCase): @@ -2914,19 +2893,19 @@ def test_get_proc_ppid_linux(self): """get_proc_ppid returns correct parent pid value.""" my_pid = os.getpid() my_ppid = os.getppid() - self.assertEqual(my_ppid, Distro.get_proc_ppid(my_pid)) + assert my_ppid == Distro.get_proc_ppid(my_pid) @skipIf(not util.is_Linux(), "/proc/$pid/stat is not useful on not-Linux") def test_get_proc_pgrp_linux(self): """get_proc_ppid returns correct parent pid value.""" - self.assertEqual(os.getpgid(0), Distro.get_proc_pgid(os.getpid())) + assert os.getpgid(0) == Distro.get_proc_pgid(os.getpid()) @pytest.mark.allow_subp_for("ps") def test_get_proc_ppid_ps(self): """get_proc_ppid returns correct parent pid value.""" my_pid = os.getpid() my_ppid = os.getppid() - self.assertEqual(my_ppid, Distro.get_proc_ppid(my_pid)) + assert my_ppid == Distro.get_proc_ppid(my_pid) def test_get_proc_ppid_mocked(self): for ppid, proc_data in ( diff --git a/.pc/no-single-process.patch/cloudinit/config/cc_mounts.py b/.pc/no-single-process.patch/cloudinit/config/cc_mounts.py index c3998e57..1b4230c5 100644 --- a/.pc/no-single-process.patch/cloudinit/config/cc_mounts.py +++ b/.pc/no-single-process.patch/cloudinit/config/cc_mounts.py @@ -514,18 +514,20 @@ def mount_if_needed( subp.subp(["systemctl", "daemon-reload"]) -def cleanup_fstab(ds_remove_entries: list = []) -> None: +def cleanup_fstab(ds_remove_entry: Optional[str] = None) -> None: if not os.path.exists(FSTAB_PATH): return - base_entry = [MNT_COMMENT] + remove_entries = [MNT_COMMENT] + if ds_remove_entry: + remove_entries.append(ds_remove_entry) with open(FSTAB_PATH, "r") as f: lines = f.readlines() new_lines = [] changed = False for line in lines: - if all(entry in line 
for entry in [*base_entry, *ds_remove_entries]): + if all(entry in line for entry in remove_entries): changed = True continue new_lines.append(line) diff --git a/.pc/no-single-process.patch/cloudinit/config/schemas/schema-cloud-config-v1.json b/.pc/no-single-process.patch/cloudinit/config/schemas/schema-cloud-config-v1.json index 5fe97a5f..37097453 100644 --- a/.pc/no-single-process.patch/cloudinit/config/schemas/schema-cloud-config-v1.json +++ b/.pc/no-single-process.patch/cloudinit/config/schemas/schema-cloud-config-v1.json @@ -666,6 +666,47 @@ }, "minProperties": 1 }, + "rh_subscription_activation_key": { + "type": "string", + "description": "The activation key to use. Must be used with **org**. Should not be used with **username** or **password**." + }, + "rh_subscription_auto_attach": { + "type": "boolean", + "description": "Whether to attach subscriptions automatically." + }, + "rh_subscription_service_level": { + "type": "string", + "description": "The service level to use when subscribing to RH repositories. ``auto_attach`` must be true for this to be used." + }, + "rh_subscription_add_pool": { + "type": "array", + "description": "A list of pool IDs add to the subscription.", + "items": { + "type": "string" + } + }, + "rh_subscription_enable_repo": { + "type": "array", + "description": "A list of repositories to enable.", + "items": { + "type": "string" + } + }, + "rh_subscription_disable_repo": { + "type": "array", + "description": "A list of repositories to disable.", + "items": { + "type": "string" + } + }, + "rh_subscription_rhsm_baseurl": { + "type": "string", + "description": "Sets the baseurl in ``/etc/rhsm/rhsm.conf``." + }, + "rh_subscription_server_hostname": { + "type": "string", + "description": "Sets the serverurl in ``/etc/rhsm/rhsm.conf``." + }, "modules_definition": { "type": "array", "items": { @@ -2580,18 +2621,23 @@ "properties": { "username": { "type": "string", - "description": "The username to use. Must be used with password. 
Should not be used with **activation-key** or **org**." + "description": "The username to use. Must be used with password. Should not be used with **activation_key** or **org**." }, "password": { "type": "string", - "description": "The password to use. Must be used with username. Should not be used with **activation-key** or **org**." + "description": "The password to use. Must be used with username. Should not be used with **activation_key** or **org**." + }, + "activation_key": { + "$ref": "#/$defs/rh_subscription_activation_key" }, "activation-key": { - "type": "string", - "description": "The activation key to use. Must be used with **org**. Should not be used with **username** or **password**." + "$ref": "#/$defs/rh_subscription_activation_key", + "deprecated": true, + "deprecated_version": "25.3", + "deprecated_description": "Use **activation_key** instead." }, "org": { - "description": "The organization to use. Must be used with **activation-key**. Should not be used with **username** or **password**.", + "description": "The organization to use. Must be used with **activation_key**. Should not be used with **username** or **password**.", "oneOf": [ { "type": "string" @@ -2604,44 +2650,140 @@ } ] }, + "auto_attach": { + "$ref": "#/$defs/rh_subscription_auto_attach" + }, "auto-attach": { - "type": "boolean", - "description": "Whether to attach subscriptions automatically." + "$ref": "#/$defs/rh_subscription_auto_attach", + "deprecated": true, + "deprecated_version": "25.3", + "deprecated_description": "Use **auto_attach** instead." + }, + "service_level": { + "$ref": "#/$defs/rh_subscription_service_level" }, "service-level": { - "type": "string", - "description": "The service level to use when subscribing to RH repositories. ``auto-attach`` must be true for this to be used." + "$ref": "#/$defs/rh_subscription_service_level", + "deprecated": true, + "deprecated_version": "25.3", + "deprecated_description": "Use **service_level** instead." 
+ }, + "add_pool": { + "$ref": "#/$defs/rh_subscription_add_pool" }, "add-pool": { - "type": "array", - "description": "A list of pools ids add to the subscription.", - "items": { - "type": "string" - } + "$ref": "#/$defs/rh_subscription_add_pool", + "deprecated": true, + "deprecated_version": "25.3", + "deprecated_description": "Use **add_pool** instead." + }, + "enable_repo": { + "$ref": "#/$defs/rh_subscription_enable_repo" }, "enable-repo": { - "type": "array", - "description": "A list of repositories to enable.", - "items": { - "type": "string" - } + "$ref": "#/$defs/rh_subscription_enable_repo", + "deprecated": true, + "deprecated_version": "25.3", + "deprecated_description": "Use **enable_repo** instead." + }, + "disable_repo": { + "$ref": "#/$defs/rh_subscription_disable_repo" }, "disable-repo": { - "type": "array", - "description": "A list of repositories to disable.", - "items": { - "type": "string" - } + "$ref": "#/$defs/rh_subscription_disable_repo", + "deprecated": true, + "deprecated_version": "25.3", + "deprecated_description": "Use **disable_repo** instead." }, - "rhsm-baseurl": { + "release_version": { "type": "string", - "description": "Sets the baseurl in ``/etc/rhsm/rhsm.conf``." + "description": "Sets the release_version via``subscription-manager release --set=`` then deletes the package manager cache ``/var/cache/{dnf,yum}`` . These steps are applied after any pool attachment and/or enabling/disabling repos. For more information about this key, check https://access.redhat.com/solutions/238533 ." + }, + "rhsm_baseurl": { + "$ref": "#/$defs/rh_subscription_rhsm_baseurl" + }, + "rhsm-baseurl": { + "$ref": "#/$defs/rh_subscription_rhsm_baseurl", + "deprecated": true, + "deprecated_version": "25.3", + "deprecated_description": "Use **rhsm_baseurl** instead." + }, + "server_hostname": { + "$ref": "#/$defs/rh_subscription_server_hostname" }, "server-hostname": { - "type": "string", - "description": "Sets the serverurl in ``/etc/rhsm/rhsm.conf``." 
+ "$ref": "#/$defs/rh_subscription_server_hostname", + "deprecated": true, + "deprecated_version": "25.3", + "deprecated_description": "Use **server_hostname** instead." } - } + }, + "allOf": [ + { + "not": { + "required": [ + "activation_key", + "activation-key" + ] + } + }, + { + "not": { + "required": [ + "auto_attach", + "auto-attach" + ] + } + }, + { + "not": { + "required": [ + "service_level", + "service-level" + ] + } + }, + { + "not": { + "required": [ + "add_pool", + "add-pool" + ] + } + }, + { + "not": { + "required": [ + "enable_repo", + "enable-repo" + ] + } + }, + { + "not": { + "required": [ + "disable_repo", + "disable-repo" + ] + } + }, + { + "not": { + "required": [ + "rhsm_baseurl", + "rhsm-baseurl" + ] + } + }, + { + "not": { + "required": [ + "server_hostname", + "server-hostname" + ] + } + } + ] } } }, @@ -2692,11 +2834,6 @@ "type": "boolean", "description": "Enable 1-Wire interface. Default: ``false``.", "default": false - }, - "remote_gpio": { - "type": "boolean", - "description": "Enable remote GPIO interface. Default: ``false``.", - "default": false } } }, @@ -3328,7 +3465,7 @@ "properties": { "manage_etc_hosts": { "default": false, - "description": "Whether to manage ``/etc/hosts`` on the system. If ``true``, render the hosts file using ``/etc/cloud/templates/hosts.tmpl`` replacing ``$hostname`` and ``$fdqn``. If ``localhost``, append a ``127.0.1.1`` entry that resolves from FQDN and hostname every boot. Default: ``false``.", + "description": "Whether to manage ``/etc/hosts`` on the system. If ``true``, render the hosts file using ``/etc/cloud/templates/hosts.tmpl`` replacing ``$hostname`` and ``$fqdn``. If ``localhost``, append a ``127.0.1.1`` entry that resolves from FQDN and hostname every boot. 
Default: ``false``.", "oneOf": [ { "enum": [ diff --git a/.pc/no-single-process.patch/systemd/cloud-config.service b/.pc/no-single-process.patch/systemd/cloud-config.service index 68f80d2b..3fe62f9d 100644 --- a/.pc/no-single-process.patch/systemd/cloud-config.service +++ b/.pc/no-single-process.patch/systemd/cloud-config.service @@ -16,7 +16,7 @@ Type=oneshot # process has completed this stage. The output from the return socket is piped # into a shell so that the process can send a completion message (defaults to # "done", otherwise includes an error message) and an exit code to systemd. -ExecStart=sh -c 'echo "start" | nc -Uu -W1 /run/cloud-init/share/config.sock -s /run/cloud-init/share/config-return.sock | sh' +ExecStart=sh -c 'echo "start" | nc -U /run/cloud-init/share/config.sock | sh' RemainAfterExit=yes TimeoutSec=0 diff --git a/.pc/no-single-process.patch/systemd/cloud-final.service b/.pc/no-single-process.patch/systemd/cloud-final.service index fb74a47c..e7e892ab 100644 --- a/.pc/no-single-process.patch/systemd/cloud-final.service +++ b/.pc/no-single-process.patch/systemd/cloud-final.service @@ -19,7 +19,7 @@ Type=oneshot # process has completed this stage. The output from the return socket is piped # into a shell so that the process can send a completion message (defaults to # "done", otherwise includes an error message) and an exit code to systemd. 
-ExecStart=sh -c 'echo "start" | nc -Uu -W1 /run/cloud-init/share/final.sock -s /run/cloud-init/share/final-return.sock | sh' +ExecStart=sh -c 'echo "start" | nc -U /run/cloud-init/share/final.sock | sh' RemainAfterExit=yes TimeoutSec=0 TasksMax=infinity diff --git a/.pc/no-single-process.patch/systemd/cloud-init-local.service.tmpl b/.pc/no-single-process.patch/systemd/cloud-init-local.service.tmpl index 26a6aee1..e88b15ca 100644 --- a/.pc/no-single-process.patch/systemd/cloud-init-local.service.tmpl +++ b/.pc/no-single-process.patch/systemd/cloud-init-local.service.tmpl @@ -7,7 +7,6 @@ DefaultDependencies=no {% endif %} Wants=network-pre.target After=hv_kvp_daemon.service -Before=auditd.service Before=network-pre.target Before=shutdown.target {% if variant in ["almalinux", "cloudlinux", "rhel"] %} @@ -33,7 +32,7 @@ ExecStartPre=/sbin/restorecon /run/cloud-init # process has completed this stage. The output from the return socket is piped # into a shell so that the process can send a completion message (defaults to # "done", otherwise includes an error message) and an exit code to systemd. -ExecStart=sh -c 'echo "start" | nc -Uu -W1 /run/cloud-init/share/local.sock -s /run/cloud-init/share/local-return.sock | sh' +ExecStart=sh -c 'echo "start" | nc -U /run/cloud-init/share/local.sock | sh' RemainAfterExit=yes TimeoutSec=0 diff --git a/.pc/no-single-process.patch/systemd/cloud-init-network.service.tmpl b/.pc/no-single-process.patch/systemd/cloud-init-network.service.tmpl index 61425b4a..9658af1d 100644 --- a/.pc/no-single-process.patch/systemd/cloud-init-network.service.tmpl +++ b/.pc/no-single-process.patch/systemd/cloud-init-network.service.tmpl @@ -56,7 +56,7 @@ Type=oneshot # process has completed this stage. The output from the return socket is piped # into a shell so that the process can send a completion message (defaults to # "done", otherwise includes an error message) and an exit code to systemd. 
-ExecStart=sh -c 'echo "start" | nc -Uu -W1 /run/cloud-init/share/network.sock -s /run/cloud-init/share/network-return.sock | sh' +ExecStart=sh -c 'echo "start" | nc -U /run/cloud-init/share/network.sock | sh' RemainAfterExit=yes TimeoutSec=0 diff --git a/.pc/retain-setuptools.patch/doc-requirements.txt b/.pc/retain-setuptools.patch/doc-requirements.txt new file mode 100644 index 00000000..78a0a22f --- /dev/null +++ b/.pc/retain-setuptools.patch/doc-requirements.txt @@ -0,0 +1,13 @@ +-r requirements.txt +doc8 +furo +meson +m2r2 +pyyaml +sphinx==7.1.2 +sphinx-design +sphinx-copybutton +sphinx-notfound-page +sphinxcontrib.datatemplates +sphinxcontrib-mermaid +sphinxcontrib-spelling diff --git a/.pc/retain-setuptools.patch/packages/bddeb b/.pc/retain-setuptools.patch/packages/bddeb new file mode 100755 index 00000000..2fbb1cca --- /dev/null +++ b/.pc/retain-setuptools.patch/packages/bddeb @@ -0,0 +1,393 @@ +#!/usr/bin/env python3 + +import argparse +import csv +import json +import os +import re +import shutil +import subprocess +import sys + +UNRELEASED = "UNRELEASED" + + +def find_root(): + # expected path is in /packages/ + top_dir = os.environ.get("CLOUD_INIT_TOP_D", None) + if top_dir is None: + top_dir = os.path.dirname( + os.path.dirname(os.path.abspath(sys.argv[0])) + ) + if os.path.isfile(os.path.join(top_dir, "meson.build")): + return os.path.abspath(top_dir) + raise OSError( + ( + "Unable to determine where your cloud-init topdir is." + " set CLOUD_INIT_TOP_D?" 
+ ) + ) + + +if "avoid-pep8-E402-import-not-top-of-file": + # Use the util functions from cloudinit + sys.path.insert(0, find_root()) + from cloudinit import subp + from cloudinit import util + from cloudinit import temp_utils + from cloudinit import templater + +DEBUILD_ARGS = ["-S", "-d"] + + +def get_release_suffix(release): + """Given ubuntu release, return a suffix for package + + Examples: + --------- + >>> get_release_suffix("jammy") + '~22.04.1' + """ + csv_path = "/usr/share/distro-info/ubuntu.csv" + rels = {} + # fields are version, codename, series, created, release, eol, eol-server + if os.path.exists(csv_path): + with open(csv_path, "r") as fp: + # version has "16.04 LTS" or "16.10", so drop "LTS" portion. + rels = { + row["series"]: row["version"].replace(" LTS", "") + for row in csv.DictReader(fp) + } + if release in rels: + return "~%s.1" % rels[release] + elif release != UNRELEASED: + print( + "missing distro-info-data package, unable to give " + "per-release suffix.\n" + ) + return "" + + +def run_helper(helper, args=None, strip=True): + if args is None: + args = [] + cmd = [os.path.abspath(os.path.join(find_root(), "tools", helper))] + args + (stdout, _stderr) = subp.subp(cmd) + if strip: + stdout = stdout.strip() + return stdout + + +def write_debian_folder(root, templ_data, cloud_util_deps): + """Create a debian package directory with all rendered template files.""" + print("Creating a debian/ folder in %r" % (root)) + + deb_dir = os.path.abspath(os.path.join(root, "debian")) + + # Just copy debian/ dir and then update files + pdeb_d = os.path.abspath(os.path.join(find_root(), "packages", "debian")) + subp.subp(["cp", "-a", pdeb_d, deb_dir]) + + # Fill in the change log template + templater.render_to_file( + os.path.abspath( + os.path.join(find_root(), "packages", "debian", "changelog.in") + ), + os.path.abspath(os.path.join(deb_dir, "changelog")), + params=templ_data, + ) + + # Write out the control file template + reqs_output = 
run_helper("read-dependencies", args=["--distro", "debian"]) + reqs = reqs_output.splitlines() + test_reqs = run_helper( + "read-dependencies", + [ + "--distro", + "debian", + "--requirements-file", + "test-requirements.txt", + "--system-pkg-names", + ], + ).splitlines() + + requires = ["cloud-utils | cloud-guest-utils"] if cloud_util_deps else [] + # We consolidate all deps as Build-Depends as our package build runs all + # tests so we need all runtime dependencies anyway. + # NOTE: python package was moved to the front after debuild -S would fail + # with 'Please add appropriate interpreter' errors + # (as in debian bug 861132) + requires.extend(["python3"] + reqs + test_reqs) + if templ_data["debian_release"] in ( + "buster", + "bionic", + "focal", + ): + requires.append("dh-systemd") + build_deps = ",".join(requires) + (stdout, _stderr) = subp.subp( + ["dpkg-query", "-W", "-f='${Provides}'", "debhelper"] + ) + # Get latest debhelper-compat support on host + debhelper_matches = re.findall(r"(debhelper-compat \(= \d+\)),", stdout) + if debhelper_matches: + if templ_data["debian_release"] == "bionic": + # Bionic doesn't support debhelper-compat > 11 + build_deps += ",debhelper-compat (= 11)" + elif templ_data["debian_release"] == "focal": + # Focal doesn't support debhelper-compat > 12 + build_deps += ",debhelper-compat (= 12)" + else: + build_deps += f",{debhelper_matches[-1]}" + templater.render_to_file( + os.path.abspath( + os.path.join(find_root(), "packages", "debian", "control.in") + ), + os.path.abspath(os.path.join(deb_dir, "control")), + params={"build_depends": build_deps}, + ) + + +def write_debian_folder_from_branch(root, templ_data, branch): + """Import a debian package directory from a branch.""" + print("Importing debian/ from branch %s to %s" % (branch, root)) + + p_dumpdeb = subprocess.Popen( + ["git", "archive", branch, "debian"], stdout=subprocess.PIPE + ) + subprocess.check_call( + ["tar", "-v", "-C", root, "-x"], stdin=p_dumpdeb.stdout + ) + + 
print("Adding new entry to debian/changelog") + full_deb_version = ( + templ_data["version_long"] + "-1~bddeb" + templ_data["release_suffix"] + ) + subp.subp( + [ + "dch", + "--distribution", + templ_data["debian_release"], + "--newversion", + full_deb_version, + "--controlmaint", + "Snapshot build.", + ], + cwd=root, + ) + + +def read_version(): + return json.loads(run_helper("read-version", ["--json"])) + + +def get_parser(): + """Setup and return an argument parser for bdeb tool.""" + parser = argparse.ArgumentParser() + parser.add_argument( + "-v", + "--verbose", + dest="verbose", + help=("run verbosely (default: %(default)s)"), + default=False, + action="store_true", + ) + parser.add_argument( + "--cloud-utils", + dest="cloud_utils", + help=("depend on cloud-utils package (default: %(default)s)"), + default=False, + action="store_true", + ) + + parser.add_argument( + "--init-system", + dest="init_system", + help=("build deb with INIT_SYSTEM=xxx (default: %(default)s"), + default=os.environ.get("INIT_SYSTEM", "systemd"), + ) + + parser.add_argument( + "--release", + dest="release", + help=("build with changelog referencing RELEASE"), + default=UNRELEASED, + ) + + for ent in DEBUILD_ARGS: + parser.add_argument( + ent, + dest="debuild_args", + action="append_const", + const=ent, + default=[], + help=("pass through '%s' to debuild" % ent), + ) + + parser.add_argument( + "--sign", + default=False, + action="store_true", + help="sign result. 
do not pass -us -uc to debuild", + ) + + parser.add_argument( + "--signuser", + default=False, + action="store", + help="user to sign, see man dpkg-genchanges", + ) + + parser.add_argument( + "--packaging-branch", + nargs="?", + metavar="BRANCH", + const="ubuntu/devel", + type=str, + help=( + "Import packaging from %(metavar)s instead of" + " using the packages/debian/* templates" + " (default: %(const)s)" + ), + ) + + return parser + + +def main(): + parser = get_parser() + args = parser.parse_args() + + if args.packaging_branch: + try: + subp.subp( + [ + "git", + "show-ref", + "--quiet", + "--verify", + "refs/heads/" + args.packaging_branch, + ] + ) + except subp.ProcessExecutionError: + print("Couldn't find branch '%s'." % args.packaging_branch) + print("You may need to checkout the branch from the git remote.") + return 1 + try: + subp.subp( + [ + "git", + "cat-file", + "-e", + args.packaging_branch + ":debian/control", + ] + ) + except subp.ProcessExecutionError: + print( + "Couldn't find debian/control in branch '%s'." + " Is it a packaging branch?" % args.packaging_branch + ) + return 1 + + if not args.sign: + args.debuild_args.extend(["-us", "-uc"]) + + if args.signuser: + args.debuild_args.extend(["-e%s" % args.signuser]) + + os.environ["INIT_SYSTEM"] = args.init_system + + capture = True + if args.verbose: + capture = False + + templ_data = { + "debian_release": args.release, + "release_suffix": get_release_suffix(args.release), + } + + with temp_utils.tempdir(needs_exe=True) as tdir: + + # output like 0.7.6-1022-g36e92d3 + ver_data = read_version() + if ver_data["is_release_branch_ci"]: + # If we're performing CI for a new release branch, we don't yet + # have the tag required to generate version_long; use version + # instead. 
+ ver_data["version_long"] = ver_data["version"] + + # This is really only a temporary archive + # since we will extract it then add in the debian + # folder, then re-archive it for debian happiness + tarball = "cloud-init_%s.orig.tar.gz" % ver_data["version_long"] + tarball_fp = os.path.abspath(os.path.join(tdir, tarball)) + path = None + for pd in ("./", "../", "../dl/"): + if os.path.exists(pd + tarball): + path = pd + tarball + print("Using existing tarball %s" % path) + shutil.copy(path, tarball_fp) + break + if path is None: + print("Creating a temp tarball using the 'make-tarball' helper") + run_helper( + "make-tarball", + [ + "--version", + ver_data["version_long"], + "--output=" + tarball_fp, + ], + ) + + print("Extracting temporary tarball %r" % (tarball)) + cmd = ["tar", "-xvzf", tarball_fp, "-C", tdir] + subp.subp(cmd, capture=capture) + + xdir = os.path.abspath( + os.path.join(tdir, "cloud-init-%s" % ver_data["version_long"]) + ) + templ_data.update(ver_data) + + if args.packaging_branch: + write_debian_folder_from_branch( + xdir, templ_data, args.packaging_branch + ) + else: + write_debian_folder( + xdir, templ_data, cloud_util_deps=args.cloud_utils + ) + + print( + "Running 'debuild %s' in %r" % (" ".join(args.debuild_args), xdir) + ) + with util.chdir(xdir): + cmd = ["debuild", "--preserve-envvar", "INIT_SYSTEM"] + if args.debuild_args: + cmd.extend(args.debuild_args) + subp.subp(cmd, capture=capture) + + link_fn = os.path.join(os.getcwd(), "cloud-init_all.deb") + link_dsc = os.path.join(os.getcwd(), "cloud-init.dsc") + for base_fn in os.listdir(os.path.join(tdir)): + full_fn = os.path.join(tdir, base_fn) + if not os.path.isfile(full_fn): + continue + shutil.move(full_fn, base_fn) + print("Wrote %r" % (base_fn)) + if base_fn.endswith("_all.deb"): + # Add in the local link + util.del_file(link_fn) + os.symlink(base_fn, link_fn) + print("Linked %r to %r" % (base_fn, os.path.basename(link_fn))) + if base_fn.endswith(".dsc"): + util.del_file(link_dsc) + 
os.symlink(base_fn, link_dsc) + print( + "Linked %r to %r" % (base_fn, os.path.basename(link_dsc)) + ) + + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/.pc/retain-setuptools.patch/packages/debian/rules b/.pc/retain-setuptools.patch/packages/debian/rules new file mode 100755 index 00000000..d314400b --- /dev/null +++ b/.pc/retain-setuptools.patch/packages/debian/rules @@ -0,0 +1,29 @@ +#!/usr/bin/make -f + +include /usr/share/dpkg/pkg-info.mk + +export PYTHONDONTWRITEBYTECODE=1 + +%: + dh $@ --buildsystem meson + +override_dh_auto_configure: + dh_auto_configure -- -Dinit_system=systemd -Dlibexecdir=lib -Ddistro_templates=chef_client.rb.tmpl,chrony.conf.ubuntu.tmpl,hosts.debian.tmpl,ntp.conf.ubuntu.tmpl,sources.list.ubuntu.deb822.tmpl,sources.list.ubuntu.deb822.tmpl,timesyncd.conf.tmpl + +override_dh_auto_test: + +ifeq (,$(findstring nocheck,$(DEB_BUILD_OPTIONS))) + http_proxy= make PYVER=python3 check +else + @echo check disabled by DEB_BUILD_OPTIONS=$(DEB_BUILD_OPTIONS) +endif + +override_dh_installsystemd: + dh_installsystemd --no-restart-on-upgrade --no-start + +override_dh_auto_install: + dh_auto_install --destdir=debian/cloud-init-base + install -D -m 0644 ./tools/21-cloudinit.conf debian/cloud-init-base/etc/rsyslog.d/21-cloudinit.conf + install -D ./tools/Z99-cloud-locale-test.sh debian/cloud-init-base/etc/profile.d/Z99-cloud-locale-test.sh + install -D ./tools/Z99-cloudinit-warnings.sh debian/cloud-init-base/etc/profile.d/Z99-cloudinit-warnings.sh + flist=$$(find $(CURDIR)/debian/ -type f -name version.py) && sed -i 's,@@PACKAGED_VERSION@@,$(DEB_VERSION),' $${flist:-did-not-find-version-py-for-replacement} diff --git a/.pc/retain-setuptools.patch/packages/pkg-deps.json b/.pc/retain-setuptools.patch/packages/pkg-deps.json new file mode 100644 index 00000000..86c28714 --- /dev/null +++ b/.pc/retain-setuptools.patch/packages/pkg-deps.json @@ -0,0 +1,112 @@ +{ + "debian" : { + "build-requires" : [ + "meson", + "pkgconf", + 
"bash-completion", + "debhelper", + "systemd-dev", + "python3", + "python3-debconf" + ], + "renames" : { + "pyyaml" : "python3-yaml", + "pyserial" : "python3-serial" + }, + "requires" : [ + "debconf", + "dhcpcd-base", + "iproute2", + "netcat-openbsd", + "netplan.io", + "procps" + ] + }, + "centos" : { + "build-requires" : [ + "python3-devel" + ], + "requires" : [ + "e2fsprogs", + "iproute", + "net-tools", + "procps", + "rsyslog", + "shadow-utils", + "sudo" + ] + }, + "eurolinux" : { + "build-requires" : [ + "python3-devel" + ], + "requires" : [ + "e2fsprogs", + "iproute", + "net-tools", + "procps", + "rsyslog", + "shadow-utils", + "sudo" + ] + }, + "redhat" : { + "build-requires" : [ + "bash-completion", + "meson", + "pkgconf", + "python3-devel", + "systemd-devel" + ], + "requires" : [ + "e2fsprogs", + "iproute", + "net-tools", + "procps", + "rsyslog", + "shadow-utils", + "sudo", + "hostname" + ] + }, + "fedora": { + "build-requires" : [ + "bash-completion-devel", + "meson", + "pkgconf", + "python3-devel", + "systemd-devel" + ], + "requires" : [ + "e2fsprogs", + "iproute", + "net-tools", + "procps", + "rsyslog", + "shadow-utils", + "sudo", + "hostname" + ] + }, + "suse" : { + "renames" : { + "jinja2" : "python3-Jinja2", + "pyyaml" : "python3-PyYAML" + }, + "build-requires" : [ + "meson", + "pkgconf", + "bash-completion-devel", + "fdupes", + "filesystem", + "python3-devel" + ], + "requires" : [ + "iproute2", + "e2fsprogs", + "net-tools", + "procps", + "sudo" + ] + } +} diff --git a/.pc/retain-setuptools.patch/pyproject.toml b/.pc/retain-setuptools.patch/pyproject.toml new file mode 100644 index 00000000..99451a10 --- /dev/null +++ b/.pc/retain-setuptools.patch/pyproject.toml @@ -0,0 +1,236 @@ +[build-system] # See meson.build. 
Empty build-system to avoid RTD builds +build-backend = "" +requires = [] + +[tool.black] +line-length = 79 +include = '(brpm|bddeb|\.py)$' + +[tool.isort] +profile = "black" +line_length = 79 +# We patch logging in main.py before certain imports +skip = ["cloudinit/cmd/main.py", ".tox", "packages", "tools"] + +[tool.mypy] +follow_imports = "silent" +check_untyped_defs = true +warn_redundant_casts = true +warn_unused_ignores = true +warn_unreachable = true +exclude = [] + +[[tool.mypy.overrides]] +module = [ + "apport.*", + "BaseHTTPServer", + "configobj", + "debconf", + "httplib", + "jsonpatch", + "paramiko.*", + "pip.*", + "pycloudlib.*", + "responses", + "serial", + "tests.integration_tests.user_settings", + "uaclient.*", +] +ignore_missing_imports = true +no_implicit_optional = true + +# See GH-5445 +[[tool.mypy.overrides]] +module = [ + "cloudinit.cmd.devel.make_mime", + "cloudinit.cmd.devel.net_convert", + "cloudinit.cmd.main", + "cloudinit.config.cc_apt_configure", + "cloudinit.config.cc_ca_certs", + "cloudinit.config.cc_growpart", + "cloudinit.config.cc_ntp", + "cloudinit.config.modules", + "cloudinit.distros", + "cloudinit.distros.alpine", + "cloudinit.distros.azurelinux", + "cloudinit.distros.bsd", + "cloudinit.distros.opensuse", + "cloudinit.distros.parsers.hostname", + "cloudinit.distros.parsers.hosts", + "cloudinit.distros.parsers.resolv_conf", + "cloudinit.distros.ubuntu", + "cloudinit.distros.ug_util", + "cloudinit.helpers", + "cloudinit.net.cmdline", + "cloudinit.net.ephemeral", + "cloudinit.net.freebsd", + "cloudinit.net.netbsd", + "cloudinit.net.network_manager", + "cloudinit.net.network_state", + "cloudinit.net.networkd", + "cloudinit.net.sysconfig", + "cloudinit.netinfo", + "cloudinit.sources.DataSourceAzure", + "cloudinit.sources.DataSourceBigstep", + "cloudinit.sources.DataSourceCloudSigma", + "cloudinit.sources.DataSourceCloudStack", + "cloudinit.sources.DataSourceConfigDrive", + "cloudinit.sources.DataSourceDigitalOcean", + 
"cloudinit.sources.DataSourceEc2", + "cloudinit.sources.DataSourceExoscale", + "cloudinit.sources.DataSourceGCE", + "cloudinit.sources.DataSourceHetzner", + "cloudinit.sources.DataSourceNoCloud", + "cloudinit.sources.DataSourceOVF", + "cloudinit.sources.DataSourceOpenNebula", + "cloudinit.sources.DataSourceOpenStack", + "cloudinit.sources.DataSourceOracle", + "cloudinit.sources.DataSourceRbxCloud", + "cloudinit.sources.DataSourceScaleway", + "cloudinit.sources.DataSourceSmartOS", + "cloudinit.sources.DataSourceVMware", + "cloudinit.sources", + "cloudinit.sources.helpers.azure", + "cloudinit.sources.helpers.ec2", + "cloudinit.sources.helpers.netlink", + "cloudinit.sources.helpers.openstack", + "cloudinit.sources.helpers.vmware.imc.config_file", + "cloudinit.sources.helpers.vmware.imc.config_nic", + "cloudinit.sources.helpers.vultr", + "cloudinit.ssh_util", + "cloudinit.stages", + "cloudinit.temp_utils", + "cloudinit.templater", + "cloudinit.user_data", + "tests.integration_tests.instances", + "tests.unittests.analyze.test_show", + "tests.unittests.config.test_apt_configure_sources_list_v3", + "tests.unittests.config.test_apt_source_v1", + "tests.unittests.config.test_cc_apk_configure", + "tests.unittests.config.test_cc_apt_pipelining", + "tests.unittests.config.test_cc_bootcmd", + "tests.unittests.config.test_cc_ca_certs", + "tests.unittests.config.test_cc_chef", + "tests.unittests.config.test_cc_disable_ec2_metadata", + "tests.unittests.config.test_cc_final_message", + "tests.unittests.config.test_cc_growpart", + "tests.unittests.config.test_cc_grub_dpkg", + "tests.unittests.config.test_cc_install_hotplug", + "tests.unittests.config.test_cc_keys_to_console", + "tests.unittests.config.test_cc_mcollective", + "tests.unittests.config.test_cc_phone_home", + "tests.unittests.config.test_cc_puppet", + "tests.unittests.config.test_cc_resizefs", + "tests.unittests.config.test_cc_resolv_conf", + "tests.unittests.config.test_cc_rh_subscription", + 
"tests.unittests.config.test_cc_ubuntu_autoinstall", + "tests.unittests.config.test_cc_update_etc_hosts", + "tests.unittests.config.test_cc_users_groups", + "tests.unittests.config.test_cc_wireguard", + "tests.unittests.config.test_cc_yum_add_repo", + "tests.unittests.config.test_cc_zypper_add_repo", + "tests.unittests.config.test_modules", + "tests.unittests.config.test_schema", + "tests.unittests.distros.test_alpine", + "tests.unittests.distros.test_hosts", + "tests.unittests.distros.test_ifconfig", + "tests.unittests.distros.test_netbsd", + "tests.unittests.distros.test_netconfig", + "tests.unittests.distros.test_opensuse", + "tests.unittests.distros.test_user_data_normalize", + "tests.unittests.helpers", + "tests.unittests.net.test_dhcp", + "tests.unittests.net.test_init", + "tests.unittests.net.test_networkd", + "tests.unittests.runs.test_merge_run", + "tests.unittests.runs.test_simple_run", + "tests.unittests.sources.azure.test_errors", + "tests.unittests.sources.azure.test_imds", + "tests.unittests.sources.helpers.test_openstack", + "tests.unittests.sources.test_aliyun", + "tests.unittests.sources.test_altcloud", + "tests.unittests.sources.test_azure", + "tests.unittests.sources.test_azure_helper", + "tests.unittests.sources.test_cloudsigma", + "tests.unittests.sources.test_common", + "tests.unittests.sources.test_configdrive", + "tests.unittests.sources.test_digitalocean", + "tests.unittests.sources.test_ec2", + "tests.unittests.sources.test_exoscale", + "tests.unittests.sources.test_gce", + "tests.unittests.sources.test_init", + "tests.unittests.sources.test_nocloud", + "tests.unittests.sources.test_opennebula", + "tests.unittests.sources.test_openstack", + "tests.unittests.sources.test_oracle", + "tests.unittests.sources.test_ovf", + "tests.unittests.sources.test_rbx", + "tests.unittests.sources.test_scaleway", + "tests.unittests.sources.test_smartos", + "tests.unittests.sources.test_upcloud", + "tests.unittests.sources.test_vultr", + 
"tests.unittests.sources.vmware.test_vmware_config_file", + "tests.unittests.test__init__", + "tests.unittests.test_apport", + "tests.unittests.test_builtin_handlers", + "tests.unittests.test_cli", + "tests.unittests.test_conftest", + "tests.unittests.test_data", + "tests.unittests.test_ds_identify", + "tests.unittests.test_helpers", + "tests.unittests.test_log", + "tests.unittests.test_merging", + "tests.unittests.test_net", + "tests.unittests.test_net_activators", + "tests.unittests.test_ssh_util", + "tests.unittests.test_stages", + "tests.unittests.test_subp", + "tests.unittests.test_templating", + "tests.unittests.test_upgrade", + "tests.unittests.test_url_helper", + "tests.unittests.test_util", + "tests.unittests.util", + + # tools/* + "netplan_schema_check", + "mock-meta", +] +check_untyped_defs = false + +[tool.ruff] +target-version = "py38" +line-length = 79 +# E, W, and F make up the entirety of default flake8 +lint.select = [ + "D", # pydocstyle + "E", # pycodestyle errors + "W", # pycodestyle warnings + "F", # pyflakes + "T10", # flake8-debugger + "ISC", # flake8-implicit-str-concat + "ICN", # flake8-import-conventions + "G", # flake8-logging-format + "PIE", # flake8-pie + "Q", # flake8-quotes +] +lint.ignore = [ + "D100", # docstring: public module + "D101", # docstring: public class required + "D102", # docstring: public method required + "D103", # docstring: public function required + "D107", # docstring: __init__ required + "D104", # docstring: public package required + "D105", # docstring: magic method required + "D200", # docstring: one line docstring shouldn't wrap + "D202", # docstring: blank line + "D205", # docstring: 1 blank line between initial and summary + "D209", # docstring: closing quotes -> separate line + "D400", # docstring: end with a period + "D401", # docstring: imperative mood + "D402", # docstring: docstring shouldn't start with func signature + "D403", # docstring: capitalized first line + "E731", # Do not assign a `lambda` 
expression, use a `def` +] + +[tool.ruff.lint.pydocstyle] +convention = "pep257" diff --git a/.pc/retain-setuptools.patch/setup.py b/.pc/retain-setuptools.patch/setup.py new file mode 100644 index 00000000..e69de29b diff --git a/.pc/retain-setuptools.patch/setup_utils.py b/.pc/retain-setuptools.patch/setup_utils.py new file mode 100644 index 00000000..e69de29b diff --git a/.pc/retain-setuptools.patch/test-requirements.txt b/.pc/retain-setuptools.patch/test-requirements.txt new file mode 100644 index 00000000..c55f9ca4 --- /dev/null +++ b/.pc/retain-setuptools.patch/test-requirements.txt @@ -0,0 +1,21 @@ +# Needed generally in tests + +-r requirements.txt + +# Avoid breaking change in `testpaths` treatment forced +# test/unittests/conftest.py to be loaded by our integration-tests tox env +# resulting in an unmet dependency issue: +# https://github.com/pytest-dev/pytest/issues/11104 +pytest!=7.3.2 + +pytest-cov +pytest-mock +pytest-xdist +jsonschema +responses +packaging +passlib + +# This one is currently used only by the CloudSigma and SmartOS datasources. +# If these datasources are removed, this is no longer needed. 
+pyserial diff --git a/.pc/retain-setuptools.patch/tools/test_tools.py b/.pc/retain-setuptools.patch/tools/test_tools.py new file mode 100644 index 00000000..38440758 --- /dev/null +++ b/.pc/retain-setuptools.patch/tools/test_tools.py @@ -0,0 +1,89 @@ +import pathlib +from packaging.version import Version, InvalidVersion +from importlib.machinery import SourceFileLoader +from importlib.util import module_from_spec, spec_from_loader +from unittest import mock + +import pytest + +# Since read-version has a '-' and no .py extension, we have to do this +# to import it +spec = spec_from_loader( + "read-version", + SourceFileLoader( + "read-version", + str(pathlib.Path(__file__).absolute().parent / "read-version"), + ), +) +if not spec: + pytest.fail("Could not import read-version") +read_version = module_from_spec(spec) +if not spec.loader: + pytest.fail("Could not import read-version") +spec.loader.exec_module(read_version) + + +def version_to_pep440(version: str) -> str: + # read-version can spit out something like 22.4-15-g7f97aee24 + # which is invalid under PEP 440. If we replace the first - with a + + # that should give us a valid version. 
+ return version.replace("-", "+", 1) + + +def assert_valid_version(version): + try: + Version(version) + except InvalidVersion: + pytest.fail(f"{version} is not PEP 440 compliant") + + +@pytest.mark.parametrize( + "version,expected", + [ + (("23.2", "23.2"), "23.2"), + (("23.2", "23.2-0-gcdc24d864"), "23.2-0-gcdc24d86"), + (("23.2.1", "23.2.1"), "23.2.1"), + (("23.2.1", "23.2.1-0-gcda472559"), "23.2.1-0-gcda47255"), + ( + ("23.2-65-g392346ccd", "23.2-65-g392346ccd"), + "23.2-65-g392346cc", + ), + ( + ("23.2.1-65-g392346ccd", "23.2.1-65-g392346ccd"), + "23.2.1-65-g392346cc", + ), + ( + ( + "cloud-init-23.1.1-2.el8-2-g285d8d80", + "cloud-init-23.1.1-2.el8-2-g285d8d80", + ), + "23.1.1-2-g285d8d80", + ), # RH tags + ( + ( + "21.1-19-gbad84ad4-0ubuntu1_16.04.4+esm1", + "21.1-19-gbad84ad4-0ubuntu1_16.04.4+esm1", + ), + "21.1-19-gbad84ad4", + ), + (("0.3.4ubuntu6", "0.3.4ubuntu6"), "0.3.4"), + (("noparse", "noparse"), "10.2.1"), + ], +) +@mock.patch.object( + read_version.ci_version, "version_string", return_value="10.2.1" +) +class TestReadVersion: + def test_tag_parsing(self, _m_package_version, version, expected): + """Ensure that we can parse most tags. + + If we cannot parse the tag, fallback to package version. 
+ """ + with mock.patch.object( + read_version, "get_version_from_git", return_value=version + ): + out = read_version.main() + assert out == expected + + # Also ensure it passes setuptools PEP 440 check + assert_valid_version(version_to_pep440(out)) diff --git a/.pc/retain-setuptools.patch/tox.ini b/.pc/retain-setuptools.patch/tox.ini new file mode 100644 index 00000000..d3d877d4 --- /dev/null +++ b/.pc/retain-setuptools.patch/tox.ini @@ -0,0 +1,319 @@ +[tox] +envlist = + py3, + black, + ruff, + isort, + mypy, + pylint + +[doc8] +ignore-path-errors=doc/rtd/topics/faq.rst;D001 + +[testenv] +package = skip +basepython = python3 +setenv = + LC_ALL = en_US.utf-8 +passenv = + PYTEST_ADDOPTS + HYPOTHESIS_PROFILE +deps = + -r{toxinidir}/requirements-all.txt + -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + +[types] +deps = + # each release of type stubs relates to a specific version of a library + # so leave these unpinned + types-jsonschema + types-Jinja2 + types-oauthlib + types-passlib + types-PyYAML + types-requests + typing-extensions + +[pinned_versions] +deps = + {[types]deps} + black==25.1.0 + hypothesis==6.111.0 + hypothesis_jsonschema==0.23.1 + isort==6.0.1 + mypy==1.17.1 + pylint==3.3.8 + ruff==0.12.9 + +[latest_versions] +deps = + {[types]deps} + black + hypothesis + hypothesis_jsonschema + isort + mypy + pylint + ruff + +[files] +schema = cloudinit/config/schemas/schema-cloud-config-v1.json +version = cloudinit/config/schemas/versions.schema.cloud-config.json +network_v1 = cloudinit/config/schemas/schema-network-config-v1.json +network_v2 = cloudinit/config/schemas/schema-network-config-v2.json + +[testenv:ruff] +deps = {[pinned_versions]deps} +commands = {envpython} -m ruff check {posargs:.} + +[testenv:pylint] +deps = + -r{toxinidir}/integration-requirements.txt + {[pinned_versions]deps} + {[testenv]deps} +commands = {envpython} -m pylint {posargs:cloudinit/ tests/ tools/} + +[testenv:black] +deps = {[pinned_versions]deps} +commands 
= {envpython} -m black --check {posargs:.} + +[testenv:isort] +deps = {[pinned_versions]deps} +commands = {envpython} -m isort --check-only --diff {posargs:.} + +[testenv:mypy] +deps = + -r{toxinidir}/integration-requirements.txt + {[testenv]deps} + {[pinned_versions]deps} +commands = {envpython} -m mypy {posargs:cloudinit/ tests/ tools/} + +[testenv:check_format] +deps = + -r{toxinidir}/integration-requirements.txt + {[testenv]deps} + {[pinned_versions]deps} +commands = + {envpython} -m ruff check {posargs:.} + {envpython} -m pylint {posargs:cloudinit/ tests/ tools/} + {envpython} -m black --check {posargs:.} + {envpython} -m isort --check-only --diff {posargs:.} + {envpython} -m mypy {posargs:cloudinit/ tests/ tools/} + +[testenv:check_format_tip] +deps = + -r{toxinidir}/integration-requirements.txt + {[testenv]deps} + {[latest_versions]deps} +commands = + {envpython} -m ruff check {posargs:.} + {envpython} -m pylint {posargs:.} + {envpython} -m black --check {posargs:.} + {envpython} -m isort --check-only --diff {posargs:.} + {envpython} -m mypy {posargs:cloudinit/ tests/ tools/} + +[testenv:do_format] +deps = {[pinned_versions]deps} +commands = + {envpython} -m isort . + {envpython} -m black . + {envpython} -m json.tool --indent 2 {[files]schema} {[files]schema} + {envpython} -m json.tool --indent 2 {[files]version} {[files]version} + {envpython} -m json.tool --indent 2 {[files]network_v1} {[files]network_v1} + {envpython} -m json.tool --indent 2 {[files]network_v2} {[files]network_v2} + +[testenv:do_format_tip] +deps = {[latest_versions]deps} +commands = + {envpython} -m isort . + {envpython} -m black . 
+ {envpython} -m json.tool --indent 2 {[files]schema} {[files]schema} + {envpython} -m json.tool --indent 2 {[files]version} {[files]version} + {envpython} -m json.tool --indent 2 {[files]network_v1} {[files]network_v1} + {envpython} -m json.tool --indent 2 {[files]network_v2} {[files]network_v2} + +[testenv:py3] +commands = {envpython} -m pytest -vv -m "not hypothesis_slow" --cov=cloudinit --cov-branch {posargs:tests/unittests} + +[testenv:py3-fast] +deps = + {[testenv]deps} + pytest-xdist +commands = {envpython} -m pytest -n auto -m "not hypothesis_slow" -m "not serial" {posargs:tests/unittests} + +[testenv:hypothesis-slow] +deps = {[pinned_versions]deps} + {[testenv]deps} +commands = {envpython} -m pytest \ + -m hypothesis_slow \ + --hypothesis-show-statistics \ + {posargs:tests/unittests} + +#commands = {envpython} -X tracemalloc=40 -Werror::ResourceWarning:cloudinit -m pytest \ +[testenv:py3-leak] +commands = {envpython} -X tracemalloc=40 -Wall -m pytest {posargs:tests/unittests} + +# generates html coverage report from the most recent pytest run +[testenv:coverage-html] +deps = {[testenv]deps} +commands = coverage html -i + +# prints out the coverage report "table" from the most recent pytest run +[testenv:coverage-report] +deps = {[testenv]deps} +commands = coverage report -i + +[testenv:lowest-supported] +# Tox is going to install requirements from pip. This is fine for +# testing python version compatibility, but when we build cloud-init, we are +# building against the dependencies in the OS repo, not pip. The OS +# dependencies will generally be older than what is found in pip. + +# To obtain these versions, check the versions of these libraries +# in the oldest support Ubuntu distro. Theses versions are from bionic. 
+deps = + jinja2==2.10.1 + oauthlib==3.1.0 + pyserial==3.4 + configobj==5.0.6 + pyyaml==5.3.1 + requests==2.22.0 + jsonpatch==1.23 + jsonschema==3.2.0 + # test-requirements + pytest==4.6.9 + pytest-cov==2.8.1 + pytest-mock==1.10.4 + responses==0.9.0 + passlib + # required for this version of jinja2 + markupsafe==2.0.1 +commands = {envpython} -m pytest -m "not hypothesis_slow" --cov=cloud-init --cov-branch {posargs:tests/unittests} + +[testenv:doc] +deps = -r{toxinidir}/doc-requirements.txt +commands = + {envpython} -m sphinx {posargs:-W doc/rtd doc/rtd_html} + {envpython} -m doc8 doc/rtd +passenv = + CLOUD_INIT_* + +[testenv:doc-spelling] +deps = -r{toxinidir}/doc-requirements.txt +commands = {envpython} -m sphinx -b spelling {posargs:-W doc/rtd doc/rtd_html} + +# linkcheck shows false positives and has noisy output. +# Despite these limitations, it is better than a manual search of the docs. +# suggested workflow is: +# +# tox -e linkcheck | grep broken # takes some time +# +# followed by manual verification of the links reported +[testenv:linkcheck] +deps = -r{toxinidir}/doc-requirements.txt +commands = + {envpython} -m sphinx {posargs:-b linkcheck doc/rtd doc/rtd_html} + +[testenv:tip-ruff] +deps = {[latest_versions]deps} +commands = {envpython} -m ruff check {posargs:.} + +[testenv:tip-mypy] +deps = + -r{toxinidir}/integration-requirements.txt + {[testenv]deps} + {[latest_versions]deps} +commands = {envpython} -m mypy {posargs:cloudinit/ tests/ tools/} + +[testenv:tip-pylint] +deps = {[latest_versions]deps} + -r{toxinidir}/integration-requirements.txt + {[testenv]deps} +commands = {envpython} -m pylint {posargs:cloudinit/ tests/ tools/} + +[testenv:tip-black] +deps = {[latest_versions]deps} +commands = {envpython} -m black --check {posargs:.} + +[testenv:tip-isort] +deps = {[latest_versions]deps} +commands = {envpython} -m isort --check-only --diff {posargs:.} + +[testenv:integration-tests] +deps = -r{toxinidir}/integration-requirements.txt +commands = 
{envpython} -m pytest --log-cli-level=INFO {posargs:tests/integration_tests} +passenv = + CLOUD_INIT_* + PYCLOUDLIB_* + SSH_AUTH_SOCK + OS_* + +[testenv:integration-tests-fast] +deps = + -r{toxinidir}/integration-requirements.txt + -r{toxinidir}/test-requirements.txt +commands = {envpython} -m pytest --log-cli-level=INFO -n auto -m "not hypothesis_slow" -m "not serial" {posargs:tests/integration_tests} +passenv = + CLOUD_INIT_* + PYCLOUDLIB_* + SSH_AUTH_SOCK + OS_* + +[testenv:integration-tests-ci] +deps = -r{toxinidir}/integration-requirements.txt +commands = {envpython} -m pytest --log-cli-level=INFO {posargs:tests/integration_tests} +passenv = + CLOUD_INIT_* + SSH_AUTH_SOCK + OS_* + GITHUB_* +setenv = + PYTEST_ADDOPTS="-m ci and not adhoc" + +[testenv:integration-tests-jenkins] +# Pytest's RC=1 means "Tests were collected and run but some of the tests failed". +# Do not fail in this case, but let Jenkins handle it using the junit report. +deps = -r{toxinidir}/integration-requirements.txt +allowlist_externals = sh +commands = sh -c "{envpython} -m pytest --log-cli-level=INFO {posargs:tests/integration_tests/none} || [ $? 
-eq 1 ]" +passenv = + *_proxy + CLOUD_INIT_* + PYCLOUDLIB_* + SSH_AUTH_SOCK + OS_* + GOOGLE_* + GCP_* +setenv = + PYTEST_ADDOPTS="-m not adhoc" + ON_JENKINS="1" + +[pytest] +# TODO: s/--strict/--strict-markers/ once pytest version is high enough +testpaths = tools tests/unittests +addopts = --strict +log_format = %(asctime)s %(levelname)-9s %(name)s:%(filename)s:%(lineno)d %(message)s +log_date_format = %Y-%m-%d %H:%M:%S +markers = + adhoc: only run on adhoc basis, not in any CI environment (travis or jenkins) + allow_all_subp: allow all subp usage (disable_subp_usage) + allow_subp_for: allow subp usage for the given commands (disable_subp_usage) + ci: run this integration test as part of CI test runs + ds_sys_cfg: a sys_cfg dict to be used by datasource fixtures + hypothesis_slow: hypothesis test too slow to run as unit test + instance_name: the name to be used for the test instance + integration_cloud_args: args for IntegrationCloud customization + is_iscsi: whether is an instance has iscsi net cfg or not + lxd_config_dict: set the config_dict passed on LXD instance creation + lxd_setup: specify callable to be called between init and start + lxd_use_exec: `execute` will use `lxc exec` instead of SSH + serial: tests that do not work in parallel, skipped with py3-fast + unstable: skip this test because it is flakey + user_data: the user data to be passed to the test instance + allow_dns_lookup: disable autochecking for host network configuration + +[coverage:paths] +source = + cloudinit/ + /usr/lib/python3/dist-packages/cloudinit/ diff --git a/.readthedocs.yaml b/.readthedocs.yaml index f5a56263..edbc723e 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -3,13 +3,14 @@ version: 2 build: os: "ubuntu-22.04" tools: - python: "3.10" + python: "3.11" +sphinx: + configuration: doc/rtd/conf.py formats: all python: install: - - path: . 
- requirements: doc-requirements.txt sphinx: diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index a4cb2de6..c27c8625 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -30,9 +30,9 @@ Before you can begin, you will need to: ## Getting help -We use IRC and have a dedicated `#cloud-init` channel where you can contact +We use Matrix and have a dedicated `#cloud-init` channel where you can contact us for help and guidance. This link will take you directly to our -[IRC channel on Libera](https://kiwiirc.com/nextclient/irc.libera.chat/cloud-init). +[Matrix room](https://matrix.to/#/#cloud-init:ubuntu.com). Please don't be afraid to reach out if you need help constructing your pull request. diff --git a/ChangeLog b/ChangeLog index a9f62d02..49d71ac8 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,93 @@ +25.3 + - chore(cc_rh_subscription): deprecate hyphenated fields, remove self.log + (#6470) [Mostafa Abdelwahab] (GH: 6370) + - feat(eni): support "ip route" if available instead of "route" (#6460) + [dermotbradley] + - test(distros): Convert test_hosts.py from unittest to pytest (#6481) + [GautamThorani] + - fix: preserve special permission bits (#6467) [deepsghimire] + - test: Convert test_registry.py from unittest to pytest (#6479) + [GautamThorani] + - test: fix integration for PROPOSED to run apt update prior to install + (#6477) + - feat(schema): add support for the not keyword (#6469) + [Mostafa Abdelwahab] (GH: 6468) + - fix: correct netplan renderer target (#6465) + - feat(hetzner): enable hotplug support and prepare IPv6 integration + (#6445) [Philipp Bender] + - fix(net-convert): respect output directory with netplan + - fix(bddeb): require exec permissions on temporary folder + - fix: properly quote commands in `do_as` function (#6400) + [Amirhossein Shaerpour] (GH: 6171) + - Optionalize bash completion. 
(#6441) [3405691582] + - docs(nocloud): Fix file schema examples (#6455) (GH: 5843) + - fix(systemd): revert auditd.service dependency (#6448) + - fix: bddeb tool needs distro debian param for package dependency lookup + - fix(test): raises match against an empty string will always pass + - fix(test): marks applied to fixtures have no effect + - test: refactor test_reporting.py to use only pytest (#6449) + [Chijioke Ibekwe] + - Update wait_for_cloud_init.rst to be more consistent about systemd usage + (#6442) [ExplGamma] + - fix: add OpenRC support to Meson build (#6426) [dermotbradley] + - feat(rh_subscription): new release_version field (#6347) + [Mostafa Abdelwahab] (GH: 6030) + - chore: bump isort version (#6446) + - net_convert.py: make some import failures not generate an error (#6399) + [dermotbradley] + - test: fix lru_cache test wrapper (#6443) (GH: 5869) + - docs(cc_raspberry_pi): Fix example4 (#6439) [Paul] + - test: refactor test_data.py to use only pytest (#6440) [Chijioke Ibekwe] + - tests: convert util.pathprefix2dict tests from unittest to pytest + (#6433) [Aamir] + - chore: hand off responsibilities (#6432) + - feat: support nmap in socket protocol (#6339) (GH: 6136) + - feat(cc_raspberry_pi): remove remote_gpio interface (#6429) [Paul] + - fix(docs): Fix typo in `fDQn` (#6420) [Vladimir Levin] + - Refresh dev docs for tests (#6428) + - fix(raspberry-pi-os): Update ntp client (#6425) [Paul] + - fixed defs types in cloud-init cmd devel make_mime (#6419) [abdulganiyy] + - test: remove CiTestCase fully (#6416) + - test: drop citestcase from test_reporting_hyperv.py (#6413) + - test: drop citestcase from net/test_init.py (#6410) + - test: drop citestcase from test_version.py (#6415) + - fix: read-dependencies syntax error on regex (#6326) + - doc: add testing links to doc/rtd Development headings (#6326) + - chore: update developer specfile and build for opensuse (#6326) + - chore: update developer specfile redhat/fedora and dependencies (#6326) + - 
chore: drop yaml validation target, covered in unittests (#6326) + - chore: update debian developer packaging template (#6326) + - fix: add apt-get update before development pkg install (#6326) + - chore!: switch to meson.build due to PEP632 (#6326) (LP: #1978328) + - chore: render vlan, bond, bridge mac properly in network state (#6397) + [Shreenidhi Shedi] + - chore: replace all mailing-list references with GH Discussions + - chore: replace all IRC references with matrix + - fix: fixed defs types in cloud-init analyze show (#6353) [abdulganiyy] + - chore(tox): align check_format's pylint folder targets (#6414) + - test: drop citestcase from net/test_network_state.py (#6402) + - test: drop citestcase from net/test_dhcp.py (#6401) + - chore(ci): bump pylint version, fix test failures (#6408) + - chore(ci): fix cloud-init test failures from new pylint (#6407) + - chore(ci): bump versions for black, ruff, and mypy (#6406) + - doc: change `jammy` to `noble` (#6398) [Amirhossein Shaerpour] + - test: drop citestcase from t* tests (#6379) + - fix: add missing test mock (#6395) + - test: drop citestcase net_freebsd (#6374) + - test: drop citestcase from test_util.py (#6387) + - Add 'to: default' support in network state (#6391) [Shreenidhi Shedi] + - test: drop citestcase from s* tests (#6378) + - test: drop citestcase from r, s and u sources (#6373) + - test: drop citestcase from v sources (#6372) + - test: drop citestcase from n and o sources (#6363) + - fix: make tools/tox-venv with current versions of tox (#6365) + [Scott Moser] + - test: skip gpg dependency test on questing (#6388) + - feat(networkd): add support for rendering bridge devices (#6349) + [Shreenidhi Shedi] + - cloud-init-hotplugd: remove use of "exec" and unnecessary exit (#6357) + [dermotbradley] (GH: 6351) + 25.2 - fix: Ensure 822 template renders correctly on Debian (#6381) (GH: 6380) - test: support systemctl try-reload-or-restart messaging alternatives diff --git a/Makefile b/Makefile index 
6d4a0b50..5d3a9b75 100644 --- a/Makefile +++ b/Makefile @@ -1,9 +1,6 @@ CWD=$(shell pwd) VARIANT ?= ubuntu -YAML_FILES=$(shell find cloudinit tests tools -name "*.yaml" -type f ) -YAML_FILES+=$(shell find doc/examples -name "cloud-config*.txt" -type f ) - PYTHON ?= python3 NUM_ITER ?= 100 @@ -19,7 +16,7 @@ BENCHMARK=./tools/benchmark.sh all: check -check: check_version test yaml +check: check_version test style-check: lint @@ -93,9 +90,6 @@ clean_release: clean: clean_pyc clean_pytest clean_packaging clean_release rm -rf doc/rtd_html .tox .coverage tags $(GENERATOR_F) -yaml: - @$(PYTHON) $(CWD)/tools/validate-yaml.py $(YAML_FILES) - rpm: $(PYTHON) ./packages/brpm --distro=$(distro) @@ -125,7 +119,7 @@ fmt-tip: tox -e do_format_tip && tox -e check_format_tip -.PHONY: all check test lint clean rpm srpm deb deb-src yaml +.PHONY: all check test lint clean rpm srpm deb deb-src .PHONY: check_version clean_pyc .PHONY: unittest style-check render-template benchmark-generator .PHONY: clean_pytest clean_packaging clean_release doc diff --git a/README.md b/README.md index fa34fd64..67c98a35 100644 --- a/README.md +++ b/README.md @@ -28,10 +28,8 @@ If you need support, start with the [user documentation](https://docs.cloud-init If you need additional help consider reaching out with one of the following options: -- Ask a question in the [``#cloud-init`` IRC channel on Libera](https://kiwiirc.com/nextclient/irc.libera.chat/cloud-init) -- Search the cloud-init [mailing list archive](https://lists.launchpad.net/cloud-init/) -* Follow announcements or ask a question on [the cloud-init Discourse forum](https://discourse.ubuntu.com/c/server/cloud-init/) -- Join the [cloud-init mailing list](https://launchpad.net/~cloud-init) and participate +- Ask a question in the [``#cloud-init`` channel on Matrix](https://matrix.to/#/#cloud-init:ubuntu.com) +- Follow announcements or ask a question on [GitHub Discussions](https://github.com/canonical/cloud-init/discussions) - Find a bug? 
[Report bugs on GitHub Issues](https://github.com/canonical/cloud-init/issues) ## Distribution and cloud support diff --git a/SECURITY.md b/SECURITY.md index ebb8a7e5..7c4df9b2 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -5,7 +5,7 @@ The following documents the upstream cloud-init security policy. ## Reporting If a security bug is found, please send an email to -cloud-init-security@lists.canonical.com . After the bug is received, + . After the bug is received, the issue is triaged within 2 working days of being reported and a response is sent to the reporter. @@ -44,7 +44,7 @@ determined time for disclosure has arrived the following will occur: * A public bug is filed/made public with vulnerability details, CVE, mitigations and where to obtain the fix -* An email is sent to the [public cloud-init mailing list](https://lists.launchpad.net/cloud-init/) +* An announcement is made to [GitHub Discussions](https://github.com/canonical/cloud-init/discussions) The disclosure timeframe is coordinated with the reporter and members of the cloud-init-security list. This depends on a number of factors: diff --git a/cloudinit/analyze/__init__.py b/cloudinit/analyze/__init__.py index 60ff8daa..ea2144cb 100644 --- a/cloudinit/analyze/__init__.py +++ b/cloudinit/analyze/__init__.py @@ -115,7 +115,7 @@ def get_parser( return parser -def analyze_boot(name: str, args: argparse.Namespace) -> int: +def analyze_boot(name: str, args: argparse.Namespace) -> str: """Report a list of how long different boot operations took. 
For Example: diff --git a/cloudinit/analyze/dump.py b/cloudinit/analyze/dump.py index dd1b9c32..5837852b 100644 --- a/cloudinit/analyze/dump.py +++ b/cloudinit/analyze/dump.py @@ -179,6 +179,10 @@ def dump_events( data = rawdata.splitlines() elif cisource is not None: data = cisource.readlines() + else: + raise ValueError( + "Either cisource or rawdata parameters must have a value" + ) for line in data: for match in CI_EVENT_MATCHES: diff --git a/cloudinit/analyze/show.py b/cloudinit/analyze/show.py index b4491bca..56e317bf 100644 --- a/cloudinit/analyze/show.py +++ b/cloudinit/analyze/show.py @@ -8,6 +8,7 @@ import json import sys import time +from typing import IO, Any, Dict, List, Optional, Tuple, Union, cast from cloudinit import subp, util from cloudinit.distros import uses_systemd @@ -50,8 +51,10 @@ CONTAINER_CODE = "container" TIMESTAMP_UNKNOWN = (FAIL_CODE, -1, -1, -1) +Event = Dict -def format_record(msg, event): + +def format_record(msg: str, event: Event) -> str: for i, j in format_key.items(): if i in msg: # ensure consistent formatting of time values @@ -62,56 +65,68 @@ def format_record(msg, event): return msg.format(**event) -def event_name(event): +def event_name(event: Optional[Event]) -> Optional[str]: if event: return event.get("name") return None -def event_type(event): +def event_type(event: Optional[Event]) -> Optional[str]: if event: return event.get("event_type") return None -def event_parent(event): - if event: - return event_name(event).split("/")[0] +def event_parent(event: Optional[Event]) -> Optional[str]: + name = event_name(event) + if name: + return name.split("/")[0] return None -def event_timestamp(event): - return float(event.get("timestamp")) +def event_timestamp(event: Event) -> float: + ts = event.get("timestamp") + if ts is None: + raise ValueError("Event is missing a 'timestamp'") + return float(ts) -def event_datetime(event): +def event_datetime(event: Event) -> datetime.datetime: return datetime.datetime.fromtimestamp( 
event_timestamp(event), datetime.timezone.utc ) -def delta_seconds(t1, t2): +def delta_seconds(t1: datetime.datetime, t2: datetime.datetime) -> float: return (t2 - t1).total_seconds() -def event_duration(start, finish): +def event_duration(start: Event, finish: Event) -> float: return delta_seconds(event_datetime(start), event_datetime(finish)) -def event_record(start_time, start, finish): +def event_record( + start_time: datetime.datetime, + start: Event, + finish: Event, +) -> Event: record = finish.copy() + name = event_name(start) + indent = "|" + if name: + indent += " " * (name.count("/") - 1) + "`->" record.update( { "delta": event_duration(start, finish), "elapsed": delta_seconds(start_time, event_datetime(start)), - "indent": "|" + " " * (event_name(start).count("/") - 1) + "`->", + "indent": indent, } ) return record -def total_time_record(total_time): +def total_time_record(total_time: float) -> str: return "Total Time: %3.5f seconds\n" % total_time @@ -120,9 +135,10 @@ class SystemctlReader: Class for dealing with all systemctl subp calls in a consistent manner. """ - def __init__(self, property, parameter=None): - self.stdout = None - self.args = [subp.which("systemctl"), "show"] + def __init__(self, property: str, parameter: Optional[str] = None): + self.stdout: Optional[str] = None + self.args: Any = [subp.which("systemctl"), "show"] + if parameter: self.args.append(parameter) # --timestamp=utc is needed for native date strings. Othwerise, @@ -135,9 +151,9 @@ def __init__(self, property, parameter=None): # Don't want the init of our object to break. 
Instead of throwing # an exception, set an error code that gets checked when data is # requested from the object - self.failure = self.subp() + self.failure = self._subp() - def subp(self): + def _subp(self) -> Optional[Union[str, Exception]]: """ Make a subp call based on set args and handle errors by setting failure code @@ -153,7 +169,7 @@ def subp(self): except Exception as systemctl_fail: return systemctl_fail - def convert_val_to_float(self): + def convert_val_to_float(self) -> float: """ If subp call succeeded, return the timestamp from subp as a float. @@ -167,7 +183,15 @@ def convert_val_to_float(self): "Subprocess call to systemctl has failed, " "returning error code ({})".format(self.failure) ) + # this should never happen as the call to subp succeeded + + if self.stdout is None: + raise RuntimeError( + "stdout of subprocess call to systemctl is None" + ) + # Output from systemctl show has the format Property=Value. + val = self.stdout.split("=")[1].strip() if val.isnumeric(): @@ -191,7 +215,7 @@ def convert_val_to_float(self): return timestamp -def dist_check_timestamp(): +def dist_check_timestamp() -> Tuple[str, float, float, float]: """ Determine which init system a particular linux distro is using. Each init system (systemd, etc) has a different way of @@ -213,7 +237,7 @@ def dist_check_timestamp(): return TIMESTAMP_UNKNOWN -def gather_timestamps_using_dmesg(): +def gather_timestamps_using_dmesg() -> Tuple[str, float, float, float]: """ Gather timestamps that corresponds to kernel begin initialization, kernel finish initialization using dmesg as opposed to systemctl @@ -244,7 +268,7 @@ def gather_timestamps_using_dmesg(): return TIMESTAMP_UNKNOWN -def gather_timestamps_using_systemd(): +def gather_timestamps_using_systemd() -> Tuple[str, float, float, float]: """ Gather timestamps that corresponds to kernel begin initialization, kernel finish initialization. 
and cloud-init systemd unit activation @@ -302,9 +326,9 @@ def gather_timestamps_using_systemd(): def generate_records( - events, - print_format="(%n) %d seconds in %I%D", -): + events: List[Event], + print_format: str = "(%n) %d seconds in %I%D", +) -> List[List[str]]: """ Take in raw events and create parent-child dependencies between events in order to order events in chronological order. @@ -317,7 +341,7 @@ def generate_records( """ sorted_events = sorted(events, key=lambda x: x["timestamp"]) - records = [] + records: List[str] = [] start_time = None total_time = 0.0 stage_start_time = {} @@ -346,6 +370,9 @@ def generate_records( # see if we have a pair if event_name(event) == event_name(next_evt): if event_type(next_evt) == "finish": + # next_evt is a dictionary because event_type has extracted + # an event type from it: + next_evt = cast(Dict, next_evt) records.append( format_record( print_format, @@ -360,12 +387,15 @@ def generate_records( else: prev_evt = unprocessed.pop() if event_name(event) == event_name(prev_evt): - record = event_record(start_time, prev_evt, event) - records.append( - format_record("Finished stage: (%n) %d seconds", record) - + "\n" - ) - total_time += record.get("delta") + if start_time: + record = event_record(start_time, prev_evt, event) + records.append( + format_record( + "Finished stage: (%n) %d seconds", record + ) + + "\n" + ) + total_time += record.get("delta") or 0.0 else: # not a match, put it back unprocessed.append(prev_evt) @@ -375,7 +405,7 @@ def generate_records( return boot_records -def show_events(events, print_format): +def show_events(events: List[Event], print_format: str) -> List[List[str]]: """ A passthrough method that makes it easier to call generate_records() @@ -388,7 +418,7 @@ def show_events(events, print_format): return generate_records(events, print_format=print_format) -def load_events_infile(infile): +def load_events_infile(infile: IO) -> Tuple[Optional[Any], str]: """ Takes in a log file, read it, and 
convert to json. diff --git a/cloudinit/cmd/devel/make_mime.py b/cloudinit/cmd/devel/make_mime.py index 8b2c68cc..5411ad60 100755 --- a/cloudinit/cmd/devel/make_mime.py +++ b/cloudinit/cmd/devel/make_mime.py @@ -9,6 +9,7 @@ import sys from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText +from typing import List, Optional from cloudinit.handlers import INCLUSION_TYPES_MAP @@ -22,7 +23,7 @@ def create_mime_message(files): sub_messages = [] - errors = [] + errors: List[str] = [] for i, (fh, filename, format_type) in enumerate(files): contents = fh.read() sub_message = MIMEText(contents, format_type, sys.getdefaultencoding()) @@ -43,18 +44,20 @@ def create_mime_message(files): return (combined_message, errors) -def file_content_type(text): +def file_content_type(text: str): """Return file content type by reading the first line of the input.""" try: filename, content_type = text.split(":", 1) return (open(filename, "r"), filename, content_type.strip()) except ValueError as e: raise argparse.ArgumentError( - text, "Invalid value for %r" % (text) + None, "Invalid value for %r" % (text) ) from e -def get_parser(parser=None): +def get_parser( + parser: Optional[argparse.ArgumentParser] = None, +) -> argparse.ArgumentParser: """Build or extend and arg parser for make-mime utility. @param parser: Optional existing ArgumentParser instance representing the @@ -94,7 +97,7 @@ def get_parser(parser=None): return parser -def get_content_types(strip_prefix=False): +def get_content_types(strip_prefix: bool = False) -> List[str]: """Return a list of cloud-init supported content types. Optionally strip out the leading 'text/' of the type if strip_prefix=True. """ @@ -106,7 +109,7 @@ def get_content_types(strip_prefix=False): ) -def handle_args(name, args): +def handle_args(name: str, args: argparse.Namespace) -> int: """Create a multi-part MIME archive for use as user-data. Optionally print out the list of supported content types of cloud-init. 
diff --git a/cloudinit/cmd/devel/net_convert.py b/cloudinit/cmd/devel/net_convert.py index b3c2fa7c..eafb11f1 100755 --- a/cloudinit/cmd/devel/net_convert.py +++ b/cloudinit/cmd/devel/net_convert.py @@ -20,9 +20,28 @@ networkd, sysconfig, ) -from cloudinit.sources import DataSourceAzure as azure -from cloudinit.sources.helpers import openstack -from cloudinit.sources.helpers.vmware.imc import guestcust_util + +try: + from cloudinit.sources import DataSourceAzure as azure +except ImportError: + azure_kind_available = False +else: + azure_kind_available = True + +try: + from cloudinit.sources.helpers import openstack +except ImportError: + openstack_kind_available = False +else: + openstack_kind_available = True + +try: + from cloudinit.sources.helpers.vmware.imc import guestcust_util +except ImportError: + vmware_kind_available = False +else: + vmware_kind_available = True + NAME = "net-convert" @@ -45,16 +64,22 @@ def get_parser(parser=None): required=True, help="The network configuration to read", ) + + available_kinds = ["eni", "yaml"] + + if azure_kind_available: + available_kinds.append("azure-imds") + + if openstack_kind_available: + available_kinds.append("network_data.json") + + if vmware_kind_available: + available_kinds.append("vmware-imc") + parser.add_argument( "-k", "--kind", - choices=[ - "eni", - "network_data.json", - "yaml", - "azure-imds", - "vmware-imc", - ], + choices=available_kinds, required=True, help="The format of the given network config", ) diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py index d8d1d80c..05b9336d 100644 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py @@ -80,6 +80,10 @@ class SubcommandAwareArgumentParser(argparse.ArgumentParser): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._raw_args = None + def parse_args(self, args=None, namespace=None): """Override parse_args to store raw arguments for error handling.""" self._raw_args = args @@ -91,7 +95,7 @@ def error(self, 
message): # Scan for the first valid subcommand - if not hasattr(self, "_raw_args"): + if not self._raw_args: self._raw_args = sys.argv[1:] subcommand = None if self._raw_args: @@ -345,7 +349,7 @@ def _should_bring_up_interfaces(init, args): def _should_wait_via_user_data( - raw_config: Optional[Union[str, bytes]] + raw_config: Optional[Union[str, bytes]], ) -> Tuple[bool, Reason]: """Determine if our cloud-config requires us to wait @@ -767,7 +771,7 @@ def main_modules(action_name, args): util.logexc(LOG, msg) print_exc(msg) if not args.force: - return [(msg)] + return [msg] _maybe_persist_instance_data(init) # Stage 3 mods = Modules(init, extract_fns(args), reporter=args.reporter) diff --git a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py index 4b89977f..7344a933 100644 --- a/cloudinit/config/cc_ca_certs.py +++ b/cloudinit/config/cc_ca_certs.py @@ -259,6 +259,8 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: " Ignoring ca-certs." ) ca_cert_cfg = cfg.get("ca_certs", cfg.get("ca-certs")) + if not isinstance(ca_cert_cfg, dict): + raise TypeError("unexpected type: {ca_cert_cfg}") distro_cfg = _distro_ca_certs_configs(cloud.distro.name) # If there is a remove_defaults option set to true, disable the system diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py index 058d27ea..f6162272 100644 --- a/cloudinit/config/cc_disk_setup.py +++ b/cloudinit/config/cc_disk_setup.py @@ -1112,7 +1112,7 @@ def exec_mkpart_gpt_sgdisk(device, layout): def exec_mkpart_gpt_sfdisk(device, layout): cmd = "" # Promote partition types to GPT partition GUIDs - for partition_type, (start, end) in layout: + for partition_type, (_, end) in layout: partition_type = str(partition_type).ljust(4, "0") if len(partition_type) == 4 and partition_type in sgdisk_to_gpt_id: partition_type = sgdisk_to_gpt_id[partition_type] diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py index 9d2de69c..4530e971 100644 --- 
a/cloudinit/config/cc_mounts.py +++ b/cloudinit/config/cc_mounts.py @@ -514,18 +514,20 @@ def mount_if_needed( subp.subp(["systemctl", "daemon-reload"]) -def cleanup_fstab(ds_remove_entries: list = []) -> None: +def cleanup_fstab(ds_remove_entry: Optional[str] = None) -> None: if not os.path.exists(FSTAB_PATH): return - base_entry = [MNT_COMMENT] + remove_entries = [MNT_COMMENT] + if ds_remove_entry: + remove_entries.append(ds_remove_entry) with open(FSTAB_PATH, "r") as f: lines = f.readlines() new_lines = [] changed = False for line in lines: - if all(entry in line for entry in [*base_entry, *ds_remove_entries]): + if all(entry in line for entry in remove_entries): changed = True continue new_lines.append(line) diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py index a8f1defb..adc17bf0 100644 --- a/cloudinit/config/cc_ntp.py +++ b/cloudinit/config/cc_ntp.py @@ -213,8 +213,8 @@ }, }, "raspberry-pi-os": { - "chrony": { - "confpath": "/etc/chrony/chrony.conf", + "systemd-timesyncd": { + "check_exe": "/usr/lib/systemd/systemd-timesyncd", }, }, "rhel": { diff --git a/cloudinit/config/cc_raspberry_pi.py b/cloudinit/config/cc_raspberry_pi.py index a80a8f08..2af8db03 100644 --- a/cloudinit/config/cc_raspberry_pi.py +++ b/cloudinit/config/cc_raspberry_pi.py @@ -22,7 +22,6 @@ "i2c": "do_i2c", "serial": "do_serial", "onewire": "do_onewire", - "remote_gpio": "do_rgpio", } RASPI_CONFIG_SERIAL_CONS_FN = "do_serial_cons" RASPI_CONFIG_SERIAL_HW_FN = "do_serial_hw" diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py index 893ec780..9033f454 100644 --- a/cloudinit/config/cc_rh_subscription.py +++ b/cloudinit/config/cc_rh_subscription.py @@ -24,7 +24,7 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: - sm = SubscriptionManager(cfg, log=LOG) + sm = SubscriptionManager(cfg) if not sm.is_configured(): LOG.debug("%s: module not configured.", name) return None @@ -40,21 +40,21 @@ def handle(name: str, cfg: 
Config, cloud: Cloud, args: list) -> None: "Registration failed or did not run completely" ) - # Splitting up the registration, auto-attach, and servicelevel + # Splitting up the registration, auto_attach, and servicelevel # commands because the error codes, messages from subman are not # specific enough. # Attempt to change the service level if sm.auto_attach and sm.servicelevel is not None: if not sm._set_service_level(): - raise SubscriptionError("Setting of service-level failed") + raise SubscriptionError("Setting of service_level failed") else: - sm.log.debug("Completed auto-attach with service level") + LOG.debug("Completed auto_attach with service level") elif sm.auto_attach: if not sm._set_auto_attach(): - raise SubscriptionError("Setting auto-attach failed") + raise SubscriptionError("Setting auto_attach failed") else: - sm.log.debug("Completed auto-attach") + LOG.debug("Completed auto_attach") if sm.pools is not None: if not isinstance(sm.pools, list): @@ -69,12 +69,16 @@ def handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None: return_stat = sm.update_repos() if not return_stat: raise SubscriptionError("Unable to add or remove repos") - sm.log_success("rh_subscription plugin completed successfully") + if sm.release_version: + sm._set_release_version() + sm._delete_packagemanager_cache() + + LOG.info("rh_subscription plugin completed successfully") except SubscriptionError as e: - sm.log_warn(str(e)) - sm.log_warn("rh_subscription plugin did not complete successfully") + LOG.warning(str(e)) + LOG.warning("rh_subscription plugin did not complete successfully") else: - sm.log_success("System is already registered") + LOG.info("System is already registered") class SubscriptionError(Exception): @@ -82,45 +86,43 @@ class SubscriptionError(Exception): class SubscriptionManager: - valid_rh_keys = [ - "org", - "activation-key", - "username", - "password", - "disable-repo", - "enable-repo", - "add-pool", - "rhsm-baseurl", - "server-hostname", - 
"auto-attach", - "service-level", - ] - - def __init__(self, cfg, log=None): - if log is None: - log = LOG - self.log = log - self.cfg = cfg - self.rhel_cfg = self.cfg.get("rh_subscription", {}) - self.rhsm_baseurl = self.rhel_cfg.get("rhsm-baseurl") - self.server_hostname = self.rhel_cfg.get("server-hostname") - self.pools = self.rhel_cfg.get("add-pool") - self.activation_key = self.rhel_cfg.get("activation-key") + + def __init__(self, cfg): + self.rhel_cfg = cfg.get("rh_subscription", {}) self.org = self.rhel_cfg.get("org") self.userid = self.rhel_cfg.get("username") self.password = self.rhel_cfg.get("password") - self.auto_attach = self.rhel_cfg.get("auto-attach") - self.enable_repo = self.rhel_cfg.get("enable-repo") - self.disable_repo = self.rhel_cfg.get("disable-repo") - self.servicelevel = self.rhel_cfg.get("service-level") - - def log_success(self, msg): - """Simple wrapper for logging info messages. Useful for unittests""" - self.log.info(msg) - - def log_warn(self, msg): - """Simple wrapper for logging warning messages. Useful for unittests""" - self.log.warning(msg) + self.release_version = self.rhel_cfg.get("release_version") + + # The hyphenated fields have been deprecated in this module. + # For new fields in the future, do not use hyphenated fields. 
+ # The json schema validator (ran before initializing + # SubscriptionManager) already validated that the user did not + # pass a deprecated key and its corresponding new key at the same time + self.rhsm_baseurl = self.rhel_cfg.get( + "rhsm_baseurl", self.rhel_cfg.get("rhsm-baseurl") + ) + self.server_hostname = self.rhel_cfg.get( + "server_hostname", self.rhel_cfg.get("server-hostname") + ) + self.pools = self.rhel_cfg.get( + "add_pool", self.rhel_cfg.get("add-pool") + ) + self.activation_key = self.rhel_cfg.get( + "activation_key", self.rhel_cfg.get("activation-key") + ) + self.auto_attach = self.rhel_cfg.get( + "auto_attach", self.rhel_cfg.get("auto-attach") + ) + self.enable_repo = self.rhel_cfg.get( + "enable_repo", self.rhel_cfg.get("enable-repo") + ) + self.disable_repo = self.rhel_cfg.get( + "disable_repo", self.rhel_cfg.get("disable-repo") + ) + self.servicelevel = self.rhel_cfg.get( + "service_level", self.rhel_cfg.get("service-level") + ) def _verify_keys(self): """ @@ -128,21 +130,12 @@ def _verify_keys(self): are what we expect. """ - for k in self.rhel_cfg: - if k not in self.valid_rh_keys: - bad_key = ( - "{0} is not a valid key for rh_subscription. " - "Valid keys are: " - "{1}".format(k, ", ".join(self.valid_rh_keys)) - ) - return False, bad_key - - # Check for bad auto-attach value + # Check for bad auto_attach value if (self.auto_attach is not None) and not ( util.is_true(self.auto_attach) or util.is_false(self.auto_attach) ): not_bool = ( - "The key auto-attach must be a boolean value (True/False " + "The key auto_attach must be a boolean value (True/False)" ) return False, not_bool @@ -150,11 +143,18 @@ def _verify_keys(self): (not self.auto_attach) or (util.is_false(str(self.auto_attach))) ): no_auto = ( - "The service-level key must be used in conjunction " - "with the auto-attach key. Please re-run with " - "auto-attach: True" + "The service_level key must be used in conjunction " + "with the auto_attach key. 
Please re-run with " + "auto_attach: True" ) return False, no_auto + + # Not verifying the release_version statically in _verify_keys + # (by verifying the key is in the output of + # `subscription-manager release --list`) because sometimes + # the release will become available only after enabling some repos + # (which is executed after verify_keys). So we will catch this error + # during "subscription-manager release --set=" return True, None def is_registered(self): @@ -198,9 +198,7 @@ def rhn_register(self): return_out = _sub_man_cli(cmd, logstring_val=True)[0] except subp.ProcessExecutionError as e: if e.stdout == "": - self.log_warn( - "Registration failed due to: {0}".format(e.stderr) - ) + LOG.warning("Registration failed due to: %s", e.stderr) return False elif (self.userid is not None) and (self.password is not None): @@ -225,22 +223,20 @@ def rhn_register(self): return_out = _sub_man_cli(cmd, logstring_val=True)[0] except subp.ProcessExecutionError as e: if e.stdout == "": - self.log_warn( - "Registration failed due to: {0}".format(e.stderr) - ) + LOG.warning("Registration failed due to: %s", e.stderr) return False else: - self.log_warn( + LOG.warning( "Unable to register system due to incomplete information." 
) - self.log_warn( + LOG.warning( "Use either activationkey and org *or* userid and password" ) return False reg_id = return_out.split("ID: ")[1].rstrip() - self.log.debug("Registered successfully with ID %s", reg_id) + LOG.debug("Registered successfully with ID %s", reg_id) return True def _set_service_level(self): @@ -256,17 +252,16 @@ def _set_service_level(self): if e.stdout.rstrip() != "": for line in e.stdout.split("\n"): if line != "": - self.log_warn(line) + LOG.warning(line) else: - self.log_warn( - "Setting the service level failed with: {0}".format( - e.stderr.strip() - ) + LOG.warning( + "Setting the service level failed with: %s", + e.stderr.strip(), ) return False for line in return_out.split("\n"): if line != "": - self.log.debug(line) + LOG.debug(line) return True def _set_auto_attach(self): @@ -274,11 +269,11 @@ def _set_auto_attach(self): try: return_out = _sub_man_cli(cmd)[0] except subp.ProcessExecutionError as e: - self.log_warn("Auto-attach failed with: {0}".format(e)) + LOG.warning("auto_attach failed with: %s", e) return False for line in return_out.split("\n"): if line != "": - self.log.debug(line) + LOG.debug(line) return True def _getPools(self): @@ -331,7 +326,7 @@ def addPool(self, pools): # An empty list was passed if not pools: - self.log.debug("No pools to attach") + LOG.debug("No pools to attach") return True pool_available, pool_consumed = self._getPools() @@ -341,20 +336,18 @@ def addPool(self, pools): if (pool not in pool_consumed) and (pool in pool_available): pool_list.append("--pool={0}".format(pool)) else: - self.log_warn("Pool {0} is not available".format(pool)) + LOG.warning("Pool %s is not available", pool) if len(pool_list) > 0: cmd.extend(pool_list) try: _sub_man_cli(cmd) - self.log.debug( + LOG.debug( "Attached the following pools to your system: %s", (", ".join(pool_list)).replace("--pool=", ""), ) return True except subp.ProcessExecutionError as e: - self.log_warn( - "Unable to attach pool {0} due to {1}".format(pool, 
e) - ) + LOG.warning("Unable to attach pool %s due to %s", pool, e) return False def update_repos(self): @@ -371,16 +364,16 @@ def update_repos(self): if drepos is None: drepos = [] if not isinstance(erepos, list): - self.log_warn("Repo IDs must in the format of a list.") + LOG.warning("Repo IDs must in the format of a list.") return False if not isinstance(drepos, list): - self.log_warn("Repo IDs must in the format of a list.") + LOG.warning("Repo IDs must in the format of a list.") return False # Bail if both lists are not populated if not (erepos) and not (drepos): - self.log.debug("No repo IDs to enable or disable") + LOG.debug("No repo IDs to enable or disable") return True active_repos, inactive_repos = self._getRepos() @@ -407,14 +400,12 @@ def update_repos(self): for fail in enable_list_fail: # Check if the repo exists or not if fail in active_repos: - self.log.debug("Repo %s is already enabled", fail) + LOG.debug("Repo %s is already enabled", fail) else: - self.log_warn( - "Repo {0} does not appear to exist".format(fail) - ) + LOG.warning("Repo %s does not appear to exist", fail) if len(disable_list_fail) > 0: for fail in disable_list_fail: - self.log.debug( + LOG.debug( "Repo %s not disabled because it is not enabled", fail ) @@ -428,16 +419,16 @@ def update_repos(self): try: _sub_man_cli(cmd) except subp.ProcessExecutionError as e: - self.log_warn("Unable to alter repos due to {0}".format(e)) + LOG.warning("Unable to alter repos due to %s", e) return False if len(enable_list) > 0: - self.log.debug( + LOG.debug( "Enabled the following repos: %s", (", ".join(enable_list)).replace("--enable=", ""), ) if len(disable_list) > 0: - self.log.debug( + LOG.debug( "Disabled the following repos: %s", (", ".join(disable_list)).replace("--disable=", ""), ) @@ -446,6 +437,34 @@ def update_repos(self): def is_configured(self): return bool((self.userid and self.password) or self.activation_key) + def _set_release_version(self): + """ + Execute "subscription-manager 
release --set=" + Raises Subscription error if the command fails + """ + + cmd = ["release", f"--set={self.release_version}"] + try: + _sub_man_cli(cmd) + except subp.ProcessExecutionError as e: + raise SubscriptionError( + f"Unable to set release_version using: {cmd}" + ) from e + + def _delete_packagemanager_cache(self): + """ + Delete the package manager cache. + Raises Subscription error if the deletion fails + """ + LOG.debug("Deleting the package manager cache") + try: + util.del_dir("/var/cache/dnf") + util.del_dir("/var/cache/yum") + except Exception as e: + raise SubscriptionError( + "Unable to delete the package manager cache" + ) from e + def _sub_man_cli(cmd, logstring_val=False): """ diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py index ac9ed601..250ae3c9 100644 --- a/cloudinit/config/cc_write_files.py +++ b/cloudinit/config/cc_write_files.py @@ -112,6 +112,9 @@ def write_files(name, files, owner: str, ssl_details: Optional[dict] = None): path, contents, omode=omode, mode=perms, user=u, group=g ) util.chownbyname(path, u, g) + if util.get_permissions(path) != perms: + # Original setuid bit permissions were cleared due to chown + util.chmod(path, perms) def decode_perms(perm, default): diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py index 08e956b6..6be53a3b 100644 --- a/cloudinit/config/schema.py +++ b/cloudinit/config/schema.py @@ -500,11 +500,12 @@ def is_valid(self, instance, _schema=None, **__): It does ignore instances of `SchemaDeprecationError`. 
""" + validator = self if _schema is None else self.evolve(schema=_schema) errors = filter( lambda e: not isinstance( # pylint: disable=W1116 e, SchemaDeprecationError ), - self.evolve(schema=_schema).iter_errors(instance), + validator.iter_errors(instance), ) return next(errors, None) is None diff --git a/cloudinit/config/schemas/schema-cloud-config-v1.json b/cloudinit/config/schemas/schema-cloud-config-v1.json index 3e051048..19b40e54 100644 --- a/cloudinit/config/schemas/schema-cloud-config-v1.json +++ b/cloudinit/config/schemas/schema-cloud-config-v1.json @@ -666,6 +666,47 @@ }, "minProperties": 1 }, + "rh_subscription_activation_key": { + "type": "string", + "description": "The activation key to use. Must be used with **org**. Should not be used with **username** or **password**." + }, + "rh_subscription_auto_attach": { + "type": "boolean", + "description": "Whether to attach subscriptions automatically." + }, + "rh_subscription_service_level": { + "type": "string", + "description": "The service level to use when subscribing to RH repositories. ``auto_attach`` must be true for this to be used." + }, + "rh_subscription_add_pool": { + "type": "array", + "description": "A list of pool IDs add to the subscription.", + "items": { + "type": "string" + } + }, + "rh_subscription_enable_repo": { + "type": "array", + "description": "A list of repositories to enable.", + "items": { + "type": "string" + } + }, + "rh_subscription_disable_repo": { + "type": "array", + "description": "A list of repositories to disable.", + "items": { + "type": "string" + } + }, + "rh_subscription_rhsm_baseurl": { + "type": "string", + "description": "Sets the baseurl in ``/etc/rhsm/rhsm.conf``." + }, + "rh_subscription_server_hostname": { + "type": "string", + "description": "Sets the serverurl in ``/etc/rhsm/rhsm.conf``." + }, "modules_definition": { "type": "array", "items": { @@ -2580,18 +2621,23 @@ "properties": { "username": { "type": "string", - "description": "The username to use. 
Must be used with password. Should not be used with **activation-key** or **org**." + "description": "The username to use. Must be used with password. Should not be used with **activation_key** or **org**." }, "password": { "type": "string", - "description": "The password to use. Must be used with username. Should not be used with **activation-key** or **org**." + "description": "The password to use. Must be used with username. Should not be used with **activation_key** or **org**." + }, + "activation_key": { + "$ref": "#/$defs/rh_subscription_activation_key" }, "activation-key": { - "type": "string", - "description": "The activation key to use. Must be used with **org**. Should not be used with **username** or **password**." + "$ref": "#/$defs/rh_subscription_activation_key", + "deprecated": true, + "deprecated_version": "25.3", + "deprecated_description": "Use **activation_key** instead." }, "org": { - "description": "The organization to use. Must be used with **activation-key**. Should not be used with **username** or **password**.", + "description": "The organization to use. Must be used with **activation_key**. Should not be used with **username** or **password**.", "oneOf": [ { "type": "string" @@ -2604,44 +2650,140 @@ } ] }, + "auto_attach": { + "$ref": "#/$defs/rh_subscription_auto_attach" + }, "auto-attach": { - "type": "boolean", - "description": "Whether to attach subscriptions automatically." + "$ref": "#/$defs/rh_subscription_auto_attach", + "deprecated": true, + "deprecated_version": "25.3", + "deprecated_description": "Use **auto_attach** instead." + }, + "service_level": { + "$ref": "#/$defs/rh_subscription_service_level" }, "service-level": { - "type": "string", - "description": "The service level to use when subscribing to RH repositories. ``auto-attach`` must be true for this to be used." 
+ "$ref": "#/$defs/rh_subscription_service_level", + "deprecated": true, + "deprecated_version": "25.3", + "deprecated_description": "Use **service_level** instead." + }, + "add_pool": { + "$ref": "#/$defs/rh_subscription_add_pool" }, "add-pool": { - "type": "array", - "description": "A list of pools ids add to the subscription.", - "items": { - "type": "string" - } + "$ref": "#/$defs/rh_subscription_add_pool", + "deprecated": true, + "deprecated_version": "25.3", + "deprecated_description": "Use **add_pool** instead." + }, + "enable_repo": { + "$ref": "#/$defs/rh_subscription_enable_repo" }, "enable-repo": { - "type": "array", - "description": "A list of repositories to enable.", - "items": { - "type": "string" - } + "$ref": "#/$defs/rh_subscription_enable_repo", + "deprecated": true, + "deprecated_version": "25.3", + "deprecated_description": "Use **enable_repo** instead." + }, + "disable_repo": { + "$ref": "#/$defs/rh_subscription_disable_repo" }, "disable-repo": { - "type": "array", - "description": "A list of repositories to disable.", - "items": { - "type": "string" - } + "$ref": "#/$defs/rh_subscription_disable_repo", + "deprecated": true, + "deprecated_version": "25.3", + "deprecated_description": "Use **disable_repo** instead." }, - "rhsm-baseurl": { + "release_version": { "type": "string", - "description": "Sets the baseurl in ``/etc/rhsm/rhsm.conf``." + "description": "Sets the release_version via``subscription-manager release --set=`` then deletes the package manager cache ``/var/cache/{dnf,yum}`` . These steps are applied after any pool attachment and/or enabling/disabling repos. For more information about this key, check https://access.redhat.com/solutions/238533 ." + }, + "rhsm_baseurl": { + "$ref": "#/$defs/rh_subscription_rhsm_baseurl" + }, + "rhsm-baseurl": { + "$ref": "#/$defs/rh_subscription_rhsm_baseurl", + "deprecated": true, + "deprecated_version": "25.3", + "deprecated_description": "Use **rhsm_baseurl** instead." 
+ }, + "server_hostname": { + "$ref": "#/$defs/rh_subscription_server_hostname" }, "server-hostname": { - "type": "string", - "description": "Sets the serverurl in ``/etc/rhsm/rhsm.conf``." + "$ref": "#/$defs/rh_subscription_server_hostname", + "deprecated": true, + "deprecated_version": "25.3", + "deprecated_description": "Use **server_hostname** instead." } - } + }, + "allOf": [ + { + "not": { + "required": [ + "activation_key", + "activation-key" + ] + } + }, + { + "not": { + "required": [ + "auto_attach", + "auto-attach" + ] + } + }, + { + "not": { + "required": [ + "service_level", + "service-level" + ] + } + }, + { + "not": { + "required": [ + "add_pool", + "add-pool" + ] + } + }, + { + "not": { + "required": [ + "enable_repo", + "enable-repo" + ] + } + }, + { + "not": { + "required": [ + "disable_repo", + "disable-repo" + ] + } + }, + { + "not": { + "required": [ + "rhsm_baseurl", + "rhsm-baseurl" + ] + } + }, + { + "not": { + "required": [ + "server_hostname", + "server-hostname" + ] + } + } + ] } } }, @@ -2692,11 +2834,6 @@ "type": "boolean", "description": "Enable 1-Wire interface. Default: ``false``.", "default": false - }, - "remote_gpio": { - "type": "boolean", - "description": "Enable remote GPIO interface. Default: ``false``.", - "default": false } } }, @@ -3328,7 +3465,7 @@ "properties": { "manage_etc_hosts": { "default": false, - "description": "Whether to manage ``/etc/hosts`` on the system. If ``true``, render the hosts file using ``/etc/cloud/templates/hosts.tmpl`` replacing ``$hostname`` and ``$fdqn``. If ``localhost``, append a ``127.0.1.1`` entry that resolves from FQDN and hostname every boot. Default: ``false``.", + "description": "Whether to manage ``/etc/hosts`` on the system. If ``true``, render the hosts file using ``/etc/cloud/templates/hosts.tmpl`` replacing ``$hostname`` and ``$fqdn``. If ``localhost``, append a ``127.0.1.1`` entry that resolves from FQDN and hostname every boot. 
Default: ``false``.", "oneOf": [ { "enum": [ diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index eb58095c..2c9daff2 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -13,6 +13,7 @@ import logging import os import re +import shlex import stat import string import urllib.parse @@ -1419,7 +1420,9 @@ def do_as(self, command: list, user: str, cwd: str = "", **kwargs): "-", user, "-c", - directory + "env PATH=$PATH " + " ".join(command), + directory + + "env PATH=$PATH " + + " ".join(shlex.quote(arg) for arg in command), ], **kwargs, ) diff --git a/cloudinit/log/loggers.py b/cloudinit/log/loggers.py index fd83c994..a3e5a78c 100644 --- a/cloudinit/log/loggers.py +++ b/cloudinit/log/loggers.py @@ -8,7 +8,7 @@ # # This file is part of cloud-init. See LICENSE file for license information. -import collections.abc +import collections.abc # pylint: disable=import-error import copy import io import logging diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py index a580ff0d..823a94a9 100644 --- a/cloudinit/net/eni.py +++ b/cloudinit/net/eni.py @@ -424,19 +424,40 @@ def _render_route(self, route: dict, indent: str = "") -> List[str]: how-to-set-static-routes-in-ubuntu-server """ content = [] - up = indent + "post-up route add" - down = indent + "pre-down route del" + if subp.which("ip"): + use_ip_cmd = True + else: + use_ip_cmd = False + + if use_ip_cmd: + up = indent + "post-up ip" + down = indent + "pre-down ip" + mapping = { + "gateway": "via", + "metric": "metric", + } + else: + up = indent + "post-up route add" + down = indent + "pre-down route del" + mapping = { + "gateway": "gw", + "metric": "metric", + } or_true = " || true" - mapping = { - "gateway": "gw", - "metric": "metric", - } default_gw = "" if route["network"] == "0.0.0.0" and route["netmask"] == "0.0.0.0": + if use_ip_cmd: + up += " route add" + down += " route del" default_gw = " default" elif route["network"] == "::" and route["prefix"] == 0: - 
default_gw = " -A inet6 default" + if use_ip_cmd: + up += " -family inet6 route add" + down += " -family inet6 route del" + default_gw = " default" + else: + default_gw = " -A inet6 default" route_line = "" for k in ["network", "gateway", "metric"]: @@ -446,12 +467,20 @@ def _render_route(self, route: dict, indent: str = "") -> List[str]: route_line += "%s %s %s" % (default_gw, mapping[k], route[k]) elif k in route: if k == "network": - if is_ipv6_address(route[k]): - route_line += " -A inet6" - elif route.get("prefix") == 32: - route_line += " -host" + if use_ip_cmd: + if is_ipv6_address(route[k]): + up += " -family inet6" + down += " -family inet6" + up += " route add" + down += " route del" else: - route_line += " -net" + if is_ipv6_address(route[k]): + route_line += " -A inet6" + elif route.get("prefix") == 32: + route_line += " -host" + else: + route_line += " -net" + if "prefix" in route: route_line += " %s/%s" % (route[k], route["prefix"]) else: diff --git a/cloudinit/net/ephemeral.py b/cloudinit/net/ephemeral.py index 08e6086e..ad137e56 100644 --- a/cloudinit/net/ephemeral.py +++ b/cloudinit/net/ephemeral.py @@ -1,7 +1,6 @@ # This file is part of cloud-init. See LICENSE file for license information. 
-"""Module for ephemeral network context managers -""" +"""Module for ephemeral network context managers""" import contextlib import logging from functools import partial diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py index ae90db4b..98530d84 100644 --- a/cloudinit/net/netplan.py +++ b/cloudinit/net/netplan.py @@ -243,7 +243,9 @@ def _clean_default(target=None): os.unlink(f) -def netplan_api_write_yaml_file(net_config_content: str) -> bool: +def netplan_api_write_yaml_file( + net_config_content: str, target: Optional[str] = None +) -> bool: """Use netplan.State._write_yaml_file to write netplan config Where netplan python API exists, prefer to use of the private @@ -281,9 +283,11 @@ def netplan_api_write_yaml_file(net_config_content: str) -> bool: # determine default root-dir /etc/netplan and/or specialized # filenames or read permissions based on whether this config # contains secrets. - state_output_file._write_yaml_file( - os.path.basename(CLOUDINIT_NETPLAN_FILE) - ) + if not target: + file = os.path.basename(CLOUDINIT_NETPLAN_FILE) + else: + file = target + state_output_file._write_yaml_file(file) except Exception as e: LOG.warning( "Unable to render network config using netplan python module." @@ -391,8 +395,14 @@ def render_network_state( header += "\n" content = header + content + # Customize target only if explicitly passed in + if target is None: + target_ = target + else: + target_ = fpnplan + netplan_config_changed = has_netplan_config_changed(fpnplan, content) - if not netplan_api_write_yaml_file(content): + if not netplan_api_write_yaml_file(content, target=target_): fallback_write_netplan_yaml(fpnplan, content) if self.clean_default: @@ -431,6 +441,7 @@ def _net_setup_link(self, run=False): # net_setup_link on a device that no longer exists. When this happens, # we don't know what the device was renamed to, so re-gather the # entire list of devices and try again. 
+ last_exception: Optional[Exception] for _ in range(5): try: for iface in get_devicelist(): @@ -438,10 +449,11 @@ def _net_setup_link(self, run=False): subp.subp( setup_lnk + [SYS_CLASS_NET + iface], capture=True ) + last_exception = None break except subp.ProcessExecutionError as e: last_exception = e - else: + if last_exception: raise RuntimeError( "'udevadm test-builtin net_setup_link' unable to run " "successfully for all devices." @@ -527,8 +539,9 @@ def _render_content(self, network_state: NetworkState) -> str: bridge_ports = ifcfg.get("bridge_ports") if bridge_ports is None: LOG.warning( - "Invalid config. The key", - f"'bridge_ports' is required in {config}.", + "Invalid config. The key" + "'bridge_ports' is required in %s.", + config, ) continue ports = sorted(copy.copy(bridge_ports)) diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index 9fd51e42..be87a8f5 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -16,6 +16,7 @@ ipv4_mask_to_net_prefix, ipv6_mask_to_net_prefix, is_ip_network, + is_ipv4_address, is_ipv4_network, is_ipv6_address, is_ipv6_network, @@ -431,6 +432,10 @@ def handle_physical(self, command): "keep_configuration": command.get("keep_configuration"), } ) + + if iface["mac_address"]: + iface["mac_address"] = iface["mac_address"].lower() + iface_key = command.get("config_id", command.get("name")) self._network_state["interfaces"].update({iface_key: iface}) self.dump_network_state() @@ -790,6 +795,7 @@ def handle_vlans(self, command): "name": vlan, "vlan_id": cfg.get("id"), "vlan_link": cfg.get("link"), + "mac_address": cfg.get("macaddress"), } if "mtu" in cfg: vlan_cmd["mtu"] = cfg["mtu"] @@ -847,8 +853,11 @@ def _handle_bond_bridge(self, command, cmd_type=None): cmd_type + "_interfaces": item_cfg.get("interfaces"), "params": dict((v2key_to_v1[k], v) for k, v in params.items()), } + if "mtu" in item_cfg: v1_cmd["mtu"] = item_cfg["mtu"] + if "macaddress" in item_cfg: + 
v1_cmd["mac_address"] = item_cfg["macaddress"] warn_deprecated_all_devices(item_cfg) subnets = self._v2_to_v1_ipcfg(item_cfg) @@ -923,6 +932,7 @@ def _add_dhcp_overrides(overrides, subnet): "gateway": route.get("via"), "metric": route.get("metric"), "mtu": route.get("mtu"), + "table": route.get("table"), } ) ) @@ -990,6 +1000,22 @@ def _normalize_net_keys(network, address_keys=()): raise ValueError(message) addr = str(net.get(addr_key)) + if addr == "default": + gw_ip = str(net.get("gateway")) + if not gw_ip: + message = "Gateway IP is empty" + LOG.error(message) + raise ValueError(message) + + if is_ipv4_address(gw_ip): + addr = "0.0.0.0/0" + elif is_ipv6_address(gw_ip): + addr = "::/0" + else: + message = f"Invalid Gateway IP: '{gw_ip}'" + LOG.error(message) + raise ValueError(message) + if not is_ip_network(addr): LOG.error("Address %s is not a valid ip network", addr) raise ValueError(f"Address {addr} is not a valid ip address") diff --git a/cloudinit/net/networkd.py b/cloudinit/net/networkd.py index 8672e56e..efeedfd2 100644 --- a/cloudinit/net/networkd.py +++ b/cloudinit/net/networkd.py @@ -5,7 +5,8 @@ # This file is part of cloud-init. See LICENSE file for license information. 
import logging -from typing import Any, Dict, List, Optional +from collections import defaultdict +from typing import Any, Callable, Dict, List, Optional, Tuple from cloudinit import subp, util from cloudinit.net import renderer, should_add_gateway_onlink_flag @@ -66,6 +67,8 @@ def __init__(self): "NetDev": [], "VLAN": [], "Bond": [], + "Bridge": [], + "RoutingPolicyRule": {}, } def update_section(self, sec, key, val): @@ -102,7 +105,7 @@ def get_final_conf(self): if k == "Address": for e in sorted(v): contents += f"[{k}]\n{e}\n\n" - elif k == "Route": + elif k in ["Route", "RoutingPolicyRule"]: for n in sorted(v): contents += f"[{k}]\n" for e in sorted(v[n]): @@ -179,6 +182,7 @@ def parse_routes(self, rid, conf, cfg: CfgParser): "gateway": "Gateway", "network": "Destination", "metric": "Metric", + "table": "Table", } # prefix is derived using netmask by network_state @@ -348,6 +352,7 @@ def render_network_state( network = self._render_content(network_state) vlan_netdev = network.pop("vlan_netdev", {}) bond_netdev = network.pop("bond_netdev", {}) + bridge_netdev = network.pop("bridge_netdev", {}) for k, v in network.items(): self.create_network_file(k, v, network_dir) @@ -358,10 +363,34 @@ def render_network_state( for k, v in bond_netdev.items(): self.create_network_file(k, v, network_dir, ext=".netdev") + for k, v in bridge_netdev.items(): + self.create_network_file(k, v, network_dir, ext=".netdev") + + def parseRoutingPolicy(self, policy, cfg: CfgParser): + rid = 0 + key = "RoutingPolicyRule" + for val in policy: + for k, v in val.items(): + cfg.update_route_section(key, rid, k.capitalize(), v) + rid += 1 + + def extractRoutingPolicies(self, nsCfg: Dict): + routingPolicies = {} + key = "routing-policy" + + for section in ("ethernets", "bonds", "vlans", "bridges"): + if section not in nsCfg: + continue + for iface, settings in nsCfg[section].items(): + if key in settings: + routingPolicies[iface] = settings[key] + return routingPolicies + def 
_render_content(self, ns: NetworkState): ret_dict = {} vlan_link = {} bond_link = {} + bridge_link = {} if "vlans" in ns.config: vlan_dict = self.render_vlans(ns) @@ -375,32 +404,50 @@ def _render_content(self, ns: NetworkState): bond_link = bond_dict["bond_link"] ret_dict["bond_netdev"] = bond_netdev + if "bridges" in ns.config: + bridge_dict = self.render_bridges(ns) + bridge_netdev = bridge_dict["bridge_netdev"] + bridge_link = bridge_dict["bridge_link"] + ret_dict["bridge_netdev"] = bridge_netdev + + routingPolicies = self.extractRoutingPolicies(ns.config) + for iface in ns.iter_interfaces(): cfg = CfgParser() iface_name = iface["name"] - vlan_link_name = vlan_link.get(iface_name) - if vlan_link_name: - cfg.update_section("Network", "VLAN", vlan_link_name) + vlan_link_name = vlan_link.get(iface_name, []) + for i in vlan_link_name: + cfg.update_section("Network", "VLAN", i) - # TODO: revisit this once network state renders macaddress - # properly for vlan config - if not iface["mac_address"] and vlan_link.get("macaddress"): - mac = vlan_link["macaddress"].get(iface_name) - if mac: - iface["mac_address"] = mac + rPolicy = routingPolicies.get(iface_name) + if rPolicy: + self.parseRoutingPolicy(rPolicy, cfg) bond_link_name = bond_link.get(iface_name) if bond_link_name: cfg.update_section("Network", "Bond", bond_link_name) - # TODO: revisit this once network state renders macaddress - # properly for bond config - if not iface["mac_address"] and bond_link.get("macaddress"): - mac = bond_link["macaddress"].get(iface_name) - if mac: - iface["mac_address"] = mac + bridge_link_name = bridge_link.get(iface_name) + if bridge_link_name: + val = ( + bridge_link["path-cost"] + .get(bridge_link_name, {}) + .get(iface_name) + ) + if val: + cfg.update_section("Bridge", "Cost", val) + + val = ( + bridge_link["port-priority"] + .get(bridge_link_name, {}) + .get(iface_name) + ) + if val: + cfg.update_section("Bridge", "Priority", val) + + cfg.update_section("Network", "Bridge", 
bridge_link_name) link = self.generate_match_section(iface, cfg) @@ -457,9 +504,8 @@ def _render_content(self, ns: NetworkState): return ret_dict def render_vlans(self, ns: NetworkState) -> dict: - vlan_link_info: Dict[str, Any] = {} + vlan_link_info = defaultdict(list) vlan_ndev_configs = {} - vlan_link_info["macaddress"] = {} vlans = ns.config.get("vlans", {}) for vlan_name, vlan_cfg in vlans.items(): @@ -472,7 +518,7 @@ def render_vlans(self, ns: NetworkState) -> dict: ) continue - vlan_link_info[parent] = vlan_name + vlan_link_info[parent].append(vlan_name) # -------- .netdev for VLAN -------- cfg = CfgParser() @@ -485,26 +531,21 @@ def render_vlans(self, ns: NetworkState) -> dict: val = vlan_cfg.get("macaddress") if val: - val = val.lower() - cfg.update_section("NetDev", "MACAddress", val) - vlan_link_info["macaddress"][vlan_name] = val + cfg.update_section("NetDev", "MACAddress", val.lower()) cfg.update_section("VLAN", "Id", vlan_id) vlan_ndev_configs[vlan_name] = cfg.get_final_conf() - ret_dict = { + return { "vlan_netdev": vlan_ndev_configs, "vlan_link": vlan_link_info, } - return ret_dict def render_bonds(self, ns: NetworkState) -> dict: bond_link_info: Dict[str, Any] = {} bond_ndev_configs = {} section = "Bond" - bond_link_info["macaddress"] = {} - bonds = ns.config.get("bonds", {}) for bond_name, bond_cfg in bonds.items(): interfaces = bond_cfg.get("interfaces") @@ -527,96 +568,119 @@ def render_bonds(self, ns: NetworkState) -> dict: val = bond_cfg.get("macaddress") if val: - val = val.lower() - cfg.update_section("NetDev", "MACAddress", val) - bond_link_info["macaddress"][bond_name] = val + cfg.update_section("NetDev", "MACAddress", val.lower()) # Optional bond parameters params = bond_cfg.get("parameters", {}) - if "mode" in params: - cfg.update_section(section, "Mode", params["mode"]) + ParamMapType = Dict[str, Tuple[str, Callable[[object], str]]] - if "mii-monitor-interval" in params: - cfg.update_section( - section, - "MIIMonitorSec", - 
f"{params['mii-monitor-interval']}ms", - ) - - if "updelay" in params: - cfg.update_section( - section, "UpDelaySec", f"{params['updelay']}ms" - ) - - if "downdelay" in params: - cfg.update_section( - section, "DownDelaySec", f"{params['downdelay']}ms" - ) + param_map: ParamMapType = { + "ad-select": ("AdSelect", str), + "all-slaves-active": ( + "AllSlavesActive", + lambda v: str(v).lower(), + ), + "arp-all-targets": ("ARPAllTargets", str), + "arp-interval": ("ARPIntervalSec", lambda v: f"{v}ms"), + "arp-validate": ("ARPValidate", str), + "down-delay": ("DownDelaySec", lambda v: f"{v}ms"), + "fail-over-mac-policy": ("FailOverMACPolicy", str), + "gratuitous-arp": ("GratuitousARP", str), + "lacp-rate": ("LACPTransmitRate", str), + "learn-packet-interval": ("LearnPacketIntervalSec", str), + "mii-monitor-interval": ("MIIMonitorSec", lambda v: f"{v}ms"), + "min-links": ("MinLinks", str), + "mode": ("Mode", str), + "packets-per-slave": ("PacketsPerSlave", str), + "primary-reselect-policy": ("PrimaryReselectPolicy", str), + "transmit-hash-policy": ("TransmitHashPolicy", str), + "up-delay": ("UpDelaySec", lambda v: f"{v}ms"), + } - if "arp-interval" in params: - cfg.update_section( - section, "ARPIntervalSec", f"{params['arp-interval']}ms" - ) + for key, (option, formatter) in param_map.items(): + if key in params: + cfg.update_section(section, option, formatter(params[key])) - if "arp-ip-target" in params: - targets = params["arp-ip-target"] + if "arp-ip-targets" in params: + targets = params["arp-ip-targets"] if isinstance(targets, str): targets = [targets] - ip_list = " ".join(targets) - cfg.update_section(section, "ARPIPTargets", ip_list) + cfg.update_section(section, "ARPIPTargets", " ".join(targets)) - if "arp-validate" in params: - cfg.update_section( - section, "ARPValidate", params["arp-validate"] - ) + bond_ndev_configs[bond_name] = cfg.get_final_conf() - if "arp-all-targets" in params: - cfg.update_section( - section, "ARPAllTargets", params["arp-all-targets"] - 
) + return { + "bond_netdev": bond_ndev_configs, + "bond_link": bond_link_info, + } - if "primary-reselect" in params: - cfg.update_section( - section, - "PrimaryReselectPolicy", - params["primary-reselect"], - ) + def render_bridges(self, ns: NetworkState) -> dict: + bridge_link_info: Dict[str, Any] = { + "macaddress": {}, + "path-cost": {}, + "port-priority": {}, + } - if "lacp-rate" in params: - cfg.update_section( - section, "LACPTransmitRate", params["lacp-rate"] - ) + bridge_ndev_configs = {} - if "transmit-hash-policy" in params: - cfg.update_section( - section, - "TransmitHashPolicy", - params["transmit-hash-policy"], + bridges = ns.config.get("bridges", {}) + for bridge_name, bridge_cfg in bridges.items(): + interfaces = bridge_cfg.get("interfaces", []) + if not interfaces: + LOG.warning( + "Skipping bridge %s - missing 'interfaces'", bridge_name ) + continue - if "ad-select" in params: - cfg.update_section(section, "AdSelect", params["ad-select"]) + # Map each interface to its bridge + for iface in interfaces: + bridge_link_info[iface] = bridge_name - if "min-links" in params: - cfg.update_section( - section, "MinLinks", str(params["min-links"]) - ) + # ---- .netdev config for the bridge ---- + cfg = CfgParser() + cfg.update_section("NetDev", "Name", bridge_name) + cfg.update_section("NetDev", "Kind", "bridge") - if "all-slaves-active" in params: - cfg.update_section( - section, - "AllSlavesActive", - str(params["all-slaves-active"]).lower(), - ) + val = bridge_cfg.get("mtu") + if val: + cfg.update_section("NetDev", "MTUBytes", val) - bond_ndev_configs[bond_name] = cfg.get_final_conf() + val = bridge_cfg.get("macaddress") + if val: + cfg.update_section("NetDev", "MACAddress", val.lower()) + + # Bridge parameters + params = bridge_cfg.get("parameters", {}) + param_map = { + "ageing-time": "AgeingTimeSec", + "forward-delay": "ForwardDelaySec", + "hello-time": "HelloTimeSec", + "max-age": "MaxAgeSec", + "priority": "Priority", + "stp": "STP", + } + for key, 
sysd_key in param_map.items(): + val = params.get(key) + if val: + if isinstance(val, bool): + val = "yes" if val else "no" + cfg.update_section("Bridge", sysd_key, val) + + val = params.get("path-cost") + if val: + bridge_link_info["path-cost"][bridge_name] = val - ret_dict = { - "bond_netdev": bond_ndev_configs, - "bond_link": bond_link_info, + val = params.get("port-priority") + if val: + bridge_link_info["port-priority"][bridge_name] = val + + bridge_ndev_configs[bridge_name] = cfg.get_final_conf() + + return { + "bridge_netdev": bridge_ndev_configs, + "bridge_link": bridge_link_info, } - return ret_dict def available(target=None): diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 98e4ed93..5d45babd 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -1056,7 +1056,7 @@ def _render_sysconfig( cls._render_bridge_interfaces(network_state, iface_contents, flavor) cls._render_ib_interfaces(network_state, iface_contents, flavor) contents = {} - for iface_name, iface_cfg in iface_contents.items(): + for _, iface_cfg in iface_contents.items(): if iface_cfg or iface_cfg.children: contents[iface_cfg.path] = iface_cfg.to_string() for iface_cfg in iface_cfg.children: diff --git a/cloudinit/socket.py b/cloudinit/socket.py index 98c82886..0a5485a0 100644 --- a/cloudinit/socket.py +++ b/cloudinit/socket.py @@ -5,6 +5,7 @@ import socket import sys from contextlib import suppress +from typing import Dict from cloudinit import performance from cloudinit.settings import DEFAULT_RUN_DIR @@ -55,16 +56,16 @@ def __init__(self, *names: str): :param names: stage names, used as a unique identifiers """ self.stage = "" - self.remote = "" self.first_exception = "" self.systemd_exit_code = 0 self.experienced_any_error = False self.sockets = { name: socket.socket( - socket.AF_UNIX, socket.SOCK_DGRAM | socket.SOCK_CLOEXEC + socket.AF_UNIX, socket.SOCK_STREAM | socket.SOCK_CLOEXEC ) for name in names } + self.connections: Dict[str, 
socket.socket] = {} # ensure the directory exists os.makedirs(f"{DEFAULT_RUN_DIR}/share", mode=0o700, exist_ok=True) # removing stale sockets and bind @@ -73,6 +74,7 @@ def __init__(self, *names: str): with suppress(FileNotFoundError): os.remove(socket_path) sock.bind(socket_path) + sock.listen() def __call__(self, stage: str): """Set the stage before entering context. @@ -116,19 +118,14 @@ def __enter__(self): # reply, which is expected to be /path/to/{self.stage}-return.sock sock = self.sockets[self.stage] with performance.Timed(f"Waiting to start stage {self.stage}"): - chunk, self.remote = sock.recvfrom(5) + connection, _ = sock.accept() + chunk, _ = connection.recvfrom(5) + self.connections[self.stage] = connection if b"start" != chunk: # The protocol expects to receive a command "start" self.__exit__(None, None, None) raise ValueError(f"Received invalid message: [{str(chunk)}]") - elif f"{DEFAULT_RUN_DIR}/share/{self.stage}-return.sock" != str( - self.remote - ): - # assert that the return path is in a directory with appropriate - # permissions - self.__exit__(None, None, None) - raise ValueError(f"Unexpected path to unix socket: {self.remote}") sd_notify(f"STATUS=Running ({self.stage} stage)") return self @@ -156,8 +153,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): self.experienced_any_error = self.experienced_any_error or bool( self.systemd_exit_code ) - sock = self.sockets[self.stage] - sock.connect(self.remote) + sock = self.connections[self.stage] # the returned message will be executed in a subshell # hardcode this message rather than sending a more informative message diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py index 286ed2af..b9a7a995 100644 --- a/cloudinit/sources/DataSourceAliYun.py +++ b/cloudinit/sources/DataSourceAliYun.py @@ -49,9 +49,6 @@ def __init__(self, sys_cfg, distro, paths): self.default_update_events = copy.deepcopy(self.default_update_events) 
self.default_update_events[EventScope.NETWORK].add(EventType.BOOT) - def _unpickle(self, ci_pkl_version: int) -> None: - super()._unpickle(ci_pkl_version) - def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False): hostname = self.metadata.get("hostname") is_default = False diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 3d54bede..0b1183ec 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -1676,7 +1676,7 @@ def _cleanup_resourcedisk_fstab(self): by cloud-init i.e. lines containing "/dev/disk/cloud/azure_resource" and cloudconfig comment. """ - cc_mounts.cleanup_fstab([RESOURCE_DISK_PATH]) + cc_mounts.cleanup_fstab(RESOURCE_DISK_PATH) def clean(self): # Azure-specific cleanup logic for "cloud-init clean -c datasource" diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py index 80dcfb13..789245c0 100644 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -94,6 +94,7 @@ def __init__(self, sys_cfg, distro, paths): # http:///latest/ self.api_ver = "latest" self.cfg = {} + self.vr_addr = None def _get_domainname(self): """ diff --git a/cloudinit/sources/DataSourceHetzner.py b/cloudinit/sources/DataSourceHetzner.py index 53a950f8..2aaba2a5 100644 --- a/cloudinit/sources/DataSourceHetzner.py +++ b/cloudinit/sources/DataSourceHetzner.py @@ -9,81 +9,149 @@ import logging import cloudinit.sources.helpers.hetzner as hc_helper -from cloudinit import dmi, net, sources, util +from cloudinit import dmi, net, sources, url_helper, util +from cloudinit.event import EventScope, EventType from cloudinit.net.dhcp import NoDHCPLeaseError -from cloudinit.net.ephemeral import EphemeralDHCPv4 +from cloudinit.net.ephemeral import EphemeralIPNetwork LOG = logging.getLogger(__name__) -BASE_URL_V1 = "http://169.254.169.254/hetzner/v1" - BUILTIN_DS_CONFIG = { - "metadata_url": BASE_URL_V1 + 
"/metadata", - "userdata_url": BASE_URL_V1 + "/userdata", + "metadata_path": "metadata", + "metadata_private_networks_path": "metadata/private-networks", + "userdata_path": "userdata", } MD_RETRIES = 60 MD_TIMEOUT = 2 MD_WAIT_RETRY = 2 +MD_MAX_WAIT = 120 +MD_SLEEP_TIME = 2 +# Do not re-configure the network on non-Hetzner network interface +# changes. Currently, Hetzner private network addresses start with 0x86. +EXTRA_HOTPLUG_UDEV_RULES = """ +SUBSYSTEM=="net", ATTR{address}=="86:*", GOTO="cloudinit_hook" +GOTO="cloudinit_end" +""" -class DataSourceHetzner(sources.DataSource): +def base_urls_v1(): + return ( + f"http://[fe80::a9fe:a9fe%25{net.find_fallback_nic()}]/hetzner/v1/", + "http://169.254.169.254/hetzner/v1/", + ) + + +class DataSourceHetzner(sources.DataSource): dsname = "Hetzner" + default_update_events = { + EventScope.NETWORK: { + EventType.BOOT_NEW_INSTANCE, + EventType.BOOT, + EventType.HOTPLUG, + } + } + def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.distro = distro - self.metadata = dict() + self.metadata = {} self.ds_cfg = util.mergemanydict( [ util.get_cfg_by_path(sys_cfg, ["datasource", "Hetzner"], {}), BUILTIN_DS_CONFIG, ] ) - self.metadata_address = self.ds_cfg["metadata_url"] - self.userdata_address = self.ds_cfg["userdata_url"] + self.metadata_path = self.ds_cfg["metadata_path"] + self.metadata_private_networks_path = self.ds_cfg[ + "metadata_private_networks_path" + ] + self.userdata_path = self.ds_cfg["userdata_path"] self.retries = self.ds_cfg.get("retries", MD_RETRIES) self.timeout = self.ds_cfg.get("timeout", MD_TIMEOUT) self.wait_retry = self.ds_cfg.get("wait_retry", MD_WAIT_RETRY) + self.max_wait = self.ds_cfg.get("max_wait", MD_MAX_WAIT) + self.sleep_time = self.ds_cfg.get("sleep_time", MD_SLEEP_TIME) self._network_config = sources.UNSET self.dsmode = sources.DSMODE_NETWORK self.metadata_full = None + self.extra_hotplug_udev_rules = EXTRA_HOTPLUG_UDEV_RULES + + def 
_unpickle(self, ci_pkl_version: int) -> None: + super()._unpickle(ci_pkl_version) + self.extra_hotplug_udev_rules = EXTRA_HOTPLUG_UDEV_RULES + self.wait_retry = self.ds_cfg.get("wait_retry", MD_WAIT_RETRY) + self.max_wait = self.ds_cfg.get("max_wait", MD_MAX_WAIT) + self.sleep_time = self.ds_cfg.get("sleep_time", MD_SLEEP_TIME) + self.metadata_path = self.ds_cfg["metadata_path"] + self.metadata_private_networks_path = self.ds_cfg[ + "metadata_private_networks_path" + ] + self.userdata_path = self.ds_cfg["userdata_path"] + def _get_data(self): (on_hetzner, serial) = get_hcloud_data() if not on_hetzner: return False + base_urls = base_urls_v1() try: - with EphemeralDHCPv4( + with EphemeralIPNetwork( self.distro, - iface=net.find_fallback_nic(), + interface=net.find_fallback_nic(), + ipv4=True, + ipv6=True, connectivity_urls_data=[ { - "url": BASE_URL_V1 + "/metadata/instance-id", + "url": url_helper.combine_url( + url, f"{self.metadata_path}/instance-id" + ) } + for url in base_urls ], ): - md = hc_helper.read_metadata( - self.metadata_address, + url, contents = hc_helper.get_metadata( + [ + url_helper.combine_url(url, self.metadata_path) + for url in base_urls + ], + max_wait=self.max_wait, timeout=self.timeout, - sec_between=self.wait_retry, - retries=self.retries, + sleep_time=self.sleep_time, ) - ud = hc_helper.read_userdata( - self.userdata_address, + LOG.debug("Using metadata source: '%s'", url) + md = util.load_yaml(contents.decode(), allowed=(dict, list)) + url, contents = hc_helper.get_metadata( + [ + url_helper.combine_url( + url, self.metadata_private_networks_path + ) + for url in base_urls + ], + max_wait=self.max_wait, timeout=self.timeout, - sec_between=self.wait_retry, - retries=self.retries, + sleep_time=self.sleep_time, ) - pn = hc_helper.read_metadata( - self.metadata_address + "/private-networks", + LOG.debug("Using private_networks source: '%s'", url) + md["private-networks"] = util.load_yaml( + contents.decode(), allowed=(dict, list) + ) + url, 
ud = hc_helper.get_metadata( + [ + url_helper.combine_url(url, self.userdata_path) + for url in base_urls + ], + max_wait=self.max_wait, timeout=self.timeout, - sec_between=self.wait_retry, - retries=self.retries, + sleep_time=self.sleep_time, ) + LOG.debug("Using userdata source: '%s'", url) + if not ud: + LOG.debug("Got empty userdata") except NoDHCPLeaseError as e: LOG.error("Bailing, DHCP Exception: %s", e) raise @@ -105,7 +173,7 @@ def _get_data(self): self.metadata["local-hostname"] = md["hostname"] self.metadata["network-config"] = md.get("network-config", None) self.metadata["public-keys"] = md.get("public-keys", None) - self.metadata["private-networks"] = pn + self.metadata["private-networks"] = md.get("private-networks", []) self.vendordata_raw = md.get("vendor_data", None) # instance-id and serial from SMBIOS should be identical @@ -138,19 +206,37 @@ def network_config(self): if self._network_config != sources.UNSET: return self._network_config - _net_config = self.metadata["network-config"] - if not _net_config: + net_config = self.metadata["network-config"] + if not net_config: raise RuntimeError("Unable to get meta-data from server....") - self._network_config = _net_config - + private_networks = self.metadata.get("private-networks", []) + private_networks_config = [] + for private_network in private_networks: + private_networks_config.append( + { + "type": "physical", + "mac_address": private_network["mac_address"], + "name": hc_helper.get_interface_name_from_mac( + private_network["mac_address"] + ), + "subnets": [ + { + "ipv4": True, + "type": "dhcp", + } + ], + } + ) + net_config["config"].extend(private_networks_config) + self._network_config = net_config return self._network_config def get_hcloud_data(): vendor_name = dmi.read_dmi_data("system-manufacturer") if vendor_name != "Hetzner": - return (False, None) + return False, None serial = dmi.read_dmi_data("system-serial-number") if serial: @@ -158,7 +244,7 @@ def get_hcloud_data(): else: raise 
RuntimeError("Hetzner Cloud detected, but no serial found") - return (True, serial) + return True, serial # Used to match classes to dependencies diff --git a/cloudinit/sources/DataSourceOracle.py b/cloudinit/sources/DataSourceOracle.py index 881e33d5..e74e1cba 100644 --- a/cloudinit/sources/DataSourceOracle.py +++ b/cloudinit/sources/DataSourceOracle.py @@ -470,7 +470,7 @@ def read_opc_metadata( fetch_vnics_data: bool = False, max_wait=DataSourceOracle.url_max_wait, timeout=DataSourceOracle.url_timeout, - metadata_patterns: List[str] = [IPV4_METADATA_PATTERN], + metadata_patterns: Optional[List[str]] = None, ) -> Optional[ReadOpcMetadataResponse]: """ Fetch metadata from the /opc/ routes from the IMDS. @@ -487,6 +487,8 @@ def read_opc_metadata( This allows for later determining if v1 or v2 endppoint was used and whether the IMDS was reached via IPv4 or IPv6. """ + if metadata_patterns is None: + metadata_patterns = [IPV4_METADATA_PATTERN] urls = [ metadata_pattern.format(version=version, path="instance") for version in [2, 1] diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py index 43b4b8bd..b2e8740b 100644 --- a/cloudinit/sources/DataSourceScaleway.py +++ b/cloudinit/sources/DataSourceScaleway.py @@ -235,7 +235,7 @@ def _crawl_metadata(self): ) @staticmethod - def ds_detect(): + def ds_detect() -> bool: """ There are three ways to detect if you are on Scaleway: @@ -254,6 +254,7 @@ def ds_detect(): cmdline = util.get_cmdline() if "scaleway" in cmdline: return True + return False def _get_data(self): diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index b42ec3fa..820c0925 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -26,9 +26,9 @@ net, performance, type_utils, + user_data, + util, ) -from cloudinit import user_data as ud -from cloudinit import util from cloudinit.atomic_helper import write_json from cloudinit.distros import Distro from cloudinit.event import 
EventScope, EventType @@ -354,7 +354,7 @@ def __init__(self, sys_cfg, distro: Distro, paths: Paths, ud_proc=None): self.ds_cfg = {} if not ud_proc: - self.ud_proc = ud.UserDataProcessor(self.paths) + self.ud_proc = user_data.UserDataProcessor(self.paths) else: self.ud_proc = ud_proc diff --git a/cloudinit/sources/helpers/hetzner.py b/cloudinit/sources/helpers/hetzner.py index 50fbcb04..68abd4dc 100644 --- a/cloudinit/sources/helpers/hetzner.py +++ b/cloudinit/sources/helpers/hetzner.py @@ -3,22 +3,41 @@ # # This file is part of cloud-init. See LICENSE file for license information. -from cloudinit import url_helper, util +from typing import Optional, Tuple +from cloudinit import net, url_helper -def read_metadata(url, timeout=2, sec_between=2, retries=30): - response = url_helper.readurl( - url, timeout=timeout, sec_between=sec_between, retries=retries - ) - if not response.ok(): - raise RuntimeError("unable to read metadata at %s" % url) - return util.load_yaml(response.contents.decode(), allowed=(dict, list)) +def _skip_retry_on_empty_response(cause: url_helper.UrlError) -> bool: + return cause.code != 204 -def read_userdata(url, timeout=2, sec_between=2, retries=30): - response = url_helper.readurl( - url, timeout=timeout, sec_between=sec_between, retries=retries - ) - if not response.ok(): - raise RuntimeError("unable to read userdata at %s" % url) - return response.contents + +def get_metadata( + urls, + max_wait=120, + timeout=2, + sleep_time=2, +) -> Tuple[Optional[str], bytes]: + try: + url, contents = url_helper.wait_for_url( + urls=urls, + max_wait=max_wait, + timeout=timeout, + sleep_time=sleep_time, + # It is ok for userdata to not exist (that's why we are stopping if + # HTTP code is 204) and just in that case returning an empty + # string. 
+ exception_cb=_skip_retry_on_empty_response, + ) + if not url: + raise RuntimeError("No data received from urls: '%s':" % urls) + return url, contents + except url_helper.UrlError as e: + if e.code == 204: + return e.url, b"" + raise + + +def get_interface_name_from_mac(mac: str) -> Optional[str]: + mac_to_iface = net.get_interfaces_by_mac() + return mac_to_iface.get(mac.lower()) diff --git a/cloudinit/subp.py b/cloudinit/subp.py index eebf009d..12fba876 100644 --- a/cloudinit/subp.py +++ b/cloudinit/subp.py @@ -258,7 +258,7 @@ def subp( ] try: with performance.Timed( - "Running {}".format(logstring if logstring else args) + "Running {!r}".format(logstring if logstring else args) ): sp = subprocess.Popen( bytes_args, diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 9734bdbd..bae99b03 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -85,7 +85,7 @@ def ftp_get_return_code_from_exception(exc) -> int: } code = ftp_error_codes.get(type(exc)) # pyright: ignore if not code: - if isinstance(exc, OSError): + if isinstance(exc, OSError) and exc.errno: code = exc.errno else: LOG.warning( @@ -387,13 +387,15 @@ def _get_retry_after(retry_after: str) -> float: """ try: to_wait = float(retry_after) - except ValueError: + except ValueError as exc: # Translate a date such as "Fri, 31 Dec 1999 23:59:59 GMT" # into seconds to wait try: time_tuple = parsedate(retry_after) if not time_tuple: - raise ValueError("Failed to parse Retry-After header value") + raise ValueError( + "Failed to parse Retry-After header value" + ) from exc to_wait = float(time.mktime(time_tuple) - time.time()) except ValueError: LOG.info( diff --git a/cloudinit/util.py b/cloudinit/util.py index 3eed40d3..5c73c93a 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -914,8 +914,18 @@ def center(text, fill, max_len): def del_dir(path): + ''' + Deletes a directory and all its contents by calling shutil.rmtree + Will ignore FileNotFoundError + + @param path: The path of 
the directory. + """ + ''' LOG.debug("Recursively deleting %s", path) - shutil.rmtree(path) + try: + shutil.rmtree(path) + except FileNotFoundError: + pass def read_optional_seed(fill, base="", ext="", timeout=5): diff --git a/cloudinit/version.py b/cloudinit/version.py index f6e85159..3edaee9b 100644 --- a/cloudinit/version.py +++ b/cloudinit/version.py @@ -4,7 +4,7 @@ # # This file is part of cloud-init. See LICENSE file for license information. -__VERSION__ = "25.2" +__VERSION__ = "25.3" _PACKAGED_VERSION = "@@PACKAGED_VERSION@@" FEATURES = [ diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl index dea7f031..96fe02ed 100644 --- a/config/cloud.cfg.tmpl +++ b/config/cloud.cfg.tmpl @@ -336,8 +336,6 @@ system_info: {% if variant in ["debian", "ubuntu", "unknown"] %} # Automatically discover the best ntp_client ntp_client: auto -{% elif variant == "raspberry-pi-os" %} - ntp_client: 'systemd-timesyncd' {% endif %} {% if variant in ["alpine", "amazon", "aosc", "arch", "azurelinux", "debian", "fedora", "gentoo", "mariner", "OpenCloudOS", "openeuler", diff --git a/conftest.py b/conftest.py index 5d7aa563..52d8d546 100644 --- a/conftest.py +++ b/conftest.py @@ -95,8 +95,8 @@ def disable_subp_usage(request, fixture_utils): Note that this can only catch invocations where the ``subp`` module is imported and ``subp.subp(...)`` is called. ``from cloudinit.subp import - subp`` imports happen before the patching here (or the CiTestCase - monkey-patching) happens, so are left untouched. + subp`` is left untouched because those imports happen before the patching + happens here. While ``disable_subp_usage`` unconditionally patches ``cloudinit.subp.subp``, any test-local patching will override this @@ -128,11 +128,6 @@ def test_bash(self): def test_several_things(self): subp.subp(["bash"]) subp.subp(["whoami"]) - - This fixture (roughly) mirrors the functionality of - ``CiTestCase.allowed_subp``. N.B. 
While autouse fixtures do affect - non-pytest tests, CiTestCase's ``allowed_subp`` does take precedence (and - we have ``TestDisableSubpUsageInTestSubclass`` to confirm that). """ allow_subp_for = fixture_utils.closest_marker_args_or( request, "allow_subp_for", None diff --git a/debian/changelog b/debian/changelog index b6a8a433..d9e958c9 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,16 @@ +cloud-init (25.3-0ubuntu1~24.04.1) noble; urgency=medium + + * d/p/retain-setuptools.patch: avoid upstream switch to meson build backend. + * refresh patches: + - d/p/no-nocloud-network.patch + - d/p/no-single-process.patch + - d/p/grub-dpkg-support.patch + * Upstream snapshot based on 25.3. (LP: #2131604). + List of changes from upstream can be found at + https://raw.githubusercontent.com/canonical/cloud-init/25.3/ChangeLog + + -- Chad Smith Sat, 15 Nov 2025 11:02:56 -0700 + cloud-init (25.2-0ubuntu1~24.04.1) noble; urgency=medium * add d/p/strip-invalid-mtu.patch diff --git a/debian/patches/grub-dpkg-support.patch b/debian/patches/grub-dpkg-support.patch index 0bfa06e9..a092e2b0 100644 --- a/debian/patches/grub-dpkg-support.patch +++ b/debian/patches/grub-dpkg-support.patch @@ -28,7 +28,7 @@ This patch header follows DEP-3: http://dep.debian.net/deps/dep3/ return --- a/cloudinit/config/schemas/schema-cloud-config-v1.json +++ b/cloudinit/config/schemas/schema-cloud-config-v1.json -@@ -1655,8 +1655,8 @@ +@@ -1696,8 +1696,8 @@ "properties": { "enabled": { "type": "boolean", diff --git a/debian/patches/no-nocloud-network.patch b/debian/patches/no-nocloud-network.patch index bd2d80a7..1ff0d2dd 100644 --- a/debian/patches/no-nocloud-network.patch +++ b/debian/patches/no-nocloud-network.patch @@ -26,7 +26,7 @@ Last-Update: 2024-08-02 # Now that we have exhausted any other places merge in the defaults --- a/cloudinit/util.py +++ b/cloudinit/util.py -@@ -1015,7 +1015,6 @@ def read_seeded(base="", ext="", timeout +@@ -1025,7 +1025,6 @@ def read_seeded(base="", ext="", 
timeout ud_url = base.replace("%s", "user-data" + ext) vd_url = base.replace("%s", "vendor-data" + ext) md_url = base.replace("%s", "meta-data" + ext) @@ -34,7 +34,7 @@ Last-Update: 2024-08-02 else: if features.NOCLOUD_SEED_URL_APPEND_FORWARD_SLASH: if base[-1] != "/" and parse.urlparse(base).query == "": -@@ -1024,17 +1023,7 @@ def read_seeded(base="", ext="", timeout +@@ -1034,17 +1033,7 @@ def read_seeded(base="", ext="", timeout ud_url = "%s%s%s" % (base, "user-data", ext) vd_url = "%s%s%s" % (base, "vendor-data", ext) md_url = "%s%s%s" % (base, "meta-data", ext) @@ -54,7 +54,7 @@ Last-Update: 2024-08-02 ) --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py -@@ -2489,7 +2489,7 @@ class TestReadOptionalSeed: +@@ -2485,7 +2485,7 @@ class TestReadOptionalSeed: { "meta-data": {"md": "val"}, "user-data": b"ud", @@ -63,7 +63,7 @@ Last-Update: 2024-08-02 "vendor-data": None, }, True, -@@ -2544,7 +2544,7 @@ class TestReadSeeded: +@@ -2540,7 +2540,7 @@ class TestReadSeeded: assert found_md == {"key1": "val1"} assert found_ud == ud assert found_vd == vd @@ -72,7 +72,7 @@ Last-Update: 2024-08-02 @pytest.mark.parametrize( "base, feature_flag, req_urls", -@@ -2553,7 +2553,6 @@ class TestReadSeeded: +@@ -2549,7 +2549,6 @@ class TestReadSeeded: "http://10.0.0.1/%s?qs=1", True, [ @@ -80,7 +80,7 @@ Last-Update: 2024-08-02 "http://10.0.0.1/meta-data?qs=1", "http://10.0.0.1/user-data?qs=1", "http://10.0.0.1/vendor-data?qs=1", -@@ -2564,7 +2563,6 @@ class TestReadSeeded: +@@ -2560,7 +2559,6 @@ class TestReadSeeded: "https://10.0.0.1:8008/", True, [ @@ -88,7 +88,7 @@ Last-Update: 2024-08-02 "https://10.0.0.1:8008/meta-data", "https://10.0.0.1:8008/user-data", "https://10.0.0.1:8008/vendor-data", -@@ -2575,7 +2573,6 @@ class TestReadSeeded: +@@ -2571,7 +2569,6 @@ class TestReadSeeded: "https://10.0.0.1:8008", True, [ @@ -96,7 +96,7 @@ Last-Update: 2024-08-02 "https://10.0.0.1:8008/meta-data", "https://10.0.0.1:8008/user-data", "https://10.0.0.1:8008/vendor-data", 
-@@ -2586,7 +2583,6 @@ class TestReadSeeded: +@@ -2582,7 +2579,6 @@ class TestReadSeeded: "https://10.0.0.1:8008", False, [ @@ -104,7 +104,7 @@ Last-Update: 2024-08-02 "https://10.0.0.1:8008meta-data", "https://10.0.0.1:8008user-data", "https://10.0.0.1:8008vendor-data", -@@ -2597,7 +2593,6 @@ class TestReadSeeded: +@@ -2593,7 +2589,6 @@ class TestReadSeeded: "https://10.0.0.1:8008?qs=", True, [ @@ -112,7 +112,7 @@ Last-Update: 2024-08-02 "https://10.0.0.1:8008?qs=meta-data", "https://10.0.0.1:8008?qs=user-data", "https://10.0.0.1:8008?qs=vendor-data", -@@ -2636,7 +2631,7 @@ class TestReadSeeded: +@@ -2632,7 +2627,7 @@ class TestReadSeeded: # user-data, vendor-data read raw. It could be scripts or other format assert found_ud == "/user-data: 1" assert found_vd == "/vendor-data: 1" @@ -121,12 +121,12 @@ Last-Update: 2024-08-02 assert [ mock.call(req_url, timeout=5, retries=10) for req_url in req_urls ] == m_read.call_args_list -@@ -2666,7 +2661,7 @@ class TestReadSeededWithoutVendorData(he - self.assertEqual(found_md, {"key1": "val1"}) - self.assertEqual(found_ud, ud) - self.assertEqual(found_vd, vd) -- self.assertEqual(found_network, {"test": "true"}) -+ self.assertIsNone(found_network) +@@ -2662,7 +2657,7 @@ class TestReadSeededWithoutVendorData(he + assert found_md == {"key1": "val1"} + assert found_ud == ud + assert found_vd == vd +- assert found_network == {"test": "true"} ++ assert found_network is None class TestEncode(helpers.TestCase): diff --git a/debian/patches/no-single-process.patch b/debian/patches/no-single-process.patch index d2f72fb3..d21e37e9 100644 --- a/debian/patches/no-single-process.patch +++ b/debian/patches/no-single-process.patch @@ -19,7 +19,7 @@ Last-Update: 2024-08-02 stdout = query_systemctl( --- a/cloudinit/config/cc_mounts.py +++ b/cloudinit/config/cc_mounts.py -@@ -545,7 +545,7 @@ def handle(name: str, cfg: Config, cloud +@@ -547,7 +547,7 @@ def handle(name: str, cfg: Config, cloud # fs_spec, fs_file, fs_vfstype, fs_mntops, fs_freq, 
fs_passno uses_systemd = cloud.distro.uses_systemd() default_mount_options = ( @@ -30,7 +30,7 @@ Last-Update: 2024-08-02 ) --- a/cloudinit/config/schemas/schema-cloud-config-v1.json +++ b/cloudinit/config/schemas/schema-cloud-config-v1.json -@@ -2073,12 +2073,12 @@ +@@ -2114,12 +2114,12 @@ }, "mount_default_fields": { "type": "array", @@ -58,7 +58,7 @@ Last-Update: 2024-08-02 -# process has completed this stage. The output from the return socket is piped -# into a shell so that the process can send a completion message (defaults to -# "done", otherwise includes an error message) and an exit code to systemd. --ExecStart=sh -c 'echo "start" | nc -Uu -W1 /run/cloud-init/share/config.sock -s /run/cloud-init/share/config-return.sock | sh' +-ExecStart=sh -c 'echo "start" | nc -U /run/cloud-init/share/config.sock | sh' +ExecStart=/usr/bin/cloud-init modules --mode=config RemainAfterExit=yes TimeoutSec=0 @@ -86,7 +86,7 @@ Last-Update: 2024-08-02 -# process has completed this stage. The output from the return socket is piped -# into a shell so that the process can send a completion message (defaults to -# "done", otherwise includes an error message) and an exit code to systemd. 
--ExecStart=sh -c 'echo "start" | nc -Uu -W1 /run/cloud-init/share/final.sock -s /run/cloud-init/share/final-return.sock | sh' +-ExecStart=sh -c 'echo "start" | nc -U /run/cloud-init/share/final.sock | sh' +ExecStart=/usr/bin/cloud-init modules --mode=final RemainAfterExit=yes TimeoutSec=0 @@ -101,10 +101,10 @@ Last-Update: 2024-08-02 Wants=network-pre.target After=hv_kvp_daemon.service +After=systemd-remount-fs.service - Before=auditd.service Before=network-pre.target Before=shutdown.target -@@ -17,6 +18,7 @@ Before=firewalld.target + {% if variant in ["almalinux", "cloudlinux", "rhel"] %} +@@ -16,6 +17,7 @@ Before=firewalld.target Before=sysinit.target {% endif %} Conflicts=shutdown.target @@ -112,7 +112,7 @@ Last-Update: 2024-08-02 ConditionPathExists=!/etc/cloud/cloud-init.disabled ConditionKernelCommandLine=!cloud-init=disabled ConditionEnvironment=!KERNEL_CMDLINE=cloud-init=disabled -@@ -26,14 +28,7 @@ Type=oneshot +@@ -25,14 +27,7 @@ Type=oneshot {% if variant in ["almalinux", "cloudlinux", "rhel"] %} ExecStartPre=/sbin/restorecon /run/cloud-init {% endif %} @@ -123,7 +123,7 @@ Last-Update: 2024-08-02 -# process has completed this stage. The output from the return socket is piped -# into a shell so that the process can send a completion message (defaults to -# "done", otherwise includes an error message) and an exit code to systemd. --ExecStart=sh -c 'echo "start" | nc -Uu -W1 /run/cloud-init/share/local.sock -s /run/cloud-init/share/local-return.sock | sh' +-ExecStart=sh -c 'echo "start" | nc -U /run/cloud-init/share/local.sock | sh' +ExecStart=/usr/bin/cloud-init init --local RemainAfterExit=yes TimeoutSec=0 @@ -234,7 +234,7 @@ Last-Update: 2024-08-02 -# process has completed this stage. The output from the return socket is piped -# into a shell so that the process can send a completion message (defaults to -# "done", otherwise includes an error message) and an exit code to systemd. 
--ExecStart=sh -c 'echo "start" | nc -Uu -W1 /run/cloud-init/share/network.sock -s /run/cloud-init/share/network-return.sock | sh' +-ExecStart=sh -c 'echo "start" | nc -U /run/cloud-init/share/network.sock | sh' -RemainAfterExit=yes -TimeoutSec=0 - diff --git a/debian/patches/retain-setuptools.patch b/debian/patches/retain-setuptools.patch new file mode 100644 index 00000000..32e46163 --- /dev/null +++ b/debian/patches/retain-setuptools.patch @@ -0,0 +1,579 @@ +Description: Retain setuptools build backend on stable series. + Avoid change in behavior in tip of main which switched to meson.build + as the default build backend. Retain the minimal patchset necessary + to retain setuptools pybuild default build backend. +Author: Chad Smith +Origin: backport +Last-Update: 2025-08-25 +--- a/doc-requirements.txt ++++ b/doc-requirements.txt +@@ -1,9 +1,9 @@ + -r requirements.txt + doc8 + furo +-meson + m2r2 + pyyaml ++setuptools + sphinx==7.1.2 + sphinx-design + sphinx-copybutton +--- a/packages/bddeb ++++ b/packages/bddeb +@@ -19,7 +19,7 @@ def find_root(): + top_dir = os.path.dirname( + os.path.dirname(os.path.abspath(sys.argv[0])) + ) +- if os.path.isfile(os.path.join(top_dir, "meson.build")): ++ if os.path.isfile(os.path.join(top_dir, "setup.py")): + return os.path.abspath(top_dir) + raise OSError( + ( +--- a/packages/debian/rules ++++ b/packages/debian/rules +@@ -2,16 +2,13 @@ + + include /usr/share/dpkg/pkg-info.mk + +-export PYTHONDONTWRITEBYTECODE=1 ++INIT_SYSTEM ?= systemd ++export PYBUILD_INSTALL_ARGS=--init-system=$(INIT_SYSTEM) + + %: +- dh $@ --buildsystem meson +- +-override_dh_auto_configure: +- dh_auto_configure -- -Dinit_system=systemd -Dlibexecdir=lib -Ddistro_templates=chef_client.rb.tmpl,chrony.conf.ubuntu.tmpl,hosts.debian.tmpl,ntp.conf.ubuntu.tmpl,sources.list.ubuntu.deb822.tmpl,sources.list.ubuntu.deb822.tmpl,timesyncd.conf.tmpl ++ dh $@ --with python3 --buildsystem pybuild + + override_dh_auto_test: +- + ifeq (,$(findstring 
nocheck,$(DEB_BUILD_OPTIONS))) + http_proxy= make PYVER=python3 check + else +--- a/packages/pkg-deps.json ++++ b/packages/pkg-deps.json +@@ -1,12 +1,8 @@ + { + "debian" : { + "build-requires" : [ +- "meson", +- "pkgconf", +- "bash-completion", + "debhelper", +- "systemd-dev", +- "python3", ++ "dh-python", + "python3-debconf" + ], + "renames" : { +@@ -14,11 +10,6 @@ + "pyserial" : "python3-serial" + }, + "requires" : [ +- "debconf", +- "dhcpcd-base", +- "iproute2", +- "netcat-openbsd", +- "netplan.io", + "procps" + ] + }, +--- a/pyproject.toml ++++ b/pyproject.toml +@@ -1,6 +1,6 @@ +-[build-system] # See meson.build. Empty build-system to avoid RTD builds +-build-backend = "" +-requires = [] ++[build-system] ++requires = ["setuptools"] ++build-backend = "setuptools.build_meta" + + [tool.black] + line-length = 79 +--- /dev/null ++++ b/setup.py +@@ -0,0 +1,342 @@ ++# Copyright (C) 2009 Canonical Ltd. ++# Copyright (C) 2012 Yahoo! Inc. ++# ++# Author: Soren Hansen ++# Author: Joshua Harlow ++# ++# This file is part of cloud-init. See LICENSE file for license information. 
++ ++# Distutils magic for ec2-init ++ ++import atexit ++import os ++import platform ++import shutil ++import subprocess ++import sys ++import tempfile ++from glob import glob ++ ++import setuptools ++from setuptools.command.egg_info import egg_info ++from setuptools.command.install import install ++ ++# Python-path here is a little unpredictable as setup.py could be run ++# from a directory other than the root of the repo, so ensure we can find ++# our utils ++sys.path.insert(0, os.path.dirname(os.path.realpath(__file__))) ++# isort: off ++from setup_utils import ( # noqa: E402 ++ get_version, ++ is_f, ++ is_generator, ++ pkg_config_read, ++ read_requires, ++) ++ ++# isort: on ++del sys.path[0] ++ ++# pylint: disable=W0402 ++try: ++ from setuptools.errors import DistutilsError ++except ImportError: ++ from distutils.errors import DistutilsArgError as DistutilsError ++# pylint: enable=W0402 ++ ++RENDERED_TMPD_PREFIX = "RENDERED_TEMPD" ++VARIANT = None ++PREFIX = None ++ ++ ++def render_tmpl(template, mode=None, is_yaml=False): ++ """render template into a tmpdir under same dir as setup.py ++ ++ This is rendered to a temporary directory under the top level ++ directory with the name 'cloud.cfg'. The reason for not just rendering ++ to config/cloud.cfg is for a.) don't want to write over contents ++ in that file if user had something there. b.) debuild will complain ++ that files are different outside of the debian directory.""" ++ ++ # newer versions just use install. 
++ if "install" not in sys.argv: ++ return template ++ ++ tmpl_ext = ".tmpl" ++ # we may get passed a non-template file, just pass it back ++ if not template.endswith(tmpl_ext): ++ return template ++ ++ topdir = os.path.dirname(sys.argv[0]) ++ tmpd = tempfile.mkdtemp(dir=topdir, prefix=RENDERED_TMPD_PREFIX) ++ atexit.register(shutil.rmtree, tmpd) ++ bname = os.path.basename(template) ++ ename, ext = os.path.splitext(bname) ++ if ext == tmpl_ext: ++ bname = ename ++ fpath = os.path.join(tmpd, bname) ++ cmd_variant = [] ++ cmd_prefix = [] ++ if VARIANT: ++ cmd_variant = ["--variant", VARIANT] ++ if PREFIX: ++ cmd_prefix = ["--prefix", PREFIX] ++ subprocess.run( # nosec B603 ++ [ ++ sys.executable, ++ "./tools/render-template", ++ *(["--is-yaml"] if is_yaml else []), ++ *cmd_prefix, ++ *cmd_variant, ++ *[template, fpath], ++ ], ++ check=True, ++ ) ++ if mode: ++ os.chmod(fpath, mode) ++ # return path relative to setup.py ++ return os.path.join(os.path.basename(tmpd), bname) ++ ++ ++# User can set the variant for template rendering ++for a in sys.argv: ++ if a.startswith("--distro"): ++ idx = sys.argv.index(a) ++ if "=" in a: ++ _, VARIANT = a.split("=") ++ del sys.argv[idx] ++ else: ++ VARIANT = sys.argv[idx + 1] ++ del sys.argv[idx + 1] ++ sys.argv.remove("--distro") ++ ++# parse PREFIX and pass it on from render_tmpl() ++for a in sys.argv: ++ if a.startswith("--prefix"): ++ idx = sys.argv.index(a) ++ if "=" in a: ++ _, PREFIX = a.split("=") ++ else: ++ PREFIX = sys.argv[idx + 1] ++ ++INITSYS_FILES = { ++ "sysvinit": lambda: [f for f in glob("sysvinit/redhat/*") if is_f(f)], ++ "sysvinit_freebsd": lambda: [ ++ render_tmpl(f, mode=0o755) ++ for f in glob("sysvinit/freebsd/*") ++ if is_f(f) ++ ], ++ "sysvinit_netbsd": lambda: [ ++ render_tmpl(f, mode=0o755) ++ for f in glob("sysvinit/netbsd/*") ++ if is_f(f) ++ ], ++ "sysvinit_openbsd": lambda: [ ++ render_tmpl(f, mode=0o755) ++ for f in glob("sysvinit/openbsd/*") ++ if is_f(f) ++ ], ++ "sysvinit_deb": lambda: [f for f 
in glob("sysvinit/debian/*") if is_f(f)], ++ "sysvinit_openrc": lambda: [ ++ f for f in glob("sysvinit/openrc/*") if is_f(f) ++ ], ++ "sysvinit_openrc.dep": lambda: ["tools/cloud-init-hotplugd"], ++ "systemd": lambda: [ ++ render_tmpl(f) ++ for f in ( ++ glob("systemd/*.tmpl") ++ + glob("systemd/*.service") ++ + glob("systemd/*.socket") ++ + glob("systemd/*.target") ++ ) ++ if (is_f(f) and not is_generator(f)) ++ ], ++ "systemd.generators": lambda: [ ++ render_tmpl(f, mode=0o755) ++ for f in glob("systemd/*") ++ if is_f(f) and is_generator(f) ++ ], ++} ++INITSYS_ROOTS = { ++ "sysvinit": "etc/rc.d/init.d", ++ "sysvinit_freebsd": "usr/local/etc/rc.d", ++ "sysvinit_netbsd": "usr/local/etc/rc.d", ++ "sysvinit_openbsd": "etc/rc.d", ++ "sysvinit_deb": "etc/init.d", ++ "sysvinit_openrc": "etc/init.d", ++ "sysvinit_openrc.dep": "usr/lib/cloud-init", ++ "systemd": pkg_config_read("systemd", "systemdsystemunitdir"), ++ "systemd.generators": pkg_config_read( ++ "systemd", "systemdsystemgeneratordir" ++ ), ++} ++INITSYS_TYPES = sorted([f.partition(".")[0] for f in INITSYS_ROOTS.keys()]) ++ ++ ++# Install everything in the right location and take care of Linux (default) and ++# FreeBSD systems. ++USR = "usr" ++ETC = "etc" ++USR_LIB_EXEC = "usr/lib" ++LIB = "lib" ++if os.uname()[0] in ["FreeBSD", "DragonFly", "OpenBSD"]: ++ USR = "usr/local" ++ USR_LIB_EXEC = "usr/local/lib" ++elif os.path.isfile("/etc/redhat-release"): ++ USR_LIB_EXEC = "usr/libexec" ++elif os.path.isfile("/etc/system-release-cpe"): ++ with open("/etc/system-release-cpe") as f: ++ cpe_data = f.read().rstrip().split(":") ++ (cpe_vendor, cpe_product, cpe_version) = cpe_data[3:6] ++ if cpe_vendor == "amazon": ++ USR_LIB_EXEC = "usr/libexec" ++ ++ ++class MyEggInfo(egg_info): ++ """This makes sure to not include the rendered files in SOURCES.txt.""" ++ ++ def find_sources(self): ++ egg_info.find_sources(self) ++ # update the self.filelist. 
++ self.filelist.exclude_pattern( ++ RENDERED_TMPD_PREFIX + ".*", is_regex=True ++ ) ++ # but since mfname is already written we have to update it also. ++ mfname = os.path.join(self.egg_info, "SOURCES.txt") ++ if os.path.exists(mfname): ++ with open(mfname) as fp: ++ files = [ ++ f for f in fp if not f.startswith(RENDERED_TMPD_PREFIX) ++ ] ++ with open(mfname, "w") as fp: ++ fp.write("".join(files)) ++ ++ ++# TODO: Is there a better way to do this?? ++class InitsysInstallData(install): ++ init_system = None ++ user_options = install.user_options + [ ++ # This will magically show up in member variable 'init_sys' ++ ( ++ "init-system=", ++ None, ++ "init system(s) to configure (%s) [default: None]" ++ % ", ".join(INITSYS_TYPES), ++ ), ++ ] ++ ++ def initialize_options(self): ++ install.initialize_options(self) ++ self.init_system = "" ++ ++ def finalize_options(self): ++ install.finalize_options(self) ++ ++ if self.init_system and isinstance(self.init_system, str): ++ self.init_system = self.init_system.split(",") ++ ++ if not self.init_system and not platform.system().endswith("BSD"): ++ self.init_system = ["systemd"] ++ ++ bad = [f for f in self.init_system if f not in INITSYS_TYPES] ++ if bad: ++ raise DistutilsError("Invalid --init-system: %s" % ",".join(bad)) ++ ++ for system in self.init_system: ++ # add data files for anything that starts with '.' 
++ datakeys = [ ++ k for k in INITSYS_ROOTS if k.partition(".")[0] == system ++ ] ++ for k in datakeys: ++ files = INITSYS_FILES[k]() ++ if not files: ++ continue ++ self.distribution.data_files.append((INITSYS_ROOTS[k], files)) ++ # Force that command to reinitialize (with new file list) ++ self.distribution.reinitialize_command("install_data", True) ++ ++ ++USR = "/" + USR ++ETC = "/" + ETC ++USR_LIB_EXEC = "/" + USR_LIB_EXEC ++LIB = "/" + LIB ++for k in INITSYS_ROOTS.keys(): ++ INITSYS_ROOTS[k] = "/" + INITSYS_ROOTS[k] ++ ++data_files = [ ++ (ETC + "/cloud", [render_tmpl("config/cloud.cfg.tmpl", is_yaml=True)]), ++ (ETC + "/cloud/clean.d", glob("config/clean.d/*")), ++ (ETC + "/cloud/cloud.cfg.d", glob("config/cloud.cfg.d/*")), ++ (ETC + "/cloud/templates", glob("templates/*")), ++ ( ++ USR_LIB_EXEC + "/cloud-init", ++ [ ++ "tools/ds-identify", ++ "tools/hook-hotplug", ++ "tools/uncloud-init", ++ "tools/write-ssh-key-fingerprints", ++ ], ++ ), ++ ( ++ USR + "/share/bash-completion/completions", ++ ["bash_completion/cloud-init"], ++ ), ++ (USR + "/share/doc/cloud-init", [f for f in glob("doc/*") if is_f(f)]), ++ ( ++ USR + "/share/doc/cloud-init/examples", ++ [f for f in glob("doc/examples/*") if is_f(f)], ++ ), ++ ( ++ USR + "/share/doc/cloud-init/examples/seed", ++ [f for f in glob("doc/examples/seed/*") if is_f(f)], ++ ), ++ ( ++ USR + "/share/doc/cloud-init/module-docs", ++ [f for f in glob("doc/module-docs/*", recursive=True) if is_f(f)], ++ ), ++] ++if not platform.system().endswith("BSD"): ++ RULES_PATH = pkg_config_read("udev", "udevdir") ++ RULES_PATH = "/" + RULES_PATH ++ ++ data_files.extend( ++ [ ++ (RULES_PATH + "/rules.d", [f for f in glob("udev/*.rules")]), ++ ( ++ INITSYS_ROOTS["systemd"] + "/sshd-keygen@.service.d/", ++ ["systemd/disable-sshd-keygen-if-cloud-init-active.conf"], ++ ), ++ ] ++ ) ++# Use a subclass for install that handles ++# adding on the right init system configuration files ++cmdclass = { ++ "install": InitsysInstallData, ++ 
"egg_info": MyEggInfo, ++} ++ ++requirements = read_requires() ++ ++setuptools.setup( ++ name="cloud-init", ++ version=get_version(), ++ description="Cloud instance initialization magic", ++ author="Scott Moser", ++ author_email="scott.moser@canonical.com", ++ url="http://launchpad.net/cloud-init/", ++ package_data={ ++ "": ["*.json"], ++ }, ++ packages=setuptools.find_packages(exclude=["tests.*", "tests"]), ++ scripts=["tools/cloud-init-per"], ++ license="Dual-licensed under GPLv3 or Apache 2.0", ++ data_files=data_files, ++ install_requires=requirements, ++ cmdclass=cmdclass, ++ entry_points={ ++ "console_scripts": [ ++ "cloud-init = cloudinit.cmd.main:main", ++ "cloud-id = cloudinit.cmd.cloud_id:main", ++ ], ++ }, ++) +--- /dev/null ++++ b/setup_utils.py +@@ -0,0 +1,60 @@ ++import os ++import subprocess ++import sys ++from typing import List ++ ++ ++def is_f(p: str) -> bool: ++ return os.path.isfile(p) ++ ++ ++def is_generator(p: str) -> bool: ++ return "-generator" in p ++ ++ ++def pkg_config_read(library: str, var: str) -> str: ++ pkg_config = "pkg-config" ++ ++ if os.getenv("PKG_CONFIG"): ++ pkg_config = os.getenv("PKG_CONFIG") ++ ++ fallbacks = { ++ "systemd": { ++ "systemdsystemconfdir": "/etc/systemd/system", ++ "systemdsystemunitdir": "/usr/lib/systemd/system", ++ "systemdsystemgeneratordir": "/usr/lib/systemd/system-generators", ++ }, ++ "udev": { ++ "udevdir": "/usr/lib/udev", ++ }, ++ } ++ cmd = [pkg_config, f"--variable={var}", library] ++ try: ++ path = subprocess.check_output(cmd).decode("utf-8") # nosec B603 ++ path = path.strip() ++ except Exception: ++ path = fallbacks[library][var] ++ if path.startswith("/"): ++ path = path[1:] ++ ++ return path ++ ++ ++def version_to_pep440(version: str) -> str: ++ # read-version can spit out something like 22.4-15-g7f97aee24 ++ # which is invalid under PEP 440. If we replace the first - with a + ++ # that should give us a valid version. 
++ return version.replace("-", "+", 1) ++ ++ ++def get_version() -> str: ++ cmd = [sys.executable, "tools/read-version"] ++ ver = subprocess.check_output(cmd) # B603 ++ version = ver.decode("utf-8").strip() ++ return version_to_pep440(version) ++ ++ ++def read_requires() -> List[str]: ++ cmd = [sys.executable, "tools/read-dependencies"] ++ deps = subprocess.check_output(cmd) # nosec B603 ++ return deps.decode("utf-8").splitlines() +--- a/test-requirements.txt ++++ b/test-requirements.txt +@@ -11,9 +11,9 @@ pytest!=7.3.2 + pytest-cov + pytest-mock + pytest-xdist ++setuptools + jsonschema + responses +-packaging + passlib + + # This one is currently used only by the CloudSigma and SmartOS datasources. +--- a/tools/test_tools.py ++++ b/tools/test_tools.py +@@ -1,10 +1,23 @@ + import pathlib +-from packaging.version import Version, InvalidVersion + from importlib.machinery import SourceFileLoader + from importlib.util import module_from_spec, spec_from_loader + from unittest import mock + + import pytest ++import setuptools ++ ++ ++from setup_utils import version_to_pep440 # pylint: disable=import-error ++ ++try: ++ validate_version = setuptools.dist.Distribution._validate_version # type: ignore # noqa: E501 ++ setuptools.sic # pylint: disable=no-member,pointless-statement ++except AttributeError: ++ pytest.skip( ++ "Unable to import necessary setuptools utilities. " ++ "Version is likely too old.", ++ allow_module_level=True, ++ ) + + # Since read-version has a '-' and no .py extension, we have to do this + # to import it +@@ -23,17 +36,9 @@ if not spec.loader: + spec.loader.exec_module(read_version) + + +-def version_to_pep440(version: str) -> str: +- # read-version can spit out something like 22.4-15-g7f97aee24 +- # which is invalid under PEP 440. If we replace the first - with a + +- # that should give us a valid version. 
+- return version.replace("-", "+", 1) +- +- + def assert_valid_version(version): +- try: +- Version(version) +- except InvalidVersion: ++ response = validate_version(version) ++ if isinstance(response, setuptools.sic): # pylint: disable=no-member + pytest.fail(f"{version} is not PEP 440 compliant") + + +--- a/tox.ini ++++ b/tox.ini +@@ -33,6 +33,7 @@ deps = + types-passlib + types-PyYAML + types-requests ++ types-setuptools + typing-extensions + + [pinned_versions] +@@ -185,6 +186,7 @@ deps = + pytest==4.6.9 + pytest-cov==2.8.1 + pytest-mock==1.10.4 ++ setuptools==45.2.0 + responses==0.9.0 + passlib + # required for this version of jinja2 diff --git a/debian/patches/series b/debian/patches/series index 6de088d4..95bd7033 100644 --- a/debian/patches/series +++ b/debian/patches/series @@ -4,3 +4,4 @@ no-nocloud-network.patch grub-dpkg-support.patch no-remove-networkd-online.patch strip-invalid-mtu.patch +retain-setuptools.patch diff --git a/doc/module-docs/cc_install_hotplug/data.yaml b/doc/module-docs/cc_install_hotplug/data.yaml index 2277c9f2..abdac913 100644 --- a/doc/module-docs/cc_install_hotplug/data.yaml +++ b/doc/module-docs/cc_install_hotplug/data.yaml @@ -14,7 +14,7 @@ cc_install_hotplug: around this limitation, one can wait until cloud-init has completed before hotplugging devices. - Currently supported datasources: Openstack, EC2 + Currently supported datasources: Openstack, EC2, Hetzner examples: - comment: | Example 1: Enable hotplug of network devices diff --git a/doc/module-docs/cc_phone_home/data.yaml b/doc/module-docs/cc_phone_home/data.yaml index 2a34f799..1391aff1 100644 --- a/doc/module-docs/cc_phone_home/data.yaml +++ b/doc/module-docs/cc_phone_home/data.yaml @@ -12,7 +12,7 @@ cc_phone_home: - ``pub_key_ed25519`` - ``instance_id`` - ``hostname`` - - ``fdqn`` + - ``fqdn`` Data is sent as ``x-www-form-urlencoded`` arguments. 
diff --git a/doc/module-docs/cc_raspberry_pi/example4.yaml b/doc/module-docs/cc_raspberry_pi/example4.yaml index c600a276..9e83ca26 100644 --- a/doc/module-docs/cc_raspberry_pi/example4.yaml +++ b/doc/module-docs/cc_raspberry_pi/example4.yaml @@ -1,7 +1,6 @@ #cloud-config rpi: interfaces: - ssh: true # works on all Pi models # only enables the UART hardware without binding it to the console serial: diff --git a/doc/module-docs/cc_rh_subscription/data.yaml b/doc/module-docs/cc_rh_subscription/data.yaml index e87a95af..f2c17193 100644 --- a/doc/module-docs/cc_rh_subscription/data.yaml +++ b/doc/module-docs/cc_rh_subscription/data.yaml @@ -5,11 +5,11 @@ cc_rh_subscription: Following a successful registration, you can: - - auto-attach subscriptions + - auto_attach subscriptions - set the service level - add subscriptions based on pool ID - enable/disable yum repositories based on repo ID - - alter the ``rhsm_baseurl`` and ``server-hostname`` in + - alter the ``rhsm_baseurl`` and ``server_hostname`` in ``/etc/rhsm/rhs.conf``. 
examples: - comment: | diff --git a/doc/module-docs/cc_rh_subscription/example2.yaml b/doc/module-docs/cc_rh_subscription/example2.yaml index 72328f93..6381526b 100644 --- a/doc/module-docs/cc_rh_subscription/example2.yaml +++ b/doc/module-docs/cc_rh_subscription/example2.yaml @@ -1,4 +1,4 @@ #cloud-config rh_subscription: - activation-key: foobar + activation_key: foobar org: "ABC" diff --git a/doc/module-docs/cc_rh_subscription/example3.yaml b/doc/module-docs/cc_rh_subscription/example3.yaml index a8b741cc..ee132bfc 100644 --- a/doc/module-docs/cc_rh_subscription/example3.yaml +++ b/doc/module-docs/cc_rh_subscription/example3.yaml @@ -1,19 +1,22 @@ #cloud-config rh_subscription: - activation-key: foobar + activation_key: foobar org: 12345 - auto-attach: true - service-level: self-support - add-pool: + auto_attach: true + service_level: self-support + add_pool: - 1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a - 2b2b2b2b2b2b2b2b2b2b2b2b2b2b2b2b - enable-repo: + enable_repo: - repo-id-to-enable - other-repo-id-to-enable - disable-repo: + disable_repo: - repo-id-to-disable - other-repo-id-to-disable # Alter the baseurl in /etc/rhsm/rhsm.conf - rhsm-baseurl: http://url + rhsm_baseurl: http://url # Alter the server hostname in /etc/rhsm/rhsm.conf - server-hostname: foo.bar.com + server_hostname: foo.bar.com + # Set `subscription-manager release --set=6Server` then + # delete /var/cache/{dnf,yum} + release_version: 6Server diff --git a/doc/rtd/conf.py b/doc/rtd/conf.py index 212ca832..35587502 100644 --- a/doc/rtd/conf.py +++ b/doc/rtd/conf.py @@ -3,10 +3,6 @@ import os import sys -from cloudinit import version -from cloudinit.config.schema import get_schema -from cloudinit.handlers.jinja_template import render_jinja_payload - # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. 
@@ -15,6 +11,12 @@ sys.path.insert(0, os.path.abspath("./")) sys.path.insert(0, os.path.abspath(".")) +# Readthedocs builds will pip install packages and not have PYTHONPATH set +# So avoid cloudinit imports until we have updated our path first. +from cloudinit import version +from cloudinit.config.schema import get_schema +from cloudinit.handlers.jinja_template import render_jinja_payload + # Suppress warnings for docs that aren't used yet # unused_docs = [ # ] diff --git a/doc/rtd/development/code_review.rst b/doc/rtd/development/code_review.rst index df8d692b..bde3f232 100644 --- a/doc/rtd/development/code_review.rst +++ b/doc/rtd/development/code_review.rst @@ -26,11 +26,8 @@ help if they need it. If you have questions about the code review process, or need advice on an open PR, these are the available avenues: * Open a PR, add "WIP:" to the title, and leave a comment on that PR -* join the ``#cloud-init`` `channel on the Libera IRC `_ network -* post on the ``#cloud-init`` `Discourse topic `_ -* send an email to the cloud-init mailing list: :: - - cloud-init@lists.launchpad.net +* join the ``#cloud-init`` `channel on Matrix `_ +* post on `Github Discussions`_ These are listed in order of our preference, but please use whichever of them you are most comfortable with. diff --git a/doc/rtd/development/contribute_docs.rst b/doc/rtd/development/contribute_docs.rst index 810da3a9..944b65d3 100644 --- a/doc/rtd/development/contribute_docs.rst +++ b/doc/rtd/development/contribute_docs.rst @@ -53,9 +53,9 @@ In your first PR ================= If you need some help with your contribution, you can contact us on our -`IRC channel `_. If you have already submitted a work-in-progress PR, you -can also ask for guidance from our technical author by `tagging s-makin`_ as a -reviewer. +`Matrix room `_. If you have already submitted a +work-in-progress PR, you can also ask for guidance from our technical author +by `tagging s-makin`_ as a reviewer. .. LINKS .. 
include:: ../links.txt diff --git a/doc/rtd/development/contribute_testing.rst b/doc/rtd/development/contribute_testing.rst new file mode 100644 index 00000000..ec7ec5da --- /dev/null +++ b/doc/rtd/development/contribute_testing.rst @@ -0,0 +1,8 @@ +Contribute to testing +********************* + +.. toctree:: + :maxdepth: 1 + + package_testing.rst + ubuntu_test_prerelease.rst diff --git a/doc/rtd/development/find_issues.rst b/doc/rtd/development/find_issues.rst index 8572ece3..45240f83 100644 --- a/doc/rtd/development/find_issues.rst +++ b/doc/rtd/development/find_issues.rst @@ -53,7 +53,7 @@ improvements you think can be made. This might be related to: * the CSS theming * or even just highlighting things you found confusing or unclear -Feel free to `contact us on IRC `_ if you have other ideas about +Feel free to `contact us on Matrix `_ if you have other ideas about contributions you might want to make, such as blog posts, guides, or tutorials. .. LINKS diff --git a/doc/rtd/development/first_PR.rst b/doc/rtd/development/first_PR.rst index 0aa7bf5c..39244f22 100644 --- a/doc/rtd/development/first_PR.rst +++ b/doc/rtd/development/first_PR.rst @@ -18,9 +18,8 @@ To contribute to cloud-init, you must first sign the Canonical every pull request to ensure that the CLA has been signed. For any questions or help with the process, email -`James Falcon `_ with the subject: -"Cloud-init CLA". You can also contact user ``falcojr`` in the #cloud-init -channel on the `Libera IRC network `_. +`Chad Smith `_ with the subject: +"Cloud-init CLA". 
Create a sandbox environment ============================ diff --git a/doc/rtd/development/index.rst b/doc/rtd/development/index.rst index 18dab526..df66735b 100644 --- a/doc/rtd/development/index.rst +++ b/doc/rtd/development/index.rst @@ -35,9 +35,9 @@ Before you can begin, you will need to: Getting help ============ -We use IRC and have a dedicated `#cloud-init` channel where you can contact +We use Matrix and have a dedicated `#cloud-init` room where you can contact us for help and guidance. This link will take you directly to our -`IRC channel on Libera `_. +`room on Matrix `_. Getting started =============== diff --git a/doc/rtd/development/package_testing.rst b/doc/rtd/development/package_testing.rst new file mode 100644 index 00000000..9097731d --- /dev/null +++ b/doc/rtd/development/package_testing.rst @@ -0,0 +1,26 @@ +.. _package_testing: + +Development package builds +************************** + +To ease the development and testing of local packaging changes, +development-quality DEB or RPM packages can be built with one of the following +scripts on a build host which already has all system build dependencies +installed: + +.. code-block:: bash + + ./packages/brpm --distro=redhat # or --distro=suse to build an RPM + ./packages/bddeb -d # to build a DEB + +OR if LXD is present, the full package build can be run in a container: + +.. code-block:: bash + + ./tools/run-container ubuntu-daily:plucky --package --keep + ./tools/run-container rockylinux/9 --package --keep + + +.. note:: + + meson support has not yet been added to the BSDs in :file:`tools/build-on-*bsd` or :file:`meson.build`. diff --git a/doc/rtd/development/packaging.rst b/doc/rtd/development/packaging.rst new file mode 100644 index 00000000..b366dfdf --- /dev/null +++ b/doc/rtd/development/packaging.rst @@ -0,0 +1,58 @@ +.. 
_downstream_packaging: + +Downstream packaging +******************** + +This page is intended to support operating system packagers of ``cloud-init`` +and is not intended for other audiences to generate their own custom cloud-init +packages. + +``Cloud-init`` is not published to PyPI as it is not intended to be consumed +as a pure-python package or run from virtual environments or python paths that +are not system-wide. + +Guidelines +========== + +Build Dependencies +------------------ +The following build dependencies must be available on the system: + +- ``python3`` +- ``meson >= 0.63.0`` +- ``pkgconf`` +- ``bash-completion`` + +Additional dependencies for systemd environments: + +- ``systemd-devel`` +- ``udev`` + +The full list of all package build dependencies for a given distribution can +be obtained by the following command: + +.. code-block:: bash + + ./tools/read-dependencies --requirements-file requirements.txt --requirements-file test-requirements.txt --system-pkg-names --system-pkg-names --distro= + + +Manual build procedure +---------------------- + +Meson install directory locations may be set with +``meson setup -D=``. + +Steps to validate ``cloud-init`` package builds in a development environment: + +.. code-block:: bash + + meson setup builddir + meson test -C builddir -v + meson install -C builddir --destdir=testinstall + # List installed files + find builddir/testinstall/ + +See :ref:`package_testing` for more information package testing. + +.. LINKS: +.. _meson: https://mesonbuild.com/ diff --git a/doc/rtd/development/testing.rst b/doc/rtd/development/testing.rst index a94641a7..1ac90c8a 100644 --- a/doc/rtd/development/testing.rst +++ b/doc/rtd/development/testing.rst @@ -9,8 +9,12 @@ be found at :file:`tests/unittests`. Integration tests can be found at tests can be found on the :ref:`integration_tests` page, but the guidelines specified below apply to both types of tests. 
-``Cloud-init`` uses `pytest`_ to run its tests, and has tests written both -as ``unittest.TestCase`` sub-classes and as un-subclassed ``pytest`` tests. +``Cloud-init`` uses `pytest`_ to write and run its tests. + +.. note:: + While there are a subset of tests written as ``unittest.TestCase`` + sub-classes, this is due to historical reasons. Their use is discouraged and + they are tracked to be removed in `#6427`_. Guidelines ========== @@ -20,36 +24,40 @@ The following guidelines should be followed. Test layout ----------- -* For ease of organisation and greater accessibility for developers unfamiliar - with ``pytest``, all ``cloud-init`` unit tests must be contained within test - classes. In other words, module-level test functions should not be used. +* For consistency, unit test files should have a matching name and + directory location under :file:`tests/unittests`. + +* E.g., the expected test file for code in :file:`cloudinit/path/to/file.py` + is :file:`tests/unittests/path/to/test_file.py`. + +``pytest`` guidelines +--------------------- + +* Use `pytest fixtures`_ to share functionality instead of inheritance. -* Since all tests are contained within classes, it is acceptable to mix - ``TestCase`` test classes and ``pytest`` test classes within the same - test file. +* Use bare ``assert`` statements, to take advantage of ``pytest``'s + `assertion introspection`_. - * These can be easily distinguished by their definition: ``pytest`` - classes will not use inheritance at all (e.g., - `TestGetPackageMirrorInfo`_), whereas ``TestCase`` classes will - subclass (indirectly) from ``TestCase`` (e.g., - `TestPrependBaseCommands`_). +* Prefer ``pytest``'s + `parametrized tests `__ + over test repetition. -* Unit tests and integration tests are located under :file:`cloud-init/tests`. +In-house fixtures +----------------- - * For consistency, unit test files should have a matching name and - directory location under :file:`tests/unittests`. 
+Before implementing your own fixture do search in :file:`*/conftest.py` files +as it could be already implemented. Another source to look for test helpers is +:file:`tests/*/helpers.py`. - * E.g., the expected test file for code in :file:`cloudinit/path/to/file.py` - is :file:`tests/unittests/path/to/test_file.py`. +Relevant fixtures: -``pytest`` tests ----------------- +* `disable_subp_usage`_ auto-disables call to subprocesses. See its + documentation to disable it. -* ``pytest`` test classes should use `pytest fixtures`_ to share - functionality instead of inheritance. +* `fake_filesystem`_ makes tests run on a temporary filesystem. -* ``pytest`` tests should use bare ``assert`` statements, to take advantage - of ``pytest``'s `assertion introspection`_. +* `paths`_ provides an instance of `cloudinit.helper.Paths` pointing to a + temporary filesystem. Dependency versions ------------------- @@ -134,3 +142,7 @@ Test argument ordering .. _pytest 3.0: https://docs.pytest.org/en/latest/changelog.html#id1093 .. _pytest.param: https://docs.pytest.org/en/6.2.x/reference.html#pytest-param .. _autospecced: https://docs.python.org/3.8/library/unittest.mock.html#autospeccing +.. _#6427: https://github.com/canonical/cloud-init/issues/6427 +.. _disable_subp_usage: https://github.com/canonical/cloud-init/blob/16f2039d0705ee9873ace98c967a34e6da6d0b87/conftest.py#L92 +.. _fake_filesystem: https://github.com/canonical/cloud-init/blob/16f2039d0705ee9873ace98c967a34e6da6d0b87/tests/unittests/conftest.py#L114 +.. 
_paths: https://github.com/canonical/cloud-init/blob/16f2039d0705ee9873ace98c967a34e6da6d0b87/tests/unittests/conftest.py#L224 diff --git a/doc/rtd/howto/ubuntu_test_prerelease.rst b/doc/rtd/development/ubuntu_test_prerelease.rst similarity index 92% rename from doc/rtd/howto/ubuntu_test_prerelease.rst rename to doc/rtd/development/ubuntu_test_prerelease.rst index cc685a84..dab0cde6 100644 --- a/doc/rtd/howto/ubuntu_test_prerelease.rst +++ b/doc/rtd/development/ubuntu_test_prerelease.rst @@ -37,7 +37,7 @@ cloud-init developers missed during testing before cloud-init gets released more broadly. If issues are found during testing, please file a `new cloud-init bug`_ and -leave a message in the `#cloud-init IRC channel`_. +leave a message in the `Matrix room `_. Remove the proposed repository ------------------------------ @@ -58,5 +58,6 @@ This will cause cloud-init to rerun as if it is a first boot. sudo cloud-init clean --logs --reboot +.. LINKS +.. include:: ../links.txt .. _new cloud-init bug: https://github.com/canonical/cloud-init/issues -.. _#cloud-init IRC channel: https://kiwiirc.com/nextclient/irc.libera.chat/cloud-init diff --git a/doc/rtd/howto/index.rst b/doc/rtd/howto/index.rst index d03e3909..096a14b2 100644 --- a/doc/rtd/howto/index.rst +++ b/doc/rtd/howto/index.rst @@ -28,4 +28,3 @@ How do I...? Report a bug Identify my datasource Disable cloud-init - Test pre-release cloud-init on Ubuntu diff --git a/doc/rtd/howto/launch_libvirt.rst b/doc/rtd/howto/launch_libvirt.rst index 3b481ed2..fa05b1bb 100644 --- a/doc/rtd/howto/launch_libvirt.rst +++ b/doc/rtd/howto/launch_libvirt.rst @@ -21,8 +21,8 @@ Create an instance .. 
code-block:: shell-session virt-install --name cloud-init-001 --memory 4000 --noreboot \ - --os-variant detect=on,name=ubuntujammy \ - --disk=size=10,backing_store="$(pwd)/jammy-server-cloudimg-amd64.img" \ + --os-variant detect=on,name=ubuntunoble \ + --disk=size=10,backing_store="$(pwd)/noble-server-cloudimg-amd64.img" \ --cloud-init user-data="$(pwd)/user-data,meta-data=$(pwd)/meta-data,network-config=$(pwd)/network-config" .. LINKS diff --git a/doc/rtd/howto/wait_for_cloud_init.rst b/doc/rtd/howto/wait_for_cloud_init.rst index 929ab138..0ca2ca33 100644 --- a/doc/rtd/howto/wait_for_cloud_init.rst +++ b/doc/rtd/howto/wait_for_cloud_init.rst @@ -23,7 +23,7 @@ completes. This may be accomplished by including [Unit] Description=Example service - After=cloud-final.service multi-user.target + After=cloud-init.target multi-user.target [Service] Type=oneshot diff --git a/doc/rtd/index.rst b/doc/rtd/index.rst index 76af68cf..6a3ef846 100644 --- a/doc/rtd/index.rst +++ b/doc/rtd/index.rst @@ -65,7 +65,7 @@ Having trouble? We would like to help! - :ref:`Check out our tutorials` if you're new to ``cloud-init`` - :ref:`Try the FAQ` for answers to some common questions -- You can also search the ``cloud-init`` `mailing list archive`_ +- You can also check `Github Discussions`_ - Find a bug? `Report bugs on GitHub Issues`_ Project and community @@ -75,9 +75,8 @@ Project and community projects, contributions, suggestions, fixes and constructive feedback. * Read our `Code of Conduct`_ -* Ask questions in the ``#cloud-init`` `IRC channel on Libera `_ -* Follow announcements or ask a question on `the cloud-init Discourse forum`_ -* Join the `cloud-init mailing list`_ +* Ask questions in the ``#cloud-init`` `room on Matrix `_ +* Follow announcements or ask a question on `GitHub Discussions`_ * :ref:`Contribute on GitHub` * `Release schedule`_ @@ -99,13 +98,11 @@ projects, contributions, suggestions, fixes and constructive feedback. 
Contributing overview Contribute to code Contribute to docs + Contribute to testing Community - + Downstream packaging .. LINKS .. include:: links.txt -.. _the cloud-init Discourse forum: https://discourse.ubuntu.com/c/project/cloud-init/54 -.. _cloud-init mailing list: https://launchpad.net/~cloud-init -.. _mailing list archive: https://lists.launchpad.net/cloud-init/ .. _Release schedule: https://discourse.ubuntu.com/t/2025-cloud-init-release-schedule/55534 .. _Report bugs on GitHub Issues: https://github.com/canonical/cloud-init/issues diff --git a/doc/rtd/links.txt b/doc/rtd/links.txt index 05d25107..0381ab31 100644 --- a/doc/rtd/links.txt +++ b/doc/rtd/links.txt @@ -3,10 +3,11 @@ .. _CLA: https://ubuntu.com/legal/contributors .. _Code of Conduct: https://ubuntu.com/community/code-of-conduct .. _Diataxis: https://diataxis.fr/ +.. _GitHub Discussions: https://github.com/canonical/cloud-init/discussions .. _GH repo: https://github.com/canonical/cloud-init .. _GitHub: https://github.com -.. _IRC: https://kiwiirc.com/nextclient/irc.libera.chat/cloud-init .. _Launchpad: https://launchpad.net +.. _Matrix: https://matrix.to/#/#cloud-init:ubuntu.com .. _the docs: https://docs.cloud-init.io/en/latest/ .. _tox: https://tox.readthedocs.io/en/latest/ .. _Discourse: https://discourse.ubuntu.com/c/server/cloud-init/54 diff --git a/doc/rtd/reference/breaking_changes.rst b/doc/rtd/reference/breaking_changes.rst index 1f5d613c..434e0e0d 100644 --- a/doc/rtd/reference/breaking_changes.rst +++ b/doc/rtd/reference/breaking_changes.rst @@ -11,6 +11,18 @@ releases. many operating system vendors patch out breaking changes in cloud-init to ensure consistent behavior on their platform. +25.3 +==== + +**For cloud-init downstream packagers**: the packaging build backend changed +to meson and away from python's setuptools.distutils in response to +`PEP-0632's deprecation of distutils`_. Meson packaging may not be identical +to previous binary packages generated by python's setuptools. 
Downstream +packagers should validate the final generated package to ensure +unexpected package deltas do not exist. + +See :ref:`downstream_packaging` for more info on packaging cloud-init. + 25.1.4 ====== @@ -252,3 +264,4 @@ a ``datasource_list`` in ``/etc/cloud/cloud.cfg.d/*.cfg``. .. _attach a ConfigDrive: https://docs.openstack.org/nova/2024.1/admin/config-drive.html .. _this patch: https://github.com/canonical/cloud-init/blob/ubuntu/noble/debian/patches/no-single-process.patch .. _Python3 equivalent: https://github.com/canonical/cloud-init/pull/5489#issuecomment-2408210561 +.. _PEP-0632's deprecation of distutils: https://peps.python.org/pep-0632/ diff --git a/doc/rtd/reference/datasources/nocloud.rst b/doc/rtd/reference/datasources/nocloud.rst index 03e7654c..9a1e9eb3 100644 --- a/doc/rtd/reference/datasources/nocloud.rst +++ b/doc/rtd/reference/datasources/nocloud.rst @@ -150,7 +150,7 @@ filesystem path for those that require more flexibility. This may be done with a line configuration: :: - ds=nocloud;s=file://path/to/directory/ + ds=nocloud;s=file:///path/to/directory/ Or a system configuration: @@ -158,7 +158,7 @@ Or a system configuration: datasource: NoCloud: - seedfrom: file://path/to/directory + seedfrom: file:///path/to/directory Source 2: Drive with labeled filesystem --------------------------------------- diff --git a/doc/rtd/reference/faq.rst b/doc/rtd/reference/faq.rst index 146dc667..225ea642 100644 --- a/doc/rtd/reference/faq.rst +++ b/doc/rtd/reference/faq.rst @@ -10,8 +10,8 @@ Having trouble? We would like to help! - First go through this page with answers to common questions - Use the search bar at the upper left to search our documentation -- Ask questions in the ``#cloud-init`` `IRC channel on Libera`_ -- Join and ask questions on the ``cloud-init`` `mailing list`_ +- Ask questions in the ``#cloud-init`` `Matrix room `_ +- Join and ask questions on `Github Discussions`_ - Find a bug? 
Check out the :ref:`reporting_bugs` topic to find out how to report one @@ -75,8 +75,6 @@ Whitepapers: - `Utilising cloud-init on Microsoft Azure (Whitepaper)`_ - `Cloud Instance Initialization with cloud-init (Whitepaper)`_ -.. _mailing list: https://launchpad.net/~cloud-init -.. _IRC channel on Libera: https://kiwiirc.com/nextclient/irc.libera.chat/cloud-init .. _do: https://github.com/canonical/ubuntu-pro-client/blob/9b46480b9e4b88e918bac5ced0d4b8edb3cbbeab/lib/auto_attach.py#L35 .. _cloud-init - The Good Parts: https://www.youtube.com/watch?v=2_m6EUo6VOI @@ -98,3 +96,6 @@ Whitepapers: .. _cloud-init Summit 2018: https://powersj.io/post/cloud-init-summit18/ .. _cloud-init Summit 2017: https://powersj.io/post/cloud-init-summit17/ .. _Subiquity autoinstaller: https://ubuntu.com/server/docs/install/autoinstall + +.. LINKS +.. include:: ../links.txt diff --git a/doc/rtd/reference/network-config-format-v2.rst b/doc/rtd/reference/network-config-format-v2.rst index c90e3b62..b26ab6e6 100644 --- a/doc/rtd/reference/network-config-format-v2.rst +++ b/doc/rtd/reference/network-config-format-v2.rst @@ -296,12 +296,15 @@ Example: :: ----------------------------------- Add device specific routes. Each mapping includes a ``to``, ``via`` key -with an IPv4 or IPv6 address as value. ``metric`` is an optional value. +with an IPv4 or IPv6 address as value. ``to: default`` may be used to +configure the default route. ``metric`` is an optional value. +``table`` is an optional numeric ID or name of the routing table for +policy-based routing. 
Example: :: routes: - - to: 0.0.0.0/0 + - to: default # could be 0.0.0.0/0 optionally via: 10.23.2.1 metric: 3 diff --git a/doc/rtd/reference/ubuntu_stable_release_updates.rst b/doc/rtd/reference/ubuntu_stable_release_updates.rst index 1babc257..ffb7d77c 100644 --- a/doc/rtd/reference/ubuntu_stable_release_updates.rst +++ b/doc/rtd/reference/ubuntu_stable_release_updates.rst @@ -47,6 +47,5 @@ The `integration test suite` used for validation follows these steps: .. _SRU: https://wiki.ubuntu.com/StableReleaseUpdates .. _CloudinitUpdates: https://wiki.ubuntu.com/CloudinitUpdates .. _new cloud-init bug: https://github.com/canonical/cloud-init/issues -.. _#cloud-init IRC channel: https://kiwiirc.com/nextclient/irc.libera.chat/cloud-init .. _integration test suite: https://github.com/canonical/cloud-init/tree/main/tests/integration_tests .. _SRU release version: https://github.com/canonical/ubuntu-maintainers-handbook/blob/main/VersionStrings.md#version-adding-a-change-in-ubuntu-as-a-stable-release-update diff --git a/meson.build b/meson.build new file mode 100644 index 00000000..6dc157d5 --- /dev/null +++ b/meson.build @@ -0,0 +1,234 @@ +project( + 'cloud-init', + meson_version: '>=0.63.0', # rockylinux/9 + license: 'GPL-3 OR Apache-2.0', + default_options: [ + # The default can yield broken results. 
+ 'python.install_env=auto', + ], +) +system = host_machine.system() +sysconfdir = get_option('sysconfdir') +init_system = get_option('init_system') + +lib_exec_dir = get_option('prefix') / get_option('libexecdir') / 'cloud-init' +render_tmpl = './tools/render-template' + +pymod = import('python') +python = pymod.find_installation('python3') + +# https://mesonbuild.com/FAQ.html#but-i-really-want-to-use-wildcards for downsteam drop-in files +find = find_program('find') + +install_subdir( + 'cloudinit', + install_dir: python.get_install_dir(), + install_tag: 'python-runtime', + install_mode: 'rw-r--r--', +) + +# Binaries and script entrypoints +if get_option('bash_completion') + bash_completions_dir = dependency('bash-completion').get_variable( + pkgconfig: 'completionsdir', + default_value: get_option('datadir') / 'bash-completion' / 'completions', + ) + install_data( + 'bash_completion/cloud-init', + install_dir: bash_completions_dir, + install_mode: 'rw-r--r--', + install_tag: 'bin', + ) +endif + +install_data( + [ + 'tools/ds-identify', + 'tools/hook-hotplug', + 'tools/uncloud-init', + 'tools/write-ssh-key-fingerprints', + ], + install_dir: lib_exec_dir, + install_mode: 'rwxr-xr-x', + install_tag: 'bin', +) + +install_data( + ['tools/cloud-init-per', 'scripts/cloud-id', 'scripts/cloud-init'], + install_dir: get_option('bindir'), + install_mode: 'rwxr-xr-x', + install_tag: 'bin', +) + +# Required Config and Templates +cfgs = run_command(find, 'config/cloud.cfg.d', '-type', 'f', '-name', '*.cfg', check: true) +install_data( + ['config/cloud.cfg.d/README'] + cfgs.stdout().strip().split('\n'), + install_dir: sysconfdir / 'cloud' / 'cloud.cfg.d', + install_mode: 'rw-r--r--', + install_tag: 'config', +) + +distro_templates = get_option('distro_templates') +if distro_templates.length() == 0 + templates = run_command(find, 'templates', '-type', 'f', '-name', '*.tmpl', check: true).stdout().strip().split('\n') +else + templates = [] + foreach template : distro_templates + 
templates += 'templates' / template + endforeach +endif + +install_data( + templates, + install_dir: sysconfdir / 'cloud' / 'templates', + install_mode: 'rw-r--r--', + install_tag: 'config', +) + +if init_system == 'systemd' + systemd = dependency('systemd') + udev = dependency('udev') + systemd_unit_dir = systemd.get_variable(pkgconfig: 'systemdsystemunitdir') + systemd_generator_dir = systemd.get_variable(pkgconfig: 'systemdsystemgeneratordir') + udev_dir = udev.get_variable(pkgconfig: 'udevdir') + install_data( + 'udev/66-azure-ephemeral.rules', + install_dir: udev_dir / 'rules.d', + install_mode: 'rw-r--r--', + install_tag: 'systemd', + ) + + # Must generate systemd templates in root mesonbuild, because nested + # systemd/meson.build results in builddir @OUTPUT@ macro being doubly-nested + # paths + custom_target( + input: 'systemd/cloud-init-generator.tmpl', + output: '@BASENAME@', + command: [ + render_tmpl, + '@INPUT@', + meson.current_build_dir() / '@OUTPUT@', + ], + install: true, + install_dir: systemd_generator_dir, + install_mode: 'rwxr-xr-x', + install_tag: 'systemd', + ) + + systemd_templates = run_command( + find, + 'systemd', + '-not', + '-name', '*generator*', + '-name', '*.tmpl', + check: true, + ) + foreach template : systemd_templates.stdout().strip().split('\n') + custom_target( + input: template, + output: '@BASENAME@', + command: [ + render_tmpl, + '@INPUT@', + meson.current_build_dir() / '@OUTPUT@', + ], + install: true, + install_dir: systemd_unit_dir, + install_mode: 'rw-r--r--', + install_tag: 'systemd', + ) + endforeach + + if get_option('disable_sshd_keygen') + # Typically on Fedora systems and sshd_keygen service can race cloud-init. 
+ install_data( + 'systemd/disable-sshd-keygen-if-cloud-init-active.conf', + install_dir: systemd_unit_dir / 'sshd-keygen@.service.d', + install_mode: 'rw-r--r--', + install_tag: 'systemd', + ) + endif + + # Allow for downstream supplements of services files meson.build changes + install_data( + run_command(find, 'systemd', '-name', '*.service', check: true).stdout().strip().split('\n') + + run_command(find, 'systemd', '-name', '*.target', check: true).stdout().strip().split('\n') + + run_command(find, 'systemd', '-name', '*.socket', check: true).stdout().strip().split('\n'), + install_dir: systemd_unit_dir, + install_mode: 'rw-r--r--', + install_tag: 'systemd', + ) +elif init_system == 'sysvinit_openrc' + openrc = dependency('openrc') + udev = dependency('udev') + udev_dir = udev.get_variable(pkgconfig: 'udevdir') + install_data( + 'udev/66-azure-ephemeral.rules', + install_dir: udev_dir / 'rules.d', + install_mode: 'rw-r--r--', + install_tag: 'openrc', + ) + + install_data( + run_command(find, 'sysvinit/openrc', '-type', 'f', check: true).stdout().strip().split('\n'), + install_dir: '/etc/init.d', + install_mode: 'rwxr-xr-x', + install_tag: 'openrc', + ) + + install_data( + [ + 'tools/cloud-init-hotplugd', + ], + install_dir: lib_exec_dir, + install_mode: 'rwxr-xr-x', + install_tag: 'bin', + ) +endif + +custom_target( + input: 'config/cloud.cfg.tmpl', + output: 'cloud.cfg', + command: [ + render_tmpl, + '--is-yaml', '@INPUT@', + meson.current_build_dir() / '@OUTPUT@', + ], + install: true, + install_dir: sysconfdir / 'cloud', + install_mode: 'rw-r--r--', + install_tag: 'config', +) + +# Docs and Examples: +install_man(['doc/man/cloud-init.1', 'doc/man/cloud-id.1', 'doc/man/cloud-init-per.1']) +examples = run_command(find, 'doc/examples', '-name', '*.txt', '-o', '-name', '*.yaml', check: true) +install_data( + examples.stdout().strip().split('\n'), + install_dir: get_option('datadir') / 'doc' / 'cloud-init' / 'examples', + install_mode: 'rw-r--r--', + install_tag: 
'doc', +) +install_data( + [ + 'doc/examples/seed/README', + 'doc/examples/seed/meta-data', + 'doc/examples/seed/user-data', + ], + install_dir: get_option('datadir') / 'doc' / 'cloud-init' / 'examples' / 'seed', + install_mode: 'rw-r--r--', + install_tag: 'doc', +) + +# Create expected packaging directory structure +meson.add_install_script('sh', '-c', 'mkdir -p ${DESTDIR}/' + sysconfdir + '/cloud/clean.d') + +# meson setup builddir -Dinit_system=systemd +# meson test -C builddir -v +test( + 'py3', + python, + args: ['-m', 'pytest', meson.project_source_root() / 'tests' / 'unittests'], + timeout: 0, +) diff --git a/meson_options.txt b/meson_options.txt new file mode 100644 index 00000000..13de1f8c --- /dev/null +++ b/meson_options.txt @@ -0,0 +1,4 @@ +option('init_system', type: 'string', value: 'systemd', description: 'Set target init system.') +option('distro_templates', type: 'array', value: [], description: 'Distro template files to install. WARNING: Templates may change in the future. 
If using this option, be sure to check new releases for template file changes.') +option('disable_sshd_keygen', type: 'boolean', value: false, description: 'Provide systemd service to disable sshd-keygen if present to avoid races with cloud-init.') +option('bash_completion', type: 'boolean', value: true, description: 'Bash completion for cloud-init.') diff --git a/packages/bddeb b/packages/bddeb index af9fbf89..3b2258b4 100755 --- a/packages/bddeb +++ b/packages/bddeb @@ -102,7 +102,13 @@ def write_debian_folder(root, templ_data, cloud_util_deps): reqs = reqs_output.splitlines() test_reqs = run_helper( "read-dependencies", - ["--requirements-file", "test-requirements.txt", "--system-pkg-names"], + [ + "--distro", + "debian", + "--requirements-file", + "test-requirements.txt", + "--system-pkg-names", + ], ).splitlines() requires = ["cloud-utils | cloud-guest-utils"] if cloud_util_deps else [] @@ -300,7 +306,7 @@ def main(): "release_suffix": get_release_suffix(args.release), } - with temp_utils.tempdir() as tdir: + with temp_utils.tempdir(needs_exe=True) as tdir: # output like 0.7.6-1022-g36e92d3 ver_data = read_version() diff --git a/packages/brpm b/packages/brpm index e942dbae..b942aa89 100755 --- a/packages/brpm +++ b/packages/brpm @@ -16,7 +16,7 @@ def find_root(): top_dir = os.path.dirname( os.path.dirname(os.path.abspath(sys.argv[0])) ) - if os.path.isfile(os.path.join(top_dir, "setup.py")): + if os.path.isfile(os.path.join(top_dir, "meson.build")): return os.path.abspath(top_dir) raise OSError( ( diff --git a/packages/debian/control.in b/packages/debian/control.in index 87403fe9..2ce4fe2d 100644 --- a/packages/debian/control.in +++ b/packages/debian/control.in @@ -2,20 +2,86 @@ Source: cloud-init Section: admin Priority: optional -Maintainer: Scott Moser +Homepage: https://cloud-init.io/ +Maintainer: Ubuntu Developers Build-Depends: ${build_depends} XS-Python-Version: all -Standards-Version: 3.9.6 +Vcs-Browser: 
https://github.com/canonical/cloud-init/tree/main +Vcs-Git: https://github.com/canonical/cloud-init -b main +Standards-Version: 4.5.0 +Rules-Requires-Root: no -Package: cloud-init +Package: cloud-init-base Architecture: all -Depends: ${misc:Depends}, - ${python3:Depends}, +Depends: cloud-guest-utils | cloud-utils, + dhcpcd-base, iproute2, - python3-debconf -Breaks: cloud-init-base -Recommends: eatmydata, sudo, software-properties-common, gdisk -Suggests: ssh-import-id, openssh-server -Description: Init scripts for cloud instances - Cloud instances need special scripts to run during initialization - to retrieve and install ssh keys and to let the user run various scripts. + netcat-openbsd, + netplan.io, + procps, + python3, + python3-debconf, + python3-requests, + ${misc:Depends}, + ${python3:Depends} +Recommends: eatmydata, gdisk, gnupg, python3-apt, software-properties-common +Suggests: openssh-server, ssh-import-id +Replaces: cloud-init (<< 25.1~), cloud-init-base +Breaks: cloud-init (<< 25.1~), cloud-init-base +Description: initialization and customization tool for cloud instances + Cloud-init with minimal dependencies, refer to cloud-init for more + information. + +Package: cloud-init-azure +Architecture: all +Depends: cloud-init-base, + python3-passlib, + ${misc:Depends}, +Description: Azure specific cloud-init + This metapackage depends on cloud-init-base and additional packages for + Azure. + +Package: cloud-init-cloud-sigma +Architecture: all +Depends: cloud-init-base, + python3-serial, + ${misc:Depends}, +Description: Cloud Sigma specific cloud-init + This metapackage depends on cloud-init-base and additional packages for + Cloud Sigma. + +Package: cloud-init-smart-os +Architecture: all +Depends: cloud-init-base, + python3-serial, + ${misc:Depends}, +Description: Smart OS specific cloud-init + This metapackage depends on cloud-init-base and additional packages for + Smart OS. 
+ +Package: cloud-init +Architecture: all +Depends: cloud-init-base, + python3-serial, + python3-passlib, + ${misc:Depends}, +Description: initialization and customization tool for cloud instances + Cloud-init is the industry standard multi-distribution method for + cross-platform cloud instance initialization. It is supported across all major + public cloud providers, provisioning systems for private cloud infrastructure, + and bare-metal installations. + . + Cloud instances are initialized from a disk image and instance data: + . + * Cloud metadata + * User data (optional) + * Vendor data (optional) + . + Cloud-init will identify the cloud it is running on during boot, read any + provided metadata from the cloud and initialize the system accordingly. This + may involve setting up the network and storage devices to configuring SSH + access key and many other aspects of a system. Later on the cloud-init will + also parse and process any optional user or vendor data that was passed to + the instance. + . + This is a metapackage that includes every cloud-init dependency. 
diff --git a/packages/debian/rules b/packages/debian/rules index b9b8eaff..4587caf4 100755 --- a/packages/debian/rules +++ b/packages/debian/rules @@ -19,8 +19,8 @@ override_dh_installsystemd: dh_installsystemd --no-restart-on-upgrade --no-start override_dh_auto_install: - dh_auto_install --destdir=debian/cloud-init - install -D -m 0644 ./tools/21-cloudinit.conf debian/cloud-init/etc/rsyslog.d/21-cloudinit.conf - install -D ./tools/Z99-cloud-locale-test.sh debian/cloud-init/etc/profile.d/Z99-cloud-locale-test.sh - install -D ./tools/Z99-cloudinit-warnings.sh debian/cloud-init/etc/profile.d/Z99-cloudinit-warnings.sh + dh_auto_install --destdir=debian/cloud-init-base + install -D -m 0644 ./tools/21-cloudinit.conf debian/cloud-init-base/etc/rsyslog.d/21-cloudinit.conf + install -D ./tools/Z99-cloud-locale-test.sh debian/cloud-init-base/etc/profile.d/Z99-cloud-locale-test.sh + install -D ./tools/Z99-cloudinit-warnings.sh debian/cloud-init-base/etc/profile.d/Z99-cloudinit-warnings.sh flist=$$(find $(CURDIR)/debian/ -type f -name version.py) && sed -i 's,@@PACKAGED_VERSION@@,$(DEB_VERSION),' $${flist:-did-not-find-version-py-for-replacement} diff --git a/packages/pkg-deps.json b/packages/pkg-deps.json index 4ee0982a..caacc83f 100644 --- a/packages/pkg-deps.json +++ b/packages/pkg-deps.json @@ -43,7 +43,30 @@ }, "redhat" : { "build-requires" : [ - "python3-devel" + "bash-completion", + "meson", + "pkgconf", + "python3-devel", + "systemd-devel" + ], + "requires" : [ + "e2fsprogs", + "iproute", + "net-tools", + "procps", + "rsyslog", + "shadow-utils", + "sudo", + "hostname" + ] + }, + "fedora": { + "build-requires" : [ + "bash-completion-devel", + "meson", + "pkgconf", + "python3-devel", + "systemd-devel" ], "requires" : [ "e2fsprogs", @@ -62,10 +85,12 @@ "pyyaml" : "python3-PyYAML" }, "build-requires" : [ + "meson", + "pkgconf", + "bash-completion-devel", "fdupes", "filesystem", - "python3-devel", - "python3-setuptools" + "python3-devel" ], "requires" : [ "iproute2", diff 
--git a/packages/redhat/cloud-init.spec.in b/packages/redhat/cloud-init.spec.in index a09e87d2..7d91d2d8 100644 --- a/packages/redhat/cloud-init.spec.in +++ b/packages/redhat/cloud-init.spec.in @@ -1,11 +1,4 @@ ## template: jinja -%define use_systemd (0%{?fedora} && 0%{?fedora} >= 18) || (0%{?rhel} && 0%{?rhel} >= 7) - -%if %{use_systemd} -%define init_system systemd -%else -%define init_system sysvinit -%endif # See: http://www.zarb.org/~jasonc/macros.php # Or: http://fedoraproject.org/wiki/Packaging:ScriptletSnippets @@ -14,30 +7,20 @@ Name: cloud-init Version: {{rpm_upstream_version}} Release: 1{{subrelease}}%{?dist} -Summary: Cloud instance init scripts +Summary: Cloud instance initialization tool Group: System Environment/Base License: Dual-licesed GPLv3 or Apache 2.0 -URL: http://launchpad.net/cloud-init +URL: https://github.com/canonical/cloud-init Source0: {{archive_name}} BuildArch: noarch BuildRoot: %{_tmppath} -%if "%{?el6}" == "1" -BuildRequires: python-argparse -%endif -%if %{use_systemd} -Requires: systemd -BuildRequires: systemd -Requires: systemd-units -BuildRequires: systemd-units -%else -Requires: initscripts >= 8.36 -Requires(postun): initscripts -Requires(post): chkconfig -Requires(preun): chkconfig -%endif +Requires: systemd +BuildRequires: pkgconfig(systemd) +Requires: systemd-units +BuildRequires: systemd-units {% for r in buildrequires %} BuildRequires: {{r}} @@ -61,16 +44,9 @@ Requires: {{r}} Patch{{loop.index0}}: {{p}} {% endfor %} -%if "%{init_system}" == "systemd" Requires(post): systemd Requires(preun): systemd Requires(postun): systemd -%else -Requires(post): chkconfig -Requires(postun): initscripts -Requires(preun): chkconfig -Requires(preun): initscripts -%endif %description Cloud-init is a set of init scripts for cloud instances. Cloud instances @@ -86,13 +62,11 @@ ssh keys and to let the user run various scripts. 
{% endfor %} %build -%{__python3} setup.py build +%meson -Dinit_system=systemd -Ddistro_templates=chef_client.rb.tmpl,chrony.conf.rhel.tmpl,hosts.redhat.tmpl,ntp.conf.rhel.tmpl,resolv.conf.tmpl,timesyncd.conf.tmpl +%meson_build %install - -%{__python3} setup.py install -O1 \ - --skip-build --root $RPM_BUILD_ROOT \ - --init-system=%{init_system} +%meson_install # Note that /etc/rsyslog.d didn't exist by default until F15. # el6 request: https://bugzilla.redhat.com/show_bug.cgi?id=740420 @@ -100,9 +74,6 @@ mkdir -p $RPM_BUILD_ROOT/%{_sysconfdir}/rsyslog.d cp -p tools/21-cloudinit.conf \ $RPM_BUILD_ROOT/%{_sysconfdir}/rsyslog.d/21-cloudinit.conf -# Remove the tests -rm -rf $RPM_BUILD_ROOT%{python3_sitelib}/tests - # Required dirs... mkdir -p $RPM_BUILD_ROOT/%{_sharedstatedir}/cloud mkdir -p $RPM_BUILD_ROOT/%{_libexecdir}/%{name} @@ -118,66 +89,23 @@ version_pys=$(cd "$RPM_BUILD_ROOT" && find . -name version.py -type f) rm -rf $RPM_BUILD_ROOT %post +%systemd_post cloud-init-main.service cloud-config.service cloud-config.target cloud-final.service cloud-init-network.service cloud-init.target cloud-init-local.service -%if "%{init_system}" == "systemd" -if [ $1 -eq 1 ] -then - /bin/systemctl enable cloud-config.service >/dev/null 2>&1 || : - /bin/systemctl enable cloud-final.service >/dev/null 2>&1 || : - /bin/systemctl enable cloud-init-network.service >/dev/null 2>&1 || : - /bin/systemctl enable cloud-init-local.service >/dev/null 2>&1 || : -fi -%else -/sbin/chkconfig --add %{_initrddir}/cloud-init-local -/sbin/chkconfig --add %{_initrddir}/cloud-init -/sbin/chkconfig --add %{_initrddir}/cloud-config -/sbin/chkconfig --add %{_initrddir}/cloud-final -%endif %preun +%systemd_preun cloud-init-main.service cloud-config.service cloud-config.target cloud-final.service cloud-init-network.service cloud-init.target cloud-init-local.service -%if "%{init_system}" == "systemd" -if [ $1 -eq 0 ] -then - /bin/systemctl --no-reload disable cloud-config.service >/dev/null 2>&1 || : - 
/bin/systemctl --no-reload disable cloud-final.service >/dev/null 2>&1 || : - /bin/systemctl --no-reload disable cloud-init-network.service >/dev/null 2>&1 || : - /bin/systemctl --no-reload disable cloud-init-local.service >/dev/null 2>&1 || : -fi -%else -if [ $1 -eq 0 ] -then - /sbin/service cloud-init stop >/dev/null 2>&1 || : - /sbin/chkconfig --del cloud-init || : - /sbin/service cloud-init-local stop >/dev/null 2>&1 || : - /sbin/chkconfig --del cloud-init-local || : - /sbin/service cloud-config stop >/dev/null 2>&1 || : - /sbin/chkconfig --del cloud-config || : - /sbin/service cloud-final stop >/dev/null 2>&1 || : - /sbin/chkconfig --del cloud-final || : -fi -%endif %postun - -%if "%{init_system}" == "systemd" -/bin/systemctl daemon-reload >/dev/null 2>&1 || : -%endif +%systemd_postun cloud-init-main.service cloud-config.service cloud-config.target cloud-final.service cloud-init-network.service cloud-init.target cloud-init-local.service %files %{_udevrulesdir}/66-azure-ephemeral.rules -%if "%{init_system}" == "systemd" /usr/lib/systemd/system-generators/cloud-init-generator /usr/lib/systemd/system/sshd-keygen@.service.d/disable-sshd-keygen-if-cloud-init-active.conf %{_unitdir}/cloud-* -%else -%attr(0755, root, root) %{_initddir}/cloud-config -%attr(0755, root, root) %{_initddir}/cloud-final -%attr(0755, root, root) %{_initddir}/cloud-init-local -%attr(0755, root, root) %{_initddir}/cloud-init -%endif # Program binaries %{_bindir}/cloud-init* @@ -189,7 +117,6 @@ fi # Configs %config(noreplace) %{_sysconfdir}/cloud/cloud.cfg -%dir %{_sysconfdir}/cloud/clean.d %dir %{_sysconfdir}/cloud/cloud.cfg.d %config(noreplace) %{_sysconfdir}/cloud/cloud.cfg.d/*.cfg %config(noreplace) %{_sysconfdir}/cloud/cloud.cfg.d/README @@ -198,8 +125,13 @@ fi %config(noreplace) %{_sysconfdir}/rsyslog.d/21-cloudinit.conf # Bash completion script +%dir %{_datadir}/bash-completion/completions %{_datadir}/bash-completion/completions/cloud-init +# Man pages +%dir %{_mandir}/man1 
+%{_mandir}/man1/*.gz + %{_libexecdir}/%{name} %dir %{_sharedstatedir}/cloud diff --git a/packages/suse/cloud-init.spec.in b/packages/suse/cloud-init.spec.in index fae3c12b..1e063243 100644 --- a/packages/suse/cloud-init.spec.in +++ b/packages/suse/cloud-init.spec.in @@ -48,25 +48,15 @@ end for {% endfor %} %build -%{__python} setup.py build +%meson -Dinit_system=systemd --libexecdir=/usr/lib -Ddistro_templates=chef_client.rb.tmpl,chrony.conf.opensuse.tmpl,hosts.suse.tmpl,ntp.conf.opensuse.tmpl,resolv.conf.tmpl,timesyncd.conf.tmpl +%meson_build %install -%{__python} setup.py install \ - --skip-build --root=%{buildroot} --prefix=%{_prefix} \ - --record-rpm=INSTALLED_FILES --install-lib=%{python_sitelib} \ - --init-system=systemd - -# Move udev rules -mkdir -p %{buildroot}/usr/lib/udev/rules.d/ -mv %{buildroot}/lib/udev/rules.d/* %{buildroot}/usr/lib/udev/rules.d/ - -# Remove non-SUSE templates -rm %{buildroot}/%{_sysconfdir}/cloud/templates/*.debian.* -rm %{buildroot}/%{_sysconfdir}/cloud/templates/*.redhat.* -rm %{buildroot}/%{_sysconfdir}/cloud/templates/*.ubuntu.* +%meson_install # Move documentation mkdir -p %{buildroot}/%{_defaultdocdir} +mkdir -p %{buildroot}/%{_sysconfdir}/cloud/clean.d mv %{buildroot}/usr/share/doc/cloud-init %{buildroot}/%{_defaultdocdir} for doc in LICENSE ChangeLog requirements.txt; do cp ${doc} %{buildroot}/%{_defaultdocdir}/cloud-init @@ -102,6 +92,7 @@ version_pys=$(cd "%{buildroot}" && find . -name version.py -type f) # There doesn't seem to be an agreed upon place for these # although it appears the standard says /usr/lib but rpmbuild # will try /usr/lib64 ?? +/usr/lib/%{name}/hook-hotplug /usr/lib/%{name}/uncloud-init /usr/lib/%{name}/write-ssh-key-fingerprints /usr/lib/%{name}/ds-identify @@ -114,7 +105,7 @@ version_pys=$(cd "%{buildroot}" && find . 
-name version.py -type f) %doc %{_defaultdocdir}/cloud-init/* # Configs -%dir %{_sysconfdir}/cloud/clean.d +%dir %{_sysconfdir}/cloud/clean.d %config(noreplace) %{_sysconfdir}/cloud/cloud.cfg %dir %{_sysconfdir}/cloud/cloud.cfg.d %config(noreplace) %{_sysconfdir}/cloud/cloud.cfg.d/*.cfg @@ -124,8 +115,7 @@ version_pys=$(cd "%{buildroot}" && find . -name version.py -type f) # Bash completion script %{_datadir}/bash-completion/completions/cloud-init - -%{_sysconfdir}/systemd/system/sshd-keygen@.service.d/disable-sshd-keygen-if-cloud-init-active.conf +/usr/share/man/man1/cloud-*gz # Python code is here... %{python_sitelib}/* diff --git a/pyproject.toml b/pyproject.toml index 14315477..e4ba441d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -42,7 +42,6 @@ no_implicit_optional = true # See GH-5445 [[tool.mypy.overrides]] module = [ - "cloudinit.analyze.show", "cloudinit.cmd.devel.make_mime", "cloudinit.cmd.devel.net_convert", "cloudinit.cmd.main", @@ -199,7 +198,7 @@ module = [ check_untyped_defs = false [tool.ruff] -target-version = "py37" +target-version = "py38" line-length = 79 # E, W, and F make up the entirety of default flake8 lint.select = [ diff --git a/scripts/cloud-id b/scripts/cloud-id new file mode 100644 index 00000000..553a7f0e --- /dev/null +++ b/scripts/cloud-id @@ -0,0 +1,9 @@ +#!/usr/bin/env python3 + +# Python wrapper for cloud-id command + +import sys + +from cloudinit.cmd import cloud_id + +sys.exit(cloud_id.main()) diff --git a/scripts/cloud-init b/scripts/cloud-init new file mode 100644 index 00000000..3d3a7306 --- /dev/null +++ b/scripts/cloud-init @@ -0,0 +1,9 @@ +#!/usr/bin/env python3 + +# Python wrapper for cloud-init CLI + +import sys + +from cloudinit.cmd import main + +sys.exit(main.main()) diff --git a/systemd/cloud-init-local.service.tmpl b/systemd/cloud-init-local.service.tmpl index 862664d1..3e790289 100644 --- a/systemd/cloud-init-local.service.tmpl +++ b/systemd/cloud-init-local.service.tmpl @@ -8,7 +8,6 @@ 
DefaultDependencies=no Wants=network-pre.target After=hv_kvp_daemon.service After=systemd-remount-fs.service -Before=auditd.service Before=network-pre.target Before=shutdown.target {% if variant in ["almalinux", "cloudlinux", "rhel"] %} diff --git a/sysvinit/openrc/cloud-init-ds-identify b/sysvinit/openrc/cloud-init-ds-identify index eeca5659..41f5435f 100755 --- a/sysvinit/openrc/cloud-init-ds-identify +++ b/sysvinit/openrc/cloud-init-ds-identify @@ -15,7 +15,7 @@ start() { ewarn "$RC_SVCNAME is disabled via cloud-init.disabled file" else ebegin "$description" - /usr/lib/cloud-init/ds-identify + /usr/libexec/cloud-init/ds-identify eend $? fi } diff --git a/sysvinit/openrc/cloud-init-hotplug b/sysvinit/openrc/cloud-init-hotplug index a45720ec..cd37b3fb 100755 --- a/sysvinit/openrc/cloud-init-hotplug +++ b/sysvinit/openrc/cloud-init-hotplug @@ -2,7 +2,7 @@ description="cloud-init hotplug daemon" -command="/usr/lib/cloud-init/cloud-init-hotplugd" +command="/usr/libexec/cloud-init/cloud-init-hotplugd" pidfile="/run/$RC_SVCNAME.pid" depend() { diff --git a/tests/integration_tests/clouds.py b/tests/integration_tests/clouds.py index a93a2d2d..e72213a1 100644 --- a/tests/integration_tests/clouds.py +++ b/tests/integration_tests/clouds.py @@ -334,6 +334,7 @@ def _mount_source(instance: LXDInstance): ).format(**format_variables) subp(command.split()) + # pylint: disable=assignment-from-none def _perform_launch( self, *, diff --git a/tests/integration_tests/conftest.py b/tests/integration_tests/conftest.py index 194cda58..5a391b6a 100644 --- a/tests/integration_tests/conftest.py +++ b/tests/integration_tests/conftest.py @@ -145,11 +145,11 @@ def session_cloud( image_types = [member.value for member in ImageType.__members__.values()] try: image_type = ImageType(integration_settings.OS_IMAGE_TYPE) - except ValueError: + except ValueError as e: raise ValueError( f"{integration_settings.OS_IMAGE_TYPE} is an invalid OS_IMAGE_TYPE" f" specified in settings. 
Must be one of {image_types}" - ) + ) from e cloud: IntegrationCloud = platforms[integration_settings.PLATFORM]( reaper=reaper, image_type=image_type diff --git a/tests/integration_tests/instances.py b/tests/integration_tests/instances.py index 8268a775..2dc16f80 100644 --- a/tests/integration_tests/instances.py +++ b/tests/integration_tests/instances.py @@ -215,6 +215,7 @@ def install_proposed_image(self, pkg: str): '$(lsb_release -sc)-proposed main" >> ' "/etc/apt/sources.list.d/proposed.list" ).ok + assert self.execute("apt-get update").ok assert self.execute( f"apt-get install -qy {pkg} -t=$(lsb_release -sc)-proposed" ).ok diff --git a/tests/integration_tests/modules/test_apt_functionality.py b/tests/integration_tests/modules/test_apt_functionality.py index dd068d04..c7878cdd 100644 --- a/tests/integration_tests/modules/test_apt_functionality.py +++ b/tests/integration_tests/modules/test_apt_functionality.py @@ -14,7 +14,12 @@ KEEP_INSTANCE, PLATFORM, ) -from tests.integration_tests.releases import CURRENT_RELEASE, IS_UBUNTU, MANTIC +from tests.integration_tests.releases import ( + CURRENT_RELEASE, + IS_UBUNTU, + MANTIC, + QUESTING, +) from tests.integration_tests.util import ( get_feature_flag_value, verify_clean_boot, @@ -544,6 +549,10 @@ def _do_oci_customization(cloud_config: str): @pytest.mark.skipif(not IS_UBUNTU, reason="Apt usage") +@pytest.mark.skipif( + CURRENT_RELEASE == QUESTING, + reason="Trying to remove gpg on Questing makes apt unhappy", +) def test_install_missing_deps(session_cloud: IntegrationCloud): """ Test the installation of missing dependencies using apt on an Ubuntu diff --git a/tests/integration_tests/releases.py b/tests/integration_tests/releases.py index 9f6c8ae7..3c3c1fd3 100644 --- a/tests/integration_tests/releases.py +++ b/tests/integration_tests/releases.py @@ -99,6 +99,7 @@ def from_os_image( NOBLE = Release("ubuntu", "noble", "24.04") ORACULAR = Release("ubuntu", "oracular", "24.10") PLUCKY = Release("ubuntu", "plucky", 
"25.04") +QUESTING = Release("ubuntu", "questing", "25.10") UBUNTU_STABLE = (FOCAL, JAMMY, MANTIC, NOBLE) diff --git a/tests/unittests/analyze/test_boot.py b/tests/unittests/analyze/test_boot.py index 18911c96..ffe147e5 100644 --- a/tests/unittests/analyze/test_boot.py +++ b/tests/unittests/analyze/test_boot.py @@ -24,7 +24,7 @@ def test_blank_distro(self, m_subp): @mock.patch("cloudinit.subp.subp") @mock.patch("cloudinit.util.is_FreeBSD", return_value=True) def test_freebsd_gentoo_cant_find(self, m_is_FreeBSD, m_subp): - err_code == dist_check_timestamp() + assert err_code == dist_check_timestamp() @mock.patch("cloudinit.subp.subp", return_value=(0, 1)) def test_subp_fails(self, m_subp): diff --git a/tests/unittests/cmd/test_clean.py b/tests/unittests/cmd/test_clean.py index 2f2b6c04..9b7f6824 100644 --- a/tests/unittests/cmd/test_clean.py +++ b/tests/unittests/cmd/test_clean.py @@ -330,11 +330,14 @@ def ds_fetch(): return ds init_class.fetch = ds_fetch - retcode = clean.remove_artifacts( - init_class, - remove_logs=False, - remove_config=["datasource"], - ) + with mock.patch( + "cloudinit.cmd.clean.settings.CLEAN_RUNPARTS_DIR", os.devnull + ): + retcode = clean.remove_artifacts( + init_class, + remove_logs=False, + remove_config=["datasource"], + ) assert ds_conf.exists() is False, f"Unexpected file {ds_conf}" assert 0 == retcode diff --git a/tests/unittests/config/test_cc_resizefs.py b/tests/unittests/config/test_cc_resizefs.py index 612f76f7..937df181 100644 --- a/tests/unittests/config/test_cc_resizefs.py +++ b/tests/unittests/config/test_cc_resizefs.py @@ -36,9 +36,6 @@ class TestResizefs: - def setUp(self): - super(TestResizefs, self).setUp() - self.name = "resizefs" @mock.patch("cloudinit.subp.subp") def test_skip_ufs_resize(self, m_subp): diff --git a/tests/unittests/config/test_cc_rh_subscription.py b/tests/unittests/config/test_cc_rh_subscription.py index 0f467617..a9b95072 100644 --- a/tests/unittests/config/test_cc_rh_subscription.py +++ 
b/tests/unittests/config/test_cc_rh_subscription.py @@ -14,7 +14,11 @@ get_schema, validate_cloudconfig_schema, ) -from tests.unittests.helpers import mock, skipUnlessJsonSchema +from tests.unittests.helpers import ( + mock, + skipUnlessJsonSchema, + skipUnlessJsonSchemaVersionGreaterThan, +) SUBMGR = cc_rh_subscription.SubscriptionManager SUB_MAN_CLI = "cloudinit.config.cc_rh_subscription._sub_man_cli" @@ -35,11 +39,12 @@ class TestHappyPath: "rh_subscription": { "username": "scooby@do.com", "password": "scooby-snacks", - "auto-attach": True, - "service-level": "self-support", - "add-pool": ["pool1", "pool2", "pool3"], - "enable-repo": ["repo1", "repo2", "repo3"], - "disable-repo": ["repo4", "repo5"], + "auto_attach": True, + "service_level": "self-support", + "add_pool": ["pool1", "pool2", "pool3"], + "enable_repo": ["repo1", "repo2", "repo3"], + "disable_repo": ["repo4", "repo5"], + "release_version": "7.6b", } } @@ -52,7 +57,12 @@ def test_already_registered(self, m_sman_cli, caplog): assert m_sman_cli.call_count == 1 assert "System is already registered" in caplog.text - def test_simple_registration(self, m_sman_cli, caplog): + @mock.patch.object( + cc_rh_subscription.SubscriptionManager, "_set_release_version" + ) + def test_simple_registration( + self, m_set_release_version, m_sman_cli, caplog + ): """ Simple registration with username and password """ @@ -76,13 +86,28 @@ def test_simple_registration(self, m_sman_cli, caplog): ) assert "rh_subscription plugin completed successfully" in caplog.text assert m_sman_cli.call_count == 2 + assert m_set_release_version.call_count == 0 + @pytest.mark.parametrize( + "variable_name_separator", + ( + pytest.param("_", id="update_repos_disable_with_none"), + pytest.param( + "-", id="same_functional_behavior_with_deprecated_keys" + ), + ), + ) @mock.patch.object(cc_rh_subscription.SubscriptionManager, "_getRepos") - def test_update_repos_disable_with_none(self, m_get_repos, m_sman_cli): + def 
test_update_repos_disable_with_none( + self, m_get_repos, m_sman_cli, variable_name_separator + ): cfg = copy.deepcopy(self.CONFIG) m_get_repos.return_value = ([], ["repo1"]) + + enable_repo_key = "enable_repo".replace("_", variable_name_separator) + disable_repo_key = "disable_repo".replace("_", variable_name_separator) cfg["rh_subscription"].update( - {"enable-repo": ["repo1"], "disable-repo": None} + {enable_repo_key: ["repo1"], disable_repo_key: None} ) mysm = cc_rh_subscription.SubscriptionManager(cfg) assert True is mysm.update_repos() @@ -93,8 +118,8 @@ def test_update_repos_disable_with_none(self, m_get_repos, m_sman_cli): def test_full_registration(self, m_sman_cli, caplog): """ - Registration with auto-attach, service-level, adding pools, - and enabling and disabling yum repos + Registration with auto_attach, service_level, adding pools, + enabling and disabling yum repos and setting release_version """ call_lists = [] call_lists.append(["attach", "--pool=pool1", "--pool=pool3"]) @@ -102,6 +127,7 @@ def test_full_registration(self, m_sman_cli, caplog): ["repos", "--disable=repo5", "--enable=repo2", "--enable=repo3"] ) call_lists.append(["attach", "--auto", "--servicelevel=self-support"]) + call_lists.append(["release", "--set=7.6b"]) reg = ( "The system has been registered with ID:" " 12345678-abde-abcde-1234-1234567890abc" @@ -116,14 +142,37 @@ def test_full_registration(self, m_sman_cli, caplog): ("Repo ID: repo1\nRepo ID: repo5\n", ""), ("Repo ID: repo2\nRepo ID: repo3\nRepo ID: repo4", ""), ("", ""), + ("Release set to: 7.6b", ""), ] + # to avoid deleting the actual cache files + # (triggered by the presence of the release_version key) + # on the host running the tests + mock.patch("shutil.rmtree") + cc_rh_subscription.handle(NAME, self.CONFIG_FULL, None, []) - assert m_sman_cli.call_count == 9 + assert m_sman_cli.call_count == 10 for call in call_lists: assert mock.call(call) in m_sman_cli.call_args_list assert "rh_subscription plugin completed 
successfully" in caplog.text +CONFIG_BADAUTOATTACH = { + "rh_subscription": { + "username": "scooby@do.com", + "password": "scooby-snacks", + "auto_attach": 3, + } +} + +CONFIG_SERVICE = { + "rh_subscription": { + "username": "scooby@do.com", + "password": "scooby-snacks", + "service_level": "self-support", + } +} + + @mock.patch(SUB_MAN_CLI) class TestBadInput: SM = cc_rh_subscription.SubscriptionManager @@ -136,15 +185,7 @@ class TestBadInput: CONFIG_NO_KEY = { "rh_subscription": { - "activation-key": "1234abcde", - } - } - - CONFIG_SERVICE = { - "rh_subscription": { - "username": "scooby@do.com", - "password": "scooby-snacks", - "service-level": "self-support", + "activation_key": "1234abcde", } } @@ -152,21 +193,21 @@ class TestBadInput: "rh_subscription": { "username": "scooby@do.com", "password": "scooby-snacks", - "add-pool": "not_a_list", + "add_pool": "not_a_list", } } CONFIG_BADREPO = { "rh_subscription": { "username": "scooby@do.com", "password": "scooby-snacks", - "enable-repo": "not_a_list", + "enable_repo": "not_a_list", } } - CONFIG_BADKEY = { + CONFIG_BAD_RELEASE_VERSION = { "rh_subscription": { - "activation-key": "abcdef1234", - "fookey": "bar", - "org": "ABC", + "username": "scooby@do.com", + "password": "scooby-snacks", + "release_version": "bad_release_version", } } @@ -203,20 +244,36 @@ def test_no_org(self, m_sman_cli, caplog): caplog, ) - def test_service_level_without_auto(self, m_sman_cli, caplog): - """Attempt to register using service-level without auto-attach key.""" + @pytest.mark.parametrize( + "auto_attach_cfg,warnings", + ( + ( + CONFIG_SERVICE, + [ + "The service_level key must be used in conjunction with" + " the auto_attach key. 
Please re-run with" + " auto_attach: True", + "rh_subscription plugin did not complete successfully", + ], + ), + ( + CONFIG_BADAUTOATTACH, + ["The key auto_attach must be a boolean value (True/False)"], + ), + ), + ) + def test_service_level_without_auto( + self, m_sman_cli, auto_attach_cfg, warnings, caplog + ): + """Attempt to register using service_level without auto_attach key.""" m_sman_cli.side_effect = [ subp.ProcessExecutionError, (self.REG, "bar"), ] - cc_rh_subscription.handle(NAME, self.CONFIG_SERVICE, None, []) + cc_rh_subscription.handle(NAME, auto_attach_cfg, None, []) assert m_sman_cli.call_count == 1 self.assert_logged_warnings( - ( - "The service-level key must be used in conjunction with the" - " auto-attach key. Please re-run with auto-attach: True", - "rh_subscription plugin did not complete successfully", - ), + warnings, caplog, ) @@ -257,22 +314,59 @@ def test_repo_not_a_list(self, m_sman_cli, caplog): caplog, ) - def test_bad_key_value(self, m_sman_cli, caplog): + @mock.patch.object( + cc_rh_subscription.SubscriptionManager, "_delete_packagemanager_cache" + ) + def test_bad_release_version(self, m_delete_pm_cache, m_sman_cli, caplog): """ - Attempt to register with a key that we don't know + Failure at setting release_version """ m_sman_cli.side_effect = [ subp.ProcessExecutionError, (self.REG, "bar"), + subp.ProcessExecutionError, ] - cc_rh_subscription.handle(NAME, self.CONFIG_BADKEY, None, []) - assert m_sman_cli.call_count == 1 + cc_rh_subscription.handle( + NAME, self.CONFIG_BAD_RELEASE_VERSION, None, [] + ) + assert m_sman_cli.call_count == 3 + assert m_delete_pm_cache.call_count == 0 + expected_cmd = [ + "release", + f"--set={self.CONFIG_BAD_RELEASE_VERSION['rh_subscription']['release_version']}", + ] + self.assert_logged_warnings( + ( + f"Unable to set release_version using: {expected_cmd}", + "rh_subscription plugin did not complete successfully", + ), + caplog, + ) + + @mock.patch("shutil.rmtree", side_effect=[PermissionError]) + 
def test_pm_cache_deletion_after_setting_release_version( + self, m_rmtree, m_sman_cli, caplog + ): + """ + Failure at deleting package manager cache + after setting release_version + """ + good_release_ver_cfg = copy.deepcopy(self.CONFIG_BAD_RELEASE_VERSION) + good_release_ver_cfg["rh_subscription"][ + "release_version" + ] = "1.2Server" + m_sman_cli.side_effect = [ + subp.ProcessExecutionError, + (self.REG, "bar"), + ("Release set to: 1.2Server", ""), + ] + cc_rh_subscription.handle(NAME, good_release_ver_cfg, None, []) + # assert "rh_subscription plugin completed successfully" in caplog.text + assert m_sman_cli.call_count == 3 + assert m_rmtree.call_args_list == [mock.call("/var/cache/dnf")] self.assert_logged_warnings( ( - "fookey is not a valid key for rh_subscription. Valid keys" - " are: org, activation-key, username, password, disable-repo," - " enable-repo, add-pool, rhsm-baseurl, server-hostname," - " auto-attach, service-level", + "Unable to delete the package manager cache", "rh_subscription plugin did not complete successfully", ), caplog, @@ -292,24 +386,47 @@ class TestRhSubscriptionSchema: "1 is not of type 'string'", ), ( - {"rh_subscription": {"enable-repo": "name"}}, + {"rh_subscription": {"add_pool": [1]}}, + "1 is not of type 'string'", + ), + ( + {"rh_subscription": {"add_pool": ["1"]}}, + None, + ), + # The json schema error message is not descriptive + # but basically we need to confirm the schema will fail + # the config validation when both add_pool and the deprecated + # add-pool are added + ( + {"rh_subscription": {"add_pool": ["1"], "add-pool": ["2"]}}, + r"({'add_pool': \['1'\], 'add-pool': \['2'\]} should not be" + r" valid under {'required': \['add_pool', 'add-pool'\]}|" + r"{'required': \['add_pool', 'add-pool'\]} is not allowed" + r" for {'add_pool': \['1'\], 'add-pool': \['2'\]})", + ), + ( + {"rh_subscription": {"enable_repo": "name"}}, "'name' is not of type 'array'", ), ( - {"rh_subscription": {"disable-repo": "name"}}, + 
{"rh_subscription": {"disable_repo": "name"}}, "'name' is not of type 'array'", ), + ( + {"rh_subscription": {"release_version": [10]}}, + r"\[10\] is not of type 'string'", + ), ( { "rh_subscription": { - "activation-key": "foobar", + "activation_key": "foobar", "org": "ABC", } }, None, ), ( - {"rh_subscription": {"activation-key": "foobar", "org": 314}}, + {"rh_subscription": {"activation_key": "foobar", "org": 314}}, "Deprecated in version 24.2. Use of type integer for this" " value is deprecated. Use a string instead.", ), @@ -317,8 +434,84 @@ class TestRhSubscriptionSchema: ) @skipUnlessJsonSchema() def test_schema_validation(self, config, error_msg): + self._validation_steps(config, error_msg) + + @pytest.mark.parametrize( + "config, error_msg", + [ + ( + {"rh_subscription": {"add-pool": ["1"]}}, + # The deprecation is not raised for jsonschema<4.0 + # as the latter can't merge $defs and inline keys + r"Deprecated in version 25.3. Use \*\*add_pool\*\* instead.", + ), + ], + ) + @skipUnlessJsonSchemaVersionGreaterThan(version=(3, 2, 0)) + def test_schema_validation_requiring_new_json_schema( + self, config, error_msg + ): + self._validation_steps(config, error_msg) + + @staticmethod + def _validation_steps(config, error_msg): if error_msg is None: validate_cloudconfig_schema(config, get_schema(), strict=True) else: with pytest.raises(SchemaValidationError, match=error_msg): validate_cloudconfig_schema(config, get_schema(), strict=True) + + +class TestConstructor: + """ + Test Constructor operations + """ + + def test_deprecated_values(self): + """ + Confirm the constructor assigns the deprecated fields' cfg keys to the + correct python object fields + """ + + cfg_with_new_keys = {"rh_subscription": {}} + cfg_with_deprecated_keys = {"rh_subscription": {}} + + deprecation_pairs = [ + ("activation-key", "activation_key"), + ("disable-repo", "disable_repo"), + ("enable-repo", "enable_repo"), + ("add-pool", "add_pool"), + ("rhsm-baseurl", "rhsm_baseurl"), + 
("server-hostname", "server_hostname"), + ("auto-attach", "auto_attach"), + ("service-level", "service_level"), + ] + + counter = 0 + for tuple in deprecation_pairs: + cfg_with_new_keys["rh_subscription"][tuple[0]] = counter + cfg_with_deprecated_keys["rh_subscription"][tuple[1]] = counter + counter = counter + 1 + + mgr_with_new_keys = cc_rh_subscription.SubscriptionManager( + cfg_with_new_keys + ) + mgr_with_deprecated_keys = cc_rh_subscription.SubscriptionManager( + cfg_with_deprecated_keys + ) + + assert ( + mgr_with_new_keys.rhel_cfg == cfg_with_new_keys["rh_subscription"] + ) + assert ( + mgr_with_deprecated_keys.rhel_cfg + == cfg_with_deprecated_keys["rh_subscription"] + ) + + dict_new_without_rhel_cfg = mgr_with_new_keys.__dict__ + del dict_new_without_rhel_cfg["rhel_cfg"] + + dict_deprecated_without_rhel_cfg = mgr_with_deprecated_keys.__dict__ + del dict_deprecated_without_rhel_cfg["rhel_cfg"] + + assert dict_new_without_rhel_cfg == dict_deprecated_without_rhel_cfg diff --git a/tests/unittests/config/test_cc_runcmd.py b/tests/unittests/config/test_cc_runcmd.py index bdf394a5..622ac207 100644 --- a/tests/unittests/config/test_cc_runcmd.py +++ b/tests/unittests/config/test_cc_runcmd.py @@ -91,7 +91,7 @@ class TestRunCmdSchema: {"a": "n"}, ] }, - "", + "is not of type", ), ), ) diff --git a/tests/unittests/config/test_cc_write_files.py b/tests/unittests/config/test_cc_write_files.py index 398fea66..5922b4e2 100644 --- a/tests/unittests/config/test_cc_write_files.py +++ b/tests/unittests/config/test_cc_write_files.py @@ -115,6 +115,29 @@ def test_append(self, m_chownbyname): mock.call(mock.ANY, USER, GROUP) ] == m_chownbyname.call_args_list + def test_special_permission_bits(self, m_chownbyname, tmp_path): + expected = "hello world\n" + filename = str(tmp_path / "special.file") + permissions = 0o4711 + write_files( + "test_permission", + [ + { + "content": expected, + "path": filename, + "permissions": permissions, + } + ], + OWNER, + ) + assert 
util.load_text_file(filename) == expected + assert [ + mock.call(mock.ANY, USER, GROUP) + ] == m_chownbyname.call_args_list + assert util.get_permissions(filename) == decode_perms( + permissions, None + ) + def test_yaml_binary(self, m_chownbyname): data_wrong_paths = util.load_yaml(YAML_TEXT) data = [] diff --git a/tests/unittests/config/test_schema.py b/tests/unittests/config/test_schema.py index 4650f594..f0dd7a5c 100644 --- a/tests/unittests/config/test_schema.py +++ b/tests/unittests/config/test_schema.py @@ -47,7 +47,6 @@ from tests.hypothesis import given from tests.hypothesis_jsonschema import from_schema from tests.unittests.helpers import ( - SkipTest, does_not_raise, mock, skipUnlessHypothesisJsonSchema, @@ -377,7 +376,6 @@ class TestNetplanValidateNetworkSchema: ), ), ) - @SkipTest def test_network_config_schema_validation_false_when_skipped( self, config, expected_log, caplog, mocker ): @@ -526,6 +524,59 @@ def test_validateconfig_schema_honors_formats(self): str(context_mgr.value) ) + @skipUnlessJsonSchema() + @pytest.mark.parametrize( + "schema,should_succeed_validating,expected_err_msg_of_either_schema_or_its_negation", + [ + ( + {"required": ["a", "b"]}, + True, + "Cloud config schema errors: : ({'a': 5, 'b': 6} should not" + r" be valid under {'required': \['a', 'b'\]}|{'required': " + r"\['a', 'b'\]} is not allowed for {'a': 5, 'b': 6})", + ), + ( + {"required": ["a", "c"]}, + False, + "Cloud config schema errors: : 'c' is a required property", + ), + ( + {"required": ["d", "c"]}, + False, + "Cloud config schema errors: : 'c' is a required property, :" + " 'd' is a required property", + ), + ], + ) + def test_validateconfig_with_not_keyword_in_schema( + self, + schema, + should_succeed_validating, + expected_err_msg_of_either_schema_or_its_negation, + ): + """ + Test the behavior of the not keyword in a schema + """ + cfg_to_test = {"a": 5, "b": 6} + not_schema = {"not": schema} + (schema_to_succeed, schema_to_fail) = ( + (schema, not_schema) + 
if should_succeed_validating + else (not_schema, schema) + ) + + validate_cloudconfig_schema( + cfg_to_test, schema_to_succeed, strict=True + ) + + with pytest.raises( + SchemaValidationError, + match=expected_err_msg_of_either_schema_or_its_negation, + ): + validate_cloudconfig_schema( + cfg_to_test, schema_to_fail, strict=True + ) + @skipUnlessJsonSchema() def test_validateconfig_schema_honors_formats_strict_metaschema(self): """With strict and strict_metaschema True, ensure errors on format""" @@ -1833,7 +1884,6 @@ class TestNetworkSchema: ), ), ) - @SkipTest @mock.patch("cloudinit.net.netplan.available", return_value=False) def test_network_schema( self, diff --git a/tests/unittests/distros/test__init__.py b/tests/unittests/distros/test__init__.py index 2447d313..2769efc6 100644 --- a/tests/unittests/distros/test__init__.py +++ b/tests/unittests/distros/test__init__.py @@ -325,6 +325,28 @@ def test_expire_passwd_freebsd_uses_pw_command(self): ["pw", "usermod", "myuser", "-p", "01-Jan-1970"] ) + @mock.patch(M_PATH + "subp.subp") + def test_do_as_nested_py_cmd(self, m_subp): + cls = distros.fetch("ubuntu") + d = cls("ubuntu", {}, None) + d.do_as( + command=[ + "/usr/bin/python3", + "-c", + "import site; print(site.getuserbase())", + ], + user="ubuntu", + ) + m_subp.assert_called_once_with( + [ + "su", + "-", + "ubuntu", + "-c", + "env PATH=$PATH /usr/bin/python3 -c 'import site; print(site.getuserbase())'", # noqa: E501 + ] + ) + class TestGetPackageMirrors: def return_first(self, mlist): diff --git a/tests/unittests/distros/test_hosts.py b/tests/unittests/distros/test_hosts.py index 2a705568..7fd5abf2 100644 --- a/tests/unittests/distros/test_hosts.py +++ b/tests/unittests/distros/test_hosts.py @@ -1,7 +1,5 @@ # This file is part of cloud-init. See LICENSE file for license information. 
-import unittest - from cloudinit.distros.parsers import hosts BASE_ETC = """ @@ -15,30 +13,28 @@ BASE_ETC = BASE_ETC.strip() -class TestHostsHelper(unittest.TestCase): +class TestHostsHelper: def test_parse(self): eh = hosts.HostsConf(BASE_ETC) - self.assertEqual(eh.get_entry("127.0.0.1"), [["localhost"]]) - self.assertEqual( - eh.get_entry("192.168.1.10"), - [["foo.mydomain.org", "foo"], ["bar.mydomain.org", "bar"]], - ) + assert eh.get_entry("127.0.0.1") == [["localhost"]] + assert eh.get_entry("192.168.1.10") == [ + ["foo.mydomain.org", "foo"], + ["bar.mydomain.org", "bar"], + ] eh = str(eh) - self.assertTrue(eh.startswith("# Example")) + assert eh.startswith("# Example") def test_add(self): eh = hosts.HostsConf(BASE_ETC) eh.add_entry("127.0.0.0", "blah") - self.assertEqual(eh.get_entry("127.0.0.0"), [["blah"]]) + assert eh.get_entry("127.0.0.0") == [["blah"]] eh.add_entry("127.0.0.3", "blah", "blah2", "blah3") - self.assertEqual( - eh.get_entry("127.0.0.3"), [["blah", "blah2", "blah3"]] - ) + assert eh.get_entry("127.0.0.3") == [["blah", "blah2", "blah3"]] def test_del(self): eh = hosts.HostsConf(BASE_ETC) eh.add_entry("127.0.0.0", "blah") - self.assertEqual(eh.get_entry("127.0.0.0"), [["blah"]]) + assert eh.get_entry("127.0.0.0") == [["blah"]] eh.del_entries("127.0.0.0") - self.assertEqual(eh.get_entry("127.0.0.0"), []) + assert eh.get_entry("127.0.0.0") == [] diff --git a/tests/unittests/early_patches.py b/tests/unittests/early_patches.py index 01b894d3..3848263e 100644 --- a/tests/unittests/early_patches.py +++ b/tests/unittests/early_patches.py @@ -6,8 +6,15 @@ def wrapped_lru_cache(*args, **kwargs): - def wrapper(func: Callable, *a, **k): - new_func = old_lru_cache(*args, **kwargs)(func, *a, **k) + def wrapper(*a, **k): + func: Callable + if len(args) > 0 and callable(args[0]): + func = args[0] + elif len(a) > 0 and callable(a[0]): + func = a[0] + else: + raise NotImplementedError("cannot find cached func") + new_func = old_lru_cache(*args, **kwargs)(*a, 
**k) # Without this check, we'll also store stdlib functions with @lru_cache if "cloudinit" in func.__module__: diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py index 7e9eda8c..a52860b4 100644 --- a/tests/unittests/helpers.py +++ b/tests/unittests/helpers.py @@ -2,38 +2,26 @@ # pylint: disable=attribute-defined-outside-init import copy -import functools -import io -import logging import os import random import shutil import string -import tempfile import time import unittest from contextlib import contextmanager -from typing import ClassVar, List, Union from unittest import mock from unittest.util import strclass from urllib.parse import urlsplit, urlunsplit import responses -from cloudinit import distros, helpers, settings, subp, util -from cloudinit.config.schema import ( - SchemaValidationError, - validate_cloudconfig_schema, -) +from cloudinit import distros, helpers, settings, util from cloudinit.helpers import Paths from cloudinit.templater import JINJA_AVAILABLE from tests.helpers import cloud_init_project_dir from tests.hypothesis_jsonschema import HAS_HYPOTHESIS_JSONSCHEMA -_real_subp = subp.subp - # Used for skipping tests -SkipTest = unittest.SkipTest skipIf = unittest.skipIf @@ -130,6 +118,8 @@ def random_string(length=8): ) +# Note: The use of this class and unittests.TestCase is discouraged. Use pytest +# instead. See development docs on testing. class TestCase(unittest.TestCase): def reset_global_state(self): """Reset any global state to its original settings. 
@@ -164,107 +154,6 @@ def add_patch(self, target, attr, *args, **kwargs): setattr(self, attr, p) -class CiTestCase(TestCase): - """This is the preferred test case base class unless user - needs other test case classes below.""" - - # Subclass overrides for specific test behavior - # Whether or not a unit test needs logfile setup - with_logs = False - allowed_subp: ClassVar[Union[List, bool]] = False - SUBP_SHELL_TRUE = "shell=true" - - @contextmanager - def allow_subp(self, allowed_subp): - orig = self.allowed_subp - try: - self.allowed_subp = allowed_subp - yield - finally: - self.allowed_subp = orig - - def setUp(self): - super(CiTestCase, self).setUp() - if self.with_logs: - # Create a log handler so unit tests can search expected logs. - self.logger = logging.getLogger() - self.logs = io.StringIO() - formatter = logging.Formatter("%(levelname)s: %(message)s") - handler = logging.StreamHandler(self.logs) - handler.setFormatter(formatter) - self.old_handlers = self.logger.handlers - self.logger.handlers = [handler] - self.old_level = logging.root.level - self.logger.level = logging.DEBUG - if self.allowed_subp is True: - subp.subp = _real_subp - else: - subp.subp = self._fake_subp - - def _fake_subp(self, *args, **kwargs): - if "args" in kwargs: - cmd = kwargs["args"] - else: - if not args: - raise TypeError( - "subp() missing 1 required positional argument: 'args'" - ) - cmd = args[0] - - if not isinstance(cmd, str): - cmd = cmd[0] - pass_through = False - if not isinstance(self.allowed_subp, (list, bool)): - raise TypeError("self.allowed_subp supports list or bool.") - if isinstance(self.allowed_subp, bool): - pass_through = self.allowed_subp - else: - pass_through = (cmd in self.allowed_subp) or ( - self.SUBP_SHELL_TRUE in self.allowed_subp - and kwargs.get("shell") - ) - if pass_through: - return _real_subp(*args, **kwargs) - raise RuntimeError( - "called subp. 
set self.allowed_subp=True to allow\n subp(%s)" - % ", ".join( - [str(repr(a)) for a in args] - + ["%s=%s" % (k, repr(v)) for k, v in kwargs.items()] - ) - ) - - def tearDown(self): - if self.with_logs: - # Remove the handler we setup - logging.getLogger().handlers = self.old_handlers - logging.getLogger().setLevel(self.old_level) - subp.subp = _real_subp - super(CiTestCase, self).tearDown() - - def tmp_dir(self, dir=None, cleanup=True): - # return a full path to a temporary directory that will be cleaned up. - if dir is None: - tmpd = tempfile.mkdtemp(prefix="ci-%s." % self.__class__.__name__) - else: - tmpd = tempfile.mkdtemp(dir=dir) - self.addCleanup( - functools.partial(shutil.rmtree, tmpd, ignore_errors=True) - ) - return tmpd - - def tmp_path(self, path, dir=None): - # return an absolute path to 'path' under dir. - # if dir is None, one will be created with tmp_dir() - # the file is not created or modified. - if dir is None: - dir = self.tmp_dir() - return os.path.normpath(os.path.abspath(os.path.join(dir, path))) - - @classmethod - def random_string(cls, length=8): - return random_string(length) - - def replicate_test_root(example_root, target_root): real_root = resourceLocation() real_root = os.path.join(real_root, "roots", example_root) @@ -328,22 +217,6 @@ def __init__(self, path_cfgs: dict, ds=None): return MockPaths -class SchemaTestCaseMixin(unittest.TestCase): - def assertSchemaValid(self, cfg, msg="Valid Schema failed validation."): - """Assert the config is valid per self.schema. 
- - If there is only one top level key in the schema properties, then - the cfg will be put under that key.""" - props = list(self.schema.get("properties")) - # put cfg under top level key if there is only one in the schema - if len(props) == 1: - cfg = {props[0]: cfg} - try: - validate_cloudconfig_schema(cfg, self.schema, strict=True) - except SchemaValidationError: - self.fail(msg) - - def populate_dir(path, files): if not os.path.exists(path): os.makedirs(path) diff --git a/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-br0.netdev b/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-br0.netdev new file mode 100644 index 00000000..6240729b --- /dev/null +++ b/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-br0.netdev @@ -0,0 +1,13 @@ +[Bridge] +AgeingTimeSec=300 +ForwardDelaySec=4 +HelloTimeSec=2 +MaxAgeSec=20 +Priority=2048 +STP=yes + +[NetDev] +Kind=bridge +MACAddress=00:11:22:33:44:aa +MTUBytes=9000 +Name=br0 diff --git a/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-br0.network b/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-br0.network new file mode 100644 index 00000000..2b733945 --- /dev/null +++ b/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-br0.network @@ -0,0 +1,19 @@ +[Address] +Address=172.16.1.10/24 + +[Link] +MACAddress=00:11:22:33:44:aa +MTUBytes=9000 + +[Match] +Name=br0 + +[Network] +DHCP=no +DNS=1.1.1.1 +Domains=bridge.test + +[Route] +Destination=0.0.0.0/0 +Gateway=172.16.1.1 +Metric=100 diff --git a/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-br1.netdev b/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-br1.netdev new file mode 100644 index 00000000..6a7d7f81 --- /dev/null +++ 
b/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-br1.netdev @@ -0,0 +1,13 @@ +[Bridge] +AgeingTimeSec=200 +ForwardDelaySec=3 +HelloTimeSec=1 +MaxAgeSec=15 +Priority=4096 +STP=yes + +[NetDev] +Kind=bridge +MACAddress=00:11:22:33:44:88 +MTUBytes=1500 +Name=br1 diff --git a/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-br1.network b/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-br1.network new file mode 100644 index 00000000..ea656764 --- /dev/null +++ b/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-br1.network @@ -0,0 +1,19 @@ +[Address] +Address=10.0.0.10/24 + +[Link] +MACAddress=00:11:22:33:44:88 +MTUBytes=1500 + +[Match] +Name=br1 + +[Network] +DHCP=no +DNS=9.9.9.9 +Domains=vlan.bridge + +[Route] +Destination=10.0.0.0/24 +Gateway=10.0.0.1 +Metric=200 diff --git a/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-br2.netdev b/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-br2.netdev new file mode 100644 index 00000000..a273afde --- /dev/null +++ b/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-br2.netdev @@ -0,0 +1,11 @@ +[Bridge] +AgeingTimeSec=100 +HelloTimeSec=2 +MaxAgeSec=20 +Priority=8192 + +[NetDev] +Kind=bridge +MACAddress=00:11:22:33:44:bb +MTUBytes=1600 +Name=br2 diff --git a/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-br2.network b/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-br2.network new file mode 100644 index 00000000..02b421b7 --- /dev/null +++ b/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-br2.network @@ -0,0 +1,13 @@ +[Address] +Address=192.168.50.10/24 + +[Link] +MACAddress=00:11:22:33:44:bb +MTUBytes=1600 + +[Match] +Name=br2 + +[Network] +DHCP=no +DNS=8.8.4.4 diff --git 
a/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-eth0.network b/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-eth0.network index e3a8358f..16ea4e32 100644 --- a/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-eth0.network +++ b/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-eth0.network @@ -1,3 +1,6 @@ +[Address] +Address=192.0.2.10/24 + [Link] MTUBytes=1500 @@ -9,3 +12,28 @@ Name=eth0 Bond=bond0 DHCP=no VLAN=vlan100 +VLAN=vlan101 +VLAN=vlan102 + +[Route] +Destination=0.0.0.0/0 +Gateway=192.168.14.1 +Metric=50 +Table=10 + +[Route] +Destination=::/0 +Gateway=2001:1::2 +Metric=100 + +[RoutingPolicyRule] +From=192.0.2.10 +Priority=100 +Table=10 +To=203.0.113.100 + +[RoutingPolicyRule] +From=203.0.113.101/24 +Priority=90 +Table=70 +To=192.0.2.11/8 diff --git a/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-eth2.network b/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-eth2.network index 712c52b7..e637cbec 100644 --- a/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-eth2.network +++ b/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-eth2.network @@ -14,3 +14,19 @@ DHCP=no DNS=9.9.9.9 8.8.8.8 Domains=example.org VLAN=vl101 + +[Route] +Destination=198.51.100.0/24 +Gateway=10.20.30.1 +Metric=60 + +[Route] +Destination=192.0.2.0/24 +Gateway=10.20.30.1 +Metric=110 + +[RoutingPolicyRule] +From=10.20.30.40 +Priority=120 +Table=12 +To=198.51.100.200 diff --git a/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-eth3.network b/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-eth3.network new file mode 100644 index 00000000..149bb852 --- /dev/null +++ 
b/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-eth3.network @@ -0,0 +1,10 @@ +[Bridge] +Cost=30 +Priority=32 + +[Match] +Name=eth3 + +[Network] +Bridge=br0 +DHCP=no diff --git a/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-eth4.network b/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-eth4.network new file mode 100644 index 00000000..fb31f7df --- /dev/null +++ b/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-eth4.network @@ -0,0 +1,10 @@ +[Bridge] +Cost=40 +Priority=60 + +[Match] +Name=eth4 + +[Network] +Bridge=br0 +DHCP=no diff --git a/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-vl101.network b/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-vl101.network index fb82925b..2d9b2b6d 100644 --- a/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-vl101.network +++ b/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-vl101.network @@ -2,4 +2,5 @@ Name=vl101 [Network] +Bridge=br2 DHCP=no diff --git a/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-vlan100.network b/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-vlan100.network index f8afc472..49ea2ce7 100644 --- a/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-vlan100.network +++ b/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-vlan100.network @@ -4,6 +4,10 @@ Address=192.168.100.10/24 [Address] Address=192.168.100.11/24 +[Bridge] +Cost=45 +Priority=50 + [Link] MACAddress=00:11:22:33:44:66 MTUBytes=901 @@ -12,6 +16,7 @@ MTUBytes=901 Name=vlan100 [Network] +Bridge=br1 DHCP=ipv6 DNS=8.8.8.8 1.1.1.1 Domains=corp.example.com vlan.test diff --git 
a/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-vlan101.netdev b/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-vlan101.netdev new file mode 100644 index 00000000..aec56f81 --- /dev/null +++ b/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-vlan101.netdev @@ -0,0 +1,7 @@ +[NetDev] +Kind=vlan +MTUBytes=1400 +Name=vlan101 + +[VLAN] +Id=101 diff --git a/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-vlan101.network b/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-vlan101.network new file mode 100644 index 00000000..ac9ff418 --- /dev/null +++ b/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-vlan101.network @@ -0,0 +1,23 @@ +[Address] +Address=10.100.101.10/24 + +[Link] +MTUBytes=1400 + +[Match] +Name=vlan101 + +[Network] +DHCP=no +DNS=1.1.1.1 9.9.9.9 +Domains=vlan101.local + +[Route] +Destination=0.0.0.0/0 +Gateway=10.100.101.1 +Table=101 + +[RoutingPolicyRule] +From=10.100.101.10 +Priority=100 +Table=101 diff --git a/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-vlan102.netdev b/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-vlan102.netdev new file mode 100644 index 00000000..2d8e8fba --- /dev/null +++ b/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-vlan102.netdev @@ -0,0 +1,7 @@ +[NetDev] +Kind=vlan +MTUBytes=1400 +Name=vlan102 + +[VLAN] +Id=102 diff --git a/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-vlan102.network b/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-vlan102.network new file mode 100644 index 00000000..65b3f754 --- /dev/null +++ b/tests/unittests/net/artifacts/photon_net_config/etc/systemd/network/10-cloud-init-vlan102.network @@ -0,0 +1,22 @@ +[Address] 
+Address=10.100.102.10/24 + +[Link] +MTUBytes=1400 + +[Match] +Name=vlan102 + +[Network] +DHCP=no +DNS=8.8.8.8 1.0.0.1 +Domains=vlan102.local + +[Route] +Destination=0.0.0.0/0 +Gateway=10.100.102.1 +Table=102 + +[RoutingPolicyRule] +From=10.100.102.10 +Table=102 diff --git a/tests/unittests/net/artifacts/photon_net_config_v2.yaml b/tests/unittests/net/artifacts/photon_net_config_v2.yaml index 7984f843..c8d6e435 100644 --- a/tests/unittests/net/artifacts/photon_net_config_v2.yaml +++ b/tests/unittests/net/artifacts/photon_net_config_v2.yaml @@ -8,6 +8,24 @@ network: set-name: eth0 dhcp4: false mtu: 1500 + addresses: [192.0.2.10/24] + routes: + - to: default + via: 192.168.14.1 + metric: 50 + table: 10 + - to: default + via: 2001:1::2 + metric: 100 + routing-policy: + - from: 192.0.2.10 + to: 203.0.113.100 + table: 10 + priority: 100 + - to: 192.0.2.11/8 + from: 203.0.113.101/24 + table: 70 + priority: 90 eth1: dhcp4: true @@ -17,16 +35,33 @@ network: match: macaddress: "00:aa:bb:cc:dd:ee" set-name: eth2 - mtu: 1400 dhcp4: false + mtu: 1400 addresses: - 10.20.30.40/24 nameservers: - addresses: - - 9.9.9.9 - - 8.8.8.8 - search: - - example.org + addresses: [9.9.9.9, 8.8.8.8] + search: [example.org] + routes: + - to: 198.51.100.0/24 + via: 10.20.30.1 + metric: 60 + - to: 192.0.2.0/24 + via: 10.20.30.1 + metric: 110 + routing-policy: + - from: 10.20.30.40 + to: 198.51.100.200 + table: 12 + priority: 120 + + eth3: + dhcp4: false + dhcp6: false + + eth4: + dhcp4: false + dhcp6: false bonds: bond0: @@ -45,19 +80,13 @@ network: vlan100: id: 100 link: eth0 - addresses: - - 192.168.100.10/24 - - 192.168.100.11/24 + addresses: [192.168.100.10/24, 192.168.100.11/24] dhcp6: true mtu: 901 macaddress: "00:11:22:33:44:66" nameservers: - addresses: - - 8.8.8.8 - - 1.1.1.1 - search: - - corp.example.com - - vlan.test + addresses: [8.8.8.8, 1.1.1.1] + search: [corp.example.com, vlan.test] routes: - to: 10.10.200.0/24 via: 192.168.100.1 @@ -66,6 +95,39 @@ network: via: 192.168.100.2 
metric: 150 + vlan101: + id: 101 + link: eth0 + addresses: [10.100.101.10/24] + routes: + - to: 0.0.0.0/0 + via: 10.100.101.1 + table: 101 + routing-policy: + - from: 10.100.101.10 + table: 101 + priority: 100 + mtu: 1400 + nameservers: + addresses: [1.1.1.1, 9.9.9.9] + search: [vlan101.local] + + vlan102: + id: 102 + link: eth0 + addresses: [10.100.102.10/24] + routes: + - to: 0.0.0.0/0 + via: 10.100.102.1 + table: 102 + routing-policy: + - from: 10.100.102.10 + table: 102 + mtu: 1400 + nameservers: + addresses: [8.8.8.8, 1.0.0.1] + search: [vlan102.local] + vl101: id: 101 link: eth2 @@ -74,13 +136,75 @@ network: id: 200 link: bond0 macaddress: "00:11:22:33:44:99" - addresses: - - 192.168.200.10/24 + addresses: [192.168.200.10/24] nameservers: - addresses: - - 1.1.1.1 - - 8.8.4.4 - search: - - bond.vlan.test + addresses: [1.1.1.1, 8.8.4.4] + search: [bond.vlan.test] mtu: 111 dhcp6: true + + bridges: + br0: + interfaces: [eth3, eth4] + addresses: [172.16.1.10/24] + routes: + - to: 0.0.0.0/0 + via: 172.16.1.1 + metric: 100 + nameservers: + addresses: [1.1.1.1] + search: [bridge.test] + mtu: 9000 + macaddress: "00:11:22:33:44:aa" + parameters: + stp: true + forward-delay: 4 + hello-time: 2 + max-age: 20 + priority: 2048 + ageing-time: 300 + path-cost: + eth3: 30 + eth4: 40 + port-priority: + eth3: 32 + eth4: 60 + + br1: + interfaces: [vlan100] + addresses: [10.0.0.10/24] + routes: + - to: 10.0.0.0/24 + via: 10.0.0.1 + metric: 200 + nameservers: + addresses: [9.9.9.9] + search: [vlan.bridge] + mtu: 1500 + macaddress: "00:11:22:33:44:88" + parameters: + stp: true + forward-delay: 3 + hello-time: 1 + max-age: 15 + priority: 4096 + ageing-time: 200 + path-cost: + vlan100: 45 + port-priority: + vlan100: 50 + + br2: + interfaces: [vl101] + addresses: [192.168.50.10/24] + nameservers: + addresses: [8.8.4.4] + mtu: 1600 + macaddress: "00:11:22:33:44:BB" + parameters: + stp: false + forward-delay: 0 + hello-time: 2 + max-age: 20 + priority: 8192 + ageing-time: 100 diff --git 
a/tests/unittests/net/network_configs.py b/tests/unittests/net/network_configs.py index c2fd524a..bd42b2bf 100644 --- a/tests/unittests/net/network_configs.py +++ b/tests/unittests/net/network_configs.py @@ -115,7 +115,30 @@ DNS=1.2.3.4 5.6.7.8 """ ).rstrip(" "), - "expected_eni": textwrap.dedent( + "expected_eni_ip_cmd": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + dns-nameservers 1.2.3.4 5.6.7.8 + dns-search wark.maas + + iface eth1 inet manual + + auto eth99 + iface eth99 inet dhcp + + # control-alias eth99 + iface eth99 inet static + address 192.168.21.3/24 + dns-nameservers 8.8.8.8 8.8.4.4 + dns-search barley.maas sach.maas + post-up ip route add default via 65.61.151.37 metric 10000 \ +|| true + pre-down ip route del default via 65.61.151.37 metric 10000 \ +|| true + """ + ).rstrip(" "), + "expected_eni_route_cmd": textwrap.dedent( """\ auto lo iface lo inet loopback @@ -319,7 +342,28 @@ DHCP=no """ ).rstrip(" "), - "expected_eni": textwrap.dedent( + "expected_eni_ip_cmd": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + + iface eth1 inet manual + + auto eth99 + iface eth99 inet dhcp + + # control-alias eth99 + iface eth99 inet static + address 192.168.21.3/24 + dns-nameservers 8.8.8.8 8.8.4.4 + dns-search barley.maas sach.maas + post-up ip route add default via 65.61.151.37 metric 10000 \ +|| true + pre-down ip route del default via 65.61.151.37 metric 10000 \ +|| true + """ + ).rstrip(" "), + "expected_eni_route_cmd": textwrap.dedent( """\ auto lo iface lo inet loopback @@ -468,7 +512,19 @@ DHCP=yes """ ).rstrip(" "), - "expected_eni": textwrap.dedent( + "expected_eni_ip_cmd": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet dhcp + + # control-alias iface0 + iface iface0 inet6 dhcp + """ + ).rstrip(" "), + "expected_eni_route_cmd": textwrap.dedent( """\ auto lo iface lo inet loopback @@ -562,7 +618,23 @@ Address=2001:1::1/64 """ ).rstrip(" "), - "expected_eni": textwrap.dedent( + 
"expected_eni_ip_cmd": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet static + address 192.168.14.2/24 + mtu 9000 + + # control-alias iface0 + iface iface0 inet6 static + address 2001:1::1/64 + mtu 1500 + """ + ).rstrip(" "), + "expected_eni_route_cmd": textwrap.dedent( """\ auto lo iface lo inet loopback @@ -696,7 +768,22 @@ Address=2001:1::1/64 """ ).rstrip(" "), - "expected_eni": textwrap.dedent( + "expected_eni_ip_cmd": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet static + address 192.168.14.2/24 + mtu 9000 + + # control-alias iface0 + iface iface0 inet6 static + address 2001:1::1/64 + """ + ).rstrip(" "), + "expected_eni_route_cmd": textwrap.dedent( """\ auto lo iface lo inet loopback @@ -845,7 +932,16 @@ DHCP=ipv6 """ ).rstrip(" "), - "expected_eni": textwrap.dedent( + "expected_eni_ip_cmd": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet6 dhcp + """ + ).rstrip(" "), + "expected_eni_route_cmd": textwrap.dedent( """\ auto lo iface lo inet loopback @@ -930,7 +1026,17 @@ }, }, "dhcpv6_accept_ra": { - "expected_eni": textwrap.dedent( + "expected_eni_ip_cmd": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet6 dhcp + accept-ra 1 + """ + ).rstrip(" "), + "expected_eni_route_cmd": textwrap.dedent( """\ auto lo iface lo inet loopback @@ -1005,7 +1111,17 @@ ).rstrip(" "), }, "dhcpv6_reject_ra": { - "expected_eni": textwrap.dedent( + "expected_eni_ip_cmd": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet6 dhcp + accept-ra 0 + """ + ).rstrip(" "), + "expected_eni_route_cmd": textwrap.dedent( """\ auto lo iface lo inet loopback @@ -1080,7 +1196,17 @@ ).rstrip(" "), }, "ipv6_slaac": { - "expected_eni": textwrap.dedent( + "expected_eni_ip_cmd": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet6 
auto + dhcp 0 + """ + ).rstrip(" "), + "expected_eni_route_cmd": textwrap.dedent( """\ auto lo iface lo inet loopback @@ -1203,7 +1329,17 @@ }, }, "dhcpv6_stateless": { - "expected_eni": textwrap.dedent( + "expected_eni_ip_cmd": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet6 auto + dhcp 1 + """ + ).rstrip(" "), + "expected_eni_route_cmd": textwrap.dedent( """\ auto lo iface lo inet loopback @@ -1288,7 +1424,16 @@ }, }, "dhcpv6_stateful": { - "expected_eni": textwrap.dedent( + "expected_eni_ip_cmd": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet6 dhcp + """ + ).rstrip(" "), + "expected_eni_route_cmd": textwrap.dedent( """\ auto lo iface lo inet loopback @@ -1348,7 +1493,16 @@ }, }, "wakeonlan_disabled": { - "expected_eni": textwrap.dedent( + "expected_eni_ip_cmd": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet dhcp + """ + ).rstrip(" "), + "expected_eni_route_cmd": textwrap.dedent( """\ auto lo iface lo inet loopback @@ -1421,7 +1575,17 @@ ).rstrip(" "), }, "wakeonlan_enabled": { - "expected_eni": textwrap.dedent( + "expected_eni_ip_cmd": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet dhcp + ethernet-wol g + """ + ).rstrip(" "), + "expected_eni_route_cmd": textwrap.dedent( """\ auto lo iface lo inet loopback @@ -1498,7 +1662,102 @@ ).rstrip(" "), }, "large_v1": { - "expected_eni": """\ + "expected_eni_ip_cmd": """\ +auto lo +iface lo inet loopback + dns-nameservers 8.8.8.8 4.4.4.4 8.8.4.4 + dns-search barley.maas wark.maas foobar.maas + +iface eth0 inet manual + +auto eth1 +iface eth1 inet manual + bond-master bond0 + bond-miimon 100 + bond-mode active-backup + bond-xmit-hash-policy layer3+4 + +auto eth2 +iface eth2 inet manual + bond-master bond0 + bond-miimon 100 + bond-mode active-backup + bond-xmit-hash-policy layer3+4 + +iface eth3 inet manual + +iface eth4 inet manual + 
+# control-manual eth5 +iface eth5 inet dhcp + +auto ib0 +iface ib0 inet static + address 192.168.200.7/24 + mtu 9000 + hwaddress a0:00:02:20:fe:80:00:00:00:00:00:00:ec:0d:9a:03:00:15:e2:c1 + +auto bond0 +iface bond0 inet6 dhcp + bond-miimon 100 + bond-mode active-backup + bond-slaves none + bond-xmit-hash-policy layer3+4 + hwaddress aa:bb:cc:dd:ee:ff + +auto br0 +iface br0 inet static + address 192.168.14.2/24 + bridge-ageing 250 + bridge-bridgeprio 22 + bridge-fd 1 + bridge-gcint 2 + bridge-hello 1 + bridge-maxage 10 + bridge-pathcost eth3 50 + bridge-pathcost eth4 75 + bridge-portprio eth3 28 + bridge-portprio eth4 14 + bridge-ports eth3 eth4 + bridge-stp off + bridge-waitport 1 eth3 + bridge-waitport 2 eth4 + hwaddress bb:bb:bb:bb:bb:aa + +# control-alias br0 +iface br0 inet6 static + address 2001:1::1/64 + post-up ip -family inet6 route add default via 2001:4800:78ff:1b::1 \ +|| true + pre-down ip -family inet6 route del default via 2001:4800:78ff:1b::1 \ +|| true + +auto bond0.200 +iface bond0.200 inet dhcp + vlan-id 200 + vlan-raw-device bond0 + +auto eth0.101 +iface eth0.101 inet static + address 192.168.0.2/24 + dns-nameservers 192.168.0.10 10.23.23.134 + dns-search barley.maas sacchromyces.maas brettanomyces.maas + gateway 192.168.0.1 + mtu 1500 + hwaddress aa:bb:cc:dd:ee:11 + vlan-id 101 + vlan-raw-device eth0 + +# control-alias eth0.101 +iface eth0.101 inet static + address 192.168.2.10/24 + dns-nameservers 192.168.0.10 10.23.23.134 + dns-search barley.maas sacchromyces.maas brettanomyces.maas + +post-up ip route add 10.0.0.0/8 via 11.0.0.1 metric 3 || true +pre-down ip route del 10.0.0.0/8 via 11.0.0.1 metric 3 || true +""", + "expected_eni_route_cmd": """\ auto lo iface lo inet loopback dns-nameservers 8.8.8.8 4.4.4.4 8.8.4.4 @@ -2341,7 +2600,7 @@ ).lstrip(), }, "large_v2": { - "expected_eni": """\ + "expected_eni_ip_cmd": """\ auto lo iface lo inet loopback dns-nameservers 8.8.8.8 4.4.4.4 8.8.4.4 @@ -2406,8 +2665,10 @@ # control-alias br0 iface br0 
inet6 static address 2001:1::1/64 - post-up route add -A inet6 default gw 2001:4800:78ff:1b::1 || true - pre-down route del -A inet6 default gw 2001:4800:78ff:1b::1 || true + post-up ip -family inet6 route add default via 2001:4800:78ff:1b::1 \ +|| true + pre-down ip -family inet6 route del default via 2001:4800:78ff:1b::1 \ +|| true auto bond0.200 iface bond0.200 inet dhcp @@ -2429,22 +2690,113 @@ iface eth0.101 inet static address 192.168.2.10/24 -post-up route add -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true -pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true +post-up ip route add 10.0.0.0/8 via 11.0.0.1 metric 3 || true +pre-down ip route del 10.0.0.0/8 via 11.0.0.1 metric 3 || true """, - "expected_sysconfig_opensuse": { - "ifcfg-bond0": textwrap.dedent( - """\ - BONDING_MASTER=yes - BONDING_MODULE_OPTS="mode=active-backup """ - """xmit_hash_policy=layer3+4 """ - """miimon=100" - BONDING_SLAVE_0=eth1 - BONDING_SLAVE_1=eth2 - BOOTPROTO=dhcp6 - DHCLIENT6_MODE=managed - LLADDR=aa:bb:cc:dd:ee:ff - STARTMODE=auto""" + "expected_eni_route_cmd": """\ +auto lo +iface lo inet loopback + dns-nameservers 8.8.8.8 4.4.4.4 8.8.4.4 + dns-search barley.maas wark.maas foobar.maas + +iface eth0 inet manual + +auto eth1 +iface eth1 inet manual + bond-master bond0 + bond-miimon 100 + bond-mode active-backup + bond-xmit-hash-policy layer3+4 + +auto eth2 +iface eth2 inet manual + bond-master bond0 + bond-miimon 100 + bond-mode active-backup + bond-xmit-hash-policy layer3+4 + +iface eth3 inet manual + +iface eth4 inet manual + +# control-manual eth5 +iface eth5 inet dhcp + +auto ib0 +iface ib0 inet static + address 192.168.200.7/24 + mtu 9000 + hwaddress a0:00:02:20:fe:80:00:00:00:00:00:00:ec:0d:9a:03:00:15:e2:c1 + +auto bond0 +iface bond0 inet6 dhcp + bond-miimon 100 + bond-mode active-backup + bond-slaves none + bond-xmit-hash-policy layer3+4 + hwaddress aa:bb:cc:dd:ee:ff + +auto br0 +iface br0 inet static + address 192.168.14.2/24 + bridge-ageing 250 + 
bridge-bridgeprio 22 + bridge-fd 1 + bridge-gcint 2 + bridge-hello 1 + bridge-maxage 10 + bridge-pathcost eth3 50 + bridge-pathcost eth4 75 + bridge-portprio eth3 28 + bridge-portprio eth4 14 + bridge-ports eth3 eth4 + bridge-stp off + bridge-waitport 1 eth3 + bridge-waitport 2 eth4 + hwaddress bb:bb:bb:bb:bb:aa + +# control-alias br0 +iface br0 inet6 static + address 2001:1::1/64 + post-up route add -A inet6 default gw 2001:4800:78ff:1b::1 || true + pre-down route del -A inet6 default gw 2001:4800:78ff:1b::1 || true + +auto bond0.200 +iface bond0.200 inet dhcp + vlan-id 200 + vlan-raw-device bond0 + +auto eth0.101 +iface eth0.101 inet static + address 192.168.0.2/24 + dns-nameservers 192.168.0.10 10.23.23.134 + dns-search barley.maas sacchromyces.maas brettanomyces.maas + gateway 192.168.0.1 + mtu 1500 + hwaddress aa:bb:cc:dd:ee:11 + vlan-id 101 + vlan-raw-device eth0 + +# control-alias eth0.101 +iface eth0.101 inet static + address 192.168.2.10/24 + +post-up route add -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true +pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true +""", + "expected_sysconfig_opensuse": { + "ifcfg-bond0": textwrap.dedent( + """\ + BONDING_MASTER=yes + BONDING_MODULE_OPTS="mode=active-backup """ + """xmit_hash_policy=layer3+4 """ + """miimon=100" + BONDING_SLAVE_0=eth1 + BONDING_SLAVE_1=eth2 + BOOTPROTO=dhcp6 + DHCLIENT6_MODE=managed + LLADDR=aa:bb:cc:dd:ee:ff + STARTMODE=auto""" ), "ifcfg-bond0.200": textwrap.dedent( """\ @@ -3132,7 +3484,74 @@ via: 3001:67c:15::1 """ ), - "expected_eni": textwrap.dedent( + "expected_eni_ip_cmd": textwrap.dedent( + """\ +auto lo +iface lo inet loopback + +auto bond0s0 +iface bond0s0 inet manual + bond-downdelay 10 + bond-fail-over-mac active + bond-master bond0 + bond-miimon 100 + bond-mode active-backup + bond-num-grat-arp 5 + bond-primary bond0s0 + bond-primary-reselect always + bond-updelay 20 + bond-xmit-hash-policy layer3+4 + +auto bond0s1 +iface bond0s1 inet manual + bond-downdelay 10 + 
bond-fail-over-mac active + bond-master bond0 + bond-miimon 100 + bond-mode active-backup + bond-num-grat-arp 5 + bond-primary bond0s0 + bond-primary-reselect always + bond-updelay 20 + bond-xmit-hash-policy layer3+4 + +auto bond0 +iface bond0 inet static + address 192.168.0.2/24 + gateway 192.168.0.1 + bond-downdelay 10 + bond-fail-over-mac active + bond-miimon 100 + bond-mode active-backup + bond-num-grat-arp 5 + bond-primary bond0s0 + bond-primary-reselect always + bond-slaves none + bond-updelay 20 + bond-xmit-hash-policy layer3+4 + hwaddress aa:bb:cc:dd:e8:ff + mtu 9000 + post-up ip route add 10.1.3.0/24 via 192.168.0.3 || true + pre-down ip route del 10.1.3.0/24 via 192.168.0.3 || true + +# control-alias bond0 +iface bond0 inet static + address 192.168.1.2/24 + +# control-alias bond0 +iface bond0 inet6 static + address 2001:1::1/92 + post-up ip -family inet6 route add 2001:67c::/32 via 2001:67c:1562::1 \ +|| true + pre-down ip -family inet6 route del 2001:67c::/32 via 2001:67c:1562::1 \ +|| true + post-up ip -family inet6 route add 3001:67c::/32 via 3001:67c:15::1 \ +metric 10000 || true + pre-down ip -family inet6 route del 3001:67c::/32 via 3001:67c:15::1 \ +metric 10000 || true + """ + ), + "expected_eni_route_cmd": textwrap.dedent( """\ auto lo iface lo inet loopback @@ -3481,7 +3900,74 @@ via: 3001:67c:15::1 """ ), - "expected_eni": textwrap.dedent( + "expected_eni_ip_cmd": textwrap.dedent( + """\ +auto lo +iface lo inet loopback + +auto bond0s0 +iface bond0s0 inet manual + bond-downdelay 10 + bond-fail-over-mac active + bond-master bond0 + bond_miimon 100 + bond-mode active-backup + bond-num-grat-arp 5 + bond-primary bond0s0 + bond-primary-reselect always + bond-updelay 20 + bond-xmit-hash-policy layer3+4 + +auto bond0s1 +iface bond0s1 inet manual + bond-downdelay 10 + bond-fail-over-mac active + bond-master bond0 + bond_miimon 100 + bond-mode active-backup + bond-num-grat-arp 5 + bond-primary bond0s0 + bond-primary-reselect always + bond-updelay 20 + 
bond-xmit-hash-policy layer3+4 + +auto bond0 +iface bond0 inet static + address 192.168.0.2/24 + gateway 192.168.0.1 + bond-downdelay 10 + bond-fail-over-mac active + bond_miimon 100 + bond-mode active-backup + bond-num-grat-arp 5 + bond-primary bond0s0 + bond-primary-reselect always + bond-slaves none + bond-updelay 20 + bond-xmit-hash-policy layer3+4 + hwaddress aa:bb:cc:dd:e8:ff + mtu 9000 + post-up ip route add 10.1.3.0/24 via 192.168.0.3 || true + pre-down ip route del 10.1.3.0/24 via 192.168.0.3 || true + +# control-alias bond0 +iface bond0 inet static + address 192.168.1.2/24 + +# control-alias bond0 +iface bond0 inet6 static + address 2001:1::1/92 + post-up ip -family inet6 route add 2001:67c::/32 via 2001:67c:1562::1 \ +|| true + pre-down ip -family inet6 route del 2001:67c::/32 via 2001:67c:1562::1 \ +|| true + post-up ip -family inet6 route add 3001:67c::/32 via 3001:67c:15::1 \ +metric 10000 || true + pre-down ip -family inet6 route del 3001:67c::/32 via 3001:67c:15::1 \ +metric 10000 || true + """ + ), + "expected_eni_route_cmd": textwrap.dedent( """\ auto lo iface lo inet loopback @@ -4246,7 +4732,24 @@ control: manual """ ), - "expected_eni": textwrap.dedent( + "expected_eni_ip_cmd": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + + # control-manual eth0 + iface eth0 inet static + address 192.168.1.2/24 + + auto eth1 + iface eth1 inet manual + mtu 1480 + + # control-manual eth2 + iface eth2 inet manual + """ + ), + "expected_eni_route_cmd": textwrap.dedent( """\ auto lo iface lo inet loopback @@ -4437,7 +4940,31 @@ Gateway=192.168.1.1 """ ), - "expected_eni": textwrap.dedent( + "expected_eni_ip_cmd": textwrap.dedent( + """\ + # This file is generated from information provided by the datasource. Changes + # to it will not persist across an instance reboot. 
To disable cloud-init's + # network configuration capabilities, write a file + # /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following: + # network: {config: disabled} + auto lo + iface lo inet loopback + dns-nameservers 2.2.2.2 + dns-search bbbb + + iface lo inet6 loopback + dns-nameservers FEDC::1 + dns-search bbbb + + auto interface0 + iface interface0 inet static + address 192.168.1.20/16 + dns-nameservers 1.1.1.1 3.3.3.3 + dns-search aaaa cccc + gateway 192.168.1.1 + """ # noqa: E501 + ), + "expected_eni_route_cmd": textwrap.dedent( """\ # This file is generated from information provided by the datasource. Changes # to it will not persist across an instance reboot. To disable cloud-init's @@ -4604,7 +5131,40 @@ ), }, "v2-mixed-routes": { - "expected_eni": textwrap.dedent( + "expected_eni_ip_cmd": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + + auto eth0 + iface eth0 inet dhcp + mtu 500 + post-up ip route add 169.254.42.42/32 via 62.210.0.1 || true + pre-down ip route del 169.254.42.42/32 via 62.210.0.1 || true + post-up ip route add 169.254.42.43/32 via 62.210.0.2 || true + pre-down ip route del 169.254.42.43/32 via 62.210.0.2 || true + + # control-alias eth0 + iface eth0 inet6 dhcp + post-up ip -family inet6 route add default via fe80::dc00:ff:fe20:186 || true + pre-down ip -family inet6 route del default via fe80::dc00:ff:fe20:186 || true + post-up ip -family inet6 route add fe80::dc00:ff:fe20:188/64 via fe80::dc00:ff:fe20:187 || true + pre-down ip -family inet6 route del fe80::dc00:ff:fe20:188/64 via fe80::dc00:ff:fe20:187 || true + + # control-alias eth0 + iface eth0 inet static + address 192.168.1.20/16 + dns-nameservers 8.8.8.8 + dns-search lab home + + # control-alias eth0 + iface eth0 inet6 static + address 2001:bc8:1210:232:dc00:ff:fe20:185/64 + dns-nameservers FEDC::1 + dns-search lab home + """ # noqa: E501 + ), + "expected_eni_route_cmd": textwrap.dedent( """\ auto lo iface lo inet loopback @@ -4711,7 +5271,40 @@ ), 
}, "v2-mixed-routes-reversed": { - "expected_eni": textwrap.dedent( + "expected_eni_ip_cmd": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + + auto eth0 + iface eth0 inet dhcp + mtu 500 + post-up ip route add 169.254.42.42/32 via 62.210.0.1 || true + pre-down ip route del 169.254.42.42/32 via 62.210.0.1 || true + post-up ip route add 169.254.42.43/32 via 62.210.0.2 || true + pre-down ip route del 169.254.42.43/32 via 62.210.0.2 || true + + # control-alias eth0 + iface eth0 inet6 dhcp + post-up ip -family inet6 route add default via fe80::dc00:ff:fe20:186 || true + pre-down ip -family inet6 route del default via fe80::dc00:ff:fe20:186 || true + post-up ip -family inet6 route add fe80::dc00:ff:fe20:188/64 via fe80::dc00:ff:fe20:187 || true + pre-down ip -family inet6 route del fe80::dc00:ff:fe20:188/64 via fe80::dc00:ff:fe20:187 || true + + # control-alias eth0 + iface eth0 inet6 static + address 2001:bc8:1210:232:dc00:ff:fe20:185/64 + dns-nameservers FEDC::1 + dns-search home lab + + # control-alias eth0 + iface eth0 inet static + address 192.168.1.20/16 + dns-nameservers 8.8.8.8 + dns-search home lab + """ # noqa: E501 + ), + "expected_eni_route_cmd": textwrap.dedent( """\ auto lo iface lo inet loopback @@ -4818,7 +5411,31 @@ ), }, "v2-mixed-routes-no-ipv6-addr": { - "expected_eni": textwrap.dedent( + "expected_eni_ip_cmd": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + + auto eth0 + iface eth0 inet dhcp + post-up ip route add 169.254.42.42/32 via 62.210.0.1 || true + pre-down ip route del 169.254.42.42/32 via 62.210.0.1 || true + + # control-alias eth0 + iface eth0 inet static + address 192.168.1.20/16 + dns-nameservers 8.8.8.8 + dns-search lab home + + # control-alias eth0 + iface eth0 inet6 static + dns-nameservers FEDC::1 + dns-search lab home + post-up ip -family inet6 route add default via fe80::dc00:ff:fe20:186 || true + pre-down ip -family inet6 route del default via fe80::dc00:ff:fe20:186 || true + """ # noqa: E501 + ), + 
"expected_eni_route_cmd": textwrap.dedent( """\ auto lo iface lo inet loopback @@ -4912,7 +5529,25 @@ Domains=lab home """ ), - "expected_eni": textwrap.dedent( + "expected_eni_ip_cmd": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + + auto eth0 + iface eth0 inet static + address 192.168.1.20/16 + dns-nameservers 8.8.8.8 + dns-search lab home + + # control-alias eth0 + iface eth0 inet6 static + address 2001:bc8:1210:232:dc00:ff:fe20:185/64 + dns-nameservers FEDC::1 + dns-search lab home + """ # noqa: E501 + ), + "expected_eni_route_cmd": textwrap.dedent( """\ auto lo iface lo inet loopback @@ -5014,7 +5649,19 @@ ), }, "v2-dns-no-if-ips": { - "expected_eni": textwrap.dedent( + "expected_eni_ip_cmd": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + + auto eth0 + iface eth0 inet dhcp + + # control-alias eth0 + iface eth0 inet6 dhcp + """ # noqa: E501 + ), + "expected_eni_route_cmd": textwrap.dedent( """\ auto lo iface lo inet loopback @@ -5072,7 +5719,15 @@ ), }, "v2-dns-no-dhcp": { - "expected_eni": textwrap.dedent( + "expected_eni_ip_cmd": textwrap.dedent( + """\ + auto lo + iface lo inet loopback + + iface eth0 inet manual + """ # noqa: E501 + ), + "expected_eni_route_cmd": textwrap.dedent( """\ auto lo iface lo inet loopback diff --git a/tests/unittests/net/test_dhcp.py b/tests/unittests/net/test_dhcp.py index 70856642..c467c324 100644 --- a/tests/unittests/net/test_dhcp.py +++ b/tests/unittests/net/test_dhcp.py @@ -27,10 +27,11 @@ from cloudinit.subp import SubpResult from cloudinit.util import ensure_file, load_binary_file, subp, write_file from tests.unittests.helpers import ( - CiTestCase, + assert_count_equal, example_netdev, mock, populate_dir, + resourceLocation, ) from tests.unittests.util import MockDistro @@ -81,30 +82,32 @@ def test_find_server_address_when_present( ) +@pytest.fixture +def isc_dh_cli(tmp_path): + cli = IscDhclient() + cli.lease_file = str(tmp_path / "leases") + return cli + + 
@pytest.mark.usefixtures("dhclient_exists") -class TestParseDHCPLeasesFile(CiTestCase): - def test_parse_empty_lease_file_errors(self): +class TestParseDHCPLeasesFile: + + def test_parse_empty_lease_file_errors(self, isc_dh_cli): """get_newest_lease errors when file content is empty.""" - client = IscDhclient() - client.lease_file = self.tmp_path("leases") - ensure_file(client.lease_file) - assert not client.get_newest_lease("eth0") + ensure_file(isc_dh_cli.lease_file) + assert not isc_dh_cli.get_newest_lease("eth0") - def test_parse_malformed_lease_file_content_errors(self): + def test_parse_malformed_lease_file_content_errors(self, isc_dh_cli): """IscDhclient.get_newest_lease errors when file content isn't dhcp leases. """ - client = IscDhclient() - client.lease_file = self.tmp_path("leases") - write_file(client.lease_file, "hi mom.") - assert not client.get_newest_lease("eth0") + write_file(isc_dh_cli.lease_file, "hi mom.") + assert not isc_dh_cli.get_newest_lease("eth0") - def test_parse_multiple_leases(self): + def test_parse_multiple_leases(self, isc_dh_cli): """IscDhclient().get_newest_lease returns the latest lease within. 
""" - client = IscDhclient() - client.lease_file = self.tmp_path("leases") content = dedent( """ lease { @@ -132,22 +135,22 @@ def test_parse_multiple_leases(self): "subnet-mask": "255.255.255.0", "routers": "192.168.2.1", } - write_file(client.lease_file, content) - got = client.get_newest_lease("eth0") - self.assertCountEqual(got, expected) + write_file(isc_dh_cli.lease_file, content) + got = isc_dh_cli.get_newest_lease("eth0") + assert_count_equal(got, expected) @pytest.mark.usefixtures("dhclient_exists") @pytest.mark.usefixtures("disable_netdev_info") -class TestDHCPRFC3442(CiTestCase): - def test_parse_lease_finds_rfc3442_classless_static_routes(self): +class TestDHCPRFC3442: + def test_parse_lease_finds_rfc3442_classless_static_routes( + self, isc_dh_cli + ): """IscDhclient().get_newest_lease() returns rfc3442-classless-static-routes. """ - client = IscDhclient() - client.lease_file = self.tmp_path("leases") write_file( - client.lease_file, + isc_dh_cli.lease_file, dedent( """ lease { @@ -171,15 +174,13 @@ def test_parse_lease_finds_rfc3442_classless_static_routes(self): "renew": "4 2017/07/27 18:02:30", "expire": "5 2017/07/28 07:08:15", } - self.assertCountEqual(expected, client.get_newest_lease("eth0")) + assert_count_equal(expected, isc_dh_cli.get_newest_lease("eth0")) - def test_parse_lease_finds_classless_static_routes(self): + def test_parse_lease_finds_classless_static_routes(self, isc_dh_cli): """ IscDhclient().get_newest_lease returns classless-static-routes for Centos lease format. 
""" - client = IscDhclient() - client.lease_file = self.tmp_path("leases") content = dedent( """ lease { @@ -202,8 +203,8 @@ def test_parse_lease_finds_classless_static_routes(self): "renew": "4 2017/07/27 18:02:30", "expire": "5 2017/07/28 07:08:15", } - write_file(client.lease_file, content) - self.assertCountEqual(expected, client.get_newest_lease("eth0")) + write_file(isc_dh_cli.lease_file, content) + assert_count_equal(expected, isc_dh_cli.get_newest_lease("eth0")) @mock.patch("cloudinit.net.ephemeral.EphemeralIPv4Network") @mock.patch("cloudinit.net.ephemeral.maybe_perform_dhcp_discovery") @@ -263,33 +264,29 @@ def test_obtain_centos_lease_parses_static_routes(self, m_maybe, m_ipv4): m_ipv4.assert_called_with(distro, **expected_kwargs) -class TestDHCPParseStaticRoutes(CiTestCase): - with_logs = True - +class TestDHCPParseStaticRoutes: def test_parse_static_routes_empty_string(self): - self.assertEqual([], IscDhclient.parse_static_routes("")) + assert [] == IscDhclient.parse_static_routes("") def test_parse_static_routes_invalid_input_returns_empty_list(self): rfc3442 = "32,169,254,169,254,130,56,248" - self.assertEqual([], IscDhclient.parse_static_routes(rfc3442)) + assert [] == IscDhclient.parse_static_routes(rfc3442) def test_parse_static_routes_bogus_width_returns_empty_list(self): rfc3442 = "33,169,254,169,254,130,56,248" - self.assertEqual([], IscDhclient.parse_static_routes(rfc3442)) + assert [] == IscDhclient.parse_static_routes(rfc3442) def test_parse_static_routes_single_ip(self): rfc3442 = "32,169,254,169,254,130,56,248,255" - self.assertEqual( - [("169.254.169.254/32", "130.56.248.255")], - IscDhclient.parse_static_routes(rfc3442), - ) + assert [ + ("169.254.169.254/32", "130.56.248.255") + ] == IscDhclient.parse_static_routes(rfc3442) def test_parse_static_routes_single_ip_handles_trailing_semicolon(self): rfc3442 = "32,169,254,169,254,130,56,248,255;" - self.assertEqual( - [("169.254.169.254/32", "130.56.248.255")], - 
IscDhclient.parse_static_routes(rfc3442), - ) + assert [ + ("169.254.169.254/32", "130.56.248.255") + ] == IscDhclient.parse_static_routes(rfc3442) def test_unknown_121(self): for unknown121 in [ @@ -304,35 +301,30 @@ def test_unknown_121(self): def test_parse_static_routes_default_route(self): rfc3442 = "0,130,56,240,1" - self.assertEqual( - [("0.0.0.0/0", "130.56.240.1")], - IscDhclient.parse_static_routes(rfc3442), - ) + assert [ + ("0.0.0.0/0", "130.56.240.1") + ] == IscDhclient.parse_static_routes(rfc3442) def test_unspecified_gateway(self): rfc3442 = "32,169,254,169,254,0,0,0,0" - self.assertEqual( - [("169.254.169.254/32", "0.0.0.0")], - IscDhclient.parse_static_routes(rfc3442), - ) + assert [ + ("169.254.169.254/32", "0.0.0.0") + ] == IscDhclient.parse_static_routes(rfc3442) def test_parse_static_routes_class_c_b_a(self): class_c = "24,192,168,74,192,168,0,4" class_b = "16,172,16,172,16,0,4" class_a = "8,10,10,0,0,4" rfc3442 = ",".join([class_c, class_b, class_a]) - self.assertEqual( - sorted( - [ - ("192.168.74.0/24", "192.168.0.4"), - ("172.16.0.0/16", "172.16.0.4"), - ("10.0.0.0/8", "10.0.0.4"), - ] - ), - sorted(IscDhclient.parse_static_routes(rfc3442)), - ) + assert sorted( + [ + ("192.168.74.0/24", "192.168.0.4"), + ("172.16.0.0/16", "172.16.0.4"), + ("10.0.0.0/8", "10.0.0.4"), + ] + ) == sorted(IscDhclient.parse_static_routes(rfc3442)) - def test_parse_static_routes_logs_error_truncated(self): + def test_parse_static_routes_logs_error_truncated(self, caplog): bad_rfc3442 = { "class_c": "24,169,254,169,10", "class_b": "16,172,16,10", @@ -341,52 +333,43 @@ def test_parse_static_routes_logs_error_truncated(self): "netlen": "33,0", } for rfc3442 in bad_rfc3442.values(): - self.assertEqual([], IscDhclient.parse_static_routes(rfc3442)) + assert [] == IscDhclient.parse_static_routes(rfc3442) - logs = self.logs.getvalue() - self.assertEqual(len(bad_rfc3442.keys()), len(logs.splitlines())) + assert len(bad_rfc3442.keys()) == len(caplog.text.splitlines()) - 
def test_parse_static_routes_returns_valid_routes_until_parse_err(self): + def test_parse_static_routes_returns_valid_routes_until_parse_err( + self, caplog + ): class_c = "24,192,168,74,192,168,0,4" class_b = "16,172,16,172,16,0,4" class_a_error = "8,10,10,0,0" rfc3442 = ",".join([class_c, class_b, class_a_error]) - self.assertEqual( - sorted( - [ - ("192.168.74.0/24", "192.168.0.4"), - ("172.16.0.0/16", "172.16.0.4"), - ] - ), - sorted(IscDhclient.parse_static_routes(rfc3442)), - ) + assert sorted( + [ + ("192.168.74.0/24", "192.168.0.4"), + ("172.16.0.0/16", "172.16.0.4"), + ] + ) == sorted(IscDhclient.parse_static_routes(rfc3442)) - logs = self.logs.getvalue() - self.assertIn(rfc3442, logs.splitlines()[0]) + assert rfc3442 in caplog.text.splitlines()[0] def test_redhat_format(self): redhat_format = "24.191.168.128 192.168.128.1,0 192.168.128.1" - self.assertEqual( - sorted( - [ - ("191.168.128.0/24", "192.168.128.1"), - ("0.0.0.0/0", "192.168.128.1"), - ] - ), - sorted(IscDhclient.parse_static_routes(redhat_format)), - ) + assert sorted( + [ + ("191.168.128.0/24", "192.168.128.1"), + ("0.0.0.0/0", "192.168.128.1"), + ] + ) == sorted(IscDhclient.parse_static_routes(redhat_format)) def test_redhat_format_with_a_space_too_much_after_comma(self): redhat_format = "24.191.168.128 192.168.128.1, 0 192.168.128.1" - self.assertEqual( - sorted( - [ - ("191.168.128.0/24", "192.168.128.1"), - ("0.0.0.0/0", "192.168.128.1"), - ] - ), - sorted(IscDhclient.parse_static_routes(redhat_format)), - ) + assert sorted( + [ + ("191.168.128.0/24", "192.168.128.1"), + ("0.0.0.0/0", "192.168.128.1"), + ] + ) == sorted(IscDhclient.parse_static_routes(redhat_format)) class TestDHCPDiscoveryClean: @@ -696,7 +679,7 @@ def dhcp_log_func(interface, out, err): ) -class TestSystemdParseLeases(CiTestCase): +class TestSystemdParseLeases: lxd_lease = dedent( """\ # This is private data. Do not parse. 
@@ -775,49 +758,40 @@ class TestSystemdParseLeases(CiTestCase): "OPTION_245": "624c3620", } - def setUp(self): - super(TestSystemdParseLeases, self).setUp() - self.lease_d = self.tmp_dir() - - def test_no_leases_returns_empty_dict(self): + def test_no_leases_returns_empty_dict(self, tmp_path): """A leases dir with no lease files should return empty dictionary.""" - self.assertEqual({}, networkd_load_leases(self.lease_d)) + assert {} == networkd_load_leases(str(tmp_path)) - def test_no_leases_dir_returns_empty_dict(self): + def test_no_leases_dir_returns_empty_dict(self, tmp_path): """A non-existing leases dir should return empty dict.""" - enodir = os.path.join(self.lease_d, "does-not-exist") - self.assertEqual({}, networkd_load_leases(enodir)) + enodir = os.path.join(tmp_path, "does-not-exist") + assert {} == networkd_load_leases(enodir) - def test_single_leases_file(self): + def test_single_leases_file(self, tmp_path): """A leases dir with one leases file.""" - populate_dir(self.lease_d, {"2": self.lxd_lease}) - self.assertEqual( - {"2": self.lxd_parsed}, networkd_load_leases(self.lease_d) - ) + populate_dir(str(tmp_path), {"2": self.lxd_lease}) + assert {"2": self.lxd_parsed} == networkd_load_leases(str(tmp_path)) - def test_single_azure_leases_file(self): + def test_single_azure_leases_file(self, tmp_path): """On Azure, option 245 should be present, verify it specifically.""" - populate_dir(self.lease_d, {"1": self.azure_lease}) - self.assertEqual( - {"1": self.azure_parsed}, networkd_load_leases(self.lease_d) - ) + populate_dir(str(tmp_path), {"1": self.azure_lease}) + assert {"1": self.azure_parsed} == networkd_load_leases(str(tmp_path)) - def test_multiple_files(self): + def test_multiple_files(self, tmp_path): """Multiple leases files on azure with one found return that value.""" - self.maxDiff = None populate_dir( - self.lease_d, {"1": self.azure_lease, "9": self.lxd_lease} - ) - self.assertEqual( - {"1": self.azure_parsed, "9": self.lxd_parsed}, - 
networkd_load_leases(self.lease_d), + str(tmp_path), {"1": self.azure_lease, "9": self.lxd_lease} ) + assert { + "1": self.azure_parsed, + "9": self.lxd_parsed, + } == networkd_load_leases(str(tmp_path)) @responses.activate @pytest.mark.usefixtures("disable_netdev_info") @mock.patch("cloudinit.net.ephemeral._check_connectivity_to_imds") -class TestEphemeralDhcpNoNetworkSetup(CiTestCase): +class TestEphemeralDhcpNoNetworkSetup: @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery") def test_ephemeral_dhcp_no_network_if_url_connectivity( self, m_dhcp, m_imds @@ -832,7 +806,7 @@ def test_ephemeral_dhcp_no_network_if_url_connectivity( MockDistro(), connectivity_urls_data=[{"url": url}], ) as lease: - self.assertIsNone(lease) + assert lease is None # Ensure that no teardown happens: m_dhcp.assert_not_called() @@ -857,7 +831,7 @@ def test_ephemeral_dhcp_setup_network_if_url_connectivity( MockDistro(), connectivity_urls_data=[{"url": url}], ) as lease: - self.assertEqual(m_dhcp.return_value, lease) + assert m_dhcp.return_value == lease # Ensure that dhcp discovery occurs m_dhcp.assert_called_once() @@ -917,10 +891,7 @@ def test_ctx_mgr_umbrella_error(self, m_dhcp, error_class): assert len(m_dhcp.mock_calls) == 1 -class TestUDHCPCDiscoveryClean(CiTestCase): - with_logs = True - maxDiff = None - +class TestUDHCPCDiscoveryClean: @mock.patch("cloudinit.net.dhcp.is_ib_interface", return_value=False) @mock.patch("cloudinit.net.dhcp.subp.which", return_value="/sbin/udhcpc") @mock.patch("cloudinit.net.dhcp.os.remove") @@ -947,16 +918,13 @@ def test_udhcpc_discovery( "routers": "192.168.2.1", "static_routes": "10.240.0.1/32 0.0.0.0 0.0.0.0/0 10.240.0.1", } - self.assertEqual( - { - "fixed-address": "192.168.2.74", - "interface": "eth9", - "routers": "192.168.2.1", - "static_routes": "10.240.0.1/32 0.0.0.0 0.0.0.0/0 10.240.0.1", - "subnet-mask": "255.255.255.0", - }, - Udhcpc().dhcp_discovery("eth9", distro=MockDistro()), - ) + assert { + "fixed-address": "192.168.2.74", + 
"interface": "eth9", + "routers": "192.168.2.1", + "static_routes": "10.240.0.1/32 0.0.0.0 0.0.0.0/0 10.240.0.1", + "subnet-mask": "255.255.255.0", + } == Udhcpc().dhcp_discovery("eth9", distro=MockDistro()) # Interface was brought up before dhclient called m_subp.assert_has_calls( [ @@ -1016,16 +984,13 @@ def test_udhcpc_discovery_ib( "routers": "192.168.2.1", "static_routes": "10.240.0.1/32 0.0.0.0 0.0.0.0/0 10.240.0.1", } - self.assertEqual( - { - "fixed-address": "192.168.2.74", - "interface": "ib0", - "routers": "192.168.2.1", - "static_routes": "10.240.0.1/32 0.0.0.0 0.0.0.0/0 10.240.0.1", - "subnet-mask": "255.255.255.0", - }, - Udhcpc().dhcp_discovery("ib0", distro=MockDistro()), - ) + assert { + "fixed-address": "192.168.2.74", + "interface": "ib0", + "routers": "192.168.2.1", + "static_routes": "10.240.0.1/32 0.0.0.0 0.0.0.0/0 10.240.0.1", + "subnet-mask": "255.255.255.0", + } == Udhcpc().dhcp_discovery("ib0", distro=MockDistro()) # Interface was brought up before dhclient called m_subp.assert_has_calls( [ @@ -1057,7 +1022,7 @@ def test_udhcpc_discovery_ib( ) -class TestISCDHClient(CiTestCase): +class TestISCDHClient: @mock.patch( "os.listdir", return_value=( @@ -1072,9 +1037,9 @@ def test_get_newest_lease_file_from_distro_rhel(self, *_): """ Test that an rhel style lease has been found """ - self.assertEqual( - "/var/lib/NetworkManager/dhclient-0-u-u-i-d-enp2s0f0.lease", - IscDhclient.get_newest_lease_file_from_distro(rhel.Distro), + assert ( + "/var/lib/NetworkManager/dhclient-0-u-u-i-d-enp2s0f0.lease" + == IscDhclient.get_newest_lease_file_from_distro(rhel.Distro) ) @mock.patch( @@ -1091,9 +1056,9 @@ def test_get_newest_lease_file_from_distro_amazonlinux(self, *_): """ Test that an amazon style lease has been found """ - self.assertEqual( - "/var/lib/dhcp/dhclient--eth0.leases", - IscDhclient.get_newest_lease_file_from_distro(amazon.Distro), + assert ( + "/var/lib/dhcp/dhclient--eth0.leases" + == 
IscDhclient.get_newest_lease_file_from_distro(amazon.Distro) ) @mock.patch( @@ -1110,9 +1075,9 @@ def test_get_newest_lease_file_from_distro_freebsd(self, *_): """ Test that an freebsd style lease has been found """ - self.assertEqual( - "/var/db/dhclient.leases.vtynet0", - IscDhclient.get_newest_lease_file_from_distro(freebsd.Distro), + assert ( + "/var/db/dhclient.leases.vtynet0" + == IscDhclient.get_newest_lease_file_from_distro(freebsd.Distro) ) @mock.patch( @@ -1129,9 +1094,9 @@ def test_get_newest_lease_file_from_distro_alpine(self, *_): """ Test that an alpine style lease has been found """ - self.assertEqual( - "/var/lib/dhcp/dhclient.leases", - IscDhclient.get_newest_lease_file_from_distro(alpine.Distro), + assert ( + "/var/lib/dhcp/dhclient.leases" + == IscDhclient.get_newest_lease_file_from_distro(alpine.Distro) ) @mock.patch( @@ -1148,9 +1113,9 @@ def test_get_newest_lease_file_from_distro_debian(self, *_): """ Test that an debian style lease has been found """ - self.assertEqual( - "/var/lib/dhcp/dhclient.eth0.leases", - IscDhclient.get_newest_lease_file_from_distro(debian.Distro), + assert ( + "/var/lib/dhcp/dhclient.eth0.leases" + == IscDhclient.get_newest_lease_file_from_distro(debian.Distro) ) # If argument to listdir is '/var/lib/NetworkManager' @@ -1170,11 +1135,10 @@ def test_fallback_when_nothing_found(self, *_): This tests a situation where Distro provides lease information but the lease wasn't found on that location """ - self.assertEqual( - os.path.join(DHCLIENT_FALLBACK_LEASE_DIR, "!@#$-eth0.lease"), - IscDhclient.get_newest_lease_file_from_distro( - rhel.Distro("", {}, {}) - ), + assert os.path.join( + DHCLIENT_FALLBACK_LEASE_DIR, "!@#$-eth0.lease" + ) == IscDhclient.get_newest_lease_file_from_distro( + rhel.Distro("", {}, {}) ) @mock.patch( @@ -1191,9 +1155,8 @@ def test_get_newest_lease_file_from_distro_notfound(self, *_): Test the case when no leases were found """ # Any Distro would suffice for the absense test, choose Centos then. 
- self.assertEqual( - None, - IscDhclient.get_newest_lease_file_from_distro(centos.Distro), + assert None is IscDhclient.get_newest_lease_file_from_distro( + centos.Distro ) @@ -1300,7 +1263,7 @@ def _raise(): ), ) def test_parse_raw_lease(self, lease_file, option_245): - lease = load_binary_file(f"tests/data/net/dhcp/{lease_file}") + lease = load_binary_file(resourceLocation(f"net/dhcp/{lease_file}")) assert option_245 == Dhcpcd.parse_unknown_options_from_packet( lease, 245 ) diff --git a/tests/unittests/net/test_init.py b/tests/unittests/net/test_init.py index 3200bfd4..f4d6ac1b 100644 --- a/tests/unittests/net/test_init.py +++ b/tests/unittests/net/test_init.py @@ -17,7 +17,11 @@ from cloudinit.net.ephemeral import EphemeralIPv4Network, EphemeralIPv6Network from cloudinit.subp import ProcessExecutionError from cloudinit.util import ensure_file, write_file -from tests.unittests.helpers import CiTestCase, example_netdev, random_string +from tests.unittests.helpers import ( + assert_count_equal, + example_netdev, + random_string, +) from tests.unittests.util import MockDistro @@ -223,26 +227,20 @@ def test_is_vlan(self): assert net.is_vlan("eth0") -class TestGenerateFallbackConfig(CiTestCase): - def setUp(self): - super(TestGenerateFallbackConfig, self).setUp() - sys_mock = mock.patch("cloudinit.net.get_sys_class_path") - self.m_sys_path = sys_mock.start() - self.sysdir = self.tmp_dir() + "/" - self.m_sys_path.return_value = self.sysdir - self.addCleanup(sys_mock.stop) - self.add_patch( - "cloudinit.net.util.is_container", - "m_is_container", - return_value=False, +class TestGenerateFallbackConfig: + @pytest.fixture(autouse=True) + def fixtures(self, mocker, tmp_path): + self.sysdir = str(tmp_path) + "/" + mocker.patch( + "cloudinit.net.get_sys_class_path", return_value=self.sysdir ) - self.add_patch("cloudinit.net.util.udevadm_settle", "m_settle") - self.add_patch( - "cloudinit.net.is_netfailover", "m_netfail", return_value=False + 
mocker.patch("cloudinit.net.util.is_container", return_value=False) + mocker.patch("cloudinit.net.util.udevadm_settle") + self.m_is_netfail = mocker.patch( + "cloudinit.net.is_netfailover", return_value=False ) - self.add_patch( + mocker.patch( "cloudinit.net.is_netfail_master", - "m_netfail_master", return_value=False, ) @@ -263,7 +261,7 @@ def test_generate_fallback_finds_connected_eth_with_mac(self): }, "version": 2, } - self.assertEqual(expected, net.generate_fallback_config()) + assert expected == net.generate_fallback_config() def test_generate_fallback_finds_dormant_eth_with_mac(self): """generate_fallback_config finds any dormant device with a mac.""" @@ -281,7 +279,7 @@ def test_generate_fallback_finds_dormant_eth_with_mac(self): }, "version": 2, } - self.assertEqual(expected, net.generate_fallback_config()) + assert expected == net.generate_fallback_config() def test_generate_fallback_finds_eth_by_operstate(self): """generate_fallback_config finds any dormant device with a mac.""" @@ -301,15 +299,15 @@ def test_generate_fallback_finds_eth_by_operstate(self): valid_operstates = ["dormant", "down", "lowerlayerdown", "unknown"] for state in valid_operstates: write_file(os.path.join(self.sysdir, "eth0", "operstate"), state) - self.assertEqual(expected, net.generate_fallback_config()) + assert expected == net.generate_fallback_config() write_file(os.path.join(self.sysdir, "eth0", "operstate"), "noworky") - self.assertIsNone(net.generate_fallback_config()) + assert net.generate_fallback_config() is None def test_generate_fallback_config_skips_veth(self): """generate_fallback_config will skip any veth interfaces.""" # A connected veth which gets ignored write_file(os.path.join(self.sysdir, "veth0", "carrier"), "1") - self.assertIsNone(net.generate_fallback_config()) + assert net.generate_fallback_config() is None def test_generate_fallback_config_skips_bridges(self): """generate_fallback_config will skip any bridges interfaces.""" @@ -318,7 +316,7 @@ def 
test_generate_fallback_config_skips_bridges(self): mac = "aa:bb:cc:aa:bb:cc" write_file(os.path.join(self.sysdir, "eth0", "address"), mac) ensure_file(os.path.join(self.sysdir, "eth0", "bridge")) - self.assertIsNone(net.generate_fallback_config()) + assert net.generate_fallback_config() is None def test_generate_fallback_config_skips_bonds(self): """generate_fallback_config will skip any bonded interfaces.""" @@ -327,9 +325,12 @@ def test_generate_fallback_config_skips_bonds(self): mac = "aa:bb:cc:aa:bb:cc" write_file(os.path.join(self.sysdir, "eth0", "address"), mac) ensure_file(os.path.join(self.sysdir, "eth0", "bonding")) - self.assertIsNone(net.generate_fallback_config()) + assert net.generate_fallback_config() is None - def test_generate_fallback_config_skips_netfail_devs(self): + @mock.patch("cloudinit.net.is_netfail_master") + def test_generate_fallback_config_skips_netfail_devs( + self, m_is_netfail_master + ): """gen_fallback_config ignores netfail primary,sby no mac on master.""" mac = "aa:bb:cc:aa:bb:cc" # netfailover devs share the same mac for iface in ["ens3", "ens3sby", "enP0s1f3"]: @@ -345,7 +346,7 @@ def is_netfail(iface, _driver=None): return False return True - self.m_netfail.side_effect = is_netfail + self.m_is_netfail.side_effect = is_netfail def is_netfail_master(iface, _driver=None): # ens3 is the master @@ -353,7 +354,7 @@ def is_netfail_master(iface, _driver=None): return True return False - self.m_netfail_master.side_effect = is_netfail_master + m_is_netfail_master.side_effect = is_netfail_master expected = { "ethernets": { "ens3": { @@ -366,23 +367,21 @@ def is_netfail_master(iface, _driver=None): "version": 2, } result = net.generate_fallback_config() - self.assertEqual(expected, result) - - -class TestNetFindFallBackNic(CiTestCase): - def setUp(self): - super(TestNetFindFallBackNic, self).setUp() - sys_mock = mock.patch("cloudinit.net.get_sys_class_path") - self.m_sys_path = sys_mock.start() - self.sysdir = self.tmp_dir() + "/" - 
self.m_sys_path.return_value = self.sysdir - self.addCleanup(sys_mock.stop) - self.add_patch( + assert expected == result + + +class TestNetFindFallBackNic: + @pytest.fixture(autouse=True) + def fixtures(self, mocker, tmp_path): + self.sysdir = str(tmp_path) + "/" + mocker.patch( + "cloudinit.net.get_sys_class_path", return_value=self.sysdir + ) + mocker.patch( "cloudinit.net.util.is_container", - "m_is_container", return_value=False, ) - self.add_patch("cloudinit.net.util.udevadm_settle", "m_settle") + mocker.patch("cloudinit.net.util.udevadm_settle") def test_generate_fallback_finds_first_connected_eth_with_mac(self): """find_fallback_nic finds any connected device with a mac.""" @@ -390,7 +389,7 @@ def test_generate_fallback_finds_first_connected_eth_with_mac(self): write_file(os.path.join(self.sysdir, "eth1", "carrier"), "1") mac = "aa:bb:cc:aa:bb:cc" write_file(os.path.join(self.sysdir, "eth1", "address"), mac) - self.assertEqual("eth1", net.find_fallback_nic()) + assert "eth1" == net.find_fallback_nic() class TestNetFindCandidateNics: @@ -617,66 +616,62 @@ def test_udevadm_settle_failure_handled_gracefully(self, caplog): self.m_settle.assert_called_once() -class TestGetDeviceList(CiTestCase): - def setUp(self): - super(TestGetDeviceList, self).setUp() - sys_mock = mock.patch("cloudinit.net.get_sys_class_path") - self.m_sys_path = sys_mock.start() - self.sysdir = self.tmp_dir() + "/" - self.m_sys_path.return_value = self.sysdir - self.addCleanup(sys_mock.stop) +class TestGetDeviceList: + @pytest.fixture(autouse=True) + def fixtures(self, mocker, tmp_path): + self.sysdir = str(tmp_path) + "/" + self.m_sys_path = mocker.patch( + "cloudinit.net.get_sys_class_path", return_value=self.sysdir + ) def test_get_devicelist_raise_oserror(self): """get_devicelist raise any non-ENOENT OSerror.""" error = OSError("Can not do it") error.errno = errno.EPERM # Set non-ENOENT self.m_sys_path.side_effect = error - with self.assertRaises(OSError) as context_manager: + with 
pytest.raises(OSError, match="Can not do it"): net.get_devicelist() - exception = context_manager.exception - self.assertEqual("Can not do it", str(exception)) def test_get_devicelist_empty_without_sys_net(self): """get_devicelist returns empty list when missing SYS_CLASS_NET.""" self.m_sys_path.return_value = "idontexist" - self.assertEqual([], net.get_devicelist()) + assert [] == net.get_devicelist() def test_get_devicelist_empty_with_no_devices_in_sys_net(self): """get_devicelist returns empty directoty listing for SYS_CLASS_NET.""" - self.assertEqual([], net.get_devicelist()) + assert [] == net.get_devicelist() def test_get_devicelist_lists_any_subdirectories_in_sys_net(self): """get_devicelist returns a directory listing for SYS_CLASS_NET.""" write_file(os.path.join(self.sysdir, "eth0", "operstate"), "up") write_file(os.path.join(self.sysdir, "eth1", "operstate"), "up") - self.assertCountEqual(["eth0", "eth1"], net.get_devicelist()) + assert_count_equal(["eth0", "eth1"], net.get_devicelist()) @mock.patch( "cloudinit.net.is_openvswitch_internal_interface", mock.Mock(return_value=False), ) -class TestGetInterfaceMAC(CiTestCase): - def setUp(self): - super(TestGetInterfaceMAC, self).setUp() - sys_mock = mock.patch("cloudinit.net.get_sys_class_path") - self.m_sys_path = sys_mock.start() - self.sysdir = self.tmp_dir() + "/" - self.m_sys_path.return_value = self.sysdir - self.addCleanup(sys_mock.stop) +class TestGetInterfaceMAC: + @pytest.fixture(autouse=True) + def fixtures(self, mocker, tmp_path): + self.sysdir = str(tmp_path) + "/" + self.m_sys_path = mocker.patch( + "cloudinit.net.get_sys_class_path", return_value=self.sysdir + ) def test_get_interface_mac_false_with_no_mac(self): """get_device_list returns False when no mac is reported.""" ensure_file(os.path.join(self.sysdir, "eth0", "bonding")) mac_path = os.path.join(self.sysdir, "eth0", "address") - self.assertFalse(os.path.exists(mac_path)) - self.assertFalse(net.get_interface_mac("eth0")) + assert not 
os.path.exists(mac_path) + assert net.get_interface_mac("eth0") is False def test_get_interface_mac(self): """get_interfaces returns the mac from SYS_CLASS_NET/dev/address.""" mac = "aa:bb:cc:aa:bb:cc" write_file(os.path.join(self.sysdir, "eth1", "address"), mac) - self.assertEqual(mac, net.get_interface_mac("eth1")) + assert mac == net.get_interface_mac("eth1") def test_get_interface_mac_grabs_bonding_address(self): """get_interfaces returns the source device mac for bonded devices.""" @@ -687,12 +682,12 @@ def test_get_interface_mac_grabs_bonding_address(self): os.path.join(self.sysdir, "eth1", "bonding_slave", "perm_hwaddr"), source_dev_mac, ) - self.assertEqual(source_dev_mac, net.get_interface_mac("eth1")) + assert source_dev_mac == net.get_interface_mac("eth1") def test_get_interfaces_empty_list_without_sys_net(self): """get_interfaces returns an empty list when missing SYS_CLASS_NET.""" self.m_sys_path.return_value = "idontexist" - self.assertEqual([], net.get_interfaces()) + assert [] == net.get_interfaces() def test_get_interfaces_by_mac_skips_empty_mac(self): """Ignore 00:00:00:00:00:00 addresses from get_interfaces_by_mac.""" @@ -703,18 +698,18 @@ def test_get_interfaces_by_mac_skips_empty_mac(self): write_file(os.path.join(self.sysdir, "eth2", "addr_assign_type"), "0") write_file(os.path.join(self.sysdir, "eth2", "address"), mac) expected = [("eth2", "aa:bb:cc:aa:bb:cc", None, None)] - self.assertEqual(expected, net.get_interfaces()) + assert expected == net.get_interfaces() def test_get_interfaces_by_mac_skips_missing_mac(self): """Ignore interfaces without an address from get_interfaces_by_mac.""" write_file(os.path.join(self.sysdir, "eth1", "addr_assign_type"), "0") address_path = os.path.join(self.sysdir, "eth1", "address") - self.assertFalse(os.path.exists(address_path)) + assert not os.path.exists(address_path) mac = "aa:bb:cc:aa:bb:cc" write_file(os.path.join(self.sysdir, "eth2", "addr_assign_type"), "0") write_file(os.path.join(self.sysdir, 
"eth2", "address"), mac) expected = [("eth2", "aa:bb:cc:aa:bb:cc", None, None)] - self.assertEqual(expected, net.get_interfaces()) + assert expected == net.get_interfaces() def test_get_interfaces_by_mac_skips_master_devs(self): """Ignore interfaces with a master device which would have dup mac.""" @@ -725,7 +720,7 @@ def test_get_interfaces_by_mac_skips_master_devs(self): write_file(os.path.join(self.sysdir, "eth2", "addr_assign_type"), "0") write_file(os.path.join(self.sysdir, "eth2", "address"), mac2) expected = [("eth2", mac2, None, None)] - self.assertEqual(expected, net.get_interfaces()) + assert expected == net.get_interfaces() @mock.patch("cloudinit.net.is_netfailover") def test_get_interfaces_by_mac_skips_netfailvoer(self, m_netfail): @@ -746,7 +741,7 @@ def is_netfail(iface, _driver=None): m_netfail.side_effect = is_netfail expected = [("ens3", mac, None, None)] - self.assertEqual(expected, net.get_interfaces()) + assert expected == net.get_interfaces() def test_get_interfaces_does_not_skip_phys_members_of_bridges_and_bonds( self, @@ -779,24 +774,23 @@ def test_get_interfaces_does_not_skip_phys_members_of_bridges_and_bonds( ) interface_names = [interface[0] for interface in net.get_interfaces()] - self.assertEqual( - ["eth1", "eth2", "eth3", "ovs-system"], sorted(interface_names) + assert ["eth1", "eth2", "eth3", "ovs-system"] == sorted( + interface_names ) -class TestInterfaceHasOwnMAC(CiTestCase): - def setUp(self): - super(TestInterfaceHasOwnMAC, self).setUp() - sys_mock = mock.patch("cloudinit.net.get_sys_class_path") - self.m_sys_path = sys_mock.start() - self.sysdir = self.tmp_dir() + "/" - self.m_sys_path.return_value = self.sysdir - self.addCleanup(sys_mock.stop) +class TestInterfaceHasOwnMAC: + @pytest.fixture(autouse=True) + def fixtures(self, mocker, tmp_path): + self.sysdir = str(tmp_path) + "/" + mocker.patch( + "cloudinit.net.get_sys_class_path", return_value=self.sysdir + ) def test_interface_has_own_mac_false_when_stolen(self): """Return 
False from interface_has_own_mac when address is stolen.""" write_file(os.path.join(self.sysdir, "eth1", "addr_assign_type"), "2") - self.assertFalse(net.interface_has_own_mac("eth1")) + assert net.interface_has_own_mac("eth1") is False def test_interface_has_own_mac_true_when_not_stolen(self): """Return False from interface_has_own_mac when mac isn't stolen.""" @@ -804,27 +798,23 @@ def test_interface_has_own_mac_true_when_not_stolen(self): assign_path = os.path.join(self.sysdir, "eth1", "addr_assign_type") for _type in valid_assign_types: write_file(assign_path, _type) - self.assertTrue(net.interface_has_own_mac("eth1")) + assert net.interface_has_own_mac("eth1") is True def test_interface_has_own_mac_strict_errors_on_absent_assign_type(self): """When addr_assign_type is absent, interface_has_own_mac errors.""" - with self.assertRaises(ValueError): + with pytest.raises(ValueError): net.interface_has_own_mac("eth1", strict=True) @mock.patch("cloudinit.net.subp.subp") @pytest.mark.usefixtures("disable_netdev_info") -class TestEphemeralIPV4Network(CiTestCase): - - with_logs = True - - def setUp(self): - super(TestEphemeralIPV4Network, self).setUp() - sys_mock = mock.patch("cloudinit.net.get_sys_class_path") - self.m_sys_path = sys_mock.start() - self.sysdir = self.tmp_dir() + "/" - self.m_sys_path.return_value = self.sysdir - self.addCleanup(sys_mock.stop) +class TestEphemeralIPV4Network: + @pytest.fixture(autouse=True) + def fixtures(self, mocker, tmp_path): + self.sysdir = str(tmp_path) + "/" + mocker.patch( + "cloudinit.net.get_sys_class_path", return_value=self.sysdir + ) def test_ephemeral_ipv4_network_errors_on_missing_params(self, m_subp): """No required params for EphemeralIPv4Network can be None.""" @@ -837,15 +827,13 @@ def test_ephemeral_ipv4_network_errors_on_missing_params(self, m_subp): for key in required_params.keys(): params = copy.deepcopy(required_params) params[key] = None - with self.assertRaises(ValueError) as context_manager: + with 
pytest.raises(ValueError, match="Cannot init network on"): EphemeralIPv4Network( MockDistro(), interface_addrs_before_dhcp=example_netdev, **params, ) - error = context_manager.exception - self.assertIn("Cannot init network on", str(error)) - self.assertEqual(0, m_subp.call_count) + assert 0 == m_subp.call_count def test_ephemeral_ipv4_network_errors_invalid_mask_prefix(self, m_subp): """Raise an error when prefix_or_mask is not a netmask or prefix.""" @@ -858,14 +846,13 @@ def test_ephemeral_ipv4_network_errors_invalid_mask_prefix(self, m_subp): invalid_masks = ("invalid", "invalid.", "123.123.123") for error_val in invalid_masks: params["prefix_or_mask"] = error_val - with self.assertRaises(ValueError) as context_manager: + with pytest.raises( + ValueError, + match="Cannot setup network, invalid prefix or netmask: ", + ): with EphemeralIPv4Network(MockDistro(), **params): pass - error = context_manager.exception - self.assertIn( - "Cannot setup network, invalid prefix or netmask: ", str(error) - ) - self.assertEqual(0, m_subp.call_count) + assert 0 == m_subp.call_count def test_ephemeral_ipv4_network_performs_teardown(self, m_subp): """EphemeralIPv4Network performs teardown on the device if setup.""" @@ -907,7 +894,7 @@ def test_ephemeral_ipv4_network_performs_teardown(self, m_subp): "interface_addrs_before_dhcp": example_netdev, } with EphemeralIPv4Network(MockDistro(), **params): - self.assertEqual(expected_setup_calls, m_subp.call_args_list) + assert expected_setup_calls == m_subp.call_args_list m_subp.assert_has_calls(expected_teardown_calls) def test_teardown_on_enter_exception(self, m_subp): @@ -969,7 +956,7 @@ def side_effect(args, **kwargs): for teardown in expected_teardown_calls: assert teardown in m_subp.call_args_list - def test_ephemeral_ipv4_network_noop_when_configured(self, m_subp): + def test_ephemeral_ipv4_network_noop_when_configured(self, m_subp, caplog): """EphemeralIPv4Network handles exception when address is setup. 
It performs no cleanup as the interface was already setup. @@ -988,8 +975,8 @@ def test_ephemeral_ipv4_network_noop_when_configured(self, m_subp): with EphemeralIPv4Network(MockDistro(), **params): pass assert expected_calls == m_subp.call_args_list - assert "Skip bringing up network link" in self.logs.getvalue() - assert "Skip adding ip address" in self.logs.getvalue() + assert "Skip bringing up network link" in caplog.text + assert "Skip adding ip address" in caplog.text def test_ephemeral_ipv4_network_with_prefix(self, m_subp): """EphemeralIPv4Network takes a valid prefix to setup the network.""" @@ -1116,7 +1103,7 @@ def test_ephemeral_ipv4_network_with_new_default_route(self, m_subp): ] with EphemeralIPv4Network(MockDistro(), **params): - self.assertEqual(expected_setup_calls, m_subp.call_args_list) + assert expected_setup_calls == m_subp.call_args_list m_subp.assert_has_calls(expected_teardown_calls) def test_ephemeral_ipv4_network_with_rfc3442_static_routes(self, m_subp): @@ -1230,7 +1217,7 @@ def test_ephemeral_ipv4_network_with_rfc3442_static_routes(self, m_subp): ), ] with EphemeralIPv4Network(MockDistro(), **params): - self.assertEqual(expected_setup_calls, m_subp.call_args_list) + assert expected_setup_calls == m_subp.call_args_list m_subp.assert_has_calls(expected_setup_calls + expected_teardown_calls) @@ -1272,11 +1259,13 @@ def _mk_v2_phys(mac, name, driver=None, device_id=None): return v2_cfg -class TestExtractPhysdevs(CiTestCase): - def setUp(self): - super(TestExtractPhysdevs, self).setUp() - self.add_patch("cloudinit.net.device_driver", "m_driver") - self.add_patch("cloudinit.net.device_devid", "m_devid") +class TestExtractPhysdevs: + @pytest.fixture(autouse=True) + def fixtures(self, mocker): + self.m_driver = mocker.patch("cloudinit.net.device_driver") + self.m_devid = mocker.patch( + "cloudinit.net.device_devid", + ) def test_extract_physdevs_looks_up_driver_v1(self): driver = "virtio" @@ -1290,9 +1279,7 @@ def 
test_extract_physdevs_looks_up_driver_v1(self): } # insert the driver value for verification physdevs[0][2] = driver - self.assertEqual( - sorted(physdevs), sorted(net.extract_physdevs(netcfg)) - ) + assert sorted(physdevs) == sorted(net.extract_physdevs(netcfg)) self.m_driver.assert_called_with("eth0") def test_extract_physdevs_looks_up_driver_v2(self): @@ -1307,9 +1294,7 @@ def test_extract_physdevs_looks_up_driver_v2(self): } # insert the driver value for verification physdevs[0][2] = driver - self.assertEqual( - sorted(physdevs), sorted(net.extract_physdevs(netcfg)) - ) + assert sorted(physdevs) == sorted(net.extract_physdevs(netcfg)) self.m_driver.assert_called_with("eth0") def test_extract_physdevs_looks_up_devid_v1(self): @@ -1324,9 +1309,7 @@ def test_extract_physdevs_looks_up_devid_v1(self): } # insert the driver value for verification physdevs[0][3] = devid - self.assertEqual( - sorted(physdevs), sorted(net.extract_physdevs(netcfg)) - ) + assert sorted(physdevs) == sorted(net.extract_physdevs(netcfg)) self.m_devid.assert_called_with("eth0") def test_extract_physdevs_looks_up_devid_v2(self): @@ -1341,9 +1324,7 @@ def test_extract_physdevs_looks_up_devid_v2(self): } # insert the driver value for verification physdevs[0][3] = devid - self.assertEqual( - sorted(physdevs), sorted(net.extract_physdevs(netcfg)) - ) + assert sorted(physdevs) == sorted(net.extract_physdevs(netcfg)) self.m_devid.assert_called_with("eth0") def test_get_v1_type_physical(self): @@ -1356,9 +1337,7 @@ def test_get_v1_type_physical(self): "version": 1, "config": [_mk_v1_phys(*args) for args in physdevs], } - self.assertEqual( - sorted(physdevs), sorted(net.extract_physdevs(netcfg)) - ) + assert sorted(physdevs) == sorted(net.extract_physdevs(netcfg)) def test_get_v2_type_physical(self): physdevs = [ @@ -1370,9 +1349,7 @@ def test_get_v2_type_physical(self): "version": 2, "ethernets": {args[1]: _mk_v2_phys(*args) for args in physdevs}, } - self.assertEqual( - sorted(physdevs), 
sorted(net.extract_physdevs(netcfg)) - ) + assert sorted(physdevs) == sorted(net.extract_physdevs(netcfg)) def test_get_v2_type_physical_skips_if_no_set_name(self): netcfg = { @@ -1383,10 +1360,10 @@ def test_get_v2_type_physical_skips_if_no_set_name(self): } }, } - self.assertEqual([], net.extract_physdevs(netcfg)) + assert [] == net.extract_physdevs(netcfg) def test_runtime_error_on_unknown_netcfg_version(self): - with self.assertRaises(RuntimeError): + with pytest.raises(RuntimeError): net.extract_physdevs({"version": 3, "awesome_config": []}) diff --git a/tests/unittests/net/test_net_rendering.py b/tests/unittests/net/test_net_rendering.py index 72fe3b1b..5ef81718 100644 --- a/tests/unittests/net/test_net_rendering.py +++ b/tests/unittests/net/test_net_rendering.py @@ -125,4 +125,6 @@ def test_convert(test_name, renderers, tmp_path): if Renderer.NetworkManager in renderers: _check_network_manager(network_state, tmp_path) if Renderer.Networkd in renderers: - _check_networkd_renderer(network_state, tmp_path) + _check_networkd_renderer( # pylint: disable=E1120 + network_state, tmp_path + ) diff --git a/tests/unittests/net/test_network_state.py b/tests/unittests/net/test_network_state.py index 5161b9cc..0b9a5049 100644 --- a/tests/unittests/net/test_network_state.py +++ b/tests/unittests/net/test_network_state.py @@ -11,7 +11,6 @@ from cloudinit.net import network_state from cloudinit.net.netplan import Renderer as NetplanRenderer from cloudinit.net.renderers import NAME_TO_RENDERER -from tests.unittests.helpers import CiTestCase netstate_path = "cloudinit.net.network_state" @@ -51,9 +50,19 @@ eth0: match: macaddress: '00:11:22:33:44:55' + addresses: + - 192.168.14.10/24 + - 2001:1::100/64 nameservers: search: [spam.local, eggs.local] addresses: [8.8.8.8] + routes: + - to: default + via: 192.168.14.1 + metric: 50 + - to: default + via: 2001:1::2 + metric: 100 eth1: match: macaddress: '66:77:88:99:00:11' @@ -64,45 +73,45 @@ """ -class 
TestNetworkStateParseConfig(CiTestCase): - def setUp(self): - super(TestNetworkStateParseConfig, self).setUp() - nsi_path = netstate_path + ".NetworkStateInterpreter" - self.add_patch(nsi_path, "m_nsi") - self.m_nsi: MagicMock +class TestNetworkStateParseConfig: + # pylint: disable=attribute-defined-outside-init + @pytest.fixture(autouse=True) + def fixtures(self, mocker): + self.m_nsi: MagicMock = mocker.patch( + netstate_path + ".NetworkStateInterpreter" + ) def test_missing_version_returns_none(self): ncfg: Dict[str, int] = {} - with self.assertRaises(RuntimeError): + with pytest.raises(RuntimeError): network_state.parse_net_config_data(ncfg) def test_unknown_versions_returns_none(self): ncfg = {"version": 13.2} - with self.assertRaises(RuntimeError): + with pytest.raises(RuntimeError): network_state.parse_net_config_data(ncfg) def test_version_2_passes_self_as_config(self): ncfg = {"version": 2, "otherconfig": {}, "somemore": [1, 2, 3]} network_state.parse_net_config_data(ncfg) - self.assertEqual( - [mock.call(version=2, config=ncfg, renderer=None)], - self.m_nsi.call_args_list, - ) + assert [ + mock.call(version=2, config=ncfg, renderer=None) + ] == self.m_nsi.call_args_list def test_valid_config_gets_network_state(self): ncfg = {"version": 2, "otherconfig": {}, "somemore": [1, 2, 3]} result = network_state.parse_net_config_data(ncfg) - self.assertNotEqual(None, result) + assert None is not result def test_empty_v1_config_gets_network_state(self): ncfg = {"version": 1, "config": []} result = network_state.parse_net_config_data(ncfg) - self.assertNotEqual(None, result) + assert None is not result def test_empty_v2_config_gets_network_state(self): ncfg = {"version": 2} result = network_state.parse_net_config_data(ncfg) - self.assertNotEqual(None, result) + assert None is not result @mock.patch("cloudinit.net.network_state.get_interfaces_by_mac") @@ -282,6 +291,17 @@ def test_v2_nameservers(self, mocker): # Ensure DNS defined on interface exists on interface for 
iface in config.iter_interfaces(): if iface["name"] == "eth0": + for route in iface["subnets"][0]["routes"]: + if route["gateway"] == "192.168.14.1": + assert route["network"] == "0.0.0.0" + assert route["prefix"] == 0 + assert route["netmask"] == "0.0.0.0" + elif route["gateway"] == "2001:1::2": + assert route["network"] == "::" + assert route["prefix"] == 0 + assert "netmask" not in route + else: + assert False assert iface["dns"] == { "nameservers": ["8.8.8.8"], "search": ["spam.local", "eggs.local"], @@ -299,7 +319,7 @@ def test_v2_nameservers(self, mocker): assert search not in config.dns_searchdomains -class TestNetworkStateHelperFunctions(CiTestCase): +class TestNetworkStateHelperFunctions: def test_mask_to_net_prefix_ipv4(self): netmask_value = "255.255.255.0" expected = 24 @@ -314,9 +334,8 @@ def test_mask_to_net_prefix_all_bits_ipv4(self): def test_mask_to_net_prefix_to_many_bits_ipv4(self): netmask_value = "33" - self.assertRaises( - ValueError, network_state.ipv4_mask_to_net_prefix, netmask_value - ) + with pytest.raises(ValueError): + network_state.ipv4_mask_to_net_prefix(netmask_value) def test_mask_to_net_prefix_all_bits_ipv6(self): netmask_value = "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff" @@ -332,15 +351,13 @@ def test_mask_to_net_prefix_ipv6(self): def test_mask_to_net_prefix_raises_value_error(self): netmask_value = "ff:ff:ff:ff::" - self.assertRaises( - ValueError, network_state.ipv6_mask_to_net_prefix, netmask_value - ) + with pytest.raises(ValueError): + network_state.ipv6_mask_to_net_prefix(netmask_value) def test_mask_to_net_prefix_to_many_bits_ipv6(self): netmask_value = "129" - self.assertRaises( - ValueError, network_state.ipv6_mask_to_net_prefix, netmask_value - ) + with pytest.raises(ValueError): + network_state.ipv6_mask_to_net_prefix(netmask_value) def test_mask_to_net_prefix_ipv4_object(self): netmask_value = ipaddress.IPv4Address("255.255.255.255") diff --git a/tests/unittests/reporting/test_reporting.py 
b/tests/unittests/reporting/test_reporting.py index 9ed82583..ad0f16b0 100644 --- a/tests/unittests/reporting/test_reporting.py +++ b/tests/unittests/reporting/test_reporting.py @@ -13,8 +13,8 @@ get_schema, validate_cloudconfig_schema, ) -from cloudinit.reporting import events, handlers -from tests.unittests.helpers import TestCase, skipUnlessJsonSchema +from cloudinit.reporting import events +from tests.unittests.helpers import skipUnlessJsonSchema def _fake_registry(): @@ -23,7 +23,7 @@ def _fake_registry(): ) -class TestReportStartEvent(TestCase): +class TestReportStartEvent: @mock.patch( "cloudinit.reporting.events.instantiated_handler_registry", new_callable=_fake_registry, @@ -40,12 +40,12 @@ def test_report_start_event_passes_something_with_as_string_to_handlers( _, handler, ) in instantiated_handler_registry.registered_items.items(): - self.assertEqual(1, handler.publish_event.call_count) + assert handler.publish_event.call_count == 1 event = handler.publish_event.call_args[0][0] - self.assertEqual(expected_string_representation, event.as_string()) + assert expected_string_representation == event.as_string() -class TestReportFinishEvent(TestCase): +class TestReportFinishEvent: def _report_finish_event(self, result=events.status.SUCCESS): event_name, event_description = "my_test_event", "my description" events.report_finish_event( @@ -53,13 +53,13 @@ def _report_finish_event(self, result=events.status.SUCCESS): ) return event_name, event_description - def assertHandlersPassedObjectWithAsString( + def assert_handlers_passed_object_with_as_string( self, handlers, expected_as_string ): for _, handler in handlers.items(): - self.assertEqual(1, handler.publish_event.call_count) + assert handler.publish_event.call_count == 1 event = handler.publish_event.call_args[0][0] - self.assertEqual(expected_as_string, event.as_string()) + assert expected_as_string == event.as_string() @mock.patch( "cloudinit.reporting.events.instantiated_handler_registry", @@ -72,7 +72,7 
@@ def test_report_finish_event_passes_something_with_as_string_to_handlers( expected_string_representation = ": ".join( ["finish", event_name, events.status.SUCCESS, event_description] ) - self.assertHandlersPassedObjectWithAsString( + self.assert_handlers_passed_object_with_as_string( instantiated_handler_registry.registered_items, expected_string_representation, ) @@ -90,7 +90,7 @@ def test_reporting_successful_finish_has_sensible_string_repr( expected_string_representation = ": ".join( ["finish", event_name, events.status.SUCCESS, event_description] ) - self.assertHandlersPassedObjectWithAsString( + self.assert_handlers_passed_object_with_as_string( instantiated_handler_registry.registered_items, expected_string_representation, ) @@ -108,23 +108,24 @@ def test_reporting_unsuccessful_finish_has_sensible_string_repr( expected_string_representation = ": ".join( ["finish", event_name, events.status.FAIL, event_description] ) - self.assertHandlersPassedObjectWithAsString( + self.assert_handlers_passed_object_with_as_string( instantiated_handler_registry.registered_items, expected_string_representation, ) def test_invalid_result_raises_attribute_error(self): - self.assertRaises(ValueError, self._report_finish_event, ("BOGUS",)) + with pytest.raises(ValueError): + self._report_finish_event("BOGUS") -class TestReportingEvent(TestCase): +class TestReportingEvent: def test_as_string(self): event_type, name, description = "test_type", "test_name", "test_desc" event = events.ReportingEvent(event_type, name, description) expected_string_representation = ": ".join( [event_type, name, description] ) - self.assertEqual(expected_string_representation, event.as_string()) + assert expected_string_representation == event.as_string() def test_as_dict(self): event_type, name, desc = "test_type", "test_name", "test_desc" @@ -138,20 +139,20 @@ def test_as_dict(self): # allow for timestamp to differ, but must be present as_dict = event.as_dict() - self.assertIn("timestamp", as_dict) + 
assert "timestamp" in as_dict del as_dict["timestamp"] - self.assertEqual(expected, as_dict) + assert expected == as_dict -class TestFinishReportingEvent(TestCase): +class TestFinishReportingEvent: def test_as_has_result(self): result = events.status.SUCCESS name, desc = "test_name", "test_desc" event = events.FinishReportingEvent(name, desc, result) ret = event.as_dict() - self.assertTrue("result" in ret) - self.assertEqual(ret["result"], result) + assert "result" in ret + assert ret["result"] == result def test_has_result_with_optional_post_files(self): result = events.status.SUCCESS @@ -164,56 +165,43 @@ def test_has_result_with_optional_post_files(self): name, desc, result, post_files=files ) ret = event.as_dict() - self.assertTrue("result" in ret) - self.assertTrue("files" in ret) - self.assertEqual(ret["result"], result) + assert "result" in ret + assert "files" in ret + assert ret["result"] == result posted_install_log = ret["files"][0] - self.assertTrue("path" in posted_install_log) - self.assertTrue("content" in posted_install_log) - self.assertTrue("encoding" in posted_install_log) - self.assertEqual(posted_install_log["path"], files[0]) - self.assertEqual(posted_install_log["encoding"], "base64") + assert "path" in posted_install_log + assert "content" in posted_install_log + assert "encoding" in posted_install_log + assert posted_install_log["path"] == files[0] + assert posted_install_log["encoding"] == "base64" -class TestBaseReportingHandler(TestCase): - def test_base_reporting_handler_is_abstract(self): - regexp = r".*abstract.*publish_event.*" - self.assertRaisesRegex(TypeError, regexp, handlers.ReportingHandler) - - -class TestLogHandler(TestCase): +class TestLogHandler: @mock.patch.object(reporting.handlers.logging, "getLogger") def test_appropriate_logger_used(self, getLogger): event_type, event_name = "test_type", "test_name" event = events.ReportingEvent(event_type, event_name, "description") reporting.handlers.LogHandler().publish_event(event) 
- self.assertEqual( - [ - mock.call( - "cloudinit.reporting.{0}.{1}".format( - event_type, event_name - ) - ) - ], - getLogger.call_args_list, - ) + assert getLogger.call_args_list == [ + mock.call( + "cloudinit.reporting.{0}.{1}".format(event_type, event_name) + ) + ] @mock.patch.object(reporting.handlers.logging, "getLogger") def test_single_log_message_at_info_published(self, getLogger): event = events.ReportingEvent("type", "name", "description") reporting.handlers.LogHandler().publish_event(event) - self.assertEqual(1, getLogger.return_value.log.call_count) + assert getLogger.return_value.log.call_count == 1 @mock.patch.object(reporting.handlers.logging, "getLogger") def test_log_message_uses_event_as_string(self, getLogger): event = events.ReportingEvent("type", "name", "description") reporting.handlers.LogHandler(level="INFO").publish_event(event) - self.assertIn( - event.as_string(), getLogger.return_value.log.call_args[0][1] - ) + assert event.as_string() in getLogger.return_value.log.call_args[0][1] -class TestDefaultRegisteredHandler(TestCase): +class TestDefaultRegisteredHandler: def test_log_handler_registered_by_default(self): registered_items = ( reporting.instantiated_handler_registry.registered_items @@ -222,18 +210,16 @@ def test_log_handler_registered_by_default(self): if isinstance(item, reporting.handlers.LogHandler): break else: - self.fail("No reporting LogHandler registered by default.") + pytest.fail("No reporting LogHandler registered by default.") -class TestReportingConfiguration(TestCase): +class TestReportingConfiguration: @mock.patch.object(reporting, "instantiated_handler_registry") def test_empty_configuration_doesnt_add_handlers( self, instantiated_handler_registry ): reporting.update_configuration({}) - self.assertEqual( - 0, instantiated_handler_registry.register_item.call_count - ) + assert instantiated_handler_registry.register_item.call_count == 0 @mock.patch.object( reporting, "instantiated_handler_registry", 
reporting.DictRegistry() @@ -247,10 +233,9 @@ def test_looks_up_handler_by_type_and_adds_it(self, available_handlers): reporting.update_configuration( {handler_name: {"type": handler_type_name}} ) - self.assertEqual( - {handler_name: handler_cls.return_value}, - reporting.instantiated_handler_registry.registered_items, - ) + assert reporting.instantiated_handler_registry.registered_items == { + handler_name: handler_cls.return_value + } @mock.patch.object( reporting, "instantiated_handler_registry", reporting.DictRegistry() @@ -267,15 +252,13 @@ def test_uses_non_type_parts_of_config_dict_as_kwargs( handler_config.update({"type": handler_type_name}) handler_name = "my_test_handler" reporting.update_configuration({handler_name: handler_config}) - self.assertEqual( - handler_cls.return_value, + assert ( reporting.instantiated_handler_registry.registered_items[ handler_name - ], - ) - self.assertEqual( - [mock.call(**extra_kwargs)], handler_cls.call_args_list + ] + == handler_cls.return_value ) + assert handler_cls.call_args_list == [mock.call(**extra_kwargs)] @mock.patch.object( reporting, "instantiated_handler_registry", reporting.DictRegistry() @@ -288,7 +271,7 @@ def test_handler_config_not_modified(self, available_handlers): handler_config = {"type": handler_type_name, "foo": "bar"} expected_handler_config = handler_config.copy() reporting.update_configuration({"my_test_handler": handler_config}) - self.assertEqual(expected_handler_config, handler_config) + assert expected_handler_config == handler_config @mock.patch.object( reporting, "instantiated_handler_registry", reporting.DictRegistry() @@ -302,32 +285,25 @@ def test_handlers_removed_if_falseish_specified(self, available_handlers): reporting.update_configuration( {handler_name: {"type": handler_type_name}} ) - self.assertEqual( - 1, len(reporting.instantiated_handler_registry.registered_items) + assert ( + len(reporting.instantiated_handler_registry.registered_items) == 1 ) 
reporting.update_configuration({handler_name: None}) - self.assertEqual( - 0, len(reporting.instantiated_handler_registry.registered_items) + assert ( + len(reporting.instantiated_handler_registry.registered_items) == 0 ) -class TestReportingEventStack(TestCase): +class TestReportingEventStack: @mock.patch("cloudinit.reporting.events.report_finish_event") @mock.patch("cloudinit.reporting.events.report_start_event") def test_start_and_finish_success(self, report_start, report_finish): with events.ReportEventStack(name="myname", description="mydesc"): pass - self.assertEqual( - [mock.call("myname", "mydesc")], report_start.call_args_list - ) - self.assertEqual( - [ - mock.call( - "myname", "mydesc", events.status.SUCCESS, post_files=[] - ) - ], - report_finish.call_args_list, - ) + assert report_start.call_args_list == [mock.call("myname", "mydesc")] + assert report_finish.call_args_list == [ + mock.call("myname", "mydesc", events.status.SUCCESS, post_files=[]) + ] @mock.patch("cloudinit.reporting.events.report_finish_event") @mock.patch("cloudinit.reporting.events.report_start_event") @@ -339,11 +315,10 @@ def test_finish_exception_defaults_fail(self, report_start, report_finish): raise ValueError("This didnt work") except ValueError: pass - self.assertEqual([mock.call(name, desc)], report_start.call_args_list) - self.assertEqual( - [mock.call(name, desc, events.status.FAIL, post_files=[])], - report_finish.call_args_list, - ) + assert report_start.call_args_list == [mock.call(name, desc)] + assert report_finish.call_args_list == [ + mock.call(name, desc, events.status.FAIL, post_files=[]) + ] @mock.patch("cloudinit.reporting.events.report_finish_event") @mock.patch("cloudinit.reporting.events.report_start_event") @@ -357,11 +332,10 @@ def test_result_on_exception_used(self, report_start, report_finish): raise ValueError("This didnt work") except ValueError: pass - self.assertEqual([mock.call(name, desc)], report_start.call_args_list) - self.assertEqual( - 
[mock.call(name, desc, events.status.WARN, post_files=[])], - report_finish.call_args_list, - ) + assert report_start.call_args_list == [mock.call(name, desc)] + assert report_finish.call_args_list == [ + mock.call(name, desc, events.status.WARN, post_files=[]) + ] @mock.patch("cloudinit.reporting.events.report_start_event") def test_child_fullname_respects_parent(self, report_start): @@ -380,8 +354,7 @@ def test_child_fullname_respects_parent(self, report_start): report_start.assert_called_with(c2_expected_fullname, "c2desc") @mock.patch("cloudinit.reporting.events.report_finish_event") - @mock.patch("cloudinit.reporting.events.report_start_event") - def test_child_result_bubbles_up(self, report_start, report_finish): + def test_child_result_bubbles_up(self, report_finish): parent = events.ReportEventStack("topname", "topdesc") child = events.ReportEventStack("c_name", "c_desc", parent=parent) with parent: @@ -396,27 +369,21 @@ def test_child_result_bubbles_up(self, report_start, report_finish): def test_message_used_in_finish(self, report_finish): with events.ReportEventStack("myname", "mydesc", message="mymessage"): pass - self.assertEqual( - [ - mock.call( - "myname", "mymessage", events.status.SUCCESS, post_files=[] - ) - ], - report_finish.call_args_list, - ) + assert report_finish.call_args_list == [ + mock.call( + "myname", "mymessage", events.status.SUCCESS, post_files=[] + ) + ] @mock.patch("cloudinit.reporting.events.report_finish_event") def test_message_updatable(self, report_finish): with events.ReportEventStack("myname", "mydesc") as c: c.message = "all good" - self.assertEqual( - [ - mock.call( - "myname", "all good", events.status.SUCCESS, post_files=[] - ) - ], - report_finish.call_args_list, - ) + assert report_finish.call_args_list == [ + mock.call( + "myname", "all good", events.status.SUCCESS, post_files=[] + ) + ] @mock.patch("cloudinit.reporting.events.report_start_event") @mock.patch("cloudinit.reporting.events.report_finish_event") @@ 
-425,8 +392,8 @@ def test_reporting_disabled_does_not_report_events( ): with events.ReportEventStack("a", "b", reporting_enabled=False): pass - self.assertEqual(report_start.call_count, 0) - self.assertEqual(report_finish.call_count, 0) + assert report_start.call_count == 0 + assert report_finish.call_count == 0 @mock.patch("cloudinit.reporting.events.report_start_event") @mock.patch("cloudinit.reporting.events.report_finish_event") @@ -440,25 +407,27 @@ def test_reporting_child_default_to_parent( with parent: with child: pass - self.assertEqual(report_start.call_count, 0) - self.assertEqual(report_finish.call_count, 0) + assert report_start.call_count == 0 + assert report_finish.call_count == 0 def test_reporting_event_has_sane_repr(self): myrep = events.ReportEventStack( "fooname", "foodesc", reporting_enabled=True ).__repr__() - self.assertIn("fooname", myrep) - self.assertIn("foodesc", myrep) - self.assertIn("True", myrep) + assert "fooname" in myrep + assert "foodesc" in myrep + assert "True" in myrep def test_set_invalid_result_raises_value_error(self): f = events.ReportEventStack("myname", "mydesc") - self.assertRaises(ValueError, setattr, f, "result", "BOGUS") + with pytest.raises(ValueError): + f.result = "BOGUS" -class TestStatusAccess(TestCase): +class TestStatusAccess: def test_invalid_status_access_raises_value_error(self): - self.assertRaises(AttributeError, getattr, events.status, "BOGUS") + with pytest.raises(AttributeError): + getattr(events.status, "BOGUS") @skipUnlessJsonSchema() diff --git a/tests/unittests/reporting/test_reporting_hyperv.py b/tests/unittests/reporting/test_reporting_hyperv.py index 31b0408d..c346ac5b 100644 --- a/tests/unittests/reporting/test_reporting_hyperv.py +++ b/tests/unittests/reporting/test_reporting_hyperv.py @@ -8,7 +8,8 @@ import zlib from unittest import mock -from cloudinit import util +import pytest + from cloudinit.reporting import events, instantiated_handler_registry from cloudinit.reporting.handlers import 
HyperVKvpReportingHandler @@ -17,28 +18,33 @@ # long term we should restructure these modules to avoid the issue. from cloudinit.sources.azure import errors # noqa: F401 from cloudinit.sources.helpers import azure -from tests.unittests.helpers import CiTestCase -class TestKvpEncoding(CiTestCase): +class TestKvpEncoding: def test_encode_decode(self): kvp = {"key": "key1", "value": "value1"} kvp_reporting = HyperVKvpReportingHandler() data = kvp_reporting._encode_kvp_item(kvp["key"], kvp["value"]) - self.assertEqual(len(data), kvp_reporting.HV_KVP_RECORD_SIZE) + assert len(data) == kvp_reporting.HV_KVP_RECORD_SIZE decoded_kvp = kvp_reporting._decode_kvp_item(data) - self.assertEqual(kvp, decoded_kvp) + assert kvp == decoded_kvp + +class TestKvpReporter: + @pytest.fixture + def kvp_file_path(self, tmp_path): + file_path = tmp_path / "kvp_pool_file" + file_path.touch() + return str(file_path) -class TextKvpReporter(CiTestCase): - def setUp(self): - super(TextKvpReporter, self).setUp() - self.tmp_file_path = self.tmp_path("kvp_pool_file") - util.ensure_file(self.tmp_file_path) + @pytest.fixture + def reporter(self, kvp_file_path): + return HyperVKvpReportingHandler(kvp_file_path=kvp_file_path) - def test_events_with_higher_incarnation_not_over_written(self): - reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) - self.assertEqual(0, len(list(reporter._iterate_kvps(0)))) + def test_events_with_higher_incarnation_not_over_written( + self, kvp_file_path, reporter + ): + assert 0 == len(list(reporter._iterate_kvps(0))) reporter.publish_event( events.ReportingEvent("foo", "name1", "description") @@ -47,29 +53,27 @@ def test_events_with_higher_incarnation_not_over_written(self): events.ReportingEvent("foo", "name2", "description") ) reporter.q.join() - self.assertEqual(2, len(list(reporter._iterate_kvps(0)))) + assert 2 == len(list(reporter._iterate_kvps(0))) - reporter3 = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) + reporter3 = 
HyperVKvpReportingHandler(kvp_file_path=kvp_file_path) reporter3.incarnation_no = reporter.incarnation_no - 1 reporter3.publish_event( events.ReportingEvent("foo", "name3", "description") ) reporter3.q.join() - self.assertEqual(3, len(list(reporter3._iterate_kvps(0)))) + assert 3 == len(list(reporter3._iterate_kvps(0))) - def test_finish_event_result_is_logged(self): - reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) + def test_finish_event_result_is_logged(self, reporter): reporter.publish_event( events.FinishReportingEvent( "name2", "description1", result=events.status.FAIL ) ) reporter.q.join() - self.assertIn("FAIL", list(reporter._iterate_kvps(0))[0]["value"]) + assert "FAIL" in list(reporter._iterate_kvps(0))[0]["value"] - def test_file_operation_issue(self): - os.remove(self.tmp_file_path) - reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) + def test_file_operation_issue(self, kvp_file_path, reporter): + os.remove(kvp_file_path) reporter.publish_event( events.FinishReportingEvent( "name2", "description1", result=events.status.FAIL @@ -77,8 +81,7 @@ def test_file_operation_issue(self): ) reporter.q.join() - def test_event_very_long(self): - reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) + def test_event_very_long(self, reporter): description = "ab" * reporter.HV_KVP_AZURE_MAX_VALUE_SIZE long_event = events.FinishReportingEvent( "event_name", description, result=events.status.FAIL @@ -86,18 +89,20 @@ def test_event_very_long(self): reporter.publish_event(long_event) reporter.q.join() kvps = list(reporter._iterate_kvps(0)) - self.assertEqual(3, len(kvps)) + assert 3 == len(kvps) # restore from the kvp to see the content are all there full_description = "" for i in range(len(kvps)): msg_slice = json.loads(kvps[i]["value"]) - self.assertEqual(msg_slice["msg_i"], i) + assert msg_slice["msg_i"] == i full_description += msg_slice["msg"] - self.assertEqual(description, full_description) + assert 
description == full_description - def test_not_truncate_kvp_file_modified_after_boot(self): - with open(self.tmp_file_path, "wb+") as f: + def test_not_truncate_kvp_file_modified_after_boot( + self, kvp_file_path, reporter + ): + with open(kvp_file_path, "wb+") as f: kvp = {"key": "key1", "value": "value1"} data = struct.pack( "%ds%ds" @@ -110,18 +115,17 @@ def test_not_truncate_kvp_file_modified_after_boot(self): ) f.write(data) cur_time = time.time() - os.utime(self.tmp_file_path, (cur_time, cur_time)) + os.utime(kvp_file_path, (cur_time, cur_time)) # reset this because the unit test framework # has already polluted the class variable HyperVKvpReportingHandler._already_truncated_pool_file = False - reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) kvps = list(reporter._iterate_kvps(0)) - self.assertEqual(1, len(kvps)) + assert 1 == len(kvps) - def test_truncate_stale_kvp_file(self): - with open(self.tmp_file_path, "wb+") as f: + def test_truncate_stale_kvp_file(self, kvp_file_path, reporter): + with open(kvp_file_path, "wb+") as f: kvp = {"key": "key1", "value": "value1"} data = struct.pack( "%ds%ds" @@ -136,20 +140,19 @@ def test_truncate_stale_kvp_file(self): # set the time ways back to make it look like # we had an old kvp file - os.utime(self.tmp_file_path, (1000000, 1000000)) + os.utime(kvp_file_path, (1000000, 1000000)) # reset this because the unit test framework # has already polluted the class variable HyperVKvpReportingHandler._already_truncated_pool_file = False - reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) + reporter = HyperVKvpReportingHandler(kvp_file_path=kvp_file_path) kvps = list(reporter._iterate_kvps(0)) - self.assertEqual(0, len(kvps)) + assert 0 == len(kvps) @mock.patch("cloudinit.distros.uses_systemd") @mock.patch("cloudinit.subp.subp") - def test_get_boot_telemetry(self, m_subp, m_sysd): - reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) + def 
test_get_boot_telemetry(self, m_subp, m_sysd, reporter): datetime_pattern = ( r"\d{4}-[01]\d-[0-3]\dT[0-2]\d:[0-5]" r"\d:[0-5]\d\.\d+([+-][0-2]\d:[0-5]\d|Z)" @@ -166,7 +169,7 @@ def test_get_boot_telemetry(self, m_subp, m_sysd): reporter.publish_event(azure.get_boot_telemetry()) reporter.q.join() kvps = list(reporter._iterate_kvps(0)) - self.assertEqual(1, len(kvps)) + assert 1 == len(kvps) evt_msg = kvps[0]["value"] if not re.search("kernel_start=" + datetime_pattern, evt_msg): @@ -176,14 +179,13 @@ def test_get_boot_telemetry(self, m_subp, m_sysd): if not re.search("cloudinit_activation=" + datetime_pattern, evt_msg): raise AssertionError("missing cloudinit_activation timestamp") - def test_get_system_info(self): - reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) + def test_get_system_info(self, reporter): pattern = r"[^=\s]+" reporter.publish_event(azure.get_system_info()) reporter.q.join() kvps = list(reporter._iterate_kvps(0)) - self.assertEqual(1, len(kvps)) + assert 1 == len(kvps) evt_msg = kvps[0]["value"] # the most important information is cloudinit version, @@ -196,20 +198,18 @@ def test_get_system_info(self): if not re.search("variant=" + pattern, evt_msg): raise AssertionError("missing distro variant string") - def test_report_diagnostic_event_without_logger_func(self): - reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) + def test_report_diagnostic_event_without_logger_func(self, reporter): diagnostic_msg = "test_diagnostic" reporter.publish_event(azure.report_diagnostic_event(diagnostic_msg)) reporter.q.join() kvps = list(reporter._iterate_kvps(0)) - self.assertEqual(1, len(kvps)) + assert 1 == len(kvps) evt_msg = kvps[0]["value"] if diagnostic_msg not in evt_msg: raise AssertionError("missing expected diagnostic message") - def test_report_diagnostic_event_with_logger_func(self): - reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) + def 
test_report_diagnostic_event_with_logger_func(self, reporter): logger_func = mock.MagicMock() diagnostic_msg = "test_diagnostic" reporter.publish_event( @@ -219,15 +219,14 @@ def test_report_diagnostic_event_with_logger_func(self): ) reporter.q.join() kvps = list(reporter._iterate_kvps(0)) - self.assertEqual(1, len(kvps)) + assert 1 == len(kvps) evt_msg = kvps[0]["value"] if diagnostic_msg not in evt_msg: raise AssertionError("missing expected diagnostic message") logger_func.assert_called_once_with(diagnostic_msg) - def test_report_compressed_event(self): - reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) + def test_report_compressed_event(self, reporter): try: instantiated_handler_registry.register_item("telemetry", reporter) event_desc = b"test_compressed" @@ -257,14 +256,13 @@ def validate_compressed_kvps(self, reporter, count, values): base64.decodebytes(evt_msg_json["data"].encode("ascii")) ) - self.assertLess(compressed_count, len(values)) - self.assertEqual(evt_data, values[compressed_count]) - self.assertEqual(evt_encoding, "gz+b64") + assert compressed_count < len(values) + assert evt_data == values[compressed_count] + assert evt_encoding == "gz+b64" compressed_count += 1 - self.assertEqual(compressed_count, count) + assert compressed_count == count - def test_unique_kvp_key(self): - reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) + def test_unique_kvp_key(self, reporter): evt1 = events.ReportingEvent( "event_type", "event_message", "event_description" ) @@ -280,21 +278,18 @@ def test_unique_kvp_key(self): reporter.q.join() kvps = list(reporter._iterate_kvps(0)) - self.assertEqual(2, len(kvps)) - self.assertNotEqual( - kvps[0]["key"], kvps[1]["key"], "duplicate keys for KVP entries" - ) + assert 2 == len(kvps) + assert ( + kvps[0]["key"] != kvps[1]["key"] + ), "duplicate keys for KVP entries" - def test_write_key(self): - reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) + def 
test_write_key(self, reporter): reporter.write_key("test-key", "test-value") assert list(reporter._iterate_kvps(0)) == [ {"key": "test-key", "value": "test-value"} ] - def test_write_key_truncates(self): - reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) - + def test_write_key_truncates(self, reporter): value = "A" * 2000 reporter.write_key("test-key", value) diff --git a/tests/unittests/runs/test_merge_run.py b/tests/unittests/runs/test_merge_run.py index 876f8c69..e0e46740 100644 --- a/tests/unittests/runs/test_merge_run.py +++ b/tests/unittests/runs/test_merge_run.py @@ -11,7 +11,6 @@ from tests.unittests.helpers import replicate_test_root -@pytest.mark.usefixtures("fake_filesystem_hook") @pytest.fixture(autouse=True) def user_data(tmp_path): replicate_test_root("simple_ubuntu", str(tmp_path)) diff --git a/tests/unittests/runs/test_simple_run.py b/tests/unittests/runs/test_simple_run.py index 93493e72..1a4ab110 100644 --- a/tests/unittests/runs/test_simple_run.py +++ b/tests/unittests/runs/test_simple_run.py @@ -12,7 +12,6 @@ from tests.unittests.helpers import replicate_test_root -@pytest.mark.usefixtures("fake_filesystem_hook") @pytest.fixture(autouse=True) def replicate_root(tmp_path): replicate_test_root("simple_ubuntu", str(tmp_path)) diff --git a/tests/unittests/sources/test_cloudcix.py b/tests/unittests/sources/test_cloudcix.py index 531e6022..9d57677d 100644 --- a/tests/unittests/sources/test_cloudcix.py +++ b/tests/unittests/sources/test_cloudcix.py @@ -1,4 +1,5 @@ # This file is part of cloud-init. See LICENSE file for license information. +# pylint: disable=attribute-defined-outside-init import json from unittest import mock diff --git a/tests/unittests/sources/test_cloudstack.py b/tests/unittests/sources/test_cloudstack.py index 990c8a7f..eb49a7b3 100644 --- a/tests/unittests/sources/test_cloudstack.py +++ b/tests/unittests/sources/test_cloudstack.py @@ -1,4 +1,5 @@ # This file is part of cloud-init. 
See LICENSE file for license information. +# pylint: disable=attribute-defined-outside-init from socket import gaierror from textwrap import dedent diff --git a/tests/unittests/sources/test_configdrive.py b/tests/unittests/sources/test_configdrive.py index 089bdef4..b072f3f2 100644 --- a/tests/unittests/sources/test_configdrive.py +++ b/tests/unittests/sources/test_configdrive.py @@ -886,7 +886,8 @@ def test_convert_raises_value_error_on_missing_name(self): known_macs=macs, ) - def test_conversion_with_route(self, tmp_path): + @mock.patch("cloudinit.subp.which") + def test_conversion_with_route(self, m_which, tmp_path): ncfg = openstack.convert_net_json( NETWORK_DATA_2, known_macs=KNOWN_MACS ) @@ -901,6 +902,19 @@ def test_conversion_with_route(self, tmp_path): "netmask": "0.0.0.0", "gateway": "2.2.2.9", } in routes + + m_which.return_value = "/sbin/ip" + eni_renderer = eni.Renderer() + eni_renderer.render_network_state( + network_state.parse_net_config_data(ncfg), target=str(tmp_path) + ) + with open( + os.path.join(tmp_path, "etc", "network", "interfaces"), "r" + ) as f: + eni_rendering = f.read() + assert "ip route add default via 2.2.2.9" in eni_rendering + + m_which.return_value = None eni_renderer = eni.Renderer() eni_renderer.render_network_state( network_state.parse_net_config_data(ncfg), target=str(tmp_path) diff --git a/tests/unittests/sources/test_digitalocean.py b/tests/unittests/sources/test_digitalocean.py index 0ef2e51a..b5bc526e 100644 --- a/tests/unittests/sources/test_digitalocean.py +++ b/tests/unittests/sources/test_digitalocean.py @@ -5,6 +5,7 @@ # Author: Scott Moser # # This file is part of cloud-init. See LICENSE file for license information. 
+# pylint: disable=attribute-defined-outside-init import json diff --git a/tests/unittests/sources/test_gce.py b/tests/unittests/sources/test_gce.py index d1e60472..e9941499 100644 --- a/tests/unittests/sources/test_gce.py +++ b/tests/unittests/sources/test_gce.py @@ -3,6 +3,7 @@ # Author: Vaidas Jablonskis # # This file is part of cloud-init. See LICENSE file for license information. +# pylint: disable=attribute-defined-outside-init import datetime import json diff --git a/tests/unittests/sources/test_hetzner.py b/tests/unittests/sources/test_hetzner.py index 26883756..af0a893c 100644 --- a/tests/unittests/sources/test_hetzner.py +++ b/tests/unittests/sources/test_hetzner.py @@ -10,8 +10,7 @@ from cloudinit.sources import DataSourceHetzner from tests.unittests.helpers import mock -METADATA = util.load_yaml( - """ +METADATA = b""" hostname: cloudinit-test instance-id: 123456 local-ipv4: '' @@ -45,13 +44,24 @@ test-key@workstation vendor_data: "test" """ -) USERDATA = b"""#cloud-config runcmd: - [touch, /root/cloud-init-worked ] """ +PRIVATE_NETWORKS = b""" +- ip: 10.1.0.2 + alias_ips: [] + interface_num: 2 + mac_address: 86:00:00:aa:5d:f8 + network_id: 11352901 + network_name: network-2 + network: 10.1.0.0/16 + subnet: 10.1.0.0/24 + gateway: 10.1.0.1 +""" + class TestDataSourceHetzner: """ @@ -68,16 +78,14 @@ def ds(self, paths, tmp_path): return ds @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery") - @mock.patch("cloudinit.sources.DataSourceHetzner.EphemeralDHCPv4") + @mock.patch("cloudinit.sources.DataSourceHetzner.EphemeralIPNetwork") @mock.patch("cloudinit.net.find_fallback_nic") - @mock.patch("cloudinit.sources.helpers.hetzner.read_metadata") - @mock.patch("cloudinit.sources.helpers.hetzner.read_userdata") + @mock.patch("cloudinit.sources.helpers.hetzner.get_metadata") @mock.patch("cloudinit.sources.DataSourceHetzner.get_hcloud_data") def test_read_data( self, m_get_hcloud_data, - m_usermd, - m_readmd, + m_get_metadata, m_fallback_nic, m_net, 
m_dhcp, @@ -85,10 +93,15 @@ def test_read_data( ): m_get_hcloud_data.return_value = ( True, - str(METADATA.get("instance-id")), + str(util.load_yaml(METADATA).get("instance-id")), ) - m_readmd.return_value = METADATA.copy() - m_usermd.return_value = USERDATA + # Use side_effect to return values for the three sequential calls to + # helpers.hetzner.get_metadata: metadata, private-networks, userdata + m_get_metadata.side_effect = [ + ("metadata_url", METADATA), + ("privnets_url", PRIVATE_NETWORKS), + ("userdata_url", USERDATA), + ] m_fallback_nic.return_value = "eth0" m_dhcp.return_value = [ { @@ -104,29 +117,45 @@ def test_read_data( m_net.assert_called_once_with( ds.distro, - iface="eth0", + interface="eth0", + ipv4=True, + ipv6=True, connectivity_urls_data=[ + { + "url": "http://[fe80::a9fe:a9fe%25eth0]/hetzner/v1/metadata/instance-id" + }, { "url": "http://169.254.169.254/hetzner/v1/metadata/instance-id" - } + }, ], ) - assert 0 != m_readmd.call_count + assert 0 != m_get_metadata.call_count - assert METADATA.get("hostname") == ds.get_hostname().hostname + assert ( + util.load_yaml(METADATA).get("hostname") + == ds.get_hostname().hostname + ) - assert METADATA.get("public-keys") == ds.get_public_ssh_keys() + assert ( + util.load_yaml(METADATA).get("public-keys") + == ds.get_public_ssh_keys() + ) + assert ds.metadata["private-networks"] == util.load_yaml( + PRIVATE_NETWORKS, allowed=(dict, list) + ) assert isinstance(ds.get_public_ssh_keys(), list) assert ds.get_userdata_raw() == USERDATA - assert ds.get_vendordata_raw() == METADATA.get("vendor_data") + assert ds.get_vendordata_raw() == util.load_yaml(METADATA).get( + "vendor_data" + ) - @mock.patch("cloudinit.sources.helpers.hetzner.read_metadata") + @mock.patch("cloudinit.sources.helpers.hetzner.get_metadata") @mock.patch("cloudinit.net.find_fallback_nic") @mock.patch("cloudinit.sources.DataSourceHetzner.get_hcloud_data") def test_not_on_hetzner_returns_false( - self, m_get_hcloud_data, m_find_fallback, 
m_read_md, ds + self, m_get_hcloud_data, m_find_fallback, m_get_metadata, ds ): """If helper 'get_hcloud_data' returns False, return False from get_data.""" @@ -136,4 +165,4 @@ def test_not_on_hetzner_returns_false( assert not ret # These are a white box attempt to ensure it did not search. assert 0 == m_find_fallback.call_count - assert 0 == m_read_md.call_count + assert 0 == m_get_metadata.call_count diff --git a/tests/unittests/sources/test_ibmcloud.py b/tests/unittests/sources/test_ibmcloud.py index 2589871a..f6b81908 100644 --- a/tests/unittests/sources/test_ibmcloud.py +++ b/tests/unittests/sources/test_ibmcloud.py @@ -1,4 +1,5 @@ # This file is part of cloud-init. See LICENSE file for license information. +# pylint: disable=attribute-defined-outside-init import base64 import copy diff --git a/tests/unittests/sources/test_init.py b/tests/unittests/sources/test_init.py index 05bebd6e..f4a6cc6b 100644 --- a/tests/unittests/sources/test_init.py +++ b/tests/unittests/sources/test_init.py @@ -24,7 +24,7 @@ redact_sensitive_keys, ) from cloudinit.user_data import UserDataProcessor -from tests.unittests.helpers import CiTestCase, assert_count_equal, mock +from tests.unittests.helpers import assert_count_equal, mock class DataSourceTestSubclassNet(DataSource): @@ -933,7 +933,7 @@ def fake_get_data(): ) in caplog.record_tuples -class TestRedactSensitiveData(CiTestCase): +class TestRedactSensitiveData: def test_redact_sensitive_data_noop_when_no_sensitive_keys_present(self): """When sensitive_keys is absent or empty from metadata do nothing.""" md = {"my": "data"} @@ -962,7 +962,7 @@ def test_redact_sensitive_data_does_redacts_with_default_string(self): assert secure_md == redact_sensitive_keys(md) -class TestCanonicalCloudID(CiTestCase): +class TestCanonicalCloudID: def test_cloud_id_returns_platform_on_unknowns(self): """When region and cloud_name are unknown, return platform.""" assert "platform" == canonical_cloud_id( diff --git 
a/tests/unittests/sources/test_nwcs.py b/tests/unittests/sources/test_nwcs.py index 9ecd13a5..c208c875 100644 --- a/tests/unittests/sources/test_nwcs.py +++ b/tests/unittests/sources/test_nwcs.py @@ -1,8 +1,10 @@ # This file is part of cloud-init. See LICENSE file for license information. -from cloudinit import helpers, settings, util +import pytest + +from cloudinit import settings, util from cloudinit.sources import DataSourceNWCS -from tests.unittests.helpers import CiTestCase, mock +from tests.unittests.helpers import mock METADATA = util.load_yaml( """ @@ -26,22 +28,18 @@ ) -class TestDataSourceNWCS(CiTestCase): +class TestDataSourceNWCS: """ Test reading the metadata """ - def setUp(self): - super(TestDataSourceNWCS, self).setUp() - self.tmp = self.tmp_dir() - - def get_ds(self): + @pytest.fixture + def ds(self, paths, tmp_path): distro = mock.MagicMock() - distro.get_tmp_exec_path = self.tmp_dir - ds = DataSourceNWCS.DataSourceNWCS( - settings.CFG_BUILTIN, distro, helpers.Paths({"run_dir": self.tmp}) + distro.get_tmp_exec_path = str(tmp_path) + return DataSourceNWCS.DataSourceNWCS( + settings.CFG_BUILTIN, distro, paths ) - return ds @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery") @mock.patch("cloudinit.sources.DataSourceNWCS.EphemeralDHCPv4") @@ -55,6 +53,7 @@ def test_read_data( m_fallback_nic, m_net, m_dhcp, + ds, ): m_ds_detect.return_value = True m_readmd.return_value = METADATA.copy() @@ -69,9 +68,7 @@ def test_read_data( } ] - ds = self.get_ds() - ret = ds.get_data() - self.assertTrue(ret) + assert ds.get_data() m_net.assert_called_once_with( ds.distro, @@ -81,29 +78,27 @@ def test_read_data( ], ) - self.assertTrue(m_readmd.called) + assert m_readmd.called - self.assertEqual(METADATA.get("hostname"), ds.get_hostname().hostname) + assert METADATA.get("hostname") == ds.get_hostname().hostname - self.assertEqual(METADATA.get("public-keys"), ds.get_public_ssh_keys()) + assert METADATA.get("public-keys") == ds.get_public_ssh_keys() - 
self.assertIsInstance(ds.get_public_ssh_keys(), list) - self.assertEqual(ds.get_userdata_raw(), METADATA.get("userdata")) - self.assertEqual(ds.get_vendordata_raw(), METADATA.get("vendordata")) + assert isinstance(ds.get_public_ssh_keys(), list) + assert ds.get_userdata_raw() == METADATA.get("userdata") + assert ds.get_vendordata_raw() == METADATA.get("vendordata") @mock.patch("cloudinit.sources.DataSourceNWCS.read_metadata") @mock.patch("cloudinit.net.find_fallback_nic") @mock.patch("cloudinit.sources.DataSourceNWCS.DataSourceNWCS.ds_detect") def test_not_on_nwcs_returns_false( - self, m_ds_detect, m_find_fallback, m_read_md + self, m_ds_detect, m_find_fallback, m_read_md, ds ): """If 'ds_detect' returns False, return False from get_data.""" m_ds_detect.return_value = False - ds = self.get_ds() - ret = ds.get_data() + assert not ds.get_data() - self.assertFalse(ret) # These are a white box attempt to ensure it did not search. m_find_fallback.assert_not_called() m_read_md.assert_not_called() @@ -112,6 +107,6 @@ def test_not_on_nwcs_returns_false( def test_get_interface_name(self, m_ifname): m_ifname.return_value = "eth0" - self.assertEqual( - m_ifname.return_value, METADATA["network"]["config"][0]["name"] + assert ( + m_ifname.return_value == METADATA["network"]["config"][0]["name"] ) diff --git a/tests/unittests/sources/test_opennebula.py b/tests/unittests/sources/test_opennebula.py index ecb0fa76..c6f4066a 100644 --- a/tests/unittests/sources/test_opennebula.py +++ b/tests/unittests/sources/test_opennebula.py @@ -1,14 +1,14 @@ # This file is part of cloud-init. See LICENSE file for license information. 
+# pylint: disable=attribute-defined-outside-init import os import pwd -import unittest import pytest -from cloudinit import atomic_helper, helpers, util +from cloudinit import atomic_helper, util from cloudinit.sources import DataSourceOpenNebula as ds -from tests.unittests.helpers import CiTestCase, mock, populate_dir +from tests.unittests.helpers import mock, populate_dir TEST_VARS = { "VAR1": "single", @@ -41,20 +41,14 @@ DS_PATH = "cloudinit.sources.DataSourceOpenNebula" -class TestOpenNebulaDataSource(CiTestCase): +@pytest.mark.allow_subp_for("bash", "sh") +class TestOpenNebulaDataSource: parsed_user = None - allowed_subp = ["bash", "sh"] - - def setUp(self): - super(TestOpenNebulaDataSource, self).setUp() - self.tmp = self.tmp_dir() - self.paths = helpers.Paths( - {"cloud_dir": self.tmp, "run_dir": self.tmp} - ) + @pytest.fixture(autouse=True) + def fixtures(self, paths): # defaults for few tests - self.ds = ds.DataSourceOpenNebula - self.seed_dir = os.path.join(self.paths.seed_dir, "opennebula") + self.seed_dir = os.path.join(paths.seed_dir, "opennebula") self.sys_cfg = {"datasource": {"OpenNebula": {"dsmode": "local"}}} # we don't want 'sudo' called in tests. 
so we patch switch_user_cmd @@ -64,152 +58,129 @@ def my_switch_user_cmd(user): self.switch_user_cmd_real = ds.switch_user_cmd ds.switch_user_cmd = my_switch_user_cmd - - def tearDown(self): + yield ds.switch_user_cmd = self.switch_user_cmd_real - super().tearDown() - - def test_get_data_non_contextdisk(self): - orig_find_devs_with = util.find_devs_with - try: - # dont' try to lookup for CDs - util.find_devs_with = lambda n: [] # type: ignore - dsrc = self.ds(sys_cfg=self.sys_cfg, distro=None, paths=self.paths) - ret = dsrc.get_data() - self.assertFalse(ret) - finally: - util.find_devs_with = orig_find_devs_with - - def test_get_data_broken_contextdisk(self): - orig_find_devs_with = util.find_devs_with - try: - # dont' try to lookup for CDs - util.find_devs_with = lambda n: [] # type: ignore - populate_dir(self.seed_dir, {"context.sh": INVALID_CONTEXT}) - dsrc = self.ds(sys_cfg=self.sys_cfg, distro=None, paths=self.paths) - self.assertRaises(ds.BrokenContextDiskDir, dsrc.get_data) - finally: - util.find_devs_with = orig_find_devs_with - - def test_get_data_invalid_identity(self): - orig_find_devs_with = util.find_devs_with - try: - # generate non-existing system user name - sys_cfg = self.sys_cfg - invalid_user = "invalid" - while not sys_cfg["datasource"]["OpenNebula"].get("parseuser"): - try: - pwd.getpwnam(invalid_user) - invalid_user += "X" - except KeyError: - sys_cfg["datasource"]["OpenNebula"][ - "parseuser" - ] = invalid_user - - # dont' try to lookup for CDs - util.find_devs_with = lambda n: [] # type: ignore - populate_context_dir(self.seed_dir, {"KEY1": "val1"}) - dsrc = self.ds(sys_cfg=sys_cfg, distro=None, paths=self.paths) - self.assertRaises(ds.BrokenContextDiskDir, dsrc.get_data) - finally: - util.find_devs_with = orig_find_devs_with - - def test_get_data(self): - orig_find_devs_with = util.find_devs_with - try: - # dont' try to lookup for CDs - util.find_devs_with = lambda n: [] # type: ignore - populate_context_dir(self.seed_dir, {"KEY1": "val1"}) 
- dsrc = self.ds(sys_cfg=self.sys_cfg, distro=None, paths=self.paths) - with mock.patch(DS_PATH + ".pwd.getpwnam") as getpwnam: - ret = dsrc.get_data() - self.assertEqual([mock.call("nobody")], getpwnam.call_args_list) - self.assertTrue(ret) - finally: - util.find_devs_with = orig_find_devs_with - self.assertEqual("opennebula", dsrc.cloud_name) - self.assertEqual("opennebula", dsrc.platform_type) - self.assertEqual( - "seed-dir (%s/seed/opennebula)" % self.tmp, dsrc.subplatform + + @pytest.fixture + def dsrc(self, paths): + return ds.DataSourceOpenNebula( + sys_cfg=self.sys_cfg, distro=None, paths=paths ) + # dont' try to lookup for CDs + @mock.patch(DS_PATH + ".util.find_devs_with", return_value=[]) + def test_get_data_non_contextdisk(self, m_find_devs_with, dsrc): + assert not dsrc.get_data() + + # dont' try to lookup for CDs + @mock.patch(DS_PATH + ".util.find_devs_with", return_value=[]) + def test_get_data_broken_contextdisk(self, m_find_devs_with, dsrc): + populate_dir(self.seed_dir, {"context.sh": INVALID_CONTEXT}) + with pytest.raises(ds.BrokenContextDiskDir): + dsrc.get_data() + + # dont' try to lookup for CDs + @mock.patch(DS_PATH + ".util.find_devs_with", return_value=[]) + def test_get_data_invalid_identity(self, m_find_devs_with, dsrc): + # generate non-existing system user name + sys_cfg = self.sys_cfg + invalid_user = "invalid" + while not sys_cfg["datasource"]["OpenNebula"].get("parseuser"): + try: + pwd.getpwnam(invalid_user) + invalid_user += "X" + except KeyError: + sys_cfg["datasource"]["OpenNebula"]["parseuser"] = invalid_user + + populate_context_dir(self.seed_dir, {"KEY1": "val1"}) + with pytest.raises(ds.BrokenContextDiskDir): + dsrc.get_data() + + # dont' try to lookup for CDs + @mock.patch(DS_PATH + ".util.find_devs_with", return_value=[]) + def test_get_data(self, m_find_devs_with, dsrc, paths): + populate_context_dir(self.seed_dir, {"KEY1": "val1"}) + with mock.patch(DS_PATH + ".pwd.getpwnam") as getpwnam: + ret = dsrc.get_data() + 
assert [mock.call("nobody")] == getpwnam.call_args_list + assert ret + assert "opennebula" == dsrc.cloud_name + assert "opennebula" == dsrc.platform_type + assert "seed-dir (%s/opennebula)" % paths.seed_dir == dsrc.subplatform + def test_seed_dir_non_contextdisk(self): - self.assertRaises( - ds.NonContextDiskDir, - ds.read_context_disk_dir, - self.seed_dir, - mock.Mock(), - ) + with pytest.raises(ds.NonContextDiskDir): + ds.read_context_disk_dir( + self.seed_dir, + mock.Mock(), + ) def test_seed_dir_empty1_context(self): populate_dir(self.seed_dir, {"context.sh": ""}) results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - self.assertIsNone(results["userdata"]) - self.assertEqual(results["metadata"], {}) + assert results["userdata"] is None + assert results["metadata"] == {} def test_seed_dir_empty2_context(self): populate_context_dir(self.seed_dir, {}) results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - self.assertIsNone(results["userdata"]) - self.assertEqual(results["metadata"], {}) + assert results["userdata"] is None + assert results["metadata"] == {} def test_seed_dir_broken_context(self): populate_dir(self.seed_dir, {"context.sh": INVALID_CONTEXT}) - self.assertRaises( - ds.BrokenContextDiskDir, - ds.read_context_disk_dir, - self.seed_dir, - mock.Mock(), - ) + with pytest.raises(ds.BrokenContextDiskDir): + ds.read_context_disk_dir( + self.seed_dir, + mock.Mock(), + ) def test_context_parser(self): populate_context_dir(self.seed_dir, TEST_VARS) results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - self.assertTrue("metadata" in results) - self.assertEqual(TEST_VARS, results["metadata"]) + assert "metadata" in results + assert TEST_VARS == results["metadata"] - def test_ssh_key(self): + def test_ssh_key(self, tmp_path): public_keys = ["first key", "second key"] for c in range(4): for k in ("SSH_KEY", "SSH_PUBLIC_KEY"): - my_d = os.path.join(self.tmp, "%s-%i" % (k, c)) + my_d = str(tmp_path / f"{k}-{c}") 
populate_context_dir(my_d, {k: "\n".join(public_keys)}) results = ds.read_context_disk_dir(my_d, mock.Mock()) - self.assertTrue("metadata" in results) - self.assertTrue("public-keys" in results["metadata"]) - self.assertEqual( - public_keys, results["metadata"]["public-keys"] - ) + assert "metadata" in results + assert "public-keys" in results["metadata"] + assert public_keys == results["metadata"]["public-keys"] public_keys.append(SSH_KEY % (c + 1,)) - def test_user_data_plain(self): + def test_user_data_plain(self, tmp_path): for k in ("USER_DATA", "USERDATA"): - my_d = os.path.join(self.tmp, k) + my_d = os.path.join(tmp_path, k) populate_context_dir(my_d, {k: USER_DATA, "USERDATA_ENCODING": ""}) results = ds.read_context_disk_dir(my_d, mock.Mock()) - self.assertTrue("userdata" in results) - self.assertEqual(USER_DATA, results["userdata"]) + assert "userdata" in results + assert USER_DATA == results["userdata"] - def test_user_data_encoding_required_for_decode(self): + def test_user_data_encoding_required_for_decode(self, tmp_path): b64userdata = atomic_helper.b64e(USER_DATA) for k in ("USER_DATA", "USERDATA"): - my_d = os.path.join(self.tmp, k) + my_d = str(tmp_path / k) populate_context_dir(my_d, {k: b64userdata}) results = ds.read_context_disk_dir(my_d, mock.Mock()) - self.assertTrue("userdata" in results) - self.assertEqual(b64userdata, results["userdata"]) + assert "userdata" in results + assert b64userdata == results["userdata"] - def test_user_data_base64_encoding(self): + def test_user_data_base64_encoding(self, tmp_path): for k in ("USER_DATA", "USERDATA"): - my_d = os.path.join(self.tmp, k) + my_d = str(tmp_path / k) populate_context_dir( my_d, { @@ -219,11 +190,11 @@ def test_user_data_base64_encoding(self): ) results = ds.read_context_disk_dir(my_d, mock.Mock()) - self.assertTrue("userdata" in results) - self.assertEqual(USER_DATA, results["userdata"]) + assert "userdata" in results + assert USER_DATA == results["userdata"] @mock.patch(DS_PATH + 
".get_physical_nics_by_mac") - def test_hostname(self, m_get_phys_by_mac): + def test_hostname(self, m_get_phys_by_mac, tmp_path): for dev in ("eth0", "ens3"): m_get_phys_by_mac.return_value = {MACADDR: dev} for k in ( @@ -233,15 +204,13 @@ def test_hostname(self, m_get_phys_by_mac): "IP_PUBLIC", "ETH0_IP", ): - my_d = os.path.join(self.tmp, k) + my_d = str(tmp_path / k) populate_context_dir(my_d, {k: PUBLIC_IP}) results = ds.read_context_disk_dir(my_d, mock.Mock()) - self.assertTrue("metadata" in results) - self.assertTrue("local-hostname" in results["metadata"]) - self.assertEqual( - PUBLIC_IP, results["metadata"]["local-hostname"] - ) + assert "metadata" in results + assert "local-hostname" in results["metadata"] + assert PUBLIC_IP == results["metadata"]["local-hostname"] @mock.patch(DS_PATH + ".get_physical_nics_by_mac") def test_network_interfaces(self, m_get_phys_by_mac): @@ -253,8 +222,8 @@ def test_network_interfaces(self, m_get_phys_by_mac): populate_context_dir(self.seed_dir, {"ETH0_IP": IP_BY_MACADDR}) results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - self.assertTrue("network-interfaces" in results) - self.assertTrue( + assert "network-interfaces" in results + assert ( IP_BY_MACADDR + "/" + IP4_PREFIX in results["network-interfaces"]["ethernets"][dev]["addresses"] ) @@ -265,8 +234,8 @@ def test_network_interfaces(self, m_get_phys_by_mac): ) results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - self.assertTrue("network-interfaces" in results) - self.assertTrue( + assert "network-interfaces" in results + assert ( IP_BY_MACADDR + "/" + IP4_PREFIX in results["network-interfaces"]["ethernets"][dev]["addresses"] ) @@ -279,8 +248,8 @@ def test_network_interfaces(self, m_get_phys_by_mac): ) results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - self.assertTrue("network-interfaces" in results) - self.assertTrue( + assert "network-interfaces" in results + assert ( IP_BY_MACADDR + "/" + IP4_PREFIX in 
results["network-interfaces"]["ethernets"][dev]["addresses"] ) @@ -296,8 +265,8 @@ def test_network_interfaces(self, m_get_phys_by_mac): ) results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - self.assertTrue("network-interfaces" in results) - self.assertTrue( + assert "network-interfaces" in results + assert ( IP_BY_MACADDR + "/16" in results["network-interfaces"]["ethernets"][dev]["addresses"] ) @@ -313,8 +282,8 @@ def test_network_interfaces(self, m_get_phys_by_mac): ) results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - self.assertTrue("network-interfaces" in results) - self.assertTrue( + assert "network-interfaces" in results + assert ( IP_BY_MACADDR + "/" + IP4_PREFIX in results["network-interfaces"]["ethernets"][dev]["addresses"] ) @@ -329,8 +298,8 @@ def test_network_interfaces(self, m_get_phys_by_mac): ) results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - self.assertTrue("network-interfaces" in results) - self.assertTrue( + assert "network-interfaces" in results + assert ( IP6_GLOBAL + "/64" in results["network-interfaces"]["ethernets"][dev]["addresses"] ) @@ -345,8 +314,8 @@ def test_network_interfaces(self, m_get_phys_by_mac): ) results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - self.assertTrue("network-interfaces" in results) - self.assertTrue( + assert "network-interfaces" in results + assert ( IP6_ULA + "/64" in results["network-interfaces"]["ethernets"][dev]["addresses"] ) @@ -362,8 +331,8 @@ def test_network_interfaces(self, m_get_phys_by_mac): ) results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - self.assertTrue("network-interfaces" in results) - self.assertTrue( + assert "network-interfaces" in results + assert ( IP6_GLOBAL + "/" + IP6_PREFIX in results["network-interfaces"]["ethernets"][dev]["addresses"] ) @@ -379,13 +348,14 @@ def test_network_interfaces(self, m_get_phys_by_mac): ) results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - self.assertTrue("network-interfaces" 
in results) - self.assertTrue( + assert "network-interfaces" in results + assert ( IP6_GLOBAL + "/64" in results["network-interfaces"]["ethernets"][dev]["addresses"] ) - def test_find_candidates(self): + @mock.patch(DS_PATH + ".util.find_devs_with") + def test_find_candidates(self, m_find_devs_with): def my_devs_with(criteria): return { "LABEL=CONTEXT": ["/dev/sdb"], @@ -393,18 +363,13 @@ def my_devs_with(criteria): "TYPE=iso9660": ["/dev/vdb"], }.get(criteria, []) - orig_find_devs_with = util.find_devs_with - try: - util.find_devs_with = my_devs_with - self.assertEqual( - ["/dev/sdb", "/dev/sr0", "/dev/vdb"], ds.find_candidate_devs() - ) - finally: - util.find_devs_with = orig_find_devs_with + m_find_devs_with.side_effect = my_devs_with + util.find_devs_with = my_devs_with + assert ["/dev/sdb", "/dev/sr0", "/dev/vdb"] == ds.find_candidate_devs() @mock.patch(DS_PATH + ".net.get_interfaces_by_mac", mock.Mock(return_value={})) -class TestOpenNebulaNetwork(unittest.TestCase): +class TestOpenNebulaNetwork: system_nics = ("eth0", "ens3") @@ -419,7 +384,7 @@ def test_context_devname(self): "02:00:0a:12:0f:0f": "ETH1", } net = ds.OpenNebulaNetwork(context, mock.Mock()) - self.assertEqual(expected, net.context_devname) + assert expected == net.context_devname def test_get_nameservers(self): """ @@ -437,21 +402,21 @@ def test_get_nameservers(self): } net = ds.OpenNebulaNetwork(context, mock.Mock()) val = net.get_nameservers("eth0") - self.assertEqual(expected, val) + assert expected == val def test_get_mtu(self): """Verify get_mtu('device') correctly returns MTU size.""" context = {"ETH0_MTU": "1280"} net = ds.OpenNebulaNetwork(context, mock.Mock()) val = net.get_mtu("eth0") - self.assertEqual("1280", val) + assert "1280" == val def test_get_ip(self): """Verify get_ip('device') correctly returns IPv4 address.""" context = {"ETH0_IP": PUBLIC_IP} net = ds.OpenNebulaNetwork(context, mock.Mock()) val = net.get_ip("eth0", MACADDR) - self.assertEqual(PUBLIC_IP, val) + assert 
PUBLIC_IP == val def test_get_ip_emptystring(self): """ @@ -462,7 +427,7 @@ def test_get_ip_emptystring(self): context = {"ETH0_IP": ""} net = ds.OpenNebulaNetwork(context, mock.Mock()) val = net.get_ip("eth0", MACADDR) - self.assertEqual(IP_BY_MACADDR, val) + assert IP_BY_MACADDR == val def test_get_ip6(self): """ @@ -476,7 +441,7 @@ def test_get_ip6(self): expected = [IP6_GLOBAL] net = ds.OpenNebulaNetwork(context, mock.Mock()) val = net.get_ip6("eth0") - self.assertEqual(expected, val) + assert expected == val def test_get_ip6_ula(self): """ @@ -490,7 +455,7 @@ def test_get_ip6_ula(self): expected = [IP6_ULA] net = ds.OpenNebulaNetwork(context, mock.Mock()) val = net.get_ip6("eth0") - self.assertEqual(expected, val) + assert expected == val def test_get_ip6_dual(self): """ @@ -504,7 +469,7 @@ def test_get_ip6_dual(self): expected = [IP6_GLOBAL, IP6_ULA] net = ds.OpenNebulaNetwork(context, mock.Mock()) val = net.get_ip6("eth0") - self.assertEqual(expected, val) + assert expected == val def test_get_ip6_prefix(self): """ @@ -513,7 +478,7 @@ def test_get_ip6_prefix(self): context = {"ETH0_IP6_PREFIX_LENGTH": IP6_PREFIX} net = ds.OpenNebulaNetwork(context, mock.Mock()) val = net.get_ip6_prefix("eth0") - self.assertEqual(IP6_PREFIX, val) + assert IP6_PREFIX == val def test_get_ip6_prefix_emptystring(self): """ @@ -524,7 +489,7 @@ def test_get_ip6_prefix_emptystring(self): context = {"ETH0_IP6_PREFIX_LENGTH": ""} net = ds.OpenNebulaNetwork(context, mock.Mock()) val = net.get_ip6_prefix("eth0") - self.assertEqual("64", val) + assert "64" == val def test_get_gateway(self): """ @@ -534,7 +499,7 @@ def test_get_gateway(self): context = {"ETH0_GATEWAY": "1.2.3.5"} net = ds.OpenNebulaNetwork(context, mock.Mock()) val = net.get_gateway("eth0") - self.assertEqual("1.2.3.5", val) + assert "1.2.3.5" == val def test_get_gateway6(self): """ @@ -545,7 +510,7 @@ def test_get_gateway6(self): context = {"ETH0_" + k: IP6_GW} net = ds.OpenNebulaNetwork(context, mock.Mock()) val = 
net.get_gateway6("eth0") - self.assertEqual(IP6_GW, val) + assert IP6_GW == val def test_get_mask(self): """ @@ -554,7 +519,7 @@ def test_get_mask(self): context = {"ETH0_MASK": "255.255.0.0"} net = ds.OpenNebulaNetwork(context, mock.Mock()) val = net.get_mask("eth0") - self.assertEqual("255.255.0.0", val) + assert "255.255.0.0" == val def test_get_mask_emptystring(self): """ @@ -564,7 +529,7 @@ def test_get_mask_emptystring(self): context = {"ETH0_MASK": ""} net = ds.OpenNebulaNetwork(context, mock.Mock()) val = net.get_mask("eth0") - self.assertEqual("255.255.255.0", val) + assert "255.255.255.0" == val def test_get_field(self): """ @@ -573,7 +538,7 @@ def test_get_field(self): context = {"ETH9_DUMMY": "DUMMY_VALUE"} net = ds.OpenNebulaNetwork(context, mock.Mock()) val = net.get_field("eth9", "dummy") - self.assertEqual("DUMMY_VALUE", val) + assert "DUMMY_VALUE" == val def test_get_field_withdefaultvalue(self): """ @@ -583,7 +548,7 @@ def test_get_field_withdefaultvalue(self): context = {"ETH9_DUMMY": "DUMMY_VALUE"} net = ds.OpenNebulaNetwork(context, mock.Mock()) val = net.get_field("eth9", "dummy", "DEFAULT_VALUE") - self.assertEqual("DUMMY_VALUE", val) + assert "DUMMY_VALUE" == val def test_get_field_withdefaultvalue_emptycontext(self): """ @@ -593,7 +558,7 @@ def test_get_field_withdefaultvalue_emptycontext(self): context = {"ETH9_DUMMY": ""} net = ds.OpenNebulaNetwork(context, mock.Mock()) val = net.get_field("eth9", "dummy", "DEFAULT_VALUE") - self.assertEqual("DEFAULT_VALUE", val) + assert "DEFAULT_VALUE" == val def test_get_field_emptycontext(self): """ @@ -603,7 +568,7 @@ def test_get_field_emptycontext(self): context = {"ETH9_DUMMY": ""} net = ds.OpenNebulaNetwork(context, mock.Mock()) val = net.get_field("eth9", "dummy") - self.assertEqual(None, val) + assert None is val def test_get_field_nonecontext(self): """ @@ -613,7 +578,7 @@ def test_get_field_nonecontext(self): context = {"ETH9_DUMMY": None} net = ds.OpenNebulaNetwork(context, mock.Mock()) val 
= net.get_field("eth9", "dummy") - self.assertEqual(None, val) + assert None is val @mock.patch(DS_PATH + ".get_physical_nics_by_mac") def test_gen_conf_gateway(self, m_get_phys_by_mac): @@ -636,7 +601,7 @@ def test_gen_conf_gateway(self, m_get_phys_by_mac): } m_get_phys_by_mac.return_value = {MACADDR: nic} net = ds.OpenNebulaNetwork(context, mock.Mock()) - self.assertEqual(net.gen_conf(), expected) + assert net.gen_conf() == expected # set ETH0_GATEWAY context = { @@ -656,7 +621,7 @@ def test_gen_conf_gateway(self, m_get_phys_by_mac): } m_get_phys_by_mac.return_value = {MACADDR: nic} net = ds.OpenNebulaNetwork(context, mock.Mock()) - self.assertEqual(net.gen_conf(), expected) + assert net.gen_conf() == expected @mock.patch(DS_PATH + ".get_physical_nics_by_mac") def test_gen_conf_gateway6(self, m_get_phys_by_mac): @@ -679,7 +644,7 @@ def test_gen_conf_gateway6(self, m_get_phys_by_mac): } m_get_phys_by_mac.return_value = {MACADDR: nic} net = ds.OpenNebulaNetwork(context, mock.Mock()) - self.assertEqual(net.gen_conf(), expected) + assert net.gen_conf() == expected # set ETH0_GATEWAY6 context = { @@ -699,7 +664,7 @@ def test_gen_conf_gateway6(self, m_get_phys_by_mac): } m_get_phys_by_mac.return_value = {MACADDR: nic} net = ds.OpenNebulaNetwork(context, mock.Mock()) - self.assertEqual(net.gen_conf(), expected) + assert net.gen_conf() == expected @mock.patch(DS_PATH + ".get_physical_nics_by_mac") def test_gen_conf_ipv6address(self, m_get_phys_by_mac): @@ -724,7 +689,7 @@ def test_gen_conf_ipv6address(self, m_get_phys_by_mac): } m_get_phys_by_mac.return_value = {MACADDR: nic} net = ds.OpenNebulaNetwork(context, mock.Mock()) - self.assertEqual(net.gen_conf(), expected) + assert net.gen_conf() == expected # set ETH0_IP6, ETH0_IP6_ULA, ETH0_IP6_PREFIX_LENGTH context = { @@ -749,7 +714,7 @@ def test_gen_conf_ipv6address(self, m_get_phys_by_mac): } m_get_phys_by_mac.return_value = {MACADDR: nic} net = ds.OpenNebulaNetwork(context, mock.Mock()) - 
self.assertEqual(net.gen_conf(), expected) + assert net.gen_conf() == expected @mock.patch(DS_PATH + ".get_physical_nics_by_mac") def test_gen_conf_dns(self, m_get_phys_by_mac): @@ -774,7 +739,7 @@ def test_gen_conf_dns(self, m_get_phys_by_mac): } m_get_phys_by_mac.return_value = {MACADDR: nic} net = ds.OpenNebulaNetwork(context, mock.Mock()) - self.assertEqual(net.gen_conf(), expected) + assert net.gen_conf() == expected # set DNS, ETH0_DNS, ETH0_SEARCH_DOMAIN context = { @@ -799,7 +764,7 @@ def test_gen_conf_dns(self, m_get_phys_by_mac): } m_get_phys_by_mac.return_value = {MACADDR: nic} net = ds.OpenNebulaNetwork(context, mock.Mock()) - self.assertEqual(net.gen_conf(), expected) + assert net.gen_conf() == expected @mock.patch(DS_PATH + ".get_physical_nics_by_mac") def test_gen_conf_mtu(self, m_get_phys_by_mac): @@ -822,7 +787,7 @@ def test_gen_conf_mtu(self, m_get_phys_by_mac): } m_get_phys_by_mac.return_value = {MACADDR: nic} net = ds.OpenNebulaNetwork(context, mock.Mock()) - self.assertEqual(net.gen_conf(), expected) + assert net.gen_conf() == expected # set ETH0_MTU context = { @@ -842,7 +807,7 @@ def test_gen_conf_mtu(self, m_get_phys_by_mac): } m_get_phys_by_mac.return_value = {MACADDR: nic} net = ds.OpenNebulaNetwork(context, mock.Mock()) - self.assertEqual(net.gen_conf(), expected) + assert net.gen_conf() == expected @mock.patch(DS_PATH + ".get_physical_nics_by_mac") def test_eth0(self, m_get_phys_by_mac): @@ -859,15 +824,14 @@ def test_eth0(self, m_get_phys_by_mac): }, } - self.assertEqual(net.gen_conf(), expected) + assert net.gen_conf() == expected @mock.patch(DS_PATH + ".get_physical_nics_by_mac") def test_distro_passed_through(self, m_get_physical_nics_by_mac): ds.OpenNebulaNetwork({}, mock.sentinel.distro) - self.assertEqual( - [mock.call(mock.sentinel.distro)], - m_get_physical_nics_by_mac.call_args_list, - ) + assert [ + mock.call(mock.sentinel.distro) + ] == m_get_physical_nics_by_mac.call_args_list def test_eth0_override(self): self.maxDiff = 
None @@ -904,7 +868,7 @@ def test_eth0_override(self): }, } - self.assertEqual(expected, net.gen_conf()) + assert expected == net.gen_conf() def test_eth0_v4v6_override(self): self.maxDiff = None @@ -949,7 +913,7 @@ def test_eth0_v4v6_override(self): }, } - self.assertEqual(expected, net.gen_conf()) + assert expected == net.gen_conf() def test_multiple_nics(self): """Test rendering multiple nics with names that differ from context.""" @@ -1019,7 +983,7 @@ def test_multiple_nics(self): }, } - self.assertEqual(expected, net.gen_conf()) + assert expected == net.gen_conf() class TestParseShellConfig: diff --git a/tests/unittests/sources/test_openstack.py b/tests/unittests/sources/test_openstack.py index 2d2e8a71..85582716 100644 --- a/tests/unittests/sources/test_openstack.py +++ b/tests/unittests/sources/test_openstack.py @@ -13,7 +13,7 @@ import pytest import responses -from cloudinit import helpers, settings, util +from cloudinit import settings, util from cloudinit.sources import UNSET, BrokenMetadata from cloudinit.sources import DataSourceOpenStack as ds from cloudinit.sources import convert_vendordata @@ -145,14 +145,17 @@ def _read_metadata_service(): return ds.read_metadata_service(BASE_URL, retries=0, timeout=0.1) -class TestOpenStackDataSource(test_helpers.CiTestCase): +class TestOpenStackDataSource: - with_logs = True VERSION = "latest" - def setUp(self): - super(TestOpenStackDataSource, self).setUp() - self.tmp = self.tmp_dir() + @pytest.fixture + def ds_os(self, paths): + return ds.DataSourceOpenStack( + settings.CFG_BUILTIN, + test_util.MockDistro(), + paths, + ) @responses.activate def test_successful(self): @@ -164,26 +167,22 @@ def test_successful(self): responses_mock=responses, ) f = _read_metadata_service() - self.assertEqual(VENDOR_DATA, f.get("vendordata")) - self.assertEqual(VENDOR_DATA2, f.get("vendordata2")) - self.assertEqual(CONTENT_0, f["files"]["/etc/foo.cfg"]) - self.assertEqual(CONTENT_1, f["files"]["/etc/bar/bar.cfg"]) - 
self.assertEqual(2, len(f["files"])) - self.assertEqual(USER_DATA, f.get("userdata")) - self.assertEqual(EC2_META, f.get("ec2-metadata")) - self.assertEqual(2, f.get("version")) + assert VENDOR_DATA == f.get("vendordata") + assert VENDOR_DATA2 == f.get("vendordata2") + assert CONTENT_0 == f["files"]["/etc/foo.cfg"] + assert CONTENT_1 == f["files"]["/etc/bar/bar.cfg"] + assert 2 == len(f["files"]) + assert USER_DATA == f.get("userdata") + assert EC2_META == f.get("ec2-metadata") + assert 2 == f.get("version") metadata = f["metadata"] - self.assertEqual("nova", metadata.get("availability_zone")) - self.assertEqual("sm-foo-test.novalocal", metadata.get("hostname")) - self.assertEqual( - "sm-foo-test.novalocal", metadata.get("local-hostname") - ) - self.assertEqual("sm-foo-test", metadata.get("name")) - self.assertEqual( - "b0fa911b-69d4-4476-bbe2-1c92bff6535c", metadata.get("uuid") - ) - self.assertEqual( - "b0fa911b-69d4-4476-bbe2-1c92bff6535c", metadata.get("instance-id") + assert "nova" == metadata.get("availability_zone") + assert "sm-foo-test.novalocal" == metadata.get("hostname") + assert "sm-foo-test.novalocal" == metadata.get("local-hostname") + assert "sm-foo-test" == metadata.get("name") + assert "b0fa911b-69d4-4476-bbe2-1c92bff6535c" == metadata.get("uuid") + assert "b0fa911b-69d4-4476-bbe2-1c92bff6535c" == metadata.get( + "instance-id" ) @responses.activate @@ -192,13 +191,13 @@ def test_no_ec2(self): self.VERSION, {}, {}, OS_FILES, responses_mock=responses ) f = _read_metadata_service() - self.assertEqual(VENDOR_DATA, f.get("vendordata")) - self.assertEqual(VENDOR_DATA2, f.get("vendordata2")) - self.assertEqual(CONTENT_0, f["files"]["/etc/foo.cfg"]) - self.assertEqual(CONTENT_1, f["files"]["/etc/bar/bar.cfg"]) - self.assertEqual(USER_DATA, f.get("userdata")) - self.assertEqual({}, f.get("ec2-metadata")) - self.assertEqual(2, f.get("version")) + assert VENDOR_DATA == f.get("vendordata") + assert VENDOR_DATA2 == f.get("vendordata2") + assert CONTENT_0 == 
f["files"]["/etc/foo.cfg"] + assert CONTENT_1 == f["files"]["/etc/bar/bar.cfg"] + assert USER_DATA == f.get("userdata") + assert {} == f.get("ec2-metadata") + assert 2 == f.get("version") @responses.activate def test_bad_metadata(self): @@ -209,7 +208,8 @@ def test_bad_metadata(self): _register_uris( self.VERSION, {}, {}, os_files, responses_mock=responses ) - self.assertRaises(openstack.NonReadable, _read_metadata_service) + with pytest.raises(openstack.NonReadable): + _read_metadata_service() @responses.activate def test_bad_uuid(self): @@ -222,7 +222,8 @@ def test_bad_uuid(self): _register_uris( self.VERSION, {}, {}, os_files, responses_mock=responses ) - self.assertRaises(BrokenMetadata, _read_metadata_service) + with pytest.raises(BrokenMetadata): + _read_metadata_service() @responses.activate def test_userdata_empty(self): @@ -234,11 +235,11 @@ def test_userdata_empty(self): self.VERSION, {}, {}, os_files, responses_mock=responses ) f = _read_metadata_service() - self.assertEqual(VENDOR_DATA, f.get("vendordata")) - self.assertEqual(VENDOR_DATA2, f.get("vendordata2")) - self.assertEqual(CONTENT_0, f["files"]["/etc/foo.cfg"]) - self.assertEqual(CONTENT_1, f["files"]["/etc/bar/bar.cfg"]) - self.assertFalse(f.get("userdata")) + assert VENDOR_DATA == f.get("vendordata") + assert VENDOR_DATA2 == f.get("vendordata2") + assert CONTENT_0 == f["files"]["/etc/foo.cfg"] + assert CONTENT_1 == f["files"]["/etc/bar/bar.cfg"] + assert not f.get("userdata") @responses.activate def test_vendordata_empty(self): @@ -250,9 +251,9 @@ def test_vendordata_empty(self): self.VERSION, {}, {}, os_files, responses_mock=responses ) f = _read_metadata_service() - self.assertEqual(CONTENT_0, f["files"]["/etc/foo.cfg"]) - self.assertEqual(CONTENT_1, f["files"]["/etc/bar/bar.cfg"]) - self.assertFalse(f.get("vendordata")) + assert CONTENT_0 == f["files"]["/etc/foo.cfg"] + assert CONTENT_1 == f["files"]["/etc/bar/bar.cfg"] + assert not f.get("vendordata") @responses.activate def 
test_vendordata2_empty(self): @@ -264,9 +265,9 @@ def test_vendordata2_empty(self): self.VERSION, {}, {}, os_files, responses_mock=responses ) f = _read_metadata_service() - self.assertEqual(CONTENT_0, f["files"]["/etc/foo.cfg"]) - self.assertEqual(CONTENT_1, f["files"]["/etc/bar/bar.cfg"]) - self.assertFalse(f.get("vendordata2")) + assert CONTENT_0 == f["files"]["/etc/foo.cfg"] + assert CONTENT_1 == f["files"]["/etc/bar/bar.cfg"] + assert not f.get("vendordata2") @responses.activate def test_vendordata_invalid(self): @@ -277,7 +278,8 @@ def test_vendordata_invalid(self): _register_uris( self.VERSION, {}, {}, os_files, responses_mock=responses ) - self.assertRaises(BrokenMetadata, _read_metadata_service) + with pytest.raises(BrokenMetadata): + _read_metadata_service() @responses.activate def test_vendordata2_invalid(self): @@ -288,7 +290,8 @@ def test_vendordata2_invalid(self): _register_uris( self.VERSION, {}, {}, os_files, responses_mock=responses ) - self.assertRaises(BrokenMetadata, _read_metadata_service) + with pytest.raises(BrokenMetadata): + _read_metadata_service() @responses.activate def test_metadata_invalid(self): @@ -299,11 +302,12 @@ def test_metadata_invalid(self): _register_uris( self.VERSION, {}, {}, os_files, responses_mock=responses ) - self.assertRaises(BrokenMetadata, _read_metadata_service) + with pytest.raises(BrokenMetadata): + _read_metadata_service() @responses.activate @test_helpers.mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery") - def test_datasource(self, m_dhcp): + def test_datasource(self, m_dhcp, ds_os): _register_uris( self.VERSION, EC2_FILES, @@ -311,23 +315,18 @@ def test_datasource(self, m_dhcp): OS_FILES, responses_mock=responses, ) - ds_os = ds.DataSourceOpenStack( - settings.CFG_BUILTIN, - test_util.MockDistro(), - helpers.Paths({"run_dir": self.tmp}), - ) - self.assertIsNone(ds_os.version) + assert ds_os.version is None with mock.patch.object(ds_os, "override_ds_detect", return_value=True): - 
self.assertTrue(ds_os.get_data()) - self.assertEqual(2, ds_os.version) + assert ds_os.get_data() + assert 2 == ds_os.version md = dict(ds_os.metadata) md.pop("instance-id", None) md.pop("local-hostname", None) - self.assertEqual(OSTACK_META, md) - self.assertEqual(EC2_META, ds_os.ec2_metadata) - self.assertEqual(USER_DATA, ds_os.userdata_raw) - self.assertEqual(2, len(ds_os.files)) - self.assertIsNone(ds_os.vendordata_raw) + assert OSTACK_META == md + assert EC2_META == ds_os.ec2_metadata + assert USER_DATA == ds_os.userdata_raw + assert 2 == len(ds_os.files) + assert ds_os.vendordata_raw is None m_dhcp.assert_not_called() @responses.activate @@ -336,7 +335,7 @@ def test_datasource(self, m_dhcp): "cloudinit.net.ephemeral.maybe_perform_dhcp_discovery" ) @pytest.mark.usefixtures("disable_netdev_info") - def test_local_datasource(self, m_dhcp, m_net): + def test_local_datasource(self, m_dhcp, m_net, paths, tmp_path): """OpenStackLocal calls EphemeralDHCPNetwork and gets instance data.""" _register_uris( self.VERSION, @@ -346,9 +345,9 @@ def test_local_datasource(self, m_dhcp, m_net): responses_mock=responses, ) distro = mock.MagicMock() - distro.get_tmp_exec_path = self.tmp_dir + distro.get_tmp_exec_path = str(tmp_path) ds_os_local = ds.DataSourceOpenStackLocal( - settings.CFG_BUILTIN, distro, helpers.Paths({"run_dir": self.tmp}) + settings.CFG_BUILTIN, distro, paths ) distro.fallback_interface = "eth9" # Monkey patch for dhcp m_dhcp.return_value = { @@ -359,26 +358,25 @@ def test_local_datasource(self, m_dhcp, m_net): "broadcast-address": "192.168.2.255", } - self.assertIsNone(ds_os_local.version) + assert ds_os_local.version is None with test_helpers.mock.patch.object( ds_os_local, "override_ds_detect" ) as m_detect_os: m_detect_os.return_value = True - found = ds_os_local.get_data() - self.assertTrue(found) - self.assertEqual(2, ds_os_local.version) + assert ds_os_local.get_data() is True + assert 2 == ds_os_local.version md = dict(ds_os_local.metadata) 
md.pop("instance-id", None) md.pop("local-hostname", None) - self.assertEqual(OSTACK_META, md) - self.assertEqual(EC2_META, ds_os_local.ec2_metadata) - self.assertEqual(USER_DATA, ds_os_local.userdata_raw) - self.assertEqual(2, len(ds_os_local.files)) - self.assertIsNone(ds_os_local.vendordata_raw) + assert OSTACK_META == md + assert EC2_META == ds_os_local.ec2_metadata + assert USER_DATA == ds_os_local.userdata_raw + assert 2 == len(ds_os_local.files) + assert ds_os_local.vendordata_raw is None m_dhcp.assert_called_with(distro, "eth9", None) @responses.activate - def test_bad_datasource_meta(self): + def test_bad_datasource_meta(self, caplog, ds_os): os_files = copy.deepcopy(OS_FILES) for k in list(os_files.keys()): if k.endswith("meta_data.json"): @@ -386,27 +384,22 @@ def test_bad_datasource_meta(self): _register_uris( self.VERSION, {}, {}, os_files, responses_mock=responses ) - ds_os = ds.DataSourceOpenStack( - settings.CFG_BUILTIN, - test_util.MockDistro(), - helpers.Paths({"run_dir": self.tmp}), - ) - self.assertIsNone(ds_os.version) + assert ds_os.version is None with test_helpers.mock.patch.object( ds_os, "override_ds_detect" ) as m_detect_os: m_detect_os.return_value = True found = ds_os.get_data() - self.assertFalse(found) - self.assertIsNone(ds_os.version) - self.assertRegex( - self.logs.getvalue(), + assert not found + assert ds_os.version is None + assert re.search( r"InvalidMetaDataException: Broken metadata address" r" http://(169.254.169.254|\[fe80::a9fe:a9fe\])", + caplog.text, ) @responses.activate - def test_no_datasource(self): + def test_no_datasource(self, ds_os): os_files = copy.deepcopy(OS_FILES) for k in list(os_files.keys()): if k.endswith("meta_data.json"): @@ -414,26 +407,18 @@ def test_no_datasource(self): _register_uris( self.VERSION, {}, {}, os_files, responses_mock=responses ) - ds_os = ds.DataSourceOpenStack( - settings.CFG_BUILTIN, - test_util.MockDistro(), - helpers.Paths({"run_dir": self.tmp}), - ) ds_os.ds_cfg = { "max_wait": 0, 
"timeout": 0, } - self.assertIsNone(ds_os.version) + assert ds_os.version is None with mock.patch.object(ds_os, "override_ds_detect", return_value=True): - self.assertFalse(ds_os.get_data()) - self.assertIsNone(ds_os.version) + assert not ds_os.get_data() + assert ds_os.version is None - def test_network_config_disabled_by_datasource_config(self): + def test_network_config_disabled_by_datasource_config(self, ds_os): """The network_config can be disabled from datasource config.""" mock_path = MOCK_PATH + "openstack.convert_net_json" - ds_os = ds.DataSourceOpenStack( - settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp}) - ) ds_os.ds_cfg = {"apply_network_config": False} sample_json = { "links": [{"ethernet_mac_address": "mymac"}], @@ -442,16 +427,13 @@ def test_network_config_disabled_by_datasource_config(self): } ds_os.network_json = sample_json # Ignore this content from metadata with test_helpers.mock.patch(mock_path) as m_convert_json: - self.assertIsNone(ds_os.network_config) + assert ds_os.network_config is None m_convert_json.assert_not_called() - def test_network_config_from_network_json(self): + def test_network_config_from_network_json(self, caplog, ds_os): """The datasource gets network_config from network_data.json.""" mock_path = MOCK_PATH + "openstack.convert_net_json" example_cfg = {"version": 1, "config": []} - ds_os = ds.DataSourceOpenStack( - settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp}) - ) sample_json = { "links": [{"ethernet_mac_address": "mymac"}], "networks": [], @@ -460,26 +442,21 @@ def test_network_config_from_network_json(self): ds_os.network_json = sample_json with test_helpers.mock.patch(mock_path) as m_convert_json: m_convert_json.return_value = example_cfg - self.assertEqual(example_cfg, ds_os.network_config) - self.assertIn( - "network config provided via network_json", self.logs.getvalue() - ) + assert example_cfg == ds_os.network_config + assert "network config provided via network_json" in caplog.text 
m_convert_json.assert_called_with(sample_json, known_macs=None) - def test_network_config_cached(self): + def test_network_config_cached(self, ds_os): """The datasource caches the network_config property.""" mock_path = MOCK_PATH + "openstack.convert_net_json" example_cfg = {"version": 1, "config": []} - ds_os = ds.DataSourceOpenStack( - settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp}) - ) ds_os._network_config = example_cfg with test_helpers.mock.patch(mock_path) as m_convert_json: - self.assertEqual(example_cfg, ds_os.network_config) + assert example_cfg == ds_os.network_config m_convert_json.assert_not_called() @responses.activate - def test_disabled_datasource(self): + def test_disabled_datasource(self, ds_os): os_files = copy.deepcopy(OS_FILES) os_meta = copy.deepcopy(OSTACK_META) os_meta["meta"] = { @@ -491,26 +468,21 @@ def test_disabled_datasource(self): _register_uris( self.VERSION, {}, {}, os_files, responses_mock=responses ) - ds_os = ds.DataSourceOpenStack( - settings.CFG_BUILTIN, - test_util.MockDistro(), - helpers.Paths({"run_dir": self.tmp}), - ) ds_os.ds_cfg = { "max_wait": 0, "timeout": 0, } - self.assertIsNone(ds_os.version) + assert ds_os.version is None with test_helpers.mock.patch.object( ds_os, "override_ds_detect" ) as m_detect_os: m_detect_os.return_value = True found = ds_os.get_data() - self.assertFalse(found) - self.assertIsNone(ds_os.version) + assert not found + assert ds_os.version is None @responses.activate - def test_wb__crawl_metadata_does_not_persist(self): + def test_wb__crawl_metadata_does_not_persist(self, ds_os): """_crawl_metadata returns current metadata and does not cache.""" _register_uris( self.VERSION, @@ -519,45 +491,37 @@ def test_wb__crawl_metadata_does_not_persist(self): OS_FILES, responses_mock=responses, ) - ds_os = ds.DataSourceOpenStack( - settings.CFG_BUILTIN, - test_util.MockDistro(), - helpers.Paths({"run_dir": self.tmp}), - ) crawled_data = ds_os._crawl_metadata() - self.assertEqual(UNSET, 
ds_os.ec2_metadata) - self.assertIsNone(ds_os.userdata_raw) - self.assertEqual(0, len(ds_os.files)) - self.assertIsNone(ds_os.vendordata_raw) - self.assertEqual( - [ - "dsmode", - "ec2-metadata", - "files", - "metadata", - "networkdata", - "userdata", - "vendordata", - "vendordata2", - "version", - ], - sorted(crawled_data.keys()), - ) - self.assertEqual("local", crawled_data["dsmode"]) - self.assertEqual(EC2_META, crawled_data["ec2-metadata"]) - self.assertEqual(2, len(crawled_data["files"])) + assert UNSET == ds_os.ec2_metadata + assert ds_os.userdata_raw is None + assert 0 == len(ds_os.files) + assert ds_os.vendordata_raw is None + assert [ + "dsmode", + "ec2-metadata", + "files", + "metadata", + "networkdata", + "userdata", + "vendordata", + "vendordata2", + "version", + ] == sorted(crawled_data.keys()) + assert "local" == crawled_data["dsmode"] + assert EC2_META == crawled_data["ec2-metadata"] + assert 2 == len(crawled_data["files"]) md = copy.deepcopy(crawled_data["metadata"]) md.pop("instance-id") md.pop("local-hostname") - self.assertEqual(OSTACK_META, md) - self.assertEqual( - json.loads(OS_FILES["openstack/latest/network_data.json"]), - crawled_data["networkdata"], + assert OSTACK_META == md + assert ( + json.loads(OS_FILES["openstack/latest/network_data.json"]) + == crawled_data["networkdata"] ) - self.assertEqual(USER_DATA, crawled_data["userdata"]) - self.assertEqual(VENDOR_DATA, crawled_data["vendordata"]) - self.assertEqual(VENDOR_DATA2, crawled_data["vendordata2"]) - self.assertEqual(2, crawled_data["version"]) + assert USER_DATA == crawled_data["userdata"] + assert VENDOR_DATA == crawled_data["vendordata"] + assert VENDOR_DATA2 == crawled_data["vendordata2"] + assert 2 == crawled_data["version"] class TestVendorDataLoading(test_helpers.TestCase): @@ -566,55 +530,52 @@ def cvj(self, data): def test_vd_load_none(self): # non-existant vendor-data should return none - self.assertIsNone(self.cvj(None)) + assert self.cvj(None) is None def 
test_vd_load_string(self): - self.assertEqual(self.cvj("foobar"), "foobar") + assert self.cvj("foobar") == "foobar" def test_vd_load_list(self): data = [{"foo": "bar"}, "mystring", list(["another", "list"])] - self.assertEqual(self.cvj(data), data) + assert self.cvj(data) == data def test_vd_load_dict_no_ci(self): - self.assertIsNone(self.cvj({"foo": "bar"})) + assert self.cvj({"foo": "bar"}) is None def test_vd_load_dict_ci_dict(self): - self.assertRaises( - ValueError, self.cvj, {"foo": "bar", "cloud-init": {"x": 1}} - ) + with pytest.raises(ValueError): + self.cvj({"foo": "bar", "cloud-init": {"x": 1}}) def test_vd_load_dict_ci_string(self): data = {"foo": "bar", "cloud-init": "VENDOR_DATA"} - self.assertEqual(self.cvj(data), data["cloud-init"]) + assert self.cvj(data) == data["cloud-init"] def test_vd_load_dict_ci_list(self): data = {"foo": "bar", "cloud-init": ["VD_1", "VD_2"]} - self.assertEqual(self.cvj(data), data["cloud-init"]) + assert self.cvj(data) == data["cloud-init"] @test_helpers.mock.patch(MOCK_PATH + "util.is_x86") -class TestDetectOpenStack(test_helpers.CiTestCase): - def setUp(self): - self.tmp = self.tmp_dir() +class TestDetectOpenStack: - def _fake_ds(self) -> ds.DataSourceOpenStack: + @pytest.fixture + def fake_ds(self, paths) -> ds.DataSourceOpenStack: return ds.DataSourceOpenStack( settings.CFG_BUILTIN, test_util.MockDistro(), - helpers.Paths({"run_dir": self.tmp}), + paths, ) - def test_ds_detect_non_intel_x86(self, m_is_x86): + def test_ds_detect_non_intel_x86(self, m_is_x86, fake_ds): """Return True on non-intel platforms because dmi isn't conclusive.""" m_is_x86.return_value = False - self.assertTrue( - self._fake_ds().ds_detect(), - "Expected ds_detect == True", - ) + assert fake_ds.ds_detect(), "Expected ds_detect == True" @test_helpers.mock.patch(MOCK_PATH + "util.get_proc_env") @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data") - def test_not_ds_detect_intel_x86_ec2(self, m_dmi, m_proc_env, m_is_x86): + def 
test_not_ds_detect_intel_x86_ec2( + self, m_dmi, m_proc_env, m_is_x86, fake_ds + ): """Return False on EC2 platforms.""" m_is_x86.return_value = True # No product_name in proc/1/environ @@ -628,28 +589,24 @@ def fake_dmi_read(dmi_key): assert False, "Unexpected dmi read of %s" % dmi_key m_dmi.side_effect = fake_dmi_read - self.assertFalse( - self._fake_ds().ds_detect(), - "Expected ds_detect == False on EC2", - ) + assert not fake_ds.ds_detect(), "Expected ds_detect == False on EC2" m_proc_env.assert_called_with(1) @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data") - def test_ds_detect_intel_product_name_compute(self, m_dmi, m_is_x86): + def test_ds_detect_intel_product_name_compute( + self, m_dmi, m_is_x86, fake_ds + ): """Return True on OpenStack compute and nova instances.""" m_is_x86.return_value = True openstack_product_names = ["OpenStack Nova", "OpenStack Compute"] for product_name in openstack_product_names: m_dmi.return_value = product_name - self.assertTrue( - self._fake_ds().ds_detect(), - "Failed to ds_detect", - ) + assert fake_ds.ds_detect(), "Failed to ds_detect" @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data") def test_ds_detect_opentelekomcloud_chassis_asset_tag( - self, m_dmi, m_is_x86 + self, m_dmi, m_is_x86, fake_ds ): """Return True on OpenStack reporting OpenTelekomCloud asset-tag.""" m_is_x86.return_value = True @@ -662,13 +619,14 @@ def fake_dmi_read(dmi_key): assert False, "Unexpected dmi read of %s" % dmi_key m_dmi.side_effect = fake_dmi_read - self.assertTrue( - self._fake_ds().ds_detect(), - "Expected ds_detect == True on OpenTelekomCloud", - ) + assert ( + fake_ds.ds_detect() + ), "Expected ds_detect == True on OpenTelekomCloud" @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data") - def test_ds_detect_sapccloud_chassis_asset_tag(self, m_dmi, m_is_x86): + def test_ds_detect_sapccloud_chassis_asset_tag( + self, m_dmi, m_is_x86, fake_ds + ): """Return True on OpenStack reporting SAP CCloud VM asset-tag.""" 
m_is_x86.return_value = True @@ -680,13 +638,14 @@ def fake_dmi_read(dmi_key): assert False, "Unexpected dmi read of %s" % dmi_key m_dmi.side_effect = fake_dmi_read - self.assertTrue( - self._fake_ds().ds_detect(), - "Expected ds_detect == True on SAP CCloud VM", - ) + assert ( + fake_ds.ds_detect() + ), "Expected ds_detect == True on SAP CCloud VM" @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data") - def test_ds_detect_huaweicloud_chassis_asset_tag(self, m_dmi, m_is_x86): + def test_ds_detect_huaweicloud_chassis_asset_tag( + self, m_dmi, m_is_x86, fake_ds + ): """Return True on OpenStack reporting Huawei Cloud VM asset-tag.""" m_is_x86.return_value = True @@ -698,14 +657,13 @@ def fake_asset_tag_dmi_read(dmi_key): assert False, "Unexpected dmi read of %s" % dmi_key m_dmi.side_effect = fake_asset_tag_dmi_read - self.assertTrue( - self._fake_ds().ds_detect(), - "Expected ds_detect == True on Huawei Cloud VM", - ) + assert ( + fake_ds.ds_detect() + ), "Expected ds_detect == True on Huawei Cloud VM" @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data") def test_ds_detect_samsung_cloud_platform_chassis_asset_tag( - self, m_dmi, m_is_x86 + self, m_dmi, m_is_x86, fake_ds ): """Return True on OpenStack reporting Samsung Cloud Platform VM asset-tag.""" @@ -719,13 +677,14 @@ def fake_asset_tag_dmi_read(dmi_key): assert False, "Unexpected dmi read of %s" % dmi_key m_dmi.side_effect = fake_asset_tag_dmi_read - self.assertTrue( - self._fake_ds().ds_detect(), - "Expected ds_detect == True on Samsung Cloud Platform VM", - ) + assert ( + fake_ds.ds_detect() + ), "Expected ds_detect == True on Samsung Cloud Platform VM" @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data") - def test_ds_detect_oraclecloud_chassis_asset_tag(self, m_dmi, m_is_x86): + def test_ds_detect_oraclecloud_chassis_asset_tag( + self, m_dmi, m_is_x86, fake_ds + ): """Return True on OpenStack reporting Oracle cloud asset-tag.""" m_is_x86.return_value = True @@ -737,20 +696,18 @@ def 
fake_dmi_read(dmi_key): assert False, "Unexpected dmi read of %s" % dmi_key m_dmi.side_effect = fake_dmi_read - ds = self._fake_ds() + ds = fake_ds ds.sys_cfg = {"datasource_list": ["Oracle"]} - self.assertTrue( - ds.ds_detect(), - "Expected ds_detect == True on OracleCloud.com", - ) + assert ds.ds_detect(), "Expected ds_detect == True on OracleCloud.com" ds.sys_cfg = {"datasource_list": []} - self.assertFalse( - ds.ds_detect(), - "Expected ds_detect == False.", - ) + assert not ds.ds_detect(), "Expected ds_detect == False." - def _test_ds_detect_nova_compute_chassis_asset_tag( - self, m_dmi, m_is_x86, chassis_tag + @pytest.mark.parametrize( + ["chassis_tag"], [("OpenStack Nova",), ("OpenStack Compute",)] + ) + @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data") + def test_ds_detect_chassis_asset_tag( + self, m_dmi, m_is_x86, chassis_tag, fake_ds ): """Return True on OpenStack reporting generic asset-tag.""" m_is_x86.return_value = True @@ -763,26 +720,15 @@ def fake_dmi_read(dmi_key): assert False, "Unexpected dmi read of %s" % dmi_key m_dmi.side_effect = fake_dmi_read - self.assertTrue( - self._fake_ds().ds_detect(), - "Expected ds_detect == True on Generic OpenStack Platform", - ) - - @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data") - def test_ds_detect_nova_chassis_asset_tag(self, m_dmi, m_is_x86): - self._test_ds_detect_nova_compute_chassis_asset_tag( - m_dmi, m_is_x86, "OpenStack Nova" - ) - - @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data") - def test_ds_detect_compute_chassis_asset_tag(self, m_dmi, m_is_x86): - self._test_ds_detect_nova_compute_chassis_asset_tag( - m_dmi, m_is_x86, "OpenStack Compute" - ) + assert ( + fake_ds.ds_detect() + ), "Expected ds_detect == True on Generic OpenStack Platform" @test_helpers.mock.patch(MOCK_PATH + "util.get_proc_env") @test_helpers.mock.patch(MOCK_PATH + "dmi.read_dmi_data") - def test_ds_detect_by_proc_1_environ(self, m_dmi, m_proc_env, m_is_x86): + def test_ds_detect_by_proc_1_environ( + 
self, m_dmi, m_proc_env, m_is_x86, fake_ds + ): """Return True when nova product_name specified in /proc/1/environ.""" m_is_x86.return_value = True # Nova product_name in proc/1/environ @@ -799,14 +745,13 @@ def fake_dmi_read(dmi_key): assert False, "Unexpected dmi read of %s" % dmi_key m_dmi.side_effect = fake_dmi_read - self.assertTrue( - self._fake_ds().ds_detect(), - "Expected ds_detect == True on OpenTelekomCloud", - ) + assert ( + fake_ds.ds_detect() + ), "Expected ds_detect == True on OpenTelekomCloud" m_proc_env.assert_called_with(1) -class TestMetadataReader(test_helpers.CiTestCase): +class TestMetadataReader: """Test the MetadataReader.""" burl = "http://169.254.169.254/" @@ -856,9 +801,9 @@ def test__find_working_version(self): openstack.OS_LATEST, ] ) - self.assertEqual( - openstack.OS_LIBERTY, - openstack.MetadataReader(self.burl)._find_working_version(), + assert ( + openstack.OS_LIBERTY + == openstack.MetadataReader(self.burl)._find_working_version() ) @responses.activate @@ -866,9 +811,9 @@ def test__find_working_version_uses_latest(self): """'latest' should be used if no supported versions.""" unsup1, unsup2 = ("2016-11-09", "2017-06-06") self.register_versions([unsup1, unsup2, openstack.OS_LATEST]) - self.assertEqual( - openstack.OS_LATEST, - openstack.MetadataReader(self.burl)._find_working_version(), + assert ( + openstack.OS_LATEST + == openstack.MetadataReader(self.burl)._find_working_version() ) @responses.activate @@ -909,5 +854,5 @@ def test_read_v2_os_ocata(self): } reader = openstack.MetadataReader(self.burl) reader._read_ec2_metadata = mock_read_ec2 - self.assertEqual(expected, reader.read_v2()) - self.assertEqual(1, mock_read_ec2.call_count) + assert expected == reader.read_v2() + assert 1 == mock_read_ec2.call_count diff --git a/tests/unittests/sources/test_oracle.py b/tests/unittests/sources/test_oracle.py index 1a61510a..14d7ad29 100644 --- a/tests/unittests/sources/test_oracle.py +++ b/tests/unittests/sources/test_oracle.py @@ 
-1340,7 +1340,9 @@ def exit_context_manager(*args): if ephemeral_dhcp_setup_raises_exception: def raise_exception(**kwargs): - raise Exception("Failed to setup ephemeral network") + raise Exception( # pylint: disable=W0719 + "Failed to setup ephemeral network" + ) m_ephemeral_network.side_effect = raise_exception else: @@ -1371,12 +1373,12 @@ def assert_in_context_manager(**kwargs): ), ): # datasource fails/exits if ephemeral dhcp setup fails - with ( - pytest.raises(Exception) - if ephemeral_dhcp_setup_raises_exception - else test_helpers.does_not_raise() - ): - assert oracle_ds._check_and_get_data() + if ephemeral_dhcp_setup_raises_exception: + with pytest.raises(Exception): + assert oracle_ds._check_and_get_data() + else: + with test_helpers.does_not_raise(): + assert oracle_ds._check_and_get_data() if perform_dhcp_setup: assert [ diff --git a/tests/unittests/sources/test_ovf.py b/tests/unittests/sources/test_ovf.py index b3bc84a8..5b771df1 100644 --- a/tests/unittests/sources/test_ovf.py +++ b/tests/unittests/sources/test_ovf.py @@ -3,15 +3,18 @@ # Author: Scott Moser # # This file is part of cloud-init. See LICENSE file for license information. +# pylint: disable=attribute-defined-outside-init import base64 +import os from collections import OrderedDict from textwrap import dedent +import pytest + from cloudinit import subp, util -from cloudinit.helpers import Paths from cloudinit.sources import DataSourceOVF as dsovf -from tests.unittests.helpers import CiTestCase, mock +from tests.unittests.helpers import mock MPATH = "cloudinit.sources.DataSourceOVF." 
@@ -48,7 +51,7 @@ def fill_properties(props, template=OVF_ENV_CONTENT): return template.format(properties=properties) -class TestReadOvfEnv(CiTestCase): +class TestReadOvfEnv: def test_with_b64_userdata(self): user_data = "#!/bin/sh\necho hello world\n" user_data_b64 = base64.b64encode(user_data.encode()).decode() @@ -59,26 +62,26 @@ def test_with_b64_userdata(self): } env = fill_properties(props) md, ud, cfg = dsovf.read_ovf_environment(env) - self.assertEqual({"instance-id": "inst-001"}, md) - self.assertEqual(user_data.encode(), ud) - self.assertEqual({"password": "passw0rd"}, cfg) + assert {"instance-id": "inst-001"} == md + assert user_data.encode() == ud + assert {"password": "passw0rd"} == cfg def test_with_non_b64_userdata(self): user_data = "my-user-data" props = {"user-data": user_data, "instance-id": "inst-001"} env = fill_properties(props) md, ud, cfg = dsovf.read_ovf_environment(env) - self.assertEqual({"instance-id": "inst-001"}, md) - self.assertEqual(user_data.encode(), ud) - self.assertEqual({}, cfg) + assert {"instance-id": "inst-001"} == md + assert user_data.encode() == ud + assert {} == cfg def test_with_no_userdata(self): props = {"password": "passw0rd", "instance-id": "inst-001"} env = fill_properties(props) md, ud, cfg = dsovf.read_ovf_environment(env) - self.assertEqual({"instance-id": "inst-001"}, md) - self.assertEqual({"password": "passw0rd"}, cfg) - self.assertIsNone(ud) + assert {"instance-id": "inst-001"} == md + assert {"password": "passw0rd"} == cfg + assert ud is None def test_with_b64_network_config_enable_read_network(self): network_config = dedent( @@ -109,27 +112,24 @@ def test_with_b64_network_config_enable_read_network(self): } env = fill_properties(props) md, ud, cfg = dsovf.read_ovf_environment(env, True) - self.assertEqual("inst-001", md["instance-id"]) - self.assertEqual({"password": "passw0rd"}, cfg) - self.assertEqual( - { - "version": 2, - "ethernets": { - "nics": { - "nameservers": { - "addresses": ["127.0.0.53"], - 
"search": ["eng.vmware.com", "vmware.com"], - }, - "match": {"name": "eth*"}, - "gateway4": "10.10.10.253", - "dhcp4": False, - "addresses": ["10.10.10.1/24"], - } - }, + assert "inst-001" == md["instance-id"] + assert {"password": "passw0rd"} == cfg + assert { + "version": 2, + "ethernets": { + "nics": { + "nameservers": { + "addresses": ["127.0.0.53"], + "search": ["eng.vmware.com", "vmware.com"], + }, + "match": {"name": "eth*"}, + "gateway4": "10.10.10.253", + "dhcp4": False, + "addresses": ["10.10.10.1/24"], + } }, - md["network-config"], - ) - self.assertIsNone(ud) + } == md["network-config"] + assert ud is None def test_with_non_b64_network_config_enable_read_network(self): network_config = dedent( @@ -159,9 +159,9 @@ def test_with_non_b64_network_config_enable_read_network(self): } env = fill_properties(props) md, ud, cfg = dsovf.read_ovf_environment(env, True) - self.assertEqual({"instance-id": "inst-001"}, md) - self.assertEqual({"password": "passw0rd"}, cfg) - self.assertIsNone(ud) + assert {"instance-id": "inst-001"} == md + assert {"password": "passw0rd"} == cfg + assert ud is None def test_with_b64_network_config_disable_read_network(self): network_config = dedent( @@ -192,52 +192,45 @@ def test_with_b64_network_config_disable_read_network(self): } env = fill_properties(props) md, ud, cfg = dsovf.read_ovf_environment(env) - self.assertEqual({"instance-id": "inst-001"}, md) - self.assertEqual({"password": "passw0rd"}, cfg) - self.assertIsNone(ud) + assert {"instance-id": "inst-001"} == md + assert {"password": "passw0rd"} == cfg + assert ud is None -class TestDatasourceOVF(CiTestCase): - with_logs = True +class TestDatasourceOVF: + @pytest.fixture + def ds(self, paths): + return dsovf.DataSourceOVF(sys_cfg={}, distro={}, paths=paths) - def setUp(self): - super(TestDatasourceOVF, self).setUp() - self.datasource = dsovf.DataSourceOVF - self.tdir = self.tmp_dir() - - def test_get_data_seed_dir(self): + def test_get_data_seed_dir(self, ds, paths): 
"""Platform info properly reports when getting data from seed dir.""" - paths = Paths({"cloud_dir": self.tdir, "run_dir": self.tdir}) # Write ovf-env.xml seed file - seed_dir = self.tmp_path("seed", dir=self.tdir) - ovf_env = self.tmp_path("ovf-env.xml", dir=seed_dir) + ovf_env = os.path.join(paths.seed_dir, "ovf-env.xml") util.write_file(ovf_env, OVF_ENV_CONTENT) - ds = self.datasource(sys_cfg={}, distro={}, paths=paths) - self.assertEqual("ovf", ds.cloud_name) - self.assertEqual("ovf", ds.platform_type) + assert "ovf" == ds.cloud_name + assert "ovf" == ds.platform_type with mock.patch(MPATH + "transport_vmware_guestinfo") as m_guestd: with mock.patch(MPATH + "transport_iso9660") as m_iso9660: m_iso9660.return_value = NOT_FOUND m_guestd.return_value = NOT_FOUND - self.assertTrue(ds.get_data()) - self.assertEqual( - "ovf (%s/seed/ovf-env.xml)" % self.tdir, ds.subplatform + assert ds.get_data() + assert ( + "ovf (%s/ovf-env.xml)" % paths.seed_dir == ds.subplatform ) + @pytest.mark.parametrize( + ["guestinfo", "iso"], + [ + pytest.param(False, True, id="vmware_guestinfo"), + pytest.param(True, False, id="iso9660"), + ], + ) @mock.patch("cloudinit.subp.subp") @mock.patch("cloudinit.sources.DataSource.persist_instance_data") def test_get_data_vmware_guestinfo_with_network_config( - self, m_persist, m_subp + self, m_persist, m_subp, guestinfo, iso, ds, paths ): - self._test_get_data_with_network_config(guestinfo=False, iso=True) - - @mock.patch("cloudinit.subp.subp") - @mock.patch("cloudinit.sources.DataSource.persist_instance_data") - def test_get_data_iso9660_with_network_config(self, m_persist, m_subp): - self._test_get_data_with_network_config(guestinfo=True, iso=False) - - def _test_get_data_with_network_config(self, guestinfo, iso): network_config = dedent( """\ network: @@ -264,8 +257,6 @@ def _test_get_data_with_network_config(self, guestinfo, iso): "instance-id": "inst-001", } env = fill_properties(props) - paths = Paths({"cloud_dir": self.tdir, "run_dir": 
self.tdir}) - ds = self.datasource(sys_cfg={}, distro={}, paths=paths) with mock.patch( MPATH + "transport_vmware_guestinfo", return_value=env if guestinfo else NOT_FOUND, @@ -274,36 +265,33 @@ def _test_get_data_with_network_config(self, guestinfo, iso): MPATH + "transport_iso9660", return_value=env if iso else NOT_FOUND, ): - self.assertTrue(ds.get_data()) - self.assertEqual("inst-001", ds.metadata["instance-id"]) - self.assertEqual( - { - "version": 2, - "ethernets": { - "nics": { - "nameservers": { - "addresses": ["127.0.0.53"], - "search": ["vmware.com"], - }, - "match": {"name": "eth*"}, - "gateway4": "10.10.10.253", - "dhcp4": False, - "addresses": ["10.10.10.1/24"], - } - }, + assert ds.get_data() is True + assert "inst-001" == ds.metadata["instance-id"] + assert { + "version": 2, + "ethernets": { + "nics": { + "nameservers": { + "addresses": ["127.0.0.53"], + "search": ["vmware.com"], + }, + "match": {"name": "eth*"}, + "gateway4": "10.10.10.253", + "dhcp4": False, + "addresses": ["10.10.10.1/24"], + } }, - ds.network_config, - ) + } == ds.network_config -class TestTransportIso9660(CiTestCase): - def setUp(self): - super(TestTransportIso9660, self).setUp() - self.add_patch("cloudinit.util.find_devs_with", "m_find_devs_with") - self.add_patch("cloudinit.util.mounts", "m_mounts") - self.add_patch("cloudinit.util.mount_cb", "m_mount_cb") - self.add_patch( - "cloudinit.sources.DataSourceOVF.get_ovf_env", "m_get_ovf_env" +class TestTransportIso9660: + @pytest.fixture(autouse=True) + def fixtures(self, mocker): + self.m_find_devs_with = mocker.patch("cloudinit.util.find_devs_with") + self.m_mounts = mocker.patch("cloudinit.util.mounts") + self.m_mount_cb = mocker.patch("cloudinit.util.mount_cb") + self.m_get_ovf_env = mocker.patch( + "cloudinit.sources.DataSourceOVF.get_ovf_env" ) self.m_get_ovf_env.return_value = ("myfile", "mycontent") @@ -318,7 +306,7 @@ def test_find_already_mounted(self): } self.m_mounts.return_value = mounts - self.assertEqual("mycontent", 
dsovf.transport_iso9660()) + assert "mycontent" == dsovf.transport_iso9660() def test_find_already_mounted_skips_non_iso9660(self): """Check we call get_ovf_env ignoring non iso9660""" @@ -342,7 +330,7 @@ def test_find_already_mounted_skips_non_iso9660(self): sorted(mounts.items(), key=lambda t: t[0]) ) - self.assertEqual("mycontent", dsovf.transport_iso9660()) + assert "mycontent" == dsovf.transport_iso9660() def test_find_already_mounted_matches_kname(self): """Check we dont regex match on basename of the device""" @@ -356,7 +344,7 @@ def test_find_already_mounted_matches_kname(self): # we're skipping an entry which fails to match. self.m_mounts.return_value = mounts - self.assertEqual(NOT_FOUND, dsovf.transport_iso9660()) + assert NOT_FOUND == dsovf.transport_iso9660() def test_mount_cb_called_on_blkdevs_with_iso9660(self): """Check we call mount_cb on blockdevs with iso9660 only""" @@ -364,7 +352,7 @@ def test_mount_cb_called_on_blkdevs_with_iso9660(self): self.m_find_devs_with.return_value = ["/dev/sr0"] self.m_mount_cb.return_value = ("myfile", "mycontent") - self.assertEqual("mycontent", dsovf.transport_iso9660()) + assert "mycontent" == dsovf.transport_iso9660() self.m_mount_cb.assert_called_with( "/dev/sr0", dsovf.get_ovf_env, mtype="iso9660" ) @@ -379,7 +367,7 @@ def test_mount_cb_called_on_blkdevs_with_iso9660_check_regex(self): ] self.m_mount_cb.return_value = ("myfile", "mycontent") - self.assertEqual("mycontent", dsovf.transport_iso9660()) + assert "mycontent" == dsovf.transport_iso9660() self.m_mount_cb.assert_called_with( "/dev/sr0", dsovf.get_ovf_env, mtype="iso9660" ) @@ -389,8 +377,8 @@ def test_mount_cb_not_called_no_matches(self): self.m_mounts.return_value = {} self.m_find_devs_with.return_value = ["/dev/vg/myovf"] - self.assertEqual(NOT_FOUND, dsovf.transport_iso9660()) - self.assertEqual(0, self.m_mount_cb.call_count) + assert NOT_FOUND == dsovf.transport_iso9660() + assert 0 == self.m_mount_cb.call_count def 
test_mount_cb_called_require_iso_false(self): """Check we call mount_cb on blockdevs with require_iso=False""" @@ -398,9 +386,7 @@ def test_mount_cb_called_require_iso_false(self): self.m_find_devs_with.return_value = ["/dev/xvdz"] self.m_mount_cb.return_value = ("myfile", "mycontent") - self.assertEqual( - "mycontent", dsovf.transport_iso9660(require_iso=False) - ) + assert "mycontent" == dsovf.transport_iso9660(require_iso=False) self.m_mount_cb.assert_called_with( "/dev/xvdz", dsovf.get_ovf_env, mtype=None @@ -408,48 +394,47 @@ def test_mount_cb_called_require_iso_false(self): def test_maybe_cdrom_device_none(self): """Test maybe_cdrom_device returns False for none/empty input""" - self.assertFalse(dsovf.maybe_cdrom_device(None)) - self.assertFalse(dsovf.maybe_cdrom_device("")) + assert not dsovf.maybe_cdrom_device(None) + assert not dsovf.maybe_cdrom_device("") def test_maybe_cdrom_device_non_string_exception(self): """Test maybe_cdrom_device raises ValueError on non-string types""" - with self.assertRaises(ValueError): + with pytest.raises(ValueError): dsovf.maybe_cdrom_device({"a": "eleven"}) def test_maybe_cdrom_device_false_on_multi_dir_paths(self): """Test maybe_cdrom_device is false on /dev[/.*]/* paths""" - self.assertFalse(dsovf.maybe_cdrom_device("/dev/foo/sr0")) - self.assertFalse(dsovf.maybe_cdrom_device("foo/sr0")) - self.assertFalse(dsovf.maybe_cdrom_device("../foo/sr0")) - self.assertFalse(dsovf.maybe_cdrom_device("../foo/sr0")) + assert not dsovf.maybe_cdrom_device("/dev/foo/sr0") + assert not dsovf.maybe_cdrom_device("foo/sr0") + assert not dsovf.maybe_cdrom_device("../foo/sr0") + assert not dsovf.maybe_cdrom_device("../foo/sr0") def test_maybe_cdrom_device_true_on_hd_partitions(self): """Test maybe_cdrom_device is false on /dev/hd[a-z][0-9]+ paths""" - self.assertTrue(dsovf.maybe_cdrom_device("/dev/hda1")) - self.assertTrue(dsovf.maybe_cdrom_device("hdz9")) + assert dsovf.maybe_cdrom_device("/dev/hda1") + assert dsovf.maybe_cdrom_device("hdz9") 
def test_maybe_cdrom_device_true_on_valid_relative_paths(self): """Test maybe_cdrom_device normalizes paths""" - self.assertTrue(dsovf.maybe_cdrom_device("/dev/wark/../sr9")) - self.assertTrue(dsovf.maybe_cdrom_device("///sr0")) - self.assertTrue(dsovf.maybe_cdrom_device("/sr0")) - self.assertTrue(dsovf.maybe_cdrom_device("//dev//hda")) + assert dsovf.maybe_cdrom_device("/dev/wark/../sr9") + assert dsovf.maybe_cdrom_device("///sr0") + assert dsovf.maybe_cdrom_device("/sr0") + assert dsovf.maybe_cdrom_device("//dev//hda") def test_maybe_cdrom_device_true_on_xvd_partitions(self): """Test maybe_cdrom_device returns true on xvd*""" - self.assertTrue(dsovf.maybe_cdrom_device("/dev/xvda")) - self.assertTrue(dsovf.maybe_cdrom_device("/dev/xvda1")) - self.assertTrue(dsovf.maybe_cdrom_device("xvdza1")) + assert dsovf.maybe_cdrom_device("/dev/xvda") + assert dsovf.maybe_cdrom_device("/dev/xvda1") + assert dsovf.maybe_cdrom_device("xvdza1") @mock.patch(MPATH + "subp.which") @mock.patch(MPATH + "subp.subp") -class TestTransportVmwareGuestinfo(CiTestCase): +class TestTransportVmwareGuestinfo: """Test the com.vmware.guestInfo transport implemented in transport_vmware_guestinfo.""" rpctool = "vmware-rpctool" - with_logs = True rpctool_path = "/not/important/vmware-rpctool" vmtoolsd_path = "/not/important/vmtoolsd" @@ -457,38 +442,32 @@ def test_without_vmware_rpctool_and_without_vmtoolsd_returns_notfound( self, m_subp, m_which ): m_which.side_effect = [None, None] - self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo()) - self.assertEqual( - 0, - m_subp.call_count, - "subp should not be called if no rpctool in path.", - ) + assert NOT_FOUND == dsovf.transport_vmware_guestinfo() + assert ( + 0 == m_subp.call_count + ), "subp should not be called if no rpctool in path." 
def test_without_vmware_rpctool_and_with_vmtoolsd_returns_found( self, m_subp, m_which ): m_which.side_effect = [self.vmtoolsd_path, None] m_subp.side_effect = [(fill_properties({}), "")] - self.assertNotEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo()) - self.assertEqual( - 1, - m_subp.call_count, - "subp should be called once bc/ rpctool missing.", - ) + assert NOT_FOUND != dsovf.transport_vmware_guestinfo() + assert ( + 1 == m_subp.call_count + ), "subp should be called once bc/ rpctool missing." - def test_notfound_on_exit_code_1(self, m_subp, m_which): + def test_notfound_on_exit_code_1(self, m_subp, m_which, caplog): """If vmware-rpctool exits 1, then must return not found.""" m_which.side_effect = [None, self.rpctool_path] m_subp.side_effect = subp.ProcessExecutionError( stdout="", stderr="No value found", exit_code=1, cmd=["unused"] ) - self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo()) - self.assertEqual(1, m_subp.call_count) - self.assertNotIn( - "WARNING", - self.logs.getvalue(), - "exit code of 1 by rpctool should not cause warning.", - ) + assert NOT_FOUND == dsovf.transport_vmware_guestinfo() + assert 1 == m_subp.call_count + assert ( + "WARNING" not in caplog.text + ), "exit code of 1 by rpctool should not cause warning." def test_notfound_if_no_content_but_exit_zero(self, m_subp, m_which): """If vmware-rpctool exited 0 with no stdout is normal not-found. 
@@ -499,32 +478,34 @@ def test_notfound_if_no_content_but_exit_zero(self, m_subp, m_which): """ m_which.side_effect = [None, self.rpctool_path] m_subp.return_value = ("", "") - self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo()) - self.assertEqual(1, m_subp.call_count) + assert NOT_FOUND == dsovf.transport_vmware_guestinfo() + assert 1 == m_subp.call_count - def test_notfound_and_warns_on_unexpected_exit_code(self, m_subp, m_which): + def test_notfound_and_warns_on_unexpected_exit_code( + self, m_subp, m_which, caplog + ): """If vmware-rpctool exits non zero or 1, warnings should be logged.""" m_which.side_effect = [None, self.rpctool_path] m_subp.side_effect = subp.ProcessExecutionError( stdout=None, stderr="No value found", exit_code=2, cmd=["unused"] ) - self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo()) - self.assertEqual(1, m_subp.call_count) - self.assertIn( - "WARNING", - self.logs.getvalue(), - "exit code of 2 by rpctool should log WARNING.", - ) + assert NOT_FOUND == dsovf.transport_vmware_guestinfo() + assert 1 == m_subp.call_count + assert ( + "WARNING" in caplog.text + ), "exit code of 2 by rpctool should log WARNING." 
def test_found_when_guestinfo_present(self, m_subp, m_which): """When there is a ovf info, transport should return it.""" m_which.side_effect = [None, self.rpctool_path] content = fill_properties({}) m_subp.return_value = (content, "") - self.assertEqual(content, dsovf.transport_vmware_guestinfo()) - self.assertEqual(1, m_subp.call_count) + assert content == dsovf.transport_vmware_guestinfo() + assert 1 == m_subp.call_count - def test_vmware_rpctool_fails_and_vmtoolsd_fails(self, m_subp, m_which): + def test_vmware_rpctool_fails_and_vmtoolsd_fails( + self, m_subp, m_which, caplog + ): """When vmware-rpctool fails and vmtoolsd fails""" m_which.side_effect = [self.vmtoolsd_path, self.rpctool_path] m_subp.side_effect = [ @@ -535,13 +516,11 @@ def test_vmware_rpctool_fails_and_vmtoolsd_fails(self, m_subp, m_which): stdout="", stderr="No value found", exit_code=1, cmd=["unused"] ), ] - self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo()) - self.assertEqual(2, m_subp.call_count) - self.assertNotIn( - "WARNING", - self.logs.getvalue(), - "exit code of 1 by rpctool and vmtoolsd should not cause warning.", - ) + assert NOT_FOUND == dsovf.transport_vmware_guestinfo() + assert 2 == m_subp.call_count + assert ( + "WARNING" not in caplog.text + ), "exit code of 1 by rpctool and vmtoolsd should not cause warning." 
def test_vmware_rpctool_fails_and_vmtoolsd_success(self, m_subp, m_which): """When vmware-rpctool fails but vmtoolsd succeeds""" @@ -552,8 +531,8 @@ def test_vmware_rpctool_fails_and_vmtoolsd_success(self, m_subp, m_which): ), (fill_properties({}), ""), ] - self.assertNotEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo()) - self.assertEqual(2, m_subp.call_count) + assert NOT_FOUND != dsovf.transport_vmware_guestinfo() + assert 2 == m_subp.call_count # diff --git a/tests/unittests/sources/test_rbx.py b/tests/unittests/sources/test_rbx.py index 475bf498..f239d542 100644 --- a/tests/unittests/sources/test_rbx.py +++ b/tests/unittests/sources/test_rbx.py @@ -1,8 +1,11 @@ import json +from unittest import mock -from cloudinit import distros, helpers, subp +import pytest + +from cloudinit import distros, subp from cloudinit.sources import DataSourceRbxCloud as ds -from tests.unittests.helpers import CiTestCase, mock, populate_dir +from tests.unittests.helpers import populate_dir DS_PATH = "cloudinit.sources.DataSourceRbxCloud" @@ -76,54 +79,42 @@ } -class TestRbxDataSource(CiTestCase): - parsed_user = None - allowed_subp = ["bash"] - - def _fetch_distro(self, kind): - cls = distros.fetch(kind) - paths = helpers.Paths({}) - return cls(kind, {}, paths) - - def setUp(self): - super(TestRbxDataSource, self).setUp() - self.tmp = self.tmp_dir() - self.paths = helpers.Paths( - {"cloud_dir": self.tmp, "run_dir": self.tmp} - ) +class TestRbxDataSource: + @pytest.fixture + def fetch_distro(self, paths): + def _fetch_distro(kind): + cls = distros.fetch(kind) + return cls(kind, {}, paths) - # defaults for few tests - self.ds = ds.DataSourceRbxCloud - self.seed_dir = self.paths.seed_dir - self.sys_cfg = {"datasource": {"RbxCloud": {"dsmode": "local"}}} + return _fetch_distro - def test_seed_read_user_data_callback_empty_file(self): - populate_user_metadata(self.seed_dir, "") - populate_cloud_metadata(self.seed_dir, {}) - results = ds.read_user_data_callback(self.seed_dir) + def 
test_gratuitous_arp_run_standard_arping(self, m_subp): + def test_gratuitous_arp_run_standard_arping(self, m_subp, fetch_distro): """Test handle run arping & parameters.""" items = [ {"destination": "172.17.0.2", "source": "172.16.6.104"}, @@ -175,21 +164,18 @@ def test_gratuitous_arp_run_standard_arping(self, m_subp): "source": "172.16.6.104", }, ] - ds.gratuitous_arp(items, self._fetch_distro("ubuntu")) - self.assertEqual( - [ - mock.call( - ["arping", "-c", "2", "-S", "172.16.6.104", "172.17.0.2"] - ), - mock.call( - ["arping", "-c", "2", "-S", "172.16.6.104", "172.17.0.2"] - ), - ], - m_subp.call_args_list, - ) + ds.gratuitous_arp(items, fetch_distro("ubuntu")) + assert [ + mock.call( + ["arping", "-c", "2", "-S", "172.16.6.104", "172.17.0.2"] + ), + mock.call( + ["arping", "-c", "2", "-S", "172.16.6.104", "172.17.0.2"] + ), + ] == m_subp.call_args_list @mock.patch(DS_PATH + ".subp.subp") - def test_handle_rhel_like_arping(self, m_subp): + def test_handle_rhel_like_arping(self, m_subp, fetch_distro): """Test handle on RHEL-like distros.""" items = [ { @@ -197,20 +183,17 @@ def test_handle_rhel_like_arping(self, m_subp): "destination": "172.17.0.2", } ] - ds.gratuitous_arp(items, self._fetch_distro("fedora")) - self.assertEqual( - [ - mock.call( - ["arping", "-c", "2", "-s", "172.16.6.104", "172.17.0.2"] - ) - ], - m_subp.call_args_list, - ) + ds.gratuitous_arp(items, fetch_distro("fedora")) + assert [ + mock.call( + ["arping", "-c", "2", "-s", "172.16.6.104", "172.17.0.2"] + ) + ] == m_subp.call_args_list @mock.patch( DS_PATH + ".subp.subp", side_effect=subp.ProcessExecutionError() ) - def test_continue_on_arping_error(self, m_subp): + def test_continue_on_arping_error(self, m_subp, fetch_distro): """Continue when command error""" items = [ {"destination": "172.17.0.2", "source": "172.16.6.104"}, @@ -219,18 +202,15 @@ def test_continue_on_arping_error(self, m_subp): "source": "172.16.6.104", }, ] - ds.gratuitous_arp(items, self._fetch_distro("ubuntu")) - 
self.assertEqual( - [ - mock.call( - ["arping", "-c", "2", "-S", "172.16.6.104", "172.17.0.2"] - ), - mock.call( - ["arping", "-c", "2", "-S", "172.16.6.104", "172.17.0.2"] - ), - ], - m_subp.call_args_list, - ) + ds.gratuitous_arp(items, fetch_distro("ubuntu")) + assert [ + mock.call( + ["arping", "-c", "2", "-S", "172.16.6.104", "172.17.0.2"] + ), + mock.call( + ["arping", "-c", "2", "-S", "172.16.6.104", "172.17.0.2"] + ), + ] == m_subp.call_args_list def populate_cloud_metadata(path, data): diff --git a/tests/unittests/sources/test_scaleway.py b/tests/unittests/sources/test_scaleway.py index bb1f2136..8a073644 100644 --- a/tests/unittests/sources/test_scaleway.py +++ b/tests/unittests/sources/test_scaleway.py @@ -1,21 +1,19 @@ # This file is part of cloud-init. See LICENSE file for license information. +# pylint: disable=attribute-defined-outside-init import json import socket from urllib.parse import SplitResult, urlsplit +import pytest import requests import responses from requests.exceptions import ConnectionError, ConnectTimeout -from cloudinit import helpers, settings, sources +from cloudinit import settings, sources from cloudinit.distros import ubuntu from cloudinit.sources import DataSourceScaleway -from tests.unittests.helpers import ( - CiTestCase, - mock, - responses_assert_call_count, -) +from tests.unittests.helpers import mock, responses_assert_call_count class DataResponses: @@ -76,11 +74,7 @@ def get_ok(cls, response): return 200, response.headers, json.dumps(cls.FAKE_METADATA) -class TestOnScaleway(CiTestCase): - def setUp(self): - super(TestOnScaleway, self).setUp() - self.tmp = self.tmp_dir() - +class TestOnScaleway: def install_mocks(self, fake_dmi, fake_file_exists, fake_cmdline): mock, faked = fake_dmi mock.return_value = "Scaleway" if faked else "Whatever" @@ -99,20 +93,20 @@ def install_mocks(self, fake_dmi, fake_file_exists, fake_cmdline): @mock.patch("os.path.exists") @mock.patch("cloudinit.dmi.read_dmi_data") def test_not_ds_detect( - 
self, m_read_dmi_data, m_file_exists, m_get_cmdline + self, m_read_dmi_data, m_file_exists, m_get_cmdline, paths ): self.install_mocks( fake_dmi=(m_read_dmi_data, False), fake_file_exists=(m_file_exists, False), fake_cmdline=(m_get_cmdline, False), ) - self.assertFalse(DataSourceScaleway.DataSourceScaleway.ds_detect()) + assert False is DataSourceScaleway.DataSourceScaleway.ds_detect() # When not on Scaleway, get_data() returns False. datasource = DataSourceScaleway.DataSourceScaleway( - settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp}) + settings.CFG_BUILTIN, None, paths ) - self.assertFalse(datasource.get_data()) + assert False is datasource.get_data() @mock.patch("cloudinit.util.get_cmdline") @mock.patch("os.path.exists") @@ -129,7 +123,7 @@ def test_ds_detect_dmi( fake_file_exists=(m_file_exists, False), fake_cmdline=(m_get_cmdline, False), ) - self.assertTrue(DataSourceScaleway.DataSourceScaleway.ds_detect()) + assert True is DataSourceScaleway.DataSourceScaleway.ds_detect() @mock.patch("cloudinit.util.get_cmdline") @mock.patch("os.path.exists") @@ -145,7 +139,7 @@ def test_ds_detect_var_run_scaleway( fake_file_exists=(m_file_exists, True), fake_cmdline=(m_get_cmdline, False), ) - self.assertTrue(DataSourceScaleway.DataSourceScaleway.ds_detect()) + assert True is DataSourceScaleway.DataSourceScaleway.ds_detect() @mock.patch("cloudinit.util.get_cmdline") @mock.patch("os.path.exists") @@ -161,7 +155,7 @@ def test_ds_detect_cmdline( fake_file_exists=(m_file_exists, False), fake_cmdline=(m_get_cmdline, True), ) - self.assertTrue(DataSourceScaleway.DataSourceScaleway.ds_detect()) + assert True is DataSourceScaleway.DataSourceScaleway.ds_detect() def get_source_address_adapter(*args, **kwargs): @@ -191,15 +185,14 @@ def _fix_mocking_url(url: str) -> str: ).geturl() -class TestDataSourceScaleway(CiTestCase): - def setUp(self): - tmp = self.tmp_dir() +class TestDataSourceScaleway: + @pytest.fixture(autouse=True) + def fixtures(self, mocker, paths, 
tmp_path): distro = ubuntu.Distro("", {}, {}) - distro.get_tmp_exec_path = self.tmp_dir + distro.get_tmp_exec_path = str(tmp_path) self.datasource = DataSourceScaleway.DataSourceScaleway( - settings.CFG_BUILTIN, distro, helpers.Paths({"run_dir": tmp}) + settings.CFG_BUILTIN, distro, paths ) - super(TestDataSourceScaleway, self).setUp() self.base_urls = DataSourceScaleway.DS_BASE_URLS for url in self.base_urls: @@ -214,15 +207,12 @@ def setUp(self): ) # Define the metadata URLS - self.add_patch( - "cloudinit.sources.DataSourceScaleway." - "DataSourceScaleway.ds_detect", - "_m_ds_detect", + mocker.patch( + "cloudinit.sources.DataSourceScaleway.DataSourceScaleway.ds_detect", return_value=True, ) - self.add_patch( + mocker.patch( "cloudinit.distros.net.find_fallback_nic", - "_m_find_fallback_nic", return_value="scalewaynic0", ) @@ -231,14 +221,14 @@ def test_set_metadata_url_ipv4_ok(self): self.datasource._set_metadata_url([self.base_urls[0]]) - self.assertTrue(self.base_urls[0] in self.datasource.metadata_url) + assert self.base_urls[0] in self.datasource.metadata_url @responses.activate def test_set_metadata_url_ipv6_ok(self): self.datasource._set_metadata_url([self.base_urls[1]]) - self.assertTrue(self.base_urls[1] in self.datasource.metadata_url) + assert self.base_urls[1] in self.datasource.metadata_url @responses.activate @mock.patch( @@ -286,34 +276,32 @@ def test_ipv4_metadata_ok(self, dhcpv4, ds_detect): f"{self.base_urls[0]}/vendor_data/cloud-init", callback=DataResponses.get_ok, ) - self.assertTrue(self.datasource.get_data()) + assert self.datasource.get_data() - self.assertEqual( - self.datasource.get_instance_id(), - MetadataResponses.FAKE_METADATA["id"], + assert ( + self.datasource.get_instance_id() + == MetadataResponses.FAKE_METADATA["id"] ) ssh_keys = self.datasource.get_public_ssh_keys() ssh_keys.sort() - self.assertEqual( - ssh_keys, - [ - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA", - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC", - "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABDDDDD", - ], - ) - self.assertEqual( - self.datasource.get_hostname().hostname, - MetadataResponses.FAKE_METADATA["hostname"], + assert ssh_keys == [ + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA", + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC", + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD", + ] + assert ( + self.datasource.get_hostname().hostname + == MetadataResponses.FAKE_METADATA["hostname"] ) - self.assertEqual( - self.datasource.get_userdata_raw(), DataResponses.FAKE_USER_DATA + assert ( + self.datasource.get_userdata_raw() == DataResponses.FAKE_USER_DATA ) - self.assertEqual( - self.datasource.get_vendordata_raw(), DataResponses.FAKE_USER_DATA + assert ( + self.datasource.get_vendordata_raw() + == DataResponses.FAKE_USER_DATA ) - self.assertIsNone(self.datasource.availability_zone) - self.assertIsNone(self.datasource.region) + assert self.datasource.availability_zone is None + assert self.datasource.region is None @responses.activate @mock.patch( @@ -371,32 +359,30 @@ def test_ipv4_metadata_timeout_ipv6_ok(self, dhcpv4, inet6, ds_detect): f"{self.datasource.metadata_urls[1]}/conf?format=json", 1 ) - self.assertEqual( - self.datasource.get_instance_id(), - MetadataResponses.FAKE_METADATA["id"], + assert ( + self.datasource.get_instance_id() + == MetadataResponses.FAKE_METADATA["id"] ) ssh_keys = self.datasource.get_public_ssh_keys() ssh_keys.sort() - self.assertEqual( - ssh_keys, - [ - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA", - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC", - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD", - ], - ) - self.assertEqual( - self.datasource.get_hostname().hostname, - MetadataResponses.FAKE_METADATA["hostname"], + assert ssh_keys == [ + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA", + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC", + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD", + ] + assert ( + self.datasource.get_hostname().hostname + == MetadataResponses.FAKE_METADATA["hostname"] ) - self.assertEqual( - self.datasource.get_userdata_raw(), 
DataResponses.FAKE_USER_DATA + assert ( + self.datasource.get_userdata_raw() == DataResponses.FAKE_USER_DATA ) - self.assertEqual( - self.datasource.get_vendordata_raw(), DataResponses.FAKE_USER_DATA + assert ( + self.datasource.get_vendordata_raw() + == DataResponses.FAKE_USER_DATA ) - self.assertIsNone(self.datasource.availability_zone) - self.assertIsNone(self.datasource.region) + assert self.datasource.availability_zone is None + assert self.datasource.region is None @responses.activate @mock.patch( @@ -428,14 +414,12 @@ def test_ipv4_ipv6_metadata_timeout(self, dhcpv4, inet6, sleep, ds_detect): callback=ConnectTimeout, ) self.datasource.max_wait = 0 - ret = self.datasource.get_data() + assert False is self.datasource.get_data() responses_assert_call_count(f"{self.datasource.metadata_urls[0]}", 2) responses_assert_call_count(f"{self.datasource.metadata_urls[1]}", 2) - - self.assertFalse(ret) - self.assertEqual(self.datasource.metadata, {}) - self.assertIsNone(self.datasource.get_userdata_raw()) - self.assertIsNone(self.datasource.get_vendordata_raw()) + assert self.datasource.metadata == {} + assert self.datasource.get_userdata_raw() is None + assert self.datasource.get_vendordata_raw() is None @responses.activate @mock.patch( @@ -470,11 +454,9 @@ def test_metadata_ipv4_404(self, dhcpv4, ds_detect): callback=DataResponses.empty, ) self.datasource.get_data() - self.assertEqual( - self.datasource.metadata, MetadataResponses.FAKE_METADATA - ) - self.assertIsNone(self.datasource.get_userdata_raw()) - self.assertIsNone(self.datasource.get_vendordata_raw()) + assert self.datasource.metadata == MetadataResponses.FAKE_METADATA + assert self.datasource.get_userdata_raw() is None + assert self.datasource.get_vendordata_raw() is None @responses.activate @mock.patch("cloudinit.url_helper.time.sleep", lambda x: None) @@ -492,16 +474,16 @@ def test_metadata_connection_errors_legacy_ipv4_url(self, dhcpv4): ] responses.reset() - with self.assertRaises(ConnectionError): + with 
pytest.raises(ConnectionError): responses.add_callback( responses.GET, f"{self.datasource.metadata_urls[0]}/", callback=ConnectionError, ) self.datasource._set_metadata_url(self.datasource.metadata_urls) - self.assertEqual(self.datasource.metadata, {}) - self.assertIsNone(self.datasource.get_userdata_raw()) - self.assertIsNone(self.datasource.get_vendordata_raw()) + assert self.datasource.metadata == {} + assert self.datasource.get_userdata_raw() is None + assert self.datasource.get_vendordata_raw() is None @responses.activate @mock.patch( @@ -561,8 +543,8 @@ def test_metadata_connection_errors_two_urls( f"{self.datasource.metadata_urls[1]}", 2, ) - self.assertIsNone(self.datasource.get_userdata_raw()) - self.assertIsNone(self.datasource.get_vendordata_raw()) + assert self.datasource.get_userdata_raw() is None + assert self.datasource.get_vendordata_raw() is None @responses.activate @mock.patch( @@ -611,10 +593,10 @@ def _callback(request): callback=_callback, ) self.datasource.get_data() - self.assertEqual( - self.datasource.get_userdata_raw(), DataResponses.FAKE_USER_DATA + assert ( + self.datasource.get_userdata_raw() == DataResponses.FAKE_USER_DATA ) - self.assertEqual(sleep.call_count, 2) + assert sleep.call_count == 2 def test_ssh_keys_empty(self): """ @@ -623,7 +605,7 @@ def test_ssh_keys_empty(self): """ self.datasource.metadata["tags"] = [] self.datasource.metadata["ssh_public_keys"] = [] - self.assertEqual(self.datasource.get_public_ssh_keys(), []) + assert self.datasource.get_public_ssh_keys() == [] def test_ssh_keys_only_tags(self): """ @@ -636,13 +618,10 @@ def test_ssh_keys_only_tags(self): self.datasource.metadata["ssh_public_keys"] = [] ssh_keys = self.datasource.get_public_ssh_keys() ssh_keys.sort() - self.assertEqual( - ssh_keys, - [ - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC", - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD", - ], - ) + assert ssh_keys == [ + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC", + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD", + ] def 
test_ssh_keys_only_conf(self): """ @@ -662,13 +641,10 @@ def test_ssh_keys_only_conf(self): ] ssh_keys = self.datasource.get_public_ssh_keys() ssh_keys.sort() - self.assertEqual( - ssh_keys, - [ - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA", - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC", - ], - ) + assert ssh_keys == [ + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA", + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC", + ] def test_ssh_keys_both(self): """ @@ -691,14 +667,11 @@ def test_ssh_keys_both(self): ] ssh_keys = self.datasource.get_public_ssh_keys() ssh_keys.sort() - self.assertEqual( - ssh_keys, - [ - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA", - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC", - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD", - ], - ) + assert ssh_keys == [ + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA", + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC", + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD", + ] @mock.patch("cloudinit.distros.net.find_fallback_nic") @mock.patch("cloudinit.util.get_cmdline") @@ -723,7 +696,7 @@ def test_legacy_network_config_ok(self, m_get_cmdline, fallback_nic): } ], } - self.assertEqual(netcfg, resp) + assert netcfg == resp @mock.patch("cloudinit.distros.net.find_fallback_nic") @mock.patch("cloudinit.util.get_cmdline") @@ -766,7 +739,7 @@ def test_legacy_network_config_ipv6_ok(self, m_get_cmdline, fallback_nic): } ], } - self.assertEqual(netcfg, resp) + assert netcfg == resp @mock.patch("cloudinit.distros.net.find_fallback_nic") @mock.patch("cloudinit.util.get_cmdline") @@ -779,7 +752,7 @@ def test_legacy_network_config_existing(self, m_get_cmdline, fallback_nic): self.datasource._network_config = "0xdeadbeef" netcfg = self.datasource.network_config - self.assertEqual(netcfg, "0xdeadbeef") + assert netcfg == "0xdeadbeef" @mock.patch("cloudinit.distros.net.find_fallback_nic") @mock.patch("cloudinit.util.get_cmdline") @@ -806,7 +779,7 @@ def test_legacy_network_config_unset(self, m_get_cmdline, fallback_nic): } netcfg = self.datasource.network_config - 
self.assertEqual(netcfg, resp) + assert netcfg == resp @mock.patch("cloudinit.sources.DataSourceScaleway.LOG.warning") @mock.patch("cloudinit.distros.net.find_fallback_nic") @@ -836,7 +809,7 @@ def test_legacy_network_config_cached_none( } netcfg = self.datasource.network_config - self.assertEqual(netcfg, resp) + assert netcfg == resp logwarning.assert_called_with( "Found None as cached _network_config. Resetting to %s", sources.UNSET, @@ -873,7 +846,7 @@ def test_ipmob_primary_ipv4_config_ok(self, m_get_cmdline, fallback_nic): }, } - self.assertEqual(netcfg, resp) + assert netcfg == resp @mock.patch("cloudinit.distros.net.find_fallback_nic") @mock.patch("cloudinit.util.get_cmdline") @@ -919,7 +892,7 @@ def test_ipmob_additional_ipv4_config_ok( }, }, } - self.assertEqual(netcfg, resp) + assert netcfg == resp @mock.patch("cloudinit.distros.net.find_fallback_nic") @mock.patch("cloudinit.util.get_cmdline") @@ -957,7 +930,7 @@ def test_ipmob_primary_ipv6_config_ok(self, m_get_cmdline, fallback_nic): }, } - self.assertEqual(netcfg, resp) + assert netcfg == resp @mock.patch("cloudinit.distros.net.find_fallback_nic") @mock.patch("cloudinit.util.get_cmdline") @@ -1008,7 +981,7 @@ def test_ipmob_primary_ipv4_v6_config_ok( }, } - self.assertEqual(netcfg, resp) + assert netcfg == resp @mock.patch("cloudinit.distros.net.find_fallback_nic") @mock.patch("cloudinit.util.get_cmdline") @@ -1059,4 +1032,4 @@ def test_ipmob_primary_ipv6_v4_config_ok( }, } - self.assertEqual(netcfg, resp) + assert netcfg == resp diff --git a/tests/unittests/sources/test_smartos.py b/tests/unittests/sources/test_smartos.py index e73b7c4d..dc58e0ac 100644 --- a/tests/unittests/sources/test_smartos.py +++ b/tests/unittests/sources/test_smartos.py @@ -31,7 +31,10 @@ from cloudinit.atomic_helper import b64e from cloudinit.event import EventScope, EventType from cloudinit.sources import DataSourceSmartOS -from cloudinit.sources.DataSourceSmartOS import SERIAL_DEVICE, SMARTOS_ENV_KVM +from 
cloudinit.sources.DataSourceSmartOS import ( + SERIAL_DEVICE, + SMARTOS_ENV_KVM, +) from cloudinit.sources.DataSourceSmartOS import ( convert_smartos_network_data as convert_net, ) @@ -791,7 +794,7 @@ def joyent_metadata(mocker, m_serial): "request_id", "metadata_value", "response_parts", - "meta_source_data", + "metasource_data", "metasource_data_len", ], defaults=(None, None, None, None, None, None), @@ -810,12 +813,15 @@ def joyent_metadata(mocker, m_serial): def make_response(): payloadstr = "" - if "payload" in res.response_parts: - payloadstr = " {0}".format(res.response_parts["payload"]) + if "payload" in res.response_parts: # pylint: disable=E1135 + payloadstr = " {0}".format( + res.response_parts["payload"] # pylint: disable=E1146,E1136 + ) return ( "V2 {length} {crc} {request_id} " "{command}{payloadstr}\n".format( - payloadstr=payloadstr, **res.response_parts + payloadstr=payloadstr, + **res.response_parts # pylint: disable=E1134 ).encode("ascii") ) @@ -825,8 +831,10 @@ def read_response(length): if not res.metasource_data: res.metasource_data = make_response() res.metasource_data_len = len(res.metasource_data) - resp = res.metasource_data[:length] - res.metasource_data = res.metasource_data[length:] + resp = res.metasource_data[:length] # pylint: disable=E1136 + res.metasource_data = res.metasource_data[ # pylint: disable=E1136 + length: + ] return resp res.serial.read.side_effect = read_response @@ -1422,7 +1430,6 @@ def test_ipv6_addrconf(self): assert expected == found -@pytest.mark.allow_subp_for("mdata-get") @pytest.fixture def mdata_proc(): mdata_proc = multiprocessing.Process(target=start_mdata_loop) diff --git a/tests/unittests/sources/test_upcloud.py b/tests/unittests/sources/test_upcloud.py index a6a805e0..85f5144e 100644 --- a/tests/unittests/sources/test_upcloud.py +++ b/tests/unittests/sources/test_upcloud.py @@ -3,15 +3,16 @@ # This file is part of cloud-init. See LICENSE file for license information. 
import json +from unittest import mock import pytest -from cloudinit import helpers, importer, settings, sources +from cloudinit import importer, settings, sources from cloudinit.sources.DataSourceUpCloud import ( DataSourceUpCloud, DataSourceUpCloudLocal, ) -from tests.unittests.helpers import CiTestCase, example_netdev, mock +from tests.unittests.helpers import example_netdev UC_METADATA = json.loads( """ @@ -149,73 +150,50 @@ def _mock_dmi(): return True, "00322b68-0096-4042-9406-faad61922128" -class TestUpCloudMetadata(CiTestCase): +class TestUpCloudMetadata: """ Test reading the meta-data """ - def setUp(self): - super(TestUpCloudMetadata, self).setUp() - self.tmp = self.tmp_dir() - - def get_ds(self, get_sysinfo=_mock_dmi): - ds = DataSourceUpCloud( - settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": self.tmp}) - ) - if get_sysinfo: - ds._get_sysinfo = get_sysinfo - return ds - @mock.patch("cloudinit.sources.helpers.upcloud.read_sysinfo") - def test_returns_false_not_on_upcloud(self, m_read_sysinfo): + def test_returns_false_not_on_upcloud(self, m_read_sysinfo, paths): m_read_sysinfo.return_value = (False, None) - ds = self.get_ds(get_sysinfo=None) - self.assertEqual(False, ds.get_data()) - self.assertTrue(m_read_sysinfo.called) + ds = DataSourceUpCloud(settings.CFG_BUILTIN, None, paths) + assert False is ds.get_data() + assert m_read_sysinfo.called @mock.patch("cloudinit.sources.helpers.upcloud.read_metadata") - def test_metadata(self, mock_readmd): + def test_metadata(self, mock_readmd, paths): mock_readmd.return_value = UC_METADATA.copy() - ds = self.get_ds() + ds = DataSourceUpCloud(settings.CFG_BUILTIN, None, paths) + ds._get_sysinfo = _mock_dmi ds.perform_dhcp_setup = False - ret = ds.get_data() - self.assertTrue(ret) + assert ds.get_data() is True + assert mock_readmd.called - self.assertTrue(mock_readmd.called) + assert UC_METADATA.get("user_data") == ds.get_userdata_raw() + assert UC_METADATA.get("vendor_data") == ds.get_vendordata_raw() + assert 
UC_METADATA.get("region") == ds.availability_zone + assert UC_METADATA.get("instance_id") == ds.get_instance_id() + assert UC_METADATA.get("cloud_name") == ds.cloud_name - self.assertEqual(UC_METADATA.get("user_data"), ds.get_userdata_raw()) - self.assertEqual( - UC_METADATA.get("vendor_data"), ds.get_vendordata_raw() - ) - self.assertEqual(UC_METADATA.get("region"), ds.availability_zone) - self.assertEqual(UC_METADATA.get("instance_id"), ds.get_instance_id()) - self.assertEqual(UC_METADATA.get("cloud_name"), ds.cloud_name) - - self.assertEqual( - UC_METADATA.get("public_keys"), ds.get_public_ssh_keys() - ) - self.assertIsInstance(ds.get_public_ssh_keys(), list) + assert UC_METADATA.get("public_keys") == ds.get_public_ssh_keys() + assert isinstance(ds.get_public_ssh_keys(), list) -class TestUpCloudNetworkSetup(CiTestCase): +class TestUpCloudNetworkSetup: """ Test reading the meta-data on networked context """ - def setUp(self): - super(TestUpCloudNetworkSetup, self).setUp() - self.tmp = self.tmp_dir() - - def get_ds(self, get_sysinfo=_mock_dmi): + @pytest.fixture + def ds(self, paths, tmp_path): distro = mock.MagicMock() - distro.get_tmp_exec_path = self.tmp_dir - ds = DataSourceUpCloudLocal( - settings.CFG_BUILTIN, distro, helpers.Paths({"run_dir": self.tmp}) - ) - if get_sysinfo: - ds._get_sysinfo = get_sysinfo + distro.get_tmp_exec_path = str(tmp_path) + ds = DataSourceUpCloudLocal(settings.CFG_BUILTIN, distro, paths) + ds._get_sysinfo = _mock_dmi return ds @pytest.mark.usefixtures("disable_netdev_info") @@ -224,7 +202,7 @@ def get_ds(self, get_sysinfo=_mock_dmi): @mock.patch("cloudinit.net.ephemeral.maybe_perform_dhcp_discovery") @mock.patch("cloudinit.net.ephemeral.EphemeralIPv4Network") def test_network_configured_metadata( - self, m_net, m_dhcp, m_fallback_nic, mock_readmd + self, m_net, m_dhcp, m_fallback_nic, mock_readmd, ds ): mock_readmd.return_value = UC_METADATA.copy() @@ -237,12 +215,9 @@ def test_network_configured_metadata( "broadcast-address": 
"10.6.3.255", } - ds = self.get_ds() - - ret = ds.get_data() - self.assertTrue(ret) + assert ds.get_data() is True - self.assertTrue(m_dhcp.called) + assert m_dhcp.called m_dhcp.assert_called_with(ds.distro, "eth1", None) m_net.assert_called_once_with( @@ -256,19 +231,19 @@ def test_network_configured_metadata( static_routes=None, ) - self.assertTrue(mock_readmd.called) + assert mock_readmd.called - self.assertEqual(UC_METADATA.get("region"), ds.availability_zone) - self.assertEqual(UC_METADATA.get("instance_id"), ds.get_instance_id()) - self.assertEqual(UC_METADATA.get("cloud_name"), ds.cloud_name) + assert UC_METADATA.get("region") == ds.availability_zone + assert UC_METADATA.get("instance_id") == ds.get_instance_id() + assert UC_METADATA.get("cloud_name") == ds.cloud_name @mock.patch("cloudinit.sources.helpers.upcloud.read_metadata") @mock.patch("cloudinit.net.get_interfaces_by_mac") - def test_network_configuration(self, m_get_by_mac, mock_readmd): + def test_network_configuration(self, m_get_by_mac, mock_readmd, ds): mock_readmd.return_value = UC_METADATA.copy() raw_ifaces = UC_METADATA.get("network").get("interfaces") - self.assertEqual(4, len(raw_ifaces)) + assert 4 == len(raw_ifaces) m_get_by_mac.return_value = { raw_ifaces[0].get("mac"): "eth0", @@ -277,52 +252,47 @@ def test_network_configuration(self, m_get_by_mac, mock_readmd): raw_ifaces[3].get("mac"): "eth3", } - ds = self.get_ds() ds.perform_dhcp_setup = False - ret = ds.get_data() - self.assertTrue(ret) - - self.assertTrue(mock_readmd.called) + assert ds.get_data() is True + assert mock_readmd.called netcfg = ds.network_config - self.assertEqual(1, netcfg.get("version")) + assert 1 == netcfg.get("version") config = netcfg.get("config") - self.assertIsInstance(config, list) - self.assertEqual(5, len(config)) - self.assertEqual("physical", config[3].get("type")) - - self.assertEqual( - raw_ifaces[2].get("mac"), config[2].get("mac_address") - ) - self.assertEqual(1, len(config[2].get("subnets"))) - 
self.assertEqual( - "ipv6_dhcpv6-stateless", config[2].get("subnets")[0].get("type") + assert isinstance(config, list) + assert 5 == len(config) + assert "physical" == config[3].get("type") + + assert raw_ifaces[2].get("mac") == config[2].get("mac_address") + assert 1 == len(config[2].get("subnets")) + assert "ipv6_dhcpv6-stateless" == config[2].get("subnets")[0].get( + "type" ) - self.assertEqual(2, len(config[0].get("subnets"))) - self.assertEqual("static", config[0].get("subnets")[1].get("type")) + assert 2 == len(config[0].get("subnets")) + assert "static" == config[0].get("subnets")[1].get("type") dns = config[4] - self.assertEqual("nameserver", dns.get("type")) - self.assertEqual(2, len(dns.get("address"))) - self.assertEqual( - UC_METADATA.get("network").get("dns")[1], dns.get("address")[1] + assert "nameserver" == dns.get("type") + assert 2 == len(dns.get("address")) + assert ( + UC_METADATA.get("network").get("dns")[1] == dns.get("address")[1] ) -class TestUpCloudDatasourceLoading(CiTestCase): +class TestUpCloudDatasourceLoading: def test_get_datasource_list_returns_in_local(self): deps = (sources.DEP_FILESYSTEM,) ds_list = sources.DataSourceUpCloud.get_datasource_list(deps) - self.assertEqual(ds_list, [DataSourceUpCloudLocal]) + assert ds_list == [DataSourceUpCloudLocal] def test_get_datasource_list_returns_in_normal(self): deps = (sources.DEP_FILESYSTEM, sources.DEP_NETWORK) ds_list = sources.DataSourceUpCloud.get_datasource_list(deps) - self.assertEqual(ds_list, [DataSourceUpCloud]) + assert ds_list == [DataSourceUpCloud] @mock.patch.object( importer, @@ -335,4 +305,4 @@ def test_list_sources_finds_ds(self): (sources.DEP_FILESYSTEM, sources.DEP_NETWORK), ["cloudinit.sources"], ) - self.assertEqual([DataSourceUpCloud], found) + assert [DataSourceUpCloud] == found diff --git a/tests/unittests/sources/test_vultr.py b/tests/unittests/sources/test_vultr.py index e5f1c39e..2cf555a0 100644 --- a/tests/unittests/sources/test_vultr.py +++ 
b/tests/unittests/sources/test_vultr.py @@ -6,12 +6,15 @@ # https://www.vultr.com/metadata/ import copy +import json +from unittest import mock -from cloudinit import helpers, settings +import pytest + +from cloudinit import settings from cloudinit.net.dhcp import NoDHCPLeaseError from cloudinit.sources import DataSourceVultr from cloudinit.sources.helpers import vultr -from tests.unittests.helpers import CiTestCase, mock VENDOR_DATA = """\ #cloud-config @@ -64,7 +67,7 @@ } ], "public-keys": ["ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key"], - "region": "us", + "region": {"regioncode": "EWR", "countrycode": "US"}, "user-defined": [], "startup-script": "echo No configured startup script", "raid1-script": "", @@ -126,7 +129,7 @@ }, ], "public-keys": ["ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key"], - "region": "us", + "region": {"regioncode": "EWR", "countrycode": "US"}, "user-defined": [], "startup-script": "echo No configured startup script", "user-data": [], @@ -244,54 +247,30 @@ } -class TestDataSourceVultr(CiTestCase): - def setUp(self): - super(TestDataSourceVultr, self).setUp() - self.tmp = self.tmp_dir() +class TestDataSourceVultr: + @pytest.fixture + def ds(self, paths, tmp_path): + distro = mock.MagicMock() + distro.get_tmp_exec_path.return_value = str(tmp_path) + return DataSourceVultr.DataSourceVultr( + settings.CFG_BUILTIN, distro, paths + ) # Test the datasource itself @mock.patch("cloudinit.net.get_interfaces_by_mac") @mock.patch("cloudinit.sources.helpers.vultr.is_vultr") @mock.patch("cloudinit.sources.helpers.vultr.get_metadata") - def test_datasource(self, mock_getmeta, mock_isvultr, mock_netmap): + def test_datasource(self, mock_getmeta, mock_isvultr, mock_netmap, ds): mock_getmeta.return_value = VULTR_V1_2 mock_isvultr.return_value = True mock_netmap.return_value = INTERFACE_MAP - distro = mock.MagicMock() - distro.get_tmp_exec_path = self.tmp_dir - source = DataSourceVultr.DataSourceVultr( - settings.CFG_BUILTIN, distro, 
helpers.Paths({"run_dir": self.tmp}) - ) - - # Test for failure - self.assertEqual(True, source._get_data()) - - # Test instance id - self.assertEqual("42872224", source.metadata["instanceid"]) - - # Test hostname - self.assertEqual("CLOUDINIT_2", source.metadata["local-hostname"]) - - # Test ssh keys - self.assertEqual(SSH_KEYS_1, source.metadata["public-keys"]) - - # Test vendor data generation - orig_val = self.maxDiff - self.maxDiff = None - - vendordata = source.vendordata_raw - - # Test vendor config - self.assertEqual( - VENDOR_DATA, - vendordata, - ) - - self.maxDiff = orig_val - - # Test network config generation - self.assertEqual(EXPECTED_VULTR_NETWORK_2, source.network_config) + assert True is ds._get_data() + assert "42872224" == ds.metadata["instanceid"] + assert "CLOUDINIT_2" == ds.metadata["local-hostname"] + assert SSH_KEYS_1 == ds.metadata["public-keys"] + assert VENDOR_DATA == ds.vendordata_raw + assert EXPECTED_VULTR_NETWORK_2 == ds.network_config def _get_metadata(self): # Create v1_3 @@ -307,22 +286,16 @@ def _get_metadata(self): @mock.patch("cloudinit.sources.helpers.vultr.is_vultr") @mock.patch("cloudinit.sources.helpers.vultr.get_metadata") def test_datasource_cloud_interfaces( - self, mock_getmeta, mock_isvultr, mock_netmap + self, mock_getmeta, mock_isvultr, mock_netmap, ds ): mock_getmeta.return_value = self._get_metadata() mock_isvultr.return_value = True mock_netmap.return_value = INTERFACE_MAP - distro = mock.MagicMock() - distro.get_tmp_exec_path = self.tmp_dir - source = DataSourceVultr.DataSourceVultr( - settings.CFG_BUILTIN, distro, helpers.Paths({"run_dir": self.tmp}) - ) - - source._get_data() + ds._get_data() # Test network config generation - self.assertEqual(EXPECTED_VULTR_NETWORK_2, source.network_config) + assert EXPECTED_VULTR_NETWORK_2 == ds.network_config # Test network config generation @mock.patch("cloudinit.net.get_interfaces_by_mac") @@ -330,8 +303,8 @@ def test_network_config(self, mock_netmap): 
mock_netmap.return_value = INTERFACE_MAP interf = VULTR_V1_1["interfaces"] - self.assertEqual( - EXPECTED_VULTR_NETWORK_1, vultr.generate_network_config(interf) + assert EXPECTED_VULTR_NETWORK_1 == vultr.generate_network_config( + interf ) # Test Private Networking config generation @@ -341,15 +314,15 @@ def test_private_network_config(self, mock_netmap): interf = copy.deepcopy(VULTR_V1_2["interfaces"]) # Test configuring - self.assertEqual( - EXPECTED_VULTR_NETWORK_2, vultr.generate_network_config(interf) + assert EXPECTED_VULTR_NETWORK_2 == vultr.generate_network_config( + interf ) # Test unconfigured interf[1]["unconfigured"] = True expected = copy.deepcopy(EXPECTED_VULTR_NETWORK_2) expected["config"].pop(2) - self.assertEqual(expected, vultr.generate_network_config(interf)) + assert expected == vultr.generate_network_config(interf) # Override ephemeral for proper unit testing def override_enter(self): @@ -379,20 +352,12 @@ def test_interface_seek( mock_read_metadata, mock_isvultr, mock_eph_init, + ds, ): - mock_read_metadata.return_value = {} + mock_read_metadata.return_value = json.dumps(VULTR_V1_1) mock_isvultr.return_value = True mock_interface_list.return_value = FILTERED_INTERFACES - distro = mock.MagicMock() - distro.get_tmp_exec_path = self.tmp_dir - source = DataSourceVultr.DataSourceVultr( - settings.CFG_BUILTIN, distro, helpers.Paths({"run_dir": self.tmp}) - ) - - try: - source._get_data() - except Exception: - pass + ds.get_metadata() assert mock_eph_init.call_args[1]["iface"] == FILTERED_INTERFACES[1] diff --git a/tests/unittests/sources/vmware/test_custom_script.py b/tests/unittests/sources/vmware/test_custom_script.py index 530fbc68..b886f124 100644 --- a/tests/unittests/sources/vmware/test_custom_script.py +++ b/tests/unittests/sources/vmware/test_custom_script.py @@ -7,6 +7,9 @@ import os import stat +from unittest import mock + +import pytest from cloudinit import util from cloudinit.sources.helpers.vmware.imc.config_custom_script import ( @@ 
-15,17 +18,24 @@ PostCustomScript, PreCustomScript, ) -from tests.unittests.helpers import CiTestCase, mock -class TestVmwareCustomScript(CiTestCase): - def setUp(self): - self.tmpDir = self.tmp_dir() - # Mock the tmpDir as the root dir in VM. - self.execDir = os.path.join(self.tmpDir, ".customization") - self.execScript = os.path.join(self.execDir, ".customize.sh") +@pytest.fixture +def fake_exec_dir(mocker, tmp_path): + exec_dir = tmp_path / ".customization" + mocker.patch.object(CustomScriptConstant, "CUSTOM_TMP_DIR", str(exec_dir)) + return exec_dir + + +@pytest.fixture +def fake_exec_script(fake_exec_dir, mocker): + ex_script = fake_exec_dir / ".customize.sh" + mocker.patch.object(CustomScriptConstant, "CUSTOM_SCRIPT", str(ex_script)) + return ex_script + - def test_prepare_custom_script(self): +class TestVmwareCustomScript: + def test_prepare_custom_script(self, fake_exec_script, tmp_path): """ This test is designed to verify the behavior based on the presence of custom script. Mainly needed for scenario where a custom script is @@ -33,79 +43,60 @@ def test_prepare_custom_script(self): is raised in such cases. """ # Custom script does not exist. - preCust = PreCustomScript("random-vmw-test", self.tmpDir) - self.assertEqual("random-vmw-test", preCust.scriptname) - self.assertEqual(self.tmpDir, preCust.directory) - self.assertEqual( - self.tmp_path("random-vmw-test", self.tmpDir), preCust.scriptpath - ) - with self.assertRaises(CustomScriptNotFound): + preCust = PreCustomScript("random-vmw-test", str(tmp_path)) + assert "random-vmw-test" == preCust.scriptname + assert str(tmp_path) == preCust.directory + assert str(tmp_path / "random-vmw-test") == preCust.scriptpath + with pytest.raises(CustomScriptNotFound): preCust.prepare_script() # Custom script exists. 
- custScript = self.tmp_path("test-cust", self.tmpDir) + custScript = str(tmp_path / "test-cust") util.write_file(custScript, "test-CR-strip\r\r") - with mock.patch.object( - CustomScriptConstant, "CUSTOM_TMP_DIR", self.execDir - ): - with mock.patch.object( - CustomScriptConstant, "CUSTOM_SCRIPT", self.execScript - ): - postCust = PostCustomScript( - "test-cust", self.tmpDir, self.tmpDir - ) - self.assertEqual("test-cust", postCust.scriptname) - self.assertEqual(self.tmpDir, postCust.directory) - self.assertEqual(custScript, postCust.scriptpath) - postCust.prepare_script() - # Custom script is copied with exec privilege - self.assertTrue(os.path.exists(self.execScript)) - st = os.stat(self.execScript) - self.assertTrue(st.st_mode & stat.S_IEXEC) - with open(self.execScript, "r") as f: - content = f.read() - self.assertEqual(content, "test-CR-strip") - # Check if all carraige returns are stripped from script. - self.assertFalse("\r" in content) + postCust = PostCustomScript("test-cust", str(tmp_path), str(tmp_path)) + assert "test-cust" == postCust.scriptname + assert str(tmp_path) == postCust.directory + assert custScript == postCust.scriptpath + postCust.prepare_script() - def test_execute_post_cust(self): + # Custom script is copied with exec privilege + assert fake_exec_script.exists() + st = os.stat(fake_exec_script) + assert st.st_mode & stat.S_IEXEC + assert "test-CR-strip" == fake_exec_script.read_text() + + def test_execute_post_cust(self, fake_exec_script, tmp_path): """ This test is designed to verify the behavior after execute post customization. 
""" # Prepare the customize package - postCustRun = self.tmp_path("post-customize-guest.sh", self.tmpDir) + postCustRun = str(tmp_path / "post-customize-guest.sh") util.write_file(postCustRun, "This is the script to run post cust") - userScript = self.tmp_path("test-cust", self.tmpDir) + userScript = str(tmp_path / "test-cust") util.write_file(userScript, "This is the post cust script") # Mock the cc_scripts_per_instance dir and marker file. # Create another tmp dir for cc_scripts_per_instance. - ccScriptDir = self.tmp_dir() - ccScript = os.path.join(ccScriptDir, "post-customize-guest.sh") - markerFile = os.path.join(self.tmpDir, ".markerFile") + ccScriptDir = tmp_path / "out" + ccScriptDir.mkdir() + ccScript = ccScriptDir / "post-customize-guest.sh" + markerFile = tmp_path / ".markerFile" + with mock.patch.object( - CustomScriptConstant, "CUSTOM_TMP_DIR", self.execDir + CustomScriptConstant, + "POST_CUSTOM_PENDING_MARKER", + str(markerFile), ): - with mock.patch.object( - CustomScriptConstant, "CUSTOM_SCRIPT", self.execScript - ): - with mock.patch.object( - CustomScriptConstant, - "POST_CUSTOM_PENDING_MARKER", - markerFile, - ): - postCust = PostCustomScript( - "test-cust", self.tmpDir, ccScriptDir - ) - postCust.execute() - # Check cc_scripts_per_instance and marker file - # are created. - self.assertTrue(os.path.exists(ccScript)) - with open(ccScript, "r") as f: - content = f.read() - self.assertEqual( - content, "This is the script to run post cust" - ) - self.assertTrue(os.path.exists(markerFile)) + postCust = PostCustomScript( + "test-cust", str(tmp_path), ccScriptDir + ) + postCust.execute() + # Check cc_scripts_per_instance and marker file + # are created. 
+ assert ccScript.exists() + assert ( + "This is the script to run post cust" == ccScript.read_text() + ) + assert markerFile.exists() diff --git a/tests/unittests/sources/vmware/test_guestcust_util.py b/tests/unittests/sources/vmware/test_guestcust_util.py index 4e7541e9..46d81e07 100644 --- a/tests/unittests/sources/vmware/test_guestcust_util.py +++ b/tests/unittests/sources/vmware/test_guestcust_util.py @@ -5,6 +5,8 @@ # # This file is part of cloud-init. See LICENSE file for license information. +from unittest import mock + from cloudinit import subp from cloudinit.sources.helpers.vmware.imc.config import Config from cloudinit.sources.helpers.vmware.imc.config_file import ConfigFile @@ -13,18 +15,18 @@ set_gc_status, ) from cloudinit.subp import SubpResult -from tests.unittests.helpers import CiTestCase, mock -class TestGuestCustUtil(CiTestCase): +class TestGuestCustUtil: def test_get_tools_config_not_installed(self): """ This test is designed to verify the behavior if vmware-toolbox-cmd is not installed. """ with mock.patch.object(subp, "which", return_value=None): - self.assertEqual( - get_tools_config("section", "key", "defaultVal"), "defaultVal" + assert ( + get_tools_config("section", "key", "defaultVal") + == "defaultVal" ) def test_get_tools_config_internal_exception(self): @@ -42,9 +44,9 @@ def test_get_tools_config_internal_exception(self): ), ): # verify return value is 'defaultVal', not 'value'. 
- self.assertEqual( - get_tools_config("section", "key", "defaultVal"), - "defaultVal", + assert ( + get_tools_config("section", "key", "defaultVal") + == "defaultVal" ) def test_get_tools_config_normal(self): @@ -57,31 +59,29 @@ def test_get_tools_config_normal(self): with mock.patch.object( subp, "subp", return_value=SubpResult("key = value ", b"") ): - self.assertEqual( - get_tools_config("section", "key", "defaultVal"), "value" + assert ( + get_tools_config("section", "key", "defaultVal") == "value" ) # value is blank with mock.patch.object( subp, "subp", return_value=SubpResult("key = ", b"") ): - self.assertEqual( - get_tools_config("section", "key", "defaultVal"), "" - ) + assert get_tools_config("section", "key", "defaultVal") == "" # value contains = with mock.patch.object( subp, "subp", return_value=SubpResult("key=Bar=Wark", b"") ): - self.assertEqual( - get_tools_config("section", "key", "defaultVal"), - "Bar=Wark", + assert ( + get_tools_config("section", "key", "defaultVal") + == "Bar=Wark" ) # value contains specific characters with mock.patch.object( subp, "subp", return_value=SubpResult("[a] b.c_d=e-f", b"") ): - self.assertEqual( - get_tools_config("section", "key", "defaultVal"), "e-f" + assert ( + get_tools_config("section", "key", "defaultVal") == "e-f" ) def test_set_gc_status(self): @@ -89,12 +89,12 @@ def test_set_gc_status(self): This test is designed to verify the behavior of set_gc_status """ # config is None, return None - self.assertEqual(set_gc_status(None, "Successful"), None) + assert set_gc_status(None, "Successful") is None # post gc status is NO, return None cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") conf = Config(cf) - self.assertEqual(set_gc_status(conf, "Successful"), None) + assert set_gc_status(conf, "Successful") is None # post gc status is YES, subp is called to execute command cf._insertKey("MISC|POST-GC-STATUS", "YES") @@ -102,7 +102,7 @@ def test_set_gc_status(self): with mock.patch.object( subp, "subp", 
return_value=SubpResult("ok", b"") ) as mockobj: - self.assertEqual(set_gc_status(conf, "Successful"), ("ok", b"")) + assert set_gc_status(conf, "Successful") == ("ok", b"") mockobj.assert_called_once_with( ["vmware-rpctool", "info-set guestinfo.gc.status Successful"], rcs=[0], diff --git a/tests/unittests/sources/vmware/test_vmware_config_file.py b/tests/unittests/sources/vmware/test_vmware_config_file.py index 837efa21..c9b23287 100644 --- a/tests/unittests/sources/vmware/test_vmware_config_file.py +++ b/tests/unittests/sources/vmware/test_vmware_config_file.py @@ -9,11 +9,11 @@ # This file is part of cloud-init. See LICENSE file for license information. import logging -import os import sys -import tempfile import textwrap +import pytest + from cloudinit.sources.helpers.vmware.imc.boot_proto import BootProtoEnum from cloudinit.sources.helpers.vmware.imc.config import Config from cloudinit.sources.helpers.vmware.imc.config_file import ( @@ -25,7 +25,6 @@ get_non_network_data_from_vmware_cust_cfg, ) from tests.helpers import cloud_init_project_dir -from tests.unittests.helpers import CiTestCase logging.basicConfig(level=logging.DEBUG, stream=sys.stdout) logger = logging.getLogger(__name__) @@ -35,23 +34,22 @@ def ConfigFile(path: str): return WrappedConfigFile(cloud_init_project_dir(path)) -class TestVmwareConfigFile(CiTestCase): +class TestVmwareConfigFile: def test_utility_methods(self): """Tests basic utility methods of ConfigFile class""" cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") cf.clear() - - self.assertEqual(0, len(cf), "clear size") + assert 0 == len(cf), "clear size" cf._insertKey(" PASSWORD|-PASS ", " foo ") cf._insertKey("BAR", " ") - self.assertEqual(2, len(cf), "insert size") - self.assertEqual("foo", cf["PASSWORD|-PASS"], "password") - self.assertTrue("PASSWORD|-PASS" in cf, "hasPassword") - self.assertFalse("FOO" in cf, "hasFoo") - self.assertTrue("BAR" in cf, "hasBar") + assert 2 == len(cf), "insert size" + assert "foo" == 
cf["PASSWORD|-PASS"], "password" + assert "PASSWORD|-PASS" in cf, "hasPassword" + assert "FOO" not in cf, "hasFoo" + assert "BAR" in cf, "hasBar" def test_configfile_without_instance_id(self): """ @@ -60,11 +58,11 @@ def test_configfile_without_instance_id(self): cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") conf = Config(cf) - (md1, _) = get_non_network_data_from_vmware_cust_cfg(conf) - self.assertFalse("instance-id" in md1) + md1, _ = get_non_network_data_from_vmware_cust_cfg(conf) + assert "instance-id" not in md1 (md2, _) = get_non_network_data_from_vmware_cust_cfg(conf) - self.assertFalse("instance-id" in md2) + assert "instance-id" not in md2 def test_configfile_with_instance_id(self): """Tests instance id get from configuration file""" @@ -72,10 +70,10 @@ def test_configfile_with_instance_id(self): conf = Config(cf) (md1, _) = get_non_network_data_from_vmware_cust_cfg(conf) - self.assertEqual(md1["instance-id"], conf.instance_id, "instance-id") + assert md1["instance-id"] == conf.instance_id, "instance-id" (md2, _) = get_non_network_data_from_vmware_cust_cfg(conf) - self.assertEqual(md2["instance-id"], conf.instance_id, "instance-id") + assert md2["instance-id"] == conf.instance_id, "instance-id" def test_configfile_static_2nics(self): """Tests Config class for a configuration with two static NICs.""" @@ -83,38 +81,33 @@ def test_configfile_static_2nics(self): conf = Config(cf) - self.assertEqual("myhost1", conf.host_name, "hostName") - self.assertEqual("Africa/Abidjan", conf.timezone, "tz") + assert "myhost1" == conf.host_name, "hostName" + assert "Africa/Abidjan" == conf.timezone, "tz" - self.assertEqual( - ["10.20.145.1", "10.20.145.2"], conf.name_servers, "dns" - ) - self.assertEqual( - ["eng.vmware.com", "proxy.vmware.com"], - conf.dns_suffixes, - "suffixes", - ) + assert ["10.20.145.1", "10.20.145.2"] == conf.name_servers, "dns" + assert [ + "eng.vmware.com", + "proxy.vmware.com", + ] == conf.dns_suffixes, "suffixes" nics = conf.nics ipv40 = 
nics[0].staticIpv4 - self.assertEqual(2, len(nics), "nics") - self.assertEqual("NIC1", nics[0].name, "nic0") - self.assertEqual("00:50:56:a6:8c:08", nics[0].mac, "mac0") - self.assertEqual(BootProtoEnum.STATIC, nics[0].bootProto, "bootproto0") - self.assertEqual("10.20.87.154", ipv40[0].ip, "ipv4Addr0") - self.assertEqual("255.255.252.0", ipv40[0].netmask, "ipv4Mask0") - self.assertEqual(2, len(ipv40[0].gateways), "ipv4Gw0") - self.assertEqual("10.20.87.253", ipv40[0].gateways[0], "ipv4Gw0_0") - self.assertEqual("10.20.87.105", ipv40[0].gateways[1], "ipv4Gw0_1") - - self.assertEqual(1, len(nics[0].staticIpv6), "ipv6Cnt0") - self.assertEqual( - "fc00:10:20:87::154", nics[0].staticIpv6[0].ip, "ipv6Addr0" - ) + assert 2 == len(nics), "nics" + assert "NIC1" == nics[0].name, "nic0" + assert "00:50:56:a6:8c:08" == nics[0].mac, "mac0" + assert BootProtoEnum.STATIC == nics[0].bootProto, "bootproto0" + assert "10.20.87.154" == ipv40[0].ip, "ipv4Addr0" + assert "255.255.252.0" == ipv40[0].netmask, "ipv4Mask0" + assert 2 == len(ipv40[0].gateways), "ipv4Gw0" + assert "10.20.87.253" == ipv40[0].gateways[0], "ipv4Gw0_0" + assert "10.20.87.105" == ipv40[0].gateways[1], "ipv4Gw0_1" - self.assertEqual("NIC2", nics[1].name, "nic1") - self.assertTrue(not nics[1].staticIpv6, "ipv61 dhcp") + assert 1 == len(nics[0].staticIpv6), "ipv6Cnt0" + assert "fc00:10:20:87::154" == nics[0].staticIpv6[0].ip, "ipv6Addr0" + + assert "NIC2" == nics[1].name, "nic1" + assert not nics[1].staticIpv6, "ipv61 dhcp" def test_config_file_dhcp_2nics(self): """Tests Config class for a configuration with two DHCP NICs.""" @@ -122,10 +115,10 @@ def test_config_file_dhcp_2nics(self): conf = Config(cf) nics = conf.nics - self.assertEqual(2, len(nics), "nics") - self.assertEqual("NIC1", nics[0].name, "nic0") - self.assertEqual("00:50:56:a6:8c:08", nics[0].mac, "mac0") - self.assertEqual(BootProtoEnum.DHCP, nics[0].bootProto, "bootproto0") + assert 2 == len(nics), "nics" + assert "NIC1" == nics[0].name, "nic0" + 
assert "00:50:56:a6:8c:08" == nics[0].mac, "mac0" + assert BootProtoEnum.DHCP == nics[0].bootProto, "bootproto0" def test_config_password(self): cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") @@ -134,8 +127,8 @@ def test_config_password(self): cf._insertKey("PASSWORD|RESET", "no") conf = Config(cf) - self.assertEqual("test-password", conf.admin_password, "password") - self.assertFalse(conf.reset_password, "do not reset password") + assert "test-password" == conf.admin_password, "password" + assert not conf.reset_password, "do not reset password" def test_config_reset_passwd(self): cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") @@ -144,16 +137,15 @@ def test_config_reset_passwd(self): cf._insertKey("PASSWORD|RESET", "random") conf = Config(cf) - with self.assertRaises(ValueError): - pw = conf.reset_password - self.assertIsNone(pw) + with pytest.raises(ValueError): + assert None is conf.reset_password cf.clear() cf._insertKey("PASSWORD|RESET", "yes") - self.assertEqual(1, len(cf), "insert size") + assert 1 == len(cf), "insert size" conf = Config(cf) - self.assertTrue(conf.reset_password, "reset password") + assert conf.reset_password, "reset password" def test_get_config_nameservers(self): """Tests DNS and nameserver settings in a configuration.""" @@ -163,22 +155,19 @@ def test_get_config_nameservers(self): network_config = get_network_data_from_vmware_cust_cfg(config, False) - self.assertEqual(2, network_config.get("version")) + assert 2 == network_config.get("version") ethernets = network_config.get("ethernets") for _, config in ethernets.items(): - self.assertTrue(isinstance(config, dict)) + assert isinstance(config, dict) name_servers = config.get("nameservers").get("addresses") dns_suffixes = config.get("nameservers").get("search") - self.assertEqual( - ["10.20.145.1", "10.20.145.2"], name_servers, "dns" - ) - self.assertEqual( - ["eng.vmware.com", "proxy.vmware.com"], - dns_suffixes, - "suffixes", - ) + assert ["10.20.145.1", "10.20.145.2"] 
== name_servers, "dns" + assert [ + "eng.vmware.com", + "proxy.vmware.com", + ] == dns_suffixes, "suffixes" def test_get_config_dns_suffixes(self): """Tests if get_network_from_vmware_cust_cfg properly @@ -190,16 +179,16 @@ def test_get_config_dns_suffixes(self): network_config = get_network_data_from_vmware_cust_cfg(config, False) - self.assertEqual(2, network_config.get("version")) + assert 2 == network_config.get("version") ethernets = network_config.get("ethernets") for _, config in ethernets.items(): - self.assertTrue(isinstance(config, dict)) + assert isinstance(config, dict) name_servers = config.get("nameservers").get("addresses") dns_suffixes = config.get("nameservers").get("search") - self.assertEqual(None, name_servers, "dns") - self.assertEqual(["eng.vmware.com"], dns_suffixes, "suffixes") + assert None is name_servers, "dns" + assert ["eng.vmware.com"] == dns_suffixes, "suffixes" def test_get_nics_list_dhcp(self): """Tests if NicConfigurator properly calculates ethernets @@ -213,44 +202,28 @@ def test_get_nics_list_dhcp(self): ) ethernets_dict = nicConfigurator.generate() - self.assertTrue(isinstance(ethernets_dict, dict)) - self.assertEqual(2, len(ethernets_dict), "number of ethernets") + assert isinstance(ethernets_dict, dict) + assert 2 == len(ethernets_dict), "number of ethernets" for name, config in ethernets_dict.items(): if name == "NIC1": - self.assertEqual( - "00:50:56:a6:8c:08", - config.get("match").get("macaddress"), - "mac address of NIC1", - ) - self.assertEqual( - True, config.get("wakeonlan"), "wakeonlan of NIC1" - ) - self.assertEqual( - True, config.get("dhcp4"), "DHCPv4 enablement of NIC1" - ) - self.assertEqual( - False, - config.get("dhcp4-overrides").get("use-dns"), - "use-dns enablement for dhcp4-overrides of NIC1", - ) + assert "00:50:56:a6:8c:08" == config.get("match").get( + "macaddress" + ), "mac address of NIC1" + assert True is config.get("wakeonlan"), "wakeonlan of NIC1" + assert True is config.get("dhcp4"), "DHCPv4 
enablement of NIC1" + assert False is config.get("dhcp4-overrides").get( + "use-dns" + ), "use-dns enablement for dhcp4-overrides of NIC1" if name == "NIC2": - self.assertEqual( - "00:50:56:a6:5a:de", - config.get("match").get("macaddress"), - "mac address of NIC2", - ) - self.assertEqual( - True, config.get("wakeonlan"), "wakeonlan of NIC2" - ) - self.assertEqual( - True, config.get("dhcp4"), "DHCPv4 enablement of NIC2" - ) - self.assertEqual( - False, - config.get("dhcp4-overrides").get("use-dns"), - "use-dns enablement for dhcp4-overrides of NIC2", - ) + assert "00:50:56:a6:5a:de" == config.get("match").get( + "macaddress" + ), "mac address of NIC2" + assert True is config.get("wakeonlan"), "wakeonlan of NIC2" + assert True is config.get("dhcp4"), "DHCPv4 enablement of NIC2" + assert False is config.get("dhcp4-overrides").get( + "use-dns" + ), "use-dns enablement for dhcp4-overrides of NIC2" def test_get_nics_list_static(self): """Tests if NicConfigurator properly calculates ethernets @@ -264,124 +237,95 @@ def test_get_nics_list_static(self): ) ethernets_dict = nicConfigurator.generate() - self.assertTrue(isinstance(ethernets_dict, dict)) - self.assertEqual(2, len(ethernets_dict), "number of ethernets") + assert isinstance(ethernets_dict, dict) + assert 2 == len(ethernets_dict), "number of ethernets" for name, config in ethernets_dict.items(): print(config) if name == "NIC1": - self.assertEqual( - "00:50:56:a6:8c:08", - config.get("match").get("macaddress"), - "mac address of NIC1", - ) - self.assertEqual( - True, config.get("wakeonlan"), "wakeonlan of NIC1" - ) - self.assertEqual( - False, config.get("dhcp4"), "DHCPv4 enablement of NIC1" - ) - self.assertEqual( - False, config.get("dhcp6"), "DHCPv6 enablement of NIC1" - ) - self.assertEqual( - ["10.20.87.154/22", "fc00:10:20:87::154/64"], - config.get("addresses"), - "IP addresses of NIC1", - ) - self.assertEqual( - [ - {"to": "10.20.84.0/22", "via": "10.20.87.253"}, - {"to": "10.20.84.0/22", "via": 
"10.20.87.105"}, - { - "to": "fc00:10:20:87::/64", - "via": "fc00:10:20:87::253", - }, - ], - config.get("routes"), - "routes of NIC1", - ) + assert "00:50:56:a6:8c:08" == config.get("match").get( + "macaddress" + ), "mac address of NIC1" + assert True is config.get("wakeonlan"), "wakeonlan of NIC1" + assert False is config.get( + "dhcp4" + ), "DHCPv4 enablement of NIC1" + assert False is config.get( + "dhcp6" + ), "DHCPv6 enablement of NIC1" + assert [ + "10.20.87.154/22", + "fc00:10:20:87::154/64", + ] == config.get("addresses"), "IP addresses of NIC1" + assert [ + {"to": "10.20.84.0/22", "via": "10.20.87.253"}, + {"to": "10.20.84.0/22", "via": "10.20.87.105"}, + { + "to": "fc00:10:20:87::/64", + "via": "fc00:10:20:87::253", + }, + ] == config.get("routes"), "routes of NIC1" if name == "NIC2": - self.assertEqual( - "00:50:56:a6:ef:7d", - config.get("match").get("macaddress"), - "mac address of NIC2", - ) - self.assertEqual( - True, config.get("wakeonlan"), "wakeonlan of NIC2" - ) - self.assertEqual( - False, config.get("dhcp4"), "DHCPv4 enablement of NIC2" - ) - self.assertEqual( - ["192.168.6.102/16"], - config.get("addresses"), - "IP addresses of NIC2", - ) - self.assertEqual( - [ - {"to": "192.168.0.0/16", "via": "192.168.0.10"}, - ], - config.get("routes"), - "routes of NIC2", - ) + assert "00:50:56:a6:ef:7d" == config.get("match").get( + "macaddress" + ), "mac address of NIC2" + assert True is config.get("wakeonlan"), "wakeonlan of NIC2" + assert False is config.get( + "dhcp4" + ), "DHCPv4 enablement of NIC2" + assert ["192.168.6.102/16"] == config.get( + "addresses" + ), "IP addresses of NIC2" + assert [ + {"to": "192.168.0.0/16", "via": "192.168.0.10"}, + ] == config.get("routes"), "routes of NIC2" def test_custom_script(self): cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") conf = Config(cf) - self.assertIsNone(conf.custom_script_name) + assert conf.custom_script_name is None cf._insertKey("CUSTOM-SCRIPT|SCRIPT-NAME", "test-script") conf = 
Config(cf) - self.assertEqual("test-script", conf.custom_script_name) + assert "test-script" == conf.custom_script_name def test_post_gc_status(self): cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") conf = Config(cf) - self.assertFalse(conf.post_gc_status) + assert not conf.post_gc_status cf._insertKey("MISC|POST-GC-STATUS", "YES") conf = Config(cf) - self.assertTrue(conf.post_gc_status) + assert conf.post_gc_status def test_no_default_run_post_script(self): cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") conf = Config(cf) - self.assertFalse(conf.default_run_post_script) + assert not conf.default_run_post_script cf._insertKey("MISC|DEFAULT-RUN-POST-CUST-SCRIPT", "NO") conf = Config(cf) - self.assertFalse(conf.default_run_post_script) + assert not conf.default_run_post_script def test_yes_default_run_post_script(self): cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") cf._insertKey("MISC|DEFAULT-RUN-POST-CUST-SCRIPT", "yes") conf = Config(cf) - self.assertTrue(conf.default_run_post_script) + assert conf.default_run_post_script -class TestVmwareNetConfig(CiTestCase): +class TestVmwareNetConfig: """Test conversion of vmware config to cloud-init config.""" - maxDiff = None - - def _get_NicConfigurator(self, text): - fp = None - try: - with tempfile.NamedTemporaryFile( - mode="w", dir=self.tmp_dir(), delete=False - ) as fp: - fp.write(text) - fp.close() - cfg = Config(ConfigFile(fp.name)) - return NicConfigurator( - cfg.nics, - cfg.name_servers, - cfg.dns_suffixes, - use_system_devices=False, - ) - finally: - if fp: - os.unlink(fp.name) - - def test_static_nic_without_ipv4_netmask(self): + def _get_NicConfigurator(self, tmp_path, text): + tmp_file = tmp_path / ".config" + tmp_file.write_text(text) + cfg = Config(ConfigFile(tmp_file)) + return NicConfigurator( + cfg.nics, + cfg.name_servers, + cfg.dns_suffixes, + use_system_devices=False, + ) + + def test_static_nic_without_ipv4_netmask(self, tmp_path): """netmask is optional for static ipv4 
configuration.""" config = textwrap.dedent( """\ @@ -402,21 +346,18 @@ def test_static_nic_without_ipv4_netmask(self): IPADDR = 10.20.87.154 """ ) - nc = self._get_NicConfigurator(config) - self.assertEqual( - { - "NIC1": { - "match": {"macaddress": "00:50:56:a6:8c:08"}, - "wakeonlan": True, - "dhcp4": False, - "addresses": ["10.20.87.154/32"], - "set-name": "NIC1", - } - }, - nc.generate(), - ) - - def test_static_nic_without_ipv6_netmask(self): + nc = self._get_NicConfigurator(tmp_path, config) + assert { + "NIC1": { + "match": {"macaddress": "00:50:56:a6:8c:08"}, + "wakeonlan": True, + "dhcp4": False, + "addresses": ["10.20.87.154/32"], + "set-name": "NIC1", + } + } == nc.generate() + + def test_static_nic_without_ipv6_netmask(self, tmp_path): """netmask is mandatory for static ipv6 configuration.""" config = textwrap.dedent( """\ @@ -438,11 +379,11 @@ def test_static_nic_without_ipv6_netmask(self): IPv6ADDR|1 = fc00:10:20:87::154 """ ) - nc = self._get_NicConfigurator(config) - with self.assertRaises(ValueError): + nc = self._get_NicConfigurator(tmp_path, config) + with pytest.raises(ValueError): nc.generate() - def test_non_primary_nic_with_gateway(self): + def test_non_primary_nic_with_gateway(self, tmp_path): """A non primary nic set can have a gateway.""" config = textwrap.dedent( """\ @@ -465,22 +406,19 @@ def test_non_primary_nic_with_gateway(self): GATEWAY = 10.20.87.253 """ ) - nc = self._get_NicConfigurator(config) - self.assertEqual( - { - "NIC1": { - "match": {"macaddress": "00:50:56:a6:8c:08"}, - "wakeonlan": True, - "dhcp4": False, - "addresses": ["10.20.87.154/22"], - "routes": [{"to": "10.20.84.0/22", "via": "10.20.87.253"}], - "set-name": "NIC1", - } - }, - nc.generate(), - ) - - def test_cust_non_primary_nic_with_gateway_(self): + nc = self._get_NicConfigurator(tmp_path, config) + assert { + "NIC1": { + "match": {"macaddress": "00:50:56:a6:8c:08"}, + "wakeonlan": True, + "dhcp4": False, + "addresses": ["10.20.87.154/22"], + "routes": [{"to": 
"10.20.84.0/22", "via": "10.20.87.253"}], + "set-name": "NIC1", + } + } == nc.generate() + + def test_cust_non_primary_nic_with_gateway_(self, tmp_path): """A customer non primary nic set can have a gateway.""" config = textwrap.dedent( """\ @@ -512,25 +450,22 @@ def test_cust_non_primary_nic_with_gateway_(self): UTC = yes """ ) - nc = self._get_NicConfigurator(config) - self.assertEqual( - { - "NIC1": { - "match": {"macaddress": "00:50:56:ac:d1:8a"}, - "wakeonlan": True, - "dhcp4": False, - "addresses": ["100.115.223.75/24"], - "routes": [ - {"to": "100.115.223.0/24", "via": "100.115.223.254"} - ], - "set-name": "NIC1", - "nameservers": {"addresses": ["8.8.8.8"]}, - } - }, - nc.generate(), - ) - - def test_a_primary_nic_with_gateway(self): + nc = self._get_NicConfigurator(tmp_path, config) + assert { + "NIC1": { + "match": {"macaddress": "00:50:56:ac:d1:8a"}, + "wakeonlan": True, + "dhcp4": False, + "addresses": ["100.115.223.75/24"], + "routes": [ + {"to": "100.115.223.0/24", "via": "100.115.223.254"} + ], + "set-name": "NIC1", + "nameservers": {"addresses": ["8.8.8.8"]}, + } + } == nc.generate() + + def test_a_primary_nic_with_gateway(self, tmp_path): """A primary nic set can have a gateway.""" config = textwrap.dedent( """\ @@ -554,33 +489,30 @@ def test_a_primary_nic_with_gateway(self): GATEWAY = 10.20.87.253 """ ) - nc = self._get_NicConfigurator(config) - self.assertEqual( - { - "NIC1": { - "match": {"macaddress": "00:50:56:a6:8c:08"}, - "wakeonlan": True, - "dhcp4": False, - "addresses": ["10.20.87.154/22"], - "routes": [{"to": "0.0.0.0/0", "via": "10.20.87.253"}], - "set-name": "NIC1", - } - }, - nc.generate(), - ) + nc = self._get_NicConfigurator(tmp_path, config) + assert { + "NIC1": { + "match": {"macaddress": "00:50:56:a6:8c:08"}, + "wakeonlan": True, + "dhcp4": False, + "addresses": ["10.20.87.154/22"], + "routes": [{"to": "0.0.0.0/0", "via": "10.20.87.253"}], + "set-name": "NIC1", + } + } == nc.generate() def test_meta_data(self): cf = 
ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") conf = Config(cf) - self.assertIsNone(conf.meta_data_name) + assert conf.meta_data_name is None cf._insertKey("CLOUDINIT|METADATA", "test-metadata") conf = Config(cf) - self.assertEqual("test-metadata", conf.meta_data_name) + assert "test-metadata" == conf.meta_data_name def test_user_data(self): cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") conf = Config(cf) - self.assertIsNone(conf.user_data_name) + assert conf.user_data_name is None cf._insertKey("CLOUDINIT|USERDATA", "test-userdata") conf = Config(cf) - self.assertEqual("test-userdata", conf.user_data_name) + assert "test-userdata" == conf.user_data_name diff --git a/tests/unittests/test_all_stages.py b/tests/unittests/test_all_stages.py index 90bde5e1..1b66e695 100644 --- a/tests/unittests/test_all_stages.py +++ b/tests/unittests/test_all_stages.py @@ -15,9 +15,8 @@ class Sync: """ def __init__(self, name: str, path: str): - self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) + self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) self.sock.connect(f"{path}/share/{name}.sock") - self.sock.bind(f"{path}/share/{name}-return.sock") self.sock.sendall(b"start") def receive(self): diff --git a/tests/unittests/test_conftest.py b/tests/unittests/test_conftest.py index d1a4be23..bc2db355 100644 --- a/tests/unittests/test_conftest.py +++ b/tests/unittests/test_conftest.py @@ -2,7 +2,6 @@ from cloudinit import subp from conftest import UnexpectedSubpError -from tests.unittests.helpers import CiTestCase class TestDisableSubpUsage: @@ -51,27 +50,3 @@ def test_subp_usage_can_be_conditionally_reenabled_for_multiple_cmds(self): def test_both_marks_raise_an_error(self): with pytest.raises(UnexpectedSubpError, match="marked both"): subp.subp(["sh"]) - - -class TestDisableSubpUsageInTestSubclass(CiTestCase): - """Test that disable_subp_usage doesn't impact CiTestCase's subp logic. 
- - Once the rest of the CiTestCase tests are removed, this class - should be removed as well. - """ - - def test_using_subp_raises_exception(self): - with pytest.raises(Exception): - subp.subp(["some", "args"]) - - def test_typeerrors_on_incorrect_usage(self): - with pytest.raises(TypeError): - subp.subp() - - def test_subp_usage_can_be_reenabled(self): - _old_allowed_subp = self.allow_subp - self.allowed_subp = True - try: - subp.subp(["sh", "-c", "true"]) - finally: - self.allowed_subp = _old_allowed_subp diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py index 204765e4..7861afc2 100644 --- a/tests/unittests/test_data.py +++ b/tests/unittests/test_data.py @@ -23,7 +23,6 @@ from cloudinit import util from cloudinit.config.modules import Modules from cloudinit.settings import DEFAULT_RUN_DIR, PER_INSTANCE -from tests.unittests import helpers from tests.unittests.util import FakeDataSource MPATH = "cloudinit.stages" @@ -790,29 +789,29 @@ def test_compressed_in_userdata(self, ud_proc): assert count_messages(message) == 1 -class TestConvertString(helpers.TestCase): +class TestConvertString: def test_handles_binary_non_utf8_decodable(self): """Printable unicode (not utf8-decodable) is safely converted.""" blob = b"#!/bin/bash\necho \xc3\x84\n" msg = ud.convert_string(blob) - self.assertEqual(blob, msg.get_payload(decode=True)) + assert blob == msg.get_payload(decode=True) def test_handles_binary_utf8_decodable(self): blob = b"\x32\x32" msg = ud.convert_string(blob) - self.assertEqual(blob, msg.get_payload(decode=True)) + assert blob == msg.get_payload(decode=True) def test_handle_headers(self): text = "hi mom" msg = ud.convert_string(text) - self.assertEqual(text, msg.get_payload(decode=False)) + assert text == msg.get_payload(decode=False) def test_handle_mime_parts(self): """Mime parts are properly returned as a mime message.""" message = MIMEBase("text", "plain") message.set_payload("Just text") msg = ud.convert_string(str(message)) - 
self.assertEqual("Just text", msg.get_payload(decode=False)) + assert "Just text" == msg.get_payload(decode=False) class TestFetchBaseConfig: diff --git a/tests/unittests/test_dmi.py b/tests/unittests/test_dmi.py index e8eeb372..c951c93f 100644 --- a/tests/unittests/test_dmi.py +++ b/tests/unittests/test_dmi.py @@ -1,3 +1,5 @@ +# This file is part of cloud-init. See LICENSE file for license information. +# pylint: disable=attribute-defined-outside-init import os from unittest import mock @@ -97,7 +99,7 @@ def test_dmidecode_used_if_no_sysfs_file_on_disk(self, mocker): "x-version", "x86_64", ) - expected_dmi_value == dmi.read_dmi_data("use-dmidecode") + assert expected_dmi_value == dmi.read_dmi_data("use-dmidecode") def test_dmidecode_not_used_on_arm(self, mocker): mocker.patch("cloudinit.dmi.DMIDECODE_TO_KERNEL", {}) diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 4ff7450c..a55ade23 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -4548,17 +4548,30 @@ def testsimple_convert_and_render(self): ("v2-dns-no-dhcp", "yaml"), ], ) - def test_config(self, expected_name, yaml_version): + @mock.patch("cloudinit.subp.which") + def test_config(self, m_which, expected_name, yaml_version): entry = NETWORK_CONFIGS[expected_name] + + m_which.return_value = "/sbin/ip" + files = self._render_and_read( + network_config=yaml.safe_load(entry[yaml_version]) + ) + assert ( + entry["expected_eni_ip_cmd"].splitlines() + == files["/etc/network/interfaces"].splitlines() + ) + + m_which.return_value = None files = self._render_and_read( network_config=yaml.safe_load(entry[yaml_version]) ) assert ( - entry["expected_eni"].splitlines() + entry["expected_eni_route_cmd"].splitlines() == files["/etc/network/interfaces"].splitlines() ) - def test_routes_rendered(self): + @mock.patch("cloudinit.subp.which") + def test_routes_rendered_ip_cmd(self, m_which): # as reported in bug 1649652 conf = [ { @@ -4603,6 +4616,85 @@ def 
test_routes_rendered(self): }, ] + m_which.return_value = "/sbin/ip" + files = self._render_and_read( + network_config={"config": conf, "version": 1} + ) + expected = [ + "auto lo", + "iface lo inet loopback", + "auto eth0", + "iface eth0 inet static", + " address 172.23.31.42/26", + " gateway 172.23.31.2", + "post-up ip route add 10.0.0.0/12 via " + "172.23.31.1 metric 0 || true", + "pre-down ip route del 10.0.0.0/12 via " + "172.23.31.1 metric 0 || true", + "post-up ip route add 192.168.2.0/16 via " + "172.23.31.1 metric 0 || true", + "pre-down ip route del 192.168.2.0/16 via " + "172.23.31.1 metric 0 || true", + "post-up ip route add 10.0.200.0/16 via " + "172.23.31.1 metric 1 || true", + "pre-down ip route del 10.0.200.0/16 via " + "172.23.31.1 metric 1 || true", + "post-up ip route add 10.0.0.100/32 via " + "172.23.31.1 metric 1 || true", + "pre-down ip route del 10.0.0.100/32 via " + "172.23.31.1 metric 1 || true", + ] + found = files["/etc/network/interfaces"].splitlines() + + assert expected == [line for line in found if line] + + @mock.patch("cloudinit.subp.which") + def test_routes_rendered_route_cmd(self, m_which): + # as reported in bug 1649652 + conf = [ + { + "name": "eth0", + "type": "physical", + "subnets": [ + { + "address": "172.23.31.42/26", + "dns_nameservers": [], + "gateway": "172.23.31.2", + "type": "static", + } + ], + }, + { + "type": "route", + "id": 4, + "metric": 0, + "destination": "10.0.0.0/12", + "gateway": "172.23.31.1", + }, + { + "type": "route", + "id": 5, + "metric": 0, + "destination": "192.168.2.0/16", + "gateway": "172.23.31.1", + }, + { + "type": "route", + "id": 6, + "metric": 1, + "destination": "10.0.200.0/16", + "gateway": "172.23.31.1", + }, + { + "type": "route", + "id": 7, + "metric": 1, + "destination": "10.0.0.100/32", + "gateway": "172.23.31.1", + }, + ] + + m_which.return_value = None files = self._render_and_read( network_config={"config": conf, "version": 1} ) @@ -4634,7 +4726,88 @@ def 
test_routes_rendered(self): assert expected == [line for line in found if line] - def test_ipv6_static_routes(self): + @mock.patch("cloudinit.subp.which") + def test_ipv6_static_routes_ip_cmd(self, m_which): + # as reported in bug 1818669 + conf = [ + { + "name": "eno3", + "type": "physical", + "subnets": [ + { + "address": "fd00::12/64", + "dns_nameservers": ["fd00:2::15"], + "gateway": "fd00::1", + "ipv6": True, + "type": "static", + "routes": [ + { + "netmask": "32", + "network": "fd00:12::", + "gateway": "fd00::2", + }, + {"network": "fd00:14::", "gateway": "fd00::3"}, + { + "destination": "fe00:14::/48", + "gateway": "fe00::4", + "metric": 500, + }, + { + "gateway": "192.168.23.1", + "metric": 999, + "netmask": 24, + "network": "192.168.23.0", + }, + { + "destination": "10.23.23.0/24", + "gateway": "10.23.23.2", + "metric": 300, + }, + ], + } + ], + }, + ] + + m_which.return_value = "/sbin/ip" + files = self._render_and_read( + network_config={"config": conf, "version": 1} + ) + expected = [ + "auto lo", + "iface lo inet loopback", + "auto eno3", + "iface eno3 inet6 static", + " address fd00::12/64", + " dns-nameservers fd00:2::15", + " gateway fd00::1", + " post-up ip -family inet6 route add fd00:12::/32 via fd00::2 " + "|| true", + " pre-down ip -family inet6 route del fd00:12::/32 via fd00::2 " + "|| true", + " post-up ip -family inet6 route add fd00:14::/64 via fd00::3 " + "|| true", + " pre-down ip -family inet6 route del fd00:14::/64 via fd00::3 " + "|| true", + " post-up ip -family inet6 route add fe00:14::/48 via " + "fe00::4 metric 500 || true", + " pre-down ip -family inet6 route del fe00:14::/48 via " + "fe00::4 metric 500 || true", + " post-up ip route add 192.168.23.0/24 via " + "192.168.23.1 metric 999 || true", + " pre-down ip route del 192.168.23.0/24 via " + "192.168.23.1 metric 999 || true", + " post-up ip route add 10.23.23.0/24 via " + "10.23.23.2 metric 300 || true", + " pre-down ip route del 10.23.23.0/24 via " + "10.23.23.2 metric 300 || 
true", + ] + found = files["/etc/network/interfaces"].splitlines() + + assert expected == [line for line in found if line] + + @mock.patch("cloudinit.subp.which") + def test_ipv6_static_routes_route_cmd(self, m_which): # as reported in bug 1818669 conf = [ { @@ -4676,6 +4849,7 @@ def test_ipv6_static_routes(self): }, ] + m_which.return_value = None files = self._render_and_read( network_config={"config": conf, "version": 1} ) diff --git a/tests/unittests/test_net_freebsd.py b/tests/unittests/test_net_freebsd.py index 5b21e0e7..67fe27c5 100644 --- a/tests/unittests/test_net_freebsd.py +++ b/tests/unittests/test_net_freebsd.py @@ -1,10 +1,12 @@ import os +from unittest import mock +import pytest import yaml import cloudinit.net import cloudinit.net.network_state -from tests.unittests.helpers import CiTestCase, dir2dict, mock, readResource +from tests.unittests.helpers import dir2dict, readResource SAMPLE_FREEBSD_IFCONFIG_OUT = readResource("netinfo/freebsd-ifconfig-output") V1 = """ @@ -29,7 +31,7 @@ """ -class TestInterfacesByMac(CiTestCase): +class TestInterfacesByMac: @mock.patch("cloudinit.subp.subp") @mock.patch("cloudinit.util.is_FreeBSD") def test_get_interfaces_by_mac(self, mock_is_FreeBSD, mock_subp): @@ -44,30 +46,18 @@ def test_get_interfaces_by_mac(self, mock_is_FreeBSD, mock_subp): } -class TestFreeBSDRoundTrip(CiTestCase): - def _render_and_read( - self, network_config=None, state=None, netplan_path=None, target=None - ): - if target is None: - target = self.tmp_dir() - os.mkdir("%s/etc" % target) - with open("%s/etc/rc.conf" % target, "a") as fd: - fd.write("# dummy rc.conf\n") - with open("%s/etc/resolv.conf" % target, "a") as fd: - fd.write("# dummy resolv.conf\n") - - if network_config: - ns = cloudinit.net.network_state.parse_net_config_data( - network_config - ) - elif state: - ns = state - else: - raise ValueError("Expected data or state, got neither") +@pytest.mark.usefixtures("fake_filesystem") +class TestFreeBSDRoundTrip: + def 
_render_and_read(self, ns): + os.mkdir("/etc") + with open("/etc/rc.conf", "a") as fd: + fd.write("# dummy rc.conf\n") + with open("/etc/resolv.conf", "a") as fd: + fd.write("# dummy resolv.conf\n") renderer = cloudinit.net.freebsd.Renderer() - renderer.render_network_state(ns, target=target) - return dir2dict(target) + renderer.render_network_state(ns) + return dir2dict("/") @mock.patch( "cloudinit.subp.subp", return_value=(SAMPLE_FREEBSD_IFCONFIG_OUT, 0) @@ -79,10 +69,10 @@ def test_render_output_has_yaml(self, m_is_freebsd, m_subp): } network_config = yaml.safe_load(entry["yaml"]) ns = cloudinit.net.network_state.parse_net_config_data(network_config) - files = self._render_and_read(state=ns) + files = self._render_and_read(ns) assert files == { - "/etc/resolv.conf": "# dummy resolv.conf\n", - "/etc/rc.conf": ( + "etc/resolv.conf": "# dummy resolv.conf\n", + "etc/rc.conf": ( "# dummy rc.conf\n" "ifconfig_eno1=" "'inet 172.20.80.129 netmask 255.255.255.128 mtu 1470'\n" diff --git a/tests/unittests/test_pathprefix2dict.py b/tests/unittests/test_pathprefix2dict.py index 997061bc..834327a0 100644 --- a/tests/unittests/test_pathprefix2dict.py +++ b/tests/unittests/test_pathprefix2dict.py @@ -1,43 +1,40 @@ # This file is part of cloud-init. See LICENSE file for license information. 
-import shutil -import tempfile +import pytest from cloudinit import util -from tests.unittests.helpers import TestCase, populate_dir +from tests.unittests.helpers import populate_dir -class TestPathPrefix2Dict(TestCase): - def setUp(self): - super(TestPathPrefix2Dict, self).setUp() - self.tmp = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.tmp) - - def test_required_only(self): +class TestPathPrefix2Dict: + def test_required_only(self, tmp_path): dirdata = {"f1": b"f1content", "f2": b"f2content"} - populate_dir(self.tmp, dirdata) + populate_dir(str(tmp_path), dirdata) - ret = util.pathprefix2dict(self.tmp, required=["f1", "f2"]) - self.assertEqual(dirdata, ret) + ret = util.pathprefix2dict(str(tmp_path), required=["f1", "f2"]) + assert dirdata == ret - def test_required_missing(self): + def test_required_missing(self, tmp_path): dirdata = {"f1": b"f1content"} - populate_dir(self.tmp, dirdata) + populate_dir(str(tmp_path), dirdata) kwargs = {"required": ["f1", "f2"]} - self.assertRaises(ValueError, util.pathprefix2dict, self.tmp, **kwargs) + with pytest.raises(ValueError): + util.pathprefix2dict(str(tmp_path), **kwargs) - def test_no_required_and_optional(self): + def test_no_required_and_optional(self, tmp_path): dirdata = {"f1": b"f1c", "f2": b"f2c"} - populate_dir(self.tmp, dirdata) + populate_dir(str(tmp_path), dirdata) ret = util.pathprefix2dict( - self.tmp, required=None, optional=["f1", "f2"] + str(tmp_path), required=None, optional=["f1", "f2"] ) - self.assertEqual(dirdata, ret) + assert dirdata == ret - def test_required_and_optional(self): + def test_required_and_optional(self, tmp_path): dirdata = {"f1": b"f1c", "f2": b"f2c"} - populate_dir(self.tmp, dirdata) + populate_dir(str(tmp_path), dirdata) - ret = util.pathprefix2dict(self.tmp, required=["f1"], optional=["f2"]) - self.assertEqual(dirdata, ret) + ret = util.pathprefix2dict( + str(tmp_path), required=["f1"], optional=["f2"] + ) + assert dirdata == ret diff --git 
a/tests/unittests/test_registry.py b/tests/unittests/test_registry.py index b422ace0..ef7e39ba 100644 --- a/tests/unittests/test_registry.py +++ b/tests/unittests/test_registry.py @@ -1,30 +1,31 @@ # This file is part of cloud-init. See LICENSE file for license information. +from unittest import mock + +import pytest + from cloudinit.registry import DictRegistry -from tests.unittests.helpers import TestCase, mock -class TestDictRegistry(TestCase): +class TestDictRegistry: def test_added_item_included_in_output(self): registry = DictRegistry() item_key, item_to_register = "test_key", mock.Mock() registry.register_item(item_key, item_to_register) - self.assertEqual( - {item_key: item_to_register}, registry.registered_items - ) + assert {item_key: item_to_register} == registry.registered_items def test_registry_starts_out_empty(self): - self.assertEqual({}, DictRegistry().registered_items) + assert {} == DictRegistry().registered_items def test_modifying_registered_items_isnt_exposed_to_other_callers(self): registry = DictRegistry() registry.registered_items["test_item"] = mock.Mock() - self.assertEqual({}, registry.registered_items) + assert {} == registry.registered_items def test_keys_cannot_be_replaced(self): registry = DictRegistry() item_key = "test_key" registry.register_item(item_key, mock.Mock()) - self.assertRaises( - ValueError, registry.register_item, item_key, mock.Mock() - ) + + with pytest.raises(ValueError): + registry.register_item(item_key, mock.Mock()) diff --git a/tests/unittests/test_simpletable.py b/tests/unittests/test_simpletable.py index ee7eb0b4..f5d12f20 100644 --- a/tests/unittests/test_simpletable.py +++ b/tests/unittests/test_simpletable.py @@ -10,7 +10,6 @@ """ from cloudinit.simpletable import SimpleTable -from tests.unittests.helpers import CiTestCase # Examples rendered by cloud-init using PrettyTable NET_DEVICE_FIELDS = ("Device", "Up", "Address", "Mask", "Scope", "Hw-Address") @@ -85,25 +84,25 @@ +--------+""" -class 
TestSimpleTable(CiTestCase): +class TestSimpleTable: def test_no_rows(self): """An empty table is rendered as PrettyTable would have done it.""" table = SimpleTable(NO_ROWS_FIELDS) - self.assertEqual(str(table), NO_ROWS_TABLE) + assert str(table) == NO_ROWS_TABLE def test_net_dev(self): """Net device info is rendered as it was with PrettyTable.""" table = SimpleTable(NET_DEVICE_FIELDS) for row in NET_DEVICE_ROWS: table.add_row(row) - self.assertEqual(str(table), NET_DEVICE_TABLE) + assert str(table) == NET_DEVICE_TABLE def test_route_ipv4(self): """Route IPv4 info is rendered as it was with PrettyTable.""" table = SimpleTable(ROUTE_IPV4_FIELDS) for row in ROUTE_IPV4_ROWS: table.add_row(row) - self.assertEqual(str(table), ROUTE_IPV4_TABLE) + assert str(table) == ROUTE_IPV4_TABLE def test_authorized_keys(self): """SSH authorized keys are rendered as they were with PrettyTable.""" @@ -116,4 +115,4 @@ def test_get_string(self): table = SimpleTable(AUTHORIZED_KEYS_FIELDS) for row in AUTHORIZED_KEYS_ROWS: table.add_row(row) - self.assertEqual(table.get_string(), str(table)) + assert table.get_string() == str(table) diff --git a/tests/unittests/test_subp.py b/tests/unittests/test_subp.py index eca48800..27921955 100644 --- a/tests/unittests/test_subp.py +++ b/tests/unittests/test_subp.py @@ -3,36 +3,38 @@ """Tests for cloudinit.subp utility functions""" import json +import logging import os import stat import sys from unittest import mock +import pytest + from cloudinit import subp, util from tests.helpers import get_top_level_dir -from tests.unittests.helpers import CiTestCase SH = "sh" BOGUS_COMMAND = "this-is-not-expected-to-be-a-program-name" -class TestPrependBaseCommands(CiTestCase): - with_logs = True +class TestPrependBaseCommands: def test_prepend_base_command_errors_on_neither_string_nor_list(self): """Raise an error for each command which is not a string or list.""" orig_commands = ["ls", 1, {"not": "gonna work"}, ["basecmd", "list"]] - with 
self.assertRaises(TypeError) as context_manager: + with pytest.raises( + TypeError, + match="Invalid basecmd config. These commands are not a string or" + " list:\n1\n{'not': 'gonna work'}", + ): subp.prepend_base_command( base_command="basecmd", commands=orig_commands ) - self.assertEqual( - "Invalid basecmd config. These commands are not a string or" - " list:\n1\n{'not': 'gonna work'}", - str(context_manager.exception), - ) - def test_prepend_base_command_warns_on_non_base_string_commands(self): + def test_prepend_base_command_warns_on_non_base_string_commands( + self, caplog + ): """Warn on each non-base for commands of type string.""" orig_commands = [ "ls", @@ -43,14 +45,16 @@ def test_prepend_base_command_warns_on_non_base_string_commands(self): fixed_commands = subp.prepend_base_command( base_command="basecmd", commands=orig_commands ) - self.assertEqual( - "WARNING: Non-basecmd commands in basecmd config:\n" - "ls\ntouch /blah\n", - self.logs.getvalue(), - ) - self.assertEqual(orig_commands, fixed_commands) - - def test_prepend_base_command_prepends_on_non_base_list_commands(self): + assert ( + mock.ANY, + logging.WARNING, + "Non-basecmd commands in basecmd config:\nls\ntouch /blah", + ) in caplog.record_tuples + assert orig_commands == fixed_commands + + def test_prepend_base_command_prepends_on_non_base_list_commands( + self, caplog + ): """Prepend 'basecmd' for each non-basecmd command of type list.""" orig_commands = [ ["ls"], @@ -67,10 +71,10 @@ def test_prepend_base_command_prepends_on_non_base_list_commands(self): fixed_commands = subp.prepend_base_command( base_command="basecmd", commands=orig_commands ) - self.assertEqual("", self.logs.getvalue()) - self.assertEqual(expected, fixed_commands) + assert "" == caplog.text + assert expected == fixed_commands - def test_prepend_base_command_removes_first_item_when_none(self): + def test_prepend_base_command_removes_first_item_when_none(self, caplog): """Remove the first element of a non-basecmd when it 
is None.""" orig_commands = [ [None, "ls"], @@ -87,20 +91,12 @@ def test_prepend_base_command_removes_first_item_when_none(self): fixed_commands = subp.prepend_base_command( base_command="basecmd", commands=orig_commands ) - self.assertEqual("", self.logs.getvalue()) - self.assertEqual(expected, fixed_commands) - + assert "" == caplog.text + assert expected == fixed_commands -class TestSubp(CiTestCase): - allowed_subp = [ - SH, - "cat", - CiTestCase.SUBP_SHELL_TRUE, - BOGUS_COMMAND, - sys.executable, - "env", - ] +@pytest.mark.allow_all_subp +class TestSubp: stdin2err = [SH, "-c", "cat >&2"] stdin2out = ["cat"] utf8_invalid = b"ab\xaadef" @@ -112,38 +108,38 @@ def printf_cmd(arg): """print with builtin printf""" return [SH, "-c", 'printf "$@"', "printf", arg] - def test_subp_handles_bytestrings(self): + def test_subp_handles_bytestrings(self, tmp_path): """subp can run a bytestring command if shell is True.""" - tmp_file = self.tmp_path("test.out") + tmp_file = str(tmp_path / "test.out") cmd = "echo HI MOM >> {tmp_file}".format(tmp_file=tmp_file) (out, _err) = subp.subp(cmd.encode("utf-8"), shell=True) - self.assertEqual("", out) - self.assertEqual("", _err) - self.assertEqual("HI MOM\n", util.load_text_file(tmp_file)) + assert "" == out + assert "" == _err + assert "HI MOM\n" == util.load_text_file(tmp_file) - def test_subp_handles_strings(self): + def test_subp_handles_strings(self, tmp_path): """subp can run a string command if shell is True.""" - tmp_file = self.tmp_path("test.out") + tmp_file = str(tmp_path / "test.out") cmd = "echo HI MOM >> {tmp_file}".format(tmp_file=tmp_file) (out, _err) = subp.subp(cmd, shell=True) - self.assertEqual("", out) - self.assertEqual("", _err) - self.assertEqual("HI MOM\n", util.load_text_file(tmp_file)) + assert "" == out + assert "" == _err + assert "HI MOM\n" == util.load_text_file(tmp_file) def test_subp_handles_utf8(self): # The given bytes contain utf-8 accented characters as seen in e.g. 
# the "deja dup" package in Ubuntu. cmd = self.printf_cmd(self.utf8_valid_2) (out, _err) = subp.subp(cmd, capture=True) - self.assertEqual(out, self.utf8_valid_2.decode("utf-8")) + assert out == self.utf8_valid_2.decode("utf-8") def test_subp_respects_decode_false(self): (out, err) = subp.subp( self.stdin2out, capture=True, decode=False, data=self.utf8_valid ) - self.assertTrue(isinstance(out, bytes)) - self.assertTrue(isinstance(err, bytes)) - self.assertEqual(out, self.utf8_valid) + assert isinstance(out, bytes) + assert isinstance(err, bytes) + assert out == self.utf8_valid def test_subp_decode_ignore(self): """ensure that invalid utf-8 is ignored with the "ignore" kwarg""" @@ -166,14 +162,14 @@ def test_subp_decode_strict_valid_utf8(self): (out, _err) = subp.subp( self.stdin2out, capture=True, decode="strict", data=self.utf8_valid ) - self.assertEqual(out, self.utf8_valid.decode("utf-8")) + assert out == self.utf8_valid.decode("utf-8") def test_subp_decode_invalid_utf8_replaces(self): (out, _err) = subp.subp( self.stdin2out, capture=True, data=self.utf8_invalid ) expected = self.utf8_invalid.decode("utf-8", "replace") - self.assertEqual(out, expected) + assert out == expected def test_subp_decode_strict_raises(self): args = [] @@ -183,7 +179,8 @@ def test_subp_decode_strict_raises(self): "decode": "strict", "data": self.utf8_invalid, } - self.assertRaises(UnicodeDecodeError, subp.subp, *args, **kwargs) + with pytest.raises(UnicodeDecodeError): + subp.subp(*args, **kwargs) def test_subp_capture_stderr(self): data = b"hello world" @@ -194,8 +191,8 @@ def test_subp_capture_stderr(self): data=data, update_env={"LC_ALL": "C"}, ) - self.assertEqual(err, data) - self.assertEqual(out, b"") + assert err == data + assert out == b"" def test_subp_reads_env(self): with mock.patch.dict("os.environ", values={"FOO": "BAR"}): @@ -217,54 +214,48 @@ def test_subp_update_env(self): set(out.splitlines()) ) - def test_subp_warn_missing_shebang(self): + def 
test_subp_warn_missing_shebang(self, tmp_path): """Warn on no #! in script""" - noshebang = self.tmp_path("noshebang") + noshebang = str(tmp_path / "noshebang") util.write_file(noshebang, "true\n") print("os is %s" % os) os.chmod(noshebang, os.stat(noshebang).st_mode | stat.S_IEXEC) - with self.allow_subp([noshebang]): - self.assertRaisesRegex( - subp.ProcessExecutionError, - r"Missing #! in script\?", - subp.subp, + with pytest.raises( + subp.ProcessExecutionError, match=r"Missing #! in script\?" + ): + subp.subp( (noshebang,), ) def test_returns_none_if_no_capture(self): (out, err) = subp.subp(self.stdin2out, data=b"", capture=False) - self.assertIsNone(err) - self.assertIsNone(out) + assert err is None + assert out is None def test_exception_has_out_err_are_bytes_if_decode_false(self): """Raised exc should have stderr, stdout as bytes if no decode.""" - with self.assertRaises(subp.ProcessExecutionError) as cm: + with pytest.raises(subp.ProcessExecutionError) as exc_info: subp.subp([BOGUS_COMMAND], decode=False) - self.assertTrue(isinstance(cm.exception.stdout, bytes)) - self.assertTrue(isinstance(cm.exception.stderr, bytes)) + assert isinstance(exc_info.value.stdout, bytes) + assert isinstance(exc_info.value.stderr, bytes) def test_exception_has_out_err_are_bytes_if_decode_true(self): """Raised exc should have stderr, stdout as string if no decode.""" - with self.assertRaises(subp.ProcessExecutionError) as cm: + with pytest.raises(subp.ProcessExecutionError) as exc_info: subp.subp([BOGUS_COMMAND], decode=True) - self.assertTrue(isinstance(cm.exception.stdout, str)) - self.assertTrue(isinstance(cm.exception.stderr, str)) + assert isinstance(exc_info.value.stdout, str) + assert isinstance(exc_info.value.stderr, str) def test_exception_invalid_command(self): args = [None, "first", "arg", "missing"] - with self.assertRaises( - subp.ProcessExecutionError, msg="Running invalid command" - ): - with self.allow_subp(args): - subp.subp(args) + with 
pytest.raises(subp.ProcessExecutionError): + subp.subp(args) def test_bunch_of_slashes_in_path(self): - self.assertEqual( - "/target/my/path/", subp.target_path("/target/", "//my/path/") - ) - self.assertEqual( - "/target/my/path/", subp.target_path("/target/", "///my/path/") + assert "/target/my/path/" == subp.target_path("/target/", "//my/path/") + assert "/target/my/path/" == subp.target_path( + "/target/", "///my/path/" ) def test_c_lang_can_take_utf8_args(self): @@ -308,4 +299,4 @@ def test_c_lang_can_take_utf8_args(self): data=json.dumps(cmd).encode("utf-8"), decode=False, ) - self.assertEqual(self.utf8_valid, out) + assert self.utf8_valid == out diff --git a/tests/unittests/test_temp_utils.py b/tests/unittests/test_temp_utils.py index 6cbc3728..670e92aa 100644 --- a/tests/unittests/test_temp_utils.py +++ b/tests/unittests/test_temp_utils.py @@ -5,11 +5,13 @@ import os from tempfile import gettempdir +import pytest + from cloudinit.temp_utils import mkdtemp, mkstemp, tempdir -from tests.unittests.helpers import CiTestCase, wrap_and_call +from tests.unittests.helpers import wrap_and_call -class TestTempUtils(CiTestCase): +class TestTempUtils: prefix = gettempdir() def test_mkdtemp_default_non_root(self): @@ -29,8 +31,8 @@ def fake_mkdtemp(*args, **kwargs): }, mkdtemp, ) - self.assertEqual("/fake/return/path", retval) - self.assertEqual([{"dir": self.prefix}], calls) + assert "/fake/return/path" == retval + assert [{"dir": self.prefix}] == calls def test_mkdtemp_default_non_root_needs_exe(self): """mkdtemp creates a dir under /var/tmp/cloud-init when needs_exe.""" @@ -51,8 +53,8 @@ def fake_mkdtemp(*args, **kwargs): mkdtemp, needs_exe=True, ) - self.assertEqual("/fake/return/path", retval) - self.assertEqual([{"dir": "/var/tmp/cloud-init"}], calls) + assert "/fake/return/path" == retval + assert [{"dir": "/var/tmp/cloud-init"}] == calls def test_mkdtemp_default_root(self): """mkdtemp creates a dir under /run/cloud-init for the privileged.""" @@ -71,8 +73,8 @@ 
def fake_mkdtemp(*args, **kwargs): }, mkdtemp, ) - self.assertEqual("/fake/return/path", retval) - self.assertEqual([{"dir": "/run/cloud-init/tmp"}], calls) + assert "/fake/return/path" == retval + assert [{"dir": "/run/cloud-init/tmp"}] == calls def test_mkstemp_default_non_root(self): """mkstemp creates secure tempfile under /tmp for the unprivileged.""" @@ -91,8 +93,8 @@ def fake_mkstemp(*args, **kwargs): }, mkstemp, ) - self.assertEqual("/fake/return/path", retval) - self.assertEqual([{"dir": self.prefix}], calls) + assert "/fake/return/path" == retval + assert [{"dir": self.prefix}] == calls def test_mkstemp_default_root(self): """mkstemp creates a secure tempfile in /run/cloud-init for root.""" @@ -111,13 +113,13 @@ def fake_mkstemp(*args, **kwargs): }, mkstemp, ) - self.assertEqual("/fake/return/path", retval) - self.assertEqual([{"dir": "/run/cloud-init/tmp"}], calls) + assert "/fake/return/path" == retval + assert [{"dir": "/run/cloud-init/tmp"}] == calls def test_tempdir_error_suppression(self): """test tempdir suppresses errors during directory removal.""" - with self.assertRaises(OSError): + with pytest.raises(OSError): with tempdir(prefix="cloud-init-dhcp-") as tdir: os.rmdir(tdir) # As a result, the directory is already gone, diff --git a/tests/unittests/test_templating.py b/tests/unittests/test_templating.py index 9171a58a..3b14e8a3 100644 --- a/tests/unittests/test_templating.py +++ b/tests/unittests/test_templating.py @@ -4,7 +4,9 @@ # # This file is part of cloud-init. See LICENSE file for license information. 
+import logging import textwrap +from unittest import mock import pytest @@ -14,9 +16,7 @@ from tests.unittests import helpers as test_helpers -class TestTemplates(test_helpers.CiTestCase): - - with_logs = True +class TestTemplates: jinja_utf8 = b"It\xe2\x80\x99s not ascii, {{name}}\n" jinja_utf8_rbob = b"It\xe2\x80\x99s not ascii, bob\n".decode("utf-8") @@ -45,18 +45,18 @@ def test_render_basic(self): """ ) out_data = templater.basic_render(in_data, {"b": 2}) - self.assertEqual(expected_data.strip(), out_data) + assert expected_data.strip() == out_data def test_render_jinja(self): blob = """## template:jinja {{a}},{{b}}""" c = templater.render_string(blob, {"a": 1, "b": 2}) - self.assertEqual("1,2", c) + assert "1,2" == c def test_render_default(self): blob = """$a,$b""" c = templater.render_string(blob, {"a": 1, "b": 2}) - self.assertEqual("1,2", c) + assert "1,2" == c def test_render_basic_deeper(self): hn = "myfoohost.yahoo.com" @@ -68,21 +68,21 @@ def test_render_basic_deeper(self): }, } out_data = templater.render_string(in_data, params) - self.assertEqual(expected_data, out_data) + assert expected_data == out_data def test_render_basic_no_parens(self): hn = "myfoohost" in_data = "h=$hostname\nc=d\n" expected_data = "h=%s\nc=d\n" % hn out_data = templater.basic_render(in_data, {"hostname": hn}) - self.assertEqual(expected_data, out_data) + assert expected_data == out_data def test_render_basic_parens(self): hn = "myfoohost" in_data = "h = ${hostname}\nc=d\n" expected_data = "h = %s\nc=d\n" % hn out_data = templater.basic_render(in_data, {"hostname": hn}) - self.assertEqual(expected_data, out_data) + assert expected_data == out_data def test_render_basic2(self): mirror = "mymirror" @@ -96,30 +96,27 @@ def test_render_basic2(self): out_data = templater.basic_render( in_data, {"mirror": mirror, "codename": codename} ) - self.assertEqual(ex_data, out_data) + assert ex_data == out_data def test_jinja_nonascii_render_to_string(self): """Test jinja render_to_string 
with non-ascii content.""" - self.assertEqual( + assert ( templater.render_string( self.add_header("jinja", self.jinja_utf8), {"name": "bob"} - ), - self.jinja_utf8_rbob, + ) + == self.jinja_utf8_rbob ) def test_jinja_nonascii_render_undefined_variables_to_default_py3(self): """Test py3 jinja render_to_string with undefined variable default.""" - self.assertEqual( - templater.render_string( - self.add_header("jinja", self.jinja_utf8), {} - ), - self.jinja_utf8_rbob.replace("bob", "CI_MISSING_JINJA_VAR/name"), - ) + assert templater.render_string( + self.add_header("jinja", self.jinja_utf8), {} + ) == self.jinja_utf8_rbob.replace("bob", "CI_MISSING_JINJA_VAR/name") - def test_jinja_nonascii_render_to_file(self): + def test_jinja_nonascii_render_to_file(self, tmp_path): """Test jinja render_to_file of a filename with non-ascii content.""" - tmpl_fn = self.tmp_path("j-render-to-file.template") - out_fn = self.tmp_path("j-render-to-file.out") + tmpl_fn = str(tmp_path / "j-render-to-file.template") + out_fn = str(tmp_path / "j-render-to-file.out") write_file( filename=tmpl_fn, omode="wb", @@ -127,35 +124,38 @@ def test_jinja_nonascii_render_to_file(self): ) templater.render_to_file(tmpl_fn, out_fn, {"name": "bob"}) result = load_binary_file(out_fn).decode("utf-8") - self.assertEqual(result, self.jinja_utf8_rbob) + assert result == self.jinja_utf8_rbob - def test_jinja_nonascii_render_from_file(self): + def test_jinja_nonascii_render_from_file(self, tmp_path): """Test jinja render_from_file with non-ascii content.""" - tmpl_fn = self.tmp_path("j-render-from-file.template") + tmpl_fn = str(tmp_path / "j-render-from-file.template") write_file( tmpl_fn, omode="wb", content=self.add_header("jinja", self.jinja_utf8).encode("utf-8"), ) result = templater.render_from_file(tmpl_fn, {"name": "bob"}) - self.assertEqual(result, self.jinja_utf8_rbob) + assert result == self.jinja_utf8_rbob @test_helpers.skipIfJinja() - def 
test_jinja_warns_on_missing_dep_and_uses_basic_renderer(self): + def test_jinja_warns_on_missing_dep_and_uses_basic_renderer( + self, caplog, tmp_path + ): """Test jinja render_from_file will fallback to basic renderer.""" - tmpl_fn = self.tmp_path("j-render-from-file.template") + tmpl_fn = tmp_path("j-render-from-file.template") write_file( tmpl_fn, omode="wb", content=self.add_header("jinja", self.jinja_utf8).encode("utf-8"), ) result = templater.render_from_file(tmpl_fn, {"name": "bob"}) - self.assertEqual(result, self.jinja_utf8.decode()) - self.assertIn( - "WARNING: Jinja not available as the selected renderer for desired" + assert result == self.jinja_utf8.decode() + assert ( + mock.ANY, + logging.WARNING, + "Jinja not available as the selected renderer for desired" " template, reverting to the basic renderer.", - self.logs.getvalue(), - ) + ) in caplog.record_tuples def test_jinja_do_extension_render_to_string(self): """Test jinja render_to_string using do extension.""" @@ -164,11 +164,11 @@ def test_jinja_do_extension_render_to_string(self): "{% set r = [] %} {% set input = [1,2,3] %} " "{% for i in input %} {% do r.append(i) %} {% endfor %} {{r}}" ) - self.assertEqual( + assert ( templater.render_string( self.add_header("jinja", jinja_template), {} - ).strip(), - expected_result, + ).strip() + == expected_result ) diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 3bfaad6c..3cb01fd0 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -13,6 +13,7 @@ import stat import tempfile from collections import deque +from contextlib import nullcontext as does_not_raise from pathlib import Path from textwrap import dedent from unittest import mock @@ -36,7 +37,7 @@ from cloudinit.sources import DataSourceHostname from cloudinit.subp import SubpResult from tests.unittests import helpers -from tests.unittests.helpers import CiTestCase, skipIf, skipUnlessJinja +from tests.unittests.helpers import random_string, 
skipIf, skipUnlessJinja LOG = logging.getLogger(__name__) M_PATH = "cloudinit.util." @@ -638,56 +639,52 @@ def test_fetch_ssl_details( assert 2 == m_isdir.call_count == m_isfile.call_count -class TestSymlink(CiTestCase): - def test_sym_link_simple(self): - tmpd = self.tmp_dir() - link = self.tmp_path("link", tmpd) - target = self.tmp_path("target", tmpd) +class TestSymlink: + def test_sym_link_simple(self, tmp_path): + link = str(tmp_path / "link") + target = str(tmp_path / "target") util.write_file(target, "hello") util.sym_link(target, link) - self.assertTrue(os.path.exists(link)) - self.assertTrue(os.path.islink(link)) - - def test_sym_link_source_exists(self): - tmpd = self.tmp_dir() - link = self.tmp_path("link", tmpd) - target = self.tmp_path("target", tmpd) - target2 = self.tmp_path("target2", tmpd) + assert os.path.exists(link) + assert os.path.islink(link) + + def test_sym_link_source_exists(self, tmp_path): + link = str(tmp_path / "link") + target = str(tmp_path / "target") + target2 = str(tmp_path / "target2") util.write_file(target, "hello") util.write_file(target2, "hello2") util.sym_link(target, link) - self.assertTrue(os.path.exists(link)) + assert os.path.exists(link) util.sym_link(target2, link, force=True) - self.assertTrue(os.path.exists(link)) - self.assertEqual("hello2", util.load_text_file(link)) + assert os.path.exists(link) + assert "hello2" == util.load_text_file(link) - def test_sym_link_dangling_link(self): - tmpd = self.tmp_dir() - link = self.tmp_path("link", tmpd) - target = self.tmp_path("target", tmpd) + def test_sym_link_dangling_link(self, tmp_path): + link = str(tmp_path / "link") + target = str(tmp_path / "target") util.sym_link(target, link) - self.assertTrue(os.path.islink(link)) - self.assertFalse(os.path.exists(link)) + assert os.path.islink(link) + assert not os.path.exists(link) util.sym_link(target, link, force=True) - self.assertTrue(os.path.islink(link)) - self.assertFalse(os.path.exists(link)) + assert 
os.path.islink(link) + assert not os.path.exists(link) - def test_sym_link_create_dangling(self): - tmpd = self.tmp_dir() - link = self.tmp_path("link", tmpd) - target = self.tmp_path("target", tmpd) + def test_sym_link_create_dangling(self, tmp_path): + link = str(tmp_path / "link") + target = str(tmp_path / "target") util.sym_link(target, link) - self.assertTrue(os.path.islink(link)) - self.assertFalse(os.path.exists(link)) + assert os.path.islink(link) + assert not os.path.exists(link) -class TestUptime(CiTestCase): +class TestUptime: @mock.patch(M_PATH + "boottime") @mock.patch(M_PATH + "os.path.exists") @mock.patch(M_PATH + "time.time") @@ -698,75 +695,67 @@ def test_uptime_non_linux_path(self, m_time, m_exists, m_boottime): m_time.return_value = boottime + uptime m_exists.return_value = False result = util.uptime() - self.assertEqual(str(uptime), result) + assert str(uptime) == result -class TestShellify(CiTestCase): +class TestShellify: def test_input_dict_raises_type_error(self): - self.assertRaisesRegex( - TypeError, - "Input.*was.*dict.*xpected", - util.shellify, - {"mykey": "myval"}, - ) + with pytest.raises(TypeError, match="Input.*was.*dict.*xpected"): + util.shellify( + {"mykey": "myval"}, + ) def test_input_str_raises_type_error(self): - self.assertRaisesRegex( - TypeError, "Input.*was.*str.*xpected", util.shellify, "foobar" - ) + with pytest.raises(TypeError, match="Input.*was.*str.*xpected"): + util.shellify("foobar") def test_value_with_int_raises_type_error(self): - self.assertRaisesRegex( - TypeError, "shellify.*int", util.shellify, ["foo", 1] - ) + with pytest.raises(TypeError, match="shellify.*int"): + util.shellify(["foo", 1]) def test_supports_strings_and_lists(self): - self.assertEqual( - "\n".join( - [ - "#!/bin/sh", - "echo hi mom", - "'echo' 'hi dad'", - "'echo' 'hi' 'sis'", - "", - ] - ), - util.shellify( - ["echo hi mom", ["echo", "hi dad"], ("echo", "hi", "sis")] - ), + assert "\n".join( + [ + "#!/bin/sh", + "echo hi mom", + "'echo' 
'hi dad'", + "'echo' 'hi' 'sis'", + "", + ] + ) == util.shellify( + ["echo hi mom", ["echo", "hi dad"], ("echo", "hi", "sis")] ) def test_supports_comments(self): - self.assertEqual( - "\n".join(["#!/bin/sh", "echo start", "echo end", ""]), - util.shellify(["echo start", None, "echo end"]), - ) + assert "\n".join( + ["#!/bin/sh", "echo start", "echo end", ""] + ) == util.shellify(["echo start", None, "echo end"]) -class TestGetHostnameFqdn(CiTestCase): +class TestGetHostnameFqdn: def test_get_hostname_fqdn_from_only_cfg_fqdn(self): """When cfg only has the fqdn key, derive hostname and fqdn from it.""" hostname, fqdn, _ = util.get_hostname_fqdn( cfg={"fqdn": "myhost.domain.com"}, cloud=None ) - self.assertEqual("myhost", hostname) - self.assertEqual("myhost.domain.com", fqdn) + assert "myhost" == hostname + assert "myhost.domain.com" == fqdn def test_get_hostname_fqdn_from_cfg_fqdn_and_hostname(self): """When cfg has both fqdn and hostname keys, return them.""" hostname, fqdn, _ = util.get_hostname_fqdn( cfg={"fqdn": "myhost.domain.com", "hostname": "other"}, cloud=None ) - self.assertEqual("other", hostname) - self.assertEqual("myhost.domain.com", fqdn) + assert "other" == hostname + assert "myhost.domain.com" == fqdn def test_get_hostname_fqdn_from_cfg_hostname_with_domain(self): """When cfg has only hostname key which represents a fqdn, use that.""" hostname, fqdn, _ = util.get_hostname_fqdn( cfg={"hostname": "myhost.domain.com"}, cloud=None ) - self.assertEqual("myhost", hostname) - self.assertEqual("myhost.domain.com", fqdn) + assert "myhost" == hostname + assert "myhost.domain.com" == fqdn def test_get_hostname_fqdn_from_cfg_hostname_without_domain(self): """When cfg has a hostname without a '.' 
query cloud.get_hostname.""" @@ -777,8 +766,8 @@ def test_get_hostname_fqdn_from_cfg_hostname_without_domain(self): hostname, fqdn, _ = util.get_hostname_fqdn( cfg={"hostname": "myhost"}, cloud=cloud ) - self.assertEqual("myhost", hostname) - self.assertEqual("cloudhost.mycloud.com", fqdn) + assert "myhost" == hostname + assert "cloudhost.mycloud.com" == fqdn assert [ mock.call(fqdn=True, metadata_only=False) ] == cloud.get_hostname.call_args_list @@ -791,8 +780,8 @@ def test_get_hostname_fqdn_from_without_fqdn_or_hostname(self): DataSourceHostname("cloudhost", False), ) hostname, fqdn, _ = util.get_hostname_fqdn(cfg={}, cloud=cloud) - self.assertEqual("cloudhost", hostname) - self.assertEqual("cloudhost.mycloud.com", fqdn) + assert "cloudhost" == hostname + assert "cloudhost.mycloud.com" == fqdn assert [ mock.call(fqdn=True, metadata_only=False), mock.call(metadata_only=False), @@ -803,16 +792,16 @@ def test_get_hostname_fqdn_from_numeric_fqdn(self): hostname, fqdn, _ = util.get_hostname_fqdn( cfg={"fqdn": 12345}, cloud=None ) - self.assertEqual("12345", hostname) - self.assertEqual("12345", fqdn) + assert "12345" == hostname + assert "12345" == fqdn def test_get_hostname_fqdn_from_numeric_fqdn_with_domain(self): """When cfg fqdn is numeric with a domain, ensure correct parsing.""" hostname, fqdn, _ = util.get_hostname_fqdn( cfg={"fqdn": "12345.example.com"}, cloud=None ) - self.assertEqual("12345", hostname) - self.assertEqual("12345.example.com", fqdn) + assert "12345" == hostname + assert "12345.example.com" == fqdn def test_get_hostname_fqdn_from_passes_metadata_only_to_cloud(self): """Calls to cloud.get_hostname pass the metadata_only parameter.""" @@ -830,7 +819,7 @@ def test_get_hostname_fqdn_from_passes_metadata_only_to_cloud(self): ] == cloud.get_hostname.call_args_list -class TestBlkid(CiTestCase): +class TestBlkid: ids = { "id01": "1111-1111", "id02": "22222222-2222", @@ -858,8 +847,6 @@ class TestBlkid(CiTestCase): """ ) - maxDiff = None - def 
_get_expected(self): return { "/dev/loop0": {"DEVNAME": "/dev/loop0", "TYPE": "squashfs"}, @@ -898,7 +885,7 @@ def _get_expected(self): @mock.patch("cloudinit.subp.subp") def test_functional_blkid(self, m_subp): m_subp.return_value = SubpResult(self.blkid_out.format(**self.ids), "") - self.assertEqual(self._get_expected(), util.blkid()) + assert self._get_expected() == util.blkid() m_subp.assert_called_with( ["blkid", "-o", "full"], capture=True, decode="replace" ) @@ -907,7 +894,7 @@ def test_functional_blkid(self, m_subp): def test_blkid_no_cache_uses_no_cache(self, m_subp): """blkid should turn off cache if disable_cache is true.""" m_subp.return_value = SubpResult(self.blkid_out.format(**self.ids), "") - self.assertEqual(self._get_expected(), util.blkid(disable_cache=True)) + assert self._get_expected() == util.blkid(disable_cache=True) m_subp.assert_called_with( ["blkid", "-o", "full", "-c", "/dev/null"], capture=True, @@ -917,7 +904,7 @@ def test_blkid_no_cache_uses_no_cache(self, m_subp): @mock.patch("cloudinit.util.subp.which") @mock.patch("cloudinit.util.subp.subp") -class TestUdevadmSettle(CiTestCase): +class TestUdevadmSettle: def test_with_no_params(self, m_subp, m_which): """called with no parameters.""" m_which.side_effect = lambda m: m in ("udevadm",) @@ -931,20 +918,20 @@ def test_udevadm_not_present(self, m_subp, m_which): m_which.assert_called_once_with("udevadm") m_subp.assert_not_called() - def test_with_exists_and_not_exists(self, m_subp, m_which): + def test_with_exists_and_not_exists(self, m_subp, m_which, tmp_path): """with exists=file where file does not exist should invoke subp.""" m_which.side_effect = lambda m: m in ("udevadm",) - mydev = self.tmp_path("mydev") + mydev = str(tmp_path / "mydev") util.udevadm_settle(exists=mydev) m_subp.assert_called_once_with( ["udevadm", "settle", "--exit-if-exists=%s" % mydev] ) - def test_with_exists_and_file_exists(self, m_subp, m_which): + def test_with_exists_and_file_exists(self, m_subp, m_which, 
tmp_path): """with exists=file where file does exist should only invoke subp once for 'which' call.""" m_which.side_effect = lambda m: m in ("udevadm",) - mydev = self.tmp_path("mydev") + mydev = str(tmp_path / "mydev") util.write_file(mydev, "foo\n") util.udevadm_settle(exists=mydev) m_which.assert_called_once_with("udevadm") @@ -968,10 +955,10 @@ def test_with_timeout_string(self, m_subp, m_which): ["udevadm", "settle", "--timeout=%s" % timeout] ) - def test_with_exists_and_timeout(self, m_subp, m_which): + def test_with_exists_and_timeout(self, m_subp, m_which, tmp_path): """test call with both exists and timeout.""" m_which.side_effect = lambda m: m in ("udevadm",) - mydev = self.tmp_path("mydev") + mydev = str(tmp_path / "mydev") timeout = "3" util.udevadm_settle(exists=mydev, timeout=timeout) m_subp.assert_called_once_with( @@ -986,12 +973,14 @@ def test_with_exists_and_timeout(self, m_subp, m_which): def test_subp_exception_raises_to_caller(self, m_subp, m_which): m_which.side_effect = lambda m: m in ("udevadm",) m_subp.side_effect = subp.ProcessExecutionError("BOOM") - self.assertRaises(subp.ProcessExecutionError, util.udevadm_settle) + with pytest.raises(subp.ProcessExecutionError): + util.udevadm_settle() @mock.patch("os.path.exists") -class TestGetLinuxDistro(CiTestCase): - def setUp(self): +class TestGetLinuxDistro: + @pytest.fixture(autouse=True) + def fixtures(self): util.get_linux_distro.cache_clear() @classmethod @@ -1013,7 +1002,7 @@ def test_get_linux_distro_quoted_name(self, m_os_release, m_path_exists): m_os_release.return_value = OS_RELEASE_SLES m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual(("sles", "12.3", platform.machine()), dist) + assert ("sles", "12.3", platform.machine()) == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_distro_bare_name(self, m_os_release, m_path_exists): @@ -1022,7 +1011,7 @@ def test_get_linux_distro_bare_name(self, m_os_release, 
m_path_exists): m_os_release.return_value = OS_RELEASE_UBUNTU m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual(("ubuntu", "16.04", "xenial"), dist) + assert ("ubuntu", "16.04", "xenial") == dist @mock.patch("platform.system") @mock.patch("platform.release") @@ -1041,7 +1030,7 @@ def test_get_linux_freebsd( m_parse_redhat_release.return_value = {} util.is_BSD.cache_clear() dist = util.get_linux_distro() - self.assertEqual(("freebsd", "12.0-RELEASE-p10", ""), dist) + assert ("freebsd", "12.0-RELEASE-p10", "") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_centos6(self, m_os_release, m_path_exists): @@ -1049,7 +1038,7 @@ def test_get_linux_centos6(self, m_os_release, m_path_exists): m_os_release.return_value = REDHAT_RELEASE_CENTOS_6 m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists dist = util.get_linux_distro() - self.assertEqual(("centos", "6.10", "Final"), dist) + assert ("centos", "6.10", "Final") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_centos7_redhat_release(self, m_os_release, m_exists): @@ -1057,7 +1046,7 @@ def test_get_linux_centos7_redhat_release(self, m_os_release, m_exists): m_os_release.return_value = REDHAT_RELEASE_CENTOS_7 m_exists.side_effect = TestGetLinuxDistro.redhat_release_exists dist = util.get_linux_distro() - self.assertEqual(("centos", "7.5.1804", "Core"), dist) + assert ("centos", "7.5.1804", "Core") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_redhat7_osrelease(self, m_os_release, m_path_exists): @@ -1065,7 +1054,7 @@ def test_get_linux_redhat7_osrelease(self, m_os_release, m_path_exists): m_os_release.return_value = OS_RELEASE_REDHAT_7 m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual(("redhat", "7.5", "Maipo"), dist) + assert ("redhat", "7.5", "Maipo") == dist @mock.patch(M_PATH + "load_text_file") def 
test_get_linux_redhat7_rhrelease(self, m_os_release, m_path_exists): @@ -1073,7 +1062,7 @@ def test_get_linux_redhat7_rhrelease(self, m_os_release, m_path_exists): m_os_release.return_value = REDHAT_RELEASE_REDHAT_7 m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists dist = util.get_linux_distro() - self.assertEqual(("redhat", "7.5", "Maipo"), dist) + assert ("redhat", "7.5", "Maipo") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_redhat6_rhrelease(self, m_os_release, m_path_exists): @@ -1081,7 +1070,7 @@ def test_get_linux_redhat6_rhrelease(self, m_os_release, m_path_exists): m_os_release.return_value = REDHAT_RELEASE_REDHAT_6 m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists dist = util.get_linux_distro() - self.assertEqual(("redhat", "6.10", "Santiago"), dist) + assert ("redhat", "6.10", "Santiago") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_copr_centos(self, m_os_release, m_path_exists): @@ -1089,7 +1078,7 @@ def test_get_linux_copr_centos(self, m_os_release, m_path_exists): m_os_release.return_value = OS_RELEASE_CENTOS m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual(("centos", "7", "Core"), dist) + assert ("centos", "7", "Core") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_almalinux8_rhrelease(self, m_os_release, m_path_exists): @@ -1097,7 +1086,7 @@ def test_get_linux_almalinux8_rhrelease(self, m_os_release, m_path_exists): m_os_release.return_value = REDHAT_RELEASE_ALMALINUX_8 m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists dist = util.get_linux_distro() - self.assertEqual(("almalinux", "8.3", "Purple Manul"), dist) + assert ("almalinux", "8.3", "Purple Manul") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_almalinux8_osrelease(self, m_os_release, m_path_exists): @@ -1105,7 +1094,7 @@ def test_get_linux_almalinux8_osrelease(self, m_os_release, 
m_path_exists): m_os_release.return_value = OS_RELEASE_ALMALINUX_8 m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual(("almalinux", "8.3", "Purple Manul"), dist) + assert ("almalinux", "8.3", "Purple Manul") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_eurolinux7_rhrelease(self, m_os_release, m_path_exists): @@ -1113,7 +1102,7 @@ def test_get_linux_eurolinux7_rhrelease(self, m_os_release, m_path_exists): m_os_release.return_value = REDHAT_RELEASE_EUROLINUX_7 m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists dist = util.get_linux_distro() - self.assertEqual(("eurolinux", "7.9", "Minsk"), dist) + assert ("eurolinux", "7.9", "Minsk") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_eurolinux7_osrelease(self, m_os_release, m_path_exists): @@ -1121,7 +1110,7 @@ def test_get_linux_eurolinux7_osrelease(self, m_os_release, m_path_exists): m_os_release.return_value = OS_RELEASE_EUROLINUX_7 m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual(("eurolinux", "7.9", "Minsk"), dist) + assert ("eurolinux", "7.9", "Minsk") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_eurolinux8_rhrelease(self, m_os_release, m_path_exists): @@ -1129,7 +1118,7 @@ def test_get_linux_eurolinux8_rhrelease(self, m_os_release, m_path_exists): m_os_release.return_value = REDHAT_RELEASE_EUROLINUX_8 m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists dist = util.get_linux_distro() - self.assertEqual(("eurolinux", "8.4", "Vaduz"), dist) + assert ("eurolinux", "8.4", "Vaduz") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_eurolinux8_osrelease(self, m_os_release, m_path_exists): @@ -1137,7 +1126,7 @@ def test_get_linux_eurolinux8_osrelease(self, m_os_release, m_path_exists): m_os_release.return_value = OS_RELEASE_EUROLINUX_8 m_path_exists.side_effect = 
TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual(("eurolinux", "8.4", "Vaduz"), dist) + assert ("eurolinux", "8.4", "Vaduz") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_miraclelinux8_rhrelease( @@ -1147,7 +1136,7 @@ def test_get_linux_miraclelinux8_rhrelease( m_os_release.return_value = REDHAT_RELEASE_MIRACLELINUX_8 m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists dist = util.get_linux_distro() - self.assertEqual(("miracle", "8.4", "Peony"), dist) + assert ("miracle", "8.4", "Peony") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_miraclelinux8_osrelease( @@ -1157,7 +1146,7 @@ def test_get_linux_miraclelinux8_osrelease( m_os_release.return_value = OS_RELEASE_MIRACLELINUX_8 m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual(("miraclelinux", "8", "Peony"), dist) + assert ("miraclelinux", "8", "Peony") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_rocky8_rhrelease(self, m_os_release, m_path_exists): @@ -1165,7 +1154,7 @@ def test_get_linux_rocky8_rhrelease(self, m_os_release, m_path_exists): m_os_release.return_value = REDHAT_RELEASE_ROCKY_8 m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists dist = util.get_linux_distro() - self.assertEqual(("rocky", "8.3", "Green Obsidian"), dist) + assert ("rocky", "8.3", "Green Obsidian") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_rocky8_osrelease(self, m_os_release, m_path_exists): @@ -1173,7 +1162,7 @@ def test_get_linux_rocky8_osrelease(self, m_os_release, m_path_exists): m_os_release.return_value = OS_RELEASE_ROCKY_8 m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual(("rocky", "8.3", "Green Obsidian"), dist) + assert ("rocky", "8.3", "Green Obsidian") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_virtuozzo8_rhrelease(self, 
m_os_release, m_path_exists): @@ -1181,7 +1170,7 @@ def test_get_linux_virtuozzo8_rhrelease(self, m_os_release, m_path_exists): m_os_release.return_value = REDHAT_RELEASE_VIRTUOZZO_8 m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists dist = util.get_linux_distro() - self.assertEqual(("virtuozzo", "8", "Virtuozzo Linux"), dist) + assert ("virtuozzo", "8", "Virtuozzo Linux") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_virtuozzo8_osrelease(self, m_os_release, m_path_exists): @@ -1189,7 +1178,7 @@ def test_get_linux_virtuozzo8_osrelease(self, m_os_release, m_path_exists): m_os_release.return_value = OS_RELEASE_VIRTUOZZO_8 m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual(("virtuozzo", "8", "Virtuozzo Linux"), dist) + assert ("virtuozzo", "8", "Virtuozzo Linux") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_cloud8_rhrelease(self, m_os_release, m_path_exists): @@ -1197,7 +1186,7 @@ def test_get_linux_cloud8_rhrelease(self, m_os_release, m_path_exists): m_os_release.return_value = REDHAT_RELEASE_CLOUDLINUX_8 m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists dist = util.get_linux_distro() - self.assertEqual(("cloudlinux", "8.4", "Valery Rozhdestvensky"), dist) + assert ("cloudlinux", "8.4", "Valery Rozhdestvensky") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_cloud8_osrelease(self, m_os_release, m_path_exists): @@ -1205,7 +1194,7 @@ def test_get_linux_cloud8_osrelease(self, m_os_release, m_path_exists): m_os_release.return_value = OS_RELEASE_CLOUDLINUX_8 m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual(("cloudlinux", "8.4", "Valery Rozhdestvensky"), dist) + assert ("cloudlinux", "8.4", "Valery Rozhdestvensky") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_debian(self, m_os_release, m_path_exists): @@ -1213,7 +1202,7 @@ def 
test_get_linux_debian(self, m_os_release, m_path_exists): m_os_release.return_value = OS_RELEASE_DEBIAN m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual(("debian", "9", "stretch"), dist) + assert ("debian", "9", "stretch") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_openeuler(self, m_os_release, m_path_exists): @@ -1221,7 +1210,7 @@ def test_get_linux_openeuler(self, m_os_release, m_path_exists): m_os_release.return_value = OS_RELEASE_OPENEULER_20 m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual(("openEuler", "20.03", "LTS-SP2"), dist) + assert ("openEuler", "20.03", "LTS-SP2") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_opencloudos(self, m_os_release, m_path_exists): @@ -1229,7 +1218,7 @@ def test_get_linux_opencloudos(self, m_os_release, m_path_exists): m_os_release.return_value = OS_RELEASE_OPENCLOUDOS_8 m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual(("OpenCloudOS", "8.6", ""), dist) + assert ("OpenCloudOS", "8.6", "") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_tencentos(self, m_os_release, m_path_exists): @@ -1237,7 +1226,7 @@ def test_get_linux_tencentos(self, m_os_release, m_path_exists): m_os_release.return_value = OS_RELEASE_TENCENTOS_3 m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual(("TencentOS", "3.1", ""), dist) + assert ("TencentOS", "3.1", "") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_opensuse(self, m_os_release, m_path_exists): @@ -1247,7 +1236,7 @@ def test_get_linux_opensuse(self, m_os_release, m_path_exists): m_os_release.return_value = OS_RELEASE_OPENSUSE m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual(("opensuse", "42.3", 
platform.machine()), dist) + assert ("opensuse", "42.3", platform.machine()) == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_opensuse_l15(self, m_os_release, m_path_exists): @@ -1257,7 +1246,7 @@ def test_get_linux_opensuse_l15(self, m_os_release, m_path_exists): m_os_release.return_value = OS_RELEASE_OPENSUSE_L15 m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual(("opensuse-leap", "15.0", platform.machine()), dist) + assert ("opensuse-leap", "15.0", platform.machine()) == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_opensuse_tw(self, m_os_release, m_path_exists): @@ -1267,9 +1256,7 @@ def test_get_linux_opensuse_tw(self, m_os_release, m_path_exists): m_os_release.return_value = OS_RELEASE_OPENSUSE_TW m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual( - ("opensuse-tumbleweed", "20180920", platform.machine()), dist - ) + assert ("opensuse-tumbleweed", "20180920", platform.machine()) == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_photon_os_release(self, m_os_release, m_path_exists): @@ -1277,7 +1264,7 @@ def test_get_linux_photon_os_release(self, m_os_release, m_path_exists): m_os_release.return_value = OS_RELEASE_PHOTON m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual(("photon", "4.0", "VMware Photon OS/Linux"), dist) + assert ("photon", "4.0", "VMware Photon OS/Linux") == dist @mock.patch("cloudinit.util.load_text_file") def test_get_linux_mariner_os_release(self, m_os_release, m_path_exists): @@ -1285,7 +1272,7 @@ def test_get_linux_mariner_os_release(self, m_os_release, m_path_exists): m_os_release.return_value = OS_RELEASE_MARINER m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual(("mariner", "2.0", ""), dist) + assert ("mariner", "2.0", "") == dist 
@mock.patch("cloudinit.util.load_text_file") def test_get_linux_azurelinux_os_release( @@ -1295,7 +1282,7 @@ def test_get_linux_azurelinux_os_release( m_os_release.return_value = OS_RELEASE_AZURELINUX m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual(("azurelinux", "3.0", ""), dist) + assert ("azurelinux", "3.0", "") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_openmandriva(self, m_os_release, m_path_exists): @@ -1303,7 +1290,7 @@ def test_get_linux_openmandriva(self, m_os_release, m_path_exists): m_os_release.return_value = OS_RELEASE_OPENMANDRIVA m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual(("openmandriva", "4.90", "nickel"), dist) + assert ("openmandriva", "4.90", "nickel") == dist @mock.patch(M_PATH + "load_text_file") def test_get_linux_cos(self, m_os_release, m_path_exists): @@ -1311,7 +1298,7 @@ def test_get_linux_cos(self, m_os_release, m_path_exists): m_os_release.return_value = OS_RELEASE_COS m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists dist = util.get_linux_distro() - self.assertEqual(("cos", "93", ""), dist) + assert ("cos", "93", "") == dist @mock.patch("platform.system") @mock.patch("platform.dist", create=True) @@ -1323,7 +1310,7 @@ def test_get_linux_distro_no_data( m_platform_system.return_value = "Linux" m_path_exists.return_value = 0 dist = util.get_linux_distro() - self.assertEqual(("", "", ""), dist) + assert ("", "", "") == dist @mock.patch("platform.system") @mock.patch("platform.dist", create=True) @@ -1336,7 +1323,7 @@ def test_get_linux_distro_no_impl( m_platform_system.return_value = "Linux" m_path_exists.return_value = 0 dist = util.get_linux_distro() - self.assertEqual(("", "", ""), dist) + assert ("", "", "") == dist @mock.patch("platform.system") @mock.patch("platform.dist", create=True) @@ -1348,7 +1335,7 @@ def test_get_linux_distro_plat_data( 
m_platform_system.return_value = "Linux" m_path_exists.return_value = 0 dist = util.get_linux_distro() - self.assertEqual(("foo", "1.1", "aarch64"), dist) + assert ("foo", "1.1", "aarch64") == dist class TestGetVariant: @@ -1396,40 +1383,36 @@ def test_get_variant(self, info, expected_variant): assert util._get_variant(info) == expected_variant -class TestJsonDumps(CiTestCase): +class TestJsonDumps: def test_is_str(self): """json_dumps should return a string.""" - self.assertTrue( - isinstance(atomic_helper.json_dumps({"abc": "123"}), str) - ) + assert isinstance(atomic_helper.json_dumps({"abc": "123"}), str) def test_utf8(self): smiley = "\\ud83d\\ude03" - self.assertEqual( - {"smiley": smiley}, - json.loads(atomic_helper.json_dumps({"smiley": smiley})), + assert {"smiley": smiley} == json.loads( + atomic_helper.json_dumps({"smiley": smiley}) ) def test_non_utf8(self): blob = b"\xba\x03Qx-#y\xea" - self.assertEqual( - {"blob": "ci-b64:" + base64.b64encode(blob).decode("utf-8")}, - json.loads(atomic_helper.json_dumps({"blob": blob})), - ) + assert { + "blob": "ci-b64:" + base64.b64encode(blob).decode("utf-8") + } == json.loads(atomic_helper.json_dumps({"blob": blob})) @mock.patch("os.path.exists") -class TestIsLXD(CiTestCase): +class TestIsLXD: def test_is_lxd_true_on_sock_device(self, m_exists): """When lxd's /dev/lxd/sock exists, is_lxd returns true.""" m_exists.return_value = True - self.assertTrue(util.is_lxd()) + assert util.is_lxd() is True m_exists.assert_called_once_with("/dev/lxd/sock") def test_is_lxd_false_when_sock_device_absent(self, m_exists): """When lxd's /dev/lxd/sock is absent, is_lxd returns false.""" m_exists.return_value = False - self.assertFalse(util.is_lxd()) + assert not util.is_lxd() m_exists.assert_called_once_with("/dev/lxd/sock") @@ -1438,7 +1421,7 @@ class TestReadCcFromCmdline: "cmdline,expected_cfg", [ # Return None if cmdline has no cc:end_cc content. 
- pytest.param(CiTestCase.random_string(), None, id="random_string"), + pytest.param(random_string(), None, id="random_string"), # Return None if YAML content is empty string. ("foo cc: end_cc bar", None), # Return expected dictionary without trailing end_cc marker. @@ -1813,31 +1796,31 @@ def test_not_found_no_default(self): """None is returned if key is not found and no default given.""" config = {} result = util.get_cfg_option_list(config, "key") - self.assertIsNone(result) + assert result is None def test_not_found_with_default(self): """Default is returned if key is not found.""" config = {} result = util.get_cfg_option_list(config, "key", default=["DEFAULT"]) - self.assertEqual(["DEFAULT"], result) + assert ["DEFAULT"] == result def test_found_with_default(self): """Default is not returned if key is found.""" config = {"key": ["value1"]} result = util.get_cfg_option_list(config, "key", default=["DEFAULT"]) - self.assertEqual(["value1"], result) + assert ["value1"] == result def test_found_convert_to_list(self): """Single string is converted to one element list.""" config = {"key": "value1"} result = util.get_cfg_option_list(config, "key") - self.assertEqual(["value1"], result) + assert ["value1"] == result def test_value_is_none(self): """If value is None empty list is returned.""" config = {"key": None} result = util.get_cfg_option_list(config, "key") - self.assertEqual([], result) + assert [] == result class TestWriteFile(helpers.TestCase): @@ -1853,13 +1836,13 @@ def test_basic_usage(self): util.write_file(path, contents) - self.assertTrue(os.path.exists(path)) - self.assertTrue(os.path.isfile(path)) + assert os.path.exists(path) + assert os.path.isfile(path) with open(path) as f: create_contents = f.read() - self.assertEqual(contents, create_contents) + assert contents == create_contents file_stat = os.stat(path) - self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode)) + assert 0o644 == stat.S_IMODE(file_stat.st_mode) def 
test_dir_is_created_if_required(self): """Verifiy that directories are created is required.""" @@ -1869,8 +1852,8 @@ def test_dir_is_created_if_required(self): util.write_file(path, contents) - self.assertTrue(os.path.isdir(dirname)) - self.assertTrue(os.path.isfile(path)) + assert os.path.isdir(dirname) + assert os.path.isfile(path) def test_dir_ownership(self): """Verifiy that directories is created with appropriate ownership.""" @@ -1897,10 +1880,10 @@ def test_dir_is_not_created_if_ensure_dir_false(self): path = os.path.join(dirname, "NewFile.txt") contents = "Hey there" - with self.assertRaises(FileNotFoundError): + with pytest.raises(FileNotFoundError): util.write_file(path, contents, ensure_dir_exists=False) - self.assertFalse(os.path.isdir(dirname)) + assert not os.path.isdir(dirname) def test_explicit_mode(self): """Verify explicit file mode works properly.""" @@ -1909,10 +1892,10 @@ def test_explicit_mode(self): util.write_file(path, contents, mode=0o666) - self.assertTrue(os.path.exists(path)) - self.assertTrue(os.path.isfile(path)) + assert os.path.exists(path) + assert os.path.isfile(path) file_stat = os.stat(path) - self.assertEqual(0o666, stat.S_IMODE(file_stat.st_mode)) + assert 0o666 == stat.S_IMODE(file_stat.st_mode) def test_preserve_mode_no_existing(self): """Verify that file is created with mode 0o644 if preserve_mode @@ -1922,10 +1905,10 @@ def test_preserve_mode_no_existing(self): util.write_file(path, contents, preserve_mode=True) - self.assertTrue(os.path.exists(path)) - self.assertTrue(os.path.isfile(path)) + assert os.path.exists(path) + assert os.path.isfile(path) file_stat = os.stat(path) - self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode)) + assert 0o644 == stat.S_IMODE(file_stat.st_mode) def test_preserve_mode_with_existing(self): """Verify that file is created using mode of existing file @@ -1938,10 +1921,10 @@ def test_preserve_mode_with_existing(self): util.write_file(path, contents, preserve_mode=True) - 
self.assertTrue(os.path.exists(path)) - self.assertTrue(os.path.isfile(path)) + assert os.path.exists(path) + assert os.path.isfile(path) file_stat = os.stat(path) - self.assertEqual(0o666, stat.S_IMODE(file_stat.st_mode)) + assert 0o666 == stat.S_IMODE(file_stat.st_mode) def test_custom_omode(self): """Verify custom omode works properly.""" @@ -1953,11 +1936,11 @@ def test_custom_omode(self): f.write(b"LINE1\n") util.write_file(path, contents, omode="a") - self.assertTrue(os.path.exists(path)) - self.assertTrue(os.path.isfile(path)) + assert os.path.exists(path) + assert os.path.isfile(path) with open(path) as f: create_contents = f.read() - self.assertEqual("LINE1\nHey there", create_contents) + assert "LINE1\nHey there" == create_contents def test_restorecon_if_possible_is_called(self): """Make sure the selinux guard is called correctly.""" @@ -1971,10 +1954,10 @@ def test_restorecon_if_possible_is_called(self): importer, "import_module", return_value=fake_se ) as mockobj: with util.SeLinuxGuard(my_file) as is_on: - self.assertTrue(is_on) + assert is_on - self.assertEqual(1, len(fake_se.restored)) - self.assertEqual(my_file, fake_se.restored[0]) + assert 1 == len(fake_se.restored) + assert my_file == fake_se.restored[0] mockobj.assert_called_once_with("selinux") @@ -1986,13 +1969,13 @@ def setUp(self): self.addCleanup(shutil.rmtree, self.tmp) def assertDirEmpty(self, dirname): - self.assertEqual([], os.listdir(dirname)) + assert [] == os.listdir(dirname) def test_does_not_delete_dir(self): """Ensure directory itself is not deleted.""" util.delete_dir_contents(self.tmp) - self.assertTrue(os.path.isdir(self.tmp)) + assert os.path.isdir(self.tmp) self.assertDirEmpty(self.tmp) def test_deletes_files(self): @@ -2045,11 +2028,50 @@ def test_deletes_symlinks(self): self.assertDirEmpty(self.tmp) +class TestDelDir: + """ + Test the del_dir function + """ + + def test_del_dir_existing_directory(self, tmpdir): + """ + An existing directory can be deleted without issues + 
""" + assert os.path.exists(tmpdir) + with does_not_raise(): + util.del_dir(tmpdir) + assert not os.path.exists(tmpdir) + + def test_del_dir_file_not_found(self): + """ + Should not raise FileNotFoundError + """ + non_existing_dir = "/blabla" + assert not os.path.exists(non_existing_dir) + with does_not_raise(): + util.del_dir(non_existing_dir) + assert not os.path.exists(non_existing_dir) + + def test_del_dir_generic_errors(self, mocker): + """ + If shutil.rmtree raises a non-FileNotFoundError , del_dir should + raise this error + """ + mocked_side_effect = PermissionError + mock_rmtree = mocker.patch( + "shutil.rmtree", + side_effect=mocked_side_effect, + ) + with pytest.raises(mocked_side_effect): + util.del_dir("somedir") + assert mock_rmtree.call_count == 1 + + class TestKeyValStrings(helpers.TestCase): def test_keyval_str_to_dict(self): expected = {"1": "one", "2": "one+one", "ro": True} cmdline = "1=one ro 2=one+one" - self.assertEqual(expected, util.keyval_str_to_dict(cmdline)) + assert expected == util.keyval_str_to_dict(cmdline) class TestGetCmdline(helpers.TestCase): @@ -2058,7 +2080,7 @@ def test_cmdline_reads_debug_env(self): "os.environ", values={"DEBUG_PROC_CMDLINE": "abcd 123"} ): ret = util.get_cmdline() - self.assertEqual("abcd 123", ret) + assert "abcd 123" == ret class TestFipsEnabled: @@ -2086,57 +2108,49 @@ def fake_load_file(path): assert expected is util.fips_enabled() -class TestLoadYaml(helpers.CiTestCase): +class TestLoadYaml: mydefault = "7b03a8ebace993d806255121073fed52" - with_logs = True def test_simple(self): mydata = {"1": "one", "2": "two"} - self.assertEqual(util.load_yaml(yaml.dump(mydata)), mydata) + assert util.load_yaml(yaml.dump(mydata)) == mydata - def test_nonallowed_returns_default(self): + def test_nonallowed_returns_default(self, caplog): """Any unallowed types result in returning default; log the issue.""" # for now, anything not in the allowed list just returns the default. 
myyaml = yaml.dump({"1": "one"}) - self.assertEqual( - util.load_yaml( - blob=myyaml, default=self.mydefault, allowed=(str,) - ), - self.mydefault, + assert ( + util.load_yaml(blob=myyaml, default=self.mydefault, allowed=(str,)) + == self.mydefault ) regex = re.compile( r"Yaml load allows \(<(class|type) \'str\'>,\) root types, but" r" got dict" ) - self.assertTrue( - regex.search(self.logs.getvalue()), - msg="Missing expected yaml load error", - ) + assert regex.search(caplog.text), "Missing expected yaml load error" - def test_bogus_scan_error_returns_default(self): + def test_bogus_scan_error_returns_default(self, caplog): """On Yaml scan error, load_yaml returns the default and logs issue.""" badyaml = "1\n 2:" - self.assertEqual( - util.load_yaml(blob=badyaml, default=self.mydefault), - self.mydefault, + assert ( + util.load_yaml(blob=badyaml, default=self.mydefault) + == self.mydefault ) - self.assertIn( + assert ( "Failed loading yaml blob. Invalid format at line 2 column 3:" - ' "mapping values are not allowed here', - self.logs.getvalue(), + ' "mapping values are not allowed here' in caplog.text ) - def test_bogus_parse_error_returns_default(self): + def test_bogus_parse_error_returns_default(self, caplog): """On Yaml parse error, load_yaml returns default and logs issue.""" badyaml = "{}}" - self.assertEqual( - util.load_yaml(blob=badyaml, default=self.mydefault), - self.mydefault, + assert ( + util.load_yaml(blob=badyaml, default=self.mydefault) + == self.mydefault ) - self.assertIn( + assert ( "Failed loading yaml blob. 
Invalid format at line 1 column 3:" - " \"expected '', but found '}'", - self.logs.getvalue(), + " \"expected '', but found '}'" in caplog.text ) def test_unsafe_types(self): @@ -2148,27 +2162,24 @@ def test_unsafe_types(self): 3, ) ) - self.assertEqual( - util.load_yaml(blob=unsafe_yaml, default=self.mydefault), - self.mydefault, + assert ( + util.load_yaml(blob=unsafe_yaml, default=self.mydefault) + == self.mydefault ) def test_python_unicode(self): # complex type of python/unicode is explicitly allowed myobj = {"1": "FOOBAR"} safe_yaml = yaml.dump(myobj) - self.assertEqual( - util.load_yaml(blob=safe_yaml, default=self.mydefault), myobj - ) + assert util.load_yaml(blob=safe_yaml, default=self.mydefault) == myobj def test_none_returns_default(self): """If yaml.load returns None, then default should be returned.""" blobs = ("", " ", "# foo\n", "#") mdef = self.mydefault - self.assertEqual( - [(b, self.mydefault) for b in blobs], - [(b, util.load_yaml(blob=b, default=mdef)) for b in blobs], - ) + assert [(b, self.mydefault) for b in blobs] == [ + (b, util.load_yaml(blob=b, default=mdef)) for b in blobs + ] class TestMountinfoParsing: @@ -2264,106 +2275,91 @@ def test_parse_mount_with_zfs(self, mount_out): assert ("vmzroot/var/tmp", "zfs", "/var/tmp") == ret -class TestIsX86(helpers.CiTestCase): +class TestIsX86: def test_is_x86_matches_x86_types(self): """is_x86 returns True if CPU architecture matches.""" matched_arches = ["x86_64", "i386", "i586", "i686"] for arch in matched_arches: - self.assertTrue( - util.is_x86(arch), 'Expected is_x86 for arch "%s"' % arch - ) + assert util.is_x86(arch), 'Expected is_x86 for arch "%s"' % arch def test_is_x86_unmatched_types(self): """is_x86 returns Fale on non-intel x86 architectures.""" unmatched_arches = ["ia64", "9000/800", "arm64v71"] for arch in unmatched_arches: - self.assertFalse( - util.is_x86(arch), 'Expected not is_x86 for arch "%s"' % arch + assert not util.is_x86(arch), ( + 'Expected not is_x86 for arch "%s"' % 
arch ) @mock.patch(M_PATH + "os.uname") def test_is_x86_calls_uname_for_architecture(self, m_uname): """is_x86 returns True if platform from uname matches.""" m_uname.return_value = [0, 1, 2, 3, "x86_64"] - self.assertTrue(util.is_x86()) + assert util.is_x86() -class TestGetConfigLogfiles(helpers.CiTestCase): +class TestGetConfigLogfiles: def test_empty_cfg_returns_empty_list(self): """An empty config passed to get_config_logfiles returns empty list.""" - self.assertEqual([], util.get_config_logfiles(None)) - self.assertEqual([], util.get_config_logfiles({})) + assert [] == util.get_config_logfiles(None) + assert [] == util.get_config_logfiles({}) def test_default_log_file_present(self): """When default_log_file is set get_config_logfiles finds it.""" - self.assertEqual( - ["/my.log"], util.get_config_logfiles({"def_log_file": "/my.log"}) + assert ["/my.log"] == util.get_config_logfiles( + {"def_log_file": "/my.log"} ) def test_output_logs_parsed_when_teeing_files(self): """When output configuration is parsed when teeing files.""" - self.assertEqual( - ["/himom.log", "/my.log"], - sorted( - util.get_config_logfiles( - { - "def_log_file": "/my.log", - "output": {"all": "|tee -a /himom.log"}, - } - ) - ), + assert ["/himom.log", "/my.log"] == sorted( + util.get_config_logfiles( + { + "def_log_file": "/my.log", + "output": {"all": "|tee -a /himom.log"}, + } + ) ) def test_output_logs_parsed_when_redirecting(self): """When output configuration is parsed when redirecting to a file.""" - self.assertEqual( - ["/my.log", "/test.log"], - sorted( - util.get_config_logfiles( - { - "def_log_file": "/my.log", - "output": {"all": ">/test.log"}, - } - ) - ), + assert ["/my.log", "/test.log"] == sorted( + util.get_config_logfiles( + { + "def_log_file": "/my.log", + "output": {"all": ">/test.log"}, + } + ) ) def test_output_logs_parsed_when_appending(self): """When output configuration is parsed when appending to a file.""" - self.assertEqual( - ["/my.log", "/test.log"], - sorted( 
- util.get_config_logfiles( - { - "def_log_file": "/my.log", - "output": {"all": ">> /test.log"}, - } - ) - ), + assert ["/my.log", "/test.log"] == sorted( + util.get_config_logfiles( + { + "def_log_file": "/my.log", + "output": {"all": ">> /test.log"}, + } + ) ) - def test_output_logs_parsed_when_teeing_files_and_rotated(self): + def test_output_logs_parsed_when_teeing_files_and_rotated(self, tmp_path): """When output configuration is parsed when teeing files and rotated log files are present.""" - tmpd = self.tmp_dir() - log1 = self.tmp_path("my.log", tmpd) - log1_rotated = self.tmp_path("my.log.1.gz", tmpd) - log2 = self.tmp_path("himom.log", tmpd) - log2_rotated = self.tmp_path("himom.log.1.gz", tmpd) + log1 = str(tmp_path / "my.log") + log1_rotated = str(tmp_path / "my.log.1.gz") + log2 = str(tmp_path / "himom.log") + log2_rotated = str(tmp_path / "himom.log.1.gz") util.write_file(log1_rotated, "hello") util.write_file(log2_rotated, "hello") - self.assertEqual( - [log2, log2_rotated, log1, log1_rotated], - sorted( - util.get_config_logfiles( - { - "def_log_file": str(log1), - "output": {"all": f"|tee -a {log2}"}, - } - ) - ), + assert [log2, log2_rotated, log1, log1_rotated] == sorted( + util.get_config_logfiles( + { + "def_log_file": str(log1), + "output": {"all": f"|tee -a {log2}"}, + } + ) ) @@ -2461,7 +2457,7 @@ def test_given_log_level_used(self): class TestMessageFromString(helpers.TestCase): def test_unicode_not_messed_up(self): roundtripped = util.message_from_string("\n").as_string() - self.assertNotIn("\x00", roundtripped) + assert "\x00" not in roundtripped class TestReadOptionalSeed: @@ -2658,10 +2654,10 @@ def test_unicode_not_messed_up(self): sdir = self.tmp + os.path.sep found_md, found_ud, found_vd, found_network = util.read_seeded(sdir) - self.assertEqual(found_md, {"key1": "val1"}) - self.assertEqual(found_ud, ud) - self.assertEqual(found_vd, vd) - self.assertIsNone(found_network) + assert found_md == {"key1": "val1"} + assert found_ud == ud 
+ assert found_vd == vd + assert found_network is None class TestEncode(helpers.TestCase): @@ -2670,7 +2666,7 @@ class TestEncode(helpers.TestCase): def test_decode_binary_plain_text_with_hex(self): blob = "BOOTABLE_FLAG=\x80init=/bin/systemd" text = util.decode_binary(blob) - self.assertEqual(text, blob) + assert text == blob class TestProcessExecutionError(helpers.TestCase): @@ -2689,37 +2685,30 @@ def test_pexec_error_indent_text(self): error = subp.ProcessExecutionError() msg = "abc\ndef" formatted = "abc\n{0}def".format(" " * 4) - self.assertEqual(error._indent_text(msg, indent_level=4), formatted) - self.assertEqual( - error._indent_text(msg.encode(), indent_level=4), - formatted.encode(), - ) - self.assertIsInstance( - error._indent_text(msg.encode()), type(msg.encode()) + assert error._indent_text(msg, indent_level=4) == formatted + assert ( + error._indent_text(msg.encode(), indent_level=4) + == formatted.encode() ) + assert isinstance(error._indent_text(msg.encode()), type(msg.encode())) def test_pexec_error_type(self): - self.assertIsInstance(subp.ProcessExecutionError(), IOError) + assert isinstance(subp.ProcessExecutionError(), IOError) def test_pexec_error_empty_msgs(self): error = subp.ProcessExecutionError() - self.assertTrue( - all( - attr == self.empty_attr - for attr in (error.stderr, error.stdout, error.reason) - ) + assert all( + attr == self.empty_attr + for attr in (error.stderr, error.stdout, error.reason) ) - self.assertEqual(error.description, self.empty_description) - self.assertEqual( - str(error), - self.template.format( - description=self.empty_description, - exit_code=self.empty_attr, - reason=self.empty_attr, - stdout=self.empty_attr, - stderr=self.empty_attr, - cmd=self.empty_attr, - ), + assert error.description == self.empty_description + assert str(error) == self.template.format( + description=self.empty_description, + exit_code=self.empty_attr, + reason=self.empty_attr, + stdout=self.empty_attr, + stderr=self.empty_attr, + 
cmd=self.empty_attr, ) def test_pexec_error_single_line_msgs(self): @@ -2730,16 +2719,13 @@ def test_pexec_error_single_line_msgs(self): error = subp.ProcessExecutionError( stdout=stdout_msg, stderr=stderr_msg, exit_code=3, cmd=cmd ) - self.assertEqual( - str(error), - self.template.format( - description=self.empty_description, - stdout=stdout_msg, - stderr=stderr_msg, - exit_code=str(exit_code), - reason=self.empty_attr, - cmd=cmd, - ), + assert str(error) == self.template.format( + description=self.empty_description, + stdout=stdout_msg, + stderr=stderr_msg, + exit_code=str(exit_code), + reason=self.empty_attr, + cmd=cmd, ) def test_pexec_error_multi_line_msgs(self): @@ -2749,24 +2735,21 @@ def test_pexec_error_multi_line_msgs(self): error = subp.ProcessExecutionError( stdout=stdout_msg, stderr=stderr_msg ) - self.assertEqual( - str(error), - "\n".join( - ( - "{description}", - "Command: {empty_attr}", - "Exit code: {empty_attr}", - "Reason: {empty_attr}", - "Stdout: multi", - " line", - " output message", - "Stderr: multi", - " line", - " error message", - ) - ).format( - description=self.empty_description, empty_attr=self.empty_attr - ), + assert str(error) == "\n".join( + ( + "{description}", + "Command: {empty_attr}", + "Exit code: {empty_attr}", + "Reason: {empty_attr}", + "Stdout: multi", + " line", + " output message", + "Stderr: multi", + " line", + " error message", + ) + ).format( + description=self.empty_description, empty_attr=self.empty_attr ) @@ -2830,20 +2813,21 @@ def test_system_image_config_dir_is_snappy(self, mocker): class TestLoadShellContent(helpers.TestCase): def test_comments_handled_correctly(self): """Shell comments should be allowed in the content.""" - self.assertEqual( - {"key1": "val1", "key2": "val2", "key3": "val3 #tricky"}, - util.load_shell_content( - "\n".join( - [ - "#top of file comment", - "key1=val1 #this is a comment", - "# second comment", - 'key2="val2" # inlin comment#badkey=wark', - 'key3="val3 #tricky"', - "", - ] - ) 
- ), + assert { + "key1": "val1", + "key2": "val2", + "key3": "val3 #tricky", + } == util.load_shell_content( + "\n".join( + [ + "#top of file comment", + "key1=val1 #this is a comment", + "# second comment", + 'key2="val2" # inlin comment#badkey=wark', + 'key3="val3 #tricky"', + "", + ] + ) ) @@ -2871,34 +2855,29 @@ def test_non_utf8_in_environment(self, m_load_file): ) m_load_file.return_value = content - self.assertEqual( - { - "BOOTABLE_FLAG": self._val_decoded(self.bootflag), - "HOME": "/", - "PATH": "/bin:/sbin", - "MIXED": self._val_decoded(self.mixed), - }, - util.get_proc_env(1), - ) - self.assertEqual(1, m_load_file.call_count) + assert { + "BOOTABLE_FLAG": self._val_decoded(self.bootflag), + "HOME": "/", + "PATH": "/bin:/sbin", + "MIXED": self._val_decoded(self.mixed), + } == util.get_proc_env(1) + assert 1 == m_load_file.call_count @mock.patch(M_PATH + "load_binary_file") def test_all_utf8_encoded(self, m_load_file): """common path where only utf-8 decodable content.""" content = self.null.join((self.simple1, self.simple2)) m_load_file.return_value = content - self.assertEqual( - {"HOME": "/", "PATH": "/bin:/sbin"}, util.get_proc_env(1) - ) - self.assertEqual(1, m_load_file.call_count) + assert {"HOME": "/", "PATH": "/bin:/sbin"} == util.get_proc_env(1) + assert 1 == m_load_file.call_count @mock.patch(M_PATH + "load_binary_file") def test_non_existing_file_returns_empty_dict(self, m_load_file): """as implemented, a non-existing pid returns empty dict. 
This is how it was originally implemented.""" m_load_file.side_effect = OSError("File does not exist.") - self.assertEqual({}, util.get_proc_env(1)) - self.assertEqual(1, m_load_file.call_count) + assert {} == util.get_proc_env(1) + assert 1 == m_load_file.call_count class TestGetProcPpid(helpers.TestCase): @@ -2909,19 +2888,19 @@ def test_get_proc_ppid_linux(self): """get_proc_ppid returns correct parent pid value.""" my_pid = os.getpid() my_ppid = os.getppid() - self.assertEqual(my_ppid, Distro.get_proc_ppid(my_pid)) + assert my_ppid == Distro.get_proc_ppid(my_pid) @skipIf(not util.is_Linux(), "/proc/$pid/stat is not useful on not-Linux") def test_get_proc_pgrp_linux(self): """get_proc_ppid returns correct parent pid value.""" - self.assertEqual(os.getpgid(0), Distro.get_proc_pgid(os.getpid())) + assert os.getpgid(0) == Distro.get_proc_pgid(os.getpid()) @pytest.mark.allow_subp_for("ps") def test_get_proc_ppid_ps(self): """get_proc_ppid returns correct parent pid value.""" my_pid = os.getpid() my_ppid = os.getppid() - self.assertEqual(my_ppid, Distro.get_proc_ppid(my_pid)) + assert my_ppid == Distro.get_proc_ppid(my_pid) def test_get_proc_ppid_mocked(self): for ppid, proc_data in ( diff --git a/tests/unittests/test_version.py b/tests/unittests/test_version.py index 52a8ae33..f4e616ad 100644 --- a/tests/unittests/test_version.py +++ b/tests/unittests/test_version.py @@ -3,27 +3,26 @@ from unittest import mock from cloudinit import version -from tests.unittests.helpers import CiTestCase -class TestExportsFeatures(CiTestCase): +class TestExportsFeatures: def test_has_network_config_v1(self): - self.assertIn("NETWORK_CONFIG_V1", version.FEATURES) + assert "NETWORK_CONFIG_V1" in version.FEATURES def test_has_network_config_v2(self): - self.assertIn("NETWORK_CONFIG_V2", version.FEATURES) + assert "NETWORK_CONFIG_V2" in version.FEATURES -class TestVersionString(CiTestCase): +class TestVersionString: @mock.patch( "cloudinit.version._PACKAGED_VERSION", 
"17.2-3-gb05b9972-0ubuntu1" ) def test_package_version_respected(self): """If _PACKAGED_VERSION is filled in, then it should be returned.""" - self.assertEqual("17.2-3-gb05b9972-0ubuntu1", version.version_string()) + assert "17.2-3-gb05b9972-0ubuntu1" == version.version_string() @mock.patch("cloudinit.version._PACKAGED_VERSION", "@@PACKAGED_VERSION@@") @mock.patch("cloudinit.version.__VERSION__", "17.2") def test_package_version_skipped(self): """If _PACKAGED_VERSION is not modified, then return __VERSION__.""" - self.assertEqual("17.2", version.version_string()) + assert "17.2" == version.version_string() diff --git a/tools/cloud-init-hotplugd b/tools/cloud-init-hotplugd index 3d56fffa..eb811d69 100755 --- a/tools/cloud-init-hotplugd +++ b/tools/cloud-init-hotplugd @@ -11,13 +11,12 @@ PIPE="/run/cloud-init/share/hook-hotplug-cmd" -mkfifo -m700 $PIPE +[ -p $PIPE ] || mkfifo -m700 $PIPE while true; do # shellcheck disable=SC2162 - read args < $PIPE - # shellcheck disable=SC2086 - exec /usr/bin/cloud-init devel hotplug-hook $args + if read args < $PIPE; then + # shellcheck disable=SC2086 + /usr/bin/cloud-init devel hotplug-hook $args + fi done - -exit diff --git a/tools/read-dependencies b/tools/read-dependencies index 934c88aa..8f58908d 100755 --- a/tools/read-dependencies +++ b/tools/read-dependencies @@ -26,7 +26,7 @@ DISTRO_PKG_TYPE_MAP = { "centos": "redhat", "eurolinux": "redhat", "miraclelinux": "redhat", - "fedora": "redhat", + "fedora": "fedora", "rocky": "redhat", "redhat": "redhat", "debian": "debian", @@ -67,13 +67,13 @@ MAYBE_RELIABLE_YUM_INSTALL = [ grep -q baseurl $CENTOS_REPO if [ $? 
-eq 1 ]; then # CentOS 9 does not provide baseurl definitions - sed -i '/\[baseos\]/a baseurl=https://mirror.stream.centos.org/9-stream/BaseOS/x86_64/os' ${CENTOS_REPO} - sed -i '/\[appstream\]/a baseurl=https://mirror.stream.centos.org/9-stream/AppStream/x86_64/os' ${CENTOS_REPO} - sed -i '/\[crb\]/a baseurl=https://mirror.stream.centos.org/9-stream/CRB/x86_64/os' ${CENTOS_REPO} + sed -i '/\\[baseos\\]/a baseurl=https://mirror.stream.centos.org/9-stream/BaseOS/x86_64/os' ${CENTOS_REPO} + sed -i '/\\[appstream\\]/a baseurl=https://mirror.stream.centos.org/9-stream/AppStream/x86_64/os' ${CENTOS_REPO} + sed -i '/\\[crb\\]/a baseurl=https://mirror.stream.centos.org/9-stream/CRB/x86_64/os' ${CENTOS_REPO} fi CENTOS_EXTRAS_REPO="/etc/yum.repos.d/centos-addons.repo" if [ -f $CENTOS_EXTRAS_REPO ]; then - sed -i '/\[extras-common\]/a baseurl=https://mirror.stream.centos.org/SIGs/9-stream/extras/x86_64/extras-common' ${CENTOS_EXTRAS_REPO} + sed -i '/\\[extras-common\\]/a baseurl=https://mirror.stream.centos.org/SIGs/9-stream/extras/x86_64/extras-common' ${CENTOS_EXTRAS_REPO} fi fi } @@ -107,14 +107,22 @@ ZYPPER_INSTALL = [ DRYRUN_DISTRO_INSTALL_PKG_CMD = { "redhat": ["yum", "install", "--assumeyes"], + "fedora": ["yum", "install", "--assumeyes"], } DISTRO_INSTALL_PKG_CMD = { "redhat": MAYBE_RELIABLE_YUM_INSTALL, + "fedora": MAYBE_RELIABLE_YUM_INSTALL, "debian": ["apt", "install", "-y"], "suse": ZYPPER_INSTALL, } +DISTRO_UPDATE_PKG_CMD = { + "redhat": ["yum", "update"], + "debian": ["apt", "update", "-q"], + "suse": ["zypper", "update"], +} + # List of base system packages required to enable ci automation CI_SYSTEM_BASE_PKGS = { "common": ["make", "sudo", "tar"], @@ -347,19 +355,27 @@ def pkg_install(pkg_list, distro, test_distro=False, dry_run=False): "(dryrun)" if dry_run else "", " ".join(pkg_list) ) ) + distro_family = DISTRO_PKG_TYPE_MAP[distro] install_cmd = [] + update_cmd = DISTRO_UPDATE_PKG_CMD.get(distro_family, []) if dry_run: install_cmd.append("echo") + if 
update_cmd: + update_cmd.insert(0, "echo") if os.geteuid() != 0: install_cmd.append("sudo") + if update_cmd: + update_cmd.append("sudo") - distro_family = DISTRO_PKG_TYPE_MAP[distro] if dry_run and distro_family in DRYRUN_DISTRO_INSTALL_PKG_CMD: cmd = DRYRUN_DISTRO_INSTALL_PKG_CMD[distro_family] else: cmd = DISTRO_INSTALL_PKG_CMD[distro_family] install_cmd.extend(cmd) + if update_cmd: + subprocess.check_call(update_cmd) + if distro in ["centos", "redhat", "rocky", "eurolinux"]: # CentOS and Redhat need epel-release to access oauthlib and jsonschema subprocess.check_call(install_cmd + ["epel-release"]) @@ -371,15 +387,7 @@ def pkg_install(pkg_list, distro, test_distro=False, dry_run=False): subprocess.call( ["dnf", "config-manager", "--set-disabled", "epel-cisco-openh264"] ) - if distro in [ - "suse", - "opensuse", - "fedora", - "redhat", - "rocky", - "centos", - "eurolinux", - ]: + if DISTRO_PKG_TYPE_MAP.get(distro) in ["redhat", "suse", "fedora"]: pkg_list.append("rpm-build") subprocess.check_call(install_cmd + pkg_list) diff --git a/tools/run-container b/tools/run-container index 5c5d3577..5a14140a 100755 --- a/tools/run-container +++ b/tools/run-container @@ -24,7 +24,7 @@ Usage: ${0##*/} [ options ] [images:]image-ref To see images available, run 'lxc image list images:' Example input: - centos/7 + rockylinux/9 opensuse/15.4 debian/10 diff --git a/tools/test_tools.py b/tools/test_tools.py index e744e1d1..694aed61 100644 --- a/tools/test_tools.py +++ b/tools/test_tools.py @@ -7,7 +7,7 @@ import setuptools -from setup_utils import version_to_pep440 +from setup_utils import version_to_pep440 # pylint: disable=import-error try: validate_version = setuptools.dist.Distribution._validate_version # type: ignore # noqa: E501 diff --git a/tools/tox-venv b/tools/tox-venv index e16f129d..c22f6fac 100755 --- a/tools/tox-venv +++ b/tools/tox-venv @@ -22,7 +22,7 @@ get_env_dirs() { key=${key%%=*} val=${equal} fi - [ "$key" = "envdir" ] || continue + [ "$key" = "envdir" ] || [ 
"$key" = "env_dir" ] || continue out="${out:+${out}${CR}}${curenv}:$val" done echo "$out" diff --git a/tools/validate-yaml.py b/tools/validate-yaml.py index fa90edf1..2219e657 100755 --- a/tools/validate-yaml.py +++ b/tools/validate-yaml.py @@ -1,7 +1,6 @@ #!/usr/bin/env python3 -"""Try to read a YAML file and report any errors. -""" +"""Try to read a YAML file and report any errors.""" import sys import yaml diff --git a/tox.ini b/tox.ini index e7f3f6f1..7549e79f 100644 --- a/tox.ini +++ b/tox.ini @@ -39,13 +39,13 @@ deps = [pinned_versions] deps = {[types]deps} - black==24.8.0 + black==25.1.0 hypothesis==6.111.0 hypothesis_jsonschema==0.23.1 - isort==5.13.2 - mypy==1.11.1 - pylint==3.2.6 - ruff==0.6.4 + isort==6.0.1 + mypy==1.17.1 + pylint==3.3.8 + ruff==0.12.9 [latest_versions] deps = @@ -69,8 +69,11 @@ deps = {[pinned_versions]deps} commands = {envpython} -m ruff check {posargs:.} [testenv:pylint] -deps = {[pinned_versions]deps} -commands = {envpython} -m pylint {posargs:.} +deps = + -r{toxinidir}/integration-requirements.txt + {[pinned_versions]deps} + {[testenv]deps} +commands = {envpython} -m pylint {posargs:cloudinit/ tests/ tools/} [testenv:black] deps = {[pinned_versions]deps} @@ -94,7 +97,7 @@ deps = {[pinned_versions]deps} commands = {envpython} -m ruff check {posargs:.} - {envpython} -m pylint {posargs:.} + {envpython} -m pylint {posargs:cloudinit/ tests/ tools/} {envpython} -m black --check {posargs:.} {envpython} -m isort --check-only --diff {posargs:.} {envpython} -m mypy {posargs:cloudinit/ tests/ tools/} @@ -227,8 +230,9 @@ commands = {envpython} -m mypy {posargs:cloudinit/ tests/ tools/} [testenv:tip-pylint] deps = {[latest_versions]deps} + -r{toxinidir}/integration-requirements.txt {[testenv]deps} -commands = {envpython} -m pylint {posargs:.} +commands = {envpython} -m pylint {posargs:cloudinit/ tests/ tools/} [testenv:tip-black] deps = {[latest_versions]deps}