diff --git a/.gitignore b/.gitignore
index 054bcbfc..5f72c41b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -27,38 +27,26 @@ wheels/
 # Log files
 *.log
 
-# Eclipse / PyCharm files
+# Eclipse / PyCharm files / VSCode files
 .project
 .pydevproject
 .settings/
 .idea/
-
-# Installer logs
-pip-log.txt
-pip-delete-this-directory.txt
-
-# Unit test / static analysis / coverage reports
-htmlcov/
-coverage_html_report/
-.tox/
-.coverage
-.coverage.*
-.cache
-nosetests.xml
-coverage.xml
-*.cover
-.hypothesis/
-flake8-output.txt
-shellcheck-output.xml
+.vscode/launch.json
+.vscode/settings.json
 
 # Environments
+.env
+.env.local
+.env.*.local
 .venv
 env/
 venv/
 ENV/
 
-# Configuration files
-# config.ini
+# Dashboard UI Files
+/portal/ui/.angular
+/portal/ui/node_modules
 
 # Mac files
 .DS_Store
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 00000000..92044114
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,32 @@
+---
+default_stages: [commit]
+default_language_version:
+  python: python3.11
+repos:
+  - repo: local
+    hooks:
+      - id: flake8
+        name: flake8 - code lint and style checks
+        entry: flake8
+        language: python
+        types: [python]
+        args: [--config, pre-commit/.flake8]
+      - id: isort
+        name: isort - import sorting
+        entry: isort
+        language: python
+        types: [python]
+        args: [--settings-path, pre-commit/pyproject.toml]
+      - id: black
+        name: black - check formatting (show diff on FAIL)
+        entry: black
+        language: python
+        types: [python]
+        args: [--config, pre-commit/pyproject.toml, --check, --diff, --color, --quiet]
+      - id: black
+        name: black - auto-format code on FAIL
+        entry: black
+        language: python
+        types: [python]
+        args: [--config, pre-commit/pyproject.toml]
+
diff --git a/.zuul.yaml b/.zuul.yaml
index aa634d46..3c062079 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -7,15 +7,11 @@
       - build-openstack-releasenotes:
           vars:
             sphinx_python: python3
-      - openstack-tox-pep8
-      - openstack-tox-pylint
   gate:
     jobs:
       - build-openstack-releasenotes:
           vars:
             sphinx_python: python3
-      - openstack-tox-pep8
-      - openstack-tox-pylint
   post:
     jobs:
       - stx-test-upload-git-mirror
diff --git a/DB_SETUP.txt b/DB_SETUP.txt
new file mode 100644
index 00000000..74f5253a
--- /dev/null
+++ b/DB_SETUP.txt
@@ -0,0 +1,17 @@
+These instructions are for setting up the DB for the test automation results dashboard.
+
+1) Install postgres 16.2 database
+2) Install pgadmin4 version 8.4
+3) Right click on 'Servers' then select Create -> Server Group.
+   Create a new Server group called 'Automation'
+4) Expand the Automation folder, right click the Login/Group Roles folder and select Create -> Login/Group Role
+   - Name the user automation_user
+   - From the Privileges tab, enable all permissions
+5) From pgadmin4, connect to the postgres db
+   - From the Databases folder, right click -> Create -> Database with name automation_central
+   - Right click on the created db (automation_central) and select Restore
+   - Set format to Custom or tar
+   - Select the /framework/resource/db/automation_central_backup.sql file
+   - Select automation_user for Role name
+   - From Data Options -> select Pre-data, Data, and Post-data
+   - Click the Restore button
+6) -- config steps once created go here --
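+
+Note: steps 4 and 5 can also be done from a shell instead of pgadmin4. The following
+is an untested sketch of roughly equivalent CLI commands ("enable all permissions" is
+approximated here with --superuser; adjust host/port options and paths to your setup):
+
+    sudo -u postgres createuser --superuser --pwprompt automation_user
+    sudo -u postgres createdb --owner=automation_user automation_central
+    pg_restore --username=automation_user --dbname=automation_central \
+        --section=pre-data --section=data --section=post-data \
+        framework/resource/db/automation_central_backup.sql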
\ No newline at end of file
diff --git a/Pipfile b/Pipfile
new file mode 100644
index 00000000..506e4bbe
--- /dev/null
+++ b/Pipfile
@@ -0,0 +1,22 @@
+[[source]]
+name = "pypi"
+url = "https://pypi.org/simple"
+
+[requires]
+python_version = ">=3.11"
+
+[packages]
+# Linting and Static Analysis
+pre-commit = "==3.7.0"
+black = "==24.3.0"
+isort = "==5.13.2"
+flake8 = "==7.0.0"
+# Tools Packages
+pytest = "==8.1.1"
+paramiko = "==3.4.0"
+json5 = "==0.9.24"
+selenium = "==4.20.0"
+django = "==5.0.6"
+psycopg2-binary = "==2.9.9"
+jinja2 = "*"
+requests = "*"
diff --git a/Pipfile.lock b/Pipfile.lock
new file mode 100644
index 00000000..161aa9a3
--- /dev/null
+++ b/Pipfile.lock
@@ -0,0 +1,868 @@
+{
+    "_meta": {
+        "hash": {
+            "sha256": "ea9321978b73f4a63700306112ba707e262be757bf530165e75602d54804e979"
+        },
+        "pipfile-spec": 6,
+        "requires": {
+            "python_version": ">=3.11"
+        },
+        "sources": [
+            {
+                "name": "pypi",
+                "url": "https://pypi.org/simple",
+                "verify_ssl": true
+            }
+        ]
+    },
+    "default": {
+        "asgiref": {
+            "hashes": [
+                "sha256:3e1e3ecc849832fe52ccf2cb6686b7a55f82bb1d6aee72a58826471390335e47",
+                "sha256:c343bd80a0bec947a9860adb4c432ffa7db769836c64238fc34bdc3fec84d590"
+            ],
+            "markers": "python_version >= '3.8'",
+            "version": "==3.8.1"
+        },
+        "attrs": {
+            "hashes": [
+                "sha256:5cfb1b9148b5b086569baec03f20d7b6bf3bcacc9a42bebf87ffaaca362f6346",
+                "sha256:81921eb96de3191c8258c199618104dd27ac608d9366f5e35d011eae1867ede2"
+            ],
+            "markers": "python_version >= '3.7'",
+            "version": "==24.2.0"
+        },
+        "bcrypt": {
+            "hashes": [
+                "sha256:096a15d26ed6ce37a14c1ac1e48119660f21b24cba457f160a4b830f3fe6b5cb",
+                "sha256:0da52759f7f30e83f1e30a888d9163a81353ef224d82dc58eb5bb52efcabc399",
+                "sha256:1bb429fedbe0249465cdd85a58e8376f31bb315e484f16e68ca4c786dcc04291",
+                "sha256:1d84cf6d877918620b687b8fd1bf7781d11e8a0998f576c7aa939776b512b98d",
+                "sha256:1ee38e858bf5d0287c39b7a1fc59eec64bbf880c7d504d3a06a96c16e14058e7",
+                "sha256:1ff39b78a52cf03fdf902635e4c81e544714861ba3f0efc56558979dd4f09170",
+                "sha256:27fe0f57bb5573104b5a6de5e4153c60814c711b29364c10a75a54bb6d7ff48d",
+                "sha256:3413bd60460f76097ee2e0a493ccebe4a7601918219c02f503984f0a7ee0aebe",
+                "sha256:3698393a1b1f1fd5714524193849d0c6d524d33523acca37cd28f02899285060",
+                "sha256:373db9abe198e8e2c70d12b479464e0d5092cc122b20ec504097b5f2297ed184",
+                "sha256:39e1d30c7233cfc54f5c3f2c825156fe044efdd3e0b9d309512cc514a263ec2a",
+                "sha256:3bbbfb2734f0e4f37c5136130405332640a1e46e6b23e000eeff2ba8d005da68",
+                "sha256:3d3a6d28cb2305b43feac298774b997e372e56c7c7afd90a12b3dc49b189151c",
+                "sha256:5a1e8aa9b28ae28020a3ac4b053117fb51c57a010b9f969603ed885f23841458",
+                "sha256:61ed14326ee023917ecd093ee6ef422a72f3aec6f07e21ea5f10622b735538a9",
+                "sha256:655ea221910bcac76ea08aaa76df427ef8625f92e55a8ee44fbf7753dbabb328",
+
"sha256:762a2c5fb35f89606a9fde5e51392dad0cd1ab7ae64149a8b935fe8d79dd5ed7", + "sha256:77800b7147c9dc905db1cba26abe31e504d8247ac73580b4aa179f98e6608f34", + "sha256:8ac68872c82f1add6a20bd489870c71b00ebacd2e9134a8aa3f98a0052ab4b0e", + "sha256:8d7bb9c42801035e61c109c345a28ed7e84426ae4865511eb82e913df18f58c2", + "sha256:8f6ede91359e5df88d1f5c1ef47428a4420136f3ce97763e31b86dd8280fbdf5", + "sha256:9c1c4ad86351339c5f320ca372dfba6cb6beb25e8efc659bedd918d921956bae", + "sha256:c02d944ca89d9b1922ceb8a46460dd17df1ba37ab66feac4870f6862a1533c00", + "sha256:c52aac18ea1f4a4f65963ea4f9530c306b56ccd0c6f8c8da0c06976e34a6e841", + "sha256:cb2a8ec2bc07d3553ccebf0746bbf3d19426d1c6d1adbd4fa48925f66af7b9e8", + "sha256:cf69eaf5185fd58f268f805b505ce31f9b9fc2d64b376642164e9244540c1221", + "sha256:f4f4acf526fcd1c34e7ce851147deedd4e26e6402369304220250598b26448db" + ], + "markers": "python_version >= '3.7'", + "version": "==4.2.0" + }, + "black": { + "hashes": [ + "sha256:2818cf72dfd5d289e48f37ccfa08b460bf469e67fb7c4abb07edc2e9f16fb63f", + "sha256:41622020d7120e01d377f74249e677039d20e6344ff5851de8a10f11f513bf93", + "sha256:4acf672def7eb1725f41f38bf6bf425c8237248bb0804faa3965c036f7672d11", + "sha256:4be5bb28e090456adfc1255e03967fb67ca846a03be7aadf6249096100ee32d0", + "sha256:4f1373a7808a8f135b774039f61d59e4be7eb56b2513d3d2f02a8b9365b8a8a9", + "sha256:56f52cfbd3dabe2798d76dbdd299faa046a901041faf2cf33288bc4e6dae57b5", + "sha256:65b76c275e4c1c5ce6e9870911384bff5ca31ab63d19c76811cb1fb162678213", + "sha256:65c02e4ea2ae09d16314d30912a58ada9a5c4fdfedf9512d23326128ac08ac3d", + "sha256:6905238a754ceb7788a73f02b45637d820b2f5478b20fec82ea865e4f5d4d9f7", + "sha256:79dcf34b33e38ed1b17434693763301d7ccbd1c5860674a8f871bd15139e7837", + "sha256:7bb041dca0d784697af4646d3b62ba4a6b028276ae878e53f6b4f74ddd6db99f", + "sha256:7d5e026f8da0322b5662fa7a8e752b3fa2dac1c1cbc213c3d7ff9bdd0ab12395", + "sha256:9f50ea1132e2189d8dff0115ab75b65590a3e97de1e143795adb4ce317934995", + "sha256:a0c9c4a0771afc6919578cec71ce82a3e31e054904e7197deacbc9382671c41f", + "sha256:aadf7a02d947936ee418777e0247ea114f78aff0d0959461057cae8a04f20597", + "sha256:b5991d523eee14756f3c8d5df5231550ae8993e2286b8014e2fdea7156ed0959", + "sha256:bf21b7b230718a5f08bd32d5e4f1db7fc8788345c8aea1d155fc17852b3410f5", + "sha256:c45f8dff244b3c431b36e3224b6be4a127c6aca780853574c00faf99258041eb", + "sha256:c7ed6668cbbfcd231fa0dc1b137d3e40c04c7f786e626b405c62bcd5db5857e4", + "sha256:d7de8d330763c66663661a1ffd432274a2f92f07feeddd89ffd085b5744f85e7", + "sha256:e19cb1c6365fd6dc38a6eae2dcb691d7d83935c10215aef8e6c38edee3f77abd", + "sha256:e2af80566f43c85f5797365077fb64a393861a3730bd110971ab7a0c94e873e7" + ], + "index": "pypi", + "markers": "python_version >= '3.8'", + "version": "==24.3.0" + }, + "certifi": { + "hashes": [ + "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8", + "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9" + ], + "markers": "python_version >= '3.6'", + "version": "==2024.8.30" + }, + "cffi": { + "hashes": [ + "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8", + "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2", + "sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1", + "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15", + "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36", + "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824", + "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8", 
+ "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36", + "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17", + "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf", + "sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc", + "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3", + "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed", + "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702", + "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1", + "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8", + "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903", + "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6", + "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d", + "sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b", + "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e", + "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be", + "sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c", + "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683", + "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9", + "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c", + "sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8", + "sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1", + "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4", + "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655", + "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67", + "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595", + "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0", + "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65", + "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41", + "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6", + "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401", + "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6", + "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3", + "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16", + "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93", + "sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e", + "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4", + "sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964", + "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c", + "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576", + "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0", + "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3", + "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662", + "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3", + "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff", + "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5", + "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd", + 
"sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f", + "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5", + "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14", + "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d", + "sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9", + "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7", + "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382", + "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a", + "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e", + "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a", + "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4", + "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99", + "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87", + "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b" + ], + "markers": "platform_python_implementation != 'PyPy'", + "version": "==1.17.1" + }, + "cfgv": { + "hashes": [ + "sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9", + "sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560" + ], + "markers": "python_version >= '3.8'", + "version": "==3.4.0" + }, + "charset-normalizer": { + "hashes": [ + "sha256:0099d79bdfcf5c1f0c2c72f91516702ebf8b0b8ddd8905f97a8aecf49712c621", + "sha256:0713f3adb9d03d49d365b70b84775d0a0d18e4ab08d12bc46baa6132ba78aaf6", + "sha256:07afec21bbbbf8a5cc3651aa96b980afe2526e7f048fdfb7f1014d84acc8b6d8", + "sha256:0b309d1747110feb25d7ed6b01afdec269c647d382c857ef4663bbe6ad95a912", + "sha256:0d99dd8ff461990f12d6e42c7347fd9ab2532fb70e9621ba520f9e8637161d7c", + "sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b", + "sha256:1110e22af8ca26b90bd6364fe4c763329b0ebf1ee213ba32b68c73de5752323d", + "sha256:130272c698667a982a5d0e626851ceff662565379baf0ff2cc58067b81d4f11d", + "sha256:136815f06a3ae311fae551c3df1f998a1ebd01ddd424aa5603a4336997629e95", + "sha256:14215b71a762336254351b00ec720a8e85cada43b987da5a042e4ce3e82bd68e", + "sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565", + "sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64", + "sha256:2006769bd1640bdf4d5641c69a3d63b71b81445473cac5ded39740a226fa88ab", + "sha256:20587d20f557fe189b7947d8e7ec5afa110ccf72a3128d61a2a387c3313f46be", + "sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e", + "sha256:27623ba66c183eca01bf9ff833875b459cad267aeeb044477fedac35e19ba907", + "sha256:285e96d9d53422efc0d7a17c60e59f37fbf3dfa942073f666db4ac71e8d726d0", + "sha256:2de62e8801ddfff069cd5c504ce3bc9672b23266597d4e4f50eda28846c322f2", + "sha256:2f6c34da58ea9c1a9515621f4d9ac379871a8f21168ba1b5e09d74250de5ad62", + "sha256:309a7de0a0ff3040acaebb35ec45d18db4b28232f21998851cfa709eeff49d62", + "sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23", + "sha256:3710a9751938947e6327ea9f3ea6332a09bf0ba0c09cae9cb1f250bd1f1549bc", + "sha256:3d59d125ffbd6d552765510e3f31ed75ebac2c7470c7274195b9161a32350284", + "sha256:40d3ff7fc90b98c637bda91c89d51264a3dcf210cade3a2c6f838c7268d7a4ca", + "sha256:425c5f215d0eecee9a56cdb703203dda90423247421bf0d67125add85d0c4455", + "sha256:43193c5cda5d612f247172016c4bb71251c784d7a4d9314677186a838ad34858", + "sha256:44aeb140295a2f0659e113b31cfe92c9061622cadbc9e2a2f7b8ef6b1e29ef4b", + 
"sha256:47334db71978b23ebcf3c0f9f5ee98b8d65992b65c9c4f2d34c2eaf5bcaf0594", + "sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc", + "sha256:4a51b48f42d9358460b78725283f04bddaf44a9358197b889657deba38f329db", + "sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b", + "sha256:4ec9dd88a5b71abfc74e9df5ebe7921c35cbb3b641181a531ca65cdb5e8e4dea", + "sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6", + "sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920", + "sha256:55f56e2ebd4e3bc50442fbc0888c9d8c94e4e06a933804e2af3e89e2f9c1c749", + "sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7", + "sha256:5d447056e2ca60382d460a604b6302d8db69476fd2015c81e7c35417cfabe4cd", + "sha256:5ed2e36c3e9b4f21dd9422f6893dec0abf2cca553af509b10cd630f878d3eb99", + "sha256:5ff2ed8194587faf56555927b3aa10e6fb69d931e33953943bc4f837dfee2242", + "sha256:62f60aebecfc7f4b82e3f639a7d1433a20ec32824db2199a11ad4f5e146ef5ee", + "sha256:63bc5c4ae26e4bc6be6469943b8253c0fd4e4186c43ad46e713ea61a0ba49129", + "sha256:6b40e8d38afe634559e398cc32b1472f376a4099c75fe6299ae607e404c033b2", + "sha256:6b493a043635eb376e50eedf7818f2f322eabbaa974e948bd8bdd29eb7ef2a51", + "sha256:6dba5d19c4dfab08e58d5b36304b3f92f3bd5d42c1a3fa37b5ba5cdf6dfcbcee", + "sha256:6fd30dc99682dc2c603c2b315bded2799019cea829f8bf57dc6b61efde6611c8", + "sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b", + "sha256:7706f5850360ac01d80c89bcef1640683cc12ed87f42579dab6c5d3ed6888613", + "sha256:7782afc9b6b42200f7362858f9e73b1f8316afb276d316336c0ec3bd73312742", + "sha256:79983512b108e4a164b9c8d34de3992f76d48cadc9554c9e60b43f308988aabe", + "sha256:7f683ddc7eedd742e2889d2bfb96d69573fde1d92fcb811979cdb7165bb9c7d3", + "sha256:82357d85de703176b5587dbe6ade8ff67f9f69a41c0733cf2425378b49954de5", + "sha256:84450ba661fb96e9fd67629b93d2941c871ca86fc38d835d19d4225ff946a631", + "sha256:86f4e8cca779080f66ff4f191a685ced73d2f72d50216f7112185dc02b90b9b7", + "sha256:8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15", + "sha256:8ce7fd6767a1cc5a92a639b391891bf1c268b03ec7e021c7d6d902285259685c", + "sha256:8ff4e7cdfdb1ab5698e675ca622e72d58a6fa2a8aa58195de0c0061288e6e3ea", + "sha256:9289fd5dddcf57bab41d044f1756550f9e7cf0c8e373b8cdf0ce8773dc4bd417", + "sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250", + "sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88", + "sha256:95c3c157765b031331dd4db3c775e58deaee050a3042fcad72cbc4189d7c8dca", + "sha256:980b4f289d1d90ca5efcf07958d3eb38ed9c0b7676bf2831a54d4f66f9c27dfa", + "sha256:9ae4ef0b3f6b41bad6366fb0ea4fc1d7ed051528e113a60fa2a65a9abb5b1d99", + "sha256:9c98230f5042f4945f957d006edccc2af1e03ed5e37ce7c373f00a5a4daa6149", + "sha256:9fa2566ca27d67c86569e8c85297aaf413ffab85a8960500f12ea34ff98e4c41", + "sha256:a14969b8691f7998e74663b77b4c36c0337cb1df552da83d5c9004a93afdb574", + "sha256:a8aacce6e2e1edcb6ac625fb0f8c3a9570ccc7bfba1f63419b3769ccf6a00ed0", + "sha256:a8e538f46104c815be19c975572d74afb53f29650ea2025bbfaef359d2de2f7f", + "sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d", + "sha256:aa693779a8b50cd97570e5a0f343538a8dbd3e496fa5dcb87e29406ad0299654", + "sha256:ab22fbd9765e6954bc0bcff24c25ff71dcbfdb185fcdaca49e81bac68fe724d3", + "sha256:ab2e5bef076f5a235c3774b4f4028a680432cded7cad37bba0fd90d64b187d19", + "sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90", + "sha256:af73657b7a68211996527dbfeffbb0864e043d270580c5aef06dc4b659a4b578", + 
"sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9", + "sha256:b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1", + "sha256:b8831399554b92b72af5932cdbbd4ddc55c55f631bb13ff8fe4e6536a06c5c51", + "sha256:b8dcd239c743aa2f9c22ce674a145e0a25cb1566c495928440a181ca1ccf6719", + "sha256:bcb4f8ea87d03bc51ad04add8ceaf9b0f085ac045ab4d74e73bbc2dc033f0236", + "sha256:bd7af3717683bea4c87acd8c0d3d5b44d56120b26fd3f8a692bdd2d5260c620a", + "sha256:bf4475b82be41b07cc5e5ff94810e6a01f276e37c2d55571e3fe175e467a1a1c", + "sha256:c3e446d253bd88f6377260d07c895816ebf33ffffd56c1c792b13bff9c3e1ade", + "sha256:c57516e58fd17d03ebe67e181a4e4e2ccab1168f8c2976c6a334d4f819fe5944", + "sha256:c94057af19bc953643a33581844649a7fdab902624d2eb739738a30e2b3e60fc", + "sha256:cab5d0b79d987c67f3b9e9c53f54a61360422a5a0bc075f43cab5621d530c3b6", + "sha256:ce031db0408e487fd2775d745ce30a7cd2923667cf3b69d48d219f1d8f5ddeb6", + "sha256:cee4373f4d3ad28f1ab6290684d8e2ebdb9e7a1b74fdc39e4c211995f77bec27", + "sha256:d5b054862739d276e09928de37c79ddeec42a6e1bfc55863be96a36ba22926f6", + "sha256:dbe03226baf438ac4fda9e2d0715022fd579cb641c4cf639fa40d53b2fe6f3e2", + "sha256:dc15e99b2d8a656f8e666854404f1ba54765871104e50c8e9813af8a7db07f12", + "sha256:dcaf7c1524c0542ee2fc82cc8ec337f7a9f7edee2532421ab200d2b920fc97cf", + "sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114", + "sha256:dd9a8bd8900e65504a305bf8ae6fa9fbc66de94178c420791d0293702fce2df7", + "sha256:de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf", + "sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d", + "sha256:e91f541a85298cf35433bf66f3fab2a4a2cff05c127eeca4af174f6d497f0d4b", + "sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed", + "sha256:ee803480535c44e7f5ad00788526da7d85525cfefaf8acf8ab9a310000be4b03", + "sha256:f09cb5a7bbe1ecae6e87901a2eb23e0256bb524a79ccc53eb0b7629fbe7677c4", + "sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67", + "sha256:f1a2f519ae173b5b6a2c9d5fa3116ce16e48b3462c8b96dfdded11055e3d6365", + "sha256:f28f891ccd15c514a0981f3b9db9aa23d62fe1a99997512b0491d2ed323d229a", + "sha256:f3e73a4255342d4eb26ef6df01e3962e73aa29baa3124a8e824c5d3364a65748", + "sha256:f606a1881d2663630ea5b8ce2efe2111740df4b687bd78b34a8131baa007f79b", + "sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079", + "sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482" + ], + "markers": "python_full_version >= '3.7.0'", + "version": "==3.4.0" + }, + "click": { + "hashes": [ + "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28", + "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de" + ], + "markers": "python_version >= '3.7'", + "version": "==8.1.7" + }, + "cryptography": { + "hashes": [ + "sha256:0c580952eef9bf68c4747774cde7ec1d85a6e61de97281f2dba83c7d2c806362", + "sha256:0f996e7268af62598f2fc1204afa98a3b5712313a55c4c9d434aef49cadc91d4", + "sha256:1ec0bcf7e17c0c5669d881b1cd38c4972fade441b27bda1051665faaa89bdcaa", + "sha256:281c945d0e28c92ca5e5930664c1cefd85efe80e5c0d2bc58dd63383fda29f83", + "sha256:2ce6fae5bdad59577b44e4dfed356944fbf1d925269114c28be377692643b4ff", + "sha256:315b9001266a492a6ff443b61238f956b214dbec9910a081ba5b6646a055a805", + "sha256:443c4a81bb10daed9a8f334365fe52542771f25aedaf889fd323a853ce7377d6", + "sha256:4a02ded6cd4f0a5562a8887df8b3bd14e822a90f97ac5e544c162899bc467664", + "sha256:53a583b6637ab4c4e3591a15bc9db855b8d9dee9a669b550f311480acab6eb08", + 
"sha256:63efa177ff54aec6e1c0aefaa1a241232dcd37413835a9b674b6e3f0ae2bfd3e", + "sha256:74f57f24754fe349223792466a709f8e0c093205ff0dca557af51072ff47ab18", + "sha256:7e1ce50266f4f70bf41a2c6dc4358afadae90e2a1e5342d3c08883df1675374f", + "sha256:81ef806b1fef6b06dcebad789f988d3b37ccaee225695cf3e07648eee0fc6b73", + "sha256:846da004a5804145a5f441b8530b4bf35afbf7da70f82409f151695b127213d5", + "sha256:8ac43ae87929a5982f5948ceda07001ee5e83227fd69cf55b109144938d96984", + "sha256:9762ea51a8fc2a88b70cf2995e5675b38d93bf36bd67d91721c309df184f49bd", + "sha256:a2a431ee15799d6db9fe80c82b055bae5a752bef645bba795e8e52687c69efe3", + "sha256:bf7a1932ac4176486eab36a19ed4c0492da5d97123f1406cf15e41b05e787d2e", + "sha256:c2e6fc39c4ab499049df3bdf567f768a723a5e8464816e8f009f121a5a9f4405", + "sha256:cbeb489927bd7af4aa98d4b261af9a5bc025bd87f0e3547e11584be9e9427be2", + "sha256:d03b5621a135bffecad2c73e9f4deb1a0f977b9a8ffe6f8e002bf6c9d07b918c", + "sha256:d56e96520b1020449bbace2b78b603442e7e378a9b3bd68de65c782db1507995", + "sha256:df6b6c6d742395dd77a23ea3728ab62f98379eff8fb61be2744d4679ab678f73", + "sha256:e1be4655c7ef6e1bbe6b5d0403526601323420bcf414598955968c9ef3eb7d16", + "sha256:f18c716be16bc1fea8e95def49edf46b82fccaa88587a45f8dc0ff6ab5d8e0a7", + "sha256:f46304d6f0c6ab8e52770addfa2fc41e6629495548862279641972b6215451cd", + "sha256:f7b178f11ed3664fd0e995a47ed2b5ff0a12d893e41dd0494f406d1cf555cab7" + ], + "markers": "python_version >= '3.7'", + "version": "==43.0.3" + }, + "distlib": { + "hashes": [ + "sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87", + "sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403" + ], + "version": "==0.3.9" + }, + "django": { + "hashes": [ + "sha256:8363ac062bb4ef7c3f12d078f6fa5d154031d129a15170a1066412af49d30905", + "sha256:ff1b61005004e476e0aeea47c7f79b85864c70124030e95146315396f1e7951f" + ], + "index": "pypi", + "markers": "python_version >= '3.10'", + "version": "==5.0.6" + }, + "filelock": { + "hashes": [ + "sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0", + "sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435" + ], + "markers": "python_version >= '3.8'", + "version": "==3.16.1" + }, + "flake8": { + "hashes": [ + "sha256:33f96621059e65eec474169085dc92bf26e7b2d47366b70be2f67ab80dc25132", + "sha256:a6dfbb75e03252917f2473ea9653f7cd799c3064e54d4c8140044c5c065f53c3" + ], + "index": "pypi", + "markers": "python_full_version >= '3.8.1'", + "version": "==7.0.0" + }, + "h11": { + "hashes": [ + "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d", + "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761" + ], + "markers": "python_version >= '3.7'", + "version": "==0.14.0" + }, + "identify": { + "hashes": [ + "sha256:53863bcac7caf8d2ed85bd20312ea5dcfc22226800f6d6881f232d861db5a8f0", + "sha256:91478c5fb7c3aac5ff7bf9b4344f803843dc586832d5f110d672b19aa1984c98" + ], + "markers": "python_version >= '3.8'", + "version": "==2.6.1" + }, + "idna": { + "hashes": [ + "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", + "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3" + ], + "markers": "python_version >= '3.6'", + "version": "==3.10" + }, + "iniconfig": { + "hashes": [ + "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3", + "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374" + ], + "markers": "python_version >= '3.7'", + "version": "==2.0.0" + }, + "isort": { + "hashes": [ + 
"sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109", + "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6" + ], + "index": "pypi", + "markers": "python_full_version >= '3.8.0'", + "version": "==5.13.2" + }, + "jinja2": { + "hashes": [ + "sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369", + "sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d" + ], + "index": "pypi", + "markers": "python_version >= '3.7'", + "version": "==3.1.4" + }, + "json5": { + "hashes": [ + "sha256:0c638399421da959a20952782800e5c1a78c14e08e1dc9738fa10d8ec14d58c8", + "sha256:4ca101fd5c7cb47960c055ef8f4d0e31e15a7c6c48c3b6f1473fc83b6c462a13" + ], + "index": "pypi", + "markers": "python_version >= '3.8'", + "version": "==0.9.24" + }, + "markupsafe": { + "hashes": [ + "sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4", + "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30", + "sha256:1225beacc926f536dc82e45f8a4d68502949dc67eea90eab715dea3a21c1b5f0", + "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9", + "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396", + "sha256:1a9d3f5f0901fdec14d8d2f66ef7d035f2157240a433441719ac9a3fba440b13", + "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028", + "sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca", + "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557", + "sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832", + "sha256:3169b1eefae027567d1ce6ee7cae382c57fe26e82775f460f0b2778beaad66c0", + "sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b", + "sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579", + "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a", + "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c", + "sha256:48032821bbdf20f5799ff537c7ac3d1fba0ba032cfc06194faffa8cda8b560ff", + "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c", + "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22", + "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094", + "sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb", + "sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e", + "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5", + "sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a", + "sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d", + "sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a", + "sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b", + "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8", + "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225", + "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c", + "sha256:88b49a3b9ff31e19998750c38e030fc7bb937398b1f78cfa599aaef92d693144", + "sha256:8c4e8c3ce11e1f92f6536ff07154f9d49677ebaaafc32db9db4620bc11ed480f", + "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87", + "sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d", + "sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93", + "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf", + 
"sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158", + "sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84", + "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb", + "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48", + "sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171", + "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c", + "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6", + "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd", + "sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d", + "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1", + "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d", + "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca", + "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a", + "sha256:cfad01eed2c2e0c01fd0ecd2ef42c492f7f93902e39a42fc9ee1692961443a29", + "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe", + "sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798", + "sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c", + "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8", + "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f", + "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f", + "sha256:eaa0a10b7f72326f1372a713e73c3f739b524b3af41feb43e4921cb529f5929a", + "sha256:eb7972a85c54febfb25b5c4b4f3af4dcc731994c7da0d8a0b4a6eb0640e1d178", + "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0", + "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79", + "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430", + "sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50" + ], + "markers": "python_version >= '3.9'", + "version": "==3.0.2" + }, + "mccabe": { + "hashes": [ + "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325", + "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e" + ], + "markers": "python_version >= '3.6'", + "version": "==0.7.0" + }, + "mypy-extensions": { + "hashes": [ + "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d", + "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782" + ], + "markers": "python_version >= '3.5'", + "version": "==1.0.0" + }, + "nodeenv": { + "hashes": [ + "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f", + "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5, 3.6'", + "version": "==1.9.1" + }, + "outcome": { + "hashes": [ + "sha256:9dcf02e65f2971b80047b377468e72a268e15c0af3cf1238e6ff14f7f91143b8", + "sha256:e771c5ce06d1415e356078d3bdd68523f284b4ce5419828922b6871e65eda82b" + ], + "markers": "python_version >= '3.7'", + "version": "==1.3.0.post0" + }, + "packaging": { + "hashes": [ + "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002", + "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124" + ], + "markers": "python_version >= '3.8'", + "version": "==24.1" + }, + "paramiko": { + "hashes": [ + "sha256:43f0b51115a896f9c00f59618023484cb3a14b98bbceab43394a39c6739b7ee7", + 
"sha256:aac08f26a31dc4dffd92821527d1682d99d52f9ef6851968114a8728f3c274d3" + ], + "index": "pypi", + "markers": "python_version >= '3.6'", + "version": "==3.4.0" + }, + "pathspec": { + "hashes": [ + "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", + "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712" + ], + "markers": "python_version >= '3.8'", + "version": "==0.12.1" + }, + "platformdirs": { + "hashes": [ + "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907", + "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb" + ], + "markers": "python_version >= '3.8'", + "version": "==4.3.6" + }, + "pluggy": { + "hashes": [ + "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1", + "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669" + ], + "markers": "python_version >= '3.8'", + "version": "==1.5.0" + }, + "pre-commit": { + "hashes": [ + "sha256:5eae9e10c2b5ac51577c3452ec0a490455c45a0533f7960f993a0d01e59decab", + "sha256:e209d61b8acdcf742404408531f0c37d49d2c734fd7cff2d6076083d191cb060" + ], + "index": "pypi", + "markers": "python_version >= '3.9'", + "version": "==3.7.0" + }, + "psycopg2-binary": { + "hashes": [ + "sha256:03ef7df18daf2c4c07e2695e8cfd5ee7f748a1d54d802330985a78d2a5a6dca9", + "sha256:0a602ea5aff39bb9fac6308e9c9d82b9a35c2bf288e184a816002c9fae930b77", + "sha256:0c009475ee389757e6e34611d75f6e4f05f0cf5ebb76c6037508318e1a1e0d7e", + "sha256:0ef4854e82c09e84cc63084a9e4ccd6d9b154f1dbdd283efb92ecd0b5e2b8c84", + "sha256:1236ed0952fbd919c100bc839eaa4a39ebc397ed1c08a97fc45fee2a595aa1b3", + "sha256:143072318f793f53819048fdfe30c321890af0c3ec7cb1dfc9cc87aa88241de2", + "sha256:15208be1c50b99203fe88d15695f22a5bed95ab3f84354c494bcb1d08557df67", + "sha256:1873aade94b74715be2246321c8650cabf5a0d098a95bab81145ffffa4c13876", + "sha256:18d0ef97766055fec15b5de2c06dd8e7654705ce3e5e5eed3b6651a1d2a9a152", + "sha256:1ea665f8ce695bcc37a90ee52de7a7980be5161375d42a0b6c6abedbf0d81f0f", + "sha256:2293b001e319ab0d869d660a704942c9e2cce19745262a8aba2115ef41a0a42a", + "sha256:246b123cc54bb5361588acc54218c8c9fb73068bf227a4a531d8ed56fa3ca7d6", + "sha256:275ff571376626195ab95a746e6a04c7df8ea34638b99fc11160de91f2fef503", + "sha256:281309265596e388ef483250db3640e5f414168c5a67e9c665cafce9492eda2f", + "sha256:2d423c8d8a3c82d08fe8af900ad5b613ce3632a1249fd6a223941d0735fce493", + "sha256:2e5afae772c00980525f6d6ecf7cbca55676296b580c0e6abb407f15f3706996", + "sha256:30dcc86377618a4c8f3b72418df92e77be4254d8f89f14b8e8f57d6d43603c0f", + "sha256:31a34c508c003a4347d389a9e6fcc2307cc2150eb516462a7a17512130de109e", + "sha256:323ba25b92454adb36fa425dc5cf6f8f19f78948cbad2e7bc6cdf7b0d7982e59", + "sha256:34eccd14566f8fe14b2b95bb13b11572f7c7d5c36da61caf414d23b91fcc5d94", + "sha256:3a58c98a7e9c021f357348867f537017057c2ed7f77337fd914d0bedb35dace7", + "sha256:3f78fd71c4f43a13d342be74ebbc0666fe1f555b8837eb113cb7416856c79682", + "sha256:4154ad09dac630a0f13f37b583eae260c6aa885d67dfbccb5b02c33f31a6d420", + "sha256:420f9bbf47a02616e8554e825208cb947969451978dceb77f95ad09c37791dae", + "sha256:4686818798f9194d03c9129a4d9a702d9e113a89cb03bffe08c6cf799e053291", + "sha256:57fede879f08d23c85140a360c6a77709113efd1c993923c59fde17aa27599fe", + "sha256:60989127da422b74a04345096c10d416c2b41bd7bf2a380eb541059e4e999980", + "sha256:64cf30263844fa208851ebb13b0732ce674d8ec6a0c86a4e160495d299ba3c93", + "sha256:68fc1f1ba168724771e38bee37d940d2865cb0f562380a1fb1ffb428b75cb692", + 
"sha256:6e6f98446430fdf41bd36d4faa6cb409f5140c1c2cf58ce0bbdaf16af7d3f119", + "sha256:729177eaf0aefca0994ce4cffe96ad3c75e377c7b6f4efa59ebf003b6d398716", + "sha256:72dffbd8b4194858d0941062a9766f8297e8868e1dd07a7b36212aaa90f49472", + "sha256:75723c3c0fbbf34350b46a3199eb50638ab22a0228f93fb472ef4d9becc2382b", + "sha256:77853062a2c45be16fd6b8d6de2a99278ee1d985a7bd8b103e97e41c034006d2", + "sha256:78151aa3ec21dccd5cdef6c74c3e73386dcdfaf19bced944169697d7ac7482fc", + "sha256:7f01846810177d829c7692f1f5ada8096762d9172af1b1a28d4ab5b77c923c1c", + "sha256:804d99b24ad523a1fe18cc707bf741670332f7c7412e9d49cb5eab67e886b9b5", + "sha256:81ff62668af011f9a48787564ab7eded4e9fb17a4a6a74af5ffa6a457400d2ab", + "sha256:8359bf4791968c5a78c56103702000105501adb557f3cf772b2c207284273984", + "sha256:83791a65b51ad6ee6cf0845634859d69a038ea9b03d7b26e703f94c7e93dbcf9", + "sha256:8532fd6e6e2dc57bcb3bc90b079c60de896d2128c5d9d6f24a63875a95a088cf", + "sha256:876801744b0dee379e4e3c38b76fc89f88834bb15bf92ee07d94acd06ec890a0", + "sha256:8dbf6d1bc73f1d04ec1734bae3b4fb0ee3cb2a493d35ede9badbeb901fb40f6f", + "sha256:8f8544b092a29a6ddd72f3556a9fcf249ec412e10ad28be6a0c0d948924f2212", + "sha256:911dda9c487075abd54e644ccdf5e5c16773470a6a5d3826fda76699410066fb", + "sha256:977646e05232579d2e7b9c59e21dbe5261f403a88417f6a6512e70d3f8a046be", + "sha256:9dba73be7305b399924709b91682299794887cbbd88e38226ed9f6712eabee90", + "sha256:a148c5d507bb9b4f2030a2025c545fccb0e1ef317393eaba42e7eabd28eb6041", + "sha256:a6cdcc3ede532f4a4b96000b6362099591ab4a3e913d70bcbac2b56c872446f7", + "sha256:ac05fb791acf5e1a3e39402641827780fe44d27e72567a000412c648a85ba860", + "sha256:b0605eaed3eb239e87df0d5e3c6489daae3f7388d455d0c0b4df899519c6a38d", + "sha256:b58b4710c7f4161b5e9dcbe73bb7c62d65670a87df7bcce9e1faaad43e715245", + "sha256:b6356793b84728d9d50ead16ab43c187673831e9d4019013f1402c41b1db9b27", + "sha256:b76bedd166805480ab069612119ea636f5ab8f8771e640ae103e05a4aae3e417", + "sha256:bc7bb56d04601d443f24094e9e31ae6deec9ccb23581f75343feebaf30423359", + "sha256:c2470da5418b76232f02a2fcd2229537bb2d5a7096674ce61859c3229f2eb202", + "sha256:c332c8d69fb64979ebf76613c66b985414927a40f8defa16cf1bc028b7b0a7b0", + "sha256:c6af2a6d4b7ee9615cbb162b0738f6e1fd1f5c3eda7e5da17861eacf4c717ea7", + "sha256:c77e3d1862452565875eb31bdb45ac62502feabbd53429fdc39a1cc341d681ba", + "sha256:ca08decd2697fdea0aea364b370b1249d47336aec935f87b8bbfd7da5b2ee9c1", + "sha256:ca49a8119c6cbd77375ae303b0cfd8c11f011abbbd64601167ecca18a87e7cdd", + "sha256:cb16c65dcb648d0a43a2521f2f0a2300f40639f6f8c1ecbc662141e4e3e1ee07", + "sha256:d2997c458c690ec2bc6b0b7ecbafd02b029b7b4283078d3b32a852a7ce3ddd98", + "sha256:d3f82c171b4ccd83bbaf35aa05e44e690113bd4f3b7b6cc54d2219b132f3ae55", + "sha256:dc4926288b2a3e9fd7b50dc6a1909a13bbdadfc67d93f3374d984e56f885579d", + "sha256:ead20f7913a9c1e894aebe47cccf9dc834e1618b7aa96155d2091a626e59c972", + "sha256:ebdc36bea43063116f0486869652cb2ed7032dbc59fbcb4445c4862b5c1ecf7f", + "sha256:ed1184ab8f113e8d660ce49a56390ca181f2981066acc27cf637d5c1e10ce46e", + "sha256:ee825e70b1a209475622f7f7b776785bd68f34af6e7a46e2e42f27b659b5bc26", + "sha256:f7ae5d65ccfbebdfa761585228eb4d0df3a8b15cfb53bd953e713e09fbb12957", + "sha256:f7fc5a5acafb7d6ccca13bfa8c90f8c51f13d8fb87d95656d3950f0158d3ce53", + "sha256:f9b5571d33660d5009a8b3c25dc1db560206e2d2f89d3df1cb32d72c0d117d52" + ], + "index": "pypi", + "markers": "python_version >= '3.7'", + "version": "==2.9.9" + }, + "pycodestyle": { + "hashes": [ + "sha256:41ba0e7afc9752dfb53ced5489e89f8186be00e599e712660695b7a75ff2663f", + 
"sha256:44fe31000b2d866f2e41841b18528a505fbd7fef9017b04eff4e2648a0fadc67" + ], + "markers": "python_version >= '3.8'", + "version": "==2.11.1" + }, + "pycparser": { + "hashes": [ + "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6", + "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc" + ], + "markers": "python_version >= '3.8'", + "version": "==2.22" + }, + "pyflakes": { + "hashes": [ + "sha256:1c61603ff154621fb2a9172037d84dca3500def8c8b630657d1701f026f8af3f", + "sha256:84b5be138a2dfbb40689ca07e2152deb896a65c3a3e24c251c5c62489568074a" + ], + "markers": "python_version >= '3.8'", + "version": "==3.2.0" + }, + "pynacl": { + "hashes": [ + "sha256:06b8f6fa7f5de8d5d2f7573fe8c863c051225a27b61e6860fd047b1775807858", + "sha256:0c84947a22519e013607c9be43706dd42513f9e6ae5d39d3613ca1e142fba44d", + "sha256:20f42270d27e1b6a29f54032090b972d97f0a1b0948cc52392041ef7831fee93", + "sha256:401002a4aaa07c9414132aaed7f6836ff98f59277a234704ff66878c2ee4a0d1", + "sha256:52cb72a79269189d4e0dc537556f4740f7f0a9ec41c1322598799b0bdad4ef92", + "sha256:61f642bf2378713e2c2e1de73444a3778e5f0a38be6fee0fe532fe30060282ff", + "sha256:8ac7448f09ab85811607bdd21ec2464495ac8b7c66d146bf545b0f08fb9220ba", + "sha256:a36d4a9dda1f19ce6e03c9a784a2921a4b726b02e1c736600ca9c22029474394", + "sha256:a422368fc821589c228f4c49438a368831cb5bbc0eab5ebe1d7fac9dded6567b", + "sha256:e46dae94e34b085175f8abb3b0aaa7da40767865ac82c928eeb9e57e1ea8a543" + ], + "markers": "python_version >= '3.6'", + "version": "==1.5.0" + }, + "pysocks": { + "hashes": [ + "sha256:08e69f092cc6dbe92a0fdd16eeb9b9ffbc13cadfe5ca4c7bd92ffb078b293299", + "sha256:2725bd0a9925919b9b51739eea5f9e2bae91e83288108a9ad338b2e3a4435ee5", + "sha256:3f8804571ebe159c380ac6de37643bb4685970655d3bba243530d6558b799aa0" + ], + "version": "==1.7.1" + }, + "pytest": { + "hashes": [ + "sha256:2a8386cfc11fa9d2c50ee7b2a57e7d898ef90470a7a34c4b949ff59662bb78b7", + "sha256:ac978141a75948948817d360297b7aae0fcb9d6ff6bc9ec6d514b85d5a65c044" + ], + "index": "pypi", + "markers": "python_version >= '3.8'", + "version": "==8.1.1" + }, + "pyyaml": { + "hashes": [ + "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff", + "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48", + "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086", + "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e", + "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", + "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5", + "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", + "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee", + "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5", + "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68", + "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a", + "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf", + "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99", + "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8", + "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85", + "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19", + "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", + "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a", + 
"sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", + "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317", + "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c", + "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631", + "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d", + "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", + "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", + "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e", + "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b", + "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8", + "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476", + "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706", + "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", + "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237", + "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b", + "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083", + "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180", + "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425", + "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e", + "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f", + "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725", + "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", + "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab", + "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774", + "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725", + "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", + "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5", + "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d", + "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290", + "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44", + "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed", + "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4", + "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", + "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12", + "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4" + ], + "markers": "python_version >= '3.8'", + "version": "==6.0.2" + }, + "requests": { + "hashes": [ + "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760", + "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6" + ], + "index": "pypi", + "markers": "python_version >= '3.8'", + "version": "==2.32.3" + }, + "selenium": { + "hashes": [ + "sha256:0bd564ee166980d419a8aaf4ac00289bc152afcf2eadca5efe8c8e36711853fd", + "sha256:b1d0c33b38ca27d0499183e48e1dd09ff26973481f5d3ef2983073813ae6588d" + ], + "index": "pypi", + "markers": "python_version >= '3.8'", + "version": "==4.20.0" + }, + "sniffio": { + "hashes": [ + "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", + "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc" + ], + "markers": "python_version >= '3.7'", + 
"version": "==1.3.1" + }, + "sortedcontainers": { + "hashes": [ + "sha256:25caa5a06cc30b6b83d11423433f65d1f9d76c4c6a0c90e3379eaa43b9bfdb88", + "sha256:a163dcaede0f1c021485e957a39245190e74249897e2ae4b2aa38595db237ee0" + ], + "version": "==2.4.0" + }, + "sqlparse": { + "hashes": [ + "sha256:773dcbf9a5ab44a090f3441e2180efe2560220203dc2f8c0b0fa141e18b505e4", + "sha256:bb6b4df465655ef332548e24f08e205afc81b9ab86cb1c45657a7ff173a3a00e" + ], + "markers": "python_version >= '3.8'", + "version": "==0.5.1" + }, + "trio": { + "hashes": [ + "sha256:1dcc95ab1726b2da054afea8fd761af74bad79bd52381b84eae408e983c76831", + "sha256:68eabbcf8f457d925df62da780eff15ff5dc68fd6b367e2dde59f7aaf2a0b884" + ], + "markers": "python_version >= '3.8'", + "version": "==0.27.0" + }, + "trio-websocket": { + "hashes": [ + "sha256:18c11793647703c158b1f6e62de638acada927344d534e3c7628eedcb746839f", + "sha256:520d046b0d030cf970b8b2b2e00c4c2245b3807853ecd44214acd33d74581638" + ], + "markers": "python_version >= '3.7'", + "version": "==0.11.1" + }, + "typing-extensions": { + "hashes": [ + "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d", + "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8" + ], + "markers": "python_version >= '3.8'", + "version": "==4.12.2" + }, + "urllib3": { + "extras": [ + "socks" + ], + "hashes": [ + "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac", + "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9" + ], + "markers": "python_version >= '3.8'", + "version": "==2.2.3" + }, + "virtualenv": { + "hashes": [ + "sha256:142c6be10212543b32c6c45d3d3893dff89112cc588b7d0879ae5a1ec03a47ba", + "sha256:f11f1b8a29525562925f745563bfd48b189450f61fb34c4f9cc79dd5aa32a1f4" + ], + "markers": "python_version >= '3.8'", + "version": "==20.27.1" + }, + "wsproto": { + "hashes": [ + "sha256:ad565f26ecb92588a3e43bc3d96164de84cd9902482b130d0ddbaa9664a85065", + "sha256:b9acddd652b585d75b20477888c56642fdade28bdfd3579aa24a4d2c037dd736" + ], + "markers": "python_full_version >= '3.7.0'", + "version": "==1.2.0" + } + }, + "develop": {} +} diff --git a/README.rst b/README.rst index 95582b4c..975f3712 100644 --- a/README.rst +++ b/README.rst @@ -2,8 +2,18 @@ stx-test ======== -StarlingX Test repository for manual and automated test cases. +StarlingX Test repository for automated test cases. +Pre-Requisites +---------- + +.. code-block:: bash + + You must have a machine/VM running Ubuntu 22.04 or later + The RunAgent must be able to connect to the internet to pull images and dependencies. + The RunAgent must be able to connect to your labs via SSH. + Download and install Python 3.11, pip and pipenv. + Download and install git on the RunAgent Contribute ---------- @@ -17,9 +27,69 @@ Contribute ssh-keygen -t rsa -C "" ssh-add $private_keyfile_path - # add ssh key to settings https://review.opendev.org/#/q/project:starlingx/test + # Add ssh key to settings https://review.opendev.org/#/q/project:starlingx/test cd git remote add gerrit ssh://@review.opendev.org/starlingx/test.git git review -s -- When you are ready, create your commit with detailed commit message, and submit for review. \ No newline at end of file + # Create/activate a virtual python environment and pull the project dependencies. + pipenv shell + pipenv sync + +- When you are ready, create your commit with detailed commit message, and submit for review. + +Configuration +---------- + +The framework contains multiple configuration files found under the config folder. 
+Configuration
+-------------
+
+The framework contains multiple configuration files, found under the config folder.
+There are configurations for docker, hosts, kubernetes, labs and logger. By default,
+the runner will choose the default config file for each (default.json5) when running.
+These files can be found under config/<config_type>/files. However, using command-line
+overrides, a user can supply a custom file. The command-line options are
+--lab_config_file, --k8s_config_file, --logger_config_file, and --docker_config_file.
+
+There are a couple of files that will need to be updated when first setting up.
+
+1) config/lab/files/default.json5
+
+This file is responsible for holding information such as the floating IP, lab type,
+lab capabilities, etc. Adjust the contents of default.json5 to match the information
+of the lab where you want to execute the test cases. Based on your system type, you
+can use one of the template files (such as template_simplex.json5) as a starting
+point. If using a jump server, update the values under config/host/files/jump_host.json5
+to use the connection information of the jump server. Then, in the lab configuration
+file, set "use_jump_host: true" and "jump_server_config:"
+(ex. jump_server_config: "config/host/files/jump_host.json5").
+
+2) config/docker/files/default.json5
+
+This file is responsible for holding information for the docker registries used in
+testing. Adjust the local registry credentials to match those of the lab where you
+want to execute the tests.
+
+Update Lab Capabilities
+
+Using the lab capability scanner, we can identify common lab capabilities and
+automatically add them to the configuration. This script will create a backup of the
+original file and create a new one with the lab capabilities added. These capabilities
+will help identify which tests are applicable for a given lab setup.
+
+.. code-block:: bash
+
+    # Run the script from the root location of the repo
+    cd <repo_root>
+    python scripts/lab_capability_scanner.py --lab_config_file=<lab_config_file>
+
+.. code-block:: bash
+
+    # (Optional) Install Chrome for Webdriver UI tests
+    wget https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb
+    sudo dpkg -i google-chrome-stable_current_amd64.deb
+    sudo apt -f install        # if you encounter errors during the install
+    google-chrome --version    # verify that the install was successful
+
+Execution
+---------
+
+You are now ready to run some tests!
+
+.. code-block:: bash
+
+    # From the root repo location we can now run tests
+    cd <repo_root>
+    python framework/runner/scripts/test_executor.py --tests_location=<tests_location>
+
+    # Note: non-default config locations and filenames are also supported on the command line
+    # as --lab_config_file, --k8s_config_file, --logger_config_file, --docker_config_file
+    python framework/runner/scripts/test_executor.py --tests_location=<tests_location> --lab_config_file=<lab_config_file>
+
+    # Ex.
+    python framework/runner/scripts/test_executor.py --tests_location=testcases/cloud_platform/sanity --lab_config_file=/dev/configs/my_config.json
diff --git a/automated-pytest-suite/__init__.py b/__init__.py
similarity index 100%
rename from automated-pytest-suite/__init__.py
rename to __init__.py
diff --git a/automated-pytest-suite/README.rst b/automated-pytest-suite/README.rst
deleted file mode 100644
index d8363b50..00000000
--- a/automated-pytest-suite/README.rst
+++ /dev/null
@@ -1,76 +0,0 @@
-====================================
-StarlingX Integration Test Framework
-====================================
-
-The project contains integration test cases that can be executed on an
-installed and configured StarlingX system.
- -Supported test cases: - -- CLI tests over SSH connection to StarlingX system via OAM floating IP -- Platform RestAPI test cases via external endpoints -- Horizon test cases - - -Packages Required ------------------ -- python >='3.4.3,<3.7' -- pytest>='3.1.0,<4.0' -- pexpect -- pyyaml -- requests (used by RestAPI test cases only) -- selenium (used by Horizon test cases only) -- Firefox (used by Horizon test cases only) -- pyvirtualdisplay (used by Horizon test cases only) -- ffmpeg (used by Horizon test cases only) -- Xvfb or Xephyr or Xvnc (used by pyvirtualdisplay for Horizon test cases only) - - -Setup Test Tool ---------------- -This is a off-box test tool that needs to be set up once on a Linux server -that can reach the StarlingX system under test (such as SSH to STX -system, send/receive RestAPI requests, open Horizon page). - -- Install above packages -- Clone stx-test repo -- Add absolute path for automated-pytest-suite to PYTHONPATH environment variable - -Execute Test Cases ------------------- -Precondition: STX system under test should be installed and configured. - -- | Customized config can be provided via --testcase-config . - | Config template can be found at ${project_root}/stx-test_template.conf. -- Test cases can be selected by specifying via -m -- | If stx-openstack is not deployed, platform specific marker should be specified, - | e.g., -m "platform_sanity or platform" -- | Automation logs will be created at ${HOME}/AUTOMATION_LOGS directory by default. - | Log directory can also be specified with --resultlog=${LOG_DIR} commandline option -- Examples: - -.. code-block:: bash - - export project_root= - - # Include $project_root to PYTHONPATH if not already done - export PYTHONPATH=${PYTHONPATH}:${project_root} - - cd $project_root - - # Example 1: Run all platform_sanity test cases under testcases/ - pytest -m platform_sanity --testcase-config=~/my_config.conf testcases/ - - # Example 2: Run platform_sanity or sanity (requires stx-openstack) test cases, - # on a StarlingX virtual box system that is already saved in consts/lab.py - # and save automation logs to /tmp/AUTOMATION_LOGS - pytest --resultlog=/tmp/ -m sanity --lab=vbox --natbox=localhost testcases/ - - # Example 3: List (not execute) the test cases with "migrate" in the name - pytest --collect-only -k "migrate" --lab= testcases/ - - -Contribute ----------- - -- In order to contribute, python3.4 is required to avoid producing code that is incompatible with python3.4. diff --git a/automated-pytest-suite/conftest.py b/automated-pytest-suite/conftest.py deleted file mode 100644 index 3d0fbeb1..00000000 --- a/automated-pytest-suite/conftest.py +++ /dev/null @@ -1,716 +0,0 @@ -# -# Copyright (c) 2019, 2020 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -import logging -import os -from time import strftime, gmtime -# import threading # Used for formatting logger - - -import pytest # Don't remove. 
Used in eval - -import setups -from consts.proj_vars import ProjVar -from utils.tis_log import LOG -from utils import parse_log - -tc_start_time = None -tc_end_time = None -has_fail = False -repeat_count = -1 -stress_count = -1 -count = -1 -no_teardown = False -tracebacks = [] -region = None -test_count = 0 -console_log = True - -################################ -# Process and log test results # -################################ - - -class MakeReport: - nodeid = None - instances = {} - - def __init__(self, item): - MakeReport.nodeid = item.nodeid - self.test_pass = None - self.test_results = {} - MakeReport.instances[item.nodeid] = self - - def update_results(self, call, report): - if report.failed: - global has_fail - has_fail = True - msg = "***Failure at test {}: {}".format(call.when, call.excinfo) - print(msg) - LOG.debug(msg + "\n***Details: {}".format(report.longrepr)) - tracebacks.append(str(report.longrepr)) - self.test_results[call.when] = ['Failed', call.excinfo] - elif report.skipped: - sep = 'Skipped: ' - skipreason_list = str(call.excinfo).split(sep=sep)[1:] - skipreason_str = sep.join(skipreason_list) - self.test_results[call.when] = ['Skipped', skipreason_str] - elif report.passed: - self.test_results[call.when] = ['Passed', ''] - - def get_results(self): - return self.test_results - - @classmethod - def get_report(cls, item): - if item.nodeid == cls.nodeid: - return cls.instances[cls.nodeid] - else: - return cls(item) - - -class TestRes: - PASSNUM = 0 - FAILNUM = 0 - SKIPNUM = 0 - TOTALNUM = 0 - - -def _write_results(res_in_tests, test_name): - global tc_start_time - with open(ProjVar.get_var("TCLIST_PATH"), mode='a', encoding='utf8') as f: - f.write('\n{}\t{}\t{}'.format(res_in_tests, tc_start_time, test_name)) - global test_count - test_count += 1 - # reset tc_start and end time for next test case - tc_start_time = None - - -def pytest_runtest_makereport(item, call, __multicall__): - report = __multicall__.execute() - my_rep = MakeReport.get_report(item) - my_rep.update_results(call, report) - - test_name = item.nodeid.replace('::()::', - '::') # .replace('testcases/', '') - res_in_tests = '' - res = my_rep.get_results() - - # Write final result to test_results.log - if report.when == 'teardown': - res_in_log = 'Test Passed' - fail_at = [] - for key, val in res.items(): - if val[0] == 'Failed': - fail_at.append('test ' + key) - elif val[0] == 'Skipped': - res_in_log = 'Test Skipped\nReason: {}'.format(val[1]) - res_in_tests = 'SKIP' - break - if fail_at: - fail_at = ', '.join(fail_at) - res_in_log = 'Test Failed at {}'.format(fail_at) - - # Log test result - testcase_log(msg=res_in_log, nodeid=test_name, log_type='tc_res') - - if 'Test Passed' in res_in_log: - res_in_tests = 'PASS' - elif 'Test Failed' in res_in_log: - res_in_tests = 'FAIL' - if ProjVar.get_var('PING_FAILURE'): - setups.add_ping_failure(test_name=test_name) - - if not res_in_tests: - res_in_tests = 'UNKNOWN' - - # count testcases by status - TestRes.TOTALNUM += 1 - if res_in_tests == 'PASS': - TestRes.PASSNUM += 1 - elif res_in_tests == 'FAIL': - TestRes.FAILNUM += 1 - elif res_in_tests == 'SKIP': - TestRes.SKIPNUM += 1 - - _write_results(res_in_tests=res_in_tests, test_name=test_name) - - if repeat_count > 0: - for key, val in res.items(): - if val[0] == 'Failed': - global tc_end_time - tc_end_time = strftime("%Y%m%d %H:%M:%S", gmtime()) - _write_results(res_in_tests='FAIL', test_name=test_name) - TestRes.FAILNUM += 1 - if ProjVar.get_var('PING_FAILURE'): - setups.add_ping_failure(test_name=test_name) - 
- try: - parse_log.parse_test_steps(ProjVar.get_var('LOG_DIR')) - except Exception as e: - LOG.warning( - "Unable to parse test steps. \nDetails: {}".format( - e.__str__())) - - pytest.exit( - "Skip rest of the iterations upon stress test failure") - - if no_teardown and report.when == 'call': - for key, val in res.items(): - if val[0] == 'Skipped': - break - else: - pytest.exit("No teardown and skip rest of the tests if any") - - return report - - -def pytest_runtest_setup(item): - global tc_start_time - # tc_start_time = setups.get_tis_timestamp(con_ssh) - tc_start_time = strftime("%Y%m%d %H:%M:%S", gmtime()) - print('') - message = "Setup started:" - testcase_log(message, item.nodeid, log_type='tc_setup') - # set test name for ping vm failure - test_name = 'test_{}'.format( - item.nodeid.rsplit('::test_', 1)[-1].replace('/', '_')) - ProjVar.set_var(TEST_NAME=test_name) - ProjVar.set_var(PING_FAILURE=False) - - -def pytest_runtest_call(item): - separator = \ - '++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++' - message = "Test steps started:" - testcase_log(message, item.nodeid, separator=separator, log_type='tc_start') - - -def pytest_runtest_teardown(item): - print('') - message = 'Teardown started:' - testcase_log(message, item.nodeid, log_type='tc_teardown') - - -def testcase_log(msg, nodeid, separator=None, log_type=None): - if separator is None: - separator = '-----------' - - print_msg = separator + '\n' + msg - logging_msg = '\n{}{} {}'.format(separator, msg, nodeid) - if console_log: - print(print_msg) - if log_type == 'tc_res': - global tc_end_time - tc_end_time = strftime("%Y%m%d %H:%M:%S", gmtime()) - LOG.tc_result(msg=msg, tc_name=nodeid) - elif log_type == 'tc_start': - LOG.tc_func_start(nodeid) - elif log_type == 'tc_setup': - LOG.tc_setup_start(nodeid) - elif log_type == 'tc_teardown': - LOG.tc_teardown_start(nodeid) - else: - LOG.debug(logging_msg) - - -######################## -# Command line options # -######################## -@pytest.mark.tryfirst -def pytest_configure(config): - config.addinivalue_line("markers", - "features(feature_name1, feature_name2, " - "...): mark impacted feature(s) for a test case.") - config.addinivalue_line("markers", - "priorities(, cpe_sanity, p2, ...): mark " - "priorities for a test case.") - config.addinivalue_line("markers", - "known_issue(LP-xxxx): mark known issue with " - "LP ID or description if no LP needed.") - - if config.getoption('help'): - return - - # Common reporting params - collect_all = config.getoption('collectall') - always_collect = config.getoption('alwayscollect') - session_log_dir = config.getoption('sessiondir') - resultlog = config.getoption('resultlog') - - # Test case params on installed system - testcase_config = config.getoption('testcase_config') - lab_arg = config.getoption('lab') - natbox_arg = config.getoption('natbox') - tenant_arg = config.getoption('tenant') - horizon_visible = config.getoption('horizon_visible') - is_vbox = config.getoption('is_vbox') - - global repeat_count - repeat_count = config.getoption('repeat') - global stress_count - stress_count = config.getoption('stress') - global count - if repeat_count > 0: - count = repeat_count - elif stress_count > 0: - count = stress_count - - global no_teardown - no_teardown = config.getoption('noteardown') - if repeat_count > 0 or no_teardown: - ProjVar.set_var(NO_TEARDOWN=True) - - collect_netinfo = config.getoption('netinfo') - - # Determine lab value. 
- lab = natbox = None - if lab_arg: - lab = setups.get_lab_dict(lab_arg) - if natbox_arg: - natbox = setups.get_natbox_dict(natbox_arg) - - lab, natbox = setups.setup_testcase_config(testcase_config, lab=lab, - natbox=natbox) - tenant = tenant_arg.upper() if tenant_arg else 'TENANT1' - - # Log collection params - collect_all = True if collect_all else False - always_collect = True if always_collect else False - - # If floating ip cannot be reached, whether to try to ping/ssh - # controller-0 unit IP, etc. - if collect_netinfo: - ProjVar.set_var(COLLECT_SYS_NET_INFO=True) - - horizon_visible = True if horizon_visible else False - - if session_log_dir: - log_dir = session_log_dir - else: - # compute directory for all logs based on resultlog arg, lab, - # and timestamp on local machine - resultlog = resultlog if resultlog else os.path.expanduser("~") - if '/AUTOMATION_LOGS' in resultlog: - resultlog = resultlog.split(sep='/AUTOMATION_LOGS')[0] - resultlog = os.path.join(resultlog, 'AUTOMATION_LOGS') - lab_name = lab['short_name'] - time_stamp = strftime('%Y%m%d%H%M') - log_dir = '{}/{}/{}'.format(resultlog, lab_name, time_stamp) - os.makedirs(log_dir, exist_ok=True) - - # set global constants, which will be used for the entire test session, etc - ProjVar.init_vars(lab=lab, natbox=natbox, logdir=log_dir, tenant=tenant, - collect_all=collect_all, - always_collect=always_collect, - horizon_visible=horizon_visible) - - if lab.get('central_region'): - default_subloud = config.getoption('subcloud') - subcloud_list = config.getoption('subcloud_list') - if subcloud_list: - if default_subloud not in subcloud_list: - msg = ("default subcloud --subcloud=%s not in --subcloud_list=%s" % - (default_subloud, subcloud_list)) - LOG.error(msg) - pytest.exit(msg) - - ProjVar.set_var(IS_DC=True, PRIMARY_SUBCLOUD=default_subloud, SUBCLOUD_LIST=subcloud_list) - - if is_vbox: - ProjVar.set_var(IS_VBOX=True) - - config_logger(log_dir, console=console_log) - - # set resultlog save location - config.option.resultlog = ProjVar.get_var("PYTESTLOG_PATH") - - # Repeat test params - file_or_dir = config.getoption('file_or_dir') - origin_file_dir = list(file_or_dir) - if count > 1: - print("Repeat following tests {} times: {}".format(count, file_or_dir)) - del file_or_dir[:] - for f_or_d in origin_file_dir: - for i in range(count): - file_or_dir.append(f_or_d) - - -def pytest_addoption(parser): - testconf_help = "Absolute path for testcase config file. Template can be " \ - "found at automated-pytest-suite/stx-test_template.conf" - lab_help = "STX system to connect to. Valid value: 1) short_name or name " \ - "of an existing dict entry in consts.Labs; Or 2) OAM floating " \ - "ip of the STX system under test" - tenant_help = "Default tenant to use when unspecified. Valid values: " \ - "tenant1, tenant2, or admin" - natbox_help = "NatBox IP or name. If automated tests are executed from " \ - "NatBox, --natbox=localhost can be used. " \ - "If username/password are required to SSH to NatBox, " \ - "please specify them in test config file." - vbox_help = "Specify if StarlingX system is installed in virtual " \ - "environment." - collect_all_help = "Run collect all on STX system at the end of test " \ - "session if any test fails." - logdir_help = "Directory to store test session logs. If this is " \ - "specified, then --resultlog will be ignored." - stress_help = "Number of iterations to run specified testcase(s). 
Abort " \ - "rest of the test session on first failure" - count_help = "Repeat tests x times - NO stop on failure" - horizon_visible_help = "Display horizon on screen" - no_console_log = 'Print minimal console logs' - region_help = "Multi-region parameter. Use when connected region is " \ - "different than region to test. " \ - "e.g., creating vm on RegionTwo from RegionOne" - subcloud_help = "Default subcloud used for automated test when boot vm, " \ - "etc. 'subcloud1' if unspecified." - subcloud_list_help = "Specifies subclouds for DC labs, e.g. --subcloud_list=subcloud1," \ - "subcloud2. If unspecified the lab's subclouds from lab.py will " \ - "be used." - - # Test session options on installed and configured STX system: - parser.addoption('--testcase-config', action='store', - metavar='testcase_config', default=None, - help=testconf_help) - parser.addoption('--lab', action='store', metavar='lab', default=None, - help=lab_help) - parser.addoption('--tenant', action='store', metavar='tenantname', - default=None, help=tenant_help) - parser.addoption('--natbox', action='store', metavar='natbox', default=None, - help=natbox_help) - parser.addoption('--vm', '--vbox', action='store_true', dest='is_vbox', - help=vbox_help) - - # Multi-region or distributed cloud options - parser.addoption('--region', action='store', metavar='region', - default=None, help=region_help) - parser.addoption('--subcloud', action='store', metavar='subcloud', - default='subcloud1', help=subcloud_help) - parser.addoption("--subcloud_list", action="store", default=None, - help=subcloud_list_help) - - # Debugging/Log collection options: - parser.addoption('--sessiondir', '--session_dir', '--session-dir', - action='store', dest='sessiondir', - metavar='sessiondir', default=None, help=logdir_help) - parser.addoption('--collectall', '--collect_all', '--collect-all', - dest='collectall', action='store_true', - help=collect_all_help) - parser.addoption('--alwayscollect', '--always-collect', '--always_collect', - dest='alwayscollect', - action='store_true', help=collect_all_help) - parser.addoption('--repeat', action='store', metavar='repeat', type=int, - default=-1, help=stress_help) - parser.addoption('--stress', metavar='stress', action='store', type=int, - default=-1, help=count_help) - parser.addoption('--no-teardown', '--no_teardown', '--noteardown', - dest='noteardown', action='store_true') - parser.addoption('--netinfo', '--net-info', dest='netinfo', - action='store_true', - help="Collect system networking info if scp keyfile fails") - parser.addoption('--horizon-visible', '--horizon_visible', - action='store_true', dest='horizon_visible', - help=horizon_visible_help) - parser.addoption('--noconsolelog', '--noconsole', '--no-console-log', - '--no_console_log', '--no-console', - '--no_console', action='store_true', dest='noconsolelog', - help=no_console_log) - - -def config_logger(log_dir, console=True): - # logger for log saved in file - file_name = log_dir + '/TIS_AUTOMATION.log' - logging.Formatter.converter = gmtime - log_format = '[%(asctime)s] %(lineno)-5d%(levelname)-5s %(threadName)-8s ' \ - '%(module)s.%(funcName)-8s:: %(message)s' - tis_formatter = logging.Formatter(log_format) - LOG.setLevel(logging.NOTSET) - - tmp_path = os.path.join(os.path.expanduser('~'), '.tmp_log') - # clear the tmp log with best effort so it wont keep growing - try: - os.remove(tmp_path) - except: - pass - logging.basicConfig(level=logging.NOTSET, format=log_format, - filename=tmp_path, filemode='w') - - # file handler: - file_handler 
= logging.FileHandler(file_name) - file_handler.setFormatter(tis_formatter) - file_handler.setLevel(logging.DEBUG) - LOG.addHandler(file_handler) - - # logger for stream output - console_level = logging.INFO if console else logging.CRITICAL - stream_hdler = logging.StreamHandler() - stream_hdler.setFormatter(tis_formatter) - stream_hdler.setLevel(console_level) - LOG.addHandler(stream_hdler) - - print("LOG DIR: {}".format(log_dir)) - - -def pytest_unconfigure(config): - # collect all if needed - if config.getoption('help'): - return - - try: - natbox_ssh = ProjVar.get_var('NATBOX_SSH') - natbox_ssh.close() - except: - pass - - version_and_patch = '' - try: - version_and_patch = setups.get_version_and_patch_info() - except Exception as e: - LOG.debug(e) - pass - log_dir = ProjVar.get_var('LOG_DIR') - if not log_dir: - try: - from utils.clients.ssh import ControllerClient - ssh_list = ControllerClient.get_active_controllers(fail_ok=True) - for con_ssh_ in ssh_list: - con_ssh_.close() - except: - pass - return - - log_dir = ProjVar.get_var('LOG_DIR') - if not log_dir: - try: - from utils.clients.ssh import ControllerClient - ssh_list = ControllerClient.get_active_controllers(fail_ok=True) - for con_ssh_ in ssh_list: - con_ssh_.close() - except: - pass - return - - try: - tc_res_path = log_dir + '/test_results.log' - build_info = ProjVar.get_var('BUILD_INFO') - build_id = build_info.get('BUILD_ID', '') - build_job = build_info.get('JOB', '') - build_server = build_info.get('BUILD_HOST', '') - system_config = ProjVar.get_var('SYS_TYPE') - session_str = '' - total_exec = TestRes.PASSNUM + TestRes.FAILNUM - # pass_rate = fail_rate = '0' - if total_exec > 0: - pass_rate = "{}%".format( - round(TestRes.PASSNUM * 100 / total_exec, 2)) - fail_rate = "{}%".format( - round(TestRes.FAILNUM * 100 / total_exec, 2)) - with open(tc_res_path, mode='a', encoding='utf8') as f: - # Append general info to result log - f.write('\n\nLab: {}\n' - 'Build ID: {}\n' - 'Job: {}\n' - 'Build Server: {}\n' - 'System Type: {}\n' - 'Automation LOGs DIR: {}\n' - 'Ends at: {}\n' - '{}' # test session id and tag - '{}'.format(ProjVar.get_var('LAB_NAME'), build_id, - build_job, build_server, system_config, - ProjVar.get_var('LOG_DIR'), tc_end_time, - session_str, version_and_patch)) - # Add result summary to beginning of the file - f.write( - '\nSummary:\nPassed: {} ({})\nFailed: {} ({})\nTotal ' - 'Executed: {}\n'. - format(TestRes.PASSNUM, pass_rate, TestRes.FAILNUM, - fail_rate, total_exec)) - if TestRes.SKIPNUM > 0: - f.write('------------\nSkipped: {}'.format(TestRes.SKIPNUM)) - - LOG.info("Test Results saved to: {}".format(tc_res_path)) - with open(tc_res_path, 'r', encoding='utf8') as fin: - print(fin.read()) - except Exception as e: - LOG.exception( - "Failed to add session summary to test_results.py. " - "\nDetails: {}".format(e.__str__())) - # Below needs con_ssh to be initialized - try: - from utils.clients.ssh import ControllerClient - con_ssh = ControllerClient.get_active_controller() - except: - LOG.warning("No con_ssh found") - return - - try: - parse_log.parse_test_steps(ProjVar.get_var('LOG_DIR')) - except Exception as e: - LOG.warning( - "Unable to parse test steps. \nDetails: {}".format(e.__str__())) - - if test_count > 0 and (ProjVar.get_var('ALWAYS_COLLECT') or ( - has_fail and ProjVar.get_var('COLLECT_ALL'))): - # Collect tis logs if collect all required upon test(s) failure - # Failure on collect all would not change the result of the last test - # case. 
- try: - setups.collect_tis_logs(con_ssh) - except Exception as e: - LOG.warning("'collect all' failed. {}".format(e.__str__())) - - ssh_list = ControllerClient.get_active_controllers(fail_ok=True, - current_thread_only=True) - for con_ssh_ in ssh_list: - try: - con_ssh_.close() - except: - pass - - -def pytest_collection_modifyitems(items): - # print("Collection modify") - move_to_last = [] - absolute_last = [] - - for item in items: - # re-order tests: - trylast_marker = item.get_closest_marker('trylast') - abslast_marker = item.get_closest_marker('abslast') - - if abslast_marker: - absolute_last.append(item) - elif trylast_marker: - move_to_last.append(item) - - priority_marker = item.get_closest_marker('priorities') - if priority_marker is not None: - priorities = priority_marker.args - for priority in priorities: - item.add_marker(eval("pytest.mark.{}".format(priority))) - - feature_marker = item.get_closest_marker('features') - if feature_marker is not None: - features = feature_marker.args - for feature in features: - item.add_marker(eval("pytest.mark.{}".format(feature))) - - # known issue marker - known_issue_mark = item.get_closest_marker('known_issue') - if known_issue_mark is not None: - issue = known_issue_mark.args[0] - msg = "{} has a workaround due to {}".format(item.nodeid, issue) - print(msg) - LOG.debug(msg=msg) - item.add_marker(eval("pytest.mark.known_issue")) - - # add dc maker to all tests start with test_dc_xxx - dc_maker = item.get_marker('dc') - if not dc_maker and 'test_dc_' in item.nodeid: - item.add_marker(pytest.mark.dc) - - # add trylast tests to the end - for item in move_to_last: - items.remove(item) - items.append(item) - - for i in absolute_last: - items.remove(i) - items.append(i) - - -def pytest_generate_tests(metafunc): - # Prefix 'remote_cli' to test names so they are reported as a different - # testcase - if ProjVar.get_var('REMOTE_CLI'): - metafunc.parametrize('prefix_remote_cli', ['remote_cli']) - - -############################################################## -# Manipulating fixture orders based on following pytest rules -# session > module > class > function -# autouse > non-autouse -# alphabetic after full-filling above criteria -# -# Orders we want on fixtures of same scope: -# check_alarms > delete_resources > config_host -############################################################# - -@pytest.fixture(scope='session') -def check_alarms(): - LOG.debug("Empty check alarms") - return - - -@pytest.fixture(scope='session') -def config_host_class(): - LOG.debug("Empty config host class") - return - - -@pytest.fixture(scope='session') -def config_host_module(): - LOG.debug("Empty config host module") - - -@pytest.fixture(autouse=True) -def a1_fixture(check_alarms): - return - - -@pytest.fixture(scope='module', autouse=True) -def c1_fixture(config_host_module): - return - - -@pytest.fixture(scope='class', autouse=True) -def c2_fixture(config_host_class): - return - - -@pytest.fixture(scope='session', autouse=True) -def prefix_remote_cli(): - return - - -def __params_gen(index): - return 'iter{}'.format(index) - - -@pytest.fixture(scope='session') -def global_setup(): - os.makedirs(ProjVar.get_var('TEMP_DIR'), exist_ok=True) - os.makedirs(ProjVar.get_var('PING_FAILURE_DIR'), exist_ok=True) - os.makedirs(ProjVar.get_var('GUEST_LOGS_DIR'), exist_ok=True) - - if region: - setups.set_region(region=region) - - -##################################### -# End of fixture order manipulation # -##################################### - - -def 
pytest_sessionfinish(): - if ProjVar.get_var('TELNET_THREADS'): - threads, end_event = ProjVar.get_var('TELNET_THREADS') - end_event.set() - for thread in threads: - thread.join() - - if repeat_count > 0 and has_fail: - # _thread.interrupt_main() - print('Printing traceback: \n' + '\n'.join(tracebacks)) - pytest.exit("\n========== Test failed - " - "Test session aborted without teardown to leave the " - "system in state ==========") - - if no_teardown: - pytest.exit( - "\n========== Test session stopped without teardown after first " - "test executed ==========") diff --git a/automated-pytest-suite/consts/auth.py b/automated-pytest-suite/consts/auth.py deleted file mode 100755 index bdef9b20..00000000 --- a/automated-pytest-suite/consts/auth.py +++ /dev/null @@ -1,356 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -class Tenant: - __PASSWORD = 'St8rlingX*' - __REGION = 'RegionOne' - __URL_PLATFORM = 'http://192.168.204.2:5000/v3/' - __URL_CONTAINERS = 'http://keystone.openstack.svc.cluster.local/v3' - __DC_MAP = {'SystemController': {'region': 'SystemController', - 'auth_url': __URL_PLATFORM}, - 'RegionOne': {'region': 'RegionOne', - 'auth_url': __URL_PLATFORM}} - - # Platform openstack user - admin - __ADMIN_PLATFORM = { - 'user': 'admin', - 'password': __PASSWORD, - 'tenant': 'admin', - 'domain': 'Default', - 'platform': True, - } - - # Containerized openstack users - admin, and two test users/tenants - __ADMIN = { - 'user': 'admin', - 'password': __PASSWORD, - 'tenant': 'admin', - 'domain': 'Default' - } - - __TENANT1 = { - 'user': 'tenant1', - 'password': __PASSWORD, - 'tenant': 'tenant1', - 'domain': 'Default', - 'nova_keypair': 'keypair-tenant1' - } - - __TENANT2 = { - 'user': 'tenant2', - 'password': __PASSWORD, - 'tenant': 'tenant2', - 'domain': 'Default', - 'nova_keypair': 'keypair-tenant2' - } - - __tenants = { - 'ADMIN_PLATFORM': __ADMIN_PLATFORM, - 'ADMIN': __ADMIN, - 'TENANT1': __TENANT1, - 'TENANT2': __TENANT2} - - @classmethod - def add_dc_region(cls, region_info): - cls.__DC_MAP.update(region_info) - - @classmethod - def set_platform_url(cls, url, central_region=False): - """ - Set auth_url for platform keystone - Args: - url (str): - central_region (bool) - """ - if central_region: - cls.__DC_MAP.get('SystemController')['auth_url'] = url - cls.__DC_MAP.get('RegionOne')['auth_url'] = url - else: - cls.__URL_PLATFORM = url - - @classmethod - def set_region(cls, region): - """ - Set default region for all tenants - Args: - region (str): e.g., SystemController, subcloud-2 - - """ - cls.__REGION = region - - @classmethod - def add(cls, username, tenantname=None, dictname=None, password=None, - region=None, auth_url=None, domain='Default', **kwargs): - user_dict = dict(user=username) - user_dict['tenant'] = tenantname - user_dict['password'] = password if password else cls.__PASSWORD - user_dict['domain'] = domain - if region: - user_dict['region'] = region - if auth_url: - user_dict['auth_url'] = auth_url - if kwargs: - user_dict.update(kwargs) - - dictname = dictname.upper() if dictname else username.upper(). \ - replace('-', '_') - cls.__tenants[dictname] = user_dict - return user_dict - - __primary = 'TENANT1' - - @classmethod - def get(cls, tenant_dictname, dc_region=None): - """ - Get tenant auth dict that can be passed to auth_info in cli cmd - Args: - tenant_dictname (str): e.g., tenant1, TENANT2, system_controller - dc_region (None|str): key for dc_region added via add_dc_region. 
- Used to update auth_url and region - e.g., SystemController, RegionOne, subcloud-2 - - Returns (dict): mutable dictionary. If changed, DC map or tenant dict - will update as well. - - """ - - tenant_dictname = tenant_dictname.upper().replace('-', '_') - tenant_dict = cls.__tenants.get(tenant_dictname) - if not tenant_dict: - return tenant_dict - - if dc_region: - region_dict = cls.__DC_MAP.get(dc_region, None) - if not region_dict: - raise ValueError( - 'Distributed cloud region {} is not added to ' - 'DC_MAP yet. DC_MAP: {}'.format(dc_region, cls.__DC_MAP)) - - tenant_dict = dict(tenant_dict) - tenant_dict.update({'region': region_dict['region']}) - else: - tenant_dict.pop('region', None) - - return tenant_dict - - @classmethod - def get_region_and_url(cls, platform=False, dc_region=None): - auth_region_and_url = { - 'auth_url': - cls.__URL_PLATFORM if platform else cls.__URL_CONTAINERS, - 'region': cls.__REGION - } - - if dc_region: - region_dict = cls.__DC_MAP.get(dc_region, None) - if not region_dict: - raise ValueError( - 'Distributed cloud region {} is not added to DC_MAP yet. ' - 'DC_MAP: {}'.format(dc_region, cls.__DC_MAP)) - auth_region_and_url['region'] = region_dict.get('region') - if platform: - auth_region_and_url['auth_url'] = region_dict.get('auth_url') - - return auth_region_and_url - - @classmethod - def set_primary(cls, tenant_dictname): - """ - should be called after _set_region and _set_url - Args: - tenant_dictname (str): Tenant dict name - - Returns: - - """ - cls.__primary = tenant_dictname.upper() - - @classmethod - def get_primary(cls): - return cls.get(tenant_dictname=cls.__primary) - - @classmethod - def get_secondary(cls): - secondary = 'TENANT1' if cls.__primary != 'TENANT1' else 'TENANT2' - return cls.get(tenant_dictname=secondary) - - @classmethod - def update(cls, tenant_dictname, username=None, password=None, tenant=None, - **kwargs): - tenant_dict = cls.get(tenant_dictname) - - if not isinstance(tenant_dict, dict): - raise ValueError("{} dictionary does not exist in " - "consts/auth.py".format(tenant_dictname)) - - if not username and not password and not tenant and not kwargs: - raise ValueError("Please specify username, password, tenant, " - "and/or domain to update for {} dict". 
- format(tenant_dictname)) - - if username: - kwargs['user'] = username - if password: - kwargs['password'] = password - if tenant: - kwargs['tenant'] = tenant - tenant_dict.update(kwargs) - cls.__tenants[tenant_dictname] = tenant_dict - - @classmethod - def get_dc_map(cls): - return cls.__DC_MAP - - -class HostLinuxUser: - - __SYSADMIN = { - 'user': 'sysadmin', - 'password': 'St8rlingX*' - } - - @classmethod - def get_user(cls): - return cls.__SYSADMIN['user'] - - @classmethod - def get_password(cls): - return cls.__SYSADMIN['password'] - - @classmethod - def get_home(cls): - return cls.__SYSADMIN.get('home', '/home/{}'.format(cls.get_user())) - - @classmethod - def set_user(cls, username): - cls.__SYSADMIN['user'] = username - - @classmethod - def set_password(cls, password): - cls.__SYSADMIN['password'] = password - - @classmethod - def set_home(cls, home): - if home: - cls.__SYSADMIN['home'] = home - - -class Guest: - CREDS = { - 'tis-centos-guest': { - 'user': 'root', - 'password': 'root' - }, - - 'cgcs-guest': { - 'user': 'root', - 'password': 'root' - }, - - 'ubuntu': { - 'user': 'ubuntu', - 'password': None - }, - - 'centos_6': { - 'user': 'centos', - 'password': None - }, - - 'centos_7': { - 'user': 'centos', - 'password': None - }, - - # This image has some issue where it usually fails to boot - 'opensuse_13': { - 'user': 'root', - 'password': None - }, - - # OPV image has root/root enabled - 'rhel': { - 'user': 'root', - 'password': 'root' - }, - - 'cirros': { - 'user': 'cirros', - 'password': 'cubswin:)' - }, - - 'win_2012': { - 'user': 'Administrator', - 'password': 'Li69nux*' - }, - - 'win_2016': { - 'user': 'Administrator', - 'password': 'Li69nux*' - }, - - 'ge_edge': { - 'user': 'root', - 'password': 'root' - }, - - 'vxworks': { - 'user': 'root', - 'password': 'root' - }, - - } - - @classmethod - def set_user(cls, image_name, username): - cls.CREDS[image_name]['user'] = username - - @classmethod - def set_password(cls, image_name, password): - cls.CREDS[image_name]['password'] = password - - -class TestFileServer: - # Place holder for shared file server in future. - SERVER = 'server_name_or_ip_that_can_ssh_to' - USER = 'username' - PASSWORD = 'password' - HOME = 'my_home' - HOSTNAME = 'hostname' - PROMPT = r'[\[]?.*@.*\$[ ]?' - - -class CliAuth: - - __var_dict = { - 'OS_AUTH_URL': 'http://192.168.204.2:5000/v3', - 'OS_ENDPOINT_TYPE': 'internalURL', - 'CINDER_ENDPOINT_TYPE': 'internalURL', - 'OS_USER_DOMAIN_NAME': 'Default', - 'OS_PROJECT_DOMAIN_NAME': 'Default', - 'OS_IDENTITY_API_VERSION': '3', - 'OS_REGION_NAME': 'RegionOne', - 'OS_INTERFACE': 'internal', - 'HTTPS': False, - 'OS_KEYSTONE_REGION_NAME': None, - } - - @classmethod - def set_vars(cls, **kwargs): - - for key in kwargs: - cls.__var_dict[key.upper()] = kwargs[key] - - @classmethod - def get_var(cls, var_name): - var_name = var_name.upper() - valid_vars = cls.__var_dict.keys() - if var_name not in valid_vars: - raise ValueError("Invalid var_name. Valid vars: {}". - format(valid_vars)) - - return cls.__var_dict[var_name] diff --git a/automated-pytest-suite/consts/cli_errs.py b/automated-pytest-suite/consts/cli_errs.py deleted file mode 100644 index 1450a35c..00000000 --- a/automated-pytest-suite/consts/cli_errs.py +++ /dev/null @@ -1,192 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - - -class VCPUSchedulerErr: - CANNOT_SET_VCPU0 = "vcpu 0 cannot be specified" - VCPU_VAL_OUT_OF_RANGE = "vcpu value out of range" - INVALID_PRIORITY = "priority must be between 1-99" - PRIORITY_NOT_INTEGER = "priority must be an integer" - INVALID_FORMAT = "invalid format" - UNSUPPORTED_POLICY = "not a supported policy" - POLICY_MUST_SPECIFIED_LAST = "policy/priority for all vcpus must be " \ - "specified last" - MISSING_PARAMETER = "missing required parameter" - TOO_MANY_PARAMETERS = "too many parameters" - VCPU_MULTIPLE_ASSIGNMENT = "specified multiple times, specification is " \ - "ambiguous" - CPU_MODEL_UNAVAIL = "No valid host was found.*Host VCPU model.*required.*" - CPU_MODEL_CONFLICT = "Image vCPU model is not permitted to override " \ - "configuration set against the flavor" - - -class NumaErr: - GENERAL_ERR_PIKE = 'Requested instance NUMA topology cannot fit the ' \ - 'given host NUMA topology' - # NUMA_AFFINITY_MISMATCH = " not match requested NUMA: {}" - NUMA_VSWITCH_MISMATCH = 'vswitch not configured.* does not match ' \ - 'requested NUMA' - NUMA_NODE_EXCLUDED = "NUMA: {} excluded" - # UNINITIALIZED = '(NUMATopologyFilter) Uninitialized' - TWO_NUMA_ONE_VSWITCH = 'vswitch not configured' - FLV_UNDEVISIBLE = 'ERROR (Conflict): flavor vcpus not evenly divisible ' \ - 'by the specified hw:numa_nodes value' - FLV_CPU_OR_MEM_UNSPECIFIED = 'ERROR (Conflict): CPU and memory ' \ - 'allocation must be provided for all ' \ - 'NUMA nodes' - INSUFFICIENT_CORES = 'Not enough free cores to schedule the instance' - - -class MinCPUErr: - VAL_LARGER_THAN_VCPUS = "min_vcpus must be less than or equal to " \ - "the flavor vcpus value" - VAL_LESS_THAN_1 = "min_vcpus must be greater than or equal to 1" - CPU_POLICY_NOT_DEDICATED = "min_vcpus is only valid when hw:cpu_policy " \ - "is dedicated" - - -class ScaleErr: - SCALE_LIMIT_HIT = "When scaling, cannot scale beyond limits" - - -class CpuAssignment: - VSWITCH_TOO_MANY_CORES = "The vswitch function can only be assigned up to" \ - " 8 core" - TOTAL_TOO_MANY_CORES = "More total logical cores requested than present " \ - "on 'Processor {}'" - NO_VM_CORE = "There must be at least one unused core for VMs." - VSWITCH_INSUFFICIENT_CORES = "The vswitch function must have at least {} " \ - "core(s)" - - -class CPUThreadErr: - INVALID_POLICY = "invalid hw:cpu_thread_policy '{}', must be one of " \ - "prefer, isolate, require" - DEDICATED_CPU_REQUIRED_FLAVOR = 'ERROR (Conflict): hw:cpu_thread_policy ' \ - 'is only valid when hw:cpu_policy is ' \ - 'dedicated. Either unset ' \ - 'hw:cpu_thread_policy or set ' \ - 'hw:cpu_policy to dedicated.' - DEDICATED_CPU_REQUIRED_BOOT_VM = 'ERROR (BadRequest): Cannot set cpu ' \ - 'thread pinning policy in a non ' \ - 'dedicated ' \ - 'cpu pinning policy' - VCPU_NUM_UNDIVISIBLE = "(NUMATopologyFilter) Cannot use 'require' cpu " \ - "threads policy as requested #VCPUs: {}, " \ - "is not divisible by number of threads: 2" - INSUFFICIENT_CORES_FOR_ISOLATE = "{}: (NUMATopologyFilter) Cannot use " \ - "isolate cpu thread policy as requested " \ - "VCPUS: {} is greater than available " \ - "CPUs with all siblings free" - HT_HOST_UNAVAIL = "(NUMATopologyFilter) Host not useable. Requested " \ - "threads policy: '{}'; from flavor or image " \ - "is not allowed on non-hyperthreaded host" - UNSET_SHARED_VCPU = "Cannot set hw:cpu_thread_policy to {} if " \ - "hw:wrs:shared_vcpu is set. 
Either unset " \ - "hw:cpu_thread_policy, set it to prefer, or unset " \ - "hw:wrs:shared_vcpu" - UNSET_MIN_VCPUS = "Cannot set hw:cpu_thread_policy to {} if " \ - "hw:wrs:min_vcpus is set. Either unset " \ - "hw:cpu_thread_policy, set it to another policy, " \ - "or unset hw:wrs:min_vcpus" - CONFLICT_FLV_IMG = "Image property 'hw_cpu_thread_policy' is not " \ - "permitted to override CPU thread pinning policy " \ - "set against the flavor" - - -class CPUPolicyErr: - CONFLICT_FLV_IMG = "Image property 'hw_cpu_policy' is not permitted to " \ - "override CPU pinning policy set against " \ - "the flavor " - - -class SharedCPUErr: - DEDICATED_CPU_REQUIRED = "hw:wrs:shared_vcpu is only valid when " \ - "hw:cpu_policy is dedicated" - INVALID_VCPU_ID = "hw:wrs:shared_vcpu must be greater than or equal to 0" - MORE_THAN_FLAVOR = "hw:wrs:shared_vcpu value ({}) must be less than " \ - "flavor vcpus ({})" - - -class ResizeVMErr: - RESIZE_ERR = "Error resizing server" - SHARED_NOT_ENABLED = 'Shared vCPU not enabled .*, required by instance ' \ - 'cell {}' - - -class ColdMigErr: - HT_HOST_REQUIRED = "(NUMATopologyFilter) Host not useable. Requested " \ - "threads policy: '[{}, {}]'; from flavor or " \ - "image is not allowed on non-hyperthreaded host" - - -class LiveMigErr: - BLOCK_MIG_UNSUPPORTED = "is not on local storage: Block migration can " \ - "not be used with shared storage" - GENERAL_NO_HOST = "No valid host was found. There are not enough hosts " \ - "available." - BLOCK_MIG_UNSUPPORTED_LVM = 'Block live migration is not supported for ' \ - 'hosts with LVM backed storage' - LVM_PRECHECK_ERROR = 'Live migration can not be used with LVM backed ' \ - 'storage except a booted from volume VM ' \ - 'which does not have a local disk' - - -class NetworkingErr: - INVALID_VXLAN_VNI_RANGE = "exceeds 16777215" - INVALID_MULTICAST_IP_ADDRESS = "is not a valid multicast IP address." - INVALID_VXLAN_PROVISION_PORTS = "Invalid input for port" - VXLAN_TTL_RANGE_MISSING = "VXLAN time-to-live attribute missing" - VXLAN_TTL_RANGE_TOO_LARGE = "is too large - must be no larger than '255'." - VXLAN_TTL_RANGE_TOO_SMALL = "is too small - must be at least '1'." 
- OVERLAP_SEGMENTATION_RANGE = "segmentation id range overlaps with" - INVALID_MTU_VALUE = "requires an interface MTU value of at least" - VXLAN_MISSING_IP_ON_INTERFACE = "requires an IP address" - WRONG_IF_ADDR_MODE = "interface address mode must be 'static'" - SET_IF_ADDR_MODE_WHEN_IP_EXIST = "addresses still exist on interfac" - NULL_IP_ADDR = "Address must not be null" - NULL_NETWORK_ADDR = "Network must not be null" - NULL_GATEWAY_ADDR = "Gateway address must not be null" - NULL_HOST_PARTION_ADDR = "Host bits must not be zero" - NOT_UNICAST_ADDR = "Address must be a unicast address" - NOT_BROADCAST_ADDR = "Address cannot be the network broadcast address" - DUPLICATE_IP_ADDR = "already exists" - INVALID_IP_OR_PREFIX = "Invalid IP address and prefix" - INVALID_IP_NETWORK = "Invalid IP network" - ROUTE_GATEWAY_UNREACHABLE = "not reachable" - IP_VERSION_NOT_MATCH = "Network and gateway IP versions must match" - GATEWAY_IP_IN_SUBNET = "Gateway address must not be within destination " \ - "subnet" - NETWORK_IP_EQUAL_TO_GATEWAY = "Network and gateway IP addresses must be " \ - "different" - - -class PciAddrErr: - NONE_ZERO_DOMAIN = 'Only domain 0000 is supported' - LARGER_THAN_MAX_BUS = 'PCI bus maximum value is 8' - NONE_ZERO_FUNCTION = 'Only function 0 is supported' - RESERVED_SLOTS_BUS0 = 'Slots 0,1 are reserved for PCI bus 0' - RESERVED_SLOT_ANY_BUS = 'Slots 0 is reserved for any PCI bus' - LARGER_THAN_MAX_SLOT = 'PCI slot maximum value is 31' - BAD_FORMAT = 'Bad PCI address format' - WRONG_BUS_VAL = 'Wrong bus value for PCI address' - - -class SrvGrpErr: - EXCEEDS_GRP_SIZE = 'Action would result in server group {} exceeding the ' \ - 'group size of {}' - HOST_UNAVAIL_ANTI_AFFINITY = '(ServerGroupAntiAffinityFilter) ' \ - 'Anti-affinity server group specified, ' \ - 'but this host is already used by that group' - - -class CpuRtErr: - RT_AND_ORD_REQUIRED = 'Realtime policy needs vCPU.* mask configured with ' \ - 'at least 1 RT vCPU and 1 ordinary vCPU' - DED_CPU_POL_REQUIRED = 'Cannot set realtime policy in a non dedicated cpu' \ - ' pinning policy' - RT_MASK_SHARED_VCPU_CONFLICT = 'hw:wrs:shared_vcpu .* is not a subset of ' \ - 'non-realtime vCPUs' diff --git a/automated-pytest-suite/consts/filepaths.py b/automated-pytest-suite/consts/filepaths.py deleted file mode 100755 index 2c86680f..00000000 --- a/automated-pytest-suite/consts/filepaths.py +++ /dev/null @@ -1,55 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - - -class StxPath: - TIS_UBUNTU_PATH = '~/userdata/ubuntu_if_config.sh' - TIS_CENTOS_PATH = '~/userdata/centos_if_config.sh' - USERDATA = '~/userdata/' - IMAGES = '~/images/' - HEAT = '~/heat/' - BACKUPS = '/opt/backups' - CUSTOM_HEAT_TEMPLATES = '~/custom_heat_templates/' - HELM_CHARTS_DIR = '/var/www/pages/helm_charts/' - DOCKER_CONF = '/etc/docker-distribution/registry/config.yml' - DOCKER_REPO = '/var/lib/docker-distribution/docker/registry/v2/repositories' - - -class VMPath: - VM_IF_PATH_UBUNTU = '/etc/network/interfaces.d/' - ETH_PATH_UBUNTU = '/etc/network/interfaces.d/{}.cfg' - # Below two paths are common for CentOS, OpenSUSE, and RHEL - VM_IF_PATH_CENTOS = '/etc/sysconfig/network-scripts/' - ETH_PATH_CENTOS = '/etc/sysconfig/network-scripts/ifcfg-{}' - - # Centos paths for ipv4: - RT_TABLES = '/etc/iproute2/rt_tables' - ETH_RT_SCRIPT = '/etc/sysconfig/network-scripts/route-{}' - ETH_RULE_SCRIPT = '/etc/sysconfig/network-scripts/rule-{}' - ETH_ARP_ANNOUNCE = '/proc/sys/net/ipv4/conf/{}/arp_announce' - ETH_ARP_FILTER = '/proc/sys/net/ipv4/conf/{}/arp_filter' - - -class UserData: - ADDUSER_TO_GUEST = 'cloud_config_adduser.txt' - DPDK_USER_DATA = 'dpdk_user_data.txt' - - -class TestServerPath: - USER_DATA = '/home/svc-cgcsauto/userdata/' - TEST_SCRIPT = '/home/svc-cgcsauto/test_scripts/' - CUSTOM_HEAT_TEMPLATES = '/sandbox/custom_heat_templates/' - CUSTOM_APPS = '/sandbox/custom_apps/' - - -class PrivKeyPath: - OPT_PLATFORM = '/opt/platform/id_rsa' - SYS_HOME = '~/.ssh/id_rsa' - - -class SysLogPath: - DC_MANAGER = '/var/log/dcmanager/dcmanager.log' - DC_ORCH = '/var/log/dcorch/dcorch.log' diff --git a/automated-pytest-suite/consts/horizon.py b/automated-pytest-suite/consts/horizon.py deleted file mode 100644 index 1712512f..00000000 --- a/automated-pytest-suite/consts/horizon.py +++ /dev/null @@ -1,8 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -test_result = False diff --git a/automated-pytest-suite/consts/lab.py b/automated-pytest-suite/consts/lab.py deleted file mode 100755 index 80b97a27..00000000 --- a/automated-pytest-suite/consts/lab.py +++ /dev/null @@ -1,167 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -class Labs(object): - # Place for existing stx systems for convenience. - # --lab can be used in cmdline to specify an existing system - - # Each lab should be a dictionary such as following. The short_name and name are free style - # strings while the floating IP and controller-0/1 IPs should map to what is shown in system - # oam-show - - # STX_SYS1 = { - # 'short_name': 'my_server1', - # 'name': 'my_server1.com', - # 'floating ip': '10.10.10.2', - # 'controller-0 ip': '10.10.10.3', - # 'controller-1 ip': '10.10.10.4', - # } - pass - - -def update_lab(lab_dict_name=None, lab_name=None, floating_ip=None, **kwargs): - """ - Update/Add lab dict params for specified lab - Args: - lab_dict_name (str|None): - lab_name (str|None): lab short_name. 
This is used only if - lab_dict_name is not specified - floating_ip (str|None): - **kwargs: Some possible keys: subcloud-1, name, etc - - Returns (dict): updated lab dict - - """ - - if not lab_name and not lab_dict_name: - from consts.proj_vars import ProjVar - lab_name = ProjVar.get_var('LAB').get('short_name', None) - if not lab_name: - raise ValueError("lab_dict_name or lab_name needs to be specified") - - if floating_ip: - kwargs.update(**{'floating ip': floating_ip}) - - if not kwargs: - raise ValueError("Please specify floating_ip and/or kwargs") - - if not lab_dict_name: - attr_names = [attr for attr in dir(Labs) if not attr.startswith('__')] - lab_names = [getattr(Labs, attr).get('short_name') for attr in - attr_names] - lab_index = lab_names.index(lab_name.lower().strip()) - lab_dict_name = attr_names[lab_index] - else: - lab_dict_name = lab_dict_name.upper().replace('-', '_') - - lab_dict = getattr(Labs, lab_dict_name) - lab_dict.update(kwargs) - return lab_dict - - -def get_lab_dict(lab, key='short_name'): - """ - - Args: - lab: lab name or fip - key: unique identifier to locate a lab. Valid values: short_name, - name, floating ip - - Returns (dict|None): lab dict or None if no matching lab found - """ - __lab_attr_list = [attr for attr in dir(Labs) if not attr.startswith('__')] - __lab_list = [getattr(Labs, attr) for attr in __lab_attr_list] - __lab_list = [lab for lab in __lab_list if isinstance(lab, dict)] - - lab_info = None - for lab_ in __lab_list: - if lab.lower().replace('-', '_') == lab_.get(key).lower().replace('-', - '_'): - lab_info = lab_ - break - - return lab_info - - -def add_lab_entry(floating_ip, dict_name=None, short_name=None, name=None, - **kwargs): - """ - Add a new lab dictionary to Labs class - Args: - floating_ip (str): floating ip of a lab to be added - dict_name: name of the entry, such as 'PV0' - short_name: short name of the TiS system, such as ip_1_4 - name: name of the STX system, such as 'yow-cgcs-pv-0' - **kwargs: other information of the lab such as controllers' ips, etc - - Returns: - dict: lab dict added to Labs class - - """ - for attr in dir(Labs): - lab = getattr(Labs, attr) - if isinstance(lab, dict): - if lab['floating ip'] == floating_ip: - raise ValueError( - "Entry for {} already exists in Labs class!".format( - floating_ip)) - - if dict_name and dict_name in dir(Labs): - raise ValueError( - "Entry for {} already exists in Labs class!".format(dict_name)) - - if not short_name: - short_name = floating_ip - - if not name: - name = floating_ip - - if not dict_name: - dict_name = floating_ip - - lab_dict = {'name': name, - 'short_name': short_name, - 'floating ip': floating_ip, - } - - lab_dict.update(kwargs) - setattr(Labs, dict_name, lab_dict) - return lab_dict - - -class NatBoxes(object): - # Place for existing NatBox that are already configured - NAT_BOX_HW_EXAMPLE = { - 'name': 'nat_hw', - 'ip': '10.10.10.10', - 'user': 'natbox_user', - 'password': 'natbox_password' - } - - # Following example when localhost is configured as natbox, and test cases - # are also ran from same localhost - NAT_BOX_VBOX_EXAMPLE = { - 'name': 'localhost', - 'ip': 'localhost', - 'user': None, - 'password': None, - } - - @staticmethod - def add_natbox(ip, user=None, password=None, prompt=None): - user = user if user else 'svc-cgcsauto' - password = password if password else ')OKM0okm' - - nat_dict = {'ip': ip, - 'name': ip, - 'user': user, - 'password': password, - } - if prompt: - nat_dict['prompt'] = prompt - setattr(NatBoxes, 'NAT_NEW', nat_dict) - return 
nat_dict diff --git a/automated-pytest-suite/consts/proj_vars.py b/automated-pytest-suite/consts/proj_vars.py deleted file mode 100644 index 0780430f..00000000 --- a/automated-pytest-suite/consts/proj_vars.py +++ /dev/null @@ -1,89 +0,0 @@ -# -# Copyright (c) 2019, 2020 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -# Please DO NOT import any modules - - -class ProjVar: - __var_dict = {'BUILD_PATH': None, - 'LOG_DIR': None, - 'SOURCE_OPENRC': False, - 'SW_VERSION': [], - 'PATCH': None, - 'SESSION_ID': None, - 'CGCS_DB': True, - 'IS_SIMPLEX': False, - 'KEYSTONE_DEBUG': False, - 'TEST_NAME': None, - 'PING_FAILURE': False, - 'LAB': None, - 'ALWAYS_COLLECT': False, - 'REGION': 'RegionOne', - 'COLLECT_TELNET': False, - 'TELNET_THREADS': None, - 'SYS_TYPE': None, - 'COLLECT_SYS_NET_INFO': False, - 'IS_VBOX': False, - 'RELEASE': 'R6', - 'REMOTE_CLI': False, - 'USER_FILE_DIR': '~/', - 'NO_TEARDOWN': False, - 'VSWITCH_TYPE': None, - 'IS_DC': False, - 'PRIMARY_SUBCLOUD': None, - 'SUBCLOUD_LIST': None, - 'BUILD_INFO': {}, - 'TEMP_DIR': '', - 'INSTANCE_BACKING': {}, - 'OPENSTACK_DEPLOYED': None, - 'DEFAULT_INSTANCE_BACKING': None, - 'STX_KEYFILE_PATH': '~/.ssh/id_rsa', - 'IPV6_OAM': None, - } - - @classmethod - def init_vars(cls, lab, natbox, logdir, tenant, collect_all, always_collect, - horizon_visible): - - labname = lab['short_name'] - - cls.__var_dict.update(**{ - 'NATBOX_KEYFILE_PATH': '~/priv_keys/keyfile_{}.pem'.format(labname), - 'STX_KEYFILE_SYS_HOME': '~/keyfile_{}.pem'.format(labname), - 'LOG_DIR': logdir, - 'TCLIST_PATH': logdir + '/test_results.log', - 'PYTESTLOG_PATH': logdir + '/pytestlog.log', - 'LAB_NAME': lab['short_name'], - 'TEMP_DIR': logdir + '/tmp_files/', - 'PING_FAILURE_DIR': logdir + '/ping_failures/', - 'GUEST_LOGS_DIR': logdir + '/guest_logs/', - 'PRIMARY_TENANT': tenant, - 'LAB': lab, - 'NATBOX': natbox, - 'COLLECT_ALL': collect_all, - 'ALWAYS_COLLECT': always_collect, - 'HORIZON_VISIBLE': horizon_visible - }) - - @classmethod - def set_var(cls, append=False, **kwargs): - for key, val in kwargs.items(): - if append: - cls.__var_dict[key.upper()].append(val) - else: - cls.__var_dict[key.upper()] = val - - @classmethod - def get_var(cls, var_name): - var_name = var_name.upper() - valid_vars = cls.__var_dict.keys() - if var_name not in valid_vars: - raise ValueError( - "Invalid var_name: {}. Valid vars: {}".format(var_name, - valid_vars)) - - return cls.__var_dict[var_name] diff --git a/automated-pytest-suite/consts/reasons.py b/automated-pytest-suite/consts/reasons.py deleted file mode 100644 index b1d32020..00000000 --- a/automated-pytest-suite/consts/reasons.py +++ /dev/null @@ -1,42 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - - -class SkipStorageSpace: - SMALL_CINDER_VOLUMES_POOL = "Cinder Volumes Pool is less than 30G" - INSUFFICIENT_IMG_CONV = 'Insufficient image-conversion space to convert ' \ - '{} image to raw format' - - -class SkipStorageBacking: - LESS_THAN_TWO_HOSTS_WITH_BACKING = "Less than two hosts with {} instance " \ - "storage backing exist on system" - NO_HOST_WITH_BACKING = "No host with {} instance storage backing exists " \ - "on system" - - -class SkipHypervisor: - LESS_THAN_TWO_HYPERVISORS = "Less than two hypervisors available" - - -class SkipHyperthreading: - LESS_THAN_TWO_HT_HOSTS = "Less than two hyperthreaded hosts available" - MORE_THAN_ONE_HT_HOSTS = "More than one hyperthreaded hosts available" - - -class SkipHostIf: - PCI_IF_UNAVAIL = "SRIOV or PCI-passthrough interface unavailable" - PCIPT_IF_UNAVAIL = "PCI-passthrough interface unavailable" - SRIOV_IF_UNAVAIL = "SRIOV interface unavailable" - MGMT_INFRA_UNAVAIL = 'traffic control class is not defined in this lab' - - -class SkipSysType: - SMALL_FOOTPRINT = "Skip for small footprint lab" - LESS_THAN_TWO_CONTROLLERS = "Less than two controllers on system" - SIMPLEX_SYSTEM = 'Not applicable to Simplex system' - DUPLEX_SYSTEM = 'Not applicable to Duplex system' - SIMPLEX_ONLY = 'Only applicable to Simplex system' diff --git a/automated-pytest-suite/consts/stx.py b/automated-pytest-suite/consts/stx.py deleted file mode 100755 index f884aaec..00000000 --- a/automated-pytest-suite/consts/stx.py +++ /dev/null @@ -1,683 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -from consts.proj_vars import ProjVar - -# output of date. such as: Tue Mar 1 18:20:29 UTC 2016 -DATE_OUTPUT = r'[0-2]\d:[0-5]\d:[0-5]\d\s[A-Z]{3,}\s\d{4}$' - -EXT_IP = '8.8.8.8' - -# such as in string '5 packets transmitted, 0 received, 100% packet loss, -# time 4031ms', number 100 will be found -PING_LOSS_RATE = r'\, (\d{1,3})\% packet loss\,' - -# vshell ping loss rate pattern. 3 packets transmitted, 0 received, 0 total, -# 100.00%% loss -VSHELL_PING_LOSS_RATE = r'\, (\d{1,3}).\d{1,2}[%]% loss' - -# Matches 8-4-4-4-12 hexadecimal digits. Lower case only -UUID = r'[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}' - -# Match name and uuid. 
-# Such as: 'ubuntu_14 (a764c205-eb82-4f18-bda6-6c8434223eb5)' -NAME_UUID = r'(.*) \((' + UUID + r')\)' - -# Message to indicate boot from volume from nova show -BOOT_FROM_VOLUME = 'Attempt to boot from volume - no image supplied' - -METADATA_SERVER = '169.254.169.254' - -# Heat template path -HEAT_PATH = 'heat/hot/simple/' -HEAT_SCENARIO_PATH = 'heat/hot/scenarios/' -HEAT_FLAVORS = ['small_ded', 'small_float'] -HEAT_CUSTOM_TEMPLATES = 'custom_heat_templates' - -# special NIC patterns -MELLANOX_DEVICE = 'MT27500|MT27710' -MELLANOX4 = 'MT.*ConnectX-4' - -PLATFORM_AFFINE_INCOMPLETE = '/etc/platform/.task_affining_incomplete' -PLATFORM_CONF_PATH = '/etc/platform/platform.conf' - -SUBCLOUD_PATTERN = 'subcloud' - -PLATFORM_NET_TYPES = ('mgmt', 'oam', 'infra', 'pxeboot') - -TIMEZONES = [ - "Asia/Hong_Kong", # UTC+8 - "America/Los_Angeles", # UTC-8, DST:UTC-7 - "Canada/Eastern", # UTC-5, DST:UTC-4 - "Canada/Central", # UTC-6, DST:UTC-5 - # "Europe/London", # UTC, DST:UTC+1 - "Europe/Berlin", # UTC+1, DST:UTC+2 - "UTC" -] - -STORAGE_AGGREGATE = { - # 'local_lvm' : 'local_storage_lvm_hosts', - 'local_image': 'local_storage_image_hosts', - 'remote': 'remote_storage_hosts', -} - - -class NtpPool: - NTP_POOL_1 = '2.pool.ntp.org,1.pool.ntp.org,0.pool.ntp.org' - NTP_POOL_2 = '1.pool.ntp.org,2.pool.ntp.org,2.pool.ntp.org' - NTP_POOL_3 = '3.ca.pool.ntp.org,2.ca.pool.ntp.org,1.ca.pool.ntp.org' - NTP_POOL_TOO_LONG = '3.ca.pool.ntp.org,2.ca.pool.ntp.org,' \ - '1.ca.pool.ntp.org,1.com,2.com,3.com' - NTP_NAME_TOO_LONG = 'garbage_' * 30 - - -class GuestImages: - TMP_IMG_DIR = '/opt/backups' - DEFAULT = { - 'image_dir': '{}/images'.format(ProjVar.get_var('USER_FILE_DIR')), - 'image_dir_file_server': '/sandbox/images', - 'guest': 'tis-centos-guest' - } - TIS_GUEST_PATTERN = 'cgcs-guest|tis-centos-guest' - GUESTS_NO_RM = ['ubuntu_14', 'tis-centos-guest', 'cgcs-guest'] - # Image files name and size from TestFileServer - # : , , - # , , - IMAGE_FILES = { - 'ubuntu_14': ( - 'ubuntu-14.04-server-cloudimg-amd64-disk1.img', 3, - 'ubuntu_14.qcow2', 'qcow2', 'bare'), - 'ubuntu_12': ( - 'ubuntu-12.04-server-cloudimg-amd64-disk1.img', 8, - 'ubuntu_12.qcow2', 'qcow2', 'bare'), - 'ubuntu_16': ( - 'ubuntu-16.04-xenial-server-cloudimg-amd64-disk1.img', 8, - 'ubuntu_16.qcow2', 'qcow2', 'bare'), - 'centos_6': ( - 'CentOS-6.8-x86_64-GenericCloud-1608.qcow2', 8, - 'centos_6.qcow2', 'qcow2', 'bare'), - 'centos_7': ( - 'CentOS-7-x86_64-GenericCloud.qcow2', 8, - 'centos_7.qcow2', 'qcow2', 'bare'), - 'rhel_6': ( - 'rhel-6.5-x86_64.qcow2', 11, 'rhel_6.qcow2', 'qcow2', 'bare'), - 'rhel_7': ( - 'rhel-7.2-x86_64.qcow2', 11, 'rhel_7.qcow2', 'qcow2', 'bare'), - 'opensuse_11': ( - 'openSUSE-11.3-x86_64.qcow2', 11, - 'opensuse_11.qcow2', 'qcow2', 'bare'), - 'opensuse_12': ( - 'openSUSE-12.3-x86_64.qcow2', 21, - 'opensuse_12.qcow2', 'qcow2', 'bare'), - 'opensuse_13': ( - 'openSUSE-13.2-OpenStack-Guest.x86_64-0.0.10-Build2.94.qcow2', 16, - 'opensuse_13.qcow2', 'qcow2', 'bare'), - 'win_2012': ( - 'win2012r2_cygwin_compressed.qcow2', 13, - 'win2012r2.qcow2', 'qcow2', 'bare'), - 'win_2016': ( - 'win2016_cygwin_compressed.qcow2', 29, - 'win2016.qcow2', 'qcow2', 'bare'), - 'ge_edge': ( - 'edgeOS.hddirect.qcow2', 5, - 'ge_edge.qcow2', 'qcow2', 'bare'), - 'cgcs-guest': ( - 'cgcs-guest.img', 1, 'cgcs-guest.img', 'raw', 'bare'), - 'vxworks': ( - 'vxworks-tis.img', 1, 'vxworks.img', 'raw', 'bare'), - 'tis-centos-guest': ( - None, 2, 'tis-centos-guest.img', 'raw', 'bare'), - 'tis-centos-guest-rt': ( - None, 2, 'tis-centos-guest-rt.img', 'raw', 'bare'), - 
'tis-centos-guest-qcow2': ( - None, 2, 'tis-centos-guest.qcow2', 'qcow2', 'bare'), - 'centos_gpu': ( - 'centos-67-cloud-gpu.img', 8, - 'centos_6_gpu.qcow2', 'qcow2', 'bare'), - 'debian-8-m-agent': ( - 'debian-8-m-agent.qcow2', 1.8, - 'debian-8-m-agent.qcow2', 'qcow2', 'bare'), - 'trusty_uefi': ( - 'trusty-server-cloudimg-amd64-uefi1.img', 2.2, - 'trusty-uefi.qcow2', 'qcow2', 'bare'), - 'uefi_shell': ( - 'uefi_shell.iso', 2, 'uefi_shell.iso', 'raw', 'bare'), - } - - -class Networks: - INFRA_NETWORK_CIDR = "192.168.205.0/24" - IPV4_IP = r'\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}' - - __NEUTRON_NET_NAME_PATTERN = { - 'mgmt': r'tenant\d-mgmt-net', - 'data': r'tenant\d-net', - 'internal': 'internal', - 'external': 'external', - } - __NEUTRON_NET_IP_PATTERN = { - 'data': r'172.\d{1,3}.\d{1,3}.\d{1,3}', - 'mgmt': r'192.168.\d{3}\.\d{1,3}|192.168.[8|9]\d\.\d{1,3}', - 'internal': r'10.\d{1,3}.\d{1,3}.\d{1,3}', - 'external': r'192.168.\d\.\d{1,3}|192.168.[1-5]\d\.\d{1,3}|10.10.\d{' - r'1,3}\.\d{1,3}' - } - - @classmethod - def get_nenutron_net_patterns(cls, net_type='mgmt'): - return cls.__NEUTRON_NET_NAME_PATTERN.get( - net_type), cls.__NEUTRON_NET_IP_PATTERN.get(net_type) - - @classmethod - def set_neutron_net_patterns(cls, net_type, net_name_pattern=None, - net_ip_pattern=None): - if net_type not in cls.__NEUTRON_NET_NAME_PATTERN: - raise ValueError("Unknown net_type {}. Select from: {}".format( - net_type, list(cls.__NEUTRON_NET_NAME_PATTERN.keys()))) - - if net_name_pattern is not None: - cls.__NEUTRON_NET_NAME_PATTERN[net_type] = net_name_pattern - if net_ip_pattern is not None: - cls.__NEUTRON_NET_IP_PATTERN[net_type] = net_ip_pattern - - -class SystemType: - CPE = 'All-in-one' - STANDARD = 'Standard' - - -class StorageAggregate: - LOCAL_LVM = 'local_storage_lvm_hosts' - LOCAL_IMAGE = 'local_storage_image_hosts' - REMOTE = 'remote_storage_hosts' - - -class VMStatus: - # under http://docs.openstack.org/developer/nova/vmstates.html - ACTIVE = 'ACTIVE' - BUILD = 'BUILDING' - REBUILD = 'REBUILD' - VERIFY_RESIZE = 'VERIFY_RESIZE' - RESIZE = 'RESIZED' - ERROR = 'ERROR' - SUSPENDED = 'SUSPENDED' - PAUSED = 'PAUSED' - NO_STATE = 'NO STATE' - HARD_REBOOT = 'HARD REBOOT' - SOFT_REBOOT = 'REBOOT' - STOPPED = "SHUTOFF" - MIGRATING = 'MIGRATING' - - -class ImageStatus: - QUEUED = 'queued' - ACTIVE = 'active' - SAVING = 'saving' - - -class HostAdminState: - UNLOCKED = 'unlocked' - LOCKED = 'locked' - - -class HostOperState: - ENABLED = 'enabled' - DISABLED = 'disabled' - - -class HostAvailState: - DEGRADED = 'degraded' - OFFLINE = 'offline' - ONLINE = 'online' - AVAILABLE = 'available' - FAILED = 'failed' - POWER_OFF = 'power-off' - - -class HostTask: - BOOTING = 'Booting' - REBOOTING = 'Rebooting' - POWERING_ON = 'Powering-on' - POWER_CYCLE = 'Critical Event Power-Cycle' - POWER_DOWN = 'Critical Event Power-Down' - - -class Prompt: - CONTROLLER_0 = r'.*controller\-0[:| ].*\$' - CONTROLLER_1 = r'.*controller\-1[:| ].*\$' - CONTROLLER_PROMPT = r'.*controller\-[01][:| ].*\$ ' - - VXWORKS_PROMPT = '-> ' - - ADMIN_PROMPT = r'\[.*@controller\-[01].*\(keystone_admin\)\]\$' - TENANT1_PROMPT = r'\[.*@controller\-[01] .*\(keystone_tenant1\)\]\$ ' - TENANT2_PROMPT = r'\[.*@controller\-[01] .*\(keystone_tenant2\)\]\$ ' - TENANT_PROMPT = r'\[.*@controller\-[01] .*\(keystone_{}\)\]\$ ' # - # general prompt. 
Need to fill in tenant name - REMOTE_CLI_PROMPT = r'\(keystone_{}\)\]\$ ' # remote cli prompt - - COMPUTE_PROMPT = r'.*compute\-([0-9]){1,}\:~\$' - STORAGE_PROMPT = r'.*storage\-([0-9]){1,}\:~\$' - PASSWORD_PROMPT = r'.*assword\:[ ]?$|assword for .*:[ ]?$' - LOGIN_PROMPT = "ogin:" - SUDO_PASSWORD_PROMPT = 'Password: ' - BUILD_SERVER_PROMPT_BASE = r'{}@{}\:~.*' - TEST_SERVER_PROMPT_BASE = r'\[{}@.*\]\$ ' - # TIS_NODE_PROMPT_BASE = r'{}\:~\$ ' - TIS_NODE_PROMPT_BASE = r'{}[: ]?~.*$' - ADD_HOST = r'.*\(yes/no\).*' - ROOT_PROMPT = '.*root@.*' - Y_N_PROMPT = r'.*\(y/n\)\?.*' - YES_N_PROMPT = r'.*\[yes/N\]\: ?' - CONFIRM_PROMPT = '.*confirm: ?' - - -class NovaCLIOutput: - VM_ACTION_ACCEPTED = "Request to {} server (.*) has been accepted." - VM_START_ACCEPTED = "Request to start server (.*) has been accepted." - VM_STOP_ACCEPTED = "Request to stop server (.*) has been accepted." - VM_DELETE_REJECTED_NOT_EXIST = "No server with a name or ID of '(.*)' " \ - "exists." - VM_DELETE_ACCEPTED = "Request to delete server (.*) has been accepted." - VM_BOOT_REJECT_MEM_PAGE_SIZE_FORBIDDEN = "Page size .* forbidden against .*" - SRV_GRP_DEL_REJ_NOT_EXIST = "Delete for server group (.*) failed" - SRV_GRP_DEL_SUCC = "Server group (.*) has been successfully deleted." - - -class FlavorSpec: - CPU_POLICY = 'hw:cpu_policy' - VCPU_MODEL = 'hw:cpu_model' - SHARED_VCPU = 'hw:wrs:shared_vcpu' - CPU_THREAD_POLICY = 'hw:cpu_thread_policy' - VCPU_SCHEDULER = 'hw:wrs:vcpu:scheduler' - MIN_VCPUS = "hw:wrs:min_vcpus" - STORAGE_BACKING = 'aggregate_instance_extra_specs:stx_storage' - DISK_READ_BYTES = 'quota:disk_read_bytes_sec' - DISK_READ_IOPS = 'quota:disk_read_iops_sec' - DISK_WRITE_BYTES = 'quota:disk_write_bytes_sec' - DISK_WRITE_IOPS = 'quota:disk_write_iops_sec' - DISK_TOTAL_BYTES = 'quota:disk_total_bytes_sec' - DISK_TOTAL_IOPS = 'quota:disk_total_iops_sec' - NUMA_NODES = 'hw:numa_nodes' - NUMA_0 = 'hw:numa_node.0' - NUMA_1 = 'hw:numa_node.1' - NUMA0_CPUS = 'hw:numa_cpus.0' - NUMA1_CPUS = 'hw:numa_cpus.1' - NUMA0_MEM = 'hw:numa_mem.0' - NUMA1_MEM = 'hw:numa_mem.1' - VSWITCH_NUMA_AFFINITY = 'hw:wrs:vswitch_numa_affinity' - MEM_PAGE_SIZE = 'hw:mem_page_size' - AUTO_RECOVERY = 'sw:wrs:auto_recovery' - GUEST_HEARTBEAT = 'sw:wrs:guest:heartbeat' - SRV_GRP_MSG = "sw:wrs:srv_grp_messaging" - NIC_ISOLATION = "hw:wrs:nic_isolation" - PCI_NUMA_AFFINITY = "hw:pci_numa_affinity_policy" - PCI_PASSTHROUGH_ALIAS = "pci_passthrough:alias" - PCI_IRQ_AFFINITY_MASK = "hw:pci_irq_affinity_mask" - CPU_REALTIME = 'hw:cpu_realtime' - CPU_REALTIME_MASK = 'hw:cpu_realtime_mask' - HPET_TIMER = 'sw:wrs:guest:hpet' - NESTED_VMX = 'hw:wrs:nested_vmx' - NUMA0_CACHE_CPUS = 'hw:cache_vcpus.0' - NUMA1_CACHE_CPUS = 'hw:cache_vcpus.1' - NUMA0_L3_CACHE = 'hw:cache_l3.0' - NUMA1_L3_CACHE = 'hw:cache_l3.1' - LIVE_MIG_TIME_OUT = 'hw:wrs:live_migration_timeout' - LIVE_MIG_MAX_DOWNTIME = 'hw:wrs:live_migration_max_downtime' - - -class ImageMetadata: - MEM_PAGE_SIZE = 'hw_mem_page_size' - AUTO_RECOVERY = 'sw_wrs_auto_recovery' - VIF_MODEL = 'hw_vif_model' - CPU_THREAD_POLICY = 'hw_cpu_thread_policy' - CPU_POLICY = 'hw_cpu_policy' - CPU_RT_MASK = 'hw_cpu_realtime_mask' - CPU_RT = 'hw_cpu_realtime' - CPU_MODEL = 'hw_cpu_model' - FIRMWARE_TYPE = 'hw_firmware_type' - - -class VMMetaData: - EVACUATION_PRIORITY = 'sw:wrs:recovery_priority' - - -class InstanceTopology: - NODE = r'node:(\d),' - PGSIZE = r'pgsize:(\d{1,3}),' - VCPUS = r'vcpus:(\d{1,2}),' - PCPUS = r'pcpus:(\d{1,2}),\s' # find a string separated by ', - # ' if multiple numa nodes - CPU_POLICY = 
'pol:(.*),' - SIBLINGS = 'siblings:(.*),' - THREAD_POLICY = 'thr:(.*)$|thr:(.*),' - TOPOLOGY = r'\d{1,2}s,\d{1,2}c,\d{1,2}t' - - -class RouterStatus: - ACTIVE = 'ACTIVE' - DOWN = 'DOWN' - - -class EventLogID: - PATCH_INSTALL_FAIL = '900.002' - PATCH_IN_PROGRESS = '900.001' - CINDER_IO_CONGEST = '800.101' - STORAGE_LOR = '800.011' - STORAGE_POOLQUOTA = '800.003' - STORAGE_ALARM_COND = '800.001' - HEARTBEAT_CHECK_FAILED = '700.215' - HEARTBEAT_ENABLED = '700.211' - REBOOT_VM_COMPLETE = '700.186' - REBOOT_VM_INPROGRESS = '700.182' - REBOOT_VM_ISSUED = '700.181' # soft-reboot or hard-reboot in reason text - VM_DELETED = '700.114' - VM_DELETING = '700.110' - VM_CREATED = '700.108' - MULTI_NODE_RECOVERY = '700.016' - HEARTBEAT_DISABLED = '700.015' - VM_REBOOTING = '700.005' - VM_FAILED = '700.001' - IMA = '500.500' - SERVICE_GROUP_STATE_CHANGE = '401.001' - LOSS_OF_REDUNDANCY = '400.002' - CON_DRBD_SYNC = '400.001' - PROVIDER_NETWORK_FAILURE = '300.005' - NETWORK_AGENT_NOT_RESPOND = '300.003' - CONFIG_OUT_OF_DATE = '250.001' - INFRA_NET_FAIL = '200.009' - BMC_SENSOR_ACTION = '200.007' - STORAGE_DEGRADE = '200.006' - # 200.004 compute-0 experienced a service-affecting failure. - # Auto-recovery in progress. - # host=compute-0 critical April 7, 2017, 2:34 p.m. - HOST_RECOVERY_IN_PROGRESS = '200.004' - HOST_LOCK = '200.001' - NTP_ALARM = '100.114' - INFRA_PORT_FAIL = '100.110' - FS_THRESHOLD_EXCEEDED = '100.104' - CPU_USAGE_HIGH = '100.101' - MNFA_MODE = '200.020' - - -class NetworkingVmMapping: - VSWITCH = { - 'vif': 'avp', - 'flavor': 'medium.dpdk', - } - AVP = { - 'vif': 'avp', - 'flavor': 'small', - } - VIRTIO = { - 'vif': 'avp', - 'flavor': 'small', - } - - -class VifMapping: - VIF_MAP = {'vswitch': 'DPDKAPPS', - 'avp': 'AVPAPPS', - 'virtio': 'VIRTIOAPPS', - 'vhost': 'VHOSTAPPS', - 'sriov': 'SRIOVAPPS', - 'pcipt': 'PCIPTAPPS' - } - - -class LocalStorage: - DIR_PROFILE = 'storage_profiles' - TYPE_STORAGE_PROFILE = ['storageProfile', 'localstorageProfile'] - - -class VMNetwork: - NET_IF = r"auto {}\niface {} inet dhcp\n" - IFCFG_DHCP = """ -DEVICE={} -BOOTPROTO=dhcp -ONBOOT=yes -TYPE=Ethernet -USERCTL=yes -PEERDNS=yes -IPV6INIT={} -PERSISTENT_DHCLIENT=1 -""" - - IFCFG_STATIC = """ -DEVICE={} -BOOTPROTO=static -ONBOOT=yes -TYPE=Ethernet -USERCTL=yes -PEERDNS=yes -IPV6INIT={} -PERSISTENT_DHCLIENT=1 -IPADDR={} -""" - - -class HTTPPort: - NEUTRON_PORT = 9696 - NEUTRON_VER = "v2.0" - CEIL_PORT = 8777 - CEIL_VER = "v2" - GNOCCHI_PORT = 8041 - GNOCCHI_VER = 'v1' - SYS_PORT = 6385 - SYS_VER = "v1" - CINDER_PORT = 8776 - CINDER_VER = "v3" # v1 and v2 are also supported - GLANCE_PORT = 9292 - GLANCE_VER = "v2" - HEAT_PORT = 8004 - HEAT_VER = "v1" - HEAT_CFN_PORT = 8000 - HEAT_CFN_VER = "v1" - NOVA_PORT = 8774 - NOVA_VER = "v2.1" # v3 also supported - NOVA_EC2_PORT = 8773 - NOVA_EC2_VER = "v2" - PATCHING_PORT = 15491 - PATCHING_VER = "v1" - - -class QoSSpec: - READ_BYTES = 'read_bytes_sec' - WRITE_BYTES = 'write_bytes_sec' - TOTAL_BYTES = 'total_bytes_sec' - READ_IOPS = 'read_iops_sec' - WRITE_IOPS = 'write_iops_sec' - TOTAL_IOPS = 'total_iops_sec' - - -class DevClassID: - QAT_VF = '0b4000' - GPU = '030000' - USB = '0c0320|0c0330' - - -class MaxVmsSupported: - SX = 10 - XEON_D = 4 - DX = 10 - VBOX = 2 - - -class CpuModel: - CPU_MODELS = ( - 'Skylake-Server', 'Skylake-Client', - 'Broadwell', 'Broadwell-noTSX', - 'Haswell-noTSX-IBRS', 'Haswell', - 'IvyBridge', 'SandyBridge', - 'Westmere', 'Nehalem', 'Penryn', 'Conroe') - - -class BackendState: - CONFIGURED = 'configured' - CONFIGURING = 'configuring' - - 
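For context: enum-style holders like BackendState above are consumed by polling helpers in the keywords modules removed later in this patch (compare __wait_for_vol_status in cinder_helper.py below). A minimal sketch of that pattern; the wait_for_state name and the get_state callable are illustrative stand-ins, not part of the removed code:

    import time

    def wait_for_state(get_state, expected, timeout=300, check_interval=10):
        # Poll get_state() until it returns the expected value, e.g.
        # BackendState.CONFIGURED, or give up once the timeout expires.
        end_time = time.time() + timeout
        while time.time() < end_time:
            if get_state() == expected:
                return True
            time.sleep(check_interval)
        return False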
-class BackendTask: - RECONFIG_CONTROLLER = 'reconfig-controller' - APPLY_MANIFEST = 'applying-manifests' - - -class PartitionStatus: - READY = 'Ready' - MODIFYING = 'Modifying' - DELETING = 'Deleting' - CREATING = 'Creating' - IN_USE = 'In-Use' - - -class SysType: - AIO_DX = 'AIO-DX' - AIO_SX = 'AIO-SX' - STORAGE = 'Storage' - REGULAR = 'Regular' - MULTI_REGION = 'Multi-Region' - DISTRIBUTED_CLOUD = 'Distributed_Cloud' - - -class HeatStackStatus: - CREATE_FAILED = 'CREATE_FAILED' - CREATE_COMPLETE = 'CREATE_COMPLETE' - UPDATE_COMPLETE = 'UPDATE_COMPLETE' - UPDATE_FAILED = 'UPDATE_FAILED' - DELETE_FAILED = 'DELETE_FAILED' - - -class VimEventID: - LIVE_MIG_BEGIN = 'instance-live-migrate-begin' - LIVE_MIG_END = 'instance-live-migrated' - COLD_MIG_BEGIN = 'instance-cold-migrate-begin' - COLD_MIG_END = 'instance-cold-migrated' - COLD_MIG_CONFIRM_BEGIN = 'instance-cold-migrate-confirm-begin' - COLD_MIG_CONFIRMED = 'instance-cold-migrate-confirmed' - - -class MigStatus: - COMPLETED = 'completed' - RUNNING = 'running' - PREPARING = 'preparing' - PRE_MIG = 'pre-migrating' - POST_MIG = 'post-migrating' - - -class TrafficControl: - CLASSES = {'1:40': 'default', '1:1': 'root', '1:10': 'hiprio', - '1:20': 'storage', '1:30': 'migration', - '1:50': 'drbd'} - - RATE_PATTERN_ROOT = r'class htb 1:1 root rate (\d+)([GMK])bit ceil (\d+)(' \ - r'[GMK])bit burst \d+b cburst \d+b' - RATE_PATTERN = r'class htb (1:\d+) parent 1:1 leaf \d+: prio \d+ rate (' \ - r'\d+)([GMK])bit ceil (\d+)([GMK])bit ' \ - r'burst \d+b cburst \d+b' - - # no infra - MGMT_NO_INFRA = { - 'config': 'no infra', - 'root': (1, 1), - 'default': (0.1, 0.2), - 'hiprio': (0.1, 0.2), - 'storage': (0.5, 1), - 'migration': (0.3, 1), - 'drbd': (0.8, 1)} - - # infra must be sep - MGMT_SEP = { - 'config': 'separate mgmt', - 'root': (1, 1), - 'default': (0.1, 1), - 'hiprio': (0.1, 1)} - - # infra could be sep or over pxe - MGMT_USES_PXE = { - 'config': 'mgmt consolidated over pxeboot', - 'root': (1, 1), - 'default': (0.1, 0.2), - 'hiprio': (0.1, 0.2)} - - # infra over mgmt - MGMT_USED_BY_INFRA = { - 'config': 'infra consolidated over mgmt', - 'root': (1, 1), - 'default': (0.1, 0.2), - 'hiprio': (0.1, 0.2), - 'storage': (0.5, 1), - 'migration': (0.3, 1), - 'drbd': (0.8, 1)} - - # infra over mgmt - INFRA_USES_MGMT = { - 'config': 'infra consolidated over mgmt', - 'root': (0.99, 0.99), - 'default': (0.99 * 0.1, 0.99 * 0.2), - 'hiprio': (0.99 * 0.1, 0.99 * 0.2), - 'storage': (0.99 * 0.5, 0.99 * 1), - 'migration': (0.99 * 0.3, 0.99 * 1), - 'drbd': (0.99 * 0.8, 0.99 * 1)} - - # mgmt could be sep or over pxe - INFRA_SEP = { - 'config': 'separate infra', - 'root': (1, 1), - 'default': (0.1, 0.2), - 'hiprio': (0.1, 0.2), - 'storage': (0.5, 1), - 'migration': (0.3, 1), - 'drbd': (0.8, 1)} - - # mgmt must be over pxe - INFRA_USES_PXE = { - 'config': 'infra and mgmt consolidated over pxeboot', - 'root': (1, 1), - 'default': (0.99 * 0.1, 0.99 * 0.2), # 0.1, 0.2 is the ratio for mgmt - 'hiprio': (0.99 * 0.1, 0.99 * 0.2), # 0.1, 0.2 is the ratio for mgmt - 'storage': (0.99 * 0.5, 0.99), - 'migration': (0.99 * 0.3, 0.99), - 'drbd': (0.99 * 0.8, 0.99)} - - -class SubcloudStatus: - AVAIL_ONLINE = "online" - AVAIL_OFFLINE = "offline" - MGMT_MANAGED = "managed" - MGMT_UNMANAGED = "unmanaged" - SYNCED = 'in-sync' - UNSYNCED = 'out-of-sync' - - -class PodStatus: - RUNNING = 'Running' - COMPLETED = 'Completed' - CRASH = 'CrashLoopBackOff' - POD_INIT = 'PodInitializing' - INIT = 'Init:0/1' - PENDING = 'Pending' - TERMINATING = 'Terminating' - - -class AppStatus: - UPLOADING 
= 'uploading' - UPLOADED = 'uploaded' - UPLOAD_FAILED = 'upload-failed' - APPLIED = 'applied' - APPLY_FAILED = 'apply-failed' - REMOVE_FAILED = 'remove-failed' - DELETE_FAILED = 'delete-failed' - - -class VSwitchType: - OVS_DPDK = 'ovs-dpdk' - AVS = 'avs' - NONE = 'none' - - -class Container: - LOCAL_DOCKER_REG = 'registry.local:9001' diff --git a/automated-pytest-suite/consts/timeout.py b/automated-pytest-suite/consts/timeout.py deleted file mode 100644 index d6de122a..00000000 --- a/automated-pytest-suite/consts/timeout.py +++ /dev/null @@ -1,160 +0,0 @@ -# -# Copyright (c) 2019, 2020 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -CLI_TIMEOUT = 600 - - -class HostTimeout: - # Host in online state after locked - ONLINE_AFTER_LOCK = 1200 - # Compute host reaches enabled/available state after system host-unlock - # returned - COMPUTE_UNLOCK = 840 - # Host reaches enabled/available state after system host-unlock returned - CONTROLLER_UNLOCK = 1360 - # Host reaches enabled/available state after sudo reboot -f from host - REBOOT = 2400 - # Active controller switched and being able to run openstack CLI after - # system host-swact returned - SWACT = 600 - # Host in locked state after system host-lock cli returned - LOCK = 900 - # Task clears in system host-show after host reaches enabled/available state - TASK_CLEAR = 600 - # Host in offline or failed state via system host-show after sudo reboot - # -f returned - FAIL_AFTER_REBOOT = 120 - # Hypervsior in enabled/up state after host in available state and task - # clears - HYPERVISOR_UP = 300 - # Web service up in sudo sm-dump after host in available state and task - # clears - WEB_SERVICE_UP = 180 - PING_TIMEOUT = 60 - TIMEOUT_BUFFER = 2 - # subfunction go enabled/available after host admin/avail states go - # enabled/available - SUBFUNC_READY = 300 - SYSTEM_RESTORE = 3600 # System restore complete - SYSTEM_BACKUP = 1800 # system backup complete - BACKUP_COPY_USB = 600 - INSTALL_CLONE = 3600 - INSTALL_CLONE_STATUS = 60 - INSTALL_CONTROLLER = 2400 - INSTALL_LOAD = 3600 - POST_INSTALL_SCRIPTS = 3600 - CONFIG_CONTROLLER_TIMEOUT = 1800 - CEPH_MON_ADD_CONFIG = 300 - NODES_STATUS_READY = 7200 - - -class InstallTimeout: - # Host reaches enabled/available state after system host-unlock returned - CONTROLLER_UNLOCK = 9000 - CONFIG_CONTROLLER_TIMEOUT = 1800 - # REBOOT = 2000 # Host reaches enabled/available state after sudo - # reboot -f from host - UPGRADE = 7200 - WIPE_DISK_TIMEOUT = 30 - SYSTEM_RESTORE = 3600 # System restore complete - SYSTEM_BACKUP = 1800 # system backup complete - BACKUP_COPY_USB = 600 - INSTALL_CLONE = 3600 - INSTALL_CLONE_STATUS = 60 - INSTALL_CONTROLLER = 2400 - INSTALL_LOAD = 3600 - POST_INSTALL_SCRIPTS = 3600 - - -class VMTimeout: - STATUS_CHANGE = 300 - STATUS_VERIFY_RESIZE = 30 - LIVE_MIGRATE_COMPLETE = 240 - COLD_MIGRATE_CONFIRM = 600 - BOOT_VM = 1800 - DELETE = 180 - VOL_ATTACH = 60 - SSH_LOGIN = 90 - AUTO_RECOVERY = 600 - REBOOT = 180 - PAUSE = 180 - IF_ADD = 30 - REBUILD = 300 - DHCP_IP_ASSIGN = 30 - DHCP_RETRY = 500 - PING_VM = 200 - - -class VolumeTimeout: - STATUS_CHANGE = 2700 # Windows guest takes a long time - DELETE = 90 - - -class SysInvTimeout: - RETENTION_PERIOD_SAVED = 30 - RETENTION_PERIOD_MODIFY = 60 - DNS_SERVERS_SAVED = 30 - DNS_MODIFY = 60 - PARTITION_CREATE = 120 - PARTITION_DELETE = 120 - PARTITION_MODIFY = 120 - - -class CMDTimeout: - HOST_CPU_MODIFY = 600 - RESOURCE_LIST = 60 - REBOOT_VM = 60 - CPU_PROFILE_APPLY = 30 - - -class ImageTimeout: - CREATE = 1800 - STATUS_CHANGE = 
60 - DELETE = 120 - - -class EventLogTimeout: - HEARTBEAT_ESTABLISH = 300 - HEALTH_CHECK_FAIL = 60 - VM_REBOOT = 60 - NET_AGENT_NOT_RESPOND_CLEAR = 120 - - -class MTCTimeout: - KILL_PROCESS_HOST_CHANGE_STATUS = 40 - KILL_PROCESS_HOST_KEEP_STATUS = 20 - KILL_PROCESS_SWACT_NOT_START = 20 - KILL_PROCESS_SWACT_START = 40 - KILL_PROCESS_SWACT_COMPLETE = 40 - - -class CeilTimeout: - EXPIRE = 300 - - -class OrchestrationPhaseTimeout: - INITIAL = 20 - BUILD = 60 - ABORT = 7200 - APPLY = 86400 - - -class DCTimeout: - SYNC = 3600 # 60 minutes - SUBCLOUD_AUDIT = 600 # 4 minutes + 1 - PATCH_AUDIT = 240 # 3 minutes + 1 - - -class MiscTimeout: - # timeout for two audits. 'sudo ntpq' got pulled every 10 minutes in - # /var/log/user.log - NTPQ_UPDATE = 1260 - - -class K8sTimeout: - APP_UPLOAD = 300 - APP_APPLY = 600 diff --git a/automated-pytest-suite/consts/ubuntu_if_config.sh b/automated-pytest-suite/consts/ubuntu_if_config.sh deleted file mode 100644 index 22911e47..00000000 --- a/automated-pytest-suite/consts/ubuntu_if_config.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -# Ubuntu cloud-init user data script to be executed after ubuntu vm -# initialization - -sudo echo -e "auto eth1\niface eth1 inet dhcp\n\nauto eth2\niface eth2 inet dhcp" >> "/etc/network/interfaces" -sudo ifup eth1 -sudo ifup eth2 - -ip addr \ No newline at end of file diff --git a/automated-pytest-suite/keywords/ceilometer_helper.py b/automated-pytest-suite/keywords/ceilometer_helper.py deleted file mode 100644 index b84975cb..00000000 --- a/automated-pytest-suite/keywords/ceilometer_helper.py +++ /dev/null @@ -1,67 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -from consts.auth import Tenant -from utils import table_parser, cli -from utils.clients.ssh import ControllerClient -from utils.tis_log import LOG - - -def get_alarms(header='alarm_id', name=None, strict=False, - auth_info=Tenant.get('admin'), con_ssh=None): - """ - - Args: - header - name: - strict: - auth_info: - con_ssh: - - Returns: - - """ - - table_ = table_parser.table(cli.openstack('alarm list', - ssh_client=con_ssh, - auth_info=auth_info)[1], - combine_multiline_entry=True) - if name is None: - return table_parser.get_column(table_, header) - - return table_parser.get_values(table_, header, Name=name, strict=strict) - - -def get_events(event_type, limit=None, header='message_id', con_ssh=None, - auth_info=None, **filters): - """ - - Args: - event_type: - limit - header: - con_ssh: - auth_info: - - Returns: - - """ - args = '' - if limit: - args = '--limit {}'.format(limit) - - if event_type or filters: - if event_type: - filters['event_type'] = event_type - - extra_args = ['{}={}'.format(k, v) for k, v in filters.items()] - args += ' --filter {}'.format(';'.join(extra_args)) - - table_ = table_parser.table(cli.openstack('event list', args, - ssh_client=con_ssh, - auth_info=auth_info)[1]) - return table_parser.get_values(table_, header) diff --git a/automated-pytest-suite/keywords/check_helper.py b/automated-pytest-suite/keywords/check_helper.py deleted file mode 100644 index 932b6f1f..00000000 --- a/automated-pytest-suite/keywords/check_helper.py +++ /dev/null @@ -1,635 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - - -############################################################### -# Intended for check functions for test result verifications -# assert is used to fail the check -# LOG.tc_step is used log the info -# Should be called by test function directly -############################################################### -import re -import time -import copy - -from utils.tis_log import LOG -from utils.rest import Rest -from consts.auth import Tenant -from consts.stx import GuestImages, EventLogID -from keywords import host_helper, system_helper, vm_helper, common, \ - glance_helper, storage_helper - -SEP = '\n------------------------------------ ' - - -def check_topology_of_vm(vm_id, vcpus, prev_total_cpus=None, numa_num=None, - vm_host=None, cpu_pol=None, - cpu_thr_pol=None, expt_increase=None, min_vcpus=None, - current_vcpus=None, - prev_siblings=None, shared_vcpu=None, con_ssh=None, - guest=None): - """ - Check vm has the correct topology based on the number of vcpus, - cpu policy, cpu threads policy, number of numa nodes - - Check is done via vm-topology, nova host-describe, virsh vcpupin (on vm - host), nova-compute.log (on vm host), - /sys/devices/system/cpu//topology/thread_siblings_list (on vm) - - Args: - vm_id (str): - vcpus (int): number of vcpus specified in flavor - prev_total_cpus (float): such as 37.0000, 37.0625 - numa_num (int): number of numa nodes vm vcpus are on. Default is 1 if - unset in flavor. - vm_host (str): - cpu_pol (str): dedicated or shared - cpu_thr_pol (str): isolate, require, or prefer - expt_increase (int): expected total vcpu increase on vm host compared - to prev_total_cpus - min_vcpus (None|int): min vcpu flavor spec. vcpu scaling specific - current_vcpus (None|int): current number of vcpus. vcpu scaling specific - prev_siblings (list): list of siblings total. Usually used when - checking vm topology after live migration - con_ssh (SSHClient) - shared_vcpu (int): which vcpu is shared - guest (str|None): guest os. e.g., ubuntu_14. Default guest is assumed - when None. - - """ - LOG.info( - "------ Check topology of vm {} on controller, hypervisor and " - "vm".format( - vm_id)) - cpu_pol = cpu_pol if cpu_pol else 'shared' - - if vm_host is None: - vm_host = vm_helper.get_vm_host(vm_id, con_ssh=con_ssh) - - log_cores_siblings = host_helper.get_logcore_siblings(host=vm_host, - con_ssh=con_ssh) - - if prev_total_cpus is not None: - if expt_increase is None: - expt_increase = vcpus - - LOG.info( - "{}Check total vcpus for vm host is increased by {} via " - "'openstack hypervisor show'".format( - SEP, expt_increase)) - expt_used_vcpus = prev_total_cpus + expt_increase - end_time = time.time() + 70 - while time.time() < end_time: - post_hosts_cpus = host_helper.get_vcpus_for_computes( - hosts=vm_host, field='vcpus_used') - if expt_used_vcpus == post_hosts_cpus[vm_host]: - break - time.sleep(10) - else: - post_hosts_cpus = host_helper.get_vcpus_for_computes( - hosts=vm_host, field='used_now') - assert expt_used_vcpus == post_hosts_cpus[ - vm_host], "Used vcpus on host {} is not as expected. 
" \ - "Expected: {}; Actual: {}".format(vm_host, - expt_used_vcpus, - post_hosts_cpus[ - vm_host]) - - LOG.info( - "{}Check vm vcpus, pcpus on vm host via nova-compute.log and virsh " - "vcpupin".format(SEP)) - # Note: floating vm pcpus will not be checked via virsh vcpupin - vm_host_cpus, vm_siblings = _check_vm_topology_on_host( - vm_id, vcpus=vcpus, vm_host=vm_host, cpu_pol=cpu_pol, - cpu_thr_pol=cpu_thr_pol, - host_log_core_siblings=log_cores_siblings, - shared_vcpu=shared_vcpu) - - LOG.info( - "{}Check vm vcpus, siblings on vm via " - "/sys/devices/system/cpu//topology/thread_siblings_list". - format(SEP)) - check_sibling = True if shared_vcpu is None else False - _check_vm_topology_on_vm(vm_id, vcpus=vcpus, siblings_total=vm_siblings, - current_vcpus=current_vcpus, - prev_siblings=prev_siblings, guest=guest, - check_sibling=check_sibling) - - return vm_host_cpus, vm_siblings - - -def _check_vm_topology_on_host(vm_id, vcpus, vm_host, cpu_pol, cpu_thr_pol, - host_log_core_siblings=None, shared_vcpu=None, - shared_host_cpus=None): - """ - - Args: - vm_id (str): - vcpus (int): - vm_host (str): - cpu_pol (str): - cpu_thr_pol (str): - host_log_core_siblings (list|None): - shared_vcpu (int|None): - shared_host_cpus (None|list) - - Returns: None - - """ - if not host_log_core_siblings: - host_log_core_siblings = host_helper.get_logcore_siblings(host=vm_host) - - if shared_vcpu and not shared_host_cpus: - shared_cpus_ = host_helper.get_host_cpu_cores_for_function( - func='Shared', hostname=vm_host, thread=None) - shared_host_cpus = [] - for proc, shared_cores in shared_cpus_.items(): - shared_host_cpus += shared_cores - - LOG.info( - '======= Check vm topology from vm_host via: virsh vcpupin, taskset') - instance_name = vm_helper.get_vm_instance_name(vm_id) - - with host_helper.ssh_to_host(vm_host) as host_ssh: - vcpu_cpu_map = vm_helper.get_vcpu_cpu_map(host_ssh=host_ssh) - used_host_cpus = [] - vm_host_cpus = [] - vcpus_list = list(range(vcpus)) - for instance_name_, instance_map in vcpu_cpu_map.items(): - used_host_cpus += list(instance_map.values()) - if instance_name_ == instance_name: - for vcpu in vcpus_list: - vm_host_cpus.append(instance_map[vcpu]) - used_host_cpus = list(set(used_host_cpus)) - vm_siblings = None - # Check vm sibling pairs - if 'ded' in cpu_pol and cpu_thr_pol in ('isolate', 'require'): - if len(host_log_core_siblings[0]) == 1: - assert cpu_thr_pol != 'require', \ - "cpu_thread_policy 'require' must be used on a HT host" - vm_siblings = [[vcpu_] for vcpu_ in vcpus_list] - else: - vm_siblings = [] - for vcpu_index in vcpus_list: - vm_host_cpu = vm_host_cpus[vcpu_index] - for host_sibling in host_log_core_siblings: - if vm_host_cpu in host_sibling: - other_cpu = host_sibling[0] if \ - vm_host_cpu == host_sibling[1] else \ - host_sibling[1] - if cpu_thr_pol == 'require': - assert other_cpu in vm_host_cpus, \ - "'require' vm uses only 1 of the sibling " \ - "cores" - vm_siblings.append(sorted([vcpu_index, - vm_host_cpus.index( - other_cpu)])) - else: - assert other_cpu not in used_host_cpus, \ - "sibling core was not reserved for " \ - "'isolate' vm" - vm_siblings.append([vcpu_index]) - - LOG.info("{}Check vcpus for vm via sudo virsh vcpupin".format(SEP)) - vcpu_pins = host_helper.get_vcpu_pins_for_instance_via_virsh( - host_ssh=host_ssh, - instance_name=instance_name) - assert vcpus == len(vcpu_pins), \ - 'Actual vm cpus number - {} is not as expected - {} in sudo ' \ - 'virsh vcpupin'.format(len(vcpu_pins), vcpus) - - virsh_cpus_sets = [] - for vcpu_pin in vcpu_pins: - 
vcpu = int(vcpu_pin['vcpu']) - cpu_set = common.parse_cpus_list(vcpu_pin['cpuset']) - virsh_cpus_sets += cpu_set - if shared_vcpu is not None and vcpu == shared_vcpu: - assert len(cpu_set) == 1, \ - "shared vcpu is pinned to more than 1 host cpu" - assert cpu_set[0] in shared_host_cpus, \ - "shared vcpu is not pinned to shared host cpu" - - if 'ded' in cpu_pol: - assert set(vm_host_cpus) == set( - virsh_cpus_sets), "pinned cpus in virsh cpupin is not the " \ - "same as ps" - else: - assert set(vm_host_cpus) < set( - virsh_cpus_sets), "floating vm should be affined to all " \ - "available host cpus" - - LOG.info("{}Get cpu affinity list for vm via taskset -pc".format(SEP)) - ps_affined_cpus = \ - vm_helper.get_affined_cpus_for_vm(vm_id, - host_ssh=host_ssh, - vm_host=vm_host, - instance_name=instance_name) - assert set(ps_affined_cpus) == set( - virsh_cpus_sets), "Actual affined cpu in taskset is different " \ - "than virsh" - return vm_host_cpus, vm_siblings - - -def _check_vm_topology_on_vm(vm_id, vcpus, siblings_total, current_vcpus=None, - prev_siblings=None, guest=None, - check_sibling=True): - siblings_total_ = None - if siblings_total: - siblings_total_ = copy.deepcopy(siblings_total) - # Check from vm in /proc/cpuinfo and - # /sys/devices/.../cpu#/topology/thread_siblings_list - if not guest: - guest = '' - if not current_vcpus: - current_vcpus = int(vcpus) - - LOG.info( - '=== Check vm topology from within the vm via: /sys/devices/system/cpu') - actual_sibs = [] - vm_helper.wait_for_vm_pingable_from_natbox(vm_id) - with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh: - - win_expt_cores_per_sib = win_log_count_per_sibling = None - if 'win' in guest: - LOG.info( - "{}Check windows guest cores via wmic cpu get cmds".format(SEP)) - offline_cores_count = 0 - log_cores_count, win_log_count_per_sibling = \ - get_procs_and_siblings_on_windows(vm_ssh) - online_cores_count = present_cores_count = log_cores_count - else: - LOG.info( - "{}Check vm present|online|offline cores from inside vm via " - "/sys/devices/system/cpu/".format(SEP)) - present_cores, online_cores, offline_cores = \ - vm_helper.get_proc_nums_from_vm(vm_ssh) - present_cores_count = len(present_cores) - online_cores_count = len(online_cores) - offline_cores_count = len(offline_cores) - - assert vcpus == present_cores_count, \ - "Number of vcpus: {}, present cores: {}".format( - vcpus, present_cores_count) - assert current_vcpus == online_cores_count, \ - "Current vcpus for vm: {}, online cores: {}".format( - current_vcpus, online_cores_count) - - expt_total_cores = online_cores_count + offline_cores_count - assert expt_total_cores in [present_cores_count, 512], \ - "Number of present cores: {}. 
online+offline cores: {}".format( - vcpus, expt_total_cores) - - if check_sibling and siblings_total_ and online_cores_count == \ - present_cores_count: - expt_sibs_list = [[vcpu] for vcpu in - range(present_cores_count)] if not \ - siblings_total_ \ - else siblings_total_ - - expt_sibs_list = [sorted(expt_sibs_list)] - if prev_siblings: - # siblings_total may get modified here - expt_sibs_list.append(sorted(prev_siblings)) - - if 'win' in guest: - LOG.info("{}Check windows guest siblings via wmic cpu get " - "cmds".format(SEP)) - expt_cores_list = [] - for sib_list in expt_sibs_list: - win_expt_cores_per_sib = [len(vcpus) for vcpus in sib_list] - expt_cores_list.append(win_expt_cores_per_sib) - assert win_log_count_per_sibling in expt_cores_list, \ - "Expected log cores count per sibling: {}, actual: {}".\ - format(win_expt_cores_per_sib, win_log_count_per_sibling) - - else: - LOG.info( - "{}Check vm /sys/devices/system/cpu/[" - "cpu#]/topology/thread_siblings_list".format( - SEP)) - for cpu in ['cpu{}'.format(i) for i in - range(online_cores_count)]: - actual_sibs_for_cpu = \ - vm_ssh.exec_cmd( - 'cat /sys/devices/system/cpu/{}/topology/thread_' - 'siblings_list'.format(cpu), fail_ok=False)[1] - - sib_for_cpu = common.parse_cpus_list(actual_sibs_for_cpu) - if sib_for_cpu not in actual_sibs: - actual_sibs.append(sib_for_cpu) - - assert sorted( - actual_sibs) in expt_sibs_list, "Expt sib lists: {}, " \ - "actual sib list: {}". \ - format(expt_sibs_list, sorted(actual_sibs)) - - -def get_procs_and_siblings_on_windows(vm_ssh): - cmd = 'wmic cpu get {}' - - procs = [] - for param in ['NumberOfCores', 'NumberOfLogicalProcessors']: - output = vm_ssh.exec_cmd(cmd.format(param), fail_ok=False)[1].strip() - num_per_proc = [int(line.strip()) for line in output.splitlines() if - line.strip() - and not re.search('{}|x'.format(param), line)] - procs.append(num_per_proc) - procs = zip(procs[0], procs[1]) - log_procs_per_phy = [nums[0] * nums[1] for nums in procs] - total_log_procs = sum(log_procs_per_phy) - - LOG.info( - "Windows guest total logical cores: {}, logical_cores_per_phy_core: {}". - format(total_log_procs, log_procs_per_phy)) - return total_log_procs, log_procs_per_phy - - -def check_vm_vswitch_affinity(vm_id, on_vswitch_nodes=True): - vm_host, vm_numa_nodes = vm_helper.get_vm_host_and_numa_nodes(vm_id) - vswitch_cores_dict = host_helper.get_host_cpu_cores_for_function( - vm_host, func='vSwitch') - vswitch_procs = [proc for proc in vswitch_cores_dict if - vswitch_cores_dict[proc]] - if not vswitch_procs: - return - - if on_vswitch_nodes: - assert set(vm_numa_nodes) <= set( - vswitch_procs), "VM {} is on numa nodes {} instead of vswitch " \ - "numa nodes {}".format( - vm_id, vm_numa_nodes, vswitch_procs) - else: - assert not (set(vm_numa_nodes) & set( - vswitch_procs)), "VM {} is on vswitch numa node(s). 
VM numa " \ - "nodes: {}, vSwitch numa nodes: {}".format( - vm_id, vm_numa_nodes, vswitch_procs) - - -def check_fs_sufficient(guest_os, boot_source='volume'): - """ - Check if volume pool, image storage, and/or image conversion space is - sufficient to launch vm - Args: - guest_os (str): e.g., tis-centos-guest, win_2016 - boot_source (str): volume or image - - Returns (str): image id - - """ - LOG.info("Check if storage fs is sufficient to launch boot-from-{} vm " - "with {}".format(boot_source, guest_os)) - check_disk = True if 'win' in guest_os else False - cleanup = None if re.search( - 'ubuntu_14|{}'.format(GuestImages.TIS_GUEST_PATTERN), - guest_os) else 'function' - img_id = glance_helper.get_guest_image(guest_os, check_disk=check_disk, - cleanup=cleanup) - return img_id - - -def check_vm_files(vm_id, storage_backing, ephemeral, swap, vm_type, file_paths, - content, root=None, vm_action=None, - prev_host=None, post_host=None, disks=None, post_disks=None, - guest_os=None, - check_volume_root=False): - """ - Check the files on vm after specified action. This is to check the disks - in the basic nova matrix table. - Args: - vm_id (str): - storage_backing (str): local_image, local_lvm, or remote - root (int): root disk size in flavor. e.g., 2, 5 - ephemeral (int): e.g., 0, 1 - swap (int): e.g., 0, 512 - vm_type (str): image, volume, image_with_vol, vol_with_vol - file_paths (list): list of file paths to check - content (str): content of the files (assume all files have the same - content) - vm_action (str|None): live_migrate, cold_migrate, resize, evacuate, - None (expect no data loss) - prev_host (None|str): vm host prior to vm_action. This is used to - check if vm host has changed when needed. - post_host (None|str): vm host after vm_action. - disks (dict): disks that are returned from - vm_helper.get_vm_devices_via_virsh() - post_disks (dict): only used in resize case - guest_os (str|None): default guest assumed for None. 
e,g., ubuntu_16 - check_volume_root (bool): whether to check root disk size even if vm - is booted from image - - Returns: - - """ - final_disks = post_disks if post_disks else disks - final_paths = list(file_paths) - if not disks: - disks = vm_helper.get_vm_devices_via_virsh(vm_id=vm_id) - - eph_disk = disks.get('eph', {}) - if not eph_disk: - if post_disks: - eph_disk = post_disks.get('eph', {}) - swap_disk = disks.get('swap', {}) - if not swap_disk: - if post_disks: - swap_disk = post_disks.get('swap', {}) - - disk_check = 'no_loss' - if vm_action in [None, 'live_migrate']: - disk_check = 'no_loss' - elif vm_type == 'volume': - # boot-from-vol, non-live migrate actions - disk_check = 'no_loss' - if storage_backing == 'local_lvm' and (eph_disk or swap_disk): - disk_check = 'eph_swap_loss' - elif storage_backing == 'local_image' and vm_action == 'evacuate' and ( - eph_disk or swap_disk): - disk_check = 'eph_swap_loss' - elif storage_backing == 'local_image': - # local_image, boot-from-image, non-live migrate actions - disk_check = 'no_loss' - if vm_action == 'evacuate': - disk_check = 'local_loss' - elif storage_backing == 'local_lvm': - # local_lvm, boot-from-image, non-live migrate actions - disk_check = 'local_loss' - if vm_action == 'resize': - post_host = post_host if post_host else vm_helper.get_vm_host(vm_id) - if post_host == prev_host: - disk_check = 'eph_swap_loss' - - LOG.info("disk check type: {}".format(disk_check)) - loss_paths = [] - if disk_check == 'no_loss': - no_loss_paths = final_paths - else: - # If there's any loss, we must not have remote storage. And any - # ephemeral/swap disks will be local. - disks_to_check = disks.get('eph', {}) - # skip swap type checking for data loss since it's not a regular - # filesystem - # swap_disks = disks.get('swap', {}) - # disks_to_check.update(swap_disks) - - for path_ in final_paths: - # For tis-centos-guest, ephemeral disk is mounted to /mnt after - # vm launch. 
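# A path whose parent directory, str(path_).rsplit('/', 1)[0], is
# '/mnt' (e.g. '/mnt/test_file') therefore sits on the ephemeral disk,
# so the check below flags it as a loss candidate.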
- if str(path_).rsplit('/', 1)[0] == '/mnt': - loss_paths.append(path_) - break - - for disk in disks_to_check: - for path in final_paths: - if disk in path: - # We mount disk vdb to /mnt/vdb, so this is looking for - # vdb in the mount path - loss_paths.append(path) - break - - if disk_check == 'local_loss': - # if vm booted from image, then the root disk is also local disk - root_img = disks.get('root_img', {}) - if root_img: - LOG.info( - "Auto mount vm disks again since root disk was local with " - "data loss expected") - vm_helper.auto_mount_vm_disks(vm_id=vm_id, disks=final_disks) - file_name = final_paths[0].rsplit('/')[-1] - root_path = '/{}'.format(file_name) - loss_paths.append(root_path) - assert root_path in final_paths, \ - "root_path:{}, file_paths:{}".format(root_path, final_paths) - - no_loss_paths = list(set(final_paths) - set(loss_paths)) - - LOG.info("loss_paths: {}, no_loss_paths: {}, total_file_pahts: {}".format( - loss_paths, no_loss_paths, final_paths)) - res_files = {} - with vm_helper.ssh_to_vm_from_natbox(vm_id=vm_id, - vm_image_name=guest_os) as vm_ssh: - vm_ssh.exec_sudo_cmd('cat /etc/fstab') - vm_ssh.exec_sudo_cmd("mount | grep --color=never '/dev'") - - for file_path in loss_paths: - vm_ssh.exec_sudo_cmd('touch {}2'.format(file_path), fail_ok=False) - vm_ssh.exec_sudo_cmd('echo "{}" >> {}2'.format(content, file_path), - fail_ok=False) - - for file_path in no_loss_paths: - output = vm_ssh.exec_sudo_cmd('cat {}'.format(file_path), - fail_ok=False)[1] - res = '' if content in output else 'content mismatch' - res_files[file_path] = res - - for file, error in res_files.items(): - assert not error, "Check {} failed: {}".format(file, error) - - swap_disk = final_disks.get('swap', {}) - if swap_disk: - disk_name = list(swap_disk.keys())[0] - partition = '/dev/{}'.format(disk_name) - if disk_check != 'local_loss' and not disks.get('swap', {}): - mount_on, fs_type = storage_helper.mount_partition( - ssh_client=vm_ssh, disk=disk_name, - partition=partition, fs_type='swap') - storage_helper.auto_mount_fs(ssh_client=vm_ssh, fs=partition, - mount_on=mount_on, fs_type=fs_type) - - LOG.info("Check swap disk is on") - swap_output = vm_ssh.exec_sudo_cmd( - 'cat /proc/swaps | grep --color=never {}'.format(partition))[1] - assert swap_output, "Expect swapon for {}. Actual output: {}". \ - format(partition, vm_ssh.exec_sudo_cmd('cat /proc/swaps')[1]) - - LOG.info("Check swap disk size") - _check_disk_size(vm_ssh, disk_name=disk_name, expt_size=swap) - - eph_disk = final_disks.get('eph', {}) - if eph_disk: - LOG.info("Check ephemeral disk size") - eph_name = list(eph_disk.keys())[0] - _check_disk_size(vm_ssh, eph_name, expt_size=ephemeral * 1024) - - if root: - image_root = final_disks.get('root_img', {}) - root_name = '' - if image_root: - root_name = list(image_root.keys())[0] - elif check_volume_root: - root_name = list(final_disks.get('root_vol').keys())[0] - - if root_name: - LOG.info("Check root disk size") - _check_disk_size(vm_ssh, disk_name=root_name, - expt_size=root * 1024) - - -def _check_disk_size(vm_ssh, disk_name, expt_size): - partition = vm_ssh.exec_sudo_cmd( - 'cat /proc/partitions | grep --color=never "{}$"'.format(disk_name))[1] - actual_size = int( - int(partition.split()[-2].strip()) / 1024) if partition else 0 - expt_size = int(expt_size) - assert actual_size == expt_size, "Expected disk size: {}M. 
Actual: {}M".\ - format(expt_size, actual_size) - - -def check_alarms(before_alarms, timeout=300, - auth_info=Tenant.get('admin_platform'), con_ssh=None, - fail_ok=False): - after_alarms = system_helper.get_alarms(auth_info=auth_info, - con_ssh=con_ssh) - new_alarms = [] - check_interval = 5 - for item in after_alarms: - if item not in before_alarms: - alarm_id, entity_id = item.split('::::') - if alarm_id == EventLogID.CPU_USAGE_HIGH: - check_interval = 45 - elif alarm_id == EventLogID.NTP_ALARM: - # NTP alarm handling - LOG.info("NTP alarm found, checking ntpq stats") - host = entity_id.split('host=')[1].split('.ntp')[0] - system_helper.wait_for_ntp_sync(host=host, fail_ok=False, - auth_info=auth_info, - con_ssh=con_ssh) - continue - - new_alarms.append((alarm_id, entity_id)) - - res = True - remaining_alarms = None - if new_alarms: - LOG.info("New alarms detected. Waiting for new alarms to clear.") - res, remaining_alarms = \ - system_helper.wait_for_alarms_gone(new_alarms, - fail_ok=True, - timeout=timeout, - check_interval=check_interval, - auth_info=auth_info, - con_ssh=con_ssh) - - if not res: - msg = "New alarm(s) found and did not clear within {} seconds. " \ - "Alarm IDs and Entity IDs: {}".format(timeout, remaining_alarms) - LOG.warning(msg) - if not fail_ok: - assert res, msg - - return res, remaining_alarms - - -def check_rest_api(): - LOG.info("Check sysinv REST API") - sysinv_rest = Rest('sysinv', platform=True) - resource = '/controller_fs' - status_code, text = sysinv_rest.get(resource=resource, auth=True) - message = "Retrieved: status_code: {} message: {}" - LOG.debug(message.format(status_code, text)) - - LOG.info("Check status_code of 200 is received") - message = "Expected status_code of 200 - received {} and message {}" - assert status_code == 200, message.format(status_code, text) diff --git a/automated-pytest-suite/keywords/cinder_helper.py b/automated-pytest-suite/keywords/cinder_helper.py deleted file mode 100644 index b147399b..00000000 --- a/automated-pytest-suite/keywords/cinder_helper.py +++ /dev/null @@ -1,1765 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -import re -import os -import random -import time -import math - -from consts.auth import Tenant -from consts.stx import GuestImages, Prompt -from consts.timeout import VolumeTimeout -from keywords import common, glance_helper -from testfixtures.fixture_resources import ResourceCleanup -from utils import table_parser, cli, exceptions -from utils.clients.ssh import ControllerClient -from utils.tis_log import LOG - - -def get_any_volume(status='available', bootable=True, auth_info=None, - con_ssh=None, new_name=None, cleanup=None): - """ - Get an id of any volume that meets the criteria. Create one if none exists. 
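    Returns a (code, volume_id) tuple: code 0 when an existing volume is
    reused, 1 when a new volume had to be created.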
- - Args: - status (str): - bootable (str|bool): - auth_info (dict): - con_ssh (SSHClient): - new_name (str): This is only used if no existing volume found and new - volume needs to be created - cleanup (str|None) - - Returns: - str: volume id - - """ - volumes = get_volumes(status=status, bootable=bootable, auth_info=auth_info, - con_ssh=con_ssh) - if volumes: - return 0, random.choice(volumes) - else: - return 1, create_volume(name=new_name, bootable=bootable, - auth_info=auth_info, con_ssh=con_ssh, - cleanup=cleanup)[1] - - -def get_volumes(vols=None, full_name=None, project=None, project_domain=None, - user=None, user_domain=None, all_=True, - long=True, name=None, name_strict=False, vol_type=None, - size=None, status=None, attached_vm=None, - bootable=None, field='ID', auth_info=Tenant.get('admin'), - con_ssh=None): - """ - Return a list of volume ids based on the given criteria - - Args: - vols (list or str): - full_name - project - project_domain - user - user_domain - all_ (bool) - long (bool) - name (str): post execution table filters - name_strict (bool): - vol_type (str): - size (str): - status:(str|list|tuple) - attached_vm (str): - bootable (str|bool): true or false - field (str) - auth_info (dict): could be Tenant.get('admin'),Tenant.get('tenant1'), - Tenant.get('tenant2') - con_ssh (str): - - Returns (list): a list of volume ids based on the given criteria - """ - args_dict = { - '--long': long, - '--a': all_, - '--name': full_name, - '--project': project, - '--project-domain': project_domain, - '--user': user, - '--user-domain': user_domain, - } - args = common.parse_args(args_dict) - table_ = table_parser.table( - cli.openstack('volume list', args, ssh_client=con_ssh, - auth_info=auth_info)[1]) - - if name is not None: - table_ = table_parser.filter_table(table_, strict=name_strict, - **{'Name': name}) - if bootable is not None: - bootable = str(bootable).lower() - filters = { - 'ID': vols, - 'Type': vol_type, - 'Size': size, - 'Attached to': attached_vm, - 'Status': status, - 'Bootable': bootable - } - filters = {k: v for k, v in filters.items() if v is not None} - if filters: - table_ = table_parser.filter_table(table_, **filters) - - return table_parser.get_column(table_, field) - - -def get_volume_snapshot_values(vol_snapshot, fields, strict=True, con_ssh=None, - auth_info=None): - """ - Get volume snapshot values for given fields via openstack volume snapshot - show - Args: - vol_snapshot (str): - fields (list|str|tuple): - strict (bool): - con_ssh: - auth_info: - - Returns (list): values for given fields - - """ - - if isinstance(fields, str): - fields = [fields] - - table_ = table_parser.table( - cli.openstack('volume snapshot show', vol_snapshot, ssh_client=con_ssh, - auth_info=auth_info)[1]) - vals = [] - for field in fields: - vals.append( - table_parser.get_value_two_col_table(table_, field, strict=strict)) - - return vals - - -def get_volume_snapshot_list(vol_snaps=None, name=None, name_strict=False, - size=None, status=None, volume=None, - field='ID', auth_info=Tenant.get('admin'), - con_ssh=None): - """ - Return a list of volume ids based on the given criteria - - Args: - vol_snaps (list or str): - name (str): - name_strict (bool): - size (str): - status:(str) - volume (str): - field - auth_info (dict): could be Tenant.get('admin'),Tenant.get('tenant1'), - Tenant.get('tenant2') - con_ssh (str): - - Returns (list): a list of volume snapshot ids based on the given criteria - - """ - optional_args = { - 'ID': vol_snaps, - 'Size': size, - 'Status': status, - 
'Volume': volume, - } - - criteria = {} - for key, value in optional_args.items(): - if value is not None: - criteria[key] = value - - table_ = table_parser.table( - cli.openstack('volume snapshot list --a --long', ssh_client=con_ssh, - auth_info=auth_info)[1]) - - if name: - table_ = table_parser.filter_table(table_, strict=name_strict, - **{'Name': name}) - - return table_parser.get_values(table_, field, **criteria) - - -def get_volumes_attached_to_vms(volumes=None, vms=None, header='ID', - con_ssh=None, auth_info=Tenant.get('admin')): - """ - Filter out the volumes that are attached to a vm. - Args: - volumes (list or str): list of volumes ids to filter out from. When - None, filter from all volumes - vms (list or str): get volumes attached to given vm(s). When None, - filter volumes attached to any vm - header (str): header of the column in the table to return - con_ssh (SSHClient): - auth_info (dict): - - Returns (list): a list of values from the column specified or [] if no - match found - - """ - table_ = table_parser.table( - cli.openstack('volume list --a', ssh_client=con_ssh, - auth_info=auth_info)[1]) - - # Filter from given volumes if provided - if volumes is not None: - table_ = table_parser.filter_table(table_, ID=volumes) - - # Filter from given vms if provided - if vms: - table_ = table_parser.filter_table(table_, strict=False, - **{'Attached to': vms}) - # Otherwise filter out volumes attached to any vm - else: - table_ = table_parser.filter_table(table_, strict=False, regex=True, - **{'Attached to': r'.*\S.*'}) - - return table_parser.get_column(table_, header) - - -def create_volume(name=None, description=None, source_type='image', - source_id=None, vol_type=None, size=None, - avail_zone=None, properties=None, hints=None, - multi_attach=None, bootable=True, read_only=None, - consistency_group=None, fail_ok=False, auth_info=None, - con_ssh=None, - avail_timeout=VolumeTimeout.STATUS_CHANGE, guest_image=None, - cleanup=None): - """ - Create a volume with given criteria. - - Args: - name (str|None): display name of the volume - description (str|None): description of the volume - source_type (str|None): image, snapshot, volume, or None. - source_id (str|None): source volume id to create volume from - vol_type (str|None): volume type such as 'raw' - size (int|None): volume size in GBs - avail_zone (str|None): availability zone - properties (str|list|tuple|dict|None): metadata key and value pairs - '[ [ ...]]' - bootable (bool|None): When False, the source id params will be - ignored and non-bootable volume will be created - read_only (bool|None) - hints (str|list|tuple|dict|None) - multi_attach - consistency_group (str|None) - fail_ok (bool): - auth_info (dict): - con_ssh (SSHClient): - guest_image (str): guest image name if image_id unspecified. valid - values: cgcs-guest, ubuntu, centos_7, etc - avail_timeout (int) - cleanup (None|str): teardown level - - Returns (tuple): (return_code, volume_id or err msg) - (-1, existing_vol_id) # returns existing volume_id instead of - creating a new one. Applies when rtn_exist=True. - (0, vol_id) # Volume created successfully and in available state. - (1, ) # Create volume cli rejected with sterr - (2, vol_id) # volume created, but not in available state. - (3, vol_id]: if volume created, but not in given bootable state. - - Notes: - snapshot_id > source_vol_id > image_id if more than one source ids - are provided. 
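    When no size is given for an image-backed volume, the size is taken
    from the known guest image's catalogued size, otherwise derived from
    the glance image size rounded up to GiB with a 2 GiB floor (see the
    math.ceil call below).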
- """ - valid_cleanups = ('module', 'session', 'function', 'class', None) - if cleanup not in valid_cleanups: - raise ValueError( - "Invalid scope provided. Choose from: {}".format(valid_cleanups)) - - valid_source_types = (None, 'image', 'volume', 'source', 'snapshot') - if source_type not in valid_source_types: - raise ValueError( - "Invalid source type specified. Choose from: {}".format( - valid_source_types)) - - if source_type and not source_id: - if source_type != 'image': - raise ValueError( - "source_id has to be provided for {}".format(source_type)) - - # Get glance image id as source_id based on guest_image value - guest_image = guest_image if guest_image else GuestImages.DEFAULT[ - 'guest'] - source_id = glance_helper.get_image_id_from_name(guest_image, - strict=True, - auth_info=auth_info, - con_ssh=con_ssh) - if size is None: - size = GuestImages.IMAGE_FILES[guest_image][1] - - if size is None: - # size is required if source_type is not volume or snapshot - if not source_type: - size = 2 - elif source_type == 'image': - if guest_image: - size = GuestImages.IMAGE_FILES[guest_image][1] - else: - # check glance image size via openstack image show to - # determine the volume size - image_size = glance_helper.get_image_values(source_id, 'size', - auth_info=auth_info, - con_ssh=con_ssh)[0] - size = max(2, math.ceil(image_size / math.pow(1024, 3))) - - if not name: - if not auth_info: - auth_info = Tenant.get_primary() - name = 'vol-{}'.format(auth_info['tenant']) - existing_volumes = get_volumes(field='Name', auth_info=auth_info, - con_ssh=con_ssh) - name = common.get_unique_name(name, resource_type='volume', - existing_names=existing_volumes) - - optional_args = {'--size': size, - '--description': description, - '--type': vol_type, - '--availability-zone': avail_zone, - '--consistency-group': consistency_group, - '--property': properties, - '--hint': hints, - '--multi-attach': multi_attach, - '--bootable': True if bootable else None, - '--non-bootable': True if bootable is False else None, - '--read-only': True if read_only else None, - '--read-write': True if read_only is False else None, - } - if source_type: - source_type = 'source' if 'volume' in source_type else source_type - optional_args['--{}'.format(source_type)] = source_id - - args = '{} {}'.format(common.parse_args(optional_args, repeat_arg=True), - name) - LOG.info("Creating Volume with args: {}".format(args)) - exit_code, cmd_output = cli.openstack('volume create', args, - ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info) - - table_ = table_parser.table(cmd_output) - volume_id = table_parser.get_value_two_col_table(table_, 'id') - if cleanup and volume_id: - ResourceCleanup.add('volume', volume_id, scope=cleanup) - - if exit_code > 0: - return 1, cmd_output - - LOG.info("Post action check started for create volume.") - if not wait_for_volume_status(volume=volume_id, status='available', - auth_info=auth_info, fail_ok=fail_ok, - timeout=avail_timeout): - LOG.warning( - "Volume {} did not reach available state within {}s after " - "creation".format( - name, avail_timeout)) - return 2, volume_id - - LOG.info("Volume is created and in available state: {}".format(volume_id)) - return 0, volume_id - - -def get_volume_show_values(volume, fields, con_ssh=None, - auth_info=Tenant.get('admin')): - """ - Get values for given cinder volume via openstack volume show - Args: - volume: - fields (str|tuple|list): - con_ssh: - auth_info: - - Returns (list): - - """ - if not volume: - raise ValueError("Volume is not 
provided.") - - if isinstance(fields, str): - fields = (fields,) - - table_ = table_parser.table( - cli.openstack('volume show', volume, ssh_client=con_ssh, - auth_info=auth_info)[1]) - vals = [] - for field in fields: - field = field.lower() - val = table_parser.get_value_two_col_table(table_, field=field, - merge_lines=True) - if field == 'properties': - val = table_parser.convert_value_to_dict(val) - elif val and (field in ('attachments', 'volume_image_metadata') or - val.lower() in ('true', 'false', 'none')): - try: - LOG.info('val: {}'.format(val)) - val = eval( - val.replace('true', 'True').replace('none', 'None').replace( - 'false', 'False')) - except (NameError, SyntaxError): - pass - vals.append(val) - - return vals - - -def wait_for_volume_status(volume, status='available', - timeout=VolumeTimeout.STATUS_CHANGE, fail_ok=True, - check_interval=5, con_ssh=None, auth_info=None): - """ - - Args: - volume (str): - status (str/list): - timeout (int): - fail_ok (bool): - check_interval (int): - con_ssh (str): - auth_info (dict): - - Returns: - True if the status of the volume is same as the status(str) that was - passed into the function \n - false if timed out or otherwise - - """ - return __wait_for_vol_status(volume, is_snapshot=False, status=status, - timeout=timeout, fail_ok=fail_ok, - check_interval=check_interval, con_ssh=con_ssh, - auth_info=auth_info) - - -def wait_for_vol_snapshot_status(vol_snapshot, status='available', - timeout=VolumeTimeout.STATUS_CHANGE, - fail_ok=False, - check_interval=5, con_ssh=None, - auth_info=None): - """ - Wait for cinder volume or volume snapshot to reach given state - Args: - vol_snapshot (str): - status (str/list): - timeout (int): - fail_ok (bool): - check_interval (int): - con_ssh (str): - auth_info (dict): - - Returns: - True if the status of the volume is same as the status(str) that was - passed into the function \n - false if timed out or otherwise - - """ - return __wait_for_vol_status(vol_snapshot, is_snapshot=True, status=status, - timeout=timeout, - fail_ok=fail_ok, check_interval=check_interval, - con_ssh=con_ssh, auth_info=auth_info) - - -def __wait_for_vol_status(volume, is_snapshot=False, status='available', - timeout=VolumeTimeout.STATUS_CHANGE, - fail_ok=False, check_interval=5, con_ssh=None, - auth_info=None): - if isinstance(status, str): - status = (status,) - - vol_str = 'snapshot ' if is_snapshot else '' - LOG.info("Waiting for cinder volume {}{} status: {}".format(vol_str, volume, - status)) - end_time = time.time() + timeout - current_status = prev_status = None - - func = get_volume_snapshot_values if is_snapshot else get_volume_show_values - - while time.time() < end_time: - current_status = func( - volume, fields='status', con_ssh=con_ssh, auth_info=auth_info)[0] - if current_status in status: - LOG.info("Volume {}{} is in {} state".format(vol_str, volume, - current_status)) - return True - elif current_status == 'error': - msg = 'Volume {}{} is in error status'.format(vol_str, volume) - LOG.warning(msg) - if fail_ok: - return False - raise exceptions.VolumeError(msg) - elif current_status != prev_status: - LOG.info("Volume {}status is: {}".format(vol_str, current_status)) - prev_status = current_status - - time.sleep(check_interval) - else: - msg = "Timed out waiting for volume {}{} status to reach status: {}. " \ - "Actual status: {}". 
\ - format(vol_str, volume, status, current_status) - LOG.warning(msg) - if fail_ok: - return False - raise exceptions.TimeoutException(msg) - - -def get_vol_snapshots(status='available', volume=None, vol_id=None, name=None, - size=None, field='ID', - con_ssh=None, auth_info=None): - """ - Get one volume snapshot id that matches the given criteria. - - Args: - status (str): snapshot status. e.g., 'available', 'in use' - volume (str): Name of the volume the snapshot created from - vol_id (str): volume id the snapshot was created from - name (str): snapshot name - size (int): - field (str) - con_ssh (SSHClient): - auth_info (dict): - - Returns: - A string of snapshot id. Return None if no matching snapshot found. - - """ - table_ = table_parser.table( - cli.openstack('snapshot list', ssh_client=con_ssh, auth_info=auth_info)[ - 1]) - if size is not None: - size = str(size) - - if vol_id and not volume: - volume = get_volumes(vols=vol_id, field='Name')[0] - - possible_args = { - 'status': status, - "Volume": volume, - 'Status': status, - 'name': name, - 'Size': size - } - - args_ = {} - for key, val in possible_args.items(): - if val: - args_[key] = val - - return table_parser.get_values(table_, field, **args_) - - -def _wait_for_volumes_deleted(volumes, timeout=VolumeTimeout.DELETE, - fail_ok=True, - check_interval=3, con_ssh=None, - auth_info=Tenant.get('admin')): - """ - check if a specific field still exist in a specified column for - cinder list - - Args: - volumes(list or str): ids of volumes - timeout (int): - fail_ok (bool): - check_interval (int): - con_ssh: - auth_info (dict): - - Returns (tuple): (result(boot), volumes_deleted(list)) - - """ - if isinstance(volumes, str): - volumes = [volumes] - - vols_to_check = list(volumes) - end_time = time.time() + timeout - while time.time() < end_time: - existing_vols = get_volumes(long=False, auth_info=auth_info, - con_ssh=con_ssh) - vols_to_check = list(set(existing_vols) & set(vols_to_check)) - if not vols_to_check: - return True, list(volumes) - - time.sleep(check_interval) - else: - if fail_ok: - return False, list(set(volumes) - set(vols_to_check)) - raise exceptions.TimeoutException( - "Timed out waiting for volume(s) to be removed from openstack " - "volume list: " - "{}.".format(vols_to_check)) - - -def delete_volumes(volumes=None, fail_ok=False, timeout=VolumeTimeout.DELETE, - check_first=True, con_ssh=None, - auth_info=Tenant.get('admin')): - """ - Delete volume(s). - - Args: - volumes (list|str): ids of the volumes to delete. If None, - all available volumes under given Tenant will be - deleted. If given Tenant is admin, available volumes for all - tenants will be deleted. - fail_ok (bool): True or False - timeout (int): CLI timeout and waiting for volumes disappear timeout - in seconds. - check_first (bool): Whether to check volumes existence before attempt - to delete - con_ssh (SSHClient): - auth_info (dict): - - Returns (tuple): (rtn_code (int), msg (str)) - (-1, "No volume to delete. Do nothing.") # No volume given and no - volume exists on system for given tenant - (-1, ""None of the given volume(s) exist on system. Do nothing."") - # None of the given volume(s) exists on - system for given tenant - (0, "Volume(s) deleted successfully") # volume is successfully - deleted. - (1, ) # Delete volume cli returns stderr - (2, "Delete request(s) accepted but some volume(s) did not disappear - within seconds".) - (3, "Delete request(s) rejected and post check failed for accepted - request(s). 
\nCLI error: " - - """ - if volumes is None: - volumes = get_volumes(status=('available', 'error'), - auth_info=auth_info, con_ssh=con_ssh) - - LOG.info("Deleting volume(s): {}".format(volumes)) - - if not volumes: - msg = "No volume to delete. Do nothing." - LOG.info(msg) - return -1, msg - - if isinstance(volumes, str): - volumes = [volumes] - volumes = list(volumes) - - if check_first: - vols_to_del = get_volumes(vols=volumes, auth_info=auth_info, - con_ssh=con_ssh) - if not vols_to_del: - msg = "None of the given volume(s) exist on system. Do nothing." - LOG.info(msg) - return -1, msg - - if not vols_to_del == volumes: - LOG.info( - "Some volume(s) don't exist. Given volumes: {}. Volumes to " - "delete: {}.". - format(volumes, vols_to_del)) - else: - vols_to_del = volumes - - vols_to_del_str = ' '.join(vols_to_del) - - LOG.debug("Volumes to delete: {}".format(vols_to_del)) - exit_code, cmd_output = cli.openstack('volume delete', vols_to_del_str, - ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info, timeout=timeout) - - vols_to_check = [] - if exit_code == 1: - for vol in vols_to_del: - # if cinder delete on a specific volume ran successfully, then it - # has no output regarding that vol - if vol not in cmd_output: - vols_to_check.append(vol) - else: - vols_to_check = vols_to_del - - LOG.info("Waiting for volumes to be removed from cinder list: {}".format( - vols_to_check)) - all_deleted, vols_deleted = _wait_for_volumes_deleted(vols_to_check, - fail_ok=True, - con_ssh=con_ssh, - auth_info=auth_info, - timeout=timeout) - - if exit_code == 1: - if all_deleted: - if fail_ok: - return 1, cmd_output - raise exceptions.CLIRejected(cmd_output) - else: - msg = "Delete request(s) rejected and post check failed for " \ - "accepted request(s). \nCLI error: {}". 
\ - format(cmd_output) - if fail_ok: - LOG.warning(msg) - return 3, msg - raise exceptions.VolumeError(msg) - - if not all_deleted: - msg = "Delete request(s) accepted but some volume(s) did not " \ - "disappear within {} seconds".format(timeout) - if fail_ok: - LOG.warning(msg) - return 2, msg - raise exceptions.VolumeError(msg) - - LOG.info("Volume(s) are successfully deleted: {}".format(vols_to_check)) - return 0, "Volume(s) deleted successfully" - - -def delete_volume_snapshots(snapshots=None, force=False, check_first=True, - fail_ok=False, auth_info=Tenant.get('admin'), - con_ssh=None): - """ - Delete given volume snapshot via cinder snapshot-delete - - Args: - snapshots (str|list): - force (bool): - check_first (bool): - fail_ok (bool): - auth_info (dict): - con_ssh (SSHClient): - - Returns (tuple): - (0, volume snapshot is successfully deleted) - (1, ) - (2, volume snapshot still exists in cinder qos-list - after deletion) - - """ - - if not snapshots: - snapshots_to_del = get_volume_snapshot_list(auth_info=auth_info) - else: - snapshots_to_del = [snapshots] if isinstance(snapshots, str) else list( - snapshots) - if check_first: - snapshots_to_del = list(set(snapshots_to_del) & set( - get_volume_snapshot_list(auth_info=auth_info))) - - if not snapshots_to_del: - msg = "No volume snapshot to delete or provided snapshot(s) not " \ - "exist on system" - LOG.info(msg) - return -1, msg - - args_ = '{}{}'.format('--force ' if force else '', - ' '.join(snapshots_to_del)) - code, output = cli.openstack('volume snapshot delete', args_, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - - if code == 1: - return code, output - - post_vol_snap_list = get_volume_snapshot_list(auth_info=auth_info) - undeleted_snapshots = list(set(snapshots_to_del) & set(post_vol_snap_list)) - if undeleted_snapshots: - err_msg = "Volume snapshot {} still exists in cinder snapshot-list " \ - "after deletion".format(undeleted_snapshots) - if fail_ok: - LOG.warning(err_msg) - return 2, err_msg - else: - raise exceptions.CinderError(err_msg) - - succ_msg = "Volume snapshot(s) successfully deleted: {}".format( - snapshots_to_del) - LOG.info(succ_msg) - return 0, succ_msg - - -def create_volume_qos(qos_name=None, consumer=None, field='id', fail_ok=False, - auth_info=Tenant.get('admin'), con_ssh=None, **specs): - """ - Create volume QoS with given name and specs - - Args: - qos_name (str): - fail_ok (bool): - consumer (str): Valid consumer of QoS specs are: ['front-end', - 'back-end', 'both'] - field (str) - auth_info (dict): - con_ssh (SSHClient): - **specs: QoS specs - format: **{: , : } - - Returns (tuple): - (0, QoS created successfully with specs: ) - (1, ) - - """ - if not qos_name: - qos_name = 'vol_qos' - - qos_name = common.get_unique_name(qos_name, - get_volume_qos_list(field='name'), - resource_type='qos') - args_dict = { - '--consumer': consumer, - '--property': specs, - } - args_ = common.parse_args(args_dict, repeat_arg=True) - - LOG.info("Creating QoS {} with args: {}".format(qos_name, args_)) - args_ += ' {}'.format(qos_name) - code, output = cli.openstack('volume qos create', args_, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - - if code > 0: - return 1, output - - qos_tab = table_parser.table(output) - qos_value = table_parser.get_value_two_col_table(qos_tab, field) - - LOG.info( - "QoS {} created successfully with specs: {}".format(qos_name, specs)) - return 0, qos_value - - -def delete_volume_qos(qos_ids, force=False, check_first=True, fail_ok=False, - 
auth_info=Tenant.get('admin'), - con_ssh=None): - """ - Delete given list of QoS' - - Args: - qos_ids (list|str|tuple): - force (bool): - check_first (bool): - fail_ok (bool): - auth_info (dict): - con_ssh (SSHClient): - - Returns: - - """ - if isinstance(qos_ids, str): - qos_ids = [qos_ids] - - qos_ids_to_del = list(qos_ids) - if check_first: - existing_qos_list = get_volume_qos_list(auth_info=auth_info, - con_ssh=con_ssh) - qos_ids_to_del = list(set(existing_qos_list) & set(qos_ids)) - if not qos_ids_to_del: - msg = "None of the QoS specs {} exist in cinder qos-list. Do " \ - "nothing.".format(qos_ids) - LOG.info(msg) - return -1, msg - - rejected_list = [] - for qos in qos_ids_to_del: - args = qos if force is None else '--force {} {}'.format(force, qos) - code, output = cli.openstack('volume qos delete', args, - ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info) - if code > 0: - rejected_list.append(qos) - - qos_list_to_check = list(set(qos_ids) - set(rejected_list)) - - undeleted_list = [] - if qos_list_to_check: - undeleted_list = \ - wait_for_qos_deleted(qos_ids=qos_list_to_check, fail_ok=fail_ok, - con_ssh=con_ssh, - auth_info=auth_info)[1] - - if rejected_list or undeleted_list: - reject_str = ' Deletion rejected volume QoS: {}.'.format( - rejected_list) if rejected_list else '' - undeleted_str = ' Volume QoS still exists after deletion: {}.'.format( - undeleted_list) if undeleted_list else '' - err_msg = "Some QoS's failed to delete.{}{}".format(reject_str, - undeleted_str) - LOG.warning(err_msg) - if fail_ok: - return 1, err_msg - else: - raise exceptions.CinderError(err_msg) - - succ_msg = "QoS's successfully deleted: {}".format(qos_ids) - LOG.info(succ_msg) - return 0, succ_msg - - -def wait_for_qos_deleted(qos_ids, timeout=10, check_interval=1, fail_ok=False, - auth_info=Tenant.get('admin'), con_ssh=None): - """ - Wait for given list of QoS to be gone from cinder qos-list - Args: - qos_ids (list): - timeout (int): - check_interval (int): - auth_info (dict) - fail_ok (bool): - con_ssh (SSHClient): - - Returns (tuple): - (True, []) All given QoS ids are gone from cinder qos-list - (False, [undeleted_qos_list]) Some given QoS' still exist in - cinder qos-list - - """ - LOG.info("Waiting for QoS' to be deleted from system: {}".format(qos_ids)) - if isinstance(qos_ids, str): - qos_ids = (qos_ids,) - - qos_undeleted = list(qos_ids) - end_time = time.time() + timeout - - while time.time() < end_time: - existing_qos_list = get_volume_qos_list(con_ssh=con_ssh, - auth_info=auth_info) - qos_undeleted = list(set(existing_qos_list) & set(qos_undeleted)) - - if not qos_undeleted: - msg = "QoS' all gone from 'openstack volume qos list': {}".format( - qos_ids) - LOG.info(msg) - return True, [] - - time.sleep(check_interval) - - err_msg = "Timed out waiting for QoS' to be gone from cinder qos-list: " \ - "{}".format(qos_undeleted) - LOG.warning(err_msg) - if fail_ok: - return False, qos_undeleted - else: - raise exceptions.CinderError(err_msg) - - -def create_volume_type(name=None, public=None, project=None, - project_domain=None, field='id', fail_ok=False, - auth_info=Tenant.get('admin'), con_ssh=None, - **properties): - """ - Create a volume type with given name - - Args: - name (str|None): name for the volume type - public (bool|None): - project (str|None) - project_domain (str|None) - field (str): 'id' or 'name' - fail_ok (bool): - auth_info (dict): - con_ssh (SSHClient): - - Returns (tuple): - (0, ) - volume type created successfully - (1, ) - cli rejected - (2, ) - volume type 
public flag is not as expected - - """ - - if not name: - name = 'vol_type' - name = common.get_unique_name(name, get_volume_types(field='Name')) - LOG.info("Creating volume type {}".format(name)) - - args_dict = { - '--public': True if public else None, - '--private': True if public is False else None, - '--property': properties, - '--project': project, - '--project-domain': project_domain, - } - - args_ = ' '.join((common.parse_args(args_dict, repeat_arg=True), name)) - code, output = cli.openstack('volume type create', args_, - ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info) - if code == 1: - return 1, output - - table_ = table_parser.table(output) - vol_type = table_parser.get_value_two_col_table(table_, field) - - LOG.info("Volume type {} is created successfully".format(vol_type)) - return 0, vol_type - - -def delete_volume_types(vol_types, check_first=True, fail_ok=False, - auth_info=Tenant.get('admin'), con_ssh=None): - """ - Delete given volume type - - Args: - vol_types (list|str|tuple): volume type ID(s) to delete - check_first (bool): - fail_ok (bool): - auth_info (dict): - con_ssh (SSHClient): - - Returns (tuple): - (-1, None of the volume types exist in cinder qos-list. Do - nothing.) - (0, Volume types successfully deleted: ) - (1, ) - (2, Volume types delete rejected: ; volume types still in cinder - type-list after deletion: ) - - """ - - LOG.info("Delete volume types started") - if isinstance(vol_types, str): - vol_types = (vol_types,) - - vol_types_to_del = list(vol_types) - if check_first: - existing_vol_types = get_volume_types(auth_info=auth_info, - con_ssh=con_ssh) - vol_types_to_del = list(set(existing_vol_types) & set(vol_types)) - if not vol_types_to_del: - msg = "None of the volume types {} exist in cinder qos-list. 
Do " \ - "nothing.".format(vol_types) - LOG.info(msg) - return -1, msg - - args = ' '.join(vol_types_to_del) - code, output = cli.openstack('volume type delete', args, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - if code > 1: - return 1, output - - LOG.info("Check volume types are gone from 'openstack volume type list'") - post_vol_types = get_volume_types(auth_info=auth_info, con_ssh=con_ssh) - types_undeleted = list(set(post_vol_types) & set(vol_types_to_del)) - - if types_undeleted: - err_msg = "Volume type(s) still in exist after deletion: {}".format( - types_undeleted) - LOG.warning(err_msg) - if fail_ok: - return 2, err_msg - else: - raise exceptions.CinderError(err_msg) - - succ_msg = "Volume types successfully deleted: {}".format(vol_types) - LOG.info(succ_msg) - return 0, succ_msg - - -def get_volume_types(long=False, ids=None, public=None, name=None, strict=True, - field='ID', con_ssh=None, - auth_info=Tenant.get('admin')): - """ - Get cinder volume types via openstack volume type list - Args: - long (bool) - ids (str|list|tuple|None): - public: - name: - strict: - field (str|list|tuple): - con_ssh: - auth_info: - - Returns (list): - - """ - args = '--long' if long else '' - table_ = table_parser.table( - cli.openstack('volume type list', args, ssh_client=con_ssh, - auth_info=auth_info)[1]) - - filters = {} - if ids: - filters['ID'] = ids - if public is not None: - filters['Is Public'] = public - - if filters: - table_ = table_parser.filter_table(table_, **filters) - - if name is not None: - table_ = table_parser.filter_table(table_, strict=strict, - **{'Name': name}) - - return table_parser.get_multi_values(table_, field) - - -def get_volume_qos_list(field='id', qos_id=None, name=None, consumer=None, - strict=True, con_ssh=None, - auth_info=Tenant.get('admin')): - """ - Get qos list based on given filters - - Args: - field (str|list|tuple): 'id', 'name', 'associations', etc... - qos_id (list|str|None): volume qos id(s) to filter out from - name (str|None): name of the qos' to filter for - consumer (str): consumer of the qos' to filter for - strict (bool): - con_ssh: - auth_info: - - Returns (list): list of matching volume QoS' - - """ - - kwargs_raw = { - 'ID': qos_id, - 'Name': name, - 'Consumer': consumer, - } - - kwargs = {} - for key, val in kwargs_raw.items(): - if val is not None: - kwargs[key] = val - - table_ = table_parser.table( - cli.openstack('volume qos list', ssh_client=con_ssh, - auth_info=auth_info)[1]) - - return table_parser.get_multi_values(table_, field, strict=strict, **kwargs) - - -def associate_volume_qos(volume_qos, volume_type, fail_ok=False, - auth_info=Tenant.get('admin'), con_ssh=None): - """ - Associates qos spec with specified volume type. 
- # must be an admin to perform cinder qos-associate - - Args: - volume_qos (str) - volume_type (str) - auth_info - fail_ok (bool) - con_ssh - - Returns (tuple) - - """ - args_ = '{} {}'.format(volume_qos, volume_type) - - LOG.info( - "Associate volume qos {} to type {}".format(volume_qos, volume_type)) - code, output = cli.openstack('volume qos associate', args_, - ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info) - if code > 0: - return 1, output - - msg = "Volume qos {} is successfully associated to volume type {}".format( - volume_qos, volume_type) - LOG.info(msg) - return 0, msg - - -def disassociate_volume_qos(volume_qos, volume_type=None, all_vol_types=False, - fail_ok=False, con_ssh=None, - auth_info=Tenant.get('admin')): - """ - Disassociate a volume QoS spec from volume type(s) - - Args: - volume_qos (str): - volume_type (str|None): volume type name/id - all_vol_types (bool): - fail_ok (bool): - con_ssh: - auth_info (dict) - - Returns (tuple): - - """ - if not all_vol_types and not volume_type: - raise ValueError( - 'volume_type has to be specified unless all_vol_types=True') - - if all_vol_types: - args_ = '--all' - else: - args_ = '--volume-type {}'.format(volume_type) - - LOG.info("Disassociating volume qos {} from: {}".format(volume_qos, args_)) - args_ = '{} {}'.format(args_, volume_qos) - code, output = cli.openstack('volume qos disassociate', args_, - ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info) - - if code > 0: - return 1, output - - msg = "Volume QoS {} is successfully disassociated".format(volume_qos) - LOG.info(msg) - return 0, msg - - -def get_qos_associations(volume_qos, qos_val='ID', con_ssh=None, - auth_info=Tenant.get('admin')): - """ - Get associated volume types for given volume qos spec - Args: - volume_qos: - qos_val: - con_ssh: - auth_info: - - Returns (list): list of volume type names - - """ - key = 'qos_id' if qos_val.lower() == 'id' else 'name' - - associations = get_volume_qos_list(field='associations', con_ssh=con_ssh, - auth_info=auth_info, - **{key: volume_qos})[0] - associations = [i.strip() for i in associations.split(sep=',')] - - LOG.info("Volume QoS {} associations: {}".format(volume_qos, associations)) - - return associations - - -def is_volumes_pool_sufficient(min_size=40): - """ - Check if cinder-volume-pool has sufficient space - Args: - min_size (int): Minimum requirement for cinder volume pool size in - Gbs. Default 30G. 
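# Typical guard built on the helper documented here -- a sketch; pytest's
# skip() is imported and used the same way elsewhere in this suite:
if not is_volumes_pool_sufficient(min_size=40):
    skip('cinder-volumes pool smaller than 40G; skipping volume-heavy test')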
- - Returns (bool): - - """ - con_ssh = ControllerClient.get_active_controller() - lvs_pool = con_ssh.exec_sudo_cmd( - cmd="lvs --units g | grep --color='never' cinder-volumes-pool")[1] - # Sample output: - # cinder-volumes-pool cinder-volumes twi-aotz-- - # 19.95g 64.31 33.38 - # volume-05fa416d-d37b-4d57-a6ff-ab4fe49deece cinder-volumes Vwi-a-tz-- - # 1.00g cinder-volumes-pool 64.16 - # volume-1b04fa7f-b839-4cf9-a177-e676ec6cf9b7 cinder-volumes Vwi-a-tz-- - # 1.00g cinder-volumes-pool 64.16 - if lvs_pool: - pool_size = float( - lvs_pool.splitlines()[0].strip().split()[3].strip()[:-1].split( - sep='<')[-1]) - return pool_size >= min_size - - # assume enough volumes in ceph: - return True - - -def create_volume_snapshot(name, volume=None, description=None, force=False, - properties=None, remote_sources=None, - fail_ok=False, con_ssh=None, auth_info=None, cleanup=None): - """ - Create snapshot for an existing volume - Args: - name (str): - volume (None): - description (str|None): - force (bool): - properties (None|dict): - remote_sources (None|dict): - fail_ok (bool): - con_ssh: - auth_info: - cleanup: - - Returns (tuple): - - """ - valid_cleanups = ('module', 'session', 'function', 'class', None) - if cleanup not in valid_cleanups: - raise ValueError( - "Invalid scope provided. Choose from: {}".format(valid_cleanups)) - - arg_dict = { - 'volume': volume, - 'description': description, - 'force': force, - 'property': properties, - 'remote-source': remote_sources - } - - arg_str = common.parse_args(arg_dict, repeat_arg=True) - arg_str += ' {}'.format(name) - - vol = volume if volume else name - LOG.info('Creating snapshot for volume: {}'.format(vol)) - code, output = cli.openstack('volume snapshot create', arg_str, - ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info) - - table_ = table_parser.table(output) - snap_shot_id = table_parser.get_value_two_col_table(table_, 'id') - if cleanup and snap_shot_id: - ResourceCleanup.add('vol_snapshot', snap_shot_id, scope=cleanup) - - if code > 0: - return 1, output - - LOG.info( - "Volume snapshot {} created for volume {}. Wait for it to become " - "available".format( - snap_shot_id, vol)) - wait_for_vol_snapshot_status(snap_shot_id, status='available', - con_ssh=con_ssh, auth_info=auth_info) - - LOG.info("Volume snapshot {} created and READY for volume {}".format( - snap_shot_id, vol)) - return 0, snap_shot_id - - -def import_volume(cinder_volume_backup, vol_id=None, con_ssh=None, - fail_ok=False, auth_info=Tenant.get('admin'), - retries=2): - """ - Imports a cinder volume from a backup file located in /opt/backups - folder. The backup file is expected in - volume--.tgz format. 
Either volume_backup filename or vol_id
-    must be provided
-    Args:
-        cinder_volume_backup(str): the filename of the backup file
-        vol_id (str):
-            is the uuid of the cinder volume to be imported
-        con_ssh:
-        fail_ok:
-        auth_info:
-        retries (int)
-
-    Returns:
-
-    """
-
-    if not cinder_volume_backup and not vol_id:
-        raise ValueError("Volume backup file name or vol_id must be provided.")
-
-    if con_ssh is None:
-        con_ssh = ControllerClient.get_active_controller()
-
-    controller_prompt = Prompt.CONTROLLER_0 + \
-        r'|.*controller\-0\:/opt/backups\$'
-    controller_prompt += r'|.*controller\-0.*backups.*\$'
-    LOG.info('set prompt to:{}'.format(controller_prompt))
-    vol_backup = cinder_volume_backup
-    vol_id_ = vol_id
-    cd_cmd = "cd /opt/backups"
-    con_ssh.set_prompt(prompt=controller_prompt)
-
-    con_ssh.exec_cmd(cd_cmd)
-
-    if not cinder_volume_backup:
-        # search backup file in /opt/backups
-        search_str = "volume-" + vol_id_ + "*.tgz"
-        cmd = "cd /opt/backups; ls {}".format(search_str)
-
-        rc, output = con_ssh.exec_cmd(cmd)
-        if rc == 0:
-            vol_backup = output.split()[0]
-        else:
-            err_msg = "volume backup file not found in /opt/backups: {}".format(
-                output)
-            LOG.error(err_msg)
-            if fail_ok:
-                return -1, err_msg
-            else:
-                raise exceptions.CinderError(err_msg)
-    if not vol_id_:
-        vol_id_ = vol_backup[7:-20]
-
-    # according to the user documents, the first time of 'cinder import' may
-    # fail, in which case
-    # we just have to try again
-    for retry in range(retries if 2 <= retries <= 10 else 2):
-        con_ssh.set_prompt(prompt=controller_prompt)
-        rc, output = cli.cinder('import', vol_backup, ssh_client=con_ssh,
-                                fail_ok=fail_ok, auth_info=auth_info)
-        if rc == 1:
-            LOG.warn(
-                'Failed to import volume for the:{} time'.format(retry + 1))
-
-        if wait_for_volume_status(volume=vol_id_,
-                                  status=['available', 'in-use'],
-                                  auth_info=auth_info,
-                                  con_ssh=con_ssh, fail_ok=True):
-            break
-    else:
-        err_msg = "Volume is imported, but not in available/in-use state."
- LOG.warning(err_msg) - if fail_ok: - return 2, vol_id_ - else: - raise exceptions.CinderError(err_msg) - - return 0, "Volume {} is imported successfully".format(vol_id_) - - -def delete_backups(backup_ids=None, con_ssh=None, fail_ok=False, - auth_info=None): - LOG.info('Deleting backups:{}'.format(backup_ids)) - - if backup_ids is None: - backup_ids = get_backup_ids(con_ssh=con_ssh, fail_ok=fail_ok, - auth_info=auth_info) - - for backup_id in backup_ids: - LOG.info('Deleting backup:{}'.format(backup_id)) - cli.cinder('backup-delete', backup_id, fail_ok=fail_ok, - auth_info=auth_info) - - -def export_free_volume_using_cinder_backup(vol_id=None, container='cinder', - name='', con_ssh=None, fail_ok=False, - auth_info=None, - backup_file_path='/opt/backups'): - LOG.info( - 'Exporing free volume using cinder-backup, volume-id:{}'.format(vol_id)) - if not name: - name = 'free_vol_backup_' + str(vol_id)[0:2] + '_' + str(vol_id)[-5:] - - arg = '--container {} --name {} {}'.format(container, name, vol_id) - output = table_parser.table( - cli.cinder('backup-create', arg, ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info)[1]) - - backup_id = table_parser.get_value_two_col_table(output, 'id') - backup_name = table_parser.get_value_two_col_table(output, 'name') - volume_id = table_parser.get_value_two_col_table(output, 'volume_id') - - LOG.info( - 'TODO: backup_id:{}, backup_name:{}, volume_id:{}'.format(backup_id, - backup_name, - volume_id)) - - assert backup_name == name and volume_id == vol_id - - wait_for_backup_ready(backup_id) - - msg = ( - 'backup:{} reached "available" status, check if the files are ' - 'gerated'.format( - backup_id)) - LOG.info('OK,' + msg) - code, output = con_ssh.exec_sudo_cmd( - 'ls -l {}/*{}*'.format(os.path.join(backup_file_path, container), - backup_id)) - - if code != 0: - con_ssh.exec_sudo_cmd( - 'ls -l {}/*'.format(os.path.join(backup_file_path, container))) - - assert 0 == code and output, 'backup became "available", but files are ' \ - 'not generated' - - return backup_id - - -def wait_for_backup_ready(backup_id, timeout=900, interval=15, con_ssh=None, - fail_ok=False, auth_info=None): - LOG.info( - 'Waiting for backup reaches "available" status, backup-id:{}'.format( - backup_id)) - now = time.time() - end = now + timeout - - while time.time() < end: - time.sleep(interval) - status = get_cinder_backup_status(backup_id, con_ssh=con_ssh, - auth_info=auth_info) - if status == 'available': - break - else: - msg = 'backup did not reach status: "available" within {} ' \ - 'seconds'.format(timeout) - LOG.warning('Error:' + msg) - if not fail_ok: - assert False, msg - return -1 - - return 0 - - -def export_busy_volume_using_cinder_backup(vol_id=None, - container='cinder', - name='', - con_ssh=None, - fail_ok=False, - auth_info=None, - backup_file_path='/opt/backups' - ): - LOG.info('TODO: exporting in-use volume using cinder-backup, vol:{}'.format( - vol_id)) - if not name: - name = 'inuse_vol_backup_' + vol_id[-4:] - snp_id = create_volume_snapshot('snp_' + name, volume=vol_id, - con_ssh=con_ssh, - fail_ok=fail_ok, - force=True, auth_info=auth_info)[1] - arg = '--container {} --name {} --snapshot-id {} {}'.format( - container, name, snp_id, vol_id) - output = table_parser.table(cli.cinder('backup-create', arg, - fail_ok=fail_ok, - auth_info=auth_info)[1]) - - backup_id = table_parser.get_value_two_col_table(output, 'id') - backup_name = table_parser.get_value_two_col_table(output, 'name') - volume_id = table_parser.get_value_two_col_table(output, 'volume_id') 
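# Sketch: drive the backup helpers above end-to-end for one available volume.
# The volume ID is a placeholder; con_ssh is fetched explicitly because these
# helpers shell out via exec_sudo_cmd. get_cinder_backup_status is defined
# just below:
con_ssh = ControllerClient.get_active_controller()
backup_id = export_free_volume_using_cinder_backup(vol_id='<vol-uuid>',
                                                   con_ssh=con_ssh)
assert get_cinder_backup_status(backup_id, con_ssh=con_ssh) == 'available'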
- - LOG.info( - 'TODO: backup_id:{}, backup_name:{}, volume_id:{}'.format( - backup_id, backup_name, volume_id)) - - assert backup_name == name and volume_id == vol_id - - wait_for_backup_ready(backup_id) - - msg = ( - 'backup:{} reached "available" status, check if the files are ' - 'gerated'.format( - backup_id)) - LOG.info('OK,' + msg) - code, output = con_ssh.exec_sudo_cmd( - 'ls -l {}/*{}*'.format(os.path.join(backup_file_path, container), - backup_id)) - - if code != 0: - con_ssh.exec_sudo_cmd( - 'ls -l {}/*'.format(os.path.join(backup_file_path, container))) - - assert 0 == code and output, 'backup became "available", but files are ' \ - 'not generated' - - LOG.info( - 'TODO: successfully exported in-use volume using cinder-backup, ' - 'vol:{}'.format( - vol_id)) - - return backup_id - - -def export_volumes_using_cinder_backup(vol_ids=None, delete_existing=True, - con_ssh=None, fail_ok=False, - auth_info=None, - backup_file_path='/opt/backups'): - if not vol_ids: - LOG.warning('No volume IDs specified, skip the rest of test') - return 0, [] - - backup_ids = get_backup_ids(searching_status='', con_ssh=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - - if delete_existing and len(backup_ids) > 0: - delete_backups(con_ssh=None, fail_ok=fail_ok, auth_info=auth_info) - - code = 0 - exported_volume_ids = [] - for vol_id in vol_ids: - LOG.info('Backup volume: {}'.format(vol_id)) - volume_status = get_volume_show_values( - vol_id, 'status', con_ssh=con_ssh)[0] - if volume_status == 'available': - code = export_free_volume_using_cinder_backup( - vol_id=vol_id, - con_ssh=con_ssh, - fail_ok=fail_ok, - auth_info=auth_info, - backup_file_path=backup_file_path) - - elif volume_status == 'in-use': - code = export_busy_volume_using_cinder_backup( - vol_id=vol_id, - con_ssh=con_ssh, - fail_ok=fail_ok, - auth_info=auth_info, - backup_file_path=backup_file_path) - - exported_volume_ids.append(vol_id) - - LOG.info('Volumes backuped using cinder-backup:{}'.format( - exported_volume_ids)) - return code, exported_volume_ids - - -def get_backup_ids(searching_status='available', con_ssh=None, fail_ok=False, - auth_info=None): - if not auth_info: - auth_info = Tenant.get('admin') - - table_ = table_parser.table( - cli.cinder('backup-list', ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info)[1]) - - if searching_status and searching_status.strip(): - kwargs = {'Status': searching_status.strip()} - table_ = table_parser.filter_table(table_, **kwargs) - - status = table_parser.get_values(table_, 'Status') - backup_ids = table_parser.get_values(table_, 'ID') - volume_ids = table_parser.get_values(table_, 'Volume ID') - - LOG.info('status:{}'.format(status)) - LOG.info('backup_ids:{}'.format(backup_ids)) - LOG.info('volume_ids:{}'.format(volume_ids)) - LOG.info('backup-ids:{}'.format(backup_ids)) - - return backup_ids - - -def get_cinder_backup_status(backup_id, con_ssh=None, fail_ok=False, - auth_info=Tenant.get('admin')): - states = table_parser.table( - cli.cinder('backup-show', backup_id, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info)[1]) - return table_parser.get_value_two_col_table(states, 'status') - - -def export_volumes(vol_ids=None, con_ssh=None, fail_ok=False, - auth_info=Tenant.get('admin'), cinder_backup=False, - backup_file_path='/opt/backups'): - """ - Exports cinder volume to controller's /opt/backups folder. The backup - file is in - volume--.tgz format. 
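# Sketch: export every volume on the system through the cinder-backup path
# using the helper documented here; it returns an exit code plus the list of
# exported volume IDs:
code, exported = export_volumes(cinder_backup=True)
assert code == 0, 'volume export failed: {}'.format(exported)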
- Args: - vol_ids(list/str): the list of volume ids to be exported, if none, - all system volumes are exported - con_ssh: - fail_ok: - auth_info: - cinder_backup - backup_file_path - - Returns: - - """ - if not vol_ids: - vol_ids = get_volumes() - elif isinstance(vol_ids, str): - vol_ids = [vol_ids] - - if cinder_backup: - return export_volumes_using_cinder_backup( - vol_ids=vol_ids, - con_ssh=con_ssh, - fail_ok=fail_ok, - auth_info=auth_info, - backup_file_path=backup_file_path) - volume_exported = [] - for vol_id in vol_ids: - - if get_volume_show_values(vol_id, 'status', con_ssh=con_ssh, - auth_info=auth_info)[0] == 'available': - # export available volume to ~/opt/backups - LOG.tc_step("export available volume {} ".format(vol_id)) - code, out = cli.cinder('export', vol_id, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - - if code > 0: - return 1, out - - # wait for volume copy to complete - if not wait_for_volume_status(vol_id, fail_ok=fail_ok, - auth_info=auth_info, con_ssh=con_ssh): - err_msg = "cinder volume failed to reach available status " \ - "after export" - LOG.warning(err_msg) - return 2, vol_id - - LOG.info( - "Exported 'Available' Volumes {} successfully ".format(vol_id)) - volume_exported.append(vol_id) - - # execute backup in-use volume command - if get_volume_show_values(vol_id, 'status', auth_info=auth_info, - con_ssh=con_ssh)[0] == 'in-use': - LOG.tc_step("export in use volume {} ".format(vol_id)) - snapshot_name = 'snapshot_' + vol_id - snap_shot_id = create_volume_snapshot(name=snapshot_name, - volume=vol_id, - con_ssh=con_ssh, - auth_info=auth_info)[1] - LOG.info( - "Volume snapshot {} created for volume {}".format(snap_shot_id, - vol_id)) - - # wait for volume copy to complete - if not wait_for_vol_snapshot_status(snap_shot_id, fail_ok=fail_ok, - auth_info=auth_info, - con_ssh=con_ssh): - err_msg = "cinder snapshot volume {} failed to reach " \ - "available status after copy".format(snap_shot_id) - LOG.warning(err_msg) - return 3, err_msg - - found_snap = get_vol_snapshots(vol_id=vol_id, auth_info=auth_info, - con_ssh=con_ssh)[0] - LOG.info( - "Matched Volume snapshot {} to volume {}".format(found_snap, - vol_id)) - if found_snap not in snap_shot_id: - err_msg = "cinder volume snapshot {} for volume {} not found " \ - "after export".format(snap_shot_id, vol_id) - LOG.warn(err_msg) - if fail_ok: - LOG.warning(err_msg) - return 4, err_msg - else: - raise exceptions.CinderError(err_msg) - - LOG.info( - "Exporting in-use Volume snapshot {} ".format(snap_shot_id)) - cli.cinder('snapshot-export', snap_shot_id, ssh_client=con_ssh, - auth_info=auth_info) - if not wait_for_vol_snapshot_status(snap_shot_id, fail_ok=fail_ok, - auth_info=auth_info, - con_ssh=con_ssh): - err_msg = "cinder snapshot volume {} failed to reach " \ - "available status after export".format(snap_shot_id) - return 5, err_msg - - # delete the snapshot after export - LOG.info( - "Deleting snapshot Volume snapshot {} after export ".format( - snap_shot_id)) - cli.cinder('snapshot-delete', snap_shot_id, ssh_client=con_ssh, - auth_info=auth_info) - - LOG.info( - "Exported 'in-use' Volumes {} successfully ".format(vol_id)) - volume_exported.append(vol_id) - - return 0, volume_exported - - -def get_lvm_usage(con_ssh): - LOG.info('Getting usage of cinder-volumes') - free, total, unit = 0, 0, 'g' - pattern = r'(\d+(\.\d+)?)([gm])' - code, output = con_ssh.exec_sudo_cmd('lvs') - if 0 != code: - LOG.warn('Failed to get usage of cinder-volumes') - else: - try: - used = 0 - for line in 
output.strip().splitlines(): - fields = line.split() - if fields[0] == 'cinder-volumes-pool': - total = re.search(pattern, fields[3], re.IGNORECASE) - unit = total.group(3) - total = float(total.group(1)) - elif fields[0].startswith('volume-'): - usage = re.search(pattern, fields[3], re.IGNORECASE) - used += float(usage.group(1)) - - free = total - used - - LOG.info('lvm usage: free:{}, used:{}, total:{}'.format(free, used, - total)) - except Exception: - LOG.info('Wrong format:{}'.format(output)) - free = total = 0 - - return free, total, unit diff --git a/automated-pytest-suite/keywords/common.py b/automated-pytest-suite/keywords/common.py deleted file mode 100644 index cecdd8af..00000000 --- a/automated-pytest-suite/keywords/common.py +++ /dev/null @@ -1,859 +0,0 @@ -# -# Copyright (c) 2019, 2020 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -############################################################# -# DO NOT import anything from helper modules to this module # -############################################################# - -import socket -import os -import re -import time -from contextlib import contextmanager -from datetime import datetime - -import pexpect -import yaml -from pytest import skip - -from consts.auth import Tenant, TestFileServer, HostLinuxUser -from consts.stx import Prompt -from consts.proj_vars import ProjVar -from utils import exceptions -from utils.clients.ssh import ControllerClient, NATBoxClient, SSHClient, \ - get_cli_client -from utils.tis_log import LOG - - -def scp_from_test_server_to_user_file_dir(source_path, dest_dir, dest_name=None, - timeout=900, con_ssh=None, - central_region=False): - if con_ssh is None: - con_ssh = get_cli_client(central_region=central_region) - if dest_name is None: - dest_name = source_path.split(sep='/')[-1] - - if ProjVar.get_var('USER_FILE_DIR') == ProjVar.get_var('TEMP_DIR'): - LOG.info("Copy file from test server to localhost") - source_server = TestFileServer.SERVER - source_user = TestFileServer.USER - source_password = TestFileServer.PASSWORD - dest_path = dest_dir if not dest_name else os.path.join(dest_dir, - dest_name) - LOG.info('Check if file already exists on TiS') - if con_ssh.file_exists(file_path=dest_path): - LOG.info('dest path {} already exists. Return existing path'.format( - dest_path)) - return dest_path - - os.makedirs(dest_dir, exist_ok=True) - con_ssh.scp_on_dest(source_user=source_user, source_ip=source_server, - source_path=source_path, - dest_path=dest_path, source_pswd=source_password, - timeout=timeout) - return dest_path - else: - LOG.info("Copy file from test server to active controller") - return scp_from_test_server_to_active_controller( - source_path=source_path, dest_dir=dest_dir, - dest_name=dest_name, timeout=timeout, con_ssh=con_ssh) - - -def _scp_from_remote_to_active_controller(source_server, source_path, - dest_dir, dest_name=None, - source_user=None, - source_password=None, - timeout=900, con_ssh=None, - is_dir=False): - """ - SCP file or files under a directory from remote server to TiS server - - Args: - source_path (str): remote server file path or directory path - dest_dir (str): destination directory. 
should end with '/' - dest_name (str): destination file name if not dir - timeout (int): - con_ssh: - is_dir - - Returns (str|None): destination file/dir path if scp successful else None - - """ - if con_ssh is None: - con_ssh = ControllerClient.get_active_controller() - - if not source_user: - source_user = TestFileServer.USER - if not source_password: - source_password = TestFileServer.PASSWORD - - if dest_name is None and not is_dir: - dest_name = source_path.split(sep='/')[-1] - - dest_path = dest_dir if not dest_name else os.path.join(dest_dir, dest_name) - - LOG.info('Check if file already exists on TiS') - if not is_dir and con_ssh.file_exists(file_path=dest_path): - LOG.info('dest path {} already exists. Return existing path'.format( - dest_path)) - return dest_path - - LOG.info('Create destination directory on tis server if not already exists') - cmd = 'mkdir -p {}'.format(dest_dir) - con_ssh.exec_cmd(cmd, fail_ok=False) - - nat_name = ProjVar.get_var('NATBOX') - if nat_name: - nat_name = nat_name.get('name') - if nat_name and ProjVar.get_var('IS_VBOX'): - LOG.info('VBox detected, performing intermediate scp') - - nat_dest_path = '/tmp/{}'.format(dest_name) - nat_ssh = NATBoxClient.get_natbox_client() - - if not nat_ssh.file_exists(nat_dest_path): - LOG.info("scp file from {} to NatBox: {}".format(nat_name, - source_server)) - nat_ssh.scp_on_dest(source_user=source_user, - source_ip=source_server, - source_path=source_path, - dest_path=nat_dest_path, - source_pswd=source_password, timeout=timeout, - is_dir=is_dir) - - LOG.info( - 'scp file from natbox {} to active controller'.format(nat_name)) - dest_user = HostLinuxUser.get_user() - dest_pswd = HostLinuxUser.get_password() - dest_ip = ProjVar.get_var('LAB').get('floating ip') - nat_ssh.scp_on_source(source_path=nat_dest_path, dest_user=dest_user, - dest_ip=dest_ip, dest_path=dest_path, - dest_password=dest_pswd, timeout=timeout, - is_dir=is_dir) - - else: # if not a VBox lab, scp from remote server directly to TiS server - LOG.info("scp file(s) from {} to tis".format(source_server)) - con_ssh.scp_on_dest(source_user=source_user, source_ip=source_server, - source_path=source_path, - dest_path=dest_path, source_pswd=source_password, - timeout=timeout, is_dir=is_dir) - - return dest_path - - -def scp_from_test_server_to_active_controller(source_path, dest_dir, - dest_name=None, timeout=900, - con_ssh=None, - is_dir=False): - """ - SCP file or files under a directory from test server to TiS server - - Args: - source_path (str): test server file path or directory path - dest_dir (str): destination directory. 
should end with '/' - dest_name (str): destination file name if not dir - timeout (int): - con_ssh: - is_dir (bool) - - Returns (str|None): destination file/dir path if scp successful else None - - """ - skip('Shared Test File Server is not ready') - if con_ssh is None: - con_ssh = ControllerClient.get_active_controller() - - source_server = TestFileServer.SERVER - source_user = TestFileServer.USER - source_password = TestFileServer.PASSWORD - - return _scp_from_remote_to_active_controller( - source_server=source_server, - source_path=source_path, - dest_dir=dest_dir, - dest_name=dest_name, - source_user=source_user, - source_password=source_password, - timeout=timeout, - con_ssh=con_ssh, - is_dir=is_dir) - - -def scp_from_active_controller_to_test_server(source_path, dest_dir, - dest_name=None, timeout=900, - is_dir=False, - con_ssh=None): - """ - SCP file or files under a directory from test server to TiS server - - Args: - source_path (str): test server file path or directory path - dest_dir (str): destination directory. should end with '/' - dest_name (str): destination file name if not dir - timeout (int): - is_dir (bool): - con_ssh: - - Returns (str|None): destination file/dir path if scp successful else None - - """ - skip('Shared Test File Server is not ready') - if con_ssh is None: - con_ssh = ControllerClient.get_active_controller() - - dir_option = '-r ' if is_dir else '' - dest_server = TestFileServer.SERVER - dest_user = TestFileServer.USER - dest_password = TestFileServer.PASSWORD - - dest_path = dest_dir if not dest_name else os.path.join(dest_dir, dest_name) - - scp_cmd = 'scp -oStrictHostKeyChecking=no -o ' \ - 'UserKnownHostsFile=/dev/null ' \ - '{}{} {}@{}:{}'.\ - format(dir_option, source_path, dest_user, dest_server, dest_path) - - LOG.info("scp file(s) from tis server to test server") - con_ssh.send(scp_cmd) - index = con_ssh.expect( - [con_ssh.prompt, Prompt.PASSWORD_PROMPT, Prompt.ADD_HOST], - timeout=timeout) - if index == 2: - con_ssh.send('yes') - index = con_ssh.expect([con_ssh.prompt, Prompt.PASSWORD_PROMPT], - timeout=timeout) - if index == 1: - con_ssh.send(dest_password) - index = con_ssh.expect(timeout=timeout) - - assert index == 0, "Failed to scp files" - - exit_code = con_ssh.get_exit_code() - assert 0 == exit_code, "scp not fully succeeded" - - return dest_path - - -def scp_from_localhost_to_active_controller( - source_path, dest_path=None, - dest_user=None, - dest_password=None, - timeout=900, is_dir=False): - - active_cont_ip = ControllerClient.get_active_controller().host - if not dest_path: - dest_path = HostLinuxUser.get_home() - if not dest_user: - dest_user = HostLinuxUser.get_user() - if not dest_password: - dest_password = HostLinuxUser.get_password() - - return scp_from_local(source_path, active_cont_ip, dest_path=dest_path, - dest_user=dest_user, dest_password=dest_password, - timeout=timeout, is_dir=is_dir) - - -def scp_from_active_controller_to_localhost( - source_path, dest_path='', - src_user=None, - src_password=None, - timeout=900, is_dir=False): - - active_cont_ip = ControllerClient.get_active_controller().host - if not src_user: - src_user = HostLinuxUser.get_user() - if not src_password: - src_password = HostLinuxUser.get_password() - - return scp_to_local(source_path=source_path, source_ip=active_cont_ip, - source_user=src_user, source_password=src_password, - dest_path=dest_path, timeout=timeout, is_dir=is_dir) - - -def scp_from_local(source_path, dest_ip, dest_path, - dest_user, - dest_password, - timeout=900, is_dir=False): - """ 
- Scp file(s) from localhost (i.e., from where the automated tests are - executed). - - Args: - source_path (str): source file/directory path - dest_ip (str): ip of the destination host - dest_user (str): username of destination host. - dest_password (str): password of destination host - dest_path (str): destination directory path to copy the file(s) to - timeout (int): max time to wait for scp finish in seconds - is_dir (bool): whether to copy a single file or a directory - - """ - dir_option = '-r ' if is_dir else '' - - cmd = 'scp -oStrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ' \ - '{}{} {}@{}:{}'. \ - format(dir_option, source_path, dest_user, dest_ip, dest_path) - - _scp_on_local(cmd, remote_password=dest_password, timeout=timeout) - - -def scp_to_local(source_path, source_ip, source_user, source_password, - dest_path, timeout=900, is_dir=False): - """ - Scp file(s) to localhost (i.e., to where the automated tests are executed). - - Args: - source_path (str): source file/directory path - source_ip (str): ip of the source host. - source_user (str): username of source host. - source_password (str): password of source host - dest_path (str): destination directory path to copy the file(s) to - timeout (int): max time to wait for scp finish in seconds - is_dir (bool): whether to copy a single file or a directory - - """ - dir_option = '-r ' if is_dir else '' - cmd = 'scp -oStrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ' \ - '{}{}@{}:{} {}'.\ - format(dir_option, source_user, source_ip, source_path, dest_path) - - _scp_on_local(cmd, remote_password=source_password, timeout=timeout) - - -def _scp_on_local(cmd, remote_password, logdir=None, timeout=900): - LOG.debug('scp cmd: {}'.format(cmd)) - - logdir = logdir or ProjVar.get_var('LOG_DIR') - logfile = os.path.join(logdir, 'scp_files.log') - - with open(logfile, mode='a', encoding='utf8') as f: - local_child = pexpect.spawn(command=cmd, encoding='utf-8', logfile=f) - index = local_child.expect([pexpect.EOF, 'assword:', 'yes/no'], - timeout=timeout) - - if index == 2: - local_child.sendline('yes') - index = local_child.expect([pexpect.EOF, 'assword:'], - timeout=timeout) - - if index == 1: - local_child.sendline(remote_password) - local_child.expect(pexpect.EOF, timeout=timeout) - - -def get_tenant_name(auth_info=None): - """ - Get name of given tenant. If None is given, primary tenant name will be - returned. 
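# Sketch for the localhost scp helpers above (the IP and source path are
# placeholders; the credential getters are the ones used throughout this
# module):
scp_from_local(source_path='/tmp/test_artifacts/',
               dest_ip='<controller-oam-ip>',
               dest_path=HostLinuxUser.get_home(),
               dest_user=HostLinuxUser.get_user(),
               dest_password=HostLinuxUser.get_password(),
               is_dir=True)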
- - Args: - auth_info (dict|None): Tenant dict - - Returns: - str: name of the tenant - - """ - if auth_info is None: - auth_info = Tenant.get_primary() - return auth_info['tenant'] - - -class Count: - __vm_count = 0 - __flavor_count = 0 - __volume_count = 0 - __image_count = 0 - __server_group = 0 - __router = 0 - __subnet = 0 - __other = 0 - - @classmethod - def get_vm_count(cls): - cls.__vm_count += 1 - return cls.__vm_count - - @classmethod - def get_flavor_count(cls): - cls.__flavor_count += 1 - return cls.__flavor_count - - @classmethod - def get_volume_count(cls): - cls.__volume_count += 1 - return cls.__volume_count - - @classmethod - def get_image_count(cls): - cls.__image_count += 1 - return cls.__image_count - - @classmethod - def get_sever_group_count(cls): - cls.__server_group += 1 - return cls.__server_group - - @classmethod - def get_router_count(cls): - cls.__router += 1 - return cls.__router - - @classmethod - def get_subnet_count(cls): - cls.__subnet += 1 - return cls.__subnet - - @classmethod - def get_other_count(cls): - cls.__other += 1 - return cls.__other - - -class NameCount: - __names_count = { - 'vm': 0, - 'flavor': 0, - 'volume': 0, - 'image': 0, - 'server_group': 0, - 'subnet': 0, - 'heat_stack': 0, - 'qos': 0, - 'other': 0, - } - - @classmethod - def get_number(cls, resource_type='other'): - cls.__names_count[resource_type] += 1 - return cls.__names_count[resource_type] - - @classmethod - def get_valid_types(cls): - return list(cls.__names_count.keys()) - - -def get_unique_name(name_str, existing_names=None, resource_type='other'): - """ - Get a unique name string by appending a number to given name_str - - Args: - name_str (str): partial name string - existing_names (list): names to avoid - resource_type (str): type of resource. valid values: 'vm' - - Returns: - - """ - valid_types = NameCount.get_valid_types() - if resource_type not in valid_types: - raise ValueError( - "Invalid resource_type provided. Valid types: {}".format( - valid_types)) - - if existing_names: - if resource_type in ['image', 'volume', 'flavor']: - unique_name = name_str - else: - unique_name = "{}-{}".format(name_str, NameCount.get_number( - resource_type=resource_type)) - - for i in range(50): - if unique_name not in existing_names: - return unique_name - - unique_name = "{}-{}".format(name_str, NameCount.get_number( - resource_type=resource_type)) - else: - raise LookupError("Cannot find unique name.") - else: - unique_name = "{}-{}".format(name_str, NameCount.get_number( - resource_type=resource_type)) - - return unique_name - - -def parse_cpus_list(cpus): - """ - Convert human friendly pcup list to list of integers. 
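# Sketch of get_unique_name above (names are illustrative); the numeric
# suffix comes from the shared NameCount counters, so the exact number
# depends on prior calls:
taken = ['tenant1-vm-1', 'tenant1-vm-2']
name = get_unique_name('tenant1-vm', existing_names=taken, resource_type='vm')
# e.g. 'tenant1-vm-3'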
- e.g., '5-7,41-43, 43, 45' >> [5, 6, 7, 41, 42, 43, 43, 45] - - Args: - cpus (str): - - Returns (list): list of integers - - """ - if isinstance(cpus, str): - if cpus.strip() == '': - return [] - - cpus = cpus.split(sep=',') - - cpus_list = list(cpus) - - for val in cpus: - # convert '3-6' to [3, 4, 5, 6] - if '-' in val: - cpus_list.remove(val) - min_, max_ = val.split(sep='-') - - # unpinned:20; pinned_cpulist:-, unpinned_cpulist:10-19,30-39 - if min_ != '': - cpus_list += list(range(int(min_), int(max_) + 1)) - - return sorted([int(val) for val in cpus_list]) - - -def get_timedelta_for_isotimes(time1, time2): - """ - - Args: - time1 (str): such as "2016-08-16T12:59:45.440697+00:00" - time2 (str): - - Returns () - - """ - - def _parse_time(time_): - time_ = time_.strip().split(sep='.')[0].split(sep='+')[0] - if 'T' in time_: - pattern = "%Y-%m-%dT%H:%M:%S" - elif ' ' in time_: - pattern = "%Y-%m-%d %H:%M:%S" - else: - raise ValueError("Unknown format for time1: {}".format(time_)) - time_datetime = datetime.strptime(time_, pattern) - return time_datetime - - time1_datetime = _parse_time(time_=time1) - time2_datetime = _parse_time(time_=time2) - - return time2_datetime - time1_datetime - - -def _execute_with_openstack_cli(): - """ - DO NOT USE THIS IN TEST FUNCTIONS! - """ - return ProjVar.get_var('OPENSTACK_CLI') - - -def get_date_in_format(ssh_client=None, date_format="%Y%m%d %T"): - """ - Get date in given format. - Args: - ssh_client (SSHClient): - date_format (str): Please see date --help for valid format strings - - Returns (str): date output in given format - - """ - if ssh_client is None: - ssh_client = ControllerClient.get_active_controller() - return ssh_client.exec_cmd("date +'{}'".format(date_format), fail_ok=False)[ - 1] - - -def write_to_file(file_path, content, mode='a'): - """ - Write content to specified local file - Args: - file_path (str): file path on localhost - content (str): content to write to file - mode (str): file operation mode. Default is 'a' (append to end of file). - - Returns: None - - """ - time_stamp = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime()) - with open(file_path, mode=mode, encoding='utf8') as f: - f.write( - '\n-----------------[{}]-----------------\n{}\n'.format(time_stamp, - content)) - - -def collect_software_logs(con_ssh=None): - if not con_ssh: - con_ssh = ControllerClient.get_active_controller() - LOG.info("Collecting all hosts logs...") - con_ssh.exec_cmd('source /etc/platform/openrc', get_exit_code=False) - con_ssh.send('collect all') - - expect_list = ['.*password for sysadmin:', 'collecting data.', - con_ssh.prompt] - index_1 = con_ssh.expect(expect_list, timeout=20) - if index_1 == 2: - LOG.error( - "Something is wrong with collect all. Check ssh console log for " - "detail.") - return - elif index_1 == 0: - con_ssh.send(con_ssh.password) - con_ssh.expect('collecting data') - - index_2 = con_ssh.expect(['/scratch/ALL_NODES.*', con_ssh.prompt], - timeout=1200) - if index_2 == 0: - output = con_ssh.cmd_output - con_ssh.expect() - logpath = re.findall('.*(/scratch/ALL_NODES_.*.tar).*', output)[0] - LOG.info( - "\n################### TiS server log path: {}".format(logpath)) - else: - LOG.error("Collecting logs failed. 
No ALL_NODES logs found.") - return - - dest_path = ProjVar.get_var('LOG_DIR') - try: - LOG.info("Copying log file from active controller to local {}".format( - dest_path)) - scp_from_active_controller_to_localhost( - source_path=logpath, dest_path=dest_path, timeout=300) - LOG.info("{} is successfully copied to local directory: {}".format( - logpath, dest_path)) - except Exception as e: - LOG.warning("Failed to copy log file to localhost.") - LOG.error(e, exc_info=True) - - -def parse_args(args_dict, repeat_arg=False, vals_sep=' '): - """ - parse args dictionary and convert it to string - Args: - args_dict (dict): key/value pairs - repeat_arg: if value is tuple, list, dict, should the arg be repeated. - e.g., True for --nic in nova boot. False for -m in gnocchi - measures aggregation - vals_sep (str): separator to join multiple vals. Only applicable when - repeat_arg=False. - - Returns (str): - - """ - - def convert_val_dict(key__, vals_dict, repeat_key): - vals_ = [] - for k, v in vals_dict.items(): - if ' ' in v: - v = '"{}"'.format(v) - vals_.append('{}={}'.format(k, v)) - if repeat_key: - args_str = ' ' + ' '.join( - ['{} {}'.format(key__, v_) for v_ in vals_]) - else: - args_str = ' {} {}'.format(key__, vals_sep.join(vals_)) - return args_str - - args = '' - for key, val in args_dict.items(): - if val is None: - continue - - key = key if key.startswith('-') else '--{}'.format(key) - if isinstance(val, str): - if ' ' in val: - val = '"{}"'.format(val) - args += ' {}={}'.format(key, val) - elif isinstance(val, bool): - if val: - args += ' {}'.format(key) - elif isinstance(val, (int, float)): - args += ' {}={}'.format(key, val) - elif isinstance(val, dict): - args += convert_val_dict(key__=key, vals_dict=val, - repeat_key=repeat_arg) - elif isinstance(val, (list, tuple)): - if repeat_arg: - for val_ in val: - if isinstance(val_, dict): - args += convert_val_dict(key__=key, vals_dict=val_, - repeat_key=False) - else: - args += ' {}={}'.format(key, val_) - else: - args += ' {}={}'.format(key, vals_sep.join(val)) - else: - raise ValueError( - "Unrecognized value type. Key: {}; value: {}".format(key, val)) - - return args.strip() - - -def get_symlink(ssh_client, file_path): - code, output = ssh_client.exec_cmd( - 'ls -l {} | grep --color=never ""'.format(file_path)) - if code != 0: - LOG.warning('{} not found!'.format(file_path)) - return None - - res = re.findall('> (.*)', output) - if not res: - LOG.warning('No symlink found for {}'.format(file_path)) - return None - - link = res[0].strip() - return link - - -def is_file(filename, ssh_client): - code = ssh_client.exec_cmd('test -f {}'.format(filename), fail_ok=True)[0] - return 0 == code - - -def is_directory(dirname, ssh_client): - code = ssh_client.exec_cmd('test -d {}'.format(dirname), fail_ok=True)[0] - return 0 == code - - -def lab_time_now(con_ssh=None, date_format='%Y-%m-%dT%H:%M:%S'): - if not con_ssh: - con_ssh = ControllerClient.get_active_controller() - - date_cmd_format = date_format + '.%N' - timestamp = get_date_in_format(ssh_client=con_ssh, - date_format=date_cmd_format) - with_milliseconds = timestamp.split('.')[0] + '.{}'.format( - int(int(timestamp.split('.')[1]) / 1000)) - format1 = date_format + '.%f' - parsed = datetime.strptime(with_milliseconds, format1) - - return with_milliseconds.split('.')[0], parsed - - -@contextmanager -def ssh_to_remote_node(host, username=None, password=None, prompt=None, - ssh_client=None, use_telnet=False, - telnet_session=None): - """ - ssh to a external node from sshclient. 
- - Args: - host (str|None): hostname or ip address of remote node to ssh to. - username (str): - password (str): - prompt (str): - ssh_client (SSHClient): client to ssh from - use_telnet: - telnet_session: - - Returns (SSHClient): ssh client of the host - - Examples: with ssh_to_remote_node('128.224.150.92) as remote_ssh: - remote_ssh.exec_cmd(cmd) - """ - - if not host: - raise exceptions.SSHException( - "Remote node hostname or ip address must be provided") - - if use_telnet and not telnet_session: - raise exceptions.SSHException( - "Telnet session cannot be none if using telnet.") - - if not ssh_client and not use_telnet: - ssh_client = ControllerClient.get_active_controller() - - if not use_telnet: - from keywords.security_helper import LinuxUser - default_user, default_password = LinuxUser.get_current_user_password() - else: - default_user = HostLinuxUser.get_user() - default_password = HostLinuxUser.get_password() - - user = username if username else default_user - password = password if password else default_password - if use_telnet: - original_host = telnet_session.exec_cmd('hostname')[1] - else: - original_host = ssh_client.host - - if not prompt: - prompt = '.*' + host + r'\:~\$' - - remote_ssh = SSHClient(host, user=user, password=password, - initial_prompt=prompt) - remote_ssh.connect() - current_host = remote_ssh.host - if not current_host == host: - raise exceptions.SSHException( - "Current host is {} instead of {}".format(current_host, host)) - try: - yield remote_ssh - finally: - if current_host != original_host: - remote_ssh.close() - - -def ssh_to_stx(lab=None, set_client=False): - if not lab: - lab = ProjVar.get_var('LAB') - - con_ssh = SSHClient(lab['floating ip'], user=HostLinuxUser.get_user(), - password=HostLinuxUser.get_password(), - initial_prompt=Prompt.CONTROLLER_PROMPT) - - con_ssh.connect(retry=True, retry_timeout=30, use_current=False) - if set_client: - ControllerClient.set_active_controller(con_ssh) - - return con_ssh - - -def get_yaml_data(filepath): - """ - Returns the yaml data in json - Args: - filepath(str): location of the yaml file to load - Return(json): - returns the json data - """ - with open(filepath, 'r', encoding='utf8') as f: - data = yaml.safe_load(f) - return data - - -def write_yaml_data_to_file(data, filename, directory=None): - """ - Writes data to a file in yaml format - Args: - data(json): data in json format - filename(str): filename - directory(boo): directory to save the file - Return(str): - returns the location of the yaml file - """ - if directory is None: - directory = ProjVar.get_var('LOG_DIR') - src_path = "{}/{}".format(directory, filename) - with open(src_path, 'w', encoding='utf8') as f: - yaml.dump(data, f) - return src_path - - -def get_lab_fip(region=None): - """ - Returns system OAM floating ip - Args: - region (str|None): central_region or subcloud, only applicable to DC - Returns (str): floating ip of the lab - """ - if ProjVar.get_var('IS_DC'): - if not region: - region = ProjVar.get_var('PRIMARY_SUBCLOUD') - elif region == 'RegionOne': - region = 'central_region' - oam_fip = ProjVar.get_var('lab')[region]["floating ip"] - else: - oam_fip = ProjVar.get_var('lab')["floating ip"] - - return oam_fip - - -def get_dnsname(region='RegionOne'): - # means that the dns name is unreachable - return None \ No newline at end of file diff --git a/automated-pytest-suite/keywords/container_helper.py b/automated-pytest-suite/keywords/container_helper.py deleted file mode 100644 index a72556aa..00000000 --- 
a/automated-pytest-suite/keywords/container_helper.py +++ /dev/null @@ -1,879 +0,0 @@ -# -# Copyright (c) 2019, 2020 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -""" -Container/Application related helper functions for non-kubectl commands. -For example: -- docker commands -- system application-xxx commands -- helm commands - -""" - -import os -import time -import yaml - -from utils import cli, exceptions, table_parser -from utils.tis_log import LOG -from utils.clients.ssh import ControllerClient -from consts.auth import Tenant -from consts.proj_vars import ProjVar -from consts.stx import AppStatus, Prompt, EventLogID, Container -from consts.filepaths import StxPath -from keywords import system_helper, host_helper - - -def exec_helm_upload_cmd(tarball, repo=None, timeout=120, con_ssh=None, - fail_ok=False): - if not con_ssh: - con_ssh = ControllerClient.get_active_controller() - - if not repo: - repo = 'starlingx' - cmd = 'helm-upload {} {}'.format(repo, tarball) - con_ssh.send(cmd) - pw_prompt = Prompt.PASSWORD_PROMPT - prompts = [con_ssh.prompt, pw_prompt] - - index = con_ssh.expect(prompts, timeout=timeout, searchwindowsize=100, - fail_ok=fail_ok) - if index == 1: - con_ssh.send(con_ssh.password) - prompts.remove(pw_prompt) - con_ssh.expect(prompts, timeout=timeout, searchwindowsize=100, - fail_ok=fail_ok) - - code, output = con_ssh._process_exec_result(rm_date=True, - get_exit_code=True) - if code != 0 and not fail_ok: - raise exceptions.SSHExecCommandFailed( - "Non-zero return code for cmd: {}. Output: {}". - format(cmd, output)) - - return code, output - - -def exec_docker_cmd(sub_cmd, args, timeout=120, con_ssh=None, fail_ok=False): - if not con_ssh: - con_ssh = ControllerClient.get_active_controller() - - cmd = 'docker {} {}'.format(sub_cmd, args) - code, output = con_ssh.exec_sudo_cmd(cmd, expect_timeout=timeout, - fail_ok=fail_ok) - - return code, output - - -def upload_helm_charts(tar_file, repo=None, delete_first=False, con_ssh=None, - timeout=120, fail_ok=False): - """ - Upload helm charts via helm-upload cmd - Args: - tar_file: - repo - delete_first: - con_ssh: - timeout: - fail_ok: - - Returns (tuple): - (0, ) - (1, ) - (2, ) - - """ - if not con_ssh: - con_ssh = ControllerClient.get_active_controller() - - helm_dir = os.path.normpath(StxPath.HELM_CHARTS_DIR) - if not repo: - repo = 'starlingx' - file_path = os.path.join(helm_dir, repo, os.path.basename(tar_file)) - current_host = con_ssh.get_hostname() - controllers = [current_host] - if not system_helper.is_aio_simplex(con_ssh=con_ssh): - con_name = 'controller-1' if controllers[ - 0] == 'controller-0' else \ - 'controller-0' - controllers.append(con_name) - - if delete_first: - for host in controllers: - with host_helper.ssh_to_host(hostname=host, - con_ssh=con_ssh) as host_ssh: - if host_ssh.file_exists(file_path): - host_ssh.exec_sudo_cmd('rm -f {}'.format(file_path)) - - code, output = exec_helm_upload_cmd(tarball=tar_file, repo=repo, - timeout=timeout, con_ssh=con_ssh, - fail_ok=fail_ok) - if code != 0: - return 1, output - - file_exist = con_ssh.file_exists(file_path) - if not file_exist: - raise exceptions.ContainerError( - "{} not found on {} after helm-upload".format(file_path, - current_host)) - - LOG.info("Helm charts {} uploaded successfully".format(file_path)) - return 0, file_path - - -def upload_app(tar_file, app_name=None, app_version=None, check_first=True, - fail_ok=False, uploaded_timeout=300, - con_ssh=None, auth_info=Tenant.get('admin_platform')): - """ - Upload an 
application via 'system application-upload' - Args: - app_name: - app_version: - tar_file: - check_first - fail_ok: - uploaded_timeout: - con_ssh: - auth_info: - - Returns: - - """ - if check_first and get_apps(application=app_name, con_ssh=con_ssh, - auth_info=auth_info): - msg = '{} already exists. Do nothing.'.format(app_name) - LOG.info(msg) - return -1, msg - - args = '' - if app_name: - args += '-n {} '.format(app_name) - if app_version: - args += '-v {} '.format(app_version) - args = '{}{}'.format(args, tar_file) - code, output = cli.system('application-upload', args, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - - if code > 0: - return 1, output - - res = wait_for_apps_status(apps=app_name, status=AppStatus.UPLOADED, - timeout=uploaded_timeout, - con_ssh=con_ssh, auth_info=auth_info, - fail_ok=fail_ok)[0] - if not res: - return 2, "{} failed to upload".format(app_name) - - msg = '{} uploaded successfully'.format(app_name) - LOG.info(msg) - return 0, msg - - -def get_apps(field='status', application=None, con_ssh=None, - auth_info=Tenant.get('admin_platform'), - rtn_dict=False, **kwargs): - """ - Get applications values for give apps and fields via system application-list - Args: - application (str|list|tuple): - field (str|list|tuple): - con_ssh: - auth_info: - rtn_dict: - **kwargs: extra filters other than application - - Returns (list|dict): - list of list, or - dict with app name(str) as key and values(list) for given fields for - each app as value - - """ - table_ = table_parser.table( - cli.system('application-list', ssh_client=con_ssh, auth_info=auth_info)[ - 1]) - if application: - kwargs['application'] = application - - return table_parser.get_multi_values(table_, fields=field, - rtn_dict=rtn_dict, zip_values=True, - **kwargs) - - -def get_app_values(app_name, fields, con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - Get values from system application-show - Args: - app_name: - fields (str|list|tuple): - con_ssh: - auth_info: - - Returns: - - """ - if isinstance(fields, str): - fields = [fields] - - table_ = table_parser.table( - cli.system('application-show', app_name, ssh_client=con_ssh, - auth_info=auth_info)[1], - combine_multiline_entry=True) - values = table_parser.get_multi_values_two_col_table(table_, fields=fields) - return values - - -def wait_for_apps_status(apps, status, timeout=360, check_interval=5, - fail_ok=False, con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - Wait for applications to reach expected status via system application-list - Args: - apps: - status: - timeout: - check_interval: - fail_ok: - con_ssh: - auth_info: - - Returns (tuple): - - """ - status = '' if not status else status - if isinstance(apps, str): - apps = [apps] - apps_to_check = list(apps) - check_failed = [] - end_time = time.time() + timeout - - LOG.info( - "Wait for {} application(s) to reach status: {}".format(apps, status)) - while time.time() < end_time: - apps_status = get_apps(application=apps_to_check, - field=('application', 'status'), con_ssh=con_ssh, - auth_info=auth_info) - apps_status = {item[0]: item[1] for item in apps_status if item} - - checked = [] - for app in apps_to_check: - current_app_status = apps_status.get(app, '') - if current_app_status == status: - checked.append(app) - elif current_app_status.endswith('ed'): - check_failed.append(app) - checked.append(app) - - apps_to_check = list(set(apps_to_check) - set(checked)) - if not apps_to_check: - if check_failed: - msg = '{} failed to reach status - 
{}'.format(check_failed, - status) - LOG.warning(msg) - if fail_ok: - return False, check_failed - else: - raise exceptions.ContainerError(msg) - - LOG.info("{} reached expected status {}".format(apps, status)) - return True, None - - time.sleep(check_interval) - - check_failed += apps_to_check - msg = '{} did not reach status {} within {}s'.format(check_failed, status, - timeout) - LOG.warning(msg) - if fail_ok: - return False, check_failed - raise exceptions.ContainerError(msg) - - -def apply_app(app_name, check_first=False, fail_ok=False, applied_timeout=300, - check_interval=10, - wait_for_alarm_gone=True, con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - Apply/Re-apply application via system application-apply. Check for status - reaches 'applied'. - Args: - app_name (str): - check_first: - fail_ok: - applied_timeout: - check_interval: - con_ssh: - wait_for_alarm_gone (bool): - auth_info: - - Returns (tuple): - (-1, " is already applied. Do nothing.") # only returns - if check_first=True. - (0, " (re)applied successfully") - (1, ) # cli rejected - (2, " failed to apply") # did not reach applied status - after apply. - - """ - if check_first: - app_status = get_apps(application=app_name, field='status', - con_ssh=con_ssh, auth_info=auth_info) - if app_status and app_status[0] == AppStatus.APPLIED: - msg = '{} is already applied. Do nothing.'.format(app_name) - LOG.info(msg) - return -1, msg - - LOG.info("Apply application: {}".format(app_name)) - code, output = cli.system('application-apply', app_name, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - if code > 0: - return 1, output - - res = wait_for_apps_status(apps=app_name, status=AppStatus.APPLIED, - timeout=applied_timeout, - check_interval=check_interval, con_ssh=con_ssh, - auth_info=auth_info, fail_ok=fail_ok)[0] - if not res: - return 2, "{} failed to apply".format(app_name) - - if wait_for_alarm_gone: - alarm_id = EventLogID.CONFIG_OUT_OF_DATE - if system_helper.wait_for_alarm(alarm_id=alarm_id, - entity_id='controller', - timeout=15, fail_ok=True, - auth_info=auth_info, - con_ssh=con_ssh)[0]: - system_helper.wait_for_alarm_gone(alarm_id=alarm_id, - entity_id='controller', - timeout=120, - check_interval=10, - con_ssh=con_ssh, - auth_info=auth_info) - - msg = '{} (re)applied successfully'.format(app_name) - LOG.info(msg) - return 0, msg - - -def delete_app(app_name, check_first=True, fail_ok=False, applied_timeout=300, - con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - Delete an application via system application-delete. Verify application - no longer listed. - Args: - app_name: - check_first: - fail_ok: - applied_timeout: - con_ssh: - auth_info: - - Returns (tuple): - (-1, " does not exist. Do nothing.") - (0, " deleted successfully") - (1, ) - (2, " failed to delete") - - """ - - if check_first: - app_vals = get_apps(application=app_name, field='status', - con_ssh=con_ssh, auth_info=auth_info) - if not app_vals: - msg = '{} does not exist. 
Do nothing.'.format(app_name) - LOG.info(msg) - return -1, msg - - code, output = cli.system('application-delete', app_name, - ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info) - if code > 0: - return 1, output - - res = wait_for_apps_status(apps=app_name, status=None, - timeout=applied_timeout, - con_ssh=con_ssh, auth_info=auth_info, - fail_ok=fail_ok)[ - 0] - if not res: - return 2, "{} failed to delete".format(app_name) - - msg = '{} deleted successfully'.format(app_name) - LOG.info(msg) - return 0, msg - - -def remove_app(app_name, check_first=True, fail_ok=False, applied_timeout=300, - con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - Remove applied application via system application-remove. Verify it is in - 'uploaded' status. - Args: - app_name (str): - check_first: - fail_ok: - applied_timeout: - con_ssh: - auth_info: - - Returns (tuple): - (-1, " is not applied. Do nothing.") - (0, " removed successfully") - (1, ) - (2, " failed to remove") # Did not reach uploaded status - - """ - - if check_first: - app_vals = get_apps(application=app_name, field='status', - con_ssh=con_ssh, auth_info=auth_info) - if not app_vals or app_vals[0] in (AppStatus.UPLOADED, - AppStatus.UPLOAD_FAILED): - msg = '{} is not applied. Do nothing.'.format(app_name) - LOG.info(msg) - return -1, msg - - code, output = cli.system('application-remove', app_name, - ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info) - if code > 0: - return 1, output - - res = wait_for_apps_status(apps=app_name, status=AppStatus.UPLOADED, - timeout=applied_timeout, - con_ssh=con_ssh, auth_info=auth_info, - fail_ok=fail_ok)[0] - if not res: - return 2, "{} failed to remove".format(app_name) - - msg = '{} removed successfully'.format(app_name) - LOG.info(msg) - return 0, msg - - -def get_docker_reg_addr(con_ssh=None): - """ - Get local docker registry ip address in docker conf file. - Args: - con_ssh: - - Returns (str): - - """ - if not con_ssh: - con_ssh = ControllerClient.get_active_controller() - - output = con_ssh.exec_cmd( - 'grep --color=never "addr: " {}'.format(StxPath.DOCKER_CONF), - fail_ok=False)[1] - reg_addr = output.split('addr: ')[1].strip() - return reg_addr - - -def pull_docker_image(name, tag=None, digest=None, con_ssh=None, timeout=300, - fail_ok=False): - """ - Pull docker image via docker image pull. Verify image is listed in docker - image list. - Args: - name: - tag: - digest: - con_ssh: - timeout: - fail_ok: - - Returns (tuple): - (0, ) - (1, ) - - """ - - args = '{}'.format(name.strip()) - if tag: - args += ':{}'.format(tag) - elif digest: - args += '@{}'.format(digest) - - LOG.info("Pull docker image {}".format(args)) - code, out = exec_docker_cmd('image pull', args, timeout=timeout, - fail_ok=fail_ok, con_ssh=con_ssh) - if code != 0: - return 1, out - - image_id = get_docker_images(repo=name, tag=tag, field='IMAGE ID', - con_ssh=con_ssh, fail_ok=False)[0] - LOG.info( - 'docker image {} successfully pulled. 
ID: {}'.format(args, image_id)) - - return 0, image_id - - -def login_to_docker(registry=None, user=None, password=None, con_ssh=None, - fail_ok=False): - """ - Login to docker registry - Args: - registry (str|None): default docker registry will be used when None - user (str|None): admin user will be used when None - password (str|None): admin password will be used when None - con_ssh (SSHClient|None): - fail_ok (bool): - - Returns (tuple): - (0, (str)) # login succeeded - (1, (str)) # login failed - - """ - if not user: - user = 'admin' - if not password: - password = Tenant.get('admin_platform').get('password') - if not registry: - registry = Container.LOCAL_DOCKER_REG - - args = '-u {} -p {} {}'.format(user, password, registry) - LOG.info("Login to docker registry {}".format(registry)) - code, out = exec_docker_cmd('login', args, timeout=60, fail_ok=fail_ok, - con_ssh=con_ssh) - if code != 0: - return 1, out - - LOG.info('Logged into docker registry successfully: {}'.format(registry)) - return 0, args - - -def push_docker_image(name, tag=None, login_registry=None, con_ssh=None, - timeout=300, fail_ok=False): - """ - Push docker image via docker image push. - Args: - name: - tag: - login_registry (str|None): when set, login to given docker registry - before push - con_ssh: - timeout: - fail_ok: - - Returns (tuple): - (0, ) - (1, ) - - """ - args = '{}'.format(name.strip()) - if tag: - args += ':{}'.format(tag) - - if login_registry: - login_to_docker(registry=login_registry, con_ssh=con_ssh) - - LOG.info("Push docker image: {}".format(args)) - code, out = exec_docker_cmd('image push', args, timeout=timeout, - fail_ok=fail_ok, con_ssh=con_ssh) - if code != 0: - return 1, out - - LOG.info('docker image {} successfully pushed.'.format(args)) - return 0, args - - -def tag_docker_image(source_image, target_name, source_tag=None, - target_tag=None, con_ssh=None, timeout=300, - fail_ok=False): - """ - Tag docker image via docker image tag. Verify image is tagged via docker - image list. 
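# For reference: the tag-then-verify flow that tag_docker_image() and
# get_docker_images() implement over the suite's SSH wrappers can be
# sketched standalone with subprocess against the docker CLI. The name
# tag_and_verify below is illustrative, not part of this module.
import subprocess

def tag_and_verify(source, target):
    # 'docker image tag' adds a new reference to an existing local image.
    subprocess.run(['docker', 'image', 'tag', source, target], check=True)
    # Post-check, mirroring the get_docker_images() verification: the new
    # reference must appear in the local image list.
    listed = subprocess.run(
        ['docker', 'image', 'ls', target,
         '--format', '{{.Repository}}:{{.Tag}}'],
        capture_output=True, text=True, check=True).stdout.strip()
    if not listed:
        raise RuntimeError('{} not listed after tagging'.format(target))
    return target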
- Args: - source_image: - target_name: - source_tag: - target_tag: - con_ssh: - timeout: - fail_ok: - - Returns: - (0, ) - (1, ) - - """ - source_args = source_image.strip() - if source_tag: - source_args += ':{}'.format(source_tag) - - target_args = target_name.strip() - if target_tag: - target_args += ':{}'.format(target_tag) - - LOG.info("Tag docker image {} as {}".format(source_args, target_args)) - args = '{} {}'.format(source_args, target_args) - code, out = exec_docker_cmd('image tag', args, timeout=timeout, - fail_ok=fail_ok, con_ssh=con_ssh) - if code != 0: - return 1, out - - if not get_docker_images(repo=target_name, tag=target_tag, con_ssh=con_ssh, - fail_ok=False): - raise exceptions.ContainerError( - "Docker image {} is not listed after tagging {}".format( - target_name, source_image)) - - LOG.info('docker image {} successfully tagged as {}.'.format(source_args, - target_args)) - return 0, target_args - - -def remove_docker_images_with_pattern(pattern, con_ssh=None, timeout=300): - """ - Remove docker image(s) via docker image rm matching 'pattern' - Args: - pattern: - con_ssh: - timeout: - - Returns (tuple): - (0, ) - (1, ) - - """ - - LOG.info("Remove docker images matching pattern: {}".format(pattern)) - - args = " | grep " + pattern + " | awk '{print $3}' " - code, out = exec_docker_cmd("images", args, timeout=timeout, fail_ok=True, con_ssh=con_ssh) - - if out: - image_list = out.splitlines() - code, out = remove_docker_images(image_list, force=True, con_ssh=con_ssh) - - return code, out - - -def remove_docker_images(images, force=False, con_ssh=None, timeout=300, - fail_ok=False): - """ - Remove docker image(s) via docker image rm - Args: - images (str|tuple|list): - force (bool): - con_ssh: - timeout: - fail_ok: - - Returns (tuple): - (0, ) - (1, ) - - """ - if isinstance(images, str): - images = (images,) - - LOG.info("Remove docker images: {}".format(images)) - args = ' '.join(images) - if force: - args = '--force {}'.format(args) - - code, out = exec_docker_cmd('image rm', args, timeout=timeout, - fail_ok=fail_ok, con_ssh=con_ssh) - return code, out - - -def get_docker_images(repo=None, tag=None, field='IMAGE ID', con_ssh=None, - fail_ok=False): - """ - get values for given docker image via 'docker image ls ' - Args: - repo (str): - tag (str|None): - field (str|tuple|list): - con_ssh: - fail_ok - - Returns (list|None): return None if no docker images returned at all due - to cmd failure - - """ - args = None - if repo: - args = repo - if tag: - args += ':{}'.format(tag) - code, output = exec_docker_cmd(sub_cmd='image ls', args=args, - fail_ok=fail_ok, con_ssh=con_ssh) - if code != 0: - return None - - table_ = table_parser.table_kube(output) - if not table_['values']: - if fail_ok: - return None - else: - raise exceptions.ContainerError( - "docker image {} does not exist".format(args)) - - values = table_parser.get_multi_values(table_, fields=field, - zip_values=True) - - return values - - -def get_helm_overrides(field='overrides namespaces', app_name='stx-openstack', - charts=None, - auth_info=Tenant.get('admin_platform'), con_ssh=None): - """ - Get helm overrides values via system helm-override-list - Args: - field (str): - app_name - charts (None|str|list|tuple): - auth_info: - con_ssh: - - Returns (list): - - """ - table_ = table_parser.table( - cli.system('helm-override-list', app_name, ssh_client=con_ssh, - auth_info=auth_info)[1]) - - if charts: - table_ = table_parser.filter_table(table_, **{'chart name': charts}) - - vals = 
table_parser.get_multi_values(table_, fields=field, evaluate=True) - - return vals - - -def get_helm_override_values(chart, namespace, app_name='stx-openstack', - fields=('combined_overrides',), - auth_info=Tenant.get('admin_platform'), - con_ssh=None): - """ - Get helm-override values for given chart via system helm-override-show - Args: - chart (str): - namespace (str): - app_name (str) - fields (str|tuple|list): - auth_info: - con_ssh: - - Returns (list): list of parsed yaml formatted output. e.g., list of dict, - list of list, list of str - - """ - args = '{} {} {}'.format(app_name, chart, namespace) - table_ = table_parser.table( - cli.system('helm-override-show', args, ssh_client=con_ssh, - auth_info=auth_info)[1], - rstrip_value=True) - - if isinstance(fields, str): - fields = (fields,) - - values = [] - for field in fields: - value = table_parser.get_value_two_col_table(table_, field=field, - merge_lines=False) - values.append(yaml.load('\n'.join(value))) - - return values - - -def __convert_kv(k, v): - if '.' not in k: - return {k: v} - new_key, new_val = k.rsplit('.', maxsplit=1) - return __convert_kv(new_key, {new_val: v}) - - -def update_helm_override(chart, namespace, app_name='stx-openstack', - yaml_file=None, kv_pairs=None, - reset_vals=False, reuse_vals=False, - auth_info=Tenant.get('admin_platform'), - con_ssh=None, fail_ok=False): - """ - Update helm_override values for given chart - Args: - chart: - namespace: - app_name - yaml_file: - kv_pairs: - reset_vals: - reuse_vals: - fail_ok - con_ssh - auth_info - - Returns (tuple): - (0, (str|list|dict)) # cmd accepted. - (1, ) # system helm-override-update cmd rejected - - """ - args = '{} {} {}'.format(app_name, chart, namespace) - if reset_vals: - args = '--reset-values {}'.format(args) - if reuse_vals: - args = '--reuse-values {}'.format(args) - if yaml_file: - args = '--values {} {}'.format(yaml_file, args) - if kv_pairs: - cmd_overrides = ','.join( - ['{}={}'.format(k, v) for k, v in kv_pairs.items()]) - args = '--set {} {}'.format(cmd_overrides, args) - - code, output = cli.system('helm-override-update', args, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - if code != 0: - return 1, output - - table_ = table_parser.table(output, rstrip_value=True) - overrides = table_parser.get_value_two_col_table(table_, 'user_overrides') - overrides = yaml.load('\n'.join(overrides)) - # yaml.load converts str to bool, int, float; but does not convert - # None type. Updates are not verified here since it is rather complicated - # to verify properly. - LOG.info("Helm-override updated : {}".format(overrides)) - - return 0, overrides - - -def is_stx_openstack_deployed(applied_only=False, con_ssh=None, - auth_info=Tenant.get('admin_platform'), - force_check=False): - """ - Whether stx-openstack application is deployed. 
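# Worked example for the recursive __convert_kv() helper defined above: it
# expands a dotted helm override key into the nested mapping helm expects
# (the key 'conf.ovs.dpdk' is illustrative only):
#   __convert_kv('conf.ovs.dpdk', True)
#   -> __convert_kv('conf.ovs', {'dpdk': True})
#   -> __convert_kv('conf', {'ovs': {'dpdk': True}})
#   -> {'conf': {'ovs': {'dpdk': True}}}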
- Args: - applied_only (bool): if True, then only return True when application - is in applied state - con_ssh: - auth_info: - force_check: - - Returns (bool): - - """ - openstack_deployed = ProjVar.get_var('OPENSTACK_DEPLOYED') - if not applied_only and not force_check and openstack_deployed is not None: - return openstack_deployed - - openstack_status = get_apps(application='stx-openstack', field='status', - con_ssh=con_ssh, auth_info=auth_info) - - LOG.info("{}".format(openstack_status)) - - res = False - if openstack_status and 'appl' in openstack_status[0].lower(): - res = True - if applied_only and openstack_status[0] != AppStatus.APPLIED: - res = False - - return res diff --git a/automated-pytest-suite/keywords/dc_helper.py b/automated-pytest-suite/keywords/dc_helper.py deleted file mode 100644 index 48d2ff9f..00000000 --- a/automated-pytest-suite/keywords/dc_helper.py +++ /dev/null @@ -1,434 +0,0 @@ -# -# Copyright (c) 2020 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -import time -import copy - -from utils import cli, exceptions, table_parser -from utils.tis_log import LOG -from utils.clients.ssh import ControllerClient -from consts.auth import Tenant, HostLinuxUser -from consts.proj_vars import ProjVar -from consts.timeout import DCTimeout -from consts.filepaths import SysLogPath -from keywords import system_helper, nova_helper - - -def get_subclouds(field='name', name=None, avail=None, sync=None, mgmt=None, deploy=None, - auth_info=Tenant.get('admin_platform', 'RegionOne'), con_ssh=None, - source_openrc=None, rtn_dict=False, evaluate=False, strict=True, regex=False, - filter_subclouds=True): - """ - Get subclouds values - Args: - field (str | tuple): fields of value to get - name (str): subcloud name - avail (str): subcloud availability status - sync (str): subcloud sync status - mgmt (str): subcloud management status - deploy (str): subcloud deploy status - auth_info (dict): - con_ssh (SSHClient): - source_openrc (None|bool): - rtn_dict (bool): whether to return dict of field/value pairs - evaluate (bool): whether to convert value to python data type - strict (bool): True to use re.match, False to use re.search - regex (bool): whether to use regex to find value(s) - filter_subclouds (bool): whether to filter out the subclouds that are not in - the --subcloud_list arg - - Returns (list | dict): - when rtn_dict is False, list of values - when rtn_dict is True, dict of field/values pairs - - """ - table_ = table_parser.table(cli.dcmanager('subcloud list', ssh_client=con_ssh, - auth_info=auth_info, source_openrc=source_openrc)[1]) - arg_map = {'name': name, - 'availability': avail, - 'sync': sync, - 'management': mgmt, - 'deploy status': deploy} - kwargs = {key: val for key, val in arg_map.items() if val} - if filter_subclouds: - filtered_subclouds = table_parser.get_values(table_, target_header=field, **kwargs) - subcloud_list = ProjVar.get_var('SUBCLOUD_LIST') - if subcloud_list: - filtered_subclouds = [subcloud for subcloud in filtered_subclouds - if subcloud in subcloud_list] - LOG.info('filtered_subclouds: {}'.format(filtered_subclouds)) - return filtered_subclouds - else: - return table_parser.get_multi_values(table_, field, rtn_dict=rtn_dict, evaluate=evaluate, - strict=strict, regex=regex, **kwargs) - - -def wait_for_subcloud_status(subcloud, avail=None, sync=None, mgmt=None, deploy=None, - timeout=DCTimeout.SUBCLOUD_AUDIT, check_interval=30, - auth_info=Tenant.get('admin_platform', 'RegionOne'), - con_ssh=None, source_openrc=None, fail_ok=False): 
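# The wait helpers in these modules (wait_for_apps_status above,
# wait_for_subcloud_status here, wait_for_subcloud_config below) all follow
# the same poll-until-deadline pattern. A minimal standalone sketch of that
# pattern; poll_until and its arguments are illustrative names only:
import time

def poll_until(predicate, timeout=600, check_interval=30):
    # Re-evaluate the predicate until it returns truthy or the deadline passes.
    end_time = time.time() + timeout
    while time.time() < end_time:
        if predicate():
            return True
        time.sleep(check_interval)
    return False

# Usage sketch, e.g. with get_subclouds() above:
#   poll_until(lambda: get_subclouds(name='subcloud1', sync='in-sync'))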
- """ - Wait for subcloud status - Args: - subcloud: - avail: - sync: - mgmt: - timeout: - check_interval: - auth_info: - con_ssh: - source_openrc: - fail_ok: - - Returns: - - """ - - if not subcloud: - raise ValueError("Subcloud name must be specified") - - expt_status = {} - if avail: - expt_status['avail'] = avail - if sync: - expt_status['sync'] = sync - if mgmt: - expt_status['mgmt'] = mgmt - if deploy: - expt_status['deploy'] = deploy - - if not expt_status: - raise ValueError("At least one expected status of the subcloud must be specified.") - - LOG.info("Wait for {} status: {}".format(subcloud, expt_status)) - end_time = time.time() + timeout + check_interval - while time.time() < end_time: - if get_subclouds(field='name', name=subcloud, con_ssh=con_ssh, source_openrc=source_openrc, - auth_info=auth_info, **expt_status): - return 0, subcloud - LOG.info("Not in expected states yet...") - time.sleep(check_interval) - - msg = '{} status did not reach {} within {} seconds'.format(subcloud, expt_status, timeout) - LOG.warning(msg) - if fail_ok: - return 1, msg - else: - raise exceptions.DCError(msg) - - -def _manage_unmanage_subcloud(subcloud=None, manage=False, check_first=True, fail_ok=False, - con_ssh=None, auth_info=Tenant.get('admin_platform', 'RegionOne'), - source_openrc=False): - - """ - Manage/Unmanage given subcloud(s) - Args: - subcloud: - manage: - check_first: - fail_ok: - - Returns: - - """ - operation = 'manage' if manage else 'unmanage' - expt_state = '{}d'.format(operation) - if not subcloud: - subcloud = [ProjVar.get_var('PRIMARY_SUBCLOUD')] - elif isinstance(subcloud, str): - subcloud = [subcloud] - - subclouds_to_update = list(subcloud) - if check_first: - subclouds_in_state = get_subclouds(mgmt=expt_state, con_ssh=con_ssh, auth_info=auth_info) - subclouds_to_update = list(set(subclouds_to_update) - set(subclouds_in_state)) - if not subclouds_to_update: - LOG.info("{} already {}. Do nothing.".format(subcloud, expt_state)) - return -1, [] - - LOG.info("Attempt to {}: {}".format(operation, subclouds_to_update)) - failed_subclouds = [] - for subcloud_ in subclouds_to_update: - code, out = cli.dcmanager('subcloud ' + operation, subcloud_, ssh_client=con_ssh, - fail_ok=True, auth_info=auth_info, source_openrc=source_openrc) - - if code > 0: - failed_subclouds.append(subcloud_) - - if failed_subclouds: - err = "Failed to {} {}".format(operation, failed_subclouds) - if fail_ok: - LOG.info(err) - return 1, failed_subclouds - raise exceptions.DCError(err) - - LOG.info("Check management status for {} after dcmanager subcloud {}".format( - subclouds_to_update, operation)) - mgmt_states = get_subclouds(field='management', name=subclouds_to_update, auth_info=auth_info, - con_ssh=con_ssh) - failed_subclouds = \ - [subclouds_to_update[i] for i in range(len(mgmt_states)) if mgmt_states[i] != expt_state] - if failed_subclouds: - raise exceptions.DCError("{} not {} after dcmanger subcloud {}".format( - failed_subclouds, expt_state, operation)) - - return 0, subclouds_to_update - - -def manage_subcloud(subcloud=None, check_first=True, fail_ok=False, con_ssh=None): - """ - Manage subcloud(s) - Args: - subcloud (str|tuple|list): - check_first (bool): - fail_ok (bool): - con_ssh(SSClient): - - Returns (tuple): - (-1, []) All give subcloud(s) already managed. Do nothing. 
- (0, []) Successfully managed the give subcloud(s) - (1, []) dcmanager manage cli failed on these subcloud(s) - - """ - return _manage_unmanage_subcloud(subcloud=subcloud, manage=True, check_first=check_first, - fail_ok=fail_ok, - con_ssh=con_ssh) - - -def unmanage_subcloud(subcloud=None, check_first=True, fail_ok=False, con_ssh=None, - source_openrc=False): - """ - Unmanage subcloud(s) - Args: - subcloud (str|tuple|list): - check_first (bool): - fail_ok (bool): - con_ssh (SSHClient): - - Returns (tuple): - (-1, []) All give subcloud(s) already unmanaged. Do nothing. - (0, []) Successfully unmanaged the give subcloud(s) - (1, []) dcmanager unmanage cli failed on these subcloud(s) - - """ - return _manage_unmanage_subcloud(subcloud=subcloud, manage=False, check_first=check_first, - fail_ok=fail_ok, con_ssh=con_ssh, source_openrc=source_openrc) - - -def wait_for_subcloud_config(func, *func_args, subcloud=None, config_name=None, - expected_value=None, auth_name='admin_platform', fail_ok=False, - timeout=DCTimeout.SYNC, check_interval=30, strict_order=True, - **func_kwargs): - """ - Wait for subcloud configuration to reach expected value - Args: - subcloud (str|None): - func: function defined to get current value, which has to has parameter con_ssh and auth_info - *func_args: positional args for above func. Should NOT include auth_info or con_ssh. - config_name (str): such as dns, keypair, etc - expected_value (None|str|list): - auth_name (str): auth dict name. e.g., admin_platform, admin, tenant1, TENANT2, etc - fail_ok (bool): - timeout (int): - check_interval (int): - strict_order (bool) - **func_kwargs: kwargs for defined func. auth_info and con_ssh has to be provided here - - Returns (tuple): - (0, ) # same as expected - (1, ) # did not update within timeout - (2, ) # updated to unexpected value - - """ - if not subcloud: - subcloud = ProjVar.get_var('PRIMARY_SUBCLOUD') - - config_name = ' ' + config_name if config_name else '' - - if expected_value is None: - central_ssh = ControllerClient.get_active_controller(name='RegionOne') - expected_value = func(con_ssh=central_ssh, - auth_info=Tenant.get(auth_name, dc_region='RegionOne')) - elif isinstance(expected_value, str): - expected_value = expected_value.split(sep=',') - - if not strict_order: - expected_value = sorted(list(expected_value)) - - LOG.info("Wait for {}{} to be {}".format(subcloud, config_name, expected_value)) - if not func_kwargs.get('con_ssh', None): - func_kwargs['con_ssh'] = ControllerClient.get_active_controller(name=subcloud) - if not func_kwargs.get('auth_info', None): - func_kwargs['auth_info'] = Tenant.get(auth_name, dc_region=subcloud) - - origin_subcloud_val = func(*func_args, **func_kwargs) - subcloud_val = copy.copy(origin_subcloud_val) - if isinstance(subcloud_val, str): - subcloud_val = subcloud_val.split(sep=',') - - if not strict_order: - subcloud_val = sorted(list(subcloud_val)) - - end_time = time.time() + timeout + check_interval - while time.time() < end_time: - if subcloud_val == expected_value: - LOG.info("{}{} setting is same as central region".format(subcloud, config_name)) - return 0, subcloud_val - - elif subcloud_val != origin_subcloud_val: - msg = '{}{} config changed to unexpected value. 
Expected: {}; Actual: {}'.\ - format(subcloud, config_name, expected_value, subcloud_val) - - if fail_ok: - LOG.info(msg) - return 2, subcloud_val - else: - raise exceptions.DCError(msg) - - time.sleep(check_interval) - subcloud_val = func(*func_args, **func_kwargs) - - msg = '{}{} config did not reach: {} within {} seconds; actual: {}'.format( - subcloud, config_name, expected_value, timeout, subcloud_val) - if fail_ok: - LOG.info(msg) - return 1, subcloud_val - else: - raise exceptions.DCError(msg) - - -def wait_for_sync_audit(subclouds, con_ssh=None, fail_ok=False, filters_regex=None, - timeout=DCTimeout.SYNC): - """ - Wait for Updating subcloud log msg in dcmanager.log for given subcloud(s) - Args: - subclouds (list|tuple|str): - con_ssh: - fail_ok: - filters_regex: e.g., ['audit_action.*keypair', 'Clean audit.*ntp'], '\/compute' - timeout: - - Returns (tuple): - (True, ) - (False, ) - - """ - if not con_ssh: - con_ssh = ControllerClient.get_active_controller('RegionOne') - - if isinstance(subclouds, str): - subclouds = [subclouds] - - LOG.info("Waiting for sync audit in dcmanager.log for: {}".format(subclouds)) - if not filters_regex: - filters_regex = ['platform', 'patching', 'identity'] - elif isinstance(filters_regex, str): - filters_regex = [filters_regex] - - subclouds_dict = {subcloud: list(filters_regex) for subcloud in subclouds} - res = {subcloud: False for subcloud in subclouds} - subclouds_to_wait = list(subclouds) - end_time = time.time() + timeout - - expt_list = [] - for subcloud in subclouds_dict: - expt_list += ['{}.*{}'.format(subcloud, service) for service in subclouds_dict[subcloud]] - - con_ssh.send('tail -n 0 -f {}'.format(SysLogPath.DC_ORCH)) - - try: - while time.time() < end_time: - index = con_ssh.expect(expt_list, timeout=timeout, fail_ok=True) - if index >= 0: - subcloud_, service_ = expt_list[index].split('.*', maxsplit=1) - subclouds_dict[subcloud_].remove(service_) - expt_list.pop(index) - if not subclouds_dict[subcloud_]: - subclouds_to_wait.remove(subcloud_) - subclouds_dict.pop(subcloud_) - res[subcloud_] = True - if not subclouds_to_wait: - LOG.info("sync request logged for: {}".format(subclouds)) - return True, res - else: - msg = 'sync audit for {} not shown in {} in {}s: {}'.format( - subclouds_to_wait, SysLogPath.DC_ORCH, timeout, subclouds_dict) - if fail_ok: - LOG.info(msg) - for subcloud in subclouds_to_wait: - res[subcloud] = False - return False, res - else: - raise exceptions.DCError(msg) - - finally: - con_ssh.send_control() - con_ssh.expect() - - -def wait_for_subcloud_dns_config(subcloud=None, subcloud_ssh=None, expected_dns=None, - fail_ok=False, timeout=DCTimeout.SYNC, check_interval=30): - """ - Wait for dns configuration to reach expected value - Args: - subcloud (str|None): - subcloud_ssh (None|SSHClient): - expected_dns (None|str|list): - fail_ok (bool): - timeout (int): - check_interval (int): - - Returns (tuple): - (0, ) # same as expected - (1, ) # did not update within timeout - (2, ) # updated to unexpected value - - """ - func = system_helper.get_dns_servers - func_kwargs = {'con_ssh': subcloud_ssh} if subcloud_ssh else {} - return wait_for_subcloud_config(subcloud=subcloud, func=func, config_name='DNS', - expected_value=expected_dns, fail_ok=fail_ok, timeout=timeout, - check_interval=check_interval, **func_kwargs) - - -def wait_for_subcloud_ntp_config(subcloud=None, subcloud_ssh=None, expected_ntp=None, - clear_alarm=True, fail_ok=False, timeout=DCTimeout.SYNC, - check_interval=30): - """ - Wait for ntp configuration to reach 
expected value - Args: - subcloud (str|None): - subcloud_ssh (None|SSHClient): - expected_ntp (None|str|list): - clear_alarm (bool) - fail_ok (bool): - timeout (int): - check_interval (int): - - Returns (tuple): - (0, ) # same as expected - (1, ) # did not update within timeout - (2, ) # updated to unexpected value - - """ - if not subcloud: - subcloud = ProjVar.get_var('PRIMARY_SUBCLOUD') - func_kwargs = {'auth_info': Tenant.get('admin_platform', subcloud)} - if subcloud_ssh: - func_kwargs['con_ssh'] = subcloud_ssh - - func = system_helper.get_ntp_servers - res = wait_for_subcloud_config(subcloud=subcloud, func=func, config_name='NTP', - expected_value=expected_ntp, fail_ok=fail_ok, timeout=timeout, - check_interval=check_interval, **func_kwargs) - - if res[0] in (0, 2) and clear_alarm: - system_helper.wait_and_clear_config_out_of_date_alarms(host_type='controller', - **func_kwargs) - - return res diff --git a/automated-pytest-suite/keywords/glance_helper.py b/automated-pytest-suite/keywords/glance_helper.py deleted file mode 100644 index ed2478ab..00000000 --- a/automated-pytest-suite/keywords/glance_helper.py +++ /dev/null @@ -1,1146 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -import os -import re -import time -import json - -from pytest import skip - -from consts.auth import Tenant, HostLinuxUser -from consts.stx import GuestImages, ImageMetadata -from consts.proj_vars import ProjVar -from consts.timeout import ImageTimeout -from keywords import common, system_helper, host_helper -from testfixtures.fixture_resources import ResourceCleanup -from utils import table_parser, cli, exceptions -from utils.clients.ssh import ControllerClient, get_cli_client -from utils.tis_log import LOG - - -def get_images(long=False, images=None, field='id', - auth_info=Tenant.get('admin'), con_ssh=None, strict=True, - exclude=False, **kwargs): - """ - Get a list of image id(s) that matches the criteria - Args: - long (bool) - images (str|list): ids of images to filter from - field(str|list|tuple): id or name - auth_info (dict): - con_ssh (SSHClient): - strict (bool): match full string or substring for the value(s) given - in kwargs. - This is only applicable if kwargs key-val pair(s) are provided. - exclude (bool): whether to exclude item containing the string/pattern - in kwargs. - e.g., search for images that don't contain 'raw' - **kwargs: header-value pair(s) to filter out images from given image - list. e.g., Status='active', Name='centos' - - Returns (list): list of image ids - - """ - args = '--long' if long else '' - table_ = table_parser.table( - cli.openstack('image list', args, ssh_client=con_ssh, - auth_info=auth_info)[1]) - if images: - table_ = table_parser.filter_table(table_, ID=images) - - return table_parser.get_multi_values(table_, field, strict=strict, - exclude=exclude, **kwargs) - - -def get_image_id_from_name(name=None, strict=False, fail_ok=True, con_ssh=None, - auth_info=None): - """ - - Args: - name (list or str): - strict: - fail_ok (bool): whether to raise exception if no image found with - provided name - con_ssh: - auth_info (dict: - - Returns: - Return a random image_id that match the name. 
else return an empty - string - - """ - if name is None: - name = GuestImages.DEFAULT['guest'] - - matching_images = get_images(name=name, auth_info=auth_info, - con_ssh=con_ssh, strict=strict) - if not matching_images: - image_id = '' - msg = "No existing image found with name: {}".format(name) - LOG.warning(msg) - if not fail_ok: - raise exceptions.CommonError(msg) - else: - image_id = matching_images[0] - if len(matching_images) > 1: - LOG.warning('More than one glace image found with name {}. ' - 'Select {}.'.format(name, image_id)) - - return image_id - - -def get_avail_image_space(con_ssh, path='/opt/cgcs'): - """ - Get available disk space in GiB on given path which is where glance - images are saved at - Args: - con_ssh: - path (str) - - Returns (float): e.g., 9.2 - - """ - size = con_ssh.exec_cmd("df {} | awk '{{print $4}}'".format(path), - fail_ok=False)[1] - size = float(size.splitlines()[-1].strip()) / (1024 * 1024) - return size - - -def is_image_storage_sufficient(img_file_path=None, guest_os=None, - min_diff=0.05, con_ssh=None, - image_host_ssh=None): - """ - Check if glance image storage disk is sufficient to create new glance - image from specified image - Args: - img_file_path (str): e.g., /home/sysadmin/images/tis-centos-guest.img - guest_os (str): used if img_file_path is not provided. e,g., - ubuntu_14, ge_edge, cgcs-guest, etc - min_diff: minimum difference required between available space and - specifiec size. e.g., 0.1G - con_ssh (SSHClient): tis active controller ssh client - image_host_ssh (SSHClient): such as test server ssh where image file - was stored - - Returns (bool): - - """ - if image_host_ssh is None: - image_host_ssh = get_cli_client(central_region=True) - file_size = get_image_size(img_file_path=img_file_path, guest_os=guest_os, - ssh_client=image_host_ssh) - - if con_ssh is None: - name = 'RegionOne' if ProjVar.get_var('IS_DC') else None - con_ssh = ControllerClient.get_active_controller(name=name) - if 0 == con_ssh.exec_cmd('ceph df')[0]: - # assume image storage for ceph is sufficient - return True, file_size, None - - avail_size = get_avail_image_space(con_ssh=con_ssh) - - return avail_size - file_size >= min_diff, file_size, avail_size - - -def get_image_file_info(img_file_path=None, guest_os=None, ssh_client=None): - """ - Get image file info as dictionary - Args: - img_file_path (str): e.g., /home/sysadmin/images/tis-centos-guest.img - guest_os (str): has to be specified if img_file_path is unspecified. - e.g., 'tis-centos-guest' - ssh_client (SSHClient): e.g., test server ssh - - Returns (dict): image info dict. - Examples: - { - "virtual-size": 688914432, - "filename": "images/cgcs-guest.img", - "format": "raw", - "actual-size": 688918528, - "dirty-flag": false - } - - """ - if not img_file_path: - if guest_os is None: - raise ValueError( - "Either img_file_path or guest_os has to be provided") - else: - img_file_info = GuestImages.IMAGE_FILES.get(guest_os, None) - if not img_file_info: - raise ValueError( - "Invalid guest_os provided. 
Choose from: {}".format( - GuestImages.IMAGE_FILES.keys())) - # Assume ssh_client is test server client and image path is test - # server path - img_file_path = "{}/{}".format( - GuestImages.DEFAULT['image_dir_file_server'], img_file_info[0]) - - def _get_img_dict(ssh_): - img_info = ssh_.exec_cmd("qemu-img info --output json {}".format( - img_file_path), fail_ok=False)[1] - return json.loads(img_info) - - if ssh_client is None: - with host_helper.ssh_to_test_server() as ssh_client: - img_dict = _get_img_dict(ssh_=ssh_client) - else: - img_dict = _get_img_dict(ssh_=ssh_client) - - LOG.info("Image {} info: {}".format(img_file_path, img_dict)) - return img_dict - - -def get_image_size(img_file_path=None, guest_os=None, virtual_size=False, - ssh_client=None): - """ - Get image virtual or actual size in GB via qemu-img info - Args: - img_file_path (str): e.g., /home/sysadmin/images/tis-centos-guest.img - guest_os (str): has to be specified if img_file_path is unspecified. - e.g., 'tis-centos-guest' - virtual_size: - ssh_client: - - Returns (float): image size in GiB - """ - key = "virtual-size" if virtual_size else "actual-size" - img_size = get_image_file_info(img_file_path=img_file_path, - guest_os=guest_os, - ssh_client=ssh_client)[key] - img_size = float(img_size) / (1024 * 1024 * 1024) - return img_size - - -def get_avail_image_conversion_space(con_ssh=None): - """ - Get available disk space in GB on /opt/img-conversions - Args: - con_ssh: - - Returns (float): e.g., 19.2 - - """ - size = con_ssh.exec_cmd("df | grep '/opt/img-conversions' | " - "awk '{{print $4}}'")[1] - size = float(size.strip()) / (1024 * 1024) - return size - - -def is_image_conversion_sufficient(img_file_path=None, guest_os=None, - min_diff=0.05, con_ssh=None, - img_host_ssh=None): - """ - Check if image conversion space is sufficient to convert given image to - raw format - Args: - img_file_path (str): e.g., /home/sysadmin/images/tis-centos-guest.img - guest_os (str): has to be specified if img_file_path is unspecified. 
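# The storage checks in this module shell out to standard tools: 'qemu-img
# info --output json' for image sizes (get_image_file_info above) and 'df',
# whose 4th column is Available space in KiB (get_avail_image_space,
# get_avail_image_conversion_space). A standalone local sketch of both
# measurements, without the suite's SSH clients; function names are
# illustrative:
import json
import subprocess

def image_actual_size_gib(path):
    out = subprocess.run(['qemu-img', 'info', '--output', 'json', path],
                         capture_output=True, text=True, check=True).stdout
    return float(json.loads(out)['actual-size']) / (1024 ** 3)

def avail_space_gib(path='/opt/cgcs'):
    out = subprocess.run(['df', path], capture_output=True, text=True,
                         check=True).stdout
    return int(out.splitlines()[-1].split()[3]) / (1024 * 1024)  # KiB -> GiB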
- e.g., 'tis-centos-guest' - min_diff (int): in GB - con_ssh: - img_host_ssh - - Returns (bool): - - """ - if con_ssh is None: - con_ssh = ControllerClient.get_active_controller() - - if not system_helper.get_storage_nodes(con_ssh=con_ssh): - return True - - avail_size = get_avail_image_conversion_space(con_ssh=con_ssh) - file_size = get_image_size(img_file_path=img_file_path, guest_os=guest_os, - virtual_size=True, - ssh_client=img_host_ssh) - - return avail_size - file_size >= min_diff - - -def ensure_image_storage_sufficient(guest_os, con_ssh=None): - """ - Before image file is copied to tis, check if image storage is sufficient - Args: - guest_os: - con_ssh: - - Returns: - - """ - with host_helper.ssh_to_test_server() as img_ssh: - is_sufficient, image_file_size, avail_size = \ - is_image_storage_sufficient(guest_os=guest_os, con_ssh=con_ssh, - image_host_ssh=img_ssh) - if not is_sufficient: - images_to_del = get_images(exclude=True, - Name=GuestImages.DEFAULT['guest'], - con_ssh=con_ssh) - if images_to_del: - LOG.info( - "Delete non-default images due to insufficient image " - "storage media to create required image") - delete_images(images_to_del, check_first=False, con_ssh=con_ssh) - if not is_image_storage_sufficient(guest_os=guest_os, - con_ssh=con_ssh, - image_host_ssh=img_ssh)[0]: - LOG.info( - "Insufficient image storage media to create {} image " - "even after deleting non-default " - "glance images".format(guest_os)) - return False, image_file_size - else: - LOG.info( - "Insufficient image storage media to create {} " - "image".format( - guest_os)) - return False, image_file_size - - return True, image_file_size - - -def create_image(name=None, image_id=None, source_image_file=None, volume=None, - visibility='public', force=None, - store=None, disk_format=None, container_format=None, - min_disk=None, min_ram=None, tags=None, - protected=None, project=None, project_domain=None, - timeout=ImageTimeout.CREATE, con_ssh=None, - auth_info=Tenant.get('admin'), fail_ok=False, - ensure_sufficient_space=True, sys_con_for_dc=True, - cleanup=None, hw_vif_model=None, **properties): - """ - Create an image with given criteria. - - Args: - name (str): string to be included in image name - image_id (str): id for the image to be created - source_image_file (str): local image file to create image from. - DefaultImage will be used if unset - volume (str) - disk_format (str): One of these: ami, ari, aki, vhd, vmdk, raw, - qcow2, vdi, iso - container_format (str): One of these: ami, ari, aki, bare, ovf - min_disk (int): Minimum size of disk needed to boot image (in gigabytes) - min_ram (int): Minimum amount of ram needed to boot image (in - megabytes) - visibility (str): public|private|shared|community - protected (bool): Prevent image from being deleted. - store (str): Store to upload image to - force (bool) - tags (str|tuple|list) - project (str|None) - project_domain (str|None) - timeout (int): max seconds to wait for cli return - con_ssh (SSHClient): - auth_info (dict|None): - fail_ok (bool): - ensure_sufficient_space (bool) - sys_con_for_dc (bool): create image on system controller if it's - distributed cloud - cleanup (str|None): add to teardown list. 
'function', 'class', - 'module', 'session', or None - hw_vif_model (None|str): if this is set, 'hw_vif_model' in properties - will be overridden - **properties: key=value pair(s) of properties to associate with the - image - - Returns (tuple): (rtn_code(int), message(str)) # 1, 2 only - applicable if fail_ok=True - - (0, , "Image is created successfully") - - (1, , ) # openstack image create cli rejected - - (2, , "Image status is not active.") - """ - - # Use source image url if url is provided. Else use local img file. - - default_guest_img = GuestImages.IMAGE_FILES[GuestImages.DEFAULT['guest']][2] - - file_path = source_image_file - if not file_path and not volume: - img_dir = GuestImages.DEFAULT['image_dir'] - file_path = "{}/{}".format(img_dir, default_guest_img) - - if file_path: - if file_path.startswith('~/'): - file_path = file_path.replace('~', HostLinuxUser.get_home(), 1) - file_path = os.path.normpath(file_path) - if 'win' in file_path and 'os_type' not in properties: - properties['os_type'] = 'windows' - elif 'ge_edge' in file_path and 'hw_firmware_type' not in properties: - properties['hw_firmware_type'] = 'uefi' - - if hw_vif_model: - properties[ImageMetadata.VIF_MODEL] = hw_vif_model - - if sys_con_for_dc and ProjVar.get_var('IS_DC'): - con_ssh = ControllerClient.get_active_controller('RegionOne') - create_auth = Tenant.get(tenant_dictname=auth_info['tenant'], - dc_region='SystemController').copy() - image_host_ssh = get_cli_client(central_region=True) - else: - if not con_ssh: - con_ssh = ControllerClient.get_active_controller() - image_host_ssh = get_cli_client() - create_auth = auth_info - - if ensure_sufficient_space: - if not is_image_storage_sufficient(img_file_path=file_path, - con_ssh=con_ssh, - image_host_ssh=image_host_ssh)[0]: - skip('Insufficient image storage for creating glance image ' - 'from {}'.format(file_path)) - - source_str = file_path - - known_imgs = ['cgcs-guest', 'tis-centos-guest', 'ubuntu', 'cirros', - 'opensuse', 'rhel', 'centos', 'win', 'ge_edge', - 'vxworks', 'debian-8-m-agent'] - name = name if name else 'auto' - for img_str in known_imgs: - if img_str in name: - break - elif img_str in source_str: - name = img_str + '_' + name - break - else: - name_prefix = source_str.split(sep='/')[-1] - name_prefix = name_prefix.split(sep='.')[0] - name = name_prefix + '_' + name - - name = common.get_unique_name(name_str=name, existing_names=get_images(), - resource_type='image') - - LOG.info("Creating glance image: {}".format(name)) - - if not disk_format: - if not source_image_file: - # default tis-centos-guest image is raw - disk_format = 'raw' - else: - disk_format = 'qcow2' - - args_dict = { - '--id': image_id, - '--store': store, - '--disk-format': disk_format, - '--container-format': container_format if container_format else 'bare', - '--min-disk': min_disk, - '--min-ram': min_ram, - '--file': file_path, - '--force': True if force else None, - '--protected': True if protected else None, - '--unprotected': True if protected is False else None, - '--tag': tags, - '--property': properties, - '--project': project, - '--project-domain': project_domain, - '--volume': volume, - } - if visibility: - args_dict['--{}'.format(visibility)] = True - args_ = '{} {}'.format( - common.parse_args(args_dict, repeat_arg=True, vals_sep=','), name) - - try: - LOG.info("Creating image {} with args: {}".format(name, args_)) - code, output = cli.openstack('image create', args_, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=create_auth, - timeout=timeout) - except: - # 
This is added to help debugging image create failure in case of - # insufficient space - con_ssh.exec_cmd('df -h', fail_ok=True, get_exit_code=False) - raise - - table_ = table_parser.table(output) - actual_id = table_parser.get_value_two_col_table(table_, 'id') - if cleanup and actual_id: - ResourceCleanup.add('image', actual_id, scope=cleanup) - - if code > 1: - return 1, actual_id, output - - in_active = wait_for_image_status(actual_id, con_ssh=con_ssh, - auth_info=create_auth, fail_ok=fail_ok) - if not in_active: - return 2, actual_id, "Image status is not active." - - if image_id and image_id != actual_id: - msg = "Actual image id - {} is different than requested id - {}.".\ - format(actual_id, image_id) - if fail_ok: - return 3, actual_id, msg - raise exceptions.ImageError(msg) - - msg = "Image {} is created successfully".format(actual_id) - LOG.info(msg) - return 0, actual_id, msg - - -def wait_for_image_appear(image_id, auth_info=None, timeout=900, fail_ok=False): - end_time = time.time() + timeout - while time.time() < end_time: - images = get_images(auth_info=auth_info) - if image_id in images: - return True - - time.sleep(20) - - if not fail_ok: - raise exceptions.StorageError( - "Glance image {} did not appear within {} seconds.".format(image_id, - timeout)) - - return False - - -def wait_for_image_status(image_id, status='active', - timeout=ImageTimeout.STATUS_CHANGE, check_interval=3, - fail_ok=True, con_ssh=None, auth_info=None): - actual_status = None - end_time = time.time() + timeout - while time.time() < end_time: - actual_status = get_image_values(image_id, fields='status', - auth_info=auth_info, - con_ssh=con_ssh)[0] - if status.lower() == actual_status.lower(): - LOG.info("Image {} has reached status: {}".format(image_id, status)) - return True - - time.sleep(check_interval) - - else: - msg = "Timed out waiting for image {} status to change to {}. Actual " \ - "status: {}".format(image_id, status, actual_status) - if fail_ok: - LOG.warning(msg) - return False - raise exceptions.TimeoutException(msg) - - -def _wait_for_images_deleted(images, timeout=ImageTimeout.STATUS_CHANGE, - fail_ok=True, - check_interval=3, con_ssh=None, - auth_info=Tenant.get('admin')): - """ - check if a specific field still exist in a specified column of openstack - image list - - Args: - images (list|str): - timeout (int): - fail_ok (bool): - check_interval (int): - con_ssh: - auth_info (dict): - - Returns (bool): Return True if the specific image_id is found within the - timeout period. False otherwise - - """ - if isinstance(images, str): - images = [images] - - imgs_to_check = list(images) - imgs_deleted = [] - end_time = time.time() + timeout - while time.time() < end_time: - existing_imgs = get_images(con_ssh=con_ssh, auth_info=auth_info) - for img in imgs_to_check: - if img not in existing_imgs: - imgs_to_check.remove(img) - imgs_deleted.append(img) - - if not imgs_to_check: - return True, tuple(imgs_deleted) - - time.sleep(check_interval) - else: - if fail_ok: - return False, tuple(imgs_deleted) - raise exceptions.TimeoutException( - "Timed out waiting for all given images to be removed from " - "openstack " - "image list. Given images: {}. Images still exist: {}.". 
- format(images, imgs_to_check)) - - -def image_exists(image, image_val='ID', con_ssh=None, - auth_info=Tenant.get('admin')): - """ - Args: - image: - image_val: Name or ID - con_ssh: - auth_info - - Returns (bool): - - """ - images = get_images(auth_info=auth_info, con_ssh=con_ssh, field=image_val) - return image in images - - -def delete_images(images, timeout=ImageTimeout.DELETE, check_first=True, - fail_ok=False, con_ssh=None, - auth_info=Tenant.get('admin')): - """ - Delete given images - - Args: - images (list|str): ids of images to delete - timeout (int): max time wait for cli to return, and max time wait for - images to remove from openstack image list - check_first (bool): whether to check if images exist before attempt - to delete - fail_ok (bool): - con_ssh (SSHClient): - auth_info (dict): - - Returns (tuple): - (-1, "None of the given image(s) exist on system. Do nothing.") - (0, "image(s) deleted successfully") - (1, ) # if delete image cli returns stderr - (2, "Delete image cli ran successfully but some image(s) did - not disappear within seconds") - """ - if not images: - return -1, "No image provided to delete" - - LOG.info("Deleting image(s): {}".format(images)) - if isinstance(images, str): - images = [images] - else: - images = list(images) - - if check_first: - existing_images = get_images(images=images, auth_info=auth_info, - con_ssh=con_ssh) - imgs_to_del = list(set(existing_images) & set(images)) - if not imgs_to_del: - msg = "None of the given image(s) exist on system. Do nothing." - LOG.info(msg) - return -1, msg - else: - imgs_to_del = list(images) - - args_ = ' '.join(imgs_to_del) - - exit_code, cmd_output = cli.openstack('image delete', args_, - ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info, timeout=timeout) - if exit_code > 1: - return 1, cmd_output - - LOG.info("Waiting for images to be removed from openstack image " - "list: {}".format(imgs_to_del)) - all_deleted, images_deleted = _wait_for_images_deleted(imgs_to_del, - fail_ok=fail_ok, - con_ssh=con_ssh, - auth_info=auth_info, - timeout=timeout) - - if not all_deleted: - images_undeleted = set(imgs_to_del) - set(images_deleted) - msg = "Delete image cli ran successfully but some image(s) {} did " \ - "not disappear within {} seconds".format(images_undeleted, - timeout) - return 2, msg - - LOG.info("image(s) are successfully deleted: {}".format(imgs_to_del)) - return 0, "image(s) deleted successfully" - - -def get_image_properties(image, property_keys, rtn_dict=False, - auth_info=Tenant.get('admin'), con_ssh=None): - """ - - Args: - image (str): id of image - property_keys (str|list\tuple): list of metadata key(s) to get value( - s) for - rtn_dict (bool): whether to return list or dict - auth_info (dict): Admin by default - con_ssh (SSHClient): - - Returns (dict|list): image metadata in a dictionary. 
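# Key normalization in get_image_properties(): metadata keys are lower-cased
# and ':' / '-' are rewritten to '_' before lookup, so these calls are
# equivalent ('hw:mem_page_size' is a sample key):
#   get_image_properties(image, 'hw:mem_page_size')
#   get_image_properties(image, 'hw_mem_page_size')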
- Examples: {'hw_mem_page_size': small} - """ - if isinstance(property_keys, str): - property_keys = [property_keys] - - property_keys = [k.strip().lower().replace(':', '_').replace('-', '_') for k - in property_keys] - properties = get_image_values(image, fields='properties', - auth_info=auth_info, con_ssh=con_ssh)[0] - - if rtn_dict: - return {k: properties.get(k) for k in property_keys} - else: - return [properties.get(k) for k in property_keys] - - -def get_image_values(image, fields, auth_info=Tenant.get('admin'), con_ssh=None, - fail_ok=False): - """ - Get glance image values from openstack image show - Args: - image: - fields: - auth_info: - con_ssh: - fail_ok - - Returns (list): - - """ - if isinstance(fields, str): - fields = (fields,) - code, output = cli.openstack('image show', image, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - if code > 0: - return [None] * len(fields) - - table_ = table_parser.table(output) - values = table_parser.get_multi_values_two_col_table( - table_, fields, merge_lines=True, evaluate=True, - dict_fields='properties') - return values - - -def scp_guest_image(img_os='ubuntu_14', dest_dir=None, timeout=3600, - con_ssh=None): - """ - - Args: - img_os (str): guest image os type. valid values: ubuntu, centos_7, - centos_6 - dest_dir (str): where to save the downloaded image. Default is - '~/images' - timeout (int) - con_ssh (SSHClient): - - Returns (str): full file name of downloaded image. e.g., - '~/images/ubuntu_14.qcow2' - - """ - valid_img_os_types = list(GuestImages.IMAGE_FILES.keys()) - - if img_os not in valid_img_os_types: - raise ValueError( - "Invalid guest image OS type provided. Valid values: {}".format( - valid_img_os_types)) - - if not dest_dir: - dest_dir = GuestImages.DEFAULT['image_dir'] - - LOG.info("Downloading guest image from test server...") - dest_name = GuestImages.IMAGE_FILES[img_os][2] - ts_source_name = GuestImages.IMAGE_FILES[img_os][0] - if con_ssh is None: - con_ssh = get_cli_client(central_region=True) - - if ts_source_name: - # img saved on test server. 
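# The copy just below goes through common.scp_from_test_server_to_user_file_dir().
# A hedged standalone equivalent using paramiko SFTP; host and credential
# parameters are illustrative, not the suite's configuration:
import paramiko

def fetch_image(host, user, password, remote_path, local_path):
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(host, username=user, password=password)
    try:
        sftp = ssh.open_sftp()
        try:
            sftp.get(remote_path, local_path)
        finally:
            sftp.close()
    finally:
        ssh.close()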
scp from test server - source_path = '{}/{}'.format( - GuestImages.DEFAULT['image_dir_file_server'], ts_source_name) - dest_path = common.scp_from_test_server_to_user_file_dir( - source_path=source_path, dest_dir=dest_dir, - dest_name=dest_name, timeout=timeout, con_ssh=con_ssh) - else: - # scp from tis system if needed - dest_path = '{}/{}'.format(dest_dir, dest_name) - if ProjVar.get_var('REMOTE_CLI') and not con_ssh.file_exists(dest_path): - tis_source_path = '{}/{}'.format(GuestImages.DEFAULT['image_dir'], - dest_name) - common.scp_from_active_controller_to_localhost( - source_path=tis_source_path, dest_path=dest_path, - timeout=timeout) - - if not con_ssh.file_exists(dest_path): - raise exceptions.CommonError( - "image {} does not exist after download".format(dest_path)) - - LOG.info("{} image downloaded successfully and saved to {}".format( - img_os, dest_path)) - return dest_path - - -def get_guest_image(guest_os, rm_image=True, check_disk=False, cleanup=None, - use_existing=True): - """ - Get or create a glance image with given guest OS - Args: - guest_os (str): valid values: ubuntu_12, ubuntu_14, centos_6, - centos_7, opensuse_11, tis-centos-guest, - cgcs-guest, vxworks-guest, debian-8-m-agent - rm_image (bool): whether or not to rm image from /home/sysadmin/images - after creating glance image - check_disk (bool): whether to check if image storage disk is - sufficient to create new glance image - cleanup (str|None) - use_existing (bool): whether to use existing guest image if exists - - Returns (str): image_id - - """ - # TODO: temp workaround - if guest_os in ['opensuse_12', 'win_2016', 'win_2012']: - skip('Skip test with 20G+ virtual size image for now - CGTS-10776') - - nat_name = ProjVar.get_var('NATBOX').get('name') - if nat_name == 'localhost': - if re.search('win|rhel|opensuse', guest_os): - skip("Skip tests with large images for vbox") - - LOG.info("Get or create a glance image with {} guest OS".format(guest_os)) - img_id = None - if use_existing: - img_id = get_image_id_from_name(guest_os, strict=True) - - if not img_id: - con_ssh = None - img_file_size = 0 - if check_disk: - is_sufficient, img_file_size = ensure_image_storage_sufficient( - guest_os=guest_os) - if not is_sufficient: - skip( - "Insufficient image storage space in /opt/cgcs/ to create " - "{} image".format( - guest_os)) - - if guest_os == '{}-qcow2'.format(GuestImages.DEFAULT['guest']): - # convert default img to qcow2 format if needed - qcow2_img_path = '{}/{}'.format(GuestImages.DEFAULT['image_dir'], - GuestImages.IMAGE_FILES[guest_os][ - 2]) - con_ssh = ControllerClient.get_active_controller() - if not con_ssh.file_exists(qcow2_img_path): - raw_img_path = '{}/{}'.format(GuestImages.DEFAULT['image_dir'], - GuestImages.IMAGE_FILES[ - GuestImages.DEFAULT['guest']][ - 2]) - con_ssh.exec_cmd( - 'qemu-img convert -f raw -O qcow2 {} {}'.format( - raw_img_path, qcow2_img_path), - fail_ok=False, expect_timeout=600) - - # copy non-default img from test server - dest_dir = GuestImages.DEFAULT['image_dir'] - home_dir = HostLinuxUser.get_home() - if check_disk and os.path.normpath(home_dir) in os.path.abspath( - dest_dir): - # Assume image file should not be present on system since large - # image file should get removed - if not con_ssh: - con_ssh = ControllerClient.get_active_controller() - avail_sysadmin_home = get_avail_image_space(con_ssh=con_ssh, - path=home_dir) - if avail_sysadmin_home < img_file_size: - skip("Insufficient space in {} for {} image to be copied " - "to".format(home_dir, guest_os)) - - 
image_path = scp_guest_image(img_os=guest_os, dest_dir=dest_dir) - - try: - disk_format, container_format = GuestImages.IMAGE_FILES[guest_os][ - 3:5] - img_id = create_image(name=guest_os, source_image_file=image_path, - disk_format=disk_format, - container_format=container_format, - fail_ok=False, cleanup=cleanup)[1] - finally: - if rm_image and not re.search('cgcs-guest|tis-centos|ubuntu_14', - guest_os): - con_ssh = ControllerClient.get_active_controller() - con_ssh.exec_cmd('rm -f {}'.format(image_path), fail_ok=True, - get_exit_code=False) - - return img_id - - -def set_unset_image_vif_multiq(image, set_=True, fail_ok=False, con_ssh=None, - auth_info=Tenant.get('admin')): - """ - Set or unset a glance image with multiple vif-Queues - Args: - image (str): name or id of a glance image - set_ (bool): whether or not to set the hw_vif_multiqueue_enabled - fail_ok: - con_ssh: - auth_info: - - Returns (str): code, msg - - """ - - if image is None: - return 1, "Error:image_name not provided" - if set_: - cmd = 'image set ' - else: - cmd = 'image unset ' - - cmd += image - cmd += ' --property' - - if set_: - cmd += ' hw_vif_multiqueue_enabled=True' - else: - cmd += ' hw_vif_multiqueue_enabled' - - res, out = cli.openstack(cmd, ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info) - - return res, out - - -def unset_image(image, properties=None, tags=None, con_ssh=None, - auth_info=Tenant.get('admin')): - """ - - Args: - image (str): image name or id - properties (None|str|list|tuple): properties to unset - tags (None|str|list|tuple): tags to unset - con_ssh: - auth_info: - - Returns: - """ - args = [] - post_checks = {} - if properties: - if isinstance(properties, str): - properties = [properties] - for item in properties: - args.append('--property {}'.format(item)) - post_checks['properties'] = properties - - if tags: - if isinstance(tags, str): - tags = [tags] - for tag in tags: - args.append('--tag {}'.format(tag)) - post_checks['tags'] = tags - - if not args: - raise ValueError( - "Nothing to unset. 
Please specify property or tag to unset") - - args = ' '.join(args) + ' {}'.format(image) - code, out = cli.openstack('image unset', args, ssh_client=con_ssh, - fail_ok=True, auth_info=auth_info) - if code > 0: - return 1, out - - check_image_settings(image=image, check_dict=post_checks, unset=True, - con_ssh=con_ssh, auth_info=auth_info) - msg = "Image {} is successfully unset".format(image) - return 0, msg - - -def set_image(image, new_name=None, properties=None, min_disk=None, - min_ram=None, container_format=None, - disk_format=None, architecture=None, instance_id=None, - kernel_id=None, os_distro=None, - os_version=None, ramdisk_id=None, activate=None, project=None, - project_domain=None, tags=None, - protected=None, visibility=None, membership=None, - hw_vif_model=None, - con_ssh=None, auth_info=Tenant.get('admin')): - """ - Set image properties/metadata - Args: - image (str): - new_name (str|None): - properties (dict|None): - hw_vif_model (str|None): override hw_vif_model in properties if any - min_disk (int|str|None): - min_ram (int|str|None): - container_format (str|None): - disk_format (str|None): - architecture (str|None): - instance_id (str|None): - kernel_id (str|None): - os_distro (str|None): - os_version (str|None): - ramdisk_id (str|None): - activate (bool|None): - project (str|None): - project_domain (str|None): - tags (list|tuple|None): - protected (bool|None): - visibility (str): valid values: 'public', 'private', 'community', - 'shared' - membership (str): valid values: 'accept', 'reject', 'pending' - con_ssh: - auth_info: - - Returns (tupe): - (0, Image is successfully modified) - (1, ) - openstack image set is rejected - - """ - - post_checks = {} - args = [] - if protected is not None: - if protected: - args.append('--protected') - post_check_val = True - else: - args.append('--unprocteced') - post_check_val = False - post_checks['protected'] = post_check_val - - if visibility is not None: - valid_vals = ('public', 'private', 'community', 'shared') - if visibility not in valid_vals: - raise ValueError( - "Invalid visibility specified. Valid options: {}".format( - valid_vals)) - args.append('--{}'.format(visibility)) - post_checks['visibility'] = visibility - - if activate is not None: - if activate: - args.append('--activate') - post_check_val = 'active' - else: - args.append('--deactivate') - post_check_val = 'deactivated' - post_checks['status'] = post_check_val - - if membership is not None: - valid_vals = ('accept', 'reject', 'pending') - if membership not in valid_vals: - raise ValueError( - "Invalid membership specified. 
Valid options: {}".format( - valid_vals)) - args.append('--{}'.format(membership)) - # Unsure how to do post check - - if not properties: - properties = {} - if hw_vif_model: - properties[ImageMetadata.VIF_MODEL] = hw_vif_model - if properties: - for key, val in properties.items(): - args.append('--property {}="{}"'.format(key, val)) - post_checks['properties'] = properties - - if tags: - if isinstance(tags, str): - tags = [tags] - for tag in tags: - args.append('--tag {}'.format(tag)) - post_checks['tags'] = list(tags) - - other_args = { - '--name': (new_name, 'name'), - '--min-disk': (min_disk, 'min_disk'), - '--min-ram': (min_ram, 'min_ram'), - '--container-format': (container_format, 'container_format'), - '--disk-format': (disk_format, 'disk_format'), - '--project': (project, 'owner'), # assume project id will be given - '--project-domain': (project_domain, None), # Post check unhandled atm - '--architecture': (architecture, None), - '--instance-id': (instance_id, None), - '--kernel-id': (kernel_id, None), - '--os-distro': (os_distro, None), - '--os-version': (os_version, None), - '--ramdisk-id': (ramdisk_id, None), - } - - for key, val in other_args.items(): - if val[0] is not None: - args.append('{} {}'.format(key, val[0])) - if val[1]: - post_checks[val[1]] = val[0] - - args = ' '.join(args) - if not args: - raise ValueError("Nothing to set") - - args += ' {}'.format(image) - code, out = cli.openstack('image set', args, ssh_client=con_ssh, - fail_ok=True, auth_info=auth_info) - if code > 0: - return 1, out - - check_image_settings(image=image, check_dict=post_checks, con_ssh=con_ssh, - auth_info=auth_info) - msg = "Image {} is successfully modified".format(image) - return 0, msg - - -def check_image_settings(image, check_dict, unset=False, con_ssh=None, - auth_info=Tenant.get('admin')): - """ - Check image settings via openstack image show. - Args: - image (str): - check_dict (dict): key should be the field; - if unset, value should be a list or tuple, key should be properties - and/or tags - if set, value should be dict if key is properties or tags, - otherwise value should normally be a str - unset (bool): whether to check if given metadata are set or unset - con_ssh (SSHClient): - auth_info (dict): - - Returns (None): - - """ - LOG.info("Checking image setting is as specified: {}".format(check_dict)) - - post_tab = table_parser.table( - cli.openstack('image show', image, ssh_client=con_ssh, - auth_info=auth_info)[1], - combine_multiline_entry=True) - - for field, expt_val in check_dict.items(): - actual_val = table_parser.get_value_two_col_table(post_tab, field=field, - merge_lines=True) - if field == 'properties': - actual_vals = actual_val.split(', ') - actual_vals = ((val.split('=')) for val in actual_vals) - actual_dict = {k.strip(): v.strip() for k, v in actual_vals} - if unset: - for key in expt_val: - assert -1 == actual_dict.get(key, -1) - else: - for key, val in expt_val.items(): - actual = actual_dict[key] - try: - actual = eval(actual) - except (NameError, SyntaxError): - pass - assert str(val) == str(actual), \ - "Property {} is not as set. Expected: {}, actual: {}". \ - format(key, val, actual_dict[key]) - elif field == 'tags': - actual_vals = [val.strip() for val in actual_val.split(',')] - if unset: - assert not (set(expt_val) & set(actual_val)), \ - "Expected to be unset: {}, actual: {}". 
\
- format(expt_val, actual_vals)
- else:
- assert set(expt_val) <= set(actual_vals), \
- "Expected tags: {}, actual: {}".format(
- expt_val, actual_vals)
- else:
- if unset:
- LOG.warning("Unset flag ignored. Only property and tag "
- "are valid for unset")
- assert str(expt_val) == str(actual_val), \
- "{} is not as set. Expected: {}, actual: {}". \
- format(field, expt_val, actual_val)
diff --git a/automated-pytest-suite/keywords/gnocchi_helper.py b/automated-pytest-suite/keywords/gnocchi_helper.py
deleted file mode 100644
index 8e8b3bbc..00000000
--- a/automated-pytest-suite/keywords/gnocchi_helper.py
+++ /dev/null
@@ -1,165 +0,0 @@
-#
-# Copyright (c) 2019 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-
-
-from utils import cli
-from utils import table_parser
-from utils.tis_log import LOG
-
-from consts.auth import Tenant
-from keywords import common
-
-
-def get_aggregated_measures(field='value', resource_type=None, metrics=None,
- start=None, stop=None, overlap=None,
- refresh=None, resource_ids=None, extra_query=None,
- fail_ok=False, auth_info=Tenant.get('admin'),
- con_ssh=None):
- """
- Get measurements via 'openstack metric measures aggregation'
- Args:
- field (str): header of a column
- resource_type (str|None): used in --resource-type
- metrics (str|list|tuple|None): used in --metric <metric1> [metric2 ...]
- start (str|None): used in --start
- stop (str|None): used in --stop
- refresh (bool): used in --refresh
- overlap (str|None): overlap percentage. used in
- --needed-overlap
- resource_ids (str|list|tuple|None): used in --query "id=<id1>[
- or id=<id2> ...]"
- extra_query (str|None): used in --query
- fail_ok:
- auth_info:
- con_ssh:
-
- Returns (tuple): (code, list of strings)
-
- """
- LOG.info("Getting aggregated measurements...")
- args_dict = {
- 'resource-type': resource_type,
- 'metric': metrics,
- 'start': start,
- 'stop': stop,
- 'needed-overlap': overlap,
- 'refresh': refresh,
- }
-
- args = common.parse_args(args_dict, vals_sep=' ')
- query_str = ''
- if resource_ids:
- if isinstance(resource_ids, str):
- resource_ids = [resource_ids]
- resource_ids = ['id={}'.format(val) for val in resource_ids]
- query_str = ' or '.join(resource_ids)
-
- if extra_query:
- if resource_ids:
- query_str += ' and '
- query_str += '{}'.format(extra_query)
-
- if query_str:
- args += ' --query "{}"'.format(query_str)
-
- code, out = cli.openstack('metric measures aggregation', args,
- ssh_client=con_ssh, fail_ok=fail_ok,
- auth_info=auth_info)
- if code > 0:
- return 1, out
-
- table_ = table_parser.table(out)
- return 0, table_parser.get_values(table_, field)
-
-
-def get_metric_values(metric_id=None, metric_name=None, resource_id=None,
- fields='id', fail_ok=False,
- auth_info=Tenant.get('admin'), con_ssh=None):
- """
- Get metric info via 'openstack metric show'
- Args:
- metric_id (str|None):
- metric_name (str|None): Only used if metric_id is not provided
- resource_id (str|None): Only used if metric_id is not provided
- fields (str|list|tuple): field name
- fail_ok (bool):
- auth_info:
- con_ssh:
-
- Returns (list):
-
- """
- if metric_id is None and metric_name is None:
- raise ValueError("metric_id or metric_name has to be provided.")
-
- if metric_id:
- arg = metric_id
- else:
- if resource_id:
- arg = '--resource-id {} "{}"'.format(resource_id, metric_name)
- else:
- if not fail_ok:
- raise ValueError("resource_id needs to be provided when using "
- "metric_name")
- arg = '"{}"'.format(metric_name)
-
- code, output = cli.openstack('metric show', arg, -
ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info) - if code > 0: - return output - - table_ = table_parser.table(output) - return table_parser.get_multi_values_two_col_table(table_, fields) - - -def get_metrics(field='id', metric_name=None, resource_id=None, fail_ok=True, - auth_info=Tenant.get('admin'), con_ssh=None): - """ - Get metrics values via 'openstack metric list' - Args: - field (str|list|tuple): header of the metric list table - metric_name (str|None): - resource_id (str|None): - fail_ok (bool): - auth_info: - con_ssh: - - Returns (list): list of strings - - """ - columns = ['id', 'archive_policy/name', 'name', 'unit', 'resource_id'] - arg = '-f value ' - arg += ' '.join(['-c {}'.format(column) for column in columns]) - - grep_str = '' - if resource_id: - grep_str += ' | grep --color=never -E -i {}'.format(resource_id) - if metric_name: - grep_str += ' | grep --color=never -E -i {}'.format(metric_name) - - arg += grep_str - - code, output = cli.openstack('metric list', arg, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - if code > 0: - return [] - - values = [] - convert = False - if isinstance(field, str): - field = (field, ) - convert = True - - for header in field: - lines = output.splitlines() - index = columns.index(header.lower()) - vals = [line.split(sep=' ')[index] for line in lines] - values.append(vals) - - if convert: - values = values[0] - return values diff --git a/automated-pytest-suite/keywords/heat_helper.py b/automated-pytest-suite/keywords/heat_helper.py deleted file mode 100644 index 7cdc3317..00000000 --- a/automated-pytest-suite/keywords/heat_helper.py +++ /dev/null @@ -1,398 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -import time - -from utils import table_parser, cli, exceptions -from utils.tis_log import LOG -from utils.clients.ssh import get_cli_client -from consts.stx import GuestImages, HeatStackStatus, HEAT_CUSTOM_TEMPLATES -from consts.filepaths import TestServerPath -from keywords import network_helper, common -from testfixtures.fixture_resources import ResourceCleanup - - -def _wait_for_heat_stack_deleted(stack_name=None, timeout=120, - check_interval=3, con_ssh=None, - auth_info=None): - """ - This will wait for the heat stack to be deleted - Args: - stack_name(str): Heat stack name to check for state - con_ssh (SSHClient): If None, active controller ssh will be used. - auth_info (dict): Tenant dict. If None, primary tenant will be used. - - Returns: - - """ - LOG.info("Waiting for {} to be deleted...".format(stack_name)) - end_time = time.time() + timeout - while time.time() < end_time: - stack_status = get_stack_status(stack=stack_name, auth_info=auth_info, - con_ssh=con_ssh, fail_ok=True) - if not stack_status: - return True - elif stack_status[0] == HeatStackStatus.DELETE_FAILED: - LOG.warning('Heat stack in DELETE_FAILED state') - return False - - time.sleep(check_interval) - - msg = "Heat stack {} did not get deleted within timeout".format(stack_name) - - LOG.warning(msg) - return False - - -def wait_for_heat_status(stack_name=None, - status=HeatStackStatus.CREATE_COMPLETE, - timeout=300, check_interval=5, - fail_ok=False, con_ssh=None, auth_info=None): - """ - This will wait for the desired state of the heat stack or timeout - Args: - stack_name(str): Heat stack name to check for state - status(str): Status to check for - timeout (int) - check_interval (int) - fail_ok (bool - con_ssh (SSHClient): If None, active controller ssh will be used. 
- auth_info (dict): Tenant dict. If None, primary tenant will be used. - - Returns (tuple): , - - """ - LOG.info("Waiting for {} to be shown in {} ...".format(stack_name, status)) - end_time = time.time() + timeout - - fail_status = current_status = None - if status == HeatStackStatus.CREATE_COMPLETE: - fail_status = HeatStackStatus.CREATE_FAILED - elif status == HeatStackStatus.UPDATE_COMPLETE: - fail_status = HeatStackStatus.UPDATE_FAILED - - while time.time() < end_time: - current_status = get_stack_status(stack=stack_name, auth_info=auth_info, - con_ssh=con_ssh) - if status == current_status: - return True, 'Heat stack {} has reached {} status'.format( - stack_name, status) - elif fail_status == current_status: - stack_id = get_stack_values(stack=stack_name, fields='id', - auth_info=auth_info, con_ssh=con_ssh)[0] - get_stack_resources(stack=stack_id, auth_info=auth_info, - con_ssh=con_ssh) - - err = "Heat stack {} failed to reach {}, actual status: {}".format( - stack_name, status, fail_status) - if fail_ok: - LOG.warning(err) - return False, err - raise exceptions.HeatError(err) - - time.sleep(check_interval) - - stack_id = get_stack_values(stack=stack_name, fields='id', - auth_info=auth_info, con_ssh=con_ssh)[0] - get_stack_resources(stack=stack_id, auth_info=auth_info, con_ssh=con_ssh) - err_msg = "Heat stack {} did not reach {} within {}s. Actual " \ - "status: {}".format(stack_name, status, timeout, current_status) - if fail_ok: - LOG.warning(err_msg) - return False, err_msg - raise exceptions.HeatError(err_msg) - - -def get_stack_values(stack, fields='stack_status_reason', con_ssh=None, - auth_info=None, fail_ok=False): - code, out = cli.openstack('stack show', stack, ssh_client=con_ssh, - auth_info=auth_info, fail_ok=fail_ok) - if code > 0: - return None - - table_ = table_parser.table(out) - return table_parser.get_multi_values_two_col_table(table_=table_, - fields=fields) - - -def get_stacks(name=None, field='id', con_ssh=None, auth_info=None, all_=True): - """ - Get the stacks list based on name if given for a given tenant. - - Args: - con_ssh (SSHClient): If None, active controller ssh will be used. - auth_info (dict): Tenant dict. If None, primary tenant will be used. - all_ (bool): whether to display all stacks for admin user - name (str): Given name for the heat stack - field (str|list|tuple) - - Returns (list): list of heat stacks. - - """ - args = '' - if auth_info is not None: - if auth_info['user'] == 'admin' and all_: - args = '--a' - table_ = table_parser.table( - cli.openstack('stack list', positional_args=args, ssh_client=con_ssh, - auth_info=auth_info)[1]) - - kwargs = {'Stack Name': name} if name else {} - return table_parser.get_multi_values(table_, field, **kwargs) - - -def get_stack_status(stack, con_ssh=None, auth_info=None, fail_ok=False): - """ - Get the stacks status based on name if given for a given tenant. - - Args: - con_ssh (SSHClient): If None, active controller ssh will be used. - auth_info (dict): Tenant dict. If None, primary tenant will be used. - stack (str): Given name for the heat stack - fail_ok (bool): - - Returns (str): Heat stack status of a specific tenant. - - """ - status = get_stack_values(stack, fields='stack_status', con_ssh=con_ssh, - auth_info=auth_info, fail_ok=fail_ok) - status = status[0] if status else None - return status - - -def get_stack_resources(stack, field='resource_name', auth_info=None, - con_ssh=None, **kwargs): - """ - - Args: - stack (str): id (or name) for the heat stack. 
ID is required if admin - user is used to display tenant resource. - field: values to return - auth_info: - con_ssh: - kwargs: key/value pair to filer out the values to return - - Returns (list): - - """ - table_ = table_parser.table( - cli.openstack('stack resource list --long', stack, ssh_client=con_ssh, - auth_info=auth_info)[1]) - return table_parser.get_values(table_, target_header=field, **kwargs) - - -def delete_stack(stack, fail_ok=False, check_first=False, con_ssh=None, - auth_info=None): - """ - Delete the given heat stack for a given tenant. - - Args: - con_ssh (SSHClient): If None, active controller ssh will be used. - fail_ok (bool): - check_first (bool): whether or not to check the stack existence - before attempt to delete - auth_info (dict): Tenant dict. If None, primary tenant will be used. - stack (str): Given name for the heat stack - - Returns (tuple): Status and msg of the heat deletion. - - """ - - if not stack: - raise ValueError("stack_name is not provided.") - - if check_first: - if not get_stack_status(stack, con_ssh=con_ssh, auth_info=auth_info, - fail_ok=True): - msg = "Heat stack {} doesn't exist on the system. Do " \ - "nothing.".format(stack) - LOG.info(msg) - return -1, msg - - LOG.info("Deleting Heat Stack %s", stack) - exitcode, output = cli.openstack('stack delete -y', stack, - ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info) - if exitcode > 1: - LOG.warning("Delete heat stack request rejected.") - return 1, output - - if not _wait_for_heat_stack_deleted(stack_name=stack, auth_info=auth_info): - stack_id = get_stack_values(stack=stack, fields='id', - auth_info=auth_info, con_ssh=con_ssh)[0] - get_stack_resources(stack=stack_id, auth_info=auth_info, - con_ssh=con_ssh) - - msg = "heat stack {} is not removed after stack-delete.".format(stack) - if fail_ok: - LOG.warning(msg) - return 2, msg - raise exceptions.HeatError(msg) - - succ_msg = "Heat stack {} is successfully deleted.".format(stack) - LOG.info(succ_msg) - return 0, succ_msg - - -def get_heat_params(param_name=None): - """ - Generate parameters for heat based on keywords - - Args: - param_name (str): template to be used to create heat stack. - - Returns (str): return None if failure or the val for the given param - - """ - if param_name == 'NETWORK': - net_id = network_helper.get_mgmt_net_id() - return network_helper.get_net_name_from_id(net_id=net_id) - elif param_name == 'FLAVOR': - return 'small_ded' - elif param_name == 'IMAGE': - return GuestImages.DEFAULT['guest'] - else: - return None - - -def create_stack(stack_name, template, pre_creates=None, environments=None, - stack_timeout=None, parameters=None, param_files=None, - enable_rollback=None, dry_run=None, wait=None, tags=None, - fail_ok=False, con_ssh=None, auth_info=None, - cleanup='function', timeout=300): - """ - Create the given heat stack for a given tenant. - - Args: - stack_name (str): Given name for the heat stack - template (str): path of heat template - pre_creates (str|list|None) - environments (str|list|None) - stack_timeout (int|str|None): stack creating timeout in minutes - parameters (str|dict|None) - param_files (str|dict|None) - enable_rollback (bool|None) - dry_run (bool|None) - wait (bool|None) - tags (str|list|None) - auth_info (dict): Tenant dict. If None, primary tenant will be used. - con_ssh (SSHClient): If None, active controller ssh will be used. - timeout (int): automation timeout in seconds - fail_ok (bool): - cleanup (str|None) - - Returns (tuple): Status and msg of the heat deletion. 
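- Examples (editorial sketch, not from the original source: the stack
- name, template path and parameter values below are hypothetical,
- while the keyword arguments are the ones documented above):
- code, name = create_stack(
- 'demo_stack',
- template='/home/sysadmin/custom_heat/demo.yaml',
- parameters={'NETWORK': 'tenant1-mgmt', 'FLAVOR': 'small_ded'},
- cleanup='function', fail_ok=True)
- if code == 0:
- LOG.info("Stack {} reached CREATE_COMPLETE".format(name))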
- """ - - args_dict = { - '--template': template, - '--environment': environments, - '--timeout': stack_timeout, - '--pre-create': pre_creates, - '--enable-rollback': enable_rollback, - '--parameter': parameters, - '--parameter-file': param_files, - '--wait': wait, - '--tags': ','.join(tags) if isinstance(tags, (list, tuple)) else tags, - '--dry-run': dry_run, - } - args = common.parse_args(args_dict, repeat_arg=True) - LOG.info("Create Heat Stack {} with args: {}".format(stack_name, args)) - exitcode, output = cli.openstack('stack create', '{} {}'. - format(args, stack_name), - ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info, timeout=timeout) - if exitcode > 0: - return 1, output - - if cleanup: - ResourceCleanup.add('heat_stack', resource_id=stack_name, scope=cleanup) - - LOG.info("Wait for Heat Stack Status to reach CREATE_COMPLETE for " - "stack %s", stack_name) - res, msg = wait_for_heat_status(stack_name=stack_name, - status=HeatStackStatus.CREATE_COMPLETE, - auth_info=auth_info, fail_ok=fail_ok) - if not res: - return 2, msg - - LOG.info("Stack {} created successfully".format(stack_name)) - return 0, stack_name - - -def update_stack(stack_name, params_string, fail_ok=False, con_ssh=None, - auth_info=None, timeout=300): - """ - Update the given heat stack for a given tenant. - - Args: - con_ssh (SSHClient): If None, active controller ssh will be used. - fail_ok (bool): - params_string: Parameters to pass to the heat create cmd. - ex: -f -P IMAGE=tis - auth_info (dict): Tenant dict. If None, primary tenant will be used. - stack_name (str): Given name for the heat stack - timeout (int) - - Returns (tuple): Status and msg of the heat deletion. - """ - - if not params_string: - raise ValueError("Parameters not provided.") - - LOG.info("Create Heat Stack %s", params_string) - exitcode, output = cli.heat('stack-update', params_string, - ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info) - if exitcode == 1: - LOG.warning("Create heat stack request rejected.") - return 1, output - - LOG.info("Wait for Heat Stack Status to reach UPDATE_COMPLETE for stack %s", - stack_name) - res, msg = wait_for_heat_status(stack_name=stack_name, - status=HeatStackStatus.UPDATE_COMPLETE, - auth_info=auth_info, fail_ok=fail_ok, - timeout=timeout) - if not res: - return 2, msg - - LOG.info("Stack {} updated successfully".format(stack_name)) - return 0, stack_name - - -def get_custom_heat_files(file_name, file_dir=HEAT_CUSTOM_TEMPLATES, - cli_client=None): - """ - - Args: - file_name: - file_dir: - cli_client: - - Returns: - - """ - file_path = '{}/{}'.format(file_dir, file_name) - - if cli_client is None: - cli_client = get_cli_client() - - if not cli_client.file_exists(file_path=file_path): - LOG.debug('Create userdata directory if not already exists') - cmd = 'mkdir -p {}'.format(file_dir) - cli_client.exec_cmd(cmd, fail_ok=False) - source_file = TestServerPath.CUSTOM_HEAT_TEMPLATES + file_name - dest_path = common.scp_from_test_server_to_user_file_dir( - source_path=source_file, dest_dir=file_dir, - dest_name=file_name, timeout=300, con_ssh=cli_client) - if dest_path is None: - raise exceptions.CommonError( - "Heat template file {} does not exist after download".format( - file_path)) - - return file_path diff --git a/automated-pytest-suite/keywords/horizon_helper.py b/automated-pytest-suite/keywords/horizon_helper.py deleted file mode 100644 index c22edfcf..00000000 --- a/automated-pytest-suite/keywords/horizon_helper.py +++ /dev/null @@ -1,62 +0,0 @@ -# -# Copyright (c) 2019, 2020 Wind 
River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -import os - -from keywords import common -from utils.tis_log import LOG -from utils.horizon.helper import HorizonDriver -from consts.auth import Tenant, CliAuth -from consts.proj_vars import ProjVar - - -def download_openrc_files(quit_driver=True): - """ - Download openrc files from Horizon to /horizon/. - - """ - LOG.info("Download openrc files from horizon") - local_dir = os.path.join(ProjVar.get_var('LOG_DIR'), 'horizon') - - from utils.horizon.pages import loginpage - rc_files = [] - login_pg = loginpage.LoginPage() - login_pg.go_to_target_page() - try: - for auth_info in (Tenant.get('admin'), Tenant.get('tenant1'), Tenant.get('tenant2')): - user = auth_info['user'] - password = auth_info['password'] - openrc_file = '{}-openrc.sh'.format(user) - home_pg = login_pg.login(user, password=password) - home_pg.download_rc_v3() - home_pg.log_out() - openrc_path = os.path.join(local_dir, openrc_file) - assert os.path.exists(openrc_path), "{} not found after download".format(openrc_file) - rc_files.append(openrc_path) - - finally: - if quit_driver: - HorizonDriver.quit_driver() - - LOG.info("openrc files are successfully downloaded to: {}".format(local_dir)) - return rc_files - - -def get_url(dnsname=False): - """ - Get the base url of the Horizon application - Args: - dnsname(bool): True if return the dns name of the host instead of the IP - - Returns(str): the url on the active controller to access Horizon - - """ - domain = common.get_lab_fip(region='RegionOne') if not dnsname else \ - common.get_dnsname(region='RegionOne') - prefix = 'https' if CliAuth.get_var('https') else 'http' - port = 8080 if prefix == 'http' else 8443 - return '{}://{}:{}'.format(prefix, domain, port) diff --git a/automated-pytest-suite/keywords/host_helper.py b/automated-pytest-suite/keywords/host_helper.py deleted file mode 100755 index db8fd36a..00000000 --- a/automated-pytest-suite/keywords/host_helper.py +++ /dev/null @@ -1,4830 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -""" -This module is for helper functions targeting one or more STX host. - -Including: -- system host-xxx commands related helper functions -(Note that system host-show, host-list related helper functions are in -system_helper.py) -- Non-system operations targeting specific host, such as ssh to a host, -sudo reboot on given host(s), etc - -""" - -import ast -import re -import os -import time -import copy -from contextlib import contextmanager -from xml.etree import ElementTree - -from consts.proj_vars import ProjVar -from consts.auth import Tenant, TestFileServer, HostLinuxUser -from consts.timeout import HostTimeout, CMDTimeout -from consts.stx import HostAvailState, HostAdminState, HostOperState, \ - Prompt, MELLANOX_DEVICE, MaxVmsSupported, EventLogID, TrafficControl, \ - PLATFORM_NET_TYPES, AppStatus, PLATFORM_AFFINE_INCOMPLETE, FlavorSpec, \ - STORAGE_AGGREGATE -from utils import cli, exceptions, table_parser -from utils.clients.ssh import ControllerClient, SSHFromSSH, SSHClient -from utils.tis_log import LOG -from keywords import system_helper, common, kube_helper, security_helper, \ - nova_helper - - -@contextmanager -def ssh_to_host(hostname, username=None, password=None, prompt=None, - con_ssh=None, timeout=60): - """ - ssh to a host from ssh client. - - Args: - hostname (str|None): host to ssh to. 
When None, return active - controller ssh - username (str): - password (str): - prompt (str): - con_ssh (SSHClient): - timeout (int) - - Returns (SSHClient): ssh client of the host - - Examples: with ssh_to_host('controller-1') as host_ssh: - host.exec_cmd(cmd) - - """ - if not con_ssh: - con_ssh = ControllerClient.get_active_controller() - - if not hostname: - yield con_ssh - return - - user = username if username else HostLinuxUser.get_user() - password = password if password else HostLinuxUser.get_password() - if not prompt: - prompt = '.*' + hostname + r'\:~\$' - original_host = con_ssh.get_hostname() - if original_host != hostname: - host_ssh = SSHFromSSH(ssh_client=con_ssh, host=hostname, user=user, - password=password, initial_prompt=prompt, - timeout=timeout) - host_ssh.connect(prompt=prompt) - current_host = host_ssh.get_hostname() - if not current_host == hostname: - raise exceptions.SSHException("Current host is {} instead of " - "{}".format(current_host, hostname)) - close = True - else: - close = False - host_ssh = con_ssh - try: - yield host_ssh - finally: - if close: - host_ssh.close() - - -def reboot_hosts(hostnames, timeout=HostTimeout.REBOOT, con_ssh=None, - fail_ok=False, wait_for_offline=True, - wait_for_reboot_finish=True, check_hypervisor_up=True, - check_webservice_up=True, force_reboot=True, - check_up_time=True, auth_info=Tenant.get('admin_platform')): - """ - Reboot one or multiple host(s) - - Args: - hostnames (list|str): hostname(s) to reboot. str input is also - acceptable when only one host to be rebooted - timeout (int): timeout waiting for reboot to complete in seconds - con_ssh (SSHClient): Active controller ssh - fail_ok (bool): Whether it is okay or not for rebooting to fail on any - host - wait_for_offline (bool): Whether to wait for host to be offline after - reboot - wait_for_reboot_finish (bool): whether to wait for reboot finishes - before return - check_hypervisor_up (bool): - check_webservice_up (bool): - force_reboot (bool): whether to add -f, i.e., sudo reboot [-f] - check_up_time (bool): Whether to ensure active controller uptime is - more than 15 minutes before rebooting - auth_info - - Returns (tuple): (rtn_code, message) - (-1, "Reboot host command sent") Reboot host command is sent, but did - not wait for host to be back up - (0, "Host(s) state(s) - .") hosts rebooted and back to - available/degraded or online state. - (1, "Host(s) not in expected availability states or task unfinished. 
- () ()" ) - (2, "Hosts not up in nova hypervisor-list: )" - (3, "Hosts web-services not active in system servicegroup-list") - """ - if con_ssh is None: - con_ssh = ControllerClient.get_active_controller() - - if isinstance(hostnames, str): - hostnames = [hostnames] - - reboot_active = False - active_con = system_helper.get_active_controller_name(con_ssh=con_ssh, - auth_info=auth_info) - hostnames = list(set(hostnames)) - if active_con in hostnames: - reboot_active = True - hostnames.remove(active_con) - - system_helper.get_hosts(con_ssh=con_ssh, auth_info=auth_info) - - is_simplex = system_helper.is_aio_simplex(con_ssh=con_ssh, - auth_info=auth_info) - user, password = security_helper.LinuxUser.get_current_user_password() - # reboot hosts other than active controller - cmd = 'sudo reboot -f' if force_reboot else 'sudo reboot' - - for host in hostnames: - prompt = '.*' + host + r'\:~\$' - host_ssh = SSHFromSSH(ssh_client=con_ssh, host=host, user=user, - password=password, initial_prompt=prompt) - host_ssh.connect() - current_host = host_ssh.get_hostname() - if not current_host == host: - raise exceptions.SSHException("Current host is {} instead of " - "{}".format(current_host, host)) - - LOG.info("Rebooting {}".format(host)) - host_ssh.send(cmd) - host_ssh.expect(['.*[pP]assword:.*', 'Rebooting']) - host_ssh.send(password) - con_ssh.expect(timeout=300) - - # reconnect to lab and wait for system up if rebooting active controller - if reboot_active: - if check_up_time: - LOG.info("Ensure uptime for controller(s) is at least 15 " - "minutes before rebooting.") - time_to_sleep = max(0, 910 - system_helper.get_controller_uptime( - con_ssh=con_ssh)) - time.sleep(time_to_sleep) - - LOG.info("Rebooting active controller: {}".format(active_con)) - con_ssh.send(cmd) - index = con_ssh.expect(['.*[pP]assword:.*', 'Rebooting']) - if index == 0: - con_ssh.send(password) - - if is_simplex: - _wait_for_simplex_reconnect(con_ssh=con_ssh, timeout=timeout, - auth_info=auth_info) - else: - LOG.info("Active controller reboot started. Wait for 20 seconds " - "then attempt to reconnect for " - "maximum {}s".format(timeout)) - time.sleep(20) - con_ssh.connect(retry=True, retry_timeout=timeout) - - LOG.info("Reconnected via fip. Waiting for system show cli to " - "re-enable") - _wait_for_openstack_cli_enable(con_ssh=con_ssh, auth_info=auth_info) - - if not wait_for_offline and not is_simplex: - msg = "{} cmd sent".format(cmd) - LOG.info(msg) - return -1, msg - - if hostnames: - time.sleep(30) - hostnames = sorted(hostnames) - hosts_in_rebooting = system_helper.wait_for_hosts_states( - hostnames, timeout=HostTimeout.FAIL_AFTER_REBOOT, - check_interval=10, duration=8, con_ssh=con_ssh, - availability=[HostAvailState.OFFLINE, HostAvailState.FAILED], - auth_info=auth_info) - - if not hosts_in_rebooting: - hosts_info = system_helper.get_hosts_values( - hostnames, - ['task', 'availability'], - con_ssh=con_ssh, - auth_info=auth_info) - raise exceptions.HostError("Some hosts are not rebooting. 
" - "\nHosts info:{}".format(hosts_info)) - - if reboot_active: - hostnames.append(active_con) - if not is_simplex: - system_helper.wait_for_hosts_states( - active_con, timeout=HostTimeout.FAIL_AFTER_REBOOT, - fail_ok=True, check_interval=10, duration=8, - con_ssh=con_ssh, - availability=[HostAvailState.OFFLINE, HostAvailState.FAILED], - auth_info=auth_info) - - if not wait_for_reboot_finish: - msg = 'Host(s) in offline state' - LOG.info(msg) - return -1, msg - - hosts_, admin_states = \ - system_helper.get_hosts(hostname=hostnames, - field=('hostname', 'administrative'), - con_ssh=con_ssh, auth_info=auth_info) - unlocked_hosts = [] - locked_hosts = [] - for i in range(len(hosts_)): - if admin_states[i] == HostAdminState.UNLOCKED: - unlocked_hosts.append(hosts_[i]) - elif admin_states[i] == HostAdminState.LOCKED: - locked_hosts.append(hosts_[i]) - - LOG.info("Locked: {}. Unlocked:{}".format(locked_hosts, unlocked_hosts)) - sorted_total_hosts = sorted(locked_hosts + unlocked_hosts) - if not sorted_total_hosts == hostnames: - raise exceptions.HostError("Some hosts are neither locked or unlocked. " - "\nHosts Rebooted: {}. Locked: {}; " - "Unlocked: {}".format(hostnames, - locked_hosts, - unlocked_hosts)) - unlocked_hosts_in_states = True - locked_hosts_in_states = True - if len(locked_hosts) > 0: - locked_hosts_in_states = \ - system_helper.wait_for_hosts_states(locked_hosts, - timeout=HostTimeout.REBOOT, - check_interval=10, - duration=8, con_ssh=con_ssh, - availability=['online'], - auth_info=auth_info) - - if len(unlocked_hosts) > 0: - unlocked_hosts_in_states = \ - system_helper.wait_for_hosts_states(unlocked_hosts, - timeout=HostTimeout.REBOOT, - check_interval=10, - con_ssh=con_ssh, - availability=['available', - 'degraded'], - auth_info=auth_info) - - if unlocked_hosts_in_states: - for host_unlocked in unlocked_hosts: - LOG.info("Waiting for task clear for {}".format(host_unlocked)) - system_helper.wait_for_host_values( - host_unlocked, - timeout=HostTimeout.TASK_CLEAR, fail_ok=False, - task='', auth_info=auth_info) - - LOG.info( - "Get available hosts after task clear and wait for " - "hypervsior/webservice up") - hosts_avail = system_helper.get_hosts( - availability=HostAvailState.AVAILABLE, - hostname=unlocked_hosts, - con_ssh=con_ssh, auth_info=auth_info) - - if hosts_avail and (check_hypervisor_up or check_webservice_up): - - all_nodes = system_helper.get_hosts_per_personality( - con_ssh=con_ssh, auth_info=auth_info) - computes = list(set(hosts_avail) & set(all_nodes['compute'])) - controllers = list( - set(hosts_avail) & set(all_nodes['controller'])) - if system_helper.is_aio_system(con_ssh): - computes += controllers - - if check_webservice_up and controllers: - res, hosts_webdown = wait_for_webservice_up( - controllers, fail_ok=fail_ok, con_ssh=con_ssh, - timeout=HostTimeout.WEB_SERVICE_UP, auth_info=auth_info) - if not res: - err_msg = "Hosts web-services not active in system " \ - "servicegroup-list: {}".format(hosts_webdown) - if fail_ok: - return 3, err_msg - else: - raise exceptions.HostPostCheckFailed(err_msg) - - if check_hypervisor_up and computes: - res, hosts_hypervisordown = wait_for_hypervisors_up( - computes, fail_ok=fail_ok, con_ssh=con_ssh, - timeout=HostTimeout.HYPERVISOR_UP, auth_info=auth_info) - if not res: - err_msg = "Hosts not up in nova hypervisor-list: " \ - "{}".format(hosts_hypervisordown) - if fail_ok: - return 2, err_msg - else: - raise exceptions.HostPostCheckFailed(err_msg) - - hosts_affine_incomplete = [] - for host in list(set(computes) & 
set(hosts_avail)): - if not wait_for_tasks_affined(host, fail_ok=True, - auth_info=auth_info, - con_ssh=con_ssh): - hosts_affine_incomplete.append(host) - - if hosts_affine_incomplete: - err_msg = "Hosts platform tasks affining incomplete: " \ - "{}".format(hosts_affine_incomplete) - LOG.error(err_msg) - - states_vals = {} - failure_msg = '' - for host in hostnames: - vals = system_helper.get_host_values(host, - fields=['task', 'availability'], - rtn_dict=True) - if not vals['task'] == '': - failure_msg += " {} still in task: {}.".format(host, vals['task']) - states_vals[host] = vals - from keywords.kube_helper import wait_for_nodes_ready - hosts_not_ready = wait_for_nodes_ready(hostnames, timeout=30, - con_ssh=con_ssh, fail_ok=fail_ok)[1] - if hosts_not_ready: - failure_msg += " {} not ready in kubectl get ndoes".format( - hosts_not_ready) - - message = "Host(s) state(s) - {}.".format(states_vals) - - if locked_hosts_in_states and unlocked_hosts_in_states and \ - failure_msg == '': - succ_msg = "Hosts {} rebooted successfully".format(hostnames) - LOG.info(succ_msg) - return 0, succ_msg - - err_msg = "Host(s) not in expected states or task unfinished. " + \ - message + failure_msg - if fail_ok: - LOG.warning(err_msg) - return 1, err_msg - else: - raise exceptions.HostPostCheckFailed(err_msg) - - -def recover_simplex(con_ssh=None, fail_ok=False, - auth_info=Tenant.get('admin_platform')): - """ - Ensure simplex host is unlocked, available, and hypervisor up - This function should only be called for simplex system - - Args: - con_ssh (SSHClient): - fail_ok (bool) - auth_info (dict) - - """ - if not con_ssh: - con_name = auth_info.get('region') if \ - (auth_info and ProjVar.get_var('IS_DC')) else None - con_ssh = ControllerClient.get_active_controller(name=con_name) - - if not con_ssh.is_connected(): - con_ssh.connect(retry=True, retry_timeout=HostTimeout.REBOOT) - - _wait_for_openstack_cli_enable(con_ssh=con_ssh, timeout=HostTimeout.REBOOT, - auth_info=auth_info) - - host = 'controller-0' - is_unlocked = \ - system_helper.get_host_values(host=host, - fields='administrative', - auth_info=auth_info, - con_ssh=con_ssh)[0] \ - == HostAdminState.UNLOCKED - - if not is_unlocked: - unlock_host(host=host, available_only=True, fail_ok=fail_ok, - con_ssh=con_ssh, auth_info=auth_info) - else: - wait_for_hosts_ready(host, fail_ok=fail_ok, check_task_affinity=False, - con_ssh=con_ssh, auth_info=auth_info) - - -def wait_for_hosts_ready(hosts, fail_ok=False, check_task_affinity=False, - con_ssh=None, auth_info=Tenant.get('admin_platform'), - timeout=None, check_interval=None): - """ - Wait for hosts to be in online state if locked, and available and - hypervisor/webservice up if unlocked - Args: - hosts: - fail_ok: whether to raise exception when fail - check_task_affinity - con_ssh: - auth_info - timeout - check_interval - - Returns: - - """ - if isinstance(hosts, str): - hosts = [hosts] - - expt_online_hosts = system_helper.get_hosts( - administrative=HostAdminState.LOCKED, hostname=hosts, con_ssh=con_ssh, - auth_info=auth_info) - expt_avail_hosts = system_helper.get_hosts( - administrative=HostAdminState.UNLOCKED, hostname=hosts, con_ssh=con_ssh, - auth_info=auth_info) - - res_lock = res_unlock = True - timeout_args = {'timeout': timeout} if timeout else {} - if check_interval: - timeout_args['check_interval'] = check_interval - from keywords import kube_helper, container_helper - if expt_online_hosts: - LOG.info("Wait for hosts to be online: {}".format(hosts)) - res_lock = 
system_helper.wait_for_hosts_states( - expt_online_hosts, - availability=HostAvailState.ONLINE, - fail_ok=fail_ok, - con_ssh=con_ssh, - auth_info=auth_info, - **timeout_args) - - res_kube = kube_helper.wait_for_nodes_ready(hosts=expt_online_hosts, - timeout=30, - con_ssh=con_ssh, - fail_ok=fail_ok)[0] - res_lock = res_lock and res_kube - - if expt_avail_hosts: - hypervisors = [] - nova_auth = Tenant.get('admin', - dc_region=auth_info.get('region') if - auth_info else None) - hosts_per_personality = system_helper.get_hosts_per_personality( - con_ssh=con_ssh, auth_info=auth_info) - if container_helper.is_stx_openstack_deployed(con_ssh=con_ssh, - auth_info=auth_info): - hypervisors = list(set( - get_hypervisors(con_ssh=con_ssh, auth_info=nova_auth)) & set( - expt_avail_hosts)) - computes = hypervisors - else: - computes = list( - set(hosts_per_personality['compute']) & set(expt_avail_hosts)) - - controllers = list( - set(hosts_per_personality['controller']) & set(expt_avail_hosts)) - - LOG.info("Wait for hosts to be available: {}".format(hosts)) - res_unlock = system_helper.wait_for_hosts_states( - expt_avail_hosts, - availability=HostAvailState.AVAILABLE, - fail_ok=fail_ok, - con_ssh=con_ssh, - auth_info=auth_info, - **timeout_args) - - if res_unlock: - res_1 = wait_for_task_clear_and_subfunction_ready( - hosts, - fail_ok=fail_ok, - auth_info=auth_info, - con_ssh=con_ssh) - res_unlock = res_unlock and res_1 - - if controllers: - LOG.info( - "Wait for webservices up for hosts: {}".format(controllers)) - res_2 = wait_for_webservice_up(controllers, fail_ok=fail_ok, - con_ssh=con_ssh, auth_info=auth_info, - timeout=HostTimeout.WEB_SERVICE_UP) - res_unlock = res_unlock and res_2 - if hypervisors: - LOG.info( - "Wait for hypervisors up for hosts: {}".format(hypervisors)) - res_3 = wait_for_hypervisors_up(hypervisors, fail_ok=fail_ok, - con_ssh=con_ssh, - auth_info=nova_auth, - timeout=HostTimeout.HYPERVISOR_UP) - res_unlock = res_unlock and res_3 - - if computes and check_task_affinity: - for host in computes: - # Do not fail the test due to task affining incomplete for - # now to unblock test case. 
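- # (Editorial sketch: the wait_for_* keywords in these helper
- # modules all share one poll-until-timeout shape, roughly:
- #
- # end_time = time.time() + timeout
- # while time.time() < end_time:
- # if check(): # e.g. hosts reached the wanted states
- # return True
- # time.sleep(check_interval)
- # return False
- #
- # where 'check' is a placeholder, not a name from this module.)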
- wait_for_tasks_affined(host, fail_ok=True, auth_info=auth_info,
- con_ssh=con_ssh)
- # res_4 = wait_for_tasks_affined(host=host, fail_ok=fail_ok,
- # auth_info=auth_info, con_ssh=con_ssh)
- # res_unlock = res_unlock and res_4
-
- res_kube = \
- kube_helper.wait_for_nodes_ready(hosts=expt_avail_hosts, timeout=30,
- con_ssh=con_ssh,
- fail_ok=fail_ok)[0]
- res_unlock = res_unlock and res_kube
-
- return res_lock and res_unlock
-
-
-def wait_for_task_clear_and_subfunction_ready(
- hosts, fail_ok=False, con_ssh=None,
- timeout=HostTimeout.SUBFUNC_READY,
- auth_info=Tenant.get('admin_platform')):
- if isinstance(hosts, str):
- hosts = [hosts]
-
- hosts_to_check = list(hosts)
- LOG.info("Waiting for task clear and subfunctions enable/available "
- "(if applicable) for hosts: {}".format(hosts_to_check))
- end_time = time.time() + timeout
- while time.time() < end_time:
- hosts_vals = system_helper.get_hosts_values(
- hosts_to_check,
- ['subfunction_avail', 'subfunction_oper', 'task'],
- con_ssh=con_ssh,
- auth_info=auth_info)
- for host, vals in hosts_vals.items():
- if not vals['task'] and vals['subfunction_avail'] in \
- ('', HostAvailState.AVAILABLE) and \
- vals['subfunction_oper'] in ('', HostOperState.ENABLED):
- hosts_to_check.remove(host)
-
- if not hosts_to_check:
- LOG.info(
- "Hosts task cleared and subfunctions (if applicable) are now "
- "in enabled/available states")
- return True
-
- time.sleep(10)
-
- err_msg = "Host(s) subfunctions are not all in enabled/available states: " \
- "{}".format(hosts_to_check)
- if fail_ok:
- LOG.warning(err_msg)
- return False
-
- raise exceptions.HostError(err_msg)
-
-
-def lock_host(host, force=False, lock_timeout=HostTimeout.LOCK,
- timeout=HostTimeout.ONLINE_AFTER_LOCK, con_ssh=None,
- fail_ok=False, check_first=True, swact=False,
- check_cpe_alarm=True, auth_info=Tenant.get('admin_platform')):
- """
- Lock a host.
-
- Args:
- host (str): hostname or id in string format
- force (bool):
- lock_timeout (int): max time in seconds waiting for host to go to
- locked state after locking attempt.
- timeout (int): how many seconds to wait for host to go online after lock
- con_ssh (SSHClient):
- fail_ok (bool):
- check_first (bool):
- swact (bool): whether to check if host is active controller and do a
- swact before attempting to lock
- check_cpe_alarm (bool): whether to wait for cpu usage alarm gone
- before locking
- auth_info
-
- Returns: (return_code(int), msg(str)) # 1, 2, 3, 4, 5, 6 only returned
- when fail_ok=True
- (-1, "Host already locked. Do nothing.")
- (0, "Host is locked and in online state.")
- (1, <stderr>) # Lock host cli rejected
- (2, "Host is not in locked state") # cli ran okay, but host did not
- reach locked state within timeout
- (3, "Host did not go online within <timeout> seconds after (force)
- lock") # Locked but didn't go online
- (4, "Lock host <host> is rejected. Details in host-show
- vim_process_status.")
- (5, "Lock host <host> failed due to migrate vm failed. Details in
- host-show vm_process_status.")
- (6, "Task is not cleared within 180 seconds after host goes online")
-
- """
- host_avail, host_admin = \
- system_helper.get_host_values(host,
- ('availability', 'administrative'),
- con_ssh=con_ssh, auth_info=auth_info)
- if host_avail in [HostAvailState.OFFLINE, HostAvailState.FAILED]:
- LOG.warning("Host in offline or failed state before locking!")
-
- if check_first and host_admin == 'locked':
- msg = "{} already locked.
Do nothing.".format(host) - LOG.info(msg) - return -1, msg - - is_aio_dup = system_helper.is_aio_duplex(con_ssh=con_ssh, - auth_info=auth_info) - - if swact: - if system_helper.is_active_controller(host, con_ssh=con_ssh, - auth_info=auth_info) and \ - len(system_helper.get_controllers( - con_ssh=con_ssh, auth_info=auth_info, - operational=HostOperState.ENABLED)) > 1: - LOG.info("{} is active controller, swact first before attempt to " - "lock.".format(host)) - swact_host(host, auth_info=auth_info, con_ssh=con_ssh) - if is_aio_dup: - time.sleep(90) - - if check_cpe_alarm and is_aio_dup: - LOG.info( - "For AIO-duplex, wait for cpu usage high alarm gone on active " - "controller before locking standby") - active_con = system_helper.get_active_controller_name( - con_ssh=con_ssh, auth_info=auth_info) - entity_id = 'host={}'.format(active_con) - system_helper.wait_for_alarms_gone( - [(EventLogID.CPU_USAGE_HIGH, entity_id)], check_interval=45, - fail_ok=fail_ok, con_ssh=con_ssh, timeout=300, auth_info=auth_info) - - positional_arg = host - extra_msg = '' - if force: - positional_arg += ' --force' - extra_msg = 'force ' - - LOG.info("Locking {}...".format(host)) - exitcode, output = cli.system('host-lock', positional_arg, - ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - - if exitcode == 1: - return 1, output - - table_ = table_parser.table(output) - task_val = table_parser.get_value_two_col_table(table_, field='task') - admin_val = table_parser.get_value_two_col_table(table_, - field='administrative') - - if admin_val != HostAdminState.LOCKED: - if 'Locking' not in task_val: - system_helper.wait_for_host_values(host=host, timeout=30, - check_interval=0, fail_ok=True, - task='Locking', - con_ssh=con_ssh, - auth_info=auth_info) - - # Wait for task complete. If task stucks, fail the test regardless. - # Perhaps timeout needs to be increased. - system_helper.wait_for_host_values(host=host, timeout=lock_timeout, - task='', fail_ok=False, - con_ssh=con_ssh, - auth_info=auth_info) - - if not system_helper.wait_for_host_values( - host, timeout=20, - administrative=HostAdminState.LOCKED, - con_ssh=con_ssh, - auth_info=auth_info): - - # vim_progress_status | Lock of host compute-0 rejected because - # there are no other hypervisors available. - vim_status = \ - system_helper.get_host_values(host, - fields='vim_progress_status', - auth_info=auth_info, - con_ssh=con_ssh, - merge_lines=True)[0] - if re.search('ock .* host .* rejected.*', vim_status): - msg = "Lock host {} is rejected. Details in host-show " \ - "vim_process_status.".format(host) - code = 4 - elif re.search('Migrate of instance .* from host .* failed.*', - vim_status): - msg = "Lock host {} failed due to migrate vm failed. Details " \ - "in host-show vm_process_status.".format(host) - code = 5 - else: - msg = "Host is not in locked state" - code = 2 - - if fail_ok: - return code, msg - raise exceptions.HostPostCheckFailed(msg) - - LOG.info("{} is {}locked. Waiting for it to go Online...".format(host, - extra_msg)) - - if system_helper.wait_for_host_values(host, timeout=timeout, - availability=HostAvailState.ONLINE, - auth_info=auth_info, con_ssh=con_ssh): - # ensure the online status lasts for more than 5 seconds. Sometimes - # host goes online then offline to reboot.. 
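- # (Editorial sketch: the re-check around the sleep below is a
- # simple debounce; in isolation the pattern is roughly:
- #
- # if check(): # state reached once...
- # time.sleep(grace) # ...sit out transient flaps...
- # stable = check() # ...and only trust a second pass
- #
- # 'check' and 'grace' are placeholders; the 5s grace used here is
- # this suite's chosen value.)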
- time.sleep(5) - if system_helper.wait_for_host_values( - host, timeout=timeout, - availability=HostAvailState.ONLINE, - auth_info=auth_info, - con_ssh=con_ssh): - if system_helper.wait_for_host_values( - host, - timeout=HostTimeout.TASK_CLEAR, - task='', auth_info=auth_info, - con_ssh=con_ssh): - LOG.info("Host is successfully locked and in online state.") - return 0, "Host is locked and in online state." - else: - msg = "Task is not cleared within {} seconds after host goes " \ - "online".format(HostTimeout.TASK_CLEAR) - if fail_ok: - LOG.warning(msg) - return 6, msg - raise exceptions.HostPostCheckFailed(msg) - - msg = "Host did not go online within {} seconds after {}lock".format( - timeout, extra_msg) - if fail_ok: - return 3, msg - else: - raise exceptions.HostPostCheckFailed(msg) - - -def _wait_for_simplex_reconnect(con_ssh=None, - timeout=HostTimeout.CONTROLLER_UNLOCK, - auth_info=Tenant.get('admin_platform'), - duplex_direct=False): - time.sleep(30) - if not con_ssh: - con_name = auth_info.get('region') if \ - (auth_info and ProjVar.get_var('IS_DC')) else None - con_ssh = ControllerClient.get_active_controller(name=con_name) - - con_ssh.wait_for_disconnect(check_interval=10, timeout=300) - time.sleep(30) - con_ssh.connect(retry=True, retry_timeout=timeout) - ControllerClient.set_active_controller(con_ssh) - - if not duplex_direct: - # Give it sometime before openstack cmds enables on after host - _wait_for_openstack_cli_enable(con_ssh=con_ssh, auth_info=auth_info, - fail_ok=False, timeout=timeout, - check_interval=10, - reconnect=True, single_node=True) - time.sleep(10) - LOG.info("Re-connected via ssh and openstack CLI enabled") - - -def unlock_host(host, timeout=HostTimeout.CONTROLLER_UNLOCK, - available_only=True, fail_ok=False, con_ssh=None, - auth_info=Tenant.get('admin_platform'), - check_hypervisor_up=True, - check_webservice_up=True, check_subfunc=True, check_first=True, - con0_install=False, - check_containers=True): - """ - Unlock given host - Args: - host (str): - timeout (int): MAX seconds to wait for host to become available or - degraded after unlocking - available_only(bool): if True, wait for host becomes Available after - unlock; otherwise wait for either - Degraded or Available - fail_ok (bool): - con_ssh (SSHClient): - auth_info (dict): - check_hypervisor_up (bool): Whether to check if host is up in nova - hypervisor-list - check_webservice_up (bool): Whether to check if host's web-service is - active in system servicegroup-list - check_subfunc (bool): whether to check subfunction_oper and - subfunction_avail for CPE system - check_first (bool): whether to check host state before unlock. - con0_install (bool) - check_containers (bool) - - Returns (tuple): Only -1, 0, 4 senarios will be returned if fail_ok=False - (-1, "Host already unlocked. Do nothing") - (0, "Host is unlocked and in available state.") - (1, ) # cli returns stderr. only applicable if fail_ok - (2, "Host is not in unlocked state") # only applicable if fail_ok - (3, "Host state did not change to available or degraded within - timeout") # only applicable if fail_ok - (4, "Host is in degraded state after unlocked.") # Only applicable - if available_only=False - (5, "Task is not cleared within 180 seconds after host goes - available") # Applicable if fail_ok - (6, "Host is not up in nova hypervisor-list") # Host with compute - function only. Applicable if fail_ok - (7, "Host web-services is not active in system servicegroup-list") # - controllers only. 
Applicable if fail_ok - (8, "Failed to wait for host to reach Available state after unlocked - to Degraded state") - # only applicable if fail_ok and available_only are True - (9, "Host subfunctions operational and availability are not enable - and available system host-show") # CPE only - (10, " is not ready in kubectl get nodes after unlock") - - """ - LOG.info("Unlocking {}...".format(host)) - if not con_ssh: - con_name = auth_info.get('region') if ( - auth_info and ProjVar.get_var('IS_DC')) else None - con_ssh = ControllerClient.get_active_controller(name=con_name) - - if check_first: - if system_helper.get_host_values(host, 'availability', con_ssh=con_ssh, - auth_info=auth_info)[0] in \ - [HostAvailState.OFFLINE, HostAvailState.FAILED]: - LOG.info( - "Host is offline or failed, waiting for it to go online, " - "available or degraded first...") - system_helper.wait_for_host_values(host, availability=[ - HostAvailState.AVAILABLE, HostAvailState.ONLINE, - HostAvailState.DEGRADED], con_ssh=con_ssh, - fail_ok=False, - auth_info=auth_info) - - if system_helper.get_host_values(host, 'administrative', - con_ssh=con_ssh, - auth_info=auth_info)[0] == \ - HostAdminState.UNLOCKED: - message = "Host already unlocked. Do nothing" - LOG.info(message) - return -1, message - - is_simplex = system_helper.is_aio_simplex(con_ssh=con_ssh, - auth_info=auth_info) - - from keywords import kube_helper, container_helper - check_stx = prev_bad_pods = None - if check_containers: - check_stx = container_helper.is_stx_openstack_deployed( - applied_only=True, con_ssh=con_ssh, auth_info=auth_info) - prev_bad_pods = kube_helper.get_unhealthy_pods(node=host, - con_ssh=con_ssh, - all_namespaces=True) - exitcode, output = cli.system('host-unlock', host, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info, - timeout=60) - if exitcode == 1: - return 1, output - - if is_simplex or con0_install: - time.sleep(120) - _wait_for_simplex_reconnect(con_ssh=con_ssh, auth_info=auth_info, - timeout=timeout) - - if not system_helper.wait_for_host_values( - host, timeout=60, - administrative=HostAdminState.UNLOCKED, - con_ssh=con_ssh, - fail_ok=fail_ok, - auth_info=auth_info): - return 2, "Host is not in unlocked state" - - if not system_helper.wait_for_host_values( - host, timeout=timeout, fail_ok=fail_ok, - check_interval=10, con_ssh=con_ssh, auth_info=auth_info, - availability=[HostAvailState.AVAILABLE, HostAvailState.DEGRADED]): - return 3, "Host state did not change to available or degraded within " \ - "timeout" - - if not system_helper.wait_for_host_values(host, - timeout=HostTimeout.TASK_CLEAR, - fail_ok=fail_ok, con_ssh=con_ssh, - auth_info=auth_info, - task=''): - return 5, "Task is not cleared within {} seconds after host goes " \ - "available".format(HostTimeout.TASK_CLEAR) - - if check_hypervisor_up or check_webservice_up or check_subfunc: - - subfunc, personality = system_helper.get_host_values( - host, fields=('subfunctions', 'personality'), - con_ssh=con_ssh, auth_info=auth_info) - string_total = subfunc + personality - - is_controller = 'controller' in string_total - is_compute = bool(re.search('compute|worker', string_total)) - - if check_hypervisor_up and is_compute: - if container_helper.is_stx_openstack_deployed(con_ssh=con_ssh, - auth_info=auth_info): - nova_auth = Tenant.get('admin', dc_region=auth_info.get( - 'region') if auth_info else None) - if not wait_for_hypervisors_up( - host, fail_ok=fail_ok, con_ssh=con_ssh, - auth_info=nova_auth, - timeout=HostTimeout.HYPERVISOR_UP)[0]: - return 6, "Host is not 
up in nova hypervisor-list" - - if not is_simplex: - # wait_for_tasks_affined(host, con_ssh=con_ssh) - # Do not fail the test due to task affining incomplete for - # now to unblock test case. - wait_for_tasks_affined(host, con_ssh=con_ssh, fail_ok=True) - - if check_webservice_up and is_controller: - if not \ - wait_for_webservice_up(host, fail_ok=fail_ok, con_ssh=con_ssh, - auth_info=auth_info, timeout=300)[0]: - return 7, "Host web-services is not active in system " \ - "servicegroup-list" - - if check_subfunc and is_controller and is_compute: - # wait for subfunction states to be operational enabled and - # available - if not system_helper.wait_for_host_values( - host, timeout=90, - fail_ok=fail_ok, - con_ssh=con_ssh, - auth_info=auth_info, - subfunction_oper=HostOperState.ENABLED, - subfunction_avail=HostAvailState.AVAILABLE): - err_msg = "Host subfunctions operational and availability " \ - "did not change to enabled and available" \ - " within timeout" - LOG.warning(err_msg) - return 9, err_msg - - if check_containers: - from keywords import kube_helper, container_helper - - res_nodes = kube_helper.wait_for_nodes_ready(hosts=host, timeout=180, - con_ssh=con_ssh, - fail_ok=fail_ok)[0] - res_app = True - if check_stx: - res_app = container_helper.wait_for_apps_status( - apps='stx-openstack', - status=AppStatus.APPLIED, - auth_info=auth_info, - con_ssh=con_ssh, - check_interval=10, - fail_ok=fail_ok)[0] - - res_pods = kube_helper.wait_for_pods_healthy(check_interval=10, - con_ssh=con_ssh, - fail_ok=fail_ok, - node=host, - name=prev_bad_pods, - exclude=True, - all_namespaces=True) - - if not (res_nodes and res_app and res_pods): - err_msg = "Container check failed after unlock {}".format(host) - return 10, err_msg - - if system_helper.get_host_values(host, 'availability', con_ssh=con_ssh, - auth_info=auth_info)[0] == \ - HostAvailState.DEGRADED: - if not available_only: - LOG.warning("Host is in degraded state after unlocked.") - return 4, "Host is in degraded state after unlocked." - else: - if not system_helper.wait_for_host_values( - host, timeout=timeout, - fail_ok=fail_ok, - check_interval=10, - con_ssh=con_ssh, - availability=HostAvailState.AVAILABLE, - auth_info=auth_info): - err_msg = "Failed to wait for host to reach Available state " \ - "after unlocked to Degraded state" - LOG.warning(err_msg) - return 8, err_msg - - LOG.info( - "Host {} is successfully unlocked and in available state".format(host)) - return 0, "Host is unlocked and in available state." - - -def unlock_hosts(hosts, timeout=HostTimeout.CONTROLLER_UNLOCK, fail_ok=True, - con_ssh=None, - auth_info=Tenant.get('admin_platform'), - check_hypervisor_up=False, check_webservice_up=False, - check_nodes_ready=True, check_containers=False): - """ - Unlock given hosts. Please use unlock_host() keyword if only one host - needs to be unlocked. - Args: - hosts (list|str): Host(s) to unlock - timeout (int): MAX seconds to wait for host to become available or - degraded after unlocking - fail_ok (bool): - con_ssh (SSHClient): - auth_info (dict): - check_hypervisor_up (bool): Whether to check if host is up in nova - hypervisor-list - check_webservice_up (bool): Whether to check if host's web-service is - active in system servicegroup-list - check_nodes_ready (bool) - check_containers (bool) - - - Returns (dict): {host_0: res_0, host_1: res_1, ...} - where res is a tuple as below, and scenario 1, 2, 3 only applicable - if fail_ok=True - (-1, "Host already unlocked. 
Do nothing") - (0, "Host is unlocked and in available state.") - (1, ) - (2, "Host is not in unlocked state") - (3, "Host is not in available or degraded state.") - (4, "Host is in degraded state after unlocked.") - (5, "Host is not up in nova hypervisor-list") # Host with compute - function only - (6, "Host web-services is not active in system servicegroup-list") # - controllers only - (7, "Host platform tasks affining incomplete") - (8, "Host status not ready in kubectl get nodes") - - """ - if not hosts: - raise ValueError("No host(s) provided to unlock.") - - LOG.info("Unlocking {}...".format(hosts)) - - if isinstance(hosts, str): - hosts = [hosts] - - res = {} - hosts_to_unlock = list(set(hosts)) - for host in hosts: - if system_helper.get_host_values(host, 'administrative', - con_ssh=con_ssh, - auth_info=auth_info)[0] == \ - HostAdminState.UNLOCKED: - message = "Host already unlocked. Do nothing" - - res[host] = -1, message - hosts_to_unlock.remove(host) - - if not hosts_to_unlock: - LOG.info("Host(s) already unlocked. Do nothing.") - return res - - if len(hosts_to_unlock) != len(hosts): - LOG.info("Some host(s) already unlocked. Unlocking the rest: {}".format( - hosts_to_unlock)) - - is_simplex = system_helper.is_aio_simplex(con_ssh=con_ssh, - auth_info=auth_info) - - check_stx = prev_bad_pods = None - if check_containers: - from keywords import kube_helper, container_helper - check_stx = container_helper.is_stx_openstack_deployed( - applied_only=True, con_ssh=con_ssh, auth_info=auth_info) - prev_bad_pods = kube_helper.get_unhealthy_pods(con_ssh=con_ssh, - all_namespaces=True) - - hosts_to_check = [] - for host in hosts_to_unlock: - exitcode, output = cli.system('host-unlock', host, ssh_client=con_ssh, - fail_ok=fail_ok, - auth_info=auth_info, timeout=60) - if exitcode == 1: - res[host] = 1, output - else: - hosts_to_check.append(host) - - if not hosts_to_check: - LOG.warning("Unlock host(s) rejected: {}".format(hosts_to_unlock)) - return res - - if is_simplex: - _wait_for_simplex_reconnect(con_ssh=con_ssh, - timeout=HostTimeout.CONTROLLER_UNLOCK, - auth_info=auth_info) - - if not system_helper.wait_for_hosts_states( - hosts_to_check, timeout=60, - administrative=HostAdminState.UNLOCKED, - con_ssh=con_ssh, - auth_info=auth_info): - LOG.warning("Some host(s) not in unlocked states after 60 seconds.") - - if not system_helper.wait_for_hosts_states( - hosts_to_check, timeout=timeout, check_interval=10, - con_ssh=con_ssh, auth_info=auth_info, - availability=[HostAvailState.AVAILABLE, HostAvailState.DEGRADED]): - LOG.warning( - "Some host(s) state did not change to available or degraded " - "within timeout") - - hosts_vals = system_helper.get_hosts(hostname=hosts_to_check, - field=('hostname', 'availability'), - administrative=HostAdminState.UNLOCKED, - con_ssh=con_ssh, auth_info=auth_info) - hosts_unlocked, hosts_avails_, = hosts_vals - indices = range(len(hosts_unlocked)) - hosts_not_unlocked = list(set(hosts_to_check) - set(hosts_unlocked)) - hosts_avail = [hosts_unlocked[i] for i in indices if - hosts_avails_[i].lower() == HostAvailState.AVAILABLE] - hosts_degrd = [hosts_unlocked[i] for i in indices if - hosts_avails_[i].lower() == HostAvailState.DEGRADED] - hosts_other = list( - set(hosts_unlocked) - set(hosts_avail) - set(hosts_degrd)) - - for host in hosts_not_unlocked: - res[host] = 2, "Host is not in unlocked state." - for host in hosts_degrd: - res[host] = 4, "Host is in degraded state after unlocked." 
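- # (Editorial sketch of how a caller might consume the per-host
- # result dict documented above; hostnames are hypothetical:
- #
- # results = unlock_hosts(['compute-0', 'compute-1'], fail_ok=True)
- # bad = {h: r for h, r in results.items() if r[0] not in (-1, 0, 4)}
- # if bad:
- # LOG.warning("unlock issues: {}".format(bad))
- #
- # codes -1, 0 and 4 are the accepted outcomes, matching the final
- # check at the end of this keyword.)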
- for host in hosts_other: - res[host] = 3, "Host is not in available or degraded state." - - if hosts_avail and (check_hypervisor_up or check_webservice_up): - - all_nodes = system_helper.get_hosts_per_personality(con_ssh=con_ssh, - auth_info=auth_info) - computes = list(set(hosts_avail) & set(all_nodes['compute'])) - controllers = list(set(hosts_avail) & set(all_nodes['controller'])) - if system_helper.is_aio_system(con_ssh, auth_info=auth_info): - computes += controllers - - if check_hypervisor_up and computes: - nova_auth = Tenant.get('admin', dc_region=auth_info.get( - 'region') if auth_info else None) - hosts_hypervisordown = \ - wait_for_hypervisors_up(computes, fail_ok=fail_ok, - con_ssh=con_ssh, - timeout=HostTimeout.HYPERVISOR_UP, - auth_info=nova_auth)[1] - for host in hosts_hypervisordown: - res[host] = 5, "Host is not up in nova hypervisor-list" - hosts_avail = list(set(hosts_avail) - set(hosts_hypervisordown)) - - if check_webservice_up and controllers: - hosts_webdown = wait_for_webservice_up(controllers, fail_ok=fail_ok, - con_ssh=con_ssh, timeout=180, - auth_info=auth_info)[1] - for host in hosts_webdown: - res[host] = 6, "Host web-services is not active in system " \ - "servicegroup-list" - hosts_avail = list(set(hosts_avail) - set(hosts_webdown)) - - hosts_affine_incomplete = [] - for host in list(set(computes) & set(hosts_avail)): - if not wait_for_tasks_affined(host, fail_ok=True, - auth_info=auth_info): - msg = "Host {} platform tasks affining incomplete".format(host) - hosts_affine_incomplete.append(host) - - # Do not fail the test due to task affining incomplete for - # now to unblock test case. - LOG.error(msg) - # res[host] = 7, - # hosts_avail = list(set(hosts_avail) - set(hosts_affine_incomplete)) - - if check_nodes_ready and (hosts_avail or hosts_degrd): - from keywords import kube_helper, container_helper - - hosts_to_wait = list(hosts_avail) - hosts_to_wait += hosts_degrd - res_nodes, hosts_not_ready = kube_helper.wait_for_nodes_ready( - hosts=hosts_to_wait, timeout=180, con_ssh=con_ssh, - fail_ok=fail_ok) - if hosts_not_ready: - hosts_avail = list(set(hosts_avail) - set(hosts_not_ready)) - for host in hosts_not_ready: - res[host] = 8, "Host status not ready in kubectl get nodes" - - if check_containers: - res_app = True - if check_stx: - res_app = container_helper.wait_for_apps_status( - apps='stx-openstack', - status=AppStatus.APPLIED, - con_ssh=con_ssh, - check_interval=10, - fail_ok=fail_ok)[0] - res_pods = kube_helper.wait_for_pods_healthy(check_interval=10, - con_ssh=con_ssh, - fail_ok=fail_ok, - name=prev_bad_pods, - exclude=True, - all_namespaces=True) - if not (res_app and res_pods): - err_msg = "Application status or pods status check failed " \ - "after unlock {}".format(hosts) - hosts_to_update = list( - (set(hosts_to_wait) - set(hosts_not_ready))) - hosts_avail = [] - for host_ in hosts_to_update: - res[host_] = 9, err_msg - - for host in hosts_avail: - res[host] = 0, "Host is unlocked and in available state." - - if not len(res) == len(hosts): - raise exceptions.CommonError( - "Something wrong with the keyword. Number of hosts in result is " - "incorrect.") - - if not fail_ok: - for host in res: - if res[host][0] not in [-1, 0, 4]: - raise exceptions.HostPostCheckFailed( - " Not all host(s) unlocked successfully. 
Detail: {}".format( - res)) - - LOG.info("Results for unlocking hosts: {}".format(res)) - return res - - -def _wait_for_openstack_cli_enable(con_ssh=None, timeout=HostTimeout.SWACT, - fail_ok=False, check_interval=10, - reconnect=True, single_node=None, - auth_info=Tenant.get('admin_platform')): - """ - Wait for 'system show' cli to work on active controller. Also wait for - host task to clear and subfunction ready. - Args: - con_ssh: - timeout: - fail_ok: - check_interval: - reconnect: - auth_info - - Returns (bool): - - """ - from keywords import container_helper - - if not con_ssh: - con_name = auth_info.get('region') if ( - auth_info and ProjVar.get_var('IS_DC')) else None - con_ssh = ControllerClient.get_active_controller(name=con_name) - - def check_sysinv_cli(): - - cli.system('show', ssh_client=con_ssh, auth_info=auth_info, - timeout=10) - time.sleep(10) - active_con = system_helper.get_active_controller_name( - con_ssh=con_ssh, auth_info=auth_info) - - if ((single_node or ( - single_node is None and system_helper.is_aio_simplex())) and - system_helper.get_host_values(active_con, - fields='administrative')[ - 0] == HostAdminState.LOCKED): - LOG.info( - "Simplex system in locked state. Wait for task to clear only") - system_helper.wait_for_host_values(host=active_con, - timeout=HostTimeout.LOCK, - task='', con_ssh=con_ssh, - auth_info=auth_info) - else: - wait_for_task_clear_and_subfunction_ready(hosts=active_con, - con_ssh=con_ssh, - auth_info=auth_info) - is_openstack_applied = container_helper.is_stx_openstack_deployed( - con_ssh=con_ssh, auth_info=auth_info) - LOG.info("system cli and subfunction enabled") - return is_openstack_applied - - def check_nova_cli(): - region = auth_info.get('region', None) if auth_info else None - nova_auth = Tenant.get('admin', dc_region=region) - cli.openstack('server list', ssh_client=con_ssh, auth_info=nova_auth, - timeout=10) - LOG.info("nova cli enabled") - - cli_enable_end_time = time.time() + timeout - LOG.info( - "Waiting for system cli and subfunctions to be ready and nova cli (if " - "stx-openstack applied) to be " - "enabled on active controller") - check_nova = None - while time.time() < cli_enable_end_time: - try: - if check_nova is None: - check_nova = check_sysinv_cli() - if check_nova: - check_nova_cli() - return True - except: - if not con_ssh.is_connected(): - if reconnect: - LOG.info( - "con_ssh connection lost while waiting for system to " - "recover. Attempt to reconnect...") - con_ssh.connect(retry_timeout=timeout, retry=True) - else: - LOG.error("system disconnected") - if fail_ok: - return False - raise - - time.sleep(check_interval) - - err_msg = "Timed out waiting for system to recover. Time waited: {}".format( - timeout) - if fail_ok: - LOG.warning(err_msg) - return False - raise TimeoutError(err_msg) - - -def swact_host(hostname=None, swact_start_timeout=HostTimeout.SWACT, - swact_complete_timeout=HostTimeout.SWACT, - fail_ok=False, auth_info=Tenant.get('admin_platform'), - con_ssh=None, wait_for_alarm=False): - """ - Swact active controller from given hostname. - - Args: - hostname (str|None): When None, active controller will be used for - swact. 
-        swact_start_timeout (int): Max time to wait between cli execution and
-            the start of swact
-        swact_complete_timeout (int): Max time to wait for swact to complete
-            after swact started
-        fail_ok (bool):
-        con_ssh (SSHClient):
-        auth_info
-        wait_for_alarm (bool): whether to wait for pre-swact alarms after swact
-
-    Returns (tuple): (rtn_code(int), msg(str))  # 1, 3, 4 only returned
-        when fail_ok=True
-        (0, "Active controller is successfully swacted.")
-        (1, <std_err>)  # swact host cli rejected
-        (2, "<hostname> is not active controller host, thus swact request
-            failed as expected.")
-        (3, "Swact did not start within <swact_start_timeout>")
-        (4, "Active controller did not change after swact within
-            <swact_complete_timeout>")
-        (5, "Web-services for new controller is not active")
-        (6, "Hypervisor state is not up for <hostname> after swacted")
-        (7, "tasks affining incomplete on <host> after swact from <hostname>")
-
-    """
-    active_host = system_helper.get_active_controller_name(con_ssh=con_ssh,
-                                                           auth_info=auth_info)
-    if hostname is None:
-        hostname = active_host
-
-    pre_alarms = None
-    if wait_for_alarm:
-        pre_alarms = system_helper.get_alarms(con_ssh=con_ssh,
-                                              auth_info=auth_info)
-
-    exitcode, msg = cli.system('host-swact', hostname, ssh_client=con_ssh,
-                               fail_ok=fail_ok, auth_info=auth_info)
-    if exitcode == 1:
-        return 1, msg
-
-    if hostname != active_host:
-        system_helper.wait_for_host_values(hostname,
-                                           timeout=swact_start_timeout,
-                                           fail_ok=False, con_ssh=con_ssh,
-                                           auth_info=auth_info, task='')
-        return 2, "{} is not active controller host, thus swact request " \
-                  "failed as expected.".format(hostname)
-    else:
-        rtn = wait_for_swact_complete(
-            hostname, con_ssh, swact_start_timeout=swact_start_timeout,
-            auth_info=auth_info, swact_complete_timeout=swact_complete_timeout,
-            fail_ok=fail_ok)
-        if rtn[0] == 0:
-            nova_auth = Tenant.get('admin', dc_region=auth_info.get(
-                'region') if auth_info else None)
-            try:
-                res = wait_for_webservice_up(
-                    system_helper.get_active_controller_name(),
-                    fail_ok=fail_ok,
-                    auth_info=auth_info, con_ssh=con_ssh)[0]
-                if not res:
-                    return 5, "Web-services for new controller is not active"
-
-                if system_helper.is_aio_duplex(con_ssh=con_ssh,
-                                               auth_info=auth_info):
-                    hypervisor_up_res = wait_for_hypervisors_up(
-                        hostname, fail_ok=fail_ok, con_ssh=con_ssh,
-                        auth_info=nova_auth)
-                    if not hypervisor_up_res:
-                        return 6, "Hypervisor state is not up for {} after " \
-                                  "swacted".format(hostname)
-
-                for host in ('controller-0', 'controller-1'):
-                    task_aff_res = wait_for_tasks_affined(
-                        host, con_ssh=con_ssh, fail_ok=True,
-                        auth_info=auth_info, timeout=300)
-                    if not task_aff_res:
-                        msg = "tasks affining incomplete on {} after swact " \
-                              "from {}".format(host, hostname)
-                        # Do not fail the test due to task affining
-                        # incomplete for now to unblock test case.
-                        LOG.error(msg=msg)
-                        return 7, msg
-            finally:
-                # After swact, there is a delay for alarms to re-appear on
-                # the new active controller, thus the wait.
-                if pre_alarms:
-                    post_alarms = system_helper.get_alarms(con_ssh=con_ssh,
-                                                           auth_info=auth_info)
-                    for alarm in pre_alarms:
-                        if alarm not in post_alarms:
-                            alarm_id, entity_id = alarm.split('::::')
-                            system_helper.wait_for_alarm(
-                                alarm_id=alarm_id, entity_id=entity_id,
-                                fail_ok=True, timeout=300, check_interval=15,
-                                auth_info=auth_info)
-
-        return rtn
-
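# [Editor's note] Hedged usage sketch for swact_host() above; not part of
# the original diff. With hostname=None the currently active controller is
# resolved and swacted; code 0 means the standby controller took over.
code, msg = swact_host(hostname=None, fail_ok=True)
if code != 0:
    LOG.warning("swact did not complete cleanly: {}".format(msg))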
-
-def wait_for_swact_complete(before_host, con_ssh=None,
-                            swact_start_timeout=HostTimeout.SWACT,
-                            swact_complete_timeout=HostTimeout.SWACT,
-                            fail_ok=True,
-                            auth_info=Tenant.get('admin_platform')):
-    """
-    Wait for swact to start and complete.
-    NOTE: This function assumes the swact command was run from an ssh
-    session using the OAM floating ip!!
-
-    Args:
-        before_host (str): Active controller name before swact request
-        con_ssh (SSHClient):
-        swact_start_timeout (int): Max time to wait between cli execution and
-            the start of swact
-        swact_complete_timeout (int): Max time to wait for swact to complete
-            after swact started
-        fail_ok
-        auth_info
-
-    Returns (tuple):
-        (0, "Active controller is successfully swacted.")
-        (3, "Swact did not start within <swact_start_timeout>")  # returned
-            when fail_ok=True
-        (4, "Active controller did not change after swact within
-            <swact_complete_timeout>")  # returned when fail_ok=True
-        (5, "400.001 alarm is not cleared within timeout after swact")
-        (6, "tasks affining incomplete on <host>")
-
-    """
-    if con_ssh is None:
-        con_name = auth_info.get('region') if (
-            auth_info and ProjVar.get_var('IS_DC')) else None
-        con_ssh = ControllerClient.get_active_controller(name=con_name)
-
-    fip_disconnected = con_ssh.wait_for_disconnect(
-        fail_ok=fail_ok, timeout=swact_start_timeout)
-    if not fip_disconnected:
-        return 3, "Swact did not start within {}".format(swact_start_timeout)
-
-    LOG.info(
-        "ssh to {} OAM floating IP disconnected, indicating swact "
-        "initiated.".format(con_ssh.host))
-
-    # 'permission denied' is received when ssh-ing right after swact is
-    # initiated. Add a delay to avoid sanity failure.
-    time.sleep(30)
-    con_ssh.connect(retry=True, retry_timeout=swact_complete_timeout - 30)
-
-    # Give it some time for openstack commands to be enabled on the new
-    # active host
-    _wait_for_openstack_cli_enable(con_ssh=con_ssh, fail_ok=False,
-                                   timeout=swact_complete_timeout,
-                                   auth_info=auth_info)
-
-    after_host = system_helper.get_active_controller_name(con_ssh=con_ssh,
-                                                          auth_info=auth_info)
-    LOG.info("Host before swacting: {}, host after swacting: {}".format(
-        before_host, after_host))
-
-    if before_host == after_host:
-        if fail_ok:
-            return 4, "Active controller did not change after swact within " \
-                      "{}".format(swact_complete_timeout)
-        raise exceptions.HostPostCheckFailed(
-            "Swact failed. Active controller host did not change")
-
-    drbd_res = system_helper.wait_for_alarm_gone(
-        alarm_id=EventLogID.CON_DRBD_SYNC, entity_id=after_host,
-        strict=False, fail_ok=fail_ok, timeout=300, con_ssh=con_ssh,
-        auth_info=auth_info)
-    if not drbd_res:
-        return 5, "400.001 alarm is not cleared within timeout after swact"
-
-    return 0, "Active controller is successfully swacted."
-
-
-def wait_for_hypervisors_up(hosts, timeout=HostTimeout.HYPERVISOR_UP,
-                            check_interval=5, fail_ok=False,
-                            con_ssh=None, auth_info=Tenant.get('admin')):
-    """
-    Wait for given hypervisors to be up and enabled in nova hypervisor-list
-    Args:
-        hosts (list|str): names of the hypervisors, such as compute-0
-        timeout (int):
-        check_interval (int):
-        fail_ok (bool):
-        con_ssh (SSHClient):
-        auth_info
-
-    Returns (tuple): res_bool(bool), hosts_not_up(list)
-        (True, [])  # all hypervisors given are up and enabled
-        (False, [<hosts_not_up>])  # some hosts are not up and enabled
-
-    """
-    if isinstance(hosts, str):
-        hosts = [hosts]
-
-    hypervisors = get_hypervisors(con_ssh=con_ssh, auth_info=auth_info)
-
-    if not set(hosts) <= set(hypervisors):
-        msg = "Some host(s) not in nova hypervisor-list. Host(s) given: {}. " \
" \ - "Hypervisors: {}".format(hosts, hypervisors) - raise exceptions.HostPreCheckFailed(msg) - - hosts_to_check = list(hosts) - LOG.info("Waiting for {} to be up in nova hypervisor-list...".format(hosts)) - end_time = time.time() + timeout - while time.time() < end_time: - up_hosts = get_hypervisors(state='up', con_ssh=con_ssh, - auth_info=auth_info) - for host in hosts_to_check: - if host in up_hosts: - hosts_to_check.remove(host) - - if not hosts_to_check: - msg = "Host(s) {} are up and enabled in nova " \ - "hypervisor-list".format(hosts) - LOG.info(msg) - return True, hosts_to_check - - time.sleep(check_interval) - else: - msg = "Host(s) {} are not up in hypervisor-list within timeout".format( - hosts_to_check) - if fail_ok: - LOG.warning(msg) - return False, hosts_to_check - raise exceptions.HostTimeout(msg) - - -def wait_for_webservice_up(hosts, timeout=HostTimeout.WEB_SERVICE_UP, - check_interval=5, fail_ok=False, con_ssh=None, - auth_info=Tenant.get('admin_platform')): - if isinstance(hosts, str): - hosts = [hosts] - - hosts_to_check = list(hosts) - LOG.info( - "Waiting for {} to be active for web-service in system " - "servicegroup-list...".format( - hosts_to_check)) - end_time = time.time() + timeout - - while time.time() < end_time: - # need to check for strict True because 'go-active' state is not - # 'active' state - active_hosts = \ - system_helper.get_servicegroups(fields='hostname', - service_group_name='web-services', - strict=True, - con_ssh=con_ssh, - auth_info=auth_info) - - for host in hosts: - if host in active_hosts and host in hosts_to_check: - hosts_to_check.remove(host) - - if not hosts_to_check: - msg = "Host(s) {} are active for web-service in system " \ - "servicegroup-list".format(hosts) - LOG.info(msg) - return True, hosts_to_check - - time.sleep(check_interval) - else: - msg = "Host(s) {} are not active for web-service in system " \ - "servicegroup-list within timeout".format(hosts_to_check) - if fail_ok: - LOG.warning(msg) - return False, hosts_to_check - raise exceptions.HostTimeout(msg) - - -def get_hosts_in_storage_backing(storage_backing='local_image', up_only=True, - hosts=None, con_ssh=None, - auth_info=Tenant.get('admin')): - """ - Return a list of hosts that supports the given storage backing. - - System: Regular, Small footprint - - Args: - hosts (None|list|tuple): hosts to check - storage_backing (str): 'local_image', or 'remote' - up_only (bool): whether to return only up hypervisors - con_ssh (SSHClient): - auth_info - - Returns (tuple): - such as ('compute-0', 'compute-2', 'compute-1', 'compute-3') - or () if no host supports this storage backing - - """ - storage_backing = storage_backing.strip().lower() - if 'image' in storage_backing: - storage_backing = 'local_image' - elif 'remote' in storage_backing: - storage_backing = 'remote' - else: - raise ValueError("Invalid storage backing provided. " - "Please use one of these: 'local_image', 'remote'") - - hosts_per_backing = get_hosts_per_storage_backing(up_only=up_only, - con_ssh=con_ssh, - auth_info=auth_info, - hosts=hosts) - return hosts_per_backing.get(storage_backing, []) - - -def get_up_hypervisors(con_ssh=None, auth_info=Tenant.get('admin')): - return get_hypervisors(state='up', con_ssh=con_ssh, auth_info=auth_info) - - -def get_hypervisors(state=None, field='Hypervisor Hostname', - auth_info=Tenant.get('admin'), con_ssh=None): - """ - Return a list of hypervisors names in specified state and status. If None - is set to state and status, - all hypervisors will be returned. 
- - System: Regular - - Args: - state (str): e.g., 'up', 'down' - con_ssh (SSHClient): - field (str|list|tuple): target header. e.g., ID, Hypervisor hostname - auth_info - - Returns (list): a list of hypervisor names. Return () if no match found. - Always return () for small footprint lab. i.e., do not work with - small footprint lab - """ - table_ = table_parser.table( - cli.openstack('hypervisor list', ssh_client=con_ssh, - auth_info=auth_info)[1]) - - kwargs = {'State': state} if state else {} - return table_parser.get_multi_values(table_, field, **kwargs) - - -def _get_element_tree_virsh_xmldump(instance_name, host_ssh): - code, output = host_ssh.exec_sudo_cmd( - cmd='virsh dumpxml {}'.format(instance_name)) - if not 0 == code: - raise exceptions.SSHExecCommandFailed( - "virsh dumpxml failed to execute.") - - element_tree = ElementTree.fromstring(output) - return element_tree - - -def get_values_virsh_xmldump(instance_name, host_ssh, tag_paths, - target_type='element'): - """ - - Args: - instance_name (str): instance_name of a vm. Such as 'instance-00000002' - host_ssh (SSHFromSSH): ssh of the host that hosting the given instance - tag_paths (str|list|tuple): the tag path to reach to the target - element. such as 'memoryBacking/hugepages/page' - target_type (str): 'element', 'dict', 'text' - - Returns (list): list of Elements, dictionaries, or strings based on the - target_type param. - - """ - target_type = target_type.lower().strip() - root_element = _get_element_tree_virsh_xmldump(instance_name, host_ssh) - - is_str = False - if isinstance(tag_paths, str): - is_str = True - tag_paths = [tag_paths] - - values_list = [] - for tag_path_ in tag_paths: - elements = root_element.findall(tag_path_) - - if 'dict' in target_type: - dics = [] - for element in elements: - dics.append(element.attrib) - values_list.append(dics) - - elif 'text' in target_type: - texts = [] - for element in elements: - text_list = list(element.itertext()) - if not text_list: - LOG.warning( - "No text found under tag: {}.".format(tag_path_)) - else: - texts.append(text_list[0]) - if len(text_list) > 1: - LOG.warning(( - "More than one text found under tag: " - "{}, returning the first one.". - format(tag_path_))) - - values_list.append(texts) - - else: - values_list.append(elements) - - if is_str: - return values_list[0] - else: - return values_list - - -def _get_actual_mems(host): - headers = ('mem_avail(MiB)', 'app_hp_total_1G', 'app_hp_pending_1G') - displayed_mems = get_host_memories(host=host, headers=headers, - wait_for_update=False) - - actual_mems = {} - for proc in displayed_mems: - mem_avail, total_1g, pending_1g = displayed_mems[proc] - actual_1g = total_1g if pending_1g is None else pending_1g - - args = '-2M {} {} {}'.format(mem_avail, host, proc) - code, output = cli.system('host-memory-modify', args, fail_ok=True) - if code == 0: - raise exceptions.SysinvError( - 'system host-memory-modify is not rejected when 2M pages ' - 'exceeds mem_avail') - - # Processor 0:No available space for 2M huge page allocation, max 2M - # VM pages: 27464 - actual_mem = int(re.findall(r'max 2M pages: (\d+)', output)[0]) * 2 - actual_mems[proc] = (actual_mem, actual_1g) - - return actual_mems - - -def wait_for_memory_update(host, proc_id=None, expt_1g=None, timeout=420, - auth_info=Tenant.get('admin_platform')): - """ - Wait for host memory to be updated after modifying and unlocking host. 
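# [Editor's note] Standalone sketch of the ElementTree lookup performed by
# get_values_virsh_xmldump() above, run against a canned XML snippet rather
# than live 'virsh dumpxml' output (the XML content is invented):
from xml.etree import ElementTree

xml = "<domain><cputune><vcpupin vcpu='0' cpuset='5'/></cputune></domain>"
root = ElementTree.fromstring(xml)
pins = [e.attrib for e in root.findall('cputune/vcpupin')]
print(pins)  # [{'vcpu': '0', 'cpuset': '5'}]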
- Args: - host: - proc_id (int|list|None): - expt_1g (int|list|None): - timeout: - auth_info - - Returns: - - """ - proc_id_type = type(proc_id) - if not isinstance(expt_1g, proc_id_type): - raise ValueError("proc_id and expt_1g have to be the same type") - - pending_2m = pending_1g = -1 - headers = ['app_hp_total_1G', 'app_hp_pending_1G', 'app_hp_pending_2M'] - current_time = time.time() - end_time = current_time + timeout - pending_end_time = current_time + 120 - while time.time() < end_time: - host_mems = get_host_memories(host, headers, proc_id=proc_id, - wait_for_update=False, - auth_info=auth_info) - for proc in host_mems: - current_1g, pending_1g, pending_2m = host_mems[proc] - if not (pending_2m is None and pending_1g is None): - break - else: - if time.time() > pending_end_time: - LOG.info("Pending memories are None for at least 120 seconds") - break - time.sleep(15) - else: - err = "Pending memory after {}s. Pending 2M: {}; Pending 1G: {}".format( - timeout, pending_2m, pending_1g) - assert 0, err - - if expt_1g: - if isinstance(expt_1g, int): - expt_1g = [expt_1g] - proc_id = [proc_id] - - for i in range(len(proc_id)): - actual_1g = host_mems[proc_id[i]][0] - expt = expt_1g[i] - assert expt == actual_1g, "{} proc{} 1G pages - actual: {}, " \ - "expected: {}". \ - format(host, proc_id[i], actual_1g, expt_1g) - - -def modify_host_memory(host, proc, gib_1g=None, gib_4k_range=None, - actual_mems=None, fail_ok=False, - con_ssh=None, auth_into=Tenant.get('admin_platform')): - """ - - Args: - host (str): - proc (int|str) - gib_1g (None|int): 1g page to set - gib_4k_range (None|tuple): - None: no requirement on 4k page - tuple: (min_val(None|int), max_val(None|int)) make sure 4k page - total gib fall between the range (inclusive) - actual_mems - con_ssh - auth_into - fail_ok - - Returns (tuple): - - """ - args = '' - if not actual_mems: - actual_mems = _get_actual_mems(host=host) - mib_avail, page_1g = actual_mems[str(proc)] - - if gib_1g is not None: - page_1g = gib_1g - args += ' -1G {}'.format(gib_1g) - mib_avail_2m = mib_avail - page_1g * 1024 - - if gib_4k_range: - min_4k, max_4k = gib_4k_range - if not (min_4k is None and max_4k is None): - if min_4k is None: - gib_4k_final = max(0, max_4k - 2) - elif max_4k is None: - gib_4k_final = min_4k + 2 - else: - gib_4k_final = (min_4k + max_4k) / 2 - mib_avail_2m = mib_avail_2m - gib_4k_final * 1024 - - page_2m = int(mib_avail_2m / 2) - args += ' -2M {} {} {}'.format(page_2m, host, proc) - - code, output = cli.system('host-memory-modify', args, ssh_client=con_ssh, - auth_info=auth_into, fail_ok=fail_ok) - if code > 0: - return 1, output - - LOG.info("{} memory modified successfully".format(host)) - return 0, page_2m - - -def modify_host_cpu(host, cpu_function, timeout=CMDTimeout.HOST_CPU_MODIFY, - fail_ok=False, con_ssh=None, - auth_info=Tenant.get('admin_platform'), **kwargs): - """ - Modify host cpu to given key-value pairs. i.e., system host-cpu-modify -f - -p - Notes: This assumes given host is already locked. - - Args: - host (str): hostname of host to be modified - cpu_function (str): cpu function to modify. e.g., 'vSwitch', 'platform' - timeout (int): Timeout waiting for system host-cpu-modify cli to return - fail_ok (bool): - con_ssh (SSHClient): - auth_info (dict): - **kwargs: processor id and number of cores pair(s). e.g., p0=1, p1=1 - - Returns (tuple): (rtn_code(int), message(str)) - (0, "Host cpu function modified successfully") - (1, ) # cli rejected - (2, "Number of actual log_cores for is different than - number set. 
Actual: , expect: ") - - """ - LOG.info( - "Modifying host {} CPU function {} to {}".format(host, cpu_function, - kwargs)) - - if not kwargs: - raise ValueError( - "At least one key-value pair such as p0=1 has to be provided.") - - final_args = {} - proc_args = '' - for proc, cores in kwargs.items(): - if cores is not None: - final_args[proc] = cores - cores = str(cores) - proc_args = ' '.join([proc_args, '-' + proc.lower().strip(), cores]) - - if not final_args: - raise ValueError("cores values cannot be all None") - - if not proc_args: - raise ValueError( - "At least one key-value pair should have non-None value. e.g., " - "p1=2") - - subcmd = ' '.join( - ['host-cpu-modify', '-f', cpu_function.lower().strip(), proc_args]) - code, output = cli.system(subcmd, host, ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info, timeout=timeout) - - if code == 1: - return 1, output - - LOG.info("Post action check for host-cpu-modify...") - table_ = table_parser.table(output) - threads = len(set(table_parser.get_column(table_, 'thread'))) - - table_ = table_parser.filter_table(table_, assigned_function=cpu_function) - - for proc, num in final_args.items(): - num = int(num) - proc_id = re.findall(r'\d+', proc)[0] - expt_cores = threads * num - actual_cores = len( - table_parser.get_values(table_, 'log_core', processor=proc_id)) - if expt_cores != actual_cores: - msg = "Number of actual log_cores for {} is different than " \ - "number set. Actual: {}, expect: {}". \ - format(proc, actual_cores, expt_cores) - if fail_ok: - LOG.warning(msg) - return 2, msg - raise exceptions.HostPostCheckFailed(msg) - - msg = "Host cpu function modified successfully" - LOG.info(msg) - return 0, msg - - -def add_host_interface(host, if_name, ports_or_ifs, if_type=None, pnet=None, - ae_mode=None, tx_hash_policy=None, - vlan_id=None, mtu=None, if_class=None, network=None, - ipv4_mode=None, ipv6_mode=None, - ipv4_pool=None, ipv6_pool=None, lock_unlock=True, - fail_ok=False, con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - - Args: - host: - if_name: - ports_or_ifs: - if_type: - pnet: - ae_mode: - tx_hash_policy: - vlan_id: - mtu: - if_class: - network: - ipv4_mode: - ipv6_mode: - ipv4_pool: - ipv6_pool: - lock_unlock: - fail_ok: - con_ssh: - auth_info: - - Returns: - - """ - if lock_unlock: - lock_host(host=host, con_ssh=con_ssh, swact=True, fail_ok=False) - - if isinstance(ports_or_ifs, str): - ports_or_ifs = [ports_or_ifs] - args = '{} {}{}{} {}'.format(host, if_name, - ' ' + if_type if if_type else '', - ' ' + pnet if pnet else '', - ' '.join(ports_or_ifs)) - opt_args_dict = { - '--aemode': ae_mode, - '--txhashpolicy': tx_hash_policy, - '--vlan_id': vlan_id, - '--imtu': mtu, - '--ifclass': if_class, - '--networks': network, - '--ipv4-mode': ipv4_mode, - '--ipv6-mode': ipv6_mode, - '--ipv4-pool': ipv4_pool, - '--ipv6-pool': ipv6_pool, - } - - opt_args = '' - for key, val in opt_args_dict.items(): - if val is not None: - opt_args += '{} {} '.format(key, val) - - args = '{} {}'.format(args, opt_args).strip() - code, out = cli.system('host-if-add', args, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - if code > 0: - return 1, out - - if lock_unlock: - unlock_host(host, con_ssh=con_ssh) - - msg = "Interface {} successfully added to {}".format(if_name, host) - LOG.info(msg) - - return 0, msg - - -def modify_host_interface(host, interface, pnet=None, ae_mode=None, - tx_hash_policy=None, - mtu=None, if_class=None, network=None, ipv4_mode=None, - ipv6_mode=None, - ipv4_pool=None, ipv6_pool=None, 
sriov_vif_count=None, - new_if_name=None, - lock_unlock=True, fail_ok=False, con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - - Args: - host: - interface: - pnet: - ae_mode: - tx_hash_policy: - mtu: - if_class: - network: - ipv4_mode: - ipv6_mode: - ipv4_pool: - ipv6_pool: - sriov_vif_count: - new_if_name: - lock_unlock: - fail_ok: - con_ssh: - auth_info: - - Returns: - - """ - if lock_unlock: - lock_host(host=host, con_ssh=con_ssh, swact=True, fail_ok=False) - - args = '{} {}'.format(host, interface) - opt_args_dict = { - '--ifname': new_if_name, - '--aemode': ae_mode, - '--txhashpolicy': tx_hash_policy, - '--imtu': mtu, - '--ifclass': if_class, - '--networks': network, - '--ipv4-mode': ipv4_mode, - '--ipv6-mode': ipv6_mode, - '--ipv4-pool': ipv4_pool, - '--ipv6-pool': ipv6_pool, - '--num-vfs': sriov_vif_count, - '--providernetworks': pnet, - } - - opt_args = '' - for key, val in opt_args_dict.items(): - if val is not None: - opt_args += '{} {} '.format(key, val) - - args = '{} {}'.format(args, opt_args).strip() - code, out = cli.system('host-if-modify', args, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - if code > 0: - return 1, out - - if lock_unlock: - unlock_host(host, con_ssh=con_ssh) - - msg = "{} interface {} is successfully modified".format(host, interface) - LOG.info(msg) - - return 0, msg - - -def compare_host_to_cpuprofile(host, profile_uuid): - """ - Compares the cpu function assignments of a host and a cpu profile. - - Args: - host (str): name of host - profile_uuid (str): name or uuid of the cpu profile - - Returns (tuple): (rtn_code(int), message(str)) - (0, "The host and cpu profile have the same information") - (2, "The function of one of the cores has not been changed correctly: - ") - - """ - if not host or not profile_uuid: - raise ValueError("There is either no host or no cpu profile given.") - - def check_range(core_group, core_num): - group = [] - if isinstance(core_group, str): - group.append(core_group) - elif isinstance(core_group, list): - for proc in core_group: - group.append(proc) - - for processors in group: - parts = processors.split(' ') - cores = parts[len(parts) - 1] - ranges = cores.split(',') - for range_ in ranges: - if range_ == '': - continue - range_ = range_.split('-') - if len(range_) == 2: - if int(range_[0]) <= int(core_num) <= int(range_[1]): - return True - elif len(range_) == 1: - if int(range_[0]) == int(core_num): - return True - LOG.warn("Could not match {} in {}".format(core_num, core_group)) - return False - - table_ = table_parser.table(cli.system('host-cpu-list', host)[1]) - functions = table_parser.get_column(table_=table_, - header='assigned_function') - - table_ = table_parser.table(cli.system('cpuprofile-show', profile_uuid)[1]) - - platform_cores = table_parser.get_value_two_col_table(table_, - field='platform ' - 'cores') - vswitch_cores = table_parser.get_value_two_col_table(table_, - field='vswitch cores') - shared_cores = table_parser.get_value_two_col_table(table_, - field='shared cores') - vm_cores = table_parser.get_value_two_col_table(table_, field='vm cores') - - msg = "The function of one of the cores has not been changed correctly: " - - for i in range(0, len(functions)): - if functions[i] == 'Platform': - if not check_range(platform_cores, i): - LOG.warning(msg + str(i)) - return 2, msg + str(i) - elif functions[i] == 'vSwitch': - if not check_range(vswitch_cores, i): - LOG.warning(msg + str(i)) - return 2, msg + str(i) - elif functions[i] == 'Shared': - if not check_range(shared_cores, 
i): - LOG.warning(msg + str(i)) - return 2, msg + str(i) - elif functions[i] == 'Applications': - if not check_range(vm_cores, i): - LOG.warning(msg + str(i)) - return 2, msg + str(i) - - msg = "The host and cpu profile have the same information" - return 0, msg - - -def apply_host_cpu_profile(host, profile_uuid, - timeout=CMDTimeout.CPU_PROFILE_APPLY, fail_ok=False, - con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - Apply the given cpu profile to the host. - Assumes the host is already locked. - - Args: - host (str): name of host - profile_uuid (str): name or uuid of the cpu profile - timeout (int): timeout to wait for cli to return - fail_ok (bool): - con_ssh (SSHClient): - auth_info (dict): - - Returns (tuple): (rtn_code(int), message(str)) - (0, "cpu profile applied successfully") - (1, ) # cli rejected - (2, "The function of one of the cores has not been changed correctly: - ") - """ - if not host or not profile_uuid: - raise ValueError("There is either no host or no cpu profile given.") - - LOG.info("Applying cpu profile: {} to host: {}".format(profile_uuid, host)) - - code, output = cli.system('host-apply-cpuprofile', - '{} {}'.format(host, profile_uuid), - ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info, - timeout=timeout) - - if 1 == code: - LOG.warning(output) - return 1, output - - LOG.info("Post action host-apply-cpuprofile") - res, out = compare_host_to_cpuprofile(host, profile_uuid) - - if res != 0: - LOG.warning(output) - return res, out - - success_msg = "cpu profile applied successfully" - LOG.info(success_msg) - return 0, success_msg - - -def get_host_cpu_cores_for_function(hostname, func='vSwitch', - core_type='log_core', thread=0, - con_ssh=None, - auth_info=Tenant.get('admin_platform'), - rtn_dict_per_proc=True): - """ - Get processor/logical cpu cores/per processor on thread 0 for given - function for host via system host-cpu-list - - Args: - hostname (str): hostname to pass to system host-cpu-list - func (str|tuple|list): such as 'Platform', 'vSwitch', or 'Applications' - core_type (str): 'phy_core' or 'log_core' - thread (int|None): thread number. 0 or 1 - con_ssh (SSHClient): - auth_info (dict): - rtn_dict_per_proc (bool) - - Returns (dict|list): format: { (int): (list), ...} - e.g., {0: [1, 2], 1: [21, 22]} - - """ - table_ = get_host_cpu_list_table(hostname, con_ssh=con_ssh, - auth_info=auth_info) - procs = list(set(table_parser.get_values(table_, 'processor', - thread=thread))) if \ - rtn_dict_per_proc else [ - None] - res = {} - - convert = False - if isinstance(func, str): - func = [func] - convert = True - - for proc in procs: - funcs_cores = [] - for func_ in func: - if func_: - func_ = 'Applications' if func_.lower() == 'vms' else func_ - cores = table_parser.get_values(table_, core_type, processor=proc, - assigned_function=func_, - thread=thread) - funcs_cores.append(sorted([int(item) for item in cores])) - - if convert: - funcs_cores = funcs_cores[0] - - if proc is not None: - res[int(str(proc))] = funcs_cores - else: - res = funcs_cores - break - - LOG.info("{} {} {}s: {}".format(hostname, func, core_type, res)) - return res - - -def get_logcores_counts(host, proc_ids=(0, 1), thread='0', functions=None, - con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - Get number of logical cores on given processor on thread 0. 
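# [Editor's note] Hedged usage sketch for get_host_cpu_cores_for_function()
# above; the host name is illustrative. With rtn_dict_per_proc=True the
# return shape is {proc_id: [log_cores]}, e.g. {0: [1, 2], 1: [21, 22]}.
vswitch_cores = get_host_cpu_cores_for_function('compute-0', func='vSwitch')
total = sum(len(cores) for cores in vswitch_cores.values())
LOG.info("vSwitch logical cores across processors: {}".format(total))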
- - Args: - host: - proc_ids: - thread (str|list): '0' or ['0', '1'] - con_ssh: - functions (list|str) - auth_info - - Returns (list): - - """ - table_ = get_host_cpu_list_table(host=host, con_ssh=con_ssh, - auth_info=auth_info) - table_ = table_parser.filter_table(table_, thread=thread) - - rtns = [] - kwargs = {} - if functions: - kwargs = {'assigned_function': functions} - - for i in proc_ids: - cores_on_proc = table_parser.get_values(table_, 'log_core', - processor=str(i), **kwargs) - LOG.info("Cores on proc {}: {}".format(i, cores_on_proc)) - rtns.append(len(cores_on_proc)) - - return rtns - - -def get_host_procs(hostname, con_ssh=None, - auth_info=Tenant.get('admin_platform')): - table_ = get_host_cpu_list_table(host=hostname, con_ssh=con_ssh, - auth_info=auth_info) - procs = table_parser.get_column(table_, 'processor') - return sorted(list(set(procs))) - - -def get_expected_vswitch_port_engine_map(host_ssh): - """ - Get expected ports and vswitch cores mapping via vshell port-list and - vshell engine-list - - Args: - host_ssh (SSHClient): ssh of a nova host - - Returns (dict): format: { (str): (list), ...} - e.g., {'0': ['1', '2'], '1': ['1', '2']} - - """ - ports_tab = table_parser.table( - host_ssh.exec_cmd("vshell port-list", fail_ok=False)[1]) - ports_tab = table_parser.filter_table(ports_tab, type='physical') - - cores_tab = table_parser.table( - host_ssh.exec_cmd("vshell engine-list", fail_ok=False)[1]) - - header = 'socket' if 'socket' in ports_tab['headers'] else 'socket-id' - sockets_for_ports = sorted(int(item) for item in list( - set(table_parser.get_column(ports_tab, header)))) - sockets_for_cores = sorted(int(item) for item in list( - set(table_parser.get_column(cores_tab, 'socket-id')))) - expt_map = {} - if sockets_for_ports == sockets_for_cores: - for socket in sockets_for_ports: - soc_ports = table_parser.get_values(ports_tab, 'id', - **{header: str(socket)}) - soc_cores = sorted(int(item) for item in - table_parser.get_values(cores_tab, 'cpuid', - **{'socket-id': str( - socket)})) - for port in soc_ports: - expt_map[port] = soc_cores - - else: - all_ports = table_parser.get_column(ports_tab, 'id') - all_cores = sorted( - int(item) for item in table_parser.get_column(cores_tab, 'cpuid')) - for port in all_ports: - expt_map[port] = all_cores - - return expt_map - - -def get_host_instance_backing(host, con_ssh=None, auth_info=Tenant.get('admin'), - fail_ok=False, refresh=False): - """ - Get instance backing for host. 
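# [Editor's note] The port-to-engine mapping built above groups vshell ports
# and engine cores by CPU socket. A self-contained sketch of that grouping
# over plain dicts (the sample data is invented for illustration):
ports = {'port0': 0, 'port1': 1}      # port id -> socket id
engines = {0: [1, 2], 1: [21, 22]}    # socket id -> engine core ids
expt_map = {port: sorted(engines[socket]) for port, socket in ports.items()}
print(expt_map)  # {'port0': [1, 2], 'port1': [21, 22]}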
- - Args: - host (str): - con_ssh: - auth_info (dict) - fail_ok: - refresh (bool): if not refresh, it will try to get the value from - existing global var if already exist - - Returns (str): remote, local_image, or '' (if unable to get host backing - from nova conf) - - """ - instance_backings = ProjVar.get_var('INSTANCE_BACKING') - if not refresh and instance_backings: - for backing, hosts in instance_backings.items(): - if host in hosts: - return backing - - config = kube_helper.get_openstack_configs(conf_file='/etc/nova/nova.conf', - configs={ - 'libvirt': 'images_type'}, - node=host, - label_app='nova', - label_component='compute', - con_ssh=con_ssh) - images_type = list(config.values())[0].get('libvirt', 'images_type', - fallback='') - if not images_type: - if fail_ok: - return '' - raise exceptions.NovaError( - 'images_type cannot be determined from {} nova-compute pod'.format( - host)) - - host_backing = 'remote' if images_type == 'rbd' else 'local_image' - LOG.info("{} instance backing: {}".format(host, host_backing)) - if host_backing not in instance_backings: - instance_backings[host_backing] = [] - - for backing, hosts_with_backing in instance_backings.items(): - if host_backing == backing and host not in hosts_with_backing: - instance_backings[backing].append(host) - elif host_backing != backing and host in hosts_with_backing: - instance_backings[backing].remove(host) - - ProjVar.set_var(INSTANCE_BACKING=instance_backings) - - return host_backing - - -def assign_host_labels(host, labels, default_value='enabled', check_first=True, - lock=True, unlock=True, fail_ok=False, - con_ssh=None, auth_info=Tenant.get('admin_platform')): - """ - Assign given labels to host - Args: - host: - labels (dict|list): when list of label names instead dict, - use default_value for each label - default_value (str): - check_first: - lock: - unlock: - fail_ok: - con_ssh: - auth_info: - - Returns (tuple): - (-1, "Host already have expected labels: . Do nothing.") - (0, (dict)) - (1, ) - - """ - if isinstance(labels, (list, tuple)): - labels = {label: default_value for label in labels} - - if check_first: - existing_labels = get_host_labels_info(host, con_ssh=con_ssh, - auth_info=auth_info) - for label, expt_val in labels.items(): - if expt_val != existing_labels.get(label, 'disabled'): - LOG.debug( - "{} label needs to assigned to {}".format(label, host)) - break - else: - msg = "{} already have expected labels: {}. 
Do nothing.".format( - host, labels) - LOG.info(msg) - return -1, msg - - if lock: - lock_host(host, con_ssh=con_ssh, swact=True, auth_info=auth_info) - - args = '{} {}'.format(host, ' '.join( - ['{}={}'.format(key, val) for key, val in labels.items()])) - code, output = cli.system('host-label-assign', args, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - if code > 0: - return 1, output - - LOG.info("{} label(s) assigned: {}".format(host, labels)) - if unlock: - unlock_host(host, con_ssh=con_ssh, auth_info=auth_info) - - post_labels = get_host_labels_info(host, con_ssh=con_ssh, - auth_info=auth_info) - for label_, expt_val in labels.items(): - if expt_val != post_labels.get(label_, 'disabled'): - raise exceptions.SysinvError( - 'Unexpected value for {} label {}'.format(host, label_)) - - LOG.info("{} label(s) removed: {}".format(host, labels)) - - return 0, labels - - -def get_host_labels_info(host, con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - Get host labels - Args: - host (str): - con_ssh: - auth_info: - - Returns (dict): key/value pairs of host labels - - """ - output = cli.system('host-label-list --nowrap', host, ssh_client=con_ssh, - auth_info=auth_info)[1] - table_ = table_parser.table(output) - label_keys = table_parser.get_column(table_, 'label key') - label_values = table_parser.get_column(table_, 'label value') - - labels_info = {label_keys[i]: label_values[i] for i in - range(len(label_keys))} - return labels_info - - -def remove_host_labels(host, labels, check_first=True, lock=True, unlock=True, - fail_ok=False, con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - Remove given labels from host - Args: - host: - labels (tuple|list): labels to remove - check_first: - lock: - unlock: - fail_ok: - con_ssh: - auth_info: - - Returns (tuple): - (-1, "Host already have expected labels: . Do nothing.") - (0, (list)) - (1, ) - - """ - if isinstance(labels, str): - labels = [labels] - - labels_to_remove = labels - if check_first: - existing_labels = get_host_labels_info(host, con_ssh=con_ssh, - auth_info=auth_info) - labels_to_remove = list(set(labels) & set(existing_labels)) - if not labels_to_remove: - msg = "{} does not have any of these labels to remove: {}. 
Do " \ - "nothing.".format(host, labels) - LOG.info(msg) - return -1, msg - - if lock: - lock_host(host, con_ssh=con_ssh, swact=True, auth_info=auth_info) - - args = '{} {}'.format(host, ' '.join(labels_to_remove)) - code, output = cli.system('host-label-remove', args, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - if code > 0: - return 1, output - - if unlock: - unlock_host(host, con_ssh=con_ssh, auth_info=auth_info) - - post_labels = get_host_labels_info(host, con_ssh=con_ssh, - auth_info=auth_info) - unremoved_labels = list(set(labels) & set(post_labels)) - if unremoved_labels: - raise exceptions.SysinvError( - "{} labels still exist after removal: {}".format(host, - unremoved_labels)) - - LOG.info("{} label(s) removed: {}".format(host, labels)) - - return 0, labels - - -def set_host_storage_backing(host, inst_backing, lock=True, unlock=True, - wait_for_configured=True, check_first=True, - fail_ok=False, - auth_info=Tenant.get('admin_platform'), - con_ssh=None): - """ - - Args: - host (str): host to modify lvg for - inst_backing (str): image, or remote - wait_for_configured (bool): Whether or not wait for host instance - backing change via system host-lvg-show - lock (bool): whether or not to lock host before modify - unlock (bool): whether or not to unlock host and verify config after - modify - check_first - fail_ok (bool): whether or not raise exception if host-label-assign - fails - auth_info (dict): - con_ssh (SSHClient): - - Returns: - - """ - if wait_for_configured and not unlock: - raise ValueError("'wait_for_configured=True' requires 'unlock=True'") - - label = { - 'remote-storage': 'enabled' if inst_backing == 'remote' else 'disabled'} - code, output = assign_host_labels(host, labels=label, lock=lock, - unlock=unlock, fail_ok=fail_ok, - check_first=check_first, - auth_info=auth_info, con_ssh=con_ssh) - if code > 0: - return 1, 'Failed to assign label to {}: {}'.format(host, output) - - if wait_for_configured: - nova_auth = Tenant.get('admin', dc_region=auth_info.get( - 'region') if auth_info else None) - res = wait_for_host_in_instance_backing(host=host, - storage_backing=inst_backing, - fail_ok=fail_ok, - auth_info=nova_auth) - if not res: - err = "Host {} is not in {} lvg within timeout".format( - host, inst_backing) - return 2, err - - return 0, "{} storage backing is successfully set to {}".format( - host, inst_backing) - - -def wait_for_host_in_instance_backing(host, storage_backing, timeout=120, - check_interval=3, fail_ok=False, - con_ssh=None, - auth_info=Tenant.get('admin')): - """ - Wait for host instance backing to be given value via system host-lvg-show - Args: - host (str): - storage_backing: local_image or remote - timeout: - check_interval: - fail_ok: - con_ssh: - auth_info - - Returns: - - """ - storage_backing = 'local_image' if 'image' in storage_backing else \ - storage_backing - end_time = time.time() + timeout - while time.time() < end_time: - host_backing = get_host_instance_backing(host=host, con_ssh=con_ssh, - refresh=True, - auth_info=auth_info) - if host_backing in storage_backing: - LOG.info("{} is configured with {} backing".format( - host, storage_backing)) - time.sleep(30) - return True - - time.sleep(check_interval) - - err_msg = "Timed out waiting for {} to appear in {} host-aggregate".format( - host, storage_backing) - if fail_ok: - LOG.warning(err_msg) - return False - else: - raise exceptions.HostError(err_msg) - - -def __parse_total_cpus(output): - last_line = output.splitlines()[-1] - print(last_line) - # Final resource view: 
name=controller-0 phys_ram=44518MB used_ram=0MB - # phys_disk=141GB used_disk=1GB - # free_disk=133GB total_vcpus=31 used_vcpus=0.0 pci_stats=[PciDevicePool( - # count=1,numa_node=0,product_id='0522', - # tags={class_id='030000',configured='1',dev_type='type-PCI'}, - # vendor_id='102b')] - total = round(float(re.findall(r'used_vcpus=([\d|.]*) ', last_line)[0]), 4) - return total - - -def get_vcpus_per_proc(hosts=None, thread=None, con_ssh=None, - auth_info=Tenant.get('admin_platform')): - if not hosts: - hosts = get_up_hypervisors(con_ssh=con_ssh) - elif isinstance(hosts, str): - hosts = [hosts] - - vcpus_per_proc = {} - for host in hosts: - vcpus_per_proc[host] = {} - cpus_per_proc = get_host_cpu_cores_for_function(host, - func='Applications', - thread=thread, - auth_info=auth_info, - con_ssh=con_ssh) - with ssh_to_host(host, con_ssh=con_ssh) as host_ssh: - cmd = """ps-sched.sh|grep qemu|grep " CPU" |awk '{{print $10;}}'""" - cores = host_ssh.exec_cmd(cmd)[1] - cores = [int(core.strip()) for core in cores.splitlines()] - - for proc, total_vcpus_per_proc in cpus_per_proc.items(): - used_cores = list(set(total_vcpus_per_proc) & set(cores)) - vcpus_per_proc[host][proc] = (used_cores, total_vcpus_per_proc) - - return vcpus_per_proc - - -def get_vcpus_for_computes(hosts=None, field='vcpus_used', con_ssh=None): - """ - Get vcpus info for given computes via openstack hypervisor show - Args: - hosts: - field (str): valid values: vcpus_used, vcpus, vcpus_avail - con_ssh: - - Returns (dict): host(str),cpu_val(float with 4 digits after decimal - point) pairs as dictionary - - """ - if hosts is None: - hosts = get_up_hypervisors(con_ssh=con_ssh) - elif isinstance(hosts, str): - hosts = [hosts] - - if field == 'used_now': - field = 'vcpus_used' - - if 'avail' not in field: - hosts_cpus = get_hypervisor_info(hosts=hosts, field=field, - con_ssh=con_ssh) - else: - cpus_info = get_hypervisor_info(hosts=hosts, - field=('vcpus', 'vcpus_used'), - con_ssh=con_ssh) - hosts_cpus = {} - for host in hosts: - total_cpu, used_cpu = cpus_info[host] - hosts_cpus[host] = float(total_cpu) - float(used_cpu) - - return hosts_cpus - - -def get_hypervisor_info(hosts, field='status', con_ssh=None, - auth_info=Tenant.get('admin')): - """ - Get info from openstack hypervisor show for specified field - Args: - hosts (str|list): hostname(s) - field (str|list|tuple): field(s) in hypervisor show table - con_ssh: - auth_info: - - Returns (dict): {(str): val(str|list), ...} - """ - if isinstance(hosts, str): - hosts = [hosts] - - convert_to_str = False - if isinstance(field, str): - field = [field] - convert_to_str = True - - hosts_vals = {} - for host in hosts: - table_ = table_parser.table( - cli.openstack('hypervisor show --fit-width', host, - ssh_client=con_ssh, - auth_info=auth_info)[1], combine_multiline_entry=True) - vals = [] - for field_ in field: - val = table_parser.get_value_two_col_table(table_, field=field_, - strict=True, - merge_lines=True) - try: - val = eval(val) - except (NameError, SyntaxError): - pass - vals.append(val) - if convert_to_str: - vals = vals[0] - hosts_vals[host] = vals - - LOG.info("Hosts_info: {}".format(hosts_vals)) - return hosts_vals - - -def _get_host_logcores_per_thread(host, con_ssh=None, - auth_info=Tenant.get('admin_platform')): - table_ = get_host_cpu_list_table(host=host, con_ssh=con_ssh, - auth_info=auth_info) - threads = list(set(table_parser.get_column(table_, 'thread'))) - cores_per_thread = {} - for thread in threads: - table_thread = table_parser.filter_table(table_, strict=True, 
- regex=False, thread=thread) - cores_str = table_parser.get_column(table_thread, 'log_core') - cores_per_thread[int(thread)] = [int(core) for core in cores_str] - - return cores_per_thread - - -def get_thread_num_for_cores(log_cores, host, con_ssh=None): - cores_per_thread = _get_host_logcores_per_thread(host=host, con_ssh=con_ssh) - - core_thread_dict = {} - for thread, cores_for_thread in cores_per_thread.items(): - for core in log_cores: - if int(core) in cores_for_thread: - core_thread_dict[core] = thread - - if len(core_thread_dict) == len(log_cores): - return core_thread_dict - else: - raise exceptions.HostError( - "Cannot find thread num for all cores provided. Cores provided: " - "{}. Threads found: {}".format(log_cores, core_thread_dict)) - - -def get_logcore_siblings(host, con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - Get cpu pairs for given host. - Args: - host (str): such as compute-1 - con_ssh (SSHClient): - auth_info (dict) - - Returns (list): list of log_core_siblings(tuple). Output examples: - - HT enabled: [[0, 20], [1, 21], ..., [19, 39]] - - HT disabled: [[0], [1], ..., [19]] - """ - if con_ssh is None: - con_ssh = ControllerClient.get_active_controller() - - table_ = get_host_cpu_list_table(host=host, con_ssh=con_ssh, - auth_info=auth_info) - phy_cores = sorted( - [int(i) for i in set(table_parser.get_column(table_, 'phy_core'))]) - - sibling_pairs = [] - for phy_core in phy_cores: - log_cores = table_parser.get_values(table_, 'log_core', - **{'phy_core': str(phy_core)}) - sibling_pairs.append(log_cores) - - LOG.info("Sibling cores for {}: {}".format(host, sibling_pairs)) - return sibling_pairs - - -def get_vcpu_pins_for_instance_via_virsh(host_ssh, instance_name): - vcpu_pins = get_values_virsh_xmldump(instance_name=instance_name, - host_ssh=host_ssh, - tag_paths='cputune/vcpupin', - target_type='dict') - return vcpu_pins - - -def get_hosts_per_storage_backing(up_only=True, con_ssh=None, - auth_info=Tenant.get('admin'), hosts=None, - refresh=False): - """ - Get hosts for each possible storage backing - Args: - up_only (bool): whether to return up hypervisor only - auth_info - con_ssh: - hosts (None|list|tuple): hosts to check - refresh (bool) - - Returns (dict): {'local_image': , - 'remote': - } - """ - instance_backings = ProjVar.get_var('INSTANCE_BACKING') - if instance_backings and not refresh and not up_only: - return instance_backings - - if not hosts: - host_func = get_up_hypervisors if up_only else get_hypervisors - hosts = host_func(con_ssh=con_ssh, auth_info=auth_info) - elif isinstance(hosts, str): - hosts = (hosts,) - - for host in hosts: - backing = get_host_instance_backing(host=host, con_ssh=con_ssh, - fail_ok=True, refresh=refresh) - if not backing: - LOG.warning('{} instance backing cannot be determined'.format(host)) - - post_instance_backings = ProjVar.get_var('INSTANCE_BACKING') - LOG.info("Hosts per storage backing: {}".format(post_instance_backings)) - if not ProjVar.get_var( - 'DEFAULT_INSTANCE_BACKING') or post_instance_backings != \ - instance_backings: - # Host backing changed. 
As a result, - # if system has more than 1 instance backings across nova - # hypervisors, nova aggregates need to be created - # in order to restrict openstack vms onto host(s) with specific - # instance backing - configured_backings = [backing for backing in post_instance_backings if - post_instance_backings.get(backing)] - default_local_storage = 'remote' - if 'local_image' in configured_backings: - default_local_storage = 'local_image' - if len(post_instance_backings.get('remote', [])) > len( - post_instance_backings['local_image']): - default_local_storage = 'remote' - - ProjVar.set_var(DEFAULT_INSTANCE_BACKING=default_local_storage) - if len(configured_backings) > 1: - # More than 1 instance backings across nova hosts - # Need to configure host aggregates - aggregates = nova_helper.get_aggregates(con_ssh=con_ssh, - auth_info=auth_info) - for inst_backing in configured_backings: - expt_hosts = sorted(post_instance_backings[inst_backing]) - aggregate_name = STORAGE_AGGREGATE[inst_backing] - if aggregate_name not in aggregates: - nova_helper.create_aggregate(name=aggregate_name, - avail_zone='nova', - check_first=False, - con_ssh=con_ssh, - auth_info=auth_info) - properties = {} - hosts_in_aggregate = [] - else: - properties, hosts_in_aggregate = \ - nova_helper.get_aggregate_values( - aggregate_name, - fields=('properties', 'hosts'), - con_ssh=con_ssh, auth_info=auth_info) - - property_key = FlavorSpec.STORAGE_BACKING.split(':')[-1].strip() - if property_key not in properties: - nova_helper.set_aggregate( - aggregate_name, - properties={property_key: inst_backing}, - con_ssh=con_ssh, auth_info=auth_info) - - if expt_hosts != sorted(hosts_in_aggregate): - hosts_to_remove = list( - set(hosts_in_aggregate) - set(expt_hosts)) - hosts_to_add = list( - set(expt_hosts) - set(hosts_in_aggregate)) - if hosts_to_add: - nova_helper.add_hosts_to_aggregate( - aggregate=aggregate_name, hosts=hosts_to_add, - check_first=False, con_ssh=con_ssh, - auth_info=auth_info) - if hosts_to_remove: - nova_helper.remove_hosts_from_aggregate( - aggregate=aggregate_name, hosts=hosts_to_remove, - check_first=False, con_ssh=con_ssh, - auth_info=auth_info) - - return {backing: hosts_ for backing, hosts_ in - post_instance_backings.items() if set(hosts_) & set(hosts)} - - -def get_coredumps_and_crashreports(move=True): - """ - Get core dumps and crash reports from every host - Args: - move: whether to move coredumps and crashreports to local automation dir - - Returns (dict): - - """ - LOG.info( - "Getting existing system crash reports from /var/crash/ and coredumps " - "from /var/lib/systemd/coredump/") - hosts_to_check = system_helper.get_hosts( - availability=(HostAvailState.FAILED, HostAvailState.OFFLINE), - exclude=True) - - core_dumps_and_reports = {} - active_con = system_helper.get_active_controller_name() - con_ssh = ControllerClient.get_active_controller() - con_dir = '{}/coredumps_and_crashreports/'.format(HostLinuxUser.get_home()) - con_ssh.exec_cmd('mkdir -p {}'.format(con_dir)) - scp_to_local = False - ls_cmd = 'ls -l --time-style=+%Y-%m-%d_%H-%M-%S {} | grep --color=never ' \ - '-v total' - core_dump_dir = '/var/lib/systemd/coredump/' - crash_report_dir = '/var/crash/' - for host in hosts_to_check: - with ssh_to_host(hostname=host) as host_ssh: - core_dumps_and_reports[host] = [] - - for failure_dir in (core_dump_dir, crash_report_dir): - failures = host_ssh.exec_cmd(ls_cmd.format(failure_dir), - fail_ok=True)[1].splitlines() - core_dumps_and_reports[host].append(failures) - - if move and failures: - for 
line in failures: - timestamp, name = line.split(sep=' ')[-2:] - new_name = '_'.join((host, timestamp, name)) - host_ssh.exec_sudo_cmd( - 'mv {}/{} {}/{}'.format(failure_dir, name, failure_dir, - new_name), - fail_ok=False) - - scp_to_local = True - if host_ssh.get_hostname() != active_con: - host_ssh.scp_on_source( - source_path='{}/*'.format(failure_dir), - dest_user=HostLinuxUser.get_user(), - dest_ip=active_con, dest_path=con_dir, - dest_password=HostLinuxUser.get_password()) - else: - host_ssh.exec_sudo_cmd( - 'cp -r {}/* {}'.format(failure_dir, con_dir), - fail_ok=False) - host_ssh.exec_sudo_cmd('rm -rf {}/*'.format(failure_dir)) - - if scp_to_local: - con_ssh.exec_sudo_cmd('chmod -R 755 {}'.format(con_dir)) - - log_dir = ProjVar.get_var('LOG_DIR') - coredump_and_crashreport_dir = os.path.join( - log_dir, 'coredumps_and_crashreports') - os.makedirs(coredump_and_crashreport_dir, exist_ok=True) - source_path = '{}/*'.format(con_dir) - common.scp_from_active_controller_to_localhost( - source_path=source_path, dest_path=coredump_and_crashreport_dir) - con_ssh.exec_cmd('rm -rf {}/*'.format(con_dir)) - - LOG.info("core dumps and crash reports per host: {}".format( - core_dumps_and_reports)) - return core_dumps_and_reports - - -def modify_mtu_on_interface(host, interface, mtu_val, network_type='data', - lock_unlock=True, fail_ok=False, con_ssh=None): - mtu_val = int(mtu_val) - - LOG.info("Modify MTU for IF {} of NET-TYPE {} to: {} on {}".format( - interface, network_type, mtu_val, host)) - - args = "-m {} {} {}".format(mtu_val, host, interface) - - code, output = cli.system('host-if-modify', args, ssh_client=con_ssh, - fail_ok=fail_ok) - - if code != 0: - msg = "Attempt to change MTU failed on host:{} for IF:{} to " \ - "MTU:{}".format(host, interface, mtu_val) - if fail_ok: - return 2, msg - raise exceptions.HostPostCheckFailed(msg) - - if lock_unlock: - unlock_host(host) - - return code, output - - -def modify_mtu_on_interfaces(hosts, mtu_val, network_type, lock_unlock=True, - fail_ok=False, con_ssh=None): - if not hosts: - raise exceptions.HostError("No hostname provided.") - - mtu_val = int(mtu_val) - - if isinstance(hosts, str): - hosts = [hosts] - - res = {} - rtn_code = 0 - - if_class = network_type - network = '' - if network_type in PLATFORM_NET_TYPES: - if_class = 'platform' - network = network_type - - for host in hosts: - table_ = table_parser.table( - cli.system('host-if-list', '{} --nowrap'.format(host), - ssh_client=con_ssh)[1]) - table_ = table_parser.filter_table(table_, **{'class': if_class}) - # exclude unmatched platform interfaces from the table. 
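# [Editor's note] Hedged sketch of the interface-ordering rule applied in
# modify_mtu_on_interfaces() below: interfaces that 'use' other interfaces
# are modified first when the MTU is not shrinking, otherwise last. The
# function and argument names here are illustrative only.
def mtu_update_order(uses_ifs, leaf_ifs, current_mtu, new_mtu):
    if current_mtu <= new_mtu:
        return uses_ifs + leaf_ifs
    return leaf_ifs + uses_ifs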
- if 'platform' == if_class: - platform_ifs = table_parser.get_values(table_, target_header='name', - **{'class': 'platform'}) - for pform_if in platform_ifs: - if_nets = \ - get_host_interface_values(host=host, interface=pform_if, - fields='networks', - con_ssh=con_ssh)[0] - if_nets = [if_net.strip() for if_net in if_nets.split(sep=',')] - if network not in if_nets: - table_ = table_parser.filter_table(table_, strict=True, - exclude=True, - name=pform_if) - - uses_if_names = table_parser.get_values(table_, 'name', exclude=True, - **{'uses i/f': '[]'}) - non_uses_if_names = table_parser.get_values(table_, 'name', - exclude=False, - **{'uses i/f': '[]'}) - uses_if_first = False - if uses_if_names: - current_mtu = int( - get_host_interface_values(host, interface=uses_if_names[0], - fields=['imtu'], - con_ssh=con_ssh)[0]) - if current_mtu <= mtu_val: - uses_if_first = True - - if uses_if_first: - if_names = uses_if_names + non_uses_if_names - else: - if_names = non_uses_if_names + uses_if_names - - if lock_unlock: - lock_host(host, swact=True) - - LOG.info("Modify MTU for {} {} interfaces to: {}".format( - host, network_type, mtu_val)) - - res_for_ifs = {} - for if_name in if_names: - args = "-m {} {} {}".format(mtu_val, host, if_name) - # system host-if-modify controller-1 --imtu - code, output = cli.system('host-if-modify', args, - ssh_client=con_ssh, fail_ok=fail_ok) - res_for_ifs[if_name] = code, output - - if code != 0: - rtn_code = 1 - - res[host] = res_for_ifs - - if lock_unlock: - unlock_hosts(hosts, check_hypervisor_up=True, check_webservice_up=True) - - check_failures = [] - for host in hosts: - host_res = res[host] - for if_name in host_res: - mod_res = host_res[if_name] - - # Check mtu modified correctly - if mod_res[0] == 0: - actual_mtu = int( - get_host_interface_values(host, interface=if_name, - fields=['imtu'], - con_ssh=con_ssh)[0]) - if not actual_mtu == mtu_val: - check_failures.append((host, if_name, actual_mtu)) - - if check_failures: - msg = "Actual MTU value after modify is not as expected. " \ - "Expected MTU value: {}. 
Actual [Host, Interface, " \ - "MTU value]: {}".format(mtu_val, check_failures) - if fail_ok: - return 2, msg - raise exceptions.HostPostCheckFailed(msg) - - return rtn_code, res - - -def get_hosts_and_pnets_with_pci_devs(pci_type='pci-sriov', up_hosts_only=True, - con_ssh=None, - auth_info=Tenant.get('admin')): - """ - - Args: - pci_type (str|list|tuple): pci-sriov, pci-passthrough - up_hosts_only: - con_ssh: - auth_info: - - Returns (dict): hosts and pnets with ALL specified pci devs - - """ - state = 'up' if up_hosts_only else None - hosts = get_hypervisors(state=state, auth_info=auth_info) - sysinv_auth = Tenant.get('admin_platform', dc_region=auth_info.get( - 'region') if auth_info else None) - - hosts_pnets_with_pci = {} - if isinstance(pci_type, str): - pci_type = [pci_type] - - for host_ in hosts: - pnets_list_for_host = [] - for pci_type_ in pci_type: - - pnets_list = get_host_interfaces(host_, field='data networks', - net_type=pci_type_, - con_ssh=con_ssh, - auth_info=sysinv_auth) - pnets_for_type = [] - for pnets_ in pnets_list: - pnets_for_type += pnets_ - - if not pnets_for_type: - LOG.info("{} {} interface data network not found".format( - host_, pci_type_)) - pnets_list_for_host = [] - break - pnets_list_for_host.append(list(set(pnets_for_type))) - - if pnets_list_for_host: - pnets_final = pnets_list_for_host[0] - for pnets_ in pnets_list_for_host[1:]: - pnets_final = list(set(pnets_final) & set(pnets_)) - - if pnets_final: - hosts_pnets_with_pci[host_] = pnets_final - - if not hosts_pnets_with_pci: - LOG.info("No {} interface found from any of following hosts: " - "{}".format(pci_type, hosts)) - else: - LOG.info("Hosts and provider networks with {} devices: {}".format( - pci_type, hosts_pnets_with_pci)) - - return hosts_pnets_with_pci - - -def get_sm_dump_table(controller, con_ssh=None): - """ - - Args: - controller (str|SSHClient): controller name/ssh client to get sm-dump - con_ssh (SSHClient): ssh client for active controller - - Returns (): - table_ (dict): Dictionary of a table parsed by tempest. 
- Example: table = - { - 'headers': ["Field", "Value"]; - 'values': [['name', 'internal-subnet0'], ['id', '36864844783']]} - - """ - if isinstance(controller, str): - with ssh_to_host(controller, con_ssh=con_ssh) as host_ssh: - return table_parser.sm_dump_table( - host_ssh.exec_sudo_cmd('sm-dump', fail_ok=False)[1]) - - host_ssh = controller - return table_parser.sm_dump_table( - host_ssh.exec_sudo_cmd('sm-dump', fail_ok=False)[1]) - - -def get_sm_dump_items(controller, item_names=None, con_ssh=None): - """ - get sm dump dict for specified items - Args: - controller (str|SSHClient): hostname or ssh client for a controller - such as controller-0, controller-1 - item_names (list|str|None): such as 'oam-services', or ['oam-ip', - 'oam-services'] - con_ssh (SSHClient): - - Returns (dict): such as {'oam-services': {'desired-state': 'active', - 'actual-state': 'active'}, - 'oam-ip': {...} - } - - """ - sm_dump_tab = get_sm_dump_table(controller=controller, con_ssh=con_ssh) - if item_names: - if isinstance(item_names, str): - item_names = [item_names] - - sm_dump_tab = table_parser.filter_table(sm_dump_tab, name=item_names) - - sm_dump_items = table_parser.row_dict_table(sm_dump_tab, key_header='name', - unique_key=True) - return sm_dump_items - - -def get_sm_dump_item_states(controller, item_name, con_ssh=None): - """ - get desired and actual states of given item - - Args: - controller (str|SSHClient): hostname or host_ssh for a controller - such as controller-0, controller-1 - item_name (str): such as 'oam-services' - con_ssh (SSHClient): - - Returns (tuple): (, ) such as ('active', - 'active') - - """ - item_value_dict = \ - get_sm_dump_items(controller=controller, item_names=item_name, - con_ssh=con_ssh)[item_name] - - return item_value_dict['desired-state'], item_value_dict['actual-state'] - - -def wait_for_sm_dump_desired_states(controller, item_names=None, timeout=60, - strict=True, fail_ok=False, con_ssh=None): - """ - Wait for sm_dump item(s) to reach desired state(s) - - Args: - controller (str): controller name - item_names (str|list|None): item(s) name(s) to wait for desired - state(s). Wait for desired states for all items - when set to None. - timeout (int): max seconds to wait - strict (bool): whether to find strict match for given item_names. 
- e.g., item_names='drbd-', strict=False will - check all items whose name contain 'drbd-' - fail_ok (bool): whether or not to raise exception if any item did not - reach desired state before timed out - con_ssh (SSHClient): - - Returns (bool): True if all of given items reach desired state - - """ - - LOG.info("Waiting for {} {} in sm-dump to reach desired state".format( - controller, item_names)) - if item_names is None: - item_names = get_sm_dump_items(controller=controller, - item_names=item_names, con_ssh=con_ssh) - - elif not strict: - table_ = get_sm_dump_table(controller=controller, con_ssh=con_ssh) - item_names = table_parser.get_values(table_, 'name', strict=False, - name=item_names) - - if isinstance(item_names, str): - item_names = [item_names] - - items_to_check = {} - for item in item_names: - items_to_check[item] = {} - items_to_check[item]['prev-state'] = items_to_check[item][ - 'actual-state'] = \ - items_to_check[item]['desired-state'] = '' - - def __wait_for_desired_state(ssh_client): - end_time = time.time() + timeout - - while time.time() < end_time: - items_names_to_check = list(items_to_check.keys()) - items_states = get_sm_dump_items(ssh_client, - item_names=items_names_to_check, - con_ssh=con_ssh) - - for item_ in items_states: - items_to_check[item_].update(**items_states[item_]) - - prev_state = items_to_check[item_]['prev-state'] - desired_state = items_states[item_]['desired-state'] - actual_state = items_states[item_]['actual-state'] - - if desired_state == actual_state: - LOG.info( - "{} in sm-dump has reached desired state: {}".format( - item_, desired_state)) - items_to_check.pop(item_) - continue - - elif prev_state and actual_state != prev_state: - LOG.info( - "{} actual state changed from {} to {} while desired " - "state is: {}". - format(item_, prev_state, actual_state, desired_state)) - - items_to_check[item_].update(prev_state=actual_state) - - if not items_to_check: - return True - - time.sleep(3) - - err_msg = "Timed out waiting for sm-dump item(s) to reach desired " \ - "state(s): {}".format(items_to_check) - if fail_ok: - LOG.warning(err_msg) - return False - else: - raise exceptions.TimeoutException(err_msg) - - if isinstance(controller, str): - with ssh_to_host(controller, con_ssh=con_ssh) as host_ssh: - return __wait_for_desired_state(host_ssh) - else: - return __wait_for_desired_state(controller) - - -# This is a copy from installer_helper due to blocking issues in -# installer_helper on importing non-exist modules - - -@contextmanager -def ssh_to_test_server(test_srv=TestFileServer.SERVER, user=TestFileServer.USER, - password=TestFileServer.PASSWORD, prompt=None): - """ - ssh to test server. - Usage: Use with context_manager. i.e., - with ssh_to_build_server(bld_srv=cgts-yow3-lx) as bld_srv_ssh: - # do something - # ssh session will be closed automatically - - Args: - test_srv (str): test server ip - user (str): svc-cgcsauto if unspecified - password (str): password for svc-cgcsauto user if unspecified - prompt (str|None): expected prompt. such as: - svc-cgcsauto@yow-cgts4-lx.wrs.com$ - - Yields (SSHClient): ssh client for given build server and user - - """ - # Get build_server dict from bld_srv param. 
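- # (Hypothetical usage sketch; the address below is a placeholder, not a
- # value from this repo)
- #
- #     with ssh_to_test_server(test_srv='128.224.150.21') as srv_ssh:
- #         rc, out = srv_ssh.exec_cmd('hostname')
- #     # the session is closed automatically when the block exits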
- - prompt = prompt if prompt else Prompt.TEST_SERVER_PROMPT_BASE.format(user) - test_server_conn = SSHClient(test_srv, user=user, password=password, - initial_prompt=prompt) - test_server_conn.connect() - - try: - yield test_server_conn - finally: - test_server_conn.close() - - -def get_host_co_processor_pci_list(hostname): - host_pci_info = [] - with ssh_to_host(hostname) as host_ssh: - LOG.info( - "Getting the Co-processor pci list for host {}".format(hostname)) - cmd = r"lspci -nnm | grep Co-processor | grep --color=never -v -A 1 " \ - r"-E 'Device \[0000\]|Virtual'" - rc, output = host_ssh.exec_cmd(cmd) - if rc != 0: - return host_pci_info - - # sample output: - # wcp7-12: - # 09:00.0 "Co-processor [0b40]" "Intel Corporation [8086]" "DH895XCC - # Series QAT [0435]" "Intel Corporation [8086]" "Device [35c5]" - # 09:01.0 "Co-processor [0b40]" "Intel Corporation [8086]" "DH895XCC - # Series QAT Virtual Function [0443]" "Intel Corporation [8086]" - # "Device [0000]" - - # wolfpass-13_14: - # 3f:00.0 "Co-processor [0b40]" "Intel Corporation [8086]" "Device [ - # 37c8]" -r04 "Intel Corporation [8086]" "Device [35cf]" - # 3f:01.0 "Co-processor [0b40]" "Intel Corporation [8086]" "Device [ - # 37c9]" -r04 "Intel Corporation [8086]" "Device [0000]" - # -- - # da:00.0 "Co-processor [0b40]" "Intel Corporation [8086]" "Device [ - # 37c8]" -r04 "Intel Corporation [8086]" "Device [35cf]" - # da:01.0 "Co-processor [0b40]" "Intel Corporation [8086]" "Device [ - # 37c9]" -r04 "Intel Corporation [8086]" "Device [0000]" - dev_sets = output.split('--\n') - for dev_set in dev_sets: - pdev_line, vdev_line = dev_set.strip().splitlines() - class_id, vendor_id, device_id = re.findall(r'\[([0-9a-fA-F]{4})\]', - pdev_line)[0:3] - vf_class_id, vf_vendor_id, vf_device_id = re.findall( - r'\[([0-9a-fA-F]{4})\]', vdev_line)[0:3] - assert vf_class_id == class_id - assert vf_vendor_id == vendor_id - assert device_id != vf_device_id - - vendor_name = \ - re.findall(r'\"([^\"]+) \[{}\]'.format(vendor_id), pdev_line)[0] - pci_alias = \ - re.findall(r'\"([^\"]+) \[{}\]'.format(device_id), pdev_line)[0] - if pci_alias == 'Device': - pci_alias = None - else: - pci_alias = 'qat-{}-vf'.format(pci_alias.lower()) - pci_address = ( - "0000:{}".format(pdev_line.split(sep=' "', maxsplit=1)[0])) - pci_name = "pci_{}".format( - pci_address.replace('.', '_').replace(':', '_').strip()) - # Ensure class id is at least 6 digits as displayed in nova - # device-list and system host-device-list - class_id = (class_id + '000000')[0:6] - - LOG.info("pci_name={} device_id={}".format(pci_name, device_id)) - pci_info = {'pci_address': pci_address, - 'pci_name': pci_name, - 'vendor_name': vendor_name, - 'vendor_id': vendor_id, - 'device_id': device_id, - 'class_id': class_id, - 'pci-alias': pci_alias, - 'vf_device_id': vf_device_id, - } - - host_pci_info.append(pci_info) - - LOG.info("The Co-processor pci list for host {}: {}".format( - hostname, host_pci_info)) - - return host_pci_info - - -def get_mellanox_ports(host): - """ - Get Mellanox data ports for given host - - Args: - host (str): hostname - - Returns (list): - - """ - data_ports = get_host_ports_for_net_type(host, net_type='data', - ports_only=True) - mt_ports = get_host_ports(host, 'uuid', if_name=data_ports, strict=False, - regex=True, **{'device type': MELLANOX_DEVICE}) - LOG.info("Mellanox ports: {}".format(mt_ports)) - return mt_ports - - -def is_host_locked(host, con_ssh=None): - admin_state = system_helper.get_host_values(host, 'administrative', - con_ssh=con_ssh)[0] - return 
admin_state.lower() == HostAdminState.LOCKED.lower() - - -def get_host_network_interface_dev_names(host, con_ssh=None): - dev_names = [] - with ssh_to_host(host, con_ssh=con_ssh) as host_ssh: - - cmd = "ifconfig -a | sed 's/[ \t].*//;/^$/d;/^lo/d'" - rc, output = host_ssh.exec_sudo_cmd(cmd) - if rc == 0: - output = output.splitlines() - for dev in output: - if dev.endswith(':'): - dev = dev[:-1] - dev_names.append(dev) - LOG.info( - "Host {} interface device names: {}".format(host, dev_names)) - else: - LOG.warning( - "Failed to get interface device names for host {}".format(host)) - - return dev_names - - -def get_host_interfaces_for_net_type(host, net_type='infra', if_type=None, - exclude_iftype=False, con_ssh=None): - """ - Get interface names for given net_type that is expected to be listed in - ifconfig on host - Args: - host (str): - net_type (str): 'infra', 'mgmt' or 'oam', (data is handled in AVS - thus not shown in ifconfig on host) - if_type (str|None): When None, interfaces with all eth types will return - exclude_iftype(bool): whether or not to exclude the if type specified. - con_ssh (SSHClient): - - Returns (dict): { - 'ethernet': [, , etc], - 'vlan': [, , etc], - 'ae': [(, []), (, []), ...] - } - - """ - LOG.info("Getting expected eth names for {} network on {}".format(net_type, - host)) - table_origin = get_host_interfaces_table(host=host, con_ssh=con_ssh) - - if if_type: - table_ = table_parser.filter_table(table_origin, exclude=exclude_iftype, - **{'type': if_type}) - else: - table_ = copy.deepcopy(table_origin) - - network = '' - if_class = net_type - if net_type in PLATFORM_NET_TYPES: - if_class = 'platform' - network = net_type - - table_ = table_parser.filter_table(table_, **{'class': if_class}) - # exclude unmatched platform interfaces from the table. 
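- # (Sketch with hypothetical names) After this filtering, the remaining
- # rows are grouped into the dict documented above, for example:
- #
- #     {'ethernet': ["['enp134s0f1']"],
- #      'ae': [('bond0', "['enp0s8', 'enp0s9']")],
- #      'vlan': ['enp0s8.100']}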
- if 'platform' == if_class: - platform_ifs = table_parser.get_values(table_, target_header='name', - **{'class': 'platform'}) - for pform_if in platform_ifs: - if_nets = get_host_interface_values(host=host, interface=pform_if, - fields='networks')[0] - if_nets = [if_net.strip() for if_net in if_nets.split(sep=',')] - if network not in if_nets: - table_ = table_parser.filter_table(table_, strict=True, - exclude=True, name=pform_if) - - interfaces = {} - table_eth = table_parser.filter_table(table_, **{'type': 'ethernet'}) - eth_ifs = table_parser.get_values(table_eth, 'ports') - interfaces['ethernet'] = eth_ifs - # such as ["[u'enp134s0f1']", "[u'enp131s0f1']"] - - table_ae = table_parser.filter_table(table_, **{'type': 'ae'}) - ae_names = table_parser.get_values(table_ae, 'name') - ae_ifs = table_parser.get_values(table_ae, 'uses i/f') - - ae_list = [] - for i in range(len(ae_names)): - ae_list.append((ae_names[i], ae_ifs[i])) - interfaces['ae'] = ae_list - - table_vlan = table_parser.filter_table(table_, - **{'type': ['vlan', 'vxlan']}) - vlan_ifs_ = table_parser.get_values(table_vlan, 'uses i/f') - vlan_ids = table_parser.get_values(table_vlan, 'vlan id') - vlan_list = [] - for i in range(len(vlan_ifs_)): - # assuming only 1 item in 'uses i/f' list - vlan_useif = eval(vlan_ifs_[i])[0] - vlan_useif_ports = eval( - table_parser.get_values(table_origin, 'ports', name=vlan_useif)[0]) - if vlan_useif_ports: - vlan_useif = vlan_useif_ports[0] - vlan_list.append("{}.{}".format(vlan_useif, vlan_ids[i])) - - LOG.info( - "Expected eth names for {} network on {}: {}".format(net_type, host, - interfaces)) - return interfaces - - -def get_host_cpu_model(host, con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - Get cpu model for a given host. e.g., Intel(R) Xeon(R) CPU E5-2680 v2 @ - 2.80GHz - Args: - host (str): e.g., compute-0 - con_ssh (SSHClient): - auth_info - - Returns (str): - """ - table_ = get_host_cpu_list_table(host=host, con_ssh=con_ssh, - auth_info=auth_info) - cpu_model = table_parser.get_column(table_, 'processor_model')[0] - - LOG.info("CPU Model for {}: {}".format(host, cpu_model)) - return cpu_model - - -def get_max_vms_supported(host, con_ssh=None): - max_count = 10 - cpu_model = get_host_cpu_model(host=host, con_ssh=con_ssh) - if ProjVar.get_var('IS_VBOX'): - max_count = MaxVmsSupported.VBOX - elif re.search(r'Xeon.* CPU D-[\d]+', cpu_model): - max_count = MaxVmsSupported.XEON_D - - LOG.info("Max number vms supported on {}: {}".format(host, max_count)) - return max_count - - -def get_hypersvisors_with_config(hosts=None, up_only=True, hyperthreaded=None, - storage_backing=None, con_ssh=None): - """ - Get hypervisors with specified configurations - Args: - hosts (None|list): - up_only (bool): - hyperthreaded - storage_backing (None|str): - con_ssh (SSHClient): - - Returns (list): list of hosts meeting the requirements - - """ - if up_only: - hypervisors = get_up_hypervisors(con_ssh=con_ssh) - else: - hypervisors = get_hypervisors(con_ssh=con_ssh) - - if hosts: - candidate_hosts = list(set(hypervisors) & set(hosts)) - else: - candidate_hosts = hypervisors - - if candidate_hosts and storage_backing: - candidate_hosts = get_hosts_in_storage_backing( - storage_backing=storage_backing, con_ssh=con_ssh, - hosts=candidate_hosts) - - if hyperthreaded is not None and candidate_hosts: - ht_hosts = [] - non_ht = [] - for host in candidate_hosts: - if is_host_hyperthreaded(host, con_ssh=con_ssh): - ht_hosts.append(host) - else: - non_ht.append(host) - candidate_hosts = ht_hosts if 
hyperthreaded else non_ht - - return candidate_hosts - - -def lock_unlock_controllers(host_recover='function', alarm_ok=False, - no_standby_ok=False): - """ - lock/unlock both controller to get rid of the config out of date situations - - Args: - host_recover (None|str): try to recover host if lock/unlock fails - alarm_ok (bool) - no_standby_ok (bool) - - Returns (tuple): return code and msg - - """ - active, standby = system_helper.get_active_standby_controllers() - if standby: - LOG.info("Locking unlocking controllers to complete action") - from testfixtures.recover_hosts import HostsToRecover - if host_recover: - HostsToRecover.add(hostnames=standby, scope=host_recover) - lock_host(standby) - unlock_host(standby) - if host_recover: - HostsToRecover.remove(hostnames=standby, scope=host_recover) - drbd_res = system_helper.wait_for_alarm_gone( - alarm_id=EventLogID.CON_DRBD_SYNC, entity_id=standby, - strict=False, fail_ok=alarm_ok, timeout=300, check_interval=20) - if not drbd_res: - return 1, "400.001 alarm is not cleared within timeout after " \ - "unlock standby" - - lock_host(active, swact=True) - unlock_host(active) - drbd_res = system_helper.wait_for_alarm_gone( - alarm_id=EventLogID.CON_DRBD_SYNC, entity_id=active, - strict=False, fail_ok=alarm_ok, timeout=300) - if not drbd_res: - return 1, "400.001 alarm is not cleared within timeout after " \ - "unlock standby" - - elif system_helper.is_aio_simplex(): - LOG.info("Simplex system - lock/unlock only controller") - lock_host('controller-0', swact=False) - unlock_host('controller-0') - - else: - LOG.warning( - "Standby controller unavailable. Unable to lock active controller.") - if no_standby_ok: - return 2, 'No standby available, thus unable to lock/unlock ' \ - 'controllers' - else: - raise exceptions.HostError( - "Unable to lock/unlock controllers due to no standby " - "controller") - - return 0, "Locking unlocking controller(s) completed" - - -def lock_unlock_hosts(hosts, force_lock=False, con_ssh=None, - auth_info=Tenant.get('admin_platform'), - recover_scope='function'): - """ - Lock/unlock hosts simultaneously when possible. 
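- Roughly: lock the standby controller, the computes (possibly keeping one
- for running VMs unless force_lock), and storage nodes other than
- storage-0 in a first batch and unlock them together; then handle the
- leftover compute/storage; finally swact and lock/unlock the active
- controller. Illustrative call (hostnames hypothetical):
-
-     lock_unlock_hosts(['compute-0', 'compute-1', 'controller-1'])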
- Args: - hosts (str|list): - force_lock (bool): lock without migrating vms out - con_ssh: - auth_info - recover_scope (None|str): - - Returns: - - """ - if isinstance(hosts, str): - hosts = [hosts] - - last_compute = last_storage = None - from testfixtures.recover_hosts import HostsToRecover - controllers, computes, storages = system_helper.get_hosts_per_personality( - con_ssh=con_ssh, auth_info=auth_info, - rtn_tuple=True) - controllers = list(set(controllers) & set(hosts)) - computes_to_lock = list(set(computes) & set(hosts)) - storages = list(set(storages) & set(hosts)) - - hosts_to_lock = list(computes_to_lock) - from keywords import container_helper, vm_helper - nova_auth = Tenant.get('admin', - auth_info.get('region') if auth_info else None) - if computes and not force_lock and \ - len(computes) == len(computes_to_lock) and \ - container_helper.is_stx_openstack_deployed() and \ - vm_helper.get_vms(auth_info=nova_auth): - # leave a compute if there are vms on system and force lock=False - last_compute = hosts_to_lock.pop() - - active, standby = system_helper.get_active_standby_controllers( - con_ssh=con_ssh, auth_info=auth_info) - - if standby and standby in controllers: - hosts_to_lock.append(standby) - - if storages and 'storage-0' in storages: - # storage-0 cannot be locked with any controller - last_storage = 'storage-0' - storages.remove(last_storage) - if storages: - hosts_to_lock += storages - - LOG.info("Lock/unlock: {}".format(hosts_to_lock)) - hosts_locked = [] - try: - for host in hosts_to_lock: - HostsToRecover.add(hostnames=host, scope=recover_scope) - lock_host(host, con_ssh=con_ssh, force=force_lock, - auth_info=auth_info) - hosts_locked.append(host) - - finally: - if hosts_locked: - unlock_hosts(hosts=hosts_locked, con_ssh=con_ssh, - auth_info=auth_info) - wait_for_hosts_ready(hosts=hosts_locked, con_ssh=con_ssh, - auth_info=auth_info) - HostsToRecover.remove(hosts_locked, scope=recover_scope) - - LOG.info("Lock/unlock last compute {} and storage {} if any".format( - last_compute, last_storage)) - hosts_locked_next = [] - try: - for host in (last_compute, last_storage): - if host: - HostsToRecover.add(host, scope=recover_scope) - lock_host(host=host, con_ssh=con_ssh, auth_info=auth_info) - hosts_locked_next.append(host) - - finally: - if hosts_locked_next: - unlock_hosts(hosts_locked_next, con_ssh=con_ssh, - auth_info=auth_info) - wait_for_hosts_ready(hosts_locked_next, con_ssh=con_ssh, - auth_info=auth_info) - HostsToRecover.remove(hosts_locked_next, scope=recover_scope) - - if active in controllers: - if active and system_helper.is_aio_duplex(con_ssh=con_ssh, - auth_info=auth_info): - system_helper.wait_for_alarm_gone( - alarm_id=EventLogID.CPU_USAGE_HIGH, check_interval=30, - timeout=300, con_ssh=con_ssh, entity_id=active, - auth_info=auth_info) - LOG.info("Lock/unlock {}".format(active)) - HostsToRecover.add(active, scope=recover_scope) - lock_host(active, swact=True, con_ssh=con_ssh, force=force_lock, - auth_info=auth_info) - unlock_hosts(active, con_ssh=con_ssh, auth_info=auth_info) - wait_for_hosts_ready(active, con_ssh=con_ssh, - auth_info=auth_info) - HostsToRecover.remove(active, scope=recover_scope) - - LOG.info("Hosts lock/unlock completed: {}".format(hosts)) - - -def get_traffic_control_rates(dev, con_ssh=None): - """ - Check the traffic control profile on given device name - - Returns (dict): return traffic control rates in Mbit. - e.g., {'root': [10000, 10000], 'drbd': [8000, 10000], ... 
} - - """ - if con_ssh is None: - con_ssh = ControllerClient.get_active_controller() - output = con_ssh.exec_cmd('tc class show dev {}'.format(dev), - expect_timeout=10)[1] - - traffic_classes = {} - for line in output.splitlines(): - match = re.findall(TrafficControl.RATE_PATTERN, line) - if match: - ratio, rate, rate_unit, ceil_rate, ceil_rate_unit = match[0] - class_name = TrafficControl.CLASSES[ratio] - else: - root_match = re.findall(TrafficControl.RATE_PATTERN_ROOT, line) - if not root_match: - raise NotImplementedError( - 'Unrecognized traffic class line: {}'.format(line)) - rate, rate_unit, ceil_rate, ceil_rate_unit = root_match[0] - class_name = 'root' - - rate = int(rate) - ceil_rate = int(ceil_rate) - - rates = [] - for rate_info in ((rate, rate_unit), (ceil_rate, ceil_rate_unit)): - rate_, unit_ = rate_info - rate_ = int(rate_) - if unit_ == 'G': - rate_ = int(rate_ * 1000) - elif unit_ == 'K': - rate_ = int(rate_ / 1000) - - rates.append(rate_) - - traffic_classes[class_name] = rates - - LOG.info("Traffic classes for {}: {}".format(dev, traffic_classes)) - return traffic_classes - - -def get_nic_speed(interface, con_ssh=None): - """ - Check the speed on given interface name - Args: - interface (str|list) - con_ssh - - Returns (list): return speed - - """ - if con_ssh is None: - con_ssh = ControllerClient.get_active_controller() - - if isinstance(interface, str): - interface = [interface] - - speeds = [] - for if_ in interface: - if_speed = con_ssh.exec_cmd('cat /sys/class/net/{}/speed'.format(if_), - expect_timeout=10, fail_ok=False)[1] - speeds.append(int(if_speed)) - - return speeds - - -def get_host_cmdline_options(host, con_ssh=None): - with ssh_to_host(hostname=host, con_ssh=con_ssh) as host_ssh: - output = host_ssh.exec_cmd('cat /proc/cmdline')[1] - - return output - - -def get_host_memories(host, headers=('app_hp_avail_2M',), proc_id=None, - wait_for_update=True, con_ssh=None, - auth_info=Tenant.get('admin_platform'), rtn_dict=True): - """ - Get host memory values - Args: - host (str): hostname - headers (str|list|tuple): - proc_id (int|str|None|tuple|list): such as 0, '1' - wait_for_update (bool): wait for app_hp_pending_2M and - app_hp_pending_1G to be None - con_ssh (SSHClient): - auth_info (dict): - rtn_dict - - Returns (dict|list): {(int): (list), ... } or [( - list), (list), ...] 
- e.g., {0: [62018, 1]} - - """ - - cmd = 'host-memory-list --nowrap' - table_ = table_parser.table( - cli.system(cmd, host, ssh_client=con_ssh, auth_info=auth_info)[1]) - - if proc_id is None: - proc_id = table_parser.get_column(table_, 'processor') - elif isinstance(proc_id, (str, int)): - proc_id = [int(proc_id)] - - procs = sorted([int(proc) for proc in proc_id]) - - if wait_for_update: - end_time = time.time() + 330 - while time.time() < end_time: - pending_2m, pending_1g = table_parser.get_multi_values( - table_, evaluate=True, - fields=('app_hp_pending_2M', 'app_hp_pending_1G')) - for i in range(len(pending_2m)): - if (pending_2m[i] is not None) or (pending_1g[i] is not None): - break - else: - LOG.debug("No pending 2M or 1G mem pages") - break - - LOG.info("Pending 2M or 1G pages, wait for mem page to update") - time.sleep(30) - table_ = table_parser.table(cli.system(cmd, host, - ssh_client=con_ssh, - auth_info=auth_info)[1]) - else: - raise exceptions.SysinvError( - "Pending 2M or 1G pages after 5 minutes") - - values_all_procs = [] - for proc in procs: - vals = table_parser.get_multi_values(table_, headers, evaluate=True, - convert_single_field=False, - **{'processor': str(proc)}) - # Since proc is set, there will be only 1 row filtered out. - vals = [val[0] for val in vals] - values_all_procs.append(vals) - - if rtn_dict: - values_all_procs = {str(procs[i]): values_all_procs[i] for i in range(len(procs))} - - return values_all_procs - - -def get_host_used_mem_values(host, proc_id=0, - auth_info=Tenant.get('admin_platform'), - con_ssh=None): - """ - Return number of MiB used by a specific host - Args: - host: - proc_id: - auth_info: - con_ssh: - - Returns (int): - - """ - mem_vals = get_host_memories( - host, ['mem_total(MiB)', 'mem_avail(MiB)', 'avs_hp_size(MiB)', - 'avs_hp_total'], - proc_id=proc_id, con_ssh=con_ssh, auth_info=auth_info)[int(proc_id)] - - mem_total, mem_avail, avs_hp_size, avs_hp_total = [int(val) for val in - mem_vals] - - used_mem = mem_total - mem_avail - avs_hp_size * avs_hp_total - - return used_mem - - -def is_host_hyperthreaded(host, con_ssh=None, - auth_info=Tenant.get('admin_platform')): - table_ = table_parser.table( - cli.system('host-cpu-list', host, ssh_client=con_ssh, - auth_info=auth_info)[1]) - return len(set(table_parser.get_column(table_, 'thread'))) > 1 - - -def get_host_cpu_list_table(host, con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - Get the parsed version of the output from system host-cpu-list - Args: - host (str): host's name - con_ssh (SSHClient): - auth_info (dict): - - Returns (dict): output of system host-cpu-list parsed by table_parser - - """ - output = cli.system('host-cpu-list --nowrap', host, ssh_client=con_ssh, - auth_info=auth_info)[1] - table_ = table_parser.table(output) - return table_ - - -def get_host_ports(host, field='name', if_name=None, pci_addr=None, proc=None, - dev_type=None, strict=True, - regex=False, rtn_dict=False, con_ssh=None, - auth_info=Tenant.get('admin_platform'), **kwargs): - """ - Get - Args: - host: - field (str|list): - if_name: - pci_addr: - proc: - dev_type: - strict: - regex: - con_ssh: - auth_info: - rtn_dict - **kwargs: - - Returns (list|dict): list if header is string, dict if header is list. 
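- Example (hypothetical host and values):
-
-     get_host_ports('compute-0', field='name', dev_type='ethernet')
-     # -> ['enp134s0f0', 'enp134s0f1']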
- - """ - table_ = table_parser.table( - cli.system('host-port-list --nowrap', host, ssh_client=con_ssh, - auth_info=auth_info)[1]) - - args_tmp = { - 'name': if_name, - 'pci address': pci_addr, - 'processor': proc, - 'device_type': dev_type - } - - kwargs.update({k: v for k, v in args_tmp.items() if v is not None}) - return table_parser.get_multi_values(table_, field, rtn_dict=rtn_dict, - strict=strict, regex=regex, **kwargs) - - -def get_host_interfaces_table(host, show_all=False, con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - Get system host-if-list table - Args: - host (str): - show_all (bool): - con_ssh (SSHClient): - auth_info (dict): - - Returns (dict): - - """ - args = '' - args += ' --a' if show_all else '' - args += ' ' + host - - table_ = table_parser.table( - cli.system('host-if-list --nowrap', args, ssh_client=con_ssh, - auth_info=auth_info)[1]) - return table_ - - -def get_host_interfaces(host, field='name', net_type=None, if_type=None, - uses_ifs=None, used_by_ifs=None, - show_all=False, strict=True, regex=False, con_ssh=None, - auth_info=Tenant.get('admin_platform'), - exclude=False, **kwargs): - """ - Get specified interfaces info for given host via system host-if-list - - Args: - host (str): - field (str|tuple): header for return info - net_type (str|list|tuple): valid values: 'oam', 'data', 'infra', - 'mgmt', 'None'(string instead of None type) - if_type (str): possible values: 'ethernet', 'ae', 'vlan' - uses_ifs (str): - used_by_ifs (str): - show_all (bool): whether or not to show unused interfaces - exclude (bool): whether or not to exclude the interfaces filtered - strict (bool): - regex (bool): - con_ssh (SSHClient): - auth_info (dict): - **kwargs: extraheader=value pairs to further filter out info. such as - attributes='MTU=1500'. 
- - Returns (list): - - """ - table_ = get_host_interfaces_table(host=host, show_all=show_all, - con_ssh=con_ssh, auth_info=auth_info) - - if isinstance(net_type, str): - net_type = [net_type] - networks = if_classes = None - if net_type is not None: - networks = [] - if_classes = [] - for net in net_type: - network = '' - if_class = net - if net in PLATFORM_NET_TYPES: - if_class = 'platform' - network = net - networks.append(network) - if_classes.append(if_class) - - args_tmp = { - 'class': if_classes, - 'type': if_type, - 'uses i/f': uses_ifs, - 'used by i/f': used_by_ifs - } - - for key, value in args_tmp.items(): - if value is not None: - kwargs[key] = value - - table_ = table_parser.filter_table(table_, strict=strict, regex=regex, - exclude=exclude, **kwargs) - - # exclude the platform interface that does not have desired net_type - if if_classes is not None and 'platform' in if_classes: - platform_ifs = table_parser.get_values(table_, target_header='name', - **{'class': 'platform'}) - for pform_if in platform_ifs: - if_nets = get_host_interface_values(host=host, interface=pform_if, - fields='networks', - con_ssh=con_ssh)[0] - if_nets = [if_net.strip() for if_net in if_nets.split(sep=',')] - if not (set(if_nets) & set(networks)): - table_ = table_parser.filter_table(table_, strict=True, - exclude=(not exclude), - name=pform_if) - - vals = table_parser.get_multi_values(table_, fields=field, evaluate=True) - if not isinstance(field, str) and len(vals) > 1: - vals = list(zip(*vals)) - - return vals - - -def get_host_ports_for_net_type(host, net_type='data', ports_only=True, - con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - - Args: - host: - net_type: - ports_only: whether to include dev_name as well - con_ssh: - auth_info: - - Returns (list): - - """ - table_ = get_host_interfaces_table(host=host, con_ssh=con_ssh, - auth_info=auth_info) - table_origin = copy.deepcopy(table_) - if net_type: - if_class = net_type - network = '' - if net_type in PLATFORM_NET_TYPES: - if_class = 'platform' - network = net_type - - table_ = table_parser.filter_table(table_, **{'class': if_class}) - # exclude unmatched platform interfaces from the table. 
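- # (Sketch, names hypothetical) Further down, device names are derived per
- # interface type, roughly:
- #
- #     ethernet -> its single port name, e.g. 'enp134s0f1'
- #     ae       -> the interface name itself; ports come from 'uses i/f'
- #     vlan     -> '<underlying port>.<vlan id>', e.g. 'enp134s0f1.100'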
- if 'platform' == if_class: - platform_ifs = table_parser.get_values(table_, target_header='name', - **{'class': 'platform'}) - for pform_if in platform_ifs: - if_nets = \ - get_host_interface_values(host=host, interface=pform_if, - fields='networks', - con_ssh=con_ssh)[0] - if_nets = [if_net.strip() for if_net in if_nets.split(sep=',')] - if network not in if_nets: - table_ = table_parser.filter_table(table_, strict=True, - exclude=True, - name=pform_if) - - net_ifs_names = table_parser.get_column(table_, 'name') - total_ports = [] - for if_name in net_ifs_names: - if_type = table_parser.get_values(table_, 'type', name=if_name)[0] - if if_type == 'ethernet': - ports = ast.literal_eval( - table_parser.get_values(table_, 'ports', name=if_name)[0]) - dev_name = ports[0] if len(ports) == 1 else if_name - else: - dev_name = if_name - ports = [] - uses_ifs = ast.literal_eval( - table_parser.get_values(table_, 'uses i/f', name=if_name)[0]) - for use_if in uses_ifs: - use_if_type = \ - table_parser.get_values(table_origin, 'type', - name=use_if)[0] - if use_if_type == 'ethernet': - useif_ports = ast.literal_eval( - table_parser.get_values(table_origin, 'ports', - name=use_if)[0]) - else: - # uses if is ae - useif_ports = ast.literal_eval( - table_parser.get_values(table_origin, 'uses i/f', - name=use_if)[0]) - ports += useif_ports - - if if_type == 'vlan': - vlan_id = \ - table_parser.get_values(table_, 'vlan id', name=if_name)[0] - if ports: - dev_name = ports[0] if len(ports) == 1 else uses_ifs[0] - dev_name = '{}.{}'.format(dev_name, vlan_id) - - if ports_only: - total_ports += ports - else: - total_ports.append((dev_name, sorted(ports))) - - LOG.info("{} {} network ports are: {}".format(host, net_type, total_ports)) - if ports_only: - total_ports = list(set(total_ports)) - - return total_ports - - -def get_host_port_pci_address(host, interface, con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - - Args: - host: - interface: - con_ssh: - auth_info: - - Returns (str): pci address of interface - - """ - table_ = table_parser.table( - cli.system('host-port-list --nowrap', host, ssh_client=con_ssh, - auth_info=auth_info)[1]) - pci_addresses = table_parser.get_values(table_, 'pci address', - name=interface) - - pci_address = pci_addresses.pop() - LOG.info("pci address of interface {} for host is: {}".format(interface, - pci_address)) - - return pci_address - - -def get_host_port_pci_address_for_net_type(host, net_type='mgmt', rtn_list=True, - con_ssh=None, - auth_info=Tenant.get( - 'admin_platform')): - """ - - Args: - host: - net_type: - rtn_list: - con_ssh: - auth_info: - - Returns (list): - - """ - ports = get_host_ports_for_net_type(host, net_type=net_type, - ports_only=rtn_list, con_ssh=con_ssh, - auth_info=auth_info) - pci_addresses = [] - for port in ports: - pci_address = get_host_port_pci_address(host, port, con_ssh=con_ssh, - auth_info=auth_info) - pci_addresses.append(pci_address) - - return pci_addresses - - -def get_host_mgmt_pci_address(host, con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - - Args: - host: - con_ssh: - auth_info: - - Returns: - - """ - mgmt_ip = \ - system_helper.get_host_values(host=host, fields='mgmt_ip', - con_ssh=con_ssh, - auth_info=auth_info)[0] - mgmt_ports = get_host_ifnames_by_address(host, address=mgmt_ip) - pci_addresses = [] - for port in mgmt_ports: - pci_address = get_host_port_pci_address(host, port, con_ssh=con_ssh, - auth_info=auth_info) - pci_addresses.append(pci_address) - - return pci_addresses - - -def 
get_host_interface_values(host, interface, fields, con_ssh=None, - auth_info=Tenant.get('admin_platform')): - args = "{} {}".format(host, interface) - table_ = table_parser.table( - cli.system('host-if-show', args, ssh_client=con_ssh, - auth_info=auth_info)[1]) - return table_parser.get_multi_values_two_col_table(table_, fields) - - -def get_hosts_interfaces_info(hosts, fields, con_ssh=None, - auth_info=Tenant.get('admin_platform'), - strict=True, - **interface_filters): - if isinstance(hosts, str): - hosts = [hosts] - - res = {} - for host in hosts: - interfaces = get_host_interfaces(host, field='name', strict=strict, - **interface_filters) - host_res = {} - for interface in interfaces: - values = get_host_interface_values(host, interface, fields=fields, - con_ssh=con_ssh, - auth_info=auth_info) - host_res[interface] = values - - res[host] = host_res - - return res - - -def get_host_ethernet_port_table(host, con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - Get system host-if-list table - Args: - host (str): - con_ssh (SSHClient): - auth_info (dict): - - Returns (dict): - - """ - args = '' - args += ' ' + host - - table_ = table_parser.table( - cli.system('host-ethernet-port-list --nowrap', args, ssh_client=con_ssh, - auth_info=auth_info)[1]) - return table_ - - -def get_host_ifnames_by_address(host, field='ifname', address=None, id_=None, - fail_ok=False, con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - Get the host ifname by address. - Args: - host - con_ssh (SSHClient): - address: - id_: - field: - auth_info (dict): - fail_ok: whether return False or raise exception when some services - fail to reach enabled-active state - - Returns (list): - - """ - - table_ = table_parser.table( - cli.system('host-addr-list', host, ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info)[1]) - args_dict = { - 'uuid': id_, - 'address': address, - } - kwargs = ({k: v for k, v in args_dict.items() if v is not None}) - ifnames = table_parser.get_multi_values(table_, field, strict=True, - regex=True, merge_lines=True, - **kwargs) - return ifnames - - -def get_host_addresses(host, field='address', ifname=None, id_=None, - auth_info=Tenant.get('admin_platform'), - fail_ok=False, con_ssh=None): - """ - Disable Murano Services - Args: - host - con_ssh (SSHClient): - ifname: - id_: - field: - auth_info (dict): - fail_ok: whether return False or raise exception when some services - fail to reach enabled-active state - - Returns: - - """ - - table_ = table_parser.table( - cli.system('host-addr-list --nowrap', host, ssh_client=con_ssh, - fail_ok=fail_ok, - auth_info=auth_info)[1]) - args_dict = { - 'id': id_, - 'ifname': ifname, - } - kwargs = ({k: v for k, v in args_dict.items() if v is not None}) - address = table_parser.get_multi_values(table_, field, strict=True, - regex=True, merge_lines=True, - **kwargs) - return address - - -def get_host_lldp_agents(host, field='uuid', uuid=None, local_port=None, - status=None, chassis_id=None, - port_id=None, system_name=None, - system_description=None, - auth_info=Tenant.get('admin_platform'), con_ssh=None, - strict=True, regex=None, **kwargs): - """ - Get lldp agent table via system host-lldp-agent-list - Args: - host: (mandatory) - field: 'uuid' (default) - uuid: - local_port: - status: - chassis_id: - port_id: - system_name: - system_description: - auth_info: - con_ssh: - strict: - regex: - **kwargs: - - Returns (list): - - """ - table_ = table_parser.table( - cli.system('host-lldp-agent-list --nowrap', host, ssh_client=con_ssh, - 
auth_info=auth_info)[1]) - - args_temp = { - 'uuid': uuid, - 'local_port': local_port, - 'status': status, - 'chassis_id': chassis_id, - 'system_name': system_name, - 'system_description': system_description, - 'port_id': port_id, - } - kwargs.update({k: v for k, v in args_temp.items() if v is not None}) - return table_parser.get_multi_values(table_, field, strict=strict, - regex=regex, **kwargs) - - -def get_host_lldp_neighbors(host, field='uuid', uuid=None, local_port=None, - remote_port=None, chassis_id=None, - management_address=None, system_name=None, - system_description=None, - auth_info=Tenant.get('admin_platform'), - con_ssh=None, strict=True, - regex=None, **kwargs): - """ - Get lldp neighbour table via system host-lldp-neighbor-list - Args: - host (str) - field (str|list|tuple): 'uuid' (default value) - uuid: - local_port: - remote_port: - chassis_id: - management_address: - system_name: - system_description: - auth_info: - con_ssh: - strict: - regex: - **kwargs: - - Returns (list): - - """ - table_ = table_parser.table( - cli.system('host-lldp-neighbor-list --nowrap', host, ssh_client=con_ssh, - auth_info=auth_info)[1]) - args_temp = { - 'uuid': uuid, - 'local_port': local_port, - 'remote_port': remote_port, - 'chassis_id': chassis_id, - 'system_name': system_name, - 'system_description': system_description, - 'management_address': management_address - } - kwargs.update({k: v for k, v in args_temp.items() if v is not None}) - return table_parser.get_multi_values(table_, field, strict=strict, - regex=regex, **kwargs) - - -def get_host_device_values(host, device, fields, con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - Get host device values for given fields via system host-device-show - Args: - host: - device: - fields (str|list|tuple): - con_ssh: - auth_info: - - Returns (list): - - """ - args = "{} {}".format(host, device) - table_ = table_parser.table( - cli.system('host-device-show', args, ssh_client=con_ssh, - auth_info=auth_info)[1]) - - return table_parser.get_value_two_col_table(table_, fields) - - -def get_host_devices(host, field='name', list_all=False, con_ssh=None, - auth_info=Tenant.get('admin_platform'), strict=True, - regex=False, **kwargs): - """ - Get the parsed version of the output from system host-device-list - Args: - host (str): host's name - field (str): field name to return value for - list_all (bool): whether to list all devices including the disabled ones - con_ssh (SSHClient): - auth_info (dict): - strict (bool): whether to perform strict search on filter - regex (bool): whether to use regular expression to search the value in - kwargs - kwargs: key-value pairs to filter the table - - Returns (list): output of system host-device-list parsed by - table_parser - - """ - param = '--nowrap' - param += ' --all' if list_all else '' - table_ = table_parser.table( - cli.system('host-device-list {}'.format(param), host, - ssh_client=con_ssh, auth_info=auth_info)[1]) - - values = table_parser.get_multi_values(table_, field, strict=strict, - evaluate=True, regex=regex, **kwargs) - - return values - - -def modify_host_device(host, device, new_name=None, new_state=None, - check_first=True, lock_unlock=False, fail_ok=False, - con_ssh=None, auth_info=Tenant.get('admin_platform')): - """ - Modify host device to given name or state. 
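- Illustrative call (device address hypothetical):
-
-     modify_host_device('compute-0', '0000:09:00.0',
-                        new_state=False, lock_unlock=True)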
- Args: - host: host to modify - device: device name or pci address - new_name (str): new name to modify to - new_state (bool): new state to modify to - lock_unlock (bool): whether to lock unlock host before and after modify - con_ssh (SSHClient): - fail_ok (bool): - check_first (bool): - auth_info (dict): - - Returns (tuple): - - """ - args = '' - fields = [] - expt_vals = [] - if new_name: - fields.append('name') - expt_vals.append(new_name) - args += ' --name {}'.format(new_name) - if new_state is not None: - fields.append('enabled') - expt_vals.append(new_state) - args += ' --enabled {}'.format(new_state) - - if check_first and fields: - vals = get_host_device_values(host, device, fields=fields, - con_ssh=con_ssh, auth_info=auth_info) - if vals == expt_vals: - return -1, "{} device {} already set to given name and/or " \ - "state".format(host, device) - - try: - if lock_unlock: - LOG.info("Lock host before modify host device") - lock_host(host=host, con_ssh=con_ssh, auth_info=auth_info) - - LOG.info("Modify {} device {} with args: {}".format(host, device, args)) - args = "{} {} {}".format(host, device, args.strip()) - res, out = cli.system('host-device-modify', args, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - - if res == 1: - return 1, out - - LOG.info("Verifying the host device new pci name") - post_vals = get_host_device_values(host, device, fields=fields, - con_ssh=con_ssh, auth_info=auth_info) - assert expt_vals == post_vals, "{} device {} is not modified to " \ - "given values. Expt: {}, actual: {}". \ - format(host, device, expt_vals, post_vals) - - msg = "{} device {} is successfully modified to given values".format( - host, device) - LOG.info(msg) - return 0, msg - finally: - if lock_unlock: - LOG.info("Unlock host after host device modify") - unlock_host(host=host, con_ssh=con_ssh, auth_info=auth_info) - - -def enable_disable_hosts_devices(hosts, devices, enable=True, con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - Enable/Disable given devices on specified hosts. 
(lock/unlock required - unless devices already in state) - Args: - hosts (str|list|tuple): hostname(s) - devices (str|list|tuple): device(s) name or address via - system host-device-list - enable (bool): whether to enable or disable devices - con_ssh - auth_info - - Returns: - - """ - if isinstance(hosts, str): - hosts = [hosts] - - if isinstance(devices, str): - devices = [devices] - - key = 'name' if 'pci_' in devices[0] else 'address' - - for host_ in hosts: - states = get_host_devices(host=host_, field='enabled', list_all=True, - con_ssh=con_ssh, - auth_info=auth_info, **{key: devices}) - if (not enable) in states: - try: - lock_host(host=host_, swact=True, con_ssh=con_ssh, - auth_info=auth_info) - for i in range(len(states)): - if states[i] is not enable: - device = devices[i] - modify_host_device(host=host_, device=device, - new_state=enable, check_first=False, - con_ssh=con_ssh, auth_info=auth_info) - finally: - unlock_host(host=host_, con_ssh=con_ssh, auth_info=auth_info) - - post_states = get_host_devices(host=host_, field='enabled', - list_all=True, con_ssh=con_ssh, - auth_info=auth_info, **{key: devices}) - assert not ((not enable) in post_states), \ - "Some devices enabled!={} after unlock".format(enable) - - LOG.info("enabled={} set successfully for following devices on hosts " - "{}: {}".format(enable, hosts, devices)) - - -def wait_for_tasks_affined(host, timeout=180, fail_ok=False, con_ssh=None, - auth_info=Tenant.get('admin_platform')): - if system_helper.is_aio_simplex(con_ssh=con_ssh, auth_info=auth_info): - return True - - LOG.info( - "Check {} non-existent on {}".format(PLATFORM_AFFINE_INCOMPLETE, host)) - if not con_ssh: - con_name = auth_info.get('region') if ( - auth_info and ProjVar.get_var('IS_DC')) else None - con_ssh = ControllerClient.get_active_controller(name=con_name) - - with ssh_to_host(host, con_ssh=con_ssh) as host_ssh: - end_time = time.time() + timeout - while time.time() < end_time: - if not host_ssh.file_exists(PLATFORM_AFFINE_INCOMPLETE): - LOG.info( - "{} platform tasks re-affined successfully".format(host)) - return True - time.sleep(5) - - err = "{} did not clear on {}".format(PLATFORM_AFFINE_INCOMPLETE, host) - if fail_ok: - LOG.warning(err) - return False - raise exceptions.HostError(err) - - -def get_storage_backing_with_max_hosts(rtn_down_hosts=False, con_ssh=None): - """ - Get storage backing that has the most hypervisors - Args: - rtn_down_hosts (bool): whether to return down hosts if no up - hosts available - con_ssh (SSHClient): - - Returns (tuple): ((str), (list)) - Examples: - Regular/Storage system: ('local_image',['compute-1', 'compute-3']) - AIO: ('local_image', ['controller-0', 'controller-1']) - - """ - hosts_per_backing = get_hosts_per_storage_backing( - up_only=not rtn_down_hosts, con_ssh=con_ssh) - default_backing = ProjVar.get_var('DEFAULT_INSTANCE_BACKING') - return default_backing, hosts_per_backing.get(default_backing, []) diff --git a/automated-pytest-suite/keywords/html_helper.py b/automated-pytest-suite/keywords/html_helper.py deleted file mode 100644 index dca85da4..00000000 --- a/automated-pytest-suite/keywords/html_helper.py +++ /dev/null @@ -1,198 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - - -import json -import requests - -from consts.auth import Tenant -from utils import table_parser, cli -from utils.tis_log import LOG -from consts.proj_vars import ProjVar -from keywords import keystone_helper - - -def get_ip_addr(): - return ProjVar.get_var('lab')['floating ip'] - - -def create_url(ip=None, port=None, version=None, extension=None): - """ - Creates a url with the given parameters inn the form: - http(s)://:// - Args: - ip (str): the main ip address. If set to None will be set to the lab's - ip address by default. - port (int): the port number to connect to. - version (str): for REST API. version number, e.g. "v1", "v2.0" - extension (str): extensions to add to the url - - Returns (str): a url created with the given parameters - - """ - if keystone_helper.is_https_enabled() is True: - url = 'https://' - else: - url = 'http://' - if ip: - url += ip - else: - url += get_ip_addr() - - if port: - url += ':{}'.format(port) - - if version: - url += '/{}'.format(version) - - if extension: - url += '/{}'.format(extension) - - return url - - -def get_user_token(field='id', con_ssh=None, auth_info=Tenant.get('admin')): - """ - Return an authentication token for the admin. - - Args: - field (str): - con_ssh (SSHClient): - auth_info - Returns (list): a list containing at most one authentication token - - """ - table_ = table_parser.table(cli.openstack('token issue', ssh_client=con_ssh, - auth_info=auth_info)[1]) - token = table_parser.get_value_two_col_table(table_, field) - return token - - -def get_request(url, headers, verify=True): - """ - Sends a GET request to the url - Args: - url (str): url to send request to - headers (dict): header to add to the request - verify: Verify SSL certificate - - Returns (dict): The response for the request - - """ - LOG.info("Sending GET request to {}. Headers: {}".format(url, headers)) - resp = requests.get(url, headers=headers, verify=verify) - - if resp.status_code == requests.codes.ok: - data = json.loads(resp.text) - LOG.info("The returned data is: {}".format(data)) - return data - - LOG.info("Error {}".format(resp.status_code)) - return None - - -def post_request(url, data, headers, verify=True): - """ - Sends a POST request to the url - Args: - url (str): url to send request to - data (dict): data to be sent in the request body - headers (dict): header to add to the request - verify: Verify SSL certificate - - Returns (dict): The response for the request - - """ - if not isinstance(data, str): - data = json.dumps(data) - LOG.info("Sending POST request to {}. Headers: {}. Data: " - "{}".format(url, headers, data)) - resp = requests.post(url, headers=headers, data=data, verify=verify) - - if resp.status_code == requests.codes.ok: - data = json.loads(resp.text) - LOG.info("The returned data is: {}".format(data)) - return data - - LOG.info("Error {}".format(resp.status_code)) - return None - - -def put_request(url, data, headers, verify=True): - """ - Sends a GET request to the url - Args: - url (str): url to send request to - data (dict): data to be sent in the request body - headers (dict): header to add to the request - verify: Verify SSL certificate - - Returns (dict): The response for the request - - """ - if not isinstance(data, str): - data = json.dumps(data) - LOG.info("Sending PUT request to {}. Headers: {}. 
Data: " - "{}".format(url, headers, data)) - resp = requests.put(url, headers=headers, data=data, verify=verify) - - if resp.status_code == requests.codes.ok: - data = json.loads(resp.text) - LOG.info("The returned data is: {}".format(data)) - return data - - LOG.info("Error {}".format(resp.status_code)) - return None - - -def delete_request(url, headers, verify=True): - """ - Sends a GET request to the url - Args: - url (str): url to send request to - headers (dict): header to add to the request - verify: Verify SSL certificate - - Returns (dict): The response for the request - - """ - LOG.info("Sending DELETE request to {}. Headers: {}".format(url, headers)) - resp = requests.delete(url, headers=headers, verify=verify) - - if resp.status_code == requests.codes.ok: - data = json.loads(resp.text) - LOG.info("The returned data is: {}".format(data)) - return data - - LOG.info("Error {}".format(resp.status_code)) - return None - - -def patch_request(url, data, headers, verify=True): - """ - Sends a PATCH request to the url - Args: - url (str): url to send request to - data (dict|str|list): data to be sent in the request body - headers (dict): header to add to the request - verify: Verify SSL certificate - - Returns (dict): The response for the request - - """ - if not isinstance(data, str): - data = json.dumps(data) - LOG.info("Sending PATCH request to {}. Headers: {}. Data: " - "{}".format(url, headers, data)) - resp = requests.patch(url, headers=headers, data=data, verify=verify) - - if resp.status_code == requests.codes.ok: - data = json.loads(resp.text) - LOG.info("The returned data is: {}".format(data)) - return data - - LOG.info("Error {}".format(resp.status_code)) - return None diff --git a/automated-pytest-suite/keywords/keystone_helper.py b/automated-pytest-suite/keywords/keystone_helper.py deleted file mode 100644 index c8cfc1c9..00000000 --- a/automated-pytest-suite/keywords/keystone_helper.py +++ /dev/null @@ -1,623 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -import re - -from consts.auth import Tenant, HostLinuxUser -from consts.proj_vars import ProjVar -from utils import cli, exceptions, table_parser -from utils.clients.ssh import ControllerClient -from utils.tis_log import LOG -from keywords import common - - -def get_roles(field='ID', con_ssh=None, auth_info=Tenant.get('admin'), - **kwargs): - table_ = table_parser.table(cli.openstack('role list', ssh_client=con_ssh, - auth_info=auth_info)[1]) - return table_parser.get_multi_values(table_, field, **kwargs) - - -def get_users(field='ID', con_ssh=None, auth_info=Tenant.get('admin'), - **kwargs): - """ - Return a list of user id(s) with given user name. - - Args: - field (str|list|tuple): - con_ssh (SSHClient): - auth_info - - Returns (list): list of user id(s) - - """ - table_ = table_parser.table(cli.openstack('user list', ssh_client=con_ssh, - auth_info=auth_info)[1]) - return table_parser.get_multi_values(table_, field, **kwargs) - - -def add_or_remove_role(add_=True, role='admin', project=None, user=None, - domain=None, group=None, group_domain=None, - project_domain=None, user_domain=None, inherited=None, - check_first=True, fail_ok=False, - con_ssh=None, auth_info=Tenant.get('admin')): - """ - Add or remove given role for specified user and tenant. e.g., add admin - role to tenant2 user on tenant2 project - - Args: - add_(bool): whether to add or remove - role (str): an existing role from openstack role list - project (str): tenant name. 
When unset, the primary tenant name - will be used - user (str): an existing user that belongs to given tenant - domain (str): Include (name or ID) - group (str): Include (name or ID) - group_domain (str): Domain the group belongs to (name or ID). - This can be used in case collisions between group names exist. - project_domain (str): Domain the project belongs to (name or ID). - This can be used in case collisions between project names exist. - user_domain (str): Domain the user belongs to (name or ID). - This can be used in case collisions between user names exist. - inherited (bool): Specifies if the role grant is inheritable to the - sub projects - check_first (bool): whether to check if role already exists for given - user and tenant - fail_ok (bool): whether to throw exception on failure - con_ssh (SSHClient): active controller ssh session - auth_info (dict): auth info to use to executing the add role cli - - Returns (tuple): - - """ - tenant_dict = {} - - if project is None: - if auth_info and auth_info.get('platform'): - project = auth_info['tenant'] - else: - tenant_dict = Tenant.get_primary() - project = tenant_dict['tenant'] - - if user is None: - user = tenant_dict.get('user', project) - - if check_first: - existing_roles = get_role_assignments(role=role, project=project, - user=user, - user_domain=user_domain, - group=group, - group_domain=group_domain, - domain=domain, - project_domain=project_domain, - inherited=inherited, - effective_only=False, - con_ssh=con_ssh, - auth_info=auth_info) - if existing_roles: - if add_: - msg = "Role already exists with given criteria: {}".format( - existing_roles) - LOG.info(msg) - return -1, msg - else: - if not add_: - msg = "Role with given criteria does not exist. Do nothing." - LOG.info(msg) - return -1, msg - - msg_str = 'Add' if add_ else 'Remov' - LOG.info( - "{}ing {} role to {} user under {} project".format(msg_str, role, user, - project)) - - sub_cmd = "--user {} --project {}".format(user, project) - if inherited is True: - sub_cmd += ' --inherited' - - optional_args = { - 'domain': domain, - 'group': group, - 'group-domain': group_domain, - 'project-domain': project_domain, - 'user-domain': user_domain, - } - - for key, val in optional_args.items(): - if val is not None: - sub_cmd += ' --{} {}'.format(key, val) - - sub_cmd += ' {}'.format(role) - - cmd = 'role add' if add_ else 'role remove' - res, out = cli.openstack(cmd, sub_cmd, ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info) - - if res == 1: - return 1, out - - LOG.info("{} cli accepted. 
Check role is {}ed " - "successfully".format(cmd, msg_str)) - post_roles = get_role_assignments(role=role, project=project, user=user, - user_domain=user_domain, group=group, - group_domain=group_domain, domain=domain, - project_domain=project_domain, - inherited=inherited, effective_only=True, - con_ssh=con_ssh, auth_info=auth_info) - - err_msg = '' - if add_ and not post_roles: - err_msg = "No role is added with given criteria" - elif post_roles and not add_: - err_msg = "Role is not removed" - if err_msg: - if fail_ok: - LOG.warning(err_msg) - return 2, err_msg - else: - raise exceptions.KeystoneError(err_msg) - - succ_msg = "Role is successfully {}ed".format(msg_str) - LOG.info(succ_msg) - return 0, succ_msg - - -def get_role_assignments(field='Role', names=True, role=None, user=None, - project=None, user_domain=None, group=None, - group_domain=None, domain=None, project_domain=None, - inherited=None, effective_only=None, - con_ssh=None, auth_info=Tenant.get('admin')): - """ - Get values from 'openstack role assignment list' table - - Args: - field (str|list|tuple): role assignment table header to determine - which values to return - names (bool): whether to display role assignment with name - (default is ID) - role (str): an existing role from openstack role list - project (str): tenant name. When unset, the primary tenant name - will be used - user (str): an existing user that belongs to given tenant - domain (str): Include (name or ID) - group (str): Include (name or ID) - group_domain (str): Domain the group belongs to (name or ID). This can - be used in case collisions between group names exist. - project_domain (str): Domain the project belongs to (name or ID). This - can be used in case collisions between project names exist. - user_domain (str): Domain the user belongs to (name or ID). This can - be used in case collisions between user names exist. 
- inherited (bool): Specifies if the role grant is inheritable to the - sub projects - effective_only (bool): Whether to show effective roles only - con_ssh (SSHClient): active controller ssh session - auth_info (dict): auth info to use to executing the add role cli - - Returns (list): list of values - - """ - optional_args = { - 'role': role, - 'user': user, - 'project': project, - 'domain': domain, - 'group': group, - 'group-domain': group_domain, - 'project-domain': project_domain, - 'user-domain': user_domain, - 'names': True if names else None, - 'effective': True if effective_only else None, - 'inherited': True if inherited else None - } - args = common.parse_args(optional_args) - - role_assignment_tab = table_parser.table( - cli.openstack('role assignment list', args, ssh_client=con_ssh, - auth_info=auth_info)[1]) - - if not role_assignment_tab['headers']: - LOG.info("No role assignment is found with criteria: {}".format(args)) - return [] - - return table_parser.get_multi_values(role_assignment_tab, field) - - -def set_current_user_password(original_password, new_password, fail_ok=False, - auth_info=None, con_ssh=None): - """ - Set password for current user - Args: - original_password: - new_password: - fail_ok: - auth_info: - con_ssh: - - Returns (tuple): - - """ - args = "--password '{}' --original-password '{}'".format(new_password, original_password) - code, output = cli.openstack('user password set', args, ssh_client=con_ssh, - auth_info=auth_info, fail_ok=fail_ok) - if code > 0: - return 1, output - - if not auth_info: - auth_info = Tenant.get_primary() - - user = auth_info['user'] - tenant_dictname = user - if auth_info.get('platform'): - tenant_dictname += '_platform' - Tenant.update(tenant_dictname, password=new_password) - - if user == 'admin': - from consts.proj_vars import ProjVar - if ProjVar.get_var('REGION') != 'RegionOne': - LOG.info( - "Run openstack_update_admin_password on secondary region " - "after admin password change") - if not con_ssh: - con_ssh = ControllerClient.get_active_controller() - with con_ssh.login_as_root(timeout=30) as con_ssh: - con_ssh.exec_cmd( - "echo 'y' | openstack_update_admin_password '{}'".format(new_password)) - - msg = 'User {} password successfully updated from {} to {}'.format(user, original_password, - new_password) - LOG.info(msg) - return 0, output - - -def set_user(user, name=None, project=None, password=None, project_doamin=None, - email=None, description=None, - enable=None, fail_ok=False, auth_info=Tenant.get('admin'), - con_ssh=None): - LOG.info("Updating {}...".format(user)) - arg = '' - optional_args = { - 'name': name, - 'project': project, - 'password': password, - 'project-domain': project_doamin, - 'email': email, - 'description': description, - } - for key, val in optional_args.items(): - if val is not None: - arg += "--{} '{}' ".format(key, val) - - if enable is not None: - arg += '--{} '.format('enable' if enable else 'disable') - - if not arg.strip(): - raise ValueError( - "Please specify the param(s) and value(s) to change to") - - arg += user - - code, output = cli.openstack('user set', arg, ssh_client=con_ssh, timeout=120, - fail_ok=fail_ok, auth_info=auth_info) - - if code > 0: - return 1, output - - if name or project or password: - tenant_dictname = user - if auth_info and auth_info.get('platform'): - tenant_dictname += '_platform' - Tenant.update(tenant_dictname, username=name, password=password, - tenant=project) - - if password and user == 'admin': - from consts.proj_vars import ProjVar - if 
ProjVar.get_var('REGION') != 'RegionOne': - LOG.info( - "Run openstack_update_admin_password on secondary region " - "after admin password change") - if not con_ssh: - con_ssh = ControllerClient.get_active_controller() - with con_ssh.login_as_root(timeout=30) as con_ssh: - con_ssh.exec_cmd( - "echo 'y' | openstack_update_admin_password '{}'".format( - password)) - - msg = 'User {} updated successfully'.format(user) - LOG.info(msg) - return 0, output - - -def get_endpoints(field='ID', endpoint_id=None, service_name=None, - service_type=None, enabled=None, interface="admin", - region=None, url=None, strict=False, - auth_info=Tenant.get('admin'), con_ssh=None, cli_filter=True): - """ - Get a list of endpoints with given arguments - Args: - field (str|list|tuple): valid header of openstack endpoints list - table. 'ID' - endpoint_id (str): id of the endpoint - service_name (str): Service name of endpoint like novaav3, neutron, - keystone. vim, heat, swift, etc - service_type(str): Service type - enabled (str): True/False - interface (str): Interface of endpoints. valid entries: admin, - internal, public - region (str): RegionOne or RegionTwo - url (str): url of endpoint - strict(bool): - auth_info (dict): - con_ssh (SSHClient): - cli_filter (bool): whether to filter out using cli. e.g., openstack - endpoint list --service xxx - - Returns (list): - - """ - pre_args_str = '' - if cli_filter: - pre_args_dict = { - '--service': service_name, - '--interface': interface, - '--region': region, - } - - pre_args = [] - for key, val in pre_args_dict.items(): - if val: - pre_args.append('{}={}'.format(key, val)) - pre_args_str = ' '.join(pre_args) - - output = cli.openstack('endpoint list', positional_args=pre_args_str, - ssh_client=con_ssh, auth_info=auth_info)[1] - if not output.strip(): - LOG.warning("No endpoints returned with param: {}".format(pre_args_str)) - return [] - - table_ = table_parser.table(output) - - kwargs = { - 'ID': endpoint_id, - 'Service Name': service_name, - 'Service Type': service_type, - 'Enabled': enabled, - 'Interface': interface, - 'URL': url, - 'Region': region, - } - kwargs = {k: v for k, v in kwargs.items() if v} - return table_parser.get_multi_values(table_, field, strict=strict, - regex=True, merge_lines=True, **kwargs) - - -def get_endpoints_values(endpoint_id, fields, con_ssh=None, - auth_info=Tenant.get('admin')): - """ - Gets the endpoint target field value for given endpoint Id - Args: - endpoint_id: the endpoint id to get the value of - fields: the target field name to retrieve value of - con_ssh: - auth_info - - Returns (list): list of endpoint values - - """ - table_ = table_parser.table( - cli.openstack('endpoint show', endpoint_id, ssh_client=con_ssh, - auth_info=auth_info)[1]) - return table_parser.get_multi_values_two_col_table(table_, fields) - - -def is_https_enabled(con_ssh=None, source_openrc=True, interface='public', - auth_info=Tenant.get('admin_platform')): - """ - Check whether interface is https - Args: - con_ssh: - source_openrc: - interface: default is public - auth_info: - Returns True or False - """ - if not con_ssh: - con_name = auth_info.get('region') if ( - auth_info and ProjVar.get_var('IS_DC')) else None - con_ssh = ControllerClient.get_active_controller(name=con_name) - - table_ = table_parser.table( - cli.openstack('endpoint list', ssh_client=con_ssh, auth_info=auth_info, - source_openrc=source_openrc)[1]) - con_ssh.exec_cmd('unset OS_REGION_NAME') # Workaround - filters = {'Service Name': 'keystone', 'Service Type': 'identity', - 
'Interface': interface} - keystone_values = table_parser.get_values(table_=table_, target_header='URL', - **filters) - LOG.info('keystone {} URLs: {}'.format(interface, keystone_values)) - return all('https' in i for i in keystone_values) - - -def delete_users(user, fail_ok=False, auth_info=Tenant.get('admin'), - con_ssh=None): - """ - Delete the given openstack user - Args: - user: user name to delete - fail_ok: if the deletion expected to fail - auth_info - con_ssh - - Returns: tuple, (code, msg) - """ - LOG.info('Deleting {} keystone user: {}'.format('platform' if auth_info and auth_info.get( - 'platform') else 'containerized', user)) - return cli.openstack('user delete', user, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - - -def get_projects(field='ID', auth_info=Tenant.get('admin'), con_ssh=None, - strict=False, **filters): - """ - Get list of Project names or IDs - Args: - field (str|list|tuple): - auth_info: - con_ssh: - strict (bool): used for filters - filters - - Returns (list): - - """ - table_ = table_parser.table( - cli.openstack('project list', ssh_client=con_ssh, auth_info=auth_info)[ - 1]) - return table_parser.get_multi_values(table_, field, strict=strict, - **filters) - - -def create_project(name=None, field='ID', domain=None, parent=None, - description=None, enable=None, con_ssh=None, - rtn_exist=None, fail_ok=False, auth_info=Tenant.get('admin'), - **properties): - """ - Create a openstack project - Args: - name (str|None): - field (str): ID or Name. Whether to return project id or name if - created successfully - domain (str|None): - parent (str|None): - description (str|None): - enable (bool|None): - con_ssh: - rtn_exist - fail_ok: - auth_info: - **properties: - - Returns (tuple): - (0, ) - (1, ) - - """ - if not name: - existing_names = get_projects(field='Name', - auth_info=Tenant.get('admin'), - con_ssh=con_ssh) - max_count = 0 - end_str = '' - for name in existing_names: - match = re.match(r'tenant(\d+)(.*)', name) - if match: - count, end_str = match.groups() - max_count = max(int(count), max_count) - name = 'tenant{}{}'.format(max_count + 1, end_str) - - LOG.info("Create/Show openstack project {}".format(name)) - - arg_dict = { - 'domain': domain, - 'parent': parent, - 'description': description, - 'enable': True if enable is True else None, - 'disable': True if enable is False else None, - 'or-show': rtn_exist, - 'property': properties, - } - - arg_str = common.parse_args(args_dict=arg_dict, repeat_arg=True) - arg_str += ' {}'.format(name) - - code, output = cli.openstack('project create', arg_str, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - if code > 0: - return 1, output - - project_ = table_parser.get_value_two_col_table(table_parser.table(output), - field=field) - LOG.info("Project {} successfully created/showed.".format(project_)) - - return 0, project_ - - -def create_user(name=None, field='name', domain=None, project=None, - project_domain=None, rtn_exist=None, - password=None, email=None, - description=None, enable=None, - auth_info=Tenant.get('admin'), fail_ok=False, con_ssh=None): - """ - Create an openstack user - Args: - name (str|None): - field: name or id - domain: - project (str|None): default project - project_domain: - rtn_exist (bool) - password: - email: - description: - enable: - auth_info: - fail_ok: - con_ssh: - - Returns (tuple): - (0, ) - (1, ) - - """ - if not password: - password = HostLinuxUser.get_password() - - if not name: - name = 'user' - common.get_unique_name(name_str=name) - - 
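# Note (assumed semantics): common.get_unique_name returns the uniquified
# string rather than mutating its argument, so the usual pattern would be:
#   name = common.get_unique_name(name_str='user')   # e.g. -> 'user-3'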
LOG.info("Create/Show openstack user {}".format(name)) - arg_dict = { - 'domain': domain, - 'project': project, - 'project-domain': project_domain, - 'password': password, - 'email': email, - 'description': description, - 'enable': True if enable is True else None, - 'disable': True if enable is False else None, - 'or-show': rtn_exist, - } - - arg_str = '{} {}'.format(common.parse_args(args_dict=arg_dict), name) - - code, output = cli.openstack('user create', arg_str, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - if code > 0: - return 1, output - - table_ = table_parser.table(output) - username = table_parser.get_value_two_col_table(table_, field='name') - user = username if field == 'name' else table_parser.get_value_two_col_table(table_, - field=field) - - is_platform = auth_info and auth_info.get('platform') - keystone = 'platform' if is_platform else 'containerized' - dictname = user + '_platform' if is_platform else user - existing_auth = Tenant.get(dictname) - if existing_auth: - if existing_auth['user'] != username: - raise ValueError('Tenant.{} already exists for a different user {}'.format( - dictname, existing_auth['user'])) - Tenant.update(dictname, username=username, password=password, tenant=project, - platform=is_platform) - else: - Tenant.add(username=username, tenantname=project, dictname=dictname, password=password, - platform=is_platform) - LOG.info('Tenant.{} for {} keystone user {} is added'.format(dictname, keystone, user)) - - LOG.info("{} keystone user {} successfully created/showed".format(keystone, user)) - return 0, user diff --git a/automated-pytest-suite/keywords/kube_helper.py b/automated-pytest-suite/keywords/kube_helper.py deleted file mode 100644 index dd2cf934..00000000 --- a/automated-pytest-suite/keywords/kube_helper.py +++ /dev/null @@ -1,1136 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -import re -import configparser -import time - -import yaml - -from utils import table_parser, exceptions -from utils.tis_log import LOG -from utils.clients.ssh import ControllerClient -from keywords import common, system_helper -from consts.stx import PodStatus - - -def exec_kube_cmd(sub_cmd, args=None, con_ssh=None, fail_ok=False, grep=None): - """ - Execute an kubectl cmd on given ssh client. 
i.e., 'kubectl ' - Args: - sub_cmd (str): - args (None|str): - con_ssh: - fail_ok: - grep (None|str|tuple|list) - - Returns (tuple): - (0, ) - (1, ) - - """ - if not con_ssh: - con_ssh = ControllerClient.get_active_controller() - cmd = 'kubectl {} {}'.format(sub_cmd.strip(), - args.strip() if args else '').strip() - - get_exit_code = True - if cmd.endswith(';echo'): - get_exit_code = False - if grep: - if isinstance(grep, str): - grep = (grep,) - for grep_str in grep: - if '-v ' not in grep_str and '-e ' in grep_str and 'NAME' not in \ - grep_str: - grep_str += ' -e NAME' - cmd += ' | grep --color=never {}'.format(grep_str) - - code, out = con_ssh.exec_cmd(cmd, fail_ok=True, get_exit_code=get_exit_code) - if code <= 0: - return 0, out - - if fail_ok: - return 1, out - else: - raise exceptions.KubeCmdError('CMD: {} Output: {}'.format(cmd, out)) - - -def __get_resource_tables(namespace=None, all_namespaces=None, - resource_types=None, resource_names=None, - labels=None, field_selectors=None, wide=True, - con_ssh=None, fail_ok=False, grep=None): - if not resource_types: - resource_types = '' - elif isinstance(resource_types, (list, tuple)): - resource_types = ','.join(resource_types) - resources = resource_types - - if resource_names: - if ',' in resource_types: - raise ValueError( - "At most 1 resource_types can be specified if resource_names " - "are provided.") - if all_namespaces and not namespace: - raise ValueError( - "all_namespaces is disallowed when resource_names are provided") - if isinstance(resource_names, (list, tuple)): - resource_names = ' '.join(resource_names) - resources = '{} {}'.format(resources, resource_names) - - args_dict = { - '-n': namespace, - '--all-namespaces': True if all_namespaces and not namespace else None, - '-l': labels, - '--field-selector': field_selectors, - '-o': 'wide' if wide else None - } - args = '{} {}'.format(resources, - common.parse_args(args_dict, repeat_arg=False, - vals_sep=',')) - code, out = exec_kube_cmd(sub_cmd='get', args=args, con_ssh=con_ssh, - fail_ok=fail_ok, grep=grep) - if code > 0: - return code, out - - tables = table_parser.tables_kube(out) - return code, tables - - -def get_unhealthy_pods(field='NAME', namespace=None, all_namespaces=True, - pod_names=None, - labels=None, exclude=False, strict=True, con_ssh=None, - **kwargs): - """ - Get pods that are not Completed and not Running - Args: - namespace (str|None): - all_namespaces: (bool|None) - pod_names (str|list|tuple|None): full names of pods to check - labels (str|dict|None): - field (str|tuple|list): - exclude: - strict: - con_ssh: - - Returns (list): - - """ - field_selector = 'status.phase!=Running,status.phase!=Succeeded' - return get_pods(field=field, namespace=namespace, - all_namespaces=all_namespaces, pod_names=pod_names, - labels=labels, field_selectors=field_selector, - exclude=exclude, strict=strict, - con_ssh=con_ssh, **kwargs) - - -def get_pods(field='NAME', namespace=None, all_namespaces=False, pod_names=None, - labels=None, field_selectors=None, - fail_ok=False, con_ssh=None, exclude=False, strict=True, **kwargs): - """ - Get pods values for specified field(s) - Args: - field (str|tuple|list): return values for given header(s) - namespace (str|None): when None, --all-namespaces will be used. - all_namespaces (bool|none): - pod_names (str|list|tuple): Full pod name(s). When specified, labels - and field_selectors will be ignored. - labels (str|dict|None|list|tuple): label selectors. Used only if - full_names are unspecified. 
- e.g., application=nova,component=compute - field_selectors (str): Used only if full_names are unspecified. - e.g., , 'spec.nodeName=controller-0,status.phase!=Running, - status.phase!=Succeeded' - exclude (bool): - strict (bool): - con_ssh: - fail_ok (bool) - **kwargs: table filters for post processing output to return filtered - values - - Returns (list): examples: - Input: - field=('NAME', 'STATUS') OR 'Name' - labels='application=nova,component=compute', - field_selector='spec.nodeName=compute-0' - Output: - [('nova-compute-compute-0-xdjkds', 'Running')] OR [ - 'nova-compute-compute-0-xdjkds'] - - """ - return get_resources(field=field, namespace=namespace, - all_namespaces=all_namespaces, resource_type='pod', - resource_names=pod_names, labels=labels, - field_selectors=field_selectors, - con_ssh=con_ssh, fail_ok=fail_ok, exclude=exclude, - strict=strict, **kwargs) - - -def get_resources(field='NAME', namespace=None, all_namespaces=None, - resource_names=None, resource_type='pod', - labels=None, field_selectors=None, con_ssh=None, - fail_ok=False, grep=None, - exclude=False, strict=True, **kwargs): - """ - Get resources values for single resource type via kubectl get - Args: - field (str|tuple|list) - namespace (None|str): e.g., kube-system, openstack, default. - all_namespaces (bool|None): used only when namespace is unspecified - resource_names (str|None|list|tuple): e.g., calico-typha - resource_type (str): e.g., "deployments.apps", "pod", "service" - labels (dict|str|list|tuple): Used only when resource_names are - unspecified - field_selectors (dict|str|list|tuple): Used only when resource_names - are unspecified - con_ssh: - fail_ok: - grep (str|None): grep on cmd output - exclude - strict - **kwargs: table filters for post processing return values - - Returns (list): - key is the name prefix, e.g., service, default, deployment.apps, - replicaset.apps - value is a list. Each item is a dict rep for a row with lowercase keys. - e.g., [{'name': 'cinder-api', 'age': '4d19h', ... }, ...] 
- - """ - name_filter = None - if resource_names and ( - (all_namespaces and not namespace) or field_selectors or labels): - name_filter = {'name': resource_names} - resource_names = None - - code, tables = __get_resource_tables(namespace=namespace, - all_namespaces=all_namespaces, - resource_types=resource_type, - resource_names=resource_names, - labels=labels, - field_selectors=field_selectors, - con_ssh=con_ssh, fail_ok=fail_ok, - grep=grep) - if code > 0: - output = tables - if 'NAME ' not in output: # no resource returned - return [] - - output = output.split('\nError from server')[0] - tables = table_parser.tables_kube(output) - - final_table = tables[0] - if len(tables) > 1: - combined_values = final_table['values'] - column_count = len(combined_values) - for table_ in tables[1:]: - table_values = table_['values'] - combined_values = [combined_values[i] + table_values[i] for i in - range(column_count)] - final_table['values'] = combined_values - - if name_filter: - final_table = table_parser.filter_table(final_table, **name_filter) - - return table_parser.get_multi_values(final_table, fields=field, - zip_values=True, strict=strict, - exclude=exclude, **kwargs) - - -def apply_pod(file_path, pod_name, namespace=None, recursive=None, - select_all=None, - labels=None, con_ssh=None, fail_ok=False, - check_both_controllers=True): - """ - Apply a pod from given file via kubectl apply - Args: - file_path (str): - pod_name (str): - namespace (None|str): - recursive (None|bool): - select_all (None|bool): - labels (dict|str|list|tuple|None): key value pairs - con_ssh: - fail_ok: - check_both_controllers (bool): - - Returns (tuple): - (0, (dict)) - (1, ) - (2, ) # pod is not running after apply - (3, ) # pod if not running on the other controller after - apply - - """ - arg_dict = { - '--all': select_all, - '-l': labels, - '--recursive': recursive, - } - - arg_str = common.parse_args(args_dict=arg_dict, vals_sep=',') - arg_str += ' -f {}'.format(file_path) - - if not con_ssh: - con_ssh = ControllerClient.get_active_controller() - code, output = exec_kube_cmd(sub_cmd='apply', args=arg_str, con_ssh=con_ssh, - fail_ok=fail_ok) - if code > 0: - return 1, output - - LOG.info("Check pod is running on current host") - res = wait_for_pods_status(pod_names=pod_name, namespace=namespace, - status=PodStatus.RUNNING, - con_ssh=con_ssh, fail_ok=fail_ok) - if not res: - return 2, "Pod {} is not running after apply on active " \ - "controller".format(pod_name) - - if check_both_controllers and not system_helper.is_aio_simplex( - con_ssh=con_ssh): - LOG.info("Check pod is running on the other controller as well") - con_name = 'controller-1' if con_ssh.get_hostname() == 'controller-0' \ - else 'controller-0' - from keywords import host_helper - with host_helper.ssh_to_host(hostname=con_name, - con_ssh=con_ssh) as other_con: - res, pods_info = wait_for_pods_status(pod_names=pod_name, - namespace=namespace, - con_ssh=other_con, - fail_ok=fail_ok) - if not res: - return 3, "Pod {} is not running after apply on standby " \ - "controller".format(pod_name) - - LOG.info("{} pod is successfully applied and running".format(pod_name)) - return 0, pod_name - - -def wait_for_pods_status(pod_names=None, partial_names=None, labels=None, - namespace=None, status=PodStatus.RUNNING, - timeout=120, check_interval=3, con_ssh=None, - fail_ok=False, strict=False, **kwargs): - """ - Wait for pod(s) to reach given status via kubectl get pod - Args: - pod_names (str|list|tuple): full name of the pods - partial_names (str|list|tuple): 
Used only if pod_names are not provided - labels (str|list|tuple|dict|None): Used only if pod_names are not - provided - namespace (None|str): - status (str|None|list): None means any state as long as pod exists. - timeout: - check_interval: - con_ssh: - fail_ok: - strict (bool): - - Returns (tuple): - (True, ) # actual_pods_info is a dict with - pod_name as key, and pod_info(dict) as value - (False, ) - - """ - - pods_to_check = [] - if pod_names: - if isinstance(pod_names, str): - pod_names = [pod_names] - else: - pod_names = list(pod_names) - labels = partial_names = None - pods_to_check = list(pod_names) - elif partial_names: - if isinstance(partial_names, str): - partial_names = [partial_names] - else: - partial_names = list(partial_names) - kwargs['NAME'] = partial_names - pods_to_check = list(partial_names) - - actual_status = {} - end_time = time.time() + timeout - - while time.time() < end_time: - pod_full_names = pods_to_check if pod_names else None - pods_values = get_pods(pod_names=pod_full_names, - field=('NAME', 'status'), namespace=namespace, - labels=labels, - strict=strict, fail_ok=True, con_ssh=con_ssh, - **kwargs) - if not pods_values: - # No pods returned, continue to check. - time.sleep(check_interval) - continue - - continue_check = False # This is used when only labels are provided - for pod_info in pods_values: - pod_name, pod_status = pod_info - actual_status[pod_name] = pod_status - if status and pod_status not in status: - # Status not as expected, continue to wait - continue_check = True - if partial_names: - # In this case, there might be multiple pods that matches - # 1 partial name, so the partial name that - # matches current pod could have been removed if there - # was one other pod that also matched the name - # had reached the desired state. In this case, we will - # add the partial name back to check list - for partial_name in partial_names: - if partial_name in pod_name and partial_name not in \ - pods_to_check: - pods_to_check.append(partial_name) - break - else: - # Criteria met for current pod, remove it from check_list - if pod_names: - pods_to_check.remove(pod_name) - elif partial_names: - for partial_name in partial_names: - if partial_name in pod_name and partial_name in \ - pods_to_check: - pods_to_check.remove(partial_name) - break - - if not pods_to_check and not continue_check: - return True, actual_status - - time.sleep(check_interval) - - name_str = 'Names: {}'.format(pods_to_check) if pods_to_check else '' - label_str = 'Labels: {}'.format(labels) if labels else '' - criteria = '{} {}'.format(name_str, label_str).strip() - msg = "Pods did not reach expected status within {}s. Criteria not met: " \ - "{}. 
Actual info: {}".format(timeout, criteria, actual_status) - if fail_ok: - LOG.info(msg) - return False, actual_status - - raise exceptions.KubeError(msg) - - -def wait_for_resources_gone(resource_names=None, resource_type='pod', - namespace=None, timeout=120, - check_interval=3, con_ssh=None, fail_ok=False, - strict=True, exclude=False, **kwargs): - """ - Wait for pod(s) to be gone from kubectl get - Args: - resource_names (str|list|tuple): full name of a pod - resource_type (str): - namespace (None|str): - timeout: - check_interval: - con_ssh: - fail_ok: - strict (bool): - exclude - **kwargs - - Returns (tuple): - (True, None) - (False, ) # actual_pods_info is a dict with - pod_name as key, and pod_info(dict) as value - - """ - - end_time = time.time() + timeout - resources_to_check = resource_names - - while time.time() < end_time: - - resources_to_check = get_resources(resource_names=resources_to_check, - namespace=namespace, - resource_type=resource_type, - con_ssh=con_ssh, - fail_ok=True, strict=strict, - exclude=exclude, **kwargs) - - if not resources_to_check: - return True, resources_to_check - - time.sleep(check_interval) - - msg = 'Resources did not disappear in {} seconds. Remaining resources: ' \ - '{}, namespace: {}'.format(timeout, resources_to_check, namespace) - - if fail_ok: - LOG.info(msg) - return False, resources_to_check - - raise exceptions.KubeError(msg) - - -def delete_resources(resource_names=None, select_all=None, resource_types='pod', - namespace=None, - recursive=None, labels=None, con_ssh=None, fail_ok=False, - post_check=True, - check_both_controllers=True): - """ - Delete pods via kubectl delete - Args: - resource_names (None|str|list|tuple): - select_all (None|bool): - resource_types (str|list|tuple): - namespace (None|str): - recursive (bool): - labels (None|dict): - con_ssh: - fail_ok: - post_check (bool): Whether to check if resources are gone after deletion - check_both_controllers (bool): - - Returns (tuple): - (0, None) # pods successfully deleted - (1, ) - (2, (list of dict)) # pod(s) still exist in - kubectl after deletion - (3, (list of dict)) # - pod(s) still exist on the other controller - - """ - arg_dict = { - '--all': select_all, - '-l': labels, - '--recursive': recursive, - } - - arg_str = common.parse_args(args_dict=arg_dict, vals_sep=',') - if resource_types: - if isinstance(resource_types, str): - resource_types = [resource_types] - arg_str = '{} {}'.format(','.join(resource_types), arg_str).strip() - - if resource_names: - if isinstance(resource_names, str): - resource_names = [resource_names] - arg_str = '{} {}'.format(arg_str, ' '.join(resource_names)) - - if not con_ssh: - con_ssh = ControllerClient.get_active_controller() - code, output = exec_kube_cmd(sub_cmd='delete', args=arg_str, - con_ssh=con_ssh, fail_ok=fail_ok) - if code > 0: - return 1, output - - if post_check: - def __wait_for_resources_gone(ssh_client): - final_remaining = [] - if resource_types: - for resource_type in resource_types: - res, remaining_res = wait_for_resources_gone( - resource_names=resource_names, - resource_type=resource_type, - namespace=namespace, - con_ssh=ssh_client, fail_ok=fail_ok) - if not res: - final_remaining += remaining_res - else: - res, final_remaining = wait_for_resources_gone( - resource_names=resource_names, - namespace=namespace, - con_ssh=ssh_client, fail_ok=fail_ok) - return final_remaining - - LOG.info("Check pod is not running on current host") - - remaining = __wait_for_resources_gone(con_ssh) - if remaining: - return 2, remaining - 
- if check_both_controllers and not system_helper.is_aio_simplex( - con_ssh=con_ssh): - LOG.info("Check pod is running on the other controller as well") - con_name = 'controller-1' if \ - con_ssh.get_hostname() == 'controller-0' else 'controller-0' - from keywords import host_helper - with host_helper.ssh_to_host(hostname=con_name, - con_ssh=con_ssh) as other_con: - remaining = __wait_for_resources_gone(other_con) - if remaining: - return 3, remaining - - LOG.info("{} are successfully removed.".format(resource_names)) - return 0, None - - -def get_pods_info_yaml(type_names='pods', namespace=None, con_ssh=None, - fail_ok=False): - """ - pods info parsed from yaml output of kubectl get cmd - Args: - namespace (None|str): e.g., kube-system, openstack, default. If set - to 'all', use --all-namespaces. - type_names (None|list|tuple|str): e.g., ("deployments.apps", - "services/calico-typha") - con_ssh: - fail_ok: - - Returns (list): each item is a pod info dictionary - - """ - if isinstance(type_names, (list, tuple)): - type_names = ','.join(type_names) - args = type_names - - if namespace == 'all': - args += ' --all-namespaces' - elif namespace: - args += ' --namespace={}'.format(namespace) - - args += ' -o yaml' - - code, out = exec_kube_cmd(sub_cmd='get', args=args, con_ssh=con_ssh, - fail_ok=fail_ok) - if code > 0: - return [] - - try: - pods_info = yaml.load(out) - except yaml.YAMLError: - LOG.warning('Output is not yaml') - return [] - - pods_info = pods_info.get('items', [pods_info]) - - return pods_info - - -def get_pod_value_jsonpath(type_name, jsonpath, namespace=None, con_ssh=None): - """ - Get value for specified pod with jsonpath - Args: - type_name (str): e.g., 'service/kubernetes' - jsonpath (str): e.g., '{.spec.ports[0].targetPort}' - namespace (str|None): e.g., 'kube-system' - con_ssh: - - Returns (str): - - """ - args = '{} -o jsonpath="{}"'.format(type_name, jsonpath) - if namespace: - args += ' --namespace {}'.format(namespace) - - args += ';echo' - value = exec_kube_cmd('get', args, con_ssh=con_ssh)[1] - return value - - -def expose_the_service(deployment_name, type, service_name, namespace=None, con_ssh=None): - """ - Exposes the service of a deployment - Args: - deployment_name (str): name of deployment - type (str): "LoadBalancer" or "NodePort" - service_name(str): service name - namespace (str|None): e.g., 'kube-system' - con_ssh: - - Returns (str): - - """ - args = '{} --type={} --name={}'.format(deployment_name, type, service_name) - if namespace: - args += ' --namespace {}'.format(namespace) - return exec_kube_cmd('expose deployment', args, con_ssh=con_ssh) - - -def get_nodes(hosts=None, status=None, field='STATUS', exclude=False, - con_ssh=None, fail_ok=False): - """ - Get nodes values via 'kubectl get nodes' - Args: - hosts (None|str|list|tuple): table filter - status (None|str|list|tuple): table filter - field (str|list|tuple): any header of the nodes table - exclude (bool): whether to exclude rows with given criteria - con_ssh: - fail_ok: - - Returns (None|list): None if cmd failed. 
- - """ - code, output = exec_kube_cmd('get', args='nodes', con_ssh=con_ssh, - fail_ok=fail_ok) - if code > 0: - return None - - table_ = table_parser.table_kube(output) - if hosts or status: - table_ = table_parser.filter_table(table_, exclude=exclude, - **{'NAME': hosts, 'STATUS': status}) - - return table_parser.get_multi_values(table_, field) - - -def wait_for_nodes_ready(hosts=None, timeout=120, check_interval=5, - con_ssh=None, fail_ok=False): - """ - Wait for hosts in ready state via kubectl get nodes - Args: - hosts (None|list|str|tuple): Wait for all hosts ready if None is - specified - timeout: - check_interval: - con_ssh: - fail_ok: - - Returns (tuple): - (True, None) - (False, (list)) - - """ - end_time = time.time() + timeout - nodes_not_ready = None - while time.time() < end_time: - nodes_not_ready = get_nodes(status='Ready', field='NAME', - exclude=True, con_ssh=con_ssh, - fail_ok=True) - - if nodes_not_ready and hosts: - nodes_not_ready = list(set(nodes_not_ready) & set(hosts)) - - if nodes_not_ready: - LOG.info('{} not ready yet'.format(nodes_not_ready)) - elif nodes_not_ready is not None: - LOG.info("All nodes are ready{}".format( - ': {}'.format(hosts) if hosts else '')) - return True, None - - time.sleep(check_interval) - - msg = '{} are not ready within {}s'.format(nodes_not_ready, timeout) - LOG.warning(msg) - if fail_ok: - return False, nodes_not_ready - else: - raise exceptions.KubeError(msg) - - -def exec_cmd_in_container(cmd, pod, namespace=None, container_name=None, - stdin=None, tty=None, con_ssh=None, - fail_ok=False): - """ - Execute given cmd in given pod via kubectl exec - Args: - cmd: - pod: - namespace: - container_name: - stdin: - tty: - con_ssh: - fail_ok: - - Returns (tuple): - (0, ) - (1, ) - - """ - args = pod - if namespace: - args += ' -n {}'.format(namespace) - if container_name: - args += ' -c {}'.format(container_name) - if stdin: - args += ' -i' - if tty: - args += ' -t' - args += ' -- {}'.format(cmd) - - code, output = exec_kube_cmd(sub_cmd='exec', args=args, con_ssh=con_ssh, - fail_ok=fail_ok) - return code, output - - -def wait_for_pods_healthy(pod_names=None, namespace=None, all_namespaces=True, - labels=None, timeout=300, - check_interval=5, con_ssh=None, fail_ok=False, - exclude=False, strict=False, **kwargs): - """ - Wait for pods ready - Args: - pod_names (list|tuple|str|None): full name of pod(s) - namespace (str|None): - all_namespaces (bool|None) - labels (str|dict|list|tuple|None): - timeout: - check_interval: - con_ssh: - fail_ok: - exclude (bool) - strict (bool): strict applies to node and name matching if given - **kwargs - - Returns (tuple): - - """ - LOG.info("Wait for pods ready..") - if not pod_names: - pod_names = None - elif isinstance(pod_names, str): - pod_names = [pod_names] - - bad_pods = None - end_time = time.time() + timeout - while time.time() < end_time: - bad_pods_info = get_unhealthy_pods(labels=labels, - field=('NAME', 'STATUS'), - namespace=namespace, - all_namespaces=all_namespaces, - con_ssh=con_ssh, exclude=exclude, - strict=strict, **kwargs) - bad_pods = {pod_info[0]: pod_info[1] for pod_info in bad_pods_info if - (not pod_names or pod_info[0] in pod_names)} - if not bad_pods: - LOG.info("Pods are Completed or Running.") - if pod_names: - pod_names = [pod for pod in pod_names if - not re.search('audit-|init-', pod)] - if not pod_names: - return True - - is_ready = wait_for_running_pods_ready( - pod_names=pod_names, - namespace=namespace, - all_namespaces=all_namespaces, - labels=labels, timeout=int(end_time 
- time.time()), - strict=strict, - con_ssh=con_ssh, - fail_ok=fail_ok, **kwargs) - return is_ready - time.sleep(check_interval) - - msg = 'Some pods are not Running or Completed: {}'.format(bad_pods) - LOG.warning(msg) - if fail_ok: - return False - dump_pods_info(con_ssh=con_ssh) - raise exceptions.KubeError(msg) - - -def wait_for_running_pods_ready(pod_names=None, namespace=None, - all_namespaces=False, labels=None, timeout=300, - fail_ok=False, con_ssh=None, exclude=False, - strict=False, **kwargs): - """ - Wait for Running pods to be Ready, such as 1/1, 3/3 - Args: - pod_names: - namespace: - all_namespaces: - labels: - timeout: - fail_ok: - con_ssh: - exclude: - strict: - **kwargs: - - Returns (bool): - - """ - unready_pods = get_unready_running_pods(namespace=namespace, - all_namespaces=all_namespaces, - pod_names=pod_names, labels=labels, - exclude=exclude, strict=strict, - con_ssh=con_ssh, **kwargs) - if not unready_pods: - return True - - end_time = time.time() + timeout - while time.time() < end_time: - pods_info = get_pods(field=('NAME', 'READY'), namespace=namespace, - all_namespaces=all_namespaces, - pod_names=unready_pods, con_ssh=con_ssh) - for pod_info in pods_info: - pod_name, pod_ready = pod_info - ready_count, total_count = pod_ready.split('/') - if ready_count == total_count: - unready_pods.remove(pod_name) - if not unready_pods: - return True - - msg = "Some pods are not ready within {}s: {}".format(timeout, unready_pods) - LOG.warning(msg) - if fail_ok: - return False - raise exceptions.KubeError(msg) - - -def get_unready_running_pods(pod_names=None, namespace=None, - all_namespaces=False, labels=None, - con_ssh=None, exclude=False, strict=False, - **kwargs): - """ - Get Running pods that are not yet Ready. - Args: - pod_names: - namespace: - all_namespaces: - labels: - con_ssh: - exclude: - strict: - **kwargs: - - Returns (list): pod names - - """ - # field_selector does not work with pod_names, determine whether to use - # field_selector or do post filtering instead - # If field_selector is specified, the underlying get_pods function will - # use pod_names for post filtering - if exclude or labels or (not namespace and all_namespaces) or not pod_names: - field_selector = 'status.phase=Running' - else: - field_selector = None - kwargs['Status'] = 'Running' - - pods_running = get_pods(field=('NAME', 'READY'), namespace=namespace, - all_namespaces=all_namespaces, - pod_names=pod_names, labels=labels, - field_selectors=field_selector, grep='-v 1/1', - exclude=exclude, strict=strict, con_ssh=con_ssh, - fail_ok=True, **kwargs) - not_ready_pods = [] - for pod_info in pods_running: - pod_name, pod_ready = pod_info - ready_count, total_count = pod_ready.split('/') - if ready_count != total_count: - not_ready_pods.append(pod_name) - - return not_ready_pods - - -def wait_for_openstack_pods_status(pod_names=None, application=None, - component=None, status=PodStatus.RUNNING, - con_ssh=None, timeout=60, check_interval=5, - fail_ok=False): - """ - Wait for openstack pods to be in Completed or Running state - Args: - pod_names (str|tuple|list|None): - application (str|None): only used when pod_names are not provided - component (str|None): only used when pod_names are not provided - status (str|tuple|list|None): - con_ssh: - timeout: - check_interval: - fail_ok: - - Returns: - - """ - if not pod_names and not application and not component: - raise ValueError( - 'pod_names, or application and component have to be provided to ' - 'filter out pods') - - labels = None - if not 
pod_names: - labels = [] - if application: - labels.append('application={}'.format(application)) - if component: - labels.append('component={}'.format(component)) - - return wait_for_pods_status(pod_names=pod_names, labels=labels, - status=status, namespace='openstack', - con_ssh=con_ssh, check_interval=check_interval, - timeout=timeout, fail_ok=fail_ok) - - -def get_pod_logs(pod_name, namespace='openstack', grep_pattern=None, - tail_count=10, strict=False, - fail_ok=False, con_ssh=None): - """ - Get logs for given pod via kubectl logs cmd - Args: - pod_name (str): partial or full pod_name. If full name, set strict to - True. - namespace (str|None): - grep_pattern (str|None): - tail_count (int|None): - strict (bool): - fail_ok: - con_ssh: - - Returns (str): - - """ - if pod_name and not strict: - grep = '-E -i "{}|NAME"'.format(pod_name) - pod_name = get_resources(namespace='openstack', resource_type='pod', - con_ssh=con_ssh, rtn_list=True, - grep=grep, fail_ok=fail_ok)[0].get('name') - namespace = '-n {} '.format(namespace) if namespace else '' - - grep = '' - if grep_pattern: - if isinstance(grep_pattern, str): - grep_pattern = (grep_pattern,) - grep = ''.join( - [' | grep --color=never {}'.format(grep_str) for grep_str in - grep_pattern]) - tail = ' | tail -n {}'.format(tail_count) if tail_count else '' - args = '{}{}{}{}'.format(namespace, pod_name, grep, tail) - code, output = exec_kube_cmd(sub_cmd='logs', args=args, con_ssh=con_ssh) - if not output and not fail_ok: - raise exceptions.KubeError( - "No kubectl logs found with args: {}".format(args)) - return output - - -def dump_pods_info(con_ssh=None): - """ - Dump pods info for debugging purpose. - Args: - con_ssh: - - Returns: - - """ - LOG.info('------- Dump pods info --------') - exec_kube_cmd('get pods', - '--all-namespaces -o wide | grep -v -e Running -e Completed', - con_ssh=con_ssh, - fail_ok=True) - exec_kube_cmd( - 'get pods', - "--all-namespaces -o wide | grep -v -e Running -e Completed " - "-e NAMESPACE | awk " - + """'{system("kubectl describe pods -n "$1" "$2)}'""""", - con_ssh=con_ssh, fail_ok=True) - - -def get_openstack_pods(field='Name', namespace='openstack', application=None, - component=None, pod_names=None, - extra_labels=None, field_selectors=None, - exclude_label=False, fail_ok=False, con_ssh=None, - strict=True, exclude=False, **kwargs): - """ - Get openstack pods via kubectl get pods - Note that pod labels can be found via kubectl get pods -n - --show-labels - Args: - field (str|list|tuple): - namespace: - application (str|None): label: application - component (str|None): label: component - pod_names - extra_labels (str|None): - field_selectors (str|list|tuple|dict|None): - exclude_label - fail_ok: - con_ssh: - exclude: - strict: - **kwargs: - - Returns (list): - - """ - if pod_names: - labels = None - else: - connector = '!=' if exclude_label else '=' - labels = [] - if application: - labels.append('application{}{}'.format(connector, application)) - if component: - labels.append('component{}{}'.format(connector, component)) - if extra_labels: - labels.append(extra_labels) - labels = ','.join(labels) - - pods = get_pods(pod_names=pod_names, field=field, namespace=namespace, - labels=labels, fail_ok=fail_ok, - field_selectors=field_selectors, strict=strict, - exclude=exclude, con_ssh=con_ssh, **kwargs) - if not pods: - msg = "No pods found for namespace - {} with selectors: {}".format( - namespace, labels) - LOG.info(msg) - if not fail_ok: - raise exceptions.KubeError(msg) - - return pods - - -def 
get_openstack_configs(conf_file, configs=None, node=None, pods=None, - label_component=None, label_app=None, - fail_ok=False, con_ssh=None): - """ - Get config values for openstack pods with given chart - Args: - pods (str|list|tuple): openstack pod name(s) - label_app (str|None): e.g., nova, neutron, panko, ... - label_component (str|None): e.g., api, compute, etc. - conf_file (str): config file path inside the filtered openstack - container, e.g., /etc/nova/nova.conf - configs (dict): {(str): (str|list|tuple), - : ..} - e.g., {'database': 'event_time_to_live'} - node (str|None) - fail_ok: - con_ssh: - - Returns (dict): {(str): (dict), ... } - - """ - if not pods and not (label_component and label_app): - raise ValueError('Either pods, or label_component and label_app ' - 'have to be specified to locate the containers') - - if not pods: - pods = get_openstack_pods(component=label_component, - application=label_app, fail_ok=fail_ok, - node=node, - con_ssh=con_ssh) - elif isinstance(pods, str): - pods = (pods,) - - LOG.info('Getting {} {} values from openstack pods: {}'.format(conf_file, - configs, - pods)) - - cmd = 'cat {}'.format(conf_file) - if configs: - all_fields = [] - section_filter = r'$1 ~ /^\[.*\]/' - for fields in configs.values(): - if isinstance(fields, str): - all_fields.append(fields) - elif isinstance(fields, (tuple, list)): - all_fields += list(fields) - - fields_filter = '|| '.join( - ['$1 ~ /^{}/'.format(field) for field in set(all_fields)]) - cmd += r" | awk '{{ if ( {} || {}) print }}' | grep --color=never " \ - r"--group-separator='' -B 1 -v '\[.*\]'". \ - format(section_filter, fields_filter) - - config_values = {} - for pod in pods: - code, output = exec_cmd_in_container(cmd, pod=pod, - namespace='openstack', - con_ssh=con_ssh, fail_ok=fail_ok) - if code > 0: - config_values[pod] = {} - continue - - # Remove irrelevant string at beginning of the output - output = "[{}".format( - re.split(r'\n\[', r'\n{}'.format(output), maxsplit=1)[-1]) - settings = configparser.ConfigParser() - settings.read_string(output) - config_values[pod] = settings - - return config_values diff --git a/automated-pytest-suite/keywords/network_helper.py b/automated-pytest-suite/keywords/network_helper.py deleted file mode 100755 index ccf6d13a..00000000 --- a/automated-pytest-suite/keywords/network_helper.py +++ /dev/null @@ -1,5694 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - - -import ipaddress -import math -import re -import os -import time -from collections import Counter -from contextlib import contextmanager - -import pexpect - -from consts.auth import Tenant, HostLinuxUser -from consts.filepaths import UserData -from consts.stx import Networks, PING_LOSS_RATE, MELLANOX4, \ - VSHELL_PING_LOSS_RATE, DevClassID, UUID -from consts.proj_vars import ProjVar -from consts.timeout import VMTimeout -from keywords import common, keystone_helper, host_helper, system_helper -from testfixtures.fixture_resources import ResourceCleanup -from utils import table_parser, cli, exceptions -from utils.clients.ssh import NATBoxClient, get_cli_client, ControllerClient -from utils.tis_log import LOG - - -def is_valid_ip_address(ip=None): - """ - Validate the input IP address - - Args: - ip: IPv4 or IPv6 address - - Returns: - True: valid IPv4 or IPv6 address - False: otherwise - """ - return bool(get_ip_address_str(ip)) - - -def get_ip_address_str(ip=None): - """ - Get the representation of the input IP address - - Args: - ip: IPv4 or IPv6 address - - Returns: - str: string representation of the input IP address if it's valid - None: otherwise - """ - try: - ipaddr = ipaddress.ip_address(ip) - return str(ipaddr) - except ValueError: - # invalid IPv4 or IPv6 address - return None - - -def create_network(name=None, shared=None, project=None, network_type=None, - segmentation_id=None, qos=None, - physical_network=None, vlan_transparent=None, - port_security=None, avail_zone=None, external=None, - default=None, tags=None, fail_ok=False, auth_info=None, - con_ssh=None, cleanup=None): - """ - Create a network for given tenant - - Args: - name (str): name of the network - shared (bool) - project: such as tenant1, tenant2. - network_type (str): The physical mechanism by which the virtual - network is implemented - segmentation_id (None|str): w VLAN ID for VLAN networks - qos - physical_network (str): Name of the physical network over which the - virtual network is implemented - vlan_transparent(None|bool): Create a VLAN transparent network - port_security (None|bool) - avail_zone (None|str) - external (None|bool) - default (None|bool): applicable only if external=True. 
- tags (None|False|str|list|tuple) - fail_ok (bool): - auth_info (dict): run 'openstack network create' cli using these - authorization info - con_ssh (SSHClient): - cleanup (str|None): function, module, class, session or None - - Returns (tuple): (rnt_code (int), net_id (str), message (str)) - - """ - if name is None: - name = common.get_unique_name(name_str='net') - - args = name - if project is not None: - tenant_id = keystone_helper.get_projects(field='ID', name=project, - con_ssh=con_ssh)[0] - args += ' --project ' + tenant_id - - if shared is not None: - args += ' --share' if shared else ' --no-share' - if vlan_transparent is not None: - args += ' --transparent-vlan' if vlan_transparent else \ - ' --no-transparent-vlan' - if port_security is not None: - args += ' --enable-port-security' if port_security else \ - ' --disable-port-security' - - if external: - args += ' --external' - if default is not None: - args += ' --default' if default else ' --no-default' - elif external is False: - args += ' --internal' - - if tags is False: - args += ' --no-tag' - elif tags: - if isinstance(tags, str): - tags = [tags] - for tag in tags: - args += ' --tag ' + tag - - if segmentation_id: - args += ' --provider:segmentation_id ' + segmentation_id - if network_type: - args += ' --provider:network_type ' + network_type - if physical_network: - args += ' --provider:physical_network ' + physical_network - if avail_zone: - args += ' --availability-zone-hint ' + avail_zone - if qos: - args += ' --wrs-tm:qos ' + qos - - LOG.info("Creating network: Args: {}".format(args)) - code, output = cli.openstack('network create', args, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - table_ = table_parser.table(output) - net_id = table_parser.get_value_two_col_table(table_, 'id') - if cleanup and net_id: - ResourceCleanup.add('network', net_id, scope=cleanup) - - if code == 1: - return 1, output - - succ_msg = "Network {} is successfully created".format(net_id) - LOG.info(succ_msg) - return 0, net_id - - -def create_subnet(network, name=None, subnet_range=None, gateway=None, - dhcp=None, dns_servers=None, - allocation_pools=None, ip_version=None, subnet_pool=None, - use_default_subnet_pool=None, - project=None, project_domain=None, prefix_length=None, - description=None, host_routes=None, - ipv6_ra_mode=None, ipv6_addr_mode=None, network_segment=None, - service_types=None, - tags=None, no_tag=None, fail_ok=False, auth_info=None, - con_ssh=None, cleanup=None): - """ - Create a subnet with given parameters - - Args: - network (str): id of the network to create subnet for - name (str|None): name of the subnet - subnet_range (str|None): such as "192.168.3.0/24" - project (str|None): such as tenant1, tenant2. - project_domain (str|None) - gateway (str): Valid values: , auto, none - dhcp (bool): whether or not to enable DHCP - dns_servers (list|tuple|str|None): DNS name servers. e.g., - ["147.11.57.133", "128.224.144.130", "147.11.57.128"] - allocation_pools (list|dict|None): {'start': , 'end': - 'end_ip'} - ip_version (int|str|None): 4, or 6 - subnet_pool (str|None): ID or name of subnetpool from which this - subnet will obtain a CIDR. 
-        use_default_subnet_pool (bool|None)
-        prefix_length (str|None)
-        description (str|None)
-        host_routes (str|None)
-        ipv6_addr_mode (str|None)
-        ipv6_ra_mode (str|None)
-        network_segment (str|None)
-        service_types (list|tuple|str|None)
-        tags (list|tuple|str|None)
-        no_tag (bool|None)
-        fail_ok (bool):
-        auth_info (dict): run the openstack subnet create cli using these
-            authorization info
-        con_ssh (SSHClient):
-        cleanup (str|None)
-
-    Returns (tuple): (rtn_code (int), subnet_id (str))
-
-    """
-
-    if subnet_range is None and subnet_pool is None:
-        raise ValueError("Either subnet_range or subnet_pool has to be "
-                         "specified.")
-
-    args_dict = {
-        '--project': project,
-        '--project-domain': project_domain,
-        '--subnet-pool': subnet_pool,
-        '--use-default-subnet-pool': use_default_subnet_pool,
-        '--prefix-length': prefix_length,
-        '--subnet-range': subnet_range,
-        '--dhcp': True if dhcp else None,
-        '--no-dhcp': True if dhcp is False else None,
-        '--gateway': gateway,
-        '--ip-version': ip_version,
-        '--ipv6-ra-mode': ipv6_ra_mode,
-        '--ipv6-address-mode': ipv6_addr_mode,
-        '--network-segment': network_segment,
-        '--network': network,
-        '--description': description,
-        '--allocation-pool': allocation_pools,
-        '--dns-nameserver': dns_servers,
-        '--host-route': host_routes,
-        '--service-type': service_types,
-        '--tag': tags,
-        '--no-tag': no_tag
-    }
-
-    if not name:
-        name = '{}-subnet'.format(
-            get_net_name_from_id(network, con_ssh=con_ssh, auth_info=auth_info))
-    name = "{}-{}".format(name, common.Count.get_subnet_count())
-    args = '{} {}'.format(
-        common.parse_args(args_dict, repeat_arg=True, vals_sep=','), name)
-
-    LOG.info("Creating subnet for network: {}. Args: {}".format(network, args))
-    code, output = cli.openstack('subnet create', args, ssh_client=con_ssh,
-                                 fail_ok=fail_ok, auth_info=auth_info)
-    table_ = table_parser.table(output)
-    subnet_id = table_parser.get_value_two_col_table(table_, 'id')
-    if cleanup and subnet_id:
-        ResourceCleanup.add('subnet', subnet_id, scope=cleanup)
-
-    if code > 0:
-        return 1, output
-
-    LOG.info(
-        "Subnet {} is successfully created for network {}".format(subnet_id,
-                                                                  network))
-    return 0, subnet_id
-
-
-def delete_subnets(subnets, auth_info=Tenant.get('admin'), con_ssh=None,
-                   fail_ok=False):
-    """
-    Delete subnet(s)
-    Args:
-        subnets (str|list|tuple):
-        auth_info:
-        con_ssh:
-        fail_ok:
-
-    Returns (tuple):
-
-    """
-    if isinstance(subnets, str):
-        subnets = (subnets,)
-
-    args = ' '.join(subnets)
-    LOG.info("Deleting subnet {}".format(subnets))
-    code, output = cli.openstack('subnet delete', args, ssh_client=con_ssh,
-                                 fail_ok=True, auth_info=auth_info)
-
-    if code > 0:
-        return 1, output
-
-    field = 'ID' if re.match(UUID, subnets[0]) else 'Name'
-    undeleted_subnets = list(set(subnets) & set(
-        get_subnets(auth_info=auth_info, con_ssh=con_ssh, field=field)))
-    if undeleted_subnets:
-        msg = "Subnet(s) still listed in openstack subnet list after " \
-              "deletion: {}".format(undeleted_subnets)
-        if fail_ok:
-            LOG.warning(msg)
-            return 2, msg
-        raise exceptions.NeutronError(msg)
-
-    succ_msg = "Subnet(s) successfully deleted: {}".format(subnets)
-    LOG.info(succ_msg)
-    return 0, succ_msg
-
-
-def set_subnet(subnet, allocation_pools=None, dns_servers=None,
-               host_routes=None, service_types=None,
-               tags=None, no_tag=None, name=None, dhcp=None, gateway=None,
-               network_segment=None, description=None,
-               no_dns_servers=None, no_host_routes=None,
-               no_allocation_pool=None,
-               auth_info=Tenant.get('admin'), fail_ok=False, con_ssh=None):
-    kwargs = locals()
-    kwargs['unset'] = False
-    return __update_subnet(**kwargs)
-
-
-def unset_subnet(subnet, allocation_pools=None, dns_servers=None,
-                 host_routes=None, service_types=None,
-                 tags=None, no_tag=None, auth_info=Tenant.get('admin'),
-                 fail_ok=False, con_ssh=None):
-    kwargs = locals()
-    kwargs['unset'] = True
-    return __update_subnet(**kwargs)
-
-
-def __update_subnet(subnet, unset=False, allocation_pools=None,
-                    dns_servers=None, host_routes=None, service_types=None,
-                    tags=None, no_tag=None, name=None, dhcp=None, gateway=None,
-                    network_segment=None, description=None,
-                    no_dns_servers=None, no_host_routes=None,
-                    no_allocation_pool=None,
-                    auth_info=Tenant.get('admin'), fail_ok=False, con_ssh=None):
-    """
-    Set or unset properties of the given subnet
-    Args:
-        subnet (str):
-        unset (bool): set or unset
-        allocation_pools (None|str|tuple|list):
-        dns_servers (None|str|tuple|list):
-        host_routes (None|str|tuple|list):
-        service_types (None|str|tuple|list):
-        tags (None|bool):
-        name (str|None):
-        dhcp (None|bool):
-        gateway (str|None): valid str: an ip address or 'none'
-        description:
-        auth_info:
-        fail_ok:
-        con_ssh:
-
-    Returns:
-
-    """
-    LOG.info("Update subnet {}".format(subnet))
-
-    arg_dict = {
-        '--allocation-pool': allocation_pools,
-        '--dns-nameserver': dns_servers,
-        '--host-route': host_routes,
-        '--service-type': service_types,
-        '--tag': tags,
-    }
-
-    if unset:
-        arg_dict.update(**{'--all-tag': True if no_tag else None})
-        cmd = 'unset'
-    else:
-        set_only_dict = {
-            '--name': name,
-            '--dhcp': True if dhcp else None,
-            '--gateway': gateway,
-            '--description': description,
-            '--network-segment': network_segment,
-            '--no-dhcp': True if dhcp is False else None,
-            '--no-tag': True if no_tag else None,
-            '--no-dns-nameservers': True if no_dns_servers else None,
-            '--no-host-route': True if no_host_routes else None,
-            '--no-allocation-pool': True if no_allocation_pool else None,
-        }
-        arg_dict.update(**set_only_dict)
-        cmd = 'set'
-
-    args = '{} {}'.format(
-        common.parse_args(args_dict=arg_dict, repeat_arg=True, vals_sep=','),
-        subnet)
-
-    code, output = cli.openstack('subnet {}'.format(cmd), args,
-                                 ssh_client=con_ssh, fail_ok=fail_ok,
-                                 auth_info=auth_info)
-
-    if code > 0:
-        return 1, output
-
-    LOG.info("Subnet {} {} successfully".format(subnet, cmd))
-    return 0, subnet
-
-
-def get_subnets(field='ID', long=False, network=None, subnet_range=None,
-                gateway_ip=None, full_name=None,
-                ip_version=None, dhcp=None, project=None, project_domain=None,
-                service_types=None,
-                tags=None, any_tags=None, not_tags=None, not_any_tags=None,
-                name=None, strict=True, regex=False, auth_info=None,
-                con_ssh=None):
-    """
-    Get subnet ids based on given criteria.
-
-    Args:
-        field (str): header of subnet list table
-        long (bool)
-        network (str|None):
-        subnet_range (str|None):
-        gateway_ip (str|None):
-        full_name (str|None):
-        ip_version (str|None):
-        dhcp (bool)
-        project (str|None):
-        project_domain (str|None):
-        service_types (str|list|tuple|None):
-        tags (str|list|tuple|None):
-        any_tags (str|list|tuple|None):
-        not_tags (str|list|tuple|None):
-        not_any_tags (str|list|tuple|None):
-        name (str): name of the subnet
-        strict (bool): whether to perform strict search on given name and cidr
-        regex (bool): whether to use regex to search
-        auth_info (dict):
-        con_ssh (SSHClient):
-
-    Returns (list): a list of subnet ids
-
-    """
-    args_dict = {
-        '--long': long,
-        '--ip-version': ip_version,
-        '--network': network,
-        '--subnet-range': subnet_range,
-        '--gateway': gateway_ip,
-        '--name': full_name,
-        '--dhcp': True if dhcp else None,
-        '--no-dhcp': True if dhcp is False else None,
-        '--project': project,
-        '--project-domain': project_domain,
-        '--tags': tags,
-        '--any-tags': any_tags,
-        '--not-tags': not_tags,
-        '--not-any-tags': not_any_tags
-    }
-    args = common.parse_args(args_dict, repeat_arg=False, vals_sep=',')
-    service_type_args = common.parse_args({'--service-type': service_types},
-                                          repeat_arg=True)
-    args = ' '.join((args, service_type_args))
-
-    table_ = table_parser.table(
-        cli.openstack('subnet list', args, ssh_client=con_ssh,
-                      auth_info=auth_info)[1])
-    if name is not None:
-        table_ = table_parser.filter_table(table_, strict=strict, regex=regex,
-                                           name=name)
-
-    return table_parser.get_multi_values(table_, field)
-
-
-def get_subnet_values(subnet, fields, con_ssh=None,
-                      auth_info=Tenant.get('admin')):
-    """
-    Subnet values for given fields via openstack subnet show
-    Args:
-        subnet:
-        fields:
-        con_ssh:
-        auth_info:
-
-    Returns (list):
-
-    """
-    table_ = table_parser.table(
-        cli.openstack('subnet show', subnet, ssh_client=con_ssh,
-                      auth_info=auth_info)[1])
-    return table_parser.get_multi_values_two_col_table(table_, fields)
-
-
-def get_network_values(network, fields, strict=True, rtn_dict=False,
-                       con_ssh=None, auth_info=Tenant.get('admin')):
-    """
-    Get network values via openstack network show
-    Args:
-        network:
-        fields:
-        strict:
-        rtn_dict:
-        con_ssh:
-        auth_info:
-
-    Returns (list|dict):
-
-    """
-    if isinstance(fields, str):
-        fields = [fields]
-
-    table_ = table_parser.table(
-        cli.openstack('network show', network, ssh_client=con_ssh,
-                      auth_info=auth_info)[1])
-    vals = []
-    for field in fields:
-        val = table_parser.get_value_two_col_table(table_, field, strict=strict,
-                                                   merge_lines=True)
-        if field == 'subnets':
-            val = val.split(',')
-            val = [val_.strip() for val_ in val]
-        vals.append(val)
-
-    if rtn_dict:
-        return {fields[i]: vals[i] for i in range(len(fields))}
-    return vals
-
-
-def set_network(net_id, name=None, enable=None, share=None,
-                enable_port_security=None, external=None, default=None,
-                provider_net_type=None, provider_phy_net=None,
-                provider_segment=None, transparent_vlan=None,
-                auth_info=Tenant.get('admin'), fail_ok=False, con_ssh=None,
-                **kwargs):
-    """
-    Update network with given parameters
-    Args:
-        net_id (str):
-        name (str|None): name to update to. Don't update name when None.
-        enable (bool|None): True to add --enable. False to add --disable.
-            Don't update enable/disable when None.
- share (bool|None): - enable_port_security (bool|None): - external (bool|None): - default (bool|None): - provider_net_type (str|None): - provider_phy_net (str|None): - provider_segment (str|int|None): - transparent_vlan (bool|None): - auth_info (dict): - fail_ok (bool): - con_ssh (SSHClient): - **kwargs: additional key/val pairs that are not listed in 'openstack - network update -h'. - e,g.,{'wrs-tm:qos': } - - Returns (tuple): (code, msg) - (0, "Network is successfully updated") Network updated - successfully - (1, ) 'openstack network update' cli is rejected - - """ - args_dict = { - '--name': (name, {'name': name}), - '--enable': ( - True if enable is True else None, {'admin_state_up': 'UP'}), - '--disable': ( - True if enable is False else None, {'admin_state_up': 'DOWN'}), - '--share': (True if share is True else None, {'shared': 'True'}), - '--no-share': (True if share is False else None, {'shared': 'False'}), - '--enable-port-security': ( - True if enable_port_security is True else None, - {'port_security_enabled': 'True'}), - '--disable-port-security': ( - True if enable_port_security is False else None, - {'port_security_enabled': 'False'}), - '--external': ( - True if external is True else None, - {'router:external': 'External'}), - '--internal': ( - True if external is False else None, - {'router:external': 'Internal'}), - '--default': ( - True if default is True else None, {'is_default': 'True'}), - '--no-default': ( - True if default is False else None, {'is_default': 'False'}), - '--transparent-vlan': (True if transparent_vlan is True else None, - {'vlan_transparent': 'True'}), - '--no-transparent-vlan': (True if transparent_vlan is False else None, - {'vlan_transparent': 'False'}), - '--provider-network-type': ( - provider_net_type, {'provider:network_type': provider_net_type}), - '--provider-physical-network': ( - provider_phy_net, {'provider:physical_network': provider_phy_net}), - '--provider-segment': ( - provider_segment, {'provider:segmentation_id': provider_segment}), - } - checks = {} - args_str = '' - for arg in args_dict: - val, check = args_dict[arg] - if val is not None: - set_val = '' if val is True else ' {}'.format(val) - args_str += ' {}{}'.format(arg, set_val) - if check: - checks.update(**check) - else: - LOG.info("Unknown check field in 'openstack network show' " - "for arg {}".format(arg)) - - for key, val_ in kwargs.items(): - val_ = ' {}'.format(val_) if val_ else '' - field_name = key.split('--', 1)[-1] - arg = '--{}'.format(field_name) - args_str += ' {}{}'.format(arg, val_) - if val_: - checks.update(**kwargs) - else: - LOG.info("Unknown check field in 'openstack network show' for " - "arg {}".format(arg)) - - if not args_str: - raise ValueError( - "Nothing to update. Please specify at least one None value") - - LOG.info("Updating network {} with: {}".format(net_id, args_str)) - code, out = cli.openstack('network set', '{} {}'.format(args_str, net_id), - ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info) - if code > 0: - return 1, out - - if checks: - LOG.info("Check network {} is updated with: {}".format(net_id, checks)) - actual_res = get_network_values(net_id, fields=list(checks.keys()), - rtn_dict=True, auth_info=auth_info) - failed = {} - for field in checks: - expt_val = checks[field] - actual_val = actual_res[field] - if expt_val != actual_val: - failed[field] = (expt_val, actual_val) - - # Fail directly. 
If a field is not allowed to be updated, the cli
-        # should be rejected
-        assert not failed, "Actual value is different than set value in " \
-                           "following fields: {}".format(failed)
-
-    msg = "Network {} is successfully updated".format(net_id)
-    return 0, msg
-
-
-def create_security_group(name, project=None, description=None,
-                          project_domain=None, tag=None, no_tag=None,
-                          auth_info=None, fail_ok=False, con_ssh=None,
-                          cleanup='function'):
-    """
-    Create a security group
-    Args:
-        name (str):
-        project
-        project_domain
-        tag (str|None|list|tuple)
-        no_tag (bool|None)
-        description (str):
-        auth_info (dict): create under this project
-        fail_ok (bool):
-        con_ssh
-        cleanup (str):
-
-    Returns (tuple):
-        (0, group_id)    security group created successfully
-        (1, output)      cli rejected; only returned if fail_ok=True
-    """
-    args_dict = {
-        '--project': project,
-        '--project-domain': project_domain,
-        '--description': description,
-        '--tag': tag,
-        '--no-tag': no_tag,
-    }
-    args = '{} {}'.format(common.parse_args(args_dict, repeat_arg=True), name)
-
-    code, output = cli.openstack("security group create", args,
-                                 ssh_client=con_ssh, fail_ok=fail_ok,
-                                 auth_info=auth_info)
-    if code > 0:
-        return 1, output
-
-    table_ = table_parser.table(output)
-    group_id = table_parser.get_value_two_col_table(table_, 'id')
-    if cleanup:
-        ResourceCleanup.add('security_group', group_id, scope=cleanup)
-
-    LOG.info("Security group created: name={} id={}".format(name, group_id))
-    return 0, group_id
-
-
-def delete_security_group(group_id, fail_ok=False,
-                          auth_info=Tenant.get('admin')):
-    """
-    Delete a security group
-    Args:
-        group_id (str): security group to be deleted
-        fail_ok
-        auth_info (dict):
-
-    Returns (tuple): (code, msg)
-        (0, msg): succeeded
-        (1, err_msg): failed
-    """
-    LOG.info("Deleting security group {}".format(group_id))
-    return cli.openstack("security group delete", group_id, fail_ok=fail_ok,
-                         auth_info=auth_info)
-
-
-def create_security_group_rule(group=None, remote_ip=None, remote_group=None,
-                               description=None, dst_port=None,
-                               icmp_type=None, icmp_code=None, protocol=None,
-                               ingress=None, egress=None,
-                               ethertype=None, project=None,
-                               project_domain=None, fail_ok=False,
-                               auth_info=None,
-                               con_ssh=None, field='id', cleanup=None):
-    """
-    Create security group rule for given security group
-    Args:
-        group:
-        remote_ip:
-        remote_group:
-        description:
-        dst_port:
-        icmp_type:
-        icmp_code:
-        protocol:
-        ingress:
-        egress:
-        ethertype:
-        project:
-        project_domain:
-        fail_ok:
-        auth_info:
-        con_ssh:
-        field (str)
-        cleanup
-
-    Returns:
-
-    """
-    if not group:
-        groups = get_security_groups(name='default', project=project,
-                                     project_domain=project_domain,
-                                     auth_info=auth_info, con_ssh=con_ssh)
-        if len(groups) != 1:
-            raise ValueError(
-                'group has to be specified when multiple default groups exist')
-        group = groups[0]
-
-    args_dict = {
-        'remote-ip': remote_ip,
-        'remote-group': remote_group,
-        'description': description,
-        'dst-port': dst_port,
-        'icmp-type': icmp_type,
-        'icmp-code': icmp_code,
-        'protocol': protocol,
-        'ingress': ingress,
-        'egress': egress,
-        'ethertype': ethertype,
-        'project': project,
-        'project-domain': project_domain
-    }
-    args = ' '.join((common.parse_args(args_dict), group))
-
-    LOG.info(
-        "Creating security group rule for group {} with args: {}".format(group,
-                                                                         args))
-    code, output = cli.openstack('security group rule create', args,
-                                 ssh_client=con_ssh, fail_ok=fail_ok,
-                                 auth_info=auth_info)
-    if code > 0:
-        return 1, output
-
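-    # the rule create CLI prints a two-column field/value table on success;
-    # the requested field ('id' by default) is read from that table below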
-    table_ = table_parser.table(output)
-    value = table_parser.get_value_two_col_table(table_, field)
-    if cleanup:
-        ResourceCleanup.add('security_group_rule',
-                            table_parser.get_value_two_col_table(table_, 'id'))
-
-    LOG.info(
-        "Security group rule created successfully for group {} with "
-        "{}={}".format(group, field, value))
-    return 0, value
-
-
-def delete_security_group_rules(sec_rules, check_first=True, fail_ok=False,
-                                con_ssh=None,
-                                auth_info=Tenant.get('admin')):
-    """
-    Delete given security group rules
-    Args:
-        sec_rules:
-        check_first:
-        fail_ok:
-        con_ssh:
-        auth_info:
-
-    Returns (tuple):
-
-    """
-    if isinstance(sec_rules, str):
-        sec_rules = (sec_rules,)
-
-    if check_first:
-        existing_sec_rules = get_security_group_rules(long=False,
-                                                      auth_info=auth_info,
-                                                      con_ssh=con_ssh)
-        sec_rules = list(set(sec_rules) & set(existing_sec_rules))
-
-    code, output = cli.openstack('security group rule delete',
-                                 ' '.join(sec_rules), ssh_client=con_ssh,
-                                 fail_ok=fail_ok,
-                                 auth_info=auth_info)
-    if code > 0:
-        return 1, output
-
-    post_sec_rules = get_security_group_rules(long=False, auth_info=auth_info,
-                                              con_ssh=con_ssh)
-    undeleted_rules = list(set(sec_rules) & set(post_sec_rules))
-    if undeleted_rules:
-        msg = 'Security group rule(s) still exist after deletion: {}'.format(
-            undeleted_rules)
-        LOG.warning(msg)
-        if fail_ok:
-            return 2, msg
-        raise exceptions.NeutronError(msg)
-
-    msg = "Security group rule(s) deleted successfully: {}".format(sec_rules)
-    LOG.info(msg)
-    return 0, msg
-
-
-def get_security_group_rules(field='ID', long=True, protocol=None, ingress=None,
-                             egress=None, group=None,
-                             auth_info=None, con_ssh=None, **filters):
-    """
-    Get security group rules
-    Args:
-        field (str|list|tuple)
-        long (bool)
-        protocol:
-        ingress:
-        egress:
-        group (str): security group id
-        auth_info:
-        con_ssh:
-        **filters: header value pairs for security group rules table
-
-    Returns (list):
-
-    """
-    args_dict = {
-        'protocol': protocol,
-        'ingress': ingress,
-        'egress': egress,
-        'long': long,
-    }
-    args = common.parse_args(args_dict)
-    if group:
-        args += ' {}'.format(group)
-    output = cli.openstack('security group rule list', args, ssh_client=con_ssh,
-                           auth_info=auth_info)[1]
-    table_ = table_parser.table(output)
-    return table_parser.get_multi_values(table_, field, **filters)
-
-
-def add_icmp_and_tcp_rules(security_group, auth_info=Tenant.get('admin'),
-                           con_ssh=None, cleanup=None):
-    """
-    Add icmp and tcp security group rules to given security group to allow
-    ping and ssh
-    Args:
-        security_group (str):
-        auth_info:
-        con_ssh:
-        cleanup
-
-    """
-    security_rules = get_security_group_rules(
-        con_ssh=con_ssh, auth_info=auth_info, group=security_group,
-        **{'IP Protocol': ('tcp', 'icmp')})
-    if len(security_rules) >= 3:
-        LOG.info("Security group rules for {} already exist to allow ping and "
-                 "ssh".format(security_group))
-        return
-
-    LOG.info("Create icmp and ssh security group rules for {} with best "
-             "effort".format(security_group))
-    for rules in (('icmp', None, None), ('icmp', None, True), ('tcp', 22, None)):
-        protocol, dst_port, egress = rules
-        create_security_group_rule(group=security_group, protocol=protocol,
-                                   dst_port=dst_port, egress=egress,
-                                   fail_ok=True,
-                                   auth_info=auth_info, cleanup=cleanup)
-
-
-def get_net_name_from_id(net_id, con_ssh=None, auth_info=None):
-    """
-    Get network name from id
-
-    Args:
-        net_id (str):
-        con_ssh (SSHClient):
-        auth_info (dict):
-
-    Returns (str): name of a network
-
-    """
-    return get_networks(auth_info=auth_info, con_ssh=con_ssh, net_id=net_id,
field='Name')[0] - - -def get_net_id_from_name(net_name, con_ssh=None, auth_info=None): - """ - Get network id from full name - - Args: - net_name (str): - con_ssh (SSHClient): - auth_info (dict): - - Returns (str): id of a network - - """ - return get_networks(auth_info=auth_info, con_ssh=con_ssh, - full_name=net_name, field='ID')[0] - - -def create_floating_ip(external_net=None, subnet=None, port=None, - fixed_ip_addr=None, floating_ip_addr=None, - qos_policy=None, description=None, dns_domain=None, - dns_name=None, tags=None, no_tag=None, - project=None, project_domain=None, fail_ok=False, - con_ssh=None, auth_info=None, cleanup=None): - """ - Create a floating ip for given tenant - - Args: - external_net (str|None): external network to allocate the floating - ip from - subnet (str|None): - qos_policy (str|None): - description (str|None): - dns_name (str|None): - dns_domain (str|None): - tags (tuple|list|str|None) - no_tag (bool|None) - project_domain (str|None): - project (str|None): name of the tenant to create floating ip for. - e.g., 'tenant1', 'tenant2' - port (str|None): id of the port - fixed_ip_addr (str): fixed ip address. such as 192.168.x.x - floating_ip_addr (str): specific floating ip to create - fail_ok (bool): - con_ssh (SSHClient): - auth_info (dict): - cleanup (None|str): valid scopes: function, class, module, session - - Returns (str): floating IP. such as 192.168.x.x - - """ - if not external_net: - external_net = get_networks(con_ssh=con_ssh, external=True, - auth_info=auth_info)[0] - - args_dict = { - '--subnet': subnet, - '--port': port, - '--floating-ip-address': floating_ip_addr, - '--fixed-ip-address': fixed_ip_addr, - '--qos-policy': qos_policy, - '--dns-domain': dns_domain, - '--dns-name': dns_name, - '--description': description, - '--project': project, - '--project-domain': project_domain, - '--tag': tags, - '--no-tag': no_tag - } - - args = '{} {}'.format(common.parse_args(args_dict, repeat_arg=True), - external_net) - code, output = cli.openstack('floating ip create', args, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - - table_ = table_parser.table(output) - actual_fip_addr = table_parser.get_value_two_col_table( - table_, "floating_ip_address") - if actual_fip_addr and cleanup: - ResourceCleanup.add('floating_ip', actual_fip_addr, scope=cleanup) - - if code > 0: - return 1, output - - if not actual_fip_addr: - msg = "Floating IP is not found in the list" - if fail_ok: - LOG.warning(msg) - return 2, msg - raise exceptions.NeutronError(msg) - - succ_msg = "Floating IP created successfully: {}".format(actual_fip_addr) - LOG.info(succ_msg) - return 0, actual_fip_addr - - -def delete_floating_ips(floating_ips, auth_info=Tenant.get('admin'), - con_ssh=None, fail_ok=False): - """ - Delete a floating ip - - Args: - floating_ips (str|tuple|list): floating ip to delete. - auth_info (dict): - con_ssh (SSHClient): - fail_ok (bool): whether to raise exception if fail to delete floating ip - - Returns (tuple): (rtn_code(int), msg(str)) - - (0, Floating ip is successfully deleted.) - - (1, ) - - (2, Floating ip still exists in floatingip-list.) 
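-
-        A minimal caller sketch (hypothetical address), following the module's
-        0/1/2 return-code convention:
-            code, msg = delete_floating_ips('192.168.101.5', fail_ok=True)
-            if code > 0:
-                LOG.warning('floating ip cleanup failed: {}'.format(msg))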
- - """ - if isinstance(floating_ips, str): - floating_ips = (floating_ips,) - - args = ' '.join(floating_ips) - code, output = cli.openstack('floating ip delete', args, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - - if code > 0: - return 1, output - - post_deletion_fips = get_floating_ips(field='ID', con_ssh=con_ssh, - auth_info=Tenant.get('admin')) - undeleted_fips = list(set(floating_ips) & set(post_deletion_fips)) - - if undeleted_fips: - msg = "Floating ip {} still exists in floating ip list.".format( - undeleted_fips) - if fail_ok: - LOG.warning(msg) - return 2, msg - raise exceptions.NeutronError(msg) - - succ_msg = "Floating ip deleted successfully: {}".format(floating_ips) - LOG.info(succ_msg) - return 0, succ_msg - - -def get_floating_ips(field='Floating IP Address', long=False, network=None, - port=None, router=None, - floating_ip=None, fixed_ip=None, status=None, project=None, - project_domain=None, - tags=None, any_tags=None, not_tags=None, not_any_tags=None, - floating_ips=None, - auth_info=Tenant.get('admin'), con_ssh=None): - """ - Get floating ips values with given parameters. - - Args: - field (str|tuple|list): header of floating ip list table, such as - 'Floating IP Address' or 'Fixed IP Address' - long (bool) - network (str|None) - router (str|None) - fixed_ip (str|None): fixed ip address - floating_ip (str|None): - port (str|None): port id - status (str|None): - project (str|None): - project_domain (str|None): - tags (str|tuple|listNone): - any_tags (str|tuple|listNone): - not_tags (str|tuple|listNone): - not_any_tags (str|tuple|listNone): - floating_ips (str|list|tuple): post execution table filters - auth_info (dict): if tenant auth_info is given instead of admin, - only floating ips for this tenant will be - returned. - con_ssh (SSHClient): - - Returns (list): list of floating ips values - - """ - args_dict = { - '--long': long, - '--network': network, - '--port': port, - '--fixed-ip-address': fixed_ip, - '--floating-ip-address': floating_ip, - '--status': status, - '--router': router, - '--project': project, - '--project-domain': project_domain, - '--tags': tags, - '--any-tags': any_tags, - '--not-tags': not_tags, - '--not-any-tags': not_any_tags - } - args = common.parse_args(args_dict, repeat_arg=False, vals_sep=',') - table_ = table_parser.table( - cli.openstack('floating ip list', args, ssh_client=con_ssh, - auth_info=auth_info)[1]) - if floating_ips: - table_ = table_parser.filter_table(table_, **{ - 'Floating IP Address': floating_ips}) - - return table_parser.get_multi_values(table_, field) - - -def get_floating_ip_values(fip, fields='fixed_ip_address', - auth_info=Tenant.get('admin'), con_ssh=None): - """ - Get floating ip info for given field. - Args: - fip (str): ip or id of a floating ip - fields (str|list|tuple): field(s) in floating ip show table. 
- auth_info (dict): - con_ssh (SSHClient): - - Returns (list): values of given fields for specified floating ip - - """ - table_ = table_parser.table( - cli.openstack('floating ip show', fip, ssh_client=con_ssh, - auth_info=auth_info)[1]) - - return table_parser.get_multi_values_two_col_table(table_, fields=fields, - evaluate=True) - - -def unset_floating_ip(floating_ip, port=None, qos_policy=None, tags=None, - all_tag=None, auth_info=Tenant.get('admin'), - con_ssh=None, fail_ok=False): - """ - Disassociate a floating ip - - Args: - floating_ip (str): ip or id of the floating ip - port (bool) - qos_policy (bool) - tags (str|None|list|tuple) - all_tag (bool) - auth_info (dict): - con_ssh (SSHClient): - fail_ok (bool): - - Returns (tuple): (rtn_code(int), msg(str)) - (0, "Floating ip is successfully disassociated with fixed ip") - (1, ) - - """ - - args_dict = { - '--port': port, - '--qos-policy': qos_policy, - '--tag': tags, - '--all-tag': all_tag, - } - - args = common.parse_args(args_dict, repeat_arg=True) - if not args: - raise ValueError("Nothing is specified to unset") - - args = '{} {}'.format(args, floating_ip) - code, output = cli.openstack('floating ip unset', args, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - - if code == 1: - return 1, output - - fixed_ip = get_floating_ip_values(floating_ip, fields='fixed_ip_address', - auth_info=auth_info, con_ssh=con_ssh)[0] - if fixed_ip is not None: - err_msg = "Fixed ip address is {} instead of None for floating ip " \ - "{}".format(fixed_ip, floating_ip) - if fail_ok: - return 2, err_msg - else: - raise exceptions.NeutronError(err_msg) - - succ_msg = "Floating ip {} is successfully disassociated with fixed " \ - "ip".format(floating_ip) - LOG.info(succ_msg) - return 0, succ_msg - - -def associate_floating_ip_to_vm(floating_ip, vm_id, vm_ip=None, - auth_info=Tenant.get('admin'), - con_ssh=None, fail_ok=False): - """ - Associate a floating ip to management net ip of given vm. 
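-
-    A typical end-to-end flow (vm id hypothetical) pairs this helper with
-    create_floating_ip(), e.g.:
-        fip = create_floating_ip(cleanup='function')[1]
-        associate_floating_ip_to_vm(floating_ip=fip, vm_id=vm_under_test)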
-
-    Args:
-        floating_ip (str): ip or id of the floating ip
-        vm_id (str): vm id
-        vm_ip (str): management ip of a vm used to find the matching port to
-            attach floating ip to
-        auth_info (dict):
-        con_ssh (SSHClient):
-        fail_ok (bool):
-
-    Returns (tuple): (rtn_code(int), msg(str))
-        (0, <floating_ip>)
-        (1, <std_err>)
-
-    """
-    if not vm_ip:
-        # get a vm management ip if not given
-        vm_ip = get_mgmt_ips_for_vms(vm_id, con_ssh=con_ssh)[0]
-
-    port = get_ports(server=vm_id, fixed_ips={'ip-address': vm_ip},
-                     con_ssh=con_ssh)[0]
-
-    code, output = set_floating_ip(floating_ip=floating_ip, port=port,
-                                   fixed_ip_addr=vm_ip, auth_info=auth_info,
-                                   con_ssh=con_ssh, fail_ok=fail_ok)
-    if code > 0:
-        return 1, output
-
-    if re.match(UUID, floating_ip):
-        floating_ip = \
-            get_floating_ip_values(floating_ip, fields='floating_ip_address',
-                                   con_ssh=con_ssh)[0]
-
-    _wait_for_ip_in_nova_list(vm_id, ip_addr=floating_ip, fail_ok=False,
-                              con_ssh=con_ssh)
-    return 0, floating_ip
-
-
-def set_floating_ip(floating_ip, port=None, fixed_ip_addr=None, qos_policy=None,
-                    tags=None, no_tag=None,
-                    auth_info=Tenant.get('admin'), con_ssh=None, fail_ok=False):
-    """
-    Set floating ip properties
-    Args:
-        floating_ip:
-        port:
-        fixed_ip_addr:
-        qos_policy:
-        tags:
-        no_tag:
-        auth_info:
-        con_ssh:
-        fail_ok:
-
-    Returns (tuple):
-
-    """
-    args_dict = {
-        '--port': port,
-        '--fixed-ip-address': fixed_ip_addr,
-        '--qos-policy': qos_policy,
-        '--tag': tags,
-        '--no-tag': no_tag,
-    }
-
-    args = common.parse_args(args_dict, repeat_arg=True)
-    if not args:
-        raise ValueError("Nothing is specified to set")
-
-    args = '{} {}'.format(args, floating_ip)
-
-    code, output = cli.openstack('floating ip set', args, ssh_client=con_ssh,
-                                 fail_ok=fail_ok, auth_info=auth_info)
-    if code > 0:
-        return 1, output
-
-    succ_msg = "port {} is successfully associated with floating ip {}".format(
-        port, floating_ip)
-    LOG.info(succ_msg)
-    return 0, floating_ip
-
-
-def _wait_for_ip_in_nova_list(vm_id, ip_addr, timeout=300, fail_ok=False,
-                              con_ssh=None, auth_info=Tenant.get('admin')):
-    end_time = time.time() + timeout
-    while time.time() < end_time:
-        vm_ips = _get_net_ips_for_vms(vms=vm_id, rtn_dict=False,
-                                      con_ssh=con_ssh, auth_info=auth_info)
-        if ip_addr in vm_ips:
-            return True
-    else:
-        msg = "IP address {} is not found in openstack server list for vm {} " \
-              "within {} seconds".format(ip_addr, vm_id, timeout)
-        if fail_ok:
-            return False
-        raise exceptions.TimeoutException(msg)
-
-
-def get_providernet_ranges(field='name', range_name=None, providernet_name=None,
-                           providernet_type=None, strict=False,
-                           auth_info=Tenant.get('admin'), con_ssh=None):
-    """
-
-    Args:
-        field (str): 'name' or 'id'
-        range_name (str):
-        providernet_name (str):
-        providernet_type (str):
-        strict (bool):
-        auth_info (dict):
-        con_ssh (SSHClient):
-
-    Returns (list): list of range names or ids
-
-    """
-
-    table_ = table_parser.table(
-        cli.neutron('providernet-range-list', ssh_client=con_ssh,
-                    auth_info=auth_info)[1])
-
-    kwargs = {}
-    if providernet_name is not None:
-        kwargs['providernet'] = providernet_name
-
-    if range_name is not None:
-        kwargs['name'] = range_name
-
-    if providernet_type is not None:
-        kwargs['type'] = providernet_type
-
-    return table_parser.get_values(table_, field, strict=strict, **kwargs)
-
-
-def get_security_groups(field='id', project=None, project_domain=None,
-                        tags=None, any_tags=None,
-                        not_tags=None, not_any_tags=None, name=None,
-                        strict=False, con_ssh=None, auth_info=None):
-    """
-    Get the neutron security group list based on name if given
for given - user. - - Args: - field (str|list|tuple) - project - project_domain - tags (list|tuple|str|None) - any_tags (list|tuple|str|None) - not_tags (list|tuple|str|None) - not_any_tags (list|tuple|str|None) - con_ssh (SSHClient): If None, active controller ssh will be used. - auth_info (dict): Tenant dict. If None, primary tenant will be used. - name (str): Given name for the security group to filter - strict (bool): strict match for name - - Returns (list): Neutron security group id. - - """ - args_dict = { - 'project': project, - 'project_domain': project_domain, - 'tags': tags, - 'any-tags': any_tags, - 'not-tags': not_tags, - 'not-any-tags': not_any_tags, - } - args = common.parse_args(args_dict, vals_sep=',') - table_ = table_parser.table( - cli.openstack('security group list', args, ssh_client=con_ssh, - auth_info=auth_info)[1]) - if name: - table_ = table_parser.filter_table(table_, strict=strict, name=name) - - return table_parser.get_multi_values(table_, field) - - -def get_internal_net_id(net_name=None, strict=False, con_ssh=None, - auth_info=None): - """ - Get internal network id that matches the given net_name of a specific - tenant. - - Args: - net_name (str): name of the internal network. This can be a substring - of the tenant net name, such as 'net1', - and it will return id for internal0-net1 - strict (bool): Whether to perform strict search on given net_name - con_ssh (SSHClient): - auth_info (dict): If None, primary tenant will be used. - - Returns (str): A tenant network id for given tenant network name. - If multiple ids matches the given name, only the first will return - - """ - net_ids = get_internal_net_ids(net_names=net_name, strict=strict, - con_ssh=con_ssh, auth_info=auth_info) - if not net_ids: - raise exceptions.TiSError( - "No network name contains {} in 'openstack network list'".format( - net_name)) - - return net_ids[0] - - -def get_mgmt_net_id(con_ssh=None, auth_info=None): - """ - Get the management net id of given tenant. - - Args: - con_ssh (SSHClient): If None, active controller ssh will be used. - auth_info (dict): Tenant dict. If None, primary tenant will be used. - - Returns (str): Management network id of a specific tenant. - - """ - mgmt_net_name = Networks.get_nenutron_net_patterns(net_type='mgmt')[0] - mgmt_ids = get_networks(name=mgmt_net_name, con_ssh=con_ssh, - auth_info=auth_info, strict=False, regex=True) - if not mgmt_ids: - raise exceptions.TiSError( - "No network name contains {} in 'openstack network list'".format( - mgmt_net_name)) - return mgmt_ids[0] - - -def get_tenant_net_id(net_name=None, con_ssh=None, auth_info=None): - """ - Get tenant network id that matches the given net_name of a specific tenant. - - Args: - net_name (str): name of the tenant network. This can be a substring - of the tenant net name, such as 'net1', - and it will return id for -net1 - con_ssh (SSHClient): - auth_info (dict): If None, primary tenant will be used. - - Returns (str): A tenant network id for given tenant network name. - If multiple ids matches the given name, only the first will return - - """ - net_ids = get_tenant_net_ids(net_names=net_name, con_ssh=con_ssh, - auth_info=auth_info) - if not net_ids: - raise exceptions.TiSError( - "No network name contains {} in 'openstack network list'".format( - net_name)) - - return net_ids[0] - - -def get_tenant_net_ids(net_names=None, strict=False, regex=True, con_ssh=None, - auth_info=None, field='id'): - """ - Get a list of tenant network ids that match the given net_names for a - specific tenant. 
- - Args: - net_names (str or list): list of tenant network name(s) to get id(s) for - strict (bool): whether to perform a strict search on given name - regex (bool): whether to search using regular expression - con_ssh (SSHClient): - auth_info (dict): If None, primary tenant will be used - field (str): id or name - - Returns (list): list of tenant nets. such as (, - ) - - """ - if net_names is None: - net_names = Networks.get_nenutron_net_patterns('data')[0] - regex = True - strict = False - - return get_networks(field=field, con_ssh=con_ssh, auth_info=auth_info, - strict=strict, regex=regex, name=net_names) - - -def get_internal_net_ids(net_names=None, strict=False, regex=True, con_ssh=None, - auth_info=None): - """ - Get a list of internal network ids that match the given net_names for a - specific tenant. - - Args: - net_names (str or list): list of internal network name(s) to get id( - s) for - strict (bool): whether to perform a strict search on given name - regex (bool): whether to search using regular expression - con_ssh (SSHClient): - auth_info (dict): If None, primary tenant will be used - - Returns (list): list of tenant nets. such as (, - ) - - """ - if net_names is None: - net_names = Networks.get_nenutron_net_patterns('internal')[0] - strict = False - regex = True - else: - if isinstance(net_names, str): - net_names = [net_names] - - for i in range(len(net_names)): - net_name = net_names[i] - if 'internal' not in net_name: - net_names[i] = 'internal.*{}'.format(net_name) - - return get_networks(field='ID', con_ssh=con_ssh, auth_info=auth_info, - strict=strict, regex=regex, name=net_names) - - -def get_tenant_ips_for_vms(vms=None, con_ssh=None, - auth_info=Tenant.get('admin'), rtn_dict=False, - exclude_nets=None): - """ - This function returns the management IPs for all VMs on the system. - We make the assumption that the management IPs start with "192". - Args: - vms (str|list|None): vm ids list. If None, management ips for ALL vms - with given Tenant(via auth_info) will be - returned. - con_ssh (SSHClient): active controller SSHClient object - auth_info (dict): use admin by default unless specified - rtn_dict (bool): return list if False, return dict if True - exclude_nets (list|str) network name(s) - exclude ips from given - network name(s) - - Returns (list|dict): - a list of all VM management IPs # rtn_dict=False - dictionary with vm IDs as the keys, and mgmt ips as values # - rtn_dict=True - """ - net_name_pattern, net_ip_pattern = Networks.get_nenutron_net_patterns( - 'data') - return _get_net_ips_for_vms(netname_pattern=net_name_pattern, - ip_pattern=net_ip_pattern, vms=vms, - con_ssh=con_ssh, auth_info=auth_info, - rtn_dict=rtn_dict, - exclude_nets=exclude_nets) - - -def get_internal_ips_for_vms(vms=None, con_ssh=None, - auth_info=Tenant.get('admin'), rtn_dict=False, - exclude_nets=None): - """ - This function returns the management IPs for all VMs on the system. - We make the assumption that the management IPs start with "192". - Args: - vms (str|list|None): vm ids list. If None, management ips for ALL vms - with given Tenant(via auth_info) will be - returned. 
- con_ssh (SSHClient): active controller SSHClient object - auth_info (dict): use admin by default unless specified - rtn_dict (bool): return list if False, return dict if True - exclude_nets (list|str) network name(s) - exclude ips from given - network name(s) - - Returns (list|dict): - a list of all VM management IPs # rtn_dict=False - dictionary with vm IDs as the keys, and mgmt ips as values # - rtn_dict=True - """ - net_name_pattern, net_ip_pattern = Networks.get_nenutron_net_patterns( - 'internal') - return _get_net_ips_for_vms(netname_pattern=net_name_pattern, - ip_pattern=net_ip_pattern, vms=vms, - con_ssh=con_ssh, auth_info=auth_info, - rtn_dict=rtn_dict, - exclude_nets=exclude_nets) - - -def get_external_ips_for_vms(vms=None, con_ssh=None, - auth_info=Tenant.get('admin'), rtn_dict=False, - exclude_nets=None): - net_name_pattern, net_ip_pattern = Networks.get_nenutron_net_patterns( - 'external') - return _get_net_ips_for_vms(netname_pattern=net_name_pattern, - ip_pattern=net_ip_pattern, vms=vms, - con_ssh=con_ssh, auth_info=auth_info, - rtn_dict=rtn_dict, - exclude_nets=exclude_nets) - - -def get_mgmt_ips_for_vms(vms=None, con_ssh=None, auth_info=Tenant.get('admin'), - rtn_dict=False, exclude_nets=None): - """ - This function returns the management IPs for all VMs on the system. - We make the assumption that the management IP pattern is "192.168.xxx.x( - xx)". - Args: - vms (str|list|None): vm ids list. If None, management ips for ALL vms - with given Tenant(via auth_info) will be - returned. - con_ssh (SSHClient): active controller SSHClient object - auth_info (dict): use admin by default unless specified - rtn_dict (bool): return list if False, return dict if True - exclude_nets (list|str) network name(s) - exclude ips from given - network name(s) - - Returns (list|dict): - a list of all VM management IPs # rtn_dict=False - dictionary with vm IDs as the keys, and mgmt ips as values # - rtn_dict=True - """ - net_name_pattern, net_ip_pattern = Networks.get_nenutron_net_patterns( - 'mgmt') - return _get_net_ips_for_vms(netname_pattern=net_name_pattern, - ip_pattern=net_ip_pattern, vms=vms, - con_ssh=con_ssh, auth_info=auth_info, - rtn_dict=rtn_dict, - exclude_nets=exclude_nets) - - -def _get_net_ips_for_vms(netname_pattern=None, ip_pattern=None, vms=None, - con_ssh=None, auth_info=Tenant.get('admin'), - rtn_dict=False, exclude_nets=None, fail_ok=False): - if not vms and vms is not None: - raise ValueError("Invalid value for vms: {}".format(vms)) - - args = '--a' if auth_info and auth_info.get('user') == 'admin' else '' - table_ = table_parser.table( - cli.openstack('server list', args, ssh_client=con_ssh, - auth_info=auth_info)[1]) - if vms: - table_ = table_parser.filter_table(table_, ID=vms) - - vm_ids = table_parser.get_column(table_, 'ID') - if not vm_ids: - raise ValueError("No VM found.") - - all_ips = [] - all_ips_dict = {} - vms_nets = table_parser.get_column(table_, 'Networks') - - if exclude_nets and isinstance(exclude_nets, str): - exclude_nets = [exclude_nets] - - for i in range(len(vm_ids)): - vm_id = vm_ids[i] - vm_nets = vms_nets[i].split(sep=';') - ips_for_vm = [] - for vm_net in vm_nets: - net_name, net_ips = vm_net.strip().split('=') - if exclude_nets: - for net_to_exclude in exclude_nets: - if net_to_exclude in net_name: - LOG.info("Excluding IPs from {}".format(net_to_exclude)) - continue - # find ips for given netname_pattern - if not netname_pattern or re.search(netname_pattern, net_name): - net_ips = [net_ip.strip() for net_ip in net_ips.split(',')] - 
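-                # each entry in the 'Networks' column has the form
-                # 'net-name=ip1, ip2; other-net=ip3'; at this point net_ips
-                # holds the stripped addresses of one network on this vm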
ips_for_vm += net_ips - - if not ips_for_vm: - LOG.warning( - "No network found for vm {} with net name pattern: {}".format( - vm_id, netname_pattern)) - continue - - # Filter further if IP pattern is given - if ip_pattern: - ips_for_vm = re.findall(ip_pattern, ','.join(ips_for_vm)) - if not ips_for_vm: - LOG.warning( - "No ip found for vm {} with pattern {}".format(vm_id, - ip_pattern)) - continue - - LOG.debug('targeted ips for vm: {}'.format(ips_for_vm)) - all_ips_dict[vm_id] = ips_for_vm - all_ips += ips_for_vm - - if not all_ips: - if fail_ok: - return all_ips_dict if rtn_dict else all_ips - raise ValueError( - "No ip found for VM(s) {} with net name pattern: {}{}".format( - vm_ids, netname_pattern, ', and ip pattern: {}'.format( - ip_pattern) if ip_pattern else '')) - - LOG.info("IPs dict: {}".format(all_ips_dict)) - if rtn_dict: - return all_ips_dict - else: - return all_ips - - -def get_routers(field='ID', name=None, distributed=None, ha=None, - gateway_ip=None, strict=True, regex=False, - auth_info=None, con_ssh=None): - """ - Get router id(s) based on given criteria. - Args: - field (str|tuple|list): header(s) of the router list table - name (str): router name - distributed (bool): filter out dvr or non-dvr router - ha (bool): filter out HA router - gateway_ip (str): ip of the external router gateway such as - "192.168.13.3" - strict (bool): whether to perform strict search on router name - regex - auth_info (dict): - con_ssh (SSHClient): - - Returns (list): list of routers - - """ - param_dict = { - 'Distributed': distributed, - 'HA': ha, - 'External_gateway_info': gateway_ip, - } - params = {k: str(v) for k, v in param_dict.items() if v is not None} - args = '--long' if 'External_gateway_info' in params else '' - - table_ = table_parser.table( - cli.openstack('router list', args, ssh_client=con_ssh, - auth_info=auth_info)[1], - combine_multiline_entry=True) - if name is not None: - table_ = table_parser.filter_table(table_, strict=strict, regex=regex, - name=name) - if params: - table_ = table_parser.filter_table(table_, **params) - - convert = False - if isinstance(field, str): - field = [field] - convert = True - - values_all_fields = [] - for header in field: - values = table_parser.get_values(table_, header) - if header.lower() == 'external gateway info': - values = [ - eval(value.replace('true', 'True').replace('false', 'False')) - for value in values] - values_all_fields.append(values) - - if convert: - return values_all_fields[0] - - return values_all_fields - - -def get_tenant_router(router_name=None, auth_info=None, con_ssh=None): - """ - Get id of tenant router with specified name. 
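-
-    When router_name is omitted, the '<tenant_name>-router' naming convention
-    is assumed, e.g. (names hypothetical):
-        get_tenant_router()                   # looks up '<primary_tenant>-router'
-        get_tenant_router('tenant2-router')   # looks up an explicit name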
-
-    Args:
-        router_name (str): name of the router
-        auth_info (dict):
-        con_ssh (SSHClient):
-
-    Returns (str): router id
-
-    """
-    if router_name is None:
-        tenant_name = common.get_tenant_name(auth_info=auth_info)
-        router_name = tenant_name + '-router'
-
-    routers = get_routers(auth_info=auth_info, con_ssh=con_ssh,
-                          name=router_name)
-    if not routers:
-        LOG.warning("No router with name {} found".format(router_name))
-        return None
-    return routers[0]
-
-
-def get_router_values(router_id=None, fields='status', strict=True,
-                      auth_info=Tenant.get('admin'), con_ssh=None):
-    """
-    Get values of specified fields for given router via openstack router show
-
-    Args:
-        router_id (str):
-        fields (str|list|tuple):
-        strict (bool):
-        auth_info (dict):
-        con_ssh (SSHClient):
-
-    Returns (list): values for given fields in openstack router show
-
-    """
-    if router_id is None:
-        router_id = get_tenant_router(con_ssh=con_ssh)
-
-    table_ = table_parser.table(
-        cli.openstack('router show', router_id, ssh_client=con_ssh,
-                      auth_info=auth_info)[1],
-        combine_multiline_entry=True)
-
-    if isinstance(fields, str):
-        fields = (fields,)
-    values = []
-    for field in fields:
-        value = table_parser.get_value_two_col_table(table_, field,
-                                                     strict=strict)
-        if field in ('interfaces_info', 'external_gateway_info',
-                     'distributed') or value in ('None', 'False', 'True'):
-            value = eval(
-                value.replace('true', 'True').replace('false', 'False'))
-        values.append(value)
-    return values
-
-
-def create_router(name=None, project=None, distributed=None, ha=None,
-                  disable=None, description=None, tags=None,
-                  no_tag=None, avail_zone_hint=None, project_domain=None,
-                  rtn_name=False,
-                  fail_ok=False, auth_info=Tenant.get('admin'), con_ssh=None,
-                  cleanup=None):
-    """
-    Create a neutron router with given parameters
-    Args:
-        name (str|None):
-        project (str|None):
-        distributed (bool|None):
-        ha (bool|None):
-        disable (bool|None):
-        description (str|None):
-        tags (str|list|tuple|None):
-        no_tag (bool|None):
-        avail_zone_hint (str|None):
-        project_domain (str|None):
-        rtn_name (bool): return router name if True else return router id
-        fail_ok (bool):
-        auth_info:
-        con_ssh:
-        cleanup (str|None): Valid cleanup scopes: function, class, module,
-            session
-
-    Returns (tuple):
-        (0, <router>)    # router created successfully
-        (1, <std_err>)   # CLI rejected
-
-    """
-    if not project and auth_info and auth_info['tenant'] == 'admin':
-        project = Tenant.get_primary()['tenant']
-
-    if name is None:
-        name = 'router'
-    name = '-'.join([project, name, str(common.Count.get_router_count())])
-
-    args_dict = {
-        '--project': project,
-        '--distributed': True if distributed else None,
-        '--centralized': True if distributed is False else None,
-        '--ha': True if ha else None,
-        '--no-ha': True if ha is False else None,
-        '--enable': True if disable is False else None,
-        '--disable': True if disable else None,
-        '--description': description,
-        '--tag': tags,
-        '--no-tag': no_tag,
-        '--availability-zone-hint': avail_zone_hint,
-        '--project-domain': project_domain,
-    }
-    args = '{} {}'.format(common.parse_args(args_dict, repeat_arg=True), name)
-
-    LOG.info("Creating router with args: {}".format(args))
-    code, output = cli.openstack('router create', args, ssh_client=con_ssh,
-                                 fail_ok=fail_ok, auth_info=auth_info)
-
-    table_ = table_parser.table(output)
-    router_id = table_parser.get_value_two_col_table(table_, 'id')
-    if cleanup and router_id:
-        ResourceCleanup.add('router', router_id, scope=cleanup)
-
-    # process result
-    if code > 0:
-        return 1, output
-
-    succ_msg = "Router {} is created successfully.".format(name)
-    LOG.info(succ_msg)
-    return 0, name if rtn_name else router_id
-
-
-def get_router_subnets(router, field='subnet_id', router_interface_only=True,
-                       auth_info=Tenant.get('admin'),
-                       con_ssh=None):
-    """
-    Get router subnets' ids or ips via openstack port list
-    Args:
-        router (str): router name or id
-        field (str): 'subnet_id' or 'ip_address'
-        router_interface_only
-        auth_info:
-        con_ssh:
-
-    Returns (list):
-
-    """
-    fixed_ips, device_owners = get_ports(
-        field=('Fixed IP Addresses', 'Device Owner'), router=router, long=True,
-        auth_info=auth_info, con_ssh=con_ssh)
-
-    subnets = []
-    for i in range(len(device_owners)):
-        device_owner = device_owners[i]
-        # Assume router can have only 1 fixed ip on same port
-        fixed_ip_info = fixed_ips[i][0]
-        if router_interface_only and 'router_interface' not in device_owner:
-            continue
-        subnets.append(fixed_ip_info.get(field, None))
-
-    return subnets
-
-
-def get_next_subnet_cidr(net_id, ip_pattern=Networks.IPV4_IP, con_ssh=None,
-                         auth_info=Tenant.get('admin')):
-    """
-    Get next unused cidr for given network
-    Args:
-        net_id:
-        ip_pattern:
-        con_ssh:
-        auth_info:
-
-    Returns:
-
-    """
-    existing_subnets = get_subnets(field='Subnet', network=net_id,
-                                   con_ssh=con_ssh, auth_info=auth_info)
-    existing_subnets_str = ','.join(existing_subnets)
-    # TODO: add ipv6 support
-    mask = re.findall(ip_pattern + r'/(\d{1,3})', existing_subnets_str)[0]
-    increment = int(math.pow(2, math.ceil(math.log2(int(mask)))))
-
-    ips = re.findall(ip_pattern, existing_subnets_str)
-    ips = [ipaddress.ip_address(item) for item in ips]
-    max_ip = ipaddress.ip_address(max(ips))
-
-    cidr = "{}/{}".format(str(ipaddress.ip_address(int(max_ip) + increment)),
-                          mask)
-    LOG.info("Next unused CIDR for network {}: {}".format(net_id, cidr))
-
-    return cidr
-
-
-def delete_router(router, remove_ports=True, auth_info=Tenant.get('admin'),
-                  con_ssh=None, fail_ok=False):
-    """
-    Delete given router
-    Args:
-        router (str):
-        remove_ports (bool):
-        auth_info:
-        con_ssh:
-        fail_ok:
-
-    Returns (tuple):
-
-    """
-
-    if remove_ports:
-        LOG.info("Clear router gateway and remove attached ports for router "
-                 "{}".format(router))
-        clear_router_gateway(router, auth_info=auth_info, con_ssh=con_ssh)
-        router_ports = get_ports(router=router, con_ssh=con_ssh,
-                                 auth_info=auth_info)
-        for port in router_ports:
-            remove_router_interface(router, port=port, auth_info=auth_info,
-                                    con_ssh=con_ssh)
-
-    LOG.info("Deleting router {}...".format(router))
-    code, output = cli.openstack('router delete', router, ssh_client=con_ssh,
-                                 fail_ok=fail_ok, auth_info=auth_info)
-    if code > 0:
-        return 1, output
-
-    rtn_val = 'ID' if re.match(UUID, router) else 'Name'
-    post_routers = get_routers(auth_info=auth_info, con_ssh=con_ssh,
-                               field=rtn_val)
-    if router in post_routers:
-        msg = "Router {} is still showing in neutron router-list".format(router)
-        if fail_ok:
-            LOG.warning(msg)
-            return 2, msg
-        raise exceptions.NeutronError(msg)
-
-    succ_msg = "Router {} deleted successfully".format(router)
-    LOG.info(succ_msg)
-    return 0, succ_msg
-
-
-def add_router_interface(router=None, subnet=None, port=None, auth_info=None,
-                         con_ssh=None, fail_ok=False):
-    """
-    Add port or subnet to router
-    Args:
-        router (str|None):
-        subnet (str|None):
-        port (str|None):
-        auth_info (dict):
-        con_ssh:
-        fail_ok (bool):
-
-    Returns (tuple):
-    """
-
-    return __add_remove_router_interface(router=router, port=port,
-                                         subnet=subnet, action='add',
-                                         auth_info=auth_info, con_ssh=con_ssh,
-                                         fail_ok=fail_ok)
-
-
-def
remove_router_interface(router=None, subnet=None, port=None, auth_info=None, - con_ssh=None, fail_ok=False): - """ - Remove port or subnet from router - Args: - router (str|None): - subnet (str|None): - port (str|None): - auth_info (dict): - con_ssh: - fail_ok (bool): - - Returns (tuple): - """ - return __add_remove_router_interface(router=router, port=port, - subnet=subnet, action='remove', - auth_info=auth_info, con_ssh=con_ssh, - fail_ok=fail_ok) - - -def __add_remove_router_interface(router=None, subnet=None, port=None, - action='add', auth_info=None, - con_ssh=None, fail_ok=False): - """ - Remove router port or subnet - Args: - router (str): - subnet - port - action (str): add or remove - auth_info: - con_ssh: - fail_ok: - - Returns (tuple): - - """ - if subnet is None and port is None: - raise ValueError("subnet or port has to be provided") - - if not router: - router = get_tenant_router(con_ssh=con_ssh, auth_info=auth_info) - - if subnet: - interface = subnet - interface_type = 'subnet' - else: - interface = port - interface_type = 'port' - - cmd = 'router {} {}'.format(action, interface_type) - - args = '{} {}'.format(router, interface) - LOG.info("Removing router interface: {}".format(args)) - code, output = cli.openstack(cmd, args, ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info) - - if code == 1: - return 1, output - - succ_msg = "{} ran successfully for router {}.".format(cmd, router) - LOG.info(succ_msg) - return 0, interface - - -def set_router(router=None, enable=None, external_gateway=None, - enable_snat=None, routes=None, no_route=None, - fixed_ips=None, tags=None, no_tag=None, qos_policy=None, - no_qos_policy=None, ha=None, distributed=None, - name=None, description=None, fail_ok=False, con_ssh=None, - auth_info=Tenant.get('admin')): - """ - Set router with given parameters - Args: - router (str): - enable (bool): - external_gateway (str): - enable_snat (bool): - routes (list): list of dict or strings - list of dict: - [{'destination': , 'gateway': }, - {'destination': , 'gateway': }] - list of strings: - ['destination=,gateway=', - 'destination=,gateway='] - no_route (bool): - fixed_ips (list|tuple|str|dict): If list, it could be a list of dict - or strings - list of dict: - [{'subnet': , 'ip-address': }, {'subnet': - , 'ip-address': }] - list of strings: - ['subnet=,ip-address=', 'subnet=, - ip-address='] - tags (list\tuple): list of strings - no_tag (bool): - qos_policy (str): - no_qos_policy (bool): - ha (bool): - distributed (bool): - name (str): - description (str): - fail_ok (bool): - con_ssh: - auth_info: - - Returns: - - """ - args_dict = { - '--name': name, - '--description': description, - '--enable': True if enable else None, - '--disable': True if enable is False else None, - '--distributed': True if distributed else None, - '--centralized': True if distributed is False else None, - '--route': routes, - '--no-route': True if no_route else None, - '--ha': True if ha else None, - '--no-ha': True if ha is False else None, - '--external-gateway': external_gateway, - '--fixed-ip': fixed_ips, - '--enable-snat': True if enable_snat else None, - '--disable-snat': True if enable_snat is False else None, - '--qos-policy': qos_policy, - '--no-qos-policy': True if no_qos_policy else None, - '--tag': tags, - '--no-tag': True if no_tag else None, - } - args = common.parse_args(args_dict, repeat_arg=True) - if not args: - raise ValueError("No parameters provided to set router") - - if not router: - router = get_tenant_router(con_ssh=con_ssh) - - LOG.info("Setting 
router {} with args: {}".format(router, args))
-    args = '{} {}'.format(args, router)
-    code, out = cli.openstack('router set', args, ssh_client=con_ssh,
-                              fail_ok=fail_ok, auth_info=auth_info)
-    if code > 0:
-        return 1, out
-
-    LOG.info("Router {} set successfully".format(router))
-    return 0, router
-
-
-def unset_router(router_id=None, external_gateway=None, routes=None,
-                 qos_policy=None, tag=None, all_tag=None,
-                 fail_ok=False, con_ssh=None, auth_info=Tenant.get('admin')):
-    """
-    Unset router with given parameters
-    Args:
-        router_id (str|None):
-        external_gateway (bool):
-        qos_policy (bool):
-        tag (str):
-        all_tag (bool):
-        fail_ok:
-        con_ssh:
-        auth_info:
-        routes (list): list of dict or string.
-            list of dict:
-                [{'destination': <dest1>, 'gateway': <gateway1>},
-                 {'destination': <dest2>, 'gateway': <gateway2>}]
-            list of strings:
-                ['destination=<dest1>,gateway=<gateway1>',
-                 'destination=<dest2>,gateway=<gateway2>']
-    Returns:
-
-    """
-    args_dict = {
-        '--route': routes,
-        '--external-gateway': external_gateway,
-        '--qos-policy': qos_policy,
-        '--tag': tag,
-        '--all-tag': all_tag
-    }
-    args = common.parse_args(args_dict, repeat_arg=True)
-    if not args:
-        raise ValueError("No parameter specified to unset")
-
-    if not router_id:
-        router_id = get_tenant_router(con_ssh=con_ssh)
-
-    LOG.info("Unsetting router {} with args: {}".format(router_id, args))
-    args = '{} {}'.format(args, router_id)
-    code, output = cli.openstack('router unset', args, ssh_client=con_ssh,
-                                 fail_ok=fail_ok, auth_info=auth_info)
-    if code > 0:
-        return 1, output
-
-    msg = "Router {} unset successfully".format(router_id)
-    LOG.info(msg)
-    return 0, msg
-
-
-def get_router_ext_gateway_info(router_id=None, auth_info=None, con_ssh=None):
-    """
-    Get router's external gateway info as a dictionary
-
-    Args:
-        router_id (str):
-        auth_info (dict|None):
-        con_ssh (SSHClient):
-
-    Returns (dict): external gateway info as a dict.
-        Examples: {"network_id": "55e5967a-2138-4f27-a17c-d700af1c2429",
-                   "enable_snat": True,
-                   "external_fixed_ips": [{"subnet_id":
-                   "892d3ad8-9cbc-46db-88f3-84e151bbc116",
-                   "ip_address": "192.168.9.3"}]
-                   }
-    """
-    return get_router_values(router_id=router_id,
-                             fields='external_gateway_info',
-                             con_ssh=con_ssh,
-                             auth_info=auth_info)[0]
-
-
-def set_router_gateway(router_id=None, external_net=None, enable_snat=False,
-                       fixed_ips=None, fail_ok=False,
-                       auth_info=Tenant.get('admin'), con_ssh=None,
-                       clear_first=False):
-    """
-    Set router gateway with given snat, ip settings.
-
-    Args:
-        router_id (str): id of the router to set gateway for. If None,
-            tenant router for Primary tenant will be used.
-        external_net (str): id of the external network for getting the gateway
-        enable_snat (bool): whether to enable SNAT.
- fixed_ips (str|None|list|tuple): ip address(es) on external gateway - fail_ok (bool): - auth_info (dict): auth info for running the router-gateway-set cli - con_ssh (SSHClient): - clear_first (bool): Whether to clear the router gateway first if - router already has a gateway set - - Returns (tuple): (rtn_code (int), message (str)) scenario 1,2,3, - 4 only returns if fail_ok=True - - (0, "Router gateway is successfully set.") - - (1, ) -- cli is rejected - - """ - # Process args - if fixed_ips: - if isinstance(fixed_ips, str): - fixed_ips = (fixed_ips,) - fixed_ips = [{'ip-address': fixed_ip} for fixed_ip in fixed_ips] - if not router_id: - router_id = get_tenant_router(con_ssh=con_ssh) - if not external_net: - external_net = \ - get_networks(con_ssh=con_ssh, external=True, auth_info=auth_info)[0] - - # Clear first if gateway already set - if clear_first and get_router_ext_gateway_info(router_id, - auth_info=auth_info, - con_ssh=con_ssh): - clear_router_gateway(router_id=router_id, check_first=False, - auth_info=auth_info, con_ssh=con_ssh) - - return set_router(router_id, external_gateway=external_net, - enable_snat=enable_snat, fixed_ips=fixed_ips, - con_ssh=con_ssh, auth_info=auth_info, fail_ok=fail_ok) - - -def clear_router_gateway(router_id=None, fail_ok=False, - auth_info=Tenant.get('admin'), con_ssh=None, - check_first=True): - """ - Clear router gateway - - Args: - router_id (str): id of router to clear gateway for. If None, tenant - router for primary tenant will be used. - fail_ok (bool): - auth_info (dict): auth info for running the router-gateway-clear cli - con_ssh (SSHClient): - check_first (bool): whether to check if gateway is set for given - router before clearing - - Returns (tuple): (rtn_code (int), message (str)) - - (0, "Router gateway is successfully cleared.") - - (1, ) -- cli is rejected - - (2, "Failed to clear gateway for router ") - - """ - if router_id is None: - router_id = get_tenant_router(con_ssh=con_ssh, auth_info=auth_info) - - if check_first and not get_router_ext_gateway_info(router_id, - con_ssh=con_ssh, - auth_info=auth_info): - msg = "No gateway found for router. Do nothing." 
- LOG.info(msg) - return -1, msg - - return unset_router(router_id=router_id, external_gateway=True, - fail_ok=fail_ok, con_ssh=con_ssh, - auth_info=auth_info) - - -def get_router_external_gateway_ips(router_id, auth_info=None, con_ssh=None): - """ - Get router external gateway fixed ips - Args: - router_id: - auth_info: - con_ssh: - - Returns (list): list of ip addresses - - """ - ext_gateway_info = get_router_ext_gateway_info(router_id, - auth_info=auth_info, - con_ssh=con_ssh) - fixed_ips = [] - if ext_gateway_info: - fixed_ips = ext_gateway_info['external_fixed_ips'] - fixed_ips = [fixed_ip['ip_address'] for fixed_ip in fixed_ips if - fixed_ip.get('ip_address', '')] - - return fixed_ips - - -def get_router_host(router=None, auth_info=Tenant.get('admin'), con_ssh=None): - """ - Get router host - Args: - router (str|None): - auth_info: - con_ssh: - - Returns (str): - - """ - if not router: - router = get_tenant_router(con_ssh=con_ssh, auth_info=auth_info) - - return get_network_agents(router=router, field='Host', con_ssh=con_ssh, - auth_info=auth_info)[0] - - -def set_router_mode(router_id=None, distributed=None, ha=None, - enable_on_failure=True, fail_ok=False, - auth_info=Tenant.get('admin'), con_ssh=None): - """ - Update router to distributed or centralized - - Args: - router_id (str): id of the router to update - distributed (bool|None): True if set to distributed, False if set to - centralized - ha (bool|None) - enable_on_failure (bool): whether to set admin state up if updating - router failed - fail_ok (bool): whether to throw exception if cli got rejected - auth_info (dict): - con_ssh (SSHClient): - - Returns: - - """ - router_mode = [] - if distributed is not None: - router_mode.append('distributed' if distributed else 'centralized') - if ha is not None: - router_mode.append('ha' if ha else 'no-ha') - - if not router_mode: - raise ValueError("Distributed or ha has to be specified") - - router_mode = ' and '.join(router_mode) - LOG.info("Disable router {} and set it to {} mode".format(router_id, - router_mode)) - try: - code, output = set_router(router=router_id, distributed=distributed, - ha=ha, enable=False, fail_ok=fail_ok, - con_ssh=con_ssh, auth_info=auth_info) - except (exceptions.TiSError, pexpect.ExceptionPexpect): - if enable_on_failure: - set_router(router=router_id, enable=True, con_ssh=con_ssh, - auth_info=auth_info) - raise - - LOG.info("Re-enable router after set to {}".format(router_mode)) - set_router(router=router_id, enable=True, con_ssh=con_ssh, - auth_info=auth_info) - - if code > 0: - return 1, output - - fields = ('distributed', 'ha') - expt_values = (distributed, ha) - post_values = get_router_values(router_id, fields, auth_info=auth_info, - con_ssh=con_ssh) - - for i in range(len(fields)): - field = fields[i] - post_value = post_values[i] - expt_value = expt_values[i] - if expt_value and post_value != expt_value: - msg = "Router {} {} is {} instead of {}".format(router_id, field, - post_value, - expt_value) - raise exceptions.NeutronError(msg) - - succ_msg = "Router is successfully updated to distributed={}".format( - distributed) - LOG.info(succ_msg) - return 0, succ_msg - - -def get_networks_on_providernet(providernet, segment=None, external=None, - field='id', - con_ssh=None, auth_info=Tenant.get('admin'), - name=None, net_id=None, - strict=True, regex=False, exclude=False): - """ - - Args: - providernet(str): - segment (int|None) - external (bool|None) - field(str): 'id' or 'name' - con_ssh (SSHClient): - auth_info (dict): - name - net_id - strict 
(bool) - regex (bool) - exclude (bool): whether to return networks that are NOT on given - providernet - - Returns (list): list of networks - """ - if not providernet: - raise ValueError("No providernet_id provided.") - - return get_networks(field=field, provider_physical_network=providernet, - provider_setment=segment, - external=external, name=name, net_id=net_id, - strict=strict, regex=regex, exclude=exclude, - con_ssh=con_ssh, auth_info=auth_info) - - -def get_eth_for_mac(ssh_client, mac_addr, timeout=VMTimeout.IF_ADD, - vshell=False): - """ - Get the eth name for given mac address on the ssh client provided - Args: - ssh_client (SSHClient): usually a vm_ssh - mac_addr (str): such as "fa:16:3e:45:0d:ec" - timeout (int): max time to wait for the given mac address appear in - ip addr - vshell (bool): if True, get eth name from "vshell port-list" - - Returns (str): The first matching eth name for given mac. such as "eth3" - - """ - end_time = time.time() + timeout - while time.time() < end_time: - if not vshell: - if mac_addr in ssh_client.exec_cmd('ip addr')[1]: - code, output = ssh_client.exec_cmd( - 'ip addr | grep --color=never -B 1 "{}"'.format(mac_addr)) - # sample output: - # 7: eth4: mtu 1500 qdisc noop state - # DOWN qlen 1000 - # link/ether 90:e2:ba:60:c8:08 brd ff:ff:ff:ff:ff:ff - - return output.split(sep=':')[1].strip() - else: - code, output = ssh_client.exec_cmd( - 'vshell port-list | grep {}'.format(mac_addr)) - # |uuid|id|type|name|socket|admin|oper|mtu|mac-address|pci - # -address|network-uuid|network-name - return output.split(sep='|')[4].strip() - time.sleep(1) - else: - LOG.warning( - "Cannot find provided mac address {} in 'ip addr'".format(mac_addr)) - return '' - - -def _get_interfaces_via_vshell(ssh_client, net_type='internal'): - """ - Get interface uuids for given network type - Args: - ssh_client (SSHClient): - net_type: 'data', 'mgmt', or 'internal' - - Returns (list): interface uuids - - """ - LOG.info( - "Getting {} interface-uuid via vshell address-list".format(net_type)) - table_ = table_parser.table( - ssh_client.exec_cmd('vshell address-list', fail_ok=False)[1]) - interfaces = table_parser.get_values( - table_, 'interface-uuid', regex=True, - address=Networks.get_nenutron_net_patterns(net_type=net_type)[1]) - - return interfaces - - -__PING_LOSS_MATCH = re.compile(PING_LOSS_RATE) - - -def ping_server(server, ssh_client, num_pings=5, timeout=60, check_interval=5, - fail_ok=False, vshell=False, interface=None, retry=0, - net_type='internal'): - """ - - Args: - server (str): server ip to ping - ssh_client (SSHClient): ping from this ssh client - num_pings (int): - timeout (int): max time to wait for ping response in seconds - check_interval (int): seconds in between retries - fail_ok (bool): whether to raise exception if packet loss rate is 100% - vshell (bool): whether to ping via 'vshell ping' cmd - interface (str): interface uuid. 
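# [Editor's sketch] get_eth_for_mac above relies on "grep -B 1" returning the
# interface header line directly above the matching link/ether line; the device
# name is the second colon-separated field of that header. The parsing step in
# isolation, against the sample output quoted in the function:

SAMPLE = ('7: eth4: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN '
          'qlen 1000\n    link/ether 90:e2:ba:60:c8:08 brd ff:ff:ff:ff:ff:ff')

def eth_name_from_grep_output(output):
    return output.split(sep=':')[1].strip()

assert eth_name_from_grep_output(SAMPLE) == 'eth4'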
vm's internal interface-uuid will be - used when unset - retry (int): - net_type (str): 'mgmt', 'data', 'internal', or 'external', only used - for vshell=True and interface=None - - Returns (tuple): ( (int), - (int)) - - """ - LOG.info('Ping {} from host {}'.format(server, ssh_client.host)) - output = packet_loss_rate = None - for i in range(max(retry + 1, 0)): - if not vshell: - cmd = 'ping -c {} {}'.format(num_pings, server) - code, output = ssh_client.exec_cmd(cmd=cmd, expect_timeout=timeout, - fail_ok=True) - if code != 0: - packet_loss_rate = 100 - else: - packet_loss_rate = __PING_LOSS_MATCH.findall(output)[-1] - else: - if not interface: - interface = _get_interfaces_via_vshell(ssh_client, - net_type=net_type)[0] - cmd = 'vshell ping --count {} {} {}'.format(num_pings, server, - interface) - code, output = ssh_client.exec_cmd(cmd=cmd, expect_timeout=timeout) - if code != 0: - packet_loss_rate = 100 - else: - if "ERROR" in output: - # usually due to incorrectly selected interface (no route - # to destination) - raise exceptions.NeutronError( - "vshell ping rejected, output={}".format(output)) - packet_loss_rate = re.findall(VSHELL_PING_LOSS_RATE, output)[-1] - - packet_loss_rate = int(packet_loss_rate) - if packet_loss_rate < 100: - if packet_loss_rate > 0: - LOG.warning("Some packets dropped when ping from {} ssh " - "session to {}. Packet loss rate: {}%". - format(ssh_client.host, server, packet_loss_rate)) - else: - LOG.info("All packets received by {}".format(server)) - break - - LOG.info("retry in 3 seconds") - time.sleep(3) - else: - msg = "Ping from {} to {} failed.".format(ssh_client.host, server) - LOG.warning(msg) - if not fail_ok: - raise exceptions.VMNetworkError(msg) - - untransmitted_packets = re.findall(r"(\d+) packets transmitted,", output) - if untransmitted_packets: - untransmitted_packets = int(num_pings) - int(untransmitted_packets[0]) - else: - untransmitted_packets = num_pings - - return packet_loss_rate, untransmitted_packets - - -def get_pci_vm_network(pci_type='pci-sriov', net_name=None, strict=False, - con_ssh=None, auth_info=Tenant.get('admin'), - rtn_all=False): - """ - - Args: - pci_type (str|tuple|list): - net_name: - strict: - con_ssh: - auth_info: - rtn_all - - Returns (tuple|list): None if no network for given pci type; 2 nets(list) - if CX nics; 1 net otherwise. - - """ - if isinstance(pci_type, str): - pci_type = [pci_type] - - hosts_and_pnets = host_helper.get_hosts_and_pnets_with_pci_devs( - pci_type=pci_type, up_hosts_only=True, - con_ssh=con_ssh, auth_info=auth_info) - if not hosts_and_pnets: - if rtn_all: - return [], None - return [] - - # print("hosts and pnets: {}".format(hosts_and_pnets)) - - host = list(hosts_and_pnets.keys())[0] - pnet_name = hosts_and_pnets[host][0] - nets = list(set(get_networks_on_providernet(pnet_name, field='name'))) - - nets_list_all_types = [] - for pci_type_ in pci_type: - if pci_type_ == 'pci-sriov': - # Exclude network on first segment - # The switch is setup with untagged frames for the first segment - # within the range. - # This is suitable for PCI passthrough, but would not work for SRIOV - first_segs = get_first_segments_of_pnet_ranges(pnet_name, - con_ssh=con_ssh) - first_segs = [seg for seg in first_segs if seg > 20] - for seg in first_segs: - untagged_net = get_net_on_segment(pnet_name, seg_id=seg, - field='name', con_ssh=con_ssh) - if untagged_net in nets: - LOG.info( - "{} is on first segment of {} range with untagged " - "frames. Remove for sriov.". 
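# [Editor's sketch] ping_server above pulls the loss percentage out of the
# ping summary with the precompiled PING_LOSS_RATE pattern. That constant is
# not shown in this hunk, so the pattern below is an assumed equivalent:
import re

LOSS_PATTERN = r'(\d+)% packet loss'
summary = '5 packets transmitted, 4 received, 20% packet loss, time 4005ms'
loss = int(re.findall(LOSS_PATTERN, summary)[-1])
assert loss == 20  # [-1] keeps the last match, as in the removed code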
- format(untagged_net, pnet_name)) - nets.remove(untagged_net) - - # print("pnet: {}; Nets: {}".format(pnet_name, nets)) - nets_for_type = _get_preferred_nets(nets=nets, net_name=net_name, - strict=strict) - if not nets_for_type: - nets_list_all_types = [] - break - - nets_list_all_types.append(nets_for_type) - - final_nets = [] - cx_for_pcipt = False - if nets_list_all_types: - final_nets = set(nets_list_all_types[0]) - for nets_ in nets_list_all_types[1:]: - final_nets.intersection_update(set(nets_)) - final_nets = list(final_nets) - if final_nets: - if 'pci-passthrough' in pci_type: - - port = host_helper.get_host_interfaces(host, field='ports', - net_type=pci_type)[0] - host_nic = host_helper.get_host_ports(host, field='device type', - **{'name': port})[0] - if re.match(MELLANOX4, host_nic): - cx_for_pcipt = True - - if not rtn_all: - final_nets = final_nets[0:2] if cx_for_pcipt else final_nets[-1] - - if rtn_all: - final_nets = final_nets, cx_for_pcipt - - return final_nets - - -def get_network_segment_ranges(field=('Minimum ID', 'Maximum ID'), long=False, - shared=None, physical_network=None, - network_type=None, project_id=None, - auth_info=Tenant.get('admin'), con_ssh=None): - """ - Get network segment ranges info - Args: - field (str|tuple|list): - long (bool|None): cli parameter --long - shared (bool|None): return value filter - physical_network (str|None): return value filter - network_type (str|None): return value filter - project_id (str|None): return value filter - auth_info: - con_ssh: - - Returns (list of str|tuple): return list of str if rtn_val is str, - otherwise rtn list of tuples - - """ - - table_ = table_parser.table( - cli.openstack('network segment range list', '--long' if long else '', - ssh_client=con_ssh, - auth_info=auth_info)[1]) - kwargs = { - 'Shared': shared, - 'Physical Network': physical_network, - 'Network Type': network_type, - 'Project ID': project_id, - } - kwargs = {k: v for k, v in kwargs.items() if v is not None} - - vals = table_parser.get_multi_values(table_, field, evaluate=True, **kwargs) - if not isinstance(field, str): - vals = zip(*vals) - - return vals - - -def get_first_segments_of_pnet_ranges(providernet, con_ssh=None, - auth_info=Tenant.get('admin')): - """ - Get first segment id within the range of given providernet - Args: - providernet (str): physical network name - con_ssh (SSHClient): - auth_info (dict): - - Returns (list of int): list of min segment for each range of the physical - network - - """ - min_segments = get_network_segment_ranges(field='Minimum ID', - physical_network=providernet, - auth_info=auth_info, - con_ssh=con_ssh) - - return min_segments - - -def get_net_on_segment(providernet, seg_id, field='name', con_ssh=None, - auth_info=Tenant.get('admin')): - """ - Get network name on given prvidernet with specified segment id - Args: - providernet (str): pnet name or id - seg_id (int|list|tuple): segment id(s) - field (str): 'name' or 'id' - con_ssh (SSHClient): - auth_info (dict): - - Returns (str|None): network id/name or None if no network on given seg id - - """ - nets = get_networks_on_providernet(providernet=providernet, field=field, - con_ssh=con_ssh, segment=seg_id, - auth_info=auth_info) - - net = nets[0] if nets else None - return net - - -def _get_preferred_nets(nets, net_name=None, strict=False): - specified_nets = [] - nets_dict = { - 'internal': [], - 'mgmt': [], - 'data': [] - } - - for net in nets: - if net_name: - if strict: - if re.match(net_name, net): - specified_nets.append(net) - else: - if 
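# [Editor's sketch] The pci-type handling above keeps only networks usable by
# every requested type by intersecting the per-type candidate lists. The same
# pattern with plain data:
per_type = [['net-a', 'net-b', 'net-c'], ['net-b', 'net-c'], ['net-c', 'net-b']]
final_nets = set(per_type[0])
for nets_ in per_type[1:]:
    final_nets.intersection_update(nets_)
assert sorted(final_nets) == ['net-b', 'net-c']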
re.search(net_name, net): - specified_nets.append(net) - else: - # If net_name unspecified: - for net_type, nets_found in nets_dict.items(): - net_name_pattern = Networks.get_nenutron_net_patterns(net_type)[ - 0] - if net_name_pattern and re.search(net_name_pattern, net): - nets_found.append(net) - break - else: - LOG.warning("Unknown network: {}. Ignore.".format(net)) - - for nets_ in (specified_nets, nets_dict['internal'], nets_dict['data'], - nets_dict['mgmt']): - if nets_: - nets_counts = Counter(nets_) - nets_ = sorted(nets_counts.keys(), key=nets_counts.get, - reverse=True) - LOG.info("Preferred networks selected: {}".format(nets_)) - return nets_ - - -def create_port(net_id, name, project=None, fixed_ips=None, device_id=None, - device_owner=None, port_security=None, - enable_port=None, mac_addr=None, vnic_type=None, - security_groups=None, no_security_groups=None, - qos_pol=None, allowed_addr_pairs=None, dns_name=None, tag=None, - no_tag=None, - host_id=None, wrs_vif=None, fail_ok=False, auth_info=None, - con_ssh=None, cleanup=None): - """ - Create a port on given network - - Args: - net_id (str): network id to create port for - name (str): name of the new port - project (str): tenant name. such as tenant1, tenant2 - fixed_ips (list|tuple|dict|None): e.g., [{"subnet_id": , - "ip-address"=}, {"ip-address": } - device_id (str): device id of this port - device_owner (str): Device owner of this port - port_security (None|bool): - enable_port (bool|None): - mac_addr (str): MAC address of this port - vnic_type: one of the: - security_groups (str|list): Security group(s) associated with the port - no_security_groups (bool): Associate no security groups with the port - qos_pol (str): Attach QoS policy ID or name to the resource - allowed_addr_pairs (str|list): Allowed address pair associated with - the port. 
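# [Editor's sketch] _get_preferred_nets above orders each candidate bucket by
# how often a network appears, most frequent first. Reproduced with
# collections.Counter alone (network names are illustrative):
from collections import Counter

nets = ['mgmt0-net0', 'data0-net0', 'data0-net0', 'internal0-net0']
counts = Counter(nets)
preferred = sorted(counts.keys(), key=counts.get, reverse=True)
assert preferred[0] == 'data0-net0'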
- e.g., "ip_address=IP_ADDR[,mac_address=MAC_ADDR]" - dns_name (str): Assign DNS name to the port (requires DNS - integration extension) - host_id (str) - tag (str|None) - no_tag (str|None) - wrs_vif - fail_ok (bool): - auth_info (dict): - con_ssh (SSHClient): - cleanup (None|str) - - Returns (tuple): (, ) - (0, ) - port created successfully - (1, ) - CLI rejected - (2, "Network ID for created port is not as specified.") - post - create check fail - - """ - LOG.info("Creating port on network {}".format(net_id)) - if not net_id: - raise ValueError("network id is required") - tenant_id = \ - keystone_helper.get_projects(field='ID', name=project, - con_ssh=con_ssh)[0] if project else None - - args_dict = { - '--no-security-groups': no_security_groups, - '--enable-port-security': True if port_security else None, - '--disable-port-security': True if port_security is False else None, - '--tenant-id': tenant_id, - '--device-id': device_id, - '--device-owner': device_owner, - '--mac-address': mac_addr, - '--vnic-type': vnic_type, - '--host': host_id, - # '--binding-profile': - '--enable': True if port_security else None, - '--disable': True if enable_port is False else None, - '--qos-policy': qos_pol, - '--dns-name': dns_name, - '--binding-profile vif_model': wrs_vif, - '--fixed-ip': fixed_ips, - '--allowed-address-pair': allowed_addr_pairs, - '--security-group': security_groups, - '--tag': tag, - '--no-tag': no_tag - } - - args = common.parse_args(args_dict=args_dict, repeat_arg=True, vals_sep=',') - args = '--network={} {} {}'.format(net_id, args, name) - - code, output = cli.openstack('port create', args, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - - port_tab = table_parser.table(output) - port_net_id = table_parser.get_value_two_col_table(port_tab, 'network_id') - port_id = table_parser.get_value_two_col_table(port_tab, 'id') - if port_id and cleanup: - ResourceCleanup.add('port', port_id) - - if code == 1: - return code, output - - if not net_id == port_net_id: - err_msg = "Network ID for created port is not as specified. 
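# [Editor's sketch] create_port above assembles its CLI string from args_dict
# via common.parse_args, which drops None values, emits bare flags for True,
# and repeats list-valued options. That helper is not in this hunk, so the
# simplified stand-in below only approximates its behavior:

def parse_args_sketch(args_dict):
    parts = []
    for opt, val in args_dict.items():
        if val is None:
            continue
        if val is True:
            parts.append(opt)
        elif isinstance(val, (list, tuple)):
            parts.extend('{} {}'.format(opt, v) for v in val)
        else:
            parts.append('{} {}'.format(opt, val))
    return ' '.join(parts)

assert parse_args_sketch({'--enable': True, '--mac-address': None,
                          '--tag': ['red', 'blue']}) == \
    '--enable --tag red --tag blue'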
Expt:{}; " \ - "Actual: {}".format(net_id, port_net_id) - if fail_ok: - LOG.warning(err_msg) - return 2, port_id - - succ_msg = "Port {} is successfully created on network {}".format(port_id, - net_id) - LOG.info(succ_msg) - return 0, port_id - - -def delete_port(port_id, fail_ok=False, auth_info=Tenant.get('admin'), - con_ssh=None): - """ - Delete given port - Args: - port_id (str): - fail_ok (bool): - auth_info (dict): - con_ssh (SSHClient): - - Returns (tuple): (, ) - (0, "Port is successfully deleted") - (1, ) - delete port cli rejected - (2, "Port still exists after deleting") - post deletion - check failed - - """ - LOG.info("Deleting port: {}".format(port_id)) - if not port_id: - msg = "No port specified" - LOG.warning(msg) - return -1, msg - - code, output = cli.openstack('port delete', port_id, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - if code > 0: - return 1, output - - existing_ports = get_ports(field='id', auth_info=auth_info, con_ssh=con_ssh) - if port_id in existing_ports: - err_msg = "Port {} still exists after deleting".format(port_id) - if fail_ok: - LOG.warning(err_msg) - return 2, err_msg - raise exceptions.NeutronError(err_msg) - - succ_msg = "Port {} is successfully deleted".format(port_id) - LOG.info(succ_msg) - return 0, succ_msg - - -def set_port(port_id, name=None, fixed_ips=None, no_fixed_ip=None, - device_id=None, device_owner=None, - port_security=None, enable_port=None, mac_addr=None, - vnic_type=None, wrs_vif=None, - security_groups=None, no_security_groups=None, qos_pol=None, - host_id=None, - allowed_addr_pairs=None, no_allowed_addr_pairs=None, dns_name=None, - description=None, - tag=None, no_tag=None, fail_ok=False, auth_info=None, - con_ssh=None): - args_dict = { - '--description': description, - '--device': device_id, - '--mac-address': mac_addr, - '--device-owner': device_owner, - '--vnic-type': vnic_type, - '--host': host_id, - '--dns-name': dns_name, - '--enable': enable_port, - '--disable': True if enable_port is False else None, - '--enable-port-security': port_security, - '--disable-port-security': True if port_security is False else None, - '--name': name, - '--fixed-ip': fixed_ips, - '--no-fixed-ip': no_fixed_ip, - '--qos-policy': qos_pol, - '--security-group': security_groups, - '--no-security-group': no_security_groups, - '--allowed-address': allowed_addr_pairs, - '--no-allowed-address': no_allowed_addr_pairs, - '--tag': tag, - '--no-tag': no_tag, - '--binding-profile vif_model': wrs_vif, - } - args = '{} {}'.format( - common.parse_args(args_dict, repeat_arg=True, vals_sep=','), port_id) - code, out = cli.openstack('port set', args, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - if code != 0: - return code, out - - msg = "Port {} is updated.".format(port_id) - LOG.info(msg) - return code, msg - - -def __convert_ip_subnet(line): - ip_addr = subnet = '' - if 'ip_address' in line: - ip_addrs = re.findall("ip_address=\'(.*)\',", line) - if ip_addrs: - ip_addr = ip_addrs[0] - subnets = re.findall("subnet_id=\'(.*)\'", line) - if subnets: - subnet = subnets[0] - - return {'ip_address': ip_addr, 'subnet_id': subnet} - - -def get_ports(field='id', network=None, router=None, server=None, project=None, - fixed_ips=None, long=False, mac=None, - port_id=None, port_name=None, auth_info=Tenant.get('admin'), - con_ssh=None, strict=False): - """ - Get a list of ports with given arguments - Args: - field (str|list|tuple): openstack port list table header(s). 
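# [Editor's sketch] __convert_ip_subnet above recovers the address and subnet
# id from the "ip_address='...', subnet_id='...'" strings that openstack port
# commands print. The same regexes run against an illustrative line:
import re

line = "ip_address='192.168.101.4', subnet_id='f7d45f05-0f00-4f35-8b39-6d3b9f2f4a1e'"
ip_addr = re.findall(r"ip_address='(.*)',", line)[0]
subnet = re.findall(r"subnet_id='(.*)'", line)[0]
assert ip_addr == '192.168.101.4'
assert subnet == 'f7d45f05-0f00-4f35-8b39-6d3b9f2f4a1e'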
'ID', - 'NAME', 'MAC Address', 'Fixed IP Addresses' - network (str|None) - router (str|None) - server (str|None) - project (str|None) - mac (str|None) - fixed_ips (list|tuple|dict|None) e.g., ({'subnet': , - 'ip-address': }, {'ip-address': }) - long (bool): - port_id (str): id of the port - port_name (str): name of the port - strict (bool): - auth_info (dict): - con_ssh (SSHClient): - - Returns (list): - - """ - optional_args = { - '--fixed-ip': fixed_ips, - '--project': project, - '--network': network, - '--router': router, - '--server': server, - '--mac-address': mac, - '--long': long, - } - args_str = common.parse_args(args_dict=optional_args, repeat_arg=True, - vals_sep=',') - table_ = table_parser.table( - cli.openstack('port list', args_str, ssh_client=con_ssh, - auth_info=auth_info)[1]) - - filters = {} - if port_id: - filters['id'] = port_id - elif port_name: - filters['name'] = port_name - - convert = False - if isinstance(field, str): - convert = True - field = (field,) - - res = [] - for header in field: - ports_info = table_parser.get_values(table_, header, strict=strict, - merge_lines=False, **filters) - if header.lower() == 'fixed ip addresses': - values = [] - for port_info in ports_info: - if isinstance(port_info, str): - port_info = [port_info] - port_info = [__convert_ip_subnet(line=line) for line in - port_info] - values.append(port_info) - ports_info = values - res.append(ports_info) - - if convert: - res = res[0] - return res - - -def get_port_values(port, fields=('binding_vnic_type', 'mac_address'), - con_ssh=None, auth_info=None): - """ - Get port info via openstack port show - Args: - port (str): - fields (str|list|tuple): - con_ssh (SSHClient): - auth_info (dict): - - Returns (list): return list of list if field is fixed_ips e.g., - fields = ('id', 'fixed_ips') - returns: [, [{'ip_address': , 'subnet_id': }, - ..]] - - """ - if isinstance(fields, str): - fields = (fields,) - - table_ = table_parser.table( - cli.openstack('port show', port, ssh_client=con_ssh, - auth_info=auth_info)[1]) - values = [] - for field in fields: - value = table_parser.get_value_two_col_table(table_, field) - if field == 'fixed_ips': - if isinstance(value, str): - value = [value] - value = [__convert_ip_subnet(line) for line in value] - values.append(value) - - return values - - -def get_pci_devices_info(class_id, con_ssh=None, auth_info=None): - """ - Get PCI devices with nova device-list/show. - - As in load "2017-01-17_22-01-49", the known supported devices are: - Coleto Creek PCIe Co-processor Device Id: 0443 Vendor Id:8086 - - Args: - class_id (str|list): Some possible values: - 0b4000 (Co-processor), - 0c0330 (USB controller), - 030000 (VGA compatible controller) - con_ssh: - auth_info: - - Returns (dict): nova pci devices dict. 
- Format: {: {: {}, : {...}}, - : {...}, - ...} - Examples: - {'qat-dh895xcc-vf': {'compute-0': {'Device ID':'0443','Class - Id':'0b4000', ...} 'compute-1': {...}}} - - """ - table_ = table_parser.table( - cli.nova('device-list', ssh_client=con_ssh, auth_info=auth_info)[1]) - table_ = table_parser.filter_table(table_, **{'class_id': class_id}) - LOG.info('output of nova device-list for {}: {}'.format(class_id, table_)) - - devices = table_parser.get_column(table_, 'PCI Alias') - LOG.info('PCI Alias from device-list:{}'.format(devices)) - - nova_pci_devices = {} - for alias in devices: - table_ = table_parser.table(cli.nova('device-show {}'.format(alias))[0]) - # LOG.debug('output from nova device-show for device-id:{}\n{ - # }'.format(alias, table_)) - - table_dict = table_parser.row_dict_table(table_, key_header='Host', - unique_key=True, - lower_case=False) - nova_pci_devices[alias] = table_dict - # {'qat-dh895xcc-vf': {'compute-0': {'Device ID':'0443','Class - # Id':'0b4000', ...} 'compute-1': {...}}} - - LOG.info('nova_pci_devices: {}'.format(nova_pci_devices)) - - return nova_pci_devices - - -def get_pci_device_configured_vfs_value(device_id, con_ssh=None, - auth_info=None): - """ - Get PCI device configured vfs value for given device id - - Args: - device_id (str): device vf id - con_ssh: - auth_info: - - Returns: - str : - - """ - _table = table_parser.table( - cli.nova('device-list', ssh_client=con_ssh, auth_info=auth_info)[1]) - LOG.info('output of nova device-list:{}'.format(_table)) - _table = table_parser.filter_table(_table, **{'Device Id': device_id}) - return table_parser.get_column(_table, 'pci_vfs_configured')[0] - - -def get_pci_device_used_vfs_value(device_id, con_ssh=None, auth_info=None): - """ - Get PCI device used number of vfs value for given device id - - Args: - device_id (str): device vf id - con_ssh: - auth_info: - - Returns: - str : - - """ - _table = table_parser.table( - cli.nova('device-list', ssh_client=con_ssh, auth_info=auth_info)[1]) - LOG.info('output of nova device-list:{}'.format(_table)) - _table = table_parser.filter_table(_table, **{'Device Id': device_id}) - LOG.info('output of nova device-list:{}'.format(_table)) - return table_parser.get_column(_table, 'pci_vfs_used')[0] - - -def get_pci_device_vfs_counts_for_host( - host, device_id=None, fields=('pci_vfs_configured', 'pci_vfs_used'), - con_ssh=None, auth_info=Tenant.get('admin')): - """ - Get PCI device used number of vfs value for given device id - - Args: - host (str): compute hostname - device_id (str): device vf id - fields (tuple|str|list) - con_ssh: - auth_info: - - Returns: - list - - """ - if device_id is None: - device_id = get_pci_device_list_values(field='Device Id', - con_ssh=con_ssh, - auth_info=auth_info)[0] - - table_ = table_parser.table( - cli.nova('device-show {}'.format(device_id), ssh_client=con_ssh, - auth_info=auth_info)[1]) - LOG.debug( - 'output from nova device-show for device-id:{}\n{}'.format(device_id, - table_)) - - table_ = table_parser.filter_table(table_, host=host) - counts = [] - if isinstance(fields, str): - fields = [fields] - - for field in fields: - counts.append(int(table_parser.get_column(table_, field)[0])) - - return counts - - -def get_pci_device_list_values(field='pci_vfs_used', con_ssh=None, - auth_info=Tenant.get('admin'), **kwargs): - table_ = table_parser.table( - cli.nova('device-list', ssh_client=con_ssh, auth_info=auth_info)[1]) - - values = table_parser.get_values(table_, field, **kwargs) - if field in ['pci_pfs_configured', 'pci_pfs_used', 
'pci_vfs_configured', - 'pci_vfs_used']: - values = [int(value) for value in values] - - return values - - -def get_pci_device_list_info(con_ssh=None, header_key='pci alias', - auth_info=Tenant.get('admin'), **kwargs): - table_ = table_parser.table( - cli.nova('device-list', ssh_client=con_ssh, auth_info=auth_info)[1]) - if kwargs: - table_ = table_parser.filter_table(table_, **kwargs) - - return table_parser.row_dict_table(table_, key_header=header_key) - - -def get_tenant_routers_for_vms(vms, con_ssh=None, - auth_info=Tenant.get('admin')): - """ - Get tenant routers for given vms - - Args: - vms (str|list): - con_ssh (SSHClient): - auth_info - - Returns (list): list of router ids - - """ - if isinstance(vms, str): - vms = [vms] - - router_ids, router_projects = get_routers(auth_info=auth_info, - con_ssh=con_ssh, - field=('ID', 'Project')) - vms_routers = [] - from keywords import vm_helper - for i in range(len(router_ids)): - router_project = router_projects[i] - vms_with_router = vm_helper.get_vms(vms=vms, project=router_project, - all_projects=False, - auth_info=auth_info, - con_ssh=con_ssh) - if vms_with_router: - vms_routers.append(router_ids[i]) - vms = list(set(vms) - set(vms_with_router)) - - if not vms: - break - - return vms_routers - - -def collect_networking_info(time_stamp, routers=None, vms=None, sep_file=None, - con_ssh=None): - LOG.info("Ping tenant(s) router's external and internal gateway IPs") - - if not routers: - if vms: - if isinstance(vms, str): - vms = [vms] - routers = get_tenant_routers_for_vms(vms=vms) - else: - routers = get_routers(name='tenant[12]-router', regex=True, - auth_info=Tenant.get('admin'), - con_ssh=con_ssh) - elif isinstance(routers, str): - routers = [routers] - - ips_to_ping = [] - for router_ in routers: - router_ips = get_router_subnets(router=router_, field='ip_address', - con_ssh=con_ssh) - ips_to_ping += router_ips - - res_bool, res_dict = ping_ips_from_natbox(ips_to_ping, num_pings=3, - timeout=15) - if sep_file: - res_str = "succeeded" if res_bool else 'failed' - content = "#### Ping router interfaces {} ####\n{}\n".format(res_str, - res_dict) - common.write_to_file(sep_file, content=content) - - # if ProjVar.get_var('ALWAYS_COLLECT'): - # common.collect_software_logs() - - hosts = host_helper.get_up_hypervisors(con_ssh=con_ssh) - for router in routers: - router_host = get_network_agents(field='Host', router=router, - con_ssh=con_ssh)[0] - if router_host and router_host not in hosts: - hosts.append(router_host) - LOG.info("Router {} is hosted on {}".format(router, router_host)) - - if hosts: - is_avs = system_helper.is_avs(con_ssh=con_ssh) - vswitch_type = 'avs' if is_avs else 'ovs' - LOG.info( - "Collect {}.info for {} router(s) on router host(s): {}".format( - vswitch_type, routers, hosts)) - for host in hosts: - collect_vswitch_info_on_host(host, vswitch_type, - collect_extra_ovs=(not is_avs), - time_stamp=time_stamp, - con_ssh=con_ssh) - - -def get_network_agents(field='Host', agent_host=None, router=None, network=None, - agent_type=None, long=False, - con_ssh=None, auth_info=Tenant.get('admin'), **kwargs): - """ - Get network agents values from openstack network agent list - Args: - field (str|list|tuple): - agent_host: - router: - network: - agent_type: - long: - con_ssh: - auth_info: - **kwargs: - - Returns (list): - - """ - args_dict = { - '--agent-type': agent_type, - '--host': agent_host, - '--network': network, - '--router': router, - '--long': long, - } - args = common.parse_args(args_dict) - table_ = table_parser.table( - 
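# [Editor's sketch] get_tenant_routers_for_vms above removes matched vms from
# the working set as each router's project is checked, letting the loop stop
# early once every vm is accounted for. The narrowing pattern with plain data:
vms = {'vm1', 'vm2', 'vm3'}
routers_by_project = {'router-a': {'vm1'}, 'router-b': {'vm2', 'vm3'}}
matched = []
for router, project_vms in routers_by_project.items():
    hit = vms & project_vms
    if hit:
        matched.append(router)
        vms -= hit
    if not vms:
        break
assert matched == ['router-a', 'router-b']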
cli.openstack('network agent list', args, ssh_client=con_ssh, - auth_info=auth_info)[1]) - return table_parser.get_multi_values(table_, field, **kwargs) - - -def ping_ips_from_natbox(ips, natbox_ssh=None, num_pings=5, timeout=30): - if not natbox_ssh: - natbox_ssh = NATBoxClient.get_natbox_client() - - res_dict = {} - for ip_ in ips: - packet_loss_rate = ping_server( - server=ip_, ssh_client=natbox_ssh, num_pings=num_pings, - timeout=timeout, fail_ok=True, vshell=False)[0] - res_dict[ip_] = packet_loss_rate - - res_bool = not any(loss_rate == 100 for loss_rate in res_dict.values()) - # LOG.error("PING RES: {}".format(res_dict)) - if res_bool: - LOG.info("Ping successful from NatBox: {}".format(ips)) - else: - LOG.warning("Ping unsuccessful from NatBox: {}".format(res_dict)) - - return res_bool, res_dict - - -def collect_vswitch_info_on_host(host, vswitch_type, time_stamp, - collect_extra_ovs=False, con_ssh=None): - """ - - Args: - host (str): - vswitch_type (str): avs or ovs - time_stamp (str) - collect_extra_ovs - con_ssh - - Returns: - - """ - if not con_ssh: - con_ssh = ControllerClient.get_active_controller() - if not time_stamp: - time_stamp = common.get_date_in_format(ssh_client=con_ssh, - date_format='%Y%m%d_%H-%M') - con_name = con_ssh.get_hostname() - with host_helper.ssh_to_host(host, con_ssh=con_ssh) as host_ssh: - # create log file for host under home dir - # time_stamp = common.get_date_in_format(ssh_client=host_ssh, - # date_format='%Y%m%d_%H-%M') - test_name = ProjVar.get_var("TEST_NAME").split(sep='[')[0] - file_name = os.path.join(HostLinuxUser.get_home(), - '{}-{}-{}-vswitch.log'.format(time_stamp, - test_name, host)) - host_ssh.exec_cmd('touch {}'.format(file_name)) - - # Collect vswitch logs using collect tool - # vswitch log will be saved to /scratch/var/extra/avs.info on the - # compute host - host_ssh.exec_sudo_cmd('/etc/collect.d/collect_{}'.format(vswitch_type)) - vswitch_info_path = '/scratch/var/extra/{}.info'.format(vswitch_type) - host_ssh.exec_cmd( - r'echo -e "##### {} {}.info collected #####\n" >> {}'.format( - host, vswitch_type, file_name), - get_exit_code=False) - time.sleep(1) - host_ssh.exec_sudo_cmd( - 'cat {} >> {}'.format(vswitch_info_path, file_name), - get_exit_code=False) - host_ssh.exec_sudo_cmd('rm -f {}'.format(vswitch_info_path)) - - if collect_extra_ovs: - # Run a few cmds to collect more ovs info - host_ssh.exec_cmd(r'echo -e "\n\n#### Additional ovs ' - r'cmds on {} ####\n >> {}"'.format(host, - file_name), - get_exit_code=False) - for cmd in ('ovs-ofctl show br-int', 'ovs-ofctl dump-flows br-int', - 'ovs-appctl dpif/dump-flows br-int'): - host_ssh.exec_cmd( - r'echo -e "\n\n\n$ sudo {}" >> {}'.format(cmd, file_name)) - cmd = '{} >> {}'.format(cmd, file_name) - host_ssh.exec_sudo_cmd(cmd, get_exit_code=False) - - host_ssh.exec_sudo_cmd('chmod 777 {}'.format(file_name)) - - if host != con_name: - host_ssh.scp_on_source(file_name, - dest_user=HostLinuxUser.get_user(), - dest_ip=con_name, - dest_path=file_name, - dest_password=HostLinuxUser.get_password(), - timeout=120) - - dest_path = os.path.join(ProjVar.get_var('PING_FAILURE_DIR'), - os.path.basename(file_name)) - common.scp_from_active_controller_to_localhost(file_name, - dest_path=dest_path, - timeout=120) - return dest_path - - -def get_pci_device_numa_nodes(hosts): - """ - Get processors of crypto PCI devices for given hosts - - Args: - hosts (list): list of hosts to check - - Returns (dict): host, numa_nodes map. 
e.g., {'compute-0': ['0'], - 'compute-1': ['0', '1']} - - """ - hosts_numa = {} - for host in hosts: - numa_nodes = host_helper.get_host_devices(host, field='numa_node') - hosts_numa[host] = numa_nodes - - LOG.info("Hosts numa_nodes map for PCI devices: {}".format(hosts_numa)) - return hosts_numa - - -def get_pci_procs(hosts, net_type='pci-sriov'): - """ - Get processors of pci-sriov or pci-passthrough devices for given hosts - - Args: - hosts (list): list of hosts to check - net_type (str): pci-sriov or pci-passthrough - - Returns (dict): host, procs map. e.g., {'compute-0': ['0'], 'compute-1': - ['0', '1']} - - """ - hosts_procs = {} - for host in hosts: - ports_list = host_helper.get_host_interfaces(host, field='ports', - net_type=net_type) - - ports = [] - for port in ports_list: - ports += port - ports = list(set(ports)) - - procs = host_helper.get_host_ports(host, field='processor', - **{'name': ports}) - hosts_procs[host] = list(set(procs)) - - LOG.info("Hosts procs map for {} devices: {}".format(net_type, hosts_procs)) - return hosts_procs - - -def wait_for_agents_healthy(hosts=None, timeout=120, fail_ok=False, - con_ssh=None, auth_info=Tenant.get('admin')): - """ - Wait for neutron agents to be alive - Args: - hosts (str|list): hostname(s) to check. When None, all nova - hypervisors will be checked - timeout (int): max wait time in seconds - fail_ok (bool): whether to return False or raise exception when - non-alive agents exist - con_ssh (SSHClient): - auth_info (dict): - - Returns (tuple): ((bool), (str)) - (True, "All agents for are alive") - (False, "Some agents are not alive: ") - Applicable when fail_ok=True - - """ - if hosts is None: - hosts = host_helper.get_hypervisors(con_ssh=con_ssh, - auth_info=auth_info) - elif isinstance(hosts, str): - hosts = [hosts] - - unhealthy_agents = None - LOG.info("Wait for neutron agents to be alive for {}".format(hosts)) - end_time = time.time() + timeout - while time.time() < end_time: - alive_vals, states, agents, agent_hosts = get_network_agents( - field=('Alive', 'State', 'Binary', 'Host'), - host=hosts, con_ssh=con_ssh, auth_info=auth_info) - - unhealthy_agents = [i for i in - list(zip(agents, agent_hosts, states, alive_vals)) - if - (i[-1] != ':-)' or i[-2] != 'UP')] - if not unhealthy_agents: - succ_msg = "All agents for {} are alive and up".format(hosts) - LOG.info(succ_msg) - return True, succ_msg - - msg = "Some network agents are not healthy: {}".format(unhealthy_agents) - LOG.warning(msg) - if fail_ok: - return False, msg - raise exceptions.NeutronError(msg) - - -def get_trunks(field='id', trunk_id=None, trunk_name=None, parent_port=None, - strict=False, - auth_info=Tenant.get('admin'), con_ssh=None): - """ - Get a list of trunks with given arguments - Args: - field (str): any valid header of neutron trunk list table. 
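# [Editor's sketch] wait_for_agents_healthy above zips the per-agent columns
# and keeps any row whose alive marker is not ':-)' or whose state is not UP.
# The row filter in isolation, with sample agent-list data:
agents = ['l3-agent', 'dhcp-agent']
agent_hosts = ['compute-0', 'compute-1']
states = ['UP', 'DOWN']
alive_vals = [':-)', ':-)']
unhealthy = [row for row in zip(agents, agent_hosts, states, alive_vals)
             if row[-1] != ':-)' or row[-2] != 'UP']
assert unhealthy == [('dhcp-agent', 'compute-1', 'DOWN', ':-)')]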
'id', - 'name', 'mac_address', or 'fixed_ips' - trunk_id (str): id of the trunk - trunk_name (str): name of the trunk - parent_port (str): parent port of the trunk - strict (bool): - auth_info (dict): - con_ssh (SSHClient): - - Returns (list): - - """ - table_ = table_parser.table( - cli.openstack('network trunk list', ssh_client=con_ssh, - auth_info=auth_info)[1]) - - kwargs = { - 'id': trunk_id, - 'name': trunk_name, - 'parent_port': parent_port, - } - kwargs = {k: v for k, v in kwargs.items() if v} - - trunks = table_parser.get_values(table_, field, strict=strict, regex=True, - merge_lines=True, **kwargs) - return trunks - - -def create_trunk(parent_port, name=None, sub_ports=None, description=None, - project=None, project_domain=None, - enable=True, fail_ok=False, con_ssh=None, - auth_info=Tenant.get('admin'), cleanup=None): - """ - Create a trunk via API. - Args: - parent_port (str): Parent port of trunk. - project (str|None): tenant name to create the trunk under. - project_domain (str|None) - name (str|None): Name of the trunk. - enable (bool|None): Admin state of the trunk. - sub_ports (list|tuple|dict|str|None): List of subport dictionaries in - format - [[, - segmentation_type(vlan), - segmentation_id()] []..] - description (str|None) - fail_ok - con_ssh - auth_info - cleanup - - Return: List with trunk's data returned from Neutron API. - - """ - if not project and auth_info and auth_info['tenant'] == 'admin': - project = Tenant.get_primary()['tenant'] - - args_dict = { - '--description': description, - '--parent-port': parent_port, - '--subport': sub_ports, - '--enable': True if enable else None, - '--disable': True if enable is False else None, - '--project': project, - '--project-domain': project_domain, - } - args = common.parse_args(args_dict, repeat_arg=True, vals_sep=',') - if not name: - name = common.get_unique_name('port_trunk') - - LOG.info("Creating trunk {} with args: {}".format(name, args)) - args = '{} {}'.format(args, name) - code, output = cli.openstack('network trunk create', args, - ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info) - - table_ = table_parser.table(output) - trunk_id = table_parser.get_value_two_col_table(table_, 'id') - - if cleanup and trunk_id: - ResourceCleanup.add('trunk', trunk_id) - - if code > 0: - return 1, output - - succ_msg = "Trunk {} is successfully created for port {}".format( - name, parent_port) - LOG.info(succ_msg) - return 0, trunk_id - - -def delete_trunks(trunks, fail_ok=False, auth_info=Tenant.get('admin'), - con_ssh=None): - """ - Delete given trunk - Args: - trunks (str): - fail_ok (bool): - auth_info (dict): - con_ssh (SSHClient): - - Returns (tuple): (, ) - (0, "Port is successfully deleted") - (1, ) - delete port cli rejected - (2, "trunk still exists after deleting") - post deletion - check failed - - """ - if not trunks: - msg = "No trunk specified" - LOG.info(msg) - return -1, msg - - if isinstance(trunks, str): - trunks = [trunks] - - rtn_val = 'id' if re.match(UUID, trunks[0]) else 'name' - existing_trunks = get_trunks(field=rtn_val, auth_info=auth_info, - con_ssh=con_ssh) - trunks = list(set(trunks) & set(existing_trunks)) - if not trunks: - msg = "Given trunks not found on system. Do nothing." 
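# [Editor's sketch] delete_trunks above decides whether it was handed ids or
# names by matching the first item against the UUID pattern. That constant is
# not in this hunk; the pattern below is an assumed equivalent:
import re

UUID_SKETCH = r'[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}'
trunks = ['8c1e7a95-6e3e-4a6b-9d9a-5f3b2c1d0e9f']
rtn_val = 'id' if re.match(UUID_SKETCH, trunks[0]) else 'name'
assert rtn_val == 'id'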
- LOG.info(msg) - return -1, msg - - trunks = ' '.join(trunks) - LOG.info("Deleting trunk: {}".format(trunks)) - code, output = cli.openstack('network trunk delete', trunks, - ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info) - - if code > 0: - return 1, output - - existing_trunks = get_trunks(field='id', auth_info=auth_info, - con_ssh=con_ssh) - undeleted_trunks = list(set(trunks) & set(existing_trunks)) - if undeleted_trunks: - err_msg = "Trunk {} still exists after deleting".format( - undeleted_trunks) - if fail_ok: - LOG.warning(err_msg) - return 2, err_msg - raise exceptions.NeutronError(err_msg) - - succ_msg = "Trunk {} is successfully deleted".format(trunks) - LOG.info(succ_msg) - return 0, succ_msg - - -def set_trunk(trunk, sub_ports=None, name=None, enable=None, description=None, - fail_ok=False, - con_ssh=None, auth_info=Tenant.get('admin')): - """ - Set trunk with given parameters. - Args: - trunk (str): Trunk id to add the subports - sub_ports (list|tuple|str|None): - name (str|None) - description (str|None) - enable (bool|None) - fail_ok - con_ssh - auth_info - - Return (tuple): - - """ - args_dict = { - '--name': name, - '--description': description, - '--subport': sub_ports, - '--enable': True if enable else None, - '--disable': True if enable is False else None, - } - - args = common.parse_args(args_dict, repeat_arg=True, vals_sep=',') - if not args: - raise ValueError("Nothing specified to set") - - LOG.info("Setting trunk {} with args: {}".format(trunk, args)) - args = '{} {}'.format(args, trunk) - code, output = cli.openstack('network trunk set', args, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - - if code > 0: - return 1, output - - msg = 'Trunk {} is set successfully'.format(trunk) - LOG.info(msg) - return 0, msg - - -def unset_trunk(trunk, sub_ports, fail_ok=False, con_ssh=None, - auth_info=Tenant.get('admin')): - """ - Remove subports from a trunk via API. - Args: - trunk: Trunk id to remove the subports from - sub_ports (list|str|tuple) - fail_ok - con_ssh - auth_info - - Return: list with return code and msg - - """ - args = {'--subport': sub_ports} - args = '{} {}'.format(common.parse_args(args, repeat_arg=True), trunk) - - LOG.info("Unsetting trunk: {}".format(args)) - code, output = cli.openstack('network trunk unset', args, - ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info) - if code > 0: - return 1, output - - msg = 'Subport(s) removed from trunk {} successfully: {}'.format(trunk, - sub_ports) - LOG.info(msg) - return 0, msg - - -def get_networks(field='ID', long=False, full_name=None, external=None, - enabled=None, project=None, - project_domain=None, shared=None, status=None, - providernet_type=None, provider_physical_network=None, - provider_setment=None, agent=None, tags=None, any_tags=None, - not_tags=None, not_any_tags=None, - name=None, subnets=None, net_id=None, strict=False, - regex=False, exclude=False, auth_info=None, - con_ssh=None): - """ - Get networks based on given criteria. - - Args: - field (str|tuple|list) - long (bool|None): - full_name (str|None): - external (bool|None): - enabled (bool|None): - project (str|None): - project_domain (str|None): - shared (bool|None): - status (str|None): - providernet_type (str|None): - provider_physical_network (str|None): - provider_setment (str|None): - agent (str|None): - tags (list|tuple|str|None): - any_tags (list|tuple|str|None): - not_tags (list|tuple|str|None): - not_any_tags (list|tuple|str|None): - name (str): partial/full name of network, can be regex. 
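# [Editor's sketch] create_trunk/set_trunk above, like most helpers in this
# module, map a tri-state bool onto paired CLI flags: None omits both options,
# True selects --enable, False selects --disable. In isolation:

def enable_flags(enable):
    return {'--enable': True if enable else None,
            '--disable': True if enable is False else None}

assert enable_flags(None) == {'--enable': None, '--disable': None}
assert enable_flags(False) == {'--enable': None, '--disable': True}
# Side note: in delete_trunks above, trunks is joined into one string before
# the post-deletion set(trunks) intersection, so that check appears to compare
# individual characters rather than trunk ids in the removed code.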
This will be - used to filter networks after cmd executed - subnets (str|list\tuple): post filter - net_id (str|None): post filter - strict (bool): whether to perform strict search on given name and - subnets - regex (bool): whether to use regex to search - exclude (bool) - auth_info (dict): - con_ssh (SSHClient): - - Returns (list): list of networks - - """ - args_dict = { - '--long': long, - '--name': full_name, - '--project': project, - '--project-domain': project_domain, - '--external': True if external else None, - '--internal': True if external is False else None, - '--enable': True if enabled else None, - '--disable': True if enabled is False else None, - '--share': True if shared else None, - '--no-share': True if shared is False else None, - '--status': status, - '--provider-network-type': providernet_type, - '--provider-physical-network': provider_physical_network, - '--provider-segment': provider_setment, - '--agent': agent, - '--tags': tags, - '--any-tags': any_tags, - '--not-tags': not_tags, - '--not-any-tags': not_any_tags, - } - args = common.parse_args(args_dict, repeat_arg=False, vals_sep=',') - table_ = table_parser.table( - cli.openstack('network list', args, ssh_client=con_ssh, - auth_info=auth_info)[1]) - - filters = {'name': name, 'subnets': subnets, 'id': net_id} - filters = {k: v for k, v in filters.items() if str(v)} - if filters: - table_ = table_parser.filter_table(table_, strict=strict, regex=regex, - exclude=exclude, **filters) - - convert = False - if isinstance(field, str): - field = (field,) - convert = True - - res = [] - for header in field: - vals = table_parser.get_column(table_, header) - if header.lower() == 'subnets': - vals = [val.split(sep=', ') for val in vals] - res.append(vals) - if convert: - res = res[0] - - return res - - -def delete_network(network_id, auth_info=Tenant.get('admin'), con_ssh=None, - fail_ok=False): - """ - Delete given network - Args: - network_id: network id to be deleted. 
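# [Editor's sketch] delete_network above follows the module-wide
# (code, message) convention: -1 skipped/no-op, 0 success, 1 CLI rejected,
# 2 post-action check failed; non-zero codes generally require fail_ok=True,
# otherwise the helper raises. A caller-side dispatch, with handle_result as
# an illustrative name only:

def handle_result(code, msg):
    if code == 0:
        return 'deleted: {}'.format(msg)
    if code == 1:
        return 'cli rejected: {}'.format(msg)
    return 'post-check failed: {}'.format(msg)

assert handle_result(0, 'net1') == 'deleted: net1'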
- con_ssh (SSHClient): - auth_info (dict): - fail_ok (bool): whether to return False or raise exception when - non-alive agents exist - - Returns (list): - - """ - LOG.info("Deleting network {}".format(network_id)) - code, output = cli.openstack('network delete', network_id, - ssh_client=con_ssh, fail_ok=True, - auth_info=auth_info) - - if code > 0: - return 1, output - - if network_id in get_networks(auth_info=auth_info, con_ssh=con_ssh): - msg = "Network {} is still listed in neutron net-list".format( - network_id) - if fail_ok: - LOG.warning(msg) - return 2, msg - raise exceptions.NeutronError(msg) - - succ_msg = "Network {} is successfully deleted.".format(network_id) - return 0, succ_msg - - -def create_sfc_port_pair(ingress_port, egress_port, name=None, description=None, - service_func_param=None, fail_ok=False, - con_ssh=None, auth_info=None, cleanup=None): - """ - Create port pair - - Args: - ingress_port (str): - egress_port (str): - name (str|None): - description (str|None): - service_func_param (str|None): - fail_ok (bool): - con_ssh: - auth_info: - cleanup (str|None) - - Returns (tuple): - (0, ) # successfully created - (1, ) # create CLI rejected - - """ - if not name: - name = 'port_pair' - name = common.get_unique_name(name_str=name) - - args_dict = { - '--ingress': ingress_port, - '--egress': egress_port, - '--description': description, - '--service-function-parameters': service_func_param, - } - - arg = '{} {}'.format( - common.parse_args(args_dict, repeat_arg=True, vals_sep=','), name) - LOG.info("Creating port pair {}".format(name)) - code, output = cli.openstack(cmd='sfc port pair create', - positional_args=arg, ssh_client=con_ssh, - fail_ok=fail_ok, - auth_info=auth_info) - table_ = table_parser.table(output) - pair_id = table_parser.get_value_two_col_table(table_, field='ID') - if pair_id and cleanup: - ResourceCleanup.add('port_pair', pair_id, scope=cleanup) - - if code > 0: - return 1, output - - LOG.info("Port pair {} created successfully".format(pair_id)) - return 0, pair_id - - -def delete_sfc_port_pairs(port_pairs=None, value='ID', check_first=True, - fail_ok=False, con_ssh=None, auth_info=None): - """ - Delete port pairs - Args: - port_pairs (str|list|tuple|None): - value: ID or Name - check_first (bool): - fail_ok (bool): - con_ssh: - auth_info: - - Returns (tuple): ((int), (list), - (list), list) - (0, (list), [], []) - (1, , (list), - list) # fail_ok=True - - """ - if not port_pairs: - port_pairs = get_sfc_port_pairs(field=value, auth_info=auth_info, - con_ssh=con_ssh) - else: - if isinstance(port_pairs, str): - port_pairs = [port_pairs] - - if check_first: - existing_pairs = get_sfc_port_pairs(field=value, - auth_info=auth_info, - con_ssh=con_ssh) - port_pairs = list(set(port_pairs) & set(existing_pairs)) - - if not port_pairs: - msg = 'Port pair(s) do not exist. Do nothing.' 
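# [Editor's sketch] delete_sfc_port_pairs above narrows the request to pairs
# that actually exist before issuing any delete, so a stale caller list cannot
# fail the run. The check_first intersection with plain data:
requested = ['pp1', 'pp2', 'pp-stale']
existing = ['pp1', 'pp2', 'pp3']
port_pairs = list(set(requested) & set(existing))
assert sorted(port_pairs) == ['pp1', 'pp2']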
- LOG.info(msg) - return -1, msg - - errors = [] - LOG.info("Deleting port pair(s): {}".format(port_pairs)) - for port_pair in port_pairs: - code, output = cli.openstack(cmd='sfc port pair delete', - positional_args=port_pair, - ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - - if code > 0: - errors.append(output) - - if errors: - return 1, '\n'.join(errors) - - post_del_pairs = get_sfc_port_pairs(field=value, auth_info=auth_info, - con_ssh=con_ssh) - failed_pairs = list(set(port_pairs) & set(post_del_pairs)) - if failed_pairs: - msg = "Some port-pair(s) still exist after deletion: {}".format( - failed_pairs) - LOG.warning(msg) - if fail_ok: - return 2, msg - raise exceptions.NeutronError(msg) - - msg = "Port pair(s) deleted successfully." - LOG.info(msg) - return 0, msg - - -def get_sfc_port_pairs(field='ID', con_ssh=None, auth_info=None, **filters): - """ - Get port pairs - Args: - field (str|tuple|list): header of the table. ID or Name - con_ssh: - auth_info: - **filters: - - Returns (list): - - """ - arg = '--print-empty' - table_ = table_parser.table( - cli.openstack('sfc port pair list', positional_args=arg, - ssh_client=con_ssh, auth_info=auth_info)[1]) - return table_parser.get_multi_values(table_, field, **filters) - - -def create_sfc_port_pair_group(port_pairs=None, port_pair_val='ID', name=None, - description=None, group_param=None, - fail_ok=False, con_ssh=None, auth_info=None, - cleanup=None): - """ - Create a port pair group - Args: - port_pairs (str|list|tuple|None): - port_pair_val (str): ID or Name - name (str|None): - description (str|None): - group_param (str|None): - fail_ok (bool): - con_ssh: - auth_info: - cleanup - - Returns (tuple): - (0, ) - (1, ) - - """ - args_dict = { - '--port-pair': port_pairs, - '--description': description, - '--port-pair-group-parameters': group_param - } - - if not name: - name = 'port_pair_group' - name = common.get_unique_name(name_str=name) - arg = '{} {}'.format( - common.parse_args(args_dict, repeat_arg=True, vals_sep=','), name) - - LOG.info("Creating port pair group {}".format(name)) - code, output = cli.openstack('sfc port pair group create', arg, - ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info) - - table_ = table_parser.table(output) - group_id = table_parser.get_value_two_col_table(table_, 'ID') - if cleanup and group_id: - ResourceCleanup.add('port_pair_group', group_id, scope=cleanup) - - if code > 0: - return 1, output - - # Check specified port-pair(s) are in created group - port_pairs_in_group = eval( - table_parser.get_value_two_col_table(table_, 'Port Pair')) - if port_pairs: - if port_pair_val.lower() != 'id': - pair_ids = [] - for port_pair in port_pairs: - port_pair_id = \ - get_sfc_port_pairs(Name=port_pair, con_ssh=con_ssh, - auth_info=auth_info)[0] - pair_ids.append(port_pair_id) - port_pairs = pair_ids - assert sorted(port_pairs_in_group) == sorted( - port_pairs), "Port pairs expected in group: {}. Actual: {}". 
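# [Editor's note] create_sfc_port_pair_group above eval()s the 'Port Pair'
# cell to turn the printed list back into Python objects. For literal cells
# like this, ast.literal_eval is the safer stdlib equivalent; shown here as an
# alternative, not what the removed code used:
import ast

cell = "['b1244b85-e09c-4e44-9bf7-79e57ba73238']"
assert ast.literal_eval(cell) == ['b1244b85-e09c-4e44-9bf7-79e57ba73238']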
\ - format(port_pairs, port_pairs_in_group) - else: - assert not port_pairs_in_group, "Port pair(s) exist in group even " \ - "though no port pair is specified" - - LOG.info("Port pair group {} created successfully".format(name)) - return 0, group_id - - -def set_sfc_port_pair_group(group, port_pairs=None, name=None, description=None, - fail_ok=False, con_ssh=None, - auth_info=None): - """ - Set port pair group with given values - Args: - group (str): port pair group to set - port_pairs (list|str|tuple|None): port pair(s) to add - name (str|None): - description (str|None): - fail_ok (bool): - con_ssh: - auth_info: - - Returns (tuple): - (0, "Port pair group set successfully") - (1, ) - - """ - LOG.info("Setting port pair group {}".format(group)) - arg = '' - verify = {} - if port_pairs is not None: - if port_pairs: - if isinstance(port_pairs, str): - port_pairs = [port_pairs] - port_pairs = list(port_pairs) - for port_pair in port_pairs: - arg += ' --port-pair {}'.format(port_pair) - - verify['Port Pair'] = port_pairs - else: - arg += ' --no-port-pair' - verify['Port Pair'] = [] - - if name is not None: - arg += ' --name {}'.format(name) - verify['Name'] = name - if description is not None: - arg += ' --description {}'.format(description) - verify['Description'] = description - - arg = '{} {}'.format(arg, group) - code, output = cli.openstack('sfc port pair group set', positional_args=arg, - ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info) - if code > 0: - return 1, output - - LOG.info("Verify port pair group is set correctly") - table_ = table_parser.table(output) - for key, val in verify.items(): - actual_val = table_parser.get_value_two_col_table(table_, key) - if isinstance(val, list): - actual_val = eval(actual_val) - if val: - assert set(val) <= set( - actual_val), "Port pair(s) set: {}; pairs in group: " \ - "{}".format(val, actual_val) - assert len(set(actual_val)) == len( - actual_val), "Duplicated item found in Port pairs field: " \ - "{}". format(actual_val) - else: - assert not actual_val, "Port pair still exist in group {} " \ - "after setting to no: {}". \ - format(group, actual_val) - else: - assert val == actual_val, "Value set for {} is {} ; " \ - "actual: {}".format(key, val, actual_val) - - msg = "Port pair group set successfully" - LOG.info("Port pair group set successfully") - return 0, msg - - -def unset_sfc_port_pair_group(group, port_pairs='all', fail_ok=False, - con_ssh=None, auth_info=None): - """ - Remove port pair(s) from a group - Args: - group (str): - port_pairs (str|list|tuple|None): port_pair(s). When 'all': remove - all port pairs from group. 
- fail_ok (bool): - con_ssh: - auth_info: - - Returns: - (0, (list)) - (1, (str)) - - """ - LOG.info("Unsetting port pair group {}".format(group)) - arg = '' - if port_pairs == 'all': - arg = '--all-port-pair' - else: - if isinstance(port_pairs, str): - port_pairs = [port_pairs] - port_pairs = list(port_pairs) - - for port_pair in port_pairs: - arg += ' --port-pair {}'.format(port_pair) - - arg = '{} {}'.format(arg, group) - - code, output = cli.openstack('sfc port pair group unset', - positional_args=arg, ssh_client=con_ssh, - fail_ok=fail_ok, - auth_info=auth_info) - - if code > 0: - return 1, output - - LOG.info("Verify port pair group is unset correctly") - table_ = table_parser.table(output) - actual_pairs = eval( - table_parser.get_value_two_col_table(table_, 'Port Pair')) - if port_pairs == 'all': - assert not actual_pairs - else: - unremoved_pairs = list(set(actual_pairs) & set(port_pairs)) - assert not unremoved_pairs - - LOG.info("Port pairs are successfully removed from group {}".format(group)) - return 0, actual_pairs - - -def delete_sfc_port_pair_group(group, check_first=True, fail_ok=False, - auth_info=None, con_ssh=None): - """ - Delete given port pair group - Args: - group (str): - check_first (bool): Whether to check before deletion - fail_ok (bool): - auth_info: - con_ssh: - - Returns (tuple): - (-1, 'Port pair group does not exist. Skip deleting.') # - check_first=True - (0, 'Port pair group successfully deleted') - (1, ) # CLI rejected. fail_ok=True - - """ - if check_first: - group_id = get_sfc_port_pair_group_values(group=group, field='ID', - auth_info=auth_info, - con_ssh=con_ssh, - fail_ok=True) - if group_id is None: - msg = 'Port pair group {} does not exist. Skip deleting.'.format( - group) - LOG.info(msg) - return -1, msg - - LOG.info("Deleting port pair group {}".format(group)) - code, output = cli.openstack('sfc port pair group delete', group, - ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info) - - if code > 0: - return 1, output - - group_id = get_sfc_port_pair_group_values(group=group, field='ID', - auth_info=auth_info, - con_ssh=con_ssh, - fail_ok=True) - assert group_id is None, "Port pair group {} still exists after " \ - "deletion".format(group) - - msg = 'Port pair group {} successfully deleted'.format(group) - LOG.info(msg) - return 0, msg - - -def get_sfc_port_pair_groups(field='ID', auth_info=None, con_ssh=None): - """ - Get port pair groups - Args: - field (str|tuple|list): field(s) for port pair groups table - auth_info: - con_ssh: - - Returns (list): - - """ - table_ = table_parser.table( - cli.openstack('sfc port pair group list --print-empty', - ssh_client=con_ssh, auth_info=auth_info)[1]) - - return table_parser.get_multi_values(table_, field) - - -def get_sfc_port_pair_group_values(group, field='Port Pair', fail_ok=False, - auth_info=None, con_ssh=None): - """ - Get port pair group value from 'openstack sfc port pair group show' - Args: - group (str): - field (str|list|tuple): - fail_ok (bool): - auth_info: - con_ssh: - - Returns (list|None): - None # if group does not exist. Only when fail_ok=True - str|dict|list # value of given field. 
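# [Editor's sketch] unset_sfc_port_pair_group above accepts the string 'all'
# as a sentinel that swaps the repeated --port-pair options for a single
# --all-port-pair flag. The branch on its own (illustrative helper name):

def build_unset_arg(port_pairs):
    if port_pairs == 'all':
        return '--all-port-pair'
    if isinstance(port_pairs, str):
        port_pairs = [port_pairs]
    return ' '.join('--port-pair {}'.format(pp) for pp in port_pairs)

assert build_unset_arg('all') == '--all-port-pair'
assert build_unset_arg(('pp1', 'pp2')) == '--port-pair pp1 --port-pair pp2'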
- - """ - code, output = cli.openstack('sfc port pair group show', group, - ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info) - if code > 0: - return None - - table_ = table_parser.table(output) - values = table_parser.get_multi_values_two_col_table(table_, field, - evaluate=True) - - return values - - -def get_sfc_flow_classifiers(field='ID', auth_info=None, con_ssh=None): - """ - Get flow classifiers - Args: - field (str|tuple|list): ID or Name - auth_info: - con_ssh: - - Returns (list): - - """ - table_ = table_parser.table( - cli.openstack('sfc flow classifier list --print-empty', - ssh_client=con_ssh, auth_info=auth_info)[1]) - - return table_parser.get_multi_values(table_, field) - - -def get_sfc_port_chains(field='ID', auth_info=None, con_ssh=None): - """ - Get flow classifiers - Args: - field (str): ID or Name - auth_info: - con_ssh: - - Returns (list): - - """ - table_ = table_parser.table( - cli.openstack('sfc port chain list --print-empty', ssh_client=con_ssh, - auth_info=auth_info)[1]) - - return table_parser.get_multi_values(table_, field) - - -def create_sfc_port_chain(port_pair_groups, name=None, flow_classifiers=None, - description=None, chain_param=None, - auth_info=None, fail_ok=False, con_ssh=None, - cleanup=None): - """ - Create port chain - Args: - port_pair_groups (str|list|tuple): - name (str|None): - flow_classifiers (str|list|tuple|None): - description (str|None): - chain_param (str|None): - auth_info: - fail_ok: - con_ssh: - cleanup - - Returns (tuple): - (1, ) # CLI rejected. fail_ok=True - (0, ) - - """ - - args_dict = { - '--port-pair-group': port_pair_groups, - '--flow-classifier': flow_classifiers, - '--description': description, - '--chain-parameters': chain_param - } - arg = common.parse_args(args_dict, repeat_arg=True, vals_sep=',') - - if not name: - name = common.get_unique_name(name_str='port_chain') - - arg = '{} {}'.format(arg, name) - LOG.info("Creating port chain {}".format(name)) - code, output = cli.openstack('sfc port chain create', arg, - ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info) - - if code > 0: - return 1, output - - table_ = table_parser.table(output, combine_multiline_entry=True) - port_chain_id = table_parser.get_value_two_col_table(table_, 'ID') - if cleanup: - ResourceCleanup.add('port_chain', port_chain_id, scope=cleanup) - - LOG.info("Port chain {} successfully created".format(name)) - return 0, port_chain_id - - -def set_sfc_port_chain(port_chain, port_pair_groups=None, flow_classifiers=None, - no_flow_classifier=None, - no_port_pair_group=None, fail_ok=False, con_ssh=None, - auth_info=None): - """ - Set port chain with given values - Args: - port_chain (str): port chain to set - port_pair_groups (list|str|tuple|None): port pair group(s) to add. - Use '' if no port pair group is desired - flow_classifiers (list|str|tuple|None): flow classifier(s) to add. 
- Use '' if no flow classifier is desired - no_flow_classifier (bool|None) - no_port_pair_group (bool|None) - fail_ok (bool): - con_ssh: - auth_info: - - Returns (tuple): - (0, "Port chain set successfully") - (1, ) - - """ - LOG.info("Setting port chain {}".format(port_chain)) - arg_dict = { - 'flow-classifier': flow_classifiers, - 'no-flow-classifier': no_flow_classifier, - 'port-pair-group': port_pair_groups, - 'no-port-pair-group': no_port_pair_group, - } - - arg = '{} {}'.format(common.parse_args(arg_dict, repeat_arg=True), - port_chain) - code, output = cli.openstack('sfc port chain set', positional_args=arg, - ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info) - if code > 0: - return 1, output - - msg = "Port chain {} set successfully".format(port_chain) - LOG.info(msg) - return 0, msg - - -def unset_sfc_port_chain(port_chain, flow_classifiers=None, - port_pair_groups=None, all_flow_classifier=None, - fail_ok=False, con_ssh=None, - auth_info=None): - """ - Remove port pair(s) from a group - Args: - port_chain (str): - flow_classifiers (str|list|tuple|None): flow_classifier(s) to remove. - When 'all': remove all flow_classifiers from group. - port_pair_groups (str|list|tuple|None): port_pair_group(s) to remove. - all_flow_classifier (bool|None) - fail_ok (bool): - con_ssh: - auth_info: - - Returns: - (0, "Port chain unset successfully") - (1, (str)) - - """ - LOG.info("Unsetting port chain {}".format(port_chain)) - args_dict = { - '--all-flow-classifier': all_flow_classifier, - '--flow-classifier': flow_classifiers, - '--port-pair-group': port_pair_groups - } - arg = common.parse_args(args_dict, repeat_arg=True) - if not arg: - raise ValueError("Nothing specified to unset.") - - arg = '{} {}'.format(arg, port_chain) - code, output = cli.openstack('sfc port chain unset', arg, - ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info) - - if code > 0: - return 1, output - - msg = "Port chain unset successfully" - LOG.info(msg) - return 0, msg - - -def delete_sfc_port_chain(port_chain, check_first=True, fail_ok=False, - auth_info=None, con_ssh=None): - """ - Delete given port pair group - Args: - port_chain (str): - check_first (bool): Whether to check before deletion - fail_ok (bool): - auth_info: - con_ssh: - - Returns (tuple): - (-1, 'Port chain does not exist. Skip deleting.') # - check_first=True - (0, 'Port chain successfully deleted') - (1, ) # CLI rejected. fail_ok=True - - """ - if check_first: - chain_id = get_sfc_port_chain_values(port_chain=port_chain, fields='ID', - auth_info=auth_info, - con_ssh=con_ssh, - fail_ok=True) - if chain_id is None: - msg = 'Port chain {} does not exist. 
Skip deleting.'.format( - port_chain) - LOG.info(msg) - return -1, msg - - LOG.info("Deleting port chain {}".format(port_chain)) - code, output = cli.openstack('sfc port chain delete', port_chain, - ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info) - - if code > 0: - return 1, output - - chain_id = get_sfc_port_chain_values(port_chain=port_chain, fields='ID', - auth_info=auth_info, con_ssh=con_ssh, - fail_ok=True) - assert chain_id is None, "Port chain {} still exists after deletion".format( - port_chain) - - msg = 'Port chain {} successfully deleted'.format(port_chain) - LOG.info(msg) - return 0, msg - - -def get_sfc_port_chain_values(port_chain, fields='Flow Classifiers', - fail_ok=False, auth_info=None, con_ssh=None): - """ - Get port chain value from 'openstack sfc port chain show' - Args: - port_chain (str): - fields (str|list|tuple): - fail_ok (bool): - auth_info: - con_ssh: - - Returns (None|list): None # if chain does not exist. Only when - fail_ok=True - - """ - code, output = cli.openstack('sfc port chain show', port_chain, - ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info) - if code > 0: - return None - - table_ = table_parser.table(output) - return table_parser.get_multi_values_two_col_table(table_, fields, - evaluate=True, - merge_lines=True) - - -def get_sfc_flow_classifier_values(flow_classifier, fields='Protocol', - fail_ok=False, auth_info=None, con_ssh=None): - """ - Get flow classifier value from 'openstack sfc flow classifier show' - Args: - flow_classifier (str): - fields (str): - fail_ok (bool): - auth_info: - con_ssh: - - Returns (None|list): return None if flow classifier does not exist. - Only when fail_ok=True - - """ - code, output = cli.openstack('sfc flow classifier show', flow_classifier, - ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info) - if code > 0: - return None - - table_ = table_parser.table(output) - return table_parser.get_multi_values_two_col_table(table_, fields, - merge_lines=True) - - -def create_flow_classifier(name=None, description=None, protocol=None, - ether_type=None, source_port=None, - dest_port=None, source_ip_prefix=None, - dest_ip_prefix=None, logical_source_port=None, - logical_dest_port=None, l7_param=None, fail_ok=False, - auth_info=None, con_ssh=None, - cleanup=None): - """ - Create a flow classifier - Args: - name: - description: - protocol: - ether_type: - source_port: - dest_port: - source_ip_prefix: - dest_ip_prefix: - logical_source_port: - logical_dest_port: - l7_param: - fail_ok: - auth_info: - con_ssh: - cleanup - - Returns (tuple): - (0, ) - (1, ) - - """ - arg_dict = { - 'description': description, - 'protocol': protocol, - 'ethertype': ether_type, - 'logical-source-port': logical_source_port, - 'logical-destination-port': logical_dest_port, - 'source-ip-prefix': source_ip_prefix, - 'destination-ip-prefix': dest_ip_prefix, - 'l7-parameters': l7_param, - 'source-port': source_port, - 'destination-port': dest_port, - } - - arg = common.parse_args(arg_dict) - if not name: - name = 'flow_classifier' - name = common.get_unique_name(name_str=name) - - arg += ' {}'.format(name) - - LOG.info("Creating flow classifier {}".format(name)) - code, output = cli.openstack('sfc flow classifier create', arg, - ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info) - - if code > 0: - return 1, output - - table_ = table_parser.table(output) - id_ = table_parser.get_value_two_col_table(table_, 'ID') - if cleanup and id_: - ResourceCleanup.add('flow_classifier', id_) - - msg = "Flow classifier {} 
successfully created.".format(id_) - LOG.info(msg) - return 0, id_ - - -def delete_flow_classifier(flow_classifier, check_first=True, fail_ok=False, - auth_info=None, con_ssh=None): - """ - Delete flow classifier - Args: - flow_classifier (str): - check_first: - fail_ok: - auth_info: - con_ssh: - - Returns (tuple): - (-1, Flow classifier does not exist. Skip deletion.") - (0, "Flow classifier successfully deleted") - (1, ) - - """ - if check_first: - info = get_sfc_flow_classifier_values(flow_classifier, fields='ID', - fail_ok=True, con_ssh=con_ssh, - auth_info=auth_info) - if info is None: - msg = "Flow classifier {} does not exist. Skip deletion.".format( - flow_classifier) - LOG.info(msg) - return -1, msg - - code, output = cli.openstack('sfc flow classifier delete', flow_classifier, - ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info) - if code > 0: - return 1, output - - post_del_id = get_sfc_flow_classifier_values(flow_classifier, fields='ID', - auth_info=auth_info, - con_ssh=con_ssh, - fail_ok=True)[0] - if post_del_id: - err = "Flow classifier {} still exists after deletion".format( - flow_classifier) - LOG.warning(err) - if fail_ok: - return 2, err - raise exceptions.NeutronError(err) - - msg = "Flow classifier {} successfully deleted".format(flow_classifier) - LOG.info(msg) - return 0, msg - - -def get_ip_for_eth(ssh_client, eth_name): - """ - Get the IP addr for given eth on the ssh client provided - Args: - ssh_client (SSHClient): usually a vm_ssh - eth_name (str): such as "eth1, eth1.1" - - Returns (str): The first matching ipv4 addr for given eth. such as - "30.0.0.2" - - """ - if eth_name in ssh_client.exec_cmd('ip addr')[1]: - output = ssh_client.exec_cmd('ip addr show {}'.format(eth_name), - fail_ok=False)[1] - if re.search('inet {}'.format(Networks.IPV4_IP), output): - return re.findall('{}'.format(Networks.IPV4_IP), output)[0] - else: - LOG.warning( - "Cannot find ip address for interface{}".format(eth_name)) - return '' - - else: - LOG.warning( - "Cannot find provided interface{} in 'ip addr'".format(eth_name)) - return '' - - -def _is_v4_only(ip_list): - rtn_val = True - for ip in ip_list: - ip_addr = ipaddress.ip_address(ip) - if ip_addr.version == 6: - rtn_val = False - return rtn_val - - -def get_internal_net_ids_on_vxlan(vxlan_provider_net_id, ip_version=4, - mode='dynamic', con_ssh=None): - """ - Get the networks ids that matches the vxlan underlay ip version - Args: - vxlan_provider_net_id: vxlan provider net id to get the networks info - ip_version: 4 or 6 (IPV4 or IPV6) - mode: mode of the vxlan: dynamic or static - con_ssh (SSHClient): - - Returns (list): The list of networks name that matches the vxlan underlay - (v4/v6) and the mode - - """ - rtn_networks = [] - networks = get_networks_on_providernet(providernet=vxlan_provider_net_id, - field='id', con_ssh=con_ssh) - if not networks: - return rtn_networks - provider_attributes = get_networks_on_providernet( - providernet=vxlan_provider_net_id, con_ssh=con_ssh, - field='providernet_attributes') - if not provider_attributes: - return rtn_networks - - index = 0 - new_attr_list = [] - # In the case where some val could be 'null', need to change that to 'None' - for attr in provider_attributes: - new_attr = attr.replace('null', 'None') - new_attr_list.append(new_attr) - - # getting the configured vxlan mode - dic_attr_1 = eval(new_attr_list[0]) - vxlan_mode = dic_attr_1['mode'] - - if mode == 'static' and vxlan_mode == mode: - data_if_name = host_helper.get_host_interfaces('compute-0', - net_type='data', 
- con_ssh=con_ssh) - address = host_helper.get_host_addresses(host='compute-0', - ifname=data_if_name, - con_ssh=con_ssh) - if ip_version == 4 and _is_v4_only(address): - rtn_networks.append(networks[index]) - elif ip_version == 6 and not _is_v4_only(address): - LOG.info("here in v6") - rtn_networks = networks - else: - return rtn_networks - elif mode == 'dynamic' and vxlan_mode == mode: - for attr in provider_attributes: - dic_attr = eval(attr) - ip = dic_attr['group'] - ip_addr = ipaddress.ip_address(ip) - if ip_addr.version == ip_version: - rtn_networks.append(networks[index]) - index += 1 - - return rtn_networks - - -def get_dpdk_user_data(con_ssh=None): - """ - copy the cloud-config userdata to TiS server. - This userdata adds wrsroot/li69nux user to guest - - Args: - con_ssh (SSHClient): - - Returns (str): TiS filepath of the userdata - - """ - file_dir = '{}/userdata/'.format(ProjVar.get_var('USER_FILE_DIR')) - file_name = UserData.DPDK_USER_DATA - file_path = file_dir + file_name - - if con_ssh is None: - con_ssh = get_cli_client() - - if con_ssh.file_exists(file_path=file_path): - # LOG.info('userdata {} already exists. Return existing path'.format( - # file_path)) - # return file_path - con_ssh.exec_cmd('rm -f {}'.format(file_path), fail_ok=False) - - LOG.debug('Create userdata directory if not already exists') - cmd = 'mkdir -p {};touch {}'.format(file_dir, file_path) - con_ssh.exec_cmd(cmd, fail_ok=False) - - content = "#wrs-config\nFUNCTIONS=hugepages,avr\n" - con_ssh.exec_cmd('echo "{}" >> {}'.format(content, file_path), - fail_ok=False) - output = con_ssh.exec_cmd('cat {}'.format(file_path))[1] - assert output in content - - return file_path - - -def get_ping_failure_duration(server, ssh_client, end_event, timeout=600, - ipv6=False, start_event=None, - ping_interval=0.2, single_ping_timeout=1, - cumulative=False, init_timeout=60): - """ - Get ping failure duration in milliseconds - Args: - server (str): destination ip - ssh_client (SSHClient): where the ping cmd sent from - timeout (int): Max time to ping and gather ping loss duration before - ipv6 (bool): whether to use ping IPv6 address - start_event - end_event: an event that signals the end of the ping - ping_interval (int|float): interval between two pings in seconds - single_ping_timeout (int): timeout for ping reply in seconds. Minimum - is 1 second. - cumulative (bool): Whether to accumulate the total loss time before - end_event set - init_timeout (int): Max time to wait before vm pingable - - Returns (int): ping failure duration in milliseconds. 0 if ping did not - fail. - - """ - optional_args = '' - if ipv6: - optional_args += '6' - - fail_str = 'no answer yet' - cmd = 'ping{} -i {} -W {} -D -O {} | grep -B 1 -A 1 ' \ - '--color=never "{}"'.format(optional_args, ping_interval, - single_ping_timeout, server, fail_str) - - start_time = time.time() - ping_init_end_time = start_time + init_timeout - prompts = [ssh_client.prompt, fail_str] - ssh_client.send_sudo(cmd=cmd) - while time.time() < ping_init_end_time: - index = ssh_client.expect(prompts, timeout=10, searchwindowsize=100, - fail_ok=True) - if index == 1: - continue - elif index == 0: - raise exceptions.CommonError("Continuous ping cmd interrupted") - - LOG.info("Ping to {} succeeded".format(server)) - start_event.set() - break - else: - raise exceptions.VMNetworkError( - "VM is not reachable within {} seconds".format(init_timeout)) - - end_time = start_time + timeout - while time.time() < end_time: - if end_event.is_set(): - LOG.info("End event set. 
Stop continuous ping and process results") - break - - # End ping upon end_event set or timeout reaches - ssh_client.send_control() - try: - ssh_client.expect(fail_ok=False) - except (exceptions.TiSError, pexpect.ExceptionPexpect): - ssh_client.send_control() - ssh_client.expect(fail_ok=False) - - # Process ping output to get the ping loss duration - output = ssh_client.process_cmd_result(cmd='sudo {}'.format(cmd), - get_exit_code=False)[1] - lines = output.splitlines() - prev_succ = '' - duration = 0 - count = 0 - prev_line = '' - succ_str = 'bytes from' - post_succ = '' - for line in lines: - if succ_str in line: - if prev_succ and (fail_str in prev_line): - # Ping resumed after serious of lost ping - count += 1 - post_succ = line - tmp_duration = _parse_ping_timestamp( - post_succ) - _parse_ping_timestamp(prev_succ) - LOG.info("Count {} ping loss duration: {}".format(count, - tmp_duration)) - if cumulative: - duration += tmp_duration - elif tmp_duration > duration: - duration = tmp_duration - prev_succ = line - - prev_line = line - - if not post_succ: - LOG.warning("Ping did not resume within {} seconds".format(timeout)) - duration = -1 - else: - LOG.info("Final ping loss duration: {}".format(duration)) - return duration - - -def _parse_ping_timestamp(output): - timestamp = math.ceil(float(re.findall(r'\[(.*)\]', output)[0]) * 1000) - return timestamp - - -@contextmanager -def vconsole(ssh_client): - """ - Enter vconsole for the given ssh connection. - raises if vconsole connection cannot be established - - Args: - ssh_client (SSHClient): - the connection to use for vconsole session - - Yields (function): - executer function for vconsole - - """ - LOG.info("Entering vconsole") - original_prompt = ssh_client.get_prompt() - ssh_client.set_prompt("AVS> ") - try: - ssh_client.exec_sudo_cmd("vconsole", get_exit_code=False) - except Exception as err: - # vconsole failed to connect - # this is usually because vswitch initialization failed - # check instance logs - ssh_client.set_prompt(original_prompt) - ssh_client.flush(3) - ssh_client.send_control('c') - ssh_client.flush(10) - raise err - - def v_exec(cmd, fail_ok=False): - LOG.info("vconsole execute: {}".format(cmd)) - if cmd.strip().lower() == 'quit': - raise ValueError("shall not exit vconsole without proper cleanup") - - code, output = ssh_client.exec_cmd(cmd, get_exit_code=False) - if "done" in output.lower(): - return 0, output - - LOG.warning(output) - if not fail_ok: - assert 0, 'vconsole failed to execute "{}"'.format(cmd) - return 1, output - - yield v_exec - - LOG.info("Exiting vconsole") - ssh_client.set_prompt(original_prompt) - ssh_client.exec_cmd("quit") - - -def create_port_forwarding_rule(router_id, inside_addr=None, inside_port=None, - outside_port=None, protocol='tcp', - tenant=None, description=None, fail_ok=False, - auth_info=Tenant.get('admin'), - con_ssh=None): - """ - - Args: - router_id (str): The router_id of the tenant router the - portforwarding rule is created - inside_addr(str): private ip address - inside_port (int|str): private protocol port number - outside_port(int|str): The public layer4 protocol port number - protocol(str): the protocol tcp|udp|udp-lite|sctp|dccp - tenant(str): The owner Tenant id. - description(str): User specified text description. 
The default is - "portforwarding" - fail_ok: - auth_info: - con_ssh: - - Returns (tuple): - 0, , - Portforwarding rule - created successfully - 1, '', - Portforwarding rule create cli rejected - 2, '', - Portforwarding rule create failed; one or more - values required are not specified. - - - """ - # Process args - if tenant is None: - tenant = Tenant.get_primary()['tenant'] - - if description is None: - description = '"portforwarding"' - - tenant_id = keystone_helper.get_projects(field='ID', name=tenant, - con_ssh=con_ssh)[0] - - mgmt_ips_for_vms = get_mgmt_ips_for_vms() - - if inside_addr not in mgmt_ips_for_vms: - msg = "The inside_addr {} must be one of the vm mgmt internal " \ - "addresses: {}.".format(inside_addr, mgmt_ips_for_vms) - return 1, msg - - args_dict = { - '--tenant-id': tenant_id if auth_info == Tenant.get('admin') else None, - '--inside-addr': inside_addr, - '--inside-port': inside_port, - '--outside-port': outside_port, - '--protocol': protocol, - '--description': description, - } - args = router_id - - for key, value in args_dict.items(): - if value is None: - msg = 'A value must be specified for {}'.format(key) - if fail_ok: - return 1, '', msg - raise exceptions.NeutronError(msg) - else: - args = "{} {} {}".format(key, value, args) - - LOG.info("Creating port forwarding with args: {}".format(args)) - # send portforwarding-create cli - code, output = cli.neutron('portforwarding-create', args, - ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info) - - # process result - if code == 1: - msg = 'Fail to create port forwarding rules: {}'.format(output) - if fail_ok: - return 1, '', msg - raise exceptions.NeutronError(msg) - - table_ = table_parser.table(output) - portforwarding_id = table_parser.get_value_two_col_table(table_, 'id') - - expt_values = { - 'router_id': router_id, - 'tenant_id': tenant_id - } - - for field, expt_val in expt_values.items(): - if table_parser.get_value_two_col_table(table_, field) != expt_val: - msg = "{} is not set to {} for portforwarding {}".format( - field, expt_val, router_id) - if fail_ok: - return 2, portforwarding_id, msg - raise exceptions.NeutronError(msg) - - succ_msg = "Portforwarding {} is created successfully.".format( - portforwarding_id) - LOG.info(succ_msg) - return 0, portforwarding_id, succ_msg - - -def create_port_forwarding_rule_for_vm(vm_id, inside_addr=None, - inside_port=None, outside_port=None, - protocol='tcp', - description=None, fail_ok=False, - auth_info=Tenant.get('admin'), - con_ssh=None): - """ - - Args: - vm_id (str): The id of vm the portforwarding rule is created for - inside_addr(str): private ip address; default is mgmt address of vm. - inside_port (str): private protocol port number; default is 80 ( web - port) - outside_port(str): The public layer4 protocol port number; default is - 8080 - protocol(str): the protocol tcp|udp|udp-lite|sctp|dccp; default is tcp - description(str): User specified text description. The default is - "portforwarding" - fail_ok: - auth_info: - con_ssh: - - Returns (tuple): - 0, , - Portforwarding rule - created successfully - 1, '', - Portforwarding rule create cli rejected - 2, '', - Portforwarding rule create failed; one or more - values required are not specified. 
- - """ - # Process args - router_id = get_tenant_router() - - if inside_addr is None: - inside_addr = get_mgmt_ips_for_vms(vm_id)[0] - if inside_port is None: - inside_port = "80" - - if outside_port is None: - outside_port = "8080" - - return create_port_forwarding_rule(router_id, inside_addr=inside_addr, - inside_port=inside_port, - outside_port=outside_port, - protocol=protocol, - description=description, fail_ok=fail_ok, - auth_info=auth_info, - con_ssh=con_ssh) - - -def update_portforwarding_rule(portforwarding_id, inside_addr=None, - inside_port=None, outside_port=None, - protocol=None, description=None, fail_ok=False, - auth_info=Tenant.get('admin'), con_ssh=None): - """ - - Args: - portforwarding_id (str): Id or name of portfowarding rule to update - inside_addr (str): Private ip address - inside_port (str): Private layer4 protocol port - outside_port (str): Public layer4 protocol port - protocol (str): protocol name tcp|udp|udp-lite|sctp|dccp - description (str): User specified text description - fail_ok: - auth_info: - con_ssh: - - Returns (tuple): - 0, - Portforwarding rule updated successfully - - - """ - - if portforwarding_id is None or not isinstance(portforwarding_id, str): - raise ValueError( - "Expecting string value for portforwarding_id. Get {}".format( - type(portforwarding_id))) - - args = '' - - args_dict = { - '--inside_addr': inside_addr, - '--inside_port': inside_port, - '--outside_port': outside_port, - '--protocol': protocol, - '--description': description, - } - - for key, value in args_dict.items(): - if value is not None: - args += ' {} {}'.format(key, value) - - if not args: - raise ValueError("At least of the args need to be specified.") - - LOG.info("Updating router {}: {}".format(portforwarding_id, args)) - - args = '{} {}'.format(portforwarding_id, args.strip()) - return cli.neutron('portforwarding-update', args, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - - -def delete_portforwarding_rules(pf_ids, auth_info=Tenant.get('admin'), - con_ssh=None, fail_ok=False): - """ - Deletes list of portforwarding rules - - Args: - pf_ids(list): list of portforwarding rules to be deleted. - auth_info: - con_ssh: - fail_ok: - - Returns (tuple): - 0, - Portforwarding rules delete successful - - """ - if pf_ids is None or len(pf_ids) == 0: - return 0, None - - for pf_id in pf_ids: - rc, output = delete_portforwarding_rule(pf_id, auth_info=auth_info, - con_ssh=con_ssh, - fail_ok=fail_ok) - if rc != 0: - return rc, output - return 0, None - - -def delete_portforwarding_rule(portforwarding_id, auth_info=Tenant.get('admin'), - con_ssh=None, fail_ok=False): - """ - Deletes a single portforwarding rule - Args: - portforwarding_id (str): Id or name of portforwarding rule to delete. - auth_info: - con_ssh: - fail_ok: - - Returns (tuple): - 0, - Portforwarding rules delete successful - 1, - Portforwarding rules delete cli rejected - 2, - Portforwarding rules delete fail - - """ - - LOG.info("Deleting port-forwarding {}...".format(portforwarding_id)) - code, output = cli.neutron('portforwarding-delete', portforwarding_id, - ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info) - if code != 0: - msg = "CLI rejected. 
Fail to delete Port-forwarding {}; {}".format(
-            portforwarding_id, output)
-        LOG.warning(msg)
-        if fail_ok:
-            return code, msg
-        else:
-            raise exceptions.NeutronError(msg)
-
-    portforwardings = get_portforwarding_rules(auth_info=auth_info,
-                                               con_ssh=con_ssh)
-    if portforwarding_id in portforwardings:
-        msg = "Port-forwarding {} is still showing in neutron " \
-              "portforwarding-list".format(portforwarding_id)
-        if fail_ok:
-            LOG.warning(msg)
-            return 2, msg
-
-    succ_msg = "Port-forwarding {} is successfully deleted.".format(
-        portforwarding_id)
-    LOG.info(succ_msg)
-    return 0, succ_msg
-
-
-def get_portforwarding_rules(router_id=None, inside_addr=None, inside_port=None,
-                             outside_port=None,
-                             protocol=None, strict=True, auth_info=None,
-                             con_ssh=None):
-    """
-    Get portforwarding id(s) based on given criteria.
-    Args:
-        router_id (str): portforwarding router id
-        inside_addr (str): portforwarding inside_addr
-        inside_port (str): portforwarding inside_port
-        outside_port (str): portforwarding outside_port
-        protocol (str): portforwarding protocol
-        strict (bool):
-        auth_info (dict):
-        con_ssh (SSHClient):
-
-    Returns (list): list of portforwarding id(s)
-
-    """
-
-    param_dict = {
-        'router_id': router_id,
-        'inside_addr': inside_addr,
-        'inside_port': inside_port,
-        'outside_port': outside_port,
-        'protocol': protocol,
-    }
-
-    final_params = {}
-    for key, val in param_dict.items():
-        if val is not None:
-            final_params[key] = str(val)
-
-    table_ = table_parser.table(
-        cli.neutron('portforwarding-list', ssh_client=con_ssh,
-                    auth_info=auth_info)[1],
-        combine_multiline_entry=True)
-    if not table_parser.get_all_rows(table_):
-        return []
-
-    if router_id is not None:
-        table_ = table_parser.filter_table(table_, strict=strict,
-                                           router_id=router_id)
-
-    return table_parser.get_values(table_, 'id', **final_params)
-
-
-def get_portforwarding_rule_info(portforwarding_id, field='inside_addr',
-                                 strict=True, auth_info=Tenant.get('admin'),
-                                 con_ssh=None):
-    """
-    Get value of specified field for given portforwarding rule
-
-    Args:
-        portforwarding_id (str): Id or name of portforwarding rule
-        field (str): the name of the field attribute
-        strict (bool):
-        auth_info (dict):
-        con_ssh (SSHClient):
-
-    Returns (str): value of specified field for given portforwarding rule
-
-    """
-
-    table_ = table_parser.table(
-        cli.neutron('portforwarding-show', portforwarding_id,
-                    ssh_client=con_ssh, auth_info=auth_info)[1],
-        combine_multiline_entry=True)
-    return table_parser.get_value_two_col_table(table_, field, strict)
-
-
-def create_pci_alias_for_devices(dev_type, hosts=None, devices=None,
-                                 alias_names=None, apply=True, con_ssh=None):
-    """
-    Create pci alias for given devices by adding nova pci-alias service
-    parameters
-    Args:
-        dev_type (str): Valid values: 'gpu-pf', 'user'
-        hosts (str|list|tuple|None): Check devices on given host(s).
-            Check all hosts when None
-        devices (str|list|tuple|None): Devices to add in pci-alias.
-            When None, add all devices for given dev_type
-        alias_names (str|list|tuple|None): Pci alias' to create.
-            When None, name automatically.
-        apply (bool): whether to apply after nova service parameters modify
-        con_ssh:
-
-    Returns (list): list of dict.
-        e.g., [{'device_id': '1d2d', 'vendor_id': '8086', 'name': user_intel-1},
-               {'device_id': '1d26', 'vendor_id': '8086', 'name':
-                user_intel-2}, ...
] - - Examples: - network_helper.create_pci_alias_for_devices(dev_type='user', - hosts=('compute-2', 'compute-3')) - network_helper.create_pci_alias_for_devices(dev_type='gpu-pf', - devices='pci_0000_0c_00_0') - - """ - LOG.info("Prepare for adding pci alias") - if not hosts: - hosts = host_helper.get_hypervisors(con_ssh=con_ssh) - - if not devices: - if 'gpu' in dev_type: - class_id = DevClassID.GPU - else: - class_id = DevClassID.USB - devices = host_helper.get_host_devices(host=hosts[0], field='address', - list_all=True, regex=True, - **{'class id': class_id}) - elif isinstance(devices, str): - devices = [devices] - - if not alias_names: - alias_names = [None] * len(devices) - elif isinstance(alias_names, str): - alias_names = [alias_names] - - if len(devices) != len(alias_names): - raise ValueError( - "Number of devices do not match number of alias names provided") - - LOG.info( - "Ensure devices are enabled on hosts {}: {}".format(hosts, devices)) - host_helper.enable_disable_hosts_devices(hosts, devices) - - host = hosts[0] - devices_to_create = [] - param_strs = [] - for i in range(len(devices)): - device = devices[i] - alias_name = alias_names[i] - dev_id, vendor_id, vendor_name = host_helper.get_host_device_values( - host=host, device=device, - fields=('device id', 'vendor id', 'vendor name')) - - if not alias_name: - alias_name = '{}_{}'.format(dev_type, - vendor_name.split()[0].lower()) - alias_name = common.get_unique_name(name_str=alias_name) - - param = {'device_id': dev_id, 'vendor_id': vendor_id, - 'name': alias_name} - param_str = ','.join( - ['{}={}'.format(key, val) for key, val in param.items()]) - param_strs.append(param_str) - - pci_alias_dict = {'device id': dev_id, 'vendor id': vendor_id, - 'pci alias': alias_name} - devices_to_create.append(pci_alias_dict) - - LOG.info("Create nova pci alias service parameters: {}".format( - devices_to_create)) - system_helper.create_service_parameter( - service='nova', section='pci_alias', - con_ssh=con_ssh, name=dev_type, - value='"{}"'.format(';'.join(param_strs))) - - if apply: - LOG.info("Apply service parameters") - system_helper.apply_service_parameters(service='nova') - LOG.info("Verify nova pci alias' are listed after applying service " - "parameters: {}".format(devices_to_create)) - _check_pci_alias_created(devices_to_create, con_ssh=con_ssh) - - return devices_to_create - - -def _check_pci_alias_created(devices, con_ssh=None, timeout=60): - end_time = time.time() + timeout - out = None - while time.time() < end_time: - code, out = cli.nova('device-list', ssh_client=con_ssh, fail_ok=True, - auth_info=Tenant.get('admin')) - if code == 0: - break - time.sleep(10) - else: - raise exceptions.NovaError( - 'nova device-list failed. Error: \n{}'.format(out)) - - pci_alias_dict = get_pci_device_list_info(con_ssh=con_ssh) - for param_ in devices: - pci_alias = param_.get('pci alias') - assert pci_alias, "pci alias {} is not shown in nova " \ - "device-list".format(pci_alias) - created_alias = pci_alias_dict[pci_alias] - assert param_.get('vendor id') == created_alias['vendor id'] - assert param_.get('device id') == created_alias['device id'] - - -def get_qos_policies(field='id', name=None, qos_ids=None, con_ssh=None, - auth_info=None): - """ - Get qos policies - Args: - field (str|list|tuple) - name - qos_ids(str|list|None): QoS id to filter name. - con_ssh(SSHClient): If None, active controller ssh will be used. - auth_info(dict): Tenant dict. If None, primary tenant will be used. 
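        A minimal lookup-then-apply sketch (the QoS name and network id
        below are hypothetical; update_net_qos is defined later in this
        module):

            qos_ids = get_qos_policies(field='id', name='tenant1-qos')
            if qos_ids:
                update_net_qos('net-uuid', qos_id=qos_ids[0])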
- - Returns(list): List of neutron qos names filtered by qos_id. - - """ - table_ = table_parser.table( - cli.neutron('qos-list', ssh_client=con_ssh, auth_info=auth_info)[1]) - filters = {'id': qos_ids, 'name': name} - - return table_parser.get_multi_values(table_, field, **filters) - - -def create_qos(name=None, tenant_name=None, description=None, scheduler=None, - dscp=None, ratelimit=None, fail_ok=False, - con_ssh=None, auth_info=Tenant.get('admin'), cleanup=None): - """ - Args: - name(str): Name of the QoS to be created. - tenant_name(str): Such as tenant1, tenant2. If none uses primary tenant. - description(str): Description of the created QoS. - scheduler(dict): Dictionary of scheduler policies formatted - as {'policy': value}. - dscp(dict): Dictionary of dscp policies formatted as {'policy': value}. - ratelimit(dict): Dictionary of ratelimit policies formatted - as {'policy': value}. - fail_ok(bool): - con_ssh(SSHClient): - auth_info(dict): Run the neutron qos-create cli using this - authorization info. Admin by default, - cleanup (str): - - Returns(tuple): exit_code(int), qos_id(str) - (0, qos_id) qos successfully created. - (1, output) qos not created successfully - """ - tenant_id = keystone_helper.get_projects(field='ID', - name=tenant_name, - con_ssh=con_ssh)[0] - check_dict = {} - args = '' - current_qos = get_qos_policies(field='name', con_ssh=con_ssh, - auth_info=auth_info) - if name is None: - if tenant_name is None: - tenant_name = common.get_tenant_name(Tenant.get_primary()) - name = common.get_unique_name("{}-qos".format(tenant_name), - existing_names=current_qos, - resource_type='qos') - else: - name = common.get_unique_name("{}-qos".format(tenant_name), - existing_names=current_qos, - resource_type='qos') - args_dict = {'name': name, - 'tenant-id': tenant_id, - 'description': description, - 'scheduler': scheduler, - 'dscp': dscp, - 'ratelimit': ratelimit - } - check_dict['policies'] = {} - for key, value in args_dict.items(): - if value: - if key in ('scheduler', 'dscp', 'ratelimit'): - args += " --{}".format(key) - for policy, val in value.items(): - args += " {}={}".format(policy, val) - value[policy] = str(val) - check_dict['policies'][key] = value - else: - args += " --{} '{}'".format(key, value) - check_dict[key] = value - - LOG.info("Creating QoS with args: {}".format(args)) - exit_code, output = cli.neutron('qos-create', args, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - if exit_code == 1: - return 1, output - - table_ = table_parser.table(output) - for key, exp_value in check_dict.items(): - if key == 'policies': - actual_value = eval( - table_parser.get_value_two_col_table(table_, key)) - else: - actual_value = table_parser.get_value_two_col_table(table_, key) - if actual_value != exp_value: - msg = "Qos created but {} expected to be {} but actually {}".format( - key, exp_value, actual_value) - raise exceptions.NeutronError(msg) - - qos_id = table_parser.get_value_two_col_table(table_, 'id') - if cleanup: - ResourceCleanup.add('network_qos', qos_id, scope=cleanup) - LOG.info("QoS successfully created") - return 0, qos_id - - -def delete_qos(qos_id, auth_info=Tenant.get('admin'), con_ssh=None, - fail_ok=False): - """ - - Args: - qos_id(str): QoS to be deleted - auth_info(dict): tenant to be used, if none admin will be used - con_ssh(SSHClient): - fail_ok(bool): - - Returns: code(int), output(string) - (0, "QoS successfully deleted" ) - (1, ) openstack qos delete cli rejected - """ - - LOG.info("deleting QoS: {}".format(qos_id)) - code, output = 
cli.neutron('qos-delete', qos_id, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - if code == 1: - return 1, output - - if qos_id in get_qos_policies(auth_info=auth_info, con_ssh=con_ssh): - msg = "QoS {} still listed in neutron QoS list".format(qos_id) - raise exceptions.NeutronError(msg) - - succ_msg = "QoS {} successfully deleted".format(qos_id) - LOG.info(succ_msg) - return 0, succ_msg - - -def update_net_qos(net_id, qos_id=None, fail_ok=False, - auth_info=Tenant.get('admin'), con_ssh=None): - """ - Update network qos to given value - Args: - net_id (str): network to update - qos_id (str|None): when None, remove the qos from network - fail_ok (bool): - auth_info (dict): - con_ssh (SSHClient): - - Returns (tuple): (code, msg) - (0, "Network qos is successfully updated to ") - (1, ) openstack network update cli rejected - - """ - if qos_id: - kwargs = {'--wrs-tm:qos': qos_id} - arg_str = '--wrs-tm:qos {}'.format(qos_id) - else: - kwargs = {'--no-qos': None} - arg_str = '--no-qos' - - code, msg = cli.neutron('net-update', '{} {}'.format(arg_str, net_id), - ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info) - if code > 0: - return code, msg - - if '--no-qos' in kwargs: - actual_qos = get_network_values(net_id, fields='wrs-tm:qos', - auth_info=auth_info, con_ssh=con_ssh)[0] - assert not actual_qos, "Qos {} is not removed from {}".format( - actual_qos, net_id) - - msg = "Network {} qos is successfully updated to {}".format(net_id, qos_id) - LOG.info(msg) - return 0, msg diff --git a/automated-pytest-suite/keywords/nova_helper.py b/automated-pytest-suite/keywords/nova_helper.py deleted file mode 100755 index dbd9bbb3..00000000 --- a/automated-pytest-suite/keywords/nova_helper.py +++ /dev/null @@ -1,1309 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -from utils import cli, exceptions -from utils import table_parser -from utils.tis_log import LOG -from consts.proj_vars import ProjVar -from consts.auth import Tenant -from consts.stx import FlavorSpec, GuestImages -from keywords import common -from testfixtures.fixture_resources import ResourceCleanup - - -def create_flavor(name=None, flavor_id=None, vcpus=1, ram=1024, root_disk=None, - ephemeral=None, swap=None, - is_public=None, rxtx_factor=None, project=None, - project_domain=None, description=None, guest_os=None, - fail_ok=False, auth_info=Tenant.get('admin'), con_ssh=None, - storage_backing=None, - rtn_id=True, cleanup=None, add_default_specs=True, - properties=None): - """ - Create a flavor with given criteria. - - Args: - name (str): substring of flavor name. Whole name will be - -. e,g., 'myflavor-1'. If None, name - will be set to 'flavor'. - flavor_id (str): auto generated by default unless specified. - vcpus (int): - ram (int): - root_disk (int): - ephemeral (int): - swap (int|None): - is_public (bool): - rxtx_factor (str): - project - project_domain - description - guest_os (str|None): guest name such as 'tis-centos-guest' or None - - default tis guest assumed - fail_ok (bool): whether it's okay to fail to create a flavor. Default - to False. - auth_info (dict): This is set to Admin by default. Can be set to - other tenant for negative test. - con_ssh (SSHClient): - storage_backing (str): storage backing in extra flavor. Auto set - storage backing based on system config if None. - Valid values: 'local_image', 'remote' - rtn_id (bool): return id or name - cleanup (str|None): cleanup scope. 
function, class, module, or session - add_default_specs (False): Whether to automatically add extra specs - that are needed to launch vm - properties (str|list|dict) - - Returns (tuple): (rtn_code (int), flavor_id/err_msg (str)) - (0, ): flavor created successfully - (1, ): create flavor cli rejected - - """ - - table_ = table_parser.table( - cli.openstack('flavor list', ssh_client=con_ssh, auth_info=auth_info)[ - 1]) - existing_names = table_parser.get_column(table_, 'Name') - - if name is None: - name = 'flavor' - flavor_name = common.get_unique_name(name_str=name, - existing_names=existing_names, - resource_type='flavor') - - if root_disk is None: - if not guest_os: - guest_os = GuestImages.DEFAULT['guest'] - root_disk = GuestImages.IMAGE_FILES[guest_os][1] - - args_dict = { - '--ephemeral': ephemeral, - '--swap': swap, - '--rxtx-factor': rxtx_factor, - '--disk': root_disk, - '--ram': ram, - '--vcpus': vcpus, - '--id': flavor_id, - '--project': project, - '--project-domain': project_domain, - '--description': description, - '--public': True if is_public else None, - '--private': True if is_public is False else None, - '--property': properties, - } - args = '{} {}'.format(common.parse_args(args_dict, repeat_arg=True), - flavor_name) - - LOG.info("Creating flavor {}...".format(flavor_name)) - LOG.info("openstack flavor create option: {}".format(args)) - exit_code, output = cli.openstack('flavor create', args, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - if exit_code > 1: - return 1, output - - table_ = table_parser.table(output) - flavor_id = table_parser.get_value_two_col_table(table_, 'id') - LOG.info("Flavor {} created successfully.".format(flavor_name)) - - if cleanup: - ResourceCleanup.add('flavor', flavor_id, scope=cleanup) - - if add_default_specs: - extra_specs = {FlavorSpec.MEM_PAGE_SIZE: '2048'} - # extra_specs = {FlavorSpec.MEM_PAGE_SIZE: 'small'} - default_flavor_backing = ProjVar.get_var('DEFAULT_INSTANCE_BACKING') - sys_inst_backing = ProjVar.get_var('INSTANCE_BACKING') - if not default_flavor_backing: - from keywords import host_helper - sys_inst_backing = host_helper.get_hosts_per_storage_backing( - up_only=False, auth_info=auth_info, - con_ssh=con_ssh, refresh=True) - configured_backings = [backing for backing in sys_inst_backing if - sys_inst_backing.get(backing)] - LOG.debug( - "configured backing:{} sys inst backing: {}, required storage " - "backing: {}". - format(configured_backings, sys_inst_backing, storage_backing)) - - if storage_backing and storage_backing not in configured_backings: - raise ValueError( - 'Required local_storage {} is not configured on any nova ' - 'hypervisor'. 
- format(storage_backing)) - - if len(configured_backings) > 1: - extra_specs[ - FlavorSpec.STORAGE_BACKING] = storage_backing if \ - storage_backing else \ - ProjVar.get_var('DEFAULT_INSTANCE_BACKING') - - if extra_specs: - LOG.info("Setting flavor specs: {}".format(extra_specs)) - set_flavor(flavor_id, con_ssh=con_ssh, auth_info=auth_info, - **extra_specs) - - flavor = flavor_id if rtn_id else flavor_name - return 0, flavor, storage_backing - - -def set_aggregate(aggregate, properties=None, no_property=None, zone=None, - name=None, fail_ok=False, con_ssh=None, - auth_info=Tenant.get('admin')): - """ - Set aggregate with given params - Args: - aggregate (str): aggregate to set - properties (dict|None): - no_property (bool|None): - zone (str|None): - name (str|None): - fail_ok (bool): - con_ssh: - auth_info: - - Returns (tuple): - (0, "Aggregate set successfully with param: ) - (1, ) returns only if fail_ok=True - - """ - args_dict = { - '--zone': zone, - '--name': name, - '--property': properties, - '--no-property': no_property, - } - - args = '{} {}'.format(common.parse_args(args_dict, repeat_arg=True), - aggregate) - code, output = cli.openstack('aggregate set', args, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - if code > 0: - return 1, output - - msg = "Aggregate {} set successfully with param: {}".format(aggregate, args) - LOG.info(msg) - return 0, msg - - -def unset_aggregate(aggregate, properties, fail_ok=False, con_ssh=None, - auth_info=Tenant.get('admin')): - """ - Unset given properties for aggregate - Args: - aggregate (str): aggregate to unset - properties (list|tuple|str|None): - fail_ok (bool): - con_ssh: - auth_info: - - Returns (tuple): - (0, "Aggregate set successfully with param: ) - (1, ) returns only if fail_ok=True - - """ - if isinstance(properties, str): - properties = (properties,) - - args = ' '.join(['--property {}'.format(key) for key in properties]) - args = '{} {}'.format(args, aggregate) - code, output = cli.openstack('aggregate unset', args, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - if code > 0: - return 1, output - - msg = "Aggregate {} properties unset successfully: {}".format(aggregate, - properties) - LOG.info(msg) - return 0, msg - - -def get_aggregate_values(aggregate, fields, con_ssh=None, - auth_info=Tenant.get('admin'), fail_ok=False): - """ - Get values of a nova aggregate for given fields - Args: - aggregate (str): - fields (str|list|tuple): - con_ssh: - auth_info (dict): - fail_ok (bool) - - Returns (list): - - """ - code, out = cli.openstack('aggregate show', aggregate, ssh_client=con_ssh, - auth_info=auth_info, fail_ok=fail_ok) - if code > 0: - return [] - - table_ = table_parser.table(out) - return table_parser.get_multi_values_two_col_table( - table_, fields, evaluate=True, dict_fields=('properties',)) - - -def delete_flavors(flavors, check_first=True, fail_ok=False, con_ssh=None, - auth_info=Tenant.get('admin')): - """ - Delete given flavor(s) - Args: - flavors (list|str): id(s) of flavor(s) to delete - check_first (bool) - fail_ok (bool): whether to raise exception if any flavor fails to delete - con_ssh (SSHClient): - auth_info (dict): - - Returns (tuple): - (-1, 'None of the flavor(s) exists. 
Do nothing.') - (0, 'Flavor is successfully deleted') - (1, ) - (2, "Flavor still exists on system after deleted.") - - """ - if isinstance(flavors, str): - flavors = [flavors] - - if check_first: - existing_favors = get_flavors(con_ssh=con_ssh, auth_info=auth_info) - flavors = list(set(flavors) & set(existing_favors)) - if not flavors: - msg = "None of the given flavors exist. Do nothing." - LOG.info(msg) - return -1, msg - - LOG.info("Flavor(s) to delete: {}".format(flavors)) - code, output = cli.openstack('flavor delete', ' '.join(flavors), - ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info) - if code > 0: - return 1, output - - existing_favors = get_flavors(con_ssh=con_ssh, auth_info=auth_info) - flavors_still_exist = list(set(flavors) & set(existing_favors)) - if flavors_still_exist: - err_msg = "Flavor(s) still exist after deletion: {}".format( - flavors_still_exist) - LOG.warning(err_msg) - if fail_ok: - return 2, err_msg - else: - raise exceptions.FlavorError(err_msg) - - success_msg = "Flavor(s) deleted successfully." - LOG.info(success_msg) - return 0, success_msg - - -def get_flavors(name=None, memory=None, disk=None, ephemeral=None, swap=None, - vcpu=None, rxtx=None, is_public=None, - flv_id=None, long=False, con_ssh=None, auth_info=None, - strict=True, field='id'): - """ - Get a flavor id with given criteria. If no criteria given, a random - flavor will be returned. - - Args: - name (str): name of a flavor - memory (int): memory size in MB - disk (int): size of the disk in GB - ephemeral (int): size of ephemeral disk in GB - swap (int): size of swap disk in GB - vcpu (int): number of vcpus - rxtx (str): - is_public (bool): - flv_id (str) - long (bool) - con_ssh (SSHClient): - auth_info (dict): - strict (bool): whether or not to perform strict search on provided - values - field (str|list|tuple) - - Returns (list): - - """ - - args = '--long' if long else '' - table_ = table_parser.table( - cli.openstack('flavor list', args, ssh_client=con_ssh, - auth_info=auth_info)[1]) - - req_dict = {'Name': name, - 'RAM': memory, - 'Disk': disk, - 'Ephemeral': ephemeral, - 'Swap': '' if str(swap) == '0' else swap, - 'VCPUs': vcpu, - 'RXTX Factor': rxtx, - 'Is Public': is_public, - 'ID': flv_id, - } - final_dict = {k: str(v) for k, v in req_dict.items() if v is not None} - return table_parser.get_multi_values(table_, field, strict=strict, - **final_dict) - - -def get_basic_flavor(auth_info=None, con_ssh=None, guest_os='', rtn_id=True): - """ - Get a basic flavor with the default arg values and without adding extra - specs. 
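    A minimal sketch of the typical flow, reusing the 2048 page-size spec
    seen elsewhere in this module (the guest name is only an example):

        flavor_id = get_basic_flavor(guest_os='tis-centos-guest')
        set_flavor(flavor_id, **{'hw:mem_page_size': '2048'})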
- Args: - auth_info (dict): - con_ssh (SSHClient): - guest_os - rtn_id (bool): return flavor id or name - - Returns (str): id of the basic flavor - - """ - if not guest_os: - guest_os = GuestImages.DEFAULT['guest'] - size = GuestImages.IMAGE_FILES[guest_os][1] - - default_flavor_name = 'flavor-default-size{}'.format(size) - rtn_val = 'id' if rtn_id else 'name' - flavors = get_flavors(name=default_flavor_name, con_ssh=con_ssh, - auth_info=auth_info, strict=False, - field=rtn_val) - flavor = flavors[0] if flavors else \ - create_flavor(name=default_flavor_name, root_disk=size, con_ssh=con_ssh, - cleanup='session', rtn_id=rtn_id)[1] - - return flavor - - -def set_flavor(flavor, project=None, project_domain=None, description=None, - no_property=None, con_ssh=None, - auth_info=Tenant.get('admin'), fail_ok=False, **properties): - """ - Set flavor with given parameters - Args: - flavor (str): id of a flavor - project (str) - project_domain (str) - description (str) - no_property (bool) - con_ssh (SSHClient): - auth_info (dict): - fail_ok (bool): - **properties: extra specs to set. e.g., **{"hw:mem_page_size": "2048"} - - Returns (tuple): (rtn_code (int), message (str)) - (0, 'Flavor extra specs set successfully.'): required extra spec(s) - added successfully - (1, ): add extra spec cli rejected - - """ - args_dict = { - '--description': description, - '--project': project, - '--project-domain': project_domain, - '--no-property': no_property and not properties, - '--property': properties - } - args = common.parse_args(args_dict, repeat_arg=True) - - if not args.strip(): - raise ValueError("Nothing is provided to set") - - LOG.info("Setting flavor {} with args: {}".format(flavor, args)) - args = '{} {}'.format(args, flavor) - exit_code, output = cli.openstack('flavor set', args, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - if exit_code == 1: - return 1, output - - msg = "Flavor {} set successfully".format(flavor) - LOG.info(msg) - return 0, flavor - - -def unset_flavor(flavor, properties=None, project=None, project_domain=None, - check_first=True, fail_ok=False, - auth_info=Tenant.get('admin'), con_ssh=None): - """ - Unset specific extra spec(s) from given flavor. - - Args: - flavor (str): id of the flavor - properties (str|list|tuple): extra spec(s) to be removed. At least - one should be provided. - project_domain - project - check_first (bool): Whether to check if extra spec exists in flavor - before attempt to unset - con_ssh (SSHClient): - auth_info (dict): - fail_ok (bool): - con_ssh - - Returns (tuple): (rtn_code (int), message (str)) - (-1, 'Extra spec(s) not exist in flavor. Do nothing.') - (0, 'Flavor extra specs unset successfully.'): required extra spec(s) - removed successfully - (1, ): unset extra spec cli rejected - (2, ' is still in the extra specs list'): post action - check failed - - """ - if isinstance(properties, str): - properties = [properties] - - if properties and check_first: - existing_specs = get_flavor_values(flavor, fields='properties', - con_ssh=con_ssh, - auth_info=auth_info)[0] - properties = list(set(properties) & set(existing_specs.keys())) - - args_dict = { - '--property': properties, - '--project': project, - '--project_domain': project_domain, - } - args = common.parse_args(args_dict, repeat_arg=True) - if not args: - msg = "Nothing to unset for flavor {}. 
Do nothing.".format(flavor) - LOG.info(msg) - return -1, msg - - LOG.info("Unsetting flavor {} with args: {}".format(flavor, args)) - exit_code, output = cli.openstack('flavor unset', args, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - if exit_code > 0: - return 1, output - - success_msg = "Flavor {} unset successfully".format(flavor) - LOG.info(success_msg) - return 0, success_msg - - -def get_flavor_properties(flavor, con_ssh=None, auth_info=Tenant.get('admin')): - """ - Get extra specs of a flavor as dictionary - Args: - flavor (str): id of a flavor - con_ssh (SSHClient): - auth_info (dict): - - Returns (dict): e.g., {"aggregate_instance_extra_specs:storage": - "local_image", "hw:mem_page_size": "2048"} - - """ - return get_flavor_values(flavor, fields='properties', con_ssh=con_ssh, - auth_info=auth_info)[0] - - -def create_server_group(name=None, policy='affinity', rule=None, fail_ok=False, - auth_info=None, con_ssh=None, - rtn_exist=False, field='id'): - """ - Create a server group with given criteria - - Args: - name (str): name of the server group - policy (str): affinity or anti_infinity - rule (str|None): max_server_per_host can be specified when - policy=anti-affinity - fail_ok (bool): - auth_info (dict): - con_ssh (SSHClient): - rtn_exist (bool): Whether to return existing server group that - matches the given name - field (str): id or name - - Returns (tuple): (rtn_code (int), err_msg_or_srv_grp_id (str)) - - (0, ) # server group created successfully - - (1, ) # create server group cli rejected - - """ - # process server group metadata - if name and rtn_exist: - existing_grp = get_server_groups(name=name, strict=False, - con_ssh=con_ssh, auth_info=auth_info, - field=field) - if existing_grp: - LOG.debug( - "Returning existing server group {}".format(existing_grp[0])) - return -1, existing_grp[0] - - # process server group name and policy - if not name: - name = 'grp_{}'.format(policy.replace('-', '_')) - name = common.get_unique_name(name_str=name) - args = '{}{} {}'.format('--rule {} '.format(rule) if rule else '', name, - policy.replace('_', '-')) - - LOG.info("Creating server group with args: {}...".format(args)) - exit_code, output = cli.nova('server-group-create', args, - ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info) - if exit_code > 0: - return 1, output - - table_ = table_parser.table(output) - srv_grp_id = table_parser.get_values(table_, field)[0] - LOG.info("Server group {} created successfully.".format(name)) - return 0, srv_grp_id - - -def get_server_groups(field='ID', all_projects=True, long=False, strict=True, - regex=False, - auth_info=Tenant.get('admin'), con_ssh=None, **kwargs): - """ - Get server groups ids based on the given criteria - - Args: - auth_info (dict): - con_ssh (SSHClient): - strict (bool): whether to do strict search for given name - regex (bool): whether or not to use regex when for given name - all_projects(bool): whether to list for all projects - long - field (str|list|tuple): - **kwargs: filters - - Returns (list): list of server groups - - """ - args_dict = { - '--all-projects': all_projects, - '--long': long - } - args = common.parse_args(args_dict) - table_ = table_parser.table( - cli.openstack('server group list', args, ssh_client=con_ssh, - auth_info=auth_info)[1]) - - def _parse_list(value_str): - return [val.strip() for val in value_str.split(',')] - - parsers = {_parse_list: ('Policies', 'Members')} - - return table_parser.get_multi_values(table_, field, strict=strict, - regex=regex, parsers=parsers, 
**kwargs) - - -def get_server_groups_info(headers=('Policies', 'Members'), auth_info=None, - con_ssh=None, - strict=False, **kwargs): - """ - Get a server group(s) info as a list - - Args: - headers (str|list|tuple): header string for info. such as 'Member', - 'Metadata', 'Policies' - auth_info (dict): - con_ssh (SSHClient): - strict - kwargs - - Returns (dict): server group(s) info in dict. server group id as key, - and values of specified headers as value. - Examples: {: [['affinity'], [, , ...]], - : ['anti-affinity', []]} - - """ - if isinstance(headers, str): - headers = [headers] - headers = ['ID'] + list(headers) - - values = get_server_groups(field=headers, all_projects=True, long=True, - con_ssh=con_ssh, auth_info=auth_info, - strict=strict, **kwargs) - group_ids = values.pop(0) - values = list(zip(*values)) - srv_groups_info = {group_ids[i]: values[i] for i in range(len(group_ids))} - return srv_groups_info - - -def get_server_group_info(group_id=None, group_name=None, - headers=('Policies', 'Members'), strict=False, - auth_info=None, con_ssh=None): - """ - Get server group info for specified server group - Args: - group_id: - group_name: - headers (str|list|tuple): - auth_info: - strict - con_ssh: - - Returns (list): - - """ - filters = {'ID': group_id} - if group_name: - filters['Name'] = group_name - - group_info = get_server_groups_info(headers=headers, auth_info=auth_info, - strict=strict, - con_ssh=con_ssh, **filters) - assert len(group_info) == 1, "More than 1 server group filtered" - - values = list(group_info.values())[0] - - return values - - -def server_group_exists(srv_grp_id, auth_info=Tenant.get('admin'), - con_ssh=None): - """ - Return True if given server group exists else False - - Args: - srv_grp_id (str): - auth_info (dict): - con_ssh (SSHClient): - - Returns (bool): True or False - - """ - existing_server_groups = get_server_groups(all_projects=True, - auth_info=auth_info, - con_ssh=con_ssh) - return srv_grp_id in existing_server_groups - - -def delete_server_groups(srv_grp_ids=None, check_first=True, fail_ok=False, - auth_info=Tenant.get('admin'), - con_ssh=None): - """ - Delete server group(s) - - Args: - srv_grp_ids (list|str): id(s) for server group(s) to delete. - check_first (bool): whether to check existence of given server groups - before attempt to delete. Default: True. - fail_ok (bool): - auth_info (dict|None): - con_ssh (SSHClient): - - Returns (tuple): (rtn_code(int), msg(str)) # rtn_code 1,2 only returns - when fail_ok=True - (-1, 'No server group(s) to delete.') # "Empty vm list/string - provided and no vm exist on system. - (-1, 'None of the given server group(s) exists on system.') - (0, "Server group(s) deleted successfully.") - (1, ) # Deletion rejected for all of the server groups. - Return CLI stderr. - (2, "Some deleted server group(s) still exist on system:: - ") - """ - existing_sgs = None - if not srv_grp_ids: - existing_sgs = srv_grp_ids = get_server_groups(con_ssh=con_ssh, - auth_info=auth_info) - elif isinstance(srv_grp_ids, str): - srv_grp_ids = [srv_grp_ids] - - srv_grp_ids = [sg for sg in srv_grp_ids if sg] - if not srv_grp_ids: - LOG.info("No server group(s) to delete. Do Nothing") - return -1, 'No server group(s) to delete.' - - if check_first: - if existing_sgs is None: - existing_sgs = get_server_groups(con_ssh=con_ssh, - auth_info=auth_info) - - srv_grp_ids = list(set(srv_grp_ids) & set(existing_sgs)) - if not srv_grp_ids: - msg = "None of the given server group(s) exists on system. 
Do " \ - "nothing" - LOG.info(msg) - return -1, msg - - LOG.info("Deleting server group(s): {}".format(srv_grp_ids)) - code, output = cli.openstack('server group delete', ' '.join(srv_grp_ids), - ssh_client=con_ssh, fail_ok=True, - auth_info=auth_info, timeout=60) - if code == 1: - return 1, output - - existing_sgs = get_server_groups(con_ssh=con_ssh, auth_info=auth_info) - grps_undeleted = list(set(srv_grp_ids) & set(existing_sgs)) - if grps_undeleted: - msg = "Some server group(s) still exist on system after deletion: " \ - "{}".format(grps_undeleted) - LOG.warning(msg) - if fail_ok: - return 2, msg - raise exceptions.NovaError(msg) - - msg = "Server group(s) deleted successfully." - LOG.info(msg) - return 0, "Server group(s) deleted successfully." - - -def get_keypairs(name=None, field='Name', con_ssh=None, auth_info=None): - """ - - Args: - name (str): Name of the key pair to filter for a given user - field (str|list|tuple) - con_ssh (SSHClient): - auth_info (dict): Tenant to be used to execute the cli if none - Primary tenant will be used - - Returns (list):return keypair names - - """ - table_ = table_parser.table( - cli.openstack('keypair list', ssh_client=con_ssh, auth_info=auth_info)[ - 1]) - return table_parser.get_multi_values(table_, field, Name=name) - - -def get_flavor_values(flavor, fields, strict=True, con_ssh=None, - auth_info=Tenant.get('admin')): - """ - Get flavor values for given fields via openstack flavor show - Args: - flavor (str): - fields (str|list|tuple): - strict (bool): strict search for field name or not - con_ssh: - auth_info: - - Returns (list): - - """ - table_ = table_parser.table( - cli.openstack('flavor show', flavor, ssh_client=con_ssh, - auth_info=auth_info)[1]) - return table_parser.get_multi_values_two_col_table( - table_, fields, merge_lines=True, evaluate=True, - strict=strict, dict_fields=('properties',)) - - -def copy_flavor(origin_flavor, new_name=None, con_ssh=None): - """ - Extract the info from an existing flavor and create a new flavor that is - has identical info - - Args: - origin_flavor (str): id of an existing flavor to extract the info from - new_name: - con_ssh: - - Returns (str): flavor_id - - """ - table_ = table_parser.table( - cli.openstack('flavor show', origin_flavor, ssh_client=con_ssh, - auth_info=Tenant.get('admin'))[1]) - - extra_specs = table_parser.get_value_two_col_table(table_, 'properties') - extra_specs = table_parser.convert_value_to_dict(value=extra_specs) - ephemeral = table_parser.get_value_two_col_table(table_, 'ephemeral', - strict=False) - disk = table_parser.get_value_two_col_table(table_, 'disk') - is_public = table_parser.get_value_two_col_table(table_, 'is_public', - strict=False) - ram = table_parser.get_value_two_col_table(table_, 'ram') - rxtx_factor = table_parser.get_value_two_col_table(table_, 'rxtx_factor') - swap = table_parser.get_value_two_col_table(table_, 'swap') - vcpus = table_parser.get_value_two_col_table(table_, 'vcpus') - old_name = table_parser.get_value_two_col_table(table_, 'name') - - if not new_name: - new_name = "{}-{}".format(old_name, new_name) - swap = swap if swap else 0 - new_flavor_id = \ - create_flavor(name=new_name, vcpus=vcpus, ram=ram, swap=swap, - root_disk=disk, ephemeral=ephemeral, - is_public=is_public, rxtx_factor=rxtx_factor, - con_ssh=con_ssh)[1] - set_flavor(new_flavor_id, con_ssh=con_ssh, **extra_specs) - - return new_flavor_id - - -# TODO: nova providernet-show no longer exists for pci pfs/vfs info. Update -# required. 
-def get_provider_net_info(providernet_id, field='pci_pfs_configured', - strict=True, auth_info=Tenant.get('admin'), - con_ssh=None, rtn_int=True): - """ - Get provider net info from "nova providernet-show" - - Args: - providernet_id (str): id of a providernet - field (str): Field name such as pci_vfs_configured, pci_pfs_used, etc - strict (bool): whether to perform a strict search on field name - auth_info (dict): - con_ssh (SSHClient): - rtn_int (bool): whether to return integer or string - - Returns (int|str): value of specified field. Convert to integer by - default unless rnt_int=False. - - """ - if not providernet_id: - raise ValueError("Providernet id is not provided.") - - table_ = table_parser.table( - cli.nova('providernet-show', providernet_id, ssh_client=con_ssh, - auth_info=auth_info)[1]) - info_str = table_parser.get_value_two_col_table(table_, field, - strict=strict) - return int(info_str) if rtn_int else info_str - - -def get_pci_interface_stats_for_providernet( - providernet_id, - fields=('pci_pfs_configured', 'pci_pfs_used', 'pci_vfs_configured', - 'pci_vfs_used'), - auth_info=Tenant.get('admin'), con_ssh=None): - """ - get pci interface usage - Args: - providernet_id (str): id of a providernet - fields: fields such as ('pci_vfs_configured', 'pci_pfs_used') - auth_info (dict): - con_ssh (SSHClient): - - Returns (tuple): tuple of integers - - """ - if not providernet_id: - raise ValueError("Providernet id is not provided.") - - table_ = table_parser.table( - cli.nova('providernet-show', providernet_id, ssh_client=con_ssh, - auth_info=auth_info)[1]) - rtn_vals = [] - for field in fields: - pci_stat = int( - table_parser.get_value_two_col_table(table_, field, strict=True)) - rtn_vals.append(pci_stat) - return tuple(rtn_vals) - - -def create_aggregate(field='name', name=None, avail_zone=None, properties=None, - check_first=True, fail_ok=False, - con_ssh=None, auth_info=Tenant.get('admin')): - """ - Add a aggregate with given name and availability zone. - - Args: - field (str): name or id - name (str): name for aggregate to create - avail_zone (str|None): - properties (dict|None) - check_first (bool) - fail_ok (bool): - con_ssh (SSHClient): - auth_info (dict): - - Returns (tuple): - (0, ) -- aggregate successfully created - (1, ) -- cli rejected - (2, "Created aggregate is not as specified") -- name and/or - availability zone mismatch - - """ - if not name: - existing_names = get_aggregates(field='name') - name = common.get_unique_name(name_str='stxauto', - existing_names=existing_names) - - args_dict = { - '--zone': avail_zone, - '--property': properties, - } - args = '{} {}'.format(common.parse_args(args_dict, repeat_arg=True), name) - - if check_first: - aggregates_ = get_aggregates(field=field, name=name, - avail_zone=avail_zone) - if aggregates_: - LOG.warning("Aggregate {} already exists. 
Do nothing.".format(name)) - return -1, aggregates_[0] - - LOG.info("Adding aggregate {}".format(name)) - res, out = cli.openstack('aggregate create', args, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - if res == 1: - return res, out - - out_tab = table_parser.table(out) - - succ_msg = "Aggregate {} is successfully created".format(name) - LOG.info(succ_msg) - return 0, table_parser.get_value_two_col_table(out_tab, field) - - -def get_aggregates(field='name', name=None, avail_zone=None, con_ssh=None, - auth_info=Tenant.get('admin')): - """ - Get a list of aggregates - - Args: - field (str|list|tuple): id or name - name (str|list): filter out the aggregates with given name if specified - avail_zone (str): filter out the aggregates with given availability - zone if specified - con_ssh (SSHClient): - auth_info (dict): - - Returns (list): - - """ - kwargs = {} - if avail_zone: - kwargs['Availability Zone'] = avail_zone - if name: - kwargs['Name'] = name - - aggregates_tab = table_parser.table( - cli.openstack('aggregate list', ssh_client=con_ssh, - auth_info=auth_info)[1]) - return table_parser.get_multi_values(aggregates_tab, field, **kwargs) - - -def delete_aggregates(names, check_first=True, remove_hosts=True, fail_ok=False, - con_ssh=None, - auth_info=Tenant.get('admin')): - """ - Add a aggregate with given name and availability zone. - - Args: - names (str|list): name for aggregate to delete - check_first (bool) - remove_hosts (bool) - fail_ok (bool): - con_ssh (SSHClient): - auth_info (dict): - - Returns (tuple): - (0, "Aggregate is successfully deleted") -- aggregate - successfully deletec - (1, ) -- cli rejected - (2, "Aggregate still exists in aggregate-list after deletion") - -- failed although cli accepted - - """ - if check_first: - names = get_aggregates(name=names, con_ssh=con_ssh, auth_info=auth_info) - if not names: - msg = 'Aggregate {} does not exists. 
-
-
-def get_compute_services(field, con_ssh=None, auth_info=Tenant.get('admin'),
-                         **kwargs):
-    """
-    Get values from compute services list
-
-    System: Regular, Small footprint
-
-    Args:
-        field (str)
-        con_ssh (SSHClient):
-        auth_info (dict):
-        kwargs: Valid keys: Id, Binary, Host, Zone, Status, State, Updated At
-
-    Returns (list): a list of values for the given field
-    """
-    table_ = table_parser.table(
-        cli.openstack('compute service list', ssh_client=con_ssh,
-                      auth_info=auth_info)[1])
-    return table_parser.get_values(table_, field, **kwargs)
-
-
-def remove_hosts_from_aggregate(aggregate, hosts=None, check_first=True,
-                                fail_ok=False, con_ssh=None,
-                                auth_info=Tenant.get('admin')):
-    """
-    Remove hosts from specified aggregate
-
-    Args:
-        aggregate (str): name of the aggregate to remove hosts from. stxauto
-            aggregate can be added via add_stxauto_zone session fixture
-        hosts (list|str): host(s) to remove from aggregate
-        check_first (bool):
-        fail_ok (bool):
-        con_ssh (SSHClient):
-        auth_info (dict):
-
-    Returns (tuple):
-        (0, "Hosts successfully removed from aggregate")
-        (1, ) cli rejected on at least one host
-        (2, "Host(s) still exist in aggregate after
-        aggregate-remove-host: )
-
-    """
-    return __remove_or_add_hosts_in_aggregate(remove=True, aggregate=aggregate,
-                                              hosts=hosts,
-                                              check_first=check_first,
-                                              fail_ok=fail_ok, con_ssh=con_ssh,
-                                              auth_info=auth_info)
-
-
-def add_hosts_to_aggregate(aggregate, hosts, check_first=True, fail_ok=False,
-                           con_ssh=None,
-                           auth_info=Tenant.get('admin')):
-    """
-    Add host(s) to specified aggregate
-
-    Args:
-        aggregate (str): name of the aggregate to add hosts to. stxauto
-            aggregate can be added via add_stxauto_zone session fixture
-        hosts (list|str): host(s) to add to aggregate
-        check_first (bool):
-        fail_ok (bool):
-        con_ssh (SSHClient):
-        auth_info (dict):
-
-    Returns (tuple):
-        (0, "Hosts successfully added to aggregate")
-        (1, ) cli rejected on at least one host
-        (2, "aggregate-add-host accepted, but some host(s) are not added in
-        aggregate")
-
-    """
-    return __remove_or_add_hosts_in_aggregate(remove=False,
-                                              aggregate=aggregate,
-                                              hosts=hosts,
-                                              check_first=check_first,
-                                              fail_ok=fail_ok, con_ssh=con_ssh,
-                                              auth_info=auth_info)
-
-
-def __remove_or_add_hosts_in_aggregate(aggregate, hosts=None, remove=False,
-                                       check_first=True, fail_ok=False,
-                                       con_ssh=None,
-                                       auth_info=Tenant.get('admin')):
-    """
-    Remove/Add hosts from/to given aggregate
-
-    Args:
-        aggregate (str): name of the aggregate to add/remove hosts. 
stxauto - aggregate can be added via - add_stxauto_zone session fixture - hosts (list|str): - remove (bool): True if remove hosts from given aggregate, otherwise - add hosts to aggregate - check_first (bool): - fail_ok (bool): - con_ssh (SSHClient): - auth_info (dict): - - Returns (tuple): - (0, "Hosts successfully removed from aggregate") - (1, ) cli rejected on at least one host - (2, "Host(s) still exist in aggregate after - aggregate-remove-host: ) - - """ - hosts_in_aggregate = get_hosts_in_aggregate(aggregate, con_ssh=con_ssh) - - if hosts is None: - if remove: - hosts = hosts_in_aggregate - else: - from keywords import host_helper - hosts = host_helper.get_hypervisors() - - if isinstance(hosts, str): - hosts = [hosts] - - msg_str = 'Remov' if remove else 'Add' - LOG.info("{}ing hosts {} in aggregate {}".format(msg_str, hosts, aggregate)) - if check_first: - if remove: - hosts_to_rm_or_add = list(set(hosts) & set(hosts_in_aggregate)) - else: - hosts_to_rm_or_add = list(set(hosts) - set(hosts_in_aggregate)) - else: - hosts_to_rm_or_add = list(hosts) - - if not hosts_to_rm_or_add: - warn_str = 'No' if remove else 'All' - msg = "{} given host(s) in aggregate {}. Do nothing. Given hosts: " \ - "{}; hosts in aggregate: {}". \ - format(warn_str, aggregate, hosts, hosts_in_aggregate) - LOG.warning(msg) - return -1, msg - - failed_res = {} - cmd = 'aggregate remove host' if remove else 'aggregate add host' - for host in hosts_to_rm_or_add: - args = '{} {}'.format(aggregate, host) - code, output = cli.openstack(cmd, args, ssh_client=con_ssh, - fail_ok=True, auth_info=auth_info) - if code > 0: - failed_res[host] = output - - if failed_res: - err_msg = "'{}' is rejected for following host(s) in aggregate " \ - "{}: {}".format(cmd, aggregate, failed_res) - if fail_ok: - LOG.warning(err_msg) - return 1, err_msg - else: - raise exceptions.NovaError(err_msg) - - post_hosts_in_aggregate = get_hosts_in_aggregate(aggregate, con_ssh=con_ssh) - if remove: - failed_hosts = list(set(hosts) & set(post_hosts_in_aggregate)) - else: - failed_hosts = list(set(hosts) - set(post_hosts_in_aggregate)) - - if failed_hosts: - err_msg = "{} accepted, but some host(s) are not {}ed in aggregate " \ - "{}: {}".format(cmd, msg_str, aggregate, failed_hosts) - if fail_ok: - LOG.warning(err_msg) - return 2, err_msg - else: - raise exceptions.NovaError(err_msg) - - succ_msg = "Hosts successfully {}ed in aggregate {}: {}".format( - msg_str.lower(), aggregate, hosts) - LOG.info(succ_msg) - return 0, succ_msg - - -def get_migration_list_table(con_ssh=None, auth_info=Tenant.get('admin')): - """ - nova migration-list to collect migration history of each vm - Args: - con_ssh (SSHClient): - auth_info (dict): - - """ - LOG.info("Listing migration history...") - return table_parser.table( - cli.nova('migration-list', ssh_client=con_ssh, auth_info=auth_info)[1]) - - -def create_keypair(name, public_key=None, private_key=None, fail_ok=False, - con_ssh=None, - auth_info=Tenant.get('admin')): - """ - Create a new keypair - Args: - name (str): keypair name to create - public_key (str|None): existing public key file path to use - private_key (str|None): file path to save private key - fail_ok (bool) - con_ssh (SSHClient): - auth_info (dict): - - Returns (tuple): - - """ - args_dict = {'--public-key': public_key, '--private-key': private_key} - args = '{} "{}"'.format(common.parse_args(args_dict), name) - LOG.info("Creating keypair with args: {}".format(args)) - - code, out = cli.openstack('keypair create', args, ssh_client=con_ssh, - 
fail_ok=fail_ok, auth_info=auth_info) - if code > 0: - return 1, out - - LOG.info("Keypair {} created successfully".format(name)) - return 0, name - - -def delete_keypairs(keypairs, check_first=True, fail_ok=False, con_ssh=None, - auth_info=None): - """ - Delete keypair(s) - Args: - keypairs (list/str): keypair(s) to delete - check_first (bool) - fail_ok (bool) - con_ssh (SSHClient): - auth_info (dict): - - Returns (tuple): - - """ - if isinstance(keypairs, str): - keypairs = (keypairs,) - - if check_first: - existing_keypairs = get_keypairs(con_ssh=con_ssh, auth_info=auth_info) - keypairs = list(set(keypairs) & set(existing_keypairs)) - if not keypairs: - msg = 'Give keypair(s) not exist. Do nothing.' - LOG.info(msg) - return -1, msg - - LOG.info('Deleting keypairs: {}'.format(keypairs)) - code, out = cli.openstack('keypair delete', ' '.join(keypairs), - ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info) - if code > 0: - return code, out - - post_keypairs = get_keypairs(con_ssh=con_ssh, auth_info=auth_info) - undeleted_kp_names = list(set(keypairs) & set(post_keypairs)) - if undeleted_kp_names: - raise exceptions.NovaError( - "keypair(s) still exist after deletion: {}".format( - undeleted_kp_names)) - - msg = 'keypairs deleted successfully: {}'.format(keypairs) - LOG.info(msg) - return 0, msg - - -def get_hosts_in_aggregate(aggregate, con_ssh=None, - auth_info=Tenant.get('admin'), fail_ok=False): - """ - Get list of hosts in given nova aggregate - Args: - aggregate (str): - con_ssh: - auth_info: - fail_ok (bool) - - Returns (list): - - """ - if 'image' in aggregate: - aggregate = 'local_storage_image_hosts' - elif 'remote' in aggregate: - aggregate = 'remote_storage_hosts' - - hosts = get_aggregate_values(aggregate, 'hosts', con_ssh=con_ssh, - auth_info=auth_info, fail_ok=fail_ok) - if hosts: - hosts = hosts[0] - LOG.info("Hosts in {} aggregate: {}".format(aggregate, hosts)) - return hosts diff --git a/automated-pytest-suite/keywords/pm_helper.py b/automated-pytest-suite/keywords/pm_helper.py deleted file mode 100644 index 18b4696f..00000000 --- a/automated-pytest-suite/keywords/pm_helper.py +++ /dev/null @@ -1,1152 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - - -import configparser -import datetime -import os.path -import re -import time -from io import StringIO - -import pexpect - -from consts.auth import Tenant -from consts.timeout import MTCTimeout -from keywords import system_helper, host_helper -from utils.clients.ssh import ControllerClient -from utils.tis_log import LOG - -KILL_CMD = 'kill -9' -PROCESS_TYPES = ['sm', 'pmon', 'other'] -KILL_PROC_EVENT_FORMAT = { - # documented - # 401.001 Service group state change from to on - # host - # - # actual in 2017-02-20_22-01-22 - # clear | 400.001 | Service group cloud-services degraded; - # cinder-api(disabled, failed) |\ - # service_domain=controller.service_group=cloud-services.host=controller-1 - # log | 401.001 | Service group cloud-services state change from - # active-degraded to active on host - # set | 400.001 | Service group cloud-services degraded; - # cinder-api(disabled, failed) |\ - # service_domain=controller.service_group=cloud-services.host=controller-1 - - # 'sm': ('401.001', - # actual in 2017-02-20_22-01-22 - # clear 400.001 Service group cloud-services warning; nova-novnc(disabled, - # failed) - # service_domain=controller.service_group=cloud-services.host=controller-0 - - # 'sm': ('400.001', - # r'Service group ([^\s]+) ([^\s]+);\s*(.*)', - # r'service_domain=controller\.service_group=([^\.]+)\.host=(.*)'), - 'sm': { - 'event_id': '400.001', - 'critical': ( - r'Service group ([^\s]+) ([^\s]+);\s*(.*)', - r'service_domain=controller\.service_group=([^\.]+)\.host=(.*)' - ), - 'major': ( - r'Service group ([^\s]+) ([^\s]+);\s*(.*)', - r'service_domain=controller\.service_group=([^\.]+)\.host=(.*)' - ), - 'minor': ( - r'Service group ([^\s]+) ([^\s]+);\s*(.*)', - r'service_domain=controller\.service_group=([^\.]+)\.host=(.*)' - ), - }, - - # set 200.006 controller-1 'acpid' process has failed. - # Auto recovery in progress. host=controller-1.process=acpid minor - 'pmon': { - 'event_id': '200.006', - # controller-1 critical 'sm' process has failed and could not be - # auto-recovered gracefully. - # Auto- recovery progression by host reboot is required and in - # progress. host=controller-1.process=sm - 'critical': ( - r'([^\s]+) ([^\s]+) \'([^\']+)\' process has ([^\s]+) and could ' - r'not be auto-recovered gracefully. ' - r'Auto.recovery progression by host reboot is required and in ' - r'progress.', - r'host=([^\.]+)\.process=([^\s]+)' - ), - # compute-2 is degraded due to the failure of its 'fsmond' process. - # Auto recovery of this major - # | host=compute-2.process= | major | process is in progress. - 'major': ( - r'([^\s]+) is ([^\s]+) due to the failure of its \'([^\']+)\' ' - r'process. Auto recovery of this ([^\s]+) ' - r'process is in progress.', - r'host=([^\.]+)\.process=([^\s]+)' - ), - # clear 200.006 compute-2 'mtclogd' process has failed. Auto - # recovery in progress. - # host=compute-2.process=mtclogd minor - # "compute-2 'mtclogd' process has failed. Auto recovery in progress." - # set compute-1 'ntpd' process has failed. Manual recovery is required. - 'minor': ( - r"([^\s]+) '([^\']+)' process has ([^\s]+)\. 
[^\s]+ recovery.*", - r'host=([^\.]+)\.process=([^\s]+)' - ), - }, -} -AVAILABILITY_MAPPINGS = {'active': 'enabled', 'enabled': 'active'} -PMON_PROC_CONF_DIR = '/etc/pmon.d' - - -def get_pmon_process_info(name, host, conf_file=None, con_ssh=None): - """ - Get process info from its PMON config file - Args: - name (str): name of the PMON process - host (str): host on which the PROM process running - con_ssh: connection to the active controller - conf_file (str): configuration file for the PMON process - - Returns (dict): settings of the process - - """ - LOG.info('Get PMON process information for {}'.format(name)) - - if not conf_file: - file_name = '{}.conf'.format(name) - else: - file_name = conf_file - - cmd = 'cat {}'.format(os.path.join(PMON_PROC_CONF_DIR, file_name)) - - with host_helper.ssh_to_host(host, con_ssh=con_ssh) as con0_ssh: - code, output = con0_ssh.exec_sudo_cmd(cmd) - - if 0 != code or not output.strip(): - LOG.error( - 'Failed to read config file:{}/{} for PMON process:{} on host:{}, ' - 'code:{}, message:{}'.format( - PMON_PROC_CONF_DIR, file_name, name, host, code, output)) - return {} - - conf_parser = configparser.ConfigParser() - conf_parser.read_file(StringIO(output)) - - settings = {} - - if 'process' in conf_parser.sections(): - settings = {k.strip(): v.split(';')[0].strip() for k, v in - conf_parser.items('process')} - - settings['interval'] = int(settings.get('interval', 5)) - settings['debounce'] = int(settings.get('debounce', 20)) - LOG.debug('process settings:{}'.format(settings)) - return settings - - -def get_ancestor_process(name, host, cmd='', fail_ok=False, retries=5, - retry_interval=3, con_ssh=None): - """ - Get the ancestor of the processes with the given name and command-line if - any. - - Args: - name: name of the process - host: host on which to find the process - cmd: executable name - fail_ok: do not throw exception when errors - retries: times to try before return - retry_interval: wait before next re-try - con_ssh: ssh connection/client to the active controller - - Returns: - pid (int), process id, -1 if there is any error - ppid (int), parent process id, -1 if there is any error - cmdline (str) command line of the process - """ - retries = retries if retries > 1 else 3 - retry_interval = retry_interval if retry_interval > 0 else 1 - - if cmd: - ps_cmd = r'ps -e -oppid,pid,cmd | /usr/bin/grep "{}\|{}" | ' \ - r'/usr/bin/grep -v grep | /usr/bin/grep {}'.\ - format(name, os.path.basename(cmd), cmd) - else: - ps_cmd = 'ps -e -oppid,pid,cmd | /usr/bin/grep "{}" | /usr/bin/grep ' \ - '-v grep'.format(name) - - code, output = -1, '' - if fail_ok: - for count in range(retries): - with host_helper.ssh_to_host(host, con_ssh=con_ssh) as con0_ssh: - code, output = con0_ssh.exec_cmd(ps_cmd, fail_ok=True) - if 0 == code and output.strip(): - break - LOG.warn('Failed to run cli:{} on controller at retry:{:02d}, ' - 'wait:{} seconds and try again'.format(cmd, count, - retry_interval)) - time.sleep(retry_interval) - else: - with host_helper.ssh_to_host(host, con_ssh=con_ssh) as con0_ssh: - code, output = con0_ssh.exec_cmd(ps_cmd, fail_ok=False) - - if not (0 == code and output.strip()): - LOG.error( - 'Failed to find process with name:{} and cmd:{}'.format(name, cmd)) - return -1, -1, '' - - procs = [] - ppids = [] - for line in output.strip().splitlines(): - proc_attr = line.strip().split() - if not proc_attr: - continue - try: - ppid = int(proc_attr[0].strip()) - pid = int(proc_attr[1].strip()) - cmdline = ' '.join(proc_attr[2:]) - LOG.info('ppid={}, 
pid={}\ncmdline={}'.format(ppid, pid, cmdline)) - except IndexError: - LOG.warn( - 'Failed to execute ps -p ?! cmd={}, line={}, output={}'.format( - cmd, line, output.strip())) - continue - - if cmd and cmd not in cmdline: - continue - procs.append((pid, ppid, cmdline)) - ppids.append(ppid) - - if len(procs) <= 0: - LOG.error( - 'Could not find process with name:{} and cmd:{}'.format(name, cmd)) - return -1, -1, '' - - pids = [v[1] for v in procs] - - if len(pids) == 1: - LOG.info('porcs[0]:{}'.format(procs[0])) - return procs[0] - - LOG.warn( - 'Multiple ({}) parent processes?, ppids:{}'.format(len(ppids), ppids)) - - if '1' not in ppids: - LOG.warn( - 'Init is not the grand parent process?, ppids:{}'.format(ppids)) - - for ppid, pid, cmdline in procs: - if pid in ppids and ppid not in pids and 1 != pid: - LOG.info('pid={}, ppid={}, cmdline={}'.format(pid, ppid, cmdline)) - return pid, ppid, cmdline - - LOG.error( - 'Could not find process, procs:{}, ppids:{}, pids:{}'.format(procs, - ppids, - pids)) - return -1, -1, '' - - -def verify_process_with_pid_file(pid, pid_file, con_ssh=None): - """ - Check if the given PID matching the PID in the specified pid_file - - Args: - pid: process id - pid_file: the file containing the process id - con_ssh: ssh connnection/client to the host on which the process - resides - - Returns: - - """ - con_ssh = con_ssh or ControllerClient.get_active_controller() - - code, output = con_ssh.exec_sudo_cmd('cat {} | head -n1'.format(pid_file), - fail_ok=False) - LOG.info('code={}, output={}'.format(code, output)) - - output = output.strip() - if not output or int(output) != pid: - LOG.info('Mismatched PID, expected:<{}>, from pid_file:<{}>, ' - 'pid_file={}'.format(pid, output, pid_file)) - return False - else: - LOG.info( - 'OK PID:{} matches with that from pid_file:{}, pid_file={}'.format( - pid, output.strip(), pid_file)) - return True - - -def get_process_from_sm(name, con_ssh=None, pid_file='', - expecting_status='enabled-active'): - """ - Get the information for the process from SM, including PID, Name, Current - Status and Pid-File - - Args: - name: name of the process - con_ssh: ssh connection/client to the active-controller - pid_file: known pid-file path/name to compare with - expecting_status: expected status of the process - - Returns: - pid (int): process id - proc_name (str): process name - actual_status (str): actual/current status of the process - sm_pid_file (str): pid-file in records of SM - """ - con_ssh = con_ssh or ControllerClient.get_active_controller() - - cmd = "true; NM={}; sudo sm-dump --impact --pid --pid_file | awk -v " \ - "pname=$NM '{{ if ($1 == pname) print }}'; " \ - "echo".format(name) - - code, output = con_ssh.exec_sudo_cmd(cmd, fail_ok=True) - - pid, proc_name, impact, sm_pid_file, actual_status = -1, '', '', '', '' - - if 0 != code or not output: - LOG.warn( - 'Cannot find the process:{} in SM with error code:\n{}\n' - 'output:{}'.format(name, code, output)) - return pid, proc_name, impact, sm_pid_file, actual_status - - for line in output.splitlines(): - if not line.strip(): - continue - pid, proc_name, impact, sm_pid_file, actual_status = -1, '', '', '', '' - - results_array = line.strip().split() - LOG.info('results_array={}'.format(results_array)) - - if len(results_array) != 6: - LOG.debug( - 'Invalid format from output of sm-dump?! 
line={}\n' - 'cmd={}'.format(line, cmd)) - continue - - proc_name = results_array[0] - if proc_name != name: - continue - - expect_status = results_array[1] - actual_status = results_array[2] - - if expect_status != actual_status: - LOG.warn( - 'service:{} is not in expected status yet. expected:{}, ' - 'actual:{}. Retry'.format( - proc_name, expect_status, actual_status)) - continue - - if actual_status != expecting_status: - LOG.warn( - 'service:{} is not in expected status yet. expected:{}, ' - 'actual:{}. Retry'.format( - proc_name, expecting_status, actual_status)) - break - - impact = results_array[3] - - pid = int(results_array[4].strip()) - if results_array[5] != sm_pid_file: - LOG.warn( - 'pid_file not matching with that from SM-dump, pid_file={}, ' - 'sm-dump-pid_file={}'.format( - sm_pid_file, results_array[5])) - sm_pid_file = results_array[5] - - if pid_file and sm_pid_file != pid_file: - LOG.warn( - 'pid_file differs from input pid_file, pid_file={}, ' - 'sm-dump-pid_file={}'.format( - pid_file, sm_pid_file)) - - if sm_pid_file: - if not verify_process_with_pid_file(pid, sm_pid_file, - con_ssh=con_ssh): - LOG.warn( - 'pid of service mismatch that from pid-file, pid:{}, ' - 'pid-file:{}, proc-name:{}'.format( - pid, sm_pid_file, proc_name)) - # found - break - - if -1 != pid: - host = system_helper.get_active_controller_name() - running, msg = is_process_running(pid, host) - if not running: - LOG.warn( - 'Process not existing, name={}, pid={}, msg={}'.format(name, - pid, - msg)) - return -1, '', '', '', '' - else: - LOG.info( - 'OK, Process is running: name={}, pid={}, output={}'.format( - name, pid, msg)) - - return pid, proc_name, impact, actual_status, sm_pid_file - - -def is_controller_swacted( - prev_active, prev_standby, - swact_start_timeout=MTCTimeout.KILL_PROCESS_SWACT_NOT_START, - swact_complete_timeout=MTCTimeout.KILL_PROCESS_SWACT_COMPLETE, - con_ssh=None): - """ - Wait and check if the active-controller on the system was 'swacted' with - give time period - - Args: - prev_active: previous active controller - prev_standby: previous standby controller - swact_start_timeout: check within this time frame if the swacting - started - swact_complete_timeout: check if the swacting (if any) completed in - this time period - con_ssh: ssh connection/client to the current - active-controller - - Returns: - - """ - LOG.info( - 'Check if the controllers started to swact within:{}, and completing ' - 'swacting within:{}'.format( - swact_start_timeout, swact_complete_timeout)) - - code = -1 - host = prev_active - for retry in range(1, 5): - LOG.info( - 'retry{:02d}: checking if swacting triggered, ' - 'prev-active-controller={}'.format( - retry, prev_active)) - code = 0 - try: - code, msg = host_helper.wait_for_swact_complete( - host, con_ssh=con_ssh, fail_ok=True, - swact_start_timeout=swact_start_timeout, - swact_complete_timeout=swact_complete_timeout) - - if 0 == code: - LOG.info( - 'OK, host-swacted, prev-active:{}, pre-standby:{}, ' - 'code:{}, message:{}'.format( - prev_active, prev_active, code, msg)) - return True - - active, standby = system_helper.get_active_standby_controllers() - if active == prev_standby and standby == prev_active: - LOG.info( - 'swacted?! prev-active:{} prev-standby:{}, cur active:{}, ' - 'cur standby:{}'.format( - prev_active, prev_standby, active, standby)) - return True - break - - except Exception as e: - LOG.warn( - 'erred, indicating system is in unstable state, meaning ' - 'probably swacting is in process. 
' - 'previous active-controller:{}, previous standby-controller:{}' - '\nerror message:{}'.format(prev_active, prev_standby, e)) - - if retry >= 4: - LOG.error( - 'Fail the test after retry {} times, system remains in ' - 'unstable state, ' - 'meaning probably swacting is in process. previous ' - 'active-controller:{}, ' - 'previous standby-controller:{}\nerror message:{}'. - format(retry, prev_active, prev_standby, e)) - raise - - time.sleep(10) - - return 0 == code - - -def wait_for_sm_process_events(service, host, target_status, expecting=True, - severity='major', - last_events=None, process_type='sm', timeout=60, - interval=3, con_ssh=None): - if process_type not in KILL_PROC_EVENT_FORMAT: - LOG.error('unknown type of process:{}'.format(process_type)) - - event_log_id = KILL_PROC_EVENT_FORMAT[process_type]['event_id'] - reason_pattern, entity_id_pattern = KILL_PROC_EVENT_FORMAT[process_type][ - severity][0:2] - - if last_events is not None: - last_event = last_events['values'][0] - start_time = \ - last_event[1].replace('-', '').replace('T', ' ').split('.')[0] - else: - start_time = '' - - search_keys = { - 'Event Log ID': event_log_id, - 'Reason Text': reason_pattern, - 'Entity Instance ID': entity_id_pattern, - } - - expected_availability = target_status.get('availability', None) - - matched_events = [] - stop_time = time.time() + timeout - if expecting and (service == 'nova-novnc' or service == 'vim-webserver'): - stop_time = time.time() + timeout + 300 - interval = 60 - retry = 0 - while time.time() < stop_time: - retry += 1 - matched_events[:] = [] - events_table = system_helper.get_events_table( - event_log_id=event_log_id, show_uuid=True, - start=start_time, limit=10, con_ssh=con_ssh, regex=True, - **search_keys) - - if not events_table or not events_table['values']: - LOG.warn( - 'run{:02d} for process:{}: Empty event table?!\n' - 'evens_table:{}\nevent_id={}, ' - 'start={}\nkeys={}, severify={}'. - format(retry, service, events_table, event_log_id, start_time, - search_keys, severity)) - continue - - for event in events_table['values']: - try: - actual_event_id = event[3].strip() - if actual_event_id != event_log_id: - LOG.warn('Irrelevant event? event-list quering broken?!' - ' looking-for-event-id={}, actual-event-id={}, ' - 'event={}'. 
- format(event_log_id, actual_event_id, event)) - continue - - actual_state = event[2] - if actual_state not in ('set', 'clear'): - LOG.info( - 'State not matching, expected-state="log", ' - 'actual-state={}", event={}'.format( - actual_state, event)) - continue - - actual_reason = event[4].strip() - # ('cloud-services', 'active', 'active-degraded', - # 'controller-0;', ' glance-api(disabled, failed)') - m = re.match(reason_pattern, actual_reason) - if not m: - LOG.info( - 'Not matched event:{},\nevent_id={}, start={}, ' - 'reason_text={}'.format( - event, event_log_id, start_time, reason_pattern)) - continue - - actual_group_status = m.group(2) - if actual_group_status not in ('active', expected_availability): - LOG.info( - 'Group status not matching!, expected-status={}, ' - 'actual-status={}\nevent={}'.format( - expected_availability, actual_group_status, event)) - continue - - if 'host={}'.format(host) not in event[5]: - LOG.info( - 'Host not matching, expected-host={}, acutal-host={}, ' - 'event={}'.format( - host, event[5], event)) - continue - - actual_service_name, status = m.group(3).split('(') - service_operational, service_availability = status.split(',') - matched_events.append(dict( - uuid=event[0], - event=event[1:-1], - service=actual_service_name, - serice_operational=service_operational, - service_availability=service_availability.strip().strip( - ')'), - group_name=m.group(1), - group_prev_status=m.group(2), - group_status=m.group(3) - )) - - if not expecting: - LOG.error( - 'Found set/clear event while it should NOT\nevent:' - '{}'.format(event)) - return -1, tuple(matched_events) - - matched_events = list(reversed(matched_events)) - if len(matched_events) > 1: - if matched_events[-1]['event'][1] == 'clear' and \ - matched_events[-2]['event'][1] == 'set': - LOG.info('OK, found matched events:{}'.format( - matched_events)) - return 0, tuple(matched_events) - - except IndexError: - LOG.error( - 'CLI fm event-list changed its output format?\nsearching ' - 'keys={}'.format( - search_keys)) - raise - - LOG.warn( - 'No matched event found at try:{}, will sleep {} seconds and retry' - '\nmatched events:\n{}, host={}'.format(retry, interval, - matched_events, host)) - - time.sleep(interval) - continue - - LOG.info('No matched events:\n{}'.format(matched_events)) - - return -1, tuple() - - -def _check_status_after_killing_process(service, host, target_status, - expecting=True, process_type='sm', - last_events=None, con_ssh=None, - auth_info=Tenant.get('admin_platform')): - LOG.info( - 'check for process:{} on host:{} expecting status:{}, process_type:' - '{}'.format(service, host, target_status, process_type)) - - try: - operational, availability = target_status.split('-') - except ValueError as e: - LOG.error('unknown host status:{}, error:{}'.format(target_status, e)) - raise - - expected = {'operational': operational, 'availability': availability} - - if availability == 'warning': - LOG.info('impact:{} meaning: operational={}, availabiltiy={}'.format( - target_status, operational, availability)) - code, _ = wait_for_sm_process_events( - service, - host, - expected, - expecting=expecting, - last_events=last_events, - process_type=process_type, - con_ssh=con_ssh) - - return (0 == code) == expecting - - total_wait = 120 if expecting else 30 - time.sleep(1) - - found = system_helper.wait_for_host_values(host, timeout=total_wait / 2, - con_ssh=con_ssh, fail_ok=True, - auth_info=auth_info, **expected) - - if expecting and found: - LOG.debug('OK, process:{} in status:{} as 
expected.'.format( - service, target_status)) - - LOG.debug('Next, wait and verify the sytstem recovers') - expected = {'operational': 'enabled', 'availability': 'available'} - return system_helper.wait_for_host_values( - host, timeout=total_wait / 2, con_ssh=con_ssh, auth_info=auth_info, - fail_ok=True, **expected) - # return True - - elif not expecting and found: - LOG.error('Unexpected status for process:{}, expected status:{}'.format( - service, expected)) - return False - - elif not expecting and not found: - LOG.info( - 'OK, IMPACT did not happen which is correct. ' - 'target_status={}'.format(target_status)) - return True - - elif expecting and not found: - LOG.warn( - 'host is not in expected status:{} for service:{}'.format(expected, - service)) - - code = wait_for_sm_process_events( - service, host, expected, expecting=expecting, - last_events=last_events, - process_type=process_type, con_ssh=con_ssh)[0] - - return 0 == code - - else: - # should never reach here - pass - - -def check_impact(impact, service_name, host='', last_events=None, - expecting_impact=False, process_type='sm', con_ssh=None, - timeout=80, **kwargs): - """ - Check if the expected IMPACT happens (or NOT) on the specified host - - Args: - impact (str): system behavior to check, including: - swact --- the active controller swacting - enabled-degraded --- the host changed to - 'enalbed-degraded' status - disabled-failed --- the host changed to - 'disabled-failed' status - ... - service_name (str): name of the service/process - host (str): the host to check - last_events (dict) the last events before action - expecting_impact (bool): if the IMPACT should happen timeout - process_type (str): type of the process: sm, pm, other - con_ssh: ssh connection/client to the active controller - timeout - **kwargs: - - Returns: - boolean - whether the IMPACT happens as expected - - """ - LOG.info( - 'Checking impact:{} on host:{} after killing process:{}, ' - 'process_type={}'.format( - impact, host, service_name, process_type)) - - prev_active = kwargs.get('active_controller', 'controller-0') - prev_standby = kwargs.get('standby_controller', 'controller-1') - severity = kwargs.get('severity', 'major') - - if impact == 'swact': - if expecting_impact: - return is_controller_swacted(prev_active, prev_standby, - con_ssh=con_ssh, - swact_start_timeout=max(timeout / 2, - 20), - swact_complete_timeout=timeout) - else: - return not is_controller_swacted(prev_active, prev_standby, - con_ssh=con_ssh, - swact_start_timeout=timeout / 4) - - elif impact in ('enabled-degraded', 'enabled-warning'): - return _check_status_after_killing_process( - service_name, host, target_status=impact, - expecting=expecting_impact, - process_type=process_type, last_events=last_events, con_ssh=con_ssh) - - elif impact == 'disabled-failed': - - if host == prev_active: - LOG.info( - 'Killing PMON process:{} on active host:{} will trigger ' - 'swact. 
impact:{}, ' - 'severity:{}'.format(service_name, host, impact, severity)) - swacted = is_controller_swacted(prev_active, prev_standby, - con_ssh=con_ssh, - swact_start_timeout=20, - swact_complete_timeout=timeout) - assert swacted, 'Active-controller must be swacted before been ' \ - 'taken into disabled-failed status' - - operational, available = impact.split('-') - expected = {'operational': operational, 'available': available} - - reached = system_helper.wait_for_host_values(host, timeout=timeout, - con_ssh=con_ssh, - fail_ok=True, **expected) - if reached and expecting_impact: - LOG.info( - 'host {} reached status {} as expected after killing process ' - '{}'.format( - host, expected, service_name)) - return True - - elif not reached and not expecting_impact: - LOG.info( - 'host {} DID NOT reach status {} (as expected) after killing ' - 'process {}'.format( - host, expected, service_name)) - return True - - else: - LOG.error( - 'Host:{} did not get into status:{} in {} seconds, seaching ' - 'for related events'.format( - host, expected, timeout)) - - # todo: it's better to do this in parallel with process-monitoring - expected = {'operational': 'enabled', - 'available': ['available', 'degraded']} - reached = system_helper.wait_for_host_values(host, timeout=timeout, - con_ssh=con_ssh, - fail_ok=True, - **expected) - - if reached: - LOG.info( - 'Host:{} did not recover into status:{} in {} ' - 'seconds'.format( - host, expected, timeout)) - return True - - LOG.error( - 'Host:{} did not get into status:{} in {} seconds, and there ' - 'is no related events'.format( - host, expected, timeout)) - - return False - else: - LOG.warn( - 'impact-checker for impact:{} not implemented yet, ' - 'kwargs:{}'.format(impact, kwargs)) - return False - - -def get_pmon_process_id(pid_file, host, con_ssh=None): - cmd = 'cat {} 2>/dev/null | head -n1 && echo 2>/dev/null'.format(pid_file) - - with host_helper.ssh_to_host(host, con_ssh=con_ssh) as con: - code, output = con.exec_cmd(cmd) - - if output.strip(): - return int(output.strip()) - - return -1 - - -def get_process_info(name, cmd='', pid_file='', host='', process_type='sm', - con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - Get the information of the process with the specified name - - Args: - name (str): name of the process - cmd (str): path of the executable - pid_file (str): path of the file containing the process id - host (str): host on which the process resides - process_type (str): type of service/process, must be one of 'sm', - 'pm', 'other' - con_ssh: ssh connection/client to the active controller - auth_info - - Returns: - - """ - LOG.info('name:{} cmd={} pid_file={} host={} process_type={}'.format( - name, cmd, pid_file, host, process_type)) - - active_controller = system_helper.get_active_controller_name( - con_ssh=con_ssh, auth_info=auth_info) - if not host: - host = active_controller - - if process_type == 'sm': - LOG.debug( - 'to get_process_info for SM process:{} on host:{}'.format(name, - host)) - - if host != active_controller: - LOG.warn( - 'Already swacted? host:{} is not the active controller now. ' - 'Active controller is {}'.format( - host, active_controller)) - pid, name, impact, status, pid_file = get_process_from_sm( - name, con_ssh=con_ssh, pid_file=pid_file) - if status != 'enabled-active': - LOG.warn('SM process is in status:{}, not "enabled-active"'.format( - status)) - if 'disabl' in status: - LOG.warn( - 'Wrong controller? 
Or controller already swacted, ' - 'wait and try on the other controller') - time.sleep(10) - return get_process_from_sm(name, pid_file=pid_file) - - return -1, name, impact, status, pid_file - else: - return pid, name, impact, status, pid_file - - elif process_type == 'pmon': - pid = get_pmon_process_id(pid_file, host, con_ssh=con_ssh) - LOG.info('Found: PID={} for PMON process:{}'.format(pid, name)) - return pid, name - - else: - LOG.info('Try to find the process:{} using "ps"'.format(name)) - - pid = get_ancestor_process(name, host, cmd=cmd, con_ssh=con_ssh)[0] - if -1 == pid: - return -1, '' - - return pid, name - - -def is_process_running(pid, host, con_ssh=None, retries=3, interval=3): - """ - Check if the process with the PID is existing - - Args: - pid (int): process id - host (str): host the process resides - con_ssh: ssh connection/client to the host - retries (int): times to re-try if no process found before return - failure - interval (int): time to wait before next re-try - - Returns: - boolean - true if the process existing, false otherwise - msg (str) - the details of the process or error messages - """ - cmd = 'ps -p {}'.format(pid) - for _ in range(retries): - with host_helper.ssh_to_host(host, con_ssh=con_ssh) as host_ssh: - code, output = host_ssh.exec_cmd(cmd, fail_ok=True) - if 0 != code: - LOG.warn( - 'Process:{} DOES NOT exist, error:{}'.format(pid, output)) - else: - return True, output - time.sleep(interval) - - return False, '' - - -def _get_last_events_timestamps(limit=1, event_log_id=None, con_ssh=None, - auth_info=Tenant.get('admin_platform')): - latest_events = system_helper.get_events_table(limit=limit, - event_log_id=event_log_id, - show_uuid=True, - con_ssh=con_ssh, - auth_info=auth_info) - - return latest_events - - -def kill_sm_process_and_verify_impact(name, cmd='', pid_file='', retries=2, - impact='swact', host='controller-0', - interval=20, action_timeout=90, - total_retries=3, process_type='sm', - on_active_controller=True, con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - Kill the process with the specified name and verify the system behaviors - as expected - - Args: - name (str): name of the process - cmd (str): executable of the process - pid_file (str): file containing process id - retries (int): times of killing actions upon which the - IMPACT will be triggered - impact (str): system behavior including: - swact -- active controller is swacted - enabled-degraded -- the status of the - service will change to - disabled-failed -- the status of the - service will change to - ... 
- host (str): host to test on - interval (int): least time to wait between kills - action_timeout (int): kills and impact should happen within this - time frame - total_retries (int): total number of retries for whole kill and - wait actions - process_type (str): valid types are: sm, pmon, other - on_active_controller (boolean): - con_ssh: ssh connection/client to the active controller - auth_info - - Returns: (pid, host) - pid: - >0 suceess, the final PID of the process - -1 fail because of impact NOT happening after killing the - process up to threshold times - -2 fail because of impact happening before killing threshold times - -3 fail after try total_retries times - host: - the host tested on - """ - active_controller, standby_controller = \ - system_helper.get_active_standby_controllers(con_ssh=con_ssh, - auth_info=auth_info) - - if on_active_controller: - LOG.info( - 'on active controller: {}, host:{}'.format(active_controller, host)) - - host = active_controller - con_ssh = con_ssh or ControllerClient.get_active_controller() - - LOG.info('on host: {}'.format(host)) - - if total_retries < 1 or retries < 1: - LOG.error( - 'retries/total-retries < 1? retires:{}, total retries:{}'.format( - retries, total_retries)) - return None - count = 0 - for i in range(1, total_retries + 1): - LOG.info( - 'retry:{:02d} kill the process:{} and verify impact:{}'.format( - i, name, impact)) - - exec_times = [] - killed_pids = [] - - timeout = time.time() + action_timeout * ( - retries / 2 if retries > 2 else 1) - - while time.time() < timeout: - count += 1 - - LOG.debug( - 'retry{:02d}-{:02d}: Failed to get process id for {} on ' - 'host:{}, swacted unexpectedly?'.format( - i, count, name, host)) - - try: - pid, proc_name = get_process_info(name, cmd=cmd, host=host, - process_type=process_type, - pid_file=pid_file, - con_ssh=con_ssh)[0:2] - - except pexpect.exceptions.EOF: - LOG.warn( - 'retry{:02d}-{:02d}: Failed to get process id for {} on ' - 'host:{}, swacted unexpectedly?'.format( - i, count, name, host)) - time.sleep(interval / 3.0) - continue - - if -1 == pid: - LOG.error( - 'retry{:02d}-{:02d}: Failed to get PID for process with ' - 'name:{}, cmd:{}, ' - 'wait and retries'.format(i, count, name, cmd)) - time.sleep(interval / 3.0) - continue - - if killed_pids and pid in killed_pids: - LOG.warn( - 'retry{:02d}-{:02d}: No new process re-created, ' - 'prev-pid={}, cur-pid={}'.format( - i, count, killed_pids[-1], pid)) - time.sleep(interval / 3.0) - continue - - last_killed_pid = killed_pids[-1] if killed_pids else None - killed_pids.append(pid) - last_kill_time = exec_times[-1] if exec_times else None - exec_times.append(datetime.datetime.utcnow()) - - latest_events = _get_last_events_timestamps( - event_log_id=KILL_PROC_EVENT_FORMAT[process_type]['event_id'], - limit=10) - - LOG.info( - 'retry{:02d}-{:02d}: before kill CLI, proc_name={}, pid={}, ' - 'last_killed_pid={}, last_kill_time={}'.format( - i, count, proc_name, pid, last_killed_pid, last_kill_time)) - - LOG.info('\tactive-controller={}, standby-controller={}'.format( - active_controller, standby_controller)) - - kill_cmd = '{} {}'.format(KILL_CMD, pid) - - with host_helper.ssh_to_host(host, con_ssh=con_ssh) as con: - code, output = con.exec_sudo_cmd(kill_cmd, fail_ok=True) - if 0 != code: - # it happens occasionaly - LOG.error('Failed to kill pid:{}, cmd={}, output=<{}>, ' - 'at run:{}, already terminated?'.format( - pid, kill_cmd, output, count)) - - if count < retries: - # IMPACT should not happen yet - if not check_impact(impact, 
proc_name, - last_events=latest_events, - active_controller=active_controller, - standby_controller=standby_controller, - expecting_impact=False, - process_type=process_type, host=host, - con_ssh=con_ssh): - LOG.error( - 'Impact:{} observed unexpectedly, it should happen ' - 'only after killing {} times, ' - 'actual killed times:{}'.format(impact, retries, count)) - return -2, host - - LOG.info( - 'retry{:02d}-{:02d}: OK, NO impact as expected, impact={}, ' - 'will kill it another time'.format(i, count, impact)) - - time.sleep(max(interval * 1 / 2.0, 5)) - - else: - no_standby_controller = standby_controller is None - expecting_impact = True if not no_standby_controller else False - if not check_impact( - impact, proc_name, last_events=latest_events, - active_controller=active_controller, - standby_controller=standby_controller, - expecting_impact=expecting_impact, - process_type=process_type, host=host, con_ssh=con_ssh): - LOG.error( - 'No impact after killing process {} {} times, while ' - '{}'.format(proc_name, count, - ('expecting impact' if expecting_impact - else 'not expecting impact'))) - - return -1, host - - LOG.info( - 'OK, final retry{:02d}-{:02d}: OK, IMPACT happened ' - '(if applicable) as expected, ' - 'impact={}'.format(i, count, impact)) - - active_controller, standby_controller = \ - system_helper.get_active_standby_controllers( - con_ssh=con_ssh) - - LOG.info( - 'OK, after impact:{} (tried:{} times), ' - 'now active-controller={}, standby-controller={}'.format( - impact, count, active_controller, standby_controller)) - - pid, proc_name = get_process_info(name, cmd=cmd, host=host, - pid_file=pid_file, - process_type=process_type, - con_ssh=con_ssh)[0:2] - - return pid, active_controller - - return -3, host - - -def wait_for_sm_dump_services_active(timeout=60, fail_ok=False, con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - Wait for all services - Args: - timeout: - fail_ok: - con_ssh: - auth_info - - Returns: - - """ - active_controller = system_helper.get_active_controller_name( - con_ssh=con_ssh, auth_info=auth_info) - return host_helper.wait_for_sm_dump_desired_states( - controller=active_controller, timeout=timeout, fail_ok=fail_ok) diff --git a/automated-pytest-suite/keywords/security_helper.py b/automated-pytest-suite/keywords/security_helper.py deleted file mode 100644 index da5183cd..00000000 --- a/automated-pytest-suite/keywords/security_helper.py +++ /dev/null @@ -1,1284 +0,0 @@ -# -# Copyright (c) 2019, 2020 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -import json -import random -import re -import os -import time -import requests - -from pexpect import EOF -from string import ascii_lowercase, ascii_uppercase, digits - -from consts.auth import Tenant, HostLinuxUser, CliAuth -from consts.stx import Prompt, EventLogID -from consts.proj_vars import ProjVar -from utils.tis_log import LOG -from utils import exceptions -from utils.clients.ssh import ControllerClient, SSHClient, SSHFromSSH -from keywords import system_helper, keystone_helper, common - -MIN_LINUX_PASSWORD_LEN = 7 -SPECIAL_CHARACTERS = r'!@#$%^&*()<>{}+=_\\\[\]\-?|~`,.;:' - -# use this simple "dictionary" for now, because no english dictionary -# installed on test server -SIMPLE_WORD_DICTIONARY = ''' -and is being proof-read and supplemented by volunteers from around the -world. 
This is an unfunded project, and future enhancement of this -dictionary will depend on the efforts of volunteers willing to help build -this free resource into a comprehensive body of general information. New -definitions for missing words or words senses and longer explanatory notes, -as well as images to accompany the articles are needed. More modern -illustrative quotations giving recent examples of usage of the words in -their various senses will be very helpful, since most quotations in the -original 1913 dictionary are now well over 100 years old -''' - - -class LinuxUser: - users = {HostLinuxUser.get_user(): HostLinuxUser.get_password()} - con_ssh = None - - def __init__(self, user, password, con_ssh=None): - self.user = user - self.password = password - self.added = False - self.con_ssh = con_ssh if con_ssh is not None else \ - ControllerClient.get_active_controller() - - def add_user(self): - self.added = True - LinuxUser.users[self.user] = self.password - raise NotImplementedError - - def modify_password(self): - raise NotImplementedError - - def delete_user(self): - raise NotImplementedError - - def login(self): - raise NotImplementedError - - @classmethod - def get_user_password(cls): - raise NotImplementedError - - @classmethod - def get_current_user_password(cls, con_ssh=None): - if con_ssh: - cls.con_ssh = con_ssh - elif not cls.con_ssh: - cls.con_ssh = ControllerClient.get_active_controller() - user = cls.con_ssh.get_current_user() - return user, cls.users[user] - - -class Singleton(type): - """ - A singleton used to make sure only one instance of a class is allowed to - create - """ - - __instances = {} - - def __call__(cls, *args, **kwargs): - if cls not in cls.__instances: - cls.__instances[cls] = super(Singleton, cls).__call__(*args, - **kwargs) - return cls.__instances[cls] - - -def get_ldap_user_manager(): - """ - Get the only instance of the LDAP User Manager - - Returns (LdapUserManager): - the only instance of the LDAP User Manager - """ - return LdapUserManager() - - -class LdapUserManager(object, metaclass=Singleton): - """ - The LDAP User Manager - - """ - - LINUX_ROOT_PASSWORD = HostLinuxUser.get_password() - KEYSTONE_USER_NAME = Tenant.get('admin')['user'] - KEYSTONE_USER_DOMAIN_NAME = 'Default' - KEYSTONE_PASSWORD = Tenant.get('admin')['password'] - PROJECT_NAME = 'admin' - PROJECT_DOMAIN_NAME = 'Default' - - def __init__(self, ssh_con=None): - if ssh_con is not None: - self.ssh_con = ssh_con - else: - self.ssh_con = ControllerClient.get_active_controller() - - self.users_info = {} - - def ssh_to_host(self, host=None): - """ - Get the ssh connection to the active controller or the specified host - (if it's the case) - - Args: - host (str): the host to ssh to, using the active controller - if it's unset or None - - Returns (object): - the ssh connection session to the active controller - - """ - if host is None: - return self.ssh_con - else: - return SSHClient(host=host) - - def get_ldap_admin_password(self): - """ - Get the LDAP Administrator's password - - Args: - - Returns (str): - The password of the LDAP Administrator - - """ - cmd = 'grep "credentials" /etc/openldap/slapd.conf.backup' - self.ssh_con.flush() - code, output = self.ssh_con.exec_sudo_cmd(cmd) - - if 0 == code and output.strip(): - for line in output.strip().splitlines(): - if 'credentials' in line and '=' in line: - password = line.split('=')[1] - return password - - return '' - - def get_ldap_user_password(self, user_name): - """ - Get the password of the LDAP User - - Args: - user_name 
(str): - the user name - - Returns (str): - the password of the user - """ - if user_name in self.users_info and \ - self.users_info[user_name]['passwords']: - return self.users_info[user_name]['passwords'][-1] - - return None - - def login_as_ldap_user_first_time(self, user_name, new_password=None, - host=None): - """ - Login with the specified LDAP User for the first time, - during which change the initial password as a required step. - - Args: - user_name (str): user name of the LDAP user - new_password (str): password of the LDAP user - host (str): host name to which the user will login - - Returns (tuple): - results (bool): True if success, otherwise False - password (str): new password of the LDAP user - - """ - - hostname_ip = 'controller-1' if host is None else host - - if new_password is not None: - password = new_password - else: - password = 'new_{}_Li69nux!'.format( - ''.join(random.sample(user_name, len(user_name)))) - - cmd_expected = [ - ( - 'ssh -l {} -o UserKnownHostsFile=/dev/null {}'.format( - user_name, hostname_ip), - (r'Are you sure you want to continue connecting (yes/no)?',), - ('Failed to get "continue connecting" prompt',) - ), - ( - 'yes', - # ("{}@{}'s password:".format(user_name, hostname_ip),), - (r".*@{}'s password: ".format(hostname_ip),), - ('Failed to get password prompt',) - ), - ( - '{}'.format(user_name), - (r'\(current\) LDAP Password: ',), - ('Failed to get password prompt for current password',) - ), - ( - '{}'.format(user_name), - ('New password: ',), - ('Failed to get password prompt for new password',) - ), - ( - '{}'.format(password), - ('Retype new password: ',), - ('Failed to get confirmation password prompt for new password',) - ), - ( - '{}'.format(password), - ( - 'passwd: all authentication tokens updated successfully.', - 'Connection to controller-1 closed.', - ), - ('Failed to change to new password for current user:{}'.format( - user_name),) - ), - ( - '', - (self.ssh_con.get_prompt(),), - ( - 'Failed in last step of first-time login as LDAP ' - 'User:{}'.format(user_name),) - ), - ] - - result = True - self.ssh_con.flush() - for cmd, expected, errors in cmd_expected: - self.ssh_con.send(cmd) - index = self.ssh_con.expect(blob_list=list(expected + errors)) - if len(expected) <= index: - result = False - break - - self.ssh_con.flush() - - return result, password - - def find_ldap_user(self, user_name): - """ - Find the LDAP User with the specified name - - Args: - user_name (str): - user name of the LDAP User to - search for - - Returns: - existing_flag (boolean) - True, the LDAP User with the - specified name existing - - False, cannot find a LDAP User with - the specified name - - user_info (dict): - user information - """ - - cmd = 'ldapfinger -u {}'.format(user_name) - self.ssh_con.flush() - code, output = self.ssh_con.exec_sudo_cmd(cmd, fail_ok=True, - strict_passwd_prompt=True) - - found = False - user_info = {} - if output.strip(): - for line in output.strip().splitlines(): - if line.startswith('dn: '): - user_info['dn'] = line.split()[1].strip() - elif line.startswith('cn: '): - user_info['cn'] = line.split()[1].strip() - elif line.startswith('uid: '): - user_info['uid'] = line.split()[1].strip() - elif line.startswith('uidNumber: '): - user_info['uid_number'] = int(line.split()[1].strip()) - elif line.startswith('gidNumber: '): - user_info['gid_number'] = int(line.split()[1].strip()) - elif line.startswith('homeDirectory: '): - user_info['home_directory'] = line.split()[1].strip() - elif line.startswith('userPassword:: '): - 
user_info['user_password'] = line.split()[1].strip() - elif line.startswith('loginShell: '): - user_info['login_shell'] = line.split()[1].strip() - elif line.startswith('shadowMax: '): - user_info['shadow_max'] = int(line.split()[1].strip()) - elif line.startswith('shadowWarning: '): - user_info['shadow_warning'] = int(line.split()[1].strip()) - else: - pass - else: - found = True - - return found, user_info - - def rm_ldap_user(self, user_name): - """ - Delete the LDAP User with the specified name - - Args: - user_name: - - Returns (tuple): - code - 0 successfully deleted the specified LDAP User - otherwise: failed - output - message from the deleting CLI - """ - - cmd = 'ldapdeleteuser {}'.format(user_name) - - self.ssh_con.flush() - code, output = self.ssh_con.exec_sudo_cmd(cmd, fail_ok=True) - - if 0 == code and user_name in self.users_info: - del self.users_info[user_name] - - return code, output - - @staticmethod - def validate_user_settings(secondary_group=False, - secondary_group_name=None, - password_expiry_days=90, - password_expiry_warn_days=2 - ): - """ - Validate the settings to be used as attributes of a LDAP User - - Args: - secondary_group (bool): - True - Secondary group to add user to - False - No secondary group - secondary_group_name (str): Name of secondary group (will be - ignored if secondary_group is False - password_expiry_days (int): - password_expiry_warn_days (int): - - Returns: - - """ - - try: - opt_expiry_days = int(password_expiry_days) - opt_expiry_warn_days = int(password_expiry_warn_days) - bool(secondary_group) - str(secondary_group_name) - except ValueError: - return 1, 'invalid input: {}, {}'.format(password_expiry_days, - password_expiry_warn_days) - - if opt_expiry_days <= 0: - return 4, 'invalid password expiry days:{}'.format(opt_expiry_days) - - if opt_expiry_warn_days <= 0: - return 5, 'invalid password expiry days:{}'.format( - opt_expiry_warn_days) - - return 0, '' - - def create_ldap_user(self, - user_name, - sudoer=False, - secondary_group=False, - secondary_group_name=None, - password_expiry_days=90, - password_expiry_warn_days=2, - delete_if_existing=True, - check_if_existing=True): - """ - - Args: - user_name (str): user name of the LDAP User - sudoer (boo) - True - Add the user to sudoer list - False - Do not add the user to sudoer list - secondary_group (bool): - True - Secondary group to add user to - False - No secondary group - secondary_group_name (str): Name of secondary group (will be - ignored if secondary_group is False - password_expiry_days (int): - password_expiry_warn_days (int): - delete_if_existing (bool): - True - Delete the user if it is already existing - False - Return the existing LDAP User - check_if_existing (bool): - True - Check if the LDAP User existing with the - specified name - False - Do not check if any LDAP Users with the specified - name existing - - Returns tuple(code, user_infor): - code (int): - -1 -- a LDAP User already existing with the same name ( - don't care other attributes for now) - 0 -- successfully created a LDAP User withe specified name - and attributes - 1 -- a LDAP User already existing but fail_on_existing - specified - 2 -- CLI to create a user succeeded but cannot find the user - after - 3 -- failed to create a LDAP User (the CLI failed) - 4 -- failed to change the initial password and login the - first time - 5 -- invalid inputs - """ - password_expiry_days = 90 if password_expiry_days is None else \ - password_expiry_days - password_expiry_warn_days = 2 if password_expiry_warn_days is 
None \ - else password_expiry_warn_days - secondary_group = False if secondary_group is None else secondary_group - secondary_group_name = '' if secondary_group_name is None else \ - secondary_group_name - - code, message = self.validate_user_settings( - secondary_group=secondary_group, - secondary_group_name=secondary_group_name, - password_expiry_days=password_expiry_days, - password_expiry_warn_days=password_expiry_warn_days) - if 0 != code: - return 5, {} - - if check_if_existing: - existing, user_info = self.find_ldap_user(user_name) - if existing: - if delete_if_existing: - code, message = self.rm_ldap_user(user_name) - if 0 != code: - return 1, user_info - else: - return -1, user_info - cmds_expectings = [ - ( - 'sudo ldapusersetup', - (r'Enter username to add to LDAP:',), - () - ), - ( - '{}'.format(user_name), - (r'Add {} to sudoer list\? \(yes/NO\): '.format(user_name),), - ('Critical setup error: cannot add user.*',), - ), - ( - 'yes' if sudoer else 'NO', - (r'Add .* to secondary user group\? \(yes/NO\):',), - () - ), - ] - - if secondary_group: - cmds_expectings += [ - ( - 'yes', - (r'Secondary group to add user to\? \[wrs_protected\]: ',), - () - ), - ( - '{}'.format(secondary_group_name), - ( - r'Enter days after which user password must be changed ' - r'\[{}\]:'.format(password_expiry_days),), - () - ) - - ] - else: - cmds_expectings += [ - ( - 'NO', - ( - r'Enter days after which user password must be changed ' - r'\[{}\]:'.format(password_expiry_days),), - (), - ), - ] - - cmds_expectings += [ - ( - '{}'.format(password_expiry_days), - ( - r'Enter days before password is to expire that user is ' - r'warned \[{}\]:'.format(password_expiry_warn_days),), - (), - ), - ( - '{}'.format(password_expiry_warn_days), - ( - 'Successfully modified user entry uid={},ou=People,' - 'dc=cgcs,dc=local in LDAP'.format(user_name), - 'Updating password expiry to {} days'.format( - password_expiry_warn_days), - ), - (), - ) - ] - - created = True - self.ssh_con.flush() - for cmd, outputs, errors in cmds_expectings: - self.ssh_con.send(cmd) - expected_outputs = list(outputs + errors) - - index = self.ssh_con.expect(blob_list=expected_outputs, - fail_ok=True) - if len(outputs) <= index: - created = False - break - expected_outputs[:] = [] - - time.sleep(3) - - user_info = {} - if created: - existing, user_info = self.find_ldap_user(user_name) - if existing: - success, password = self.login_as_ldap_user_first_time( - user_name) - if not success: - code = 4 - else: - user_info['passwords'] = [password] - self.users_info[user_name] = user_info - code = 0 - else: - code = 2 - else: - code = 3 - - return code, user_info - - def login_as_ldap_user(self, user_name, password, host=None, - pre_store=False, disconnect_after=False): - """ - Login as the specified user name and password onto the specified host - - Args: - user_name (str): user name - password (str): password - host (str): host to login to - pre_store (bool): - True - pre-store keystone user credentials for - session - False - choose 'N' (by default) meaning do not - pre-store keystone user credentials - disconnect_after (bool): - True - disconnect the logged in session - False - keep the logged in session - - Returns (tuple): - logged_in (bool) - True if successfully logged into the - specified host - using the specified user/password - password (str) - the password used to login - ssh_con (object) - the ssh session logged in - """ - if not host: - host = 'controller-1' - if system_helper.is_aio_simplex(): - host = 'controller-0' - - prompt_keystone_user_name
= r'Enter Keystone username \[{}\]: '.format( - user_name) - cmd_expected = ( - ( - 'ssh -l {} -o UserKnownHostsFile=/dev/null {}'.format(user_name, - host), - (r'Are you sure you want to continue connecting \(yes/no\)\?',), - ( - 'ssh: Could not resolve hostname {}: Name or service not ' - 'known'.format(host),), - ), - ( - 'yes', - (r'{}@{}\'s password: '.format(user_name, host),), - (), - ), - ( - '{}'.format(password), - (prompt_keystone_user_name, Prompt.CONTROLLER_PROMPT,), - (r'Permission denied, please try again\.',), - ), - ) - - logged_in = False - self.ssh_con.flush() - for i in range(len(cmd_expected)): - cmd, expected, errors = cmd_expected[i] - LOG.info('cmd={}\nexpected={}\nerrors={}\n'.format(cmd, expected, - errors)) - self.ssh_con.send(cmd) - - index = self.ssh_con.expect(blob_list=list(expected + errors)) - if len(expected) <= index: - break - elif 2 == i: - if expected[index] == prompt_keystone_user_name: - assert pre_store, \ - 'pre_store is False, while selecting "y" to ' \ - '"Pre-store Keystone user credentials ' \ - 'for this session!"' - else: - logged_in = True - break - else: - logged_in = True - - if logged_in: - if disconnect_after: - self.ssh_con.send('exit') - - return logged_in, password, self.ssh_con - - def change_ldap_user_password(self, user_name, password, new_password, - change_own_password=True, - check_if_existing=True, host=None, - disconnect_after=False): - """ - Modify the password of the specified user to the new one - - Args: - user_name (str): - - name of the LDAP User - - password (str): - - password of the LDAP User - - new_password (str): - - new password to change to - change_own_password (bool): - - check_if_existing (bool): - - True: check if the user already exists first - False: change the password without checking the - existence of the user - - host (str): - - The host to log into - - disconnect_after (bool) - - True: disconnect the ssh connection after changing the - password - - False: keep the ssh connection - - Returns (tuple): - (changed (bool), ssh_con) - changed is True if successful, - False otherwise - """ - - if check_if_existing: - found, user_info = self.find_ldap_user(user_name) - if not found: - return False, None - - if not change_own_password: - return False, None - - logged_in, password, ssh_con = \ - self.login_as_ldap_user(user_name, - password=password, - host=host, - disconnect_after=False) - - if not logged_in or not password or not ssh_con: - return False, ssh_con - - cmds_expected = ( - ( - 'passwd', - (r'\(current\) LDAP Password: ',), - (), - ), - ( - password, - ('New password: ',), - ('passwd: Authentication token manipulation error', EOF,), - ), - ( - new_password, - ('Retype new password: ',), - ( - 'BAD PASSWORD: The password is too similar to the old one', - 'BAD PASSWORD: No password supplied', - 'passwd: Have exhausted maximum number of retries for ' - 'service', - EOF, - ), - ), - ( - new_password, - ('passwd: all authentication tokens updated successfully.',), - (), - ), - ) - - changed = True - ssh_con.flush() - for cmd, expected, errors in cmds_expected: - ssh_con.send(cmd) - index = ssh_con.expect(blob_list=list(expected + errors)) - if len(expected) <= index: - changed = False - break - - if disconnect_after: - ssh_con.send('exit') - - return changed, ssh_con - - -def get_admin_password_in_keyring(con_ssh=None): - """ - Get admin password via 'keyring get CGCS admin' - Args: - con_ssh (SSHClient): active controller client - - Returns (str): admin password returned - - """ - if con_ssh is None: - con_ssh = ControllerClient.get_active_controller()
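For orientation, a minimal usage sketch of the LDAP keywords above (hypothetical test code, not part of the deleted module; the user name and passwords are illustrative values, and LdapUserManager is the class these methods belong to):

# Hedged sketch: create an LDAP user, then rotate its password.
manager = LdapUserManager()

code, user_info = manager.create_ldap_user(
    'm-user01', sudoer=False, password_expiry_days=90,
    password_expiry_warn_days=2)
assert code == 0, 'create_ldap_user failed with code {}'.format(code)

# create_ldap_user() already performed the forced first login, so the
# current password is the last entry recorded in user_info.
current_password = user_info['passwords'][-1]
changed, ssh_con = manager.change_ldap_user_password(
    'm-user01', current_password, 'N3w-LDAP-passw0rd!',
    disconnect_after=True)
assert changed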
- - admin_pswd = con_ssh.exec_cmd('keyring get CGCS admin', fail_ok=False)[1] - return admin_pswd - - -def change_linux_user_password(password, new_password, user=None, - host=None): - if not user: - user = HostLinuxUser.get_user() - - LOG.info( - 'Attempt to change password, from password:{}, to new-password:{}, ' - 'on host:{}'.format( - password, new_password, host)) - - input_outputs = ( - ( - 'passwd', - (r'\(current\) UNIX password: ',), - (), - ), - ( - password, - ('New password: ',), - (': Authentication token manipulation error', EOF,), - ), - ( - new_password, - ('Retype new password:',), - ( - 'BAD PASSWORD: The password is too similar to the old one', - 'BAD PASSWORD: No password supplied', - 'passwd: Have exhausted maximum number of retries for service', - EOF, - ), - ), - ( - new_password, - (': all authentication tokens updated successfully.', - Prompt.CONTROLLER_PROMPT,), - (), - ), - ) - conn_to_ac = ControllerClient.get_active_controller() - initial_prompt = r'.*{}\:~\$ '.format(host) - LOG.info('Will login as user:"{}", password:"{}", to host:"{}"'.format( - user, password, host)) - - conn = SSHFromSSH(conn_to_ac, host, user, password, force_password=True, - initial_prompt=initial_prompt) - passed = True - try: - conn.connect(retry=False, use_password=True) - for cmd, expected, errors in input_outputs: - # conn.flush() - LOG.info("Send '{}'\n".format(cmd)) - conn.send(cmd) - blob_list = list(expected) + list(errors) - LOG.info("Expect: {}\n".format(blob_list)) - index = conn.expect(blob_list=blob_list) - LOG.info('returned index:{}\n'.format(index)) - if len(expected) <= index: - passed = False - break - - except Exception as e: - LOG.warn( - 'Caught exception when connecting to host:{} as user:{} with ' - 'password:{}\n{}\n'.format( - host, user, password, e)) - - raise - - finally: - if user != HostLinuxUser.get_user(): - conn.close() - - # flush the output to the cli so the next cli is correctly registered - conn.flush() - LOG.info( - 'Successfully changed password from:\n{}\nto:{} for user:{} on ' - 'host:{}'.format(password, new_password, user, host)) - - return passed, new_password - - -def gen_linux_password(exclude_list=None, length=32): - if exclude_list is None: - exclude_list = [] - - if not isinstance(exclude_list, list): - exclude_list = [exclude_list] - - if length < MIN_LINUX_PASSWORD_LEN: - LOG.warn( - 'Length requested is too small, must be longer than {}, requesting ' - '{}'.format(MIN_LINUX_PASSWORD_LEN, length)) - return None - - total = length - left = 3 - - vocabulary = [ascii_lowercase, ascii_uppercase, digits, SPECIAL_CHARACTERS] - - password = '' - while not password: - raw_password = [] - for chars in vocabulary: - count = random.randint(1, total - left) - raw_password += random.sample(chars, min(count, len(chars))) - left -= 1 - total -= count - - password = ''.join( - random.sample(raw_password, min(length, len(raw_password)))) - - missing_length = length - len(password) - if missing_length > 0: - all_chars = ''.join(vocabulary) - password += ''.join( - random.choice(all_chars) for _ in range(missing_length)) - - if password in exclude_list: - password = '' - - LOG.debug('generated valid password:{}'.format(password)) - - return password - - -def gen_invalid_password(invalid_type='shorter', previous_passwords=None, - minimum_length=7): - if previous_passwords is None: - previous_passwords = [] - - valid_password = list(gen_linux_password(exclude_list=previous_passwords, - length=minimum_length * 4)) - - current_length = len(valid_password) - - if
invalid_type == 'shorter': - invalid_len = random.randint(1, minimum_length - 1) - invalid_password = random.sample(valid_password, invalid_len) - - elif invalid_type == '1_lowercase': - invalid_password = ''.join( - c for c in valid_password if c not in ascii_lowercase) - missing_length = current_length - len(invalid_password) - invalid_password += ''.join( - random.choice(ascii_uppercase) for _ in range(missing_length)) - - elif invalid_type == '1_uppercase': - invalid_password = ''.join( - c for c in valid_password if c not in ascii_uppercase) - missing_length = current_length - len(invalid_password) - invalid_password += ''.join( - random.choice(ascii_lowercase) for _ in range(missing_length)) - - elif invalid_type == '1_digit': - invalid_password = ''.join(c for c in valid_password if c not in digits) - missing_length = current_length - len(invalid_password) - invalid_password += ''.join( - random.choice(ascii_lowercase) for _ in range(missing_length)) - - elif invalid_type == '1_special': - invalid_password = ''.join( - c for c in valid_password if c not in SPECIAL_CHARACTERS) - missing_length = current_length - len(invalid_password) - invalid_password += ''.join( - random.choice(ascii_lowercase) for _ in range(missing_length)) - - elif invalid_type == 'not_in_dictionary': - invalid_password = random.choice( - re.split(r'\W', SIMPLE_WORD_DICTIONARY)) - - elif invalid_type == 'diff_more_than_3': - if not previous_passwords or len(previous_passwords) < 1: - return None - - last_password = list(previous_passwords[-1]) - len_last_password = len(last_password) - count_difference = random.randint(0, 2) - for index in random.sample(range(len_last_password), count_difference): - cur_char = last_password[index] - last_password[index] = random.choice( - [c for c in last_password if c != cur_char]) - invalid_password = ''.join(last_password) - - elif invalid_type == 'not_simple_reverse': - if not previous_passwords or len(previous_passwords) < 1: - return None - invalid_password = ''.join(reversed(previous_passwords[-1])) - - elif invalid_type == 'not_only_case_diff': - if not previous_passwords or len(previous_passwords) < 1: - return None - invalid_password = [] - for ch in valid_password: - if ch.islower(): - invalid_password.append(ch.upper()) - elif ch.isupper(): - invalid_password.append(ch.lower()) - else: - invalid_password.append(ch) - - invalid_password = ''.join(invalid_password) - - elif invalid_type == 'not_last_2': - if not previous_passwords or len(previous_passwords) < 1: - return None - invalid_password = random.choice(previous_passwords[-2:]) - - elif invalid_type == '5_failed_attempts': - invalid_password = '' - - else: - assert False, 'Unknown password rule:{}'.format(invalid_type) - - return ''.join(invalid_password) - - -def modify_https(enable_https=True, check_first=True, con_ssh=None, - auth_info=Tenant.get('admin_platform'), - fail_ok=False): - """ - Modify platform https via 'system modify https_enable=' - - Args: - enable_https (bool): True/False to enable https or not - check_first (bool): if the user wants to check whether the lab is already in - the state that user tries to enable - con_ssh (SSHClient): - auth_info (dict): - fail_ok (bool): - - Returns (tuple): - (-1, msg) - (0, msg) - (1, ) - - """ - if check_first: - is_https = keystone_helper.is_https_enabled(source_openrc=False, - auth_info=auth_info, - con_ssh=con_ssh) - if (is_https and enable_https) or (not is_https and not enable_https): - msg = "Https is already {}.
Do nothing.".format( - 'enabled' if enable_https else 'disabled') - LOG.info(msg) - return -1, msg - - LOG.info("Modify system to {} https".format( - 'enable' if enable_https else 'disable')) - res, output = system_helper.modify_system(fail_ok=fail_ok, con_ssh=con_ssh, - auth_info=auth_info, - https_enabled='{}'.format( - str(enable_https).lower())) - if res == 1: - return 1, output - - LOG.info("Wait up to 60s for config out-of-date alarm with best effort.") - system_helper.wait_for_alarm(alarm_id=EventLogID.CONFIG_OUT_OF_DATE, - entity_id='controller-', strict=False, - con_ssh=con_ssh, timeout=60, fail_ok=True, - auth_info=auth_info) - - LOG.info("Wait up to 600s for config out-of-date alarm to clear.") - system_helper.wait_for_alarm_gone(EventLogID.CONFIG_OUT_OF_DATE, - con_ssh=con_ssh, timeout=600, - check_interval=20, fail_ok=False, - auth_info=auth_info) - - LOG.info("Wait up to 300s for public endpoints to be updated") - expt_status = 'enabled' if enable_https else 'disabled' - end_time = time.time() + 300 - while time.time() < end_time: - if keystone_helper.is_https_enabled(con_ssh=con_ssh, - source_openrc=False, - auth_info=auth_info) == \ - enable_https: - break - time.sleep(10) - else: - raise exceptions.KeystoneError( - "Https is not {} in 'openstack endpoint list'".format(expt_status)) - - msg = 'Https is {} successfully'.format(expt_status) - LOG.info(msg) - # TODO: install certificate for https. There will be a warning msg if - # self-signed certificate is used - - if not ProjVar.get_var('IS_DC') or \ - (auth_info and auth_info.get('region', None) in ( - 'RegionOne', 'SystemController')): - # If DC, use the central region https as system https, since that is - # the one used for external access - CliAuth.set_vars(HTTPS=enable_https) - - return 0, msg - - -def set_ldap_user_password(user_name, new_password, check_if_existing=True, - fail_ok=False): - """ - Set ldap user password use ldapsetpasswd - - Args: - user_name (str): - - name of the LDAP User - - new_password (str): - - new password to change to - - check_if_existing (bool): - - True: check if the user already existing first - False: change the password without checking the existence of - the user - - fail_ok (bool) - - Returns (bool): - True if successful, False otherwise - """ - - if check_if_existing: - found, user_info = LdapUserManager().find_ldap_user(user_name=user_name) - if not found: - return False - - ssh_client = ControllerClient.get_active_controller() - rc, output = ssh_client.exec_sudo_cmd( - 'ldapsetpasswd {} {}'.format(user_name, new_password), fail_ok=fail_ok) - if rc > 1: - return 1, output - - return rc, output - - -def fetch_cert_file(cert_file=None, scp_to_local=True, con_ssh=None): - """ - fetch cert file from build server. scp to TiS. - Args: - cert_file (str): valid values: ca-cert, server-with-key - scp_to_local (bool): Whether to scp cert file to localhost as well. - con_ssh (SSHClient): active controller ssh client - - Returns (str|None): - cert file path on localhost if scp_to_local=True, else cert file path - on TiS system. If no certificate found, return None. 
- - """ - if not cert_file: - cert_file = '{}/ca-cert.pem'.format(HostLinuxUser.get_home()) - - if not con_ssh: - con_ssh = ControllerClient.get_active_controller() - - if not con_ssh.file_exists(cert_file): - raise FileNotFoundError( - '{} not found on active controller'.format(cert_file)) - - if scp_to_local: - cert_name = os.path.basename(cert_file) - dest_path = os.path.join(ProjVar.get_var('TEMP_DIR'), cert_name) - common.scp_from_active_controller_to_localhost(source_path=cert_file, - dest_path=dest_path, - timeout=120) - cert_file = dest_path - LOG.info("Cert file copied to {} on localhost".format(dest_path)) - - return cert_file - - -def get_auth_token(region=None, auth_info=Tenant.get('admin_platform'), use_dnsname=True): - """ - Get an authentication token from keystone - Args: - region(str): the cloud region for get the keystone token - auth_info: - use_dnsname(bool): True if use dns name instead of IP to perform the rest request - - Returns(str|None): Authentication token - - """ - keystone_endpoint = keystone_helper.get_endpoints(field='URL', service_name='keystone', - interface="public", region=region, - auth_info=auth_info)[0] - keystone_url = '{}/{}'.format(keystone_endpoint, 'auth/tokens') - if use_dnsname: - lab_ip = common.get_lab_fip(region=region) - lab_dns_name = common.get_dnsname(region=region) - keystone_url = keystone_url.replace(lab_ip, lab_dns_name) - LOG.info('Get authentication token from keystone url {}'.format(keystone_url)) - headers = {'Content-type': 'application/json'} - body = { - 'auth': { - 'identity': { - 'methods': ['password'], - 'password': { - 'user': { - 'domain': { - 'name': 'Default' - }, - 'name': 'admin', - 'password': 'Li69nux*' - } - } - }, - 'scope': { - 'project': { - 'name': 'admin', - 'domain': { - 'name': 'Default' - } - } - } - } - } - try: - req = requests.post(url=keystone_url, headers=headers, data=json.dumps(body), verify=False) - except Exception as e: - LOG.error('Error trying to get a token') - LOG.debug(e) - return None - LOG.debug('\n{} {}\nHeaders: {}\nBody: {}\nResponse code: {}\nResponse body: {}'.format( - req.request.method, req.request.url, req.request.headers, - req.request.body, req.status_code, req.text)) - LOG.info('Status: [{}]'.format(req.status_code)) - req.raise_for_status() - return req.headers.get('X-Subject-Token') - - -def check_url_access(url, headers=None, verify=True, fail_ok=False): - """ - Check the access to a given url - Args: - url(str): url to check - headers(None|dict): request headers of the http request - verify(bool|str): - True: secure request - False: equivalent to --insecure in curl cmd - str: applies to https system. CA-Certificate path. 
e.g., verify=/path/to/cert - fail_ok(bool): - Returns(tuple): (status_code, response) - - (1, ): An exception has occurred - - (status_code, response): status code and response from requests call - - """ - LOG.info('curl -i {}...'.format(url)) - try: - req = requests.get(url=url, headers=headers, verify=verify) - except requests.exceptions.RequestException as e: - if fail_ok: - message = 'Exception trying to access {}: {}'.format(url, e) - LOG.warn(message) - return 1, message - raise e - - LOG.info('Status: [{}]'.format(req.status_code)) - LOG.debug('\n{} {}\nHeaders: {}\nResponse code: {}\nResponse body: {}'.format( - req.request.method, req.request.url, req.request.headers, req.status_code, req.text)) - if not fail_ok: - req.raise_for_status() - return req.status_code, req.text - - -def check_services_access(service_name=None, region=None, auth=True, verify=True, - use_dnsname=True, auth_info=Tenant.get('admin_platform')): - """ - Check public endpoints of services are reachable via get request - Args: - service_name(str|list|None): filter only certain services to check - region(str|None): filter only the endpoints from a certain region - auth(bool): perform the requests with an authentication from keystone - verify(bool|str): - True: if https is enabled, verify the cert with the default CA - False: equivalent to --insecure in curl cmd - str: applies to https system. CA-Certificate path. e.g., verify=/path/to/cert - use_dnsname(bool): True if use dns name instead of IP to perform the rest request - auth_info(dict): - - Returns(None): - - """ - if not use_dnsname: - verify = False - LOG.info('Check services access via curl') - token = None - if auth: - token = get_auth_token(region=region, auth_info=auth_info, use_dnsname=use_dnsname) - headers = {'X-Auth-Token': token} if token else None - - if service_name: - urls_to_check = [] - if isinstance(service_name, str): - service_name = [service_name] - for service in service_name: - url = keystone_helper.get_endpoints(field='URL', interface='public', region=region, - enabled='True', service_name=service, - auth_info=auth_info) - if url: - urls_to_check += url - else: - LOG.warn('{} service\'s public endpoint not found or not enabled'.format(service)) - else: - urls_to_check = keystone_helper.get_endpoints(field='URL', interface='public', - region=region, enabled='True', - auth_info=auth_info) - if use_dnsname: - lab_ip = common.get_lab_fip(region=region) - lab_dns_name = common.get_dnsname(region=region) - urls_to_check = [url.replace(lab_ip, lab_dns_name) for url in urls_to_check] - - for url in urls_to_check: - # FIXME skip unreachable port 7777 (sm-api) until CGTS-19988 is resolved - # FIXME skip unreachable port 8219 (dcdbsync) until 1892391 is resolved - if url.endswith('7777') or url.endswith('8219/v1.0'): - continue - check_url_access(url=url, headers=headers, verify=verify) - - -def check_platform_horizon_access(verify=True, use_dnsname=True): - """ - Check horizon URL is reachable via get request - Args: - verify(bool|str): - True: if https is enabled, verify the cert with the default CA - False: equivalent to --insecure in curl cmd - str: applies to https system. CA-Certificate path.
e.g., verify=/path/to/cert - use_dnsname(bool): True if use dns name instead of IP to perform the rest request - Returns(None): - - """ - from keywords import horizon_helper - if not use_dnsname: - verify = False - LOG.info('Check platform horizon access via curl') - horizon_url = horizon_helper.get_url(dnsname=use_dnsname) - check_url_access(url=horizon_url, verify=verify) - diff --git a/automated-pytest-suite/keywords/storage_helper.py b/automated-pytest-suite/keywords/storage_helper.py deleted file mode 100644 index 9178dd8c..00000000 --- a/automated-pytest-suite/keywords/storage_helper.py +++ /dev/null @@ -1,1677 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -""" -This module provides helper functions for storage based testing - -Including: -- system commands for system/host storage configs -- CEPH related helper functions that are not using system commands - -""" - -import re -import time - -from consts.auth import Tenant -from consts.stx import EventLogID, BackendState, BackendTask, GuestImages, \ - PartitionStatus -from consts.timeout import HostTimeout, SysInvTimeout - -from keywords import system_helper, host_helper, keystone_helper, common - -from utils import table_parser, cli, exceptions -from utils.clients.ssh import ControllerClient, get_cli_client -from utils.tis_log import LOG - - -def is_ceph_healthy(con_ssh=None): - """ - Query 'ceph -s' and return True if ceph health is okay - and False otherwise. - - Args: - con_ssh (SSHClient): - - Returns: - - (bool) True if health okay, False otherwise - - (string) message - """ - - health_ok = 'HEALTH_OK' - if con_ssh is None: - con_ssh = ControllerClient.get_active_controller() - - rtn_code, out = con_ssh.exec_cmd('ceph -s') - if rtn_code > 0: - LOG.warning('ceph -s failed to execute.') - return (False, out) - - health_state = re.findall('health: (.*)\n', out) - if not health_state: - LOG.warning('Unable to determine ceph health state') - return (False, out) - - health_state = health_state[0] - if health_ok in health_state: - LOG.info('CEPH cluster is healthy') - return (True, out) - - msg = 'CEPH unhealthy. State: {}'.format(health_state) - LOG.warning(msg) - return (False, out) - - -def get_ceph_osd_count(fail_ok=False, con_ssh=None): - """ - Return the number of OSDs on a CEPH system" - Args: - fail_ok - con_ssh(SSHClient): - - Returns (int): Return the number of OSDs on the system, - """ - if not con_ssh: - con_ssh = ControllerClient.get_active_controller() - - rtn_code, out = con_ssh.exec_cmd('ceph -s', fail_ok=fail_ok) - if rtn_code > 0: - return 0 - - osds = re.search(r'(\d+) osds', out) - if osds: - LOG.info('There are {} OSDs on the system'.format(osds.group(1))) - return int(osds.group(1)) - - msg = 'There are no OSDs on the system' - LOG.info(msg) - if fail_ok: - return 0 - else: - raise exceptions.StorageError(msg) - - -def get_osd_host(osd_id, fail_ok=False, con_ssh=None): - """ - Return the host associated with the provided OSD ID - Args: - con_ssh(SSHClient): - fail_ok - osd_id (int): an OSD number, e.g. 0, 1, 2, 3... 
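To see how the ceph health keywords above combine, a hedged sketch (illustrative only; wait_for_ceph_health_ok is defined further down in this same module):

# Hedged sketch: check cluster health, wait if the state is transient,
# then log the OSD count parsed from 'ceph -s'.
healthy, output = is_ceph_healthy()
if not healthy:
    wait_for_ceph_health_ok(timeout=300, check_interval=5, fail_ok=False)
LOG.info('cluster reports {} OSDs'.format(get_ceph_osd_count()))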
- - Returns (str|None): hostname if found, else None - """ - storage_hosts = system_helper.get_storage_nodes(con_ssh=con_ssh) - for host in storage_hosts: - osd_list = get_host_stors(host, 'osdid') - if int(osd_id) in osd_list: - msg = 'OSD ID {} is on host {}'.format(osd_id, host) - LOG.info(msg) - return host - - msg = 'Could not find host for OSD ID {}'.format(osd_id) - LOG.warning(msg) - if not fail_ok: - raise exceptions.StorageError(msg) - - -def kill_process(host, pid): - """ - Kill the given process on the host and ensure the pid is gone. - Args: - host (string) - the host to ssh into, e.g. 'controller-1' - pid (string) - pid to kill, e.g. '12345' - - Returns: - - (bool) True if process was killed, False otherwise - - (string) message - """ - - cmd = 'kill -9 {}'.format(pid) - - # SSH could be redundant if we are on controller-0 (oh well!) - LOG.info('Kill process {} on {}'.format(pid, host)) - with host_helper.ssh_to_host(host) as host_ssh: - with host_ssh.login_as_root() as root_ssh: - root_ssh.exec_cmd(cmd, expect_timeout=60) - LOG.info(cmd) - - LOG.info('Ensure the PID is no longer listed') - pid_exists, msg = check_pid_exists(pid, root_ssh) - if pid_exists: - return False, msg - - return True, msg - - -def get_osd_pid(osd_host, osd_id, con_ssh=None, fail_ok=False): - """ - Given the id of an OSD, return the pid. - Args: - osd_host (string) - the host to ssh into, e.g. 'storage-0' - osd_id (int|str) - osd_id to get the pid of, e.g. '0' - con_ssh - fail_ok - - Returns (int|None): - - """ - pid_file = '/var/run/ceph/osd.{}.pid'.format(osd_id) - return __get_pid_from_file(osd_host, pid_file=pid_file, con_ssh=con_ssh, - fail_ok=fail_ok) - - -def get_mon_pid(mon_host, con_ssh=None, fail_ok=False): - """ - Given the host name of a monitor, return the pid of the ceph-mon process - Args: - mon_host (string) - the host to get the pid of, e.g. 'storage-1' - con_ssh (SSHClient) - fail_ok - - Returns (int|None) - - """ - pid_file = '/var/run/ceph/mon.{}.pid'.format( - 'controller' if system_helper.is_aio_duplex() else mon_host) - return __get_pid_from_file(mon_host, pid_file=pid_file, con_ssh=con_ssh, - fail_ok=fail_ok) - - -def __get_pid_from_file(host, pid_file, con_ssh=None, fail_ok=False): - with host_helper.ssh_to_host(host, con_ssh=con_ssh) as host_ssh: - rtn_code, out = host_ssh.exec_cmd('cat {}'.format(pid_file), - expect_timeout=10, fail_ok=fail_ok) - mon_match = r'(\d+)' - pid = re.match(mon_match, out) - if pid: - msg = '{} for {} is {}'.format(pid_file, host, pid.group(1)) - LOG.info(msg) - return pid.group(1) - - msg = '{} for {} was not found'.format(pid_file, host) - LOG.warning(msg) - if not fail_ok: - raise exceptions.StorageError(msg) - - -def get_osds(host=None, con_ssh=None): - """ - Given a hostname, get all OSDs on that host - - Args: - con_ssh(SSHClient) - host(str|None): the host to ssh into - Returns: - (list) List of OSDs on the host. Empty list if none. - """ - - osd_list = [] - - if host: - osd_list += get_host_stors(host, 'osdid', con_ssh) - else: - storage_hosts = system_helper.get_storage_nodes() - for host in storage_hosts: - osd_list += get_host_stors(host, 'osdid', con_ssh) - - return osd_list - - -def is_osd_up(osd_id, con_ssh=None): - """ - Determine if a particular OSD is up.
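To make the relationship between these process keywords concrete, a hedged sketch (OSD id 0 is an arbitrary example, not from the source):

# Hedged sketch: locate the host serving OSD 0, fetch its pid, kill it,
# and let kill_process() confirm the old pid is no longer listed.
osd_id = 0
osd_host = get_osd_host(osd_id)
osd_pid = get_osd_pid(osd_host, osd_id)
killed, msg = kill_process(osd_host, osd_pid)
assert killed, msg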
- - Args: - osd_id (int) - ID of OSD we want to query - con_ssh - - Returns: - (bool) True if OSD is up, False if OSD is down - """ - - if con_ssh is None: - con_ssh = ControllerClient.get_active_controller() - - cmd = r"ceph osd tree | grep 'osd.{}\s'".format(osd_id) - rtn_code, out = con_ssh.exec_cmd(cmd, expect_timeout=60) - if re.search('up', out): - return True - else: - return False - - -def check_pid_exists(pid, host_ssh): - """ - Check if a PID exists on a particular host. - Args: - host_ssh (SSHClient) - pid (int|str): the process ID - Returns (tuple): - (exists (bool), msg) - True if pid exists and False otherwise - """ - - cmd = 'kill -0 {}'.format(pid) - - rtn_code, out = host_ssh.exec_cmd(cmd, expect_timeout=60) - if rtn_code != 1: - msg = 'Process {} exists'.format(pid) - return True, msg - - msg = 'Process {} does not exist'.format(pid) - return False, msg - - -def get_storage_group(host): - """ - Determine the storage replication group name associated with the storage - host. - - Args: - host (string) - storage host, e.g. 'storage-0' - Returns: - storage_group (string) - group name, e.g. 'group-0' - msg (string) - log message - """ - peers = system_helper.get_host_values(host, fields='peers')[0] - - storage_group = re.search(r'(group-\d+)', peers) - msg = 'Unable to determine replication group for {}'.format(host) - assert storage_group, msg - storage_group = storage_group.group(0) - msg = 'The replication group for {} is {}'.format(host, storage_group) - return storage_group, msg - - -def download_images(dload_type='all', img_dest='~/images/', con_ssh=None): - """ - Retrieve images for testing purposes. Note, this will add *a lot* of time - to the test execution. - - Args: - - dload_type: 'all' to get all images (default), - 'ubuntu' to get ubuntu images, - 'centos' to get centos images - - con_ssh - - image destination - where on filesystem images are stored - - Returns: - - List containing the names of the imported images - """ - - def _wget(urls): - """ - This function does a wget on the provided urls. - """ - for url in urls: - cmd_ = 'wget {} --no-check-certificate -P {}'.format(url, img_dest) - rtn_code_, out_ = con_ssh.exec_cmd(cmd_, expect_timeout=7200) - assert not rtn_code_, out_ - - centos_image_location = \ - [ - 'http://cloud.centos.org/centos/7/images/CentOS-7-x86_64' - '-GenericCloud.qcow2', - 'http://cloud.centos.org/centos/6/images/CentOS-6-x86_64' - '-GenericCloud.qcow2'] - - ubuntu_image_location = \ - [ - 'https://cloud-images.ubuntu.com/precise/current/precise-server' - '-cloudimg-amd64-disk1.img'] - - if not con_ssh: - con_ssh = ControllerClient.get_active_controller() - - LOG.info('Create directory for image storage') - cmd = 'mkdir -p {}'.format(img_dest) - rtn_code, out = con_ssh.exec_cmd(cmd) - assert not rtn_code, out - - LOG.info('wget images') - if dload_type == 'ubuntu' or dload_type == 'all': - LOG.info("Downloading ubuntu image") - _wget(ubuntu_image_location) - if dload_type == 'centos' or dload_type == 'all': - LOG.info("Downloading centos image") - _wget(centos_image_location) - - -def find_images(con_ssh=None, image_type='qcow2', image_name=None, - location=None): - """ - This function finds all images of a given type, in the given location. - This is designed to save test time, to prevent downloading images if not - necessary. - - Arguments: - - image_type(string): image format, e.g. 'qcow2', 'raw', etc. - - if the user specifies 'all', return all images - - location(string): where to find images, e.g. '~/images' - - Test Steps: - 1. Cycle through the files in a given location - 2.
Create a list of image names of the expected type - - Return: - - image_names(list): list of image names of a given type, e.g. - 'cgcs-guest.img' or all images if the user specified 'all' as the - argument to image_type. - """ - - image_names = [] - if not location: - location = GuestImages.DEFAULT['image_dir'] - if not con_ssh: - con_ssh = get_cli_client() - - cmd = 'ls {}'.format(location) - rtn_code, out = con_ssh.exec_cmd(cmd) - image_list = out.split() - LOG.info('Found the following files: {}'.format(image_list)) - if image_type == 'all' and not image_name: - return image_list, location - - # Return a list of image names where the image type matches what the user - # is looking for, e.g. qcow2 - for image in image_list: - if image_name and image_name not in image: - continue - image_path = location + "/" + image - cmd = 'qemu-img info {}'.format(image_path) - rtn_code, out = con_ssh.exec_cmd(cmd) - if image_type in out: - image_names.append(image) - - LOG.info('{} images available: {}'.format(image_type, image_names)) - return image_names, location - - -def find_image_size(con_ssh, image_name='cgcs-guest.img', location='~/images'): - """ - This function uses qemu-img info to determine what size of flavor to use. - Args: - con_ssh: - image_name (str): e.g. 'cgcs-guest.img' - location (str): where to find images, e.g. '~/images' - - Returns: - image_size(int): e.g. 8 - """ - - image_path = location + "/" + image_name - cmd = 'qemu-img info {}'.format(image_path) - rtn_code, out = con_ssh.exec_cmd(cmd) - virtual_size = re.search(r'virtual size: (\d+\.*\d*[M|G])', out) - msg = 'Unable to determine size of image {}'.format(image_name) - assert virtual_size, msg - # If the size is less than 1G, round to 1 - # If the size is greater than 1G, round to the nearest integer - if 'M' in virtual_size.group(1): - image_size = 1 - else: - image_size = round(float(virtual_size.group(1).strip('G'))) - - return image_size - - -def wait_for_ceph_health_ok(con_ssh=None, timeout=300, fail_ok=False, - check_interval=5): - end_time = time.time() + timeout - output = None - while time.time() < end_time: - rc, output = is_ceph_healthy(con_ssh=con_ssh) - if rc: - return True - - time.sleep(check_interval) - else: - err_msg = "Ceph is not healthy within {} seconds: {}".format(timeout, - output) - if fail_ok: - LOG.warning(err_msg) - return False, err_msg - else: - raise exceptions.TimeoutException(err_msg) - - -def get_storage_backends(field='backend', con_ssh=None, - auth_info=Tenant.get('admin_platform'), **filters): - """ - Get storage backend values from system storage-backend-list - Args: - field (str|list|tuple): - con_ssh: - auth_info: - **filters: - - Returns (list): - - """ - table_ = table_parser.table( - cli.system('storage-backend-list', ssh_client=con_ssh, - auth_info=auth_info)[1], - combine_multiline_entry=True) - return table_parser.get_multi_values(table_, field, **filters) - - -def get_storage_backend_values(backend, fields=None, rtn_dict=False, - con_ssh=None, - auth_info=Tenant.get('admin_platform'), - **kwargs): - """ - Get storage backend values for given backend via system storage-backend-show - - Args: - backend (str): storage backend to get info (e.g.
ceph) - fields (list|tuple|str|None): keys to return, e.g., ['name', - 'backend', 'task'] - rtn_dict (bool) - con_ssh: - auth_info - - Returns (list|dict): - Examples: - Input: ('cinder_pool_gib', 'glance_pool_gib', - 'ephemeral_pool_gib', 'object_pool_gib', - 'ceph_total_space_gib', 'object_gateway') - Output: - if rtn_dict: {'cinder_pool_gib': 202, 'glance_pool_gib': 20, - 'ephemeral_pool_gib': 0, - 'object_pool_gib': 0, 'ceph_total_space_gib': - 222, 'object_gateway': False} - if list: [202, 20, 0, 0, 222, False] - """ - # valid_backends = ['ceph-store', 'lvm-store', 'file-store', - # 'shared_services] - backend = backend.lower() - if re.fullmatch('ceph|lvm|file', backend): - backend += '-store' - elif backend == 'external': - backend = 'shared_services' - - table_ = table_parser.table( - cli.system('storage-backend-show', backend, ssh_client=con_ssh, - auth_info=auth_info)[1], - combine_multiline_entry=True) - if not fields: - fields = table_parser.get_column(table_, 'Property') - return table_parser.get_multi_values_two_col_table(table_, fields, - evaluate=True, - rtn_dict=rtn_dict, - **kwargs) - - -def wait_for_storage_backend_vals(backend, timeout=300, fail_ok=False, - con_ssh=None, - auth_info=Tenant.get('admin_platform'), - **expt_values): - if not expt_values: - raise ValueError( - "At least one key/value pair has to be provided via expt_values") - - LOG.info( - "Wait for storage backend {} to reach: {}".format(backend, expt_values)) - end_time = time.time() + timeout - dict_to_check = expt_values.copy() - stor_backend_info = None - while time.time() < end_time: - stor_backend_info = get_storage_backend_values( - backend=backend, fields=list(dict_to_check.keys()), - rtn_dict=True, con_ssh=con_ssh, auth_info=auth_info) - dict_to_iter = dict_to_check.copy() - for key, expt_val in dict_to_iter.items(): - actual_val = stor_backend_info[key] - if str(expt_val) == str(actual_val): - dict_to_check.pop(key) - - if not dict_to_check: - return True, dict_to_check - - if fail_ok: - return False, stor_backend_info - raise exceptions.StorageError( - "Storage backend show field(s) did not reach expected value(s). " - "Expected: {}; Actual: {}".format(dict_to_check, stor_backend_info)) - - -def add_storage_backend(backend='ceph', ceph_mon_gib='20', ceph_mon_dev=None, - ceph_mon_dev_controller_0_uuid=None, - ceph_mon_dev_controller_1_uuid=None, con_ssh=None, - fail_ok=False): - """ - - Args: - backend (str): The backend to add. Only ceph is supported - ceph_mon_gib(int/str): The ceph-mon-lv size in GiB. The default is 20GiB - ceph_mon_dev (str): The disk device that the ceph-mon will be created - on. This applies to both controllers. In - case of separate device names on controllers use the options - below to specify device name for each controller - ceph_mon_dev_controller_0_uuid (str): The uuid of controller-0 disk - device that the ceph-mon will be created on - ceph_mon_dev_controller_1_uuid (str): The uuid of controller-1 disk - device that the ceph-mon will be created on - con_ssh: - fail_ok: - - Returns: - - """ - - if backend != 'ceph': - msg = "Invalid backend {} specified. 
Valid choices are {}".format( - backend, ['ceph']) - if fail_ok: - return 1, msg - else: - raise exceptions.CLIRejected(msg) - if isinstance(ceph_mon_gib, int): - ceph_mon_gib = str(ceph_mon_gib) - - cmd = 'system storage-backend-add --ceph-mon-gib {}'.format(ceph_mon_gib) - if ceph_mon_dev: - cmd += ' --ceph-mon-dev {}'.format( - ceph_mon_dev if '/dev' in ceph_mon_dev else '/dev/' + - ceph_mon_dev.strip()) - if ceph_mon_dev_controller_0_uuid: - cmd += ' --ceph_mon_dev_controller_0_uuid {}'.format( - ceph_mon_dev_controller_0_uuid) - if ceph_mon_dev_controller_1_uuid: - cmd += ' --ceph_mon_dev_controller_1_uuid {}'.format( - ceph_mon_dev_controller_1_uuid) - - cmd += " {}".format(backend) - controler_ssh = con_ssh if con_ssh else \ - ControllerClient.get_active_controller() - controler_ssh.send(cmd) - index = controler_ssh.expect([controler_ssh.prompt, r'\[yes/N\]']) - if index == 1: - controler_ssh.send('yes') - controler_ssh.expect() - - rc, output = controler_ssh.process_cmd_result(cmd) - if rc != 0: - if fail_ok: - return rc, output - raise exceptions.CLIRejected("Fail Cli command cmd: {}".format(cmd)) - else: - output = table_parser.table(output) - return rc, output - - -def modify_storage_backend(backend, cinder=None, glance=None, ephemeral=None, - object_gib=None, object_gateway=None, - services=None, lock_unlock=False, fail_ok=False, - con_ssh=None): - """ - Modify ceph storage backend pool allocation - - Args: - backend (str): storage backend to modify (e.g. ceph) - cinder: - glance: - ephemeral: - object_gib: - object_gateway (bool|None) - services (str|list|tuple): - lock_unlock (bool): whether to wait for config out-of-date alarms - against controllers and lock/unlock them - fail_ok: - con_ssh: - - Returns: - 0, dict of new allocation - 1, cli err message - - """ - if re.fullmatch('ceph|lvm|file', backend): - backend += '-store' - backend = backend.lower() - - args = '' - if services: - if isinstance(services, (list, tuple)): - services = ','.join(services) - args = '-s {} '.format(services) - args += backend - - get_storage_backend_values(backend, fields='backend') - - if cinder: - args += ' cinder_pool_gib={}'.format(cinder) - - if 'ceph' in backend: - if glance: - args += ' glance_pool_gib={}'.format(glance) - if ephemeral: - args += ' ephemeral_pool_gib={}'.format(ephemeral) - if object_gateway is not None: - args += ' object_gateway={}'.format(object_gateway) - if object_gib: - args += ' object_pool_gib={}'.format(object_gib) - - code, out = cli.system('storage-backend-modify', args, con_ssh, - fail_ok=fail_ok) - if code > 0: - return 1, out - - if lock_unlock: - from testfixtures.recover_hosts import HostsToRecover - LOG.info( - "Lock unlock controllers and ensure config out-of-date alarms " - "clear") - system_helper.wait_for_alarm(alarm_id=EventLogID.CONFIG_OUT_OF_DATE, - timeout=30, fail_ok=False, - entity_id='controller-') - - active_controller, standby_controller = \ - system_helper.get_active_standby_controllers(con_ssh=con_ssh) - for controller in [standby_controller, active_controller]: - if not controller: - continue - HostsToRecover.add(controller) - host_helper.lock_host(controller, swact=True, con_ssh=con_ssh) - wait_for_storage_backend_vals( - backend=backend, - **{'task': BackendTask.RECONFIG_CONTROLLER, - 'state': BackendState.CONFIGURING}) - - host_helper.unlock_host(controller, con_ssh=con_ssh) - - system_helper.wait_for_alarm_gone( - alarm_id=EventLogID.CONFIG_OUT_OF_DATE, fail_ok=False) - - # TODO return new values of storage allocation and check they 
are the - # right values - updated_backend_info = get_storage_backend_values(backend, rtn_dict=True) - return 0, updated_backend_info - - -def add_ceph_mon(host, con_ssh=None, fail_ok=False): - """ - - Args: - host: - con_ssh: - fail_ok: - - Returns: - - """ - - valid_ceph_mon_hosts = ['controller-0', 'controller-1', 'storage-0', - 'compute-0'] - if host not in valid_ceph_mon_hosts: - msg = "Invalid host {} specified. Valid choices are {}".format( - host, valid_ceph_mon_hosts) - if fail_ok: - return 1, msg - else: - raise exceptions.CLIRejected(msg) - - if not con_ssh: - con_ssh = ControllerClient.get_active_controller() - - existing_ceph_mons = get_ceph_mon_values(con_ssh=con_ssh) - if host in existing_ceph_mons: - state = get_ceph_mon_state(host, con_ssh=con_ssh) - LOG.warning( - "Host {} is already added as ceph-mon and is in state: {}".format( - host, state)) - if state == 'configuring': - wait_for_ceph_mon_configured(host, con_ssh=con_ssh, fail_ok=True) - state = get_ceph_mon_state(host, con_ssh=con_ssh) - if state == 'configured' or state == 'configuring': - return 0, None - else: - msg = "The existing ceph-mon is in state {}".format(state) - if fail_ok: - return 1, msg - else: - raise exceptions.HostError(msg) - - if not host_helper.is_host_locked(host, con_ssh=con_ssh): - rc, output = host_helper.lock_host(host, con_ssh=con_ssh) - if rc != 0: - msg = "Cannot add ceph-mon to host {} because the host failed to " \ - "lock: {}".format(host, output) - if fail_ok: - return rc, msg - else: - raise exceptions.HostError(msg) - - cmd = 'ceph-mon-add' - - rc, output = cli.system(cmd, host, ssh_client=con_ssh, fail_ok=fail_ok) - if rc != 0: - msg = "CLI command {} failed to add ceph mon in host {}: {}".format( - cmd, host, output) - LOG.warning(msg) - if fail_ok: - return rc, msg - else: - raise exceptions.StorageError(msg) - rc, state, output = wait_for_ceph_mon_configured(host, con_ssh=con_ssh, - fail_ok=True) - if state == 'configured': - return 0, None - elif state == 'configuring': - return 1, "The ceph mon in host {} is in state {}".format(host, state) - else: - return 2, "The ceph mon in host {} failed: state = {}; msg = {}".format( - host, state, output) - - -def wait_for_ceph_mon_configured(host, state=None, - timeout=HostTimeout.CEPH_MON_ADD_CONFIG, - con_ssh=None, - fail_ok=False, check_interval=5): - end_time = time.time() + timeout - while time.time() < end_time: - state = get_ceph_mon_state(host, con_ssh=con_ssh) - if state == 'configured': - return True, state, None - - time.sleep(check_interval) - - msg = "The added ceph-mon on host {} did not reach configured state " \ - "within {} seconds.
Last state = {}" \ - .format(host, timeout, state) - if fail_ok: - LOG.warning(msg) - return False, state, msg - else: - raise exceptions.StorageError(msg) - - -def get_ceph_mon_values(field='hostname', hostname=None, uuid=None, state=None, - task=None, con_ssh=None): - """ - - Args: - field: - hostname: - uuid: - state: - task: - con_ssh: - - Returns: - - """ - ceph_mons = [] - table_ = table_parser.table( - cli.system('ceph-mon-list', ssh_client=con_ssh)[1], - combine_multiline_entry=True) - - filters = {} - if table_: - if hostname: - filters['hostname'] = hostname - if uuid: - filters['uuid'] = uuid - if state: - filters['state'] = state - if task: - filters['task'] = task - - table_ = table_parser.filter_table(table_, **filters) - ceph_mons = table_parser.get_column(table_, field) - return ceph_mons - - -def get_ceph_mon_state(hostname, con_ssh=None): - return get_ceph_mon_values(field='state', hostname=hostname, - con_ssh=con_ssh)[0] - - -def get_fs_mount_path(ssh_client, fs): - mount_cmd = 'mount | grep --color=never {}'.format(fs) - exit_code, output = ssh_client.exec_sudo_cmd(mount_cmd, fail_ok=True) - - mounted_on = fs_type = None - msg = "Filesystem {} is not mounted".format(fs) - is_mounted = exit_code == 0 - if is_mounted: - # Get the first mount point - mounted_on, fs_type = \ - re.findall('{} on ([^ ]*) type ([^ ]*) '.format(fs), output)[0] - msg = "Filesystem {} is mounted on {}".format(fs, mounted_on) - - LOG.info(msg) - return mounted_on, fs_type - - -def is_fs_auto_mounted(ssh_client, fs): - auto_cmd = 'cat /etc/fstab | grep --color=never {}'.format(fs) - exit_code, output = ssh_client.exec_sudo_cmd(auto_cmd, fail_ok=True) - - is_auto_mounted = exit_code == 0 - LOG.info("Filesystem {} is {}auto mounted".format(fs, - '' if is_auto_mounted - else 'not ')) - return is_auto_mounted - - -def mount_partition(ssh_client, disk, partition=None, fs_type=None): - if not partition: - partition = '/dev/{}'.format(disk) - - disk_id = ssh_client.exec_sudo_cmd( - 'blkid | grep --color=never "{}:"'.format(partition))[1] - if disk_id: - mount_on, fs_type_ = get_fs_mount_path(ssh_client=ssh_client, - fs=partition) - if mount_on: - return mount_on, fs_type_ - - fs_type = re.findall('TYPE="([^ ]*)"', disk_id)[0] - if 'swap' == fs_type: - fs_type = 'swap' - turn_on_swap(ssh_client=ssh_client, disk=disk, partition=partition) - mount_on = 'none' - else: - mount_on = None - if not fs_type: - fs_type = 'ext4' - - LOG.info("mkfs for {}".format(partition)) - - cmd = "mkfs -t {} {}".format(fs_type, partition) - ssh_client.exec_sudo_cmd(cmd, fail_ok=False) - - if not mount_on: - mount_on = '/mnt/{}'.format(disk) - LOG.info("mount {} to {}".format(partition, mount_on)) - ssh_client.exec_sudo_cmd( - 'mkdir -p {}; mount {} {}'.format(mount_on, partition, mount_on), - fail_ok=False) - LOG.info("{} successfully mounted to {}".format(partition, mount_on)) - mount_on_, fs_type_ = get_fs_mount_path(ssh_client=ssh_client, - fs=partition) - assert mount_on == mount_on_ and fs_type == fs_type_ - - return mount_on, fs_type - - -def turn_on_swap(ssh_client, disk, partition=None): - if not partition: - partition = '/dev/{}'.format(disk) - swap_info = ssh_client.exec_sudo_cmd( - 'blkid | grep --color=never "{}:"'.format(partition), fail_ok=False)[1] - swap_uuid = re.findall('UUID="(.*)" TYPE="swap"', swap_info)[0] - LOG.info('swapon for {}'.format(partition)) - proc_swap = ssh_client.exec_sudo_cmd( - 'cat /proc/swaps | grep --color=never "{} "'.format(partition))[1] - if not proc_swap: - 
ssh_client.exec_sudo_cmd('swapon {}'.format(partition)) - proc_swap = ssh_client.exec_sudo_cmd( - 'cat /proc/swaps | grep --color=never "{} "'.format(partition))[1] - assert proc_swap, "swap partition is not shown in /proc/swaps after " \ - "swapon" - - return swap_uuid - - -def auto_mount_fs(ssh_client, fs, mount_on=None, fs_type=None, - check_first=True): - if check_first: - if is_fs_auto_mounted(ssh_client=ssh_client, fs=fs): - return - - if fs_type == 'swap' and not mount_on: - raise ValueError("swap uuid required via mount_on") - - if not mount_on: - mount_on = '/mnt/{}'.format(fs.rsplit('/', maxsplit=1)[-1]) - - if not fs_type: - fs_type = 'ext4' - cmd = 'echo "{} {} {} defaults 0 0" >> /etc/fstab'.format(fs, mount_on, - fs_type) - ssh_client.exec_sudo_cmd(cmd, fail_ok=False) - ssh_client.exec_sudo_cmd('cat /etc/fstab', get_exit_code=False) - - -def modify_swift(enable=True, check_first=True, fail_ok=False, apply=True, - con_ssh=None): - """ - Enable/disable swift service - Args: - enable: - check_first: - fail_ok: - apply: - con_ssh - - Returns (tuple): - (-1, "swift service parameter is already xxx") only apply when - check_first=True - (0, ) - (1, ) system service-parameter-modify cli got rejected. - - """ - if enable: - expt_val = 'true' - extra_str = 'enable' - else: - expt_val = 'false' - extra_str = 'disable' - - if check_first: - swift_endpoints = keystone_helper.get_endpoints(service_name='swift', - con_ssh=con_ssh, - cli_filter=False) - if enable is bool(swift_endpoints): - msg = "swift service parameter is already {}d. Do nothing.".format( - extra_str) - LOG.info(msg) - return -1, msg - - LOG.info("Modify system service parameter to {} Swift".format(extra_str)) - code, msg = system_helper.modify_service_parameter(service='swift', - section='config', - name='service_enabled', - value=expt_val, - apply=apply, - check_first=False, - fail_ok=fail_ok, - con_ssh=con_ssh) - - if apply and code == 0: - LOG.info("Check Swift endpoints after service {}d".format(extra_str)) - swift_endpoints = keystone_helper.get_endpoints(service_name='swift', - con_ssh=con_ssh, - cli_filter=False) - if enable is not bool(swift_endpoints): - raise exceptions.SwiftError( - "Swift endpoints did not {} after modify".format(extra_str)) - msg = 'Swift is {}d successfully'.format(extra_str) - - return code, msg - - -def get_qemu_image_info(image_filename, ssh_client, fail_ok=False): - """ - Provides information about the disk image filename, like file format, - virtual size and disk size - Args: - image_filename (str); the disk image file name - ssh_client: - fail_ok: - - Returns: - 0, dict { image: , format: , virtual size: - , disk size: 0: - return None - - table_ = table_parser.table(out) - values = [] - for field in fields: - convert_to_gib = False - if field == 'size_gib': - field = 'size_mib' - convert_to_gib = True - - param_value = table_parser.get_value_two_col_table(table_, field) - if '_mib' in field: - param_value = float(param_value) - if convert_to_gib: - param_value = float(param_value) / 1024 - - values.append(param_value) - - return values - - -def delete_host_partition(host, uuid, fail_ok=False, - timeout=SysInvTimeout.PARTITION_DELETE, con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - Delete a partition from a specific host. - - Arguments: - * host(str) - hostname, e.g. 
controller-0 - * uuid(str) - uuid of partition - * timeout(int) - how long to wait for partition deletion (sec) - - Returns: - * rc, out - return code and output of the host-disk-partition-delete - """ - - rc, out = cli.system('host-disk-partition-delete {} {}'.format(host, uuid), - fail_ok=fail_ok, ssh_client=con_ssh, - auth_info=auth_info) - if rc > 0: - return 1, out - - wait_for_host_partition_status(host=host, uuid=uuid, timeout=timeout, - final_status=None, - interim_status=PartitionStatus.DELETING, - con_ssh=con_ssh, auth_info=auth_info) - return 0, "Partition successfully deleted" - - -def create_host_partition(host, device_node, size_gib, fail_ok=False, wait=True, - timeout=SysInvTimeout.PARTITION_CREATE, - con_ssh=None, auth_info=Tenant.get('admin_platform')): - """ - Create a partition on host. - - Arguments: - * host(str) - hostname, e.g. controller-0 - * device_node(str) - device, e.g. /dev/sdh - * size_gib(str) - size of partition in gib - * wait(bool) - if True, wait for partition creation; if False, return - * immediately. - * timeout(int) - how long to wait for partition creation (sec) - - Returns: - * rc, out - return code and output of the host-disk-partition-command - """ - args = '{} {} {}'.format(host, device_node, size_gib) - rc, out = cli.system('host-disk-partition-add', args, fail_ok=fail_ok, - ssh_client=con_ssh, auth_info=auth_info) - if rc > 0 or not wait: - return rc, out - - uuid = table_parser.get_value_two_col_table(table_parser.table(out), "uuid") - wait_for_host_partition_status(host=host, uuid=uuid, timeout=timeout, - con_ssh=con_ssh, auth_info=auth_info) - return 0, uuid - - -def modify_host_partition(host, uuid, size_gib, fail_ok=False, - timeout=SysInvTimeout.PARTITION_MODIFY, - final_status=PartitionStatus.READY, con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - This function modifies the size of a partition. - - Args: - host(str) - hostname, e.g.
controller-0 - uuid(str) - uuid of the partition - size_gib(str) - new partition size in gib - fail_ok - timeout(int) - how long to wait for partition creation (sec) - final_status (str|list) - con_ssh - auth_info - - Returns: - * rc, out - return code and output of the host-disk-partition-command - """ - - args = '-s {} {} {}'.format(size_gib, host, uuid) - rc, out = cli.system('host-disk-partition-modify', args, fail_ok=fail_ok, - ssh_client=con_ssh, auth_info=auth_info) - if rc > 0: - return 1, out - - uuid = table_parser.get_value_two_col_table(table_parser.table(out), "uuid") - wait_for_host_partition_status(host=host, uuid=uuid, timeout=timeout, - interim_status=PartitionStatus.MODIFYING, - final_status=final_status, con_ssh=con_ssh, - auth_info=auth_info) - - msg = "{} partition successfully modified".format(host) - LOG.info(msg) - return 0, msg - - -def wait_for_host_partition_status(host, uuid, - final_status=PartitionStatus.READY, - interim_status=PartitionStatus.CREATING, - timeout=120, fail_ok=False, - con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - Wait for host partition to reach given status - Args: - host: - uuid: - final_status (str|list|None|tuple): - interim_status: - timeout: - fail_ok: - con_ssh - auth_info - - Returns (bool): - - """ - if not final_status: - final_status = [None] - elif isinstance(final_status, str): - final_status = (final_status,) - - valid_status = list(final_status) - if isinstance(interim_status, str): - interim_status = (interim_status,) - for status_ in interim_status: - valid_status.append(status_) - - end_time = time.time() + timeout - prev_status = '' - while time.time() < end_time: - status = \ - get_host_partition_values(host, uuid, "status", con_ssh=con_ssh, - auth_info=auth_info)[0] - assert status in valid_status, "Partition has unexpected state " \ - "{}".format(status) - - if status in final_status: - LOG.info( - "Partition {} on host {} has reached state: {}".format(uuid, - host, - status)) - return True - elif status != prev_status: - prev_status = status - LOG.info("Partition {} on host {} is in {} state".format(uuid, host, - status)) - - time.sleep(5) - - msg = "Partition {} on host {} not in {} state within {} seconds".format( - uuid, host, final_status, timeout) - LOG.warning(msg) - if fail_ok: - return False - else: - raise exceptions.StorageError(msg) - - -def get_host_disks(host, field='uuid', auth_info=Tenant.get('admin_platform'), - con_ssh=None, **kwargs): - """ - Get values from system host-disk-list - Args: - host (str): - field (str|list|tuple) - con_ssh (SSHClient): - auth_info (dict): - - Returns (list): - - """ - table_ = table_parser.table( - cli.system('host-disk-list --nowrap', host, ssh_client=con_ssh, - auth_info=auth_info)[1]) - return table_parser.get_multi_values(table_, field, evaluate=True, **kwargs) - - -def get_host_disk_values(host, disk, fields, - auth_info=Tenant.get('admin_platform'), con_ssh=None): - """ - Get host disk values via system host-disk-show - Args: - host: - disk: - fields: - auth_info: - con_ssh: - - Returns: - - """ - table_ = table_parser.table( - cli.system('host-disk-show', '{} {}'.format(host, disk), - ssh_client=con_ssh, - auth_info=auth_info)[1]) - return table_parser.get_multi_values_two_col_table(table_, fields, - evaluate=True) - - -def get_host_disks_with_free_space(host, disk_list, - auth_info=Tenant.get('admin_platform'), - con_ssh=None): - """ - Given a list of disks, return the ones with free space. - - Arguments: - host(str) - hostname, e.g.
- - -def get_host_disks(host, field='uuid', auth_info=Tenant.get('admin_platform'), - con_ssh=None, **kwargs): - """ - Get values from system host-disk-list - Args: - host (str): - field (str|list|tuple) - con_ssh (SSHClient): - auth_info (dict): - - Returns (list): - - """ - table_ = table_parser.table( - cli.system('host-disk-list --nowrap', host, ssh_client=con_ssh, - auth_info=auth_info)[1]) - return table_parser.get_multi_values(table_, field, evaluate=True, **kwargs) - - -def get_host_disk_values(host, disk, fields, - auth_info=Tenant.get('admin_platform'), con_ssh=None): - """ - Get host disk values via system host-disk-show - Args: - host: - disk: - fields: - auth_info: - con_ssh: - - Returns: - - """ - table_ = table_parser.table( - cli.system('host-disk-show', '{} {}'.format(host, disk), - ssh_client=con_ssh, - auth_info=auth_info)[1]) - return table_parser.get_multi_values_two_col_table(table_, fields, - evaluate=True) - - -def get_host_disks_with_free_space(host, disk_list, - auth_info=Tenant.get('admin_platform'), - con_ssh=None): - """ - Given a list of disks, return the ones with free space. - - Arguments: - host(str) - hostname, e.g. controller-0 - disk_list (list) - list of disks - auth_info - con_ssh - - Returns (dict): disks that have usable space. - """ - - free_disks = {} - for disk in disk_list: - LOG.info("Querying disk {} on host {}".format(disk, host)) - available_space = float( - get_host_disk_values(host, disk, fields='available_gib', - auth_info=auth_info, - con_ssh=con_ssh)[0]) - LOG.info("{} has disk {} with {} gib available".format(host, disk, - available_space)) - if available_space <= 0: - LOG.info( - "Skipping disk {} on host {} due to insufficient " - "space".format( - disk, host)) - else: - free_disks[disk] = available_space - - return free_disks - - -def get_hosts_rootfs(hosts, auth_info=Tenant.get('admin_platform'), - con_ssh=None): - """ - This returns the rootfs disks of each node. - - Arguments: - * hosts(list) - e.g. controller-0, controller-1, etc. - - Returns: - * Dict of host mapped to rootfs disk - """ - - rootfs_uuid = {} - for host in hosts: - rootfs_device = system_helper.get_host_values(host, 'rootfs_device', - auth_info=auth_info, - con_ssh=con_ssh)[0] - LOG.debug("{} is using rootfs disk: {}".format(host, rootfs_device)) - key = 'device_path' - if '/dev/disk' not in rootfs_device: - key = 'device_node' - rootfs_device = '/dev/{}'.format(rootfs_device) - - disk_uuids = get_host_disks(host, 'uuid', auth_info=auth_info, - con_ssh=con_ssh, **{key: rootfs_device}) - rootfs_uuid[host] = disk_uuids - - LOG.info("Root disk UUIDS: {}".format(rootfs_uuid)) - return rootfs_uuid - - -def get_controllerfs_list(field='Size in GiB', fs_name=None, con_ssh=None, - auth_info=Tenant.get('admin_platform'), - **filters): - table_ = table_parser.table( - cli.system('controllerfs-list --nowrap', ssh_client=con_ssh, - auth_info=auth_info)[1]) - - if fs_name: - filters['FS Name'] = fs_name - - return table_parser.get_multi_values(table_, field, evaluate=True, - **filters) - - -def get_controllerfs_values(filesystem, fields='size', rtn_dict=False, - auth_info=Tenant.get('admin_platform'), - con_ssh=None): - """ - Returns the value of a particular filesystem. - - Arguments: - - fields (str|list|tuple) - what value to get, e.g. size - - filesystem(str) - e.g. scratch, database, etc. - - Returns (list): - - """ - table_ = table_parser.table( - cli.system('controllerfs-show', filesystem, ssh_client=con_ssh, - auth_info=auth_info)[1]) - return table_parser.get_multi_values_two_col_table(table_, fields, - rtn_dict=rtn_dict, - evaluate=True) - - -def get_controller_fs_values(con_ssh=None, - auth_info=Tenant.get('admin_platform')): - table_ = table_parser.table( - cli.system('controllerfs-show', ssh_client=con_ssh, - auth_info=auth_info)[1]) - - rows = table_parser.get_all_rows(table_) - values = {} - for row in rows: - values[row[0].strip()] = row[1].strip() - return values
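# The core of get_host_disks_with_free_space above is a simple
# positive-space filter. A standalone sketch of that filter against canned
# data instead of the host-disk-show CLI:
def disks_with_free_space(available_gib_by_disk):
    # Keep only disks reporting a strictly positive available_gib.
    return {disk: gib for disk, gib in available_gib_by_disk.items() if gib > 0}

assert disks_with_free_space({'sdb': 12.0, 'sdc': 0.0}) == {'sdb': 12.0}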
- """ - - attr_values_ = ['{}="{}"'.format(attr, value) for attr, value in - kwargs.items()] - args_ = ' '.join(attr_values_) - - rc, out = cli.system("controllerfs-modify", args_, fail_ok=fail_ok, - ssh_client=con_ssh, auth_info=auth_info) - if rc > 0: - return 1, out - - msg = "Filesystem update succeeded" - LOG.info(msg) - return 0, msg - - -def get_host_stors(host, field='uuid', con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - Get host storage values from system host-stor-list - Args: - host: - field (str|tuple|list): - auth_info: - con_ssh: - - Returns (list): - - """ - table_ = table_parser.table( - cli.system('host-stor-list --nowrap', host, ssh_client=con_ssh, - auth_info=auth_info)[1]) - return table_parser.get_multi_values(table_, field, evaluate=True) - - -def get_host_stor_values(host, stor_uuid, fields="size", con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - Returns the value of a particular filesystem. - - Arguments: - host - stor_uuid - fields (str|list|tuple) - auth_info - con_ssh - - Returns (list): - - """ - args = '{} {}'.format(host, stor_uuid) - table_ = table_parser.table( - cli.system('host-stor-show', args, ssh_client=con_ssh, - auth_info=auth_info)[1]) - return table_parser.get_multi_values_two_col_table(table_, fields, - evaluate=True) - - -def get_storage_tiers(cluster, field='uuid', con_ssh=None, - auth_info=Tenant.get('admin_platform'), **filters): - """ - - Args: - cluster: - field (str|tuple|list): - con_ssh: - auth_info: - **filters: - - Returns: - - """ - table_ = table_parser.table( - cli.system('storage-tier-list {}'.format(cluster), ssh_client=con_ssh, - auth_info=auth_info), combine_multiline_entry=True) - return table_parser.get_multi_values(table_, field, **filters) - - -def add_host_storage(host, disk_uuid, journal_location=None, journal_size=None, - function=None, tier_uuid=None, - auth_info=Tenant.get('admin_platform'), con_ssh=None, - fail_ok=False): - """ - Add storage to host - Args: - host: - disk_uuid: - journal_location: - journal_size: - function: - tier_uuid: - auth_info: - con_ssh: - fail_ok: - - Returns (tuple): - - """ - if not host or not disk_uuid: - raise ValueError("host name and disk uuid must be specified") - - args_dict = { - '--journal-location': journal_location, - '--journal-size': journal_size, - '--tier-uuid': tier_uuid - } - args = common.parse_args(args_dict) - - function = ' {}'.format(function) if function else '' - args += " {} {}{}".format(host, function, disk_uuid) - LOG.info("Adding storage to {}".format(host)) - rc, output = cli.system('host-stor-add', ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - if rc > 0: - return 1, output - - table_ = table_parser.table(output) - uuid = table_parser.get_value_two_col_table(table_, 'uuid') - LOG.info("Storage added to {} successfully: {}".format(host, uuid)) - return 0, uuid - - -def clear_local_storage_cache(host, con_ssh=None): - with host_helper.ssh_to_host(host, con_ssh=con_ssh) as host_ssh: - with host_ssh.login_as_root() as root_ssh: - root_ssh.exec_cmd('rm -rf /var/lib/nova/instances/_base/*', - fail_ok=True) - root_ssh.exec_cmd('sync;echo 3 > /proc/sys/vm/drop_caches', - fail_ok=True) diff --git a/automated-pytest-suite/keywords/system_helper.py b/automated-pytest-suite/keywords/system_helper.py deleted file mode 100644 index c6fc6397..00000000 --- a/automated-pytest-suite/keywords/system_helper.py +++ /dev/null @@ -1,3530 +0,0 @@ -# -# Copyright (c) 2019, 2020 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - - -import ipaddress -import re -import os -import time -import yaml - -from pytest import skip - -from consts.auth import Tenant, HostLinuxUser -from consts.stx import UUID, Prompt, SysType, EventLogID, HostAvailState -from consts.proj_vars import ProjVar -from consts.timeout import SysInvTimeout, MiscTimeout, HostTimeout -from utils import cli, table_parser, exceptions -from utils.clients.ssh import ControllerClient -from utils.tis_log import LOG -from testfixtures.fixture_resources import ResourceCleanup -from keywords import common - - -def get_sys_type(con_ssh=None): - """ - Please do NOT call this function in testcase/keyword. This is used to set - global variable SYS_TYPE in ProjVar. - Use ProjVar.get_var('SYS_TYPE') in testcase/keyword instead. - Args: - con_ssh: - - Returns: - - """ - auth_info = Tenant.get('admin_platform') - is_aio = is_aio_system(controller_ssh=con_ssh, auth_info=auth_info) - if is_aio: - sys_type = SysType.AIO_DX - if len(get_controllers(con_ssh=con_ssh, auth_info=auth_info)) == 1: - sys_type = SysType.AIO_SX - elif get_storage_nodes(con_ssh=con_ssh): - sys_type = SysType.STORAGE - else: - sys_type = SysType.REGULAR - - LOG.info("============= System type: {} ==============".format(sys_type)) - return sys_type - - -def is_storage_system(con_ssh=None, auth_info=Tenant.get('admin_platform')): - sys_type = ProjVar.get_var('SYS_TYPE') - if sys_type: - if not (ProjVar.get_var('IS_DC') and auth_info and - ProjVar.get_var('PRIMARY_SUBCLOUD') != auth_info.get('region')): - return SysType.STORAGE == sys_type - else: - return bool(get_storage_nodes(con_ssh=con_ssh, auth_info=auth_info)) - - -def is_aio_duplex(con_ssh=None, auth_info=Tenant.get('admin_platform')): - """ - Whether it is two node CPE system - Args: - con_ssh: - auth_info - - Returns (bool): - - """ - - sys_type = ProjVar.get_var('SYS_TYPE') - if sys_type: - if not (ProjVar.get_var('IS_DC') and auth_info and - ProjVar.get_var('PRIMARY_SUBCLOUD') != auth_info.get('region', - None)): - return SysType.AIO_DX == sys_type - else: - return is_aio_system(controller_ssh=con_ssh) \ - and len(get_controllers(con_ssh=con_ssh)) == 2 - - -def is_aio_simplex(con_ssh=None, auth_info=Tenant.get('admin_platform')): - sys_type = ProjVar.get_var('SYS_TYPE') - if sys_type: - if not (con_ssh and ProjVar.get_var('IS_DC') and auth_info and - ProjVar.get_var('PRIMARY_SUBCLOUD') != auth_info.get('region', - None)): - return SysType.AIO_SX == sys_type - - return is_aio_system(controller_ssh=con_ssh, - auth_info=auth_info) and \ - len(get_controllers(con_ssh=con_ssh, auth_info=auth_info)) == 1 - - -def is_aio_system(controller_ssh=None, controller='controller-0', - auth_info=Tenant.get('admin_platform')): - """ - Whether it is AIO-Duplex or AIO-Simplex system where controller has both - controller and compute functions - Args: - controller_ssh (SSHClient): - controller (str): controller to check - auth_info - - Returns (bool): True if CPE or Simplex, else False - - """ - sys_type = ProjVar.get_var('SYS_TYPE') - if sys_type: - if not (controller_ssh and ProjVar.get_var('IS_DC') and auth_info and - ProjVar.get_var('PRIMARY_SUBCLOUD') != auth_info.get('region', - None)): - return 'aio' in sys_type.lower() - - subfunc = get_host_values(host=controller, fields='subfunctions', - con_ssh=controller_ssh, auth_info=auth_info)[0] - combined = 'controller' in subfunc and re.search('compute|worker', subfunc) - - str_ = 'not ' if not combined else '' - - LOG.info("This is {}small footprint 
system.".format(str_)) - return combined - - -def get_storage_nodes(con_ssh=None, auth_info=Tenant.get('admin_platform')): - """ - Get hostnames with 'storage' personality from system host-list - Args: - con_ssh (SSHClient): - auth_info - - Returns (list): list of hostnames. Empty list [] returns when no storage - nodes. - - """ - return get_hosts(personality='storage', con_ssh=con_ssh, - auth_info=auth_info) - - -def get_controllers(administrative=None, operational=None, availability=None, - con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - Get hostnames with 'controller' personality from system host-list - Args: - administrative - operational - availability - con_ssh (SSHClient): - auth_info - - Returns (list): list of hostnames - - """ - return get_hosts(personality='controller', administrative=administrative, - operational=operational, - availability=availability, con_ssh=con_ssh, - auth_info=auth_info) - - -def get_computes(administrative=None, operational=None, availability=None, - con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - Get hostnames with 'compute' personality from system host-list - Args: - administrative - operational - availability - con_ssh (SSHClient): - auth_info - - Returns (list): list of hostnames. Empty list [] returns when no compute - nodes. - - """ - return get_hosts(personality='compute', administrative=administrative, - operational=operational, - availability=availability, con_ssh=con_ssh, - auth_info=auth_info) - - -def get_hypervisors(administrative=None, operational=None, - availability=None, con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - Get nodes that can be used as hypervisor/worker. - e.g., in standard config, it will mean worker nodes. In DX+worker config, it will mean worker - nodes and controller nodes. 
- - Args: - administrative: - operational: - availability: - con_ssh: - auth_info: - - Returns (list): - - """ - computes = get_computes(administrative=administrative, operational=operational, - availability=availability, con_ssh=con_ssh, - auth_info=auth_info) - if is_aio_system(controller_ssh=con_ssh, - auth_info=auth_info): - computes += get_controllers(administrative=administrative, operational=operational, - availability=availability, - con_ssh=con_ssh, auth_info=auth_info) - - return computes - - -def get_hosts(personality=None, administrative=None, operational=None, - availability=None, hostname=None, strict=True, - exclude=False, con_ssh=None, - auth_info=Tenant.get('admin_platform'), - field='hostname', rtn_dict=False): - """ - Get hostnames with given criteria - Args: - personality (None|str|tuple|list): - administrative (None|str|list|tuple): - operational (None|str|list|tuple): - availability (None|str|list|tuple): - hostname (None|tuple|list|str): filter out these hosts only - strict (bool): - exclude (bool): - con_ssh (SSHClient|None): - auth_info - field (str|list|tuple) - rtn_dict (bool): Whether to return dict where each field is a key, - and value is a list - - Returns (list): hosts - - """ - if not con_ssh: - con_name = auth_info.get('region') if ( - auth_info and ProjVar.get_var('IS_DC')) else None - con_ssh = ControllerClient.get_active_controller(name=con_name) - - table_ = table_parser.table( - cli.system('host-list', ssh_client=con_ssh, - auth_info=auth_info)[1]) - - table_ = table_parser.filter_table(table_, exclude=True, hostname='None') - if hostname: - table_ = table_parser.filter_table(table_, hostname=hostname) - - if personality: - compute_personality = 'compute|worker' - if personality == 'compute': - personality = compute_personality - elif not isinstance(personality, str): - personality = list(personality) - if 'compute' in personality: - compute_index = personality.index('compute') - personality[compute_index] = compute_personality - - filters = {'personality': personality, - 'administrative': administrative, - 'operational': operational, - 'availability': availability} - filters = {k: v for k, v in filters.items() if v is not None} - if filters: - table_ = table_parser.filter_table(table_, strict=strict, - exclude=exclude, regex=True, - **filters) - - hostnames = table_parser.get_multi_values(table_, field, rtn_dict=rtn_dict) - LOG.debug("Filtered hosts: {}".format(hostnames)) - - return hostnames - - -def get_host_list_data(columns=None, con_ssh=None, - auth_info=Tenant.get('admin_platform'), source_rc=False): - """ - Args: - columns - con_ssh - auth_info - source_rc - - Returns (list of dict of hosts): - e.g., [{'administrative': 'unlocked', 'availability': 'available', 'hostname': 'controller-0', - 'id': 1, 'operational': 'enabled', 'personality': 'controller'}, - {'administrative': 'unlocked', 'availability': 'available', 'hostname': 'compute-1', - 'id': 2, 'operational': 'enabled', 'personality': 'worker'}, - {'administrative': 'unlocked', 'availability': 'available', 'hostname': 'compute-0', - 'id': 3, 'operational': 'enabled', 'personality': 'worker'}, - {'administrative': 'unlocked', 'availability': 'available', 'hostname': 'controller-1', - 'id': 4, 'operational': 'enabled', 'personality': 'controller'}, - ] - - """ - - args = "" - if columns: - for col in columns: - args += ' --column {}'.format(col) - args += " --format yaml" - - code, output = cli.system('host-list', args, ssh_client=con_ssh, - auth_info=auth_info, source_openrc=source_rc) - 
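# At this step get_host_list_data has asked host-list for '--format yaml',
# so the happy path is just yaml.safe_load on the output. A standalone
# sketch with canned output in place of the CLI call:
import yaml

output = "- hostname: controller-0\n  personality: controller\n"
hosts = yaml.safe_load(output)
assert hosts[0]['hostname'] == 'controller-0'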
- if code == 0: - return yaml.safe_load(output) - else: - LOG.error("Error with CLI command") - return output - - -def get_hosts_per_personality(availability=None, administrative=None, - operational=None, con_ssh=None, - auth_info=Tenant.get('admin_platform'), - source_rc=False, - rtn_tuple=False): - """ - Args: - availability - administrative - operational - con_ssh: - auth_info - source_rc - rtn_tuple (bool): whether to return tuple instead of dict. i.e., - <controllers>, <computes>, <storages> - - Returns (dict|tuple): - e.g., {'controller': ['controller-0', 'controller-1'], 'compute': [ - 'compute-0', 'compute-1'], 'storage': []} - - """ - table_ = table_parser.table( - cli.system('host-list', ssh_client=con_ssh, auth_info=auth_info, - source_openrc=source_rc)[1]) - personalities = ('controller', 'compute', 'storage') - res = {} - for personality in personalities: - personality_tmp = 'compute|worker' if personality == 'compute' else \ - personality - hosts = table_parser.get_values(table_, 'hostname', - personality=personality_tmp, - availability=availability, - administrative=administrative, - operational=operational, regex=True) - hosts = [host for host in hosts if host.lower() != 'none'] - res[personality] = hosts - - if rtn_tuple: - res = res['controller'], res['compute'], res['storage'] - - return res - - -def get_active_controller_name(con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - This assumes system has 1 active controller - Args: - con_ssh: - auth_info - - Returns: hostname of the active controller - Further info such as ip, uuid can be obtained via System.CONTROLLERS[ - hostname]['uuid'] - """ - return get_active_standby_controllers(con_ssh=con_ssh, auth_info=auth_info)[ - 0] - - -def get_standby_controller_name(con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - This assumes system has 1 standby controller - Args: - con_ssh: - auth_info - - Returns (str): hostname of the standby controller - Further info such as ip, uuid can be obtained via System.CONTROLLERS[ - hostname]['uuid'] - """ - active, standby = get_active_standby_controllers(con_ssh=con_ssh, - auth_info=auth_info) - return standby if standby else '' - - -def get_active_standby_controllers(con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - Get active controller name and standby controller name (if any) - Args: - con_ssh (SSHClient): - auth_info - - Returns (tuple): such as ('controller-0', 'controller-1'), - when non-active controller is in bad state or degraded - state, or any scenarios where standby controller does not exist, - this function will return - (<active_controller>, None) - - """ - table_ = table_parser.table( - cli.system('servicegroup-list', ssh_client=con_ssh, - auth_info=auth_info)[1]) - - table_ = table_parser.filter_table(table_, - service_group_name='controller-services') - active_con = table_parser.get_values(table_, 'hostname', state='active', - strict=False)[0] - standby_con = table_parser.get_values(table_, 'hostname', state='standby', - strict=False) - - standby_con = standby_con[0] if standby_con else None - return active_con, standby_con - - -def get_alarms_table(uuid=True, show_suppress=False, query_key=None, - query_value=None, query_type=None, con_ssh=None, - mgmt_affecting=None, - auth_info=Tenant.get('admin_platform'), - retry=0): - """ - Get active alarms_and_events dictionary with given criteria - Args: - uuid (bool): whether to show uuid - show_suppress (bool): whether to show suppressed alarms_and_events - query_key (str): one of these: 'event_log_id', 'entity_instance_id', - 'uuid',
'severity', - query_value (str): expected value for given key - query_type (str): data type of value. one of these: 'string', - 'integer', 'float', 'boolean' - mgmt_affecting (bool) - con_ssh (SSHClient): - auth_info (dict): - retry (None|int): number of times to retry if the alarm-list cli got - rejected - - Returns: - dict: events table in format: {'headers': , 'values': - } - """ - args = '--nowrap' - args = __process_query_args(args, query_key, query_value, query_type) - if uuid: - args += ' --uuid' - if show_suppress: - args += ' --include_suppress' - if mgmt_affecting: - args += ' --mgmt_affecting' - - fail_ok = True - if not retry: - fail_ok = False - retry = 0 - - output = None - for i in range(retry + 1): - code, output = cli.fm('alarm-list', args, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - if code == 0: - table_ = table_parser.table(output, combine_multiline_entry=True) - return table_ - - if i < retry: - time.sleep(5) - else: - raise exceptions.CLIRejected( - 'fm alarm-list cli got rejected after {} retries: {}'.format( - retry, output)) - - -def get_alarms(fields=('Alarm ID', 'Entity ID'), alarm_id=None, - reason_text=None, entity_id=None, - severity=None, time_stamp=None, strict=False, - show_suppress=False, query_key=None, query_value=None, - query_type=None, mgmt_affecting=None, con_ssh=None, - auth_info=Tenant.get('admin_platform'), - combine_entries=True): - """ - Get a list of alarms with values for specified fields. - Args: - fields (tuple): fields to get values for - alarm_id (str): filter out the table using given alarm id ( - strict=True). if None, table will not be filtered. - reason_text (str): reason text to filter out the table (strict - defined in param) - entity_id (str): entity instance id to filter out the table (strict - defined in param) - severity (str): severity such as 'critical', 'major' - time_stamp (str): - strict (bool): whether to perform strict filter on reason text, - entity_id, severity, or time_stamp - show_suppress (bool): whether to show suppressed alarms. Default to - False. - query_key (str): key in --query = passed to fm alarm-list - query_value (str): value in --query = passed to fm - alarm-list - query_type (str): 'string', 'integer', 'float', or 'boolean' - mgmt_affecting (bool) - con_ssh (SSHClient): - auth_info (dict): - combine_entries (bool): return list of strings when set to True, - else return a list of tuples. 
- e.g., when True, returns ["800.003::::cluster=829851fa", - "250.001::::host=controller-0"] - when False, returns [("800.003", "cluster=829851fa"), - ("250.001", "host=controller-0")] - - Returns (list): list of alarms with values of specified fields - - """ - - table_ = get_alarms_table(show_suppress=show_suppress, query_key=query_key, - query_value=query_value, - query_type=query_type, con_ssh=con_ssh, - auth_info=auth_info, - mgmt_affecting=mgmt_affecting) - - if alarm_id: - table_ = table_parser.filter_table(table_, **{'Alarm ID': alarm_id}) - - kwargs_dict = { - 'Reason Text': reason_text, - 'Entity ID': entity_id, - 'Severity': severity, - 'Time Stamp': time_stamp - } - - kwargs = {} - for key, value in kwargs_dict.items(): - if value is not None: - kwargs[key] = value - - if kwargs: - table_ = table_parser.filter_table(table_, strict=strict, **kwargs) - - rtn_vals_list = [] - for field in fields: - vals = table_parser.get_column(table_, field) - rtn_vals_list.append(vals) - - rtn_vals_list = zip(*rtn_vals_list) - if combine_entries: - rtn_vals_list = ['::::'.join(vals) for vals in rtn_vals_list] - else: - rtn_vals_list = list(rtn_vals_list) - - return rtn_vals_list - - -def get_suppressed_alarms(uuid=False, con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - Get suppressed alarms_and_events as dictionary - Args: - uuid (bool): whether to show uuid - con_ssh (SSHClient): - auth_info (dict): - - Returns: - dict: events table in format: {'headers': , 'values': - } - """ - args = '' - if uuid: - args += ' --uuid' - args += ' --nowrap --nopaging' - table_ = table_parser.table( - cli.fm('event-suppress-list', args, ssh_client=con_ssh, - auth_info=auth_info)[1]) - return table_ - - -def unsuppress_all_events(ssh_con=None, fail_ok=False, - auth_info=Tenant.get('admin_platform')): - """ - - Args: - ssh_con: - fail_ok: - auth_info: - - Returns (tuple): ((int), (str)) - - """ - LOG.info("Un-suppress all events") - args = '--nowrap --nopaging' - code, output = cli.fm('event-unsuppress-all', positional_args=args, - ssh_client=ssh_con, fail_ok=fail_ok, - auth_info=auth_info) - - if code == 1: - return 1, output - - if not output: - msg = "No suppressed events to un-suppress" - LOG.warning(msg) - return -1, msg - - table_ = table_parser.table(output) - if not table_['values']: - suppressed_list = [] - else: - suppressed_list = table_parser.get_values(table_, - target_header="Suppressed " - "Alarm ID's", - **{'Status': 'suppressed'}) - - if suppressed_list: - msg = "Unsuppress-all failed. Suppressed Alarm IDs: {}".format( - suppressed_list) - if fail_ok: - LOG.warning(msg) - return 2, msg - raise exceptions.NeutronError(msg) - - succ_msg = "All events unsuppressed successfully." - LOG.info(succ_msg) - return 0, succ_msg - - -def get_events(fields=('Event Log ID', 'Entity Instance ID'), limit=10, - event_id=None, entity_id=None, - severity=None, show_suppress=False, start=None, end=None, - state=None, show_uuid=True, - strict=False, time_stamp=None, reason_text=None, uuid=None, - con_ssh=None, auth_info=Tenant.get('admin_platform'), - combine_entries=True): - """ - Get a list of alarms with values for specified fields. 
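# A standalone sketch of consuming get_alarms output above with
# combine_entries=True, where each entry joins the requested fields with
# the '::::' separator:
entries = ['800.003::::cluster=829851fa', '250.001::::host=controller-0']
pairs = [tuple(entry.split('::::')) for entry in entries]
assert pairs[0] == ('800.003', 'cluster=829851fa')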
- Args: - fields (tuple|list|str): fields to get values for - limit (int) - event_id (str): filter event using event log id - reason_text (str): reason text to filter out the table (strict - defined in param) - entity_id (str): entity instance id to filter out the table (strict - defined in param) - severity (str): severity such as 'critical', 'major' - show_suppress (bool): whether to show suppressed events. Default to - False. - show_uuid (bool): Whether to show uuid in event table - start (str): display events after this time stamp - end (str): display events prior to this time stamp - state (str): filter with events state - time_stamp (str): exact timestamp for the event, filter after events - displayed - uuid (str) - strict (bool): whether to perform strict filter on reason text, - or time_stamp - con_ssh (SSHClient): - auth_info (dict): - combine_entries (bool): return list of strings when set to True, - else return a list of tuples. - e.g., when True, returns ["800.003::::cluster=829851fa", - "250.001::::host=controller-0"] - when False, returns [("800.003", "cluster=829851fa"), - ("250.001", "host=controller-0")] - - Returns (list): list of events with values of specified fields - - """ - - table_ = get_events_table(show_uuid=show_uuid, limit=limit, - event_log_id=event_id, - entity_instance_id=entity_id, - show_suppress=show_suppress, con_ssh=con_ssh, - auth_info=auth_info, - start=start, end=end, severity=severity) - - kwargs_dict = { - 'Reason Text': reason_text, - 'Time Stamp': time_stamp, - 'UUID': uuid, - 'State': state, - } - - kwargs = {} - for key, value in kwargs_dict.items(): - if value is not None: - kwargs[key] = value - - if kwargs: - table_ = table_parser.filter_table(table_, strict=strict, **kwargs) - - rtn_vals_list = [] - if isinstance(fields, str): - fields = (fields,) - for header in fields: - vals = table_parser.get_column(table_, header) - if not vals: - vals = [] - rtn_vals_list.append(vals) - - LOG.warning('{}'.format(rtn_vals_list)) - rtn_vals_list = list(zip(*rtn_vals_list)) - if combine_entries: - rtn_vals_list = ['::::'.join(vals) for vals in rtn_vals_list] - - return rtn_vals_list - - -def get_events_table(limit=5, show_uuid=False, show_only=None, - show_suppress=False, event_log_id=None, - entity_type_id=None, entity_instance_id=None, - severity=None, start=None, end=None, - con_ssh=None, auth_info=Tenant.get('admin_platform'), - regex=False, **kwargs): - """ - Get a list of events with given criteria as dictionary - Args: - limit (int): max number of event logs to return - show_uuid (bool): whether to show uuid - show_only (str): 'alarms_and_events' or 'logs' to return only - alarms_and_events or logs - show_suppress (bool): whether or not to show suppressed - alarms_and_events - event_log_id (str|None): event log id passed to system eventlog -q - event_log_id= - entity_type_id (str|None): entity_type_id passed to system eventlog - -q entity_type_id= - entity_instance_id (str|None): entity_instance_id passed to - system eventlog -q entity_instance_id= - severity (str|None): - start (str|None): start date/time passed to '--query' in format - "20170410"/"20170410 01:23:34" - end (str|None): end date/time passed to '--query' in format - "20170410"/"20170410 01:23:34" - con_ssh (SSHClient): - auth_info (dict): - regex (bool): - **kwargs: filter table after table returned - - Returns: - dict: events table in format: {'headers': , 'values': - } - """ - - args = '-l {}'.format(limit) - - # args = __process_query_args(args, query_key, query_value, query_type) 
- query_dict = { - 'event_log_id': event_log_id, - 'entity_type_id': entity_type_id, - 'entity_instance_id': entity_instance_id, - 'severity': severity, - 'start': '{}'.format(start) if start else None, - 'end': '{}'.format(end) if end else None - } - - queries = [] - for q_key, q_val in query_dict.items(): - if q_val is not None: - queries.append('{}={}'.format(q_key, str(q_val))) - - query_string = ';'.join(queries) - if query_string: - args += " -q '{}'".format(query_string) - - args += ' --nowrap --nopaging' - if show_uuid: - args += ' --uuid' - if show_only: - args += ' --{}'.format(show_only.lower()) - if show_suppress: - args += ' --include_suppress' - - table_ = table_parser.table( - cli.fm('event-list ', args, ssh_client=con_ssh, auth_info=auth_info)[1]) - - if kwargs: - table_ = table_parser.filter_table(table_, regex=regex, **kwargs) - - return table_ - - -def _compose_events_table(output, uuid=False): - if not output['headers']: - headers = ['UUID', 'Time Stamp', 'State', 'Event Log ID', 'Reason Text', - 'Entity Instance ID', 'Severity'] - if not uuid: - headers.remove('UUID') - values = [] - output['headers'] = headers - output['values'] = values - - return output - - -def __process_query_args(args, query_key, query_value, query_type): - if query_key: - if not query_value: - raise ValueError( - "Query value is not supplied for key - {}".format(query_key)) - data_type_arg = '' if not query_type else "{}::".format( - query_type.lower()) - args += ' -q {}={}"{}"'.format(query_key.lower(), data_type_arg, - query_value.lower()) - return args - - -def wait_for_events(timeout=60, num=30, uuid=False, show_only=None, - event_log_id=None, entity_type_id=None, - entity_instance_id=None, severity=None, start=None, - end=None, field='Event Log ID', - regex=False, strict=True, check_interval=5, fail_ok=True, - con_ssh=None, - auth_info=Tenant.get('admin_platform'), **kwargs): - """ - Wait for event(s) to appear in fm event-list - Args: - timeout (int): max time to wait in seconds - num (int): max number of event logs to return - uuid (bool): whether to show uuid - show_only (str): 'alarms_and_events' or 'logs' to return only - alarms_and_events or logs - fail_ok (bool): whether to return False if event(s) did not appear - within timeout - field (str): list of values to return. Defaults to 'Event Log ID' - con_ssh (SSHClient): - auth_info (dict): - regex (bool): Whether to use regex or string operation to - search/match the value in kwargs - strict (bool): whether it's a strict match (case is always ignored - regardless of this flag) - check_interval (int): how often to check the event logs - event_log_id (str|None): event log id passed to system eventlog -q - event_log_id= - entity_type_id (str|None): entity_type_id passed to system eventlog - -q entity_type_id= - entity_instance_id (str|None): entity_instance_id passed to - system eventlog -q entity_instance_id= - severity (str|None): - start (str|None): start date/time passed to '--query' in format - "20170410"/"20170410 01:23:34" - end (str|None): end date/time passed to '--query' in format - "20170410"/"20170410 01:23:34" - - **kwargs: criteria to filter out event(s) from the events list table - - Returns: - list: list of event log ids (or whatever specified in rtn_value) for - matching events. 
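# A standalone sketch of the '-q' query assembly used by get_events_table
# above: non-None criteria become key=value pairs joined with ';' and
# wrapped in quotes for the fm CLI:
query_dict = {'event_log_id': '700.004', 'severity': None, 'start': '20170410'}
queries = ['{}={}'.format(k, v) for k, v in query_dict.items() if v is not None]
args = "-l 5 -q '{}'".format(';'.join(queries))
assert args == "-l 5 -q 'event_log_id=700.004;start=20170410'"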
- - """ - end_time = time.time() + timeout - while time.time() < end_time: - events_tab = get_events_table(limit=num, show_uuid=uuid, - show_only=show_only, - event_log_id=event_log_id, - entity_type_id=entity_type_id, - entity_instance_id=entity_instance_id, - severity=severity, start=start, end=end, - con_ssh=con_ssh, auth_info=auth_info) - events_tab = table_parser.filter_table(events_tab, strict=strict, - regex=regex, **kwargs) - events = table_parser.get_column(events_tab, field) - if events: - LOG.info("Event(s) appeared in event-list: {}".format(events)) - return events - - time.sleep(check_interval) - - msg = "Event(s) did not appear in fm event-list within timeout." - if fail_ok: - LOG.warning(msg) - return [] - else: - raise exceptions.TimeoutException(msg) - - -def delete_alarms(alarms=None, fail_ok=False, con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - Delete active alarms_and_events - - Args: - alarms (list|str): UUID(s) of alarms_and_events to delete - fail_ok (bool): whether or not to raise exception if any alarm failed - to delete - con_ssh (SSHClient): - auth_info (dict): - - Returns (tuple): (rtn_code(int), message(str)) - 0, "Alarms deleted successfully" - 1, "Some alarm(s) still exist on system after attempt to delete: - " - - """ - if alarms is None: - alarms_tab = get_alarms_table(uuid=True) - alarms = [] - if alarms_tab['headers']: - alarms = table_parser.get_column(alarms_tab, 'UUID') - - if isinstance(alarms, str): - alarms = [alarms] - - LOG.info("Deleting following alarms_and_events: {}".format(alarms)) - - res = {} - failed_clis = [] - for alarm in alarms: - code, out = cli.fm('alarm-delete', alarm, ssh_client=con_ssh, - auth_info=auth_info) - res[alarm] = code, out - - if code != 0: - failed_clis.append(alarm) - - post_alarms_tab = get_alarms_table(uuid=True) - if post_alarms_tab['headers']: - post_alarms = table_parser.get_column(post_alarms_tab, 'UUID') - else: - post_alarms = [] - - undeleted_alarms = list(set(alarms) & set(post_alarms)) - if undeleted_alarms: - err_msg = "Some alarm(s) still exist on system after attempt to " \ - "delete: {}\nAlarm delete results: {}". \ - format(undeleted_alarms, res) - - if fail_ok: - return 1, err_msg - raise exceptions.SysinvError(err_msg) - - elif failed_clis: - LOG.warning( - "Some alarm-delete cli(s) rejected, but alarm no longer " - "exists.\nAlarm delete results: {}". 
- format(res)) - - succ_msg = "Alarms deleted successfully" - LOG.info(succ_msg) - return 0, succ_msg - - -def wait_for_alarm_gone(alarm_id, entity_id=None, reason_text=None, - strict=False, timeout=120, check_interval=10, - fail_ok=False, con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - Wait for given alarm to disappear from fm alarm-list - Args: - alarm_id (str): such as 200.009 - entity_id (str): entity instance id for the alarm (strict as defined - in param) - reason_text (str): reason text for the alarm (strict as defined in - param) - strict (bool): whether to perform strict string match on entity - instance id and reason - timeout (int): max seconds to wait for alarm to disappear - check_interval (int): how frequent to check - fail_ok (bool): whether to raise exception if alarm did not disappear - within timeout - con_ssh (SSHClient): - auth_info (dict): - - Returns (bool): True if alarm is gone else False - - """ - - LOG.info( - "Waiting for alarm {} to disappear from fm alarm-list".format(alarm_id)) - build_ver = get_sw_version(con_ssh=con_ssh) - - alarmcmd = 'alarm-list' - if build_ver != '15.12': - alarmcmd += ' --nowrap' - - end_time = time.time() + timeout - while time.time() < end_time: - alarms_tab = table_parser.table( - cli.fm(alarmcmd, ssh_client=con_ssh, auth_info=auth_info)[1]) - - alarm_tab = table_parser.filter_table(alarms_tab, - **{'Alarm ID': alarm_id}) - if table_parser.get_all_rows(alarm_tab): - kwargs = {} - if entity_id: - kwargs['Entity ID'] = entity_id - if reason_text: - kwargs['Reason Text'] = reason_text - - if kwargs: - alarms = table_parser.get_values(alarm_tab, - target_header='Alarm ID', - strict=strict, **kwargs) - if not alarms: - LOG.info( - "Alarm {} with {} is not displayed in fm " - "alarm-list".format( - alarm_id, kwargs)) - return True - - else: - LOG.info( - "Alarm {} is not displayed in fm alarm-list".format(alarm_id)) - return True - - time.sleep(check_interval) - - else: - err_msg = "Timed out waiting for alarm {} to disappear".format(alarm_id) - if fail_ok: - LOG.warning(err_msg) - return False - else: - raise exceptions.TimeoutException(err_msg) - - -def _get_alarms(alarms_tab): - alarm_ids = table_parser.get_column(alarms_tab, 'Alarm_ID') - entity_ids = table_parser.get_column(alarms_tab, 'Entity ID') - alarms = list(zip(alarm_ids, entity_ids)) - return alarms - - -def wait_for_alarm(field='Alarm ID', alarm_id=None, entity_id=None, reason=None, - severity=None, timeout=60, - check_interval=3, regex=False, strict=False, fail_ok=False, - con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - Wait for given alarm to appear - Args: - field: - alarm_id (str): such as 200.009 - entity_id (str|list|tuple): entity instance id for the alarm (strict - as defined in param) - reason (str): reason text for the alarm (strict as defined in param) - severity (str): severity of the alarm to wait for - timeout (int): max seconds to wait for alarm to appear - check_interval (int): how frequent to check - regex (bool): whether to use regex when matching entity instance id - and reason - strict (bool): whether to perform strict match on entity instance id - and reason - fail_ok (bool): whether to raise exception if alarm did not disappear - within timeout - con_ssh (SSHClient): - auth_info (dict): - - Returns (tuple): (, ). 
Such as (True, '200.009') or ( - False, None) - - """ - - kwargs = {} - if alarm_id: - kwargs['Alarm ID'] = alarm_id - if reason: - kwargs['Reason Text'] = reason - if severity: - kwargs['Severity'] = severity - - if entity_id and isinstance(entity_id, str): - entity_id = [entity_id] - - end_time = time.time() + timeout - while time.time() < end_time: - current_alarms_tab = get_alarms_table(con_ssh=con_ssh, - auth_info=auth_info) - if kwargs: - current_alarms_tab = table_parser.filter_table( - table_=current_alarms_tab, strict=strict, regex=regex, - **kwargs) - if entity_id: - val = [] - for entity in entity_id: - entity_filter = {'Entity ID': entity} - val_ = table_parser.get_values(current_alarms_tab, field, - strict=strict, regex=regex, - **entity_filter) - if not val_: - LOG.info( - "Alarm for entity {} has not appeared".format(entity)) - time.sleep(check_interval) - continue - val += val_ - else: - val = table_parser.get_values(current_alarms_tab, field) - - if val: - LOG.info('Expected alarm appeared. Filters: {}'.format(kwargs)) - return True, val - - time.sleep(check_interval) - - entity_str = ' for entity {}'.format(entity_id) if entity_id else '' - err_msg = "Alarm {}{} did not appear in fm alarm-list within {} " \ - "seconds".format(kwargs, entity_str, timeout) - if fail_ok: - LOG.warning(err_msg) - return False, None - - raise exceptions.TimeoutException(err_msg) - - -def wait_for_alarms_gone(alarms, timeout=120, check_interval=3, fail_ok=False, - con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - Wait for given alarms_and_events to be gone from fm alarm-list - Args: - alarms (list): list of tuple. [(, ), ...] - timeout (int): - check_interval (int): - fail_ok (bool): - con_ssh (SSHClient): - auth_info (dict): - - Returns (tuple): (res(bool), remaining_alarms(list of tuple)) - - """ - pre_alarms = list(alarms) # Don't update the original list - LOG.info( - "Waiting for alarms_and_events to disappear from fm alarm-list: " - "{}".format(pre_alarms)) - alarms_to_check = pre_alarms.copy() - - alarms_cleared = [] - - def _update_alarms(alarms_to_check_, alarms_cleared_): - current_alarms_tab = get_alarms_table(con_ssh=con_ssh, - auth_info=auth_info) - current_alarms = _get_alarms(current_alarms_tab) - - for alarm in pre_alarms: - if alarm not in current_alarms: - LOG.info( - "Removing alarm {} from current alarms_and_events list: " - "{}".format(alarm, alarms_to_check)) - alarms_to_check_.remove(alarm) - alarms_cleared_.append(alarm) - - _update_alarms(alarms_to_check_=alarms_to_check, - alarms_cleared_=alarms_cleared) - if not alarms_to_check: - LOG.info( - "Following alarms_and_events cleared: {}".format(alarms_cleared)) - return True, [] - - end_time = time.time() + timeout - while time.time() < end_time: - pre_alarms = alarms_to_check.copy() - time.sleep(check_interval) - _update_alarms(alarms_to_check_=alarms_to_check, - alarms_cleared_=alarms_cleared) - if not alarms_to_check: - LOG.info("Following alarms_and_events cleared: {}".format( - alarms_cleared)) - return True, [] - else: - err_msg = "Following alarms_and_events did not clear within {} " \ - "seconds: {}".format(timeout, alarms_to_check) - if fail_ok: - LOG.warning(err_msg) - return False, alarms_to_check - else: - raise exceptions.TimeoutException(err_msg) - - -def wait_for_all_alarms_gone(timeout=120, check_interval=3, fail_ok=False, - con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - Wait for all alarms_and_events to be cleared from fm alarm-list - Args: - timeout (int): - 
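# A standalone sketch of the bookkeeping inside wait_for_alarms_gone above:
# watched alarms still present in the current table stay on the check list,
# and everything else is moved to the cleared list:
watched = [('100.114', 'host=controller-0'), ('200.001', 'host=compute-1')]
current = [('200.001', 'host=compute-1')]
cleared = [alarm for alarm in watched if alarm not in current]
remaining = [alarm for alarm in watched if alarm in current]
assert cleared == [('100.114', 'host=controller-0')]
assert remaining == [('200.001', 'host=compute-1')]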
check_interval (int): - fail_ok (bool): - con_ssh (SSHClient): - auth_info (dict): - - Returns (tuple): (res(bool), remaining_alarms(list)) - - """ - - LOG.info( - "Waiting for all existing alarms_and_events to disappear from fm " - "alarm-list: {}".format( - get_alarms())) - - end_time = time.time() + timeout - while time.time() < end_time: - current_alarms_tab = get_alarms_table(con_ssh=con_ssh, - auth_info=auth_info) - current_alarms = _get_alarms(current_alarms_tab) - - if len(current_alarms) == 0: - return True, [] - else: - time.sleep(check_interval) - - else: - existing_alarms = get_alarms() - err_msg = "Alarms did not clear within {} seconds: {}".format( - timeout, existing_alarms) - if fail_ok: - LOG.warning(err_msg) - return False, existing_alarms - else: - raise exceptions.TimeoutException(err_msg) - - -def host_exists(host, field='hostname', con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - - Args: - host: - field: - con_ssh: - auth_info - - Returns (bool): whether given host exists in system host-list - - """ - if not field.lower() in ['hostname', 'id']: - raise ValueError("field has to be either \'hostname\' or \'id\'") - - hosts = get_hosts(con_ssh=con_ssh, auth_info=auth_info, field=field) - return host in hosts - - -def modify_system(fail_ok=True, con_ssh=None, - auth_info=Tenant.get('admin_platform'), **kwargs): - """ - Modify the System configs/info. - - Args: - fail_ok (bool): - con_ssh (SSHClient): - auth_info (dict): - **kwargs: attribute-value pairs - - Returns: (int, str) - 0 - success - 1 - error - - Test Steps: - - Set the value via system modify <attribute>=<value> [, <attribute>=<value>] - - """ - - -def set_dns_servers(nameservers, with_action_option=None, check_first=True, - fail_ok=True, con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - Set the DNS servers - Args: - nameservers (list|tuple): list of IP addresses of the DNS servers - with_action_option: whether to invoke the CLI with an "action" option - - None no "action" option at all - - install system dns-modify <...> action=install - - anystr system dns-modify <...> action=anystring... - check_first (bool) - fail_ok (bool) - con_ssh - auth_info - - Returns (tuple): - (-1, <dns_servers>) - (0, <dns_servers>) - (1, <std_err>) - - """ - if not nameservers: - raise ValueError("Please specify DNS server(s).") - - if check_first: - dns_servers = get_dns_servers(con_ssh=con_ssh, - auth_info=auth_info) - if dns_servers == nameservers and with_action_option is None: - msg = 'DNS servers already set to {}. Do nothing.'.format( - dns_servers) - LOG.info(msg) - return -1, dns_servers - - args_ = 'nameservers="{}"'.format(','.join(nameservers)) - - if with_action_option is not None: - args_ += ' action={}'.format(with_action_option) - - LOG.info('args_:{}'.format(args_)) - code, output = cli.system('dns-modify', args_, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info, - timeout=SysInvTimeout.DNS_MODIFY) - if code == 1: - return 1, output - - post_dns_servers = get_dns_servers(auth_info=auth_info, con_ssh=con_ssh) - if post_dns_servers != nameservers: - raise exceptions.SysinvError( - 'dns servers expected: {}; actual: {}'.format(nameservers, - post_dns_servers)) - - LOG.info("DNS servers successfully updated to: {}".format(nameservers)) - return 0, nameservers - - -def get_vm_topology_tables(*table_names, con_ssh=None, combine_multiline=False, - exclude_one_col_table=True, - auth_info=Tenant.get('admin')): - if con_ssh is None: - con_name = auth_info.get('region') if ( - auth_info and ProjVar.get_var('IS_DC')) else None - con_ssh = ControllerClient.get_active_controller(name=con_name) - - show_args = ','.join(table_names) - - tables_ = table_parser.tables(con_ssh.exec_sudo_cmd('vm-topology --show {}'.
- format(show_args), - expect_timeout=30)[1], - combine_multiline_entry=combine_multiline) - - if exclude_one_col_table: - new_tables = [] - for table_ in tables_: - if len(table_['headers']) > 1: - new_tables.append(table_) - return new_tables - - return tables_ - - -def __suppress_unsuppress_event(alarm_id, suppress=True, check_first=False, - fail_ok=False, con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - suppress/unsuppress an event by uuid - Args: - alarm_id (str): - fail_ok (bool): - con_ssh (SSHClient) - suppress(bool) True or false - - Returns (tuple): (rtn_code, message) - (0, ) - """ - - suppressed_alarms_tab = get_suppressed_alarms(uuid=True, con_ssh=con_ssh, - auth_info=auth_info) - - alarm_status = "unsuppressed" if suppress else "suppressed" - cmd = "event-suppress" if suppress else "event-unsuppress" - alarm_filter = {"Suppressed Event ID's": alarm_id} - - if check_first: - if not suppressed_alarms_tab['values']: - pre_status = "unsuppressed" - else: - pre_status = table_parser.get_values(table_=suppressed_alarms_tab, - target_header='Status', - strict=True, - **alarm_filter)[0] - if pre_status.lower() != alarm_status: - msg = "Event is already {}. Do nothing".format(pre_status) - LOG.info(msg) - return -1, msg - - code, output = cli.fm(cmd, '--alarm_id ' + alarm_id, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - - if code == 1: - return 1, output - - post_suppressed_alarms_tab = get_suppressed_alarms(uuid=True, - con_ssh=con_ssh) - if not post_suppressed_alarms_tab['values']: - post_status = ["unsuppressed"] - else: - post_status = table_parser.get_values(table_=post_suppressed_alarms_tab, - target_header="Status", - strict=True, - **{"Event id": alarm_id}) - expt_status = "suppressed" if suppress else "unsuppressed" - if post_status[0].lower() != expt_status: - msg = "Alarm {} is not {}".format(alarm_id, expt_status) - if fail_ok: - LOG.warning(msg) - return 2, msg - raise exceptions.TiSError(msg) - - succ_msg = "Event {} is {} successfully".format(alarm_id, expt_status) - LOG.info(succ_msg) - return 0, succ_msg - - -def suppress_event(alarm_id, check_first=False, fail_ok=False, con_ssh=None, - auth_info=Tenant.get('admin_platform')): - return __suppress_unsuppress_event(alarm_id, True, check_first=check_first, - fail_ok=fail_ok, con_ssh=con_ssh, - auth_info=auth_info) - - -def unsuppress_event(alarm_id, check_first=False, fail_ok=False, con_ssh=None, - auth_info=Tenant.get('admin_platform')): - return __suppress_unsuppress_event(alarm_id, False, check_first=check_first, - fail_ok=fail_ok, con_ssh=con_ssh, - auth_info=auth_info) - - -def generate_event(event_id='300.005', state='set', severity='critical', - reason_text='Generated for testing', - entity_id='STXAuto', unknown_text='unknown1', - unknown_two='unknown2', con_ssh=None): - cmd = '''fmClientCli -c "### ###{}###{}###{}###{}### ###{}### ###{}### - {}### ###True###True###"'''. 
\ - format(event_id, state, reason_text, entity_id, severity, unknown_text, - unknown_two) - - LOG.info("Generate system event: {}".format(cmd)) - if not con_ssh: - con_ssh = ControllerClient.get_active_controller() - - output = con_ssh.exec_cmd(cmd, fail_ok=False)[1] - event_uuid = re.findall(UUID, output)[0] - LOG.info("Event {} generated successfully".format(event_uuid)) - - return event_uuid - - -def get_service_parameter_values(service=None, section=None, name=None, - field='value', con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - Returns the list of values from system service-parameter-list - service, section, name can be used to filter the table - Args: - field (str): field to return valueds for. Default to 'value' - service (str): - section (str): - name (str): - con_ssh: - auth_info - - Returns (list): - - """ - kwargs = {} - if service: - kwargs['service'] = service - if section: - kwargs['section'] = section - if name: - kwargs['name'] = name - - table_ = table_parser.table( - cli.system('service-parameter-list --nowrap', ssh_client=con_ssh, - auth_info=auth_info)[1]) - return table_parser.get_values(table_, field, **kwargs) - - -def create_service_parameter(service, section, name, value, con_ssh=None, - fail_ok=False, check_first=True, - modify_existing=True, verify=True, apply=False, - auth_info=Tenant.get('admin_platform')): - """ - Add service-parameter - system service-parameter-add (service) (section) (name)=(value) - Args: - service (str): Required - section (str): Required - name (str): Required - value (str): Required - con_ssh: - fail_ok: - check_first (bool): Check if the service parameter exists before - modify_existing (bool): Whether to modify the service parameter if it - already exists - verify: this enables to skip the verification. sometimes not all - values are displayed in the - service-parameter-list, ex password - apply (bool): whether to apply service parameter after add - auth_info - - Returns (tuple): (rtn_code, err_msg or param_uuid) - - """ - if check_first: - val = get_service_parameter_values(service=service, section=section, - name=name, con_ssh=con_ssh, - auth_info=auth_info) - if val: - val = val[0] - msg = "The service parameter {} {} {} already exists. 
value: " \ - "{}".format(service, section, name, val) - LOG.info(msg) - if value != val and modify_existing: - return modify_service_parameter(service, section, name, value, - create=False, apply=apply, - con_ssh=con_ssh, - fail_ok=fail_ok, - check_first=False, - verify=verify, - auth_info=auth_info) - return -1, msg - - LOG.info("Creating service parameter") - args = service + ' ' + section + ' ' + name + '=' + value - res, out = cli.system('service-parameter-add', args, ssh_client=con_ssh, - fail_ok=fail_ok) - if res == 1: - return 1, out - - LOG.info("Verifying the service parameter value") - val = get_service_parameter_values(service=service, section=section, - name=name, con_ssh=con_ssh, - auth_info=auth_info)[0] - value = value.strip('\"') - if verify: - if val != value: - msg = 'The service parameter was not added with the correct ' \ - 'value {} to {}'.format(val, value) - if fail_ok: - return 2, msg - raise exceptions.SysinvError(msg) - LOG.info("Service parameter was added with the correct value") - uuid = get_service_parameter_values(field='uuid', service=service, - section=section, name=name, - con_ssh=con_ssh, - auth_info=auth_info)[0] - if apply: - apply_service_parameters(service, wait_for_config=True, - con_ssh=con_ssh, - auth_info=auth_info) - - return 0, uuid - - -def modify_service_parameter(service, section, name, value, apply=False, - con_ssh=None, fail_ok=False, - check_first=True, create=True, verify=True, - auth_info=Tenant.get('admin_platform')): - """ - Modify a service parameter - Args: - service (str): Required - section (str): Required - name (str): Required - value (str): Required - apply - con_ssh: - fail_ok: - check_first (bool): Check if the parameter exists first - create (bool): Whether to create the parameter if it does not exist - verify: this enables to skip the verification. 
sometimes not all - values are displayed in the service-parameter-list, ex password - auth_info - - Returns (tuple): (rtn_code, message) - - """ - if check_first: - val = get_service_parameter_values(service=service, section=section, - name=name, con_ssh=con_ssh) - if not val: - msg = "The service parameter {} {} {} doesn't exist".format(service, - section, - name) - LOG.info(msg) - if create: - return create_service_parameter(service, section, name, value, - auth_info=auth_info, - con_ssh=con_ssh, - fail_ok=fail_ok, - check_first=False) - return -1, msg - if val[0] == value: - msg = "The service parameter value is already set to {}".format(val) - return -1, msg - - LOG.info("Modifying service parameter") - args = service + ' ' + section + ' ' + name + '=' + value - res, out = cli.system('service-parameter-modify', args, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - - if res == 1: - return 1, out - - LOG.info("Verifying the service parameter value") - val = get_service_parameter_values(service=service, section=section, - name=name, con_ssh=con_ssh, - auth_info=auth_info)[0] - value = value.strip('\"') - if verify: - if val != value: - msg = 'The service parameter was not modified to the correct value' - if fail_ok: - return 2, msg - raise exceptions.SysinvError(msg) - msg = "Service parameter modified to {}".format(val) - LOG.info(msg) - - if apply: - apply_service_parameters(service, wait_for_config=True, con_ssh=con_ssh, - auth_info=auth_info) - - return 0, msg - - -def delete_service_parameter(uuid, con_ssh=None, fail_ok=False, - check_first=True, - auth_info=Tenant.get('admin_platform')): - """ - Delete a service parameter - Args: - uuid (str): Required - con_ssh: - fail_ok: - check_first (bool): Check if the service parameter exists before - auth_info - - Returns (tuple): - - """ - if check_first: - uuids = get_service_parameter_values(field='uuid', con_ssh=con_ssh, - auth_info=auth_info) - if uuid not in uuids: - return -1, "There is no service parameter with uuid {}".format(uuid) - - res, out = cli.system('service-parameter-delete', uuid, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - - if res == 1: - return 1, out - - LOG.info("Deleting service parameter") - uuids = get_service_parameter_values(field='uuid', con_ssh=con_ssh, - auth_info=auth_info) - if uuid in uuids: - err_msg = "Service parameter was not deleted" - if fail_ok: - return 2, err_msg - raise exceptions.SysinvError(err_msg) - msg = "The service parameter {} was deleted".format(uuid) - LOG.info(msg) - return 0, msg - - -def apply_service_parameters(service, wait_for_config=True, timeout=300, - con_ssh=None, - fail_ok=False, - auth_info=Tenant.get('admin_platform')): - """ - Apply service parameters - Args: - service (str): Required - wait_for_config (bool): Wait for config out of date alarms to clear - timeout (int): - con_ssh: - auth_info - fail_ok: - - Returns (tuple): (rtn_code, message) - - """ - LOG.info("Applying service parameters {}".format(service)) - res, out = cli.system('service-parameter-apply', service, - ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info) - - if res == 1: - return res, out - - alarm_id = '250.001' - time.sleep(10) - - if wait_for_config: - LOG.info("Waiting for config-out-of-date alarms to clear. 
" - "There may be cli errors when active controller's config " - "updates") - end_time = time.time() + timeout - while time.time() < end_time: - table_ = get_alarms_table(uuid=True, con_ssh=con_ssh, retry=3) - alarms_tab = table_parser.filter_table(table_, - **{'Alarm ID': alarm_id}) - uuids = table_parser.get_values(alarms_tab, 'uuid') - if not uuids: - LOG.info("Config has been applied") - break - time.sleep(5) - else: - err_msg = "The config has not finished applying after timeout" - if fail_ok: - return 2, err_msg - raise exceptions.TimeoutException(err_msg) - - return 0, "The {} service parameter was applied".format(service) - - -def get_system_health_query(con_ssh=None, - auth_info=Tenant.get('admin_platform')): - output = cli.system('health-query', ssh_client=con_ssh, fail_ok=False, - auth_info=auth_info, source_openrc=True)[1] - output = output.splitlines() - failed = [] - for line in output: - if "[Fail]" in line: - failed_item = line.split(sep=': ')[0] - failed.append(failed_item.strip()) - - if failed: - return 1, failed - else: - return 0, None - - -def get_build_info(con_ssh=None, refresh=False): - """ - Get build info from /etc/build.info - Args: - con_ssh: - refresh: - - Returns (dict): - - """ - - build_info = ProjVar.get_var('BUILD_INFO') - if build_info and not refresh: - return build_info - - con_client = con_ssh - code, output = con_client.exec_cmd('cat /etc/build.info') - build_info = {} - for line in output.splitlines(): - if '="' in line: - key, value = re.findall('(.*)="(.*)"', line)[0] - build_info[key] = value - - for mandatory_key in ('BUILD_ID', 'BUILD_HOST', 'BUILD_BY', 'JOB'): - if mandatory_key not in build_info: - build_info[mandatory_key] = '' - - ProjVar.set_var(BUILD_INFO=build_info) - sw_version = build_info.get('SW_VERSION') - if sw_version: - existing_versions = ProjVar.get_var('SW_VERSION') - if not (existing_versions and sw_version == existing_versions[-1]): - ProjVar.set_var(append=True, SW_VERSION=sw_version) - - return build_info - - -def get_sw_version(con_ssh=None, use_existing=True): - """ - - Args: - con_ssh: - use_existing - - Returns (str): e.g., 16.10 - - """ - sw_versions = ProjVar.get_var('SW_VERSION') - if use_existing and sw_versions: - return sw_versions[-1] - - info_dict = get_build_info(con_ssh=con_ssh, refresh=True) - return info_dict.get('SW_VERSION') - - -def install_license(license_path, timeout=30, con_ssh=None): - if con_ssh is None: - con_ssh = ControllerClient.get_active_controller() - - cmd = "test -e {}".format(license_path) - rc = con_ssh.exec_cmd(cmd, fail_ok=True)[0] - - if rc != 0: - msg = "The {} file missing from active controller".format(license_path) - return rc, msg - - cmd = "sudo license-install " + license_path - con_ssh.send(cmd) - end_time = time.time() + timeout - rc = 1 - while time.time() < end_time: - index = con_ssh.expect( - [con_ssh.prompt, Prompt.PASSWORD_PROMPT, Prompt.Y_N_PROMPT], - timeout=timeout) - if index == 2: - con_ssh.send('y') - - if index == 1: - con_ssh.send(HostLinuxUser.get_password()) - - if index == 0: - rc = con_ssh.exec_cmd("echo $?")[0] - con_ssh.flush() - break - - return rc - - -def wait_for_services_enable(timeout=300, fail_ok=False, con_ssh=None): - """ - Wait for services to be enabled-active in system service-list - Args: - timeout (int): max wait time in seconds - fail_ok (bool): whether return False or raise exception when some - services fail to reach enabled-active state - con_ssh (SSHClient): - - Returns (tuple): ((bool), (str)) - (True, "All services are enabled-active") 
- (False, "Some services are not enabled-active: ") - Applicable if fail_ok=True - - """ - LOG.info("Wait for services to be enabled-active in system service-list") - service_list_tab = None - end_time = time.time() + timeout - while time.time() < end_time: - service_list_tab = table_parser.table( - cli.system('service-list', ssh_client=con_ssh)[1]) - states = table_parser.get_column(service_list_tab, 'state') - if all(state == 'enabled-active' for state in states): - LOG.info("All services are enabled-active in system service-list") - return True, "All services are enabled-active" - - LOG.warning( - "Not all services are enabled-ative within {} seconds".format(timeout)) - inactive_services_tab = table_parser.filter_table(service_list_tab, - exclude=True, - state='enabled-active') - msg = "Some services are not enabled-active: {}".format( - table_parser.get_all_rows(inactive_services_tab)) - if fail_ok: - return False, msg - raise exceptions.SysinvError(msg) - - -def enable_service(service_name, con_ssh=None, - auth_info=Tenant.get('admin_platform'), fail_ok=False): - """ - Enable Service - Args: - service_name (str): - con_ssh (SSHClient): - auth_info (dict): - fail_ok: whether return False or raise exception when some services - fail to reach enabled-active state - - Returns (tuple): - - """ - - res, output = cli.system('service-enable', service_name, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - if res == 1: - return 1, output - - msg = "Service enabled: {}".format(service_name) - LOG.info(msg) - return 0, msg - - -def disable_service(service_name, con_ssh=None, - auth_info=Tenant.get('admin_platform'), fail_ok=False): - """ - Disable Service - Args: - service_name (str) - con_ssh (SSHClient): - auth_info (dict): - fail_ok: whether return False or raise exception when some services - fail to reach enabled-active state - - Returns (tuple): - - """ - - res, output = cli.system('service-disable', service_name, - ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info) - if res == 1: - return 1, output - - msg = "Service disabled: {}".format(service_name) - LOG.info(msg) - return 0, msg - - -def get_system_networks(field='uuid', uuid=None, net_type=None, mtu=None, - dynamic=None, pool_uuid=None, - auth_info=Tenant.get('admin_platform'), con_ssh=None, - strict=True, - regex=None, **kwargs): - """ - Get networks values from system network-list - Args: - field: 'uuid' (default) - uuid: - net_type: - mtu: - dynamic: - pool_uuid: - auth_info: - con_ssh: - strict: - regex: - **kwargs: - - Returns (list): - """ - table_ = table_parser.table( - cli.system('network-list --nowrap', ssh_client=con_ssh, - auth_info=auth_info)[1]) - args_temp = { - 'uuid': uuid, - 'type': net_type, - 'mtu': mtu, - 'dynamic': dynamic, - 'pool_uuid': pool_uuid - } - kwargs.update({k: v for k, v in args_temp.items() if v is not None}) - return table_parser.get_multi_values(table_, field, strict=strict, - regex=regex, **kwargs) - - -def get_clusters(field='uuid', uuid=None, cluster_uuid=None, ntype=None, - name=None, - auth_info=Tenant.get('admin_platform'), con_ssh=None, - strict=True, regex=None, **kwargs): - """ - Get cluster values from system cluster-list - Args: - field: 'uuid' (default) - uuid: - cluster_uuid: - ntype: (mapped as ntype) - name: - auth_info: - con_ssh: - strict: - regex: - **kwargs: - - Returns (list): - - """ - table_ = table_parser.table( - cli.system('cluster-list --nowrap', ssh_client=con_ssh, - auth_info=auth_info)[1]) - args_temp = { - 'uuid': uuid, - 'cluster_uuid': 
cluster_uuid, - 'ntype': ntype, - 'name': name, - } - kwargs.update({k: v for k, v in args_temp.items() if v is not None}) - return table_parser.get_multi_values(table_, field, strict=strict, - regex=regex, **kwargs) - - -def get_services(field='id', service_id=None, service_name=None, hostname=None, - state=None, - auth_info=Tenant.get('admin_platform'), con_ssh=None, - strict=True, regex=None, **kwargs): - """ - Get service_list through service service-list command - Args: - field: 'id' (default value) - service_id: - service_name: - hostname: - state: - auth_info: - con_ssh: - strict: - regex: - **kwargs: - - Returns (list): - - """ - table_ = table_parser.table( - cli.system('service-list --nowrap', ssh_client=con_ssh, - auth_info=auth_info)[1]) - args_temp = { - 'id': service_id, - 'service_name': service_name, - 'hostname': hostname, - 'state': state - } - kwargs.update({k: v for k, v in args_temp.items() if v is not None}) - return table_parser.get_multi_values(table_, field, strict=strict, - regex=regex, **kwargs) - - -def get_servicenodes(field='id', servicenode_id=None, name=None, - operational=None, availability=None, - ready_state=None, auth_info=Tenant.get('admin_platform'), - con_ssh=None, strict=True, - regex=None, **kwargs): - """ - Get servicenodes list through service servicenode-list - - Args: - field (str|tuple|list): 'id' (default) - servicenode_id: - name: - operational: - availability: - ready_state: - auth_info: - con_ssh: - strict: - regex: - **kwargs: - - Returns (list): - - """ - table_ = table_parser.table( - cli.system('servicenode-list --nowrap', ssh_client=con_ssh, - auth_info=auth_info)[1]) - args_temp = { - 'id': servicenode_id, - 'name': name, - 'operational': operational, - 'ready_state': ready_state, - 'availability': availability - } - kwargs.update({k: v for k, v in args_temp.items() if v is not None}) - return table_parser.get_multi_values(table_, field, strict=strict, - regex=regex, **kwargs) - - -def get_servicegroups(fields='uuid', uuid=None, service_group_name=None, - hostname=None, state=None, - auth_info=Tenant.get('admin_platform'), con_ssh=None, - strict=True, regex=None, **kwargs): - """ - Get servicegroups via system servicegroup-list - Args: - fields: 'uuid' (default) - uuid: - service_group_name: - hostname: - state: - auth_info: - con_ssh: - strict: - regex - **kwargs: - - Returns (list): - - """ - table_ = table_parser.table( - cli.system('servicegroup-list', ssh_client=con_ssh, - auth_info=auth_info)[1]) - args_temp = { - 'uuid': uuid, - 'service_group_name': service_group_name, - 'hostname': hostname, - 'state': state - } - kwargs.update({k: v for k, v in args_temp.items() if v is not None}) - return table_parser.get_multi_values(table_, fields, strict=strict, - regex=regex, **kwargs) - - -def get_oam_values(fields=None, con_ssh=None, - auth_info=Tenant.get('admin_platform'), rtn_dict=True): - """ - Get oam info via system oam-show - Args: - fields: - con_ssh: - auth_info: - rtn_dict - - Returns (dict|list): - - """ - table_ = table_parser.table( - cli.system('oam-show', ssh_client=con_ssh, auth_info=auth_info)[1]) - - if not fields: - fields = table_parser.get_column(table_, 'Property') - fields = [field for field in fields if field.startswith('oam_')] - - return table_parser.get_multi_values_two_col_table(table_, fields, - rtn_dict=rtn_dict) - - -def modify_oam_ips(fail_ok=False, con_ssh=None, - auth_info=Tenant.get('admin_platform'), **kwargs): - """ - Modify oam ip(s) - Args: - fail_ok: - con_ssh: - auth_info: - - Returns: - - """ 
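For context, a minimal usage sketch of the OAM helpers above, assuming an initialized lab session where the keywords package is importable; the IP value is hypothetical:

from keywords import system_helper

# Read current OAM settings via 'system oam-show'.
oam = system_helper.get_oam_values(fields=['oam_floating_ip'])

# Reconfigure the floating IP; modify_oam_ips verifies the new value,
# lock/unlocks the standby then the active controller, and reverts the
# change if the standby never raises the config out-of-date alarm.
code, msg = system_helper.modify_oam_ips(oam_floating_ip='10.10.10.3')
assert code == 0, msg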
- if not kwargs: - raise ValueError("Nothing is provided to modify") - - args = ' '.join(['{}={}'.format(key, val) for key, val in kwargs.items()]) - LOG.info("Modify oam ip(s): {}".format(args)) - if not con_ssh: - con_ssh = ControllerClient.get_active_controller() - - original = get_oam_values(fields=list(kwargs.keys()), auth_info=auth_info, - con_ssh=con_ssh) - code, output = cli.system('oam-modify', args, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - if code > 0: - return 1, output - - oam_info = get_oam_values(fields=list(kwargs.keys()), auth_info=auth_info, - con_ssh=con_ssh) - for field, expt_val in kwargs.items(): - actual_val = oam_info[field] - if expt_val != actual_val: - raise exceptions.SysinvError( - "{} expected: {}, actual: {}".format(field, expt_val, - actual_val)) - - from keywords import host_helper - active, standby = get_active_standby_controllers(con_ssh=con_ssh, - auth_info=auth_info) - standby_configured = True - if standby: - standby_configured = False - if wait_for_alarm(alarm_id=EventLogID.CONFIG_OUT_OF_DATE, - entity_id=standby, timeout=120, - con_ssh=con_ssh, fail_ok=True, - auth_info=auth_info)[0]: - host_helper.lock_unlock_hosts(standby, auth_info=auth_info, - con_ssh=con_ssh) - standby_configured = True - - if not standby_configured: - revert_args = ' '.join( - ['{}={}'.format(key, val) for key, val in original.items()]) - LOG.error("Failed to modify oam ip. Revert to: {}".format(revert_args)) - cli.system('oam-modify', revert_args, ssh_client=con_ssh, - auth_info=auth_info) - raise exceptions.SysinvError( - "Config out-of-date alarm did not appear or standby controller " - "lock/unlock " - "failed after oam-modify.") - - # Update system ssh client and global var - fip_field = 'oam_if' if is_aio_simplex(con_ssh=con_ssh, - auth_info=auth_info) else \ - 'oam_floating_ip' - new_lab = ProjVar.get_var('lab') - if fip_field in kwargs: - new_fip = kwargs[fip_field] - con_ssh.update_host() - new_lab['floating ip'] = new_fip - if 'oam_c0_ip' in kwargs: - new_lab['controller-0 ip'] = kwargs['oam_c0_ip'] - if 'oam_c1_ip' in kwargs: - new_lab['controller-1 ip'] = kwargs['oam_c1_ip'] - ProjVar.set_var(LAB=new_lab) - - host_helper.lock_unlock_hosts(active, con_ssh=con_ssh, auth_info=auth_info) - LOG.info("Wait for config out-of-date alarm to clear on system") - wait_for_alarm_gone(alarm_id=EventLogID.CONFIG_OUT_OF_DATE, timeout=120, - auth_info=auth_info, - con_ssh=con_ssh) - - msg = "OAM IP(s) modified successfully." - LOG.info(msg) - return 0, msg - - -def modify_spectre_meltdown_version(version='spectre_meltdown_all', - check_first=True, con_ssh=None, - fail_ok=False, - auth_info=Tenant.get('admin_platform')): - """ - Modify spectre meltdown version - Args: - version (str): valid values: spectre_meltdown_v1, spectre_meltdown_all. - Other values will be rejected by system modify cmd. - check_first (bool): - con_ssh: - fail_ok (bool): - auth_info - - Returns (tuple): - (-1, "Security feature already set to <version>. Do nothing") - (0, "System security_feature is successfully modified to: <version>") - (1, <std_err>) - - """ - current_version = get_system_values(fields='security_feature')[0] - if not current_version: - skip('spectre_meltdown update feature is unavailable in current load') - - from keywords import host_helper - hosts = get_hosts(con_ssh=con_ssh) - check_val = 'nopti nospectre_v2 nospectre_v1' - if check_first and version == current_version: - LOG.info( - "{} already set in 'system show'.
Checking actual cmdline options " - "on each host.".format( - version)) - hosts_to_configure = [] - for host in hosts: - cmdline_options = host_helper.get_host_cmdline_options(host=host) - if 'v1' in version: - if check_val not in cmdline_options: - hosts_to_configure.append(host) - elif check_val in cmdline_options: - hosts_to_configure.append(host) - - hosts = hosts_to_configure - if not hosts_to_configure: - msg = 'Security feature already set to {}. Do nothing.'.format( - current_version) - LOG.info(msg) - return -1, msg - - LOG.info("Set spectre_meltdown version to {}".format(version)) - code, output = cli.system('modify -S {}'.format(version), - ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info) - if code > 0: - return 1, output - - conf_storage0 = False - if 'storage-0' in hosts: - hosts.remove('storage-0') - conf_storage0 = True - - active_controller = get_active_controller_name(con_ssh=con_ssh, - auth_info=auth_info) - conf_active = False - if active_controller in hosts: - hosts.remove(active_controller) - conf_active = True - - if hosts: - LOG.info( - "Lock/unlock unconfigured hosts other than active controller: " - "{}".format(hosts)) - try: - for host in hosts: - host_helper.lock_host(host=host, con_ssh=con_ssh, - auth_info=auth_info) - finally: - host_helper.unlock_hosts(hosts=hosts, fail_ok=False, - con_ssh=con_ssh, auth_info=auth_info) - host_helper.wait_for_hosts_ready(hosts=hosts, con_ssh=con_ssh, - auth_info=auth_info) - - if conf_storage0: - LOG.info("Lock/unlock storage-0") - try: - host_helper.lock_host(host='storage-0', con_ssh=con_ssh, - auth_info=auth_info) - finally: - host_helper.unlock_host(host='storage-0', con_ssh=con_ssh, - auth_info=auth_info) - - if conf_active: - LOG.info( - "Lock/unlock active controller (swact first if needed): {}".format( - active_controller)) - try: - host_helper.lock_host(host=active_controller, swact=True, - con_ssh=con_ssh, auth_info=auth_info) - finally: - host_helper.unlock_host(host=active_controller, con_ssh=con_ssh, - auth_info=auth_info) - - LOG.info("Check 'system show' is updated to {}".format(version)) - post_version = \ - get_system_values(fields='security_feature', auth_info=auth_info)[0] - assert version == post_version, 'Value is not {} after system ' \ - 'modify'.format(version) - - LOG.info('Check cmdline options are updated on each host via /proc/cmdline') - hosts.append(active_controller) - for host in hosts: - options = host_helper.get_host_cmdline_options(host=host) - if 'v1' in version: - assert check_val in options, '{} not in cmdline options after set' \ - ' to {}'.format(check_val, version) - else: - assert check_val not in options, '{} in cmdline options after set' \ - ' to {}'.format(check_val, version) - - msg = 'System spectre meltdown version is successfully modified to: ' \ - '{}'.format(version) - LOG.info(msg) - return 0, msg - - -def is_avs(con_ssh=None): - vswitch_type = ProjVar.get_var('VSWITCH_TYPE') - if vswitch_type is None: - vswitch_type = get_system_values(fields='vswitch_type', - con_ssh=con_ssh)[0] - ProjVar.set_var(VSWITCH_TYPE=vswitch_type) - return 'ovs' not in vswitch_type - - -def get_controller_uptime(con_ssh, auth_info=Tenant.get('admin_platform')): - """ - Get uptime for all controllers. If no standby controller, then we only - calculate for current active controller. 
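A usage sketch for modify_spectre_meltdown_version above, assuming an initialized session; the return codes follow its docstring (-1 for a no-op, 0 for modified and verified):

from keywords import system_helper

# Full pass: 'system modify -S', then lock/unlock every unconfigured host,
# storage-0, and finally the active controller (with a swact first).
code, msg = system_helper.modify_spectre_meltdown_version(
    version='spectre_meltdown_all')
assert code in (-1, 0), msg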
- Args: - con_ssh - auth_info - - Returns (int): in seconds - """ - active_con, standby_con = get_active_standby_controllers( - con_ssh=con_ssh, auth_info=auth_info) - active_con_uptime = int( - get_host_values(host=active_con, fields='uptime', con_ssh=con_ssh, - auth_info=auth_info)[0]) - - con_uptime = active_con_uptime - if standby_con: - standby_con_uptime = int( - get_host_values(host=standby_con, fields='uptime', con_ssh=con_ssh, - auth_info=auth_info)[0]) - con_uptime = min(active_con_uptime, standby_con_uptime) - - return con_uptime - - -def add_ml2_extension_drivers(drivers, auth_info=Tenant.get('admin_platform'), - con_ssh=None): - """ - Add given ml2 extension drivers to helm charts override if they don't - currently exist - Args: - drivers (str|list|tuple): - auth_info: - con_ssh: - - Returns (tuple): - - """ - return __update_ml2_extension_drivers(drivers=drivers, enable=True, - auth_info=auth_info, con_ssh=con_ssh) - - -def remove_ml2_extension_drivers(drivers, - auth_info=Tenant.get('admin_platform'), - con_ssh=None): - """ - Remove given ml2 extension drivers from helm charts override if they exist - Args: - drivers (str|list|tuple): - auth_info: - con_ssh: - - Returns (tuple): - - """ - return __update_ml2_extension_drivers(drivers=drivers, enable=False, - auth_info=auth_info, con_ssh=con_ssh) - - -def __update_ml2_extension_drivers(drivers, enable=True, - auth_info=Tenant.get('admin_platform'), - con_ssh=None): - """ - Add or remove ml2 extension drivers by updating helm charts user override - - Args: - drivers (str|list|tuple): - enable (bool): whether to enable or disable given ml2 extension - driver(s) - auth_info: - con_ssh: - - Returns (tuple): - - """ - if isinstance(drivers, str): - drivers = (drivers,) - - from keywords import container_helper - known_drivers = ['port_security', 'qos', 'dns'] - all_drivers = known_drivers + [driver for driver in drivers if - driver not in known_drivers] - chart = 'neutron' - - LOG.info("Check existing ml2 extension_drivers") - field = 'combined_overrides' - combined_overrides = \ - container_helper.get_helm_override_values(chart, namespace='openstack', - fields=field)[0] - current_drivers = combined_overrides['conf'].get('plugins', {}).get( - 'ml2_conf', {}).get('ml2', {}). \ - get('extension_drivers', '').split(sep=',') - - if enable: - expt_drivers = set(current_drivers + list(drivers)) - # convert expt_drivers to ordered list by removing unwanted drivers - # from ordered all_drivers list - drivers_to_remove = set(all_drivers) - expt_drivers - expt_drivers = [driver for driver in all_drivers if - driver not in drivers_to_remove] - else: - expt_drivers = [driver for driver in current_drivers if - driver not in drivers] - - if expt_drivers == current_drivers: - LOG.info("ml2 extension drivers already set to {}. 
Do nothing.".format( - expt_drivers)) - return -1, current_drivers - - path = 'conf.plugins.ml2_conf.ml2.extension_drivers' - new_value = ','.join(expt_drivers) - LOG.info("Update neutron helm-override: {}={}".format(path, new_value)) - if len(expt_drivers) <= 1: - kw_args = {'kv_pairs': {path: new_value}} - else: - content = """ - conf: - plugins: - ml2_conf: - ml2: - extension_drivers: {} - """.format(new_value) - yaml_file = os.path.join(HostLinuxUser.get_home(), 'ml2_drivers.yaml') - if not con_ssh: - con_ssh = ControllerClient.get_active_controller() - con_ssh.exec_cmd('rm -f {}'.format(yaml_file), get_exit_code=False) - con_ssh.exec_cmd("echo '{}' >> {}".format(content, yaml_file)) - kw_args = {'yaml_file': yaml_file} - - container_helper.update_helm_override(chart=chart, namespace='openstack', - auth_info=auth_info, con_ssh=con_ssh, - **kw_args) - post_overrides = \ - container_helper.get_helm_override_values(chart, namespace='openstack', - fields=field)[0] - post_drivers = post_overrides['conf'].get('plugins', {}).\ - get('ml2_conf', {}).get('ml2', {}).get('extension_drivers', '').\ - split(sep=',') - - if not post_drivers == expt_drivers: - raise exceptions.SysinvError( - "ml2 extension_drivers override is not reflected") - - LOG.info("Re-apply stx-openstack application") - container_helper.apply_app(app_name='stx-openstack', applied_timeout=1200, - auth_info=auth_info, con_ssh=con_ssh) - return 0, post_drivers - - -def get_ptp_values(fields='mode', rtn_dict=False, con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - Get values from system ptp-show table. - Args: - fields (str|tuple|list): - rtn_dict (bool): whether to return dict or list - con_ssh: - auth_info - - Returns (list|dict): - - """ - table_ = table_parser.table( - cli.system('ptp-show', ssh_client=con_ssh, auth_info=auth_info)[1]) - return table_parser.get_multi_values_two_col_table(table_, fields, - rtn_dict=rtn_dict, - merge_lines=True) - - -def modify_ptp(enabled=None, mode=None, transport=None, mechanism=None, - fail_ok=False, con_ssh=None, clear_alarm=True, - wait_with_best_effort=False, check_first=True, - auth_info=Tenant.get('admin_platform')): - """ - Modify ptp with given parameters - Args: - enabled (bool|None): - mode (str|None): - transport (str|None): - mechanism (str|None): - fail_ok (bool): - clear_alarm (bool): - wait_with_best_effort (bool): - check_first: - auth_info (dict): - con_ssh: - - Returns: - - """ - args_map = { - 'enabled': enabled, - 'mode': mode, - 'transport': transport, - 'mechanism': mechanism, - } - - args_dict = {} - for key, val in args_map.items(): - if val is not None: - args_dict[key] = str(val) - - if not args_dict: - raise ValueError("At least one parameter has to be specified.") - - arg_str = ' '.join(['--{} {}'.format(k, v) for k, v in args_dict.items()]) - - if check_first: - actual_val_list = get_ptp_values(fields=list(args_dict.keys()), - con_ssh=con_ssh, rtn_dict=True, - auth_info=auth_info) - changeparm = False - for field in args_dict: - param_val = args_dict[field] - actual_val = actual_val_list[field] - if actual_val != param_val: - changeparm = True - break - if not changeparm: - return -1, 'No parameter chage' - - code, output = cli.system('ptp-modify', arg_str, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - if code > 0: - return 1, output - - if clear_alarm: - wait_and_clear_config_out_of_date_alarms( - host_type='controller', - wait_with_best_effort=wait_with_best_effort, - con_ssh=con_ssh, - auth_info=auth_info) - - post_args = 
get_ptp_values(fields=list(args_dict.keys()), con_ssh=con_ssh, - rtn_dict=True, auth_info=auth_info) - for field in args_dict: - expt_val = args_dict[field] - actual_val = post_args[field] - if actual_val != expt_val: - raise exceptions.SysinvError( - "{} in ptp-show is not as expected after modify. Expt: {}; " - "actual: {}". - format(field, expt_val, actual_val)) - - msg = 'ptp modified successfully. {}'.format( - 'Alarm not cleared yet.' if not clear_alarm else '') - return 0, msg - - -def get_ntp_values(fields='ntpservers', rtn_dict=False, con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - Get values from system ntp-show table. - Args: - fields (str|tuple|list): - rtn_dict (bool) - con_ssh: - auth_info - - Returns (list|dict): - - """ - table_ = table_parser.table( - cli.system('ntp-show', ssh_client=con_ssh, auth_info=auth_info)[1]) - return table_parser.get_multi_values_two_col_table(table_, fields, - rtn_dict=rtn_dict) - - -def get_ntp_servers(con_ssh=None, auth_info=Tenant.get('admin_platform')): - """ - Get ntp servers via system ntp-show - Args: - con_ssh: - auth_info: - - Returns (list): - - """ - ntp_servers = get_ntp_values(fields='ntpservers', rtn_dict=False, - con_ssh=con_ssh, auth_info=auth_info) - ntp_servers = ntp_servers[0].split(',') - return ntp_servers - - -def modify_ntp(enabled=None, ntp_servers=None, check_first=True, fail_ok=False, - clear_alarm=True, - wait_with_best_effort=False, con_ssh=None, - auth_info=Tenant.get('admin_platform'), **kwargs): - """ - - Args: - enabled (bool|None): - ntp_servers (str|None|list|tuple): - check_first (bool) - fail_ok (bool) - clear_alarm (bool): Whether to wait and lock/unlock hosts to clear alarm - wait_with_best_effort (bool): whether to wait for alarm with best - effort only - con_ssh: - auth_info: - **kwargs - - Returns (tuple): - (0, <success_msg>) - (1, <std_err>) # cli rejected - - """ - arg = '' - verify_args = {} - if enabled is not None: - arg += '--enabled {}'.format(enabled).lower() - verify_args['enabled'] = str(enabled) - - if ntp_servers: - if isinstance(ntp_servers, (tuple, list)): - ntp_servers = ','.join(ntp_servers) - arg += ' ntpservers="{}"'.format(ntp_servers) - verify_args['ntpservers'] = ntp_servers - - if kwargs: - for k, v in kwargs.items(): - arg += ' {}={}'.format(k, v) - verify_args[k] = v - - if not arg: - raise ValueError( - "Nothing to modify. enabled, ntp_servers or kwargs has to be " - "provided") - - prev_args = None - toggle_state = False - if enabled is not None: - prev_args = get_ntp_values(fields=list(verify_args.keys()), - con_ssh=con_ssh, rtn_dict=True, - auth_info=auth_info) - if prev_args['enabled'] != verify_args['enabled']: - toggle_state = True - - if check_first and not toggle_state: - if not clear_alarm or (clear_alarm and not get_alarms( - alarm_id=EventLogID.CONFIG_OUT_OF_DATE, con_ssh=con_ssh, - entity_id='controller', auth_info=auth_info)): - if not prev_args: - prev_args = get_ntp_values(fields=list(verify_args.keys()), - con_ssh=con_ssh, rtn_dict=True, - auth_info=auth_info) - - for field in verify_args: - expt_val = verify_args[field] - actual_val = prev_args[field] - if actual_val != expt_val: - break - else: - msg = 'NTP already configured with given criteria {}.
Do ' \ - 'nothing.'.format(verify_args) - LOG.info(msg) - return -1, msg - - code, out = cli.system('ntp-modify', arg.strip(), ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - if code > 0: - return 1, out - - if clear_alarm: - # config out-of-date alarm only on controller if only ntp servers are - # changed. - # If ntp state changes, ALL hosts need to be lock/unlock. - host_type = None if toggle_state else 'controller' - wait_and_clear_config_out_of_date_alarms( - host_type=host_type, - con_ssh=con_ssh, - auth_info=auth_info, - wait_with_best_effort=wait_with_best_effort) - - post_args = get_ntp_values(fields=list(verify_args.keys()), con_ssh=con_ssh, - rtn_dict=True, auth_info=auth_info) - for field in verify_args: - expt_val = verify_args[field] - actual_val = post_args[field] - if actual_val != expt_val: - raise exceptions.SysinvError( - "{} in ntp-show is not as expected after modify. Expt: {}; " - "actual: {}". - format(field, expt_val, actual_val)) - - msg = 'ntp modified successfully. {}'.format( - 'Alarm not cleared yet.' if not clear_alarm else '') - return 0, msg - - -def wait_and_clear_config_out_of_date_alarms( - hosts=None, host_type=None, lock_unlock=True, wait_timeout=60, - wait_with_best_effort=False, clear_timeout=60, con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - Wait for config out-of-date alarms on given hosts and (lock/unlock and) - wait for clear - Args: - hosts: - host_type (str|list|tuple): valid types: controller, compute, storage - lock_unlock (bool) - wait_timeout (int) - wait_with_best_effort (bool): - clear_timeout (int) - con_ssh: - auth_info - - Returns: - - """ - from keywords.host_helper import lock_unlock_hosts - - if not hosts: - if not host_type: - host_type = ('controller', 'compute', 'storage') - elif isinstance(host_type, str): - host_type = (host_type,) - - avail_states = (HostAvailState.DEGRADED, HostAvailState.AVAILABLE, - HostAvailState.ONLINE) - hosts_per_type = get_hosts_per_personality(availability=avail_states, - con_ssh=con_ssh, - auth_info=auth_info) - - # host_groups: ordered list for controller, compute, storage hosts - host_groups = [hosts_per_type[type_] for type_ in host_type if - hosts_per_type[type_]] - if not host_groups: - raise exceptions.HostError( - "No valid hosts found for host_type: {}".format(host_type)) - - else: - if isinstance(hosts, str): - hosts = [hosts] - host_groups = [hosts] - - hosts_out_of_date = [] - all_hosts = [] - for hosts_ in host_groups: - LOG.info( - "Wait for config out-of-date alarms for {} with best effort".format( - hosts_)) - all_hosts += hosts_ - if wait_for_alarm(alarm_id=EventLogID.CONFIG_OUT_OF_DATE, - entity_id=hosts_, timeout=wait_timeout, - con_ssh=con_ssh, fail_ok=True, - auth_info=auth_info)[0]: - hosts_out_of_date += hosts_ - - hosts_out_of_date = list(set(hosts_out_of_date)) - all_hosts = list(set(all_hosts)) - LOG.info("Config out-of-date hosts: {}".format(hosts_out_of_date)) - if hosts_out_of_date: - if lock_unlock: - LOG.info( - "Wait for 60 seconds, then lock/unlock config out-of-date " - "hosts: {}".format(hosts_out_of_date)) - time.sleep(60) - lock_unlock_hosts(hosts_out_of_date, con_ssh=con_ssh, - auth_info=auth_info) - - LOG.info("Wait for config out-of-date alarm to clear on system") - wait_for_alarm_gone(alarm_id=EventLogID.CONFIG_OUT_OF_DATE, - timeout=clear_timeout, auth_info=auth_info, - con_ssh=con_ssh) - - if not wait_with_best_effort and all_hosts != hosts_out_of_date: - raise exceptions.SysinvError( - "Expect config out of date: {}; actual: 
{}".format( - all_hosts, hosts_out_of_date)) - - -def get_timezone(auth_info=Tenant.get('admin_platform'), con_ssh=None): - return get_system_values(fields='timezone', auth_info=auth_info, - con_ssh=con_ssh)[0] - - -def modify_timezone(timezone, check_first=True, fail_ok=False, clear_alarm=True, - auth_info=Tenant.get('admin_platform'), - con_ssh=None): - """ - Modify timezone to given zone - Args: - timezone: - check_first: - fail_ok: - clear_alarm: - auth_info: - con_ssh: - - Returns (tuple): - - """ - if check_first: - current_timezone = get_timezone(auth_info=auth_info, con_ssh=con_ssh) - if current_timezone == timezone: - msg = "Timezone is already set to {}. Do nothing.".format(timezone) - LOG.info(msg) - return -1, msg - - LOG.info("Modifying Timezone to {}".format(timezone)) - code, out = modify_system(fail_ok=fail_ok, auth_info=auth_info, - con_ssh=con_ssh, timezone=timezone) - if code > 0: - return 1, out - - if clear_alarm: - if wait_for_alarm(alarm_id=EventLogID.CONFIG_OUT_OF_DATE, timeout=30, - con_ssh=con_ssh, fail_ok=True, - auth_info=auth_info)[0]: - wait_for_alarm_gone(alarm_id=EventLogID.CONFIG_OUT_OF_DATE, - timeout=180, con_ssh=con_ssh, - auth_info=auth_info) - - time.sleep(10) - post_timezone = get_timezone(auth_info=auth_info, con_ssh=con_ssh) - if post_timezone != timezone: - msg = 'Timezone is {} instead of {} after modify'.format(post_timezone, - timezone) - if fail_ok: - LOG.warning(msg) - return 2, post_timezone - - raise exceptions.SysinvError(msg) - - LOG.info("Timezone is successfully modified to {}".format(timezone)) - return 0, timezone - - -def create_data_network(name, net_type='vlan', mode=None, mtu=None, - port_num=None, multicast_group=None, ttl=None, - description=None, field='uuid', fail_ok=False, - con_ssh=None, - auth_info=Tenant.get('admin_platform'), cleanup=None): - """ - Add a datanetwork - Args: - name (str): - net_type (str): vlan, vxlan or flat - mode (None|str|None): - mtu (int|str|None): - port_num (int|str|None): - multicast_group (str|None): - ttl (int|str|None): - description (str|None): - field (str): uuid or name - fail_ok: - con_ssh: - auth_info: - cleanup (str|None): function, class, module or session - - Returns (tuple): - (0, ) - (1, ) - - """ - args_dict = { - 'description': description, - 'mtu': mtu, - 'port_num': port_num, - 'multicast_group': multicast_group, - 'ttl': ttl, - 'mode': mode, - } - args = '{} {} {}'.format(common.parse_args(args_dict), name, net_type) - code, output = cli.system('datanetwork-add', args, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - if code > 0: - return 1, output - - table_ = table_parser.table(output) - LOG.info("data network {} is created successfully".format(name)) - - if cleanup: - uuid = table_parser.get_value_two_col_table(table_, field='uuid') - ResourceCleanup.add('datanetwork', uuid, scope=cleanup) - - return 0, table_parser.get_value_two_col_table(table_, field) - - -def get_data_networks(field='name', con_ssh=None, - auth_info=Tenant.get('admin_platform'), strict=True, - **kwargs): - """ - Get values from system datanetwork-list - Args: - field (str|tuple|list): - con_ssh: - auth_info: - strict: - **kwargs: - - Returns (list): - - """ - table_ = table_parser.table( - cli.system('datanetwork-list', ssh_client=con_ssh, auth_info=auth_info)[ - 1]) - return table_parser.get_multi_values(table_, fields=field, strict=strict, - **kwargs) - - -def get_data_network_values(datanetwork, fields=('uuid',), fail_ok=False, - con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ 
- Get datanetwork values from system datanetwork-show table. - Args: - datanetwork (str): name or uuid of datanetwork - fields (str|tuple|list): - fail_ok: - con_ssh: - auth_info: - - Returns (list|None): values for given fields. None if cli is rejected. - - """ - code, output = cli.system('datanetwork-show', datanetwork, - ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info) - if code > 0: - return None - - table_ = table_parser.table(output) - return table_parser.get_multi_values_two_col_table(table_=table_, - fields=fields) - - -def delete_data_network(datanetwork_uuid, fail_ok=False, con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - Delete given datanetwork - Args: - datanetwork_uuid (str): - fail_ok: - con_ssh: - auth_info: - - Returns (tuple): - (0, "datanetwork deleted successfully") - (1, ) - (2, "datanetwork still exists after deletion") - - """ - code, output = cli.system('datanetwork-delete', datanetwork_uuid, - ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info) - if code > 0: - return 1, output - - if get_data_network_values(datanetwork=datanetwork_uuid, con_ssh=con_ssh, - auth_info=auth_info, fail_ok=True): - err = 'datanetwork {} still exists after deletion'.format( - datanetwork_uuid) - LOG.warning(err) - if fail_ok: - return 2, err - else: - raise exceptions.SysinvError(err) - - msg = 'datanetwork {} deleted successfully'.format(datanetwork_uuid) - LOG.info(msg) - return 0, msg - - -def get_addr_pools(field, name=None, uuid=None, con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - Get values from system addrpool-list - Args: - field (str|list|tuple): - name: - uuid: - con_ssh: - auth_info: - - Returns (list): - - """ - table_ = table_parser.table( - cli.system('addrpool-list --nowrap', ssh_client=con_ssh, - auth_info=auth_info)[1]) - - kwargs = {'name': name, 'uuid': uuid} - return table_parser.get_multi_values(table_=table_, fields=field, - **{k: v for k, v in kwargs.items()}) - - -def get_addr_pool_values(fields, addr_pool=None, network_type=None, - con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - Get values from system addrpool-show - Args: - fields (str|tuple|list): - addr_pool: - network_type: - con_ssh: - auth_info: - - Returns (list): - - """ - if not addr_pool and not network_type: - raise ValueError('addr_pool uuid or network_type has to be provided') - - if not addr_pool: - addr_pool = \ - get_system_networks(field='pool_uuid', net_type=network_type, - con_ssh=con_ssh, auth_info=auth_info)[0] - if not addr_pool: - raise exceptions.SysinvError( - "No pool_uuid found for network type {}".format(network_type)) - - table_ = table_parser.table( - cli.system('addrpool-show', addr_pool, ssh_client=con_ssh, - auth_info=auth_info)[1]) - return table_parser.get_multi_values_two_col_table(table_, fields=fields) - - -def get_system_network_cidr(network_type, con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - Get cidr for given network type, such as mgmt, oam, cluster-host, etc. 
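This helper simply joins the pool's network address and prefix; the same composition in isolation, with hypothetical values standing in for what get_addr_pool_values would return:

import ipaddress

# Hypothetical values, as returned by
# get_addr_pool_values(fields=('network', 'prefix'), network_type='mgmt').
network, prefix = '192.168.204.0', '28'
cidr = '{}/{}'.format(network, prefix)
# ipaddress validates the result and supports the membership test used
# by get_ntpq_status() below.
assert ipaddress.ip_address('192.168.204.3') in ipaddress.ip_network(cidr)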
- Args: - network_type: - con_ssh: - auth_info: - - Returns (str): - - """ - network, prefix = get_addr_pool_values(fields=('network', 'prefix'), - network_type=network_type, - con_ssh=con_ssh, - auth_info=auth_info) - - return '{}/{}'.format(network, prefix) - - -def get_host_values(host, fields, rtn_dict=False, merge_lines=True, - auth_info=Tenant.get('admin_platform'), - con_ssh=None): - """ - Get values from system host-show - Args: - host (str): - fields (str|list|tuple): - rtn_dict: - merge_lines - auth_info: - con_ssh: - - Returns (list): - - """ - table_ = table_parser.table( - cli.system('host-show', host, ssh_client=con_ssh, auth_info=auth_info)[ - 1], - combine_multiline_entry=merge_lines) - return table_parser.get_multi_values_two_col_table(table_, fields, - rtn_dict=rtn_dict, - evaluate=True) - - -def get_hosts_values(hosts, fields, merge_lines=False, rtn_dict_per_host=True, - con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - Get values for multiple hosts via system host-show - Args: - hosts: - fields: - merge_lines: - rtn_dict_per_host: - con_ssh: - auth_info: - - Returns (dict): - e.g., {'controller-0': {'task': booting, 'subfunctions': ...}, - 'controller-1':...} - - """ - if isinstance(fields, str): - fields = [fields] - - states_vals = {} - for host in hosts: - vals = get_host_values(host, fields, merge_lines=merge_lines, - con_ssh=con_ssh, - rtn_dict=rtn_dict_per_host, auth_info=auth_info) - states_vals[host] = vals - - return states_vals - - -def get_ntpq_status(host, mgmt_cidr=None, con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - Get ntp status via 'sudo ntpq -pn' - - Args: - host (str): host to check - mgmt_cidr (str): internal management ip from peer host - con_ssh (SSHClient) - auth_info - - Returns (tuple): (<code>, <msg>) - - (0, "<host> NTPQ is in healthy state") - - (1, "No NTP server selected") - - (2, "Some NTP servers are discarded") - - """ - if not mgmt_cidr: - mgmt_cidr = get_system_network_cidr('mgmt', con_ssh=con_ssh, - auth_info=auth_info) - - cmd = 'ntpq -pn' - from keywords import host_helper - with host_helper.ssh_to_host(host, con_ssh=con_ssh) as host_ssh: - output = host_ssh.exec_sudo_cmd(cmd, fail_ok=False)[1] - - output_lines = output.splitlines() - server_lines = list(output_lines) - for line in output_lines: - server_lines.remove(line) - if '======' in line: - break - - selected = None - discarded = [] - for server_line in server_lines: - try: - # Check if it's an internal mgmt net ip - if ipaddress.ip_address(server_line[1:]) in ipaddress.ip_network( - mgmt_cidr): - continue - except ValueError: - pass - - if server_line.startswith('*'): - selected = server_line - elif server_line.startswith('-') or server_line.startswith( - 'x') or server_line.startswith(' '): - discarded.append(server_line) - - if not selected: - return 1, "No NTP server selected" - - if discarded: - return 2, "Some NTP servers are discarded" - - return 0, "{} NTPQ is in healthy state".format(host) - - -def wait_for_ntp_sync(host, timeout=MiscTimeout.NTPQ_UPDATE, fail_ok=False, - con_ssh=None, - auth_info=Tenant.get('admin_platform')): - """ - Wait for ntp alarm to be in line with sudo ntpq output.
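get_ntpq_status above keys off the one-character tally code at the start of each 'ntpq -pn' server line; a self-contained sketch of that classification, with made-up sample lines:

# '*' marks the selected peer; '-', 'x' and ' ' mark discarded peers.
sample_lines = [
    '*64.99.80.121   .GPS.          1 u  33  64  377  1.1  0.01  0.02',
    '-192.95.27.155  24.150.203.1   2 u  12  64  377 30.2  1.20  0.30',
]
selected = [line for line in sample_lines if line.startswith('*')]
discarded = [line for line in sample_lines if line[0] in ('-', 'x', ' ')]
assert len(selected) == 1 and len(discarded) == 1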
- Args: - host: - timeout: - fail_ok: - con_ssh: - auth_info: - - Returns (bool): - - """ - - LOG.info("Waiting for ntp alarm to clear or sudo ntpq -pn indicate " - "unhealthy server for {}".format(host)) - end_time = time.time() + timeout - msg = ntp_alarms = None - if not con_ssh: - con_name = auth_info.get('region') if ( - auth_info and ProjVar.get_var('IS_DC')) else None - con_ssh = ControllerClient.get_active_controller(name=con_name) - - mgmt_cidr = get_system_network_cidr('mgmt', con_ssh=con_ssh, - auth_info=auth_info) - while time.time() < end_time: - ntp_alarms = get_alarms(alarm_id=EventLogID.NTP_ALARM, entity_id=host, - strict=False, - con_ssh=con_ssh, auth_info=auth_info) - status, msg = get_ntpq_status(host, mgmt_cidr=mgmt_cidr, - con_ssh=con_ssh, auth_info=auth_info) - if ntp_alarms and status != 0: - LOG.info("Valid NTP alarm") - return True - elif not ntp_alarms and status == 0: - LOG.info("NTP alarm cleared and sudo ntpq shows servers healthy") - return True - - LOG.info("NTPQ status: {}; NTP alarms: {}".format(msg, ntp_alarms)) - time.sleep(30) - - err_msg = "Timed out waiting for NTP alarm to be in sync with ntpq " \ - "output. NTPQ status: {}; NTP alarms: {}".format(msg, ntp_alarms) - if fail_ok: - LOG.warning(err_msg) - return False - - raise exceptions.HostTimeout(err_msg) - - -def __hosts_stay_in_states(hosts, duration=10, con_ssh=None, - auth_info=Tenant.get('admin_platform'), - **states): - """ - Check if hosts stay in specified state(s) for given duration. - - Args: - hosts (list|str): hostname(s) - duration (int): duration to check for in seconds - con_ssh (SSHClient): - **states: such as availability=[online, available] - - Returns: - bool: True if host stayed in specified states for given duration; - False if host is not in specified states - anytime in the duration. 
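__hosts_stay_in_states inverts the usual wait loop: instead of polling until a condition becomes true, it polls for the whole window and fails on the first mismatch. The pattern in isolation, with check() as a stand-in for the real host-list query:

import time

def stays_true_for(check, duration, interval=1):
    # Fail fast on the first unsuccessful check; succeed only if check()
    # held for the entire window.
    end_time = time.time() + duration
    while time.time() < end_time:
        if not check():
            return False
        time.sleep(interval)
    return True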
- - """ - end_time = time.time() + duration - while time.time() < end_time: - if not __hosts_in_states(hosts=hosts, con_ssh=con_ssh, - auth_info=auth_info, **states): - return False - time.sleep(1) - - return True - - -def wait_for_hosts_states(hosts, timeout=HostTimeout.REBOOT, check_interval=5, - duration=3, con_ssh=None, - fail_ok=True, auth_info=Tenant.get('admin_platform'), - **states): - """ - Wait for hosts to go in specified states via system host-list - - Args: - hosts (str|list): - timeout (int): - check_interval (int): - duration (int): wait for a host to be in given state(s) for at - least seconds - con_ssh (SSHClient): - fail_ok (bool) - auth_info - **states: such as availability=[online, available] - - Returns (bool): True if host reaches specified states within timeout, - and stays in states for given duration; False otherwise - - """ - if not hosts: - raise ValueError("No host(s) provided to wait for states.") - - if isinstance(hosts, str): - hosts = [hosts] - for key, value in states.items(): - if isinstance(value, str): - value = [value] - states[key] = value - - LOG.info("Waiting for {} to reach state(s): {}...".format(hosts, states)) - end_time = time.time() + timeout - while time.time() < end_time: - if __hosts_stay_in_states(hosts, con_ssh=con_ssh, - duration=duration, auth_info=auth_info, - **states): - LOG.info("{} have reached state(s): {}".format(hosts, states)) - return True - time.sleep(check_interval) - else: - msg = "Timed out waiting for {} in state(s) - {}".format(hosts, states) - if fail_ok: - LOG.warning(msg) - return False - raise exceptions.HostTimeout(msg) - - -def __hosts_in_states(hosts, con_ssh=None, - auth_info=Tenant.get('admin_platform'), - **states): - actual_values = get_hosts(hostname=hosts, field=list(states.keys()), - con_ssh=con_ssh, - auth_info=auth_info, rtn_dict=True) - for field, expt_values in states.items(): - actual_states = actual_values[field] - for actual_state in actual_states: - if actual_state not in expt_values: - LOG.debug("At least one host from {} has {} state(s) in {} " - "instead of {}".format(hosts, field, actual_state, - expt_values)) - return False - - return True - - -def wait_for_host_values(host, timeout=HostTimeout.REBOOT, check_interval=3, - strict=True, regex=False, fail_ok=True, - con_ssh=None, auth_info=Tenant.get('admin_platform'), - **kwargs): - """ - Wait for host values via system host-show - Args: - host: - timeout: - check_interval: - strict: - regex: - fail_ok: - con_ssh: - auth_info - **kwargs: key/value pair to wait for. 
- - Returns: - - """ - if not kwargs: - raise ValueError( - "Expected host state(s) has to be specified via " - "keyword argument states") - - LOG.info("Waiting for {} to reach state(s) - {}".format(host, kwargs)) - end_time = time.time() + timeout - last_vals = {} - for field in kwargs: - last_vals[field] = None - - while time.time() < end_time: - actual_vals = get_host_values(host, fields=list(kwargs.keys()), - con_ssh=con_ssh, rtn_dict=True, - auth_info=auth_info, merge_lines=False) - for field, expt_vals in kwargs.items(): - actual_val = actual_vals[field] - if isinstance(actual_val, list): - actual_val = ' '.join(actual_val) - - actual_val_lower = actual_val.lower() - if isinstance(expt_vals, str): - expt_vals = [expt_vals] - - for expected_val in expt_vals: - expected_val_lower = expected_val.strip().lower() - found_match = False - if regex: - if strict: - res_ = re.match(expected_val_lower, actual_val_lower) - else: - res_ = re.search(expected_val_lower, actual_val_lower) - if res_: - found_match = True - else: - if strict: - found_match = actual_val_lower == expected_val_lower - else: - found_match = actual_val_lower in expected_val_lower - - if found_match: - LOG.info( - "{} {} has reached: {}".format(host, field, actual_val)) - break - else: # no match found. run system host-show again - if last_vals[field] != actual_val_lower: - LOG.info("{} {} is {}.".format(host, field, actual_val)) - last_vals[field] = actual_val_lower - break - else: - LOG.info("{} is in state(s): {}".format(host, kwargs)) - return True - time.sleep(check_interval) - else: - msg = "{} did not reach state(s) within {}s - {}".format(host, timeout, - kwargs) - if fail_ok: - LOG.warning(msg) - return False - raise exceptions.TimeoutException(msg) - - -def is_active_controller(host, con_ssh=None, - auth_info=Tenant.get('admin_platform')): - personality = get_host_values(host, fields='capabilities', - auth_info=auth_info, - merge_lines=True, - con_ssh=con_ssh)[0].get('Personality', '') - return personality.lower() == 'Controller-Active'.lower() - - -def is_lowlatency_host(host): - subfuncs = get_host_values(host=host, fields='subfunctions')[0] - return 'lowlatency' in subfuncs - - -def get_system_iplist(): - """ - Checks whether the system is ipv4 or ipv6, simplex or not, and returns - the ip list accordingly - Returns: the system ipv4/ipv6 ip list - """ - ip = [] - out = get_oam_values() - if is_aio_simplex(): - ip.append(out["oam_ip"]) - else: - ip.extend([out["oam_floating_ip"], out["oam_c0_ip"], out["oam_c1_ip"]]) - if ProjVar.get_var('IPV6_OAM'): - iplist = ["[{}]".format(i) for i in ip] - ip = iplist - return ip diff --git a/automated-pytest-suite/keywords/vm_helper.py b/automated-pytest-suite/keywords/vm_helper.py deleted file mode 100755 index 0d8f4cee..00000000 --- a/automated-pytest-suite/keywords/vm_helper.py +++ /dev/null @@ -1,6292 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc.
-# -# SPDX-License-Identifier: Apache-2.0 -# - - -import copy -import math -import os -import random -import re -import time -import ipaddress -import pexpect -from contextlib import contextmanager - -from consts.auth import Tenant, TestFileServer, HostLinuxUser -from consts.stx import VMStatus, NovaCLIOutput, EXT_IP, ImageStatus, \ - VMNetwork, EventLogID, GuestImages, Networks, FlavorSpec, VimEventID -from consts.filepaths import VMPath, UserData, TestServerPath -from consts.proj_vars import ProjVar -from consts.timeout import VMTimeout, CMDTimeout -from utils import exceptions, cli, table_parser, multi_thread -from utils import local_host -from utils.clients.ssh import NATBoxClient, VMSSHClient, ControllerClient, \ - Prompt, get_cli_client -from utils.clients.local import LocalHostClient -from utils.guest_scripts.scripts import TisInitServiceScript -from utils.multi_thread import MThread, Events -from utils.tis_log import LOG -from keywords import network_helper, nova_helper, cinder_helper, host_helper, \ - glance_helper, common, system_helper, \ - storage_helper -from testfixtures.fixture_resources import ResourceCleanup -from testfixtures.recover_hosts import HostsToRecover - - -def set_vm(vm_id, name=None, state=None, con_ssh=None, auth_info=None, - fail_ok=False, **properties): - """ - Set vm with given parameters - name, state, and/or properties - Args: - vm_id: - name: - state: - con_ssh: - auth_info: - fail_ok: - **properties: - - Returns (tuple): - (0, ) - - """ - args_dict = { - '--name': name, - '--state': state.lower() if state else None, - '--property': properties, - } - args = '{} {}'.format(common.parse_args(args_dict, repeat_arg=True), vm_id) - LOG.info("Setting vm with args: {}".format(args)) - code, output = cli.openstack('server set', args, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - if code > 0: - return 1, output - - msg = "VM {} is set successfully.".format(vm_id) - LOG.info(msg) - return 0, msg - - -def unset_vm(vm_id, properties, con_ssh=None, auth_info=None, fail_ok=False): - """ - Unset given properties for VM - Args: - vm_id: - properties: - con_ssh: - auth_info: - fail_ok: - - Returns (tuple): - (1, ) - cli rejected - (0, "VM properties unset successfully: ") - - """ - if isinstance(properties, str): - properties = (properties,) - - args = '{} {}'.format( - common.parse_args({'--property': properties}, repeat_arg=True), vm_id) - LOG.info("Unsetting vm {} properties: {}".format(vm_id, properties)) - code, output = cli.openstack('server unset', args, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - if code > 0: - return 1, output - - msg = "VM {} properties unset successfully: {}".format(vm_id, properties) - LOG.info(msg) - return 0, msg - - -def get_any_vms(count=None, con_ssh=None, auth_info=None, all_tenants=False, - rtn_new=False): - """ - Get a list of ids of any active vms. - - Args: - count (int): number of vms ids to return. If None, all vms for - specific tenant will be returned. 
If the number of - existing vms is less than count, additional vms will be created to match - the count - con_ssh (SSHClient): - auth_info (dict): - all_tenants (bool): whether to get any vms from all tenants or just - admin tenant if auth_info is set to Admin - rtn_new (bool): whether to return an extra list containing only the - newly created vms - - Returns (list): - vms(list) # rtn_new=False - [vms(list), new_vms(list)] # rtn_new=True - - """ - vms = get_vms(con_ssh=con_ssh, auth_info=auth_info, - all_projects=all_tenants, Status='ACTIVE') - if count is None: - if rtn_new: - vms = [vms, []] - return vms - diff = count - len(vms) - if diff <= 0: - vms = random.sample(vms, count) - if rtn_new: - vms = [vms, []] - return vms - - new_vms = [] - for i in range(diff): - new_vm = boot_vm(con_ssh=con_ssh, auth_info=auth_info)[1] - vms.append(new_vm) - new_vms.append(new_vm) - - if rtn_new: - vms = [vms, new_vms] - return vms - - -def create_image_from_vm(vm_id, image_name=None, wait=True, - expt_cinder_snapshot=None, - fail_ok=False, con_ssh=None, auth_info=None, - cleanup=None): - """ - Create glance image from an existing vm - Args: - vm_id: - image_name: - wait: - expt_cinder_snapshot (bool): if vm was booted from cinder volume, - then a cinder snapshot is expected - fail_ok: - con_ssh: - auth_info: - cleanup (None|str): valid scopes: function, class, module, session - - Returns (tuple): - - """ - LOG.info("Creating image from vm {}".format(vm_id)) - args_dict = {'--name': image_name, '--wait': wait} - args = '{} {}'.format(common.parse_args(args_dict), vm_id) - code, out = cli.openstack('server image create', args, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - - table_ = table_parser.table(out) - image_id = table_parser.get_value_two_col_table(table_, 'id') - cinder_snapshot_id = None - if cleanup and image_id: - ResourceCleanup.add('image', image_id, scope=cleanup) - - if code > 0: - return 1, out, cinder_snapshot_id - - post_name = table_parser.get_value_two_col_table(table_, 'name') - if image_name and image_name != post_name: - raise exceptions.NovaError( - "Created image does not have the expected name.
Actual {}, expected: " - "{}".format(post_name, image_name)) - - LOG.info( - "Wait for created image {} to reach active state".format(post_name)) - glance_helper.wait_for_image_status(image_id, status=ImageStatus.ACTIVE, - con_ssh=con_ssh, auth_info=auth_info) - - image_size = table_parser.get_value_two_col_table(table_, 'size') - if str(image_size) == '0' or expt_cinder_snapshot: - cinder_snapshotname = "snapshot for {}".format(post_name) - vol_snapshots = cinder_helper.get_vol_snapshots( - name=cinder_snapshotname) - if not vol_snapshots: - raise exceptions.CinderError( - "cinder snapshot expected, but was not found: {}".format( - cinder_snapshotname)) - cinder_snapshot_id = vol_snapshots[0] - if cleanup: - ResourceCleanup.add('vol_snapshot', cinder_snapshot_id) - - LOG.info("glance image {} successfully created from vm {}".format(post_name, - vm_id)) - return 0, image_id, cinder_snapshot_id - - -def add_security_group(vm_id, security_group, fail_ok=False, con_ssh=None, - auth_info=None): - """ - Add given security group to vm - Args: - vm_id: - security_group: - fail_ok: - con_ssh: - auth_info: - - Returns (tuple): - - """ - LOG.info("Adding security group {} to vm {}".format(security_group, vm_id)) - args = '{} {}'.format(vm_id, security_group) - code, output = cli.openstack('server add security group', args, - ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info) - if code > 0: - return 1, output - - msg = "Security group {} added to VM {} successfully".format(security_group, - vm_id) - LOG.info(msg) - return 0, msg - - -def wait_for_vol_attach(vm_id, vol_id, timeout=VMTimeout.VOL_ATTACH, - con_ssh=None, auth_info=None, fail_ok=False): - """ - Wait for volume attachment appear in openstack server show as well as - opentstack volume show - Args: - vm_id: - vol_id: - timeout: - con_ssh: - auth_info: - fail_ok: - - Returns (bool): - - """ - end_time = time.time() + timeout - while time.time() < end_time: - vols_attached = get_vm_volumes(vm_id=vm_id, con_ssh=con_ssh, - auth_info=auth_info) - if vol_id in vols_attached: - cinder_helper.wait_for_volume_status(vol_id, status='in-use', - timeout=120, fail_ok=False, - con_ssh=con_ssh, - auth_info=auth_info) - return True - time.sleep(5) - else: - msg = "Volume {} is not shown in nova show {} in {} seconds".format( - vol_id, vm_id, timeout) - LOG.warning(msg) - if not fail_ok: - raise exceptions.VMError(msg) - return False - - -def attach_vol_to_vm(vm_id, vol_id=None, device=None, mount=False, con_ssh=None, - auth_info=None, fail_ok=False, - cleanup=None): - """ - Attach a volume to VM - Args: - vm_id (str): - vol_id (str|None): volume to attach. When None, a non-bootable volume - will be created to attach to given vm - device (str|None): whether to specify --device in cmd - mount (bool): if True, login to vm and attempt to mount the device - after attached. Best effort only. 
- con_ssh: - auth_info: - fail_ok: - cleanup: - - Returns: - - """ - if not vol_id: - vol_id = \ - cinder_helper.create_volume(bootable=False, auth_info=auth_info, - con_ssh=con_ssh, cleanup=cleanup)[1] - - LOG.info("Attaching volume {} to vm {}".format(vol_id, vm_id)) - args = '{}{} {}'.format('--device {} '.format(device) if device else '', - vm_id, vol_id) - code, output = cli.openstack('server add volume', args, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - if code > 0: - return 1, output - - LOG.info( - "Waiting for attached volume to appear in openstack server show and " - "volume show") - wait_for_vol_attach(vm_id=vm_id, vol_id=vol_id, con_ssh=con_ssh, - auth_info=auth_info) - - if mount: - LOG.info("Mount attached volume {} to vm {}".format(vol_id, vm_id)) - guest = get_vm_image_name(vm_id) - if not (guest and 'cgcs-guest' in guest): - attached_devs = get_vm_volume_attachments(vm_id=vm_id, - field='device', - vol_id=vol_id, - auth_info=auth_info, - con_ssh=con_ssh) - device_name = attached_devs[0] - device = device_name.split('/')[-1] - LOG.info( - "Volume {} is attached to VM {} as {}".format(vol_id, vm_id, - device_name)) - mount_attached_volume(vm_id, device, vm_image_name=guest) - - return 0, vol_id - - -def is_attached_volume_mounted(vm_id, rootfs, vm_image_name=None, vm_ssh=None): - """ - Checks if an attached volume is mounted in VM - Args: - vm_id (str): - the vm uuid where the volume is attached to - rootfs (str) - the device name of the attached volume like vda, vdb, - vdc, .... - vm_image_name (str): - the guest image the vm is booted with - vm_ssh (VMSSHClient): ssh client session to vm - Returns: bool - - """ - - if vm_image_name is None: - vm_image_name = get_vm_image_name(vm_id) - - cmd = "mount | grep {} | wc -l".format(rootfs) - mounted_msg = "Filesystem /dev/{} is mounted: {}".format(rootfs, vm_id) - not_mount_msg = "Filesystem /dev/{} is not mounted: {}".format(rootfs, - vm_id) - if vm_ssh: - cmd_output = vm_ssh.exec_sudo_cmd(cmd)[1] - if cmd_output != '0': - LOG.info(mounted_msg) - return True - LOG.info(not_mount_msg) - return False - - with ssh_to_vm_from_natbox(vm_id, vm_image_name=vm_image_name) as vm_ssh: - - cmd_output = vm_ssh.exec_sudo_cmd(cmd)[1] - if cmd_output != '0': - LOG.info(mounted_msg) - return True - LOG.info(not_mount_msg) - return False - - -def get_vm_volume_attachments(vm_id, vol_id=None, field='device', con_ssh=None, - auth_info=Tenant.get('admin')): - """ - Get volume attachments for given vm - Args: - vm_id: - vol_id: - field: - con_ssh: - auth_info: - - Returns (list): - - """ - # No replacement in openstack client - table_ = table_parser.table( - cli.nova('volume-attachments', vm_id, ssh_client=con_ssh, - auth_info=auth_info)[1]) - return table_parser.get_values(table_, field, **{'volume id': vol_id}) - - -def mount_attached_volume(vm_id, rootfs, vm_image_name=None): - """ - Mounts an attached volume on VM - Args: - vm_id (str): - the vm uuid where the volume is attached to - rootfs (str) - the device name of the attached volume like vda, vdb, - vdc, .... 
- vm_image_name (str): - the guest image the vm is booted with - - Returns: bool - - """ - wait_for_vm_pingable_from_natbox(vm_id) - if vm_image_name is None: - vm_image_name = get_vm_image_name(vm_id) - - with ssh_to_vm_from_natbox(vm_id, vm_image_name=vm_image_name) as vm_ssh: - - if not is_attached_volume_mounted(vm_id, rootfs, - vm_image_name=vm_image_name, - vm_ssh=vm_ssh): - LOG.info("Creating ext4 file system on /dev/{} ".format(rootfs)) - cmd = "mkfs -t ext4 /dev/{}".format(rootfs) - rc, output = vm_ssh.exec_cmd(cmd) - if rc != 0: - msg = "Failed to create filesystem on /dev/{} for vm " \ - "{}: {}".format(rootfs, vm_id, output) - LOG.warning(msg) - return False - LOG.info("Mounting /dev/{} to /mnt/volume".format(rootfs)) - cmd = "test -e /mnt/volume" - rc, output = vm_ssh.exec_cmd(cmd) - mount_cmd = '' - if rc == 1: - mount_cmd += "mkdir -p /mnt/volume; mount /dev/{} " \ - "/mnt/volume".format(rootfs) - else: - mount_cmd += "mount /dev/{} /mnt/volume".format(rootfs) - - rc, output = vm_ssh.exec_cmd(mount_cmd) - if rc != 0: - msg = "Failed to mount /dev/{} for vm {}: {}".format(rootfs, - vm_id, - output) - LOG.warning(msg) - return False - - LOG.info( - "Adding /dev/{} mounting point in /etc/fstab".format(rootfs)) - cmd = "echo \"/dev/{} /mnt/volume ext4 defaults 0 0\" >> " \ - "/etc/fstab".format(rootfs) - - rc, output = vm_ssh.exec_cmd(cmd) - if rc != 0: - msg = "Failed to add /dev/{} mount point to /etc/fstab for " \ - "vm {}: {}".format(rootfs, vm_id, output) - LOG.warning(msg) - - LOG.info( - "/dev/{} is mounted to /mnt/volume for vm {}".format(rootfs, - vm_id)) - return True - else: - LOG.info( - "/dev/{} is already mounted to /mnt/volume for vm {}".format( - rootfs, vm_id)) - return True - - -def get_vm_devices_via_virsh(vm_id, con_ssh=None): - """ - Get vm disks in dict format via 'virsh domblklist ' - Args: - vm_id (str): - con_ssh: - - Returns (dict): vm disks per type. - Examples: - {'root_img': {'vda': '/dev/nova-local/a746beb9-08e4-4b08-af2a - -000c8ca72851_disk'}, - 'attached_vol': {'vdb': '/dev/disk/by-path/ip-192.168.205.106:3260-iscsi - -iqn.2010-10.org.openstack:volume-...'}, - 'swap': {}, - 'eph': {}} - - """ - vm_host = get_vm_host(vm_id=vm_id, con_ssh=con_ssh) - inst_name = get_vm_instance_name(vm_id=vm_id, con_ssh=con_ssh) - - with host_helper.ssh_to_host(vm_host, con_ssh=con_ssh) as host_ssh: - output = host_ssh.exec_sudo_cmd('virsh domblklist {}'.format(inst_name), - fail_ok=False)[1] - disk_lines = output.split('-------------------------------\n', 1)[ - -1].splitlines() - - disks = {} - root_line = disk_lines.pop(0) - root_dev, root_source = root_line.split() - if re.search('openstack:volume|cinder-volumes|/dev/sd', root_source): - disk_type = 'root_vol' - else: - disk_type = 'root_img' - disks[disk_type] = {root_dev: root_source} - LOG.info("Root disk: {}".format(disks)) - - disks.update({'eph': {}, 'swap': {}, 'attached_vol': {}}) - for line in disk_lines: - dev, source = line.split() - if re.search('disk.swap', source): - disk_type = 'swap' - elif re.search('openstack:volume|cinder-volumes|/dev/sd', source): - disk_type = 'attached_vol' - elif re.search('disk.eph|disk.local', source): - disk_type = 'eph' - else: - raise exceptions.CommonError( - "Unknown disk in virsh: {}. 
Automation update " - "required.".format( - line)) - disks[disk_type][dev] = source - - LOG.info("disks for vm {}: {}".format(vm_id, disks)) - return disks - - -def get_vm_boot_volume_via_virsh(vm_id, con_ssh=None): - """ - Get cinder volume id where the vm is booted from via virsh cmd. - Args: - vm_id (str): - con_ssh (SSHClient): - - Returns (str|None): vol_id or None if vm is not booted from cinder volume - - """ - disks = get_vm_devices_via_virsh(vm_id=vm_id, con_ssh=con_ssh) - root_vol = disks.get('root_vol', {}) - if not root_vol: - LOG.info("VM is not booted from volume. Return None") - return - - root_vol = list(root_vol.values())[0] - root_vol = re.findall('openstack:volume-(.*)-lun', root_vol)[0] - LOG.info("vm {} is booted from cinder volume {}".format(vm_id, root_vol)) - return root_vol - - -def auto_mount_vm_devices(vm_id, devices, guest_os=None, check_first=True, - vm_ssh=None): - """ - Mount and auto mount devices on vm - Args: - vm_id (str): - the vm uuid where the volume is attached to - devices (str|list) - the device name(s). such as vdc or [vda, vdb] - guest_os (str): - the guest image the vm is booted with. such as - tis-centos-guest - check_first (bool): where to check if the device is already mounted - and auto mounted before mount and automount - vm_ssh (VMSSHClient): - """ - if isinstance(devices, str): - devices = [devices] - - def _auto_mount(vm_ssh_): - _mounts = [] - for disk in devices: - fs = '/dev/{}'.format(disk) - mount_on, fs_type = storage_helper.mount_partition( - ssh_client=vm_ssh_, disk=disk, partition=fs) - storage_helper.auto_mount_fs(ssh_client=vm_ssh_, fs=fs, - mount_on=mount_on, fs_type=fs_type, - check_first=check_first) - _mounts.append(mount_on) - return _mounts - - if vm_ssh: - mounts = _auto_mount(vm_ssh_=vm_ssh) - else: - with ssh_to_vm_from_natbox(vm_id, vm_image_name=guest_os) as vm_ssh: - mounts = _auto_mount(vm_ssh_=vm_ssh) - - return mounts - - -def touch_files(vm_id, file_dirs, file_name=None, content=None, guest_os=None): - """ - touch files from vm in specified dirs,and adds same content to all - touched files. - Args: - vm_id (str): - file_dirs (list): e.g., ['/', '/mnt/vdb'] - file_name (str|None): defaults to 'test_file.txt' if set to None - content (str|None): defaults to "I'm a test file" if set to None - guest_os (str|None): default guest assumed to set to None - - Returns (tuple): (, ) - - """ - if not file_name: - file_name = 'test_file.txt' - if not content: - content = "I'm a test file" - - if isinstance(file_dirs, str): - file_dirs = [file_dirs] - file_paths = [] - with ssh_to_vm_from_natbox(vm_id=vm_id, vm_image_name=guest_os) as vm_ssh: - for file_dir in file_dirs: - file_path = "{}/{}".format(file_dir, file_name) - file_path = file_path.replace('//', '/') - vm_ssh.exec_sudo_cmd( - 'mkdir -p {}; touch {}'.format(file_dir, file_path), - fail_ok=False) - time.sleep(3) - vm_ssh.exec_sudo_cmd('echo "{}" >> {}'.format(content, file_path), - fail_ok=False) - output = \ - vm_ssh.exec_sudo_cmd('cat {}'.format(file_path), - fail_ok=False)[1] - # TO DELETE: Debugging purpose only - vm_ssh.exec_sudo_cmd('mount | grep vd') - assert content in output, "Expected content {} is not in {}. " \ - "Actual content: {}". 
\ - format(content, file_path, output) - file_paths.append(file_path) - - vm_ssh.exec_sudo_cmd('sync') - return file_paths, content - - -def auto_mount_vm_disks(vm_id, disks=None, guest_os=None): - """ - Auto mount non-root vm disks and return all the mount points including - root dir - Args: - vm_id (str): - disks (dict|None): disks returned by get_vm_devices_via_virsh() - guest_os (str|None): when None, default guest is assumed. - - Returns (list): list of mount points. e.g., ['/', '/mnt/vdb'] - - """ - if not disks: - disks_to_check = get_vm_devices_via_virsh(vm_id=vm_id) - else: - disks_to_check = copy.deepcopy(disks) - - root_disk = disks_to_check.pop('root_vol', {}) - if not root_disk: - disks_to_check.pop('root_img') - - # add root dir - mounted_on = ['/'] - devs_to_mount = [] - for val in disks_to_check.values(): - devs_to_mount += list(val.keys()) - - LOG.info("Devices to mount: {}".format(devs_to_mount)) - if devs_to_mount: - mounted_on += auto_mount_vm_devices(vm_id=vm_id, devices=devs_to_mount, - guest_os=guest_os) - else: - LOG.info("No non-root disks to mount for vm {}".format(vm_id)) - - return mounted_on - - -vif_map = { - 'e1000': 'normal', - 'rtl8139': 'normal', - 'virtio': 'normal', - 'avp': 'normal', - 'pci-sriov': 'direct', - 'pci-passthrough': 'direct-physical'} - - -def _convert_vnics(nics, con_ssh, auth_info, cleanup): - """ - Conversion from wrs vif-model to upstream implementation - Args: - nics (list|tuple|dict): - con_ssh - auth_info - cleanup (None|str) - - Returns (list): - - """ - converted_nics = [] - for nic in nics: - nic = dict(nic) # Do not modify original nic param - if 'vif-model' in nic: - vif_model = nic.pop('vif-model') - if vif_model: - vnic_type = vif_map[vif_model] - vif_model_ = vif_model if ( - system_helper.is_avs() and vnic_type == 'normal')\ - else None - if 'port-id' in nic: - port_id = nic['port-id'] - current_vnic_type, current_vif_model = \ - network_helper.get_port_values( - port=port_id, - fields=('binding_vnic_type', 'binding_profile'), - con_ssh=con_ssh, - auth_info=auth_info) - if current_vnic_type != vnic_type or ( - vif_model_ and vif_model_ not in current_vif_model): - network_helper.set_port(port_id, vnic_type=vnic_type, - con_ssh=con_ssh, - auth_info=auth_info, - wrs_vif=vif_model_) - else: - net_id = nic.pop('net-id') - port_name = common.get_unique_name( - 'port_{}'.format(vif_model)) - port_id = network_helper.create_port(net_id, name=port_name, - wrs_vif=vif_model_, - vnic_type=vnic_type, - con_ssh=con_ssh, - auth_info=auth_info, - cleanup=cleanup)[1] - nic['port-id'] = port_id - converted_nics.append(nic) - - return converted_nics - - -def boot_vm_openstack(name=None, flavor=None, block_device_mapping=None, - source=None, source_id=None, image_id=None, - image_property=None, security_groups=None, key_name=None, - inject_file=None, user_data=None, avail_zone=None, - nics=None, network=None, port=None, hint=None, - config_drive=False, min_count=None, max_count=None, - reuse_vol=False, guest_os='', wait=True, - fail_ok=False, auth_info=None, con_ssh=None, cleanup=None, - **properties): - """ - Boot a vm with given parameters using openstack - Args: - name (str): New server name - flavor (str): Create server with this flavor (name or ID) - block_device_mapping (str): Create a block device on the server.
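To illustrate the conversion helper above: a wrs-style nic carrying a vif-model is rewritten into a port-based nic whose vnic_type comes from vif_map. A hedged sketch; the net-id value is a placeholder and admin credentials are assumed:

# 'pci-sriov' maps to vnic_type='direct' in vif_map, so _convert_vnics
# pre-creates a port with that type and returns a port-based nic.
nics = [{'net-id': '<tenant-net-uuid>', 'vif-model': 'pci-sriov'}]
converted = _convert_vnics(nics, con_ssh=None,
                           auth_info=Tenant.get('admin'), cleanup=None)
# converted == [{'port-id': '<new-port-uuid>'}]; the original 'net-id'
# and 'vif-model' keys are consumed during the conversion.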
- Block device mapping in the format - =::: - : block device name, like: vdb, xvdc (required) - : Name or ID of the volume, volume snapshot or image (required) - : volume, snapshot or image; default: volume (optional) - : volume size if create from image or snapshot (optional) - : true or false; default: false (optional) - source (str): 'image', 'volume', 'snapshot' - source_id (str): id of the specified source. such as volume_id, image_id, or snapshot_id - image_id (str): id of glance image. Will not be used if source is image and source_id is - specified - image_property (str): Image property to be matched - security_groups (str|list|tuple): Security group/groups - to assign to this server (name or ID) - key_name (str): Keypair to inject into this server (optional extension) - inject_file (str|list|tuple): File/Files to inject into image before boot - user_data (str|list): User data file to serve from the metadata server - avail_zone (str): Select an availability zone for the server - nics (list|tuple): Create NIC's on the server. - each nic: - , - - Examples: [{'net-id': , 'vif-model': }, - {'net-id': , 'vif-model': }, ...] - Notes: valid vif-models: - virtio, avp, e1000, pci-passthrough, pci-sriov, rtl8139, ne2k_pci, pcnet - network (str|list|tuple): Create a NIC on the server and connect it to network. - This is a wrapper for the ‘–nic net-id=’ parameter that provides simple syntax - for the standard use case of connecting a new server to a given network. - For more advanced use cases, refer to the ‘–nic’ parameter. - port (str|list|tuple): Create a NIC on the server and connect it to port. - This is a wrapper for the ‘–nic port-id=’ parameter that provides simple syntax - for the standard use case of connecting a new server to a given port. - For more advanced use cases, refer to the ‘–nic’ parameter. - hint (dict): key/value pair(s) sent to scheduler for custom use, such as - group= - config_drive (|True): Use specified volume as the config drive, - or ‘True’ to use an ephemeral drive - min_count (int): Minimum number of servers to launch (default=1) - max_count (int): Maximum number of servers to launch (default=1) - reuse_vol (bool): whether or not to reuse the existing volume (default False) - guest_os (str): Valid values: 'cgcs-guest', 'ubuntu_14', 'centos_6', 'centos_7', etc. - This will be overriden by image_id if specified. - wait (bool): Wait for build to complete (default True) - fail_ok (bool): - auth_info: - con_ssh: - cleanup (str|None): valid values: 'module', 'session', 'function', 'class', - vm (and volume) will be deleted as part of teardown - - Returns (tuple): (rtn_code(int), new_vm_id_if_any(str), message(str), - new_vol_id_if_any(str)) - (0, vm_id, 'VM is booted successfully') # vm is created - successfully and in Active state. - (1, vm_id, ) # boot vm cli command failed, but vm is - still booted - (2, vm_id, "VM boot started, check skipped (wait={}).") # boot vm cli - accepted, but vm building is not - 100% completed. Only applicable when wait=False - (3, vm_id, "VM did not reach ACTIVE state within . VM - status: ") - # vm is not in Active state after created. - (4, '', ): create vm cli command failed, vm is not booted - - """ - # Prechecks - valid_cleanups = (None, 'function', 'class', 'module', 'session') - if cleanup not in valid_cleanups: - raise ValueError( - "Invalid scope provided. 
Choose from: {}".format(valid_cleanups)) - - if user_data is None and guest_os and not re.search( - GuestImages.TIS_GUEST_PATTERN, guest_os): - # create userdata cloud init file to run right after vm - # initialization to get ip on interfaces other than eth0. - user_data = _create_cloud_init_if_conf(guest_os, nics_num=len(nics)) - - if user_data and user_data.startswith('~'): - user_data = user_data.replace('~', HostLinuxUser.get_home(), 1) - - if inject_file and inject_file.startswith('~'): - inject_file = inject_file.replace('~', HostLinuxUser.get_home(), 1) - - if guest_os == 'vxworks': - LOG.tc_step("Add HPET Timer extra spec to flavor") - extra_specs = {FlavorSpec.HPET_TIMER: 'True'} - properties.update(extra_specs) - - LOG.info("Processing boot_vm_openstack args...") - # Handle mandatory arg - name - tenant = common.get_tenant_name(auth_info=auth_info) - if name is None: - name = 'vm' - name = "{}-{}".format(tenant, name) - name = common.get_unique_name(name, resource_type='vm') - - # Handle mandatory arg - key_name - key_name = key_name if key_name is not None else get_default_keypair( - auth_info=auth_info, con_ssh=con_ssh) - - # Handle mandatory arg - flavor - if flavor is None: - flavor = nova_helper.get_basic_flavor(auth_info=auth_info, - con_ssh=con_ssh, - guest_os=guest_os) - - # Handle mandatory arg - nics - if not nics: - mgmt_net_id = network_helper.get_mgmt_net_id(auth_info=auth_info, - con_ssh=con_ssh) - if not mgmt_net_id: - raise exceptions.NeutronError("Cannot find management network") - nics = [{'net-id': mgmt_net_id}] - - if 'edge' not in guest_os and 'vxworks' not in guest_os: - tenant_net_id = network_helper.get_tenant_net_id( - auth_info=auth_info, con_ssh=con_ssh) - if tenant_net_id: - nics.append({'net-id': tenant_net_id}) - - if isinstance(nics, dict): - nics = [nics] - nics = _convert_vnics(nics, con_ssh=con_ssh, auth_info=auth_info, - cleanup=cleanup) - - # Handle mandatory arg - boot source - volume_id = snapshot_id = image = None - if source is None: - if min_count is None and max_count is None: - source = 'volume' - else: - source = 'image' - - elif source.lower() == 'snapshot' and not block_device_mapping: - snapshot_id = source_id - if not snapshot_id: - snapshot_id = cinder_helper.get_vol_snapshots( - auth_info=auth_info, con_ssh=con_ssh) - if not snapshot_id: - raise ValueError( - "snapshot id is required to boot vm; however no " - "snapshot exists on the system.") - snapshot_id = snapshot_id[0] - block_device_mapping = {"vdb": "{}:snapshot".format(snapshot_id)} - vol_size, vol_id = cinder_helper.get_volume_snapshot_values(snapshot_id, ["size", "volume_id"]) - img_id = cinder_helper.get_volume_show_values(vol_id, "volume_image_metadata")[0]["image_id"] - image = img_id - if vol_size: - block_device_mapping["vdb"] = "{}:{}".format(block_device_mapping["vdb"], vol_size) - elif source.lower() == 'volume': - if source_id: - volume_id = source_id - else: - vol_name = 'vol-' + name - if reuse_vol: - volume_id = cinder_helper.get_any_volume( - new_name=vol_name, - auth_info=auth_info, - con_ssh=con_ssh, - cleanup=cleanup) - else: - volume_id = cinder_helper.create_volume( - name=vol_name, - source_id=image_id, - auth_info=auth_info, - con_ssh=con_ssh, - guest_image=guest_os, - cleanup=cleanup)[1] - elif source.lower() == 'image': - # image property is not compatible with image - if not image_property: - image = source_id if source_id else image_id - if not image: - img_name = guest_os if guest_os else GuestImages.DEFAULT['guest'] - image = 
glance_helper.get_image_id_from_name(img_name, - strict=True, - fail_ok=False) - - # create cmd - non_repeat_args = {'--flavor': flavor, - '--block-device-mapping': block_device_mapping, - '--image': image, - '--image_property': image_property, - '--volume': volume_id, - '--min-count': str(min_count) if min_count is not None else None, - '--max-count': str(max_count) if max_count is not None else None, - '--key-name': key_name, - '--user-data': user_data, - '--availability_zone': avail_zone, - '--config-drive': str(config_drive) if config_drive else None, - '--wait': wait, - } - non_repeat_args = common.parse_args(non_repeat_args, repeat_arg=False, - vals_sep=',') - - repeat_args = { - '--nic': nics, - '--network': network, - '--port': port, - '--file': inject_file, - '--security-groups': security_groups, - '--hint': hint, - '--property': properties - } - repeat_args = common.parse_args(repeat_args, repeat_arg=True, vals_sep=',') - - pre_boot_vms = [] - if not (min_count is None and max_count is None): - name_str = name + '-' - pre_boot_vms = get_vms(auth_info=auth_info, con_ssh=con_ssh, - strict=False, name=name_str) - - args_ = ' '.join([non_repeat_args, repeat_args, name]) - LOG.info("Booting VM {} with args: {}".format(name, args_)) - exitcode, output = cli.openstack('server create', positional_args=args_, - ssh_client=con_ssh, fail_ok=True, - auth_info=auth_info, - timeout=VMTimeout.BOOT_VM) - - if min_count is None and max_count is None: - table_ = table_parser.table(output) - vm_id = table_parser.get_value_two_col_table(table_, 'id') - if cleanup and vm_id: - ResourceCleanup.add('vm', vm_id, scope=cleanup, del_vm_vols=False) - # if source="snapshot": - # ResourceCleanup.add('snapshot', snapshot_id, scope=cleanup, del_vm_vols=False) - - if exitcode == 1: - if vm_id: - # print out vm show for debugging purpose - cli.openstack('server show', vm_id, ssh_client=con_ssh, - auth_info=Tenant.get('admin')) - if not fail_ok: - raise exceptions.VMOperationFailed(output) - - if vm_id: - return 1, vm_id, output # vm_id = '' if cli is rejected - # without vm created - return 4, '', output - - LOG.info("Post action check...") - vm_status = get_vm_values(vm_id, 'status', strict=True, con_ssh=con_ssh, - auth_info=auth_info)[0] - if wait: - if vm_status != VMStatus.ACTIVE: - message = "VM did not reach {} state: {}".format(VMStatus.ACTIVE, vm_status) - if fail_ok: - LOG.warning(message) - return 2, vm_id, message - else: - raise exceptions.VMPostCheckFailed(message) - else: - LOG.info("VM {} started to create, \ - check skipped because of wait argument wait={}, \ - vm status is: {}".format(vm_id, wait, vm_status)) - return 2, vm_id, "VM boot started, \ - check skipped (wait={}), \ - vm status is: {}".format(wait, vm_status) - LOG.info("VM {} is booted successfully.".format(vm_id)) - return 0, vm_id, 'VM is booted successfully' - else: - name_str = name + '-' - post_boot_vms = get_vms(auth_info=auth_info, con_ssh=con_ssh, - strict=False, name=name_str) - vm_ids = list(set(post_boot_vms) - set(pre_boot_vms)) - if cleanup and vm_ids: - ResourceCleanup.add('vm', vm_ids, scope=cleanup, del_vm_vols=False) - - if exitcode == 1: - return 1, vm_ids, output - - - for instance_id in vm_ids: - vm_status = get_vm_values(instance_id, 'status', strict=True, con_ssh=con_ssh, - auth_info=auth_info)[0] - if wait: - if vm_status != VMStatus.ACTIVE: - msg = "VMs failed to reach {} state: {}".format(VMStatus.ACTIVE, vm_status) - if fail_ok: - LOG.warning(msg) - return 3, vm_ids, msg - else: - LOG.warning("VM {} started 
to create, \ - check skipped because of wait argument wait={}, \ - vm status is: {}".format(vm_id, wait, vm_status)) - LOG.info("VMs booted successfully: {}".format(vm_ids)) - return 0, vm_ids, "VMs are booted successfully" - - -def boot_vm(name=None, flavor=None, source=None, source_id=None, image_id=None, - min_count=None, nics=None, hint=None, - max_count=None, key_name=None, swap=None, ephemeral=None, - user_data=None, block_device=None, - block_device_mapping=None, security_groups=None, vm_host=None, - avail_zone=None, file=None, - config_drive=False, meta=None, tags=None, - fail_ok=False, auth_info=None, con_ssh=None, reuse_vol=False, - guest_os='', poll=True, cleanup=None): - """ - Boot a vm with given parameters - Args: - name (str): - flavor (str): - source (str): 'image', 'volume', 'snapshot', or 'block_device' - source_id (str): id of the specified source. such as volume_id, - image_id, or snapshot_id - image_id (str): id of glance image. Will not be used if source is - image and source_id is specified - min_count (int): - max_count (int): - key_name (str): - swap (int|None): - ephemeral (int): - user_data (str|list): - vm_host (str): which host to place the vm - avail_zone (str): availability zone for vm host, Possible values: - 'nova', 'stxauto', etc - block_device (dict|list|tuple): dist or list of dict, each dictionary - is a block device. - e.g, {'source': 'volume', 'volume_id': xxxx, ...} - block_device_mapping (str): Block device mapping in the format - '=:::'. - auth_info (dict): - con_ssh (SSHClient): - security_groups (str|list|tuple): add nova boot option - --security-groups $(sec_group_name) - nics (list): nics to be created for the vm - each nic: , - vif-pci-address=pci-address> - Examples: [{'net-id': , 'vif-model': }, {'net-id': - , 'vif-model': }, ...] - Notes: valid vif-models: - virtio, avp, e1000, pci-passthrough, pci-sriov, rtl8139, - ne2k_pci, pcnet - - hint (dict): key/value pair(s) sent to scheduler for custom use. such - as group= - file (str): To store files from local - to on the new server. - config_drive (bool): To enable config drive. - meta (dict): key/value pairs for vm meta data. e.g., - {'sw:wrs:recovery_priority': 1, ...} - tags (None|str|tuple|list) - fail_ok (bool): - reuse_vol (bool): whether or not to reuse the existing volume - guest_os (str): Valid values: 'cgcs-guest', 'ubuntu_14', 'centos_6', - 'centos_7', etc. - This will be overriden by image_id if specified. - poll (bool): - cleanup (str|None): valid values: 'module', 'session', 'function', - 'class', vm (and volume) will be deleted as - part of teardown - - Returns (tuple): (rtn_code(int), new_vm_id_if_any(str), message(str), - new_vol_id_if_any(str)) - (0, vm_id, 'VM is booted successfully') # vm is created - successfully and in Active state. - (1, vm_id, ) # boot vm cli command failed, but vm is - still booted - (2, vm_id, "VM building is not 100% complete.") # boot vm cli - accepted, but vm building is not - 100% completed. Only applicable when poll=True - (3, vm_id, "VM did not reach ACTIVE state within . VM - status: ") - # vm is not in Active state after created. - (4, '', ): create vm cli command failed, vm is not booted - - """ - valid_cleanups = (None, 'function', 'class', 'module', 'session') - if cleanup not in valid_cleanups: - raise ValueError( - "Invalid scope provided. 
Choose from: {}".format(valid_cleanups)) - - LOG.info("Processing boot_vm args...") - # Handle mandatory arg - name - tenant = common.get_tenant_name(auth_info=auth_info) - if name is None: - name = 'vm' - name = "{}-{}".format(tenant, name) - name = common.get_unique_name(name, resource_type='vm') - - # Handle mandatory arg - key_name - key_name = key_name if key_name is not None else get_default_keypair( - auth_info=auth_info, con_ssh=con_ssh) - - # Handle mandatory arg - flavor - if flavor is None: - flavor = nova_helper.get_basic_flavor(auth_info=auth_info, - con_ssh=con_ssh, - guest_os=guest_os) - - if guest_os == 'vxworks': - LOG.tc_step("Add HPET Timer extra spec to flavor") - extra_specs = {FlavorSpec.HPET_TIMER: 'True'} - nova_helper.set_flavor(flavor=flavor, **extra_specs) - - # Handle mandatory arg - nics - if not nics: - mgmt_net_id = network_helper.get_mgmt_net_id(auth_info=auth_info, - con_ssh=con_ssh) - if not mgmt_net_id: - raise exceptions.NeutronError("Cannot find management network") - nics = [{'net-id': mgmt_net_id}] - - if 'edge' not in guest_os and 'vxworks' not in guest_os: - tenant_net_id = network_helper.get_tenant_net_id( - auth_info=auth_info, con_ssh=con_ssh) - if tenant_net_id: - nics.append({'net-id': tenant_net_id}) - - if isinstance(nics, dict): - nics = [nics] - nics = _convert_vnics(nics, con_ssh=con_ssh, auth_info=auth_info, - cleanup=cleanup) - - # Handle mandatory arg - boot source - volume_id = snapshot_id = image = None - if source != 'block_device': - if source is None: - if min_count is None and max_count is None: - source = 'volume' - else: - source = 'image' - - if source.lower() == 'volume': - if source_id: - volume_id = source_id - else: - vol_name = 'vol-' + name - if reuse_vol: - volume_id = cinder_helper.get_any_volume( - new_name=vol_name, - auth_info=auth_info, - con_ssh=con_ssh, - cleanup=cleanup) - else: - volume_id = cinder_helper.create_volume( - name=vol_name, - source_id=image_id, - auth_info=auth_info, - con_ssh=con_ssh, - guest_image=guest_os, - cleanup=cleanup)[1] - - elif source.lower() == 'image': - image = source_id if source_id else image_id - if not image: - img_name = guest_os if guest_os else GuestImages.DEFAULT[ - 'guest'] - image = glance_helper.get_image_id_from_name(img_name, - strict=True, - fail_ok=False) - - elif source.lower() == 'snapshot': - snapshot_id = source_id - if not snapshot_id: - snapshot_id = cinder_helper.get_vol_snapshots( - auth_info=auth_info, con_ssh=con_ssh) - if not snapshot_id: - raise ValueError( - "snapshot id is required to boot vm; however no " - "snapshot exists on the system.") - snapshot_id = snapshot_id[0] - - if vm_host and not avail_zone: - avail_zone = 'nova' - if avail_zone and vm_host: - avail_zone = '{}:{}'.format(avail_zone, vm_host) - - if user_data is None and guest_os and not re.search( - GuestImages.TIS_GUEST_PATTERN, guest_os): - # create userdata cloud init file to run right after vm - # initialization to get ip on interfaces other than eth0. 
- user_data = _create_cloud_init_if_conf(guest_os, nics_num=len(nics)) - - if user_data and user_data.startswith('~'): - user_data = user_data.replace('~', HostLinuxUser.get_home(), 1) - - if file and file.startswith('~'): - file = file.replace('~', HostLinuxUser.get_home(), 1) - - # create cmd - non_repeat_args = {'--flavor': flavor, - '--image': image, - '--boot-volume': volume_id, - '--snapshot': snapshot_id, - '--min-count': str( - min_count) if min_count is not None else None, - '--max-count': str( - max_count) if max_count is not None else None, - '--key-name': key_name, - '--swap': swap, - '--user-data': user_data, - '--ephemeral': ephemeral, - '--availability-zone': avail_zone, - '--file': file, - '--config-drive': str( - config_drive) if config_drive else None, - '--block-device-mapping': block_device_mapping, - '--security-groups': security_groups, - '--tags': tags, - '--poll': poll, - } - non_repeat_args = common.parse_args(non_repeat_args, repeat_arg=False, - vals_sep=',') - - repeat_args = { - '--meta': meta, - '--nic': nics, - '--hint': hint, - '--block-device': block_device, - } - repeat_args = common.parse_args(repeat_args, repeat_arg=True, vals_sep=',') - - pre_boot_vms = [] - if not (min_count is None and max_count is None): - name_str = name + '-' - pre_boot_vms = get_vms(auth_info=auth_info, con_ssh=con_ssh, - strict=False, name=name_str) - - args_ = ' '.join([non_repeat_args, repeat_args, name]) - LOG.info("Booting VM {} with args: {}".format(name, args_)) - exitcode, output = cli.nova('boot', positional_args=args_, - ssh_client=con_ssh, fail_ok=True, - auth_info=auth_info, - timeout=VMTimeout.BOOT_VM) - - tmout = VMTimeout.STATUS_CHANGE - if min_count is None and max_count is None: - table_ = table_parser.table(output) - vm_id = table_parser.get_value_two_col_table(table_, 'id') - if cleanup and vm_id: - ResourceCleanup.add('vm', vm_id, scope=cleanup, del_vm_vols=False) - - if exitcode == 1: - if vm_id: - # print out vm show for debugging purpose - cli.openstack('server show', vm_id, ssh_client=con_ssh, - auth_info=Tenant.get('admin')) - if not fail_ok: - raise exceptions.VMOperationFailed(output) - - if vm_id: - return 1, vm_id, output # vm_id = '' if cli is rejected - # without vm created - return 4, '', output - - LOG.info("Post action check...") - if poll and "100% complete" not in output: - message = "VM building is not 100% complete." - if fail_ok: - LOG.warning(message) - return 2, vm_id, "VM building is not 100% complete." - else: - raise exceptions.VMOperationFailed(message) - - if not wait_for_vm_status(vm_id=vm_id, status=VMStatus.ACTIVE, - timeout=tmout, con_ssh=con_ssh, - auth_info=auth_info, fail_ok=True): - vm_status = \ - get_vm_values(vm_id, 'status', strict=True, con_ssh=con_ssh, - auth_info=auth_info)[0] - message = "VM {} did not reach ACTIVE state within {}. 
VM " \ - "status: {}".format(vm_id, tmout, vm_status) - if fail_ok: - LOG.warning(message) - return 3, vm_id, message - else: - raise exceptions.VMPostCheckFailed(message) - - LOG.info("VM {} is booted successfully.".format(vm_id)) - - return 0, vm_id, 'VM is booted successfully' - - else: - name_str = name + '-' - post_boot_vms = get_vms(auth_info=auth_info, con_ssh=con_ssh, - strict=False, name=name_str) - vm_ids = list(set(post_boot_vms) - set(pre_boot_vms)) - if cleanup and vm_ids: - ResourceCleanup.add('vm', vm_ids, scope=cleanup, del_vm_vols=False) - - if exitcode == 1: - return 1, vm_ids, output - - result, vms_in_state, vms_failed_to_reach_state = wait_for_vms_values( - vm_ids, fail_ok=True, timeout=tmout, - con_ssh=con_ssh, - auth_info=Tenant.get('admin')) - if not result: - msg = "VMs failed to reach ACTIVE state: {}".format( - vms_failed_to_reach_state) - if fail_ok: - LOG.warning(msg=msg) - return 3, vm_ids, msg - - LOG.info("VMs booted successfully: {}".format(vm_ids)) - return 0, vm_ids, "VMs are booted successfully" - - -def wait_for_vm_pingable_from_natbox(vm_id, timeout=200, fail_ok=False, - con_ssh=None, use_fip=False): - """ - Wait for ping vm from natbox succeeds. - - Args: - vm_id (str): id of the vm to ping - timeout (int): max retry time for pinging vm - fail_ok (bool): whether to raise exception if vm cannot be ping'd - successfully from natbox within timeout - con_ssh (SSHClient): TiS server ssh handle - use_fip (bool): whether or not to ping floating ip only if any - - Returns (bool): True if ping vm succeeded, False otherwise. - - """ - ping_end_time = time.time() + timeout - while time.time() < ping_end_time: - if ping_vms_from_natbox(vm_ids=vm_id, fail_ok=True, con_ssh=con_ssh, - num_pings=3, use_fip=use_fip)[0]: - # give it sometime to settle after vm booted and became pingable - time.sleep(5) - return True - else: - msg = "Ping from NatBox to vm {} failed for {} seconds.".format(vm_id, - timeout) - if fail_ok: - LOG.warning(msg) - return False - else: - time_stamp = common.get_date_in_format(ssh_client=con_ssh, - date_format='%Y%m%d_%H-%M') - f_path = '{}/{}-{}'.format(ProjVar.get_var('PING_FAILURE_DIR'), - time_stamp, ProjVar.get_var('TEST_NAME')) - common.write_to_file(f_path, - "=================={}===============\n".format( - msg)) - ProjVar.set_var(PING_FAILURE=True) - get_console_logs(vm_ids=vm_id, sep_file=f_path) - network_helper.collect_networking_info(vms=vm_id, sep_file=f_path, - time_stamp=time_stamp) - raise exceptions.VMNetworkError(msg) - - -def __merge_dict(base_dict, merge_dict): - # identical to {**base_dict, **merge_dict} in python3.6+ - d = dict(base_dict) # id() will be different, making a copy - for k in merge_dict: - d[k] = merge_dict[k] - return d - - -def get_default_keypair(auth_info=None, con_ssh=None): - """ - Get keypair for specific tenant. - - Args: - auth_info (dict): If None, default tenant will be used. - con_ssh (SSHClient): - - Returns (str): key name - - """ - if auth_info is None: - auth_info = Tenant.get_primary() - - keypair_name = auth_info['nova_keypair'] - existing_keypairs = nova_helper.get_keypairs(name=keypair_name, - con_ssh=con_ssh, - auth_info=auth_info) - if existing_keypairs: - return existing_keypairs[0] - - # Assume that public key file already exists since it should have been - # set up in session config. - # In the case of public key file does not exist, there should be existing - # nova keypair, so it should not - # reach this step. 
Config done via setups.setup_keypair() - keyfile_stx_final = ProjVar.get_var('STX_KEYFILE_SYS_HOME') - public_key_stx = '{}.pub'.format(keyfile_stx_final) - LOG.info("Create nova keypair {} using public key {}".format( - keypair_name, public_key_stx)) - nova_helper.create_keypair(keypair_name, public_key=public_key_stx, - auth_info=auth_info, con_ssh=con_ssh) - - return keypair_name - - -def live_migrate_vm(vm_id, destination_host='', con_ssh=None, - block_migrate=None, force=None, fail_ok=False, - auth_info=Tenant.get('admin')): - """ - - Args: - vm_id (str): - destination_host (str): such as compute-0, compute-1 - con_ssh (SSHClient): - block_migrate (bool): whether to add '--block-migrate' to command - force (None|bool): force live migrate - fail_ok (bool): if fail_ok, return a numeric code to indicate the - execution status - One exception is if the live-migration command exit_code > 1, - which indicates the command itself may - be incorrect. In this case CLICommandFailed exception will be - thrown regardless of the fail_ok flag. - auth_info (dict): - - Returns (tuple): (return_code (int), error_msg_if_migration_rejected (str)) - (0, 'Live migration is successful.'): - live migration succeeded and post migration checking passed - (1, ): # This scenario is changed to host did not change - as expected - live migration request rejected as expected. e.g., no available - destination host, - or live migrate a vm with block migration - (2, ): live migration request rejected due to unknown - reason. - (3, 'Post action check failed: VM is in ERROR state.'): - live migration command executed successfully, but VM is in Error - state after migration - (4, 'Post action check failed: VM is not in original state.'): - live migration command executed successfully, but VM is not in - before-migration-state - (5, 'Post action check failed: VM host did not change!'): (this - scenario is removed from Newton) - live migration command executed successfully, but VM is still on - the same host after migration - (6, ) This happens when vote_not_to_migrate is set for - vm, or pci device is used in vm, etc - - For the first two scenarios, results will be returned regardless of the - fail_ok flag.
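Given the code/message pairs above, callers typically branch on the returned code. A brief sketch (vm_id assumed booted and active, destination left to the scheduler):

code, msg = live_migrate_vm(vm_id, fail_ok=True)
if code == 0:
    # host change is already verified inside the helper
    LOG.info(msg)
elif code in (1, 2):
    # rejected; code 2 means the system blocked it as expected
    LOG.warning(msg)
else:
    raise exceptions.VMPostCheckFailed(msg)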
- For scenarios other than the first two, returns are only applicable if - fail_ok=True - - Examples: - 1) If a test case is meant to test live migration with a specific - flavor which would block the migration, the - following call can be made: - - return_code, msg = live_migrate_vm(vm_id, fail_ok=True) - expected_err_str = "your error string" - assert return_code in [1, 2] - assert expected_err_str in msg - - 2) For a test that needs to live migrate - - """ - optional_arg = '' - - if block_migrate: - optional_arg += '--block-migrate' - - if force: - optional_arg += '--force' - - before_host = get_vm_host(vm_id, con_ssh=con_ssh) - before_status = get_vm_values(vm_id, 'status', strict=True, con_ssh=con_ssh, - auth_info=Tenant.get('admin'))[0] - if not before_status == VMStatus.ACTIVE: - LOG.warning("Non-active VM status before live migrate: {}".format( - before_status)) - - extra_str = '' - if not destination_host == '': - extra_str = ' to ' + destination_host - positional_args = ' '.join( - [optional_arg.strip(), str(vm_id), destination_host]).strip() - LOG.info( - "Live migrating VM {} from {}{} started.".format(vm_id, before_host, - extra_str)) - LOG.info("nova live-migration {}".format(positional_args)) - # auto host/block migration selection unavailable in openstack client - exit_code, output = cli.nova('live-migration', - positional_args=positional_args, - ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info) - - if exit_code == 1: - return 6, output - - LOG.info("Waiting for VM status change to {} with best effort".format( - VMStatus.MIGRATING)) - in_mig_state = wait_for_vm_status(vm_id, status=VMStatus.MIGRATING, - timeout=60, fail_ok=True) - if not in_mig_state: - LOG.warning( - "VM did not reach {} state after triggering live-migration".format( - VMStatus.MIGRATING)) - - LOG.info("Waiting for VM status change to original state {}".format( - before_status)) - end_time = time.time() + VMTimeout.LIVE_MIGRATE_COMPLETE - while time.time() < end_time: - time.sleep(2) - status = get_vm_values(vm_id, 'status', strict=True, con_ssh=con_ssh, - auth_info=Tenant.get('admin'))[0] - if status == before_status: - LOG.info("Live migrate vm {} completed".format(vm_id)) - break - elif status == VMStatus.ERROR: - if fail_ok: - return 3, "Post action check failed: VM is in ERROR state." - nova_helper.get_migration_list_table(con_ssh=con_ssh, - auth_info=auth_info) - raise exceptions.VMPostCheckFailed( - "VM {} is in {} state after live migration. Original state " - "before live migration is: {}".format(vm_id, VMStatus.ERROR, - before_status)) - else: - if fail_ok: - return 4, "Post action check failed: VM is not in original state." - else: - nova_helper.get_migration_list_table(con_ssh=con_ssh, - auth_info=auth_info) - raise exceptions.TimeoutException( - "VM {} did not reach original state within {} seconds after " - "live migration".format(vm_id, VMTimeout.LIVE_MIGRATE_COMPLETE)) - - after_host = before_host - for i in range(3): - after_host = get_vm_host(vm_id, con_ssh=con_ssh) - if after_host != before_host: - break - time.sleep(3) - - if before_host == after_host: - LOG.warning( - "Live migration of vm {} failed. 
Checking if this is expected " - "failure...".format( - vm_id)) - if _is_live_migration_allowed(vm_id, vm_host=before_host, - block_migrate=block_migrate) and \ - (destination_host or get_dest_host_for_live_migrate(vm_id)): - if fail_ok: - return 1, "Unknown live migration failure" - else: - nova_helper.get_migration_list_table(con_ssh=con_ssh, - auth_info=auth_info) - raise exceptions.VMPostCheckFailed( - "Unexpected failure of live migration!") - else: - LOG.debug( - "System does not allow live migrating vm {} as " - "expected.".format( - vm_id)) - return 2, "Live migration failed as expected" - - LOG.info( - "VM {} successfully migrated from {} to {}".format(vm_id, before_host, - after_host)) - return 0, "Live migration is successful." - - -def _is_live_migration_allowed(vm_id, vm_host, con_ssh=None, - block_migrate=None): - vm_info = VMInfo.get_vm_info(vm_id, con_ssh=con_ssh) - storage_backing = vm_info.get_storage_type() - if not storage_backing: - storage_backing = host_helper.get_host_instance_backing(host=vm_host, - con_ssh=con_ssh) - - vm_boot_from = vm_info.boot_info['type'] - - if storage_backing == 'local_image': - if block_migrate and vm_boot_from == 'volume' and not \ - vm_info.has_local_disks(): - LOG.warning( - "Live block migration is not supported for boot-from-volume " - "vm with local_image storage") - return False - return True - - elif storage_backing == 'local_lvm': - if (not block_migrate) and vm_boot_from == 'volume' and not \ - vm_info.has_local_disks(): - return True - else: - LOG.warning( - "Live (block) migration is not supported for local_lvm vm " - "with localdisk") - return False - - else: - # remote backend - if block_migrate: - LOG.warning( - "Live block migration is not supported for vm with remote " - "storage") - return False - else: - return True - - -def get_dest_host_for_live_migrate(vm_id, con_ssh=None): - """ - Check whether a destination host exists with following criteria: - Criteria: - 1) host has same storage backing as the vm - 2) host is unlocked - 3) different than current host - Args: - vm_id (str): - con_ssh (SSHClient): - - Returns (str): hostname for the first host found. Or '' if no proper host - found - """ - vm_info = VMInfo.get_vm_info(vm_id, con_ssh=con_ssh) - vm_storage_backing = vm_info.get_storage_type() - current_host = vm_info.get_host_name() - if not vm_storage_backing: - vm_storage_backing = host_helper.get_host_instance_backing( - host=current_host, con_ssh=con_ssh) - candidate_hosts = host_helper.get_hosts_in_storage_backing( - storage_backing=vm_storage_backing, con_ssh=con_ssh) - - hosts_table_ = table_parser.table(cli.system('host-list')[1]) - for host in candidate_hosts: - if not host == current_host: - host_state = table_parser.get_values(hosts_table_, 'administrative', - hostname=host)[0] - if host_state == 'unlocked': - LOG.debug( - "At least one host - {} is available for live migrating " - "vm {}".format( - host, vm_id)) - return host - - LOG.warning("No valid host found for live migrating vm {}".format(vm_id)) - return '' - - -def cold_migrate_vm(vm_id, revert=False, con_ssh=None, fail_ok=False, - auth_info=Tenant.get('admin')): - """ - Cold migrate a vm and confirm/revert - Args: - vm_id (str): vm to cold migrate - revert (bool): False to confirm resize, True to revert - con_ssh (SSHClient): - fail_ok (bool): True if fail ok. Default to False, ie., throws - exception upon cold migration fail. 
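A short sketch of the confirm/revert cycle this helper drives; the return codes follow the mapping documented below, and vm_id is assumed active:

# Cold migrate and confirm the resize.
code, msg = cold_migrate_vm(vm_id, fail_ok=True)
assert code == 0, msg
# revert=True performs the migration but then reverts it, leaving the
# VM on its original host.
code, msg = cold_migrate_vm(vm_id, revert=True, fail_ok=True)
assert code == 0, msg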
- auth_info (dict): - - Returns (tuple): (rtn_code, message) - (0, success_msg) # Cold migration and confirm/revert succeeded. VM is - back to original state or Active state. - (1, ) # cold migration cli rejected - # (2, ) # Cold migration cli command rejected. is - the err message returned by cli cmd. - (3, ) # Cold migration cli accepted, but not finished. - is the output of cli cmd. - (4, timeout_message] # Cold migration command ran successfully, - but timed out waiting for VM to reach - 'Verify Resize' state or Error state. - (5, err_msg) # Cold migration command ran successfully, but VM is in - Error state. - (6, err_msg) # Cold migration command ran successfully, and resize - confirm/revert performed. But VM is not in - Active state after confirm/revert. - (7, err_msg) # Cold migration and resize confirm/revert ran - successfully and vm in active state. But host for vm - is not as expected. i.e., still the same host after confirm - resize, or different host after revert resize. - (8, ) # Confirm/Revert resize cli rejected - - """ - before_host = get_vm_host(vm_id, con_ssh=con_ssh) - before_status = \ - get_vm_values(vm_id, 'status', strict=True, con_ssh=con_ssh)[0] - if not before_status == VMStatus.ACTIVE: - LOG.warning("Non-active VM status before cold migrate: {}".format( - before_status)) - - LOG.info("Cold migrating VM {} from {}...".format(vm_id, before_host)) - exitcode, output = cli.nova('migrate --poll', vm_id, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info, - timeout=VMTimeout.COLD_MIGRATE_CONFIRM) - - if exitcode == 1: - return 1, output - - LOG.info( - "Waiting for VM status change to {}".format(VMStatus.VERIFY_RESIZE)) - vm_status = wait_for_vm_status(vm_id=vm_id, - status=[VMStatus.VERIFY_RESIZE, - VMStatus.ERROR], - timeout=300, - fail_ok=fail_ok, con_ssh=con_ssh) - - if vm_status is None: - return 4, 'Timed out waiting for Error or Verify_Resize status for ' \ - 'VM {}'.format(vm_id) - - verify_resize_str = 'Revert' if revert else 'Confirm' - if vm_status == VMStatus.VERIFY_RESIZE: - LOG.info("{}ing resize..".format(verify_resize_str)) - res, out = _confirm_or_revert_resize(vm=vm_id, revert=revert, - fail_ok=fail_ok, con_ssh=con_ssh) - if res > 0: - return 8, out - - elif vm_status == VMStatus.ERROR: - err_msg = "VM {} in Error state after cold migrate. {} resize is not " \ - "reached.".format(vm_id, verify_resize_str) - if fail_ok: - return 5, err_msg - nova_helper.get_migration_list_table(con_ssh=con_ssh, - auth_info=auth_info) - raise exceptions.VMPostCheckFailed(err_msg) - - post_confirm_state = wait_for_vm_status( - vm_id, status=VMStatus.ACTIVE, - timeout=VMTimeout.COLD_MIGRATE_CONFIRM, fail_ok=fail_ok, - con_ssh=con_ssh) - - if post_confirm_state is None: - err_msg = "VM {} is not in Active state after {} Resize".format( - vm_id, verify_resize_str) - return 6, err_msg - - # Process results - after_host = get_vm_host(vm_id, con_ssh=con_ssh) - host_changed = before_host != after_host - host_change_str = "changed" if host_changed else "did not change" - operation_ok = not host_changed if revert else host_changed - - if not operation_ok: - err_msg = ( - "VM {} host {} after {} Resize. Before host: {}. After host: {}". 
- format(vm_id, host_change_str, verify_resize_str, before_host, - after_host)) - if fail_ok: - return 7, err_msg - nova_helper.get_migration_list_table(con_ssh=con_ssh, - auth_info=auth_info) - raise exceptions.VMPostCheckFailed(err_msg) - - success_msg = "VM {} successfully cold migrated and {}ed Resize.".format( - vm_id, verify_resize_str) - LOG.info(success_msg) - return 0, success_msg - - -def resize_vm(vm_id, flavor_id, revert=False, con_ssh=None, fail_ok=False, - auth_info=Tenant.get('admin')): - """ - Resize vm to given flavor - - Args: - vm_id (str): - flavor_id (str): flavor to resize to - revert (bool): True to revert resize, else confirm resize - con_ssh (SSHClient): - fail_ok (bool): - auth_info (dict): - - Returns (tuple): (rtn_code, msg) - (0, "VM successfully resized and confirmed/reverted.") - (1, ) # resize cli rejected - (2, "Timed out waiting for Error or Verify_Resize status for VM - ") - (3, "VM in Error state after resizing. VERIFY_RESIZE is not - reached.") - (4, "VM is not in Active state after confirm/revert Resize") - (5, "Flavor is changed after revert resizing.") - (6, "VM flavor is not changed to expected after resizing.") - """ - before_flavor = get_vm_flavor(vm_id, con_ssh=con_ssh) - before_status = \ - get_vm_values(vm_id, 'status', strict=True, con_ssh=con_ssh)[0] - if not before_status == VMStatus.ACTIVE: - LOG.warning("Non-active VM status before cold migrate: {}".format( - before_status)) - - LOG.info("Resizing VM {} to flavor {}...".format(vm_id, flavor_id)) - args = '--wait --flavor {} {}'.format(flavor_id, vm_id) - exitcode, output = cli.openstack('server resize', args, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info, - timeout=VMTimeout.COLD_MIGRATE_CONFIRM) - if exitcode > 0: - return 1, output - - LOG.info( - "Waiting for VM status change to {}".format(VMStatus.VERIFY_RESIZE)) - vm_status = wait_for_vm_status(vm_id=vm_id, - status=[VMStatus.VERIFY_RESIZE, - VMStatus.ERROR], - fail_ok=fail_ok, - timeout=300, con_ssh=con_ssh) - - if vm_status is None: - err_msg = 'Timed out waiting for Error or Verify_Resize status for ' \ - 'VM {}'.format(vm_id) - LOG.error(err_msg) - return 2, err_msg - - verify_resize_str = 'Revert' if revert else 'Confirm' - if vm_status == VMStatus.VERIFY_RESIZE: - LOG.info("{}ing resize..".format(verify_resize_str)) - _confirm_or_revert_resize(vm=vm_id, revert=revert, con_ssh=con_ssh) - - elif vm_status == VMStatus.ERROR: - err_msg = "VM {} in Error state after resizing. {} is not " \ - "reached.".format(vm_id, VMStatus.VERIFY_RESIZE) - if fail_ok: - LOG.error(err_msg) - return 3, err_msg - raise exceptions.VMPostCheckFailed(err_msg) - - post_confirm_state = wait_for_vm_status( - vm_id, status=VMStatus.ACTIVE, timeout=VMTimeout.COLD_MIGRATE_CONFIRM, - fail_ok=fail_ok, con_ssh=con_ssh) - - if post_confirm_state is None: - err_msg = "VM {} is not in Active state after {} Resize".format( - vm_id, verify_resize_str) - LOG.error(err_msg) - return 4, err_msg - - after_flavor = get_vm_flavor(vm_id) - if revert and after_flavor != before_flavor: - err_msg = "Flavor is changed after revert resizing. Before flavor: " \ - "{}, after flavor: {}".format(before_flavor, after_flavor) - if fail_ok: - LOG.error(err_msg) - return 5, err_msg - raise exceptions.VMPostCheckFailed(err_msg) - - if not revert and after_flavor != flavor_id: - err_msg = "VM flavor {} is not changed to expected after resizing. 
" \ - "Before flavor: {}, after flavor: {}".\ - format(flavor_id, before_flavor, after_flavor) - if fail_ok: - LOG.error(err_msg) - return 6, err_msg - raise exceptions.VMPostCheckFailed(err_msg) - - success_msg = "VM {} successfully resized and {}ed.".format( - vm_id, verify_resize_str) - LOG.info(success_msg) - return 0, success_msg - - -def wait_for_vm_values(vm_id, timeout=VMTimeout.STATUS_CHANGE, check_interval=3, - fail_ok=True, strict=True, - regex=False, con_ssh=None, auth_info=None, **kwargs): - """ - Wait for vm to reach given states. - - Args: - vm_id (str): vm id - timeout (int): in seconds - check_interval (int): in seconds - fail_ok (bool): whether to return result or raise exception when vm - did not reach expected value(s). - strict (bool): whether to perform strict search(match) for the value(s) - For regular string: if True, match the whole string; if False, - find any substring match - For regex: if True, match from start of the value string; if - False, search anywhere of the value string - regex (bool): whether to use regex to find matching value(s) - con_ssh (SSHClient): - auth_info (dict): - **kwargs: field/value pair(s) to identify the waiting criteria. - - Returns (tuple): (result(bool), actual_vals(dict)) - - """ - if not kwargs: - raise ValueError("No field/value pair is passed via kwargs") - LOG.info("Waiting for vm to reach state(s): {}".format(kwargs)) - - fields_to_check = list(kwargs.keys()) - results = {} - end_time = time.time() + timeout - while time.time() < end_time: - actual_vals = get_vm_values(vm_id=vm_id, con_ssh=con_ssh, - auth_info=auth_info, - fields=fields_to_check) - for i in range(len(fields_to_check)): - field = fields_to_check[i] - expt_vals = kwargs[field] - actual_val = actual_vals[i] - results[field] = actual_val - if not isinstance(expt_vals, list): - expt_vals = [expt_vals] - for expt_val in expt_vals: - if regex: - match_found = re.match(expt_val, - actual_val) if strict else re.search( - expt_val, actual_val) - else: - match_found = expt_val == actual_val if strict else \ - expt_val in actual_val - - if match_found: - fields_to_check.remove(field) - - if not fields_to_check: - LOG.info("VM has reached states: {}".format(results)) - return True, results - - time.sleep(check_interval) - - msg = "VM {} did not reach expected states within timeout. Actual state(" \ - "s): {}".format(vm_id, results) - if fail_ok: - LOG.warning(msg) - return False, results - else: - raise exceptions.VMTimeout(msg) - - -def wait_for_vm_status(vm_id, status=VMStatus.ACTIVE, - timeout=VMTimeout.STATUS_CHANGE, check_interval=3, - fail_ok=False, - con_ssh=None, auth_info=Tenant.get('admin')): - """ - - Args: - vm_id: - status (list|str): - timeout: - check_interval: - fail_ok (bool): - con_ssh: - auth_info: - - Returns: The Status of the vm_id depend on what Status it is looking for - - """ - end_time = time.time() + timeout - if isinstance(status, str): - status = [status] - - current_status = get_vm_values(vm_id, 'status', strict=True, - con_ssh=con_ssh, auth_info=auth_info)[0] - while time.time() < end_time: - for expected_status in status: - if current_status == expected_status: - LOG.info("VM status has reached {}".format(expected_status)) - return expected_status - - time.sleep(check_interval) - current_status = get_vm_values(vm_id, 'status', strict=True, - con_ssh=con_ssh, auth_info=auth_info)[0] - - err_msg = "Timed out waiting for vm status: {}. 
Actual vm status: " \ - "{}".format(status, current_status) - if fail_ok: - LOG.warning(err_msg) - return None - else: - raise exceptions.VMTimeout(err_msg) - - -def _confirm_or_revert_resize(vm, revert=False, con_ssh=None, fail_ok=False): - args = '--revert' if revert else '--confirm' - args = '{} {}'.format(args, vm) - return cli.openstack('server resize', args, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=Tenant.get('admin')) - - -def _get_vms_ips(vm_ids, net_types='mgmt', exclude_nets=None, con_ssh=None, - vshell=False): - if isinstance(net_types, str): - net_types = [net_types] - - if isinstance(vm_ids, str): - vm_ids = [vm_ids] - - valid_net_types = ['mgmt', 'data', 'internal', 'external'] - if not set(net_types) <= set(valid_net_types): - raise ValueError( - "Invalid net type(s) provided. Valid net_types: {}. net_types " - "given: {}". - format(valid_net_types, net_types)) - - vms_ips = [] - vshell_ips_dict = dict(data=[], internal=[]) - if 'mgmt' in net_types: - mgmt_ips = network_helper.get_mgmt_ips_for_vms( - vms=vm_ids, con_ssh=con_ssh, exclude_nets=exclude_nets) - if not mgmt_ips: - raise exceptions.VMNetworkError( - "Management net ip is not found for vms {}".format(vm_ids)) - vms_ips += mgmt_ips - - if 'external' in net_types: - ext_ips = network_helper.get_external_ips_for_vms( - vms=vm_ids, con_ssh=con_ssh, exclude_nets=exclude_nets) - if not ext_ips: - raise exceptions.VMNetworkError( - "No external network ip found for vms {}".format(vm_ids)) - vms_ips += ext_ips - - if 'data' in net_types: - data_ips = network_helper.get_tenant_ips_for_vms( - vms=vm_ids, con_ssh=con_ssh, exclude_nets=exclude_nets) - if not data_ips: - raise exceptions.VMNetworkError( - "Data network ip is not found for vms {}".format(vm_ids)) - if vshell: - vshell_ips_dict['data'] = data_ips - else: - vms_ips += data_ips - - if 'internal' in net_types: - internal_ips = network_helper.get_internal_ips_for_vms( - vms=vm_ids, con_ssh=con_ssh, exclude_nets=exclude_nets) - if not internal_ips: - raise exceptions.VMNetworkError( - "Internal net ip is not found for vms {}".format(vm_ids)) - if vshell: - vshell_ips_dict['internal'] = internal_ips - else: - vms_ips += internal_ips - - return vms_ips, vshell_ips_dict - - -def _ping_vms(ssh_client, vm_ids=None, con_ssh=None, num_pings=5, timeout=15, - fail_ok=False, net_types='mgmt', retry=3, - retry_interval=3, vshell=False, sep_file=None, - source_net_types=None): - """ - - Args: - vm_ids (list|str): list of vms to ping - ssh_client (SSHClient): ping from this ssh client. Usually a natbox' - ssh client or another vm's ssh client - con_ssh (SSHClient): active controller ssh client to run cli command - to get all the management ips - num_pings (int): number of pings to send - timeout (int): timeout waiting for response of ping messages in seconds - fail_ok (bool): Whether it's okay to have 100% packet loss rate. - sep_file (str|None) - net_types (str|list|tuple) - source_net_types (str|list|tuple|None): - vshell specific - None: use the same net_type s as the target IPs' - str: use the specified net_type for all target IPs - tuple: (net_type_data, net_type_internal) - use net_type_data for data IPs - use net_type_internal for internal IPs - list: same as tuple - - Returns (tuple): (res (bool), packet_loss_dict (dict)) - Packet loss rate dictionary format: - { - ip1: packet_loss_percentile1, - ip2: packet_loss_percentile2, - ... 
- } - - """ - vms_ips, vshell_ips_dict = _get_vms_ips(vm_ids=vm_ids, net_types=net_types, - con_ssh=con_ssh, vshell=vshell) - - res_bool = False - res_dict = {} - for i in range(retry + 1): - for ip in vms_ips: - packet_loss_rate = network_helper.ping_server( - server=ip, ssh_client=ssh_client, num_pings=num_pings, - timeout=timeout, fail_ok=True, vshell=False)[0] - res_dict[ip] = packet_loss_rate - - for net_type, vshell_ips in vshell_ips_dict.items(): - - if source_net_types is None: - pass - elif isinstance(source_net_types, str): - net_type = source_net_types - else: - net_type_data, net_type_internal = source_net_types - if net_type == 'data': - net_type = net_type_data - elif net_type == 'internal': - net_type = net_type_internal - else: - raise ValueError(net_type) - - for vshell_ip in vshell_ips: - packet_loss_rate = network_helper.ping_server( - server=vshell_ip, ssh_client=ssh_client, - num_pings=num_pings, timeout=timeout, fail_ok=True, - vshell=True, net_type=net_type)[0] - res_dict[vshell_ip] = packet_loss_rate - - res_bool = not any(loss_rate == 100 for loss_rate in res_dict.values()) - if res_bool: - LOG.info( - "Ping successful from {}: {}".format(ssh_client.host, res_dict)) - return res_bool, res_dict - - if i < retry: - LOG.info("Retry in {} seconds".format(retry_interval)) - time.sleep(retry_interval) - - if not res_dict: - raise ValueError("Ping res dict contains no result.") - - err_msg = "Ping unsuccessful from vm (logged in via {}): {}".format( - ssh_client.host, res_dict) - if fail_ok: - LOG.info(err_msg) - return res_bool, res_dict - else: - if sep_file: - msg = "==========================Ping unsuccessful from vm to " \ - "vms====================" - common.write_to_file( - sep_file, - content="{}\nLogged into vm via {}. Result: {}".format( - msg, ssh_client.host, res_dict)) - raise exceptions.VMNetworkError(err_msg) - - -def configure_vm_vifs_on_same_net(vm_id, vm_ips=None, ports=None, - vm_prompt=None, restart_service=True, - reboot=False): - """ - Configure vm routes if the vm has multiple vifs on same network. - Args: - vm_id (str): - vm_ips (str|list): ips for specific vifs. Only works if vifs are up - with ips assigned - ports (list of dict): vm ports to configure. 
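The helper below applies standard Linux source-policy routing: per vif it appends a table to the guest's rt_tables file (typically /etc/iproute2/rt_tables) and pins a kernel route plus a from-<ip> rule to that table. Roughly what lands in the guest, with illustrative names and values:

# rt_tables gains:  110 stxauto_eth1
# route-eth1:  192.168.1.0/24 dev eth1 proto kernel scope link \
#              src 192.168.1.5 table stxauto_eth1
# rule-eth1:   table stxauto_eth1 from 192.168.1.5
configure_vm_vifs_on_same_net(vm_id, vm_ips=['192.168.1.5'],
                              restart_service=True)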
- vm_prompt (None|str) - restart_service - reboot - - Returns: - - """ - - if isinstance(vm_ips, str): - vm_ips = [vm_ips] - - vnics_info = {} - if ports: - LOG.info("Get vm interfaces' mac and ip addresses") - if isinstance(ports, str): - ports = [ports] - vm_interfaces_table = table_parser.table( - cli.openstack('port list', '--server {}'.format(vm_id))[1]) - vm_interfaces_dict = table_parser.row_dict_table( - table_=vm_interfaces_table, key_header='ID') - for i in range(len(ports)): - port_id = ports[i] - vif_info = vm_interfaces_dict[port_id] - vif_ip = vif_info['fixed ip addresses'] - if vif_ip and 'ip_address' in vif_ip: - vif_ip = \ - re.findall("ip_address='(.*)'", vif_ip.split(sep=',')[0])[0] - else: - if not vm_ips: - raise ValueError( - "vm_ips for matching vnics has to be provided for " - "ports without ip address " - "listed in neutron port-list") - vif_ip = vm_ips[i] - cidr = vif_ip.rsplit('.', maxsplit=1)[0] + '.0/24' - vif_mac = vif_info['mac address'] - vnics_info[vif_mac] = (cidr, vif_ip) - - LOG.info("Configure vm routes if the vm has multiple vifs on same network.") - with ssh_to_vm_from_natbox(vm_id=vm_id, prompt=vm_prompt) as vm_ssh: - vifs_to_conf = {} - if not ports: - extra_grep = '| grep --color=never -E "{}"'.format( - '|'.join(vm_ips)) if vm_ips else '' - kernel_routes = vm_ssh.exec_cmd( - 'ip route | grep --color=never "proto kernel" {}'.format( - extra_grep))[1] - cidr_dict = {} - for line in kernel_routes.splitlines(): - found = re.findall( - r'^(.*/\d+)\sdev\s(.*)\sproto kernel.*\ssrc\s(.*)$', line) - cidr, dev_name, dev_ip = found[0] - if cidr not in cidr_dict: - cidr_dict[cidr] = [] - cidr_dict[cidr].append((dev_name, dev_ip)) - - for cidr_, val in cidr_dict.items(): - if not vm_ips: - val = val[1:] - for eth_info in val: - dev_name, dev_ip = eth_info - vifs_to_conf[dev_name] = \ - (cidr_, dev_ip, 'stxauto_{}'.format(dev_name)) - - if not vifs_to_conf: - LOG.info( - "Did not find multiple vifs on same subnet. Do nothing.") - - else: - for mac_addr in vnics_info: - dev_name = network_helper.get_eth_for_mac(vm_ssh, - mac_addr=mac_addr) - cidr_, dev_ip = vnics_info[mac_addr] - vifs_to_conf[dev_name] = ( - cidr_, dev_ip, 'stxauto_{}'.format(dev_name)) - - used_tables = vm_ssh.exec_cmd( - 'grep --color=never -E "^[0-9]" {}'.format(VMPath.RT_TABLES))[1] - used_tables = [int(re.split(r'[\s\t]', line_)[0].strip()) for line_ in - used_tables.splitlines()] - - start_range = 110 - for eth_name, eth_info in vifs_to_conf.items(): - cidr_, vif_ip, table_name = eth_info - existing_tab = vm_ssh.exec_cmd( - 'grep --color=never {} {}'.format(table_name, - VMPath.RT_TABLES))[1] - if not existing_tab: - for i in range(start_range, 250): - if i not in used_tables: - LOG.info( - "Append new routing table {} to rt_tables".
- format(table_name)) - vm_ssh.exec_sudo_cmd( - 'echo "{} {}" >> {}'.format(i, table_name, - VMPath.RT_TABLES)) - start_range = i + 1 - break - else: - raise ValueError( - "Unable to get a valid table number to create route " - "for {}".format(eth_name)) - - LOG.info( - "Update arp_filter, arp_announce, route and rule scripts for " - "vm {} {}".format(vm_id, eth_name)) - vm_ssh.exec_sudo_cmd( - 'echo 2 > {}'.format(VMPath.ETH_ARP_ANNOUNCE.format(eth_name))) - vm_ssh.exec_sudo_cmd( - 'echo 1 > {}'.format(VMPath.ETH_ARP_FILTER.format(eth_name))) - route = '{} dev {} proto kernel scope link src {} table {}'.format( - cidr_, eth_name, vif_ip, table_name) - vm_ssh.exec_sudo_cmd('echo "{}" > {}'.format( - route, VMPath.ETH_RT_SCRIPT.format(eth_name))) - rule = 'table {} from {}'.format(table_name, vif_ip) - vm_ssh.exec_sudo_cmd('echo "{}" > {}'.format( - rule, VMPath.ETH_RULE_SCRIPT.format(eth_name))) - - if restart_service and not reboot: - LOG.info("Restart network service after configure vm routes") - vm_ssh.exec_sudo_cmd('systemctl restart network', - expect_timeout=120, get_exit_code=False) - # vm_ssh.exec_cmd('ip addr') - - if reboot: - LOG.info("Reboot vm after configure vm routes") - reboot_vm(vm_id=vm_id) - - -def cleanup_routes_for_vifs(vm_id, vm_ips, rm_ifcfg=True, restart_service=True, - reboot=False): - """ - Cleanup the configured routes for specified vif(s). This is needed when a - vif is detached from a vm. - - Args: - vm_id: - vm_ips: - rm_ifcfg - restart_service - reboot - - Returns: - - """ - with ssh_to_vm_from_natbox(vm_id=vm_id) as vm_ssh: - - if isinstance(vm_ips, str): - vm_ips = [vm_ips] - - for vm_ip in vm_ips: - LOG.info("Clean up route for dev with ip {}".format(vm_ip)) - route = vm_ssh.exec_sudo_cmd( - 'grep --color=never {} {}'.format( - vm_ip, VMPath.ETH_RT_SCRIPT.format('*')))[1] - if not route: - continue - - pattern = '(.*) dev (.*) proto kernel .* src {} table (.*)'.format( - vm_ip) - found = re.findall(pattern, route) - if found: - cidr, eth_name, table_name = found[0] - LOG.info( - "Update arp_filter, arp_announce, route and rule scripts " - "for vm {} {}".format(vm_id, eth_name)) - # vm_ssh.exec_sudo_cmd('rm -f {}'.format( - # VMPath.ETH_ARP_ANNOUNCE.format(eth_name))) - # vm_ssh.exec_sudo_cmd('rm -f {}'.format( - # VMPath.ETH_ARP_FILTER.format(eth_name))) - vm_ssh.exec_sudo_cmd( - 'rm -f {}'.format(VMPath.ETH_RULE_SCRIPT.format(eth_name))) - vm_ssh.exec_sudo_cmd( - 'rm -f {}'.format(VMPath.ETH_RT_SCRIPT.format(eth_name))) - vm_ssh.exec_sudo_cmd("sed -n -i '/{}/!p' {}".format( - table_name, VMPath.RT_TABLES)) - - if rm_ifcfg: - vm_ssh.exec_sudo_cmd('rm -f {}'.format( - VMPath.ETH_PATH_CENTOS.format(eth_name))) - - if restart_service and not reboot: - LOG.info("Restart network service") - vm_ssh.exec_sudo_cmd('systemctl restart network', - get_exit_code=False, expect_timeout=60) - - if reboot: - reboot_vm(vm_id=vm_id) - - -def ping_vms_from_natbox(vm_ids=None, natbox_client=None, con_ssh=None, - num_pings=5, timeout=30, fail_ok=False, - use_fip=False, retry=0): - """ - - Args: - vm_ids: vms to ping. If None, all vms will be ping'd. - con_ssh (SSHClient): active controller client to retrieve the vm info - natbox_client (NATBoxClient): ping vms from this client - num_pings (int): number of pings to send - timeout (int): timeout waiting for response of ping messages in seconds - fail_ok (bool): When False, test will stop right away if one ping - failed. 
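The two route-configuration helpers above persist per-interface routing state inside the guest. At runtime that state reduces to a handful of iproute2 commands; a sketch generating them follows. Device, address, and table name are illustrative, and the table name must already be registered in /etc/iproute2/rt_tables, which is what the rt_tables append in the helper takes care of.

    def source_policy_routing_cmds(dev, ip, cidr, table):
        """iproute2 commands that keep traffic sourced from `ip` on `dev`."""
        return [
            # answer ARP only on the interface that owns the address
            'sysctl -w net.ipv4.conf.{}.arp_filter=1'.format(dev),
            'sysctl -w net.ipv4.conf.{}.arp_announce=2'.format(dev),
            # dedicated routing table for this vif ...
            'ip route add {} dev {} proto kernel scope link src {} table {}'
            .format(cidr, dev, ip, table),
            # ... selected whenever packets carry this source address
            'ip rule add from {} table {}'.format(ip, table),
        ]

    for cmd in source_policy_routing_cmds(
            'eth1', '192.168.1.12', '192.168.1.0/24', 'stxauto_eth1'):
        print(cmd)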
When True, test will continue to ping - the rest of the vms and return results even if pinging one vm - failed. - use_fip (bool): Whether to ping floating ip only if a vm has more - than one management ips - retry (int): number of times to retry if ping fails - - Returns (tuple): (res (bool), packet_loss_dict (dict)) - Packet loss rate dictionary format: - { - ip1: packet_loss_percentile1, - ip2: packet_loss_percentile2, - ... - } - """ - if isinstance(vm_ids, str): - vm_ids = [vm_ids] - - if not natbox_client: - natbox_client = NATBoxClient.get_natbox_client() - - if not con_ssh: - con_ssh = ControllerClient.get_active_controller() - - net_type = 'external' if use_fip else 'mgmt' - res_bool, res_dict = _ping_vms(ssh_client=natbox_client, vm_ids=vm_ids, - con_ssh=con_ssh, num_pings=num_pings, - timeout=timeout, fail_ok=True, - net_types=net_type, retry=retry, - vshell=False) - if not res_bool and not fail_ok: - msg = "==================Ping vm(s) from NatBox failed - Collecting " \ - "extra information===============" - LOG.error(msg) - time_stamp = common.get_date_in_format(ssh_client=con_ssh, - date_format='%Y%m%d_%H-%M') - f_path = '{}/{}-{}'.format(ProjVar.get_var('PING_FAILURE_DIR'), - time_stamp, ProjVar.get_var("TEST_NAME")) - common.write_to_file(file_path=f_path, - content="\n{}\nResult(s): {}\n".format(msg, - res_dict)) - ProjVar.set_var(PING_FAILURE=True) - get_console_logs(vm_ids=vm_ids, sep_file=f_path) - network_helper.collect_networking_info(vms=vm_ids, sep_file=f_path, - time_stamp=time_stamp, - con_ssh=con_ssh) - raise exceptions.VMNetworkError( - "Ping failed from NatBox. Details: {}".format(res_dict)) - - return res_bool, res_dict - - -def get_console_logs(vm_ids, length=None, con_ssh=None, sep_file=None): - """ - Get console logs for given vm(s) - Args: - vm_ids (str|list): - length (int|None): how many lines to tail - con_ssh: - sep_file (str|None): write vm console logs to given sep_file if - specified. - - Returns (dict): {: , : , ...} - """ - if isinstance(vm_ids, str): - vm_ids = [vm_ids] - - vm_ids = list(set(vm_ids)) - console_logs = {} - args = '--lines={} '.format(length) if length else '' - content = '' - for vm_id in vm_ids: - vm_args = '{}{}'.format(args, vm_id) - output = cli.openstack('console log show', vm_args, ssh_client=con_ssh, - auth_info=Tenant.get('admin'))[1] - console_logs[vm_id] = output - content += "\n#### Console log for vm {} ####\n{}\n".format(vm_id, - output) - - if sep_file: - common.write_to_file(sep_file, content=content) - - return console_logs - - -def ping_vms_from_vm(to_vms=None, from_vm=None, user=None, password=None, - prompt=None, con_ssh=None, natbox_client=None, - num_pings=5, timeout=120, fail_ok=False, from_vm_ip=None, - from_fip=False, net_types='mgmt', - retry=3, retry_interval=5, vshell=False, - source_net_types=None): - """ - - Args: - from_vm (str): - to_vms (str|list|None): - user (str): - password (str): - prompt (str): - con_ssh (SSHClient): - natbox_client (SSHClient): - num_pings (int): - timeout (int): max number of seconds to wait for ssh connection to - from_vm - fail_ok (bool): When False, test will stop right away if one ping - failed. When True, test will continue to ping - the rest of the vms and return results even if pinging one vm - failed. - from_vm_ip (str): vm ip to ssh to if given. 
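get_console_logs above shells out to the openstack client once per vm and appends everything to one diagnostics file. A minimal equivalent using subprocess directly, assuming the `openstack` CLI is on PATH with credentials already set in the environment:

    import subprocess

    def collect_console_logs(vm_ids, lines=None, out_file=None):
        """Fetch and optionally archive `openstack console log show` output."""
        logs, content = {}, ''
        for vm_id in sorted(set(vm_ids)):
            cmd = ['openstack', 'console', 'log', 'show']
            if lines:
                cmd.append('--lines={}'.format(lines))
            cmd.append(vm_id)
            logs[vm_id] = subprocess.run(
                cmd, capture_output=True, text=True).stdout
            content += '\n#### Console log for vm {} ####\n{}\n'.format(
                vm_id, logs[vm_id])
        if out_file:
            with open(out_file, 'a', encoding='utf8') as f:
                f.write(content)
        return logs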
from_fip flag will be - considered only if from_vm_ip=None - from_fip (bool): whether to ssh to vm's floating ip if it has - floating ip associated with it - net_types (list|str|tuple): 'mgmt', 'data', or 'internal' - retry (int): number of times to retry - retry_interval (int): seconds to wait between each retries - vshell (bool): whether to ping vms' data interface through internal - interface. - Usage: when set to True, use 'vshell ping --count 3 - ' - - dpdk vms should be booted from lab_setup scripts - source_net_types (str|list|tuple|None): - vshell specific - None: use the same net_type s as the target IPs' - str: use the specified net_type for all target IPs - tuple: (net_type_data, net_type_internal) - use net_type_data for data IPs - use net_type_internal for internal IPs - list: same as tuple - - Returns (tuple): - A tuple in form: (res (bool), packet_loss_dict (dict)) - - Packet loss rate dictionary format: - { - ip1: packet_loss_percentile1, - ip2: packet_loss_percentile2, - ... - } - - """ - if isinstance(net_types, str): - net_types = [net_types] - - if from_vm is None or to_vms is None: - vms_ips = network_helper.get_mgmt_ips_for_vms(con_ssh=con_ssh, - rtn_dict=True) - if not vms_ips: - raise exceptions.NeutronError("No management ip found for any vms") - - vms_ids = list(vms_ips.keys()) - if from_vm is None: - from_vm = random.choice(vms_ids) - if to_vms is None: - to_vms = vms_ids - - if isinstance(to_vms, str): - to_vms = [to_vms] - - if not isinstance(from_vm, str): - raise ValueError("from_vm is not a string: {}".format(from_vm)) - - assert from_vm and to_vms, "from_vm: {}, to_vms: {}".format(from_vm, to_vms) - - time_stamp = common.get_date_in_format(ssh_client=con_ssh, - date_format='%Y%m%d_%H-%M') - f_path = '{}/{}-{}'.format(ProjVar.get_var('PING_FAILURE_DIR'), time_stamp, - ProjVar.get_var('TEST_NAME')) - try: - with ssh_to_vm_from_natbox(vm_id=from_vm, username=user, - password=password, - natbox_client=natbox_client, - prompt=prompt, con_ssh=con_ssh, - vm_ip=from_vm_ip, use_fip=from_fip, - retry_timeout=300) as from_vm_ssh: - res = _ping_vms(ssh_client=from_vm_ssh, vm_ids=to_vms, - con_ssh=con_ssh, num_pings=num_pings, - timeout=timeout, fail_ok=fail_ok, - net_types=net_types, retry=retry, - retry_interval=retry_interval, vshell=vshell, - sep_file=f_path, - source_net_types=source_net_types) - return res - - except (exceptions.TiSError, pexpect.ExceptionPexpect): - ProjVar.set_var(PING_FAILURE=True) - collect_to_vms = False if list(to_vms) == [from_vm] else True - get_console_logs(vm_ids=from_vm, length=20, sep_file=f_path) - if collect_to_vms: - get_console_logs(vm_ids=to_vms, sep_file=f_path) - network_helper.collect_networking_info(vms=to_vms, sep_file=f_path, - time_stamp=time_stamp) - try: - LOG.warning( - "Ping vm(s) from vm failed - Attempt to ssh to from_vm and " - "collect vm networking info") - with ssh_to_vm_from_natbox(vm_id=from_vm, username=user, - password=password, - natbox_client=natbox_client, - prompt=prompt, con_ssh=con_ssh, - vm_ip=from_vm_ip, - use_fip=from_fip) as from_vm_ssh: - _collect_vm_networking_info(vm_ssh=from_vm_ssh, sep_file=f_path, - vm_id=from_vm) - - if collect_to_vms: - LOG.warning( - "Ping vm(s) from vm failed - Attempt to ssh to to_vms and " - "collect vm networking info") - for vm_ in to_vms: - with ssh_to_vm_from_natbox(vm_, retry=False, - con_ssh=con_ssh) as to_ssh: - _collect_vm_networking_info(to_ssh, sep_file=f_path, - vm_id=vm_) - except (exceptions.TiSError, pexpect.ExceptionPexpect): - pass - - raise - - -def 
_collect_vm_networking_info(vm_ssh, sep_file=None, vm_id=None): - vm = vm_id if vm_id else '' - content = '#### VM network info collected when logged into vm {}via {} ' \ - '####'.format(vm, vm_ssh.host) - for cmd in ('ip addr', 'ip neigh', 'ip route'): - output = vm_ssh.exec_cmd(cmd, get_exit_code=False)[1] - content += '\nSent: {}\nOutput:\n{}\n'.format(cmd, output) - - if sep_file: - common.write_to_file(sep_file, content=content) - - -def ping_ext_from_vm(from_vm, ext_ip=None, user=None, password=None, - prompt=None, con_ssh=None, natbox_client=None, - num_pings=5, timeout=30, fail_ok=False, vm_ip=None, - use_fip=False): - if ext_ip is None: - ext_ip = EXT_IP - - with ssh_to_vm_from_natbox(vm_id=from_vm, username=user, password=password, - natbox_client=natbox_client, - prompt=prompt, con_ssh=con_ssh, vm_ip=vm_ip, - use_fip=use_fip) as from_vm_ssh: - from_vm_ssh.exec_cmd('ip addr', get_exit_code=False) - return network_helper.ping_server(ext_ip, ssh_client=from_vm_ssh, - num_pings=num_pings, - timeout=timeout, fail_ok=fail_ok)[0] - - -def scp_to_vm_from_natbox(vm_id, source_file, dest_file, timeout=60, - validate=True, natbox_client=None, sha1sum=None): - """ - scp a file to a vm from natbox - the file must be located in the natbox - the natbox must has connectivity to the VM - - Args: - vm_id (str): vm to scp to - source_file (str): full pathname to the source file - dest_file (str): destination full pathname in the VM - timeout (int): scp timeout - validate (bool): verify src and dest sha1sum - natbox_client (NATBoxClient|None): - sha1sum (str|None): validates the source file prior to operation, - or None, only checked if validate=True - - Returns (None): - - """ - if natbox_client is None: - natbox_client = NATBoxClient.get_natbox_client() - - LOG.info("scp-ing from {} to VM {}".format(natbox_client.host, vm_id)) - - tmp_loc = '/tmp' - fname = os.path.basename(os.path.normpath(source_file)) - - # ensure source file exists - natbox_client.exec_cmd('test -f {}'.format(source_file), fail_ok=False) - - # calculate sha1sum - src_sha1 = None - if validate: - src_sha1 = natbox_client.exec_cmd('sha1sum {}'.format(source_file), - fail_ok=False)[1] - src_sha1 = src_sha1.split(' ')[0] - LOG.info("src: {}, sha1sum: {}".format(source_file, src_sha1)) - if sha1sum is not None and src_sha1 != sha1sum: - raise ValueError( - "src sha1sum validation failed {} != {}".format(src_sha1, - sha1sum)) - - with ssh_to_vm_from_natbox(vm_id) as vm_ssh: - vm_ssh.exec_cmd('mkdir -p {}'.format(tmp_loc)) - vm_ssh.scp_on_dest(natbox_client.user, natbox_client.host, source_file, - '/'.join([tmp_loc, fname]), natbox_client.password, - timeout=timeout) - - # `mv $s $d` fails if $s == $d - if os.path.normpath(os.path.join(tmp_loc, fname)) != os.path.normpath( - dest_file): - vm_ssh.exec_sudo_cmd( - 'mv -f {} {}'.format('/'.join([tmp_loc, fname]), dest_file), - fail_ok=False) - - # ensure destination file exists - vm_ssh.exec_sudo_cmd('test -f {}'.format(dest_file), fail_ok=False) - - # validation - if validate: - dest_sha1 = vm_ssh.exec_sudo_cmd( - 'sha1sum {}'.format(dest_file), fail_ok=False)[1] - dest_sha1 = dest_sha1.split(' ')[0] - LOG.info("dst: {}, sha1sum: {}".format(dest_file, dest_sha1)) - if src_sha1 != dest_sha1: - raise ValueError( - "dst sha1sum validation failed {} != {}".format(src_sha1, - dest_sha1)) - LOG.info("scp completed successfully") - - -def scp_to_vm(vm_id, source_file, dest_file, timeout=60, validate=True, - source_ssh=None, natbox_client=None): - """ - scp a file from any SSHClient to a VM - 
since not all SSHClient's has connectivity to the VM, this function scps - the source file to natbox first - - Args: - vm_id (str): vm to scp to - source_file (str): full pathname to the source file - dest_file (str): destination path in the VM - timeout (int): scp timeout - validate (bool): verify src and dest sha1sum - source_ssh (SSHClient|None): the source ssh session, or None to use - 'localhost' - natbox_client (NATBoxClient|None): - - Returns (None): - - """ - if not natbox_client: - natbox_client = NATBoxClient.get_natbox_client() - - close_source = False - if not source_ssh: - source_ssh = LocalHostClient() - source_ssh.connect() - close_source = True - - try: - # scp-ing from natbox, forward the call - if source_ssh.host == natbox_client.host: - return scp_to_vm_from_natbox(vm_id, source_file, dest_file, timeout, - validate, natbox_client=natbox_client) - - LOG.info("scp-ing from {} to natbox {}".format(source_ssh.host, - natbox_client.host)) - tmp_loc = '~' - fname = os.path.basename(os.path.normpath(source_file)) - - # ensure source file exists - source_ssh.exec_cmd('test -f {}'.format(source_file), fail_ok=False) - - # calculate sha1sum - if validate: - src_sha1 = source_ssh.exec_cmd('sha1sum {}'.format(source_file), - fail_ok=False)[1] - src_sha1 = src_sha1.split(' ')[0] - LOG.info("src: {}, sha1sum: {}".format(source_file, src_sha1)) - else: - src_sha1 = None - - # scp to natbox - # natbox_client.exec_cmd('mkdir -p {}'.format(tmp_loc)) - source_ssh.scp_on_source( - source_file, natbox_client.user, natbox_client.host, tmp_loc, - natbox_client.password, timeout=timeout) - - return scp_to_vm_from_natbox( - vm_id, '/'.join([tmp_loc, fname]), dest_file, timeout, validate, - natbox_client=natbox_client, sha1sum=src_sha1) - - finally: - if close_source: - source_ssh.close() - - -@contextmanager -def ssh_to_vm_from_natbox(vm_id, vm_image_name=None, username=None, - password=None, prompt=None, - timeout=VMTimeout.SSH_LOGIN, natbox_client=None, - con_ssh=None, vm_ip=None, - vm_ext_port=None, use_fip=False, retry=True, - retry_timeout=120, close_ssh=True, - auth_info=Tenant.get('admin')): - """ - ssh to a vm from natbox. - - Args: - vm_id (str): vm to ssh to - vm_image_name (str): such as cgcs-guest, tis-centos-guest, ubuntu_14 - username (str): - password (str): - prompt (str): - timeout (int): - natbox_client (NATBoxClient): - con_ssh (SSHClient): ssh connection to TiS active controller - vm_ip (str): ssh to this ip from NatBox if given - vm_ext_port (str): port forwarding rule external port. If given this - port will be used. vm_ip must be external - router ip address. - use_fip (bool): Whether to ssh to floating ip if a vm has one - associated. Not applicable if vm_ip is given. 
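Both scp helpers rest on the same end-to-end check: hash the file on each side and compare digests. A local-filesystem sketch of that check; the helpers run `sha1sum` over ssh instead of hashing locally.

    import hashlib

    def sha1sum(path, chunk_size=65536):
        """Stream the file so large images are not loaded into memory at once."""
        digest = hashlib.sha1()
        with open(path, 'rb') as f:
            for chunk in iter(lambda: f.read(chunk_size), b''):
                digest.update(chunk)
        return digest.hexdigest()

    def verify_copy(src_path, dest_path):
        src, dest = sha1sum(src_path), sha1sum(dest_path)
        if src != dest:
            raise ValueError(
                'sha1sum validation failed {} != {}'.format(src, dest))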
- retry (bool): whether or not to retry if fails to connect - retry_timeout (int): max time to retry - close_ssh - auth_info (dict|None) - - Yields (VMSSHClient): - ssh client of the vm - - Examples: - with ssh_to_vm_from_natbox(vm_id=) as vm_ssh: - vm_ssh.exec_cmd(cmd) - - """ - if vm_image_name is None: - vm_image_name = get_vm_image_name(vm_id=vm_id, con_ssh=con_ssh, - auth_info=auth_info).strip().lower() - - if vm_ip is None: - if use_fip: - vm_ip = network_helper.get_external_ips_for_vms( - vms=vm_id, con_ssh=con_ssh, auth_info=auth_info)[0] - else: - vm_ip = network_helper.get_mgmt_ips_for_vms( - vms=vm_id, con_ssh=con_ssh, auth_info=auth_info)[0] - - if not natbox_client: - natbox_client = NATBoxClient.get_natbox_client() - - try: - vm_ssh = VMSSHClient(natbox_client=natbox_client, vm_ip=vm_ip, - vm_ext_port=vm_ext_port, - vm_img_name=vm_image_name, user=username, - password=password, prompt=prompt, - timeout=timeout, retry=retry, - retry_timeout=retry_timeout) - - except (exceptions.TiSError, pexpect.ExceptionPexpect): - LOG.warning( - 'Failed to ssh to VM {}! Collecting vm console log'.format(vm_id)) - get_console_logs(vm_ids=vm_id) - raise - - try: - yield vm_ssh - finally: - if close_ssh: - vm_ssh.close() - - -def get_vm_pid(instance_name, host_ssh): - """ - Get instance pid on its host. - - Args: - instance_name: instance name of a vm - host_ssh: ssh for the host of the given instance - - Returns (str): pid of a instance on its host - - """ - code, vm_pid = host_ssh.exec_sudo_cmd( - "ps aux | grep --color='never' {} | grep -v grep | awk '{{print $2}}'". - format(instance_name)) - if code != 0: - raise exceptions.SSHExecCommandFailed( - "Failed to get pid for vm: {}".format(instance_name)) - - if not vm_pid: - LOG.warning("PID for {} is not found on host!".format(instance_name)) - - return vm_pid - - -class VMInfo: - """ - class for storing and retrieving information for specific VM using - openstack admin. - - Notes: Do not use this class for vm actions, such as boot, delete, - migrate, etc as these actions should be done by - tenants. 
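ssh_to_vm_from_natbox is a generator-based context manager: connect, yield the session, and guarantee cleanup. A stripped-down version of the same shape built on paramiko (which this repo already pins in its Pipfile); host and credentials are placeholders, and the real helper additionally hops through the NAT box and collects console logs on connection failure.

    from contextlib import contextmanager

    import paramiko

    @contextmanager
    def ssh_session(host, username, password, close_ssh=True):
        client = paramiko.SSHClient()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        client.connect(host, username=username, password=password, timeout=60)
        try:
            yield client                 # body of the `with` block runs here
        finally:
            if close_ssh:
                client.close()           # always runs, even on exceptions

    # usage mirrors the helper:
    # with ssh_session('10.10.10.3', 'stx', 'secret') as ssh:
    #     ssh.exec_command('ip addr')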
- """ - __instances = {} - active_controller_ssh = None - - def __init__(self, vm_id, con_ssh=None, auth_info=Tenant.get('admin')): - """ - - Args: - vm_id: - con_ssh: floating controller ssh for the system - - Returns: - - """ - if con_ssh is None: - con_ssh = ControllerClient.get_active_controller() - VMInfo.active_controller_ssh = con_ssh - self.vm_id = vm_id - self.con_ssh = con_ssh - self.auth_info = auth_info - self.initial_table_ = table_parser.table( - cli.openstack('server show', vm_id, ssh_client=con_ssh, - auth_info=self.auth_info, timeout=60)[1]) - self.table_ = self.initial_table_ - self.name = table_parser.get_value_two_col_table(self.initial_table_, - 'name', strict=True) - self.tenant_id = table_parser.get_value_two_col_table( - self.initial_table_, 'project_id') - self.user_id = table_parser.get_value_two_col_table(self.initial_table_, - 'user_id') - self.boot_info = self.__get_boot_info() - self.flavor_table = None - VMInfo.__instances[ - vm_id] = self # add instance to class variable for tracking - - def refresh_table(self): - self.table_ = table_parser.table( - cli.openstack('server show', self.vm_id, ssh_client=self.con_ssh, - auth_info=self.auth_info, timeout=60)[1]) - - def get_host_name(self): - self.refresh_table() - return table_parser.get_value_two_col_table(table_=self.table_, - field=':host', strict=False) - - def get_flavor_id(self): - """ - - Returns: (dict) {'name': flavor_name, 'id': flavor_id} - - """ - flavor = table_parser.get_value_two_col_table(self.table_, 'flavor') - flavor_id = re.findall(r'\((.*)\)', flavor)[0] - return flavor_id - - def refresh_flavor_table(self): - flavor_id = self.get_flavor_id() - self.flavor_table = table_parser.table( - cli.openstack('flavor show', flavor_id, ssh_client=self.con_ssh, - auth_info=Tenant.get('admin'))[1]) - return self.flavor_table - - def __get_boot_info(self): - return _get_boot_info(table_=self.table_, vm_id=self.vm_id, - auth_info=self.auth_info, - con_ssh=self.con_ssh) - - def get_storage_type(self): - table_ = self.flavor_table - if not table_: - table_ = self.refresh_flavor_table() - extra_specs = table_parser.get_value_two_col_table(table_, 'properties', - merge_lines=True) - extra_specs = table_parser.convert_value_to_dict(value=extra_specs) - return extra_specs.get(FlavorSpec.STORAGE_BACKING, None) - - def has_local_disks(self): - if self.boot_info['type'] == 'image': - return True - - table_ = self.flavor_table - if not table_: - table_ = self.refresh_flavor_table() - swap = table_parser.get_value_two_col_table(table_, 'swap') - ephemeral = table_parser.get_value_two_col_table(table_, 'ephemeral', - strict=False) - return bool(swap or int(ephemeral)) - - @classmethod - def get_vms_info(cls): - return tuple(cls.__instances) - - @classmethod - def get_vm_info(cls, vm_id, con_ssh=None): - if vm_id not in cls.__instances: - if vm_id in get_all_vms(con_ssh=con_ssh): - return cls(vm_id, con_ssh) - else: - raise exceptions.VMError( - "VM with id {} does not exist!".format(vm_id)) - instance = cls.__instances[vm_id] - instance.refresh_table() - return instance - - @classmethod - def remove_instance(cls, vm_id): - cls.__instances.pop(vm_id, default="No instance found") - - -def delete_vms(vms=None, delete_volumes=True, check_first=True, - timeout=VMTimeout.DELETE, fail_ok=False, - stop_first=True, con_ssh=None, auth_info=Tenant.get('admin'), - remove_cleanup=None): - """ - Delete given vm(s) (and attached volume(s)). If None vms given, all vms - on the system will be deleted. 
- - Args: - vms (list|str): list of vm ids to be deleted. If string input, - assume only one vm id is provided. - check_first (bool): Whether to check if given vm(s) exist on system - before attempt to delete - timeout (int): Max time to wait for delete cli finish and wait for - vms actually disappear from system - delete_volumes (bool): delete attached volume(s) if set to True - fail_ok (bool): - stop_first (bool): whether to stop active vm(s) first before - deleting. Best effort only - con_ssh (SSHClient): - auth_info (dict): - remove_cleanup (None|str): remove from vm cleanup list if deleted - successfully - - Returns (tuple): (rtn_code(int), msg(str)) # rtn_code 1,2,3 only returns - when fail_ok=True - (-1, 'No vm(s) to delete.') # "Empty vm list/string provided and - no vm exist on system. - (-1, 'None of the given vm(s) exists on system.') - (0, "VM(s) deleted successfully.") - (1, ) # delete vm(s) cli returns stderr, some or all vms - failed to delete. - (2, "VMs deletion cmd all accepted, but some vms still exist after - deletion") - - """ - existing_vms = None - if not vms: - vms = get_vms(con_ssh=con_ssh, auth_info=auth_info, all_projects=True, - long=False) - existing_vms = list(vms) - elif isinstance(vms, str): - vms = [vms] - - vms = [vm for vm in vms if vm] - if not vms: - LOG.warning( - "Empty vm list/string provided and no vm exist on system. Do " - "Nothing") - return -1, 'No vm(s) to delete.' - - if check_first: - if existing_vms is None: - existing_vms = get_vms(con_ssh=con_ssh, auth_info=auth_info, - all_projects=True, long=False) - - vms = list(set(vms) & set(existing_vms)) - if not vms: - LOG.info("None given vms exist on system. Do nothing") - return -1, 'None of the given vm(s) exists on system.' - - if stop_first: # best effort only - active_vms = get_vms(vms=vms, auth_info=auth_info, con_ssh=con_ssh, - all_projects=True, - Status=VMStatus.ACTIVE) - if active_vms: - stop_vms(active_vms, fail_ok=True, con_ssh=con_ssh, - auth_info=auth_info) - - vols_to_del = [] - if delete_volumes: - vols_to_del = cinder_helper.get_volumes_attached_to_vms( - vms=vms, auth_info=auth_info, con_ssh=con_ssh) - - LOG.info("Deleting vm(s): {}".format(vms)) - vms_accepted = [] - deletion_err = '' - for vm in vms: - # Deleting vm one by one due to the cmd will stop if a failure is - # encountered, causing no attempt to delete - # other vms - code, output = cli.openstack('server delete', vm, ssh_client=con_ssh, - fail_ok=True, auth_info=auth_info, - timeout=timeout) - if code > 0: - deletion_err += '{}\n'.format(output) - else: - vms_accepted.append(vm) - - # check if vms are actually removed from nova list - all_deleted, vms_undeleted = _wait_for_vms_deleted(vms_accepted, - fail_ok=True, - auth_info=auth_info, - timeout=timeout, - con_ssh=con_ssh) - if remove_cleanup: - vms_deleted = list(set(vms_accepted) - set(vms_undeleted)) - ResourceCleanup.remove('vm', vms_deleted, scope=remove_cleanup, - del_vm_vols=False) - - # Delete volumes results will not be returned. Best effort only. 
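delete_vms deliberately issues one `openstack server delete` per vm: a single multi-id invocation aborts at the first failure and never attempts the rest. The loop reduces to this pattern, where `delete_fn` stands in for the cli call and returns (code, output):

    def delete_each(ids, delete_fn):
        """Try every id even when some deletions are rejected."""
        accepted, errors = [], ''
        for item_id in ids:
            code, output = delete_fn(item_id)
            if code > 0:
                errors += '{}\n'.format(output)   # keep going; report later
            else:
                accepted.append(item_id)
        return accepted, errors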
- if delete_volumes: - res = cinder_helper.delete_volumes(vols_to_del, fail_ok=True, - auth_info=auth_info, - con_ssh=con_ssh)[0] - if res == 0 and remove_cleanup: - ResourceCleanup.remove('volume', vols_to_del, scope=remove_cleanup) - - # Process returns - if deletion_err: - LOG.warning(deletion_err) - if fail_ok: - return 1, deletion_err - raise exceptions.CLIRejected(deletion_err) - - if vms_undeleted: - msg = 'VM(s) still exsit after deletion: {}'.format(vms_undeleted) - LOG.warning(msg) - if fail_ok: - return 2, msg - raise exceptions.VMPostCheckFailed(msg) - - LOG.info("VM(s) deleted successfully: {}".format(vms)) - return 0, "VM(s) deleted successfully." - - -def _wait_for_vms_deleted(vms, timeout=VMTimeout.DELETE, fail_ok=True, - check_interval=3, con_ssh=None, - auth_info=Tenant.get('admin')): - """ - Wait for specific vm to be removed from nova list - - Args: - vms (str|list): list of vms' ids - timeout (int): in seconds - fail_ok (bool): - check_interval (int): - con_ssh (SSHClient|None): - auth_info (dict|None): - - Returns (tuple): (result(bool), vms_failed_to_delete(list)) - - """ - if isinstance(vms, str): - vms = [vms] - - vms_to_check = list(vms) - end_time = time.time() + timeout - while time.time() < end_time: - try: - vms_to_check = get_vms(vms=vms_to_check, con_ssh=con_ssh, - auth_info=auth_info) - except exceptions.CLIRejected: - pass - - if not vms_to_check: - return True, [] - time.sleep(check_interval) - - if fail_ok: - return False, vms_to_check - raise exceptions.VMPostCheckFailed( - "Some vm(s) are not removed from nova list within {} seconds: {}". - format(timeout, vms_to_check)) - - -def wait_for_vms_values(vms, header='Status', value=VMStatus.ACTIVE, - timeout=VMTimeout.STATUS_CHANGE, fail_ok=True, - check_interval=3, con_ssh=None, - auth_info=Tenant.get('admin')): - """ - Wait for specific vms to reach any of the given state(s) in openstack - server list - - Args: - vms (str|list): id(s) of vms to check - header (str): target header in nova list - value (str|list): expected value(s) - timeout (int): in seconds - fail_ok (bool): - check_interval (int): - con_ssh (SSHClient|None): - auth_info (dict|None): - - Returns (list): [result(bool), vms_in_state(dict), - vms_failed_to_reach_state(dict)] - - """ - if isinstance(vms, str): - vms = [vms] - - if isinstance(value, str): - value = [value] - - res_fail = res_pass = None - end_time = time.time() + timeout - while time.time() < end_time: - res_pass = {} - res_fail = {} - vms_values = get_vms(vms=vms, con_ssh=con_ssh, auth_info=auth_info, - field=header) - for i in range(len(vms)): - vm = vms[i] - vm_value = vms_values[i] - if vm_value in value: - res_pass[vm] = vm_value - else: - res_fail[vm] = vm_value - - if not res_fail: - return True, res_pass, res_fail - - time.sleep(check_interval) - - fail_msg = "Some vm(s) did not reach given status from nova list within " \ - "{} seconds: {}".format(timeout, res_fail) - if fail_ok: - LOG.warning(fail_msg) - return False, res_pass, res_fail - raise exceptions.VMPostCheckFailed(fail_msg) - - -def set_vm_state(vm_id, check_first=False, error_state=True, fail_ok=False, - auth_info=Tenant.get('admin'), - con_ssh=None): - """ - Set vm state to error or active via nova reset-state. 
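Both waiters above are instances of one poll-until-deadline idiom. Factored out, it looks like the sketch below; `check` is any zero-argument callable, for example `lambda: not get_vms(vms=pending)` (hypothetical usage).

    import time

    def wait_for(check, timeout, interval=3):
        """Poll `check` until it returns a truthy value or `timeout` elapses."""
        end_time = time.time() + timeout
        while time.time() < end_time:
            result = check()
            if result:
                return True, result
            time.sleep(interval)
        return False, None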
- - Args: - vm_id: - check_first: - error_state: - fail_ok: - auth_info: - con_ssh: - - Returns (tuple): - - """ - expt_vm_status = VMStatus.ERROR if error_state else VMStatus.ACTIVE - LOG.info("Setting vm {} state to: {}".format(vm_id, expt_vm_status)) - - if check_first: - pre_vm_status = get_vm_values(vm_id, fields='status', con_ssh=con_ssh, - auth_info=auth_info)[0] - if pre_vm_status.lower() == expt_vm_status.lower(): - msg = "VM {} already in {} state. Do nothing.".format(vm_id, - pre_vm_status) - LOG.info(msg) - return -1, msg - - code, out = set_vm(vm_id=vm_id, state=expt_vm_status, con_ssh=con_ssh, - auth_info=auth_info, fail_ok=fail_ok) - if code > 0: - return 1, out - - result = wait_for_vm_status(vm_id, expt_vm_status, fail_ok=fail_ok) - if result is None: - msg = "VM {} did not reach expected state - {} after " \ - "reset-state.".format(vm_id, expt_vm_status) - LOG.warning(msg) - return 2, msg - - msg = "VM state is successfully set to: {}".format(expt_vm_status) - LOG.info(msg) - return 0, msg - - -def reboot_vm(vm_id, hard=False, fail_ok=False, con_ssh=None, auth_info=None, - cli_timeout=CMDTimeout.REBOOT_VM, - reboot_timeout=VMTimeout.REBOOT): - """ - reboot vm via openstack server reboot - Args: - vm_id: - hard (bool): hard or soft reboot - fail_ok: - con_ssh: - auth_info: - cli_timeout: - reboot_timeout: - - Returns (tuple): - - """ - vm_status = get_vm_status(vm_id, con_ssh=con_ssh) - if not vm_status.lower() == 'active': - LOG.warning( - "VM is not in active state before rebooting. VM status: {}".format( - vm_status)) - - extra_arg = '--hard ' if hard else '' - arg = "{}{}".format(extra_arg, vm_id) - - date_format = "%Y%m%d %T" - start_time = common.get_date_in_format(date_format=date_format) - code, output = cli.openstack('server reboot', arg, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info, - timeout=cli_timeout) - if code > 0: - return 1, output - - # expt_reboot = VMStatus.HARD_REBOOT if hard else VMStatus.SOFT_REBOOT - # _wait_for_vm_status(vm_id, expt_reboot, check_interval=0, fail_ok=False) - LOG.info("Wait for vm reboot events to appear in fm event-list") - expt_reason = 'hard-reboot' if hard else 'soft-reboot' - system_helper.wait_for_events( - timeout=30, num=10, entity_instance_id=vm_id, - start=start_time, fail_ok=False, strict=False, - **{'Event Log ID': EventLogID.REBOOT_VM_ISSUED, - 'Reason Text': expt_reason}) - - system_helper.wait_for_events( - timeout=reboot_timeout, num=10, entity_instance_id=vm_id, - start=start_time, fail_ok=False, - **{'Event Log ID': EventLogID.REBOOT_VM_COMPLETE}) - - LOG.info("Check vm status from nova show") - actual_status = wait_for_vm_status(vm_id, - [VMStatus.ACTIVE, VMStatus.ERROR], - fail_ok=fail_ok, con_ssh=con_ssh, - timeout=30) - if not actual_status: - msg = "VM {} did not reach active state after reboot.".format(vm_id) - LOG.warning(msg) - return 2, msg - - if actual_status.lower() == VMStatus.ERROR.lower(): - msg = "VM is in error state after reboot." - if fail_ok: - LOG.warning(msg) - return 3, msg - raise exceptions.VMPostCheckFailed(msg) - - succ_msg = "VM rebooted successfully." 
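reboot_vm verifies the reboot through fm events rather than vm status alone: it records a start timestamp, then waits for the reboot-issued and reboot-complete events newer than that stamp. A generic sketch of that time-fenced wait; `read_events` is a hypothetical callable yielding (epoch_seconds, text) pairs, not the framework's system_helper API.

    import time

    def wait_for_event(read_events, pattern, start_time, timeout=60, interval=3):
        """True once `pattern` appears in an event newer than start_time."""
        deadline = time.time() + timeout
        while time.time() < deadline:
            for stamp, text in read_events():
                if stamp >= start_time and pattern in text:
                    return True
            time.sleep(interval)
        return False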
- LOG.info(succ_msg) - return 0, succ_msg - - -def __perform_vm_action(vm_id, action, expt_status, - timeout=VMTimeout.STATUS_CHANGE, fail_ok=False, - con_ssh=None, - auth_info=None): - LOG.info("{} vm {} begins...".format(action, vm_id)) - code, output = cli.nova(action, vm_id, ssh_client=con_ssh, fail_ok=fail_ok, - auth_info=auth_info, timeout=120) - - if code == 1: - return 1, output - - actual_status = wait_for_vm_status(vm_id, [expt_status, VMStatus.ERROR], - fail_ok=fail_ok, con_ssh=con_ssh, - timeout=timeout) - - if not actual_status: - msg = "VM {} did not reach expected state {} after {}.".format( - vm_id, expt_status, action) - LOG.warning(msg) - return 2, msg - - if actual_status.lower() == VMStatus.ERROR.lower(): - msg = "VM is in error state after {}.".format(action) - if fail_ok: - LOG.warning(msg) - return 3, msg - raise exceptions.VMPostCheckFailed(msg) - - succ_msg = "{} VM succeeded.".format(action) - LOG.info(succ_msg) - return 0, succ_msg - - -def suspend_vm(vm_id, timeout=VMTimeout.STATUS_CHANGE, fail_ok=False, - con_ssh=None, auth_info=None): - return __perform_vm_action(vm_id, 'suspend', VMStatus.SUSPENDED, - timeout=timeout, fail_ok=fail_ok, - con_ssh=con_ssh, auth_info=auth_info) - - -def resume_vm(vm_id, timeout=VMTimeout.STATUS_CHANGE, fail_ok=False, - con_ssh=None, auth_info=None): - return __perform_vm_action(vm_id, 'resume', VMStatus.ACTIVE, - timeout=timeout, fail_ok=fail_ok, - con_ssh=con_ssh, - auth_info=auth_info) - - -def pause_vm(vm_id, timeout=VMTimeout.PAUSE, fail_ok=False, con_ssh=None, - auth_info=None): - return __perform_vm_action(vm_id, 'pause', VMStatus.PAUSED, timeout=timeout, - fail_ok=fail_ok, con_ssh=con_ssh, - auth_info=auth_info) - - -def unpause_vm(vm_id, timeout=VMTimeout.STATUS_CHANGE, fail_ok=False, - con_ssh=None, auth_info=None): - return __perform_vm_action(vm_id, 'unpause', VMStatus.ACTIVE, - timeout=timeout, fail_ok=fail_ok, - con_ssh=con_ssh, - auth_info=auth_info) - - -def stop_vms(vms, timeout=VMTimeout.STATUS_CHANGE, fail_ok=False, con_ssh=None, - auth_info=None): - return _start_or_stop_vms(vms, 'stop', VMStatus.STOPPED, timeout, - check_interval=1, fail_ok=fail_ok, - con_ssh=con_ssh, auth_info=auth_info) - - -def start_vms(vms, timeout=VMTimeout.STATUS_CHANGE, fail_ok=False, con_ssh=None, - auth_info=None): - return _start_or_stop_vms(vms, 'start', VMStatus.ACTIVE, timeout, - check_interval=1, fail_ok=fail_ok, - con_ssh=con_ssh, auth_info=auth_info) - - -def _start_or_stop_vms(vms, action, expt_status, - timeout=VMTimeout.STATUS_CHANGE, check_interval=3, - fail_ok=False, - con_ssh=None, auth_info=None): - LOG.info("{}ing vms {}...".format(action, vms)) - action = action.lower() - if isinstance(vms, str): - vms = [vms] - - # Not using openstack client due to stop will be aborted at first - # failure, without continue processing other vms - code, output = cli.nova(action, ' '.join(vms), ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - - vms_to_check = list(vms) - if code == 1: - vms_to_check = re.findall( - NovaCLIOutput.VM_ACTION_ACCEPTED.format(action), output) - if not vms_to_check: - return 1, output - - res_bool, res_pass, res_fail = wait_for_vms_values( - vms_to_check, 'Status', [expt_status, VMStatus.ERROR], - fail_ok=fail_ok, check_interval=check_interval, con_ssh=con_ssh, - timeout=timeout) - - if not res_bool: - msg = "Some VM(s) did not reach expected state(s) - {}. 
Actual " \ - "states: {}".format(expt_status, res_fail) - LOG.warning(msg) - return 2, msg - - error_vms = [vm_id for vm_id in vms_to_check if - res_pass[vm_id].lower() == VMStatus.ERROR.lower()] - if error_vms: - msg = "Some VM(s) in error state after {}: {}".format(action, error_vms) - if fail_ok: - LOG.warning(msg) - return 3, msg - raise exceptions.VMPostCheckFailed(msg) - - succ_msg = "Action {} performed successfully on vms.".format(action) - LOG.info(succ_msg) - return 0, succ_msg - - -def rebuild_vm(vm_id, image_id=None, new_name=None, preserve_ephemeral=None, - fail_ok=False, con_ssh=None, - auth_info=Tenant.get('admin'), **metadata): - if image_id is None: - image_id = glance_helper.get_image_id_from_name( - GuestImages.DEFAULT['guest'], strict=True) - - args = '{} {}'.format(vm_id, image_id) - - if new_name: - args += ' --name {}'.format(new_name) - - if preserve_ephemeral: - args += ' --preserve-ephemeral' - - for key, value in metadata.items(): - args += ' --meta {}={}'.format(key, value) - - LOG.info("Rebuilding vm {}".format(vm_id)) - # Some features such as trusted image cert not available with openstack - # client - code, output = cli.nova('rebuild', args, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - if code == 1: - return code, output - - LOG.info("Check vm status after vm rebuild") - wait_for_vm_status(vm_id, status=VMStatus.ACTIVE, fail_ok=fail_ok, - con_ssh=con_ssh) - actual_status = wait_for_vm_status(vm_id, [VMStatus.ACTIVE, VMStatus.ERROR], - fail_ok=fail_ok, con_ssh=con_ssh, - timeout=VMTimeout.REBUILD) - - if not actual_status: - msg = "VM {} did not reach active state after rebuild.".format(vm_id) - LOG.warning(msg) - return 2, msg - - if actual_status.lower() == VMStatus.ERROR.lower(): - msg = "VM is in error state after rebuild." - if fail_ok: - LOG.warning(msg) - return 3, msg - raise exceptions.VMPostCheckFailed(msg) - - succ_msg = "VM rebuilded successfully." 
- LOG.info(succ_msg) - return 0, succ_msg - - -def get_vm_numa_nodes_via_ps(vm_id=None, instance_name=None, host=None, - con_ssh=None, auth_info=Tenant.get('admin'), - per_vcpu=False): - """ - Get numa nodes VM is currently on - Args: - vm_id: - instance_name: - host: - con_ssh: - auth_info: - per_vcpu (bool): if True, return per vcpu, e.g., if vcpu=0,1,2, - returned list will have same length [0,1,0] - - Returns (list): e.g., [0], [0, 1] - - """ - if not instance_name or not host: - if not vm_id: - raise ValueError('vm_id has to be provided') - instance_name, host = get_vm_values(vm_id, - fields=[":instance_name", ":host"], - strict=False, con_ssh=con_ssh, - auth_info=auth_info) - - with host_helper.ssh_to_host(host, con_ssh=con_ssh) as host_ssh: - vcpu_cpu_map = get_vcpu_cpu_map(instance_names=instance_name, - host_ssh=host_ssh, con_ssh=con_ssh)[ - instance_name] - cpus = [] - for i in range(len(vcpu_cpu_map)): - cpus.append(vcpu_cpu_map[i]) - - cpu_non_dup = sorted(list(set(cpus))) - grep_str = ' '.join( - ['-e "processor.*: {}$"'.format(cpu) for cpu in cpu_non_dup]) - cmd = 'cat /proc/cpuinfo | grep -A 10 {} | grep --color=never ' \ - '"physical id"'.format(grep_str) - physical_ids = host_ssh.exec_cmd(cmd, fail_ok=False)[1].splitlines() - physical_ids = [int(proc.split(sep=':')[-1].strip()) for proc in - physical_ids if 'physical' in proc] - if per_vcpu: - physical_ids = [physical_ids[cpu_non_dup.index(cpu)] for cpu in - cpus] - - return physical_ids - - -def get_vm_host_and_numa_nodes(vm_id, con_ssh=None, per_vcpu=False): - """ - Get vm host and numa nodes used for the vm on the host - Args: - vm_id (str): - con_ssh (SSHClient): - per_vcpu (bool): if True, return numa nodes per vcpu, e.g., vcpu=0,1, - 2, returned list can be: [0,1,0] - - Returns (tuple): ( (str), (list of integers)) - - """ - instance_name, host = get_vm_values(vm_id, - fields=[":instance_name", ":host"], - strict=False) - actual_node_vals = get_vm_numa_nodes_via_ps(vm_id=vm_id, - instance_name=instance_name, - host=host, con_ssh=con_ssh, - per_vcpu=per_vcpu) - - return host, actual_node_vals - - -def perform_action_on_vm(vm_id, action, auth_info=Tenant.get('admin'), - con_ssh=None, **kwargs): - """ - Perform action on a given vm. - - Args: - vm_id (str): - action (str): action to perform on vm. Valid_actions: 'start', - 'stop', 'suspend', 'resume', 'pause', 'unpause', - 'reboot', 'live_migrate', or 'cold_migrate' - auth_info (dict): - con_ssh (SSHClient): - **kwargs: extra params to pass to action function, - e.g.destination_host='compute-0' when action is live_migrate - - Returns (None): - - """ - action_function_map = { - 'start': start_vms, - 'stop': stop_vms, - 'suspend': suspend_vm, - 'resume': resume_vm, - 'pause': pause_vm, - 'unpause': unpause_vm, - 'reboot': reboot_vm, - 'rebuild': rebuild_vm, - 'live_migrate': live_migrate_vm, - 'cold_migrate': cold_migrate_vm, - 'cold_mig_revert': cold_migrate_vm, - } - if not vm_id: - raise ValueError("vm id is not provided.") - - valid_actions = list(action_function_map.keys()) - action = action.lower().replace(' ', '_') - if action not in valid_actions: - raise ValueError( - "Invalid action provided: {}. 
Valid actions: {}".format( - action, valid_actions)) - - if action == 'cold_mig_revert': - kwargs['revert'] = True - - return action_function_map[action](vm_id, con_ssh=con_ssh, - auth_info=auth_info, **kwargs) - - -def get_vm_nics_info(vm_id, network=None, vnic_type=None, rtn_dict=False): - """ - Get vm nics info - Args: - vm_id: - network: - vnic_type: - rtn_dict: - - Returns (list of dict|dict of dict): - list or dict (port as key) of port_info_dict. Each port_info_dict - contains following info: - { - 'port_id': , - 'network': , - 'network_id': , - 'vnic_type': , - 'mac_address': , - 'subnet_id': , - 'subnet_cidr': - } - - """ - vm_ports, vm_macs, vm_ips_info = network_helper.get_ports( - server=vm_id, network=network, - field=('ID', 'MAC Address', 'Fixed IP Addresses')) - vm_subnets = [] - vm_ips = [] - for ip_info in vm_ips_info: - ip_info = ip_info[0] - vm_ips.append(ip_info.get('ip_address')) - vm_subnets.append(ip_info.get('subnet_id')) - - indexes = list(range(len(vm_ports))) - vnic_types = [] - vm_net_ids = [] - for port in vm_ports: - port_vnic_type, port_net_id = network_helper.get_port_values( - port=port, fields=('binding_vnic_type', 'network_id')) - vnic_types.append(port_vnic_type) - vm_net_ids.append(port_net_id) - if vnic_type and vnic_type != port_vnic_type: - indexes.remove(list(vm_ports).index(port)) - - vm_net_names = [] - ids_, names_, = network_helper.get_networks(field=('ID', 'Name'), - strict=False) - for net_id in vm_net_ids: - vm_net_names.append(names_[ids_.index(net_id)]) - - res_dict = {} - res = [] - for i in indexes: - port_dict = { - 'port_id': vm_ports[i], - 'network': vm_net_names[i], - 'network_id': vm_net_ids[i], - 'vnic_type': vnic_types[i], - 'mac_address': vm_macs[i], - 'ip_address': vm_ips[i] - } - if rtn_dict: - res_dict[vm_ports[i]] = port_dict - else: - res.append(port_dict) - - return res_dict if rtn_dict else res - - -def get_vm_interfaces_via_virsh(vm_id, con_ssh=None): - """ - - Args: - vm_id: - con_ssh: - - Returns (list of tuple): - [(mac_0, vif_model_0)...] - - """ - vm_host = get_vm_host(vm_id=vm_id, con_ssh=con_ssh) - inst_name = get_vm_instance_name(vm_id=vm_id, con_ssh=con_ssh) - - vm_ifs = [] - with host_helper.ssh_to_host(vm_host, con_ssh=con_ssh) as host_ssh: - output = host_ssh.exec_sudo_cmd('virsh domiflist {}'.format(inst_name), - fail_ok=False)[1] - if_lines = output.split('-------------------------------\n', 1)[ - -1].splitlines() - for line in if_lines: - if not line.strip(): - continue - - interface, type_, source, model, mac = line.split() - vm_ifs.append((mac, model)) - - return vm_ifs - - -def add_vlan_for_vm_pcipt_interfaces(vm_id, net_seg_id, retry=3, - init_conf=False): - """ - Add vlan for vm pci-passthrough interface and restart networking service. - Do nothing if expected vlan interface already exists in 'ip addr'. - - Args: - vm_id (str): - net_seg_id (int|str|dict): such as 1792 - retry (int): max number of times to reboot vm to try to recover it - from non-exit - init_conf (bool): To workaround upstream bug where mac changes after - migrate or resize https://bugs.launchpad.net/nova/+bug/1617429 - - Returns: None - - Raises: VMNetworkError if vlan interface is not found in 'ip addr' after - adding - - Notes: - Sometimes a non-exist 'rename6' interface will be used for - pci-passthrough nic after vm maintenance - Sudo reboot from the vm as workaround. 
- By default will try to reboot for a maximum of 3 times - - """ - if not vm_id or not net_seg_id: - raise ValueError("vm_id and/or net_seg_id not provided.") - - net_seg_id_dict = None - if isinstance(net_seg_id, dict): - net_seg_id_dict = net_seg_id - net_seg_id = None - - for i in range(retry): - vm_pcipt_nics = get_vm_nics_info(vm_id, vnic_type='direct-physical') - - if not vm_pcipt_nics: - LOG.warning("No pci-passthrough device found for vm from nova " - "show {}".format(vm_id)) - return - - with ssh_to_vm_from_natbox(vm_id=vm_id) as vm_ssh: - for pcipt_nic in vm_pcipt_nics: - - mac_addr = pcipt_nic['mac_address'] - eth_name = network_helper.get_eth_for_mac(mac_addr=mac_addr, - ssh_client=vm_ssh) - if not eth_name: - if not init_conf: - LOG.warning( - "Interface with mac {} is not listed in 'ip addr' " - "in vm {}".format(mac_addr, vm_id)) - LOG.info("Try to get first eth with mac 90:...") - eth_name = network_helper.get_eth_for_mac( - mac_addr="link/ether 90:", ssh_client=vm_ssh) - if not eth_name: - exceptions.VMNetworkError( - "No Mac starts with 90: in ip addr for vm " - "{}".format(vm_id)) - else: - raise exceptions.VMNetworkError( - "Interface with mac {} is not listed in 'ip addr' " - "in vm {}".format(mac_addr, vm_id)) - - if 'rename' in eth_name: - LOG.warning( - "Retry {}: non-existing interface {} found on " - "pci-passthrough nic in vm {}, " - "reboot vm to try to recover".format( - i + 1, eth_name, vm_id)) - sudo_reboot_from_vm(vm_id=vm_id, vm_ssh=vm_ssh) - wait_for_vm_pingable_from_natbox(vm_id) - break - - else: - if net_seg_id_dict: - net_name = pcipt_nic['network'] - net_seg_id = net_seg_id_dict[net_name] - LOG.info( - "Seg id for {}: {}".format(net_name, net_seg_id)) - - vlan_name = "{}.{}".format(eth_name, net_seg_id) - - output_pre_ipaddr = \ - vm_ssh.exec_cmd('ip addr', fail_ok=False)[1] - if vlan_name in output_pre_ipaddr: - LOG.info("{} already in ip addr. Skip.".format( - vlan_name)) - continue - - # Bring up pcipt interface and assign IP manually. - # Upstream bug causes dev name and MAC addr - # change after reboot,migrate, making it impossible to - # use DHCP or configure permanant static IP. - # https://bugs.launchpad.net/nova/+bug/1617429 - wait_for_interfaces_up(vm_ssh, eth_name, set_up=True) - # 'ip link add' works for all linux guests but it does - # not persists after network service restart - vm_ssh.exec_cmd( - 'ip link add link {} name {} type vlan id {}'.format( - eth_name, vlan_name, - net_seg_id)) - vm_ssh.exec_cmd('ip link set {} up'.format(vlan_name)) - vnic_ip = pcipt_nic['ip_address'] - vm_ssh.exec_cmd( - 'ip addr add {}/24 dev {}'.format(vnic_ip, vlan_name)) - - LOG.info( - "Check if vlan is added successfully with IP assigned") - output_post_ipaddr = \ - vm_ssh.exec_cmd('ip addr', fail_ok=False)[1] - if vlan_name not in output_post_ipaddr: - raise exceptions.VMNetworkError( - "{} is not found in 'ip addr' after adding vlan " - "interface". - format(vlan_name)) - time.sleep(5) - if not is_ip_assigned(vm_ssh, eth_name=vlan_name): - msg = 'No IP assigned to {} vlan interface for VM ' \ - '{}'.format(vlan_name, vm_id) - LOG.warning(msg) - raise exceptions.VMNetworkError(msg) - else: - LOG.info( - "vlan {} is successfully added and an IP is " - "assigned.".format(vlan_name)) - else: - # did not break, meaning no 'rename' interface detected, - # vlan either existed or successfully added - return - - # 'for' loop break which means 'rename' interface detected, - # and vm reboot triggered - known issue with wrl - LOG.info("Reboot vm completed. 
Retry started.") - - else: - raise exceptions.VMNetworkError( - "'rename' interface still exists in pci-passthrough vm {} with {} " - "reboot attempts.".format(vm_id, retry)) - - -def is_ip_assigned(vm_ssh, eth_name): - output = vm_ssh.exec_cmd('ip addr show {}'.format(eth_name), - fail_ok=False)[1] - return re.search('inet {}'.format(Networks.IPV4_IP), output) - - -def wait_for_interfaces_up(vm_ssh, eth_names, check_interval=10, timeout=180, - set_up=False): - LOG.info( - "Waiting for vm interface(s) to be in UP state: {}".format(eth_names)) - end_time = time.time() + timeout - if isinstance(eth_names, str): - eth_names = [eth_names] - ifs_to_check = list(eth_names) - while time.time() < end_time: - for eth in ifs_to_check: - output = \ - vm_ssh.exec_cmd('ip -d link show {}'.format(eth), - fail_ok=False)[1] - if 'state UP' in output: - ifs_to_check.remove(eth) - continue - else: - if set_up: - vm_ssh.exec_cmd('ip link set {} up'.format(eth)) - LOG.info( - "{} is not up - wait for {} seconds and check again".format( - eth, check_interval)) - break - - if not ifs_to_check: - LOG.info('interfaces are up: {}'.format(eth_names)) - return - - time.sleep(check_interval) - - raise exceptions.VMNetworkError("Interface(s) not up for given vm") - - -def sudo_reboot_from_vm(vm_id, vm_ssh=None, check_host_unchanged=True, - con_ssh=None): - pre_vm_host = None - if check_host_unchanged: - pre_vm_host = get_vm_host(vm_id, con_ssh=con_ssh) - - LOG.info("Initiate sudo reboot from vm") - - def _sudo_reboot(vm_ssh_): - extra_prompt = 'Broken pipe' - output = vm_ssh_.exec_sudo_cmd('reboot -f', get_exit_code=False, - extra_prompt=extra_prompt)[1] - expt_string = 'The system is going down for reboot|Broken pipe' - if re.search(expt_string, output): - # Sometimes system rebooting msg will be displayed right after - # reboot cmd sent - vm_ssh_.parent.flush() - return - - try: - time.sleep(10) - vm_ssh_.send('') - index = vm_ssh_.expect([expt_string, vm_ssh_.prompt], timeout=60) - if index == 1: - raise exceptions.VMOperationFailed("Unable to reboot vm {}") - vm_ssh_.parent.flush() - except pexpect.TIMEOUT: - vm_ssh_.send_control('c') - vm_ssh_.expect() - raise - - if not vm_ssh: - with ssh_to_vm_from_natbox(vm_id) as vm_ssh: - _sudo_reboot(vm_ssh) - else: - _sudo_reboot(vm_ssh) - - LOG.info( - "sudo vm reboot initiated - wait for reboot completes and VM reaches " - "active state") - system_helper.wait_for_events(VMTimeout.AUTO_RECOVERY, strict=False, - fail_ok=False, con_ssh=con_ssh, - **{'Entity Instance ID': vm_id, - 'Event Log ID': - EventLogID.REBOOT_VM_COMPLETE}) - wait_for_vm_status(vm_id, status=VMStatus.ACTIVE, fail_ok=False, - con_ssh=con_ssh) - - if check_host_unchanged: - post_vm_host = get_vm_host(vm_id, con_ssh=con_ssh) - if not pre_vm_host == post_vm_host: - raise exceptions.HostError( - "VM host changed from {} to {} after sudo reboot vm".format( - pre_vm_host, post_vm_host)) - - -def get_proc_nums_from_vm(vm_ssh): - total_cores = common.parse_cpus_list( - vm_ssh.exec_cmd('cat /sys/devices/system/cpu/present', fail_ok=False)[ - 1]) - online_cores = common.parse_cpus_list( - vm_ssh.exec_cmd('cat /sys/devices/system/cpu/online', fail_ok=False)[1]) - offline_cores = common.parse_cpus_list( - vm_ssh.exec_cmd('cat /sys/devices/system/cpu/offline', fail_ok=False)[ - 1]) - - return total_cores, online_cores, offline_cores - - -def get_instance_names_via_virsh(host_ssh): - """ - Get instance names via virsh list on given host - Args: - host_ssh: - - Returns (list): - - """ - inst_names = 
-        host_ssh.exec_sudo_cmd(
-            "virsh list | grep instance- | awk {{'print $2'}}",
-            get_exit_code=False)[1]
-    return [name.strip() for name in inst_names.splitlines()]
-
-
-def get_vcpu_cpu_map(instance_names=None, host_ssh=None, host=None,
-                     con_ssh=None):
-    """
-    Get vm(s) vcpu cpu map on given host
-    Args:
-        instance_names (str|tuple|list|None):
-        host_ssh (SSHClient|None):
-        host (str|None):
-        con_ssh:
-
-    Returns (dict): {<instance_name>: {0: <cpu>,
-        1: <cpu>, ...}, ...}
-
-    """
-    if not host and not host_ssh:
-        raise ValueError('host or host_ssh has to be specified')
-
-    extra_grep = ''
-    if instance_names:
-        if isinstance(instance_names, str):
-            instance_names = (instance_names,)
-        extra_grep = '|grep -E "{}"'.format('|'.join(instance_names))
-    cmd = 'ps-sched.sh|grep qemu{}|grep " CPU" '.format(extra_grep) + \
-          """| awk '{{print $10" "$12" "$15 ;}}'"""
-
-    if host_ssh:
-        output = host_ssh.exec_cmd(cmd)[1]
-    else:
-        with host_helper.ssh_to_host(host, con_ssh=con_ssh) as host_ssh:
-            output = host_ssh.exec_cmd(cmd)[1]
-    vcpu_cpu_map = {}
-    for line in output.splitlines():
-        cpu, vcpu, instance_name = line.split()
-        instance_name = instance_name.split(sep=',')[0].split(sep='=')[1]
-        if instance_name not in vcpu_cpu_map:
-            vcpu_cpu_map[instance_name] = {}
-        vcpu_cpu_map[instance_name][int(vcpu.split(sep='/')[0])] = int(cpu)
-    return vcpu_cpu_map
-
-
-def get_affined_cpus_for_vm(vm_id, host_ssh=None, vm_host=None,
-                            instance_name=None, con_ssh=None):
-    """
-    cpu affinity list for vm via taskset -pc
-    Args:
-        vm_id (str):
-        host_ssh
-        vm_host
-        instance_name
-        con_ssh (SSHClient):
-
-    Returns (list): such as [10, 30]
-
-    """
-    cmd = "ps-sched.sh|grep qemu|grep {}|grep -v grep|awk '{{print $2;}}'" + \
-          '|xargs -i /bin/sh -c "taskset -pc {{}}"'
-
-    if host_ssh:
-        if not vm_host or not instance_name:
-            raise ValueError(
-                "vm_host and instance_name have to be provided together with "
-                "host_ssh")
-
-        output = host_ssh.exec_cmd(cmd.format(instance_name))[1]
-
-    else:
-        vm_host = get_vm_host(vm_id, con_ssh=con_ssh)
-        instance_name = get_vm_instance_name(vm_id, con_ssh=con_ssh)
-
-        with host_helper.ssh_to_host(vm_host, con_ssh=con_ssh) as host_ssh:
-            output = host_ssh.exec_cmd(cmd.format(instance_name))[1]
-
-    # Sample output:
-    # pid 6376's current affinity list: 10
-    # pid 6380's current affinity list: 10
-    # pid 6439's current affinity list: 10
-    # pid 6441's current affinity list: 10
-    # pid 6442's current affinity list: 30
-    # pid 6445's current affinity list: 10
-    # pid 24142's current affinity list: 10
-
-    all_cpus = []
-    lines = output.splitlines()
-    for line in lines:
-
-        # skip line if below output occurs due to timing in executing cmds
-        # taskset: failed to get pid 17125's affinity: No such process
-        if "No such process" in line:
-            continue
-
-        cpu_str = line.split(sep=': ')[-1].strip()
-        cpus = common.parse_cpus_list(cpus=cpu_str)
-        all_cpus += cpus
-
-    all_cpus = sorted(list(set(all_cpus)))
-    LOG.info("Affined cpus on host {} for vm {}: {}".format(vm_host, vm_id,
-                                                            all_cpus))
-
-    return all_cpus
-
-
-def _scp_net_config_cloud_init(guest_os):
-    con_ssh = get_cli_client()
-    dest_dir = '{}/userdata'.format(ProjVar.get_var('USER_FILE_DIR'))
-
-    if 'ubuntu' in guest_os:
-        dest_name = 'ubuntu_cloud_init_if_conf.sh'
-    elif 'centos' in guest_os:
-        dest_name = 'centos_cloud_init_if_conf.sh'
-    else:
-        raise ValueError("Unknown guest_os")
-
-    dest_path = '{}/{}'.format(dest_dir, dest_name)
-
-    if con_ssh.file_exists(file_path=dest_path):
-        LOG.info('userdata {} already exists.
Return existing path'.format( - dest_path)) - return dest_path - - LOG.debug('Create userdata directory if not already exists') - cmd = 'mkdir -p {}'.format(dest_dir) - con_ssh.exec_cmd(cmd, fail_ok=False) - - # LOG.info('wget image from {} to {}/{}'.format(img_url, img_dest, - # new_name)) - # cmd = 'wget {} --no-check-certificate -P {} -O {}'.format(img_url, - # img_dest, new_name) - # con_ssh.exec_cmd(cmd, expect_timeout=7200, fail_ok=False) - - source_path = '{}/userdata/{}'.format(TestFileServer.HOME, dest_name) - LOG.info('scp image from test server to active controller') - - scp_cmd = 'scp -oStrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null' \ - ' {}@{}:{} {}'.format(TestFileServer.USER, TestFileServer.SERVER, - source_path, dest_dir) - - con_ssh.send(scp_cmd) - index = con_ssh.expect( - [con_ssh.prompt, Prompt.PASSWORD_PROMPT, Prompt.ADD_HOST], timeout=3600) - if index == 2: - con_ssh.send('yes') - index = con_ssh.expect([con_ssh.prompt, Prompt.PASSWORD_PROMPT], - timeout=3600) - if index == 1: - con_ssh.send(TestFileServer.PASSWORD) - index = con_ssh.expect() - if index != 0: - raise exceptions.SSHException("Failed to scp files") - - return dest_dir - - -def _create_cloud_init_if_conf(guest_os, nics_num): - """ - - Args: - guest_os: - nics_num: - - Returns (str|None): file path of the cloud init userdata file for given - guest os and number of nics - Sample file content for Centos vm: - #!/bin/bash - sudo cp /etc/sysconfig/network-scripts/ifcfg-eth0 - /etc/sysconfig/network-scripts/ifcfg-eth1 - sudo sed -i 's/eth0/eth1/g' - /etc/sysconfig/network-scripts/ifcfg-eth1 - sudo ifup eth1 - - Sample file content for Ubuntu vm: - - """ - - file_dir = '{}/userdata'.format(ProjVar.get_var('USER_FILE_DIR')) - guest_os = guest_os.lower() - - # default eth_path for non-ubuntu image - eth_path = VMPath.ETH_PATH_CENTOS - new_user = None - - if 'ubuntu' in guest_os or 'trusty_uefi' in guest_os: - guest_os = 'ubuntu' - # vm_if_path = VMPath.VM_IF_PATH_UBUNTU - eth_path = VMPath.ETH_PATH_UBUNTU - new_user = 'ubuntu' - elif 'centos' in guest_os: - # vm_if_path = VMPath.VM_IF_PATH_CENTOS - new_user = 'centos' - - file_name = '{}_{}nic_cloud_init_if_conf.sh'.format(guest_os, nics_num) - - file_path = file_dir + file_name - con_ssh = get_cli_client() - if con_ssh.file_exists(file_path=file_path): - LOG.info('userdata {} already exists. 
Return existing path'.format( - file_path)) - return file_path - - LOG.info('Create userdata directory if not already exists') - cmd = 'mkdir -p {}'.format(file_dir) - con_ssh.exec_cmd(cmd, fail_ok=False) - - tmp_dir = '{}/userdata'.format(ProjVar.get_var('TEMP_DIR')) - os.makedirs(tmp_dir, exist_ok=True) - tmp_file = tmp_dir + file_name - - # No longer need to specify bash using cloud-config - # if 'centos_7' in guest_os: - # shell = '/usr/bin/bash' - # else: - # shell = '/bin/bash' - - with open(tmp_file, mode='a', encoding='utf8') as f: - f.write("#cloud-config\n") - - if new_user is not None: - f.write("user: {}\n" - "password: {}\n" - "chpasswd: {{ expire: False}}\n" - "ssh_pwauth: True\n\n".format(new_user, new_user)) - - if eth_path is not None: - eth0_path = eth_path.format('eth0') - f.write("runcmd:\n") - # f.write(" - echo '#!{}'\n".format(shell)) - for i in range(nics_num - 1): - ethi_name = 'eth{}'.format(i + 1) - ethi_path = eth_path.format(ethi_name) - f.write(' - cp {} {}\n'.format(eth0_path, ethi_path)) - f.write( - " - sed -i 's/eth0/{}/g' {}\n".format(ethi_name, ethi_path)) - f.write(' - ifup {}\n'.format(ethi_name)) - - if not ProjVar.get_var('REMOTE_CLI'): - common.scp_from_localhost_to_active_controller(source_path=tmp_file, - dest_path=file_path, - is_dir=False) - - LOG.info("Userdata file created: {}".format(file_path)) - return file_path - - -def _get_cloud_config_add_user(con_ssh=None): - """ - copy the cloud-config userdata to STX server. - This userdata adds stx/li69nux user to guest - - Args: - con_ssh (SSHClient): - - Returns (str): STX filepath of the userdata - - """ - file_dir = ProjVar.get_var('USER_FILE_DIR') - file_name = UserData.ADDUSER_TO_GUEST - file_path = file_dir + file_name - - if con_ssh is None: - con_ssh = get_cli_client() - if con_ssh.file_exists(file_path=file_path): - LOG.info('userdata {} already exists. Return existing path'.format( - file_path)) - return file_path - - source_file = TestServerPath.USER_DATA + file_name - dest_path = common.scp_from_test_server_to_user_file_dir( - source_path=source_file, dest_dir=file_dir, - dest_name=file_name, con_ssh=con_ssh) - if dest_path is None: - raise exceptions.CommonError( - "userdata file {} does not exist after download".format(dest_path)) - - return dest_path - - -def boost_vm_cpu_usage(vm_id, end_event, new_dd_events=None, dd_event=None, - timeout=1200, con_ssh=None): - """ - Boost cpu usage on given number of cpu cores on specified vm using dd cmd - on a new thread - - Args: - vm_id (str): - end_event (Events): Event for kill the dd processes - new_dd_events (list|Events): list of Event(s) for adding new dd - process(es) - dd_event (Events): Event to set after sending first dd cmd. - timeout: Max time to wait for the end_event to be set before killing dd. 
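_create_cloud_init_if_conf above emits a #cloud-config document whose runcmd clones eth0's ifcfg file for each extra nic. A self-contained renderer producing the same shape; the CentOS ifcfg path is an assumption, and the user name doubles as the password just as in the helper.

    def cloud_init_if_conf(user, nics_num,
                           eth_path='/etc/sysconfig/network-scripts/ifcfg-{}'):
        lines = ['#cloud-config',
                 'user: {}'.format(user),
                 'password: {}'.format(user),
                 'chpasswd: { expire: False }',
                 'ssh_pwauth: True',
                 '',
                 'runcmd:']
        eth0 = eth_path.format('eth0')
        for i in range(1, nics_num):
            ethi = 'eth{}'.format(i)
            path = eth_path.format(ethi)
            lines += [' - cp {} {}'.format(eth0, path),
                      " - sed -i 's/eth0/{}/g' {}".format(ethi, path),
                      ' - ifup {}'.format(ethi)]
        return '\n'.join(lines) + '\n'

    print(cloud_init_if_conf('centos', nics_num=3))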
- con_ssh - - Returns: thread - - Examples: - LOG.tc_step("Boost VM cpu usage") - - - """ - if not new_dd_events: - new_dd_events = [] - elif not isinstance(new_dd_events, list): - new_dd_events = [new_dd_events] - - def _boost_cpu_in_vm(): - LOG.info("Boosting cpu usage for vm {} using 'dd'".format(vm_id)) - dd_cmd = 'dd if=/dev/zero of=/dev/null &' - kill_dd = 'pkill -ex dd' - - with ssh_to_vm_from_natbox(vm_id, con_ssh=con_ssh, timeout=120, - auth_info=None) as vm_ssh: - LOG.info("Start first 2 dd processes in vm") - vm_ssh.exec_cmd(cmd=dd_cmd) - vm_ssh.exec_cmd(cmd=dd_cmd) - if dd_event: - dd_event.set() - - end_time = time.time() + timeout - while time.time() < end_time: - if end_event.is_set(): - LOG.info("End event set, kill dd processes in vm") - vm_ssh.flush() - vm_ssh.exec_cmd(kill_dd, get_exit_code=False) - return - - for event in new_dd_events: - if event.is_set(): - LOG.info( - "New dd event set, start 2 new dd processes in vm") - vm_ssh.exec_cmd(cmd=dd_cmd) - vm_ssh.exec_cmd(cmd=dd_cmd) - new_dd_events.remove(event) - break - - time.sleep(3) - - LOG.error( - "End event is not set within timeout - {}s, kill dd " - "anyway".format( - timeout)) - vm_ssh.exec_cmd(kill_dd) - - LOG.info( - "Creating new thread to spike cpu_usage on vm cores for vm {}".format( - vm_id)) - thread = multi_thread.MThread(_boost_cpu_in_vm) - thread.start_thread(timeout=timeout + 10) - - return thread - - -def write_in_vm(vm_id, end_event, start_event=None, expect_timeout=120, - thread_timeout=None, write_interval=5, - con_ssh=None): - """ - Continuously write in vm using dd - - Args: - vm_id (str): - start_event (Events): set this event when write in vm starts - end_event (Events): if this event is set, end write right away - expect_timeout (int): - thread_timeout (int): - write_interval (int): how frequently to write. Note: 5 seconds seems to - be a good interval; - a 1 second interval might have a noticeable impact on the performance - of pexpect. 
- con_ssh (SSHClient): controller ssh client - - Returns (MThread): new_thread - - """ - if not start_event: - start_event = Events("Write in vm {} start".format(vm_id)) - write_cmd = "while (true) do date; dd if=/dev/urandom of=output.txt " \ - "bs=1k count=1 conv=fsync || break; echo ; " \ - "sleep {}; done 2>&1 | tee trace.txt".format(write_interval) - - def _keep_writing(vm_id_): - LOG.info("starting to write to vm using dd...") - with ssh_to_vm_from_natbox(vm_id_, con_ssh=con_ssh, - close_ssh=False) as vm_ssh_: - vm_ssh_.send(cmd=write_cmd) - - start_event.set() - LOG.info("Write_in_vm started") - - LOG.info("Reading the dd output from vm {}".format(vm_id)) - thread.res = True - try: - while True: - expt_output = '1024 bytes' - index = vm_ssh_.expect([expt_output, vm_ssh_.prompt], - timeout=expect_timeout, fail_ok=True, - searchwindowsize=100) - if index != 0: - LOG.warning( - "write has stopped or expected output-'{}' is not " - "found".format( - expt_output)) - thread.res = False - break - - if end_event.is_set(): - LOG.info("End thread now") - break - - LOG.info("Writing in vm continues...") - time.sleep(write_interval) - - finally: - vm_ssh_.send_control('c') - - return vm_ssh_ - - thread = multi_thread.MThread(_keep_writing, vm_id) - thread_timeout = expect_timeout + 30 if thread_timeout is None else \ - thread_timeout - thread.start_thread(timeout=thread_timeout) - - start_event.wait_for_event(timeout=thread_timeout) - - return thread - - -def attach_interface(vm_id, port_id=None, net_id=None, fixed_ip=None, - fail_ok=False, auth_info=None, - con_ssh=None): - """ - Attach interface to a vm via port_id OR net_id - Args: - vm_id (str): - port_id (str): port to attach to vm - net_id (str): port from given net to attach to vm - fixed_ip (str): fixed ip for attached interface. Only works when - attaching interface via net_id - fail_ok (bool): - auth_info (dict): - con_ssh (SSHClient): - - Returns (tuple): (<return_code>, <attached_port_or_output>) - (0, <attached_port_id>) - (1, <std_err>) - cli rejected - (2, "Post interface attach check failed: <reasons>") - - net_id/port_id, vif_model, or fixed_ip do not match - with - given value - - """ - LOG.info("Attaching interface to VM {}".format(vm_id)) - if not vm_id: - raise ValueError('vm_id is not supplied') - - args = '' - args_dict = { - '--port-id': port_id, - '--net-id': net_id, - '--fixed-ip': fixed_ip, - } - - for key, val in args_dict.items(): - if val is not None: - args += ' {} {}'.format(key, val) - - args += ' {}'.format(vm_id) - - prev_ports = network_helper.get_ports(server=vm_id, auth_info=auth_info, - con_ssh=con_ssh) - # Not switching to the openstack client, because the nova cli makes more - # sense here: the openstack client has separate cmds for adding - # port, network and fixed ip, while the fixed ip cmd has to specify the network. 
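    # For illustration only (assumed mapping, not executed here): the single
    #   nova interface-attach --net-id <net_id> --fixed-ip <fixed_ip> <vm_id>
    # call would need two separate openstack client commands, e.g.
    #   openstack server add fixed ip <vm_id> <net_id>
    #   openstack server add port <vm_id> <port_id>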
- code, output = cli.nova('interface-attach', args, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - - if code == 1: - return code, output - - LOG.info("Post interface-attach checks started...") - post_ports = network_helper.get_ports(server=vm_id, auth_info=auth_info, - con_ssh=con_ssh) - attached_port = list(set(post_ports) - set(prev_ports)) - - err_msgs = [] - if len(attached_port) != 1: - err_msg = "NICs for vm {} is not incremented by 1".format(vm_id) - err_msgs.append(err_msg) - else: - attached_port = attached_port[0] - - if net_id: - net_name = network_helper.get_net_name_from_id(net_id, con_ssh=con_ssh, - auth_info=auth_info) - net_ips = get_vm_values(vm_id, fields=net_name, strict=False, - con_ssh=con_ssh, auth_info=auth_info)[0] - if fixed_ip and fixed_ip not in net_ips.split(sep=', '): - err_msg = "specified fixed ip {} is not found in nova show " \ - "{}".format(fixed_ip, vm_id) - err_msgs.append(err_msg) - - elif port_id and port_id not in post_ports: - err_msg = "port {} is not associated to VM".format(port_id) - err_msgs.append(err_msg) - - if err_msgs: - err_msgs_str = "Post interface attach check failed:\n{}".format( - '\n'.join(err_msgs)) - if fail_ok: - LOG.warning(err_msgs_str) - return 2, attached_port - raise exceptions.NovaError(err_msgs_str) - - succ_msg = "Port {} successfully attached to VM {}".format(attached_port, - vm_id) - LOG.info(succ_msg) - return 0, attached_port - - -def add_ifcfg_scripts(vm_id, mac_addrs, static_ips=None, ipv6='no', reboot=True, - vm_prompt=None, - **extra_configs): - """ - - Args: - vm_id: - mac_addrs (list of str): - static_ips (None|str|list): - ipv6: - reboot: - vm_prompt - **extra_configs: - - Returns: - - """ - LOG.info('Add ifcfg script(s) to VM {}'.format(vm_id)) - with ssh_to_vm_from_natbox(vm_id, prompt=vm_prompt) as vm_ssh: - vm_eths = [] - for mac_addr in mac_addrs: - eth_name = network_helper.get_eth_for_mac(mac_addr=mac_addr, - ssh_client=vm_ssh) - assert eth_name, "vif not found for expected mac_address {} in vm" \ - " {}".format(mac_addr, vm_id) - vm_eths.append(eth_name) - - if static_ips: - if isinstance(static_ips, str): - static_ips = [static_ips] - if len(static_ips) != len(vm_eths): - raise ValueError( - "static_ips count has to be the same as vm devs to be " - "configured") - - for i in range(len(vm_eths)): - eth = vm_eths[i] - if static_ips: - static_ip = static_ips[i] - script_content = VMNetwork.IFCFG_STATIC.format(eth, ipv6, - static_ip) - else: - script_content = VMNetwork.IFCFG_DHCP.format(eth, ipv6) - - if extra_configs: - extra_str = '\n'.join( - ['{}={}'.format(k, v) for k, v in extra_configs.items()]) - script_content += '\n{}'.format(extra_str) - - script_path = VMPath.ETH_PATH_CENTOS.format(eth) - vm_ssh.exec_sudo_cmd('touch {}'.format(script_path)) - vm_ssh.exec_sudo_cmd( - "cat > {} << 'EOT'\n{}\nEOT".format(script_path, - script_content), - fail_ok=False) - - if reboot: - reboot_vm(vm_id=vm_id) - - -def detach_interface(vm_id, port_id, cleanup_route=False, fail_ok=False, - auth_info=None, con_ssh=None, - verify_virsh=True): - """ - Detach a port from vm - Args: - vm_id (str): - port_id (str): existing port that is attached to given vm - fail_ok (bool): - auth_info (dict): - con_ssh (SSHClient): - cleanup_route (bool) - verify_virsh (bool): Whether to verify in virsh xmldump for detached - port - - Returns (tuple): (, ) - (0, Port is successfully detached from VM ) - (1, ) - cli rejected - (2, "Port is not detached from VM ") - detached - port is still shown in nova show - - """ - 
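    # Usage sketch (hypothetical ids; 'attached_port' as returned earlier by
    # attach_interface):
    #   code, out = detach_interface(vm_id, port_id=attached_port,
    #                                cleanup_route=True)
    #   assert code == 0, out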
target_ips = None - if cleanup_route: - fixed_ips = \ - network_helper.get_ports(field='Fixed IP Addresses', - port_id=port_id, - con_ssh=con_ssh, auth_info=auth_info)[0] - target_ips = [fixed_ip['ip_address'] for fixed_ip in fixed_ips] - - mac_to_check = None - if verify_virsh: - prev_ports, prev_macs = network_helper.get_ports( - server=vm_id, auth_info=auth_info, con_ssh=con_ssh, - field=('ID', 'MAC Address')) - for prev_port in prev_ports: - if port_id == prev_port: - mac_to_check = prev_macs[list(prev_ports).index(prev_port)] - break - - LOG.info("Detaching port {} from vm {}".format(port_id, vm_id)) - args = '{} {}'.format(vm_id, port_id) - code, output = cli.nova('interface-detach', args, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - if code == 1: - return code, output - - post_ports = network_helper.get_ports(server=vm_id, auth_info=auth_info, - con_ssh=con_ssh) - if port_id in post_ports: - err_msg = "Port {} is not detached from VM {}".format(port_id, vm_id) - if fail_ok: - return 2, err_msg - else: - raise exceptions.NeutronError( - 'port {} is still listed for vm {} after detaching'.format( - port_id, vm_id)) - - succ_msg = "Port {} is successfully detached from VM {}".format(port_id, - vm_id) - LOG.info(succ_msg) - - if cleanup_route and target_ips: - cleanup_routes_for_vifs(vm_id=vm_id, vm_ips=target_ips, reboot=True) - - if verify_virsh and mac_to_check: - if not (cleanup_route and target_ips): - reboot_vm(vm_id=vm_id, auth_info=auth_info, con_ssh=con_ssh) - - check_devs_detached(vm_id=vm_id, mac_addrs=mac_to_check, - con_ssh=con_ssh) - - return 0, succ_msg - - -def check_devs_detached(vm_id, mac_addrs, con_ssh=None): - if isinstance(mac_addrs, str): - mac_addrs = [mac_addrs] - - wait_for_vm_pingable_from_natbox(vm_id, con_ssh=con_ssh) - - LOG.info("Check dev detached from vm") - vm_err = '' - with ssh_to_vm_from_natbox(vm_id=vm_id, con_ssh=con_ssh, - retry_timeout=180) as vm_ssh: - for mac_addr in mac_addrs: - if vm_ssh.exec_cmd('ip addr | grep -B 1 "{}"'.format(mac_addr))[0] \ - == 0: - vm_err += 'Interface with mac address {} still exists in ' \ - 'vm\n'.format(mac_addr) - - LOG.info("Check virsh xmldump on compute host") - inst_name, vm_host = get_vm_values(vm_id, - fields=[":instance_name", ":host"], - strict=False) - host_err = '' - with host_helper.ssh_to_host(vm_host, con_ssh=con_ssh) as host_ssh: - for mac_addr in mac_addrs: - if host_ssh.exec_sudo_cmd( - 'virsh dumpxml {} | grep -B 1 -A 1 "{}"'.format( - inst_name, mac_addr))[0] == 0: - host_err += 'VM interface with mac address {} still exists in' \ - ' virsh\n'.format(mac_addr) - assert not host_err, host_err - - assert not vm_err, vm_err - - -def evacuate_vms(host, vms_to_check, con_ssh=None, timeout=600, - wait_for_host_up=False, fail_ok=False, post_host=None, - force=True, ping_vms=False): - """ - Evacuate given vms by rebooting their host. VMs should be on specified - host already when this keyword called. - Args: - host (str): host to reboot - vms_to_check (list): vms to check status for after host reboot - con_ssh (SSHClient): - timeout (int): Max time to wait for vms to reach active state after - reboot -f initiated on host - wait_for_host_up (bool): whether to wait for host reboot completes - before checking vm status - fail_ok (bool): whether to return or to fail test when vm(s) failed - to evacuate - post_host (str): expected host for vms to be evacuated to - force (bool): whether to use 'reboot -f'. This param is only used if - vlm=False. 
- ping_vms (bool): whether to ping vms after evacuation - - Returns (tuple): (<return_code> (int), <vms> (list)) - - (0, []) all vms evacuated successfully. i.e., active state, - host changed, pingable from NatBox - - (1, <vms_host_err>) some vms' host did not change after - host reboot - - (2, <inactive_vms>) some vms did not reach active state after host reboot - - """ - if isinstance(vms_to_check, str): - vms_to_check = [vms_to_check] - - HostsToRecover.add(host) - is_swacted = False - standby = None - if wait_for_host_up: - active, standby = system_helper.get_active_standby_controllers( - con_ssh=con_ssh) - if standby and active == host: - is_swacted = True - - is_sx = system_helper.is_aio_simplex() - - LOG.tc_step("'sudo reboot -f' from {}".format(host)) - host_helper.reboot_hosts(host, wait_for_offline=True, - wait_for_reboot_finish=False, force_reboot=force, - con_ssh=con_ssh) - - if is_sx: - host_helper.wait_for_hosts_ready(hosts=host, con_ssh=con_ssh) - - try: - LOG.tc_step( - "Wait for vms to reach ERROR or REBUILD state with best effort") - if not is_sx: - wait_for_vms_values(vms_to_check, - value=[VMStatus.ERROR, VMStatus.REBUILD], - fail_ok=True, timeout=120, - con_ssh=con_ssh) - - LOG.tc_step( - "Check vms are in Active state and moved to other host(s) (" - "non-sx) after host failure") - res, active_vms, inactive_vms = wait_for_vms_values( - vms=vms_to_check, value=VMStatus.ACTIVE, timeout=timeout, - con_ssh=con_ssh) - - if not is_sx: - vms_host_err = [] - for vm in vms_to_check: - if post_host: - if get_vm_host(vm) != post_host: - vms_host_err.append(vm) - else: - if get_vm_host(vm) == host: - vms_host_err.append(vm) - - if vms_host_err: - if post_host: - err_msg = "Following VMs were not moved to expected host " \ - "{} from {}: {}\nVMs did not reach Active " \ - "state: {}".format(post_host, host, vms_host_err, - inactive_vms) - else: - err_msg = "Following VMs stayed on the same host {}: " \ - "{}\nVMs did not reach Active state: {}".\ - format(host, vms_host_err, inactive_vms) - - if fail_ok: - LOG.warning(err_msg) - return 1, vms_host_err - raise exceptions.VMError(err_msg) - - if inactive_vms: - err_msg = "VMs did not reach Active state after vm host rebooted:" \ - " {}".format(inactive_vms) - if fail_ok: - LOG.warning(err_msg) - return 2, inactive_vms - raise exceptions.VMError(err_msg) - - if ping_vms: - LOG.tc_step("Ping vms after evacuation") - for vm_ in vms_to_check: - wait_for_vm_pingable_from_natbox(vm_id=vm_, - timeout=VMTimeout.DHCP_RETRY) - - LOG.info("All vms are successfully evacuated to other host") - return 0, [] - - finally: - if wait_for_host_up: - LOG.tc_step("Waiting for {} to recover".format(host)) - host_helper.wait_for_hosts_ready(host, con_ssh=con_ssh) - # Do not fail the test due to task affining incomplete for now to - # unblock test case. - host_helper.wait_for_tasks_affined(host=host, con_ssh=con_ssh, - fail_ok=True) - if is_swacted: - host_helper.wait_for_tasks_affined(standby, con_ssh=con_ssh, - fail_ok=True) - time.sleep(60) # Give some idle time before continuing. 
- if system_helper.is_aio_duplex(con_ssh=con_ssh): - system_helper.wait_for_alarm_gone( - alarm_id=EventLogID.CPU_USAGE_HIGH, fail_ok=True, - check_interval=30) - - -def boot_vms_various_types(storage_backing=None, target_host=None, - cleanup='function', avail_zone='nova', vms_num=5): - """ - Boot the following 5 vms and ensure they are pingable from NatBox: - - vm1: ephemeral=0, swap=0, boot_from_volume - - vm2: ephemeral=1, swap=1, boot_from_volume - - vm3: ephemeral=0, swap=0, boot_from_image - - vm4: ephemeral=0, swap=0, boot_from_image, attach_volume - - vm5: ephemeral=1, swap=1, boot_from_image - Args: - storage_backing (str|None): storage backing to set in flavor spec. - When None, the storage backing used by - most up hypervisors will be used. - target_host (str|None): Boot vm on target_host when specified. (admin - role has to be added to tenant under test) - cleanup (str|None): Scope for resource cleanup, valid values: - 'function', 'class', 'module', None. - When None, vms/volumes/flavors will be kept on system - avail_zone (str): availability zone to boot the vms - vms_num (int): number of vms to launch - - Returns (list): list of vm ids - - """ - LOG.info("Create a flavor without ephemeral or swap disks") - flavor_1 = \ - nova_helper.create_flavor('flv_rootdisk', - storage_backing=storage_backing, - cleanup=cleanup)[1] - - LOG.info("Create another flavor with ephemeral and swap disks") - flavor_2 = nova_helper.create_flavor('flv_ephemswap', ephemeral=1, swap=512, - storage_backing=storage_backing, - cleanup=cleanup)[1] - - launched_vms = [] - for i in range(int(math.ceil(vms_num / 5.0))): - LOG.info( - "Boot vm1 from volume with flavor flv_rootdisk and wait for it " - "to be pingable from NatBox") - vm1_name = "vol_root" - vm1 = boot_vm(vm1_name, flavor=flavor_1, source='volume', - avail_zone=avail_zone, vm_host=target_host, - cleanup=cleanup)[1] - - wait_for_vm_pingable_from_natbox(vm1) - launched_vms.append(vm1) - if len(launched_vms) == vms_num: - break - - LOG.info( - "Boot vm2 from volume with flavor flv_ephemswap and wait for it " - "to be pingable from NatBox") - vm2_name = "vol_ephemswap" - vm2 = boot_vm(vm2_name, flavor=flavor_2, source='volume', - avail_zone=avail_zone, vm_host=target_host, - cleanup=cleanup)[1] - - wait_for_vm_pingable_from_natbox(vm2) - launched_vms.append(vm2) - if len(launched_vms) == vms_num: - break - - LOG.info( - "Boot vm3 from image with flavor flv_rootdisk and wait for it " - "to be pingable from NatBox") - vm3_name = "image_root" - vm3 = boot_vm(vm3_name, flavor=flavor_1, source='image', - avail_zone=avail_zone, vm_host=target_host, - cleanup=cleanup)[1] - - wait_for_vm_pingable_from_natbox(vm3) - launched_vms.append(vm3) - if len(launched_vms) == vms_num: - break - - LOG.info( - "Boot vm4 from image with flavor flv_rootdisk, attach a volume to " - "it and wait for it " - "to be pingable from NatBox") - vm4_name = 'image_root_attachvol' - vm4 = boot_vm(vm4_name, flavor_1, source='image', avail_zone=avail_zone, - vm_host=target_host, - cleanup=cleanup)[1] - - vol = cinder_helper.create_volume(bootable=False, cleanup=cleanup)[1] - attach_vol_to_vm(vm4, vol_id=vol, cleanup=cleanup) - - wait_for_vm_pingable_from_natbox(vm4) - launched_vms.append(vm4) - if len(launched_vms) == vms_num: - break - - LOG.info( - "Boot vm5 from image with flavor flv_ephemswap and wait for it " - "to be pingable from NatBox") - vm5_name = 'image_ephemswap' - vm5 = boot_vm(vm5_name, flavor_2, source='image', avail_zone=avail_zone, - vm_host=target_host, - cleanup=cleanup)[1] - - wait_for_vm_pingable_from_natbox(vm5) - 
launched_vms.append(vm5) - if len(launched_vms) == vms_num: - break - - assert len(launched_vms) == vms_num - return launched_vms - - -def get_vcpu_model(vm_id, guest_os=None, con_ssh=None): - """ - Get vcpu model of given vm. e.g., Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz - Args: - vm_id (str): - guest_os (str): - con_ssh (SSHClient): - - Returns (str): - - """ - with ssh_to_vm_from_natbox(vm_id, vm_image_name=guest_os, - con_ssh=con_ssh) as vm_ssh: - out = vm_ssh.exec_cmd("cat /proc/cpuinfo | grep --color=never " - "'model name'", fail_ok=False)[1] - vcpu_model = out.strip().splitlines()[0].split(sep=': ')[1].strip() - - LOG.info("VM {} cpu model: {}".format(vm_id, vcpu_model)) - return vcpu_model - - -def get_quotas(quotas, default=False, tenant=None, auth_info=None, - con_ssh=None): - """ - Get openstack quotas - Args: - quotas (str|list|tuple): - default (bool) - tenant (str|None): Only used if admin user is used in auth_info - auth_info (dict): - con_ssh: - - Returns (list): - - """ - if auth_info is None: - auth_info = Tenant.get_primary() - - args = '' - if default: - args += '--default' - if tenant and auth_info['user'] == 'admin': - args += ' {}'.format(tenant) - - if isinstance(quotas, str): - quotas = [quotas] - - table_ = table_parser.table( - cli.openstack('quota show', args, ssh_client=con_ssh, - auth_info=auth_info)[1]) - - values = [] - for item in quotas: - val = table_parser.get_value_two_col_table(table_, item) - try: - val = eval(val) - except (NameError, SyntaxError): - pass - values.append(val) - - return values - - -def get_quota_details_info(component='compute', tenant=None, detail=True, - resources=None, - auth_info=Tenant.get('admin'), con_ssh=None): - """ - Get quota details table from openstack quota list --detail - Args: - component (str): compute, network or volume - tenant: - detail (bool) - resources (str|list|tuple|None): filter out table. Used only if - detail is True and component is not volume - auth_info: - con_ssh: - - Returns (dict): All keys are converted to lower case. - e.g., - {'server_groups': {'in use': 0, 'reserved': 1, 'limit': 10}, - ...} - - """ - valid_components = ('compute', 'network', 'volume') - if component not in valid_components: - raise ValueError( - "Please specify a valid component: {}".format(valid_components)) - - if not tenant: - tenant = Tenant.get_primary()['tenant'] - - detail_str = ' --detail' if detail and component != 'volume' else '' - args = '--project={} --{}{}'.format(tenant, component, detail_str) - - table_ = table_parser.table( - cli.openstack('quota list', args, ssh_client=con_ssh, - auth_info=auth_info)[1]) - key_header = 'Project ID' - if detail_str: - if resources: - table_ = table_parser.filter_table(table_, Resource=resources) - key_header = 'resource' - - table_ = table_parser.row_dict_table(table_, key_header=key_header, - lower_case=True, - eliminate_keys=key_header) - return {k: int(v) for k, v in table_.items()} - - -def set_quotas(tenant=None, auth_info=Tenant.get('admin'), con_ssh=None, - sys_con_for_dc=True, fail_ok=False, - **kwargs): - """ - Set openstack quotas - Args: - tenant (str): - auth_info (dict): - con_ssh: - sys_con_for_dc (bool): - fail_ok (bool): - **kwargs: quotas to set. 
e.g., **{'instances': 10, 'volumes': 20} - - Returns (tuple): - - """ - if not tenant: - tenant = Tenant.get_primary()['tenant'] - if not auth_info: - auth_info = Tenant.get_primary() - if ProjVar.get_var('IS_DC') and sys_con_for_dc and auth_info['region'] \ - != 'SystemController': - auth_info = Tenant.get(auth_info['user'], dc_region='SystemController') - - args = common.parse_args( - args_dict={k.replace('_', '-'): v for k, v in kwargs.items()}) - args = '{} {}'.format(args, tenant) - code, output = cli.openstack('quota set', args, ssh_client=con_ssh, - fail_ok=fail_ok, auth_info=auth_info) - if code > 0: - return 1, output - - msg = '{} quotas set successfully'.format(tenant) - LOG.info(msg) - return 0, msg - - -def ensure_vms_quotas(vms_num=10, cores_num=None, vols_num=None, ram=None, - tenant=None, auth_info=Tenant.get('admin'), - con_ssh=None): - """ - Update instances, cores, volumes quotas to given numbers - Args: - vms_num (int): max number of instances allowed for given tenant - cores_num (int|None): twice of the vms quota when None - vols_num (int|None): twice of the vms quota when None - ram (int|None) - tenant (None|str): - auth_info (dict): auth info for admin user - con_ssh (SSHClient): - - """ - if not vols_num: - vols_num = 2 * vms_num - if not cores_num: - cores_num = 2 * vms_num - if not ram: - ram = 2048 * vms_num - - if not tenant: - tenant = Tenant.get_primary()['tenant'] - - volumes_quota, vms_quota, cores_quota, ram_quota = get_quotas( - quotas=['volumes', 'instances', 'cores', 'ram'], - con_ssh=con_ssh, tenant=tenant, auth_info=auth_info) - kwargs = {} - if vms_num > vms_quota: - kwargs['instances'] = vms_num - if cores_num > cores_quota: - kwargs['cores'] = cores_num - if vols_num > volumes_quota: - kwargs['volumes'] = vols_num - if ram > ram_quota: - kwargs['ram'] = ram - - if kwargs: - set_quotas(con_ssh=con_ssh, tenant=tenant, auth_info=auth_info, - **kwargs) - - -def launch_vms(vm_type, count=1, nics=None, flavor=None, storage_backing=None, - image=None, boot_source=None, - guest_os=None, avail_zone=None, target_host=None, ping_vms=False, - con_ssh=None, auth_info=None, - cleanup='function', **boot_vm_kwargs): - """ - - Args: - vm_type: - count: - nics: - flavor: - storage_backing (str): - storage backend for flavor to be created - only used if flavor is None - image: - boot_source: - guest_os - avail_zone: - target_host: - ping_vms - con_ssh: - auth_info: - cleanup: - boot_vm_kwargs (dict): - additional kwargs to pass to boot_vm - - Returns: - - """ - - if not flavor: - flavor = nova_helper.create_flavor(name=vm_type, vcpus=2, - storage_backing=storage_backing, - cleanup=cleanup)[1] - extra_specs = {FlavorSpec.CPU_POLICY: 'dedicated'} - - if vm_type in ['vswitch', 'dpdk', 'vhost']: - extra_specs.update({FlavorSpec.VCPU_MODEL: 'SandyBridge', - FlavorSpec.MEM_PAGE_SIZE: '2048'}) - - nova_helper.set_flavor(flavor=flavor, **extra_specs) - - resource_id = None - boot_source = boot_source if boot_source else 'volume' - if image: - if boot_source == 'volume': - resource_id = \ - cinder_helper.create_volume(name=vm_type, source_id=image, - auth_info=auth_info, - guest_image=guest_os)[1] - if cleanup: - ResourceCleanup.add('volume', resource_id, scope=cleanup) - else: - resource_id = image - - if not nics: - if vm_type in ['pci-sriov', 'pci-passthrough']: - raise NotImplementedError("nics has to be provided for pci-sriov and " - "pci-passthrough") - - if vm_type in ['vswitch', 'dpdk', 'vhost']: - vif_model = 'avp' - else: - vif_model = vm_type - - mgmt_net_id = 
network_helper.get_mgmt_net_id(auth_info=auth_info) - tenant_net_id = network_helper.get_tenant_net_id(auth_info=auth_info) - internal_net_id = network_helper.get_internal_net_id( - auth_info=auth_info) - - nics = [{'net-id': mgmt_net_id}, - {'net-id': tenant_net_id, 'vif-model': vif_model}, - {'net-id': internal_net_id, 'vif-model': vif_model}] - - user_data = None - if vm_type in ['vswitch', 'dpdk', 'vhost']: - user_data = network_helper.get_dpdk_user_data(con_ssh=con_ssh) - - vms = [] - for i in range(count): - vm_id = boot_vm(name="{}-{}".format(vm_type, i), flavor=flavor, - source=boot_source, source_id=resource_id, - nics=nics, guest_os=guest_os, avail_zone=avail_zone, - vm_host=target_host, user_data=user_data, - auth_info=auth_info, con_ssh=con_ssh, cleanup=cleanup, - **boot_vm_kwargs)[1] - vms.append(vm_id) - - if ping_vms: - wait_for_vm_pingable_from_natbox(vm_id=vm_id, con_ssh=con_ssh) - return vms, nics - - -def get_ping_loss_duration_between_vms(from_vm, to_vm, net_type='data', - timeout=600, ipv6=False, - start_event=None, - end_event=None, con_ssh=None, - ping_interval=1): - """ - Get ping loss duration in milliseconds from one vm to another - Args: - from_vm (str): id of the ping source vm - to_vm (str): id of the ping destination vm - net_type (str): e.g., data, internal, mgmt - timeout (int): max time to wait for ping loss before force end it - ipv6 (bool): whether to use ping -6 for ipv6 address - start_event (Event): set given event to signal ping has started - end_event (Event): stop ping loss detection if given event is set - con_ssh (SSHClient): - ping_interval (int|float): timeout of ping cmd in seconds - - Returns (int): milliseconds of ping loss duration - - """ - - to_vm_ip = _get_vms_ips(vm_ids=to_vm, net_types=net_type, - con_ssh=con_ssh)[0][0] - with ssh_to_vm_from_natbox(vm_id=from_vm, con_ssh=con_ssh) as from_vm_ssh: - duration = network_helper.get_ping_failure_duration( - server=to_vm_ip, ssh_client=from_vm_ssh, timeout=timeout, - ipv6=ipv6, start_event=start_event, end_event=end_event, - ping_interval=ping_interval) - return duration - - -def get_ping_loss_duration_from_natbox(vm_id, timeout=900, start_event=None, - end_event=None, con_ssh=None, - ping_interval=0.5): - vm_ip = _get_vms_ips(vm_ids=vm_id, net_types='mgmt', con_ssh=con_ssh)[0][0] - natbox_client = NATBoxClient.get_natbox_client() - duration = network_helper.get_ping_failure_duration( - server=vm_ip, ssh_client=natbox_client, timeout=timeout, - start_event=start_event, end_event=end_event, - ping_interval=ping_interval) - return duration - - -def get_ping_loss_duration_on_operation(vm_id, timeout, ping_interval, - oper_func, *func_args, **func_kwargs): - LOG.tc_step("Start pinging vm {} from NatBox on a new thread".format(vm_id)) - start_event = Events("Ping started") - end_event = Events("Operation completed") - ping_thread = MThread(get_ping_loss_duration_from_natbox, vm_id=vm_id, - timeout=timeout, - start_event=start_event, end_event=end_event, - ping_interval=ping_interval) - ping_thread.start_thread(timeout=timeout + 30) - - try: - if start_event.wait_for_event(timeout=60): - LOG.tc_step( - "Perform operation on vm and ensure it's reachable after that") - oper_func(*func_args, **func_kwargs) - # Operation completed. 
Set end flag so ping thread can end properly - time.sleep(3) - end_event.set() - # Expect ping thread to end in less than 1 minute after - # live-migration complete - duration = ping_thread.get_output(timeout=60) - # assert duration, "No ping loss detected" - if duration == 0: - LOG.warning("No ping loss detected") - return duration - - assert False, "Ping failed since start" - finally: - ping_thread.wait_for_thread_end(timeout=5) - - -def collect_guest_logs(vm_id): - LOG.info("Attempt to collect guest logs with best effort") - log_names = ['messages', 'user.log'] - try: - res = _recover_vm(vm_id=vm_id) - if not res: - LOG.info( - "VM {} in unrecoverable state, skip collect guest logs.".format( - vm_id)) - return - - with ssh_to_vm_from_natbox(vm_id) as vm_ssh: - for log_name in log_names: - log_path = '/var/log/{}'.format(log_name) - if not vm_ssh.file_exists(log_path): - continue - - local_log_path = '{}/{}_{}'.format( - ProjVar.get_var('GUEST_LOGS_DIR'), log_name, vm_id) - current_user = local_host.get_user() - if current_user == TestFileServer.USER: - vm_ssh.exec_sudo_cmd('chmod -R 755 {}'.format(log_path), - fail_ok=True) - vm_ssh.scp_on_source_to_localhost( - source_file=log_path, - dest_user=current_user, - dest_password=TestFileServer.PASSWORD, - dest_path=local_log_path) - else: - output = vm_ssh.exec_cmd('tail -n 200 {}'.format(log_path), - fail_ok=False)[1] - with open(local_log_path, mode='w', encoding='utf8') as f: - f.write(output) - return - - except Exception as e: - LOG.warning("Failed to collect guest logs: {}".format(e)) - - -def _recover_vm(vm_id, con_ssh=None): - status = get_vm_status(vm_id=vm_id, con_ssh=con_ssh) - if status == VMStatus.ACTIVE: - return True - elif status == VMStatus.STOPPED: - code, msg = start_vms(vms=vm_id, fail_ok=True) - return code == 0 - elif status == VMStatus.PAUSED: - code, msg = unpause_vm(vm_id=vm_id, fail_ok=True, con_ssh=con_ssh) - if code > 0: - code, msg = resume_vm(vm_id, fail_ok=True, con_ssh=con_ssh) - if code > 0: - return False - return True - else: - return False - - -def get_vim_events(vm_id, event_ids=None, controller=None, con_ssh=None): - """ - Get vim events from nfv-vim-events.log - Args: - vm_id (str): - event_ids (None|str|list|tuple): return only given vim events when - specified - controller (None|str): controller where vim log is on. Use current - active controller if None. - con_ssh (SSHClient): - - Returns (list): list of dictionaries, each dictionary is one event. e.g.,: - [{'log-id': '47', 'event-id': 'instance-live-migrate-begin', ... , - 'timestamp': '2018-03-04 01:34:28.915008'}, - {'log-id': '49', 'event-id': 'instance-live-migrated', ... , - 'timestamp': '2018-03-04 01:35:34.043094'}] - - """ - if not controller: - controller = system_helper.get_active_controller_name() - - if isinstance(event_ids, str): - event_ids = [event_ids] - - with host_helper.ssh_to_host(controller, con_ssh=con_ssh) as controller_ssh: - vm_logs = controller_ssh.exec_cmd( - 'grep --color=never -A 4 -B 6 -E "entity .*{}" ' - '/var/log/nfv-vim-events.log'. 
- format(vm_id))[1] - - log_lines = vm_logs.splitlines() - vm_events = [] - vm_event = {} - for line in log_lines: - if re.search(' = ', line): - if line.startswith('log-id') and vm_event: - if not event_ids or vm_event['event-id'] in event_ids: - vm_events.append(vm_event) - - vm_event = {} - key, val = re.findall('(.*)= (.*)', line)[0] - vm_event[key.strip()] = val.strip() - - if vm_event and (not event_ids or vm_event['event-id'] in event_ids): - vm_events.append(vm_event) - - LOG.info("VM events: {}".format(vm_events)) - return vm_events - - -def get_live_migrate_duration(vm_id, con_ssh=None): - LOG.info( - "Get live migration duration from nfv-vim-events.log for vm {}".format( - vm_id)) - events = (VimEventID.LIVE_MIG_BEGIN, VimEventID.LIVE_MIG_END) - live_mig_begin, live_mig_end = get_vim_events(vm_id=vm_id, event_ids=events, - con_ssh=con_ssh) - - start_time = live_mig_begin['timestamp'] - end_time = live_mig_end['timestamp'] - duration = common.get_timedelta_for_isotimes(time1=start_time, - time2=end_time).total_seconds() - LOG.info("Live migration for vm {} took {} seconds".format(vm_id, duration)) - - return duration - - -def get_cold_migrate_duration(vm_id, con_ssh=None): - LOG.info("Get cold migration duration from vim-event-log for vm {}".format( - vm_id)) - events = (VimEventID.COLD_MIG_BEGIN, VimEventID.COLD_MIG_END, - VimEventID.COLD_MIG_CONFIRM_BEGIN, VimEventID.COLD_MIG_CONFIRMED) - cold_mig_begin, cold_mig_end, cold_mig_confirm_begin, \ - cold_mig_confirm_end = get_vim_events(vm_id=vm_id, event_ids=events, - con_ssh=con_ssh) - - duration_cold_mig = common.get_timedelta_for_isotimes( - time1=cold_mig_begin['timestamp'], - time2=cold_mig_end['timestamp']).total_seconds() - - duration_confirm = common.get_timedelta_for_isotimes( - time1=cold_mig_confirm_begin['timestamp'], - time2=cold_mig_confirm_end['timestamp']).total_seconds() - - duration = duration_cold_mig + duration_confirm - LOG.info("Cold migrate and confirm for vm {} took {} seconds".format( - vm_id, duration)) - - return duration - - -def live_migrate_force_complete(vm_id, migration_id=None, timeout=300, - fail_ok=False, con_ssh=None): - """ - Run nova live-migration-force-complete against given vm and migration - session. - Args: - vm_id (str): - migration_id (str|int): - timeout: - fail_ok: - con_ssh: - - Returns (tuple): - (0, 'VM is successfully live-migrated after - live-migration-force-complete') - (1, ) # nova live-migration-force-complete cmd - rejected. Only returns if fail_ok=True. 
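    Example (a minimal sketch; assumes a live migration of the vm is already
    in progress so a migration id can be looked up):
        code, msg = live_migrate_force_complete(vm_id)
        assert code == 0, msg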
- - """ - if not migration_id: - migration_id = get_vm_migration_values(vm_id=vm_id, fail_ok=False, - con_ssh=con_ssh)[0] - - # No replacement in openstack client - code, output = cli.nova('live-migration-force-complete', - '{} {}'.format(vm_id, migration_id), - ssh_client=con_ssh, - fail_ok=fail_ok) - - if code > 0: - return 1, output - - wait_for_vm_migration_status(vm_id=vm_id, migration_id=migration_id, - fail_ok=False, timeout=timeout, - con_ssh=con_ssh) - msg = "VM is successfully live-migrated after live-migration-force-complete" - LOG.info(msg) - return 0, msg - - -def get_vm_migration_values(vm_id, field='Id', migration_type='live-migration', - fail_ok=True, con_ssh=None, **kwargs): - """ - Get values for given vm via nova migration-list - Args: - vm_id (str): - field (str): - migration_type(str): valid types: live-migration, migration - fail_ok: - con_ssh: - **kwargs: - - Returns (list): - - """ - migration_tab = nova_helper.get_migration_list_table(con_ssh=con_ssh) - filters = {'Instance UUID': vm_id, 'Type': migration_type} - if kwargs: - filters.update(kwargs) - mig_ids = table_parser.get_values(migration_tab, target_header=field, - **filters) - if not mig_ids and not fail_ok: - raise exceptions.VMError( - "{} has no {} session with filters: {}".format(vm_id, - migration_type, - kwargs)) - - return mig_ids - - -def wait_for_vm_migration_status(vm_id, migration_id=None, migration_type=None, - expt_status='completed', - fail_ok=False, timeout=300, check_interval=5, - con_ssh=None): - """ - Wait for a migration session to reach given status in nova migration-list - Args: - vm_id (str): - migration_id (str|int): - migration_type (str): valid types: live-migration, migration - expt_status (str): migration status to wait for, such as completed, - running, etc - fail_ok (bool): - timeout (int): max time to wait for the state - check_interval (int): - con_ssh: - - Returns (tuple): - (True, <expt_status>) # migration status reached as expected - (False, <prev_status>) # did not reach given status. This only - returns if fail_ok=True - - """ - if not migration_id: - migration_id = get_vm_migration_values( - vm_id=vm_id, migration_type=migration_type, fail_ok=False, - con_ssh=con_ssh)[0] - - LOG.info("Waiting for migration {} for vm {} to reach {} status".format( - migration_id, vm_id, expt_status)) - end_time = time.time() + timeout - prev_state = None - while time.time() < end_time: - mig_status = get_vm_migration_values(vm_id=vm_id, field='Status', - **{'Id': migration_id})[0] - if mig_status == expt_status: - LOG.info( - "Migration {} for vm {} reached status: {}".format(migration_id, - vm_id, - expt_status)) - return True, expt_status - - if mig_status != prev_state: - LOG.info( - "Migration {} for vm {} is in status - {}".format(migration_id, - vm_id, - mig_status)) - prev_state = mig_status - - time.sleep(check_interval) - - msg = 'Migration {} for vm {} did not reach {} status within {} seconds. ' \ - 'It is in {} status.'.format(migration_id, vm_id, expt_status, - timeout, prev_state) - if fail_ok: - LOG.warning(msg) - return False, prev_state - else: - raise exceptions.VMError(msg) - - -def get_vms_ports_info(vms, rtn_subnet_id=False): - """ - Get VMs' ports' (ip_addr, subnet_cidr_or_id, mac_addr). 
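    For example, a vm with a single port might yield (values are illustrative):
        {'<vm_id>': [('<port_id>', '192.168.1.5', '192.168.1.0/24',
                      'fa:16:3e:aa:bb:cc', '<net_id>')]}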
- - Args: - vms (str|list): - vm_id, or a list of vm_ids - rtn_subnet_id (bool): - replaces cidr with subnet_id in result - - Returns (dict): - {vms[0]: [(ip_addr, subnet, ...), ...], vms[1]: [...], ...} - """ - if not issubclass(type(vms), (list, tuple)): - vms = [vms] - - info = {} - subnet_tab_ = table_parser.table( - cli.openstack('subnet list', auth_info=Tenant.get('admin'))[1]) - for vm in vms: - info[vm] = [] - vm_ports, vm_macs, vm_fixed_ips = network_helper.get_ports( - server=vm, field=('ID', 'MAC Address', 'Fixed IP Addresses')) - for i in range(len(vm_ports)): - port = vm_ports[i] - mac = vm_macs[i] - fixed_ips = vm_fixed_ips[i] - if not isinstance(fixed_ips, list): - fixed_ips = [fixed_ips] - - for fixed_ip in fixed_ips: - subnet_id = fixed_ip['subnet_id'] - ip_addr = fixed_ip['ip_address'] - subnet = subnet_id if rtn_subnet_id else \ - table_parser.get_values(subnet_tab_, 'Subnet', - id=subnet_id)[0] - net_id = table_parser.get_values(subnet_tab_, 'Network', - id=subnet_id)[0] - - LOG.info( - "VM {} port {}: mac={} ip={} subnet={} net_id={}".format( - vm, port, mac, ip_addr, subnet, net_id)) - info[vm].append((port, ip_addr, subnet, mac, net_id)) - - return info - - -def _set_vm_route(vm_id, target_subnet, via_ip, dev_or_mac, persist=True): - # returns True if the targeted VM is vswitch-enabled - # for vswitch-enabled VMs, it must be set up with TisInitServiceScript if - # persist=True - with ssh_to_vm_from_natbox(vm_id) as ssh_client: - vshell, msg = ssh_client.exec_cmd("vshell port-list", fail_ok=True) - vshell = not vshell - if ':' in dev_or_mac: - dev = network_helper.get_eth_for_mac(ssh_client, dev_or_mac, - vshell=vshell) - else: - dev = dev_or_mac - if not vshell: # not avs managed - param = target_subnet, via_ip, dev - LOG.info("Routing {} via {} on interface {}".format(*param)) - ssh_client.exec_sudo_cmd( - "route add -net {} gw {} {}".format(*param), fail_ok=False) - if persist: - LOG.info("Setting persistent route") - ssh_client.exec_sudo_cmd( - "echo -e \"{} via {}\" > " - "/etc/sysconfig/network-scripts/route-{}".format( - *param), fail_ok=False) - return False - else: - param = target_subnet, via_ip, dev - LOG.info( - "Routing {} via {} on interface {}, AVS-enabled".format(*param)) - ssh_client.exec_sudo_cmd( - "sed -i $'s,quit,route add {} {} {} 1\\\\nquit," - "g' /etc/vswitch/vswitch.cmds.default".format( - target_subnet, dev, via_ip), fail_ok=False) - # reload vswitch - ssh_client.exec_sudo_cmd("/etc/init.d/vswitch restart", - fail_ok=False) - if persist: - LOG.info("Setting persistent route") - ssh_client.exec_sudo_cmd( - # ROUTING_STUB - # "192.168.1.0/24,192.168.111.1,eth0" - "sed -i $'s@#ROUTING_STUB@\"{},{}," - "{}\"\\\\n#ROUTING_STUB@g' {}".format( - target_subnet, via_ip, dev, - TisInitServiceScript.configuration_path - ), fail_ok=False) - return True - - -def route_vm_pair(vm1, vm2, bidirectional=True, validate=True): - """ - Route the pair of VMs' data interfaces through internal interfaces - If multiple interfaces are available on either of the VMs, the last one is used - If no interfaces are available for data/internal network for either VM, - raises IndexError - The internal interfaces for the VM pair must be on the same gateway - no fail_ok option, since if failed, the vm's state is undefined - - Args: - vm1 (str): - vm_id, src if bidirectional=False - vm2 (str): - vm_id, dest if bidirectional=False - bidirectional (bool): - if True, also routes from vm2 to vm1 - validate (bool): - validate pings between the pair over the data network - - Returns (dict): - the 
interfaces used for routing, - {vm_id: {'data': {'ip', 'cidr', 'mac'}, - 'internal':{'ip', 'cidr', 'mac'}}} - """ - if vm1 == vm2: - raise ValueError("cannot route to a VM itself") - - auth_info = Tenant.get('admin') - LOG.info("Collecting VMs' networks") - interfaces = { - vm1: {"data": network_helper.get_tenant_ips_for_vms( - vm1, auth_info=auth_info), - "internal": network_helper.get_internal_ips_for_vms(vm1)}, - vm2: {"data": network_helper.get_tenant_ips_for_vms( - vm2, auth_info=auth_info), - "internal": network_helper.get_internal_ips_for_vms(vm2)}, - } - - for vm, info in get_vms_ports_info([vm1, vm2]).items(): - for port, ip, cidr, mac, net_id in info: - # expect one data and one internal - if ip in interfaces[vm]['data']: - interfaces[vm]['data'] = {'ip': ip, 'cidr': cidr, 'mac': mac, - 'port': port} - elif ip in interfaces[vm]['internal']: - interfaces[vm]['internal'] = {'ip': ip, 'cidr': cidr, - 'mac': mac, 'port': port} - - if interfaces[vm1]['internal']['cidr'] != \ - interfaces[vm2]['internal']['cidr']: - raise ValueError( - "the internal interfaces for the VM pair is not on the same " - "gateway") - - vshell_ = _set_vm_route( - vm1, - interfaces[vm2]['data']['cidr'], interfaces[vm2]['internal']['ip'], - interfaces[vm1]['internal']['mac']) - - if bidirectional: - _set_vm_route(vm2, interfaces[vm1]['data']['cidr'], - interfaces[vm1]['internal']['ip'], - interfaces[vm2]['internal']['mac']) - - for vm in (vm1, vm2): - LOG.info("Add vms' data network ip as allowed address for internal " - "network port") - network_helper.set_port( - port_id=interfaces[vm]['internal']['port'], - auth_info=auth_info, - allowed_addr_pairs={'ip-address': interfaces[vm]['data']['ip']}) - - if validate: - LOG.info("Validating route(s) across data") - ping_between_routed_vms(to_vm=vm2, from_vm=vm1, vshell=vshell_, - bidirectional=bidirectional) - - return interfaces - - -def ping_between_routed_vms(to_vm, from_vm, vshell=True, bidirectional=True, - timeout=120): - """ - Ping between routed vm pair - Args: - to_vm: - from_vm: - vshell: - bidirectional: - timeout: - - Returns: - - """ - ping_vms_from_vm(to_vms=to_vm, from_vm=from_vm, timeout=timeout, - net_types='data', vshell=vshell, - source_net_types='internal') - if bidirectional: - ping_vms_from_vm(to_vms=from_vm, from_vm=to_vm, timeout=timeout, - net_types='data', vshell=vshell, - source_net_types='internal') - - -def setup_kernel_routing(vm_id, **kwargs): - """ - Setup kernel routing function for the specified VM - replicates the operation as in wrs_guest_setup.sh (and comes - with the same assumptions) - in order to persist kernel routing after reboots, the operation has to be - stored in /etc/init.d - see TisInitServiceScript for script details - no fail_ok option, since if failed, the vm's state is undefined - - Args: - vm_id (str): - the VM to be configured - kwargs (dict): - kwargs for TisInitServiceScript.configure - - """ - LOG.info( - "Setting up kernel routing for VM {}, kwargs={}".format(vm_id, kwargs)) - - scp_to_vm(vm_id, TisInitServiceScript.src(), TisInitServiceScript.dst()) - with ssh_to_vm_from_natbox(vm_id) as ssh_client: - r, msg = ssh_client.exec_cmd("cat /proc/sys/net/ipv4/ip_forward", - fail_ok=False) - if msg == "1": - LOG.warn( - "VM {} has ip_forward enabled already, skipping".format(vm_id)) - return - TisInitServiceScript.configure(ssh_client, **kwargs) - TisInitServiceScript.enable(ssh_client) - TisInitServiceScript.start(ssh_client) - - -def setup_avr_routing(vm_id, mtu=1500, vm_type='vswitch', **kwargs): - """ - Setup 
avr routing (vswitch L3) function for the specified VM - replicates the operation as in wrs_guest_setup.sh (and comes with - the same assumptions) - in order to persist avr routing after reboots, the operation has to be - stored in /etc/init.d - see TisInitServiceScript for script details - no fail_ok option, since if failed, the vm's state is undefined - - Args: - vm_id (str): - the VM to be configured - mtu (int): - 1500 by default - for jumbo frames (9000), tenant net support is required - vm_type (str): - PCI NIC_DEVICE - vhost: "${PCI_VENDOR_VIRTIO}:${PCI_DEVICE_VIRTIO}: - ${PCI_SUBDEVICE_NET}" - any other: "${PCI_VENDOR_VIRTIO}:${PCI_DEVICE_MEMORY}: - ${PCI_SUBDEVICE_AVP}" (default) - kwargs (dict): - kwargs for TisInitServiceScript.configure - - """ - LOG.info( - "Setting up avr routing for VM {}, kwargs={}".format(vm_id, kwargs)) - datas = network_helper.get_tenant_ips_for_vms(vm_id) - data_dict = dict() - try: - internals = network_helper.get_internal_ips_for_vms(vm_id) - except ValueError: - internals = list() - internal_dict = dict() - for vm, info in get_vms_ports_info([vm_id]).items(): - for port, ip, cidr, mac, net_id in info: - if ip in datas: - data_dict[ip] = ipaddress.ip_network(cidr).netmask - elif ip in internals: - internal_dict[ip] = ipaddress.ip_network(cidr).netmask - - interfaces = list() - items = list(data_dict.items()) + list(internal_dict.items()) - - if len(items) > 2: - LOG.warn( - "wrs_guest_setup/tis_automation_init does not support more than " - "two DPDK NICs") - LOG.warn("stripping {} from interfaces".format(items[2:])) - items = items[:2] - - for (ip, netmask), ct in zip(items, range(len(items))): - interfaces.append( - """\"{},{},eth{},{}\"""".format(ip, netmask, ct, str(mtu))) - - nic_device = "" - if vm_type == 'vhost': - nic_device = "\"${PCI_VENDOR_VIRTIO}:${PCI_DEVICE_VIRTIO}:" \ - "${PCI_SUBDEVICE_NET}\"" - - scp_to_vm(vm_id, TisInitServiceScript.src(), TisInitServiceScript.dst()) - with ssh_to_vm_from_natbox(vm_id) as ssh_client: - TisInitServiceScript.configure( - ssh_client, NIC_DEVICE=nic_device, - NIC_COUNT=str(len(items)), FUNCTIONS="avr,", - ROUTES="""( - #ROUTING_STUB -)""", - ADDRESSES="""( - {} -) -""".format("\n ".join(interfaces)), **kwargs) - TisInitServiceScript.enable(ssh_client) - TisInitServiceScript.start(ssh_client) - - -def launch_vm_pair(vm_type='virtio', primary_kwargs=None, secondary_kwargs=None, - **launch_vms_kwargs): - """ - Launch a pair of routed VMs - one on the primary tenant, and the other on the secondary tenant - - Args: - vm_type (str): - one of 'virtio', 'avp', 'dpdk' - primary_kwargs (dict): - launch_vms_kwargs for the VM launched under the primary tenant - secondary_kwargs (dict): - launch_vms_kwargs for the VM launched under the secondary tenant - **launch_vms_kwargs: - additional keyword arguments for launch_vms for both tenants - overlapping keys will be overridden by primary_kwargs and - secondary_kwargs - shall not specify count, ping_vms, auth_info - - Returns (tuple): - (vm_id_on_primary_tenant, vm_id_on_secondary_tenant) - """ - LOG.info("Launch a {} test-observer pair of VMs".format(vm_type)) - for invalid_key in ('count', 'ping_vms'): - if invalid_key in launch_vms_kwargs: - launch_vms_kwargs.pop(invalid_key) - - primary_kwargs = dict() if not primary_kwargs else primary_kwargs - secondary_kwargs = dict() if not secondary_kwargs else secondary_kwargs - if 'auth_info' not in primary_kwargs: - primary_kwargs['auth_info'] = Tenant.get_primary() - if 'auth_info' not in secondary_kwargs: - 
secondary_kwargs['auth_info'] = Tenant.get_secondary() - - if 'nics' not in primary_kwargs or 'nics' not in secondary_kwargs: - if vm_type in ['pci-sriov', 'pci-passthrough']: - raise NotImplementedError( - "nics has to be provided for pci-sriov and pci-passthrough") - - if vm_type in ['vswitch', 'dpdk', 'vhost']: - vif_model = 'avp' - else: - vif_model = vm_type - - internal_net_id = network_helper.get_internal_net_id() - for tenant_info in (primary_kwargs, secondary_kwargs): - auth_info_ = tenant_info['auth_info'] - mgmt_net_id = network_helper.get_mgmt_net_id(auth_info=auth_info_) - tenant_net_id = network_helper.get_tenant_net_id( - auth_info=auth_info_) - nics = [{'net-id': mgmt_net_id}, - {'net-id': tenant_net_id, 'vif-model': vif_model}, - {'net-id': internal_net_id, 'vif-model': vif_model}] - tenant_info['nics'] = nics - - vm_test = launch_vms(vm_type=vm_type, count=1, ping_vms=True, - **__merge_dict(launch_vms_kwargs, primary_kwargs) - )[0][0] - vm_observer = launch_vms(vm_type=vm_type, count=1, ping_vms=True, - **__merge_dict(launch_vms_kwargs, - secondary_kwargs))[0][0] - - LOG.info("Route the {} test-observer VM pair".format(vm_type)) - if vm_type in ('dpdk', 'vhost', 'vswitch'): - setup_avr_routing(vm_test, vm_type=vm_type) - setup_avr_routing(vm_observer, vm_type=vm_type) - else: - # vm_type in ('virtio', 'avp'): - setup_kernel_routing(vm_test) - setup_kernel_routing(vm_observer) - - route_vm_pair(vm_test, vm_observer) - - return vm_test, vm_observer - - -def get_all_vms(field='ID', con_ssh=None, auth_info=Tenant.get('admin')): - """ - Get VMs for all tenants in the systems - - Args: - field: - con_ssh: - auth_info - - Returns (list): list of all vms on the system - - """ - return get_vms(field=field, all_projects=True, long=False, con_ssh=con_ssh, - auth_info=auth_info) - - -def get_vms_info(fields, vms=None, con_ssh=None, long=True, all_projects=True, - host=None, - auth_info=Tenant.get('admin')): - """ - Get vms values for given fields - Args: - fields (str|list|tuple): - vms: - con_ssh: - long: - all_projects: - host - auth_info: - - Returns (dict): vm as key, values for given fields as value - Examples: - input: fields = [field1, field2] - output: {vm_1: [vm1_field1_value, vm1_field2_value], - vm_2: [vm2_field1_value, vm2_field2_value]} - - """ - if isinstance(fields, str): - fields = (fields,) - fields = ['ID'] + list(fields) - - values = get_vms(vms=vms, field=fields, con_ssh=con_ssh, long=long, - all_projects=all_projects, host=host, - auth_info=auth_info) - vm_ids = values.pop(0) - values = list(zip(*values)) - results = {vm_ids[i]: values[i] for i in range(len(vm_ids))} - - return results - - -def get_vms(vms=None, field='ID', long=False, all_projects=True, host=None, - project=None, project_domain=None, - strict=True, regex=False, con_ssh=None, auth_info=None, **kwargs): - """ - get a list of VM IDs or Names for given tenant in auth_info param. - - Args: - vms (list): filter vms from this list if not None - field (str|tuple|list): 'ID' or 'Name' - con_ssh (SSHClient): controller SSHClient. 
- auth_info (dict): such as ones in auth.py: auth.ADMIN, auth.TENANT1 - long (bool): whether to use --long in cmd - project (str) - project_domain (str) - all_projects (bool): whether to use --a in cmd - host (str): value for --host arg in cmd - strict (bool): applies to search for value(s) specified in kwargs - regex (bool): whether to use regular expression to search for the - kwargs value(s) - **kwargs: header/value pair to filter out the vms - - Returns (list): list of VMs for tenant(s). - - """ - args_dict = {'--long': long, - '--a': all_projects if auth_info and auth_info[ - 'user'] == 'admin' else None, - '--host': host, - '--project': project, - '--project-domain': project_domain} - args = common.parse_args(args_dict) - table_ = table_parser.table( - cli.openstack('server list', args, ssh_client=con_ssh, - auth_info=auth_info)[1]) - if vms: - table_ = table_parser.filter_table(table_, ID=vms) - - return table_parser.get_multi_values(table_, field, strict=strict, - regex=regex, **kwargs) - - -def get_vm_status(vm_id, con_ssh=None, auth_info=Tenant.get('admin')): - return get_vm_values(vm_id, 'status', strict=True, con_ssh=con_ssh, - auth_info=auth_info)[0] - - -def get_vm_id_from_name(vm_name, con_ssh=None, strict=True, regex=False, - fail_ok=False, auth_info=Tenant.get('admin')): - if not auth_info: - auth_info = Tenant.get_primary() - vm_ids = get_vms(name=vm_name, strict=strict, regex=regex, con_ssh=con_ssh, - auth_info=auth_info) - if not vm_ids: - err_msg = "No vm found with name: {}".format(vm_name) - LOG.info(err_msg) - if fail_ok: - return '' - raise exceptions.VMError(err_msg) - - return vm_ids[0] - - -def get_vm_name_from_id(vm_id, con_ssh=None, auth_info=None): - return get_vm_values(vm_id, fields='name', con_ssh=con_ssh, - auth_info=auth_info)[0] - - -def get_vm_volumes(vm_id, con_ssh=None, auth_info=None): - """ - Get volume ids attached to given vm. 
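    Example (a minimal sketch):
        vols = get_vm_volumes(vm_id)
        if vols:
            LOG.info("Volumes attached to vm {}: {}".format(vm_id, vols))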
- - Args: - vm_id (str): - con_ssh (SSHClient): - auth_info (dict): - - Returns (tuple): list of volume ids attached to specific vm - - """ - table_ = table_parser.table( - cli.openstack('server show', vm_id, ssh_client=con_ssh, - auth_info=auth_info)[1]) - return _get_vm_volumes(table_) - - -def get_vm_values(vm_id, fields, strict=True, con_ssh=None, - auth_info=Tenant.get('admin')): - """ - Get vm values via openstack server show - Args: - vm_id (str): - fields (str|list|tuple): fields in openstack server show table - strict (bool): whether to perform a strict search on given field name - con_ssh (SSHClient): - auth_info (dict|None): - - Returns (list): values for given fields - - """ - if isinstance(fields, str): - fields = [fields] - - table_ = table_parser.table( - cli.openstack('server show', vm_id, ssh_client=con_ssh, - auth_info=auth_info)[1]) - - values = [] - for field in fields: - merge = False - if field in ('fault',): - merge = True - value = table_parser.get_value_two_col_table(table_, field, strict, - merge_lines=merge) - if field in ('properties',): - value = table_parser.convert_value_to_dict(value) - elif field in ('security_groups',): - if isinstance(value, str): - value = [value] - value = [re.findall("name='(.*)'", v)[0] for v in value] - values.append(value) - return values - - -def get_vm_fault_message(vm_id, con_ssh=None, auth_info=None): - return get_vm_values(vm_id=vm_id, fields='fault', con_ssh=con_ssh, - auth_info=auth_info)[0] - - -def get_vm_flavor(vm_id, field='id', con_ssh=None, auth_info=None): - """ - Get flavor id of given vm - - Args: - vm_id (str): - field (str): id or name - con_ssh (SSHClient): - auth_info (dict): - - Returns (str): - - """ - flavor = get_vm_values(vm_id, fields='flavor', strict=True, con_ssh=con_ssh, - auth_info=auth_info)[0] - flavor_name, flavor_id = flavor.split('(') - if field == 'id': - flavor = flavor_id.strip().split(')')[0] - else: - flavor = flavor_name.strip() - return flavor - - -def get_vm_host(vm_id, con_ssh=None, auth_info=Tenant.get('admin')): - """ - Get host of given vm via openstack server show - Args: - vm_id: - con_ssh: - auth_info - - Returns (str): - - """ - return get_vm_values(vm_id, ':host', strict=False, con_ssh=con_ssh, - auth_info=auth_info)[0] - - -def get_vms_hosts(vm_ids, con_ssh=None, auth_info=Tenant.get('admin')): - """ - Get vms' hosts via openstack server list - Args: - vm_ids: - con_ssh: - auth_info - - Returns: - - """ - vms_hosts = get_vms_info(vms=vm_ids, fields='host', auth_info=auth_info, - con_ssh=con_ssh) - vms_hosts = [vms_hosts[vm][0] for vm in vm_ids] - - return vms_hosts - - -def get_vms_on_host(hostname, field='ID', con_ssh=None, - auth_info=Tenant.get('admin')): - """ - Get vms on given host - Args: - field: ID or Name - hostname (str):Name of a compute node - con_ssh: - auth_info - - Returns (list): A list of VMs' ID under a hypervisor - - """ - vms = get_vms(host=hostname, all_projects=True, long=False, con_ssh=con_ssh, - auth_info=auth_info, field=field) - return vms - - -def get_vms_per_host(vms=None, con_ssh=None, auth_info=Tenant.get('admin')): - """ - Get vms per host - Args: - vms - con_ssh (SSHClient): - auth_info (dict) - - Returns (dict):return a dictionary where the host(hypervisor) is the key - and value are a list of VMs under the host - - """ - vms_hosts = get_vms_info(vms=vms, fields='host', auth_info=auth_info, - con_ssh=con_ssh, long=True, all_projects=True) - vms_per_host = {} - for vm in vms_hosts: - host = vms_hosts[vm][0] - if host in vms_per_host: - 
vms_per_host[host].append(vm) - else: - vms_per_host[host] = [vm] - - return vms_per_host - - -def _get_boot_info(table_, vm_id, auth_info=None, con_ssh=None): - image = table_parser.get_value_two_col_table(table_, 'image') - if not image: - volumes = _get_vm_volumes(table_) - if len(volumes) == 0: - raise exceptions.VMError( - "Booted from volume, but no volume id found.") - - from keywords import cinder_helper - if len(volumes) == 1: - vol_id = volumes[0] - vol_name, image_info = cinder_helper.get_volume_show_values( - vol_id, auth_info=auth_info, con_ssh=con_ssh, - fields=('name', 'volume_image_metadata')) - LOG.info("VM booted from volume.") - return {'type': 'volume', 'id': vol_id, 'volume_name': vol_name, - 'image_name': image_info['image_name']} - else: - LOG.info( - "VM booted from volume. Multiple volumes found, taking the " - "first bootable volume.") - for volume in volumes: - bootable, vol_name, image_info = \ - cinder_helper.get_volume_show_values( - volume, - fields=('bootable', 'name', 'volume_image_metadata'), - auth_info=auth_info, con_ssh=con_ssh) - if str(bootable).lower() == 'true': - return {'type': 'volume', 'id': volume, - 'volume_name': vol_name, - 'image_name': image_info['image_name']} - - raise exceptions.VMError( - "VM {} has no bootable volume attached.".format(vm_id)) - - else: - name, img_uuid = image.strip().split(sep='(') - return {'type': 'image', 'id': img_uuid.split(sep=')')[0], - 'image_name': name.strip()} - - -def get_vm_boot_info(vm_id, auth_info=None, con_ssh=None): - """ - Get vm boot source and id. - - Args: - vm_id (str): - auth_info (dict|None): - con_ssh (SSHClient): - - Returns (dict): VM boot info dict. - Format: {'type': <type>, 'id': <id>}. - <type> is either 'volume' or 'image' - - """ - table_ = table_parser.table( - cli.openstack('server show', vm_id, ssh_client=con_ssh, - auth_info=auth_info)[1]) - return _get_boot_info(table_, vm_id=vm_id, auth_info=auth_info, - con_ssh=con_ssh) - - -def get_vm_image_name(vm_id, auth_info=None, con_ssh=None): - """ - - Args: - vm_id (str): - auth_info (dict): - con_ssh (SSHClient): - - Returns (str): image name for the vm. If vm booted from volume, - then image name in volume image metadata will be returned. - - """ - boot_info = get_vm_boot_info(vm_id, auth_info=auth_info, con_ssh=con_ssh) - - return boot_info['image_name'] - - -def _get_vm_volumes(table_): - """ - Args: - table_ (dict): - - Returns (list): A list of volume ids from the nova show table. 
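        e.g., a volumes_attached entry of "id='<vol_uuid>'" (illustrative) is
        returned as ['<vol_uuid>'].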
- - """ - volumes = table_parser.get_value_two_col_table(table_, 'volumes_attached', - merge_lines=False) - if not volumes: - return [] - - if isinstance(volumes, str): - volumes = [volumes] - - return [re.findall("id='(.*)'", volume)[0] for volume in volumes] - - -def get_vm_instance_name(vm_id, con_ssh=None): - return get_vm_values(vm_id, ":instance_name", strict=False, - con_ssh=con_ssh)[0] diff --git a/automated-pytest-suite/pytest.ini b/automated-pytest-suite/pytest.ini deleted file mode 100644 index 627deb4c..00000000 --- a/automated-pytest-suite/pytest.ini +++ /dev/null @@ -1,23 +0,0 @@ -[pytest] -addopts = -s -rxs -v -testpaths = testcases/functional -log_print = False -markers = - sanity: mark test for sanity run - robotsanity: temporary mark for the tests from robotframework - robotperformance: temporary mark for the tests from robotframework - cpe_sanity: mark tests for cpe sanity - storage_sanity: mark tests for storage sanity - sx_sanity: mark tests for simplex sanity - nightly: nightly regression - sx_nightly: mark tests for simplex nightly regression - platform: mark tests for container platform tests that don't require openstack services - p1: mark test priority as p1 - p2: mark test priority as p2 - p3: mark test priority as p3 - domain_sanity: mark test priority as domain sanity - nics: networking testcases for nic testing - dc: distributed cloud test cases - # features(feature1, feature2, ...): mark impacted feature(s) for a test case. - slow: slow test that possibly involves reboot or lock/unlock host(s) - abslast: test case that absolutely should be run the last diff --git a/automated-pytest-suite/requirements.txt b/automated-pytest-suite/requirements.txt deleted file mode 100644 index 0b1858a7..00000000 --- a/automated-pytest-suite/requirements.txt +++ /dev/null @@ -1,6 +0,0 @@ -pytest==3.6.1 -pexpect -requests -selenium -pyvirtualdisplay -PyYAML==5.4.1 \ No newline at end of file diff --git a/automated-pytest-suite/setups.py b/automated-pytest-suite/setups.py deleted file mode 100644 index b60e8a78..00000000 --- a/automated-pytest-suite/setups.py +++ /dev/null @@ -1,809 +0,0 @@ -# -# Copyright (c) 2019, 2020 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - - -import os -import re -import time -import ipaddress -import configparser - -from consts.auth import Tenant, HostLinuxUser, CliAuth, Guest -from consts.stx import Prompt, SUBCLOUD_PATTERN, SysType, GuestImages, Networks -from consts.lab import Labs, add_lab_entry, NatBoxes -from consts.proj_vars import ProjVar -from keywords import host_helper, nova_helper, system_helper, keystone_helper, \ - common, container_helper, dc_helper -from utils import exceptions -from utils.clients.ssh import SSHClient, CONTROLLER_PROMPT, ControllerClient, \ - NATBoxClient, PASSWORD_PROMPT -from utils.tis_log import LOG - - -def less_than_two_controllers(con_ssh=None, - auth_info=Tenant.get('admin_platform')): - return len( - system_helper.get_controllers(con_ssh=con_ssh, auth_info=auth_info)) < 2 - - -def setup_tis_ssh(lab): - con_ssh = ControllerClient.get_active_controller(fail_ok=True) - - if con_ssh is None: - con_ssh = SSHClient(lab['floating ip'], HostLinuxUser.get_user(), - HostLinuxUser.get_password(), - CONTROLLER_PROMPT) - con_ssh.connect(retry=True, retry_timeout=30) - ControllerClient.set_active_controller(con_ssh) - - return con_ssh - - -def setup_vbox_tis_ssh(lab): - if 'external_ip' in lab.keys(): - - con_ssh = ControllerClient.get_active_controller(fail_ok=True) - if con_ssh: - con_ssh.disconnect() - - con_ssh = SSHClient(lab['external_ip'], HostLinuxUser.get_user(), - HostLinuxUser.get_password(), - CONTROLLER_PROMPT, port=lab['external_port']) - con_ssh.connect(retry=True, retry_timeout=30) - ControllerClient.set_active_controller(con_ssh) - - else: - con_ssh = setup_tis_ssh(lab) - - return con_ssh - - -def setup_primary_tenant(tenant): - Tenant.set_primary(tenant) - LOG.info("Primary Tenant for test session is set to {}".format( - Tenant.get(tenant)['tenant'])) - - -def setup_natbox_ssh(natbox, con_ssh): - natbox_ip = natbox['ip'] if natbox else None - if not natbox_ip and not container_helper.is_stx_openstack_deployed( - con_ssh=con_ssh): - LOG.info( - "stx-openstack is not applied and natbox is unspecified. Skip " - "natbox config.") - return None - - NATBoxClient.set_natbox_client(natbox_ip) - nat_ssh = NATBoxClient.get_natbox_client() - ProjVar.set_var(natbox_ssh=nat_ssh) - - setup_keypair(con_ssh=con_ssh, natbox_client=nat_ssh) - - return nat_ssh - - -def setup_keypair(con_ssh, natbox_client=None): - """ - Copy private keyfile from controller-0:/opt/platform to natbox: priv_keys/ - Args: - natbox_client (SSHClient): NATBox client - con_ssh (SSHClient) - """ - if not container_helper.is_stx_openstack_deployed(con_ssh=con_ssh): - LOG.info("stx-openstack is not applied. Skip nova keypair config.") - return - - # ssh private key should now exist under keyfile_path - if not natbox_client: - natbox_client = NATBoxClient.get_natbox_client() - - LOG.info("scp key file from controller to NATBox") - # keyfile path that can be specified in testcase config - keyfile_stx_origin = os.path.normpath(ProjVar.get_var('STX_KEYFILE_PATH')) - - # keyfile will always be copied to sysadmin home dir first with its file - # permission updated - keyfile_stx_final = os.path.normpath( - ProjVar.get_var('STX_KEYFILE_SYS_HOME')) - public_key_stx = '{}.pub'.format(keyfile_stx_final) - - # keyfile will also be saved to /opt/platform, so it won't be - # lost during system upgrade.
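- # The private key is later scp'd to the NATBox, and the matching public key is used for the nova keypair.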
- keyfile_opt_pform = '/opt/platform/{}'.format( - os.path.basename(keyfile_stx_final)) - - # copy keyfile to following NatBox location. This can be specified in - # testcase config - keyfile_path_natbox = os.path.normpath( - ProjVar.get_var('NATBOX_KEYFILE_PATH')) - - auth_info = Tenant.get_primary() - keypair_name = auth_info.get('nova_keypair', - 'keypair-{}'.format(auth_info['user'])) - nova_keypair = nova_helper.get_keypairs(name=keypair_name, - auth_info=auth_info) - - linux_user = HostLinuxUser.get_user() - nonroot_group = _get_nonroot_group(con_ssh=con_ssh, user=linux_user) - if not con_ssh.file_exists(keyfile_stx_final): - with host_helper.ssh_to_host('controller-0', - con_ssh=con_ssh) as con_0_ssh: - if not con_0_ssh.file_exists(keyfile_opt_pform): - if con_0_ssh.file_exists(keyfile_stx_origin): - # Given private key file exists. Need to ensure public - # key exists in same dir. - if not con_0_ssh.file_exists('{}.pub'.format( - keyfile_stx_origin)) and not nova_keypair: - raise FileNotFoundError( - '{}.pub is not found'.format(keyfile_stx_origin)) - else: - # Need to generate ssh key - if nova_keypair: - raise FileNotFoundError( - "Cannot find private key for existing nova " - "keypair {}".format(nova_keypair)) - - con_0_ssh.exec_cmd("ssh-keygen -f '{}' -t rsa -N ''".format( - keyfile_stx_origin), fail_ok=False) - if not con_0_ssh.file_exists(keyfile_stx_origin): - raise FileNotFoundError( - "{} not found after ssh-keygen".format( - keyfile_stx_origin)) - - # keyfile_stx_origin and matching public key should now exist - # on controller-0 - # copy keyfiles to home dir and opt platform dir - con_0_ssh.exec_cmd( - 'cp {} {}'.format(keyfile_stx_origin, keyfile_stx_final), - fail_ok=False) - con_0_ssh.exec_cmd( - 'cp {}.pub {}'.format(keyfile_stx_origin, public_key_stx), - fail_ok=False) - con_0_ssh.exec_sudo_cmd( - 'cp {} {}'.format(keyfile_stx_final, keyfile_opt_pform), - fail_ok=False) - - # Make sure owner is sysadmin - # If private key exists in opt platform, then it must also exist - # in home dir - con_0_ssh.exec_sudo_cmd( - 'chown {}:{} {}'.format(linux_user, nonroot_group, - keyfile_stx_final), - fail_ok=False) - - # ssh private key should now exists under home dir and opt platform - # on controller-0 - if con_ssh.get_hostname() != 'controller-0': - # copy file from controller-0 home dir to controller-1 - con_ssh.scp_on_dest(source_user=HostLinuxUser.get_user(), - source_ip='controller-0', - source_path=keyfile_stx_final, - source_pswd=HostLinuxUser.get_password(), - dest_path=keyfile_stx_final, timeout=60) - - if not nova_keypair: - LOG.info("Create nova keypair {} using public key {}". 
- format(keypair_name, public_key_stx)) - if not con_ssh.file_exists(public_key_stx): - con_ssh.scp_on_dest(source_user=HostLinuxUser.get_user(), - source_ip='controller-0', - source_path=public_key_stx, - source_pswd=HostLinuxUser.get_password(), - dest_path=public_key_stx, timeout=60) - con_ssh.exec_sudo_cmd('chown {}:{} {}'.format( - linux_user, nonroot_group, public_key_stx), - fail_ok=False) - - if ProjVar.get_var('REMOTE_CLI'): - dest_path = os.path.join(ProjVar.get_var('TEMP_DIR'), - os.path.basename(public_key_stx)) - common.scp_from_active_controller_to_localhost( - source_path=public_key_stx, dest_path=dest_path, timeout=60) - public_key_stx = dest_path - LOG.info("Public key file copied to localhost: {}".format( - public_key_stx)) - - nova_helper.create_keypair(keypair_name, public_key=public_key_stx, - auth_info=auth_info) - - natbox_client.exec_cmd( - 'mkdir -p {}'.format(os.path.dirname(keyfile_path_natbox))) - tis_ip = ProjVar.get_var('LAB').get('floating ip') - for i in range(10): - try: - natbox_client.scp_on_dest(source_ip=tis_ip, - source_user=HostLinuxUser.get_user(), - source_pswd=HostLinuxUser.get_password(), - source_path=keyfile_stx_final, - dest_path=keyfile_path_natbox, - timeout=120) - LOG.info("private key is copied to NatBox: {}".format( - keyfile_path_natbox)) - break - except exceptions.SSHException as e: - if i == 9: - raise - - LOG.info(e.__str__()) - time.sleep(10) - - -def _get_nonroot_group(con_ssh, user=None): - if not user: - user = HostLinuxUser.get_user() - groups = con_ssh.exec_cmd('groups {}'.format(user), fail_ok=False)[1] - err = 'Please ensure linux_user {} belongs to both root and non-root ' \ - 'groups'.format(user) - if 'root' not in groups: - raise ValueError(err) - - groups = groups.split(': ')[-1].split() - for group in groups: - if group.strip() != 'root': - return group - - raise ValueError('Please ensure linux_user {} belongs to both root ' - 'and at least one non-root group'.format(user)) - - -def get_lab_dict(labname): - labname = labname.strip().lower().replace('-', '_') - labs = get_labs_list() - - for lab in labs: - if labname in lab.get('name').replace('-', '_').lower().strip() \ - or labname == lab.get('short_name').replace('-', '_').\ - lower().strip() or labname == lab.get('floating ip'): - return lab - else: - return add_lab_entry(labname) - - -def get_labs_list(): - labs = [getattr(Labs, item) for item in dir(Labs) if - not item.startswith('__')] - labs = [lab_ for lab_ in labs if isinstance(lab_, dict)] - return labs - - -def get_natbox_dict(natboxname, user=None, password=None, prompt=None): - natboxname = natboxname.lower().strip() - natboxes = [getattr(NatBoxes, item) for item in dir(NatBoxes) if - item.startswith('NAT_')] - - for natbox in natboxes: - if natboxname.replace('-', '_') in natbox.get('name').\ - replace('-', '_') or natboxname == natbox.get('ip'): - return natbox - else: - if __get_ip_version(natboxname) == 6: - raise ValueError('Only IPv4 address is supported for now') - - return NatBoxes.add_natbox(ip=natboxname, user=user, - password=password, prompt=prompt) - - -def get_tenant_dict(tenantname): - # tenantname = tenantname.lower().strip().replace('_', '').replace('-', '') - tenants = [getattr(Tenant, item) for item in dir(Tenant) if - not item.startswith('_') and item.isupper()] - - for tenant in tenants: - if tenantname == tenant.get('tenant').replace('_', '').replace('-', ''): - return tenant - else: - raise ValueError("{} is not a valid input".format(tenantname)) - - -def collect_tis_logs(con_ssh): -
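# Delegates to the shared common helper, which collects software logs over the given controller ssh connection. -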
common.collect_software_logs(con_ssh=con_ssh) - - -def get_tis_timestamp(con_ssh): - return con_ssh.exec_cmd('date +"%T"')[1] - - -def set_build_info(con_ssh): - system_helper.get_build_info(con_ssh=con_ssh) - - -def _rsync_files_to_con1(con_ssh=None, central_region=False, - file_to_check=None): - region = 'RegionOne' if central_region else None - auth_info = Tenant.get('admin_platform', dc_region=region) - if less_than_two_controllers(auth_info=auth_info, con_ssh=con_ssh): - LOG.info("Less than two controllers on system. Skip copying file to " - "controller-1.") - return - - LOG.info("rsync test files from controller-0 to controller-1 if not " - "already done") - stx_home = HostLinuxUser.get_home() - if not file_to_check: - file_to_check = '{}/images/tis-centos-guest.img'.format(stx_home) - try: - with host_helper.ssh_to_host("controller-1", - con_ssh=con_ssh) as con_1_ssh: - if con_1_ssh.file_exists(file_to_check): - LOG.info( - "Test files already exist on controller-1. Skip rsync.") - return - - except Exception as e: - LOG.error( - "Cannot ssh to controller-1. Skip rsync. " - "\nException caught: {}".format(e.__str__())) - return - - cmd = "rsync -avr -e 'ssh -o UserKnownHostsFile=/dev/null -o " \ - "StrictHostKeyChecking=no ' " \ - "{}/* controller-1:{}".format(stx_home, stx_home) - - timeout = 1800 - with host_helper.ssh_to_host("controller-0", con_ssh=con_ssh) as con_0_ssh: - LOG.info("rsync files from controller-0 to controller-1...") - con_0_ssh.send(cmd) - - end_time = time.time() + timeout - while time.time() < end_time: - index = con_0_ssh.expect( - [con_0_ssh.prompt, PASSWORD_PROMPT, Prompt.ADD_HOST], - timeout=timeout, - searchwindowsize=100) - if index == 2: - con_0_ssh.send('yes') - - if index == 1: - con_0_ssh.send(HostLinuxUser.get_password()) - - if index == 0: - output = int(con_0_ssh.exec_cmd('echo $?')[1]) - if output in [0, 23]: - LOG.info( - "Test files are successfully copied to controller-1 " - "from controller-0") - break - else: - raise exceptions.SSHExecCommandFailed( - "Failed to rsync files from controller-0 to " - "controller-1") - - else: - raise exceptions.TimeoutException( - "Timed out rsync files to controller-1") - - -def copy_test_files(): - con_ssh = None - central_region = False - if ProjVar.get_var('IS_DC'): - _rsync_files_to_con1( - con_ssh=ControllerClient.get_active_controller( - name=ProjVar.get_var('PRIMARY_SUBCLOUD')), - file_to_check='~/heat/README', - central_region=central_region) - con_ssh = ControllerClient.get_active_controller(name='RegionOne') - central_region = True - - _rsync_files_to_con1(con_ssh=con_ssh, central_region=central_region) - - -def get_auth_via_openrc(con_ssh, use_telnet=False, con_telnet=None): - valid_keys = ['OS_AUTH_URL', - 'OS_ENDPOINT_TYPE', - 'CINDER_ENDPOINT_TYPE', - 'OS_USER_DOMAIN_NAME', - 'OS_PROJECT_DOMAIN_NAME', - 'OS_IDENTITY_API_VERSION', - 'OS_REGION_NAME', - 'OS_INTERFACE', - 'OS_KEYSTONE_REGION_NAME'] - - client = con_telnet if use_telnet and con_telnet else con_ssh - code, output = client.exec_cmd('cat /etc/platform/openrc') - if code != 0: - return None - - lines = output.splitlines() - auth_dict = {} - for line in lines: - if 'export' in line: - if line.split('export ')[1].split(sep='=')[0] in valid_keys: - key, value = line.split(sep='export ')[1].split(sep='=') - auth_dict[key.strip().upper()] = value.strip() - - return auth_dict - - -def is_https(con_ssh): - return keystone_helper.is_https_enabled(con_ssh=con_ssh, source_openrc=True, - auth_info=Tenant.get( - 'admin_platform')) - - -def 
get_version_and_patch_info(): - version = ProjVar.get_var('SW_VERSION')[0] - info = 'Software Version: {}\n'.format(version) - - patches = ProjVar.get_var('PATCH') - if patches: - info += 'Patches:\n{}\n'.format('\n'.join(patches)) - - # LOG.info("SW Version and Patch info: {}".format(info)) - return info - - -def get_system_mode_from_lab_info(lab, multi_region_lab=False, - dist_cloud_lab=False): - """ - - Args: - lab: - multi_region_lab: - dist_cloud_lab: - - Returns: - - """ - - if multi_region_lab: - return SysType.MULTI_REGION - elif dist_cloud_lab: - return SysType.DISTRIBUTED_CLOUD - - elif 'system_mode' not in lab: - if 'storage_nodes' in lab: - return SysType.STORAGE - elif 'compute_nodes' in lab: - return SysType.REGULAR - - elif len(lab['controller_nodes']) > 1: - return SysType.AIO_DX - else: - return SysType.AIO_SX - - elif 'system_mode' in lab: - if "simplex" in lab['system_mode']: - return SysType.AIO_SX - else: - return SysType.AIO_DX - else: - LOG.warning( - "Can not determine the lab to install system type based on " - "provided information. Lab info: {}" - .format(lab)) - return None - - -def add_ping_failure(test_name): - file_path = '{}{}'.format(ProjVar.get_var('PING_FAILURE_DIR'), - 'ping_failures.txt') - with open(file_path, mode='a', encoding='utf8') as f: - f.write(test_name + '\n') - - -def set_region(region=None): - """ - set global variable region. - This needs to be called after CliAuth.set_vars, since the custom region - value needs to override what is - specified in openrc file. - - local region and auth url is saved in CliAuth, while the remote region - and auth url is saved in Tenant. - - Args: - region: region to set - - """ - local_region = CliAuth.get_var('OS_REGION_NAME') - if not region: - if ProjVar.get_var('IS_DC'): - region = 'SystemController' - else: - region = local_region - Tenant.set_region(region=region) - ProjVar.set_var(REGION=region) - if re.search(SUBCLOUD_PATTERN, region): - # Distributed cloud, lab specified is a subcloud. - urls = keystone_helper.get_endpoints(region=region, field='URL', - interface='internal', - service_name='keystone') - if not urls: - raise ValueError( - "No internal endpoint found for region {}. Invalid value for " - "--region with specified lab." - "sub-cloud tests can be run on controller, but not the other " - "way round".format( - region)) - Tenant.set_platform_url(urls[0]) - - -def set_dc_vars(): - if not ProjVar.get_var('IS_DC') or ControllerClient.get_active_controller( - name='RegionOne', fail_ok=True): - return - - central_con_ssh = ControllerClient.get_active_controller() - ControllerClient.set_active_controller(central_con_ssh, name='RegionOne') - primary_subcloud = ProjVar.get_var('PRIMARY_SUBCLOUD') - sub_clouds = dc_helper.get_subclouds(avail='online', mgmt='managed', - con_ssh=central_con_ssh) - LOG.info("Online subclouds: {}".format(sub_clouds)) - - lab = ProjVar.get_var('LAB') - primary_ssh = None - for subcloud in sub_clouds: - subcloud_lab = lab.get(subcloud, None) - if not subcloud_lab: - raise ValueError('Please add {} to {} in consts/lab.py'.format( - subcloud, lab['short_name'])) - - LOG.info("Create ssh connection to {}, and add to ControllerClient". 
- format(subcloud)) - # subcloud_ssh = SSHClient(subcloud_lab['floating ip'], - # HostLinuxUser.get_user(), - # HostLinuxUser.get_password(), - # CONTROLLER_PROMPT) - - subcloud_ssh = common.ssh_to_stx(lab=subcloud_lab) - - try: - subcloud_ssh.connect(retry=True, retry_timeout=30) - ControllerClient.set_active_controller(subcloud_ssh, name=subcloud) - except exceptions.SSHException as e: - if subcloud == primary_subcloud: - raise - LOG.warning('Cannot connect to {} via its floating ip. {}'. - format(subcloud, e.__str__())) - continue - - LOG.info("Add {} to DC_MAP".format(subcloud)) - subcloud_auth = get_auth_via_openrc(subcloud_ssh) - auth_url = subcloud_auth['OS_AUTH_URL'] - region = subcloud_auth['OS_REGION_NAME'] - Tenant.add_dc_region(region_info={subcloud: {'auth_url': auth_url, - 'region': region}}) - - if subcloud == primary_subcloud: - primary_ssh = subcloud_ssh - LOG.info("Set default cli auth to use {}".format(subcloud)) - Tenant.set_region(region=region) - Tenant.set_platform_url(url=auth_url) - - LOG.info("Set default controller ssh to {} in ControllerClient". - format(primary_subcloud)) - ControllerClient.set_default_ssh(primary_subcloud) - return primary_ssh - - -def set_sys_type(con_ssh): - primary_ssh = set_dc_vars() - sys_type = system_helper.get_sys_type(con_ssh=primary_ssh if primary_ssh else con_ssh) - ProjVar.set_var(SYS_TYPE=sys_type) - - -def arp_for_fip(lab, con_ssh): - fip = lab['floating ip'] - code, output = con_ssh.exec_cmd( - 'ip addr | grep -B 4 {} | grep --color=never BROADCAST'.format(fip)) - if output: - target_str = output.splitlines()[-1] - dev = target_str.split(sep=': ')[1].split('@')[0] - con_ssh.exec_cmd('arping -c 3 -A -q -I {} {}'.format(dev, fip)) - - -def __get_ip_version(ip_addr): - try: - ip_version = ipaddress.ip_address(ip_addr).version - except ValueError: - ip_version = None - - return ip_version - - -def setup_testcase_config(testcase_config, lab=None, natbox=None): - fip_error = 'A valid IPv4 OAM floating IP has to be specified via ' \ - 'cmdline option --lab=, ' \ - 'or testcase config file has to be provided via ' \ - '--testcase-config with oam_floating_ip ' \ - 'specified under auth_platform section.' - if not testcase_config: - if not lab: - raise ValueError(fip_error) - return lab, natbox - - testcase_config = os.path.expanduser(testcase_config) - auth_section = 'auth' - guest_image_section = 'guest_image' - guest_networks_section = 'guest_networks' - guest_keypair_section = 'guest_keypair' - natbox_section = 'natbox' - - config = configparser.ConfigParser() - config.read(testcase_config) - - # - # Update global variables for auth section - # - # Update OAM floating IP - if lab: - fip = lab.get('floating ip') - config.set(auth_section, 'oam_floating_ip', fip) - else: - fip = config.get(auth_section, 'oam_floating_ip', fallback='').strip() - lab = get_lab_dict(fip) - - if __get_ip_version(fip) != 4: - raise ValueError(fip_error) - - # controller-0 oam ip is updated with best effort if a valid IPv4 IP is - # provided - if not lab.get('controller-0 ip') and config.get(auth_section, - 'controller0_oam_ip', - fallback='').strip(): - con0_ip = config.get(auth_section, 'controller0_oam_ip').strip() - if __get_ip_version(con0_ip) == 4: - lab['controller-0 ip'] = con0_ip - else: - LOG.info( - "controller0_oam_ip specified in testcase config file is not " - "a valid IPv4 address. 
Ignore.") - - # Update linux user credentials - if config.get(auth_section, 'linux_username', fallback='').strip(): - HostLinuxUser.set_user( - config.get(auth_section, 'linux_username').strip()) - if config.get(auth_section, 'linux_user_password', fallback='').strip(): - HostLinuxUser.set_password( - config.get(auth_section, 'linux_user_password').strip()) - - # Update openstack keystone user credentials - auth_dict_map = { - 'platform_admin': 'admin_platform', - 'admin': 'admin', - 'test1': 'tenant1', - 'test2': 'tenant2', - } - for conf_prefix, dict_name in auth_dict_map.items(): - kwargs = {} - default_auth = Tenant.get(dict_name) - conf_user = config.get(auth_section, '{}_username'.format(conf_prefix), - fallback='').strip() - conf_password = config.get(auth_section, - '{}_password'.format(conf_prefix), - fallback='').strip() - conf_project = config.get(auth_section, - '{}_project_name'.format(conf_prefix), - fallback='').strip() - conf_domain = config.get(auth_section, - '{}_domain_name'.format(conf_prefix), - fallback='').strip() - conf_keypair = config.get(auth_section, - '{}_nova_keypair'.format(conf_prefix), - fallback='').strip() - if conf_user and conf_user != default_auth.get('user'): - kwargs['username'] = conf_user - if conf_password and conf_password != default_auth.get('password'): - kwargs['password'] = conf_password - if conf_project and conf_project != default_auth.get('tenant'): - kwargs['tenant'] = conf_project - if conf_domain and conf_domain != default_auth.get('domain'): - kwargs['domain'] = conf_domain - if conf_keypair and conf_keypair != default_auth.get('nova_keypair'): - kwargs['nova_keypair'] = conf_keypair - - if kwargs: - Tenant.update(dict_name, **kwargs) - - # - # Update global variables for natbox section - # - natbox_host = config.get(natbox_section, 'natbox_host', fallback='').strip() - natbox_user = config.get(natbox_section, 'natbox_user', fallback='').strip() - natbox_password = config.get(natbox_section, 'natbox_password', - fallback='').strip() - natbox_prompt = config.get(natbox_section, 'natbox_prompt', - fallback='').strip() - if natbox_host and (not natbox or natbox_host != natbox['ip']): - natbox = get_natbox_dict(natbox_host, user=natbox_user, - password=natbox_password, prompt=natbox_prompt) - # - # Update global variables for guest_image section - # - img_file_dir = config.get(guest_image_section, 'img_file_dir', - fallback='').strip() - glance_image_name = config.get(guest_image_section, 'glance_image_name', - fallback='').strip() - img_file_name = config.get(guest_image_section, 'img_file_name', - fallback='').strip() - img_disk_format = config.get(guest_image_section, 'img_disk_format', - fallback='').strip() - min_disk_size = config.get(guest_image_section, 'min_disk_size', - fallback='').strip() - img_container_format = config.get(guest_image_section, - 'img_container_format', - fallback='').strip() - image_ssh_user = config.get(guest_image_section, 'image_ssh_user', - fallback='').strip() - image_ssh_password = config.get(guest_image_section, 'image_ssh_password', - fallback='').strip() - - if img_file_dir and img_file_dir != GuestImages.DEFAULT['image_dir']: - # Update default image file directory - img_file_dir = os.path.expanduser(img_file_dir) - if not os.path.isabs(img_file_dir): - raise ValueError( - "Please provide a valid absolute path for img_file_dir " - "under guest_image section in testcase config file") - GuestImages.DEFAULT['image_dir'] = img_file_dir - - if glance_image_name and glance_image_name != 
GuestImages.DEFAULT['guest']: - # Update default glance image name - GuestImages.DEFAULT['guest'] = glance_image_name - if glance_image_name not in GuestImages.IMAGE_FILES: - # Add guest image info to consts.stx.GuestImages - if not (img_file_name and img_disk_format and min_disk_size): - raise ValueError( - "img_file_name and img_disk_format under guest_image " - "section have to be " - "specified in testcase config file") - - img_container_format = img_container_format if \ - img_container_format else 'bare' - GuestImages.IMAGE_FILES[glance_image_name] = \ - (None, min_disk_size, img_file_name, img_disk_format, - img_container_format) - - # Add guest login credentials - Guest.CREDS[glance_image_name] = { - 'user': image_ssh_user if image_ssh_user else 'root', - 'password': image_ssh_password if image_ssh_password else None, - } - - # - # Update global variables for guest_keypair section - # - natbox_keypair_dir = config.get(guest_keypair_section, 'natbox_keypair_dir', - fallback='').strip() - private_key_path = config.get(guest_keypair_section, 'private_key_path', - fallback='').strip() - - if natbox_keypair_dir: - natbox_keypair_path = os.path.join(natbox_keypair_dir, - 'keyfile_{}.pem'.format( - lab['short_name'])) - ProjVar.set_var(NATBOX_KEYFILE_PATH=natbox_keypair_path) - if private_key_path: - ProjVar.set_var(STX_KEYFILE_PATH=private_key_path) - - # - # Update global variables for guest_networks section - # - net_name_patterns = { - 'mgmt': config.get(guest_networks_section, 'mgmt_net_name_pattern', - fallback='').strip(), - 'data': config.get(guest_networks_section, 'data_net_name_pattern', - fallback='').strip(), - 'internal': config.get(guest_networks_section, - 'internal_net_name_pattern', - fallback='').strip(), - 'external': config.get(guest_networks_section, - 'external_net_name_pattern', fallback='').strip() - } - - for net_type, net_name_pattern in net_name_patterns.items(): - if net_name_pattern: - Networks.set_neutron_net_patterns(net_type=net_type, - net_name_pattern=net_name_pattern) - - return lab, natbox diff --git a/automated-pytest-suite/stx-test_template.conf b/automated-pytest-suite/stx-test_template.conf deleted file mode 100644 index 937ccaf6..00000000 --- a/automated-pytest-suite/stx-test_template.conf +++ /dev/null @@ -1,137 +0,0 @@ -[auth] -# -# Auth info to ssh to active controller and run platform commands -# - -# Linux user info for ssh to StarlingX controller node -# controllers' OAM network floating ip and unit ip if applicable. -# controller_fip is mandatory unless --lab= is provided -# via cmdline. Only IPv4 is supported by test framework for now. -# Required by all configurations. - -oam_floating_ip = -controller0_oam_ip = -controller1_oam_ip = -linux_username = sysadmin -linux_user_password = Li69nux* - -# Platform keystone admin user and project info -platform_admin_username = admin -platform_admin_project_name = admin -platform_admin_password = Li69nux* -platform_admin_domain_name = Default - - -# Non-platform keystone info -# Required if stx-openstack is deployed - -# non-platform keystone: admin user and project info -admin_username = admin -admin_project_name = admin -admin_password = Li69nux* -admin_domain_name = Default - -# non-platform keystone: first test user and tenant. Will be used for most of -# the openstack related test cases. 
-test1_username = tenant1 -test1_project_name = tenant1 -test1_password = Li69nux* -test1_domain_name = Default -# nova keypair to use when creating VM -test1_nova_keypair = keypair-tenant1 - -# non-platform keystone: second test user and tenant. Should be in the same -# domain as first test user and tenant. -test2_username = tenant2 -test2_project_name = tenant2 -test2_password = Li69nux* -test2_domain_name = Default -test2_nova_keypair = keypair-tenant2 - - -[natbox] -# -# NATBox will be used to ping/ssh to a guest -# Required if stx-openstack is deployed -# - -# Info to ssh to a NATBox. If NatBox is the localhost from which the tests -# are executed, set: natbox_host = localhost -natbox_host = -natbox_user = -natbox_password = - -# python regex pattern for natbox prompt, -# default prompt is natbox_user@.*[$#] when unspecified -natbox_prompt = - - -[guest_image] -# -# Glance image info -# Required if stx-openstack is deployed -# - -# Image file path on active controller. Will be used to create glance image -# in some test cases. -img_file_dir = /home/sysadmin/images -img_file_name = tis-centos-guest.img -# minimum root disk size in GiB if this image is used to launch VM -min_disk_size = 2 -img_disk_format = raw -img_container_format = bare - -# Full name of an existing glance image that will be used as default image -# to create cinder volume, VM, etc. If glance_image_name is not provided, -# a glance image will be created from the above image file at the beginning -# of the test session. -glance_image_name = tis-centos-guest - -# username and password that will be used to ssh to VM that is created -# from above glance image -image_ssh_user = root -image_ssh_password = root - - -[guest_keypair] -# -# Nova keypair to ssh to VM from NATBox without using password in some tests -# Required if stx-openstack is deployed -# - -# Directory to store private keyfile on natbox. -natbox_keypair_dir = ~/priv_keys/ - -# private key path on controller-0 that was used to create above nova keypair. -# If not provided or it does not exist, a nova keypair will be created using -# a key from ssh-keygen on controller-0. -private_key_path = /home/sysadmin/.ssh/id_rsa - - -[guest_networks] -# -# Neutron networks for openstack VM -# Required if stx-openstack is deployed -# - -# Python pattern for different types of neutron networks - -# used in re.search(<pattern>, <net_name>) -# Pattern needs to be unique for each network type - -# mgmt networks - need to be reachable from above NATBox. Will always be -# used to create first nic of the vm, so that VM can be ping'd or ssh'd -# from NATBox. -mgmt_net_name_pattern = tenant\d-mgmt-net - -# data networks - usually unshared. Will be used in some test cases -# that require communication between two VMs -data_net_name_pattern = tenant\d-net - -# internal network - need to be shared among tenants. Will be used in a few -# test cases to route data network traffic via internal interface between -# two VMs that belong to different tenants -internal_net_name_pattern = internal - -# external network - neutron floating ips will be created off this network. -# Needs to be reachable from NATBox.
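-# Note: patterns are applied with re.search, so a plain substring such as "external" also matches longer names.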
-external_net_name_pattern = external diff --git a/automated-pytest-suite/testcases/conftest.py b/automated-pytest-suite/testcases/conftest.py deleted file mode 100755 index 7c902fb1..00000000 --- a/automated-pytest-suite/testcases/conftest.py +++ /dev/null @@ -1,72 +0,0 @@ -import pytest - -import setups -from consts.auth import CliAuth, Tenant -from consts.proj_vars import ProjVar -from utils.tis_log import LOG -from utils.clients.ssh import ControllerClient - -natbox_ssh = None -initialized = False - - -@pytest.fixture(scope='session', autouse=True) -def setup_test_session(global_setup): - """ - Setup primary tenant and Nax Box ssh before the first test gets executed. - STX ssh was already set up at collecting phase. - """ - LOG.fixture_step("(session) Setting up test session...") - setups.setup_primary_tenant(ProjVar.get_var('PRIMARY_TENANT')) - - global con_ssh - if not con_ssh: - con_ssh = ControllerClient.get_active_controller() - # set build id to be used to upload/write test results - setups.set_build_info(con_ssh) - - # Ensure tis and natbox (if applicable) ssh are connected - con_ssh.connect(retry=True, retry_interval=3, retry_timeout=300) - - # set up natbox connection and copy keyfile - natbox_dict = ProjVar.get_var('NATBOX') - global natbox_ssh - natbox_ssh = setups.setup_natbox_ssh(natbox_dict, con_ssh=con_ssh) - - # set global var for sys_type - setups.set_sys_type(con_ssh=con_ssh) - - # rsync files between controllers - setups.copy_test_files() - - -def pytest_collectstart(): - """ - Set up the ssh session at collectstart. Because skipif condition is - evaluated at the collecting test cases phase. - """ - global initialized - if not initialized: - global con_ssh - con_ssh = setups.setup_tis_ssh(ProjVar.get_var("LAB")) - ProjVar.set_var(con_ssh=con_ssh) - CliAuth.set_vars(**setups.get_auth_via_openrc(con_ssh)) - if setups.is_https(con_ssh): - CliAuth.set_vars(HTTPS=True) - - auth_url = CliAuth.get_var('OS_AUTH_URL') - Tenant.set_platform_url(auth_url) - setups.set_region(region=None) - if ProjVar.get_var('IS_DC'): - Tenant.set_platform_url(url=auth_url, central_region=True) - initialized = True - - -def pytest_runtest_teardown(): - for con_ssh_ in ControllerClient.get_active_controllers( - current_thread_only=True): - con_ssh_.flush() - con_ssh_.connect(retry=True, retry_interval=3, retry_timeout=300) - if natbox_ssh: - natbox_ssh.flush() - natbox_ssh.connect(retry=False) diff --git a/automated-pytest-suite/testcases/functional/ceilometer/conftest.py b/automated-pytest-suite/testcases/functional/ceilometer/conftest.py deleted file mode 100755 index 157d7f85..00000000 --- a/automated-pytest-suite/testcases/functional/ceilometer/conftest.py +++ /dev/null @@ -1,3 +0,0 @@ -from testfixtures.resource_mgmt import * -from testfixtures.resource_create import * -from testfixtures.config_host import * diff --git a/automated-pytest-suite/testcases/functional/ceilometer/test_ceilometer_statistics.py b/automated-pytest-suite/testcases/functional/ceilometer/test_ceilometer_statistics.py deleted file mode 100755 index a8c0cc44..00000000 --- a/automated-pytest-suite/testcases/functional/ceilometer/test_ceilometer_statistics.py +++ /dev/null @@ -1,102 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - - -import time -import random -from datetime import datetime, timedelta -from pytest import mark, skip - -from utils.tis_log import LOG - -from consts.stx import GuestImages -from consts.auth import Tenant -from keywords import common, ceilometer_helper, network_helper, \ - glance_helper, system_helper, gnocchi_helper - - -def _wait_for_measurements(meter, resource_type, extra_query, start_time, - overlap=None, timeout=1860, - check_interval=60): - end_time = time.time() + timeout - - while time.time() < end_time: - values = gnocchi_helper.get_aggregated_measures( - metrics=meter, resource_type=resource_type, start=start_time, - overlap=overlap, extra_query=extra_query)[1] - if values: - return values - - time.sleep(check_interval) - - -@mark.cpe_sanity -@mark.sanity -@mark.sx_nightly -@mark.parametrize('meter', [ - 'image.size' -]) -def test_measurements_for_metric(meter): - """ - Validate statistics for one meter - - """ - LOG.tc_step('Get ceilometer statistics table for image.size meter') - - now = datetime.utcnow() - start = (now - timedelta(minutes=10)) - start = start.strftime("%Y-%m-%dT%H:%M:%S") - image_name = GuestImages.DEFAULT['guest'] - resource_type = 'image' - extra_query = "name='{}'".format(image_name) - overlap = None - - code, output = gnocchi_helper.get_aggregated_measures( - metrics=meter, resource_type=resource_type, start=start, - extra_query=extra_query, fail_ok=True) - if code > 0: - if "Metrics can't being aggregated" in output: - # there was another glance image that has the same - # string in its name - overlap = '0' - else: - assert False, output - - values = output - if code == 0 and values: - assert len(values) <= 4, "Incorrect count for {} {} metric via " \ - "'openstack metric measures aggregation'". 
\ - format(image_name, meter) - else: - values = _wait_for_measurements(meter=meter, - resource_type=resource_type, - extra_query=extra_query, - start_time=start, overlap=overlap) - assert values, "No measurements for image.size for 30+ minutes" - - LOG.tc_step('Check that values are larger than zero') - for val in values: - assert 0 <= float(val), "{} {} value in metric measurements " \ - "table is less than zero".format( - image_name, meter) - - -def check_event_in_tenant_or_admin(resource_id, event_type): - for auth_ in (None, Tenant.get('admin')): - traits = ceilometer_helper.get_events(event_type=event_type, - header='traits:value', - auth_info=auth_) - for trait in traits: - if resource_id in trait: - LOG.info("Resource found in ceilometer events using " - "auth: {}".format(auth_)) - break - else: - continue - break - else: - assert False, "{} event for resource {} was not found under admin or " \ - "tenant".format(event_type, resource_id) diff --git a/automated-pytest-suite/testcases/functional/common/conftest.py b/automated-pytest-suite/testcases/functional/common/conftest.py deleted file mode 100755 index 2407e080..00000000 --- a/automated-pytest-suite/testcases/functional/common/conftest.py +++ /dev/null @@ -1,3 +0,0 @@ -from testfixtures.resource_mgmt import * -from testfixtures.config_host import * -from testfixtures.resource_create import * diff --git a/automated-pytest-suite/testcases/functional/common/test_host_connections.py b/automated-pytest-suite/testcases/functional/common/test_host_connections.py deleted file mode 100755 index 29cacc6c..00000000 --- a/automated-pytest-suite/testcases/functional/common/test_host_connections.py +++ /dev/null @@ -1,66 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -from pytest import mark - -from consts.stx import HostAvailState -from keywords import system_helper, network_helper, host_helper -from utils.clients.ssh import ControllerClient -from utils.tis_log import LOG - - -@mark.p3 -def test_ping_hosts(): - con_ssh = ControllerClient.get_active_controller() - - ping_failed_list = [] - for hostname in system_helper.get_hosts(): - LOG.tc_step( - "Send 100 pings to {} from Active Controller".format(hostname)) - ploss_rate, untran_p = network_helper.ping_server(hostname, con_ssh, - num_pings=100, - timeout=300, - fail_ok=True) - if ploss_rate > 0: - if ploss_rate == 100: - ping_failed_list.append( - "{}: All packets dropped.\n".format(hostname)) - else: - ping_failed_list.append( - "{}: Packet loss rate: {}/100\n".format(hostname, - ploss_rate)) - if untran_p > 0: - ping_failed_list.append( - "{}: {}/100 pings are untransmitted within 300 seconds".format( - hostname, untran_p)) - - LOG.tc_step("Ensure all packets are received.") - assert not ping_failed_list, "Dropped/Un-transmitted packets detected " \ - "when pinging hosts. 
" \ - "Details:\n{}".format(ping_failed_list) - - -@mark.sanity -@mark.cpe_sanity -@mark.sx_sanity -def test_ssh_to_hosts(): - """ - Test ssh to every host on system from active controller - - """ - hosts_to_ssh = system_helper.get_hosts( - availability=[HostAvailState.AVAILABLE, HostAvailState.ONLINE]) - failed_list = [] - for hostname in hosts_to_ssh: - LOG.tc_step("Attempt SSH to {}".format(hostname)) - try: - with host_helper.ssh_to_host(hostname): - pass - except Exception as e: - failed_list.append("\n{}: {}".format(hostname, e.__str__())) - - assert not failed_list, "SSH to host(s) failed: {}".format(failed_list) diff --git a/automated-pytest-suite/testcases/functional/common/test_system_health.py b/automated-pytest-suite/testcases/functional/common/test_system_health.py deleted file mode 100755 index 178f513c..00000000 --- a/automated-pytest-suite/testcases/functional/common/test_system_health.py +++ /dev/null @@ -1,58 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -from pytest import mark, fixture - -from utils.tis_log import LOG -from keywords import host_helper, check_helper - - -# Do not check alarms for test in this module, which are read only tests. -@fixture() -def check_alarms(): - pass - - -class TestCoreDumpsAndCrashes: - @fixture(scope='class') - def post_coredumps_and_crash_reports(self): - LOG.fixture_step( - "Gather core dumps and crash reports info for all hosts") - return host_helper.get_coredumps_and_crashreports() - - @mark.abslast - @mark.sanity - @mark.cpe_sanity - @mark.sx_sanity - @mark.parametrize('report_type', [ - 'core_dumps', - 'crash_reports', - ]) - def test_system_coredumps_and_crashes(self, report_type, - post_coredumps_and_crash_reports): - - LOG.tc_step("Check {} does not exist on any host".format(report_type)) - existing_files = {} - for host in post_coredumps_and_crash_reports: - core_dumps, crash_reports = post_coredumps_and_crash_reports[host] - failures = {'core_dumps': core_dumps, - 'crash_reports': crash_reports} - - if failures[report_type]: - existing_files[host] = failures[report_type] - - assert not existing_files, "{} exist on {}".format(report_type, list( - existing_files.keys())) - - -@mark.abslast -@mark.sanity -@mark.cpe_sanity -@mark.sx_sanity -def test_system_alarms(pre_alarms_session): - LOG.tc_step("Gathering system alarms at the end of test session") - check_helper.check_alarms(before_alarms=pre_alarms_session) - LOG.info("No new alarms found after test session.") diff --git a/automated-pytest-suite/testcases/functional/conftest.py b/automated-pytest-suite/testcases/functional/conftest.py deleted file mode 100755 index 94085859..00000000 --- a/automated-pytest-suite/testcases/functional/conftest.py +++ /dev/null @@ -1,5 +0,0 @@ -# Do NOT remove following imports. Needed for test fixture discovery purpose -from testfixtures.resource_mgmt import delete_resources_func, delete_resources_class, delete_resources_module -from testfixtures.recover_hosts import hosts_recover_func, hosts_recover_class, hosts_recover_module -from testfixtures.verify_fixtures import * -from testfixtures.pre_checks_and_configs import * diff --git a/automated-pytest-suite/testcases/functional/dc/conftest.py b/automated-pytest-suite/testcases/functional/dc/conftest.py deleted file mode 100644 index c71fa443..00000000 --- a/automated-pytest-suite/testcases/functional/dc/conftest.py +++ /dev/null @@ -1,18 +0,0 @@ -# -# Copyright (c) 2020 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - -from pytest import fixture, skip - -from consts.proj_vars import ProjVar - -# Import DC fixtures for testcases to use -from testfixtures.dc_fixtures import check_central_alarms_module, check_central_alarms - - -@fixture(scope='module', autouse=True) -def dc_only(): - if not ProjVar.get_var('IS_DC'): - skip('Skip Distributed Cloud test cases for non-DC system.') diff --git a/automated-pytest-suite/testcases/functional/dc/test_alarm_aggregation.py b/automated-pytest-suite/testcases/functional/dc/test_alarm_aggregation.py deleted file mode 100644 index 9170872e..00000000 --- a/automated-pytest-suite/testcases/functional/dc/test_alarm_aggregation.py +++ /dev/null @@ -1,287 +0,0 @@ -# -# Copyright (c) 2020 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -import time - -from pytest import fixture - -from utils import cli -from utils.tis_log import LOG -from utils.clients.ssh import ControllerClient -from utils import table_parser -from consts.proj_vars import ProjVar -from consts.auth import Tenant -from consts.stx import SubcloudStatus, EventLogID -from consts.timeout import DCTimeout -from keywords import dc_helper, system_helper - -# Set the level of stress you want to test -ALARMS_NO = 500 - - -@fixture(scope="module") -def subcloud_to_test(): - check_alarm_summary_match_subcloud(ProjVar.get_var('PRIMARY_SUBCLOUD')) - return ProjVar.get_var('PRIMARY_SUBCLOUD') - - -def check_alarm_summary_match_subcloud(subcloud, timeout=400): - LOG.info("Ensure alarm summary on SystemController matches subcloud {}".format(subcloud)) - subcloud_auth = Tenant.get('admin_platform', dc_region=subcloud) - central_auth = Tenant.get('admin_platform', dc_region='RegionOne') - - severities = ["critical_alarms", "major_alarms", "minor_alarms", "warnings"] - central_alarms = subcloud_alarms = None - end_time = time.time() + timeout - while time.time() < end_time: - output_central = cli.dcmanager('alarm summary', auth_info=central_auth, fail_ok=False)[1] - output_sub = cli.fm("alarm-summary", auth_info=subcloud_auth, fail_ok=False)[1] - - central_alarms = table_parser.get_multi_values(table_parser.table(output_central), - fields=severities, **{"NAME": subcloud}) - subcloud_alarms = table_parser.get_multi_values(table_parser.table(output_sub), severities) - - if central_alarms == subcloud_alarms: - LOG.info("'dcmanager alarm summary' output for {} matches 'fm alarm-summary' on " - "{}".format(subcloud, subcloud)) - return - - time.sleep(30) - - assert central_alarms == subcloud_alarms, \ - "'dcmanager alarm summary' did not match 'fm alarm-summary' on {} " \ - "within {}s".format(subcloud, timeout) - - -def alarm_summary_add_and_del(subcloud): - try: - # Test adding alarm on subcloud - ssh_client = ControllerClient.get_active_controller(name=subcloud) - LOG.info("Wait for alarm raised on subcloud {}".format(subcloud)) - system_helper.wait_for_alarm(alarm_id=EventLogID.PROVIDER_NETWORK_FAILURE, - con_ssh=ssh_client) - LOG.tc_step("Ensure alarm summary on Central matches subcloud: {}".format(subcloud)) - check_alarm_summary_match_subcloud(subcloud) - - # Test clearing alarm on subcloud - LOG.tc_step("Clear alarm on subcloud: {}".format(subcloud)) - ssh_client.exec_cmd('fmClientCli -D host=testhost-0', fail_ok=False) - LOG.info("Wait for alarm clear on subcloud {}".format(subcloud)) - system_helper.wait_for_alarm_gone(alarm_id=EventLogID.PROVIDER_NETWORK_FAILURE, - con_ssh=ssh_client) - check_alarm_summary_match_subcloud(subcloud) - finally: -
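# Best-effort cleanup: always try to clear the test alarm, even if one of the wait steps above failed. -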
ssh_client = ControllerClient.get_active_controller(name=subcloud) - LOG.info("Clear alarm on subcloud: {}".format(subcloud)) - ssh_client.exec_cmd('fmClientCli -D host=testhost-0') - - -def add_routes_to_subcloud(subcloud, subcloud_table, fail_ok=False): - LOG.debug("Add routes back to subcloud: {}".format(subcloud)) - ssh_client = ControllerClient.get_active_controller(name=subcloud) - for host_id in subcloud_table: - comm_args = table_parser.get_multi_values(subcloud_table[host_id], - ["ifname", "network", "prefix", "gateway"]) - command = "host-route-add {} {} {} {} {}".format(host_id, comm_args[0][0], - comm_args[1][0], comm_args[2][0], - comm_args[3][0]) - code, output = cli.system("host-route-list {}".format(host_id)) - uuid_list = table_parser.get_values(table_parser.table(output), "uuid") - if table_parser.get_values(subcloud_table[host_id], "uuid")[0] not in uuid_list: - cli.system(command, ssh_client=ssh_client, fail_ok=fail_ok) - - -def test_dc_alarm_aggregation_managed(subcloud_to_test): - """ - Test Alarm Aggregation on Distributed Cloud - Args: - subcloud_to_test (str): module fixture - - Setups: - - Make sure there is consistency between alarm summary on - Central Cloud and on subclouds - - Test Steps: - - Raise an alarm at subcloud; - - Ensure relative alarm raised on subcloud - - Ensure system alarm-summary on subcloud matches dcmanager alarm summary on system - controller - - Clean alarm at subcloud - - Ensure relative alarm cleared on subcloud - - Ensure system alarm-summary on subcloud matches dcmanager alarm summary on system - controller - """ - - ssh_client = ControllerClient.get_active_controller(name=subcloud_to_test) - LOG.tc_step("Raise alarm on subcloud: {}".format(subcloud_to_test)) - ssh_client.exec_cmd( - "fmClientCli -c \"### ###300.005###clear###system.vm###host=testhost-0" - "### ###critical### ###processing-error###cpu-cycles-limit-exceeded### ###" - "True###True###'\"", fail_ok=False) - - alarm_summary_add_and_del(subcloud_to_test) - - -def test_dc_fault_scenario(subcloud_to_test): - """ - Test Fault Scenario on Distributed Cloud - Args: - subcloud_to_test (str): module fixture - - Setup: - - Make sure there is consistency between alarm summary on - Central Cloud and on subclouds - - Test Steps: - - Make subcloud offline (e.g. delete route) - Step1: - - Ensure subcloud shows offline - Step2: - - Raise alarm on subcloud - - Ensure relative alarm raised on subcloud - - Ensure system alarm-summary on subcloud has changed - - Ensure dcmanager alarm summary on system controller has no change - Step3: - - Resume connectivity to subcloud (e.g.
add route back) - - Ensure subcloud shows online and in-sync - - Ensure system alarm-summary on subcloud matches dcmanager alarm summary on system - controller - Step4: - - Clean alarm on subcloud - - Ensure relative alarm cleared on subcloud - - Ensure system alarm-summary on subcloud matches dcmanager alarm summary on system - controller - """ - ssh_central = ControllerClient.get_active_controller(name="RegionOne") - ssh_subcloud = ControllerClient.get_active_controller(name=subcloud_to_test) - subcloud_table = {} - try: - code, output = cli.dcmanager("subcloud show {}".format(subcloud_to_test), - ssh_client=ssh_central) - gateway = table_parser.get_value_two_col_table(table_parser.table(output), - "management_gateway_ip") - code, hosts_raw = cli.system("host-list", ssh_client=ssh_subcloud) - hosts_id = table_parser.get_values(table_parser.table(hosts_raw), 'id') - for host_id in hosts_id: - code, route_raw = cli.system("host-route-list {}".format(host_id), - ssh_client=ssh_subcloud) - route_table = table_parser.filter_table(table_parser.table(route_raw), - **{'gateway': gateway}) - subcloud_table[host_id] = route_table - - LOG.tc_step("Delete route for subcloud: {} and wait for it to go offline.".format( - subcloud_to_test)) - ssh_subcloud = ControllerClient.get_active_controller(name=subcloud_to_test) - for host_id in subcloud_table: - command = "host-route-delete {}".format(table_parser.get_values( - subcloud_table[host_id], "uuid")[0]) - cli.system(command, ssh_client=ssh_subcloud) - - dc_helper.wait_for_subcloud_status(subcloud_to_test, - avail=SubcloudStatus.AVAIL_OFFLINE, - timeout=DCTimeout.SYNC, con_ssh=ssh_central) - - LOG.tc_step("Raise alarm on subcloud: {}".format(subcloud_to_test)) - ssh_subcloud = ControllerClient.get_active_controller(name=subcloud_to_test) - code_sub_before, output_sub_before = cli.fm("alarm-summary", ssh_client=ssh_subcloud) - code_central_before, output_central_before = cli.dcmanager('alarm summary') - ssh_subcloud.exec_cmd( - "fmClientCli -c \"### ###300.005###clear###system.vm###host=" - "testhost-0### ###critical### ###processing-error###cpu-cycles-limit-exceeded" - "### ###True###True###'\"", fail_ok=False) - LOG.info("Ensure relative alarm was raised at subcloud: {}".format(subcloud_to_test)) - system_helper.wait_for_alarm(alarm_id=EventLogID.PROVIDER_NETWORK_FAILURE, - con_ssh=ssh_subcloud) - code_sub_after, output_sub_after = cli.fm("alarm-summary", ssh_client=ssh_subcloud) - code_central_after, output_central_after = cli.dcmanager('alarm summary') - LOG.info("Ensure fm alarm summary on subcloud: {} has changed but dcmanager alarm " - "summary has not changed".format(subcloud_to_test)) - assert output_central_before == output_central_after and output_sub_before != \ - output_sub_after - - add_routes_to_subcloud(subcloud_to_test, subcloud_table) - - dc_helper.wait_for_subcloud_status(subcloud_to_test, avail=SubcloudStatus.AVAIL_ONLINE, - sync=SubcloudStatus.SYNCED, timeout=DCTimeout.SYNC, - con_ssh=ssh_central) - alarm_summary_add_and_del(subcloud_to_test) - - finally: - cli.dcmanager("subcloud show {}".format(subcloud_to_test), - ssh_client=ssh_central, fail_ok=True) - add_routes_to_subcloud(subcloud_to_test, subcloud_table, fail_ok=True) - LOG.info("Clear alarm on subcloud: {}".format(subcloud_to_test)) - ssh_subcloud.exec_cmd('fmClientCli -D host=testhost-0') - check_alarm_summary_match_subcloud(subcloud=subcloud_to_test) - - -def test_dc_stress_alarm(subcloud_to_test): - """ - Test Stress Scenario on Distributed Cloud - Args: -
subcloud_to_test (str): module fixture - - Setup: - - Make sure there is consistency between alarm summary on - Central Cloud and on subclouds - - Test Steps: - Step1: - - Trigger a large amount of alarms quickly on one subcloud - - Ensure system alarm-summary on subcloud matches dcmanager alarm summary on system - controller - Step2: - - Trigger a large amount of alarms quickly for a long time on all subclouds - - Each alarm summary updates once every 30 seconds until the event is over - - Ensure system alarm-summary on subcloud matches dcmanager alarm summary on system - controller - Step3: - - Clear all alarms - - Ensure system alarm-summary on subcloud matches dcmanager alarm summary on system - controller - """ - ssh_client = ControllerClient.get_active_controller(name=subcloud_to_test) - - # Step 1 - LOG.tc_step("Trigger a large amount of alarms quickly on one subcloud") - try: - for i in range(1, ALARMS_NO + 1): - ssh_client.exec_cmd( - "fmClientCli -c \"### ###300.005###clear###system.vm###host=" - "testhost-{}### ###critical### ###processing-error###cpu-cycles-limit-exceeded" - "### ###True###True###'\"".format(i), fail_ok=False) - finally: - for i in range(1, ALARMS_NO + 1): - ssh_client.exec_cmd('fmClientCli -D host=testhost-{}'.format(i)) - - check_alarm_summary_match_subcloud(subcloud_to_test) - - # Step 2 - ssh_client_list = {} - for subcloud in dc_helper.get_subclouds(mgmt='managed'): - ssh_client_list[subcloud] = ControllerClient.get_active_controller(name=subcloud) - - try: - LOG.tc_step("Trigger a large amount of alarms quickly for a long time on all subclouds") - for subcloud in ssh_client_list: - subcloud_ssh = ssh_client_list[subcloud] - for i in range(1, ALARMS_NO + 1): - subcloud_ssh.exec_cmd( - "fmClientCli -c \"### ###300.005###clear###" - "system.vm###host=testhost-{}### ###critical### ###processing-error###" - "cpu-cycles-limit-exceeded### ###True###True###'\"".format(i), - fail_ok=False) - - for subcloud in ssh_client_list: - check_alarm_summary_match_subcloud(subcloud) - finally: - # Step 3 - LOG.tc_step("Clear all alarms on all subclouds") - for subcloud in ssh_client_list: - subcloud_ssh = ssh_client_list[subcloud] - for i in range(1, ALARMS_NO + 1): - subcloud_ssh.exec_cmd('fmClientCli -D host=testhost-{}'.format(i)) - - for subcloud in ssh_client_list: - check_alarm_summary_match_subcloud(subcloud) diff --git a/automated-pytest-suite/testcases/functional/dc/test_dc_swact_host.py b/automated-pytest-suite/testcases/functional/dc/test_dc_swact_host.py deleted file mode 100644 index c630d240..00000000 --- a/automated-pytest-suite/testcases/functional/dc/test_dc_swact_host.py +++ /dev/null @@ -1,78 +0,0 @@ -# -# Copyright (c) 2020 Wind River Systems, Inc.
-# -# SPDX-License-Identifier: Apache-2.0 -# - -from pytest import fixture - -from consts.auth import Tenant -from consts.proj_vars import ProjVar -from consts.stx import SubcloudStatus -from utils.clients.ssh import ControllerClient -from utils.tis_log import LOG -from keywords import host_helper, dc_helper - - -@fixture(scope='module') -def swact_precheck(request): - LOG.info("Gather subcloud management info") - subcloud = ProjVar.get_var('PRIMARY_SUBCLOUD') - - def revert(): - LOG.fixture_step("Manage {} if unmanaged".format(subcloud)) - dc_helper.manage_subcloud(subcloud) - - request.addfinalizer(revert) - - managed_subclouds = dc_helper.get_subclouds(mgmt=SubcloudStatus.MGMT_MANAGED, - avail=SubcloudStatus.AVAIL_ONLINE, - sync=SubcloudStatus.SYNCED) - if subcloud in managed_subclouds: - managed_subclouds.remove(subcloud) - - ssh_map = ControllerClient.get_active_controllers_map() - managed_subclouds = [subcloud for subcloud in managed_subclouds if subcloud in ssh_map] - - return subcloud, managed_subclouds - - -def test_dc_swact_host(swact_precheck, check_central_alarms): - """ - Test host swact on central region - Args: - swact_precheck (fixture): check subclouds managed and online - Setup: - - Ensure primary subcloud is managed - Test Steps: - - Unmanage primary subcloud - - Swact the host - - Verify subclouds are managed - Teardown: - - Manage unmanaged subclouds - """ - primary_subcloud, managed_subclouds = swact_precheck - ssh_central = ControllerClient.get_active_controller(name="RegionOne") - - LOG.tc_step("Unmanage {}".format(primary_subcloud)) - dc_helper.unmanage_subcloud(subcloud=primary_subcloud, check_first=True) - - LOG.tc_step("Swact host on central region") - central_auth = Tenant.get('admin_platform', dc_region='RegionOne') - host_helper.swact_host(auth_info=central_auth) - - LOG.tc_step("Check subclouds after host swact on central region") - for managed_subcloud in managed_subclouds: - dc_helper.wait_for_subcloud_status(subcloud=managed_subcloud, - avail=SubcloudStatus.AVAIL_ONLINE, - mgmt=SubcloudStatus.MGMT_MANAGED, - sync=SubcloudStatus.SYNCED, - con_ssh=ssh_central) - - LOG.tc_step("Manage {}".format(primary_subcloud)) - dc_helper.manage_subcloud(subcloud=primary_subcloud, check_first=True) - dc_helper.wait_for_subcloud_status(subcloud=primary_subcloud, - avail=SubcloudStatus.AVAIL_ONLINE, - mgmt=SubcloudStatus.MGMT_MANAGED, - sync=SubcloudStatus.SYNCED, - con_ssh=ssh_central) diff --git a/automated-pytest-suite/testcases/functional/dc/test_https_unshared.py b/automated-pytest-suite/testcases/functional/dc/test_https_unshared.py deleted file mode 100644 index 7ca704ec..00000000 --- a/automated-pytest-suite/testcases/functional/dc/test_https_unshared.py +++ /dev/null @@ -1,163 +0,0 @@ -# -# Copyright (c) 2020 Wind River Systems, Inc.
-# -# SPDX-License-Identifier: Apache-2.0 -# - -from pytest import fixture - -from consts.auth import Tenant -from consts.proj_vars import ProjVar -from keywords import security_helper, keystone_helper, dc_helper, container_helper, host_helper, \ - system_helper, common -from utils import cli -from utils.tis_log import LOG - - -@fixture(scope='module') -def revert_https(request): - """ - Fixture to get the current https mode of the system and, if the test fails, - leave the system in the same mode as before - """ - central_auth = Tenant.get('admin_platform', dc_region='RegionOne') - sub_auth = Tenant.get('admin_platform') - use_dnsname = (bool(common.get_dnsname()) and - bool(common.get_dnsname(region=ProjVar.get_var('PRIMARY_SUBCLOUD')))) - - origin_https_sub = keystone_helper.is_https_enabled(auth_info=sub_auth) - origin_https_central = keystone_helper.is_https_enabled(auth_info=central_auth) - - def _revert(): - LOG.fixture_step("Revert central https config to {}.".format(origin_https_central)) - security_helper.modify_https(enable_https=origin_https_central, auth_info=central_auth) - - LOG.fixture_step("Revert subcloud https config to {}.".format(origin_https_sub)) - security_helper.modify_https(enable_https=origin_https_sub, auth_info=sub_auth) - - LOG.fixture_step("Verify cli's on subcloud and central region.") - verify_cli(sub_auth, central_auth) - - request.addfinalizer(_revert) - - return origin_https_sub, origin_https_central, central_auth, sub_auth, use_dnsname - - -def test_dc_modify_https(revert_https): - """ - Test enable/disable https - - Test Steps: - - Ensure central region and subcloud admin endpoint are https - - Ensure central region https config differs from subcloud - - Wait for subcloud sync audit and ensure subcloud https is not changed - - Verify cli's in subcloud and central region - - Modify https on central and subcloud - - Verify cli's in subcloud and central region - - Swact central and subcloud - - Ensure central region and subcloud admin endpoint are https - - Teardown: - - Revert https config on central and subcloud - - """ - origin_https_sub, origin_https_central, central_auth, sub_auth, use_dnsname = revert_https - subcloud = ProjVar.get_var('PRIMARY_SUBCLOUD') - - LOG.tc_step( - "Before testing, ensure central region and subcloud admin internal endpoints are https") - assert keystone_helper.is_https_enabled(interface='admin', auth_info=central_auth), \ - "Central region admin internal endpoint is not https" - assert keystone_helper.is_https_enabled(interface='admin', auth_info=sub_auth), \ - "Subcloud admin internal endpoint is not https" - - new_https_sub = not origin_https_sub - new_https_central = not origin_https_central - - LOG.tc_step("Ensure central region https config differs from {}".format(subcloud)) - security_helper.modify_https(enable_https=new_https_sub, auth_info=central_auth) - - LOG.tc_step('Check public endpoints accessibility for central region') - security_helper.check_services_access(region='RegionOne', auth_info=central_auth, - use_dnsname=use_dnsname) - LOG.tc_step('Check platform horizon accessibility') - security_helper.check_platform_horizon_access(use_dnsname=use_dnsname) - - LOG.tc_step("Wait for subcloud sync audit with best effort and ensure {} https is not " - "changed".format(subcloud)) - dc_helper.wait_for_sync_audit(subclouds=subcloud, fail_ok=True, timeout=660) - assert origin_https_sub == keystone_helper.is_https_enabled(auth_info=sub_auth), \ - "HTTPS config changed in subcloud" - -
LOG.tc_step("Verify cli's in {} and central region".format(subcloud)) - verify_cli(sub_auth, central_auth) - - if new_https_central != new_https_sub: - LOG.tc_step("Set central region https to {}".format(new_https_central)) - security_helper.modify_https(enable_https=new_https_central, auth_info=central_auth) - LOG.tc_step("Ensure central region and subcloud admin internal endpoint are still https") - assert keystone_helper.is_https_enabled(interface='admin', auth_info=central_auth), \ - "Central region admin internal endpoint is not https" - assert keystone_helper.is_https_enabled(interface='admin', auth_info=sub_auth), \ - "Subcloud admin internal endpoint is not https" - LOG.tc_step('Check public endpoints accessibility for central region') - security_helper.check_services_access(region='RegionOne', auth_info=central_auth, - use_dnsname=use_dnsname) - LOG.tc_step('Check platform horizon accessibility') - security_helper.check_platform_horizon_access(use_dnsname=use_dnsname) - - LOG.tc_step("Set {} https to {}".format(subcloud, new_https_sub)) - security_helper.modify_https(enable_https=new_https_sub, auth_info=sub_auth) - LOG.tc_step('Check public endpoints accessibility for {} region'.format(subcloud)) - security_helper.check_services_access(region=subcloud, auth_info=sub_auth, - use_dnsname=use_dnsname) - - LOG.tc_step("Ensure central region and subcloud admin internal endpoint are still https") - assert keystone_helper.is_https_enabled(interface='admin', auth_info=central_auth), \ - "Central region admin internal endpoint is not https" - assert keystone_helper.is_https_enabled(interface='admin', auth_info=sub_auth), \ - "Subcloud admin internal endpoint is not https" - - LOG.tc_step("Verify cli's in {} and central region after https modify on " - "subcloud".format(subcloud)) - verify_cli(sub_auth, central_auth) - - LOG.tc_step("Swact on central region") - host_helper.swact_host(auth_info=central_auth) - - LOG.tc_step( - "Verify cli's in {} and central region after central region swact" .format(subcloud)) - verify_cli(sub_auth, central_auth) - - if not system_helper.is_aio_simplex(auth_info=sub_auth): - LOG.tc_step("Swact on subcloud {}".format(subcloud)) - host_helper.swact_host(auth_info=sub_auth) - LOG.tc_step("Verify cli's in {} and central region after subcloud swact".format(subcloud)) - verify_cli(sub_auth, central_auth) - - LOG.tc_step("Ensure after swact, central region and subcloud admin internal endpoint are https") - assert keystone_helper.is_https_enabled(interface='admin', auth_info=central_auth), \ - "Central region admin internal endpoint is not https" - assert keystone_helper.is_https_enabled(interface='admin', auth_info=sub_auth), \ - "Subcloud admin internal endpoint is not https" - - -def verify_cli(sub_auth=None, central_auth=None): - auths = [central_auth, sub_auth] - auths = [auth for auth in auths if auth] - - for auth in auths: - cli.system('host-list', fail_ok=False, auth_info=auth) - cli.fm('alarm-list', fail_ok=False, auth_info=auth) - if container_helper.is_stx_openstack_deployed(applied_only=True, auth_info=auth): - cli.openstack('server list --a', fail_ok=False, auth_info=auth) - cli.openstack('image list', fail_ok=False, auth_info=auth) - cli.openstack('volume list --a', fail_ok=False, auth_info=auth) - cli.openstack('user list', fail_ok=False, auth_info=auth) - cli.openstack('router list', fail_ok=False, auth_info=auth) - - if sub_auth and container_helper.is_stx_openstack_deployed(applied_only=True, - auth_info=sub_auth): - cli.openstack('stack list', 
fail_ok=False, auth_info=sub_auth) - cli.openstack('alarm list', fail_ok=False, auth_info=sub_auth) - cli.openstack('metric status', fail_ok=False, auth_info=sub_auth) diff --git a/automated-pytest-suite/testcases/functional/dc/test_shared_config_dns.py b/automated-pytest-suite/testcases/functional/dc/test_shared_config_dns.py deleted file mode 100644 index 73e9388c..00000000 --- a/automated-pytest-suite/testcases/functional/dc/test_shared_config_dns.py +++ /dev/null @@ -1,231 +0,0 @@ -# -# Copyright (c) 2020 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -from pytest import fixture, skip, mark - -from utils.tis_log import LOG -from utils.clients.ssh import ControllerClient -from consts.proj_vars import ProjVar -from consts.auth import Tenant -from keywords import dc_helper, system_helper, host_helper - - -@fixture(scope='module') -def subclouds_to_test(request): - - LOG.info("Gather DNS config and subcloud management info") - sc_auth = Tenant.get('admin_platform', dc_region='SystemController') - dns_servers = system_helper.get_dns_servers(auth_info=sc_auth) - - subcloud = ProjVar.get_var('PRIMARY_SUBCLOUD') - - def revert(): - LOG.fixture_step("Manage {} if unmanaged".format(subcloud)) - dc_helper.manage_subcloud(subcloud) - - LOG.fixture_step("Revert DNS config if changed") - system_helper.set_dns_servers(nameservers=dns_servers, auth_info=sc_auth) - request.addfinalizer(revert) - - managed_subclouds = dc_helper.get_subclouds(mgmt='managed', avail='online') - if subcloud in managed_subclouds: - managed_subclouds.remove(subcloud) - - ssh_map = ControllerClient.get_active_controllers_map() - managed_subclouds = [subcloud for subcloud in managed_subclouds if subcloud in ssh_map] - - return subcloud, managed_subclouds - - -def compose_new_dns_servers(scenario, prev_dns_servers): - dns_servers = list(prev_dns_servers) - unreachable_dns_server_ip = "8.4.4.4" - - if scenario == 'add_unreachable_server': - dns_servers.append(unreachable_dns_server_ip) - elif scenario == 'unreachable_server': - dns_servers = [unreachable_dns_server_ip] - else: - if len(dns_servers) < 2: - skip('Less than two DNS servers configured.') - - if scenario == 'change_order': - dns_servers.append(dns_servers.pop(0)) - elif scenario == 'remove_one_server': - dns_servers.append(dns_servers.pop(0)) - dns_servers.pop() - else: - raise ValueError("Unknown scenario: {}".format(scenario)) - - return dns_servers - - -@fixture() -def ensure_synced(subclouds_to_test, check_central_alarms): - primary_subcloud, managed_subclouds = subclouds_to_test - - LOG.fixture_step( - "Ensure {} is managed and DNS config is valid and synced".format(primary_subcloud)) - subcloud_auth = Tenant.get('admin_platform', dc_region=primary_subcloud) - subcloud_dns = system_helper.get_dns_servers(con_ssh=None, auth_info=subcloud_auth) - sc_dns = system_helper.get_dns_servers(con_ssh=None, - auth_info=Tenant.get('admin_platform', - dc_region='SystemController')) - - if subcloud_dns != sc_dns: - dc_helper.manage_subcloud(subcloud=primary_subcloud, check_first=True) - dc_helper.wait_for_subcloud_dns_config(subcloud=primary_subcloud, expected_dns=sc_dns) - verify_dns_on_central_and_subcloud(primary_subcloud) - - return primary_subcloud, managed_subclouds, sc_dns - - -@mark.parametrize('scenario', ( - 'add_unreachable_server', - 'change_order', - 'remove_one_server', -)) -def test_dc_dns_modify(ensure_synced, scenario): - """ - Update DNS servers on central region and check it is propagated to subclouds - Args: - ensure_synced: test 
fixture - scenario: DNS change scenario - - Setups: - - Ensure primary subcloud is managed and DNS config is valid and synced - - Test Steps: - - Un-manage primary subcloud - - Configure DNS servers on central region to new value based on given scenario - - Wait for new DNS config to sync over to managed online subclouds - - Ensure DNS config is not updated on unmanaged primary subcloud - - Re-manage primary subcloud and ensure DNS config syncs over - - Verify nslookup works in Central Region and primary subcloud - - Teardown: - - Reset DNS servers to original value (module) - - """ - primary_subcloud, managed_subclouds, prev_dns_servers = ensure_synced - new_dns_servers = compose_new_dns_servers(scenario=scenario, prev_dns_servers=prev_dns_servers) - - LOG.tc_step("Unmanage {}".format(primary_subcloud)) - dc_helper.unmanage_subcloud(subcloud=primary_subcloud, check_first=True) - - LOG.tc_step("Reconfigure DNS servers on central region from {} to {}". - format(prev_dns_servers, new_dns_servers)) - system_helper.set_dns_servers(new_dns_servers, - auth_info=Tenant.get('admin_platform', - dc_region='SystemController')) - - LOG.tc_step("Wait for new DNS config to sync over to managed online subclouds") - for managed_sub in managed_subclouds: - dc_helper.wait_for_subcloud_dns_config(subcloud=managed_sub, expected_dns=new_dns_servers) - - LOG.tc_step("Ensure DNS config is not updated on unmanaged subcloud: {}". - format(primary_subcloud)) - code = dc_helper.wait_for_subcloud_dns_config(subcloud=primary_subcloud, - expected_dns=new_dns_servers, - timeout=60, fail_ok=True)[0] - assert 1 == code, "Actual return code: {}".format(code) - - LOG.tc_step('Re-manage {} and ensure DNS config syncs over'.format(primary_subcloud)) - dc_helper.manage_subcloud(subcloud=primary_subcloud, check_first=False) - dc_helper.wait_for_subcloud_dns_config(subcloud=primary_subcloud, expected_dns=new_dns_servers) - - LOG.tc_step('Verify nslookup works in Central Region and {}'.format(primary_subcloud)) - verify_dns_on_central_and_subcloud(primary_subcloud) - - -def test_dc_dns_override_local_change(ensure_synced): - """ - Verify DNS modification on subcloud will be overridden by central region config - Args: - ensure_synced: test fixture - - Setups: - - Ensure primary subcloud is managed and DNS config is valid and synced - - Test Steps: - - Un-manage primary subcloud - - Configure DNS servers on primary subcloud to a unreachable ip address (8.4.4.4) - - Wait for sync log for any managed subcloud with best effort - - Ensure DNS config is not updated on unmanaged primary subcloud - - Verify nslookup passes on central region and fails on primary subcloud - - Re-manage primary subcloud and ensure DNS config syncs over - - Verify nslookup in Central Region and primary subcloud are working as expected - - Teardown: - - Manage primary subcloud if not managed (module) - - Reset DNS servers to original value on central region (module) - - """ - primary_subcloud, managed_subclouds, sc_dns = ensure_synced - new_dns_servers = compose_new_dns_servers(scenario='unreachable_server', - prev_dns_servers=sc_dns) - - LOG.tc_step("Unmanage {}".format(primary_subcloud)) - dc_helper.unmanage_subcloud(subcloud=primary_subcloud, check_first=True) - - LOG.tc_step("Reconfigure DNS on {} from {} to {}".format( - primary_subcloud, sc_dns, new_dns_servers)) - system_helper.set_dns_servers(new_dns_servers, auth_info=Tenant.get('admin_platform', - dc_region=primary_subcloud)) - - managed_cloud = managed_subclouds[0] if managed_subclouds else '' - 
LOG.tc_step("Wait for sync update log for managed subcloud {} with best effort".format( - managed_cloud)) - dc_helper.wait_for_sync_audit(subclouds=managed_cloud, fail_ok=True, timeout=660) - - LOG.tc_step("Ensure DNS config is not updated on unmanaged subcloud: {}".format( - primary_subcloud)) - code = dc_helper.wait_for_subcloud_dns_config(subcloud=primary_subcloud, expected_dns=sc_dns, - fail_ok=True, timeout=60)[0] - assert 1 == code, "Actual return code: {}".format(code) - - LOG.tc_step("Verify nslookup fails on {}".format(primary_subcloud)) - central_res, local_res = verify_dns_on_central_and_subcloud(primary_subcloud, fail_ok=True, - sc_dns=sc_dns) - assert 0 == central_res, "nslookup failed on central region" - assert 1 == local_res, "nslookup succeeded on {} with unreachable DNS servers configured".\ - format(primary_subcloud) - - central_auth = Tenant.get('admin_platform', dc_region='RegionOne') - if system_helper.get_standby_controller_name(auth_info=central_auth): - LOG.tc_step("Swact in central region") - host_helper.swact_host(auth_info=central_auth) - - LOG.tc_step('Re-manage {} and ensure local DNS config is overridden by central config'. - format(primary_subcloud)) - dc_helper.manage_subcloud(subcloud=primary_subcloud, check_first=False) - dc_helper.wait_for_subcloud_dns_config(subcloud=primary_subcloud, expected_dns=sc_dns) - - LOG.tc_step('Verify nslookup works in Central Region and {}'.format(primary_subcloud)) - verify_dns_on_central_and_subcloud(primary_subcloud, sc_dns=sc_dns) - - -def verify_dns_on_central_and_subcloud(primary_subcloud, fail_ok=False, sc_dns=None): - res = [] - for region in ('RegionOne', primary_subcloud): - # take snapshot - orig_dns_servers = system_helper.get_dns_servers(auth_info=Tenant.get('admin_platform', - dc_region=region)) - if not sc_dns or set(sc_dns) <= set(orig_dns_servers): - LOG.info("Modify dns server to public dns") - system_helper.set_dns_servers(nameservers=['8.8.8.8'], - auth_info=Tenant.get('admin_platform', - dc_region=region)) - LOG.info("Check dns on {}".format(region)) - con_ssh = ControllerClient.get_active_controller(name=region) - code, out = con_ssh.exec_cmd('nslookup -timeout=1 www.google.com', fail_ok=fail_ok, - expect_timeout=30) - res.append(code) - # revert - system_helper.set_dns_servers(nameservers=orig_dns_servers, - auth_info=Tenant.get('admin_platform', - dc_region=region)) - return res diff --git a/automated-pytest-suite/testcases/functional/dc/test_timezone_unshared.py b/automated-pytest-suite/testcases/functional/dc/test_timezone_unshared.py deleted file mode 100644 index bc0b0316..00000000 --- a/automated-pytest-suite/testcases/functional/dc/test_timezone_unshared.py +++ /dev/null @@ -1,173 +0,0 @@ -# -# Copyright (c) 2020 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - -import time -import random - -from pytest import fixture, mark - -from utils.tis_log import LOG -from consts.auth import Tenant -from consts.stx import TIMEZONES -from consts.proj_vars import ProjVar -from keywords import system_helper, dc_helper, host_helper - - -TIMEZONES = TIMEZONES[:-1] # exclude UTC -TIMESTAMP_PATTERN = r'\d{4}-\d{2}-\d{2}[T| ]\d{2}:\d{2}:\d{2}' -DEFAULT_ZONE = 'UTC' - - -@fixture(scope='module', autouse=True) -def prev_check(request, check_central_alarms_module): - - LOG.fixture_step("(module) Ensure both central and subcloud are configured with {} timezone" - .format(DEFAULT_ZONE)) - subcloud = ProjVar.get_var('PRIMARY_SUBCLOUD') - central_auth = Tenant.get('admin_platform', dc_region='RegionOne') - sub_auth = Tenant.get('admin_platform', dc_region=subcloud) - system_helper.modify_timezone(timezone=DEFAULT_ZONE, auth_info=central_auth) - code = system_helper.modify_timezone(timezone=DEFAULT_ZONE, auth_info=sub_auth)[0] - if code == 0: - # allow sometime for change to apply - time.sleep(30) - - prev_central_time = system_helper.get_host_values(host="controller-0", fields='created_at', - auth_info=central_auth)[0] - prev_sub_time = system_helper.get_host_values(host="controller-0", fields='created_at', - auth_info=sub_auth)[0] - LOG.fixture_step("prev_time: {}.".format(prev_central_time)) - central_zone, sub_zone = __select_two_timezones(current_zone=DEFAULT_ZONE) - - def _revert(): - LOG.fixture_step("Revert timezone to {} and ensure host created timestamp also reverted" - .format(DEFAULT_ZONE)) - system_helper.modify_timezone(timezone=DEFAULT_ZONE, auth_info=central_auth) - system_helper.modify_timezone(timezone=DEFAULT_ZONE, auth_info=sub_auth) - wait_for_timestamp_update(auth_info=central_auth, expt_time=prev_central_time) - wait_for_timestamp_update(auth_info=sub_auth, expt_time=prev_sub_time) - request.addfinalizer(_revert) - - return prev_central_time, prev_sub_time, central_zone, sub_zone, central_auth, sub_auth, \ - subcloud - - -def __select_two_timezones(current_zone=None): - if not current_zone: - current_zone = system_helper.get_timezone() - - zones = list(TIMEZONES) - if current_zone in zones: - zones.remove(current_zone) - - selected_zones = random.sample(zones, 2) - LOG.info("Timezone selected to test: {}".format(selected_zones)) - return selected_zones - - -def wait_for_timestamp_update(auth_info, prev_timestamp=None, expt_time=None): - timeout = time.time() + 60 - while time.time() < timeout: - post_timestamp = system_helper.get_host_values(host='controller-0', fields='created_at', - auth_info=auth_info)[0] - if prev_timestamp and prev_timestamp != post_timestamp: - if prev_timestamp != post_timestamp: - return post_timestamp - elif expt_time: - if post_timestamp == expt_time: - return post_timestamp - - time.sleep(5) - else: - LOG.info("Timestamp for fm event did not change") - return None - - -def test_dc_modify_timezone(prev_check): - """ - Test timezone modify on system controller and subcloud. Ensure timezone change is not - propagated. 
- Setups: - - Ensure both central and subcloud regions are configured with UTC - - Get the timestamps for host created_at before timezone modify - - Test Steps - - Change the timezone in central region and wait until the change is applied - - Change the timezone to a different zone in subcloud and wait until the change is applied - - Verify host created_at timestamp updated according to the local timezone for the region - - Swact on subcloud and ensure timezone and host created_at timestamp persists locally - - Swact central controller and ensure timezone and host created_at timestamp persists - in central and subcloud - - Teardown - - Change timezone to UTC in both central and subcloud regions - - Ensure host created_at timestamp is reverted to original - - """ - prev_central_time, prev_sub_time, central_zone, sub_zone, central_auth, subcloud_auth, \ - subcloud = prev_check - - LOG.tc_step("Modify timezone to {} in central region".format(central_zone)) - system_helper.modify_timezone(timezone=central_zone, auth_info=central_auth) - - LOG.tc_step("Waiting for timestamp for host created_at to update in central region") - post_central_time = wait_for_timestamp_update(prev_timestamp=prev_central_time, - auth_info=central_auth) - assert post_central_time != prev_central_time, \ - "host created_at timestamp did not update after timezone changed " \ - "to {} in central region".format(central_zone) - - LOG.tc_step("Modify timezone to {} in {}".format(sub_zone, subcloud)) - system_helper.modify_timezone(timezone=sub_zone, auth_info=subcloud_auth) - - LOG.tc_step("Waiting for timestamp for same host created_at to update in {}".format(subcloud)) - post_sub_time = wait_for_timestamp_update(prev_timestamp=prev_sub_time, - auth_info=subcloud_auth) - assert post_sub_time != prev_sub_time, \ - "host created_at timestamp did not update after timezone changed to {} " \ - "in {}".format(sub_zone, subcloud) - assert post_sub_time != post_central_time, \ - "Host created_at timestamp is the same on central and {} when configured with different " \ - "timezones".format(subcloud) - - LOG.tc_step("Ensure host created_at timestamp does not change after subcloud sync audit") - dc_helper.wait_for_sync_audit(subclouds=subcloud, fail_ok=True, timeout=660) - post_sync_sub_time = system_helper.get_host_values(host='controller-0', fields='created_at', - auth_info=subcloud_auth)[0] - assert post_sub_time == post_sync_sub_time, \ - "Host created_at timestamp changed after sync audit on {}".format(subcloud) - - if not system_helper.is_aio_simplex(): - LOG.tc_step("Swact in {} region and verify timezone persists locally".format(subcloud)) - host_helper.swact_host(auth_info=subcloud_auth) - post_swact_sub_zone = system_helper.get_timezone(auth_info=subcloud_auth) - assert post_swact_sub_zone == sub_zone - - post_swact_sub_time = system_helper.get_host_values(host='controller-0', - fields='created_at', - auth_info=subcloud_auth)[0] - assert post_swact_sub_time == post_sub_time - - if system_helper.get_standby_controller_name(auth_info=central_auth): - LOG.tc_step("Swact in central region, and ensure timezone persists locally in central" - " and subcloud") - host_helper.swact_host(auth_info=central_auth) - - # Verify central timezone persists - post_swact_central_zone = system_helper.get_timezone(auth_info=central_auth) - assert post_swact_central_zone == central_zone - post_swact_central_time = system_helper.get_host_values(host='controller-0', - fields='created_at', - auth_info=central_auth)[0] - assert 
post_swact_central_time == post_central_time - - # Verify subcloud timezone persists - post_central_swact_sub_zone = system_helper.get_timezone(auth_info=subcloud_auth) - assert post_central_swact_sub_zone == sub_zone - post_central_swact_sub_time = system_helper.get_host_values(host='controller-0', - fields='created_at', - auth_info=subcloud_auth)[0] - assert post_central_swact_sub_time == post_sub_time diff --git a/automated-pytest-suite/testcases/functional/fault_mgmt/conftest.py b/automated-pytest-suite/testcases/functional/fault_mgmt/conftest.py deleted file mode 100755 index 157d7f85..00000000 --- a/automated-pytest-suite/testcases/functional/fault_mgmt/conftest.py +++ /dev/null @@ -1,3 +0,0 @@ -from testfixtures.resource_mgmt import * -from testfixtures.resource_create import * -from testfixtures.config_host import * diff --git a/automated-pytest-suite/testcases/functional/fault_mgmt/test_fm_on_host_operation.py b/automated-pytest-suite/testcases/functional/fault_mgmt/test_fm_on_host_operation.py deleted file mode 100755 index cb61fc1f..00000000 --- a/automated-pytest-suite/testcases/functional/fault_mgmt/test_fm_on_host_operation.py +++ /dev/null @@ -1,110 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -from pytest import mark, skip - -from utils import table_parser, cli -from utils.tis_log import LOG - -from consts.stx import EventLogID -from keywords import system_helper, host_helper, common - -from testfixtures.recover_hosts import HostsToRecover - - -@mark.sanity -def test_system_alarms_and_events_on_lock_unlock_compute(no_simplex): - """ - Verify fm alarm-show command - - Test Steps: - - Delete active alarms - - Lock a host - - Check active alarm generated for host lock - - Check relative values are the same in fm alarm-list and fm alarm-show - - - Check host lock 'set' event logged via fm event-list - - Unlock host - - Check active alarms cleared via fm alarm-list - - Check host lock 'clear' event logged via fm event-list - """ - - # Remove following step because it's unnecessary and fails the test when - # alarm is re-generated - # # Clear the alarms currently present - # LOG.tc_step("Clear the alarms table") - # system_helper.delete_alarms() - - # Raise a new alarm by locking a compute node - # Get the compute - compute_host = host_helper.get_up_hypervisors()[0] - if compute_host == system_helper.get_active_controller_name(): - compute_host = system_helper.get_standby_controller_name() - if not compute_host: - skip('Standby controller unavailable') - - LOG.tc_step("Lock a nova hypervisor host {}".format(compute_host)) - pre_lock_time = common.get_date_in_format() - HostsToRecover.add(compute_host) - host_helper.lock_host(compute_host) - - LOG.tc_step("Check host lock alarm is generated") - post_lock_alarms = \ - system_helper.wait_for_alarm(field='UUID', entity_id=compute_host, - reason=compute_host, - alarm_id=EventLogID.HOST_LOCK, - strict=False, - fail_ok=False)[1] - - LOG.tc_step( - "Check related fields in fm alarm-list and fm alarm-show are of the " - "same values") - post_lock_alarms_tab = system_helper.get_alarms_table(uuid=True) - - alarms_l = ['Alarm ID', 'Entity ID', 'Severity', 'Reason Text'] - alarms_s = ['alarm_id', 'entity_instance_id', 'severity', 'reason_text'] - - # Only 1 alarm since we are now checking the specific alarm ID - for post_alarm in post_lock_alarms: - LOG.tc_step( - "Verify {} for alarm {} in alarm-list are in sync with " - "alarm-show".format( - alarms_l, post_alarm)) - - 
alarm_show_tab = table_parser.table(cli.fm('alarm-show', post_alarm)[1]) - alarm_list_tab = table_parser.filter_table(post_lock_alarms_tab, - UUID=post_alarm) - - for i in range(len(alarms_l)): - alarm_l_val = table_parser.get_column(alarm_list_tab, - alarms_l[i])[0] - alarm_s_val = table_parser.get_value_two_col_table(alarm_show_tab, - alarms_s[i]) - - assert alarm_l_val == alarm_s_val, \ - "{} value in alarm-list: {} is different than alarm-show: " \ - "{}".format(alarms_l[i], alarm_l_val, alarm_s_val) - - LOG.tc_step("Check host lock is logged via fm event-list") - system_helper.wait_for_events(entity_instance_id=compute_host, - start=pre_lock_time, timeout=60, - event_log_id=EventLogID.HOST_LOCK, - fail_ok=False, **{'state': 'set'}) - - pre_unlock_time = common.get_date_in_format() - LOG.tc_step("Unlock {}".format(compute_host)) - host_helper.unlock_host(compute_host) - - LOG.tc_step("Check host lock active alarm cleared") - alarm_sets = [(EventLogID.HOST_LOCK, compute_host)] - system_helper.wait_for_alarms_gone(alarm_sets, fail_ok=False) - - LOG.tc_step("Check host lock clear event logged") - system_helper.wait_for_events(event_log_id=EventLogID.HOST_LOCK, - start=pre_unlock_time, - entity_instance_id=compute_host, - fail_ok=False, **{'state': 'clear'}) diff --git a/automated-pytest-suite/testcases/functional/horizon/conftest.py b/automated-pytest-suite/testcases/functional/horizon/conftest.py deleted file mode 100644 index 150887ed..00000000 --- a/automated-pytest-suite/testcases/functional/horizon/conftest.py +++ /dev/null @@ -1 +0,0 @@ -from testfixtures.horizon import * diff --git a/automated-pytest-suite/testcases/functional/horizon/test_fault_management.py b/automated-pytest-suite/testcases/functional/horizon/test_fault_management.py deleted file mode 100755 index 9477361f..00000000 --- a/automated-pytest-suite/testcases/functional/horizon/test_fault_management.py +++ /dev/null @@ -1,60 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. 
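The field-by-field comparison in the deleted fault-management test pairs fm alarm-list columns with fm alarm-show rows. That pairing can be captured in a mapping, as in this hypothetical condensation of the loop above, built on the same table_parser/cli utilities.

from utils import cli, table_parser

# alarm-list column header -> alarm-show field name, as paired in the test above
FIELD_MAP = {'Alarm ID': 'alarm_id',
             'Entity ID': 'entity_instance_id',
             'Severity': 'severity',
             'Reason Text': 'reason_text'}

def diff_alarm_list_vs_show(alarm_uuid, alarms_table):
    """Return {list_header: (list_val, show_val)} for any fields that disagree
    between fm alarm-list and fm alarm-show for one alarm."""
    show_tab = table_parser.table(cli.fm('alarm-show', alarm_uuid)[1])
    list_tab = table_parser.filter_table(alarms_table, UUID=alarm_uuid)
    mismatches = {}
    for list_header, show_field in FIELD_MAP.items():
        list_val = table_parser.get_column(list_tab, list_header)[0]
        show_val = table_parser.get_value_two_col_table(show_tab, show_field)
        if list_val != show_val:
            mismatches[list_header] = (list_val, show_val)
    return mismatches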
-# -# SPDX-License-Identifier: Apache-2.0 -# - - -import time - -from pytest import fixture, mark - -from utils.horizon.regions import messages -from utils.horizon.pages.admin.fault_management import eventssuppressionpage - -from utils.tis_log import LOG -from consts import horizon - - -@fixture(scope='function') -def events_suppression_pg(admin_home_pg, request): - LOG.fixture_step('Go to Admin > Fault Management > Events Suppression') - events_suppression_pg = eventssuppressionpage.EventsSuppressionPage(admin_home_pg.driver) - events_suppression_pg.go_to_target_page() - - def teardown(): - LOG.fixture_step('Back to Events Suppression page') - events_suppression_pg.go_to_target_page() - - request.addfinalizer(teardown) - return events_suppression_pg - - -def test_suppress_event(events_suppression_pg): - """ - Test Steps: - - Suppress event - - Check for success message - - Unsuppress event - - Check for success message - """ - event_id = '100.101' - LOG.tc_step('Suppress event {}.'.format(event_id)) - events_suppression_pg.suppress_event(event_id) - # poll for a success or error message for up to 30 seconds - end_time = time.time() + 30 - while time.time() < end_time: - if events_suppression_pg.find_message_and_dismiss(messages.SUCCESS): - break - elif events_suppression_pg.find_message_and_dismiss(messages.ERROR): - assert False, "Failed to suppress event: {}".format(event_id) - - LOG.tc_step('Unsuppress event {}.'.format(event_id)) - events_suppression_pg.unsuppress_event(event_id) - end_time = time.time() + 30 - while time.time() < end_time: - if events_suppression_pg.find_message_and_dismiss(messages.SUCCESS): - break - elif events_suppression_pg.find_message_and_dismiss(messages.ERROR): - assert False, "Failed to unsuppress event: {}".format(event_id) - - horizon.test_result = True diff --git a/automated-pytest-suite/testcases/functional/horizon/test_hosts.py b/automated-pytest-suite/testcases/functional/horizon/test_hosts.py deleted file mode 100755 index ee859623..00000000 --- a/automated-pytest-suite/testcases/functional/horizon/test_hosts.py +++ /dev/null @@ -1,322 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -import re - -from pytest import fixture, mark - -from consts import horizon -from utils import table_parser, cli -from utils.tis_log import LOG -from utils.horizon.pages.admin.platform import hostinventorypage -from keywords import system_helper - - -@fixture(scope='function') -def host_inventory_pg(admin_home_pg, request): - LOG.fixture_step('Go to Admin > Platform > Host Inventory') - host_inventory_pg = hostinventorypage.HostInventoryPage( - admin_home_pg.driver) - host_inventory_pg.go_to_target_page() - - def teardown(): - LOG.fixture_step('Back to Host Inventory page') - host_inventory_pg.go_to_target_page() - - request.addfinalizer(teardown) - return host_inventory_pg - - -def format_uptime(uptime): - """ - Uptime in horizon may display in formats like: - 2 weeks, 10 hours - 2 hours, 2 minutes - 45 minutes - ...
- """ - uptime = int(uptime) - min_ = 60 - hour = min_ * 60 - day = hour * 24 - week = day * 7 - month = week * 4 - - uptime_months = uptime // month - uptime_weeks = uptime % month // week - uptime_days = uptime % month % week // day - uptime_hours = uptime % month % week % day // hour - uptime_mins = uptime % month % week % day % hour // min_ - - if uptime < min_: - return '0 minutes' - elif uptime < hour: - return '{} minute'.format(uptime_mins) - elif uptime < day: - return '{} hour, {} minute'.format(uptime_hours, uptime_mins) - elif uptime < week: - return '{} day, {} hour'.format(uptime_days, uptime_hours) - elif uptime < month: - return '{} week, {} day'.format(uptime_weeks, uptime_days) - elif uptime > week: - return '{} month'.format(uptime_months, uptime_weeks) - - -@mark.platform_sanity -def test_horizon_host_inventory_display(host_inventory_pg): - """ - Test the hosts inventory display: - - Setups: - - Login as Admin - - Go to Admin > Platform > Host Inventory - - Test Steps: - - Test host tables display - - Teardown: - - Back to Host Inventory page - - Logout - - """ - LOG.tc_step('Test host inventory display') - host_inventory_pg.go_to_hosts_tab() - host_list = system_helper.get_hosts() - for host_name in host_list: - LOG.info("Checking {}...".format(host_name)) - headers_map = host_inventory_pg.hosts_table( - host_name).get_cli_horizon_mapping() - fields = list(headers_map.keys()) - cli_values = system_helper.get_host_values(host_name, fields, - rtn_dict=True) - cli_values['uptime'] = format_uptime(cli_values['uptime']) - if cli_values.get('peers'): - cli_values['peers'] = cli_values.get('peers').get('name') - - horizon_vals = host_inventory_pg.horizon_vals(host_name) - for cli_field in fields: - cli_val = cli_values[cli_field] - horizon_field = headers_map[cli_field] - horizon_val = horizon_vals[horizon_field] - if cli_field == 'uptime': - assert re.match(r'\d+ [dhm]', horizon_val) - else: - assert str(cli_val).lower() in horizon_val.lower(), \ - '{} {} display incorrectly, expect: {} actual: {}'. 
\ - format(host_name, horizon_field, cli_val, horizon_val) - - horizon.test_result = True - - -@mark.parametrize('host_name', [ - 'controller-0' -]) -def test_horizon_host_details_display(host_inventory_pg, host_name): - """ - Test the host details display: - - Setups: - - Login as Admin - - Go to Admin > Platform > Host Inventory > Controller-0 - - Test Steps: - - Test host controller-0 overview display - - Test host controller-0 processor display - - Test host controller-0 memory display - - Test host controller-0 storage display - - Test host controller-0 ports display - - Test host controller-0 lldp display - - Teardown: - - Logout - """ - host_table = host_inventory_pg.hosts_table(host_name) - host_details_pg = host_inventory_pg.go_to_host_detail_page(host_name) - - # OVERVIEW TAB - LOG.tc_step('Test host: {} overview display'.format(host_name)) - host_details_pg.go_to_overview_tab() - horizon_vals = host_details_pg.host_detail_overview( - host_table.driver).get_content() - fields_map = host_details_pg.host_detail_overview( - host_table.driver).OVERVIEW_INFO_HEADERS_MAP - cli_host_vals = system_helper.get_host_values(host_name, fields_map.keys(), - rtn_dict=True) - for field in fields_map: - horizon_header = fields_map[field] - cli_host_val = cli_host_vals[field] - horizon_val = horizon_vals.get(horizon_header) - if horizon_val is None: - horizon_val = 'None' - assert cli_host_val == horizon_val, '{} display incorrectly'.\ - format(horizon_header) - else: - assert cli_host_val.upper() in horizon_val.upper(), \ - '{} display incorrectly'.format(horizon_header) - LOG.info('Host: {} overview display correct'.format(host_name)) - - # PROCESSOR TAB - LOG.tc_step('Test host {} processor display'.format(host_name)) - host_details_pg.go_to_processor_tab() - cpu_table = table_parser.table( - cli.system('host-cpu-list {}'.format(host_name))[1]) - expt_cpu_info = { - 'Processor Model:': - table_parser.get_values(cpu_table, 'processor_model')[0], - 'Processors:': str( - len(set(table_parser.get_values(cpu_table, 'processor'))))} - - horizon_cpu_info = host_details_pg.inventory_details_processor_info\ - .get_content() - assert horizon_cpu_info['Processor Model:'] == expt_cpu_info[ - 'Processor Model:'] - assert horizon_cpu_info['Processors:'] == expt_cpu_info['Processors:'] - - # MEMORY TABLE - LOG.tc_step('Test host {} memory display'.format(host_name)) - checking_list = ['mem_total(MiB)', 'mem_avail(MiB)'] - - host_details_pg.go_to_memory_tab() - memory_table = table_parser.table( - cli.system('host-memory-list {}'.format(host_name))[1]) - column_names = host_details_pg.memory_table.column_names - processor_list = table_parser.get_values(memory_table, column_names[0]) - cli_memory_table_dict = table_parser.row_dict_table(memory_table, - column_names[0], - lower_case=False) - - for processor in processor_list: - horizon_vm_pages_val = \ - host_details_pg.get_memory_table_info(processor, column_names[2]) - horizon_memory_val = \ - host_details_pg.get_memory_table_info(processor, 'Memory') - if cli_memory_table_dict[processor]['hugepages(hp)_configured'] == \ - 'False': - assert horizon_vm_pages_val is None, \ - 'Horizon {} display incorrectly'.format(column_names[2]) - else: - for field in checking_list: - assert cli_memory_table_dict[processor][field] in \ - horizon_memory_val, 'Memory {} display incorrectly' - - # STORAGE TABLE - # This test will loop each table and test their display - # Test may fail in following case: - # 1. disk table's Size header eg. 
Size(GiB) used different unit such as - # Size (MiB), Size (TiB) - # 2. lvg table may display different: - # Case 1: Name | State | Access | Size (GiB) | Avail Size(GiB) | - # Current Physical Volume - Current Logical Volumes - # Case 2: Name | State | Access | Size | - # Current Physical Volume - Current Logical Volumes - # Case 2 Size values in horizon are rounded by 2 digits but in CLI not - # rounded - - LOG.tc_step('Test host {} storage display'.format(host_name)) - host_details_pg.go_to_storage_tab() - - cmd_list = ['host-disk-list {}'.format(host_name), - 'host-disk-partition-list {}'.format(host_name), - 'host-lvg-list {}'.format(host_name), - 'host-pv-list {}'.format(host_name)] - table_names = ['disk table', 'disk partition table', - 'local volume groups table', 'physical volumes table'] - - horizon_storage_tables = [host_details_pg.storage_disks_table, - host_details_pg.storage_partitions_table, - host_details_pg.storage_lvg_table, - host_details_pg.storage_pv_table] - cli_storage_tables = [] - for cmd in cmd_list: - cli_storage_tables.append(table_parser.table(cli.system(cmd))[1]) - - for i in range(len(horizon_storage_tables)): - horizon_table = horizon_storage_tables[i] - unique_key = horizon_table.column_names[0] - horizon_row_dict_table = host_details_pg.get_horizon_row_dict( - horizon_table, key_header_index=0) - cli_table = cli_storage_tables[i] - table_dict_unique_key = list(horizon_table.HEADERS_MAP.keys())[ - list(horizon_table.HEADERS_MAP.values()).index(unique_key)] - - cli_row_dict_storage_table = \ - table_parser.row_dict_table(cli_table, - table_dict_unique_key, - lower_case=False) - for key_header in horizon_row_dict_table: - for cli_header in horizon_table.HEADERS_MAP: - horizon_header = horizon_table.HEADERS_MAP[cli_header] - horizon_row_dict = horizon_row_dict_table[key_header] - cli_row_dict = cli_row_dict_storage_table[key_header] - # Solve parser issue: e.g. 
Size (GiB)' should be '558.029' - # not ['5589.', '029'] - cli_val = cli_row_dict[cli_header] - if isinstance(cli_val, list): - cli_row_dict[cli_header] = ''.join(cli_val) - assert horizon_row_dict[horizon_header] == cli_row_dict[ - cli_header], \ - 'In {}: disk: {} {} display incorrectly'.format( - table_names[i], key_header, horizon_header) - LOG.info('{} display correct'.format(table_names[i])) - - # PORT TABLE - LOG.tc_step('Test host {} port display'.format(host_name)) - host_details_pg.go_to_ports_tab() - horizon_port_table = host_details_pg.ports_table() - cli_port_table = table_parser.table( - cli.system('host-ethernet-port-list {}'.format(host_name))[1]) - horizon_row_dict_port_table = host_details_pg.get_horizon_row_dict( - horizon_port_table, key_header_index=0) - - cli_row_dict_port_table = table_parser.row_dict_table(cli_port_table, - 'name', - lower_case=False) - for ethernet_name in cli_row_dict_port_table: - for cli_header in horizon_port_table.HEADERS_MAP: - horizon_header = horizon_port_table.HEADERS_MAP[cli_header] - horizon_row_dict = horizon_row_dict_port_table[ethernet_name] - cli_row_dict = cli_row_dict_port_table[ethernet_name] - if cli_header not in cli_row_dict and cli_header == 'mac address': - cli_val = cli_row_dict['macaddress'] - else: - cli_val = cli_row_dict[cli_header] - horizon_val = horizon_row_dict[horizon_header] - # Solve table parser issue: MAC Address returns list eg: [ - # 'a4:bf:01:35:4a:', '32'] - if isinstance(cli_val, list): - cli_val = ''.join(cli_val) - assert cli_val in horizon_val, '{} display incorrectly'.format( - horizon_header) - - # LLDP TABLE - LOG.tc_step('Test host {} lldp display'.format(host_name)) - host_details_pg.go_to_lldp_tab() - lldp_list_table = table_parser.table( - cli.system('host-lldp-neighbor-list {}'.format(host_name))[1]) - lldp_uuid_list = table_parser.get_values(lldp_list_table, 'uuid') - horizon_lldp_table = host_details_pg.lldp_table() - cli_row_dict_lldp_table = {} - horizon_row_dict_lldp_table = host_details_pg.get_horizon_row_dict( - horizon_lldp_table, key_header_index=1) - for uuid in lldp_uuid_list: - cli_row_dict = {} - lldp_show_table = table_parser.table( - cli.system('lldp-neighbor-show {}'.format(uuid))[1]) - row_dict_key = table_parser.get_value_two_col_table(lldp_show_table, - 'port_identifier') - for cli_header in horizon_lldp_table.HEADERS_MAP: - horizon_header = horizon_lldp_table.HEADERS_MAP[cli_header] - horizon_row_dict = horizon_row_dict_lldp_table[row_dict_key] - cli_row_dict[cli_header] = table_parser.get_value_two_col_table( - lldp_show_table, cli_header) - cli_row_dict_lldp_table[row_dict_key] = cli_row_dict - assert cli_row_dict[cli_header] == \ - horizon_row_dict[horizon_header], \ - 'lldp neighbor:{} {} display incorrectly'.\ - format(row_dict_key, horizon_header) - - horizon.test_result = True diff --git a/automated-pytest-suite/testcases/functional/horizon/test_instances.py b/automated-pytest-suite/testcases/functional/horizon/test_instances.py deleted file mode 100755 index 9b911fac..00000000 --- a/automated-pytest-suite/testcases/functional/horizon/test_instances.py +++ /dev/null @@ -1,86 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. 
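Each of the storage, port, and lldp checks above repeats one pattern: build a row dict per table, map CLI headers to horizon headers via HEADERS_MAP, and compare cell by cell, re-joining any values the table parser split into lists. A hypothetical distillation of those loops, self-contained and parser-agnostic:

def compare_cli_vs_horizon_rows(horizon_row_dict_table, cli_row_dict_table,
                                headers_map):
    """Compare per-row values between a horizon table and its CLI counterpart.
    `headers_map` maps CLI headers to horizon headers, as in HEADERS_MAP above.
    Returns a list of (row_key, horizon_header, cli_val, horizon_val) mismatches."""
    errors = []
    for key, horizon_row in horizon_row_dict_table.items():
        cli_row = cli_row_dict_table[key]
        for cli_header, horizon_header in headers_map.items():
            cli_val = cli_row[cli_header]
            # the table parser may split long values into a list; rejoin first
            if isinstance(cli_val, list):
                cli_val = ''.join(cli_val)
            if str(cli_val).lower() not in str(horizon_row[horizon_header]).lower():
                errors.append((key, horizon_header, cli_val,
                               horizon_row[horizon_header]))
    return errors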
-# -# SPDX-License-Identifier: Apache-2.0 -# - - -from pytest import fixture, mark - -from consts import horizon -from consts.auth import Tenant -from consts.stx import GuestImages -from keywords import nova_helper -from utils.tis_log import LOG -from utils.horizon import helper -from utils.horizon.regions import messages -from utils.horizon.pages.project.compute import instancespage - - -@fixture(scope='function') -def instances_pg(tenant_home_pg_container, request): - LOG.fixture_step('Go to Project > Compute > Instance') - instance_name = helper.gen_resource_name('instance') - instances_pg = instancespage.InstancesPage( - tenant_home_pg_container.driver, port=tenant_home_pg_container.port) - instances_pg.go_to_target_page() - - def teardown(): - LOG.fixture_step('Back to instance page') - if instances_pg.is_instance_present(instance_name): - instances_pg.delete_instance_by_row(instance_name) - instances_pg.go_to_target_page() - - request.addfinalizer(teardown) - - return instances_pg, instance_name - - -@mark.sanity -@mark.cpe_sanity -@mark.sx_sanity -def test_horizon_create_delete_instance(instances_pg): - """ - Test the instance creation and deletion functionality: - - Setups: - - Login as Tenant - - Go to Project > Compute > Instance - - Teardown: - - Back to Instances page - - Logout - - Test Steps: - - Create a new instance - - Verify the instance appears in the instances table as active - - Delete the newly launched instance - - Verify the instance does not appear in the table after deletion - """ - instances_pg, instance_name = instances_pg - - mgmt_net_name = '-'.join([Tenant.get_primary()['tenant'], 'mgmt', 'net']) - flavor_name = nova_helper.get_basic_flavor(rtn_id=False) - guest_img = GuestImages.DEFAULT['guest'] - - LOG.tc_step('Create new instance {}'.format(instance_name)) - instances_pg.create_instance(instance_name, - boot_source_type='Image', - source_name=guest_img, - flavor_name=flavor_name, - network_names=mgmt_net_name, - create_new_volume=False) - assert not instances_pg.find_message_and_dismiss(messages.ERROR) - - LOG.tc_step('Verify the instance appears in the instances table as active') - assert instances_pg.is_instance_active(instance_name) - - LOG.tc_step('Delete instance {}'.format(instance_name)) - instances_pg.delete_instance_by_row(instance_name) - assert instances_pg.find_message_and_dismiss(messages.INFO) - assert not instances_pg.find_message_and_dismiss(messages.ERROR) - - LOG.tc_step( - 'Verify the instance does not appear in the table after deletion') - assert instances_pg.is_instance_deleted(instance_name) - horizon.test_result = True diff --git a/automated-pytest-suite/testcases/functional/mtc/conftest.py b/automated-pytest-suite/testcases/functional/mtc/conftest.py deleted file mode 100755 index 157d7f85..00000000 --- a/automated-pytest-suite/testcases/functional/mtc/conftest.py +++ /dev/null @@ -1,3 +0,0 @@ -from testfixtures.resource_mgmt import * -from testfixtures.resource_create import * -from testfixtures.config_host import * diff --git a/automated-pytest-suite/testcases/functional/mtc/test_evacuate.py b/automated-pytest-suite/testcases/functional/mtc/test_evacuate.py deleted file mode 100755 index f150c297..00000000 --- a/automated-pytest-suite/testcases/functional/mtc/test_evacuate.py +++ /dev/null @@ -1,146 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc.
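The create half of the deleted instance test is a reusable page-object pattern: launch, dismiss any error toast, then wait for the ACTIVE state. A sketch assuming the same instancespage API shown above; the helper name is hypothetical.

from utils.horizon.regions import messages

def create_and_verify_instance(instances_pg, name, image, flavor, network):
    """Launch an instance through the horizon page object and assert it goes
    active, mirroring the create steps of the deleted test."""
    instances_pg.create_instance(name,
                                 boot_source_type='Image',
                                 source_name=image,
                                 flavor_name=flavor,
                                 network_names=network,
                                 create_new_volume=False)
    # any error toast means the launch request itself was rejected
    assert not instances_pg.find_message_and_dismiss(messages.ERROR)
    assert instances_pg.is_instance_active(name)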
-# -# SPDX-License-Identifier: Apache-2.0 -# - - -from pytest import fixture, skip, mark - -from utils.tis_log import LOG -from consts.reasons import SkipHypervisor - -from keywords import vm_helper, host_helper, nova_helper, system_helper, \ - network_helper -from testfixtures.fixture_resources import ResourceCleanup - - -@fixture(scope='module', autouse=True) -def skip_test_if_less_than_two_hosts(no_simplex): - hypervisors = host_helper.get_up_hypervisors() - if len(hypervisors) < 2: - skip(SkipHypervisor.LESS_THAN_TWO_HYPERVISORS) - - LOG.fixture_step( - "Update instance and volume quota to at least 10 and 20 respectively") - vm_helper.ensure_vms_quotas(vms_num=10) - - return len(hypervisors) - - -class TestDefaultGuest: - - @fixture(scope='class') - def vms_(self, add_admin_role_class): - LOG.fixture_step("Create a flavor without ephemeral or swap disks") - flavor_1 = nova_helper.create_flavor('flv_nolocaldisk')[1] - ResourceCleanup.add('flavor', flavor_1, scope='class') - - LOG.fixture_step("Create a flavor with ephemeral and swap disks") - flavor_2 = \ - nova_helper.create_flavor('flv_localdisk', ephemeral=1, swap=512)[1] - ResourceCleanup.add('flavor', flavor_2, scope='class') - - LOG.fixture_step( - "Boot vm1 from volume with flavor flv_nolocaldisk and wait for it " - "pingable from NatBox") - vm1_name = "vol_nolocal" - vm1 = vm_helper.boot_vm(vm1_name, flavor=flavor_1, source='volume', - cleanup='class')[1] - vm_helper.wait_for_vm_pingable_from_natbox(vm1) - - vm_host = vm_helper.get_vm_host(vm_id=vm1) - - LOG.fixture_step( - "Boot vm2 from volume with flavor flv_localdisk and wait for it " - "pingable from NatBox") - vm2_name = "vol_local" - vm2 = vm_helper.boot_vm(vm2_name, flavor=flavor_2, source='volume', - cleanup='class', avail_zone='nova', - vm_host=vm_host)[1] - vm_helper.wait_for_vm_pingable_from_natbox(vm2) - - LOG.fixture_step( - "Boot vm3 from image with flavor flv_nolocaldisk and wait for it " - "pingable from NatBox") - vm3_name = "image_novol" - vm3 = vm_helper.boot_vm(vm3_name, flavor=flavor_1, source='image', - cleanup='class', avail_zone='nova', - vm_host=vm_host)[1] - vm_helper.wait_for_vm_pingable_from_natbox(vm3) - - LOG.fixture_step( - "Boot vm4 from image with flavor flv_nolocaldisk and wait for it " - "pingable from NatBox") - vm4_name = 'image_vol' - vm4 = vm_helper.boot_vm(vm4_name, flavor_1, source='image', - cleanup='class', avail_zone='nova', - vm_host=vm_host)[1] - vm_helper.wait_for_vm_pingable_from_natbox(vm4) - - LOG.fixture_step( - "Attach volume to vm4 which was booted from image: {}.".format(vm4)) - vm_helper.attach_vol_to_vm(vm4) - - return [vm1, vm2, vm3, vm4], vm_host - - @mark.trylast - @mark.sanity - @mark.cpe_sanity - def test_evacuate_vms(self, vms_): - """ - Test evacuated vms - Args: - vms_: (fixture to create vms) - - Pre-requisites: - - At least two up hypervisors on system - - Test Steps: - - Create vms with various options: - - vm booted from cinder volume, - - vm booted from glance image, - - vm booted from glance image, and have an extra cinder - volume attached after launch, - - vm booed from cinder volume with ephemeral and swap disks - - Move vms onto same hypervisor - - sudo reboot -f on the host - - Ensure vms are successfully evacuated to other host - - Live migrate vms back to original host - - Check vms can move back, and vms are still reachable from natbox - - Check system services are enabled and neutron agents are alive - - """ - vms, target_host = vms_ - - pre_res_sys, pre_msg_sys = 
system_helper.wait_for_services_enable( - timeout=20, fail_ok=True) - up_hypervisors = host_helper.get_up_hypervisors() - pre_res_neutron, pre_msg_neutron = \ - network_helper.wait_for_agents_healthy( - up_hypervisors, timeout=20, fail_ok=True) - - LOG.tc_step( - "reboot -f on vms host, ensure vms are successfully evacuated and " - "host is recovered after reboot") - vm_helper.evacuate_vms(host=target_host, vms_to_check=vms, - wait_for_host_up=True, ping_vms=True) - - LOG.tc_step("Check rebooted host can still host vm") - vm_helper.live_migrate_vm(vms[0], destination_host=target_host) - vm_helper.wait_for_vm_pingable_from_natbox(vms[0]) - - LOG.tc_step("Check system services and neutron agents after {} " - "reboot".format(target_host)) - post_res_sys, post_msg_sys = system_helper.wait_for_services_enable( - fail_ok=True) - post_res_neutron, post_msg_neutron = \ - network_helper.wait_for_agents_healthy(hosts=up_hypervisors, - fail_ok=True) - - assert post_res_sys, "\nPost-evac system services stats: {}" \ - "\nPre-evac system services stats: {}". \ - format(post_msg_sys, pre_msg_sys) - assert post_res_neutron, "\nPost evac neutron agents stats: {}" \ - "\nPre-evac neutron agents stats: {}". \ - format(pre_msg_neutron, post_msg_neutron) diff --git a/automated-pytest-suite/testcases/functional/mtc/test_host_operations_negative.py b/automated-pytest-suite/testcases/functional/mtc/test_host_operations_negative.py deleted file mode 100755 index 1ecde76d..00000000 --- a/automated-pytest-suite/testcases/functional/mtc/test_host_operations_negative.py +++ /dev/null @@ -1,31 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -from pytest import mark - -from utils import cli -from utils.tis_log import LOG - - -@mark.sx_sanity -def test_add_host_simplex_negative(simplex_only): - """ - Test add second controller is rejected on simplex system - Args: - simplex_only: skip if non-sx system detected - - Test Steps: - - On simplex system, check 'system host-add -n controller-1' is rejected - - """ - LOG.tc_step("Check adding second controller is rejected on simplex system") - code, out = cli.system('host-add', '-n controller-1', fail_ok=True) - - assert 1 == code, "Unexpected exitcode for 'system host-add " \ - "controller-1': {}".format(code) - assert 'Adding a host on a simplex system is not allowed' in out, \ - "Unexpected error message: {}".format(out) diff --git a/automated-pytest-suite/testcases/functional/mtc/test_lock_unlock_host.py b/automated-pytest-suite/testcases/functional/mtc/test_lock_unlock_host.py deleted file mode 100755 index 98fc7f92..00000000 --- a/automated-pytest-suite/testcases/functional/mtc/test_lock_unlock_host.py +++ /dev/null @@ -1,93 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -import time -from pytest import mark, skip, param - -from utils.tis_log import LOG -from consts.stx import HostOperState, HostAvailState -from testfixtures.recover_hosts import HostsToRecover -from keywords import host_helper, system_helper - - -@mark.platform_sanity -def test_lock_active_controller_reject(no_simplex): - """ - Verify lock unlock active controller. 
Expect the lock attempt to be rejected - - Test Steps: - - Get active controller - - Attempt to lock active controller and ensure it's rejected - - """ - LOG.tc_step('Retrieve the active controller from the lab') - active_controller = system_helper.get_active_controller_name() - assert active_controller, "No active controller available" - - # attempt to lock the active controller and verify the request is rejected - LOG.tc_step("Lock active controller and ensure the lock is rejected") - exit_code, cmd_output = host_helper.lock_host(active_controller, - fail_ok=True, swact=False, - check_first=False) - assert exit_code == 1, 'Expect locking active controller to ' \ - 'be rejected. Actual: {}'.format(cmd_output) - status = system_helper.get_host_values(active_controller, - 'administrative')[0] - assert status == 'unlocked', "Fail: The active controller was locked." - - -@mark.parametrize('host_type', [ - param('controller', marks=mark.priorities('platform_sanity', - 'sanity', 'cpe_sanity')), - param('compute', marks=mark.priorities('platform_sanity')), - param('storage', marks=mark.priorities('platform_sanity')), -]) -def test_lock_unlock_host(host_type): - """ - Verify lock unlock host - - Test Steps: - - Select a host per given type. If type is controller, select - standby controller. - - Lock selected host and ensure it is successfully locked - - Unlock selected host and ensure it is successfully unlocked - - """ - LOG.tc_step("Select a {} node from system if any".format(host_type)) - if host_type == 'controller': - if system_helper.is_aio_simplex(): - host = 'controller-0' - else: - host = system_helper.get_standby_controller_name() - assert host, "No standby controller available" - - else: - if host_type == 'compute' and system_helper.is_aio_system(): - skip("No compute host on AIO system") - elif host_type == 'storage' and not system_helper.is_storage_system(): - skip("System does not have storage nodes") - - hosts = system_helper.get_hosts(personality=host_type, - availability=HostAvailState.AVAILABLE, - operational=HostOperState.ENABLED) - - assert hosts, "No good {} host on system".format(host_type) - host = hosts[0] - - LOG.tc_step("Lock {} host - {} and ensure it is successfully " - "locked".format(host_type, host)) - HostsToRecover.add(host) - host_helper.lock_host(host, swact=False) - - # wait for services to stabilize before unlocking - time.sleep(20) - - # unlock the host and verify it is successfully unlocked - LOG.tc_step("Unlock {} host - {} and ensure it is successfully " - "unlocked".format(host_type, host)) - host_helper.unlock_host(host) diff --git a/automated-pytest-suite/testcases/functional/mtc/test_services_persists_over_reboot.py b/automated-pytest-suite/testcases/functional/mtc/test_services_persists_over_reboot.py deleted file mode 100755 index 822c2a6d..00000000 --- a/automated-pytest-suite/testcases/functional/mtc/test_services_persists_over_reboot.py +++ /dev/null @@ -1,85 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc.
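The lock/unlock cycle above, with its recovery registration and stabilization delay, distills to a few lines. A hypothetical helper built on the same keywords API; the 20-second settle time is carried over from the deleted test.

import time

from keywords import host_helper
from testfixtures.recover_hosts import HostsToRecover

def lock_unlock_cycle(host, settle_time=20):
    """Lock a host, wait for services to settle, then unlock it. Registering
    the host with HostsToRecover ensures cleanup if the unlock step fails."""
    HostsToRecover.add(host)
    host_helper.lock_host(host, swact=False)
    time.sleep(settle_time)  # same stabilization delay as the deleted test
    host_helper.unlock_host(host)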
-# -# SPDX-License-Identifier: Apache-2.0 -# - - -import time -from pytest import mark, skip, param - -from utils.tis_log import LOG - -from consts.stx import VMStatus -from consts.timeout import VMTimeout -from keywords import host_helper, system_helper, vm_helper, network_helper -from testfixtures.recover_hosts import HostsToRecover - - -@mark.usefixtures('check_alarms') -@mark.parametrize('host_type', [ - param('controller', marks=mark.sanity), - 'compute', - # 'storage' -]) -def test_system_persist_over_host_reboot(host_type): - """ - Validate Inventory summary over reboot of one of the controller see if - data persists over reboot - - Test Steps: - - capture Inventory summary for list of hosts on system service-list - and neutron agent-list - - reboot the current Controller-Active - - Wait for reboot to complete - - Validate key items from inventory persist over reboot - - """ - if host_type == 'controller': - host = system_helper.get_active_controller_name() - elif host_type == 'compute': - if system_helper.is_aio_system(): - skip("No compute host for AIO system") - - host = None - else: - hosts = system_helper.get_hosts(personality='storage') - if not hosts: - skip(msg="Lab has no storage nodes. Skip rebooting storage node.") - - host = hosts[0] - - LOG.tc_step("Pre-check for system status") - system_helper.wait_for_services_enable() - up_hypervisors = host_helper.get_up_hypervisors() - network_helper.wait_for_agents_healthy(hosts=up_hypervisors) - - LOG.tc_step("Launch a vm") - vm_id = vm_helper.boot_vm(cleanup='function')[1] - vm_helper.wait_for_vm_pingable_from_natbox(vm_id) - - if host is None: - host = vm_helper.get_vm_host(vm_id) - - LOG.tc_step("Reboot a {} node and wait for reboot completes: " - "{}".format(host_type, host)) - HostsToRecover.add(host) - host_helper.reboot_hosts(host) - host_helper.wait_for_hosts_ready(host) - - LOG.tc_step("Check vm is still active and pingable after {} " - "reboot".format(host)) - vm_helper.wait_for_vm_status(vm_id, status=VMStatus.ACTIVE, fail_ok=False) - vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id, - timeout=VMTimeout.DHCP_RETRY) - - LOG.tc_step("Check neutron agents and system services are in good state " - "after {} reboot".format(host)) - network_helper.wait_for_agents_healthy(up_hypervisors) - system_helper.wait_for_services_enable() - - if host in up_hypervisors: - LOG.tc_step("Check {} can still host vm after reboot".format(host)) - if not vm_helper.get_vm_host(vm_id) == host: - time.sleep(30) - vm_helper.live_migrate_vm(vm_id, destination_host=host) diff --git a/automated-pytest-suite/testcases/functional/mtc/test_swact.py b/automated-pytest-suite/testcases/functional/mtc/test_swact.py deleted file mode 100755 index 385954db..00000000 --- a/automated-pytest-suite/testcases/functional/mtc/test_swact.py +++ /dev/null @@ -1,123 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. 
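Several of these mtc tests bracket a disruptive operation with the same health snapshot: system services enabled plus neutron agents healthy on the up hypervisors, both best-effort. A hypothetical consolidation using the suite's helpers, so the same snapshot can be taken before and after and the messages compared:

from keywords import host_helper, network_helper, system_helper

def snapshot_system_health(timeout=20):
    """Best-effort health snapshot used around disruptive operations.
    Returns (healthy, services_msg, agents_msg)."""
    res_sys, msg_sys = system_helper.wait_for_services_enable(
        timeout=timeout, fail_ok=True)
    hypervisors = host_helper.get_up_hypervisors()
    res_neutron, msg_neutron = network_helper.wait_for_agents_healthy(
        hypervisors, timeout=timeout, fail_ok=True)
    return (res_sys and res_neutron), msg_sys, msg_neutron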
-# -# SPDX-License-Identifier: Apache-2.0 -# - - -from pytest import mark, skip - -from utils.tis_log import LOG -from consts.reasons import SkipSysType -from keywords import host_helper, system_helper, vm_helper, network_helper, \ - kube_helper - - -@mark.sanity -@mark.cpe_sanity -def test_swact_controllers(wait_for_con_drbd_sync_complete): - """ - Verify swact active controller - - Test Steps: - - Boot a vm on system and check ping works - - Swact active controller - - Verify standby controller and active controller are swapped - - Verify vm is still pingable - - """ - if system_helper.is_aio_simplex(): - skip("Simplex system detected") - - if not wait_for_con_drbd_sync_complete: - skip(SkipSysType.LESS_THAN_TWO_CONTROLLERS) - - LOG.tc_step('retrieve active and available controllers') - pre_active_controller, pre_standby_controller = \ - system_helper.get_active_standby_controllers() - assert pre_standby_controller, "No standby controller available" - - pre_res_sys, pre_msg_sys = system_helper.wait_for_services_enable( - timeout=20, fail_ok=True) - up_hypervisors = host_helper.get_up_hypervisors() - pre_res_neutron, pre_msg_neutron = network_helper.wait_for_agents_healthy( - up_hypervisors, timeout=20, fail_ok=True) - - LOG.tc_step("Boot a vm from image and ping it") - vm_id_img = vm_helper.boot_vm(name='swact_img', source='image', - cleanup='function')[1] - vm_helper.wait_for_vm_pingable_from_natbox(vm_id_img) - - LOG.tc_step("Boot a vm from volume and ping it") - vm_id_vol = vm_helper.boot_vm(name='swact', cleanup='function')[1] - vm_helper.wait_for_vm_pingable_from_natbox(vm_id_vol) - - LOG.tc_step("Swact active controller and ensure active controller is " - "changed") - host_helper.swact_host(hostname=pre_active_controller) - - LOG.tc_step("Verify standby controller and active controller are swapped") - post_active_controller = system_helper.get_active_controller_name() - post_standby_controller = system_helper.get_standby_controller_name() - - assert pre_standby_controller == post_active_controller, \ - "Prev standby: {}; Post active: {}".format( - pre_standby_controller, post_active_controller) - assert pre_active_controller == post_standby_controller, \ - "Prev active: {}; Post standby: {}".format( - pre_active_controller, post_standby_controller) - - LOG.tc_step("Check boot-from-image vm still pingable after swact") - vm_helper.wait_for_vm_pingable_from_natbox(vm_id_img, timeout=30) - LOG.tc_step("Check boot-from-volume vm still pingable after swact") - vm_helper.wait_for_vm_pingable_from_natbox(vm_id_vol, timeout=30) - - LOG.tc_step("Check system services and neutron agents after swact " - "from {}".format(pre_active_controller)) - post_res_sys, post_msg_sys = \ - system_helper.wait_for_services_enable(fail_ok=True) - post_res_neutron, post_msg_neutron = \ - network_helper.wait_for_agents_healthy(hosts=up_hypervisors, - fail_ok=True) - - assert post_res_sys, "\nPost-evac system services stats: {}" \ - "\nPre-evac system services stats: {}". \ - format(post_msg_sys, pre_msg_sys) - assert post_res_neutron, "\nPost evac neutron agents stats: {}" \ - "\nPre-evac neutron agents stats: {}". 
\ - format(post_msg_neutron, pre_msg_neutron) - - LOG.tc_step("Check hosts are Ready in kubectl get nodes after swact") - kube_helper.wait_for_nodes_ready(hosts=(pre_active_controller, - pre_standby_controller), timeout=30) - - -@mark.platform_sanity -def test_swact_controller_platform(wait_for_con_drbd_sync_complete): - """ - Verify swact active controller - - Test Steps: - - Swact active controller - - Verify standby controller and active controller are swapped - - Verify nodes are ready in kubectl get nodes - - """ - if system_helper.is_aio_simplex(): - skip("Simplex system detected") - - if not wait_for_con_drbd_sync_complete: - skip(SkipSysType.LESS_THAN_TWO_CONTROLLERS) - - LOG.tc_step('retrieve active and available controllers') - pre_active_controller, pre_standby_controller = \ - system_helper.get_active_standby_controllers() - assert pre_standby_controller, "No standby controller available" - - LOG.tc_step("Swact active controller and ensure active controller " - "is changed") - host_helper.swact_host(hostname=pre_active_controller) - - LOG.tc_step("Check hosts are Ready in kubectl get nodes after swact") - kube_helper.wait_for_nodes_ready(hosts=(pre_active_controller, - pre_standby_controller), timeout=30) diff --git a/automated-pytest-suite/testcases/functional/mtc/test_ungraceful_reboot.py b/automated-pytest-suite/testcases/functional/mtc/test_ungraceful_reboot.py deleted file mode 100644 index 91a89e9d..00000000 --- a/automated-pytest-suite/testcases/functional/mtc/test_ungraceful_reboot.py +++ /dev/null @@ -1,45 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -from pytest import mark, skip, param - -from utils.tis_log import LOG -from consts.stx import HostAvailState -from testfixtures.recover_hosts import HostsToRecover -from keywords import host_helper, system_helper - - -@mark.parametrize('host_type', [ - param('controller', marks=mark.platform), - param('compute', marks=mark.platform), - param('storage', marks=mark.platform), -]) -def test_force_reboot_host(host_type): - """ - Verify force reboot host - - Test Steps: - - Select an available or degraded host of the given type
- - Force reboot the selected host - - Wait for the host to be ready after the reboot - - """ - - LOG.tc_step("Select a {} node from system if any".format(host_type)) - hosts = system_helper.get_hosts(availability=(HostAvailState.AVAILABLE, - HostAvailState.DEGRADED), - personality=host_type) - if not hosts: - skip("No available or degraded {} host found on system".format( - host_type)) - - host = hosts[0] - LOG.tc_step("Force reboot {} host: {}".format(host_type, host)) - HostsToRecover.add(host) - host_helper.reboot_hosts(hostnames=host) - host_helper.wait_for_hosts_ready(host) diff --git a/automated-pytest-suite/testcases/functional/networking/conftest.py b/automated-pytest-suite/testcases/functional/networking/conftest.py deleted file mode 100755 index 157d7f85..00000000 --- a/automated-pytest-suite/testcases/functional/networking/conftest.py +++ /dev/null @@ -1,3 +0,0 @@ -from testfixtures.resource_mgmt import * -from testfixtures.resource_create import * -from testfixtures.config_host import * diff --git a/automated-pytest-suite/testcases/functional/networking/test_dvr.py b/automated-pytest-suite/testcases/functional/networking/test_dvr.py deleted file mode 100755 index c6699072..00000000 --- a/automated-pytest-suite/testcases/functional/networking/test_dvr.py +++ /dev/null @@ -1,203 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -import time - -from pytest import mark, fixture, skip, param - -from utils.tis_log import LOG - -from consts.auth import Tenant -from consts.stx import RouterStatus -from keywords import network_helper, vm_helper, system_helper, host_helper, \ - cinder_helper -from testfixtures.fixture_resources import ResourceCleanup - -result_ = None - - -@fixture(scope='module') -def router_info(request, stx_openstack_required): - global result_ - result_ = False - - LOG.fixture_step( - "Disable SNAT and update router to DVR if not already done.") - - router_id = network_helper.get_tenant_router() - network_helper.set_router_gateway(router_id, enable_snat=False) - is_dvr = network_helper.get_router_values(router_id, fields='distributed', - auth_info=Tenant.get('admin'))[0] - - def teardown(): - post_dvr = \ - network_helper.get_router_values(router_id, fields='distributed', - auth_info=Tenant.get('admin'))[0] - if post_dvr != is_dvr: - network_helper.set_router_mode(router_id, distributed=is_dvr) - - request.addfinalizer(teardown) - - if not is_dvr: - network_helper.set_router_mode(router_id, distributed=True, - enable_on_failure=False) - - result_ = True - return router_id - - -@fixture() -def _bring_up_router(request): - def _router_up(): - if result_ is False: - router_id = network_helper.get_tenant_router() - network_helper.set_router(router=router_id, fail_ok=False, - enable=True) - - request.addfinalizer(_router_up) - - -@mark.domain_sanity -def test_dvr_update_router(router_info, _bring_up_router): - """ - Test update router to distributed and non-distributed - - Args: - router_info (str): router_id (str) - - Setups: - - Get the router id and original distributed setting - - Test Steps: - - Boot a vm before updating router and ping vm from NatBox - - Change the distributed value of the router and verify it's updated - successfully - - Verify router is in ACTIVE state - - Verify vm can still be ping'd from NatBox - - Repeat the three steps above with the distributed value reverted to - original value - - Teardown: - - Delete vm - - Revert router to its original
distributed setting if not already - done so - - """ - global result_ - result_ = False - router_id = router_info - - LOG.tc_step("Boot a vm before updating router and ping vm from NatBox") - vm_id = vm_helper.boot_vm(name='dvr_update', reuse_vol=False, - cleanup='function')[1] - vm_helper.wait_for_vm_pingable_from_natbox(vm_id, fail_ok=False) - - for update_to_val in [False, True]: - LOG.tc_step("Update router distributed to {}".format(update_to_val)) - network_helper.set_router_mode(router_id, distributed=update_to_val, - enable_on_failure=False) - - # Wait for 30 seconds to allow the router update completes - time.sleep(30) - LOG.tc_step( - "Verify router is in active state and vm can be ping'd from NatBox") - assert RouterStatus.ACTIVE == \ - network_helper.get_router_values(router_id, - fields='status')[0], \ - "Router is not in active state after updating distributed to " \ - "{}.".format(update_to_val) - vm_helper.wait_for_vm_pingable_from_natbox(vm_id, fail_ok=False) - - result_ = True - - -@mark.parametrize(('vms_num', 'srv_grp_policy'), [ - param(2, 'affinity', marks=mark.p2), - param(2, 'anti-affinity', marks=mark.nightly), - param(3, 'affinity', marks=mark.p2), - param(3, 'anti-affinity', marks=mark.p2), -]) -def test_dvr_vms_network_connection(vms_num, srv_grp_policy, server_groups, - router_info): - """ - Test vms East West connection by pinging vms' data network from vm - - Args: - vms_num (int): number of vms to boot - srv_grp_policy (str): affinity to boot vms on same host, - anti-affinity to boot vms on different hosts - server_groups: test fixture to return affinity and anti-affinity - server groups - router_info (str): id of tenant router - - Skip Conditions: - - Only one nova host on the system - - Setups: - - Enable DVR (module) - - Test Steps - - Update router to distributed if not already done - - Boot given number of vms with specific server group policy to - schedule vms on same or different host(s) - - Ping vms' over data and management networks from one vm to test NS - and EW traffic - - Teardown: - - Delete vms - - Revert router to - - """ - # Increase instance quota count if needed - current_vms = len(vm_helper.get_vms(strict=False)) - quota_needed = current_vms + vms_num - vm_helper.ensure_vms_quotas(quota_needed) - - if srv_grp_policy == 'anti-affinity' and len( - host_helper.get_up_hypervisors()) == 1: - skip("Only one nova host on the system.") - - LOG.tc_step("Update router to distributed if not already done") - router_id = router_info - is_dvr = network_helper.get_router_values(router_id, fields='distributed', - auth_info=Tenant.get('admin'))[0] - if not is_dvr: - network_helper.set_router_mode(router_id, distributed=True) - - LOG.tc_step("Boot {} vms with server group policy {}".format( - vms_num, srv_grp_policy)) - affinity_grp, anti_affinity_grp = server_groups(soft=True) - srv_grp_id = affinity_grp if srv_grp_policy == 'affinity' else \ - anti_affinity_grp - - vms = [] - tenant_net_id = network_helper.get_tenant_net_id() - mgmt_net_id = network_helper.get_mgmt_net_id() - internal_net_id = network_helper.get_internal_net_id() - - internal_vif = {'net-id': internal_net_id} - if system_helper.is_avs(): - internal_vif['vif-model'] = 'avp' - - nics = [{'net-id': mgmt_net_id}, {'net-id': tenant_net_id}, internal_vif] - for i in range(vms_num): - vol = cinder_helper.create_volume()[1] - ResourceCleanup.add(resource_type='volume', resource_id=vol) - vm_id = \ - vm_helper.boot_vm('dvr_ew_traffic', source='volume', source_id=vol, - nics=nics, 
cleanup='function', - hint={'group': srv_grp_id})[1] - vms.append(vm_id) - LOG.tc_step("Wait for vm {} pingable from NatBox".format(vm_id)) - vm_helper.wait_for_vm_pingable_from_natbox(vm_id, fail_ok=False) - - from_vm = vms[0] - LOG.tc_step( - "Ping vms over management and data networks from vm {}, and " - "verify ping successful.".format(from_vm)) - vm_helper.ping_vms_from_vm(to_vms=vms, from_vm=from_vm, fail_ok=False, - net_types=['data', 'mgmt', 'internal']) diff --git a/automated-pytest-suite/testcases/functional/networking/test_multiple_ports.py b/automated-pytest-suite/testcases/functional/networking/test_multiple_ports.py deleted file mode 100755 index f699928d..00000000 --- a/automated-pytest-suite/testcases/functional/networking/test_multiple_ports.py +++ /dev/null @@ -1,538 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -import copy - -from pytest import fixture, mark, skip, param - -from utils.tis_log import LOG - -from consts.stx import FlavorSpec, VMStatus -from consts.reasons import SkipHostIf -from keywords import vm_helper, nova_helper, network_helper, glance_helper, \ - system_helper -from testfixtures.fixture_resources import ResourceCleanup - - -def id_params(val): - if not isinstance(val, str): - new_val = [] - for val_1 in val: - if isinstance(val_1, (tuple, list)): - val_1 = '_'.join([str(val_2).lower() for val_2 in val_1]) - new_val.append(val_1) - else: - new_val = val - - return '_'.join(new_val) - - -def _append_nics_for_net(vifs, net_id, nics): - glance_vif = None - nics = copy.deepcopy(nics) - for vif in vifs: - vif_ = vif.split(sep='_x') - vif_model = vif_[0] - if vif_model in ('e1000', 'rt18139'): - glance_vif = vif_model - iter_ = int(vif_[1]) if len(vif_) > 1 else 1 - for i in range(iter_): - nic = {'net-id': net_id, 'vif-model': vif_model} - nics.append(nic) - - return nics, glance_vif - - -def _boot_multiports_vm(flavor, mgmt_net_id, vifs, net_id, net_type, base_vm, - pcipt_seg_id=None): - nics = [{'net-id': mgmt_net_id}] - - nics, glance_vif = _append_nics_for_net(vifs, net_id=net_id, nics=nics) - img_id = None - if glance_vif: - img_id = glance_helper.create_image(name=glance_vif, - hw_vif_model=glance_vif, - cleanup='function')[1] - - LOG.tc_step("Boot a test_vm with following nics on same networks as " - "base_vm: {}".format(nics)) - vm_under_test = \ - vm_helper.boot_vm(name='multiports', nics=nics, flavor=flavor, - cleanup='function', - image_id=img_id)[1] - vm_helper.wait_for_vm_pingable_from_natbox(vm_under_test, fail_ok=False) - - if pcipt_seg_id: - LOG.tc_step("Add vlan to pci-passthrough interface for VM.") - vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=vm_under_test, - net_seg_id=pcipt_seg_id, - init_conf=True) - - LOG.tc_step("Ping test_vm's own {} network ips".format(net_type)) - vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=vm_under_test, - net_types=net_type) - - vm_helper.configure_vm_vifs_on_same_net(vm_id=vm_under_test) - - LOG.tc_step( - "Ping test_vm from base_vm to verify management and data networks " - "connection") - vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=base_vm, - net_types=['mgmt', net_type]) - - return vm_under_test, nics - - -class TestMutiPortsBasic: - @fixture(scope='class') - def base_setup(self, stx_openstack_required): - - flavor_id = nova_helper.create_flavor(name='dedicated')[1] - ResourceCleanup.add('flavor', flavor_id, scope='class') - - extra_specs = {FlavorSpec.CPU_POLICY: 'dedicated'} - 
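For reference, the 'virtio_x4'-style shorthand parsed by _append_nics_for_net above expands to repeated nova NICs on one network. A minimal standalone sketch of that convention follows; expand_vifs and the sample network id are illustrative names, not the suite's API:

    # Sketch of the "<vif_model>_x<count>" shorthand used by these tests.
    def expand_vifs(vifs, net_id):
        nics = []
        for vif in vifs:
            model, _, count = vif.partition('_x')
            # No "_x<count>" suffix means a single NIC of that model.
            for _ in range(int(count) if count else 1):
                nics.append({'net-id': net_id, 'vif-model': model})
        return nics

    assert expand_vifs(('virtio_x2',), 'net-1') == [
        {'net-id': 'net-1', 'vif-model': 'virtio'},
        {'net-id': 'net-1', 'vif-model': 'virtio'},
    ]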
nova_helper.set_flavor(flavor=flavor_id, **extra_specs) - - mgmt_net_id = network_helper.get_mgmt_net_id() - tenant_net_id = network_helper.get_tenant_net_id() - internal_net_id = network_helper.get_internal_net_id() - - nics = [{'net-id': mgmt_net_id}, - {'net-id': tenant_net_id}, - {'net-id': internal_net_id}] - - LOG.fixture_step( - "(class) Boot a base vm with following nics: {}".format(nics)) - base_vm = vm_helper.boot_vm(name='multiports_base', - flavor=flavor_id, nics=nics, - cleanup='class', - reuse_vol=False)[1] - - vm_helper.wait_for_vm_pingable_from_natbox(base_vm) - vm_helper.ping_vms_from_vm(base_vm, base_vm, net_types='data') - - return base_vm, flavor_id, mgmt_net_id, tenant_net_id, internal_net_id - - @mark.parametrize('vifs', [ - param(('virtio_x4',), marks=mark.priorities('nightly', 'sx_nightly')) - ], ids=id_params) - def test_multiports_on_same_network_vm_actions(self, vifs, base_setup): - """ - Test vm actions on vm with multiple ports with given vif models on - the same tenant network - - Args: - vifs (tuple): each item in the tuple is 1 nic to be added to vm - with specified (vif_mode, pci_address) - base_setup (list): test fixture to boot base vm - - Setups: - - create a flavor with dedicated cpu policy (class) - - choose one tenant network and one internal network to be used - by test (class) - - boot a base vm - vm1 with above flavor and networks, and ping - it from NatBox (class) - - Boot a vm under test - vm2 with above flavor and with multiple - ports on same tenant network with base vm, - and ping it from NatBox (class) - - Ping vm2's own data network ips (class) - - Ping vm2 from vm1 to verify management and data networks - connection (class) - - Test Steps: - - Perform given actions on vm2 (migrate, start/stop, etc) - - Verify pci_address preserves - - Verify ping from vm1 to vm2 over management and data networks - still works - - Teardown: - - Delete created vms and flavor - """ - base_vm, flavor, mgmt_net_id, tenant_net_id, internal_net_id = \ - base_setup - - vm_under_test, nics = _boot_multiports_vm(flavor=flavor, - mgmt_net_id=mgmt_net_id, - vifs=vifs, - net_id=tenant_net_id, - net_type='data', - base_vm=base_vm) - - for vm_actions in [['auto_recover'], - ['cold_migrate'], - ['pause', 'unpause'], - ['suspend', 'resume'], - ['hard_reboot']]: - if vm_actions[0] == 'auto_recover': - LOG.tc_step( - "Set vm to error state and wait for auto recovery " - "complete, then verify ping from " - "base vm over management and data networks") - vm_helper.set_vm_state(vm_id=vm_under_test, error_state=True, - fail_ok=False) - vm_helper.wait_for_vm_values(vm_id=vm_under_test, - status=VMStatus.ACTIVE, - fail_ok=True, timeout=600) - else: - LOG.tc_step("Perform following action(s) on vm {}: {}".format( - vm_under_test, vm_actions)) - for action in vm_actions: - if 'migrate' in action and system_helper.is_aio_simplex(): - continue - - kwargs = {} - if action == 'hard_reboot': - action = 'reboot' - kwargs['hard'] = True - kwargs['action'] = action - - vm_helper.perform_action_on_vm(vm_under_test, **kwargs) - - vm_helper.wait_for_vm_pingable_from_natbox(vm_under_test) - - # LOG.tc_step("Verify vm pci address preserved after {}".format( - # vm_actions)) - # check_helper.check_vm_pci_addr(vm_under_test, nics) - - LOG.tc_step( - "Verify ping from base_vm to vm_under_test over management " - "and data networks still works " - "after {}".format(vm_actions)) - vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=base_vm, - net_types=['mgmt', 'data']) - - -class 
TestMutiPortsPCI: - - @fixture(scope='class') - def base_setup_pci(self, stx_openstack_required): - LOG.fixture_step( - "(class) Get an internal network that supports both pci-sriov and " - "pcipt vif to boot vm") - avail_pcipt_nets, is_cx4 = network_helper.get_pci_vm_network( - pci_type='pci-passthrough', - net_name='internal0-net', rtn_all=True) - avail_sriov_nets, _ = network_helper.get_pci_vm_network( - pci_type='pci-sriov', - net_name='internal0-net', rtn_all=True) - - if not avail_pcipt_nets and not avail_sriov_nets: - skip(SkipHostIf.PCI_IF_UNAVAIL) - - avail_nets = list(set(avail_pcipt_nets) & set(avail_sriov_nets)) - extra_pcipt_net = avail_pcipt_net = avail_sriov_net = None - pcipt_seg_ids = {} - if avail_nets: - avail_net_name = avail_nets[-1] - avail_net, segment_id = network_helper.get_network_values( - network=avail_net_name, - fields=('id', 'provider:segmentation_id')) - internal_nets = [avail_net] - pcipt_seg_ids[avail_net_name] = segment_id - avail_pcipt_net = avail_sriov_net = avail_net - LOG.info( - "Internal network(s) selected for pcipt and sriov: {}".format( - avail_net_name)) - else: - LOG.info("No internal network support both sriov and pcipt") - internal_nets = [] - if avail_pcipt_nets: - avail_pcipt_net_name = avail_pcipt_nets[-1] - avail_pcipt_net, segment_id = network_helper.get_network_values( - network=avail_pcipt_net_name, - fields=('id', 'provider:segmentation_id')) - internal_nets.append(avail_pcipt_net) - pcipt_seg_ids[avail_pcipt_net_name] = segment_id - LOG.info("pci-passthrough net: {}".format(avail_pcipt_net_name)) - if avail_sriov_nets: - avail_sriov_net_name = avail_sriov_nets[-1] - avail_sriov_net = network_helper.get_net_id_from_name( - avail_sriov_net_name) - internal_nets.append(avail_sriov_net) - LOG.info("pci-sriov net: {}".format(avail_sriov_net_name)) - - mgmt_net_id = network_helper.get_mgmt_net_id() - tenant_net_id = network_helper.get_tenant_net_id() - base_nics = [{'net-id': mgmt_net_id}, {'net-id': tenant_net_id}] - nics = base_nics + [{'net-id': net_id} for net_id in internal_nets] - - if avail_pcipt_nets and is_cx4: - extra_pcipt_net_name = avail_nets[0] if avail_nets else \ - avail_pcipt_nets[0] - extra_pcipt_net, seg_id = network_helper.get_network_values( - network=extra_pcipt_net_name, - fields=('id', 'provider:segmentation_id')) - if extra_pcipt_net not in internal_nets: - nics.append({'net-id': extra_pcipt_net}) - pcipt_seg_ids[extra_pcipt_net_name] = seg_id - - LOG.fixture_step("(class) Create a flavor with dedicated cpu policy.") - flavor_id = \ - nova_helper.create_flavor(name='dedicated', vcpus=2, ram=2048, - cleanup='class')[1] - extra_specs = {FlavorSpec.CPU_POLICY: 'dedicated', - FlavorSpec.PCI_NUMA_AFFINITY: 'preferred'} - nova_helper.set_flavor(flavor=flavor_id, **extra_specs) - - LOG.fixture_step( - "(class) Boot a base pci vm with following nics: {}".format(nics)) - base_vm_pci = \ - vm_helper.boot_vm(name='multiports_pci_base', flavor=flavor_id, - nics=nics, cleanup='class')[1] - - LOG.fixture_step("(class) Ping base PCI vm interfaces") - vm_helper.wait_for_vm_pingable_from_natbox(base_vm_pci) - vm_helper.ping_vms_from_vm(to_vms=base_vm_pci, from_vm=base_vm_pci, - net_types=['data', 'internal']) - - return base_vm_pci, flavor_id, base_nics, avail_sriov_net, \ - avail_pcipt_net, pcipt_seg_ids, extra_pcipt_net - - @mark.parametrize('vifs', [ - param(('virtio', 'pci-sriov', 'pci-passthrough'), marks=mark.p3), - param(('pci-passthrough',), marks=mark.nightly), - param(('pci-sriov',), marks=mark.nightly), - ], 
ids=id_params) - def test_multiports_on_same_network_pci_vm_actions(self, base_setup_pci, - vifs): - """ - Test vm actions on vm with multiple ports with given vif models on - the same tenant network - - Args: - base_setup_pci (tuple): base_vm_pci, flavor, mgmt_net_id, - tenant_net_id, internal_net_id, seg_id - vifs (list): list of vifs to add to same internal net - - Setups: - - Create a flavor with dedicated cpu policy (class) - - Choose management net, one tenant net, and internal0-net1 to be - used by test (class) - - Boot a base pci-sriov vm - vm1 with above flavor and networks, - ping it from NatBox (class) - - Ping vm1 from itself over data, and internal networks - - Test Steps: - - Boot a vm under test - vm2 with above flavor and with multiple - ports on same tenant network with vm1, - and ping it from NatBox - - Ping vm2's own data and internal network ips - - Ping vm2 from vm1 to verify management and data networks - connection - - Perform one of the following actions on vm2 - - set to error/ wait for auto recovery - - suspend/resume - - cold migration - - pause/unpause - - Update vlan interface to proper eth if pci-passthrough device - moves to different eth - - Verify ping from vm1 to vm2 over management and data networks - still works - - Repeat last 3 steps with different vm actions - - Teardown: - - Delete created vms and flavor - """ - - base_vm_pci, flavor, base_nics, avail_sriov_net, avail_pcipt_net, \ - pcipt_seg_ids, extra_pcipt_net = base_setup_pci - - pcipt_included = False - internal_net_id = None - for vif in vifs: - if not isinstance(vif, str): - vif = vif[0] - if 'pci-passthrough' in vif: - if not avail_pcipt_net: - skip(SkipHostIf.PCIPT_IF_UNAVAIL) - internal_net_id = avail_pcipt_net - pcipt_included = True - continue - elif 'pci-sriov' in vif: - if not avail_sriov_net: - skip(SkipHostIf.SRIOV_IF_UNAVAIL) - internal_net_id = avail_sriov_net - - assert internal_net_id, "test script error. Internal net should have " \ - "been determined." 
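The net-selection logic above reduces to: pci-passthrough vifs need the pcipt-capable internal net, pci-sriov vifs the sriov-capable one, and the test is skipped when the needed net is absent. A condensed sketch under those assumptions (pick_internal_net is a hypothetical name, not a suite helper):

    # Returns the internal net to use, or None when the test should be
    # skipped because the required internal net is unavailable.
    def pick_internal_net(vifs, pcipt_net, sriov_net):
        net = None
        for vif in vifs:
            if 'pci-passthrough' in vif:
                if not pcipt_net:
                    return None
                net = pcipt_net
            elif 'pci-sriov' in vif:
                if not sriov_net:
                    return None
                net = sriov_net
        return net

    print(pick_internal_net(('pci-sriov',), None, 'internal0-net'))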
- - nics, glance_vif = _append_nics_for_net(vifs, net_id=internal_net_id, - nics=base_nics) - if pcipt_included and extra_pcipt_net: - nics.append( - {'net-id': extra_pcipt_net, 'vif-model': 'pci-passthrough'}) - - img_id = None - if glance_vif: - img_id = glance_helper.create_image(name=glance_vif, - hw_vif_model=glance_vif, - cleanup='function')[1] - - LOG.tc_step("Boot a vm with following vifs on same internal net: " - "{}".format(vifs)) - vm_under_test = vm_helper.boot_vm(name='multiports_pci', - nics=nics, flavor=flavor, - cleanup='function', - reuse_vol=False, image_id=img_id)[1] - vm_helper.wait_for_vm_pingable_from_natbox(vm_under_test, fail_ok=False) - - if pcipt_included: - LOG.tc_step("Add vlan to pci-passthrough interface for VM.") - vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=vm_under_test, - net_seg_id=pcipt_seg_ids, - init_conf=True) - - LOG.tc_step("Ping vm's own data and internal network ips") - vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=vm_under_test, - net_types=['data', 'internal']) - - LOG.tc_step( - "Ping vm_under_test from base_vm over management, data, " - "and internal networks") - vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=base_vm_pci, - net_types=['mgmt', 'data', 'internal']) - - for vm_actions in [['auto_recover'], ['cold_migrate'], - ['pause', 'unpause'], ['suspend', 'resume']]: - if 'auto_recover' in vm_actions: - LOG.tc_step( - "Set vm to error state and wait for auto recovery " - "complete, " - "then verify ping from base vm over management and " - "internal networks") - vm_helper.set_vm_state(vm_id=vm_under_test, error_state=True, - fail_ok=False) - vm_helper.wait_for_vm_values(vm_id=vm_under_test, - status=VMStatus.ACTIVE, - fail_ok=False, timeout=600) - else: - LOG.tc_step("Perform following action(s) on vm {}: {}".format( - vm_under_test, vm_actions)) - for action in vm_actions: - vm_helper.perform_action_on_vm(vm_under_test, action=action) - - vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_under_test) - if pcipt_included: - LOG.tc_step( - "Bring up vlan interface for pci-passthrough vm {}.".format( - vm_under_test)) - vm_helper.add_vlan_for_vm_pcipt_interfaces( - vm_id=vm_under_test, net_seg_id=pcipt_seg_ids) - - LOG.tc_step( - "Verify ping from base_vm to vm_under_test over management " - "and internal networks still works " - "after {}".format(vm_actions)) - vm_helper.ping_vms_from_vm(to_vms=vm_under_test, - from_vm=base_vm_pci, - net_types=['mgmt', 'internal']) - - @mark.parametrize('vifs', [ - ('pci-sriov',), - ('pci-passthrough',), - ], ids=id_params) - def test_multiports_on_same_network_pci_evacuate_vm(self, base_setup_pci, - vifs): - """ - Test evacuate vm with multiple ports on same network - - Args: - base_setup_pci (tuple): base vm id, vm under test id, segment id - for internal0-net1 - vifs (list): list of vifs to add to same internal net - - Setups: - - create a flavor with dedicated cpu policy (module) - - choose one tenant network and one internal network to be used - by test (module) - - boot a base vm - vm1 with above flavor and networks, and ping - it from NatBox (module) - - Boot a vm under test - vm2 with above flavor and with multiple - ports on same tenant network with base vm, - and ping it from NatBox (class) - - Ping vm2's own data network ips (class) - - Ping vm2 from vm1 to verify management and internal networks - connection (class) - - Test Steps: - - Reboot vm2 host - - Wait for vm2 to be evacuated to other host - - Wait for vm2 pingable from NatBox - - Verify ping from vm1 to vm2 over 
management and internal - networks still works - - Teardown: - - Delete created vms and flavor - """ - base_vm_pci, flavor, base_nics, avail_sriov_net, avail_pcipt_net, \ - pcipt_seg_ids, extra_pcipt_net = base_setup_pci - - internal_net_id = None - pcipt_included = False - nics = copy.deepcopy(base_nics) - if 'pci-passthrough' in vifs: - if not avail_pcipt_net: - skip(SkipHostIf.PCIPT_IF_UNAVAIL) - pcipt_included = True - internal_net_id = avail_pcipt_net - if extra_pcipt_net: - nics.append( - {'net-id': extra_pcipt_net, 'vif-model': 'pci-passthrough'}) - if 'pci-sriov' in vifs: - if not avail_sriov_net: - skip(SkipHostIf.SRIOV_IF_UNAVAIL) - internal_net_id = avail_sriov_net - assert internal_net_id, "test script error. sriov or pcipt has to be " \ - "included." - - for vif in vifs: - nics.append({'net-id': internal_net_id, 'vif-model': vif}) - - LOG.tc_step( - "Boot a vm with following vifs on same network internal0-net1: " - "{}".format(vifs)) - vm_under_test = vm_helper.boot_vm(name='multiports_pci_evac', - nics=nics, flavor=flavor, - cleanup='function', - reuse_vol=False)[1] - vm_helper.wait_for_vm_pingable_from_natbox(vm_under_test, fail_ok=False) - - if pcipt_included: - LOG.tc_step("Add vlan to pci-passthrough interface.") - vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=vm_under_test, - net_seg_id=pcipt_seg_ids, - init_conf=True) - - LOG.tc_step("Ping vm's own data and internal network ips") - vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=vm_under_test, - net_types=['data', 'internal']) - vm_helper.configure_vm_vifs_on_same_net(vm_id=vm_under_test) - - LOG.tc_step( - "Ping vm_under_test from base_vm over management, data, and " - "internal networks") - vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=base_vm_pci, - net_types=['mgmt', 'data', 'internal']) - - host = vm_helper.get_vm_host(vm_under_test) - - LOG.tc_step("Reboot vm host {}".format(host)) - vm_helper.evacuate_vms(host=host, vms_to_check=vm_under_test, - ping_vms=True) - - if pcipt_included: - LOG.tc_step( - "Add/Check vlan interface is added to pci-passthrough device " - "for vm {}.".format(vm_under_test)) - vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=vm_under_test, - net_seg_id=pcipt_seg_ids) - - LOG.tc_step( - "Verify ping from base_vm to vm_under_test over management and " - "internal networks still works after evacuation.") - vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=base_vm_pci, - net_types=['mgmt', 'internal']) diff --git a/automated-pytest-suite/testcases/functional/networking/test_ping_vms.py b/automated-pytest-suite/testcases/functional/networking/test_ping_vms.py deleted file mode 100755 index bfc9eca7..00000000 --- a/automated-pytest-suite/testcases/functional/networking/test_ping_vms.py +++ /dev/null @@ -1,117 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - - -from pytest import mark, param - -from utils.tis_log import LOG -from consts.stx import FlavorSpec, GuestImages -from keywords import vm_helper, glance_helper, nova_helper, network_helper, \ - cinder_helper - - -def id_gen(val): - if not isinstance(val, str): - new_val = [] - for val_1 in val: - if not isinstance(val_1, str): - val_1 = '_'.join([str(val_2).lower() for val_2 in val_1]) - new_val.append(val_1) - new_val = '_'.join(new_val) - else: - new_val = val - - return new_val - - -def _compose_nics(vifs, net_ids, image_id, guest_os): - nics = [] - glance_vif = None - if isinstance(vifs, str): - vifs = (vifs,) - for i in range(len(vifs)): - vif_model = vifs[i] - nic = {'net-id': net_ids[i]} - if vif_model in ('e1000', 'rtl8139'): - glance_vif = vif_model - elif vif_model != 'virtio': - nic['vif-model'] = vif_model - nics.append(nic) - - if glance_vif: - glance_helper.set_image(image=image_id, hw_vif_model=glance_vif, - new_name='{}_{}'.format(guest_os, glance_vif)) - - return nics - - -@mark.parametrize(('guest_os', 'vm1_vifs', 'vm2_vifs'), [ - param('default', 'virtio', 'virtio', - marks=mark.priorities('cpe_sanity', 'sanity', 'sx_sanity')), - ('ubuntu_14', 'virtio', 'virtio'), -], ids=id_gen) -def test_ping_between_two_vms(stx_openstack_required, guest_os, vm1_vifs, vm2_vifs): - """ - Ping between two vms with given vif models - - Test Steps: - - Create a flavor with dedicated cpu policy and proper root disk size - - Create a volume from guest image under test with proper size - - Boot two vms with given vif models from above volume and flavor - - Ping VMs from NatBox and between two vms - - Test Teardown: - - Delete vms, volumes, flavor, glance image created - - """ - if guest_os == 'default': - guest_os = GuestImages.DEFAULT['guest'] - - reuse = False if 'e1000' in vm1_vifs or 'e1000' in vm2_vifs else True - cleanup = 'function' if not reuse or 'ubuntu' in guest_os else None - image_id = glance_helper.get_guest_image(guest_os, cleanup=cleanup, - use_existing=reuse) - - LOG.tc_step("Create a flavor with dedicated cpu policy") - flavor_id = nova_helper.create_flavor(name='dedicated', guest_os=guest_os, - cleanup='function')[1] - nova_helper.set_flavor(flavor_id, **{FlavorSpec.CPU_POLICY: 'dedicated'}) - - mgmt_net_id = network_helper.get_mgmt_net_id() - tenant_net_id = network_helper.get_tenant_net_id() - internal_net_id = network_helper.get_internal_net_id() - net_ids = (mgmt_net_id, tenant_net_id, internal_net_id) - vms = [] - for vifs_for_vm in (vm1_vifs, vm2_vifs): - # compose vm nics - nics = _compose_nics(vifs_for_vm, net_ids=net_ids, image_id=image_id, - guest_os=guest_os) - net_types = ['mgmt', 'data', 'internal'][:len(nics)] - LOG.tc_step("Create a volume from {} image".format(guest_os)) - vol_id = cinder_helper.create_volume(name='vol-{}'.format(guest_os), - source_id=image_id, - guest_image=guest_os, - cleanup='function')[1] - - LOG.tc_step( - "Boot a {} vm with {} vifs from above flavor and volume".format( - guest_os, vifs_for_vm)) - vm_id = vm_helper.boot_vm('{}_vifs'.format(guest_os), flavor=flavor_id, - cleanup='function', - source='volume', source_id=vol_id, nics=nics, - guest_os=guest_os)[1] - - LOG.tc_step("Ping VM {} from NatBox (external network)".format(vm_id)) - vm_helper.wait_for_vm_pingable_from_natbox(vm_id, fail_ok=False) - - vms.append(vm_id) - - LOG.tc_step( - "Ping between two vms over management, data, and internal networks") - vm_helper.ping_vms_from_vm(to_vms=vms[0], from_vm=vms[1], - net_types=net_types) -
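The reachability checks these tests lean on ultimately come down to ping exit status. A standalone approximation is sketched below, run locally rather than inside the guest over SSH as the suite's ping_vms_from_vm does; the target address is a placeholder:

    import subprocess

    # Approximation of the ping check: a few ICMP echoes with a deadline,
    # success iff ping exits 0. The suite runs the equivalent inside the
    # source VM over SSH; the IP below is a documentation address.
    def is_pingable(ip, count=3, deadline_s=5):
        res = subprocess.run(
            ["ping", "-c", str(count), "-w", str(deadline_s), ip],
            stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL,
        )
        return res.returncode == 0

    print(is_pingable("192.0.2.10"))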
vm_helper.ping_vms_from_vm(to_vms=vms[1], from_vm=vms[0], - net_types=net_types) diff --git a/automated-pytest-suite/testcases/functional/networking/test_pod_to_pod.py b/automated-pytest-suite/testcases/functional/networking/test_pod_to_pod.py deleted file mode 100644 index 4777d261..00000000 --- a/automated-pytest-suite/testcases/functional/networking/test_pod_to_pod.py +++ /dev/null @@ -1,234 +0,0 @@ -# -# Copyright (c) 2020 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -import copy - -from pytest import mark, fixture - -from utils.tis_log import LOG -from utils import rest - -from consts.proj_vars import ProjVar -from consts.auth import HostLinuxUser -from keywords import system_helper, kube_helper, common - - -@fixture(scope="class") -def deploy_test_pods(request): - """ - Fixture to deploy the server app,client app and returns serverips & client pods - - Label the nodes and add node selector to the deployment files - if not simplex system - - Copy the deployment files from localhost to active controller - - Deploy server pod - - Deploy client pods - - Get the server pods and client pods - - Get the server pods and client pods status before test begins - - Delete the service - - Delete the server pod deployment - - Delete the client pods - - Remove the labels on the nodes if not simplex - """ - server_dep_file = "server_pod.yaml" - home_dir = HostLinuxUser.get_home() - service_name = "test-service" - - client_pod1_name = "client-pod1" - client_pod2_name = "client-pod2" - - server_dep_file_path = "utils/test_files/server_pod_deploy.yaml" - client_pod_template_file_path = "utils/test_files/client_pod.yaml" - - server_pod_dep_data = common.get_yaml_data(server_dep_file_path) - client_pod1_data = common.get_yaml_data(client_pod_template_file_path) - client_pod2_data = copy.deepcopy(client_pod1_data) - - client_pod1_data['metadata']['name'] = client_pod1_name - client_pod2_data['metadata']['name'] = client_pod2_name - deployment_name = server_pod_dep_data['metadata']['name'] - - computes = system_helper.get_hypervisors( - operational="enabled", availability="available") - - if len(computes) > 1: - LOG.fixture_step("Label the nodes and add node selector to the deployment files\ - if not simplex system") - kube_helper.exec_kube_cmd(sub_cmd="label nodes {}".format( - computes[0]), args="test=server") - kube_helper.exec_kube_cmd(sub_cmd="label nodes {}".format( - computes[1]), args="test=client") - server_pod_dep_data['spec']['template']['spec']['nodeSelector'] = { - 'test': 'server'} - client_pod1_data['spec']['nodeSelector'] = {'test': 'server'} - client_pod2_data['spec']['nodeSelector'] = {'test': 'client'} - - server_pod_path = common.write_yaml_data_to_file( - server_pod_dep_data, server_dep_file) - client_pod1_path = common.write_yaml_data_to_file( - client_pod1_data, "{}.yaml".format(client_pod1_name)) - client_pod2_path = common.write_yaml_data_to_file( - client_pod2_data, "{}.yaml".format(client_pod2_name)) - - LOG.fixture_step( - "Copy the deployment files from localhost to active controller") - common.scp_from_localhost_to_active_controller( - source_path=server_pod_path, dest_path=home_dir) - - common.scp_from_localhost_to_active_controller( - source_path=client_pod1_path, dest_path=home_dir) - - common.scp_from_localhost_to_active_controller( - source_path=client_pod2_path, dest_path=home_dir) - - LOG.fixture_step("Deploy server pods {}".format(server_dep_file)) - kube_helper.exec_kube_cmd(sub_cmd="create -f ", args=server_dep_file) - 
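The fixture's anti-colocation trick above is plain Kubernetes: label two nodes differently, and give each pod spec a matching nodeSelector so the pods land on distinct hosts. A minimal sketch with made-up node names:

    import subprocess

    # Label two nodes so pods carrying nodeSelector {"test": "server"} or
    # {"test": "client"} schedule onto different hosts; node names here are
    # examples, not values from this suite.
    for node, role in (("compute-0", "server"), ("compute-1", "client")):
        subprocess.run(
            ["kubectl", "label", "nodes", node, "test={}".format(role)],
            check=True,
        )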
LOG.fixture_step("Deploy client pod {}.yaml & client pod {}.yaml".format( - client_pod1_name, client_pod2_name)) - kube_helper.exec_kube_cmd(sub_cmd="create -f ", - args="{}.yaml".format(client_pod1_name)) - - kube_helper.exec_kube_cmd(sub_cmd="create -f ", - args="{}.yaml".format(client_pod2_name)) - - LOG.fixture_step("Get the server pods and client pods") - server_pods = kube_helper.get_pods(labels="server=pod-to-pod") - client_pods = kube_helper.get_pods(labels="client=pod-to-pod") - - def teardown(): - LOG.fixture_step("Delete the service {}".format(service_name)) - kube_helper.exec_kube_cmd( - sub_cmd="delete service ", args=service_name) - LOG.fixture_step("Delete the deployment {}".format(deployment_name)) - kube_helper.exec_kube_cmd( - sub_cmd="delete deployment ", args=deployment_name) - LOG.fixture_step("Delete the client pods {} & {}".format( - client_pod1_name, client_pod2_name)) - kube_helper.delete_resources(labels="client=pod-to-pod") - if len(computes) > 1: - LOG.fixture_step("Remove the labels on the nodes if not simplex") - kube_helper.exec_kube_cmd(sub_cmd="label nodes {}".format( - computes[0]), args="test-") - kube_helper.exec_kube_cmd(sub_cmd="label nodes {}".format( - computes[1]), args="test-") - - request.addfinalizer(teardown) - LOG.fixture_step("Get the server pods and client pods status before test begins") - kube_helper.wait_for_pods_status( - pod_names=server_pods+client_pods, namespace="default") - return get_pod_ips(server_pods), client_pods, deployment_name, service_name - - -def get_pod_ips(pods): - """ - Returns the pods ips - Args: - pods(list): list of pod names - Returns: pod ips - """ - pod_ips = [] - for i in pods: - pod_ips.append(kube_helper.get_pod_value_jsonpath( - "pod {}".format(i), "{.status.podIP}")) - return pod_ips - - -@mark.platform_sanity -@mark.dc_subcloud -class TestPodtoPod: - def test_pod_to_pod_connection(self, deploy_test_pods): - """ - Verify Ping test between pods - Args: - deploy_test_pods(fixture): returns server_ips, client_pods, deployment_name, service_name - Setup: - - Label the nodes and add node selector to the deployment files - if not simplex system - - Copy the deployment files from localhost to active controller - - Deploy server pod - - Deploy client pods - Steps: - - Ping the server pod ip from the client pod - Teardown: - - Delete the service - - Delete the server pod deployment - - Delete the client pods - - Remove the labels on the nodes if not simplex - - """ - server_ips, client_pods, _, _ = deploy_test_pods - for client_pod in client_pods: - for ip in server_ips: - LOG.tc_step("Ping the server pod ip {} from the client pod {}".format( - ip, client_pod)) - cmd = "ping -c 3 {} -w 5".format(ip) - code, _ = kube_helper.exec_cmd_in_container( - cmd=cmd, pod=client_pod) - assert code == 0 - - def test_pod_to_service_connection(self, deploy_test_pods): - """ - Verify client pod to service multiple endpoints access - Args: - deploy_test_pods(fixture): returns server_ips, client_pods, deployment_name, service_name - Setup: - - Label the nodes and add node selector to the deployment files - if not simplex system - - Copy the deployment files from localhost to active controller - - Deploy server pod - - Deploy client pods - Steps: - - Curl the server pod ip from the client pod - Teardown: - - Delete the service - - Delete the server pod deployment - - Delete the client pods - - Remove the labels on the nodes if not simplex - - """ - server_ips, client_pods, _, _ = deploy_test_pods - for client_pod in client_pods: - for 
ip in server_ips: - if ProjVar.get_var('IPV6_OAM'): - ip = "[{}]".format(ip) - cmd = "curl -Is {}:8080".format(ip) - LOG.tc_step("Curl({}) the server pod ip {} from the client pod {}".format( - cmd, ip, client_pod)) - code, _ = kube_helper.exec_cmd_in_container( - cmd=cmd, pod=client_pod) - assert code == 0 - - def test_host_to_service_connection(self, deploy_test_pods): - """ - Verify the service connectivity from external network - Args: - deploy_test_pods(fixture): returns server_ips, client_pods, deployment_name, service_name - Setup: - - Label the nodes and add node selector to the deployment files - if not simplex system - - Copy the deployment files from localhost to active controller - - Deploy server pod - - Deploy client pods - Steps: - - Expose the service with NodePort - - Check the service access from local host - Teardown: - - Delete the service - - Delete the server pod deployment - - Delete the client pods - - Remove the labels on the nodes if not simplex - """ - _, _, deploy_name, service_name = deploy_test_pods - LOG.tc_step("Expose the service {} with NodePort".format(service_name)) - kube_helper.expose_the_service( - deployment_name=deploy_name, type="NodePort", service_name=service_name) - node_port = kube_helper.get_pod_value_jsonpath( - "service {}".format(service_name), "{.spec.ports[0].nodePort}") - for i in system_helper.get_system_iplist(): - url = "http://{}:{}".format(i, node_port) - LOG.tc_step( - "Check the service access {} from local host".format(url)) - rest.check_url(url) diff --git a/automated-pytest-suite/testcases/functional/networking/test_vm_meta_data_retrieval.py b/automated-pytest-suite/testcases/functional/networking/test_vm_meta_data_retrieval.py deleted file mode 100755 index 23c72aed..00000000 --- a/automated-pytest-suite/testcases/functional/networking/test_vm_meta_data_retrieval.py +++ /dev/null @@ -1,45 +0,0 @@ -from pytest import mark - -from utils.tis_log import LOG -from keywords import vm_helper -from consts.stx import METADATA_SERVER - - -@mark.sanity -def test_vm_meta_data_retrieval(stx_openstack_required): - """ - VM meta-data retrieval - - Test Steps: - - Launch a boot-from-image vm - - Retrieve vm meta_data within vm from metadata server - - Ensure vm uuid from metadata server is the same as nova show - - Test Teardown: - - Delete created vm and flavor - """ - LOG.tc_step("Launch a boot-from-image vm") - vm_id = vm_helper.boot_vm(source='image', cleanup='function')[1] - vm_helper.wait_for_vm_pingable_from_natbox(vm_id, fail_ok=False) - - LOG.tc_step('Retrieve vm meta_data within vm from metadata server') - # retrieve meta instance id by ssh to VM from natbox and wget to remote - # server - _access_metadata_server_from_vm(vm_id=vm_id) - - -def _access_metadata_server_from_vm(vm_id): - with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh: - vm_ssh.exec_cmd('ip route') - command = 'wget http://{}/openstack/latest/meta_data.json'.format( - METADATA_SERVER) - vm_ssh.exec_cmd(command, fail_ok=False) - metadata = vm_ssh.exec_cmd('more meta_data.json', fail_ok=False)[1] - - LOG.tc_step("Ensure vm uuid from metadata server is the same as nova show") - metadata = metadata.replace('\n', '') - LOG.info(metadata) - metadata_uuid = eval(metadata)['uuid'] - - assert vm_id == metadata_uuid, "VM UUID retrieved from metadata server " \ - "is not the same as nova show" diff --git a/automated-pytest-suite/testcases/functional/nova/conftest.py b/automated-pytest-suite/testcases/functional/nova/conftest.py deleted file mode 100755 index 
157d7f85..00000000 --- a/automated-pytest-suite/testcases/functional/nova/conftest.py +++ /dev/null @@ -1,3 +0,0 @@ -from testfixtures.resource_mgmt import * -from testfixtures.resource_create import * -from testfixtures.config_host import * diff --git a/automated-pytest-suite/testcases/functional/nova/test_config_drive.py b/automated-pytest-suite/testcases/functional/nova/test_config_drive.py deleted file mode 100755 index 4142a7d4..00000000 --- a/automated-pytest-suite/testcases/functional/nova/test_config_drive.py +++ /dev/null @@ -1,131 +0,0 @@ -from pytest import fixture, skip, mark - -from consts.timeout import VMTimeout -from keywords import vm_helper, host_helper, cinder_helper, glance_helper, \ - system_helper -from testfixtures.fixture_resources import ResourceCleanup -from testfixtures.recover_hosts import HostsToRecover -from utils.tis_log import LOG - -TEST_STRING = 'Config-drive test file content' - - -@fixture(scope='module') -def hosts_per_stor_backing(): - hosts_per_backing = host_helper.get_hosts_per_storage_backing() - LOG.fixture_step("Hosts per storage backing: {}".format(hosts_per_backing)) - - return hosts_per_backing - - -@mark.nightly -@mark.sx_nightly -def test_vm_with_config_drive(hosts_per_stor_backing): - """ - Skip Condition: - - no host with local_image backend - - Test Steps: - - Launch a vm using config drive - - Add test data to config drive on vm - - Do some operations (reboot vm for simplex, cold migrate and lock - host for non-simplex) and - check that test data persists in the config drive after each operation - Teardown: - - Delete created vm, volume, flavor - - """ - guest_os = 'cgcs-guest' - img_id = glance_helper.get_guest_image(guest_os) - hosts_num = len(hosts_per_stor_backing.get('local_image', [])) - if hosts_num < 1: - skip("No host with local_image storage backing") - - volume_id = cinder_helper.create_volume(name='vol_inst1', source_id=img_id, - guest_image=guest_os)[1] - ResourceCleanup.add('volume', volume_id, scope='function') - - block_device = {'source': 'volume', 'dest': 'volume', 'id': volume_id, - 'device': 'vda'} - vm_id = vm_helper.boot_vm(name='config_drive', config_drive=True, - block_device=block_device, - cleanup='function', guest_os=guest_os, - meta={'foo': 'bar'})[1] - - LOG.tc_step("Confirming the config drive is set to True in vm ...") - assert str(vm_helper.get_vm_values(vm_id, "config_drive")[ - 0]) == 'True', "vm config-drive not true" - - LOG.tc_step("Add data to config drive ...") - check_vm_config_drive_data(vm_id) - - vm_host = vm_helper.get_vm_host(vm_id) - instance_name = vm_helper.get_vm_instance_name(vm_id) - LOG.tc_step("Check config_drive vm files on hypervisor after vm launch") - check_vm_files_on_hypervisor(vm_id, vm_host=vm_host, - instance_name=instance_name) - - if not system_helper.is_aio_simplex(): - LOG.tc_step("Cold migrate VM") - vm_helper.cold_migrate_vm(vm_id) - - LOG.tc_step("Check config drive after cold migrate VM...") - check_vm_config_drive_data(vm_id) - - LOG.tc_step("Lock the compute host") - compute_host = vm_helper.get_vm_host(vm_id) - HostsToRecover.add(compute_host) - host_helper.lock_host(compute_host, swact=True) - - LOG.tc_step("Check config drive after locking VM host") - check_vm_config_drive_data(vm_id, ping_timeout=VMTimeout.DHCP_RETRY) - vm_host = vm_helper.get_vm_host(vm_id) - - else: - LOG.tc_step("Reboot vm") - vm_helper.reboot_vm(vm_id) - - LOG.tc_step("Check config drive after vm rebooted") - check_vm_config_drive_data(vm_id) - - LOG.tc_step("Check vm files exist after nova
operations") - check_vm_files_on_hypervisor(vm_id, vm_host=vm_host, - instance_name=instance_name) - - -def check_vm_config_drive_data(vm_id, ping_timeout=VMTimeout.PING_VM): - """ - Args: - vm_id: - ping_timeout - - Returns: - - """ - vm_helper.wait_for_vm_pingable_from_natbox(vm_id, timeout=ping_timeout) - dev = '/dev/hd' - with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh: - # Run mount command to determine the /dev/hdX is mount at: - cmd = """mount | grep "{}" | awk '{{print $3}} '""".format(dev) - mount = vm_ssh.exec_cmd(cmd)[1] - assert mount, "{} is not mounted".format(dev) - - file_path = '{}/openstack/latest/meta_data.json'.format(mount) - content = vm_ssh.exec_cmd('python -m json.tool {} | grep ' - 'foo'.format(file_path), fail_ok=False)[1] - assert '"foo": "bar"' in content - - -def check_vm_files_on_hypervisor(vm_id, vm_host, instance_name): - with host_helper.ssh_to_host(vm_host) as host_ssh: - cmd = " ls /var/lib/nova/instances/{}".format(vm_id) - cmd_output = host_ssh.exec_cmd(cmd)[1] - for expt_file in ('console.log', 'disk.config'): - assert expt_file in cmd_output, \ - "{} is not found for config drive vm {} on " \ - "{}".format(expt_file, vm_id, vm_host) - - output = host_ssh.exec_cmd('ls /run/libvirt/qemu')[1] - libvirt = "{}.xml".format(instance_name) - assert libvirt in output, "{} is not found in /run/libvirt/qemu on " \ - "{}".format(libvirt, vm_host) diff --git a/automated-pytest-suite/testcases/functional/nova/test_cpu_policy.py b/automated-pytest-suite/testcases/functional/nova/test_cpu_policy.py deleted file mode 100755 index ef5576c3..00000000 --- a/automated-pytest-suite/testcases/functional/nova/test_cpu_policy.py +++ /dev/null @@ -1,185 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -from pytest import mark, param - -from utils.tis_log import LOG - -from consts.stx import FlavorSpec, ImageMetadata, GuestImages -from consts.cli_errs import CPUPolicyErr # used by eval - -from keywords import nova_helper, vm_helper, glance_helper, cinder_helper, \ - check_helper, host_helper -from testfixtures.fixture_resources import ResourceCleanup - - -@mark.parametrize( - ('flv_vcpus', 'flv_pol', 'img_pol', 'boot_source', 'expt_err'), [ - param(3, None, 'shared', 'image', None, marks=mark.p3), - param(4, 'dedicated', 'dedicated', 'volume', None, marks=mark.p3), - param(1, 'dedicated', None, 'image', None, marks=mark.p3), - param(1, 'shared', 'shared', 'volume', None, marks=mark.p3), - param(2, 'shared', None, 'image', None, marks=mark.p3), - param(3, 'dedicated', 'shared', 'volume', None, - marks=mark.domain_sanity), - param(1, 'shared', 'dedicated', 'image', - 'CPUPolicyErr.CONFLICT_FLV_IMG', marks=mark.p3), - ]) -def test_boot_vm_cpu_policy_image(flv_vcpus, flv_pol, img_pol, boot_source, - expt_err): - LOG.tc_step("Create flavor with {} vcpus".format(flv_vcpus)) - flavor_id = nova_helper.create_flavor(name='cpu_pol_{}'.format(flv_pol), - vcpus=flv_vcpus)[1] - ResourceCleanup.add('flavor', flavor_id) - - if flv_pol is not None: - specs = {FlavorSpec.CPU_POLICY: flv_pol} - - LOG.tc_step("Set following extra specs: {}".format(specs)) - nova_helper.set_flavor(flavor_id, **specs) - - if img_pol is not None: - image_meta = {ImageMetadata.CPU_POLICY: img_pol} - LOG.tc_step( - "Create image with following metadata: {}".format(image_meta)) - image_id = glance_helper.create_image( - name='cpu_pol_{}'.format(img_pol), cleanup='function', - **image_meta)[1] - else: - image_id = glance_helper.get_image_id_from_name( - 
GuestImages.DEFAULT['guest'], strict=True) - - if boot_source == 'volume': - LOG.tc_step("Create a volume from image") - source_id = cinder_helper.create_volume(name='cpu_pol_img', - source_id=image_id)[1] - ResourceCleanup.add('volume', source_id) - else: - source_id = image_id - - prev_cpus = host_helper.get_vcpus_for_computes(field='used_now') - - LOG.tc_step("Attempt to boot a vm from above {} with above flavor".format( - boot_source)) - code, vm_id, msg = vm_helper.boot_vm(name='cpu_pol', flavor=flavor_id, - source=boot_source, - source_id=source_id, fail_ok=True, - cleanup='function') - - # check for negative tests - if expt_err is not None: - LOG.tc_step( - "Check VM failed to boot due to conflict in flavor and image.") - assert 4 == code, "Expect boot vm cli reject and no vm booted. " \ - "Actual: {}".format(msg) - assert eval(expt_err) in msg, \ - "Expected error message is not found in cli return." - return # end the test for negative cases - - # Check for positive tests - LOG.tc_step("Check vm is successfully booted.") - assert 0 == code, "Expect vm boot successfully. Actual: {}".format(msg) - - # Calculate expected policy: - expt_cpu_pol = flv_pol if flv_pol else img_pol - expt_cpu_pol = expt_cpu_pol if expt_cpu_pol else 'shared' - - vm_host = vm_helper.get_vm_host(vm_id) - check_helper.check_topology_of_vm(vm_id, vcpus=flv_vcpus, - cpu_pol=expt_cpu_pol, vm_host=vm_host, - prev_total_cpus=prev_cpus[vm_host]) - - -@mark.parametrize(('flv_vcpus', 'cpu_pol', 'pol_source', 'boot_source'), [ - param(4, None, 'flavor', 'image', marks=mark.p2), - param(2, 'dedicated', 'flavor', 'volume', marks=mark.domain_sanity), - param(3, 'shared', 'flavor', 'volume', marks=mark.p2), - param(1, 'dedicated', 'flavor', 'image', marks=mark.p2), - param(2, 'dedicated', 'image', 'volume', marks=mark.nightly), - param(3, 'shared', 'image', 'volume', marks=mark.p2), - param(1, 'dedicated', 'image', 'image', marks=mark.domain_sanity), -]) -def test_cpu_pol_vm_actions(flv_vcpus, cpu_pol, pol_source, boot_source): - LOG.tc_step("Create flavor with {} vcpus".format(flv_vcpus)) - flavor_id = nova_helper.create_flavor(name='cpu_pol', vcpus=flv_vcpus)[1] - ResourceCleanup.add('flavor', flavor_id) - - image_id = glance_helper.get_image_id_from_name( - GuestImages.DEFAULT['guest'], strict=True) - if cpu_pol is not None: - if pol_source == 'flavor': - specs = {FlavorSpec.CPU_POLICY: cpu_pol} - - LOG.tc_step("Set following extra specs: {}".format(specs)) - nova_helper.set_flavor(flavor_id, **specs) - else: - image_meta = {ImageMetadata.CPU_POLICY: cpu_pol} - LOG.tc_step( - "Create image with following metadata: {}".format(image_meta)) - image_id = glance_helper.create_image( - name='cpu_pol_{}'.format(cpu_pol), cleanup='function', - **image_meta)[1] - if boot_source == 'volume': - LOG.tc_step("Create a volume from image") - source_id = cinder_helper.create_volume(name='cpu_pol_{}'.format(cpu_pol), - source_id=image_id)[1] - ResourceCleanup.add('volume', source_id) - else: - source_id = image_id - - prev_cpus = host_helper.get_vcpus_for_computes(field='used_now') - - LOG.tc_step( - "Boot a vm from {} with above flavor and check vm topology is as " - "expected".format(boot_source)) - vm_id = vm_helper.boot_vm(name='cpu_pol_{}_{}'.format(cpu_pol, flv_vcpus), - flavor=flavor_id, source=boot_source, - source_id=source_id, cleanup='function')[1] - - vm_helper.wait_for_vm_pingable_from_natbox(vm_id) - vm_host = vm_helper.get_vm_host(vm_id) - check_helper.check_topology_of_vm(vm_id, vcpus=flv_vcpus, cpu_pol=cpu_pol, - 
vm_host=vm_host, - prev_total_cpus=prev_cpus[vm_host]) - - LOG.tc_step("Suspend/Resume vm and check vm topology stays the same") - vm_helper.suspend_vm(vm_id) - vm_helper.resume_vm(vm_id) - - vm_helper.wait_for_vm_pingable_from_natbox(vm_id) - check_helper.check_topology_of_vm(vm_id, vcpus=flv_vcpus, cpu_pol=cpu_pol, - vm_host=vm_host, - prev_total_cpus=prev_cpus[vm_host]) - - LOG.tc_step("Stop/Start vm and check vm topology stays the same") - vm_helper.stop_vms(vm_id) - vm_helper.start_vms(vm_id) - - vm_helper.wait_for_vm_pingable_from_natbox(vm_id) - prev_siblings = check_helper.check_topology_of_vm( - vm_id, vcpus=flv_vcpus, cpu_pol=cpu_pol, vm_host=vm_host, - prev_total_cpus=prev_cpus[vm_host])[1] - - LOG.tc_step("Live migrate vm and check vm topology stays the same") - vm_helper.live_migrate_vm(vm_id=vm_id) - - vm_helper.wait_for_vm_pingable_from_natbox(vm_id) - vm_host = vm_helper.get_vm_host(vm_id) - prev_siblings = prev_siblings if cpu_pol == 'dedicated' else None - check_helper.check_topology_of_vm(vm_id, vcpus=flv_vcpus, cpu_pol=cpu_pol, - vm_host=vm_host, - prev_total_cpus=prev_cpus[vm_host], - prev_siblings=prev_siblings) - - LOG.tc_step("Cold migrate vm and check vm topology stays the same") - vm_helper.cold_migrate_vm(vm_id=vm_id) - - vm_helper.wait_for_vm_pingable_from_natbox(vm_id) - vm_host = vm_helper.get_vm_host(vm_id) - check_helper.check_topology_of_vm(vm_id, vcpus=flv_vcpus, cpu_pol=cpu_pol, - vm_host=vm_host, - prev_total_cpus=prev_cpus[vm_host]) diff --git a/automated-pytest-suite/testcases/functional/nova/test_cpu_thread.py b/automated-pytest-suite/testcases/functional/nova/test_cpu_thread.py deleted file mode 100755 index 848374e3..00000000 --- a/automated-pytest-suite/testcases/functional/nova/test_cpu_thread.py +++ /dev/null @@ -1,437 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -from pytest import mark, fixture, skip, param - -from utils.tis_log import LOG - -from consts.reasons import SkipHypervisor, SkipHyperthreading -from consts.stx import FlavorSpec, ImageMetadata -# Do not remove used imports below as they are used in eval() -from consts.cli_errs import CPUThreadErr - -from keywords import nova_helper, vm_helper, host_helper, glance_helper, \ - check_helper -from testfixtures.fixture_resources import ResourceCleanup -from testfixtures.recover_hosts import HostsToRecover - - -def id_gen(val): - if isinstance(val, list): - return '-'.join(val) - - -@fixture(scope='module') -def ht_and_nonht_hosts(): - LOG.fixture_step( - "(Module) Get hyper-threading enabled and disabled hypervisors") - nova_hosts = host_helper.get_up_hypervisors() - ht_hosts = [] - non_ht_hosts = [] - for host in nova_hosts: - if host_helper.is_host_hyperthreaded(host): - ht_hosts.append(host) - else: - non_ht_hosts.append(host) - - LOG.info( - '-- Hyper-threading enabled hosts: {}; Hyper-threading disabled ' - 'hosts: {}'.format( - ht_hosts, non_ht_hosts)) - return ht_hosts, non_ht_hosts - - -class TestHTEnabled: - - @fixture(scope='class', autouse=True) - def ht_hosts_(self, ht_and_nonht_hosts): - ht_hosts, non_ht_hosts = ht_and_nonht_hosts - - if not ht_hosts: - skip("No up hypervisor found with Hyper-threading enabled.") - - return ht_hosts, non_ht_hosts - - def test_isolate_vm_on_ht_host(self, ht_hosts_, add_admin_role_func): - """ - Test isolate vms take the host log_core sibling pair for each vcpu - when HT is enabled. 
- Args: - ht_hosts_: - add_admin_role_func: - - Pre-conditions: At least one hypervisor has HT enabled - - Test Steps: - - Launch VM with isolate thread policy and 4 vcpus, until all - Application cores on thread-0 are taken - - Attempt to launch another vm on same host, and ensure it fails - - """ - ht_hosts, non_ht_hosts = ht_hosts_ - vcpu_count = 4 - cpu_thread_policy = 'isolate' - LOG.tc_step("Create flavor with {} vcpus and {} thread policy".format( - vcpu_count, cpu_thread_policy)) - flavor_id = nova_helper.create_flavor( - name='cpu_thread_{}'.format(cpu_thread_policy), vcpus=vcpu_count, - cleanup='function')[1] - specs = {FlavorSpec.CPU_POLICY: 'dedicated', - FlavorSpec.CPU_THREAD_POLICY: cpu_thread_policy} - nova_helper.set_flavor(flavor_id, **specs) - - LOG.tc_step( - "Get used vcpus for vm host before booting vm, and ensure " - "sufficient instance and core quotas") - host = ht_hosts[0] - vms = vm_helper.get_vms_on_host(hostname=host) - vm_helper.delete_vms(vms=vms) - log_core_counts = host_helper.get_logcores_counts( - host, thread='0', functions='Applications') - max_vm_count = int(log_core_counts[0] / vcpu_count) + int( - log_core_counts[1] / vcpu_count) - vm_helper.ensure_vms_quotas(vms_num=max_vm_count + 10, - cores_num=4 * (max_vm_count + 2) + 10) - - LOG.tc_step( - "Boot {} isolate 4vcpu vms on a HT enabled host, and check " - "topology of vm on host and vms". - format(max_vm_count)) - for i in range(max_vm_count): - name = '4vcpu_isolate-{}'.format(i) - LOG.info( - "Launch VM {} on {} and check its topology".format(name, host)) - prev_cpus = host_helper.get_vcpus_for_computes( - hosts=[host], field='used_now')[host] - vm_id = vm_helper.boot_vm(name=name, flavor=flavor_id, vm_host=host, - cleanup='function')[1] - - check_helper.check_topology_of_vm(vm_id, vcpus=vcpu_count, - prev_total_cpus=prev_cpus, - cpu_pol='dedicated', - cpu_thr_pol=cpu_thread_policy, - vm_host=host) - - LOG.tc_step( - "Attempt to boot another vm on {}, and ensure it fails due to no " - "free sibling pairs".format(host)) - code = vm_helper.boot_vm(name='cpu_thread_{}'.format(cpu_thread_policy), - flavor=flavor_id, vm_host=host, - fail_ok=True, cleanup='function')[0] - assert code > 0, "VM is still scheduled even though all sibling " \ - "pairs should have been occupied" - - @mark.parametrize(('vcpus', 'cpu_thread_policy', 'min_vcpus'), [ - param(4, 'require', None), - param(3, 'require', None), - param(3, 'prefer', None), - ]) - def test_boot_vm_cpu_thread_positive(self, vcpus, cpu_thread_policy, - min_vcpus, ht_hosts_): - """ - Test boot vm with specific cpu thread policy requirement - - Args: - vcpus (int): number of vcpus to set when creating flavor - cpu_thread_policy (str): cpu thread policy to set in flavor - min_vcpus (int): min_vcpus extra spec to set - ht_hosts_ (tuple): (ht_hosts, non-ht_hosts) - - Skip condition: - - no host is hyperthreading enabled on system - - Setups: - - Find out HT hosts and non-HT_hosts on system (module) - - Test Steps: - - Create a flavor with given number of vcpus - - Set cpu policy to dedicated and extra specs as per test params - - Get the host vcpu usage before booting vm - - Boot a vm with above flavor - - Ensure vm is booted on HT host for 'require' vm - - Check vm-topology, host side vcpu usage, topology from within - the guest to ensure vm is properly booted - - Teardown: - - Delete created vm, volume, flavor - - """ - ht_hosts, non_ht_hosts = ht_hosts_ - LOG.tc_step("Create flavor with {} vcpus".format(vcpus)) - flavor_id = nova_helper.create_flavor( -
name='cpu_thread_{}'.format(cpu_thread_policy), vcpus=vcpus)[1] - ResourceCleanup.add('flavor', flavor_id) - - specs = {FlavorSpec.CPU_POLICY: 'dedicated'} - if cpu_thread_policy is not None: - specs[FlavorSpec.CPU_THREAD_POLICY] = cpu_thread_policy - - if min_vcpus is not None: - specs[FlavorSpec.MIN_VCPUS] = min_vcpus - - LOG.tc_step("Set following extra specs: {}".format(specs)) - nova_helper.set_flavor(flavor_id, **specs) - - LOG.tc_step("Get used cpus for all hosts before booting vm") - hosts_to_check = ht_hosts if cpu_thread_policy == 'require' else \ - ht_hosts + non_ht_hosts - pre_hosts_cpus = host_helper.get_vcpus_for_computes( - hosts=hosts_to_check, field='used_now') - - LOG.tc_step( - "Boot a vm with above flavor and ensure it's booted on a HT " - "enabled host.") - vm_id = vm_helper.boot_vm( - name='cpu_thread_{}'.format(cpu_thread_policy), - flavor=flavor_id, - cleanup='function')[1] - - vm_host = vm_helper.get_vm_host(vm_id) - if cpu_thread_policy == 'require': - assert vm_host in ht_hosts, "VM host {} is not hyper-threading " \ - "enabled.".format(vm_host) - - LOG.tc_step("Check topology of the {}vcpu {} vm on hypervisor and " - "on vm".format(vcpus, cpu_thread_policy)) - prev_cpus = pre_hosts_cpus[vm_host] - check_helper.check_topology_of_vm(vm_id, vcpus=vcpus, - prev_total_cpus=prev_cpus, - cpu_pol='dedicated', - cpu_thr_pol=cpu_thread_policy, - min_vcpus=min_vcpus, vm_host=vm_host) - - @mark.parametrize(('vcpus', 'cpu_pol', 'cpu_thr_pol', 'flv_or_img', - 'vs_numa_affinity', 'boot_source', 'nova_actions'), [ - param(2, 'dedicated', 'isolate', 'image', None, 'volume', - 'live_migrate', marks=mark.priorities('domain_sanity', - 'nightly')), - param(3, 'dedicated', 'require', 'image', None, 'volume', - 'live_migrate', marks=mark.domain_sanity), - param(3, 'dedicated', 'prefer', 'flavor', None, 'volume', - 'live_migrate', marks=mark.p2), - param(3, 'dedicated', 'require', 'flavor', None, 'volume', - 'live_migrate', marks=mark.p2), - param(3, 'dedicated', 'isolate', 'flavor', None, 'volume', - 'cold_migrate', marks=mark.domain_sanity), - param(2, 'dedicated', 'require', 'image', None, 'image', - 'cold_migrate', marks=mark.domain_sanity), - param(2, 'dedicated', 'require', 'flavor', None, 'volume', - 'cold_mig_revert', marks=mark.p2), - param(5, 'dedicated', 'prefer', 'image', None, 'volume', - 'cold_mig_revert'), - param(4, 'dedicated', 'isolate', 'image', None, 'volume', - ['suspend', 'resume', 'rebuild'], marks=mark.p2), - param(6, 'dedicated', 'require', 'image', None, 'image', - ['suspend', 'resume', 'rebuild'], marks=mark.p2), - ], ids=id_gen) - def test_cpu_thread_vm_topology_nova_actions(self, vcpus, cpu_pol, - cpu_thr_pol, flv_or_img, - vs_numa_affinity, - boot_source, nova_actions, - ht_hosts_): - ht_hosts, non_ht_hosts = ht_hosts_ - if 'mig' in nova_actions: - if len(ht_hosts) + len(non_ht_hosts) < 2: - skip(SkipHypervisor.LESS_THAN_TWO_HYPERVISORS) - if cpu_thr_pol in ['require', 'isolate'] and len(ht_hosts) < 2: - skip(SkipHyperthreading.LESS_THAN_TWO_HT_HOSTS) - - name_str = 'cpu_thr_{}_in_img'.format(cpu_pol) - - LOG.tc_step("Create flavor with {} vcpus".format(vcpus)) - flavor_id = nova_helper.create_flavor(name='vcpus{}'.format(vcpus), - vcpus=vcpus)[1] - ResourceCleanup.add('flavor', flavor_id) - - specs = {} - if vs_numa_affinity: - specs[FlavorSpec.VSWITCH_NUMA_AFFINITY] = vs_numa_affinity - - if flv_or_img == 'flavor': - specs[FlavorSpec.CPU_POLICY] = cpu_pol - specs[FlavorSpec.CPU_THREAD_POLICY] = cpu_thr_pol - - if specs: - LOG.tc_step("Set following extra 
specs: {}".format(specs)) - nova_helper.set_flavor(flavor_id, **specs) - - image_id = None - if flv_or_img == 'image': - image_meta = {ImageMetadata.CPU_POLICY: cpu_pol, - ImageMetadata.CPU_THREAD_POLICY: cpu_thr_pol} - LOG.tc_step( - "Create image with following metadata: {}".format(image_meta)) - image_id = glance_helper.create_image(name=name_str, - cleanup='function', - **image_meta)[1] - - LOG.tc_step("Get used cpus for all hosts before booting vm") - hosts_to_check = ht_hosts if cpu_thr_pol == 'require' else \ - ht_hosts + non_ht_hosts - pre_hosts_cpus = host_helper.get_vcpus_for_computes( - hosts=hosts_to_check, field='used_now') - - LOG.tc_step("Boot a vm from {} with above flavor".format(boot_source)) - vm_id = vm_helper.boot_vm(name=name_str, flavor=flavor_id, - source=boot_source, image_id=image_id, - cleanup='function')[1] - - vm_host = vm_helper.get_vm_host(vm_id) - - if cpu_thr_pol == 'require': - LOG.tc_step("Check vm is booted on a HT host") - assert vm_host in ht_hosts, "VM host {} is not hyper-threading " \ - "enabled.".format(vm_host) - - prev_cpus = pre_hosts_cpus[vm_host] - prev_siblings = check_helper.check_topology_of_vm( - vm_id, vcpus=vcpus, prev_total_cpus=prev_cpus, cpu_pol=cpu_pol, - cpu_thr_pol=cpu_thr_pol, vm_host=vm_host)[1] - - LOG.tc_step("Perform following nova action(s) on vm {}: " - "{}".format(vm_id, nova_actions)) - if isinstance(nova_actions, str): - nova_actions = [nova_actions] - - check_prev_siblings = False - for action in nova_actions: - kwargs = {} - if action == 'rebuild': - kwargs['image_id'] = image_id - elif action == 'live_migrate': - check_prev_siblings = True - vm_helper.perform_action_on_vm(vm_id, action=action, **kwargs) - - post_vm_host = vm_helper.get_vm_host(vm_id) - pre_action_cpus = pre_hosts_cpus[post_vm_host] - - if cpu_thr_pol == 'require': - LOG.tc_step("Check vm is still on HT host") - assert post_vm_host in ht_hosts, "VM host {} is not " \ - "hyper-threading " \ - "enabled.".format(vm_host) - - LOG.tc_step( - "Check VM topology is still correct after {}".format(nova_actions)) - if cpu_pol != 'dedicated' or not check_prev_siblings: - # Allow prev_siblings in live migration case - prev_siblings = None - check_helper.check_topology_of_vm(vm_id, vcpus=vcpus, - prev_total_cpus=pre_action_cpus, - cpu_pol=cpu_pol, - cpu_thr_pol=cpu_thr_pol, - vm_host=post_vm_host, - prev_siblings=prev_siblings) - - @fixture(scope='class') - def _add_hosts_to_stxauto(self, request, ht_hosts_, add_stxauto_zone): - ht_hosts, non_ht_hosts = ht_hosts_ - - if not non_ht_hosts: - skip("No non-HT host available") - - LOG.fixture_step("Add one HT host and nonHT hosts to stxauto zone") - - if len(ht_hosts) > 1: - ht_hosts = [ht_hosts[0]] - - host_in_stxauto = ht_hosts + non_ht_hosts - - def _revert(): - nova_helper.remove_hosts_from_aggregate(aggregate='stxauto', - hosts=host_in_stxauto) - - request.addfinalizer(_revert) - - nova_helper.add_hosts_to_aggregate('stxauto', ht_hosts + non_ht_hosts) - - LOG.info( - "stxauto zone: HT: {}; non-HT: {}".format(ht_hosts, non_ht_hosts)) - return ht_hosts, non_ht_hosts - - -class TestHTDisabled: - - @fixture(scope='class', autouse=True) - def ensure_nonht(self, ht_and_nonht_hosts): - ht_hosts, non_ht_hosts = ht_and_nonht_hosts - if not non_ht_hosts: - skip("No host with HT disabled") - - if ht_hosts: - LOG.fixture_step( - "Locking HT hosts to ensure only non-HT hypervisors available") - HostsToRecover.add(ht_hosts, scope='class') - for host_ in ht_hosts: - host_helper.lock_host(host_, swact=True) - - 
@mark.parametrize(('vcpus', 'cpu_thread_policy', 'min_vcpus', 'expt_err'), [ - param(2, 'require', None, 'CPUThreadErr.HT_HOST_UNAVAIL'), - param(3, 'require', None, 'CPUThreadErr.HT_HOST_UNAVAIL'), - param(3, 'isolate', None, None), - param(2, 'prefer', None, None), - ]) - def test_boot_vm_cpu_thread_ht_disabled(self, vcpus, cpu_thread_policy, - min_vcpus, expt_err): - """ - Test boot vm with specified cpu thread policy when no HT host is - available on system - - Args: - vcpus (int): number of vcpus to set in flavor - cpu_thread_policy (str): cpu thread policy in flavor extra spec - min_vcpus (int): min_vcpus in flavor extra spec - expt_err (str|None): expected error message in nova show if any - - Skip condition: - - All hosts are hyperthreading enabled on system - - Setups: - - Find out HT hosts and non-HT_hosts on system (module) - - Ensure no HT hosts on system - - Test Steps: - - Create a flavor with given number of vcpus - - Set flavor extra specs as per test params - - Get the host vcpu usage before booting vm - - Attempt to boot a vm with above flavor - - if expt_err is None: - - Ensure vm is booted on non-HT host for 'isolate'/'prefer' - vm - - Check vm-topology, host side vcpu usage, topology from - within the guest to ensure vm is properly booted - - else, ensure expected error message is included in nova - show for 'require' vm - - Teardown: - - Delete created vm, volume, flavor - - """ - - LOG.tc_step("Create flavor with {} vcpus".format(vcpus)) - flavor_id = nova_helper.create_flavor(name='cpu_thread', vcpus=vcpus)[1] - ResourceCleanup.add('flavor', flavor_id) - - specs = {FlavorSpec.CPU_THREAD_POLICY: cpu_thread_policy, - FlavorSpec.CPU_POLICY: 'dedicated'} - if min_vcpus is not None: - specs[FlavorSpec.MIN_VCPUS] = min_vcpus - - LOG.tc_step("Set following extra specs: {}".format(specs)) - nova_helper.set_flavor(flavor_id, **specs) - - LOG.tc_step("Attempt to boot a vm with the above flavor.") - code, vm_id, msg = vm_helper.boot_vm( - name='cpu_thread_{}'.format(cpu_thread_policy), - flavor=flavor_id, fail_ok=True, cleanup='function') - - if expt_err: - assert 1 == code, "Boot vm cli is not rejected. Details: " \ - "{}".format(msg) - else: - assert 0 == code, "Boot vm with isolate policy was unsuccessful. " \ - "Details: {}".format(msg) diff --git a/automated-pytest-suite/testcases/functional/nova/test_evacuate_vms.py b/automated-pytest-suite/testcases/functional/nova/test_evacuate_vms.py deleted file mode 100755 index 639da813..00000000 --- a/automated-pytest-suite/testcases/functional/nova/test_evacuate_vms.py +++ /dev/null @@ -1,318 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc.
-# -# SPDX-License-Identifier: Apache-2.0 -# - - -from pytest import fixture, skip, mark - -import keywords.host_helper -from utils.tis_log import LOG -from consts.timeout import VMTimeout -from consts.stx import VMStatus -from consts.reasons import SkipStorageBacking, SkipHypervisor - -from keywords import vm_helper, host_helper, nova_helper, cinder_helper, \ - system_helper, check_helper -from testfixtures.fixture_resources import ResourceCleanup - -from testfixtures.recover_hosts import HostsToRecover - - -@fixture(scope='module', autouse=True) -def update_quotas(add_admin_role_module): - LOG.fixture_step("Update instance and volume quota to at least 10 and " - "20 respectively") - vm_helper.ensure_vms_quotas() - - -@fixture(scope='module') -def hosts_per_backing(): - hosts_per_backend = host_helper.get_hosts_per_storage_backing() - return hosts_per_backend - - -def touch_files_under_vm_disks(vm_id, ephemeral, swap, vm_type, disks): - expt_len = 1 + int(bool(ephemeral)) + int(bool(swap)) + \ - (1 if 'with_vol' in vm_type else 0) - - LOG.info("\n--------------------------Auto mount non-root disks if any") - mounts = vm_helper.auto_mount_vm_disks(vm_id=vm_id, disks=disks) - assert expt_len == len(mounts) - - if bool(swap): - mounts.remove('none') - - LOG.info("\n--------------------------Create files under vm disks: " - "{}".format(mounts)) - file_paths, content = vm_helper.touch_files(vm_id=vm_id, file_dirs=mounts) - return file_paths, content - - -class TestDefaultGuest: - - @fixture(scope='class', autouse=True) - def skip_test_if_less_than_two_hosts(self): - if len(host_helper.get_up_hypervisors()) < 2: - skip(SkipHypervisor.LESS_THAN_TWO_HYPERVISORS) - - @mark.parametrize('storage_backing', [ - 'local_image', - 'remote', - ]) - def test_evacuate_vms_with_inst_backing(self, hosts_per_backing, - storage_backing): - """ - Test evacuate vms with various vm storage configs and host instance - backing configs - - Args: - storage_backing: storage backing under test - - Skip conditions: - - Less than two hosts configured with storage backing under test - - Setups: - - Add admin role to primary tenant (module) - - Test Steps: - - Create flv_rootdisk without ephemeral or swap disks, and set - storage backing extra spec - - Create flv_ephemswap with ephemeral AND swap disks, and set - storage backing extra spec - - Boot following vms on same host and wait for them to be - pingable from NatBox: - - Boot vm1 from volume with flavor flv_rootdisk - - Boot vm2 from volume with flavor flv_ephemswap - - Boot vm3 from image with flavor flv_rootdisk - - Boot vm4 from image with flavor flv_rootdisk, and attach a - volume to it - - Boot vm5 from image with flavor flv_ephemswap - - sudo reboot -f on vms host - - Ensure evacuation for all 5 vms is successful (vm host - changed, active state, pingable from NatBox) - - Teardown: - - Delete created vms, volumes, flavors - - Remove admin role from primary tenant (module) - - """ - hosts = hosts_per_backing.get(storage_backing, []) - if len(hosts) < 2: - skip(SkipStorageBacking.LESS_THAN_TWO_HOSTS_WITH_BACKING.format( - storage_backing)) - - target_host = hosts[0] - - LOG.tc_step("Create a flavor without ephemeral or swap disks") - flavor_1 = nova_helper.create_flavor('flv_rootdisk', - storage_backing=storage_backing)[1] - ResourceCleanup.add('flavor', flavor_1, scope='function') - - LOG.tc_step("Create another flavor with ephemeral and swap disks") - flavor_2 = nova_helper.create_flavor('flv_ephemswap', ephemeral=1, - swap=512, -
storage_backing=storage_backing)[1] - ResourceCleanup.add('flavor', flavor_2, scope='function') - - LOG.tc_step("Boot vm1 from volume with flavor flv_rootdisk and wait " - "for it pingable from NatBox") - vm1_name = "vol_root" - vm1 = vm_helper.boot_vm(vm1_name, flavor=flavor_1, source='volume', - avail_zone='nova', vm_host=target_host, - cleanup='function')[1] - - vms_info = {vm1: {'ephemeral': 0, - 'swap': 0, - 'vm_type': 'volume', - 'disks': vm_helper.get_vm_devices_via_virsh(vm1)}} - vm_helper.wait_for_vm_pingable_from_natbox(vm1) - - LOG.tc_step("Boot vm2 from volume with flavor flv_ephemswap and wait " - "for it pingable from NatBox") - vm2_name = "vol_ephemswap" - vm2 = vm_helper.boot_vm(vm2_name, flavor=flavor_2, source='volume', - avail_zone='nova', vm_host=target_host, - cleanup='function')[1] - - vm_helper.wait_for_vm_pingable_from_natbox(vm2) - vms_info[vm2] = {'ephemeral': 1, - 'swap': 512, - 'vm_type': 'volume', - 'disks': vm_helper.get_vm_devices_via_virsh(vm2)} - - LOG.tc_step("Boot vm3 from image with flavor flv_rootdisk and wait for " - "it pingable from NatBox") - vm3_name = "image_root" - vm3 = vm_helper.boot_vm(vm3_name, flavor=flavor_1, source='image', - avail_zone='nova', vm_host=target_host, - cleanup='function')[1] - - vm_helper.wait_for_vm_pingable_from_natbox(vm3) - vms_info[vm3] = {'ephemeral': 0, - 'swap': 0, - 'vm_type': 'image', - 'disks': vm_helper.get_vm_devices_via_virsh(vm3)} - - LOG.tc_step("Boot vm4 from image with flavor flv_rootdisk, attach a " - "volume to it and wait for it " - "pingable from NatBox") - vm4_name = 'image_root_attachvol' - vm4 = vm_helper.boot_vm(vm4_name, flavor_1, source='image', - avail_zone='nova', - vm_host=target_host, - cleanup='function')[1] - - vol = cinder_helper.create_volume(bootable=False)[1] - ResourceCleanup.add('volume', vol, scope='function') - vm_helper.attach_vol_to_vm(vm4, vol_id=vol, mount=False) - - vm_helper.wait_for_vm_pingable_from_natbox(vm4) - vms_info[vm4] = {'ephemeral': 0, - 'swap': 0, - 'vm_type': 'image_with_vol', - 'disks': vm_helper.get_vm_devices_via_virsh(vm4)} - - LOG.tc_step("Boot vm5 from image with flavor flv_ephemswap and wait " - "for it pingable from NatBox") - vm5_name = 'image_ephemswap' - vm5 = vm_helper.boot_vm(vm5_name, flavor_2, source='image', - avail_zone='nova', vm_host=target_host, - cleanup='function')[1] - vm_helper.wait_for_vm_pingable_from_natbox(vm5) - vms_info[vm5] = {'ephemeral': 1, - 'swap': 512, - 'vm_type': 'image', - 'disks': vm_helper.get_vm_devices_via_virsh(vm5)} - - LOG.tc_step("Check all VMs are booted on {}".format(target_host)) - vms_on_host = vm_helper.get_vms_on_host(hostname=target_host) - vms = [vm1, vm2, vm3, vm4, vm5] - assert set(vms) <= set(vms_on_host), "VMs booted on host: {}. " \ - "Current vms on host: {}".
\ - format(vms, vms_on_host) - - for vm_ in vms: - LOG.tc_step("Touch files under vm disks {}: " - "{}".format(vm_, vms_info[vm_])) - file_paths, content = touch_files_under_vm_disks(vm_, - **vms_info[vm_]) - vms_info[vm_]['file_paths'] = file_paths - vms_info[vm_]['content'] = content - - LOG.tc_step("Reboot target host {}".format(target_host)) - vm_helper.evacuate_vms(host=target_host, vms_to_check=vms, - ping_vms=True) - - LOG.tc_step("Check files after evacuation") - for vm_ in vms: - LOG.info("--------------------Check files for vm {}".format(vm_)) - check_helper.check_vm_files(vm_id=vm_, vm_action='evacuate', - storage_backing=storage_backing, - prev_host=target_host, **vms_info[vm_]) - vm_helper.ping_vms_from_natbox(vms) - - @fixture(scope='function') - def check_hosts(self): - storage_backing, hosts = \ - keywords.host_helper.get_storage_backing_with_max_hosts() - if len(hosts) < 2: - skip("at least two hosts with the same storage backing are " - "required") - - acceptable_hosts = [] - for host in hosts: - numa_num = len(host_helper.get_host_procs(host)) - if numa_num > 1: - acceptable_hosts.append(host) - if len(acceptable_hosts) == 2: - break - else: - skip("at least two hosts with multiple numa nodes are required") - - target_host = acceptable_hosts[0] - return target_host - - -class TestOneHostAvail: - @fixture(scope='class') - def get_zone(self, request, add_stxauto_zone): - if system_helper.is_aio_simplex(): - zone = 'nova' - return zone - - zone = 'stxauto' - storage_backing, hosts = \ - keywords.host_helper.get_storage_backing_with_max_hosts() - host = hosts[0] - LOG.fixture_step('Select host {} with backing ' - '{}'.format(host, storage_backing)) - nova_helper.add_hosts_to_aggregate(aggregate='stxauto', hosts=[host]) - - def remove_hosts_from_zone(): - nova_helper.remove_hosts_from_aggregate(aggregate='stxauto', - check_first=False) - - request.addfinalizer(remove_hosts_from_zone) - return zone - - @mark.sx_sanity - def test_reboot_only_host(self, get_zone): - """ - Test reboot of the only hypervisor on the system - - Args: - get_zone: fixture to create stxauto aggregate, to ensure vms can - only be on one host - - Setups: - - If more than 1 hypervisor: Create stxauto aggregate and add - one host to the aggregate - - Test Steps: - - Launch various vms on target host - - vm booted from cinder volume, - - vm booted from glance image, - - vm booted from glance image, and have an extra cinder - volume attached after launch, - - vm booted from cinder volume with ephemeral and swap disks - - sudo reboot -f the only host - - Check host is recovered - - Check vms are recovered and reachable from NatBox - - """ - zone = get_zone - - LOG.tc_step("Launch 5 vms in {} zone".format(zone)) - vms = vm_helper.boot_vms_various_types(avail_zone=zone, - cleanup='function') - target_host = vm_helper.get_vm_host(vm_id=vms[0]) - for vm in vms[1:]: - vm_host = vm_helper.get_vm_host(vm) - assert target_host == vm_host, "VMs are not booted on same host" - - LOG.tc_step("Reboot -f from target host {}".format(target_host)) - HostsToRecover.add(target_host) - host_helper.reboot_hosts(target_host) - - LOG.tc_step("Check vms are in Active state after host comes back up") - res, active_vms, inactive_vms = vm_helper.wait_for_vms_values( - vms=vms, value=VMStatus.ACTIVE, timeout=600) - - vms_host_err = [] - for vm in vms: - if vm_helper.get_vm_host(vm) != target_host: - vms_host_err.append(vm) - - assert not vms_host_err, "Following VMs are not on the same host {}: " \ - "{}\nVMs did not reach Active state: {}".
\ - format(target_host, vms_host_err, inactive_vms) - - assert not inactive_vms, "VMs did not reach Active state after " \ - "evacuated to other host: " \ - "{}".format(inactive_vms) - - LOG.tc_step("Check VMs are pingable from NatBox after evacuation") - vm_helper.wait_for_vm_pingable_from_natbox(vms, - timeout=VMTimeout.DHCP_RETRY) diff --git a/automated-pytest-suite/testcases/functional/nova/test_lock_with_vms.py b/automated-pytest-suite/testcases/functional/nova/test_lock_with_vms.py deleted file mode 100755 index dc5bb790..00000000 --- a/automated-pytest-suite/testcases/functional/nova/test_lock_with_vms.py +++ /dev/null @@ -1,183 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -import random - -from pytest import fixture, mark, skip - -import keywords.host_helper -from utils.tis_log import LOG -from consts.reasons import SkipStorageBacking -from consts.stx import VMStatus, SysType -from consts.timeout import VMTimeout -from testfixtures.recover_hosts import HostsToRecover -from keywords import vm_helper, nova_helper, host_helper, system_helper - - -@fixture(scope='module', autouse=True) -def update_instances_quota(): - vm_helper.ensure_vms_quotas() - - -def _boot_migrable_vms(storage_backing): - """ - Create vms with specific storage backing that can be live migrated - - Args: - storage_backing: 'local_image' or 'remote' - - Returns: (vms_info (list), flavors_created (list)) - vms_info : [(vm_id1, block_mig1), (vm_id2, block_mig2), ...] - - """ - vms_to_test = [] - flavors_created = [] - flavor_no_localdisk = nova_helper.create_flavor( - ephemeral=0, swap=0, storage_backing=storage_backing)[1] - flavors_created.append(flavor_no_localdisk) - - vm_1 = vm_helper.boot_vm(flavor=flavor_no_localdisk, source='volume')[1] - - block_mig_1 = False - vms_to_test.append((vm_1, block_mig_1)) - - LOG.info("Boot a VM from image if host storage backing is local_image or " - "remote...") - vm_2 = vm_helper.boot_vm(flavor=flavor_no_localdisk, source='image')[1] - block_mig_2 = True - vms_to_test.append((vm_2, block_mig_2)) - if storage_backing == 'remote': - LOG.info("Boot a VM from volume with local disks if storage backing " - "is remote...") - ephemeral_swap = random.choice([[0, 512], [1, 512], [1, 0]]) - flavor_with_localdisk = nova_helper.create_flavor( - ephemeral=ephemeral_swap[0], swap=ephemeral_swap[1])[1] - flavors_created.append(flavor_with_localdisk) - vm_3 = vm_helper.boot_vm(flavor=flavor_with_localdisk, - source='volume')[1] - block_mig_3 = False - vms_to_test.append((vm_3, block_mig_3)) - LOG.info("Boot a VM from image with volume attached if " - "storage backing is remote...") - vm_4 = vm_helper.boot_vm(flavor=flavor_no_localdisk, source='image')[1] - vm_helper.attach_vol_to_vm(vm_id=vm_4) - block_mig_4 = False - vms_to_test.append((vm_4, block_mig_4)) - - return vms_to_test, flavors_created - - -class TestLockWithVMs: - @fixture() - def target_hosts(self): - """ - Test fixture for test_lock_with_vms(). - Calculate target host(s) to perform lock based on storage backing of - vms_to_test, and live migrate suitable vms - to target host before test start. - """ - - storage_backing, target_hosts = \ - keywords.host_helper.get_storage_backing_with_max_hosts() - if len(target_hosts) < 2: - skip(SkipStorageBacking.LESS_THAN_TWO_HOSTS_WITH_BACKING. 
- format(storage_backing)) - - target_host = target_hosts[0] - if SysType.AIO_DX == system_helper.get_sys_type(): - target_host = system_helper.get_standby_controller_name() - - return storage_backing, target_host - - @mark.nightly - def test_lock_with_vms(self, target_hosts, no_simplex, add_admin_role_func): - """ - Test lock host with vms on it. - - Args: - target_hosts (list): targeted host(s) to lock that was prepared - by the target_hosts test fixture. - - Skip Conditions: - - Less than 2 hypervisor hosts on the system - - Prerequisites: - - Hosts storage backing are pre-configured to storage backing - under test - ie., 2 or more hosts should support the storage backing under - test. - Test Setups: - - Set instances quota to 10 if it was less than 8 - - Determine storage backing(s) under test. i.e.,storage backings - supported by at least 2 hosts on the system - - Create flavors with storage extra specs set based on storage - backings under test - - Create vms_to_test that can be live migrated using created flavors - - Determine target host(s) to perform lock based on which host(s) - have the most vms_to_test - - Live migrate vms to target host(s) - Test Steps: - - Lock target host - - Verify lock succeeded and vms status unchanged - - Repeat above steps if more than one target host - Test Teardown: - - Delete created vms and volumes - - Delete created flavors - - Unlock locked target host(s) - - """ - storage_backing, host = target_hosts - vms_num = 5 - vm_helper.ensure_vms_quotas(vms_num=vms_num) - - LOG.tc_step("Boot {} vms with various storage settings".format(vms_num)) - vms = vm_helper.boot_vms_various_types(cleanup='function', - vms_num=vms_num, - storage_backing=storage_backing, - target_host=host) - - LOG.tc_step("Attempt to lock target host {}...".format(host)) - HostsToRecover.add(host) - host_helper.lock_host(host=host, check_first=False, fail_ok=False, - swact=True) - - LOG.tc_step("Verify lock succeeded and vms still in good state") - vm_helper.wait_for_vms_values(vms=vms, fail_ok=False) - for vm in vms: - vm_host = vm_helper.get_vm_host(vm_id=vm) - assert vm_host != host, "VM is still on {} after lock".format(host) - - vm_helper.wait_for_vm_pingable_from_natbox( - vm_id=vm, timeout=VMTimeout.DHCP_RETRY) - - @mark.sx_nightly - def test_lock_with_max_vms_simplex(self, simplex_only): - vms_num = host_helper.get_max_vms_supported(host='controller-0') - vm_helper.ensure_vms_quotas(vms_num=vms_num) - - LOG.tc_step("Boot {} vms with various storage settings".format(vms_num)) - vms = vm_helper.boot_vms_various_types(cleanup='function', - vms_num=vms_num) - - LOG.tc_step("Lock vm host on simplex system") - HostsToRecover.add('controller-0') - host_helper.lock_host('controller-0') - - LOG.tc_step("Ensure vms are in {} state after locked host come " - "online".format(VMStatus.STOPPED)) - vm_helper.wait_for_vms_values(vms, value=VMStatus.STOPPED, - fail_ok=False) - - LOG.tc_step("Unlock host on simplex system") - host_helper.unlock_host(host='controller-0') - - LOG.tc_step("Ensure vms are Active and Pingable from NatBox") - vm_helper.wait_for_vms_values(vms, value=VMStatus.ACTIVE, - fail_ok=False, timeout=600) - for vm in vms: - vm_helper.wait_for_vm_pingable_from_natbox( - vm, timeout=VMTimeout.DHCP_RETRY) diff --git a/automated-pytest-suite/testcases/functional/nova/test_mempage_size.py b/automated-pytest-suite/testcases/functional/nova/test_mempage_size.py deleted file mode 100755 index 39ec35e7..00000000 --- 
a/automated-pytest-suite/testcases/functional/nova/test_mempage_size.py +++ /dev/null @@ -1,501 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -import re -import random - -from pytest import fixture, mark, skip, param - -import keywords.host_helper -from utils.tis_log import LOG -from consts.stx import FlavorSpec, ImageMetadata, NovaCLIOutput -from keywords import nova_helper, vm_helper, system_helper, cinder_helper, \ - host_helper, glance_helper - -MEMPAGE_HEADERS = ('app_total_4K', 'app_hp_avail_2M', 'app_hp_avail_1G') - - -def skip_4k_for_ovs(mempage_size): - if mempage_size in (None, 'any', 'small') and not system_helper.is_avs(): - skip("4K VM is unsupported by OVS by default") - - -@fixture(scope='module') -def prepare_resource(add_admin_role_module): - hypervisor = random.choice(host_helper.get_up_hypervisors()) - flavor = nova_helper.create_flavor(name='flavor-1g', ram=1024, - cleanup='module')[1] - vol_id = cinder_helper.create_volume('vol-mem_page_size', - cleanup='module')[1] - return hypervisor, flavor, vol_id - - -def _get_expt_indices(mempage_size): - if mempage_size in ('small', None): - expt_mempage_indices = (0,) - elif str(mempage_size) == '2048': - expt_mempage_indices = (1,) - elif str(mempage_size) == '1048576': - expt_mempage_indices = (2,) - elif mempage_size == 'large': - expt_mempage_indices = (1, 2) - else: - expt_mempage_indices = (0, 1, 2) - return expt_mempage_indices - - -def is_host_mem_sufficient(host, mempage_size=None, mem_gib=1): - host_mems_per_proc = host_helper.get_host_memories(host, - headers=MEMPAGE_HEADERS) - mempage_size = 'small' if not mempage_size else mempage_size - expt_mempage_indices = _get_expt_indices(mempage_size) - - for proc, mems_for_proc in host_mems_per_proc.items(): - pages_4k, pages_2m, pages_1g = mems_for_proc - mems_for_proc = (int(pages_4k * 4 / 1048576), - int(pages_2m * 2 / 1024), int(pages_1g)) - for index in expt_mempage_indices: - avail_g_for_memsize = mems_for_proc[index] - if avail_g_for_memsize >= mem_gib: - LOG.info("{} has sufficient {} mempages to launch {}G " - "vm".format(host, mempage_size, mem_gib)) - return True, host_mems_per_proc - - LOG.info("{} does not have sufficient {} mempages to launch {}G " - "vm".format(host, mempage_size, mem_gib)) - return False, host_mems_per_proc - - -def check_mempage_change(vm, host, prev_host_mems, mempage_size=None, - mem_gib=1, numa_node=None): - expt_mempage_indics = _get_expt_indices(mempage_size) - if numa_node is None: - numa_node = vm_helper.get_vm_numa_nodes_via_ps(vm_id=vm, host=host)[0] - - prev_host_mems = prev_host_mems[numa_node] - current_host_mems = host_helper.get_host_memories( - host, headers=MEMPAGE_HEADERS)[numa_node] - - if 0 in expt_mempage_indics: - if current_host_mems[1:] == prev_host_mems[1:] and \ - abs(prev_host_mems[0] - current_host_mems[ - 0]) <= mem_gib * 512 * 1024 / 4: - return - - for i in expt_mempage_indics: - if i == 0: - continue - - expt_pagecount = 1 if i == 2 else 1024 - if prev_host_mems[i] - expt_pagecount == current_host_mems[i]: - LOG.info("{} {} memory page reduced by {}GiB as " - "expected".format(host, MEMPAGE_HEADERS[i], mem_gib)) - return - - LOG.info("{} {} memory pages - Previous: {}, current: " - "{}".format(host, MEMPAGE_HEADERS[i], - prev_host_mems[i], current_host_mems[i])) - - assert 0, "{} available vm {} memory page count did not change as " \ - "expected".format(host, mempage_size) - - -@mark.parametrize('mem_page_size', [ - param('2048', 
marks=mark.domain_sanity), - param('large', marks=mark.p1), - param('small', marks=mark.domain_sanity), - param('1048576', marks=mark.p3), -]) -def test_vm_mem_pool_default_config(prepare_resource, mem_page_size): - """ - Test memory used by vm is taken from the expected memory pool - - Args: - prepare_resource (tuple): test fixture - mem_page_size (str): mem page size setting in flavor - - Setup: - - Create a flavor with 1G RAM (module) - - Create a volume with default values (module) - - Select a hypervisor to launch vm on - - Test Steps: - - Set memory page size flavor spec to given value - - Attempt to boot a vm with above flavor and a basic volume - - Verify the system is taking memory from the expected memory pool: - - If boot vm succeeded: - - Calculate the available/used memory change on the vm host - - Verify the memory is taken from memory pool specified via - mem_page_size - - If boot vm failed: - - Verify system attempted to take memory from expected pool, - but insufficient memory is available - - Teardown: - - Delete created vm - - Delete created volume and flavor (module) - - """ - hypervisor, flavor_1g, volume_ = prepare_resource - - LOG.tc_step("Set memory page size extra spec in flavor") - nova_helper.set_flavor(flavor_1g, - **{FlavorSpec.CPU_POLICY: 'dedicated', - FlavorSpec.MEM_PAGE_SIZE: mem_page_size}) - - LOG.tc_step("Check system host-memory-list before launch vm") - is_sufficient, prev_host_mems = is_host_mem_sufficient( - host=hypervisor, mempage_size=mem_page_size) - - LOG.tc_step("Boot a vm with mem page size spec - {}".format(mem_page_size)) - code, vm_id, msg = vm_helper.boot_vm('mempool_' + mem_page_size, flavor_1g, - source='volume', fail_ok=True, - vm_host=hypervisor, source_id=volume_, - cleanup='function') - - if not is_sufficient: - LOG.tc_step("Check boot vm rejected due to insufficient memory from " - "{} pool".format(mem_page_size)) - assert 1 == code, "{} vm launched successfully when insufficient " \ - "mempage configured on {}". 
\ - format(mem_page_size, hypervisor) - else: - LOG.tc_step("Check vm launches successfully and {} available mempages " - "change accordingly".format(hypervisor)) - assert 0 == code, "VM failed to launch with '{}' " \ - "mempages".format(mem_page_size) - check_mempage_change(vm_id, host=hypervisor, - prev_host_mems=prev_host_mems, - mempage_size=mem_page_size) - - -def get_hosts_to_configure(candidates): - hosts_selected = [None, None] - hosts_to_configure = [None, None] - max_4k, expt_p1_4k, max_1g, expt_p1_1g = \ - 1.5 * 1048576 / 4, 2.5 * 1048576 / 4, 1, 2 - for host in candidates: - host_mems = host_helper.get_host_memories(host, headers=MEMPAGE_HEADERS) - if 1 not in host_mems: - LOG.info("{} has only 1 processor".format(host)) - continue - - proc0_mems, proc1_mems = host_mems[0], host_mems[1] - p0_4k, p1_4k, p0_1g, p1_1g = \ - proc0_mems[0], proc1_mems[0], proc0_mems[2], proc1_mems[2] - - if p0_4k <= max_4k and p0_1g <= max_1g: - if not hosts_selected[1] and p1_4k >= expt_p1_4k and \ - p1_1g <= max_1g: - hosts_selected[1] = host - elif not hosts_selected[0] and p1_4k <= max_4k and \ - p1_1g >= expt_p1_1g: - hosts_selected[0] = host - - if None not in hosts_selected: - LOG.info("1G and 4k hosts already configured and selected: " - "{}".format(hosts_selected)) - break - else: - for i in range(len(hosts_selected)): - if hosts_selected[i] is None: - hosts_selected[i] = hosts_to_configure[i] = \ - list(set(candidates) - set(hosts_selected))[0] - LOG.info("Hosts selected: {}; To be configured: " - "{}".format(hosts_selected, hosts_to_configure)) - - return hosts_selected, hosts_to_configure - - -class TestConfigMempage: - MEM_CONFIGS = [None, 'any', 'large', 'small', '2048', '1048576'] - - @fixture(scope='class') - def add_1g_and_4k_pages(self, request, config_host_class, - skip_for_one_proc, add_stxauto_zone, - add_admin_role_module): - storage_backing, candidate_hosts = \ - keywords.host_helper.get_storage_backing_with_max_hosts() - - if len(candidate_hosts) < 2: - skip("Less than two up hosts have same storage backing") - - LOG.fixture_step("Check mempage configs for hypervisors and select " - "host to use or configure") - hosts_selected, hosts_to_configure = get_hosts_to_configure( - candidate_hosts) - - if set(hosts_to_configure) != {None}: - def _modify(host): - is_1g = True if hosts_selected.index(host) == 0 else False - proc1_kwargs = {'gib_1g': 2, 'gib_4k_range': (None, 2)} if \ - is_1g else {'gib_1g': 0, 'gib_4k_range': (2, None)} - kwargs = {'gib_1g': 0, 'gib_4k_range': (None, 2)}, proc1_kwargs - - actual_mems = host_helper._get_actual_mems(host=host) - LOG.fixture_step("Modify {} proc0 to have 0 of 1G pages and " - "<2GiB of 4K pages".format(host)) - host_helper.modify_host_memory(host, proc=0, - actual_mems=actual_mems, - **kwargs[0]) - LOG.fixture_step("Modify {} proc1 to have >=2GiB of {} " - "pages".format(host, '1G' if is_1g else '4k')) - host_helper.modify_host_memory(host, proc=1, - actual_mems=actual_mems, - **kwargs[1]) - - for host_to_config in hosts_to_configure: - if host_to_config: - config_host_class(host=host_to_config, modify_func=_modify) - LOG.fixture_step("Check mem pages for {} are modified " - "and updated successfully". - format(host_to_config)) - host_helper.wait_for_memory_update(host=host_to_config) - - LOG.fixture_step("Check host memories for {} after mem config " - "completed".format(hosts_selected)) - _, hosts_unconfigured = get_hosts_to_configure(hosts_selected) - assert not hosts_unconfigured[0], \ - "Failed to configure {}. 
Expt: proc0:1g<2,4k<2gib;" \ - "proc1:1g>=2,4k<2gib".format(hosts_unconfigured[0]) - assert not hosts_unconfigured[1], \ - "Failed to configure {}. Expt: proc0:1g<2,4k<2gib;" \ - "proc1:1g<2,4k>=2gib".format(hosts_unconfigured[1]) - - LOG.fixture_step('(class) Add hosts to stxauto aggregate: ' - '{}'.format(hosts_selected)) - nova_helper.add_hosts_to_aggregate(aggregate='stxauto', - hosts=hosts_selected) - - def remove_host_from_zone(): - LOG.fixture_step('(class) Remove hosts from stxauto aggregate: ' - '{}'.format(hosts_selected)) - nova_helper.remove_hosts_from_aggregate(aggregate='stxauto', - check_first=False) - - request.addfinalizer(remove_host_from_zone) - - return hosts_selected, storage_backing - - @fixture(scope='class') - def flavor_2g(self, add_1g_and_4k_pages): - hosts, storage_backing = add_1g_and_4k_pages - LOG.fixture_step("Create a 2G memory flavor to be used by mempage " - "testcases") - flavor = nova_helper.create_flavor(name='flavor-2g', ram=2048, - storage_backing=storage_backing, - cleanup='class')[1] - return flavor, hosts, storage_backing - - @fixture(scope='class') - def image_mempage(self): - LOG.fixture_step("(class) Create a glance image for mempage testcases") - image_id = glance_helper.create_image(name='mempage', - cleanup='class')[1] - return image_id - - @fixture() - def check_alarms(self, add_1g_and_4k_pages): - hosts, storage_backing = add_1g_and_4k_pages - host_helper.get_hypervisor_info(hosts=hosts) - for host in hosts: - host_helper.get_host_memories(host, wait_for_update=False) - - @fixture(params=MEM_CONFIGS) - def flavor_mem_page_size(self, request, flavor_2g): - flavor_id = flavor_2g[0] - mem_page_size = request.param - skip_4k_for_ovs(mem_page_size) - - if mem_page_size is None: - nova_helper.unset_flavor(flavor_id, FlavorSpec.MEM_PAGE_SIZE) - else: - nova_helper.set_flavor(flavor_id, - **{FlavorSpec.MEM_PAGE_SIZE: mem_page_size}) - - return mem_page_size - - @mark.parametrize('image_mem_page_size', MEM_CONFIGS) - def test_boot_vm_mem_page_size(self, flavor_2g, flavor_mem_page_size, - image_mempage, image_mem_page_size): - """ - Test boot vm with various memory page size setting in flavor and image. 
- - Args: - flavor_2g (tuple): flavor id of a flavor with ram set to 2G, - hosts configured and storage_backing - flavor_mem_page_size (str): memory page size extra spec value to - set in flavor - image_mempage (str): image id for tis image - image_mem_page_size (str): memory page metadata value to set in - image - - Setup: - - Create a flavor with 2G RAM (module) - - Get image id of tis image (module) - - Test Steps: - - Set/Unset flavor memory page size extra spec with given value ( - unset if None is given) - - Set/Unset image memory page size metadata with given value ( - unset if None is given) - - Attempt to boot a vm with above flavor and image - - Verify boot result based on the mem page size values in the - flavor and image - - Teardown: - - Delete vm if booted - - Delete created flavor (module) - - """ - skip_4k_for_ovs(image_mem_page_size) - - flavor_id, hosts, storage_backing = flavor_2g - - if image_mem_page_size is None: - glance_helper.unset_image(image_mempage, - properties=ImageMetadata.MEM_PAGE_SIZE) - expt_code = 0 - else: - glance_helper.set_image(image=image_mempage, - properties={ImageMetadata.MEM_PAGE_SIZE: - image_mem_page_size}) - if flavor_mem_page_size is None: - expt_code = 4 - elif flavor_mem_page_size.lower() in ['any', 'large']: - expt_code = 0 - else: - expt_code = 0 if flavor_mem_page_size.lower() == \ - image_mem_page_size.lower() else 4 - - LOG.tc_step("Attempt to boot a vm with flavor_mem_page_size: {}, and " - "image_mem_page_size: {}. And check return " - "code is {}.".format(flavor_mem_page_size, - image_mem_page_size, expt_code)) - - actual_code, vm_id, msg = vm_helper.boot_vm(name='mem_page_size', - flavor=flavor_id, - source='image', - source_id=image_mempage, - fail_ok=True, - avail_zone='stxauto', - cleanup='function') - - assert expt_code == actual_code, "Expect boot vm to return {}; " \ - "Actual result: {} with msg: " \ - "{}".format(expt_code, actual_code, - msg) - - if expt_code != 0: - assert re.search( - NovaCLIOutput.VM_BOOT_REJECT_MEM_PAGE_SIZE_FORBIDDEN, msg) - else: - assert vm_helper.get_vm_host(vm_id) in hosts, \ - "VM is not booted on hosts in stxauto zone" - LOG.tc_step("Ensure VM is pingable from NatBox") - vm_helper.wait_for_vm_pingable_from_natbox(vm_id) - - @mark.parametrize('mem_page_size', [ - param('1048576', marks=mark.priorities('domain_sanity', 'nightly')), - param('large'), - param('small', marks=mark.nightly), - ]) - def test_schedule_vm_mempage_config(self, flavor_2g, mem_page_size): - """ - Test memory used by vm is taken from the expected memory pool and the - vm was scheduled on the correct - host/processor - - Args: - flavor_2g (tuple): flavor id of a flavor with ram set to 2G, - hosts, storage_backing - mem_page_size (str): mem page size setting in flavor - - Setup: - - Create host aggregate - - Add two hypervisors to the host aggregate - - Host-0 configuration: - - Processor-0: - - Insufficient 1g pages to boot vm that requires 2g - - Insufficient 4k pages to boot vm that requires 2g - - Processor-1: - - Sufficient 1g pages to boot vm that requires 2g - - Insufficient 4k pages to boot vm that requires 2g - - Host-1 configuration: - - Processor-0: - - Insufficient 1g pages to boot vm that requires 2g - - Insufficient 4k pages to boot vm that requires 2g - - Processor-1: - - Insufficient 1g pages to boot vm that requires 2g - - Sufficient 4k pages to boot vm that requires 2g - - Configure a compute to have 4 1G hugepages (module) - - Create a flavor with 2G RAM (module) - - Create a volume with default values
(module) - - Test Steps: - - Set memory page size flavor spec to given value - - Boot a vm with above flavor and a basic volume - - Calculate the available/used memory change on the vm host - - Verify the memory is taken from 1G hugepage memory pool - - Verify the vm was booted on a supporting host - - Teardown: - - Delete created vm - - Delete created volume and flavor (module) - - Re-Configure the compute to have 0 hugepages (module) - - Revert host mem pages back to original - """ - skip_4k_for_ovs(mem_page_size) - - flavor_id, hosts_configured, storage_backing = flavor_2g - LOG.tc_step("Set memory page size extra spec in flavor") - nova_helper.set_flavor(flavor_id, - **{FlavorSpec.CPU_POLICY: 'dedicated', - FlavorSpec.MEM_PAGE_SIZE: mem_page_size}) - - host_helper.wait_for_hypervisors_up(hosts_configured) - prev_computes_mems = {} - for host in hosts_configured: - prev_computes_mems[host] = host_helper.get_host_memories( - host=host, headers=MEMPAGE_HEADERS) - - LOG.tc_step( - "Boot a vm with mem page size spec - {}".format(mem_page_size)) - - host_1g, host_4k = hosts_configured - code, vm_id, msg = vm_helper.boot_vm('mempool_configured', flavor_id, - fail_ok=True, - avail_zone='stxauto', - cleanup='function') - assert 0 == code, "VM is not successfully booted." - - instance_name, vm_host = vm_helper.get_vm_values( - vm_id, fields=[":instance_name", ":host"], strict=False) - vm_node = vm_helper.get_vm_numa_nodes_via_ps( - vm_id=vm_id, instance_name=instance_name, host=vm_host) - if mem_page_size == '1048576': - assert host_1g == vm_host, \ - "VM is not created on the configured host " \ - "{}".format(hosts_configured[0]) - assert vm_node == [1], "VM (huge) did not boot on the correct " \ - "processor" - elif mem_page_size == 'small': - assert host_4k == vm_host, "VM is not created on the configured " \ - "host {}".format(hosts_configured[1]) - assert vm_node == [1], "VM (small) did not boot on the correct " \ - "processor" - else: - assert vm_host in hosts_configured - - LOG.tc_step("Calculate memory change on vm host - {}".format(vm_host)) - check_mempage_change(vm_id, vm_host, - prev_host_mems=prev_computes_mems[vm_host], - mempage_size=mem_page_size, mem_gib=2, - numa_node=vm_node[0]) - - LOG.tc_step("Ensure vm is pingable from NatBox") - vm_helper.wait_for_vm_pingable_from_natbox(vm_id) diff --git a/automated-pytest-suite/testcases/functional/nova/test_migrate_vms.py b/automated-pytest-suite/testcases/functional/nova/test_migrate_vms.py deleted file mode 100755 index 89c70690..00000000 --- a/automated-pytest-suite/testcases/functional/nova/test_migrate_vms.py +++ /dev/null @@ -1,412 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - - -from pytest import fixture, mark, skip, param - -from utils.tis_log import LOG -from consts.stx import FlavorSpec, EventLogID -# Don't remove this import, used by eval() -from consts.cli_errs import LiveMigErr -from keywords import vm_helper, nova_helper, host_helper, cinder_helper, \ - glance_helper, check_helper, system_helper -from testfixtures.fixture_resources import ResourceCleanup - - -@fixture(scope='module') -def check_system(): - up_hypervisors = host_helper.get_up_hypervisors() - if len(up_hypervisors) < 2: - skip("Less than two up hypervisors") - - -@fixture(scope='module') -def hosts_per_stor_backing(check_system): - hosts_per_backing = host_helper.get_hosts_per_storage_backing() - LOG.fixture_step("Hosts per storage backing: {}".format(hosts_per_backing)) - - return hosts_per_backing - - -def touch_files_under_vm_disks(vm_id, ephemeral=0, swap=0, vm_type='volume', - disks=None): - expt_len = 1 + int(bool(ephemeral)) + int(bool(swap)) + ( - 1 if 'with_vol' in vm_type else 0) - - LOG.tc_step("Auto mount ephemeral, swap, and attached volume if any") - mounts = vm_helper.auto_mount_vm_disks(vm_id=vm_id, disks=disks) - assert expt_len == len(mounts) - - LOG.tc_step("Create files under vm disks: {}".format(mounts)) - file_paths, content = vm_helper.touch_files(vm_id=vm_id, file_dirs=mounts) - return file_paths, content - - -@mark.parametrize(('storage_backing', 'ephemeral', 'swap', 'cpu_pol', 'vcpus', - 'vm_type', 'block_mig'), [ - param('local_image', 0, 0, None, 1, 'volume', False, - marks=mark.p1), - param('local_image', 0, 0, 'dedicated', 2, 'volume', - False, marks=mark.p1), - ('local_image', 1, 0, 'dedicated', 2, 'volume', False), - ('local_image', 0, 512, 'shared', 1, 'volume', False), - ('local_image', 1, 512, 'dedicated', 2, 'volume', True), - # Supported from Newton - param('local_image', 0, 0, 'shared', 2, 'image', True, - marks=mark.domain_sanity), - param('local_image', 1, 512, 'dedicated', 1, 'image', - False, marks=mark.domain_sanity), - ('local_image', 0, 0, None, 2, 'image_with_vol', False), - ('local_image', 0, 0, 'dedicated', 1, 'image_with_vol', - True), - ('local_image', 1, 512, 'dedicated', 2, 'image_with_vol', - True), - ('local_image', 1, 512, 'dedicated', 1, 'image_with_vol', - False), - param('remote', 0, 0, None, 2, 'volume', False, - marks=mark.p1), - param('remote', 1, 0, 'dedicated', 1, 'volume', False, - marks=mark.p1), - param('remote', 1, 512, None, 1, 'image', False, - marks=mark.domain_sanity), - param('remote', 0, 512, 'dedicated', 2, 'image_with_vol', - False, marks=mark.domain_sanity), - ]) -def test_live_migrate_vm_positive(hosts_per_stor_backing, storage_backing, - ephemeral, swap, cpu_pol, vcpus, vm_type, - block_mig): - """ - Skip Condition: - - Less than two hosts have specified storage backing - - Test Steps: - - create flavor with specified vcpus, cpu_policy, ephemeral, swap, - storage_backing - - boot vm from specified boot source with above flavor - - (attach volume to vm if 'image_with_vol', specified in vm_type) - - Live migrate the vm with specified block_migration flag - - Verify VM is successfully live migrated to different host - - Teardown: - - Delete created vm, volume, flavor - - """ - if len(hosts_per_stor_backing.get(storage_backing, [])) < 2: - skip("Less than two hosts have {} storage backing".format( - storage_backing)) - - vm_id = _boot_vm_under_test(storage_backing, ephemeral, swap, cpu_pol, - vcpus, vm_type) - - prev_vm_host = vm_helper.get_vm_host(vm_id) - 
vm_helper.wait_for_vm_pingable_from_natbox(vm_id) - - vm_disks = vm_helper.get_vm_devices_via_virsh(vm_id) - file_paths, content = touch_files_under_vm_disks(vm_id=vm_id, - ephemeral=ephemeral, - swap=swap, vm_type=vm_type, - disks=vm_disks) - - LOG.tc_step("Live migrate VM and ensure it succeeded") - # block_mig = True if boot_source == 'image' else False - code, output = vm_helper.live_migrate_vm(vm_id, block_migrate=block_mig) - assert 0 == code, "Live migrate is not successful. Details: {}".format( - output) - - post_vm_host = vm_helper.get_vm_host(vm_id) - assert prev_vm_host != post_vm_host - - LOG.tc_step("Ensure vm is pingable from NatBox after live migration") - vm_helper.wait_for_vm_pingable_from_natbox(vm_id) - - LOG.tc_step("Check files after live migrate") - check_helper.check_vm_files(vm_id=vm_id, storage_backing=storage_backing, - ephemeral=ephemeral, swap=swap, - vm_type=vm_type, vm_action='live_migrate', - file_paths=file_paths, content=content, - disks=vm_disks, prev_host=prev_vm_host, - post_host=post_vm_host) - - -@mark.parametrize(('storage_backing', 'ephemeral', 'swap', 'vm_type', - 'block_mig', 'expt_err'), [ - param('local_image', 0, 0, 'volume', True, - 'LiveMigErr.BLOCK_MIG_UNSUPPORTED'), - param('remote', 0, 0, 'volume', True, - 'LiveMigErr.BLOCK_MIG_UNSUPPORTED'), - param('remote', 1, 0, 'volume', True, - 'LiveMigErr.BLOCK_MIG_UNSUPPORTED'), - param('remote', 0, 512, 'volume', True, - 'LiveMigErr.BLOCK_MIG_UNSUPPORTED'), - param('remote', 0, 512, 'image', True, - 'LiveMigErr.BLOCK_MIG_UNSUPPORTED'), - param('remote', 0, 0, 'image_with_vol', True, - 'LiveMigErr.BLOCK_MIG_UNSUPPORTED'), - ]) -def test_live_migrate_vm_negative(storage_backing, ephemeral, swap, vm_type, - block_mig, expt_err, - hosts_per_stor_backing, no_simplex): - """ - Skip Condition: - - Less than two hosts have specified storage backing - - Test Steps: - - create flavor with specified vcpus, cpu_policy, ephemeral, swap, - storage_backing - - boot vm from specified boot source with above flavor - - (attach volume to vm if 'image_with_vol', specified in vm_type) - - Live migrate the vm with specified block_migration flag - - Verify the live migration is rejected and vm remains on the same - host - - Teardown: - - Delete created vm, volume, flavor - - """ - if len(hosts_per_stor_backing.get(storage_backing, [])) < 2: - skip("Less than two hosts have {} storage backing".format( - storage_backing)) - - vm_id = _boot_vm_under_test(storage_backing, ephemeral, swap, None, 1, - vm_type) - vm_helper.wait_for_vm_pingable_from_natbox(vm_id) - - prev_vm_host = vm_helper.get_vm_host(vm_id) - vm_disks = vm_helper.get_vm_devices_via_virsh(vm_id) - file_paths, content = touch_files_under_vm_disks(vm_id=vm_id, - ephemeral=ephemeral, - swap=swap, vm_type=vm_type, - disks=vm_disks) - - LOG.tc_step( - "Live migrate VM and ensure it's rejected with proper error message") - # block_mig = True if boot_source == 'image' else False - code, output = vm_helper.live_migrate_vm(vm_id, block_migrate=block_mig) - assert 2 == code, "Expect live migration to fail as expected. Actual: " \ - "{}".format(output) - - # Removed below code because live-migration is async in newton - # assert 'Unexpected API Error'.lower() not in output.lower(), - # "'Unexpected API Error' returned."
- # - # # remove extra spaces in error message - # output = re.sub(r'\s\s+', " ", output) - # assert eval(expt_err) in output, "Expected error message {} is not in - # actual error message: {}".\ - # format(eval(expt_err), output) - - post_vm_host = vm_helper.get_vm_host(vm_id) - assert prev_vm_host == post_vm_host, "VM host changed even though live " \ - "migration request rejected." - - LOG.tc_step( - "Ensure vm is pingable from NatBox after live migration rejected") - vm_helper.wait_for_vm_pingable_from_natbox(vm_id) - - LOG.tc_step("Check files after live migrate attempt") - check_helper.check_vm_files(vm_id=vm_id, storage_backing=storage_backing, - ephemeral=ephemeral, swap=swap, - vm_type=vm_type, vm_action='live_migrate', - file_paths=file_paths, content=content, - disks=vm_disks, prev_host=prev_vm_host, - post_host=post_vm_host) - - -@mark.parametrize(('storage_backing', 'ephemeral', 'swap', 'cpu_pol', - 'vcpus', 'vm_type', 'resize'), [ - param('local_image', 0, 0, None, 1, 'volume', 'confirm'), - param('local_image', 0, 0, 'dedicated', 2, 'volume', 'confirm'), - param('local_image', 1, 0, 'shared', 2, 'image', 'confirm'), - param('local_image', 0, 512, 'dedicated', 1, 'image', 'confirm'), - param('local_image', 0, 0, None, 1, 'image_with_vol', 'confirm'), - param('remote', 0, 0, None, 2, 'volume', 'confirm'), - param('remote', 1, 0, None, 1, 'volume', 'confirm'), - param('remote', 1, 512, None, 1, 'image', 'confirm'), - param('remote', 0, 0, None, 2, 'image_with_vol', 'confirm'), - param('local_image', 0, 0, None, 2, 'volume', 'revert'), - param('local_image', 0, 0, 'dedicated', 1, 'volume', 'revert'), - param('local_image', 1, 0, 'shared', 2, 'image', 'revert'), - param('local_image', 0, 512, 'dedicated', 1, 'image', 'revert'), - param('local_image', 0, 0, 'dedicated', 2, 'image_with_vol', 'revert'), - param('remote', 0, 0, None, 2, 'volume', 'revert'), - param('remote', 1, 512, None, 1, 'volume', 'revert'), - param('remote', 0, 0, None, 1, 'image', 'revert'), - param('remote', 1, 0, None, 2, 'image_with_vol', 'revert'), -]) -def test_cold_migrate_vm(storage_backing, ephemeral, swap, cpu_pol, vcpus, - vm_type, resize, hosts_per_stor_backing, - no_simplex): - """ - Skip Condition: - - Less than two hosts have specified storage backing - - Test Steps: - - create flavor with specified vcpus, cpu_policy, ephemeral, swap, - storage_backing - - boot vm from specified boot source with above flavor - - (attach volume to vm if 'image_with_vol', specified in vm_type) - - Cold migrate vm - - Confirm/Revert resize as specified - - Verify VM is successfully cold migrated and confirmed/reverted resize - - Verify that instance files are not found on original host. (TC6621) - - Teardown: - - Delete created vm, volume, flavor - - """ - if len(hosts_per_stor_backing.get(storage_backing, [])) < 2: - skip("Less than two hosts have {} storage backing".format( - storage_backing)) - - vm_id = _boot_vm_under_test(storage_backing, ephemeral, swap, cpu_pol, - vcpus, vm_type) - prev_vm_host = vm_helper.get_vm_host(vm_id) - vm_helper.wait_for_vm_pingable_from_natbox(vm_id) - - vm_disks = vm_helper.get_vm_devices_via_virsh(vm_id) - file_paths, content = touch_files_under_vm_disks(vm_id=vm_id, - ephemeral=ephemeral, - swap=swap, vm_type=vm_type, - disks=vm_disks) - - LOG.tc_step("Cold migrate VM and {} resize".format(resize)) - revert = True if resize == 'revert' else False - code, output = vm_helper.cold_migrate_vm(vm_id, revert=revert) - assert 0 == code, "Cold migrate {} is not successful. 
-
-
-def _boot_vm_under_test(storage_backing, ephemeral, swap, cpu_pol, vcpus,
-                        vm_type):
-    LOG.tc_step(
-        "Create a flavor with {} vcpus, {}G ephemeral disk, {}M swap "
-        "disk".format(vcpus, ephemeral, swap))
-    flavor_id = nova_helper.create_flavor(
-        name='migration_test', ephemeral=ephemeral, swap=swap, vcpus=vcpus,
-        storage_backing=storage_backing, cleanup='function')[1]
-
-    if cpu_pol is not None:
-        specs = {FlavorSpec.CPU_POLICY: cpu_pol}
-
-        LOG.tc_step("Add following extra specs: {}".format(specs))
-        nova_helper.set_flavor(flavor=flavor_id, **specs)
-
-    boot_source = 'volume' if vm_type == 'volume' else 'image'
-    LOG.tc_step("Boot a vm from {}".format(boot_source))
-    vm_id = vm_helper.boot_vm('migration_test',
-                              flavor=flavor_id, source=boot_source,
-                              reuse_vol=False,
-                              cleanup='function')[1]
-
-    if vm_type == 'image_with_vol':
-        LOG.tc_step("Attach volume to vm")
-        vm_helper.attach_vol_to_vm(vm_id=vm_id, mount=False)
-
-    return vm_id
-
-
-@mark.parametrize(('guest_os', 'mig_type', 'cpu_pol'), [
-    ('ubuntu_14', 'live', 'dedicated'),
-    # Live migration with pinned VM may not be supported
-    param('ubuntu_14', 'cold', 'dedicated',
-          marks=mark.priorities('sanity', 'cpe_sanity')),
-    param('tis-centos-guest', 'live', None,
-          marks=mark.priorities('sanity', 'cpe_sanity')),
-    ('tis-centos-guest', 'cold', None),
-])
-def test_migrate_vm(check_system, guest_os, mig_type, cpu_pol):
-    """
-    Test migrate vms for given guest type
-    Args:
-        check_system:
-        guest_os:
-        mig_type:
-        cpu_pol:
-
-    Test Steps:
-        - Create a glance image from given guest type
-        - Create a vm from cinder volume using above image
-        - Live/cold migrate the vm
-        - Ensure vm moved to other host and in good state (active and
-          reachable from NatBox)
-
-    """
-    LOG.tc_step("Create a flavor with 1 vcpu")
-    flavor_id = \
-        nova_helper.create_flavor(name='{}-mig'.format(mig_type), vcpus=1,
-                                  root_disk=9, cleanup='function')[1]
-
-    if cpu_pol is not None:
-        specs = {FlavorSpec.CPU_POLICY: cpu_pol}
-        LOG.tc_step("Add following extra specs: {}".format(specs))
-        nova_helper.set_flavor(flavor=flavor_id, **specs)
-
-    LOG.tc_step("Create a volume from {} image".format(guest_os))
-    image_id =
glance_helper.get_guest_image(guest_os=guest_os) - - vol_id = cinder_helper.create_volume(source_id=image_id, size=9, - guest_image=guest_os)[1] - ResourceCleanup.add('volume', vol_id) - - LOG.tc_step("Boot a vm from above flavor and volume") - vm_id = vm_helper.boot_vm(guest_os, flavor=flavor_id, source='volume', - source_id=vol_id, cleanup='function')[1] - vm_helper.wait_for_vm_pingable_from_natbox(vm_id) - - if guest_os == 'ubuntu_14': - system_helper.wait_for_alarm_gone(alarm_id=EventLogID.CINDER_IO_CONGEST, - entity_id='cinder_io_monitor', - strict=False, timeout=300, - fail_ok=False) - - LOG.tc_step("{} migrate vm and check vm is moved to different host".format( - mig_type)) - prev_vm_host = vm_helper.get_vm_host(vm_id) - - if mig_type == 'live': - code, output = vm_helper.live_migrate_vm(vm_id) - if code == 1: - assert False, "No host to live migrate to. System may not be in " \ - "good state." - else: - vm_helper.cold_migrate_vm(vm_id) - - vm_host = vm_helper.get_vm_host(vm_id) - assert prev_vm_host != vm_host, "vm host did not change after {} " \ - "migration".format(mig_type) - - LOG.tc_step("Ping vm from NatBox after {} migration".format(mig_type)) - vm_helper.wait_for_vm_pingable_from_natbox(vm_id) diff --git a/automated-pytest-suite/testcases/functional/nova/test_nova_actions.py b/automated-pytest-suite/testcases/functional/nova/test_nova_actions.py deleted file mode 100755 index 05bf5178..00000000 --- a/automated-pytest-suite/testcases/functional/nova/test_nova_actions.py +++ /dev/null @@ -1,91 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -from pytest import mark, skip, param - -from utils.tis_log import LOG -from consts.stx import FlavorSpec, VMStatus -from consts.reasons import SkipStorageSpace - -from keywords import vm_helper, nova_helper, glance_helper, cinder_helper -from testfixtures.fixture_resources import ResourceCleanup - - -def id_gen(val): - if isinstance(val, list): - return '-'.join(val) - - -@mark.parametrize(('guest_os', 'cpu_pol', 'actions'), [ - param('tis-centos-guest', 'dedicated', ['pause', 'unpause'], - marks=mark.priorities('sanity', 'cpe_sanity', 'sx_sanity')), - param('ubuntu_14', 'shared', ['stop', 'start'], marks=mark.sanity), - param('ubuntu_14', 'dedicated', ['auto_recover'], marks=mark.sanity), - param('tis-centos-guest', 'dedicated', ['suspend', 'resume'], - marks=mark.priorities('sanity', 'cpe_sanity', 'sx_sanity')), -], ids=id_gen) -def test_nova_actions(guest_os, cpu_pol, actions): - """ - - Args: - guest_os: - cpu_pol: - actions: - - Test Steps: - - Create a glance image from given guest type - - Create a vm from cinder volume using above image with specified cpu - policy - - Perform given nova actions on vm - - Ensure nova operation succeeded and vm still in good state (active - and reachable from NatBox) - - """ - if guest_os == 'opensuse_12': - if not cinder_helper.is_volumes_pool_sufficient(min_size=40): - skip(SkipStorageSpace.SMALL_CINDER_VOLUMES_POOL) - - img_id = glance_helper.get_guest_image(guest_os=guest_os) - - LOG.tc_step("Create a flavor with 1 vcpu") - flavor_id = nova_helper.create_flavor(name=cpu_pol, vcpus=1, root_disk=9)[1] - ResourceCleanup.add('flavor', flavor_id) - - if cpu_pol is not None: - specs = {FlavorSpec.CPU_POLICY: cpu_pol} - LOG.tc_step("Add following extra specs: {}".format(specs)) - nova_helper.set_flavor(flavor=flavor_id, **specs) - - LOG.tc_step("Create a volume from {} image".format(guest_os)) - vol_id = \ - 
cinder_helper.create_volume(name='vol-' + guest_os, source_id=img_id,
-                                    guest_image=guest_os)[1]
-    ResourceCleanup.add('volume', vol_id)
-
-    LOG.tc_step("Boot a vm from above flavor and volume")
-    vm_id = vm_helper.boot_vm('nova_actions', flavor=flavor_id, source='volume',
-                              source_id=vol_id,
-                              cleanup='function')[1]
-
-    LOG.tc_step("Wait for VM pingable from NATBOX")
-    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
-
-    for action in actions:
-        if action == 'auto_recover':
-            LOG.tc_step(
-                "Set vm to error state and wait for auto recovery complete, "
-                "then verify ping from base vm over "
-                "management and data networks")
-            vm_helper.set_vm_state(vm_id=vm_id, error_state=True, fail_ok=False)
-            vm_helper.wait_for_vm_values(vm_id=vm_id, status=VMStatus.ACTIVE,
-                                         fail_ok=True, timeout=600)
-        else:
-            LOG.tc_step(
-                "Perform following action on vm {}: {}".format(vm_id, action))
-            vm_helper.perform_action_on_vm(vm_id, action=action)
-
-        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
diff --git a/automated-pytest-suite/testcases/functional/nova/test_resize_vm.py b/automated-pytest-suite/testcases/functional/nova/test_resize_vm.py
deleted file mode 100755
index 744a5635..00000000
--- a/automated-pytest-suite/testcases/functional/nova/test_resize_vm.py
+++ /dev/null
@@ -1,508 +0,0 @@
-#
-# Copyright (c) 2019 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-
-
-import time
-import math
-
-from pytest import fixture, mark, skip, param
-
-from utils.tis_log import LOG
-
-from keywords import vm_helper, nova_helper, host_helper, check_helper, \
-    glance_helper
-from testfixtures.fixture_resources import ResourceCleanup
-from consts.stx import FlavorSpec, GuestImages
-from consts.reasons import SkipStorageBacking
-
-
-def id_gen(val):
-    if isinstance(val, (tuple, list)):
-        val = '_'.join([str(val_) for val_ in val])
-    return val
-
-
-def touch_files_under_vm_disks(vm_id, ephemeral=0, swap=0, vm_type='volume',
-                               disks=None):
-    expt_len = 1 + int(bool(ephemeral)) + int(bool(swap)) + (
-        1 if 'with_vol' in vm_type else 0)
-
-    LOG.tc_step("Auto mount non-root disk(s)")
-    mounts = vm_helper.auto_mount_vm_disks(vm_id=vm_id, disks=disks)
-    assert expt_len == len(mounts)
-
-    if bool(swap):
-        mounts.remove('none')
-
-    LOG.tc_step("Create files under vm disks: {}".format(mounts))
-    file_paths, content = vm_helper.touch_files(vm_id=vm_id, file_dirs=mounts)
-    return file_paths, content
-
-
-def get_expt_disk_increase(origin_flavor, dest_flavor, boot_source,
-                           storage_backing):
-    root_diff = dest_flavor[0] - origin_flavor[0]
-    ephemeral_diff = dest_flavor[1] - origin_flavor[1]
-    swap_diff = (dest_flavor[2] - origin_flavor[2]) / 1024
-
-    if storage_backing == 'remote':
-        expected_increase = 0
-        expect_to_check = True
-    else:
-        if boot_source == 'volume':
-            expected_increase = ephemeral_diff + swap_diff
-            expect_to_check = False
-        else:
-            expected_increase = root_diff + ephemeral_diff + swap_diff
-            expect_to_check = expected_increase >= 2
-
-    return expected_increase, expect_to_check
-
-
-def get_disk_avail_least(host):
-    return \
-        host_helper.get_hypervisor_info(hosts=host,
-                                        field='disk_available_least')[host]
-
-
-def check_correct_post_resize_value(original_disk_value, expected_increase,
-                                    host, sleep=True):
-    if sleep:
-        time.sleep(65)
-
-    post_resize_value = get_disk_avail_least(host)
-    LOG.info(
-        "{} original_disk_value: {}. post_resize_value: {}. "
-        "expected_increase: {}".format(
-            host, original_disk_value, post_resize_value, expected_increase))
-    expt_post = original_disk_value + expected_increase
-
-    if expected_increase < 0:
-        # vm is on this host, backup image files may be created if they did
-        # not already exist
-        backup_val = math.ceil(
-            glance_helper.get_image_size(guest_os=GuestImages.DEFAULT['guest'],
-                                         virtual_size=False))
-        assert expt_post - backup_val <= post_resize_value <= expt_post
-    elif expected_increase > 0:
-        # vm moved away from this host, or resized to smaller disk on same
-        # host, backup files will stay
-        assert expt_post - 1 <= post_resize_value <= expt_post + 1, \
-            "disk_available_least on {} expected: {}+-1, actual: {}".format(
-                host, expt_post, post_resize_value)
-    else:
-        assert expt_post == post_resize_value, \
-            "{} disk_available_least value changed to {} unexpectedly".format(
-                host, post_resize_value)
-
-    return post_resize_value
-
-
-@fixture(scope='module')
-def get_hosts_per_backing(add_admin_role_module):
-    return host_helper.get_hosts_per_storage_backing()
-
-
-class TestResizeSameHost:
-    @fixture(scope='class')
-    def add_hosts_to_zone(self, request, add_stxauto_zone,
-                          get_hosts_per_backing):
-        hosts_per_backing = get_hosts_per_backing
-        avail_hosts = {key: vals[0] for key, vals in hosts_per_backing.items()
-                       if vals}
-
-        if not avail_hosts:
-            skip("No host in any storage aggregate")
-
-        nova_helper.add_hosts_to_aggregate(aggregate='stxauto',
-                                           hosts=list(avail_hosts.values()))
-
-        def remove_hosts_from_zone():
-            nova_helper.remove_hosts_from_aggregate(aggregate='stxauto',
-                                                    check_first=False)
-
-        request.addfinalizer(remove_hosts_from_zone)
-        return avail_hosts
-
-    @mark.parametrize(('storage_backing', 'origin_flavor', 'dest_flavor',
-                       'boot_source'), [
-        ('remote', (4, 0, 0), (5, 1, 512), 'image'),
-        ('remote', (4, 1, 512), (5, 2, 1024), 'image'),
-        ('remote', (4, 1, 512), (4, 1, 0), 'image'),
-        # LP1762423
-        param('remote', (4, 0, 0), (1, 1, 512), 'volume',
-              marks=mark.priorities('nightly', 'sx_nightly')),
-        ('remote', (4, 1, 512), (8, 2, 1024), 'volume'),
-        ('remote', (4, 1, 512), (0, 1, 0), 'volume'),
-        ('local_image', (4, 0, 0), (5, 1, 512), 'image'),
-        param('local_image', (4, 1, 512), (5, 2, 1024),
-              'image',
-              marks=mark.priorities('nightly', 'sx_nightly')),
-        ('local_image', (5, 1, 512), (5, 1, 0), 'image'),
-        ('local_image', (4, 0, 0), (5, 1, 512), 'volume'),
-        ('local_image', (4, 1, 512), (0, 2, 1024), 'volume'),
-        ('local_image', (4, 1, 512), (1, 1, 0), 'volume'),
-        # LP1762423
-    ], ids=id_gen)
-    def test_resize_vm_positive(self, add_hosts_to_zone, storage_backing,
-                                origin_flavor, dest_flavor, boot_source):
-        """
-        Test resizing disks of a vm
-        - Resize root disk is allowed, except to 0 for boot-from-image
-        - Resize to larger or same ephemeral is allowed
-        - Resize swap to any size is allowed, including removing it
-
-        Args:
-            storage_backing: The host storage backing required
-            origin_flavor: The flavor to boot the vm from, listed by GBs for
-                           root, ephemeral, and swap disks, i.e. for a
-                           system with a 2GB root disk, a 1GB ephemeral disk,
-                           and no swap disk: (2, 1, 0)
-            boot_source: Which source to boot the vm from, either 'volume' or
-                         'image'
-            add_hosts_to_zone
-            dest_flavor
-
-        Skip Conditions:
-            - No hosts exist with required storage backing.
-        Test setup:
-            - Put a single host of each backing in the stxauto zone to
-              prevent migration and instead force resize.
-            - Create two flavors based on origin_flavor and dest_flavor
-            - Create a volume or image to boot from.
-            - Boot VM with origin_flavor
-        Test Steps:
-            - Resize VM to dest_flavor with revert
-            - If vm is booted from image and has a non-remote backing,
-              check that the amount of disk space post-revert
-              is about the same as pre-revert  # TC5155
-            - Resize VM to dest_flavor with confirm
-            - If vm is booted from image and has a non-remote backing,
-              check that the amount of disk space post-confirm
-              reflects the increase in disk space taken up  # TC5155
-        Test Teardown:
-            - Delete created VM
-            - Delete created volume or image
-            - Delete created flavors
-            - Remove hosts from the stxauto zone
-            - Delete the stxauto zone
-
-        """
-        vm_host = add_hosts_to_zone.get(storage_backing, None)
-
-        if not vm_host:
-            skip(
-                SkipStorageBacking.NO_HOST_WITH_BACKING.format(storage_backing))
-
-        expected_increase, expect_to_check = get_expt_disk_increase(
-            origin_flavor, dest_flavor,
-            boot_source, storage_backing)
-        LOG.info("Expected_increase of vm compute occupancy is {}".format(
-            expected_increase))
-
-        LOG.tc_step('Create origin flavor')
-        origin_flavor_id = _create_flavor(origin_flavor, storage_backing)
-        vm_id = _boot_vm_to_test(boot_source, vm_host, origin_flavor_id)
-        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
-
-        vm_disks = vm_helper.get_vm_devices_via_virsh(vm_id)
-        root, ephemeral, swap = origin_flavor
-        if boot_source == 'volume':
-            root = GuestImages.IMAGE_FILES[GuestImages.DEFAULT['guest']][1]
-        file_paths, content = touch_files_under_vm_disks(vm_id=vm_id,
-                                                         ephemeral=ephemeral,
-                                                         swap=swap,
-                                                         vm_type=boot_source,
-                                                         disks=vm_disks)
-
-        if expect_to_check:
-            LOG.tc_step('Check initial disk usage')
-            original_disk_value = get_disk_avail_least(vm_host)
-            LOG.info("{} space left on compute".format(original_disk_value))
-
-        LOG.tc_step('Create destination flavor')
-        dest_flavor_id = _create_flavor(dest_flavor, storage_backing)
-        LOG.tc_step('Resize vm to dest flavor and revert')
-        vm_helper.resize_vm(vm_id, dest_flavor_id, revert=True, fail_ok=False)
-        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
-
-        swap_size = swap
-        LOG.tc_step("Check files after resize revert")
-        if storage_backing == 'remote' and swap and dest_flavor[2]:
-            swap_size = dest_flavor[2]
-
-        time.sleep(30)
-        prev_host = vm_helper.get_vm_host(vm_id)
-        check_helper.check_vm_files(vm_id=vm_id,
-                                    storage_backing=storage_backing, root=root,
-                                    ephemeral=ephemeral,
-                                    swap=swap_size, vm_type=boot_source,
-                                    vm_action=None, file_paths=file_paths,
-                                    content=content, disks=vm_disks,
-                                    check_volume_root=True)
-
-        LOG.tc_step('Resize vm to dest flavor and confirm')
-        vm_helper.resize_vm(vm_id, dest_flavor_id, revert=False, fail_ok=False)
-        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
-        post_host = vm_helper.get_vm_host(vm_id)
-        post_root, post_ephemeral, post_swap = dest_flavor
-        if boot_source == 'volume':
-            post_root = GuestImages.IMAGE_FILES[GuestImages.DEFAULT['guest']][1]
-        post_ephemeral = ephemeral if ephemeral else post_ephemeral
-        LOG.tc_step("Check files after resize attempt")
-        check_helper.check_vm_files(
-            vm_id=vm_id, storage_backing=storage_backing,
-            ephemeral=post_ephemeral,
-            swap=post_swap, vm_type=boot_source,
-            vm_action='resize', file_paths=file_paths,
-            content=content, prev_host=prev_host,
-            post_host=post_host, root=post_root,
-            disks=vm_disks,
-            post_disks=vm_helper.get_vm_devices_via_virsh(vm_id),
-            check_volume_root=True)
-
-    @mark.parametrize(
-        ('storage_backing', 'origin_flavor', 'dest_flavor', 'boot_source'), [
-            # Root disk can be resized, but cannot be 0
-            ('remote', (5, 0, 0), (0, 0, 0), 'image'),
-            # check
ephemeral disk cannot be smaller than origin - ('remote', (5, 2, 512), (5, 1, 512), 'image'), - # check ephemeral disk cannot be smaller than origin - ('remote', (1, 1, 512), (1, 0, 512), 'volume'), - # Root disk can be resized, but cannot be 0 - ('local_image', (5, 0, 0), (0, 0, 0), 'image'), - ('local_image', (5, 2, 512), (5, 1, 512), 'image'), - ('local_image', (5, 1, 512), (4, 1, 512), 'image'), - ('local_image', (5, 1, 512), (4, 1, 0), 'image'), - ('local_image', (1, 1, 512), (1, 0, 512), 'volume'), - ], ids=id_gen) - def test_resize_vm_negative(self, add_hosts_to_zone, storage_backing, - origin_flavor, dest_flavor, boot_source): - """ - Test resizing disks of a vm not allowed: - - Resize to smaller ephemeral flavor is not allowed - - Resize to zero disk flavor is not allowed (boot from image only) - - Args: - storage_backing: The host storage backing required - origin_flavor: The flavor to boot the vm from, listed by GBs for - root, ephemeral, and swap disks, i.e. for a - system with a 2GB root disk, a 1GB ephemeral disk, - and no swap disk: (2, 1, 0) - boot_source: Which source to boot the vm from, either 'volume' or - 'image' - Skip Conditions: - - No hosts exist with required storage backing. - Test setup: - - Put a single host of each backing in stxautozone to prevent - migration and instead force resize. - - Create two flavors based on origin_flavor and dest_flavor - - Create a volume or image to boot from. - - Boot VM with origin_flavor - Test Steps: - - Resize VM to dest_flavor with revert - - Resize VM to dest_flavor with confirm - Test Teardown: - - Delete created VM - - Delete created volume or image - - Delete created flavors - - Remove hosts from stxauto zone - - Delete stxauto zone - - """ - vm_host = add_hosts_to_zone.get(storage_backing, None) - - if not vm_host: - skip("No available host with {} storage backing".format( - storage_backing)) - - LOG.tc_step('Create origin flavor') - origin_flavor_id = _create_flavor(origin_flavor, storage_backing) - LOG.tc_step('Create destination flavor') - dest_flavor_id = _create_flavor(dest_flavor, storage_backing) - vm_id = _boot_vm_to_test(boot_source, vm_host, origin_flavor_id) - vm_helper.wait_for_vm_pingable_from_natbox(vm_id) - - vm_disks = vm_helper.get_vm_devices_via_virsh(vm_id) - root, ephemeral, swap = origin_flavor - file_paths, content = touch_files_under_vm_disks(vm_id=vm_id, - ephemeral=ephemeral, - swap=swap, - vm_type=boot_source, - disks=vm_disks) - - LOG.tc_step('Resize vm to dest flavor') - code, output = vm_helper.resize_vm(vm_id, dest_flavor_id, fail_ok=True) - vm_helper.wait_for_vm_pingable_from_natbox(vm_id) - - assert vm_helper.get_vm_flavor( - vm_id) == origin_flavor_id, 'VM did not keep origin flavor' - assert code > 0, "Resize VM CLI is not rejected" - - LOG.tc_step("Check files after resize attempt") - check_helper.check_vm_files(vm_id=vm_id, - storage_backing=storage_backing, root=root, - ephemeral=ephemeral, - swap=swap, vm_type=boot_source, - vm_action=None, file_paths=file_paths, - content=content, disks=vm_disks) - - -def _create_flavor(flavor_info, storage_backing): - root_disk = flavor_info[0] - ephemeral = flavor_info[1] - swap = flavor_info[2] - - flavor_id = nova_helper.create_flavor(ephemeral=ephemeral, swap=swap, - root_disk=root_disk, - storage_backing=storage_backing)[1] - ResourceCleanup.add('flavor', flavor_id) - return flavor_id - - -def _boot_vm_to_test(boot_source, vm_host, flavor_id): - LOG.tc_step('Boot a vm with given flavor') - vm_id = vm_helper.boot_vm(flavor=flavor_id, 
avail_zone='stxauto',
-                              vm_host=vm_host, source=boot_source,
-                              cleanup='function')[1]
-    return vm_id
-
-
-def get_cpu_count(hosts_with_backing):
-    LOG.fixture_step("Find suitable vm host and cpu count and backing of host")
-    compute_space_dict = {}
-
-    vm_host = hosts_with_backing[0]
-    numa0_used_cpus, numa0_total_cpus = \
-        host_helper.get_vcpus_per_proc(vm_host)[vm_host][0]
-    numa0_avail_cpus = len(numa0_total_cpus) - len(numa0_used_cpus)
-    for host in hosts_with_backing:
-        free_space = get_disk_avail_least(host)
-        compute_space_dict[host] = free_space
-        LOG.info("{} space on {}".format(free_space, host))
-
-    # increase quota
-    LOG.fixture_step("Increase quota of allotted cores")
-    vm_helper.ensure_vms_quotas(cores_num=int(numa0_avail_cpus + 30))
-
-    return vm_host, numa0_avail_cpus, compute_space_dict
-
-
-class TestResizeDiffHost:
-    @mark.parametrize('storage_backing', [
-        'local_image',
-        'remote',
-    ])
-    def test_resize_different_comp_node(self, storage_backing,
-                                        get_hosts_per_backing):
-        """
-        Test resizing disks of a larger vm onto a different compute node and
-        check hypervisor statistics to make sure the difference in disk
-        usage on both nodes involved is correctly reflected
-
-        Args:
-            storage_backing: The host storage backing required
-        Skip Conditions:
-            - 2 hosts must exist with required storage backing.
-        Test setup:
-            - For the backing under test, the setup will return the vm host
-              that the vm will initially be created on, the available cpu
-              count on that host, and the free disk space on each host with
-              that backing.
-        Test Steps:
-            - Create a flavor with a root disk size that is slightly larger
-              than the default image used to boot up the VM
-            - Create a VM with the aforementioned flavor
-            - Create a flavor with enough cpus to occupy the rest of the cpus
-              on the same host as the first VM
-            - Create another VM on the same host as the first VM
-            - Create a similar flavor to the first one, except that it has
-              one more vcpu
-            - Resize the first VM and confirm that it is on a different host
-            - Check hypervisor-show on both computes to make sure that disk
-              usage goes down on the original host and goes up on the new
-              host
-        Test Teardown:
-            - Delete created VMs
-            - Delete created flavors
-
-        """
-        hosts_with_backing = get_hosts_per_backing.get(storage_backing, [])
-        if len(hosts_with_backing) < 2:
-            skip(SkipStorageBacking.LESS_THAN_TWO_HOSTS_WITH_BACKING.format(
-                storage_backing))
-
-        origin_host, cpu_count, compute_space_dict = get_cpu_count(
-            hosts_with_backing)
-
-        root_disk_size = \
-            GuestImages.IMAGE_FILES[GuestImages.DEFAULT['guest']][1] + 5
-
-        # make vm (1 cpu)
-        LOG.tc_step("Create flavor with 1 cpu")
-        numa0_specs = {FlavorSpec.CPU_POLICY: 'dedicated', FlavorSpec.NUMA_0: 0}
-        flavor_1 = \
-            nova_helper.create_flavor(ephemeral=0, swap=0,
-                                      root_disk=root_disk_size, vcpus=1,
-                                      storage_backing=storage_backing)[1]
-        ResourceCleanup.add('flavor', flavor_1)
-        nova_helper.set_flavor(flavor_1, **numa0_specs)
-
-        LOG.tc_step("Boot a vm with above flavor")
-        vm_to_resize = \
-            vm_helper.boot_vm(flavor=flavor_1, source='image',
-                              cleanup='function', vm_host=origin_host)[1]
-        vm_helper.wait_for_vm_pingable_from_natbox(vm_to_resize)
-
-        # launch another vm
-        LOG.tc_step("Create a flavor to occupy vcpus")
-        occupy_amount = int(cpu_count) - 1
-        second_specs = {FlavorSpec.CPU_POLICY: 'dedicated',
-                        FlavorSpec.NUMA_0: 0}
-        flavor_2 = nova_helper.create_flavor(vcpus=occupy_amount,
-                                             storage_backing=storage_backing)[1]
-        ResourceCleanup.add('flavor', flavor_2)
-
nova_helper.set_flavor(flavor_2, **second_specs) - - LOG.tc_step("Boot a vm with above flavor to occupy remaining vcpus") - vm_2 = vm_helper.boot_vm(flavor=flavor_2, source='image', - cleanup='function', vm_host=origin_host)[1] - vm_helper.wait_for_vm_pingable_from_natbox(vm_2) - - LOG.tc_step('Check disk usage before resize') - prev_val_origin_host = get_disk_avail_least(origin_host) - LOG.info("{} space left on compute".format(prev_val_origin_host)) - - # create a larger flavor and resize - LOG.tc_step("Create a flavor that has an extra vcpu to force resize " - "to a different node") - resize_flavor = nova_helper.create_flavor( - ephemeral=0, swap=0, root_disk=root_disk_size, vcpus=2, - storage_backing=storage_backing)[1] - ResourceCleanup.add('flavor', resize_flavor) - nova_helper.set_flavor(resize_flavor, **numa0_specs) - - LOG.tc_step("Resize the vm and verify if it is on a different host") - vm_helper.resize_vm(vm_to_resize, resize_flavor) - new_host = vm_helper.get_vm_host(vm_to_resize) - assert new_host != origin_host, "vm did not change hosts " \ - "following resize" - - LOG.tc_step('Check disk usage on computes after resize') - if storage_backing == 'remote': - LOG.info("Compute disk usage change should be minimal for " - "remote storage backing") - root_disk_size = 0 - - check_correct_post_resize_value(prev_val_origin_host, root_disk_size, - origin_host) - - prev_val_new_host = compute_space_dict[new_host] - check_correct_post_resize_value(prev_val_new_host, -root_disk_size, - new_host, sleep=False) - vm_helper.wait_for_vm_pingable_from_natbox(vm_to_resize) diff --git a/automated-pytest-suite/testcases/functional/nova/test_vm_recovery.py b/automated-pytest-suite/testcases/functional/nova/test_vm_recovery.py deleted file mode 100755 index 36b22bd9..00000000 --- a/automated-pytest-suite/testcases/functional/nova/test_vm_recovery.py +++ /dev/null @@ -1,105 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -from pytest import mark, param - -from consts.stx import FlavorSpec, ImageMetadata, VMStatus -from keywords import nova_helper, vm_helper, glance_helper -from utils.tis_log import LOG - - -# Note auto recovery metadata in image will not be passed to vm if vm is booted -# from Volume - - -@mark.parametrize(('cpu_policy', 'flavor_auto_recovery', 'image_auto_recovery', - 'disk_format', 'container_format', 'expt_result'), [ - param(None, None, None, 'raw', 'bare', True, marks=mark.p1), - param(None, 'false', 'true', 'qcow2', 'bare', False, marks=mark.p3), - param(None, 'true', 'false', 'raw', 'bare', True, marks=mark.p3), - param('dedicated', 'false', None, 'raw', 'bare', False, marks=mark.p3), - param('dedicated', None, 'false', 'qcow2', 'bare', False, - marks=mark.domain_sanity), - param('shared', None, 'true', 'raw', 'bare', True, marks=mark.p3), - param('shared', 'false', None, 'raw', 'bare', False, marks=mark.p3), -]) -def test_vm_autorecovery(cpu_policy, flavor_auto_recovery, image_auto_recovery, - disk_format, container_format, expt_result): - """ - Test auto recovery setting in vm with various auto recovery settings in - flavor and image. - - Args: - cpu_policy (str|None): cpu policy to set in flavor - flavor_auto_recovery (str|None): None (unset) or true or false - image_auto_recovery (str|None): None (unset) or true or false - disk_format (str): - container_format (str): - expt_result (bool): Expected vm auto recovery behavior. - False > disabled, True > enabled. 
- - Test Steps: - - Create a flavor with auto recovery and cpu policy set to given - values in extra spec - - Create an image with auto recovery set to given value in metadata - - Boot a vm with the flavor and from the image - - Set vm state to error via nova reset-state - - Verify vm auto recovery behavior is as expected - - Teardown: - - Delete created vm, volume, image, flavor - - """ - - LOG.tc_step("Create a flavor with cpu_policy set to {} and auto_recovery " - "set to {} in extra spec".format(cpu_policy, - flavor_auto_recovery)) - flavor_id = nova_helper.create_flavor( - name='auto_recover_'+str(flavor_auto_recovery), cleanup='function')[1] - - # Add extra specs as specified - extra_specs = {} - if cpu_policy is not None: - extra_specs[FlavorSpec.CPU_POLICY] = cpu_policy - if flavor_auto_recovery is not None: - extra_specs[FlavorSpec.AUTO_RECOVERY] = flavor_auto_recovery - - if extra_specs: - nova_helper.set_flavor(flavor=flavor_id, **extra_specs) - - property_key = ImageMetadata.AUTO_RECOVERY - LOG.tc_step("Create an image with property auto_recovery={}, " - "disk_format={}, container_format={}". - format(image_auto_recovery, disk_format, container_format)) - if image_auto_recovery is None: - image_id = glance_helper.create_image(disk_format=disk_format, - container_format=container_format, - cleanup='function')[1] - else: - image_id = glance_helper.create_image( - disk_format=disk_format, container_format=container_format, - cleanup='function', **{property_key: image_auto_recovery})[1] - - LOG.tc_step("Boot a vm from image with auto recovery - {} and " - "using the flavor with auto recovery - " - "{}".format(image_auto_recovery, flavor_auto_recovery)) - vm_id = vm_helper.boot_vm(name='auto_recov', flavor=flavor_id, - source='image', source_id=image_id, - cleanup='function')[1] - vm_helper.wait_for_vm_pingable_from_natbox(vm_id) - - LOG.tc_step("Verify vm auto recovery is {} by setting vm to error " - "state.".format(expt_result)) - vm_helper.set_vm_state(vm_id=vm_id, error_state=True, fail_ok=False) - res_bool, actual_val = vm_helper.wait_for_vm_values( - vm_id=vm_id, status=VMStatus.ACTIVE, fail_ok=True, timeout=600) - - assert expt_result == res_bool, "Expected auto_recovery: {}. Actual vm " \ - "status: {}".format(expt_result, actual_val) - - LOG.tc_step("Ensure vm is pingable after auto recovery") - vm_helper.wait_for_vm_pingable_from_natbox(vm_id) diff --git a/automated-pytest-suite/testcases/functional/security/test_ima.py b/automated-pytest-suite/testcases/functional/security/test_ima.py deleted file mode 100755 index ae7e9801..00000000 --- a/automated-pytest-suite/testcases/functional/security/test_ima.py +++ /dev/null @@ -1,408 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -from pytest import mark, fixture, skip - -from consts.auth import HostLinuxUser -from consts.stx import EventLogID -from keywords import system_helper, common -from utils.clients.ssh import ControllerClient -from utils.tis_log import LOG - -files_to_delete = [] - - -@fixture(scope='module', autouse=True) -def ima_precheck(): - """ - This tests if the system is enabled with IMA. If not, we - should skip IMA-related tests. 
- """ - - LOG.info("Checking if IMA is enabled") - con_ssh = ControllerClient.get_active_controller() - - exitcode, output = con_ssh.exec_cmd("cat /proc/cmdline") - if "extended" not in output: - skip("IMA must be enabled in order to run this test") - else: - LOG.info("IMA is enabled") - - -@fixture(autouse=True) -def delete_files(request): - global files_to_delete - files_to_delete = [] - - def teardown(): - """ - Delete any created files on teardown. - """ - for filename in files_to_delete: - delete_file(filename) - - request.addfinalizer(teardown) - - -def checksum_compare(source_file, dest_file): - """ - This does a checksum comparison of two files. It returns True if the - checksum matches, and False if it doesn't. - """ - - con_ssh = ControllerClient.get_active_controller() - - LOG.info("Compare checksums on source file and destination file") - cmd = "getfattr -m . -d {}" - - exitcode, source_sha = con_ssh.exec_cmd(cmd.format(source_file)) - LOG.info("Raw source file checksum is: {}".format(source_sha)) - source_sha2 = source_sha.split("\n") - print("This is source_sha2: {}".format(source_sha2)) - assert source_sha2 != [''], "No signature on source file" - - if source_file.startswith("/"): - source_sha = source_sha2[2] + " " + source_sha2[3] - else: - source_sha = source_sha2[1] + " " + source_sha2[2] - - LOG.info("Extracted source file checksum: {}".format(source_sha)) - - exitcode, dest_sha = con_ssh.exec_cmd(cmd.format(dest_file)) - LOG.info("Raw symlink checksum is: {}".format(dest_sha)) - dest_sha2 = dest_sha.split("\n") - - if dest_file.startswith("/"): - dest_sha = dest_sha2[2] + " " + dest_sha2[3] - else: - dest_sha = dest_sha2[1] + " " + dest_sha2[2] - - LOG.info("Extracted destination file checksum: {}".format(dest_sha)) - - if source_sha == dest_sha: - return True - else: - return False - - -def create_symlink(source_file, dest_file, sudo=True): - """ - This creates a symlink given a source filename and a destination filename. - """ - LOG.info("Creating symlink to {} called {}".format(source_file, dest_file)) - cmd = "ln -sf {} {}".format(source_file, dest_file) - _exec_cmd(cmd=cmd, sudo=sudo, fail_ok=False) - - -def delete_file(filename, sudo=True): - """ - This deletes a file. - """ - LOG.info("Deleting file {}".format(filename)) - cmd = "rm {}".format(filename) - _exec_cmd(cmd=cmd, sudo=sudo, fail_ok=False) - - -def chmod_file(filename, permissions, sudo=True): - """ - This modifies permissions of a file - """ - LOG.info("Changing file permissions for {}".format(filename)) - cmd = "chmod {} {}".format(permissions, filename) - _exec_cmd(cmd=cmd, sudo=sudo, fail_ok=False) - - -def chgrp_file(filename, group, sudo=True): - """ - This modifies the group ownership of a file - """ - LOG.info("Changing file permissions for {}".format(filename)) - cmd = "chgrp {} {}".format(group, filename) - _exec_cmd(cmd=cmd, sudo=sudo, fail_ok=False) - - -def chown_file(filename, file_owner, sudo=True): - """ - This modifies the user that owns the file - """ - LOG.info("Changing the user that owns {}".format(filename)) - cmd = "chown {} {}".format(file_owner, filename) - _exec_cmd(cmd=cmd, sudo=sudo, fail_ok=False) - - -def copy_file(source_file, dest_file, sudo=True, preserve=True, cleanup=None): - """ - This creates a copy of a file - - Args: - source_file: - dest_file: - sudo (bool): whether to copy with sudo - cleanup (None|str): source or dest. 
Add source or dest file to files to
-            delete list
-        preserve (bool): whether to preserve attributes of source file
-
-    Returns:
-
-    """
-    LOG.info("Copy file {} preserving attributes".format('while' if preserve
-                                                         else 'without'))
-    preserve_str = '--preserve=all ' if preserve else ''
-    cmd = "cp {} {}{}".format(source_file, preserve_str, dest_file)
-    _exec_cmd(cmd, sudo=sudo, fail_ok=False)
-
-    if cleanup:
-        file_path = source_file if cleanup == 'source' else dest_file
-        files_to_delete.append(file_path)
-
-
-def move_file(source_file, dest_file, sudo=True):
-    """
-    This moves a file from source to destination
-    """
-    LOG.info("Move file from source to destination")
-    cmd = "mv {} {}".format(source_file, dest_file)
-    _exec_cmd(cmd=cmd, sudo=sudo, fail_ok=False)
-
-
-def create_and_execute(file_path, sudo=True):
-    LOG.tc_step("Create a new {} file and execute it".format('root' if sudo
-                                                             else 'non-root'))
-    cmd = "touch {}".format(file_path)
-    _exec_cmd(cmd=cmd, sudo=sudo, fail_ok=False)
-    files_to_delete.append(file_path)
-
-    LOG.info("Set file to be executable")
-    chmod_file(file_path, "755", sudo=sudo)
-
-    LOG.info("Append to copy of monitored file")
-    cmd = 'echo "ls" | {}tee -a {}'.format('sudo -S ' if sudo else '',
-                                           file_path)
-    _exec_cmd(cmd=cmd, sudo=False, fail_ok=False)
-
-    LOG.info("Execute created file")
-    _exec_cmd(file_path, sudo=sudo, fail_ok=False)
-
-
-@mark.priorities('nightly', 'sx_nightly')
-@mark.parametrize(('operation', 'file_path'), [
-    ('create_symlink', '/usr/sbin/ntpq'),
-    ('copy_and_execute', '/usr/sbin/ntpq'),
-    ('change_file_attributes', '/usr/sbin/ntpq'),
-    ('create_and_execute', 'new_nonroot_file')
-])
-def test_ima_no_event(operation, file_path):
-    """
-    This test validates that the following scenarios will not generate an
-    IMA event:
-        - create symlink of a monitored file
-        - copy a root file with the proper IMA signature, then execute it
-        - make file attribute changes, including chgrp, chown, chmod
-        - create and execute a file as sysadmin
-
-    Test Steps:
-        - Perform specified operation on given file
-        - Confirm IMA violation event is not triggered
-
-    Teardown:
-        - Delete created test file
-
-    Maps to TC_17684/TC_17644/TC_17640/TC_17902 from US105523
-    This test also covers TC_17665/T_16397 from US105523 (FM Event Log Updates)
-
-    """
-
-    start_time = common.get_date_in_format()
-    source_file = file_path
-    con_ssh = ControllerClient.get_active_controller()
-
-    LOG.tc_step("{} for {}".format(operation, source_file))
-    if operation == 'create_symlink':
-        dest_file = "my_symlink"
-        create_symlink(source_file, dest_file)
-        files_to_delete.append(dest_file)
-
-        checksum_match = checksum_compare(source_file, dest_file)
-        assert checksum_match, "SHA256 checksum should match source file and " \
-                               "the symlink but didn't"
-
-    elif operation == 'copy_and_execute':
-        dest_file = "/usr/sbin/TEMP"
-        copy_file(source_file, dest_file)
-        files_to_delete.append(dest_file)
-
-        LOG.info("Execute the copied file")
-        con_ssh.exec_sudo_cmd("{} -p".format(dest_file))
-
-    elif operation == 'change_file_attributes':
-        if HostLinuxUser.get_home() != 'sysadmin':
-            skip('sysadmin user is required to run this test')
-        dest_file = "/usr/sbin/TEMP"
-        copy_file(source_file, dest_file)
-        files_to_delete.append(dest_file)
-
-        LOG.info("Change permission of copy")
-        chmod_file(dest_file, "777")
-        LOG.info("Changing group ownership of file")
-        chgrp_file(dest_file, "sys_protected")
-        LOG.info("Changing file ownership")
-        chown_file(dest_file, "sysadmin:sys_protected")
-
-    elif operation == 'create_and_execute':
-        dest_file = "{}/TEMP".format(HostLinuxUser.get_home())
-        create_and_execute(file_path=dest_file, sudo=False)
-
-    LOG.tc_step("Ensure no IMA events are raised")
-    events_found = system_helper.wait_for_events(start=start_time,
-                                                 timeout=60, num=10,
-                                                 event_log_id=EventLogID.IMA,
-                                                 fail_ok=True, strict=False)
-
-    assert not events_found, "Unexpected IMA events found"
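checksum_compare() above relies on getfattr to read the security extended attributes that carry the IMA signature; a symlink or an attribute-preserving copy keeps that signature intact, which is why no event is expected for those operations. A trimmed-down standalone version of the same inspection (assumes the getfattr tool from the attr package is installed):

import subprocess

def ima_xattrs(path):
    # Same command the deleted helper runs: getfattr -m . -d <path>
    result = subprocess.run(['getfattr', '-m', '.', '-d', path],
                            capture_output=True, text=True)
    return result.stdout

# Files whose dumps contain identical security.ima lines carry the same
# signature.
print(ima_xattrs('/usr/sbin/ntpq'))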
-
-
-def _exec_cmd(cmd, con_ssh=None, sudo=False, fail_ok=True):
-    if not con_ssh:
-        con_ssh = ControllerClient.get_active_controller()
-
-    if sudo:
-        return con_ssh.exec_sudo_cmd(cmd, fail_ok=fail_ok)
-    else:
-        return con_ssh.exec_cmd(cmd, fail_ok=fail_ok)
-
-
-@mark.priorities('nightly', 'sx_nightly')
-@mark.parametrize(('operation', 'file_path'), [
-    ('edit_and_execute', '/usr/sbin/ntpq'),
-    ('append_and_execute', '/usr/sbin/logrotate'),
-    ('replace_library', '/lib64/libcrypt.so.1'),
-    ('create_and_execute', 'new_root_file')
-])
-def test_ima_event_generation(operation, file_path):
-    """
-    The following IMA violation scenarios are covered:
-        - append to/edit a monitored file, resulting in a change of the
-          file hash
-        - dynamic library changes
-        - create and execute a file as sysadmin
-
-    Test Steps:
-        - Perform specified file operations
-        - Check IMA violation event is logged
-
-    """
-
-    con_ssh = ControllerClient.get_active_controller()
-    start_time = common.get_date_in_format()
-
-    source_file = file_path
-    backup_file = None
-
-    if operation in ('edit_and_execute', 'append_and_execute'):
-        dest_file = "/usr/sbin/TEMP"
-        copy_file(source_file, dest_file, cleanup='dest')
-
-        if operation == 'edit_and_execute':
-            LOG.tc_step("Open copy of monitored file and save")
-            cmd = "vim {} '+:wq!'".format(dest_file)
-            con_ssh.exec_sudo_cmd(cmd, fail_ok=False)
-            execute_cmd = "{} -p".format(dest_file)
-        else:
-            LOG.tc_step("Append to copy of monitored file")
-            cmd = 'echo "output" | sudo -S tee -a /usr/sbin/TEMP'
-            con_ssh.exec_cmd(cmd, fail_ok=False)
-            LOG.tc_step("Execute modified file")
-            con_ssh.exec_sudo_cmd(dest_file)
-            execute_cmd = "{}".format(dest_file)
-
-        LOG.tc_step("Execute modified file")
-        con_ssh.exec_sudo_cmd(execute_cmd)
-
-    elif operation == 'replace_library':
-        backup_file = "/root/{}".format(source_file.split('/')[-1])
-        dest_file_nocsum = "/root/TEMP"
-
-        LOG.info("Backup source file {} to {}".format(source_file, backup_file))
-        copy_file(source_file, backup_file)
-        LOG.info("Copy the library without the checksum")
-        copy_file(source_file, dest_file_nocsum, preserve=False)
-        LOG.info("Replace the library with the unsigned one")
-        move_file(dest_file_nocsum, source_file)
-
-    elif operation == 'create_and_execute':
-        dest_file = "{}/TEMP".format(HostLinuxUser.get_home())
-        create_and_execute(file_path=dest_file, sudo=True)
-
-    LOG.tc_step("Check for IMA event")
-    ima_events = system_helper.wait_for_events(start=start_time,
-                                               timeout=60, num=10,
-                                               event_log_id=EventLogID.IMA,
-                                               state='log', severity='major',
-                                               fail_ok=True, strict=False)
-
-    if backup_file:
-        LOG.info("Restore backup file {} to {}".format(backup_file,
-                                                       source_file))
-        move_file(backup_file, source_file)
-
-    assert ima_events, "IMA event is not generated after {} on " \
-                       "{}".format(operation, file_path)
-
-
-# CHECK TEST PROCEDURE - FAILS in the middle
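Why the append and edit operations above must raise an IMA violation: any change to the file contents changes the digest over which the recorded signature was computed, so the stored security.ima value no longer verifies. A minimal illustration (not part of the suite):

import hashlib

original = hashlib.sha256(b"#!/bin/sh\nls\n").hexdigest()
tampered = hashlib.sha256(b"#!/bin/sh\nls\noutput\n").hexdigest()
assert original != tampered  # appended bytes invalidate the old signature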
-
-
-@mark.priorities('nightly', 'sx_nightly')
-def test_ima_keyring_protection():
-    """
-    This test validates that the IMA keyring is safe from user space attacks.
-
-    Test Steps:
-        - Extract the ima key ID and save it
-        - Attempt to add new keys to the keyring
-        - Attempt to change the key timeout
-        - Attempt to change the group and ownership of the key
-        - Attempt to delete the key
-
-    This test maps to TC_17667/T_16387 from US105523 (IMA keyring is safe from
-    user space attacks)
-
-    """
-
-    con_ssh = ControllerClient.get_active_controller()
-
-    LOG.info("Extract ima key ID")
-    exitcode, msg = con_ssh.exec_sudo_cmd("cat /proc/keys | grep _ima")
-    raw_key_id = msg.split(" ", maxsplit=1)[0]
-    key_id = "0x{}".format(raw_key_id)
-    LOG.info("Extracted key is: {}".format(key_id))
-
-    LOG.info("Attempting to add new keys to keyring")
-    exitcode, msg = con_ssh.exec_sudo_cmd("keyctl add keyring TEST stuff "
-                                          "{}".format(key_id))
-    assert exitcode != 0, \
-        "Key addition should have failed but instead succeeded"
-
-    LOG.info("Attempt to change the timeout on a key")
-    exitcode, msg = con_ssh.exec_sudo_cmd("keyctl timeout {} "
-                                          "3600".format(key_id))
-    assert exitcode != 0, \
-        "Key timeout modification should be rejected but instead succeeded"
-
-    LOG.info("Attempt to change the group of a key")
-    exitcode, msg = con_ssh.exec_sudo_cmd("keyctl chgrp {} 0".format(key_id))
-    assert exitcode != 0, \
-        "Key group modification should be rejected but instead succeeded"
-
-    LOG.info("Attempt to change the ownership of a key")
-    exitcode, msg = con_ssh.exec_sudo_cmd("keyctl chown {} 1875".format(key_id))
-    assert exitcode != 0, \
-        "Key ownership modification should be rejected but instead succeeded"
-
-    LOG.info("Attempt to delete a key")
-    exitcode, msg = con_ssh.exec_sudo_cmd("keyctl clear {}".format(key_id))
-    assert exitcode != 0, \
-        "Key deletion should be rejected but instead succeeded"
diff --git a/automated-pytest-suite/testcases/functional/security/test_kernel_modules.py b/automated-pytest-suite/testcases/functional/security/test_kernel_modules.py
deleted file mode 100755
index a48e69fa..00000000
--- a/automated-pytest-suite/testcases/functional/security/test_kernel_modules.py
+++ /dev/null
@@ -1,71 +0,0 @@
-#
-# Copyright (c) 2019 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-
-
-import re
-from pytest import mark
-
-from keywords import system_helper, host_helper
-from utils.tis_log import LOG
-
-
-@mark.nightly
-def test_kernel_module_signatures():
-    """
-    Test kernel modules are properly signed on all stx hosts.
-
-    Steps on each host:
-        - 'cat /proc/sys/kernel/tainted', ensure value is 4096.
-          If not, do the following steps:
-          - 'grep --color=never -i "module verification failed"
-            /var/log/kern.log' to find the modules that failed verification
-          - 'modinfo <module> | grep --color=never -E "sig|filename"'
-            to display signing info for each failed module
-
-    """
-    hosts = system_helper.get_hosts()
-    failed_hosts = {}
-
-    for host in hosts:
-        with host_helper.ssh_to_host(host) as host_ssh:
-            LOG.tc_step(
-                "Check for unsigned kernel modules on {}".format(host))
-            output = host_ssh.exec_cmd('cat /proc/sys/kernel/tainted',
-                                       fail_ok=False)[1]
-            output_binary = '{0:b}'.format(int(output))
-            unsigned_module_bit = '0'
-            # The 14th bit (from the right) flags an unsigned module
-            if len(output_binary) >= 14:
-                unsigned_module_bit = output_binary[-14]
-            if unsigned_module_bit != '0':
-                LOG.error("Kernel module verification(s) failed on {}. "
-                          "Collecting more info".format(host))
-
-                LOG.tc_step(
-                    "Check kern.log for modules with failed verification")
-                failed_modules = []
-                err_out = host_ssh.exec_cmd(
-                    'grep --color=never -i "module verification failed" '
-                    '/var/log/kern.log')[1]
-                for line in err_out.splitlines():
-                    module = re.findall(r'\] (.*): module verification failed',
-                                        line)[0].strip()
-                    if module not in failed_modules:
-                        failed_modules.append(module)
-
-                failed_hosts[host] = failed_modules
-                LOG.tc_step("Display signing info for {} failed kernel "
-                            "modules: {}".format(host, failed_modules))
-                for module in failed_modules:
-                    host_ssh.exec_cmd(
-                        'modinfo {} | grep --color=never -E '
-                        '"sig|filename"'.format(module))
-
-    assert not failed_hosts, "Kernel module signature verification " \
-                             "failed on: {}".format(failed_hosts)
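The taint check above reads /proc/sys/kernel/tainted and inspects the 14th bit from the right (value 8192, the 'E' taint flag on recent kernels), which is set once an unsigned module has been loaded; a value of exactly 4096 ('O', out-of-tree module only) therefore passes. A standalone restatement of that bit logic, with illustrative values:

def unsigned_module_loaded(tainted_value):
    # Bit 13 (the 14th bit) corresponds to output_binary[-14] in the
    # deleted test above.
    return bool(tainted_value & (1 << 13))

assert unsigned_module_loaded(4096) is False       # out-of-tree modules only
assert unsigned_module_loaded(4096 + 8192) is True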
diff --git a/automated-pytest-suite/testcases/functional/security/test_keystone_admin_psswd_change.py b/automated-pytest-suite/testcases/functional/security/test_keystone_admin_psswd_change.py
deleted file mode 100755
index 54aa7b73..00000000
--- a/automated-pytest-suite/testcases/functional/security/test_keystone_admin_psswd_change.py
+++ /dev/null
@@ -1,90 +0,0 @@
-#
-# Copyright (c) 2019 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-
-import time
-from pytest import mark, fixture, skip, param
-
-from keywords import host_helper, system_helper, keystone_helper, security_helper
-from utils.tis_log import LOG
-from consts.auth import Tenant
-from consts.reasons import SkipSysType
-
-
-@fixture()
-def _revert_admin_pw(request):
-    prev_pswd = Tenant.get('admin')['password']
-
-    def _revert():
-        # revert password
-        LOG.fixture_step("Reverting admin password to '{}'".format(prev_pswd))
-        keystone_helper.set_user('admin', password=prev_pswd,
-                                 auth_info=Tenant.get('admin_platform'))
-
-        LOG.fixture_step("Sleep for 180 seconds after admin password change")
-        time.sleep(180)
-        assert prev_pswd == security_helper.get_admin_password_in_keyring()
-    request.addfinalizer(_revert)
-
-
-@fixture(scope='module')
-def less_than_two_cons(no_openstack):
-    return len(system_helper.get_controllers()) < 2
-
-
-@mark.usefixtures('check_alarms')
-@mark.parametrize(('scenario'), [
-    # param('lock_standby_change_pswd', marks=mark.p1),
-    param('change_pswd_swact', marks=mark.p1),
-])
-# Test cases are disabled for now because admin password change is not ready yet
-def test_admin_password(scenario, less_than_two_cons, _revert_admin_pw):
-    """
-    Test the admin password change
-
-    Test Steps:
-        - lock standby controller, change password and unlock
-        - change password and swact
-        - check alarms
-
-    """
-    if 'swact' in scenario and less_than_two_cons:
-        skip(SkipSysType.LESS_THAN_TWO_CONTROLLERS)
-
-    host = system_helper.get_standby_controller_name()
-    assert host, "No standby controller on system"
-
-    if scenario == "lock_standby_change_pswd":
-        # lock the standby
-        LOG.tc_step("Attempting to lock {}".format(host))
-        res, out = host_helper.lock_host(host=host)
-        LOG.tc_step("Result of the lock was: {}".format(res))
-
-    # change password
-    prev_pswd = Tenant.get('admin')['password']
-    post_pswd = '!{}9'.format(prev_pswd)
-
-    LOG.tc_step('Changing admin password to {}'.format(post_pswd))
-    keystone_helper.set_user('admin', password=post_pswd, auth_info=Tenant.get(
-        'admin_platform'))
-
-    # assert "Warning: 'admin' password changed.
Please wait 5 minutes before Locking/Unlocking - # the controllers" in output - LOG.tc_step("Sleep for 180 seconds after admin password change") - time.sleep(180) - - LOG.tc_step("Check admin password is updated in keyring") - assert post_pswd == security_helper.get_admin_password_in_keyring() - - if scenario == "change_pswd_swact": - LOG.tc_step("Swact active controller") - host_helper.swact_host() - else: - LOG.tc_step("Unlock host {}".format(host)) - res = host_helper.unlock_host(host) - LOG.info("Unlock hosts result: {}".format(res)) - - LOG.tc_step("Check admin password is updated in keyring") - assert post_pswd == security_helper.get_admin_password_in_keyring() diff --git a/automated-pytest-suite/testcases/functional/security/test_keystone_user_password_rules.py b/automated-pytest-suite/testcases/functional/security/test_keystone_user_password_rules.py deleted file mode 100755 index 4b49c1ec..00000000 --- a/automated-pytest-suite/testcases/functional/security/test_keystone_user_password_rules.py +++ /dev/null @@ -1,625 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -import random -import re -import time -import copy -from string import ascii_lowercase, ascii_uppercase, digits, ascii_letters - -from pytest import mark, skip, fixture - -from consts.auth import Tenant -from keywords import keystone_helper, container_helper, kube_helper -from utils import cli -from utils.tis_log import LOG -from utils.clients.ssh import ControllerClient - - -SPECIAL_CHARACTERS = r'!@#$%^&*()<>{}+=_\\\[\]\-?|~`,.;:' -MIN_PASSWORD_LEN = 7 -MAX_PASSWORD_LEN = 128 - -# keystone.conf security_compliance configs -LOCKOUT_DURATION = 300 -FAILURE_ATTEMPTS = 5 -UNIQUE_LAST_COUNT = 2 - -# Test user -TEST_USER_NAME = 'stxtestuser' -TEST_PASSWORD = 'Password*Rule1Test' -USED_PASSWORDS = {} -WAIT_BETWEEN_CHANGE = 6 - -SIMPLE_WORD_DICTIONARY = ''' -and is being proof-read and supplemented by volunteers from around the -world. This is an unfunded project, and future enhancement of this -dictionary will depend on the efforts of volunteers willing to help build -this free resource into a comprehensive body of general information. New -definitions for missing words or words senses and longer explanatory notes, -as well as images to accompany the articles are needed. More modern -illustrative quotations giving recent examples of usage of the words in -their various senses will be very helpful, since most quotations in the -original 1913 dictionary are now well over 100 years old -''' - - -def save_used_password(keystone, password): - if keystone not in USED_PASSWORDS: - USED_PASSWORDS[keystone] = [password] - else: - used_passwords = USED_PASSWORDS[keystone] - used_passwords.append(password) - if len(used_passwords) > UNIQUE_LAST_COUNT: - used_passwords.pop(0) - - LOG.info('{} keystone user {} password saved. 
\nUsed passwords: {}'.format( - keystone, TEST_USER_NAME, USED_PASSWORDS[keystone])) - - -def is_last_used(password, keystone, depth=UNIQUE_LAST_COUNT): - used_passwords = USED_PASSWORDS.get(keystone, []) - if used_passwords: - if len(used_passwords) >= UNIQUE_LAST_COUNT: - return password in used_passwords[-1 * depth:] - else: - return password in used_passwords - - return False - - -def get_valid_password(keystone): - total_length = random.randint(MIN_PASSWORD_LEN, MAX_PASSWORD_LEN) - password = None - frequently_used_words = re.split(r'\W', SIMPLE_WORD_DICTIONARY.strip()) - - attempt = 0 - while attempt < 60: - attempt += 1 - left_count = total_length - lower_case_len = random.randint(1, left_count - 3) - left_count -= lower_case_len - - upper_case_len = random.randint(1, left_count - 2) - left_count -= upper_case_len - - digit_len = random.randint(1, left_count - 1) - left_count -= digit_len - - special_char_len = random.randint(1, left_count) - - lower_case = random.sample(ascii_lowercase, min(lower_case_len, len(ascii_lowercase))) - upper_case = random.sample(ascii_uppercase, min(upper_case_len, len(ascii_uppercase))) - password_digits = random.sample(digits, min(digit_len, len(digits))) - special_char = random.sample(SPECIAL_CHARACTERS, min(special_char_len, - len(SPECIAL_CHARACTERS))) - - actual_len = len(lower_case) + len(upper_case) + len(password_digits) + len(special_char) - - password = random.sample(lower_case + upper_case + password_digits + special_char, - min(actual_len, total_length)) - alphabet = ascii_lowercase + ascii_uppercase + digits + SPECIAL_CHARACTERS - - password = ''.join(password) - if actual_len != len(password): - LOG.warn('actual_len:{}, password len:{}, password:{}\n'.format( - actual_len, len(password), password)) - - if len(password) < total_length: - password += \ - ''.join(random.choice(alphabet) for _ in range(total_length - len(password) + 1)) - - list_of_chars = list(password) - - if (list_of_chars[0] == '{') or (list_of_chars[0] == '}') or (list_of_chars[0] == '-'): - list_of_chars[0] = 'a' - - if (list_of_chars[-1] == '{') or (list_of_chars[-1] == '}'): - list_of_chars[-1] = 'a' - - for index, char in enumerate(list_of_chars): - next_char = list_of_chars[index + 1] if index != len(list_of_chars) - 1 else '' - - if char == '{': - if next_char == '{' or next_char == '}': - list_of_chars[index + 1] = 'a' - list_of_chars[index - 1] = '{' - else: - list_of_chars[index - 1] = '{' - if char == '}': - if next_char != '{': - list_of_chars[index - 1] = '}' - - password = ''.join(list_of_chars) - - if not is_last_used(password, keystone=keystone) and password not in \ - frequently_used_words: - break - - if attempt < 60: - LOG.debug('Found valid password:\n{}\n'.format(password)) - else: - LOG.debug('Cannot found valid password, attempted:{}\n'.format(attempt)) - - return password - - -def multiple_attempts_generator(): - LOG.info('Attempt with wrong passwords multiple times') - invalid_password = ''.join(random.sample(ascii_letters, MIN_PASSWORD_LEN - 1)) - - while True: - count, keystone, is_admin, user_name = yield - current_password = USED_PASSWORDS[keystone][-1] - for n in range(int(count)): - verify_user(user_name, invalid_password, is_admin=is_admin, expect_fail=True, - keystone=keystone) - LOG.info('Command rejected with INVALID password as expected, count: {}'.format(n + 1)) - time.sleep(10) - - time.sleep(20) - - LOG.tc_step('Verify {} keystone user {} is locked out after {} failed ' - 'attempts'.format(keystone, user_name, count)) - 
verify_user(user_name, current_password, is_admin=is_admin, expect_fail=True, - keystone=keystone) - - LOG.tc_step('Wait for {} seconds and verify account is unlocked'.format( - LOCKOUT_DURATION + WAIT_BETWEEN_CHANGE)) - - time.sleep(LOCKOUT_DURATION + WAIT_BETWEEN_CHANGE) - verify_user(user_name, current_password, is_admin=is_admin, expect_fail=False, - keystone=keystone) - LOG.info('OK, {} keystone user is unlocked after {} seconds'.format(keystone, - LOCKOUT_DURATION)) - - yield - - -def special_char_generator(): - while True: - (args, keystone, _), expecting_pass = yield - - password = list(get_valid_password(keystone=keystone)) - - if not expecting_pass: - - special_to_letter = \ - dict(zip(SPECIAL_CHARACTERS, ascii_letters[:len(SPECIAL_CHARACTERS) + 1])) - password = \ - ''.join(special_to_letter[c] if c in SPECIAL_CHARACTERS else c for c in password) - else: - while True: - password = get_valid_password(keystone=keystone) - if not is_last_used(password, keystone=keystone): - break - - yield password - - -def case_numerical_generator(): - while True: - (args, keystone, _), expecting_pass = yield - - password = list(get_valid_password(keystone=keystone)) - - if not expecting_pass: - if args == 'lower': - password = ''.join(c.upper() if c.isalpha() else c for c in password - if not c.isalpha() or c.islower()) - elif args == 'upper': - password = ''.join(c.lower() if c.isalpha() else c for c in password - if not c.isalpha() or c.isupper()) - elif args == 'digit': - digit_to_letter = dict(zip('0123456789', 'abcdefghij')) - password = ''.join(digit_to_letter[c] if c.isdigit() else c for c in password) - else: - skip('Unknown args: case_numerical_generator: user_name={}, args={}, ' - 'expecting_pass={}\n'.format(keystone, args, expecting_pass)) - return - - else: - while True: - password = get_valid_password(keystone=keystone) - if not is_last_used(password, keystone=keystone): - break - - yield password - - -def change_history_generator(): - while True: - (args, keystone, _), expecting_pass = yield - - used_passwords = USED_PASSWORDS[keystone] - if not expecting_pass: - if args == 'not_last_2': - password = used_passwords[0] - - elif args == '3_diff': - previous = used_passwords[-1] - total_to_change = random.randrange(0, 2) - rand_indice = random.sample(range(len(previous)), total_to_change) - new_chars = [] - for i in range(len(previous)): - if i in rand_indice: - while True: - new_char = random.choice(ascii_letters) - if new_char != previous[i]: - new_chars.append(new_char) - break - else: - new_chars.append(previous[i]) - password = ''.join(new_chars) - - elif args == 'reversed': - password = ''.join(used_passwords[-1::-1]) - - else: - password = '' - skip('Unknown arg:{} for change_history_generator'.format(args)) - - else: - while True: - password = get_valid_password(keystone=keystone) - if password not in used_passwords: - break - - yield password - - -def length_generator(): - while True: - (args, keystone, _), expecting_pass = yield - - password = '' - for _ in range(30): - password = get_valid_password(keystone=keystone) - - if not expecting_pass: - password = password[:random.randint(1, MIN_PASSWORD_LEN - 1)] - break - - if not is_last_used(password, keystone=keystone): - break - - yield password - - -def verify_user(user_name, password, is_admin=True, expect_fail=False, keystone=None): - scenario = ' and expect failure' if expect_fail else '' - LOG.info('Run {} OpenStack command with {} role {}'.format( - keystone, 'admin' if is_admin else 'member', scenario)) - - dict_name = 
def verify_user(user_name, password, is_admin=True, expect_fail=False, keystone=None): - scenario = ' and expect failure' if expect_fail else '' - LOG.info('Run {} OpenStack command with {} role {}'.format( - keystone, 'admin' if is_admin else 'member', scenario)) - - dict_name = \ - '{}_platform'.format(user_name) if keystone == 'platform' else user_name - auth_info = Tenant.get(dict_name) - auth_info = copy.deepcopy(auth_info) - auth_info['password'] = password - if is_admin: - command = 'endpoint list' - code, output = cli.openstack(command, fail_ok=expect_fail, auth_info=auth_info) - else: - command = 'user show {}'.format(user_name) - code, output = cli.openstack(command, fail_ok=expect_fail, auth_info=auth_info) - - message = 'command:{}\nauth_info:{}\noutput:{}'.format(command, auth_info, output) - - if expect_fail: - assert 1 == code, "OpenStack command ran successfully while rejection is " \ - "expected: {}".format(message) - - -def change_user_password(user_name, password, keystone, by_admin=True, expect_fail=None): - scenario = 'Change {} keystone user password with rule {} unsatisfied'.format( - keystone, expect_fail) if expect_fail else \ - 'Change {} keystone user password to a valid password'.format(keystone) - - if by_admin and expect_fail == 'not_last_used': - scenario += ', but still allowed when operated by admin user' - expect_fail = None - - LOG.info(scenario) - - dict_name = '{}_platform'.format(user_name) if keystone == 'platform' else user_name - user_auth = Tenant.get(dict_name) - original_password = user_auth['password'] - - if by_admin: - admin_auth = Tenant.get('admin_platform') if keystone == 'platform' else Tenant.get('admin') - code, output = keystone_helper.set_user(user=user_name, password=password, project='admin', - auth_info=admin_auth, fail_ok=expect_fail) - else: - code, output = keystone_helper.set_current_user_password( - fail_ok=expect_fail, original_password=original_password, new_password=password, - auth_info=user_auth) - - if code == 0: - save_used_password(keystone, password=password) - - if expect_fail: - assert 1 == code, "{} keystone user password change accepted unexpectedly with " \ - "password rule violated: {}".format(keystone, password) - - LOG.info('{} keystone password change {} as expected'.format( - keystone, 'rejected' if expect_fail else 'accepted')) - - return code, output
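For reference, the six rule names registered in the PASSWORD_RULE_INFO table below reduce to local predicates roughly like these (a sketch only; the authoritative checks are keystone's security_compliance settings, not this snippet):

import re

RULE_CHECKS = {
    'minimum_7_chars': lambda p: len(p) >= 7,
    'at_least_1_lower_case': lambda p: bool(re.search(r'[a-z]', p)),
    'at_least_1_upper_case': lambda p: bool(re.search(r'[A-Z]', p)),
    'at_least_1_digit': lambda p: bool(re.search(r'[0-9]', p)),
    'at_least_1_special_case': lambda p: bool(re.search(r'[^0-9A-Za-z]', p)),
}

assert all(check('Passw0rd!') for check in RULE_CHECKS.values())
assert not RULE_CHECKS['minimum_7_chars']('Ab1!')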
PASSWORD_RULE_INFO = [ - ('minimum_7_chars', (length_generator, '')), - ('at_least_1_lower_case', (case_numerical_generator, 'lower')), - ('at_least_1_upper_case', (case_numerical_generator, 'upper')), - ('at_least_1_digit', (case_numerical_generator, 'digit')), - ('at_least_1_special_case', (special_char_generator, '')), - ('not_last_used', (change_history_generator, 'not_last_2')), -] - -KEYSTONES = ['platform', 'stx-openstack'] - - -@fixture(scope='module', params=KEYSTONES) -def create_test_user(request): - keystone = request.param - if keystone == 'stx-openstack' and not container_helper.is_stx_openstack_deployed(): - skip('stx-openstack is not applied') - - LOG.fixture_step("Creating {} keystone user {} for password rules testing".format( - keystone, TEST_USER_NAME)) - auth_info = Tenant.get('admin_platform') if keystone == 'platform' else Tenant.get('admin') - existing_users = keystone_helper.get_users(field='Name', auth_info=auth_info) - LOG.debug('Existing users before creation: {}'.format(existing_users)) - if TEST_USER_NAME in existing_users: - keystone_helper.delete_users(TEST_USER_NAME, auth_info=auth_info) - - keystone_helper.create_user(name=TEST_USER_NAME, password=TEST_PASSWORD, - auth_info=auth_info, project='admin') - existing_users = keystone_helper.get_users(field='Name', auth_info=auth_info) - LOG.debug('Existing users after creation: {}'.format(existing_users)) - save_used_password(keystone, TEST_PASSWORD) - keystone_helper.add_or_remove_role(add_=True, role='member', user=TEST_USER_NAME, - auth_info=auth_info, project='admin') - - def delete(): - LOG.fixture_step("Delete keystone test user {}".format(TEST_USER_NAME)) - keystone_helper.delete_users(TEST_USER_NAME, auth_info=auth_info) - - request.addfinalizer(delete) - - return keystone - - -class TestKeystonePassword: - @mark.parametrize(('role', 'scenario'), [ - ('admin_role', 'change_by_admin_user'), - ('admin_role', 'change_by_current_user'), - ('member_role', 'change_by_current_user'), - ('member_role', 'change_by_admin_user'), - ]) - def test_keystone_password_rules(self, create_test_user, role, scenario): - """ - Test keystone password rules when attempting to change the password - Args: - create_test_user: - role: - scenario (str): operator for the password change - - Setups: - - Create a platform/stx-openstack keystone user (class) - - Test Steps: - - Assign member/admin role to test user - - Ensure test user can run openstack command - - Attempt to change the test user password using current user or the default keystone - admin user - - Ensure the valid password is accepted while the invalid ones are rejected - - Teardown: - - Remove test user (class) - - """ - keystone = create_test_user - user_name = TEST_USER_NAME - is_admin = role == 'admin_role' - assign_role(keystone=keystone, user_name=user_name, role=role, is_admin=is_admin) - - random.seed() - by_admin = 'admin_user' in scenario - for item in PASSWORD_RULE_INFO: - rule, generator_args = item - - LOG.tc_step('Verify {} keystone password rule {} when {}'.format( - keystone, rule, scenario)) - password_gen, args = generator_args - - password_producer = password_gen() - password_producer.send(None) - send_args = (args, keystone, is_admin) - valid_pwd = password_producer.send((send_args, True)) - change_user_password(user_name, valid_pwd, by_admin=by_admin, keystone=keystone) - verify_user(user_name, valid_pwd, is_admin=is_admin, keystone=keystone) - - next(password_producer) - invalid_pwd = password_producer.send((send_args, False)) - wait = WAIT_BETWEEN_CHANGE + 1 - LOG.info('Wait for {} seconds to test {} violation'.format(wait, rule)) - time.sleep(wait) - change_user_password(user_name, invalid_pwd, expect_fail=rule, - by_admin=by_admin, keystone=keystone) - - LOG.info('Password rule {} verification passed'.format(rule))
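verify_user above exercises credentials through the openstack CLI; the same check can be sketched without the CLI using keystoneauth1 (the endpoint and domain values here are placeholders, not taken from this suite):

from keystoneauth1 import session
from keystoneauth1.identity import v3

auth = v3.Password(auth_url='http://keystone.example:5000/v3',  # placeholder endpoint
                   username='testuser', password='Passw0rd!',
                   project_name='admin',
                   user_domain_name='Default', project_domain_name='Default')
sess = session.Session(auth=auth)
token = sess.get_token()  # raises keystoneauth1 Unauthorized on bad credentials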
@fixture(scope='class') - def configure_keystone_lockout(self, create_test_user): - keystone = create_test_user - set_keystone_lockout(keystone, lockout_duration=LOCKOUT_DURATION, - failure_attempts=FAILURE_ATTEMPTS) - return keystone - - @mark.parametrize('role', [ - 'admin_role', - 'member_role' - ]) - def test_keystone_account_lockout(self, configure_keystone_lockout, role): - """ - Test keystone account lockout after repeated failed authentication attempts - Args: - configure_keystone_lockout: - role: - - Setups: - - Create a platform/stx-openstack keystone user (class) - - Check lockout config exists in keystone.conf (class) - - Set lockout configs to 5 failed attempts and 300 lockout duration for testing purposes - - Test Steps: - - Assign member/admin role to test user - - Attempt to run openstack command using incorrect passwords for 5 times - - Check test account is locked by running openstack command using correct password - - Wait for lockout duration - - Check user is unlocked - - Teardown: - - Remove test user (class) - - """ - keystone = configure_keystone_lockout - user_name = TEST_USER_NAME - is_admin = role == 'admin_role' - assign_role(keystone=keystone, user_name=user_name, role=role, is_admin=is_admin) - - random.seed() - LOG.tc_step('Set {} keystone lockout_duration to 300 and lockout_failure_attempts to 5 for ' - 'testing purposes'.format(keystone)) - set_keystone_lockout(keystone=keystone, lockout_duration=LOCKOUT_DURATION, - failure_attempts=5) - - LOG.tc_step('Attempt to run {} keystone command using incorrect password multiple times ' - 'and ensure account is locked out'.format(keystone)) - args = (5, keystone, is_admin, user_name) - password_producer = multiple_attempts_generator() - password_producer.send(None) - password_producer.send(args) - - -def assign_role(keystone, user_name, role, is_admin): - is_platform = keystone == 'platform' - - LOG.tc_step('Assign test user {} with {}'.format(user_name, role)) - admin_auth = Tenant.get('admin_platform') if is_platform else Tenant.get('admin') - keystone_helper.add_or_remove_role(add_=is_admin, role='admin', user=user_name, - auth_info=admin_auth, project='admin') - - user_dict_name = '{}_platform'.format(user_name) if is_platform else user_name - password = Tenant.get(user_dict_name)['password'] - LOG.tc_step('Run {} OpenStack command using {}/{} and ensure it works'.format( - keystone, user_name, password)) - verify_user(user_name, password, is_admin=is_admin, keystone=keystone) - - -def __set_non_platform_lockout(current_values, expt_values): - app_name = 'stx-openstack' - service = 'keystone' - namespace = 'openstack' - section = 'conf.keystone.security_compliance' - fields = ['lockout_duration', 'lockout_failure_attempts'] - kv_pairs = {} - for i in range(2): - if current_values[i] != expt_values[i]: - kv_pairs['{}.{}'.format(section, fields[i])] = expt_values[i] - - if not kv_pairs: - LOG.info('stx-openstack keystone lockout values already set to: {}'.format(expt_values)) - return - - container_helper.update_helm_override( - chart=service, namespace=namespace, reset_vals=False, - kv_pairs=kv_pairs) - - override_info = container_helper.get_helm_override_values( - chart=service, namespace=namespace, fields='user_overrides') - LOG.debug('override_info:{}'.format(override_info)) - - container_helper.apply_app( - app_name=app_name, check_first=False, applied_timeout=1800) - - post_values = get_lockout_values(keystone='stx-openstack') - assert expt_values == post_values, "lockout values were not set to the expected values " \ - "after helm override update" - LOG.info('stx-openstack keystone lockout values set successfully') - - -def __set_platform_lockout(current_values, expt_values): - conf_file = '/etc/keystone/keystone.conf' - fields = ['lockout_duration', 'lockout_failure_attempts'] - con_ssh = ControllerClient.get_active_controller() - for i in range(2): - if current_values[i] == expt_values[i]: - continue - - field = fields[i] - con_ssh.exec_sudo_cmd("sed -i 's/^{}.*=.*/{} = {}/g' " - "{}".format(field, field, expt_values[i], conf_file), fail_ok=False) - - post_values = get_lockout_values('platform') - assert expt_values == post_values, "platform keystone lockout values unexpected after sed" - - LOG.info("Restart platform keystone service after changing keystone config") - con_ssh.exec_sudo_cmd('sm-restart-safe service keystone', fail_ok=False) - time.sleep(30) - - -def set_keystone_lockout(keystone, lockout_duration=300, failure_attempts=5): - current_values = get_lockout_values(keystone=keystone) - expt_values = [lockout_duration, failure_attempts] - if current_values == expt_values: - return - - if keystone == 'platform': - __set_platform_lockout(current_values, expt_values) - else: - __set_non_platform_lockout(current_values, expt_values)
def get_lockout_values(keystone): - conf_file = '/etc/keystone/keystone.conf' - fields = ['lockout_duration', 'lockout_failure_attempts'] - section = 'security_compliance' - config_fields = {section: fields} - - LOG.info('Getting {} keystone account lockout values'.format(keystone)) - - if keystone == 'platform': - con_ssh = ControllerClient.get_active_controller() - code, out = con_ssh.exec_sudo_cmd('grep -E "^{}|^{}" {}'.format( - fields[0], fields[1], conf_file)) - assert code == 0, "platform keystone lockout is not configured" - for field in fields: - assert field in out, "platform keystone {} is not configured".format(field) - - values_dict = {} - for line in out.splitlines(): - key, val = line.split(sep='=') - values_dict[key.strip()] = int(val.strip()) - values = [values_dict[field] for field in fields] - - else: - configs = kube_helper.get_openstack_configs( - conf_file=conf_file, configs=config_fields, - label_app='keystone', label_component='api') - - values = [(item.get(section, fields[0], fallback=None), - item.get(section, fields[1], fallback=None)) - for item in list(configs.values())] - - assert len(set(values)) == 1, 'keystone.conf differs across keystone api pods' - values = values[0] - for value in values: - assert value is not None, "{} keystone account lockout is not " \ - "configured".format(keystone) - values = [int(val.strip()) for val in values] - - LOG.info("Lockout configs in {} keystone.conf: {}".format(keystone, values)) - return values
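get_lockout_values reads these settings over SSH (grep) or via kube_helper for the containerized keystone; a local-file sketch of the same parsing with configparser:

import configparser

SAMPLE_CONF = """
[security_compliance]
lockout_failure_attempts = 5
lockout_duration = 300
"""

parser = configparser.ConfigParser()
parser.read_string(SAMPLE_CONF)
values = [parser.getint('security_compliance', field)
          for field in ('lockout_duration', 'lockout_failure_attempts')]
print(values)  # [300, 5]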
diff --git a/automated-pytest-suite/testcases/functional/storage/ceph/test_ceph.py b/automated-pytest-suite/testcases/functional/storage/ceph/test_ceph.py deleted file mode 100755 index f15ff618..00000000 --- a/automated-pytest-suite/testcases/functional/storage/ceph/test_ceph.py +++ /dev/null @@ -1,115 +0,0 @@ -""" -This file contains CEPH-related storage test cases. -""" - -import time - -from pytest import mark, param - -from consts.stx import EventLogID -from keywords import host_helper, system_helper, storage_helper -from utils.tis_log import LOG - -PROC_RESTART_TIME = 30  # max number of 5-second polls while waiting for a process restart - - -# Tested on PV1. Runtime: 278.40 Date: Aug 2nd, 2017. Status: Pass - - -@mark.parametrize('monitor', [ - param('controller-0', marks=mark.nightly), - 'controller-1', - 'storage-0']) -# Tested on PV0. Runtime: 222.34 seconds. Date: Aug 4, 2017 Status: Pass -@mark.usefixtures('ceph_precheck') -def test_ceph_mon_process_kill(monitor): - """ - us69932_tc2_ceph_mon_process_kill from us69932_ceph_monitoring.odt - - Verify that ceph mon processes recover when they are killed. - - Args: - - Nothing - - Setup: - - Requires system with storage nodes - - Test Steps: - 1. Run CEPH pre-check fixture to check: - - system has storage nodes - - health of the ceph cluster is okay - - that we have OSDs provisioned - 2. Pick one ceph monitor and remove it from the quorum - 3. Kill the monitor process - 4. Check that the appropriate alarms are raised - 5. Restore the monitor to the quorum - 6. Check that the alarms clear - 7. Ensure the ceph monitor is restarted under a different pid - - Potential flaws: - 1. We're not checking if unexpected alarms are raised (TODO) - - Teardown: - - None - - """ - LOG.tc_step('Get process ID of ceph monitor') - mon_pid = storage_helper.get_mon_pid(monitor) - - with host_helper.ssh_to_host(monitor) as host_ssh: - with host_ssh.login_as_root() as root_ssh: - LOG.tc_step('Remove the monitor') - cmd = 'ceph mon remove {}'.format(monitor) - root_ssh.exec_cmd(cmd) - - LOG.tc_step('Stop the ceph monitor') - cmd = 'service ceph stop mon.{}'.format(monitor) - root_ssh.exec_cmd(cmd) - - LOG.tc_step('Check that ceph monitor failure alarm is raised') - system_helper.wait_for_alarm(alarm_id=EventLogID.STORAGE_DEGRADE, timeout=300) - - with host_helper.ssh_to_host(monitor) as host_ssh: - with host_ssh.login_as_root() as root_ssh: - LOG.tc_step('Get cluster fsid') - cmd = 'ceph fsid' - fsid = host_ssh.exec_cmd(cmd)[1] - ceph_conf = '/etc/ceph/ceph.conf' - - LOG.tc_step('Remove old ceph monitor directory') - cmd = 'rm -rf /var/lib/ceph/mon/ceph-{}'.format(monitor) - root_ssh.exec_cmd(cmd) - - LOG.tc_step('Re-add the monitor') - cmd = 'ceph-mon -i {} -c {} --mkfs --fsid {}'.format(monitor, ceph_conf, fsid) - root_ssh.exec_cmd(cmd) - - LOG.tc_step('Check the ceph storage alarm condition clears') - system_helper.wait_for_alarm_gone(alarm_id=EventLogID.STORAGE_DEGRADE, timeout=360) - - LOG.tc_step('Check the ceph-mon process is restarted with a different pid') - mon_pid2 = None - for i in range(0, PROC_RESTART_TIME): - mon_pid2 = storage_helper.get_mon_pid(monitor, fail_ok=True) - if mon_pid2 and mon_pid2 != mon_pid: - break - time.sleep(5) - - LOG.info('Old pid is {} and new pid is {}'.format(mon_pid, mon_pid2)) - msg = 'Process did not restart in time' - assert mon_pid2 and mon_pid2 != mon_pid, msg - - -# Tested on PV0. Runtime: 1899.93 seconds. Date: Aug 4, 2017. Status: Pass - - -# Tested on PV0. Runtime: 2770.23 seconds. Date: Aug 4, 2017 Status: Pass - - -# Tested on PV1. Runtime: 762.41 secs Date: Aug 2nd, 2017. Status: Pass - - -# Tested on PV1. Runtime: 1212.55 secs Date: Aug 2nd, 2017. Status: Pass - - -# Tested on PV0. Runtime: 58.82 seconds. Status: Pass Date: Aug 8, 2017
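The pid check closing test_ceph_mon_process_kill is a generic poll-until-respawned pattern; a standalone sketch (get_pid stands in for storage_helper.get_mon_pid):

import time

def wait_for_new_pid(get_pid, old_pid, attempts=30, interval=5):
    """Poll until the process reappears under a different pid; return None on timeout."""
    for _ in range(attempts):
        new_pid = get_pid()
        if new_pid and new_pid != old_pid:
            return new_pid
        time.sleep(interval)
    return None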
diff --git a/automated-pytest-suite/testcases/functional/storage/conftest.py b/automated-pytest-suite/testcases/functional/storage/conftest.py deleted file mode 100755 index 157d7f85..00000000 --- a/automated-pytest-suite/testcases/functional/storage/conftest.py +++ /dev/null @@ -1,3 +0,0 @@ -from testfixtures.resource_mgmt import * -from testfixtures.resource_create import * -from testfixtures.config_host import * diff --git a/automated-pytest-suite/testcases/functional/storage/test_storage_vm_migration.py b/automated-pytest-suite/testcases/functional/storage/test_storage_vm_migration.py deleted file mode 100755 index a27b7b56..00000000 --- a/automated-pytest-suite/testcases/functional/storage/test_storage_vm_migration.py +++ /dev/null @@ -1,521 +0,0 @@ -# -# Copyright (c) 2019 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -import time - -from pytest import fixture, skip, mark - -from consts.stx import VMStatus, GuestImages -from keywords import host_helper, vm_helper, cinder_helper, glance_helper, \ - system_helper, network_helper -from testfixtures.fixture_resources import ResourceCleanup -from utils import table_parser, exceptions -from utils.tis_log import LOG - - -@fixture(scope='module', autouse=True) -def check_system(): - if not cinder_helper.is_volumes_pool_sufficient(min_size=80): - skip("Cinder volume pool size is smaller than 80G") - - if len(host_helper.get_up_hypervisors()) < 2: - skip("at least two computes are required") - - if len(host_helper.get_storage_backing_with_max_hosts()[1]) < 2: - skip("at least two hosts with the same storage backing are required") - - -@fixture(scope='function', autouse=True) -def pre_alarm_(): - """ - Test fixture to get pre-test existing alarm list. - Args: None - - Returns: list of alarms - - """ - pre_alarms = system_helper.get_alarms_table() - pre_list = table_parser.get_all_rows(pre_alarms) - # Time stamps are removed before comparing alarms with post test alarms. - # The time stamp is the last item in each alarm row. - for n in pre_list: - n.pop() - return pre_list - - -@fixture(scope='module') -def image_(): - """ - Test fixture to get the guest image. - Args: - - Returns: the guest image id - - """ - return glance_helper.get_image_id_from_name() - - -@fixture(scope='function') -def volumes_(image_): - """ - Test fixture to create two large cinder volumes with sizes of 20 and 40 GB. - Args: - image_: the guest image_id - - Returns: list of volume dicts as follows: - {'id': <volume_id>, - 'display_name': <vol_inst1 or vol_inst2>, - 'size': <20 or 40> - } - """ - - volumes = [] - cinder_params = [{'name': 'vol_inst1', - 'size': 20}, - {'name': 'vol_inst2', - 'size': 40}] - - for param in cinder_params: - volume_id = \ - cinder_helper.create_volume(name=param['name'], source_id=image_, - size=param['size'])[1] - volume = { - 'id': volume_id, - 'display_name': param['name'], - 'size': param['size'] - } - volumes.append(volume) - ResourceCleanup.add('volume', volume['id'], scope='function') - - return volumes - - -@fixture(scope='function') -def vms_(volumes_): - """ - Test fixture to boot one vm from each of the volumes created by the volumes_ fixture. - Args: - volumes_: list of two large volume dicts created by the volumes_ fixture - - Returns: list of vm dicts as follows: - {'id': <vm_id>, - 'display_name': <test_inst1 or test_inst2> - } - """ - vms = [] - vm_names = ['test_inst1', 'test_inst2'] - index = 0 - for vol_params in volumes_: - instance_name = vm_names[index] - vm_id = vm_helper.boot_vm(name=instance_name, source='volume', - source_id=vol_params['id'], - cleanup='function')[ - 1]  # , user_data=get_user_data_file())[1] - vm = { - 'id': vm_id, - 'display_name': instance_name, - } - vms.append(vm) - index += 1 - return vms
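The pre_alarm_ fixture above drops the trailing timestamp column so pre/post alarm tables compare stably; in miniature, with hypothetical rows:

pre = [['100.114', 'NTP alarm', '2019-06-01T10:00']]
post = [['100.114', 'NTP alarm', '2019-06-01T11:30'],
        ['700.004', 'VM stopped', '2019-06-01T11:31']]

strip_ts = lambda rows: [row[:-1] for row in rows]
new_alarms = [row for row in strip_ts(post) if row not in strip_ts(pre)]
print(new_alarms)  # [['700.004', 'VM stopped']]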
@mark.storage_sanity -def test_vm_with_a_large_volume_live_migrate(vms_, pre_alarm_): - """ - Test instantiating vms with large volumes (20 GB and 40 GB) and live migrating them: - Args: - vms_ (list): vms created by vms_ fixture - pre_alarm_ (list): alarm list obtained by pre_alarm_ fixture - - Test Setups: - - get tenant1 and management networks which are already created for lab - setup - - get or create a "small" flavor - - get the guest image id - - create two large volumes (20 GB and 40 GB) in cinder - - boot two vms (test_inst1, test_inst2) using the 20 GB and 40 GB volumes - respectively - - - Test Steps: - - Verify VM status is ACTIVE - - Validate that VMs boot, and that no timeouts or error status occur - - Verify the VM can be pinged from NATBOX - - Verify login to VM and rootfs (dev/vda) filesystem is rw mode - - Attempt to live migrate the VMs - - Validate that the VMs migrated and no errors or alarms are present - - Log into both VMs and validate that file systems are read-write - - Terminate VMs - - Skip conditions: - - less than two computes - - no storage node - - """ - for vm in vms_: - vm_id = vm['id'] - - LOG.tc_step( - "Checking VM status; VM Instance id is: {}......".format(vm_id)) - vm_state = vm_helper.get_vm_status(vm_id) - - assert vm_state == VMStatus.ACTIVE, 'VM {} state is {}; not in ' \ - 'ACTIVE state as expected' \ - .format(vm_id, vm_state) - - LOG.tc_step("Verify VM can be pinged from NAT box...") - rc, boot_time = check_vm_boot_time(vm_id) - assert rc, "VM is not pingable after {} seconds".format(boot_time) - - LOG.tc_step("Verify login to VM and check filesystem is rw mode....") - assert is_vm_filesystem_rw( - vm_id), 'rootfs filesystem is not RW as expected for VM {}' \ - .format(vm['display_name']) - - LOG.tc_step( - "Attempting live migration; vm id = {}; vm_name = {} ....".format( - vm_id, vm['display_name'])) - - code, msg = vm_helper.live_migrate_vm(vm_id=vm_id, fail_ok=False) - LOG.tc_step("Verify live migration succeeded...") - assert code == 0, "Expected return code 0. Actual return code: {}; " \ - "details: {}".format(code, msg) - - LOG.tc_step("Verifying filesystem is rw mode after live migration....") - assert is_vm_filesystem_rw( - vm_id), 'After live migration rootfs filesystem is not RW as ' \ - 'expected for VM {}'. \ - format(vm['display_name'])
@mark.domain_sanity -def test_vm_with_large_volume_and_evacuation(vms_, pre_alarm_): - """ - Test instantiating vms with large volumes (20 GB and 40 GB) and evacuating them: - - Args: - vms_ (list): vms created by vms_ fixture - pre_alarm_ (list): alarm list obtained by pre_alarm_ fixture - - Test Setups: - - get tenant1 and management networks which are already created for lab - setup - - get or create a "small" flavor - - get the guest image id - - create two large volumes (20 GB and 40 GB) in cinder - - boot two vms (test_inst1, test_inst2) using the 20 GB and 40 GB volumes - respectively - - - Test Steps: - - Verify VM status is ACTIVE - - Validate that VMs boot, and that no timeouts or error status occur - - Verify the VM can be pinged from NATBOX - - Verify login to VM and rootfs (dev/vda) filesystem is rw mode - - live migrate, if required, to bring both VMs to the same compute - - Validate migrated VM and no errors or alarms are present - - Reboot compute host to initiate evacuation - - Verify VMs are evacuated - - Check for any system alarms - - Verify login to VM and rootfs (dev/vda) filesystem is still rw mode - after evacuation - - Terminate VMs - - Skip conditions: - - less than two computes - - no storage node - - """ - vm_ids = [] - for vm in vms_: - vm_id = vm['id'] - vm_ids.append(vm_id) - LOG.tc_step( - "Checking VM status; VM Instance id is: {}......".format(vm_id)) - vm_state = vm_helper.get_vm_status(vm_id) - assert vm_state == VMStatus.ACTIVE, 'VM {} state is {}; not in ' \ - 'ACTIVE state as expected' \ - .format(vm_id, vm_state) - - LOG.tc_step("Verify VM can be pinged from NAT box...") - rc, boot_time = check_vm_boot_time(vm_id) - assert rc, "VM is not pingable after {} seconds".format(boot_time) - - LOG.tc_step("Verify login to VM and check filesystem is rw mode....") - assert is_vm_filesystem_rw( - vm_id), 'rootfs filesystem is not RW as expected for VM {}' \ - .format(vm['display_name']) - - LOG.tc_step( - "Checking if live migration is required to put the vms on a single " - "compute....") - host_0 = vm_helper.get_vm_host(vm_ids[0]) - host_1 = vm_helper.get_vm_host(vm_ids[1]) - - if host_0 != host_1: - LOG.tc_step("Attempting to live migrate vm {} to host {} ....".format( - (vms_[1])['display_name'], host_0)) - code, msg = vm_helper.live_migrate_vm(vm_ids[1], - destination_host=host_0) - LOG.tc_step("Verify live migration succeeded...") - assert code == 0, "Live migration of vm {} to host {} did not " \ - "succeed".format((vms_[1])['display_name'], host_0) - - LOG.tc_step("Verify both VMs are on the same host....") - assert host_0 == vm_helper.get_vm_host( - vm_ids[1]), "VMs are not on the same compute host" - - LOG.tc_step( - "Rebooting compute {} to initiate vm evacuation .....".format(host_0)) - vm_helper.evacuate_vms(host=host_0, vms_to_check=vm_ids, ping_vms=True) - - LOG.tc_step("Log in to VM and check filesystem is rw mode....") - assert is_vm_filesystem_rw((vms_[0])[ - 'id']), 'After evacuation the rootfs ' \ - 'filesystem is not RW as expected ' \ - 'for VM {}'.format( - (vms_[0])['display_name']) - - LOG.tc_step("Log in to VM and check filesystem is rw mode....") - assert is_vm_filesystem_rw((vms_[1])['id']), \ - 'After evacuation the rootfs filesystem is not RW as expected ' \ - 'for VM {}'.format((vms_[1])['display_name'])
@mark.domain_sanity -def test_instantiate_a_vm_with_a_large_volume_and_cold_migrate(vms_, - pre_alarm_): - """ - Test instantiating vms with large volumes (20 GB and 40 GB) and cold migrating them: - Args: - vms_ (list): vms created by vms_ fixture - pre_alarm_ (list): alarm list obtained by pre_alarm_ fixture - - Test Setups: - - get tenant1 and management networks which are already created for lab - setup - - get or create a "small" flavor - - get the guest image id - - create two large volumes (20 GB and 40 GB) in cinder - - boot two vms (test_inst1, test_inst2) using the 20 GB and 40 GB volumes - respectively - - - Test Steps: - - Verify VM status is ACTIVE - - Validate that VMs boot, and that no timeouts or error status occur - - Verify the VM can be pinged from NATBOX - - Verify login to VM and rootfs (dev/vda) filesystem is rw mode - - Attempt to cold migrate the VMs - - Validate that the VMs migrated and no errors or alarms are present - - Log into both VMs and validate that file systems are read-write - - Terminate VMs - - Skip conditions: - - less than two hosts with the same storage backing - - less than two computes - - no storage node - - """ - LOG.tc_step("Instantiate a vm with a large volume.....") - - vms = vms_ - - for vm in vms: - vm_id = vm['id'] - - LOG.tc_step( - "Checking VM status; VM Instance id is: {}......".format(vm_id)) - vm_state = vm_helper.get_vm_status(vm_id) - - assert vm_state == VMStatus.ACTIVE, 'VM {} state is {}; not in ' \ - 'ACTIVE state as expected' \ - .format(vm_id, vm_state) - - LOG.tc_step("Verify VM can be pinged from NAT box...") - rc, boot_time = check_vm_boot_time(vm_id) - assert rc, "VM is not pingable after {} seconds".format(boot_time) - - LOG.tc_step("Verify login to VM and check filesystem is rw mode....") - assert is_vm_filesystem_rw( - vm_id), 'rootfs filesystem is not RW as expected for VM {}' \ - .format(vm['display_name']) - - LOG.tc_step( - "Attempting cold migration; vm id = {}; vm_name = {} ....".format( - vm_id, vm['display_name'])) - - code, msg = vm_helper.cold_migrate_vm(vm_id=vm_id, fail_ok=True) - LOG.tc_step("Verify cold migration succeeded...") - assert code == 0, "Expected return code 0. Actual return code: {}; " \ - "details: {}".format(code, msg) - - LOG.tc_step("Verifying filesystem is rw mode after cold migration....") - assert is_vm_filesystem_rw( - vm_id), 'After cold migration rootfs filesystem is not RW as ' \ - 'expected for ' \ - 'VM {}'.format(vm['display_name']) - - # LOG.tc_step("Checking for any system alarm ....") - # rc, new_alarm = is_new_alarm_raised(pre_alarms) - # assert not rc, " alarm(s) found: {}".format(new_alarm) - - -def test_instantiate_a_vm_with_multiple_volumes_and_migrate(): - """ - Test a vm with multiple volumes through live migration, cold migration and evacuation: - - Test Setups: - - get guest image_id - - get or create 'small' flavor_id - - get tenant and management network ids - - Test Steps: - - create a boot volume and another extra volume of size 8GB - - boot a vm from the created volume - - Validate that the VM boots, and that no timeouts or error status occur - - Verify VM status is ACTIVE - - Attach the second volume to the VM - - Attempt to live migrate the VM - - Login to VM and verify the filesystem is rw mode on both volumes - - Attempt to cold migrate the VM - - Login to VM and verify the filesystem is rw mode on both volumes - - Reboot the compute host to initiate evacuation - - Login to VM and verify the filesystem is rw mode on both volumes - - Terminate VMs - - Skip conditions: - - less than two computes - - less than one storage - - """ - # skip("Currently not working. Centos image doesn't see both volumes")
LOG.tc_step("Creating a volume size=8GB.....") - vol_id_0 = cinder_helper.create_volume(size=8)[1] - ResourceCleanup.add('volume', vol_id_0, scope='function') - - LOG.tc_step("Creating a second volume size=8GB.....") - vol_id_1 = cinder_helper.create_volume(size=8, bootable=False)[1] - LOG.tc_step("Volume id is: {}".format(vol_id_1)) - ResourceCleanup.add('volume', vol_id_1, scope='function') - - LOG.tc_step("Booting instance vm_0...") - - vm_id = vm_helper.boot_vm(name='vm_0', source='volume', source_id=vol_id_0, - cleanup='function')[1] - time.sleep(5) - - LOG.tc_step("Verify VM can be pinged from NAT box...") - rc, boot_time = check_vm_boot_time(vm_id) - assert rc, "VM is not pingable after {} seconds".format(boot_time) - - LOG.tc_step("Log in to VM and check filesystem is rw mode....") - assert is_vm_filesystem_rw( - vm_id), 'vol_0 rootfs filesystem is not RW as expected.' - - LOG.tc_step("Attempting to attach a second volume to VM...") - vm_helper.attach_vol_to_vm(vm_id, vol_id_1) - - LOG.tc_step( - "Log in to VM and check filesystem is rw mode for both volumes....") - assert is_vm_filesystem_rw(vm_id, rootfs=['vda', - 'vdb']), 'volumes rootfs ' \ - 'filesystem is not RW ' \ - 'as expected.' - - LOG.tc_step("Attempting to live migrate VM...") - vm_helper.live_migrate_vm(vm_id=vm_id) - - LOG.tc_step( - "Log in to VM and check filesystem is rw mode after live " - "migration....") - assert is_vm_filesystem_rw(vm_id, rootfs=['vda', - 'vdb']), 'After live migration ' \ - 'rootfs filesystem is ' \ - 'not RW' - - LOG.tc_step("Attempting to cold migrate VM...") - vm_helper.cold_migrate_vm(vm_id) - - LOG.tc_step( - "Log in to VM and check filesystem is rw mode after cold " - "migration....") - assert is_vm_filesystem_rw(vm_id, rootfs=['vda', - 'vdb']), 'After cold migration ' \ - 'rootfs filesystem is ' \ - 'not RW' - LOG.tc_step("Testing VM evacuation.....") - before_host_0 = vm_helper.get_vm_host(vm_id) - - LOG.tc_step("Rebooting compute {} to initiate vm evacuation .....".format( - before_host_0)) - vm_helper.evacuate_vms(host=before_host_0, vms_to_check=vm_id, - ping_vms=True) - - LOG.tc_step( - "Log in to VM and check filesystem is rw mode after " - "evacuation....") - assert is_vm_filesystem_rw(vm_id, rootfs=['vda', - 'vdb']), 'After evacuation ' \ - 'filesystem is not RW' - - -def check_vm_boot_time(vm_id): - start_time = time.time() - output = vm_helper.wait_for_vm_pingable_from_natbox(vm_id, fail_ok=False) - elapsed_time = time.time() - start_time - return output, elapsed_time - - -def is_vm_filesystem_rw(vm_id, rootfs='vda', vm_image_name=None): - """ - - Args: - vm_id: - rootfs (str|list): device name(s) to check, e.g. 'vda' or ['vda', 'vdb'] - vm_image_name (None|str): - - Returns: True if every given filesystem is mounted read-write - - """ - vm_helper.wait_for_vm_pingable_from_natbox(vm_id, timeout=240) - - if vm_image_name is None: - vm_image_name = GuestImages.DEFAULT['guest'] - - router_host = dhcp_host = None - try: - LOG.info( - "---------Collecting router and dhcp agent host info-----------") - router_host = network_helper.get_router_host() - mgmt_net = network_helper.get_mgmt_net_id() - dhcp_host = network_helper.get_network_agents(field='Host', - network=mgmt_net) - - with vm_helper.ssh_to_vm_from_natbox(vm_id, vm_image_name=vm_image_name, - retry_timeout=300) as vm_ssh: - if isinstance(rootfs, str): - rootfs = [rootfs] - for fs in rootfs: - cmd = "mount | grep {} | grep rw | wc -l".format(fs) - cmd_output = vm_ssh.exec_sudo_cmd(cmd)[1] - if cmd_output != '1': - LOG.info("Filesystem /dev/{} is not rw for VM: " - "{}".format(fs, vm_id)) - return False - return True - except exceptions.SSHRetryTimeout: - LOG.error("Failed to ssh, collecting vm console log.") - vm_helper.get_console_logs(vm_ids=vm_id) - LOG.info("Router host: {}. dhcp agent host: {}".format(router_host, - dhcp_host)) - raise
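The rw check in is_vm_filesystem_rw boils down to grepping mount output inside the guest; a local sketch of the same predicate:

def filesystem_is_rw(mount_output, device='vda'):
    # mirrors: mount | grep <device> | grep rw | wc -l  (expects exactly one line)
    matches = [line for line in mount_output.splitlines()
               if device in line and 'rw' in line]
    return len(matches) == 1

sample = '/dev/vda1 on / type ext4 (rw,relatime,data=ordered)'
print(filesystem_is_rw(sample))                      # True
print(filesystem_is_rw(sample.replace('rw', 'ro')))  # False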
diff --git a/automated-pytest-suite/testcases/functional/stx_monitor/test_stx_monitor.py b/automated-pytest-suite/testcases/functional/stx_monitor/test_stx_monitor.py deleted file mode 100644 index 0f632f90..00000000 --- a/automated-pytest-suite/testcases/functional/stx_monitor/test_stx_monitor.py +++ /dev/null @@ -1,473 +0,0 @@ -# -# Copyright (c) 2020 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - - -import os -import json - -from pytest import fixture -from pytest import mark - -from utils.tis_log import LOG -from utils.clients.ssh import ControllerClient - -from keywords import container_helper -from keywords import host_helper -from keywords import kube_helper -from keywords import system_helper -from consts.stx import SysType -from consts.auth import HostLinuxUser - -from consts.auth import Tenant - -STX_MONITOR_TAR = 'stx-monitor.tgz' -STX_MONITOR_APP_NAME = 'stx-monitor' - -MONITOR_PORT = 31001 - -POD_NAME = 0 -POD_NODE = 1 - -MONITORING_HOSTS = ["controller", "compute"] - -STX_MONITOR_LABELS = ['elastic-client', 'elastic-controller', 'elastic-data', 'elastic-master'] - -CONTROLLER_LABELS = STX_MONITOR_LABELS -COMPUTE_LABELS = ['elastic-master'] -SUBCLOUD_CONTROLLER_LABELS = ['elastic-controller'] - -POD_RUNNING_ALL_HOSTS = 'all_hosts' -POD_RUNNING_ONE_INSTANCE = 'one_instance' - -POD_READY_STATE_ARGS = '--namespace=monitor --for=condition=Ready pods --timeout=30s --all ' \ - '--selector=app!=elasticsearch-curator' - -MON_METRICBEAT_DS = 'mon-metricbeat-YYYYY' -MON_METRICBEAT_LABEL = 'mon-metricbeat-LABEL' -MON_METRICBEAT_PATIAL_NAME = 'mon-metricbeat-' - -# This is a dictionary of labels and their corresponding pod names. Each pod -# either runs on all labeled hosts or as one instance on a labeled host. -# Daemon set pods run on all hosts and do not correspond to a label.
-PODS_LABEL_MATCHING_DICT = { - # 'daemon_set' is a custom label for automation only - 'daemon_set': { - 'mon-filebeat-': POD_RUNNING_ALL_HOSTS, - MON_METRICBEAT_DS: POD_RUNNING_ALL_HOSTS - }, - 'elastic-client': { - 'mon-elasticsearch-client-': POD_RUNNING_ALL_HOSTS, - }, - 'elastic-controller': { - # the curator is a transient pod so we will skip checking for it - # 'mon-elasticsearch-curator-': POD_RUNNING_ONE_INSTANCE, - 'mon-kibana-': POD_RUNNING_ONE_INSTANCE, - 'mon-kube-state-metrics-': POD_RUNNING_ONE_INSTANCE, - 'mon-logstash-': POD_RUNNING_ALL_HOSTS, - MON_METRICBEAT_LABEL: POD_RUNNING_ONE_INSTANCE, - 'mon-nginx-ingress-controller-': POD_RUNNING_ALL_HOSTS, - 'mon-nginx-ingress-default-backend-': POD_RUNNING_ONE_INSTANCE - }, - 'elastic-data': { - 'mon-elasticsearch-data-': POD_RUNNING_ALL_HOSTS - }, - 'elastic-master': { - 'mon-elasticsearch-master-': POD_RUNNING_ALL_HOSTS - } -} - -PODS_LABEL_MATCHING_SUBCLOUD_DICT = { - # 'daemon_set' is a custom label for automation only - 'daemon_set': { - 'mon-filebeat-': POD_RUNNING_ALL_HOSTS, - MON_METRICBEAT_DS: POD_RUNNING_ALL_HOSTS - }, - 'elastic-controller': { - # the curator is a transient pod so we will skip checking for it - # 'mon-elasticsearch-curator-': POD_RUNNING_ONE_INSTANCE, - 'mon-kube-state-metrics-': POD_RUNNING_ONE_INSTANCE, - 'mon-logstash-': POD_RUNNING_ALL_HOSTS, - MON_METRICBEAT_LABEL: POD_RUNNING_ONE_INSTANCE - } -} - - -def stx_monitor_file_exist(): - con_ssh = ControllerClient.get_active_controller() - home_dir = HostLinuxUser.get_home() - stx_mon_file = '{}/{}'.format(home_dir, STX_MONITOR_TAR) - - LOG.info("Check if file %s is present" % stx_mon_file) - - return con_ssh.file_exists(stx_mon_file) - - -@fixture() -def setup_app(request): - LOG.fixture_step("Setup: Clean up any pre-existing stx-monitor resources") - cleanup_app() - - def cleanup_after_test(): - LOG.fixture_step("Tear down: clean up any stx-monitor resources") - cleanup_app() - request.addfinalizer(cleanup_after_test) - - -def delete_images_from_host_registries(con_ssh=None, auth_info=Tenant.get('admin_platform')): - hosts = system_helper.get_hosts(con_ssh=con_ssh, auth_info=auth_info) - for host in hosts: - with host_helper.ssh_to_host(hostname=host, con_ssh=con_ssh) as host_ssh: - LOG.info("Delete {} images for host: {}".format(STX_MONITOR_APP_NAME, host)) - container_helper.remove_docker_images_with_pattern(pattern="elastic", con_ssh=host_ssh, - timeout=120) - - -def cleanup_app(con_ssh=None, auth_info=Tenant.get('admin_platform')): - """ - Remove application stx-monitor - Delete application stx-monitor - Remove stx-monitor images registries from all hosts - Remove stx-monitor labels from all hosts - """ - - LOG.info("Remove application {}".format(STX_MONITOR_APP_NAME)) - container_helper.remove_app(app_name=STX_MONITOR_APP_NAME, con_ssh=con_ssh, auth_info=auth_info) - - LOG.info("Delete application {}".format(STX_MONITOR_APP_NAME)) - container_helper.delete_app(app_name=STX_MONITOR_APP_NAME, con_ssh=con_ssh, auth_info=auth_info) - - delete_images_from_host_registries(con_ssh=con_ssh, auth_info=auth_info) - - LOG.info("Delete labels for {}".format(STX_MONITOR_APP_NAME)) - delete_all_monitor_labels(con_ssh=con_ssh, auth_info=auth_info) - - LOG.info("Cleanup completed") - - -def assign_labels(system_type, con_ssh=None, auth_info=Tenant.get('admin_platform')): - """ - The following labels are required on all controllers: - elastic-controller=enabled - elastic-master=enabled - elastic-data=enabled - elastic-client=enabled - - The following label is 
required on one compute: - elastic-master=enabled - """ - LOG.info("Assign stx-monitor labels to controller-0") - host_list = system_helper.get_hosts(con_ssh=con_ssh, auth_info=auth_info) - host_helper.assign_host_labels("controller-0", CONTROLLER_LABELS, lock=False, unlock=False, - con_ssh=con_ssh, auth_info=auth_info) - - if system_type != SysType.AIO_SX and "controller-1" in host_list: - LOG.info("Assign stx-monitor labels to controller-1") - host_helper.assign_host_labels("controller-1", CONTROLLER_LABELS, lock=False, unlock=False, - con_ssh=con_ssh, auth_info=auth_info) - - if "compute-0" in host_list: - LOG.info("Assign stx-monitor labels to compute-0") - host_helper.assign_host_labels("compute-0", COMPUTE_LABELS, lock=False, unlock=False, - con_ssh=con_ssh, auth_info=auth_info) - - -def assign_subcloud_labels(system_type, con_ssh=None, auth_info=Tenant.get('admin_platform')): - """ - The following label is required on all Subcloud controllers: - elastic-controller=enabled - """ - LOG.info("Assign stx-monitor labels to controller-0") - host_list = system_helper.get_hosts(con_ssh=con_ssh, auth_info=auth_info) - host_helper.assign_host_labels("controller-0", SUBCLOUD_CONTROLLER_LABELS, lock=False, - unlock=False, con_ssh=con_ssh, auth_info=auth_info) - - if system_type != SysType.AIO_SX and "controller-1" in host_list: - LOG.info("Assign stx-monitor labels to controller-1") - host_helper.assign_host_labels("controller-1", SUBCLOUD_CONTROLLER_LABELS, lock=False, - unlock=False, con_ssh=con_ssh, auth_info=auth_info) - - -def delete_all_monitor_labels(con_ssh=None, auth_info=Tenant.get('admin_platform')): - LOG.info("Delete monitor labels from hosts") - - host_list = system_helper.get_hosts(con_ssh=con_ssh, auth_info=auth_info) - for host in host_list: - # Remove all monitor labels from all hosts on the system - host_helper.remove_host_labels(host, STX_MONITOR_LABELS, lock=False, unlock=False, - con_ssh=con_ssh, auth_info=auth_info) - - -def app_upload_apply(con_ssh=None, auth_info=Tenant.get('admin_platform')): - """ - Upload stx-monitor - Apply stx-monitor - """ - - # Do application upload stx-monitor. - app_dir = HostLinuxUser.get_home() - tar_file = os.path.join(app_dir, STX_MONITOR_TAR) - LOG.info("Upload %s" % tar_file) - container_helper.upload_app(tar_file=tar_file, app_name=STX_MONITOR_APP_NAME, con_ssh=con_ssh, - auth_info=auth_info, uploaded_timeout=3600,) - - # Do application apply stx-monitor. 
- LOG.info("Apply %s" % STX_MONITOR_APP_NAME) - container_helper.apply_app(app_name=STX_MONITOR_APP_NAME, applied_timeout=3600, - check_interval=60, con_ssh=con_ssh, auth_info=auth_info) - - -def get_oam_floating_ip(): - """ - Get oam floating ip address - """ - if system_helper.is_aio_simplex(): - fields = 'oam_ip' - else: - fields = ('oam_c0_ip', 'oam_c1_ip', 'oam_floating_ip') - oam_info = system_helper.get_oam_values(fields=fields) - - for key, value in oam_info.items(): - if value is not None: - oam_floating_ip = value - - return oam_floating_ip - - -def check_cluster_health(system_type): - # Check the cluster health (cluster health status will be yellow for - # AIO-SX as there will be no replicated shards) - LOG.info("Check the cluster health") - hosts = system_helper.get_hosts() - LOG.info("System has hosts: ".format(hosts)) - prefix = 'http' - oam_ip = get_oam_floating_ip() - - for host in hosts: - with host_helper.ssh_to_host(hostname=host) as host_ssh: - code, output = host_ssh.exec_cmd( - 'curl {}://{}:31001/mon-elasticsearch-client/_cluster/health?pretty'.format( - prefix, oam_ip), fail_ok=False) - - if output: - data_dict = json.loads(output) - - # check that 'status' is green - if not (data_dict['status'] == 'green' or - (system_type == SysType.AIO_SX and data_dict['status'] == 'yellow')): - raise AssertionError("status not green or in case of AIO-SX yellow") - - # check that 'unassigned shards' is 0 - if system_type != SysType.AIO_SX and data_dict['unassigned_shards'] != 0: - raise AssertionError("unassigned_shards not 0") - - # check that 'active_shards' is 0 - if data_dict['active_shards'] == 0: - raise AssertionError("active_shards not 0") - else: - raise AssertionError("curl command failed") - - -def is_pod_running_on_host(pods, host, partial_pod_name): - - for pod in (_pod for _pod in pods if host == _pod[POD_NODE]): - - # Special case for 'mon-metricbeat-'. There are two running processes with that partial - # name; - # - The daemon set pod 'mon-metricbeat-YYYYY' - # - The label 'mon-metricbeat-YYYYYYYYYY-YYYYY'. Note that the middle Y are variable - # lengths. e.g. mon-metricbeat-557fb9cb7-pbbzs vs mon-kube-state-metrics-77db855d59-5s566 - # was seen in different labs. - if partial_pod_name == MON_METRICBEAT_DS: - if MON_METRICBEAT_PATIAL_NAME in pod[POD_NAME] and \ - len(pod[POD_NAME]) == len(MON_METRICBEAT_DS): - LOG.info('Found pod matching name {} for host {}. POD: {}'.format( - partial_pod_name, host, pod[POD_NAME])) - return True - - elif partial_pod_name == MON_METRICBEAT_LABEL: - if MON_METRICBEAT_PATIAL_NAME in pod[POD_NAME] and \ - len(pod[POD_NAME]) >= len(MON_METRICBEAT_DS)+2: - LOG.info('Found pod matching name {} for host {}. POD: {}'.format( - partial_pod_name, host, pod[POD_NAME])) - return True - - elif partial_pod_name in pod[POD_NAME]: - LOG.info('Found pod matching name {} for host {}. POD: {}'.format( - partial_pod_name, host, pod[POD_NAME])) - - return True - - LOG.info('Missing pod matching name {} for host {}'.format(partial_pod_name, host)) - return False - - -def are_monitor_pods_running(system_type, con_ssh=None, auth_info=Tenant.get('admin_platform'), - matching_dict=PODS_LABEL_MATCHING_DICT): - # Get all the pods for stx-monitor - monitor_pods = kube_helper.get_pods(field=('NAME', 'NODE'), namespace="monitor", strict=False, - con_ssh=con_ssh) - - LOG.info("Running pods for stx-monitor: %s" % monitor_pods) - - # Make a dictionary of which hosts are assigned to which stx-monitor - # labels. e.g. 
def are_monitor_pods_running(system_type, con_ssh=None, auth_info=Tenant.get('admin_platform'), - matching_dict=PODS_LABEL_MATCHING_DICT): - # Get all the pods for stx-monitor - monitor_pods = kube_helper.get_pods(field=('NAME', 'NODE'), namespace="monitor", strict=False, - con_ssh=con_ssh) - - LOG.info("Running pods for stx-monitor: %s" % monitor_pods) - - # Make a dictionary of which hosts are assigned to which stx-monitor - # labels. e.g. - # - # { - # 'daemon_set': ['controller-0', 'controller-1'], - # 'elastic-client': ['controller-0', 'controller-1'], - # 'elastic-controller': ['controller-0', 'controller-1'], - # ... - # } - # - host_list = system_helper.get_host_list_data(columns=["hostname", "personality"], - con_ssh=con_ssh, auth_info=auth_info) - labels_to_host_dict = {} - for host in (_host for _host in host_list if _host.get('hostname')): - hostname = host.get('hostname') - personality = host.get('personality') - if personality and personality in str(MONITORING_HOSTS): - - # Add the daemon set custom label, this is a special label only - # for this labels_to_host_dict - hosts_for_label = labels_to_host_dict.get('daemon_set', []) - hosts_for_label.append(hostname) - labels_to_host_dict.update({'daemon_set': hosts_for_label}) - - # Add the host's assigned labels - labels = host_helper.get_host_labels_info(hostname, con_ssh=con_ssh, - auth_info=auth_info) - for label_name, label_status in labels.items(): - if label_status == 'enabled': - hosts_for_label = labels_to_host_dict.get(label_name, []) - hosts_for_label.append(hostname) - labels_to_host_dict.update({label_name: hosts_for_label}) - - LOG.info('labels_running_hosts:{}'.format(labels_to_host_dict)) - - # For each label currently assigned on the system, get the matching - # POD names from matching_dict - for label, hosts_for_label in labels_to_host_dict.items(): - LOG.debug('----------') - LOG.debug('label:{} hosts:{}'.format(label, hosts_for_label)) - - pod_details = None - for k, v in matching_dict.items(): - if k == label: - pod_details = v - break - - if pod_details is None: - # Label has no entry in matching_dict; skip it rather than - # short-circuiting the remaining labels - continue - - # Get the list of pod names we need to search for; a label can have - # more than one pod. - for partial_pod_name, running_type in pod_details.items(): - LOG.info('-----') - LOG.info('partial_pod_name:{} running_type:{}'.format(partial_pod_name, running_type)) - - inst_found_count = 0 - for host in hosts_for_label: - if is_pod_running_on_host(monitor_pods, host, partial_pod_name): - # The pod was found; increment the number of instances running - # across all hosts for this pod - inst_found_count += 1 - - # Special case for AIO-DX and mon-elasticsearch-master-x - if partial_pod_name == 'mon-elasticsearch-master-' and system_type == SysType.AIO_DX \ - and inst_found_count == 1: - LOG.info('Pod {} only needs to run one instance for AIO-DX'.format( - partial_pod_name)) - # Some pods only run one instance even if the label is on multiple hosts - elif inst_found_count == 1 and running_type == POD_RUNNING_ONE_INSTANCE: - LOG.info('Pod {} only needs to run one instance'.format(partial_pod_name)) - # Pod did not match the number of hosts it's supposed to run on - elif inst_found_count != len(hosts_for_label): - LOG.error('Pod check for {} failed, missing instances'.format(partial_pod_name)) - return False - - LOG.info('Check for pod {} SUCCESS'.format(partial_pod_name)) - - return True - - -@mark.skipif(not stx_monitor_file_exist(), reason="Missing stx-monitor tar file from system") -@mark.platform_sanity -def test_stx_monitor(setup_app): - """ - Test the stx-monitor application - - Assumptions: /home/sysadmin/stx-monitor.tgz is present on controller-0 - - Args: - setup_app: fixture - - Setups: - - application remove and delete stx-monitor, - application-remove stx-monitor - application-delete stx-monitor - - delete images from all registries on all hosts.
- docker images | grep elastic | awk '{print $3}' - docker image rm --force - - remove all stx-monitor labels from all hosts - e.g. host-label-remove - - Test Steps: - - Assign labels (varies depending on type of system and hosts). - e.g. host-label-assign