From 7759e2fd823dc347bdd12bd8eab165d1e81e02a9 Mon Sep 17 00:00:00 2001
From: Andreas Jaeger
Date: Sun, 14 Oct 2018 12:50:35 +0200
Subject: [PATCH] Retire astara repo

Retire repository, following
https://docs.openstack.org/infra/manual/drivers.html#retiring-a-project

Change-Id: If5f7d284bd107a93edd9272ac0ed8e6d20ba5c51
---
 .gitignore | 53 -
 .travis.yml | 10 -
 LICENSE | 175 ---
 README.md | 70 -
 README.rst | 10 +
 astara/__init__.py | 0
 astara/api/__init__.py | 33 -
 astara/api/astara_client.py | 91 --
 astara/api/config/__init__.py | 0
 astara/api/config/common.py | 105 --
 astara/api/config/loadbalancer.py | 49 -
 astara/api/config/router.py | 171 ---
 astara/api/keystone.py | 38 -
 astara/api/neutron.py | 1269 -----
 astara/api/nova.py | 453 ------
 astara/api/rug.py | 125 --
 astara/cli/__init__.py | 15 -
 astara/cli/app.py | 49 -
 astara/cli/browse.py | 377 -----
 astara/cli/config.py | 37 -
 astara/cli/global_debug.py | 56 -
 astara/cli/main.py | 24 -
 astara/cli/message.py | 52 -
 astara/cli/poll.py | 33 -
 astara/cli/resource.py | 102 --
 astara/cli/router.py | 169 ---
 astara/cli/tenant.py | 62 -
 astara/cli/worker.py | 37 -
 astara/commands.py | 55 -
 astara/common/__init__.py | 15 -
 astara/common/config.py | 96 --
 astara/common/constants.py | 24 -
 astara/common/container.py | 70 -
 astara/common/hash_ring.py | 204 ---
 astara/common/i18n.py | 24 -
 astara/common/linux/__init__.py | 15 -
 astara/common/linux/interface.py | 269 ----
 astara/common/linux/ip_lib.py | 450 ------
 astara/common/linux/ovs_lib.py | 321 -----
 astara/common/linux/utils.py | 123 --
 astara/common/rpc.py | 153 --
 astara/coordination.py | 186 ---
 astara/daemon.py | 44 -
 astara/db/__init__.py | 0
 astara/db/api.py | 128 --
 astara/db/migration.py | 56 -
 astara/db/sqlalchemy/__init__.py | 0
 astara/db/sqlalchemy/alembic.ini | 54 -
 astara/db/sqlalchemy/alembic/env.py | 61 -
 astara/db/sqlalchemy/alembic/script.py.mako | 22 -
 .../4f695b725637_initial_migration.py | 69 -
 astara/db/sqlalchemy/api.py | 166 ---
 astara/db/sqlalchemy/dbsync.py | 93 --
 astara/db/sqlalchemy/migration.py | 113 --
 astara/db/sqlalchemy/models.py | 113 --
 astara/debug.py | 80 --
 astara/drivers/__init__.py | 91 --
 astara/drivers/base.py | 194 ---
 astara/drivers/loadbalancer.py | 352 -----
 astara/drivers/router.py | 368 -----
 astara/drivers/states.py | 30 -
 astara/event.py | 98 --
 astara/health.py | 87 --
 astara/instance_manager.py | 824 -----------
 astara/main.py | 218 ---
 astara/metadata.py | 229 ---
 astara/newton_fix.py | 58 -
 astara/notifications.py | 264 ----
 astara/opts.py | 84 --
 astara/pez/__init__.py | 0
 astara/pez/manager.py | 109 --
 astara/pez/pool.py | 327 -----
 astara/pez/rpcapi.py | 53 -
 astara/pez/service.py | 65 -
 astara/populate.py | 79 -
 astara/scheduler.py | 166 ---
 astara/state.py | 565 --------
 astara/tenant.py | 224 ---
 astara/test/__init__.py | 15 -
 astara/test/functional/__init__.py | 0
 astara/test/functional/base.py | 629 --------
 astara/test/functional/config.py | 67 -
 astara/test/functional/test.conf | 42 -
 astara/test/functional/test_tenant_router.py | 134 --
 astara/test/unit/__init__.py | 15 -
 astara/test/unit/api/__init__.py | 15 -
 astara/test/unit/api/config/__init__.py | 15 -
 astara/test/unit/api/config/config_fakes.py | 127 --
 .../unit/api/config/test_common_config.py | 202 ---
 .../api/config/test_loadbalancer_config.py | 51 -
 .../unit/api/config/test_router_config.py | 292 ----
 astara/test/unit/api/test_astara_router.py | 140 --
 astara/test/unit/api/test_keystone.py | 38 -
 astara/test/unit/api/test_neutron_wrapper.py | 518 -------
 astara/test/unit/api/test_nova_wrapper.py | 481 -------
 astara/test/unit/api/test_rug_api.py | 242 ----
 astara/test/unit/base.py | 40 -
 astara/test/unit/common/__init__.py | 15 -
 .../unit/common/test_agent_linux_utils.py | 109 --
 astara/test/unit/common/test_config.py | 44 -
 astara/test/unit/common/test_hash_ring.py | 230 ---
 .../test/unit/common/test_linux_interface.py | 267 ----
 astara/test/unit/common/test_linux_ip_lib.py | 700 ---------
 astara/test/unit/db/__init__.py | 15 -
 astara/test/unit/db/base.py | 92 --
 astara/test/unit/db/test_debug_mode.py | 100 --
 astara/test/unit/drivers/__init__.py | 0
 astara/test/unit/drivers/test_factory.py | 63 -
 astara/test/unit/drivers/test_loadbalancer.py | 377 -----
 astara/test/unit/drivers/test_router.py | 404 ------
 astara/test/unit/fakes.py | 176 ---
 astara/test/unit/openvswitch/__init__.py | 15 -
 astara/test/unit/openvswitch/test_ovs_lib.py | 369 -----
 astara/test/unit/pez/__init__.py | 0
 astara/test/unit/pez/test_pool_manager.py | 85 --
 astara/test/unit/test_coordination.py | 150 --
 astara/test/unit/test_debug.py | 71 -
 astara/test/unit/test_health.py | 50 -
 astara/test/unit/test_instance_manager.py | 1163 ---------------
 astara/test/unit/test_main.py | 91 --
 astara/test/unit/test_notifications.py | 253 ----
 astara/test/unit/test_populate.py | 94 --
 astara/test/unit/test_rpc.py | 171 ---
 astara/test/unit/test_scheduler.py | 115 --
 astara/test/unit/test_state.py | 711 ---------
 astara/test/unit/test_tenant.py | 386 -----
 astara/test/unit/test_worker.py | 1046 --------------
 astara/version.py | 18 -
 astara/worker.py | 815 ----------
 devstack/README.md | 11 -
 devstack/files/debs/astara | 2 -
 devstack/local.conf.sample | 27 -
 devstack/plugin.sh | 332 -----
 devstack/settings | 63 -
 doc/Makefile | 192 ---
 doc/source/_static/neutron-astara.png | Bin 52060 -> 0 bytes
 doc/source/_static/neutron-reference.png | Bin 54966 -> 0 bytes
 doc/source/appliance.rst | 339 -----
 doc/source/conf.py | 288 ----
 doc/source/contribute.rst | 8 -
 doc/source/developer_quickstart.rst | 92 --
 doc/source/index.rst | 38 -
 doc/source/install.rst | 345 -----
 doc/source/loadbalancer.rst | 102 --
 doc/source/mitaka.rst | 5 -
 doc/source/operation.rst | 99 --
 doc/source/orchestrator.rst | 168 ---
 doc/source/reference.rst | 5 -
 doc/source/release_notes.rst | 6 -
 doc/source/sample_boot.dot | 14 -
 doc/source/state_machine.dot | 57 -
 doc/source/what_is_astara.rst | 107 --
 doc/source/worker_diagram.dot | 27 -
 etc/orchestrator.ini | 964 -------------
 etc/oslo-config-generator/orchestrator.ini | 16 -
 etc/oslo-config-generator/test.conf | 5 -
 etc/provider_rules.json | 4 -
 etc/rootwrap.conf | 27 -
 etc/rootwrap.d/network.filters | 18 -
 releasenotes/notes/.placeholder | 0
 ...variable_mtu_support-9aae24343ee58aed.yaml | 3 -
 ...tara-debug-cmd-fixed-2cf74ccd82f21fe0.yaml | 3 -
 .../astara-rootwrap-59731966fb3a0117.yaml | 5 -
 ...a_browse_rebuild_fix-71b9c6e76a80dc00.yaml | 4 -
 .../autogen-config-file-568ecfdaba9e316d.yaml | 6 -
 .../notes/byonf-60787a24e83c3341.yaml | 7 -
 ...ement_post_rebalance-3e7c64785679f239.yaml | 5 -
 .../deprecated-amqp-url-6e988fd7d33a9305.yaml | 5 -
 ...ack_default_external-d5b9e5e830d5a8df.yaml | 17 -
 .../dynamic-mgt-port-86d4b9f780fa3d78.yaml | 3 -
 .../fix-astara-ctl-ssh-9be434ac84c06c79.yaml | 4 -
 .../notes/ha_appliances-c2048033c2be6d51.yaml | 4 -
 ...zk_leadership_member-2cf3736e67d19b27.yaml | 3 -
 .../notes/initial-note-fd05752e5f3b87fb.yaml | 3 -
 ...resources-from-cache-6636d39c9f173287.yaml | 3 -
 ...tor_appliance_config-33552cd22b549847.yaml | 5 -
 releasenotes/source/_static/.placeholder | 0
 releasenotes/source/_templates/.placeholder | 0
 releasenotes/source/conf.py | 273 ----
 releasenotes/source/index.rst | 8 -
 releasenotes/source/mitaka.rst | 5 -
 requirements.txt | 27 -
 setup.cfg | 80 --
 setup.py | 29 -
 specs/README.rst | 35 -
 specs/kilo/ci-updates.rst | 191 ---
 specs/kilo/skeleton.rst | 1 -
 specs/liberty/doc-updates.rst | 192 ---
 specs/liberty/rug_ha.rst | 201 ---
 specs/liberty/skeleton.rst | 1 -
 specs/skeleton.rst | 103 --
 specs/template.tst | 471 ------
 test-requirements.txt | 19 -
 tools/astara-byonf | 116 --
 tools/generate_config_file_samples.sh | 29 -
 tools/logstash/README.md | 3 -
 tools/logstash/logstash-rug-patterns | 4 -
 tools/logstash/rug-filter.conf | 39 -
 tools/run_functional.sh | 37 -
 tox.ini | 46 -
 vagrant/README.md | 60 -
 vagrant/Vagrantfile | 47 -
 vagrant/devstack.sh | 60 -
 vagrant/vagrant.sh | 7 -
 204 files changed, 10 insertions(+), 27927 deletions(-)
 delete mode 100755 .gitignore
 delete mode 100644 .travis.yml
 delete mode 100644 LICENSE
 delete mode 100644 README.md
 create mode 100644 README.rst
 delete mode 100644 astara/__init__.py
 delete mode 100644 astara/api/__init__.py
 delete mode 100644 astara/api/astara_client.py
 delete mode 100644 astara/api/config/__init__.py
 delete mode 100644 astara/api/config/common.py
 delete mode 100644 astara/api/config/loadbalancer.py
 delete mode 100644 astara/api/config/router.py
 delete mode 100644 astara/api/keystone.py
 delete mode 100644 astara/api/neutron.py
 delete mode 100644 astara/api/nova.py
 delete mode 100644 astara/api/rug.py
 delete mode 100644 astara/cli/__init__.py
 delete mode 100644 astara/cli/app.py
 delete mode 100644 astara/cli/browse.py
 delete mode 100644 astara/cli/config.py
 delete mode 100644 astara/cli/global_debug.py
 delete mode 100644 astara/cli/main.py
 delete mode 100644 astara/cli/message.py
 delete mode 100644 astara/cli/poll.py
 delete mode 100644 astara/cli/resource.py
 delete mode 100644 astara/cli/router.py
 delete mode 100644 astara/cli/tenant.py
 delete mode 100644 astara/cli/worker.py
 delete mode 100644 astara/commands.py
 delete mode 100644 astara/common/__init__.py
 delete mode 100644 astara/common/config.py
 delete mode 100644 astara/common/constants.py
 delete mode 100644 astara/common/container.py
 delete mode 100644 astara/common/hash_ring.py
 delete mode 100644 astara/common/i18n.py
 delete mode 100644 astara/common/linux/__init__.py
 delete mode 100644 astara/common/linux/interface.py
 delete mode 100644 astara/common/linux/ip_lib.py
 delete mode 100644 astara/common/linux/ovs_lib.py
 delete mode 100644 astara/common/linux/utils.py
 delete mode 100644 astara/common/rpc.py
 delete mode 100644 astara/coordination.py
 delete mode 100644 astara/daemon.py
 delete mode 100644 astara/db/__init__.py
 delete mode 100644 astara/db/api.py
 delete mode 100644 astara/db/migration.py
 delete mode 100644 astara/db/sqlalchemy/__init__.py
 delete mode 100644 astara/db/sqlalchemy/alembic.ini
 delete mode 100644 astara/db/sqlalchemy/alembic/env.py
 delete mode 100644 astara/db/sqlalchemy/alembic/script.py.mako
 delete mode 100644 astara/db/sqlalchemy/alembic/versions/4f695b725637_initial_migration.py
 delete mode 100644 astara/db/sqlalchemy/api.py
 delete mode 100644 astara/db/sqlalchemy/dbsync.py
 delete mode 100644 astara/db/sqlalchemy/migration.py
 delete mode 100644 astara/db/sqlalchemy/models.py
 delete mode 100644 astara/debug.py
 delete mode 100644 astara/drivers/__init__.py
 delete mode 100644 astara/drivers/base.py
 delete mode 100644 astara/drivers/loadbalancer.py
 delete mode 100644 astara/drivers/router.py
 delete mode 100644 astara/drivers/states.py
 delete mode 100644 astara/event.py
 delete mode 100644 astara/health.py
 delete mode 100644 astara/instance_manager.py
 delete mode 100644 astara/main.py
 delete mode 100644 astara/metadata.py
 delete mode 100644 astara/newton_fix.py
 delete mode 100644 astara/notifications.py
 delete mode 100644 astara/opts.py
 delete mode 100644 astara/pez/__init__.py
 delete mode 100644 astara/pez/manager.py
 delete mode 100644 astara/pez/pool.py
 delete mode 100644 astara/pez/rpcapi.py
 delete mode 100644 astara/pez/service.py
 delete mode 100644 astara/populate.py
 delete mode 100644 astara/scheduler.py
 delete mode 100644 astara/state.py
 delete mode 100644 astara/tenant.py
 delete mode 100644 astara/test/__init__.py
 delete mode 100644 astara/test/functional/__init__.py
 delete mode 100755 astara/test/functional/base.py
 delete mode 100644 astara/test/functional/config.py
 delete mode 100644 astara/test/functional/test.conf
 delete mode 100644 astara/test/functional/test_tenant_router.py
 delete mode 100644 astara/test/unit/__init__.py
 delete mode 100644 astara/test/unit/api/__init__.py
 delete mode 100644 astara/test/unit/api/config/__init__.py
 delete mode 100644 astara/test/unit/api/config/config_fakes.py
 delete mode 100644 astara/test/unit/api/config/test_common_config.py
 delete mode 100644 astara/test/unit/api/config/test_loadbalancer_config.py
 delete mode 100644 astara/test/unit/api/config/test_router_config.py
 delete mode 100644 astara/test/unit/api/test_astara_router.py
 delete mode 100644 astara/test/unit/api/test_keystone.py
 delete mode 100644 astara/test/unit/api/test_neutron_wrapper.py
 delete mode 100644 astara/test/unit/api/test_nova_wrapper.py
 delete mode 100644 astara/test/unit/api/test_rug_api.py
 delete mode 100644 astara/test/unit/base.py
 delete mode 100644 astara/test/unit/common/__init__.py
 delete mode 100644 astara/test/unit/common/test_agent_linux_utils.py
 delete mode 100644 astara/test/unit/common/test_config.py
 delete mode 100644 astara/test/unit/common/test_hash_ring.py
 delete mode 100644 astara/test/unit/common/test_linux_interface.py
 delete mode 100644 astara/test/unit/common/test_linux_ip_lib.py
 delete mode 100644 astara/test/unit/db/__init__.py
 delete mode 100644 astara/test/unit/db/base.py
 delete mode 100644 astara/test/unit/db/test_debug_mode.py
 delete mode 100644 astara/test/unit/drivers/__init__.py
 delete mode 100644 astara/test/unit/drivers/test_factory.py
 delete mode 100644 astara/test/unit/drivers/test_loadbalancer.py
 delete mode 100644 astara/test/unit/drivers/test_router.py
 delete mode 100644 astara/test/unit/fakes.py
 delete mode 100644 astara/test/unit/openvswitch/__init__.py
 delete mode 100644 astara/test/unit/openvswitch/test_ovs_lib.py
 delete mode 100644 astara/test/unit/pez/__init__.py
 delete mode 100644 astara/test/unit/pez/test_pool_manager.py
 delete mode 100644 astara/test/unit/test_coordination.py
 delete mode 100644 astara/test/unit/test_debug.py
 delete mode 100644 astara/test/unit/test_health.py
 delete mode 100644 astara/test/unit/test_instance_manager.py
 delete mode 100644 astara/test/unit/test_main.py
 delete mode 100644 astara/test/unit/test_notifications.py
 delete mode 100644 astara/test/unit/test_populate.py
 delete mode 100644 astara/test/unit/test_rpc.py
 delete mode 100644 astara/test/unit/test_scheduler.py
 delete mode 100644 astara/test/unit/test_state.py
 delete mode 100644 astara/test/unit/test_tenant.py
 delete mode 100644 astara/test/unit/test_worker.py
 delete mode 100644 astara/version.py
 delete mode 100644 astara/worker.py
 delete mode 100644 devstack/README.md
 delete mode 100644 devstack/files/debs/astara
 delete mode 100644 devstack/local.conf.sample
 delete mode 100644 devstack/plugin.sh
 delete mode 100644 devstack/settings
 delete mode 100644 doc/Makefile
 delete mode 100644 doc/source/_static/neutron-astara.png
 delete mode 100644 doc/source/_static/neutron-reference.png
 delete mode 100644 doc/source/appliance.rst
 delete mode 100644 doc/source/conf.py
 delete mode 100644 doc/source/contribute.rst
 delete mode 100644 doc/source/developer_quickstart.rst
 delete mode 100644 doc/source/index.rst
 delete mode 100644 doc/source/install.rst
 delete mode 100644 doc/source/loadbalancer.rst
 delete mode 100644 doc/source/mitaka.rst
 delete mode 100644 doc/source/operation.rst
 delete mode 100644 doc/source/orchestrator.rst
 delete mode 100644 doc/source/reference.rst
 delete mode 100644 doc/source/release_notes.rst
 delete mode 100644 doc/source/sample_boot.dot
 delete mode 100644 doc/source/state_machine.dot
 delete mode 100644 doc/source/what_is_astara.rst
 delete mode 100644 doc/source/worker_diagram.dot
 delete mode 100644 etc/orchestrator.ini
 delete mode 100644 etc/oslo-config-generator/orchestrator.ini
 delete mode 100644 etc/oslo-config-generator/test.conf
 delete mode 100644 etc/provider_rules.json
 delete mode 100644 etc/rootwrap.conf
 delete mode 100644 etc/rootwrap.d/network.filters
 delete mode 100644 releasenotes/notes/.placeholder
 delete mode 100644 releasenotes/notes/add_variable_mtu_support-9aae24343ee58aed.yaml
 delete mode 100644 releasenotes/notes/astara-debug-cmd-fixed-2cf74ccd82f21fe0.yaml
 delete mode 100644 releasenotes/notes/astara-rootwrap-59731966fb3a0117.yaml
 delete mode 100644 releasenotes/notes/astara_browse_rebuild_fix-71b9c6e76a80dc00.yaml
 delete mode 100644 releasenotes/notes/autogen-config-file-568ecfdaba9e316d.yaml
 delete mode 100644 releasenotes/notes/byonf-60787a24e83c3341.yaml
 delete mode 100644 releasenotes/notes/cleanup_sm_management_post_rebalance-3e7c64785679f239.yaml
 delete mode 100644 releasenotes/notes/deprecated-amqp-url-6e988fd7d33a9305.yaml
 delete mode 100644 releasenotes/notes/devstack_default_external-d5b9e5e830d5a8df.yaml
 delete mode 100644 releasenotes/notes/dynamic-mgt-port-86d4b9f780fa3d78.yaml
 delete mode 100644 releasenotes/notes/fix-astara-ctl-ssh-9be434ac84c06c79.yaml
 delete mode 100644 releasenotes/notes/ha_appliances-c2048033c2be6d51.yaml
 delete mode 100644 releasenotes/notes/ignore_zk_leadership_member-2cf3736e67d19b27.yaml
 delete mode 100644 releasenotes/notes/initial-note-fd05752e5f3b87fb.yaml
 delete mode 100644 releasenotes/notes/invalidate-deleted-resources-from-cache-6636d39c9f173287.yaml
 delete mode 100644 releasenotes/notes/push_orchestrator_appliance_config-33552cd22b549847.yaml
 delete mode 100644 releasenotes/source/_static/.placeholder
 delete mode 100644 releasenotes/source/_templates/.placeholder
 delete mode 100644 releasenotes/source/conf.py
 delete mode 100644 releasenotes/source/index.rst
 delete mode 100644 releasenotes/source/mitaka.rst
 delete mode 100644 requirements.txt
 delete mode 100644 setup.cfg
 delete mode 100644 setup.py
 delete mode 100644 specs/README.rst
 delete mode 100644 specs/kilo/ci-updates.rst
 delete mode 120000 specs/kilo/skeleton.rst
 delete mode 100644 specs/liberty/doc-updates.rst
 delete mode 100644 specs/liberty/rug_ha.rst
 delete mode 120000 specs/liberty/skeleton.rst
 delete mode 100644 specs/skeleton.rst
 delete mode 100644 specs/template.tst
 delete mode 100644 test-requirements.txt
 delete mode 100755 tools/astara-byonf
 delete mode 100755 tools/generate_config_file_samples.sh
 delete mode 100644 tools/logstash/README.md
 delete mode 100644 tools/logstash/logstash-rug-patterns
 delete mode 100644 tools/logstash/rug-filter.conf
 delete mode 100755 tools/run_functional.sh
 delete mode 100644 tox.ini
 delete mode 100644 vagrant/README.md
 delete mode 100644 vagrant/Vagrantfile
 delete mode 100755 vagrant/devstack.sh
 delete mode 100755 vagrant/vagrant.sh

diff --git a/.gitignore b/.gitignore
deleted file mode 100755
index e6af6d99..00000000
--- a/.gitignore
+++ /dev/null
@@ -1,53 +0,0 @@
-*.py[co]
-
-# Packages
-*.egg
-*.egg-info
-dist
-build
-eggs
-parts
-bin
-var
-sdist
-develop-eggs
-.installed.cfg
-
-# Installer logs
-pip-log.txt
-
-# Unit test / coverage reports
-.coverage
-.tox
-
-#Translations
-*.mo
-
-#Mr Developer
-.mr.developer.cfg
-
-# Packaging output
-*.deb
-
-# pbr output
-AUTHORS
-ChangeLog
-
-orchestrator.ini.sample
-astara/test/functional/test.conf.sample
-
-*.swp
-
-#pycharm cruft
-.idea/*
-
-*.db
-*.db_clean
-
-#macos hidden files
-.DS_Store
-._.DS_Store
-
-# Vagrant
-vagrant/.vagrant
-vagrant/user_local.conf
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index b15d71f3..00000000
--- a/.travis.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-language: python
-python:
-  - "2.7"
-install:
-  - pip install -r test_requirements.txt --use-mirrors
-  - pip install flake8 --use-mirrors
-  - pip install -q . --use-mirrors
-before_script:
-  - flake8 setup.py akanda --ignore=E123,E133,E226,E241,E242,E731
-script: nosetests -d
\ No newline at end of file
diff --git a/LICENSE b/LICENSE
deleted file mode 100644
index 67db8588..00000000
--- a/LICENSE
+++ /dev/null
@@ -1,175 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. diff --git a/README.md b/README.md deleted file mode 100644 index bb2e1556..00000000 --- a/README.md +++ /dev/null @@ -1,70 +0,0 @@ -# Astara - -A service with an open plugin architecture that manages Neutron advanced -services such as routers and load balancers within an OpenStack environment. - -## The Name - -Astara is the sanskrit word for carpet. So why name our project carpet? - -The original code name for this project was simply "The RUG" which was a -reference to a line from the popular film "The Big Lebowski": - -**That rug really tied the room together, did it not?** - -The idea is that "The Rug" really ties OpenStack neutron together nicely. We -felt it was an apt description so we kept the name. - -## Related Projects - -The code for the Astara project lives in several separate repositories to ease -packaging and management: - - - * [Astara](https://github.com/openstack/astara) - - Contains the Orchestration service for managing the creation, configuration, - and health of neutron advanced services as virtual network functions. - - * [Astara Appliance](https://github.com/openstack/astara-appliance) – - Supporting software for the Astara virtual network appliance, which is - a Linux-based service VM that provides routing and L3+ services in - a virtualized network environment. This includes a REST API for managing - the appliance via the Astara orchestration service. - - * [Astara Neutron](https://github.com/openstack/astara-neutron) –  - Ancillary subclasses of several OpenStack Neutron plugins and supporting - code. - - * [Astara Horizon](https://github.com/openstack/astara-horizon) - - OpenStack Horizon Dashboard code. 
-
-## Project Details
-
-Astara is publicly managed through the
-[Astara Launchpad project](https://launchpad.net/astara)
-
-## Code Review
-
-Code is reviewed by collaborators and merged at
-[OpenStack Gerrit review](https://review.openstack.org)
-
-## Documentation
-
-Documentation can be found at [docs.akanda.io](http://docs.akanda.io)
-
-Developer quick start guides for making this all work in Devstack can be
-found `Here`_
-
-## Community
-
-Talk to the developers through the IRC
-[#openstack-astara channel on freenode.net](http://webchat.freenode.net/?randomnick=1&channels=%23openstack-astara&prompt=1&uio=d4)
-
-## License and Copyright
-
-Astara is licensed under the Apache-2.0 license and is Copyright 2015,
-OpenStack Foundation
diff --git a/README.rst b/README.rst
new file mode 100644
index 00000000..d98af48c
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,10 @@
+This project is no longer maintained.
+
+The contents of this repository are still available in the Git
+source code management system. To see the contents of this
+repository before it reached its end of life, please check out the
+previous commit with "git checkout HEAD^1".
+
+For any further questions, please email
+openstack-dev@lists.openstack.org or join #openstack-dev on
+Freenode.
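The new README.rst above tells future readers to recover the tree with
"git checkout HEAD^1". A minimal editorial sketch of that recovery step in
Python follows; it is not part of the patch, the helper name is invented,
and the clone URL is taken from the README above.

    # Sketch only: automate the recovery step described in README.rst.
    import subprocess

    def checkout_pre_retirement(clone_url='https://github.com/openstack/astara',
                                dest='astara'):
        # Clone the retired repository, then step back one commit from
        # the retirement commit ("git checkout HEAD^1" per README.rst).
        subprocess.check_call(['git', 'clone', clone_url, dest])
        subprocess.check_call(['git', '-C', dest, 'checkout', 'HEAD^1'])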
diff --git a/astara/__init__.py b/astara/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/astara/api/__init__.py b/astara/api/__init__.py
deleted file mode 100644
index 8e0c6cee..00000000
--- a/astara/api/__init__.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright 2014 DreamHost, LLC
-#
-# Author: DreamHost, LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_config import cfg
-
-CONF = cfg.CONF
-
-
-api_opts = [
-    cfg.StrOpt('admin_user'),
-    cfg.StrOpt('admin_password', secret=True),
-    cfg.StrOpt('admin_tenant_name'),
-    cfg.StrOpt('auth_url'),
-    cfg.StrOpt('auth_strategy', default='keystone'),
-    cfg.StrOpt('auth_region'),
-    cfg.IntOpt('max_retries', default=3),
-    cfg.IntOpt('retry_delay', default=1),
-    cfg.StrOpt('endpoint_type', default='publicURL'),
-]
-CONF.register_opts(api_opts)
diff --git a/astara/api/astara_client.py b/astara/api/astara_client.py
deleted file mode 100644
index fe2fe811..00000000
--- a/astara/api/astara_client.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# Copyright 2014 DreamHost, LLC
-#
-# Author: DreamHost, LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import requests
-
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_serialization import jsonutils
-
-ASTARA_MGT_SERVICE_PORT = 5000
-ASTARA_BASE_PATH = '/v1/'
-
-LOG = logging.getLogger(__name__)
-CONF = cfg.CONF
-
-AK_CLIENT_OPTS = [
-    cfg.IntOpt('alive_timeout', default=3),
-    cfg.IntOpt('config_timeout', default=90),
-]
-CONF.register_opts(AK_CLIENT_OPTS)
-
-
-def _mgt_url(host, port, path):
-    if ':' in host:
-        host = '[%s]' % host
-    return 'http://%s:%s%s' % (host, port, path)
-
-
-def _get_proxyless_session():
-    s = requests.Session()
-    # ignore any proxy setting because we should have a direct connection
-    s.trust_env = False
-    return s
-
-
-def is_alive(host, port, timeout=None):
-    timeout = timeout or cfg.CONF.alive_timeout
-    path = ASTARA_BASE_PATH + 'firewall/rules'
-    try:
-        s = _get_proxyless_session()
-        r = s.get(_mgt_url(host, port, path), timeout=timeout)
-        if r.status_code == 200:
-            return True
-    except Exception as e:
-        LOG.debug('is_alive for %s failed: %s', host, str(e))
-    return False
-
-
-def get_interfaces(host, port):
-    path = ASTARA_BASE_PATH + 'system/interfaces'
-    s = _get_proxyless_session()
-    r = s.get(_mgt_url(host, port, path), timeout=30)
-    return r.json().get('interfaces', [])
-
-
-def update_config(host, port, config_dict):
-    path = ASTARA_BASE_PATH + 'system/config'
-    headers = {'Content-type': 'application/json'}
-
-    s = _get_proxyless_session()
-    r = s.put(
-        _mgt_url(host, port, path),
-        data=jsonutils.dump_as_bytes(config_dict),
-        headers=headers,
-        timeout=cfg.CONF.config_timeout)
-
-    if r.status_code != 200:
-        raise Exception('Config update failed: %s' % r.text)
-    else:
-        return r.json()
-
-
-def read_labels(host, port):
-    path = ASTARA_BASE_PATH + 'firewall/labels'
-    s = _get_proxyless_session()
-    r = s.post(_mgt_url(host, port, path), timeout=30)
-    return r.json().get('labels', [])
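astara_client, removed above, was the orchestrator's thin REST client for the
appliance's management API (liveness via GET /v1/firewall/rules, config pushes
via PUT /v1/system/config). A short usage sketch against the pre-retirement
tree follows; the appliance address is invented and the config dict is
abbreviated for illustration.

    # Sketch only: how the deleted helpers were typically driven.
    from astara.api import astara_client

    host, port = 'fdca:3ba5:a17a:acda::2', 5000  # hypothetical appliance

    if astara_client.is_alive(host, port):
        # Inspect the appliance, then push a configuration; update_config()
        # raises if the appliance does not answer with HTTP 200.
        interfaces = astara_client.get_interfaces(host, port)
        astara_client.update_config(host, port, {'networks': []})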
diff --git a/astara/api/config/__init__.py b/astara/api/config/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/astara/api/config/common.py b/astara/api/config/common.py
deleted file mode 100644
index ed41adeb..00000000
--- a/astara/api/config/common.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# Copyright 2014 DreamHost, LLC
-#
-# Author: DreamHost, LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import re
-
-from astara.common import constants
-
-SERVICE_STATIC = 'static'
-
-
-def network_config(client, port, ifname, network_type, network_ports=[]):
-    network = client.get_network_detail(port.network_id)
-    subnets_dict = dict((s.id, s) for s in network.subnets)
-
-    return _make_network_config_dict(
-        _interface_config(ifname, port, subnets_dict, network.mtu),
-        network_type,
-        port.network_id,
-        mtu=network.mtu,
-        subnets_dict=subnets_dict,
-        network_ports=network_ports)
-
-
-def _make_network_config_dict(interface, network_type, network_id, mtu=None,
-                              v4_conf=SERVICE_STATIC, v6_conf=SERVICE_STATIC,
-                              subnets_dict={}, network_ports=[]):
-    return {'interface': interface,
-            'network_id': network_id,
-            'mtu': mtu,
-            'v4_conf_service': v4_conf,
-            'v6_conf_service': v6_conf,
-            'network_type': network_type,
-            'subnets': [_subnet_config(s) for s in subnets_dict.values()],
-            'allocations': _allocation_config(network_ports, subnets_dict)}
-
-
-def _interface_config(ifname, port, subnets_dict, mtu):
-    def fmt(fixed):
-        return '%s/%s' % (fixed.ip_address,
-                          subnets_dict[fixed.subnet_id].cidr.prefixlen)
-
-    retval = {'ifname': ifname,
-              'addresses': [fmt(fixed) for fixed in port.fixed_ips]}
-    if mtu:
-        retval['mtu'] = mtu
-
-    return retval
-
-
-def _subnet_config(subnet):
-    return {
-        'id': str(subnet.id),
-        'cidr': str(subnet.cidr),
-        'dhcp_enabled': subnet.enable_dhcp and subnet.ipv6_ra_mode != 'slaac',
-        'dns_nameservers': subnet.dns_nameservers,
-        'host_routes': subnet.host_routes,
-        'gateway_ip': (str(subnet.gateway_ip)
-                       if subnet.gateway_ip is not None
-                       else ''),
-    }
-
-
-def _allocation_config(ports, subnets_dict):
-    r = re.compile('[:.]')
-    service_ports_re = re.compile(
-        '^ASTARA:(' + '|'.join(constants.ASTARA_SERVICE_PORT_TYPES) + '):.*$'
-    )
-    allocations = []
-
-    for port in ports:
-        if service_ports_re.match(port.name):
-            continue
-
-        addrs = {
-            str(fixed.ip_address): subnets_dict[fixed.subnet_id].enable_dhcp
-            for fixed in port.fixed_ips
-        }
-
-        if not addrs:
-            continue
-
-        allocations.append(
-            {
-                'ip_addresses': addrs,
-                'device_id': port.device_id,
-                'hostname': '%s.local' % r.sub('-', sorted(addrs.keys())[0]),
-                'mac_address': port.mac_address
-            }
-        )
-
-    return allocations
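For orientation, this is roughly the shape of one entry that network_config()
above produced, with invented example values (interface addresses carry the
subnet prefix length, and 'allocations' maps each IP to whether its subnet ran
DHCP). A sketch only, not output captured from the retired code.

    # Sketch only: example output of network_config(); all values invented.
    example_network_config = {
        'interface': {'ifname': 'ge1', 'addresses': ['192.0.2.10/24'],
                      'mtu': 1500},
        'network_id': 'example-network-uuid',  # hypothetical
        'mtu': 1500,
        'v4_conf_service': 'static',
        'v6_conf_service': 'static',
        'network_type': 'internal',
        'subnets': [{'id': 'example-subnet-uuid', 'cidr': '192.0.2.0/24',
                     'dhcp_enabled': True, 'dns_nameservers': ['198.51.100.1'],
                     'host_routes': [], 'gateway_ip': '192.0.2.1'}],
        'allocations': [{'ip_addresses': {'192.0.2.20': True},
                         'device_id': 'example-instance-uuid',
                         'hostname': '192-0-2-20.local',
                         'mac_address': 'fa:16:3e:00:00:01'}],
    }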
diff --git a/astara/api/config/loadbalancer.py b/astara/api/config/loadbalancer.py
deleted file mode 100644
index 95df2a93..00000000
--- a/astara/api/config/loadbalancer.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright 2014 DreamHost, LLC
-#
-# Author: DreamHost, LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_log import log as logging
-
-from astara.api.config import common
-
-LOG = logging.getLogger(__name__)
-
-
-def build_config(client, loadbalancer, management_port, iface_map):
-    LOG.debug('Generating configuration for loadbalancer %s', loadbalancer.id)
-
-    network_config = [
-        common.network_config(
-            client,
-            loadbalancer.vip_port,
-            iface_map[loadbalancer.vip_port.network_id],
-            'loadbalancer'),
-
-        common.network_config(
-            client,
-            management_port,
-            iface_map[management_port.network_id],
-            'management'),
-    ]
-
-    out = {
-        'hostname': 'ak-loadbalancer-%s' % loadbalancer.tenant_id,
-        'tenant_id': loadbalancer.tenant_id,
-        'networks': network_config,
-        'services': {
-            'loadbalancer': loadbalancer.to_dict()
-        }
-    }
-    return out
diff --git a/astara/api/config/router.py b/astara/api/config/router.py
deleted file mode 100644
index 95c3eb14..00000000
--- a/astara/api/config/router.py
+++ /dev/null
@@ -1,171 +0,0 @@
-# Copyright 2014 DreamHost, LLC
-#
-# Author: DreamHost, LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import netaddr
-
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_serialization import jsonutils
-
-from astara.common.i18n import _LI, _LW
-from astara.api.config import common
-
-LOG = logging.getLogger(__name__)
-
-DEFAULT_AS = 64512
-
-OPTIONS = [
-    cfg.StrOpt('provider_rules_path',
-               default='/etc/astara/provider_rules.json'),
-    cfg.IntOpt('asn', default=DEFAULT_AS),
-    cfg.IntOpt('neighbor_asn', default=DEFAULT_AS),
-]
-
-cfg.CONF.register_opts(OPTIONS)
-
-EXTERNAL_NET = 'external'
-INTERNAL_NET = 'internal'
-MANAGEMENT_NET = 'management'
-SERVICE_STATIC = 'static'
-SERVICE_DHCP = 'dhcp'
-SERVICE_RA = 'ra'
-
-
-def build_config(worker_context, router, management_port, interfaces):
-    provider_rules = load_provider_rules(cfg.CONF.provider_rules_path)
-
-    networks = generate_network_config(
-        worker_context.neutron,
-        router,
-        management_port,
-        interfaces
-    )
-    gateway = get_default_v4_gateway(
-        worker_context.neutron, router, networks)
-
-    return {
-        'asn': cfg.CONF.asn,
-        'neighbor_asn': cfg.CONF.neighbor_asn,
-        'default_v4_gateway': gateway,
-        'networks': networks,
-        'labels': provider_rules.get('labels', {}),
-        'floating_ips': generate_floating_config(router),
-        'tenant_id': router.tenant_id,
-        'hostname': 'ak-%s' % router.tenant_id,
-        'orchestrator': worker_context.config,
-        'ha_resource': router.ha,
-        'vpn': generate_vpn_config(router, worker_context.neutron),
-    }
-
-
-def get_default_v4_gateway(client, router, networks):
-    """Find the IPv4 default gateway for the router.
-    """
-    LOG.debug('networks = %r', networks)
-    if router.external_port:
-        LOG.debug('external interface = %s', router.external_port.mac_address)
-
-    # Now find the subnet that our external IP is on, and return its
-    # gateway.
-    for n in networks:
-        if n['network_type'] == EXTERNAL_NET:
-            v4_addresses = [
-                addr
-                for addr in (netaddr.IPAddress(ip.partition('/')[0])
-                             for ip in n['interface']['addresses'])
-                if addr.version == 4
-            ]
-            for s in n['subnets']:
-                subnet = netaddr.IPNetwork(s['cidr'])
-                if subnet.version != 4:
-                    continue
-                LOG.debug(
-                    '%s: checking if subnet %s should have the default route',
-                    router.id, s['cidr'])
-                for addr in v4_addresses:
-                    if addr in subnet:
-                        LOG.debug(
-                            '%s: found gateway %s for subnet %s on network %s',
-                            router.id,
-                            s['gateway_ip'],
-                            s['cidr'],
-                            n['network_id'],
-                        )
-                        return s['gateway_ip']
-
-    # Sometimes we are asked to build a configuration for the server
-    # when the external interface is still marked as "down". We can
-    # report that case, but we don't treat it as an error here because
-    # we'll be asked to do it again when the interface comes up.
-    LOG.info(_LI('%s: no default gateway was found'), router.id)
-    return ''
-
-
-def load_provider_rules(path):
-    try:
-        return jsonutils.load(open(path))
-    except:  # pragma nocover
-        LOG.warning(_LW('unable to open provider rules: %s'), path)
-        return {}
-
-
-def generate_network_config(client, router, management_port, iface_map):
-    retval = [
-        common.network_config(
-            client,
-            management_port,
-            iface_map[management_port.network_id],
-            MANAGEMENT_NET
-        )
-    ]
-
-    if router.external_port:
-        retval.extend([
-            common.network_config(
-                client,
-                router.external_port,
-                iface_map[router.external_port.network_id],
-                EXTERNAL_NET)])
-
-    retval.extend(
-        common.network_config(
-            client,
-            p,
-            iface_map[p.network_id],
-            INTERNAL_NET,
-            client.get_network_ports(p.network_id))
-        for p in router.internal_ports)
-
-    return retval
-
-
-def generate_floating_config(router):
-    return [
-        {'floating_ip': str(fip.floating_ip), 'fixed_ip': str(fip.fixed_ip)}
-        for fip in router.floating_ips
-    ]
-
-
-def generate_vpn_config(router, client):
-    if not cfg.CONF.router.ipsec_vpn:
-        return {}
-
-    return {
-        'ipsec': [
-            v.to_dict() for v in client.get_vpnservices_for_router(router.id)
-        ]
-    }
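The gateway selection in get_default_v4_gateway() above reduces to a netaddr
membership test: return the gateway of the first IPv4 external subnet that
contains one of the router's external addresses. A self-contained editorial
sketch with invented values:

    # Sketch only: the core membership test from get_default_v4_gateway().
    import netaddr

    addresses = ['203.0.113.5/24', '2001:db8::5/64']  # hypothetical externals
    subnet_cidr, gateway_ip = '203.0.113.0/24', '203.0.113.1'

    v4_addresses = [a for a in (netaddr.IPAddress(ip.partition('/')[0])
                                for ip in addresses) if a.version == 4]
    subnet = netaddr.IPNetwork(subnet_cidr)
    gateway = gateway_ip if any(a in subnet for a in v4_addresses) else ''
    assert gateway == '203.0.113.1'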
- -from keystoneclient import auth as ksauth -from keystoneclient import session as kssession - -from oslo_config import cfg - - -CONF = cfg.CONF - - -class KeystoneSession(object): - def __init__(self): - self._session = None - self.region_name = CONF.auth_region - ksauth.register_conf_options(CONF, 'keystone_authtoken') - - @property - def session(self): - if not self._session: - # Construct a Keystone session for configured auth_plugin - # and credentials - auth_plugin = ksauth.load_from_conf_options( - cfg.CONF, 'keystone_authtoken') - self._session = kssession.Session(auth=auth_plugin) - return self._session diff --git a/astara/api/neutron.py b/astara/api/neutron.py deleted file mode 100644 index 4c5683d5..00000000 --- a/astara/api/neutron.py +++ /dev/null @@ -1,1269 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import collections -from datetime import datetime -import itertools -import socket -import time -import uuid - -import netaddr - -from neutronclient.v2_0 import client -from neutronclient.common import exceptions as neutron_exc - -from oslo_config import cfg -from oslo_context import context -from oslo_log import log as logging -from oslo_utils import importutils - -from astara.common.i18n import _, _LI, _LW -from astara.common.linux import ip_lib -from astara.api import keystone -from astara.common import constants, rpc - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF - - -neutron_opts = [ - cfg.StrOpt('management_network_id'), - cfg.StrOpt('management_subnet_id'), - cfg.StrOpt('management_prefix', default='fdca:3ba5:a17a:acda::/64'), - cfg.IntOpt('astara_mgt_service_port', default=5000), - cfg.StrOpt('default_instance_flavor', default=1), - cfg.StrOpt('interface_driver', - default='astara.common.linux.interface.OVSInterfaceDriver'), - cfg.BoolOpt('neutron_port_security_extension_enabled', default=True), - - # legacy_fallback option is deprecated and will be removed in the N-release - cfg.BoolOpt('legacy_fallback_mode', default=True, - help=_('Check for resources using the Liberty naming scheme ' - 'when the modern name does not exist.')) -] - -agent_opts = [ - cfg.BoolOpt('log_agent_heartbeats', default=False, - help=_('Log agent heartbeats')), - - # The default AZ name "nova" is selected to match the default - # AZ name in Nova and Cinder. 
- cfg.StrOpt('availability_zone', max_length=255, default='nova', - help=_("Availability zone of this node")), - cfg.IntOpt('report_interval', default=60, - help='seconds between agent reports'), -] - -CONF.register_opts(neutron_opts) -CONF.register_opts(agent_opts, 'AGENT') - - -# copied from Neutron source -DEVICE_OWNER_ROUTER_MGT = "network:router_management" -DEVICE_OWNER_ROUTER_INT = "network:router_interface" -DEVICE_OWNER_ROUTER_HA_INT = "network:ha_router_replicated_interface" -DEVICE_OWNER_ROUTER_GW = "network:router_gateway" -DEVICE_OWNER_FLOATINGIP = "network:floatingip" -DEVICE_OWNER_RUG = "network:astara" - -PLUGIN_ROUTER_RPC_TOPIC = 'q-l3-plugin' -L3_AGENT_REPORT_TOPIC = 'q-reports-plugin' -L3_AGENT_UPDATE_TOPIC = 'l3_agent' -L3_AGENT_MODE = 'legacy' -AGENT_TYPE_L3 = 'L3 agent' -ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%f' - -STATUS_ACTIVE = 'ACTIVE' -STATUS_BUILD = 'BUILD' -STATUS_DOWN = 'DOWN' -STATUS_ERROR = 'ERROR' - -# Service operation status constants -# Copied from neutron.plugings.common.constants.py -# prefaced here with PLUGIN_ -PLUGIN_ACTIVE = "ACTIVE" -PLUGIN_DOWN = "DOWN" -PLUGIN_CREATED = "CREATED" -PLUGIN_PENDING_CREATE = "PENDING_CREATE" -PLUGIN_PENDING_UPDATE = "PENDING_UPDATE" -PLUGIN_PENDING_DELETE = "PENDING_DELETE" -PLUGIN_INACTIVE = "INACTIVE" -PLUGIN_ERROR = "ERROR" - -# XXX not sure these are needed? -ACTIVE_PENDING_STATUSES = ( - PLUGIN_ACTIVE, - PLUGIN_PENDING_CREATE, - PLUGIN_PENDING_UPDATE -) - - -class RouterGone(Exception): - pass - - -class LoadBalancerGone(Exception): - pass - - -class RouterGatewayMissing(Exception): - pass - - -class MissingIPAllocation(Exception): - - def __init__(self, port_id, missing=None): - self.port_id = port_id - self.missing = missing - msg = 'Port %s missing expected IPs ' % port_id - if missing: - ip_msg = ' and '.join( - ('IPv%s address from one of %s' % - (mv, missing_subnets)) - for mv, missing_subnets in missing - ) - msg = msg + ip_msg - super(MissingIPAllocation, self).__init__(msg) - - -class ItemCache(collections.defaultdict): - def __missing__(self, key): - if self.default_factory is None: - raise KeyError(key) - else: - ret = self[key] = self.default_factory(key) - return ret - - -class DictModelBase(object): - DICT_ATTRS = () - - def __repr__(self): - return '<%s (%s:%s)>' % (self.__class__.__name__, - getattr(self, 'name', ''), - getattr(self, 'tenant_id', '')) - - def __eq__(self, other): - return type(self) == type(other) and vars(self) == vars(other) - - def __ne__(self, other): - return not self.__eq__(other) - - def to_dict(self): - """Serialize the object into a dict, handy for building config""" - d = {} - for attr in self.DICT_ATTRS: - val = getattr(self, attr) - if isinstance(val, list): - # this'll eventually break something and you can find this - # comment and hurt me. 
- serialized = [] - for v in val: - if hasattr(v, 'to_dict'): - serialized.append(v.to_dict()) - elif isinstance(v, netaddr.IPNetwork): - serialized.append(str(v)) - else: - serialized.append(v) - val = serialized - elif hasattr(val, 'to_dict'): - val = val.to_dict() - elif isinstance(val, (netaddr.IPAddress, netaddr.IPNetwork)): - val = str(val) - d[attr] = val - return d - - -class Router(object): - def __init__(self, id_, tenant_id, name, admin_state_up, status, - external_port=None, internal_ports=None, floating_ips=None, - ha=False): - self.id = id_ - self.tenant_id = tenant_id - self.name = name - self.admin_state_up = admin_state_up - self.status = status - self.external_port = external_port - self.internal_ports = internal_ports or [] - self.floating_ips = floating_ips or [] - self.ha = ha - - def __repr__(self): - return '<%s (%s:%s)>' % (self.__class__.__name__, - self.name, - self.tenant_id) - - def __eq__(self, other): - return type(self) == type(other) and vars(self) == vars(other) - - def __ne__(self, other): - return not self.__eq__(other) - - @classmethod - def from_dict(cls, d): - external_port = None - internal_ports = [] - - if d.get('gw_port'): - external_port = Port.from_dict(d.get('gw_port')) - - for port_dict in d.get('_interfaces', []): - port = Port.from_dict(port_dict) - int_owners = [DEVICE_OWNER_ROUTER_INT, DEVICE_OWNER_ROUTER_HA_INT] - if port.device_owner in int_owners: - internal_ports.append(port) - - fips = [FloatingIP.from_dict(fip) for fip in d.get('_floatingips', [])] - - ha = d.get('ha', False) - - return cls( - d['id'], - d['tenant_id'], - d['name'], - d['admin_state_up'], - d['status'], - external_port, - internal_ports, - floating_ips=fips, - ha=ha, - ) - - @property - def ports(self): - if self.external_port: - return itertools.chain( - [self.external_port], - self.internal_ports - ) - else: - return self.internal_ports - - -class Network(DictModelBase): - DICT_ATTRS = ('id', 'name', 'tenant_id', 'status', 'shared', - 'admin_state_up', 'mtu', 'port_security_enabled') - - def __init__(self, id_, name, tenant_id, status, shared, admin_state_up, - mtu=None, port_security_enabled=False, subnets=()): - self.id = id_ - self.name = name - self.tenant_id = tenant_id - self.shared = shared - self.admin_state_up = admin_state_up - self.mtu = mtu - self.port_security_enabled = port_security_enabled - self.subnets = subnets - - @classmethod - def from_dict(cls, d): - optional = {} - - for opt in ['mtu', 'port_security_enabled']: - if opt in d: - optional[opt] = d[opt] - - return cls( - d['id'], - d['name'], - d['tenant_id'], - d['status'], - d['shared'], - d['admin_state_up'], - **optional - ) - - -class Subnet(DictModelBase): - DICT_ATTRS = ('id', 'name', 'tenant_id', 'network_id', 'ip_version', - 'cidr', 'gateway_ip', 'enable_dhcp', 'dns_nameservers', - 'host_routes', 'ipv6_ra_mode') - - def __init__(self, id_, name, tenant_id, network_id, ip_version, cidr, - gateway_ip, enable_dhcp, dns_nameservers, host_routes, - ipv6_ra_mode): - self.id = id_ - self.name = name - self.tenant_id = tenant_id - self.network_id = network_id - self.ip_version = ip_version - try: - self.cidr = netaddr.IPNetwork(cidr) - except (TypeError, netaddr.AddrFormatError) as e: - raise ValueError( - _('Invalid CIDR %r for subnet %s of network %s: %s') % ( - cidr, id_, network_id, e, - ) - ) - try: - self.gateway_ip = netaddr.IPAddress(gateway_ip) - except (TypeError, netaddr.AddrFormatError) as e: - self.gateway_ip = None - LOG.info(_LI( - 'Bad gateway_ip on subnet %s: %r (%s)'), - id_, 
gateway_ip, e) - self.enable_dhcp = enable_dhcp - self.dns_nameservers = dns_nameservers - self.host_routes = host_routes - self.ipv6_ra_mode = ipv6_ra_mode - - @classmethod - def from_dict(cls, d): - return cls( - d['id'], - d['name'], - d['tenant_id'], - d['network_id'], - d['ip_version'], - d['cidr'], - d['gateway_ip'], - d['enable_dhcp'], - d['dns_nameservers'], - d['host_routes'], - d['ipv6_ra_mode']) - - -class Port(DictModelBase): - DICT_ATTRS = ('id', 'device_id', 'fixed_ips', 'mac_address', 'network_id', - 'device_owner', 'name') - - def __init__(self, id_, device_id='', fixed_ips=None, mac_address='', - network_id='', device_owner='', name='', - neutron_port_dict=None): - self.id = id_ - self.device_id = device_id - self.fixed_ips = fixed_ips or [] - self.mac_address = mac_address - self.network_id = network_id - self.device_owner = device_owner - self.name = name - - # Unlike instance ports, management ports are created at boot and - # could be created on the Pez side. We need to pass that info - # back to Rug via RPC so hang on to the original port data for - # easier serialization, allowing Rug to re-create (via from_dict). - # without another neutron call. - self._neutron_port_dict = neutron_port_dict or {} - - def __eq__(self, other): - return type(self) == type(other) and vars(self) == vars(other) - - def __ne__(self, other): - return not self.__eq__(other) - - @property - def first_v4(self): - for fixed_ip in self.fixed_ips: - ip = netaddr.IPAddress(fixed_ip.ip_address) - if ip.version == 4: - return str(ip) - return None - - @classmethod - def from_dict(cls, d): - return cls( - d['id'], - d['device_id'], - fixed_ips=[FixedIp.from_dict(fip) for fip in d['fixed_ips']], - mac_address=d['mac_address'], - network_id=d['network_id'], - device_owner=d['device_owner'], - name=d['name'], - neutron_port_dict=d) - - def to_dict(self): - return self._neutron_port_dict - - -class FixedIp(DictModelBase): - DICT_ATTRS = ('subnet_id', 'ip_address') - - def __init__(self, subnet_id, ip_address): - self.subnet_id = subnet_id - self.ip_address = netaddr.IPAddress(ip_address) - - def __eq__(self, other): - return type(self) == type(other) and vars(self) == vars(other) - - def __ne__(self, other): - return not self.__eq__(other) - - @classmethod - def from_dict(cls, d): - return cls(d['subnet_id'], d['ip_address']) - - -class FloatingIP(object): - def __init__(self, id_, floating_ip, fixed_ip): - self.id = id_ - self.floating_ip = netaddr.IPAddress(floating_ip) - self.fixed_ip = netaddr.IPAddress(fixed_ip) - - @classmethod - def from_dict(cls, d): - return cls( - d['id'], - d['floating_ip_address'], - d['fixed_ip_address'] - ) - - -class LoadBalancer(DictModelBase): - DICT_ATTRS = ('id', 'tenant_id', 'name', 'admin_state_up', 'status', - 'listeners', 'vip_address', 'vip_port') - - def __init__(self, id_, tenant_id, name, admin_state_up, status, - vip_address=None, vip_port=None, listeners=()): - self.id = id_ - self.tenant_id = tenant_id - self.name = name - self.admin_state_up = admin_state_up - self.status = status - self.vip_address = vip_address - self.vip_port = vip_port - self.listeners = listeners - - @property - def ports(self): - if self.vip_port: - return [self.vip_port] - else: - return [] - - @classmethod - def from_dict(cls, d): - if d.get('vip_port'): - vip_port = Port.from_dict(d.get('vip_port')) - vip_address = d['vip_address'] - else: - vip_port = None - vip_address = None - return cls( - d['id'], - d['tenant_id'], - d['name'], - d['admin_state_up'], - 
d['provisioning_status'], - vip_address, - vip_port, - [Listener.from_dict(l) for l in d.get('listeners')], - ) - - -class Listener(DictModelBase): - DICT_ATTRS = ('id', 'tenant_id', 'name', 'admin_state_up', 'protocol', - 'protocol_port', 'default_pool') - - def __init__(self, id_, tenant_id, name, admin_state_up, protocol, - protocol_port, default_pool=None): - self.id = id_ - self.tenant_id = tenant_id - self.name = name - self.admin_state_up = admin_state_up - self.protocol = protocol - self.protocol_port = protocol_port - self.default_pool = default_pool - - @classmethod - def from_dict(cls, d): - # NOTE: we may be constructing a loadbalancer without the full - # details during pre-populate. To avoid having to do more neutron - # calls to find the additional data, support instantiation without - # full details. - return cls( - d['id'], - d.get('tenant_id'), - d.get('name'), - d.get('admin_state_up'), - d.get('protocol'), - d.get('protocol_port'), - ) - - -class Pool(DictModelBase): - DICT_ATTRS = ( - 'id', 'tenant_id', 'name', 'admin_state_up', 'lb_algorithm', - 'protocol', 'healthmonitor', 'session_persistence', 'members' - ) - - def __init__(self, id_, tenant_id, name, admin_state_up, lb_algorithm, - protocol, healthmonitor=None, session_persistence=None, - members=()): - self.id = id_ - self.tenant_id = tenant_id - self.name = name - self.admin_state_up = admin_state_up - self.lb_algorithm = lb_algorithm - self.protocol = protocol - self.healthmonitor = healthmonitor - self.session_persistence = session_persistence - self.members = members - - @classmethod - def from_dict(cls, d): - return cls( - d['id'], - d['tenant_id'], - d['name'], - d['admin_state_up'], - d['lb_algorithm'], - d['protocol'], - ) - - -class Member(DictModelBase): - DICT_ATTRS = ('id', 'tenant_id', 'admin_state_up', 'address', - 'protocol_port', 'weight', 'subnet') - - def __init__(self, id_, tenant_id, admin_state_up, address, protocol_port, - weight, subnet=None): - self.id = id_ - self.tenant_id = tenant_id - self.admin_state_up = admin_state_up - self.address = netaddr.IPAddress(address) - self.protocol_port = protocol_port - self.weight = weight - self.subnet = subnet - - @classmethod - def from_dict(cls, d): - return cls( - d['id'], - d['tenant_id'], - d['admin_state_up'], - d['address'], - d['protocol_port'], - d['weight'], - ) - - -class DeadPeerDetection(DictModelBase): - DICT_ATTRS = ('action', 'interval', 'timeout') - - def __init__(self, action, interval, timeout): - self.action = action - self.interval = interval - self.timeout = timeout - - @classmethod - def from_dict(cls, d): - return cls( - d['action'], - d['interval'], - d['timeout'] - ) - - -class Lifetime(DictModelBase): - DICT_ATTRS = ('units', 'value') - - def __init__(self, units, value): - self.units = units - self.value = value - - @classmethod - def from_dict(cls, d): - return cls( - d['units'], - d['value'] - ) - - -class EndpointGroup(DictModelBase): - DICT_ATTRS = ('id', 'tenant_id', 'name', 'type', 'endpoints') - - def __init__(self, id_, tenant_id, name, type_, endpoints=()): - self.id = id_ - self.tenant_id = tenant_id - self.name = name - self.type = type_ - if type_ == 'cidr': - self.endpoints = [netaddr.IPNetwork(ep) for ep in endpoints] - else: - self.endpoints = endpoints - - @classmethod - def from_dict(cls, d): - return cls( - d['id'], - d['tenant_id'], - d['name'], - d['type'], - d['endpoints'] - ) - - -class IkePolicy(DictModelBase): - DICT_ATTRS = ('id', 'tenant_id', 'name', 'ike_version', 'auth_algorithm', - 
'encryption_algorithm', 'pfs', 'lifetime', - 'phase1_negotiation_mode') - - def __init__(self, id_, tenant_id, name, ike_version, auth_algorithm, - encryption_algorithm, pfs, phase1_negotiation_mode, lifetime): - self.id = id_ - self.tenant_id = tenant_id - self.name = name - self.ike_version = ike_version - self.auth_algorithm = auth_algorithm - self.encryption_algorithm = encryption_algorithm - self.pfs = pfs - self.phase1_negotiation_mode = phase1_negotiation_mode - self.lifetime = lifetime - - @classmethod - def from_dict(cls, d): - return cls( - d['id'], - d['tenant_id'], - d['name'], - d['ike_version'], - d['auth_algorithm'], - d['encryption_algorithm'], - d['pfs'], - d['phase1_negotiation_mode'], - Lifetime.from_dict(d['lifetime']) - ) - - -class IpsecPolicy(DictModelBase): - DICT_ATTRS = ('id', 'tenant_id', 'name', 'transform_protocol', - 'auth_algorithm', 'encryption_algorithm', - 'encapsulation_mode', 'lifetime', 'pfs') - - def __init__(self, id_, tenant_id, name, transform_protocol, - auth_algorithm, encryption_algorithm, encapsulation_mode, - lifetime, pfs): - self.id = id_ - self.tenant_id = tenant_id - self.name = name - self.transform_protocol = transform_protocol - self.auth_algorithm = auth_algorithm - self.encryption_algorithm = encryption_algorithm - self.encapsulation_mode = encapsulation_mode - self.lifetime = lifetime - self.pfs = pfs - - @classmethod - def from_dict(cls, d): - return cls( - d['id'], - d['tenant_id'], - d['name'], - d['transform_protocol'], - d['auth_algorithm'], - d['encryption_algorithm'], - d['encapsulation_mode'], - Lifetime.from_dict(d['lifetime']), - d['pfs'] - ) - - -class IpsecSiteConnection(DictModelBase): - DICT_ATTRS = ('id', 'tenant_id', 'name', 'peer_address', 'peer_id', - 'route_mode', 'mtu', 'initiator', 'auth_mode', 'psk', 'dpd', - 'status', 'admin_state_up', 'vpnservice_id', - 'local_ep_group', 'peer_ep_group', 'peer_cidrs', 'ikepolicy', - 'ipsecpolicy') - - def __init__(self, id_, tenant_id, name, peer_address, peer_id, - admin_state_up, route_mode, mtu, initiator, auth_mode, psk, - dpd, status, vpnservice_id, local_ep_group=None, - peer_ep_group=None, peer_cidrs=[], ikepolicy=None, - ipsecpolicy=None): - self.id = id_ - self.tenant_id = tenant_id - self.name = name - self.peer_address = netaddr.IPAddress(peer_address) - self.peer_id = peer_id - self.route_mode = route_mode - self.mtu = mtu - self.initiator = initiator - self.auth_mode = auth_mode - self.psk = psk - self.dpd = dpd - self.status = status - self.admin_state_up = admin_state_up - self.vpnservice_id = vpnservice_id - self.ipsecpolicy = ipsecpolicy - self.ikepolicy = ikepolicy - self.local_ep_group = local_ep_group - self.peer_ep_group = peer_ep_group - self.peer_cidrs = [netaddr.IPNetwork(pc) for pc in peer_cidrs] - - @classmethod - def from_dict(cls, d): - return cls( - d['id'], - d['tenant_id'], - d['name'], - d['peer_address'], - d['peer_id'], - d['admin_state_up'], - d['route_mode'], - d['mtu'], - d['initiator'], - d['auth_mode'], - d['psk'], - DeadPeerDetection.from_dict(d['dpd']), - d['status'], - d['vpnservice_id'], - peer_cidrs=d['peer_cidrs'] - ) - - -class VpnService(DictModelBase): - DICT_ATTRS = ('id', 'name', 'status', 'admin_state_up', 'external_v4_ip', - 'external_v6_ip', 'subnet_id', 'router_id', - 'ipsec_connections') - - def __init__(self, id_, name, status, admin_state_up, external_v4_ip, - external_v6_ip, router_id, subnet_id=None, - ipsec_connections=()): - self.id = id_ - self.name = name - self.status = status - self.admin_state_up = admin_state_up 
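# (Illustrative aside, not part of the original file: the assignments
# that follow coerce raw API strings with netaddr, which fails loudly
# on malformed input:
#     netaddr.IPAddress('192.0.2.10')   # ok
#     netaddr.IPAddress('not-an-ip')    # raises netaddr.AddrFormatError
# so a bad external IP aborts VpnService construction, whereas Subnet
# above deliberately catches the same errors for gateway_ip and falls
# back to None.)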
- self.external_v4_ip = netaddr.IPAddress(external_v4_ip) - self.external_v6_ip = netaddr.IPAddress(external_v6_ip) - self.router_id = router_id - self.subnet_id = subnet_id - self.ipsec_connections = ipsec_connections - - @classmethod - def from_dict(cls, d): - return cls( - d['id'], - d['name'], - d['status'], - d['admin_state_up'], - d['external_v4_ip'], - d['external_v6_ip'], - d['router_id'], - d.get('subnet_id') - ) - - -class AstaraExtClientWrapper(client.Client): - """Add client support for Astara Extensions. """ - - routerstatus_path = '/dhrouterstatus' - lbstatus_path = '/akloadbalancerstatus' - - def update_router_status(self, router, status): - return self.put( - '%s/%s' % (self.routerstatus_path, router), - body={'routerstatus': {'status': status}} - ) - - def update_loadbalancer_status(self, load_balancer, status): - return self.put( - '%s/%s' % (self.lbstatus_path, load_balancer), - # XXX We should be differentiating between these 2 states - body={ - 'loadbalancerstatus': { - 'provisioning_status': status, - 'operating_status': status, - } - } - ) - - def list_byonfs(self, retrieve_all=True, **_params): - return self.list('byonfs', '/byonf', retrieve_all, **_params).get( - 'byonfs', - [] - ) - - -class L3PluginApi(object): - - """Agent side of the Quantum l3 agent RPC API.""" - - BASE_RPC_API_VERSION = '1.0' - - def __init__(self, topic, host): - self.host = host - self._client = rpc.get_rpc_client( - topic=topic, - exchange=cfg.CONF.neutron_control_exchange, - version=self.BASE_RPC_API_VERSION) - - def get_routers(self, router_id=None): - """Make a remote process call to retrieve the sync data for routers.""" - router_id = [router_id] if router_id else None - # yes the plural is intended for havana compliance - retval = self._client.call( - context.get_admin_context().to_dict(), - 'sync_routers', host=self.host, router_ids=router_id) # plural - return retval - - -class Neutron(object): - def __init__(self, conf): - self.conf = conf - ks_session = keystone.KeystoneSession() - self.api_client = AstaraExtClientWrapper( - session=ks_session.session, - endpoint_type=cfg.CONF.endpoint_type, - ) - self.l3_rpc_client = L3PluginApi(PLUGIN_ROUTER_RPC_TOPIC, - cfg.CONF.host) - - def update_loadbalancer_status(self, loadbalancer_id, status): - try: - self.api_client.update_loadbalancer_status(loadbalancer_id, status) - except Exception as e: - # We don't want to die just because we can't tell neutron - # what the status of the router should be. Log the error - # but otherwise ignore it. - LOG.info(_LI( - 'ignoring failure to update status for %s to %s: %s'), - id, status, e, - ) - - def get_loadbalancers(self, tenant_id=None): - if tenant_id: - res = self.api_client.list_loadbalancers(tenant_id=tenant_id) - else: - res = self.api_client.list_loadbalancers() - return [ - LoadBalancer.from_dict(lb_data) for lb_data in - res.get('loadbalancers', []) - ] - - def get_loadbalancer_detail(self, lb_id): - try: - lb_data = self.api_client.show_loadbalancer(lb_id)['loadbalancer'] - except neutron_exc.NotFound: - raise LoadBalancerGone( - 'No load balancer with id %s found.' 
% lb_id) - - lb = LoadBalancer.from_dict(lb_data) - - lb.vip_port = Port.from_dict( - self.api_client.show_port(lb_data['vip_port_id'])['port'] - ) - lb.vip_address = lb_data.get('vip_address') - lb.listeners = [ - self.get_listener_detail(l['id']) for l in lb_data['listeners'] - ] - - return lb - - def get_listener_detail(self, listener_id): - data = self.api_client.show_listener(listener_id)['listener'] - listener = Listener.from_dict(data) - if data.get('default_pool_id'): - listener.default_pool = self.\ - get_pool_detail(data['default_pool_id']) - return listener - - def get_pool_detail(self, pool_id): - data = self.api_client.show_lbaas_pool(pool_id)['pool'] - pool = Pool.from_dict(data) - if data.get('members'): - pool.members = [self.get_member_detail(pool_id, m['id']) - for m in data['members']] - return pool - - def get_loadbalancer_by_listener(self, listener_id, tenant_id=None): - for lb in self.get_loadbalancers(tenant_id): - lbd = self.get_loadbalancer_detail(lb.id) - if listener_id in [l.id for l in lbd.listeners]: - return lbd - - def get_loadbalancer_by_member(self, member_id, tenant_id=None): - for lb in self.get_loadbalancers(tenant_id): - lbd = self.get_loadbalancer_detail(lb.id) - for listener in lbd.listeners: - pd = self.get_pool_detail(listener.default_pool.id) - if member_id in [m.id for m in pd.members]: - return lbd - - def get_member_detail(self, pool_id, member_id): - data = self.api_client.show_lbaas_member(member_id, pool_id)['member'] - member = Member.from_dict(data) - return member - - def get_vpnservices_for_router(self, router_id): - response = self.api_client.list_vpnservices(router_id=router_id) - retval = [] - for vpn_svc in response.get('vpnservices', []): - svc = VpnService.from_dict(vpn_svc) - svc.ipsec_connections = self.get_ipsec_connections_for_vpnservice( - svc.id - ) - retval.append(svc) - - return retval - - def get_ipsec_connections_for_vpnservice(self, vpnservice_id): - retval = [] - response = self.api_client.list_ipsec_site_connections( - vpnservice_id=vpnservice_id - ) - - # these items could be used more than once per router, so cache - # response while building this object - ikepolicy_cache = ItemCache(self.get_ikepolicy) - ipsecpolicy_cache = ItemCache(self.get_ipsecpolicy) - ep_cache = ItemCache(self.get_vpn_endpoint_group) - - for ipsec_conn in response.get('ipsec_site_connections', []): - conn = IpsecSiteConnection.from_dict(ipsec_conn) - conn.ipsecpolicy = ipsecpolicy_cache[ipsec_conn['ipsecpolicy_id']] - conn.ikepolicy = ikepolicy_cache[ipsec_conn['ikepolicy_id']] - conn.local_ep_group = ep_cache[ipsec_conn['local_ep_group_id']] - conn.peer_ep_group = ep_cache[ipsec_conn['peer_ep_group_id']] - retval.append(conn) - - return retval - - def get_ikepolicy(self, ikepolicy_id): - return IkePolicy.from_dict( - self.api_client.show_ikepolicy(ikepolicy_id)['ikepolicy'] - ) - - def get_ipsecpolicy(self, ipsecpolicy_id): - return IpsecPolicy.from_dict( - self.api_client.show_ipsecpolicy(ipsecpolicy_id)['ipsecpolicy'] - ) - - def get_vpn_endpoint_group(self, ep_group_id): - return EndpointGroup.from_dict( - self.api_client.show_endpoint_group(ep_group_id)['endpoint_group'] - ) - - def get_routers(self, detailed=True): - """Return a list of routers.""" - if detailed: - return [Router.from_dict(r) for r in - self.l3_rpc_client.get_routers()] - - routers = self.api_client.list_routers().get('routers', []) - return [Router.from_dict(r) for r in routers] - - def get_router_detail(self, router_id): - """Return detailed information about a router 
and its networks.""" - router = self.l3_rpc_client.get_routers(router_id=router_id) - try: - return Router.from_dict(router[0]) - except IndexError: - raise RouterGone(_('the router is no longer available')) - - def get_router_for_tenant(self, tenant_id): - response = self.api_client.list_routers(tenant_id=tenant_id) - routers = response.get('routers', []) - - if routers: - return self.get_router_detail(routers[0]['id']) - else: - LOG.debug('found no router for tenant %s', tenant_id) - LOG.debug('query response: %r', response) - return None - - def get_network_ports(self, network_id): - return [Port.from_dict(p) for p in - self.api_client.list_ports(network_id=network_id)['ports']] - - def get_network_subnets(self, network_id): - response = [] - subnet_response = self.api_client.list_subnets(network_id=network_id) - subnets = subnet_response['subnets'] - for s in subnets: - try: - response.append(Subnet.from_dict(s)) - except Exception as e: - LOG.info(_LI('ignoring subnet %s (%s) on network %s: %s'), - s.get('id'), s.get('cidr'), - network_id, e) - return response - - def get_network_detail(self, network_id): - network_response = self.api_client.show_network(network_id)['network'] - network = Network.from_dict(network_response) - network.subnets = self.get_network_subnets(network_id) - - return network - - def get_ports_for_instance(self, instance_id): - ports = self.api_client.list_ports(device_id=instance_id)['ports'] - - mgt_port = None - intf_ports = [] - - for port in (Port.from_dict(p) for p in ports): - if port.network_id == self.conf.management_network_id: - mgt_port = port - else: - intf_ports.append(port) - return mgt_port, intf_ports - - def create_management_port(self, object_id): - return self.create_vrrp_port( - object_id, - self.conf.management_network_id, - 'MGT' - ) - - def create_vrrp_port(self, object_id, network_id, label='VRRP'): - port_dict = dict( - admin_state_up=True, - network_id=network_id, - name='ASTARA:%s:%s' % (label, object_id), - security_groups=[] - ) - - dis_port_types = ( - constants.ASTARA_SERVICE_PORT_TYPES + - constants.ASTARA_MGT_PORT_TYPES - ) - if label in dis_port_types: - port_dict['fixed_ips'] = [] - # disable port_security on VRRP, LB, MGT - if self.conf.neutron_port_security_extension_enabled: - port_dict['port_security_enabled'] = False - - response = self.api_client.create_port(dict(port=port_dict)) - port_data = response.get('port') - if not port_data: - raise ValueError(_( - 'Unable to create %s port for %s on network %s') % - (label, object_id, network_id) - ) - port = Port.from_dict(port_data) - - return port - - def delete_vrrp_port(self, object_id, label='VRRP'): - name = 'ASTARA:%s:%s' % (label, object_id) - response = self.api_client.list_ports(name=name) - port_data = response.get('ports') - - if not port_data and self.conf.legacy_fallback_mode: - name = name.replace('ASTARA', 'AKANDA') - LOG.info(_LI('Attempting legacy query for %s.'), name) - response = self.api_client.list_ports(name=name) - port_data = response.get('ports') - - if not port_data: - LOG.warning(_LW( - 'Unable to find VRRP port to delete with name %s.'), name) - for port in port_data: - self.api_client.delete_port(port['id']) - - def _ensure_local_port(self, network_id, subnet_id, prefix, - network_type): - driver = importutils.import_object(self.conf.interface_driver, - self.conf) - - host_id = str(uuid.uuid5(uuid.NAMESPACE_DNS, socket.gethostname())) - - name = 'ASTARA:RUG:%s' % network_type.upper() - - query_dict = dict(device_owner=DEVICE_OWNER_RUG, -
device_id=host_id, - name=name, - network_id=network_id) - - ports = self.api_client.list_ports(**query_dict)['ports'] - - if not ports and self.conf.legacy_fallback_mode: - LOG.info(_LI('Attempting legacy query for %s.'), name) - query_dict.update({ - 'name': name.replace('ASTARA', 'AKANDA'), - 'device_owner': DEVICE_OWNER_RUG.replace('astara', 'akanda') - }) - ports = self.api_client.list_ports(**query_dict)['ports'] - - if ports and 'AKANDA' in ports[0]['name']: - port = Port.from_dict(ports[0]) - LOG.info( - _LI('migrating port to ASTARA for port %r and using local %s'), - port, - network_type - ) - self.api_client.update_port( - port.id, - { - 'port': { - 'name': port.name.replace('AKANDA', 'ASTARA'), - 'device_owner': DEVICE_OWNER_RUG - } - } - ) - elif ports: - port = Port.from_dict(ports[0]) - LOG.info(_LI('already have local %s port, using %r'), - network_type, port) - else: - LOG.info(_LI('creating a new local %s port'), network_type) - port_dict = { - 'admin_state_up': True, - 'network_id': network_id, - 'device_owner': DEVICE_OWNER_ROUTER_INT, # lying here for IP - 'name': name, - 'device_id': host_id, - 'fixed_ips': [{ - 'subnet_id': subnet_id - }], - 'binding:host_id': socket.gethostname() - } - port = Port.from_dict( - self.api_client.create_port(dict(port=port_dict))['port']) - - # remove lie that enabled us pick IP on slaac subnet - self.api_client.update_port( - port.id, - {'port': {'device_owner': DEVICE_OWNER_RUG}} - ) - port.device_owner = DEVICE_OWNER_RUG - - LOG.info(_LI('new local %s port: %r'), network_type, port) - - # create the tap interface if it doesn't already exist - if not ip_lib.device_exists(driver.get_device_name(port)): - driver.plug( - port.network_id, - port.id, - driver.get_device_name(port), - port.mac_address) - - # add sleep to ensure that port is setup before use - time.sleep(1) - - try: - fixed_ip = [fip for fip in port.fixed_ips - if fip.subnet_id == subnet_id][0] - except IndexError: - raise MissingIPAllocation(port.id) - - ip_cidr = '%s/%s' % (fixed_ip.ip_address, prefix.split('/')[1]) - driver.init_l3(driver.get_device_name(port), [ip_cidr]) - return ip_cidr - - def ensure_local_service_port(self): - return self._ensure_local_port( - self.conf.management_network_id, - self.conf.management_subnet_id, - self.conf.management_prefix, - 'service') - - def purge_management_interface(self): - driver = importutils.import_object( - self.conf.interface_driver, - self.conf - ) - host_id = str(uuid.uuid5(uuid.NAMESPACE_DNS, socket.gethostname())) - query_dict = dict( - device_owner=DEVICE_OWNER_RUG, - name='ASTARA:RUG:MANAGEMENT', - device_id=host_id - ) - ports = self.api_client.list_ports(**query_dict)['ports'] - - if not ports and self.conf.legacy_fallback_mode: - query_dict.update({ - 'name': 'AKANDA:RUG:MANAGEMENT', - 'device_owner': DEVICE_OWNER_RUG.replace('astara', 'akanda') - }) - ports = self.api_client.list_ports(**query_dict)['ports'] - - if ports: - port = Port.from_dict(ports[0]) - - device_name = driver.get_device_name(port) - driver.unplug(device_name) - - def update_router_status(self, router_id, status): - try: - self.api_client.update_router_status(router_id, status) - except Exception as e: - # We don't want to die just because we can't tell neutron - # what the status of the router should be. Log the error - # but otherwise ignore it. 
- LOG.info(_LI( - 'ignoring failure to update status for %s to %s: %s'), - id, status, e, - ) - - def clear_device_id(self, port): - self.api_client.update_port(port.id, {'port': {'device_id': ''}}) - - def tenant_has_byo_for_function(self, tenant_id, function_type): - retval = self.api_client.list_byonfs( - function_type=function_type, - tenant_id=tenant_id - ) - if retval: - LOG.debug( - 'Found BYONF for tenant %s with function %s', - tenant_id, function_type) - return retval[0] - - -class NeutronAgentReporter(object): - def __init__(self): - self.host = CONF.host - self.state = { - 'binary': 'astara-agent', - 'host': self.host, - 'availability_zone': CONF.AGENT.availability_zone, - 'topic': L3_AGENT_UPDATE_TOPIC, - 'configurations': { - 'agent_mode': L3_AGENT_MODE, - 'handle_internal_only_routers': True, - 'external_network_bridge': '', - 'gateway_external_network_id': '', - 'interface_driver': CONF.interface_driver, - 'log_agent_heartbeats': CONF.AGENT.log_agent_heartbeats, - 'routers': 0, # TODO: make this number accurate - 'ex_gw_ports': 0, - 'interfaces': 0, - 'floating_ips': 0 - }, - 'start_flag': True, - 'agent_type': AGENT_TYPE_L3, - } - - self._client = rpc.get_rpc_client( - topic=L3_AGENT_REPORT_TOPIC, - exchange=cfg.CONF.neutron_control_exchange, - version='1.0' - ) - - def report(self): - try: - self.state['uuid'] = str(uuid.uuid4()) - self._client.call( - context.get_admin_context().to_dict(), - 'report_state', - agent_state={'agent_state': self.state}, - time=datetime.utcnow().strftime(ISO8601_TIME_FORMAT) - ) - self.state['start_flag'] = False - except AttributeError: - raise - LOG.info(_LI('State reporting not supported in Neutron Server')) - except: - LOG.exception(_('Error reporting state')) - - def report_forever(self): - period = CONF.AGENT.report_interval - while True: - self.report() - time.sleep(period) - LOG.debug('waking up') diff --git a/astara/api/nova.py b/astara/api/nova.py deleted file mode 100644 index b98c64fc..00000000 --- a/astara/api/nova.py +++ /dev/null @@ -1,453 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
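The NeutronAgentReporter above drives a fixed-interval heartbeat: report_forever loops on report, which flips start_flag off after the first successful call so Neutron treats later reports as updates rather than new-agent registrations, and swallows transport errors to keep the loop alive. A minimal, self-contained sketch of that pattern (the send_state callable and max_beats bound are illustrative stand-ins, not the original RPC plumbing):

```python
import time


class HeartbeatReporter(object):
    """Sketch of the report/report_forever pattern, assuming a
    send_state() callable that raises on failure."""

    def __init__(self, send_state, interval):
        self.send_state = send_state
        self.interval = interval
        self.state = {'start_flag': True}

    def report(self):
        try:
            self.send_state(dict(self.state))
            # Only the first successful report announces a new agent;
            # every later heartbeat is an update.
            self.state['start_flag'] = False
        except Exception:
            # Keep the loop alive on failure, mirroring the original.
            pass

    def report_forever(self, max_beats=None):
        beats = 0
        while max_beats is None or beats < max_beats:
            self.report()
            beats += 1
            time.sleep(self.interval)


# Three heartbeats against a no-op transport.
reporter = HeartbeatReporter(send_state=lambda s: None, interval=0.01)
reporter.report_forever(max_beats=3)
assert reporter.state['start_flag'] is False
```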
- -from datetime import datetime -import time - -import netaddr -from novaclient import client -from novaclient import exceptions as novaclient_exceptions -from oslo_config import cfg -from oslo_log import log as logging - -from astara.common.i18n import _LW, _LE, _LI -from astara.api import keystone -from astara.api import neutron -from astara.common import config -from astara.pez import rpcapi as pez_api - -LOG = logging.getLogger(__name__) - -OPTIONS = [ - cfg.StrOpt( - 'ssh_public_key', - help="Path to the SSH public key for the 'astara' user within " - "appliance instances", - default='/etc/astara/astara.pub'), - cfg.StrOpt( - 'instance_provider', default='on_demand', - help='Which instance provider to use (on_demand, pez)'), - cfg.StrOpt( - 'astara_boot_command', default='astara-configure-management', - help='The boot command to run to configure the appliance'), -] -cfg.CONF.register_opts(OPTIONS) - - -class NovaInstanceDeleteTimeout(Exception): - pass - - -class InstanceInfo(object): - def __init__(self, instance_id, name, management_port=None, ports=(), - image_uuid=None, status=None, last_boot=None): - self.id_ = instance_id - self.name = name - self.image_uuid = image_uuid - - self.nova_status = status - - self.management_port = management_port - self._ports = ports - self.last_boot = last_boot - - @property - def booting(self): - return 'BUILD' in self.nova_status - - @property - def management_address(self): - if self.management_port: - return str(self.management_port.fixed_ips[0].ip_address) - - @property - def time_since_boot(self): - if self.last_boot: - return datetime.utcnow() - self.last_boot - - @property - def ports(self): - return self._ports - - @ports.setter - def ports(self, port_list): - self._ports = [p for p in port_list if p != self.management_port] - - @classmethod - def from_nova(cls, instance): - """ - Returns an instantiated InstanceInfo object with data gathered from - an existing Nova server. - - :param instance: novaclient.v2.servers.Server object for an existing - nova instance. - :returns: InstanceInfo instance - """ - # NOTE(adam_g): We do not yet actually rebuild any instances. - # A rug REBUILD is actually a delete/create, so it - # should be safe to track last_boot as the timestamp - # the instance was last booted. - last_boot = datetime.strptime( - instance.created, "%Y-%m-%dT%H:%M:%SZ") - return cls( - instance_id=instance.id, - name=instance.name, - image_uuid=instance.image['id'], - status=instance.status, - last_boot=last_boot, - ) - - -class InstanceProvider(object): - def __init__(self, client): - self.nova_client = client - LOG.debug(_LI( - 'Initialized %s with novaclient %s'), - self.__class__.__name__, self.nova_client) - - def create_instance(self, driver, name, image_uuid, flavor, - make_ports_callback): - """Create or get an instance - - :param router_id: UUID of the resource that the instance will host - - :returns: InstanceInfo object with at least id, name and image_uuid - set. - """ - - -class PezInstanceProvider(InstanceProvider): - def __init__(self, client): - super(PezInstanceProvider, self).__init__(client) - self.rpc_client = pez_api.AstaraPezAPI(rpc_topic='astara-pez') - LOG.debug(_LI( - 'Initialized %s with rpc client %s'), - self.__class__.__name__, self.rpc_client) - - def create_instance(self, resource_type, name, image_uuid, flavor, - make_ports_callback): - # TODO(adam_g): pez already creates the mgt port on boot and the one - # we create here is wasted. 
callback needs to be adjusted - mgt_port, instance_ports = make_ports_callback() - - mgt_port_dict = { - 'id': mgt_port.id, - 'network_id': mgt_port.network_id, - } - instance_ports_dicts = [{ - 'id': p.id, 'network_id': p.network_id, - } for p in instance_ports] - - LOG.debug('Requesting new %s instance from Pez.', resource_type) - pez_instance = self.rpc_client.get_instance( - resource_type, name, mgt_port_dict, instance_ports_dicts) - LOG.debug('Got %s instance %s from Pez.', - resource_type, pez_instance['id']) - - server = self.nova_client.servers.get(pez_instance['id']) - - # deserialize port data - mgt_port = neutron.Port.from_dict(pez_instance['management_port']) - instance_ports = [ - neutron.Port.from_dict(p) - for p in pez_instance['instance_ports']] - - boot_time = datetime.strptime( - server.created, "%Y-%m-%dT%H:%M:%SZ") - instance_info = InstanceInfo( - instance_id=server.id, - name=server.name, - management_port=mgt_port, - ports=instance_ports, - image_uuid=image_uuid, - status=server.status, - last_boot=boot_time) - - return instance_info - - -class OnDemandInstanceProvider(InstanceProvider): - def create_instance(self, resource_type, name, image_uuid, flavor, - make_ports_callback): - mgt_port, instance_ports = make_ports_callback() - - nics = [{'net-id': p.network_id, - 'v4-fixed-ip': '', - 'port-id': p.id} - for p in ([mgt_port] + instance_ports)] - - LOG.debug('creating instance %s with image %s', - name, image_uuid) - - server = self.nova_client.servers.create( - name, - image=image_uuid, - flavor=flavor, - nics=nics, - config_drive=True, - userdata=format_userdata(mgt_port) - ) - - server_status = None - for i in range(1, 10): - try: - # novaclient loads attributes lazily and we need to wait until - # the client object is populated. moving to keystone sessions - # exposes this race. - server_status = server.status - except AttributeError: - time.sleep(.5) - assert server_status - - boot_time = datetime.strptime( - server.created, "%Y-%m-%dT%H:%M:%SZ") - instance_info = InstanceInfo( - instance_id=server.id, - name=name, - management_port=mgt_port, - ports=instance_ports, - image_uuid=image_uuid, - status=server.status, - last_boot=boot_time) - - return instance_info - -INSTANCE_PROVIDERS = { - 'on_demand': OnDemandInstanceProvider, - 'pez': PezInstanceProvider, - 'default': OnDemandInstanceProvider, -} - - -def get_instance_provider(provider): - try: - return INSTANCE_PROVIDERS[provider] - except KeyError: - default = INSTANCE_PROVIDERS['default'] - LOG.error(_LE('Could not find %s instance provider, using default %s'), - provider, default) - return default - - -class Nova(object): - def __init__(self, conf): - self.conf = conf - ks_session = keystone.KeystoneSession() - self.client = client.Client( - version='2', - session=ks_session.session, - region_name=conf.auth_region, - endpoint_type=conf.endpoint_type) - - try: - self.instance_provider = get_instance_provider( - conf.instance_provider)(self.client) - except AttributeError: - default = INSTANCE_PROVIDERS['default'] - LOG.error(_LE('Could not find provider config, using default %s'), - default) - self.instance_provider = default(self.client) - - def get_instances_for_obj(self, name): - """Retrieves all nova servers for a given instance name. 
- - :param name: name of the instance being queried - - :returns: a list of novaclient.v2.servers.Server objects or [] - """ - search_opt = '^' + name + '.*$' - instances = self.client.servers.list( - search_opts=dict(name=search_opt) - ) - if not instances: - return [] - return [InstanceInfo.from_nova(i) for i in instances] - - def get_instance_for_obj(self, name): - """Retrieves a nova server for a given instance name. - - :param name: name of the instance being queried - - :returns: a novaclient.v2.servers.Server object or None - """ - instances = self.client.servers.list( - search_opts=dict(name=name) - ) - - if instances: - return instances[0] - else: - return None - - def get_instance_by_id(self, instance_id): - """Retrieves a nova server for a given instance_id. - - :param instance_id: Nova instance ID of instance being queried - - :returns: a novaclient.v2.servers.Server object - """ - try: - return self.client.servers.get(instance_id) - except novaclient_exceptions.NotFound: - return None - - def destroy_instance(self, instance_info): - if instance_info: - LOG.debug('deleting instance %s', instance_info.name) - self.client.servers.delete(instance_info.id_) - - def boot_instance(self, - resource_type, - prev_instance_info, - name, - image_uuid, - flavor, - make_ports_callback): - - if not prev_instance_info: - instance = self.get_instance_for_obj(name) - else: - instance = self.get_instance_by_id(prev_instance_info.id_) - - # check to make sure this instance isn't pre-existing - if instance: - if 'BUILD' in instance.status: - if prev_instance_info: - # if we had previous instance, return the same instance - # with updated status - prev_instance_info.nova_status = instance.status - instance_info = prev_instance_info - else: - instance_info = InstanceInfo.from_nova(instance) - return instance_info - - self.client.servers.delete(instance.id) - return None - - # it is now safe to attempt boot - instance_info = self.instance_provider.create_instance( - resource_type=resource_type, - name=name, - image_uuid=image_uuid, - flavor=flavor, - make_ports_callback=make_ports_callback - ) - return instance_info - - def update_instance_info(self, instance_info): - """Used primarily for updating tracked instance status""" - instance = self.get_instance_by_id(instance_info.id_) - if not instance: - return None - instance_info.nova_status = instance.status - return instance_info - - def delete_instances_and_wait(self, instance_infos): - """Deletes the nova instance and waits for its deletion to complete""" - to_poll = list(instance_infos) - - for inst in instance_infos: - try: - self.destroy_instance(inst) - except novaclient_exceptions.NotFound: - pass - except Exception: - LOG.exception( - _LE('Error deleting instance %s' % inst.id_)) - to_poll.remove(inst) - - # XXX parallelize this - timed_out = [] - for inst in to_poll: - start = time.time() - i = 0 - while time.time() - start < cfg.CONF.boot_timeout: - i += 1 - if not self.get_instance_by_id(inst.id_): - LOG.debug('Instance %s has been deleted', inst.id_) - break - LOG.debug( - 'Instance %s has not finished stopping', inst.id_) - time.sleep(cfg.CONF.retry_delay) - else: - timed_out.append(inst) - LOG.error(_LE( - 'Instance %s failed to stop within %d secs'), - inst.id_, cfg.CONF.boot_timeout) - - if timed_out: - raise NovaInstanceDeleteTimeout() - - -# TODO(mark): Convert this to dynamic yaml, proper network prefix and ssh-keys - -TEMPLATE = """#cloud-config - -cloud_config_modules: - - emit_upstart - - set_hostname - - locale - - 
set-passwords - - timezone - - disable-ec2-metadata - - runcmd - -output: {all: '| tee -a /var/log/cloud-init-output.log'} - -debug: - - verbose: true - -bootcmd: - - /usr/local/bin/%(boot_command)s %(mac_address)s %(ip_address)s/%(prefix)d - -users: - - name: astara - gecos: Astara - groups: users - shell: /bin/bash - sudo: ALL=(ALL) NOPASSWD:ALL - lock-passwd: true - ssh-authorized-keys: - - %(ssh_public_key)s - -final_message: "Astara appliance is running" -""" # noqa - - -def _ssh_key(): - key = config.get_best_config_path(cfg.CONF.ssh_public_key) - if not key: - return '' - try: - with open(key) as out: - return out.read().strip() - except IOError: - LOG.warning(_LW('Could not load router ssh public key from %s'), key) - return '' - - -def format_userdata(mgt_port): - mgt_net = netaddr.IPNetwork(cfg.CONF.management_prefix) - ctxt = { - 'ssh_public_key': _ssh_key(), - 'mac_address': mgt_port.mac_address, - 'ip_address': mgt_port.fixed_ips[0].ip_address, - 'boot_command': cfg.CONF.astara_boot_command, - 'prefix': mgt_net.prefixlen - } - out = TEMPLATE % ctxt - LOG.debug('Rendered cloud-init for instance: %s' % out) - return out diff --git a/astara/api/rug.py b/astara/api/rug.py deleted file mode 100644 index d3831835..00000000 --- a/astara/api/rug.py +++ /dev/null @@ -1,125 +0,0 @@ -# Copyright 2015 Akanda, Inc -# -# Author: Akanda, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import socket - -import eventlet -import eventlet.wsgi -import webob -import webob.dec -import webob.exc -import six - -from oslo_config import cfg -from oslo_log import log as logging - -from astara.cli import app -from astara.common.i18n import _, _LE, _LI, _LW - -LOG = logging.getLogger(__name__) - -RUG_API_OPTS = [ - cfg.IntOpt('api_port', default=44250, - help='Astara administrative API listening port', - deprecated_opts=[ - cfg.DeprecatedOpt('rug_api_port', - group='DEFAULT')]), - cfg.StrOpt('api_listen', default='0.0.0.0', - help='Astara administrative API listening address') -] -cfg.CONF.register_opts(RUG_API_OPTS) - - -class RugAPI(object): - - def __init__(self, ctl=app.RugController): - self.ctl = ctl() - - @webob.dec.wsgify(RequestClass=webob.Request) - def __call__(self, req): - try: - if req.method != 'PUT': - return webob.exc.HTTPMethodNotAllowed() - - args = filter(None, req.path.split('/')) - if not args: - return webob.exc.HTTPNotFound() - - command, _, _ = self.ctl.command_manager.find_command(args) - if command.interactive: - return webob.exc.HTTPNotImplemented() - - return str(self.ctl.run(['--debug'] + args)) - except SystemExit: - # cliff invokes -h (help) on argparse failure - # (which in turn results in sys.exit call) - return webob.exc.HTTPBadRequest() - except ValueError: - return webob.exc.HTTPNotFound() - except Exception: - LOG.exception(_LE("Unexpected error.")) - msg = _('An unknown error has occurred. 
' - 'Please try your request again.') - return webob.exc.HTTPInternalServerError( - explanation=six.text_type(msg)) - - -class RugAPIServer(object): - def __init__(self): - self.pool = eventlet.GreenPool(1000) - - def run(self, ip_address, port): - app = RugAPI() - - try: - socket.inet_pton(socket.AF_INET6, ip_address) - family = socket.AF_INET6 - except Exception: - family = socket.AF_INET - - for i in six.moves.range(5): - LOG.info(_LI( - 'Starting the rug-api on %s:%s'), - ip_address, port, - ) - try: - sock = eventlet.listen( - (ip_address, port), - family=family, - backlog=128 - ) - except socket.error as err: - if err.errno != 99: # EADDRNOTAVAIL - raise - LOG.warning(_LW('Could not create rug-api socket: %s'), err) - LOG.warning(_LW('Sleeping %s before trying again'), i + 1) - eventlet.sleep(i + 1) - else: - break - else: - raise RuntimeError(_( - 'Could not establish rug-api socket on %s:%s') % - (ip_address, port) - ) - eventlet.wsgi.server( - sock, - app, - custom_pool=self.pool, - log=LOG) - - -def serve(): - RugAPIServer().run(cfg.CONF.api_listen, cfg.CONF.api_port) diff --git a/astara/cli/__init__.py b/astara/cli/__init__.py deleted file mode 100644 index 63621fff..00000000 --- a/astara/cli/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/astara/cli/app.py b/astara/cli/app.py deleted file mode 100644 index 2808c1c2..00000000 --- a/astara/cli/app.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import logging - -from cliff import app -from cliff import commandmanager -from oslo_config import cfg -import pkg_resources - -from astara.common import config - - -class RugController(app.App): - - log = logging.getLogger(__name__) - - def __init__(self): - dist = pkg_resources.get_distribution('astara') - super(RugController, self).__init__( - description='controller for the Astara Orchestrator service', - version=dist.version, - command_manager=commandmanager.CommandManager('astara.cli'), - ) - - def initialize_app(self, argv): - # Quiet logging for some request library - logging.getLogger('requests').setLevel(logging.WARN) - - # Don't pass argv here because cfg.CONF will intercept the - # help options and exit. 
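# (Illustrative aside, not part of the original file: oslo.config's
# cfg.CONF object is callable and parses whatever argument list it is
# handed, e.g.
#     cfg.CONF(['--config-file', '/etc/astara/orchestrator.ini'],
#              project='astara-orchestrator')
# where the path shown is hypothetical; feeding it the real argv would
# let oslo.config consume -h/--help and sys.exit() before cliff ever
# sees the command, which is exactly what the comment above warns
# about.)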
- cfg.CONF(['--config-file', config.get_best_config_path()], - project='astara-orchestrator') - self.rug_ini = cfg.CONF - return super(RugController, self).initialize_app(argv) diff --git a/astara/cli/browse.py b/astara/cli/browse.py deleted file mode 100644 index 6a6941eb..00000000 --- a/astara/cli/browse.py +++ /dev/null @@ -1,377 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -"""Interactive CLI for rebuilding routers -""" - -from __future__ import print_function -import logging -import os -import Queue -import sqlite3 -import tempfile -import threading -import six -from contextlib import closing -from datetime import datetime - -try: - from blessed import Terminal -except ImportError: - # blessed is not part of openstack global-requirements. - raise Exception("The 'blessed' python module is required to browse" - " Astara routers. Please install and try again.") - -from oslo_config import cfg - -from astara import commands -from astara.api import nova as nova_api -from astara.api import neutron as neutron_api -from astara.cli import message - -logging.getLogger("urllib3").setLevel(logging.ERROR) - -cfg.CONF.import_opt('host', 'astara.main') - - -class FakeConfig(object): - - def __init__(self, admin_user, admin_password, tenant_name, auth_url, - auth_strategy, auth_region, instance_provider): - self.admin_user = admin_user - self.admin_password = admin_password - self.tenant_name = tenant_name - self.admin_tenant_name = tenant_name - self.auth_url = auth_url - self.auth_strategy = auth_strategy - self.auth_region = auth_region - self.instance_provider = instance_provider - - -class RouterRow(object): - - id = None - name = None - status = None - latest = None - image_name = None - booted_at = None - last_fetch = None - nova_status = None - - def __init__(self, **kw): - for k, v in kw.items(): - setattr(self, k, v) - - self.image_name = self.image_name or '' - self.booted_at = self.booted_at or '' - self.nova_status = self.nova_status or '' - self.tenant_id = '*' - if self.name and self.name.startswith('ak-'): - self.tenant_id = self.name.replace('ak-', '') - - @classmethod - def from_cursor(cls, cursor, row): - d = {} - for idx, col in enumerate(cursor.description): - d[col[0]] = row[idx] - return cls(**d) - - -class RouterFetcher(object): - - def __init__(self, conf, db, workers): - self.db = db - self.conn = sqlite3.connect(self.db) - self.conn.row_factory = RouterRow.from_cursor - self.nova = nova_api.Nova(conf) - self.neutron = neutron_api.Neutron(conf) - self.nova_queue = Queue.Queue() - self.save_queue = Queue.Queue() - - # Create X threads to perform Nova calls and put results into a queue - threads = [ - threading.Thread( - name='fetcher-t%02d' % i, - target=self.fetch_router_metadata, - ) - for i in six.moves.range(workers) - ] - for t in threads: - t.setDaemon(True) - t.start() - - def fetch(self): - routers = self.neutron.get_routers(detailed=False) - routers.sort(key=lambda x: x.id) - for router in 
routers: - sql = ''.join([ - "INSERT OR IGNORE INTO routers ", - "('id', 'name', 'latest') VALUES (", - ', '.join("?" * 3), - ");" - ]) - - with closing(self.conn.cursor()) as cursor: - cursor.execute( - 'SELECT * FROM routers WHERE id=?;', - (router.id,) - ) - current_router = cursor.fetchone() - - if router.status not in ('BUILD', 'ACTIVE') and \ - current_router and current_router.status == 'BOOT': - continue - - cursor.execute(sql, (router.id, router.name, None)) - cursor.execute( - 'UPDATE routers SET status=? WHERE id=?', - (router.status, router.id) - ) - self.conn.commit() - self.nova_queue.put(router.id) - - # SQLite databases have global database-wide lock for writes, so - # we can't split the writes across threads. That's okay, though, the - # slowness isn't the DB writes, it's the Nova API calls - while True: - try: - router, latest, name, booted_at, nova_status = \ - self.save_queue.get(False) - with closing(self.conn.cursor()) as cursor: - cursor.execute( - 'UPDATE routers SET latest=?, image_name=?, ' - 'last_fetch=?, booted_at=? WHERE id=?', - (latest, name, datetime.utcnow(), booted_at, router) - ) - if nova_status == 'BUILD': - cursor.execute( - 'UPDATE routers SET status=? WHERE id=?', - ('BOOT', router) - ) - self.conn.commit() - self.save_queue.task_done() - except Queue.Empty: - # the queue *might* be empty, and that's okay - break - - def fetch_router_metadata(self): - conn = sqlite3.connect(self.db) - conn.row_factory = RouterRow.from_cursor - while True: - router = RouterRow(id=self.nova_queue.get()) - image = None - try: - instance = self.nova.get_instance(router) - image = self.nova.client.images.get(instance.image['id']) - except: - pass - if image: - self.save_queue.put(( - router.id, - image.id == cfg.CONF.router_image_uuid, - image.name, - instance.created, - instance.status - )) - else: - self.save_queue.put(( - router.id, - None, - None, - None, - None - )) - self.nova_queue.task_done() - - -def populate_routers(db, conf, workers): - conf = FakeConfig(*conf) - client = RouterFetcher(conf, db, workers) - while True: - try: - client.fetch() - except (KeyboardInterrupt, SystemExit): - print("Killing background worker...") - break - - -class BrowseRouters(message.MessageSending): - """browse the state of every Astara appliance""" - - log = logging.getLogger(__name__) - interactive = True - - SCHEMA = '''CREATE TABLE routers ( - id TEXT PRIMARY KEY, - name TEXT, - status TEXT, - latest INTEGER, - image_name TEXT, - last_fetch TIMESTAMP, - booted_at TIMESTAMP - );''' - - def __init__(self, *a, **kw): - self.term = Terminal() - self.position = 0 - self.routers = [] - super(BrowseRouters, self).__init__(*a, **kw) - - def init_database(self): - self.fh = tempfile.NamedTemporaryFile(delete=False) - self.conn = sqlite3.connect(self.fh.name) - self.conn.row_factory = RouterRow.from_cursor - with closing(self.conn.cursor()) as cursor: - cursor.execute(self.SCHEMA) - - def get_parser(self, prog_name): - parser = super(BrowseRouters, self).get_parser(prog_name) - parser.add_argument('--dump', dest='interactive', action='store_false') - parser.add_argument('--threads', type=int, default=16) - parser.set_defaults(interactive=True) - return parser - - def take_action(self, parsed_args): - self.interactive = parsed_args.interactive - self.init_database() - credentials = [ - cfg.CONF.admin_user, - cfg.CONF.admin_password, - cfg.CONF.admin_tenant_name, - cfg.CONF.auth_url, - cfg.CONF.auth_strategy, - cfg.CONF.auth_region, - cfg.CONF.instance_provider - ] - populate = 
threading.Thread( - name='router-populater', - target=populate_routers, - args=(self.fh.name, credentials, parsed_args.threads) - ) - populate.setDaemon(True) - populate.start() - self.handle_loop() - - def handle_loop(self): - try: - with self.term.fullscreen(): - with self.term.cbreak(): - val = None - while val != u'q': - if not val: - self.fetch_routers() - elif val.is_sequence: - if val.code == self.term.KEY_DOWN: - self.move_down() - if val.code == self.term.KEY_UP: - self.move_up() - elif val == u'j': - self.move_down() - elif val == u'k': - self.move_up() - elif val == u'r': - self.rebuild_router() - if self.interactive: - self.print_routers() - val = self.term.inkey(timeout=3) - elif len(self.routers) and all(map( - lambda x: x.last_fetch, self.routers - )): - self.print_routers() - val = u'q' - self._exit() - except KeyboardInterrupt: - self._exit() - raise - - def fetch_routers(self): - with self.conn: - cursor = self.conn.cursor() - cursor.execute('SELECT * FROM routers ORDER BY id ASC;') - self.routers = cursor.fetchall() - - @property - def window(self): - offset = 0 - routers = self.routers - visible_height = self.term.height - 2 - if len(routers) > visible_height: - offset = self.position - offset = min(offset, len(routers) - visible_height - 1) - return offset, routers[offset:(offset+visible_height+1)] - - def print_routers(self): - offset, routers = self.window - with self.term.location(): - for i, r in enumerate(routers): - if r.latest is None: - age = ''.ljust(11) - elif r.latest: - age = self.term.green('LATEST'.ljust(11)) - elif not r.latest: - age = self.term.red('OUT-OF-DATE') - args = [ - r.id, - r.name, - self.router_states[r.status](r.status.ljust(7)), - age, - r.image_name, - 'at', - r.booted_at - ] - if i + offset == self.position: - args = map(self.term.reverse, args[:-3]) + args[-3:] - print(self.term.move(i, 0) + ' '.join(args)) - - def make_message(self, router): - return { - 'command': commands.ROUTER_REBUILD, - 'router_id': router.id, - 'tenant_id': router.tenant_id - } - - def rebuild_router(self): - offset, routers = self.window - r = routers[self.position-offset] - r.status = 'REBUILD' - self.send_message(self.make_message(r)) - - def move_up(self): - self.position = max(0, self.position-1) - - def move_down(self): - self.position = min(len(self.routers)-1, self.position+1) - - @property - def router_states(self): - return { - 'ACTIVE': self.term.green, - 'BUILD': self.term.yellow, - 'BOOT': self.term.yellow, - 'REBUILD': self.term.yellow, - 'DOWN': self.term.red, - 'ERROR': self.term.red - } - - def _exit(self): - if self.interactive: - print('Deleting %s...' % self.fh.name) - self.fh.close() - os.remove(self.fh.name) - if self.interactive: - print('Exiting...') diff --git a/astara/cli/config.py b/astara/cli/config.py deleted file mode 100644 index 9bb1771d..00000000 --- a/astara/cli/config.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
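The router browser above hydrates RouterRow objects straight from SQLite by installing RouterRow.from_cursor as the connection's row_factory, so every fetched row arrives as an object with column-named attributes. A standalone sketch of that stdlib pattern (the Row class and routers schema here are illustrative stand-ins, not the original schema):

```python
import sqlite3


class Row(object):
    """Attribute-style access to a query row."""

    def __init__(self, **kw):
        for k, v in kw.items():
            setattr(self, k, v)

    @classmethod
    def from_cursor(cls, cursor, row):
        # cursor.description holds one 7-tuple per result column;
        # index 0 is the column name, which becomes the attribute name.
        return cls(**{col[0]: row[idx]
                      for idx, col in enumerate(cursor.description)})


conn = sqlite3.connect(':memory:')
conn.row_factory = Row.from_cursor
conn.execute('CREATE TABLE routers (id TEXT PRIMARY KEY, status TEXT);')
conn.execute("INSERT INTO routers VALUES ('r1', 'ACTIVE');")
row = conn.execute('SELECT * FROM routers;').fetchone()
assert (row.id, row.status) == ('r1', 'ACTIVE')
```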
- - -"""Commands related to the application configuration -""" -import logging - -from astara import commands -from astara.cli import message - - -class ConfigReload(message.MessageSending): - """reload the configuration file(s)""" - - log = logging.getLogger(__name__) - - def make_message(self, parsed_args): - self.log.info( - 'sending config reload instruction', - ) - return { - 'command': commands.CONFIG_RELOAD, - } diff --git a/astara/cli/global_debug.py b/astara/cli/global_debug.py deleted file mode 100644 index 40ceb460..00000000 --- a/astara/cli/global_debug.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright 2015 Akanda, Inc. -# -# Author: Akanda, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -"""Commands related to tenants. -""" -import logging - -from astara import commands -from astara.cli import message - - -class GlobalDebug(message.MessageSending): - """Enable or disable global debug mode""" - - _COMMAND = commands.GLOBAL_DEBUG - - log = logging.getLogger(__name__) - - def get_parser(self, prog_name): - p = super(GlobalDebug, self).get_parser(prog_name) - p.add_argument( - 'status', - ) - p.add_argument( - '--reason', - ) - return p - - def make_message(self, parsed_args): - status = parsed_args.status.lower() - if status not in ['enable', 'disable']: - m = "Invalid global-debug command, must 'enable' or 'disable'" - raise ValueError(m) - - self.log.info( - "sending instruction to %s global debug mode" % status - ) - return { - 'command': self._COMMAND, - 'enabled': 1 if status == "enable" else 0, - 'reason': parsed_args.reason, - } diff --git a/astara/cli/main.py b/astara/cli/main.py deleted file mode 100644 index 53ec62ec..00000000 --- a/astara/cli/main.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import sys - -from astara.cli import app - - -def main(args=sys.argv[1:]): - return app.RugController().run(args) diff --git a/astara/cli/message.py b/astara/cli/message.py deleted file mode 100644 index f4230bf9..00000000 --- a/astara/cli/message.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -"""Base class for command that sends a message to the rug -""" -import abc -import logging -import os -import sys - -from cliff import command - -from astara import notifications - - -class MessageSending(command.Command): - - __metaclass__ = abc.ABCMeta - - log = logging.getLogger(__name__) - interactive = False - - @abc.abstractmethod - def make_message(self, parsed_args): - """Return a dictionary containing the message contents - """ - return {} - - def take_action(self, parsed_args): - self.send_message(self.make_message(parsed_args)) - - def send_message(self, payload): - sender = notifications.Sender() - cmd = payload.get('command') - argv = os.path.basename(sys.argv[0]) - self.log.info('%s: sending %s instruction.' % (argv, cmd)) - self.log.debug('payload: %r', payload) - sender.send(event_type='astara.command', message=payload) diff --git a/astara/cli/poll.py b/astara/cli/poll.py deleted file mode 100644 index 94c7648c..00000000 --- a/astara/cli/poll.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -"""Commands related to tenants. -""" -import logging - -from astara import commands -from astara.cli import message - - -class Poll(message.MessageSending): - - log = logging.getLogger(__name__) - - def make_message(self, parsed_args): - return { - 'command': commands.POLL, - } diff --git a/astara/cli/resource.py b/astara/cli/resource.py deleted file mode 100644 index c2a4d65f..00000000 --- a/astara/cli/resource.py +++ /dev/null @@ -1,102 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -"""Commands related to astara resources. 
-""" -from astara.api import nova -from astara.cli import message -from astara import commands - -from novaclient import exceptions -from oslo_config import cfg - - -class _TenantResourceCmd(message.MessageSending): - - def get_parser(self, prog_name): - p = super(_TenantResourceCmd, self).get_parser(prog_name) - p.add_argument( - 'resource_id', - ) - p.add_argument( - '--reason', - ) - return p - - def make_message(self, parsed_args): - resource_id = parsed_args.resource_id.lower() - reason = parsed_args.reason - self.log.info( - 'sending %s instruction for resource %r', - self._COMMAND, - resource_id, - ) - return { - 'command': self._COMMAND, - 'resource_id': resource_id, - 'tenant_id': '*', - 'reason': reason, - } - - -class ResourceUpdate(_TenantResourceCmd): - """force-update a resource""" - - _COMMAND = commands.RESOURCE_UPDATE - - -class ResourceRebuild(_TenantResourceCmd): - """force-rebuild a resource""" - - _COMMAND = commands.RESOURCE_REBUILD - - def get_parser(self, prog_name): - p = super(ResourceRebuild, self).get_parser(prog_name) - p.add_argument( - '--image_uuid', - ) - return p - - def take_action(self, parsed_args): - uuid = parsed_args.image_uuid - if uuid: - nova_client = nova.Nova(cfg.CONF).client - try: - nova_client.images.get(uuid) - except exceptions.NotFound: - self.log.exception( - 'could not retrieve custom image %s from Glance:' % uuid - ) - raise - return super(ResourceRebuild, self).take_action(parsed_args) - - def make_message(self, parsed_args): - message = super(ResourceRebuild, self).make_message(parsed_args) - message['resource_image_uuid'] = parsed_args.image_uuid - return message - - -class ResourceDebug(_TenantResourceCmd): - """debug a single resource""" - - _COMMAND = commands.RESOURCE_DEBUG - - -class ResourceManage(_TenantResourceCmd): - """manage a single resource""" - - _COMMAND = commands.RESOURCE_MANAGE diff --git a/astara/cli/router.py b/astara/cli/router.py deleted file mode 100644 index 3fc68cb1..00000000 --- a/astara/cli/router.py +++ /dev/null @@ -1,169 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -"""Commands related to routers. 
-""" -import argparse -import subprocess -import sys - -from astara.common.i18n import _LW -from astara import commands -from astara.cli import message -from astara.api import keystone, nova - -from novaclient import exceptions -from oslo_config import cfg -from oslo_log import log as logging - -from neutronclient.v2_0 import client - - -LOG = logging.getLogger(__name__) - - -class _TenantRouterCmd(message.MessageSending): - - def get_parser(self, prog_name): - new_cmd = str(prog_name).replace('router', 'resource') - LOG.warning(_LW( - "WARNING: '%s' is deprecated in favor of '%s' and will be removed " - "in the Mitaka release.") % (prog_name, new_cmd)) - # Bypass the direct base class to let us put the tenant id - # argument first - p = super(_TenantRouterCmd, self).get_parser(prog_name) - p.add_argument( - 'router_id', - ) - p.add_argument( - '--reason', - ) - return p - - def make_message(self, parsed_args): - router_id = parsed_args.router_id.lower() - reason = parsed_args.reason - if router_id in ['error', '*']: - tenant_id = router_id - else: - # Look up the tenant for a given router so we can send the - # command using both and the rug can route it to the correct - # worker. We do the lookup here instead of in the rug to avoid - # having to teach the rug notification and dispatching code - # about how to find the owner of a router, and to shift the - # burden of the neutron API call to the client so the server - # doesn't block. It also gives us a chance to report an error - # when we can't find the router. - ks_session = keystone.KeystoneSession() - n_c = client.Client(session=ks_session.session) - response = n_c.list_routers(retrieve_all=True, id=router_id) - try: - router_details = response['routers'][0] - except (KeyError, IndexError): - raise ValueError('No router with id %r found.' 
% - (router_id)) - assert router_details['id'] == router_id - tenant_id = router_details['tenant_id'] - self.log.info( - 'sending %s instruction for tenant %r, router %r', - self._COMMAND, - tenant_id, - router_id, - ) - return { - 'command': self._COMMAND, - 'router_id': router_id, - 'tenant_id': tenant_id, - 'reason': reason, - } - - -class RouterUpdate(_TenantRouterCmd): - """force-update a router""" - - _COMMAND = commands.ROUTER_UPDATE - - -class RouterRebuild(_TenantRouterCmd): - """force-rebuild a router""" - - _COMMAND = commands.ROUTER_REBUILD - - def get_parser(self, prog_name): - p = super(RouterRebuild, self).get_parser(prog_name) - p.add_argument( - '--router_image_uuid', - ) - return p - - def take_action(self, parsed_args): - uuid = parsed_args.router_image_uuid - if uuid: - nova_client = nova.Nova(cfg.CONF).client - try: - nova_client.images.get(uuid) - except exceptions.NotFound: - self.log.exception( - 'could not retrieve custom image %s from Glance:' % uuid - ) - raise - return super(RouterRebuild, self).take_action(parsed_args) - - def make_message(self, parsed_args): - message = super(RouterRebuild, self).make_message(parsed_args) - message['router_image_uuid'] = parsed_args.router_image_uuid - return message - - -class RouterDebug(_TenantRouterCmd): - """debug a single router""" - - _COMMAND = commands.ROUTER_DEBUG - - -class RouterManage(_TenantRouterCmd): - """manage a single router""" - - _COMMAND = commands.ROUTER_MANAGE - - -class RouterSSH(_TenantRouterCmd): - """ssh into a router over the management network""" - - interactive = True - - def get_parser(self, prog_name): - p = super(RouterSSH, self).get_parser(prog_name) - p.add_argument('remainder', nargs=argparse.REMAINDER) - return p - - def take_action(self, parsed_args): - ks_session = keystone.KeystoneSession() - n_c = client.Client(session=ks_session.session) - router_id = parsed_args.router_id.lower() - port = n_c.list_ports(name="ASTARA:MGT:%s" % router_id) - try: - mgmt_ip_addr = port['ports'][0]['fixed_ips'].pop()['ip_address'] - except (KeyError, IndexError): - raise ValueError( - "No router management address found for router with id %s" - % router_id) - try: - cmd = ["ssh", "astara@%s" % mgmt_ip_addr] + parsed_args.remainder - subprocess.check_call(cmd) - except subprocess.CalledProcessError as e: - sys.exit(e.returncode) diff --git a/astara/cli/tenant.py b/astara/cli/tenant.py deleted file mode 100644 index c27b6568..00000000 --- a/astara/cli/tenant.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -"""Commands related to tenants. 
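-Tenant commands expect the tenant UUID as the 'tenant_id' payload argument.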
-""" -import logging - -from astara import commands -from astara.cli import message - - -class _TenantCmd(message.MessageSending): - - log = logging.getLogger(__name__) - - def get_parser(self, prog_name): - p = super(_TenantCmd, self).get_parser(prog_name) - p.add_argument( - 'tenant_id', - ) - p.add_argument( - '--reason', - ) - return p - - def make_message(self, parsed_args): - self.log.info( - 'sending %s instruction for tenant with uuid %r', - self._COMMAND, - parsed_args.tenant_id, - ) - return { - 'command': self._COMMAND, - 'tenant_id': parsed_args.tenant_id, - 'reason': parsed_args.reason, - } - - -class TenantDebug(_TenantCmd): - """debug a single tenant""" - - _COMMAND = commands.TENANT_DEBUG - - -class TenantManage(_TenantCmd): - """manage a single tenant""" - - _COMMAND = commands.TENANT_MANAGE diff --git a/astara/cli/worker.py b/astara/cli/worker.py deleted file mode 100644 index ef26befc..00000000 --- a/astara/cli/worker.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -"""Commands related to workers. -""" -import logging - -from astara import commands -from astara.cli import message - - -class WorkerDebug(message.MessageSending): - """debug all workers""" - - log = logging.getLogger(__name__) - - def make_message(self, parsed_args): - self.log.info( - 'sending worker debug instruction', - ) - return { - 'command': commands.WORKERS_DEBUG, - } diff --git a/astara/commands.py b/astara/commands.py deleted file mode 100644 index 31303b5c..00000000 --- a/astara/commands.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -"""Constants for the commands -""" - -# Special values for dispatching -WILDCARDS = ('*', 'error') - -# Dump debugging details about the worker processes and threads -WORKERS_DEBUG = 'workers-debug' - -# Router commands expect a 'router_id' argument in the payload with -# the UUID of the router - -# Put a resource in debug/manage mode -RESOURCE_DEBUG = 'resource-debug' -RESOURCE_MANAGE = 'resource-manage' -# Send an updated config to the resource whether it is needed or not -RESOURCE_UPDATE = 'resource-update' -# Rebuild a resource from scratch -RESOURCE_REBUILD = 'resource-rebuild' - -# These are the deprecated versions of the above, to be removed in M. 
-ROUTER_DEBUG = 'router-debug' -ROUTER_MANAGE = 'router-manage' -ROUTER_UPDATE = 'router-update' -ROUTER_REBUILD = 'router-rebuild' - -# Put a tenant in debug/manage mode -# Expects a 'tenant_id' argument in the payload with the UUID of the tenant -TENANT_DEBUG = 'tenant-debug' -TENANT_MANAGE = 'tenant-manage' - -# Configuration commands -CONFIG_RELOAD = 'config-reload' - -# Force a poll of all resources right now -POLL = 'poll' - -GLOBAL_DEBUG = 'global-debug' diff --git a/astara/common/__init__.py b/astara/common/__init__.py deleted file mode 100644 index 63621fff..00000000 --- a/astara/common/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/astara/common/config.py b/astara/common/config.py deleted file mode 100644 index 1a443b93..00000000 --- a/astara/common/config.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright 2015 Akanda, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
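-# Illustrative example of the fallback implemented below, assuming only the
-# legacy file exists on disk:
-#   get_best_config_path('/etc/astara/orchestrator.ini')
-#   -> '/etc/akanda-rug/rug.ini'   (resolved via SEARCH_DIRS + LEGACY_FILE_MAP)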
- -import os - -from oslo_config import cfg -from oslo_log import log - - -PREFERRED_CONFIG_FILEPATH = '/etc/astara/orchestrator.ini' -SEARCH_DIRS = ['/etc/astara', '/etc/akanda-rug', '/etc/akanda'] -LEGACY_FILE_MAP = { - 'orchestrator.ini': 'rug.ini', - 'astara.pub': 'akanda.pub' -} - -DEFAULT_CONFIG_FILES = [ - PREFERRED_CONFIG_FILEPATH -] - - -def get_best_config_path(filepath=PREFERRED_CONFIG_FILEPATH): - if os.path.isfile(filepath): - return filepath - - # now begin attemp to fallback for compatibility - dirname, basename = os.path.split(filepath) - - if dirname and dirname not in SEARCH_DIRS: - return filepath # retain the non-standard location - - for searchdir in SEARCH_DIRS: - candidate_path = os.path.join(searchdir, basename) - if os.path.isfile(candidate_path): - return candidate_path - - if basename in LEGACY_FILE_MAP: - candidate_path = os.path.join(searchdir, LEGACY_FILE_MAP[basename]) - if os.path.isfile(candidate_path): - return candidate_path - return filepath - - -def parse_config(argv, default_config_files=DEFAULT_CONFIG_FILES): - log.register_options(cfg.CONF) - # Set the logging format to include the process and thread, since - # those aren't included in standard openstack logs but are useful - # for the rug - extended = ':'.join('%(' + n + ')s' - for n in ['name', - 'process', - 'processName', - 'threadName']) - log_format = ('%(asctime)s.%(msecs)03d %(levelname)s ' + - extended + ' %(message)s') - - # Configure the default log levels for some third-party packages - # that are chatty - log_levels = [ - 'amqp=WARN', - 'amqplib=WARN', - 'qpid.messaging=INFO', - 'sqlalchemy=WARN', - 'keystoneclient=INFO', - 'stevedore=INFO', - 'eventlet.wsgi.server=WARN', - 'requests=WARN', - 'astara.openstack.common.rpc.amqp=INFO', - 'neutronclient.client=INFO', - 'oslo.messaging=INFO', - 'iso8601=INFO', - 'cliff.commandmanager=INFO', - ] - cfg.CONF.set_default('logging_default_format_string', log_format) - log.set_defaults(default_log_levels=log_levels) - - # For legacy compatibility - default_config_files = map(get_best_config_path, default_config_files) - - # remove default config files that do not exist - default_config_files = filter(os.path.isfile, default_config_files) - - cfg.CONF(argv, - project='astara-orchestrator', - default_config_files=default_config_files) diff --git a/astara/common/constants.py b/astara/common/constants.py deleted file mode 100644 index 86582028..00000000 --- a/astara/common/constants.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (c) 2016 Akanda, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -ASTARA_SERVICE_PORT_TYPES = [ - 'VRRP', - 'LB', -] - - -ASTARA_MGT_PORT_TYPES = [ - 'MGT', -] diff --git a/astara/common/container.py b/astara/common/container.py deleted file mode 100644 index 21d63a94..00000000 --- a/astara/common/container.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright (c) 2016 Akanda, Inc. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import collections -import threading - - -class ResourceContainer(object): - - def __init__(self): - self.resources = {} - self.deleted = collections.deque(maxlen=50) - self.lock = threading.Lock() - - def __delitem__(self, item): - with self.lock: - del self.resources[item] - self.deleted.append(item) - - def items(self): - """Get all state machines. - :returns: all state machines in this RouterContainer - """ - with self.lock: - return list(self.resources.items()) - - def values(self): - with self.lock: - return list(self.resources.values()) - - def has_been_deleted(self, resource_id): - """Check if a resource has been deleted. - - :param resource_id: The resource's id to check against the deleted list - :returns: Returns True if the resource_id has been deleted. - """ - with self.lock: - return resource_id in self.deleted - - def __getitem__(self, item): - with self.lock: - return self.resources[item] - - def __setitem__(self, key, value): - with self.lock: - self.resources[key] = value - - def __contains__(self, item): - with self.lock: - return item in self.resources - - def __bool__(self): - if self.values(): - return True - else: - return False - - def __nonzero__(self): - return self.__bool__() diff --git a/astara/common/hash_ring.py b/astara/common/hash_ring.py deleted file mode 100644 index 231b7284..00000000 --- a/astara/common/hash_ring.py +++ /dev/null @@ -1,204 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# Copyright 2015 Akanda, Inc. -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import bisect -import hashlib -import threading - -from oslo_config import cfg -import six -from six.moves import range - -from astara.common.i18n import _ - -hash_opts = [ - cfg.IntOpt('hash_partition_exponent', - default=5, - help='Exponent to determine number of hash partitions to use ' - 'when distributing load across Rugs. Larger values ' - 'will result in more even distribution of load and less ' - 'load when rebalancing the ring, but more memory usage. ' - 'Number of partitions per rug is ' - '(2^hash_partition_exponent). This determines the ' - 'granularity of rebalancing: given 10 hosts, and an ' - 'exponent of the 2, there are 40 partitions in the ring.' - 'A few thousand partitions should make rebalancing ' - 'smooth in most cases. The default is suitable for up to ' - 'a few hundred rugs. 
Too many partitions has a CPU ' - 'impact.'), -] - -CONF = cfg.CONF -CONF.register_opts(hash_opts) - - -# A static key that can be used to choose a single host when from the -# ring we have no other data to hash with. -DC_KEY = 'astara_designated_coordinator' - - -class Invalid(Exception): - pass - - -# Lifted from ironic with some modifications. -class HashRing(object): - """A stable hash ring. - - We map item N to a host Y based on the closest lower hash: - - - hash(item) -> partition - - hash(host) -> divider - - closest lower divider is the host to use - - we hash each host many times to spread load more finely - as otherwise adding a host gets (on average) 50% of the load of - just one other host assigned to it. - """ - - def __init__(self, hosts, replicas=1): - """Create a new hash ring across the specified hosts. - - :param hosts: an iterable of hosts which will be mapped. - :param replicas: number of hosts to map to each hash partition, - or len(hosts), which ever is lesser. - Default: 1 - - """ - try: - self.hosts = set(hosts) - self.replicas = replicas if replicas <= len(hosts) else len(hosts) - except TypeError: - raise Invalid( - _("Invalid hosts supplied when building HashRing.")) - - self._host_hashes = {} - for host in hosts: - key = str(host).encode('utf8') - key_hash = hashlib.md5(key) - for p in range(2 ** CONF.hash_partition_exponent): - key_hash.update(key) - hashed_key = self._hash2int(key_hash) - self._host_hashes[hashed_key] = host - # Gather the (possibly colliding) resulting hashes into a bisectable - # list. - self._partitions = sorted(self._host_hashes.keys()) - - def _hash2int(self, key_hash): - """Convert the given hash's digest to a numerical value for the ring. - - :returns: An integer equivalent value of the digest. - """ - return int(key_hash.hexdigest(), 16) - - def _get_partition(self, data): - try: - if six.PY3 and data is not None: - data = data.encode('utf-8') - key_hash = hashlib.md5(data) - hashed_key = self._hash2int(key_hash) - position = bisect.bisect(self._partitions, hashed_key) - return position if position < len(self._partitions) else 0 - except TypeError: - raise Invalid( - _("Invalid data supplied to HashRing.get_hosts.")) - - def get_hosts(self, data, ignore_hosts=None): - """Get the list of hosts which the supplied data maps onto. - - :param data: A string identifier to be mapped across the ring. - :param ignore_hosts: A list of hosts to skip when performing the hash. - Useful to temporarily skip down hosts without - performing a full rebalance. - Default: None. - :returns: a list of hosts. - The length of this list depends on the number of replicas - this `HashRing` was created with. It may be less than this - if ignore_hosts is not None. - """ - hosts = [] - if ignore_hosts is None: - ignore_hosts = set() - else: - ignore_hosts = set(ignore_hosts) - ignore_hosts.intersection_update(self.hosts) - partition = self._get_partition(data) - for replica in range(0, self.replicas): - if len(hosts) + len(ignore_hosts) == len(self.hosts): - # prevent infinite loop - cannot allocate more fallbacks. - break - # Linear probing: partition N, then N+1 etc. - host = self._get_host(partition) - while host in hosts or host in ignore_hosts: - partition += 1 - if partition >= len(self._partitions): - partition = 0 - host = self._get_host(partition) - hosts.append(host) - return hosts - - def _get_host(self, partition): - """Find what host is serving a partition. - - :param partition: The index of the partition in the partition map. - e.g. 
0 is the first partition, 1 is the second. - :return: The host object the ring was constructed with. - """ - return self._host_hashes[self._partitions[partition]] - - -class HashRingManager(object): - _hash_ring = None - _lock = threading.Lock() - - def __init__(self): - self._hosts = [] - self._balanced = False - - @property - def balanced(self): - return self._balanced - - @property - def ring(self): - # Hot path, no lock - if self._hash_ring is not None: - return self._hash_ring - - with self._lock: - if self._hash_ring is None: - ring = self._load_hash_ring() - self.__class__._hash_ring = ring - return self._hash_ring - - @property - def hosts(self): - return self.ring.hosts - - def _load_hash_ring(self): - return HashRing(self._hosts) - - @classmethod - def reset(cls): - with cls._lock: - cls._hash_ring = None - cls._balanced = False - - def rebalance(self, hosts): - self.reset() - with self._lock: - self._hosts = hosts - self._balanced = True diff --git a/astara/common/i18n.py b/astara/common/i18n.py deleted file mode 100644 index 0d99ed57..00000000 --- a/astara/common/i18n.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2014 Akanda, Inc. -# -# Author: Akanda, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import oslo_i18n - -_translators = oslo_i18n.TranslatorFactory(domain='astara') - -_ = _translators.primary -_LI = _translators.log_info -_LW = _translators.log_warning -_LE = _translators.log_error diff --git a/astara/common/linux/__init__.py b/astara/common/linux/__init__.py deleted file mode 100644 index 63621fff..00000000 --- a/astara/common/linux/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/astara/common/linux/interface.py b/astara/common/linux/interface.py deleted file mode 100644 index 2d19a99b..00000000 --- a/astara/common/linux/interface.py +++ /dev/null @@ -1,269 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc - -import netaddr - -from oslo_config import cfg -from oslo_log import log as logging - -from astara.common.i18n import _, _LE, _LW -from astara.common.linux import ip_lib -from astara.common.linux import ovs_lib -from astara.common.linux import utils - - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF - - -OPTS = [ - cfg.StrOpt('ovs_integration_bridge', - default='br-int', - help=_('Name of Open vSwitch bridge to use')), - cfg.BoolOpt('ovs_use_veth', - default=False, - help=_('Uses veth for an interface or not')), - cfg.StrOpt('network_device_mtu', - help=_('MTU setting for device.')), -] -CONF.register_opts(OPTS) - - -AGENT_OPTIONS = [ - cfg.StrOpt('root_helper', - default='sudo astara-rootwrap /etc/astara/rootwrap.conf'), -] -CONF.register_group(cfg.OptGroup(name='AGENT')) -CONF.register_opts(AGENT_OPTIONS, 'AGENT') - - -class LinuxInterfaceDriver(object): - __metaclass__ = abc.ABCMeta - - # from linux IF_NAMESIZE - DEV_NAME_LEN = 14 - DEV_NAME_PREFIX = 'tap' - - def __init__(self, conf): - self.conf = conf - self.root_helper = conf.AGENT.root_helper - - def init_l3(self, device_name, ip_cidrs, namespace=None): - """Set the L3 settings for the interface using data from the port. 
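- Addresses already on the device but absent from ip_cidrs are removed.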
- ip_cidrs: list of 'X.X.X.X/YY' strings - """ - device = ip_lib.IPDevice(device_name, - self.root_helper, - namespace=namespace) - - previous = {} - for address in device.addr.list(scope='global', filters=['permanent']): - previous[address['cidr']] = address['ip_version'] - - # add new addresses - for ip_cidr in ip_cidrs: - - net = netaddr.IPNetwork(ip_cidr) - if ip_cidr in previous: - del previous[ip_cidr] - continue - - device.addr.add(net.version, ip_cidr, str(net.broadcast)) - - # clean up any old addresses - for ip_cidr, ip_version in previous.items(): - device.addr.delete(ip_version, ip_cidr) - - def check_bridge_exists(self, bridge): - if not ip_lib.device_exists(bridge): - raise Exception(_('Bridge %s does not exist') % bridge) - - def get_device_name(self, port): - return (self.DEV_NAME_PREFIX + port.id)[:self.DEV_NAME_LEN] - - @abc.abstractmethod - def plug(self, network_id, port_id, device_name, mac_address, - bridge=None, namespace=None, prefix=None): - """Plug in the interface.""" - - @abc.abstractmethod - def unplug(self, device_name, bridge=None, namespace=None, prefix=None): - """Unplug the interface.""" - - -class NullDriver(LinuxInterfaceDriver): - def plug(self, network_id, port_id, device_name, mac_address, - bridge=None, namespace=None, prefix=None): - pass - - def unplug(self, device_name, bridge=None, namespace=None, prefix=None): - pass - - -class OVSInterfaceDriver(LinuxInterfaceDriver): - """Driver for creating an internal interface on an OVS bridge.""" - - DEV_NAME_PREFIX = 'tap' - - def __init__(self, conf): - super(OVSInterfaceDriver, self).__init__(conf) - if self.conf.ovs_use_veth: - self.DEV_NAME_PREFIX = 'ns-' - - def _get_tap_name(self, dev_name, prefix=None): - if self.conf.ovs_use_veth: - dev_name = dev_name.replace(prefix or self.DEV_NAME_PREFIX, 'tap') - return dev_name - - def _ovs_add_port(self, bridge, device_name, port_id, mac_address, - internal=True): - cmd = ['ovs-vsctl', '--', '--may-exist', - 'add-port', bridge, device_name] - if internal: - cmd += ['--', 'set', 'Interface', device_name, 'type=internal'] - cmd += ['--', 'set', 'Interface', device_name, - 'external-ids:iface-id=%s' % port_id, - '--', 'set', 'Interface', device_name, - 'external-ids:iface-status=active', - '--', 'set', 'Interface', device_name, - 'external-ids:attached-mac=%s' % mac_address] - utils.execute(cmd, self.root_helper) - - def plug(self, network_id, port_id, device_name, mac_address, - bridge=None, namespace=None, prefix=None): - """Plug in the interface.""" - if not bridge: - bridge = self.conf.ovs_integration_bridge - - self.check_bridge_exists(bridge) - - if not ip_lib.device_exists(device_name, - self.root_helper, - namespace=namespace): - - ip = ip_lib.IPWrapper(self.root_helper) - tap_name = self._get_tap_name(device_name, prefix) - - if self.conf.ovs_use_veth: - root_dev, ns_dev = ip.add_veth(tap_name, device_name) - - internal = not self.conf.ovs_use_veth - self._ovs_add_port(bridge, tap_name, port_id, mac_address, - internal=internal) - - ns_dev = ip.device(device_name) - ns_dev.link.set_address(mac_address) - - if self.conf.network_device_mtu: - ns_dev.link.set_mtu(self.conf.network_device_mtu) - if self.conf.ovs_use_veth: - root_dev.link.set_mtu(self.conf.network_device_mtu) - - if namespace: - namespace_obj = ip.ensure_namespace(namespace) - namespace_obj.add_device_to_namespace(ns_dev) - - ns_dev.link.set_up() - if self.conf.ovs_use_veth: - root_dev.link.set_up() - else: - LOG.warning(_LW("Device %s already exists"), device_name) - - def unplug(self, 
device_name, bridge=None, namespace=None, prefix=None): - """Unplug the interface.""" - if not bridge: - bridge = self.conf.ovs_integration_bridge - - tap_name = self._get_tap_name(device_name, prefix) - self.check_bridge_exists(bridge) - ovs = ovs_lib.OVSBridge(bridge, self.root_helper) - - try: - ovs.delete_port(tap_name) - if self.conf.ovs_use_veth: - device = ip_lib.IPDevice(device_name, - self.root_helper, - namespace) - device.link.delete() - LOG.debug(_("Unplugged interface '%s'"), device_name) - except RuntimeError: - LOG.exception(_LE("Failed unplugging interface '%s'"), device_name) - - -class BridgeInterfaceDriver(LinuxInterfaceDriver): - """Driver for creating bridge interfaces.""" - - DEV_NAME_PREFIX = 'ns-' - - def plug(self, network_id, port_id, device_name, mac_address, - bridge=None, namespace=None, prefix=None): - """Plugin the interface.""" - if not ip_lib.device_exists(device_name, - self.root_helper, - namespace=namespace): - ip = ip_lib.IPWrapper(self.root_helper) - - # Enable agent to define the prefix - if prefix: - tap_name = device_name.replace(prefix, 'tap') - else: - tap_name = device_name.replace(self.DEV_NAME_PREFIX, 'tap') - root_veth, ns_veth = ip.add_veth(tap_name, device_name) - ns_veth.link.set_address(mac_address) - - if self.conf.network_device_mtu: - root_veth.link.set_mtu(self.conf.network_device_mtu) - ns_veth.link.set_mtu(self.conf.network_device_mtu) - - if namespace: - namespace_obj = ip.ensure_namespace(namespace) - namespace_obj.add_device_to_namespace(ns_veth) - - root_veth.link.set_up() - ns_veth.link.set_up() - - else: - LOG.warning(_LW("Device %s already exists"), device_name) - - def unplug(self, device_name, bridge=None, namespace=None, prefix=None): - """Unplug the interface.""" - device = ip_lib.IPDevice(device_name, self.root_helper, namespace) - try: - device.link.delete() - LOG.debug("Unplugged interface '%s'", device_name) - except RuntimeError: - LOG.exception(_LE( - "Failed unplugging interface '%s'"), device_name) diff --git a/astara/common/linux/ip_lib.py b/astara/common/linux/ip_lib.py deleted file mode 100644 index 97eff543..00000000 --- a/astara/common/linux/ip_lib.py +++ /dev/null @@ -1,450 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
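-# Hypothetical usage of the wrappers defined below (illustrative only):
-#   ip = IPWrapper(root_helper='sudo')
-#   ns = ip.ensure_namespace('qrouter-x1')   # create the netns, bring up lo
-#   root_dev, ns_dev = ip.add_veth('tap-x1', 'ns-x1')
-#   ns.add_device_to_namespace(ns_dev)
-#   ns_dev.link.set_up()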
- -import netaddr - -from astara.common.linux import utils -from astara.common.i18n import _ - -from oslo_log import log - -LOOPBACK_DEVNAME = 'lo' - -LOG = log.getLogger(__name__) - - -class SubProcessBase(object): - def __init__(self, root_helper=None, namespace=None): - self.root_helper = root_helper - self.namespace = namespace - - def _run(self, options, command, args): - if self.namespace: - return self._as_root(options, command, args) - else: - return self._execute(options, command, args) - - def _as_root(self, options, command, args, use_root_namespace=False): - if not self.root_helper: - raise Exception(_('Sudo is required to run this command')) - - namespace = self.namespace if not use_root_namespace else None - - return self._execute(options, - command, - args, - self.root_helper, - namespace) - - @classmethod - def _execute(cls, options, command, args, root_helper=None, - namespace=None): - opt_list = ['-%s' % o for o in options] - if namespace: - ip_cmd = ['ip', 'netns', 'exec', namespace, 'ip'] - else: - ip_cmd = ['ip'] - return utils.execute(ip_cmd + opt_list + [command] + list(args), - root_helper=root_helper) - - -class IPWrapper(SubProcessBase): - def __init__(self, root_helper=None, namespace=None): - super(IPWrapper, self).__init__(root_helper=root_helper, - namespace=namespace) - self.netns = IpNetnsCommand(self) - - def device(self, name): - return IPDevice(name, self.root_helper, self.namespace) - - def get_devices(self, exclude_loopback=False): - retval = [] - output = self._execute('o', 'link', ('list',), - self.root_helper, self.namespace) - for line in output.split('\n'): - if '<' not in line: - continue - tokens = line.split(':', 2) - if len(tokens) >= 3: - name = tokens[1].strip() - - if exclude_loopback and name == LOOPBACK_DEVNAME: - continue - - retval.append(IPDevice(name, - self.root_helper, - self.namespace)) - return retval - - def add_tuntap(self, name, mode='tap'): - self._as_root('', 'tuntap', ('add', name, 'mode', mode)) - return IPDevice(name, self.root_helper, self.namespace) - - def add_veth(self, name1, name2): - self._as_root('', 'link', - ('add', name1, 'type', 'veth', 'peer', 'name', name2)) - - return (IPDevice(name1, self.root_helper, self.namespace), - IPDevice(name2, self.root_helper, self.namespace)) - - def ensure_namespace(self, name): - if not self.netns.exists(name): - ip = self.netns.add(name) - lo = ip.device(LOOPBACK_DEVNAME) - lo.link.set_up() - else: - ip = IPWrapper(self.root_helper, name) - return ip - - def namespace_is_empty(self): - return not self.get_devices(exclude_loopback=True) - - def garbage_collect_namespace(self): - """Conditionally destroy the namespace if it is empty.""" - if self.namespace and self.netns.exists(self.namespace): - if self.namespace_is_empty(): - self.netns.delete(self.namespace) - return True - return False - - def add_device_to_namespace(self, device): - if self.namespace: - device.link.set_netns(self.namespace) - - @classmethod - def get_namespaces(cls, root_helper): - output = cls._execute('', 'netns', ('list',), root_helper=root_helper) - return [l.strip() for l in output.split('\n')] - - -class IPDevice(SubProcessBase): - def __init__(self, name, root_helper=None, namespace=None): - super(IPDevice, self).__init__(root_helper=root_helper, - namespace=namespace) - self.name = name - self.link = IpLinkCommand(self) - self.addr = IpAddrCommand(self) - self.route = IpRouteCommand(self) - - def __eq__(self, other): - return (other is not None and self.name == other.name and - self.namespace == 
other.namespace) - - def __ne__(self, other): - return not self.__eq__(other) - - def __str__(self): - return self.name - - -class IpCommandBase(object): - COMMAND = '' - - def __init__(self, parent): - self._parent = parent - - def _run(self, *args, **kwargs): - return self._parent._run(kwargs.get('options', []), self.COMMAND, args) - - def _as_root(self, *args, **kwargs): - return self._parent._as_root(kwargs.get('options', []), - self.COMMAND, - args, - kwargs.get('use_root_namespace', False)) - - -class IpDeviceCommandBase(IpCommandBase): - @property - def name(self): - return self._parent.name - - -class IpLinkCommand(IpDeviceCommandBase): - COMMAND = 'link' - - def set_address(self, mac_address): - self._as_root('set', self.name, 'address', mac_address) - - def set_mtu(self, mtu_size): - self._as_root('set', self.name, 'mtu', mtu_size) - - def set_up(self): - self._as_root('set', self.name, 'up') - - def set_down(self): - self._as_root('set', self.name, 'down') - - def set_netns(self, namespace): - self._as_root('set', self.name, 'netns', namespace) - self._parent.namespace = namespace - - def set_name(self, name): - self._as_root('set', self.name, 'name', name) - self._parent.name = name - - def set_alias(self, alias_name): - self._as_root('set', self.name, 'alias', alias_name) - - def delete(self): - self._as_root('delete', self.name) - - @property - def address(self): - return self.attributes.get('link/ether') - - @property - def state(self): - return self.attributes.get('state') - - @property - def mtu(self): - return self.attributes.get('mtu') - - @property - def qdisc(self): - return self.attributes.get('qdisc') - - @property - def qlen(self): - return self.attributes.get('qlen') - - @property - def alias(self): - return self.attributes.get('alias') - - @property - def attributes(self): - return self._parse_line(self._run('show', self.name, options='o')) - - def _parse_line(self, value): - if not value: - return {} - - device_name, settings = value.replace("\\", '').split('>', 1) - tokens = settings.split() - keys = tokens[::2] - values = [int(v) if v.isdigit() else v for v in tokens[1::2]] - - retval = dict(zip(keys, values)) - return retval - - -class IpAddrCommand(IpDeviceCommandBase): - COMMAND = 'addr' - - def add(self, ip_version, cidr, broadcast, scope='global'): - self._as_root('add', - cidr, - 'brd', - broadcast, - 'scope', - scope, - 'dev', - self.name, - options=[ip_version]) - - def delete(self, ip_version, cidr): - self._as_root('del', - cidr, - 'dev', - self.name, - options=[ip_version]) - - def flush(self): - self._as_root('flush', self.name) - - def list(self, scope=None, to=None, filters=None): - if filters is None: - filters = [] - - retval = [] - - if scope: - filters += ['scope', scope] - if to: - filters += ['to', to] - - for line in self._run('show', self.name, *filters).split('\n'): - line = line.strip() - if not line.startswith('inet'): - continue - parts = line.split() - if parts[0] == 'inet6': - version = 6 - scope = parts[3] - broadcast = '::' - else: - version = 4 - if parts[2] == 'brd': - broadcast = parts[3] - scope = parts[5] - else: - # sometimes output of 'ip a' might look like: - # inet 192.168.100.100/24 scope global eth0 - # and broadcast needs to be calculated from CIDR - broadcast = str(netaddr.IPNetwork(parts[1]).broadcast) - scope = parts[3] - - retval.append(dict(cidr=parts[1], - broadcast=broadcast, - scope=scope, - ip_version=version, - dynamic=('dynamic' == parts[-1]))) - return retval - - -class IpRouteCommand(IpDeviceCommandBase): - 
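- """Wrapper for 'ip route' operations scoped to this device."""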
COMMAND = 'route' - - def add_gateway(self, gateway, metric=None): - args = ['replace', 'default', 'via', gateway] - if metric: - args += ['metric', metric] - args += ['dev', self.name] - self._as_root(*args) - - def delete_gateway(self, gateway): - self._as_root('del', - 'default', - 'via', - gateway, - 'dev', - self.name) - - def get_gateway(self, scope=None, filters=None): - if filters is None: - filters = [] - - retval = None - - if scope: - filters += ['scope', scope] - - route_list_lines = self._run('list', 'dev', self.name, - *filters).split('\n') - default_route_line = next((x.strip() for x in - route_list_lines if - x.strip().startswith('default')), None) - if default_route_line: - gateway_index = 2 - parts = default_route_line.split() - retval = dict(gateway=parts[gateway_index]) - metric_index = 4 - parts_has_metric = (len(parts) > metric_index) - if parts_has_metric: - retval.update(metric=int(parts[metric_index])) - - return retval - - def pullup_route(self, interface_name): - """ - Ensures that the route entry for the interface is before all - others on the same subnet. - """ - device_list = [] - device_route_list_lines = self._run('list', 'proto', 'kernel', - 'dev', interface_name).split('\n') - for device_route_line in device_route_list_lines: - try: - subnet = device_route_line.split()[0] - except: - continue - subnet_route_list_lines = self._run('list', 'proto', 'kernel', - 'match', subnet).split('\n') - for subnet_route_line in subnet_route_list_lines: - i = iter(subnet_route_line.split()) - while(next(i) != 'dev'): - pass - device = next(i) - try: - while(next(i) != 'src'): - pass - src = next(i) - except: - src = '' - if device != interface_name: - device_list.append((device, src)) - else: - break - - for (device, src) in device_list: - self._as_root('del', subnet, 'dev', device) - if (src != ''): - self._as_root('append', subnet, 'proto', 'kernel', - 'src', src, 'dev', device) - else: - self._as_root('append', subnet, 'proto', 'kernel', - 'dev', device) - - -class IpNetnsCommand(IpCommandBase): - COMMAND = 'netns' - - def add(self, name): - self._as_root('add', name, use_root_namespace=True) - return IPWrapper(self._parent.root_helper, name) - - def delete(self, name): - self._as_root('delete', name, use_root_namespace=True) - - def execute(self, cmds, addl_env={}, check_exit_code=True): - if not self._parent.root_helper: - m = _('sudo is required to run this command') - LOG.error(m) - raise Exception(m) - elif not self._parent.namespace: - m = _('No namespace defined for parent') - LOG.error(m) - raise Exception(m) - else: - return utils.execute( - ['%s=%s' % pair for pair in addl_env.items()] + - ['ip', 'netns', 'exec', self._parent.namespace] + list(cmds), - root_helper=self._parent.root_helper, - check_exit_code=check_exit_code) - - def exists(self, name): - output = self._as_root('list', options='o', use_root_namespace=True) - - for line in output.split('\n'): - if name == line.strip(): - return True - return False - - -def device_exists(device_name, root_helper=None, namespace=None): - try: - address = IPDevice(device_name, root_helper, namespace).link.address - except RuntimeError: - return False - return bool(address) diff --git a/astara/common/linux/ovs_lib.py b/astara/common/linux/ovs_lib.py deleted file mode 100644 index 98ec3e2f..00000000 --- a/astara/common/linux/ovs_lib.py +++ /dev/null @@ -1,321 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use 
this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2011 Nicira Networks, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# @author: Somik Behera, Nicira Networks, Inc. -# @author: Brad Hall, Nicira Networks, Inc. -# @author: Dan Wendlandt, Nicira Networks, Inc. -# @author: Dave Lapsley, Nicira Networks, Inc. - -import re - -from astara.common.linux import utils -from astara.common.i18n import _, _LE, _LW - -from oslo_log import log as logging - -LOG = logging.getLogger(__name__) - - -class VifPort: - def __init__(self, port_name, ofport, vif_id, vif_mac, switch): - self.port_name = port_name - self.ofport = ofport - self.vif_id = vif_id - self.vif_mac = vif_mac - self.switch = switch - - def __str__(self): - return ("iface-id=" + self.vif_id + ", vif_mac=" + - self.vif_mac + ", port_name=" + self.port_name + - ", ofport=" + str(self.ofport) + ", bridge_name =" + - self.switch.br_name) - - -class OVSBridge: - def __init__(self, br_name, root_helper): - self.br_name = br_name - self.root_helper = root_helper - self.re_id = self.re_compile_id() - - def re_compile_id(self): - external = 'external_ids\s*' - mac = 'attached-mac="(?P([a-fA-F\d]{2}:){5}([a-fA-F\d]{2}))"' - iface = 'iface-id="(?P[^"]+)"' - name = 'name\s*:\s"(?P[^"]*)"' - port = 'ofport\s*:\s(?P-?\d+)' - _re = ('%(external)s:\s{ ( %(mac)s,? | %(iface)s,? | . )* }' - ' \s+ %(name)s \s+ %(port)s' % locals()) - return re.compile(_re, re.M | re.X) - - def run_vsctl(self, args): - full_args = ["ovs-vsctl", "--timeout=2"] + args - try: - return utils.execute(full_args, root_helper=self.root_helper) - except Exception, e: - LOG.error(_LE( - "Unable to execute %(cmd)s. 
Exception: %(exception)s"), - {'cmd': full_args, 'exception': e}) - - def reset_bridge(self): - self.run_vsctl(["--", "--if-exists", "del-br", self.br_name]) - self.run_vsctl(["add-br", self.br_name]) - - def add_port(self, port_name): - self.run_vsctl(["--", "--may-exist", "add-port", self.br_name, - port_name]) - return self.get_port_ofport(port_name) - - def delete_port(self, port_name): - self.run_vsctl(["--", "--if-exists", "del-port", self.br_name, - port_name]) - - def set_db_attribute(self, table_name, record, column, value): - args = ["set", table_name, record, "%s=%s" % (column, value)] - self.run_vsctl(args) - - def clear_db_attribute(self, table_name, record, column): - args = ["clear", table_name, record, column] - self.run_vsctl(args) - - def run_ofctl(self, cmd, args): - full_args = ["ovs-ofctl", cmd, self.br_name] + args - try: - return utils.execute(full_args, root_helper=self.root_helper) - except Exception, e: - LOG.error(_LE( - "Unable to execute %(cmd)s. Exception: %(exception)s"), - {'cmd': full_args, 'exception': e}) - - def count_flows(self): - flow_list = self.run_ofctl("dump-flows", []).split("\n")[1:] - return len(flow_list) - 1 - - def remove_all_flows(self): - self.run_ofctl("del-flows", []) - - def get_port_ofport(self, port_name): - return self.db_get_val("Interface", port_name, "ofport") - - def get_datapath_id(self): - return self.db_get_val('Bridge', - self.br_name, 'datapath_id').strip('"') - - def _build_flow_expr_arr(self, **kwargs): - flow_expr_arr = [] - is_delete_expr = kwargs.get('delete', False) - if not is_delete_expr: - prefix = ("hard_timeout=%s,idle_timeout=%s,priority=%s" % - (kwargs.get('hard_timeout', '0'), - kwargs.get('idle_timeout', '0'), - kwargs.get('priority', '1'))) - flow_expr_arr.append(prefix) - elif 'priority' in kwargs: - raise Exception(_("Cannot match priority on flow deletion")) - - in_port = ('in_port' in kwargs and ",in_port=%s" % - kwargs['in_port'] or '') - dl_type = ('dl_type' in kwargs and ",dl_type=%s" % - kwargs['dl_type'] or '') - dl_vlan = ('dl_vlan' in kwargs and ",dl_vlan=%s" % - kwargs['dl_vlan'] or '') - dl_src = 'dl_src' in kwargs and ",dl_src=%s" % kwargs['dl_src'] or '' - dl_dst = 'dl_dst' in kwargs and ",dl_dst=%s" % kwargs['dl_dst'] or '' - nw_src = 'nw_src' in kwargs and ",nw_src=%s" % kwargs['nw_src'] or '' - nw_dst = 'nw_dst' in kwargs and ",nw_dst=%s" % kwargs['nw_dst'] or '' - tun_id = 'tun_id' in kwargs and ",tun_id=%s" % kwargs['tun_id'] or '' - proto = 'proto' in kwargs and ",%s" % kwargs['proto'] or '' - ip = ('nw_src' in kwargs or 'nw_dst' in kwargs) and ',ip' or '' - match = (in_port + dl_type + dl_vlan + dl_src + dl_dst + - (ip or proto) + nw_src + nw_dst + tun_id) - if match: - match = match[1:] # strip leading comma - flow_expr_arr.append(match) - return flow_expr_arr - - def add_flow(self, **kwargs): - if "actions" not in kwargs: - raise Exception(_("Must specify one or more actions")) - if "priority" not in kwargs: - kwargs["priority"] = "0" - - flow_expr_arr = self._build_flow_expr_arr(**kwargs) - flow_expr_arr.append("actions=%s" % (kwargs["actions"])) - flow_str = ",".join(flow_expr_arr) - self.run_ofctl("add-flow", [flow_str]) - - def delete_flows(self, **kwargs): - kwargs['delete'] = True - flow_expr_arr = self._build_flow_expr_arr(**kwargs) - if "actions" in kwargs: - flow_expr_arr.append("actions=%s" % (kwargs["actions"])) - flow_str = ",".join(flow_expr_arr) - self.run_ofctl("del-flows", [flow_str]) - - def add_tunnel_port(self, port_name, remote_ip): - self.run_vsctl(["add-port", 
self.br_name, port_name]) - self.set_db_attribute("Interface", port_name, "type", "gre") - self.set_db_attribute("Interface", port_name, "options:remote_ip", - remote_ip) - self.set_db_attribute("Interface", port_name, "options:in_key", "flow") - self.set_db_attribute("Interface", port_name, "options:out_key", - "flow") - return self.get_port_ofport(port_name) - - def add_patch_port(self, local_name, remote_name): - self.run_vsctl(["add-port", self.br_name, local_name]) - self.set_db_attribute("Interface", local_name, "type", "patch") - self.set_db_attribute("Interface", local_name, "options:peer", - remote_name) - return self.get_port_ofport(local_name) - - def db_get_map(self, table, record, column): - output = self.run_vsctl(["get", table, record, column]) - if output: - str = output.rstrip("\n\r") - return self.db_str_to_map(str) - return {} - - def db_get_val(self, table, record, column): - output = self.run_vsctl(["get", table, record, column]) - if output: - return output.rstrip("\n\r") - - def db_str_to_map(self, full_str): - list = full_str.strip("{}").split(", ") - ret = {} - for e in list: - if e.find("=") == -1: - continue - arr = e.split("=") - ret[arr[0]] = arr[1].strip("\"") - return ret - - def get_port_name_list(self): - res = self.run_vsctl(["list-ports", self.br_name]) - if res: - return res.strip().split("\n") - return [] - - def get_port_stats(self, port_name): - return self.db_get_map("Interface", port_name, "statistics") - - def get_xapi_iface_id(self, xs_vif_uuid): - args = ["xe", "vif-param-get", "param-name=other-config", - "param-key=nicira-iface-id", "uuid=%s" % xs_vif_uuid] - try: - return utils.execute(args, root_helper=self.root_helper).strip() - except Exception, e: - LOG.error(_LE( - "Unable to execute %(cmd)s. Exception: %(exception)s"), - {'cmd': args, 'exception': e}) - - # returns a VIF object for each VIF port - def get_vif_ports(self): - edge_ports = [] - port_names = self.get_port_name_list() - for name in port_names: - external_ids = self.db_get_map("Interface", name, "external_ids") - ofport = self.db_get_val("Interface", name, "ofport") - if "iface-id" in external_ids and "attached-mac" in external_ids: - p = VifPort(name, ofport, external_ids["iface-id"], - external_ids["attached-mac"], self) - edge_ports.append(p) - elif ("xs-vif-uuid" in external_ids and - "attached-mac" in external_ids): - # if this is a xenserver and iface-id is not automatically - # synced to OVS from XAPI, we grab it from XAPI directly - iface_id = self.get_xapi_iface_id(external_ids["xs-vif-uuid"]) - p = VifPort(name, ofport, iface_id, - external_ids["attached-mac"], self) - edge_ports.append(p) - - return edge_ports - - def get_vif_port_set(self): - edge_ports = set() - port_names = self.get_port_name_list() - for name in port_names: - external_ids = self.db_get_map("Interface", name, "external_ids") - if "iface-id" in external_ids and "attached-mac" in external_ids: - edge_ports.add(external_ids['iface-id']) - elif ("xs-vif-uuid" in external_ids and - "attached-mac" in external_ids): - # if this is a xenserver and iface-id is not automatically - # synced to OVS from XAPI, we grab it from XAPI directly - iface_id = self.get_xapi_iface_id(external_ids["xs-vif-uuid"]) - edge_ports.add(iface_id) - return edge_ports - - def get_vif_port_by_id(self, port_id): - args = ['--', '--columns=external_ids,name,ofport', - 'find', 'Interface', - 'external_ids:iface-id="%s"' % port_id] - result = self.run_vsctl(args) - if not result: - return - match = self.re_id.search(result) - try: - 
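- # Pull the port attributes out of the regex match; a miss means
- # ovs-vsctl output that re_id could not parse (warning logged below).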
vif_mac = match.group('vif_mac') - vif_id = match.group('vif_id') - port_name = match.group('port_name') - ofport = int(match.group('ofport')) - return VifPort(port_name, ofport, vif_id, vif_mac, self) - except Exception, e: - LOG.warning(_LW("Unable to parse regex results. Exception: %s"), e) - return - - def delete_ports(self, all_ports=False): - if all_ports: - port_names = self.get_port_name_list() - else: - port_names = (port.port_name for port in self.get_vif_ports()) - - for port_name in port_names: - self.delete_port(port_name) - - -def get_bridge_for_iface(root_helper, iface): - args = ["ovs-vsctl", "--timeout=2", "iface-to-br", iface] - try: - return utils.execute(args, root_helper=root_helper).strip() - except Exception: - LOG.exception(_LE("Interface %s not found."), iface) - return None - - -def get_bridges(root_helper): - args = ["ovs-vsctl", "--timeout=2", "list-br"] - try: - return utils.execute(args, root_helper=root_helper).strip().split("\n") - except Exception: - LOG.exception(_LE("Unable to retrieve bridges.")) - return [] diff --git a/astara/common/linux/utils.py b/astara/common/linux/utils.py deleted file mode 100644 index 827529c7..00000000 --- a/astara/common/linux/utils.py +++ /dev/null @@ -1,123 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2012 Locaweb. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Juliano Martinez, Locaweb. - -import fcntl -import os -import shlex -import signal -import socket -import struct -import tempfile - -from eventlet.green import subprocess - -from astara.common.i18n import _ - -from oslo_log import log as logging - - -LOG = logging.getLogger(__name__) - - -def _subprocess_setup(): - # Python installs a SIGPIPE handler by default. This is usually not what - # non-Python subprocesses expect. 
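- # Resetting to SIG_DFL restores the usual "terminate on broken pipe"
- # behaviour that most Unix tools rely on.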
- signal.signal(signal.SIGPIPE, signal.SIG_DFL) - - -def subprocess_popen(args, stdin=None, stdout=None, stderr=None, shell=False, - env=None): - return subprocess.Popen(args, shell=shell, stdin=stdin, stdout=stdout, - stderr=stderr, preexec_fn=_subprocess_setup, - close_fds=True, env=env) - - -def execute(cmd, root_helper=None, process_input=None, addl_env=None, - check_exit_code=True, return_stderr=False): - if root_helper: - cmd = shlex.split(root_helper) + cmd - cmd = map(str, cmd) - - LOG.debug("Running command: %s", cmd) - env = os.environ.copy() - if addl_env: - env.update(addl_env) - obj = subprocess_popen(cmd, shell=False, - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - env=env) - - _stdout, _stderr = (process_input and - obj.communicate(process_input) or - obj.communicate()) - obj.stdin.close() - m = _("\nCommand: %(cmd)s\nExit code: %(code)s\nStdout: %(stdout)r\n" - "Stderr: %(stderr)r") % {'cmd': cmd, 'code': obj.returncode, - 'stdout': _stdout, 'stderr': _stderr} - LOG.debug(m) - if obj.returncode and check_exit_code: - raise RuntimeError(m) - - return return_stderr and (_stdout, _stderr) or _stdout - - -def get_interface_mac(interface): - DEVICE_NAME_LEN = 15 - MAC_START = 18 - MAC_END = 24 - s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - info = fcntl.ioctl(s.fileno(), 0x8927, - struct.pack('256s', interface[:DEVICE_NAME_LEN])) - return ''.join(['%02x:' % ord(char) - for char in info[MAC_START:MAC_END]])[:-1] - - -def replace_file(file_name, data): - """Replaces the contents of file_name with data in a safe manner. - - First write to a temp file and then rename. Since POSIX renames are - atomic, the file is unlikely to be corrupted by competing writes. - - We create the tempfile on the same device to ensure that it can be renamed. - """ - - base_dir = os.path.dirname(os.path.abspath(file_name)) - tmp_file = tempfile.NamedTemporaryFile('w+', dir=base_dir, delete=False) - tmp_file.write(data) - tmp_file.close() - os.chmod(tmp_file.name, 0644) - os.rename(tmp_file.name, file_name) diff --git a/astara/common/rpc.py b/astara/common/rpc.py deleted file mode 100644 index b2716821..00000000 --- a/astara/common/rpc.py +++ /dev/null @@ -1,153 +0,0 @@ -# Copyright 2015 Akanda, Inc -# -# Author: Akanda, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from six.moves.urllib import parse as urlparse - -from oslo_log import log as logging -from oslo_config import cfg -from oslo_service import service -import oslo_messaging - -from astara.common.i18n import _LW - -LOG = logging.getLogger(__name__) - - -def _deprecated_amqp_url(): - """Allow for deprecating amqp_url setting over time. - This warns and attempts to translate an amqp_url to something - oslo_messaging can use to load a driver. - """ - url = cfg.CONF.amqp_url - if not url: - return - LOG.warning(_LW( - 'Use of amqp_url is deprecated. 
Please instead use options defined in ' - 'oslo_messaging_rabbit to declare your AMQP connection.')) - url = urlparse.urlsplit(url) - if url.scheme == 'amqp': - scheme = 'rabbit' - else: - scheme = url.scheme - port = str(url.port or 5672) - netloc = url.netloc - if netloc.endswith(':'): - netloc = netloc[:-1] - out = urlparse.urlunsplit(( - scheme, - '%s:%s' % (netloc, port), - url.path, - '', '' - )) - return out - - -def get_transport(): - url = _deprecated_amqp_url() - return oslo_messaging.get_transport(conf=cfg.CONF, url=url) - - -def get_server(target, endpoints): - return oslo_messaging.get_rpc_server( - transport=get_transport(), - target=target, - endpoints=endpoints, - ) - - -def get_target(topic, fanout=True, exchange=None, version=None, server=None): - return oslo_messaging.Target( - topic=topic, fanout=fanout, exchange=exchange, version=version, - server=server) - - -def get_rpc_client(topic, exchange=None, version='1.0'): - """Creates an RPC client to be used to request methods be - executed on remote RPC servers - """ - target = get_target(topic=topic, exchange=exchange, - version=version, fanout=False) - return oslo_messaging.rpc.client.RPCClient( - get_transport(), target - ) - - -def get_rpc_notifier(topic='notifications'): - return oslo_messaging.notify.Notifier( - transport=get_transport(), - # TODO(adam_g): driver should be specified in oslo.messaging's cfg - driver='messaging', - topic=topic, - ) - - -class MessagingService(service.Service): - """Used to create objects that can manage multiple RPC connections""" - def __init__(self): - super(MessagingService, self).__init__() - self._servers = set() - - def _add_server(self, server): - self._servers.add(server) - - def create_rpc_consumer(self, topic, endpoints): - """Creates an RPC server for this host that will execute RPCs requested - by clients. Adds the resulting consumer to the pool of messaging - servers. - - :param topic: Topic on which to listen for RPC requests - :param endpoints: List of endpoint objects that define methods that - the server will execute. - """ - target = get_target(topic=topic, fanout=True, server=cfg.CONF.host) - server = get_server(target, endpoints) - LOG.debug('Created RPC server on topic %s', topic) - self._add_server(server) - - def create_notification_listener(self, endpoints, exchange=None, - topic='notifications'): - """Creates an oslo.messaging notification listener associated with - provided endpoints. Adds the resulting listener to the pool of - messaging servers. - - :param endpoints: list of endpoint objects that define methods for - processing prioritized notifications - :param exchange: Optional control exchange to listen on. If not - specified, oslo_messaging defaults to 'openstack' - :param topic: Topic on which to listen for notification events - """ - transport = get_transport() - target = get_target(topic=topic, fanout=False, - exchange=exchange) - pool = 'astara.' + topic + '.' 
+ cfg.CONF.host
-        server = oslo_messaging.get_notification_listener(
-            transport, [target], endpoints, pool=pool, executor='threading')
-        LOG.debug(
-            'Created RPC notification listener on topic:%s/exchange:%s.',
-            topic, exchange)
-        self._add_server(server)
-
-    def start(self):
-        LOG.info('Astara notification listener service starting...')
-        super(MessagingService, self).start()
-        [s.start() for s in self._servers]
-        LOG.info('Astara notification listener service started.')
-
-    def stop(self):
-        LOG.info('Astara notification listener service stopping...')
-        super(MessagingService, self).stop()
-        [s.wait() for s in self._servers]
-        LOG.info('Astara notification listener service stopped.')
diff --git a/astara/coordination.py b/astara/coordination.py
deleted file mode 100644
index bc992be9..00000000
--- a/astara/coordination.py
+++ /dev/null
@@ -1,186 +0,0 @@
-# Copyright 2015 Akanda, Inc.
-#
-# Author: Akanda, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import signal
-import time
-
-from oslo_config import cfg
-from oslo_log import log
-
-import tooz
-from tooz import coordination as tz_coordination
-
-from astara import event as ak_event
-from astara.common.i18n import _, _LI
-
-
-LOG = log.getLogger(__name__)
-CONF = cfg.CONF
-
-
-COORD_OPTS = [
-    cfg.BoolOpt('enabled', default=False,
-                help=_('Whether to use an external coordination service to '
-                       'coordinate a cluster of astara-orchestrator nodes. '
-                       'This may be disabled for single-node '
-                       'astara-orchestrator environments.')),
-    cfg.StrOpt('url',
-               default='memcached://localhost:11211',
-               help=_('URL of supported coordination service')),
-    cfg.StrOpt('group_id', default='astara.orchestrator',
-               help=_('ID of coordination group to join.')),
-    cfg.IntOpt('heartbeat_interval', default=1,
-               help=_('Interval (in seconds) for cluster heartbeats')),
-]
-CONF.register_group(cfg.OptGroup(name='coordination'))
-CONF.register_opts(COORD_OPTS, group='coordination')
-
-
-class InvalidEventType(Exception):
-    pass
-
-
-class CoordinatorDone(Exception):
-    pass
-
-
-class RugCoordinator(object):
-    def __init__(self, notifications_queue):
-        self._queue = notifications_queue
-        self.host = CONF.host
-        self.url = CONF.coordination.url
-        self.group = CONF.coordination.group_id
-        self.heartbeat_interval = CONF.coordination.heartbeat_interval
-        self._coordinator = None
-        signal.signal(signal.SIGTERM, self.stop)
-        self.start()
-
-    def start(self):
-        """Brings the coordination service online
-
-        This connects the coordination service to its tooz backend. This
-        involves:
-
-            - connecting to the cluster
-            - creating the coordination group (if required)
-            - joining the coordination group
-            - registering callbacks to respond to join/leave membership
-              events
-
-        After the local node has joined the cluster and knows its remote
-        peers, it fires off an initial rebalance event to the workers
-        so they can seed their hash ring with the current membership.
-        """
-        LOG.info(_LI('Starting RUG coordinator process for host %s on %s'),
-                 self.host, self.url)
-        self._coordinator = tz_coordination.get_coordinator(
-            self.url, self.host)
-        self._coordinator.start()
-
-        try:
-            self._coordinator.create_group(self.group).get()
-        except tooz.coordination.GroupAlreadyExist:
-            pass
-
-        try:
-            self._coordinator.join_group(self.group).get()
-            self._coordinator.heartbeat()
-        except tooz.coordination.MemberAlreadyExist:
-            pass
-
-        self._coordinator.watch_join_group(self.group, self.cluster_changed)
-        self._coordinator.watch_leave_group(self.group, self.cluster_changed)
-        self._coordinator.heartbeat()
-        LOG.debug("Sending initial event changed for members: %s" %
-                  self.members)
-        self.cluster_changed(event=None, node_bootstrap=True)
-
-    def run(self):
-        try:
-            while True:
-                self._coordinator.heartbeat()
-                self._coordinator.run_watchers()
-                time.sleep(self.heartbeat_interval)
-        except CoordinatorDone:
-            LOG.info(_LI('Stopping RUG coordinator.'))
-            return
-
-    def stop(self, signal=None, frame=None):
-        """Stop the coordinator service.
-
-        This ensures a clean shutdown of the coordinator service and attempts
-        to advertise its departure to the rest of the cluster. Note this is
-        registered as a signal handler for SIGTERM, so that it is run when the
-        main process shuts down and subprocesses receive the signal.
-        """
-        self._coordinator.unwatch_join_group(self.group, self.cluster_changed)
-        self._coordinator.unwatch_leave_group(self.group, self.cluster_changed)
-
-        if self.is_leader:
-            try:
-                self._coordinator.stand_down_group_leader(self.group)
-            except tooz.NotImplemented:
-                pass
-        self._coordinator.leave_group(self.group).get()
-        raise CoordinatorDone()
-
-    @property
-    def members(self):
-        """Returns the current cluster membership list"""
-        members = self._coordinator.get_members(self.group).get()
-
-        # tooz ZK driver reports 'leader' as a member, which can screw with
-        # hashing.
-        try:
-            members.remove('leader')
-        except ValueError:
-            pass
-
-        return members
-
-    @property
-    def is_leader(self):
-        """Returns true if the local cluster member is the leader"""
-        return self._coordinator.get_leader(self.group).get() == self.host
-
-    def cluster_changed(self, event, node_bootstrap=False):
-        """Event callback to be called by tooz on membership changes"""
-        LOG.debug('Broadcasting cluster changed event to trigger rebalance. '
                  'members=%s' % self.members)
-
-        body = {
-            'members': self.members
-        }
-
-        # Flag this as a local bootstrap rebalance rather than one in reaction
-        # to a cluster event.
-        if node_bootstrap:
-            body['node_bootstrap'] = True
-
-        r = ak_event.Resource(
-            tenant_id='*',
-            id='*',
-            driver='*',
-        )
-        e = ak_event.Event(
-            resource=r,
-            crud=ak_event.REBALANCE,
-            body=body,
-        )
-        self._queue.put(('*', e))
-
-
-def start(notification_queue):
-    return RugCoordinator(notification_queue).run()
diff --git a/astara/daemon.py b/astara/daemon.py
deleted file mode 100644
index 6dcd59f2..00000000
--- a/astara/daemon.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# Copyright 2014 DreamHost, LLC
-#
-# Author: DreamHost, LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-"""Utilities for managing ourselves as a daemon.
-"""
-
-import signal
-
-from oslo_log import log as logging
-
-from astara.common.i18n import _LI
-
-
-def ignore_signals():
-    """Ignore signals that might interrupt processing
-
-    Since the RUG doesn't want to be asynchronously interrupted, the
-    various signals received need to be ignored. The registered signals,
-    including SIGHUP, SIGALRM, and the default signals SIGUSR1 and
-    SIGUSR2, are captured and ignored via the SIG_IGN action.
-
-    :param: None
-
-    :returns: None
-
-    """
-    for s in [signal.SIGHUP, signal.SIGUSR1, signal.SIGUSR2, signal.SIGALRM]:
-        logging.getLogger(__name__).info(_LI('ignoring signal %s'), s)
-        signal.signal(s, signal.SIG_IGN)
diff --git a/astara/db/__init__.py b/astara/db/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/astara/db/api.py b/astara/db/api.py
deleted file mode 100644
index ca34ce79..00000000
--- a/astara/db/api.py
+++ /dev/null
@@ -1,128 +0,0 @@
-# Copyright 2015 Akanda, Inc.
-#
-# Author: Akanda, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import abc
-import six
-
-from oslo_config import cfg
-from oslo_db import api as db_api
-
-
-_BACKEND_MAPPING = {
-    'sqlalchemy': 'astara.db.sqlalchemy.api'
-}
-
-IMPL = db_api.DBAPI.from_config(
-    cfg.CONF, backend_mapping=_BACKEND_MAPPING, lazy=True)
-
-
-def get_instance():
-    return IMPL
-
-
-@six.add_metaclass(abc.ABCMeta)
-class Connection(object):
-    @abc.abstractmethod
-    def __init__(self):
-        pass
-
-    @abc.abstractmethod
-    def enable_resource_debug(self, resource_uuid, reason=None):
-        """Enter a resource into debug mode
-
-        :param resource_uuid: str uuid of the resource to be placed into debug
-            mode
-        :param reason: str (optional) reason for entering resource into debug
-            mode
-        """
-
-    @abc.abstractmethod
-    def disable_resource_debug(self, resource_uuid):
-        """Remove a resource from debug mode
-
-        :param resource_uuid: str uuid of the resource to be removed from debug
-            mode
-        """
-
-    @abc.abstractmethod
-    def resource_in_debug(self, resource_uuid):
-        """Determines if a resource is in debug mode
-
-        :param resource_uuid: str the uuid of the resource to query
-        :returns: tuple (False, None) if resource is not in debug mode or
-            (True, "reason") if it is.
-        """
-
-    @abc.abstractmethod
-    def resources_in_debug(self):
-        """Queries all resources in debug mode
-
-        :returns: a set of (resource_uuid, reason) tuples
-        """
-
-    @abc.abstractmethod
-    def enable_tenant_debug(self, tenant_uuid, reason=None):
-        """Enter a tenant into debug mode
-
-        :param tenant_uuid: str uuid of the tenant to be placed into debug
-            mode
-        :param reason: str (optional) reason for entering tenant into debug
-            mode
-        """
-
-    @abc.abstractmethod
-    def disable_tenant_debug(self, tenant_uuid):
-        """Remove a tenant from debug mode
-
-        :param tenant_uuid: str uuid of the tenant to be removed from debug
-            mode
-        """
-
-    @abc.abstractmethod
-    def tenant_in_debug(self, tenant_uuid):
-        """Determines if a tenant is in debug mode
-
-        :param tenant_uuid: str the uuid of the tenant to query
-        :returns: tuple (False, None) if tenant is not in debug mode or
-            (True, "reason") if it is.
-        """
-
-    @abc.abstractmethod
-    def tenants_in_debug(self):
-        """Queries all tenants in debug mode
-
-        :returns: a set of (tenant_uuid, reason) tuples
-        """
-
-    @abc.abstractmethod
-    def enable_global_debug(self, reason=None):
-        """Enter the entire system into debug mode
-
-        :param reason: str (optional) reason for entering cluster into global
-            debug mode.
-        """
-
-    @abc.abstractmethod
-    def disable_global_debug(self):
-        """Remove the entire system from global debug mode"""
-
-    @abc.abstractmethod
-    def global_debug(self):
-        """Determine whether the cluster is in global debug mode
-
-        :returns: tuple (False, None) if cluster is not in global debug mode
-            or (True, "reason") if it is.
-        """
diff --git a/astara/db/migration.py b/astara/db/migration.py
deleted file mode 100644
index 4cda4f74..00000000
--- a/astara/db/migration.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
- -"""Database setup and migration commands.""" - -from oslo_config import cfg -from stevedore import driver - -_IMPL = None - - -def get_backend(): - global _IMPL - if not _IMPL: - cfg.CONF.import_opt('backend', 'oslo_db.options', group='database') - _IMPL = driver.DriverManager("astara.database.migration_backend", - cfg.CONF.database.backend).driver - return _IMPL - - -def upgrade(version=None): - """Migrate the database to `version` or the most recent version.""" - return get_backend().upgrade(version) - - -def downgrade(version=None): - return get_backend().downgrade(version) - - -def version(): - return get_backend().version() - - -def stamp(version): - return get_backend().stamp(version) - - -def revision(message, autogenerate): - return get_backend().revision(message, autogenerate) - - -def create_schema(): - return get_backend().create_schema() diff --git a/astara/db/sqlalchemy/__init__.py b/astara/db/sqlalchemy/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/astara/db/sqlalchemy/alembic.ini b/astara/db/sqlalchemy/alembic.ini deleted file mode 100644 index a7689803..00000000 --- a/astara/db/sqlalchemy/alembic.ini +++ /dev/null @@ -1,54 +0,0 @@ -# A generic, single database configuration. - -[alembic] -# path to migration scripts -script_location = %(here)s/alembic - -# template used to generate migration files -# file_template = %%(rev)s_%%(slug)s - -# max length of characters to apply to the -# "slug" field -#truncate_slug_length = 40 - -# set to 'true' to run the environment during -# the 'revision' command, regardless of autogenerate -# revision_environment = false - -#sqlalchemy.url = driver://user:pass@localhost/dbname - - -# Logging configuration -[loggers] -keys = root,sqlalchemy,alembic - -[handlers] -keys = console - -[formatters] -keys = generic - -[logger_root] -level = WARN -handlers = console -qualname = - -[logger_sqlalchemy] -level = WARN -handlers = -qualname = sqlalchemy.engine - -[logger_alembic] -level = INFO -handlers = -qualname = alembic - -[handler_console] -class = StreamHandler -args = (sys.stderr,) -level = NOTSET -formatter = generic - -[formatter_generic] -format = %(levelname)-5.5s [%(name)s] %(message)s -datefmt = %H:%M:%S diff --git a/astara/db/sqlalchemy/alembic/env.py b/astara/db/sqlalchemy/alembic/env.py deleted file mode 100644 index ca307366..00000000 --- a/astara/db/sqlalchemy/alembic/env.py +++ /dev/null @@ -1,61 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from logging import config as log_config - -from alembic import context - -try: - # NOTE(whaom): This is to register the DB2 alembic code which - # is an optional runtime dependency. - from ibm_db_alembic.ibm_db import IbmDbImpl # noqa -except ImportError: - pass - -from astara.db.sqlalchemy import api as sqla_api -from astara.db.sqlalchemy import models - -# this is the Alembic Config object, which provides -# access to the values within the .ini file in use. -config = context.config - -# Interpret the config file for Python logging. 
-# This line sets up loggers basically. -log_config.fileConfig(config.config_file_name) - -# add your model's MetaData object here -# for 'autogenerate' support -# from myapp import mymodel -target_metadata = models.Base.metadata - -# other values from the config, defined by the needs of env.py, -# can be acquired: -# my_important_option = config.get_main_option("my_important_option") -# ... etc. - - -def run_migrations_online(): - """Run migrations in 'online' mode. - - In this scenario we need to create an Engine - and associate a connection with the context. - - """ - engine = sqla_api.get_engine() - with engine.connect() as connection: - context.configure(connection=connection, - target_metadata=target_metadata) - with context.begin_transaction(): - context.run_migrations() - - -run_migrations_online() diff --git a/astara/db/sqlalchemy/alembic/script.py.mako b/astara/db/sqlalchemy/alembic/script.py.mako deleted file mode 100644 index 95702017..00000000 --- a/astara/db/sqlalchemy/alembic/script.py.mako +++ /dev/null @@ -1,22 +0,0 @@ -"""${message} - -Revision ID: ${up_revision} -Revises: ${down_revision} -Create Date: ${create_date} - -""" - -# revision identifiers, used by Alembic. -revision = ${repr(up_revision)} -down_revision = ${repr(down_revision)} - -from alembic import op -import sqlalchemy as sa -${imports if imports else ""} - -def upgrade(): - ${upgrades if upgrades else "pass"} - - -def downgrade(): - ${downgrades if downgrades else "pass"} diff --git a/astara/db/sqlalchemy/alembic/versions/4f695b725637_initial_migration.py b/astara/db/sqlalchemy/alembic/versions/4f695b725637_initial_migration.py deleted file mode 100644 index c5945016..00000000 --- a/astara/db/sqlalchemy/alembic/versions/4f695b725637_initial_migration.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright 2015 Akanda, Inc. -# -# Author: Akanda, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""initial_migration - -Revision ID: 4f695b725637 -Revises: None -Create Date: 2015-07-02 12:29:50.243891 - -""" - -import sqlalchemy as sa - -from alembic import op - -# revision identifiers, used by Alembic. 
-revision = '4f695b725637' -down_revision = None - - -def upgrade(): - op.create_table( - 'resource_debug', - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.Column('updated_at', sa.DateTime(), nullable=True), - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('uuid', sa.String(length=36), nullable=False), - sa.Column('reason', sa.String(length=255), nullable=True), - sa.PrimaryKeyConstraint('id'), - sa.UniqueConstraint('uuid', name='uniq_debug_resource0uuid'), - ) - op.create_table( - 'tenant_debug', - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.Column('updated_at', sa.DateTime(), nullable=True), - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('uuid', sa.String(length=36), nullable=False), - sa.Column('reason', sa.String(length=255), nullable=True), - sa.PrimaryKeyConstraint('id'), - sa.UniqueConstraint('uuid', name='uniq_debug_tenant0uuid'), - ) - op.create_table( - 'global_debug', - sa.Column('created_at', sa.DateTime(), nullable=True), - sa.Column('updated_at', sa.DateTime(), nullable=True), - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('status', sa.Integer(), nullable=False), - sa.Column('reason', sa.String(length=255), nullable=True), - sa.PrimaryKeyConstraint('id'), - sa.UniqueConstraint('status', name='uniq_global_debug0status'), - ) - - -def downgrade(): - raise NotImplementedError(('Downgrade from initial migration is' - ' unsupported.')) diff --git a/astara/db/sqlalchemy/api.py b/astara/db/sqlalchemy/api.py deleted file mode 100644 index 476f7c83..00000000 --- a/astara/db/sqlalchemy/api.py +++ /dev/null @@ -1,166 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# Copyright 2015 Akanda, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""SQLAlchemy storage backend.""" - -from oslo_config import cfg -from oslo_db import exception as db_exc -from oslo_db.sqlalchemy import session as db_session - -from astara.db import api -from astara.db.sqlalchemy import models - -CONF = cfg.CONF - - -_FACADE = None - - -def _create_facade_lazily(): - global _FACADE - if _FACADE is None: - _FACADE = db_session.EngineFacade.from_config(CONF) - return _FACADE - - -def get_engine(): - facade = _create_facade_lazily() - return facade.get_engine() - - -def get_session(**kwargs): - facade = _create_facade_lazily() - return facade.get_session(**kwargs) - - -def get_backend(): - """The backend is this module itself.""" - return Connection() - - -def model_query(model, *args, **kwargs): - """Query helper for simpler session usage. 
- - :param session: if present, the session to use - """ - - session = kwargs.get('session') or get_session() - query = session.query(model, *args) - return query - - -class Connection(api.Connection): - """SqlAlchemy connection.""" - - def __init__(self): - pass - - def _enable_debug(self, model, uuid, reason=None): - model.update({ - 'uuid': uuid, - 'reason': reason, - }) - try: - model.save() - except db_exc.DBDuplicateEntry: - pass - - def _disable_debug(self, model=None, uuid=None): - query = model_query(model) - query.filter_by(uuid=uuid).delete() - - def _check_debug(self, model, uuid): - query = model_query(model) - res = query.filter_by(uuid=uuid).all() - if not res: - return (False, None) - return (True, res[0].reason) - - def _list_debug(self, model): - res = model_query(model).all() - return set((r.uuid, r.reason) for r in res) - - def enable_resource_debug(self, resource_uuid, reason=None): - self._enable_debug( - model=models.ResourceDebug(), - uuid=resource_uuid, - reason=reason, - ) - - def disable_resource_debug(self, resource_uuid): - self._disable_debug( - model=models.ResourceDebug, - uuid=resource_uuid, - ) - - def resource_in_debug(self, resource_uuid): - return self._check_debug(models.ResourceDebug, resource_uuid) - - def resources_in_debug(self): - return self._list_debug(models.ResourceDebug) - - def enable_tenant_debug(self, tenant_uuid, reason=None): - self._enable_debug( - model=models.TenantDebug(), - uuid=tenant_uuid, - reason=reason, - ) - - def disable_tenant_debug(self, tenant_uuid): - self._disable_debug( - model=models.TenantDebug, - uuid=tenant_uuid, - ) - - def tenant_in_debug(self, tenant_uuid): - return self._check_debug(models.TenantDebug, tenant_uuid) - - def tenants_in_debug(self): - return self._list_debug(models.TenantDebug) - - def _set_global_debug(self, status, reason=None): - query = model_query(models.GlobalDebug) - res = query.first() - if not res: - gdb = models.GlobalDebug() - gdb.update({ - 'status': status, - 'reason': reason, - }) - gdb.save() - - def enable_global_debug(self, reason=None): - gdb = models.GlobalDebug() - gdb.update({ - 'status': 1, - 'reason': reason, - }) - try: - gdb.save() - except db_exc.DBDuplicateEntry: - pass - - def disable_global_debug(self): - query = model_query(models.GlobalDebug) - query.filter_by(status=1).delete() - - def global_debug(self): - query = model_query(models.GlobalDebug) - res = query.filter_by(status=1).all() - if not res: - return (False, None) - return (True, res[0].reason) diff --git a/astara/db/sqlalchemy/dbsync.py b/astara/db/sqlalchemy/dbsync.py deleted file mode 100644 index 0f270836..00000000 --- a/astara/db/sqlalchemy/dbsync.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright 2012 New Dream Network, LLC (DreamHost) -# Copyright 2015 Akanda, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
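
[Reviewer illustration, not part of the patch: the global-debug round trip the
SQLAlchemy Connection above implements. A sketch under the assumption that
cfg.CONF.database.connection points at an initialized schema (see the initial
alembic migration earlier in this patch).]

    from astara.db.sqlalchemy import api as sqla_api

    conn = sqla_api.get_backend()
    conn.enable_global_debug(reason='maintenance window')
    # global_debug() filters on the single status=1 row created above.
    assert conn.global_debug() == (True, 'maintenance window')
    conn.disable_global_debug()
    assert conn.global_debug() == (False, None)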
- -import os -import six - -from alembic import command as alembic_command -from alembic import config as alembic_config -from alembic import util as alembic_util -from oslo_config import cfg - -from astara.common.i18n import _ -import pkg_resources - - -CONF = cfg.CONF - - -def do_alembic_command(config, cmd, *args, **kwargs): - try: - getattr(alembic_command, cmd)(config, *args, **kwargs) - except alembic_util.CommandError as e: - alembic_util.err(six.text_type(e)) - - -def add_alembic_subparser(sub, cmd): - return sub.add_parser(cmd, help=getattr(alembic_command, cmd).__doc__) - - -def do_upgrade(config, cmd): - revision = CONF.command.revision or 'head' - do_alembic_command(config, cmd, revision, sql=CONF.command.sql) - - -def do_stamp(config, cmd): - do_alembic_command(config, cmd, - CONF.command.revision, - sql=CONF.command.sql) - - -def add_command_parsers(subparsers): - for name in ['current', 'history', 'branches']: - parser = add_alembic_subparser(subparsers, name) - parser.set_defaults(func=do_alembic_command) - - parser = add_alembic_subparser(subparsers, 'upgrade') - parser.add_argument('--delta', type=int) - parser.add_argument('--sql', action='store_true') - parser.add_argument('revision', nargs='?') - parser.add_argument('--mysql-engine', - default='', - help='Change MySQL storage engine of current ' - 'existing tables') - parser.set_defaults(func=do_upgrade) - - parser = add_alembic_subparser(subparsers, 'stamp') - parser.add_argument('--sql', action='store_true') - parser.add_argument('revision') - parser.set_defaults(func=do_stamp) - - -command_opt = cfg.SubCommandOpt('command', - title='Command', - help=_('Available commands'), - handler=add_command_parsers) - -CONF.register_cli_opt(command_opt) - - -def get_alembic_config(): - config = alembic_config.Config(os.path.join(os.path.dirname(__file__), - 'alembic.ini')) - return config - - -def main(): - dist = pkg_resources.get_distribution('astara') - CONF(project='astara-orchestrator', version=dist.version) - config = get_alembic_config() - config.astara_config = CONF - CONF.command.func(config, CONF.command.name) diff --git a/astara/db/sqlalchemy/migration.py b/astara/db/sqlalchemy/migration.py deleted file mode 100644 index 4ab29961..00000000 --- a/astara/db/sqlalchemy/migration.py +++ /dev/null @@ -1,113 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -import alembic -from alembic import config as alembic_config -import alembic.migration as alembic_migration -from oslo_db import exception as db_exc - -from astara.db.sqlalchemy import api as sqla_api -from astara.db.sqlalchemy import models - - -def _alembic_config(): - path = os.path.join(os.path.dirname(__file__), 'alembic.ini') - config = alembic_config.Config(path) - return config - - -def version(config=None, engine=None): - """Current database version. 
-
-    :returns: Database version
-    :rtype: string
-    """
-    if engine is None:
-        engine = sqla_api.get_engine()
-    with engine.connect() as conn:
-        context = alembic_migration.MigrationContext.configure(conn)
-        return context.get_current_revision()
-
-
-def upgrade(revision, config=None):
-    """Used for upgrading database.
-
-    :param revision: Desired database revision
-    :type revision: string
-    """
-    revision = revision or 'head'
-    config = config or _alembic_config()
-
-    alembic.command.upgrade(config, revision)
-
-
-def create_schema(config=None, engine=None):
-    """Create database schema from models description.
-
-    Can be used for initial installation instead of upgrade('head').
-    """
-    if engine is None:
-        engine = sqla_api.get_engine()
-
-    # NOTE(viktors): If we use metadata.create_all() on a non-empty db
-    #                schema, it will only add the new tables and leave
-    #                existing ones as-is. So we should avoid that situation.
-    if version(engine=engine) is not None:
-        raise db_exc.DbMigrationError("DB schema is already under version"
-                                      " control. Use upgrade() instead")
-
-    models.Base.metadata.create_all(engine)
-    stamp('head', config=config)
-
-
-def downgrade(revision, config=None):
-    """Used for downgrading database.
-
-    :param revision: Desired database revision
-    :type revision: string
-    """
-    revision = revision or 'base'
-    config = config or _alembic_config()
-    return alembic.command.downgrade(config, revision)
-
-
-def stamp(revision, config=None):
-    """Stamps database with provided revision.
-
-    Don't run any migrations.
-
-    :param revision: Should match one from repository or head - to stamp
-                     database with most recent revision
-    :type revision: string
-    """
-    config = config or _alembic_config()
-    return alembic.command.stamp(config, revision=revision)
-
-
-def revision(message=None, autogenerate=False, config=None):
-    """Creates template for migration.
-
-    :param message: Text that will be used for migration title
-    :type message: string
-    :param autogenerate: If True - generates diff based on current database
-                         state
-    :type autogenerate: bool
-    """
-    config = config or _alembic_config()
-    return alembic.command.revision(config, message=message,
-                                    autogenerate=autogenerate)
diff --git a/astara/db/sqlalchemy/models.py b/astara/db/sqlalchemy/models.py
deleted file mode 100644
index 07ee2205..00000000
--- a/astara/db/sqlalchemy/models.py
+++ /dev/null
@@ -1,113 +0,0 @@
-# -*- encoding: utf-8 -*-
-#
-# Copyright 2013 Hewlett-Packard Development Company, L.P.
-# Copyright 2015 Akanda, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-SQLAlchemy models for astara data.
-""" - -from astara.common.i18n import _ - -from oslo_config import cfg -from oslo_db import options as db_options -from oslo_db.sqlalchemy import models -import six.moves.urllib.parse as urlparse -from sqlalchemy import Column -from sqlalchemy import Integer -from sqlalchemy import schema, String -from sqlalchemy.ext.declarative import declarative_base - - -sql_opts = [ - cfg.StrOpt('mysql_engine', - default='InnoDB', - help=_('MySQL engine to use.')) -] - -_DEFAULT_SQL_CONNECTION = 'sqlite:///astara.db' - - -cfg.CONF.register_opts(sql_opts, 'database') -db_options.set_defaults(cfg.CONF, _DEFAULT_SQL_CONNECTION, 'ironic.sqlite') - - -def table_args(): - engine_name = urlparse.urlparse(cfg.CONF.database.connection).scheme - if engine_name == 'mysql': - return {'mysql_engine': cfg.CONF.database.mysql_engine, - 'mysql_charset': "utf8"} - return None - - -class AstaraBase(models.TimestampMixin, - models.ModelBase): - - metadata = None - - def as_dict(self): - d = {} - for c in self.__table__.columns: - d[c.name] = self[c.name] - return d - - def save(self, session=None): - import astara.db.sqlalchemy.api as db_api - - if session is None: - session = db_api.get_session() - - super(AstaraBase, self).save(session) - -Base = declarative_base(cls=AstaraBase) - - -class ResourceDebug(Base): - """Represents a resource in debug mode.""" - - __tablename__ = 'resource_debug' - __table_args__ = ( - schema.UniqueConstraint('uuid', name='uniq_debug_resource0uuid'), - table_args() - ) - id = Column(Integer, primary_key=True) - uuid = Column(String(36)) - reason = Column(String(255), nullable=True) - - -class TenantDebug(Base): - """Represents a tenant in debug mode.""" - - __tablename__ = 'tenant_debug' - __table_args__ = ( - schema.UniqueConstraint('uuid', name='uniq_debug_tenant0uuid'), - table_args() - ) - id = Column(Integer, primary_key=True) - uuid = Column(String(36)) - reason = Column(String(255), nullable=True) - - -class GlobalDebug(Base): - """Stores a single row that serves as a status flag for global debug""" - - __tablename__ = 'global_debug' - __table_args__ = ( - schema.UniqueConstraint('status', name='uniq_global_debug0status'), - table_args() - ) - id = Column(Integer, primary_key=True) - status = Column(Integer) - reason = Column(String(255), nullable=True) diff --git a/astara/debug.py b/astara/debug.py deleted file mode 100644 index 3310fee4..00000000 --- a/astara/debug.py +++ /dev/null @@ -1,80 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-
-
-from __future__ import print_function
-
-import os
-import sys
-
-from oslo_config import cfg
-from oslo_log import log as logging
-
-from astara import drivers
-from astara import state
-from astara import worker
-
-from astara.common import config
-
-
-DEBUG_OPTS = [
-    cfg.StrOpt(
-        'router-id', required=True,
-        help='The UUID for the router to debug')
-]
-
-
-class Fake(object):
-    def __init__(self, crud):
-        self.crud = crud
-
-
-def delete_callback():
-    print('DELETE')
-
-
-def bandwidth_callback(*args, **kwargs):
-    print('BANDWIDTH:', args, kwargs)
-
-
-def debug_one_router(args=sys.argv[1:]):
-    # Add our extra option for specifying the router-id to debug
-    cfg.CONF.register_cli_opts(DEBUG_OPTS)
-    cfg.CONF.set_override('boot_timeout', 60000)
-    cfg.CONF.import_opt('host', 'astara.main')
-    config.parse_config(args)
-    logging.setup(cfg.CONF, __name__)
-    log = logging.getLogger(__name__)
-    log.debug('Proxy settings: %r', os.getenv('no_proxy'))
-
-    context = worker.WorkerContext()
-    driver = drivers.get('router')(context, cfg.CONF.router_id)
-    a = state.Automaton(
-        resource=driver,
-        tenant_id=driver._router.tenant_id,
-        delete_callback=delete_callback,
-        bandwidth_callback=bandwidth_callback,
-        worker_context=context,
-        queue_warning_threshold=100,
-        reboot_error_threshold=1,
-    )
-
-    a.send_message(Fake('update'))
-
-    import pdb
-    pdb.set_trace()
-
-    a.update(context)
diff --git a/astara/drivers/__init__.py b/astara/drivers/__init__.py
deleted file mode 100644
index 5edfe8ef..00000000
--- a/astara/drivers/__init__.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# Copyright (c) 2015 Akanda, Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_config import cfg
-from oslo_log import log as logging
-
-from astara.drivers.router import Router
-from astara.drivers.loadbalancer import LoadBalancer
-
-DRIVER_OPTS = [
-    cfg.ListOpt('enabled_drivers',
-                default=['router', ],
-                help='list of drivers the rug process will load'),
-]
-cfg.CONF.register_opts(DRIVER_OPTS)
-
-ASTARA_APP_OPTS = [
-    cfg.IntOpt('max_sleep', default=15,
-               help='The max sleep seconds between each attempt by'
-                    ' neutron client for fetching resource.'),
-]
-cfg.CONF.register_group(cfg.OptGroup(name='astara_appliance'))
-cfg.CONF.register_opts(ASTARA_APP_OPTS, 'astara_appliance')
-
-LOG = logging.getLogger(__name__)
-
-AVAILABLE_DRIVERS = {
-    Router.RESOURCE_NAME: Router,
-    LoadBalancer.RESOURCE_NAME: LoadBalancer,
-}
-
-
-class InvalidDriverException(Exception):
-    """Triggered when driver is not available in AVAILABLE_DRIVERS"""
-    pass
-
-
-def get(requested_driver):
-    """Returns the driver class for the requested_driver param; raises
-    InvalidDriverException if the driver is not listed in AVAILABLE_DRIVERS.
-
-    :param requested_driver: name of desired driver
-    :return: returns driver object
-    """
-    if requested_driver in AVAILABLE_DRIVERS:
-        return AVAILABLE_DRIVERS[requested_driver]
-
-    raise InvalidDriverException(
-        'Failed loading driver: %s' % requested_driver
-    )
-
-
-def load_from_byonf(worker_context, byonf_result, resource_id):
-    """Returns a loaded driver based on astara-neutron BYONF response
-
-    :param worker_context: Worker context with clients
-    :param byonf_result: dict response from neutron API describing
-                         user-provided NF info (specifically image_uuid and
-                         driver)
-    :param resource_id: The UUID of the logical resource derived from the
-                        notification message
-
-    Responsible for also setting correct driver attributes based on BYONF
-    specs.
-    """
-    driver_obj = get(byonf_result['driver'])(worker_context, resource_id)
-    if byonf_result.get('image_uuid'):
-        driver_obj.image_uuid = byonf_result['image_uuid']
-    return driver_obj
-
-
-def enabled_drivers():
-    for driver in cfg.CONF.enabled_drivers:
-        try:
-            d = get(driver)
-            yield d
-        except InvalidDriverException as e:
-            LOG.exception(e)
-            pass
diff --git a/astara/drivers/base.py b/astara/drivers/base.py
deleted file mode 100644
index f412c173..00000000
--- a/astara/drivers/base.py
+++ /dev/null
@@ -1,194 +0,0 @@
-# Copyright (c) 2015 Akanda, Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-from oslo_log import log as logging
-
-
-class BaseDriver(object):
-
-    RESOURCE_NAME = 'BaseDriver'
-
-    def __init__(self, worker_context, id, log=None):
-        """This is the abstract for rug drivers.
-
-        :param id: logical resource id
-        :param log: override default log
-        """
-        self.id = id
-        self.external_port = None
-        self.details = []
-        self.flavor = None
-        self.image_uuid = None
-        self.name = 'ak-%s-%s' % (self.RESOURCE_NAME, self.id)
-
-        if log:
-            self.log = log
-        else:
-            self.log = logging.getLogger(self.name)
-
-        self.post_init(worker_context)
-
-    def post_init(self, worker_context):
-        """post init hook
-
-        :param worker_context:
-        :returns: None
-        """
-        pass
-
-    def pre_boot(self, worker_context):
-        """pre boot hook
-
-        :param worker_context:
-        :returns: None
-        """
-        pass
-
-    def post_boot(self, worker_context):
-        """post boot hook
-
-        :param worker_context:
-        :returns: None
-        """
-        pass
-
-    def update_state(self, worker_context, silent=False):
-        """returns state of logical resource.
-
-        :param worker_context:
-        :param silent:
-        :returns: None
-        """
-        pass
-
-    def build_config(self, worker_context, mgt_port, iface_map):
-        """gets config of logical resource attached to worker_context.
-
-        :param worker_context:
-        :returns: None
-        """
-        pass
-
-    def update_config(self, management_address, config):
-        """Updates appliance configuration
-
-        This is responsible for pushing configuration to the managed
-        appliance
-        """
-        pass
-
-    def synchronize_state(self, worker_context, state):
-        """Sometimes a driver will need to update a service behind it with a
-        new state.
- - :param state: a valid state - """ - pass - - def make_ports(self, worker_context): - """Make ports call back for the nova client. - - This is expected to create the management port for the instance - and any required instance ports. - - :param worker_context: - - :returns: A tuple (management_port, [instance_ports]) - """ - def _make_ports(): - pass - - return _make_ports - - def delete_ports(self, worker_context): - """Delete all created ports. - - :param worker_context: - :returns: None - """ - - @staticmethod - def pre_populate_hook(): - """called in populate.py during driver loading loop. - """ - pass - - def pre_plug(self, worker_context): - """pre-plug hook - - :param worker_context: - :returns: None - """ - - @staticmethod - def get_resource_id_for_tenant(worker_context, tenant_id, message): - """Find the id of a resource for a given tenant id and message. - - For some resources simply searching by tenant_id is enough, for - others some context from the message payload may be necessary. - - :param worker_context: A worker context with instantiated clients - :param tenant_id: The tenant uuid to search for - :param message: The message associated with the request - - :returns: uuid of the resource owned by the tenant - """ - pass - - @staticmethod - def process_notification(tenant_id, event_type, payload): - """Process an incoming notification event - - This gets called from the notifications layer to determine whether - a driver should process an incoming notification event. It is - responsible for translating an incoming notification to an Event - object appropriate for that driver. - - :param tenant_id: str The UUID tenant_id for the incoming event - :param event_type: str event type, for example router.create.end - :param payload: The payload body of the incoming event - - :returns: A populated Event object if it should process, or None if not - """ - pass - - @property - def ports(self): - """Lists ports associated with the resource. - - :returns: A list of astara.api.neutron.Port objects or [] - """ - - def get_interfaces(self, management_address): - """Lists interfaces attached to the resource. - - This lists the interfaces attached to the resource from the POV - of the resource itself. - - :returns: A list of interfaces - """ - pass - - def is_alive(self, management_address): - """Determines whether the managed resource is alive - - :returns: bool True if alive, False if not - """ - - def get_state(self, worker_context): - """Returns the state of the managed resource""" - - @property - def is_ha(self): - """Returns True if logical resource is set to be highly-available""" - return False diff --git a/astara/drivers/loadbalancer.py b/astara/drivers/loadbalancer.py deleted file mode 100644 index eb8c48ff..00000000 --- a/astara/drivers/loadbalancer.py +++ /dev/null @@ -1,352 +0,0 @@ -# Copyright (c) 2015 Akanda, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
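
[Reviewer illustration, not part of the patch: BaseDriver above is the abstract
contract every rug driver implements. A minimal sketch of what a new driver
might override; the 'firewall' resource name, image UUID, and flavor are
hypothetical, and real drivers delegate liveness checks to astara_client as the
router and loadbalancer drivers below do.]

    from astara.drivers.base import BaseDriver

    class Firewall(BaseDriver):
        # RESOURCE_NAME feeds the 'ak-<resource>-<id>' instance naming
        # scheme set up in BaseDriver.__init__.
        RESOURCE_NAME = 'firewall'

        def post_init(self, worker_context):
            # Normally sourced from cfg.CONF options; hardcoded here only
            # for illustration.
            self.image_uuid = 'deadbeef-0000-0000-0000-000000000000'
            self.flavor = '1'

        def is_alive(self, management_address):
            return True  # placeholder; see astara_client.is_alive()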
-import time - -from oslo_config import cfg -from oslo_log import log as logging - -from neutronclient.common import exceptions as q_exceptions - -from astara.common.i18n import _ -from astara.api import astara_client -from astara.api.config import loadbalancer as config -from astara import event -from astara.api import neutron -from astara.drivers.base import BaseDriver -from astara.drivers import states -from astara.common.i18n import _LW - -LOG = logging.getLogger(__name__) - - -LOADBALANCER_OPTS = [ - cfg.StrOpt('image_uuid', - help='The image_uuid for loadbalancer instances.'), - cfg.StrOpt('instance_flavor', - help='The nova flavor id to use for loadbalancer instances'), - cfg.IntOpt('mgt_service_port', default=5000, - help='The port on which the loadbalancer API service listens ' - 'on loadbalancer appliances'), -] -cfg.CONF.register_group(cfg.OptGroup(name='loadbalancer')) -cfg.CONF.register_opts(LOADBALANCER_OPTS, 'loadbalancer') - - -STATUS_MAP = { - states.DOWN: neutron.PLUGIN_DOWN, - states.BOOTING: neutron.PLUGIN_PENDING_CREATE, - states.UP: neutron.PLUGIN_PENDING_CREATE, - states.CONFIGURED: neutron.PLUGIN_ACTIVE, - states.ERROR: neutron.PLUGIN_ERROR, - states.REPLUG: neutron.PLUGIN_PENDING_UPDATE, -} - - -class LoadBalancer(BaseDriver): - - RESOURCE_NAME = 'loadbalancer' - _last_synced_status = None - - def post_init(self, worker_context): - """Called at end of __init__ in BaseDriver. - - Populates the details object from neutron and sets image_uuid and - flavor from cfg. - - :param worker_context: - """ - self.image_uuid = cfg.CONF.loadbalancer.image_uuid - self.flavor = cfg.CONF.loadbalancer.instance_flavor - self.mgt_port = cfg.CONF.loadbalancer.mgt_service_port - - self._ensure_cache(worker_context) - - def _ensure_cache(self, worker_context): - try: - lb = worker_context.neutron.get_loadbalancer_detail(self.id) - self._loadbalancer = lb - except neutron.LoadBalancerGone: - self._loadbalancer = None - - @property - def ports(self): - """Lists ports associated with the resource. - - :returns: A list of astara.api.neutron.Port objects or [] - """ - if self._loadbalancer: - return [p for p in self._loadbalancer.ports] - else: - return [] - - def pre_boot(self, worker_context): - """pre boot hook - Calls self.pre_plug(). - - :param worker_context: - :returns: None - """ - pass - - def post_boot(self, worker_context): - """post boot hook - - :param worker_context: - :returns: None - """ - pass - - def build_config(self, worker_context, mgt_port, iface_map): - """Builds / rebuilds config - - :param worker_context: - :param mgt_port: - :param iface_map: - :returns: configuration object - """ - - self._ensure_cache(worker_context) - return config.build_config( - worker_context.neutron, - self._loadbalancer, - mgt_port, - iface_map) - - def update_config(self, management_address, config): - """Updates appliance configuration - - This is responsible for pushing configuration to the managed - appliance - """ - self.log.info(_('Updating config for %s'), self.name) - astara_client.update_config(management_address, self.mgt_port, config) - - def pre_plug(self, worker_context): - """pre-plug hook - Sets up the external port. - - :param worker_context: - :returs: None - """ - - def make_ports(self, worker_context): - """make ports call back for the nova client. 
-
-        :param worker_context:
-
-        :returns: A tuple (management_port, [instance_ports])
-        """
-        def _make_ports():
-            self._ensure_cache(worker_context)
-            mgt_port = worker_context.neutron.create_management_port(
-                self.id
-            )
-
-            # allocate a port on the same net as the LB VIP
-            lb_port = worker_context.neutron.create_vrrp_port(
-                object_id=self.id,
-                network_id=self._loadbalancer.vip_port.network_id,
-                label='LB',
-            )
-
-            return mgt_port, [lb_port]
-
-        return _make_ports
-
-    def delete_ports(self, worker_context):
-        """Delete all ports.
-
-        :param worker_context:
-        :returns: None
-
-        """
-        worker_context.neutron.delete_vrrp_port(self.id, label='LB')
-        worker_context.neutron.delete_vrrp_port(self.id, label='MGT')
-
-    @staticmethod
-    def pre_populate_hook():
-        """Fetch the existing LBs from neutron and return a list of
-        resources to be distributed to the workers.
-
-        Wait for neutron to return the list of the existing LBs.
-        Pause up to max_sleep seconds between each attempt and ignore
-        neutron client exceptions.
-
-        """
-        nap_time = 1
-
-        neutron_client = neutron.Neutron(cfg.CONF)
-
-        while True:
-            try:
-                resources = []
-                for lb in neutron_client.get_loadbalancers():
-                    resources.append(
-                        event.Resource(driver=LoadBalancer.RESOURCE_NAME,
-                                       id=lb.id,
-                                       tenant_id=lb.tenant_id))
-
-                return resources
-            except (q_exceptions.Unauthorized, q_exceptions.Forbidden) as err:
-                LOG.warning(_LW('PrePopulateWorkers thread failed: %s'), err)
-                return
-            except Exception as err:
-                LOG.warning(
-                    _LW('Could not fetch loadbalancers from neutron: %s'), err)
-                LOG.warning(_LW(
-                    'sleeping %s seconds before retrying'), nap_time)
-                time.sleep(nap_time)
-                nap_time = min(nap_time * 2,
-                               cfg.CONF.astara_appliance.max_sleep)
-
-    @staticmethod
-    def get_resource_id_for_tenant(worker_context, tenant_id, message):
-        """Find the id of the loadbalancer owned by tenant
-
-        Some events (ie, member.create.end) give us no context about which
-        LB the event is associated with and only show us the tenant_id and
-        member id, so for those we need to do some resolution here.
-
-        :param tenant_id: The tenant uuid to search for
-        :param message: Message associated /w the request
-
-        :returns: uuid of the loadbalancer owned by the tenant
-        """
-
-        lb_id = None
-
-        # loadbalancer.create.end contains the id in the payload
-        if message.body.get('loadbalancer'):
-            lb_id = message.body['loadbalancer'].get('id')
-        # listener.create.end references the loadbalancer directly
-        elif message.body.get('listener'):
-            lb_id = message.body['listener'].get('loadbalancer_id')
-        # pool.create.end references by listener
-        elif message.body.get('pool'):
-            listener_id = message.body['pool'].get('listener_id')
-            if listener_id:
-                lb = worker_context.neutron.get_loadbalancer_by_listener(
-                    listener_id, tenant_id)
-                if lb:
-                    lb_id = lb.id
-        # member.create.end only gives us the member id itself.
-        elif message.body.get('member') or message.body.get('member_id'):
-            member_id = (message.body.get('member', {}).get('id') or
                         message.body.get('member_id'))
-            if member_id:
-                lb = worker_context.neutron.get_loadbalancer_by_member(
-                    member_id=member_id, tenant_id=tenant_id)
-                if lb:
-                    lb_id = lb.id
-        return lb_id
-
-    @staticmethod
-    def process_notification(tenant_id, event_type, payload):
-        """Process an incoming notification event
-
-        This gets called from the notifications layer to determine whether
-        this driver should process an incoming notification event. It is
-        responsible for translating an incoming notification to an Event
-        object appropriate for this driver.
-
-        :param tenant_id: str The UUID tenant_id for the incoming event
-        :param event_type: str event type, for example loadbalancer.create.end
-        :param payload: The payload body of the incoming event
-
-        :returns: A populated Event object if it should process, or None
-        """
-        if event_type.startswith('loadbalancerstatus.update'):
-            # these are generated when we sync state
-            return
-        lb_id = (
-            payload.get('loadbalancer', {}).get('id') or
-            payload.get('listener', {}).get('loadbalancer_id') or
-            payload.get('loadbalancer_id')
-        )
-
-        update_notifications = [
-            'listener.create.start',
-            'pool.create.start',
-            'member.create.end',
-            'member.delete.end',
-        ]
-
-        # some events do not contain a lb id.
-        if not lb_id and event_type not in update_notifications:
-            return
-
-        if event_type == 'loadbalancer.create.end':
-            crud = event.CREATE
-        elif event_type == 'loadbalancer.delete.end':
-            crud = event.DELETE
-        elif event_type in update_notifications:
-            crud = event.UPDATE
-        else:
-            crud = None
-
-        if not crud:
-            LOG.info('Could not determine CRUD for event: %s', event_type)
-            return
-
-        resource = event.Resource(driver=LoadBalancer.RESOURCE_NAME,
-                                  id=lb_id,
-                                  tenant_id=tenant_id)
-        e = event.Event(
-            resource=resource,
-            crud=crud,
-            body=payload,
-        )
-        return e
-
-    def get_state(self, worker_context):
-        self._ensure_cache(worker_context)
-        if not self._loadbalancer:
-            return states.GONE
-        else:
-            # NOTE(adam_g): We probably want to map this status back to
-            # an internal astara status
-            return self._loadbalancer.status
-
-    def synchronize_state(self, worker_context, state):
-        self._ensure_cache(worker_context)
-        if not self._loadbalancer:
-            LOG.debug('Not synchronizing state with missing loadbalancer %s',
-                      self.id)
-            return
-
-        new_status = STATUS_MAP.get(state)
-        old_status = self._last_synced_status
-        LOG.debug('Synchronizing loadbalancer %s state %s->%s',
-                  self.id, old_status, new_status)
-        worker_context.neutron.update_loadbalancer_status(
-            self.id, new_status)
-        self._last_synced_status = new_status
-
-    def get_interfaces(self, management_address):
-        """Lists interfaces attached to the resource.
-
-        This lists the interfaces attached to the resource from the POV
-        of the resource itself.
-
-        :returns: A list of interfaces
-        """
-        return astara_client.get_interfaces(management_address,
-                                            self.mgt_port)
-
-    def is_alive(self, management_address):
-        """Determines whether the managed resource is alive
-
-        :returns: bool True if alive, False if not
-        """
-        return astara_client.is_alive(management_address, self.mgt_port)
diff --git a/astara/drivers/router.py b/astara/drivers/router.py
deleted file mode 100644
index f1151a7e..00000000
--- a/astara/drivers/router.py
+++ /dev/null
@@ -1,368 +0,0 @@
-# Copyright (c) 2015 Akanda, Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
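An aside on the two driver modules deleted above: both translate raw Neutron notifications into internal events by classifying the event_type into a CRUD operation and digging a resource id out of the payload. A minimal standalone sketch of that shape, assuming plain-dict payloads (CRUD_MAP and translate are illustrative names, not astara APIs):

# Sketch of the notification -> event classification used by the drivers
# above. CRUD_MAP and translate() are illustrative, not astara code.
CREATE, UPDATE, DELETE = 'create', 'update', 'delete'

CRUD_MAP = {
    'loadbalancer.create.end': CREATE,
    'loadbalancer.delete.end': DELETE,
    'member.create.end': UPDATE,
    'member.delete.end': UPDATE,
}


def translate(event_type, payload):
    """Return (crud, resource_id), or None for uninteresting events."""
    crud = CRUD_MAP.get(event_type)
    if crud is None:
        return None  # the driver ignores events it cannot classify
    resource_id = (payload.get('loadbalancer', {}).get('id') or
                   payload.get('listener', {}).get('loadbalancer_id') or
                   payload.get('loadbalancer_id'))
    return crud, resource_id


# A member event carries no top-level loadbalancer id, so the real driver
# falls back to get_resource_id_for_tenant(), as shown above.
assert translate('loadbalancer.create.end',
                 {'loadbalancer': {'id': 'lb-1'}}) == (CREATE, 'lb-1')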
-import time - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import timeutils - -from neutronclient.common import exceptions as q_exceptions - -from astara.common.i18n import _ -from astara.api import astara_client -from astara.api.config import router as configuration -from astara import event -from astara.api import neutron -from astara.drivers.base import BaseDriver -from astara.drivers import states -from astara.common.i18n import _LW - -LOG = logging.getLogger(__name__) - - -ROUTER_OPTS = [ - cfg.StrOpt('image_uuid', - help='The image_uuid for router instances.', - deprecated_opts=[ - cfg.DeprecatedOpt('router_image_uuid', - group='DEFAULT')]), - cfg.StrOpt('instance_flavor', - help='The nova id flavor to use for router instances', - deprecated_opts=[ - cfg.DeprecatedOpt('router_instance_flavor', - group='DEFAULT')]), - cfg.IntOpt('mgt_service_port', default=5000, - help='The port on which the router API service listens on ' - 'router appliances', - deprecated_opts=[ - cfg.DeprecatedOpt('akanda_mgt_service_port', - group='DEFAULT')]), - cfg.BoolOpt('ipsec_vpn', default=False, - help='Enable ipsec vpn support'), -] -cfg.CONF.register_group(cfg.OptGroup(name='router')) -cfg.CONF.register_opts(ROUTER_OPTS, 'router') - - -STATUS_MAP = { - states.DOWN: neutron.STATUS_DOWN, - states.BOOTING: neutron.STATUS_BUILD, - states.UP: neutron.STATUS_BUILD, - states.CONFIGURED: neutron.STATUS_ACTIVE, - states.ERROR: neutron.STATUS_ERROR, - states.DEGRADED: neutron.STATUS_BUILD, -} - - -_ROUTER_INTERFACE_NOTIFICATIONS = set([ - 'router.interface.create', - 'router.interface.delete', -]) - -_ROUTER_INTERESTING_NOTIFICATIONS = set([ - 'router.change.end', - 'subnet.create.end', - 'subnet.change.end', - 'subnet.delete.end', - 'port.create.end', - 'port.change.end', - 'port.delete.end', - 'floatingip.create.end', - 'floatingip.update.end', - # NOTE(adam_g): Not certain this floatingip.change.* is ever broadcast? - 'floatingip.change.end', - 'floatingip.delete.end' -]) - -_VPN_NOTIFICATIONS = set([ - 'vpnservice.change.end', - 'vpnservice.delete.end', - 'ipsec_site_connection.create.end', - 'ipsec_site_connection.change.end', - 'ipsec_site_connection.delete.end', - 'ikepolicy.change.end', - 'ipsecpolicy.change.end' -]) - - -DRIVER_NAME = 'router' - - -class Router(BaseDriver): - - RESOURCE_NAME = DRIVER_NAME - _last_synced_status = None - - def post_init(self, worker_context): - """Called at end of __init__ in BaseDriver. - - Populates the _router object from neutron and sets image_uuid and - flavor from cfg. - - :param worker_context: - """ - self.image_uuid = cfg.CONF.router.image_uuid - self.flavor = cfg.CONF.router.instance_flavor - self.mgt_port = cfg.CONF.router.mgt_service_port - - self._ensure_cache(worker_context) - - def _ensure_cache(self, worker_context): - try: - self._router = worker_context.neutron.get_router_detail(self.id) - except neutron.RouterGone: - self._router = None - - @property - def ports(self): - """Lists ports associated with the resource. - - :returns: A list of astara.api.neutron.Port objects or [] - """ - if self._router: - return [p for p in self._router.ports] - else: - return [] - - def pre_boot(self, worker_context): - """pre boot hook - Calls self.pre_plug(). 
- - :param worker_context: - :returns: None - """ - self.pre_plug(worker_context) - - def post_boot(self, worker_context): - """post boot hook - - :param worker_context: - :returns: None - """ - pass - - def build_config(self, worker_context, mgt_port, iface_map): - """Builds / rebuilds config - - :param worker_context: - :param mgt_port: - :param iface_map: - :returns: configuration object - """ - self._ensure_cache(worker_context) - return configuration.build_config( - worker_context, - self._router, - mgt_port, - iface_map - ) - - def update_config(self, management_address, config): - """Updates appliance configuration - - This is responsible for pushing configuration to the managed - appliance - """ - self.log.info(_('Updating config for %s'), self.name) - start_time = timeutils.utcnow() - - astara_client.update_config( - management_address, self.mgt_port, config) - delta = timeutils.delta_seconds(start_time, timeutils.utcnow()) - self.log.info(_('Config updated for %s after %s seconds'), - self.name, round(delta, 2)) - - def make_ports(self, worker_context): - """make ports call back for the nova client. - - :param worker_context: - - :returns: A tuple (managment_port, [instance_ports]) - """ - def _make_ports(): - self._ensure_cache(worker_context) - mgt_port = worker_context.neutron.create_management_port( - self.id - ) - - # FIXME(mark): ideally this should be ordered and de-duped - instance_ports = [ - worker_context.neutron.create_vrrp_port(self.id, n) - for n in (p.network_id for p in self._router.ports) - ] - - return mgt_port, instance_ports - - return _make_ports - - def delete_ports(self, worker_context): - """Delete all ports. - - :param worker_context: - :returns: None - - """ - worker_context.neutron.delete_vrrp_port(self.id) - worker_context.neutron.delete_vrrp_port(self.id, label='MGT') - - @staticmethod - def pre_populate_hook(): - """Fetch the existing routers from neutrom then and returns list back - to populate to be distributed to workers. - - Wait for neutron to return the list of the existing routers. - Pause up to max_sleep seconds between each attempt and ignore - neutron client exceptions. 
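As in the loadbalancer driver, the retry loop that follows sleeps between attempts, doubling the interval after each failure up to the max_sleep cap. A standalone sketch of that backoff schedule (backoff_retry is an illustrative name, not part of astara):

import time


def backoff_retry(fetch, max_sleep=600):
    # Double the nap between failed attempts: 1, 2, 4, ... seconds,
    # capped at max_sleep, mirroring the pre_populate_hook loop below.
    nap_time = 1
    while True:
        try:
            return fetch()
        except Exception as err:
            print('fetch failed (%s); sleeping %s seconds' % (err, nap_time))
            time.sleep(nap_time)
            nap_time = min(nap_time * 2, max_sleep)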
- - """ - nap_time = 1 - - neutron_client = neutron.Neutron(cfg.CONF) - - while True: - try: - neutron_routers = neutron_client.get_routers(detailed=False) - resources = [] - for router in neutron_routers: - resources.append( - event.Resource(driver=DRIVER_NAME, - id=router.id, - tenant_id=router.tenant_id) - ) - - return resources - except (q_exceptions.Unauthorized, q_exceptions.Forbidden) as err: - LOG.warning(_LW('PrePopulateWorkers thread failed: %s'), err) - return - except Exception as err: - LOG.warning( - _LW('Could not fetch routers from neutron: %s'), err) - LOG.warning(_LW( - 'sleeping %s seconds before retrying'), nap_time) - time.sleep(nap_time) - nap_time = min(nap_time * 2, - cfg.CONF.astara_appliance.max_sleep) - - @staticmethod - def get_resource_id_for_tenant(worker_context, tenant_id, message): - """Find the id of the router owned by tenant - - :param tenant_id: The tenant uuid to search for - :param message: message associated /w request (unused here) - - :returns: uuid of the router owned by the tenant - """ - - router = worker_context.neutron.get_router_for_tenant(tenant_id) - if not router: - LOG.debug('Router not found for tenant %s.', - tenant_id) - return None - return router.id - - @staticmethod - def process_notification(tenant_id, event_type, payload): - """Process an incoming notification event - - This gets called from the notifications layer to determine whether - this driver should process an incoming notification event. It is - responsible for translating an incoming notificatino to an Event - object appropriate for this driver. - - :param tenant_id: str The UUID tenant_id for the incoming event - :param event_type: str event type, for example router.create.end - :param payload: The payload body of the incoming event - - :returns: A populated Event objet if it should process, or None if not - """ - router_id = payload.get('router', {}).get('id') - crud = event.UPDATE - - if event_type.startswith('routerstatus.update'): - # We generate these events ourself, so ignore them. 
- return - - if event_type == 'router.create.end': - crud = event.CREATE - elif event_type == 'router.delete.end': - crud = event.DELETE - router_id = payload.get('router_id') - elif event_type in _ROUTER_INTERFACE_NOTIFICATIONS: - crud = event.UPDATE - router_id = payload.get('router.interface', {}).get('id') - elif event_type in _ROUTER_INTERESTING_NOTIFICATIONS: - crud = event.UPDATE - elif cfg.CONF.router.ipsec_vpn and event_type in _VPN_NOTIFICATIONS: - crud = event.UPDATE - else: - LOG.debug('Not processing event: %s' % event_type) - return - - resource = event.Resource(driver=DRIVER_NAME, - id=router_id, - tenant_id=tenant_id) - e = event.Event( - resource=resource, - crud=crud, - body=payload, - ) - return e - - def get_state(self, worker_context): - self._ensure_cache(worker_context) - if not self._router: - return states.GONE - else: - # NOTE(adam_g): We probably want to map this status back to - # an internal astara status - return self._router.status - - def synchronize_state(self, worker_context, state): - self._ensure_cache(worker_context) - if not self._router: - LOG.debug('Not synchronizing state with missing router %s', - self.id) - return - new_status = STATUS_MAP.get(state) - old_status = self._last_synced_status - if not old_status or old_status != new_status: - LOG.debug('Synchronizing router %s state %s->%s', - self.id, old_status, new_status) - worker_context.neutron.update_router_status(self.id, new_status) - self._last_synced_status = new_status - - def get_interfaces(self, management_address): - """Lists interfaces attached to the resource. - - This lists the interfaces attached to the resource from the POV - of the resource iteslf. - - :returns: A list of interfaces - """ - return astara_client.get_interfaces(management_address, - self.mgt_port) - - def is_alive(self, management_address): - """Determines whether the managed resource is alive - - :returns: bool True if alive, False if not - """ - return astara_client.is_alive(management_address, self.mgt_port) - - @property - def is_ha(self): - """Returns True if logical resource is set to be highly-available""" - if not self._router: - return False - return self._router.ha diff --git a/astara/drivers/states.py b/astara/drivers/states.py deleted file mode 100644 index c42b0f25..00000000 --- a/astara/drivers/states.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright (c) 2015 Akanda, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Reference states. - -Each driver maps these to which ever neutron or other -services state. -""" -DOWN = 'down' -BOOTING = 'booting' -UP = 'up' -CONFIGURED = 'configured' -RESTART = 'restart' -REPLUG = 'replug' -GONE = 'gone' -ERROR = 'error' -DEGRADED = 'degraded' - -# base list of ready states, driver can use its own list. 
-READY_STATES = (UP, CONFIGURED, DEGRADED) diff --git a/astara/event.py b/astara/event.py deleted file mode 100644 index ddbbf4c9..00000000 --- a/astara/event.py +++ /dev/null @@ -1,98 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# CRUD operations tracked in Event.crud -CREATE = 'create' -READ = 'read' -UPDATE = 'update' -DELETE = 'delete' -POLL = 'poll' -COMMAND = 'command' # an external command to be processed -REBUILD = 'rebuild' -REBALANCE = 'rebalance' -CLUSTER_REBUILD = 'cluster_rebuild' - - -class Event(object): - """Rug Event object - - Events are constructed from incoming messages accepted by the Rug. - They are responsible for holding the message payload (body), the - correpsonding CRUD operation and the logical resource that the - event affects. - """ - def __init__(self, resource, crud, body): - """ - :param resource: Resource instance holding context about the logical - resource that is affected by the Event. - :param crud: CRUD operation that is to be completed by the - correpsonding state machine when it is delivered. - :param body: The original message payload dict. - """ - self.resource = resource - self.crud = crud - self.body = body - - def __eq__(self, other): - if not type(self) == type(other): - return False - for k, v in vars(self).items(): - if k not in vars(other): - return False - if vars(other)[k] != v: - return False - return True - - def __ne__(self, other): - return not self.__eq__(other) - - def __repr__(self): - return '<%s (resource=%s, crud=%s, body=%s)>' % ( - self.__class__.__name__, - self.resource, - self.crud, - self.body) - - -class Resource(object): - """Rug Resource object - - A Resource object represents one instance of a logical resource - that is to be managed by the rug (ie, a router). - """ - def __init__(self, driver, id, tenant_id): - """ - :param driver: str name of the driver that corresponds to the resource - type. - :param id: ID of the resource (ie, the Neutron router's UUID). - :param tenant_id: The UUID of the tenant that owns this resource. - """ - self.driver = driver - self.id = id - self.tenant_id = tenant_id - - def __eq__(self, other): - return type(self) == type(other) and vars(self) == vars(other) - - def __ne__(self, other): - return not self.__eq__(other) - - def __repr__(self): - return '<%s (driver=%s, id=%s, tenant_id=%s)>' % ( - self.__class__.__name__, - self.driver, - self.id, - self.tenant_id) diff --git a/astara/health.py b/astara/health.py deleted file mode 100644 index 1cd23360..00000000 --- a/astara/health.py +++ /dev/null @@ -1,87 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -"""Periodic health check code. -""" - -import threading -import time - -from oslo_config import cfg - -from astara import event -from astara.api import neutron - -from oslo_log import log as logging - - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF - -HEALTH_INSPECTOR_OPTS = [ - cfg.IntOpt('health_check_period', - default=60, - help='seconds between health checks'), -] -CONF.register_opts(HEALTH_INSPECTOR_OPTS) - - -def _health_inspector(scheduler): - """Runs in the thread. - """ - period = CONF.health_check_period - while True: - time.sleep(period) - LOG.debug('waking up') - r = event.Resource( - id='*', - tenant_id='*', - driver='*', - ) - e = event.Event( - resource=r, - crud=event.POLL, - body={}, - ) - scheduler.handle_message('*', e) - - -def start_inspector(period, scheduler): - """Start a health check thread. - """ - t = threading.Thread( - target=_health_inspector, - args=(scheduler,), - name='HealthInspector', - ) - t.setDaemon(True) - t.start() - return t - - -def start_reporter(): - """Start a agent report thread. - """ - reporter = neutron.NeutronAgentReporter() - t = threading.Thread( - target=reporter.report_forever, - args=(), - name='AgentReporter', - ) - t.setDaemon(True) - t.start() - return t diff --git a/astara/instance_manager.py b/astara/instance_manager.py deleted file mode 100644 index 69bbc931..00000000 --- a/astara/instance_manager.py +++ /dev/null @@ -1,824 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from datetime import datetime -from functools import wraps -import time -import six - -from oslo_config import cfg - -from astara.drivers import states -from astara.common.i18n import _LE, _LI -from astara.common import container - - -CONF = cfg.CONF -INSTANCE_MANAGER_OPTS = [ - cfg.IntOpt( - 'hotplug_timeout', - default=10, - help='The amount of time to wait for nova to hotplug/unplug ' - 'networks from the instances.'), - cfg.IntOpt( - 'boot_timeout', default=600), - cfg.IntOpt( - 'error_state_cooldown', - default=30, - help='Number of seconds to ignore new events when an instance goes ' - 'into ERROR state.', - ), -] -CONF.register_opts(INSTANCE_MANAGER_OPTS) - - -def _generate_interface_map(instance, interfaces): - # TODO(mark): We're in the first phase of VRRP, so we need - # map the interface to the network ID. 
-    # Eventually we'll send VRRP data and real interface data
-    port_mac_to_net = {
-        p.mac_address: p.network_id
-        for p in instance.ports
-    }
-    # Add in the management port
-    mgt_port = instance.management_port
-    port_mac_to_net[mgt_port.mac_address] = mgt_port.network_id
-    # this is a network to logical interface id
-    return {
-        port_mac_to_net[i['lladdr']]: i['ifname']
-        for i in interfaces if i['lladdr'] in port_mac_to_net
-    }
-
-
-def synchronize_driver_state(f):
-    """Wrapper that triggers a driver's synchronize_state function"""
-    def wrapper(self, *args, **kw):
-        state = f(self, *args, **kw)
-        self.resource.synchronize_state(*args, state=state)
-        return state
-    return wrapper
-
-
-def ensure_cache(f):
-    """Decorator to wrap around any function that uses self.instance_info.
-
-    Ensures that self.instance_info is up to date and catches instances in
-    a GONE or missing state before wasting cycles trying to do something
-    with it.
-
-    NOTE: This replaces the old _ensure_cache function, now made a
-    decorator rather than being called explicitly at the start of all
-    those functions.
-    """
-    @wraps(f)
-    def wrapper(self, worker_context, *args, **kw):
-        self.instances.refresh(worker_context)
-
-        instances = worker_context.nova_client.get_instances_for_obj(
-            self.resource.name)
-        for inst_info in instances:
-            self.instances[inst_info.id_] = inst_info
-
-        self.instances.update_ports(worker_context)
-
-        return f(self, worker_context, *args, **kw)
-
-    return wrapper
-
-
-class BootAttemptCounter(object):
-    def __init__(self):
-        self._attempts = 0
-
-    def start(self):
-        self._attempts += 1
-
-    def reset(self):
-        self._attempts = 0
-
-    @property
-    def count(self):
-        return self._attempts
-
-
-class InstanceGroupManager(container.ResourceContainer):
-    def __init__(self, log, resource):
-        super(InstanceGroupManager, self).__init__()
-        self.log = log
-        self.resource = resource
-        self._alive = set()
-
-    @property
-    def instances(self):
-        """Returns the managed instances sorted by name"""
-        return sorted(self.resources.values(), key=lambda i: i.name)
-
-    def validate_ports(self):
-        """Checks whether instances have management ports attached
-
-        :returns: tuple containing two lists:
-                  (instances that have ports, instances that don't)
-        """
-        has_ports = set()
-        for inst_info in set(self.resources.values()):
-            if inst_info.management_address:
-                has_ports.add(inst_info)
-        return has_ports, set(self.resources.values()) - has_ports
-
-    def are_alive(self):
-        """Calls the driver's is_alive() on all instances to check liveness
-
-        :returns: tuple containing two lists (alive_instances, dead_instances)
-        """
-        alive = set()
-        for i in six.moves.range(cfg.CONF.max_retries):
-            for inst_info in set(self.instances) - alive:
-                if (inst_info.management_address and
-                        self.resource.is_alive(inst_info.management_address)):
-                    self.log.debug(
-                        'Instance %s found alive after %s of %s attempts',
-                        inst_info.id_, i, cfg.CONF.max_retries)
-                    alive.add(inst_info)
-                else:
-                    self.log.debug(
-                        'Alive check failed for instance %s.
Attempt %d of %d', - inst_info.id_, i, cfg.CONF.max_retries) - - # all managed instances report alive - if alive == set(self.instances): - self._alive = [i.id_ for i in alive] - return alive, [] - - # zero managed instances report alive - if not alive: - self.log.debug( - 'Alive check failed for all instances after %s attempts.', - cfg.CONF.max_retries) - return [], self.instances - - dead = set(self.resources.values()) - alive - self._alive = [i.id_ for i in alive - dead] - return list(alive), list(dead) - - def update_ports(self, worker_context): - """Refresh ports on all managed instance info objects""" - for instance_info in self.instances: - if not instance_info: - continue - ( - instance_info.management_port, - instance_info.ports - ) = worker_context.neutron.get_ports_for_instance( - instance_info.id_ - ) - - def get_interfaces(self): - """Obtain a list of interfaces from each managed instance - - Skips any instance that has not already been verified as being alive. - - :returns: dict of {instance: [interfaces_dict]} - """ - interfaces = {} - for inst in self.instances: - if inst.id_ not in self._alive: - self.log.debug( - 'Skipping interfaces on query on instance %s that ' - 'is not yet alive.', inst.id_) - continue - else: - interfaces[inst] = self.resource.get_interfaces( - inst.management_address) - return interfaces - - def verify_interfaces(self, ports): - """Verify at least one instance in group has correct ports plugged""" - for inst, interfaces in self.get_interfaces().items(): - actual_macs = set((iface['lladdr'] for iface in interfaces)) - self.log.debug( - 'MACs found on %s: %s', inst.id_, - ', '.join(sorted(actual_macs))) - if not all( - getattr(p, 'mac_address', None) for p in ports - ): - return False - - num_instance_ports = len(list(inst.ports)) - num_logical_ports = len(list(ports)) - if num_logical_ports != num_instance_ports: - self.log.debug( - 'Expected %s instance ports but found %s', - num_logical_ports, num_instance_ports) - return False - - expected_macs = set(p.mac_address - for p in inst.ports) - expected_macs.add(inst.management_port.mac_address) - self.log.debug( - 'MACs expected on: %s, %s', - inst.id_, ', '.join(sorted(expected_macs))) - - if actual_macs == expected_macs: - self.log.debug('Found all expected MACs on %s', inst.id_) - return True - - self.log.debug( - 'Did not find all expected MACs on instance %s, ' - 'actual MACs: %s', inst.id_, ', '.join(actual_macs)) - - return False - - def _update_config(self, instance, config): - self.log.debug( - 'Updating config for instance %s on resource %s', - instance.id_, self.resource.id) - self.log.debug('New config: %r', config) - attempts = cfg.CONF.max_retries - for i in six.moves.range(attempts): - try: - self.resource.update_config( - instance.management_address, - config) - except Exception: - if i == attempts - 1: - # Only log the traceback if we encounter it many times. - self.log.exception(_LE('failed to update config')) - else: - self.log.debug( - 'failed to update config, attempt %d', - i - ) - time.sleep(cfg.CONF.retry_delay) - else: - self.log.info('Instance config updated') - return True - else: - return False - - def _ha_config(self, instance): - """Builds configuration describing the HA cluster - - This informs the instance about any configuration relating to the HA - cluster it should be joining. ATM this is primarily used to inform - an instance about the management addresses of its peers. 
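The rule applied in the method body below is age-based: the longest-running instance is preferred as the cluster master with priority 100, and every other member gets the backup priority 50. A standalone sketch of that rule (ha_priority is an illustrative name; instances are assumed to expose a comparable time_since_boot):

import collections

Inst = collections.namedtuple('Inst', 'name time_since_boot')


def ha_priority(instance, instances):
    # Oldest instance first: it becomes the preferred master (100);
    # every other cluster member gets the backup priority (50).
    oldest_first = sorted(instances, key=lambda i: i.time_since_boot,
                          reverse=True)
    return 100 if oldest_first[0] is instance else 50


a, b = Inst('a', 3600), Inst('b', 60)
assert ha_priority(a, [a, b]) == 100 and ha_priority(b, [a, b]) == 50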
-
-        :param instance: InstanceInfo object
-        :returns: dict of HA configuration
-        """
-        peers = [
-            i.management_address for i in self.instances
-            if i.management_address != instance.management_address]
-
-        # determine cluster priority by instance age. the older instance
-        # gets the higher priority
-        sorted_by_age = sorted(
-            self.instances, key=lambda i: i.time_since_boot,
-            reverse=True)
-
-        if sorted_by_age.index(instance) == 0:
-            priority = 100
-        else:
-            priority = 50
-
-        return {
-            'peers': peers,
-            'priority': priority,
-        }
-
-    def configure(self, worker_context):
-        # XXX config update can be dispatched to threads to speed
-        # things up across multiple instances
-        failed = []
-
-        # get_interfaces() returns only instances that are up and ready
-        # for config
-        instances_interfaces = self.get_interfaces()
-
-        for inst, interfaces in instances_interfaces.items():
-            # sending all the standard config over to the driver for
-            # final updates
-            config = self.resource.build_config(
-                worker_context,
-                inst.management_port,
-                _generate_interface_map(inst, interfaces)
-            )
-
-            # while drivers are free to express their own ha config
-            # requirements, the instance manager is the only one with a
-            # high-level view of the cluster, ie knowledge of membership
-            if self.resource.is_ha:
-                config['ha_config'] = config.get('ha') or {}
-                config['ha_config'].update(self._ha_config(inst))
-
-            self.log.debug(
-                'preparing to update config for instance %s on %s resource '
-                'to %r', inst.id_, self.resource.RESOURCE_NAME, config)
-
-            if self._update_config(inst, config) is not True:
-                failed.append(inst)
-
-        if set(failed) == set(self.instances):
-            # all updates have failed
-            self.log.error(
-                'Could not update config for any instances on %s resource '
-                '%s, marking resource state %s',
-                self.resource.RESOURCE_NAME, self.resource.id, states.RESTART)
-            return states.RESTART
-        elif failed:
-            # some updates to instances we thought to be alive have failed
-            self.log.error(
-                'Could not update config for some instances on %s '
-                'resource %s, marking resource state %s',
-                self.resource.RESOURCE_NAME, self.resource.id, states.DEGRADED)
-            return states.DEGRADED
-        elif len(instances_interfaces.keys()) != len(self.instances):
-            # instance_interfaces contains only instances that are alive
-            # if we're still waiting on instances, remain degraded
-            self.log.debug(
-                'Config updated on %s of %s instances',
-                len(instances_interfaces.keys()), len(self.instances))
-            return states.DEGRADED
-        else:
-            self.log.debug(
-                'Config updated across all instances on %s resource %s',
-                self.resource.RESOURCE_NAME, self.resource.id)
-            return states.CONFIGURED
-
-    def delete(self, instance):
-        """Removes nova server reference from manager"""
-        del self.resources[instance.id_]
-
-    def refresh(self, worker_context):
-        """Update nova server reference for all managed instances"""
-        for i in self.instances:
-            if not worker_context.nova_client.update_instance_info(i):
-                self.delete(i)
-
-    def destroy(self, worker_context):
-        """Destroys all nova instances and blocks until deletion"""
-        worker_context.nova_client.delete_instances_and_wait(
-            self.instances)
-
-    def remove(self, worker_context, instance):
-        """Destroys the nova instance, removes instance from group manager"""
-        worker_context.nova_client.destroy_instance(instance)
-        self.delete(instance)
-
-    @property
-    def next_instance_index(self):
-        ids = [
-            int(i.name.split('_')[1]) for i in
-            self.instances]
-        try:
-            return max(ids) + 1
-        except ValueError:
-            return 0
-
-    def create(self, worker_context):
-        to_boot = self.required_instance_count - len(self.instances)
-        self.log.debug(
-            'Booting an additional %s instance(s) for resource %s',
-            to_boot, self.resource.id)
-
-        for i in six.moves.range(to_boot):
-            name = '%s_%s' % (self.resource.name, self.next_instance_index)
-            instance = worker_context.nova_client.boot_instance(
-                resource_type=self.resource.RESOURCE_NAME,
-                prev_instance_info=None,
-                name=name,
-                image_uuid=self.resource.image_uuid,
-                flavor=self.resource.flavor,
-                make_ports_callback=self.resource.make_ports(worker_context)
-            )
-            self.add_instance(instance)
-
-    @property
-    def required_instance_count(self):
-        if self.resource.is_ha is True:
-            return 2
-        else:
-            return 1
-
-    @property
-    def instance_count(self):
-        return len(self.instances)
-
-    @property
-    def cluster_degraded(self):
-        return self.instance_count < self.required_instance_count
-
-    def add_instance(self, instance):
-        """Adds a new instance or updates an existing one"""
-        self.resources[instance.id_] = instance
-
-
-class InstanceManager(object):
-
-    def __init__(self, resource, worker_context):
-        """The instance manager is your interface to the running instance,
-        whether it be virtual, container or physical.
-
-        Service specific code lives in the driver which is passed in here.
-
-        :param resource: A driver instance for the managed resource
-        :param resource_id: UUID of logical resource
-        :param worker_context:
-        """
-        self.resource = resource
-        self.log = self.resource.log
-
-        self.state = states.DOWN
-
-        self.instance_info = None
-        self.instances = InstanceGroupManager(self.log, self.resource)
-        self.last_error = None
-        self._boot_counter = BootAttemptCounter()
-        self._boot_logged = []
-        self._last_synced_status = None
-
-        self.state = self.update_state(worker_context, silent=True)
-
-    @property
-    def attempts(self):
-        """Property which returns the boot count.
-
-        :returns Int:
-        """
-        return self._boot_counter.count
-
-    def reset_boot_counter(self):
-        """Resets the boot counter.
-
-        :returns None:
-        """
-        self._boot_counter.reset()
-
-    @synchronize_driver_state
-    @ensure_cache
-    def update_state(self, worker_context, silent=False):
-        """Updates state of the instance and, by extension, its resource
-
-        :param worker_context:
-        :param silent:
-        :returns: state
-        """
-        if self.resource.get_state(worker_context) == states.GONE:
-            self.log.debug('%s driver reported its state is %s',
-                           self.resource.RESOURCE_NAME, states.GONE)
-            self.state = states.GONE
-            return self.state
-
-        if not self.instances:
-            self.log.info(_LI('no backing instance(s), marking as %s'),
-                          states.DOWN)
-            self.state = states.DOWN
-            return self.state
-        elif self.instances.cluster_degraded is True:
-            self.log.info(_LI(
-                'instance cluster for resource %s reports degraded'),
-                self.resource.id)
-            self.state = states.DEGRADED
-            return self.state
-
-        has_ports, no_ports = self.instances.validate_ports()
-
-        # ports_state=None means no instances have ports
-        if not has_ports:
-            self.log.debug('waiting for instance ports to be attached')
-            self.state = states.BOOTING
-            return self.state
-
-        # XXX TODO need to account for when only a subset of the cluster has
-        # correct ports, kick back to Replug
-
-        alive, dead = self.instances.are_alive()
-        if not alive:
-            # alive check failed on all instances for an already configured
-            # resource, mark it down.
-            # XXX need to track timeouts per instance
-            # self._check_boot_timeout()
-
-            if self.state == states.CONFIGURED:
-                self.log.debug('No instance(s) alive, marking it as %s',
-                               states.DOWN)
-                self.state = states.DOWN
-                return self.state
-        elif dead:
-            # some subset of instances reported not alive, mark it degraded.
-            if self.state == states.CONFIGURED:
-                for i in dead:
-                    instance = worker_context.nova_client.get_instance_by_id(
-                        i.id_)
-                    if instance is None and self.state != states.ERROR:
-                        self.log.info(
-                            'Instance %s was not found; rebooting', i.id_)
-                        self.instances.delete(i)
-            self.state = states.DEGRADED
-            return self.state
-
-        self.instances.refresh(worker_context)
-        if self.state == states.CONFIGURED:
-            for i in alive:
-                if not i.booting and i not in self._boot_logged:
-                    self.log.info(
-                        '%s booted in %s seconds after %s attempts',
-                        self.resource.RESOURCE_NAME,
-                        i.time_since_boot.total_seconds(),
-                        self._boot_counter.count)
-                    self._boot_logged.append(i)
-            self.reset_boot_counter()
-        else:
-            if alive:
-                self.state = states.UP
-
-        return self.state
-
-    @ensure_cache
-    def boot(self, worker_context):
-        """Boots the instances with driver pre/post boot hooks.
-
-        :returns: None
-        """
-        self.log.info('Booting %s' % self.resource.RESOURCE_NAME)
-
-        if self.state != states.DEGRADED:
-            self.state = states.DOWN
-            self._boot_counter.start()
-
-        # driver preboot hook
-        self.resource.pre_boot(worker_context)
-
-        try:
-            self.instances.create(worker_context)
-            if not self.instances:
-                self.log.info(_LI('Previous instances are still deleting'))
-                # Reset the boot counter, causing the state machine to start
-                # again with a new Instance.
-                self.reset_boot_counter()
-                return
-        except Exception:
-            self.log.exception(_LE('Instances failed to start boot'))
-        else:
-            self.state = states.BOOTING
-
-        # driver post boot hook
-        self.resource.post_boot(worker_context)
-
-    @synchronize_driver_state
-    @ensure_cache
-    def set_error(self, worker_context, silent=False):
-        """Set the internal and neutron status for the router to states.ERROR.
-
-        This is called from outside when something notices the router
-        is "broken". We don't use it internally because this class is
-        supposed to do what it's told and not make decisions about
-        whether or not the router is fatally broken.
-        """
-        self.state = states.ERROR
-        self.last_error = datetime.utcnow()
-        return self.state
-
-    @synchronize_driver_state
-    @ensure_cache
-    def clear_error(self, worker_context, silent=False):
-        """Clear the internal error state.
-
-        This is called from outside when something wants to force a
-        router rebuild, so that the state machine that checks our
-        status won't think we are broken unless we actually break
-        again.
-        """
-        # Clear the boot counter.
-        self._boot_counter.reset()
-        self.state = states.DOWN
-        return self.state
-
-    @property
-    def error_cooldown(self):
-        """Returns True if the instance was recently set to states.ERROR state.
- """ - if self.last_error and self.state == states.ERROR: - seconds_since_error = ( - datetime.utcnow() - self.last_error - ).total_seconds() - if seconds_since_error < cfg.CONF.error_state_cooldown: - return True - return False - - @synchronize_driver_state - @ensure_cache - def stop(self, worker_context): - """Attempts to destroy the instance cluster - - :param worker_context: - :returns: - """ - self.log.info(_LI('Destroying instance')) - - self.resource.delete_ports(worker_context) - - if not self.instances: - self.log.info(_LI('Instance(s) already destroyed.')) - if self.state != states.GONE: - self.state = states.DOWN - return self.state - - try: - self.instances.destroy(worker_context) - if self.state != states.GONE: - self.state = states.DOWN - except Exception: - self.log.exception(_LE('Failed to stop instance(s)')) - - @synchronize_driver_state - @ensure_cache - def configure(self, worker_context): - """Pushes config to instance - - :param worker_context: - :param failure_state: - :param attempts: - :returns: - """ - self.log.debug('Begin instance config') - self.state = states.UP - - if self.resource.get_state(worker_context) == states.GONE: - return states.GONE - - if not self.instances: - return states.DOWN - - if not self.instances.verify_interfaces(self.resource.ports): - # XXX Need to acct for degraded cluster /w subset of nodes - # having incorrect plugging. - self.log.debug("Interfaces aren't plugged as expected.") - self.state = states.REPLUG - return self.state - - self.state = self.instances.configure(worker_context) - return self.state - - def replug(self, worker_context): - - """Attempts to replug the network ports for an instance. - - :param worker_context: - :returns: - """ - self.log.debug('Attempting to replug...') - - self.resource.pre_plug(worker_context) - - for instance, interfaces in self.instances.get_interfaces().items(): - actual_macs = set((iface['lladdr'] for iface in interfaces)) - instance_macs = set(p.mac_address for p in instance.ports) - instance_macs.add(instance.management_port.mac_address) - - if instance_macs != actual_macs: - # our cached copy of the ports is wrong reboot and clean up - self.log.warning(( - 'Instance macs(%s) do not match actual macs (%s). 
Instance'
-                    ' cache appears out-of-sync'),
-                    instance_macs, actual_macs
-                )
-                self.state = states.RESTART
-                return
-
-            instance_ports = {p.network_id: p for p in instance.ports}
-            instance_networks = set(instance_ports.keys())
-
-            logical_networks = set(p.network_id for p in self.resource.ports)
-
-            if logical_networks != instance_networks:
-                nova_instance = worker_context.nova_client.get_instance_by_id(
-                    instance.id_
-                )
-
-                # For each port that doesn't have a mac address on the
-                # instance
-                for network_id in logical_networks - instance_networks:
-                    port = worker_context.neutron.create_vrrp_port(
-                        self.resource.id,
-                        network_id
-                    )
-                    self.log.debug(
-                        'Net %s is missing from the appliance instance %s, '
-                        'plugging: %s', network_id, instance.id_, port.id
-                    )
-
-                    try:
-                        nova_instance.interface_attach(port.id, None, None)
-                        instance.ports.append(port)
-                    except Exception:
-                        self.log.exception(
-                            'Interface attach failed on instance %s',
-                            instance.id_)
-                        self.instances.remove(worker_context, instance)
-
-                # instance has been removed for failure, do not continue with
-                # plugging
-                if instance not in self.instances.values():
-                    continue
-
-                ports_to_delete = []
-                for network_id in instance_networks - logical_networks:
-                    port = instance_ports[network_id]
-                    self.log.debug(
-                        'Net %s is detached from the router, unplugging: %s',
-                        network_id, port.id
-                    )
-
-                    try:
-                        nova_instance.interface_detach(port.id)
-                        instance.ports.remove(port)
-                        ports_to_delete.append(port)
-                    except Exception:
-                        self.log.exception(
-                            'Interface detach failed on instance %s',
-                            instance.id_)
-                        self.instances.remove(worker_context, instance)
-
-                # instance has been removed for failure, do not continue with
-                # plugging
-                if instance not in self.instances.values():
-                    continue
-
-            if self._wait_for_interface_hotplug(instance) is not True:
-                self.instances.remove(worker_context, instance)
-
-        if not self.instances:
-            # all instances were destroyed for plugging failure
-            self.state = states.RESTART
-        elif self.instances.cluster_degraded:
-            # some instances were destroyed for plugging failure
-            self.state = states.DEGRADED
-        else:
-            # plugging was successful
-            for p in ports_to_delete:
-                worker_context.neutron.api_client.delete_port(p.id)
-            return
-
-    def _wait_for_interface_hotplug(self, instance):
-        """Waits for instance to report interfaces for all expected ports"""
-        # The action of attaching/detaching interfaces in Nova happens via
-        # the message bus and is *not* blocking. We need to wait a few
-        # seconds to see if the list of tap devices on the appliance
-        # actually changed. If not, assume the hotplug failed, and reboot
-        # the Instance.
-        for i in six.moves.range(1, cfg.CONF.hotplug_timeout):
-            self.log.debug(
-                "Waiting for interface attachments to take effect..."
-            )
-            interfaces = self.resource.get_interfaces(
-                instance.management_address)
-
-            actual_macs = set((iface['lladdr'] for iface in interfaces))
-            instance_macs = set(p.mac_address for p in instance.ports)
-            instance_macs.add(instance.management_port.mac_address)
-            if actual_macs == instance_macs:
-                return True
-            time.sleep(1)
-        else:
-            self.log.debug(
-                "Interfaces aren't plugged as expected on instance %s, "
-                "marking for rebooting.", instance.id_)
-            return False
-
-    def _check_boot_timeout(self):
-        """If the instance was created more than `boot_timeout` seconds
-        ago, log an error and set the state to states.DOWN
-        """
-        time_since_boot = self.instance_info.time_since_boot
-
-        if time_since_boot:
-            if time_since_boot.seconds < cfg.CONF.boot_timeout:
-                # Do not reset the state if we have an error
-                # condition already. The state will be reset when
-                # the router starts responding again, or when the
-                # error is cleared from a forced rebuild.
-                if self.state != states.ERROR:
-                    self.state = states.BOOTING
-            else:
-                # If the instance was created more than `boot_timeout`
-                # seconds ago, log an error and set the state to states.DOWN
-                self.log.info(
-                    'Router is DOWN. Created over %d secs ago.',
-                    cfg.CONF.boot_timeout)
-                # Do not reset the state if we have an error condition
-                # already. The state will be reset when the router starts
-                # responding again, or when the error is cleared from a
-                # forced rebuild.
-                if self.state != states.ERROR:
-                    self.state = states.DOWN
diff --git a/astara/main.py b/astara/main.py
deleted file mode 100644
index f80b08d9..00000000
--- a/astara/main.py
+++ /dev/null
@@ -1,218 +0,0 @@
-# Copyright 2014 DreamHost, LLC
-#
-# Author: DreamHost, LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import functools
-import logging
-import multiprocessing
-import signal
-import socket
-import sys
-import threading
-
-from oslo_config import cfg
-from oslo_log import log
-
-from astara.common.i18n import _LE, _LI
-from astara.common import config as ak_cfg
-from astara import coordination
-from astara import daemon
-from astara import health
-from astara import metadata
-from astara import notifications
-from astara import scheduler
-from astara import populate
-from astara import worker
-from astara.api import neutron as neutron_api
-
-
-LOG = log.getLogger(__name__)
-CONF = cfg.CONF
-
-MAIN_OPTS = [
-    cfg.StrOpt('host',
-               default=socket.gethostname(),
-               help="The hostname Astara is running on"),
-]
-CONF.register_opts(MAIN_OPTS)
-
-
-CEILOMETER_OPTS = [
-    cfg.BoolOpt('enabled', default=False,
-                help='Enable reporting metrics to ceilometer.'),
-    cfg.StrOpt('topic', default='notifications.info',
-               help='The name of the topic queue ceilometer consumes events '
-                    'from.')
-]
-CONF.register_group(cfg.OptGroup(name='ceilometer',
-                                 title='Ceilometer Reporting Options'))
-CONF.register_opts(CEILOMETER_OPTS, group='ceilometer')
-
-
-def shuffle_notifications(notification_queue, sched):
-    """Copy messages from the notification queue into the scheduler.
- """ - while True: - try: - target, message = notification_queue.get() - if target is None: - break - sched.handle_message(target, message) - except IOError: - # FIXME(rods): if a signal arrive during an IO operation - # an IOError is raised. We catch the exceptions in - # meantime waiting for a better solution. - pass - except KeyboardInterrupt: - LOG.info(_LI('got Ctrl-C')) - break - except: - LOG.exception(_LE('unhandled exception processing message')) - - -def main(argv=sys.argv[1:]): - """Main Entry point into the astara-orchestrator - - This is the main entry point into the astara-orchestrator. On invocation of - this method, logging, local network connectivity setup is performed. - This information is obtained through the 'ak-config' file, passed as - arguement to this method. Worker threads are spawned for handling - various tasks that are associated with processing as well as - responding to different Neutron events prior to starting a notification - dispatch loop. - - :param argv: list of Command line arguments - - :returns: None - - :raises: None - - """ - # TODO(rama) Error Handling to be added as part of the docstring - # description - - # Change the process and thread name so the logs are cleaner. - p = multiprocessing.current_process() - p.name = 'pmain' - t = threading.current_thread() - t.name = 'tmain' - ak_cfg.parse_config(argv) - log.setup(cfg.CONF, 'astara-orchestrator') - cfg.CONF.log_opt_values(LOG, logging.INFO) - - neutron = neutron_api.Neutron(cfg.CONF) - - # TODO(mark): develop better way restore after machine reboot - # neutron.purge_management_interface() - - # bring the mgt tap interface up - mgt_ip_address = neutron.ensure_local_service_port().split('/')[0] - - # Set up the queue to move messages between the eventlet-based - # listening process and the scheduler. - notification_queue = multiprocessing.Queue() - - # Ignore signals that might interrupt processing. - daemon.ignore_signals() - - # If we see a SIGINT, stop processing. - def _stop_processing(*args): - notification_queue.put((None, None)) - signal.signal(signal.SIGINT, _stop_processing) - - # Listen for notifications. - notification_proc = multiprocessing.Process( - target=notifications.listen, - kwargs={ - 'notification_queue': notification_queue - }, - name='notification-listener', - ) - notification_proc.start() - - if CONF.coordination.enabled: - coordinator_proc = multiprocessing.Process( - target=coordination.start, - kwargs={ - 'notification_queue': notification_queue - }, - name='coordinator', - ) - coordinator_proc.start() - else: - coordinator_proc = None - - metadata_proc = multiprocessing.Process( - target=metadata.serve, - args=(mgt_ip_address,), - name='metadata-proxy' - ) - metadata_proc.start() - - from astara.api import rug as rug_api - rug_api_proc = multiprocessing.Process( - target=rug_api.serve, - name='rug-api' - ) - rug_api_proc.start() - - # Set up the notifications publisher - Publisher = (notifications.Publisher if cfg.CONF.ceilometer.enabled - else notifications.NoopPublisher) - publisher = Publisher( - topic=cfg.CONF.ceilometer.topic, - ) - - # Set up a factory to make Workers that know how many threads to - # run. - worker_factory = functools.partial( - worker.Worker, - notifier=publisher, - management_address=mgt_ip_address, - ) - - # Set up the scheduler that knows how to manage the routers and - # dispatch messages. 
- sched = scheduler.Scheduler( - worker_factory=worker_factory, - ) - - # Prepopulate the workers with existing routers on startup - populate.pre_populate_workers(sched) - - # Set up the periodic health check - health.start_inspector(cfg.CONF.health_check_period, sched) - - # Set up the periodic neutron agent report - health.start_reporter() - - # Block the main process, copying messages from the notification - # listener to the scheduler - try: - shuffle_notifications(notification_queue, sched) - finally: - LOG.info(_LI('Stopping scheduler.')) - sched.stop() - LOG.info(_LI('Stopping notification publisher.')) - publisher.stop() - - # Terminate the subprocesses - for subproc in [notification_proc, coordinator_proc, metadata_proc, - rug_api_proc]: - if not subproc: - continue - LOG.info(_LI('Stopping %s.'), subproc.name) - subproc.terminate() diff --git a/astara/metadata.py b/astara/metadata.py deleted file mode 100644 index 94519187..00000000 --- a/astara/metadata.py +++ /dev/null @@ -1,229 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# @author: Mark McClain, DreamHost - -"""Proxy requests to Nova's metadata server. 
- -Used by main.py -""" - - -import hashlib -import hmac -from six.moves.urllib import parse as urlparse -import socket - -import eventlet -import eventlet.wsgi -import httplib2 -from oslo_config import cfg -import webob -import webob.dec -import webob.exc -import six - -from oslo_log import log as logging - -from astara.common.i18n import _, _LE, _LI, _LW - - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF - -METADATA_OPTS = [ - cfg.StrOpt('nova_metadata_ip', default='127.0.0.1', - help="IP address used by Nova metadata server."), - cfg.IntOpt('nova_metadata_port', - default=8775, - help="TCP Port used by Nova metadata server."), - cfg.IntOpt('astara_metadata_port', - default=9697, - help="TCP listening port used by Astara metadata proxy."), - cfg.StrOpt('neutron_metadata_proxy_shared_secret', - default='', - help='Shared secret to sign instance-id request', - deprecated_name='quantum_metadata_proxy_shared_secret') -] -CONF.register_opts(METADATA_OPTS) - - -class MetadataProxyHandler(object): - - """The actual handler for proxy requests.""" - - @webob.dec.wsgify(RequestClass=webob.Request) - def __call__(self, req): - """Inital handler for an incoming `webob.Request`. - - :param req: The webob.Request to handle - :returns: returns a valid HTTP Response or Error - """ - try: - LOG.debug("Request: %s", req) - - instance_id = self._get_instance_id(req) - if instance_id: - return self._proxy_request(instance_id, req) - else: - return webob.exc.HTTPNotFound() - - except Exception: - LOG.exception(_LE("Unexpected error.")) - msg = ('An unknown error has occurred. ' - 'Please try your request again.') - return webob.exc.HTTPInternalServerError( - explanation=six.text_type(msg)) - - def _get_instance_id(self, req): - """Pull the X-Instance-ID out of a request. - - :param req: The webob.Request to handle - :returns: returns the X-Instance-ID HTTP header - """ - return req.headers.get('X-Instance-ID') - - def _proxy_request(self, instance_id, req): - """Proxy a signed HTTP request to an instance. - - :param instance_id: ID of the Instance being proxied to - :param req: The webob.Request to handle - :returns: returns a valid HTTP Response or Error - """ - headers = { - 'X-Forwarded-For': req.headers.get('X-Forwarded-For'), - 'X-Instance-ID': instance_id, - 'X-Instance-ID-Signature': self._sign_instance_id(instance_id), - 'X-Tenant-ID': req.headers.get('X-Tenant-ID') - } - - url = urlparse.urlunsplit(( - 'http', - '%s:%s' % (cfg.CONF.nova_metadata_ip, - cfg.CONF.nova_metadata_port), - req.path_info, - req.query_string, - '')) - - h = httplib2.Http() - resp, content = h.request(url, headers=headers) - - if resp.status == 200: - LOG.debug(str(resp)) - return content - elif resp.status == 403: - msg = _LW( - 'The remote metadata server responded with Forbidden. This ' - 'response usually occurs when shared secrets do not match.' - ) - LOG.warning(msg) - return webob.exc.HTTPForbidden() - elif resp.status == 404: - return webob.exc.HTTPNotFound() - elif resp.status == 500: - msg = _LW('Remote metadata server experienced an' - ' internal server error.') - LOG.warning(msg) - return webob.exc.HTTPInternalServerError( - explanation=six.text_type(msg)) - else: - raise Exception(_('Unexpected response code: %s') % resp.status) - - def _sign_instance_id(self, instance_id): - """Get an HMAC based on the instance_id and Neutron shared secret. 
- - :param instance_id: ID of the Instance being proxied to - :returns: returns a hexadecimal string HMAC for a specific instance_id - """ - return hmac.new(cfg.CONF.neutron_metadata_proxy_shared_secret, - instance_id, - hashlib.sha256).hexdigest() - - -class MetadataProxy(object): - - """The proxy service.""" - - def __init__(self): - """Initialize the MetadataProxy. - - :returns: returns nothing - """ - self.pool = eventlet.GreenPool(1000) - - def run(self, ip_address, port=cfg.CONF.astara_metadata_port): - """Run the MetadataProxy. - - :param ip_address: the ip address to bind to for incoming requests - :param port: the port to bind to for incoming requests - :returns: returns nothing - """ - app = MetadataProxyHandler() - for i in six.moves.range(5): - LOG.info(_LI( - 'Starting the metadata proxy on %s:%s'), - ip_address, port - ) - try: - sock = eventlet.listen( - (ip_address, port), - family=socket.AF_INET6, - backlog=128 - ) - except socket.error as err: - if err.errno != 99: - raise - LOG.warning( - _LW('Could not create metadata proxy socket: %s'), err) - LOG.warning(_LW('Sleeping %s before trying again'), i + 1) - eventlet.sleep(i + 1) - else: - break - else: - raise RuntimeError( - _('Could not establish metadata proxy socket on %s:%s') % - (ip_address, port) - ) - eventlet.wsgi.server( - sock, - app, - custom_pool=self.pool, - log=LOG) - - -def serve(ip_address): - """Initialize the MetaData proxy. - - :param ip_address: the ip address to bind to for incoming requests - :returns: returns nothing - """ - MetadataProxy().run(ip_address) diff --git a/astara/newton_fix.py b/astara/newton_fix.py deleted file mode 100644 index 1a015490..00000000 --- a/astara/newton_fix.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright 2016 Mark McClain -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
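For reference, the _sign_instance_id helper in the metadata proxy deleted above is a plain HMAC-SHA256 over the instance id, keyed with the shared secret. The same value can be reproduced, and safely verified, with only the standard library (this sketch encodes to bytes, which Python 3 requires; the original ran on Python 2):

import hashlib
import hmac


def sign_instance_id(shared_secret, instance_id):
    # Same construction as the proxy above: HMAC-SHA256 keyed with the
    # shared secret, hex-encoded.
    return hmac.new(shared_secret.encode('utf-8'),
                    instance_id.encode('utf-8'),
                    hashlib.sha256).hexdigest()


# Nova's metadata service recomputes this from its own copy of the secret;
# compare_digest avoids leaking information through timing.
sig = sign_instance_id('secret', 'instance-uuid')
assert hmac.compare_digest(sig, sign_instance_id('secret', 'instance-uuid'))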
diff --git a/astara/newton_fix.py b/astara/newton_fix.py
deleted file mode 100644
index 1a015490..00000000
--- a/astara/newton_fix.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# Copyright 2016 Mark McClain
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from astara_neutron.plugins import ml2_neutron_plugin as as_plugin
-
-from neutron.plugins.ml2 import plugin as ml2_plugin
-from neutron.services.l3_router.service_providers import base
-
-
-class SingleNodeDriver(base.L3ServiceProvider):
-    """Provider for single L3 agent routers."""
-    use_integrated_agent_scheduler = False
-
-
-class HaNodeDriver(base.L3ServiceProvider):
-    """Provider for HA L3 agent routers."""
-    use_integrated_agent_scheduler = False
-    ha_support = base.MANDATORY
-
-
-class Ml2Plugin(as_plugin.Ml2Plugin):
-    _supported_extension_aliases = (
-        as_plugin.Ml2Plugin._supported_extension_aliases +
-        ['ip_allocation']
-    )
-
-    disabled_extensions = [
-        "dhrouterstatus",
-        "byonf"
-    ]
-
-    for ext in disabled_extensions:
-        try:
-            _supported_extension_aliases.remove(ext)
-        except ValueError:
-            pass
-
-    def _make_port_dict(self, port, fields=None, process_extensions=True):
-        res = ml2_plugin.Ml2Plugin._make_port_dict(
-            self,
-            port,
-            fields,
-            process_extensions
-        )
-        if not res.get('fixed_ips') and res.get('mac_address'):
-            res['ip_allocation'] = 'deferred'
-        return res
diff --git a/astara/notifications.py b/astara/notifications.py
deleted file mode 100644
index 946dbdd9..00000000
--- a/astara/notifications.py
+++ /dev/null
@@ -1,264 +0,0 @@
-# Copyright 2014 DreamHost, LLC
-#
-# Author: DreamHost, LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-"""Listen for notifications.
-"""
-
-import Queue
-import threading
-
-from astara import commands
-from astara import drivers
-from astara import event
-from astara.common import rpc
-
-from oslo_config import cfg
-from oslo_context import context
-from oslo_log import log as logging
-
-from astara.common.i18n import _LE
-
-from oslo_service import service
-
-NOTIFICATIONS_OPTS = [
-    cfg.StrOpt('amqp-url',
-               help='connection for AMQP server'),
-    cfg.StrOpt('incoming-notifications-exchange',
-               default='neutron',
-               help='name of the exchange where we receive notifications'),
-    cfg.StrOpt('rpc-exchange',
-               default='l3_agent_fanout',
-               help='name of the exchange where we receive RPC calls'),
-    cfg.StrOpt('neutron-control-exchange',
-               default='neutron',
-               help='The name of the exchange used by Neutron for RPCs')
-]
-cfg.CONF.register_opts(NOTIFICATIONS_OPTS)
-
-LOG = logging.getLogger(__name__)
-
-
-def _get_tenant_id_for_message(context, payload=None):
-    """Find the tenant id in the incoming message."""
-
-    # give priority to the tenant_id in the router dict if one
-    # exists in the message
-    if payload:
-        for key in ('router', 'port', 'subnet'):
-            if key in payload and payload[key].get('tenant_id'):
-                val = payload[key]['tenant_id']
-                return val
-
-    for key in ['tenant_id', 'project_id']:
-        if key in context:
-            val = context[key]
-            # Some notifications have None as the tenant id, but we
-            # can't shard on None in the dispatcher, so treat those as
-            # invalid.
-            if val is not None:
-                return val
-    return None
-
-
-_ROUTER_INTERFACE_NOTIFICATIONS = set([
-    'router.interface.create',
-    'router.interface.delete',
-])
-
-_ROUTER_INTERESTING_NOTIFICATIONS = set([
-    'subnet.create.end',
-    'subnet.change.end',
-    'subnet.delete.end',
-    'port.create.end',
-    'port.change.end',
-    'port.delete.end',
-])
-
-
-L3_AGENT_TOPIC = 'l3_agent'
-
-
-class L3RPCEndpoint(object):
-    """An RPC endpoint for servicing L3 Agent RPC requests"""
-    def __init__(self, notification_queue):
-        self.notification_queue = notification_queue
-
-    def router_deleted(self, ctxt, router_id):
-        tenant_id = _get_tenant_id_for_message(ctxt)
-
-        resource = event.Resource('router', router_id, tenant_id)
-
-        crud = event.DELETE
-        e = event.Event(resource, crud, None)
-        self.notification_queue.put((e.resource.tenant_id, e))
-
-
-class NotificationsEndpoint(object):
-    """An RPC endpoint for processing notifications"""
-    def __init__(self, notification_queue):
-        self.notification_queue = notification_queue
-
-    def info(self, ctxt, publisher_id, event_type, payload, metadata):
-        tenant_id = _get_tenant_id_for_message(ctxt, payload)
-        crud = event.UPDATE
-        e = None
-        events = []
-        if event_type.startswith('astara.command'):
-            LOG.debug('received a command: %r', payload)
-            crud = event.COMMAND
-            if payload.get('command') == commands.POLL:
-                r = event.Resource(driver='*', id='*', tenant_id='*')
-                e = event.Event(
-                    resource=r,
-                    crud=event.POLL,
-                    body={})
-                self.notification_queue.put(('*', e))
-                return
-            else:
-                # If the message does not specify a tenant, send it to everyone
-                tenant_id = payload.get('tenant_id', '*')
-                router_id = payload.get('router_id')
-                resource = event.Resource(
-                    driver='*',
-                    id=router_id,
-                    tenant_id=tenant_id)
-                events.append(event.Event(resource, crud, payload))
-        else:
-
-            for driver in drivers.enabled_drivers():
-                driver_event = driver.process_notification(
-                    tenant_id, event_type, payload)
-                if driver_event:
-                    events.append(driver_event)
-
-        if not events:
-            LOG.debug('Could not construct any events from %s /w payload: %s',
-                      event_type, payload)
-            return
-
-        LOG.debug('Generated %s events from %s /w payload: %s',
-                  len(events), event_type, payload)
-
-        for e in events:
-            self.notification_queue.put((e.resource.tenant_id, e))
-
-
-def listen(notification_queue):
-    """Create and launch the messaging service"""
-    connection = rpc.MessagingService()
-    connection.create_notification_listener(
-        endpoints=[NotificationsEndpoint(notification_queue)],
-        exchange=cfg.CONF.neutron_control_exchange,
-    )
-    connection.create_rpc_consumer(
-        topic=L3_AGENT_TOPIC,
-        endpoints=[L3RPCEndpoint(notification_queue)]
-    )
-    launcher = service.ServiceLauncher(cfg.CONF)
-    launcher.launch_service(service=connection, workers=1)
-    launcher.wait()
-
-
-class Sender(object):
-    "Send notification messages"
-
-    def __init__(self, topic=None):
-        self._notifier = None
-        self.topic = topic
-
-    def get_notifier(self):
-        if not self._notifier:
-            self._notifier = rpc.get_rpc_notifier(topic=self.topic)
-
-    def send(self, event_type, message):
-        self.get_notifier()
-        ctxt = context.get_admin_context().to_dict()
-        self._notifier.info(ctxt, event_type, message)
-
-
-class Publisher(Sender):
-
-    def __init__(self, topic=None):
-        super(Publisher, self).__init__(topic)
-        self._q = Queue.Queue()
-        self._t = None
-
-    def start(self):
-        ready = threading.Event()
-        self._t = threading.Thread(
-            name='notification-publisher',
-            target=self._send,
-            args=(ready,),
-        )
-        self._t.setDaemon(True)
-        self._t.start()
-        # Block until the thread is ready for work, but use a timeout
-        # in case of error in the thread.
-        ready.wait(10)
-        LOG.debug('started %s', self._t.getName())
-
-    def stop(self):
-        if self._t:
-            LOG.debug('stopping %s', self._t.getName())
-            self._q.put(None)
-            self._t.join(timeout=1)
-            self._t = None
-
-    def publish(self, incoming):
-        self._q.put(incoming)
-
-    def _send(self, ready):
-        """Deliver notification messages from the in-process queue
-        to the appropriate topic via the AMQP service.
-        """
-        # setup notifier driver ahead of time
-        self.get_notifier()
-        # Tell the start() method that we have set up the AMQP
-        # communication stuff and are ready to do some work.
-        ready.set()
-        while True:
-            msg = self._q.get()
-            if msg is None:
-                break
-            LOG.debug('sending notification %r', msg)
-            try:
-                self.send(event_type=msg['event_type'], message=msg['payload'])
-            except Exception:
-                LOG.exception(_LE('could not publish notification'))
-
-
-class NoopPublisher(Publisher):
-    """A Publisher that doesn't do anything.
-
-    The code that publishes notifications is spread across several
-    classes and cannot be easily disabled in configurations that do
-    not require sending metrics to ceilometer.
-
-    This class is used in place of the Publisher class to disable
-    sending metrics without explicitly checking in various places
-    across the code base.
-
-    """
-
-    def start(self):
-        pass
-
-    def stop(self):
-        pass
-
-    def publish(self, incoming):
-        pass
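The Publisher above is a small producer/consumer hand-off: callers enqueue
messages, a daemon thread drains the queue and talks AMQP, and None doubles as
the poison pill. A stripped-down, stand-alone version of the same pattern (the
AMQP send replaced with a print so the sketch runs anywhere) might look like:

    import threading
    from six.moves import queue

    class SketchPublisher(object):
        def __init__(self):
            self._q = queue.Queue()
            self._t = None

        def start(self):
            ready = threading.Event()
            self._t = threading.Thread(target=self._send, args=(ready,))
            self._t.setDaemon(True)
            self._t.start()
            ready.wait(10)  # bounded wait, in case the thread dies early

        def stop(self):
            self._q.put(None)        # poison pill
            self._t.join(timeout=1)

        def publish(self, incoming):
            self._q.put(incoming)

        def _send(self, ready):
            ready.set()              # unblock start()
            while True:
                msg = self._q.get()
                if msg is None:
                    break
                print('would send %r' % (msg,))

    pub = SketchPublisher()
    pub.start()
    pub.publish({'event_type': 'astara.bandwidth.used', 'payload': {}})
    pub.stop()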
diff --git a/astara/opts.py b/astara/opts.py
deleted file mode 100644
index 057ca628..00000000
--- a/astara/opts.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import itertools
-import astara.api.nova
-import astara.drivers
-import astara.main
-import astara.common.linux.interface
-import astara.notifications
-import astara.coordination
-import astara.pez.manager
-import astara.drivers.router
-import astara.api.rug
-import astara.debug
-import astara.api.astara_client
-import astara.api.config.router
-import astara.api.neutron
-import astara.common.hash_ring
-import astara.drivers.loadbalancer
-import astara.health
-import astara.instance_manager
-import astara.metadata
-import astara.scheduler
-import astara.worker
-
-
-def list_opts():
-    return [
-        ('DEFAULT',
-         itertools.chain(
-             astara.api.api_opts,
-             astara.api.rug.RUG_API_OPTS,
-             astara.api.nova.OPTIONS,
-             astara.api.neutron.neutron_opts,
-             astara.api.astara_client.AK_CLIENT_OPTS,
-             astara.drivers.DRIVER_OPTS,
-             astara.main.MAIN_OPTS,
-             astara.common.linux.interface.OPTS,
-             astara.common.hash_ring.hash_opts,
-             astara.api.config.router.OPTIONS,
-             astara.notifications.NOTIFICATIONS_OPTS,
-             astara.debug.DEBUG_OPTS,
-             astara.scheduler.SCHEDULER_OPTS,
-             astara.worker.WORKER_OPTS,
-             astara.metadata.METADATA_OPTS,
-             astara.health.HEALTH_INSPECTOR_OPTS,
-             astara.instance_manager.INSTANCE_MANAGER_OPTS
-         ))
-    ]
-
-
-def list_agent_opts():
-    return [
-        ('AGENT', astara.common.linux.interface.AGENT_OPTIONS)
-    ]
-
-
-def list_coordination_opts():
-    return [
-        ('coordination', astara.coordination.COORD_OPTS)
-    ]
-
-
-def list_ceilometer_opts():
-    return [
-        ('ceilometer', astara.main.CEILOMETER_OPTS)
-    ]
-
-
-def list_router_opts():
-    return [
-        ('router', astara.drivers.router.ROUTER_OPTS)
-    ]
-
-
-def list_loadbalancer_opts():
-    return [
-        ('loadbalancer', astara.drivers.loadbalancer.LOADBALANCER_OPTS)
-    ]
-
-
-def list_pez_opts():
-    return [
-        ('pez', astara.pez.manager.PEZ_OPTIONS)
-    ]
diff --git a/astara/pez/__init__.py b/astara/pez/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/astara/pez/manager.py b/astara/pez/manager.py
deleted file mode 100644
index d894b51b..00000000
--- a/astara/pez/manager.py
+++ /dev/null
@@ -1,109 +0,0 @@
-# Copyright 2015 Akanda, Inc
-#
-# Author: Akanda, Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import threading
-
-from oslo_config import cfg
-
-from astara.common.i18n import _
-from astara.pez import pool
-
-CONF = cfg.CONF
-
-PEZ_OPTIONS = [
-    cfg.IntOpt('pool_size', default=1,
-               help=_('How many pre-allocated hot standby nodes to keep '
-                      'in the pez pool.')),
-
-    # NOTE(adam_g): We should consider how these get configured for when
-    # we support multiple drivers. {router, lbaas}_image_uuid?
-    cfg.StrOpt('image_uuid',
-               help=_('Image uuid to boot.')),
-    cfg.StrOpt('flavor',
-               help=_('Nova flavor to boot')),
-    cfg.StrOpt('rpc_topic', default='astara-pez'),
-
-]
-
-CONF.register_group(cfg.OptGroup(name='pez'))
-CONF.register_opts(PEZ_OPTIONS, group='pez')
-
-
-CONF.import_opt('host', 'astara.main')
-CONF.import_opt('management_network_id', 'astara.api.neutron')
-
-
-class PezManager(object):
-    """The RPC server-side of the Pez service"""
-    def __init__(self):
-        self.image_uuid = CONF.pez.image_uuid
-        self.flavor = CONF.pez.flavor
-        self.mgt_net_id = CONF.management_network_id
-        self.pool_size = CONF.pez.pool_size
-        self.pool_mgr = pool.PezPoolManager(
-            self.image_uuid,
-            self.flavor,
-            self.pool_size,
-            self.mgt_net_id)
-
-    def start(self):
-        pooler_thread = threading.Thread(target=self.pool_mgr.start)
-        pooler_thread.start()
-
-    def get_instance(self, context, resource_type, name, management_port,
-                     instance_ports):
-        """Obtains an instance from the pool for a client
-
-        This obtains an instance from the pool manager and returns enough data
-        about it to the client that the client can create an InstanceInfo
-        object. We purposely avoid the need to introduce versioned objects
-        (for now) by serializing everything into a dict. This may change in
-        the future.
-
-        :param context: oslo_context admin context object
-        :param resource_type: The str driver name of the resource
-        :param name: The requested name of the instance
-        :param management_port: The management port dict that was created for
-                                the instance by the RUG.
-        :param instance_ports: A list of dicts of ports to be attached to
-                               instance upon reservation.
-
-        :returns: A dict containing the following:
-                  - 'id': The id of the reserved instance
-                  - 'name': The name of the reserved instance
-                  - 'image_uuid': The image id of the reserved instance
-                  - 'management_port': A serialized dict representing the
-                                       management Neutron port.
-                  - 'instance_ports': A list of serialized instance port
-                                      dicts that the caller requested be
-                                      attached.
-
-        """
-        instance, mgt_port, instance_ports = self.pool_mgr.get_instance(
-            resource_type=resource_type, name=name,
-            management_port=management_port, instance_ports=instance_ports)
-
-        return {
-            'id': instance.id,
-            'resource_type': resource_type,
-            'name': instance.name,
-            'image_uuid': instance.image['id'],
-            'management_port': mgt_port.to_dict(),
-            'instance_ports': [
-                p.to_dict() for p in instance_ports
-            ],
-        }
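The pool manager that follows keys everything off instance names in Nova, so
the two naming templates it uses are worth seeing in isolation. The resource
ID below is an invented example:

    # Templates copied from astara/pez/pool.py below.
    INSTANCE_FREE = 'ASTARA:UNUSED:%(resource_name)s'
    IN_USE_TEMPLATE = 'ak-%(resource_name)s-%(resource_id)s'

    free_name = INSTANCE_FREE % {'resource_name': 'router'}
    # -> 'ASTARA:UNUSED:router'

    in_use_name = IN_USE_TEMPLATE % {
        'resource_name': 'router',
        'resource_id': '11111111-2222-3333-4444-555555555555',
    }
    # -> 'ak-router-11111111-2222-3333-4444-555555555555'

    # load_pools() below buckets Nova servers by the free-name prefix:
    def is_free(server_name, resource_name):
        prefix = INSTANCE_FREE % {'resource_name': resource_name}
        return server_name.startswith(prefix)

Reserving an instance is therefore just a rename from the first form to the
second, which is why no pool state needs to live anywhere but Nova itself.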
diff --git a/astara/pez/pool.py b/astara/pez/pool.py
deleted file mode 100644
index 86da3aac..00000000
--- a/astara/pez/pool.py
+++ /dev/null
@@ -1,327 +0,0 @@
-# Copyright 2015 Akanda, Inc
-#
-# Author: Akanda, Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-import time
-
-from six.moves import range
-from oslo_concurrency import lockutils
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_utils import timeutils
-
-from astara.common.i18n import _LE, _LI
-from astara import drivers
-from astara.api import neutron
-from astara.api import nova
-
-LOG = logging.getLogger(__name__)
-
-# Unused instances are launched with a known name
-INSTANCE_FREE = 'ASTARA:UNUSED:%(resource_name)s'
-
-# When an instance is reserved, it's renamed accordingly
-# TODO(adam_g): We ideally want to make all instance naming template-able
-IN_USE_TEMPLATE = 'ak-%(resource_name)s-%(resource_id)s'
-
-
-# Nova states
-ACTIVE = 'active'
-ERROR = 'error'
-DELETING = 'deleting'
-
-PEZ_LOCK = 'astara-pez'
-
-
-class PezPoolExhausted(Exception):
-    pass
-
-
-class WorkerContext(object):
-    """Holds resources owned by the worker and used by the Automaton.
-    """
-
-    def __init__(self):
-        self.nova_client = nova.Nova(cfg.CONF)
-        self.neutron_client = neutron.Neutron(cfg.CONF)
-
-
-class PezPoolManager(object):
-    """Astara Pez Pool Manager
-
-    This manages a pool of instances of a configurable size. All instance
-    state is managed and tracked in Nova itself.
-
-    Each iteration of the manager's main loop will scan the service tenant's
-    booted instances. Instances named INSTANCE_FREE (defined above) will be
-    considered unused. If any of these instances are in ERROR state or are
-    outdated in some way (ie, its image is not the currently configured
-    image), they will be deleted from the pool and the manager will replenish
-    the deficit on its next tick.
-
-    Instances may be reserved for use via the get_instance() method. This
-    simply renames the instance according to the ID of the thing that it will
-    host and returns it to the caller. At this point, Pez no longer cares about
-    the instance and will refill its position in the pool on its next tick.
-
-    The calling service is responsible for managing the lifecycle of the
-    returned instance. This includes attaching required ports, ensuring
-    deletion/cleanup, etc. The instance will not be returned to the pool when
-    it is no longer in use.
-    """
-    def __init__(self, image_uuid, flavor, pool_size, mgt_net_id):
-        """
-        :param image_uuid: UUID of backing image for managed instances.
-        :param flavor: nova flavor id to be used for managed instances.
-        :param mgt_net_id: UUID of management network. Each instance in the
-                           pool is initially booted with a single port on this
-                           network
-        :param pool_size: The size of the pool
-        """
-        self.image_uuid = image_uuid
-        self.flavor = flavor
-        self.mgt_net_id = mgt_net_id
-        self.pool_size = int(pool_size)
-        self.poll_interval = 3
-        self.ctxt = WorkerContext()
-        self.boot_timeout = 120
-        self.delete_timeout = 30
-
-        # used to track boot/delete timeouts
-        self._delete_counters = {}
-        self._boot_counters = {}
-        self.load_driver_config()
-
-    def load_driver_config(self):
-        self.images = {}
-        self.flavors = {}
-        self.drivers = [d for d in drivers.enabled_drivers()]
-        for driver in self.drivers:
-            self.images[driver.RESOURCE_NAME] = getattr(
-                cfg.CONF, driver.RESOURCE_NAME).image_uuid
-            self.flavors[driver.RESOURCE_NAME] = getattr(
-                cfg.CONF, driver.RESOURCE_NAME).instance_flavor
-
-    @lockutils.synchronized(PEZ_LOCK)
-    def delete_instance(self, instance_uuid):
-        LOG.info(_LI('Deleting instance %s.'), instance_uuid)
-        self.ctxt.nova_client.client.servers.delete(instance_uuid)
-        self._delete_counters[instance_uuid] = timeutils.utcnow()
-
-    def _check_err_instances(self, pools):
-        """Scans the pool and deletes any instances in error state"""
-        for resource, pool in copy.copy(pools).items():
-            err_instances = [i for i in pool if i.status == ERROR]
-            for err_inst in err_instances:
-                LOG.error(_LE(
-                    'Instance %s is in %s state, deleting.'),
-                    err_inst.id, ERROR)
-                del_instance = self.delete_instance(err_inst.id)
-                i = pool.index(err_inst)
-                pools[resource][i] = del_instance
-
-    def _check_del_instances(self, pools):
-        """Scans the pool for deleted instances and checks deletion timers"""
-        # XXX: What do we do with instances stuck in deleting?
-        # For now, just return stuck instances to caller and we can figure
-        # out what to do with them later.
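-        # Bookkeeping below: the first time an instance is seen in
-        # DELETING a timestamp is recorded; if it is still DELETING more
-        # than self.delete_timeout seconds later it is reported as stuck.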
-        stuck_instances = []
-        del_instances = []
-        for resource, pool in pools.items():
-            del_instances += [i for i in pool if i.status == DELETING]
-
-        # clean out counters for old instances that have been deleted entirely
-        if self._delete_counters:
-            del_instance_ids = [i.id for i in del_instances]
-            for inst_id in copy.copy(self._delete_counters):
-                if inst_id not in del_instance_ids:
-                    self._delete_counters.pop(inst_id)
-
-        for del_inst in del_instances:
-            if del_inst.id not in self._delete_counters:
-                self._delete_counters[del_inst.id] = timeutils.utcnow()
-            else:
-                if timeutils.is_older_than(self._delete_counters[del_inst.id],
-                                           self.delete_timeout):
-                    LOG.error(_LE(
-                        'Instance %s is stuck in %s for more than %s '
-                        'seconds.'), del_inst.id, DELETING,
-                        self.delete_timeout)
-                    stuck_instances.append(del_inst)
-        return stuck_instances
-
-    def _check_outdated_instances(self, pools):
-        outdated_instances = []
-        for resource, pool in pools.items():
-            for server in pool:
-                if server.image['id'] != str(self.images[resource]):
-                    LOG.info(_LI(
-                        'Deleting instance %s with outdated image, '
-                        '%s != %s'),
-                        server.id, server.image['id'], self.images[resource])
-                    outdated_instances.append(server)
-                elif server.flavor['id'] != str(self.flavors[resource]):
-                    LOG.info(_LI(
-                        'Deleting instance %s with outdated flavor, '
-                        '%s != %s'),
-                        server.id, server.flavor['id'],
-                        self.flavors[resource])
-                    outdated_instances.append(server)
-
-        if outdated_instances:
-            [self.delete_instance(i.id) for i in outdated_instances]
-
-    def load_pools(self):
-        pools = {}
-        [pools.update({d.RESOURCE_NAME: []}) for d in self.drivers]
-        for server in self.ctxt.nova_client.client.servers.list():
-            for d in self.drivers:
-                instance_free = INSTANCE_FREE % {
-                    'resource_name': d.RESOURCE_NAME
-                }
-                if server.name.startswith(instance_free):
-                    pools[d.RESOURCE_NAME].append(server)
-        return pools
-
-    @property
-    def unused_instances(self):
-        """Determines the size and contents of the current instance pool
-
-        We list all nova servers according to the naming template.
-
-        Any instances in an error state are deleted and will be replenished on
-        the next run of the main loop.
-
-        We time instance deletion and any servers that appear to be stuck in a
-        deleted state will be reported as such. TODO(adam_g): We should figure
-        out what to do with stuck instances?
-
-        Any instances that appear to be outdated (ie, the server's image or
-        flavor does not match what's configured) will be deleted and
-        replenished on the next tick of the main loop.
-
-        :returns: a dict keyed by driver name, each value a list of nova server
-                  objects that represents the current resources pool.
-        """
-        pools = self.load_pools()
-        self._check_err_instances(pools)
-        self._check_del_instances(pools)
-        self._check_outdated_instances(pools)
-        return pools
-
-    def launch_instances(self, count, driver):
-        LOG.info(_LI(
-            'Launching %s %s instances.'), count, driver.RESOURCE_NAME)
-        for i in range(0, count):
-            # NOTE: Use a fake UUID so astara-neutron's name matching still
-            # catches this port as an astara port. This can be avoided if
-            # we use a mgt security group in the future.
-            mgt_port = self.ctxt.neutron_client.create_management_port(
-                '00000000-0000-0000-0000-000000000000')
-            nics = [{
-                'net-id': mgt_port.network_id,
-                'v4-fixed-ip': '',
-                'port-id': mgt_port.id}]
-
-            instance_name = INSTANCE_FREE % {
-                'resource_name': driver.RESOURCE_NAME
-            }
-            image = self.images[driver.RESOURCE_NAME]
-            flavor = self.flavors[driver.RESOURCE_NAME]
-
-            self.ctxt.nova_client.client.servers.create(
-                name=instance_name,
-                image=image,
-                flavor=flavor,
-                nics=nics,
-                config_drive=True,
-                userdata=nova.format_userdata(mgt_port),
-            )
-
-    @lockutils.synchronized(PEZ_LOCK)
-    def get_instance(self, resource_type, name, management_port=None,
-                     instance_ports=None):
-        """Get an instance from the pool.
-
-        This involves popping it out of the pool, updating its name and
-        attaching any ports.
-
-        :param resource_type: The str driver name of the resource
-        :param name: The requested name of the instance
-        :param management_port: The management port dict that was created for
-                                the instance by the RUG.
-        :param instance_ports: A list of dicts of ports to be attached to
-                               instance upon reservation.
-
-        :returns: A tuple containing (novaclient server object for the
-                  reserved server, a port object for the management port,
-                  a list of port objects that were attached to the server)
-        """
-        instance_ports = instance_ports or []
-
-        try:
-            server = self.unused_instances[resource_type][0]
-        except IndexError:
-            raise PezPoolExhausted()
-
-        LOG.info(_LI('Renaming instance %s to %s'), server.name, name)
-        server = self.ctxt.nova_client.client.servers.update(
-            server, name=name)
-
-        for port in instance_ports:
-            LOG.info(_LI('Attaching instance port %s to %s (%s)'),
                     port['id'], server.name, server.id)
-            self.ctxt.nova_client.client.servers.interface_attach(
-                server=server, port_id=port['id'], net_id=None, fixed_ip=None)
-
-        mgt_port, instance_ports = (
-            self.ctxt.neutron_client.get_ports_for_instance(server.id)
-        )
-
-        return (
-            self.ctxt.nova_client.client.servers.get(server.id),
-            mgt_port,
-            instance_ports,
-        )
-
-    def start(self):
-        """The pool manager main loop.
-
-        The bulk of the algorithm exists in the 'unused_instances' property.
-        This main loop simply checks for a deficit in the pool and dispatches
-        a 'launch_instances' call when a deficit needs to be filled.
-        """
-        while True:
-            cur_pools = self.unused_instances
-            report = []
-            for driver in self.drivers:
-                report.append(
-                    '%s:%s/%s' %
-                    (driver.RESOURCE_NAME,
-                     len(cur_pools[driver.RESOURCE_NAME]),
-                     self.pool_size))
-            LOG.debug('Current pools: %s' % ' '.join(report))
-
-            for driver in self.drivers:
-                cur_pool = cur_pools[driver.RESOURCE_NAME]
-                deficit = self.pool_size - len(cur_pool)
-                if deficit:
-                    LOG.info(_LI(
-                        'Need to launch %s more %s instance(s).'),
-                        deficit, driver.RESOURCE_NAME)
-                    self.launch_instances(
-                        driver=driver, count=deficit)
-            time.sleep(self.poll_interval)
diff --git a/astara/pez/rpcapi.py b/astara/pez/rpcapi.py
deleted file mode 100644
index 21fcde23..00000000
--- a/astara/pez/rpcapi.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# Copyright 2015 Akanda, Inc
-#
-# Author: Akanda, Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from astara.common import rpc
-
-from oslo_context import context
-
-
-class AstaraPezAPI(object):
-    """Client side of the Astara Pez RPC API.
-    """
-    def __init__(self, rpc_topic):
-        self.topic = rpc_topic
-        self.client = rpc.get_rpc_client(
-            topic=self.topic)
-        self.context = context.get_admin_context().to_dict()
-
-    def get_instance(self, resource_type, name, management_port,
-                     instance_ports):
-        """Reserves an instance from the Pez service. We can instruct Pez to
-        attach any required instance ports during the reservation process.
-        The dict returned here should be enough for the caller to construct
-        an InstanceInfo object. Note that the port information consists of
-        serialized astara.api.neutron.Port objects that can be deserialized
-        by the caller during creation of InstanceInfo.
-
-        :param resource_type: The str name of the driver that manages the
-                              resource (ie, loadbalancer)
-        :param name: The requested name of the instance
-        :param management_port: The management port dict that was created for
-                                the instance by the RUG.
-        :param instance_ports: A list of dicts of ports to be attached to
-                               instance upon reservation.
-
-        """
-        cctxt = self.client.prepare(topic=self.topic)
-        return cctxt.call(
-            self.context, 'get_instance', resource_type=resource_type,
-            name=name, management_port=management_port,
-            instance_ports=instance_ports)
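For orientation, a hypothetical caller of the client above might look like the
following. The topic matches the [pez] rpc_topic default, the port dicts are
invented placeholders, and a configured oslo.messaging transport is assumed:

    from astara.pez import rpcapi

    pez = rpcapi.AstaraPezAPI(rpc_topic='astara-pez')
    reservation = pez.get_instance(
        resource_type='router',
        name='ak-router-11111111-2222-3333-4444-555555555555',
        management_port={'id': 'aaaaaaaa-0000-0000-0000-000000000000'},
        instance_ports=[{'id': 'bbbbbbbb-0000-0000-0000-000000000000'}],
    )
    # The blocking call() returns the serialized dict assembled by
    # PezManager.get_instance() on the server side.
    print(reservation['image_uuid'])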
diff --git a/astara/pez/service.py b/astara/pez/service.py
deleted file mode 100644
index b1d99cd3..00000000
--- a/astara/pez/service.py
+++ /dev/null
@@ -1,65 +0,0 @@
-# Copyright 2015 Akanda, Inc
-#
-# Author: Akanda, Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import logging
-import sys
-
-from astara.common.i18n import _LI
-from astara.common import config as ak_cfg
-
-from astara.common import rpc
-from astara.pez import manager
-
-from oslo_config import cfg
-from oslo_log import log
-from oslo_service import service
-
-CONF = cfg.CONF
-
-LOG = log.getLogger(__name__)
-
-
-class PezService(service.Service):
-    """Bootstraps a connection for the manager to the messaging
-    queue and launches the pez service
-    """
-    def __init__(self):
-        super(PezService, self).__init__()
-        self.manager = manager.PezManager()
-        self.manager.start()
-        self._rpc_connection = None
-        self.rpcserver = None
-
-    def start(self):
-        super(PezService, self).start()
-        self._rpc_connection = rpc.Connection()
-        self._rpc_connection.create_rpc_consumer(
-            topic=cfg.CONF.pez.rpc_topic,
-            endpoints=[self.manager])
-        self._rpc_connection.consume_in_threads()
-        self._rpc_connection.close()
-
-
-def main(argv=sys.argv[1:]):
-    ak_cfg.parse_config(argv)
-    log.setup(CONF, 'astara-pez')
-    CONF.log_opt_values(LOG, logging.INFO)
-
-    LOG.info(_LI("Starting Astara Pez service."))
-
-    mgr = PezService()
-    launcher = service.launch(CONF, mgr)
-    launcher.wait()
diff --git a/astara/populate.py b/astara/populate.py
deleted file mode 100644
index 3f7f838d..00000000
--- a/astara/populate.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# Copyright 2014 DreamHost, LLC
-#
-# Author: DreamHost, LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-"""Populate the workers with the existing routers
-"""
-
-import threading
-
-from oslo_log import log as logging
-
-from astara import event
-from astara import drivers
-
-LOG = logging.getLogger(__name__)
-
-
-def repopulate():
-    """Called from workers after a rebalance to find newly owned resources"""
-    resources = []
-    for driver in drivers.enabled_drivers():
-        resources += driver.pre_populate_hook()
-    return resources
-
-
-def _pre_populate_workers(scheduler):
-    """Loops through enabled drivers triggering each driver's
-    pre_populate_hook, which is a static method for each driver.
-
-    """
-    for driver in drivers.enabled_drivers():
-        resources = driver.pre_populate_hook()
-
-        if not resources:
-            # just skip to the next one; the driver's pre_populate_hook
-            # already handled the exception or error and outputs to logs
-            LOG.debug('No %s resources found to pre-populate',
-                      driver.RESOURCE_NAME)
-            continue
-
-        LOG.debug('Start pre-populating %d workers for the %s driver',
-                  len(resources),
-                  driver.RESOURCE_NAME)
-
-        for resource in resources:
-            message = event.Event(
-                resource=resource,
-                crud=event.POLL,
-                body={}
-            )
-            scheduler.handle_message(resource.tenant_id, message)
-
-
-def pre_populate_workers(scheduler):
-    """Start the pre-populating task
-    """
-
-    t = threading.Thread(
-        target=_pre_populate_workers,
-        args=(scheduler,),
-        name='PrePopulateWorkers'
-    )
-
-    t.setDaemon(True)
-    t.start()
-    return t
diff --git a/astara/scheduler.py b/astara/scheduler.py
deleted file mode 100644
index 155b497b..00000000
--- a/astara/scheduler.py
+++ /dev/null
@@ -1,166 +0,0 @@
-# Copyright 2014 DreamHost, LLC
-#
-# Author: DreamHost, LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-"""Scheduler to send messages for a given router to the correct worker.
-"""
-import six
-import multiprocessing
-import uuid
-
-from six.moves import range
-from oslo_config import cfg
-from oslo_log import log as logging
-
-from astara import commands
-from astara.common.i18n import _, _LE, _LI, _LW
-from astara import daemon
-
-
-LOG = logging.getLogger(__name__)
-CONF = cfg.CONF
-SCHEDULER_OPTS = [
-    cfg.IntOpt('num_worker_processes',
-               default=16,
-               help='the number of worker processes to run'),
-]
-CONF.register_opts(SCHEDULER_OPTS)
-
-
-def _worker(inq, worker_factory, scheduler, proc_name):
-    """Scheduler's worker process main function.
-    """
-    daemon.ignore_signals()
-    LOG.debug('starting worker process')
-    worker = worker_factory(scheduler=scheduler, proc_name=proc_name)
-    while True:
-        try:
-            data = inq.get()
-        except IOError:
-            # NOTE(dhellmann): Likely caused by a signal arriving
-            # during processing, especially SIGCHLD.
-            data = None
-        if data is None:
-            target, message = None, None
-        else:
-            target, message = data
-        try:
-            worker.handle_message(target, message)
-        except Exception:
-            LOG.exception(_LE('Error processing data %s'), six.text_type(data))
-        if data is None:
-            break
-    LOG.debug('exiting')
-
-
-class Dispatcher(object):
-    """Choose one of the workers to receive a message.
-
-    The current implementation uses the least significant bits of the
-    UUID as an integer to shard across the worker pool.
-    """
-
-    def __init__(self, workers):
-        self.workers = workers
-
-    def pick_workers(self, target):
-        """Returns the workers that match the target.
-        """
-        target = target.strip() if target else None
-        # If we get any wildcard target, send the message to all of
-        # the workers.
-        if target in commands.WILDCARDS:
-            return self.workers[:]
-        try:
-            idx = uuid.UUID(target).int % len(self.workers)
-        except (TypeError, ValueError) as e:
-            LOG.warning(_LW(
-                'Could not determine UUID from %r: %s, ignoring message'),
-                target, e,
-            )
-            return []
-        else:
-            LOG.debug('target %s maps to worker %s', target, idx)
-            return [self.workers[idx]]
-
-
-class Scheduler(object):
-    """Manages a worker pool and redistributes messages.
-    """
-
-    def __init__(self, worker_factory):
-        """
-        :param worker_factory: Callable used to create Worker instances
-                               in each worker process when a notification
-                               is received.
-        :type worker_factory: callable
-        """
-        self.num_workers = cfg.CONF.num_worker_processes
-        if self.num_workers < 1:
-            raise ValueError(_('Need at least one worker process'))
-        self.workers = []
-        # Create several worker processes, each with its own queue for
-        # sending it instructions based on the notifications we get
-        # when someone calls our handle_message() method.
-        for i in range(self.num_workers):
-            wq = multiprocessing.JoinableQueue()
-            name = 'p%02d' % i
-            worker = multiprocessing.Process(
-                target=_worker,
-                kwargs={
-                    'inq': wq,
-                    'worker_factory': worker_factory,
-                    'scheduler': self,
-                    'proc_name': name,
-                },
-                name=name,
-            )
-            self.workers.append({
-                'queue': wq,
-                'worker': worker,
-            })
-        self.dispatcher = Dispatcher(self.workers)
-        for w in self.workers:
-            w['worker'].start()
-
-    def stop(self):
-        """Shutdown all workers cleanly.
-        """
-        LOG.info('shutting down scheduler')
-        # Send a poison pill to all of the workers
-        for w in self.workers:
-            LOG.debug('sending stop message to %s', w['worker'].name)
-            w['queue'].put(None)
-        # Wait for the workers to finish and be ready to exit.
-        for w in self.workers:
-            LOG.debug('waiting for queue for %s', w['worker'].name)
-            w['queue'].close()
-            LOG.debug('waiting for worker %s', w['worker'].name)
-            w['worker'].join()
-        LOG.info(_LI('scheduler shutdown'))
-
-    def handle_message(self, target, message):
-        """Call this method when a new notification message is delivered. The
-        scheduler will distribute it to the appropriate worker.
-
-        :param target: UUID of the resource that needs to get the message.
-        :type target: uuid
-        :param message: Dictionary full of data to send to the target.
-        :type message: dict
-        """
-        for w in self.dispatcher.pick_workers(target):
-            w['queue'].put((target, message))
diff --git a/astara/state.py b/astara/state.py
deleted file mode 100644
index 41e63328..00000000
--- a/astara/state.py
+++ /dev/null
@@ -1,565 +0,0 @@
-# Copyright 2014 DreamHost, LLC
-#
-# Author: DreamHost, LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-"""State machine for managing a router.
-
-"""
-
-# See state machine diagram and description:
-# http://akanda.readthedocs.org/en/latest/rug.html#state-machine-workers-and-router-lifecycle
-
-import collections
-import itertools
-
-from astara.common.i18n import _LE, _LI, _LW
-from astara.event import (POLL, CREATE, READ, UPDATE, DELETE, REBUILD,
-                          CLUSTER_REBUILD)
-from astara import instance_manager
-from astara.drivers import states
-
-
-class StateParams(object):
-    def __init__(self, driver, instance, queue, bandwidth_callback,
-                 reboot_error_threshold):
-        self.resource = driver
-        self.instance = instance
-        self.log = driver.log
-        self.queue = queue
-        self.bandwidth_callback = bandwidth_callback
-        self.reboot_error_threshold = reboot_error_threshold
-        self.image_uuid = driver.image_uuid
-
-
-class State(object):
-
-    def __init__(self, params):
-        self.params = params
-
-    @property
-    def log(self):
-        return self.params.log
-
-    @property
-    def queue(self):
-        return self.params.queue
-
-    @property
-    def instance(self):
-        return self.params.instance
-
-    @property
-    def image_uuid(self):
-        return self.params.image_uuid
-
-    @property
-    def name(self):
-        return self.__class__.__name__
-
-    def __str__(self):
-        return self.name
-
-    def execute(self, action, worker_context):
-        return action
-
-    def transition(self, action, worker_context):
-        return self
-
-
-class CalcAction(State):
-    def execute(self, action, worker_context):
-        queue = self.queue
-        if DELETE in queue:
-            self.params.resource.log.debug('shortcutting to delete')
-            return DELETE
-
-        if (self.params.instance.state == states.DEGRADED and
-                CLUSTER_REBUILD not in queue):
-            self.params.resource.log.debug(
-                'Scheduling a rebuild on degraded cluster')
-            queue.append(CLUSTER_REBUILD)
-
-        while queue:
-            self.params.resource.log.debug(
-                'action = %s, len(queue) = %s, queue = %s',
-                action,
-                len(queue),
-                list(itertools.islice(queue, 0, 60))
-            )
-
-            if action == UPDATE and queue[0] == CREATE:
-                # upgrade to CREATE from UPDATE by taking the next
-                # item from the queue
-                self.params.resource.log.debug(
-                    'upgrading from update to create')
-                action = queue.popleft()
-                continue
-
-            elif (action in (CREATE, UPDATE, CLUSTER_REBUILD) and
-                  queue[0] == REBUILD):
-                # upgrade to REBUILD from CREATE/UPDATE by taking the next
-                # item from the queue
-                self.params.resource.log.debug('upgrading from %s to rebuild',
-                                               action)
-                action = queue.popleft()
-                continue
-
-            elif action == CREATE and queue[0] == UPDATE:
-                # CREATE implies an UPDATE so eat the update event
-                # without changing the action
-                self.params.resource.log.debug('merging create and update')
-                queue.popleft()
-                continue
-
-            elif action and queue[0] == POLL:
-                # Throw away a poll following any other valid action,
-                # because a create or update will automatically handle
-                # the poll and repeated polls are not needed.
-                self.params.resource.log.debug(
-                    'discarding poll event following action %s',
-                    action)
-                queue.popleft()
-                continue
-
-            elif action and action != POLL and action != queue[0]:
-                # We are not polling and the next action is something
-                # different from what we are doing, so just do the
-                # current action.
-                self.params.resource.log.debug('done collapsing events')
-                break
-
-            self.params.resource.log.debug('popping action from queue')
-            action = queue.popleft()
-
-        return action
-
-    def transition(self, action, worker_context):
-        if self.instance.state == states.GONE:
-            next_action = StopInstance(self.params)
-        elif action == DELETE:
-            next_action = StopInstance(self.params)
-        elif action == REBUILD:
-            next_action = RebuildInstance(self.params)
-        elif (action == CLUSTER_REBUILD and
-              self.instance.state in (states.DEGRADED, states.DOWN)):
-            next_action = CreateInstance(self.params)
-        elif self.instance.state == states.BOOTING:
-            next_action = CheckBoot(self.params)
-        elif self.instance.state in (states.DOWN, states.DEGRADED):
-            next_action = CreateInstance(self.params)
-        else:
-            next_action = Alive(self.params)
-
-        if self.instance.state == states.ERROR:
-            if action == POLL:
-                # If the selected action is to poll, and we are in an
-                # error state, then an event slipped through the
-                # filter in send_message() and we should ignore it
-                # here.
-                next_action = self
-            elif self.instance.error_cooldown:
-                self.params.resource.log.debug(
-                    'Resource is in ERROR cooldown, '
-                    'ignoring event.'
-                )
-                next_action = self
-            else:
-                # If this isn't a POLL, and the configured `error_cooldown`
-                # has passed, clear the error status before doing what we
-                # really want to do.
-                next_action = ClearError(self.params, next_action)
-        return next_action
-
-
-class PushUpdate(State):
-    """Put an update instruction on the queue for the state machine.
-    """
-    def execute(self, action, worker_context):
-        # Put the action back on the front of the queue.
-        self.queue.appendleft(UPDATE)
-        return action
-
-    def transition(self, action, worker_context):
-        return CalcAction(self.params)
-
-
-class ClearError(State):
-    """Remove the error state from the instance.
-    """
-
-    def __init__(self, params, next_state=None):
-        super(ClearError, self).__init__(params)
-        self._next_state = next_state
-
-    def execute(self, action, worker_context):
-        # If we are being told explicitly to update the instance, we should
-        # ignore any error status.
-        self.instance.clear_error(worker_context)
-        return action
-
-    def transition(self, action, worker_context):
-        if self._next_state:
-            return self._next_state
-        return CalcAction(self.params)
-
-
-class Alive(State):
-    def execute(self, action, worker_context):
-        self.instance.update_state(worker_context)
-        return action
-
-    def transition(self, action, worker_context):
-        if self.instance.state == states.GONE:
-            return StopInstance(self.params)
-        elif self.instance.state in (states.DOWN, states.DEGRADED):
-            return CreateInstance(self.params)
-        elif action == POLL and \
-                self.instance.state == states.CONFIGURED:
-            return CalcAction(self.params)
-        elif action == READ and \
-                self.instance.state == states.CONFIGURED:
-            return ReadStats(self.params)
-        else:
-            return ConfigureInstance(self.params)
-
-
-class CreateInstance(State):
-    def execute(self, action, worker_context):
-        # Check for a loop where the resource keeps failing to boot or
-        # accept the configuration.
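-        # e.g. with reboot_error_threshold=5, a fifth consecutive failed
-        # boot attempt parks the resource in ERROR instead of booting it
-        # again.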
- if (not self.instance.state == states.DEGRADED and - self.instance.attempts >= self.params.reboot_error_threshold): - self.params.resource.log.info(_LI( - 'Dropping out of boot loop after %s trials'), - self.instance.attempts) - self.instance.set_error(worker_context) - return action - self.instance.boot(worker_context) - self.params.resource.log.debug('CreateInstance attempt %s/%s', - self.instance.attempts, - self.params.reboot_error_threshold) - return action - - def transition(self, action, worker_context): - if self.instance.state == states.GONE: - return StopInstance(self.params) - elif self.instance.state == states.ERROR: - return CalcAction(self.params) - elif self.instance.state == states.DOWN: - return CreateInstance(self.params) - return CheckBoot(self.params) - - -class CheckBoot(State): - def execute(self, action, worker_context): - self.instance.update_state(worker_context) - self.params.resource.log.debug( - 'Instance is %s' % self.instance.state.upper()) - # Put the action back on the front of the queue so that we can yield - # and handle it in another state machine traversal (which will proceed - # from CalcAction directly to CheckBoot). - if self.instance.state not in (states.DOWN, - states.GONE): - self.queue.appendleft(action) - return action - - def transition(self, action, worker_context): - if self.instance.state == states.REPLUG: - return ReplugInstance(self.params) - if self.instance.state in (states.DOWN, - states.GONE): - return StopInstance(self.params) - if self.instance.state == states.UP: - return ConfigureInstance(self.params) - return CalcAction(self.params) - - -class ReplugInstance(State): - def execute(self, action, worker_context): - self.instance.replug(worker_context) - return action - - def transition(self, action, worker_context): - if self.instance.state == states.RESTART: - return StopInstance(self.params) - return ConfigureInstance(self.params) - - -class StopInstance(State): - def execute(self, action, worker_context): - self.instance.stop(worker_context) - if self.instance.state == states.GONE: - # Force the action to delete since the router isn't there - # any more. - return DELETE - return action - - def transition(self, action, worker_context): - if self.instance.state not in (states.DOWN, - states.GONE): - return self - if self.instance.state == states.GONE: - return Exit(self.params) - if action == DELETE: - return Exit(self.params) - return CreateInstance(self.params) - - -class RebuildInstance(State): - def execute(self, action, worker_context): - self.instance.stop(worker_context) - if self.instance.state == states.GONE: - # Force the action to delete since the router isn't there - # any more. 
- return DELETE - # Re-create the instance - self.instance.reset_boot_counter() - return CREATE - - def transition(self, action, worker_context): - if self.instance.state not in (states.DOWN, - states.GONE): - return self - if self.instance.state == states.GONE: - return Exit(self.params) - return CreateInstance(self.params) - - -class Exit(State): - pass - - -class ConfigureInstance(State): - def execute(self, action, worker_context): - self.instance.configure(worker_context) - if self.instance.state == states.CONFIGURED: - if action == READ: - return READ - else: - return POLL - else: - return action - - def transition(self, action, worker_context): - if self.instance.state == states.REPLUG: - return ReplugInstance(self.params) - if self.instance.state in (states.RESTART, - states.DOWN, - states.GONE): - return StopInstance(self.params) - if self.instance.state == states.UP: - return PushUpdate(self.params) - # Below here, assume instance.state == states.CONFIGURED - if action == READ: - return ReadStats(self.params) - return CalcAction(self.params) - - -class ReadStats(State): - def execute(self, action, worker_context): - stats = self.instance.read_stats() - self.params.bandwidth_callback(stats) - return POLL - - def transition(self, action, worker_context): - return CalcAction(self.params) - - -class Automaton(object): - def __init__(self, resource, tenant_id, - delete_callback, bandwidth_callback, - worker_context, queue_warning_threshold, - reboot_error_threshold): - """ - :param resource: An instantiated driver object for the managed resource - :param tenant_id: UUID of the tenant being managed - :type tenant_id: str - :param delete_callback: Invoked when the Automaton decides - the router should be deleted. - :type delete_callback: callable - :param bandwidth_callback: To be invoked when the Automaton needs to - report how much bandwidth a router has used. - :type bandwidth_callback: callable taking router_id and bandwidth - info dict - :param worker_context: a WorkerContext - :type worker_context: WorkerContext - :param queue_warning_threshold: Limit after which adding items - to the queue triggers a warning. - :type queue_warning_threshold: int - :param reboot_error_threshold: Limit after which trying to reboot - the router puts it into an error state. - :type reboot_error_threshold: int - """ - self.resource = resource - self.tenant_id = tenant_id - self._delete_callback = delete_callback - self._queue_warning_threshold = queue_warning_threshold - self._reboot_error_threshold = reboot_error_threshold - self.deleted = False - self.bandwidth_callback = bandwidth_callback - self._queue = collections.deque() - - self.action = POLL - self.instance = instance_manager.InstanceManager(self.resource, - worker_context) - self._state_params = StateParams( - self.resource, - self.instance, - self._queue, - self.bandwidth_callback, - self._reboot_error_threshold, - ) - self.state = CalcAction(self._state_params) - - @property - def resource_id(self): - """Returns the ID of the managed resource""" - return self.resource.id - - def service_shutdown(self): - "Called when the parent process is being stopped" - - def _do_delete(self): - if self._delete_callback is not None: - self.resource.log.debug('calling delete callback') - self._delete_callback() - # Avoid calling the delete callback more than once. 
- self._delete_callback = None - # Remember that this router has been deleted - self.deleted = True - - def update(self, worker_context): - "Called when the router config should be changed" - while self._queue: - while True: - if self.deleted: - self.resource.log.debug( - 'skipping update because the router is being deleted' - ) - return - - try: - self.resource.log.debug( - '%s.execute(%s) instance.state=%s', - self.state, - self.action, - self.instance.state) - self.action = self.state.execute( - self.action, - worker_context, - ) - self.resource.log.debug( - '%s.execute -> %s instance.state=%s', - self.state, - self.action, - self.instance.state) - except: - self.resource.log.exception( - _LE('%s.execute() failed for action: %s'), - self.state, - self.action - ) - - old_state = self.state - self.state = self.state.transition( - self.action, - worker_context, - ) - self.resource.log.debug( - '%s.transition(%s) -> %s instance.state=%s', - old_state, - self.action, - self.state, - self.instance.state - ) - - # Yield control each time we stop to figure out what - # to do next. - if isinstance(self.state, CalcAction): - return # yield - - # We have reached the exit state, so the router has - # been deleted somehow. - if isinstance(self.state, Exit): - self._do_delete() - return - - def send_message(self, message): - "Called when the worker put a message in the state machine queue" - if self.deleted: - # Ignore any more incoming messages - self.resource.log.debug( - 'deleted state machine, ignoring incoming message %s', - message) - return False - - # NOTE(dhellmann): This check is largely redundant with the - # one in CalcAction.transition() but it may allow us to avoid - # adding poll events to the queue at all, and therefore cut - # down on the number of times a worker thread wakes up to - # process something on a router that isn't going to actually - # do any work. - if message.crud == POLL and \ - self.instance.state == states.ERROR: - self.resource.log.info(_LI( - 'Resource status is ERROR, ignoring POLL message: %s'), - message, - ) - return False - - if message.crud == REBUILD: - if message.body.get('image_uuid'): - self.resource.log.info(_LI( - 'Resource is being REBUILT with custom image %s'), - message.body['image_uuid'] - ) - self.image_uuid = message.body['image_uuid'] - else: - self.image_uuid = self.resource.image_uuid - - self._queue.append(message.crud) - queue_len = len(self._queue) - if queue_len > self._queue_warning_threshold: - logger = self.resource.log.warning - else: - logger = self.resource.log.debug - logger(_LW('incoming message brings queue length to %s'), queue_len) - return True - - @property - def image_uuid(self): - return self.state.params.image_uuid - - @image_uuid.setter - def image_uuid(self, value): - self.state.params.image_uuid = value - - def has_more_work(self): - "Called to check if there are more messages in the state machine queue" - return (not self.deleted) and bool(self._queue) - - def has_error(self): - return self.instance.state == states.ERROR - - def drop_queue(self): - """Drop all pending actions from the local state machine's work queue. - - This is used after a ring rebalance if this state machine no longer - maps to the local Rug process. 
- """ - self.resource.log.info( - 'Dropping %s pending actions from queue', len(self._queue)) - self._queue.clear() diff --git a/astara/tenant.py b/astara/tenant.py deleted file mode 100644 index ee4ae620..00000000 --- a/astara/tenant.py +++ /dev/null @@ -1,224 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -"""Manage the resources for a given tenant. -""" - -import datetime - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import timeutils - -from astara.common.i18n import _LE -from astara import state -from astara import drivers -from astara.common import container - - -LOG = logging.getLogger(__name__) - -tenant_opts = [ - cfg.BoolOpt('enable_byonf', default=False, - help='Whether to enable bring-your-own-network-function ' - 'support via operator supplied drivers and images.'), -] -cfg.CONF.register_opts(tenant_opts) - - -class InvalidIncomingMessage(Exception): - pass - - -class StateMachineContainer(container.ResourceContainer): - def unmanage(self, resource_id): - """Used to delete a state machine from local management - - Removes the local state machine from orchestrator management during - cluster events. This is different than deleting the resource in that - it does not tag the resource as also deleted from Neutron, which would - prevent us from recreating its state machine if the resource later ends - up back under this orchestrators control. - - :param resource_id: The resource id to unmanage - """ - try: - with self.lock: - sm = self.resources.pop(resource_id) - sm.drop_queue() - LOG.debug('unmanaged tenant state machine for resource %s', - resource_id) - except KeyError: - pass - - -class TenantResourceManager(object): - """Keep track of the state machines for the logical resources for a given - tenant. 
- """ - - def __init__(self, tenant_id, delete_callback, notify_callback, - queue_warning_threshold, - reboot_error_threshold): - self.tenant_id = tenant_id - self.delete = delete_callback - self.notify = notify_callback - self._queue_warning_threshold = queue_warning_threshold - self._reboot_error_threshold = reboot_error_threshold - self.state_machines = StateMachineContainer() - self._default_resource_id = None - - def _delete_resource(self, resource): - "Called when the Automaton decides the resource can be deleted" - if resource.id in self.state_machines: - LOG.debug('deleting state machine for %s', resource.id) - del self.state_machines[resource.id] - if self._default_resource_id == resource.id: - self._default_resource_id = None - self.delete(resource) - - def unmanage_resource(self, resource_id): - self.state_machines.unmanage(resource_id) - - def shutdown(self): - LOG.info('shutting down') - for resource_id, sm in self.state_machines.items(): - try: - sm.service_shutdown() - except Exception: - LOG.exception(_LE( - 'Failed to shutdown state machine for %s'), resource_id - ) - - def _report_bandwidth(self, resource_id, bandwidth): - LOG.debug('reporting bandwidth for %s', resource_id) - msg = { - 'tenant_id': self.tenant_id, - 'timestamp': datetime.datetime.isoformat(timeutils.utcnow()), - 'event_type': 'astara.bandwidth.used', - 'payload': dict((b.pop('name'), b) for b in bandwidth), - 'uuid': resource_id, - } - self.notify(msg) - - def get_all_state_machines(self): - return self.state_machines.values() - - def get_state_machines(self, message, worker_context): - """Return the state machines and the queue for sending it messages for - the logical resource being addressed by the message. - """ - if (not message.resource or - (message.resource and not message.resource.id)): - LOG.error(_LE( - 'Cannot get state machine for message with ' - 'no message.resource')) - raise InvalidIncomingMessage() - - state_machines = [] - - # Send to all of our resources. - if message.resource.id == '*': - LOG.debug('routing to all state machines') - state_machines = self.state_machines.values() - - # Ignore messages to deleted resources. - elif self.state_machines.has_been_deleted(message.resource.id): - LOG.debug('dropping message for deleted resource') - return [] - - # Send to resources that have an ERROR status - elif message.resource.id == 'error': - state_machines = [ - sm for sm in self.state_machines.values() - if sm.has_error() - ] - LOG.debug('routing to %d errored state machines', - len(state_machines)) - - # Create a new state machine for this router. - elif message.resource.id not in self.state_machines: - LOG.debug('creating state machine for %s', message.resource.id) - - # load the driver - if not message.resource.driver: - LOG.error(_LE('cannot create state machine without specifying' - 'a driver.')) - return [] - - resource_obj = self._load_resource_from_message( - worker_context, message) - - if not resource_obj: - # this means the driver didn't load for some reason.. - # this might not be needed at all. 
- LOG.debug('driver failed to load; ignoring message') - return [] - - def deleter(): - self._delete_resource(message.resource) - - new_state_machine = state.Automaton( - resource=resource_obj, - tenant_id=self.tenant_id, - delete_callback=deleter, - bandwidth_callback=self._report_bandwidth, - worker_context=worker_context, - queue_warning_threshold=self._queue_warning_threshold, - reboot_error_threshold=self._reboot_error_threshold, - ) - self.state_machines[message.resource.id] = new_state_machine - state_machines = [new_state_machine] - - # Send directly to an existing router. - elif message.resource.id: - state_machines = [self.state_machines[message.resource.id]] - - # Filter out any deleted state machines. - return [ - machine - for machine in state_machines - if (not machine.deleted and - not self.state_machines.has_been_deleted(machine.resource.id)) - ] - - def get_state_machine_by_resource_id(self, resource_id): - try: - return self.state_machines[resource_id] - except KeyError: - return None - - def _load_resource_from_message(self, worker_context, message): - if cfg.CONF.enable_byonf: - byonf_res = worker_context.neutron.tenant_has_byo_for_function( - tenant_id=self.tenant_id.replace('-', ''), - function_type=message.resource.driver) - - if byonf_res: - try: - return drivers.load_from_byonf( - worker_context, - byonf_res, - message.resource.id) - except drivers.InvalidDriverException: - LOG.exception(_LE( - 'Could not load BYONF driver, falling back to ' - 'configured image')) - - return drivers.get(message.resource.driver)( - worker_context, message.resource.id) diff --git a/astara/test/__init__.py b/astara/test/__init__.py deleted file mode 100644 index 63621fff..00000000 --- a/astara/test/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/astara/test/functional/__init__.py b/astara/test/functional/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/astara/test/functional/base.py b/astara/test/functional/base.py deleted file mode 100755 index d5c13c77..00000000 --- a/astara/test/functional/base.py +++ /dev/null @@ -1,629 +0,0 @@ -# Copyright (c) 2015 Akanda, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
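-# This module provides the functional test harness: ClientManager wraps -# per-tenant keystone/neutron/nova clients, AdminClientManager adds -# admin-only helpers for locating the Nova instances that back astara -# appliances, TestTenant creates (and tears down) a disposable tenant, and -# AstaraFunctionalBase is the testtools base class the tests inherit from. -# A minimal usage sketch (assuming a populated test.conf): -# -# tenant = TestTenant() -# network, router = tenant.setup_networking() -# # ... exercise the router ... -# tenant.cleanUp()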
- -import netaddr -import os -import six -import subprocess -import testtools -import time - -from oslo_config import cfg -from oslo_log import log as logging - -from astara.api import astara_client - -from keystoneclient import client as _keystoneclient -from keystoneclient import auth as ksauth -from keystoneclient import session as kssession - -from neutronclient.v2_0 import client as _neutronclient -from novaclient import client as _novaclient - -from keystoneclient import exceptions as ksc_exceptions -from neutronclient.common import exceptions as neutron_exceptions - -from tempest_lib.common.utils import data_utils - -from astara.test.functional import config - -DEFAULT_CONFIG = os.path.join(os.path.dirname(__file__), 'test.conf') -DEFAULT_ACTIVE_TIMEOUT = 340 -DEFAULT_DELETE_TIMEOUT = 60 -DEFAULT_DOMAIN = 'default' - - -config.register_opts() -CONF = cfg.CONF -logging.register_options(CONF) - -LOG = None - - -def parse_config(): - config_file = os.environ.get('AKANDA_TEST_CONFIG', - DEFAULT_CONFIG) - cfg.CONF( - [], project='astara-orchestrator-functional', - default_config_files=[config_file]) - logging.set_defaults(default_log_levels=[ - 'paramiko.transport=INFO', - 'neutronclient=WARN', - 'keystoneclient=WARN', - ]) - logging.setup(CONF, 'astara_functional') - global LOG - LOG = logging.getLogger(__name__) - - -class ClientManager(object): - """A client manager using specified credentials""" - def __init__(self, username, password, tenant_name, auth_url): - parse_config() - self.username = username - self.password = password - self.tenant_name = tenant_name - self.auth_url = auth_url - - self._keystoneclient = None - self._neutronclient = None - self._novaclient = None - - @property - def auth_version(self): - if self.auth_url.endswith('v3') or self.auth_url.endswith('identity'): - return 3 - else: - return 2.0 - - @property - def keystone_session(self): - auth_plugin = ksauth.get_plugin_class('password') - _args = { - 'auth_url': self.auth_url, - 'username': self.username, - 'password': self.password, - } - if self.auth_version == 3: - _args.update({ - 'user_domain_name': DEFAULT_DOMAIN, - 'project_domain_name': DEFAULT_DOMAIN, - 'project_name': self.tenant_name, - }) - else: - _args.update({ - 'tenant_name': self.tenant_name, - }) - _auth = auth_plugin(**_args) - return kssession.Session(auth=_auth) - - @property - def novaclient(self): - if not self._novaclient: - self._novaclient = _novaclient.Client( - version=2, - session=self.keystone_session, - ) - return self._novaclient - - @property - def neutronclient(self): - if not self._neutronclient: - self._neutronclient = _neutronclient.Client( - session=self.keystone_session, - ) - return self._neutronclient - - @property - def keystoneclient(self): - if not self._keystoneclient: - client = _keystoneclient.Client(session=self.keystone_session) - self._keystoneclient = client - return self._keystoneclient - - @property - def tenant_id(self): - return self.keystoneclient.tenant_id - - -class ApplianceServerNotFound(Exception): - pass - - -class ApplianceServerTimeout(Exception): - pass - - -class AdminClientManager(ClientManager): - """A client manager using credentials loaded from test.conf, which - are assumed to be admin. 
- """ - def __init__(self): - parse_config() - super(AdminClientManager, self).__init__( - username=CONF.os_username, - password=CONF.os_password, - tenant_name=CONF.os_tenant_name, - auth_url=CONF.os_auth_url, - ) - - def get_router_appliance_server(self, router_uuid, retries=10, - wait_for_active=False, ha_router=False): - """Returns a Nova server object for router""" - LOG.debug( - 'Looking for nova backing instance for resource %s', - router_uuid) - - if ha_router: - exp_instances = 2 - else: - exp_instances = 1 - - for i in six.moves.range(retries): - service_instances = \ - [instance for instance in - self.novaclient.servers.list( - search_opts={ - 'all_tenants': 1, - 'tenant_id': CONF.service_tenant_id} - ) if router_uuid in instance.name] - - if service_instances and len(service_instances) == exp_instances: - LOG.debug( - 'Found %s backing instance for resource %s: %s', - exp_instances, router_uuid, service_instances) - break - LOG.debug('%s backing instance not found, will retry %s/%s', - exp_instances, i, retries) - time.sleep(1) - else: - raise ApplianceServerNotFound( - 'Could not get nova %s server(s) for router %s' % - (exp_instances, router_uuid)) - - def _wait_for_active(instance): - LOG.debug('Waiting for backing instance %s to become ACTIVE', - instance) - for i in six.moves.range(CONF.appliance_active_timeout): - instance = self.novaclient.servers.get( - instance.id) - if instance.status == 'ACTIVE': - LOG.debug('Instance %s status==ACTIVE', instance) - return - else: - LOG.debug('Instance %s status==%s, will wait', - instance, instance.status) - time.sleep(1) - raise ApplianceServerTimeout( - ('Timed out waiting for backing instance of %s %s to become ' - 'ACTIVE') % router_uuid) - - if wait_for_active: - LOG.debug('Waiting for %s backing instances to become ACTIVE', - exp_instances) - - [_wait_for_active(i) for i in service_instances] - LOG.debug('Waiting for backing instance %s to become ACTIVE', - exp_instances) - - if ha_router: - return sorted(service_instances, key=lambda i: i.name) - else: - return service_instances[0] - - def get_network_info(self, network_name): - net_response = self.neutronclient.list_networks(name=network_name) - network = net_response.get('networks', [None])[0] - return network - - -class TestTenant(object): - def __init__(self): - parse_config() - self.username = data_utils.rand_name(name='user', prefix='akanda') - self.user_id = None - self.password = data_utils.rand_password() - self.tenant_name = data_utils.rand_name(name='tenant', prefix='akanda') - self.tenant_id = None - self.role_name = data_utils.rand_name(name='role', prefix='akanda') - - self._admin_clients = AdminClientManager() - self._admin_ks_client = self._admin_clients.keystoneclient - self.auth_url = self._admin_ks_client.auth_url - - # create the tenant before creating its clients. 
- self._create_tenant() - - self.clients = ClientManager(self.username, self.password, - self.tenant_name, self.auth_url) - self.tester = ClientManager('demo', 'akanda', 'demo', self.auth_url) - - self._subnets = [] - self._routers = [] - - def _create_tenant(self): - if self._admin_clients.auth_version == 3: - tenant = self._admin_ks_client.projects.create( - name=self.tenant_name, - domain=DEFAULT_DOMAIN) - user = self._admin_ks_client.users.create( - name=self.username, - password=self.password, - project_domain_name=DEFAULT_DOMAIN, - default_project=self.tenant_name) - role = self._admin_ks_client.roles.create(name=self.role_name) - self._admin_ks_client.roles.grant( - role=role, user=user, project=tenant) - else: - tenant = self._admin_ks_client.tenants.create(self.tenant_name) - self.tenant_id = tenant.id - user = self._admin_ks_client.users.create( - name=self.username, - password=self.password, - tenant_id=self.tenant_id) - self.user_id = user.id - self.tenant_id = tenant.id - LOG.debug('Created new test tenant: %s (%s)', - self.tenant_id, self.user_id) - - def setup_networking(self, ha_router=False): - """Create a network + subnet for the tenant. Also creates a router - if required, and attaches the subnet to it. - - :returns: a (network dict, router dict) tuple - """ - # NOTE(adam_g): I didn't expect simply creating a network - # to also create a subnet and router automatically, but this - # does? - net_body = { - 'network': { - 'name': data_utils.rand_name(name='network', prefix='ak'), - 'admin_state_up': False, - 'tenant_id': self.tenant_id - }} - LOG.debug('Creating network: %s', net_body) - network = self.clients.neutronclient.create_network(net_body) - network = network.get('network') - if not network: - raise Exception('Failed to create default tenant network') - LOG.debug('Created network: %s', network) - - if not CONF.astara_auto_add_resources: - addr = netaddr.IPNetwork(CONF.test_subnet_cidr) - subnet_body = { - 'subnet': { - 'name': data_utils.rand_name(name='subnet', prefix='ak'), - 'network_id': network['id'], - 'cidr': CONF.test_subnet_cidr, - 'ip_version': addr.version, - } - } - LOG.debug('Creating subnet: %s', subnet_body) - subnet = self.clients.neutronclient.create_subnet( - body=subnet_body)['subnet'] - LOG.debug('Created subnet: %s', subnet) - router_body = { - 'router': { - 'name': data_utils.rand_name(name='router', prefix='ak'), - 'admin_state_up': True, - 'tenant_id': self.tenant_id, - 'ha': ha_router, - } - } - LOG.debug('Creating router: %s', router_body) - router = self._admin_clients.neutronclient.create_router( - body=router_body)['router'] - LOG.debug('Created router: %s', router) - - LOG.debug( - 'Attaching interface on subnet %s to router %s', - subnet['id'], router['id']) - self.clients.neutronclient.add_interface_router( - router['id'], {'subnet_id': subnet['id']} - ) - LOG.debug( - 'Attached interface on subnet %s to router %s', - subnet['id'], router['id']) - - else: - # routers report as ACTIVE initially (LP: #1491673) - time.sleep(2) - LOG.debug('Waiting for astara auto-created router') - for i in six.moves.range(CONF.appliance_active_timeout): - routers = self.clients.neutronclient.list_routers() - routers = routers.get('routers') - if routers: - router = routers[0] - LOG.debug('Found astara auto-created router: %s', router) - break - else: - LOG.debug( - 'Still waiting for auto-created router.
%s/%s', - i, CONF.appliance_active_timeout) - time.sleep(1) - else: - raise Exception('Timed out waiting for default router.') - - # routers report as ACTIVE initially (LP: #1491673) - time.sleep(2) - return network, router - - def _wait_for_backing_instance_delete(self, resource_id): - LOG.debug( - 'Waiting on deletion of backing instance for resource %s', - resource_id) - - for i in six.moves.range(DEFAULT_DELETE_TIMEOUT): - try: - self._admin_clients.get_router_appliance_server( - resource_id, retries=1) - except ApplianceServerNotFound: - LOG.debug('Backing instance for resource %s deleted', - resource_id) - return - - LOG.debug( - 'Still waiting for deletion of backing instance for %s' - ', will wait (%s/%s)', - resource_id, i, DEFAULT_DELETE_TIMEOUT) - time.sleep(1) - - m = ('Timed out waiting on deletion of backing instance for %s ' - 'after %s sec.' % (resource_id, DEFAULT_DELETE_TIMEOUT)) - LOG.debug(m) - raise ApplianceServerTimeout(m) - - def _wait_for_neutron_delete(self, thing, ids): - show = getattr(self.clients.neutronclient, 'show_' + thing) - max_attempts = 10 - for i in ids: - attempt = 0 - LOG.debug('Waiting for deletion of %s %s', thing, i) - while True: - try: - show(i) - except neutron_exceptions.NeutronClientException as e: - if e.status_code == 404: - LOG.debug('Deletion of %s %s complete', thing, i) - break - if attempt == max_attempts: - raise Exception( - 'Timed out waiting for deletion of %s %s after %s sec.' - % (thing, i, max_attempts)) - LOG.debug( - 'Still waiting for deletion of %s %s, will wait (%s/%s)', - thing, i, attempt, max_attempts) - attempt += 1 - time.sleep(1) - - # also wait for the nova backing instance to delete after routers - if thing in ['router']: - [self._wait_for_backing_instance_delete(i) for i in ids] - - def cleanup_neutron(self): - """Clean tenant environment of neutron resources""" - LOG.debug('Cleaning up created neutron resources') - router_interface_ports = [ - p for p in self.clients.neutronclient.list_ports()['ports'] - if ( - 'router_interface' in p['device_owner'] or - 'ha_router_replicated_interface' in p['device_owner'] - )] - - for rip in router_interface_ports: - LOG.debug('Deleting router interface port: %s', rip) - self.clients.neutronclient.remove_interface_router( - rip['device_id'], - body=dict(port_id=rip['id'])) - - astara_router_ports = [] - router_ids = [ - r['id'] for r in - self.clients.neutronclient.list_routers().get('routers') - ] - - for rid in list(router_ids): - for p in ['MGT', 'VRRP']: - name = 'ASTARA:%s:%s' % (p, rid) - astara_router_ports += [ - p['id'] for p in - self._admin_clients.neutronclient.list_ports( - name=name).get('ports')] - - LOG.debug('Deleting router %s', rid) - - try: - self.clients.neutronclient.delete_router(rid) - except neutron_exceptions.NeutronClientException as e: - if e.status_code == 404: - router_ids.remove(rid) - else: - raise - self._wait_for_neutron_delete('router', router_ids) - - time.sleep(2) - - port_ids = [ - p['id'] for p in - self.clients.neutronclient.list_ports().get('ports')] - for pid in list(port_ids): - LOG.debug('Deleting port: %s', pid) - try: - self.clients.neutronclient.delete_port(pid) - except neutron_exceptions.NeutronClientException as e: - if e.status_code == 404: - port_ids.remove(pid) - else: - raise - self._wait_for_neutron_delete('port', port_ids) - - subnet_ids = [ - s['id'] - for s in self.clients.neutronclient.list_subnets().get('subnets')] - for sid in list(subnet_ids): - LOG.debug('Deleting subnet: %s', sid) - try: -
self.clients.neutronclient.delete_subnet(sid) - except neutron_exceptions.NeutronClientException as e: - if e.status_code == 404: - subnet_ids.remove(sid) - else: - raise - self._wait_for_neutron_delete('subnet', subnet_ids) - - # make sure the VRRP and MGT ports are deleted - # before delete_network() is called below - for p in list(astara_router_ports): - try: - self._admin_clients.neutronclient.delete_port(p) - except neutron_exceptions.NeutronClientException as e: - if e.status_code == 404: - astara_router_ports.remove(p) - else: - raise - self._wait_for_neutron_delete('port', astara_router_ports) - - networks = self.clients.neutronclient.list_networks().get('networks') - net_ids = [ - n['id'] for n in networks if n['tenant_id'] == self.tenant_id] - for nid in list(net_ids): - LOG.debug('Deleting network: %s', nid) - try: - self.clients.neutronclient.delete_network(nid) - except neutron_exceptions.NeutronClientException as e: - if e.status_code == 404: - net_ids.remove(nid) - else: - raise - - self._wait_for_neutron_delete('network', net_ids) - - def cleanUp(self): - self.cleanup_neutron() - - self._admin_ks_client.users.delete(self.user_id) - if self._admin_clients.auth_version == 3: - self._admin_ks_client.projects.delete(self.tenant_id) - else: - self._admin_ks_client.tenants.delete(self.tenant_id) - - -class AstaraFunctionalBase(testtools.TestCase): - _test_tenants = [] - - def setUp(self): - super(AstaraFunctionalBase, self).setUp() - log_format = '%(asctime)s.%(msecs)03d ' + self.id() + ' %(message)s' - cfg.CONF.set_default('logging_default_format_string', log_format) - parse_config() - self.ak_client = astara_client - self.admin_clients = AdminClientManager() - - @classmethod - def setUpClass(cls): - cls._test_tenants = [] - - @classmethod - def tearDownClass(cls): - try: - [t.cleanUp() for t in cls._test_tenants] - except ksc_exceptions.NotFound: - pass - - @classmethod - def get_tenant(cls): - """Create a new test tenant - - This tenant is assumed to be empty of any cloud resources - and will be destroyed on test class teardown.
- """ - tenant = TestTenant() - cls._test_tenants.append(tenant) - return tenant - - def get_router_appliance_server(self, router_uuid, retries=10, - wait_for_active=False, ha_router=False): - """Returns a Nova server object for router""" - return self.admin_clients.get_router_appliance_server( - router_uuid, retries, wait_for_active, ha_router) - - def get_management_address(self, router_uuid, retries=10): - LOG.debug('Getting management address for resource %s', router_uuid) - - service_instance = self.get_router_appliance_server( - router_uuid, - retries=retries, - wait_for_active=True - ) - - mgt_network = self.admin_clients.get_network_info( - CONF.management_network_name - ) - - for interface in service_instance.interface_list(): - if interface.net_id == mgt_network['id']: - addr = interface.fixed_ips[0]['ip_address'] - LOG.debug( - 'Got management address %s for resource %s', - addr, - router_uuid - ) - return addr - else: - raise Exception( - '"mgt" port not found on service instance %s (%s)' % - (service_instance.id, service_instance.name)) - - def assert_router_is_active(self, router_uuid, ha_router=False): - LOG.debug('Waiting for resource %s to become ACTIVE', router_uuid) - for i in six.moves.range(CONF.appliance_active_timeout): - res = self.admin_clients.neutronclient.show_router(router_uuid) - router = res['router'] - if router['status'] == 'ACTIVE': - LOG.debug('Router %s ACTIVE after %s sec.', router_uuid, i) - return - - service_instances = self.get_router_appliance_server( - router_uuid, wait_for_active=True, ha_router=ha_router) - if not ha_router: - service_instances = [service_instances] - - for instance in service_instances: - if instance.status == 'ERROR': - raise Exception( - 'Backing instance %s for router %s in ERROR state', - instance.id, router_uuid) - - LOG.debug( - 'Resource %s not active. Status==%s, will wait, %s/%s sec.', - router_uuid, router['status'], i, - CONF.appliance_active_timeout) - time.sleep(1) - - raise Exception( - 'Timed out waiting for router %s to become ACTIVE, ' - 'current status=%s' % (router_uuid, router['status'])) - - def ping_router_mgt_address(self, router_uuid): - mgt_address = self.get_management_address(router_uuid) - program = {4: 'ping', 6: 'ping6'} - - mgt_ip_version = netaddr.IPNetwork(mgt_address).version - cmd = [program[mgt_ip_version], '-c30', mgt_address] - LOG.debug('Pinging resource %s: %s', router_uuid, ' '.join(cmd)) - try: - subprocess.check_call(cmd) - except: - raise Exception('Failed to ping router with command: %s' % cmd) diff --git a/astara/test/functional/config.py b/astara/test/functional/config.py deleted file mode 100644 index 17c09a3a..00000000 --- a/astara/test/functional/config.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright (c) 2016 Akanda, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -import itertools -from oslo_config import cfg - - -functional_test_opts = [ - cfg.StrOpt( - 'os_auth_url', required=True, - help='Keystone auth URL'), - cfg.StrOpt( - 'os_username', required=True, - help='Username of admin user'), - cfg.StrOpt( - 'os_password', required=True, - help='Password of admin user'), - cfg.StrOpt( - 'os_tenant_name', required=True, - help='Tenant name of admin user'), - cfg.StrOpt( - 'service_tenant_id', required=True, - help='Tenant ID for the astara service user'), - cfg.StrOpt( - 'service_tenant_name', required=True, - help='Tenant name of the astara service user'), - cfg.StrOpt( - 'appliance_api_port', required=True, - help='The port on which appliance API servers listen'), - cfg.BoolOpt( - 'astara_auto_add_resources', required=False, default=True, - help='Whether astara-neutron is configured to auto-add resources'), - cfg.IntOpt( - 'appliance_active_timeout', required=False, default=340, - help='Timeout (sec) for an appliance to become ACTIVE'), - cfg.StrOpt( - 'test_subnet_cidr', required=False, default='10.1.1.0/24'), - cfg.IntOpt( - 'health_check_period', required=False, default=60, - help='The health_check_period value astara-orchestrator is ' 'configured to use'), - cfg.StrOpt( - 'management_network_name', required=False, default='mgt', - help='The name of the management network') -] - - -def list_opts(): - return [ - ('functional', - itertools.chain(functional_test_opts))] - - -def register_opts(): - cfg.CONF.register_opts(functional_test_opts) diff --git a/astara/test/functional/test.conf b/astara/test/functional/test.conf deleted file mode 100644 index 73564bc6..00000000 --- a/astara/test/functional/test.conf +++ /dev/null @@ -1,42 +0,0 @@ -[DEFAULT] - - -[functional] - -# -# From astara.test.functional -# - -# Password of admin user (string value) -#os_password = - -# The port on which appliance API servers listen (string value) -#appliance_api_port = - -# Timeout (sec) for an appliance to become ACTIVE (integer value) -#appliance_active_timeout = 340 - -# The health_check_period value astara-orchestrator is configured to use -# (integer value) -#health_check_period = 60 - -# Tenant ID for the astara service user (string value) -#service_tenant_id = - -# Tenant name of admin user (string value) -#os_tenant_name = - -# Keystone auth URL (string value) -#os_auth_url = - -# Whether astara-neutron is configured to auto-add resources (boolean value) -#astara_auto_add_resources = true - -# (string value) -#test_subnet_cidr = 10.1.1.0/24 - -# Username of admin user (string value) -#os_username = - -# Tenant name of the astara service user (string value) -#service_tenant_name = diff --git a/astara/test/functional/test_tenant_router.py b/astara/test/functional/test_tenant_router.py deleted file mode 100644 index 5191e4df..00000000 --- a/astara/test/functional/test_tenant_router.py +++ /dev/null @@ -1,134 +0,0 @@ -# Copyright (c) 2015 Akanda, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
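-# End-to-end tests for tenant routers: build a network/subnet/router -# through Neutron, then assert that astara boots, monitors and (after a -# manual nova delete) replaces the instances backing the appliance.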
- -import time - -from oslo_config import cfg -from oslo_log import log as logging - -from astara.test.functional import base - - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -class AstaraRouterTestBase(base.AstaraFunctionalBase): - HA_ROUTER = False - - @classmethod - def setUpClass(cls): - super(AstaraRouterTestBase, cls).setUpClass() - cls.tenant = cls.get_tenant() - cls.neutronclient = cls.tenant.clients.neutronclient - cls.network, cls.router = cls.tenant.setup_networking( - ha_router=cls.HA_ROUTER) - - def setUp(self): - super(AstaraRouterTestBase, self).setUp() - self.assert_router_is_active(self.router['id'], self.HA_ROUTER) - - # refresh the router ref now that it is active - router = self.neutronclient.show_router(self.router['id']) - self.router = router['router'] - - @property - def router_ha(self): - router = self.admin_clients.neutronclient.show_router( - self.router['id'])['router'] - return router.get('ha', False) - - -class TestAstaraHARouter(AstaraRouterTestBase): - HA_ROUTER = True - - def test_ha_router_servers(self): - service_instances = self.get_router_appliance_server( - self.router['id'], ha_router=self.HA_ROUTER) - self.assertEqual(2, len(service_instances)) - - # kill the master and ensure it is backfilled with a new instance - master, backup = service_instances - self.admin_clients.novaclient.servers.delete(master.id) - - LOG.debug('Waiting %s seconds for astara health check to tick', - CONF.health_check_period) - time.sleep(CONF.health_check_period) - - service_instances = self.get_router_appliance_server( - self.router['id'], retries=600, ha_router=self.HA_ROUTER) - self.assertEqual(2, len(service_instances)) - self.assertEqual(backup, service_instances[0]) - - -class TestAstaraRouter(AstaraRouterTestBase): - HA_ROUTER = False - - def test_router_recovery(self): - """ - Test that creation of network/subnet/router results in a - correctly plugged appliance, and that manually destroying the - Nova instance results in a new appliance being booted. - """ - - self.skipTest("Race condition makes this test too unstable") - - # for each subnet that was created during setup, ensure we have a - # router interface added - ports = self.neutronclient.list_ports( - device_owner='network:router_interface')['ports'] - subnets = self.neutronclient.list_subnets( - network_id=self.network['id']) - subnets = subnets['subnets'] - self.assertEqual(len(ports), len(subnets)) - for port in ports: - self.assertEqual(port['device_id'], self.router['id']) - self.assertEqual( - sorted([subnet['id'] for subnet in subnets]), - sorted([fip['subnet_id'] for fip in port['fixed_ips']]) - ) - - self.ping_router_mgt_address(self.router['id']) - - # Ensure that if we destroy the nova instance, the RUG will rebuild - # the router with a new instance. - # This could live in a separate test case but it'd require the - # above as setup, so just piggyback on it. - - old_server = self.get_router_appliance_server(self.router['id']) - LOG.debug('Original server: %s', old_server) - - # NOTE(adam_g): In the gate, sometimes the appliance hangs on the - # first config update and health checks get queued up behind the - # hanging config update. If that's the case, we need to wait a while - # before deletion for the first to timeout.
- time.sleep(30) - LOG.debug('Deleting original nova server: %s', old_server.id) - self.admin_clients.novaclient.servers.delete(old_server.id) - - LOG.debug('Waiting %s seconds for astara health check to tick', - CONF.health_check_period) - time.sleep(CONF.health_check_period) - - # look for the new server, retry giving rug time to do its thing. - new_server = self.get_router_appliance_server( - self.router['id'], retries=600, wait_for_active=True) - LOG.debug('Rebuilt new server found: %s', new_server) - self.assertNotEqual(old_server.id, new_server.id) - - # routers report as ACTIVE initially (LP: #1491673) - time.sleep(2) - - self.assert_router_is_active(self.router['id']) - self.ping_router_mgt_address(self.router['id']) diff --git a/astara/test/unit/__init__.py b/astara/test/unit/__init__.py deleted file mode 100644 index 63621fff..00000000 --- a/astara/test/unit/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/astara/test/unit/api/__init__.py b/astara/test/unit/api/__init__.py deleted file mode 100644 index 63621fff..00000000 --- a/astara/test/unit/api/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/astara/test/unit/api/config/__init__.py b/astara/test/unit/api/config/__init__.py deleted file mode 100644 index 63621fff..00000000 --- a/astara/test/unit/api/config/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/astara/test/unit/api/config/config_fakes.py b/astara/test/unit/api/config/config_fakes.py deleted file mode 100644 index 26ff0971..00000000 --- a/astara/test/unit/api/config/config_fakes.py +++ /dev/null @@ -1,127 +0,0 @@ -# Copyright (c) 2015 Akanda, Inc. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import netaddr - -from astara.api.neutron import Subnet - - -class FakeModel(object): - def __init__(self, id_, **kwargs): - self.id = id_ - self.__dict__.update(kwargs) - - -fake_ext_port = FakeModel( - '1', - mac_address='aa:bb:cc:dd:ee:ff', - network_id='ext-net', - fixed_ips=[FakeModel('', ip_address='9.9.9.9', subnet_id='s2')], - first_v4='9.9.9.9', - device_id='e-e-e-e') - - -fake_mgt_port = FakeModel( - '2', - name='ASTARA:MGT:foo', - mac_address='aa:bb:cc:cc:bb:aa', - network_id='mgt-net', - device_id='m-m-m-m') - -fake_int_port = FakeModel( - '3', - name='ASTARA:RUG:foo', - mac_address='aa:aa:aa:aa:aa:aa', - network_id='int-net', - fixed_ips=[FakeModel('', ip_address='192.168.1.1', subnet_id='s1')], - device_id='i-i-i-i') - -fake_instance_port = FakeModel( - '4', - name='foo', - mac_address='aa:aa:aa:aa:aa:bb', - network_id='int-net', - fixed_ips=[FakeModel('', ip_address='192.168.1.2', subnet_id='s1')], - first_v4='192.168.1.2', - device_id='v-v-v-v') - -fake_instance_mgt_port = FakeModel( - '4', - name='ASTARA:MGT:foo', - mac_address='aa:aa:aa:aa:aa:bb', - network_id='int-net', - fixed_ips=[FakeModel('', ip_address='192.168.1.2', subnet_id='s1')], - first_v4='192.168.1.2', - device_id='v-v-v-v') - -fake_instance_vrrp_port = FakeModel( - '4', - name='ASTARA:VRRP:foo', - mac_address='aa:aa:aa:aa:aa:bb', - network_id='int-net', - fixed_ips=[FakeModel('', ip_address='192.168.1.2', subnet_id='s1')], - first_v4='192.168.1.2', - device_id='v-v-v-v') - -fake_instance_lb_port = FakeModel( - '4', - name='ASTARA:LB:foo', - mac_address='aa:aa:aa:aa:aa:bb', - network_id='int-net', - fixed_ips=[FakeModel('', ip_address='192.168.1.2', subnet_id='s1')], - first_v4='192.168.1.2', - device_id='v-v-v-v') - -fake_subnet = FakeModel( - 's1', - cidr=netaddr.IPNetwork('192.168.1.0/24'), - gateway_ip='192.168.1.1', - enable_dhcp=True, - dns_nameservers=['8.8.8.8'], - ipv6_ra_mode=None, - host_routes={}) - -fake_subnet_with_slaac = Subnet( - id_='fake_id', - name='s1', - tenant_id='fake_tenant_id', - network_id='fake_network_id', - ip_version=6, - cidr='fdee:9f85:83be::/48', - gateway_ip='fdee:9f85:83be::1', - enable_dhcp=True, - dns_nameservers=['8.8.8.8'], - ipv6_ra_mode='slaac', - host_routes={}) - -fake_network = FakeModel( - 'fake_network_id', - name='thenet', - tenant_id='tenant_id', - status='ACTIVE', - shared=False, - admin_state_up=True, - mtu=1280, - port_security_enabled=False, - subnets=[fake_subnet] -) - -fake_router = FakeModel( - 'router_id', - tenant_id='tenant_id', - name='router_name', - external_port=fake_ext_port, - management_port=fake_mgt_port, - internal_ports=[fake_int_port], - ha=False) diff --git a/astara/test/unit/api/config/test_common_config.py b/astara/test/unit/api/config/test_common_config.py deleted file mode 100644 index 70209a68..00000000 --- a/astara/test/unit/api/config/test_common_config.py +++ /dev/null @@ -1,202 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache
License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import mock -import netaddr -import unittest2 as unittest - -from astara.api.config import common - -from astara.test.unit.api.config import config_fakes as fakes - - -class TestCommonConfig(unittest.TestCase): - def test_network_config(self): - mock_client = mock.Mock() - mock_client.get_network_detail.return_value = fakes.fake_network - subnets_dict = {fakes.fake_subnet.id: fakes.fake_subnet} - - with mock.patch.object(common, '_make_network_config_dict') as nc: - with mock.patch.object(common, '_interface_config') as ic: - mock_interface = mock.Mock() - ic.return_value = mock_interface - - common.network_config( - mock_client, - fakes.fake_int_port, - 'ge1', - 'internal', - []) - - ic.assert_called_once_with( - 'ge1', fakes.fake_int_port, subnets_dict, 1280) - nc.assert_called_once_with( - mock_interface, - 'internal', - 'int-net', - mtu=1280, - subnets_dict=subnets_dict, - network_ports=[]), - - def test_make_network_config(self): - interface = {'ifname': 'ge2'} - - result = common._make_network_config_dict( - interface, - 'internal', - fakes.fake_int_port.network_id, - 1280, - 'dhcp', - 'ra', - subnets_dict={fakes.fake_subnet.id: fakes.fake_subnet}, - network_ports=[fakes.fake_instance_port]) - - expected = { - 'interface': interface, - 'network_id': fakes.fake_int_port.network_id, - 'v4_conf_service': 'dhcp', - 'v6_conf_service': 'ra', - 'network_type': 'internal', - 'mtu': 1280, - 'subnets': [{'cidr': '192.168.1.0/24', - 'dhcp_enabled': True, - 'dns_nameservers': ['8.8.8.8'], - 'gateway_ip': '192.168.1.1', - 'host_routes': {}, - 'id': 's1'}], - 'allocations': [ - { - 'mac_address': 'aa:aa:aa:aa:aa:bb', - 'ip_addresses': {'192.168.1.2': True}, - 'hostname': '192-168-1-2.local', - 'device_id': 'v-v-v-v' - } - ] - } - self.assertEqual(expected, result) - - def test_interface_config(self): - expected = { - 'addresses': ['192.168.1.1/24'], - 'ifname': 'ge1', - 'mtu': 1280 - } - subnets_dict = {fakes.fake_subnet.id: fakes.fake_subnet} - - self.assertEqual( - expected, - common._interface_config( - 'ge1', - fakes.fake_int_port, - subnets_dict, - 1280 - ) - ) - - def test_subnet_config(self): - expected = { - 'cidr': '192.168.1.0/24', - 'dhcp_enabled': True, - 'dns_nameservers': ['8.8.8.8'], - 'gateway_ip': '192.168.1.1', - 'host_routes': {}, - 'id': 's1', - } - self.assertEqual(expected, common._subnet_config(fakes.fake_subnet)) - - def test_subnet_config_with_slaac_enabled(self): - expected = { - 'cidr': 'fdee:9f85:83be::/48', - 'dhcp_enabled': False, - 'dns_nameservers': ['8.8.8.8'], - 'gateway_ip': 'fdee:9f85:83be::1', - 'host_routes': {}, - 'id': 'fake_id', - } - self.assertEqual( - expected, common._subnet_config(fakes.fake_subnet_with_slaac)) - - def test_subnet_config_no_gateway(self): - expected = { - 'cidr': '192.168.1.0/24', - 'dhcp_enabled': True, - 'dns_nameservers': ['8.8.8.8'], - 'gateway_ip': '', - 'host_routes': {}, - 'id': 's1', - } - sn = fakes.FakeModel( - 's1', - cidr=netaddr.IPNetwork('192.168.1.0/24'), - gateway_ip='', - enable_dhcp=True, - 
dns_nameservers=['8.8.8.8'], - ipv6_ra_mode='', - host_routes={}) - self.assertEqual(expected, common._subnet_config(sn)) - - def test_subnet_config_gateway_none(self): - expected = { - 'cidr': '192.168.1.0/24', - 'dhcp_enabled': True, - 'dns_nameservers': ['8.8.8.8'], - 'gateway_ip': '', - 'host_routes': {}, - 'id': 's1', - } - sn = fakes.FakeModel( - 's1', - cidr=netaddr.IPNetwork('192.168.1.0/24'), - gateway_ip=None, - enable_dhcp=True, - dns_nameservers=['8.8.8.8'], - ipv6_ra_mode='', - host_routes={}) - self.assertEqual(expected, common._subnet_config(sn)) - - def test_allocation_config_vrrp(self): - subnets_dict = {fakes.fake_subnet.id: fakes.fake_subnet} - self.assertEqual( - [], - common._allocation_config( - [fakes.fake_instance_vrrp_port], - subnets_dict) - ) - - def test_allocation_config_lb(self): - subnets_dict = {fakes.fake_subnet.id: fakes.fake_subnet} - self.assertEqual( - [], - common._allocation_config( - [fakes.fake_instance_lb_port], - subnets_dict) - ) - - def test_allocation_config_mgt(self): - subnets_dict = {fakes.fake_subnet.id: fakes.fake_subnet} - expected = [ - {'mac_address': 'aa:aa:aa:aa:aa:bb', - 'ip_addresses': {'192.168.1.2': True}, - 'hostname': '192-168-1-2.local', - 'device_id': 'v-v-v-v'} - ] - self.assertEqual( - expected, - common._allocation_config([ - fakes.fake_instance_mgt_port], - subnets_dict) - ) diff --git a/astara/test/unit/api/config/test_loadbalancer_config.py b/astara/test/unit/api/config/test_loadbalancer_config.py deleted file mode 100644 index 4a677869..00000000 --- a/astara/test/unit/api/config/test_loadbalancer_config.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
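-# Tests for the loadbalancer appliance config builder: build_config() -# should combine the VIP and management network configs with the -# loadbalancer definition into a single appliance config dict.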
- - -import mock - -from astara.api.config import loadbalancer as lb_config -from astara.test.unit import base, fakes - - -class TestLoadbalancerConfigAPI(base.RugTestBase): - @mock.patch('astara.api.config.common.network_config') - def test_build_config(self, fake_network_config): - fake_client = mock.Mock() - fake_lb = fakes.fake_loadbalancer() - fake_lb_net = mock.Mock() - fake_mgt_net = mock.Mock() - fake_mgt_port = mock.Mock( - network_id='fake_mgt_network_id', - ) - fake_iface_map = { - fake_lb.vip_port.network_id: fake_lb_net, - fake_mgt_port.network_id: fake_mgt_net, - } - fake_network_config.side_effect = [ - 'fake_lb_net_dict', 'fake_mgt_net_dict' - ] - res = lb_config.build_config( - fake_client, fake_lb, fake_mgt_port, fake_iface_map) - expected = { - 'hostname': 'ak-loadbalancer-%s' % fake_lb.tenant_id, - 'tenant_id': fake_lb.tenant_id, - 'networks': ['fake_lb_net_dict', 'fake_mgt_net_dict'], - 'services': { - 'loadbalancer': fake_lb.to_dict(), - } - } - self.assertEqual(expected, res) diff --git a/astara/test/unit/api/config/test_router_config.py b/astara/test/unit/api/config/test_router_config.py deleted file mode 100644 index 43066a39..00000000 --- a/astara/test/unit/api/config/test_router_config.py +++ /dev/null @@ -1,292 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
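-# Tests for the router appliance config builder -# (astara.api.config.router): build_config(), provider rule loading, -# per-network config generation and default IPv4 gateway selection.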
- - -import mock -import netaddr -from oslo_config import cfg -import unittest2 as unittest -from six.moves import builtins as __builtins__ - -from astara.api.config import router as conf_mod - -from astara.test.unit.api.config import config_fakes as fakes - - -class TestAstaraClient(unittest.TestCase): - def setUp(self): - cfg.CONF.set_override('provider_rules_path', '/the/path') - - def tearDown(self): - cfg.CONF.reset() - - def test_build_config(self): - methods = { - 'load_provider_rules': mock.DEFAULT, - 'generate_network_config': mock.DEFAULT, - 'generate_floating_config': mock.DEFAULT, - 'get_default_v4_gateway': mock.DEFAULT, - } - fake_orchestrator = { - 'host': 'foohost', - 'address': '10.0.0.1', - 'metadata_port': 80, - } - - mock_client = mock.Mock() - mock_context = mock.Mock( - neutron=mock_client, - config=fake_orchestrator, - ) - ifaces = [] - provider_rules = {'labels': {'ext': ['192.168.1.1']}} - network_config = [ - {'interface': 1, - 'network_id': 2, - 'v4_conf_service': 'static', - 'v6_conf_service': 'static', - 'network_type': 'external', - 'subnets': [ - {'cidr': '192.168.1.0/24', - 'dhcp_enabled': True, - 'dns_nameservers': [], - 'host_routes': [], - 'gateway_ip': '192.168.1.1', - }, - {'cidr': '10.0.0.0/24', - 'dhcp_enabled': True, - 'dns_nameservers': [], - 'host_routes': [], - 'gateway_ip': '10.0.0.1', - }, ], - 'allocations': []} - ] - - with mock.patch.multiple(conf_mod, **methods) as mocks: - mocks['load_provider_rules'].return_value = provider_rules - mocks['generate_network_config'].return_value = network_config - mocks['generate_floating_config'].return_value = 'floating_config' - mocks['get_default_v4_gateway'].return_value = 'default_gw' - - config = conf_mod.build_config(mock_context, fakes.fake_router, - fakes.fake_mgt_port, ifaces) - - expected = { - 'default_v4_gateway': 'default_gw', - 'networks': network_config, - 'labels': {'ext': ['192.168.1.1']}, - 'floating_ips': 'floating_config', - 'asn': 64512, - 'neighbor_asn': 64512, - 'tenant_id': 'tenant_id', - 'ha_resource': False, - 'hostname': 'ak-tenant_id', - 'orchestrator': { - 'host': 'foohost', - 'address': '10.0.0.1', - 'metadata_port': 80, - }, - 'vpn': {} - } - - self.assertEqual(expected, config) - - mocks['load_provider_rules'].assert_called_once_with('/the/path') - mocks['generate_network_config'].assert_called_once_with( - mock_client, fakes.fake_router, fakes.fake_mgt_port, ifaces) - - def test_load_provider_rules(self): - rules_dict = {'labels': {}, 'preanchors': [], 'postanchors': []} - with mock.patch('oslo_serialization.jsonutils.load') as load: - load.return_value = rules_dict - with mock.patch('six.moves.builtins.open') as mock_open: - r = conf_mod.load_provider_rules('/the/path') - - mock_open.assert_called_once_with('/the/path') - load.assert_called_once_with(mock_open.return_value) - self.assertEqual(rules_dict, r) - - @mock.patch.object(__builtins__, 'open', autospec=True) - def test_load_provider_rules_not_found(self, mock_open): - mock_open.side_effect = IOError() - res = conf_mod.load_provider_rules('/tmp/path') - self.assertEqual({}, res) - - @mock.patch('astara.api.config.common.network_config') - def test_generate_network_config(self, mock_net_conf): - mock_client = mock.Mock() - - iface_map = { - fakes.fake_mgt_port.network_id: 'ge0', - fakes.fake_ext_port.network_id: 'ge1', - fakes.fake_int_port.network_id: 'ge2' - } - - mock_net_conf.return_value = 'configured_network' - - result = conf_mod.generate_network_config( - mock_client, fakes.fake_router, fakes.fake_mgt_port,
iface_map) - - expected = [ - 'configured_network', - 'configured_network', - 'configured_network' - ] - - self.assertEqual(expected, result) - - expected_calls = [ - mock.call( - mock_client, fakes.fake_router.management_port, - 'ge0', 'management'), - mock.call( - mock_client, fakes.fake_router.external_port, - 'ge1', 'external'), - mock.call( - mock_client, fakes.fake_int_port, - 'ge2', 'internal', mock.ANY)] - for c in expected_calls: - self.assertIn(c, mock_net_conf.call_args_list) - mock_net_conf.assert_has_calls(expected_calls) - - def test_generate_floating_config(self): - fip = fakes.FakeModel( - 'id', - floating_ip=netaddr.IPAddress('9.9.9.9'), - fixed_ip=netaddr.IPAddress('192.168.1.1') - ) - - rtr = fakes.FakeModel('rtr_id', floating_ips=[fip]) - - result = conf_mod.generate_floating_config(rtr) - expected = [{'floating_ip': '9.9.9.9', 'fixed_ip': '192.168.1.1'}] - - self.assertEqual(expected, result) - - -class TestAstaraClientGateway(unittest.TestCase): - - def setUp(self): - cfg.CONF.set_override('provider_rules_path', '/the/path') - # Sample data taken from a real devstack-created system, with - # the external MAC address modified to match the fake port in - # use for the mocked router. - self.networks = [ - {'subnets': [ - {'host_routes': [], - 'cidr': '172.16.77.0/24', - 'gateway_ip': '172.16.77.1', - 'dns_nameservers': [], - 'dhcp_enabled': True, - 'network_type': 'external'}, - {'host_routes': [], - 'cidr': 'fdee:9f85:83be::/48', - 'gateway_ip': 'fdee:9f85:83be::1', - 'dns_nameservers': [], - 'dhcp_enabled': True}], - 'v6_conf_service': 'static', - 'network_id': u'1e109e80-4a6a-483e-9dd4-2ff31adf25f5', - 'allocations': [], - 'interface': {'ifname': u'ge1', - 'addresses': [ - '172.16.77.2/24', - 'fdee:9f85:83be:0:f816:3eff:fee5:1742/48', - ]}, - 'v4_conf_service': 'static', - 'network_type': 'external'}, - {'subnets': [], - 'v6_conf_service': 'static', - 'network_id': u'698ef1d1-1089-48ab-80b0-f994a962891c', - 'allocations': [], - 'interface': { - u'addresses': [ - u'fe80::f816:3eff:fe4d:bf12/64', - u'fdca:3ba5:a17a:acda:f816:3eff:fe4d:bf12/64', - ], - u'media': u'Ethernet autoselect', - u'lladdr': u'fa:16:3e:4d:bf:12', - u'state': u'up', - u'groups': [], - u'ifname': u'ge0', - u'mtu': 1500, - u'description': u''}, - 'v4_conf_service': 'static', - 'network_type': 'management'}, - {'subnets': [ - {'host_routes': [], - 'cidr': 'fdd6:a1fa:cfa8:6c94::/64', - 'gateway_ip': 'fdd6:a1fa:cfa8:6c94::1', - 'dns_nameservers': [], - 'dhcp_enabled': False}, - {'host_routes': [], - 'cidr': '192.168.0.0/24', - 'gateway_ip': '192.168.0.1', - 'dns_nameservers': [], - 'dhcp_enabled': True}], - 'v6_conf_service': 'static', - 'network_id': u'a1ea2256-5e57-4e9e-8b7a-8bf17eb76b73', - 'allocations': [ - {'mac_address': u'fa:16:3e:1b:93:76', - 'ip_addresses': { - 'fdd6:a1fa:cfa8:6c94::1': False, - '192.168.0.1': True}, - 'hostname': '192-168-0-1.local', - 'device_id': u'c72a34fb-fb56-4ee7-b9b2-6467eb1c45d6'}], - 'interface': {'ifname': u'ge2', - 'addresses': ['192.168.0.1/24', - 'fdd6:a1fa:cfa8:6c94::1/64']}, - 'v4_conf_service': 'static', - 'network_type': 'internal'}] - - def tearDown(self): - cfg.CONF.reset() - - def test_with_interfaces(self): - mock_client = mock.Mock() - result = conf_mod.get_default_v4_gateway( - mock_client, - fakes.fake_router, - self.networks, - ) - self.assertEqual('172.16.77.1', result) - - def test_without_ipv4_on_external_port(self): - # Only set a V6 address - self.networks[0]['interface']['addresses'] = [ - 'fdee:9f85:83be:0:f816:3eff:fee5:1742/48', - ] - 
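- # with no IPv4 address left on ge1, get_default_v4_gateway() - # should return the empty string.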
mock_client = mock.Mock() - result = conf_mod.get_default_v4_gateway( - mock_client, - fakes.fake_router, - self.networks, - ) - self.assertEqual('', result) - - def test_extra_ipv4_on_external_port(self): - self.networks[0]['interface']['addresses'] = [ - u'fe80::f816:3eff:fe4d:bf12/64', - u'fdca:3ba5:a17a:acda:f816:3eff:fe4d:bf12/64', - u'192.168.1.1', - u'172.16.77.2', - ] - mock_client = mock.Mock() - result = conf_mod.get_default_v4_gateway( - mock_client, - fakes.fake_router, - self.networks, - ) - self.assertEqual('172.16.77.1', result) diff --git a/astara/test/unit/api/test_astara_router.py b/astara/test/unit/api/test_astara_router.py deleted file mode 100644 index 57be751f..00000000 --- a/astara/test/unit/api/test_astara_router.py +++ /dev/null @@ -1,140 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import mock -import unittest2 as unittest - -from astara.api import astara_client - - -class TestAstaraClient(unittest.TestCase): - def setUp(self): - self.mock_create_session = mock.patch.object( - astara_client, - '_get_proxyless_session' - ).start() - self.mock_get = self.mock_create_session.return_value.get - self.mock_put = self.mock_create_session.return_value.put - self.mock_post = self.mock_create_session.return_value.post - - self.addCleanup(mock.patch.stopall) - - def test_mgt_url(self): - self.assertEqual('http://[fe80::2]:5000/', - astara_client._mgt_url('fe80::2', 5000, '/')) - self.assertEqual('http://192.168.1.1:5000/', - astara_client._mgt_url('192.168.1.1', 5000, '/')) - - def test_is_alive_success(self): - self.mock_get.return_value.status_code = 200 - - self.assertTrue(astara_client.is_alive('fe80::2', 5000)) - self.mock_get.assert_called_once_with( - 'http://[fe80::2]:5000/v1/firewall/rules', - timeout=3.0 - ) - - def test_is_alive_bad_status(self): - self.mock_get.return_value.status_code = 500 - - self.assertFalse(astara_client.is_alive('fe80::2', 5000)) - self.mock_get.assert_called_once_with( - 'http://[fe80::2]:5000/v1/firewall/rules', - timeout=3.0 - ) - - def test_is_alive_exception(self): - self.mock_get.side_effect = Exception - - self.assertFalse(astara_client.is_alive('fe80::2', 5000)) - self.mock_get.assert_called_once_with( - 'http://[fe80::2]:5000/v1/firewall/rules', - timeout=3.0 - ) - - def test_get_interfaces(self): - self.mock_get.return_value.status_code = 200 - self.mock_get.return_value.json.return_value = { - 'interfaces': 'the_interfaces' - } - - self.assertEqual('the_interfaces', - astara_client.get_interfaces('fe80::2', 5000)) - self.mock_get.assert_called_once_with( - 'http://[fe80::2]:5000/v1/system/interfaces', - timeout=30 - ) - - def test_update_config(self): - config = {'foo': 'bar'} - self.mock_put.return_value.status_code = 200 - self.mock_put.return_value.json.return_value = config - - resp = astara_client.update_config('fe80::2', 5000, config) - - self.mock_put.assert_called_once_with( - 'http://[fe80::2]:5000/v1/system/config', - data=b'{"foo": "bar"}', - 
headers={'Content-type': 'application/json'}, - timeout=90) - self.assertEqual(config, resp) - - def test_update_config_with_custom_config(self): - config = {'foo': 'bar'} - self.mock_put.return_value.status_code = 200 - self.mock_put.return_value.json.return_value = config - - with mock.patch.object(astara_client.cfg, 'CONF') as cfg: - cfg.config_timeout = 5 - resp = astara_client.update_config('fe80::2', 5000, config) - - self.mock_put.assert_called_once_with( - 'http://[fe80::2]:5000/v1/system/config', - data=b'{"foo": "bar"}', - headers={'Content-type': 'application/json'}, - timeout=5) - self.assertEqual(config, resp) - - def test_update_config_failure(self): - config = {'foo': 'bar'} - - self.mock_put.return_value.status_code = 500 - self.mock_put.return_value.text = 'error_text' - - with self.assertRaises(Exception): - astara_client.update_config('fe80::2', 5000, config) - - self.mock_put.assert_called_once_with( - 'http://[fe80::2]:5000/v1/system/config', - data=b'{"foo": "bar"}', - headers={'Content-type': 'application/json'}, - timeout=90 - ) - - def test_read_labels(self): - self.mock_post.return_value.status_code = 200 - self.mock_post.return_value.json.return_value = { - 'labels': ['label1', 'label2'] - } - resp = astara_client.read_labels('fe80::2', 5000) - - self.mock_post.assert_called_once_with( - 'http://[fe80::2]:5000/v1/firewall/labels', - timeout=30 - ) - - self.assertEqual(['label1', 'label2'], resp) diff --git a/astara/test/unit/api/test_keystone.py b/astara/test/unit/api/test_keystone.py deleted file mode 100644 index 1699cb64..00000000 --- a/astara/test/unit/api/test_keystone.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright (c) 2015 Akanda, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from astara.api import keystone - -from astara.test.unit import base - -from oslo_config import cfg - - -class KeystoneTest(base.RugTestBase): - def setUp(self): - super(KeystoneTest, self).setUp() - self.config(auth_region='foo_regin') - - @mock.patch('keystoneclient.session.Session') - @mock.patch('keystoneclient.auth.load_from_conf_options') - def test_session(self, mock_load_auth, mock_session): - fake_auth = mock.Mock() - mock_load_auth.return_value = fake_auth - fake_session = mock.Mock() - mock_session.return_value = fake_session - ks_session = keystone.KeystoneSession().session - mock_load_auth.assert_called_with(cfg.CONF, 'keystone_authtoken') - mock_session.assert_called_with(auth=fake_auth) - self.assertEqual(fake_session, ks_session) diff --git a/astara/test/unit/api/test_neutron_wrapper.py b/astara/test/unit/api/test_neutron_wrapper.py deleted file mode 100644 index f4fb4c77..00000000 --- a/astara/test/unit/api/test_neutron_wrapper.py +++ /dev/null @@ -1,518 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import six -import mock -import netaddr - -from oslo_config import cfg - -from astara.test.unit import base, fakes -from astara.api import neutron - - -class TestuNeutronModels(base.RugTestBase): - def test_router(self): - r = neutron.Router( - '1', 'tenant_id', 'name', True, 'ACTIVE', 'ext', ['int'], ['fip']) - self.assertEqual('1', r.id) - self.assertEqual('tenant_id', r.tenant_id) - self.assertEqual('name', r.name) - self.assertTrue(r.admin_state_up) - self.assertEqual('ACTIVE', r.status) - self.assertEqual('ext', r.external_port) - self.assertEqual(['fip'], r.floating_ips) - self.assertEqual(['int'], r.internal_ports) - self.assertEqual(set(['ext', 'int']), set(r.ports)) - - def test_router_from_dict(self): - p = { - 'name': 'ext', - 'id': 'ext', - 'device_id': 'device_id', - 'fixed_ips': [], - 'mac_address': 'aa:bb:cc:dd:ee:ff', - 'network_id': 'net_id', - 'device_owner': 'network:router_gateway' - } - - int_p = { - 'name': 'int', - 'id': 'int', - 'device_id': 'device_id', - 'fixed_ips': [], - 'mac_address': 'aa:bb:cc:dd:ee:ee', - 'network_id': 'net_id', - 'device_owner': 'network:router_interface' - } - - int_ha_p = { - 'name': 'ha_int', - 'id': 'ha_int', - 'device_id': 'device_id', - 'fixed_ips': [], - 'mac_address': 'aa:bb:cc:dd:ee:ee', - 'network_id': 'net_id', - 'device_owner': 'network:ha_router_replicated_interface' - } - - fip = { - 'id': 'fip', - 'floating_ip_address': '9.9.9.9', - 'fixed_ip_address': '192.168.1.1' - } - - d = { - 'id': '1', - 'tenant_id': 'tenant_id', - 'name': 'name', - 'admin_state_up': True, - 'status': 'ACTIVE', - 'ports': [p], - '_floatingips': [fip], - '_interfaces': [int_p, int_ha_p], - } - - r = neutron.Router.from_dict(d) - - self.assertEqual('1', r.id) - self.assertEqual('tenant_id', r.tenant_id) - self.assertEqual('name', r.name) - self.assertTrue(r.admin_state_up) - self.assertTrue(r.floating_ips) # just make sure this exists - self.assertEqual( - ['ha_int', 'int'], - sorted([ip.id for ip in r.internal_ports])) - - def test_router_eq(self): - r1 = neutron.Router( - '1', 'tenant_id', 'name', True, 'ext', ['int'], 'mgt') - r2 = neutron.Router( - '1', 'tenant_id', 'name', True, 'ext', ['int'], 'mgt') - - self.assertEqual(r1, r2) - - def test_router_ne(self): - r1 = neutron.Router( - '1', 'tenant_id', 'name', True, 'ext', ['int'], 'mgt') - r2 = neutron.Router( - '2', 'tenant_id', 'name', True, 'ext', ['int'], 'mgt') - - self.assertNotEqual(r1, r2) - - def test_subnet_model(self): - d = { - 'id': '1', - 'tenant_id': 'tenant_id', - 'name': 'name', - 'network_id': 'network_id', - 'ip_version': 6, - 'cidr': 'fe80::/64', - 'gateway_ip': 'fe80::1', - 'enable_dhcp': True, - 'dns_nameservers': ['8.8.8.8', '8.8.4.4'], - 'ipv6_ra_mode': 'slaac', - 'host_routes': [] - } - - s = neutron.Subnet.from_dict(d) - - self.assertEqual('1', s.id) - self.assertEqual('tenant_id', s.tenant_id) - self.assertEqual('name', s.name) - self.assertEqual('network_id', s.network_id) - self.assertEqual(6, s.ip_version) - self.assertEqual(netaddr.IPNetwork('fe80::/64'), s.cidr) - self.assertEqual(netaddr.IPAddress('fe80::1'), s.gateway_ip) - self.assertTrue(s.enable_dhcp, 
True) - self.assertEqual(['8.8.8.8', '8.8.4.4'], s.dns_nameservers) - self.assertEqual([], s.host_routes) - - def test_subnet_gateway_none(self): - d = { - 'id': '1', - 'tenant_id': 'tenant_id', - 'name': 'name', - 'network_id': 'network_id', - 'ip_version': 6, - 'cidr': 'fe80::/64', - 'gateway_ip': None, - 'enable_dhcp': True, - 'dns_nameservers': ['8.8.8.8', '8.8.4.4'], - 'ipv6_ra_mode': 'slaac', - 'host_routes': [] - } - s = neutron.Subnet.from_dict(d) - self.assertEqual(netaddr.IPNetwork('fe80::/64'), s.cidr) - self.assertIsNone(s.gateway_ip) - - def test_subnet_gateway_not_ip(self): - d = { - 'id': '1', - 'tenant_id': 'tenant_id', - 'name': 'name', - 'network_id': 'network_id', - 'ip_version': 6, - 'cidr': 'fe80::/64', - 'gateway_ip': 'something-that-is-not-an-ip', - 'enable_dhcp': True, - 'dns_nameservers': ['8.8.8.8', '8.8.4.4'], - 'ipv6_ra_mode': 'slaac', - 'host_routes': [] - } - s = neutron.Subnet.from_dict(d) - self.assertEqual(netaddr.IPNetwork('fe80::/64'), s.cidr) - self.assertIsNone(s.gateway_ip) - - def test_subnet_cidr_none(self): - d = { - 'id': '1', - 'tenant_id': 'tenant_id', - 'name': 'name', - 'network_id': 'network_id', - 'ip_version': 6, - 'cidr': None, - 'gateway_ip': 'fe80::1', - 'enable_dhcp': True, - 'dns_nameservers': ['8.8.8.8', '8.8.4.4'], - 'ipv6_ra_mode': 'slaac', - 'host_routes': [] - } - try: - neutron.Subnet.from_dict(d) - except ValueError as e: - self.assertIn('Invalid CIDR', six.text_type(e)) - - def test_subnet_cidr_not_valid(self): - d = { - 'id': '1', - 'tenant_id': 'tenant_id', - 'name': 'name', - 'network_id': 'network_id', - 'ip_version': 6, - 'cidr': 'something-that-is-not-an-ip', - 'gateway_ip': 'fe80::1', - 'enable_dhcp': True, - 'dns_nameservers': ['8.8.8.8', '8.8.4.4'], - 'ipv6_ra_mode': 'slaac', - 'host_routes': [] - } - try: - neutron.Subnet.from_dict(d) - except ValueError as e: - self.assertIn('Invalid CIDR', six.text_type(e)) - - def test_port_model(self): - d = { - 'id': '1', - 'name': 'name', - 'device_id': 'device_id', - 'fixed_ips': [{'ip_address': '192.168.1.1', 'subnet_id': 'sub1'}], - 'mac_address': 'aa:bb:cc:dd:ee:ff', - 'network_id': 'net_id', - 'device_owner': 'test' - } - - p = neutron.Port.from_dict(d) - - self.assertEqual('1', p.id) - self.assertEqual('device_id', p.device_id) - self.assertEqual('aa:bb:cc:dd:ee:ff', p.mac_address) - self.assertEqual('test', p.device_owner) - self.assertEqual(1, len(p.fixed_ips)) - - def test_fixed_ip_model(self): - d = { - 'subnet_id': 'sub1', - 'ip_address': '192.168.1.1' - } - - fip = neutron.FixedIp.from_dict(d) - - self.assertEqual('sub1', fip.subnet_id) - self.assertEqual(netaddr.IPAddress('192.168.1.1'), fip.ip_address) - - def test_floating_ip_model(self): - d = { - 'id': 'a-b-c-d', - 'floating_ip_address': '9.9.9.9', - 'fixed_ip_address': '192.168.1.1' - } - - fip = neutron.FloatingIP.from_dict(d) - - self.assertEqual('a-b-c-d', fip.id) - self.assertEqual(netaddr.IPAddress('9.9.9.9'), fip.floating_ip) - self.assertEqual(netaddr.IPAddress('192.168.1.1'), fip.fixed_ip) - - -class FakeConf: - admin_user = 'admin' - admin_password = 'password' - admin_tenant_name = 'admin' - auth_url = 'http://127.0.0.1/' - auth_strategy = 'keystone' - auth_region = 'RegionOne' - - -class TestNeutronWrapper(base.RugTestBase): - @mock.patch('astara.api.neutron.cfg') - @mock.patch('astara.api.neutron.AstaraExtClientWrapper') - @mock.patch('astara.api.neutron.importutils') - def test_purge_management_interface(self, import_utils, ak_wrapper, cfg): - conf = mock.Mock() - driver = mock.Mock() - 
import_utils.import_object.return_value = driver - - neutron_wrapper = neutron.Neutron(conf) - neutron_wrapper.purge_management_interface() - self.assertEqual(1, driver.get_device_name.call_count) - self.assertEqual(1, driver.unplug.call_count) - - def test_clear_device_id(self): - neutron_wrapper = neutron.Neutron(mock.Mock()) - neutron_wrapper.api_client.update_port = mock.Mock() - neutron_wrapper.clear_device_id(mock.Mock(id='PORT1')) - neutron_wrapper.api_client.update_port.assert_called_once_with( - 'PORT1', {'port': {'device_id': ''}} - ) - - @mock.patch('astara.api.neutron.AstaraExtClientWrapper') - def test_neutron_router_status_update_error(self, client_wrapper): - urs = client_wrapper.return_value.update_status - urs.side_effect = RuntimeError('should be caught') - conf = mock.Mock() - neutron_wrapper = neutron.Neutron(conf) - neutron_wrapper.update_router_status('router-id', 'new-status') - - @mock.patch('astara.api.neutron.AstaraExtClientWrapper') - def _test_create_vrrp_port_success_hlpr(self, ext_enabled, client_wrapper): - conf = mock.Mock() - conf.neutron_port_security_extension_enabled = ext_enabled - - expected_port_data = { - 'port': { - 'name': 'ASTARA:VRRP:obj_id', - 'admin_state_up': True, - 'network_id': 'the_net_id', - 'fixed_ips': [], - 'security_groups': [] - } - } - - if ext_enabled: - expected_port_data['port']['port_security_enabled'] = False - - neutron_wrapper = neutron.Neutron(conf) - api_client = neutron_wrapper.api_client - with mock.patch.object(api_client, 'create_port') as create_port: - with mock.patch.object(neutron.Port, 'from_dict') as port_from_d: - retval = neutron_wrapper.create_vrrp_port( - 'obj_id', - 'the_net_id' - ) - - self.assertIs(retval, port_from_d.return_value) - port_from_d.assert_called_once_with( - create_port.return_value.get() - ) - create_port.assert_called_once_with( - expected_port_data - ) - - def test_create_vrrp_port_success(self): - self._test_create_vrrp_port_success_hlpr(True) - - def test_create_vrrp_port_success_port_security_disabled(self): - self._test_create_vrrp_port_success_hlpr(False) - - @mock.patch('astara.api.neutron.AstaraExtClientWrapper') - def test_create_vrrp_port_error(self, client_wrapper): - neutron_wrapper = neutron.Neutron(mock.Mock()) - api_client = neutron_wrapper.api_client - with mock.patch.object(api_client, 'create_port') as create_port: - create_port.return_value.get.return_value = None - self.assertRaises( - ValueError, - neutron_wrapper.create_vrrp_port, - 'obj_id', - 'the_net_id' - ) - - @mock.patch('astara.api.neutron.AstaraExtClientWrapper') - def test_delete_vrrp_ports(self, client_wrapper): - conf = mock.Mock() - neutron_wrapper = neutron.Neutron(conf) - neutron_wrapper.api_client.list_ports = mock.Mock( - return_value={ - 'ports': [{'id': 'fake_port_id'}] - } - ) - neutron_wrapper.api_client.delete_port = mock.Mock() - neutron_wrapper.delete_vrrp_port(object_id='foo') - neutron_wrapper.api_client.list_ports.assert_called_with( - name='ASTARA:VRRP:foo' - ) - neutron_wrapper.api_client.delete_port.assert_called_with( - 'fake_port_id') - - @mock.patch('astara.api.neutron.AstaraExtClientWrapper') - def test_delete_vrrp_ports_not_found(self, client_wrapper): - conf = mock.Mock() - neutron_wrapper = neutron.Neutron(conf) - neutron_wrapper.api_client.list_ports = mock.Mock( - return_value={'ports': []} - ) - neutron_wrapper.api_client.delete_port = mock.Mock() - neutron_wrapper.delete_vrrp_port(object_id='foo') - neutron_wrapper.api_client.list_ports.assert_has_calls( - [ - 
mock.call(name='ASTARA:VRRP:foo'), - mock.call(name='AKANDA:VRRP:foo'), - ] - ) - self.assertFalse(neutron_wrapper.api_client.delete_port.called) - - -class TestLocalServicePorts(base.RugTestBase): - def setUp(self): - super(TestLocalServicePorts, self).setUp() - self.config(management_network_id='fake_mgtnet_network_id') - self.config(management_subnet_id='fake_mgtnet_subnet_id') - self.config(management_prefix='172.16.77.0/24') - self.config(management_prefix='fdca:3ba5:a17a:acda::/64') - self.neutron_wrapper = neutron.Neutron(cfg.CONF) - self.fake_interface_driver = mock.Mock( - plug=mock.Mock(), - init_l3=mock.Mock(), - get_device_name=mock.Mock()) - - def test_ensure_local_service_port(self): - with mock.patch.object(self.neutron_wrapper, - '_ensure_local_port') as ep: - self.neutron_wrapper.ensure_local_service_port() - ep.assert_called_with( - 'fake_mgtnet_network_id', - 'fake_mgtnet_subnet_id', - 'fdca:3ba5:a17a:acda::/64', - 'service', - ) - - @mock.patch('astara.api.neutron.ip_lib') - @mock.patch('astara.api.neutron.uuid') - @mock.patch('astara.api.neutron.importutils') - def test__ensure_local_port_neutron_port_exists(self, fake_import, - fake_uuid, fake_ip_lib): - fake_ip_lib.device_exists.return_value = True - fake_uuid.uuid5.return_value = 'fake_host_id' - fake_import.import_object.return_value = self.fake_interface_driver - - fake_port = fakes.fake_port() - fake_port_dict = { - 'ports': [fake_port._neutron_port_dict], - } - fake_client = mock.Mock( - list_ports=mock.Mock(return_value=fake_port_dict) - ) - self.neutron_wrapper.api_client = fake_client - self.fake_interface_driver.get_device_name.return_value = 'fake_dev' - - self.neutron_wrapper._ensure_local_port( - 'fake_network_id', - 'fake_subnet_id', - 'fdca:3ba5:a17a:acda:f816:3eff:fe2b::1/64', - 'service') - - exp_query = { - 'network_id': 'fake_network_id', - 'device_owner': 'network:astara', - 'name': 'ASTARA:RUG:SERVICE', - 'device_id': 'fake_host_id' - } - fake_client.list_ports.assert_called_with(**exp_query) - self.fake_interface_driver.init_l3.assert_called_with( - 'fake_dev', ['fdca:3ba5:a17a:acda:f816:3eff:fe2b:ced0/64'] - ) - - @mock.patch('astara.api.neutron.socket') - @mock.patch('astara.api.neutron.ip_lib') - @mock.patch('astara.api.neutron.uuid') - @mock.patch('astara.api.neutron.importutils') - def test__ensure_local_port_no_neutron_port(self, fake_import, fake_uuid, - fake_ip_lib, fake_socket): - fake_socket.gethostname.return_value = 'foo_hostname' - fake_ip_lib.device_exists.return_value = True - fake_uuid.uuid5.return_value = 'fake_host_id' - fake_import.import_object.return_value = self.fake_interface_driver - - fake_created_port = {'port': fakes.fake_port().to_dict()} - fake_client = mock.Mock( - list_ports=mock.Mock(return_value={'ports': []}), - create_port=mock.Mock(return_value=fake_created_port)) - self.neutron_wrapper.api_client = fake_client - self.fake_interface_driver.get_device_name.return_value = 'fake_dev' - - self.neutron_wrapper._ensure_local_port( - 'fake_network_id', - 'fake_subnet_id', - 'fdca:3ba5:a17a:acda:f816:3eff:fe2b::1/64', - 'service') - - exp_port_create_dict = {'port': { - 'admin_state_up': True, - 'binding:host_id': 'foo_hostname', - 'device_id': 'fake_host_id', - 'device_owner': 'network:router_interface', - 'fixed_ips': [{'subnet_id': 'fake_subnet_id'}], - 'name': 'ASTARA:RUG:SERVICE', - 'network_id': 'fake_network_id' - }} - fake_client.create_port.assert_called_with(exp_port_create_dict) - self.fake_interface_driver.init_l3.assert_called_with( - 'fake_dev', 
['fdca:3ba5:a17a:acda:f816:3eff:fe2b:ced0/64'] - ) - - @mock.patch('time.sleep') - @mock.patch('astara.api.neutron.ip_lib') - @mock.patch('astara.api.neutron.uuid') - @mock.patch('astara.api.neutron.importutils') - def test__ensure_local_port_plug(self, fake_import, - fake_uuid, fake_ip_lib, fake_sleep): - fake_ip_lib.device_exists.return_value = False - fake_uuid.uuid5.return_value = 'fake_host_id' - fake_import.import_object.return_value = self.fake_interface_driver - - fake_port = fakes.fake_port() - fake_port_dict = { - 'ports': [fake_port._neutron_port_dict], - } - fake_client = mock.Mock( - list_ports=mock.Mock(return_value=fake_port_dict) - ) - self.neutron_wrapper.api_client = fake_client - self.fake_interface_driver.get_device_name.return_value = 'fake_dev' - - self.neutron_wrapper._ensure_local_port( - 'fake_network_id', - 'fake_subnet_id', - 'fdca:3ba5:a17a:acda:f816:3eff:fe2b::1/64', - 'service') - - self.fake_interface_driver.plug.assert_called_with( - 'fake_network_id', - fake_port.id, - 'fake_dev', - fake_port.mac_address) diff --git a/astara/test/unit/api/test_nova_wrapper.py b/astara/test/unit/api/test_nova_wrapper.py deleted file mode 100644 index 64010def..00000000 --- a/astara/test/unit/api/test_nova_wrapper.py +++ /dev/null @@ -1,481 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -from datetime import datetime, timedelta - -import mock -import copy -from novaclient import exceptions as novaclient_exceptions -from six.moves import builtins as __builtins__ - -from astara.api import nova -from astara.test.unit import base - - -class FakeNovaServer(object): - id = '6f05906e-4538-11e5-bb22-5254003ff1ae' - name = 'ak-796aafbc-4538-11e5-88e0-5254003ff1ae' - image = {'id': '83031410-4538-11e5-abd2-5254003ff1ae'} - status = 'ACTIVE' - created = '2012-08-20T21:11:09Z' - - -class FakeModel(object): - def __init__(self, id_, **kwargs): - self.id = id_ - self.__dict__.update(kwargs) - - -fake_ext_port = FakeModel( - '1', - mac_address='aa:bb:cc:dd:ee:ff', - network_id='ext-net', - fixed_ips=[FakeModel('', ip_address='9.9.9.9', subnet_id='s2')]) - -fake_mgt_port = FakeModel( - '2', - mac_address='aa:bb:cc:cc:bb:aa', - network_id='mgt-net') - -fake_int_port = FakeModel( - '3', - mac_address='aa:aa:aa:aa:aa:aa', - network_id='int-net', - fixed_ips=[FakeModel('', ip_address='192.168.1.1', subnet_id='s1')]) - -fake_router = FakeModel( - 'router_id', - tenant_id='tenant_id', - external_port=fake_ext_port, - management_port=fake_mgt_port, - internal_ports=[fake_int_port], - ports=[fake_mgt_port, fake_ext_port, fake_int_port]) - -fake_nova_instance = FakeModel( - 'instance_id', - name='ak-appliance', - status=None, - image={'id': 'fake_image_uuid'}, - created='2012-08-20T21:11:09Z' -) - - -class FakeConf: - admin_user = 'admin' - admin_password = 'password' - admin_tenant_name = 'admin' - auth_url = 'http://127.0.0.1/' - auth_strategy = 'keystone' - auth_region = 'RegionOne' - router_image_uuid = 'astara-image' - router_instance_flavor = 1 - instance_provider = 'foo' - endpoint_type = 'publicURL' - - -EXPECTED_USERDATA = """ -#cloud-config - -cloud_config_modules: - - emit_upstart - - set_hostname - - locale - - set-passwords - - timezone - - disable-ec2-metadata - - runcmd - -output: {all: '| tee -a /var/log/cloud-init-output.log'} - -debug: - - verbose: true - -bootcmd: - - /usr/local/bin/astara-configure-management aa:aa:aa:aa:aa:aa 192.168.1.1/64 - -users: - - name: astara - gecos: Astara - groups: users - shell: /bin/bash - sudo: ALL=(ALL) NOPASSWD:ALL - lock-passwd: true - ssh-authorized-keys: - - fake_key - -final_message: "Astara appliance is running" -""" - - -def fake_make_ports_callback(): - return (fake_mgt_port, [fake_ext_port, fake_int_port]) - - -class TestNovaWrapper(base.RugTestBase): - def setUp(self): - super(TestNovaWrapper, self).setUp() - self.addCleanup(mock.patch.stopall) - patch = mock.patch('novaclient.client.Client') - self.client = mock.Mock() - self.client_cls = patch.start() - self.client_cls.return_value = self.client - - self.fake_instance_provider = mock.Mock(create_instance=mock.Mock()) - fake_instance_provider_cls = mock.Mock(name='fake_provider_class') - fake_instance_provider_cls.return_value = \ - self.fake_instance_provider - get_instance_provider_p = mock.patch.object( - nova, 'get_instance_provider').start() - get_instance_provider_p.return_value = fake_instance_provider_cls - - self.nova = nova.Nova(FakeConf) - - self.INSTANCE_INFO = nova.InstanceInfo( - instance_id='fake_instance_id', - name='fake_name', - image_uuid='fake_image_id', - status='ACTIVE', - last_boot=(datetime.utcnow() - timedelta(minutes=15)), - ports=[fake_int_port, fake_ext_port, fake_mgt_port], - management_port=fake_mgt_port, - ) - - def test_get_instance_for_obj(self): - instance = mock.Mock() - self.client.servers.list.return_value = [instance] - - expected = [ - 
mock.call.servers.list(search_opts={'name': 'foo_instance_name'}) - ] - - result = self.nova.get_instance_for_obj('foo_instance_name') - self.client.assert_has_calls(expected) - self.assertEqual(instance, result) - - def test_get_instance_for_obj_not_found(self): - self.client.servers.list.return_value = [] - - expected = [ - mock.call.servers.list(search_opts={'name': 'foo_instance_name'}) - ] - - result = self.nova.get_instance_for_obj('foo_instance_name') - self.client.assert_has_calls(expected) - self.assertIsNone(result) - - def test_get_instance_by_id(self): - self.client.servers.get.return_value = 'fake_instance' - expected = [ - mock.call.servers.get('instance_id') - ] - result = self.nova.get_instance_by_id('instance_id') - self.client.servers.get.assert_has_calls(expected) - self.assertEqual('fake_instance', result) - - def test_get_instance_by_id_not_found(self): - not_found = novaclient_exceptions.NotFound('instance_id') - self.client.servers.get.side_effect = not_found - result = self.nova.get_instance_by_id('instance_id') - self.assertIsNone(result) - - def test_destroy_instance(self): - self.nova.destroy_instance(self.INSTANCE_INFO) - self.client.servers.delete.assert_called_with(self.INSTANCE_INFO.id_) - - @mock.patch.object(nova, '_ssh_key') - def test_format_userdata(self, fake_ssh_key): - fake_ssh_key.return_value = 'fake_key' - result = nova.format_userdata(fake_int_port) - self.assertEqual(EXPECTED_USERDATA.strip(), result.strip()) - - @mock.patch.object(__builtins__, 'open', autospec=True) - def test_ssh_key(self, fake_open): - mock_key_file = mock.MagicMock(spec=file) - mock_key_file.read.return_value = 'fake-key' - mock_key_file.__enter__.return_value = mock_key_file - fake_open.return_value = mock_key_file - result = nova._ssh_key() - self.assertEqual('fake-key', result) - - @mock.patch.object(__builtins__, 'open', autospec=True) - def test_ssh_key_sanitize(self, fake_open): - mock_key_file = mock.MagicMock(spec=file) - mock_key_file.read.return_value = (''' - - fake-key with some newlines - - ''') - mock_key_file.__enter__.return_value = mock_key_file - fake_open.return_value = mock_key_file - result = nova._ssh_key() - self.assertEqual('fake-key with some newlines', result) - - @mock.patch.object(nova, 'LOG', autospec=True) - @mock.patch.object(__builtins__, 'open', autospec=True) - def test_ssh_key_not_found(self, fake_open, fake_log): - fake_open.side_effect = IOError - result = nova._ssh_key() - self.assertEqual('', result) - self.assertTrue(fake_log.warning.called) - - @mock.patch.object(nova.Nova, 'get_instance_for_obj', return_value=None) - def test_boot_instance(self, fake_get): - self.fake_instance_provider.create_instance.return_value = \ - 'fake_new_instance_info' - res = self.nova.boot_instance( - resource_type='router', - prev_instance_info=None, - name='foo_instance_name', - image_uuid='foo_image', - flavor='foo_flavor', - make_ports_callback='foo_callback', - ) - self.fake_instance_provider.create_instance.assert_called_with( - resource_type='router', - name='foo_instance_name', - image_uuid='foo_image', - flavor='foo_flavor', - make_ports_callback='foo_callback', - ) - fake_get.assert_called_with('foo_instance_name') - self.assertEqual('fake_new_instance_info', res) - - @mock.patch.object(nova.Nova, 'get_instance_for_obj') - def test_boot_instance_exists(self, fake_get): - fake_instance = fake_nova_instance - fake_instance.id = 'existing_instance_id' - fake_instance.status = 'SHUTOFF' - fake_get.return_value = fake_instance - 
self.fake_instance_provider.create_instance.return_value = \ - 'fake_new_instance_info' - res = self.nova.boot_instance( - resource_type='router', - prev_instance_info=None, - name='foo_instance_name', - image_uuid='foo_image', - flavor='foo_flavor', - make_ports_callback='foo_callback', - ) - fake_get.assert_called_with('foo_instance_name') - self.client.servers.delete.assert_called_with('existing_instance_id') - self.assertIsNone(res) - - @mock.patch.object(nova.Nova, 'get_instance_for_obj') - def test_boot_instance_exists_build(self, fake_get): - fake_instance = fake_nova_instance - fake_instance.id = 'existing_instance_id' - fake_instance.status = 'BUILD' - fake_get.return_value = fake_instance - self.fake_instance_provider.create_instance.return_value = \ - 'fake_new_instance_info' - res = self.nova.boot_instance( - resource_type='router', - prev_instance_info=None, - name='foo_instance_name', - image_uuid='foo_image', - flavor='foo_flavor', - make_ports_callback='foo_callback', - ) - fake_get.assert_called_with('foo_instance_name') - self.assertIsInstance(res, nova.InstanceInfo) - self.assertEqual('existing_instance_id', res.id_) - self.assertEqual('ak-appliance', res.name) - self.assertEqual('fake_image_uuid', res.image_uuid) - - @mock.patch.object(nova.Nova, 'get_instance_by_id', return_value=None) - def test_boot_instance_prev_inst(self, fake_get): - self.fake_instance_provider.create_instance.return_value = \ - 'fake_new_instance_info' - res = self.nova.boot_instance( - resource_type='router', - prev_instance_info=self.INSTANCE_INFO, - name='foo_instance_name', - image_uuid='foo_image', - flavor='foo_flavor', - make_ports_callback='foo_callback', - ) - fake_get.assert_called_with(self.INSTANCE_INFO.id_) - self.fake_instance_provider.create_instance.assert_called_with( - resource_type='router', - name='foo_instance_name', - image_uuid='foo_image', - flavor='foo_flavor', - make_ports_callback='foo_callback', - ) - self.assertEqual('fake_new_instance_info', res) - - @mock.patch.object(nova.Nova, 'get_instance_by_id') - def test_boot_instance_exists_prev_inst(self, fake_get): - fake_instance = fake_nova_instance - fake_instance.id = 'existing_instance_id' - fake_instance.status = 'SHUTOFF' - fake_get.return_value = fake_instance - self.fake_instance_provider.create_instance.return_value = \ - 'fake_new_instance_info' - res = self.nova.boot_instance( - resource_type='router', - prev_instance_info=self.INSTANCE_INFO, - name='foo_instance_name', - image_uuid='foo_image', - flavor='foo_flavor', - make_ports_callback='foo_callback', - ) - fake_get.assert_called_with(self.INSTANCE_INFO.id_) - self.client.servers.delete.assert_called_with('existing_instance_id') - self.assertIsNone(res) - - @mock.patch.object(nova.Nova, 'get_instance_for_obj') - def test_boot_instance_exists_build_prev_inst(self, fake_get): - fake_instance = fake_nova_instance - fake_instance.id = 'existing_instance_id' - fake_instance.status = 'BUILD' - fake_get.return_value = fake_instance - self.fake_instance_provider.create_instance.return_value = \ - 'fake_new_instance_info' - res = self.nova.boot_instance( - resource_type='router', - prev_instance_info=None, - name='foo_instance_name', - image_uuid='foo_image', - flavor='foo_flavor', - make_ports_callback='foo_callback', - ) - # assert we get back the same instance_info but with updated status - self.assertEqual('BUILD', res.nova_status) - self.assertEqual(fake_instance.id, res.id_) - self.assertIsInstance(res, nova.InstanceInfo) - - def test_from_nova(self): - 
fake_server = FakeNovaServer() - last_boot = datetime.strptime( - fake_server.created, "%Y-%m-%dT%H:%M:%SZ") - instance_info = nova.InstanceInfo.from_nova(fake_server) - self.assertEqual(fake_server.id, instance_info.id_) - self.assertEqual(fake_server.name, instance_info.name) - self.assertEqual(fake_server.image['id'], instance_info.image_uuid) - self.assertEqual(last_boot, instance_info.last_boot) - - def test_booting_false(self): - self.INSTANCE_INFO.nova_status = 'ACTIVE' - self.assertFalse(self.INSTANCE_INFO.booting) - - def test_booting_true(self): - self.INSTANCE_INFO.nova_status = 'BUILDING' - self.assertTrue(self.INSTANCE_INFO.booting) - - def test_no_provider_not_none(self): - NoProviderConf = copy.deepcopy(FakeConf) - del NoProviderConf.instance_provider - self.nova = nova.Nova(NoProviderConf) - self.assertIsNotNone(self.nova.instance_provider.create_instance) - - -class TestOnDemandInstanceProvider(base.RugTestBase): - def setUp(self): - super(TestOnDemandInstanceProvider, self).setUp() - self.addCleanup(mock.patch.stopall) - patch = mock.patch('novaclient.client.Client') - self.client = mock.Mock() - self.client_cls = patch.start() - self.client_cls.return_value = self.client - - @mock.patch.object(nova, 'format_userdata') - def test_create_instance(self, mock_userdata): - provider = nova.OnDemandInstanceProvider(self.client) - self.client.servers.create.return_value = fake_nova_instance - mock_userdata.return_value = 'fake_userdata' - expected = [ - mock.call.servers.create( - 'ak-instance-name', - nics=[{'port-id': '2', - 'net-id': 'mgt-net', - 'v4-fixed-ip': ''}, - {'port-id': '1', - 'net-id': 'ext-net', - 'v4-fixed-ip': ''}, - {'port-id': '3', - 'net-id': 'int-net', - 'v4-fixed-ip': ''}], - flavor=1, - image='GLANCE-IMAGE-123', - config_drive=True, - userdata='fake_userdata', - ) - ] - - provider.create_instance( - 'router', 'ak-instance-name', 'GLANCE-IMAGE-123', - 1, fake_make_ports_callback) - self.client.assert_has_calls(expected) - - -class TestPezInstanceProvider(base.RugTestBase): - def setUp(self): - super(TestPezInstanceProvider, self).setUp() - self.addCleanup(mock.patch.stopall) - patch = mock.patch('novaclient.client.Client') - self.nova_client = mock.Mock() - self.nova_client_cls = patch.start() - self.nova_client_cls.return_value = self.nova_client - - patch = mock.patch('astara.pez.rpcapi.AstaraPezAPI') - self.rpc_client = mock.Mock() - self.rpc_client_cls = patch.start() - self.rpc_client_cls.return_value = self.rpc_client - - @mock.patch.object(nova, 'format_userdata') - def test_create_instance(self, mock_userdata): - provider = nova.PezInstanceProvider(self.nova_client) - - inst_port = { - 'id': '1', - 'name': 'name', - 'device_id': 'device_id', - 'fixed_ips': [{'ip_address': '192.168.1.1', 'subnet_id': 'sub1'}], - 'mac_address': 'aa:bb:cc:dd:ee:ff', - 'network_id': 'net_id', - 'device_owner': 'test' - } - mgt_port = { - 'id': '2', - 'name': 'name', - 'device_id': 'device_id', - 'fixed_ips': [{'ip_address': '192.168.1.10', 'subnet_id': 'sub1'}], - 'mac_address': 'aa:bb:cc:dd:ee:fa', - 'network_id': 'net_id2', - 'device_owner': 'test' - } - - fake_server = FakeNovaServer() - self.nova_client.servers.get.return_value = fake_server - fake_pez_instance = { - 'id': fake_server.id, - 'management_port': mgt_port, - 'instance_ports': [inst_port], - } - self.rpc_client.get_instance.return_value = fake_pez_instance - res = provider.create_instance( - 'router', 'ak-instance-name', 'GLANCE-IMAGE-123', - 1, fake_make_ports_callback) - 
self.rpc_client.get_instance.assert_called_with( - 'router', 'ak-instance-name', - {'network_id': 'mgt-net', 'id': '2'}, - [{'network_id': 'ext-net', 'id': '1'}, - {'network_id': 'int-net', 'id': '3'}]) - self.nova_client.servers.get.assert_called_with(fake_server.id) - exp_instance_info = nova.InstanceInfo.from_nova(fake_server) - self.assertEqual(exp_instance_info.id_, res.id_) diff --git a/astara/test/unit/api/test_rug_api.py b/astara/test/unit/api/test_rug_api.py deleted file mode 100644 index 5f5f8d49..00000000 --- a/astara/test/unit/api/test_rug_api.py +++ /dev/null @@ -1,242 +0,0 @@ -import unittest -import socket - -from six.moves import range -from cliff import commandmanager -import mock -from oslo_log import log -import webob - -from astara.api import rug - - -try: - import blessed # noqa - HAS_BLESSED = True -except ImportError: - HAS_BLESSED = False - - -class TestRugAPI(unittest.TestCase): - - def setUp(self): - ctl = mock.Mock() - ctl.return_value.command_manager = commandmanager.CommandManager( - 'astara.cli' - ) - self.api = rug.RugAPI(ctl) - self.ctl = ctl.return_value - - @unittest.skipUnless(HAS_BLESSED, "blessed not available") - def test_browse(self): - resp = self.api(webob.Request({ - 'REQUEST_METHOD': 'PUT', - 'PATH_INFO': '/browse/' - })) - assert isinstance(resp, webob.exc.HTTPNotImplemented) - assert not self.ctl.run.called - - def test_ssh(self): - resp = self.api(webob.Request({ - 'REQUEST_METHOD': 'PUT', - 'PATH_INFO': '/ssh/ROUTER123/' - })) - assert isinstance(resp, webob.exc.HTTPNotImplemented) - assert not self.ctl.run.called - - def test_poll(self): - self.api(webob.Request({ - 'REQUEST_METHOD': 'PUT', - 'PATH_INFO': '/poll/' - })) - self.ctl.run.assert_called_with( - ['--debug', 'poll'] - ) - - def test_missing_argument(self): - # argparse failures (e.g., a missing router ID) raise a SystemExit - # because cliff's behavior is to print a help message and sys.exit() - self.ctl.run.side_effect = SystemExit - resp = self.api(webob.Request({ - 'REQUEST_METHOD': 'PUT', - 'PATH_INFO': '/router/debug/' - })) - assert isinstance(resp, webob.exc.HTTPBadRequest) - self.ctl.run.assert_called_with( - ['--debug', 'router', 'debug'] - ) - - def test_router_debug(self): - self.api(webob.Request({ - 'REQUEST_METHOD': 'PUT', - 'PATH_INFO': '/router/debug/ROUTER123' - })) - self.ctl.run.assert_called_with( - ['--debug', 'router', 'debug', 'ROUTER123'] - ) - - def test_router_manage(self): - self.api(webob.Request({ - 'REQUEST_METHOD': 'PUT', - 'PATH_INFO': '/router/manage/ROUTER123' - })) - self.ctl.run.assert_called_with( - ['--debug', 'router', 'manage', 'ROUTER123'] - ) - - def test_router_update(self): - self.api(webob.Request({ - 'REQUEST_METHOD': 'PUT', - 'PATH_INFO': '/router/update/ROUTER123' - })) - self.ctl.run.assert_called_with( - ['--debug', 'router', 'update', 'ROUTER123'] - ) - - def test_router_rebuild(self): - self.api(webob.Request({ - 'REQUEST_METHOD': 'PUT', - 'PATH_INFO': '/router/rebuild/ROUTER123' - })) - self.ctl.run.assert_called_with( - ['--debug', 'router', 'rebuild', 'ROUTER123'] - ) - - def test_tenant_debug(self): - self.api(webob.Request({ - 'REQUEST_METHOD': 'PUT', - 'PATH_INFO': '/tenant/debug/TENANT123' - })) - self.ctl.run.assert_called_with( - ['--debug', 'tenant', 'debug', 'TENANT123'] - ) - - def test_tenant_manage(self): - self.api(webob.Request({ - 'REQUEST_METHOD': 'PUT', - 'PATH_INFO': '/tenant/manage/TENANT123' - })) - self.ctl.run.assert_called_with( - ['--debug', 'tenant', 'manage', 'TENANT123'] - ) - - def 
test_workers_debug(self): - self.api(webob.Request({ - 'REQUEST_METHOD': 'PUT', - 'PATH_INFO': '/workers/debug/' - })) - self.ctl.run.assert_called_with( - ['--debug', 'workers', 'debug'] - ) - - def test_invalid_router_action(self): - resp = self.api(webob.Request({ - 'REQUEST_METHOD': 'PUT', - 'PATH_INFO': '/router/breakdance/ROUTER123' - })) - assert isinstance(resp, webob.exc.HTTPNotFound) - assert not self.ctl.run.called - - def test_multiple_calls(self): - for i in range(10): - self.api(webob.Request({ - 'REQUEST_METHOD': 'PUT', - 'PATH_INFO': '/poll/' - })) - - assert self.ctl.run.call_args_list == [ - mock.call(['--debug', 'poll']) - for _ in range(10) - ] - - def test_invalid_request_method(self): - resp = self.api(webob.Request({ - 'REQUEST_METHOD': 'GET', - 'PATH_INFO': '/poll/' - })) - assert isinstance(resp, webob.exc.HTTPMethodNotAllowed) - assert not self.ctl.run.called - - -class TestRugAPIServer(unittest.TestCase): - - @mock.patch('eventlet.listen') - @mock.patch('eventlet.wsgi') - def test_bind_and_serve_ipv4(self, wsgi, listen): - sock = listen.return_value - server = rug.RugAPIServer() - server.run('10.0.0.250', 44250) - listen.assert_called_with( - ('10.0.0.250', 44250), - family=socket.AF_INET, - backlog=128 - ) - args, kwargs = wsgi.server.call_args - assert all([ - args[0] == sock, - isinstance(args[1], rug.RugAPI), - kwargs['custom_pool'] == server.pool, - isinstance(kwargs['log'], log.KeywordArgumentAdapter) - ]) - - @mock.patch('eventlet.listen') - @mock.patch('eventlet.wsgi') - def test_bind_and_serve_ipv6(self, wsgi, listen): - sock = listen.return_value - server = rug.RugAPIServer() - server.run('fdca:3ba5:a17a:acda::1', 44250) - listen.assert_called_with( - ('fdca:3ba5:a17a:acda::1', 44250), - family=socket.AF_INET6, - backlog=128 - ) - args, kwargs = wsgi.server.call_args - assert all([ - args[0] == sock, - isinstance(args[1], rug.RugAPI), - kwargs['custom_pool'] == server.pool, - isinstance(kwargs['log'], log.KeywordArgumentAdapter) - ]) - - @mock.patch('eventlet.listen') - @mock.patch('eventlet.sleep', lambda x: None) - def test_fail_to_bind(self, listen): - listen.side_effect = socket.error( - 99, "Can't assign requested address" - ) - server = rug.RugAPIServer() - self.assertRaises( - RuntimeError, - server.run, - 'fdca:3ba5:a17a:acda::1', - 44250, - ) - assert listen.call_args_list == [ - mock.call(('fdca:3ba5:a17a:acda::1', 44250), - family=socket.AF_INET6, backlog=128) - for i in range(5) - ] - - @mock.patch('eventlet.listen') - @mock.patch('eventlet.wsgi') - @mock.patch('eventlet.sleep', lambda x: None) - def test_bind_fails_on_first_attempt(self, wsgi, listen): - sock = mock.Mock() - listen.side_effect = [ - socket.error(99, "Can't assign requested address"), - sock - ] - server = rug.RugAPIServer() - server.run('fdca:3ba5:a17a:acda::1', 44250) - assert listen.call_args_list == [ - mock.call(('fdca:3ba5:a17a:acda::1', 44250), - family=socket.AF_INET6, backlog=128) - for i in range(2) # fails the first time, succeeds the second - ] - args, kwargs = wsgi.server.call_args - assert all([ - args[0] == sock, - isinstance(args[1], rug.RugAPI), - kwargs['custom_pool'] == server.pool, - isinstance(kwargs['log'], log.KeywordArgumentAdapter) - ]) diff --git a/astara/test/unit/base.py b/astara/test/unit/base.py deleted file mode 100644 index af129dc0..00000000 --- a/astara/test/unit/base.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright 2015 Akanda, Inc. -# -# Author: Akanda, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import mock -import testtools - -from oslo_config import cfg -from oslo_config import fixture as config_fixture - - -class RugTestBase(testtools.TestCase): - def setUp(self): - super(RugTestBase, self).setUp() - self.test_config = self.useFixture(config_fixture.Config(cfg.CONF)) - self.argv = [] - cfg.CONF.import_opt('host', 'astara.main') - - self.time_patch = mock.patch('time.sleep') - self.time_mock = self.time_patch.start() - self.addCleanup(mock.patch.stopall) - - def config(self, **kw): - """Override config options for a test.""" - group = kw.pop('group', None) - for k, v in kw.items(): - cfg.CONF.set_override(k, v, group) diff --git a/astara/test/unit/common/__init__.py b/astara/test/unit/common/__init__.py deleted file mode 100644 index 63621fff..00000000 --- a/astara/test/unit/common/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/astara/test/unit/common/test_agent_linux_utils.py b/astara/test/unit/common/test_agent_linux_utils.py deleted file mode 100644 index cd46e100..00000000 --- a/astara/test/unit/common/test_agent_linux_utils.py +++ /dev/null @@ -1,109 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2012, Nicira, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# @author: Dan Wendlandt, Nicira, Inc. - -import fixtures -import mock -import testtools - -from astara.common.linux import utils - - -class AgentUtilsExecuteTest(testtools.TestCase): - def setUp(self): - super(AgentUtilsExecuteTest, self).setUp() - self.root_helper = "echo" - self.test_file = self.useFixture( - fixtures.TempDir()).join("test_execute.tmp") - open(self.test_file, 'w').close() - - def test_without_helper(self): - result = utils.execute(["ls", self.test_file]) - self.assertEqual("%s\n" % self.test_file, result) - - def test_with_helper(self): - result = utils.execute(["ls", self.test_file], - self.root_helper) - self.assertEqual("ls %s\n" % self.test_file, result) - - def test_stderr(self): - stdout, stderr = utils.execute(["ls", self.test_file], - return_stderr=True) - self.assertEqual("%s\n" % self.test_file, stdout) - self.assertEqual("", stderr) - - def test_check_exit_code(self): - stdout = utils.execute(["ls", self.test_file[:-1]], - check_exit_code=False) - self.assertEqual("", stdout) - self.assertRaises(RuntimeError, utils.execute, - ["ls", self.test_file[:-1]]) - - def test_process_input(self): - result = utils.execute(["cat"], process_input="%s\n" % - self.test_file[:-1]) - self.assertEqual("%s\n" % self.test_file[:-1], result) - - def test_with_addl_env(self): - result = utils.execute(["ls", self.test_file], - addl_env={'foo': 'bar'}) - self.assertEqual("%s\n" % self.test_file, result) - - -class AgentUtilsGetInterfaceMAC(testtools.TestCase): - def test_get_interface_mac(self): - expect_val = '01:02:03:04:05:06' - with mock.patch('fcntl.ioctl') as ioctl: - ioctl.return_value = ''.join(['\x00' * 18, - '\x01\x02\x03\x04\x05\x06', - '\x00' * 232]) - actual_val = utils.get_interface_mac('eth0') - self.assertEqual(expect_val, actual_val) - - -class AgentUtilsReplaceFile(testtools.TestCase): - def test_replace_file(self): - # make file to replace - with mock.patch('tempfile.NamedTemporaryFile') as ntf: - ntf.return_value.name = '/baz' - with mock.patch('os.chmod') as chmod: - with mock.patch('os.rename') as rename: - utils.replace_file('/foo', 'bar') - - expected = [mock.call('w+', dir='/', delete=False), - mock.call().write('bar'), - mock.call().close()] - - ntf.assert_has_calls(expected) - chmod.assert_called_once_with('/baz', 0644) - rename.assert_called_once_with('/baz', '/foo') diff --git a/astara/test/unit/common/test_config.py b/astara/test/unit/common/test_config.py deleted file mode 100644 index 0ce3b6e7..00000000 --- a/astara/test/unit/common/test_config.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 2015 Akanda, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from astara.common import config -from astara.test.unit import base - - -class TestConfig(base.RugTestBase): - def _test_get_best_config_path(self, original, expected, files_exist=()): - def mock_isfile_f(f): - return f in files_exist - - with mock.patch('os.path.isfile', side_effect=mock_isfile_f): - self.assertEqual( - expected, - config.get_best_config_path(original) - ) - - def test_get_best_config_path_preferred(self): - self._test_get_best_config_path( - config.PREFERRED_CONFIG_FILEPATH, - config.PREFERRED_CONFIG_FILEPATH - ) - - def test_get_best_config_path_legacy(self): - self._test_get_best_config_path( - config.PREFERRED_CONFIG_FILEPATH, - '/etc/akanda/rug.ini', - ('/etc/akanda/rug.ini',) - ) diff --git a/astara/test/unit/common/test_hash_ring.py b/astara/test/unit/common/test_hash_ring.py deleted file mode 100644 index e2840c5b..00000000 --- a/astara/test/unit/common/test_hash_ring.py +++ /dev/null @@ -1,230 +0,0 @@ -# Copyright 2013 Hewlett-Packard Development Company, L.P. -# Copyright 2015 Akanda, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import hashlib - -import mock -from six.moves import range -from oslo_config import cfg -from testtools import matchers - -from astara.common import hash_ring -from astara.test.unit import base -# from astara.tests.unit.db import base as db_base - -CONF = cfg.CONF - - -class HashRingTestCase(base.RugTestBase): - - # NOTE(deva): the mapping used in these tests is as follows: - # if hosts = [foo, bar]: - # fake -> foo, bar - # if hosts = [foo, bar, baz]: - # fake -> foo, bar, baz - # fake-again -> bar, baz, foo - - @mock.patch.object(hashlib, 'md5', autospec=True) - def test__hash2int_returns_int(self, mock_md5): - CONF.set_override('hash_partition_exponent', 0) - r1 = 32 * 'a' - r2 = 32 * 'b' - mock_md5.return_value.hexdigest.side_effect = [r1, r2] - - hosts = ['foo', 'bar'] - replicas = 1 - ring = hash_ring.HashRing(hosts, replicas=replicas) - - self.assertIn(int(r1, 16), ring._host_hashes) - self.assertIn(int(r2, 16), ring._host_hashes) - - def test_create_ring(self): - hosts = ['foo', 'bar'] - replicas = 2 - ring = hash_ring.HashRing(hosts, replicas=replicas) - self.assertEqual(set(hosts), ring.hosts) - self.assertEqual(replicas, ring.replicas) - - def test_create_with_different_partition_counts(self): - hosts = ['foo', 'bar'] - CONF.set_override('hash_partition_exponent', 2) - ring = hash_ring.HashRing(hosts) - self.assertEqual(2 ** 2 * 2, len(ring._partitions)) - - CONF.set_override('hash_partition_exponent', 8) - ring = hash_ring.HashRing(hosts) - self.assertEqual(2 ** 8 * 2, len(ring._partitions)) - - CONF.set_override('hash_partition_exponent', 16) - ring = hash_ring.HashRing(hosts) - self.assertEqual(2 ** 16 * 2, len(ring._partitions)) - - def test_distribution_one_replica(self): - hosts = ['foo', 'bar', 'baz'] - ring = hash_ring.HashRing(hosts, replicas=1) - fake_1_hosts = ring.get_hosts('fake') - fake_2_hosts = ring.get_hosts('fake-again') - # We should have one hosts for each 
thing - self.assertThat(fake_1_hosts, matchers.HasLength(1)) - self.assertThat(fake_2_hosts, matchers.HasLength(1)) - # And they must not be the same answers even on this simple data. - self.assertNotEqual(fake_1_hosts, fake_2_hosts) - - def test_distribution_two_replicas(self): - hosts = ['foo', 'bar', 'baz'] - ring = hash_ring.HashRing(hosts, replicas=2) - fake_1_hosts = ring.get_hosts('fake') - fake_2_hosts = ring.get_hosts('fake-again') - # We should have two hosts for each thing - self.assertThat(fake_1_hosts, matchers.HasLength(2)) - self.assertThat(fake_2_hosts, matchers.HasLength(2)) - # And they must not be the same answers even on this simple data - # because if they were we'd be making the active replica a hot spot. - self.assertNotEqual(fake_1_hosts, fake_2_hosts) - - def test_distribution_three_replicas(self): - hosts = ['foo', 'bar', 'baz'] - ring = hash_ring.HashRing(hosts, replicas=3) - fake_1_hosts = ring.get_hosts('fake') - fake_2_hosts = ring.get_hosts('fake-again') - # We should have two hosts for each thing - self.assertThat(fake_1_hosts, matchers.HasLength(3)) - self.assertThat(fake_2_hosts, matchers.HasLength(3)) - # And they must not be the same answers even on this simple data - # because if they were we'd be making the active replica a hot spot. - self.assertNotEqual(fake_1_hosts, fake_2_hosts) - self.assertNotEqual(fake_1_hosts[0], fake_2_hosts[0]) - - def test_ignore_hosts(self): - hosts = ['foo', 'bar', 'baz'] - ring = hash_ring.HashRing(hosts, replicas=1) - equals_bar_or_baz = matchers.MatchesAny( - matchers.Equals(['bar']), - matchers.Equals(['baz'])) - self.assertThat( - ring.get_hosts('fake', ignore_hosts=['foo']), - equals_bar_or_baz) - self.assertThat( - ring.get_hosts('fake', ignore_hosts=['foo', 'bar']), - equals_bar_or_baz) - self.assertEqual([], ring.get_hosts('fake', ignore_hosts=hosts)) - - def test_ignore_hosts_with_replicas(self): - hosts = ['foo', 'bar', 'baz'] - ring = hash_ring.HashRing(hosts, replicas=2) - self.assertEqual( - set(['bar', 'baz']), - set(ring.get_hosts('fake', ignore_hosts=['foo']))) - self.assertEqual( - set(['baz']), - set(ring.get_hosts('fake', ignore_hosts=['foo', 'bar']))) - self.assertEqual( - set(['baz', 'foo']), - set(ring.get_hosts('fake-again', ignore_hosts=['bar']))) - self.assertEqual( - set(['foo']), - set(ring.get_hosts('fake-again', ignore_hosts=['bar', 'baz']))) - self.assertEqual([], ring.get_hosts('fake', ignore_hosts=hosts)) - - def _compare_rings(self, nodes, conductors, ring, - new_conductors, new_ring): - delta = {} - mapping = dict((node, ring.get_hosts(node)[0]) for node in nodes) - new_mapping = dict( - (node, new_ring.get_hosts(node)[0]) for node in nodes) - - for key, old in mapping.items(): - new = new_mapping.get(key, None) - if new != old: - delta[key] = (old, new) - return delta - - def test_rebalance_stability_join(self): - num_conductors = 10 - num_nodes = 10000 - # Adding 1 conductor to a set of N should move 1/(N+1) of all nodes - # Eg, for a cluster of 10 nodes, adding one should move 1/11, or 9% - # We allow for 1/N to allow for rounding in tests. 
- redistribution_factor = 1.0 / num_conductors - - nodes = [str(x) for x in range(num_nodes)] - conductors = [str(x) for x in range(num_conductors)] - new_conductors = conductors + ['new'] - delta = self._compare_rings( - nodes, conductors, hash_ring.HashRing(conductors), - new_conductors, hash_ring.HashRing(new_conductors)) - - self.assertLess(len(delta), num_nodes * redistribution_factor) - - def test_rebalance_stability_leave(self): - num_conductors = 10 - num_nodes = 10000 - # Removing 1 conductor from a set of N should move 1/(N) of all nodes - # Eg, for a cluster of 10 nodes, removing one should move 1/10, or 10% - # We allow for 1/(N-1) to allow for rounding in tests. - redistribution_factor = 1.0 / (num_conductors - 1) - - nodes = [str(x) for x in range(num_nodes)] - conductors = [str(x) for x in range(num_conductors)] - new_conductors = conductors[:] - new_conductors.pop() - delta = self._compare_rings( - nodes, conductors, hash_ring.HashRing(conductors), - new_conductors, hash_ring.HashRing(new_conductors)) - - self.assertLess(len(delta), num_nodes * redistribution_factor) - - def test_more_replicas_than_hosts(self): - hosts = ['foo', 'bar'] - ring = hash_ring.HashRing(hosts, replicas=10) - self.assertEqual(set(hosts), set(ring.get_hosts('fake'))) - - def test_ignore_non_existent_host(self): - hosts = ['foo', 'bar'] - ring = hash_ring.HashRing(hosts, replicas=1) - self.assertEqual(['foo'], ring.get_hosts('fake', - ignore_hosts=['baz'])) - - def test_create_ring_invalid_data(self): - hosts = None - self.assertRaises(hash_ring.Invalid, - hash_ring.HashRing, - hosts) - - def test_get_hosts_invalid_data(self): - hosts = ['foo', 'bar'] - ring = hash_ring.HashRing(hosts) - self.assertRaises(hash_ring.Invalid, - ring.get_hosts, - None) - - -class HashRingManagerTestCase(base.RugTestBase): - - def setUp(self): - super(HashRingManagerTestCase, self).setUp() - self.ring_manager = hash_ring.HashRingManager() - - def test_reset(self): - self.ring_manager.rebalance(hosts=['foo', 'bar']) - self.ring_manager.reset() - self.assertIsNone(self.ring_manager._hash_ring) - - def test_rebalance(self): - self.ring_manager.rebalance(hosts=['foo', 'bar']) - self.assertEqual(set(['foo', 'bar']), self.ring_manager.hosts) - self.ring_manager.rebalance(hosts=['bar', 'baz']) - self.assertEqual(set(['bar', 'baz']), self.ring_manager.hosts) diff --git a/astara/test/unit/common/test_linux_interface.py b/astara/test/unit/common/test_linux_interface.py deleted file mode 100644 index 0c38871f..00000000 --- a/astara/test/unit/common/test_linux_interface.py +++ /dev/null @@ -1,267 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2012 OpenStack LLC -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import unittest - -import mock -from oslo_config import cfg - -from astara.common.linux import interface -from astara.common.linux import ip_lib -from astara.common.linux import utils - - -class BaseChild(interface.LinuxInterfaceDriver): - def plug(*args): - pass - - def unplug(*args): - pass - - -class FakeNetwork: - id = '12345678-1234-5678-90ab-ba0987654321' - - -class FakeSubnet: - cidr = '192.168.1.1/24' - - -class FakeAllocation: - subnet = FakeSubnet() - ip_address = '192.168.1.2' - ip_version = 4 - - -class FakePort: - id = 'abcdef01-1234-5678-90ab-ba0987654321' - fixed_ips = [FakeAllocation] - device_id = 'cccccccc-cccc-cccc-cccc-cccccccccccc' - network = FakeNetwork() - network_id = network.id - - -class TestBase(unittest.TestCase): - def setUp(self): - root_helper_opt = [ - cfg.StrOpt( - 'root_helper', - default='sudo astara-rootwrap /etc/astara/rootwrap.conf' - ), - ] - self.conf = cfg.CONF - self.conf.register_opts(root_helper_opt) - self.ip_dev_p = mock.patch.object(ip_lib, 'IPDevice') - self.ip_dev = self.ip_dev_p.start() - self.ip_p = mock.patch.object(ip_lib, 'IPWrapper') - self.ip = self.ip_p.start() - self.device_exists_p = mock.patch.object(ip_lib, 'device_exists') - self.device_exists = self.device_exists_p.start() - - def tearDown(self): - # sometimes a test may turn this off - try: - self.device_exists_p.stop() - except RuntimeError: - pass - self.ip_dev_p.stop() - self.ip_p.stop() - # Note(rods): some tests override the default options so let's reset - # the config object - self.conf.reset() - - -class TestABCDriver(TestBase): - def test_get_device_name(self): - bc = BaseChild(self.conf) - device_name = bc.get_device_name(FakePort()) - self.assertEqual('tapabcdef01-12', device_name) - - def test_l3_init(self): - addresses = [dict(ip_version=4, scope='global', - dynamic=False, cidr='172.16.77.240/24')] - self.ip_dev().addr.list = mock.Mock(return_value=addresses) - - bc = BaseChild(self.conf) - ns = '12345678-1234-5678-90ab-ba0987654321' - bc.init_l3('tap0', ['192.168.1.2/24'], namespace=ns) - self.ip_dev.assert_has_calls( - [mock.call('tap0', self.conf.root_helper, namespace=ns), - mock.call().addr.list(scope='global', filters=['permanent']), - mock.call().addr.add(4, '192.168.1.2/24', '192.168.1.255'), - mock.call().addr.delete(4, '172.16.77.240/24')]) - - -class TestOVSInterfaceDriver(TestBase): - - def test_plug_no_ns(self): - self._test_plug() - - def test_plug_with_ns(self): - self._test_plug(namespace='01234567-1234-1234-99') - - def test_plug_alt_bridge(self): - self._test_plug(bridge='br-foo') - - def _test_plug(self, additional_expectation=[], bridge=None, - namespace=None): - - if not bridge: - bridge = 'br-int' - - def device_exists(dev, root_helper=None, namespace=None): - return dev == bridge - - vsctl_cmd = ['ovs-vsctl', '--', '--may-exist', 'add-port', - bridge, 'tap0', '--', 'set', 'Interface', 'tap0', - 'type=internal', '--', 'set', 'Interface', 'tap0', - 'external-ids:iface-id=port-1234', '--', 'set', - 'Interface', 'tap0', - 'external-ids:iface-status=active', '--', 'set', - 'Interface', 'tap0', - 
'external-ids:attached-mac=aa:bb:cc:dd:ee:ff'] - - with mock.patch.object(utils, 'execute') as execute: - ovs = interface.OVSInterfaceDriver(self.conf) - self.device_exists.side_effect = device_exists - ovs.plug('01234567-1234-1234-99', - 'port-1234', - 'tap0', - 'aa:bb:cc:dd:ee:ff', - bridge=bridge, - namespace=namespace) - execute.assert_called_once_with(vsctl_cmd, self.conf.root_helper) - - expected = [mock.call(self.conf.root_helper), - mock.call().device('tap0'), - mock.call().device().link.set_address('aa:bb:cc:dd:ee:ff')] - expected.extend(additional_expectation) - if namespace: - expected.extend( - [mock.call().ensure_namespace(namespace), - mock.call().ensure_namespace().add_device_to_namespace( - mock.ANY)]) - expected.extend([mock.call().device().link.set_up()]) - - self.ip.assert_has_calls(expected) - - def test_plug_mtu(self): - self.conf.set_override('network_device_mtu', 9000) - self._test_plug([mock.call().device().link.set_mtu(9000)]) - - def test_unplug(self, bridge=None): - if not bridge: - bridge = 'br-int' - with mock.patch('astara.common.linux.ovs_lib.OVSBridge') as ovs_br: - ovs = interface.OVSInterfaceDriver(self.conf) - ovs.unplug('tap0') - ovs_br.assert_has_calls([mock.call(bridge, self.conf.root_helper), - mock.call().delete_port('tap0')]) - - -class TestBridgeInterfaceDriver(TestBase): - def test_get_device_name(self): - br = interface.BridgeInterfaceDriver(self.conf) - device_name = br.get_device_name(FakePort()) - self.assertEqual('ns-abcdef01-12', device_name) - - def test_plug_no_ns(self): - self._test_plug() - - def test_plug_with_ns(self): - self._test_plug(namespace='01234567-1234-1234-99') - - def _test_plug(self, namespace=None): - def device_exists(device, root_helper=None, namespace=None): - return device.startswith('brq') - - root_veth = mock.Mock() - ns_veth = mock.Mock() - - self.ip().add_veth = mock.Mock(return_value=(root_veth, ns_veth)) - - self.device_exists.side_effect = device_exists - br = interface.BridgeInterfaceDriver(self.conf) - br.plug('01234567-1234-1234-99', - 'port-1234', - 'ns-0', - 'aa:bb:cc:dd:ee:ff', - namespace=namespace) - - ip_calls = [mock.call(self.conf.root_helper), - mock.call().add_veth('tap0', 'ns-0')] - if namespace: - ip_calls.extend([ - mock.call().ensure_namespace('01234567-1234-1234-99'), - mock.call().ensure_namespace().add_device_to_namespace( - ns_veth)]) - - self.ip.assert_has_calls(ip_calls) - - root_veth.assert_has_calls([mock.call.link.set_up()]) - ns_veth.assert_has_calls([mock.call.link.set_up()]) - - def test_plug_dev_exists(self): - self.device_exists.return_value = True - with mock.patch( - 'astara.common.linux.interface.LOG.warning') as log: - br = interface.BridgeInterfaceDriver(self.conf) - br.plug('01234567-1234-1234-99', - 'port-1234', - 'tap0', - 'aa:bb:cc:dd:ee:ff') - self.ip_dev.assert_has_calls([]) - self.assertEqual(1, log.call_count) - - def test_unplug_no_device(self): - self.device_exists.return_value = False - self.ip_dev().link.delete.side_effect = RuntimeError - with mock.patch('astara.common.linux.interface.LOG') as log: - br = interface.BridgeInterfaceDriver(self.conf) - br.unplug('tap0') - [mock.call(), - mock.call('tap0', self.conf.root_helper), - mock.call().link.delete()] - self.assertEqual(1, log.exception.call_count) - - def test_unplug(self): - self.device_exists.return_value = True - with mock.patch('astara.common.linux.interface.LOG.debug') as log: - br = interface.BridgeInterfaceDriver(self.conf) - br.unplug('tap0') - self.assertEqual(1, log.call_count) - - 
self.ip_dev.assert_has_calls([ - mock.call('tap0', self.conf.root_helper, None), - mock.call().link.delete()]) diff --git a/astara/test/unit/common/test_linux_ip_lib.py b/astara/test/unit/common/test_linux_ip_lib.py deleted file mode 100644 index 3887b8fa..00000000 --- a/astara/test/unit/common/test_linux_ip_lib.py +++ /dev/null @@ -1,700 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import collections -import unittest - -import mock - -from astara.common.linux import ip_lib - - -NETNS_SAMPLE = [ - '12345678-1234-5678-abcd-1234567890ab', - 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', - 'cccccccc-cccc-cccc-cccc-cccccccccccc'] - -LINK_SAMPLE = [ - '1: lo: mtu 16436 qdisc noqueue state UNKNOWN \\' - 'link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00', - '2: eth0: mtu 1500 qdisc mq state UP ' - 'qlen 1000\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff' - '\ alias openvswitch', - '3: br-int: mtu 1500 qdisc noop state DOWN ' - '\ link/ether aa:bb:cc:dd:ee:ff brd ff:ff:ff:ff:ff:ff', - '4: gw-ddc717df-49: mtu 1500 qdisc noop ' - 'state DOWN \ link/ether fe:dc:ba:fe:dc:ba brd ff:ff:ff:ff:ff:ff'] - -ADDR_SAMPLE = (""" -2: eth0: mtu 1500 qdisc mq state UP qlen 1000 - link/ether dd:cc:aa:b9:76:ce brd ff:ff:ff:ff:ff:ff - inet 172.16.77.240/24 brd 172.16.77.255 scope global eth0 - inet6 2001:470:9:1224:5595:dd51:6ba2:e788/64 scope global temporary dynamic - valid_lft 14187sec preferred_lft 3387sec - inet6 2001:470:9:1224:fd91:272:581e:3a32/64 scope global temporary """ - """deprecated dynamic - valid_lft 14187sec preferred_lft 0sec - inet6 2001:470:9:1224:4508:b885:5fb:740b/64 scope global temporary """ - """deprecated dynamic - valid_lft 14187sec preferred_lft 0sec - inet6 2001:470:9:1224:dfcc:aaff:feb9:76ce/64 scope global dynamic - valid_lft 14187sec preferred_lft 3387sec - inet6 fe80::dfcc:aaff:feb9:76ce/64 scope link - valid_lft forever preferred_lft forever -""") - -ADDR_SAMPLE2 = (""" -2: eth0: mtu 1500 qdisc mq state UP qlen 1000 - link/ether dd:cc:aa:b9:76:ce brd ff:ff:ff:ff:ff:ff - inet 172.16.77.240/24 scope global eth0 - inet6 2001:470:9:1224:5595:dd51:6ba2:e788/64 scope global temporary dynamic - valid_lft 14187sec preferred_lft 3387sec - inet6 2001:470:9:1224:fd91:272:581e:3a32/64 scope global temporary """ - """deprecated dynamic - valid_lft 14187sec 
preferred_lft 0sec - inet6 2001:470:9:1224:4508:b885:5fb:740b/64 scope global temporary """ - """deprecated dynamic - valid_lft 14187sec preferred_lft 0sec - inet6 2001:470:9:1224:dfcc:aaff:feb9:76ce/64 scope global dynamic - valid_lft 14187sec preferred_lft 3387sec - inet6 fe80::dfcc:aaff:feb9:76ce/64 scope link - valid_lft forever preferred_lft forever -""") - -GATEWAY_SAMPLE1 = (""" -default via 10.35.19.254 metric 100 -10.35.16.0/22 proto kernel scope link src 10.35.17.97 -""") - -GATEWAY_SAMPLE2 = (""" -default via 10.35.19.254 metric 100 -""") - -GATEWAY_SAMPLE3 = (""" -10.35.16.0/22 proto kernel scope link src 10.35.17.97 -""") - -GATEWAY_SAMPLE4 = (""" -default via 10.35.19.254 -""") - -DEVICE_ROUTE_SAMPLE = ("10.0.0.0/24 scope link src 10.0.0.2") - -SUBNET_SAMPLE1 = ("10.0.0.0/24 dev qr-23380d11-d2 scope link src 10.0.0.1\n" - "10.0.0.0/24 dev tap1d7888a7-10 scope link src 10.0.0.2") -SUBNET_SAMPLE2 = ("10.0.0.0/24 dev tap1d7888a7-10 scope link src 10.0.0.2\n" - "10.0.0.0/24 dev qr-23380d11-d2 scope link src 10.0.0.1") - - -class TestSubProcessBase(unittest.TestCase): - def setUp(self): - super(TestSubProcessBase, self).setUp() - self.execute_p = mock.patch('astara.common.linux.utils.execute') - self.execute = self.execute_p.start() - self.addCleanup(self.execute_p.stop) - - def test_execute_wrapper(self): - ip_lib.SubProcessBase._execute('o', 'link', ('list',), 'sudo') - - self.execute.assert_called_once_with(['ip', '-o', 'link', 'list'], - root_helper='sudo') - - def test_execute_wrapper_int_options(self): - ip_lib.SubProcessBase._execute([4], 'link', ('list',)) - - self.execute.assert_called_once_with(['ip', '-4', 'link', 'list'], - root_helper=None) - - def test_execute_wrapper_no_options(self): - ip_lib.SubProcessBase._execute([], 'link', ('list',)) - - self.execute.assert_called_once_with(['ip', 'link', 'list'], - root_helper=None) - - def test_run_no_namespace(self): - base = ip_lib.SubProcessBase('sudo') - base._run([], 'link', ('list',)) - self.execute.assert_called_once_with(['ip', 'link', 'list'], - root_helper=None) - - def test_run_namespace(self): - base = ip_lib.SubProcessBase('sudo', 'ns') - base._run([], 'link', ('list',)) - self.execute.assert_called_once_with(['ip', 'netns', 'exec', 'ns', - 'ip', 'link', 'list'], - root_helper='sudo') - - def test_as_root_namespace(self): - base = ip_lib.SubProcessBase('sudo', 'ns') - base._as_root([], 'link', ('list',)) - self.execute.assert_called_once_with(['ip', 'netns', 'exec', 'ns', - 'ip', 'link', 'list'], - root_helper='sudo') - - def test_as_root_no_root_helper(self): - base = ip_lib.SubProcessBase() - self.assertRaisesRegexp(Exception, - 'Sudo is required to run this command', - base._as_root, - [], 'link', ('list',)) - - -class TestIpWrapper(unittest.TestCase): - def setUp(self): - super(TestIpWrapper, self).setUp() - self.execute_p = mock.patch.object(ip_lib.IPWrapper, '_execute') - self.execute = self.execute_p.start() - self.addCleanup(self.execute_p.stop) - - def test_get_devices(self): - self.execute.return_value = '\n'.join(LINK_SAMPLE) - retval = ip_lib.IPWrapper('sudo').get_devices() - self.assertEqual([ip_lib.IPDevice('lo'), - ip_lib.IPDevice('eth0'), - ip_lib.IPDevice('br-int'), - ip_lib.IPDevice('gw-ddc717df-49')], - retval) - - self.execute.assert_called_once_with('o', 'link', ('list',), - 'sudo', None) - - def test_get_devices_malformed_line(self): - self.execute.return_value = '\n'.join(LINK_SAMPLE + ['gibberish']) - retval = ip_lib.IPWrapper('sudo').get_devices() - 
self.assertEqual([ip_lib.IPDevice('lo'), - ip_lib.IPDevice('eth0'), - ip_lib.IPDevice('br-int'), - ip_lib.IPDevice('gw-ddc717df-49')], - retval) - - self.execute.assert_called_once_with('o', 'link', ('list',), - 'sudo', None) - - def test_get_namespaces(self): - self.execute.return_value = '\n'.join(NETNS_SAMPLE) - retval = ip_lib.IPWrapper.get_namespaces('sudo') - self.assertEqual(['12345678-1234-5678-abcd-1234567890ab', - 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', - 'cccccccc-cccc-cccc-cccc-cccccccccccc'], - retval) - - self.execute.assert_called_once_with('', 'netns', ('list',), - root_helper='sudo') - - def test_add_tuntap(self): - ip_lib.IPWrapper('sudo').add_tuntap('tap0') - self.execute.assert_called_once_with('', 'tuntap', - ('add', 'tap0', 'mode', 'tap'), - 'sudo', None) - - def test_add_veth(self): - ip_lib.IPWrapper('sudo').add_veth('tap0', 'tap1') - self.execute.assert_called_once_with('', 'link', - ('add', 'tap0', 'type', 'veth', - 'peer', 'name', 'tap1'), - 'sudo', None) - - def test_get_device(self): - dev = ip_lib.IPWrapper('sudo', 'ns').device('eth0') - self.assertEqual('sudo', dev.root_helper) - self.assertEqual('ns', dev.namespace) - self.assertEqual('eth0', dev.name) - - def test_ensure_namespace(self): - with mock.patch.object(ip_lib, 'IPDevice') as ip_dev: - ip = ip_lib.IPWrapper('sudo') - with mock.patch.object(ip.netns, 'exists') as ns_exists: - ns_exists.return_value = False - ip.ensure_namespace('ns') - self.execute.assert_has_calls( - [mock.call([], 'netns', ('add', 'ns'), 'sudo', None)]) - ip_dev.assert_has_calls([mock.call('lo', 'sudo', 'ns'), - mock.call().link.set_up()]) - - def test_ensure_namespace_existing(self): - with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd: - ip_ns_cmd.exists.return_value = True - ns = ip_lib.IPWrapper('sudo').ensure_namespace('ns') - self.assertFalse(self.execute.called) - self.assertEqual('ns', ns.namespace) - - def test_namespace_is_empty_no_devices(self): - ip = ip_lib.IPWrapper('sudo', 'ns') - with mock.patch.object(ip, 'get_devices') as get_devices: - get_devices.return_value = [] - - self.assertTrue(ip.namespace_is_empty()) - get_devices.assert_called_once_with(exclude_loopback=True) - - def test_namespace_is_empty(self): - ip = ip_lib.IPWrapper('sudo', 'ns') - with mock.patch.object(ip, 'get_devices') as get_devices: - get_devices.return_value = [mock.Mock()] - - self.assertFalse(ip.namespace_is_empty()) - get_devices.assert_called_once_with(exclude_loopback=True) - - def test_garbage_collect_namespace_does_not_exist(self): - with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls: - ip_ns_cmd_cls.return_value.exists.return_value = False - ip = ip_lib.IPWrapper('sudo', 'ns') - with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty: - - self.assertFalse(ip.garbage_collect_namespace()) - ip_ns_cmd_cls.assert_has_calls([mock.call().exists('ns')]) - self.assertNotIn(mock.call().delete('ns'), - ip_ns_cmd_cls.return_value.mock_calls) - self.assertEqual([], mock_is_empty.mock_calls) - - def test_garbage_collect_namespace_existing_empty_ns(self): - with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls: - ip_ns_cmd_cls.return_value.exists.return_value = True - - ip = ip_lib.IPWrapper('sudo', 'ns') - - with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty: - mock_is_empty.return_value = True - self.assertTrue(ip.garbage_collect_namespace()) - - mock_is_empty.assert_called_once_with() - expected = [mock.call().exists('ns'), - mock.call().delete('ns')] - 
ip_ns_cmd_cls.assert_has_calls(expected) - - def test_garbage_collect_namespace_existing_not_empty(self): - lo_device = mock.Mock() - lo_device.name = 'lo' - tap_device = mock.Mock() - tap_device.name = 'tap1' - - with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls: - ip_ns_cmd_cls.return_value.exists.return_value = True - - ip = ip_lib.IPWrapper('sudo', 'ns') - - with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty: - mock_is_empty.return_value = False - - self.assertFalse(ip.garbage_collect_namespace()) - - mock_is_empty.assert_called_once_with() - expected = [mock.call(ip), - mock.call().exists('ns')] - self.assertEqual(expected, ip_ns_cmd_cls.mock_calls) - self.assertNotIn(mock.call().delete('ns'), - ip_ns_cmd_cls.mock_calls) - - def test_add_device_to_namespace(self): - dev = mock.Mock() - ip_lib.IPWrapper('sudo', 'ns').add_device_to_namespace(dev) - dev.assert_has_calls([mock.call.link.set_netns('ns')]) - - def test_add_device_to_namespace_is_none(self): - dev = mock.Mock() - ip_lib.IPWrapper('sudo').add_device_to_namespace(dev) - self.assertEqual([], dev.mock_calls) - - -class TestIPDevice(unittest.TestCase): - def test_eq_same_name(self): - dev1 = ip_lib.IPDevice('tap0') - dev2 = ip_lib.IPDevice('tap0') - self.assertEqual(dev1, dev2) - - def test_eq_diff_name(self): - dev1 = ip_lib.IPDevice('tap0') - dev2 = ip_lib.IPDevice('tap1') - self.assertNotEqual(dev1, dev2) - - def test_eq_same_namespace(self): - dev1 = ip_lib.IPDevice('tap0', 'ns1') - dev2 = ip_lib.IPDevice('tap0', 'ns1') - self.assertEqual(dev1, dev2) - - def test_eq_diff_namespace(self): - dev1 = ip_lib.IPDevice('tap0', 'sudo', 'ns1') - dev2 = ip_lib.IPDevice('tap0', 'sudo', 'ns2') - self.assertNotEqual(dev1, dev2) - - def test_eq_other_is_none(self): - dev1 = ip_lib.IPDevice('tap0', 'sudo', 'ns1') - self.assertNotEqual(dev1, None) - - def test_str(self): - self.assertEqual('tap0', str(ip_lib.IPDevice('tap0'))) - - -class TestIPCommandBase(unittest.TestCase): - def setUp(self): - super(TestIPCommandBase, self).setUp() - self.ip = mock.Mock() - self.ip.root_helper = 'sudo' - self.ip.namespace = 'namespace' - self.ip_cmd = ip_lib.IpCommandBase(self.ip) - self.ip_cmd.COMMAND = 'foo' - - def test_run(self): - self.ip_cmd._run('link', 'show') - self.ip.assert_has_calls([mock.call._run([], 'foo', ('link', 'show'))]) - - def test_run_with_options(self): - self.ip_cmd._run('link', options='o') - self.ip.assert_has_calls([mock.call._run('o', 'foo', ('link', ))]) - - def test_as_root(self): - self.ip_cmd._as_root('link') - self.ip.assert_has_calls( - [mock.call._as_root([], 'foo', ('link', ), False)]) - - def test_as_root_with_options(self): - self.ip_cmd._as_root('link', options='o') - self.ip.assert_has_calls( - [mock.call._as_root('o', 'foo', ('link', ), False)]) - - -class TestIPDeviceCommandBase(unittest.TestCase): - def setUp(self): - super(TestIPDeviceCommandBase, self).setUp() - self.ip_dev = mock.Mock() - self.ip_dev.name = 'eth0' - self.ip_dev.root_helper = 'sudo' - self.ip_dev._execute = mock.Mock(return_value='executed') - self.ip_cmd = ip_lib.IpDeviceCommandBase(self.ip_dev) - self.ip_cmd.COMMAND = 'foo' - - def test_name_property(self): - self.assertEqual('eth0', self.ip_cmd.name) - - -class TestIPCmdBase(unittest.TestCase): - def setUp(self): - super(TestIPCmdBase, self).setUp() - self.parent = mock.Mock() - self.parent.name = 'eth0' - self.parent.root_helper = 'sudo' - - def _assert_call(self, options, args): - self.parent.assert_has_calls([ - mock.call._run(options, self.command, args)]) 
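For readers skimming the removed tests: the assertions in TestSubProcessBase, TestIpWrapper and TestIPCommandBase above all pin down one command-composition rule — options become short `ip` flags, and a namespace wraps the whole command in `ip netns exec`. Below is a minimal reconstruction of that rule; the class and method names mirror the deleted astara.common.linux.ip_lib module but this is an illustrative sketch, not the code being removed.

# Illustrative sketch only (names assumed from the deleted ip_lib module).
class SubProcessBase(object):
    def __init__(self, root_helper=None, namespace=None):
        self.root_helper = root_helper
        self.namespace = namespace

    def _run(self, options, command, args):
        # Namespaced commands must run as root; plain ones need not.
        if self.namespace:
            return self._as_root(options, command, args)
        return self._compose(options, command, args)

    def _as_root(self, options, command, args, force_root_namespace=False):
        if not self.root_helper:
            raise Exception('Sudo is required to run this command')
        ns = None if force_root_namespace else self.namespace
        return self._compose(options, command, args, namespace=ns)

    @staticmethod
    def _compose(options, command, args, namespace=None):
        # 'o' -> ['-o'], [4] -> ['-4']: one short flag per option element.
        cmd = ['ip'] + ['-%s' % o for o in options] + [command] + list(args)
        if namespace:
            # Wrap the whole command so it executes inside the namespace.
            cmd = ['ip', 'netns', 'exec', namespace] + cmd
        return cmd  # the real module shells out here via utils.execute

# SubProcessBase('sudo', 'ns')._run([], 'link', ('list',)) yields
# ['ip', 'netns', 'exec', 'ns', 'ip', 'link', 'list'], exactly the
# argument vector test_run_namespace above asserts.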
- - def _assert_sudo(self, options, args, force_root_namespace=False): - self.parent.assert_has_calls( - [mock.call._as_root(options, self.command, args, - force_root_namespace)]) - - -class TestIpLinkCommand(TestIPCmdBase): - def setUp(self): - super(TestIpLinkCommand, self).setUp() - self.parent._run.return_value = LINK_SAMPLE[1] - self.command = 'link' - self.link_cmd = ip_lib.IpLinkCommand(self.parent) - - def test_set_address(self): - self.link_cmd.set_address('aa:bb:cc:dd:ee:ff') - self._assert_sudo([], ('set', 'eth0', 'address', 'aa:bb:cc:dd:ee:ff')) - - def test_set_mtu(self): - self.link_cmd.set_mtu(1500) - self._assert_sudo([], ('set', 'eth0', 'mtu', 1500)) - - def test_set_up(self): - self.link_cmd.set_up() - self._assert_sudo([], ('set', 'eth0', 'up')) - - def test_set_down(self): - self.link_cmd.set_down() - self._assert_sudo([], ('set', 'eth0', 'down')) - - def test_set_netns(self): - self.link_cmd.set_netns('foo') - self._assert_sudo([], ('set', 'eth0', 'netns', 'foo')) - self.assertEqual('foo', self.parent.namespace) - - def test_set_name(self): - self.link_cmd.set_name('tap1') - self._assert_sudo([], ('set', 'eth0', 'name', 'tap1')) - self.assertEqual('tap1', self.parent.name) - - def test_set_alias(self): - self.link_cmd.set_alias('openvswitch') - self._assert_sudo([], ('set', 'eth0', 'alias', 'openvswitch')) - - def test_delete(self): - self.link_cmd.delete() - self._assert_sudo([], ('delete', 'eth0')) - - def test_address_property(self): - self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) - self.assertEqual('cc:dd:ee:ff:ab:cd', self.link_cmd.address) - - def test_mtu_property(self): - self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) - self.assertEqual(1500, self.link_cmd.mtu) - - def test_qdisc_property(self): - self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) - self.assertEqual('mq', self.link_cmd.qdisc) - - def test_qlen_property(self): - self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) - self.assertEqual(1000, self.link_cmd.qlen) - - def test_alias_property(self): - self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) - self.assertEqual('openvswitch', self.link_cmd.alias) - - def test_state_property(self): - self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) - self.assertEqual('UP', self.link_cmd.state) - - def test_settings_property(self): - expected = {'mtu': 1500, - 'qlen': 1000, - 'state': 'UP', - 'qdisc': 'mq', - 'brd': 'ff:ff:ff:ff:ff:ff', - 'link/ether': 'cc:dd:ee:ff:ab:cd', - 'alias': 'openvswitch'} - self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1]) - self.assertEqual(expected, self.link_cmd.attributes) - self._assert_call('o', ('show', 'eth0')) - - -class TestIpAddrCommand(TestIPCmdBase): - def setUp(self): - super(TestIpAddrCommand, self).setUp() - self.parent.name = 'tap0' - self.command = 'addr' - self.addr_cmd = ip_lib.IpAddrCommand(self.parent) - - def test_add_address(self): - self.addr_cmd.add(4, '192.168.45.100/24', '192.168.45.255') - self._assert_sudo([4], - ('add', '192.168.45.100/24', 'brd', '192.168.45.255', - 'scope', 'global', 'dev', 'tap0')) - - def test_add_address_scoped(self): - self.addr_cmd.add(4, '192.168.45.100/24', '192.168.45.255', - scope='link') - self._assert_sudo([4], - ('add', '192.168.45.100/24', 'brd', '192.168.45.255', - 'scope', 'link', 'dev', 'tap0')) - - def test_del_address(self): - self.addr_cmd.delete(4, '192.168.45.100/24') - self._assert_sudo([4], - ('del', '192.168.45.100/24', 'dev', 'tap0')) - - def test_flush(self): - 
self.addr_cmd.flush() - self._assert_sudo([], ('flush', 'tap0')) - - def test_list(self): - expected = [ - dict(ip_version=4, scope='global', - dynamic=False, cidr='172.16.77.240/24', - broadcast='172.16.77.255'), - dict(ip_version=6, scope='global', - dynamic=True, cidr='2001:470:9:1224:5595:dd51:6ba2:e788/64', - broadcast='::'), - dict(ip_version=6, scope='global', - dynamic=True, cidr='2001:470:9:1224:fd91:272:581e:3a32/64', - broadcast='::'), - dict(ip_version=6, scope='global', - dynamic=True, cidr='2001:470:9:1224:4508:b885:5fb:740b/64', - broadcast='::'), - dict(ip_version=6, scope='global', - dynamic=True, cidr='2001:470:9:1224:dfcc:aaff:feb9:76ce/64', - broadcast='::'), - dict(ip_version=6, scope='link', - dynamic=False, cidr='fe80::dfcc:aaff:feb9:76ce/64', - broadcast='::')] - - test_cases = [ADDR_SAMPLE, ADDR_SAMPLE2] - - for test_case in test_cases: - self.parent._run = mock.Mock(return_value=test_case) - self.assertEqual(expected, self.addr_cmd.list()) - self._assert_call([], ('show', 'tap0')) - - def test_list_filtered(self): - expected = [ - dict(ip_version=4, scope='global', - dynamic=False, cidr='172.16.77.240/24', - broadcast='172.16.77.255')] - - test_cases = [ADDR_SAMPLE, ADDR_SAMPLE2] - - for test_case in test_cases: - output = '\n'.join(test_case.split('\n')[0:4]) - self.parent._run.return_value = output - self.assertEqual(expected, - self.addr_cmd.list('global', - filters=['permanent'])) - self._assert_call([], ('show', 'tap0', 'permanent', 'scope', - 'global')) - - -class TestIpRouteCommand(TestIPCmdBase): - def setUp(self): - super(TestIpRouteCommand, self).setUp() - self.parent.name = 'eth0' - self.command = 'route' - self.route_cmd = ip_lib.IpRouteCommand(self.parent) - - def test_add_gateway(self): - gateway = '192.168.45.100' - metric = 100 - self.route_cmd.add_gateway(gateway, metric) - self._assert_sudo([], - ('replace', 'default', 'via', gateway, - 'metric', metric, - 'dev', self.parent.name)) - - def test_del_gateway(self): - gateway = '192.168.45.100' - self.route_cmd.delete_gateway(gateway) - self._assert_sudo([], - ('del', 'default', 'via', gateway, - 'dev', self.parent.name)) - - def test_get_gateway(self): - test_cases = [{'sample': GATEWAY_SAMPLE1, - 'expected': {'gateway': '10.35.19.254', - 'metric': 100}}, - {'sample': GATEWAY_SAMPLE2, - 'expected': {'gateway': '10.35.19.254', - 'metric': 100}}, - {'sample': GATEWAY_SAMPLE3, - 'expected': None}, - {'sample': GATEWAY_SAMPLE4, - 'expected': {'gateway': '10.35.19.254'}}] - for test_case in test_cases: - self.parent._run = mock.Mock(return_value=test_case['sample']) - self.assertEqual(test_case['expected'], - self.route_cmd.get_gateway() - ) - - def test_pullup_route(self): - # interface is not the first in the list - requires - # deleting and creating existing entries - output = [DEVICE_ROUTE_SAMPLE, SUBNET_SAMPLE1] - - def pullup_side_effect(self, *args): - result = output.pop(0) - return result - - self.parent._run = mock.Mock(side_effect=pullup_side_effect) - self.route_cmd.pullup_route('tap1d7888a7-10') - self._assert_sudo([], ('del', '10.0.0.0/24', 'dev', 'qr-23380d11-d2')) - self._assert_sudo([], ('append', '10.0.0.0/24', 'proto', 'kernel', - 'src', '10.0.0.1', 'dev', 'qr-23380d11-d2')) - - def test_pullup_route_first(self): - # interface is first in the list - no changes - output = [DEVICE_ROUTE_SAMPLE, SUBNET_SAMPLE2] - - def pullup_side_effect(self, *args): - result = output.pop(0) - return result - - self.parent._run = mock.Mock(side_effect=pullup_side_effect) - 
self.route_cmd.pullup_route('tap1d7888a7-10') - # Check two calls - device get and subnet get - self.assertEqual(2, len(self.parent._run.mock_calls)) - - -class TestIpNetnsCommand(TestIPCmdBase): - def setUp(self): - super(TestIpNetnsCommand, self).setUp() - self.command = 'netns' - self.netns_cmd = ip_lib.IpNetnsCommand(self.parent) - - def test_add_namespace(self): - ns = self.netns_cmd.add('ns') - self._assert_sudo([], ('add', 'ns'), force_root_namespace=True) - self.assertEqual('ns', ns.namespace) - - def test_delete_namespace(self): - with mock.patch('astara.common.linux.utils.execute'): - self.netns_cmd.delete('ns') - self._assert_sudo([], ('delete', 'ns'), force_root_namespace=True) - - def test_namespace_exists(self): - retval = '\n'.join(NETNS_SAMPLE) - self.parent._as_root.return_value = retval - self.assertTrue( - self.netns_cmd.exists('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb')) - self._assert_sudo('o', ('list',), force_root_namespace=True) - - def test_namespace_doest_not_exist(self): - retval = '\n'.join(NETNS_SAMPLE) - self.parent._as_root.return_value = retval - self.assertFalse( - self.netns_cmd.exists('bbbbbbbb-1111-2222-3333-bbbbbbbbbbbb')) - self._assert_sudo('o', ('list',), force_root_namespace=True) - - def test_execute(self): - self.parent.namespace = 'ns' - with mock.patch('astara.common.linux.utils.execute') as execute: - self.netns_cmd.execute(['ip', 'link', 'list']) - execute.assert_called_once_with(['ip', 'netns', 'exec', 'ns', 'ip', - 'link', 'list'], - root_helper='sudo', - check_exit_code=True) - - def test_execute_env_var_prepend(self): - self.parent.namespace = 'ns' - with mock.patch('astara.common.linux.utils.execute') as execute: - env = collections.OrderedDict([('FOO', 1), ('BAR', 2)]) - self.netns_cmd.execute(['ip', 'link', 'list'], env) - execute.assert_called_once_with( - ['FOO=1', 'BAR=2', 'ip', 'netns', 'exec', 'ns', 'ip', 'link', - 'list'], - root_helper='sudo', check_exit_code=True) - - -class TestDeviceExists(unittest.TestCase): - def test_device_exists(self): - with mock.patch.object(ip_lib.IPDevice, '_execute') as _execute: - _execute.return_value = LINK_SAMPLE[1] - self.assertTrue(ip_lib.device_exists('eth0')) - _execute.assert_called_once_with('o', 'link', ('show', 'eth0')) - - def test_device_does_not_exist(self): - with mock.patch.object(ip_lib.IPDevice, '_execute') as _execute: - _execute.return_value = '' - _execute.side_effect = RuntimeError - self.assertFalse(ip_lib.device_exists('eth0')) diff --git a/astara/test/unit/db/__init__.py b/astara/test/unit/db/__init__.py deleted file mode 100644 index b9684ec7..00000000 --- a/astara/test/unit/db/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2015 Akanda, Inc. -# -# Author: Akanda, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/astara/test/unit/db/base.py b/astara/test/unit/db/base.py deleted file mode 100644 index 63a0d3c6..00000000 --- a/astara/test/unit/db/base.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright (c) 2012 NTT DOCOMO, INC. -# All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Astara DB test base class.""" - -import os -import shutil - -import fixtures -from oslo_config import cfg -from oslo_db.sqlalchemy import enginefacade - -from astara.db import api as dbapi -from astara.db.sqlalchemy import migration -from astara.test.unit import base - - -CONF = cfg.CONF - -_DB_CACHE = None - -TEST_DB_PATH = os.path.join(os.path.dirname(__file__), 'rug_test.db') -CLEAN_TEST_DB_PATH = os.path.join(os.path.dirname(__file__), - 'rug_test.db_clean') - - -def get_engine(connection): - engine = enginefacade.get_legacy_facade().get_engine() - return engine - - -class Database(fixtures.Fixture): - def __init__(self, db_migrate, sql_connection): - if sql_connection.startswith('sqlite:///'): - if os.path.exists(TEST_DB_PATH): - os.unlink(TEST_DB_PATH) - if os.path.exists(CLEAN_TEST_DB_PATH): - os.unlink(CLEAN_TEST_DB_PATH) - - self.setup_sqlite(sql_connection, db_migrate) - db_migrate.upgrade('head') - elif sql_connection == "sqlite://": - conn = self.engine.connect() - self._DB = "".join(line for line in conn.connection.iterdump()) - self.engine.dispose() - db_migrate.upgrade('head') - shutil.copyfile(TEST_DB_PATH, CLEAN_TEST_DB_PATH) - - def setup_sqlite(self, sql_connection, db_migrate): - self.sql_connection = sql_connection - self.engine = enginefacade.get_legacy_facade().get_engine() - self.engine.dispose() - self.engine.connect() - - def setUp(self): - super(Database, self).setUp() - - if self.sql_connection == "sqlite://": - conn = self.engine.connect() - conn.connection.executescript(self._DB) - self.addCleanup(self.engine.dispose) - else: - shutil.copyfile(CLEAN_TEST_DB_PATH, - TEST_DB_PATH) - self.addCleanup(os.unlink, TEST_DB_PATH) - - -class DbTestCase(base.RugTestBase): - - def setUp(self): - super(DbTestCase, self).setUp() - sql_connection = 'sqlite:///' + TEST_DB_PATH - self.config(group='database', connection=sql_connection) - self.dbapi = dbapi.get_instance() - - global _DB_CACHE - if not _DB_CACHE: - _DB_CACHE = Database(migration, - sql_connection=sql_connection) - self.useFixture(_DB_CACHE) diff --git a/astara/test/unit/db/test_debug_mode.py b/astara/test/unit/db/test_debug_mode.py deleted file mode 100644 index 9101b926..00000000 --- a/astara/test/unit/db/test_debug_mode.py +++ /dev/null @@ -1,100 +0,0 @@ -# Copyright 2015 Akanda, Inc. -# -# Author: Akanda, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
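The Database fixture above implements a migrate-once, restore-per-test scheme: run the alembic migrations a single time, snapshot the resulting sqlite file (rug_test.db_clean), then copy the snapshot back over the working database before each test. A minimal sketch of that scheme follows; the file names and the upgrade callable are invented for this example and are not astara's API.

# Illustrative sketch of the migrate-once/restore-per-test pattern above.
import os
import shutil
import sqlite3

DB = 'unit_test.db'            # assumed name, stands in for rug_test.db
CLEAN_DB = 'unit_test.db.clean'

def build_schema_once(upgrade):
    # Pay the migration cost a single time per test run, then snapshot.
    if not os.path.exists(CLEAN_DB):
        upgrade(DB)
        shutil.copyfile(DB, CLEAN_DB)

def fresh_db():
    # Restoring the snapshot is a cheap file copy, so every test starts
    # from the fully migrated, empty schema.
    shutil.copyfile(CLEAN_DB, DB)
    return sqlite3.connect(DB)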
-
-import uuid
-
-from six.moves import range
-from astara.test.unit.db import base
-
-
-class TestDBDebugModes(base.DbTestCase):
-    def test_global_debug(self):
-        self.dbapi.enable_global_debug()
-        enabled, reason = self.dbapi.global_debug()
-        self.assertTrue(enabled)
-        self.assertIsNone(reason)
-
-        self.dbapi.disable_global_debug()
-        enabled, reason = self.dbapi.global_debug()
-        self.assertFalse(enabled)
-        self.assertIsNone(reason)
-
-    def test_global_debug_with_reason(self):
-        self.dbapi.enable_global_debug(reason='foo')
-        enabled, reason = self.dbapi.global_debug()
-        self.assertTrue(enabled)
-        self.assertEqual('foo', reason)
-
-        self.dbapi.disable_global_debug()
-        enabled, reason = self.dbapi.global_debug()
-        self.assertFalse(enabled)
-        self.assertIsNone(reason)
-
-    def test_resource_debug(self):
-        r_id = uuid.uuid4().hex
-        self.dbapi.enable_resource_debug(
-            resource_uuid=r_id)
-        enabled, reason = self.dbapi.resource_in_debug(
-            resource_uuid=r_id)
-        self.assertTrue(enabled)
-        self.assertIsNone(reason)
-        self.dbapi.resource_in_debug('foo_resource')
-
-    def test_resource_debug_with_reason(self):
-        r_id = uuid.uuid4().hex
-        self.dbapi.enable_resource_debug(
-            resource_uuid=r_id, reason='foo')
-        enabled, reason = self.dbapi.resource_in_debug(
-            resource_uuid=r_id)
-        self.assertTrue(enabled)
-        self.assertEqual('foo', reason)
-
-    def test_resources_in_debug(self):
-        r_ids = [uuid.uuid4().hex for i in range(1, 3)]
-        for r_id in r_ids:
-            self.dbapi.enable_resource_debug(
-                resource_uuid=r_id, reason='resource %s is broken' % r_id)
-        for debug_r_id, reason in self.dbapi.resources_in_debug():
-            self.assertIn(debug_r_id, r_ids)
-            self.assertEqual('resource %s is broken' % debug_r_id, reason)
-
-    def test_tenant_debug(self):
-        t_id = uuid.uuid4().hex
-        self.dbapi.enable_tenant_debug(
-            tenant_uuid=t_id)
-        enabled, reason = self.dbapi.tenant_in_debug(
-            tenant_uuid=t_id)
-        self.assertTrue(enabled)
-        self.assertIsNone(reason)
-        self.dbapi.tenant_in_debug('foo_tenant')
-
-    def test_tenant_debug_with_reason(self):
-        t_id = uuid.uuid4().hex
-        self.dbapi.enable_tenant_debug(
-            tenant_uuid=t_id, reason='foo')
-        enabled, reason = self.dbapi.tenant_in_debug(
-            tenant_uuid=t_id)
-        self.assertTrue(enabled)
-        self.assertEqual('foo', reason)
-
-    def test_tenants_in_debug(self):
-        t_ids = [uuid.uuid4().hex for i in range(1, 3)]
-        for t_id in t_ids:
-            self.dbapi.enable_tenant_debug(
-                tenant_uuid=t_id, reason='tenant %s is broken' % t_id)
-        for debug_t_id, reason in self.dbapi.tenants_in_debug():
-            self.assertIn(debug_t_id, t_ids)
-            self.assertEqual('tenant %s is broken' % debug_t_id, reason)
diff --git a/astara/test/unit/drivers/__init__.py b/astara/test/unit/drivers/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/astara/test/unit/drivers/test_factory.py b/astara/test/unit/drivers/test_factory.py
deleted file mode 100644
index 62dfc463..00000000
--- a/astara/test/unit/drivers/test_factory.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# Copyright (c) 2015 Akanda, Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from astara.test.unit import base
-
-from astara import drivers
-
-
-class DriverFactoryTest(base.RugTestBase):
-    def test_get_driver(self):
-        for k, v in drivers.AVAILABLE_DRIVERS.items():
-            self.assertEqual(v, drivers.get(k))
-
-    def test_get_bad_driver(self):
-        self.assertRaises(
-            drivers.InvalidDriverException,
-            drivers.get, 'foodriver'
-        )
-
-    def test_enabled_drivers(self):
-        all_driver_cfg = drivers.AVAILABLE_DRIVERS.keys()
-        all_driver_obj = drivers.AVAILABLE_DRIVERS.values()
-        self.config(enabled_drivers=all_driver_cfg)
-        enabled_drivers = [d for d in drivers.enabled_drivers()]
-        self.assertEqual(set(enabled_drivers), set(all_driver_obj))
-
-    def test_enabled_drivers_nonexistent_left_out(self):
-        all_driver_cfg = list(drivers.AVAILABLE_DRIVERS.keys()) + ['foodriver']
-        all_driver_obj = drivers.AVAILABLE_DRIVERS.values()
-        self.config(enabled_drivers=all_driver_cfg)
-        enabled_drivers = [d for d in drivers.enabled_drivers()]
-        self.assertEqual(set(enabled_drivers), set(all_driver_obj))
-
-    @mock.patch('astara.drivers.get')
-    def test_load_from_byonf(self, fake_get):
-        fake_driver_obj = mock.Mock(
-            name='fake_driver_obj',
-            image_uuid='configured_image_uuid')
-        fake_driver = mock.Mock(
-            return_value=fake_driver_obj)
-        fake_get.return_value = fake_driver
-        byonf = {
-            'driver': 'custom_driver',
-            'image_uuid': 'custom_image_uuid',
-        }
-        ctx = mock.Mock()
-        res = drivers.load_from_byonf(ctx, byonf, 'fake_resource_id')
-        self.assertEqual(fake_driver_obj, res)
-        self.assertEqual('custom_image_uuid', res.image_uuid)
-        fake_driver.assert_called_with(ctx, 'fake_resource_id')
diff --git a/astara/test/unit/drivers/test_loadbalancer.py b/astara/test/unit/drivers/test_loadbalancer.py
deleted file mode 100644
index 88167535..00000000
--- a/astara/test/unit/drivers/test_loadbalancer.py
+++ /dev/null
@@ -1,377 +0,0 @@
-# Copyright (c) 2015 Akanda, Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
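DriverFactoryTest above pins down two behaviors of the driver registry: get() must fail loudly for an unknown name, while enabled_drivers() silently skips unknown names from configuration. The following miniature captures both; AVAILABLE_DRIVERS and the exception name mirror the deleted astara.drivers package, and the placeholder registry values are invented for the sketch.

# Illustrative reconstruction, not the removed astara.drivers code.
class InvalidDriverException(Exception):
    pass

# Placeholder registry; the real package mapped names to driver classes.
AVAILABLE_DRIVERS = {'router': object, 'loadbalancer': object}

def get(requested_driver):
    # Map a configured driver name to its class, or fail loudly.
    try:
        return AVAILABLE_DRIVERS[requested_driver]
    except KeyError:
        raise InvalidDriverException(
            'Driver %s is not available' % requested_driver)

def enabled_drivers(enabled):
    # Yield classes for enabled names, leaving unknown ones out rather
    # than aborting startup (the behavior
    # test_enabled_drivers_nonexistent_left_out asserts).
    for name in enabled:
        try:
            yield get(name)
        except InvalidDriverException:
            pass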
- -import mock -import six - -from neutronclient.common import exceptions as neutron_exceptions - -from astara import event -from astara.api import neutron -from astara.drivers import loadbalancer, states - -from astara.test.unit import base, fakes - - -class LoadBalancerDriverTest(base.RugTestBase): - def setUp(self): - super(LoadBalancerDriverTest, self).setUp() - - self.loadbalancer_id = 'fake_loadbalancer_id' - self.image_uuid = 'fake_loadbalancer_image_uuid' - self.flavor = 'fake_loadbalancer_flavor' - self.mgt_port = '5555' - self.ctx = mock.Mock() - - self.config(group='loadbalancer', image_uuid=self.image_uuid) - self.config(group='loadbalancer', instance_flavor=self.flavor) - self.config(group='loadbalancer', mgt_service_port=self.mgt_port) - - self.ctx = fakes.fake_worker_context() - self.addCleanup(mock.patch.stopall) - - def _init_driver(self): - return loadbalancer.LoadBalancer( - worker_context=self.ctx, - id=self.loadbalancer_id, - ) - - @mock.patch('astara.drivers.loadbalancer.LoadBalancer.post_init') - def test_init(self, mock_post_init): - lb = self._init_driver() - lb.post_init = mock.Mock() - self.assertEqual( - 'ak-%s-%s' % (lb.RESOURCE_NAME, self.loadbalancer_id), - lb.name) - mock_post_init.assert_called_with(self.ctx) - - def test_ports_no_loadbalancer(self): - lb = self._init_driver() - self.assertEqual([], lb.ports) - - def test_ports_with_loadbalancer(self): - lb = self._init_driver() - fake_lb = fakes.fake_loadbalancer() - lb._loadbalancer = fake_lb - self.assertEqual(set(fake_lb.ports), set(lb.ports)) - - def test_pre_boot(self): - lb = self._init_driver() - lb.pre_boot(self.ctx) - - def test_post_boot(self): - lb = self._init_driver() - lb.post_boot(self.ctx) - - def test_pre_plug(self): - lb = self._init_driver() - lb.pre_plug(self.ctx) - - @mock.patch('astara.api.config.loadbalancer.build_config') - @mock.patch('astara.drivers.loadbalancer.LoadBalancer._ensure_cache') - def test_build_config(self, mock_ensure_cache, mock_build_config): - lb = self._init_driver() - fake_lb = fakes.fake_loadbalancer() - fake_mgt_port = mock.Mock() - fake_iface_map = mock.Mock() - lb._loadbalancer = fake_lb - mock_build_config.return_value = 'fake_config' - res = lb.build_config(self.ctx, fake_mgt_port, fake_iface_map) - self.assertTrue(mock_ensure_cache.called) - mock_build_config.return_value = 'fake_config' - mock_build_config.assert_called_with( - self.ctx.neutron, lb._loadbalancer, fake_mgt_port, fake_iface_map) - self.assertEqual('fake_config', res) - - @mock.patch('astara.api.astara_client.update_config') - def test_update_config(self, mock_update_config): - lb = self._init_driver() - lb.update_config(management_address='10.0.0.1', config='fake_config') - mock_update_config.assert_called_with( - '10.0.0.1', - lb.mgt_port, - 'fake_config',) - - @mock.patch('astara.drivers.loadbalancer.LoadBalancer._ensure_cache') - def test_make_ports(self, mock_ensure_cache): - lb = self._init_driver() - fake_lb = fakes.fake_loadbalancer() - lb._loadbalancer = fake_lb - fake_lb_port = mock.Mock(id='fake_lb_port_id') - - self.ctx.neutron.create_management_port.return_value = 'fake_mgt_port' - self.ctx.neutron.create_vrrp_port.return_value = fake_lb_port - callback = lb.make_ports(self.ctx) - res = callback() - self.assertEqual(('fake_mgt_port', [fake_lb_port]), res) - - @mock.patch('astara.api.neutron.Neutron') - def test_pre_populate_retry_loop(self, mocked_neutron_api): - neutron_client = mock.Mock() - returned_value = [Exception, []] - neutron_client.get_loadbalancers.side_effect = 
returned_value - - mocked_neutron_api.return_value = neutron_client - lb = self._init_driver() - with mock.patch('time.sleep'): - lb.pre_populate_hook() - self.assertEqual( - [ - mock.call() - for value in six.moves.range(len(returned_value)) - ], - neutron_client.get_loadbalancers.call_args_list - ) - self.assertEqual( - len(returned_value), - neutron_client.get_loadbalancers.call_count - ) - - def _exit_loop_bad_auth(self, mocked_neutron_api, log, exc): - neutron_client = mock.Mock() - neutron_client.get_loadbalancers.side_effect = exc - mocked_neutron_api.return_value = neutron_client - lb = self._init_driver() - lb.pre_populate_hook() - log.warning.assert_called_once_with( - 'PrePopulateWorkers thread failed: %s', - mock.ANY - ) - - @mock.patch('astara.drivers.loadbalancer.LOG') - @mock.patch('astara.api.neutron.Neutron') - def test_pre_populate_unauthorized(self, mocked_neutron_api, log): - exc = neutron_exceptions.Unauthorized - self._exit_loop_bad_auth(mocked_neutron_api, log, exc) - - @mock.patch('astara.drivers.loadbalancer.LOG') - @mock.patch('astara.api.neutron.Neutron') - def test_pre_populate_forbidden(self, mocked_neutron_api, log): - exc = neutron_exceptions.Forbidden - self._exit_loop_bad_auth(mocked_neutron_api, log, exc) - - @mock.patch('astara.drivers.loadbalancer.LOG.warning') - @mock.patch('astara.drivers.loadbalancer.LOG.debug') - @mock.patch('astara.api.neutron.Neutron') - def test_pre_populate_retry_loop_logging( - self, mocked_neutron_api, log_debug, log_warning): - neutron_client = mock.Mock() - message = mock.Mock(tenant_id='1', id='2') - returned_value = [ - neutron_exceptions.NeutronClientException, - [message] - ] - neutron_client.get_loadbalancers.side_effect = returned_value - - mocked_neutron_api.return_value = neutron_client - - lb = self._init_driver() - with mock.patch('time.sleep'): - res = lb.pre_populate_hook() - self.assertEqual(2, log_warning.call_count) - - expected_resource = event.Resource( - driver=lb.RESOURCE_NAME, - id='2', - tenant_id='1', - ) - self.assertEqual([expected_resource], res) - - def test_get_resource_id_loadbalancer_msg(self): - msg = mock.Mock( - body={'loadbalancer': {'id': 'lb_id'}} - ) - lb = self._init_driver() - self.assertEqual( - 'lb_id', - lb.get_resource_id_for_tenant(self.ctx, 'foo_tenant', msg) - ) - - def test_get_resource_id_listener_msg(self): - msg = mock.Mock( - body={'listener': {'loadbalancer_id': 'lb_id'}} - ) - lb = self._init_driver() - self.assertEqual( - 'lb_id', - lb.get_resource_id_for_tenant(self.ctx, 'foo_tenant', msg) - ) - - def test_get_resource_id_pool_msg(self): - msg = mock.Mock( - body={'pool': {'listener_id': 'fake_listener_id'}} - ) - fake_lb = fakes.fake_loadbalancer() - self.ctx.neutron.get_loadbalancer_by_listener.return_value = fake_lb - lb = self._init_driver() - self.assertEqual( - fake_lb.id, - lb.get_resource_id_for_tenant(self.ctx, 'foo_tenant', msg) - ) - self.ctx.neutron.get_loadbalancer_by_listener.assert_called_with( - 'fake_listener_id', 'foo_tenant' - ) - - def test_get_resource_id_member_msg(self): - msg = mock.Mock( - body={'member': {'id': 'fake_member_id'}} - ) - fake_lb = fakes.fake_loadbalancer() - self.ctx.neutron.get_loadbalancer_by_member.return_value = fake_lb - lb = self._init_driver() - self.assertEqual( - fake_lb.id, - lb.get_resource_id_for_tenant(self.ctx, 'foo_tenant', msg) - ) - self.ctx.neutron.get_loadbalancer_by_member.assert_called_with( - 'fake_member_id', 'foo_tenant' - ) - - def _test_notification(self, event_type, payload, expected): - tenant_id = 
'fake_tenant_id' - res = loadbalancer.LoadBalancer.process_notification( - tenant_id, event_type, payload) - self.assertEqual(expected, res) - - def test_process_notification_loadbalancerstatus(self): - self._test_notification('loadbalancerstatus.update', {}, None) - - def test_process_notification_lb_create(self): - payload = {'loadbalancer': {'id': 'fake_lb_id'}} - r = event.Resource( - driver=loadbalancer.LoadBalancer.RESOURCE_NAME, - id='fake_lb_id', - tenant_id='fake_tenant_id') - e = event.Event( - resource=r, - crud=event.CREATE, - body=payload, - ) - self._test_notification('loadbalancer.create.end', payload, e) - - def test_process_notification_lb_delete(self): - payload = {'loadbalancer': {'id': 'fake_lb_id'}} - r = event.Resource( - driver=loadbalancer.LoadBalancer.RESOURCE_NAME, - id='fake_lb_id', - tenant_id='fake_tenant_id') - e = event.Event( - resource=r, - crud=event.DELETE, - body=payload, - ) - self._test_notification('loadbalancer.delete.end', payload, e) - - def test_process_notification_lb_update(self): - payload_formats = [ - {'loadbalancer': {'id': 'fake_lb_id'}}, - {'loadbalancer_id': 'fake_lb_id'}, - {'listener': {'loadbalancer_id': 'fake_lb_id'}}, - ] - update_notifications = [ - 'listener.create.start', - 'pool.create.start', - 'member.create.end', - 'member.delete.end', - ] - for notification in update_notifications: - for payload in payload_formats: - r = event.Resource( - driver=loadbalancer.LoadBalancer.RESOURCE_NAME, - id='fake_lb_id', - tenant_id='fake_tenant_id') - e = event.Event( - resource=r, - crud=event.UPDATE, - body=payload, - ) - self._test_notification(notification, payload, e) - - def test_process_notification_not_subscribed(self): - self._test_notification('whocares.about.this', {}, None) - - @mock.patch('astara.drivers.loadbalancer.LoadBalancer._ensure_cache') - def test_get_state_no_lb(self, mock_ensure_cache): - lb = self._init_driver() - lb._loadbalancer = None - self.assertEqual( - states.GONE, - lb.get_state(self.ctx) - ) - mock_ensure_cache.assert_called_with(self.ctx) - - @mock.patch('astara.drivers.loadbalancer.LoadBalancer._ensure_cache') - def test_get_state(self, mock_ensure_cache): - lb = self._init_driver() - fake_lb = fakes.fake_loadbalancer() - lb._loadbalancer = fake_lb - self.assertEqual( - fake_lb.status, - lb.get_state(self.ctx) - ) - mock_ensure_cache.assert_called_with(self.ctx) - - @mock.patch('astara.drivers.loadbalancer.LoadBalancer._ensure_cache') - def test_synchronize_state_no_loadbalancer(self, mock_ensure_cache): - lb = self._init_driver() - lb._loadbalancer = None - lb.synchronize_state(self.ctx, states.DOWN) - mock_ensure_cache.assert_called_with(self.ctx) - self.assertFalse(self.ctx.neutron.update_loadbalancer_status.called) - - @mock.patch('astara.drivers.loadbalancer.LoadBalancer._ensure_cache') - def test_synchronize_state(self, mock_ensure_cache): - lb = self._init_driver() - fake_lb = fakes.fake_loadbalancer() - lb._loadbalancer = fake_lb - lb.synchronize_state(self.ctx, states.CONFIGURED) - mock_ensure_cache.assert_called_with(self.ctx) - self.ctx.neutron.update_loadbalancer_status.assert_called_with( - lb.id, - 'ACTIVE', - ) - self.assertEqual('ACTIVE', lb._last_synced_status) - - @mock.patch('astara.api.astara_client.get_interfaces') - def test_get_interfaces(self, mock_get_interfaces): - mock_get_interfaces.return_value = ['fake_interface'] - lb = self._init_driver() - self.assertEqual( - ['fake_interface'], lb.get_interfaces('fake_mgt_addr')) - mock_get_interfaces.assert_called_with( - 'fake_mgt_addr', 
self.mgt_port) - - @mock.patch('astara.api.astara_client.is_alive') - def test_is_alive(self, mock_is_alive): - mock_is_alive.return_value = False - lb = self._init_driver() - self.assertFalse(lb.is_alive('fake_mgt_addr')) - mock_is_alive.assert_called_with( - 'fake_mgt_addr', self.mgt_port) - - def test__ensure_cache(self): - lb = self._init_driver() - self.ctx.neutron.get_loadbalancer_detail.return_value = 'fake_lb' - lb._ensure_cache(self.ctx) - self.assertEqual('fake_lb', lb._loadbalancer) - self.ctx.neutron.get_loadbalancer_detail.assert_called_with(lb.id) - - def test__ensure_cache_not_found(self): - lb = self._init_driver() - self.ctx.neutron.get_loadbalancer_detail.side_effect = [ - neutron.LoadBalancerGone - ] - lb._ensure_cache(self.ctx) - self.assertIsNone(lb._loadbalancer) - self.ctx.neutron.get_loadbalancer_detail.assert_called_with(lb.id) diff --git a/astara/test/unit/drivers/test_router.py b/astara/test/unit/drivers/test_router.py deleted file mode 100644 index 45ee550e..00000000 --- a/astara/test/unit/drivers/test_router.py +++ /dev/null @@ -1,404 +0,0 @@ -# Copyright (c) 2015 Akanda, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -import six - -from neutronclient.common import exceptions as neutron_exceptions - -from astara import event -from astara.api import neutron -from astara.drivers import router, states - -from astara.test.unit import base, fakes - - -class RouterDriverTest(base.RugTestBase): - def setUp(self): - super(RouterDriverTest, self).setUp() - - self.router_id = 'fake_router_id' - self.image_uuid = 'fake_router_image_uuid' - self.flavor = 'fake_router_flavor' - self.mgt_port = '5555' - self.ctx = mock.Mock() - - self.config(group='router', image_uuid=self.image_uuid) - self.config(group='router', instance_flavor=self.flavor) - self.config(group='router', mgt_service_port=self.mgt_port) - - self.ctx = fakes.fake_worker_context() - self.addCleanup(mock.patch.stopall) - - def _init_driver(self): - return router.Router( - worker_context=self.ctx, - id=self.router_id, - ) - - @mock.patch('astara.drivers.router.Router.post_init') - def test_init(self, mock_post_init): - rtr = self._init_driver() - rtr.post_init = mock.Mock() - self.assertEqual( - 'ak-%s-%s' % (rtr.RESOURCE_NAME, self.router_id), - rtr.name) - mock_post_init.assert_called_with(self.ctx) - - @mock.patch('astara.drivers.router.Router._ensure_cache') - def test_post_init(self, mock_ensure_cache): - rtr = self._init_driver() - rtr.post_init(self.ctx) - self.assertEqual(self.image_uuid, rtr.image_uuid) - self.assertEqual(self.flavor, rtr.flavor) - self.assertEqual(self.mgt_port, rtr.mgt_port) - mock_ensure_cache.assert_called_with(self.ctx) - - def test__ensure_cache_no_router(self): - self.ctx.neutron.get_router_detail.return_value = None - rtr = self._init_driver() - self.assertIsNone(rtr._router) - - def test__ensure_cache_with_router(self): - rtr = self._init_driver() - self.ctx.neutron.get_router_detail.return_value = 'fake_router' - 
rtr._ensure_cache(self.ctx) - self.assertEqual('fake_router', rtr._router) - - def test_ports_no_router(self): - rtr = self._init_driver() - self.assertEqual([], rtr.ports) - - def test_ports_with_router(self): - rtr = self._init_driver() - fake_router_obj = fakes.fake_router() - rtr._router = fake_router_obj - self.assertEqual(set(fake_router_obj.ports), set(rtr.ports)) - - @mock.patch('astara.drivers.router.Router.pre_plug') - def test_pre_boot(self, mock_pre_plug): - rtr = self._init_driver() - rtr.pre_boot(self.ctx) - mock_pre_plug.assert_called_with(self.ctx) - - @mock.patch('astara.api.config.router.build_config') - @mock.patch('astara.drivers.router.Router._ensure_cache') - def test_build_config(self, mock_ensure_cache, mock_build_config): - rtr = self._init_driver() - fake_router_obj = fakes.fake_router() - fake_mgt_port = mock.Mock() - fake_iface_map = mock.Mock() - rtr._router = fake_router_obj - mock_build_config.return_value = 'fake_config' - res = rtr.build_config(self.ctx, fake_mgt_port, fake_iface_map) - self.assertTrue(mock_ensure_cache.called) - mock_build_config.return_value = 'fake_config' - mock_build_config.assert_called_with( - self.ctx, rtr._router, fake_mgt_port, fake_iface_map) - self.assertEqual('fake_config', res) - - @mock.patch('astara.api.astara_client.update_config') - def test_update_config(self, mock_update_config): - rtr = self._init_driver() - rtr.update_config(management_address='10.0.0.1', config='fake_config') - mock_update_config.assert_called_with( - '10.0.0.1', - rtr.mgt_port, - 'fake_config',) - - @mock.patch('astara.drivers.router.Router._ensure_cache') - def test_make_ports(self, mock_ensure_cache): - rtr = self._init_driver() - fake_router_obj = fakes.fake_router() - rtr._router = fake_router_obj - self.ctx.neutron.create_management_port.return_value = 'fake_mgt_port' - self.ctx.neutron.create_vrrp_port.side_effect = [ - 'fake_port_%s' % p.network_id for p in fake_router_obj.ports - ] - callback = rtr.make_ports(self.ctx) - res = callback() - expected_instance_ports = [ - 'fake_port_%s' % p.network_id for p in fake_router_obj.ports - ] - self.assertEqual(('fake_mgt_port', expected_instance_ports), res) - - def test_delete_ports(self): - rtr = self._init_driver() - fake_router_obj = fakes.fake_router() - rtr._router = fake_router_obj - rtr.delete_ports(self.ctx) - expected_ports = [mock.call(rtr.id), - mock.call(rtr.id, label='MGT')] - self.ctx.neutron.delete_vrrp_port.assert_has_calls(expected_ports) - - @mock.patch('astara.api.neutron.Neutron') - def test_pre_populate_retry_loop(self, mocked_neutron_api): - neutron_client = mock.Mock() - returned_value = [Exception, []] - neutron_client.get_routers.side_effect = returned_value - - mocked_neutron_api.return_value = neutron_client - rtr = self._init_driver() - with mock.patch('time.sleep'): - rtr.pre_populate_hook() - self.assertEqual( - [ - mock.call(detailed=False) - for value in six.moves.range(len(returned_value)) - ], - neutron_client.get_routers.call_args_list - ) - self.assertEqual( - len(returned_value), - neutron_client.get_routers.call_count - ) - - def _exit_loop_bad_auth(self, mocked_neutron_api, log, exc): - neutron_client = mock.Mock() - neutron_client.get_routers.side_effect = exc - mocked_neutron_api.return_value = neutron_client - rtr = self._init_driver() - rtr.pre_populate_hook() - log.warning.assert_called_once_with( - 'PrePopulateWorkers thread failed: %s', - mock.ANY - ) - - @mock.patch('astara.drivers.router.LOG') - @mock.patch('astara.api.neutron.Neutron') - def 
test_pre_populate_unauthorized(self, mocked_neutron_api, log): - exc = neutron_exceptions.Unauthorized - self._exit_loop_bad_auth(mocked_neutron_api, log, exc) - - @mock.patch('astara.drivers.router.LOG') - @mock.patch('astara.api.neutron.Neutron') - def test_pre_populate_forbidden(self, mocked_neutron_api, log): - exc = neutron_exceptions.Forbidden - self._exit_loop_bad_auth(mocked_neutron_api, log, exc) - - @mock.patch('astara.drivers.router.LOG.warning') - @mock.patch('astara.drivers.router.LOG.debug') - @mock.patch('astara.api.neutron.Neutron') - def test_pre_populate_retry_loop_logging( - self, mocked_neutron_api, log_debug, log_warning): - neutron_client = mock.Mock() - message = mock.Mock(tenant_id='1', id='2') - returned_value = [ - neutron_exceptions.NeutronClientException, - [message] - ] - neutron_client.get_routers.side_effect = returned_value - - mocked_neutron_api.return_value = neutron_client - - rtr = self._init_driver() - with mock.patch('time.sleep'): - res = rtr.pre_populate_hook() - self.assertEqual(2, log_warning.call_count) - - expected_resource = event.Resource( - driver=rtr.RESOURCE_NAME, - id='2', - tenant_id='1', - ) - self.assertEqual([expected_resource], res) - - def test_get_resource_id_for_tenant(self): - fake_router = fakes.fake_router() - self.ctx.neutron.get_router_for_tenant.return_value = fake_router - res = router.Router.get_resource_id_for_tenant( - self.ctx, 'fake_tenant_id', 'fake_message') - self.assertEqual(fake_router.id, res) - self.ctx.neutron.get_router_for_tenant.assert_called_with( - 'fake_tenant_id') - - def test_get_resource_id_for_tenant_no_router(self): - self.ctx.neutron.get_router_for_tenant.return_value = None - res = router.Router.get_resource_id_for_tenant( - self.ctx, 'fake_tenant_id', 'fake_message') - self.assertIsNone(res) - self.ctx.neutron.get_router_for_tenant.assert_called_with( - 'fake_tenant_id') - - def _test_notification(self, event_type, payload, expected): - tenant_id = 'fake_tenant_id' - res = router.Router.process_notification( - tenant_id, event_type, payload) - self.assertEqual(expected, res) - - def test_process_notifications_floatingips(self): - payload = {'router': {'id': 'fake_router_id'}} - r = event.Resource( - driver=router.Router.RESOURCE_NAME, - id='fake_router_id', - tenant_id='fake_tenant_id') - e = event.Event( - resource=r, - crud=event.UPDATE, - body=payload, - ) - - events = [ - 'floatingip.create.end', - 'floatingip.update.end', - 'floatingip.change.end', - 'floatingip.delete.end'] - [self._test_notification(fipe, payload, e) for fipe in events] - - def test_process_notification_routerstatus(self): - self._test_notification('routerstatus.update', {}, None) - - def test_process_notification_router_create(self): - payload = {'router': {'id': 'fake_router_id'}} - r = event.Resource( - driver=router.Router.RESOURCE_NAME, - id='fake_router_id', - tenant_id='fake_tenant_id') - e = event.Event( - resource=r, - crud=event.CREATE, - body=payload, - ) - self._test_notification('router.create.end', payload, e) - - def test_process_notification_router_delete(self): - payload = {'router_id': 'fake_router_id'} - r = event.Resource( - driver=router.Router.RESOURCE_NAME, - id='fake_router_id', - tenant_id='fake_tenant_id') - e = event.Event( - resource=r, - crud=event.DELETE, - body=payload, - ) - self._test_notification('router.delete.end', payload, e) - - def test_process_notification_interface_notifications(self): - for notification in router._ROUTER_INTERFACE_NOTIFICATIONS: - payload = {'router.interface': {'id': 
'fake_router_id'}} - r = event.Resource( - driver=router.Router.RESOURCE_NAME, - id='fake_router_id', - tenant_id='fake_tenant_id') - e = event.Event( - resource=r, - crud=event.UPDATE, - body=payload, - ) - self._test_notification(notification, payload, e) - - def test_process_notification_interesting_notifications(self): - for notification in router._ROUTER_INTERESTING_NOTIFICATIONS: - payload = {'router': {'id': 'fake_router_id'}} - r = event.Resource( - driver=router.Router.RESOURCE_NAME, - id='fake_router_id', - tenant_id='fake_tenant_id') - e = event.Event( - resource=r, - crud=event.UPDATE, - body=payload, - ) - self._test_notification(notification, payload, e) - - def test_process_notification_not_subscribed(self): - payload = {'router': {'id': 'fake_router_id'}} - self._test_notification('whocares.about.this', payload, None) - - @mock.patch('astara.drivers.router.Router._ensure_cache') - def test_get_state_no_router(self, mock_ensure_cache): - rtr = self._init_driver() - rtr._router = None - self.assertEqual( - states.GONE, - rtr.get_state(self.ctx) - ) - mock_ensure_cache.assert_called_with(self.ctx) - - @mock.patch('astara.drivers.router.Router._ensure_cache') - def test_get_state(self, mock_ensure_cache): - rtr = self._init_driver() - fake_router = fakes.fake_router() - rtr._router = fake_router - self.assertEqual( - fake_router.status, - rtr.get_state(self.ctx) - ) - mock_ensure_cache.assert_called_with(self.ctx) - - @mock.patch('astara.drivers.router.Router._ensure_cache') - def test_synchronize_state_no_router(self, mock_ensure_cache): - rtr = self._init_driver() - rtr._router = None - rtr.synchronize_state(self.ctx, states.DOWN) - mock_ensure_cache.assert_called_with(self.ctx) - self.assertFalse(self.ctx.neutron.update_router_status.called) - - @mock.patch('astara.drivers.router.Router._ensure_cache') - def test_synchronize_state(self, mock_ensure_cache): - rtr = self._init_driver() - fake_router_obj = fakes.fake_router() - rtr._router = fake_router_obj - rtr.synchronize_state(self.ctx, states.CONFIGURED) - mock_ensure_cache.assert_called_with(self.ctx) - self.ctx.neutron.update_router_status.assert_called_with( - rtr.id, - 'ACTIVE', - ) - self.assertEqual('ACTIVE', rtr._last_synced_status) - - @mock.patch('astara.drivers.router.Router._ensure_cache') - def test_synchronize_state_no_change(self, mock_ensure_cache): - rtr = self._init_driver() - fake_router_obj = fakes.fake_router() - rtr._router = fake_router_obj - rtr._last_synced_status = 'ACTIVE' - rtr.synchronize_state(self.ctx, states.CONFIGURED) - mock_ensure_cache.assert_called_with(self.ctx) - self.assertFalse(self.ctx.neutron.update_router_status.called) - - @mock.patch('astara.api.astara_client.get_interfaces') - def test_get_interfaces(self, mock_get_interfaces): - mock_get_interfaces.return_value = ['fake_interface'] - rtr = self._init_driver() - self.assertEqual( - ['fake_interface'], rtr.get_interfaces('fake_mgt_addr')) - mock_get_interfaces.assert_called_with( - 'fake_mgt_addr', self.mgt_port) - - @mock.patch('astara.api.astara_client.is_alive') - def test_is_alive(self, mock_is_alive): - mock_is_alive.return_value = False - rtr = self._init_driver() - self.assertFalse(rtr.is_alive('fake_mgt_addr')) - mock_is_alive.assert_called_with( - 'fake_mgt_addr', self.mgt_port) - - def test_post_boot(self): - self._init_driver().post_boot(self.ctx) - - def test__ensure_cache(self): - rtr = self._init_driver() - self.ctx.neutron.get_router_detail.return_value = 'fake_router' - rtr._ensure_cache(self.ctx) - 
self.assertEqual('fake_router', rtr._router) - self.ctx.neutron.get_router_detail.assert_called_with(rtr.id) - - def test__ensure_cache_not_found(self): - rtr = self._init_driver() - self.ctx.neutron.get_router_detail.side_effect = [neutron.RouterGone] - rtr._ensure_cache(self.ctx) - self.assertIsNone(rtr._router) - self.ctx.neutron.get_router_detail.assert_called_with(rtr.id) diff --git a/astara/test/unit/fakes.py b/astara/test/unit/fakes.py deleted file mode 100644 index e99c8295..00000000 --- a/astara/test/unit/fakes.py +++ /dev/null @@ -1,176 +0,0 @@ -# Copyright (c) 2015 Akanda, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from astara.drivers import base -from astara.api import neutron, nova -from astara import worker - -FAKE_MGT_ADDR = '10.10.1.13' - - -def fake_loadbalancer(): - lb_dict = { - 'name': u'balancer1', - 'status': u'ACTIVE', - 'tenant_id': u'd22b149cee9b4eac8349c517eda00b89', - 'vip_address': u'192.168.0.132', - 'provisioning_status': 'ACTIVE', - 'admin_state_up': True, - 'id': u'66636dbe-86f3-48e3-843f-13b05f93dd84', - 'listeners': [ - { - 'id': u'e3491d85-4d41-4c2d-99ed-e2410343b163', - 'name': u'listener1', - 'protocol': u'HTTP', - 'protocol_port': 80, - 'tenant_id': u'd22b149cee9b4eac8349c517eda00b89', - 'admin_state_up': True, - 'default_pool': { - 'name': u'pool1', - 'protocol': u'HTTP', - 'session_persistence': None, - 'tenant_id': u'd22b149cee9b4eac8349c517eda00b89', - 'admin_state_up': True, - 'healthmonitor': None, - 'id': u'ad75ea75-43e1-4f4a-9053-c66dd7235ff1', - 'lb_algorithm': u'ROUND_ROBIN', - 'members': [{ - 'address': '192.168.0.194', - 'admin_state_up': True, - 'id': u'ae70e3cd-41c9-4253-ade6-e555693d38bb', - 'protocol_port': 80, - 'subnet': None, - 'tenant_id': u'd22b149cee9b4eac8349c517eda00b89', - 'weight': 1}]}}], - 'vip_port': { - 'id': u'a3f398c5-a02a-4daa-8c9f-810b5a85ecdf', - 'mac_address': u'fa:16:3e:ff:32:7c', - 'name': u'loadbalancer-66636dbe-86f3-48e3-843f-13b05f93dd84', - 'network_id': u'b7fc9b39-401c-47cc-a07d-9f8cde75ccbf', - 'device_id': u'66636dbe-86f3-48e3-843f-13b05f93dd84', - 'device_owner': u'neutron:LOADBALANCERV2', - 'fixed_ips': [ - {'ip_address': '192.168.0.132', - 'subnet_id': u'8c58b558-be54-45de-9873-169fe845bb80'}, - {'ip_address': 'fdd6:a1fa:cfa8:6af6:f816:3eff:feff:327c', - 'subnet_id': u'89fe7a9d-be92-469c-9a1e-503a39462ed1'}]} - } - return neutron.LoadBalancer.from_dict(lb_dict) - - -def fake_port(): - port_dict = { - u'admin_state_up': True, - u'allowed_address_pairs': [], - u'binding:host_id': u'trusty', - u'binding:profile': {}, - u'binding:vif_details': { - u'ovs_hybrid_plug': True, u'port_filter': True - }, - u'binding:vif_type': u'ovs', - u'binding:vnic_type': u'normal', - u'device_id': u'fake_device_id', - u'device_owner': u'network:astara', - u'dns_assignment': [{ - u'fqdn': u'foo.openstacklocal.', - u'hostname': u'host-fdca-3ba5-a17a-acda-f816-3eff-fe2b-ced0', - u'ip_address': u'fdca:3ba5:a17a:acda:f816:3eff:fe2b:ced0' - }], - u'dns_name': u'', - u'extra_dhcp_opts': [], 
- u'fixed_ips': [{ - u'ip_address': u'fdca:3ba5:a17a:acda:f816:3eff:fe2b:ced0', - u'subnet_id': 'fake_subnet_id', - }], - u'id': u'fake_port_id', - u'mac_address': u'fa:16:3e:2b:ce:d0', - u'name': u'ASTARA:RUG:SERVICE', - u'network_id': u'fake_network_id', - u'port_security_enabled': False, - u'security_groups': [], - u'status': u'ACTIVE', - u'tenant_id': u'fake_tenant_id' - } - return neutron.Port.from_dict(port_dict) - - -def fake_router(): - router_gateway_port = { - 'id': 'ext', - 'name': 'router_gateway_port', - 'device_id': 'device_id', - 'fixed_ips': [], - 'mac_address': 'aa:bb:cc:dd:ee:ff', - 'network_id': 'net_id', - 'device_owner': 'network:router_gateway' - } - router_internal_port = { - 'id': 'ext', - 'name': 'router_internal_port', - 'device_id': 'device_id', - 'fixed_ips': [], - 'mac_address': 'aa:bb:cc:dd:ee:ff', - 'network_id': 'net_id', - 'device_owner': 'network:router_interface' - } - - router_fip = { - 'id': 'fip', - 'floating_ip_address': '9.9.9.9', - 'fixed_ip_address': '192.168.1.1' - } - - router_dict = { - 'id': '1', - 'tenant_id': 'tenant_id', - 'name': 'name', - 'admin_state_up': True, - 'status': 'ACTIVE', - 'gw_port': router_gateway_port, - '_interfaces': [router_internal_port], - '_floatingips': [router_fip] - } - return neutron.Router.from_dict(router_dict) - - -def fake_driver(resource_id=None): - """A factory for generating fake driver instances suitable for testing""" - fake_driver = mock.Mock(base.BaseDriver, autospec=True) - fake_driver.RESOURCE_NAME = 'FakeDriver' - fake_driver.id = resource_id or 'fake_resource_id' - fake_driver.log = mock.Mock() - fake_driver.flavor = 'fake_flavor' - fake_driver.name = 'ak-FakeDriver-fake_resource_id' - fake_driver.image_uuid = 'fake_image_uuid' - fake_driver.make_ports.return_value = 'fake_ports_callback' - fake_driver.delete_ports.return_value = 'fake_delete_ports_callback' - fake_driver.is_ha = True - return fake_driver - - -def fake_worker_context(): - """Patches client API libs in the worker context. - Caller should addCleanup(mock.patch.stopall). - """ - fake_neutron_obj = mock.patch.object( - neutron, 'Neutron', autospec=True).start() - mock.patch.object( - neutron, 'Neutron', return_value=fake_neutron_obj).start() - fake_nova_obj = mock.patch.object( - nova, 'Nova', autospec=True).start() - mock.patch.object( - nova, 'Nova', return_value=fake_nova_obj).start() - return worker.WorkerContext(FAKE_MGT_ADDR) diff --git a/astara/test/unit/openvswitch/__init__.py b/astara/test/unit/openvswitch/__init__.py deleted file mode 100644 index 63621fff..00000000 --- a/astara/test/unit/openvswitch/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
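The fake_worker_context() helper above starts its patches with mock.patch.object(...).start() and, as its docstring notes, leaves teardown to the caller via addCleanup(mock.patch.stopall). Below is a minimal, self-contained sketch of that start/stopall idiom, not astara code: fetch_status is a hypothetical stand-in for a client call, and the external `mock` package used throughout these tests is interchangeable with unittest.mock here.

import mock  # py2-era external package; `from unittest import mock` also works
import unittest


def fetch_status():
    """Stand-in for a real client call; the test never runs this body."""
    raise RuntimeError('would hit the network')


class ExampleTest(unittest.TestCase):
    def setUp(self):
        # .start() applies the patch immediately and returns the mock;
        # nothing unwinds it automatically, so register a single cleanup
        # that stops every patch started this way.
        self.fake_fetch = mock.patch(
            __name__ + '.fetch_status', return_value='ACTIVE').start()
        self.addCleanup(mock.patch.stopall)

    def test_fetch_is_stubbed(self):
        self.assertEqual('ACTIVE', fetch_status())
        self.fake_fetch.assert_called_once_with()

Without the stopall cleanup, patches started via .start() would leak into every subsequent test, which is why the helper's docstring pushes that responsibility onto the caller.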
diff --git a/astara/test/unit/openvswitch/test_ovs_lib.py b/astara/test/unit/openvswitch/test_ovs_lib.py deleted file mode 100644 index c3c960ce..00000000 --- a/astara/test/unit/openvswitch/test_ovs_lib.py +++ /dev/null @@ -1,369 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2012, Nicira, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# @author: Dan Wendlandt, Nicira, Inc. - -import unittest -import uuid -import mox - -from astara.common.linux import ovs_lib, utils - - -def generate_uuid(): - return str(uuid.uuid4()) - - -class OVS_Lib_Test(unittest.TestCase): - """ - A test suite to exercise the OVS libraries shared by Neutron agents. - Note: these tests do not actually execute ovs-* utilities, and thus - can run on any system. That does, however, limit their scope. 
- """ - - def setUp(self): - super(OVS_Lib_Test, self).setUp() - self.BR_NAME = "br-int" - self.TO = "--timeout=2" - - self.mox = mox.Mox() - self.root_helper = 'sudo' - self.br = ovs_lib.OVSBridge(self.BR_NAME, self.root_helper) - self.mox.StubOutWithMock(utils, "execute") - self.addCleanup(self.mox.UnsetStubs) - - def test_vifport(self): - """create and stringify vif port, confirm no exceptions""" - self.mox.ReplayAll() - - pname = "vif1.0" - ofport = 5 - vif_id = generate_uuid() - mac = "ca:fe:de:ad:be:ef" - - # test __init__ - port = ovs_lib.VifPort(pname, ofport, vif_id, mac, self.br) - self.assertEqual(pname, port.port_name) - self.assertEqual(ofport, port.ofport) - self.assertEqual(vif_id, port.vif_id) - self.assertEqual(mac, port.vif_mac) - self.assertEqual(self.BR_NAME, port.switch.br_name) - - # test __str__ - foo = str(port) - self.assert_(foo) - - self.mox.VerifyAll() - - def test_reset_bridge(self): - utils.execute(["ovs-vsctl", self.TO, "--", - "--if-exists", "del-br", self.BR_NAME], - root_helper=self.root_helper) - utils.execute(["ovs-vsctl", self.TO, "add-br", self.BR_NAME], - root_helper=self.root_helper) - self.mox.ReplayAll() - - self.br.reset_bridge() - self.mox.VerifyAll() - - def test_delete_port(self): - pname = "tap5" - utils.execute(["ovs-vsctl", self.TO, "--", "--if-exists", - "del-port", self.BR_NAME, pname], - root_helper=self.root_helper) - - self.mox.ReplayAll() - self.br.delete_port(pname) - self.mox.VerifyAll() - - def test_add_flow(self): - ofport = "99" - vid = 4000 - lsw_id = 18 - utils.execute(["ovs-ofctl", "add-flow", self.BR_NAME, - "hard_timeout=0,idle_timeout=0," - "priority=2,dl_src=ca:fe:de:ad:be:ef" - ",actions=strip_vlan,output:0"], - root_helper=self.root_helper) - utils.execute(["ovs-ofctl", "add-flow", self.BR_NAME, - "hard_timeout=0,idle_timeout=0," - "priority=1,actions=normal"], - root_helper=self.root_helper) - utils.execute(["ovs-ofctl", "add-flow", self.BR_NAME, - "hard_timeout=0,idle_timeout=0," - "priority=2,actions=drop"], - root_helper=self.root_helper) - utils.execute(["ovs-ofctl", "add-flow", self.BR_NAME, - "hard_timeout=0,idle_timeout=0," - "priority=2,in_port=%s,actions=drop" % ofport], - root_helper=self.root_helper) - utils.execute(["ovs-ofctl", "add-flow", self.BR_NAME, - "hard_timeout=0,idle_timeout=0," - "priority=4,in_port=%s,dl_vlan=%s," - "actions=strip_vlan,set_tunnel:%s,normal" - % (ofport, vid, lsw_id)], - root_helper=self.root_helper) - utils.execute(["ovs-ofctl", "add-flow", self.BR_NAME, - "hard_timeout=0,idle_timeout=0," - "priority=3,tun_id=%s,actions=" - "mod_vlan_vid:%s,output:%s" - % (lsw_id, vid, ofport)], root_helper=self.root_helper) - self.mox.ReplayAll() - - self.br.add_flow(priority=2, dl_src="ca:fe:de:ad:be:ef", - actions="strip_vlan,output:0") - self.br.add_flow(priority=1, actions="normal") - self.br.add_flow(priority=2, actions="drop") - self.br.add_flow(priority=2, in_port=ofport, actions="drop") - - self.br.add_flow(priority=4, in_port=ofport, dl_vlan=vid, - actions="strip_vlan,set_tunnel:%s,normal" % - (lsw_id)) - self.br.add_flow(priority=3, tun_id=lsw_id, - actions="mod_vlan_vid:%s,output:%s" % - (vid, ofport)) - self.mox.VerifyAll() - - def test_get_port_ofport(self): - pname = "tap99" - ofport = "6" - utils.execute(["ovs-vsctl", self.TO, "get", - "Interface", pname, "ofport"], - root_helper=self.root_helper).AndReturn(ofport) - self.mox.ReplayAll() - - self.assertEqual(self.br.get_port_ofport(pname), ofport) - self.mox.VerifyAll() - - def test_get_datapath_id(self): - datapath_id = 
'"0000b67f4fbcc149"' - utils.execute(["ovs-vsctl", self.TO, "get", - "Bridge", self.BR_NAME, "datapath_id"], - root_helper=self.root_helper).AndReturn(datapath_id) - self.mox.ReplayAll() - - self.assertEqual(datapath_id.strip('"'), self.br.get_datapath_id()) - self.mox.VerifyAll() - - def test_count_flows(self): - utils.execute(["ovs-ofctl", "dump-flows", self.BR_NAME], - root_helper=self.root_helper).AndReturn('ignore' - '\nflow-1\n') - self.mox.ReplayAll() - - # counts the number of flows as total lines of output - 2 - self.assertEqual(1, self.br.count_flows()) - self.mox.VerifyAll() - - def test_delete_flow(self): - ofport = "5" - lsw_id = 40 - vid = 39 - utils.execute(["ovs-ofctl", "del-flows", self.BR_NAME, - "in_port=" + ofport], root_helper=self.root_helper) - utils.execute(["ovs-ofctl", "del-flows", self.BR_NAME, - "tun_id=%s" % lsw_id], root_helper=self.root_helper) - utils.execute(["ovs-ofctl", "del-flows", self.BR_NAME, - "dl_vlan=%s" % vid], root_helper=self.root_helper) - self.mox.ReplayAll() - - self.br.delete_flows(in_port=ofport) - self.br.delete_flows(tun_id=lsw_id) - self.br.delete_flows(dl_vlan=vid) - self.mox.VerifyAll() - - def test_add_tunnel_port(self): - pname = "tap99" - ip = "9.9.9.9" - ofport = "6" - - utils.execute(["ovs-vsctl", self.TO, "add-port", - self.BR_NAME, pname], root_helper=self.root_helper) - utils.execute(["ovs-vsctl", self.TO, "set", "Interface", - pname, "type=gre"], root_helper=self.root_helper) - utils.execute(["ovs-vsctl", self.TO, "set", "Interface", - pname, "options:remote_ip=" + ip], - root_helper=self.root_helper) - utils.execute(["ovs-vsctl", self.TO, "set", "Interface", - pname, "options:in_key=flow"], - root_helper=self.root_helper) - utils.execute(["ovs-vsctl", self.TO, "set", "Interface", - pname, "options:out_key=flow"], - root_helper=self.root_helper) - utils.execute(["ovs-vsctl", self.TO, "get", - "Interface", pname, "ofport"], - root_helper=self.root_helper).AndReturn(ofport) - self.mox.ReplayAll() - - self.assertEqual(ofport, self.br.add_tunnel_port(pname, ip)) - self.mox.VerifyAll() - - def test_add_patch_port(self): - pname = "tap99" - peer = "bar10" - ofport = "6" - - utils.execute(["ovs-vsctl", self.TO, "add-port", - self.BR_NAME, pname], root_helper=self.root_helper) - utils.execute(["ovs-vsctl", self.TO, "set", "Interface", - pname, "type=patch"], root_helper=self.root_helper) - utils.execute(["ovs-vsctl", self.TO, "set", - "Interface", pname, "options:peer=" + peer], - root_helper=self.root_helper) - utils.execute(["ovs-vsctl", self.TO, "get", - "Interface", pname, "ofport"], - root_helper=self.root_helper).AndReturn(ofport) - self.mox.ReplayAll() - - self.assertEqual(ofport, self.br.add_patch_port(pname, peer)) - self.mox.VerifyAll() - - def _test_get_vif_ports(self, is_xen=False): - pname = "tap99" - ofport = "6" - vif_id = generate_uuid() - mac = "ca:fe:de:ad:be:ef" - - utils.execute(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME], - root_helper=self.root_helper).AndReturn("%s\n" % pname) - - if is_xen: - external_ids = ('{xs-vif-uuid="%s", attached-mac="%s"}' - % (vif_id, mac)) - else: - external_ids = ('{iface-id="%s", attached-mac="%s"}' - % (vif_id, mac)) - - utils.execute(["ovs-vsctl", self.TO, "get", - "Interface", pname, "external_ids"], - root_helper=self.root_helper).AndReturn(external_ids) - utils.execute(["ovs-vsctl", self.TO, "get", - "Interface", pname, "ofport"], - root_helper=self.root_helper).AndReturn(ofport) - if is_xen: - utils.execute(["xe", "vif-param-get", "param-name=other-config", - 
"param-key=nicira-iface-id", "uuid=" + vif_id], - root_helper=self.root_helper).AndReturn(vif_id) - self.mox.ReplayAll() - - ports = self.br.get_vif_ports() - self.assertEqual(1, len(ports)) - self.assertEqual(pname, ports[0].port_name) - self.assertEqual(ofport, ports[0].ofport) - self.assertEqual(vif_id, ports[0].vif_id) - self.assertEqual(mac, ports[0].vif_mac) - self.assertEqual(self.BR_NAME, ports[0].switch.br_name) - self.mox.VerifyAll() - - def test_get_vif_ports_nonxen(self): - self._test_get_vif_ports(False) - - def test_get_vif_ports_xen(self): - self._test_get_vif_ports(True) - - def test_clear_db_attribute(self): - pname = "tap77" - utils.execute(["ovs-vsctl", self.TO, "clear", "Port", - pname, "tag"], root_helper=self.root_helper) - self.mox.ReplayAll() - self.br.clear_db_attribute("Port", pname, "tag") - self.mox.VerifyAll() - - def test_port_id_regex(self): - result = ('external_ids : {attached-mac="fa:16:3e:23:5b:f2",' - ' iface-id="5c1321a7-c73f-4a77-95e6-9f86402e5c8f",' - ' iface-status=active}\nname :' - ' "dhc5c1321a7-c7"\nofport : 2\n') - match = self.br.re_id.search(result) - vif_mac = match.group('vif_mac') - vif_id = match.group('vif_id') - port_name = match.group('port_name') - ofport = int(match.group('ofport')) - self.assertEqual('fa:16:3e:23:5b:f2', vif_mac) - self.assertEqual('5c1321a7-c73f-4a77-95e6-9f86402e5c8f', vif_id) - self.assertEqual('dhc5c1321a7-c7', port_name) - self.assertEqual(2, ofport) - - def test_iface_to_br(self): - iface = 'tap0' - br = 'br-int' - root_helper = 'sudo' - utils.execute(["ovs-vsctl", self.TO, "iface-to-br", iface], - root_helper=root_helper).AndReturn('br-int') - - self.mox.ReplayAll() - self.assertEqual(br, ovs_lib.get_bridge_for_iface(root_helper, iface)) - self.mox.VerifyAll() - - def test_iface_to_br_handles_ovs_vsctl_exception(self): - iface = 'tap0' - root_helper = 'sudo' - utils.execute(["ovs-vsctl", self.TO, "iface-to-br", iface], - root_helper=root_helper).AndRaise(Exception) - - self.mox.ReplayAll() - self.assertIsNone(ovs_lib.get_bridge_for_iface(root_helper, iface)) - self.mox.VerifyAll() - - def test_delete_all_ports(self): - self.mox.StubOutWithMock(self.br, 'get_port_name_list') - self.br.get_port_name_list().AndReturn(['port1']) - self.mox.StubOutWithMock(self.br, 'delete_port') - self.br.delete_port('port1') - self.mox.ReplayAll() - self.br.delete_ports(all_ports=True) - self.mox.VerifyAll() - - def test_delete_neutron_ports(self): - port1 = ovs_lib.VifPort('tap1234', 1, generate_uuid(), - 'ca:fe:de:ad:be:ef', 'br') - port2 = ovs_lib.VifPort('tap5678', 2, generate_uuid(), - 'ca:ee:de:ad:be:ef', 'br') - ports = [port1, port2] - self.mox.StubOutWithMock(self.br, 'get_vif_ports') - self.br.get_vif_ports().AndReturn(ports) - self.mox.StubOutWithMock(self.br, 'delete_port') - self.br.delete_port('tap1234') - self.br.delete_port('tap5678') - self.mox.ReplayAll() - self.br.delete_ports(all_ports=False) - self.mox.VerifyAll() - - def test_get_bridges(self): - bridges = ['br-int', 'br-ex'] - root_helper = 'sudo' - utils.execute(["ovs-vsctl", self.TO, "list-br"], - root_helper=root_helper).AndReturn('br-int\nbr-ex\n') - - self.mox.ReplayAll() - self.assertEqual(bridges, ovs_lib.get_bridges(root_helper)) - self.mox.VerifyAll() diff --git a/astara/test/unit/pez/__init__.py b/astara/test/unit/pez/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/astara/test/unit/pez/test_pool_manager.py b/astara/test/unit/pez/test_pool_manager.py deleted file mode 100644 index fe47317c..00000000 --- 
a/astara/test/unit/pez/test_pool_manager.py +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright 2015 Akanda, Inc -# -# Author: Akanda, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy -import mock -import time - -from six.moves import range -from astara.pez import pool as ak_pool -from astara.test.unit import base - - -class MockInstance(object): - id = 'fake_instance_uuid' - name = 'fake_name' - status = ak_pool.ACTIVE - - -class PoolManagerTest(base.RugTestBase): - def setUp(self): - self.image_uuid = 'fake_image' - self.flavor = 'fake_flavor' - self.mgt_net_id = 'fake_mgt_net_id' - self.pool_size = 3 - self.resource = 'router' - super(PoolManagerTest, self).setUp() - self.pool_manager = ak_pool.PezPoolManager( - self.image_uuid, - self.flavor, - self.pool_size, - self.mgt_net_id, - ) - - def _create_pool(self, num=3, status=ak_pool.ACTIVE): - pool = [MockInstance() for i in range(0, num)] - [setattr(p, 'status', status) for p in pool] - return {self.resource: pool} - - @mock.patch('astara.pez.pool.PezPoolManager.delete_instance') - def test__check_err_instances(self, mock_delete): - pool = self._create_pool() - pool[self.resource][1].id = 'errored_instance_id' - pool[self.resource][1].status = ak_pool.ERROR - deleting_instance = copy.copy(pool[self.resource][1]) - deleting_instance.status = ak_pool.DELETING - mock_delete.return_value = deleting_instance - self.pool_manager._check_err_instances(pool) - self.assertIn(deleting_instance, pool[self.resource]) - mock_delete.assert_called_with('errored_instance_id') - - def test__check_del_instances(self): - self.time_patch.stop() - pool = self._create_pool(num=1, status=ak_pool.DELETING) - self.pool_manager.delete_timeout = .01 - res = self.pool_manager._check_del_instances(pool) - - # deletion hasn't timed out yet - - self.assertEqual(0, len(res)) - # the deleting instance is added to the counter - self.assertIn( - pool[self.resource][0].id, self.pool_manager._delete_counters) - - # A stuck instance is reported back as such - time.sleep(.02) - res = self.pool_manager._check_del_instances(pool) - self.assertIn(pool[self.resource][0], res) - - # once an instance is completely deleted, its counter is removed - self.pool_manager._check_del_instances({self.resource: []}) - self.assertNotIn( - pool[self.resource][0], self.pool_manager._delete_counters) diff --git a/astara/test/unit/test_coordination.py b/astara/test/unit/test_coordination.py deleted file mode 100644 index 17221789..00000000 --- a/astara/test/unit/test_coordination.py +++ /dev/null @@ -1,150 +0,0 @@ -import mock - -from Queue import Queue - -from tooz import coordination as tz_coordination - -from astara import coordination - from astara import event -from astara.test.unit import base - - -class TestRugCoordinator(base.RugTestBase): - def get_fake_coordinator(self, url, member_id): - return self.fake_coord - - def setUp(self): - super(TestRugCoordinator, self).setUp() - self.config(url='memcache://foo_cache', group='coordination') - self.config(group_id='foo_coord_group', 
group='coordination') - self.config(heartbeat_interval=9, group='coordination') - self.config(host='foo_host') - - self.fake_coord = mock.MagicMock( - create_group=mock.MagicMock(), - join_group=mock.MagicMock(), - heartbeat=mock.MagicMock(), - watch_join_group=mock.MagicMock(), - watch_leave_group=mock.MagicMock(), - get_leader=mock.MagicMock(), - stand_down_group_leader=mock.MagicMock(), - ) - - fake_get_coord = mock.patch.object(coordination, 'tz_coordination', - autospec=True) - self._fake_get_coord = fake_get_coord.start() - self._fake_get_coord.get_coordinator = self.get_fake_coordinator - - self.addCleanup(mock.patch.stopall) - self.queue = Queue() - - @mock.patch('astara.coordination.RugCoordinator.start') - def test_setup(self, fake_start): - self.coordinator = coordination.RugCoordinator(self.queue) - self.assertEqual('memcache://foo_cache', self.coordinator.url) - self.assertEqual('foo_coord_group', self.coordinator.group) - self.assertEqual(9, self.coordinator.heartbeat_interval) - self.assertEqual('foo_host', self.coordinator.host) - self.assertTrue(fake_start.called) - - @mock.patch('astara.coordination.RugCoordinator.cluster_changed') - def test_start(self, fake_cluster_changed): - self.coordinator = coordination.RugCoordinator(self.queue) - self.assertTrue(self.fake_coord.start.called) - self.fake_coord.create_group.assert_called_with('foo_coord_group') - self.fake_coord.join_group.assert_called_with('foo_coord_group') - self.fake_coord.watch_join_group.assert_called_with( - 'foo_coord_group', - fake_cluster_changed) - self.fake_coord.watch_leave_group.assert_called_with( - 'foo_coord_group', - fake_cluster_changed) - self.assertTrue(self.fake_coord.heartbeat.called) - fake_cluster_changed.assert_called_with( - event=None, node_bootstrap=True) - - def test_start_raises(self): - self.coordinator = coordination.RugCoordinator(self.queue) - self.fake_coord.create_group.side_effect = ( - tz_coordination.GroupAlreadyExist(self.coordinator.group)) - self.fake_coord.join_group.side_effect = ( - tz_coordination.MemberAlreadyExist( - self.coordinator.host, self.coordinator.group)) - return self.test_start() - - @mock.patch('time.sleep') - @mock.patch('astara.coordination.RugCoordinator.stop') - def test_run(self, fake_stop, fake_sleep): - fake_sleep.side_effect = coordination.CoordinatorDone() - self.coordinator = coordination.RugCoordinator(self.queue) - self.coordinator.run() - self.assertTrue(self.fake_coord.heartbeat.called) - self.assertTrue(self.fake_coord.run_watchers.called) - - @mock.patch('astara.coordination.RugCoordinator.is_leader') - def test_stop_not_leader(self, fake_is_leader): - fake_is_leader.__get__ = mock.Mock(return_value=False) - self.coordinator = coordination.RugCoordinator(self.queue) - self.assertRaises(coordination.CoordinatorDone, self.coordinator.stop) - self.fake_coord.leave_group.assert_called_with(self.coordinator.group) - self.assertFalse(self.fake_coord.stand_down_group_leader.called) - - @mock.patch('astara.coordination.RugCoordinator.is_leader') - def test_stop_leader(self, fake_is_leader): - fake_is_leader.__get__ = mock.Mock(return_value=True) - self.coordinator = coordination.RugCoordinator(self.queue) - self.assertRaises(coordination.CoordinatorDone, self.coordinator.stop) - self.fake_coord.stand_down_group_leader.assert_called_with( - self.coordinator.group) - self.fake_coord.leave_group.assert_called_with(self.coordinator.group) - - def test_members(self): - fake_async_resp = mock.MagicMock( - get=mock.MagicMock(return_value=['foo', 'bar']) 
- ) - self.fake_coord.get_members.return_value = fake_async_resp - self.coordinator = coordination.RugCoordinator(self.queue) - self.assertEqual(['foo', 'bar'], self.coordinator.members) - self.fake_coord.get_members.assert_called_with(self.coordinator.group) - - def test_members_filter_leader(self): - fake_async_resp = mock.MagicMock( - get=mock.MagicMock(return_value=['foo', 'bar', 'leader']) - ) - self.fake_coord.get_members.return_value = fake_async_resp - self.coordinator = coordination.RugCoordinator(self.queue) - self.assertEqual(['foo', 'bar'], self.coordinator.members) - self.fake_coord.get_members.assert_called_with(self.coordinator.group) - - def test_members_filter_no_leader(self): - fake_async_resp = mock.MagicMock( - get=mock.MagicMock(return_value=['foo', 'bar']) - ) - self.fake_coord.get_members.return_value = fake_async_resp - self.coordinator = coordination.RugCoordinator(self.queue) - self.assertEqual(['foo', 'bar'], self.coordinator.members) - self.fake_coord.get_members.assert_called_with(self.coordinator.group) - - def test_is_leader(self): - fake_async_resp = mock.MagicMock( - get=mock.MagicMock(return_value='foo_host') - ) - self.fake_coord.get_leader.return_value = fake_async_resp - self.coordinator = coordination.RugCoordinator(self.queue) - self.assertEqual(True, self.coordinator.is_leader) - self.fake_coord.get_leader.assert_called_with(self.coordinator.group) - - @mock.patch('astara.coordination.RugCoordinator.start') - @mock.patch('astara.coordination.RugCoordinator.members') - def test_cluster_changed(self, fake_members, fake_start): - fake_members.__get__ = mock.Mock(return_value=['foo', 'bar']) - self.coordinator = coordination.RugCoordinator(self.queue) - expected_rebalance_event = event.Event( - resource=event.Resource('*', '*', '*'), - crud=event.REBALANCE, - body={'members': ['foo', 'bar']}) - - self.coordinator.cluster_changed(event=None) - expected = ('*', expected_rebalance_event) - res = self.queue.get() - self.assertEqual(expected, res) diff --git a/astara/test/unit/test_debug.py b/astara/test/unit/test_debug.py deleted file mode 100644 index 83becfc5..00000000 --- a/astara/test/unit/test_debug.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -import mock - -from oslo_config import cfg - -from astara import debug -from astara.test.unit import base - - -class TestDebug(base.RugTestBase): - def tearDown(self): - # The router-id CLI opt is added at runtime and needs to be removed - # post-test to avoid polluting other tests' config namespace - cfg.CONF.reset() - cfg.CONF.unregister_opts(debug.DEBUG_OPTS) - super(TestDebug, self).tearDown() - - @mock.patch('astara.drivers.get') - @mock.patch('astara.worker.WorkerContext') - @mock.patch('astara.state.Automaton') - @mock.patch('pdb.set_trace') - def test_debug_one_router(self, set_trace, automaton, ctx, drivers_get): - ctx.return_value.neutron.get_router_detail.return_value = mock.Mock( - tenant_id='123' - ) - debug.debug_one_router(self.argv + ['--router-id', 'X']) - - mock_router = drivers_get.return_value.return_value._router - - assert set_trace.called - automaton.assert_called_once_with( - resource=drivers_get.return_value.return_value, - tenant_id=mock_router.tenant_id, - delete_callback=debug.delete_callback, - bandwidth_callback=debug.bandwidth_callback, - worker_context=ctx.return_value, - queue_warning_threshold=100, - reboot_error_threshold=1, - ) - - class CrudMatch(object): - - def __init__(self, crud): - self.crud = crud - - def __eq__(self, other): - return self.crud == other.crud - - def __ne__(self, other): - return not self.__eq__(other) - - automaton.return_value.send_message.assert_called_once_with( - CrudMatch('update') - ) - self.assertEqual(1, automaton.return_value.update.call_count) diff --git a/astara/test/unit/test_health.py b/astara/test/unit/test_health.py deleted file mode 100644 index 1229c2b5..00000000 --- a/astara/test/unit/test_health.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright (c) 2015 Akanda, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from astara import event -from astara import health -from astara.test.unit import base - - -class BreakLoop(Exception): - pass - - -class HealthTest(base.RugTestBase): - @mock.patch('time.sleep') - def test_health_inspector(self, fake_sleep): - fake_scheduler = mock.Mock( - handle_message=mock.Mock() - ) - - # raise the exception to break out of the while loop. - fake_scheduler.handle_message.side_effect = BreakLoop() - try: - health._health_inspector(fake_scheduler) - except BreakLoop: - pass - - exp_res = event.Resource( - id='*', - tenant_id='*', - driver='*', - ) - exp_event = event.Event( - resource=exp_res, - crud=event.POLL, - body={}, - ) - fake_scheduler.handle_message.assert_called_with('*', exp_event) diff --git a/astara/test/unit/test_instance_manager.py b/astara/test/unit/test_instance_manager.py deleted file mode 100644 index 79688559..00000000 --- a/astara/test/unit/test_instance_manager.py +++ /dev/null @@ -1,1163 +0,0 @@ -# Copyright 2014 DreamHost, LLC - -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import collections -import mock -import six -import uuid - -from datetime import datetime, timedelta -from six.moves import range - -from astara import instance_manager -from astara.api import nova -from astara.drivers import states -from astara.test.unit import base -from astara.test.unit import fakes - -from oslo_config import cfg - -states.RETRY_DELAY = 0.4 -states.BOOT_WAIT = 1 - - -class FakeModel(object): - def __init__(self, id_, **kwargs): - self.id = id_ - self.__dict__.update(kwargs) - - -fake_mgt_port = FakeModel( - '1', - mac_address='aa:bb:cc:dd:ee:ff', - network_id='mgt-net', - fixed_ips=[FakeModel('', ip_address='9.9.9.9', subnet_id='s2')]) - -fake_int_port = FakeModel( - '2', - mac_address='bb:cc:cc:dd:ee:ff', - network_id='int-net', - fixed_ips=[FakeModel('', ip_address='10.10.10.10', subnet_id='s3')]) - -fake_ext_port = FakeModel( - '3', - mac_address='cc:cc:cc:dd:ee:ff', - network_id='ext-net', - fixed_ips=[FakeModel('', ip_address='192.168.1.1', subnet_id='s4')]) - -fake_add_port = FakeModel( - '4', - mac_address='aa:bb:cc:dd:ff:ff', - network_id='additional-net', - fixed_ips=[FakeModel('', ip_address='8.8.8.8', subnet_id='s3')]) - - -def instance_info(mgt_port=fake_mgt_port, name=None): - if not name: - name = 'ak-router-' + str(uuid.uuid4()) - - return nova.InstanceInfo( - instance_id=str(uuid.uuid4()), - name=name, - management_port=mgt_port, - ports=[fake_int_port, fake_ext_port], - image_uuid='9f3dbe8e-66d8-11e5-9952-525400cfc326', - status='ACTIVE', - last_boot=(datetime.utcnow() - timedelta(minutes=15)), - ) - - -class TestInstanceManager(base.RugTestBase): - - def setUp(self): - super(TestInstanceManager, self).setUp() - self.conf = cfg.CONF - self.fake_driver = fakes.fake_driver() - self.ctx = fakes.fake_worker_context() - - self.neutron = self.ctx.neutron - self.neutron.api_client = mock.Mock() - self.config(boot_timeout=30) - self.config(astara_mgt_service_port=5000) - self.config(max_retries=3) - self.addCleanup(mock.patch.stopall) - - self.log = mock.Mock() - self.update_state_p = mock.patch.object( - instance_manager.InstanceManager, - 'update_state' - ) - - ports = [fake_int_port, fake_ext_port] - - self.fake_driver.get_interfaces.return_value = [ - {'ifname': 'ge0', 'lladdr': fake_mgt_port.mac_address}, - {'ifname': 'ge1', 'lladdr': fake_ext_port.mac_address}, - {'ifname': 'ge2', 'lladdr': fake_int_port.mac_address}, - ] - self.fake_driver.ports = ports - - self.mock_update_state = self.update_state_p.start() - self.instance_mgr = instance_manager.InstanceManager( - self.fake_driver, - self.ctx - ) - self.instances_patch = mock.patch.object( - instance_manager, 'InstanceGroupManager', autospec=True) - self.instance_mgr.instances = self.instances_patch.start() - - self.next_state = None - - def next_state(*args, **kwargs): - if self.next_state: - self.instance_mgr.state = self.next_state - return self.instance_mgr.state - self.mock_update_state.side_effect = next_state - - def set_instances_container_mocks(self, instances=None, mocks=None): - # set up a mock InstanceGroupManager based on dict - # with specified mocks - 
self.instances_patch.stop() - - mocks = mocks or [] - instances = instances or [] - - class FakeInstancesContainer(dict): - @property - def instance_count(self): - return len(self.values()) - - @property - def cluster_degraded(self): - return len(self.values()) < self.count - - def remove(self, worker_context, instance): - self.pop(instance.id_) - - def refresh(self, worker_context): - pass - - self.instance_mgr.instances = FakeInstancesContainer() - for attr, _mock in mocks: - if attr not in dir(instance_manager.InstanceGroupManager): - raise AttributeError( - 'Attempting to mock non-existent method: %s' % attr) - setattr(self.instance_mgr.instances, attr, _mock) - - self.instance_mgr.instances.update({ - i.id_: i for i in instances - }) - self.instance_mgr.instances.count = len(instances) - - def test_update_state_gone(self): - self.update_state_p.stop() - self.fake_driver.get_state.return_value = states.GONE - self.assertEqual( - states.GONE, - self.instance_mgr.update_state(self.ctx) - ) - - def test_update_state_down_no_backing_instances(self): - self.update_state_p.stop() - self.fake_driver.get_state.return_value = states.UP - self.instance_mgr.instances.__nonzero__.return_value = False - self.assertEqual( - states.DOWN, - self.instance_mgr.update_state(self.ctx) - ) - self.assertEqual( - states.DOWN, - self.instance_mgr.state - ) - - def test_update_state_degraded(self): - self.update_state_p.stop() - self.fake_driver.get_state.return_value = states.UP - self.instance_mgr.instances.cluster_degraded = True - self.assertEqual( - states.DEGRADED, - self.instance_mgr.update_state(self.ctx) - ) - self.assertEqual( - states.DEGRADED, - self.instance_mgr.state - ) - - def test_update_state_booting(self): - self.update_state_p.stop() - self.fake_driver.get_state.return_value = states.UP - self.instance_mgr.instances.validate_ports.return_value = \ - ([], [mock.Mock()]) # (has_ports, no_ports) - self.assertEqual( - states.BOOTING, - self.instance_mgr.update_state(self.ctx) - ) - - def test_update_state_down_all_instances_dead(self): - self.update_state_p.stop() - self.instance_mgr.state = states.CONFIGURED - self.instance_mgr.instances.validate_ports.return_value = \ - ([mock.Mock()], []) # (has_ports, no_ports) - self.instance_mgr.instances.are_alive.return_value = \ - ([], [mock.Mock()]) # (alive, dead) - - self.assertEqual( - states.DOWN, - self.instance_mgr.update_state(self.ctx) - ) - - def test_update_state_degraded_some_instances_dead(self): - self.update_state_p.stop() - self.instance_mgr.state = states.CONFIGURED - self.instance_mgr.instances.validate_ports.return_value = \ - ([mock.Mock()], []) # (has_ports, no_ports) - self.instance_mgr.instances.are_alive.return_value = \ - ([mock.Mock()], [mock.Mock()]) # (alive, dead) - - self.assertEqual( - states.DEGRADED, - self.instance_mgr.update_state(self.ctx) - ) - - def test_update_state_up(self): - self.update_state_p.stop() - self.instance_mgr.state = states.BOOTING - self.instance_mgr.instances.validate_ports.return_value = \ - ([mock.Mock()], []) # (has_ports, no_ports) - self.instance_mgr.instances.are_alive.return_value = \ - ([mock.Mock()], []) # (alive, dead) - - self.assertEqual( - states.UP, - self.instance_mgr.update_state(self.ctx) - ) - - def test_update_state_configured(self): - self.update_state_p.stop() - self.instance_mgr.log = mock.Mock( - info=mock.Mock()) - - self.instance_mgr.state = states.CONFIGURED - self.instance_mgr.instances.validate_ports.return_value = \ - ([mock.Mock()], []) # (has_ports, no_ports) - 
self.instance_mgr.instances.are_alive.return_value = \ - ([mock.Mock(booting=False)], []) # (alive, dead) - - self.assertEqual( - states.CONFIGURED, - self.instance_mgr.update_state(self.ctx) - ) - - self.instance_mgr.update_state(self.ctx), - self.instance_mgr.update_state(self.ctx), - self.instance_mgr.update_state(self.ctx), - # ensure the boot was logged only once - self.assertEqual(1, len(self.instance_mgr.log.info.call_args_list)) - - @mock.patch('time.sleep') - def test_boot_success(self, sleep): - self.next_state = states.UP - self.instance_mgr.boot(self.ctx) - self.assertEqual(states.BOOTING, self.instance_mgr.state) - self.instance_mgr.instances.create.assert_called_with( - self.ctx) - self.assertEqual(1, self.instance_mgr.attempts) - - @mock.patch('time.sleep') - def test_boot_instance_deleted(self, sleep): - self.instance_mgr.instances.__nonzero__.return_value = False - self.instance_mgr.boot(self.ctx) - # a deleted VM should reset the vm mgr state and not as a failed - # attempt - self.assertEqual(0, self.instance_mgr.attempts) - - @mock.patch('time.sleep') - def test_boot_exception(self, sleep): - self.instance_mgr.instances.create.side_effect = RuntimeError - self.instance_mgr.boot(self.ctx) - self.assertEqual(states.DOWN, self.instance_mgr.state) - self.instance_mgr.instances.create.assert_called_with( - self.ctx) - self.assertEqual(1, self.instance_mgr.attempts) - - def test_stop_success(self): - self.instance_mgr.state = states.UP - instance = instance_info() - self.set_instances_container_mocks( - instances=[instance], - mocks=[ - ('destroy', mock.Mock()), - ('update_ports', mock.Mock())]) - - self.instance_mgr.stop(self.ctx) - self.instance_mgr.instances.destroy.assert_called_with(self.ctx) - self.instance_mgr.resource.delete_ports.assert_called_once_with( - self.ctx) - self.assertEqual(states.DOWN, self.instance_mgr.state) - - def test_stop_fail(self): - self.instance_mgr.state = states.UP - self.set_instances_container_mocks( - instances=[instance_info()], - mocks=[ - ('destroy', mock.Mock()), - ('update_ports', mock.Mock())]) - self.instance_mgr.instances.destroy.side_effect = Exception - self.instance_mgr.stop(self.ctx) - self.assertEqual(states.UP, self.instance_mgr.state) - self.fake_driver.delete_ports.assert_called_with(self.ctx) - - def test_stop_router_already_deleted_from_neutron(self): - self.instance_mgr.state = states.GONE - instance = instance_info() - self.set_instances_container_mocks( - instances=[instance], - mocks=[ - ('destroy', mock.Mock()), - ('update_ports', mock.Mock())]) - - self.instance_mgr.stop(self.ctx) - self.instance_mgr.instances.destroy.assert_called_with(self.ctx) - self.instance_mgr.resource.delete_ports.assert_called_once_with( - self.ctx) - self.assertEqual(states.GONE, self.instance_mgr.state) - - def test_stop_no_inst_router_already_deleted_from_neutron(self): - self.instance_mgr.state = states.GONE - self.set_instances_container_mocks( - instances=[], - mocks=[ - ('destroy', mock.Mock()), - ('update_ports', mock.Mock())]) - self.instance_mgr.stop(self.ctx) - self.fake_driver.delete_ports.assert_called_with(self.ctx) - self.assertEqual(states.GONE, self.instance_mgr.state) - - def test_stop_instance_already_deleted_from_nova(self): - self.instance_mgr.state = states.RESTART - self.set_instances_container_mocks( - instances=[], - mocks=[ - ('destroy', mock.Mock()), - ('update_ports', mock.Mock())]) - - self.instance_mgr.stop(self.ctx) - self.fake_driver.delete_ports.assert_called_with(self.ctx) - self.assertEqual(states.DOWN, 
self.instance_mgr.state) - - def test_configure_mismatched_interfaces(self): - self.instance_mgr.instances.verify_interfaces.return_value = False - self.assertEqual( - states.REPLUG, - self.instance_mgr.configure(self.ctx) - ) - - def test_configure_gone(self): - self.fake_driver.get_state.return_value = states.GONE - self.assertEqual( - states.GONE, self.instance_mgr.configure(self.ctx)) - - def test_configure(self): - self.instance_mgr.instances.verify_interfaces.return_value = True - self.instance_mgr.instances.configure.return_value = states.RESTART - self.assertEqual( - states.RESTART, - self.instance_mgr.configure(self.ctx) - ) - self.instance_mgr.instances.verify_interfaces.assert_called_with( - self.fake_driver.ports - ) - self.instance_mgr.instances.configure.assert_called_with(self.ctx) - - @mock.patch.object(instance_manager.InstanceManager, - '_wait_for_interface_hotplug') - def test_replug_add_new_port_success(self, wait_for_hotplug): - self.instance_mgr.state = states.REPLUG - instance = instance_info() - get_interfaces = mock.Mock( - return_value={ - instance: [ - {'lladdr': fake_mgt_port.mac_address}, - {'lladdr': fake_ext_port.mac_address}, - {'lladdr': fake_int_port.mac_address}] - } - ) - self.set_instances_container_mocks( - instances=[instance], mocks=[('get_interfaces', get_interfaces)]) - - fake_instance = mock.MagicMock() - self.ctx.nova_client.get_instance_by_id = mock.Mock( - return_value=fake_instance) - - fake_new_port = fake_add_port - self.fake_driver.ports.append(fake_new_port) - self.ctx.neutron.create_vrrp_port.return_value = fake_new_port - - self.fake_driver.get_interfaces.return_value = [ - {'lladdr': fake_mgt_port.mac_address}, - {'lladdr': fake_ext_port.mac_address}, - {'lladdr': fake_int_port.mac_address}, - {'lladdr': fake_new_port.mac_address}, - ] - - wait_for_hotplug.return_value = True - self.instance_mgr.replug(self.ctx) - - self.ctx.neutron.create_vrrp_port.assert_called_with( - self.fake_driver.id, 'additional-net' - ) - self.assertEqual(states.REPLUG, self.instance_mgr.state) - fake_instance.interface_attach.assert_called_once_with( - fake_new_port.id, None, None - ) - self.assertIn(fake_new_port, instance.ports) - - def test_replug_add_new_port_failure(self): - self.instance_mgr.state = states.REPLUG - instance = instance_info() - get_interfaces = mock.Mock( - return_value={ - instance: [ - {'lladdr': fake_mgt_port.mac_address}, - {'lladdr': fake_ext_port.mac_address}, - {'lladdr': fake_int_port.mac_address}] - } - ) - - self.set_instances_container_mocks( - instances=[instance], - mocks=[('get_interfaces', get_interfaces)] - ) - self.fake_driver.get_interfaces.return_value = [ - {'lladdr': fake_mgt_port.mac_address}, - {'lladdr': fake_ext_port.mac_address}, - {'lladdr': fake_int_port.mac_address} - ] - fake_instance = mock.MagicMock() - fake_instance.interface_attach = mock.Mock( - side_effect=Exception, - ) - self.ctx.nova_client.get_instance_by_id = mock.Mock( - return_value=fake_instance) - - fake_new_port = fake_add_port - self.fake_driver.ports.append(fake_new_port) - self.ctx.neutron.create_vrrp_port.return_value = fake_new_port - self.instance_mgr.replug(self.ctx) - self.assertEqual(states.RESTART, self.instance_mgr.state) - - fake_instance.interface_attach.assert_called_once_with( - fake_new_port.id, None, None) - - @mock.patch.object(instance_manager.InstanceManager, - '_wait_for_interface_hotplug') - def test_replug_add_new_port_failed_degraded(self, wait_for_hotplug): - self.conf.hotplug_timeout = 2 - self.instance_mgr.state = 
states.REPLUG - instance_1 = instance_info() - instance_2 = instance_info() - get_interfaces = mock.Mock( - return_value={ - instance_1: [ - {'lladdr': fake_mgt_port.mac_address}, - {'lladdr': fake_ext_port.mac_address}, - {'lladdr': fake_int_port.mac_address}], - instance_2: [ - {'lladdr': fake_mgt_port.mac_address}, - {'lladdr': fake_ext_port.mac_address}, - {'lladdr': fake_int_port.mac_address}] - } - ) - - self.set_instances_container_mocks( - instances=[instance_1, instance_2], - mocks=[('get_interfaces', get_interfaces)]) - self.instance_mgr.instances.update({ - i.id_: i for i in [instance_1, instance_2] - }) - - instances = [] - for i in range(2): - fake_instance = mock.MagicMock() - fake_instance.interface_attach = mock.Mock() - instances.append(fake_instance) - - instances[1].interface_attach.side_effect = Exception - self.ctx.nova_client.get_instance_by_id.side_effect = instances - - fake_new_port = fake_add_port - self.fake_driver.ports.append(fake_new_port) - self.ctx.neutron.create_vrrp_port.return_value = fake_new_port - - wait_for_hotplug.return_value = True - self.instance_mgr.replug(self.ctx) - self.assertEqual(states.DEGRADED, self.instance_mgr.state) - - for instance in instances: - instance.interface_attach.assert_called_with( - fake_new_port.id, None, None) - self.assertNotIn(instances[1], self.instance_mgr.instances.values()) - - @mock.patch.object(instance_manager.InstanceManager, - '_wait_for_interface_hotplug') - def test_replug_add_new_port_hotplug_failed_degraded(self, - wait_for_hotplug): - self.instance_mgr.state = states.REPLUG - instance_1 = instance_info() - instance_2 = instance_info() - get_interfaces = mock.Mock( - return_value={ - instance_1: [ - {'lladdr': fake_mgt_port.mac_address}, - {'lladdr': fake_ext_port.mac_address}, - {'lladdr': fake_int_port.mac_address}], - instance_2: [ - {'lladdr': fake_mgt_port.mac_address}, - {'lladdr': fake_ext_port.mac_address}, - {'lladdr': fake_int_port.mac_address}] - } - ) - - self.set_instances_container_mocks( - instances=[instance_1, instance_2], - mocks=[('get_interfaces', get_interfaces)]) - - fake_new_port = fake_add_port - - instances = [] - for i in range(2): - fake_instance = mock.MagicMock() - fake_instance.interface_attach = mock.Mock() - instances.append(fake_instance) - self.ctx.nova_client.get_instance_by_id.side_effect = instances - - fake_new_port = fake_add_port - self.fake_driver.ports.append(fake_new_port) - self.ctx.neutron.create_vrrp_port.return_value = fake_new_port - - # the second instance fails to hotplug - wait_for_hotplug.side_effect = [True, False] - - self.instance_mgr.replug(self.ctx) - self.assertEqual(states.DEGRADED, self.instance_mgr.state) - - for instance in instances: - instance.interface_attach.assert_called_with( - fake_new_port.id, None, None) - self.assertNotIn(instances[1], self.instance_mgr.instances.values()) - - @mock.patch.object(instance_manager.InstanceManager, - '_wait_for_interface_hotplug') - def test_replug_remove_port_success(self, wait_for_hotplug): - self.instance_mgr.state = states.REPLUG - - self.fake_driver.ports = [fake_ext_port, fake_int_port] - - instance_1 = instance_info() - instance_1.ports.append(fake_add_port) - - get_interfaces = mock.Mock( - return_value={ - # Instance contains an extra port, it will be removed - instance_1: [ - {'lladdr': fake_mgt_port.mac_address}, - {'lladdr': fake_ext_port.mac_address}, - {'lladdr': fake_int_port.mac_address}, - {'lladdr': fake_add_port.mac_address}, - ], - } - ) - self.set_instances_container_mocks( - 
instances=[instance_1], - mocks=[('get_interfaces', get_interfaces)]) - - fake_instance = mock.MagicMock() - self.ctx.nova_client.get_instance_by_id = mock.Mock( - return_value=fake_instance) - - wait_for_hotplug.return_value = True - self.instance_mgr.replug(self.ctx) - self.assertEqual(states.REPLUG, self.instance_mgr.state) - fake_instance.interface_detach.assert_called_once_with( - fake_add_port.id) - self.assertNotIn(fake_add_port, instance_1.ports) - - def test_replug_remove_port_failure(self): - self.instance_mgr.state = states.REPLUG - - self.fake_driver.ports = [fake_ext_port, fake_int_port] - - instance_1 = instance_info() - instance_1.ports.append(fake_add_port) - - get_interfaces = mock.Mock( - return_value={ - # Instance contains an extra port, it will be removed - instance_1: [ - {'lladdr': fake_mgt_port.mac_address}, - {'lladdr': fake_ext_port.mac_address}, - {'lladdr': fake_int_port.mac_address}, - {'lladdr': fake_add_port.mac_address}], - } - ) - self.set_instances_container_mocks( - instances=[instance_1], - mocks=[('get_interfaces', get_interfaces)]) - - fake_instance = mock.MagicMock() - self.ctx.nova_client.get_instance_by_id = mock.Mock( - return_value=fake_instance) - fake_instance.interface_detach.side_effect = Exception - - self.instance_mgr.replug(self.ctx) - self.assertEqual(states.RESTART, - self.instance_mgr.state) - fake_instance.interface_detach.assert_called_once_with( - fake_add_port.id - ) - - @mock.patch.object(instance_manager.InstanceManager, - '_wait_for_interface_hotplug') - def test_replug_remove_port_hotplug_failed(self, wait_for_hotplug): - self.instance_mgr.state = states.REPLUG - - self.fake_driver.ports = [fake_ext_port, fake_int_port] - - instance_1 = instance_info() - instance_1.ports.append(fake_add_port) - - get_interfaces = mock.Mock( - return_value={ - # Instance contains an extra port, it will be removed - instance_1: [ - {'lladdr': fake_mgt_port.mac_address}, - {'lladdr': fake_ext_port.mac_address}, - {'lladdr': fake_int_port.mac_address}, - {'lladdr': fake_add_port.mac_address} - ], - } - ) - self.set_instances_container_mocks( - instances=[instance_1], - mocks=[('get_interfaces', get_interfaces)]) - - fake_instance = mock.MagicMock() - self.ctx.nova_client.get_instance_by_id = mock.Mock( - return_value=fake_instance) - - wait_for_hotplug.return_value = False - self.instance_mgr.replug(self.ctx) - self.assertEqual(states.RESTART, - self.instance_mgr.state) - fake_instance.interface_detach.assert_called_once_with( - fake_add_port.id - ) - - def test_wait_for_interface_hotplug_true(self): - instance = instance_info() - self.fake_driver.get_interfaces.side_effect = [ - [ - {'lladdr': fake_mgt_port.mac_address}, - {'lladdr': fake_ext_port.mac_address}, - ], - [ - {'lladdr': fake_mgt_port.mac_address}, - {'lladdr': fake_ext_port.mac_address}, - ], - [ - {'lladdr': fake_mgt_port.mac_address}, - {'lladdr': fake_ext_port.mac_address}, - {'lladdr': fake_int_port.mac_address}, - ], - ] - self.assertEqual( - True, self.instance_mgr._wait_for_interface_hotplug(instance)) - self.assertEqual( - 3, len(self.fake_driver.get_interfaces.call_args_list)) - - def test_wait_for_interface_hotplug_false(self): - self.conf.hotplug_timeout = 5 - instance = instance_info() - self.fake_driver.get_interfaces.side_effect = [ - [ - {'lladdr': fake_mgt_port.mac_address}, - {'lladdr': fake_ext_port.mac_address}, - ] - for i in six.moves.range(5)] - self.assertEqual( - False, self.instance_mgr._wait_for_interface_hotplug(instance)) - self.assertEqual( - 4, 
len(self.fake_driver.get_interfaces.call_args_list)) - - def test_set_error_when_booting(self): - self.instance_mgr.state = states.BOOTING - self.instance_mgr.set_error(self.ctx) - self.fake_driver.synchronize_state.assert_called_once_with( - self.ctx, state='error') - self.assertEqual(states.ERROR, self.instance_mgr.state) - - def test_clear_error_when_gone(self): - self.instance_mgr.state = states.GONE - self.instance_mgr.clear_error(self.ctx) - self.fake_driver.synchronize_state(self.ctx, 'error') - self.assertEqual(states.DOWN, self.instance_mgr.state) - - @mock.patch('time.sleep') - def test_boot_success_after_error(self, sleep): - self.next_state = states.UP - rtr = mock.sentinel.router - self.ctx.neutron.get_router_detail.return_value = rtr - rtr.id = 'ROUTER1' - rtr.management_port = None - rtr.external_port = None - rtr.ports = mock.MagicMock() - rtr.ports.__iter__.return_value = [] - self.instance_mgr.set_error(self.ctx) - self.instance_mgr.boot(self.ctx) - self.assertEqual(states.BOOTING, self.instance_mgr.state) - self.instance_mgr.instances.create.assert_called_with(self.ctx) - - def test_error_cooldown(self): - self.config(error_state_cooldown=30) - self.assertIsNone(self.instance_mgr.last_error) - self.assertFalse(self.instance_mgr.error_cooldown) - - self.instance_mgr.state = states.ERROR - self.instance_mgr.last_error = datetime.utcnow() - timedelta(seconds=1) - self.assertTrue(self.instance_mgr.error_cooldown) - - self.instance_mgr.last_error = datetime.utcnow() - timedelta(minutes=5) - self.assertFalse(self.instance_mgr.error_cooldown) - - def test_ensure_cache(self): - self.set_instances_container_mocks(mocks=[ - ('update_ports', mock.Mock()) - ]) - self.instance_mgr.instances['fake_instance_id1'] = 'stale_instance1' - self.instance_mgr.instances['fake_instance_id2'] = 'stale_instance2' - - fake_inst_1 = mock.Mock(id_='fake_instance_id1') - fake_inst_2 = mock.Mock(id_='fake_instance_id2') - - self.ctx.nova_client.get_instances_for_obj.return_value = [ - fake_inst_1, fake_inst_2] - - def ensured_cache(self, ctx): - pass - - wrapped = instance_manager.ensure_cache(ensured_cache) - wrapped(self.instance_mgr, self.ctx) - exp_updated_instances = { - 'fake_instance_id1': fake_inst_1, - 'fake_instance_id2': fake_inst_2, - } - self.assertEqual( - exp_updated_instances, self.instance_mgr.instances) - self.instance_mgr.instances.update_ports.assert_called_with(self.ctx) - - -class TestBootAttemptCounter(base.RugTestBase): - def setUp(self): - super(TestBootAttemptCounter, self).setUp() - self.c = instance_manager.BootAttemptCounter() - - def test_start(self): - self.c.start() - self.assertEqual(1, self.c._attempts) - self.c.start() - self.assertEqual(2, self.c._attempts) - - def test_reset(self): - self.c._attempts = 2 - self.c.reset() - self.assertEqual(0, self.c._attempts) - - -class TestInstanceGroupManager(base.RugTestBase): - def setUp(self): - super(TestInstanceGroupManager, self).setUp() - self.ctx = fakes.fake_worker_context() - self.fake_driver = fakes.fake_driver() - self.group_mgr = instance_manager.InstanceGroupManager( - log=mock.Mock(), resource=self.fake_driver) - name = 'ak-resource-' + str(uuid.uuid4()) - self.instance_1 = instance_info(mgt_port=fake_mgt_port, - name=name + '_0') - self.instance_2 = instance_info(mgt_port=fake_add_port, - name=name + '_1') - self.instances = [self.instance_1, self.instance_2] - [self.group_mgr.add_instance(i) for i in self.instances] - - def test_validate_ports(self): - self.instance_2.management_port = None - has_ports, no_ports 
= self.group_mgr.validate_ports() - self.assertIn(self.instance_1, has_ports) - self.assertIn(self.instance_2, no_ports) - - def test_are_alive_all_alive(self): - self.fake_driver.is_alive.side_effect = [ - False, False, True, False, True] - alive, dead = self.group_mgr.are_alive() - self.assertEqual(sorted(self.instances), sorted(alive)) - - def test_are_alive_all_dead(self): - self.fake_driver.is_alive.return_value = False - alive, dead = self.group_mgr.are_alive() - self.assertEqual(sorted(self.instances), sorted(dead)) - self.assertEqual([], alive) - - def test_are_alive_some_dead(self): - self.group_mgr = instance_manager.InstanceGroupManager( - log=mock.Mock(), resource=self.fake_driver) - self.instance_1 = instance_info(mgt_port=fake_mgt_port) - self.instance_2 = instance_info(mgt_port=fake_add_port) - instances = [self.instance_1, self.instance_2] - [self.group_mgr.add_instance(i) for i in instances] - - def fake_is_alive(mgt_addr, i1=self.instance_1, i2=self.instance_2): - # tag instance 2 as dead - if mgt_addr == fake_add_port.fixed_ips[0].ip_address: - return False - else: - return True - [self.group_mgr.add_instance(i) for i in instances] - self.fake_driver.is_alive = fake_is_alive - alive, dead = self.group_mgr.are_alive() - self.assertEqual([self.instance_2], dead) - self.assertEqual([self.instance_1], alive) - - def test_update_ports(self): - self.ctx.neutron.get_ports_for_instance.side_effect = [ - ('instance1_mgt_port', ['instance1_inst_port']), - ('instance2_mgt_port', ['instance2_inst_port']), - ] - self.group_mgr.update_ports(self.ctx) - self.assertEqual('instance1_mgt_port', self.instance_1.management_port) - self.assertEqual(['instance1_inst_port'], self.instance_1.ports) - self.assertEqual('instance2_mgt_port', self.instance_2.management_port) - self.assertEqual(['instance2_inst_port'], self.instance_2.ports) - - def test_get_interfaces(self): - self.fake_driver.get_interfaces.side_effect = [ - ['instance1_interfaces'], - ['instance2_interfaces'], - ] - self.group_mgr._alive = [i.id_ for i in self.instances] - interfaces_dict = self.group_mgr.get_interfaces() - self.assertIn( - (self.instance_1, ['instance1_interfaces']), - interfaces_dict.items()) - self.assertIn( - (self.instance_2, ['instance2_interfaces']), - interfaces_dict.items()) - - def test_get_interfaces_skip_dead(self): - self.fake_driver.get_interfaces.side_effect = [ - ['instance1_interfaces'], - ['instance2_interfaces'], - ] - self.group_mgr._alive = [self.instance_1.id_] - interfaces_dict = self.group_mgr.get_interfaces() - self.assertIn( - (self.instance_1, ['instance1_interfaces']), - interfaces_dict.items()) - self.assertNotIn( - (self.instance_2, ['instance2_interfaces']), - interfaces_dict.items()) - - @mock.patch('astara.instance_manager.InstanceGroupManager.get_interfaces') - def test_verify_interfaces_true(self, fake_get_interfaces): - fake_get_interfaces.return_value = { - self.instance_1: [ - {'lladdr': p.mac_address} - for p in self.instance_1.ports + - [self.instance_1.management_port] - ], - self.instance_2: [ - {'lladdr': p.mac_address} - for p in self.instance_2.ports + - [self.instance_2.management_port] - ] - } - - ports = [fake_ext_port, fake_int_port] - self.assertTrue(self.group_mgr.verify_interfaces(ports)) - - @mock.patch('astara.instance_manager.InstanceGroupManager.get_interfaces') - def test_verify_interfaces_false_missing_inst_port(self, - fake_get_interfaces): - fake_get_interfaces.return_value = { - self.instance_1: [ - {'lladdr': p.mac_address} - for p in 
self.instance_1.ports + - [self.instance_1.management_port] - ], - self.instance_2: [ - {'lladdr': p.mac_address} - for p in self.instance_2.ports + - [self.instance_2.management_port] - ] - } - - ports = [fake_ext_port, fake_int_port, fake_add_port] - self.assertFalse(self.group_mgr.verify_interfaces(ports)) - - @mock.patch('astara.instance_manager.InstanceGroupManager.get_interfaces') - def test_verify_interfaces_false_missing_macs(self, fake_get_interfaces): - fake_get_interfaces.return_value = { - self.instance_1: [ - {'lladdr': p.mac_address} - for p in self.instance_1.ports - ], - self.instance_2: [ - {'lladdr': p.mac_address} - for p in self.instance_2.ports] - } - - ports = [fake_ext_port, fake_int_port] - self.assertFalse(self.group_mgr.verify_interfaces(ports)) - - def test__update_config_success(self): - self.fake_driver.update_config.side_effect = [ - Exception, Exception, True] - self.assertTrue(self.group_mgr._update_config(self.instance_1, {})) - self.fake_driver.update_config.assert_called_with( - self.instance_1.management_address, {}) - - def test__update_config_fail(self): - self.fake_driver.update_config.side_effect = Exception - self.assertFalse(self.group_mgr._update_config(self.instance_1, {})) - self.fake_driver.update_config.assert_called_with( - self.instance_1.management_address, {}) - - def test__ha_config(self): - instance_1_ha_config = self.group_mgr._ha_config(self.instance_1) - instance_2_ha_config = self.group_mgr._ha_config(self.instance_2) - self.assertEqual( - { - 'priority': 100, - 'peers': [self.instance_2.management_address], - }, - instance_1_ha_config) - self.assertEqual( - { - 'priority': 50, - 'peers': [self.instance_1.management_address], - }, - instance_2_ha_config) - - @mock.patch('astara.instance_manager.InstanceGroupManager._update_config') - @mock.patch('astara.instance_manager.InstanceGroupManager._ha_config') - @mock.patch('astara.instance_manager._generate_interface_map') - @mock.patch('astara.instance_manager.InstanceGroupManager.get_interfaces') - def test_configure_success(self, fake_get_interfaces, fake_gen_iface_map, - fake_ha_config, fake_update_config): - fake_ha_config.return_value = {'fake_ha_config': 'peers'} - self.fake_driver.is_ha = True - self.fake_driver.build_config.side_effect = [ - {'instance_1_config': 'config'}, - {'instance_2_config': 'config'}, - ] - fake_get_interfaces.return_value = collections.OrderedDict([ - (self.instance_1, [ - {'lladdr': p.mac_address} for p in self.instance_1.ports + - [self.instance_1.management_port]]), - (self.instance_2, [ - {'lladdr': p.mac_address} for p in self.instance_2.ports + - [self.instance_2.management_port]]) - ]) - - fake_update_config.return_value = True - self.assertEqual(states.CONFIGURED, self.group_mgr.configure(self.ctx)) - self.assertIn( - mock.call( - self.instance_1, - { - 'instance_1_config': 'config', - 'ha_config': {'fake_ha_config': 'peers'} - }), - fake_update_config.call_args_list) - self.assertIn( - mock.call( - self.instance_2, - { - 'instance_2_config': 'config', - 'ha_config': {'fake_ha_config': 'peers'} - }), - fake_update_config.call_args_list) - - @mock.patch('astara.instance_manager.InstanceGroupManager._update_config') - @mock.patch('astara.instance_manager.InstanceGroupManager._ha_config') - @mock.patch('astara.instance_manager._generate_interface_map') - @mock.patch('astara.instance_manager.InstanceGroupManager.get_interfaces') - def test_configure_failed_all(self, fake_get_interfaces, - fake_gen_iface_map, fake_ha_config, - fake_update_config): - 
fake_ha_config.return_value = {'fake_ha_config': 'peers'} - self.fake_driver.is_ha = True - self.fake_driver.build_config.side_effect = [ - {'instance_1_config': 'config'}, - {'instance_2_config': 'config'}, - ] - fake_get_interfaces.return_value = collections.OrderedDict([ - (self.instance_1, [ - {'lladdr': p.mac_address} for p in self.instance_1.ports + - [self.instance_1.management_port]]), - (self.instance_2, [ - {'lladdr': p.mac_address} for p in self.instance_2.ports + - [self.instance_2.management_port]]) - ]) - - fake_update_config.return_value = False - self.assertEqual(states.RESTART, self.group_mgr.configure(self.ctx)) - - @mock.patch('astara.instance_manager.InstanceGroupManager._update_config') - @mock.patch('astara.instance_manager.InstanceGroupManager._ha_config') - @mock.patch('astara.instance_manager._generate_interface_map') - @mock.patch('astara.instance_manager.InstanceGroupManager.get_interfaces') - def test_configure_failed_some(self, fake_get_interfaces, - fake_gen_iface_map, fake_ha_config, - fake_update_config): - fake_ha_config.return_value = {'fake_ha_config': 'peers'} - self.fake_driver.is_ha = True - self.fake_driver.build_config.side_effect = [ - {'instance_1_config': 'config'}, - {'instance_2_config': 'config'}, - ] - fake_get_interfaces.return_value = collections.OrderedDict([ - (self.instance_1, [ - {'lladdr': p.mac_address} for p in self.instance_1.ports + - [self.instance_1.management_port]]), - (self.instance_2, [ - {'lladdr': p.mac_address} for p in self.instance_2.ports + - [self.instance_2.management_port]])]) - - fake_update_config.side_effect = [False, True] - self.assertEqual(states.DEGRADED, self.group_mgr.configure(self.ctx)) - - @mock.patch('astara.instance_manager.InstanceGroupManager._update_config') - @mock.patch('astara.instance_manager.InstanceGroupManager._ha_config') - @mock.patch('astara.instance_manager._generate_interface_map') - @mock.patch('astara.instance_manager.InstanceGroupManager.get_interfaces') - def test_configure_degraded_waiting(self, fake_get_interfaces, - fake_gen_iface_map, fake_ha_config, - fake_update_config): - fake_ha_config.return_value = {'fake_ha_config': 'peers'} - self.fake_driver.is_ha = True - self.fake_driver.build_config.side_effect = [ - {'instance_1_config': 'config'}, - {'instance_2_config': 'config'}, - ] - fake_get_interfaces.return_value = collections.OrderedDict([ - (self.instance_1, [ - {'lladdr': p.mac_address} for p in self.instance_1.ports + - [self.instance_1.management_port]]) - ]) - - fake_update_config.return_value = True - self.assertEqual(states.DEGRADED, self.group_mgr.configure(self.ctx)) - - def test_delete(self): - self.group_mgr.delete(self.instance_2) - self.assertNotIn( - self.instance_2, self.group_mgr.instances) - - def test_refresh(self): - self.ctx.nova_client.update_instance_info.return_value = True - self.group_mgr.refresh(self.ctx) - [self.assertIn(mock.call(i), - self.ctx.nova_client.update_instance_info.call_args_list) - for i in self.instances] - [self.assertIn(i, self.group_mgr.instances) for i in self.instances] - - def test_refresh_instance_gone(self): - self.ctx.nova_client.update_instance_info.side_effect = [True, None] - self.group_mgr.refresh(self.ctx) - [self.assertIn(mock.call(i), - self.ctx.nova_client.update_instance_info.call_args_list) - for i in self.instances] - self.assertIn(self.instance_1, self.group_mgr.instances) - self.assertNotIn(self.instance_2, self.group_mgr.instances) - - def test_destroy(self): - self.group_mgr.destroy(self.ctx) - 
self.ctx.nova_client.delete_instances_and_wait.assert_called_with( - self.group_mgr.instances) - - def test_remove(self): - self.group_mgr.remove(self.ctx, self.instance_1) - self.ctx.nova_client.destroy_instance.assert_called_with( - self.instance_1) - self.assertNotIn(self.instance_1, self.group_mgr.instances) - - def test_next_instance_index(self): - self.assertEqual( - 2, self.group_mgr.next_instance_index) - - def test_next_instance_index_empty(self): - group_mgr = instance_manager.InstanceGroupManager( - log=mock.Mock(), resource=self.fake_driver) - self.assertEqual( - 0, group_mgr.next_instance_index) - - def test_create_all(self): - [self.group_mgr.delete(i) for i in self.instances] - self.ctx.nova_client.boot_instance.side_effect = [ - instance_info(name='new-instance_0'), - instance_info(name='new-instance_1'), - ] - self.group_mgr.create(self.ctx) - self.assertEqual( - 2, len(self.ctx.nova_client.boot_instance.call_args_list)) - - def test_create_some(self): - self.group_mgr.delete(self.instance_1) - self.ctx.nova_client.boot_instance.side_effect = [ - instance_info(name='new-instance_0'), - ] - self.group_mgr.create(self.ctx) - self.assertEqual( - 1, len(self.ctx.nova_client.boot_instance.call_args_list)) - self.ctx.nova_client.boot_instance.assert_called_with( - resource_type=self.fake_driver.RESOURCE_NAME, - prev_instance_info=None, - name='ak-FakeDriver-fake_resource_id_2', - image_uuid=self.fake_driver.image_uuid, - flavor=self.fake_driver.flavor, - make_ports_callback=self.fake_driver.make_ports(self.ctx), - ) - - def test_required_instance_count(self): - self.fake_driver.is_ha = True - self.assertEqual(2, self.group_mgr.required_instance_count) - self.fake_driver.is_ha = False - self.assertEqual(1, self.group_mgr.required_instance_count) - - def test_instance_count(self): - self.assertEqual(2, self.group_mgr.instance_count) - - def test_cluster_degraded_false(self): - self.assertFalse(self.group_mgr.cluster_degraded) - - def test_cluster_degraded_true(self): - self.group_mgr.delete(self.instance_1) - self.assertTrue(self.group_mgr.cluster_degraded) - - def test_add_instance(self): - instance_3 = instance_info() - self.group_mgr.add_instance(instance_3) - self.assertIn(instance_3, self.group_mgr.instances) diff --git a/astara/test/unit/test_main.py b/astara/test/unit/test_main.py deleted file mode 100644 index 9ef119af..00000000 --- a/astara/test/unit/test_main.py +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
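For reference, every replug test above leans on the same polling contract: after attaching or detaching a Nova interface, the manager repeatedly lists the appliance's interfaces until the expected MAC addresses show up, and gives up after a bounded number of attempts. A minimal sketch of that contract, with hypothetical helper names rather than the exact astara implementation:

    import time

    def wait_for_interface_hotplug(get_interfaces, expected_macs,
                                   max_attempts=5, interval=1):
        # get_interfaces: callable returning [{'lladdr': <mac>}, ...]
        # expected_macs: set of MAC addresses the plugged ports should expose
        for _ in range(max_attempts):
            seen = {iface['lladdr'] for iface in get_interfaces()}
            if expected_macs <= seen:
                return True   # every hotplugged port is visible in the guest
            time.sleep(interval)
        return False          # timed out; callers fall back to DEGRADED/RESTART

A False return is what drives the DEGRADED and RESTART transitions asserted in the hotplug-failure tests.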
- -import mock - -from astara import main -from astara import notifications as ak_notifications -from astara.test.unit import base - - -@mock.patch('astara.main.neutron_api') -@mock.patch('astara.main.multiprocessing') -@mock.patch('astara.main.notifications') -@mock.patch('astara.main.scheduler') -@mock.patch('astara.main.populate') -@mock.patch('astara.main.health') -class TestMainPippo(base.RugTestBase): - def test_shuffle_notifications(self, health, populate, scheduler, - notifications, multiprocessing, - neutron_api): - queue = mock.Mock() - queue.get.side_effect = [ - ('9306bbd8-f3cc-11e2-bd68-080027e60b25', 'message'), - KeyboardInterrupt, - ] - sched = scheduler.Scheduler.return_value - main.shuffle_notifications(queue, sched) - sched.handle_message.assert_called_once_with( - '9306bbd8-f3cc-11e2-bd68-080027e60b25', - 'message' - ) - - def test_shuffle_notifications_error( - self, health, populate, scheduler, notifications, - multiprocessing, neutron_api): - queue = mock.Mock() - queue.get.side_effect = [ - ('9306bbd8-f3cc-11e2-bd68-080027e60b25', 'message'), - RuntimeError, - KeyboardInterrupt, - ] - sched = scheduler.Scheduler.return_value - main.shuffle_notifications(queue, sched) - sched.handle_message.assert_called_once_with( - '9306bbd8-f3cc-11e2-bd68-080027e60b25', 'message' - ) - - @mock.patch('astara.main.shuffle_notifications') - def test_ensure_local_service_port(self, shuffle_notifications, health, - populate, scheduler, notifications, - multiprocessing, neutron_api): - main.main(argv=self.argv) - neutron = neutron_api.Neutron.return_value - neutron.ensure_local_service_port.assert_called_once_with() - - @mock.patch('astara.main.shuffle_notifications') - def test_ceilometer_disabled(self, shuffle_notifications, health, - populate, scheduler, notifications, - multiprocessing, neutron_api): - self.test_config.config(enabled=False, group='ceilometer') - notifications.Publisher = mock.Mock(spec=ak_notifications.Publisher) - notifications.NoopPublisher = mock.Mock( - spec=ak_notifications.NoopPublisher) - main.main(argv=self.argv) - self.assertEqual(0, len(notifications.Publisher.mock_calls)) - self.assertEqual(2, len(notifications.NoopPublisher.mock_calls)) - - @mock.patch('astara.main.shuffle_notifications') - def test_ceilometer_enabled(self, shuffle_notifications, health, - populate, scheduler, notifications, - multiprocessing, neutron_api): - self.test_config.config(enabled=True, group='ceilometer') - notifications.Publisher = mock.Mock(spec=ak_notifications.Publisher) - notifications.NoopPublisher = mock.Mock( - spec=ak_notifications.NoopPublisher) - main.main(argv=self.argv) - self.assertEqual(2, len(notifications.Publisher.mock_calls)) - self.assertEqual(0, len(notifications.NoopPublisher.mock_calls)) diff --git a/astara/test/unit/test_notifications.py b/astara/test/unit/test_notifications.py deleted file mode 100644 index 3d3f25fb..00000000 --- a/astara/test/unit/test_notifications.py +++ /dev/null @@ -1,253 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - - -import mock -import uuid - -import multiprocessing - -from astara import commands -from astara import event -from astara import notifications -from astara.test.unit import base - - -CTXT = { - 'read_only': False, - 'domain': None, - 'project_name': 'service', - 'user_id': 'f196eadd630f46bb981b304286689f53', - 'show_deleted': False, - 'roles': ['service'], - 'user_identity': 'f196eadd630f46bb981b304286689f53', - 'project_domain': None, - 'tenant_name': 'service', - 'auth_token': '736751a25b364f28b61947f99e8e1e3f', - 'resource_uuid': None, - 'project_id': '29987e1906a941f8b45c5a3ca38cec8e', - 'tenant_id': '29987e1906a941f8b45c5a3ca38cec8e', - 'is_admin': True, 'user': 'f196eadd630f46bb981b304286689f53', - 'request_id': 'req-ece574ac-b04b-4289-a367-a8b559f72df3', - 'tenant': '29987e1906a941f8b45c5a3ca38cec8e', - 'user_domain': None, - 'timestamp': '2015-06-12 19:00:35.649874', - 'read_deleted': 'no', - 'user_name': 'neutron' -} - - -class TestGetTenantID(base.RugTestBase): - def test_notification_tenant_id_from_resource_dict(self): - for res in ('router', 'port', 'subnet'): - payload = { - res: { - u'admin_state_up': True, - u'device_id': u'', - u'device_owner': u'', - u'fixed_ips': [{ - u'ip_address': u'192.168.123.3', - u'subnet_id': u'53d8a76a-3e1a-43e0-975e-83a4b464d18c', # noqa - }], - u'id': u'bbd92f5a-5a1d-4ec5-9272-8e4dd5f0c084', - u'mac_address': u'fa:16:3e:f4:81:a9', - u'name': u'', - u'network_id': u'c3a30111-dd52-405c-84b2-4d62068e2d35', # noqa - u'security_groups': [u'5124be1c-b2d5-47e6-ac62-411a0ea028c8'], # noqa - u'status': u'DOWN', - u'tenant_id': u'c25992581e574b6485dbfdf39a3df46c', - } - } - tenant_id = notifications._get_tenant_id_for_message(CTXT, payload) - self.assertEqual('c25992581e574b6485dbfdf39a3df46c', tenant_id) - - def test_notification_project_id_from_context(self): - for ctxt_key in ('tenant_id', 'project_id'): - payload = { - 'we_dont_care': { - u'admin_state_up': True, - u'device_id': u'', - u'device_owner': u'', - u'fixed_ips': [{ - u'ip_address': u'192.168.123.3', - u'subnet_id': u'53d8a76a-3e1a-43e0-975e-83a4b464d18c', # noqa - }], - u'id': u'bbd92f5a-5a1d-4ec5-9272-8e4dd5f0c084', - u'mac_address': u'fa:16:3e:f4:81:a9', - u'name': u'', - u'network_id': u'c3a30111-dd52-405c-84b2-4d62068e2d35', # noqa - u'security_groups': [u'5124be1c-b2d5-47e6-ac62-411a0ea028c8'], # noqa - u'status': u'DOWN', - u'tenant_id': u'c25992581e574b6485dbfdf39a3df46c', - } - } - tenant_id = notifications._get_tenant_id_for_message(CTXT, payload) - self.assertEqual(CTXT[ctxt_key], tenant_id) - - -class TestGetCRUD(base.RugTestBase): - def setUp(self): - super(TestGetCRUD, self).setUp() - self.queue = multiprocessing.Queue() - self.notifications_endpoint = notifications.NotificationsEndpoint( - self.queue) - self.l3_rpc_endpoint = notifications.L3RPCEndpoint(self.queue) - - def _get_event_notification(self, event_type, payload=None): - # Creates a message with event_type and payload, sends it through the - # notifications Endpoint, asserts on its existence in the notifications - # queue, pops it off and returns it for further assertions - payload = payload or {} - with mock.patch.object(notifications, - '_get_tenant_id_for_message') as fake_tenant: - - # events derive tenant id from different parts of the message - # depending on its format. just mock it out here for consistency - # across tests.
and use a unique id per message to ensure we're - # popping off the correct message. - fake_tenant_id = uuid.uuid4().hex - fake_tenant.return_value = fake_tenant_id - self.notifications_endpoint.info( - ctxt=CTXT, - publisher_id='network.astara', - event_type=event_type, - payload=payload, metadata={}) - if not self.queue.qsize(): - # message was discarded and not queued - return None - tenant, event = self.queue.get() - self.assertEqual(fake_tenant_id, tenant) - return event - - def _get_event_l3_rpc(self, method, **kwargs): - self.assertTrue(hasattr(self.l3_rpc_endpoint, method)) - f = getattr(self.l3_rpc_endpoint, method) - kwargs['ctxt'] = CTXT - with mock.patch.object(notifications, - '_get_tenant_id_for_message') as fake_tenant: - fake_tenant_id = uuid.uuid4().hex - fake_tenant.return_value = fake_tenant_id - f(**kwargs) - if not self.queue.qsize(): - return None - tenant, event = self.queue.get() - self.assertEqual(fake_tenant_id, tenant) - return event - - def test_rpc_router_deleted(self): - e = self._get_event_l3_rpc( - method='router_deleted', - router_id='fake_router_id') - self.assertEqual(event.DELETE, e.crud) - self.assertEqual('fake_router_id', e.resource.id) - - def test_notification_port(self): - e = self._get_event_notification('port.create.start') - self.assertIsNone(e) - e = self._get_event_notification('port.create.end') - self.assertEqual(event.UPDATE, e.crud) - e = self._get_event_notification('port.change.start') - self.assertIsNone(e) - e = self._get_event_notification('port.change.end') - self.assertEqual(event.UPDATE, e.crud) - e = self._get_event_notification('port.delete.start') - self.assertIsNone(e) - e = self._get_event_notification('port.delete.end') - self.assertEqual(event.UPDATE, e.crud) - - def test_notification_subnet(self): - e = self._get_event_notification('subnet.create.start') - self.assertFalse(e) - e = self._get_event_notification('subnet.create.end') - self.assertEqual(event.UPDATE, e.crud) - e = self._get_event_notification('subnet.change.start') - self.assertFalse(e) - e = self._get_event_notification('subnet.change.end') - self.assertEqual(event.UPDATE, e.crud) - e = self._get_event_notification('subnet.delete.start') - self.assertFalse(e) - e = self._get_event_notification('subnet.delete.end') - self.assertEqual(event.UPDATE, e.crud) - - def test_notification_router(self): - e = self._get_event_notification('router.create.start') - self.assertFalse(e) - e = self._get_event_notification('router.create.end') - self.assertEqual(event.CREATE, e.crud) - e = self._get_event_notification('router.change.start') - self.assertFalse(e) - e = self._get_event_notification('router.change.end') - self.assertEqual(event.UPDATE, e.crud) - e = self._get_event_notification('router.delete.start') - self.assertFalse(e) - e = self._get_event_notification('router.delete.end') - self.assertEqual(event.DELETE, e.crud) - - def test_notification_router_id(self): - payload = { - u'router': { - u'admin_state_up': True, - u'external_gateway_info': None, - u'id': u'f95fb32d-0072-4675-b4bd-61d829a46aca', - u'name': u'r2', - u'ports': [], - u'status': u'ACTIVE', - u'tenant_id': u'c25992581e574b6485dbfdf39a3df46c', - } - } - e = self._get_event_notification('router.create.end', payload) - self.assertEqual(u'f95fb32d-0072-4675-b4bd-61d829a46aca', - e.resource.id) - - def test_interface_create_and_delete(self): - for action in ('create', 'delete'): - event_type = 'router.interface.%s' % action - - payload = { - 'router.interface': { - 'subnet_id':
u'0535072e-6ef4-4916-b1f5-05fab4da3d0c', - 'tenant_id': u'c2a1399efbed41e5be2115afa5b5ec25', - 'port_id': u'63363e5f-59b7-49ca-b619-96c16883b543', - 'id': u'58868681-4a58-4f69-8dc0-b20955e7923f' - } - } - e = self._get_event_notification(event_type, payload) - self.assertEqual(event.UPDATE, e.crud) - self.assertEqual( - u'58868681-4a58-4f69-8dc0-b20955e7923f', - e.resource.id - ) - - def test_notification_astara(self): - e = self._get_event_notification('astara.bandwidth.used') - self.assertIsNone(e) - - def test_notification_cmd_poll(self): - event_type = 'astara.command' - payload = {'command': commands.POLL} - self.notifications_endpoint.info( - ctxt=CTXT, - publisher_id='network.astara', - event_type=event_type, - payload=payload, metadata={}) - expected_event = event.Event( - resource=event.Resource(driver='*', id='*', tenant_id='*'), - crud=event.POLL, - body={}, - ) - tenant, e = self.queue.get() - self.assertEqual('*', tenant) - self.assertEqual(expected_event, e) diff --git a/astara/test/unit/test_populate.py b/astara/test/unit/test_populate.py deleted file mode 100644 index b97a9fee..00000000 --- a/astara/test/unit/test_populate.py +++ /dev/null @@ -1,94 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
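The TestGetCRUD cases above pin down a small translation table: '.start' notifications are dropped, router lifecycle events map one-to-one onto CRUD actions, and the remaining '.end' and interface events collapse into a plain update. A toy version of that mapping, illustrative only and not the exact astara code:

    ROUTER_CRUD = {
        'router.create.end': 'create',
        'router.change.end': 'update',
        'router.delete.end': 'delete',
    }

    def crud_for(event_type):
        # In-progress ('.start') notifications never reach the worker queue.
        if event_type.endswith('.start'):
            return None
        if event_type in ROUTER_CRUD:
            return ROUTER_CRUD[event_type]
        # Port/subnet '.end' events and router.interface.* changes all
        # force a config resync, so they degrade to a generic update.
        if event_type.endswith('.end') or event_type.startswith('router.interface.'):
            return 'update'
        return None  # anything unrecognized (e.g. astara.bandwidth.used) is dropped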
- - -import mock - -from six.moves import range -from astara.test.unit import base -from astara.test.unit import fakes - -from astara import populate -from astara import event -from astara.event import Resource - - -class FakePopulateDriver(object): - pre_populate_hook = mock.Mock() - - -class TestPrePopulateWorkers(base.RugTestBase): - def setUp(self): - super(TestPrePopulateWorkers, self).setUp() - - @mock.patch('astara.drivers.enabled_drivers') - def test_pre_populate_with_resources(self, enabled_drivers): - fake_scheduler = mock.Mock() - fake_scheduler.handle_message = mock.Mock() - fake_driver = fakes.fake_driver() - fake_resources = [ - Resource( - id='fake_resource_%s' % i, - tenant_id='fake_tenant_%s' % i, - driver=fake_driver.RESOURCE_NAME, - ) for i in range(2) - ] - fake_driver.pre_populate_hook.return_value = fake_resources - enabled_drivers.return_value = [fake_driver] - populate._pre_populate_workers(fake_scheduler) - for res in fake_resources: - e = event.Event(resource=res, crud=event.POLL, body={}) - call = mock.call(res.tenant_id, e) - self.assertIn(call, fake_scheduler.handle_message.call_args_list) - - @mock.patch('astara.drivers.enabled_drivers') - def test_pre_populate_with_no_resources(self, enabled_drivers): - fake_scheduler = mock.Mock() - fake_scheduler.handle_message = mock.Mock() - fake_driver = fakes.fake_driver() - fake_driver.pre_populate_hook.return_value = [] - enabled_drivers.return_value = [fake_driver] - populate._pre_populate_workers(fake_scheduler) - self.assertFalse(fake_scheduler.handle_message.called) - - @mock.patch('threading.Thread') - def test_pre_populate_workers(self, thread): - sched = mock.Mock() - t = populate.pre_populate_workers(sched) - thread.assert_called_once_with( - target=populate._pre_populate_workers, - args=(sched,), - name='PrePopulateWorkers' - ) - self.assertEqual( - [mock.call.setDaemon(True), mock.call.start()], - t.mock_calls - ) - - @mock.patch('astara.drivers.enabled_drivers') - def test_repopulate(self, enabled_drivers): - drivers = [] - for i in range(2): - driver = mock.Mock() - driver.pre_populate_hook = mock.Mock() - driver.pre_populate_hook.return_value = [ - 'driver_%s_resource' % i, - ] - drivers.append(driver) - enabled_drivers.return_value = drivers - res = populate.repopulate() - self.assertEqual( - set(['driver_0_resource', 'driver_1_resource']), set(res)) diff --git a/astara/test/unit/test_rpc.py b/astara/test/unit/test_rpc.py deleted file mode 100644 index a8c426a7..00000000 --- a/astara/test/unit/test_rpc.py +++ /dev/null @@ -1,171 +0,0 @@ -# Copyright 2015 Akanda, Inc -# -# Author: Akanda, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
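test_pre_populate_workers above fixes the threading contract for startup pre-population: the walk over enabled drivers runs in a named daemon thread so a slow Neutron never blocks service startup or shutdown. A sketch of that contract, assuming a scheduler with a handle_message method as in the tests:

    import threading

    def _pre_populate_workers(scheduler):
        # Stand-in for the real hook: walk enabled drivers and hand each
        # pre-populated resource to the scheduler as a POLL event.
        pass

    def pre_populate_workers(scheduler):
        t = threading.Thread(
            target=_pre_populate_workers,
            args=(scheduler,),
            name='PrePopulateWorkers',
        )
        t.setDaemon(True)  # must not keep the process alive on exit
        t.start()
        return t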
- -import mock -from oslo_config import cfg -from oslo_config import fixture as config_fixture -import oslo_messaging -import testtools - -from astara.common import rpc -from astara import main # noqa -from astara import notifications # noqa - - -class TestRPC(testtools.TestCase): - def setUp(self): - super(TestRPC, self).setUp() - self.config = self.useFixture(config_fixture.Config(cfg.CONF)).config - - def test__deprecated_amqp_url_not_set(self): - self.config(amqp_url=None) - self.assertIsNone(rpc._deprecated_amqp_url()) - - def test__deprecated_amqp_url(self): - self.config(amqp_url='amqp://stackrabbit:secretrabbit@127.0.0.1:/') - self.assertEqual('rabbit://stackrabbit:secretrabbit@127.0.0.1:5672/', - rpc._deprecated_amqp_url()) - - @mock.patch('oslo_messaging.get_transport') - @mock.patch.object(rpc, '_deprecated_amqp_url') - def test_get_transport(self, fake_amqp_url, fake_get_transport): - fake_amqp_url.return_value = 'fake_url' - fake_get_transport.return_value = 'fake_transport' - transport = rpc.get_transport() - self.assertEqual('fake_transport', transport) - fake_get_transport.assert_called_with(conf=cfg.CONF, url='fake_url') - - @mock.patch.object(rpc, 'get_transport') - @mock.patch('oslo_messaging.get_rpc_server') - def test_get_server(self, fake_get_server, fake_get_transport): - fake_get_transport.return_value = 'fake_transport' - fake_get_server.return_value = 'fake_server' - fake_endpoints = [1, 2] - result = rpc.get_server(target='fake_target', endpoints=fake_endpoints) - self.assertEqual('fake_server', result) - fake_get_server.assert_called_with( - transport='fake_transport', - target='fake_target', - endpoints=fake_endpoints, - ) - - @mock.patch('oslo_messaging.Target') - def test_get_target(self, fake_target): - fake_target.return_value = 'fake_target' - target_args = { - 'topic': 'fake_topic', - 'fanout': False, - 'exchange': 'fake_exchange', - 'version': 'fake_version', - 'server': 'fake_server', - } - result = rpc.get_target(**target_args) - self.assertEqual('fake_target', result) - fake_target.assert_called_with(**target_args) - - @mock.patch.object(rpc, 'get_transport') - @mock.patch.object(rpc, 'get_target') - @mock.patch('oslo_messaging.rpc.client.RPCClient') - def test_get_rpc_client(self, fake_client, fake_get_target, - fake_get_transport): - fake_get_target.return_value = 'fake_target' - fake_get_transport.return_value = 'fake_transport' - fake_client.return_value = 'fake_rpc_client' - res = rpc.get_rpc_client(topic='foo_target', exchange='foo_exchange', - version='2.5') - fake_get_target.assert_called_with( - topic='foo_target', exchange='foo_exchange', version='2.5', - fanout=False, - ) - self.assertEqual('fake_rpc_client', res) - fake_client.assert_called_with( - 'fake_transport', 'fake_target' - ) - - @mock.patch.object(rpc, 'get_transport') - @mock.patch('oslo_messaging.notify.Notifier') - def test_get_rpc_notifier(self, fake_notifier, fake_get_transport): - fake_get_transport.return_value = 'fake_transport' - fake_notifier.return_value = 'fake_rpc_notifier' - res = rpc.get_rpc_notifier(topic='foo_topic') - self.assertEqual('fake_rpc_notifier', res) - fake_notifier.assert_called_with( - transport='fake_transport', driver='messaging', topic='foo_topic') - - -@mock.patch.object(rpc, 'get_transport', - mock.MagicMock(return_value='fake_transport')) -@mock.patch.object(rpc, 'get_server', - mock.MagicMock(return_value='fake_server')) -@mock.patch.object(rpc, 'get_target', - mock.MagicMock(return_value='fake_target')) -class 
TestMessagingService(testtools.TestCase): - def setUp(self): - super(TestMessagingService, self).setUp() - self.connection = rpc.MessagingService() - self.config = self.useFixture(config_fixture.Config(cfg.CONF)).config - self.config(host='test_host') - - def test_create_rpc_consumer(self): - endpoints = [] - self.connection._add_server = mock.MagicMock() - self.connection.create_rpc_consumer( - topic='foo_topic', endpoints=endpoints) - rpc.get_target.return_value = 'fake_target' - rpc.get_target.assert_called_with( - topic='foo_topic', fanout=True, server='test_host') - rpc.get_server.assert_called_with('fake_target', endpoints) - self.connection._add_server.assert_called_with('fake_server') - - @mock.patch.object(oslo_messaging, 'get_notification_listener') - def test_create_notification_listener(self, fake_get_listener): - endpoints = [] - self.connection._add_server = mock.MagicMock() - fake_get_listener.return_value = 'fake_listener_server' - self.connection.create_notification_listener( - endpoints=[], exchange='foo_exchange', topic='foo_topic') - self.assertTrue(rpc.get_transport.called) - rpc.get_target.assert_called_with( - topic='foo_topic', fanout=False, exchange='foo_exchange') - fake_get_listener.assert_called_with( - 'fake_transport', ['fake_target'], endpoints, - pool='astara.foo_topic.test_host', executor='threading') - self.connection._add_server.assert_called_with( - 'fake_listener_server') - - def test__add_server(self): - fake_server = mock.MagicMock( - start=mock.MagicMock()) - self.connection._add_server(fake_server) - self.assertIn( - fake_server, - self.connection._servers) - - def test_start(self): - fake_server = mock.MagicMock( - start=mock.MagicMock() - ) - self.connection._add_server(fake_server) - self.connection.start() - self.assertTrue(fake_server.start.called) - - def test_stop(self): - fake_server = mock.MagicMock( - stop=mock.MagicMock() - ) - self.connection._add_server(fake_server) - self.connection.stop() - self.assertTrue(fake_server.wait.called) diff --git a/astara/test/unit/test_scheduler.py b/astara/test/unit/test_scheduler.py deleted file mode 100644 index f00874aa..00000000 --- a/astara/test/unit/test_scheduler.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
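TestRPC and TestMessagingService above describe astara.common.rpc as thin wrappers over oslo.messaging: build one transport from cfg.CONF, build a Target, and wire them into clients, servers, and notification listeners. A condensed sketch consistent with those assertions (simplified: no deprecated amqp_url translation and no serializer handling):

    from oslo_config import cfg
    import oslo_messaging

    def get_transport(url=None):
        # One transport per process, derived from the service configuration.
        return oslo_messaging.get_transport(conf=cfg.CONF, url=url)

    def get_target(topic, fanout, exchange=None, version=None, server=None):
        return oslo_messaging.Target(topic=topic, fanout=fanout,
                                     exchange=exchange, version=version,
                                     server=server)

    def get_rpc_client(topic, exchange=None, version='1.0'):
        target = get_target(topic=topic, exchange=exchange,
                            version=version, fanout=False)
        return oslo_messaging.RPCClient(get_transport(), target)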
- - -import uuid - -import mock -from six.moves import range -from oslo_config import cfg -import unittest2 as unittest - -from astara import scheduler - - -class TestScheduler(unittest.TestCase): - - def test_invalid_num_workers(self): - cfg.CONF.num_worker_processes = 0 - self.assertRaises( - ValueError, - scheduler.Scheduler, mock.Mock) - - @mock.patch('multiprocessing.Process') - def test_creating_workers(self, process): - cfg.CONF.num_worker_processes = 2 - s = scheduler.Scheduler(mock.Mock) - self.assertEqual(2, len(s.workers)) - - @mock.patch('multiprocessing.Process') - @mock.patch('multiprocessing.JoinableQueue') - def test_stop(self, process, queue): - cfg.CONF.num_worker_processes = 2 - s = scheduler.Scheduler(mock.Mock) - s.stop() - for w in s.workers: - self.assertEqual( - [mock.call(None), mock.call(None)], - w['queue'].put.call_args_list # one put for each worker - ) - self.assertEqual(2, w['queue'].close.call_count) - self.assertEqual(2, w['worker'].join.call_count) - - -class TestDispatcher(unittest.TestCase): - - def setUp(self): - super(TestDispatcher, self).setUp() - self.workers = list(range(5)) - self.d = scheduler.Dispatcher(self.workers) - - def _mk_uuid(self, i): - # Creates a well-known UUID - return str(uuid.UUID(fields=(1, 2, 3, 4, 5, i))) - - def test_pick(self): - for i in range(len(self.workers)): - router_id = self._mk_uuid(i) - self.assertEqual( - [i], - self.d.pick_workers(router_id), - 'Incorrect index for %s' % router_id, - ) - - def test_pick_none(self): - router_id = None - self.assertEqual( - [], - self.d.pick_workers(router_id), - 'Found a router for None', - ) - - def test_pick_with_spaces(self): - for i in range(len(self.workers)): - router_id = ' %s ' % self._mk_uuid(i) - self.assertEqual( - [i], - self.d.pick_workers(router_id), - 'Incorrect index for %s' % router_id, - ) - - def test_pick_invalid(self): - for i in range(len(self.workers)): - router_id = self._mk_uuid(i) + 'Z' - self.assertEqual( - [], - self.d.pick_workers(router_id), - 'Found unexpected worker for %r' % router_id, - ) - - def test_wildcard(self): - self.assertEqual( - self.workers, - self.d.pick_workers('*'), - 'wildcard dispatch failed', - ) - - def test_error(self): - self.assertEqual( - self.workers, - self.d.pick_workers('error'), - 'error dispatch failed', - ) diff --git a/astara/test/unit/test_state.py b/astara/test/unit/test_state.py deleted file mode 100644 index 593a2b0a..00000000 --- a/astara/test/unit/test_state.py +++ /dev/null @@ -1,711 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
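The CalcAction tests that follow define how a backlog of queued events collapses into the single action to execute next: DELETE preempts everything, an UPDATE next to a CREATE upgrades to CREATE, duplicates merge, and stray POLLs are absorbed. A toy reduction over a collections.deque that reproduces the queues used in the tests, though not the exact astara algorithm:

    from collections import deque

    DELETE, CREATE, UPDATE, POLL, READ = (
        'delete', 'create', 'update', 'poll', 'read')

    def calc_action(queue):
        if DELETE in queue:
            return DELETE          # deletion wins; the queue is drained later
        action = None
        while queue:
            nxt = queue[0]
            if action is None or nxt == action:
                action = queue.popleft()     # merge duplicate events
            elif {action, nxt} == {UPDATE, CREATE}:
                queue.popleft()
                action = CREATE              # an update folds into a create
            elif nxt == POLL:
                queue.popleft()              # polls are absorbed silently
            else:
                break                        # leave the differing event queued
        return action

For example, calc_action(deque([UPDATE, POLL, UPDATE, POLL, UPDATE, READ])) returns UPDATE and leaves only the READ queued, which is exactly what test_execute_collapse_mixed_events asserts.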
- - -from collections import deque - -import mock -import unittest2 as unittest - -from six.moves import range -from astara import event -from astara import state -from astara import instance_manager -from astara.drivers import states -from astara.api.neutron import RouterGone - -from astara.test.unit import fakes - - -class BaseTestStateCase(unittest.TestCase): - state_cls = state.State - - def setUp(self): - self.ctx = mock.Mock() # worker context - self.fake_driver = fakes.fake_driver() - instance_mgr_cls = \ - mock.patch('astara.instance_manager.InstanceManager').start() - self.addCleanup(mock.patch.stopall) - self.instance = instance_mgr_cls.return_value - self.params = state.StateParams( - driver=self.fake_driver, - instance=self.instance, - queue=deque(), - bandwidth_callback=mock.Mock(), - reboot_error_threshold=3, - ) - self.state = self.state_cls(self.params) - - def _test_transition_hlpr(self, action, expected_class, - instance_state=state.states.UP): - self.instance.state = instance_state - result = self.state.transition(action, self.ctx) - self.assertIsInstance(result, expected_class) - return result - - -class TestBaseState(BaseTestStateCase): - def test_execute(self): - self.assertEqual( - 'action', - self.state.execute('action', self.ctx) - ) - - def test_transition(self): - self.assertEqual( - self.state, - self.state.transition('action', self.ctx) - ) - - -class TestCalcActionState(BaseTestStateCase): - state_cls = state.CalcAction - - def _test_hlpr(self, expected_action, queue_states, - leftover=0, initial_action=event.POLL): - self.params.queue = deque(queue_states) - self.assertEqual( - expected_action, - self.state.execute(initial_action, self.ctx) - ) - self.assertEqual(leftover, len(self.params.queue)) - - def test_execute_empty_queue(self): - self._test_hlpr('testaction', [], initial_action='testaction') - - def test_execute_delete_in_queue(self): - self._test_hlpr(event.DELETE, [event.CREATE, event.DELETE], 2) - - def test_none_start_action_update(self): - self._test_hlpr(expected_action=event.UPDATE, - queue_states=[event.UPDATE, event.UPDATE], - leftover=0, - initial_action=None) - - def test_none_start_action_poll(self): - self._test_hlpr(expected_action=event.POLL, - queue_states=[event.POLL, event.POLL], - leftover=0, - initial_action=None) - - def test_execute_ignore_pending_update_follow_create(self): - self._test_hlpr(event.CREATE, [event.CREATE, event.UPDATE]) - - def test_execute_upgrade_to_create_follow_update(self): - self._test_hlpr(event.CREATE, [event.UPDATE, event.CREATE]) - - def test_execute_collapse_same_events(self): - events = [event.UPDATE, event.UPDATE, event.UPDATE] - self._test_hlpr(event.UPDATE, events, 0) - - def test_execute_collapse_mixed_events(self): - events = [ - event.UPDATE, - event.POLL, - event.UPDATE, - event.POLL, - event.UPDATE, - event.READ, - ] - self._test_hlpr(event.UPDATE, events, 1) - - def test_execute_events_ending_with_poll(self): - events = [ - event.UPDATE, - event.UPDATE, - event.POLL, - event.POLL, - ] - self._test_hlpr(event.UPDATE, events, 0) - - def test_transition_update_missing_router_down(self): - self.ctx.neutron = mock.Mock() - self.ctx.neutron.get_router_detail.side_effect = RouterGone - self._test_transition_hlpr( - event.UPDATE, - state.CheckBoot, - states.BOOTING - ) - - def test_transition_update_missing_router_not_down(self): - self.ctx.neutron = mock.Mock() - self.ctx.neutron.get_router_detail.side_effect = RouterGone - self._test_transition_hlpr( - event.UPDATE, - state.CheckBoot, - 
states.BOOTING - ) - - def test_transition_delete_missing_router_down(self): - self.ctx.neutron = mock.Mock() - self.ctx.neutron.get_router_detail.side_effect = RouterGone - self._test_transition_hlpr( - event.DELETE, - state.StopInstance, - states.DOWN - ) - - def test_transition_delete_missing_router_not_down(self): - self.ctx.neutron = mock.Mock() - self.ctx.neutron.get_router_detail.side_effect = RouterGone - self._test_transition_hlpr( - event.DELETE, - state.StopInstance, - states.BOOTING - ) - - def test_transition_delete_down_instance(self): - self._test_transition_hlpr(event.DELETE, - state.StopInstance, - states.DOWN) - - def test_transition_delete_up_instance(self): - self._test_transition_hlpr(event.DELETE, state.StopInstance) - - def test_transition_create_down_instance(self): - for evt in [event.POLL, event.READ, event.UPDATE, event.CREATE]: - self._test_transition_hlpr(evt, - state.CreateInstance, - states.DOWN) - - def test_transition_poll_up_instance(self): - self._test_transition_hlpr(event.POLL, - state.Alive, - states.UP) - - def test_transition_poll_configured_instance(self): - self._test_transition_hlpr( - event.POLL, - state.Alive, - states.CONFIGURED - ) - - def test_transition_other_up_instance(self): - for evt in [event.READ, event.UPDATE, event.CREATE]: - self._test_transition_hlpr(evt, state.Alive) - - def test_transition_update_error_instance(self): - self.instance.error_cooldown = False - result = self._test_transition_hlpr( - event.UPDATE, - state.ClearError, - states.ERROR, - ) - self.assertIsInstance(result._next_state, state.Alive) - - def test_transition_update_error_instance_in_error_cooldown(self): - self.instance.error_cooldown = True - self._test_transition_hlpr( - event.UPDATE, - state.CalcAction, - states.ERROR, - ) - - def test_transition_poll_error_instance(self): - self._test_transition_hlpr( - event.POLL, - state.CalcAction, - states.ERROR, - ) - - -class TestAliveState(BaseTestStateCase): - state_cls = state.Alive - - def test_execute(self): - self.assertEqual( - 'passthrough', - self.state.execute('passthrough', self.ctx) - ) - self.instance.update_state.assert_called_once_with(self.ctx) - - def test_transition_instance_down(self): - for evt in [event.POLL, event.READ, event.UPDATE, event.CREATE]: - self._test_transition_hlpr(evt, - state.CreateInstance, - states.DOWN) - - def test_transition_poll_instance_configured(self): - self._test_transition_hlpr( - event.POLL, - state.CalcAction, - states.CONFIGURED - ) - - def test_transition_read_instance_configured(self): - self._test_transition_hlpr( - event.READ, - state.ReadStats, - states.CONFIGURED - ) - - def test_transition_up_to_configured(self): - self._test_transition_hlpr( - event.CREATE, - state.ConfigureInstance, - states.UP - ) - - def test_transition_configured_instance_configured(self): - self._test_transition_hlpr( - event.CREATE, - state.ConfigureInstance, - states.CONFIGURED - ) - - -class TestCreateInstanceState(BaseTestStateCase): - state_cls = state.CreateInstance - - def test_execute(self): - self.instance.attempts = 0 - self.assertEqual( - 'passthrough', - self.state.execute('passthrough', self.ctx) - ) - self.instance.boot.assert_called_once_with(self.ctx) - - def test_execute_too_many_attempts(self): - self.instance.attempts = self.params.reboot_error_threshold - self.assertEqual( - 'passthrough', - self.state.execute('passthrough', self.ctx) - ) - self.assertEqual([], self.instance.boot.mock_calls) - self.instance.set_error.assert_called_once_with(self.ctx) - - def 
test_transition_instance_down(self): - self._test_transition_hlpr( - event.READ, - state.CheckBoot, - states.BOOTING - ) - - def test_transition_instance_up(self): - self._test_transition_hlpr( - event.READ, - state.CheckBoot, - instance_state=state.states.BOOTING - ) - - def test_transition_instance_missing(self): - self._test_transition_hlpr( - event.READ, - state.CreateInstance, - instance_state=state.states.DOWN - ) - - def test_transition_instance_error(self): - self._test_transition_hlpr(event.READ, state.CalcAction, - instance_state=state.states.ERROR) - - -class TestRebuildInstanceState(BaseTestStateCase): - state_cls = state.RebuildInstance - - def test_execute(self): - self.assertEqual( - event.CREATE, - self.state.execute('ignored', self.ctx) - ) - self.instance.stop.assert_called_once_with(self.ctx) - - def test_execute_gone(self): - self.instance.state = states.GONE - self.assertEqual( - event.DELETE, - self.state.execute('ignored', self.ctx) - ) - self.instance.stop.assert_called_once_with(self.ctx) - - -class TestClearErrorState(BaseTestStateCase): - state_cls = state.ClearError - - def test_execute(self): - self.assertEqual( - 'passthrough', - self.state.execute('passthrough', self.ctx) - ) - self.instance.clear_error.assert_called_once_with(self.ctx) - - def test_execute_after_error(self): - self.instance.state = states.ERROR - self.assertEqual( - 'passthrough', - self.state.execute('passthrough', self.ctx) - ) - self.instance.clear_error.assert_called_once_with(self.ctx) - - def test_transition_default(self): - st = self.state_cls(self.params) - self.assertIsInstance( - st.transition('passthrough', self.ctx), - state.CalcAction, - ) - - def test_transition_override(self): - st = self.state_cls(self.params, state.Alive(self.params)) - self.assertIsInstance( - st.transition('passthrough', self.ctx), - state.Alive, - ) - - -class TestCheckBootState(BaseTestStateCase): - state_cls = state.CheckBoot - - def test_execute(self): - self.assertEqual( - 'passthrough', - self.state.execute('passthrough', self.ctx) - ) - self.instance.update_state.assert_called_once_with(self.ctx) - assert list(self.params.queue) == ['passthrough'] - - def test_transition_instance_configure(self): - self._test_transition_hlpr( - event.UPDATE, - state.ConfigureInstance, - states.UP - ) - - def test_transition_hotplug(self): - self._test_transition_hlpr( - event.UPDATE, - state.ReplugInstance, - states.REPLUG - ) - - def test_transition_instance_booting(self): - self._test_transition_hlpr( - event.UPDATE, - state.CalcAction, - states.BOOTING - ) - - -class TestStopInstanceState(BaseTestStateCase): - state_cls = state.StopInstance - - def test_execute(self): - self.assertEqual( - 'passthrough', - self.state.execute('passthrough', self.ctx) - ) - self.instance.stop.assert_called_once_with(self.ctx) - - def test_transition_instance_still_up(self): - self._test_transition_hlpr(event.DELETE, state.StopInstance) - - def test_transition_delete_instance_down(self): - self._test_transition_hlpr(event.DELETE, - state.Exit, - states.DOWN) - - def test_transition_restart_instance_down(self): - self._test_transition_hlpr(event.READ, - state.CreateInstance, - states.DOWN) - - -class TestReplugState(BaseTestStateCase): - state_cls = state.ReplugInstance - - def test_execute(self): - self.assertEqual( - 'update', - self.state.execute('update', self.ctx) - ) - self.instance.replug.assert_called_once_with(self.ctx) - - def test_transition_hotplug_succeeded(self): - self._test_transition_hlpr( - event.UPDATE, - 
state.ConfigureInstance, - states.REPLUG - ) - - def test_transition_hotplug_failed(self): - self._test_transition_hlpr( - event.UPDATE, - state.StopInstance, - states.RESTART - ) - - -class TestExitState(TestBaseState): - state_cls = state.Exit - - -class TestConfigureInstanceState(BaseTestStateCase): - state_cls = state.ConfigureInstance - - def test_execute_read_configure_success(self): - self.instance.state = states.CONFIGURED - self.assertEqual(event.READ, - self.state.execute(event.READ, self.ctx)) - self.instance.configure.assert_called_once_with(self.ctx) - - def test_execute_update_configure_success(self): - self.instance.state = states.CONFIGURED - self.assertEqual(event.POLL, - self.state.execute(event.UPDATE, self.ctx)) - self.instance.configure.assert_called_once_with(self.ctx) - - def test_execute_configure_failure(self): - self.assertEqual( - event.CREATE, - self.state.execute(event.CREATE, self.ctx) - ) - self.instance.configure.assert_called_once_with(self.ctx) - - def test_transition_not_configured_down(self): - self._test_transition_hlpr(event.READ, - state.StopInstance, - states.DOWN) - - def test_transition_not_configured_restart(self): - self._test_transition_hlpr(event.READ, - state.StopInstance, - states.RESTART) - - def test_transition_not_configured_up(self): - self._test_transition_hlpr(event.READ, - state.PushUpdate, - states.UP) - - def test_transition_read_configured(self): - self._test_transition_hlpr( - event.READ, - state.ReadStats, - states.CONFIGURED - ) - - def test_transition_other_configured(self): - self._test_transition_hlpr( - event.POLL, - state.CalcAction, - states.CONFIGURED - ) - - -class TestReadStatsState(BaseTestStateCase): - state_cls = state.ReadStats - - def test_execute(self): - self.instance.read_stats.return_value = 'foo' - - self.assertEqual( - event.POLL, - self.state.execute(event.READ, self.ctx) - ) - self.instance.read_stats.assert_called_once_with() - self.params.bandwidth_callback.assert_called_once_with('foo') - - def test_transition(self): - self._test_transition_hlpr(event.POLL, state.CalcAction) - - -class TestAutomaton(unittest.TestCase): - def setUp(self): - super(TestAutomaton, self).setUp() - - self.ctx = mock.Mock() # worker context - self.fake_driver = fakes.fake_driver() - - self.instance_mgr_cls = \ - mock.patch('astara.instance_manager.InstanceManager').start() - self.addCleanup(mock.patch.stopall) - - self.delete_callback = mock.Mock() - self.bandwidth_callback = mock.Mock() - - self.sm = state.Automaton( - resource=self.fake_driver, - tenant_id='tenant-id', - delete_callback=self.delete_callback, - bandwidth_callback=self.bandwidth_callback, - worker_context=self.ctx, - queue_warning_threshold=3, - reboot_error_threshold=5, - ) - - def test_send_message(self): - message = mock.Mock() - message.crud = 'update' - with mock.patch.object(self.sm.resource, 'log') as logger: - self.sm.send_message(message) - self.assertEqual(1, len(self.sm._queue)) - logger.debug.assert_called_with( - 'incoming message brings queue length to %s', - 1, - ) - - def test_send_message_over_threshold(self): - message = mock.Mock() - message.crud = 'update' - for i in range(3): - self.sm.send_message(message) - with mock.patch.object(self.sm.resource, 'log') as logger: - self.sm.send_message(message) - logger.warning.assert_called_with( - 'incoming message brings queue length to %s', - 4, - ) - - def test_send_message_deleting(self): - message = mock.Mock() - message.crud = 'update' - self.sm.deleted = True - self.sm.send_message(message) - 
self.assertEqual(0, len(self.sm._queue)) - self.assertFalse(self.sm.has_more_work()) - - def test_send_message_in_error(self): - instance = self.instance_mgr_cls.return_value - instance.state = state.states.ERROR - message = mock.Mock() - message.crud = 'poll' - self.sm.send_message(message) - self.assertEqual(0, len(self.sm._queue)) - self.assertFalse(self.sm.has_more_work()) - - # Non-POLL events should *not* be ignored for routers in ERROR state - message.crud = 'create' - with mock.patch.object(self.sm.resource, 'log') as logger: - self.sm.send_message(message) - self.assertEqual(1, len(self.sm._queue)) - logger.debug.assert_called_with( - 'incoming message brings queue length to %s', - 1, - ) - - def test_send_rebuild_message_with_custom_image(self): - instance = self.instance_mgr_cls.return_value - instance.state = state.states.DOWN - with mock.patch.object(instance_manager.cfg, 'CONF'): - # rebuilds with custom - message = mock.Mock() - message.crud = 'rebuild' - message.body = {'image_uuid': 'ABC123'} - self.sm.send_message(message) - self.assertEqual('ABC123', self.sm.image_uuid) - - # rebuilds with image default. - message = mock.Mock() - message.crud = 'rebuild' - message.body = {} - self.sm.send_message(message) - self.assertEqual(self.fake_driver.image_uuid, self.sm.image_uuid) - - def test_has_more_work(self): - with mock.patch.object(self.sm, '_queue'): - self.assertTrue(self.sm.has_more_work()) - - def test_has_more_work_deleting(self): - self.sm.deleted = True - with mock.patch.object(self.sm, '_queue'): - self.assertFalse(self.sm.has_more_work()) - - def test_update_no_work(self): - with mock.patch.object(self.sm, 'state') as state: - self.sm.update(self.ctx) - self.assertFalse(state.called) - - def test_update_exit(self): - message = mock.Mock() - message.crud = event.UPDATE - self.sm.send_message(message) - self.sm.state = state.Exit(mock.Mock()) - self.sm.update(self.ctx) - self.delete_callback.called_once_with() - - def test_update_exception_during_execute(self): - message = mock.Mock() - message.crud = 'fake' - self.sm.send_message(message) - - fake_state = mock.Mock() - fake_state.execute.side_effect = Exception - fake_state.transition.return_value = state.Exit(mock.Mock()) - self.sm.action = 'fake' - self.sm.state = fake_state - - with mock.patch.object(self.sm.resource, 'log') as log: - self.sm.update(self.ctx) - - log.exception.assert_called_once_with(mock.ANY, fake_state, 'fake') - - fake_state.assert_has_calls( - [ - mock.call.execute('fake', self.ctx), - mock.call.transition('fake', self.ctx) - ] - ) - - def test_update_calc_action_args(self): - message = mock.Mock() - message.crud = event.UPDATE - self.sm.send_message(message) - - with mock.patch.object(self.sm.state, 'execute', - self.ctx) as execute: - with mock.patch.object(self.sm.state, 'transition', - self.ctx) as transition: - transition.return_value = state.Exit(mock.Mock()) - self.sm.update(self.ctx) - - execute.called_once_with( - event.POLL, - self.instance_mgr_cls.return_value, - self.ctx, - self.sm._queue - ) - self.delete_callback.called_once_with() - - def test_update_read_stats_args(self): - message = mock.Mock() - message.crud = event.READ - self.sm.send_message(message) - - self.sm.state = state.ReadStats(mock.Mock()) - with mock.patch.object(self.sm.state, 'execute', self.ctx) as execute: - execute.return_value = state.Exit(mock.Mock()) - self.sm.update(self.ctx) - - execute.called_once_with( - event.POLL, - self.instance_mgr_cls.return_value, - self.ctx, - self.bandwidth_callback - ) - - 
def test_has_error(self): - with mock.patch.object(self.sm, 'instance') as instance: - instance.state = states.ERROR - self.assertTrue(self.sm.has_error()) - - def test_has_no_error(self): - with mock.patch.object(self.sm, 'instance') as instance: - instance.state = states.UP - self.assertFalse(self.sm.has_error()) - - def test_drop_queue(self): - self.sm._queue.append('foo_item') - self.assertEqual(1, len(self.sm._queue)) - self.sm.drop_queue() - self.assertEqual(0, len(self.sm._queue)) diff --git a/astara/test/unit/test_tenant.py b/astara/test/unit/test_tenant.py deleted file mode 100644 index 274913cf..00000000 --- a/astara/test/unit/test_tenant.py +++ /dev/null @@ -1,386 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import uuid - -import mock - -from six.moves import range -from astara import event -from astara import tenant -from astara.drivers import router -from astara import state -from astara.drivers import states -from astara.test.unit import base, fakes - - -class TestTenantResourceManager(base.RugTestBase): - - def setUp(self): - super(TestTenantResourceManager, self).setUp() - - self.fake_driver = fakes.fake_driver() - self.load_resource_p = mock.patch( - 'astara.tenant.TenantResourceManager._load_resource_from_message') - self.fake_load_resource = self.load_resource_p.start() - self.fake_load_resource.return_value = self.fake_driver - - self.tenant_id = 'cfb48b9c-66f6-11e5-a7be-525400cfc326' - self.instance_mgr = \ - mock.patch('astara.instance_manager.InstanceManager').start() - self.addCleanup(mock.patch.stopall) - self.notifier = mock.Mock() - self.deleter = mock.Mock() - self.trm = tenant.TenantResourceManager( - '1234', - delete_callback=self.deleter, - notify_callback=self.notifier, - queue_warning_threshold=10, - reboot_error_threshold=5, - ) - self.ctx = mock.Mock() - - def test_new_resource(self): - r = event.Resource( - tenant_id=self.tenant_id, - id='5678', - driver=router.Router.RESOURCE_NAME, - ) - msg = event.Event( - resource=r, - crud=event.CREATE, - body={'key': 'value'}, - ) - self.fake_load_resource.return_value = fakes.fake_driver( - resource_id='5678') - sm = self.trm.get_state_machines(msg, self.ctx)[0] - self.assertEqual('5678', sm.resource_id) - self.assertIn('5678', self.trm.state_machines) - - def test_get_state_machine_no_resoruce_id(self): - r = event.Resource( - tenant_id=self.tenant_id, - id=None, - driver=router.Router.RESOURCE_NAME, - ) - msg = event.Event( - resource=r, - crud=event.CREATE, - body={'key': 'value'}, - ) - self.assertRaises(tenant.InvalidIncomingMessage, - self.trm.get_state_machines, msg, self.ctx) - - def test_all_resources(self): - for i in range(5): - rid = str(uuid.uuid4()) - driver = fakes.fake_driver(rid) - sm = state.Automaton( - resource=driver, - worker_context=self.ctx, - tenant_id=self.tenant_id, - delete_callback=None, - bandwidth_callback=None, - queue_warning_threshold=5, - reboot_error_threshold=5) - self.trm.state_machines[rid] = sm - r = 
event.Resource( - tenant_id=self.tenant_id, - id='*', - driver=router.Router.RESOURCE_NAME, - ) - msg = event.Event( - resource=r, - crud=event.CREATE, - body={'key': 'value'}, - ) - sms = self.trm.get_state_machines(msg, self.ctx) - self.assertEqual(5, len(sms)) - - def test_errored_routers(self): - self.trm.state_machines.state_machines = {} - for i in range(5): - rid = str(uuid.uuid4()) - driver = fakes.fake_driver(rid) - sm = state.Automaton( - resource=driver, - worker_context=self.ctx, - tenant_id=self.tenant_id, - delete_callback=None, - bandwidth_callback=None, - queue_warning_threshold=5, - reboot_error_threshold=5) - self.trm.state_machines[rid] = sm - - # Replace the default mock with one that has 'state' set. - if i == 2: - status = states.ERROR - err_id = sm.resource_id - else: - status = states.UP - - sm.instance = mock.Mock(state=status) - self.trm.state_machines.state_machines[sm.resource_id] = sm - - r = event.Resource( - tenant_id=self.tenant_id, - id=err_id, - driver=router.Router.RESOURCE_NAME, - ) - msg = event.Event( - resource=r, - crud=event.CREATE, - body={'key': 'value'}, - ) - sms = self.trm.get_state_machines(msg, self.ctx) - self.assertEqual(1, len(sms)) - self.assertEqual(err_id, sms[0].resource_id) - self.assertIs(self.trm.state_machines.state_machines[err_id], sms[0]) - - def test_existing_resource(self): - r = event.Resource( - tenant_id=self.tenant_id, - id='5678', - driver=router.Router.RESOURCE_NAME, - ) - msg = event.Event( - resource=r, - crud=event.CREATE, - body={'key': 'value'}, - ) - # First time creates... - sm1 = self.trm.get_state_machines(msg, self.ctx)[0] - # Second time should return the same objects... - sm2 = self.trm.get_state_machines(msg, self.ctx)[0] - self.assertIs(sm1, sm2) - self.assertIs(sm1._queue, sm2._queue) - - def test_existing_resource_of_many(self): - sms = {} - for resource_id in ['5678', 'ABCD', 'EFGH']: - r = event.Resource( - tenant_id=self.tenant_id, - id=resource_id, - driver=router.Router.RESOURCE_NAME, - ) - msg = event.Event( - resource=r, - crud=event.CREATE, - body={'key': 'value'}, - ) - # First time creates... - sm1 = self.trm.get_state_machines(msg, self.ctx)[0] - sms[resource_id] = sm1 - - # Second time should return the same objects... 
- r = event.Resource( - id='5678', - tenant_id=self.tenant_id, - driver=router.Router.RESOURCE_NAME, - ) - msg = event.Event( - resource=r, - crud=event.CREATE, - body={'key': 'value'}, - ) - sm2 = self.trm.get_state_machines(msg, self.ctx)[0] - self.assertIs(sm2, sms['5678']) - - def test_delete_resource(self): - r = event.Resource( - id='1234', - tenant_id=self.tenant_id, - driver=router.Router.RESOURCE_NAME, - ) - self.trm.state_machines['1234'] = mock.Mock() - self.trm._delete_resource(r) - self.assertNotIn('1234', self.trm.state_machines) - self.assertTrue(self.deleter.called) - - def test_delete_default_resource(self): - r = event.Resource( - id='1234', - tenant_id=self.tenant_id, - driver=router.Router.RESOURCE_NAME) - self.trm._default_resource_id = '1234' - self.trm.state_machines['1234'] = mock.Mock() - self.trm._delete_resource(r) - self.assertNotIn('1234', self.trm.state_machines) - self.assertIsNone(self.trm._default_resource_id) - - def test_delete_not_default_resource(self): - r = event.Resource( - id='1234', - tenant_id=self.tenant_id, - driver=router.Router.RESOURCE_NAME) - self.trm._default_resource_id = 'abcd' - self.trm.state_machines['1234'] = mock.Mock() - self.trm._delete_resource(r) - self.assertEqual('abcd', self.trm._default_resource_id) - - def test_no_update_deleted_resource(self): - r = event.Resource( - tenant_id='1234', - id='5678', - driver=router.Router.RESOURCE_NAME, - ) - self.trm._default_resource_id = 'abcd' - self.trm.state_machines['5678'] = mock.Mock() - self.trm._delete_resource(r) - self.assertEqual([], self.trm.state_machines.values()) - r = event.Resource( - tenant_id='1234', - id='5678', - driver=router.Router.RESOURCE_NAME, - ) - msg = event.Event( - resource=r, - crud=event.CREATE, - body={'key': 'value'}, - ) - sms = self.trm.get_state_machines(msg, self.ctx) - self.assertEqual([], sms) - self.assertIn('5678', self.trm.state_machines.deleted) - - def test_deleter_callback(self): - r = event.Resource( - tenant_id='1234', - id='5678', - driver=router.Router.RESOURCE_NAME, - ) - msg = event.Event( - resource=r, - crud=event.CREATE, - body={'key': 'value'}, - ) - sm = self.trm.get_state_machines(msg, self.ctx)[0] - self.assertIn('5678', self.trm.state_machines) - sm._do_delete() - self.assertNotIn('5678', self.trm.state_machines) - self.assertTrue( - self.trm.state_machines.has_been_deleted('5678')) - - def test_report_bandwidth(self): - notifications = [] - self.notifier.side_effect = notifications.append - self.trm._report_bandwidth( - '5678', - [{'name': 'a', - 'value': 1, - }, - {'name': 'b', - 'value': 2, - }], - ) - n = notifications[0] - self.assertEqual('1234', n['tenant_id']) - self.assertIn('5678', n['uuid']) - self.assertIn('timestamp', n) - self.assertEqual('astara.bandwidth.used', n['event_type']) - self.assertIn('a', n['payload']) - self.assertIn('b', n['payload']) - - def test_get_state_machine_by_resource_id(self): - fake_sm = mock.Mock() - self.trm.state_machines['fake_resource_id'] = fake_sm - self.assertEqual( - fake_sm, - self.trm.get_state_machine_by_resource_id('fake_resource_id')) - - def test_unmanage_resource(self): - fake_sm = mock.Mock() - self.trm.state_machines['fake-resource_id'] = fake_sm - self.trm.unmanage_resource('fake-resource-id') - self.assertNotIn('fake-resource-id', self.trm.state_machines) - self.assertFalse( - self.trm.state_machines.has_been_deleted('fake-resource-id')) - - @mock.patch('astara.drivers.load_from_byonf') - @mock.patch('astara.drivers.get') - def 
test__load_driver_from_message_no_byonf(self, fake_get, fake_byonf): - self.load_resource_p.stop() - self.config(enable_byonf=False) - r = event.Resource( - tenant_id='1234', - id='5678', - driver=router.Router.RESOURCE_NAME, - ) - msg = event.Event( - resource=r, - crud=event.CREATE, - body={'key': 'value'}, - ) - fake_driver = mock.Mock() - fake_driver.return_value = 'fake_driver' - fake_get.return_value = fake_driver - - self.assertEqual( - 'fake_driver', - self.trm._load_resource_from_message(self.ctx, msg)) - fake_get.assert_called_with(msg.resource.driver) - fake_driver.assert_called_with(self.ctx, msg.resource.id) - self.assertFalse(fake_byonf.called) - - @mock.patch('astara.drivers.load_from_byonf') - @mock.patch('astara.drivers.get') - def test__load_driver_from_message_with_byonf(self, fake_get, fake_byonf): - self.load_resource_p.stop() - self.config(enable_byonf=True) - r = event.Resource( - tenant_id='1234', - id='5678', - driver=router.Router.RESOURCE_NAME, - ) - msg = event.Event( - resource=r, - crud=event.CREATE, - body={'key': 'value'}, - ) - fake_driver = mock.Mock() - fake_byonf.return_value = fake_driver - - self.ctx.neutron.tenant_has_byo_for_function.return_value = 'byonf_res' - self.assertEqual( - fake_driver, self.trm._load_resource_from_message(self.ctx, msg)) - fake_byonf.assert_called_with( - self.ctx, 'byonf_res', msg.resource.id) - self.assertFalse(fake_get.called) - - @mock.patch('astara.drivers.load_from_byonf') - @mock.patch('astara.drivers.get') - def test__load_driver_from_message_empty_byonf(self, fake_get, fake_byonf): - self.load_resource_p.stop() - self.config(enable_byonf=True) - r = event.Resource( - tenant_id='1234', - id='5678', - driver=router.Router.RESOURCE_NAME, - ) - msg = event.Event( - resource=r, - crud=event.CREATE, - body={'key': 'value'}, - ) - - fake_driver = mock.Mock() - fake_driver.return_value = 'fake_fallback_driver' - fake_get.return_value = fake_driver - - self.ctx.neutron.tenant_has_byo_for_function.return_value = None - self.assertEqual( - 'fake_fallback_driver', - self.trm._load_resource_from_message(self.ctx, msg)) - fake_get.assert_called_with(msg.resource.driver) diff --git a/astara/test/unit/test_worker.py b/astara/test/unit/test_worker.py deleted file mode 100644 index 71c73d3b..00000000 --- a/astara/test/unit/test_worker.py +++ /dev/null @@ -1,1046 +0,0 @@ -# Copyright 2014 DreamHost, LLC -# -# Author: DreamHost, LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
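# The worker tests below repeatedly stub astara.worker.hash_ring and assert
# on ring.get_hosts(resource_id). The ownership rule they exercise reduces to
# the membership check sketched here; HashRing is a toy stand-in assumed for
# illustration, not astara.common.hash_ring's implementation.
import hashlib


class HashRing(object):
    def __init__(self, hosts):
        self.hosts = sorted(hosts)

    def get_hosts(self, key):
        # Deterministically map a resource id to one host.
        digest = hashlib.md5(str(key).encode('utf-8')).hexdigest()
        return [self.hosts[int(digest, 16) % len(self.hosts)]]


def should_process(ring, my_host, resource_id):
    # A worker only handles messages for resources that hash to its host.
    return my_host in ring.get_hosts(resource_id)


ring = HashRing(['host-a', 'host-b'])
owner = ring.get_hosts('some-resource-id')[0]
assert should_process(ring, owner, 'some-resource-id')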
- - -import threading - -import mock -from oslo_config import cfg -import unittest2 as unittest - -from astara import commands -from astara import event -from astara import notifications -from astara.api import neutron -from astara.drivers import router -from astara import worker - -from astara.common.hash_ring import DC_KEY - -from astara.test.unit import fakes -from astara.test.unit.db import base - - -class FakeFetchedResource(object): - id = 'fake_fetched_resource_id' - - -class WorkerTestBase(base.DbTestCase): - tenant_id = '1040f478-3c74-11e5-a72a-173606e0a6d0' - router_id = '18ffa532-3c74-11e5-a0e7-eb9f90a17ffb' - - def setUp(self): - super(WorkerTestBase, self).setUp() - cfg.CONF.boot_timeout = 1 - cfg.CONF.astara_mgt_service_port = 5000 - cfg.CONF.max_retries = 3 - cfg.CONF.management_prefix = 'fdca:3ba5:a17a:acda::/64' - cfg.CONF.num_worker_threads = 0 - - self.fake_nova = mock.patch('astara.worker.nova').start() - fake_neutron_obj = mock.patch.object( - neutron, 'Neutron', autospec=True).start() - fake_neutron_obj.get_ports_for_instance.return_value = ( - 'mgt_port', ['ext_port', 'int_port']) - fake_neutron_obj.get_router_for_tenant.return_value = ( - FakeFetchedResource()) - self.fake_neutron = mock.patch.object( - neutron, 'Neutron', return_value=fake_neutron_obj).start() - - self.fake_scheduler = mock.Mock() - self.proc_name = 'p0x' - self.w = worker.Worker( - notifier=mock.Mock(), - management_address=fakes.FAKE_MGT_ADDR, - scheduler=self.fake_scheduler, - proc_name=self.proc_name) - - self.addCleanup(mock.patch.stopall) - - self.target = self.tenant_id - r = event.Resource( - tenant_id=self.tenant_id, - id=self.router_id, - driver=router.Router.RESOURCE_NAME, - ) - self.msg = event.Event( - resource=r, - crud=event.CREATE, - body={'key': 'value'}, - ) - - def tearDown(self): - self.w._shutdown() - super(WorkerTestBase, self).tearDown() - - def enable_debug(self, resource_id=None, tenant_id=None): - if resource_id: - self.dbapi.enable_resource_debug(resource_uuid=resource_id) - is_debug, _ = self.dbapi.resource_in_debug(resource_id) - if tenant_id: - self.dbapi.enable_tenant_debug(tenant_uuid=tenant_id) - is_debug, _ = self.dbapi.tenant_in_debug(tenant_id) - self.assertTrue(is_debug) - - def assert_not_in_debug(self, resource_id=None, tenant_id=None): - if resource_id: - is_debug, _ = self.dbapi.resource_in_debug(resource_id) - in_debug = self.dbapi.resources_in_debug() - uuid = resource_id - if tenant_id: - is_debug, _ = self.dbapi.tenant_in_debug(tenant_id) - in_debug = self.dbapi.tenants_in_debug() - uuid = tenant_id - self.assertFalse(is_debug) - self.assertNotIn(uuid, in_debug) - - -class TestWorker(WorkerTestBase): - tenant_id = '1040f478-3c74-11e5-a72a-173606e0a6d0' - resource_id = '18ffa532-3c74-11e5-a0e7-eb9f90a17ffb' - driver = router.Router.RESOURCE_NAME - resource = None - - def setUp(self): - super(TestWorker, self).setUp() - self.config(enabled=True, group='coordination') - self._balanced_p = mock.patch.object( - self.w, '_ring_balanced') - self._mock_balanced = self._balanced_p.start() - self._mock_balanced.return_value = True - self.addCleanup(mock.patch.stopall) - - self.target = self.tenant_id - self.resource = event.Resource( - self.driver, - self.resource_id, - self.tenant_id) - self.msg = event.Event( - resource=self.resource, - crud=event.CREATE, - body={'key': 'value'}, - ) - self.fake_cache = worker.TenantResourceCache() - self.fake_cache.get_by_tenant = mock.MagicMock() - self.w.resource_cache = self.fake_cache - - def 
test__should_process_message_global_debug(self): - self.dbapi.enable_global_debug() - self.assertFalse( - self.w._should_process_message(self.target, self.msg)) - - def test__should_process_message_tenant_debug(self): - self.dbapi.enable_tenant_debug(tenant_uuid=self.tenant_id) - self.assertFalse( - self.w._should_process_message(self.target, self.msg)) - - @mock.patch('astara.worker.hash_ring', autospec=True) - def test__should_process_no_router_id(self, fake_hash): - fake_ring_manager = fake_hash.HashRingManager() - fake_ring_manager.ring.get_hosts.return_value = [self.w.host] - self.w.hash_ring_mgr = fake_ring_manager - self.fake_cache.get_by_tenant.return_value = ( - '9846d012-3c75-11e5-b476-8321b3ff1a1d') - r = event.Resource( - driver=router.Router.RESOURCE_NAME, - id=None, - tenant_id='fake_tenant_id', - ) - expected_r = event.Resource( - driver=router.Router.RESOURCE_NAME, - id='9846d012-3c75-11e5-b476-8321b3ff1a1d', - tenant_id='fake_tenant_id', - ) - msg = event.Event( - resource=r, - crud=event.CREATE, - body={'key': 'value'}, - ) - expected = event.Event( - resource=expected_r, - crud=event.CREATE, - body={'key': 'value'}, - ) - self.assertEqual( - expected, - self.w._should_process_message(self.target, msg)) - - def test__should_process_no_router_id_no_router_found(self): - self.fake_cache.get_by_tenant.return_value = None - r = event.Resource( - driver=router.Router.RESOURCE_NAME, - id=None, - tenant_id='fake_tenant_id', - ) - msg = event.Event( - resource=r, - crud=event.CREATE, - body={'key': 'value'}, - ) - self.assertFalse(self.w._should_process_message(self.target, msg)) - - @mock.patch('astara.worker.Worker._deliver_message') - @mock.patch('astara.worker.Worker._should_process_message') - def test_handle_message_should_process(self, fake_should_process, - fake_deliver): - # ensure we plumb through the return of should_process to - # deliver_message, in case some processing has been done on - # it - new_msg = event.Event( - resource=self.resource, - crud=event.CREATE, - body={'key': 'value'}, - ) - - fake_should_process.return_value = new_msg - self.w.handle_message(self.target, self.msg) - fake_deliver.assert_called_with(self.target, new_msg) - fake_should_process.assert_called_with(self.target, self.msg) - - @mock.patch('astara.worker.Worker._deliver_message') - @mock.patch('astara.worker.Worker._should_process_message') - def test_handle_message_should_not_process(self, fake_should_process, - fake_deliver): - fake_should_process.return_value = False - self.w.handle_message(self.target, self.msg) - self.assertFalse(fake_deliver.called) - fake_should_process.assert_called_with(self.target, self.msg) - - @mock.patch('astara.worker.Worker._deliver_message') - @mock.patch('astara.worker.Worker._defer_message') - def test_handle_message_defer_message(self, fake_defer, fake_deliver): - self._mock_balanced.return_value = False - self.w.handle_message(self.target, self.msg) - fake_defer.assert_called_with(self.target, self.msg) - self.assertFalse(fake_deliver.called) - - @mock.patch('astara.worker.hash_ring', autospec=True) - def test__should_process_message_does_not_hash(self, fake_hash): - fake_ring_manager = fake_hash.HashRingManager() - fake_ring_manager.ring.get_hosts.return_value = ['not_this_host'] - self.w.hash_ring_mgr = fake_ring_manager - self.assertFalse( - self.w._should_process_message(self.target, self.msg)) - fake_ring_manager.ring.get_hosts.assert_called_with(self.router_id) - - @mock.patch('astara.worker.hash_ring', autospec=True) - def 
test__should_process_message_wildcard_true(self, fake_hash): - fake_ring_manager = fake_hash.HashRingManager() - fake_ring_manager.ring.get_hosts.return_value = ['not_this_host'] - self.w.hash_ring_mgr = fake_ring_manager - self.assertTrue( - self.w._should_process_message('*', self.msg)) - self.assertFalse(fake_ring_manager.ring.called) - - @mock.patch('astara.worker.hash_ring', autospec=True) - def test__should_process_message_true(self, fake_hash): - fake_ring_manager = fake_hash.HashRingManager() - fake_ring_manager.ring.get_hosts.return_value = [self.w.host] - self.w.hash_ring_mgr = fake_ring_manager - self.assertEqual( - self.w._should_process_message(self.target, self.msg), - self.msg) - fake_ring_manager.ring.get_hosts.assert_called_with(self.router_id) - - def test__should_process_command_debug_config(self): - for cmd in [commands.WORKERS_DEBUG, commands.CONFIG_RELOAD]: - r = event.Resource( - tenant_id=self.tenant_id, - id=self.router_id, - driver='router', - ) - msg = event.Event( - resource=r, - crud=event.COMMAND, - body={'command': cmd}, - ) - self.assertTrue(self.w._should_process_command(msg)) - - def _test__should_process_command(self, fake_hash, cmds, key, - negative=False): - self.config(enabled=True, group='coordination') - fake_ring_manager = fake_hash.HashRingManager() - - if not negative: - fake_ring_manager.ring.get_hosts.return_value = [self.w.host] - assertion = self.assertTrue - else: - fake_ring_manager.ring.get_hosts.return_value = ['not_this_host'] - assertion = self.assertFalse - - self.w.hash_ring_mgr = fake_ring_manager - for cmd in cmds: - r = event.Resource( - tenant_id=self.tenant_id, - id=self.router_id, - driver='router', - ) - msg = event.Event( - resource=r, - crud=event.COMMAND, - body={ - 'command': cmd, - 'resource_id': self.router_id, - 'router_id': self.router_id, # compat. 
- 'tenant_id': self.tenant_id} - ) - assertion(self.w._should_process_command(msg)) - - if key == DC_KEY: - fake_ring_manager.ring.get_hosts.assert_called_with(DC_KEY) - else: - fake_ring_manager.ring.get_hosts.assert_called_with( - msg.body[key]) - - @mock.patch('astara.worker.hash_ring', autospec=True) - def test__should_process_command_resources(self, fake_hash): - cmds = worker.EVENT_COMMANDS - self._test__should_process_command( - fake_hash, cmds=cmds, key='resource_id', negative=False) - - @mock.patch('astara.worker.hash_ring', autospec=True) - def test__should_process_command_resources_negative(self, fake_hash): - cmds = [commands.RESOURCE_DEBUG, commands.RESOURCE_MANAGE] - self._test__should_process_command( - fake_hash, cmds=cmds, key='resource_id', negative=True) - - @mock.patch('astara.worker.hash_ring', autospec=True) - def test__should_process_command_routers(self, fake_hash): - cmds = [commands.ROUTER_DEBUG, commands.ROUTER_MANAGE] - self._test__should_process_command( - fake_hash, cmds=cmds, key='router_id', negative=False) - - @mock.patch('astara.worker.hash_ring', autospec=True) - def test__should_process_command_routers_negative(self, fake_hash): - cmds = [commands.ROUTER_DEBUG, commands.ROUTER_MANAGE] - self._test__should_process_command( - fake_hash, cmds=cmds, key='router_id', negative=True) - - @mock.patch('astara.worker.hash_ring', autospec=True) - def test__should_process_command_tenants(self, fake_hash): - cmds = [commands.TENANT_DEBUG, commands.TENANT_MANAGE] - self._test__should_process_command( - fake_hash, cmds=cmds, key='tenant_id', negative=False) - - @mock.patch('astara.worker.hash_ring', autospec=True) - def test__should_process_command_tenants_negative(self, fake_hash): - cmds = [commands.TENANT_DEBUG, commands.TENANT_MANAGE] - self._test__should_process_command( - fake_hash, cmds=cmds, key='tenant_id', negative=True) - - @mock.patch('astara.worker.hash_ring', autospec=True) - def test__should_process_command_global_debug(self, fake_hash): - fake_hash.DC_KEY = DC_KEY - cmds = [commands.GLOBAL_DEBUG] - self._test__should_process_command( - fake_hash, cmds=cmds, key=DC_KEY, negative=False) - - @mock.patch('astara.worker.hash_ring', autospec=True) - def test__should_process_command_global_debug_negative(self, fake_hash): - fake_hash.DC_KEY = DC_KEY - cmds = [commands.GLOBAL_DEBUG] - self._test__should_process_command( - fake_hash, cmds=cmds, key=DC_KEY, negative=True) - - def test__release_resource_lock(self): - resource_id = '0ae77286-c0d6-11e5-9181-525400137dfc' - fake_lock = mock.Mock(release=mock.Mock()) - - self.w._resource_locks = { - resource_id: fake_lock - } - fake_sm = mock.Mock(resource_id=resource_id) - self.w._release_resource_lock(fake_sm) - self.assertTrue(fake_lock.release.called) - - def test__release_resource_lock_unlocked(self): - resource_id = '0ae77286-c0d6-11e5-9181-525400137dfc' - fake_lock = mock.Mock(release=mock.Mock()) - fake_lock.release.side_effect = threading.ThreadError() - self.w._resource_locks = { - resource_id: fake_lock - } - fake_sm = mock.Mock(resource_id=resource_id) - # just ensure we dont raise - self.w._release_resource_lock(fake_sm) - - def test_worker_context_config(self): - self.config(astara_metadata_port=1234) - self.config(host='foohost') - ctxt = worker.WorkerContext(fakes.FAKE_MGT_ADDR) - self.assertEqual( - { - 'host': 'foohost', - 'metadata_port': 1234, - 'address': fakes.FAKE_MGT_ADDR, - }, - ctxt.config) - - @mock.patch('astara.worker.Worker._get_trms') - def test__get_all_state_machines(self, 
fake_get_trms): - trms = [ - mock.Mock( - get_all_state_machines=mock.Mock( - return_value=['sm1', 'sm2']), - ), - mock.Mock( - get_all_state_machines=mock.Mock( - return_value=['sm3', 'sm4']), - ), - ] - fake_get_trms.return_value = trms - res = self.w._get_all_state_machines() - self.assertEqual( - set(['sm1', 'sm2', 'sm3', 'sm4']), - res - ) - - @mock.patch('astara.worker.hash_ring', autospec=True) - def test__ring_balanced(self, fake_hash): - self._balanced_p.stop() - fake_ring_manager = fake_hash.HashRingManager() - fake_ring_manager.balanced = False - self.w.hash_ring_mgr = fake_ring_manager - self.assertFalse(self.w._ring_balanced()) - - def test__defer_message(self): - self.assertEqual( - [], self.w._deferred_messages) - self.w._defer_message(self.target, self.msg) - self.assertEqual( - [(self.target, self.msg)], self.w._deferred_messages) - - @mock.patch('astara.worker.Worker.handle_message') - def test__replay_deferred_messages_none(self, fakehandle): - self.w._deferred_messages = [] - self.w._replay_deferred_messages() - self.assertFalse(fakehandle.called) - - @mock.patch('astara.worker.Worker.handle_message') - def test__replay_deferred_messages(self, fake_handle): - msgs = [ - ('fake_tgt_1', 'fake_tgt_1'), - ('fake_tgt_2', 'fake_tgt_2'), - ('fake_tgt_3', 'fake_tgt_3'), - ] - self.w._deferred_messages = msgs - self.w._replay_deferred_messages() - exp_calls = [mock.call(t, m) for t, m in msgs] - self.assertEqual( - exp_calls, fake_handle.call_args_list) - - -class TestResourceCache(WorkerTestBase): - def setUp(self): - super(TestResourceCache, self).setUp() - self.resource_cache = worker.TenantResourceCache() - self.worker_context = worker.WorkerContext(fakes.FAKE_MGT_ADDR) - - def test_resource_cache_hit(self): - self.resource_cache._tenant_resources = { - router.Router.RESOURCE_NAME: { - 'fake_tenant_id': 'fake_cached_resource_id', - } - } - r = event.Resource( - tenant_id='fake_tenant_id', - id='fake_resource_id', - driver=router.Router.RESOURCE_NAME, - ) - msg = event.Event(resource=r, crud=event.UPDATE, body={}) - res = self.resource_cache.get_by_tenant( - resource=r, worker_context=self.worker_context, message=msg) - self.assertEqual('fake_cached_resource_id', res) - self.assertFalse(self.w._context.neutron.get_router_for_tenant.called) - - def test_resource_cache_miss(self): - r = event.Resource( - tenant_id='fake_tenant_id', - id='fake_fetched_resource_id', - driver=router.Router.RESOURCE_NAME, - ) - msg = event.Event( - resource=r, - crud=event.UPDATE, - body={}, - ) - res = self.resource_cache.get_by_tenant( - resource=r, - worker_context=self.worker_context, - message=msg) - self.assertEqual('fake_fetched_resource_id', res) - self.w._context.neutron.get_router_for_tenant.assert_called_with( - 'fake_tenant_id') - - def test_resource_cache_delete(self): - r = event.Resource( - tenant_id='fake_tenant_id', - id='fake_fetched_resource_id', - driver=router.Router.RESOURCE_NAME, - ) - msg = event.Event( - resource=r, - crud=event.UPDATE, - body={}, - ) - self.resource_cache.get_by_tenant( - resource=r, - worker_context=self.worker_context, - message=msg) - self.assertEqual( - r.id, - self.resource_cache._tenant_resources[r.driver][r.tenant_id]) - self.resource_cache.delete(r) - self.assertNotIn( - r.tenant_id, - self.resource_cache._tenant_resources[r.driver]) - - -class TestCreatingResource(WorkerTestBase): - def setUp(self): - super(TestCreatingResource, self).setUp() - self.tenant_id = '98dd9c41-d3ac-4fd6-8927-567afa0b8fc3' - self.router_id = 
'ac194fc5-f317-412e-8611-fb290629f624' - self.hostname = 'astara' - - self.resource = event.Resource(router.Router.RESOURCE_NAME, - self.router_id, - self.tenant_id) - - self.msg = event.Event( - resource=self.resource, - crud=event.CREATE, - body={'key': 'value'}, - ) - self.w._should_process_message = mock.MagicMock(return_value=self.msg) - - def test_in_tenant_managers(self): - self.w.handle_message(self.tenant_id, self.msg) - self.assertIn(self.tenant_id, self.w.tenant_managers) - trm = self.w.tenant_managers[self.tenant_id] - self.assertEqual(self.tenant_id, trm.tenant_id) - - def test_not_in_tenant_managers(self): - self.w._should_process_message = mock.MagicMock(return_value=False) - self.w.handle_message(self.tenant_id, self.msg) - self.assertNotIn(self.tenant_id, self.w.tenant_managers) - - def test_message_enqueued(self): - self.w.handle_message(self.tenant_id, self.msg) - trm = self.w.tenant_managers[self.tenant_id] - sm = trm.get_state_machines(self.msg, worker.WorkerContext( - fakes.FAKE_MGT_ADDR))[0] - self.assertEqual(1, len(sm._queue)) - - -class TestWildcardMessages(WorkerTestBase): - - def setUp(self): - super(TestWildcardMessages, self).setUp() - - self.tenant_id_1 = 'a8f964d4-6631-11e5-a79f-525400cfc32a' - self.tenant_id_2 = 'ef1a6e90-6631-11e5-83cb-525400cfc326' - self.w._should_process_message = mock.MagicMock(return_value=self.msg) - - # Create some tenants - for msg in [ - event.Event( - resource=event.Resource( - driver=router.Router.RESOURCE_NAME, - id='ABCD', - tenant_id=self.tenant_id_1, - ), - crud=event.CREATE, - body={'key': 'value'}, - ), - event.Event( - resource=event.Resource( - driver=router.Router.RESOURCE_NAME, - id='EFGH', - tenant_id=self.tenant_id_2), - crud=event.CREATE, - body={'key': 'value'}, - )]: - self.w.handle_message(msg.resource.tenant_id, msg) - - def test_wildcard_to_all(self): - trms = self.w._get_trms('*') - ids = sorted(trm.tenant_id for trm in trms) - self.assertEqual([self.tenant_id_1, self.tenant_id_2], ids) - - def test_wildcard_to_error(self): - trms = self.w._get_trms('error') - ids = sorted(trm.tenant_id for trm in trms) - self.assertEqual([self.tenant_id_1, self.tenant_id_2], ids) - - -class TestShutdown(WorkerTestBase): - def test_shutdown_on_null_message(self): - with mock.patch.object(self.w, '_shutdown') as meth: - self.w.handle_message(None, None) - meth.assert_called_once_with() - - def test_stop_threads(self): - original_queue = self.w.work_queue - self.assertTrue(self.w._keep_going) - self.w._shutdown() - self.assertFalse(self.w._keep_going) - new_queue = self.w.work_queue - self.assertIsNot(original_queue, new_queue) - - @mock.patch('kombu.connection.BrokerConnection') - @mock.patch('kombu.entity.Exchange') - @mock.patch('kombu.Producer') - def test_stop_threads_notifier(self, producer, exchange, broker): - notifier = notifications.Publisher('topic') - w = worker.Worker( - notifier, fakes.FAKE_MGT_ADDR, self.fake_scheduler, self.proc_name) - self.assertTrue(notifier) - w._shutdown() - self.assertFalse(w.notifier._t) - - -class TestUpdateStateMachine(WorkerTestBase): - def setUp(self): - super(TestUpdateStateMachine, self).setUp() - self.worker_context = worker.WorkerContext(fakes.FAKE_MGT_ADDR) - self.w._should_process_message = mock.MagicMock(return_value=self.msg) - - def _test(self, fake_hash, negative=False): - self.config(enabled=True, group='coordination') - fake_ring_manager = fake_hash.HashRingManager() - if not negative: - fake_ring_manager.ring.get_hosts.return_value = [self.w.host] - else: - 
fake_ring_manager.ring.get_hosts.return_value = [] - - self.w.hash_ring_mgr = fake_ring_manager - - # Create the router manager and state machine so we can - # replace the update() method with a mock. - trm = self.w._get_trms(self.tenant_id)[0] - sm = trm.get_state_machines(self.msg, self.worker_context)[0] - with mock.patch.object(sm, 'update') as meth: - self.w.handle_message(self.tenant_id, self.msg) - # Add a null message so the worker loop will exit. We have - # to do this directly, because if we do it through - # handle_message() that triggers shutdown logic that keeps - # the loop from working properly. - self.w.work_queue.put(None) - # We aren't using threads (and we trust that threads do - # work) so we just invoke the thread target ourselves to - # pretend. - used_context = self.w._thread_target() - - if not negative: - meth.assert_called_once_with(used_context) - else: - self.assertFalse(meth.called) - - @mock.patch('astara.worker.hash_ring', autospec=True) - def test_host_mapped(self, fake_hash): - self._test(fake_hash) - - @mock.patch('astara.worker.hash_ring', autospec=True) - def test_host_not_mapped(self, fake_hash): - self._test(fake_hash, negative=True) - - -class TestReportStatus(WorkerTestBase): - def test_report_status_dispatched(self): - with mock.patch.object(self.w, 'report_status') as meth: - self.w.handle_message( - 'debug', - event.Event('*', event.COMMAND, - {'command': commands.WORKERS_DEBUG}) - ) - meth.assert_called_once_with() - - def test_handle_message_report_status(self): - with mock.patch('astara.worker.cfg.CONF') as conf: - conf.coordination = mock.Mock(enabled=False) - self.w.handle_message( - 'debug', - event.Event('*', event.COMMAND, - {'command': commands.WORKERS_DEBUG}) - ) - self.assertTrue(conf.log_opt_values.called) - - -class TestDebugRouters(WorkerTestBase): - def setUp(self): - super(TestDebugRouters, self).setUp() - self.w._should_process_command = mock.MagicMock(return_value=self.msg) - - def testNoDebugs(self): - self.assertEqual(set(), self.dbapi.resources_in_debug()) - - def testWithDebugs(self): - self.w.handle_message( - '*', - event.Event('*', event.COMMAND, - {'command': commands.RESOURCE_DEBUG, - 'resource_id': 'this-resource-id', - 'reason': 'foo'}), - ) - self.enable_debug(resource_id='this-resource-id') - self.assertIn(('this-resource-id', 'foo'), - self.dbapi.resources_in_debug()) - - def testManage(self): - self.enable_debug(resource_id='this-resource-id') - lock = mock.Mock() - self.w._resource_locks['this-resource-id'] = lock - r = event.Resource( - tenant_id='*', - id='*', - driver=None, - ) - self.w.handle_message( - '*', - event.Event( - resource=r, - crud=event.COMMAND, - body={'command': commands.RESOURCE_MANAGE, - 'resource_id': 'this-resource-id'}), - ) - self.assert_not_in_debug(resource_id='this-resource-id') - self.assertEqual(1, lock.release.call_count) - - def testManageNoLock(self): - self.enable_debug(resource_id='this-resource-id') - self.w.handle_message( - '*', - event.Event('*', event.COMMAND, - {'command': commands.RESOURCE_MANAGE, - 'resource_id': 'this-resource-id'}), - ) - self.assert_not_in_debug(resource_id='this-resource-id') - - def testManageUnlocked(self): - self.enable_debug(resource_id='this-resource-id') - lock = threading.Lock() - self.w._resource_locks['this-resource-id'] = lock - self.w.handle_message( - '*', - event.Event('*', event.COMMAND, - {'command': commands.RESOURCE_MANAGE, - 'resource_id': 'this-resource-id'}), - ) - self.assert_not_in_debug(resource_id='this-resource-id') - - def 
testDebugging(self): - tenant_id = '98dd9c41-d3ac-4fd6-8927-567afa0b8fc3' - resource_id = 'ac194fc5-f317-412e-8611-fb290629f624' - self.enable_debug(resource_id=resource_id) - msg = event.Event( - resource=event.Resource(router.Router.RESOURCE_NAME, - resource_id, - tenant_id), - crud=event.CREATE, - body={'key': 'value'}, - ) - # Create the router manager and state machine so we can - # replace the send_message() method with a mock. - trm = self.w._get_trms(tenant_id)[0] - sm = trm.get_state_machines(msg, worker.WorkerContext( - fakes.FAKE_MGT_ADDR))[0] - with mock.patch.object(sm, 'send_message') as meth: - # The router id is being ignored, so the send_message() - # method shouldn't ever be invoked. - meth.side_effect = AssertionError('send_message was called') - self.w.handle_message(tenant_id, msg) - - -class TestDebugTenants(WorkerTestBase): - def setUp(self): - super(TestDebugTenants, self).setUp() - self.w._should_process_command = mock.MagicMock(return_value=self.msg) - - def testNoDebugs(self): - self.assertEqual(set(), self.dbapi.tenants_in_debug()) - - def testWithDebugs(self): - self.enable_debug(tenant_id='this-tenant-id') - self.w.handle_message( - '*', - event.Event('*', event.COMMAND, - {'command': commands.TENANT_DEBUG, - 'tenant_id': 'this-tenant-id'}), - ) - is_debug, _ = self.dbapi.tenant_in_debug('this-tenant-id') - self.assertTrue(is_debug) - - def testManage(self): - self.enable_debug(tenant_id='this-tenant-id') - self.w.handle_message( - '*', - event.Event('*', event.COMMAND, - {'command': commands.TENANT_MANAGE, - 'tenant_id': 'this-tenant-id'}), - ) - self.assert_not_in_debug(tenant_id='this-tenant-id') - - def testDebugging(self): - tenant_id = '98dd9c41-d3ac-4fd6-8927-567afa0b8fc3' - resource_id = 'ac194fc5-f317-412e-8611-fb290629f624' - self.enable_debug(tenant_id=tenant_id) - msg = event.Event( - resource=event.Resource( - driver=router.Router.RESOURCE_NAME, - id=resource_id, - tenant_id=tenant_id), - crud=event.CREATE, - body={'key': 'value'}, - ) - # Create the router manager and state machine so we can - # replace the send_message() method with a mock. - trm = self.w._get_trms(tenant_id)[0] - sm = trm.get_state_machines(msg, worker.WorkerContext( - fakes.FAKE_MGT_ADDR))[0] - with mock.patch.object(sm, 'send_message') as meth: - # The tenant id is being ignored, so the send_message() - # method shouldn't ever be invoked. 
- meth.side_effect = AssertionError('send_message was called') - self.w.handle_message(tenant_id, msg) - - -class TestConfigReload(WorkerTestBase): - @mock.patch.object(worker, 'cfg') - def test(self, mock_cfg): - mock_cfg.CONF = mock.MagicMock( - log_opt_values=mock.MagicMock()) - mock_cfg.CONF.coordination.enabled = False - tenant_id = '*' - resource_id = '*' - msg = event.Event( - resource=resource_id, - crud=event.COMMAND, - body={'command': commands.CONFIG_RELOAD} - ) - self.w.handle_message(tenant_id, msg) - self.assertTrue(mock_cfg.CONF.called) - self.assertTrue(mock_cfg.CONF.log_opt_values.called) - - -class TestNormalizeUUID(unittest.TestCase): - - def test_upper(self): - self.assertEqual( - 'ac194fc5-f317-412e-8611-fb290629f624', - worker._normalize_uuid( - 'ac194fc5-f317-412e-8611-fb290629f624'.upper())) - - def test_no_dashes(self): - self.assertEqual( - 'ac194fc5-f317-412e-8611-fb290629f624', - worker._normalize_uuid('ac194fc5f317412e8611fb290629f624')) - - -class TestGlobalDebug(WorkerTestBase): - def test_global_debug_no_message_sent(self): - self.dbapi.enable_global_debug() - tenant_id = '98dd9c41-d3ac-4fd6-8927-567afa0b8fc3' - resource_id = 'ac194fc5-f317-412e-8611-fb290629f624' - msg = event.Event( - resource=event.Resource(router.Router.RESOURCE_NAME, - resource_id, - tenant_id), - crud=event.CREATE, - body={'key': 'value'}, - ) - # Create the router manager and state machine so we can - # replace the send_message() method with a mock. - trm = self.w._get_trms(tenant_id)[0] - sm = trm.get_state_machines(msg, worker.WorkerContext( - fakes.FAKE_MGT_ADDR))[0] - with mock.patch.object(sm, 'send_message') as meth: - # The tenant id is being ignored, so the send_message() - # method shouldn't ever be invoked. - meth.side_effect = AssertionError('send_message was called') - self.w.handle_message(tenant_id, msg) - - -class TestRebalance(WorkerTestBase): - def setUp(self): - super(TestRebalance, self).setUp() - self.fake_host = 'fake_host' - self.w.host = 'fake_host' - self.resource_id = '56232034-a852-11e5-854e-035a3632659f' - self.tenant_id = '601128de-a852-11e5-a09d-cf6fa26e6e6b' - - self.resource = event.Resource( - 'router', - self.resource_id, - self.tenant_id) - self.msg = event.Event( - resource=self.resource, - crud=None, - body={'key': 'value'}, - ) - - @mock.patch('astara.worker.Worker._replay_deferred_messages') - @mock.patch('astara.worker.Worker._repopulate') - def test_rebalance_bootstrap(self, fake_repop, fake_replay): - fake_hash = mock.Mock( - rebalance=mock.Mock(), - ) - self.w.hash_ring_mgr = fake_hash - msg = event.Event( - resource=self.resource, - crud=event.REBALANCE, - body={ - 'members': ['foo', 'bar'], - 'node_bootstrap': True - }, - ) - self.w.handle_message('*', msg) - fake_hash.rebalance.assert_called_with(['foo', 'bar']) - self.assertTrue(fake_replay.called) - self.assertFalse(fake_repop.called) - - @mock.patch('astara.worker.Worker._add_resource_to_work_queue') - @mock.patch('astara.worker.Worker._get_all_state_machines') - @mock.patch('astara.worker.Worker._repopulate') - def test_rebalance(self, fake_repop, fake_get_all_sms, fake_add_rsc): - sm1 = mock.Mock( - resource_id='sm1', - send_message=mock.Mock(return_value=True), - ) - sm2 = mock.Mock( - resource_id='sm2', - resource='sm2_resource', - send_message=mock.Mock(return_value=True), - ) - fake_get_all_sms.side_effect = [ - set([sm1]), - set([sm1, sm2]), - ] - fake_hash = mock.Mock( - rebalance=mock.Mock(), - ) - self.w.hash_ring_mgr = fake_hash - msg = event.Event( - resource=self.resource, - 
crud=event.REBALANCE, - body={ - 'members': ['foo', 'bar'], - }, - ) - self.w.handle_message('*', msg) - fake_hash.rebalance.assert_called_with(['foo', 'bar']) - self.assertTrue(fake_repop.called) - - exp_event = event.Event( - resource='sm2_resource', - crud=event.UPDATE, - body={} - ) - sm2.send_message.assert_called_with(exp_event) - sm2._add_resource_to_work_queue(sm2) - - @mock.patch('astara.populate.repopulate') - def test__repopulate_sm_removed(self, fake_repopulate): - fake_ring = mock.Mock( - get_hosts=mock.Mock() - ) - fake_hash = mock.Mock(ring=fake_ring) - self.w.hash_ring_mgr = fake_hash - - rsc1 = event.Resource( - driver='router', - tenant_id='79f418c8-a849-11e5-9c36-df27538e1b7e', - id='7f2a1d56-a849-11e5-a0ce-a74ef0b18fa1', - ) - rsc2 = event.Resource( - driver='router', - tenant_id='8d55fdb4-a849-11e5-958f-0b870649546d', - id='9005cd5a-a849-11e5-a434-27c4c7c70a8b', - ) - resources = [rsc1, rsc2] - - # create initial, pre-rebalance state machines - for r in resources: - for trm in self.w._get_trms(r.tenant_id): - e = event.Event(resource=r, crud=None, body={}) - trm.get_state_machines(e, self.w._context) - - fake_hash.ring.get_hosts.side_effect = [ - 'foo', self.fake_host - ] - fake_repopulate.return_value = resources - - # mock doesn't like to have its .name overwritten? - class FakeWorker(object): - name = self.w.proc_name - tgt = [{'worker': FakeWorker()}] - - self.w.scheduler.dispatcher.pick_workers = mock.Mock(return_value=tgt) - self.w._repopulate() - post_rebalance_sms = self.w._get_all_state_machines() - self.assertEqual(1, len(post_rebalance_sms)) - sm = post_rebalance_sms.pop() - self.assertEqual(rsc2.id, sm.resource_id) - - @mock.patch('astara.populate.repopulate') - def test__repopulate_sm_added(self, fake_repopulate): - fake_ring = mock.Mock( - get_hosts=mock.Mock() - ) - fake_hash = mock.Mock(ring=fake_ring) - self.w.hash_ring_mgr = fake_hash - - rsc1 = event.Resource( - driver='router', - tenant_id='79f418c8-a849-11e5-9c36-df27538e1b7e', - id='7f2a1d56-a849-11e5-a0ce-a74ef0b18fa1', - ) - rsc2 = event.Resource( - driver='router', - tenant_id='8d55fdb4-a849-11e5-958f-0b870649546d', - id='9005cd5a-a849-11e5-a434-27c4c7c70a8b', - ) - rsc3 = event.Resource( - driver='router', - tenant_id='455549a4-a851-11e5-a060-df26a5877746', - id='4a05c758-a851-11e5-bf9f-0387cfcb8f9b', - ) - - resources = [rsc1, rsc2, rsc3] - - # create initial, pre-rebalance state machines - for r in resources[:-1]: - for trm in self.w._get_trms(r.tenant_id): - e = event.Event(resource=r, crud=None, body={}) - trm.get_state_machines(e, self.w._context) - - fake_hash.ring.get_hosts.side_effect = [ - self.fake_host, self.fake_host, self.fake_host - ] - fake_repopulate.return_value = resources - - # mock doesn't like to have its .name overwritten? - class FakeWorker(object): - name = self.w.proc_name - tgt = [{'worker': FakeWorker()}] - - self.w.scheduler.dispatcher.pick_workers = mock.Mock(return_value=tgt) - self.w._repopulate() - post_rebalance_sms = self.w._get_all_state_machines() - self.assertEqual(3, len(post_rebalance_sms)) - rids = [r.id for r in resources] - for sm in post_rebalance_sms: - self.assertIn(sm.resource_id, rids) diff --git a/astara/version.py b/astara/version.py deleted file mode 100644 index 9783946e..00000000 --- a/astara/version.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import pbr.version
-
-version_info = pbr.version.VersionInfo('astara')
diff --git a/astara/worker.py b/astara/worker.py
deleted file mode 100644
index 6b93f3bb..00000000
--- a/astara/worker.py
+++ /dev/null
@@ -1,815 +0,0 @@
-# Copyright 2014 DreamHost, LLC
-#
-# Author: DreamHost, LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-"""Worker process parts.
-"""
-
-import collections
-import Queue
-import threading
-import uuid
-import six
-
-from logging import INFO
-
-from oslo_config import cfg
-from oslo_concurrency import lockutils
-from oslo_log import log as logging
-
-from astara import commands
-from astara import drivers
-from astara.common.i18n import _LE, _LI, _LW
-from astara import event
-from astara import tenant
-from astara.common import hash_ring
-from astara.api import nova
-from astara.api import neutron
-from astara.db import api as db_api
-from astara import populate
-
-LOG = logging.getLogger(__name__)
-CONF = cfg.CONF
-
-WORKER_OPTS = [
-    cfg.StrOpt(
-        'ignored_router_directory',
-        default='/etc/astara/ignored',
-        help='Directory to scan for routers to ignore for debugging',
-    ),
-    cfg.IntOpt(
-        'queue_warning_threshold',
-        default=100,
-        help='warn if the event backlog for a tenant exceeds this value',
-    ),
-    cfg.IntOpt(
-        'reboot_error_threshold',
-        default=5,
-        help=('Number of reboots to allow before assuming '
-              'a router needs manual intervention'),
-    ),
-    cfg.IntOpt(
-        'num_worker_threads',
-        default=4,
-        help='the number of worker threads to run per process'),
-
-]
-CONF.register_opts(WORKER_OPTS)
-
-EVENT_COMMANDS = {
-    commands.RESOURCE_UPDATE: event.UPDATE,
-    commands.RESOURCE_REBUILD: event.REBUILD,
-}
-
-DEPRECATED_ROUTER_COMMANDS = {
-    commands.ROUTER_UPDATE: event.UPDATE,
-    commands.ROUTER_REBUILD: event.REBUILD,
-}
-
-
-def _normalize_uuid(value):
-    return str(uuid.UUID(value.replace('-', '')))
-
-
-class TenantResourceCache(object):
-    """Holds a cache of default resource_ids for tenants. This is constructed
-    and consulted when we receive messages with no associated router_id and
-    avoids a Neutron call per-message of this type.
-    """
-    # NOTE(adam_g): This is a pretty dumb caching layer and can be backed
-    # by an external system like memcache to further optimize lookups
-    # across multiple rugs.
-    _tenant_resources = {}
-
-    @lockutils.synchronized('astara-trm')
-    def get_by_tenant(self, resource, worker_context, message):
-        tenant_id = resource.tenant_id
-        driver = resource.driver
-        cached_resources = self._tenant_resources.get(driver, {})
-        if tenant_id not in cached_resources:
-            resource_id = drivers.get(driver).get_resource_id_for_tenant(
-                worker_context, tenant_id, message)
-            if not resource_id:
-                LOG.debug('%s not found for tenant %s.',
-                          driver, tenant_id)
-                return None
-
-            if not cached_resources:
-                self._tenant_resources[driver] = {}
-            self._tenant_resources[driver][tenant_id] = resource_id
-
-        return self._tenant_resources[driver][tenant_id]
-
-    @lockutils.synchronized('astara-trm')
-    def delete(self, resource):
-        """Callback used to remove a resource from the cache upon deletion"""
-        try:
-            del self._tenant_resources[resource.driver][resource.tenant_id]
-        except KeyError:
-            pass
-
-
-class WorkerContext(object):
-    """Holds resources owned by the worker and used by the Automaton.
-    """
-
-    def __init__(self, management_address=None):
-        self.neutron = neutron.Neutron(cfg.CONF)
-        self.nova_client = nova.Nova(cfg.CONF)
-        self.management_address = management_address
-
-    @property
-    def config(self):
-        return {
-            'address': self.management_address,
-            'metadata_port': cfg.CONF.astara_metadata_port,
-            'host': cfg.CONF.host,
-        }
-
-
-class Worker(object):
-    """Manages state for the worker process.
-
-    The Scheduler gets a callable as an argument, but we need to keep
-    track of a bunch of the state machines, so the callable is a
-    method of an instance of this class instead of a simple function.
-    """
-    def __init__(self, notifier, management_address, scheduler, proc_name):
-        self._ignore_directory = cfg.CONF.ignored_router_directory
-        self._queue_warning_threshold = cfg.CONF.queue_warning_threshold
-        self._reboot_error_threshold = cfg.CONF.reboot_error_threshold
-        self.host = cfg.CONF.host
-        self.work_queue = Queue.Queue()
-        self.lock = threading.Lock()
-        self._keep_going = True
-        self.tenant_managers = {}
-        self.management_address = management_address
-        self.scheduler = scheduler
-        self.proc_name = proc_name
-        self.resource_cache = TenantResourceCache()
-
-        # This process-global context should not be used in the
-        # threads, since the clients are not thread-safe.
-        self._context = WorkerContext(self.management_address)
-        self.notifier = notifier
-        # The notifier needs to be started here to ensure that it
-        # happens inside the worker process and not the parent.
-        self.notifier.start()
-
-        # The DB is used for tracking debug modes
-        self.db_api = db_api.get_instance()
-
-        # Thread locks for the routers so we only put one copy in the
-        # work queue at a time
-        self._resource_locks = collections.defaultdict(threading.Lock)
-        # Messages about what each thread is doing, keyed by thread id
-        # and reported by the debug command.
-        self._thread_status = {}
-        # Start the threads last, so they can use the instance
-        # variables created above.
-        self.threads = [
-            threading.Thread(
-                name='t%02d' % i,
-                target=self._thread_target,
-            )
-            for i in six.moves.range(cfg.CONF.num_worker_threads)
-        ]
-
-        self.hash_ring_mgr = hash_ring.HashRingManager()
-        self._deferred_messages = []
-
-        for t in self.threads:
-            t.setDaemon(True)
-            t.start()
-
-    def _thread_target(self):
-        """This method runs in each worker thread.
- """ - my_id = threading.current_thread().name - LOG.debug('starting thread') - # Use a separate context from the one we use when receiving - # messages and talking to the tenant router manager because we - # are in a different thread and the clients are not - # thread-safe. - context = WorkerContext(self.management_address) - while self._keep_going: - try: - # Try to get a state machine from the work queue. If - # there's nothing to do, we will block for a while. - self._thread_status[my_id] = 'waiting for task' - sm = self.work_queue.get(timeout=10) - except Queue.Empty: - continue - if sm is None: - LOG.info(_LI('received stop message')) - break - - # Make sure we didn't already have some updates under way - # for a router we've been told to ignore for debug mode. - should_ignore, reason = \ - self.db_api.resource_in_debug(sm.resource_id) - if should_ignore: - LOG.debug('Skipping update of resource %s in debug mode. ' - '(reason: %s)', sm.resource_id, reason) - continue - - # In the event that a rebalance took place while processing an - # event, it may have been put back into the work queue. Check - # the hash table once more to find out if we still manage it - # and do some cleanup if not. - if cfg.CONF.coordination.enabled: - target_hosts = self.hash_ring_mgr.ring.get_hosts( - sm.resource_id) - if self.host not in target_hosts: - LOG.debug('Skipping update of router %s, it no longer ' - 'maps here.', sm.resource_id) - trm = self.tenant_managers[sm.tenant_id] - trm.unmanage_resource(sm.resource_id) - self.work_queue.task_done() - with self.lock: - self._release_resource_lock(sm) - continue - - # FIXME(dhellmann): Need to look at the router to see if - # it belongs to a tenant which is in debug mode, but we - # don't have that data in the sm, yet. - LOG.debug('performing work on %s for tenant %s', - sm.resource_id, sm.tenant_id) - try: - self._thread_status[my_id] = 'updating %s' % sm.resource_id - sm.update(context) - except: - LOG.exception(_LE('could not complete update for %s'), - sm.resource_id) - finally: - self._thread_status[my_id] = ( - 'finalizing task for %s' % sm.resource_id - ) - self.work_queue.task_done() - with self.lock: - # Release the lock that prevents us from adding - # the state machine back into the queue. If we - # find more work, we will re-acquire it. If we do - # not find more work, we hold the primary work - # queue lock so the main thread cannot put the - # state machine back into the queue until we - # release that lock. - self._release_resource_lock(sm) - # The state machine has indicated that it is done - # by returning. If there is more work for it to - # do, reschedule it by placing it at the end of - # the queue. - if sm.has_more_work(): - LOG.debug('%s has more work, returning to work queue', - sm.resource_id) - self._add_resource_to_work_queue(sm) - else: - LOG.debug('%s has no more work', sm.resource_id) - # Return the context object so tests can look at it - self._thread_status[my_id] = 'exiting' - return context - - def _shutdown(self): - """Stop the worker. - """ - self.report_status(show_config=False) - # Tell the notifier to stop - if self.notifier: - self.notifier.stop() - # Stop the worker threads - self._keep_going = False - # Drain the task queue by discarding it - # FIXME(dhellmann): This could prevent us from deleting - # routers that need to be deleted. 
-
-    def _shutdown(self):
-        """Stop the worker.
-        """
-        self.report_status(show_config=False)
-        # Tell the notifier to stop
-        if self.notifier:
-            self.notifier.stop()
-        # Stop the worker threads
-        self._keep_going = False
-        # Drain the task queue by discarding it
-        # FIXME(dhellmann): This could prevent us from deleting
-        # routers that need to be deleted.
-        self.work_queue = Queue.Queue()
-        for t in self.threads:
-            LOG.debug('sending stop message to %s', t.getName())
-            self.work_queue.put((None, None))
-        # Wait for our threads to finish
-        for t in self.threads:
-            LOG.debug('waiting for %s to finish', t.getName())
-            t.join(timeout=5)
-            LOG.debug('%s is %s', t.name,
-                      'alive' if t.is_alive() else 'stopped')
-        # Shutdown all of the tenant router managers. The lock is
-        # probably not necessary, since this should be running in the
-        # same thread where new messages are being received (and
-        # therefore those messages aren't being processed).
-        with self.lock:
-            for trm in self.tenant_managers.values():
-                LOG.debug('stopping tenant manager for %s', trm.tenant_id)
-                trm.shutdown()
-
-    def _get_trms(self, target):
-        if target.lower() in commands.WILDCARDS:
-            return list(self.tenant_managers.values())
-        # Normalize the tenant id to a dash-separated UUID format.
-        tenant_id = _normalize_uuid(target)
-        if tenant_id not in self.tenant_managers:
-            LOG.debug('creating tenant manager for %s', tenant_id)
-            self.tenant_managers[tenant_id] = tenant.TenantResourceManager(
-                tenant_id=tenant_id,
-                delete_callback=self.resource_cache.delete,
-                notify_callback=self.notifier.publish,
-                queue_warning_threshold=self._queue_warning_threshold,
-                reboot_error_threshold=self._reboot_error_threshold,
-            )
-
-        return [self.tenant_managers[tenant_id]]
-
-    def _populate_resource_id(self, message):
-        """Ensure message's resource is populated with a resource id if it
-        does not contain one. If not, attempt to look up by tenant using the
-        driver-supplied functionality.
-
-        :param message: event.Event object
-        :returns: a new event.Event object with a populated Event.resource.id
-                  if found, otherwise the original Event is returned.
-        """
-        if message.resource.id:
-            return message
-
-        LOG.debug("Looking for %s resource for tenant %s",
-                  message.resource.driver, message.resource.tenant_id)
-
-        resource_id = self.resource_cache.get_by_tenant(
-            message.resource, self._context, message)
-
-        if not resource_id:
-            LOG.warning(_LW(
-                'Resource of type %s not found for tenant %s.'),
-                message.resource.driver, message.resource.tenant_id)
-        else:
-            new_resource = event.Resource(
-                id=resource_id,
-                driver=message.resource.driver,
-                tenant_id=message.resource.tenant_id,
-            )
-            new_message = event.Event(
-                resource=new_resource,
-                crud=message.crud,
-                body=message.body,
-            )
-            message = new_message
-            LOG.debug("Using resource %s.", new_resource)
-
-        return message
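# An illustrative trace of the caching flow implemented by
# TenantResourceCache.get_by_tenant and _populate_resource_id above.
# FakeDriver and the literal ids are assumptions for this example only.
class FakeDriver(object):
    calls = 0

    @classmethod
    def get_resource_id_for_tenant(cls, worker_context, tenant_id, message):
        cls.calls += 1
        return 'resource-for-%s' % tenant_id


cache = {}  # driver name -> {tenant_id: resource_id}, as in the class above


def get_by_tenant(driver, tenant_id):
    per_driver = cache.setdefault(driver, {})
    if tenant_id not in per_driver:
        # Miss: exactly one backend lookup, then the result is cached.
        per_driver[tenant_id] = FakeDriver.get_resource_id_for_tenant(
            None, tenant_id, None)
    return per_driver[tenant_id]


assert get_by_tenant('router', 't1') == 'resource-for-t1'
assert get_by_tenant('router', 't1') == 'resource-for-t1'
assert FakeDriver.calls == 1  # the second call was served from the cache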
(reason: %s)', reason) - return False - - if message.resource.id not in commands.WILDCARDS: - message = self._populate_resource_id(message) - if not message.resource.id: - LOG.info(_LI('Ignoring message with no resource found.')) - return False - - should_ignore, reason = \ - self.db_api.tenant_in_debug(message.resource.tenant_id) - if should_ignore: - LOG.info( - 'Ignoring message intended for tenant %s in debug mode ' - '(reason: %s): %s', - message.resource.tenant_id, reason, message, - ) - return False - - should_ignore, reason = self.db_api.resource_in_debug( - message.resource.id) - if should_ignore: - LOG.info( - 'Ignoring message intended for resource %s in ' - 'debug mode (reason: %s): %s', - message.resource.id, reason, message, - ) - return False - - if target in commands.WILDCARDS: - return message - - if cfg.CONF.coordination.enabled: - target_hosts = self.hash_ring_mgr.ring.get_hosts( - message.resource.id) - if self.host not in target_hosts: - LOG.debug('Ignoring message intended for resource %s as it ' - 'does not map to this Rug process.', - message.resource.id) - return False - - return message - - def _ring_balanced(self): - return self.hash_ring_mgr.balanced - - def _defer_message(self, target, message): - LOG.debug("Deferring message for %s: %s", target, message) - self._deferred_messages.append((target, message)) - - def _replay_deferred_messages(self): - if not self._deferred_messages: - return - - LOG.debug( - 'Replaying pre-rebalance deferred messages on worker %s', - self.proc_name) - [self.handle_message(tgt, msg) for tgt, msg in self._deferred_messages] - - def handle_message(self, target, message): - """Callback to be used in main - """ - LOG.debug('got: %s %r', target, message) - - # If the cluster ring hasnt been seeded yet, we cannot make decisions - # about which messages to process. Instead, receive them and defer - # handling until we know the ring layout. - if (cfg.CONF.coordination.enabled and - not self._ring_balanced() and - message.crud != event.REBALANCE): - self._defer_message(target, message) - return - - if target is None: - # We got the shutdown instruction from our parent process. - self._shutdown() - return - if message.crud == event.COMMAND: - self._dispatch_command(target, message) - elif message.crud == event.REBALANCE: - self._rebalance(message) - else: - message = self._should_process_message(target, message) - if not message: - return - # This is an update command for the router, so deliver it - # to the state machine. - with self.lock: - self._deliver_message(target, message) - - def _find_state_machine_by_resource_id(self, resource_id): - for trm in self.tenant_managers.values(): - sm = trm.get_state_machine_by_resource_id(resource_id) - if sm: - return sm - - def _get_all_state_machines(self): - sms = set() - for trm in self._get_trms('*'): - sms.update(trm.get_all_state_machines()) - return sms - - def _repopulate(self): - """Repopulate local state machines given the new DHT - - After the hash ring has been rebalanced, this ensures the workers' - TRMs are populated with the correct set of state machines given the - current layout of the ring. We also consult the dispatcher to ensure - we're creating state machines on the correct worker process. This - also cleans up state machines that are no longer mapped here. 
- """ - LOG.debug('Running post-rebalance repopulate for worker %s', - self.proc_name) - for resource in populate.repopulate(): - target_hosts = self.hash_ring_mgr.ring.get_hosts( - resource.id) - if self.host not in target_hosts: - tid = _normalize_uuid(resource.tenant_id) - if tid in self.tenant_managers: - trm = self.tenant_managers[tid] - trm.unmanage_resource(resource.id) - continue - - tgt = self.scheduler.dispatcher.pick_workers( - resource.tenant_id)[0] - - if tgt['worker'].name != self.proc_name: - # Typically, state machine creation doesn't happen until the - # dispatcher has scheduled a msg to a single worker. rebalances - # are scheduled to all workers so we need to consult the - # dispatcher here to avoid creating state machines in all - # workers. - continue - - for trm in self._get_trms(resource.tenant_id): - # creates a state machine if one does not exist. - e = event.Event(resource=resource, crud=None, body={}) - trm.get_state_machines(e, self._context) - - def _rebalance(self, message): - # rebalance the ring with the new membership. - self.hash_ring_mgr.rebalance(message.body.get('members')) - - # We leverage the rebalance event to both seed the local node's - # hash ring when it comes online, and to also rebalance it in - # reaction to cluster events. Exit early if we're only responding - # to a bootstrapping rebalance, we don't need to worry about adjusting - # state because there is none yet. - if message.body.get('node_bootstrap'): - # replay any messages that may have accumulated while we were - # waiting to finish cluster bootstrap - self._replay_deferred_messages() - return - - # track which SMs we initially owned - orig_sms = self._get_all_state_machines() - - # rebuild the TRMs and SMs based on new ownership - self._repopulate() - - # TODO(adam_g): Replace the UPDATE with a POST_REBALANCE commnand - # that triggers a driver method instead of generic update. - # for newly owned resources, issue a post-rebalance update. - for sm in (self._get_all_state_machines() - orig_sms): - post_rebalance = event.Event( - resource=sm.resource, crud=event.UPDATE, - body={}) - LOG.debug('Sending post-rebalance update for %s', - sm.resource_id) - if sm.send_message(post_rebalance): - self._add_resource_to_work_queue(sm) - - # NOTE(adam_g): If somethings queued up on a SM, it means the SM - # is currently executing something thats probably long running - # (ie a create). We should add some smarts here to transfer the - # currently executing task to the new owner - - def _should_process_command(self, message): - command = message.body['command'] - - def _hash_by(k, d): - if not cfg.CONF.coordination.enabled: - return True - - data = d.get(k) - target_hosts = self.hash_ring_mgr.ring.get_hosts(data) - if self.host not in target_hosts: - LOG.debug( - 'Ignoring command, it does not map to this host by %s ' - '(%s)' % (k, data)) - return False - return True - - if command in [commands.WORKERS_DEBUG, commands.CONFIG_RELOAD]: - # All RUGs get workers_debug and config reload commands - return True - - resource_cmds = ([commands.RESOURCE_DEBUG, commands.RESOURCE_MANAGE] + - EVENT_COMMANDS.keys()) - if command in resource_cmds: - # hash router commands to a RUG by router_id - return _hash_by('resource_id', message.body) - - # NOTE(adam_g): This is compat. with old style router-specific rug-ctl - # and should be dropped in M. 
- router_cmds = ([commands.ROUTER_DEBUG, commands.ROUTER_MANAGE] + - DEPRECATED_ROUTER_COMMANDS.keys()) - if command in router_cmds: - # hash router commands to a RUG by router_id - return _hash_by('router_id', message.body) - - if command in [commands.TENANT_DEBUG, commands.TENANT_MANAGE]: - # hash tenant commands to a RUG by tenant_id - return _hash_by('tenant_id', message.body) - - if command in [commands.GLOBAL_DEBUG]: - # global debug can happen anywhere but to avoid a stempeding - # herd trying to update a singe thing in the DB, hash it to - # a single host using a static key - return _hash_by( - hash_ring.DC_KEY, - {hash_ring.DC_KEY: hash_ring.DC_KEY}) - - def _dispatch_command(self, target, message): - if not self._should_process_command(message): - return - - instructions = message.body - if instructions['command'] == commands.WORKERS_DEBUG: - self.report_status() - - # NOTE(adam_g): Drop 'router-debug' compat in M. - elif (instructions['command'] == commands.RESOURCE_DEBUG or - instructions['command'] == commands.ROUTER_DEBUG): - - resource_id = (instructions.get('resource_id') or - instructions.get('router_id')) - if not resource_id: - LOG.warning(_LW( - 'Ignoring instruction to debug resource with no id')) - return - reason = instructions.get('reason') - if resource_id in commands.WILDCARDS: - LOG.warning(_LW( - 'Ignoring instruction to debug all resources with %r'), - resource_id) - else: - LOG.info(_LI('Placing resource %s in debug mode (reason: %s)'), - resource_id, reason) - self.db_api.enable_resource_debug(resource_id, reason) - - elif (instructions['command'] == commands.RESOURCE_MANAGE or - instructions['command'] == commands.ROUTER_MANAGE): - resource_id = (instructions.get('resource_id') or - instructions.get('router_id')) - if not resource_id: - LOG.warning(_LW( - 'Ignoring instruction to manage resource with no id')) - return - try: - self.db_api.disable_resource_debug(resource_id) - LOG.info(_LI('Resuming management of resource %s'), - resource_id) - except KeyError: - pass - try: - self._resource_locks[resource_id].release() - LOG.info(_LI('Unlocked resource %s'), resource_id) - except KeyError: - pass - except threading.ThreadError: - # Already unlocked, that's OK. - pass - - elif instructions['command'] in EVENT_COMMANDS: - resource_id = instructions.get('resource_id') - sm = self._find_state_machine_by_resource_id(resource_id) - if not sm: - LOG.debug( - 'Will not process command, no managed state machine ' - 'found for resource %s', resource_id) - return - new_res = event.Resource( - id=resource_id, - driver=sm.resource.RESOURCE_NAME, - tenant_id=sm.tenant_id) - new_msg = event.Event( - resource=new_res, - crud=EVENT_COMMANDS[instructions['command']], - body=instructions, - ) - # Use handle_message() to ensure we acquire the lock - LOG.info(_LI('sending %s instruction to %s'), - instructions['command'], new_res) - self.handle_message(new_msg.resource.tenant_id, new_msg) - LOG.info(_LI('forced %s for %s complete'), - instructions['command'], new_res) - - # NOTE(adam_g): This is here to support the deprecated old format of - # sending commands to specific routers and can be - # removed once the CLI component is dropped in M. 
- elif instructions['command'] in DEPRECATED_ROUTER_COMMANDS: - new_rsc = event.Resource( - driver=drivers.router.Router.RESOURCE_NAME, - id=message.body.get('router_id'), - tenant_id=message.body.get('tenant_id'), - ) - new_msg = event.Event( - resource=new_rsc, - crud=DEPRECATED_ROUTER_COMMANDS[instructions['command']], - body=instructions, - ) - # Use handle_message() to ensure we acquire the lock - LOG.info(_LI('sending %s instruction to %s'), - instructions['command'], new_rsc) - self.handle_message(new_msg.resource.tenant_id, new_msg) - LOG.info(_LI('forced %s for %s complete'), - instructions['command'], new_rsc) - - elif instructions['command'] == commands.TENANT_DEBUG: - tenant_id = instructions['tenant_id'] - reason = instructions.get('reason') - if tenant_id in commands.WILDCARDS: - LOG.warning(_LW( - 'Ignoring instruction to debug all tenants with %r'), - tenant_id) - else: - LOG.info(_LI('Placing tenant %s in debug mode (reason: %s)'), - tenant_id, reason) - self.db_api.enable_tenant_debug(tenant_id, reason) - - elif instructions['command'] == commands.TENANT_MANAGE: - tenant_id = instructions['tenant_id'] - try: - self.db_api.disable_tenant_debug(tenant_id) - LOG.info(_LI('Resuming management of tenant %s'), tenant_id) - except KeyError: - pass - - elif instructions['command'] == commands.GLOBAL_DEBUG: - enable = instructions.get('enabled') - reason = instructions.get('reason') - if enable == 1: - LOG.info('Enabling global debug mode (reason: %s)', reason) - self.db_api.enable_global_debug(reason) - elif enable == 0: - LOG.info('Disabling global debug mode') - self.db_api.disable_global_debug() - else: - LOG.warning('Unrecognized global debug command: %s', - instructions) - elif instructions['command'] == commands.CONFIG_RELOAD: - try: - cfg.CONF() - except Exception: - LOG.exception(_LE('Could not reload configuration')) - else: - cfg.CONF.log_opt_values(LOG, INFO) - - else: - LOG.warning(_LW('Unrecognized command: %s'), instructions) - - def _deliver_message(self, target, message): - LOG.debug('preparing to deliver %r to %r', message, target) - trms = self._get_trms(target) - - for trm in trms: - sms = trm.get_state_machines(message, self._context) - for sm in sms: - # Add the message to the state machine's inbox. If - # there is already a thread working on the router, - # that thread will pick up the new work when it is - # done with the current job. The work queue lock is - # acquired before asking the state machine if it has - # more work, so this block of code won't be executed - # at the same time as the thread trying to decide if - # the router is done. - if sm.send_message(message): - self._add_resource_to_work_queue(sm) - - def _add_resource_to_work_queue(self, sm): - """Queue up the state machine by resource name. - - The work queue lock should be held before calling this method. - """ - l = self._resource_locks[sm.resource_id] - locked = l.acquire(False) - if locked: - self.work_queue.put(sm) - else: - LOG.debug('%s is already in the work queue', sm.resource_id) - - def _release_resource_lock(self, sm): - try: - self._resource_locks[sm.resource_id].release() - except threading.ThreadError: - # Already unlocked, that's OK. 
- pass - - def report_status(self, show_config=True): - if show_config: - cfg.CONF.log_opt_values(LOG, INFO) - LOG.info(_LI( - 'Number of state machines in work queue: %d'), - self.work_queue.qsize() - ) - LOG.info(_LI( - 'Number of tenant resource managers managed: %d'), - len(self.tenant_managers) - ) - for thread in self.threads: - LOG.info(_LI( - 'Thread %s is %s. Last seen: %s'), - thread.name, - 'alive' if thread.isAlive() else 'DEAD', - self._thread_status.get(thread.name, 'UNKNOWN'), - ) - debug_tenants = self.db_api.tenants_in_debug() - if debug_tenants: - for t_uuid, reason in debug_tenants: - LOG.info(_LI('Debugging tenant: %s (reason: %s)'), - t_uuid, reason) - else: - LOG.info(_LI('No tenants in debug mode')) - - debug_resources = self.db_api.resources_in_debug() - if debug_resources: - for resource_id, reason in debug_resources: - LOG.info(_LI('Debugging resource: %s (reason: %s)'), - resource_id, reason) - else: - LOG.info(_LI('No resources in debug mode')) - - if cfg.CONF.coordination.enabled: - # NOTE(adam_g): This list could be big with a large cluster. - LOG.info(_LI('Peer astara-orchestrator hosts: %s'), - self.hash_ring_mgr.hosts) - else: - LOG.info(_LI( - 'No peer astara-orchestrator hosts, coordination disabled.')) diff --git a/devstack/README.md b/devstack/README.md deleted file mode 100644 index 2ff38ab7..00000000 --- a/devstack/README.md +++ /dev/null @@ -1,11 +0,0 @@ -This directory contains the astara devstack plugin for Kilo and beyond. You -will need to enable the plugin in your local.conf file by adding the -following to the [[local|localrc]] section. - - enable_plugin astara [GITREF] - -For example: - - enable_plugin astara http://github.com/openstack/astara - -For more info see: http://docs.openstack.org/developer/devstack/plugins.html diff --git a/devstack/files/debs/astara b/devstack/files/debs/astara deleted file mode 100644 index d4b5f5b6..00000000 --- a/devstack/files/debs/astara +++ /dev/null @@ -1,2 +0,0 @@ -debootstrap -memcached diff --git a/devstack/local.conf.sample b/devstack/local.conf.sample deleted file mode 100644 index d1b6cc74..00000000 --- a/devstack/local.conf.sample +++ /dev/null @@ -1,27 +0,0 @@ -# -# Sample DevStack local.conf. -# -# This sample file is intended to be used for your typical DevStack environment -# that's running all of OpenStack on a single host. This can also be used as -# the first host of a multi-host test environment. -# -# No changes to this sample configuration are required for this to work. 
-# - -[[local|localrc]] -enable_plugin astara https://github.com/openstack/astara -enable_service q-svc q-agt astara -disable_service n-net - -# Build the Astara appliance -#BUILD_ASTARA_APPLIANCE_IMAGE=True -#ASTARA_APPLIANCE_REPO=http://github.com/openstack/astara-appliance.git -#ASTARA_APPLIANCE_BRANCH=master - -HOST_IP=127.0.0.1 -LOGFILE=/opt/stack/logs/devstack.log -DATABASE_PASSWORD=secret -RABBIT_PASSWORD=secret -SERVICE_TOKEN=secret -SERVICE_PASSWORD=secret -ADMIN_PASSWORD=secret diff --git a/devstack/plugin.sh b/devstack/plugin.sh deleted file mode 100644 index f9e9dc4a..00000000 --- a/devstack/plugin.sh +++ /dev/null @@ -1,332 +0,0 @@ -# -*- mode: shell-script -*- - -function colorize_logging { - # Add color to logging output - this is lifted from devstack's functions to colorize the non-standard - # astara format - iniset $ASTARA_CONF DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s [01;" - iniset $ASTARA_CONF DEFAULT logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" - iniset $ASTARA_CONF DEFAULT logging_default_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s:%(process)s:%(processName)s:%(threadName)s [-%(color)s] %(color)s%(message)s" - iniset $ASTARA_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s:%(process)s:%(processName)s:%(threadName)s [%(request_id)s %(user)s %(tenant)s%(color)s] %(color)s%(message)s" -} - -function configure_astara() { - if [[ ! -d $ASTARA_CONF_DIR ]]; then - sudo mkdir -p $ASTARA_CONF_DIR - fi - sudo chown $STACK_USER $ASTARA_CONF_DIR - - sudo mkdir -p $ASTARA_CACHE_DIR - sudo chown $STACK_USER $ASTARA_CACHE_DIR - - if [[ ! -d $ASTARA_CONF_DIR/rootwrap.d ]]; then - sudo mkdir -p $ASTARA_CONF_DIR/rootwrap.d - fi - - sudo cp $ASTARA_DIR/etc/rootwrap.conf $ASTARA_CONF_DIR - sudo cp $ASTARA_DIR/etc/rootwrap.d/* $ASTARA_CONF_DIR/rootwrap.d/ - - (cd $ASTARA_DIR ;exec tools/generate_config_file_samples.sh) - cp $ASTARA_DIR/etc/orchestrator.ini.sample $ASTARA_CONF - cp $ASTARA_DIR/etc/provider_rules.json $ASTARA_PROVIDER_RULE_CONF - iniset $ASTARA_CONF DEFAULT debug True - iniset $ASTARA_CONF DEFAULT verbose True - configure_auth_token_middleware $ASTARA_CONF $Q_ADMIN_USERNAME $ASTARA_CACHE_DIR - iniset $ASTARA_CONF keystone_authtoken auth_plugin password - iniset $ASTARA_CONF DEFAULT auth_region $REGION_NAME - - iniset_rpc_backend astara $ASTARA_CONF - - iniset $ASTARA_CONF DEFAULT control_exchange "neutron" - iniset $ASTARA_CONF DEFAULT boot_timeout "6000" - iniset $ASTARA_CONF DEFAULT num_worker_processes "2" - iniset $ASTARA_CONF DEFAULT num_worker_threads "2" - iniset $ASTARA_CONF DEFAULT reboot_error_threshold "2" - - iniset $ASTARA_CONF DEFAULT management_prefix $ASTARA_MANAGEMENT_PREFIX - iniset $ASTARA_CONF DEFAULT astara_mgt_service_port $ASTARA_MANAGEMENT_PORT - iniset $ASTARA_CONF DEFAULT api_listen $ASTARA_API_LISTEN - iniset $ASTARA_CONF DEFAULT api_port $ASTARA_API_PORT - iniset $ASTARA_CONF DEFAULT health_check_period 10 - - # NOTE(adam_g) When running in the gate on slow VMs, gunicorn workers in the appliance - # sometimes hang during config update and eventually timeout after 60s. Update - # config_timeout in the RUG to reflect that timeout. 
-    iniset $ASTARA_CONF DEFAULT alive_timeout 60
-    iniset $ASTARA_CONF DEFAULT config_timeout 600
-
-    iniset $ASTARA_CONF DEFAULT enabled_drivers $ASTARA_ENABLED_DRIVERS
-
-    if [[ "$Q_AGENT" == "linuxbridge" ]]; then
-        iniset $ASTARA_CONF DEFAULT interface_driver "astara.common.linux.interface.BridgeInterfaceDriver"
-    fi
-
-    iniset $ASTARA_CONF DEFAULT ssh_public_key $ASTARA_APPLIANCE_SSH_PUBLIC_KEY
-
-    iniset $ASTARA_CONF database connection `database_connection_url astara`
-
-    if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then
-        colorize_logging
-    fi
-
-    if [[ "$ASTARA_COORDINATION_ENABLED" == "True" ]]; then
-        iniset $ASTARA_CONF coordination enabled True
-        iniset $ASTARA_CONF coordination url $ASTARA_COORDINATION_URL
-    fi
-
-    if is_service_enabled "neutron-vpnaas"; then
-        iniset $ASTARA_CONF router ipsec_vpn True
-    fi
-}
-
-function configure_astara_nova() {
-    iniset $NOVA_CONF neutron service_metadata_proxy True
-    iniset $NOVA_CONF DEFAULT use_ipv6 True
-}
-
-function configure_astara_neutron() {
-    iniset $NEUTRON_CONF DEFAULT core_plugin astara.newton_fix.Ml2Plugin
-    iniset $NEUTRON_CONF DEFAULT api_extensions_path $ASTARA_NEUTRON_DIR/astara_neutron/extensions
-    # Use rpc as the notification driver instead of the default noop driver.
-    # We need the RUG to be able to receive neutron's event notifications,
-    # like port.create.start/end or router.interface.start/end, so that it
-    # is able to boot astara routers.
-    iniset $NEUTRON_CONF DEFAULT notification_driver "neutron.openstack.common.notifier.rpc_notifier"
-    iniset $NEUTRON_CONF DEFAULT astara_auto_add_resources False
-    iniset $NEUTRON_CONF DEFAULT min_l3_agents_per_router 1
-
-    iniset_multiline $NEUTRON_CONF service_providers service_provider L3_ROUTER_NAT:single_node:astara.newton_fix.SingleNodeDriver L3_ROUTER_NAT:ha:astara.newton_fix.HaNodeDriver
-
-    # The plugin l3 function does more than just configure the Neutron L3
-    # agent, so we pass a dummy l3 file here.
-    TEMPFILE=`mktemp`
-    neutron_plugin_configure_l3_agent $TEMPFILE
-    rm $TEMPFILE
-}
-
-function configure_astara_horizon() {
-    # _horizon_config_set depends on this being set
-    local local_settings=$HORIZON_LOCAL_SETTINGS
-    for ext in $(ls $ASTARA_HORIZON_DIR/openstack_dashboard_extensions/*.py); do
-        local ext_dest=$HORIZON_DIR/openstack_dashboard/local/enabled/$(basename $ext)
-        rm -rf $ext_dest
-        ln -s $ext $ext_dest
-        # If horizon is enabled, we assume lib/horizon has been sourced and
-        # _horizon_config_set is defined.
-        _horizon_config_set $HORIZON_LOCAL_SETTINGS "" RUG_MANAGEMENT_PREFIX \"$ASTARA_MANAGEMENT_PREFIX\"
-        _horizon_config_set $HORIZON_LOCAL_SETTINGS "" RUG_API_PORT \"$ASTARA_API_PORT\"
-    done
-    _horizon_config_set $HORIZON_LOCAL_SETTINGS "" HORIZON_CONFIG\[\'customization_module\'\] "'astara_horizon.astara_openstack_dashboard.overrides'"
-}
-
-function start_astara_horizon() {
-    restart_apache_server
-}
-
-function install_astara() {
-    git_clone $ASTARA_NEUTRON_REPO $ASTARA_NEUTRON_DIR $ASTARA_NEUTRON_BRANCH
-    setup_develop $ASTARA_NEUTRON_DIR
-    setup_develop $ASTARA_DIR
-
-    # Temporary hack to add blessed during devstack installs so that
-    # rug-ctl browse works out of the box.
-    pip_install blessed
-
-    if [ "$BUILD_ASTARA_APPLIANCE_IMAGE" == "True" ]; then
-        git_clone $ASTARA_APPLIANCE_REPO $ASTARA_APPLIANCE_DIR $ASTARA_APPLIANCE_BRANCH
-    fi
-
-    if is_service_enabled horizon; then
-        git_clone $ASTARA_HORIZON_REPO $ASTARA_HORIZON_DIR $ASTARA_HORIZON_BRANCH
-        setup_develop $ASTARA_HORIZON_DIR
-    fi
-}
-
-function _auth_args() {
-    local username=$1
-    local password=$2
local tenant_name=$3 - local auth_args="--os-username $username --os-password $password --os-auth-url $OS_AUTH_URL" - if [ "$OS_IDENTITY_API_VERSION" -eq "3" ]; then - auth_args="$auth_args --os-project-name $tenant_name" - else - auth_args="$auth_args --os-tenant-name $tenant_name" - fi - echo "$auth_args" -} - -function create_astara_nova_flavor() { - openstack --os-cloud=devstack-admin flavor create astara \ - --id $ROUTER_INSTANCE_FLAVOR_ID --ram $ROUTER_INSTANCE_FLAVOR_RAM \ - --disk $ROUTER_INSTANCE_FLAVOR_DISK --vcpus $ROUTER_INSTANCE_FLAVOR_CPUS - iniset $ASTARA_CONF router instance_flavor $ROUTER_INSTANCE_FLAVOR_ID -} - -function pre_start_astara() { - # Create and init the database - recreate_database astara - astara-dbsync --config-file $ASTARA_CONF upgrade - - local auth_args="$(_auth_args $Q_ADMIN_USERNAME $SERVICE_PASSWORD $SERVICE_TENANT_NAME)" - - # having these set by something else in devstack will override those that we pass on - # CLI. - unset OS_TENANT_NAME OS_PROJECT_NAME - - # setup masq rule for public network - #sudo iptables -t nat -A POSTROUTING -s 172.16.77.0/24 -o $PUBLIC_INTERFACE_DEFAULT -j MASQUERADE - - neutron $auth_args net-create mgt - typeset mgt_network_id=$(neutron $auth_args net-show mgt | grep ' id ' | awk '{ print $4 }') - iniset $ASTARA_CONF DEFAULT management_network_id $mgt_network_id - - local subnet_create_args="" - if [[ "$ASTARA_MANAGEMENT_PREFIX" =~ ':' ]]; then - subnet_create_args="--ip-version=6 --ipv6_address_mode=slaac --enable_dhcp" - fi - typeset mgt_subnet_id=$(neutron $auth_args subnet-create mgt $ASTARA_MANAGEMENT_PREFIX $subnet_create_args | grep ' id ' | awk '{ print $4 }') - iniset $ASTARA_CONF DEFAULT management_subnet_id $mgt_subnet_id - - local astara_dev_image_src="" - local lb_element="" - - if [ "$BUILD_ASTARA_APPLIANCE_IMAGE" == "True" ]; then - if [[ $(type -P disk-image-create) == "" ]]; then - pip_install "diskimage-builder" - fi - - if [[ "$ASTARA_DEV_APPLIANCE_ENABLED_DRIVERS" =~ "loadbalancer" ]]; then - # We can make this more configurable as we add more LB backends - lb_element="nginx" - fi - - # Point DIB at the devstack checkout of the astara-appliance repo - DIB_REPOLOCATION_astara=$ASTARA_APPLIANCE_DIR \ - DIB_REPOREF_astara="$(cd $ASTARA_APPLIANCE_DIR && git rev-parse HEAD)" \ - DIB_ASTARA_APPLIANCE_DEBUG_USER=$ADMIN_USERNAME \ - DIB_ASTARA_APPLIANCE_DEBUG_PASSWORD=$ADMIN_PASSWORD \ - DIB_ASTARA_ADVANCED_SERVICES=$ASTARA_DEV_APPLIANCE_ENABLED_DRIVERS \ - http_proxy=$ASTARA_DEV_APPLIANCE_BUILD_PROXY \ - ELEMENTS_PATH=$ASTARA_APPLIANCE_DIR/diskimage-builder/elements \ - DIB_RELEASE=jessie DIB_EXTLINUX=1 disk-image-create debian vm astara debug-user $lb_element \ - -o $TOP_DIR/files/astara - astara_dev_image_src=$ASTARA_DEV_APPLIANCE_FILE - else - astara_dev_image_src=$ASTARA_DEV_APPLIANCE_URL - fi - - upload_image $astara_dev_image_src - - local image_name=$(basename $astara_dev_image_src | cut -d. -f1) - typeset image_id=$(glance $auth_args image-list | grep $image_name | get_field 1) - - die_if_not_set $LINENO image_id "Failed to find astara image" - iniset $ASTARA_CONF router image_uuid $image_id - - # NOTE(adam_g): Currently we only support keystone v2 auth so we need to - # hardcode the auth url accordingly. 
See (LP: #1492654).
-    iniset $ASTARA_CONF DEFAULT auth_url $KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:5000/v2.0
-
-    if is_service_enabled horizon; then
-        # _horizon_config_set depends on this being set
-        local local_settings=$HORIZON_LOCAL_SETTINGS
-        _horizon_config_set $HORIZON_LOCAL_SETTINGS "" ROUTER_IMAGE_UUID \"$image_id\"
-    fi
-
-    create_astara_nova_flavor
-}
-
-function start_astara() {
-    screen_it astara "cd $ASTARA_DIR && astara-orchestrator --config-file $ASTARA_CONF"
-    echo '************************************************************'
-    echo "Sleeping for a while to make sure the tap device gets set up"
-    echo '************************************************************'
-    sleep 10
-}
-
-function post_start_astara() {
-    # Open all traffic on the private CIDR
-    set_demo_tenant_sec_group_private_traffic
-}
-
-function stop_astara() {
-    echo "Stopping astara..."
-    screen_stop_service astara
-    stop_process astara
-}
-
-function set_neutron_user_permission() {
-    # Starting from Juno, service users are no longer granted the admin
-    # role, but a new `service` role instead.
-    # Since nova policy allows only VMs booted by admin users to attach
-    # ports on the public networks, we need to modify the policy and allow
-    # users with the service role to do that too.
-
-    policy_add "$NOVA_CONF_DIR/policy.json" "network:attach_external_network" "\"rule:admin_api or role:service\""
-
-}
-
-function set_demo_tenant_sec_group_private_traffic() {
-    local auth_args="$(_auth_args demo $OS_PASSWORD demo)"
-    neutron $auth_args security-group-rule-create --direction ingress --remote-ip-prefix $FIXED_RANGE default
-}
-
-function create_astara_endpoint() {
-    # Publish the API endpoint of the administrative API (used by Horizon)
-    get_or_create_service "astara" "astara" "Astara Network Orchestration Administrative API"
-    get_or_create_endpoint "astara" \
-        "$REGION_NAME" \
-        "http://$ASTARA_API_LISTEN:$ASTARA_API_PORT" \
-        "http://$ASTARA_API_LISTEN:$ASTARA_API_PORT" \
-        "http://$ASTARA_API_LISTEN:$ASTARA_API_PORT"
-}
-
-function configure_astara_ssh_keypair {
-    if [[ ! -e $ASTARA_APPLIANCE_SSH_PUBLIC_KEY ]]; then
-        if [[ ! -d $(dirname $ASTARA_APPLIANCE_SSH_PUBLIC_KEY) ]]; then
-            mkdir -p $(dirname $ASTARA_APPLIANCE_SSH_PUBLIC_KEY)
-        fi
-        echo -e 'n\n' | ssh-keygen -q -t rsa -P '' -f ${ASTARA_APPLIANCE_SSH_PUBLIC_KEY%.*}
-    fi
-}
-
-
-if is_service_enabled astara; then
-    if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
-        configure_astara_ssh_keypair
-
-    elif [[ "$1" == "stack" && "$2" == "install" ]]; then
-        echo_summary "Installing Astara"
-        if is_service_enabled n-api; then
-            set_neutron_user_permission
-        fi
-        install_astara
-
-    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
-        echo_summary "Astara Post-Config"
-        configure_astara
-        configure_astara_nova
-        configure_astara_neutron
-        if is_service_enabled horizon; then
-            configure_astara_horizon
-        fi
-        create_astara_endpoint
-        cd $old_cwd
-
-    elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
-        echo_summary "Initializing Astara"
-        pre_start_astara
-        if is_service_enabled horizon; then
-            start_astara_horizon
-        fi
-        start_astara
-        post_start_astara
-    fi
-
-    if [[ "$1" == "unstack" ]]; then
-        stop_astara
-    fi
-
-    if [[ "$1" == "clean" ]]; then
-        # no-op
-        :
-    fi
-fi
-
diff --git a/devstack/settings b/devstack/settings
deleted file mode 100644
index 3ad59c78..00000000
--- a/devstack/settings
+++ /dev/null
@@ -1,63 +0,0 @@
-enable_service astara
-
-for svc in q-dhcp q-l3 q-fwaas q-lbaas q-meta q-vpn; do
-    if is_service_enabled $svc; then
-        echo "Disabling $svc in favor of Astara."
-        disable_service $svc
-    fi
-done
-
-Q_USE_DEBUG_COMMAND=False
-
-# Set up default directories
-ASTARA_DIR=${ASTARA_DIR:-$DEST/astara}
-ASTARA_CACHE_DIR=${ASTARA_CACHE_DIR:-/var/cache/astara}
-ASTARA_NEUTRON_DIR=${ASTARA_NEUTRON_DIR:-$DEST/astara-neutron}
-ASTARA_NEUTRON_REPO=${ASTARA_NEUTRON_REPO:-${GIT_BASE}/openstack/astara-neutron.git}
-ASTARA_NEUTRON_BRANCH=${ASTARA_NEUTRON_BRANCH:-master}
-
-ASTARA_APPLIANCE_DIR=${ASTARA_APPLIANCE_DIR:-$DEST/astara-appliance}
-ASTARA_APPLIANCE_REPO=${ASTARA_APPLIANCE_REPO:-${GIT_BASE}/openstack/astara-appliance.git}
-ASTARA_APPLIANCE_BRANCH=${ASTARA_APPLIANCE_BRANCH:-master}
-
-ASTARA_ENABLED_DRIVERS=${ASTARA_ENABLED_DRIVERS:-"router,loadbalancer"}
-
-BUILD_ASTARA_APPLIANCE_IMAGE=$(trueorfalse False BUILD_ASTARA_APPLIANCE_IMAGE)
-ASTARA_DEV_APPLIANCE_URL=${ASTARA_DEV_APPLIANCE_URL:-http://tarballs.openstack.org/astara-appliance/images/astara_appliance.qcow2}
-ASTARA_DEV_APPLIANCE_FILE=${ASTARA_DEV_APPLIANCE_FILE:-$TOP_DIR/files/astara.qcow2}
-ASTARA_DEV_APPLIANCE_BUILD_PROXY=${ASTARA_DEV_APPLIANCE_BUILD_PROXY:-""}
-ASTARA_DEV_APPLIANCE_ENABLED_DRIVERS=${ASTARA_DEV_APPLIANCE_ENABLED_DRIVERS:-${ASTARA_ENABLED_DRIVERS}}
-
-ASTARA_HORIZON_DIR=${ASTARA_HORIZON_DIR:-$DEST/astara-horizon}
-ASTARA_HORIZON_REPO=${ASTARA_HORIZON_REPO:-${GIT_BASE}/openstack/astara-horizon}
-ASTARA_HORIZON_BRANCH=${ASTARA_HORIZON_BRANCH:-master}
-
-ASTARA_CONF_DIR=/etc/astara
-ASTARA_CONF=$ASTARA_CONF_DIR/orchestrator.ini
-ASTARA_PROVIDER_RULE_CONF=$ASTARA_CONF_DIR/provider_rules.json
-
-# Router instances will run as a specific Nova flavor. These values configure
-# the specs of the flavor devstack will create.
-ROUTER_INSTANCE_FLAVOR_ID=${ROUTER_INSTANCE_FLAVOR_ID:-135}  # NOTE(adam_g): This can be an auto-generated UUID once the RUG supports non-int IDs here
-ROUTER_INSTANCE_FLAVOR_RAM=${ROUTER_INSTANCE_FLAVOR_RAM:-512}
-ROUTER_INSTANCE_FLAVOR_DISK=${ROUTER_INSTANCE_FLAVOR_DISK:-5}
-ROUTER_INSTANCE_FLAVOR_CPUS=${ROUTER_INSTANCE_FLAVOR_CPUS:-1}
-
-PUBLIC_INTERFACE_DEFAULT='eth0'
-ASTARA_MANAGEMENT_PREFIX=${ASTARA_MANAGEMENT_PREFIX:-"fdca:3ba5:a17a:acda::/64"}
-ASTARA_MANAGEMENT_PORT=${ASTARA_MANAGEMENT_PORT:-5000}
-ASTARA_API_LISTEN=${ASTARA_API_LISTEN:-$SERVICE_HOST}
-ASTARA_API_PORT=${ASTARA_API_PORT:-44250}
-
-HORIZON_LOCAL_SETTINGS=$HORIZON_DIR/openstack_dashboard/local/local_settings.py
-
-# Path to the public ssh key that will be added to the 'astara' user's
-# authorized_keys within the appliance VM.
-ASTARA_APPLIANCE_SSH_PUBLIC_KEY=${ASTARA_APPLIANCE_SSH_PUBLIC_KEY:-$HOME/.ssh/id_rsa.pub}
-
-ASTARA_COORDINATION_ENABLED=$(trueorfalse True ASTARA_COORDINATION_ENABLED)
-ASTARA_COORDINATION_URL=${ASTARA_COORDINATION_URL:-memcached://localhost:11211}
-
-if [[ "$ASTARA_ENABLED_DRIVERS" =~ "router" ]]; then
-    Q_L3_ROUTER_PER_TENANT=True
-fi
diff --git a/doc/Makefile b/doc/Makefile
deleted file mode 100644
index 6972076b..00000000
--- a/doc/Makefile
+++ /dev/null
@@ -1,192 +0,0 @@
-# Makefile for Sphinx documentation
-#
-
-# You can set these variables from the command line.
-SPHINXOPTS    =
-SPHINXBUILD   = sphinx-build
-PAPER         =
-BUILDDIR      = build
-
-# User-friendly check for sphinx-build
-ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
-$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
-endif
-
-# Internal variables.
-PAPEROPT_a4     = -D latex_paper_size=a4
-PAPEROPT_letter = -D latex_paper_size=letter
-ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
-# the i18n builder cannot share the environment and doctrees with the others
-I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
-
-.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext
-
-help:
-	@echo "Please use \`make <target>' where <target> is one of"
-	@echo "  html        to make standalone HTML files"
-	@echo "  dirhtml     to make HTML files named index.html in directories"
-	@echo "  singlehtml  to make a single large HTML file"
-	@echo "  pickle      to make pickle files"
-	@echo "  json        to make JSON files"
-	@echo "  htmlhelp    to make HTML files and a HTML help project"
-	@echo "  qthelp      to make HTML files and a qthelp project"
-	@echo "  applehelp   to make an Apple Help Book"
-	@echo "  devhelp     to make HTML files and a Devhelp project"
-	@echo "  epub        to make an epub"
-	@echo "  latex       to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
-	@echo "  latexpdf    to make LaTeX files and run them through pdflatex"
-	@echo "  latexpdfja  to make LaTeX files and run them through platex/dvipdfmx"
-	@echo "  text        to make text files"
-	@echo "  man         to make manual pages"
-	@echo "  texinfo     to make Texinfo files"
-	@echo "  info        to make Texinfo files and run them through makeinfo"
-	@echo "  gettext     to make PO message catalogs"
-	@echo "  changes     to make an overview of all changed/added/deprecated items"
-	@echo "  xml         to make Docutils-native XML files"
-	@echo "  pseudoxml   to make pseudoxml-XML files for display purposes"
-	@echo "  linkcheck   to check all external links for integrity"
-	@echo "  doctest     to run all doctests embedded in the documentation (if enabled)"
-	@echo "  coverage    to run coverage check of the documentation (if enabled)"
-
-clean:
-	rm -rf $(BUILDDIR)/*
-
-html:
-	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
-	@echo
-	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
-
-dirhtml:
-	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
-	@echo
-	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
-
-singlehtml:
-	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
-	@echo
-	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
-
-pickle:
-	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
-	@echo
-	@echo "Build finished; now you can process the pickle files."
-
-json:
-	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
-	@echo
-	@echo "Build finished; now you can process the JSON files."
-
-htmlhelp:
-	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
-	@echo
-	@echo "Build finished; now you can run HTML Help Workshop with the" \
-	      ".hhp project file in $(BUILDDIR)/htmlhelp."
-
-qthelp:
-	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
-	@echo
-	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
-	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
-	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/akanda.qhcp"
-	@echo "To view the help file:"
-	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/akanda.qhc"
-
-applehelp:
-	$(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp
-	@echo
-	@echo "Build finished. The help book is in $(BUILDDIR)/applehelp."
-	@echo "N.B. 
You won't be able to view it unless you put it in" \ - "~/Library/Documentation/Help or install it in your application" \ - "bundle." - -devhelp: - $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp - @echo - @echo "Build finished." - @echo "To view the help file:" - @echo "# mkdir -p $$HOME/.local/share/devhelp/akanda" - @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/akanda" - @echo "# devhelp" - -epub: - $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub - @echo - @echo "Build finished. The epub file is in $(BUILDDIR)/epub." - -latex: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo - @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." - @echo "Run \`make' in that directory to run these through (pdf)latex" \ - "(use \`make latexpdf' here to do that automatically)." - -latexpdf: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo "Running LaTeX files through pdflatex..." - $(MAKE) -C $(BUILDDIR)/latex all-pdf - @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." - -latexpdfja: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo "Running LaTeX files through platex and dvipdfmx..." - $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja - @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." - -text: - $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text - @echo - @echo "Build finished. The text files are in $(BUILDDIR)/text." - -man: - $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man - @echo - @echo "Build finished. The manual pages are in $(BUILDDIR)/man." - -texinfo: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo - @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." - @echo "Run \`make' in that directory to run these through makeinfo" \ - "(use \`make info' here to do that automatically)." - -info: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo "Running Texinfo files through makeinfo..." - make -C $(BUILDDIR)/texinfo info - @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." - -gettext: - $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale - @echo - @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." - -changes: - $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes - @echo - @echo "The overview file is in $(BUILDDIR)/changes." - -linkcheck: - $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck - @echo - @echo "Link check complete; look for any errors in the above output " \ - "or in $(BUILDDIR)/linkcheck/output.txt." - -doctest: - $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest - @echo "Testing of doctests in the sources finished, look at the " \ - "results in $(BUILDDIR)/doctest/output.txt." - -coverage: - $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage - @echo "Testing of coverage in the sources finished, look at the " \ - "results in $(BUILDDIR)/coverage/python.txt." - -xml: - $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml - @echo - @echo "Build finished. The XML files are in $(BUILDDIR)/xml." - -pseudoxml: - $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml - @echo - @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 
diff --git a/doc/source/_static/neutron-astara.png b/doc/source/_static/neutron-astara.png
deleted file mode 100644
index 66856653f632d82a3f9fc8892b4179887fa7b710..0000000000000000000000000000000000000000
Binary files a/doc/source/_static/neutron-astara.png and /dev/null differ
z6#pVG70xIxKi_Z)SkvAk`$uQKylcDuGk`UmEKCEJLhg?mi1rM<5q#9Y03%-*N=PB+#cHI93{)sfkH)D zd@r_Ntg>R#+<;_L0y{S0KUiPAipheehZI=g=tILE` zIb+3dazztkW7!wmfIc~{90D^Ehkp?cN)YC`fBB4%8``D4NP}!A$@h&hBLT!{v4sy( zcVHa*H3-;cK8J5-37=RSA_JzVEbke!CI>KU*WXTv=&(d$)%7VMCxYwneT|I4kO@7yE#VO{?-W_`IA*USe@?AlvT=Yn?N1!7)%0 z9Sx7(i0B~4HgaBDA*Y~_hM^DEmp$#~Cf}@~n$IPEl0EeUKPuaV_nsel(GOi zDal^%N71aP$8Tsr)i*SJM5t;d^wENNPurJlw1XgvEa$ilTMt{(G~gbnyl$<4ckUx( zY5enNSOc=QJ)6u2p#9PoykHYNS#VDI7SQe5a3?{jXxo=`<416TDm5+Hr|Z*^o;Oy3 zTL}|uPU;6WE1a(%Y5j;MQi)mV90U+C71mAQ0BP~(n?}F@A&3byVZ_?__?h5H4a&%H zTT(EgX(>r$4AIx=$1N=!4#oybFsr|EX*D1g2%k1dttfzkh{lFsFj7CWH$T1LV968& zwN_(FHF#QBql{B?&NARJI0{4Q$sdNEQ1o@F7y-Rk{)MHbhHNuyU;KKaNh&EUzROT| zBGvWZy(1qku3U_QJ{_7?m23Okx8~jw(WF#7`dg6q9SXE8HeF0ii{gyq6SAswTON%90n3oq#Nhnx)f?{n-iZVNP0I^>>{1VNrFsuX^fL+w{v}Gopa)H&g<&DlrFnEM>S{V`} zu+o(NTAX+UN!YK${~h%DQn;?uKwe&04M6+A1z@JCF|^5 zbUmhD883E&E$K*cg8qSEBsowqKT1!8n<%M19$5ojQ7_aA#j^xM60{EKf~Mdeq!9aX zx@ss-6SPS%e%W84``Of`u>@*%3;N6%GCGw=XSZKYLlE=&n8E)ZoJ!(8Fk;tz?;9P> zr;^vDbfoIhqkBo~;}!*n#-i#`u-JW;IOXHtWMg&-W5 z;ti3Rb@*J0qB7^bd!|%u0hhTwbSOSn+`Bc8(KCbAIp*GxIyI*9<_aD-|m#$oyoYIP}SS=S}S zxwe9nKwsC~2wl*L~r{Gwno~n8WC^^?LK&KN>`YeAQk(mMu8Edj6?kWHt>! z%Mjy$8&TUHA7@`~)$dq>zTC;0FM5uy4J)p8c96Cp*Y6U;f%<)K(PL~2{gx;AbLG1sq^pZ$0 zE(T=#&a}J#g~7r~o1d>Lq5fOT8reku3P3ywN#jGVoPQ=oDJw z$i)$$0U8&S!G)c|!otpq9*<1NeY&uPEn zQ{htcZO%D_@teHd?GqdCD?q%lF9ym zWH>TmBvQ}g=X&|Z`Rt9gn;YkEZLH;PoX_7_E7&++wD`xP)**-_B-0hC#we6p1`-+7 z8&QwKmeTz6zx!e)e#2%8J?Bp{GX|?TSu@XCNHs%^moI%WvUg3hnQw**|AC($7fFq2 zOCEU;cSTlzW%ncxZZZ!Vm5=^*#f62jbMD;8&E;zQ?gla`#CK-zdeAExTR&!7j_Kd~ zGIy#ojseloy(j0x7W}rZGlFy^f z{lSZnGl@mIqllv0hl!*G^e6uI#zwf{k=blS-Mgu>vac_!*t+aE{f;N^;ln>wkxM|L z1ulojD6cLZ{JsrRZIG=!Fwvd?gAEmlVZ01PYLr>e65;rzU1DFDjRdJsFW6hhe6aeM ze#*mmN+#k?mIxL81cmdefnXz|0V|pnkQ{l8jdli~%MI{jem6lSLNd{7)zS*y0`RsW zs^)+dc-P7lzWqusUsA^q16elt?E?)DjMULj##_dvHSfIglC1U9!WprFi{-%EBpGq? 
z{RT~~f{*YfUm%PMx0cz&_<{uW&vTeb#=zKLe7HfnUQAv&(7eUe+SB!q;e8@P@pMf3 zhpIs)=H5NoqAC&uOc=sD9GWPo_hTqUpZxWSqkNcHom)QE&1Uz%Oyf$X7^8;(!3AAB zIZ)vxbtH|5+|hq_RyjC8Mp~MWpFizZ*|WjH_g3H6OTHF1Qu_+i1ihVp5s+MM`+%#D zwt+O4YW44%&IulA*R-G_TyYbW(ks@|A(Sg_LCI1{98*?R1;v!ppCKMHL%bTNP?{~415F7g7#fg{vgm2m5tCXpu4 zgeS!S?ecoyiYOrhK$zDK>w#4bo_$bQSg6$S(h|G#VhA*HP^}G`i0Yaeot=D;;VW{D zYdwq9gRhw(=0y+BLTk{Dv3U9Nc<5L2CVHzKy&y`}s| zr!<@@t=V1f+;X2E!Fc41JbsLv2MSg^4kQ*m(fmX)^RyyKN-u@1&v?>>XV*e#E9(88 zO)>zAw^6Ha;^f32s%%#`Vc({g-2YZKY?oh7DR>eS@!zhDQN!t&nWEWXBGN>XccqT_ zZg{kVqwwW@Y(zxWY3T{)W27@<7jk}Fu(z`#M)a@!wF>rQNzbWSf(h@HTjhaY=g^dD zMy$z9L^Gc4tX<7sDQqZ)606y+>4~WZ2N(fv?dkY^$WG0FRgGG~ZoXyPXrH~5Gj zdCgKq2^$sR4uOFi&zfydokAC6JzK*Z7vbj1N5aPE?vkL75~8;Lnq7 zrs3hLrQCZYt@C}faG*qPL5U@M;(!G|MkBPOj>&nViKQU#h>dg%C~e^}NWuHOR$ z2OoJ32My`C>b}Z9K;RK)zXlUWxPu(D*Xv@Z!J|UwM>)R8kL8>xq~b@_^-~`0Wstq- zyLa8XVKndy4cx@Ic1;6>Wm_mU=1DU9Z91VW;uu+Xx2;m_Xa6{){xu#|pri{&p|kVv zcb)2uGaz9#HEiyToT{p$2gRw!^v&gTT+}X|f*s_?=cRm6eO|p8K>DwN`-F^6$ET|9S}Yoh?!h{}q+e6N}N4 z%!DNm>>Ml7?|uttC6WPuc4y&^LS0hGG+&mSysb?_d1Q3{Com6pG@Z?Zg4{1TJWFdlaI(z~F8F394;a zL1;m83YBBijM@0Jg;Ac+W;yqupp)7^W@-C34-Ze@1DUjVv@K|m?*%*hsbXOK_OI|%c#&>gjZ)Uo0RG5X zhkVt?h3?U67oD;$@>SRlnXt?<26~d86&g+RbiI3UlV2|;GL)zQcPmt;ixNvTU@Zs@ z7#;0H8LW3dbKB30C>rd>H_a} zOsNJT`167z2&Rspn6Ie*2B;N3BGZUD>0Z&uk##Eb!yo^jy52k<>OOiO9!r*DN}=qb zL{UOy7{;2dO^Ya`WJVckmov46g2$Sm{$-#HGsi3`gE&nXlVS!BAHCVyWuYQ$&NH}-4-RdoOlI7 zSy>t3G~Aum>#UmOn&T4^Yz^-*OG_DE+)JsM$9`XY5F{ifwlp?sf@%Bl4O!cbfSNG?9V z3-|0BR8=spTocCuMlvZYhAy$iF*NVDn&FGQJnN6%-{6d@n?eV2s)YcxB?u8#u>qb4 zUEKny91+XIP2qjN8?$H63oxhaZf9S}cCvZK0OsY37x&#>QoY5y9TaAo%~gf3S<*wU z3#6N;+BhFs4T^1@Yu`tT!4Z=WDm=U#vjg_FS(+#D1E~qhBpA}ZdGn^byBjPI&i;ko zaW5#`_U=VmYA2@={DJEgT~!?&$0Xs~s(i;&fdyc17z4!Aq`3lhB-^iBk~5r%mxwAt za`p~XWf47h^d+R1$B%C+rX<@ghsj6gI6j62z^u|z`-#X%_?+{hd3R(@p(NB8xdS}p z5w<5bH04A%i|~ggg`C(~v z+agVC)2j>Zy0l|@{%_bKDu-Lr$a(B-c;{{FAgd5Qwy+#le8Q0H2%8G^YFi&|iR5P- zHrF_kYzrX)DG`d1#H87h;$ocAVSsF}vOt)#1?RNO^(9n7tVSPBx z$a|V&U7h?KSzvt|p09baFt)8DrpzHYwWMy%`V#hGurTp1FOtaPpeRV=B4W02A1uOr z8j=q7*_=p$axK+aM#7hioi8D z3>1g7pg1qMa}=uU$JChv?>?;u)g+CP1!wAhYU-^P(LW8ls=f*7i6v~Nb9C3Z`gz_D z=lOwqH7w^SPpSX$##*J03z;2-LE*A*Sy*h`xBdS55IK3%@I|0NoGxmGtP~g=3}txi zn8a(%Yctkp_*!D^x(%uB;TmfUK%i&3IBDUFZE0$vpA`7Dp*#z|yxOBWXmbSgdTIMl z-qp^a$3#b44x%8hqKc?9v$LY%0;2&$bE_uU9>b5d_qlqY!F2goB3$>63DF0#rvao> zyWo37&@lL?yr@=*I9pl(lT2in#IYf8Le?%VEQFRF$6HE!yo8=wgeKtqjZBx|I$O=B zeTM>K{kfWSy6$W|OWe{I_;wbTZ<((|uffT62r1O14<0}0wN2+rVR7-Lpdjg@j(Y&Y zCEY`)pA`G4^v*gy3sp8_HI@O>5c`LQLr~FrlT%)V@4c4OLD#*S%Utxa5g=)kZy%K- zZ4U5*O_65b%AOSzMA2s(>-?aPOv1K;S=^@xv2-|Z1+aFIZL|QyW`IxbcZ)3nk23(< z_E@pqLh?q@DG48PkT_vSFQgpAZtm^Z*WfG#wWW4Zxi=k(9QANnfIp;#|L5g3R99cK z>}0-pu{76rF$Z5BeWrbL7Rg~~aj^oAJ=u#Q8qjv9x$1+_t%}2F%z<3T$)f&xgS+*f zwzfB)D)>QRwbC5B>;+*JNgepnqf2WWrdr@pbiq349WmC)@5V2Q>Vs4zJ{UxH#E&GRW9`F zUj+UWa3j{~~h!75)T?VYoN zvwqr~o_C2{(6Sr0LYLK6eAoEBooW7fuDj#daM}~*FIvZ}iKR`8p{6*(O zis_^mp@kfc#A!Ko36);i1J3MpaW?Ft721&0A1(dZf8?SdA@}DF`uz7Y*B6_B$JBQuLoB?ifH5`Rf_NkB`_TH^p0NPxz)^!?hkt_?Gqf=5EFL)}7yLd9q7xU)77eU7c7 z3G8OJ+#Ekm*fJA(+~Crsf#3!Am4z+z0nTcHtlAFjZ*s+a?v#Hww5@i{XA;d15Js$Ez#Hofl{DqG;+*_s{g zk)vrqK>lbpB&jq#9RbG!(uK_?e=!RS3$2g5j;=%>#|d``!F`sb!W5y&$;$FioQOR@ zIMOB*yykx%$ZCpk`>}`sflwk*OEK}Q;B6aQ5Fa0ApM;`Sh--?HwO{Yj{yyoIh!#Hq za}nG+*_lE?I#C+XtI5mDgMWVR{&7C`2U*#+K4p(qMjfA0GF5}L|FE7G35~l)<**{_ zn!uo|j1Jwp2sFL#hxpiInlIrxj&Ra@?w1Vm?U9GB9lEq8`9raedh03GjN!=Z-q4iX zyY7Vpu;}3j38_Ht3Iz*jni@lwb*n~-76K6iF|>eLOnnID5gdV5{bwq;8n(!KJ8VAR z0yKw(MMWv$uRrR?PJ$eFZ<=ac^#Z++`leYDxqU)6#u(tHpN#_$4qh3Y-l5K>nO|qm zoKZPxSi6?C1a&w;FjW6Y%a`wX>2hg%Q@^WI_1n_60kd5rUe>bx#_gh>9_N~d7AT2! 
z-iG!Nmboom38-~SQ;CJ~jklY_yCMb-`j6n?-!(C7r*C`y7X25}1^5(873C%4&c42k zppt6OmRN+z;px&`v@CuM>Y;tbRs;?5feb;?>C-guuBq|w9EMe0I*N(la@5x$yL*{W zfkr>KzlqOURgK*<5+!HGOPiW*F|{`pg`0D473N60Pr_zz86~CCV17VjSyyeVY2|y5 zZlvc5V2BD`u+8O!=5cVCOZq(zZn$vX2)957bJ^|0;dShl`OKBab?k7p@BePO5vE*^ z`AE$7@8@{=WzZzQe{U6(_oSeJ6!-~jwcu3vFlJQ`i;Q2z2=(8kflRe~l?av}Qn@ns z%c0c-yv;q&kH9#Y(yNpaNgpCK((KJ4vWQtwMstLPG%l#b3G|+~$fgL02KQMPhkCsv z@$Sw{yTNprl7|chCR%%YF<9)h3Nq++|=J zt>*TWx+n3^0n5B?S^3%9#<^UZE$c}?l$4b6o#5-{5mn3hkt>HiT^{NMvr9)m7F2~& zFLMK;!vvkX*eWk{2j6>~U5PdWWY*=3M6Vuhq^ZBZ9|EQ}Vy&S+I|*dV{o*HPG*Slc z9vB^ihDPu22@Ea!EJ0iLu-(Wcy!pP1Rj|iOc&gAK{Bo)J=@UH-rcrxs1p;Umn~!M?vV zH8snYgzlD|0*8f{xk-VEj{q71Sug~`s|AB^sNR&Fw`nP}o!ZIT2=$MEQqX`4B6hi& zksInY|K-*$y?_{b&G6QY2M^vPx5i!us1a@m!+EiZPugqXyWBYzB}B^>;Jy)02tsf9 zNR+di1y<3@^2ACw$Vn*g>RB_v!>xR9)2&nflE7gw7{k9WdLnm8!B5~p)qom$lWe@T z=62(ZXCo06AE90k{9^sB+HB+I|I#3g8X0NqJImtVk+ZegLX?kthtGvaHb*KG$e#G= z52o@jLua2Tlig09yjA;kQb}bi{5j790vk6ciy<`t@)c`@x!N-Dhxl^>>Si3~hhmh# zB!ypCk0(Qvi=CnV)w9TGt4adRK9ryi@J9&td{`ZwyML=6x3eAdN{<|l6UsKj6K~*G z+B&>DbsMtxCGrcCG^GXptdj~PIOAl&4AeW-+*x=7AIJ?e@$K&lTj<8UJeClDoQ5dK-#9cI=xd=Ren(oY)U5jM- zParm};^+RCvXXCWEdF@<%whD?qzIvH_)GCQ=Zi!u=)ce7Vm`3dtyGUoQY~mBpWMz} zxnjHyceVs?83?ay;D__^6>CLY4A|h5ApblFH?oJBd2_tl1HRjh5p7=2$x+6UmSp#c zC8WpIr2)+P*2vQ0=V0Z9qbox7&T6#X$?QAxV<;|oWJn|RLmAJfj7otf?$`<5!eH9Q z+aEvLIVB!Ce3;YUFP~Uzx-Uz%_b*8-IW^T@!(6zdYDj?%>nS(wO+Iu68DI$0j>)-n z&IGlJi(6lB4(Ap)w}gB;*E`dwqpeL7XWzSb@5^tXKTu#F)C<%QOgD+ZcfLr=%*-^% zkNE12G@Y!SXmR*47(KH7TlV@s!{n4KW!|B3XR9ISlYP#FjQ+uZl&+A$Z%W_V~75eSVH^!;c?N z%2j3nMKCQCwOaxrWtrgSm*QHEG0kINM2aJ7HX?6u?2x%Q+4sVQzmthQum=IqoAo!N zN+_Et##g-4}bs#o$;;+U)JKwqDIqX;X)LKFchaS|@3Gzi|s<)=Pi^qpWZ z_&Hko%0pPmSNsaae#u9?4L@$nN0R=j7I}6u$R!>Hh{*&4gJq(r?(LUt^*3*dI%p)6 zrKg_t@zHEc358V+{Fp~O(PCR0i3AEc$fUV4?6*F#_g(m4Tid;$jSHWSZofb7r^B~D za~Chh`~8?64va2ZYla_rvd4M)qszO(7JXLi=}z{EG#NH&>BxD4Mq8h)0rP;hMXOM` zvjSz$sPi}RNBdfmfe*JsOYp6uT#_Qg)bN_F4x`iDg5s^2&bRF0&AQq5Vlop z`$h>Kd|X(#Utk7Eb|wp4)wyD@3NiNk62;K^297|$5|@sM0@^Xg%-zlHKw|w0LYD`6 zQwVACsT5JqFnR5MsC@yt+ZpFy?&;Ugz| zU+yUa_u36#@pmJ&FJW7>ngj9s$U6SstexmUD=vz!vZA6fd>Nc>pGWwwzDH;aHAeV$ z_-Q?Qr|l#ZR+Jqntvx+FLA|A2VGiK}gYA9*tlkbR*|TDFB<$ERsR)dU(b7Y#>_*n~ z&#?#%(k>#IRmWCK&RWuyy^DU=WXJP4-52eJy?|oIgk-msuvlu2AFS8C?Yv@M07fFe zqS$lNaOdjXc$~VNQ+3bIxiEEqx*2t_JaETS05IqaCMO{Sm(hkaZN?o$RA zGHHFqa`G5bohYGXFC|2&%EcxHyJ{&)BJG4j#3$fXUT9I@vEyv8$2qUOput>?Y=P2siIk&uKHmobwR-Ptl6iB;oHA*Uw=YzAl^-RhtqW29{^xJtzaF8 zmnKn!mFN_mJ^PYn6#N|Q`=jYOjAT&*NnXz-9GEX}K;VQ~i#|TC$N>@JWeE%p7w{Q8 zORg{7d!cdfiES3?S%nmLYRccF;Lf_nlPS*ih6Xg?MDa2Uc-LkME*wL`i6Fu#O`%fJ*F8F$Eh9m$lj7?J%n!q`Z1*Jh8`$q+d8$aG*s^AoTqC)!9N4 zxo1~H^8~PM!CHdYR%WG$!e-jb<2{dF0gT27zX_9=Ac39jf&W5yb$8ThleZ6#G|IC{ z0?RpD&JLbSvf0x5g!8aVo;}lPm+Lt5hjdoQK-HwuX5ksQ>pYj6?)k;tdh^+uY<_m9 zjEk#Aue^0keOnwWUJ>(D-MB9c9|pZ9Rifi@QPHsAv7_PoX!ekzox0!Q`~A_KqL74j zLe=l?_(g(&M#W`^pWoh}HsMcP{zjDYPPBNI_R!8Ju=9FPa?sc|kUHCgRiyapZ}$!| z%%UnRt`C}yNo@KeL*r?j*j5kL#&05DgPF1Zh|s8_hru#8JMp1?WyN%}(cp27RDME$ z7|~nv^1ZT zz;x$U82EI*DXho8+5C0=GYX@5x8wA$h%5cd8|Q924H4OJ`#{iQC2c7O6KeB#n-4NF zi*N4j&Qa0?LLKQkEVr6WwR{bsp!;_O$SxGBm%-=YPv8W1d#e+ec3E3wG!n$`qEB^T zBFr-XRtsWc99piCpX?K+ES`bv#dj5!R1Ja#SDs8SGY`Ekm=(l3OenFTdDOQ-t zN#8o2*=t?tvLffMLCG$_o8bV3ekoO`ggbcPK-?!#?cHy3h}(SS0KZDqWq~DNuar+Q z9B?tG)jbGtaG8$J(GV8+soIPfcB9MQ*6`{Doc}IVhRqh8uw>qCQm~2CK)Ld*HtS6a z3a+J0heTZP#Kc7Ei}X?v8t+=_@80>uD@kX#B09Y|hYT6#49peMoT)3t61xWAFME>L zaf6&)FgKN$w=zXJp-wQ=Juc^kpCRyD#a!fcpkIrYAeq(}Wgc(^LMf{?9M z(1dcq^V+1%xdid}=$x7nuUwM;lgTmnmO+%(<)FJD^}9I4yZ`lWH|pTJ zV!ac0R-In?U!`~wPsj!46OtMl2I6J8nw672 ^03GZo=J2>Y 
zY=?!RyOD_r)j@5;O+lBK8p@L=+mq(UxZTIwtA3f1gHA@V9^Fz+|F5c)J4e`H#~IwD z<=RUB&5UdMPOR0)Wel?ta5H18*~v$@=gP7zPfGkNG>8&gc6N1ic=&pI4ygWMw3v+Y zcp)>~r2b{`yNr#<_Z>p2J3%sw!~US-l9Xfd**h@f{WMi4vRRqUIEiD3&fez_MQV%j zdZ;Zr{;MrwW46GtN#9@W``2I!)%#{A8s_Cf|LT7}0x=q0ELoVwvD->CdAw)wXLwM; zdCcf8W8p{mwdCU*(ebfb?&xSk*k9OQlhnqWnL&yp%h=0jXMo_l`1|7G8n%SI-KH-p zAjgu$-P+{xaVDtqe%4hTm&Baw*Iq2UH;ufW3_@*kyNtYSC9E7j7dC4b78JyqzOK9B zed*G=0omA=*SEnV2%|7cUwo*<=E&O-!v3T5Vn{X&O)!mY^zGEa}ABAyZ2`xJSS|Z20A3fgoF2e5=?6S`V8A61Qu#LIG^#MRe z+UM6D#^h<=x0_!@DjQ+kIsm^MNHHW)W;^?d43UOJidCOwRfhO3|-_<73y*3z?Gl-ON9?|*R+Jd2)W^2tO?{H7%Mb_iCdN|8)!-@!Jr~2 zd%ZcN4Pbel%mKg*RyDLZ!>?s_ARl;ztl!Dzc#<9%7lv-QalN^dH~+~!7o6Ax#Q_0I zblv#^YhqF)%T_sPwkzDjgRg6&WsZ_dvAD}21PoMRN+BPTOim$iPup`8HxXaw5S3rB z#%VoGKSK5ON{Ke@y5~-x#tDjpb^11oSFYz@LEEfBgSpkz;On9y^u~>r+A0cWKokL( z%Zd;X$5tW##1 z*6!gODfyuh88%EQ%I+wyS8aW@0ybrKFnAbd-13re5oE&*1+bLM`*`(rb+MmQ_mP%# zo7f)bNaVX{XU}o89M7kOsu2O~n|}ipr}x!6e3g{kb5*|Fto<62_y}l!W>^^AKp#Uq zr1vz*o!XP%@U1UkFH_MS}U~QkQr>!8FUiq>| zfC4A%Z(7mL463u>NmD+8U_x>-m8xh+Mqg6*kc?ydZ~Pr3o33B*x#!7$qpHj98Xti! z0%NlGzuzaCE^{m7tqUcI{MnJ-SOSaEoF_$P@X~T2$&szsT_iUve~aM9yg}%5*&nX7FD+P3RI9$C7uks=OMUJxCi z^sd#d(yMpY0c_fia8{x>pMG(m{QF#^RlK5-Qf*jcM2BIL4NS^k4lH~A4BVCAD5l_} z;i8)@Ws5{(DQ|J?PzMQ}Gg$y#WYx;jgkq6MBr5%&9mrWa1u!$yJvUy6>`%M`VAtA_ zXHXttI@S+QdIkJ9w%S?dNf^FXPD*MBiqvakhi)Pk~3MUq0izj_^}GFRc0C+<3|`EN2mp*G!QH|^D5}${asxnc)NDkj)8gP@2(ah z6+1sJZ?+rmA91G_!&;vMB6xJhbE2H0T7O+3{v%8l4=S~b3+tVOyN7HoCaiZhARHGH zu1-&|BA8j+a_Hr^4GV%17I2SpSJr$0@f6Q7^>?U71N*pcFUEHC+Y8Yum(9Mc*k*vJ z<$Cc7E&?`cEL7~#ep680}H!zA=u1LNoYg_*O`R*he%aF1m z*h8+97XSVXj2e;yUu@d6>GMY8q>DZf|E}@+!fnd2$yYr#Qv^7SWL84pY~$B_C15Cm zu>Hr7Gpy;tr5Q+U6R!oP^P81Yw*Qd-$W?1J_;P8IsVUFzzS zla_!sq4d^orcI}0w(%(}sE`DW=xOw&-~QkJlmq&FTQO79hU(_xQnDZpO9ype7w$ju z1qXPmL`#|th+T~J^;tjPAB>|H?y!=pCY*K=NEe_phHl^~p~Lf#kNb;`*x;>7uP@g1 z4<5J+03}NAj4X|E`uHk>Bt1b#FQfZ5d+zzd|IheqzShNm>YHuov+resvKI(7GXI-@ z16I;4@O`t5Q^4rLhD_yDRl%}F^KbpfPMoMa$h(GMqECy8=8m63(UP>0Z1i;Bh7u6v z+g}`6XFQT}|30$z{^iSg;5-cs41iG?AY+e@o(C;UV3!USJU`X3s@Ppvqe`LBtuvu8 zGcZA`M9m@#$U=5SRmEM(t@j78a0|Px1 z*UZ+U^Fp$ick#!yyzhAwPH#+o!5GU02t04*_wO}xyd?SndMcprA{_ciluPkqg$Yow z(MKdepxE=d8fY`28D1h1?k6sIdHq9*f{1P2ggzPKy<~9G60bY!pzJ9^ z?E&$|flQO})n&j>fFRgK(|tUKX0L}@2D!7?XJ;qeqva;wE34~u zNX5AXIT@m49x=XO!9$HApkXYWh5rb7RK4^-LFkYaWDqpls~I1o!tA$3r$qe40!JQ3 zDBRIWcYoORra!JRa_pm<#SSf`)FMHyT;PngHTm)iiYDdxu?(PXBEn&cQ;Qp_kS+of z!O(w6Tt6qN#+$Huz~93|SurvDMNMG__z7fU!&1KbfB(d$Yg%P!cwcs*JP_~;c(A%f z5#D{f?yb^2aC8O9`k@GAJWkedSxa?B6L>t&Pk;-@*-1Z^w;lA@MzCT$M#(kMa;FPN z!%DcC5P0`?a{hf^iX^P0mzyDvL2oF4D2{`1qcU~`9|=qeY$2I%%UrfJS^_Q2|~yLQE7z^{ex1-wP?h8)++K6)f|nL?LV50w{G@vY}FAuvY)_~+$kzFwTNN3?|j z-1$s$_}rLG99u$WKev``%)9^vCKq>f_qL=oAf!Q+L9%joartN&a3~s$%^ttYoG(Uu z8VP^Q=NC7DnsGy>*3l2tlU0g_;t_XqA@QaJR6j#z{X12=jP_-^;BicdeJ}K znn+%MtU^LttZ3MCQ(;XmXL53~YVS_ga}syAXVWP}1cC=-?&{W1U`s+b?nc($Zi z?g!(v!F|3yKHq}rYy37>DJTO$u&l69tKh$D71)n=r07z6z<^+79y{}jy$`?eXn!WX zudmPa@0jk?qxw@tj~)@cy?H->D$2+p2@G~KG`KHMeT9Ha*43Bo4YH1LZ1u4F?8~~{ ztTy`ZdXObc^O0brZ!rl=5#RbYE>pYS0rEORHn{|{d-rj8S+E8#3lBb339>ZX<`5Kd zAZz>34)ctjx}+L*8l7VPgqo27mN?MgfTj8?V8vY}skx=5q&(TVt?ebO1S4w763JEb!*abhkxllAJzv|{D)Gy&aE-gH!p<^9S9zcAmDjtl)9_U;#8#iwJz0KzRz4Z|GQrcF2TwH`14O_x0 zalWf+c{b+Z{BC!>O&GmgLR0*4WjG~O4`x#z3gk1qZ;bSZ=CRZ69C)HKiuU@@6SKJ* zxEQL+sF|^`F#z*;5o;AN?;`$mp1jC$1*nnw)q~eH((DxhNz&n!_;?xE_(2jwkh^8` z6-^M8_xVElo)H0avE?GySAbYj#=CsXYhdbH&XV4^u?-ewfGqlXA6o$-Va>JA1e9-G z+9|>JuPT7xd|{1V+r(ZLr>I6WrA{zLVy;9YMHL1Tg&k+CwkeNe*bhs&sZ<~kkXH3a zM+-S!q4xp*W?!*zZyG@LrTqJ&uKpb_<>|u`L;BJb?DA%R5`F8Py}p##H(cO|m<)`$ 
zF?rp@x;Bt4mrv`@ko(`xkQ;x2T%u@N|C`r`(XQnOfL}$6vR^i|M4(6y;@FYTo}O7| zvKQnGPZQdD4cqT$VRd^?*zSvImu1nz5qP}*#t06yGx&CBYk~Rbbo6K}suVa|-rn_6 zG@zyct48J=4s`GtA<5dQH-#1LYllmxltlO;A)bER+Qhpr@yLWHh zOhw@6@p3bW9X z`En-$grdG1hC=8>N#6t0GKQ zHPZIhHa&Q7v#PP6zpa@bwHOlOWDT^*4Qxm(=dz_j>gN$?Y#aF2fvJe$A2b9;c(q#d z8o#xq!6}9yZeNZFae`oeE1h1bk2KR;SOHOF&hpkmnYItrm`4epBYw{tULMP06tKiE zZtyMZ&^02){qG$K6K?2cHlHV}4TsNg_EpdtL^29%uOGV&59RGd!}*#k>dFHYY&)c> zhq{hNe3c>7UO}n7p8{ZHfd7@jw*X|@pdc%TT(Q<#oT+ZU$oN~fnn3TIHF^msdn9r$ z_mVCKXS*Yil~$FUyd=esU@oKWy5GU~SEUh%?Ryasn+6!J#i41oZz{x9OQ{-QCUIFS ztox9;$v)wU+S390Mtn{AjSX{~#n{bGiW0b6TxT>~!L0J6nj$amx=k(-MlsNzgj+Y< zWj=qv!juFhw5VEYP_Az`zHp)MQtrP?M76sBIGLMu5@R;JgS(dl>paKPwi0dJUEmL` zH+<@7lt0Ctm}n`$UF>D@e61g9$)lL+@MrX|*cs_r)Ucl*6NRmIvJdq6nwAHABq^#{ zpqsil(A=OLJjlI-Du4Z&x;t4j!$IxknI0zFTY2cY9E9E9X;Ux2I^oUwtVW;F8?8Oz z<~I2JY|I9>&$3ZDUnUk-L&htzfYkeuEZxok0i6iwX;=pS-Fr1+T%%bxI4eVYtZikf zof^Za0Uz_0=wX()RH#?TpGjqu7I<@XoblJnm^D19WvF%K$M;4<3^wFQMBk^VpdckU^W(?%lOHp3;R1R`Zq6w&f}cpNBUG!y*!|+a zp`Fi2F@+OtkMvySv)2L{R-}|})=&g={-Jpy(49KtHaK2jM9nz3H2tdbUK=0l_bWg7A8 z8IYCJ_o*}D2&1$Fxj<8~ri%bcf0JyqQ1sm$9-x9_LiD}69+ho5OaqL}?v<%Y5ds#od!_<0kTQdD> zx%0n19x6#+LS`UX)r2}eJ_^MHG(^w|L_|a+8{AlrlrC?r(EUAX8}5`i4|Y zn49ow<3eVe{=f}aNVB~ zNJ{Bo4F!~AB(O)g4zEf|z7Tz10jUoq`&I3CM+;;4>bKic}9WXG29a%dDRqv2esnVd}5%EUL_+NokAe+ z0A^QEh}3nd%GnT>~heq1KkO5{USpj~y#YhB~#1Y^@rTr9#?^ zyz`q{oRk!}sa%S31ZtV9iE_(w^#fO_i7MhG{g;zIx2Zke&N4vt8G+Mc1reZ8`i4p51kn zRu?>Q2>ISdTSKwk%=%pJo!u^!oFU%n_wq;Ud)8t4;20ink;G)aqNECtt!D&WEL%YN z#fgg4^A~It6Suga&s5Ik(l!`QxIR98(Ew6T{YWz(`2k{1h&2(AYB~$k#CMs;lp+7O2lrBPRui6S zX)AY)XJhr{b(_0CT88pMin?mHv2}0|jZ+233t2;UoyYLIAZ@0l#mTpXmv@Eo3UQ1G z134Q!w#g zIrBRf7Okh#s4y^6jAN_q`ibNTOnPj{s8yM1t_Y=TXmc$}2R~9n6RGphTe`ki4t2wy-hBC?eeVTtQWg`FTv}Kjy1#?TtEs}JQm8aR^4+UA1FVhalN0}#RuWV@BOX#sSIXy zG}=s~*VDRhEALT~%ZQqIXA|&3KCr~s{$o2XY%*&f0Uk`zjE_3aqQ6ce9eMmGEaW;k z9{ky}XEB1s=<@;f13U~5iW2LbnZ_@~1Svs@gJ2`kH#NOLW|fL4^dJFw!a}&U*~3Lc z_OAY&E^DIj(*ZE%cENw@+u#6DY~5=cCaq zN#ZIb73w+c(`x;lmub=PZ*C_C#h2?E-@yve5#_ik5QWBS_G|I63ExEAbxCC)`wcZ( z8kdi=MctjCU+?;PHw6NXxCPRA{%z83Dz)Vmgcr|3!&}g2e^j zo2(*91ry|D-RjwC(C2tr{|T?>KG3G5AT-3@#MBY54$}gk;@kL$VXDGn3i~aDPsE{j zXWzGzrRrNYI`+%&`Jdd`&wgZ~TH+7$?0m0E6r#CHk*2HmK`_>^`GQD)Jeg8=g&qg` z_Xt8v0P*igi!nFGdNENJJiPs@JFZjjBWFlipP6ZESnbKh^%|Og0Q9T#CNK7m%k*5b zzt|sRbj^M|Wq*t8j01hgdjKI2j}HLS=is9BK0k{l)ZBvSatHY0F8TD_)U3Azi!DT> zg6jjPN`gW?|HblcY6!IFBj+jRTfP9z3{+MPTb>$V?>%(?dMi5SnF#TKYK&_Wn0&7J) V!?>Zf=TPvUt~{{eJ(f2j{$A&*yz#_jOPS!cbuSdr^Y9C(@>+uI#bmP-fN-_ zrYiXtq^0TTm^}@8I=mlh-7&a##haDZJLQ}QPFefFWk_^QPWatv$3&#Zpc$s z#fw#%{ZleAewtOh>5?iQ+IAJwU+#c$(o_h3yr)pzHm`Z-;nrleGbU_hKV+r84YS1^ zY+gd=cE5J)8pHkiQ6{(98mzS2Y~AQt752=jZ0pW?(-<+Yj+k3<+GiW?QP%cpbl!IT zsE*jij+efBQmI31(jh*pI2EQlX^9NWFgQD3B5b(c%@#Br_eW*KOkl0V5EmYtA5PHM zENp&hHlHLRB9WQDPLTHw;)QFXtiRH19jx*IWr5OavTdNFLc%71Zxtz}x zG03JgY?^{_R+X+6R7ZxpH0Yy^5{EURSB3j~HG`NLb6zh$-tolQLu*AhrSyUausb{^ z(do{(NFOIfo++W=dypC!d>Wop&$y|kRc4I&IE=YIR1MR5m0M_pxn-hWaxTa4$8VQB z11L{I(w)m$jDm8}es3a~Gx9wUHwB|j8L=`r)gFQm^W-PnCH#bFE{jxn^&<1ATyIizF(1H#(LveM;7ntk2;JL* zj}UFVS&wnCV)^-ZzElV@PBf=zI4KXs(+z$Ym!t$QDY~Q|#HX5OqOC@aL*7;6;ac}G>hpYW zhPhDdaVa&x6ue{>-LOVV&2yfdQj40~eo61;Yt{<{SY_e*Uf1#|P|P0!zuhNht}x~G zdA`gYHB>K49$<}L$ty8sjm8r?5iI6|k8nt-zWmq}`1WUPQYuz1OPRw^^Hna~2oo=n z8*zPAadjmx#$43JSJrmfLXj7$!)PL+|B_K~X{x=Z7MwTuK_jtT^P~aF<(m<{JaSN- z4+wdr1uwWvPBLi45QbIO>(U)R5pqI#xrYcjPT->FsVe9B+a>8m-(m;pr&KTgz)DK? 
zrR3@*ofFG<95mG|bDWD!07qUPu&COBGk$2wo-3{wqs*C8$0*38fDA|!_&|fmQbvUC z)o$py;#HgLnDW>#?da;h8DkEswk6ZnUwQ=$k zmnn%a0b$(wux6EX3sdrni2_F8RAq)RF13EL9Hh_wOc03g^ohl0b_xW7gOx(8{*BC+ zle8~w2|3(p^jS51t1^ge<9jvsNfbF!ktCW=Z4KQpikT^$Yg}WaTdks*KEoFG#!; zhi_Z@6`RyIZ>;a$ot~c;7DPQ>aL!V`{rSmDLiyuGRWY0~LflxwY^c-$Nt7i_Ox$sB zxWJo|%3;8_Z(Dp&A`Yh^!(NcSzuFiWK@cDd+QSlzk$c(MmcM$!4QmT1hdXhPWT)6~ z)QG|zvt*>CL|AYCktJ?SR=1|t{NqNu%%KBCnDDBCY?!b@pFZ;eykYN!hwHN9MWG$I z_r(R6x2o`Z>eoe;OJAQ+cwc&N&ju~FC)#Y+M&!$Sz04hs%ndbCkC9n6wqh2vjFGvJ zGi8Ci#KV{0(At`NtRGmgIbBbo=EMWFG{o@$2zxwtvuZ<+h^;2Zt)3sW3H3bX?h7w6 zyjoH?3{rx>$`JW6oc|Sv zC@O$C22Dh2(4YNs;<;B@tPrAz@Qv8XC0MSI7FX)8{WKX{2&Z%@X!x zIX7SDz*Fd>7pi4TkIm|p2@^CX_;~t8ehK7cAcJtXBzGf#UC-2_9yODk+ML>Jv!3R;K zaTLMFwn8wWc~|u(eo|`K$CVx9aNT+FDdkIODo;+TMU6x{LZ5MVV&c*_SpS==UY?%f zS8o@u;cz`aex&glT)TFymteWAPC5qwz(YRvRLUb}>NKc4ISh?c7;h)XdgT!Ga7obud?d?+JvVu$rNkZ^EHGi24e^3$2 zgN;p1(=H|~mw*VotIWqk}W#NB}w<#F1nWvq+=ys8JOi)0p5Jj;7>87Z41&LhYb zP2D7_^R}1eO>Cx6;V>=eyOil7W*%k6um@UKp@fB2bp>-s@{`y{`~o~h2Y1? z8sY~Mgns1x(%7haVW#<%xK4*}{PZcslj*Xd9gSXAiqeUxc6UKa7_%21iW$Zj+2ox_ zHKjZaGJOS5@b-Y{%DHW1EZdN=?p-!*e4gL|9Pl@D6@9#avT|tC1PL%9Rd3nND{lk=)qzJPT#!Vq~CdmH? z!rK8txe6%mA}qrx?YJHVq&y^aV?wS#X!(`w+H*rvWH1j-UHARL?q>pcek9BKo-5h*`puh?lAC#e8u$)jidMlZ-Lh55 zunn@p*0K8imsWQ(I4@?HvL{$|52VOm{z8GsTpQ$g-3$uJf&fmH+2TCQJ8C>t zfg(mu;46EFeePg08wBW!l6dq=q~T2zIN$X#)eiK`%*^8AVtjo3_y;6sYiPo=w{HbX z3=wVXlixGK9urlULG<{fv0-j4u7npJ>#Y4Ugc^Oor76@fp#I?TDKzNR)YR72R%OYx zT;kEujT<+XnF!o6_x@g^6MXT2IH|kJ0dPfknrIx9WW1+GjA3dC^k?8goZK|0vHw~_k6M50%pb2PD<~*bS6BOQOnCq9Nf{g*Orucz_|Ge3 ziC_5OE6v0Gb^owc|CNE9;Df&owt2&ux!`CHYL99key2AK>eAV{79lP!-tl5!aB0Qv zKqM6j$XqkyCI|XioqVT)qT=rnZiag@moC|Rxbz`)mj43PkQ74{RjU%SxV(K}Cx zv-2mj*WAVul9Nj!bL%~(_mcdZzpeajJ=ye%`3`VnPh{U6difmrqo)`c4AwQC3-!HR zOW2TxI<}U7{Ak6S6`r5l!7$uzugE(k30I@S6&DvrPDlI?s$`43lhpTB1%c(DTP2f< z{e*p&*Y+=GOOyqxy%juv^u&;c5o>8_NyoxCt@9Opo8RV)tRUwr3A3&@8Q8i7aDqU{Cfj=W(@R_t zrbdoCm#+|ga^q@CQRqb}l(>vk(g;3oFy#@V1d`Y`>noFB?&v5o%V&|=%ABAm@)K&J z9ZZdV_Aal53Xu}ibuIG|9%>l$=g*%y)dYRvP>yi*jR(7lKZY11llW zE4T#s>?gQwI&H z(B5Y9BIC1d4)B)6ueJp5^Ua?W3ZLL!OIb(a55XsW3?Rr>?`X4mld+!7n_@FzW-Nk_T#!qJ|CET@NffJ`ORA z_Jd$0jWwqsHCAN<9B6MDK1S$MGjK_hlap3^aWTSSJTZJ)_%Dh4%6#+p%NiHV7f2XW zwEDY$baH-+lbd^*qx1$XHeNJ$nXZ=}P4|;E0{>MOD*pjDB2VD=YGxJHdh_}<>%k?> z9c<8N-!yPdp9140wYDZci8yRJ3#{cG_vU42_qIpSP(X0yRRq5nQc^OwYky`1)7r#YZsIs+g_6e>iz4ecm?Fk{|km^0Izni z(Rja=zQ~8p-`?Ifd?n6n*Xp-91%Z}`Qi2Y}07FN{7qk$?T@Ed}Lx$uTob*943kn3k z1I)k59y!4amHoevd1QR~rI~hj$3)XzYP5$fOO|*($Y*wP#iXj@;>X|PIsa|p@6Hpg z3=Mn#{;gutT3U|SC;apZ8$KA5mX?O^Bk@CG{3szoLGZ88!{s+^PtDxM%2koLa|3aV zFuTU5%R46>*XbX2&6inK*gmmFAMfq?@uR45%1TNfAG8p^2iw-U^D;9Yy~%eDdAWV$ zyE%0YCb#rL<;gECJ-sfwIZlnMrt*5l3_)Gv_wDSG;U{Y_=?BdL{pr$zf`SLT z%ixgZj=AR82hKo5J|S=MP`uwt-(wM z=O+g8xP*V~SlJtgs&I*rUU^Fu$+GZnj=WbwNpuAoZC8Q#Zs2Yem-FegI4iyHdI$CCJ7leNY z;&USYac=M*RoQngMBbSVgl?fEi~^-`c8EDEIV0p5+wA~|qd=(0%Rgknc6G(r`+pTS z&H41HpGNcG`V$WU*it9C-u%DZ{X2rk3K(^t-#x5HGyme)B^DoCbL!)GHgND%F0JCr zkK@U_lgZA`NKsfs;$uv%hi@0!zx7|vdEMqhMwm!!_yU7nmFgLfMML=CQj_{e3g_Qa zE~RZSqwn6mtGV|n*`{Uucke4@_X)^GwV68qIL{T^njUKJgz0eNaNby{>#y6d+gu4f zQ8}G3@EZ)dYGKC*O>?^+8%l~zbK=XUX~Pc^0yw*>e`YV*%-j@q+x%oMO{>?EBL>ON`<5Emo>t%7uc1+n>1kQ=veq1QlYOXqQ%X@p}x~2QhfCo z#JYffKzFimcbqTbUEBKot+lN!e@ZOvpH#K*KaHywU5b*Z^-2`rG!QH();kc3+qZ|t z$M?l`XW2WRy10C3es+3%`0o!H6t(8tTaw34Uc&@istqTWgu4>i)inxJa-Rhy;cRf# zc_-!8ketj+d_no{pV=lcvZ=1Lg8J)uu;4LPn42siLikvV8r|NRw0E{!89qyocE;px zE+#C@>g(&9_+VuA>HxQXgq!3*?{7>x_F>OX+RpsPA&Kyg{+uxASFr)h2FMg{20Jn} zRa!59SFH4v142aW#%aO34;2-gZ{j73cyhF(_xc9%nc?6bj+n6WUS!gCt<8;%nau{7 zrGiN#faGyBIs^wVR_QYC0NnrA`Q@`+WyKu&U83Wf9*&&6bu^LZB 
zH2WP?_H02x0k>Vqpkl-e=T|g{^X2$^r@?Le!2oy3#3)8iamlAW95kSbxJ-ey%93}A z*TZyPE{-+$$fzso8$cLG8Y zQM7u$$hka3kDQ$~_h)>Hr+;K!lk!aE;pbO4xe1x9bsM*`v|PA*dc3OX zl~oMHoX>7SN>sR$qkVrrzbv7{e0U7ONA`{lHCk@_?yS&{3^@w33u*2XRSs(-AG(-w z!-y^dk76NHMq40Lx34Q+IzHWR^LqWh%j27b#&#SgWJTd%{IVv~m883Z2r8^M1&cKn zz-Y!*FF!VlVPrS*pn3i^ys|aFQwC4lobJ zf7#i2D&ljBjc+Nkub|u_y2Rx+glF7 zIbJA42^csc$k(2$`{s!C=vR?qfF+!7Xb?Jab8~Zc_{ia%?^nYHISAom`Lthw%3D#K zBeKkxm@j4HFn&!-ZS&Gs7p`qKEL4L%pNIKA}yne$H)~zi>PAD>bm6erc zJd4qr2_X#XgGh8OKVgK*L1E`;33|-8R#es0rUMv@->n;m$DR$nX?JlAc>!xiyj~j zcb08maRj5r#E)j1{Ij#djo01nfALn2bPDt?0M<#pAD{PmQwfud3d(UC8*mL-osoQY1+UpWvf^hpm%YX}U5{?Qx1687n-z#It=f;f} zE@303K7Dq_-|ad*)V=1KRf7B|vBiH@7Zx0b>m*g|9bHs)F7l$NIi_k%czJo{S|9DM zjaqq_CrIvLLjU-9LuTjZJZ%XS!ksmieMy393DXfaOGX;m=@k6J!u75hJDu!iu^t+P ze3np9;6XCNot;aR;2d0BeEWV29aLpOn6u@!<2Mb7pgIBsM7q|HBWUXfixrhrR5H$? zcZZMvVKlv(1$0`RMPLa@NqqZpx87h=S!uA|4I*^d`J=i0R2p1z$ZwIouQcanTj^P# zp}nBkEcT3YpzT7ob)|-`o!}`_R;@J*yi=K}3t|4qOHQkQBl4ZnS;SeeCMG6y^;a)l zI?s1IHPxH)nb{+Vtt|2i67OsV;VXyOcMRIT{;05m%%7WkAcdN1@~_<9pL_%%f_CEl zAklr;|H;5xRqyHW^}F-&>uLyk?D<>#C>a?UrFU{=#Ss8s<%r2kQ06Rh0^k7^SH7t)amJTKu5pIfw~I411s3H)t5|%#_QykZ?{RaOY9Br)0z5#kxGr@K!L`qCqr& z_^^F4>6}+fgE*l~PWDpV0X^A7arJo_-lb&74#a~Vx(BK7a!OU#aul8R$sG(&Fn>f) zT*tFh%mwgm=Y!|{)gEDWlYx?j5gnnkhBAcKe!$TxC>CcYBqTr}Up{`6S=a-y_gmZO zb_Nw&(xn-Jt_9C05J4@XDlaMw>eC6wA$56YE|BA z94fW(x3+%c9Xw;ShCf^zE$i~oirhQ-;vo>$m9QhBd1#pZAfik0(A*5RoCI1IK0Y2E z*=sCmZ{FM`t3foD;CGh$yF8w_OgxE5F_O!0f=p3+mqVr|7;dtmsosaKDCEg`CMUap z^W#}7?mt?LoxC(`S=6w|Mo#$tg1chRy-7>E zLslax;0BR?`lCToezr>x1}}c6emY_M)oH2GSD(~m*|vr>;#n8Dz>{TXXD3cWoHae9 z3-^Qm%ru5UYFrdNf>B9J3g)P>N^-?D5NE}wiIE{LwgZiol~;MsThXIKqwq)|vdh+l zdG`#xyFrJw;1N?OaCLJ7s2v~mJ}3u9>L=lLUzye9-dW!AEBx@GR|vf<)jCN#KeoU1 zMpTQ1g(djm&e(9Zq}F=>XeN10p$xwj+d6GH(ySPgco%LSM164@LdXf0x77za*FAqi zg4{~)VK)rHu>~}1HEbZ2h_Ua8<*MT z-{Eb4e2ZbxXG4cR$NznZeCfxm%M-LZkdySKi52~!ilT-I-c5c-1Lr}sVM9mJ)6>7@ znm<@?+Mz_>FfzIaA>8$9S-zlkrX?Yw75+CuxF+A3g^Z{V4Wz>QBju&r7x>U;Z4BQS z77CtR$7Eaq-LP?e@ei8J#W%tbnD5%i2zd?XOI|WQuSZ|rMMu!E2q7Pt`w_HMMd6J7RpVgJLJBJiZA1ycWOOLDBN~XO_Z#*|-%21%>x6J^=!A9uP2p&N}1h z%CJp`-t5H?;`@7g3}O?y)}$WpRtY&r&>%P`fT&{<9X*f>JBs&VMN5*`Xycs&QDa6P zsD*??)0%>cVhAk!^teI#6U|27o$w_uX+o%iA-UqCD}B^lausdc{$s@UlTc(dm%NgIysD5*((W}r`yS(dD~v<2Gpd%cirmv+4XlN&OJ5sF+bCm zzC6rH$*q7)AwOP=QV2%Xm9F2ju;3WCxp_Nj4eHhq(JoGbwU@k?5k{dn(D=NKzs$k3d5I{%U_jX1h$NW z0li*AA^6(lebAm&K=|?$15sf&BpfT|yM@DNqI12iHt~SUp3R*ft`qcX-j3#kE!9j9 zSDQhYcvHA9MO4mR-WM3JKb+kPQ%3PS8yfR;L9q?*+9f_0IKP#uB()OW%h{$i0sf9; z=lD@v!|HQ`8DCEOtXm)IVpK#!Se0<@R9KAqzXaBtC}V!)I+-o}Jp{rhFMU2DgTFpg z2-e-zm2fo>v`LG!mfk4l=n|kKgGnw>Un(2e$g<+8wX)^^tCNPP)47ulq0?V1r(frK z@@Wvmrc+unWrTkKzk7jbWRk748O8nJPFonPH6lMB6r-T!zSmvS*?_r`@x{{J3Bd%< zQJe=-%Qn#Obmad2CY$15UBTpey$(K|S=HDF`oj{-*7s^$z=DPzqfic+dstSc2INWpxoOz~k zxaM?rt|fSvFf%)Qf-6k!OZ)YN68qBSRW(ca!8ZW#!^1v?hH>7DG{>MF{K<~`SXrqo z)TK6mX!=$ds3GPd0L>LW8$G7$>>+$<>Y+rWdGzUCeOoAyiGOLdk~cTG>dNXoRiwx+lLel84R>o3n+OF z^fVE~LS?P|(_?(zsf&P?qvmk2N%iGl*8l_Oeh{$GxSFf*({(9o0xkfHxKR!vN-NxN zc+#Fh$w{2(*4TU9B=2O^{w7Hs8}23jq{LFL34n&c>cV$!yW5+wP0Y%3UV2)nu}^v{WTW zywA^XM=p^hqbykPUV-il*{`Sn!cQ^9(_rlX(q-6pvums0?o{5-?aKW(=g|3)-}Pio z??I3P91qXK4u8M;xK0e+eqpERWMYmwt*tfovhnUdb)$>oE`HrXGUYfp`#I`t)W z0~k!ppEpGnTR;$o-1=AU_0u>(h{0pTvNmGbz<{Zgh(2KG0&ofk36refNTKgHh#&{> z&Qd5LFy-ji*45RO5r1kgHz$>pOZM3cac^55ZtAnt9v&V( zBSX4Q+J+{W8eRrNCYX}ap3ze_l$wTyj14>sV)B6z>&+-=-r05`FU*t;ozE#{m0|jf zG{*XCxC==%ENg5$tPek`7aSxtX_7ECDue@a4%jFPetzum1 zu?5XtZ(0dr`X%dSb|5sLEWM2I@yE;= zvKp%m-ry?Y@(D@d7tn$OVnD}kuMI{r`atnO*nS-VwDN|B02GPa?iPOYEEVVe9@QCcn+kE(sTrJHHKs^SVfSsQ^nXV4K%1R1A*X 
z{sLQ!66ECfD>LY#W(XeGFXT;J{~F36H!oN^q4u&%A|zZea!?OmA|Tn7qSBNf!8F#X zKOHdBCuB8@@a~Sf`Z0ICMHxsf|De7*S6lWd#suTRgP z)~RbTfZ1*H-wuYYKuoYP<{LP8mk?IH`D#53Hj05eR{@u(JXb}IU}Ra^JKEcL|$?2jZ|iDb>r%siai zZyj$CzDKUY^4#}{^G@b}^VhN|K9wHnyLBPID1>F9b#!YJ)z4 zi~G4c#bo`gr<>b1>$qUm<3w}kHbra>K_Je{&)vPetc>AOf`Wp&y1U~ZppmNZbdfCVobx~G>ZS92WO1v{SVIz<@>Ge;~9vFG>HajJ&9OA%@mL1NX zHWXQ8o)N#-Igl?7ciefs@%G{{PZY!?iVwYcU*OY|cX{_ABvanvO&9+J5#iva+%Q(`e8UnW4BC1z+fqw6G*&qN;4rp8837TA&Co zNomo=soslorfNJ?c1V9ND%fIybmg78<}vAz7aLE2 zH)-|$Ley`^Jjj$d-6l0!q$=bkeifJ?D3riMS>8~mi3Tp5w0|TU<}43i`x8*>MWyJP1?BG`f1GfyNfW*wdTv%}D4KPo6#v!lB+zNC` z6SAy+n#?fMyTGQ==NTDGdS{x50nx`)fwtF3b~r)5Xf867R-eAFUr{md{?KB9d92Im zpU>)mNyDm5^<$UmC3niRK>$p7fh)YtM=L+Ghndj%clcz4L8wduXXx5kN-l)~$@&}) zKcpBYvqu`;1_c`tX$QI)U>LZl&4z9v$Ki)dUNC`UxF#>Zvr&zq7Y7|pP0TorP=MCF zps~2irfO=uf3lJma2;LgSk5zV0Ea^IFz4S@;eo(PI_nmcp|7$<%!WRg=mrBlE<6AJ z)l2Rk9HbgfkL|_p-lhrh@$~Kq-ym5_U36;gA8S0O zrl+^rw#Puid`pjrh@iw-5S6uSLa@3-AcULehFRq!@89{s-#1j973aJ=Xd|h9lB^TY zW30~t<|tmPs&Gfwmm6=%1BMoVAO2enrNRMqwm6x__Eq|`FkF7jhBdneAVCw1XTbeo zR8)``etOs-77d*EN!3xY5rfAn~9f6EnfmD0E+|R zY-mHw4x#)CR6q6gX#QH2Ba%OE&&?-^aXJz{V7(}s+8%&YFnLWjx*S32X(&jw*`kO~`(&Kqc8)vMXj zih-a>2eE2kn^$-p6DFEV0beC_E^}P0ea^G#a2dM6Siy1+)b-Sjx6gZ$W#6-8fT`%d z02-5{CV`iT6Ve(VAFr>gqgSCGw;he9IQQa*9K>27EOw@3 znt%g7ud-{C_-UOeG6>=0M+9Tt`4@k%gD1E-xaKQVoL6XlpsKTyDBABC`6Bn^hPu@A zfiqU$DAq#r)Of3B`LC|_$pMlVS2{a8Yb!hQ@G%@7Q4hHf3^z86qNPHJh5Muk`J2kC z@HoR0b;R|oVqzmMn9ZOC#%1+pW%(e!vSB!kc*Gjm&plT~?*#;L3v{S_sn4&hB1m zLv^+Jm6hS@%3=@*pH8YV8cl~`bi>svE?jR0$9qmMBd+kh5F`cCtPE@U`jWEhoKEA7xu|hER6gO<@0%k-;Es=DiEHU7pGo}zJ zZU$Z#z)H0u-11}%c+zUM2LMO}0%yoI=O{e1gNlLc4y;TygG$&C~q{i}6CcmLrTchnw@KI@)oA z)r&aeE2eaYWhjyg;^E9+o+s;V{?sAoz%qa27L| zNz3n=Dh@wt$9|*}mN`_Qjk zlWvWJ(DJ-g_5u?>+o<_x&&UFU9(;ae=;9&?7@VrsG%1Rhit}>Fk%Tek%TA$5)G&o@ z<0vUkL5JwBE0Ts^0kbEbi^0X5liq55|KS5G>nKmyb>^fjZr~#5{lyH_Dsru|m3i91 zo$CP9ox#=&>Gx$g1ug$O23aBQWoBWGD{;-g+#`de->-eUx+34Z`+~@b3=7i$ufd1* zvgx-zqkh98fAThWh3SlonOX}YG^G?fVDXn{GcHG@mT?n#eE~S0Kv8VC38@!yWSjJO6a^f)q?DJ3w5Ytq`6SZ%PO6*5#*^BE?P& z@lwqDTu{L2KF6>KE9qIYhV5_7W90oT-jhwm=Q|R-bXtS%XO-j?I?Tl~!q{^pIc1?H zJVf9wOBIF6%d>GWySloH+GW3a6BFRcXTR*WN)p(S=v`&twDtndVLTM@40^5#5t#WS z#5LZjtq#8=Xh|$>v99W2st-W=mcW0-E+{Z=czt#LI;!|0ITn2BV-hOFXDAQ7&5avK z18Sg0w1)R0cwr9d*b99QTrchZjZaL(qFflOG8zP5&z{#ZzYIa%zn{YikzxWL@V@H5 zNcuj`9NkY8fRL_0SA#RhHrkH~OS`g@W4@?82^>YDhqY)l#9@pUn-LV^3Nrv-PX&HE za-eO&VZ9yq4!m|KvmophgQTV`sW}I;&eR-pSsy%nvZp7uIgp~dkxR7}7oX7#bPa2A zR&ad`X#D)mVNg>Qe)2z%7AW$FczAirM=b^l)H;?vf78KkIb%gY`qLuE$HvAQeo2y- zi#EAYnxTr*$PxAxqVVpa&bE}&9SpcQkrU&fy|`S%XKBz6xE|RWV|MpJRjN<6!hL=H z{bIeopa<`01_SF7v%K`~d272+PTIktA)pPD-*j?v>R|N+ZsuQ*tWzed$`_y^F#tOu znFQmM{1baznRTVn&?=Cg2>In;s-fYLU^B|2Eg>#0@OqYqm-l*uo~W>}@H`NP7*HQe zOD}TYPQ5H6W2#aBDDEf~$kn0zp!>4_ldJv2a%R}8!}n?SHH(Zn>RKX~4E;)7BB;66P_@RFscAnt#z3L@ zLIXa(;1h|OrIq9jzs$`HC@x5eXTC}TuZtI7(>lrbs$Ot+Nq9%Q1zd<7{T*dtXf)Hc0M{8JJ2Q&f5q*;M&4)enEXtoU~q3ni8`?N zvCQb`Xc7E!zLAj;ThcqR8|gC*-Xh;Y)Nv^#wPa;@O&{@YECGmzz)yOnrIcdkIc+mQBVO^ z@Z#+XM-;O!SyR;k$f~_cZ~ynCk`jLr-t-s|>C~J0U^Y9Q0E|$v$M1EI$7Y496fya9dmYis>}CxH!6A z7#JxOefg3VhA^rGa~Plu9&Uf&MTgSh42a4XZ=R|~_0trlh3z%}t$xlxV~FA4wRMV4 zH`8`3ezo|q8To#NnkFGB z4zbF`#fRqDT2L4nCf-$ti{uPro>d5n!pu2%rzE2Fe)nQ9Z$Mi?EB>5K+%9ly2f#^e zm~CXpQlj$jLCWLA#vz`}WwM}#RPG6xG#zDpl)8xw83|LNhlm{Um^ahrjjbs6w~#C80^_EM^S%Bxoqe?PKB zWc&m>3zmV3`1N&ovxyKo9~jKQhE3y>7nlByvT89zCw)TY)I&C3hl_xVA1!~t4Yp=n z!77)6u5MgD;9vHBT-WJ_zcdFf0S6S3Cw8*))?^|Wzr6@>V}G~I%jYu;bU@mWwAC?Z zepDUscH`#J;Su=ND$?2w$7`_8$}M&-_&*{SsrAKg)7G%=eOcLXw^p-N>oR?f+qg`p 
zv-H|UaYwzc&4HU!we9WgU>RVn$<@062y1|XupV79vjBQ1O$~{5TxyTb^d(ov#gJ{!9CQS5)kOw0v-&Qf#juRUmm8;xSRRK)?^9ZcD}P3Zf@bvK^!V zA64{vZ&Q=>OZOceyv%Ci4@pq$gxouqqCd2CVTL>Qqix!^khS+4df**_@{1coj|_sJ zkvo;UvWr-Y%^xMl8E{pZ|7=U?SdK_B)GYG?JjdL^f_F-h?|ed!zv=a&jyze>P&kPM7hmHHDgCQIZ5OICv{kkn%TReMYTCjg2-8-m!^E0zbg41_13`DX8TW zyfl$k5Er9tE5usd3z1GpPARz`!Hd$WMFu#pFbtYfh*0szw;tgvcva7JgW!&>G6y|0 zi&SikZCY7b*(;hHuuBE(I_C~9VWRk}Ra6+Iw5n0}zZ?SSwNe5Fx%fTO9ucag61Y<& z>SuqdJHZR6i?Ww}>8SQSFRx1V#b)AK8Q|ACus#Vv^#JTOt>Ew2Qb>(?&>knc&8wnj z$LkUZI21tvpXsE0sgG5w7AtC9Ua<~ zFfwe$>F-bcu3lcwpPc)=JZ2hhnZ?C7s61p<(2Ds&=D?R8?zaZIoJn6IU;RBqyDXSgfK*mx?|vO41d4P6 zp;UA{DJkipQL3`nTUf9arQDCBq&~#2mpWh56pia^YgySPgQL@X;Z1(t2ah6>1|dJm z{ftO|!vU-^0bpNNhD&5d>NVR|KTc#U;^O8OxyZ@IWu*NVjJtJ|7WokOdtPuvDIs);nT;De}Lgj(%<_W z`t^UT3TQ}RP5cs{D&gsfDwRUc@2-xB3l}YQb)(dq+^1^j2Kg`CqBQ39qQl)2%4uq9 z5}Tdl5EBpt+YEYj#=Kg?q=p4FuJR|w|EP6!M|&>Jt5Urh08~!&Q{m^&w<9I>PwK6~ z=8B~8i}={gU-vWjQ?IYKwrT|aqtpHFKmagx`(Z@VyAm>JD%A#V-k_e8VFd+!*oJzsrp^yr8yFFwbE{XjCPt6MER;QGURLJIL80+HQM^{IcW98nct!Ut*rEZ{eOhLcR1DI|37|?z4xAF%bwYeJu-^2 z3t5NC-XjMQLK(?+lvyD;qA1x?bfO`Yl^JCx^ZPu#-=E)geaChE{^_sqdfl)4evZfU z@p#-14*K<~dON4zP<1YHV()JFg_Gi4|3Jh3Z?T8_*acOTx(~O-xeTEL{07ZxB(H0l1O+@z7NlfC*r|Dk-!Vax2m@>kn3Y{?XswVH z5W6|zC|*lhp0u~O&to&dZ?a(lRvNsJkW+t!iZD1wF{v362j4h^*g?QqK(KhH^56d1 zPUmTP;Zc3Q8tRw8Gyck#=b_Wph?l(hVhebj4ioU3{^^eZ%vbFButkdIB2<8G+iC@; z#ZpbsoJodH?p{rL3VsX`d9QzDP!7S05?E=8TJ?w*FJBgi84 zWFzkt{m!0u;8b1y`O~BSgrK0_@H`SEqykBA3pKGYDRA4`1ae)iTIQHx}~znnJ1N9MBskZuK^O-tv^sw=COf4dag3eNa$;8V@*j8xM2g1 zwCS94JsmhTHi`4jZr*D1c*|xa^{Nc&547tb{XZqHYTEx#L<00j7|_{IKBzojLQNu` zx*<+UyR!N02+_i+SX5lRTJ5gm6RV3=I5eo_rA^alg(x8usA-Tkq<2 z5Ds$J0Me7le|>~_Dpg4tqXp!4W2z`93hqu`2i^e1w#~^QElLp0t~cL$@xc&w+HkFo z@X}9!ljMdaA*U`vRSI(>0vHAdEHSZ+_fuYWCQbRWH+PVS~ z;}Q0r4Lf`zSc3Z$ctZ?8ZUW8`U9kDf%1$yqy?Vb$4M7`Oc`aeX2Hrd}DU=vrqPPwV zw|9DR@(B`6SbsuDfm56nx&6r&82`fUF^-OfT0YN>bwM7Sk@k&6xb)`dC!f9be&@k# zRRXgULVA)GKTV2mKLnh~nx%pWHQ5r-)yN;_o z`l5C9r@0f5B#J+iuKdFWyi$P{=G+$kp=(JSPVDtr5FWZ&$k93v5Gmf$w7!0YFw^s+ z#Fr6D{KbFkBJducY>>g!%jdH+T#+|X+j-BL-AER(8c}>TRsQZ`10W|zwX>K|)Axj+ zL;&E-&YdG&1Q~SX+wKi+_+yNWZ$#q7DF~XFpkUqqt9JQMEZ`-^shPJukH15P4j}+= z)+}?XdV(&IqpCfb3M~x{=cr9fgvvmWHG3NMXKSlJ_Vxc21!S_J>@uQ#>E@)9_e_F9 zZ+dc#3j1=y?}o&oS#+6L1;-w~Oi%o81Q$o#+Fb^$a5kP|7C?Y|5tCqz0D*JT-tgS>5a_z%v$i))g}OKLq(> zHMbH70f~B4{KvnggtQ8iF{+Xh(9O$Pa_VKPv-i!ty(gR<@rHN{DxY_bIiM|OsJU@U zO6u;q{Y{Jje;=QN;{k$#`UlL}K>2x_9;LDN4A0K<-&=J5CtRlZd^MB|}&WvFx z@q~|ffA&8=D&_R>d#FX`d2q)e%D8~ZB1fBXLgw)gK)gW&TU%Rqu2NG`B`4*SyYuKk zbrDAtyk_2}X|Ds~D_G8XLSqVjp>Gu5mti3t-kFeL{l2|uirn@I>w^dQ^68WC z#H*)Ivz&}g9=5dD^;(v)pcD=t_Y>j_ddfHqJXeFQb{k1vc&Siz_9vTbCG=UE9&6EyDz23CZ3AT(8sxNX-yQk0+y zFg`>gKmcvZ`zEb=Z!eMvmg~O_rC935+2osi@Dt}eJw@gv-F^g+;Pcc#wR-2Eg8>j% zMM5QBfgJlyc4N!$EX$m3N0>!^UY;zi)`rO1z+&)M>K!9; zBnV^@7uGV7={kVQIayCHUqJguHxS5}(``hsx2oN5lqJdr!e|MIS74n0W<0zdVcr~KWXkPa58_vftj?WtWp;er$=Lbe=kiBL0C`6Y z9KuL7QBhIZ_lgFIlvvh!SEJ)30SSsvr&0SCUuP_N4e+MIjCU0nwpFB;OsI?!-GITx zB&~PdzJTawQC4Z`ZA_B?GzC`9gP;ujrp~7k&Jf^uD_tJJ`{TY|YO3VFk{)J}Bkekv z`91Bk-miskWD6}pS&+3AOERkJF<78Pu_55pPfSn`Kp^>6Yu)|Q$thfjFb=wX3l#); zR85l>fBlbTS*+lO#QWFS6X%pR2Zg*jNrT1{Hq-vv-r@iaNcG%UJ`6w{-hJ#{BVhP6 zc=-w}gid1&oeb*c7`Qzt2;X?r1ZiJ(_Nr{+rwl&LREB#tlWs1>)@0=5v++rqdV(ro zY1sEIYC400InJB#2;LO<$RrVz1UVs5T4eLV!GaN-6iF|zvTg4Z?3!SJSk-0AX}6FA zuo8Dd5-rN@I}iqHTmL|_ZFEB)uaJHxJoi6aZOGI=+^Wl>n2D$4@d+9~YSs#XiqaM1 zYQb&v$prjMTHMlIw( zBma(zf)o}z!X1nYetoAv#4U2MZ*9dfcC`TOwd%GBVQ)$jnG=4^5$*CZd)*&cBFw$@ z+ETI@0rwY4L`OeI23y!ee+~3G(*vzf3pI0Tw4P6SrKni@}Hy`6Y2W^3`Bv&0Bs>b|2-%0N#J z*{`OirmD&d28%r_@wOphVf+9w!8fa}MuYJDdYEQ6_es0NLkT);&;Z!BZ6Q0Tu#CwL 
zj*j16k|r^TSg&*_{&MATdyiyEr6fgx$%F4JXCO_Ggr}#abzc)NsjaR~j^Y`=W!s~f z!JP-?*FER!5kR4Z6W@X|3#(>-)?q6c@k7OU{UHD-M8OP*K_;FuVsuDBc-0$1y(MP7 z{pU~XTT+qp78VxJs)=lDF1+7Yo7>vnM%Jufg3-*5cysEKc3bZvIATC6r6PSI&yb#E z8dRRGQMW)lD&TYr&BngYAfyvg61^|~MOO^L^_yE^M2}t*2kri0?##MZNrZ?my`{!F z3146P9y#!0QZ>6*gT`>)kP-vb6{!lhcXmWHHedwgz-x7fWgp~_H;J&!x$z>Xn=MHf zyG{LfU&Nn{A>OL5HNeo#zTbms(vV~F3|496E&DXbAZpygr5Zvti z44r6a*;m6BasZkUxXj_E>$LT{Ex)kv=Pxp{=*rukd;VejJJ-Hk@_7jump}JBOR^q0 zUKh|IX$04g+STbdqNPP(xs*V!C3bXc&|kur9v@%ETv@t3{&t~O^Sq&9()!RzD#V}w ze&F+y2eEpYtb}YQUyzq)*W`2e=7bY~Q`wiOtW@}T?%oul&4MDm6GQ*u-y?xh(#h0< zB+{5gm8hM^>bLRGO+wE1pg|{xYgyd;=Oxk z_U?p@s}2cm+UYNk_F%PT?KvKB&V&X9HQLr40&!ktP^=k;>%V7gOM*@G*Aw821$GK( zH^5x5Cl|g$Mr#XS zS_(zme@U(KM`HEjFRx%heIL%p3^g$1%U<>bTQ_Z3*m-CqO;l4)T=N z@xM?KG2j_0x5{YBeuE1F-%+^0K&?vWy|gr%FSqEd>F}5{>$?D1!~1|dQ#i28@P2ys z65Yc4QW?I}b*73A`le^j=+w>lIXwxGU}-wQ?HJC`(=L)Ce^z{xn!1{7m=^zS9WUMO8gH8WS?Xn`pSa1t&*8|=ezG2C}1gK{Gp-uE^(bJgyQ2mn<7(=HJ z&F>JXWu3O=sQAl4D)L2O4xc4_=0$7k2nlvJ@J;WHhC6mURo6sp7>EqNAr*JBv$Nec z9|H8}!*_eAbzo>HDSKM1k`{r97JAe}+cjdTT|4Oly>~zu3kf>(Tne<3 zg!DP*(4xG#vElTdU}6Z`0YAfwP}RYVu}+Ky72#Sl9yyJEe>h5GptszsvU#asVsCIJ0>Ctt0qH5ZhY z(nm)q6bAz(97aQ5ieM)E*f>%BSgi_3(>&QZOTxF`XCP;#FVM{QYGh13FsOp^8muW! z{w40UKsathptUUutPr(QAG^qYJ@oGu&Vtet=Mk}^j?1LoUB1WUqv|~|dZj+*=tB+GE?P zXjYy7jb=uE^6Jz)eLDP23lMmQ7Y1uOPMtrJjWphfCFJV|Z{O(Yg0tNZ!%3=RvZGHA zElr6OFG)M`gxA`bJSZ#Ek#H>2sinqzUKtuKD~W`Oz*f{1I3dR|drMUYlWaUGHaU7M zoy_Dl+l<|wqv4+)+Y@!nA-P)3sWG1Htb`5-su$h&FbujUeKAturIMBw9Z&GR+h(eI z`(IS&z~pDNef@Hu(9D>~7T6S=Fih!BKwN^#$iyuNTA#}qc%pec{_GeLt_C%{b7A4T z;!pi#+ofx)mjg=XRV`ZdB#=2hXG&sr>}MMbLiBe_;yP#Ar@{je4p-J&%v5)hvJsJN z$E3H;5aDbB=<7e#E1hSJ5GqP&#gI?#c4tn#H1`B7Zqt zY_T;AxJADU{4#Ba1oYD(1aNLfIr4@&LibZqQ~-s&tQ(h`r;ZL1h_nD1I|+M6V3HsX zHkHIXXsR$&edho;sYy4mQfU@9By(B(87la!I6XVlvVFdtV|A1);w zbR!Yg=|$+M+YY0gS9thn_Po|8XeUn_$SNicLSQUsMS*3?D_;NEK3 z{t{J7W!Al&bj?pPRTte z1uZQLr)O4R4q#)GyGOHXy}+`fQ1@A>E`VelI=I$x*K60L=vY6HDFmK7cW%R|C2(#4 zN-sfApN_NqvU1OZ!@}w>wkp>m)Ut^5ZdpYV5~fgktik?WNPhaW3QYeJ2~gf}3;%~rcM$#f9Yjp6P%QGu3>KFtE11ZaU<1ak@uREZk?Ei!p(3q`@|L_7Pt-eB>%#F zPV;t2@$e?WFC=!j;_(WPbYLA!QfWOAyB}41>?U7SRrT>^&CroFie47^^;JXlBT)L^ zUFkJWPU-rRqo8#is?4DTpW=?-+%h@1uTmt(%w?G;ux@B`wB0fr@+6uH#J4o zDNE&i!}_uKsZ*&wSAHRGsE(g|ue(+>`SR7_Vo|(K#h=nCDW`jV;F8r2B^bg<@+0or62- z+HuSMZ~4+s8ym^V$TXI2rxU+Ed%O^PSV8gX!^ZZf;Ya@&dvSK*{P_Ao@G< zsO#l}#Dww%aP$K@n}*E5bi@M7nzW~7!zQ#WSHHdZ^zFLrsD90q&;=)_w51Ci?=eFs zkPJ!uZ-wWYpbUb-2DY<3=BO@_0uRU?G+^*u&Be-{T293ro>Z?TDe>30DEDIJAb3H4 zrz3_pfHoyX6O2a2d68c3?%_|Z{6QKBeg#Cu1jn?!fku^U;7V%{{ifS2Y z40b<$yTQ1ouYmj2L%>_FuYm_e*Q1W#*A{YM<)&X>KiVv9t-G+05=YX3RN+PqL>wO- zp7w)z3fBW|#X2F{`i>>7q z6c~44Ct7*DAAjQ71@IPBj7zBxTNs_0tp!5+JnrpG}z-XIRb%tw7F`jTF5fI=6%Lb z?GDE8@dP0Y>~dZaj7{S$s#oU#jz7q@Yt}dUESYT?l$)Cys7z%C4n`Y^q}g(T;iIag z2LSucPstnrZOJ7$(lOz^;vCPHHy6mTW%2Pu|DH`4bzGaMOM;i&G5cE!^Ty7GG($r} zAxm!yi=oj`4zCAOpN5A8N}~_IzQ3M3HYAP+FskT&Y31wd3q4@H_(JKy57Fa3U2yjW z_!xFDBY>5+Pb0ZDcJ4{0%7aSb(N;`?_@kx-0K&4Wa8*?|tJw%*@Q^(M|BIR z*#%Jhboh?ulfmSK^SmKg)1?-d1talA>! zf!_dj8yVn6D!Z)9nFDnBlXTcKFe!M)h z_A^)WMR%xe9iLrAqm#jo;7#*xuR9}PxlgK!z{+D0R`i$8IC3vS>g<2{s4(a}Prlgx z^z09$LwTtOl_zjyZZIngKL7wdKi}}Ec?I|Iu+8VwDUlbQ)zEvqF=dBA2|eic7OjTb z+p3)H#1I%Zu>ZMpaLr}}b^_s74-5{PtK5eL0rwRyES$3JHOKB%m7D6qCQR}nly(eb zVnxhvF62S4z~lYUgIqfIa;=D->b+6O?z1>xEnvR_0Y0~2rEPKkB91h{V$|SHl%|$CO6=Bz)H4)g!0lU>B8uN_ z=x}BWcS@^QiPeEvD(noU;Do9{c@g2f}1UZkvk0u@V#;(HZ0^On_N?&7SdRyFmjuCZ~G za4ug}QGn!DQ$00MbmIE$v^0kQ{=rq9eP3lX#XUF$?w$jH{3N{$8UO*(S_HDA{gpE# z85-P0(~FG$mNSc5P-GD1?Sxs;m<7zsjKg05w4_qkSwVmc4jgXtX>tt@iGv4YhvG4$ zs^c-;51{)8(^;Uh1+7lDJI~q?x-0?sB~pprjv^2FR0D6!o^l#{85P!99|AQ`+0P;? 
z@U^fc=Q7g~x56Y0g8y-d;&KE3H;r0SdN~~KY9;qnO z4XhA$c6OPIFLLmPH77`@CT?@80$NYNu}RQp-WZ!moktH431g{iG|Mg{r*JP<3^W7uVNbyXa>NWds=zt59P zK&+n6;=Pb#uG&ZYWsOL2-f4n1>yuaoSdV=fAD6)A5FS8`2uD24Wfp!Ps@_nr`NJUh zz;>dm9cWst^ciVs8UPlB{{Xa9kqEB(;J8eHB5TfZ*Y`AXFbxh4jt;lzczKDI)B^c^ zH}K%v#MH+c>+omw_3xLNr!N3S9`^QyJYGG;;(CXsQ_^!hNHCs;={9)(oAMH}L0ui) zwDIlAfBTaW%M0DVz;ud3v+u<_lf_fB0Gg+2V=yz+tjERLvDbM}0HcP0_lbjNBsArb zxfk2Q8&3F@ffPziUEMM2*~5oE13BV*-O87wzn`PvcRt<#Jk`V9eQ`Z_{$2h|4FIc% zz8z3s_3?fOPrUH6-mnQzYGUCP9zxQHQLsvLb(l_+M zM*%U}HLO<7EcO%ndw>Xy%w!F$3A+?X5PZ#N#igae$eYko_BTCw!az?YOYpo5(1_IS zX`E>X5Vs!?PgM2peoa*gQ~o@VsMc4&YnWIdb|yu%JGNtjC>GhhE5la`P6nVjlql)p zzq~N^wxI*?$)4T<1#Z8%u#X#amfDB76ITzX<4R_g@xQe>rjLx~a)Pd0`3V*@dT*w= zr2lK|{mYgth^qmgbJE;EO+$WKBczcO>WIY204mL(WNNj`AM1y7q@;0cd&mP#ecUrwQ`ANwg zi{_@7`682H_9*0rh4s)h=^}WH5^ijOa+Lz_O@ghj8PO}GE8STt z>1Vf((nWunaA>36#XnK9Ok^k_m7vasBQXl!$L$aGL2V+})hEzBdX>f~`-&1_mF_(| zn%buKp<_TbhKA2rpw6U{W1l^H2A2tRQRDMQp2OWOmT;NL^YC>6LRcZhf|?I-z}*Fu zT115rwHLl4_^F9;cAvaR(Ee)^`%T~zb**^vtxP*j$!BKSXI}*6{cUddyG(%Pf+nr@ z<~s!e67aOsdQ>~t2L3CDeY||Wltk*KEshl41FCI^!OQP+CSfR(UBX0(e@b{!F`{5u zrIq*<9S(B5-}hD1>VQ?i5oZw&3c^3BuP7=iqCd^7kspnNIz)xZ?!kqQE^DmUlj0|W z#Gilu{HY`Y(1OylFYWcCg?v~AdHES0t`puJdoybg#kwcMozuSrXdrE;F;>I3W&h>` z86)@L$aSlqLcpMG9IpT`&C84FJYDa`if310ONh)9DJ5WEtaE$g|Qy@?Mgma=E zOIZ{gkQ*`fjY@6`vJG}!+=Y)erdGKX4FFC(aR=+ zDm-+$FOMJtXY;G&#P&>kldpZWhP@p^b^9u?k{`)2{SfatM7dU>MhIpS0$| zHf;sz=)^rx$So{%gf3QRSM6?HXY*}2i1o9Il;)5?jd-wf5i)j95J9zrOv0zaF_rE) z4w%nQ%nr3_Dwy7GK{2*tp9{+ew?MA!j7vA!7;-A{zEWCvJtiC9f!Klu(f_-KnQ9deHrDkd zSi#fxmo|X6Na2zCc`hp4SvYUU*^G56M8V~Vv2!Ogf)VIz1B5w*o@fkdii;52IORBT zCHWnRqeq`C$f#3kwvhdsH3P{>t0MSo!HRsbtl ztD<-*SZ%y!GDK58u7s$LrI1dONz5WU-$(SpE%nq1S2ytKLVPd2xuf zMBOTlfmWojxVUzEh2S5+_h9|=-t~5Zqnyp(3ks%Gj^%gk{t7)kyLaL2SVH}Tg#RO@ zQ-vV4&wB{l9mii_a87E?W1=Jg2X`7djM4OwSu}};$7IH}gm#CI*ajb_uJ-}*rF373 z@8QlF+>u~{g?^T$V?bgv{&&!a`v$!+J|0vhu}2X^xB{xDb4H8T=kU_YK<$9_eVyGx z1!T^uk7XbxN|c-9;Xp+-#jk-|U)eiKNePLdKbitJW|Rp|DkIF7wf^Tip@%}if3hwq zAq4#2NQbpKNXJUt71e_K^WO{($LhPlxP$p@b-rXa61`%&@4;#PFXLp`7@rC9nu$@; zVvAzFQTtcER{oN10kw6o!skl@8kK&_dpc92Pb}A)4H3P1Jr|^uyXv+?&a6Lv9W(XR zaNK6UdtrEIDWZm)2xohFo144m;mTg%I%qa*nBCaxMa8irWm2_?Fp>{B7WT+9wtp%=Mk>O)^ zuP=M3>`Hym^TFr#Vyw~UE8w?2laAx*1@h19r=4!8v+OL7ngl-=*d+6`t)C5Wz`c{x z${ek&ZrtA*v22eByO#Syt3i{GabJjVvZ}xp+ll(U_OZqzir` zpgi8tATHL;vuUSL)D7z-+`J&Zc^AkxDZSe)b2FC@V#lLe@ek`_4%^SJ=`xUXQ0p*I zU80CT5q^>)ek$<6a;&ly(*SYv*>$pQ}(|F0Rl9%s~k9qo*MjL;Iyi@Hf3QALLe{W!HGhcA!RmOV$ z`^erfGcz*=)EUI08I3vO8VMTX?I1K4-3E8xXmQH88QIhfh@iL!PwmtJ`9yd-x@OZ< zHq>&!;>m>os)gU34d&{F0aoW^HFVc_6-A`w(=qN!!2x!NzIT`Vs$>=4KedRi3qUTs zS`xwJR-hG45f58|gnjW>dohOat!N8!GHBA3E0m0tgLt8k_KwR&6d=Z~VJ@{_Km5hw zL9__uEJ`Ar+>1NQ(y`uokMjlvI&$TFr5IGzQH>mwTA5#lXVxiab64O}6Yt-4lIqNa z5ygi0sGUe5JwL9^f(T7oagLSFN+g1V_zvlagP9(h9~*!b$fQtG7ZO*C+L6S3zJ?j< z#*+Z?Y7wXWC!;2DR$O#Vdo1~+=+Kd=w-!}lirs=po`0#dctO)UKeEbSeFLiLQ z^xD@n`^?Z>TTcb+wH8uKg~s@>wsKpxQ4jqg(ZZ=XTCJ-eI@Kt#lvtDNL}+_iUJ^`i zX4xp2Ngc?lvpB|7F}>v8H2BKP@aF{(gXGf?#EYg+80WCgxG*0`OG<*A3nn|5s@-h* zyfdVW#zLI~%2lLjgqTcC+~ij~&1ZqHB2>3BbCUhY`*XxcyZr`yNUyR5 z$tQ28)x|FmNqgNy16FYrAY8XQ1qg@w9a|lN?vO-oT6PZZ53Rt9bo$i2m;Yn_{CBzz zV6>?5&{-$w1$N$4&Jso967GWWW%gQKbMrl~&xS7i6{5cQ=4}^J#iy^O`Azd2ZVqjz zeIr4yeLnjHZd7S1GJ11Z(-uTU-_XVpkpuX6XUzN)!Vd zsU9{(T<}*7k+w$H+3(>>G{K`Vp>V4^B^Ck1-QK^)Oex)v!&~TSCEYd;FAr+Ger@J# z@u_Bnk=uxNBmZW$2;0%dMdY3GT1_Z8hd)=IVoDJj^epObMMYn#<-C!;x^P#`>UPq( ztCrxnTd4MytG}e4S^a73@%4_77i9Po{l_w+ol-@cVgThx^u)J(+ZE*GqX>|iHth=O zUtqPm#>C8;Q(9Vo#pR z4gnbDyE7*+B8;djQvN1G#H2*lqKEHP2-0@%Yc6gnhKz*E3FxCdU<+sjbbFZ(P~uV7 
zE{%}!4`hEel?yg9y7^uO)3pU1Sh)paOdfUb-o3)HL!cJ{LTfCv!6{zd`L$08N;03% zH-<>?`6qG_V+n64js+7auqGsU(;6_#UWg-#s|j-8EaOx0Ke0;Gn@OgMESObU@HTw+ zyZu;8aJ~pJsOe`j#iQ{Odi?TybIp6yBMgz$BOrz|k*#G!0V0$9`znkt0*q-=x=lw? z$rrsmJ#&cB*5Y(TXd*Nd5+}g)_qucOhJR5CJ)Z`I7!bW>pQ^VsH|Kmb@PhSXnLvu22?f$_T@0p1pck!58VB-scff8CAaVpI!4U=ETi>8k(c-~yvhbQ;jLoT z`TP4T=`QHF$cTqW!p&x@E|IRTab36qgw(Fo0rWi87eqb0(2V>Aw89@bunKZp$Ac#( zi52w`<^q@<{gz3C2^s%qR8=9qz8SQ;j?V*y>+_Iwo+@w8lr-AU^&lOhhyFrqbshtI z^zSd0b#IlIpTMGB{}kZJJsDS)&9vbr9q<=DPlW{glq8K z>Jd|0@dK;$+=kWbaxz7}bR(6Es!@z+YDBt-@sB)9OUpvFrIw+%x8So-IW}C5vt{`e4}4rF z+au(EzA#e?!XZ^Z6`}v&oWaoJ08czjnG6INgUO=VSl#02{F$OWAn-h_kY^s(&!RtGK7IgV<&WFLxfYZNU?MrVv?0soJ+J=T5C~*64ie_bN zPoQL^r40obXSB0e5_QV}n;~JxDiseF64s-3H~Pl%`Gg^H#_zQ8`ljVg&CQKBtLe25 z@aLVuA^2$AuifLteM}*Y*F-If*1n$p2CvnZJq-T?ON2@&5r3wW^$47Zhwz_~YRr1f z|9O>|%5aU6O@Ch1Kz*f~cXJHr`L$Slqh^TJ4)+_dgCM|c51yV(R(uS%)>NK3!wwMNxgSNZ zt{)aSld-M513#){Cl>(hufbsn2xyRrOX$M&Csa#fUPWw*l<~beIsJ8%@ju(~->);@ zhBr&exeVxZ#hEkIQj(I%W~*TDdhf<_&%XPh0IH(Z^Lp_Pl|L-;b4genp{b^3`hd)2 zh|=~8gpjWXN$!}+uWe8@k>DMS*M7RBvR^rw$;SF3>W}ZR#|il+qb#kx#RrnEi)Ne7 z1QF{Ev5-gNkmZl7_gBr8Qjp!A`(t*J zp3_-T`#Vx4eZfWC6OI@*6NhZ$9pFJsF`-7Te*gCtwF_Hpe}|9ZiUEk`!ErT`9n%S= za2?Q?1X432@fDkG;@lPb($TRoE(X0ZEz~VDUZ3-zhlGzQ#Pm0f<^1%@EEj|L+iZ8; z|9&`1Y|u||ZxEosX!IM@#h8u(oBKzR(P|1!t__lo7*C!;opm)!Q^?yh?GCX{Le*#R6f)%+~|K@<4 zT**sIpau)JNtwU7kGJY9G?2JuLep4VB+6ud>f#?E9T*F#?^QH93v(C?GdtLWxGlA) zqokx{#kLawcPacdaQ!_lrfMJU&Q48Xwdcw1QQ+ezJ4bj;2j1WXEXQiZ=}L)l89_A@ ztzj|?DbPzlxK$z~mU*4M`6Odd7FG8qB zz0NY0Nn+fT`{`-_ykDeQIXAE2L_u&Wim`dYvIVN>A?EzjQf+nhGwhn;RlU8v`Y-bD z$v88~Rn4Cz?F$4HY~*fb0g0FH8^vhZ5H;V%&8WsT%%Cie5&Qi5?(|K#t%bUSNgIhX z&eUOezqO_Kjk>&u33n#zGX6foJe%=F?o9alOk#gZn&E@s57~haL432&6b$)QJi`2V z2?m$~r)R+tw}$7Y6&EirtGkL;Oqd0w)-91GfPj?!>W+u7;4`re|0NtjAwm#{{cr+B zN)o&aR-Z`+pqls*1}0P#TLPPEX81F3tFF9Nl4BsyxTFh!v8av3BM3wm{s*6dn6(sX zZ4{q<1x)LYe>xvCwQ)QtS8`ZgUJfL}9&fke352JgDyLg`F~1fU`=3V#1u1-^7Sc(f zVS5p@kOA_?=jV$M=u8mBMQ(}7DT-(NKcW_4fHh0Ld#wNurV~KqD0^fr0N6dhcW`P;o(siBZm!WRv?DqQF#V95M!n z;-9hyAH?R$ObpE}EvYRG&5`I7k*%+dUQ1(}PQdSsH>i1Cz*hHf;mgsgG>4P<+x0ZM zpt>b zh(!DOHHY@)Q|USO`yD70AN_1U`U!OsNSI1$WRx%F8eh;NJdkv_FFG{iBrXL)gxM;;y69 zx3U&;-wN|;Mk?<2wou~D{9t@pQlbT50O1>sxnU|zLwBeHI1D1tS?0W;b$_kHaMkDp z>$=|!(7Zq?QtcIJ3^^g*I#dB-ad?!bPz?|-MjkvK`J0XdJrXdzh>JZ5E{Z11rAS4K z#b04miMj%BL2dGM4v6BxZMVTm_v;RrM0orySpbcqBfJifegJ)haadvkkSV7YlYmX} zlvWT^$$-5^1Y)2jQ*3|)t5Z}%dTQa$~ z9Nek&TK%+xBIgP7CdjBYMl7)ig1&w-stRr)PL{k)fB7vv9i7i|+tby0s6ks1uOnd) z`MUxQ>K`~zL)SR)`7sPgMI^44<>omsjcZnBWfRYPSX&EchBU5TAJj6*{IRlI;Oj#f zp;^8F5-D{x=NL=N+jHXzN;Fb$uRo*2J1sVWHP(eZ?g>&%hQ5HIVRtE#d@(VRbVq;v zX#Y1zvdGn=;Eof(mKvI;EEkklfJu4fJDekg0~bnf_Y1HOfcZyo?-LNO1zHT4U)?H< z4|%EoGLSI!wT`njZ~wlH)P%tnM{(u@_OAN&BM0CPNYGr%Et#7?ZMFSVy`jeLl!U|~ zEgpvA2zF^8R<|WPuX$ApM_@YSQP85zIf=CP6l&_9-2J|S`1bYdnUJl^G2HcdFo&a4C5W(_SQw^28<*-7HE*)Qi_^$!? z*pj1t3%LAt&Wd$kaz8l>+>Xc5a5>}3MLSlT&U`<35<8}!Df$Ay^6?tS%%um@ZG3N! 
z)ozXh3f)a|Zb2y(X}8x|TSd_VotP)ZI&oqU(VXG8PbO z_vFfGo@9+LJ;K3M9R1K;lpUMCcSSAcp3bBfo}P0{KH<7ql`ZF%HQAw5@g*;OVHlC# zLfZs#K>PS$b5ddP8rfl%#c{q)yXV=;?_$=!@76TG{iyD_Y~C1seCDHsZHt~cElwaQ zw6p~Hz4K$+{&NGYZ_)i-_=j_+t&jzjnnZlF7muhudSl2q7390PRc&VYPD^(M69U)an# z+{4hCEswXO=`MOPdoYdsX{PpMlqr)TLlR?g-{TzSGcK!mFHuEa4kwOLehYnFC1Olc z?>N`Kl0)t7pXVg_x+SpA_H3``=j1%e@(la;Z$#s^G(h(Lj?Y8ourbV~zcsBn#+D8Y zb9ao2N=|;>Dr@Slc@CyH+W&53O$+9%s^Y}4M7VwM-1b#a&jFtnGw^7$Q>`>sk6Pll~&c@ zaKaP(7xsZe67_Xl=lj)!eExSYy^=mI8s-aM4P`j_Y{0I9+)*TBGGdER(c z{5v04?I6s&t}V=nrPd23KC`47789eNoXg^*Av5PPrTFMp&+#==wT1_)s@^*rkP(Bh zL6pi#bY9ny>BSVfsS%@5u9ay@Z$mg5_#7|U+XEerGJ{0xY2x$R@!{dlOC3bJPCKm) z%1*%j4#jw4W2asteGP(Ke_Iv130e^(rxbX*lz^tRZ$LrneC#`Kxj7NRgi>%)CXy2? zT2=Xei~e$7zG%CumD?tvM-3rDvzwv!5w&Mh6U@*(5_$v&1W77fb2Bp=wU4{|58(e* z()mHo9*efDy9r(LS4zFvS2kX|SQV5)nSDudaZ?bz68mT}h4a2) z`K;vI=+EV6$%&n=^4((K(Fng2By?h&;=w56S;3%|Y}0u;jPgbR*!ct;zN6{A?ENR6 zd$cf?UEwk(|K?2+x9zkdkCimt=3A@Nr9>`0M;k5Fd}O-|PhYwXsga}I-1A$PR+i#7 zrDL5;h|u#}UXuYl5wDOdCW6~~1pJ$q1O^PXGN^r}9vu;!8Zng=O3&JQWM)QSJs@-& zir|HXLbWG1>G=))W`LPv)Xt>GHKUL9VQYR|NJT%*sQdK0OhWh5LwiAc67D?nl>Mo3J%i0*c$1Hmj4-1dGz`)p}6HTM?@XlDU2@X zD~UfP!;c1PoWyu!n>zYO@lCyL_6JU>34Z|z-o%T<=cTNr_dhdis#Xjoh;o~Gx!Q~+=4t%fR3&MYRF?2%9i$lJ@Zr?30@0{<}yMn zldX!OtWy(y(CP8329nr{PX}fJNe%7#LX35hvormw*T5*GBk8}iwa9`w9=j7KYYhg9 z&S@q4gJJaJ%EE@~#D?LbhD#)G13AR~fg_Z*xyDA8(S{AQz-akTVKSE%=I6ocuJhW) z$C;CGq`TUmbo5dvQ54<{@8WAPA0WZ=WW=r5sXn@wN>2If!EEw6^4^q7QY`>v->n=R z3V5;U3SWd+Y2j7)etZ7xCSM;VdlsiX?w>pfnDJ5Kp;G00O)|6*bwC~w;a~U5XzN53 zF|yTa%4<{*GjwQHBerR$$415~C zK+3Z|eoO0_J2=Nd9`Eip)p7>LVOsJsOCidi)9+S{H(g=BtrKLVDvBaz`zs1}=w^vp zK5@M&#`pe+$m{QKjT?gxdRJ-?$#1^oiW3j&C||hBa1w5Y?hLy@j%~PhwXhMP2!ju` zYvxR-goKe_D%@L?$qV#Sy}Sz*Tkc5Q3C*`iw0s#Q_K*bcuA9wsnP5y}mDu{`%^TSC zjw+F+u$Q2u{6jF-c=8-J2N+Gei@_2y8=kz=$n6;|55UWMdrkf$smDs^U zGkngvf`gcQB|q+7gr|t6Z(I>0otx{c@`6DV#9(OAZD)53U8KcWz^v<6xYacLP6A#X@yD=8QNZ)PQEOqj1)#A?CFLH-Ua}Vg@%OW7vvdwL?+=eMJfTEo*WKJ`}RCo zgZQbbQ$`Y#_IU%2ZFK@RC2A&RSVVm2_aS>83D-|QhkadFiieqmzQ@N>ehyQ}o!SM; zK%&6wr{OfOi9EfOnxD*$?wYB8|NJ@EyN?iM#L2FaVlSKtXpzmiV}_>R#83N=I@HI) zbPx~<)Q3%QF8M0|zBlsyBK5~Mns-Bu*<4A6){lGEI?lsorHh%yR*4_n#aM=^u_i~% z$lxcvjAR-;)2t|oyqOcs4ugC6&{y5{a50STeYfc735%DFpug>MST*2!5G~6|=qel(FrmS^eyKWFXz_5P>>bM0+Fz6Hem4(q#A51O1(8yH8V+mU6Kib~J16t7?ifFp-M3fp^79#^Bw z10xpGlXR&%5?M2MDDYerDgd5Xz;wJR;Vk_1Y5#C|%+tJ|U{Bz`irDOIZB31Ro*{y} zu^TDl!BO<=*XtJ4c)SsRj8N7-KOY}J@ZR9zqWbqGm^&s}@vtO>GGlwp0V-d0P1Q4J zF5npBEt_jlEun#ShDK@YfUOqB0LcenGEy0EJLENYQ>z*JKsOR%2pWJcUdQEM3Ex<3?fCv45dvaQns-q zq?9G2OeLOCl)mfkdEfW=zT^0wzn{j;egA&fa$e_oUYV6c*G|PUoBLxvu4MvCws^kV zm6mMNt0J8CaWA!J{?NgL=KsQc%nyc^#m+JjX#5j$`bo{q5tsg`9Yy z1^N5pF$bSA{OZ40eOgr|ahI|CR-8TMb>nt$OAP38d4HcBTadj&$$0UiaP`;ENXI4r zKolSJ#YRFe9;dm3TG0!Aucako%@|q4_T~vM8?6#etr3amNR;swbdtG=rkXj)JfrQ; z@PR5SaK-qy0(t#YB_y}@ zgoeEu9W^Wn5Pu@;39EvqWU9V}0^IHq%*y{2HXL!1nov~t%p#|=X>gWhNQDwGL)$o^ zyrYD6k0%(o<*ln{oIylET(nh4Jy*4LBHDIjWaNivVo_44-iU$Cqxw zc9scYfr3pTs}Zb(vmp;H_~~ed2X|oMtoS*ES=!b%HiWwZ zx>|S~Hyf--s@gZxj+qPL#Xpw9+Fx;OHh8Q$d>`qw4S?&eD%(_Vfnyx4EOu z+Zqeii3S=lnc4H`&*p|(5Hx8(K;R(k4<{7Q+-9Ynk=<|0+te7=iEOuYd@@#Fm7JqE z`P{$cEdlz(V&P1e(?7f}(neWlIZ46X#$DI$fD#1^jedK86U;F`kZ+m1M|XD(f~`o4 z*#rxWvs6r=bpn-ZdHs*sir)XhYkmp9oLznu{_M@pHD*Q3>VcsSQ z%%Fe(q~`z%5h!k7o^FR_D9Qia4|~`gd1hywYfhR3#?Uh1oX2h=9kvw1_~F<1v{Lc3 zFCiV?9wUo9M2Pb>JGWqv%NsnB#cyc7M@_BKGz%H<{?fR(o3QPAB3bYxv6!J$3vy&CZX(z3YH?7 zme9(4XnPNYd(9ixqh=7vBp)B5bp{Lz+4u@i4bOBKh#m)9KI7<5q8ReIt$Sv&O}1UA z(X%K__kcOhf?)?SNSloFxE#ot)rq!PfRg&dPkqk%S8+AS+A^~!fW7eJgMzWjps~QB zg6-pedCB#Thyp|0PHMj!t72oW`9@}DMe&Tl` zpIVy*N}ZF&TgFBDXA5jwlo_9f90EqKHZ_N+m$xVXActZ$!3aOYa>wfLapS3I-iF^W 
z+Q6z_g~#1B8@^99`fejIi@26LW4ITqxVSiI<&(g8Gg~J= zfP4YwJ^*8Z9P#d6aIeQI>f-K&J?ZQ3&*=0{;tL2eYD@9+>)uYO9_Juj1J&&28NE!h zDYdGfMRyh49d12Z{{-l5YQRGPmOLg+2&lY$1P|R#+lPrxV)~cz_ZeD}1yjDl|6*kR z+^2bzoYjrl#zs5CO31NDV4;mm==y_R;TP=r`?YRO$yjfVd$zAw)6_I?_G>$rJugG& zH`uH|@`39-qB_qT(p|){a>=*77ggb86aP(OK8t%pY|DpY12Vh*LrP(3we3o#%YPAEz>@C zIiNSGm0=)I*`5s}F4B$yt9>3G@4kHtqoM8rb<=_Yo*qtrdVhKCz4Q&RNgP_>adR^_ z$F1HouTP_-24r_wNRtS5_6c_Wi=-!f?B4=v7~2FrC0*ISXi z_KH!OU4947Kc6KAgjNMCFCqgpPtR$~4CvgBx6HVg}%3<9w)YTR3o22p|VLp!J{32v+NAMw3SzjG;Vf9A0(5>D| z3dI3#tt&)$_uF8ljc1kjVd77xmvTd(FDqw(GMjVoOlm@DadGVW>LTgL2f-l_WH2WU zQ}b8WT6}viUf-XV)~tDBORx)|re0rQ^XM;2RK@n*S)I(+jqZiE3b`R#sx21ZrJ3!@Oka>^^D3sgV zw=ejvT;;iz^UtjF%(CHLp{jF^&-J|yI8>UPp6e01rFrk_)p^Jr@|lVSEY^c}4W2ZB zpvhdxEsZxyK7-Em@7(#%b5g5A z_%k1^EM-kIvkov+6V8r`j^^X#wR3dbc)ty{WkTXu1fVx02+NoY#|0jp=xZJXLuuz(MLg(P`ADhi)jY2MVNWG@VsRE!~Wss z0R>?QZb3jytUR*=etN3Spy0PI1&GQl?PM%{;?eIu$;mnan6F20A}P&|m3C<+ zpM{oFYe-CLAsNZ=L$pqah|qo?`ySZEV7L(&80hPJRV_CeVxBKCM!v^_*Ipi1yKtD8 z`E5?^23jkV9Nk>i4#i-w67spi$UtvE71G23S~7@|O}9W9*G#3Jq?~ku>AEgVRb{VA z?`f*5uU|eZ%$*Sg&M4c>1|DTakEAQY*M3m#UuUZzZ}9hkprzu$VHC>TR`kTENCDds zPtQHy4X-1(MY^|dPTd$w9BR9pt^H2N59xvSq$xHUw$6ffiC z0MvrB<7c65-Ab+EA|<)?@jne02~C1*NTy&xJ*%v)E_TNWCZ~n6q--)yVVb5R5-;OE zwr4Eui)VV}Q1OfXUI`lJ0!iWfU*3!}(TGt05a43c@d1rI=nT<)@&$tJF5t@-5B^~H zT4i2Al@u7LS{hfN^R(VNf7_fKyg1QX|2i0hy{LWxCl|_%9mb}P4i2BLjP<-L7Qz;4 zFG3HP+F2O#?!lrcBpiIJ3YU&n6x8!Q&KoqE;9LBcYc)04Z_;jMks=t;OgIZdrU!EO zDlLM6n^E+qOCsKMg{b5WW>e#29rrp7e))HrLJb_F&JVYL967lg)ud&PD3N|93KxZoaJ-P8QPm}%D*FWFdHLSpctska@`bfK{hQT&mK-#Jb zQq}bfZoiN1umdkDp(@K=(&nmSY0Q%jp-tx|vUxqRSRE}bi({Y91AWmd4aQ!7irGhvrQ_&QXk3dAD46vOPf19^FQB`-tcpecYN#Od5DEN4pnr!t#-oCg2F zei^F)+kN|B{jD-LyzAt=z0w)R>%eqEe8VL&H(|_XWLndpWGP>u4c$&9Ym+mo$(k1q z4scsV9uUCc6KJ{)xu5f4UWqIjNfuaRGILmUwRE%Rq~rZcD=Uk7*x)&nb2`<`BXjHS$v7Qb za8q+}a`O2gTFXvwOodKaq1t~d-AIfyG{93dCCrPhIr8_nsYvBX9daqJG-V~+@}Z$| z;cl#iZMi_(WIY0BFM?62g=Y z@*BBXOz#UOISfN-g~XjT?`ksHJ1EF*zB>l&AVj%^m<6?*j@k07b|~3K(X1q*;zjWf zA3=Fi@M)lq<=mehH6McTi?!9hbdBMD?-U?}F2@}!Zd(Iw;?5sCCk{cC2E1$M9b??) 
[GIT binary patch data for the deleted PNG figures elided]
diff --git a/doc/source/appliance.rst b/doc/source/appliance.rst deleted file mode 100644 index 9a7e9662..00000000 --- a/doc/source/appliance.rst +++ /dev/null @@ -1,339 +0,0 @@ -.. _appliance: - -The Service VM (the Astara Appliance) -===================================== - -Astara uses Linux-based images (stored in OpenStack Glance) to provide layer -3 routing and advanced networking services.
Akanda, Inc provides stable image -releases for download at `akanda.io `_, but it's also -possible to build your own custom Service VM image (running additional -services of your own on top of the routing and other default services provided -by Astara). - -.. _appliance_build: - -Building a Service VM image from source --------------------------------------- - -The router code that runs within the appliance is hosted in the ``astara-appliance`` -repository at ``https://git.openstack.org/cgit/openstack/astara-appliance``. Additional tooling -for actually building a VM image to run the appliance is located in that repository's -``disk-image-builder`` sub-directory, in the form of elements to be used with -``diskimage-builder``. The following instructions will walk through -building the Debian-based appliance locally, publishing to Glance and configuring the RUG to -use said image. These instructions are for building the image on an Ubuntu 14.04+ system. - -Install Prerequisites -+++++++++++++++++++++ - -First, install ``diskimage-builder`` and required packages: - -:: - - sudo apt-get -y install debootstrap qemu-utils - sudo pip install "diskimage-builder<0.1.43" - -Next, clone the ``astara-appliance`` repository: - -:: - - git clone https://git.openstack.org/openstack/astara-appliance - - -Build the image -+++++++++++++++ - -Kick off an image build using diskimage-builder: - -:: - - cd astara-appliance - ELEMENTS_PATH=diskimage-builder/elements DIB_RELEASE=jessie DIB_EXTLINUX=1 \ - disk-image-create debian vm astara -o astara - -Publish the image -+++++++++++++++++ - -The previous step should produce a qcow2 image called ``astara.qcow2`` that can be -published into Glance for use by the system: - -:: - - # We assume you have the required OpenStack credentials set as - # environment variables - glance image-create --name astara --disk-format qcow2 --container-format bare \ - --file astara.qcow2 - +------------------+--------------------------------------+ - | Property | Value | - +------------------+--------------------------------------+ - | checksum | cfc24b67e262719199c2c4dfccb6c808 | - | container_format | bare | - | created_at | 2015-05-13T21:27:02.000000 | - | deleted | False | - | deleted_at | None | - | disk_format | qcow2 | - | id | e2caf7fa-9b51-4f42-9fb9-8cfce96aad5a | - | is_public | False | - | min_disk | 0 | - | min_ram | 0 | - | name | astara | - | owner | df8eaa19c1d44365911902e738c2b10a | - | protected | False | - | size | 450573824 | - | status | active | - | updated_at | 2015-05-13T21:27:03.000000 | - | virtual_size | None | - +------------------+--------------------------------------+ - -Configure the RUG -+++++++++++++++++ - -Take the above image ID and set the corresponding value in the RUG's config file, to instruct -the service to use that image for software router instances it manages: - -:: - - vi /etc/astara/orchestrator.ini - ... - [router] - image_uuid=e2caf7fa-9b51-4f42-9fb9-8cfce96aad5a - -Making local changes to the appliance service +++++++++++++++++++++++++++++++++++++++++++++ - -By default, building an image in this way pulls the ``astara-appliance`` code directly -from the upstream tip of trunk.
If you'd like to make modifications to this code locally -and build an image containing those changes, set DIB_REPOLOCATION_astara and DIB_REPOREF_astara -in your environment accordingly during the image build, e.g.: - -:: - - export DIB_REPOLOCATION_astara=~/src/astara-appliance # Location of the local repository checkout - export DIB_REPOREF_astara=my-new-feature # The branch name or SHA-1 hash of the git ref to build from. - -.. _appliance_rest: - -REST API -------- -The Astara Appliance REST API is used by the :ref:`orchestrator` service to manage -health and configuration of services on the router. - -Router Health -+++++++++++++ - -``HTTP GET /v1/status/`` -~~~~~~~~~~~~~~~~~~~~~~~~ - -Used to confirm that a router is responsive and has external network connectivity. - -:: - - Example HTTP 200 Response - - Content-Type: application/json - { - 'v4': true, - 'v6': false, - } - -Router Configuration -++++++++++++++++++++ - -``HTTP GET /v1/firewall/rules/`` -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Used to retrieve an overview of configured firewall rules for the router (from -``iptables -L`` and ``ip6tables -L``). - -:: - - Example HTTP 200 Response - - Content-Type: text/plain - Chain INPUT (policy DROP) - target prot opt source destination - ACCEPT all -- 0.0.0.0/0 0.0.0.0/0 - ACCEPT icmp -- 0.0.0.0/0 0.0.0.0/0 icmptype 8 - - ... - - -``HTTP GET /v1/system/interface//`` -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Used to retrieve JSON data about a specific interface on the router. - -:: - - Example HTTP 200 Response - - Content-Type: application/json - { - "interface": { - "addresses": [ - "8.8.8.8", - "2001:4860:4860::8888", - ], - "description": "", - "groups": [], - "ifname": "ge0", - "lladdr": "fa:16:3f:de:21:e9", - "media": null, - "mtu": 1500, - "state": "up" - } - } - -``HTTP GET /v1/system/interfaces`` -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Used to retrieve JSON data about `every` interface on the router. - -:: - - Example HTTP 200 Response - - Content-Type: application/json - { - "interfaces": [{ - "addresses": [ - "8.8.8.8", - "2001:4860:4860::8888", - ], - "description": "", - "groups": [], - "ifname": "ge0", - "lladdr": "fa:16:3f:de:21:e9", - "media": null, - "mtu": 1500, - "state": "up" - }, { - ... - }] - } - -``HTTP PUT /v1/system/config/`` -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Used (generally, by :program:`astara-orchestrator`) to push a new configuration -to the router and restart services as necessary: - -:: - - Example HTTP PUT Body - - Content-Type: application/json - { - "configuration": { - "networks": [ - { - "address_allocations": [], - "interface": { - "addresses": [ - "8.8.8.8", - "2001:4860:4860::8888" - ], - "description": "", - "groups": [], - "ifname": "ge1", - "lladdr": null, - "media": null, - "mtu": 1500, - "state": "up" - }, - "name": "", - "network_id": "f0f8c937-9fb7-4a58-b83f-57e9515e36cb", - "network_type": "external", - "v4_conf_service": "static", - "v6_conf_service": "static" - }, - { - "address_allocations": [], - "interface": { - "addresses": [ - "..." - ], - "description": "", - "groups": [], - "ifname": "ge0", - "lladdr": "fa:16:f8:90:32:e3", - "media": null, - "mtu": 1500, - "state": "up" - }, - "name": "", - "network_id": "15016de1-494b-4c65-97fb-475b40acf7e1", - "network_type": "management", - "v4_conf_service": "static", - "v6_conf_service": "static" - }, - { - "address_allocations": [ - { - "device_id": "7c400585-1743-42ca-a2a3-6b30dd34f83b", - "hostname": "10-10-10-1.local", - "ip_addresses": { - "10.10.10.1": true, - "2607:f298:6050:f0ff::1": false - }, - "mac_address": "fa:16:4d:c3:95:81" - } - ], - "interface": { - "addresses": [ - "10.10.10.1/24", - "2607:f298:6050:f0ff::1/64" - ], - "description": "", - "groups": [], - "ifname": "ge2", - "lladdr": null, - "media": null, - "mtu": 1500, - "state": "up" - }, - "name": "", - "network_id": "31a242a0-95aa-49cd-b2db-cc00f33dfe88", - "network_type": "internal", - "v4_conf_service": "static", - "v6_conf_service": "static" - } - ], - "static_routes": [] - } - }
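Taken together, these endpoints are enough to sketch a tiny appliance client. The following Python snippet is purely illustrative and is not orchestrator code; the management address is a placeholder, it assumes the third-party ``requests`` library, and the 3 and 90 second timeouts simply mirror the ``alive_timeout`` and ``config_timeout`` defaults from ``orchestrator.ini``::

    # Hypothetical client for the documented appliance endpoints.
    import json
    import requests

    APPLIANCE = 'http://[fdca:3ba5:a17a:acda::2]:5000'  # placeholder mgt address

    def is_alive():
        # GET /v1/status/ answers 200 with {'v4': ..., 'v6': ...} when healthy.
        try:
            return requests.get(APPLIANCE + '/v1/status/', timeout=3).ok
        except requests.RequestException:
            return False

    def push_config(config):
        # PUT /v1/system/config/ replaces the appliance configuration and
        # restarts services as necessary.
        resp = requests.put(
            APPLIANCE + '/v1/system/config/',
            data=json.dumps({'configuration': config}),
            headers={'Content-Type': 'application/json'},
            timeout=90)
        resp.raise_for_status()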
-Survey of Software and Services ------------------------------- -The Astara Appliance uses a variety of software and services to manage routing -and advanced services, such as: - - * ``iproute2`` tools (e.g., ``ip neigh``, ``ip addr``, ``ip route``, etc...) - * ``dnsmasq`` - * ``bird6`` - * ``iptables`` and ``ip6tables`` - -In addition, the Astara Appliance includes two Python-based services: - - * The REST API (which :program:`astara-orchestrator` communicates with to - orchestrate router updates), deployed behind `gunicorn -`_. - * A Python-based metadata proxy. - -Proxying Instance Metadata -------------------------- - -When OpenStack VMs boot with ``cloud-init``, they look for metadata on a -well-known address, ``169.254.169.254``. To facilitate this process, Astara -sets up a special NAT rule (one for each local network):: - - -A PREROUTING -i eth2 -d 169.254.169.254 -p tcp -m tcp --dport 80 -j DNAT --to-destination 10.10.10.1:9602 - -...and a special rule to allow metadata requests to pass across the management -network (where OpenStack Nova is running, and will answer requests):: - - -A INPUT -i !eth0 -d -j DROP - -A Python-based metadata proxy runs locally on the router (in this example, -listening on ``http://10.10.10.1:9602``) and proxies these metadata requests -over the management network so that instances on local tenant networks will -have access to server metadata. diff --git a/doc/source/conf.py b/doc/source/conf.py deleted file mode 100644 index 02bc0e79..00000000 --- a/doc/source/conf.py +++ /dev/null @@ -1,288 +0,0 @@ -# -*- coding: utf-8 -*- -# -# astara documentation build configuration file, created by -# sphinx-quickstart on Thu Apr 2 14:55:06 2015. -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -#sys.path.insert(0, os.path.abspath('.')) - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -#needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings.
They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - 'sphinx.ext.intersphinx', - 'sphinx.ext.graphviz', - 'reno.sphinxext', -] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -# source_suffix = ['.rst', '.md'] -source_suffix = '.rst' - -# The encoding of source files. -#source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'astara' -copyright = u'2015, Akanda, Inc' -author = u'Akanda, Inc' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -from astara.version import version_info as astara_version -version = astara_version.canonical_version_string() -# The full version, including alpha/beta/rc tags. -release = astara_version.version_string_with_vcs() - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -#today = '' -# Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = [] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -#default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -#add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -#show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -#keep_warnings = False - -# If true, `todo` and `todoList` produce output, else they produce nothing. -todo_include_todos = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -#html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -#html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. 
-#html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -#html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -#html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -#html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -#html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -#html_additional_pages = {} - -# If false, no module index is generated. -#html_domain_indices = True - -# If false, no index is generated. -#html_use_index = True - -# If true, the index is split into individual pages for each letter. -#html_split_index = False - -# If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -#html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -#html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -#html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = None - -# Language to be used for generating the HTML full-text search index. -# Sphinx supports the following languages: -# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' -# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' -#html_search_language = 'en' - -# A dictionary with options for the search language support, empty by default. -# Now only 'ja' uses this config value -#html_search_options = {'type': 'default'} - -# The name of a javascript file (relative to the configuration directory) that -# implements a search results scorer. If empty, the default will be used. -#html_search_scorer = 'scorer.js' - -# Output file base name for HTML help builder. -htmlhelp_basename = 'astaradoc' - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { -# The paper size ('letterpaper' or 'a4paper'). -#'papersize': 'letterpaper', - -# The font size ('10pt', '11pt' or '12pt'). -#'pointsize': '10pt', - -# Additional stuff for the LaTeX preamble. -#'preamble': '', - -# Latex figure (float) alignment -#'figure_align': 'htbp', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - (master_doc, 'astara.tex', u'Astara Documentation', - u'Akanda, Inc', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. 
-#latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -#latex_use_parts = False - -# If true, show page references after internal links. -#latex_show_pagerefs = False - -# If true, show URL addresses after external links. -#latex_show_urls = False - -# Documents to append as an appendix to all manuals. -#latex_appendices = [] - -# If false, no module index is generated. -#latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - (master_doc, 'astara', u'astara Documentation', - [author], 1) -] - -# If true, show URL addresses after external links. -#man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) texinfo_documents = [ - (master_doc, 'astara', u'astara Documentation', - author, 'astara', 'One line description of project.', - 'Miscellaneous'), -] - -# Documents to append as an appendix to all manuals. -#texinfo_appendices = [] - -# If false, no module index is generated. -#texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -#texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -#texinfo_no_detailmenu = False - - -# Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = {'https://docs.python.org/': None} diff --git a/doc/source/contribute.rst b/doc/source/contribute.rst deleted file mode 100644 index 05fbda89..00000000 --- a/doc/source/contribute.rst +++ /dev/null @@ -1,8 +0,0 @@ -Contributing -============ - -Submitting Code Upstream ------------------------- - -All of Astara's code is 100% open-source and is hosted on `git.openstack.org -`_. Patches are welcome! diff --git a/doc/source/developer_quickstart.rst b/doc/source/developer_quickstart.rst deleted file mode 100644 index cae2051b..00000000 --- a/doc/source/developer_quickstart.rst +++ /dev/null @@ -1,92 +0,0 @@ -.. _developer_quickstart: - -Astara Developer Quickstart -=========================== - -This guide helps new developers get up and running -with an Astara development environment. The Astara components may be easily -deployed alongside OpenStack using DevStack. For more information about -DevStack visit ``http://docs.openstack.org/developer/devstack/``. - - -.. _developer_quickstart_rest: - -Deploying Astara using DevStack ------------------------------- - -Preparation and prerequisites -+++++++++++++++++++++++++++++ - -Deploying DevStack on your local workstation is not recommended. Instead, -developers should use a dedicated virtual machine. Currently, Ubuntu -Trusty 14.04 is the tested and supported base operating system.
Additionally, -you'll need at least 4GB of RAM (8 is better) and to have ``git`` installed:: - - sudo apt-get -y install git - - -First clone the DevStack repository:: - - sudo mkdir -p /opt/stack/ - sudo chown `whoami` /opt/stack - git clone https://git.openstack.org/openstack-dev/devstack /opt/stack/devstack - - -Configuring DevStack -++++++++++++++++++++ - -Next, you will need to enable the Astara plugin in the DevStack configuration -and enable the relevant services:: - - cat >/opt/stack/devstack/local.conf <<END - ... - END - - cat >>/opt/stack/devstack/local.conf <<END - ... - END - - ... diff --git a/doc/source/index.rst b/doc/source/index.rst deleted file mode 100644 --- a/doc/source/index.rst +++ /dev/null ... -In production at `DreamHost `_ for their OpenStack-based public cloud, -`DreamCompute `_, Astara eliminates the -need for complex SDN controllers, overlays, and multiple plugins by providing -a simple integrated networking stack (routing, firewall, and load balancing via -a :ref:`virtual Service VM `) for connecting and securing -multi-tenant OpenStack environments. - -Narrative Documentation ----------------------- - -.. toctree:: - :maxdepth: 2 - - what_is_astara.rst - orchestrator.rst - appliance.rst - contribute.rst - operation.rst - install.rst - loadbalancer.rst - developer_quickstart.rst - reference.rst - release_notes.rst - -Licensing --------- -Astara is licensed under the Apache-2.0 license and is copyright `Akanda, Inc -`_. diff --git a/doc/source/install.rst b/doc/source/install.rst deleted file mode 100644 index 0711380c..00000000 --- a/doc/source/install.rst +++ /dev/null @@ -1,345 +0,0 @@ -.. _install_astara: - -Astara Installation =================== - -Assumptions ------------ - -You have a fully operating OpenStack environment with, at least, Nova, Keystone, Glance, and Neutron. -The OpenStack environment has been tested and VMs can be successfully created. -The packages ``git`` and ``pip`` should be installed. - -This has been tested on Ubuntu 14.04 with OpenStack installed from source. For RHEL or CentOS path names will -need to be adjusted. These instructions assume they are performed by the root user, whose home directory is /root. -If another user does the installation some adjustment in the paths may be needed. This user will need sudo access -and most commands will need to be prepended with sudo. - -Use the neutron commands to delete all VMs, routers, and networks. - -All Neutron L3 agents (l3, dhcp, ...) should be stopped and disabled. - -Installation ------------- - -All configuration is to be performed on the controller node. - -1. Set up astara user and directories:: - - mkdir -p /var/log/astara /var/lib/astara /etc/astara - useradd --home-dir "/var/lib/astara" --create-home --system --shell /bin/false astara - - chown -R astara:astara /var/log/astara /var/lib/astara /etc/astara - - Set up log rotation:: - - - cat >> /etc/logrotate.d/astara << EOF - - /var/log/astara/*.log { - - daily - - missingok - - rotate 7 - - compress - - notifempty - - nocreate - - } - - EOF - - Give astara sudo permissions:: - - cat > '/etc/sudoers.d/astara_sudoers' << EOF - Defaults:astara !requiretty - - astara ALL = (root) NOPASSWD: /usr/local/bin/astara-rootwrap /etc/astara/rootwrap.conf * - - EOF - -2. Get the code:: - - cd ~ - git clone git://git.openstack.org/openstack/astara - git clone git://git.openstack.org/openstack/astara-neutron - git clone git://git.openstack.org/openstack/astara-appliance - - -3. Install the code:: - - # If you are not building packages and just installing locally, manually install it via pip: - - cd ~/astara - pip install . - - cd ~/astara-neutron - pip install . - cd ~ - -4.
Configure Neutron: - - Make the required changes to the neutron configuration file: - - In /etc/neutron/neutron.conf, set in the [DEFAULT] section: - - To use the Astara Neutron ML2 plugin change the core_plugin and service_plugins to:: - - core_plugin = astara_neutron.plugins.ml2_neutron_plugin.Ml2Plugin - service_plugins = astara_neutron.plugins.ml2_neutron_plugin.L3RouterPlugin - - Also add the API extension path (note: append the astara path to the existing list of extension paths if you have others specified):: - - api_extensions_path = /usr/local/lib/python2.7/dist-packages/astara_neutron/extensions/ - - Note: the path shown will vary with the distribution. For Ubuntu it will be /usr/lib/python2.7/dist-packages/astara_neutron/extensions/; for Red Hat installations this path will be different. - - Configure Neutron to emit event notifications:: - - notification_driver = neutron.openstack.common.notifier.rpc_notifier - - In /etc/neutron/plugins/ml2/ml2_conf.ini in the [ml2] section add:: - - extension_drivers = port_security - - Ensure that l2population is enabled. On all nodes running the L2 agent, either Linuxbridge or OpenvSwitch (namely the compute nodes and nodes running the orchestrator process), in the ml2 ini file: - - Add l2population to the mechanism_drivers line - - To the [agent] section add:: - - l2_population = True - - Depending on the layer 2 technology used in your OpenStack environment, additional parameters may need to be set to enable layer 2 population. Check the OpenStack configuration guide for information about additional layer 2 settings for the layer 2 type and tenant isolation type (VLAN, VXLAN, or GRE) being used. - -5. Configure Nova to use Astara. In the [DEFAULT] section of /etc/nova/nova.conf set: - - If using IPv6:: - - use_ipv6=True - - In the [neutron] section of /etc/nova/nova.conf set:: - - service_metadata_proxy = True - - In /etc/nova/policy.json, replace:: - - "network:attach_external_network": "rule:admin_api" - - with:: - - "network:attach_external_network": "rule:admin_api or role:service" - -6. Start/restart Nova API to read the configuration changes:: - - restart nova-api - - Restart the neutron services:: - - restart neutron-server - restart neutron-linuxbridge - - Stop and disable any L3 agents such as the DHCP agent, L3 agent, or the metadata agent. - - Create a management network:: - - neutron net-create mgt # note the ID, it is used in the orchestrator.ini config - neutron subnet-create --name mgt-subnet mgt fdca:3ba5:a17a:acda::/64 --ip-version=6 --ipv6_address_mode=slaac --enable_dhcp - - Create a public network:: - - neutron net-create --shared --router:external public - neutron subnet-create --name public-subnet public 172.16.0.0/24 - -7.
Configure Astara: - - For this configuration, we assume an IPv6 Neutron network with the prefix fdca:3ba5:a17a:acda::/64 has been created to be used as the management network:: - - mkdir /etc/astara - cp -r ~/astara/etc/* /etc/astara/ - mv /etc/astara/orchestrator.ini.sample /etc/astara/orchestrator.ini - chown astara:astara /etc/astara/*.{ini,json} - - Create an ssh keypair to enable ssh key based logins to the router:: - - ssh-keygen - - It is best to copy the public ssh key into the astara configuration directory:: - - cp ~/.ssh/id_rsa.pub /etc/astara - chmod 600 /etc/astara/id_rsa.pub - - In the astara orchestrator configuration file (/etc/astara/orchestrator.ini) make the following changes: - - In the [oslo_messaging_rabbit] section set:: - - rabbit_userid = guest - rabbit_password = guest - rabbit_hosts = 10.0.1.4 - - Set up logging:: - - log_file = /var/log/astara/orchestrator.log - - Set the prefix of the existing Neutron network to be used as the management network, as used during subnet creation (above):: - - management_prefix = fdca:3ba5:a17a:acda::/64 - - The Neutron IDs of the management network and subnet:: - - management_net_id = $management_net_uuid - management_subnet_id = $management_subnet_uuid - - The Neutron IDs of the external network and subnet:: - - external_network_id=$public_network_id - external_subnet_id=$public_subnet_id - - - Public SSH Key used for SSH'ing into the appliance VMs as user 'astara' (this is optional):: - - ssh_public_key = $path_to_readable_ssh_pub_key #From the above step this should be /etc/astara/id_rsa.pub - - The interface driver is used for bringing up a local port on the astara control node that plugs into the management network. This is specific to the underlying L2 implementation used, set accordingly:: - - interface_driver=astara.common.linux.interface.BridgeInterfaceDriver #For Linuxbridge - interface_driver=astara.common.linux.interface.OVSInterfaceDriver #For OpenvSwitch - - Correct the provider rules path:: - - provider_rules_path=/etc/astara/provider_rules.json - - In the [keystone_authtoken] section, configure the credentials for the keystone service tenant as configured in your environment, specifically:: - - auth_uri = http://127.0.0.1:5000 # Adjust the IP for the current installation - project_name = service - password = neutron - username = neutron - auth_url = http://127.0.0.1:35357 # Adjust the IP for the current installation - auth_plugin = password - - In the [database] section, configure the URL of a supported oslo.db backend, e.g.:: - - connection = mysql+pymysql://astara:astara@127.0.0.1/astara?charset=utf8 - - -8. Create and Migrate the DB: - - Install the PyMySQL pip package:: - - pip install PyMySQL - - And create the database and set database access permissions:: - - mysql -u root -pmysql -e 'CREATE DATABASE astara;' - mysql -u root -pmysql -e "GRANT ALL PRIVILEGES ON astara.* TO 'astara'@'localhost' IDENTIFIED BY 'astara';" - mysql -u root -pmysql -e "GRANT ALL PRIVILEGES ON astara.* TO 'astara'@'%' IDENTIFIED BY 'astara';" - astara-dbsync --config-file /etc/astara/orchestrator.ini upgrade - -9.
Create or download an Appliance Image - - If you don't plan to build your own appliance image, one can be downloaded for testing at: http://tarballs.openstack.org/akanda-appliance/images/ - - If you want to build one yourself, instructions are found in the :ref:`appliance documentation`. - In either case, upload the image to Glance (this command must be performed in the directory where the image was downloaded/created):: - - openstack image create astara --public --container-format=bare --disk-format=qcow2 --file astara.qcow2 - - Note the image ID for the next step. - - Update /etc/astara/orchestrator.ini and set this in the [router] section:: - - image_uuid=$image_uuid_in_glance - - You may also want to boot appliances with a specific Nova flavor; this may be specified in the [router] section. - Create a new flavor:: - - nova flavor-create m1.astara 6 512 3 1 --is-public True - - Set the flavor in /etc/astara/orchestrator.ini:: - - instance_flavor=$nova_flavor_id - -10. Start astara:: - - astara-orchestrator --config-file /etc/astara/orchestrator.ini - - For Ubuntu or Debian systems use the following to create an upstart script to automatically start astara-orchestrator on boot:: - - cat > /etc/init/astara.conf << EOF - description "Astara Orchestrator server" - - start on runlevel [2345] - stop on runlevel [!2345] - - respawn - - exec start-stop-daemon --start --chuid astara --exec /usr/local/bin/astara-orchestrator -- --config-file=/etc/astara/orchestrator.ini - - EOF - - Note: For RHEL or CentOS use the command:: - - sudo -u astara /usr/local/bin/astara-orchestrator --config-file=/etc/astara/orchestrator.ini & - - Note: to automatically start the orchestrator process, a systemd startup script will need to be created. - Start the astara orchestrator process:: - - start astara - -Use Astara ----------- - -If you have existing routers in your environment, Astara will find them and attempt to boot appliances in Nova. If not, create a router and it should react accordingly.
Otherwise, use the following to create a private network, create a router, and add the network interface to the router:: - - neutron net-create private - neutron subnet-create --name private-subnet private 10.2.0.0/24 - - neutron router-create MyRouter - neutron router-interface-add MyRouter private - -Boot a VM (replacing the <---> with the appropriate information):: - - nova boot --image --flavor 1 --nic net-id= - -At this time, source the admin's credentials and use the command:: - - nova list --all-tenants - -The output will be similar to:: - - +--------------------------------------+------------------------------------------------+----------------------------------+--------+------------+-------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | ID | Name | Tenant ID | Status | Task State | Power State | Networks | - +--------------------------------------+------------------------------------------------+----------------------------------+--------+------------+-------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - - | 1003335d-640c-4492-8054-80c4d23f9552 | Three | fbf54d3e3fc544a7895701d27139489e | ACTIVE | - | Running | private1=10.3.0.3, fdd6:a1fa:cfa8:f4d0:f816:3eff:fed6:2e3b | - | e75a0429-15cb-41a2-ae7b-890315b75922 | ak-router-6aa27c79-8ed4-4c59-ae83-4c4da725b3ec | d9aa8deb2d2c489e81eb93f30a5b63e8 | ACTIVE | - | Running | private1=fdd6:a1fa:cfa8:f4d0:f816:3eff:feab:c96b; public=fdd6:a1fa:cfa8:b59a:f816:3eff:feb4:29e6; private=fdd6:a1fa:cfa8:eefe:f816:3eff:fe3e:a5e9; mgt=fdd6:a1fa:cfa8:d5ff:f816:3eff:fe3f:4f95, fdca:3ba5:a17a:acda:f816:3eff:fe3f:4f95 | - +--------------------------------------+------------------------------------------------+----------------------------------+--------+------------+-------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - -The line with the ak-router shows that Astara has built the router VM. Further operation and debug information can be found in the :ref:`operator tools` section. - -.. _cluster_astara: - -Clustering astara-orchestrator ------------------------------- - -The ``astara-orchestrator`` service supports clustering among multiple processes spanning multiple nodes to provide active/active clustering for -purposes of load-distribution and high-availability (HA). In this setup, multiple ``astara-orchestrator`` processes form a distributed hash ring, -in which each is responsible for orchestrating a subset of virtual appliances. When one ``astara-orchestrator`` falls offline, management of -its resources is redistributed to the remaining nodes. This feature requires the use of an external coordination service (e.g., ZooKeeper), -as provided by the `tooz library `_. To find out more about which services ``tooz`` supports, -see ``_.
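For illustration only (this is not Astara's internal code), joining such a coordination group through ``tooz`` looks roughly like the following sketch, which assumes a reachable ZooKeeper and ``tooz`` installed with its kazoo driver; the URL and group name simply mirror the configuration example that follows::

    # Illustrative tooz membership sketch; not taken from the astara source.
    import uuid

    from tooz import coordination

    coordinator = coordination.get_coordinator(
        'kazoo://zookeeper.localnet:2181?timeout=5',
        uuid.uuid4().hex.encode())  # unique member id for this process
    coordinator.start()

    group = b'astara.orchestrator'
    try:
        coordinator.create_group(group).get()
    except coordination.GroupAlreadyExist:
        pass
    coordinator.join_group(group).get()

    # Members heartbeat periodically; a member that stops heartbeating is
    # dropped from the group and its share of the hash ring is redistributed.
    coordinator.heartbeat()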
- -To enable this feature, you must set the following in ``orchestrator.ini``:: - - [coordination] - enabled=True # enable the feature - url=kazoo://zookeeper.localnet:2181?timeout=5 # a URL to a tooz-supported coordination service - group_id=astara.orchestrator # optional, change this if deploying multiple clusters - heartbeat_interval=1 # optional, tune as needed diff --git a/doc/source/loadbalancer.rst b/doc/source/loadbalancer.rst deleted file mode 100644 index 682a29b6..00000000 --- a/doc/source/loadbalancer.rst +++ /dev/null @@ -1,102 +0,0 @@ - -Install an Astara Load Balancer =============================== - -How to configure Astara to be able to create load balancers ----------------------------------------------------------- - -In this example we will create an image that can be used for either a router or a loadbalancer. -Then we will configure both astara and neutron for loadbalancer support, which will use the -LBaaS v2 commands. We can then use the LBaaS v2 API to create a loadbalancer. - -Build loadbalancer appliance image: ------------------------------------ - -Build an image to include loadbalancer support by using one of the two following commands. -If you have a license for nginx plus, you will be able to take advantage of some of the -nginx-plus features, but you must first copy over your nginx certs. Run this command in the -astara-appliance directory:: - - ELEMENTS_PATH=diskimage-builder/elements \ - DIB_RELEASE=jessie DIB_EXTLINUX=1 \ - DIB_ASTARA_ADVANCED_SERVICES=router,loadbalancer \ - disk-image-create debian vm astara nginx -o astara-lb - -or, for nginx plus (nginx certs will need to be copied over before running this command), -run this command in the astara-appliance directory:: - - ELEMENTS_PATH=diskimage-builder/elements \ - DIB_RELEASE=jessie DIB_EXTLINUX=1 \ - DIB_ASTARA_ADVANCED_SERVICES=router,loadbalancer \ - disk-image-create debian vm astara nginx-plus -o astara-lb - -Configure Neutron for Astara loadbalancer support ------------------------------------------------- - -1. Ensure that neutron LBaaS packages are installed or install neutron-lbaas from source as follows:: - - git clone https://git.openstack.org/openstack/neutron-lbaas - cd neutron-lbaas - pip install -U . - - -2. Make the following changes to neutron.conf -in the [DEFAULT] section:: - - core_plugin = astara_neutron.plugins.ml2_neutron_plugin.Ml2Plugin - service_plugins = astara_neutron.plugins.ml2_neutron_plugin.L3RouterPlugin,astara_neutron.plugins.lbaas_neutron_plugin.LoadBalancerPluginv2 - api_extensions_path = /usr/local/lib/python2.7/dist-packages/astara_neutron/extensions:/usr/local/lib/python2.7/dist-packages/neutron_lbaas/extensions - -in the [SERVICE_PROVIDERS] section (you may have to add this section if it doesn't exist):: - - service_provider = LOADBALANCERV2:LoggingNoop:neutron_lbaas.drivers.logging_noop.driver.LoggingNoopLoadBalancerDriver:default - - -3. Create the loadbalancer tables in the neutron database:: - - neutron-db-manage --subproject neutron-lbaas upgrade head - -Configure Astara for loadbalancer support ----------------------------------------- - -1. Make the following changes to orchestrator.conf. - -in the [DEFAULT] section:: - - enabled_drivers = router,loadbalancer - -in the [LOADBALANCER] section:: - - image_uuid = - instance_flavor = 6 - -(If you are using this image for the router also, in the [ROUTER] section, set the image_uuid to this value - also.) - -2.
Restart the neutron-server and astara services to pick up the changes:: - - restart neutron-server - restart astara - -Create a loadbalancer -------------------- - -1. Build a loadbalancer (this assumes that you have two web servers at IPs 10.2.0.4 and 10.2.0.5, which will be used -in the following commands):: - - neutron lbaas-loadbalancer-create --name lb1 private-subnet - neutron lbaas-loadbalancer-show lb1 # Note the VIP address - neutron lbaas-listener-create --loadbalancer lb1 --protocol HTTP --protocol-port 80 --name listener1 - neutron lbaas-pool-create --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --name pool1 - neutron lbaas-member-create --subnet private-subnet --address 10.2.0.4 --protocol-port 80 --name mem1 pool1 - neutron lbaas-member-create --subnet private-subnet --address 10.2.0.5 --protocol-port 80 --name mem2 pool1 - neutron lbaas-healthmonitor-create --delay 3 --type HTTP --max-retries 3 --timeout 3 --pool pool1 --name hm1 - -2. Once finished you can delete everything using the following:: - - neutron lbaas-member-delete mem1 pool1 - neutron lbaas-member-delete mem2 pool1 - neutron lbaas-pool-delete pool1 - neutron lbaas-listener-delete listener1 - neutron lbaas-loadbalancer-delete lb1 - diff --git a/doc/source/mitaka.rst b/doc/source/mitaka.rst deleted file mode 100644 index dac28297..00000000 --- a/doc/source/mitaka.rst +++ /dev/null @@ -1,5 +0,0 @@ -================================================ - Astara Mitaka Series Release Notes (UNRELEASED) -================================================ - -.. release-notes:: diff --git a/doc/source/operation.rst b/doc/source/operation.rst deleted file mode 100644 index 3f87f193..00000000 --- a/doc/source/operation.rst +++ /dev/null @@ -1,99 +0,0 @@ -.. _operator_tools: - -Operation and Deployment ======================== - -Installation ------------ - -You can install from git directly with ``pip``:: - - $ pip install -e git://git.openstack.org/openstack/astara@stable/liberty#egg=astara - -After installing :py:mod:`astara`, it can be invoked as:: - - $ astara-orchestrator --config-file /etc/astara/orchestrator.ini - -The :py:mod:`astara` service is intended to run on a management network (a -separate network for use by your cloud operators). This segregation prevents -system administration and the monitoring of system access from being disrupted -by traffic generated by guests. - -Operator Tools -------------- - -astara-ctl -++++++++++ - -:program:`astara-ctl` is a tool which can be used to send manual instructions to -a running :py:mod:`astara-orchestrator` via AMQP:: - - $ astara-ctl browse - A curses console interface for browsing the state - of every Neutron router and issuing `rebuild` commands - - $ astara-ctl poll - Sends a POLL instruction to every router to check health - - $ astara-ctl router rebuild - Sends a REBUILD instruction to a specific router - - $ astara-ctl router update - Sends an UPDATE instruction to a specific router - - $ astara-ctl router debug - Places a specific router in `debug mode`. - This causes the rug to ignore messages for the specified - router (so that, for example, operators can investigate - troublesome routers). - - $ astara-ctl router manage - Removes a specific router from `debug mode` and places - it back under astara-orchestrator management. - - $ astara-ctl tenant debug - Places a specific tenant in `debug mode`. - This causes the rug to ignore messages for the specified - tenant.
- - $ astara-ctl tenant manage - Removes every router for a specific tenant from `debug mode` - and places the tenant back under astara-orchestrator management. - - $ astara-ctl ssh - Establishes an ssh connection with a specified Service VM. - - $ astara-ctl workers debug - Causes the rug to print debugging diagnostics about the - current state of its worker processes and the state machines - under their management. - -:program:`astara-orchestrator` also exposes an RPC API on the management network, -which allows non-interactive `astara-ctl` commands to be issued via HTTP, e.g., - -:: - - $ curl -X PUT -g6 "http://[fdca:3ba5:a17a:acda::1]:44250/poll/" - $ curl -X PUT -g6 "http://[fdca:3ba5:a17a:acda::1]:44250/workers/debug/" - $ curl -X PUT -g6 "http://[fdca:3ba5:a17a:acda::1]:44250/router/rebuild/" - - - -astara-debug-router -+++++++++++++++++++ - -:program:`astara-debug-router` is a diagnostic tool which can be used to -analyze the state machine flow of any router and step through its operation -using Python's debugger. This is particularly useful for development purposes -and understanding the nature of the :py:mod:`astara-orchestrator` state -machine, but it's also useful for debugging problematic routers as an -operator; a common pattern for determining why a Service VM won't boot is to -place the router in `debug mode`:: - - $ astara-ctl router debug - -...and then step through the handling of a manual ``UPDATE`` event to see where -it fails:: - - $ astara-debug-router --router-id diff --git a/doc/source/orchestrator.rst b/doc/source/orchestrator.rst deleted file mode 100644 index 6175c561..00000000 --- a/doc/source/orchestrator.rst +++ /dev/null @@ -1,168 +0,0 @@ -.. _rug: - -Service VM Orchestration and Management ======================================= - -Astara Orchestrator ----------------------------- - -:program:`astara-orchestrator` is a multi-processed, multithreaded Python process -composed of three primary subsystems, each of which is spawned as a subprocess -of the main :py:mod:`astara-orchestrator` process: - -L3 and DHCP Event Consumption ----------------------------- - -:py:mod:`astara.notifications` uses `kombu `_ -and a Python :py:mod:`multiprocessing.Queue` to listen for specific Neutron service -events (e.g., ``router.interface.create``, ``subnet.create.end``, -``port.create.end``, ``port.delete.end``) and normalize them into one of -several event types: - - * ``CREATE`` - a router creation was requested - * ``UPDATE`` - services on a router need to be reconfigured - * ``DELETE`` - a router was deleted - * ``POLL`` - used by the :ref:`health monitor` for checking aliveness - of a Service VM - * ``REBUILD`` - a Service VM should be destroyed and recreated - -As events are normalized and shuttled onto the :py:mod:`multiprocessing.Queue`, -:py:mod:`astara.scheduler` shards (by Tenant ID, by default) and -distributes them amongst a pool of worker processes it manages. - -This system also consumes and distributes special :py:mod:`astara.command` events -which are published by the :program:`astara-ctl` :ref:`operator tools`. - - -State Machine Workers and Router Lifecycle ------------------------------------------ -Each multithreaded worker process manages a pool of state machines (one -per virtual router), each of which represents the lifecycle of an individual -router. As the scheduler distributes events for a specific router, logic in -the worker (dependent on the router's current state) determines which action to -take next: - -..
graphviz:: worker_diagram.dot - -For example, let's say a user created a new Neutron network, subnet, and router. -In this scenario, a ``router-interface-create`` event would be handled by the -appropriate worker (based on tenant ID), and a transition through the state -machine might look something like this: - -.. graphviz:: sample_boot.dot - -State Machine Flow -++++++++++++++++++ - -The supported states in the state machine are: - - :CalcAction: The entry point of the state machine. Depending on the - current status of the Service VM (e.g., ``ACTIVE``, ``BUILD``, ``SHUTDOWN``) - and the current event, determine the first step in the state machine to - transition to. - - :Alive: Check aliveness of the Service VM by attempting to communicate with - it via its REST HTTP API. - - :CreateVM: Call ``nova boot`` to boot a new Service VM. This will attempt - to boot a Service VM up to a (configurable) number of times before - placing the router into ``ERROR`` state. - - :CheckBoot: Check aliveness (up to a configurable number of seconds) of the - router until the VM is responsive and ready for initial configuration. - - :ConfigureVM: Configure the Service VM and its services. This is generally - the final step in the process of booting and configuring a router. This - step communicates with the Neutron API to generate a comprehensive network - configuration for the router (which is pushed to the router via its REST - API). On success, the state machine yields control back to the worker - thread and that thread handles the next event in its queue (likely for - a different Service VM and its state machine). - - :ReplugVM: Attempt to hot-plug/unplug a network from the router via ``nova - interface-attach`` or ``nova interface-detach``. - - :StopVM: Terminate a running Service VM. This is generally performed when - a Neutron router is deleted or via explicit operator tools. - - :ClearError: After a (configurable) number of ``nova boot`` failures, Neutron - routers are automatically transitioned into a cool down ``ERROR`` state - (so that :py:mod:`astara` will not continue to boot them forever; this is - to prevent further exacerbation of failing hypervisors). This state - transition is utilized to add routers back into management after issues - are resolved and signal to :py:mod:`astara-orchestrator` that it should attempt - to manage them again. - - :STATS: Reads traffic data from the router. - - :CONFIG: Configures the VM and its services. - - :EXIT: Processing stops. - - -ACT(ion) Variables are: - - :Create: Create router was requested. - - :Read: Read router traffic stats. - - :Update: Update router configuration. - - :Delete: Delete router. - - :Poll: Poll router alive status. - - :rEbuild: Recreate a router from scratch. - -VM Variables are: - - :Down: VM is known to be down. - - :Booting: VM is booting. - - :Up: VM is known to be up (pingable). - - :Configured: VM is known to be configured. - - :Restart Needed: VM needs to be rebooted. - - :Hotplug Needed: VM needs to be replugged. - - :Gone: The router definition has been removed from neutron. - - :Error: The router has been rebooted too many times, or has had some - other error. - -.. graphviz:: state_machine.dot - -.. _health: - -Health Monitoring ----------------- - -``astara.health`` is a subprocess which (at a configurable interval) -periodically delivers ``POLL`` events to every known virtual router. This -event transitions the state machine into the ``Alive`` state, which (depending -on the availability of the router) may simply exit the state machine (because -the router's status API replies with an ``HTTP 200``) or transition to the -``CreateVM`` state (because the router is unresponsive and must be recreated).
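In pseudocode terms, that poll loop amounts to something like the following — a hedged sketch, not the actual ``astara.health`` implementation; the interval value and the ``known_routers``/``send_event`` callables are assumptions for illustration::

    # Hedged sketch of the health monitor loop; not the real astara.health.
    import time

    POLL_INTERVAL = 10  # seconds; assumed here, configurable in the service

    def health_monitor(known_routers, send_event):
        while True:
            for router_id in known_routers():
                # Delivering POLL drives each router's state machine into
                # Alive; an unresponsive router proceeds to CreateVM.
                send_event(router_id, 'POLL')
            time.sleep(POLL_INTERVAL)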
High Availability ----------------- - -Astara supports high-availability (HA) on both the control plane and data -plane. - -The ``astara-orchestrator`` service may be deployed in a configuration that -allows multiple service processes to span nodes to allow load-distribution -and HA. For more information on clustering, see the :ref:`install docs`. - -It also supports orchestrating pairs of virtual appliances to provide -HA of the data path, allowing pairs of virtual routers to be clustered among -themselves using VRRP and connection tracking. To enable this, simply -create Neutron routers with the ``ha=True`` parameter or set this property -on existing routers and issue a rebuild command via ``astara-ctl`` for that -router. - - - diff --git a/doc/source/reference.rst b/doc/source/reference.rst deleted file mode 100644 index dc022314..00000000 --- a/doc/source/reference.rst +++ /dev/null @@ -1,5 +0,0 @@ -Configuration Options ===================== - -``astara-orchestrator`` uses ``oslo.config`` for configuration, so its -configuration file format should be very familiar to OpenStack deployers. diff --git a/doc/source/release_notes.rst b/doc/source/release_notes.rst deleted file mode 100644 index 575752af..00000000 --- a/doc/source/release_notes.rst +++ /dev/null @@ -1,6 +0,0 @@ -Astara Release Notes -==================== - -.. toctree:: - - mitaka.rst diff --git a/doc/source/sample_boot.dot b/doc/source/sample_boot.dot deleted file mode 100644 index 1cd8b683..00000000 --- a/doc/source/sample_boot.dot +++ /dev/null @@ -1,14 +0,0 @@ -digraph sample_boot { - rankdir=LR; - - node [shape = doublecircle]; - CalcAction; - - node [shape = circle]; - - CalcAction -> Alive; - Alive -> CreateVM; - CreateVM -> CheckBoot; - CheckBoot -> CheckBoot; - CheckBoot -> ConfigureVM; -} diff --git a/doc/source/state_machine.dot b/doc/source/state_machine.dot deleted file mode 100644 index 4c037e21..00000000 --- a/doc/source/state_machine.dot +++ /dev/null @@ -1,57 +0,0 @@ -digraph rug { - // rankdir=LR; - - node [shape = rectangle]; - START; - - // These nodes enter and exit the state machine.
- - node [shape = doublecircle]; - EXIT; - CALC_ACTION; - - node [shape = circle]; - - START -> CALC_ACTION; - - CALC_ACTION -> ALIVE [ label = "ACT>[CRUP],vm:[UC]" ]; - CALC_ACTION -> CREATE_VM [ label = "ACT>[CRUP],vm:D" ]; - CALC_ACTION -> CHECK_BOOT [ label = "ACT>[CRUP],vm:B" ]; - CALC_ACTION -> REBUILD_VM [ label = "ACT:E" ]; - CALC_ACTION -> STOP_VM [ label = "ACT>D or vm:G" ]; - CALC_ACTION -> CLEAR_ERROR [ label = "vm:E" ]; - - ALIVE -> CREATE_VM [ label = "vm>D" ]; - ALIVE -> CONFIG [ label = "ACT:[CU],vm:[UC]" ]; - ALIVE -> STATS [ label = "ACT:R,vm:C" ]; - ALIVE -> CALC_ACTION [ label = "ACT:P,vm>[UC]" ]; - ALIVE -> STOP_VM [ label = "vm:G" ]; - - CREATE_VM -> CHECK_BOOT [ label = "ACT:[CRUDP],vm:[DBUCR]" ]; - CREATE_VM -> STOP_VM [ label = "vm:G" ]; - CREATE_VM -> CALC_ACTION [ label = "vm:E" ]; - CREATE_VM -> CREATE_VM [ label = "vm:D" ]; - - CHECK_BOOT -> CONFIG [ label = "vm>U" ]; - CHECK_BOOT -> CALC_ACTION [ label = "vm:[BCR]" ]; - CHECK_BOOT -> STOP_VM [ label = "vm:[DG]" ]; - - CONFIG -> STATS [ label = "ACT:R,vm>C" ]; - CONFIG -> CALC_ACTION [ label = "ACT>P,vm>C" ]; - CONFIG -> REPLUG_VM [ label = "vm>[H]" ]; - CONFIG -> STOP_VM [ label = "vm>[RDG]" ]; - - REPLUG_VM -> CONFIG [ label = "vm>[H]" ]; - REPLUG_VM -> STOP_VM [ label = "vm>[R]" ]; - - STATS -> CALC_ACTION [ label = "ACT>P" ]; - - CLEAR_ERROR -> CALC_ACTION [ label = "no pause before next action" ]; - - REBUILD_VM -> REBUILD_VM [ label = "vm!=[DG]" ]; - REBUILD_VM -> CREATE_VM [ label = "ACT:E,vm:D" ]; - - STOP_VM -> CREATE_VM [ label = "ACT:E or vm>D" ]; - STOP_VM -> EXIT [ label = "ACT:D,vm>D or vm:G" ]; - -} diff --git a/doc/source/what_is_astara.rst b/doc/source/what_is_astara.rst deleted file mode 100644 index 828e4a2e..00000000 --- a/doc/source/what_is_astara.rst +++ /dev/null @@ -1,107 +0,0 @@ -What Is Astara ============== - -Astara is an open source network virtualization solution built by OpenStack -operators for OpenStack clouds. - -Astara follows core principles of simple, compatible, and open development. - -The Astara architecture is described below in terms of its building blocks. The -most important of those building blocks, the Astara Orchestrator, is a -multi-process, multi-threaded Neutron Advanced Services orchestration service -which manages the lifecycle of the Neutron Advanced Services. Astara currently -supports layer 3 routing and load balancing. Astara will support additional -Neutron Advanced Services, such as VPN and firewalls, via the open driver model. - -High-Level Architecture ----------------------- - -Astara is a network orchestration platform that delivers network services -(L3-L7) via service instances that provide routing, load balancing, and -eventually more. Astara also interacts with any L2 overlay - including open -source solutions based on OVS and Linux bridge (VLAN, VXLAN, GRE) and most -proprietary solutions - to deliver a centralized management layer for all -OpenStack networking decisions. - -In a typical OpenStack deployment, Neutron server emits L3 and DHCP -messages which are handled by a variety of Neutron agents (the L3 agent, DHCP -agent, agents for advanced services such as load balancing, firewall, and VPN -as a service): - -.. image:: _static/neutron-reference.png - -When we add Astara into the mix, we're able to replace these agents with -a virtualized Service Instance that manages layer 3 routing and other advanced -networking services, significantly lowering the barrier of entry for operators -(in terms of deployment, monitoring and management): - -..
-
-Astara takes the place of many of the agents that OpenStack Neutron
-communicates with (L3, DHCP, LBaaS, FWaaS) and acts as a single control point
-for all networking services. By removing the complexity of extra agents, Astara
-can centrally manage DHCP and L3, orchestrate load balancing and VPN services,
-and overall reduce the number of components required to build, manage and
-monitor complete virtual networks within your cloud.
-
-Astara Building Blocks
-++++++++++++++++++++++
-
-From an architectural perspective, Astara is composed of a few sub-projects:
-
- * | `astara `_
-
-   A service for managing the creation, configuration, and health of Astara
-   Service Instances. The Orchestrator acts in part as a replacement for
-   Neutron's various L3-L7 agents by listening for Neutron AMQP events and
-   coalescing them into software appliance API calls (which configure and
-   manage embedded services on the Service Instance). Additionally, the
-   Orchestrator contains a health monitoring component which monitors health
-   and guarantees uptime for existing Service Instances.
-
- * | `astara-appliance `_
-
-   The software and services (including tools for building custom service
-   images themselves) that run on the virtualized Linux appliance. Includes
-   drivers for L3-L7 services and a RESTful API that is used to orchestrate
-   changes to appliance configuration.
-
- * | `astara-neutron `_
-
-   Add-on API extensions and plugins for OpenStack Neutron which enable
-   functionality and integration with the Astara project, notably Astara
-   router appliance interaction.
-
- * | `akanda-horizon `_
-
-   OpenStack Horizon rug panels
-
-Software Instance Lifecycle
-+++++++++++++++++++++++++++
-
-As Neutron emits events in reaction to network operations (e.g., a user creates
-a new network/subnet, a user attaches a virtual machine to a network,
-a floating IP address is associated, etc...), Astara Orchestrator receives,
-parses, and dispatches these events to a pool of workers which manage the
-lifecycle of every virtualized appliance.
-
-This management of individual appliances is handled via a state machine per
-appliance; as events come in, the state machine for the appropriate instance
-transitions, modifying its configuration in a variety of ways, such as:
-
- * Booting a virtual machine for the appliance via the Nova API.
- * Checking for aliveness of the Service Instance.
- * Pushing configuration updates via the :ref:`REST API
-   ` to configure services
-   (such as ``iptables``, ``dnsmasq``, ``bird6``, etc...).
- * Deleting instances via the Nova API (e.g., when a router or load balancer
-   is deleted from Neutron).
-
-The Service Instance (the Astara Appliance)
--------------------------------------------
-
-Astara uses Linux-based images (stored in OpenStack Glance) to provide layer 3
-routing and advanced networking services. There is a stable image
-available by default, but it’s also possible to build your own
-custom Service Instance image (running additional services of your own on top of
-the routing and other default services provided by the project).
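To make the lifecycle description above concrete, the dispatch pattern can be
sketched in a few lines of Python. This is a minimal illustration only; the
names are invented, and Astara's real implementation lives in
``astara/worker.py`` and ``astara/state.py``:

.. code-block:: python

    # Illustrative sketch of the worker/state-machine dispatch described
    # above; class names are hypothetical, not Astara's actual API.
    class ApplianceStateMachine(object):
        """Tracks a single appliance and reacts to incoming events."""

        def __init__(self, resource_id):
            self.resource_id = resource_id
            self.state = 'CalcAction'

        def update(self, event):
            # A real implementation would pick the next state (CreateVM,
            # CheckBoot, Config, ...) from the event and appliance health.
            print('%s: handled %s while in state %s'
                  % (self.resource_id, event['action'], self.state))


    class Worker(object):
        """Owns one state machine per appliance and feeds events to it."""

        def __init__(self):
            self.machines = {}

        def handle_event(self, event):
            rid = event['resource_id']
            if rid not in self.machines:
                self.machines[rid] = ApplianceStateMachine(rid)
            self.machines[rid].update(event)


    worker = Worker()
    worker.handle_event({'resource_id': 'router-1', 'action': 'create'})
    worker.handle_event({'resource_id': 'router-1', 'action': 'update'})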
diff --git a/doc/source/worker_diagram.dot b/doc/source/worker_diagram.dot
deleted file mode 100644
index fb508e65..00000000
--- a/doc/source/worker_diagram.dot
+++ /dev/null
@@ -1,27 +0,0 @@
-digraph worker_diagram {
-    node [shape = square];
-    AMQP;
-    "Event Processing + Scheduler";
-    Nova;
-    Neutron;
-
-    node [shape = circle];
-
-    AMQP -> "Event Processing + Scheduler";
-    subgraph clusterrug {
-        "Event Processing + Scheduler" -> "Worker 1";
-        "Event Processing + Scheduler" -> "Worker ...";
-        "Event Processing + Scheduler" -> "Worker N";
-
-        "Worker 1" -> "Thread 1"
-        "Worker 1" -> "Thread ..."
-        "Worker 1" -> "Thread N"
-    }
-
-    "Thread 1" -> "Service VM 1";
-    "Thread 1" -> "Service VM ..." [ label = "Appliance REST API" ];
-    "Thread 1" -> "Service VM N";
-
-    "Thread 1" -> "Nova" [ label = "Nova API" ];
-    "Thread 1" -> "Neutron" [ label = "Neutron API" ];
-}
diff --git a/etc/orchestrator.ini b/etc/orchestrator.ini
deleted file mode 100644
index adb46973..00000000
--- a/etc/orchestrator.ini
+++ /dev/null
@@ -1,964 +0,0 @@
-[DEFAULT]
-
-#
-# From astara
-#
-
-# (string value)
-#admin_user =
-
-# (string value)
-#admin_password =
-
-# (string value)
-#admin_tenant_name =
-
-# (string value)
-#auth_url =
-
-# (string value)
-#auth_strategy = keystone
-
-# (string value)
-#auth_region =
-
-# (integer value)
-#max_retries = 3
-
-# (integer value)
-#retry_delay = 1
-
-# RUG API listening port (integer value)
-#rug_api_port = 44250
-
-# Path to the SSH public key for the 'astara' user within appliance instances
-# (string value)
-#ssh_public_key = /etc/astara/astara.pub
-
-# Which instance provider to use (on_demand, pez) (string value)
-#instance_provider = on_demand
-
-# The boot command to run to configure the appliance (string value)
-#astara_boot_command = astara-configure-management
-
-# (string value)
-#management_network_id =
-
-# (string value)
-#external_network_id =
-
-# (string value)
-#management_subnet_id =
-
-# (string value)
-#external_subnet_id =
-
-# (string value)
-#management_prefix = fdca:3ba5:a17a:acda::/64
-
-# (string value)
-#external_prefix = 172.16.77.0/24
-
-# (integer value)
-#astara_mgt_service_port = 5000
-
-# (string value)
-#default_instance_flavor = 1
-
-# (string value)
-#interface_driver = astara.common.linux.interface.OVSInterfaceDriver
-
-# (boolean value)
-#neutron_port_security_extension_enabled = true
-
-# Check for resources using the Liberty naming scheme when the modern name does
-# not exist. (boolean value)
-#legacy_fallback_mode = true
-
-# (integer value)
-#alive_timeout = 3
-
-# (integer value)
-#config_timeout = 90
-
-# list of drivers the rug process will load (list value)
-#enabled_drivers = router
-
-# The hostname Astara is running on (string value)
-#host = localhost
-
-# (boolean value)
-#plug_external_port = true
-
-# Name of Open vSwitch bridge to use (string value)
-#ovs_integration_bridge = br-int
-
-# Uses veth for an interface or not (boolean value)
-#ovs_use_veth = false
-
-# MTU setting for device. (string value)
-#network_device_mtu =
-
-# Exponent to determine number of hash partitions to use when distributing load
-# across Rugs. Larger values will result in more even distribution of load and
-# less load when rebalancing the ring, but more memory usage. Number of
-# partitions per rug is (2^hash_partition_exponent). This determines the
-# granularity of rebalancing: given 10 hosts and an exponent of 2, there
-# are 40 partitions in the ring. A few thousand partitions should make
-# rebalancing smooth in most cases. The default is suitable for up to a few
-# hundred rugs. Too many partitions have a CPU impact. (integer value)
-#hash_partition_exponent = 5
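# Worked example of the sizing rule above (an added illustration, not part
# of the shipped sample file): with hash_partition_exponent = 2 each
# orchestrator claims 2^2 = 4 partitions, so a ring of 10 hosts holds
# 10 * 4 = 40 partitions; the default of 5 gives 2^5 = 32 per host.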
-
-# (string value)
-#provider_rules_path = /etc/astara/provider_rules.json
-
-# (integer value)
-#asn = 64512
-
-# (integer value)
-#neighbor_asn = 64512
-
-# connection for AMQP server (string value)
-#amqp_url =
-
-# name of the exchange where we receive notifications (string value)
-#incoming_notifications_exchange = neutron
-
-# name of the exchange where we receive RPC calls (string value)
-#rpc_exchange = l3_agent_fanout
-
-# The name of the exchange used by Neutron for RPCs (string value)
-#neutron_control_exchange = neutron
-
-# The UUID for the router to debug (string value)
-#router_id =
-
-# the number of worker processes to run (integer value)
-#num_worker_processes = 16
-
-# Directory to scan for routers to ignore for debugging (string value)
-#ignored_router_directory = /etc/astara/ignored
-
-# warn if the event backlog for a tenant exceeds this value (integer value)
-#queue_warning_threshold = 100
-
-# Number of reboots to allow before assuming a router needs manual intervention
-# (integer value)
-#reboot_error_threshold = 5
-
-# the number of worker threads to run per process (integer value)
-#num_worker_threads = 4
-
-# IP address used by Nova metadata server. (string value)
-#nova_metadata_ip = 127.0.0.1
-
-# TCP Port used by Nova metadata server. (integer value)
-#nova_metadata_port = 8775
-
-# Shared secret to sign instance-id request (string value)
-# Deprecated group/name - [DEFAULT]/quantum_metadata_proxy_shared_secret
-#neutron_metadata_proxy_shared_secret =
-
-# seconds between health checks (integer value)
-#health_check_period = 60
-
-# The amount of time to wait for nova to hotplug/unplug networks from the
-# instances. (integer value)
-#hotplug_timeout = 10
-
-# (integer value)
-#boot_timeout = 600
-
-# Number of seconds to ignore new events when an instance goes into ERROR
-# state. (integer value)
-#error_state_cooldown = 30
-
-#
-# From oslo.log
-#
-
-# Print debugging output (set logging level to DEBUG instead of default INFO
-# level). (boolean value)
-#debug = false
-
-# If set to false, will disable INFO logging level, making WARNING the default.
-# (boolean value)
-# This option is deprecated for removal.
-# Its value may be silently ignored in the future.
-#verbose = true
-
-# The name of a logging configuration file. This file is appended to any
-# existing logging configuration files. For details about logging configuration
-# files, see the Python logging module documentation. Note that when logging
-# configuration files are used then all logging configuration is set in the
-# configuration file and other logging configuration options are ignored (for
-# example, log_format). (string value)
-# Deprecated group/name - [DEFAULT]/log_config
-#log_config_append =
-
-# DEPRECATED. A logging.Formatter log message format string which may use any
-# of the available logging.LogRecord attributes. This option is deprecated.
-# Please use logging_context_format_string and logging_default_format_string
-# instead. This option is ignored if log_config_append is set. (string value)
-#log_format =
-
-# Format string for %%(asctime)s in log records. Default: %(default)s . This
-# option is ignored if log_config_append is set. (string value)
-#log_date_format = %Y-%m-%d %H:%M:%S
-
-# (Optional) Name of log file to output to. If no default is set, logging will
-# go to stdout.
This option is ignored if log_config_append is set. (string -# value) -# Deprecated group/name - [DEFAULT]/logfile -#log_file = - -# (Optional) The base directory used for relative --log-file paths. This option -# is ignored if log_config_append is set. (string value) -# Deprecated group/name - [DEFAULT]/logdir -#log_dir = - -# (Optional) Uses logging handler designed to watch file system. When log file -# is moved or removed this handler will open a new log file with specified path -# instantaneously. It makes sense only if log-file option is specified and -# Linux platform is used. This option is ignored if log_config_append is set. -# (boolean value) -#watch_log_file = false - -# Use syslog for logging. Existing syslog format is DEPRECATED and will be -# changed later to honor RFC5424. This option is ignored if log_config_append -# is set. (boolean value) -#use_syslog = false - -# (Optional) Enables or disables syslog rfc5424 format for logging. If enabled, -# prefixes the MSG part of the syslog message with APP-NAME (RFC5424). The -# format without the APP-NAME is deprecated in Kilo, and will be removed in -# Mitaka, along with this option. This option is ignored if log_config_append -# is set. (boolean value) -# This option is deprecated for removal. -# Its value may be silently ignored in the future. -#use_syslog_rfc_format = true - -# Syslog facility to receive log lines. This option is ignored if -# log_config_append is set. (string value) -#syslog_log_facility = LOG_USER - -# Log output to standard error. This option is ignored if log_config_append is -# set. (boolean value) -#use_stderr = true - -# Format string to use for log messages with context. (string value) -#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s - -# Format string to use for log messages without context. (string value) -#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s - -# Data to append to log format when level is DEBUG. (string value) -#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d - -# Prefix each line of exception output with this format. (string value) -#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s - -# List of logger=LEVEL pairs. This option is ignored if log_config_append is -# set. (list value) -#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN - -# Enables or disables publication of error events. (boolean value) -#publish_errors = false - -# The format for an instance that is passed with the log message. (string -# value) -#instance_format = "[instance: %(uuid)s] " - -# The format for an instance UUID that is passed with the log message. (string -# value) -#instance_uuid_format = "[instance: %(uuid)s] " - -# Format string for user_identity field of the logging_context_format_string -# (string value) -#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s - -# Enables or disables fatal status of deprecations. 
(boolean value) -#fatal_deprecations = false - -# -# From oslo.messaging -# - -# Size of RPC connection pool. (integer value) -# Deprecated group/name - [DEFAULT]/rpc_conn_pool_size -#rpc_conn_pool_size = 30 - -# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. -# The "host" option should point or resolve to this address. (string value) -#rpc_zmq_bind_address = * - -# MatchMaker driver. (string value) -#rpc_zmq_matchmaker = redis - -# Type of concurrency used. Either "native" or "eventlet" (string value) -#rpc_zmq_concurrency = eventlet - -# Number of ZeroMQ contexts, defaults to 1. (integer value) -#rpc_zmq_contexts = 1 - -# Maximum number of ingress messages to locally buffer per topic. Default is -# unlimited. (integer value) -#rpc_zmq_topic_backlog = - -# Directory for holding IPC sockets. (string value) -#rpc_zmq_ipc_dir = /var/run/openstack - -# Name of this node. Must be a valid hostname, FQDN, or IP address. Must match -# "host" option, if running Nova. (string value) -#rpc_zmq_host = localhost - -# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq. -# (integer value) -#rpc_cast_timeout = 30 - -# The default number of seconds that poll should wait. Poll raises timeout -# exception when timeout expired. (integer value) -#rpc_poll_timeout = 1 - -# Configures zmq-messaging to use proxy with non PUB/SUB patterns. (boolean -# value) -#direct_over_proxy = true - -# Use PUB/SUB pattern for fanout methods. PUB/SUB always uses proxy. (boolean -# value) -#use_pub_sub = true - -# Minimal port number for random ports range. (port value) -# Minimum value: 1 -# Maximum value: 65535 -#rpc_zmq_min_port = 49152 - -# Maximal port number for random ports range. (integer value) -# Minimum value: 1 -# Maximum value: 65536 -#rpc_zmq_max_port = 65536 - -# Number of retries to find free port number before fail with ZMQBindError. -# (integer value) -#rpc_zmq_bind_port_retries = 100 - -# Host to locate redis. (string value) -#host = 127.0.0.1 - -# Use this port to connect to redis host. (port value) -# Minimum value: 1 -# Maximum value: 65535 -#port = 6379 - -# Password for Redis server (optional). (string value) -#password = - -# Size of executor thread pool. (integer value) -# Deprecated group/name - [DEFAULT]/rpc_thread_pool_size -#executor_thread_pool_size = 64 - -# The Drivers(s) to handle sending notifications. Possible values are -# messaging, messagingv2, routing, log, test, noop (multi valued) -# Deprecated group/name - [DEFAULT]/notification_driver -#driver = - -# A URL representing the messaging driver to use for notifications. If not set, -# we fall back to the same configuration used for RPC. (string value) -# Deprecated group/name - [DEFAULT]/notification_transport_url -#transport_url = - -# AMQP topic used for OpenStack notifications. (list value) -# Deprecated group/name - [rpc_notifier2]/topics -# Deprecated group/name - [DEFAULT]/notification_topics -#topics = notifications - -# Seconds to wait for a response from a call. (integer value) -#rpc_response_timeout = 60 - -# A URL representing the messaging driver to use and its full configuration. If -# not set, we fall back to the rpc_backend option and driver specific -# configuration. (string value) -#transport_url = - -# The messaging driver to use, defaults to rabbit. Other drivers include amqp -# and zmq. (string value) -#rpc_backend = rabbit - -# The default exchange under which topics are scoped. May be overridden by an -# exchange name specified in the transport_url option. 
(string value)
-#control_exchange = openstack
-
-
-[AGENT]
-
-#
-# From astara.agent
-#
-
-# (string value)
-#root_helper = sudo astara-rootwrap /etc/astara/rootwrap.conf
-
-
-[ceilometer]
-
-#
-# From astara.ceilometer
-#
-
-# Enable reporting metrics to ceilometer. (boolean value)
-#enabled = false
-
-# The name of the topic queue ceilometer consumes events from. (string value)
-#topic = notifications.info
-
-
-[coordination]
-
-#
-# From astara.coordination
-#
-
-# Whether to use an external coordination service for a cluster of astara-
-# orchestrator nodes. This may be disabled for single-node
-# astara-orchestrator environments. (boolean value)
-#enabled = false
-
-# URL of supported coordination service (string value)
-#url = memcached://localhost:11211
-
-# ID of coordination group to join. (string value)
-#group_id = astara.orchestrator
-
-# Interval (in seconds) for cluster heartbeats (integer value)
-#heartbeat_interval = 1
-
-
-[database]
-
-#
-# From oslo.db
-#
-
-# The file name to use with SQLite. (string value)
-# Deprecated group/name - [DEFAULT]/sqlite_db
-#sqlite_db = oslo.sqlite
-
-# If True, SQLite uses synchronous mode. (boolean value)
-# Deprecated group/name - [DEFAULT]/sqlite_synchronous
-#sqlite_synchronous = true
-
-# The back end to use for the database. (string value)
-# Deprecated group/name - [DEFAULT]/db_backend
-#backend = sqlalchemy
-
-# The SQLAlchemy connection string to use to connect to the database. (string
-# value)
-# Deprecated group/name - [DEFAULT]/sql_connection
-# Deprecated group/name - [DATABASE]/sql_connection
-# Deprecated group/name - [sql]/connection
-#connection =
-
-# The SQLAlchemy connection string to use to connect to the slave database.
-# (string value)
-#slave_connection =
-
-# The SQL mode to be used for MySQL sessions. This option, including the
-# default, overrides any server-set SQL mode. To use whatever SQL mode is set
-# by the server configuration, set this to no value. Example: mysql_sql_mode=
-# (string value)
-#mysql_sql_mode = TRADITIONAL
-
-# Timeout before idle SQL connections are reaped. (integer value)
-# Deprecated group/name - [DEFAULT]/sql_idle_timeout
-# Deprecated group/name - [DATABASE]/sql_idle_timeout
-# Deprecated group/name - [sql]/idle_timeout
-#idle_timeout = 3600
-
-# Minimum number of SQL connections to keep open in a pool. (integer value)
-# Deprecated group/name - [DEFAULT]/sql_min_pool_size
-# Deprecated group/name - [DATABASE]/sql_min_pool_size
-#min_pool_size = 1
-
-# Maximum number of SQL connections to keep open in a pool. (integer value)
-# Deprecated group/name - [DEFAULT]/sql_max_pool_size
-# Deprecated group/name - [DATABASE]/sql_max_pool_size
-#max_pool_size =
-
-# Maximum number of database connection retries during startup. Set to -1 to
-# specify an infinite retry count. (integer value)
-# Deprecated group/name - [DEFAULT]/sql_max_retries
-# Deprecated group/name - [DATABASE]/sql_max_retries
-#max_retries = 10
-
-# Interval between retries of opening a SQL connection. (integer value)
-# Deprecated group/name - [DEFAULT]/sql_retry_interval
-# Deprecated group/name - [DATABASE]/reconnect_interval
-#retry_interval = 10
-
-# If set, use this value for max_overflow with SQLAlchemy. (integer value)
-# Deprecated group/name - [DEFAULT]/sql_max_overflow
-# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
-#max_overflow =
-
-# Verbosity of SQL debugging information: 0=None, 100=Everything.
(integer -# value) -# Deprecated group/name - [DEFAULT]/sql_connection_debug -#connection_debug = 0 - -# Add Python stack traces to SQL as comment strings. (boolean value) -# Deprecated group/name - [DEFAULT]/sql_connection_trace -#connection_trace = false - -# If set, use this value for pool_timeout with SQLAlchemy. (integer value) -# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout -#pool_timeout = - -# Enable the experimental use of database reconnect on connection lost. -# (boolean value) -#use_db_reconnect = false - -# Seconds between retries of a database transaction. (integer value) -#db_retry_interval = 1 - -# If True, increases the interval between retries of a database operation up to -# db_max_retry_interval. (boolean value) -#db_inc_retry_interval = true - -# If db_inc_retry_interval is set, the maximum seconds between retries of a -# database operation. (integer value) -#db_max_retry_interval = 10 - -# Maximum retries in case of connection error or deadlock error before error is -# raised. Set to -1 to specify an infinite retry count. (integer value) -#db_max_retries = 20 - - -[keystone_authtoken] - -# -# From keystonemiddleware.auth_token -# - -# Complete public Identity API endpoint. (string value) -#auth_uri = - -# API version of the admin Identity API endpoint. (string value) -#auth_version = - -# Do not handle authorization requests within the middleware, but delegate the -# authorization decision to downstream WSGI components. (boolean value) -#delay_auth_decision = false - -# Request timeout value for communicating with Identity API server. (integer -# value) -#http_connect_timeout = - -# How many times are we trying to reconnect when communicating with Identity -# API Server. (integer value) -#http_request_max_retries = 3 - -# Env key for the swift cache. (string value) -#cache = - -# Required if identity server requires client certificate (string value) -#certfile = - -# Required if identity server requires client certificate (string value) -#keyfile = - -# A PEM encoded Certificate Authority to use when verifying HTTPs connections. -# Defaults to system CAs. (string value) -#cafile = - -# Verify HTTPS connections. (boolean value) -#insecure = false - -# The region in which the identity server can be found. (string value) -#region_name = - -# Directory used to cache files related to PKI tokens. (string value) -#signing_dir = - -# Optionally specify a list of memcached server(s) to use for caching. If left -# undefined, tokens will instead be cached in-process. (list value) -# Deprecated group/name - [DEFAULT]/memcache_servers -#memcached_servers = - -# In order to prevent excessive effort spent validating tokens, the middleware -# caches previously-seen tokens for a configurable duration (in seconds). Set -# to -1 to disable caching completely. (integer value) -#token_cache_time = 300 - -# Determines the frequency at which the list of revoked tokens is retrieved -# from the Identity service (in seconds). A high number of revocation events -# combined with a low cache duration may significantly reduce performance. -# (integer value) -#revocation_cache_time = 10 - -# (Optional) If defined, indicate whether token data should be authenticated or -# authenticated and encrypted. Acceptable values are MAC or ENCRYPT. If MAC, -# token data is authenticated (with HMAC) in the cache. If ENCRYPT, token data -# is encrypted and authenticated in the cache. If the value is not one of these -# options or empty, auth_token will raise an exception on initialization. 
-# (string value) -#memcache_security_strategy = - -# (Optional, mandatory if memcache_security_strategy is defined) This string is -# used for key derivation. (string value) -#memcache_secret_key = - -# (Optional) Number of seconds memcached server is considered dead before it is -# tried again. (integer value) -#memcache_pool_dead_retry = 300 - -# (Optional) Maximum total number of open connections to every memcached -# server. (integer value) -#memcache_pool_maxsize = 10 - -# (Optional) Socket timeout in seconds for communicating with a memcached -# server. (integer value) -#memcache_pool_socket_timeout = 3 - -# (Optional) Number of seconds a connection to memcached is held unused in the -# pool before it is closed. (integer value) -#memcache_pool_unused_timeout = 60 - -# (Optional) Number of seconds that an operation will wait to get a memcached -# client connection from the pool. (integer value) -#memcache_pool_conn_get_timeout = 10 - -# (Optional) Use the advanced (eventlet safe) memcached client pool. The -# advanced pool will only work under python 2.x. (boolean value) -#memcache_use_advanced_pool = false - -# (Optional) Indicate whether to set the X-Service-Catalog header. If False, -# middleware will not ask for service catalog on token validation and will not -# set the X-Service-Catalog header. (boolean value) -#include_service_catalog = true - -# Used to control the use and type of token binding. Can be set to: "disabled" -# to not check token binding. "permissive" (default) to validate binding -# information if the bind type is of a form known to the server and ignore it -# if not. "strict" like "permissive" but if the bind type is unknown the token -# will be rejected. "required" any form of token binding is needed to be -# allowed. Finally the name of a binding method that must be present in tokens. -# (string value) -#enforce_token_bind = permissive - -# If true, the revocation list will be checked for cached tokens. This requires -# that PKI tokens are configured on the identity server. (boolean value) -#check_revocations_for_cached = false - -# Hash algorithms to use for hashing PKI tokens. This may be a single algorithm -# or multiple. The algorithms are those supported by Python standard -# hashlib.new(). The hashes will be tried in the order given, so put the -# preferred one first for performance. The result of the first hash will be -# stored in the cache. This will typically be set to multiple values only while -# migrating from a less secure algorithm to a more secure one. Once all the old -# tokens are expired this option should be set to a single value for better -# performance. (list value) -#hash_algorithms = md5 - -# Prefix to prepend at the beginning of the path. Deprecated, use identity_uri. -# (string value) -#auth_admin_prefix = - -# Host providing the admin Identity API endpoint. Deprecated, use identity_uri. -# (string value) -#auth_host = 127.0.0.1 - -# Port of the admin Identity API endpoint. Deprecated, use identity_uri. -# (integer value) -#auth_port = 35357 - -# Protocol of the admin Identity API endpoint (http or https). Deprecated, use -# identity_uri. (string value) -#auth_protocol = https - -# Complete admin Identity API endpoint. This should specify the unversioned -# root endpoint e.g. https://localhost:35357/ (string value) -#identity_uri = - -# This option is deprecated and may be removed in a future release. 
Single -# shared secret with the Keystone configuration used for bootstrapping a -# Keystone installation, or otherwise bypassing the normal authentication -# process. This option should not be used, use `admin_user` and -# `admin_password` instead. (string value) -#admin_token = - -# Service username. (string value) -#admin_user = - -# Service user password. (string value) -#admin_password = - -# Service tenant name. (string value) -#admin_tenant_name = admin - -# Authentication type to load (unknown value) -# Deprecated group/name - [DEFAULT]/auth_plugin -#auth_type = - -# Config Section from which to load plugin specific options (unknown value) -#auth_section = - - -[loadbalancer] - -# -# From astara.loadbalancer -# - -# The image_uuid for loadbalancer instances. (string value) -#image_uuid = - -# The nova flavor id to use for loadbalancer instances (string value) -#instance_flavor = - -# The port on which the loadbalancer API service listens on loadbalancer -# appliances (integer value) -#mgt_service_port = 5000 - - -[matchmaker_redis] - -# -# From oslo.messaging -# - -# Host to locate redis. (string value) -#host = 127.0.0.1 - -# Use this port to connect to redis host. (port value) -# Minimum value: 1 -# Maximum value: 65535 -#port = 6379 - -# Password for Redis server (optional). (string value) -#password = - - -[oslo_messaging_amqp] - -# -# From oslo.messaging -# - -# address prefix used when sending to a specific server (string value) -# Deprecated group/name - [amqp1]/server_request_prefix -#server_request_prefix = exclusive - -# address prefix used when broadcasting to all servers (string value) -# Deprecated group/name - [amqp1]/broadcast_prefix -#broadcast_prefix = broadcast - -# address prefix when sending to any server in group (string value) -# Deprecated group/name - [amqp1]/group_request_prefix -#group_request_prefix = unicast - -# Name for the AMQP container (string value) -# Deprecated group/name - [amqp1]/container_name -#container_name = - -# Timeout for inactive connections (in seconds) (integer value) -# Deprecated group/name - [amqp1]/idle_timeout -#idle_timeout = 0 - -# Debug: dump AMQP frames to stdout (boolean value) -# Deprecated group/name - [amqp1]/trace -#trace = false - -# CA certificate PEM file to verify server certificate (string value) -# Deprecated group/name - [amqp1]/ssl_ca_file -#ssl_ca_file = - -# Identifying certificate PEM file to present to clients (string value) -# Deprecated group/name - [amqp1]/ssl_cert_file -#ssl_cert_file = - -# Private key PEM file used to sign cert_file certificate (string value) -# Deprecated group/name - [amqp1]/ssl_key_file -#ssl_key_file = - -# Password for decrypting ssl_key_file (if encrypted) (string value) -# Deprecated group/name - [amqp1]/ssl_key_password -#ssl_key_password = - -# Accept clients using either SSL or plain TCP (boolean value) -# Deprecated group/name - [amqp1]/allow_insecure_clients -#allow_insecure_clients = false - -# Space separated list of acceptable SASL mechanisms (string value) -# Deprecated group/name - [amqp1]/sasl_mechanisms -#sasl_mechanisms = - -# Path to directory that contains the SASL configuration (string value) -# Deprecated group/name - [amqp1]/sasl_config_dir -#sasl_config_dir = - -# Name of configuration file (without .conf suffix) (string value) -# Deprecated group/name - [amqp1]/sasl_config_name -#sasl_config_name = - -# User name for message broker authentication (string value) -# Deprecated group/name - [amqp1]/username -#username = - -# Password for message broker 
authentication (string value) -# Deprecated group/name - [amqp1]/password -#password = - - -[oslo_messaging_rabbit] - -# -# From oslo.messaging -# - -# Use durable queues in AMQP. (boolean value) -# Deprecated group/name - [DEFAULT]/amqp_durable_queues -# Deprecated group/name - [DEFAULT]/rabbit_durable_queues -#amqp_durable_queues = false - -# Auto-delete queues in AMQP. (boolean value) -# Deprecated group/name - [DEFAULT]/amqp_auto_delete -#amqp_auto_delete = false - -# SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and -# SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some -# distributions. (string value) -# Deprecated group/name - [DEFAULT]/kombu_ssl_version -#kombu_ssl_version = - -# SSL key file (valid only if SSL enabled). (string value) -# Deprecated group/name - [DEFAULT]/kombu_ssl_keyfile -#kombu_ssl_keyfile = - -# SSL cert file (valid only if SSL enabled). (string value) -# Deprecated group/name - [DEFAULT]/kombu_ssl_certfile -#kombu_ssl_certfile = - -# SSL certification authority file (valid only if SSL enabled). (string value) -# Deprecated group/name - [DEFAULT]/kombu_ssl_ca_certs -#kombu_ssl_ca_certs = - -# How long to wait before reconnecting in response to an AMQP consumer cancel -# notification. (floating point value) -# Deprecated group/name - [DEFAULT]/kombu_reconnect_delay -#kombu_reconnect_delay = 1.0 - -# How long to wait a missing client beforce abandoning to send it its replies. -# This value should not be longer than rpc_response_timeout. (integer value) -# Deprecated group/name - [DEFAULT]/kombu_reconnect_timeout -#kombu_missing_consumer_retry_timeout = 60 - -# Determines how the next RabbitMQ node is chosen in case the one we are -# currently connected to becomes unavailable. Takes effect only if more than -# one RabbitMQ node is provided in config. (string value) -# Allowed values: round-robin, shuffle -#kombu_failover_strategy = round-robin - -# The RabbitMQ broker address where a single node is used. (string value) -# Deprecated group/name - [DEFAULT]/rabbit_host -#rabbit_host = localhost - -# The RabbitMQ broker port where a single node is used. (port value) -# Minimum value: 1 -# Maximum value: 65535 -# Deprecated group/name - [DEFAULT]/rabbit_port -#rabbit_port = 5672 - -# RabbitMQ HA cluster host:port pairs. (list value) -# Deprecated group/name - [DEFAULT]/rabbit_hosts -#rabbit_hosts = $rabbit_host:$rabbit_port - -# Connect over SSL for RabbitMQ. (boolean value) -# Deprecated group/name - [DEFAULT]/rabbit_use_ssl -#rabbit_use_ssl = false - -# The RabbitMQ userid. (string value) -# Deprecated group/name - [DEFAULT]/rabbit_userid -#rabbit_userid = guest - -# The RabbitMQ password. (string value) -# Deprecated group/name - [DEFAULT]/rabbit_password -#rabbit_password = guest - -# The RabbitMQ login method. (string value) -# Deprecated group/name - [DEFAULT]/rabbit_login_method -#rabbit_login_method = AMQPLAIN - -# The RabbitMQ virtual host. (string value) -# Deprecated group/name - [DEFAULT]/rabbit_virtual_host -#rabbit_virtual_host = / - -# How frequently to retry connecting with RabbitMQ. (integer value) -#rabbit_retry_interval = 1 - -# How long to backoff for between retries when connecting to RabbitMQ. (integer -# value) -# Deprecated group/name - [DEFAULT]/rabbit_retry_backoff -#rabbit_retry_backoff = 2 - -# Maximum number of RabbitMQ connection retries. Default is 0 (infinite retry -# count). 
(integer value)
-# Deprecated group/name - [DEFAULT]/rabbit_max_retries
-#rabbit_max_retries = 0
-
-# Use HA queues in RabbitMQ (x-ha-policy: all). If you change this option, you
-# must wipe the RabbitMQ database. (boolean value)
-# Deprecated group/name - [DEFAULT]/rabbit_ha_queues
-#rabbit_ha_queues = false
-
-# Number of seconds after which the Rabbit broker is considered down if
-# heartbeat's keep-alive fails (0 disables the heartbeat). EXPERIMENTAL (integer
-# value)
-#heartbeat_timeout_threshold = 60
-
-# How many times during the heartbeat_timeout_threshold we check the
-# heartbeat. (integer value)
-#heartbeat_rate = 2
-
-# Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake (boolean value)
-# Deprecated group/name - [DEFAULT]/fake_rabbit
-#fake_rabbit = false
-
-
-[pez]
-
-#
-# From astara.pez
-#
-
-# How many pre-allocated hot standby nodes to keep in the pez pool. (integer
-# value)
-#pool_size = 1
-
-# Image uuid to boot. (string value)
-#image_uuid =
-
-# Nova flavor to boot (string value)
-#flavor =
-
-# (string value)
-#rpc_topic = astara-pez
-
-
-[router]
-
-#
-# From astara.router
-#
-
-# The image_uuid for router instances. (string value)
-# Deprecated group/name - [DEFAULT]/router_image_uuid
-#image_uuid =
-
-# The nova flavor id to use for router instances (string value)
-# Deprecated group/name - [DEFAULT]/router_instance_flavor
-#instance_flavor =
-
-# The port on which the router API service listens on router appliances
-# (integer value)
-# Deprecated group/name - [DEFAULT]/akanda_mgt_service_port
-#mgt_service_port = 5000
diff --git a/etc/oslo-config-generator/orchestrator.ini b/etc/oslo-config-generator/orchestrator.ini
deleted file mode 100644
index 86d71fe7..00000000
--- a/etc/oslo-config-generator/orchestrator.ini
+++ /dev/null
@@ -1,16 +0,0 @@
-[DEFAULT]
-output_file = etc/orchestrator.ini.sample
-wrap_width = 79
-
-namespace = astara
-namespace = astara.rabbit
-namespace = astara.agent
-namespace = astara.router
-namespace = astara.loadbalancer
-namespace = astara.ceilometer
-namespace = astara.coordination
-namespace = astara.pez
-namespace = oslo.log
-namespace = oslo.db
-namespace = oslo.messaging
-namespace = keystonemiddleware.auth_token
diff --git a/etc/oslo-config-generator/test.conf b/etc/oslo-config-generator/test.conf
deleted file mode 100644
index 918a91df..00000000
--- a/etc/oslo-config-generator/test.conf
+++ /dev/null
@@ -1,5 +0,0 @@
-[DEFAULT]
-output_file = astara/test/functional/test.conf.sample
-wrap_width = 79
-
-namespace = astara.test.functional
diff --git a/etc/provider_rules.json b/etc/provider_rules.json
deleted file mode 100644
index 8f99b7c3..00000000
--- a/etc/provider_rules.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{"labels": {"ext": ["192.168.57.0/24"]},
- "prerules": [],
- "postrules": []
-}
diff --git a/etc/rootwrap.conf b/etc/rootwrap.conf
deleted file mode 100644
index a942d3cb..00000000
--- a/etc/rootwrap.conf
+++ /dev/null
@@ -1,27 +0,0 @@
-# Configuration for astara-rootwrap
-# This file should be owned by (and only-writeable by) the root user
-
-[DEFAULT]
-# List of directories to load filter definitions from (separated by ',').
-# These directories MUST all be only writeable by root !
-filters_path=/etc/astara/rootwrap.d
-
-# List of directories to search executables in, in case filters do not
-# explicitly specify a full path (separated by ',')
-# If not specified, defaults to system PATH environment variable.
-# These directories MUST all be only writeable by root !
-exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/sbin,/usr/local/bin
-
-# Enable logging to syslog
-# Default value is False
-use_syslog=False
-
-# Which syslog facility to use.
-# Valid values include auth, authpriv, syslog, local0, local1...
-# Default value is 'syslog'
-syslog_log_facility=syslog
-
-# Which messages to log.
-# INFO means log all usage
-# ERROR means only log unsuccessful attempts
-syslog_log_level=ERROR
diff --git a/etc/rootwrap.d/network.filters b/etc/rootwrap.d/network.filters
deleted file mode 100644
index cbdf736d..00000000
--- a/etc/rootwrap.d/network.filters
+++ /dev/null
@@ -1,18 +0,0 @@
-# astara-rootwrap command filters for astara
-# This file should be owned by (and only-writeable by) the root user
-
-[Filters]
-# astara/common/linux/ip_lib.py: 'ip', 'link', ..
-# astara/common/linux/ip_lib.py: 'ip', 'addr', ..
-# astara/common/linux/ip_lib.py: 'ip', 'route', ..
-ip: IpFilter, ip, root
-
-# astara/common/linux/ip_lib.py: 'ip', 'netns', 'exec', 'namespace', 'ip', ..
-ip_exec: IpNetnsExecFilter, ip, root
-
-# astara/common/linux/interface.py: 'ovs-vsctl'..
-ovs-vsctl: CommandFilter, ovs-vsctl, root
-
-# astara/common/linux/ovs_lib.py: 'ovs-ofctl'..
-ovs-ofctl: CommandFilter, ovs-ofctl, root
-
diff --git a/releasenotes/notes/.placeholder b/releasenotes/notes/.placeholder
deleted file mode 100644
index e69de29b..00000000
diff --git a/releasenotes/notes/add_variable_mtu_support-9aae24343ee58aed.yaml b/releasenotes/notes/add_variable_mtu_support-9aae24343ee58aed.yaml
deleted file mode 100644
index 4089a065..00000000
--- a/releasenotes/notes/add_variable_mtu_support-9aae24343ee58aed.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-fixes:
-  - Bug `1539786 `_ Variable MTU is now supported by the orchestrator and passed to the appliance. This requires Neutron with the MTU extension enabled.
diff --git a/releasenotes/notes/astara-debug-cmd-fixed-2cf74ccd82f21fe0.yaml b/releasenotes/notes/astara-debug-cmd-fixed-2cf74ccd82f21fe0.yaml
deleted file mode 100644
index 454d87f8..00000000
--- a/releasenotes/notes/astara-debug-cmd-fixed-2cf74ccd82f21fe0.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-fixes:
-  - Bug `1524979 `_, Bug `1528338 `_ \- astara-debug-router command has been fixed
diff --git a/releasenotes/notes/astara-rootwrap-59731966fb3a0117.yaml b/releasenotes/notes/astara-rootwrap-59731966fb3a0117.yaml
deleted file mode 100644
index b0190ab0..00000000
--- a/releasenotes/notes/astara-rootwrap-59731966fb3a0117.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-features:
-  - >
-    `Blueprint astara-rootwrap `_ \-
-    We replace shelling out directly to sudo with the oslo.rootwrap library.
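The rootwrap filters above work together with the ``root_helper`` option from
``orchestrator.ini``. As a rough sketch (simplified from the execute helper in
``astara/common/linux/utils.py``; the invocation below is purely
illustrative), a command is prefixed with the helper, and astara-rootwrap then
vets it against the filter files before running it as root:

.. code-block:: python

    # Sketch: run 'ip addr show' through the rootwrap helper. The IpFilter
    # entry in network.filters is what permits 'ip' to run as root.
    import subprocess

    ROOT_HELPER = 'sudo astara-rootwrap /etc/astara/rootwrap.conf'

    def execute(cmd, root_helper=ROOT_HELPER):
        full_cmd = root_helper.split() + cmd
        return subprocess.check_output(full_cmd)

    print(execute(['ip', 'addr', 'show']))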
diff --git a/releasenotes/notes/astara_browse_rebuild_fix-71b9c6e76a80dc00.yaml b/releasenotes/notes/astara_browse_rebuild_fix-71b9c6e76a80dc00.yaml
deleted file mode 100644
index e8806d7f..00000000
--- a/releasenotes/notes/astara_browse_rebuild_fix-71b9c6e76a80dc00.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-fixes:
-  - Bug `1537500 `_
-    Fixes tenant_id issue when rebuilding router from the astara-ctl browser
diff --git a/releasenotes/notes/autogen-config-file-568ecfdaba9e316d.yaml b/releasenotes/notes/autogen-config-file-568ecfdaba9e316d.yaml
deleted file mode 100644
index 1f3e1175..00000000
--- a/releasenotes/notes/autogen-config-file-568ecfdaba9e316d.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-features:
-  - >
-    `blueprint autogen-astara-conf-file `_ \-
-    This switches astara to use oslo-config-generator, where the contents of our sample configuration file are
-    configured using a configuration file in etc/oslo-config-generator/.
diff --git a/releasenotes/notes/byonf-60787a24e83c3341.yaml b/releasenotes/notes/byonf-60787a24e83c3341.yaml
deleted file mode 100644
index 8d56d470..00000000
--- a/releasenotes/notes/byonf-60787a24e83c3341.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-features:
-  - Operators may now associate custom drivers and image IDs with
-    tenants, via the Neutron API, to override global configuration, providing
-    support for dynamic user-provided network functions. To enable this feature,
-    set ``enable_byonf=True`` in ``orchestrator.ini`` and be sure the version
-    of ``astara-neutron`` loaded into Neutron supports the BYONF API.
diff --git a/releasenotes/notes/cleanup_sm_management_post_rebalance-3e7c64785679f239.yaml b/releasenotes/notes/cleanup_sm_management_post_rebalance-3e7c64785679f239.yaml
deleted file mode 100644
index acb6f773..00000000
--- a/releasenotes/notes/cleanup_sm_management_post_rebalance-3e7c64785679f239.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-fixes:
-  - Bug `1527396 `_
-    Fixes an issue where, after a cluster rebalance, state machines were created
-    across all workers; they are now created only on a single target worker.
diff --git a/releasenotes/notes/deprecated-amqp-url-6e988fd7d33a9305.yaml b/releasenotes/notes/deprecated-amqp-url-6e988fd7d33a9305.yaml
deleted file mode 100644
index 27395b6f..00000000
--- a/releasenotes/notes/deprecated-amqp-url-6e988fd7d33a9305.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-deprecations:
-  - The ``amqp_url`` config option has been deprecated in favor of using ``oslo.messaging`` backend-specific configuration. See the sample configuration file for an example. The pre-Liberty rabbit options have been removed.
-fixes:
-  - Bug `1524595 `_ astara-ctl warning message for deprecated AMQP configuration
diff --git a/releasenotes/notes/devstack_default_external-d5b9e5e830d5a8df.yaml b/releasenotes/notes/devstack_default_external-d5b9e5e830d5a8df.yaml
deleted file mode 100644
index 03c6e329..00000000
--- a/releasenotes/notes/devstack_default_external-d5b9e5e830d5a8df.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-prelude: >
-    Astara has dropped a number of legacy convenience hooks available in
-    earlier releases. The hooks complicated automation and created the
-    potential for a mismatch between the end state and the desired state.
-fixes:
-  - Bug `1539345 `_
-    auto-added resources break interoperability
-upgrade:
-  - Astara will no longer automatically add the external gateway to a router.
-    Previous usage was causing issues with automation tooling.
-  - Astara no longer requires the external network and subnet id to be known.
-    In production deployments this step was handled externally and the
-    internal hooks were often disabled.
-critical:
-  - The devstack plugin no longer creates the external network as before and
-    instead follows the setup used for the reference implementation.
diff --git a/releasenotes/notes/dynamic-mgt-port-86d4b9f780fa3d78.yaml b/releasenotes/notes/dynamic-mgt-port-86d4b9f780fa3d78.yaml
deleted file mode 100644
index 77545ce8..00000000
--- a/releasenotes/notes/dynamic-mgt-port-86d4b9f780fa3d78.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-fixes:
-  - Bug `1524068 `_ Local management port addresses are now allocated from the management subnet rather than using a hard-coded address, fixing Neutron port address conflicts when clustering astara-orchestrators.
diff --git a/releasenotes/notes/fix-astara-ctl-ssh-9be434ac84c06c79.yaml b/releasenotes/notes/fix-astara-ctl-ssh-9be434ac84c06c79.yaml
deleted file mode 100644
index df2286e0..00000000
--- a/releasenotes/notes/fix-astara-ctl-ssh-9be434ac84c06c79.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-fixes:
-  - Bug `152492 `_
-    Fixed astara-ctl ssh command
diff --git a/releasenotes/notes/ha_appliances-c2048033c2be6d51.yaml b/releasenotes/notes/ha_appliances-c2048033c2be6d51.yaml
deleted file mode 100644
index 7d26de87..00000000
--- a/releasenotes/notes/ha_appliances-c2048033c2be6d51.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-features:
-  - Astara now supports orchestrating clustered pairs of appliance VMs for
-    Neutron routers that have been set to highly available.
diff --git a/releasenotes/notes/ignore_zk_leadership_member-2cf3736e67d19b27.yaml b/releasenotes/notes/ignore_zk_leadership_member-2cf3736e67d19b27.yaml
deleted file mode 100644
index 96997617..00000000
--- a/releasenotes/notes/ignore_zk_leadership_member-2cf3736e67d19b27.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-fixes:
-  - Bug `1535857 `_ The additional \"leadership\" member reported by zookeeper is now ignored to avoid hashing resources to a non-existent node.
diff --git a/releasenotes/notes/initial-note-fd05752e5f3b87fb.yaml b/releasenotes/notes/initial-note-fd05752e5f3b87fb.yaml
deleted file mode 100644
index e74cc4d5..00000000
--- a/releasenotes/notes/initial-note-fd05752e5f3b87fb.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-prelude: Astara Mitaka Series Release v8.0.0.
-
diff --git a/releasenotes/notes/invalidate-deleted-resources-from-cache-6636d39c9f173287.yaml b/releasenotes/notes/invalidate-deleted-resources-from-cache-6636d39c9f173287.yaml
deleted file mode 100644
index bd89f1f2..00000000
--- a/releasenotes/notes/invalidate-deleted-resources-from-cache-6636d39c9f173287.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-fixes:
-  - Bug `1531597 `_ \- Deleted resources are properly invalidated from the local tenant resource cache
diff --git a/releasenotes/notes/push_orchestrator_appliance_config-33552cd22b549847.yaml b/releasenotes/notes/push_orchestrator_appliance_config-33552cd22b549847.yaml
deleted file mode 100644
index b40e9e3a..00000000
--- a/releasenotes/notes/push_orchestrator_appliance_config-33552cd22b549847.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-features:
-  - >
-    The orchestrator now pushes local orchestrator-specific configuration into the appliance, allowing
-    services like the metadata proxy to be configured specifically for the current cluster layout.
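The HA appliances note above pairs with the ``ha=True`` router flag mentioned
in the orchestrator docs. As a rough sketch (credentials and names below are
illustrative only, and the flag requires the HA support described in that
note), a router can be marked for clustered appliances via
python-neutronclient:

.. code-block:: python

    # Sketch: create a Neutron router flagged ha=True so the orchestrator
    # builds a clustered pair of appliance VMs for it. Dummy credentials.
    from neutronclient.v2_0 import client

    neutron = client.Client(username='admin',
                            password='secret',
                            tenant_name='admin',
                            auth_url='http://127.0.0.1:5000/v2.0')
    neutron.create_router({'router': {'name': 'ha-router', 'ha': True}})

    # An existing router can be updated the same way and then rebuilt,
    # e.g. with: astara-ctl router rebuild <router-id>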
diff --git a/releasenotes/source/_static/.placeholder b/releasenotes/source/_static/.placeholder deleted file mode 100644 index e69de29b..00000000 diff --git a/releasenotes/source/_templates/.placeholder b/releasenotes/source/_templates/.placeholder deleted file mode 100644 index e69de29b..00000000 diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py deleted file mode 100644 index 33ae2dfa..00000000 --- a/releasenotes/source/conf.py +++ /dev/null @@ -1,273 +0,0 @@ -# -*- coding: utf-8 -*- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Astara Release Notes documentation build configuration file, created by -# sphinx-quickstart on Tue Nov 3 17:40:50 2015. -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# sys.path.insert(0, os.path.abspath('.')) - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -# needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - 'oslosphinx', - 'reno.sphinxext', -] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'Astara Release Notes' -copyright = u'2015, Astara Developers' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -from astara.version import version_info as astara_version -# The full version, including alpha/beta/rc tags. -release = astara_version.version_string_with_vcs() -# The short X.Y version. -version = astara_version.canonical_version_string() - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. 
-exclude_patterns = [] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = 'default' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. 
The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = 'AstaraReleaseNotesdoc' - - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # 'papersize': 'letterpaper', - - # The font size ('10pt', '11pt' or '12pt'). - # 'pointsize': '10pt', - - # Additional stuff for the LaTeX preamble. - # 'preamble': '', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ('index', 'AstaraReleaseNotes.tex', u'Astara Release Notes Documentation', - u'Astara Developers', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ('index', 'astarareleasenotes', u'Astara Release Notes Documentation', - [u'Astara Developers'], 1) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ('index', 'AstaraReleaseNotes', u'Astara Release Notes Documentation', - u'Astara Developers', 'AstaraReleaseNotes', - 'One line description of project.', - 'Miscellaneous'), -] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst deleted file mode 100644 index d2e040d8..00000000 --- a/releasenotes/source/index.rst +++ /dev/null @@ -1,8 +0,0 @@ -====================== - Astara Release Notes -====================== - -.. toctree:: - :maxdepth: 1 - - mitaka diff --git a/releasenotes/source/mitaka.rst b/releasenotes/source/mitaka.rst deleted file mode 100644 index 2e0341f7..00000000 --- a/releasenotes/source/mitaka.rst +++ /dev/null @@ -1,5 +0,0 @@ -================================================ - Astara Mitaka Series Release Notes -================================================ - -.. 
release-notes:: diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index dbcbe534..00000000 --- a/requirements.txt +++ /dev/null @@ -1,27 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. -alembic>=0.8.4 # MIT -eventlet!=0.18.3,>=0.18.2 # MIT -netaddr!=0.7.16,>=0.7.13 # BSD -httplib2>=0.7.5 # MIT -python-neutronclient>=5.1.0 # Apache-2.0 -oslo.concurrency>=3.8.0 # Apache-2.0 -oslo.config>=3.14.0 # Apache-2.0 -oslo.context>=2.9.0 # Apache-2.0 -oslo.db!=4.13.1,!=4.13.2,>=4.10.0 # Apache-2.0 -oslo.i18n>=2.1.0 # Apache-2.0 -oslo.log>=3.11.0 # Apache-2.0 -oslo.messaging>=5.2.0 # Apache-2.0 -oslo.serialization>=1.10.0 # Apache-2.0 -oslo.utils>=3.16.0 # Apache-2.0 -oslo.rootwrap>=5.0.0 # Apache-2.0 -oslo.service>=1.10.0 # Apache-2.0 -WebOb>=1.2.3 # MIT -python-novaclient!=2.33.0,>=2.29.0 # Apache-2.0 -cliff>=2.2.0 # Apache-2.0 -six>=1.9.0 # MIT -tooz>=1.28.0 # Apache-2.0 -pymemcache!=1.3.0,>=1.2.9 # Apache 2.0 License -keystonemiddleware!=4.5.0,>=4.2.0 # Apache-2.0 -python-keystoneclient!=2.1.0,>=2.0.0 # Apache-2.0 diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index 29ae9237..00000000 --- a/setup.cfg +++ /dev/null @@ -1,80 +0,0 @@ -[metadata] -name = astara -summary = Astara Network Orchestrator -description-file = - README.md -author = OpenStack -author-email = openstack-dev@lists.openstack.org -home-page = http://github.com/openstack/astara -classifier = - Environment :: OpenStack - Intended Audience :: Developers - Intended Audience :: Information Technology - Intended Audience :: System Administrators - License :: OSI Approved :: Apache Software License - Operating System :: POSIX :: Linux - Programming Language :: Python - Programming Language :: Python :: 2 - Programming Language :: Python :: 2.7 - -[files] -packages = - astara - -[global] -setup-hooks = - pbr.hooks.setup_hook - -[entry_points] -console_scripts = - akanda-rug-service=astara.main:main - akanda-pez-service=astara.pez.service:main - akanda-debug-router=astara.debug:debug_one_router - akanda-rug-dbsync=astara.db.sqlalchemy.dbsync:main - rug-ctl=astara.cli.main:main - astara-orchestrator=astara.main:main - astara-pez-service=astara.pez.service:main - astara-debug-router=astara.debug:debug_one_router - astara-dbsync=astara.db.sqlalchemy.dbsync:main - astara-ctl=astara.cli.main:main - astara-rootwrap=oslo_rootwrap.cmd:main -astara.cli = - config reload=astara.cli.config:ConfigReload - - resource debug=astara.cli.resource:ResourceDebug - resource manage=astara.cli.resource:ResourceManage - resource update=astara.cli.resource:ResourceUpdate - resource rebuild=astara.cli.resource:ResourceRebuild - - # NOTE(adam_g): The 'router' commands are deprecated in favor - # of the generic 'resource' commands and can be dropped in M. 
-    router debug=astara.cli.router:RouterDebug
-    router manage=astara.cli.router:RouterManage
-    router update=astara.cli.router:RouterUpdate
-    router rebuild=astara.cli.router:RouterRebuild
-    tenant debug=astara.cli.tenant:TenantDebug
-    tenant manage=astara.cli.tenant:TenantManage
-    workers debug=astara.cli.worker:WorkerDebug
-    global debug=astara.cli.global_debug:GlobalDebug
-    browse=astara.cli.browse:BrowseRouters
-    poll=astara.cli.poll:Poll
-    ssh=astara.cli.router:RouterSSH
-oslo.config.opts =
-    astara = astara.opts:list_opts
-    astara.ceilometer = astara.opts:list_ceilometer_opts
-    astara.agent = astara.opts:list_agent_opts
-    astara.coordination = astara.opts:list_coordination_opts
-    astara.pez = astara.opts:list_pez_opts
-    astara.router = astara.opts:list_router_opts
-    astara.loadbalancer = astara.opts:list_loadbalancer_opts
-    astara.test.functional = astara.test.functional.config:list_opts
-
-[build_sphinx]
-all_files = 1
-build-dir = doc/build
-source-dir = doc/source
-
-[nosetests]
-verbosity = 2
-detailed-errors = 1
-cover-package = astara
diff --git a/setup.py b/setup.py
deleted file mode 100644
index 782bb21f..00000000
--- a/setup.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
-import setuptools
-
-# In python < 2.7.4, a lazy loading of package `pbr` will break
-# setuptools if some other modules registered functions in `atexit`.
-# solution from: http://bugs.python.org/issue15881#msg170215
-try:
-    import multiprocessing  # noqa
-except ImportError:
-    pass
-
-setuptools.setup(
-    setup_requires=['pbr>=1.8'],
-    pbr=True)
diff --git a/specs/README.rst b/specs/README.rst
deleted file mode 100644
index 7a520d86..00000000
--- a/specs/README.rst
+++ /dev/null
@@ -1,35 +0,0 @@
-OpenStack Akanda Specifications
-===============================
-
-This directory structure is used to hold approved design specifications for
-additions to the Akanda project. Reviews of the specs are done in gerrit,
-using a similar workflow to how we review and merge changes to the code
-itself.
-
-The layout of this repository is::
-
-  specs/<release>/
-
-You can find an example spec in `specs/template.rst`. A
-skeleton that contains all the sections required for a spec
-file is located in `specs/skeleton.rst` and can
-be copied, then filled in with the details of a new blueprint for
-convenience.
-
-Specifications are proposed for a given release by adding them to the
-`specs/<release>` directory and posting them for review. The implementation
-status of a blueprint for a given release can be found by looking at the
-blueprint in launchpad. Not all approved blueprints will get fully implemented.
-
-Specifications have to be re-proposed for every release. The review may be
-quick, but even if something was previously approved, it should be re-reviewed
-to make sure it still makes sense as written.
-
-Please note, Launchpad blueprints are still used for tracking the
-current status of blueprints. For more information, see::
-
-  https://wiki.openstack.org/wiki/Blueprints
-  http://blueprints.launchpad.net/akanda
-
-For more information about working with gerrit, see::
-
-  http://docs.openstack.org/infra/manual/developers.html#development-workflow
diff --git a/specs/kilo/ci-updates.rst b/specs/kilo/ci-updates.rst
deleted file mode 100644
index ee6c8d15..00000000
--- a/specs/kilo/ci-updates.rst
+++ /dev/null
@@ -1,191 +0,0 @@
-..
-  This work is licensed under a Creative Commons Attribution 3.0 Unported
-  License.
-
-  http://creativecommons.org/licenses/by/3.0/legalcode
-
-
-Akanda CI updates for Kilo
-==========================
-
-Problem Description
-===================
-
-We build lots of interconnected things but don't test any of them. We
-should be employing pre-commit testing similar to other projects to ensure
-users get something that's not broken when deploying from master of git
-repositories or generated tarballs and images.
-
-Proposed Change
-===============
-
-All changes to Akanda projects should go through regular check and gate
-phases that test a deployment containing the proposed code changes. This
-includes changes to Akanda code as well as supporting things like its devstack
-code and ``akanda-appliance-builder``. We can leverage devstack, tempest
-and diskimage-builder to do this and create a generic Akanda integration
-testing job that can be added to the pipelines of relevant projects. We should
-also be running standard unit test coverage and pep8 checks here, too.
-
-For code that runs in the Akanda appliance VM or code that is used to build
-said image, we should ensure that tests run against proposed changes and not
-static, pre-built appliance images. That is, runs that are testing changes
-to ``akanda-appliance`` should build an entirely new appliance VM image and
-use that for their integration tests instead of pulling a pre-built image that
-does not contain the code under review.
-
-Additionally, we should be archiving the results of changes to these
-appliance-related repositories as a 'latest' image. That is, if someone
-lands a change to ``akanda-appliance``, we should build and archive a
-VM image in a known location on the internet. This will speed up other
-tests that do not need to build a new image but should run against the
-latest version, and also avoid forcing users to needlessly build images.
-
-For changes that do not modify the appliance code or the tooling used to build
-the image, tests should run with a pre-built image. This can be either a
-'latest' image or a released, versioned image.
-
-One question at this point is where we run the Tempest jobs. These usually
-take between 30min-1hr to complete and the nodes that run them in the main
-OpenStack gate are a limited resource. We may need to maintain our own third
-party CI infrastructure to do this. TBD.
-
-Data Model Impact
------------------
-
-None
-
-REST API Impact
----------------
-
-None
-
-Security Impact
----------------
-
-None
-
-Notifications Impact
---------------------
-
-None
-
-Other End User Impact
----------------------
-
-None
-
-Performance Impact
-------------------
-
-None
-
-Other Deployer Impact
----------------------
-
-None
-
-Developer Impact
-----------------
-
-Developers hoping to land code in any of the Akanda repositories will need to
-ensure their code passes all gate tests before it can land.
-
-Community Impact
-----------------
-
-This may make landing changes a bit slower but should improve the overall
-quality and health of the Akanda repositories.
-
-
-Alternatives
-------------
-
-
-Implementation
-==============
-
-Assignee(s)
------------
-
-
-Work Items
-----------
-
-* Enable pep8 and unit test jobs against relevant Akanda repositories.
-
-* Move existing devstack code out of ``http://github.com/dreamhost/akanda-devstack.git``
-  and into a proper gerrit-managed Akanda repository in the stackforge namespace.
-
-* Complete the diskimage-builder support that currently exists in
-  ``http://github.com/stackforge/akanda-appliance-builder.git``
-
-* Update devstack code to either pull a pre-built Akanda appliance image from a
-  known URL or to build one from source for use in the test run.
-
-* Create a generic ``(check|gate)-dsvm-tempest-akanda`` job that spins up the
-  Akanda devstack deployment and runs a subset of Tempest tests against it.
-
-* Identify the subset of Tempest tests we care to run.
-
-* Sync with openstack-infra and determine how and where these integration test
-  jobs will run.
-
-* Run the devstack job against changes to ``akanda-appliance`` or
-  ``akanda-appliance-builder`` with a configuration such that the appliance
-  image will be built from source including the patch under review.
-
-* Set up infrastructure to publish a new appliance image
-  (ie, akanda-appliance-latest.qcow2) to a known location on the internet
-  after code lands in ``akanda-appliance`` or ``akanda-appliance-builder``.
-
-* Run the devstack job against all other relevant akanda repositories with a
-  configuration such that a pre-built appliance image from a known location on
-  the internet is used. Ideally, this will be the image produced from changes
-  to the appliance repositories (ie, akanda-appliance-latest.qcow2).
-
-Dependencies
-============
-
-None
-
-Testing
-=======
-
-Tempest Tests
--------------
-
-n/a
-
-Functional Tests
-----------------
-
-n/a
-
-
-API Tests
----------
-
-n/a
-
-Documentation Impact
-====================
-
-User Documentation
-------------------
-
-Should be updated to reflect the new home of the devstack code and the proper
-way to deploy it.
-
-Developer Documentation
------------------------
-
-Should be updated to reflect the new home of the devstack code and the proper
-way to deploy it.
-
-References
-==========
-
-None
diff --git a/specs/kilo/skeleton.rst b/specs/kilo/skeleton.rst
deleted file mode 120000
index c1f175df..00000000
--- a/specs/kilo/skeleton.rst
+++ /dev/null
@@ -1 +0,0 @@
-../skeleton.rst
\ No newline at end of file
diff --git a/specs/liberty/doc-updates.rst b/specs/liberty/doc-updates.rst
deleted file mode 100644
index aaa46fa1..00000000
--- a/specs/liberty/doc-updates.rst
+++ /dev/null
@@ -1,192 +0,0 @@
-..
-  This work is licensed under a Creative Commons Attribution 3.0 Unported
-  License.
-
-  http://creativecommons.org/licenses/by/3.0/legalcode
-
-
-Liberty release documentation updates
-=====================================
-
-
-Problem Description
-===================
-
-The documentation needs to be easy for new users and contributors while
-following similar OpenStack docs structure and conventions.
-
-
-Proposed Change
-===============
-
-Organize the documentation around four sections: What Is Akanda, Installing
-Akanda, Operating Akanda, and Akanda Developer Guide.
-
-This change will make the Akanda documentation [1] read similarly to the
-existing OpenStack documentation [2]. This will also prepare the Akanda
-documentation for merging with the OpenStack documentation.
-
-The What Is Akanda section will hold the existing High Level Architecture,
-Service VM Orchestration and Management, and The Service VM sections. In these
-pages, VM will be renamed to Instance. We will add user documentation
-demonstrating Akanda, explaining how it orchestrates network services, and
-comparing (or not) Akanda to other SDN options. We will add some details
-around EW and NS frame/packet flow between compute nodes, make IPv6 support
-very clear and called out, and explain the driver concept and how it will
-make support of new Neutron Advanced Services easier. We will additionally
-provide an understanding of how Akanda integrates with Neutron. We will say
-all of this without duplicating any of the existing OpenStack documentation.
-
-The Installing Akanda section will hold the existing Akanda Developer
-Quickstart. We will add instructions for installing from tarballs, from
-source, and eventually from distribution packages. Known good configurations
-will also be part of this section.
-
-Operating Akanda will hold the existing Operation and Deployment and
-Configuration Options sections. We will add the training material here. We
-will need to add details on dynamic routing support and on how the
-configuration drift support works and is managed, and link to supporting ML2
-drivers like linuxbridge and OVS, making it clear how Akanda supports common
-Neutron configurations and configuration changes. We will add details on
-supporting VXLAN overlays and Lightweight Network Virtualization (LNV)
-(Hierarchical Port Binding) with Akanda.
-
-The Akanda Developer Guide will hold the details on setting up the developer
-environment, testing code locally, and explaining the CI tests, along with
-some references to Neutron dependencies. This entire section will move to the
-Akanda developer reference section here [3], once the Akanda project is
-accepted into the OpenStack org repo.
-
-This spec also includes the use of docstrings in the code. We will start with
-updating the RUG code with docstrings, as it is the most critical.
-
-
-Data Model Impact
------------------
-
-n/a
-
-
-REST API Impact
----------------
-
-n/a
-
-
-Security Impact
----------------
-
-n/a
-
-
-Notifications Impact
---------------------
-
-n/a
-
-
-Other End User Impact
----------------------
-
-n/a
-
-
-Performance Impact
-------------------
-
-n/a
-
-
-Other Deployer Impact
----------------------
-
-n/a
-
-
-Developer Impact
-----------------
-
-Updating the documentation structure will make it easier for new contributors
-to join the Akanda project. As Akanda joins the OpenStack org repo structure,
-it will make setting up the devref material very easy.
-
-
-Community Impact
-----------------
-
-The OpenStack community will better understand what the Akanda project is
-about and why it is important with clear documentation.
-
-
-Alternatives
-------------
-
-* Leave documentation as is.
-* Wait until the Akanda project is moved into the OpenStack org repo before
-  updating the documentation structure.
-
-
-Implementation
-==============
-
-Assignee(s)
------------
-
-Sean Roberts (sarob)
-
-
-Work Items
-----------
-
-* Create a patch to restructure the Akanda documentation.
-* Add new content from slides and other sources.
-* After Akanda gets moved into the OpenStack org repos, move the Akanda
-  developer reference to doc.openstack.org/developer/akanda/devref/.
-
-
-Dependencies
-============
-
-
-Testing
-=======
-
-Tempest Tests
--------------
-
-n/a
-
-
-Functional Tests
-----------------
-
-n/a
-
-
-API Tests
----------
-
-n/a
-
-
-Documentation Impact
-====================
-
-User Documentation
-------------------
-
-See the proposed change section.
-
-
-Developer Documentation
------------------------
-
-See the proposed change section.
-
-
-References
-==========
-
-[1] http://docs.akanda.io/
-[2] http://docs.openstack.org/
-[3] http://docs.openstack.org/developer/openstack-projects.html
diff --git a/specs/liberty/rug_ha.rst b/specs/liberty/rug_ha.rst
deleted file mode 100644
index b0ce1500..00000000
--- a/specs/liberty/rug_ha.rst
+++ /dev/null
@@ -1,201 +0,0 @@
-..
-  This work is licensed under a Creative Commons Attribution 3.0 Unported
-  License.
-
-  http://creativecommons.org/licenses/by/3.0/legalcode
-
-
-RUG HA and scaleout
-===================
-
-Problem Description
-===================
-
-The RUG is a multi-process, multi-worker service, but it cannot be
-scaled out to multiple nodes for purposes of high availability and
-distributed handling of load. The only current option for high
-availability is an active/passive cluster using Pacemaker
-or similar, which is less than ideal and does not address scale-out
-concerns.
-
-Proposed Change
-===============
-
-This proposes allowing multiple RUG processes to be spawned across
-many nodes. Each RUG process is responsible for a fraction of the
-total running appliances. The RUG_process->appliance(s) mapping will be
-managed by a consistent hash ring. An external coordination service
-(ie, zookeeper) will be leveraged to provide cluster membership
-capabilities, and python-tooz will be used to manage cluster events.
-When new members join or depart, the hash ring will be rebalanced and
-appliances re-distributed across the RUG.
-
-This allows operators to scale out to many RUG instances, eliminating
-the single point of failure and allowing appliances to be evenly
-distributed across multiple worker processes.
-
-
-Data Model Impact
------------------
-
-n/a
-
-REST API Impact
----------------
-
-n/a
-
-Security Impact
----------------
-
-None
-
-Notifications Impact
---------------------
-
-
-Other End User Impact
----------------------
-
-n/a
-
-Performance Impact
-------------------
-
-There will be some new overhead introduced at the messaging layer, as Neutron
-notifications and RPCs will need to be distributed to per-RUG message queues.
-
-Other Deployer Impact
----------------------
-
-Deployers will need to evaluate and choose an appropriate backend to be used
-by tooz for leader election. memcached is a simple yet non-robust solution,
-while zookeeper is a less light-weight but proven one. More info at [2].
-
-Developer Impact
-----------------
-
-n/a
-
-Community Impact
-----------------
-
-n/a
-
-
-Alternatives
-------------
-
-One alternative to having each RUG instance declare its own messaging queue and
-inspect all incoming messages would be to have the DHT master also serve as a
-notification master.
-That is, the leader would be the only instance of the RUG listening to and
-processing incoming Neutron notifications, and then re-distributing them to
-specific RUG workers based on the state of the DHT.
-
-Another option would be to do away with the use of Neutron notifications
-entirely and hard-wire the akanda-neutron plugin to the RUG via a dedicated
-message queue.
-
-
-Implementation
-==============
-
-This proposes enabling operators to run multiple instances of the RUG.
-Each instance of the RUG will be responsible for a subset of the managed
-appliances. A distributed, consistent hash ring will be used to map appliances
-to their respective RUG instance. The Ironic project is already doing
-something similar and has a hash ring implementation we can likely leverage
-to get started [1].
-
-The RUG cluster is essentially leaderless. The hash ring is constructed
-using the active node list and each individual RUG instance is capable of
-constructing a ring given a list of members. This ring is consistent
-across nodes provided the coordination service is properly reporting membership
-events and they are processed correctly. Using metadata attached to incoming
-events (ie, tenant_id), a consumer is able to check the hash ring to determine
-which node in the ring the event is mapped to (an illustrative sketch is
-included under User Documentation below).
-
-The RUG will spawn a new subprocess called the coordinator. Its only purpose
-is to listen for cluster membership events using python-tooz. When a member
-joins or departs, the coordinator will create a new Event of type REBALANCE
-and put it onto the notifications queue. This event's body will contain an
-updated list of current cluster nodes.
-
-Each RUG worker process will maintain a copy of the hash ring, which is
-shared by its worker threads. When it receives a REBALANCE event, it will
-rebalance the hash ring given the new membership list. When it receives
-normal CRUD events for resources, it will first check the hash ring to see
-if it is mapped to its host based on the target tenant_id for the event. If it
-is, the event will be processed. If it is not, the event will be ignored and
-serviced by another worker.
-
-Ideally, REBALANCE events should be serviced before CRUD events.
-
-Assignee(s)
------------
-
-
-Work Items
-----------
-
-* Implement a distributed hash ring for managing worker:appliance
-  assignment.
-
-* Add a new coordination sub-process to the RUG that publishes REBALANCE
-  events to the notifications queue when membership changes.
-
-* Set up per-RUG message queues such that notifications are distributed to all
-  RUG processes equally.
-
-* Update the worker to manage its own copy of the hash ring.
-
-* Update the worker with the ability to respond to new REBALANCE events by
-  rebalancing the ring with an updated membership list.
-
-* Update the worker to drop events for resources that are not mapped to its
-  host in the hash ring.
-
-Dependencies
-============
-
-Testing
-=======
-
-Tempest Tests
--------------
-
-
-Functional Tests
-----------------
-
-If we cannot sufficiently test this using unit tests, we could potentially
-spin up our devstack job with multiple copies of the akanda-rug-service
-running on a single host, and with multiple router appliances. This
-would allow us to test ring rebalancing by killing off one of the multiple
-akanda-rug-service processes.
-
-API Tests
----------
-
-
-Documentation Impact
-====================
-
-User Documentation
-------------------
-
-Deployment docs need to be updated to mention that this feature depends
-on an external coordination service.
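-
-For illustration, this documentation could include a minimal sketch of the
-consistent hashing scheme described above. The snippet below is hypothetical
-(names and the virtual-node count are made up; the real implementation would
-likely reuse Ironic's hash ring, per [1])::
-
-    import bisect
-    import hashlib
-
-    class HashRing(object):
-        """Map tenant IDs onto RUG hosts with consistent hashing."""
-
-        def __init__(self, hosts, replicas=32):
-            # Several virtual nodes per host smooth out the distribution.
-            self._ring = {}
-            self._keys = []
-            for host in hosts:
-                for r in range(replicas):
-                    key = self._hash('%s-%d' % (host, r))
-                    self._ring[key] = host
-                    bisect.insort(self._keys, key)
-
-        @staticmethod
-        def _hash(data):
-            return int(hashlib.md5(data.encode('utf-8')).hexdigest(), 16)
-
-        def get_host(self, tenant_id):
-            # The first virtual node clockwise from the hash owns the tenant.
-            idx = bisect.bisect(self._keys, self._hash(tenant_id))
-            return self._ring[self._keys[idx % len(self._keys)]]
-
-    # A worker rebuilds the ring from a REBALANCE event's member list and
-    # drops events whose tenant is not mapped to the local host:
-    ring = HashRing(['rug-host-1', 'rug-host-2', 'rug-host-3'])
-    if ring.get_host('example-tenant-id') != 'rug-host-1':
-        pass  # ignored here; another RUG instance services this event
-
-Because only the virtual nodes belonging to a joining or departing member
-move, a rebalance remaps just a fraction of the tenants, which is what keeps
-REBALANCE handling cheap.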
- -Developer Documentation ------------------------ - - -References -========== - -[1] https://git.openstack.org/cgit/openstack/ironic/tree/ironic/common/hash_ring.py -[2] http://docs.openstack.org/developer/tooz/drivers.html - diff --git a/specs/liberty/skeleton.rst b/specs/liberty/skeleton.rst deleted file mode 120000 index c1f175df..00000000 --- a/specs/liberty/skeleton.rst +++ /dev/null @@ -1 +0,0 @@ -../skeleton.rst \ No newline at end of file diff --git a/specs/skeleton.rst b/specs/skeleton.rst deleted file mode 100644 index 18421a8c..00000000 --- a/specs/skeleton.rst +++ /dev/null @@ -1,103 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - - -Title of your blueprint -======================= - - -Problem Description -=================== - - -Proposed Change -=============== - - -Data Model Impact ------------------ - - -REST API Impact ---------------- - - -Security Impact ---------------- - - -Notifications Impact --------------------- - - -Other End User Impact ---------------------- - - -Performance Impact ------------------- - - -Other Deployer Impact ---------------------- - - -Developer Impact ----------------- - - -Community Impact ----------------- - - -Alternatives ------------- - - -Implementation -============== - -Assignee(s) ------------ - - -Work Items ----------- - - -Dependencies -============ - - -Testing -======= - -Tempest Tests -------------- - - -Functional Tests ----------------- - - -API Tests ---------- - - -Documentation Impact -==================== - -User Documentation ------------------- - - -Developer Documentation ------------------------ - - -References -========== - diff --git a/specs/template.tst b/specs/template.tst deleted file mode 100644 index da74ac79..00000000 --- a/specs/template.tst +++ /dev/null @@ -1,471 +0,0 @@ -.. - This work is licensed under a Creative Commons Attribution 3.0 Unported - License. - - http://creativecommons.org/licenses/by/3.0/legalcode - -========================================== -Example Spec - The title of your blueprint -========================================== - -Include the URL of your launchpad blueprint: - -https://blueprints.launchpad.net/akanda/+spec/example - -Introduction paragraph -- why are we doing anything? A single paragraph of -prose that **operators, deployers, and developers** can understand. - -If your specification proposes any changes to the Akanda REST API such -as changing parameters which can be returned or accepted, or even -the semantics of what happens when a client calls into the API, then -you should add the APIImpact flag to the commit message. Specifications with -the APIImpact flag can be found with the following query: - -https://review.openstack.org/#/q/status:open+project:openstack/neutron-specs+message:apiimpact,n,z - - -Problem Description -=================== - -A detailed description of the problem: - -* For a new feature this should be use cases. Ensure you are clear about the - actors in each use case: End User vs Deployer - -* For a major reworking of something existing it would describe the - problems in that feature that are being addressed. - - -Proposed Change -=============== - -Here is where you cover the change you propose to make in detail. How do you -propose to solve this problem? - -If this is one part of a larger effort make it clear where this piece ends. In -other words, what's the scope of this effort? 
-
-Data Model Impact
------------------
-
-Changes which require modifications to the data model often have a wider
-impact on the system. The community often has strong opinions on how the
-data model should be evolved, from both a functional and performance
-perspective. It is therefore important to capture and gain agreement as
-early as possible on any proposed changes to the data model.
-
-Questions which need to be addressed by this section include:
-
-* What new data objects and/or database schema changes is this going to
-  require?
-
-* What database migrations will accompany this change?
-
-* How will the initial set of new data objects be generated? For example, if
-  you need to take into account existing instances, or modify other existing
-  data, describe how that will work.
-
-REST API Impact
----------------
-
-For each API resource to be implemented, describe the resource
-collection and specify the name, type, and other essential details of
-each new or modified attribute. A table similar to the following may
-be used:
-
-+----------+-------+---------+---------+------------+--------------+
-|Attribute |Type   |Access   |Default  |Validation/ |Description   |
-|Name      |       |         |Value    |Conversion  |              |
-+==========+=======+=========+=========+============+==============+
-|id        |string |RO, all  |generated|N/A         |identity      |
-|          |(UUID) |         |         |            |              |
-+----------+-------+---------+---------+------------+--------------+
-|name      |string |RW, all  |''       |string      |human-readable|
-|          |       |         |         |            |name          |
-+----------+-------+---------+---------+------------+--------------+
-|color     |string |RW, admin|'red'    |'red',      |color         |
-|          |       |         |         |'yellow', or|indicating    |
-|          |       |         |         |'green'     |state         |
-+----------+-------+---------+---------+------------+--------------+
-
-
-Here is another example of the same table using csv-table:
-
-
-.. csv-table:: CSVTable
-   :header: Attribute Name,Type,Access,Default Value,Validation Conversion,Description
-
-   id,string (UUID),"RO, all",generated,N/A,identity
-   name,string,"RW, all","''",string,human-readable name
-   color,string,"RW, admin",red,"'red', 'yellow' or 'green'",color indicating state
-
-
-Each API method which is either added or changed should have the following:
-
-* Specification for the method
-
-  * A description of what the method does, suitable for use in
-    user documentation
-
-  * Method type (POST/PUT/GET/DELETE)
-
-  * Normal http response code(s)
-
-  * Expected error http response code(s)
-
-    * A description for each possible error code should be included,
-      describing semantic errors which can cause it, such as
-      inconsistent parameters supplied to the method, or when an
-      instance is not in an appropriate state for the request to
-      succeed. Errors caused by syntactic problems covered by the JSON
-      schema definition do not need to be included.
-
-  * URL for the resource
-
-  * Parameters which can be passed via the url
-
-  * JSON schema definition for the body data if allowed
-
-  * JSON schema definition for the response data if any
-
-* Example use case including typical API samples for both data supplied
-  by the caller and the response
-
-* Discuss any API policy changes, and discuss what things a deployer needs to
-  think about when defining their API policy. This is in reference to the
-  policy.json file.
-
-Note that the schema should be defined as restrictively as
Parameters which are required should be marked as such and -only under exceptional circumstances should additional parameters -which are not defined in the schema be permitted (eg -additionaProperties should be False). - -Reuse of existing predefined parameter types such as regexps for -passwords and user defined names is highly encouraged. - -Security Impact ---------------- - -Describe any potential security impact on the system. Some of the items to -consider include: - -* Does this change touch sensitive data such as tokens, keys, or user data? - -* Does this change alter the API in a way that may impact security, such as - a new way to access sensitive information or a new way to login? - -* Does this change involve cryptography or hashing? - -* Does this change require the use of sudo or any elevated privileges? - -* Does this change involve using or parsing user-provided data? This could - be directly at the API level or indirectly such as changes to a cache layer. - -* Can this change enable a resource exhaustion attack, such as allowing a - single API interaction to consume significant server resources? Some examples - of this include launching subprocesses for each connection, or entity - expansion attacks in XML. - -For more detailed guidance, please see the OpenStack Security Guidelines -[#security_guidelines]_ as a reference. These guidelines are a work in -progress and are designed to help you identify security best practices. -For further information, feel free to reach out to the OpenStack Security -Group at openstack-security@lists.openstack.org. - -.. [#security_guidelines] OpenStack Security Guidelines - https://wiki.openstack.org/wiki/Security/Guidelines - -Notifications Impact --------------------- - -Please specify any changes to notifications. Be that an extra notification, -changes to an existing notification, or removing a notification. - -Other End User Impact ---------------------- - -Aside from the API, are there other ways a user will interact with this feature? - - -Performance Impact ------------------- - -Describe any potential performance impact on the system, for example -how often will new code be called, and is there a major change to the calling -pattern of existing code. - -Examples of things to consider here include: - -* A periodic task might look like a small addition but if it calls conductor or - another service the load is multiplied by the number of nodes in the system. - -* A small change in a utility function or a commonly used decorator can have a - large impacts on performance. - -* Calls which result in a database queries (whether direct or via conductor) can - have a profound impact on performance when called in critical sections of the - code. - -* Will the change include any locking, and if so what considerations are there on - holding the lock? - - -Other Deployer Impact ---------------------- - -Discuss things that will affect how you deploy and configure OpenStack -that have not already been mentioned, such as: - -* What config options are being added? Should they be more generic than - proposed (for example a flag that other hypervisor drivers might want to - implement as well)? Are the default values ones which will work well in - real deployments? - -* Is this a change that takes immediate effect after its merged, or is it - something that has to be explicitly enabled? - -* If this change is a new binary, how would it be deployed? 
- -* Please state anything that those doing continuous deployment, or those - upgrading from the previous release, need to be aware of. Also describe - any plans to deprecate configuration values or features. For example, if we - change the directory name that instances are stored in, how do we handle - instance directories created before the change landed? Do we move them? Do - we have a special case in the code? Do we assume that the operator will - recreate all the instances in their cloud? - -* Does this require downtime or manual intervention to apply when upgrading? - -Developer Impact ----------------- - -Discuss things that will affect other developers working on OpenStack, -such as: - -* If the blueprint proposes a change to the API, discussion of how other - plugins would implement the feature is required. - -Community Impact ----------------- - -Describe how this change fits in with the direction the Akanda community is -going. - -* Has the change been discussed on mailing lists, at the weekly Akanda - meeting, or at a Design Summit? - -* Does the change fit with the direction of the Akanda community? - -Alternatives ------------- - -What other ways could we do this thing? Why aren't we using those? This doesn't -have to be a full literature review, but it should demonstrate that thought has -been put into why the proposed solution is an appropriate one. - - -Implementation -============== - -Assignee(s) ------------ - -Who is leading the writing of the code? Or is this a blueprint where you're -throwing it out there to see who picks it up? - -If more than one person is working on the implementation, please designate the -primary author and contact. - -Primary assignee: - - -Other contributors: - - -Work Items ----------- - -Work items or tasks -- break the feature up into the things that need to be -done to implement it. Those parts might end up being done by different people, -but we're mostly trying to understand the timeline for implementation. - - -Dependencies -============ - -* Include specific references to specs and/or blueprints in Akanda, or in other - projects, that this one either depends on or is related to. - -* If this requires functionality of another project that is not currently used - by Akanda (such as the glance v2 API when we previously only required v1), - document that fact. - -* Does this feature require any new library dependencies or code otherwise not - included in OpenStack? Or does it depend on a specific version of library? - - -Testing -======= - -Please discuss how the change will be tested. We especially want to know what -tempest tests will be added. It is assumed that unit test coverage will be -added so that doesn't need to be mentioned explicitly, but discussion of why -you think unit tests are sufficient and we don't need to add more tempest -tests would need to be included. - -Is this untestable in gate given current limitations (specific hardware / -software configurations available)? If so, are there mitigation plans (3rd -party testing, gate enhancements, etc). - -Tempest Tests -------------- - -List new, changed, or deleted Tempest tests in this section. If a blueprint -has been filed in the Tempest specs repository, please cross reference that -blueprint here. - -Functional Tests ----------------- - -Please document any functional tests which this change will require. New -features will require functional tests before being allowed to be merged. -Code refactors may require functional tests. 
- -API Tests ---------- - -Add changes to API tests in this section. This is required if the change is -adding, removing, or changing any API related code in Akanda. - - -Documentation Impact -==================== - -What is the impact on the docs team of this change? Some changes might require -donating resources to the docs team to have the documentation updated. Don't -repeat details discussed above, but please reference them here. - -User Documentation ------------------- - -Specify any User Documentation which needs to be changed. Reference the guides -which need updating due to this change. - -Developer Documentation ------------------------ - -If API changes are being made, specify the developer API documentation which -will be updated to reflect the new changes here. - -References -========== - -Please add any useful references here. You are not required to have any -reference. Moreover, this specification should still make sense when your -references are unavailable. Examples of what you could include are: - -* Links to mailing list or IRC discussions - -* Links to notes from a summit session - -* Links to relevant research, if appropriate - -* Related specifications as appropriate (e.g. link any vendor documentation) - -* Anything else you feel it is worthwhile to refer to - - -NOTE: Please remove everything from here and down. This section is meant to -show examples of how to format the spec. - -Some notes about using this template: - -* Your spec should be in ReSTructured text, like this template. - -* Please wrap text at 80 columns. - -* The filename in the git repository should match the launchpad URL, for - example a URL of: https://blueprints.launchpad.net/akanda/+spec/awesome-thing - should be named awesome-thing.rst - -* Please do not delete any of the sections in this template. If you have - nothing to say for a whole section, just write: None - -* For help with syntax, see http://sphinx-doc.org/rest.html - -* To test out your formatting, build the docs using tox, or see: - http://rst.ninjs.org - -* If you would like to provide a diagram with your spec, text representations - are preferred. http://asciiflow.com/ is a very nice tool to assist with - making ascii diagrams. blockdiag is another tool. These are described below. - If you require an image (screenshot) for your BP, attaching that to the BP - and checking it in is also accepted. However, text representations are prefered. - -* Diagram examples - -asciiflow:: - - +----------+ +-----------+ +----------+ - | A | | B | | C | - | +-----+ +--------+ | - +----------+ +-----------+ +----------+ - -blockdiag - -.. blockdiag:: - - blockdiag sample { - a -> b -> c; - } - -actdiag - -.. actdiag:: - - actdiag { - write -> convert -> image - lane user { - label = "User" - write [label = "Writing reST"]; - image [label = "Get diagram IMAGE"]; - } - lane actdiag { - convert [label = "Convert reST to Image"]; - } - } - -nwdiag - -.. nwdiag:: - - nwdiag { - network dmz { - address = "210.x.x.x/24" - - web01 [address = "210.x.x.1"]; - web02 [address = "210.x.x.2"]; - } - network internal { - address = "172.x.x.x/24"; - - web01 [address = "172.x.x.1"]; - web02 [address = "172.x.x.2"]; - db01; - db02; - } - } - - -seqdiag - -.. 
seqdiag:: - - seqdiag { - browser -> webserver [label = "GET /index.html"]; - browser <-- webserver; - browser -> webserver [label = "POST /blog/comment"]; - webserver -> database [label = "INSERT comment"]; - webserver <-- database; - browser <-- webserver; - } \ No newline at end of file diff --git a/test-requirements.txt b/test-requirements.txt deleted file mode 100644 index 6a0137f4..00000000 --- a/test-requirements.txt +++ /dev/null @@ -1,19 +0,0 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. -unittest2 # BSD -nose # LGPL -coverage>=3.6 # Apache-2.0 -mock>=2.0 # BSD -pep8==1.5.7 # MIT -iso8601>=0.1.11 # MIT -mox>=0.5.3 # Apache-2.0 -testtools>=1.4.0 # MIT -fixtures>=3.0.0 # Apache-2.0/BSD -tempest-lib>=0.14.0 # Apache-2.0 - -# Doc requirements -sphinx!=1.3b1,<1.4,>=1.2.1 # BSD -oslosphinx>=4.7.0 # Apache-2.0 -reno>=1.8.0 # Apache2 - diff --git a/tools/astara-byonf b/tools/astara-byonf deleted file mode 100755 index 27263763..00000000 --- a/tools/astara-byonf +++ /dev/null @@ -1,116 +0,0 @@ -#!/usr/bin/python -# -# Copyright (c) 2016 Akanda, Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-
-import pprint
-import argparse
-import os
-import sys
-
-from prettytable import PrettyTable
-
-from keystoneclient.auth.identity import v3 as ksv3
-from keystoneclient import session as kssession
-from neutronclient.v2_0 import client
-from neutronclient.common import exceptions as neutron_exc
-
-class ByoExtClientWrapper(client.Client):
-    byonf_path = '/byonf'
-    byonfs_path = '/byonfs'
-
-
-    def create_byonf(self, byonf):
-        return self.post(
-            self.byonf_path,
-            body={'byonf': byonf}
-        )
-
-    def update_byonf(self, byonf):
-        if not byonf.get('id'):
-            print 'ERROR: must specify id of byonf association to update'
-            sys.exit(1)
-
-        path = self.byonf_path + '/' + byonf.pop('id')
-        return self.put(
-            path,
-            body={'byonf': byonf}
-        )
-
-    def list_byonfs(self, retrieve_all=True, **_params):
-        return self.list('byonfs', self.byonf_path, retrieve_all, **_params)
-
-    def delete_byonf(self, byonf_id):
-        return self.delete('%s/%s' % (self.byonf_path, byonf_id))
-
-ks_args = {
-    'auth_url': os.getenv('OS_AUTH_URL', 'http://127.0.0.1:5000/v3'),
-    'username': os.getenv('OS_USERNAME', 'demo'),
-    'password': os.getenv('OS_PASSWORD', 'secrete'),
-    'project_name': os.getenv('OS_PROJECT_NAME', 'demo'),
-    'user_domain_id': 'default',
-    'project_domain_id': 'default',
-}
-
-auth = ksv3.Password(**ks_args)
-ks_session = kssession.Session(auth=auth)
-api_client = ByoExtClientWrapper(
-    session=ks_session,
-)
-
-
-parser = argparse.ArgumentParser(description="Script to manage user network functions")
-parser.add_argument('action', default='list')
-parser.add_argument('--function', default='')
-parser.add_argument('--image_id')
-parser.add_argument('--driver')
-parser.add_argument('--id')
-parser.add_argument('--tenant_id')
-
-
-args = parser.parse_args()
-
-def print_table(byonfs):
-    if not isinstance(byonfs, list):
-        byonfs = [byonfs]
-
-    columns = ['id', 'tenant_id', 'function_type', 'driver', 'image_id']
-    table = PrettyTable(columns)
-    for byonf in byonfs:
-        table.add_row([byonf.get(k) for k in columns])
-    print table
-
-if args.action in ['create', 'update']:
-    req_args = {
-        'image_id': args.image_id,
-        'function_type': args.function,
-        'driver': args.driver,
-    }
-    if args.tenant_id:
-        req_args['tenant_id'] = args.tenant_id
-    if args.id:
-        req_args['id'] = args.id
-
-    f = getattr(api_client, '%s_byonf' % args.action)
-    result = f(req_args)
-    print_table(result['byonf'])
-
-elif args.action == 'delete':
-    api_client.delete_byonf(args.id)
-    print 'deleted byonf association with id %s' % args.id
-else:
-    arg2 = {}
-    if args.function:
-        arg2['function_type'] = args.function
-    print_table(api_client.list_byonfs(**arg2)['byonfs'])
diff --git a/tools/generate_config_file_samples.sh b/tools/generate_config_file_samples.sh
deleted file mode 100755
index 2086c446..00000000
--- a/tools/generate_config_file_samples.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/bin/sh
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-set -e
-
-GEN_CMD=oslo-config-generator
-
-if ! type "$GEN_CMD" > /dev/null; then
-    echo "ERROR: $GEN_CMD not installed on the system."
-    exit 1
-fi
-
-for file in `ls etc/oslo-config-generator/*`; do
-    $GEN_CMD --config-file=$file
-done
-
-set -x
-
diff --git a/tools/logstash/README.md b/tools/logstash/README.md
deleted file mode 100644
index efe81c7c..00000000
--- a/tools/logstash/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-These files are used by logstash to read the astara-orchestrator log and send
-metrics to a graphite server. Logstash has many output options, so these files
-serve as a template for parsing the astara-orchestrator log file.
diff --git a/tools/logstash/logstash-rug-patterns b/tools/logstash/logstash-rug-patterns
deleted file mode 100644
index f7c981e1..00000000
--- a/tools/logstash/logstash-rug-patterns
+++ /dev/null
@@ -1,4 +0,0 @@
-WORKER p[0-9][0-9]
-THREAD tmain|t[0-9][0-9]
-ASTARA_STATE_METHOD (?:[a-zA-Z]*\.)*
-METHOD (?:astara.openstack.common.rpc.amqp|astara.worker|astara.api.configuration|astara.tenant|astara.state.|astara.scheduler)
diff --git a/tools/logstash/rug-filter.conf b/tools/logstash/rug-filter.conf
deleted file mode 100644
index 5a50a098..00000000
--- a/tools/logstash/rug-filter.conf
+++ /dev/null
@@ -1,39 +0,0 @@
-input {
-  file {
-    path => '/var/log/akrug/akrug.log'
-  }
-}
-
-filter {
-  grok {
-    patterns_dir => "/etc/logstash/patterns/logstash-rug-patterns"
-    match => {"message" => "%{TIMESTAMP_ISO8601:akanda_timestamp}:%{LOGLEVEL:loglevel}:%{ASTARA_STATE_METHOD:method}%{UUID:router_id}:%{POSINT:pid}:%{WORKER:worker}:%{THREAD:thread}:incoming message brings queue length to %{INT:queue_length}"}
-    add_tag => [ "queue_length" ]
-  }
-
-  grok {
-    patterns_dir => "/etc/logstash/patterns/logstash-rug-patterns"
-    match => {"message" => "%{TIMESTAMP_ISO8601:akanda_timestamp}:%{LOGLEVEL:loglevel}:%{METHOD:method}:%{POSINT:pid}:%{WORKER:worker}:%{THREAD:thread}"}
-    add_tag => [ "threads" ]
-  }
-
-  date {
-    match => [ "akanda_timestamp", "YYYY-MM-dd HH:mm:ss" ]
-  }
-}
-
-output {
-  if "queue_length" in [tags] {
-    graphite {
-      host => "127.0.0.1"
-      metrics => { "%{router_id}/queue_length" => "%{queue_length}" }
-      metrics_format => "openstack.astara.rug.routers.*"
-    }
-  } else if "threads" in [tags] {
-    graphite {
-      host => "127.0.0.1"
-      metrics => { "%{worker}/%{thread}" => "1" }
-      metrics_format => "openstack.astara.rug.workers.*"
-    }
-  }
-}
diff --git a/tools/run_functional.sh b/tools/run_functional.sh
deleted file mode 100755
index 2e74c3c0..00000000
--- a/tools/run_functional.sh
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/bash -xe
-echo $LOGDIR
-FUNC_TEST_DIR=$(dirname $0)/../astara/test/functional/
-CONFIG_FILE=$FUNC_TEST_DIR/test.conf
-LOGDIR=${LOGDIR:-$FUNC_TEST_DIR}
-LOG_FILE=$LOGDIR/astara_functional.log
-APPLIANCE_API_PORT=${APPLIANCE_API_PORT:-5000}
-SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service}
-if [ -z "$SERVICE_TENANT_ID" ]; then
-    SERVICE_TENANT_ID="$(openstack project list | grep $SERVICE_TENANT_NAME | awk '{ print $2 }')"
-    if [ -z "$SERVICE_TENANT_ID" ]; then
-        # Fallback to V2
-        SERVICE_TENANT_ID="$(keystone tenant-list | grep $SERVICE_TENANT_NAME | awk '{ print $2 }')"
-    fi
-fi
-
-cat <<END >$CONFIG_FILE
-[DEFAULT]
-debug=True
-use_stderr=False
-use_syslog=False
-os_auth_url=$OS_AUTH_URL
-os_username=$OS_USERNAME
-os_password=$OS_PASSWORD
-os_tenant_name=$OS_TENANT_NAME
-service_tenant_name=$SERVICE_TENANT_NAME
-service_tenant_id=$SERVICE_TENANT_ID
-appliance_api_port=$APPLIANCE_API_PORT
-astara_auto_add_resources=False
-
-# Defaults for the gate
-health_check_timeout=10
-appliance_active_timeout=480
-log_file=/opt/stack/logs/astara_functional.log
-END
-
-tox -e functional
diff --git a/tox.ini b/tox.ini
deleted file mode 100644
index ea2b0100..00000000
--- a/tox.ini
+++ /dev/null
@@ -1,46 +0,0 @@
-[tox]
-envlist = py27,pep8
-
-[testenv]
-distribute = False
-setenv = VIRTUAL_ENV={envdir}
-deps = -r{toxinidir}/test-requirements.txt
-commands = nosetests {posargs:--with-coverage astara/test/unit/}
-sitepackages = False
-
-[tox:jenkins]
-
-[testenv:style]
-deps = flake8
-       setuptools_git>=0.4
-commands = flake8 astara setup.py
-
-[testenv:pep8]
-deps = {[testenv:style]deps}
-commands = {[testenv:style]commands}
-
-[testenv:doc]
-deps = -r{toxinidir}/test-requirements.txt
-commands =
-    sphinx-build doc/source doc/build
-    sphinx-build -a -E -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html
-
-[testenv:cover]
-setenv = NOSE_WITH_COVERAGE=1
-
-[testenv:venv]
-commands = {posargs}
-
-[testenv:functional]
-commands = nosetests -v ./astara/test/functional/
-
-[flake8]
-ignore = E133,E226,E241,E242,E731
-exclude=.venv,.git,.tox,dist,doc,.idea
-show-source = True
-
-[testenv:genconfig]
-commands = {toxinidir}/tools/generate_config_file_samples.sh
-
-[testenv:releasenotes]
-commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html
diff --git a/vagrant/README.md b/vagrant/README.md
deleted file mode 100644
index 408fab24..00000000
--- a/vagrant/README.md
+++ /dev/null
@@ -1,60 +0,0 @@
-vagrant-devstack-astara
-=======================
-
-Getting started
----------------
-
-A Vagrant-based Astara development environment.
-
-Steps to try the Vagrant image:
-
- 1. Install Vagrant on your local machine. Install one of the currently
-    supported providers: VirtualBox, Libvirt or Parallels.
- 2. Git clone the astara repository.
- 3. Run `cd vagrant`
- 4. Run `vagrant up`
-    It will take from 10 to 60 minutes, depending on your internet speed.
-    Vagrant-cachier can speed up the process [1].
- 5. `vagrant ssh`
-    You will get a VM shell with keystone and neutron already running.
-
-At this point you should have astara running inside of the Vagrant VM.
-
-[1] http://fgrehm.viewdocs.io/vagrant-cachier/
-
-Vagrant Options available
--------------------------
-
-You can set the following environment variables before running `vagrant up` to modify
-the definition of the Virtual Machine spawned:
-
- * **VAGRANT\_ASTARA\_VM\_BOX**: To change the Vagrant Box used. Should be available in
-   [atlas](http://atlas.hashicorp.com).
-
-       export VAGRANT_ASTARA_VM_BOX=centos/7
-
-   could be an example of an rpm-based option.
-
- * **VAGRANT\_ASTARA\_VM\_MEMORY**: To modify the RAM of the VM. Defaulted to: 6144
- * **VAGRANT\_ASTARA\_VM\_CPUS**: To modify the cpus of the VM. Defaulted to: 2
- * **VAGRANT\_ASTARA\_RUN\_DEVSTACK**: Whether `vagrant up` should run devstack to
-   have an environment ready to use. Set it to 'false' if you want to edit
-   `local.conf` before running ./stack.sh manually in the VM. Defaulted to: true.
-   See below for additional options for editing local.conf.
-
-Additional devstack configuration
----------------------------------
-
-To add additional configuration to local.conf before the VM is provisioned, you can
-create a file called "user_local.conf" in the vagrant directory of astara. This file
-will be appended to the "local.conf" created during the Vagrant provisioning.
-
-For example, to use OVN as the Neutron plugin with Astara, you can create a
-"user_local.conf" with the following configuration:
-
-    enable_plugin networking-ovn http://git.openstack.org/openstack/networking-ovn
-    enable_service ovn-northd
-    enable_service ovn-controller
-    disable_service q-agt
-    disable_service q-l3
diff --git a/vagrant/Vagrantfile b/vagrant/Vagrantfile
deleted file mode 100644
index b8eb203d..00000000
--- a/vagrant/Vagrantfile
+++ /dev/null
@@ -1,47 +0,0 @@
-VAGRANTFILE_API_VERSION = "2"
-
-Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
-
-  VM_MEMORY = ENV.fetch('VAGRANT_ASTARA_VM_MEMORY', 6144)
-  VM_CPUS = ENV.fetch('VAGRANT_ASTARA_VM_CPUS', 2)
-  RUN_DEVSTACK = ENV.fetch('VAGRANT_ASTARA_RUN_DEVSTACK', 'true')
-
-  config.vm.hostname = 'devstack'
-
-  config.vm.provider 'virtualbox' do |v, override|
-    override.vm.box = ENV.fetch('VAGRANT_ASTARA_VM_BOX', 'ubuntu/trusty64')
-    v.memory = VM_MEMORY
-    v.cpus = VM_CPUS
-  end
-
-  config.vm.provider 'parallels' do |v, override|
-    override.vm.box = ENV.fetch('VAGRANT_ASTARA_VM_BOX', 'boxcutter/ubuntu1404')
-    v.memory = VM_MEMORY
-    v.cpus = VM_CPUS
-    v.customize ['set', :id, '--nested-virt', 'on']
-  end
-
-  config.vm.provider 'libvirt' do |v, override|
-    override.vm.box = ENV.fetch('VAGRANT_ASTARA_VM_BOX', 'celebdor/trusty64')
-    v.memory = VM_MEMORY
-    v.cpus = VM_CPUS
-    v.nested = true
-    v.graphics_type = 'spice'
-    v.video_type = 'qxl'
-  end
-
-  config.vm.synced_folder '../devstack/', '/devstack'
-  # For CentOS machines it needs to be specified
-  config.vm.synced_folder '.', '/vagrant'
-
-  config.vm.provision :shell do |s|
-    s.path = 'vagrant.sh'
-    s.args = RUN_DEVSTACK
-  end
-
-  if Vagrant.has_plugin?('vagrant-cachier')
-    config.cache.scope = :box
-  end
-
-  config.vm.network :forwarded_port, guest: 80, host_ip: "127.0.0.1", host: 8080
-end
diff --git a/vagrant/devstack.sh b/vagrant/devstack.sh
deleted file mode 100755
index 5e3e37eb..00000000
--- a/vagrant/devstack.sh
+++ /dev/null
@@ -1,60 +0,0 @@
-#!/bin/bash
-
-set -e
-
-BASHPATH=$(dirname "$0")
-RUN_DEVSTACK="$1"
-echo "Run script from $BASHPATH"
-
-# Copied shamelessly from Devstack
-function GetOSVersion {
-    if [[ -x $(which lsb_release 2>/dev/null) ]]; then
-        os_FAMILY='Debian'
-    elif [[ -r /etc/redhat-release ]]; then
-        os_FAMILY='RedHat'
-    else
-        echo "Unsupported distribution!"
-        exit 1;
-    fi
-}
-
-GetOSVersion
-
-if [[ "$os_FAMILY" == "Debian" ]]; then
-    export DEBIAN_FRONTEND=noninteractive
-    sudo apt-get update
-    sudo apt-get install -qqy git
-elif [[ "$os_FAMILY" == "RedHat" ]]; then
-    sudo yum install -y -d 0 -e 0 git
-fi
-
-# determine checkout folder
-PWD=$(su "$OS_USER" -c "cd && pwd")
-DEVSTACK=$PWD/devstack
-
-# check if devstack is already there
-if [[ ! -d "$DEVSTACK" ]]
-then
-    echo "Download devstack into $DEVSTACK"
-
-    # clone devstack
-    su "$OS_USER" -c "cd && git clone -b master https://github.com/openstack-dev/devstack.git $DEVSTACK"
-
-    echo "Copy configuration"
-
-    # copy local.conf.sample settings (source: astara/devstack/local.conf.sample)
-    cp /devstack/local.conf.sample $DEVSTACK/local.conf
-    # If local settings are present, append them
-    if [ -f "/vagrant/user_local.conf" ]; then
-        cat /vagrant/user_local.conf >> $DEVSTACK/local.conf
-    fi
-    chown "$OS_USER":"$OS_USER" "$DEVSTACK"/local.conf
-
-fi
-
-if $RUN_DEVSTACK; then
-    echo "Start Devstack"
-    su "$OS_USER" -c "cd $DEVSTACK && ./stack.sh"
-else
-    echo "Virtual Machine ready. You can run devstack by executing '/home/vagrant/devstack/stack.sh'"
-fi
diff --git a/vagrant/vagrant.sh b/vagrant/vagrant.sh
deleted file mode 100755
index 8b14c75f..00000000
--- a/vagrant/vagrant.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/sh
-
-export OS_USER=vagrant
-export OS_HOST_IP=172.68.5.10
-
-# run script
-bash /vagrant/devstack.sh "$1"