Retire astara repo
Retire repository, following
https://docs.openstack.org/infra/manual/drivers.html#retiring-a-project

Change-Id: If5f7d284bd107a93edd9272ac0ed8e6d20ba5c51
This commit is contained in: parent e7f8940fa0, commit 7759e2fd82
.gitignore (vendored): 53 lines deleted
@@ -1,53 +0,0 @@
*.py[co]

# Packages
*.egg
*.egg-info
dist
build
eggs
parts
bin
var
sdist
develop-eggs
.installed.cfg

# Installer logs
pip-log.txt

# Unit test / coverage reports
.coverage
.tox

#Translations
*.mo

#Mr Developer
.mr.developer.cfg

# Packaging output
*.deb

# pbr output
AUTHORS
ChangeLog

orchestrator.ini.sample
astara/test/functional/test.conf.sample

*.swp

#pycharm cruft
.idea/*

*.db
*.db_clean

#macos hidden files
.DS_Store
._.DS_Store

# Vagrant
vagrant/.vagrant
vagrant/user_local.conf
.travis.yml: 10 lines deleted
@@ -1,10 +0,0 @@
language: python
python:
  - "2.7"
install:
  - pip install -r test_requirements.txt --use-mirrors
  - pip install flake8 --use-mirrors
  - pip install -q . --use-mirrors
before_script:
  - flake8 setup.py akanda --ignore=E123,E133,E226,E241,E242,E731
script: nosetests -d
LICENSE: 175 lines deleted
@@ -1,175 +0,0 @@

Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
README.md: 70 lines deleted
@@ -1,70 +0,0 @@
# Astara

A service with an open plugin architecture that manages Neutron advanced
services, such as routers and load balancers, within an OpenStack environment.

## The Name

Astara is the Sanskrit word for carpet. So why name our project carpet?

The original code name for this project was simply "The RUG", which was a
reference to a line from the popular film "The Big Lebowski":

**That rug really tied the room together, did it not?**

The idea is that "The Rug" really ties OpenStack Neutron together nicely. We
felt it was an apt description, so we kept the name.

## Related Projects

The code for the Astara project lives in several separate repositories to ease
packaging and management:

* [Astara](https://github.com/openstack/astara) -
  Contains the orchestration service that manages the creation, configuration,
  and health of Neutron advanced services as virtual network functions.

* [Astara Appliance](https://github.com/openstack/astara-appliance) -
  Supporting software for the Astara virtual network appliance, a
  Linux-based service VM that provides routing and L3+ services in
  a virtualized network environment. This includes a REST API for managing
  the appliance via the Astara orchestration service.

* [Astara Neutron](https://github.com/openstack/astara-neutron) -
  Ancillary subclasses of several OpenStack Neutron plugins and supporting
  code.

* [Astara Horizon](https://github.com/openstack/astara-horizon) -
  OpenStack Horizon dashboard code.

## Project Details

Astara is publicly managed through the [Astara Launchpad project](https://launchpad.net/astara).

## Code Review

Code is reviewed by collaborators and merged via
[OpenStack Gerrit review](https://review.openstack.org).

## Documentation

Documentation can be found at [docs.akanda.io](http://docs.akanda.io).

A developer quick start guide for making this all work in DevStack is
available [here](docs/source/developer_quickstart.rst).

## Community

Talk to the developers on IRC in the
[#openstack-astara channel on freenode.net](http://webchat.freenode.net/?randomnick=1&channels=%23openstack-astara&prompt=1&uio=d4).

## License and Copyright

Astara is licensed under the Apache 2.0 license and is Copyright 2015,
OpenStack Foundation.
README.rst (new file): 10 lines added
@@ -0,0 +1,10 @@
This project is no longer maintained.

The contents of this repository are still available in the Git
source code management system. To see the contents of this
repository before it reached its end of life, please check out the
previous commit with "git checkout HEAD^1".

For any further questions, please email
openstack-dev@lists.openstack.org or join #openstack-dev on
Freenode.
@@ -1,33 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg

CONF = cfg.CONF


api_opts = [
    cfg.StrOpt('admin_user'),
    cfg.StrOpt('admin_password', secret=True),
    cfg.StrOpt('admin_tenant_name'),
    cfg.StrOpt('auth_url'),
    cfg.StrOpt('auth_strategy', default='keystone'),
    cfg.StrOpt('auth_region'),
    cfg.IntOpt('max_retries', default=3),
    cfg.IntOpt('retry_delay', default=1),
    cfg.StrOpt('endpoint_type', default='publicURL'),
]
CONF.register_opts(api_opts)
@@ -1,91 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import requests

from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils

ASTARA_MGT_SERVICE_PORT = 5000
ASTARA_BASE_PATH = '/v1/'

LOG = logging.getLogger(__name__)
CONF = cfg.CONF

AK_CLIENT_OPTS = [
    cfg.IntOpt('alive_timeout', default=3),
    cfg.IntOpt('config_timeout', default=90),
]
CONF.register_opts(AK_CLIENT_OPTS)


def _mgt_url(host, port, path):
    if ':' in host:
        host = '[%s]' % host
    return 'http://%s:%s%s' % (host, port, path)


def _get_proxyless_session():
    s = requests.Session()
    # ignore any proxy setting because we should have a direct connection
    s.trust_env = False
    return s


def is_alive(host, port, timeout=None):
    timeout = timeout or cfg.CONF.alive_timeout
    path = ASTARA_BASE_PATH + 'firewall/rules'
    try:
        s = _get_proxyless_session()
        r = s.get(_mgt_url(host, port, path), timeout=timeout)
        if r.status_code == 200:
            return True
    except Exception as e:
        LOG.debug('is_alive for %s failed: %s', host, str(e))
    return False


def get_interfaces(host, port):
    path = ASTARA_BASE_PATH + 'system/interfaces'
    s = _get_proxyless_session()
    r = s.get(_mgt_url(host, port, path), timeout=30)
    return r.json().get('interfaces', [])


def update_config(host, port, config_dict):
    path = ASTARA_BASE_PATH + 'system/config'
    headers = {'Content-type': 'application/json'}

    s = _get_proxyless_session()
    r = s.put(
        _mgt_url(host, port, path),
        data=jsonutils.dump_as_bytes(config_dict),
        headers=headers,
        timeout=cfg.CONF.config_timeout)

    if r.status_code != 200:
        raise Exception('Config update failed: %s' % r.text)
    else:
        return r.json()


def read_labels(host, port):
    path = ASTARA_BASE_PATH + 'firewall/labels'
    s = _get_proxyless_session()
    r = s.post(_mgt_url(host, port, path), timeout=30)
    return r.json().get('labels', [])
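For context on the appliance REST client deleted above: a minimal usage sketch, assuming the module is importable as `astara.api.astara_client` and an appliance is reachable. The host value is an illustrative placeholder, not something taken from this diff.

```python
# Minimal sketch, assuming the module above is astara.api.astara_client.
from astara.api import astara_client

host = 'fdca:3ba5:a17a:acda::2'  # hypothetical management address
port = astara_client.ASTARA_MGT_SERVICE_PORT

if astara_client.is_alive(host, port):
    # PUT the full system config; raises on any non-200 response.
    astara_client.update_config(host, port, {'networks': []})
    print(astara_client.get_interfaces(host, port))
```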
@@ -1,105 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import re

from astara.common import constants

SERVICE_STATIC = 'static'


def network_config(client, port, ifname, network_type, network_ports=[]):
    network = client.get_network_detail(port.network_id)
    subnets_dict = dict((s.id, s) for s in network.subnets)

    return _make_network_config_dict(
        _interface_config(ifname, port, subnets_dict, network.mtu),
        network_type,
        port.network_id,
        mtu=network.mtu,
        subnets_dict=subnets_dict,
        network_ports=network_ports)


def _make_network_config_dict(interface, network_type, network_id, mtu=None,
                              v4_conf=SERVICE_STATIC, v6_conf=SERVICE_STATIC,
                              subnets_dict={}, network_ports=[]):
    return {'interface': interface,
            'network_id': network_id,
            'mtu': mtu,
            'v4_conf_service': v4_conf,
            'v6_conf_service': v6_conf,
            'network_type': network_type,
            'subnets': [_subnet_config(s) for s in subnets_dict.values()],
            'allocations': _allocation_config(network_ports, subnets_dict)}


def _interface_config(ifname, port, subnets_dict, mtu):
    def fmt(fixed):
        return '%s/%s' % (fixed.ip_address,
                          subnets_dict[fixed.subnet_id].cidr.prefixlen)

    retval = {'ifname': ifname,
              'addresses': [fmt(fixed) for fixed in port.fixed_ips]}
    if mtu:
        retval['mtu'] = mtu

    return retval


def _subnet_config(subnet):
    return {
        'id': str(subnet.id),
        'cidr': str(subnet.cidr),
        'dhcp_enabled': subnet.enable_dhcp and subnet.ipv6_ra_mode != 'slaac',
        'dns_nameservers': subnet.dns_nameservers,
        'host_routes': subnet.host_routes,
        'gateway_ip': (str(subnet.gateway_ip)
                       if subnet.gateway_ip is not None
                       else ''),
    }


def _allocation_config(ports, subnets_dict):
    r = re.compile('[:.]')
    service_ports_re = re.compile(
        '^ASTARA:(' + '|'.join(constants.ASTARA_SERVICE_PORT_TYPES) + '):.*$'
    )
    allocations = []

    for port in ports:
        if service_ports_re.match(port.name):
            continue

        addrs = {
            str(fixed.ip_address): subnets_dict[fixed.subnet_id].enable_dhcp
            for fixed in port.fixed_ips
        }

        if not addrs:
            continue

        allocations.append(
            {
                'ip_addresses': addrs,
                'device_id': port.device_id,
                'hostname': '%s.local' % r.sub('-', sorted(addrs.keys())[0]),
                'mac_address': port.mac_address
            }
        )

    return allocations
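A note on `_allocation_config` above: each allocation's hostname is derived from the port's (lexicographically) first IP address by replacing `.` and `:` with `-`, which works for IPv4 and IPv6 alike. A standalone sketch of that transformation:

```python
import re

r = re.compile('[:.]')  # same pattern used by _allocation_config above

print('%s.local' % r.sub('-', '192.168.1.10'))  # -> 192-168-1-10.local
print('%s.local' % r.sub('-', 'fdca::2'))       # -> fdca--2.local
```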
@@ -1,49 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_log import log as logging

from astara.api.config import common

LOG = logging.getLogger(__name__)


def build_config(client, loadbalancer, management_port, iface_map):
    LOG.debug('Generating configuration for loadbalancer %s', loadbalancer.id)

    network_config = [
        common.network_config(
            client,
            loadbalancer.vip_port,
            iface_map[loadbalancer.vip_port.network_id],
            'loadbalancer'),

        common.network_config(
            client,
            management_port,
            iface_map[management_port.network_id],
            'management'),
    ]

    out = {
        'hostname': 'ak-loadbalancer-%s' % loadbalancer.tenant_id,
        'tenant_id': loadbalancer.tenant_id,
        'networks': network_config,
        'services': {
            'loadbalancer': loadbalancer.to_dict()
        }
    }
    return out
@@ -1,171 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import netaddr

from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils

from astara.common.i18n import _LI, _LW
from astara.api.config import common

LOG = logging.getLogger(__name__)

DEFAULT_AS = 64512

OPTIONS = [
    cfg.StrOpt('provider_rules_path',
               default='/etc/astara/provider_rules.json'),
    cfg.IntOpt('asn', default=DEFAULT_AS),
    cfg.IntOpt('neighbor_asn', default=DEFAULT_AS),
]

cfg.CONF.register_opts(OPTIONS)

EXTERNAL_NET = 'external'
INTERNAL_NET = 'internal'
MANAGEMENT_NET = 'management'
SERVICE_STATIC = 'static'
SERVICE_DHCP = 'dhcp'
SERVICE_RA = 'ra'


def build_config(worker_context, router, management_port, interfaces):
    provider_rules = load_provider_rules(cfg.CONF.provider_rules_path)

    networks = generate_network_config(
        worker_context.neutron,
        router,
        management_port,
        interfaces
    )
    gateway = get_default_v4_gateway(
        worker_context.neutron, router, networks)

    return {
        'asn': cfg.CONF.asn,
        'neighbor_asn': cfg.CONF.neighbor_asn,
        'default_v4_gateway': gateway,
        'networks': networks,
        'labels': provider_rules.get('labels', {}),
        'floating_ips': generate_floating_config(router),
        'tenant_id': router.tenant_id,
        'hostname': 'ak-%s' % router.tenant_id,
        'orchestrator': worker_context.config,
        'ha_resource': router.ha,
        'vpn': generate_vpn_config(router, worker_context.neutron),
    }


def get_default_v4_gateway(client, router, networks):
    """Find the IPv4 default gateway for the router."""
    LOG.debug('networks = %r', networks)
    if router.external_port:
        LOG.debug('external interface = %s', router.external_port.mac_address)

    # Now find the subnet that our external IP is on, and return its
    # gateway.
    for n in networks:
        if n['network_type'] == EXTERNAL_NET:
            v4_addresses = [
                addr
                for addr in (netaddr.IPAddress(ip.partition('/')[0])
                             for ip in n['interface']['addresses'])
                if addr.version == 4
            ]
            for s in n['subnets']:
                subnet = netaddr.IPNetwork(s['cidr'])
                if subnet.version != 4:
                    continue
                LOG.debug(
                    '%s: checking if subnet %s should have the default route',
                    router.id, s['cidr'])
                for addr in v4_addresses:
                    if addr in subnet:
                        LOG.debug(
                            '%s: found gateway %s for subnet %s on network %s',
                            router.id,
                            s['gateway_ip'],
                            s['cidr'],
                            n['network_id'],
                        )
                        return s['gateway_ip']

    # Sometimes we are asked to build a configuration for the server
    # when the external interface is still marked as "down". We can
    # report that case, but we don't treat it as an error here because
    # we'll be asked to do it again when the interface comes up.
    LOG.info(_LI('%s: no default gateway was found'), router.id)
    return ''


def load_provider_rules(path):
    try:
        return jsonutils.load(open(path))
    except:  # pragma nocover
        LOG.warning(_LW('unable to open provider rules: %s'), path)
        return {}


def generate_network_config(client, router, management_port, iface_map):
    retval = [
        common.network_config(
            client,
            management_port,
            iface_map[management_port.network_id],
            MANAGEMENT_NET
        )
    ]

    if router.external_port:
        retval.extend([
            common.network_config(
                client,
                router.external_port,
                iface_map[router.external_port.network_id],
                EXTERNAL_NET)])

    retval.extend(
        common.network_config(
            client,
            p,
            iface_map[p.network_id],
            INTERNAL_NET,
            client.get_network_ports(p.network_id))
        for p in router.internal_ports)

    return retval


def generate_floating_config(router):
    return [
        {'floating_ip': str(fip.floating_ip), 'fixed_ip': str(fip.fixed_ip)}
        for fip in router.floating_ips
    ]


def generate_vpn_config(router, client):
    if not cfg.CONF.router.ipsec_vpn:
        return {}

    return {
        'ipsec': [
            v.to_dict() for v in client.get_vpnservices_for_router(router.id)
        ]
    }
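The heart of `get_default_v4_gateway` above is a netaddr membership test: if one of the external interface's IPv4 addresses falls inside an external v4 subnet, that subnet's gateway is used as the default route. A standalone sketch with illustrative addresses (the real code reads these from the generated network config):

```python
import netaddr

# Illustrative values, not taken from this diff.
addr = netaddr.IPAddress('172.16.5.10')
subnet = netaddr.IPNetwork('172.16.5.0/24')
gateway_ip = '172.16.5.1'

if addr.version == 4 and subnet.version == 4 and addr in subnet:
    print('default v4 gateway candidate:', gateway_ip)
```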
@@ -1,38 +0,0 @@
# Copyright (c) 2015 Akanda, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from keystoneclient import auth as ksauth
from keystoneclient import session as kssession

from oslo_config import cfg


CONF = cfg.CONF


class KeystoneSession(object):
    def __init__(self):
        self._session = None
        self.region_name = CONF.auth_region
        ksauth.register_conf_options(CONF, 'keystone_authtoken')

    @property
    def session(self):
        if not self._session:
            # Construct a Keystone session for configured auth_plugin
            # and credentials
            auth_plugin = ksauth.load_from_conf_options(
                cfg.CONF, 'keystone_authtoken')
            self._session = kssession.Session(auth=auth_plugin)
        return self._session
File diff suppressed because it is too large
@@ -1,453 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from datetime import datetime
import time

import netaddr
from novaclient import client
from novaclient import exceptions as novaclient_exceptions
from oslo_config import cfg
from oslo_log import log as logging

from astara.common.i18n import _LW, _LE, _LI
from astara.api import keystone
from astara.api import neutron
from astara.common import config
from astara.pez import rpcapi as pez_api

LOG = logging.getLogger(__name__)

OPTIONS = [
    cfg.StrOpt(
        'ssh_public_key',
        help="Path to the SSH public key for the 'astara' user within "
             "appliance instances",
        default='/etc/astara/astara.pub'),
    cfg.StrOpt(
        'instance_provider', default='on_demand',
        help='Which instance provider to use (on_demand, pez)'),
    cfg.StrOpt(
        'astara_boot_command', default='astara-configure-management',
        help='The boot command to run to configure the appliance'),
]
cfg.CONF.register_opts(OPTIONS)


class NovaInstanceDeleteTimeout(Exception):
    pass


class InstanceInfo(object):
    def __init__(self, instance_id, name, management_port=None, ports=(),
                 image_uuid=None, status=None, last_boot=None):
        self.id_ = instance_id
        self.name = name
        self.image_uuid = image_uuid

        self.nova_status = status

        self.management_port = management_port
        self._ports = ports
        self.last_boot = last_boot

    @property
    def booting(self):
        return 'BUILD' in self.nova_status

    @property
    def management_address(self):
        if self.management_port:
            return str(self.management_port.fixed_ips[0].ip_address)

    @property
    def time_since_boot(self):
        if self.last_boot:
            return datetime.utcnow() - self.last_boot

    @property
    def ports(self):
        return self._ports

    @ports.setter
    def ports(self, port_list):
        self._ports = [p for p in port_list if p != self.management_port]

    @classmethod
    def from_nova(cls, instance):
        """
        Returns an instantiated InstanceInfo object with data gathered from
        an existing Nova server.

        :param instance: novaclient.v2.servers.Server object for an existing
                         nova instance.
        :returns: InstanceInfo instance
        """
        # NOTE(adam_g): We do not yet actually rebuild any instances.
        # A rug REBUILD is actually a delete/create, so it
        # should be safe to track last_boot as the timestamp
        # the instance was last booted.
        last_boot = datetime.strptime(
            instance.created, "%Y-%m-%dT%H:%M:%SZ")
        return cls(
            instance_id=instance.id,
            name=instance.name,
            image_uuid=instance.image['id'],
            status=instance.status,
            last_boot=last_boot,
        )


class InstanceProvider(object):
    def __init__(self, client):
        self.nova_client = client
        LOG.debug(_LI(
            'Initialized %s with novaclient %s'),
            self.__class__.__name__, self.nova_client)

    def create_instance(self, driver, name, image_uuid, flavor,
                        make_ports_callback):
        """Create or get an instance

        :param router_id: UUID of the resource that the instance will host

        :returns: InstanceInfo object with at least id, name and image_uuid
                  set.
        """


class PezInstanceProvider(InstanceProvider):
    def __init__(self, client):
        super(PezInstanceProvider, self).__init__(client)
        self.rpc_client = pez_api.AstaraPezAPI(rpc_topic='astara-pez')
        LOG.debug(_LI(
            'Initialized %s with rpc client %s'),
            self.__class__.__name__, self.rpc_client)

    def create_instance(self, resource_type, name, image_uuid, flavor,
                        make_ports_callback):
        # TODO(adam_g): pez already creates the mgt port on boot and the one
        # we create here is wasted. callback needs to be adjusted
        mgt_port, instance_ports = make_ports_callback()

        mgt_port_dict = {
            'id': mgt_port.id,
            'network_id': mgt_port.network_id,
        }
        instance_ports_dicts = [{
            'id': p.id, 'network_id': p.network_id,
        } for p in instance_ports]

        LOG.debug('Requesting new %s instance from Pez.', resource_type)
        pez_instance = self.rpc_client.get_instance(
            resource_type, name, mgt_port_dict, instance_ports_dicts)
        LOG.debug('Got %s instance %s from Pez.',
                  resource_type, pez_instance['id'])

        server = self.nova_client.servers.get(pez_instance['id'])

        # deserialize port data
        mgt_port = neutron.Port.from_dict(pez_instance['management_port'])
        instance_ports = [
            neutron.Port.from_dict(p)
            for p in pez_instance['instance_ports']]

        boot_time = datetime.strptime(
            server.created, "%Y-%m-%dT%H:%M:%SZ")
        instance_info = InstanceInfo(
            instance_id=server.id,
            name=server.name,
            management_port=mgt_port,
            ports=instance_ports,
            image_uuid=image_uuid,
            status=server.status,
            last_boot=boot_time)

        return instance_info


class OnDemandInstanceProvider(InstanceProvider):
    def create_instance(self, resource_type, name, image_uuid, flavor,
                        make_ports_callback):
        mgt_port, instance_ports = make_ports_callback()

        nics = [{'net-id': p.network_id,
                 'v4-fixed-ip': '',
                 'port-id': p.id}
                for p in ([mgt_port] + instance_ports)]

        LOG.debug('creating instance %s with image %s',
                  name, image_uuid)

        server = self.nova_client.servers.create(
            name,
            image=image_uuid,
            flavor=flavor,
            nics=nics,
            config_drive=True,
            userdata=format_userdata(mgt_port)
        )

        server_status = None
        for i in range(1, 10):
            try:
                # novaclient loads attributes lazily and we need to wait until
                # the client object is populated. moving to keystone sessions
                # exposes this race.
                server_status = server.status
            except AttributeError:
                time.sleep(.5)
        assert server_status

        boot_time = datetime.strptime(
            server.created, "%Y-%m-%dT%H:%M:%SZ")
        instance_info = InstanceInfo(
            instance_id=server.id,
            name=name,
            management_port=mgt_port,
            ports=instance_ports,
            image_uuid=image_uuid,
            status=server.status,
            last_boot=boot_time)

        return instance_info

INSTANCE_PROVIDERS = {
    'on_demand': OnDemandInstanceProvider,
    'pez': PezInstanceProvider,
    'default': OnDemandInstanceProvider,
}


def get_instance_provider(provider):
    try:
        return INSTANCE_PROVIDERS[provider]
    except KeyError:
        default = INSTANCE_PROVIDERS['default']
        LOG.error(_LE('Could not find %s instance provider, using default %s'),
                  provider, default)
        return default


class Nova(object):
    def __init__(self, conf):
        self.conf = conf
        ks_session = keystone.KeystoneSession()
        self.client = client.Client(
            version='2',
            session=ks_session.session,
            region_name=conf.auth_region,
            endpoint_type=conf.endpoint_type)

        try:
            self.instance_provider = get_instance_provider(
                conf.instance_provider)(self.client)
        except AttributeError:
            default = INSTANCE_PROVIDERS['default']
            LOG.error(_LE('Could not find provider config, using default %s'),
                      default)
            self.instance_provider = default(self.client)

    def get_instances_for_obj(self, name):
        """Retrieves all nova servers for a given instance name.

        :param name: name of the instance being queried

        :returns: a list of novaclient.v2.servers.Server objects or []
        """
        search_opt = '^' + name + '.*$'
        instances = self.client.servers.list(
            search_opts=dict(name=search_opt)
        )
        if not instances:
            return []
        return [InstanceInfo.from_nova(i) for i in instances]

    def get_instance_for_obj(self, name):
        """Retrieves a nova server for a given instance name.

        :param name: name of the instance being queried

        :returns: a novaclient.v2.servers.Server object or None
        """
        instances = self.client.servers.list(
            search_opts=dict(name=name)
        )

        if instances:
            return instances[0]
        else:
            return None

    def get_instance_by_id(self, instance_id):
        """Retrieves a nova server for a given instance_id.

        :param instance_id: Nova instance ID of instance being queried

        :returns: a novaclient.v2.servers.Server object
        """
        try:
            return self.client.servers.get(instance_id)
        except novaclient_exceptions.NotFound:
            return None

    def destroy_instance(self, instance_info):
        if instance_info:
            LOG.debug('deleting instance %s', instance_info.name)
            self.client.servers.delete(instance_info.id_)

    def boot_instance(self,
                      resource_type,
                      prev_instance_info,
                      name,
                      image_uuid,
                      flavor,
                      make_ports_callback):

        if not prev_instance_info:
            instance = self.get_instance_for_obj(name)
        else:
            instance = self.get_instance_by_id(prev_instance_info.id_)

        # check to make sure this instance isn't pre-existing
        if instance:
            if 'BUILD' in instance.status:
                if prev_instance_info:
                    # if we had previous instance, return the same instance
                    # with updated status
                    prev_instance_info.nova_status = instance.status
                    instance_info = prev_instance_info
                else:
                    instance_info = InstanceInfo.from_nova(instance)
                return instance_info

            self.client.servers.delete(instance.id)
            return None

        # it is now safe to attempt boot
        instance_info = self.instance_provider.create_instance(
            resource_type=resource_type,
            name=name,
            image_uuid=image_uuid,
            flavor=flavor,
            make_ports_callback=make_ports_callback
        )
        return instance_info

    def update_instance_info(self, instance_info):
        """Used primarily for updating tracked instance status"""
        instance = self.get_instance_by_id(instance_info.id_)
        if not instance:
            return None
        instance_info.nova_status = instance.status
        return instance_info

    def delete_instances_and_wait(self, instance_infos):
        """Deletes the nova instance and waits for its deletion to complete"""
        to_poll = list(instance_infos)

        for inst in instance_infos:
            try:
                self.destroy_instance(inst)
            except novaclient_exceptions.NotFound:
                pass
            except Exception:
                LOG.exception(
                    _LE('Error deleting instance %s' % inst.id_))
                to_poll.remove(inst)

        # XXX parallelize this
        timed_out = []
        for inst in to_poll:
            start = time.time()
            i = 0
            while time.time() - start < cfg.CONF.boot_timeout:
                i += 1
                if not self.get_instance_by_id(inst.id_):
                    LOG.debug('Instance %s has been deleted', inst.id_)
                    break
                LOG.debug(
                    'Instance %s has not finished stopping', inst.id_)
                time.sleep(cfg.CONF.retry_delay)
            else:
                timed_out.append(inst)
                LOG.error(_LE(
                    'Instance %s failed to stop within %d secs'),
                    inst.id_, cfg.CONF.boot_timeout)

        if timed_out:
            raise NovaInstanceDeleteTimeout()


# TODO(mark): Convert this to dynamic yaml, proper network prefix and ssh-keys

TEMPLATE = """#cloud-config

cloud_config_modules:
  - emit_upstart
  - set_hostname
  - locale
  - set-passwords
  - timezone
  - disable-ec2-metadata
  - runcmd

output: {all: '| tee -a /var/log/cloud-init-output.log'}

debug:
  - verbose: true

bootcmd:
  - /usr/local/bin/%(boot_command)s %(mac_address)s %(ip_address)s/%(prefix)d

users:
  - name: astara
    gecos: Astara
    groups: users
    shell: /bin/bash
    sudo: ALL=(ALL) NOPASSWD:ALL
    lock-passwd: true
    ssh-authorized-keys:
      - %(ssh_public_key)s

final_message: "Astara appliance is running"
"""  # noqa


def _ssh_key():
    key = config.get_best_config_path(cfg.CONF.ssh_public_key)
    if not key:
        return ''
    try:
        with open(key) as out:
            return out.read().strip()
    except IOError:
        LOG.warning(_LW('Could not load router ssh public key from %s'), key)
        return ''


def format_userdata(mgt_port):
    mgt_net = netaddr.IPNetwork(cfg.CONF.management_prefix)
    ctxt = {
        'ssh_public_key': _ssh_key(),
        'mac_address': mgt_port.mac_address,
        'ip_address': mgt_port.fixed_ips[0].ip_address,
        'boot_command': cfg.CONF.astara_boot_command,
        'prefix': mgt_net.prefixlen
    }
    out = TEMPLATE % ctxt
    LOG.debug('Rendered cloud-init for instance: %s' % out)
    return out
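`format_userdata` above is plain `%`-interpolation of the cloud-config TEMPLATE. A sketch with placeholder values (none of these come from this diff; the real values come from the management port and `cfg.CONF`), showing the `bootcmd` line it produces:

```python
# Illustrative placeholder context for the TEMPLATE interpolation above.
ctxt = {
    'ssh_public_key': 'ssh-rsa AAAA... astara@example',
    'mac_address': 'fa:16:3e:00:00:01',
    'ip_address': 'fdca:3ba5:a17a:acda::2',
    'boot_command': 'astara-configure-management',
    'prefix': 64,
}
print('/usr/local/bin/%(boot_command)s %(mac_address)s '
      '%(ip_address)s/%(prefix)d' % ctxt)
# -> /usr/local/bin/astara-configure-management fa:16:3e:00:00:01
#    fdca:3ba5:a17a:acda::2/64
```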
@@ -1,125 +0,0 @@
# Copyright 2015 Akanda, Inc
#
# Author: Akanda, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import socket

import eventlet
import eventlet.wsgi
import webob
import webob.dec
import webob.exc
import six

from oslo_config import cfg
from oslo_log import log as logging

from astara.cli import app
from astara.common.i18n import _, _LE, _LI, _LW

LOG = logging.getLogger(__name__)

RUG_API_OPTS = [
    cfg.IntOpt('api_port', default=44250,
               help='Astara administrative API listening port',
               deprecated_opts=[
                   cfg.DeprecatedOpt('rug_api_port',
                                     group='DEFAULT')]),
    cfg.StrOpt('api_listen', default='0.0.0.0',
               help='Astara administrative API listening address')
]
cfg.CONF.register_opts(RUG_API_OPTS)


class RugAPI(object):

    def __init__(self, ctl=app.RugController):
        self.ctl = ctl()

    @webob.dec.wsgify(RequestClass=webob.Request)
    def __call__(self, req):
        try:
            if req.method != 'PUT':
                return webob.exc.HTTPMethodNotAllowed()

            args = filter(None, req.path.split('/'))
            if not args:
                return webob.exc.HTTPNotFound()

            command, _, _ = self.ctl.command_manager.find_command(args)
            if command.interactive:
                return webob.exc.HTTPNotImplemented()

            return str(self.ctl.run(['--debug'] + args))
        except SystemExit:
            # cliff invokes -h (help) on argparse failure
            # (which in turn results in sys.exit call)
            return webob.exc.HTTPBadRequest()
        except ValueError:
            return webob.exc.HTTPNotFound()
        except Exception:
            LOG.exception(_LE("Unexpected error."))
            msg = _('An unknown error has occurred. '
                    'Please try your request again.')
            return webob.exc.HTTPInternalServerError(
                explanation=six.text_type(msg))


class RugAPIServer(object):
    def __init__(self):
        self.pool = eventlet.GreenPool(1000)

    def run(self, ip_address, port):
        app = RugAPI()

        try:
            socket.inet_pton(socket.AF_INET6, ip_address)
            family = socket.AF_INET6
        except Exception:
            family = socket.AF_INET

        for i in six.moves.range(5):
            LOG.info(_LI(
                'Starting the rug-api on %s:%s'),
                ip_address, port,
            )
            try:
                sock = eventlet.listen(
                    (ip_address, port),
                    family=family,
                    backlog=128
                )
            except socket.error as err:
                if err.errno != 99:  # EADDRNOTAVAIL
                    raise
                LOG.warning(_LW('Could not create rug-api socket: %s'), err)
                LOG.warning(_LW('Sleeping %s before trying again'), i + 1)
                eventlet.sleep(i + 1)
            else:
                break
        else:
            raise RuntimeError(_(
                'Could not establish rug-api socket on %s:%s') %
                (ip_address, port)
            )
        eventlet.wsgi.server(
            sock,
            app,
            custom_pool=self.pool,
            log=LOG)


def serve():
    RugAPIServer().run(cfg.CONF.api_listen, cfg.CONF.api_port)
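`RugAPI` above only accepts PUT requests and maps the URL path segments onto a cliff command from the `astara.cli` namespace. A hedged sketch of how an operator might have driven it, assuming the default listen address and port registered above; the command name is a hypothetical stand-in for a non-interactive astara CLI command:

```python
import requests

# 'poll' stands in for a non-interactive command name; the address and
# port are the defaults registered above (api_listen/api_port).
resp = requests.put('http://127.0.0.1:44250/poll')
print(resp.status_code, resp.text)
```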
@@ -1,15 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
@ -1,49 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import logging

from cliff import app
from cliff import commandmanager
from oslo_config import cfg
import pkg_resources

from astara.common import config


class RugController(app.App):

    log = logging.getLogger(__name__)

    def __init__(self):
        dist = pkg_resources.get_distribution('astara')
        super(RugController, self).__init__(
            description='controller for the Astara Orchestrator service',
            version=dist.version,
            command_manager=commandmanager.CommandManager('astara.cli'),
        )

    def initialize_app(self, argv):
        # Quiet logging for the requests library
        logging.getLogger('requests').setLevel(logging.WARN)

        # Don't pass argv here because cfg.CONF will intercept the
        # help options and exit.
        cfg.CONF(['--config-file', config.get_best_config_path()],
                 project='astara-orchestrator')
        self.rug_ini = cfg.CONF
        return super(RugController, self).initialize_app(argv)
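cliff resolves the command name given on the command line against the 'astara.cli' entry-point namespace named in the CommandManager above. A hedged sketch of how such commands are typically registered (the actual setup.py/setup.cfg is not part of this diff; the module paths are taken from the files deleted below):

from setuptools import setup

setup(
    name='astara',
    entry_points={
        'astara.cli': [
            # each entry maps a CLI verb to a cliff Command class
            'config-reload = astara.cli.config:ConfigReload',
            'poll = astara.cli.poll:Poll',
            'browse = astara.cli.browse:BrowseRouters',
        ],
    },
)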
@ -1,377 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


"""Interactive CLI for rebuilding routers
"""

from __future__ import print_function
import logging
import os
import Queue
import sqlite3
import tempfile
import threading
import six
from contextlib import closing
from datetime import datetime

try:
    from blessed import Terminal
except ImportError:
    # blessed is not part of openstack global-requirements.
    raise Exception("The 'blessed' python module is required to browse"
                    " Astara routers. Please install and try again.")

from oslo_config import cfg

from astara import commands
from astara.api import nova as nova_api
from astara.api import neutron as neutron_api
from astara.cli import message

logging.getLogger("urllib3").setLevel(logging.ERROR)

cfg.CONF.import_opt('host', 'astara.main')


class FakeConfig(object):

    def __init__(self, admin_user, admin_password, tenant_name, auth_url,
                 auth_strategy, auth_region, instance_provider):
        self.admin_user = admin_user
        self.admin_password = admin_password
        self.tenant_name = tenant_name
        self.admin_tenant_name = tenant_name
        self.auth_url = auth_url
        self.auth_strategy = auth_strategy
        self.auth_region = auth_region
        self.instance_provider = instance_provider


class RouterRow(object):

    id = None
    name = None
    status = None
    latest = None
    image_name = None
    booted_at = None
    last_fetch = None
    nova_status = None

    def __init__(self, **kw):
        for k, v in kw.items():
            setattr(self, k, v)

        self.image_name = self.image_name or ''
        self.booted_at = self.booted_at or ''
        self.nova_status = self.nova_status or ''
        self.tenant_id = '*'
        if self.name and self.name.startswith('ak-'):
            self.tenant_id = self.name.replace('ak-', '')

    @classmethod
    def from_cursor(cls, cursor, row):
        d = {}
        for idx, col in enumerate(cursor.description):
            d[col[0]] = row[idx]
        return cls(**d)


class RouterFetcher(object):

    def __init__(self, conf, db, workers):
        self.db = db
        self.conn = sqlite3.connect(self.db)
        self.conn.row_factory = RouterRow.from_cursor
        self.nova = nova_api.Nova(conf)
        self.neutron = neutron_api.Neutron(conf)
        self.nova_queue = Queue.Queue()
        self.save_queue = Queue.Queue()

        # Create worker threads to perform Nova calls and put results
        # into a queue
        threads = [
            threading.Thread(
                name='fetcher-t%02d' % i,
                target=self.fetch_router_metadata,
            )
            for i in six.moves.range(workers)
        ]
        for t in threads:
            t.setDaemon(True)
            t.start()

    def fetch(self):
        routers = self.neutron.get_routers(detailed=False)
        routers.sort(key=lambda x: x.id)
        for router in routers:
            sql = ''.join([
                "INSERT OR IGNORE INTO routers ",
                "('id', 'name', 'latest') VALUES (",
                ', '.join("?" * 3),
                ");"
            ])

            with closing(self.conn.cursor()) as cursor:
                cursor.execute(
                    'SELECT * FROM routers WHERE id=?;',
                    (router.id,)
                )
                current_router = cursor.fetchone()

                if router.status not in ('BUILD', 'ACTIVE') and \
                        current_router and current_router.status == 'BOOT':
                    continue

                cursor.execute(sql, (router.id, router.name, None))
                cursor.execute(
                    'UPDATE routers SET status=? WHERE id=?',
                    (router.status, router.id)
                )
                self.conn.commit()
                self.nova_queue.put(router.id)

        # SQLite databases have a global database-wide lock for writes, so
        # we can't split the writes across threads. That's okay, though; the
        # slowness isn't the DB writes, it's the Nova API calls.
        while True:
            try:
                router, latest, name, booted_at, nova_status = \
                    self.save_queue.get(False)
                with closing(self.conn.cursor()) as cursor:
                    cursor.execute(
                        'UPDATE routers SET latest=?, image_name=?, '
                        'last_fetch=?, booted_at=? WHERE id=?',
                        (latest, name, datetime.utcnow(), booted_at, router)
                    )
                    if nova_status == 'BUILD':
                        cursor.execute(
                            'UPDATE routers SET status=? WHERE id=?',
                            ('BOOT', router)
                        )
                    self.conn.commit()
                self.save_queue.task_done()
            except Queue.Empty:
                # the queue *might* be empty, and that's okay
                break

    def fetch_router_metadata(self):
        conn = sqlite3.connect(self.db)
        conn.row_factory = RouterRow.from_cursor
        while True:
            router = RouterRow(id=self.nova_queue.get())
            image = None
            try:
                instance = self.nova.get_instance(router)
                image = self.nova.client.images.get(instance.image['id'])
            except Exception:
                pass
            if image:
                self.save_queue.put((
                    router.id,
                    image.id == cfg.CONF.router_image_uuid,
                    image.name,
                    instance.created,
                    instance.status
                ))
            else:
                self.save_queue.put((
                    router.id,
                    None,
                    None,
                    None,
                    None
                ))
            self.nova_queue.task_done()
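RouterFetcher is a producer/consumer pipeline: fetch() feeds router ids into nova_queue, the daemon threads started in __init__ fan the slow Nova calls out in parallel, and their results funnel back through save_queue so the SQLite writes stay on a single thread. A stripped-down sketch of the same shape (names and the fake fetch step are illustrative; Queue is the Python 2 module this file itself uses):

import threading
import Queue

task_q = Queue.Queue()
result_q = Queue.Queue()


def worker():
    while True:
        item = task_q.get()
        result_q.put((item, 'fetched-%s' % item))  # stand-in for a Nova call
        task_q.task_done()


for i in range(4):
    t = threading.Thread(name='fetcher-t%02d' % i, target=worker)
    t.setDaemon(True)  # dies with the main thread, like the threads above
    t.start()

for rid in ['r1', 'r2', 'r3']:
    task_q.put(rid)
task_q.join()  # then a single consumer drains result_q, as fetch() does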
def populate_routers(db, conf, workers):
    conf = FakeConfig(*conf)
    client = RouterFetcher(conf, db, workers)
    while True:
        try:
            client.fetch()
        except (KeyboardInterrupt, SystemExit):
            print("Killing background worker...")
            break


class BrowseRouters(message.MessageSending):
    """browse the state of every Astara appliance"""

    log = logging.getLogger(__name__)
    interactive = True

    SCHEMA = '''CREATE TABLE routers (
        id TEXT PRIMARY KEY,
        name TEXT,
        status TEXT,
        latest INTEGER,
        image_name TEXT,
        last_fetch TIMESTAMP,
        booted_at TIMESTAMP
    );'''

    def __init__(self, *a, **kw):
        self.term = Terminal()
        self.position = 0
        self.routers = []
        super(BrowseRouters, self).__init__(*a, **kw)

    def init_database(self):
        self.fh = tempfile.NamedTemporaryFile(delete=False)
        self.conn = sqlite3.connect(self.fh.name)
        self.conn.row_factory = RouterRow.from_cursor
        with closing(self.conn.cursor()) as cursor:
            cursor.execute(self.SCHEMA)

    def get_parser(self, prog_name):
        parser = super(BrowseRouters, self).get_parser(prog_name)
        parser.add_argument('--dump', dest='interactive', action='store_false')
        parser.add_argument('--threads', type=int, default=16)
        parser.set_defaults(interactive=True)
        return parser

    def take_action(self, parsed_args):
        self.interactive = parsed_args.interactive
        self.init_database()
        credentials = [
            cfg.CONF.admin_user,
            cfg.CONF.admin_password,
            cfg.CONF.admin_tenant_name,
            cfg.CONF.auth_url,
            cfg.CONF.auth_strategy,
            cfg.CONF.auth_region,
            cfg.CONF.instance_provider
        ]
        populate = threading.Thread(
            name='router-populater',
            target=populate_routers,
            args=(self.fh.name, credentials, parsed_args.threads)
        )
        populate.setDaemon(True)
        populate.start()
        self.handle_loop()

    def handle_loop(self):
        try:
            with self.term.fullscreen():
                with self.term.cbreak():
                    val = None
                    while val != u'q':
                        if not val:
                            self.fetch_routers()
                        elif val.is_sequence:
                            if val.code == self.term.KEY_DOWN:
                                self.move_down()
                            if val.code == self.term.KEY_UP:
                                self.move_up()
                        elif val == u'j':
                            self.move_down()
                        elif val == u'k':
                            self.move_up()
                        elif val == u'r':
                            self.rebuild_router()
                        if self.interactive:
                            self.print_routers()
                            val = self.term.inkey(timeout=3)
                        elif len(self.routers) and all(map(
                            lambda x: x.last_fetch, self.routers
                        )):
                            self.print_routers()
                            val = u'q'
            self._exit()
        except KeyboardInterrupt:
            self._exit()
            raise

    def fetch_routers(self):
        with self.conn:
            cursor = self.conn.cursor()
            cursor.execute('SELECT * FROM routers ORDER BY id ASC;')
            self.routers = cursor.fetchall()

    @property
    def window(self):
        offset = 0
        routers = self.routers
        visible_height = self.term.height - 2
        if len(routers) > visible_height:
            offset = self.position
            offset = min(offset, len(routers) - visible_height - 1)
        return offset, routers[offset:(offset+visible_height+1)]

    def print_routers(self):
        offset, routers = self.window
        with self.term.location():
            for i, r in enumerate(routers):
                if r.latest is None:
                    age = '<loading>'.ljust(11)
                elif r.latest:
                    age = self.term.green('LATEST'.ljust(11))
                elif not r.latest:
                    age = self.term.red('OUT-OF-DATE')
                args = [
                    r.id,
                    r.name,
                    self.router_states[r.status](r.status.ljust(7)),
                    age,
                    r.image_name,
                    'at',
                    r.booted_at
                ]
                if i + offset == self.position:
                    args = map(self.term.reverse, args[:-3]) + args[-3:]
                print(self.term.move(i, 0) + ' '.join(args))

    def make_message(self, router):
        return {
            'command': commands.ROUTER_REBUILD,
            'router_id': router.id,
            'tenant_id': router.tenant_id
        }

    def rebuild_router(self):
        offset, routers = self.window
        r = routers[self.position-offset]
        r.status = 'REBUILD'
        self.send_message(self.make_message(r))

    def move_up(self):
        self.position = max(0, self.position-1)

    def move_down(self):
        self.position = min(len(self.routers)-1, self.position+1)

    @property
    def router_states(self):
        return {
            'ACTIVE': self.term.green,
            'BUILD': self.term.yellow,
            'BOOT': self.term.yellow,
            'REBUILD': self.term.yellow,
            'DOWN': self.term.red,
            'ERROR': self.term.red
        }

    def _exit(self):
        if self.interactive:
            print('Deleting %s...' % self.fh.name)
        self.fh.close()
        os.remove(self.fh.name)
        if self.interactive:
            print('Exiting...')
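The RouterRow.from_cursor hook used throughout this file is a handy sqlite3 idiom: a row_factory receives (cursor, row) and can build any object from cursor.description. A self-contained sketch (table and values invented for illustration):

import sqlite3


class Row(object):
    def __init__(self, **kw):
        self.__dict__.update(kw)

    @classmethod
    def from_cursor(cls, cursor, row):
        # cursor.description yields one 7-tuple per column; index 0 is the
        # column name, so keyword arguments can be built from it.
        return cls(**{col[0]: row[idx]
                      for idx, col in enumerate(cursor.description)})


conn = sqlite3.connect(':memory:')
conn.row_factory = Row.from_cursor
conn.execute('CREATE TABLE routers (id TEXT, status TEXT)')
conn.execute("INSERT INTO routers VALUES ('r1', 'ACTIVE')")
row = conn.execute('SELECT * FROM routers').fetchone()
print('%s %s' % (row.id, row.status))  # -> r1 ACTIVE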
@ -1,37 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


"""Commands related to the application configuration
"""
import logging

from astara import commands
from astara.cli import message


class ConfigReload(message.MessageSending):
    """reload the configuration file(s)"""

    log = logging.getLogger(__name__)

    def make_message(self, parsed_args):
        self.log.info(
            'sending config reload instruction',
        )
        return {
            'command': commands.CONFIG_RELOAD,
        }
@ -1,56 +0,0 @@
# Copyright 2015 Akanda, Inc.
#
# Author: Akanda, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


"""Commands related to global debug mode.
"""
import logging

from astara import commands
from astara.cli import message


class GlobalDebug(message.MessageSending):
    """Enable or disable global debug mode"""

    _COMMAND = commands.GLOBAL_DEBUG

    log = logging.getLogger(__name__)

    def get_parser(self, prog_name):
        p = super(GlobalDebug, self).get_parser(prog_name)
        p.add_argument(
            'status',
        )
        p.add_argument(
            '--reason',
        )
        return p

    def make_message(self, parsed_args):
        status = parsed_args.status.lower()
        if status not in ['enable', 'disable']:
            m = "Invalid global-debug command, must be 'enable' or 'disable'"
            raise ValueError(m)

        self.log.info(
            "sending instruction to %s global debug mode" % status
        )
        return {
            'command': self._COMMAND,
            'enabled': 1 if status == "enable" else 0,
            'reason': parsed_args.reason,
        }
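For reference, the message this command ends up publishing is just a small dict; a sketch of the payload for the enable case (the reason string is made up; only the key names come from the code above):

from astara import commands

# What a `global-debug enable --reason maintenance` invocation would hand
# to MessageSending.send_message().
payload = {
    'command': commands.GLOBAL_DEBUG,
    'enabled': 1,
    'reason': 'maintenance',
}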
@ -1,24 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import sys

from astara.cli import app


def main(args=sys.argv[1:]):
    return app.RugController().run(args)
@ -1,52 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


"""Base class for a command that sends a message to the rug
"""
import abc
import logging
import os
import sys

from cliff import command

from astara import notifications


class MessageSending(command.Command):

    __metaclass__ = abc.ABCMeta

    log = logging.getLogger(__name__)
    interactive = False

    @abc.abstractmethod
    def make_message(self, parsed_args):
        """Return a dictionary containing the message contents
        """
        return {}

    def take_action(self, parsed_args):
        self.send_message(self.make_message(parsed_args))

    def send_message(self, payload):
        sender = notifications.Sender()
        cmd = payload.get('command')
        argv = os.path.basename(sys.argv[0])
        self.log.info('%s: sending %s instruction.' % (argv, cmd))
        self.log.debug('payload: %r', payload)
        sender.send(event_type='astara.command', message=payload)
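Writing a new control command is mostly a matter of subclassing MessageSending and filling in make_message; take_action() in the base class does the rest. A hedged sketch (the ClusterRebalance class and its 'cluster-rebalance' command string are invented for illustration and are not part of astara):

import logging

from astara.cli import message


class ClusterRebalance(message.MessageSending):
    """hypothetical command: ask all workers to rebalance"""

    log = logging.getLogger(__name__)

    def make_message(self, parsed_args):
        # The base class passes this dict straight to send_message(),
        # which publishes it as an 'astara.command' notification.
        return {'command': 'cluster-rebalance'}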
@ -1,33 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


"""Commands related to polling.
"""
import logging

from astara import commands
from astara.cli import message


class Poll(message.MessageSending):

    log = logging.getLogger(__name__)

    def make_message(self, parsed_args):
        return {
            'command': commands.POLL,
        }
@ -1,102 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


"""Commands related to astara resources.
"""
from astara.api import nova
from astara.cli import message
from astara import commands

from novaclient import exceptions
from oslo_config import cfg


class _TenantResourceCmd(message.MessageSending):

    def get_parser(self, prog_name):
        p = super(_TenantResourceCmd, self).get_parser(prog_name)
        p.add_argument(
            'resource_id',
        )
        p.add_argument(
            '--reason',
        )
        return p

    def make_message(self, parsed_args):
        resource_id = parsed_args.resource_id.lower()
        reason = parsed_args.reason
        self.log.info(
            'sending %s instruction for resource %r',
            self._COMMAND,
            resource_id,
        )
        return {
            'command': self._COMMAND,
            'resource_id': resource_id,
            'tenant_id': '*',
            'reason': reason,
        }


class ResourceUpdate(_TenantResourceCmd):
    """force-update a resource"""

    _COMMAND = commands.RESOURCE_UPDATE


class ResourceRebuild(_TenantResourceCmd):
    """force-rebuild a resource"""

    _COMMAND = commands.RESOURCE_REBUILD

    def get_parser(self, prog_name):
        p = super(ResourceRebuild, self).get_parser(prog_name)
        p.add_argument(
            '--image_uuid',
        )
        return p

    def take_action(self, parsed_args):
        uuid = parsed_args.image_uuid
        if uuid:
            nova_client = nova.Nova(cfg.CONF).client
            try:
                nova_client.images.get(uuid)
            except exceptions.NotFound:
                self.log.exception(
                    'could not retrieve custom image %s from Glance:' % uuid
                )
                raise
        return super(ResourceRebuild, self).take_action(parsed_args)

    def make_message(self, parsed_args):
        message = super(ResourceRebuild, self).make_message(parsed_args)
        message['resource_image_uuid'] = parsed_args.image_uuid
        return message


class ResourceDebug(_TenantResourceCmd):
    """debug a single resource"""

    _COMMAND = commands.RESOURCE_DEBUG


class ResourceManage(_TenantResourceCmd):
    """manage a single resource"""

    _COMMAND = commands.RESOURCE_MANAGE
@ -1,169 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


"""Commands related to routers.
"""
import argparse
import subprocess
import sys

from astara.common.i18n import _LW
from astara import commands
from astara.cli import message
from astara.api import keystone, nova

from novaclient import exceptions
from oslo_config import cfg
from oslo_log import log as logging

from neutronclient.v2_0 import client


LOG = logging.getLogger(__name__)


class _TenantRouterCmd(message.MessageSending):

    def get_parser(self, prog_name):
        new_cmd = str(prog_name).replace('router', 'resource')
        LOG.warning(_LW(
            "WARNING: '%s' is deprecated in favor of '%s' and will be removed "
            "in the Mitaka release.") % (prog_name, new_cmd))
        # Bypass the direct base class to let us put the tenant id
        # argument first
        p = super(_TenantRouterCmd, self).get_parser(prog_name)
        p.add_argument(
            'router_id',
        )
        p.add_argument(
            '--reason',
        )
        return p

    def make_message(self, parsed_args):
        router_id = parsed_args.router_id.lower()
        reason = parsed_args.reason
        if router_id in ['error', '*']:
            tenant_id = router_id
        else:
            # Look up the tenant for a given router so we can send the
            # command using both and the rug can route it to the correct
            # worker. We do the lookup here instead of in the rug to avoid
            # having to teach the rug notification and dispatching code
            # about how to find the owner of a router, and to shift the
            # burden of the neutron API call to the client so the server
            # doesn't block. It also gives us a chance to report an error
            # when we can't find the router.
            ks_session = keystone.KeystoneSession()
            n_c = client.Client(session=ks_session.session)
            response = n_c.list_routers(retrieve_all=True, id=router_id)
            try:
                router_details = response['routers'][0]
            except (KeyError, IndexError):
                raise ValueError('No router with id %r found.' %
                                 (router_id))
            assert router_details['id'] == router_id
            tenant_id = router_details['tenant_id']
        self.log.info(
            'sending %s instruction for tenant %r, router %r',
            self._COMMAND,
            tenant_id,
            router_id,
        )
        return {
            'command': self._COMMAND,
            'router_id': router_id,
            'tenant_id': tenant_id,
            'reason': reason,
        }


class RouterUpdate(_TenantRouterCmd):
    """force-update a router"""

    _COMMAND = commands.ROUTER_UPDATE


class RouterRebuild(_TenantRouterCmd):
    """force-rebuild a router"""

    _COMMAND = commands.ROUTER_REBUILD

    def get_parser(self, prog_name):
        p = super(RouterRebuild, self).get_parser(prog_name)
        p.add_argument(
            '--router_image_uuid',
        )
        return p

    def take_action(self, parsed_args):
        uuid = parsed_args.router_image_uuid
        if uuid:
            nova_client = nova.Nova(cfg.CONF).client
            try:
                nova_client.images.get(uuid)
            except exceptions.NotFound:
                self.log.exception(
                    'could not retrieve custom image %s from Glance:' % uuid
                )
                raise
        return super(RouterRebuild, self).take_action(parsed_args)

    def make_message(self, parsed_args):
        message = super(RouterRebuild, self).make_message(parsed_args)
        message['router_image_uuid'] = parsed_args.router_image_uuid
        return message


class RouterDebug(_TenantRouterCmd):
    """debug a single router"""

    _COMMAND = commands.ROUTER_DEBUG


class RouterManage(_TenantRouterCmd):
    """manage a single router"""

    _COMMAND = commands.ROUTER_MANAGE


class RouterSSH(_TenantRouterCmd):
    """ssh into a router over the management network"""

    interactive = True

    def get_parser(self, prog_name):
        p = super(RouterSSH, self).get_parser(prog_name)
        p.add_argument('remainder', nargs=argparse.REMAINDER)
        return p

    def take_action(self, parsed_args):
        ks_session = keystone.KeystoneSession()
        n_c = client.Client(session=ks_session.session)
        router_id = parsed_args.router_id.lower()
        port = n_c.list_ports(name="ASTARA:MGT:%s" % router_id)
        try:
            mgmt_ip_addr = port['ports'][0]['fixed_ips'].pop()['ip_address']
        except (KeyError, IndexError):
            raise ValueError(
                "No router management address found for router with id %s"
                % router_id)
        try:
            cmd = ["ssh", "astara@%s" % mgmt_ip_addr] + parsed_args.remainder
            subprocess.check_call(cmd)
        except subprocess.CalledProcessError as e:
            sys.exit(e.returncode)
@ -1,62 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


"""Commands related to tenants.
"""
import logging

from astara import commands
from astara.cli import message


class _TenantCmd(message.MessageSending):

    log = logging.getLogger(__name__)

    def get_parser(self, prog_name):
        p = super(_TenantCmd, self).get_parser(prog_name)
        p.add_argument(
            'tenant_id',
        )
        p.add_argument(
            '--reason',
        )
        return p

    def make_message(self, parsed_args):
        self.log.info(
            'sending %s instruction for tenant with uuid %r',
            self._COMMAND,
            parsed_args.tenant_id,
        )
        return {
            'command': self._COMMAND,
            'tenant_id': parsed_args.tenant_id,
            'reason': parsed_args.reason,
        }


class TenantDebug(_TenantCmd):
    """debug a single tenant"""

    _COMMAND = commands.TENANT_DEBUG


class TenantManage(_TenantCmd):
    """manage a single tenant"""

    _COMMAND = commands.TENANT_MANAGE
@ -1,37 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


"""Commands related to workers.
"""
import logging

from astara import commands
from astara.cli import message


class WorkerDebug(message.MessageSending):
    """debug all workers"""

    log = logging.getLogger(__name__)

    def make_message(self, parsed_args):
        self.log.info(
            'sending worker debug instruction',
        )
        return {
            'command': commands.WORKERS_DEBUG,
        }
@ -1,55 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


"""Constants for the commands
"""

# Special values for dispatching
WILDCARDS = ('*', 'error')

# Dump debugging details about the worker processes and threads
WORKERS_DEBUG = 'workers-debug'

# Router commands expect a 'router_id' argument in the payload with
# the UUID of the router

# Put a resource in debug/manage mode
RESOURCE_DEBUG = 'resource-debug'
RESOURCE_MANAGE = 'resource-manage'
# Send an updated config to the resource whether it is needed or not
RESOURCE_UPDATE = 'resource-update'
# Rebuild a resource from scratch
RESOURCE_REBUILD = 'resource-rebuild'

# These are the deprecated versions of the above, to be removed in M.
ROUTER_DEBUG = 'router-debug'
ROUTER_MANAGE = 'router-manage'
ROUTER_UPDATE = 'router-update'
ROUTER_REBUILD = 'router-rebuild'

# Put a tenant in debug/manage mode
# Expects a 'tenant_id' argument in the payload with the UUID of the tenant
TENANT_DEBUG = 'tenant-debug'
TENANT_MANAGE = 'tenant-manage'

# Configuration commands
CONFIG_RELOAD = 'config-reload'

# Force a poll of all resources right now
POLL = 'poll'

GLOBAL_DEBUG = 'global-debug'
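Tying the constants back to the CLI classes above: every command publishes a dict whose 'command' key is one of these strings. A sketch of a typical payload (the resource UUID and reason are invented for illustration; the key set comes from _TenantResourceCmd.make_message):

from astara import commands

payload = {
    'command': commands.RESOURCE_DEBUG,
    'resource_id': 'b109b331-633e-4b95-9fcf-7a8d17ba6348',  # illustrative
    'tenant_id': '*',  # '*' is one of the WILDCARDS dispatch values
    'reason': 'investigating a stuck state machine',
}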
@ -1,15 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
@ -1,96 +0,0 @@
# Copyright 2015 Akanda, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os

from oslo_config import cfg
from oslo_log import log


PREFERRED_CONFIG_FILEPATH = '/etc/astara/orchestrator.ini'
SEARCH_DIRS = ['/etc/astara', '/etc/akanda-rug', '/etc/akanda']
LEGACY_FILE_MAP = {
    'orchestrator.ini': 'rug.ini',
    'astara.pub': 'akanda.pub'
}

DEFAULT_CONFIG_FILES = [
    PREFERRED_CONFIG_FILEPATH
]


def get_best_config_path(filepath=PREFERRED_CONFIG_FILEPATH):
    if os.path.isfile(filepath):
        return filepath

    # now begin the attempt to fall back for compatibility
    dirname, basename = os.path.split(filepath)

    if dirname and dirname not in SEARCH_DIRS:
        return filepath  # retain the non-standard location

    for searchdir in SEARCH_DIRS:
        candidate_path = os.path.join(searchdir, basename)
        if os.path.isfile(candidate_path):
            return candidate_path

        if basename in LEGACY_FILE_MAP:
            candidate_path = os.path.join(searchdir, LEGACY_FILE_MAP[basename])
            if os.path.isfile(candidate_path):
                return candidate_path
    return filepath


def parse_config(argv, default_config_files=DEFAULT_CONFIG_FILES):
    log.register_options(cfg.CONF)
    # Set the logging format to include the process and thread, since
    # those aren't included in standard openstack logs but are useful
    # for the rug
    extended = ':'.join('%(' + n + ')s'
                        for n in ['name',
                                  'process',
                                  'processName',
                                  'threadName'])
    log_format = ('%(asctime)s.%(msecs)03d %(levelname)s ' +
                  extended + ' %(message)s')

    # Configure the default log levels for some third-party packages
    # that are chatty
    log_levels = [
        'amqp=WARN',
        'amqplib=WARN',
        'qpid.messaging=INFO',
        'sqlalchemy=WARN',
        'keystoneclient=INFO',
        'stevedore=INFO',
        'eventlet.wsgi.server=WARN',
        'requests=WARN',
        'astara.openstack.common.rpc.amqp=INFO',
        'neutronclient.client=INFO',
        'oslo.messaging=INFO',
        'iso8601=INFO',
        'cliff.commandmanager=INFO',
    ]
    cfg.CONF.set_default('logging_default_format_string', log_format)
    log.set_defaults(default_log_levels=log_levels)

    # For legacy compatibility
    default_config_files = map(get_best_config_path, default_config_files)

    # remove default config files that do not exist
    default_config_files = filter(os.path.isfile, default_config_files)

    cfg.CONF(argv,
             project='astara-orchestrator',
             default_config_files=default_config_files)
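A short sketch of the fallback behaviour of get_best_config_path (the filesystem states described in the comments are hypothetical):

from astara.common import config

# Returns /etc/astara/orchestrator.ini if it exists; otherwise tries the
# SEARCH_DIRS in order, including legacy names via LEGACY_FILE_MAP, so an
# older install may resolve to /etc/akanda-rug/rug.ini. If nothing is
# found, the preferred path is returned unchanged.
print(config.get_best_config_path())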
@ -1,24 +0,0 @@
# Copyright (c) 2016 Akanda, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


ASTARA_SERVICE_PORT_TYPES = [
    'VRRP',
    'LB',
]


ASTARA_MGT_PORT_TYPES = [
    'MGT',
]
@ -1,70 +0,0 @@
# Copyright (c) 2016 Akanda, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import collections
import threading


class ResourceContainer(object):

    def __init__(self):
        self.resources = {}
        self.deleted = collections.deque(maxlen=50)
        self.lock = threading.Lock()

    def __delitem__(self, item):
        with self.lock:
            del self.resources[item]
            self.deleted.append(item)

    def items(self):
        """Get all state machines.

        :returns: all state machines in this ResourceContainer
        """
        with self.lock:
            return list(self.resources.items())

    def values(self):
        with self.lock:
            return list(self.resources.values())

    def has_been_deleted(self, resource_id):
        """Check if a resource has been deleted.

        :param resource_id: The resource's id to check against the deleted
                            list
        :returns: Returns True if the resource_id has been deleted.
        """
        with self.lock:
            return resource_id in self.deleted

    def __getitem__(self, item):
        with self.lock:
            return self.resources[item]

    def __setitem__(self, key, value):
        with self.lock:
            self.resources[key] = value

    def __contains__(self, item):
        with self.lock:
            return item in self.resources

    def __bool__(self):
        if self.values():
            return True
        else:
            return False

    def __nonzero__(self):
        return self.__bool__()
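A minimal usage sketch of the container's locking semantics (the stand-in state-machine value is arbitrary; any object works):

container = ResourceContainer()
container['router-1'] = object()           # stored under the lock
assert 'router-1' in container             # membership is also locked
del container['router-1']                  # removal is remembered...
assert container.has_been_deleted('router-1')  # ...for the last 50 deletes
assert not container                       # __bool__/__nonzero__ see it empty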
@ -1,204 +0,0 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# Copyright 2015 Akanda, Inc.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import bisect
import hashlib
import threading

from oslo_config import cfg
import six
from six.moves import range

from astara.common.i18n import _

hash_opts = [
    cfg.IntOpt('hash_partition_exponent',
               default=5,
               help='Exponent to determine number of hash partitions to use '
                    'when distributing load across Rugs. Larger values '
                    'will result in more even distribution of load and less '
                    'load when rebalancing the ring, but more memory usage. '
                    'Number of partitions per rug is '
                    '(2^hash_partition_exponent). This determines the '
                    'granularity of rebalancing: given 10 hosts, and an '
                    'exponent of 2, there are 40 partitions in the ring. '
                    'A few thousand partitions should make rebalancing '
                    'smooth in most cases. The default is suitable for up to '
                    'a few hundred rugs. Too many partitions has a CPU '
                    'impact.'),
]

CONF = cfg.CONF
CONF.register_opts(hash_opts)


# A static key that can be used to choose a single host when from the
# ring we have no other data to hash with.
DC_KEY = 'astara_designated_coordinator'


class Invalid(Exception):
    pass


# Lifted from ironic with some modifications.
class HashRing(object):
    """A stable hash ring.

    We map item N to a host Y based on the closest lower hash:

    - hash(item) -> partition
    - hash(host) -> divider
    - closest lower divider is the host to use
    - we hash each host many times to spread load more finely
      as otherwise adding a host gets (on average) 50% of the load of
      just one other host assigned to it.
    """

    def __init__(self, hosts, replicas=1):
        """Create a new hash ring across the specified hosts.

        :param hosts: an iterable of hosts which will be mapped.
        :param replicas: number of hosts to map to each hash partition,
                         or len(hosts), whichever is lesser.
                         Default: 1

        """
        try:
            self.hosts = set(hosts)
            self.replicas = replicas if replicas <= len(hosts) else len(hosts)
        except TypeError:
            raise Invalid(
                _("Invalid hosts supplied when building HashRing."))

        self._host_hashes = {}
        for host in hosts:
            key = str(host).encode('utf8')
            key_hash = hashlib.md5(key)
            for p in range(2 ** CONF.hash_partition_exponent):
                key_hash.update(key)
                hashed_key = self._hash2int(key_hash)
                self._host_hashes[hashed_key] = host
        # Gather the (possibly colliding) resulting hashes into a bisectable
        # list.
        self._partitions = sorted(self._host_hashes.keys())

    def _hash2int(self, key_hash):
        """Convert the given hash's digest to a numerical value for the ring.

        :returns: An integer equivalent value of the digest.
        """
        return int(key_hash.hexdigest(), 16)

    def _get_partition(self, data):
        try:
            if six.PY3 and data is not None:
                data = data.encode('utf-8')
            key_hash = hashlib.md5(data)
            hashed_key = self._hash2int(key_hash)
            position = bisect.bisect(self._partitions, hashed_key)
            return position if position < len(self._partitions) else 0
        except TypeError:
            raise Invalid(
                _("Invalid data supplied to HashRing.get_hosts."))

    def get_hosts(self, data, ignore_hosts=None):
        """Get the list of hosts which the supplied data maps onto.

        :param data: A string identifier to be mapped across the ring.
        :param ignore_hosts: A list of hosts to skip when performing the hash.
                             Useful to temporarily skip down hosts without
                             performing a full rebalance.
                             Default: None.
        :returns: a list of hosts.
                  The length of this list depends on the number of replicas
                  this `HashRing` was created with. It may be less than this
                  if ignore_hosts is not None.
        """
        hosts = []
        if ignore_hosts is None:
            ignore_hosts = set()
        else:
            ignore_hosts = set(ignore_hosts)
            ignore_hosts.intersection_update(self.hosts)
        partition = self._get_partition(data)
        for replica in range(0, self.replicas):
            if len(hosts) + len(ignore_hosts) == len(self.hosts):
                # prevent infinite loop - cannot allocate more fallbacks.
                break
            # Linear probing: partition N, then N+1 etc.
            host = self._get_host(partition)
            while host in hosts or host in ignore_hosts:
                partition += 1
                if partition >= len(self._partitions):
                    partition = 0
                host = self._get_host(partition)
            hosts.append(host)
        return hosts

    def _get_host(self, partition):
        """Find what host is serving a partition.

        :param partition: The index of the partition in the partition map.
                          e.g. 0 is the first partition, 1 is the second.
        :return: The host object the ring was constructed with.
        """
        return self._host_hashes[self._partitions[partition]]


class HashRingManager(object):
    _hash_ring = None
    _lock = threading.Lock()

    def __init__(self):
        self._hosts = []
        self._balanced = False

    @property
    def balanced(self):
        return self._balanced

    @property
    def ring(self):
        # Hot path, no lock
        if self._hash_ring is not None:
            return self._hash_ring

        with self._lock:
            if self._hash_ring is None:
                ring = self._load_hash_ring()
                self.__class__._hash_ring = ring
            return self._hash_ring

    @property
    def hosts(self):
        return self.ring.hosts

    def _load_hash_ring(self):
        return HashRing(self._hosts)

    @classmethod
    def reset(cls):
        with cls._lock:
            cls._hash_ring = None
            cls._balanced = False

    def rebalance(self, hosts):
        self.reset()
        with self._lock:
            self._hosts = hosts
            self._balanced = True
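A brief sketch of how the ring maps identifiers to hosts (host names and the resource id are illustrative; the import path assumes this file lives at astara/common/hash_ring.py):

from astara.common.hash_ring import DC_KEY, HashRing

ring = HashRing(['rug-host-1', 'rug-host-2', 'rug-host-3'], replicas=2)

# The same id maps to the same replica set until ring membership changes.
hosts = ring.get_hosts('3e4f9bd8-0000-0000-0000-000000000000')

# With no natural key to hash, the static DC_KEY picks a stable coordinator.
coordinator = ring.get_hosts(DC_KEY)[0]

# A down host can be skipped without performing a full rebalance.
fallback = ring.get_hosts('3e4f9bd8-0000-0000-0000-000000000000',
                          ignore_hosts=['rug-host-2'])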
@ -1,24 +0,0 @@
# Copyright 2014 Akanda, Inc.
#
# Author: Akanda, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import oslo_i18n

_translators = oslo_i18n.TranslatorFactory(domain='astara')

_ = _translators.primary
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
@ -1,15 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
@@ -1,269 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import abc

import netaddr

from oslo_config import cfg
from oslo_log import log as logging

from astara.common.i18n import _, _LE, _LW
from astara.common.linux import ip_lib
from astara.common.linux import ovs_lib
from astara.common.linux import utils


LOG = logging.getLogger(__name__)
CONF = cfg.CONF


OPTS = [
    cfg.StrOpt('ovs_integration_bridge',
               default='br-int',
               help=_('Name of Open vSwitch bridge to use')),
    cfg.BoolOpt('ovs_use_veth',
                default=False,
                help=_('Uses veth for an interface or not')),
    cfg.StrOpt('network_device_mtu',
               help=_('MTU setting for device.')),
]
CONF.register_opts(OPTS)


AGENT_OPTIONS = [
    cfg.StrOpt('root_helper',
               default='sudo astara-rootwrap /etc/astara/rootwrap.conf'),
]
CONF.register_group(cfg.OptGroup(name='AGENT'))
CONF.register_opts(AGENT_OPTIONS, 'AGENT')


class LinuxInterfaceDriver(object):
    __metaclass__ = abc.ABCMeta

    # from linux IF_NAMESIZE
    DEV_NAME_LEN = 14
    DEV_NAME_PREFIX = 'tap'

    def __init__(self, conf):
        self.conf = conf
        self.root_helper = conf.AGENT.root_helper

    def init_l3(self, device_name, ip_cidrs, namespace=None):
        """Set the L3 settings for the interface using data from the port.

        ip_cidrs: list of 'X.X.X.X/YY' strings
        """
        device = ip_lib.IPDevice(device_name,
                                 self.root_helper,
                                 namespace=namespace)

        previous = {}
        for address in device.addr.list(scope='global', filters=['permanent']):
            previous[address['cidr']] = address['ip_version']

        # add new addresses
        for ip_cidr in ip_cidrs:

            net = netaddr.IPNetwork(ip_cidr)
            if ip_cidr in previous:
                del previous[ip_cidr]
                continue

            device.addr.add(net.version, ip_cidr, str(net.broadcast))

        # clean up any old addresses
        for ip_cidr, ip_version in previous.items():
            device.addr.delete(ip_version, ip_cidr)

    def check_bridge_exists(self, bridge):
        if not ip_lib.device_exists(bridge):
            raise Exception(_('Bridge %s does not exist') % bridge)

    def get_device_name(self, port):
        return (self.DEV_NAME_PREFIX + port.id)[:self.DEV_NAME_LEN]

    @abc.abstractmethod
    def plug(self, network_id, port_id, device_name, mac_address,
             bridge=None, namespace=None, prefix=None):
        """Plug in the interface."""

    @abc.abstractmethod
    def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
        """Unplug the interface."""


class NullDriver(LinuxInterfaceDriver):
    def plug(self, network_id, port_id, device_name, mac_address,
             bridge=None, namespace=None, prefix=None):
        pass

    def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
        pass


class OVSInterfaceDriver(LinuxInterfaceDriver):
    """Driver for creating an internal interface on an OVS bridge."""

    DEV_NAME_PREFIX = 'tap'

    def __init__(self, conf):
        super(OVSInterfaceDriver, self).__init__(conf)
        if self.conf.ovs_use_veth:
            self.DEV_NAME_PREFIX = 'ns-'

    def _get_tap_name(self, dev_name, prefix=None):
        if self.conf.ovs_use_veth:
            dev_name = dev_name.replace(prefix or self.DEV_NAME_PREFIX, 'tap')
        return dev_name

    def _ovs_add_port(self, bridge, device_name, port_id, mac_address,
                      internal=True):
        cmd = ['ovs-vsctl', '--', '--may-exist',
               'add-port', bridge, device_name]
        if internal:
            cmd += ['--', 'set', 'Interface', device_name, 'type=internal']
        cmd += ['--', 'set', 'Interface', device_name,
                'external-ids:iface-id=%s' % port_id,
                '--', 'set', 'Interface', device_name,
                'external-ids:iface-status=active',
                '--', 'set', 'Interface', device_name,
                'external-ids:attached-mac=%s' % mac_address]
        utils.execute(cmd, self.root_helper)

    def plug(self, network_id, port_id, device_name, mac_address,
             bridge=None, namespace=None, prefix=None):
        """Plug in the interface."""
        if not bridge:
            bridge = self.conf.ovs_integration_bridge

        self.check_bridge_exists(bridge)

        if not ip_lib.device_exists(device_name,
                                    self.root_helper,
                                    namespace=namespace):

            ip = ip_lib.IPWrapper(self.root_helper)
            tap_name = self._get_tap_name(device_name, prefix)

            if self.conf.ovs_use_veth:
                root_dev, ns_dev = ip.add_veth(tap_name, device_name)

            internal = not self.conf.ovs_use_veth
            self._ovs_add_port(bridge, tap_name, port_id, mac_address,
                               internal=internal)

            ns_dev = ip.device(device_name)
            ns_dev.link.set_address(mac_address)

            if self.conf.network_device_mtu:
                ns_dev.link.set_mtu(self.conf.network_device_mtu)
                if self.conf.ovs_use_veth:
                    root_dev.link.set_mtu(self.conf.network_device_mtu)

            if namespace:
                namespace_obj = ip.ensure_namespace(namespace)
                namespace_obj.add_device_to_namespace(ns_dev)

            ns_dev.link.set_up()
            if self.conf.ovs_use_veth:
                root_dev.link.set_up()
        else:
            LOG.warning(_LW("Device %s already exists"), device_name)

    def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
        """Unplug the interface."""
        if not bridge:
            bridge = self.conf.ovs_integration_bridge

        tap_name = self._get_tap_name(device_name, prefix)
        self.check_bridge_exists(bridge)
        ovs = ovs_lib.OVSBridge(bridge, self.root_helper)

        try:
            ovs.delete_port(tap_name)
            if self.conf.ovs_use_veth:
                device = ip_lib.IPDevice(device_name,
                                         self.root_helper,
                                         namespace)
                device.link.delete()
                LOG.debug(_("Unplugged interface '%s'"), device_name)
        except RuntimeError:
            LOG.exception(_LE("Failed unplugging interface '%s'"), device_name)


class BridgeInterfaceDriver(LinuxInterfaceDriver):
    """Driver for creating bridge interfaces."""

    DEV_NAME_PREFIX = 'ns-'

    def plug(self, network_id, port_id, device_name, mac_address,
             bridge=None, namespace=None, prefix=None):
        """Plug in the interface."""
        if not ip_lib.device_exists(device_name,
                                    self.root_helper,
                                    namespace=namespace):
            ip = ip_lib.IPWrapper(self.root_helper)

            # Enable agent to define the prefix
            if prefix:
                tap_name = device_name.replace(prefix, 'tap')
            else:
                tap_name = device_name.replace(self.DEV_NAME_PREFIX, 'tap')
            root_veth, ns_veth = ip.add_veth(tap_name, device_name)
            ns_veth.link.set_address(mac_address)

            if self.conf.network_device_mtu:
                root_veth.link.set_mtu(self.conf.network_device_mtu)
                ns_veth.link.set_mtu(self.conf.network_device_mtu)

            if namespace:
                namespace_obj = ip.ensure_namespace(namespace)
                namespace_obj.add_device_to_namespace(ns_veth)

            root_veth.link.set_up()
            ns_veth.link.set_up()

        else:
            LOG.warning(_LW("Device %s already exists"), device_name)

    def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
        """Unplug the interface."""
        device = ip_lib.IPDevice(device_name, self.root_helper, namespace)
        try:
            device.link.delete()
            LOG.debug("Unplugged interface '%s'", device_name)
        except RuntimeError:
            LOG.exception(_LE(
                "Failed unplugging interface '%s'"), device_name)
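Every driver above implements the same plug/init_l3/unplug contract, keyed off the ovs_* options registered at the top of the file. A minimal usage sketch; the port object, IDs, and addresses below are invented for illustration:

    import collections

    from oslo_config import cfg

    from astara.common.linux import interface

    # Stand-in for a Neutron-style port; only id and mac_address are used.
    Port = collections.namedtuple('Port', ['id', 'mac_address'])
    port = Port(id='example-port-id', mac_address='fa:16:3e:00:00:01')

    driver = interface.OVSInterfaceDriver(cfg.CONF)
    name = driver.get_device_name(port)      # 'tap' + port.id, cut to 14 chars
    driver.plug('example-net-id', port.id, name, port.mac_address,
                namespace='qrouter-example')
    driver.init_l3(name, ['192.0.2.10/24'], namespace='qrouter-example')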
@@ -1,450 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import netaddr

from astara.common.linux import utils
from astara.common.i18n import _

from oslo_log import log

LOOPBACK_DEVNAME = 'lo'

LOG = log.getLogger(__name__)


class SubProcessBase(object):
    def __init__(self, root_helper=None, namespace=None):
        self.root_helper = root_helper
        self.namespace = namespace

    def _run(self, options, command, args):
        if self.namespace:
            return self._as_root(options, command, args)
        else:
            return self._execute(options, command, args)

    def _as_root(self, options, command, args, use_root_namespace=False):
        if not self.root_helper:
            raise Exception(_('Sudo is required to run this command'))

        namespace = self.namespace if not use_root_namespace else None

        return self._execute(options,
                             command,
                             args,
                             self.root_helper,
                             namespace)

    @classmethod
    def _execute(cls, options, command, args, root_helper=None,
                 namespace=None):
        opt_list = ['-%s' % o for o in options]
        if namespace:
            ip_cmd = ['ip', 'netns', 'exec', namespace, 'ip']
        else:
            ip_cmd = ['ip']
        return utils.execute(ip_cmd + opt_list + [command] + list(args),
                             root_helper=root_helper)


class IPWrapper(SubProcessBase):
    def __init__(self, root_helper=None, namespace=None):
        super(IPWrapper, self).__init__(root_helper=root_helper,
                                        namespace=namespace)
        self.netns = IpNetnsCommand(self)

    def device(self, name):
        return IPDevice(name, self.root_helper, self.namespace)

    def get_devices(self, exclude_loopback=False):
        retval = []
        output = self._execute('o', 'link', ('list',),
                               self.root_helper, self.namespace)
        for line in output.split('\n'):
            if '<' not in line:
                continue
            tokens = line.split(':', 2)
            if len(tokens) >= 3:
                name = tokens[1].strip()

                if exclude_loopback and name == LOOPBACK_DEVNAME:
                    continue

                retval.append(IPDevice(name,
                                       self.root_helper,
                                       self.namespace))
        return retval

    def add_tuntap(self, name, mode='tap'):
        self._as_root('', 'tuntap', ('add', name, 'mode', mode))
        return IPDevice(name, self.root_helper, self.namespace)

    def add_veth(self, name1, name2):
        self._as_root('', 'link',
                      ('add', name1, 'type', 'veth', 'peer', 'name', name2))

        return (IPDevice(name1, self.root_helper, self.namespace),
                IPDevice(name2, self.root_helper, self.namespace))

    def ensure_namespace(self, name):
        if not self.netns.exists(name):
            ip = self.netns.add(name)
            lo = ip.device(LOOPBACK_DEVNAME)
            lo.link.set_up()
        else:
            ip = IPWrapper(self.root_helper, name)
        return ip

    def namespace_is_empty(self):
        return not self.get_devices(exclude_loopback=True)

    def garbage_collect_namespace(self):
        """Conditionally destroy the namespace if it is empty."""
        if self.namespace and self.netns.exists(self.namespace):
            if self.namespace_is_empty():
                self.netns.delete(self.namespace)
                return True
        return False

    def add_device_to_namespace(self, device):
        if self.namespace:
            device.link.set_netns(self.namespace)

    @classmethod
    def get_namespaces(cls, root_helper):
        output = cls._execute('', 'netns', ('list',), root_helper=root_helper)
        return [l.strip() for l in output.split('\n')]


class IPDevice(SubProcessBase):
    def __init__(self, name, root_helper=None, namespace=None):
        super(IPDevice, self).__init__(root_helper=root_helper,
                                       namespace=namespace)
        self.name = name
        self.link = IpLinkCommand(self)
        self.addr = IpAddrCommand(self)
        self.route = IpRouteCommand(self)

    def __eq__(self, other):
        return (other is not None and self.name == other.name and
                self.namespace == other.namespace)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __str__(self):
        return self.name


class IpCommandBase(object):
    COMMAND = ''

    def __init__(self, parent):
        self._parent = parent

    def _run(self, *args, **kwargs):
        return self._parent._run(kwargs.get('options', []), self.COMMAND, args)

    def _as_root(self, *args, **kwargs):
        return self._parent._as_root(kwargs.get('options', []),
                                     self.COMMAND,
                                     args,
                                     kwargs.get('use_root_namespace', False))


class IpDeviceCommandBase(IpCommandBase):
    @property
    def name(self):
        return self._parent.name


class IpLinkCommand(IpDeviceCommandBase):
    COMMAND = 'link'

    def set_address(self, mac_address):
        self._as_root('set', self.name, 'address', mac_address)

    def set_mtu(self, mtu_size):
        self._as_root('set', self.name, 'mtu', mtu_size)

    def set_up(self):
        self._as_root('set', self.name, 'up')

    def set_down(self):
        self._as_root('set', self.name, 'down')

    def set_netns(self, namespace):
        self._as_root('set', self.name, 'netns', namespace)
        self._parent.namespace = namespace

    def set_name(self, name):
        self._as_root('set', self.name, 'name', name)
        self._parent.name = name

    def set_alias(self, alias_name):
        self._as_root('set', self.name, 'alias', alias_name)

    def delete(self):
        self._as_root('delete', self.name)

    @property
    def address(self):
        return self.attributes.get('link/ether')

    @property
    def state(self):
        return self.attributes.get('state')

    @property
    def mtu(self):
        return self.attributes.get('mtu')

    @property
    def qdisc(self):
        return self.attributes.get('qdisc')

    @property
    def qlen(self):
        return self.attributes.get('qlen')

    @property
    def alias(self):
        return self.attributes.get('alias')

    @property
    def attributes(self):
        return self._parse_line(self._run('show', self.name, options='o'))

    def _parse_line(self, value):
        if not value:
            return {}

        device_name, settings = value.replace("\\", '').split('>', 1)
        tokens = settings.split()
        keys = tokens[::2]
        values = [int(v) if v.isdigit() else v for v in tokens[1::2]]

        retval = dict(zip(keys, values))
        return retval


class IpAddrCommand(IpDeviceCommandBase):
    COMMAND = 'addr'

    def add(self, ip_version, cidr, broadcast, scope='global'):
        self._as_root('add',
                      cidr,
                      'brd',
                      broadcast,
                      'scope',
                      scope,
                      'dev',
                      self.name,
                      options=[ip_version])

    def delete(self, ip_version, cidr):
        self._as_root('del',
                      cidr,
                      'dev',
                      self.name,
                      options=[ip_version])

    def flush(self):
        self._as_root('flush', self.name)

    def list(self, scope=None, to=None, filters=None):
        if filters is None:
            filters = []

        retval = []

        if scope:
            filters += ['scope', scope]
        if to:
            filters += ['to', to]

        for line in self._run('show', self.name, *filters).split('\n'):
            line = line.strip()
            if not line.startswith('inet'):
                continue
            parts = line.split()
            if parts[0] == 'inet6':
                version = 6
                scope = parts[3]
                broadcast = '::'
            else:
                version = 4
                if parts[2] == 'brd':
                    broadcast = parts[3]
                    scope = parts[5]
                else:
                    # sometimes output of 'ip a' might look like:
                    # inet 192.168.100.100/24 scope global eth0
                    # and broadcast needs to be calculated from CIDR
                    broadcast = str(netaddr.IPNetwork(parts[1]).broadcast)
                    scope = parts[3]

            retval.append(dict(cidr=parts[1],
                               broadcast=broadcast,
                               scope=scope,
                               ip_version=version,
                               dynamic=('dynamic' == parts[-1])))
        return retval


class IpRouteCommand(IpDeviceCommandBase):
    COMMAND = 'route'

    def add_gateway(self, gateway, metric=None):
        args = ['replace', 'default', 'via', gateway]
        if metric:
            args += ['metric', metric]
        args += ['dev', self.name]
        self._as_root(*args)

    def delete_gateway(self, gateway):
        self._as_root('del',
                      'default',
                      'via',
                      gateway,
                      'dev',
                      self.name)

    def get_gateway(self, scope=None, filters=None):
        if filters is None:
            filters = []

        retval = None

        if scope:
            filters += ['scope', scope]

        route_list_lines = self._run('list', 'dev', self.name,
                                     *filters).split('\n')
        default_route_line = next((x.strip() for x in
                                   route_list_lines if
                                   x.strip().startswith('default')), None)
        if default_route_line:
            gateway_index = 2
            parts = default_route_line.split()
            retval = dict(gateway=parts[gateway_index])
            metric_index = 4
            parts_has_metric = (len(parts) > metric_index)
            if parts_has_metric:
                retval.update(metric=int(parts[metric_index]))

        return retval

    def pullup_route(self, interface_name):
        """
        Ensures that the route entry for the interface is before all
        others on the same subnet.
        """
        device_list = []
        device_route_list_lines = self._run('list', 'proto', 'kernel',
                                            'dev', interface_name).split('\n')
        for device_route_line in device_route_list_lines:
            try:
                subnet = device_route_line.split()[0]
            except:
                continue
            subnet_route_list_lines = self._run('list', 'proto', 'kernel',
                                                'match', subnet).split('\n')
            for subnet_route_line in subnet_route_list_lines:
                i = iter(subnet_route_line.split())
                while(next(i) != 'dev'):
                    pass
                device = next(i)
                try:
                    while(next(i) != 'src'):
                        pass
                    src = next(i)
                except:
                    src = ''
                if device != interface_name:
                    device_list.append((device, src))
                else:
                    break

        for (device, src) in device_list:
            self._as_root('del', subnet, 'dev', device)
            if (src != ''):
                self._as_root('append', subnet, 'proto', 'kernel',
                              'src', src, 'dev', device)
            else:
                self._as_root('append', subnet, 'proto', 'kernel',
                              'dev', device)


class IpNetnsCommand(IpCommandBase):
    COMMAND = 'netns'

    def add(self, name):
        self._as_root('add', name, use_root_namespace=True)
        return IPWrapper(self._parent.root_helper, name)

    def delete(self, name):
        self._as_root('delete', name, use_root_namespace=True)

    def execute(self, cmds, addl_env={}, check_exit_code=True):
        if not self._parent.root_helper:
            m = _('sudo is required to run this command')
            LOG.error(m)
            raise Exception(m)
        elif not self._parent.namespace:
            m = _('No namespace defined for parent')
            LOG.error(m)
            raise Exception(m)
        else:
            return utils.execute(
                ['%s=%s' % pair for pair in addl_env.items()] +
                ['ip', 'netns', 'exec', self._parent.namespace] + list(cmds),
                root_helper=self._parent.root_helper,
                check_exit_code=check_exit_code)

    def exists(self, name):
        output = self._as_root('list', options='o', use_root_namespace=True)

        for line in output.split('\n'):
            if name == line.strip():
                return True
        return False


def device_exists(device_name, root_helper=None, namespace=None):
    try:
        address = IPDevice(device_name, root_helper, namespace).link.address
    except RuntimeError:
        return False
    return bool(address)
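Everything in this module shells out to ip(8) via utils.execute rather than speaking netlink. A short sketch of the namespace round-trip it supports; the namespace name and root helper are examples:

    from astara.common.linux import ip_lib

    ip = ip_lib.IPWrapper(root_helper='sudo')
    # Creates the namespace and brings 'lo' up if it does not exist yet.
    ns = ip.ensure_namespace('qrouter-example')
    for dev in ns.get_devices(exclude_loopback=True):
        # Parsed from 'ip -o link show' inside the namespace.
        print('%s %s' % (dev.name, dev.link.state))
    ns.garbage_collect_namespace()  # deletes the namespace only when empty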
@@ -1,321 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Nicira Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Somik Behera, Nicira Networks, Inc.
# @author: Brad Hall, Nicira Networks, Inc.
# @author: Dan Wendlandt, Nicira Networks, Inc.
# @author: Dave Lapsley, Nicira Networks, Inc.

import re

from astara.common.linux import utils
from astara.common.i18n import _, _LE, _LW

from oslo_log import log as logging

LOG = logging.getLogger(__name__)


class VifPort:
    def __init__(self, port_name, ofport, vif_id, vif_mac, switch):
        self.port_name = port_name
        self.ofport = ofport
        self.vif_id = vif_id
        self.vif_mac = vif_mac
        self.switch = switch

    def __str__(self):
        return ("iface-id=" + self.vif_id + ", vif_mac=" +
                self.vif_mac + ", port_name=" + self.port_name +
                ", ofport=" + str(self.ofport) + ", bridge_name =" +
                self.switch.br_name)


class OVSBridge:
    def __init__(self, br_name, root_helper):
        self.br_name = br_name
        self.root_helper = root_helper
        self.re_id = self.re_compile_id()

    def re_compile_id(self):
        external = 'external_ids\s*'
        mac = 'attached-mac="(?P<vif_mac>([a-fA-F\d]{2}:){5}([a-fA-F\d]{2}))"'
        iface = 'iface-id="(?P<vif_id>[^"]+)"'
        name = 'name\s*:\s"(?P<port_name>[^"]*)"'
        port = 'ofport\s*:\s(?P<ofport>-?\d+)'
        _re = ('%(external)s:\s{ ( %(mac)s,? | %(iface)s,? | . )* }'
               ' \s+ %(name)s \s+ %(port)s' % locals())
        return re.compile(_re, re.M | re.X)

    def run_vsctl(self, args):
        full_args = ["ovs-vsctl", "--timeout=2"] + args
        try:
            return utils.execute(full_args, root_helper=self.root_helper)
        except Exception, e:
            LOG.error(_LE(
                "Unable to execute %(cmd)s. Exception: %(exception)s"),
                {'cmd': full_args, 'exception': e})

    def reset_bridge(self):
        self.run_vsctl(["--", "--if-exists", "del-br", self.br_name])
        self.run_vsctl(["add-br", self.br_name])

    def add_port(self, port_name):
        self.run_vsctl(["--", "--may-exist", "add-port", self.br_name,
                        port_name])
        return self.get_port_ofport(port_name)

    def delete_port(self, port_name):
        self.run_vsctl(["--", "--if-exists", "del-port", self.br_name,
                        port_name])

    def set_db_attribute(self, table_name, record, column, value):
        args = ["set", table_name, record, "%s=%s" % (column, value)]
        self.run_vsctl(args)

    def clear_db_attribute(self, table_name, record, column):
        args = ["clear", table_name, record, column]
        self.run_vsctl(args)

    def run_ofctl(self, cmd, args):
        full_args = ["ovs-ofctl", cmd, self.br_name] + args
        try:
            return utils.execute(full_args, root_helper=self.root_helper)
        except Exception, e:
            LOG.error(_LE(
                "Unable to execute %(cmd)s. Exception: %(exception)s"),
                {'cmd': full_args, 'exception': e})

    def count_flows(self):
        flow_list = self.run_ofctl("dump-flows", []).split("\n")[1:]
        return len(flow_list) - 1

    def remove_all_flows(self):
        self.run_ofctl("del-flows", [])

    def get_port_ofport(self, port_name):
        return self.db_get_val("Interface", port_name, "ofport")

    def get_datapath_id(self):
        return self.db_get_val('Bridge',
                               self.br_name, 'datapath_id').strip('"')

    def _build_flow_expr_arr(self, **kwargs):
        flow_expr_arr = []
        is_delete_expr = kwargs.get('delete', False)
        if not is_delete_expr:
            prefix = ("hard_timeout=%s,idle_timeout=%s,priority=%s" %
                      (kwargs.get('hard_timeout', '0'),
                       kwargs.get('idle_timeout', '0'),
                       kwargs.get('priority', '1')))
            flow_expr_arr.append(prefix)
        elif 'priority' in kwargs:
            raise Exception(_("Cannot match priority on flow deletion"))

        in_port = ('in_port' in kwargs and ",in_port=%s" %
                   kwargs['in_port'] or '')
        dl_type = ('dl_type' in kwargs and ",dl_type=%s" %
                   kwargs['dl_type'] or '')
        dl_vlan = ('dl_vlan' in kwargs and ",dl_vlan=%s" %
                   kwargs['dl_vlan'] or '')
        dl_src = 'dl_src' in kwargs and ",dl_src=%s" % kwargs['dl_src'] or ''
        dl_dst = 'dl_dst' in kwargs and ",dl_dst=%s" % kwargs['dl_dst'] or ''
        nw_src = 'nw_src' in kwargs and ",nw_src=%s" % kwargs['nw_src'] or ''
        nw_dst = 'nw_dst' in kwargs and ",nw_dst=%s" % kwargs['nw_dst'] or ''
        tun_id = 'tun_id' in kwargs and ",tun_id=%s" % kwargs['tun_id'] or ''
        proto = 'proto' in kwargs and ",%s" % kwargs['proto'] or ''
        ip = ('nw_src' in kwargs or 'nw_dst' in kwargs) and ',ip' or ''
        match = (in_port + dl_type + dl_vlan + dl_src + dl_dst +
                 (ip or proto) + nw_src + nw_dst + tun_id)
        if match:
            match = match[1:]  # strip leading comma
            flow_expr_arr.append(match)
        return flow_expr_arr

    def add_flow(self, **kwargs):
        if "actions" not in kwargs:
            raise Exception(_("Must specify one or more actions"))
        if "priority" not in kwargs:
            kwargs["priority"] = "0"

        flow_expr_arr = self._build_flow_expr_arr(**kwargs)
        flow_expr_arr.append("actions=%s" % (kwargs["actions"]))
        flow_str = ",".join(flow_expr_arr)
        self.run_ofctl("add-flow", [flow_str])

    def delete_flows(self, **kwargs):
        kwargs['delete'] = True
        flow_expr_arr = self._build_flow_expr_arr(**kwargs)
        if "actions" in kwargs:
            flow_expr_arr.append("actions=%s" % (kwargs["actions"]))
        flow_str = ",".join(flow_expr_arr)
        self.run_ofctl("del-flows", [flow_str])

    def add_tunnel_port(self, port_name, remote_ip):
        self.run_vsctl(["add-port", self.br_name, port_name])
        self.set_db_attribute("Interface", port_name, "type", "gre")
        self.set_db_attribute("Interface", port_name, "options:remote_ip",
                              remote_ip)
        self.set_db_attribute("Interface", port_name, "options:in_key", "flow")
        self.set_db_attribute("Interface", port_name, "options:out_key",
                              "flow")
        return self.get_port_ofport(port_name)

    def add_patch_port(self, local_name, remote_name):
        self.run_vsctl(["add-port", self.br_name, local_name])
        self.set_db_attribute("Interface", local_name, "type", "patch")
        self.set_db_attribute("Interface", local_name, "options:peer",
                              remote_name)
        return self.get_port_ofport(local_name)

    def db_get_map(self, table, record, column):
        output = self.run_vsctl(["get", table, record, column])
        if output:
            str = output.rstrip("\n\r")
            return self.db_str_to_map(str)
        return {}

    def db_get_val(self, table, record, column):
        output = self.run_vsctl(["get", table, record, column])
        if output:
            return output.rstrip("\n\r")

    def db_str_to_map(self, full_str):
        list = full_str.strip("{}").split(", ")
        ret = {}
        for e in list:
            if e.find("=") == -1:
                continue
            arr = e.split("=")
            ret[arr[0]] = arr[1].strip("\"")
        return ret

    def get_port_name_list(self):
        res = self.run_vsctl(["list-ports", self.br_name])
        if res:
            return res.strip().split("\n")
        return []

    def get_port_stats(self, port_name):
        return self.db_get_map("Interface", port_name, "statistics")

    def get_xapi_iface_id(self, xs_vif_uuid):
        args = ["xe", "vif-param-get", "param-name=other-config",
                "param-key=nicira-iface-id", "uuid=%s" % xs_vif_uuid]
        try:
            return utils.execute(args, root_helper=self.root_helper).strip()
        except Exception, e:
            LOG.error(_LE(
                "Unable to execute %(cmd)s. Exception: %(exception)s"),
                {'cmd': args, 'exception': e})

    # returns a VIF object for each VIF port
    def get_vif_ports(self):
        edge_ports = []
        port_names = self.get_port_name_list()
        for name in port_names:
            external_ids = self.db_get_map("Interface", name, "external_ids")
            ofport = self.db_get_val("Interface", name, "ofport")
            if "iface-id" in external_ids and "attached-mac" in external_ids:
                p = VifPort(name, ofport, external_ids["iface-id"],
                            external_ids["attached-mac"], self)
                edge_ports.append(p)
            elif ("xs-vif-uuid" in external_ids and
                  "attached-mac" in external_ids):
                # if this is a xenserver and iface-id is not automatically
                # synced to OVS from XAPI, we grab it from XAPI directly
                iface_id = self.get_xapi_iface_id(external_ids["xs-vif-uuid"])
                p = VifPort(name, ofport, iface_id,
                            external_ids["attached-mac"], self)
                edge_ports.append(p)

        return edge_ports

    def get_vif_port_set(self):
        edge_ports = set()
        port_names = self.get_port_name_list()
        for name in port_names:
            external_ids = self.db_get_map("Interface", name, "external_ids")
            if "iface-id" in external_ids and "attached-mac" in external_ids:
                edge_ports.add(external_ids['iface-id'])
            elif ("xs-vif-uuid" in external_ids and
                  "attached-mac" in external_ids):
                # if this is a xenserver and iface-id is not automatically
                # synced to OVS from XAPI, we grab it from XAPI directly
                iface_id = self.get_xapi_iface_id(external_ids["xs-vif-uuid"])
                edge_ports.add(iface_id)
        return edge_ports

    def get_vif_port_by_id(self, port_id):
        args = ['--', '--columns=external_ids,name,ofport',
                'find', 'Interface',
                'external_ids:iface-id="%s"' % port_id]
        result = self.run_vsctl(args)
        if not result:
            return
        match = self.re_id.search(result)
        try:
            vif_mac = match.group('vif_mac')
            vif_id = match.group('vif_id')
            port_name = match.group('port_name')
            ofport = int(match.group('ofport'))
            return VifPort(port_name, ofport, vif_id, vif_mac, self)
        except Exception, e:
            LOG.warning(_LW("Unable to parse regex results. Exception: %s"), e)
            return

    def delete_ports(self, all_ports=False):
        if all_ports:
            port_names = self.get_port_name_list()
        else:
            port_names = (port.port_name for port in self.get_vif_ports())

        for port_name in port_names:
            self.delete_port(port_name)


def get_bridge_for_iface(root_helper, iface):
    args = ["ovs-vsctl", "--timeout=2", "iface-to-br", iface]
    try:
        return utils.execute(args, root_helper=root_helper).strip()
    except Exception:
        LOG.exception(_LE("Interface %s not found."), iface)
        return None


def get_bridges(root_helper):
    args = ["ovs-vsctl", "--timeout=2", "list-br"]
    try:
        return utils.execute(args, root_helper=root_helper).strip().split("\n")
    except Exception:
        LOG.exception(_LE("Unable to retrieve bridges."))
        return []
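OVSBridge wraps ovs-vsctl and ovs-ofctl invocations with a two-second timeout and regex parsing of the results. A sketch of a typical flow-programming sequence; the bridge and port names are examples only:

    from astara.common.linux import ovs_lib

    br = ovs_lib.OVSBridge('br-int', root_helper='sudo')
    ofport = br.add_port('tap-example')
    # add_flow assembles a string such as
    # 'hard_timeout=0,idle_timeout=0,priority=10,in_port=N,actions=normal'
    # and hands it to 'ovs-ofctl add-flow'.
    br.add_flow(priority='10', in_port=ofport, actions='normal')
    for vif in br.get_vif_ports():
        print(vif)  # iface-id=..., vif_mac=..., port_name=..., ofport=...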
@@ -1,123 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 Locaweb.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Juliano Martinez, Locaweb.

import fcntl
import os
import shlex
import signal
import socket
import struct
import tempfile

from eventlet.green import subprocess

from astara.common.i18n import _

from oslo_log import log as logging


LOG = logging.getLogger(__name__)


def _subprocess_setup():
    # Python installs a SIGPIPE handler by default. This is usually not what
    # non-Python subprocesses expect.
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)


def subprocess_popen(args, stdin=None, stdout=None, stderr=None, shell=False,
                     env=None):
    return subprocess.Popen(args, shell=shell, stdin=stdin, stdout=stdout,
                            stderr=stderr, preexec_fn=_subprocess_setup,
                            close_fds=True, env=env)


def execute(cmd, root_helper=None, process_input=None, addl_env=None,
            check_exit_code=True, return_stderr=False):
    if root_helper:
        cmd = shlex.split(root_helper) + cmd
    cmd = map(str, cmd)

    LOG.debug("Running command: %s", cmd)
    env = os.environ.copy()
    if addl_env:
        env.update(addl_env)
    obj = subprocess_popen(cmd, shell=False,
                           stdin=subprocess.PIPE,
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE,
                           env=env)

    _stdout, _stderr = (process_input and
                        obj.communicate(process_input) or
                        obj.communicate())
    obj.stdin.close()
    m = _("\nCommand: %(cmd)s\nExit code: %(code)s\nStdout: %(stdout)r\n"
          "Stderr: %(stderr)r") % {'cmd': cmd, 'code': obj.returncode,
                                   'stdout': _stdout, 'stderr': _stderr}
    LOG.debug(m)
    if obj.returncode and check_exit_code:
        raise RuntimeError(m)

    return return_stderr and (_stdout, _stderr) or _stdout


def get_interface_mac(interface):
    DEVICE_NAME_LEN = 15
    MAC_START = 18
    MAC_END = 24
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    info = fcntl.ioctl(s.fileno(), 0x8927,
                       struct.pack('256s', interface[:DEVICE_NAME_LEN]))
    return ''.join(['%02x:' % ord(char)
                    for char in info[MAC_START:MAC_END]])[:-1]


def replace_file(file_name, data):
    """Replaces the contents of file_name with data in a safe manner.

    First write to a temp file and then rename. Since POSIX renames are
    atomic, the file is unlikely to be corrupted by competing writes.

    We create the tempfile on the same device to ensure that it can be renamed.
    """

    base_dir = os.path.dirname(os.path.abspath(file_name))
    tmp_file = tempfile.NamedTemporaryFile('w+', dir=base_dir, delete=False)
    tmp_file.write(data)
    tmp_file.close()
    os.chmod(tmp_file.name, 0644)
    os.rename(tmp_file.name, file_name)
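execute() and replace_file() are the primitives the other linux helpers build on. A short sketch of both; the command and path are examples:

    from astara.common.linux import utils

    # Prepends the root helper, logs the command, and raises RuntimeError
    # on a non-zero exit unless check_exit_code=False.
    out = utils.execute(['ip', 'link', 'show'], root_helper='sudo')

    # Writes a temp file in the target directory, then renames it over the
    # destination, so readers never observe a partially written file.
    utils.replace_file('/etc/astara/example.conf', 'key = value\n')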
@@ -1,153 +0,0 @@
# Copyright 2015 Akanda, Inc
#
# Author: Akanda, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from six.moves.urllib import parse as urlparse

from oslo_log import log as logging
from oslo_config import cfg
from oslo_service import service
import oslo_messaging

from astara.common.i18n import _LW

LOG = logging.getLogger(__name__)


def _deprecated_amqp_url():
    """Allow for deprecating amqp_url setting over time.

    This warns and attempts to translate an amqp_url to something
    oslo_messaging can use to load a driver.
    """
    url = cfg.CONF.amqp_url
    if not url:
        return
    LOG.warning(_LW(
        'Use of amqp_url is deprecated. Please instead use options defined in '
        'oslo_messaging_rabbit to declare your AMQP connection.'))
    url = urlparse.urlsplit(url)
    if url.scheme == 'amqp':
        scheme = 'rabbit'
    else:
        scheme = url.scheme
    port = str(url.port or 5672)
    netloc = url.netloc
    if netloc.endswith(':'):
        netloc = netloc[:-1]
    out = urlparse.urlunsplit((
        scheme,
        '%s:%s' % (netloc, port),
        url.path,
        '', ''
    ))
    return out


def get_transport():
    url = _deprecated_amqp_url()
    return oslo_messaging.get_transport(conf=cfg.CONF, url=url)


def get_server(target, endpoints):
    return oslo_messaging.get_rpc_server(
        transport=get_transport(),
        target=target,
        endpoints=endpoints,
    )


def get_target(topic, fanout=True, exchange=None, version=None, server=None):
    return oslo_messaging.Target(
        topic=topic, fanout=fanout, exchange=exchange, version=version,
        server=server)


def get_rpc_client(topic, exchange=None, version='1.0'):
    """Creates an RPC client to be used to request methods be
    executed on remote RPC servers
    """
    target = get_target(topic=topic, exchange=exchange,
                        version=version, fanout=False)
    return oslo_messaging.rpc.client.RPCClient(
        get_transport(), target
    )


def get_rpc_notifier(topic='notifications'):
    return oslo_messaging.notify.Notifier(
        transport=get_transport(),
        # TODO(adam_g): driver should be specified in oslo.messaging's cfg
        driver='messaging',
        topic=topic,
    )


class MessagingService(service.Service):
    """Used to create objects that can manage multiple RPC connections"""
    def __init__(self):
        super(MessagingService, self).__init__()
        self._servers = set()

    def _add_server(self, server):
        self._servers.add(server)

    def create_rpc_consumer(self, topic, endpoints):
        """Creates an RPC server for this host that will execute RPCs requested
        by clients. Adds the resulting consumer to the pool of messaging
        servers.

        :param topic: Topic on which to listen for RPC requests
        :param endpoints: List of endpoint objects that define methods that
                          the server will execute.
        """
        target = get_target(topic=topic, fanout=True, server=cfg.CONF.host)
        server = get_server(target, endpoints)
        LOG.debug('Created RPC server on topic %s', topic)
        self._add_server(server)

    def create_notification_listener(self, endpoints, exchange=None,
                                     topic='notifications'):
        """Creates an oslo.messaging notification listener associated with
        provided endpoints. Adds the resulting listener to the pool of
        messaging servers.

        :param endpoints: list of endpoint objects that define methods for
                          processing prioritized notifications
        :param exchange: Optional control exchange to listen on. If not
                         specified, oslo_messaging defaults to 'openstack'
        :param topic: Topic on which to listen for notification events
        """
        transport = get_transport()
        target = get_target(topic=topic, fanout=False,
                            exchange=exchange)
        pool = 'astara.' + topic + '.' + cfg.CONF.host
        server = oslo_messaging.get_notification_listener(
            transport, [target], endpoints, pool=pool, executor='threading')
        LOG.debug(
            'Created RPC notification listener on topic:%s/exchange:%s.',
            topic, exchange)
        self._add_server(server)

    def start(self):
        LOG.info('Astara notification listener service starting...')
        super(MessagingService, self).start()
        [s.start() for s in self._servers]
        LOG.info('Astara notification listener service started.')

    def stop(self):
        LOG.info('Astara notification listener service stopping...')
        super(MessagingService, self).stop()
        [s.wait() for s in self._servers]
        LOG.info('Astara notification listener service stopped.')
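MessagingService collects RPC servers and notification listeners behind a single start/stop pair. A sketch of wiring it up, assuming this module was importable as astara.common.rpc; the topic, exchange, and endpoint class are invented for illustration:

    from astara.common import rpc


    class ExampleEndpoint(object):
        # Invoked for RPCs cast to the 'astara-example' topic.
        def rebuild(self, context, router_id):
            pass


    svc = rpc.MessagingService()
    svc.create_rpc_consumer(topic='astara-example',
                            endpoints=[ExampleEndpoint()])
    svc.create_notification_listener(endpoints=[ExampleEndpoint()],
                                     exchange='neutron')
    svc.start()  # starts every server registered above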
@@ -1,186 +0,0 @@
# Copyright 2015 Akanda, Inc.
#
# Author: Akanda, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import signal
import time

from oslo_config import cfg
from oslo_log import log

import tooz
from tooz import coordination as tz_coordination

from astara import event as ak_event
from astara.common.i18n import _, _LI


LOG = log.getLogger(__name__)
CONF = cfg.CONF


COORD_OPTS = [
    cfg.BoolOpt('enabled', default=False,
                help=_('Whether to use an external coordination service to '
                       'coordinate a cluster of astara-orchestrator nodes. '
                       'This may be disabled for single-node '
                       'astara-orchestrator environments.')),
    cfg.StrOpt('url',
               default='memcached://localhost:11211',
               help=_('URL of supported coordination service')),
    cfg.StrOpt('group_id', default='astara.orchestrator',
               help=_('ID of coordination group to join.')),
    cfg.IntOpt('heartbeat_interval', default=1,
               help=_('Interval (in seconds) for cluster heartbeats')),
]
CONF.register_group(cfg.OptGroup(name='coordination'))
CONF.register_opts(COORD_OPTS, group='coordination')


class InvalidEventType(Exception):
    pass


class CoordinatorDone(Exception):
    pass


class RugCoordinator(object):
    def __init__(self, notifications_queue):
        self._queue = notifications_queue
        self.host = CONF.host
        self.url = CONF.coordination.url
        self.group = CONF.coordination.group_id
        self.heartbeat_interval = CONF.coordination.heartbeat_interval
        self._coordinator = None
        signal.signal(signal.SIGTERM, self.stop)
        self.start()

    def start(self):
        """Brings the coordination service online.

        This connects the coordination service to its tooz backend. This
        involves:

            - connecting to the cluster
            - creating the coordination group (if required)
            - joining the coordination group
            - registering callbacks to respond to join/leave membership
              events

        After the local node has joined the cluster and knows its remote
        peers, it fires off an initial rebalance event to the workers
        so they can seed their hash ring with the current membership.
        """
        LOG.info(_LI('Starting RUG coordinator process for host %s on %s'),
                 self.host, self.url)
        self._coordinator = tz_coordination.get_coordinator(
            self.url, self.host)
        self._coordinator.start()

        try:
            self._coordinator.create_group(self.group).get()
        except tooz.coordination.GroupAlreadyExist:
            pass

        try:
            self._coordinator.join_group(self.group).get()
            self._coordinator.heartbeat()
        except tooz.coordination.MemberAlreadyExist:
            pass

        self._coordinator.watch_join_group(self.group, self.cluster_changed)
        self._coordinator.watch_leave_group(self.group, self.cluster_changed)
        self._coordinator.heartbeat()
        LOG.debug("Sending initial event changed for members: %s" %
                  self.members)
        self.cluster_changed(event=None, node_bootstrap=True)

    def run(self):
        try:
            while True:
                self._coordinator.heartbeat()
                self._coordinator.run_watchers()
                time.sleep(self.heartbeat_interval)
        except CoordinatorDone:
            LOG.info(_LI('Stopping RUG coordinator.'))
            return

    def stop(self, signal=None, frame=None):
        """Stop the coordinator service.

        This ensures a clean shutdown of the coordinator service and
        attempts to advertise its departure to the rest of the cluster.
        Note this is registered as a signal handler for SIGTERM so that
        it is run when the main process shuts down and subprocesses
        receive the signal.
        """
        self._coordinator.unwatch_join_group(self.group, self.cluster_changed)
        self._coordinator.unwatch_leave_group(self.group, self.cluster_changed)

        if self.is_leader:
            try:
                self._coordinator.stand_down_group_leader(self.group)
            except tooz.NotImplemented:
                pass
        self._coordinator.leave_group(self.group).get()
        raise CoordinatorDone()

    @property
    def members(self):
        """Returns the current cluster membership list"""
        members = self._coordinator.get_members(self.group).get()

        # tooz ZK driver reports 'leader' as a member, which can screw with
        # hashing.
        try:
            members.remove('leader')
        except ValueError:
            pass

        return members

    @property
    def is_leader(self):
        """Returns true if the local cluster member is the leader"""
        return self._coordinator.get_leader(self.group).get() == self.host

    def cluster_changed(self, event, node_bootstrap=False):
        """Event callback to be called by tooz on membership changes"""
        LOG.debug('Broadcasting cluster changed event to trigger rebalance. '
                  'members=%s' % self.members)

        body = {
            'members': self.members
        }

        # Flag this as a local bootstrap rebalance rather than one in reaction
        # to a cluster event.
        if node_bootstrap:
            body['node_bootstrap'] = True

        r = ak_event.Resource(
            tenant_id='*',
            id='*',
            driver='*',
        )
        e = ak_event.Event(
            resource=r,
            crud=ak_event.REBALANCE,
            body=body,
        )
        self._queue.put(('*', e))


def start(notification_queue):
    return RugCoordinator(notification_queue).run()
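RugCoordinator drives tooz directly: a heartbeat/run_watchers loop, with membership changes converted into REBALANCE events on the notification queue. A sketch of the module-level entry point; the multiprocessing queue here is illustrative (any object with a put() method would do):

    import multiprocessing

    from astara import coordination

    queue = multiprocessing.Queue()
    # Joins the configured tooz group, registers join/leave watchers, then
    # blocks in run(), heartbeating until stop() raises CoordinatorDone.
    coordination.start(queue)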
@ -1,44 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


"""Utilities for managing ourselves as a daemon.
"""

import signal

from oslo_log import log as logging

from astara.common.i18n import _LI


def ignore_signals():
    """Ignore signals that might interrupt processing

    Since the RUG doesn't want to be asynchronously interrupted,
    various received signals need to be ignored. The registered
    signals, including SIGHUP, SIGALRM, and the default signals
    SIGUSR1 and SIGUSR2, are captured and ignored through the
    SIG_IGN action.

    :param: None

    :returns: None

    """
    for s in [signal.SIGHUP, signal.SIGUSR1, signal.SIGUSR2, signal.SIGALRM]:
        logging.getLogger(__name__).info(_LI('ignoring signal %s'), s)
        signal.signal(s, signal.SIG_IGN)
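
A small sketch of how ignore_signals() pairs with the coordinator's stop() method above, assuming a RugCoordinator instance; routing SIGINT to stop() is what lets the coordinator advertise a clean departure, since stop() accepts the (signal, frame) pair a handler receives:

import signal

from astara import daemon


def install_handlers(coordinator):
    # silence signals that would interrupt processing mid-flight
    daemon.ignore_signals()
    # route SIGINT to the coordinator so it leaves the tooz group cleanly
    signal.signal(signal.SIGINT, coordinator.stop)
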
128
astara/db/api.py
@ -1,128 +0,0 @@
# Copyright 2015 Akanda, Inc.
#
# Author: Akanda, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import abc
import six

from oslo_config import cfg
from oslo_db import api as db_api


_BACKEND_MAPPING = {
    'sqlalchemy': 'astara.db.sqlalchemy.api'
}

IMPL = db_api.DBAPI.from_config(
    cfg.CONF, backend_mapping=_BACKEND_MAPPING, lazy=True)


def get_instance():
    return IMPL


@six.add_metaclass(abc.ABCMeta)
class Connection(object):
    @abc.abstractmethod
    def __init__(self):
        pass

    @abc.abstractmethod
    def enable_resource_debug(self, resource_uuid, reason=None):
        """Enter a resource into debug mode

        :param resource_uuid: str uuid of the resource to be placed into
                              debug mode
        :param reason: str (optional) reason for entering resource into
                       debug mode
        """

    @abc.abstractmethod
    def disable_resource_debug(self, resource_uuid):
        """Remove a resource from debug mode

        :param resource_uuid: str uuid of the resource to be removed from
                              debug mode
        """

    @abc.abstractmethod
    def resource_in_debug(self, resource_uuid):
        """Determines if a resource is in debug mode

        :param resource_uuid: str the uuid of the resource to query
        :returns: tuple (False, None) if resource is not in debug mode or
                  (True, "reason") if it is.
        """

    @abc.abstractmethod
    def resources_in_debug(self):
        """Queries all resources in debug mode

        :returns: a set of (resource_uuid, reason) tuples
        """

    @abc.abstractmethod
    def enable_tenant_debug(self, tenant_uuid, reason=None):
        """Enter a tenant into debug mode

        :param tenant_uuid: str uuid of the tenant to be placed into debug
                            mode
        :param reason: str (optional) reason for entering tenant into debug
                       mode
        """

    @abc.abstractmethod
    def disable_tenant_debug(self, tenant_uuid):
        """Remove a tenant from debug mode

        :param tenant_uuid: str uuid of the tenant to be removed from debug
                            mode
        """

    @abc.abstractmethod
    def tenant_in_debug(self, tenant_uuid):
        """Determines if a tenant is in debug mode

        :param tenant_uuid: str the uuid of the tenant to query
        :returns: tuple (False, None) if tenant is not in debug mode or
                  (True, "reason") if it is.
        """

    @abc.abstractmethod
    def tenants_in_debug(self):
        """Queries all tenants in debug mode

        :returns: a set of (tenant_uuid, reason) tuples
        """

    @abc.abstractmethod
    def enable_global_debug(self, reason=None):
        """Enter the entire system into debug mode

        :param reason: str (optional) reason for entering cluster into
                       global debug mode.
        """

    @abc.abstractmethod
    def disable_global_debug(self):
        """Remove the entire system from global debug mode"""

    @abc.abstractmethod
    def global_debug(self):
        """Determine whether cluster is in global debug mode

        :returns: tuple (False, None) if cluster is not in global debug
                  mode or (True, "reason") if it is.
        """
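
A short sketch of exercising this interface through the lazily loaded backend, assuming the database connection is configured; the resource UUID is a placeholder:

from astara.db import api

resource_uuid = '...'  # placeholder UUID of a managed resource

conn = api.get_instance()  # resolves to the sqlalchemy backend by default
conn.enable_resource_debug(resource_uuid, reason='investigating flaps')

in_debug, reason = conn.resource_in_debug(resource_uuid)
if in_debug:
    print('resource is in debug mode: %s' % reason)

conn.disable_resource_debug(resource_uuid)
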
@ -1,56 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Database setup and migration commands."""

from oslo_config import cfg
from stevedore import driver

_IMPL = None


def get_backend():
    global _IMPL
    if not _IMPL:
        cfg.CONF.import_opt('backend', 'oslo_db.options', group='database')
        _IMPL = driver.DriverManager("astara.database.migration_backend",
                                     cfg.CONF.database.backend).driver
    return _IMPL


def upgrade(version=None):
    """Migrate the database to `version` or the most recent version."""
    return get_backend().upgrade(version)


def downgrade(version=None):
    return get_backend().downgrade(version)


def version():
    return get_backend().version()


def stamp(version):
    return get_backend().stamp(version)


def revision(message, autogenerate):
    return get_backend().revision(message, autogenerate)


def create_schema():
    return get_backend().create_schema()
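
A sketch of driving these commands programmatically, assuming the 'astara.database.migration_backend' entry point resolves to the sqlalchemy backend shown later in this change; this mirrors what a deployment tool would do:

from astara.db import migration

# stevedore loads the backend named by CONF.database.backend
# ('sqlalchemy' is the oslo.db default), then upgrades to 'head'
migration.upgrade()
print('current schema revision: %s' % migration.version())
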
@ -1,54 +0,0 @@
# A generic, single database configuration.

[alembic]
# path to migration scripts
script_location = %(here)s/alembic

# template used to generate migration files
# file_template = %%(rev)s_%%(slug)s

# max length of characters to apply to the
# "slug" field
#truncate_slug_length = 40

# set to 'true' to run the environment during
# the 'revision' command, regardless of autogenerate
# revision_environment = false

#sqlalchemy.url = driver://user:pass@localhost/dbname


# Logging configuration
[loggers]
keys = root,sqlalchemy,alembic

[handlers]
keys = console

[formatters]
keys = generic

[logger_root]
level = WARN
handlers = console
qualname =

[logger_sqlalchemy]
level = WARN
handlers =
qualname = sqlalchemy.engine

[logger_alembic]
level = INFO
handlers =
qualname = alembic

[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic

[formatter_generic]
format = %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %H:%M:%S
@ -1,61 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from logging import config as log_config

from alembic import context

try:
    # NOTE(whaom): This is to register the DB2 alembic code which
    # is an optional runtime dependency.
    from ibm_db_alembic.ibm_db import IbmDbImpl  # noqa
except ImportError:
    pass

from astara.db.sqlalchemy import api as sqla_api
from astara.db.sqlalchemy import models

# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config

# Interpret the config file for Python logging.
# This line sets up loggers basically.
log_config.fileConfig(config.config_file_name)

# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
target_metadata = models.Base.metadata

# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.


def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.

    """
    engine = sqla_api.get_engine()
    with engine.connect() as connection:
        context.configure(connection=connection,
                          target_metadata=target_metadata)
        with context.begin_transaction():
            context.run_migrations()


run_migrations_online()
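
This env.py only wires up online migrations. For orientation, the offline (SQL-script) counterpart that alembic normally generates would look roughly like the sketch below; it is not part of the retired file, just the usual pattern from alembic's own template:

def run_migrations_offline():
    """Run migrations in 'offline' mode, emitting SQL instead of
    executing against a live engine."""
    context.configure(url=config.get_main_option('sqlalchemy.url'),
                      target_metadata=target_metadata)
    with context.begin_transaction():
        context.run_migrations()


# alembic's stock env.py dispatches on the invocation mode
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
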
@ -1,22 +0,0 @@
"""${message}

Revision ID: ${up_revision}
Revises: ${down_revision}
Create Date: ${create_date}

"""

# revision identifiers, used by Alembic.
revision = ${repr(up_revision)}
down_revision = ${repr(down_revision)}

from alembic import op
import sqlalchemy as sa
${imports if imports else ""}


def upgrade():
    ${upgrades if upgrades else "pass"}


def downgrade():
    ${downgrades if downgrades else "pass"}
@ -1,69 +0,0 @@
# Copyright 2015 Akanda, Inc.
#
# Author: Akanda, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""initial_migration

Revision ID: 4f695b725637
Revises: None
Create Date: 2015-07-02 12:29:50.243891

"""

import sqlalchemy as sa

from alembic import op

# revision identifiers, used by Alembic.
revision = '4f695b725637'
down_revision = None


def upgrade():
    op.create_table(
        'resource_debug',
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('uuid', sa.String(length=36), nullable=False),
        sa.Column('reason', sa.String(length=255), nullable=True),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('uuid', name='uniq_debug_resource0uuid'),
    )
    op.create_table(
        'tenant_debug',
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('uuid', sa.String(length=36), nullable=False),
        sa.Column('reason', sa.String(length=255), nullable=True),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('uuid', name='uniq_debug_tenant0uuid'),
    )
    op.create_table(
        'global_debug',
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('status', sa.Integer(), nullable=False),
        sa.Column('reason', sa.String(length=255), nullable=True),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('status', name='uniq_global_debug0status'),
    )


def downgrade():
    raise NotImplementedError(('Downgrade from initial migration is'
                               ' unsupported.'))
@ -1,166 +0,0 @@
# -*- encoding: utf-8 -*-
#
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# Copyright 2015 Akanda, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""SQLAlchemy storage backend."""

from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import session as db_session

from astara.db import api
from astara.db.sqlalchemy import models

CONF = cfg.CONF


_FACADE = None


def _create_facade_lazily():
    global _FACADE
    if _FACADE is None:
        _FACADE = db_session.EngineFacade.from_config(CONF)
    return _FACADE


def get_engine():
    facade = _create_facade_lazily()
    return facade.get_engine()


def get_session(**kwargs):
    facade = _create_facade_lazily()
    return facade.get_session(**kwargs)


def get_backend():
    """The backend is this module itself."""
    return Connection()


def model_query(model, *args, **kwargs):
    """Query helper for simpler session usage.

    :param session: if present, the session to use
    """

    session = kwargs.get('session') or get_session()
    query = session.query(model, *args)
    return query


class Connection(api.Connection):
    """SqlAlchemy connection."""

    def __init__(self):
        pass

    def _enable_debug(self, model, uuid, reason=None):
        model.update({
            'uuid': uuid,
            'reason': reason,
        })
        try:
            model.save()
        except db_exc.DBDuplicateEntry:
            pass

    def _disable_debug(self, model=None, uuid=None):
        query = model_query(model)
        query.filter_by(uuid=uuid).delete()

    def _check_debug(self, model, uuid):
        query = model_query(model)
        res = query.filter_by(uuid=uuid).all()
        if not res:
            return (False, None)
        return (True, res[0].reason)

    def _list_debug(self, model):
        res = model_query(model).all()
        return set((r.uuid, r.reason) for r in res)

    def enable_resource_debug(self, resource_uuid, reason=None):
        self._enable_debug(
            model=models.ResourceDebug(),
            uuid=resource_uuid,
            reason=reason,
        )

    def disable_resource_debug(self, resource_uuid):
        self._disable_debug(
            model=models.ResourceDebug,
            uuid=resource_uuid,
        )

    def resource_in_debug(self, resource_uuid):
        return self._check_debug(models.ResourceDebug, resource_uuid)

    def resources_in_debug(self):
        return self._list_debug(models.ResourceDebug)

    def enable_tenant_debug(self, tenant_uuid, reason=None):
        self._enable_debug(
            model=models.TenantDebug(),
            uuid=tenant_uuid,
            reason=reason,
        )

    def disable_tenant_debug(self, tenant_uuid):
        self._disable_debug(
            model=models.TenantDebug,
            uuid=tenant_uuid,
        )

    def tenant_in_debug(self, tenant_uuid):
        return self._check_debug(models.TenantDebug, tenant_uuid)

    def tenants_in_debug(self):
        return self._list_debug(models.TenantDebug)

    def _set_global_debug(self, status, reason=None):
        query = model_query(models.GlobalDebug)
        res = query.first()
        if not res:
            gdb = models.GlobalDebug()
            gdb.update({
                'status': status,
                'reason': reason,
            })
            gdb.save()

    def enable_global_debug(self, reason=None):
        gdb = models.GlobalDebug()
        gdb.update({
            'status': 1,
            'reason': reason,
        })
        try:
            gdb.save()
        except db_exc.DBDuplicateEntry:
            pass

    def disable_global_debug(self):
        query = model_query(models.GlobalDebug)
        query.filter_by(status=1).delete()

    def global_debug(self):
        query = model_query(models.GlobalDebug)
        res = query.filter_by(status=1).all()
        if not res:
            return (False, None)
        return (True, res[0].reason)
@ -1,93 +0,0 @@
# Copyright 2012 New Dream Network, LLC (DreamHost)
# Copyright 2015 Akanda, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import six

from alembic import command as alembic_command
from alembic import config as alembic_config
from alembic import util as alembic_util
from oslo_config import cfg

from astara.common.i18n import _
import pkg_resources


CONF = cfg.CONF


def do_alembic_command(config, cmd, *args, **kwargs):
    try:
        getattr(alembic_command, cmd)(config, *args, **kwargs)
    except alembic_util.CommandError as e:
        alembic_util.err(six.text_type(e))


def add_alembic_subparser(sub, cmd):
    return sub.add_parser(cmd, help=getattr(alembic_command, cmd).__doc__)


def do_upgrade(config, cmd):
    revision = CONF.command.revision or 'head'
    do_alembic_command(config, cmd, revision, sql=CONF.command.sql)


def do_stamp(config, cmd):
    do_alembic_command(config, cmd,
                       CONF.command.revision,
                       sql=CONF.command.sql)


def add_command_parsers(subparsers):
    for name in ['current', 'history', 'branches']:
        parser = add_alembic_subparser(subparsers, name)
        parser.set_defaults(func=do_alembic_command)

    parser = add_alembic_subparser(subparsers, 'upgrade')
    parser.add_argument('--delta', type=int)
    parser.add_argument('--sql', action='store_true')
    parser.add_argument('revision', nargs='?')
    parser.add_argument('--mysql-engine',
                        default='',
                        help='Change MySQL storage engine of current '
                             'existing tables')
    parser.set_defaults(func=do_upgrade)

    parser = add_alembic_subparser(subparsers, 'stamp')
    parser.add_argument('--sql', action='store_true')
    parser.add_argument('revision')
    parser.set_defaults(func=do_stamp)


command_opt = cfg.SubCommandOpt('command',
                                title='Command',
                                help=_('Available commands'),
                                handler=add_command_parsers)

CONF.register_cli_opt(command_opt)


def get_alembic_config():
    config = alembic_config.Config(os.path.join(os.path.dirname(__file__),
                                   'alembic.ini'))
    return config


def main():
    dist = pkg_resources.get_distribution('astara')
    CONF(project='astara-orchestrator', version=dist.version)
    config = get_alembic_config()
    config.astara_config = CONF
    CONF.command.func(config, CONF.command.name)
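
A sketch of what main() amounts to for a one-off upgrade, with the subcommand passed explicitly instead of taken from sys.argv; the project name matches the CONF() call above, and the rest is plain oslo.config mechanics:

from oslo_config import cfg

# after this call, CONF.command.name == 'upgrade',
# CONF.command.revision == 'head' and CONF.command.func is do_upgrade
cfg.CONF(['upgrade', 'head'], project='astara-orchestrator')

config = get_alembic_config()
cfg.CONF.command.func(config, cfg.CONF.command.name)
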
@ -1,113 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os

import alembic
from alembic import config as alembic_config
import alembic.migration as alembic_migration
from oslo_db import exception as db_exc

from astara.db.sqlalchemy import api as sqla_api
from astara.db.sqlalchemy import models


def _alembic_config():
    path = os.path.join(os.path.dirname(__file__), 'alembic.ini')
    config = alembic_config.Config(path)
    return config


def version(config=None, engine=None):
    """Current database version.

    :returns: Database version
    :rtype: string
    """
    if engine is None:
        engine = sqla_api.get_engine()
    with engine.connect() as conn:
        context = alembic_migration.MigrationContext.configure(conn)
        return context.get_current_revision()


def upgrade(revision, config=None):
    """Used for upgrading database.

    :param revision: Desired database version
    :type revision: string
    """
    revision = revision or 'head'
    config = config or _alembic_config()

    alembic.command.upgrade(config, revision)


def create_schema(config=None, engine=None):
    """Create database schema from models description.

    Can be used for initial installation instead of upgrade('head').
    """
    if engine is None:
        engine = sqla_api.get_engine()

    # NOTE(viktors): If we use metadata.create_all() on a non-empty db
    #                schema, it will only add the new tables and leave the
    #                existing ones as-is, so we should avoid that situation.
    if version(engine=engine) is not None:
        raise db_exc.DbMigrationError("DB schema is already under version"
                                      " control. Use upgrade() instead")

    models.Base.metadata.create_all(engine)
    stamp('head', config=config)


def downgrade(revision, config=None):
    """Used for downgrading database.

    :param revision: Desired database version
    :type revision: string
    """
    revision = revision or 'base'
    config = config or _alembic_config()
    return alembic.command.downgrade(config, revision)


def stamp(revision, config=None):
    """Stamps database with provided revision.

    It does not run any migrations.

    :param revision: Should match one from repository or head - to stamp
                     database with most recent revision
    :type revision: string
    """
    config = config or _alembic_config()
    return alembic.command.stamp(config, revision=revision)


def revision(message=None, autogenerate=False, config=None):
    """Creates template for migration.

    :param message: Text that will be used for migration title
    :type message: string
    :param autogenerate: If True - generates diff based on current database
                         state
    :type autogenerate: bool
    """
    config = config or _alembic_config()
    return alembic.command.revision(config, message=message,
                                    autogenerate=autogenerate)
@ -1,113 +0,0 @@
# -*- encoding: utf-8 -*-
#
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# Copyright 2015 Akanda, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
SQLAlchemy models for astara data.
"""

from astara.common.i18n import _

from oslo_config import cfg
from oslo_db import options as db_options
from oslo_db.sqlalchemy import models
import six.moves.urllib.parse as urlparse
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import schema, String
from sqlalchemy.ext.declarative import declarative_base


sql_opts = [
    cfg.StrOpt('mysql_engine',
               default='InnoDB',
               help=_('MySQL engine to use.'))
]

_DEFAULT_SQL_CONNECTION = 'sqlite:///astara.db'


cfg.CONF.register_opts(sql_opts, 'database')
db_options.set_defaults(cfg.CONF, _DEFAULT_SQL_CONNECTION, 'ironic.sqlite')


def table_args():
    engine_name = urlparse.urlparse(cfg.CONF.database.connection).scheme
    if engine_name == 'mysql':
        return {'mysql_engine': cfg.CONF.database.mysql_engine,
                'mysql_charset': "utf8"}
    return None


class AstaraBase(models.TimestampMixin,
                 models.ModelBase):

    metadata = None

    def as_dict(self):
        d = {}
        for c in self.__table__.columns:
            d[c.name] = self[c.name]
        return d

    def save(self, session=None):
        import astara.db.sqlalchemy.api as db_api

        if session is None:
            session = db_api.get_session()

        super(AstaraBase, self).save(session)


Base = declarative_base(cls=AstaraBase)


class ResourceDebug(Base):
    """Represents a resource in debug mode."""

    __tablename__ = 'resource_debug'
    __table_args__ = (
        schema.UniqueConstraint('uuid', name='uniq_debug_resource0uuid'),
        table_args()
    )
    id = Column(Integer, primary_key=True)
    uuid = Column(String(36))
    reason = Column(String(255), nullable=True)


class TenantDebug(Base):
    """Represents a tenant in debug mode."""

    __tablename__ = 'tenant_debug'
    __table_args__ = (
        schema.UniqueConstraint('uuid', name='uniq_debug_tenant0uuid'),
        table_args()
    )
    id = Column(Integer, primary_key=True)
    uuid = Column(String(36))
    reason = Column(String(255), nullable=True)


class GlobalDebug(Base):
    """Stores a single row that serves as a status flag for global debug"""

    __tablename__ = 'global_debug'
    __table_args__ = (
        schema.UniqueConstraint('status', name='uniq_global_debug0status'),
        table_args()
    )
    id = Column(Integer, primary_key=True)
    status = Column(Integer)
    reason = Column(String(255), nullable=True)
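
A small sketch of how these models behave, assuming a configured engine (the default is a local sqlite file); as_dict() and save() come from AstaraBase, with save() falling back to the module's session facade:

from astara.db.sqlalchemy import models

flag = models.GlobalDebug()
flag.update({'status': 1, 'reason': 'maintenance window'})
flag.save()  # uses db_api.get_session() when no session is passed

print(flag.as_dict())  # includes id, status, reason and the timestamps
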
@ -1,80 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


from __future__ import print_function

import os
import sys

from oslo_config import cfg
from oslo_log import log as logging

from astara import drivers
from astara import state
from astara import worker

from astara.common import config


DEBUG_OPTS = [
    cfg.StrOpt(
        'router-id', required=True,
        help='The UUID for the router to debug')
]


class Fake(object):
    def __init__(self, crud):
        self.crud = crud


def delete_callback():
    print('DELETE')


def bandwidth_callback(*args, **kwargs):
    print('BANDWIDTH:', args, kwargs)


def debug_one_router(args=sys.argv[1:]):
    # Add our extra option for specifying the router-id to debug
    cfg.CONF.register_cli_opts(DEBUG_OPTS)
    cfg.CONF.set_override('boot_timeout', 60000)
    cfg.CONF.import_opt('host', 'astara.main')
    config.parse_config(args)
    logging.setup(cfg.CONF, __name__)
    log = logging.getLogger(__name__)
    log.debug('Proxy settings: %r', os.getenv('no_proxy'))

    context = worker.WorkerContext()
    driver = drivers.get('router')(context, cfg.CONF.router_id)
    a = state.Automaton(
        resource=driver,
        tenant_id=driver._router.tenant_id,
        delete_callback=delete_callback,
        bandwidth_callback=bandwidth_callback,
        worker_context=context,
        queue_warning_threshold=100,
        reboot_error_threshold=1,
    )

    a.send_message(Fake('update'))

    import pdb
    pdb.set_trace()

    a.update(context)
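
A hypothetical invocation, with a placeholder router UUID and an assumed module path of astara.debug; this drops into pdb just before the state machine processes the faked 'update' message:

from astara.debug import debug_one_router

debug_one_router(['--router-id', '00000000-0000-0000-0000-000000000000'])
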
@ -1,91 +0,0 @@
# Copyright (c) 2015 Akanda, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg
from oslo_log import log as logging

from astara.drivers.router import Router
from astara.drivers.loadbalancer import LoadBalancer

DRIVER_OPTS = [
    cfg.ListOpt('enabled_drivers',
                default=['router', ],
                help='list of drivers the rug process will load'),
]
cfg.CONF.register_opts(DRIVER_OPTS)

ASTARA_APP_OPTS = [
    cfg.IntOpt('max_sleep', default=15,
               help='The max sleep seconds between each attempt by'
                    ' neutron client for fetching resource.'),
]
cfg.CONF.register_group(cfg.OptGroup(name='astara_appliance'))
cfg.CONF.register_opts(ASTARA_APP_OPTS, 'astara_appliance')

LOG = logging.getLogger(__name__)

AVAILABLE_DRIVERS = {
    Router.RESOURCE_NAME: Router,
    LoadBalancer.RESOURCE_NAME: LoadBalancer,
}


class InvalidDriverException(Exception):
    """Triggered when driver is not available in AVAILABLE_DRIVERS"""
    pass


def get(requested_driver):
    """Returns the driver class based on the requested_driver param.

    Raises InvalidDriverException if the requested driver is not listed
    in AVAILABLE_DRIVERS.

    :param requested_driver: name of desired driver
    :return: the driver class
    """
    if requested_driver in AVAILABLE_DRIVERS:
        return AVAILABLE_DRIVERS[requested_driver]

    raise InvalidDriverException(
        'Failed loading driver: %s' % requested_driver
    )


def load_from_byonf(worker_context, byonf_result, resource_id):
    """Returns a loaded driver based on astara-neutron BYONF response

    :param worker_context: Worker context with clients
    :param byonf_result: dict response from neutron API describing
                         user-provided NF info (specifically image_uuid and
                         driver)
    :param resource_id: The UUID of the logical resource derived from the
                        notification message

    Responsible for also setting correct driver attributes based on BYONF
    specs.
    """
    driver_obj = get(byonf_result['driver'])(worker_context, resource_id)
    if byonf_result.get('image_uuid'):
        driver_obj.image_uuid = byonf_result['image_uuid']
    return driver_obj


def enabled_drivers():
    for driver in cfg.CONF.enabled_drivers:
        try:
            d = get(driver)
            yield d
        except InvalidDriverException as e:
            LOG.exception(e)
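
A short sketch of the lookup API: get() returns the driver class itself, which callers then instantiate with a worker context and a logical resource id (both placeholders here):

from astara import drivers

for cls in drivers.enabled_drivers():
    print('enabled driver: %s' % cls.RESOURCE_NAME)

try:
    driver_cls = drivers.get('router')
except drivers.InvalidDriverException:
    driver_cls = None

# instantiation requires live clients, e.g.:
# driver = driver_cls(worker_context, resource_id)
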
@ -1,194 +0,0 @@
# Copyright (c) 2015 Akanda, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_log import log as logging


class BaseDriver(object):

    RESOURCE_NAME = 'BaseDriver'

    def __init__(self, worker_context, id, log=None):
        """This is the abstract base class for rug drivers.

        :param id: logical resource id
        :param log: override default log
        """
        self.id = id
        self.external_port = None
        self.details = []
        self.flavor = None
        self.image_uuid = None
        self.name = 'ak-%s-%s' % (self.RESOURCE_NAME, self.id)

        if log:
            self.log = log
        else:
            self.log = logging.getLogger(self.name)

        self.post_init(worker_context)

    def post_init(self, worker_context):
        """post init hook

        :param worker_context:
        :returns: None
        """
        pass

    def pre_boot(self, worker_context):
        """pre boot hook

        :param worker_context:
        :returns: None
        """
        pass

    def post_boot(self, worker_context):
        """post boot hook

        :param worker_context:
        :returns: None
        """
        pass

    def update_state(self, worker_context, silent=False):
        """returns state of logical resource.

        :param worker_context:
        :param silent:
        :returns: None
        """
        pass

    def build_config(self, worker_context, mgt_port, iface_map):
        """gets config of logical resource attached to worker_context.

        :param worker_context:
        :returns: None
        """
        pass

    def update_config(self, management_address, config):
        """Updates appliance configuration

        This is responsible for pushing configuration to the managed
        appliance
        """
        pass

    def synchronize_state(self, worker_context, state):
        """sometimes a driver will need to update a service behind it with a
        new state.

        :param state: a valid state
        """
        pass

    def make_ports(self, worker_context):
        """Make ports call back for the nova client.

        This is expected to create the management port for the instance
        and any required instance ports.

        :param worker_context:

        :returns: A tuple (management_port, [instance_ports])
        """
        def _make_ports():
            pass

        return _make_ports

    def delete_ports(self, worker_context):
        """Delete all created ports.

        :param worker_context:
        :returns: None
        """

    @staticmethod
    def pre_populate_hook():
        """called in populate.py during driver loading loop.
        """
        pass

    def pre_plug(self, worker_context):
        """pre-plug hook

        :param worker_context:
        :returns: None
        """

    @staticmethod
    def get_resource_id_for_tenant(worker_context, tenant_id, message):
        """Find the id of a resource for a given tenant id and message.

        For some resources simply searching by tenant_id is enough, for
        others some context from the message payload may be necessary.

        :param worker_context: A worker context with instantiated clients
        :param tenant_id: The tenant uuid to search for
        :param message: The message associated with the request

        :returns: uuid of the resource owned by the tenant
        """
        pass

    @staticmethod
    def process_notification(tenant_id, event_type, payload):
        """Process an incoming notification event

        This gets called from the notifications layer to determine whether
        a driver should process an incoming notification event. It is
        responsible for translating an incoming notification to an Event
        object appropriate for that driver.

        :param tenant_id: str The UUID tenant_id for the incoming event
        :param event_type: str event type, for example router.create.end
        :param payload: The payload body of the incoming event

        :returns: A populated Event object if it should process, or None
                  if not
        """
        pass

    @property
    def ports(self):
        """Lists ports associated with the resource.

        :returns: A list of astara.api.neutron.Port objects or []
        """

    def get_interfaces(self, management_address):
        """Lists interfaces attached to the resource.

        This lists the interfaces attached to the resource from the POV
        of the resource itself.

        :returns: A list of interfaces
        """
        pass

    def is_alive(self, management_address):
        """Determines whether the managed resource is alive

        :returns: bool True if alive, False if not
        """

    def get_state(self, worker_context):
        """Returns the state of the managed resource"""

    @property
    def is_ha(self):
        """Returns True if logical resource is set to be highly-available"""
        return False
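
For orientation, a minimal hypothetical subclass; a concrete driver fills in the same hook surface that the LoadBalancer driver below overrides:

from astara.drivers.base import BaseDriver


class NullDriver(BaseDriver):
    """A do-nothing driver sketch, for illustration only."""

    RESOURCE_NAME = 'null'

    def update_state(self, worker_context, silent=False):
        # a real driver would probe the appliance here
        return None

    def is_alive(self, management_address):
        # pretend the appliance always responds
        return True
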
@ -1,352 +0,0 @@
|
|||||||
# Copyright (c) 2015 Akanda, Inc. All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
import time
|
|
||||||
|
|
||||||
from oslo_config import cfg
|
|
||||||
from oslo_log import log as logging
|
|
||||||
|
|
||||||
from neutronclient.common import exceptions as q_exceptions
|
|
||||||
|
|
||||||
from astara.common.i18n import _
|
|
||||||
from astara.api import astara_client
|
|
||||||
from astara.api.config import loadbalancer as config
|
|
||||||
from astara import event
|
|
||||||
from astara.api import neutron
|
|
||||||
from astara.drivers.base import BaseDriver
|
|
||||||
from astara.drivers import states
|
|
||||||
from astara.common.i18n import _LW
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
LOADBALANCER_OPTS = [
|
|
||||||
cfg.StrOpt('image_uuid',
|
|
||||||
help='The image_uuid for loadbalancer instances.'),
|
|
||||||
cfg.StrOpt('instance_flavor',
|
|
||||||
help='The nova flavor id to use for loadbalancer instances'),
|
|
||||||
cfg.IntOpt('mgt_service_port', default=5000,
|
|
||||||
help='The port on which the loadbalancer API service listens '
|
|
||||||
'on loadbalancer appliances'),
|
|
||||||
]
|
|
||||||
cfg.CONF.register_group(cfg.OptGroup(name='loadbalancer'))
|
|
||||||
cfg.CONF.register_opts(LOADBALANCER_OPTS, 'loadbalancer')
|
|
||||||
|
|
||||||
|
|
||||||
STATUS_MAP = {
|
|
||||||
states.DOWN: neutron.PLUGIN_DOWN,
|
|
||||||
states.BOOTING: neutron.PLUGIN_PENDING_CREATE,
|
|
||||||
states.UP: neutron.PLUGIN_PENDING_CREATE,
|
|
||||||
states.CONFIGURED: neutron.PLUGIN_ACTIVE,
|
|
||||||
states.ERROR: neutron.PLUGIN_ERROR,
|
|
||||||
states.REPLUG: neutron.PLUGIN_PENDING_UPDATE,
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
class LoadBalancer(BaseDriver):
|
|
||||||
|
|
||||||
RESOURCE_NAME = 'loadbalancer'
|
|
||||||
_last_synced_status = None
|
|
||||||
|
|
||||||
def post_init(self, worker_context):
|
|
||||||
"""Called at end of __init__ in BaseDriver.
|
|
||||||
|
|
||||||
Populates the details object from neutron and sets image_uuid and
|
|
||||||
flavor from cfg.
|
|
||||||
|
|
||||||
:param worker_context:
|
|
||||||
"""
|
|
||||||
self.image_uuid = cfg.CONF.loadbalancer.image_uuid
|
|
||||||
self.flavor = cfg.CONF.loadbalancer.instance_flavor
|
|
||||||
self.mgt_port = cfg.CONF.loadbalancer.mgt_service_port
|
|
||||||
|
|
||||||
self._ensure_cache(worker_context)
|
|
||||||
|
|
||||||
def _ensure_cache(self, worker_context):
|
|
||||||
try:
|
|
||||||
lb = worker_context.neutron.get_loadbalancer_detail(self.id)
|
|
||||||
self._loadbalancer = lb
|
|
||||||
except neutron.LoadBalancerGone:
|
|
||||||
self._loadbalancer = None
|
|
||||||
|
|
||||||
@property
|
|
||||||
def ports(self):
|
|
||||||
"""Lists ports associated with the resource.
|
|
||||||
|
|
||||||
:returns: A list of astara.api.neutron.Port objects or []
|
|
||||||
"""
|
|
||||||
if self._loadbalancer:
|
|
||||||
return [p for p in self._loadbalancer.ports]
|
|
||||||
else:
|
|
||||||
return []
|
|
||||||
|
|
||||||
def pre_boot(self, worker_context):
|
|
||||||
"""pre boot hook
|
|
||||||
Calls self.pre_plug().
|
|
||||||
|
|
||||||
:param worker_context:
|
|
||||||
:returns: None
|
|
||||||
"""
|
|
||||||
pass
|
|
||||||
|
|
||||||
def post_boot(self, worker_context):
|
|
||||||
"""post boot hook
|
|
||||||
|
|
||||||
:param worker_context:
|
|
||||||
:returns: None
|
|
||||||
"""
|
|
||||||
pass
|
|
||||||
|
|
||||||
def build_config(self, worker_context, mgt_port, iface_map):
|
|
||||||
"""Builds / rebuilds config
|
|
||||||
|
|
||||||
:param worker_context:
|
|
||||||
:param mgt_port:
|
|
||||||
:param iface_map:
|
|
||||||
:returns: configuration object
|
|
||||||
"""
|
|
||||||
|
|
||||||
self._ensure_cache(worker_context)
|
|
||||||
return config.build_config(
|
|
||||||
worker_context.neutron,
|
|
||||||
self._loadbalancer,
|
|
||||||
mgt_port,
|
|
||||||
iface_map)
|
|
||||||
|
|
||||||
def update_config(self, management_address, config):
|
|
||||||
"""Updates appliance configuration
|
|
||||||
|
|
||||||
This is responsible for pushing configuration to the managed
|
|
||||||
appliance
|
|
||||||
"""
|
|
||||||
self.log.info(_('Updating config for %s'), self.name)
|
|
||||||
astara_client.update_config(management_address, self.mgt_port, config)
|
|
||||||
|
|
||||||
def pre_plug(self, worker_context):
|
|
||||||
"""pre-plug hook
|
|
||||||
Sets up the external port.
|
|
||||||
|
|
||||||
:param worker_context:
|
|
||||||
:returs: None
|
|
||||||
"""
|
|
||||||
|
|
||||||
def make_ports(self, worker_context):
|
|
||||||
"""make ports call back for the nova client.
|
|
||||||
|
|
||||||
:param worker_context:
|
|
||||||
|
|
||||||
:returns: A tuple (managment_port, [instance_ports])
|
|
||||||
"""
|
|
||||||
def _make_ports():
|
|
||||||
self._ensure_cache(worker_context)
|
|
||||||
mgt_port = worker_context.neutron.create_management_port(
|
|
||||||
self.id
|
|
||||||
)
|
|
||||||
|
|
||||||
# allocate a port on the same net as the LB VIP
|
|
||||||
lb_port = worker_context.neutron.create_vrrp_port(
|
|
||||||
object_id=self.id,
|
|
||||||
network_id=self._loadbalancer.vip_port.network_id,
|
|
||||||
label='LB',
|
|
||||||
)
|
|
||||||
|
|
||||||
return mgt_port, [lb_port]
|
|
||||||
|
|
||||||
return _make_ports
|
|
||||||
|
|
||||||
def delete_ports(self, worker_context):
|
|
||||||
"""Delete all ports.
|
|
||||||
|
|
||||||
:param worker_context:
|
|
||||||
:returns: None
|
|
||||||
|
|
||||||
"""
|
|
||||||
worker_context.neutron.delete_vrrp_port(self.id, label='LB')
|
|
||||||
worker_context.neutron.delete_vrrp_port(self.id, label='MGT')
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def pre_populate_hook():
|
|
||||||
"""Fetch the existing LBs from neutron then and returns list back
|
|
||||||
to populate to be distributed to workers.
|
|
||||||
|
|
||||||
Wait for neutron to return the list of the existing LBs.
|
|
||||||
Pause up to max_sleep seconds between each attempt and ignore
|
|
||||||
neutron client exceptions.
|
|
||||||
|
|
||||||
"""
|
|
||||||
nap_time = 1
|
|
||||||
|
|
||||||
neutron_client = neutron.Neutron(cfg.CONF)
|
|
||||||
|
|
||||||
while True:
|
|
||||||
try:
|
|
||||||
resources = []
|
|
||||||
for lb in neutron_client.get_loadbalancers():
|
|
||||||
resources.append(
|
|
||||||
event.Resource(driver=LoadBalancer.RESOURCE_NAME,
|
|
||||||
id=lb.id,
|
|
||||||
tenant_id=lb.tenant_id))
|
|
||||||
|
|
||||||
return resources
|
|
||||||
except (q_exceptions.Unauthorized, q_exceptions.Forbidden) as err:
|
|
||||||
LOG.warning(_LW('PrePopulateWorkers thread failed: %s'), err)
|
|
||||||
return
|
|
||||||
except Exception as err:
|
|
||||||
LOG.warning(
|
|
||||||
_LW('Could not fetch loadbalancers from neutron: %s'), err)
|
|
||||||
LOG.warning(_LW(
|
|
||||||
'sleeping %s seconds before retrying'), nap_time)
|
|
||||||
time.sleep(nap_time)
|
|
||||||
nap_time = min(nap_time * 2,
|
|
||||||
cfg.CONF.astara_appliance.max_sleep)
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def get_resource_id_for_tenant(worker_context, tenant_id, message):
|
|
||||||
"""Find the id of the loadbalancer owned by tenant
|
|
||||||
|
|
||||||
Some events (ie, member.create.end) give us no context about which
|
|
||||||
LB the event is associated and only show us the tenant_id and member
|
|
||||||
id, so we for those we need to some resolution here.
|
|
||||||
|
|
||||||
:param tenant_id: The tenant uuid to search for
|
|
||||||
:param message: Message associated /w the request
|
|
||||||
|
|
||||||
:returns: uuid of the loadbalancer owned by the tenant
|
|
||||||
"""
|
|
||||||
|
|
||||||
lb_id = None
|
|
||||||
|
|
||||||
# loadbalancer.create.end contains the id in the payload
|
|
||||||
if message.body.get('loadbalancer'):
|
|
||||||
lb_id = message.body['loadbalancer'].get('id')
|
|
||||||
# listener.create.end references the loadbalancer directly
|
|
||||||
elif message.body.get('listener'):
|
|
||||||
lb_id = message.body['listener'].get('loadbalancer_id')
|
|
||||||
# pool.create.end references by listener
|
|
||||||
elif message.body.get('pool'):
|
|
||||||
listener_id = message.body['pool'].get('listener_id')
|
|
||||||
if listener_id:
|
|
||||||
lb = worker_context.neutron.get_loadbalancer_by_listener(
|
|
||||||
listener_id, tenant_id)
|
|
||||||
if lb:
|
|
||||||
lb_id = lb.id
|
|
||||||
# member.crate.end only gives us the member id itself.
|
|
||||||
elif message.body.get('member') or message.body.get('member_id'):
|
|
||||||
member_id = (message.body.get('member', {}).get('id') or
|
|
||||||
message.body.get('member_id'))
|
|
||||||
if member_id:
|
|
||||||
lb = worker_context.neutron.get_loadbalancer_by_member(
|
|
||||||
member_id=member_id, tenant_id=tenant_id)
|
|
||||||
if lb:
|
|
||||||
lb_id = lb.id
|
|
||||||
return lb_id

    @staticmethod
    def process_notification(tenant_id, event_type, payload):
        """Process an incoming notification event

        This gets called from the notifications layer to determine whether
        this driver should process an incoming notification event. It is
        responsible for translating an incoming notification to an Event
        object appropriate for this driver.

        :param tenant_id: str The UUID tenant_id for the incoming event
        :param event_type: str event type, for example loadbalancer.create.end
        :param payload: The payload body of the incoming event

        :returns: A populated Event object if it should process, or None if not
        """
        if event_type.startswith('loadbalancerstatus.update'):
            # these are generated when we sync state
            return
        lb_id = (
            payload.get('loadbalancer', {}).get('id') or
            payload.get('listener', {}).get('loadbalancer_id') or
            payload.get('loadbalancer_id')
        )

        update_notifications = [
            'listener.create.start',
            'pool.create.start',
            'member.create.end',
            'member.delete.end',
        ]

        # some events do not contain a lb id.
        if not lb_id and event_type not in update_notifications:
            return

        if event_type == 'loadbalancer.create.end':
            crud = event.CREATE
        elif event_type == 'loadbalancer.delete.end':
            crud = event.DELETE
        elif event_type in update_notifications:
            crud = event.UPDATE
        else:
            crud = None

        if not crud:
            LOG.info('Could not determine CRUD for event: %s', event_type)
            return

        resource = event.Resource(driver=LoadBalancer.RESOURCE_NAME,
                                  id=lb_id,
                                  tenant_id=tenant_id)
        e = event.Event(
            resource=resource,
            crud=crud,
            body=payload,
        )
        return e
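
    # A usage sketch with hypothetical placeholder ids: a
    # loadbalancer.create.end payload translates to a CREATE event for this
    # driver.
    #
    #   e = LoadBalancer.process_notification(
    #       tenant_id='tenant-uuid',
    #       event_type='loadbalancer.create.end',
    #       payload={'loadbalancer': {'id': 'lb-uuid'}})
    #   assert e.crud == event.CREATE and e.resource.id == 'lb-uuid'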

    def get_state(self, worker_context):
        self._ensure_cache(worker_context)
        if not self._loadbalancer:
            return states.GONE
        else:
            # NOTE(adam_g): We probably want to map this status back to
            # an internal astara status
            return self._loadbalancer.status

    def synchronize_state(self, worker_context, state):
        self._ensure_cache(worker_context)
        if not self._loadbalancer:
            LOG.debug('Not synchronizing state with missing loadbalancer %s',
                      self.id)
            return

        new_status = STATUS_MAP.get(state)
        old_status = self._last_synced_status
        LOG.debug('Synchronizing loadbalancer %s state %s->%s',
                  self.id, old_status, new_status)
        worker_context.neutron.update_loadbalancer_status(
            self.id, new_status)
        self._last_synced_status = new_status

    def get_interfaces(self, management_address):
        """Lists interfaces attached to the resource.

        This lists the interfaces attached to the resource from the POV
        of the resource itself.

        :returns: A list of interfaces
        """
        return astara_client.get_interfaces(management_address,
                                            self.mgt_port)

    def is_alive(self, management_address):
        """Determines whether the managed resource is alive

        :returns: bool True if alive, False if not
        """
        return astara_client.is_alive(management_address, self.mgt_port)
@ -1,368 +0,0 @@
# Copyright (c) 2015 Akanda, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time

from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils

from neutronclient.common import exceptions as q_exceptions

from astara.common.i18n import _
from astara.api import astara_client
from astara.api.config import router as configuration
from astara import event
from astara.api import neutron
from astara.drivers.base import BaseDriver
from astara.drivers import states
from astara.common.i18n import _LW

LOG = logging.getLogger(__name__)


ROUTER_OPTS = [
    cfg.StrOpt('image_uuid',
               help='The image_uuid for router instances.',
               deprecated_opts=[
                   cfg.DeprecatedOpt('router_image_uuid',
                                     group='DEFAULT')]),
    cfg.StrOpt('instance_flavor',
               help='The nova flavor id to use for router instances',
               deprecated_opts=[
                   cfg.DeprecatedOpt('router_instance_flavor',
                                     group='DEFAULT')]),
    cfg.IntOpt('mgt_service_port', default=5000,
               help='The port on which the router API service listens on '
                    'router appliances',
               deprecated_opts=[
                   cfg.DeprecatedOpt('akanda_mgt_service_port',
                                     group='DEFAULT')]),
    cfg.BoolOpt('ipsec_vpn', default=False,
                help='Enable ipsec vpn support'),
]
cfg.CONF.register_group(cfg.OptGroup(name='router'))
cfg.CONF.register_opts(ROUTER_OPTS, 'router')


STATUS_MAP = {
    states.DOWN: neutron.STATUS_DOWN,
    states.BOOTING: neutron.STATUS_BUILD,
    states.UP: neutron.STATUS_BUILD,
    states.CONFIGURED: neutron.STATUS_ACTIVE,
    states.ERROR: neutron.STATUS_ERROR,
    states.DEGRADED: neutron.STATUS_BUILD,
}


_ROUTER_INTERFACE_NOTIFICATIONS = set([
    'router.interface.create',
    'router.interface.delete',
])

_ROUTER_INTERESTING_NOTIFICATIONS = set([
    'router.change.end',
    'subnet.create.end',
    'subnet.change.end',
    'subnet.delete.end',
    'port.create.end',
    'port.change.end',
    'port.delete.end',
    'floatingip.create.end',
    'floatingip.update.end',
    # NOTE(adam_g): Not certain this floatingip.change.* is ever broadcast?
    'floatingip.change.end',
    'floatingip.delete.end'
])

_VPN_NOTIFICATIONS = set([
    'vpnservice.change.end',
    'vpnservice.delete.end',
    'ipsec_site_connection.create.end',
    'ipsec_site_connection.change.end',
    'ipsec_site_connection.delete.end',
    'ikepolicy.change.end',
    'ipsecpolicy.change.end'
])


DRIVER_NAME = 'router'


class Router(BaseDriver):

    RESOURCE_NAME = DRIVER_NAME
    _last_synced_status = None

    def post_init(self, worker_context):
        """Called at end of __init__ in BaseDriver.

        Populates the _router object from neutron and sets image_uuid and
        flavor from cfg.

        :param worker_context:
        """
        self.image_uuid = cfg.CONF.router.image_uuid
        self.flavor = cfg.CONF.router.instance_flavor
        self.mgt_port = cfg.CONF.router.mgt_service_port

        self._ensure_cache(worker_context)

    def _ensure_cache(self, worker_context):
        try:
            self._router = worker_context.neutron.get_router_detail(self.id)
        except neutron.RouterGone:
            self._router = None

    @property
    def ports(self):
        """Lists ports associated with the resource.

        :returns: A list of astara.api.neutron.Port objects or []
        """
        if self._router:
            return [p for p in self._router.ports]
        else:
            return []

    def pre_boot(self, worker_context):
        """pre boot hook

        Calls self.pre_plug().

        :param worker_context:
        :returns: None
        """
        self.pre_plug(worker_context)

    def post_boot(self, worker_context):
        """post boot hook

        :param worker_context:
        :returns: None
        """
        pass

    def build_config(self, worker_context, mgt_port, iface_map):
        """Builds / rebuilds config

        :param worker_context:
        :param mgt_port:
        :param iface_map:
        :returns: configuration object
        """
        self._ensure_cache(worker_context)
        return configuration.build_config(
            worker_context,
            self._router,
            mgt_port,
            iface_map
        )

    def update_config(self, management_address, config):
        """Updates appliance configuration

        This is responsible for pushing configuration to the managed
        appliance
        """
        self.log.info(_('Updating config for %s'), self.name)
        start_time = timeutils.utcnow()

        astara_client.update_config(
            management_address, self.mgt_port, config)
        delta = timeutils.delta_seconds(start_time, timeutils.utcnow())
        self.log.info(_('Config updated for %s after %s seconds'),
                      self.name, round(delta, 2))

    def make_ports(self, worker_context):
        """make ports callback for the nova client.

        :param worker_context:

        :returns: A callable returning (management_port, [instance_ports])
        """
        def _make_ports():
            self._ensure_cache(worker_context)
            mgt_port = worker_context.neutron.create_management_port(
                self.id
            )

            # FIXME(mark): ideally this should be ordered and de-duped
            instance_ports = [
                worker_context.neutron.create_vrrp_port(self.id, n)
                for n in (p.network_id for p in self._router.ports)
            ]

            return mgt_port, instance_ports

        return _make_ports
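
    # A usage sketch: the returned closure is handed to the nova client as a
    # callback, so port creation is deferred until the instance actually
    # boots. `driver` is a hypothetical Router instance.
    #
    #   callback = driver.make_ports(worker_context)
    #   mgt_port, instance_ports = callback()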

    def delete_ports(self, worker_context):
        """Delete all ports.

        :param worker_context:
        :returns: None

        """
        worker_context.neutron.delete_vrrp_port(self.id)
        worker_context.neutron.delete_vrrp_port(self.id, label='MGT')

    @staticmethod
    def pre_populate_hook():
        """Fetch the existing routers from neutron and return a list of
        resources to be distributed to workers.

        Wait for neutron to return the list of the existing routers.
        Pause up to max_sleep seconds between each attempt and ignore
        neutron client exceptions.

        """
        nap_time = 1

        neutron_client = neutron.Neutron(cfg.CONF)

        while True:
            try:
                neutron_routers = neutron_client.get_routers(detailed=False)
                resources = []
                for router in neutron_routers:
                    resources.append(
                        event.Resource(driver=DRIVER_NAME,
                                       id=router.id,
                                       tenant_id=router.tenant_id)
                    )

                return resources
            except (q_exceptions.Unauthorized, q_exceptions.Forbidden) as err:
                LOG.warning(_LW('PrePopulateWorkers thread failed: %s'), err)
                return
            except Exception as err:
                LOG.warning(
                    _LW('Could not fetch routers from neutron: %s'), err)
                LOG.warning(_LW(
                    'sleeping %s seconds before retrying'), nap_time)
                time.sleep(nap_time)
                nap_time = min(nap_time * 2,
                               cfg.CONF.astara_appliance.max_sleep)

    @staticmethod
    def get_resource_id_for_tenant(worker_context, tenant_id, message):
        """Find the id of the router owned by tenant

        :param tenant_id: The tenant uuid to search for
        :param message: message associated w/ request (unused here)

        :returns: uuid of the router owned by the tenant
        """
        router = worker_context.neutron.get_router_for_tenant(tenant_id)
        if not router:
            LOG.debug('Router not found for tenant %s.',
                      tenant_id)
            return None
        return router.id

    @staticmethod
    def process_notification(tenant_id, event_type, payload):
        """Process an incoming notification event

        This gets called from the notifications layer to determine whether
        this driver should process an incoming notification event. It is
        responsible for translating an incoming notification to an Event
        object appropriate for this driver.

        :param tenant_id: str The UUID tenant_id for the incoming event
        :param event_type: str event type, for example router.create.end
        :param payload: The payload body of the incoming event

        :returns: A populated Event object if it should process, or None if not
        """
        router_id = payload.get('router', {}).get('id')
        crud = event.UPDATE

        if event_type.startswith('routerstatus.update'):
            # We generate these events ourselves, so ignore them.
            return

        if event_type == 'router.create.end':
            crud = event.CREATE
        elif event_type == 'router.delete.end':
            crud = event.DELETE
            router_id = payload.get('router_id')
        elif event_type in _ROUTER_INTERFACE_NOTIFICATIONS:
            crud = event.UPDATE
            router_id = payload.get('router.interface', {}).get('id')
        elif event_type in _ROUTER_INTERESTING_NOTIFICATIONS:
            crud = event.UPDATE
        elif cfg.CONF.router.ipsec_vpn and event_type in _VPN_NOTIFICATIONS:
            crud = event.UPDATE
        else:
            LOG.debug('Not processing event: %s' % event_type)
            return

        resource = event.Resource(driver=DRIVER_NAME,
                                  id=router_id,
                                  tenant_id=tenant_id)
        e = event.Event(
            resource=resource,
            crud=crud,
            body=payload,
        )
        return e

    def get_state(self, worker_context):
        self._ensure_cache(worker_context)
        if not self._router:
            return states.GONE
        else:
            # NOTE(adam_g): We probably want to map this status back to
            # an internal astara status
            return self._router.status

    def synchronize_state(self, worker_context, state):
        self._ensure_cache(worker_context)
        if not self._router:
            LOG.debug('Not synchronizing state with missing router %s',
                      self.id)
            return
        new_status = STATUS_MAP.get(state)
        old_status = self._last_synced_status
        if not old_status or old_status != new_status:
            LOG.debug('Synchronizing router %s state %s->%s',
                      self.id, old_status, new_status)
            worker_context.neutron.update_router_status(self.id, new_status)
            self._last_synced_status = new_status

    def get_interfaces(self, management_address):
        """Lists interfaces attached to the resource.

        This lists the interfaces attached to the resource from the POV
        of the resource itself.

        :returns: A list of interfaces
        """
        return astara_client.get_interfaces(management_address,
                                            self.mgt_port)

    def is_alive(self, management_address):
        """Determines whether the managed resource is alive

        :returns: bool True if alive, False if not
        """
        return astara_client.is_alive(management_address, self.mgt_port)

    @property
    def is_ha(self):
        """Returns True if the logical resource is set to be highly-available"""
        if not self._router:
            return False
        return self._router.ha
@ -1,30 +0,0 @@
# Copyright (c) 2015 Akanda, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Reference states.

Each driver maps these to whichever states neutron or other
services use.
"""
DOWN = 'down'
BOOTING = 'booting'
UP = 'up'
CONFIGURED = 'configured'
RESTART = 'restart'
REPLUG = 'replug'
GONE = 'gone'
ERROR = 'error'
DEGRADED = 'degraded'

# base list of ready states, a driver can use its own list.
READY_STATES = (UP, CONFIGURED, DEGRADED)
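

# A usage sketch: a driver-side readiness check reduces to a membership test
# against READY_STATES. The function name here is illustrative, not part of
# the original module.
def is_ready(current_state):
    return current_state in READY_STATES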
@ -1,98 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# CRUD operations tracked in Event.crud
CREATE = 'create'
READ = 'read'
UPDATE = 'update'
DELETE = 'delete'
POLL = 'poll'
COMMAND = 'command'  # an external command to be processed
REBUILD = 'rebuild'
REBALANCE = 'rebalance'
CLUSTER_REBUILD = 'cluster_rebuild'


class Event(object):
    """Rug Event object

    Events are constructed from incoming messages accepted by the Rug.
    They are responsible for holding the message payload (body), the
    corresponding CRUD operation and the logical resource that the
    event affects.
    """
    def __init__(self, resource, crud, body):
        """
        :param resource: Resource instance holding context about the logical
                         resource that is affected by the Event.
        :param crud: CRUD operation that is to be completed by the
                     corresponding state machine when it is delivered.
        :param body: The original message payload dict.
        """
        self.resource = resource
        self.crud = crud
        self.body = body

    def __eq__(self, other):
        if not type(self) == type(other):
            return False
        for k, v in vars(self).items():
            if k not in vars(other):
                return False
            if vars(other)[k] != v:
                return False
        return True

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return '<%s (resource=%s, crud=%s, body=%s)>' % (
            self.__class__.__name__,
            self.resource,
            self.crud,
            self.body)


class Resource(object):
    """Rug Resource object

    A Resource object represents one instance of a logical resource
    that is to be managed by the rug (ie, a router).
    """
    def __init__(self, driver, id, tenant_id):
        """
        :param driver: str name of the driver that corresponds to the resource
                       type.
        :param id: ID of the resource (ie, the Neutron router's UUID).
        :param tenant_id: The UUID of the tenant that owns this resource.
        """
        self.driver = driver
        self.id = id
        self.tenant_id = tenant_id

    def __eq__(self, other):
        return type(self) == type(other) and vars(self) == vars(other)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return '<%s (driver=%s, id=%s, tenant_id=%s)>' % (
            self.__class__.__name__,
            self.driver,
            self.id,
            self.tenant_id)
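

# A usage sketch with hypothetical placeholder ids: two events built from
# equal attributes compare equal because __eq__ compares vars(), not identity.
def _equality_example():
    r = Resource(driver='router', id='router-uuid', tenant_id='tenant-uuid')
    a = Event(resource=r, crud=CREATE, body={'id': 'router-uuid'})
    b = Event(resource=r, crud=CREATE, body={'id': 'router-uuid'})
    return a == b  # True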
@ -1,87 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


"""Periodic health check code.
"""

import threading
import time

from oslo_config import cfg

from astara import event
from astara.api import neutron

from oslo_log import log as logging


LOG = logging.getLogger(__name__)
CONF = cfg.CONF

HEALTH_INSPECTOR_OPTS = [
    cfg.IntOpt('health_check_period',
               default=60,
               help='seconds between health checks'),
]
CONF.register_opts(HEALTH_INSPECTOR_OPTS)


def _health_inspector(scheduler):
    """Runs in the thread.
    """
    period = CONF.health_check_period
    while True:
        time.sleep(period)
        LOG.debug('waking up')
        r = event.Resource(
            id='*',
            tenant_id='*',
            driver='*',
        )
        e = event.Event(
            resource=r,
            crud=event.POLL,
            body={},
        )
        scheduler.handle_message('*', e)


def start_inspector(period, scheduler):
    """Start a health check thread.
    """
    t = threading.Thread(
        target=_health_inspector,
        args=(scheduler,),
        name='HealthInspector',
    )
    t.setDaemon(True)
    t.start()
    return t


def start_reporter():
    """Start an agent report thread.
    """
    reporter = neutron.NeutronAgentReporter()
    t = threading.Thread(
        target=reporter.report_forever,
        args=(),
        name='AgentReporter',
    )
    t.setDaemon(True)
    t.start()
    return t
@ -1,824 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from datetime import datetime
from functools import wraps
import time
import six

from oslo_config import cfg

from astara.drivers import states
from astara.common.i18n import _LE, _LI
from astara.common import container


CONF = cfg.CONF
INSTANCE_MANAGER_OPTS = [
    cfg.IntOpt(
        'hotplug_timeout',
        default=10,
        help='The amount of time to wait for nova to hotplug/unplug '
             'networks from the instances.'),
    cfg.IntOpt(
        'boot_timeout', default=600),
    cfg.IntOpt(
        'error_state_cooldown',
        default=30,
        help='Number of seconds to ignore new events when an instance goes '
             'into ERROR state.',
    ),
]
CONF.register_opts(INSTANCE_MANAGER_OPTS)


def _generate_interface_map(instance, interfaces):
    # TODO(mark): We're in the first phase of VRRP, so we need to
    # map the interface to the network ID.
    # Eventually we'll send VRRP data and real interface data
    port_mac_to_net = {
        p.mac_address: p.network_id
        for p in instance.ports
    }
    # Add in the management port
    mgt_port = instance.management_port
    port_mac_to_net[mgt_port.mac_address] = mgt_port.network_id
    # this is a network id to logical interface name map
    return {
        port_mac_to_net[i['lladdr']]: i['ifname']
        for i in interfaces if i['lladdr'] in port_mac_to_net
    }
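

# A worked sketch of _generate_interface_map with hypothetical stand-in
# objects: two ports whose MACs show up in the appliance's reported
# interfaces resolve to a {network_id: ifname} map.
def _interface_map_example():
    class _Port(object):
        def __init__(self, mac_address, network_id):
            self.mac_address = mac_address
            self.network_id = network_id

    class _Instance(object):
        ports = [_Port('fa:16:3e:aa:aa:aa', 'net-1')]
        management_port = _Port('fa:16:3e:bb:bb:bb', 'net-mgt')

    interfaces = [
        {'lladdr': 'fa:16:3e:aa:aa:aa', 'ifname': 'ge1'},
        {'lladdr': 'fa:16:3e:bb:bb:bb', 'ifname': 'ge0'},
    ]
    # returns {'net-1': 'ge1', 'net-mgt': 'ge0'}
    return _generate_interface_map(_Instance(), interfaces)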


def synchronize_driver_state(f):
    """Wrapper that triggers a driver's synchronize_state function"""
    def wrapper(self, *args, **kw):
        state = f(self, *args, **kw)
        self.resource.synchronize_state(*args, state=state)
        return state
    return wrapper
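

# A minimal sketch of the decorator's effect on toy stand-ins (not the real
# worker classes): the wrapped method's return value is pushed to the
# driver's synchronize_state() before being returned to the caller.
def _synchronize_example():
    class _ToyDriver(object):
        synced = None

        def synchronize_state(self, *args, **kw):
            self.synced = kw['state']

    class _ToyManager(object):
        resource = _ToyDriver()

        @synchronize_driver_state
        def update_state(self, worker_context):
            return 'configured'

    m = _ToyManager()
    state = m.update_state(None)  # m.resource.synced is now 'configured'
    return state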


def ensure_cache(f):
    """Decorator to wrap around any function that uses self.instance_info.

    Ensures that self.instance_info is up to date and catches instances in a
    GONE or missing state before wasting cycles trying to do something with
    them.

    NOTE: This replaces the old function called _ensure_cache, made a
    decorator rather than being called explicitly at the start of all those
    functions.
    """
    @wraps(f)
    def wrapper(self, worker_context, *args, **kw):
        self.instances.refresh(worker_context)

        instances = worker_context.nova_client.get_instances_for_obj(
            self.resource.name)
        for inst_info in instances:
            self.instances[inst_info.id_] = inst_info

        self.instances.update_ports(worker_context)

        return f(self, worker_context, *args, **kw)

    return wrapper


class BootAttemptCounter(object):
    def __init__(self):
        self._attempts = 0

    def start(self):
        self._attempts += 1

    def reset(self):
        self._attempts = 0

    @property
    def count(self):
        return self._attempts


class InstanceGroupManager(container.ResourceContainer):
    def __init__(self, log, resource):
        super(InstanceGroupManager, self).__init__()
        self.log = log
        self.resource = resource
        self._alive = set()

    @property
    def instances(self):
        """Returns the managed instances sorted by name"""
        return sorted(self.resources.values(), key=lambda i: i.name)

    def validate_ports(self):
        """Checks whether instances have management ports attached

        :returns: tuple containing two lists:
                  (instances that have ports, instances that don't)
        """
        has_ports = set()
        for inst_info in set(self.resources.values()):
            if inst_info.management_address:
                has_ports.add(inst_info)
        return has_ports, set(self.resources.values()) - has_ports

    def are_alive(self):
        """Calls the alive check on all instances to ensure liveliness

        :returns: tuple containing two lists (alive_instances, dead_instances)
        """
        alive = set()
        for i in six.moves.range(cfg.CONF.max_retries):
            for inst_info in set(self.instances) - alive:
                if (inst_info.management_address and
                        self.resource.is_alive(inst_info.management_address)):
                    self.log.debug(
                        'Instance %s found alive after %s of %s attempts',
                        inst_info.id_, i, cfg.CONF.max_retries)
                    alive.add(inst_info)
                else:
                    self.log.debug(
                        'Alive check failed for instance %s. Attempt %d of %d',
                        inst_info.id_, i, cfg.CONF.max_retries)

            # all managed instances report alive
            if alive == set(self.instances):
                self._alive = [i.id_ for i in alive]
                return alive, []

        # zero managed instances report alive
        if not alive:
            self.log.debug(
                'Alive check failed for all instances after %s attempts.',
                cfg.CONF.max_retries)
            return [], self.instances

        dead = set(self.resources.values()) - alive
        self._alive = [i.id_ for i in alive - dead]
        return list(alive), list(dead)

    def update_ports(self, worker_context):
        """Refresh ports on all managed instance info objects"""
        for instance_info in self.instances:
            if not instance_info:
                continue
            (
                instance_info.management_port,
                instance_info.ports
            ) = worker_context.neutron.get_ports_for_instance(
                instance_info.id_
            )

    def get_interfaces(self):
        """Obtain a list of interfaces from each managed instance

        Skips any instance that has not already been verified as being alive.

        :returns: dict of {instance: [interfaces_dict]}
        """
        interfaces = {}
        for inst in self.instances:
            if inst.id_ not in self._alive:
                self.log.debug(
                    'Skipping interface query on instance %s that '
                    'is not yet alive.', inst.id_)
                continue
            else:
                interfaces[inst] = self.resource.get_interfaces(
                    inst.management_address)
        return interfaces

    def verify_interfaces(self, ports):
        """Verify at least one instance in the group has correct ports plugged"""
        for inst, interfaces in self.get_interfaces().items():
            actual_macs = set((iface['lladdr'] for iface in interfaces))
            self.log.debug(
                'MACs found on %s: %s', inst.id_,
                ', '.join(sorted(actual_macs)))
            if not all(
                getattr(p, 'mac_address', None) for p in ports
            ):
                return False

            num_instance_ports = len(list(inst.ports))
            num_logical_ports = len(list(ports))
            if num_logical_ports != num_instance_ports:
                self.log.debug(
                    'Expected %s instance ports but found %s',
                    num_logical_ports, num_instance_ports)
                return False

            expected_macs = set(p.mac_address
                                for p in inst.ports)
            expected_macs.add(inst.management_port.mac_address)
            self.log.debug(
                'MACs expected on: %s, %s',
                inst.id_, ', '.join(sorted(expected_macs)))

            if actual_macs == expected_macs:
                self.log.debug('Found all expected MACs on %s', inst.id_)
                return True

            self.log.debug(
                'Did not find all expected MACs on instance %s, '
                'actual MACs: %s', inst.id_, ', '.join(actual_macs))

        return False

    def _update_config(self, instance, config):
        self.log.debug(
            'Updating config for instance %s on resource %s',
            instance.id_, self.resource.id)
        self.log.debug('New config: %r', config)
        attempts = cfg.CONF.max_retries
        for i in six.moves.range(attempts):
            try:
                self.resource.update_config(
                    instance.management_address,
                    config)
            except Exception:
                if i == attempts - 1:
                    # Only log the traceback if we encounter it many times.
                    self.log.exception(_LE('failed to update config'))
                else:
                    self.log.debug(
                        'failed to update config, attempt %d',
                        i
                    )
                time.sleep(cfg.CONF.retry_delay)
            else:
                self.log.info('Instance config updated')
                return True
        else:
            return False

    def _ha_config(self, instance):
        """Builds configuration describing the HA cluster

        This informs the instance about any configuration relating to the HA
        cluster it should be joining. ATM this is primarily used to inform
        an instance about the management addresses of its peers.

        :param instance: InstanceInfo object
        :returns: dict of HA configuration
        """
        peers = [
            i.management_address for i in self.instances
            if i.management_address != instance.management_address]

        # determine cluster priority by instance age. the older instance
        # gets the higher priority
        sorted_by_age = sorted(
            self.instances, key=lambda i: i.time_since_boot,
            reverse=True)

        if sorted_by_age.index(instance) == 0:
            priority = 100
        else:
            priority = 50

        return {
            'peers': peers,
            'priority': priority,
        }
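
    # A worked example of the priority rule above, with hypothetical uptimes:
    # instance A booted 600s ago and instance B booted 60s ago. Sorted
    # oldest-first, A has index 0 and gets priority 100 while B gets 50, so
    # the longest-running appliance is preferred as cluster master.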

    def configure(self, worker_context):
        # XXX config update can be dispatched to threads to speed
        # things up across multiple instances
        failed = []

        # get_interfaces() returns only instances that are up and ready
        # for config
        instances_interfaces = self.get_interfaces()

        for inst, interfaces in instances_interfaces.items():
            # sending all the standard config over to the driver for
            # final updates
            config = self.resource.build_config(
                worker_context,
                inst.management_port,
                _generate_interface_map(inst, interfaces)
            )

            # while drivers are free to express their own ha config
            # requirements, the instance manager is the only one with a
            # high level view of the cluster, ie knowledge of membership
            if self.resource.is_ha:
                config['ha_config'] = config.get('ha') or {}
                config['ha_config'].update(self._ha_config(inst))

            self.log.debug(
                'preparing to update config for instance %s on %s resource '
                'to %r', inst.id_, self.resource.RESOURCE_NAME, config)

            if self._update_config(inst, config) is not True:
                failed.append(inst)

        if set(failed) == set(self.instances):
            # all updates have failed
            self.log.error(
                'Could not update config for any instances on %s resource '
                '%s, marking resource state %s',
                self.resource.RESOURCE_NAME, self.resource.id, states.RESTART)
            return states.RESTART
        elif failed:
            # some updates to instances we thought to be alive have failed
            self.log.error(
                'Could not update config for some instances on %s '
                'resource %s, marking resource state %s',
                self.resource.RESOURCE_NAME, self.resource.id, states.DEGRADED)
            return states.DEGRADED
        elif len(instances_interfaces.keys()) != len(self.instances):
            # instances_interfaces contains only instances that are alive;
            # if we're still waiting on instances, remain degraded
            self.log.debug(
                'Config updated on %s of %s instances',
                len(instances_interfaces.keys()), len(self.instances))
            return states.DEGRADED
        else:
            self.log.debug(
                'Config updated across all instances on %s resource %s',
                self.resource.RESOURCE_NAME, self.resource.id)
            return states.CONFIGURED

    def delete(self, instance):
        """Removes nova server reference from manager"""
        del self.resources[instance.id_]

    def refresh(self, worker_context):
        """Update nova server reference for all managed instances"""
        for i in self.instances:
            if not worker_context.nova_client.update_instance_info(i):
                self.delete(i)

    def destroy(self, worker_context):
        """Destroys all nova instances and blocks until deletion"""
        worker_context.nova_client.delete_instances_and_wait(
            self.instances)

    def remove(self, worker_context, instance):
        """Destroys the nova instance, removes instance from group manager"""
        worker_context.nova_client.destroy_instance(instance)
        self.delete(instance)

    @property
    def next_instance_index(self):
        ids = [
            int(i.name.split('_')[1]) for i in
            self.instances]
        try:
            return max(ids) + 1
        except ValueError:
            return 0

    def create(self, worker_context):
        to_boot = self.required_instance_count - len(self.instances)
        self.log.debug(
            'Booting an additional %s instance(s) for resource %s',
            to_boot, self.resource.id)

        for i in six.moves.range(to_boot):
            name = '%s_%s' % (self.resource.name, self.next_instance_index)
            instance = worker_context.nova_client.boot_instance(
                resource_type=self.resource.RESOURCE_NAME,
                prev_instance_info=None,
                name=name,
                image_uuid=self.resource.image_uuid,
                flavor=self.resource.flavor,
                make_ports_callback=self.resource.make_ports(worker_context)
            )
            self.add_instance(instance)

    @property
    def required_instance_count(self):
        if self.resource.is_ha is True:
            return 2
        else:
            return 1

    @property
    def instance_count(self):
        return len(self.instances)

    @property
    def cluster_degraded(self):
        return self.instance_count < self.required_instance_count

    def add_instance(self, instance):
        """Adds a new instance or updates existing"""
        self.resources[instance.id_] = instance


class InstanceManager(object):

    def __init__(self, resource, worker_context):
        """The instance manager is your interface to the running instance,
        whether it be virtual, container or physical.

        Service specific code lives in the driver which is passed in here.

        :param resource: A driver instance for the managed resource
        :param worker_context:
        """
        self.resource = resource
        self.log = self.resource.log

        self.state = states.DOWN

        self.instance_info = None
        self.instances = InstanceGroupManager(self.log, self.resource)
        self.last_error = None
        self._boot_counter = BootAttemptCounter()
        self._boot_logged = []
        self._last_synced_status = None

        self.state = self.update_state(worker_context, silent=True)

    @property
    def attempts(self):
        """Property which returns the boot count.

        :returns Int:
        """
        return self._boot_counter.count

    def reset_boot_counter(self):
        """Resets the boot counter.

        :returns None:
        """
        self._boot_counter.reset()

    @synchronize_driver_state
    @ensure_cache
    def update_state(self, worker_context, silent=False):
        """Updates state of the instance and, by extension, its logical
        resource

        :param worker_context:
        :param silent:
        :returns: state
        """
        if self.resource.get_state(worker_context) == states.GONE:
            self.log.debug('%s driver reported its state is %s',
                           self.resource.RESOURCE_NAME, states.GONE)
            self.state = states.GONE
            return self.state

        if not self.instances:
            self.log.info(_LI('no backing instance(s), marking as %s'),
                          states.DOWN)
            self.state = states.DOWN
            return self.state
        elif self.instances.cluster_degraded is True:
            self.log.info(_LI(
                'instance cluster for resource %s reports degraded'),
                self.resource.id)
            self.state = states.DEGRADED
            return self.state

        has_ports, no_ports = self.instances.validate_ports()

        # ports_state=None means no instances have ports
        if not has_ports:
            self.log.debug('waiting for instance ports to be attached')
            self.state = states.BOOTING
            return self.state

        # XXX TODO need to account for when only a subset of the cluster has
        # correct ports; kick back to Replug

        alive, dead = self.instances.are_alive()
        if not alive:
            # alive check failed on all instances for an already configured
            # resource, mark it down.
            # XXX need to track timeouts per instance
            # self._check_boot_timeout()

            if self.state == states.CONFIGURED:
                self.log.debug('No instance(s) alive, marking it as %s',
                               states.DOWN)
                self.state = states.DOWN
                return self.state
        elif dead:
            # some subset of instances reported not alive, mark it degraded.
            if self.state == states.CONFIGURED:
                for i in dead:
                    instance = worker_context.nova_client.get_instance_by_id(
                        i.id_)
                    if instance is None and self.state != states.ERROR:
                        self.log.info(
                            'Instance %s was not found; rebooting', i.id_)
                        self.instances.delete(i)
            self.state = states.DEGRADED
            return self.state

        self.instances.refresh(worker_context)
        if self.state == states.CONFIGURED:
            for i in alive:
                if not i.booting and i not in self._boot_logged:
                    self.log.info(
                        '%s booted in %s seconds after %s attempts',
                        self.resource.RESOURCE_NAME,
                        i.time_since_boot.total_seconds(),
                        self._boot_counter.count)
                    self._boot_logged.append(i)
            self.reset_boot_counter()
        else:
            if alive:
                self.state = states.UP

        return self.state

    @ensure_cache
    def boot(self, worker_context):
        """Boots the instances with driver pre/post boot hooks.

        :returns: None
        """
        self.log.info('Booting %s' % self.resource.RESOURCE_NAME)

        if self.state != states.DEGRADED:
            self.state = states.DOWN
        self._boot_counter.start()

        # driver preboot hook
        self.resource.pre_boot(worker_context)

        try:
            self.instances.create(worker_context)
            if not self.instances:
                self.log.info(_LI('Previous instances are still deleting'))
                # Reset the boot counter, causing the state machine to start
                # again with a new Instance.
                self.reset_boot_counter()
                return
        except Exception:
            self.log.exception(_LE('Instances failed to start boot'))
        else:
            self.state = states.BOOTING

        # driver post boot hook
        self.resource.post_boot(worker_context)

    @synchronize_driver_state
    @ensure_cache
    def set_error(self, worker_context, silent=False):
        """Set the internal and neutron status for the router to states.ERROR.

        This is called from outside when something notices the router
        is "broken". We don't use it internally because this class is
        supposed to do what it's told and not make decisions about
        whether or not the router is fatally broken.
        """
        self.state = states.ERROR
        self.last_error = datetime.utcnow()
        return self.state

    @synchronize_driver_state
    @ensure_cache
    def clear_error(self, worker_context, silent=False):
        """Clear the internal error state.

        This is called from outside when something wants to force a
        router rebuild, so that the state machine that checks our
        status won't think we are broken unless we actually break
        again.
        """
        # Clear the boot counter.
        self._boot_counter.reset()
        self.state = states.DOWN
        return self.state

    @property
    def error_cooldown(self):
        """Returns True if the instance was recently set to states.ERROR state.
        """
        if self.last_error and self.state == states.ERROR:
            seconds_since_error = (
                datetime.utcnow() - self.last_error
            ).total_seconds()
            if seconds_since_error < cfg.CONF.error_state_cooldown:
                return True
        return False
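
    # A sketch of the same arithmetic with the default 30 second
    # error_state_cooldown: an instance that entered ERROR 10 seconds ago
    # reports error_cooldown == True (new events are ignored), while one that
    # errored 45 seconds ago reports False and processing resumes.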

    @synchronize_driver_state
    @ensure_cache
    def stop(self, worker_context):
        """Attempts to destroy the instance cluster

        :param worker_context:
        :returns:
        """
        self.log.info(_LI('Destroying instance'))

        self.resource.delete_ports(worker_context)

        if not self.instances:
            self.log.info(_LI('Instance(s) already destroyed.'))
            if self.state != states.GONE:
                self.state = states.DOWN
            return self.state

        try:
            self.instances.destroy(worker_context)
            if self.state != states.GONE:
                self.state = states.DOWN
        except Exception:
            self.log.exception(_LE('Failed to stop instance(s)'))

    @synchronize_driver_state
    @ensure_cache
    def configure(self, worker_context):
        """Pushes config to instance

        :param worker_context:
        :returns:
        """
        self.log.debug('Begin instance config')
        self.state = states.UP

        if self.resource.get_state(worker_context) == states.GONE:
            return states.GONE

        if not self.instances:
            return states.DOWN

        if not self.instances.verify_interfaces(self.resource.ports):
            # XXX Need to account for a degraded cluster w/ a subset of nodes
            # having incorrect plugging.
            self.log.debug("Interfaces aren't plugged as expected.")
            self.state = states.REPLUG
            return self.state

        self.state = self.instances.configure(worker_context)
        return self.state

    def replug(self, worker_context):
        """Attempts to replug the network ports for an instance.

        :param worker_context:
        :returns:
        """
        self.log.debug('Attempting to replug...')

        self.resource.pre_plug(worker_context)

        ports_to_delete = []
        for instance, interfaces in self.instances.get_interfaces().items():
            actual_macs = set((iface['lladdr'] for iface in interfaces))
            instance_macs = set(p.mac_address for p in instance.ports)
            instance_macs.add(instance.management_port.mac_address)

            if instance_macs != actual_macs:
                # our cached copy of the ports is wrong; reboot and clean up
                self.log.warning((
                    'Instance macs(%s) do not match actual macs (%s). Instance'
                    ' cache appears out-of-sync'),
                    instance_macs, actual_macs
                )
                self.state = states.RESTART
                return

            instance_ports = {p.network_id: p for p in instance.ports}
            instance_networks = set(instance_ports.keys())

            logical_networks = set(p.network_id for p in self.resource.ports)

            if logical_networks != instance_networks:
                nova_instance = worker_context.nova_client.get_instance_by_id(
                    instance.id_
                )

                # For each port that doesn't have a mac address on the instance
                for network_id in logical_networks - instance_networks:
                    port = worker_context.neutron.create_vrrp_port(
                        self.resource.id,
                        network_id
                    )
                    self.log.debug(
                        'Net %s is missing from the appliance instance %s, '
                        'plugging: %s', network_id, instance.id_, port.id
                    )

                    try:
                        nova_instance.interface_attach(port.id, None, None)
                        instance.ports.append(port)
                    except Exception:
                        self.log.exception(
                            'Interface attach failed on instance %s',
                            instance.id_)
                        self.instances.remove(worker_context, instance)

            # instance has been removed for failure, do not continue with
            # plugging
            if instance not in self.instances.values():
                continue

            for network_id in instance_networks - logical_networks:
                port = instance_ports[network_id]
                self.log.debug(
                    'Net %s is detached from the router, unplugging: %s',
                    network_id, port.id
                )

                try:
                    nova_instance.interface_detach(port.id)
                    instance.ports.remove(port)
                    ports_to_delete.append(port)
                except Exception:
                    self.log.exception(
                        'Interface detach failed on instance %s',
                        instance.id_)
                    self.instances.remove(worker_context, instance)

            # instance has been removed for failure, do not continue with
            # plugging
            if instance not in self.instances.values():
                continue

            if self._wait_for_interface_hotplug(instance) is not True:
                self.instances.remove(worker_context, instance)

        if not self.instances:
            # all instances were destroyed for plugging failure
            self.state = states.RESTART
        elif self.instances.cluster_degraded:
            # some instances were destroyed for plugging failure
            self.state = states.DEGRADED
        else:
            # plugging was successful
            for p in ports_to_delete:
                worker_context.neutron.api_client.delete_port(p.id)
        return

    def _wait_for_interface_hotplug(self, instance):
        """Waits for the instance to report interfaces for all expected ports"""
        # The action of attaching/detaching interfaces in Nova happens via
        # the message bus and is *not* blocking. We need to wait a few
        # seconds to see if the list of tap devices on the appliance actually
        # changed. If not, assume the hotplug failed, and reboot the
        # Instance.
        for i in six.moves.range(1, cfg.CONF.hotplug_timeout):
            self.log.debug(
                "Waiting for interface attachments to take effect..."
            )
            interfaces = self.resource.get_interfaces(
                instance.management_address)

            actual_macs = set((iface['lladdr'] for iface in interfaces))
            instance_macs = set(p.mac_address for p in instance.ports)
            instance_macs.add(instance.management_port.mac_address)
            if actual_macs == instance_macs:
                return True
            time.sleep(1)
        else:
            self.log.debug(
                "Interfaces aren't plugged as expected on instance %s, "
                "marking for rebooting.", instance.id_)
            return False

    def _check_boot_timeout(self):
        """If the instance was created more than `boot_timeout` seconds
        ago, log an error and set the state to states.DOWN
        """
        time_since_boot = self.instance_info.time_since_boot

        if time_since_boot:
            if time_since_boot.seconds < cfg.CONF.boot_timeout:
                # Do not reset the state if we have an error
                # condition already. The state will be reset when
                # the router starts responding again, or when the
                # error is cleared from a forced rebuild.
                if self.state != states.ERROR:
                    self.state = states.BOOTING
            else:
                # If the instance was created more than `boot_timeout` seconds
                # ago, log an error and set the state to states.DOWN
                self.log.info(
                    'Router is DOWN. Created over %d secs ago.',
                    cfg.CONF.boot_timeout)
                # Do not reset the state if we have an error condition
                # already. The state will be reset when the router starts
                # responding again, or when the error is cleared from a
                # forced rebuild.
                if self.state != states.ERROR:
                    self.state = states.DOWN
218 astara/main.py
@ -1,218 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import functools
import logging
import multiprocessing
import signal
import socket
import sys
import threading

from oslo_config import cfg
from oslo_log import log

from astara.common.i18n import _LE, _LI
from astara.common import config as ak_cfg
from astara import coordination
from astara import daemon
from astara import health
from astara import metadata
from astara import notifications
from astara import scheduler
from astara import populate
from astara import worker
from astara.api import neutron as neutron_api


LOG = log.getLogger(__name__)
CONF = cfg.CONF

MAIN_OPTS = [
    cfg.StrOpt('host',
               default=socket.gethostname(),
               help="The hostname Astara is running on"),
]
CONF.register_opts(MAIN_OPTS)


CEILOMETER_OPTS = [
    cfg.BoolOpt('enabled', default=False,
                help='Enable reporting metrics to ceilometer.'),
    cfg.StrOpt('topic', default='notifications.info',
               help='The name of the topic queue ceilometer consumes events '
                    'from.')
]
CONF.register_group(cfg.OptGroup(name='ceilometer',
                                 title='Ceilometer Reporting Options'))
CONF.register_opts(CEILOMETER_OPTS, group='ceilometer')

def shuffle_notifications(notification_queue, sched):
    """Copy messages from the notification queue into the scheduler.
    """
    while True:
        try:
            target, message = notification_queue.get()
            if target is None:
                break
            sched.handle_message(target, message)
        except IOError:
            # FIXME(rods): if a signal arrives during an IO operation
            # an IOError is raised. We catch the exception here in the
            # meantime, while waiting for a better solution.
            pass
        except KeyboardInterrupt:
            LOG.info(_LI('got Ctrl-C'))
            break
        except:
            LOG.exception(_LE('unhandled exception processing message'))
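
# Note: the (None, None) sentinel pushed onto the queue by the SIGINT
# handler in main() below is what breaks the receive loop above.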

def main(argv=sys.argv[1:]):
    """Main entry point into the astara-orchestrator.

    On invocation of this method, logging and local network connectivity
    setup are performed. This information is obtained through the
    'ak-config' file, passed as an argument to this method. Worker threads
    are spawned for handling various tasks that are associated with
    processing as well as responding to different Neutron events prior to
    starting a notification dispatch loop.

    :param argv: list of command line arguments

    :returns: None

    :raises: None

    """
    # TODO(rama) Error Handling to be added as part of the docstring
    # description

    # Change the process and thread name so the logs are cleaner.
    p = multiprocessing.current_process()
    p.name = 'pmain'
    t = threading.current_thread()
    t.name = 'tmain'
    ak_cfg.parse_config(argv)
    log.setup(cfg.CONF, 'astara-orchestrator')
    cfg.CONF.log_opt_values(LOG, logging.INFO)

    neutron = neutron_api.Neutron(cfg.CONF)

    # TODO(mark): develop a better way to restore after machine reboot
    # neutron.purge_management_interface()

    # bring the mgt tap interface up
    mgt_ip_address = neutron.ensure_local_service_port().split('/')[0]

    # Set up the queue to move messages between the eventlet-based
    # listening process and the scheduler.
    notification_queue = multiprocessing.Queue()

    # Ignore signals that might interrupt processing.
    daemon.ignore_signals()

    # If we see a SIGINT, stop processing.
    def _stop_processing(*args):
        notification_queue.put((None, None))
    signal.signal(signal.SIGINT, _stop_processing)

    # Listen for notifications.
    notification_proc = multiprocessing.Process(
        target=notifications.listen,
        kwargs={
            'notification_queue': notification_queue
        },
        name='notification-listener',
    )
    notification_proc.start()

    if CONF.coordination.enabled:
        coordinator_proc = multiprocessing.Process(
            target=coordination.start,
            kwargs={
                'notification_queue': notification_queue
            },
            name='coordinator',
        )
        coordinator_proc.start()
    else:
        coordinator_proc = None

    metadata_proc = multiprocessing.Process(
        target=metadata.serve,
        args=(mgt_ip_address,),
        name='metadata-proxy'
    )
    metadata_proc.start()

    from astara.api import rug as rug_api
    rug_api_proc = multiprocessing.Process(
        target=rug_api.serve,
        name='rug-api'
    )
    rug_api_proc.start()

    # Set up the notifications publisher
    Publisher = (notifications.Publisher if cfg.CONF.ceilometer.enabled
                 else notifications.NoopPublisher)
    publisher = Publisher(
        topic=cfg.CONF.ceilometer.topic,
    )

    # Set up a factory to make Workers that know how many threads to
    # run.
    worker_factory = functools.partial(
        worker.Worker,
        notifier=publisher,
        management_address=mgt_ip_address,
    )

    # Set up the scheduler that knows how to manage the routers and
    # dispatch messages.
    sched = scheduler.Scheduler(
        worker_factory=worker_factory,
    )

    # Prepopulate the workers with existing routers on startup
    populate.pre_populate_workers(sched)

    # Set up the periodic health check
    health.start_inspector(cfg.CONF.health_check_period, sched)

    # Set up the periodic neutron agent report
    health.start_reporter()

    # Block the main process, copying messages from the notification
    # listener to the scheduler
    try:
        shuffle_notifications(notification_queue, sched)
    finally:
        LOG.info(_LI('Stopping scheduler.'))
        sched.stop()
        LOG.info(_LI('Stopping notification publisher.'))
        publisher.stop()

        # Terminate the subprocesses
        for subproc in [notification_proc, coordinator_proc, metadata_proc,
                        rug_api_proc]:
            if not subproc:
                continue
            LOG.info(_LI('Stopping %s.'), subproc.name)
            subproc.terminate()
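
# A hypothetical console-script wiring for this entry point (not part of
# this file):
#
#   [entry_points]
#   console_scripts =
#       astara-orchestrator = astara.main:main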
@@ -1,229 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost

"""Proxy requests to Nova's metadata server.

Used by main.py
"""

import hashlib
import hmac
from six.moves.urllib import parse as urlparse
import socket

import eventlet
import eventlet.wsgi
import httplib2
from oslo_config import cfg
import webob
import webob.dec
import webob.exc
import six

from oslo_log import log as logging

from astara.common.i18n import _, _LE, _LI, _LW


LOG = logging.getLogger(__name__)
CONF = cfg.CONF

METADATA_OPTS = [
|
|
||||||
cfg.StrOpt('nova_metadata_ip', default='127.0.0.1',
|
|
||||||
help="IP address used by Nova metadata server."),
|
|
||||||
cfg.IntOpt('nova_metadata_port',
|
|
||||||
default=8775,
|
|
||||||
help="TCP Port used by Nova metadata server."),
|
|
||||||
cfg.IntOpt('astara_metadata_port',
|
|
||||||
default=9697,
|
|
||||||
help="TCP listening port used by Astara metadata proxy."),
|
|
||||||
cfg.StrOpt('neutron_metadata_proxy_shared_secret',
|
|
||||||
default='',
|
|
||||||
help='Shared secret to sign instance-id request',
|
|
||||||
deprecated_name='quantum_metadata_proxy_shared_secret')
|
|
||||||
]
|
|
||||||
CONF.register_opts(METADATA_OPTS)
|
|
||||||
|
|
||||||
|
|
||||||
class MetadataProxyHandler(object):

    """The actual handler for proxy requests."""

    @webob.dec.wsgify(RequestClass=webob.Request)
    def __call__(self, req):
        """Initial handler for an incoming `webob.Request`.

        :param req: The webob.Request to handle
        :returns: returns a valid HTTP Response or Error
        """
        try:
            LOG.debug("Request: %s", req)

            instance_id = self._get_instance_id(req)
            if instance_id:
                return self._proxy_request(instance_id, req)
            else:
                return webob.exc.HTTPNotFound()

        except Exception:
            LOG.exception(_LE("Unexpected error."))
            msg = ('An unknown error has occurred. '
                   'Please try your request again.')
            return webob.exc.HTTPInternalServerError(
                explanation=six.text_type(msg))

    def _get_instance_id(self, req):
        """Pull the X-Instance-ID out of a request.

        :param req: The webob.Request to handle
        :returns: returns the X-Instance-ID HTTP header
        """
        return req.headers.get('X-Instance-ID')

    def _proxy_request(self, instance_id, req):
        """Proxy a signed HTTP request to an instance.

        :param instance_id: ID of the Instance being proxied to
        :param req: The webob.Request to handle
        :returns: returns a valid HTTP Response or Error
        """
        headers = {
            'X-Forwarded-For': req.headers.get('X-Forwarded-For'),
            'X-Instance-ID': instance_id,
            'X-Instance-ID-Signature': self._sign_instance_id(instance_id),
            'X-Tenant-ID': req.headers.get('X-Tenant-ID')
        }

        url = urlparse.urlunsplit((
            'http',
            '%s:%s' % (cfg.CONF.nova_metadata_ip,
                       cfg.CONF.nova_metadata_port),
            req.path_info,
            req.query_string,
            ''))

        h = httplib2.Http()
        resp, content = h.request(url, headers=headers)

        if resp.status == 200:
            LOG.debug(str(resp))
            return content
        elif resp.status == 403:
            msg = _LW(
                'The remote metadata server responded with Forbidden. This '
                'response usually occurs when shared secrets do not match.'
            )
            LOG.warning(msg)
            return webob.exc.HTTPForbidden()
        elif resp.status == 404:
            return webob.exc.HTTPNotFound()
        elif resp.status == 500:
            msg = _LW('Remote metadata server experienced an'
                      ' internal server error.')
            LOG.warning(msg)
            return webob.exc.HTTPInternalServerError(
                explanation=six.text_type(msg))
        else:
            raise Exception(_('Unexpected response code: %s') % resp.status)

    def _sign_instance_id(self, instance_id):
        """Get an HMAC based on the instance_id and Neutron shared secret.

        :param instance_id: ID of the Instance being proxied to
        :returns: returns a hexadecimal string HMAC for a specific instance_id
        """
        return hmac.new(cfg.CONF.neutron_metadata_proxy_shared_secret,
                        instance_id,
                        hashlib.sha256).hexdigest()
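
    # A minimal verification sketch for the signature above (hypothetical
    # helper, not part of this module):
    #
    #   def _verify_instance_id(instance_id, signature):
    #       expected = hmac.new(
    #           cfg.CONF.neutron_metadata_proxy_shared_secret,
    #           instance_id, hashlib.sha256).hexdigest()
    #       return hmac.compare_digest(expected, signature)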


class MetadataProxy(object):

    """The proxy service."""

    def __init__(self):
        """Initialize the MetadataProxy.

        :returns: returns nothing
        """
        self.pool = eventlet.GreenPool(1000)

    def run(self, ip_address, port=cfg.CONF.astara_metadata_port):
        """Run the MetadataProxy.

        :param ip_address: the ip address to bind to for incoming requests
        :param port: the port to bind to for incoming requests
        :returns: returns nothing
        """
        app = MetadataProxyHandler()
        for i in six.moves.range(5):
            LOG.info(_LI(
                'Starting the metadata proxy on %s:%s'),
                ip_address, port
            )
            try:
                sock = eventlet.listen(
                    (ip_address, port),
                    family=socket.AF_INET6,
                    backlog=128
                )
            except socket.error as err:
                if err.errno != 99:
                    raise
                LOG.warning(
                    _LW('Could not create metadata proxy socket: %s'), err)
                LOG.warning(_LW('Sleeping %s before trying again'), i + 1)
                eventlet.sleep(i + 1)
            else:
                break
        else:
            raise RuntimeError(
                _('Could not establish metadata proxy socket on %s:%s') %
                (ip_address, port)
            )
        eventlet.wsgi.server(
            sock,
            app,
            custom_pool=self.pool,
            log=LOG)
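        # Note: the for/else above retries the bind up to five times with a
        # linearly increasing sleep; the else branch only runs when no
        # attempt succeeded. errno 99 (EADDRNOTAVAIL on Linux) typically
        # means the management address is not yet configured locally.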


def serve(ip_address):
    """Initialize the MetaData proxy.

    :param ip_address: the ip address to bind to for incoming requests
    :returns: returns nothing
    """
    MetadataProxy().run(ip_address)
@@ -1,58 +0,0 @@
# Copyright 2016 Mark McClain
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from astara_neutron.plugins import ml2_neutron_plugin as as_plugin

from neutron.plugins.ml2 import plugin as ml2_plugin
from neutron.services.l3_router.service_providers import base


class SingleNodeDriver(base.L3ServiceProvider):
    """Provider for single L3 agent routers."""
    use_integrated_agent_scheduler = False


class HaNodeDriver(base.L3ServiceProvider):
    """Provider for HA L3 agent routers."""
    use_integrated_agent_scheduler = False
    ha_support = base.MANDATORY


class Ml2Plugin(as_plugin.Ml2Plugin):
    _supported_extension_aliases = (
        as_plugin.Ml2Plugin._supported_extension_aliases +
        ['ip_allocation']
    )

    disabled_extensions = [
        "dhrouterstatus",
        "byonf"
    ]

    for ext in disabled_extensions:
        try:
            _supported_extension_aliases.remove(ext)
        except ValueError:
            pass

    def _make_port_dict(self, port, fields=None, process_extensions=True):
        res = ml2_plugin.Ml2Plugin._make_port_dict(
            self,
            port,
            fields,
            process_extensions
        )
        if not res.get('fixed_ips') and res.get('mac_address'):
            res['ip_allocation'] = 'deferred'
        return res
@@ -1,264 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


"""Listen for notifications.
"""

import Queue
import threading

from astara import commands
from astara import drivers
from astara import event
from astara.common import rpc

from oslo_config import cfg
from oslo_context import context
from oslo_log import log as logging

from astara.common.i18n import _LE

from oslo_service import service

NOTIFICATIONS_OPTS = [
    cfg.StrOpt('amqp-url',
               help='connection for AMQP server'),
    cfg.StrOpt('incoming-notifications-exchange',
               default='neutron',
               help='name of the exchange where we receive notifications'),
    cfg.StrOpt('rpc-exchange',
               default='l3_agent_fanout',
               help='name of the exchange where we receive RPC calls'),
    cfg.StrOpt('neutron-control-exchange',
               default='neutron',
               help='The name of the exchange used by Neutron for RPCs')
]
cfg.CONF.register_opts(NOTIFICATIONS_OPTS)

LOG = logging.getLogger(__name__)


def _get_tenant_id_for_message(context, payload=None):
    """Find the tenant id in the incoming message."""

    # give priority to the tenant_id in the router dict if one
    # exists in the message
    if payload:
        for key in ('router', 'port', 'subnet'):
            if key in payload and payload[key].get('tenant_id'):
                val = payload[key]['tenant_id']
                return val

    for key in ['tenant_id', 'project_id']:
        if key in context:
            val = context[key]
            # Some notifications have None as the tenant id, but we
            # can't shard on None in the dispatcher, so treat those as
            # invalid.
            if val is not None:
                return val
    return None
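
# e.g. a payload of {'router': {'tenant_id': 'abc123', ...}} resolves to
# 'abc123' even when the context carries a different project_id.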


_ROUTER_INTERFACE_NOTIFICATIONS = set([
    'router.interface.create',
    'router.interface.delete',
])

_ROUTER_INTERESTING_NOTIFICATIONS = set([
    'subnet.create.end',
    'subnet.change.end',
    'subnet.delete.end',
    'port.create.end',
    'port.change.end',
    'port.delete.end',
])


L3_AGENT_TOPIC = 'l3_agent'


class L3RPCEndpoint(object):
    """An RPC endpoint for servicing L3 Agent RPC requests"""
    def __init__(self, notification_queue):
        self.notification_queue = notification_queue

    def router_deleted(self, ctxt, router_id):
        tenant_id = _get_tenant_id_for_message(ctxt)

        resource = event.Resource('router', router_id, tenant_id)

        crud = event.DELETE
        e = event.Event(resource, crud, None)
        self.notification_queue.put((e.resource.tenant_id, e))


class NotificationsEndpoint(object):
    """An RPC endpoint for processing notifications"""
    def __init__(self, notification_queue):
        self.notification_queue = notification_queue

    def info(self, ctxt, publisher_id, event_type, payload, metadata):
        tenant_id = _get_tenant_id_for_message(ctxt, payload)
        crud = event.UPDATE
        e = None
        events = []
        if event_type.startswith('astara.command'):
            LOG.debug('received a command: %r', payload)
            crud = event.COMMAND
            if payload.get('command') == commands.POLL:
                r = event.Resource(driver='*', id='*', tenant_id='*')
                e = event.Event(
                    resource=r,
                    crud=event.POLL,
                    body={})
                self.notification_queue.put(('*', e))
                return
            else:
                # If the message does not specify a tenant, send it to
                # everyone
                tenant_id = payload.get('tenant_id', '*')
                router_id = payload.get('router_id')
                resource = event.Resource(
                    driver='*',
                    id=router_id,
                    tenant_id=tenant_id)
                events.append(event.Event(resource, crud, payload))
        else:
            for driver in drivers.enabled_drivers():
                driver_event = driver.process_notification(
                    tenant_id, event_type, payload)
                if driver_event:
                    events.append(driver_event)

        if not events:
            LOG.debug('Could not construct any events from %s /w payload: %s',
                      event_type, payload)
            return

        LOG.debug('Generated %s events from %s /w payload: %s',
                  len(events), event_type, payload)

        for e in events:
            self.notification_queue.put((e.resource.tenant_id, e))


def listen(notification_queue):
    """Create and launch the messaging service"""
    connection = rpc.MessagingService()
    connection.create_notification_listener(
        endpoints=[NotificationsEndpoint(notification_queue)],
        exchange=cfg.CONF.neutron_control_exchange,
    )
    connection.create_rpc_consumer(
        topic=L3_AGENT_TOPIC,
        endpoints=[L3RPCEndpoint(notification_queue)]
    )
    launcher = service.ServiceLauncher(cfg.CONF)
    launcher.launch_service(service=connection, workers=1)
    launcher.wait()


class Sender(object):
    "Send notification messages"

    def __init__(self, topic=None):
        self._notifier = None
        self.topic = topic

    def get_notifier(self):
        if not self._notifier:
            self._notifier = rpc.get_rpc_notifier(topic=self.topic)

    def send(self, event_type, message):
        self.get_notifier()
        ctxt = context.get_admin_context().to_dict()
        self._notifier.info(ctxt, event_type, message)


class Publisher(Sender):

    def __init__(self, topic=None):
        super(Publisher, self).__init__(topic)
        self._q = Queue.Queue()
        self._t = None

    def start(self):
        ready = threading.Event()
        self._t = threading.Thread(
            name='notification-publisher',
            target=self._send,
            args=(ready,),
        )
        self._t.setDaemon(True)
        self._t.start()
        # Block until the thread is ready for work, but use a timeout
        # in case of error in the thread.
        ready.wait(10)
        LOG.debug('started %s', self._t.getName())

    def stop(self):
        if self._t:
            LOG.debug('stopping %s', self._t.getName())
            self._q.put(None)
            self._t.join(timeout=1)
            self._t = None

    def publish(self, incoming):
        self._q.put(incoming)

    def _send(self, ready):
        """Deliver notification messages from the in-process queue
        to the appropriate topic via the AMQP service.
        """
        # set up the notifier driver ahead of time
        self.get_notifier()
        # Tell the start() method that we have set up the AMQP
        # communication stuff and are ready to do some work.
        ready.set()
        while True:
            msg = self._q.get()
            if msg is None:
                break
            LOG.debug('sending notification %r', msg)
            try:
                self.send(event_type=msg['event_type'],
                          message=msg['payload'])
            except Exception:
                LOG.exception(_LE('could not publish notification'))


class NoopPublisher(Publisher):
    """A Publisher that doesn't do anything.

    The code that publishes notifications is spread across several
    classes and cannot be easily disabled in configurations that do
    not require sending metrics to ceilometer.

    This class is used in place of the Publisher class to disable
    sending metrics without explicitly checking in various places
    across the code base.

    """

    def start(self):
        pass

    def stop(self):
        pass

    def publish(self, incoming):
        pass
@@ -1,84 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import itertools

import astara.api.nova
import astara.drivers
import astara.main
import astara.common.linux.interface
import astara.notifications
import astara.coordination
import astara.pez.manager
import astara.drivers.router
import astara.api.rug
import astara.debug

# Imported so the option lists referenced in the functions below resolve.
import astara.api.astara_client
import astara.api.config.router
import astara.api.neutron
import astara.common.hash_ring
import astara.drivers.loadbalancer
import astara.health
import astara.instance_manager
import astara.metadata
import astara.scheduler
import astara.worker


def list_opts():
    return [
        ('DEFAULT',
         itertools.chain(
             astara.api.api_opts,
             astara.api.rug.RUG_API_OPTS,
             astara.api.nova.OPTIONS,
             astara.api.neutron.neutron_opts,
             astara.api.astara_client.AK_CLIENT_OPTS,
             astara.drivers.DRIVER_OPTS,
             astara.main.MAIN_OPTS,
             astara.common.linux.interface.OPTS,
             astara.common.hash_ring.hash_opts,
             astara.api.config.router.OPTIONS,
             astara.notifications.NOTIFICATIONS_OPTS,
             astara.debug.DEBUG_OPTS,
             astara.scheduler.SCHEDULER_OPTS,
             astara.worker.WORKER_OPTS,
             astara.metadata.METADATA_OPTS,
             astara.health.HEALTH_INSPECTOR_OPTS,
             astara.instance_manager.INSTANCE_MANAGER_OPTS
         ))
    ]


def list_agent_opts():
    return [
        ('AGENT', astara.common.linux.interface.AGENT_OPTIONS)
    ]


def list_coordination_opts():
    return [
        ('coordination', astara.coordination.COORD_OPTS)
    ]


def list_ceilometer_opts():
    return [
        ('ceilometer', astara.main.CEILOMETER_OPTS)
    ]


def list_router_opts():
    return [
        ('router', astara.drivers.router.ROUTER_OPTS)
    ]


def list_loadbalancer_opts():
    return [
        ('loadbalancer', astara.drivers.loadbalancer.LOADBALANCER_OPTS)
    ]


def list_pez_opts():
    return [
        ('pez', astara.pez.manager.PEZ_OPTIONS)
    ]
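

# These list_*_opts functions follow the usual oslo.config sample-generation
# pattern; a hypothetical setup.cfg wiring (not shown in this diff):
#
#   oslo.config.opts =
#       astara = astara.opts:list_opts
#       astara.ceilometer = astara.opts:list_ceilometer_opts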
@@ -1,109 +0,0 @@
# Copyright 2015 Akanda, Inc
#
# Author: Akanda, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import threading

from oslo_config import cfg

from astara.common.i18n import _
from astara.pez import pool

CONF = cfg.CONF

PEZ_OPTIONS = [
    cfg.IntOpt('pool_size', default=1,
               help=_('How many pre-allocated hot standby nodes to keep '
                      'in the pez pool.')),

    # NOTE(adam_g): We should consider how these get configured for when
    # we support multiple drivers. {router, lbaas}_image_uuid?
    cfg.StrOpt('image_uuid',
               help=_('Image uuid to boot.')),
    cfg.StrOpt('flavor',
               help=_('Nova flavor to boot')),
    cfg.StrOpt('rpc_topic', default='astara-pez'),

]

CONF.register_group(cfg.OptGroup(name='pez'))
CONF.register_opts(PEZ_OPTIONS, group='pez')


CONF.import_opt('host', 'astara.main')
CONF.import_opt('management_network_id', 'astara.api.neutron')


class PezManager(object):
    """The RPC server-side of the Pez service"""
    def __init__(self):
        self.image_uuid = CONF.pez.image_uuid
        self.flavor = CONF.pez.flavor
        self.mgt_net_id = CONF.management_network_id
        self.pool_size = CONF.pez.pool_size
        self.pool_mgr = pool.PezPoolManager(
            self.image_uuid,
            self.flavor,
            self.pool_size,
            self.mgt_net_id)

    def start(self):
        pooler_thread = threading.Thread(target=self.pool_mgr.start)
        pooler_thread.start()

    def get_instance(self, context, resource_type, name, management_port,
                     instance_ports):
        """Obtains an instance from the pool for a client.

        This obtains an instance from the pool manager and returns enough
        data about it to the client that the client can create an
        InstanceInfo object. We purposely avoid the need to introduce
        versioned objects (for now) by serializing everything into a dict.
        This may change in the future.

        :param context: oslo_context admin context object
        :param resource_type: The str driver name of the resource
        :param name: The requested name of the instance
        :param management_port: The management port dict that was created for
                                the instance by the RUG.
        :param instance_ports: A list of dicts of ports to be attached to
                               instance upon reservation.

        :returns: A dict containing the following:
            - 'id': The id of the reserved instance
            - 'name': The name of the reserved instance
            - 'image_uuid': The image id of the reserved instance
            - 'management_port': A serialized dict representing the
                                 management Neutron port.
            - 'instance_port': A list of serialized instance port
                               dicts that the caller requested be
                               attached.

        """
        instance, mgt_port, instance_ports = self.pool_mgr.get_instance(
            resource_type=resource_type, name=name,
            management_port=management_port, instance_ports=instance_ports)

        return {
            'id': instance.id,
            'resource_type': resource_type,
            'name': instance.name,
            'image_uuid': instance.image['id'],
            'management_port': mgt_port.to_dict(),
            'instance_ports': [
                p.to_dict() for p in instance_ports
            ],
        }
@@ -1,327 +0,0 @@
# Copyright 2015 Akanda, Inc
#
# Author: Akanda, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy
import time

from six.moves import range
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils

from astara.common.i18n import _LE, _LI
from astara import drivers
from astara.api import neutron
from astara.api import nova

LOG = logging.getLogger(__name__)

# Unused instances are launched with a known name
INSTANCE_FREE = 'ASTARA:UNUSED:%(resource_name)s'

# When an instance is reserved, it's renamed accordingly
# TODO(adam_g): We ideally want to make all instance naming template-able
IN_USE_TEMPLATE = 'ak-%(resource_name)s-%(resource_id)s'
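
# e.g. INSTANCE_FREE % {'resource_name': 'router'} -> 'ASTARA:UNUSED:router'
# and IN_USE_TEMPLATE % {'resource_name': 'router', 'resource_id': rid}
# -> 'ak-router-<rid>'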


# Nova states
ACTIVE = 'active'
ERROR = 'error'
DELETING = 'deleting'

PEZ_LOCK = 'astara-pez'


class PezPoolExhausted(Exception):
    pass


class WorkerContext(object):
    """Holds resources owned by the worker and used by the Automaton.
    """

    def __init__(self):
        self.nova_client = nova.Nova(cfg.CONF)
        self.neutron_client = neutron.Neutron(cfg.CONF)


class PezPoolManager(object):
    """Astara Pez Pool Manager

    This manages a pool of instances of a configurable size. All instance
    state is managed and tracked in Nova itself.

    Each iteration of the manager's main loop will scan the service tenant's
    booted instances. Instances named INSTANCE_FREE (defined above) will be
    considered unused. If any of these instances are in ERROR state or are
    outdated in some way (ie, its image is not the currently configured
    image), they will be deleted from the pool and the manager will replenish
    the deficit on its next tick.

    Instances may be reserved for use via the get_instance() method. This
    simply renames the instance according to the ID of the thing that it will
    host and returns it to the caller. At this point, Pez no longer cares
    about the instance and will refill its position in the pool on its next
    tick.

    The calling service is responsible for managing the lifecycle of the
    returned instance. This includes attaching required ports, ensuring
    deletion/cleanup, etc. The instance will not be returned to the pool when
    it is no longer in use.
    """
    def __init__(self, image_uuid, flavor, pool_size, mgt_net_id):
        """
        :param image_uuid: UUID of backing image for managed instances.
        :param flavor: nova flavor id to be used for managed instances.
        :param mgt_net_id: UUID of management network. Each instance in the
                           pool is initially booted with a single port on this
                           network
        :param pool_size: The size of the pool
        """
        self.image_uuid = image_uuid
        self.flavor = flavor
        self.mgt_net_id = mgt_net_id
        self.pool_size = int(pool_size)
        self.poll_interval = 3
        self.ctxt = WorkerContext()
        self.boot_timeout = 120
        self.delete_timeout = 30

        # used to track boot/delete timeouts
        self._delete_counters = {}
        self._boot_counters = {}
        self.load_driver_config()

    def load_driver_config(self):
        self.images = {}
        self.flavors = {}
        self.drivers = [d for d in drivers.enabled_drivers()]
        for driver in self.drivers:
            self.images[driver.RESOURCE_NAME] = getattr(
                cfg.CONF, driver.RESOURCE_NAME).image_uuid
            self.flavors[driver.RESOURCE_NAME] = getattr(
                cfg.CONF, driver.RESOURCE_NAME).instance_flavor

    @lockutils.synchronized(PEZ_LOCK)
    def delete_instance(self, instance_uuid):
        LOG.info(_LI('Deleting instance %s.'), instance_uuid)
        self.ctxt.nova_client.client.servers.delete(instance_uuid)
        self._delete_counters[instance_uuid] = timeutils.utcnow()

    def _check_err_instances(self, pools):
        """Scans the pool and deletes any instances in error state"""
        for resource, pool in copy.copy(pools).items():
            err_instances = [i for i in pool if i.status == ERROR]
            for err_inst in err_instances:
                LOG.error(_LE(
                    'Instance %s is in %s state, deleting.'),
                    err_inst.id, ERROR)
                del_instance = self.delete_instance(err_inst.id)
                i = pool.index(err_inst)
                pools[resource][i] = del_instance

    def _check_del_instances(self, pools):
        """Scans the pool for deleted instances and checks deletion timers"""
        # XXX: What do we do with instances stuck in deleting?
        # For now, just return stuck instances to caller and we can figure
        # out what to do with them later.
        stuck_instances = []
        del_instances = []
        for resource, pool in pools.items():
            del_instances += [i for i in pool if i.status == DELETING]

        # clean out counters for old instances that have been deleted entirely
        if self._delete_counters:
            del_instance_ids = [i.id for i in del_instances]
            for inst_id in copy.copy(self._delete_counters):
                if inst_id not in del_instance_ids:
                    self._delete_counters.pop(inst_id)

        for del_inst in del_instances:
            if del_inst.id not in self._delete_counters:
                self._delete_counters[del_inst.id] = timeutils.utcnow()
            else:
                if timeutils.is_older_than(self._delete_counters[del_inst.id],
                                           self.delete_timeout):
                    LOG.error(_LE(
                        'Instance %s is stuck in %s for more than %s '
                        'seconds.'), del_inst.id, DELETING,
                        self.delete_timeout)
                    stuck_instances.append(del_inst)
        return stuck_instances

    def _check_outdated_instances(self, pools):
        outdated_instances = []
        for resource, pool in pools.items():
            for server in pool:
                if server.image['id'] != str(self.images[resource]):
                    LOG.info(_LI(
                        'Deleting instance %s with outdated image, '
                        '%s != %s'),
                        server.id, server.image['id'],
                        self.images[resource])
                    outdated_instances.append(server)
                elif server.flavor['id'] != str(self.flavors[resource]):
                    LOG.info(_LI(
                        'Deleting instance %s with outdated flavor, '
                        '%s != %s'),
                        server.id, server.flavor['id'],
                        self.flavors[resource])
                    outdated_instances.append(server)

        if outdated_instances:
            [self.delete_instance(i.id) for i in outdated_instances]

    def load_pools(self):
        pools = {}
        [pools.update({d.RESOURCE_NAME: []}) for d in self.drivers]
        for server in self.ctxt.nova_client.client.servers.list():
            for d in self.drivers:
                instance_free = INSTANCE_FREE % {
                    'resource_name': d.RESOURCE_NAME
                }
                if server.name.startswith(instance_free):
                    pools[d.RESOURCE_NAME].append(server)
        return pools

    @property
    def unused_instances(self):
        """Determines the size and contents of the current instance pool

        We list all nova servers according to the naming template.

        Any instances in an error state are deleted and will be replenished
        on the next run of the main loop.

        We time instance deletion and any servers that appear to be stuck in
        a deleted state will be reported as such. TODO(adam_g): We should
        figure out what to do with stuck instances.

        Any instances that appear to be outdated (ie, the server's image or
        flavor does not match what's configured) will be deleted and
        replenished on the next tick of the main loop.

        :returns: a dict keyed by driver name, each value a list of nova
                  server objects that represents the current resources pool.
        """
        pools = self.load_pools()
        self._check_err_instances(pools)
        self._check_del_instances(pools)
        self._check_outdated_instances(pools)
        return pools

    def launch_instances(self, count, driver):
        LOG.info(_LI(
            'Launching %s %s instances.'), driver.RESOURCE_NAME, count)
        for i in range(0, count):
            # NOTE: Use a fake UUID so astara-neutron's name matching still
            # catches this port as an astara port. This can be avoided if
            # we use a mgt security group in the future.
            mgt_port = self.ctxt.neutron_client.create_management_port(
                '00000000-0000-0000-0000-000000000000')
            nics = [{
                'net-id': mgt_port.network_id,
                'v4-fixed-ip': '',
                'port-id': mgt_port.id}]

            instance_name = INSTANCE_FREE % {
                'resource_name': driver.RESOURCE_NAME
            }
            image = self.images[driver.RESOURCE_NAME]
            flavor = self.flavors[driver.RESOURCE_NAME]

            self.ctxt.nova_client.client.servers.create(
                name=instance_name,
                image=image,
                flavor=flavor,
                nics=nics,
                config_drive=True,
                userdata=nova.format_userdata(mgt_port),
            )

    @lockutils.synchronized(PEZ_LOCK)
    def get_instance(self, resource_type, name, management_port=None,
                     instance_ports=None):
        """Get an instance from the pool.

        This involves popping it out of the pool, updating its name and
        attaching any ports.

        :param resource_type: The str driver name of the resource
        :param name: The requested name of the instance
        :param management_port: The management port dict that was created for
                                the instance by the RUG.
        :param instance_ports: A list of dicts of ports to be attached to
                               instance upon reservation.

        :returns: A tuple containing (novaclient server object for the
                  reserved server, a port object for the management port,
                  a list of port objects that were attached to the server)
        """
        instance_ports = instance_ports or []

        try:
            server = self.unused_instances[resource_type][0]
        except IndexError:
            raise PezPoolExhausted()

        LOG.info(_LI('Renaming instance %s to %s'), server.name, name)
        server = self.ctxt.nova_client.client.servers.update(
            server, name=name)

        for port in instance_ports:
            LOG.info(_LI('Attaching instance port %s to %s (%s)'),
                     port['id'], server.name, server.id)
            self.ctxt.nova_client.client.servers.interface_attach(
                server=server, port_id=port['id'], net_id=None,
                fixed_ip=None)

        mgt_port, instance_ports = (
            self.ctxt.neutron_client.get_ports_for_instance(server.id)
        )

        return (
            self.ctxt.nova_client.client.servers.get(server.id),
            mgt_port,
            instance_ports,
        )

    def start(self):
        """The pool manager main loop.

        The bulk of the algorithm exists in the 'unused_instances' property.
        This main loop simply checks for a deficit in the pool and dispatches
        a 'launch_instances' call when a deficit needs to be filled.
        """
        while True:
            cur_pools = self.unused_instances
            report = []
            for driver in self.drivers:
                report.append(
                    '%s:%s/%s' %
                    (driver.RESOURCE_NAME,
                     len(cur_pools[driver.RESOURCE_NAME]),
                     self.pool_size))
            LOG.debug('Current pools: %s' % ' '.join(report))

            for driver in self.drivers:
                cur_pool = cur_pools[driver.RESOURCE_NAME]
                deficit = self.pool_size - len(cur_pool)
                if deficit:
                    LOG.info(_LI(
                        'Need to launch %s more %s instance(s).'),
                        deficit, driver.RESOURCE_NAME)
                    self.launch_instances(
                        driver=driver, count=deficit)
            time.sleep(self.poll_interval)
@@ -1,53 +0,0 @@
# Copyright 2015 Akanda, Inc
#
# Author: Akanda, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from astara.common import rpc

from oslo_context import context


class AstaraPezAPI(object):
    """Client side of the Astara Pez RPC API.
    """
    def __init__(self, rpc_topic):
        self.topic = rpc_topic
        self.client = rpc.get_rpc_client(
            topic=self.topic)
        self.context = context.get_admin_context().to_dict()

    def get_instance(self, resource_type, name, management_port,
                     instance_ports):
        """Reserves an instance from the Pez service. We can instruct Pez to
        attach any required instance ports during the reservation process.
        The dict returned here should be enough for the caller to construct
        an InstanceInfo object. Note that the port information consists of
        serialized astara.api.neutron.Port objects that can be deserialized
        by the caller during creation of InstanceInfo.

        :param resource_type: The str name of the driver that manages the
                              resource (ie, loadbalancer)
        :param name: The requested name of the instance
        :param management_port: The management port dict that was created for
                                the instance by the RUG.
        :param instance_ports: A list of dicts of ports to be attached to
                               instance upon reservation.

        """
        cctxt = self.client.prepare(topic=self.topic)
        return cctxt.call(
            self.context, 'get_instance', resource_type=resource_type,
            name=name, management_port=management_port,
            instance_ports=instance_ports)
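
# A minimal usage sketch (values are illustrative):
#
#   pez = AstaraPezAPI(rpc_topic='astara-pez')
#   reservation = pez.get_instance(
#       resource_type='router',
#       name='ak-router-1234',
#       management_port=mgt_port.to_dict(),
#       instance_ports=[p.to_dict() for p in ports])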
@@ -1,65 +0,0 @@
# Copyright 2015 Akanda, Inc
#
# Author: Akanda, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import logging
import sys

from astara.common.i18n import _LI
from astara.common import config as ak_cfg

from astara.common import rpc
from astara.pez import manager

from oslo_config import cfg
from oslo_log import log
from oslo_service import service

CONF = cfg.CONF

LOG = log.getLogger(__name__)


class PezService(service.Service):
    """Bootstraps a connection for the manager to the messaging
    queue and launches the pez service
    """
    def __init__(self):
        super(PezService, self).__init__()
        self.manager = manager.PezManager()
        self.manager.start()
        self._rpc_connection = None
        self.rpcserver = None

    def start(self):
        super(PezService, self).start()
        self._rpc_connection = rpc.Connection()
        self._rpc_connection.create_rpc_consumer(
            topic=cfg.CONF.pez.rpc_topic,
            endpoints=[self.manager])
        self._rpc_connection.consume_in_threads()
        self._rpc_connection.close()


def main(argv=sys.argv[1:]):
    ak_cfg.parse_config(argv)
    log.setup(CONF, 'astara-pez')
    CONF.log_opt_values(LOG, logging.INFO)

    LOG.info(_LI("Starting Astara Pez service."))

    mgr = PezService()
    launcher = service.launch(CONF, mgr)
    launcher.wait()
@@ -1,79 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


"""Populate the workers with the existing routers
"""

import threading

from oslo_log import log as logging

from astara import event
from astara import drivers

LOG = logging.getLogger(__name__)


def repopulate():
    """Called from workers after a rebalance to find newly owned resources"""
    resources = []
    for driver in drivers.enabled_drivers():
        resources += driver.pre_populate_hook()
    return resources


def _pre_populate_workers(scheduler):
    """Loops through enabled drivers triggering each driver's
    pre_populate_hook, which is a static method for each driver.
    """
    for driver in drivers.enabled_drivers():
        resources = driver.pre_populate_hook()

        if not resources:
            # just skip to the next driver; its pre_populate_hook already
            # handled the exception or error and logged it
            LOG.debug('No %s resources found to pre-populate',
                      driver.RESOURCE_NAME)
            continue

        LOG.debug('Start pre-populating %d workers for the %s driver',
                  len(resources),
                  driver.RESOURCE_NAME)

        for resource in resources:
            message = event.Event(
                resource=resource,
                crud=event.POLL,
                body={}
            )
            scheduler.handle_message(resource.tenant_id, message)

||||||
def pre_populate_workers(scheduler):
|
|
||||||
"""Start the pre-populating task
|
|
||||||
"""
|
|
||||||
|
|
||||||
t = threading.Thread(
|
|
||||||
target=_pre_populate_workers,
|
|
||||||
args=(scheduler,),
|
|
||||||
name='PrePopulateWorkers'
|
|
||||||
)
|
|
||||||
|
|
||||||
t.setDaemon(True)
|
|
||||||
t.start()
|
|
||||||
return t
|
|
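
A minimal sketch (not from the retired tree) of the daemon-thread pattern pre_populate_workers() uses; the _task function and the 'router' argument are invented for illustration.

    import threading


    def _task(name):
        print('pre-populating %s resources' % name)


    t = threading.Thread(target=_task, args=('router',),
                         name='PrePopulateWorkers')
    t.setDaemon(True)   # the thread must not block interpreter shutdown
    t.start()
    t.join()            # the real code returns t instead of joining
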
@ -1,166 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


"""Scheduler to send messages for a given router to the correct worker.
"""
import six
import multiprocessing
import uuid

from six.moves import range
from oslo_config import cfg
from oslo_log import log as logging

from astara import commands
from astara.common.i18n import _, _LE, _LI, _LW
from astara import daemon


LOG = logging.getLogger(__name__)
CONF = cfg.CONF
SCHEDULER_OPTS = [
    cfg.IntOpt('num_worker_processes',
               default=16,
               help='the number of worker processes to run'),
]
CONF.register_opts(SCHEDULER_OPTS)


def _worker(inq, worker_factory, scheduler, proc_name):
    """Scheduler's worker process main function.
    """
    daemon.ignore_signals()
    LOG.debug('starting worker process')
    worker = worker_factory(scheduler=scheduler, proc_name=proc_name)
    while True:
        try:
            data = inq.get()
        except IOError:
            # NOTE(dhellmann): Likely caused by a signal arriving
            # during processing, especially SIGCHLD.
            data = None
        if data is None:
            target, message = None, None
        else:
            target, message = data
        try:
            worker.handle_message(target, message)
        except Exception:
            LOG.exception(_LE('Error processing data %s'),
                          six.text_type(data))
        if data is None:
            break
    LOG.debug('exiting')


class Dispatcher(object):
    """Choose one of the workers to receive a message.

    The current implementation uses the least significant bits of the
    UUID as an integer to shard across the worker pool.
    """

    def __init__(self, workers):
        self.workers = workers

    def pick_workers(self, target):
        """Returns the workers that match the target.
        """
        target = target.strip() if target else None
        # If we get any wildcard target, send the message to all of
        # the workers.
        if target in commands.WILDCARDS:
            return self.workers[:]
        try:
            idx = uuid.UUID(target).int % len(self.workers)
        except (TypeError, ValueError) as e:
            LOG.warning(_LW(
                'Could not determine UUID from %r: %s, ignoring message'),
                target, e,
            )
            return []
        else:
            LOG.debug('target %s maps to worker %s', target, idx)
            return [self.workers[idx]]


class Scheduler(object):
    """Manages a worker pool and redistributes messages.
    """

    def __init__(self, worker_factory):
        """
        :param num_workers: The number of worker processes to create.
        :type num_workers: int
        :param worker_func: Callable for the worker processes to use
                            when a notification is received.
        :type worker_factory: Callable to create Worker instances.
        """
        self.num_workers = cfg.CONF.num_worker_processes
        if self.num_workers < 1:
            raise ValueError(_('Need at least one worker process'))
        self.workers = []
        # Create several worker processes, each with its own queue for
        # sending it instructions based on the notifications we get
        # when someone calls our handle_message() method.
        for i in range(self.num_workers):
            wq = multiprocessing.JoinableQueue()
            name = 'p%02d' % i
            worker = multiprocessing.Process(
                target=_worker,
                kwargs={
                    'inq': wq,
                    'worker_factory': worker_factory,
                    'scheduler': self,
                    'proc_name': name,
                },
                name=name,
            )
            self.workers.append({
                'queue': wq,
                'worker': worker,
            })
        self.dispatcher = Dispatcher(self.workers)
        for w in self.workers:
            w['worker'].start()

    def stop(self):
        """Shutdown all workers cleanly.
        """
        LOG.info('shutting down scheduler')
        # Send a poison pill to all of the workers
        for w in self.workers:
            LOG.debug('sending stop message to %s', w['worker'].name)
            w['queue'].put(None)
        # Wait for the workers to finish and be ready to exit.
        for w in self.workers:
            LOG.debug('waiting for queue for %s', w['worker'].name)
            w['queue'].close()
            LOG.debug('waiting for worker %s', w['worker'].name)
            w['worker'].join()
        LOG.info(_LI('scheduler shutdown'))

    def handle_message(self, target, message):
        """Call this method when a new notification message is delivered. The
        scheduler will distribute it to the appropriate worker.

        :param target: UUID of the resource that needs to get the message.
        :type target: uuid
        :param message: Dictionary full of data to send to the target.
        :type message: dict
        """
        for w in self.dispatcher.pick_workers(target):
            w['queue'].put((target, message))
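
A minimal sketch (not from the retired tree) of the sharding rule Dispatcher.pick_workers() applies above: the target UUID, reduced to an integer and taken modulo the pool size, deterministically selects one worker. The UUID below is an arbitrary example value.

    import uuid

    num_workers = 16  # matches the num_worker_processes default above
    target = '9f6ff6ea-476e-4d2a-8818-54e54f0ed06c'  # arbitrary example id
    idx = uuid.UUID(target).int % num_workers
    print('worker index:', idx)  # the same target always maps to the same worker
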
565  astara/state.py
@ -1,565 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


"""State machine for managing a router.

"""

# See state machine diagram and description:
# http://akanda.readthedocs.org/en/latest/rug.html#state-machine-workers-and-router-lifecycle

import collections
import itertools

from astara.common.i18n import _LE, _LI, _LW
from astara.event import (POLL, CREATE, READ, UPDATE, DELETE, REBUILD,
                          CLUSTER_REBUILD)
from astara import instance_manager
from astara.drivers import states


class StateParams(object):
    def __init__(self, driver, instance, queue, bandwidth_callback,
                 reboot_error_threshold):
        self.resource = driver
        self.instance = instance
        self.log = driver.log
        self.queue = queue
        self.bandwidth_callback = bandwidth_callback
        self.reboot_error_threshold = reboot_error_threshold
        self.image_uuid = driver.image_uuid


class State(object):

    def __init__(self, params):
        self.params = params

    @property
    def log(self):
        return self.params.log

    @property
    def queue(self):
        return self.params.queue

    @property
    def instance(self):
        return self.params.instance

    @property
    def image_uuid(self):
        return self.params.image_uuid

    @property
    def name(self):
        return self.__class__.__name__

    def __str__(self):
        return self.name

    def execute(self, action, worker_context):
        return action

    def transition(self, action, worker_context):
        return self


class CalcAction(State):
    def execute(self, action, worker_context):
        queue = self.queue
        if DELETE in queue:
            self.params.resource.log.debug('shortcutting to delete')
            return DELETE

        if (self.params.instance.state == states.DEGRADED and
                CLUSTER_REBUILD not in queue):
            self.params.resource.log.debug(
                'Scheduling a rebuild on degraded cluster')
            queue.append(CLUSTER_REBUILD)

        while queue:
            self.params.resource.log.debug(
                'action = %s, len(queue) = %s, queue = %s',
                action,
                len(queue),
                list(itertools.islice(queue, 0, 60))
            )

            if action == UPDATE and queue[0] == CREATE:
                # upgrade to CREATE from UPDATE by taking the next
                # item from the queue
                self.params.resource.log.debug(
                    'upgrading from update to create')
                action = queue.popleft()
                continue

            elif (action in (CREATE, UPDATE, CLUSTER_REBUILD) and
                    queue[0] == REBUILD):
                # upgrade to REBUILD from CREATE/UPDATE by taking the next
                # item from the queue
                self.params.resource.log.debug('upgrading from %s to rebuild',
                                               action)
                action = queue.popleft()
                continue

            elif action == CREATE and queue[0] == UPDATE:
                # CREATE implies an UPDATE so eat the update event
                # without changing the action
                self.params.resource.log.debug('merging create and update')
                queue.popleft()
                continue

            elif action and queue[0] == POLL:
                # Throw away a poll following any other valid action,
                # because a create or update will automatically handle
                # the poll and repeated polls are not needed.
                self.params.resource.log.debug(
                    'discarding poll event following action %s',
                    action)
                queue.popleft()
                continue

            elif action and action != POLL and action != queue[0]:
                # We are not polling and the next action is something
                # different from what we are doing, so just do the
                # current action.
                self.params.resource.log.debug('done collapsing events')
                break

            self.params.resource.log.debug('popping action from queue')
            action = queue.popleft()

        return action

    def transition(self, action, worker_context):
        if self.instance.state == states.GONE:
            next_action = StopInstance(self.params)
        elif action == DELETE:
            next_action = StopInstance(self.params)
        elif action == REBUILD:
            next_action = RebuildInstance(self.params)
        elif (action == CLUSTER_REBUILD and
                self.instance.state in (states.DEGRADED, states.DOWN)):
            next_action = CreateInstance(self.params)
        elif self.instance.state == states.BOOTING:
            next_action = CheckBoot(self.params)
        elif self.instance.state in (states.DOWN, states.DEGRADED):
            next_action = CreateInstance(self.params)
        else:
            next_action = Alive(self.params)

        if self.instance.state == states.ERROR:
            if action == POLL:
                # If the selected action is to poll, and we are in an
                # error state, then an event slipped through the
                # filter in send_message() and we should ignore it
                # here.
                next_action = self
            elif self.instance.error_cooldown:
                self.params.resource.log.debug(
                    'Resource is in ERROR cooldown, '
                    'ignoring event.'
                )
                next_action = self
            else:
                # If this isn't a POLL, and the configured `error_cooldown`
                # has passed, clear the error status before doing what we
                # really want to do.
                next_action = ClearError(self.params, next_action)
        return next_action


class PushUpdate(State):
    """Put an update instruction on the queue for the state machine.
    """
    def execute(self, action, worker_context):
        # Put the action back on the front of the queue.
        self.queue.appendleft(UPDATE)
        return action

    def transition(self, action, worker_context):
        return CalcAction(self.params)


class ClearError(State):
    """Remove the error state from the instance.
    """

    def __init__(self, params, next_state=None):
        super(ClearError, self).__init__(params)
        self._next_state = next_state

    def execute(self, action, worker_context):
        # If we are being told explicitly to update the instance, we should
        # ignore any error status.
        self.instance.clear_error(worker_context)
        return action

    def transition(self, action, worker_context):
        if self._next_state:
            return self._next_state
        return CalcAction(self.params)


class Alive(State):
    def execute(self, action, worker_context):
        self.instance.update_state(worker_context)
        return action

    def transition(self, action, worker_context):
        if self.instance.state == states.GONE:
            return StopInstance(self.params)
        elif self.instance.state in (states.DOWN, states.DEGRADED):
            return CreateInstance(self.params)
        elif action == POLL and \
                self.instance.state == states.CONFIGURED:
            return CalcAction(self.params)
        elif action == READ and \
                self.instance.state == states.CONFIGURED:
            return ReadStats(self.params)
        else:
            return ConfigureInstance(self.params)


class CreateInstance(State):
    def execute(self, action, worker_context):
        # Check for a loop where the resource keeps failing to boot or
        # accept the configuration.
        if (not self.instance.state == states.DEGRADED and
                self.instance.attempts >= self.params.reboot_error_threshold):
            self.params.resource.log.info(_LI(
                'Dropping out of boot loop after %s trials'),
                self.instance.attempts)
            self.instance.set_error(worker_context)
            return action
        self.instance.boot(worker_context)
        self.params.resource.log.debug('CreateInstance attempt %s/%s',
                                       self.instance.attempts,
                                       self.params.reboot_error_threshold)
        return action

    def transition(self, action, worker_context):
        if self.instance.state == states.GONE:
            return StopInstance(self.params)
        elif self.instance.state == states.ERROR:
            return CalcAction(self.params)
        elif self.instance.state == states.DOWN:
            return CreateInstance(self.params)
        return CheckBoot(self.params)


class CheckBoot(State):
    def execute(self, action, worker_context):
        self.instance.update_state(worker_context)
        self.params.resource.log.debug(
            'Instance is %s' % self.instance.state.upper())
        # Put the action back on the front of the queue so that we can yield
        # and handle it in another state machine traversal (which will proceed
        # from CalcAction directly to CheckBoot).
        if self.instance.state not in (states.DOWN,
                                       states.GONE):
            self.queue.appendleft(action)
        return action

    def transition(self, action, worker_context):
        if self.instance.state == states.REPLUG:
            return ReplugInstance(self.params)
        if self.instance.state in (states.DOWN,
                                   states.GONE):
            return StopInstance(self.params)
        if self.instance.state == states.UP:
            return ConfigureInstance(self.params)
        return CalcAction(self.params)


class ReplugInstance(State):
    def execute(self, action, worker_context):
        self.instance.replug(worker_context)
        return action

    def transition(self, action, worker_context):
        if self.instance.state == states.RESTART:
            return StopInstance(self.params)
        return ConfigureInstance(self.params)


class StopInstance(State):
    def execute(self, action, worker_context):
        self.instance.stop(worker_context)
        if self.instance.state == states.GONE:
            # Force the action to delete since the router isn't there
            # any more.
            return DELETE
        return action

    def transition(self, action, worker_context):
        if self.instance.state not in (states.DOWN,
                                       states.GONE):
            return self
        if self.instance.state == states.GONE:
            return Exit(self.params)
        if action == DELETE:
            return Exit(self.params)
        return CreateInstance(self.params)


class RebuildInstance(State):
    def execute(self, action, worker_context):
        self.instance.stop(worker_context)
        if self.instance.state == states.GONE:
            # Force the action to delete since the router isn't there
            # any more.
            return DELETE
        # Re-create the instance
        self.instance.reset_boot_counter()
        return CREATE

    def transition(self, action, worker_context):
        if self.instance.state not in (states.DOWN,
                                       states.GONE):
            return self
        if self.instance.state == states.GONE:
            return Exit(self.params)
        return CreateInstance(self.params)


class Exit(State):
    pass


class ConfigureInstance(State):
    def execute(self, action, worker_context):
        self.instance.configure(worker_context)
        if self.instance.state == states.CONFIGURED:
            if action == READ:
                return READ
            else:
                return POLL
        else:
            return action

    def transition(self, action, worker_context):
        if self.instance.state == states.REPLUG:
            return ReplugInstance(self.params)
        if self.instance.state in (states.RESTART,
                                   states.DOWN,
                                   states.GONE):
            return StopInstance(self.params)
        if self.instance.state == states.UP:
            return PushUpdate(self.params)
        # Below here, assume instance.state == states.CONFIGURED
        if action == READ:
            return ReadStats(self.params)
        return CalcAction(self.params)


class ReadStats(State):
    def execute(self, action, worker_context):
        stats = self.instance.read_stats()
        self.params.bandwidth_callback(stats)
        return POLL

    def transition(self, action, worker_context):
        return CalcAction(self.params)


class Automaton(object):
    def __init__(self, resource, tenant_id,
                 delete_callback, bandwidth_callback,
                 worker_context, queue_warning_threshold,
                 reboot_error_threshold):
        """
        :param resource: An instantiated driver object for the managed resource
        :param tenant_id: UUID of the tenant being managed
        :type tenant_id: str
        :param delete_callback: Invoked when the Automaton decides
                                the router should be deleted.
        :type delete_callback: callable
        :param bandwidth_callback: To be invoked when the Automaton needs to
                                   report how much bandwidth a router has used.
        :type bandwidth_callback: callable taking router_id and bandwidth
                                  info dict
        :param worker_context: a WorkerContext
        :type worker_context: WorkerContext
        :param queue_warning_threshold: Limit after which adding items
                                        to the queue triggers a warning.
        :type queue_warning_threshold: int
        :param reboot_error_threshold: Limit after which trying to reboot
                                       the router puts it into an error state.
        :type reboot_error_threshold: int
        """
        self.resource = resource
        self.tenant_id = tenant_id
        self._delete_callback = delete_callback
        self._queue_warning_threshold = queue_warning_threshold
        self._reboot_error_threshold = reboot_error_threshold
        self.deleted = False
        self.bandwidth_callback = bandwidth_callback
        self._queue = collections.deque()

        self.action = POLL
        self.instance = instance_manager.InstanceManager(self.resource,
                                                         worker_context)
        self._state_params = StateParams(
            self.resource,
            self.instance,
            self._queue,
            self.bandwidth_callback,
            self._reboot_error_threshold,
        )
        self.state = CalcAction(self._state_params)

    @property
    def resource_id(self):
        """Returns the ID of the managed resource"""
        return self.resource.id

    def service_shutdown(self):
        "Called when the parent process is being stopped"

    def _do_delete(self):
        if self._delete_callback is not None:
            self.resource.log.debug('calling delete callback')
            self._delete_callback()
            # Avoid calling the delete callback more than once.
            self._delete_callback = None
        # Remember that this router has been deleted
        self.deleted = True

    def update(self, worker_context):
        "Called when the router config should be changed"
        while self._queue:
            while True:
                if self.deleted:
                    self.resource.log.debug(
                        'skipping update because the router is being deleted'
                    )
                    return

                try:
                    self.resource.log.debug(
                        '%s.execute(%s) instance.state=%s',
                        self.state,
                        self.action,
                        self.instance.state)
                    self.action = self.state.execute(
                        self.action,
                        worker_context,
                    )
                    self.resource.log.debug(
                        '%s.execute -> %s instance.state=%s',
                        self.state,
                        self.action,
                        self.instance.state)
                except:
                    self.resource.log.exception(
                        _LE('%s.execute() failed for action: %s'),
                        self.state,
                        self.action
                    )

                old_state = self.state
                self.state = self.state.transition(
                    self.action,
                    worker_context,
                )
                self.resource.log.debug(
                    '%s.transition(%s) -> %s instance.state=%s',
                    old_state,
                    self.action,
                    self.state,
                    self.instance.state
                )

                # Yield control each time we stop to figure out what
                # to do next.
                if isinstance(self.state, CalcAction):
                    return  # yield

                # We have reached the exit state, so the router has
                # been deleted somehow.
                if isinstance(self.state, Exit):
                    self._do_delete()
                    return

    def send_message(self, message):
        "Called when the worker put a message in the state machine queue"
        if self.deleted:
            # Ignore any more incoming messages
            self.resource.log.debug(
                'deleted state machine, ignoring incoming message %s',
                message)
            return False

        # NOTE(dhellmann): This check is largely redundant with the
        # one in CalcAction.transition() but it may allow us to avoid
        # adding poll events to the queue at all, and therefore cut
        # down on the number of times a worker thread wakes up to
        # process something on a router that isn't going to actually
        # do any work.
        if message.crud == POLL and \
                self.instance.state == states.ERROR:
            self.resource.log.info(_LI(
                'Resource status is ERROR, ignoring POLL message: %s'),
                message,
            )
            return False

        if message.crud == REBUILD:
            if message.body.get('image_uuid'):
                self.resource.log.info(_LI(
                    'Resource is being REBUILT with custom image %s'),
                    message.body['image_uuid']
                )
                self.image_uuid = message.body['image_uuid']
            else:
                self.image_uuid = self.resource.image_uuid

        self._queue.append(message.crud)
        queue_len = len(self._queue)
        if queue_len > self._queue_warning_threshold:
            logger = self.resource.log.warning
        else:
            logger = self.resource.log.debug
        logger(_LW('incoming message brings queue length to %s'), queue_len)
        return True

    @property
    def image_uuid(self):
        return self.state.params.image_uuid

    @image_uuid.setter
    def image_uuid(self, value):
        self.state.params.image_uuid = value

    def has_more_work(self):
        "Called to check if there are more messages in the state machine queue"
        return (not self.deleted) and bool(self._queue)

    def has_error(self):
        return self.instance.state == states.ERROR

    def drop_queue(self):
        """Drop all pending actions from the local state machine's work queue.

        This is used after a ring rebalance if this state machine no longer
        maps to the local Rug process.
        """
        self.resource.log.info(
            'Dropping %s pending actions from queue', len(self._queue))
        self._queue.clear()
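
A minimal sketch (not from the retired tree) of the event-collapsing idea in CalcAction.execute() above, using plain strings in place of the event constants.

    import collections

    queue = collections.deque(['CREATE', 'UPDATE', 'POLL'])
    action = queue.popleft()
    while queue:
        if action == 'CREATE' and queue[0] == 'UPDATE':
            queue.popleft()      # CREATE implies UPDATE, so eat the update
        elif queue[0] == 'POLL':
            queue.popleft()      # a real action already covers the poll
        else:
            break
    print(action, list(queue))   # -> CREATE []
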
224  astara/tenant.py
@ -1,224 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


"""Manage the resources for a given tenant.
"""

import datetime

from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils

from astara.common.i18n import _LE
from astara import state
from astara import drivers
from astara.common import container


LOG = logging.getLogger(__name__)

tenant_opts = [
    cfg.BoolOpt('enable_byonf', default=False,
                help='Whether to enable bring-your-own-network-function '
                     'support via operator supplied drivers and images.'),
]
cfg.CONF.register_opts(tenant_opts)


class InvalidIncomingMessage(Exception):
    pass


class StateMachineContainer(container.ResourceContainer):
    def unmanage(self, resource_id):
        """Used to delete a state machine from local management

        Removes the local state machine from orchestrator management during
        cluster events. This is different from deleting the resource in that
        it does not tag the resource as also deleted from Neutron, which would
        prevent us from recreating its state machine if the resource later
        ends up back under this orchestrator's control.

        :param resource_id: The resource id to unmanage
        """
        try:
            with self.lock:
                sm = self.resources.pop(resource_id)
                sm.drop_queue()
                LOG.debug('unmanaged tenant state machine for resource %s',
                          resource_id)
        except KeyError:
            pass


class TenantResourceManager(object):
    """Keep track of the state machines for the logical resources for a given
    tenant.
    """

    def __init__(self, tenant_id, delete_callback, notify_callback,
                 queue_warning_threshold,
                 reboot_error_threshold):
        self.tenant_id = tenant_id
        self.delete = delete_callback
        self.notify = notify_callback
        self._queue_warning_threshold = queue_warning_threshold
        self._reboot_error_threshold = reboot_error_threshold
        self.state_machines = StateMachineContainer()
        self._default_resource_id = None

    def _delete_resource(self, resource):
        "Called when the Automaton decides the resource can be deleted"
        if resource.id in self.state_machines:
            LOG.debug('deleting state machine for %s', resource.id)
            del self.state_machines[resource.id]
        if self._default_resource_id == resource.id:
            self._default_resource_id = None
        self.delete(resource)

    def unmanage_resource(self, resource_id):
        self.state_machines.unmanage(resource_id)

    def shutdown(self):
        LOG.info('shutting down')
        for resource_id, sm in self.state_machines.items():
            try:
                sm.service_shutdown()
            except Exception:
                LOG.exception(_LE(
                    'Failed to shutdown state machine for %s'), resource_id
                )

    def _report_bandwidth(self, resource_id, bandwidth):
        LOG.debug('reporting bandwidth for %s', resource_id)
        msg = {
            'tenant_id': self.tenant_id,
            'timestamp': datetime.datetime.isoformat(timeutils.utcnow()),
            'event_type': 'astara.bandwidth.used',
            'payload': dict((b.pop('name'), b) for b in bandwidth),
            'uuid': resource_id,
        }
        self.notify(msg)

    def get_all_state_machines(self):
        return self.state_machines.values()

    def get_state_machines(self, message, worker_context):
        """Return the state machines, and the queue for sending them
        messages, for the logical resource being addressed by the message.
        """
        if (not message.resource or
                (message.resource and not message.resource.id)):
            LOG.error(_LE(
                'Cannot get state machine for message with '
                'no message.resource'))
            raise InvalidIncomingMessage()

        state_machines = []

        # Send to all of our resources.
        if message.resource.id == '*':
            LOG.debug('routing to all state machines')
            state_machines = self.state_machines.values()

        # Ignore messages to deleted resources.
        elif self.state_machines.has_been_deleted(message.resource.id):
            LOG.debug('dropping message for deleted resource')
            return []

        # Send to resources that have an ERROR status
        elif message.resource.id == 'error':
            state_machines = [
                sm for sm in self.state_machines.values()
                if sm.has_error()
            ]
            LOG.debug('routing to %d errored state machines',
                      len(state_machines))

        # Create a new state machine for this router.
        elif message.resource.id not in self.state_machines:
            LOG.debug('creating state machine for %s', message.resource.id)

            # load the driver
            if not message.resource.driver:
                LOG.error(_LE('cannot create state machine without '
                              'specifying a driver.'))
                return []

            resource_obj = self._load_resource_from_message(
                worker_context, message)

            if not resource_obj:
                # this means the driver didn't load for some reason..
                # this might not be needed at all.
                LOG.debug('for some reason loading the driver failed')
                return []

            def deleter():
                self._delete_resource(message.resource)

            new_state_machine = state.Automaton(
                resource=resource_obj,
                tenant_id=self.tenant_id,
                delete_callback=deleter,
                bandwidth_callback=self._report_bandwidth,
                worker_context=worker_context,
                queue_warning_threshold=self._queue_warning_threshold,
                reboot_error_threshold=self._reboot_error_threshold,
            )
            self.state_machines[message.resource.id] = new_state_machine
            state_machines = [new_state_machine]

        # Send directly to an existing router.
        elif message.resource.id:
            state_machines = [self.state_machines[message.resource.id]]

        # Filter out any deleted state machines.
        return [
            machine
            for machine in state_machines
            if (not machine.deleted and
                not self.state_machines.has_been_deleted(machine.resource.id))
        ]

    def get_state_machine_by_resource_id(self, resource_id):
        try:
            return self.state_machines[resource_id]
        except KeyError:
            return None

    def _load_resource_from_message(self, worker_context, message):
        if cfg.CONF.enable_byonf:
            byonf_res = worker_context.neutron.tenant_has_byo_for_function(
                tenant_id=self.tenant_id.replace('-', ''),
                function_type=message.resource.driver)

            if byonf_res:
                try:
                    return drivers.load_from_byonf(
                        worker_context,
                        byonf_res,
                        message.resource.id)
                except drivers.InvalidDriverException:
                    LOG.exception(_LE(
                        'Could not load BYONF driver, falling back to '
                        'configured image'))
                    pass

        return drivers.get(message.resource.driver)(
            worker_context, message.resource.id)
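
A minimal sketch (not from the retired tree) of the notification payload shape assembled by TenantResourceManager._report_bandwidth() above; the tenant id, interface name, and counters are invented example values.

    import datetime

    bandwidth = [{'name': 'eth0', 'bytes_in': 10, 'bytes_out': 20}]
    msg = {
        'tenant_id': 'abc123',                            # example tenant
        'timestamp': datetime.datetime.utcnow().isoformat(),
        'event_type': 'astara.bandwidth.used',
        'payload': dict((b.pop('name'), b) for b in bandwidth),
        'uuid': 'example-router-uuid',
    }
    print(msg['payload'])  # -> {'eth0': {'bytes_in': 10, 'bytes_out': 20}}
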
@ -1,15 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
@ -1,629 +0,0 @@
# Copyright (c) 2015 Akanda, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import netaddr
import os
import six
import subprocess
import testtools
import time

from oslo_config import cfg
from oslo_log import log as logging

from astara.api import astara_client

from keystoneclient import client as _keystoneclient
from keystoneclient import auth as ksauth
from keystoneclient import session as kssession

from neutronclient.v2_0 import client as _neutronclient
from novaclient import client as _novaclient

from keystoneclient import exceptions as ksc_exceptions
from neutronclient.common import exceptions as neutron_exceptions

from tempest_lib.common.utils import data_utils

from astara.test.functional import config

DEFAULT_CONFIG = os.path.join(os.path.dirname(__file__), 'test.conf')
DEFAULT_ACTIVE_TIMEOUT = 340
DEFAULT_DELETE_TIMEOUT = 60
DEFAULT_DOMAIN = 'default'


config.register_opts()
CONF = cfg.CONF
logging.register_options(CONF)

LOG = None


def parse_config():
    config_file = os.environ.get('AKANDA_TEST_CONFIG',
                                 DEFAULT_CONFIG)
    cfg.CONF(
        [], project='astara-orchestrator-functional',
        default_config_files=[config_file])
    logging.set_defaults(default_log_levels=[
        'paramiko.transport=INFO',
        'neutronclient=WARN',
        'keystoneclient=WARN',
    ])
    logging.setup(CONF, 'astara_functional')
    global LOG
    LOG = logging.getLogger(__name__)


class ClientManager(object):
    """A client manager using specified credentials"""
    def __init__(self, username, password, tenant_name, auth_url):
        parse_config()
        self.username = username
        self.password = password
        self.tenant_name = tenant_name
        self.auth_url = auth_url

        self._keystoneclient = None
        self._neutronclient = None
        self._novaclient = None

    @property
    def auth_version(self):
        if self.auth_url.endswith('v3') or self.auth_url.endswith('identity'):
            return 3
        else:
            return 2.0

    @property
    def keystone_session(self):
        auth_plugin = ksauth.get_plugin_class('password')
        _args = {
            'auth_url': self.auth_url,
            'username': self.username,
            'password': self.password,
        }
        if self.auth_version == 3:
            _args.update({
                'user_domain_name': DEFAULT_DOMAIN,
                'project_domain_name': DEFAULT_DOMAIN,
                'project_name': self.tenant_name,
            })
        else:
            _args.update({
                'tenant_name': self.tenant_name,
            })
        _auth = auth_plugin(**_args)
        return kssession.Session(auth=_auth)

    @property
    def novaclient(self):
        if not self._novaclient:
            self._novaclient = _novaclient.Client(
                version=2,
                session=self.keystone_session,
            )
        return self._novaclient

    @property
    def neutronclient(self):
        if not self._neutronclient:
            self._neutronclient = _neutronclient.Client(
                session=self.keystone_session,
            )
        return self._neutronclient

    @property
    def keystoneclient(self):
        if not self._keystoneclient:
            client = _keystoneclient.Client(session=self.keystone_session)
            self._keystoneclient = client
        return self._keystoneclient

    @property
    def tenant_id(self):
        return self.keystoneclient.tenant_id


class ApplianceServerNotFound(Exception):
    pass


class ApplianceServerTimeout(Exception):
    pass


class AdminClientManager(ClientManager):
    """A client manager using credentials loaded from test.conf, which
    are assumed to be admin.
    """
    def __init__(self):
        parse_config()
        super(AdminClientManager, self).__init__(
            username=CONF.os_username,
            password=CONF.os_password,
            tenant_name=CONF.os_tenant_name,
            auth_url=CONF.os_auth_url,
        )

    def get_router_appliance_server(self, router_uuid, retries=10,
                                    wait_for_active=False, ha_router=False):
        """Returns a Nova server object for router"""
        LOG.debug(
            'Looking for nova backing instance for resource %s',
            router_uuid)

        if ha_router:
            exp_instances = 2
        else:
            exp_instances = 1

        for i in six.moves.range(retries):
            service_instances = \
                [instance for instance in
                 self.novaclient.servers.list(
                     search_opts={
                         'all_tenants': 1,
                         'tenant_id': CONF.service_tenant_id}
                 ) if router_uuid in instance.name]

            if service_instances and len(service_instances) == exp_instances:
                LOG.debug(
                    'Found %s backing instance for resource %s: %s',
                    exp_instances, router_uuid, service_instances)
                break
            LOG.debug('%s backing instance not found, will retry %s/%s',
                      exp_instances, i, retries)
            time.sleep(1)
        else:
            raise ApplianceServerNotFound(
                'Could not get nova %s server(s) for router %s' %
                (exp_instances, router_uuid))

        def _wait_for_active(instance):
            LOG.debug('Waiting for backing instance %s to become ACTIVE',
                      instance)
            for i in six.moves.range(CONF.appliance_active_timeout):
                instance = self.novaclient.servers.get(
                    instance.id)
                if instance.status == 'ACTIVE':
                    LOG.debug('Instance %s status==ACTIVE', instance)
                    return
                else:
                    LOG.debug('Instance %s status==%s, will wait',
                              instance, instance.status)
                    time.sleep(1)
            raise ApplianceServerTimeout(
                'Timed out waiting for backing instance of %s to become '
                'ACTIVE' % router_uuid)

        if wait_for_active:
            LOG.debug('Waiting for %s backing instances to become ACTIVE',
                      exp_instances)
            [_wait_for_active(i) for i in service_instances]
            LOG.debug('Waiting for backing instance %s to become ACTIVE',
                      exp_instances)

        if ha_router:
            return sorted(service_instances, key=lambda i: i.name)
        else:
            return service_instances[0]

    def get_network_info(self, network_name):
        net_response = self.neutronclient.list_networks(name=network_name)
        network = net_response.get('networks', [None])[0]
        return network


class TestTenant(object):
    def __init__(self):
        parse_config()
        self.username = data_utils.rand_name(name='user', prefix='akanda')
        self.user_id = None
        self.password = data_utils.rand_password()
        self.tenant_name = data_utils.rand_name(name='tenant', prefix='akanda')
        self.tenant_id = None
        self.role_name = data_utils.rand_name(name='role', prefix='akanda')

        self._admin_clients = AdminClientManager()
        self._admin_ks_client = self._admin_clients.keystoneclient
        self.auth_url = self._admin_ks_client.auth_url

        # create the tenant before creating its clients.
        self._create_tenant()

        self.clients = ClientManager(self.username, self.password,
                                     self.tenant_name, self.auth_url)
        self.tester = ClientManager('demo', 'akanda', 'demo', self.auth_url)

        self._subnets = []
        self._routers = []

    def _create_tenant(self):
        if self._admin_clients.auth_version == 3:
            tenant = self._admin_ks_client.projects.create(
                name=self.tenant_name,
                domain=DEFAULT_DOMAIN)
            user = self._admin_ks_client.users.create(
                name=self.username,
                password=self.password,
                project_domain_name=DEFAULT_DOMAIN,
                default_project=self.tenant_name)
            role = self._admin_ks_client.roles.create(name=self.role_name)
            self._admin_ks_client.roles.grant(
                role=role, user=user, project=tenant)
        else:
            tenant = self._admin_ks_client.tenants.create(self.tenant_name)
            self.tenant_id = tenant.id
            user = self._admin_ks_client.users.create(
                name=self.username,
                password=self.password,
                tenant_id=self.tenant_id)
        self.user_id = user.id
        self.tenant_id = tenant.id
        LOG.debug('Created new test tenant: %s (%s)',
                  self.tenant_id, self.user_id)

    def setup_networking(self, ha_router=False):
        """Create a network + subnet for the tenant. Also creates a router
        if required, and attaches the subnet to it.

        :returns: a (network dict, router dict) tuple
        """
        # NOTE(adam_g): I didn't expect simply creating a network
        # to also create a subnet and router automatically, but this
        # does?
        net_body = {
            'network': {
                'name': data_utils.rand_name(name='network', prefix='ak'),
                'admin_state_up': False,
                'tenant_id': self.tenant_id
            }}
        LOG.debug('Creating network: %s', net_body)
        network = self.clients.neutronclient.create_network(net_body)
        network = network.get('network')
        if not network:
            raise Exception('Failed to create default tenant network')
        LOG.debug('Created network: %s', network)

        if not CONF.astara_auto_add_resources:
            addr = netaddr.IPNetwork(CONF.test_subnet_cidr)
            subnet_body = {
                'subnet': {
                    'name': data_utils.rand_name(name='subnet', prefix='ak'),
                    'network_id': network['id'],
                    'cidr': CONF.test_subnet_cidr,
                    'ip_version': addr.version,
                }
            }
            LOG.debug('Creating subnet: %s', subnet_body)
            subnet = self.clients.neutronclient.create_subnet(
                body=subnet_body)['subnet']
            LOG.debug('Created subnet: %s', subnet)
            router_body = {
                'router': {
                    'name': data_utils.rand_name(name='router', prefix='ak'),
                    'admin_state_up': True,
                    'tenant_id': self.tenant_id,
                    'ha': ha_router,
                }
            }
            LOG.debug('Creating router: %s', router_body)
            router = self._admin_clients.neutronclient.create_router(
                body=router_body)['router']
            LOG.debug('Created router: %s', router)

            LOG.debug(
                'Attaching interface on subnet %s to router %s',
                subnet['id'], router['id'])
            self.clients.neutronclient.add_interface_router(
                router['id'], {'subnet_id': subnet['id']}
            )
            LOG.debug(
                'Attached interface on subnet %s to router %s',
                subnet['id'], router['id'])

        else:
            # routers report as ACTIVE initially (LP: #1491673)
            time.sleep(2)
            LOG.debug('Waiting for astara auto-created router')
            for i in six.moves.range(CONF.appliance_active_timeout):
                routers = self.clients.neutronclient.list_routers()
                routers = routers.get('routers')
                if routers:
                    router = routers[0]
                    LOG.debug('Found astara auto-created router: %s', router)
                    break
                else:
                    LOG.debug(
                        'Still waiting for auto-created router. %s/%s',
                        i, CONF.appliance_active_timeout)
                    time.sleep(1)
            else:
                raise Exception('Timed out waiting for default router.')

        # routers report as ACTIVE initially (LP: #1491673)
        time.sleep(2)
        return network, router

    def _wait_for_backing_instance_delete(self, resource_id):
        i = 1
        LOG.debug(
            'Waiting on deletion of backing instance for resource %s',
            resource_id)

        for i in six.moves.range(DEFAULT_DELETE_TIMEOUT):
            try:
                self._admin_clients.get_router_appliance_server(
                    resource_id, retries=1)
            except ApplianceServerNotFound:
                LOG.debug('Backing instance for resource %s deleted',
                          resource_id)
                return

            LOG.debug(
                'Still waiting for deletion of backing instance for %s'
                ', will wait (%s/%s)',
                resource_id, i, DEFAULT_DELETE_TIMEOUT)
            time.sleep(1)

        m = ('Timed out waiting on deletion of backing instance for %s '
             'after %s sec.' % (resource_id, DEFAULT_DELETE_TIMEOUT))
        LOG.debug(m)
        raise ApplianceServerTimeout(m)

    def _wait_for_neutron_delete(self, thing, ids):
        show = getattr(self.clients.neutronclient, 'show_' + thing)
        attempt = 0
        max_attempts = 10
        for i in ids:
            LOG.debug('Waiting for deletion of %s %s', thing, i)
            while True:
                try:
                    show(i)
                except neutron_exceptions.NeutronClientException as e:
                    if e.status_code == 404:
                        LOG.debug('Deletion of %s %s complete', thing, i)
                        break
                if attempt == max_attempts:
                    raise Exception(
                        'Timed out waiting for deletion of %s %s after %s sec.'
                        % (thing, i, max_attempts))
                LOG.debug(
                    'Still waiting for deletion of %s %s, will wait (%s/%s)',
                    thing, i, attempt, max_attempts)
                attempt += 1
                time.sleep(1)

        # also wait for nova backing instance to delete after routers
        if thing in ['router']:
            [self._wait_for_backing_instance_delete(i) for i in ids]

    def cleanup_neutron(self):
        """Clean tenant environment of neutron resources"""
        LOG.debug('Cleaning up created neutron resources')
        router_interface_ports = [
            p for p in self.clients.neutronclient.list_ports()['ports']
            if (
                'router_interface' in p['device_owner'] or
                'ha_router_replicated_interface' in p['device_owner']
            )]

        for rip in router_interface_ports:
            LOG.debug('Deleting router interface port: %s', rip)
            self.clients.neutronclient.remove_interface_router(
                rip['device_id'],
                body=dict(port_id=rip['id']))

        astara_router_ports = []
        router_ids = [
            r['id'] for r in
            self.clients.neutronclient.list_routers().get('routers')
        ]

        for rid in router_ids:
            for p in ['MGT', 'VRRP']:
                name = 'ASTARA:%s:%s' % (p, rid)
                astara_router_ports += [
                    p['id'] for p in
                    self._admin_clients.neutronclient.list_ports(
                        name=name).get('ports')]

            LOG.debug('Deleting router %s' % rid)

            try:
                self.clients.neutronclient.delete_router(rid)
            except neutron_exceptions.NeutronClientException as e:
                if e.status_code == 404:
                    router_ids.remove(rid)
                else:
                    raise
        self._wait_for_neutron_delete('router', router_ids)

        time.sleep(2)

        port_ids = [
            p['id'] for p in
            self.clients.neutronclient.list_ports().get('ports')]
        for pid in port_ids:
            LOG.debug('Deleting port: %s', pid)
            try:
                self.clients.neutronclient.delete_port(pid)
            except neutron_exceptions.NeutronClientException as e:
                if e.status_code == 404:
                    port_ids.remove(pid)
                else:
                    raise
        self._wait_for_neutron_delete('port', port_ids)

        subnet_ids = [
            s['id']
            for s in self.clients.neutronclient.list_subnets().get('subnets')]
        for sid in subnet_ids:
            LOG.debug('Deleting subnet: %s', sid)
            try:
                self.clients.neutronclient.delete_subnet(sid)
            except neutron_exceptions.NeutronClientException as e:
                if e.status_code == 404:
                    subnet_ids.remove(sid)
                else:
                    raise
        self._wait_for_neutron_delete('subnet', subnet_ids)

        # need to make sure the vrrp and mgt ports get deleted
        # in time before the delete_network()
        for p in astara_router_ports:
            try:
                self._admin_clients.neutronclient.delete_port(p)
            except neutron_exceptions.NeutronClientException as e:
                if e.status_code == 404:
                    astara_router_ports.remove(p)
                else:
                    raise
        self._wait_for_neutron_delete('port', astara_router_ports)

        networks = self.clients.neutronclient.list_networks().get('networks')
        net_ids = [
            n['id'] for n in networks if n['tenant_id'] == self.tenant_id]
        for nid in net_ids:
            LOG.debug('Deleting network: %s', nid)
            try:
                self.clients.neutronclient.delete_network(nid)
            except neutron_exceptions.NeutronClientException as e:
                if e.status_code == 404:
                    net_ids.remove(nid)
                else:
                    raise

        self._wait_for_neutron_delete('network', net_ids)

    def cleanUp(self):
        self.cleanup_neutron()

        self._admin_ks_client.users.delete(self.user_id)
        if self._admin_clients.auth_version == 3:
            self._admin_ks_client.projects.delete(self.tenant_id)
        else:
            self._admin_ks_client.tenants.delete(self.tenant_id)


class AstaraFunctionalBase(testtools.TestCase):
    _test_tenants = []

    def setUp(self):
        super(AstaraFunctionalBase, self).setUp()
        log_format = '%(asctime)s.%(msecs)03d ' + self.id() + ' %(message)s'
        cfg.CONF.set_default('logging_default_format_string', log_format)
        parse_config()
        self.ak_client = astara_client
        self.admin_clients = AdminClientManager()

    @classmethod
    def setUpClass(cls):
        cls._test_tenants = []

    @classmethod
    def tearDownClass(cls):
        try:
            [t.cleanUp() for t in cls._test_tenants]
        except ksc_exceptions.NotFound:
            pass

    @classmethod
    def get_tenant(cls):
        """Creates a new test tenant

        This tenant is assumed to be empty of any cloud resources
        and will be destroyed on test class teardown.
        """
        tenant = TestTenant()
        cls._test_tenants.append(tenant)
        return tenant

    def get_router_appliance_server(self, router_uuid, retries=10,
                                    wait_for_active=False, ha_router=False):
        """Returns a Nova server object for router"""
        return self.admin_clients.get_router_appliance_server(
            router_uuid, retries, wait_for_active, ha_router)

    def get_management_address(self, router_uuid, retries=10):
        LOG.debug('Getting management address for resource %s', router_uuid)

        service_instance = self.get_router_appliance_server(
            router_uuid,
            retries=retries,
            wait_for_active=True
        )

        mgt_network = self.admin_clients.get_network_info(
            CONF.management_network_name
        )

        for interface in service_instance.interface_list():
            if interface.net_id == mgt_network['id']:
                addr = interface.fixed_ips[0]['ip_address']
                LOG.debug(
                    'Got management address %s for resource %s',
                    addr,
                    router_uuid
                )
                return addr
        else:
            raise Exception(
                '"mgt" port not found on service instance %s (%s)' %
                (service_instance.id, service_instance.name))

    def assert_router_is_active(self, router_uuid, ha_router=False):
        LOG.debug('Waiting for resource %s to become ACTIVE', router_uuid)
        for i in six.moves.range(CONF.appliance_active_timeout):
            res = self.admin_clients.neutronclient.show_router(router_uuid)
            router = res['router']
            if router['status'] == 'ACTIVE':
                LOG.debug('Router %s ACTIVE after %s sec.', router_uuid, i)
                return

            service_instances = self.get_router_appliance_server(
|
|
||||||
router_uuid, wait_for_active=True, ha_router=ha_router)
|
|
||||||
if not ha_router:
|
|
||||||
service_instances = [service_instances]
|
|
||||||
|
|
||||||
for instance in service_instances:
|
|
||||||
if instance.status == 'ERROR':
|
|
||||||
raise Exception(
|
|
||||||
'Backing instance %s for router %s in ERROR state',
|
|
||||||
instance.id, router_uuid)
|
|
||||||
|
|
||||||
LOG.debug(
|
|
||||||
'Resource %s not active. Status==%s, will wait, %s/%s sec.',
|
|
||||||
router_uuid, router['status'], i,
|
|
||||||
CONF.appliance_active_timeout)
|
|
||||||
time.sleep(1)
|
|
||||||
|
|
||||||
raise Exception(
|
|
||||||
'Timed out waiting for router %s to become ACTIVE, '
|
|
||||||
'current status=%s' % (router_uuid, router['status']))
|
|
||||||
|
|
||||||
def ping_router_mgt_address(self, router_uuid):
|
|
||||||
mgt_address = self.get_management_address(router_uuid)
|
|
||||||
program = {4: 'ping', 6: 'ping6'}
|
|
||||||
|
|
||||||
mgt_ip_version = netaddr.IPNetwork(mgt_address).version
|
|
||||||
cmd = [program[mgt_ip_version], '-c30', mgt_address]
|
|
||||||
LOG.debug('Pinging resource %s: %s', router_uuid, ' '.join(cmd))
|
|
||||||
try:
|
|
||||||
subprocess.check_call(cmd)
|
|
||||||
except:
|
|
||||||
raise Exception('Failed to ping router with command: %s' % cmd)
|
|
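
# The cleanup above repeats one pattern: attempt a delete, tolerate 404s,
# then wait for the survivors to disappear. A minimal standalone sketch of
# that pattern (hypothetical helper, not named in the original module):

def delete_all(delete_fn, ids, not_found_exc):
    """Call delete_fn(i) for each id, dropping ids that are already gone.

    Returns the list of ids whose deletion was actually issued, which the
    caller can then poll until they vanish.
    """
    remaining = []
    for i in ids:
        try:
            delete_fn(i)
            remaining.append(i)
        except not_found_exc as e:
            if getattr(e, 'status_code', None) != 404:
                raise  # only a 404 means "already deleted"
    return remaining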
@ -1,67 +0,0 @@
# Copyright (c) 2016 Akanda, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import itertools

from oslo_config import cfg


functional_test_opts = [
    cfg.StrOpt(
        'os_auth_url', required=True,
        help='Keystone auth URL'),
    cfg.StrOpt(
        'os_username', required=True,
        help='Username of admin user'),
    cfg.StrOpt(
        'os_password', required=True,
        help='Password of admin user'),
    cfg.StrOpt(
        'os_tenant_name', required=True,
        help='Tenant name of admin user'),
    cfg.StrOpt(
        'service_tenant_id', required=True,
        help='Tenant ID for the astara service user'),
    cfg.StrOpt(
        'service_tenant_name', required=True,
        help='Tenant name of the astara service user'),
    cfg.StrOpt(
        'appliance_api_port', required=True,
        help='The port on which appliance API servers listen'),
    cfg.BoolOpt(
        'astara_auto_add_resources', required=False, default=True,
        help='Whether astara-neutron is configured to auto-add resources'),
    cfg.IntOpt(
        'appliance_active_timeout', required=False, default=340,
        help='Timeout (sec) for an appliance to become ACTIVE'),
    cfg.StrOpt(
        'test_subnet_cidr', required=False, default='10.1.1.0/24'),
    cfg.IntOpt(
        'health_check_period', required=False, default=60,
        help='The health_check_period (sec) astara-orchestrator is '
             'configured to use'),
    cfg.StrOpt(
        'management_network_name', required=False, default='mgt',
        help='The name of the management network')
]


def list_opts():
    return [
        ('functional',
         itertools.chain(functional_test_opts))]


def register_opts():
    cfg.CONF.register_opts(functional_test_opts)
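
# Usage sketch for the options above: register them under the [functional]
# group (matching list_opts() and the sample config that follows) and read a
# value back. Assumptions: the group name, the 'test.conf' path, and that
# test.conf supplies every required os_* option; parse_config() in the
# functional test base is what actually wires this up.

from oslo_config import cfg

cfg.CONF.register_opts(functional_test_opts, group='functional')
cfg.CONF(['--config-file', 'test.conf'])  # hypothetical config file path
print(cfg.CONF.functional.appliance_active_timeout)  # -> 340 by default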
@ -1,42 +0,0 @@
[DEFAULT]


[functional]

#
# From astara.test.functional
#

# Password of admin user (string value)
#os_password = <None>

# The port on which appliance API servers listen (string value)
#appliance_api_port = <None>

# Timeout (sec) for an appliance to become ACTIVE (integer value)
#appliance_active_timeout = 340

# The health_check_period (sec) astara-orchestrator is configured to use
# (integer value)
#health_check_period = 60

# Tenant ID for the astara service user (string value)
#service_tenant_id = <None>

# Tenant name of admin user (string value)
#os_tenant_name = <None>

# Keystone auth URL (string value)
#os_auth_url = <None>

# Whether astara-neutron is configured to auto-add resources (boolean value)
#astara_auto_add_resources = true

# (string value)
#test_subnet_cidr = 10.1.1.0/24

# Username of admin user (string value)
#os_username = <None>

# Tenant name of the astara service user (string value)
#service_tenant_name = <None>
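
# The sample file above can be cross-checked against the declared options; a
# small sketch that walks list_opts() and prints each option's default.
# Assumption: the options module shown earlier is importable at this path.

from astara.test.functional.config import list_opts  # assumed module path

for group, opts in list_opts():
    for opt in opts:
        print('[%s] %s = %s' % (group, opt.name, opt.default))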
@ -1,134 +0,0 @@
# Copyright (c) 2015 Akanda, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import time

from oslo_config import cfg
from oslo_log import log as logging

from astara.test.functional import base


CONF = cfg.CONF
LOG = logging.getLogger(__name__)


class AstaraRouterTestBase(base.AstaraFunctionalBase):
    HA_ROUTER = False

    @classmethod
    def setUpClass(cls):
        super(AstaraRouterTestBase, cls).setUpClass()
        cls.tenant = cls.get_tenant()
        cls.neutronclient = cls.tenant.clients.neutronclient
        cls.network, cls.router = cls.tenant.setup_networking(
            ha_router=cls.HA_ROUTER)

    def setUp(self):
        super(AstaraRouterTestBase, self).setUp()
        self.assert_router_is_active(self.router['id'], self.HA_ROUTER)

        # refresh router ref now that it's active
        router = self.neutronclient.show_router(self.router['id'])
        self.router = router['router']

    @property
    def router_ha(self):
        router = self.admin_clients.neutronclient.show_router(
            self.router['id'])['router']
        return router.get('ha', False)


class TestAstaraHARouter(AstaraRouterTestBase):
    HA_ROUTER = True

    def test_ha_router_servers(self):
        service_instances = self.get_router_appliance_server(
            self.router['id'], ha_router=self.HA_ROUTER)
        self.assertEqual(2, len(service_instances))

        # kill the master and ensure it is backfilled with a new instance
        master, backup = service_instances
        self.admin_clients.novaclient.servers.delete(master.id)

        LOG.debug('Waiting %s seconds for astara health check to tick',
                  CONF.health_check_period)
        time.sleep(CONF.health_check_period)

        service_instances = self.get_router_appliance_server(
            self.router['id'], retries=600, ha_router=self.HA_ROUTER)
        self.assertEqual(2, len(service_instances))
        self.assertEqual(backup, service_instances[0])


class TestAstaraRouter(AstaraRouterTestBase):
    HA_ROUTER = False

    def test_router_recovery(self):
        """
        Test that creation of network/subnet/router results in a
        correctly plugged appliance, and that manually destroying the
        Nova instance results in a new appliance being booted.
        """

        self.skipTest("Race condition makes this test too unstable")

        # for each subnet that was created during setup, ensure we have a
        # router interface added
        ports = self.neutronclient.list_ports(
            device_owner='network:router_interface')['ports']
        subnets = self.neutronclient.list_subnets(
            network_id=self.network['id'])
        subnets = subnets['subnets']
        self.assertEqual(len(ports), len(subnets))
        for port in ports:
            self.assertEqual(port['device_id'], self.router['id'])
            self.assertEqual(
                sorted([subnet['id'] for subnet in subnets]),
                sorted([fip['subnet_id'] for fip in port['fixed_ips']])
            )

        self.ping_router_mgt_address(self.router['id'])

        # Ensure that if we destroy the nova instance, the RUG will rebuild
        # the router with a new instance.
        # This could live in a separate test case, but it'd require the
        # above as setup, so just piggyback on it.

        old_server = self.get_router_appliance_server(self.router['id'])
        LOG.debug('Original server: %s', old_server)

        # NOTE(adam_g): In the gate, sometimes the appliance hangs on the
        # first config update and health checks get queued up behind the
        # hanging config update. If that's the case, we need to wait a while
        # before deletion for the first to timeout.
        time.sleep(30)
        LOG.debug('Deleting original nova server: %s', old_server.id)
        self.admin_clients.novaclient.servers.delete(old_server.id)

        LOG.debug('Waiting %s seconds for astara health check to tick',
                  CONF.health_check_period)
        time.sleep(CONF.health_check_period)

        # look for the new server, retrying to give the RUG time to do its
        # thing.
        new_server = self.get_router_appliance_server(
            self.router['id'], retries=600, wait_for_active=True)
        LOG.debug('Rebuilt new server found: %s', new_server)
        self.assertNotEqual(old_server.id, new_server.id)

        # routers report as ACTIVE initially (LP: #1491673)
        time.sleep(2)

        self.assert_router_is_active(self.router['id'])
        self.ping_router_mgt_address(self.router['id'])
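
# Both tests above block for a full health_check_period before polling for
# the replacement instance. A sketch of a bounded poll that returns as soon
# as a condition holds (hypothetical helper; the real tests lean on
# get_router_appliance_server's own retry loop instead):

import time

def wait_for(predicate, timeout, interval=1):
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    return False

# e.g. wait_for(lambda: len(list_instances()) == 2, timeout=600), where
# list_instances is a hypothetical stand-in for the Nova server lookup.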
@ -1,15 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
@ -1,15 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
@ -1,15 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
@ -1,127 +0,0 @@
# Copyright (c) 2015 Akanda, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import netaddr

from astara.api.neutron import Subnet


class FakeModel(object):
    def __init__(self, id_, **kwargs):
        self.id = id_
        self.__dict__.update(kwargs)


fake_ext_port = FakeModel(
    '1',
    mac_address='aa:bb:cc:dd:ee:ff',
    network_id='ext-net',
    fixed_ips=[FakeModel('', ip_address='9.9.9.9', subnet_id='s2')],
    first_v4='9.9.9.9',
    device_id='e-e-e-e')


fake_mgt_port = FakeModel(
    '2',
    name='ASTARA:MGT:foo',
    mac_address='aa:bb:cc:cc:bb:aa',
    network_id='mgt-net',
    device_id='m-m-m-m')

fake_int_port = FakeModel(
    '3',
    name='ASTARA:RUG:foo',
    mac_address='aa:aa:aa:aa:aa:aa',
    network_id='int-net',
    fixed_ips=[FakeModel('', ip_address='192.168.1.1', subnet_id='s1')],
    device_id='i-i-i-i')

fake_instance_port = FakeModel(
    '4',
    name='foo',
    mac_address='aa:aa:aa:aa:aa:bb',
    network_id='int-net',
    fixed_ips=[FakeModel('', ip_address='192.168.1.2', subnet_id='s1')],
    first_v4='192.168.1.2',
    device_id='v-v-v-v')

fake_instance_mgt_port = FakeModel(
    '4',
    name='ASTARA:MGT:foo',
    mac_address='aa:aa:aa:aa:aa:bb',
    network_id='int-net',
    fixed_ips=[FakeModel('', ip_address='192.168.1.2', subnet_id='s1')],
    first_v4='192.168.1.2',
    device_id='v-v-v-v')

fake_instance_vrrp_port = FakeModel(
    '4',
    name='ASTARA:VRRP:foo',
    mac_address='aa:aa:aa:aa:aa:bb',
    network_id='int-net',
    fixed_ips=[FakeModel('', ip_address='192.168.1.2', subnet_id='s1')],
    first_v4='192.168.1.2',
    device_id='v-v-v-v')

fake_instance_lb_port = FakeModel(
    '4',
    name='ASTARA:LB:foo',
    mac_address='aa:aa:aa:aa:aa:bb',
    network_id='int-net',
    fixed_ips=[FakeModel('', ip_address='192.168.1.2', subnet_id='s1')],
    first_v4='192.168.1.2',
    device_id='v-v-v-v')

fake_subnet = FakeModel(
    's1',
    cidr=netaddr.IPNetwork('192.168.1.0/24'),
    gateway_ip='192.168.1.1',
    enable_dhcp=True,
    dns_nameservers=['8.8.8.8'],
    ipv6_ra_mode=None,
    host_routes={})

fake_subnet_with_slaac = Subnet(
    id_='fake_id',
    name='s1',
    tenant_id='fake_tenant_id',
    network_id='fake_network_id',
    ip_version=6,
    cidr='fdee:9f85:83be::/48',
    gateway_ip='fdee:9f85:83be::1',
    enable_dhcp=True,
    dns_nameservers=['8.8.8.8'],
    ipv6_ra_mode='slaac',
    host_routes={})

fake_network = FakeModel(
    'fake_network_id',
    name='thenet',
    tenant_id='tenant_id',
    status='ACTIVE',
    shared=False,
    admin_state_up=True,
    mtu=1280,
    port_security_enabled=False,
    subnets=[fake_subnet]
)

fake_router = FakeModel(
    'router_id',
    tenant_id='tenant_id',
    name='router_name',
    external_port=fake_ext_port,
    management_port=fake_mgt_port,
    internal_ports=[fake_int_port],
    ha=False)
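
# FakeModel simply promotes keyword arguments to attributes, so the fixtures
# above quack like neutron model objects. A quick usage example:

port = FakeModel('p1', mac_address='aa:bb:cc:dd:ee:ff', network_id='n1')
assert port.id == 'p1'
assert port.network_id == 'n1'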
@ -1,202 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import mock
import netaddr
import unittest2 as unittest

from astara.api.config import common

from astara.test.unit.api.config import config_fakes as fakes


class TestCommonConfig(unittest.TestCase):
    def test_network_config(self):
        mock_client = mock.Mock()
        mock_client.get_network_detail.return_value = fakes.fake_network
        subnets_dict = {fakes.fake_subnet.id: fakes.fake_subnet}

        with mock.patch.object(common, '_make_network_config_dict') as nc:
            with mock.patch.object(common, '_interface_config') as ic:
                mock_interface = mock.Mock()
                ic.return_value = mock_interface

                common.network_config(
                    mock_client,
                    fakes.fake_int_port,
                    'ge1',
                    'internal',
                    [])

                ic.assert_called_once_with(
                    'ge1', fakes.fake_int_port, subnets_dict, 1280)
                nc.assert_called_once_with(
                    mock_interface,
                    'internal',
                    'int-net',
                    mtu=1280,
                    subnets_dict=subnets_dict,
                    network_ports=[])

    def test_make_network_config(self):
        interface = {'ifname': 'ge2'}

        result = common._make_network_config_dict(
            interface,
            'internal',
            fakes.fake_int_port.network_id,
            1280,
            'dhcp',
            'ra',
            subnets_dict={fakes.fake_subnet.id: fakes.fake_subnet},
            network_ports=[fakes.fake_instance_port])

        expected = {
            'interface': interface,
            'network_id': fakes.fake_int_port.network_id,
            'v4_conf_service': 'dhcp',
            'v6_conf_service': 'ra',
            'network_type': 'internal',
            'mtu': 1280,
            'subnets': [{'cidr': '192.168.1.0/24',
                         'dhcp_enabled': True,
                         'dns_nameservers': ['8.8.8.8'],
                         'gateway_ip': '192.168.1.1',
                         'host_routes': {},
                         'id': 's1'}],
            'allocations': [
                {
                    'mac_address': 'aa:aa:aa:aa:aa:bb',
                    'ip_addresses': {'192.168.1.2': True},
                    'hostname': '192-168-1-2.local',
                    'device_id': 'v-v-v-v'
                }
            ]
        }
        self.assertEqual(expected, result)

    def test_interface_config(self):
        expected = {
            'addresses': ['192.168.1.1/24'],
            'ifname': 'ge1',
            'mtu': 1280
        }
        subnets_dict = {fakes.fake_subnet.id: fakes.fake_subnet}

        self.assertEqual(
            expected,
            common._interface_config(
                'ge1',
                fakes.fake_int_port,
                subnets_dict,
                1280
            )
        )

    def test_subnet_config(self):
        expected = {
            'cidr': '192.168.1.0/24',
            'dhcp_enabled': True,
            'dns_nameservers': ['8.8.8.8'],
            'gateway_ip': '192.168.1.1',
            'host_routes': {},
            'id': 's1',
        }
        self.assertEqual(expected, common._subnet_config(fakes.fake_subnet))

    def test_subnet_config_with_slaac_enabled(self):
        expected = {
            'cidr': 'fdee:9f85:83be::/48',
            'dhcp_enabled': False,
            'dns_nameservers': ['8.8.8.8'],
            'gateway_ip': 'fdee:9f85:83be::1',
            'host_routes': {},
            'id': 'fake_id',
        }
        self.assertEqual(
            expected, common._subnet_config(fakes.fake_subnet_with_slaac))

    def test_subnet_config_no_gateway(self):
        expected = {
            'cidr': '192.168.1.0/24',
            'dhcp_enabled': True,
            'dns_nameservers': ['8.8.8.8'],
            'gateway_ip': '',
            'host_routes': {},
            'id': 's1',
        }
        sn = fakes.FakeModel(
            's1',
            cidr=netaddr.IPNetwork('192.168.1.0/24'),
            gateway_ip='',
            enable_dhcp=True,
            dns_nameservers=['8.8.8.8'],
            ipv6_ra_mode='',
            host_routes={})
        self.assertEqual(expected, common._subnet_config(sn))

    def test_subnet_config_gateway_none(self):
        expected = {
            'cidr': '192.168.1.0/24',
            'dhcp_enabled': True,
            'dns_nameservers': ['8.8.8.8'],
            'gateway_ip': '',
            'host_routes': {},
            'id': 's1',
        }
        sn = fakes.FakeModel(
            's1',
            cidr=netaddr.IPNetwork('192.168.1.0/24'),
            gateway_ip=None,
            enable_dhcp=True,
            dns_nameservers=['8.8.8.8'],
            ipv6_ra_mode='',
            host_routes={})
        self.assertEqual(expected, common._subnet_config(sn))

    def test_allocation_config_vrrp(self):
        subnets_dict = {fakes.fake_subnet.id: fakes.fake_subnet}
        self.assertEqual(
            [],
            common._allocation_config(
                [fakes.fake_instance_vrrp_port],
                subnets_dict)
        )

    def test_allocation_config_lb(self):
        subnets_dict = {fakes.fake_subnet.id: fakes.fake_subnet}
        self.assertEqual(
            [],
            common._allocation_config(
                [fakes.fake_instance_lb_port],
                subnets_dict)
        )

    def test_allocation_config_mgt(self):
        subnets_dict = {fakes.fake_subnet.id: fakes.fake_subnet}
        expected = [
            {'mac_address': 'aa:aa:aa:aa:aa:bb',
             'ip_addresses': {'192.168.1.2': True},
             'hostname': '192-168-1-2.local',
             'device_id': 'v-v-v-v'}
        ]
        self.assertEqual(
            expected,
            common._allocation_config(
                [fakes.fake_instance_mgt_port],
                subnets_dict)
        )
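
# The expected allocations above pair each instance port with a hostname of
# the form '192-168-1-2.local'. A sketch of that derivation, consistent with
# the fixtures; the real logic is exercised through
# common._allocation_config above, so this helper name is hypothetical:

def hostname_for_ip(ip_address):
    return '%s.local' % ip_address.replace('.', '-')

assert hostname_for_ip('192.168.1.2') == '192-168-1-2.local'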
@ -1,51 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import mock

from astara.api.config import loadbalancer as lb_config
from astara.test.unit import base, fakes


class TestLoadbalancerConfigAPI(base.RugTestBase):
    @mock.patch('astara.api.config.common.network_config')
    def test_build_config(self, fake_network_config):
        fake_client = mock.Mock()
        fake_lb = fakes.fake_loadbalancer()
        fake_lb_net = mock.Mock()
        fake_mgt_net = mock.Mock()
        fake_mgt_port = mock.Mock(
            network_id='fake_mgt_network_id',
        )
        fake_iface_map = {
            fake_lb.vip_port.network_id: fake_lb_net,
            fake_mgt_port.network_id: fake_mgt_net,
        }
        fake_network_config.side_effect = [
            'fake_lb_net_dict', 'fake_mgt_net_dict'
        ]
        res = lb_config.build_config(
            fake_client, fake_lb, fake_mgt_port, fake_iface_map)
        expected = {
            'hostname': 'ak-loadbalancer-%s' % fake_lb.tenant_id,
            'tenant_id': fake_lb.tenant_id,
            'networks': ['fake_lb_net_dict', 'fake_mgt_net_dict'],
            'services': {
                'loadbalancer': fake_lb.to_dict(),
            }
        }
        self.assertEqual(expected, res)
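
# test_build_config leans on mock's side_effect-as-iterable: each call to the
# patched common.network_config pops the next value in order. Minimal
# demonstration of that mock behavior:

import mock  # unittest.mock on Python 3

m = mock.Mock(side_effect=['first', 'second'])
assert m() == 'first'
assert m() == 'second'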
@ -1,292 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import mock
import netaddr
from oslo_config import cfg
import unittest2 as unittest
from six.moves import builtins as __builtins__

from astara.api.config import router as conf_mod

from astara.test.unit.api.config import config_fakes as fakes


class TestAstaraClient(unittest.TestCase):
    def setUp(self):
        cfg.CONF.set_override('provider_rules_path', '/the/path')

    def tearDown(self):
        cfg.CONF.reset()

    def test_build_config(self):
        methods = {
            'load_provider_rules': mock.DEFAULT,
            'generate_network_config': mock.DEFAULT,
            'generate_floating_config': mock.DEFAULT,
            'get_default_v4_gateway': mock.DEFAULT,
        }
        fake_orchestrator = {
            'host': 'foohost',
            'address': '10.0.0.1',
            'metadata_port': 80,
        }

        mock_client = mock.Mock()
        mock_context = mock.Mock(
            neutron=mock_client,
            config=fake_orchestrator,
        )
        ifaces = []
        provider_rules = {'labels': {'ext': ['192.168.1.1']}}
        network_config = [
            {'interface': 1,
             'network_id': 2,
             'v4_conf_service': 'static',
             'v6_conf_service': 'static',
             'network_type': 'external',
             'subnets': [
                 {'cidr': '192.168.1.0/24',
                  'dhcp_enabled': True,
                  'dns_nameservers': [],
                  'host_routes': [],
                  'gateway_ip': '192.168.1.1',
                  },
                 {'cidr': '10.0.0.0/24',
                  'dhcp_enabled': True,
                  'dns_nameservers': [],
                  'host_routes': [],
                  'gateway_ip': '10.0.0.1',
                  }, ],
             'allocations': []}
        ]

        with mock.patch.multiple(conf_mod, **methods) as mocks:
            mocks['load_provider_rules'].return_value = provider_rules
            mocks['generate_network_config'].return_value = network_config
            mocks['generate_floating_config'].return_value = 'floating_config'
            mocks['get_default_v4_gateway'].return_value = 'default_gw'

            config = conf_mod.build_config(mock_context, fakes.fake_router,
                                           fakes.fake_mgt_port, ifaces)

            expected = {
                'default_v4_gateway': 'default_gw',
                'networks': network_config,
                'labels': {'ext': ['192.168.1.1']},
                'floating_ips': 'floating_config',
                'asn': 64512,
                'neighbor_asn': 64512,
                'tenant_id': 'tenant_id',
                'ha_resource': False,
                'hostname': 'ak-tenant_id',
                'orchestrator': {
                    'host': 'foohost',
                    'address': '10.0.0.1',
                    'metadata_port': 80,
                },
                'vpn': {}
            }

            self.assertEqual(expected, config)

            mocks['load_provider_rules'].assert_called_once_with('/the/path')
            mocks['generate_network_config'].assert_called_once_with(
                mock_client, fakes.fake_router, fakes.fake_mgt_port, ifaces)

    def test_load_provider_rules(self):
        rules_dict = {'labels': {}, 'preanchors': [], 'postanchors': []}
        with mock.patch('oslo_serialization.jsonutils.load') as load:
            load.return_value = rules_dict
            with mock.patch('six.moves.builtins.open') as mock_open:
                r = conf_mod.load_provider_rules('/the/path')

                mock_open.assert_called_once_with('/the/path')
                load.assert_called_once_with(mock_open.return_value)
                self.assertEqual(rules_dict, r)

    @mock.patch.object(__builtins__, 'open', autospec=True)
    def test_load_provider_rules_not_found(self, mock_open):
        mock_open.side_effect = IOError()
        res = conf_mod.load_provider_rules('/tmp/path')
        self.assertEqual({}, res)

    @mock.patch('astara.api.config.common.network_config')
    def test_generate_network_config(self, mock_net_conf):
        mock_client = mock.Mock()

        iface_map = {
            fakes.fake_mgt_port.network_id: 'ge0',
            fakes.fake_ext_port.network_id: 'ge1',
            fakes.fake_int_port.network_id: 'ge2'
        }

        mock_net_conf.return_value = 'configured_network'

        result = conf_mod.generate_network_config(
            mock_client, fakes.fake_router, fakes.fake_mgt_port, iface_map)

        expected = [
            'configured_network',
            'configured_network',
            'configured_network'
        ]

        self.assertEqual(expected, result)

        expected_calls = [
            mock.call(
                mock_client, fakes.fake_router.management_port,
                'ge0', 'management'),
            mock.call(
                mock_client, fakes.fake_router.external_port,
                'ge1', 'external'),
            mock.call(
                mock_client, fakes.fake_int_port,
                'ge2', 'internal', mock.ANY)]
        for c in expected_calls:
            self.assertIn(c, mock_net_conf.call_args_list)
        mock_net_conf.assert_has_calls(expected_calls)

    def test_generate_floating_config(self):
        fip = fakes.FakeModel(
            'id',
            floating_ip=netaddr.IPAddress('9.9.9.9'),
            fixed_ip=netaddr.IPAddress('192.168.1.1')
        )

        rtr = fakes.FakeModel('rtr_id', floating_ips=[fip])

        result = conf_mod.generate_floating_config(rtr)
        expected = [{'floating_ip': '9.9.9.9', 'fixed_ip': '192.168.1.1'}]

        self.assertEqual(expected, result)


class TestAstaraClientGateway(unittest.TestCase):

    def setUp(self):
        cfg.CONF.set_override('provider_rules_path', '/the/path')
        # Sample data taken from a real devstack-created system, with
        # the external MAC address modified to match the fake port in
        # use for the mocked router.
        self.networks = [
            {'subnets': [
                {'host_routes': [],
                 'cidr': '172.16.77.0/24',
                 'gateway_ip': '172.16.77.1',
                 'dns_nameservers': [],
                 'dhcp_enabled': True,
                 'network_type': 'external'},
                {'host_routes': [],
                 'cidr': 'fdee:9f85:83be::/48',
                 'gateway_ip': 'fdee:9f85:83be::1',
                 'dns_nameservers': [],
                 'dhcp_enabled': True}],
             'v6_conf_service': 'static',
             'network_id': u'1e109e80-4a6a-483e-9dd4-2ff31adf25f5',
             'allocations': [],
             'interface': {'ifname': u'ge1',
                           'addresses': [
                               '172.16.77.2/24',
                               'fdee:9f85:83be:0:f816:3eff:fee5:1742/48',
                           ]},
             'v4_conf_service': 'static',
             'network_type': 'external'},
            {'subnets': [],
             'v6_conf_service': 'static',
             'network_id': u'698ef1d1-1089-48ab-80b0-f994a962891c',
             'allocations': [],
             'interface': {
                 u'addresses': [
                     u'fe80::f816:3eff:fe4d:bf12/64',
                     u'fdca:3ba5:a17a:acda:f816:3eff:fe4d:bf12/64',
                 ],
                 u'media': u'Ethernet autoselect',
                 u'lladdr': u'fa:16:3e:4d:bf:12',
                 u'state': u'up',
                 u'groups': [],
                 u'ifname': u'ge0',
                 u'mtu': 1500,
                 u'description': u''},
             'v4_conf_service': 'static',
             'network_type': 'management'},
            {'subnets': [
                {'host_routes': [],
                 'cidr': 'fdd6:a1fa:cfa8:6c94::/64',
                 'gateway_ip': 'fdd6:a1fa:cfa8:6c94::1',
                 'dns_nameservers': [],
                 'dhcp_enabled': False},
                {'host_routes': [],
                 'cidr': '192.168.0.0/24',
                 'gateway_ip': '192.168.0.1',
                 'dns_nameservers': [],
                 'dhcp_enabled': True}],
             'v6_conf_service': 'static',
             'network_id': u'a1ea2256-5e57-4e9e-8b7a-8bf17eb76b73',
             'allocations': [
                 {'mac_address': u'fa:16:3e:1b:93:76',
                  'ip_addresses': {
                      'fdd6:a1fa:cfa8:6c94::1': False,
                      '192.168.0.1': True},
                  'hostname': '192-168-0-1.local',
                  'device_id': u'c72a34fb-fb56-4ee7-b9b2-6467eb1c45d6'}],
             'interface': {'ifname': u'ge2',
                           'addresses': ['192.168.0.1/24',
                                         'fdd6:a1fa:cfa8:6c94::1/64']},
             'v4_conf_service': 'static',
             'network_type': 'internal'}]

    def tearDown(self):
        cfg.CONF.reset()

    def test_with_interfaces(self):
        mock_client = mock.Mock()
        result = conf_mod.get_default_v4_gateway(
            mock_client,
            fakes.fake_router,
            self.networks,
        )
        self.assertEqual('172.16.77.1', result)

    def test_without_ipv4_on_external_port(self):
        # Only set a V6 address
        self.networks[0]['interface']['addresses'] = [
            'fdee:9f85:83be:0:f816:3eff:fee5:1742/48',
        ]
        mock_client = mock.Mock()
        result = conf_mod.get_default_v4_gateway(
            mock_client,
            fakes.fake_router,
            self.networks,
        )
        self.assertEqual('', result)

    def test_extra_ipv4_on_external_port(self):
        self.networks[0]['interface']['addresses'] = [
            u'fe80::f816:3eff:fe4d:bf12/64',
            u'fdca:3ba5:a17a:acda:f816:3eff:fe4d:bf12/64',
            u'192.168.1.1',
            u'172.16.77.2',
        ]
        mock_client = mock.Mock()
        result = conf_mod.get_default_v4_gateway(
            mock_client,
            fakes.fake_router,
            self.networks,
        )
        self.assertEqual('172.16.77.1', result)
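
# The three gateway tests above imply the selection rule: find the external
# network, take its first IPv4 interface address that falls inside one of
# that network's subnets, and return that subnet's gateway (else ''). A
# condensed sketch of the rule as the tests describe it; the real
# get_default_v4_gateway takes different arguments and may differ in detail:

import netaddr

def default_v4_gateway(networks):
    for net in networks:
        if net['network_type'] != 'external':
            continue
        for addr in net['interface']['addresses']:
            ip = netaddr.IPNetwork(addr)
            if ip.version != 4:
                continue
            for subnet in net['subnets']:
                if ip.ip in netaddr.IPNetwork(subnet['cidr']):
                    return subnet['gateway_ip']
    return ''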
@ -1,140 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import mock
import unittest2 as unittest

from astara.api import astara_client


class TestAstaraClient(unittest.TestCase):
    def setUp(self):
        self.mock_create_session = mock.patch.object(
            astara_client,
            '_get_proxyless_session'
        ).start()
        self.mock_get = self.mock_create_session.return_value.get
        self.mock_put = self.mock_create_session.return_value.put
        self.mock_post = self.mock_create_session.return_value.post

        self.addCleanup(mock.patch.stopall)

    def test_mgt_url(self):
        self.assertEqual('http://[fe80::2]:5000/',
                         astara_client._mgt_url('fe80::2', 5000, '/'))
        self.assertEqual('http://192.168.1.1:5000/',
                         astara_client._mgt_url('192.168.1.1', 5000, '/'))

    def test_is_alive_success(self):
        self.mock_get.return_value.status_code = 200

        self.assertTrue(astara_client.is_alive('fe80::2', 5000))
        self.mock_get.assert_called_once_with(
            'http://[fe80::2]:5000/v1/firewall/rules',
            timeout=3.0
        )

    def test_is_alive_bad_status(self):
        self.mock_get.return_value.status_code = 500

        self.assertFalse(astara_client.is_alive('fe80::2', 5000))
        self.mock_get.assert_called_once_with(
            'http://[fe80::2]:5000/v1/firewall/rules',
            timeout=3.0
        )

    def test_is_alive_exception(self):
        self.mock_get.side_effect = Exception

        self.assertFalse(astara_client.is_alive('fe80::2', 5000))
        self.mock_get.assert_called_once_with(
            'http://[fe80::2]:5000/v1/firewall/rules',
            timeout=3.0
        )

    def test_get_interfaces(self):
        self.mock_get.return_value.status_code = 200
        self.mock_get.return_value.json.return_value = {
            'interfaces': 'the_interfaces'
        }

        self.assertEqual('the_interfaces',
                         astara_client.get_interfaces('fe80::2', 5000))
        self.mock_get.assert_called_once_with(
            'http://[fe80::2]:5000/v1/system/interfaces',
            timeout=30
        )

    def test_update_config(self):
        config = {'foo': 'bar'}
        self.mock_put.return_value.status_code = 200
        self.mock_put.return_value.json.return_value = config

        resp = astara_client.update_config('fe80::2', 5000, config)

        self.mock_put.assert_called_once_with(
            'http://[fe80::2]:5000/v1/system/config',
            data=b'{"foo": "bar"}',
            headers={'Content-type': 'application/json'},
            timeout=90)
        self.assertEqual(config, resp)

    def test_update_config_with_custom_config(self):
        config = {'foo': 'bar'}
        self.mock_put.return_value.status_code = 200
        self.mock_put.return_value.json.return_value = config

        with mock.patch.object(astara_client.cfg, 'CONF') as cfg:
            cfg.config_timeout = 5
            resp = astara_client.update_config('fe80::2', 5000, config)

            self.mock_put.assert_called_once_with(
                'http://[fe80::2]:5000/v1/system/config',
                data=b'{"foo": "bar"}',
                headers={'Content-type': 'application/json'},
                timeout=5)
            self.assertEqual(config, resp)

    def test_update_config_failure(self):
        config = {'foo': 'bar'}

        self.mock_put.return_value.status_code = 500
        self.mock_put.return_value.text = 'error_text'

        with self.assertRaises(Exception):
            astara_client.update_config('fe80::2', 5000, config)

        self.mock_put.assert_called_once_with(
            'http://[fe80::2]:5000/v1/system/config',
            data=b'{"foo": "bar"}',
            headers={'Content-type': 'application/json'},
            timeout=90
        )

    def test_read_labels(self):
        self.mock_post.return_value.status_code = 200
        self.mock_post.return_value.json.return_value = {
            'labels': ['label1', 'label2']
        }
        resp = astara_client.read_labels('fe80::2', 5000)

        self.mock_post.assert_called_once_with(
            'http://[fe80::2]:5000/v1/firewall/labels',
            timeout=30
        )

        self.assertEqual(['label1', 'label2'], resp)
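
# test_mgt_url pins down the URL shape: IPv6 management addresses must be
# bracketed, IPv4 left bare. A sketch matching those expectations (the real
# _mgt_url may detect the address family differently):

def mgt_url(host, port, path):
    if ':' in host:  # crude IPv6 check, sufficient for these cases
        host = '[%s]' % host
    return 'http://%s:%s%s' % (host, port, path)

assert mgt_url('fe80::2', 5000, '/') == 'http://[fe80::2]:5000/'
assert mgt_url('192.168.1.1', 5000, '/') == 'http://192.168.1.1:5000/'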
@ -1,38 +0,0 @@
# Copyright (c) 2015 Akanda, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
from astara.api import keystone

from astara.test.unit import base

from oslo_config import cfg


class KeystoneTest(base.RugTestBase):
    def setUp(self):
        super(KeystoneTest, self).setUp()
        self.config(auth_region='foo_region')

    @mock.patch('keystoneclient.session.Session')
    @mock.patch('keystoneclient.auth.load_from_conf_options')
    def test_session(self, mock_load_auth, mock_session):
        fake_auth = mock.Mock()
        mock_load_auth.return_value = fake_auth
        fake_session = mock.Mock()
        mock_session.return_value = fake_session
        ks_session = keystone.KeystoneSession().session
        mock_load_auth.assert_called_with(cfg.CONF, 'keystone_authtoken')
        mock_session.assert_called_with(auth=fake_auth)
        self.assertEqual(fake_session, ks_session)
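
# The assertions above describe the session construction path: auth is loaded
# from the [keystone_authtoken] conf section, then wrapped in a keystoneclient
# Session. A sketch of a lazily-built session with that shape (hypothetical
# class; the real one is astara.api.keystone.KeystoneSession):

from keystoneclient import auth, session
from oslo_config import cfg

class LazySession(object):
    _session = None

    @property
    def session(self):
        if self._session is None:
            ks_auth = auth.load_from_conf_options(
                cfg.CONF, 'keystone_authtoken')
            self._session = session.Session(auth=ks_auth)
        return self._session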
@ -1,518 +0,0 @@
|
|||||||
# Copyright 2014 DreamHost, LLC
|
|
||||||
#
|
|
||||||
# Author: DreamHost, LLC
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
import six
|
|
||||||
import mock
|
|
||||||
import netaddr
|
|
||||||
|
|
||||||
from oslo_config import cfg
|
|
||||||
|
|
||||||
from astara.test.unit import base, fakes
|
|
||||||
from astara.api import neutron
|
|
||||||
|
|
||||||
|
|
||||||
class TestuNeutronModels(base.RugTestBase):
|
|
||||||
def test_router(self):
|
|
||||||
r = neutron.Router(
|
|
||||||
'1', 'tenant_id', 'name', True, 'ACTIVE', 'ext', ['int'], ['fip'])
|
|
||||||
self.assertEqual('1', r.id)
|
|
||||||
self.assertEqual('tenant_id', r.tenant_id)
|
|
||||||
self.assertEqual('name', r.name)
|
|
||||||
self.assertTrue(r.admin_state_up)
|
|
||||||
self.assertEqual('ACTIVE', r.status)
|
|
||||||
self.assertEqual('ext', r.external_port)
|
|
||||||
self.assertEqual(['fip'], r.floating_ips)
|
|
||||||
self.assertEqual(['int'], r.internal_ports)
|
|
||||||
self.assertEqual(set(['ext', 'int']), set(r.ports))
|
|
||||||
|
|
||||||
def test_router_from_dict(self):
|
|
||||||
p = {
|
|
||||||
'name': 'ext',
|
|
||||||
'id': 'ext',
|
|
||||||
'device_id': 'device_id',
|
|
||||||
'fixed_ips': [],
|
|
||||||
'mac_address': 'aa:bb:cc:dd:ee:ff',
|
|
||||||
'network_id': 'net_id',
|
|
||||||
'device_owner': 'network:router_gateway'
|
|
||||||
}
|
|
||||||
|
|
||||||
int_p = {
|
|
||||||
'name': 'int',
|
|
||||||
'id': 'int',
|
|
||||||
'device_id': 'device_id',
|
|
||||||
'fixed_ips': [],
|
|
||||||
'mac_address': 'aa:bb:cc:dd:ee:ee',
|
|
||||||
'network_id': 'net_id',
|
|
||||||
'device_owner': 'network:router_interface'
|
|
||||||
}
|
|
||||||
|
|
||||||
int_ha_p = {
|
|
||||||
'name': 'ha_int',
|
|
||||||
'id': 'ha_int',
|
|
||||||
'device_id': 'device_id',
|
|
||||||
'fixed_ips': [],
|
|
||||||
'mac_address': 'aa:bb:cc:dd:ee:ee',
|
|
||||||
'network_id': 'net_id',
|
|
||||||
'device_owner': 'network:ha_router_replicated_interface'
|
|
||||||
}
|
|
||||||
|
|
||||||
fip = {
|
|
||||||
'id': 'fip',
|
|
||||||
'floating_ip_address': '9.9.9.9',
|
|
||||||
'fixed_ip_address': '192.168.1.1'
|
|
||||||
}
|
|
||||||
|
|
||||||
d = {
|
|
||||||
'id': '1',
|
|
||||||
'tenant_id': 'tenant_id',
|
|
||||||
'name': 'name',
|
|
||||||
'admin_state_up': True,
|
|
||||||
'status': 'ACTIVE',
|
|
||||||
'ports': [p],
|
|
||||||
'_floatingips': [fip],
|
|
||||||
'_interfaces': [int_p, int_ha_p],
|
|
||||||
}
|
|
||||||
|
|
||||||
r = neutron.Router.from_dict(d)
|
|
||||||
|
|
||||||
self.assertEqual('1', r.id)
|
|
||||||
self.assertEqual('tenant_id', r.tenant_id)
|
|
||||||
self.assertEqual('name', r.name)
|
|
||||||
self.assertTrue(r.admin_state_up)
|
|
||||||
self.assertTrue(r.floating_ips) # just make sure this exists
|
|
||||||
self.assertEqual(
|
|
||||||
['ha_int', 'int'],
|
|
||||||
sorted([ip.id for ip in r.internal_ports]))
|
|
||||||
|
|
||||||
def test_router_eq(self):
|
|
||||||
r1 = neutron.Router(
|
|
||||||
'1', 'tenant_id', 'name', True, 'ext', ['int'], 'mgt')
|
|
||||||
r2 = neutron.Router(
|
|
||||||
'1', 'tenant_id', 'name', True, 'ext', ['int'], 'mgt')
|
|
||||||
|
|
||||||
self.assertEqual(r1, r2)
|
|
||||||
|
|
||||||
def test_router_ne(self):
|
|
||||||
r1 = neutron.Router(
|
|
||||||
'1', 'tenant_id', 'name', True, 'ext', ['int'], 'mgt')
|
|
||||||
r2 = neutron.Router(
|
|
||||||
'2', 'tenant_id', 'name', True, 'ext', ['int'], 'mgt')
|
|
||||||
|
|
||||||
self.assertNotEqual(r1, r2)
|
|
||||||
|
|
||||||
def test_subnet_model(self):
|
|
||||||
d = {
|
|
||||||
'id': '1',
|
|
||||||
'tenant_id': 'tenant_id',
|
|
||||||
'name': 'name',
|
|
||||||
'network_id': 'network_id',
|
|
||||||
'ip_version': 6,
|
|
||||||
'cidr': 'fe80::/64',
|
|
||||||
'gateway_ip': 'fe80::1',
|
|
||||||
'enable_dhcp': True,
|
|
||||||
'dns_nameservers': ['8.8.8.8', '8.8.4.4'],
|
|
||||||
'ipv6_ra_mode': 'slaac',
|
|
||||||
'host_routes': []
|
|
||||||
}
|
|
||||||
|
|
||||||
s = neutron.Subnet.from_dict(d)
|
|
||||||
|
|
||||||
self.assertEqual('1', s.id)
|
|
||||||
self.assertEqual('tenant_id', s.tenant_id)
|
|
||||||
self.assertEqual('name', s.name)
|
|
||||||
self.assertEqual('network_id', s.network_id)
|
|
||||||
self.assertEqual(6, s.ip_version)
|
|
||||||
self.assertEqual(netaddr.IPNetwork('fe80::/64'), s.cidr)
|
|
||||||
self.assertEqual(netaddr.IPAddress('fe80::1'), s.gateway_ip)
|
|
||||||
self.assertTrue(s.enable_dhcp, True)
|
|
||||||
self.assertEqual(['8.8.8.8', '8.8.4.4'], s.dns_nameservers)
|
|
||||||
self.assertEqual([], s.host_routes)
|
|
||||||
|
|
||||||
def test_subnet_gateway_none(self):
|
|
||||||
d = {
|
|
||||||
'id': '1',
|
|
||||||
'tenant_id': 'tenant_id',
|
|
||||||
'name': 'name',
|
|
||||||
'network_id': 'network_id',
|
|
||||||
'ip_version': 6,
|
|
||||||
'cidr': 'fe80::/64',
|
|
||||||
'gateway_ip': None,
|
|
||||||
'enable_dhcp': True,
|
|
||||||
'dns_nameservers': ['8.8.8.8', '8.8.4.4'],
|
|
||||||
'ipv6_ra_mode': 'slaac',
|
|
||||||
'host_routes': []
|
|
||||||
}
|
|
||||||
s = neutron.Subnet.from_dict(d)
|
|
||||||
self.assertEqual(netaddr.IPNetwork('fe80::/64'), s.cidr)
|
|
||||||
self.assertIsNone(s.gateway_ip)
|
|
||||||
|
|
||||||
def test_subnet_gateway_not_ip(self):
|
|
||||||
d = {
|
|
||||||
'id': '1',
|
|
||||||
'tenant_id': 'tenant_id',
|
|
||||||
'name': 'name',
|
|
||||||
'network_id': 'network_id',
|
|
||||||
'ip_version': 6,
|
|
||||||
'cidr': 'fe80::/64',
|
|
||||||
'gateway_ip': 'something-that-is-not-an-ip',
|
|
||||||
'enable_dhcp': True,
|
|
||||||
'dns_nameservers': ['8.8.8.8', '8.8.4.4'],
|
|
||||||
'ipv6_ra_mode': 'slaac',
|
|
||||||
'host_routes': []
|
|
||||||
}
|
|
||||||
s = neutron.Subnet.from_dict(d)
|
|
||||||
self.assertEqual(netaddr.IPNetwork('fe80::/64'), s.cidr)
|
|
||||||
self.assertIsNone(s.gateway_ip)
|
|
||||||
|
|
||||||
def test_subnet_cidr_none(self):
|
|
||||||
d = {
|
|
||||||
'id': '1',
|
|
||||||
'tenant_id': 'tenant_id',
|
|
||||||
'name': 'name',
|
|
||||||
'network_id': 'network_id',
|
|
||||||
'ip_version': 6,
|
|
||||||
'cidr': None,
|
|
||||||
'gateway_ip': 'fe80::1',
|
|
||||||
'enable_dhcp': True,
|
|
||||||
'dns_nameservers': ['8.8.8.8', '8.8.4.4'],
|
|
||||||
'ipv6_ra_mode': 'slaac',
|
|
||||||
'host_routes': []
|
|
||||||
}
|
|
||||||
try:
|
|
||||||
neutron.Subnet.from_dict(d)
|
|
||||||
except ValueError as e:
|
|
||||||
self.assertIn('Invalid CIDR', six.text_type(e))
|
|
||||||
|
|
||||||
def test_subnet_cidr_not_valid(self):
|
|
||||||
d = {
|
|
||||||
'id': '1',
|
|
||||||
'tenant_id': 'tenant_id',
|
|
||||||
'name': 'name',
|
|
||||||
'network_id': 'network_id',
|
|
||||||
'ip_version': 6,
|
|
||||||
'cidr': 'something-that-is-not-an-ip',
|
|
||||||
'gateway_ip': 'fe80::1',
|
|
||||||
'enable_dhcp': True,
|
|
||||||
'dns_nameservers': ['8.8.8.8', '8.8.4.4'],
|
|
||||||
'ipv6_ra_mode': 'slaac',
|
|
||||||
'host_routes': []
|
|
||||||
}
|
|
||||||
try:
|
|
||||||
neutron.Subnet.from_dict(d)
|
|
||||||
except ValueError as e:
|
|
||||||
self.assertIn('Invalid CIDR', six.text_type(e))
|
|
||||||
|
|
||||||
    def test_port_model(self):
        d = {
            'id': '1',
            'name': 'name',
            'device_id': 'device_id',
            'fixed_ips': [{'ip_address': '192.168.1.1', 'subnet_id': 'sub1'}],
            'mac_address': 'aa:bb:cc:dd:ee:ff',
            'network_id': 'net_id',
            'device_owner': 'test'
        }

        p = neutron.Port.from_dict(d)

        self.assertEqual('1', p.id)
        self.assertEqual('device_id', p.device_id)
        self.assertEqual('aa:bb:cc:dd:ee:ff', p.mac_address)
        self.assertEqual('test', p.device_owner)
        self.assertEqual(1, len(p.fixed_ips))

    def test_fixed_ip_model(self):
        d = {
            'subnet_id': 'sub1',
            'ip_address': '192.168.1.1'
        }

        fip = neutron.FixedIp.from_dict(d)

        self.assertEqual('sub1', fip.subnet_id)
        self.assertEqual(netaddr.IPAddress('192.168.1.1'), fip.ip_address)

    def test_floating_ip_model(self):
        d = {
            'id': 'a-b-c-d',
            'floating_ip_address': '9.9.9.9',
            'fixed_ip_address': '192.168.1.1'
        }

        fip = neutron.FloatingIP.from_dict(d)

        self.assertEqual('a-b-c-d', fip.id)
        self.assertEqual(netaddr.IPAddress('9.9.9.9'), fip.floating_ip)
        self.assertEqual(netaddr.IPAddress('192.168.1.1'), fip.fixed_ip)

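Taken together, the model tests above pin down the conversion contract of
neutron.Subnet.from_dict: address strings become netaddr objects, a missing
or unparseable gateway degrades to None, and a missing or unparseable CIDR
raises ValueError. A minimal sketch of those parsing rules, reconstructed
from the assertions (hypothetical, not the actual astara implementation):

import netaddr

def _parse_subnet_fields(d):
    # A None or malformed CIDR is fatal: the tests expect ValueError
    # with 'Invalid CIDR' in the message.
    try:
        cidr = netaddr.IPNetwork(d['cidr'])
    except (netaddr.AddrFormatError, TypeError, ValueError):
        raise ValueError('Invalid CIDR: %r' % (d.get('cidr'),))
    # A None or malformed gateway is tolerated and becomes None.
    try:
        gateway_ip = netaddr.IPAddress(d['gateway_ip'])
    except (netaddr.AddrFormatError, TypeError, ValueError):
        gateway_ip = None
    return cidr, gateway_ip
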
class FakeConf:
    admin_user = 'admin'
    admin_password = 'password'
    admin_tenant_name = 'admin'
    auth_url = 'http://127.0.0.1/'
    auth_strategy = 'keystone'
    auth_region = 'RegionOne'


class TestNeutronWrapper(base.RugTestBase):
    @mock.patch('astara.api.neutron.cfg')
    @mock.patch('astara.api.neutron.AstaraExtClientWrapper')
    @mock.patch('astara.api.neutron.importutils')
    def test_purge_management_interface(self, import_utils, ak_wrapper, cfg):
        conf = mock.Mock()
        driver = mock.Mock()
        import_utils.import_object.return_value = driver

        neutron_wrapper = neutron.Neutron(conf)
        neutron_wrapper.purge_management_interface()
        self.assertEqual(1, driver.get_device_name.call_count)
        self.assertEqual(1, driver.unplug.call_count)

    def test_clear_device_id(self):
        neutron_wrapper = neutron.Neutron(mock.Mock())
        neutron_wrapper.api_client.update_port = mock.Mock()
        neutron_wrapper.clear_device_id(mock.Mock(id='PORT1'))
        neutron_wrapper.api_client.update_port.assert_called_once_with(
            'PORT1', {'port': {'device_id': ''}}
        )

    @mock.patch('astara.api.neutron.AstaraExtClientWrapper')
    def test_neutron_router_status_update_error(self, client_wrapper):
        urs = client_wrapper.return_value.update_status
        urs.side_effect = RuntimeError('should be caught')
        conf = mock.Mock()
        neutron_wrapper = neutron.Neutron(conf)
        neutron_wrapper.update_router_status('router-id', 'new-status')

    @mock.patch('astara.api.neutron.AstaraExtClientWrapper')
    def _test_create_vrrp_port_success_hlpr(self, ext_enabled, client_wrapper):
        conf = mock.Mock()
        conf.neutron_port_security_extension_enabled = ext_enabled

        expected_port_data = {
            'port': {
                'name': 'ASTARA:VRRP:obj_id',
                'admin_state_up': True,
                'network_id': 'the_net_id',
                'fixed_ips': [],
                'security_groups': []
            }
        }

        if ext_enabled:
            expected_port_data['port']['port_security_enabled'] = False

        neutron_wrapper = neutron.Neutron(conf)
        api_client = neutron_wrapper.api_client
        with mock.patch.object(api_client, 'create_port') as create_port:
            with mock.patch.object(neutron.Port, 'from_dict') as port_from_d:
                retval = neutron_wrapper.create_vrrp_port(
                    'obj_id',
                    'the_net_id'
                )

                self.assertIs(retval, port_from_d.return_value)
                port_from_d.assert_called_once_with(
                    create_port.return_value.get()
                )
                create_port.assert_called_once_with(
                    expected_port_data
                )

    def test_create_vrrp_port_success(self):
        self._test_create_vrrp_port_success_hlpr(True)

    def test_create_vrrp_port_success_port_security_disabled(self):
        self._test_create_vrrp_port_success_hlpr(False)

    @mock.patch('astara.api.neutron.AstaraExtClientWrapper')
    def test_create_vrrp_port_error(self, client_wrapper):
        neutron_wrapper = neutron.Neutron(mock.Mock())
        api_client = neutron_wrapper.api_client
        with mock.patch.object(api_client, 'create_port') as create_port:
            create_port.return_value.get.return_value = None
            self.assertRaises(
                ValueError,
                neutron_wrapper.create_vrrp_port,
                'obj_id',
                'the_net_id'
            )

    @mock.patch('astara.api.neutron.AstaraExtClientWrapper')
    def test_delete_vrrp_ports(self, client_wrapper):
        conf = mock.Mock()
        neutron_wrapper = neutron.Neutron(conf)
        neutron_wrapper.api_client.list_ports = mock.Mock(
            return_value={
                'ports': [{'id': 'fake_port_id'}]
            }
        )
        neutron_wrapper.api_client.delete_port = mock.Mock()
        neutron_wrapper.delete_vrrp_port(object_id='foo')
        neutron_wrapper.api_client.list_ports.assert_called_with(
            name='ASTARA:VRRP:foo'
        )
        neutron_wrapper.api_client.delete_port.assert_called_with(
            'fake_port_id')

    @mock.patch('astara.api.neutron.AstaraExtClientWrapper')
    def test_delete_vrrp_ports_not_found(self, client_wrapper):
        conf = mock.Mock()
        neutron_wrapper = neutron.Neutron(conf)
        neutron_wrapper.api_client.list_ports = mock.Mock(
            return_value={'ports': []}
        )
        neutron_wrapper.api_client.delete_port = mock.Mock()
        neutron_wrapper.delete_vrrp_port(object_id='foo')
        neutron_wrapper.api_client.list_ports.assert_has_calls(
            [
                mock.call(name='ASTARA:VRRP:foo'),
                mock.call(name='AKANDA:VRRP:foo'),
            ]
        )
        self.assertFalse(neutron_wrapper.api_client.delete_port.called)

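The two delete_vrrp_port tests encode a naming migration: the wrapper looks
ports up under the current ASTARA: prefix first and retries the legacy
AKANDA: prefix only when nothing matches. A sketch of the implied lookup,
reconstructed from the assertions (hypothetical, not the shipped code):

def delete_vrrp_port(self, object_id, label='VRRP'):
    name = 'ASTARA:%s:%s' % (label, object_id)
    ports = self.api_client.list_ports(name=name).get('ports', [])
    if not ports:
        # fall back to the pre-rename port name
        name = 'AKANDA:%s:%s' % (label, object_id)
        ports = self.api_client.list_ports(name=name).get('ports', [])
    for port in ports:
        self.api_client.delete_port(port['id'])
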
class TestLocalServicePorts(base.RugTestBase):
    def setUp(self):
        super(TestLocalServicePorts, self).setUp()
        self.config(management_network_id='fake_mgtnet_network_id')
        self.config(management_subnet_id='fake_mgtnet_subnet_id')
        self.config(management_prefix='172.16.77.0/24')
        # the second override wins; the tests below expect the IPv6 prefix
        self.config(management_prefix='fdca:3ba5:a17a:acda::/64')
        self.neutron_wrapper = neutron.Neutron(cfg.CONF)
        self.fake_interface_driver = mock.Mock(
            plug=mock.Mock(),
            init_l3=mock.Mock(),
            get_device_name=mock.Mock())

    def test_ensure_local_service_port(self):
        with mock.patch.object(self.neutron_wrapper,
                               '_ensure_local_port') as ep:
            self.neutron_wrapper.ensure_local_service_port()
            ep.assert_called_with(
                'fake_mgtnet_network_id',
                'fake_mgtnet_subnet_id',
                'fdca:3ba5:a17a:acda::/64',
                'service',
            )

    @mock.patch('astara.api.neutron.ip_lib')
    @mock.patch('astara.api.neutron.uuid')
    @mock.patch('astara.api.neutron.importutils')
    def test__ensure_local_port_neutron_port_exists(self, fake_import,
                                                    fake_uuid, fake_ip_lib):
        fake_ip_lib.device_exists.return_value = True
        fake_uuid.uuid5.return_value = 'fake_host_id'
        fake_import.import_object.return_value = self.fake_interface_driver

        fake_port = fakes.fake_port()
        fake_port_dict = {
            'ports': [fake_port._neutron_port_dict],
        }
        fake_client = mock.Mock(
            list_ports=mock.Mock(return_value=fake_port_dict)
        )
        self.neutron_wrapper.api_client = fake_client
        self.fake_interface_driver.get_device_name.return_value = 'fake_dev'

        self.neutron_wrapper._ensure_local_port(
            'fake_network_id',
            'fake_subnet_id',
            'fdca:3ba5:a17a:acda:f816:3eff:fe2b::1/64',
            'service')

        exp_query = {
            'network_id': 'fake_network_id',
            'device_owner': 'network:astara',
            'name': 'ASTARA:RUG:SERVICE',
            'device_id': 'fake_host_id'
        }
        fake_client.list_ports.assert_called_with(**exp_query)
        self.fake_interface_driver.init_l3.assert_called_with(
            'fake_dev', ['fdca:3ba5:a17a:acda:f816:3eff:fe2b:ced0/64']
        )

    @mock.patch('astara.api.neutron.socket')
    @mock.patch('astara.api.neutron.ip_lib')
    @mock.patch('astara.api.neutron.uuid')
    @mock.patch('astara.api.neutron.importutils')
    def test__ensure_local_port_no_neutron_port(self, fake_import, fake_uuid,
                                                fake_ip_lib, fake_socket):
        fake_socket.gethostname.return_value = 'foo_hostname'
        fake_ip_lib.device_exists.return_value = True
        fake_uuid.uuid5.return_value = 'fake_host_id'
        fake_import.import_object.return_value = self.fake_interface_driver

        fake_created_port = {'port': fakes.fake_port().to_dict()}
        fake_client = mock.Mock(
            list_ports=mock.Mock(return_value={'ports': []}),
            create_port=mock.Mock(return_value=fake_created_port))
        self.neutron_wrapper.api_client = fake_client
        self.fake_interface_driver.get_device_name.return_value = 'fake_dev'

        self.neutron_wrapper._ensure_local_port(
            'fake_network_id',
            'fake_subnet_id',
            'fdca:3ba5:a17a:acda:f816:3eff:fe2b::1/64',
            'service')

        exp_port_create_dict = {'port': {
            'admin_state_up': True,
            'binding:host_id': 'foo_hostname',
            'device_id': 'fake_host_id',
            'device_owner': 'network:router_interface',
            'fixed_ips': [{'subnet_id': 'fake_subnet_id'}],
            'name': 'ASTARA:RUG:SERVICE',
            'network_id': 'fake_network_id'
        }}
        fake_client.create_port.assert_called_with(exp_port_create_dict)
        self.fake_interface_driver.init_l3.assert_called_with(
            'fake_dev', ['fdca:3ba5:a17a:acda:f816:3eff:fe2b:ced0/64']
        )

    @mock.patch('time.sleep')
    @mock.patch('astara.api.neutron.ip_lib')
    @mock.patch('astara.api.neutron.uuid')
    @mock.patch('astara.api.neutron.importutils')
    def test__ensure_local_port_plug(self, fake_import,
                                     fake_uuid, fake_ip_lib, fake_sleep):
        fake_ip_lib.device_exists.return_value = False
        fake_uuid.uuid5.return_value = 'fake_host_id'
        fake_import.import_object.return_value = self.fake_interface_driver

        fake_port = fakes.fake_port()
        fake_port_dict = {
            'ports': [fake_port._neutron_port_dict],
        }
        fake_client = mock.Mock(
            list_ports=mock.Mock(return_value=fake_port_dict)
        )
        self.neutron_wrapper.api_client = fake_client
        self.fake_interface_driver.get_device_name.return_value = 'fake_dev'

        self.neutron_wrapper._ensure_local_port(
            'fake_network_id',
            'fake_subnet_id',
            'fdca:3ba5:a17a:acda:f816:3eff:fe2b::1/64',
            'service')

        self.fake_interface_driver.plug.assert_called_with(
            'fake_network_id',
            fake_port.id,
            'fake_dev',
            fake_port.mac_address)
@@ -1,481 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


from datetime import datetime, timedelta

import mock
import copy
from novaclient import exceptions as novaclient_exceptions
from six.moves import builtins as __builtins__

from astara.api import nova
from astara.test.unit import base


class FakeNovaServer(object):
    id = '6f05906e-4538-11e5-bb22-5254003ff1ae'
    name = 'ak-796aafbc-4538-11e5-88e0-5254003ff1ae'
    image = {'id': '83031410-4538-11e5-abd2-5254003ff1ae'}
    status = 'ACTIVE'
    created = '2012-08-20T21:11:09Z'


class FakeModel(object):
    def __init__(self, id_, **kwargs):
        self.id = id_
        self.__dict__.update(kwargs)


fake_ext_port = FakeModel(
    '1',
    mac_address='aa:bb:cc:dd:ee:ff',
    network_id='ext-net',
    fixed_ips=[FakeModel('', ip_address='9.9.9.9', subnet_id='s2')])

fake_mgt_port = FakeModel(
    '2',
    mac_address='aa:bb:cc:cc:bb:aa',
    network_id='mgt-net')

fake_int_port = FakeModel(
    '3',
    mac_address='aa:aa:aa:aa:aa:aa',
    network_id='int-net',
    fixed_ips=[FakeModel('', ip_address='192.168.1.1', subnet_id='s1')])

fake_router = FakeModel(
    'router_id',
    tenant_id='tenant_id',
    external_port=fake_ext_port,
    management_port=fake_mgt_port,
    internal_ports=[fake_int_port],
    ports=[fake_mgt_port, fake_ext_port, fake_int_port])

fake_nova_instance = FakeModel(
    'instance_id',
    name='ak-appliance',
    status=None,
    image={'id': 'fake_image_uuid'},
    created='2012-08-20T21:11:09Z'
)


class FakeConf:
    admin_user = 'admin'
    admin_password = 'password'
    admin_tenant_name = 'admin'
    auth_url = 'http://127.0.0.1/'
    auth_strategy = 'keystone'
    auth_region = 'RegionOne'
    router_image_uuid = 'astara-image'
    router_instance_flavor = 1
    instance_provider = 'foo'
    endpoint_type = 'publicURL'


EXPECTED_USERDATA = """
#cloud-config

cloud_config_modules:
  - emit_upstart
  - set_hostname
  - locale
  - set-passwords
  - timezone
  - disable-ec2-metadata
  - runcmd

output: {all: '| tee -a /var/log/cloud-init-output.log'}

debug:
  - verbose: true

bootcmd:
  - /usr/local/bin/astara-configure-management aa:aa:aa:aa:aa:aa 192.168.1.1/64

users:
  - name: astara
    gecos: Astara
    groups: users
    shell: /bin/bash
    sudo: ALL=(ALL) NOPASSWD:ALL
    lock-passwd: true
    ssh-authorized-keys:
      - fake_key

final_message: "Astara appliance is running"
"""


def fake_make_ports_callback():
    return (fake_mgt_port, [fake_ext_port, fake_int_port])

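EXPECTED_USERDATA and the test_format_userdata case further down together
imply that nova.format_userdata renders the given port's MAC address and
first fixed IP into the astara-configure-management bootcmd and injects the
operator's SSH public key (or an empty string when the key file is
unreadable, per the _ssh_key tests). A hypothetical sketch of that
substitution, assuming a TEMPLATE string shaped like the fixture above:

def format_userdata(port):
    return TEMPLATE % {
        'mac_address': port.mac_address,
        'ip_address': '%s/64' % port.fixed_ips[0].ip_address,
        'ssh_public_key': _ssh_key(),  # '' when the key file is missing
    }
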
class TestNovaWrapper(base.RugTestBase):
    def setUp(self):
        super(TestNovaWrapper, self).setUp()
        self.addCleanup(mock.patch.stopall)
        patch = mock.patch('novaclient.client.Client')
        self.client = mock.Mock()
        self.client_cls = patch.start()
        self.client_cls.return_value = self.client

        self.fake_instance_provider = mock.Mock(create_instance=mock.Mock())
        fake_instance_provider_cls = mock.Mock(name='fake_provider_class')
        fake_instance_provider_cls.return_value = \
            self.fake_instance_provider
        get_instance_provider_p = mock.patch.object(
            nova, 'get_instance_provider').start()
        get_instance_provider_p.return_value = fake_instance_provider_cls

        self.nova = nova.Nova(FakeConf)

        self.INSTANCE_INFO = nova.InstanceInfo(
            instance_id='fake_instance_id',
            name='fake_name',
            image_uuid='fake_image_id',
            status='ACTIVE',
            last_boot=(datetime.utcnow() - timedelta(minutes=15)),
            ports=[fake_int_port, fake_ext_port, fake_mgt_port],
            management_port=fake_mgt_port,
        )

    def test_get_instance_for_obj(self):
        instance = mock.Mock()
        self.client.servers.list.return_value = [instance]

        expected = [
            mock.call.servers.list(search_opts={'name': 'foo_instance_name'})
        ]

        result = self.nova.get_instance_for_obj('foo_instance_name')
        self.client.assert_has_calls(expected)
        self.assertEqual(instance, result)

    def test_get_instance_for_obj_not_found(self):
        self.client.servers.list.return_value = []

        expected = [
            mock.call.servers.list(search_opts={'name': 'foo_instance_name'})
        ]

        result = self.nova.get_instance_for_obj('foo_instance_name')
        self.client.assert_has_calls(expected)
        self.assertIsNone(result)

    def test_get_instance_by_id(self):
        self.client.servers.get.return_value = 'fake_instance'
        expected = [
            mock.call.servers.get('instance_id')
        ]
        result = self.nova.get_instance_by_id('instance_id')
        self.client.servers.get.assert_has_calls(expected)
        self.assertEqual('fake_instance', result)

    def test_get_instance_by_id_not_found(self):
        not_found = novaclient_exceptions.NotFound('instance_id')
        self.client.servers.get.side_effect = not_found
        result = self.nova.get_instance_by_id('instance_id')
        self.assertIsNone(result)

    def test_destroy_instance(self):
        self.nova.destroy_instance(self.INSTANCE_INFO)
        self.client.servers.delete.assert_called_with(self.INSTANCE_INFO.id_)

    @mock.patch.object(nova, '_ssh_key')
    def test_format_userdata(self, fake_ssh_key):
        fake_ssh_key.return_value = 'fake_key'
        result = nova.format_userdata(fake_int_port)
        self.assertEqual(EXPECTED_USERDATA.strip(), result.strip())

    @mock.patch.object(__builtins__, 'open', autospec=True)
    def test_ssh_key(self, fake_open):
        mock_key_file = mock.MagicMock(spec=file)
        mock_key_file.read.return_value = 'fake-key'
        mock_key_file.__enter__.return_value = mock_key_file
        fake_open.return_value = mock_key_file
        result = nova._ssh_key()
        self.assertEqual('fake-key', result)

    @mock.patch.object(__builtins__, 'open', autospec=True)
    def test_ssh_key_sanitize(self, fake_open):
        mock_key_file = mock.MagicMock(spec=file)
        mock_key_file.read.return_value = ('''

fake-key with some newlines

''')
        mock_key_file.__enter__.return_value = mock_key_file
        fake_open.return_value = mock_key_file
        result = nova._ssh_key()
        self.assertEqual('fake-key with some newlines', result)

    @mock.patch.object(nova, 'LOG', autospec=True)
    @mock.patch.object(__builtins__, 'open', autospec=True)
    def test_ssh_key_not_found(self, fake_open, fake_log):
        fake_open.side_effect = IOError
        result = nova._ssh_key()
        self.assertEqual('', result)
        self.assertTrue(fake_log.warning.called)

    @mock.patch.object(nova.Nova, 'get_instance_for_obj', return_value=None)
    def test_boot_instance(self, fake_get):
        self.fake_instance_provider.create_instance.return_value = \
            'fake_new_instance_info'
        res = self.nova.boot_instance(
            resource_type='router',
            prev_instance_info=None,
            name='foo_instance_name',
            image_uuid='foo_image',
            flavor='foo_flavor',
            make_ports_callback='foo_callback',
        )
        self.fake_instance_provider.create_instance.assert_called_with(
            resource_type='router',
            name='foo_instance_name',
            image_uuid='foo_image',
            flavor='foo_flavor',
            make_ports_callback='foo_callback',
        )
        fake_get.assert_called_with('foo_instance_name')
        self.assertEqual('fake_new_instance_info', res)

    @mock.patch.object(nova.Nova, 'get_instance_for_obj')
    def test_boot_instance_exists(self, fake_get):
        fake_instance = fake_nova_instance
        fake_instance.id = 'existing_instance_id'
        fake_instance.status = 'SHUTOFF'
        fake_get.return_value = fake_instance
        self.fake_instance_provider.create_instance.return_value = \
            'fake_new_instance_info'
        res = self.nova.boot_instance(
            resource_type='router',
            prev_instance_info=None,
            name='foo_instance_name',
            image_uuid='foo_image',
            flavor='foo_flavor',
            make_ports_callback='foo_callback',
        )
        fake_get.assert_called_with('foo_instance_name')
        self.client.servers.delete.assert_called_with('existing_instance_id')
        self.assertIsNone(res)

    @mock.patch.object(nova.Nova, 'get_instance_for_obj')
    def test_boot_instance_exists_build(self, fake_get):
        fake_instance = fake_nova_instance
        fake_instance.id = 'existing_instance_id'
        fake_instance.status = 'BUILD'
        fake_get.return_value = fake_instance
        self.fake_instance_provider.create_instance.return_value = \
            'fake_new_instance_info'
        res = self.nova.boot_instance(
            resource_type='router',
            prev_instance_info=None,
            name='foo_instance_name',
            image_uuid='foo_image',
            flavor='foo_flavor',
            make_ports_callback='foo_callback',
        )
        fake_get.assert_called_with('foo_instance_name')
        self.assertIsInstance(res, nova.InstanceInfo)
        self.assertEqual('existing_instance_id', res.id_)
        self.assertEqual('ak-appliance', res.name)
        self.assertEqual('fake_image_uuid', res.image_uuid)

    @mock.patch.object(nova.Nova, 'get_instance_by_id', return_value=None)
    def test_boot_instance_prev_inst(self, fake_get):
        self.fake_instance_provider.create_instance.return_value = \
            'fake_new_instance_info'
        res = self.nova.boot_instance(
            resource_type='router',
            prev_instance_info=self.INSTANCE_INFO,
            name='foo_instance_name',
            image_uuid='foo_image',
            flavor='foo_flavor',
            make_ports_callback='foo_callback',
        )
        fake_get.assert_called_with(self.INSTANCE_INFO.id_)
        self.fake_instance_provider.create_instance.assert_called_with(
            resource_type='router',
            name='foo_instance_name',
            image_uuid='foo_image',
            flavor='foo_flavor',
            make_ports_callback='foo_callback',
        )
        self.assertEqual('fake_new_instance_info', res)

    @mock.patch.object(nova.Nova, 'get_instance_by_id')
    def test_boot_instance_exists_prev_inst(self, fake_get):
        fake_instance = fake_nova_instance
        fake_instance.id = 'existing_instance_id'
        fake_instance.status = 'SHUTOFF'
        fake_get.return_value = fake_instance
        self.fake_instance_provider.create_instance.return_value = \
            'fake_new_instance_info'
        res = self.nova.boot_instance(
            resource_type='router',
            prev_instance_info=self.INSTANCE_INFO,
            name='foo_instance_name',
            image_uuid='foo_image',
            flavor='foo_flavor',
            make_ports_callback='foo_callback',
        )
        fake_get.assert_called_with(self.INSTANCE_INFO.id_)
        self.client.servers.delete.assert_called_with('existing_instance_id')
        self.assertIsNone(res)

    @mock.patch.object(nova.Nova, 'get_instance_for_obj')
    def test_boot_instance_exists_build_prev_inst(self, fake_get):
        fake_instance = fake_nova_instance
        fake_instance.id = 'existing_instance_id'
        fake_instance.status = 'BUILD'
        fake_get.return_value = fake_instance
        self.fake_instance_provider.create_instance.return_value = \
            'fake_new_instance_info'
        res = self.nova.boot_instance(
            resource_type='router',
            prev_instance_info=None,
            name='foo_instance_name',
            image_uuid='foo_image',
            flavor='foo_flavor',
            make_ports_callback='foo_callback',
        )
        # assert we get back the same instance_info but with updated status
        self.assertEqual('BUILD', res.nova_status)
        self.assertEqual(fake_instance.id, res.id_)
        self.assertIsInstance(res, nova.InstanceInfo)

    def test_from_nova(self):
        fake_server = FakeNovaServer()
        last_boot = datetime.strptime(
            fake_server.created, "%Y-%m-%dT%H:%M:%SZ")
        instance_info = nova.InstanceInfo.from_nova(fake_server)
        self.assertEqual(fake_server.id, instance_info.id_)
        self.assertEqual(fake_server.name, instance_info.name)
        self.assertEqual(fake_server.image['id'], instance_info.image_uuid)
        self.assertEqual(last_boot, instance_info.last_boot)

    def test_booting_false(self):
        self.INSTANCE_INFO.nova_status = 'ACTIVE'
        self.assertFalse(self.INSTANCE_INFO.booting)

    def test_booting_true(self):
        self.INSTANCE_INFO.nova_status = 'BUILDING'
        self.assertTrue(self.INSTANCE_INFO.booting)

    def test_no_provider_not_none(self):
        NoProviderConf = copy.deepcopy(FakeConf)
        del NoProviderConf.instance_provider
        self.nova = nova.Nova(NoProviderConf)
        self.assertIsNotNone(self.nova.instance_provider.create_instance)


class TestOnDemandInstanceProvider(base.RugTestBase):
    def setUp(self):
        super(TestOnDemandInstanceProvider, self).setUp()
        self.addCleanup(mock.patch.stopall)
        patch = mock.patch('novaclient.client.Client')
        self.client = mock.Mock()
        self.client_cls = patch.start()
        self.client_cls.return_value = self.client

    @mock.patch.object(nova, 'format_userdata')
    def test_create_instance(self, mock_userdata):
        provider = nova.OnDemandInstanceProvider(self.client)
        self.client.servers.create.return_value = fake_nova_instance
        mock_userdata.return_value = 'fake_userdata'
        expected = [
            mock.call.servers.create(
                'ak-instance-name',
                nics=[{'port-id': '2',
                       'net-id': 'mgt-net',
                       'v4-fixed-ip': ''},
                      {'port-id': '1',
                       'net-id': 'ext-net',
                       'v4-fixed-ip': ''},
                      {'port-id': '3',
                       'net-id': 'int-net',
                       'v4-fixed-ip': ''}],
                flavor=1,
                image='GLANCE-IMAGE-123',
                config_drive=True,
                userdata='fake_userdata',
            )
        ]

        provider.create_instance(
            'router', 'ak-instance-name', 'GLANCE-IMAGE-123',
            1, fake_make_ports_callback)
        self.client.assert_has_calls(expected)


class TestPezInstanceProvider(base.RugTestBase):
    def setUp(self):
        super(TestPezInstanceProvider, self).setUp()
        self.addCleanup(mock.patch.stopall)
        patch = mock.patch('novaclient.client.Client')
        self.nova_client = mock.Mock()
        self.nova_client_cls = patch.start()
        self.nova_client_cls.return_value = self.nova_client

        patch = mock.patch('astara.pez.rpcapi.AstaraPezAPI')
        self.rpc_client = mock.Mock()
        self.rpc_client_cls = patch.start()
        self.rpc_client_cls.return_value = self.rpc_client

    @mock.patch.object(nova, 'format_userdata')
    def test_create_instance(self, mock_userdata):
        provider = nova.PezInstanceProvider(self.nova_client)

        inst_port = {
            'id': '1',
            'name': 'name',
            'device_id': 'device_id',
            'fixed_ips': [{'ip_address': '192.168.1.1', 'subnet_id': 'sub1'}],
            'mac_address': 'aa:bb:cc:dd:ee:ff',
            'network_id': 'net_id',
            'device_owner': 'test'
        }
        mgt_port = {
            'id': '2',
            'name': 'name',
            'device_id': 'device_id',
            'fixed_ips': [{'ip_address': '192.168.1.10', 'subnet_id': 'sub1'}],
            'mac_address': 'aa:bb:cc:dd:ee:fa',
            'network_id': 'net_id2',
            'device_owner': 'test'
        }

        fake_server = FakeNovaServer()
        self.nova_client.servers.get.return_value = fake_server
        fake_pez_instance = {
            'id': fake_server.id,
            'management_port': mgt_port,
            'instance_ports': [inst_port],
        }
        self.rpc_client.get_instance.return_value = fake_pez_instance
        res = provider.create_instance(
            'router', 'ak-instance-name', 'GLANCE-IMAGE-123',
            1, fake_make_ports_callback)
        self.rpc_client.get_instance.assert_called_with(
            'router', 'ak-instance-name',
            {'network_id': 'mgt-net', 'id': '2'},
            [{'network_id': 'ext-net', 'id': '1'},
             {'network_id': 'int-net', 'id': '3'}])
        self.nova_client.servers.get.assert_called_with(fake_server.id)
        exp_instance_info = nova.InstanceInfo.from_nova(fake_server)
        self.assertEqual(exp_instance_info.id_, res.id_)
@@ -1,242 +0,0 @@
import unittest
import socket

from six.moves import range
from cliff import commandmanager
import mock
from oslo_log import log
import webob

from astara.api import rug


try:
    import blessed  # noqa
    HAS_BLESSED = True
except ImportError:
    HAS_BLESSED = False


class TestRugAPI(unittest.TestCase):

    def setUp(self):
        ctl = mock.Mock()
        ctl.return_value.command_manager = commandmanager.CommandManager(
            'astara.cli'
        )
        self.api = rug.RugAPI(ctl)
        self.ctl = ctl.return_value

    @unittest.skipUnless(HAS_BLESSED, "blessed not available")
    def test_browse(self):
        resp = self.api(webob.Request({
            'REQUEST_METHOD': 'PUT',
            'PATH_INFO': '/browse/'
        }))
        assert isinstance(resp, webob.exc.HTTPNotImplemented)
        assert not self.ctl.run.called

    def test_ssh(self):
        resp = self.api(webob.Request({
            'REQUEST_METHOD': 'PUT',
            'PATH_INFO': '/ssh/ROUTER123/'
        }))
        assert isinstance(resp, webob.exc.HTTPNotImplemented)
        assert not self.ctl.run.called

    def test_poll(self):
        self.api(webob.Request({
            'REQUEST_METHOD': 'PUT',
            'PATH_INFO': '/poll/'
        }))
        self.ctl.run.assert_called_with(
            ['--debug', 'poll']
        )

    def test_missing_argument(self):
        # argparse failures (e.g., a missing router ID) raise a SystemExit
        # because cliff's behavior is to print a help message and sys.exit()
        self.ctl.run.side_effect = SystemExit
        resp = self.api(webob.Request({
            'REQUEST_METHOD': 'PUT',
            'PATH_INFO': '/router/debug/'
        }))
        assert isinstance(resp, webob.exc.HTTPBadRequest)
        self.ctl.run.assert_called_with(
            ['--debug', 'router', 'debug']
        )

    def test_router_debug(self):
        self.api(webob.Request({
            'REQUEST_METHOD': 'PUT',
            'PATH_INFO': '/router/debug/ROUTER123'
        }))
        self.ctl.run.assert_called_with(
            ['--debug', 'router', 'debug', 'ROUTER123']
        )

    def test_router_manage(self):
        self.api(webob.Request({
            'REQUEST_METHOD': 'PUT',
            'PATH_INFO': '/router/manage/ROUTER123'
        }))
        self.ctl.run.assert_called_with(
            ['--debug', 'router', 'manage', 'ROUTER123']
        )

    def test_router_update(self):
        self.api(webob.Request({
            'REQUEST_METHOD': 'PUT',
            'PATH_INFO': '/router/update/ROUTER123'
        }))
        self.ctl.run.assert_called_with(
            ['--debug', 'router', 'update', 'ROUTER123']
        )

    def test_router_rebuild(self):
        self.api(webob.Request({
            'REQUEST_METHOD': 'PUT',
            'PATH_INFO': '/router/rebuild/ROUTER123'
        }))
        self.ctl.run.assert_called_with(
            ['--debug', 'router', 'rebuild', 'ROUTER123']
        )

    def test_tenant_debug(self):
        self.api(webob.Request({
            'REQUEST_METHOD': 'PUT',
            'PATH_INFO': '/tenant/debug/TENANT123'
        }))
        self.ctl.run.assert_called_with(
            ['--debug', 'tenant', 'debug', 'TENANT123']
        )

    def test_tenant_manage(self):
        self.api(webob.Request({
            'REQUEST_METHOD': 'PUT',
            'PATH_INFO': '/tenant/manage/TENANT123'
        }))
        self.ctl.run.assert_called_with(
            ['--debug', 'tenant', 'manage', 'TENANT123']
        )

    def test_workers_debug(self):
        self.api(webob.Request({
            'REQUEST_METHOD': 'PUT',
            'PATH_INFO': '/workers/debug/'
        }))
        self.ctl.run.assert_called_with(
            ['--debug', 'workers', 'debug']
        )

    def test_invalid_router_action(self):
        resp = self.api(webob.Request({
            'REQUEST_METHOD': 'PUT',
            'PATH_INFO': '/router/breakdance/ROUTER123'
        }))
        assert isinstance(resp, webob.exc.HTTPNotFound)
        assert not self.ctl.run.called

    def test_multiple_calls(self):
        for i in range(10):
            self.api(webob.Request({
                'REQUEST_METHOD': 'PUT',
                'PATH_INFO': '/poll/'
            }))

        assert self.ctl.run.call_args_list == [
            mock.call(['--debug', 'poll'])
            for _ in range(10)
        ]

    def test_invalid_request_method(self):
        resp = self.api(webob.Request({
            'REQUEST_METHOD': 'GET',
            'PATH_INFO': '/poll/'
        }))
        assert isinstance(resp, webob.exc.HTTPMethodNotAllowed)
        assert not self.ctl.run.called

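The TestRugAPI cases above collectively specify the HTTP-to-CLI dispatch:
each PUT path is split into a cliff argv prefixed with --debug, unknown
commands 404 before anything runs, non-PUT requests get 405, and a
SystemExit from argparse surfaces as 400. A hypothetical sketch of that
dispatch (reconstructed from the assertions, not the shipped RugAPI):

def __call__(self, req):
    if req.method != 'PUT':
        return webob.exc.HTTPMethodNotAllowed()
    args = [part for part in req.path_info.split('/') if part]
    if not self._is_known_command(args):  # assumed lookup helper
        return webob.exc.HTTPNotFound()
    try:
        return self.ctl.run(['--debug'] + args)
    except SystemExit:
        # argparse prints its help and exits; report a client error
        return webob.exc.HTTPBadRequest()
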
class TestRugAPIServer(unittest.TestCase):

    @mock.patch('eventlet.listen')
    @mock.patch('eventlet.wsgi')
    def test_bind_and_serve_ipv4(self, wsgi, listen):
        sock = listen.return_value
        server = rug.RugAPIServer()
        server.run('10.0.0.250', 44250)
        listen.assert_called_with(
            ('10.0.0.250', 44250),
            family=socket.AF_INET,
            backlog=128
        )
        args, kwargs = wsgi.server.call_args
        assert all([
            args[0] == sock,
            isinstance(args[1], rug.RugAPI),
            kwargs['custom_pool'] == server.pool,
            isinstance(kwargs['log'], log.KeywordArgumentAdapter)
        ])

    @mock.patch('eventlet.listen')
    @mock.patch('eventlet.wsgi')
    def test_bind_and_serve_ipv6(self, wsgi, listen):
        sock = listen.return_value
        server = rug.RugAPIServer()
        server.run('fdca:3ba5:a17a:acda::1', 44250)
        listen.assert_called_with(
            ('fdca:3ba5:a17a:acda::1', 44250),
            family=socket.AF_INET6,
            backlog=128
        )
        args, kwargs = wsgi.server.call_args
        assert all([
            args[0] == sock,
            isinstance(args[1], rug.RugAPI),
            kwargs['custom_pool'] == server.pool,
            isinstance(kwargs['log'], log.KeywordArgumentAdapter)
        ])

    @mock.patch('eventlet.listen')
    @mock.patch('eventlet.sleep', lambda x: None)
    def test_fail_to_bind(self, listen):
        listen.side_effect = socket.error(
            99, "Can't assign requested address"
        )
        server = rug.RugAPIServer()
        self.assertRaises(
            RuntimeError,
            server.run,
            'fdca:3ba5:a17a:acda::1',
            44250,
        )
        assert listen.call_args_list == [
            mock.call(('fdca:3ba5:a17a:acda::1', 44250),
                      family=socket.AF_INET6, backlog=128)
            for i in range(5)
        ]

    @mock.patch('eventlet.listen')
    @mock.patch('eventlet.wsgi')
    @mock.patch('eventlet.sleep', lambda x: None)
    def test_bind_fails_on_first_attempt(self, wsgi, listen):
        sock = mock.Mock()
        listen.side_effect = [
            socket.error(99, "Can't assign requested address"),
            sock
        ]
        server = rug.RugAPIServer()
        server.run('fdca:3ba5:a17a:acda::1', 44250)
        assert listen.call_args_list == [
            mock.call(('fdca:3ba5:a17a:acda::1', 44250),
                      family=socket.AF_INET6, backlog=128)
            for i in range(2)  # fails the first time, succeeds the second
        ]
        args, kwargs = wsgi.server.call_args
        assert all([
            args[0] == sock,
            isinstance(args[1], rug.RugAPI),
            kwargs['custom_pool'] == server.pool,
            isinstance(kwargs['log'], log.KeywordArgumentAdapter)
        ])
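The two bind-failure tests pin down a bounded retry loop: up to five
eventlet.listen attempts with an eventlet.sleep between them, and a
RuntimeError once every attempt has failed. A minimal sketch of that loop
(a hypothetical reconstruction; names other than eventlet.listen and
eventlet.sleep are assumptions):

for i in range(5):
    try:
        sock = eventlet.listen((ip, port), family=family, backlog=128)
        break
    except socket.error:
        eventlet.sleep(1)
else:
    raise RuntimeError('unable to bind to %s:%d' % (ip, port))
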
@@ -1,40 +0,0 @@
# Copyright 2015 Akanda, Inc.
#
# Author: Akanda, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import mock
import testtools

from oslo_config import cfg
from oslo_config import fixture as config_fixture


class RugTestBase(testtools.TestCase):
    def setUp(self):
        super(RugTestBase, self).setUp()
        self.test_config = self.useFixture(config_fixture.Config(cfg.CONF))
        self.argv = []
        cfg.CONF.import_opt('host', 'astara.main')

        self.time_patch = mock.patch('time.sleep')
        self.time_mock = self.time_patch.start()
        self.addCleanup(mock.patch.stopall)

    def config(self, **kw):
        """Override config options for a test."""
        group = kw.pop('group', None)
        for k, v in kw.items():
            cfg.CONF.set_override(k, v, group)
@@ -1,15 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
@@ -1,109 +0,0 @@
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012, Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Dan Wendlandt, Nicira, Inc.

import fixtures
import mock
import testtools

from astara.common.linux import utils


class AgentUtilsExecuteTest(testtools.TestCase):
    def setUp(self):
        super(AgentUtilsExecuteTest, self).setUp()
        self.root_helper = "echo"
        self.test_file = self.useFixture(
            fixtures.TempDir()).join("test_execute.tmp")
        open(self.test_file, 'w').close()

    def test_without_helper(self):
        result = utils.execute(["ls", self.test_file])
        self.assertEqual("%s\n" % self.test_file, result)

    def test_with_helper(self):
        result = utils.execute(["ls", self.test_file],
                               self.root_helper)
        self.assertEqual("ls %s\n" % self.test_file, result)

    def test_stderr(self):
        stdout, stderr = utils.execute(["ls", self.test_file],
                                       return_stderr=True)
        self.assertEqual("%s\n" % self.test_file, stdout)
        self.assertEqual("", stderr)

    def test_check_exit_code(self):
        stdout = utils.execute(["ls", self.test_file[:-1]],
                               check_exit_code=False)
        self.assertEqual("", stdout)
        self.assertRaises(RuntimeError, utils.execute,
                          ["ls", self.test_file[:-1]])

    def test_process_input(self):
        result = utils.execute(["cat"], process_input="%s\n" %
                               self.test_file[:-1])
        self.assertEqual("%s\n" % self.test_file[:-1], result)

    def test_with_addl_env(self):
        result = utils.execute(["ls", self.test_file],
                               addl_env={'foo': 'bar'})
        self.assertEqual("%s\n" % self.test_file, result)


class AgentUtilsGetInterfaceMAC(testtools.TestCase):
    def test_get_interface_mac(self):
        expect_val = '01:02:03:04:05:06'
        with mock.patch('fcntl.ioctl') as ioctl:
            ioctl.return_value = ''.join(['\x00' * 18,
                                          '\x01\x02\x03\x04\x05\x06',
                                          '\x00' * 232])
            actual_val = utils.get_interface_mac('eth0')
        self.assertEqual(expect_val, actual_val)


class AgentUtilsReplaceFile(testtools.TestCase):
    def test_replace_file(self):
        # make file to replace
        with mock.patch('tempfile.NamedTemporaryFile') as ntf:
            ntf.return_value.name = '/baz'
            with mock.patch('os.chmod') as chmod:
                with mock.patch('os.rename') as rename:
                    utils.replace_file('/foo', 'bar')

                    expected = [mock.call('w+', dir='/', delete=False),
                                mock.call().write('bar'),
                                mock.call().close()]

                    ntf.assert_has_calls(expected)
                    chmod.assert_called_once_with('/baz', 0644)
                    rename.assert_called_once_with('/baz', '/foo')
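The replace_file test fixes the classic atomic-write pattern in place. A
hypothetical reconstruction matching the mocked calls (temp file written in
the destination directory, chmod, then rename over the target):

import os
import tempfile

def replace_file(file_name, data):
    base_dir = os.path.dirname(os.path.abspath(file_name))
    tmp_file = tempfile.NamedTemporaryFile('w+', dir=base_dir, delete=False)
    tmp_file.write(data)
    tmp_file.close()
    os.chmod(tmp_file.name, 0o644)
    # rename is atomic on POSIX, so readers never see a partial file
    os.rename(tmp_file.name, file_name)
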
@@ -1,44 +0,0 @@
# Copyright 2015 Akanda, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock

from astara.common import config
from astara.test.unit import base


class TestConfig(base.RugTestBase):
    def _test_get_best_config_path(self, original, expected, files_exist=()):
        def mock_isfile_f(f):
            return f in files_exist

        with mock.patch('os.path.isfile', side_effect=mock_isfile_f):
            self.assertEqual(
                expected,
                config.get_best_config_path(original)
            )

    def test_get_best_config_path_preferred(self):
        self._test_get_best_config_path(
            config.PREFERRED_CONFIG_FILEPATH,
            config.PREFERRED_CONFIG_FILEPATH
        )

    def test_get_best_config_path_legacy(self):
        self._test_get_best_config_path(
            config.PREFERRED_CONFIG_FILEPATH,
            '/etc/akanda/rug.ini',
            ('/etc/akanda/rug.ini',)
        )
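These two cases imply a simple fallback rule: return the preferred astara
config path unless it is absent and the legacy akanda file exists. A
hypothetical sketch of get_best_config_path consistent with the tests
(the LEGACY_CONFIG_FILEPATH constant name is an assumption):

import os.path

LEGACY_CONFIG_FILEPATH = '/etc/akanda/rug.ini'

def get_best_config_path(requested):
    if requested != PREFERRED_CONFIG_FILEPATH:
        return requested
    if not os.path.isfile(requested) and os.path.isfile(LEGACY_CONFIG_FILEPATH):
        return LEGACY_CONFIG_FILEPATH
    return requested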