StarlingX open source release updates
Signed-off-by: Scott Little <scott.little@windriver.com>
commit e82c7b4336
Changed files:

.coveragerc
.coveragerc_xml
.gitignore
.mailmap
.testr.conf
CONTRIBUTING.rst
CONTRIBUTORS.wrs
HACKING.rst
LICENSE
MANIFEST.in
README.rst
README_DC
babel.cfg
dcmanager/
    __init__.py
    api/ (README.rst, __init__.py, api_config.py, app.py, controllers/, enforcer.py)
    cmd/
    common/ (__init__.py, config.py, consts.py, context.py, exceptions.py, i18n.py,
             manager.py, messaging.py, policy.py, serializer.py, utils.py, version.py)
    config-generator.conf
    db/
    drivers/
    manager/ (README.rst, __init__.py, patch_audit_manager.py, scheduler.py, service.py,
              subcloud_audit_manager.py, subcloud_manager.py, sw_update_manager.py)
    objects/
    rpc/
    tests/ (__init__.py, base.py, data/ipv6_R5_install)
12 .coveragerc Normal file
@@ -0,0 +1,12 @@
[run]
branch = True
parallel = True
source =
    dcmanager
    dcorch

[report]
ignore_errors = True
omit =
    */tests/*
11 .coveragerc_xml Normal file
@@ -0,0 +1,11 @@
[run]
branch = True
parallel = True
source =
    distributedcloud

[report]
ignore_errors = True
omit =
    */tests/*
57 .gitignore vendored Normal file
@@ -0,0 +1,57 @@
*.py[cod]

# C extensions
*.so

# Packages
*.egg
*.egg-info
dist
build
eggs
parts
var
sdist
develop-eggs
.installed.cfg
lib
lib64

# Installer logs
pip-log.txt

# Unit test / coverage reports
.coverage
cover
coverage.xml
.coverage\.*
.current.cfg
.tox
nosetests.xml
.testrepository
.venv

# Translations
*.mo

# Mr Developer
.mr.developer.cfg
.project
.pydevproject

# Complexity
output/*.html
output/*/index.html

# Sphinx
doc/build

# pbr generates these
AUTHORS
ChangeLog

# Editors
*~
.*.swp
.*sw?
3 .mailmap Normal file
@@ -0,0 +1,3 @@
# Format is:
# <preferred e-mail> <other e-mail 1>
Dimitri Mazmanov <dimitri.mazmanov@ericsson.com>
14 .testr.conf Normal file
@@ -0,0 +1,14 @@
# The sed command is required for coverage to work.
# otherwise testr will pass --source distributedcloud when invoking coverage
# which breaks the source definitions in the .coveragerc file
[DEFAULT]
test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1}
    OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1}
    OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60}
    PYTHON=$(echo ${PYTHON:-python} | sed 's/--source distributedcloud//g')
    ${PYTHON} -m subunit.run discover -s dcmanager $LISTOPT $IDOPTION
    ${PYTHON} -m subunit.run discover -s dcorch $LISTOPT $IDOPTION
test_id_option=--load-list $IDFILE
test_list_option=--list
test_run_concurrency=echo 5
14 CONTRIBUTING.rst Normal file
@@ -0,0 +1,14 @@
If you would like to contribute to the development of OpenStack, you must
follow the steps in this page:

   http://docs.openstack.org/infra/manual/developers.html

If you already have a good understanding of how the system works and your
OpenStack accounts are set up, you can skip to the development workflow
section of this documentation to learn how changes to OpenStack should be
submitted for review via the Gerrit tool:

   http://docs.openstack.org/infra/manual/developers.html#development-workflow

Pull requests submitted through GitHub will be ignored.
20 CONTRIBUTORS.wrs Normal file
@@ -0,0 +1,20 @@
The following contributors from Wind River have developed the seed code in this
repository. We look forward to community collaboration and contributions for
additional features, enhancements and refactoring.
Contributors:
=============
Al Bailey
Alex Kozyrev
Andy Ning
Angie Wang
Bart Wensley
Chris Friesen
John Kung
Kam Nasim
Kevin Smith
Lachlan Plant
Saju Oommen
Stefan Dinescu
Tao Liu
Tyler Smith
4 HACKING.rst Normal file
@@ -0,0 +1,4 @@
DistributedCloud Style Commandments
===============================================

Read the OpenStack Style Commandments http://docs.openstack.org/developer/hacking/
176 LICENSE Normal file
@@ -0,0 +1,176 @@

Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
9 MANIFEST.in Normal file
@@ -0,0 +1,9 @@
include AUTHORS
include ChangeLog
include dcmanager/db/sqlalchemy/migrate_repo/migrate.cfg
include dcorch/db/sqlalchemy/migrate_repo/migrate.cfg
exclude .gitignore
exclude .gitreview
exclude .current.cfg

global-exclude *.pyc
6 README.rst Normal file
@@ -0,0 +1,6 @@
DistributedCloud
===============================

Wind River's Distributed Cloud system supports an edge computing solution by providing
central management and orchestration for a geographically distributed network of Titanium
Cloud systems.
58 README_DC Normal file
@@ -0,0 +1,58 @@
# These are instructions for building and installing Distributed Cloud

# Packages and configure script are now included in the load

# Run configure script (on target - requires root privileges)

configure_dc.sh

# To use DC Manager CLI (these are just examples)

source /etc/nova/openrc
# Add subclouds (description and location are optional)
dcmanager subcloud add --name=subcloud1 \
  --description="subcloud1 description" \
  --location="subcloud 1 location" \
  --management-subnet=192.168.101.0/24 \
  --management-start-ip=192.168.101.2 \
  --management-end-ip=192.168.101.50 \
  --management-gateway-ip=192.168.101.1 \
  --systemcontroller-gateway-ip=192.168.204.101
dcmanager subcloud add --name=subcloud2 \
  --management-subnet=192.168.102.0/24 \
  --management-start-ip=192.168.102.2 \
  --management-end-ip=192.168.102.50 \
  --management-gateway-ip=192.168.102.1 \
  --systemcontroller-gateway-ip=192.168.204.101
# List all subclouds
dcmanager subcloud list
# Show a single subcloud
dcmanager subcloud show 1
dcmanager subcloud show subcloud2
# Update subcloud description or location
dcmanager subcloud update 1 \
  --description="new subcloud1 description" \
  --location="new subcloud1 location"
# Generate config for a subcloud (additional items are optional)
dcmanager subcloud generate-config 1 \
  --management-interface-port=enp0s8 \
  --management-interface-mtu=1500 \
  --oam-subnet=10.10.10.0/24 \
  --oam-gateway-ip=10.10.10.1 \
  --oam-floating-ip=10.10.10.12 \
  --oam-unit-0-ip=10.10.10.13 \
  --oam-unit-1-ip=10.10.10.14 \
  --oam-interface-port=enp0s3 \
  --oam-interface-mtu=1500
dcmanager subcloud generate-config 2
# Unlock a subcloud
dcmanager subcloud unlock 1
# Lock a subcloud
dcmanager subcloud lock 1
# Delete a subcloud (must be locked)
dcmanager subcloud delete 1

# To use DC Orchestrator API directly

run "openstack token issue", then copy the token. Then to add a subcloud it's something like this:
curl -H "Content-Type: application/json" -H "X-Auth-Token: gAAAAABZ3pT6ZLUaMJfTjAius1zFjcYq25JyiI-eHJe_m5B4NheiN_T94wbG-NrFAAbYNKkOb90MdQ5fnTMjGi1QqZyJ9Rkyg2ZvnaI3Sj8Cw6cSl7goyG0rzseP9b1qADmvX66aqZx79pQQUE0EcC2YDPh-mwgYRoerjuNQ_DGYeWOfZxa06kk " -X POST -d '{"subcloud":"subcloud2"}' http://127.0.0.1:8118/v1.0/d9f1bcfd50b447de993ec90614e9bdc8/subclouds
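For reference, the same subcloud-add request can be issued from Python instead of curl. This is an illustrative sketch only and is not one of the files added by this commit; the token and tenant ID are placeholders for the values obtained from "openstack token issue".

import json
import requests

# Placeholders -- substitute the real token and project/tenant ID.
TOKEN = "<token from 'openstack token issue'>"
TENANT_ID = "<tenant id>"

url = "http://127.0.0.1:8118/v1.0/%s/subclouds" % TENANT_ID
headers = {"Content-Type": "application/json", "X-Auth-Token": TOKEN}

# Same request body as the curl example above: add a subcloud named "subcloud2".
response = requests.post(url, headers=headers, data=json.dumps({"subcloud": "subcloud2"}))
print(response.status_code, response.text)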
2 babel.cfg Normal file
@@ -0,0 +1,2 @@
[python: **.py]
25 dcmanager/__init__.py Normal file
@@ -0,0 +1,25 @@
# -*- coding: utf-8 -*-

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2017 Wind River Systems, Inc.
#
# The right to copy, distribute, modify, or otherwise make use
# of this software may be licensed only pursuant to the terms
# of an applicable Wind River license agreement.
#

import pbr.version


__version__ = pbr.version.VersionInfo('distributedcloud').version_string()
31 dcmanager/api/README.rst Executable file
@@ -0,0 +1,31 @@
===============================
api
===============================

DC Manager API is Web Server Gateway Interface (WSGI) application to receive
and process API calls, including keystonemiddleware to do the authentication,
parameter check and validation, convert API calls to job rpc message, and
then send the job to DC Manager Manager through the queue. If the job will
be processed by DC Manager Manager in synchronous way, the DC Manager API will
wait for the response from the DC Manager Manager. Otherwise, the DC Manager
API will send response to the API caller first, and then send the job to
DC Manager Manager in asynchronous way.

Multiple DC Manager API could run in parallel, and also can work in
multi-worker mode.

Multiple DC Manager API will be designed and run in stateless mode, persistent
data will be accessed (read and write) from the DC Manager Database through
the DAL module.

Setup and encapsulate the API WSGI app

app.py:
    Setup and encapsulate the API WSGI app, including integrate the
    keystonemiddleware app

api_config.py:
    API configuration loading and init

enforcer.py
    Enforces policies on the version2 APIs
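The synchronous/asynchronous dispatch described in this README maps onto oslo.messaging's call() and cast() primitives. The sketch below is illustrative only (it is not one of the files added by this commit), and the topic and method names are hypothetical rather than taken from dcmanager.

import oslo_messaging
from oslo_config import cfg

# Build an RPC client for a hypothetical manager topic.
transport = oslo_messaging.get_rpc_transport(cfg.CONF)
target = oslo_messaging.Target(topic='example-manager-topic', version='1.0')
client = oslo_messaging.RPCClient(transport, target)

ctxt = {}  # request context derived from the incoming API call

# Synchronous job: block until the manager returns a result, then respond.
result = client.call(ctxt, 'do_job_sync', job_id='123')

# Asynchronous job: respond to the API caller first, then fire and forget.
client.cast(ctxt, 'do_job_async', job_id='456')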
0 dcmanager/api/__init__.py Normal file
111 dcmanager/api/api_config.py Normal file
@@ -0,0 +1,111 @@
|
||||
# Copyright 2015 Huawei Technologies Co., Ltd.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# Copyright (c) 2017 Wind River Systems, Inc.
|
||||
#
|
||||
# The right to copy, distribute, modify, or otherwise make use
|
||||
# of this software may be licensed only pursuant to the terms
|
||||
# of an applicable Wind River license agreement.
|
||||
#
|
||||
|
||||
"""
|
||||
Routines for configuring DC Manager, largely copy from Neutron
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
|
||||
from dcmanager.common.i18n import _
|
||||
|
||||
|
||||
# from dcmanager import policy
|
||||
from dcmanager.common import version
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
common_opts = [
|
||||
cfg.StrOpt('bind_host', default='0.0.0.0',
|
||||
help=_("The host IP to bind to")),
|
||||
cfg.IntOpt('bind_port', default=8119,
|
||||
help=_("The port to bind to")),
|
||||
cfg.IntOpt('api_workers', default=2,
|
||||
help=_("number of api workers")),
|
||||
cfg.StrOpt('state_path',
|
||||
default=os.path.join(os.path.dirname(__file__), '../'),
|
||||
help='Top-level directory for maintaining dcmanager state'),
|
||||
cfg.StrOpt('api_extensions_path', default="",
|
||||
help=_("The path for API extensions")),
|
||||
cfg.StrOpt('auth_strategy', default='keystone',
|
||||
help=_("The type of authentication to use")),
|
||||
cfg.BoolOpt('allow_bulk', default=True,
|
||||
help=_("Allow the usage of the bulk API")),
|
||||
cfg.BoolOpt('allow_pagination', default=False,
|
||||
help=_("Allow the usage of the pagination")),
|
||||
cfg.BoolOpt('allow_sorting', default=False,
|
||||
help=_("Allow the usage of the sorting")),
|
||||
cfg.StrOpt('pagination_max_limit', default="-1",
|
||||
help=_("The maximum number of items returned in a single "
|
||||
"response, value was 'infinite' or negative integer "
|
||||
"means no limit")),
|
||||
]
|
||||
|
||||
|
||||
def init(args, **kwargs):
|
||||
# Register the configuration options
|
||||
cfg.CONF.register_opts(common_opts)
|
||||
|
||||
# ks_session.Session.register_conf_options(cfg.CONF)
|
||||
# auth.register_conf_options(cfg.CONF)
|
||||
logging.register_options(cfg.CONF)
|
||||
|
||||
cfg.CONF(args=args, project='dcmanager',
|
||||
version='%%(prog)s %s' % version.version_info.release_string(),
|
||||
**kwargs)
|
||||
|
||||
|
||||
def setup_logging():
|
||||
"""Sets up the logging options for a log with supplied name."""
|
||||
product_name = "dcmanager"
|
||||
logging.setup(cfg.CONF, product_name)
|
||||
LOG.info("Logging enabled!")
|
||||
LOG.info("%(prog)s version %(version)s",
|
||||
{'prog': sys.argv[0],
|
||||
'version': version.version_info.release_string()})
|
||||
LOG.debug("command line: %s", " ".join(sys.argv))
|
||||
|
||||
|
||||
def reset_service():
|
||||
# Reset worker in case SIGHUP is called.
|
||||
# Note that this is called only in case a service is running in
|
||||
# daemon mode.
|
||||
setup_logging()
|
||||
|
||||
# TODO(joehuang) enforce policy later
|
||||
# policy.refresh()
|
||||
|
||||
|
||||
def test_init():
|
||||
# Register the configuration options
|
||||
cfg.CONF.register_opts(common_opts)
|
||||
logging.register_options(cfg.CONF)
|
||||
setup_logging()
|
||||
|
||||
|
||||
def list_opts():
|
||||
yield None, common_opts
|
97 dcmanager/api/app.py Normal file
@@ -0,0 +1,97 @@
|
||||
# Copyright (c) 2015 Huawei, Tech. Co,. Ltd.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# Copyright (c) 2017 Wind River Systems, Inc.
|
||||
#
|
||||
# The right to copy, distribute, modify, or otherwise make use
|
||||
# of this software may be licensed only pursuant to the terms
|
||||
# of an applicable Wind River license agreement.
|
||||
#
|
||||
|
||||
import pecan
|
||||
|
||||
from keystonemiddleware import auth_token
|
||||
from oslo_config import cfg
|
||||
from oslo_middleware import request_id
|
||||
from oslo_service import service
|
||||
|
||||
from dcmanager.common import context as ctx
|
||||
from dcmanager.common.i18n import _
|
||||
|
||||
|
||||
def setup_app(*args, **kwargs):
|
||||
|
||||
opts = cfg.CONF.pecan
|
||||
config = {
|
||||
'server': {
|
||||
'port': cfg.CONF.bind_port,
|
||||
'host': cfg.CONF.bind_host
|
||||
},
|
||||
'app': {
|
||||
'root': 'dcmanager.api.controllers.root.RootController',
|
||||
'modules': ['dcmanager.api'],
|
||||
"debug": opts.debug,
|
||||
"auth_enable": opts.auth_enable,
|
||||
'errors': {
|
||||
400: '/error',
|
||||
'__force_dict__': True
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pecan_config = pecan.configuration.conf_from_dict(config)
|
||||
|
||||
# app_hooks = [], hook collection will be put here later
|
||||
|
||||
app = pecan.make_app(
|
||||
pecan_config.app.root,
|
||||
debug=False,
|
||||
wrap_app=_wrap_app,
|
||||
force_canonical=False,
|
||||
hooks=lambda: [ctx.AuthHook()],
|
||||
guess_content_type_from_ext=True
|
||||
)
|
||||
|
||||
return app
|
||||
|
||||
|
||||
def _wrap_app(app):
|
||||
app = request_id.RequestId(app)
|
||||
if cfg.CONF.pecan.auth_enable and cfg.CONF.auth_strategy == 'keystone':
|
||||
conf = dict(cfg.CONF.keystone_authtoken)
|
||||
# Change auth decisions of requests to the app itself.
|
||||
conf.update({'delay_auth_decision': True})
|
||||
|
||||
# NOTE: Policy enforcement works only if Keystone
|
||||
# authentication is enabled. No support for other authentication
|
||||
# types at this point.
|
||||
return auth_token.AuthProtocol(app, conf)
|
||||
else:
|
||||
return app
|
||||
|
||||
|
||||
_launcher = None
|
||||
|
||||
|
||||
def serve(api_service, conf, workers=1):
|
||||
global _launcher
|
||||
if _launcher:
|
||||
raise RuntimeError(_('serve() can only be called once'))
|
||||
|
||||
_launcher = service.launch(conf, api_service, workers=workers)
|
||||
|
||||
|
||||
def wait():
|
||||
_launcher.wait()
|
14 dcmanager/api/controllers/README.rst Executable file
@@ -0,0 +1,14 @@
===============================
controllers
===============================

API request processing

root.py:
    API root request

subclouds.py
    Controller for all the subcloud related requests

restcomm.py:
    common functionality used in API
0 dcmanager/api/controllers/__init__.py Normal file
48 dcmanager/api/controllers/restcomm.py Normal file
@@ -0,0 +1,48 @@
|
||||
# Copyright (c) 2015 Huawei Tech. Co., Ltd.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# Copyright (c) 2017 Wind River Systems, Inc.
|
||||
#
|
||||
# The right to copy, distribute, modify, or otherwise make use
|
||||
# of this software may be licensed only pursuant to the terms
|
||||
# of an applicable Wind River license agreement.
|
||||
#
|
||||
|
||||
|
||||
from pecan import request
|
||||
|
||||
import dcmanager.common.context as k_context
|
||||
|
||||
|
||||
def extract_context_from_environ():
|
||||
context_paras = {'auth_token': 'HTTP_X_AUTH_TOKEN',
|
||||
'user': 'HTTP_X_USER_ID',
|
||||
'project': 'HTTP_X_TENANT_ID',
|
||||
'user_name': 'HTTP_X_USER_NAME',
|
||||
'tenant_name': 'HTTP_X_PROJECT_NAME',
|
||||
'domain': 'HTTP_X_DOMAIN_ID',
|
||||
'roles': 'HTTP_X_ROLE',
|
||||
'user_domain': 'HTTP_X_USER_DOMAIN_ID',
|
||||
'project_domain': 'HTTP_X_PROJECT_DOMAIN_ID',
|
||||
'request_id': 'openstack.request_id'}
|
||||
|
||||
environ = request.environ
|
||||
|
||||
for key in context_paras:
|
||||
context_paras[key] = environ.get(context_paras[key])
|
||||
role = environ.get('HTTP_X_ROLE')
|
||||
|
||||
context_paras['is_admin'] = 'admin' in role.split(',')
|
||||
return k_context.RequestContext(**context_paras)
|
64 dcmanager/api/controllers/root.py Normal file
@@ -0,0 +1,64 @@
|
||||
# Copyright (c) 2015 Huawei Tech. Co., Ltd.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# Copyright (c) 2017 Wind River Systems, Inc.
|
||||
#
|
||||
# The right to copy, distribute, modify, or otherwise make use
|
||||
# of this software may be licensed only pursuant to the terms
|
||||
# of an applicable Wind River license agreement.
|
||||
#
|
||||
|
||||
|
||||
import pecan
|
||||
|
||||
from dcmanager.api.controllers.v1 import root as v1_root
|
||||
|
||||
|
||||
class RootController(object):
|
||||
|
||||
@pecan.expose('json')
|
||||
def _lookup(self, version, *remainder):
|
||||
version = str(version)
|
||||
minor_version = version[-1]
|
||||
major_version = version[1]
|
||||
remainder = remainder + (minor_version,)
|
||||
if major_version == '1':
|
||||
return v1_root.Controller(), remainder
|
||||
|
||||
@pecan.expose(generic=True, template='json')
|
||||
def index(self):
|
||||
return {
|
||||
"versions": [
|
||||
{
|
||||
"status": "CURRENT",
|
||||
"links": [
|
||||
{
|
||||
"rel": "self",
|
||||
"href": pecan.request.application_url + "/v1.0/"
|
||||
}
|
||||
],
|
||||
"id": "v1.0",
|
||||
"updated": "2017-10-2"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
@index.when(method='POST')
|
||||
@index.when(method='PUT')
|
||||
@index.when(method='DELETE')
|
||||
@index.when(method='HEAD')
|
||||
@index.when(method='PATCH')
|
||||
def not_supported(self):
|
||||
pecan.abort(405)
|
0 dcmanager/api/controllers/v1/__init__.py Normal file
87 dcmanager/api/controllers/v1/alarm_manager.py Normal file
@@ -0,0 +1,87 @@
|
||||
# Copyright (c) 2017 Ericsson AB.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# Copyright (c) 2017 Wind River Systems, Inc.
|
||||
#
|
||||
# The right to copy, distribute, modify, or otherwise make use
|
||||
# of this software may be licensed only pursuant to the terms
|
||||
# of an applicable Wind River license agreement.
|
||||
#
|
||||
|
||||
from dcmanager.api.controllers import restcomm
|
||||
from dcorch.rpc import client as dcorch_rpc_client
|
||||
from oslo_log import log as logging
|
||||
from pecan import expose
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class SubcloudAlarmController(object):
|
||||
VERSION_ALIASES = {
|
||||
'Newton': '1.0',
|
||||
}
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(SubcloudAlarmController, self).__init__(*args, **kwargs)
|
||||
self.dcorch_rpc_client = dcorch_rpc_client.EngineClient()
|
||||
|
||||
# to do the version compatibility for future purpose
|
||||
def _determine_version_cap(self, target):
|
||||
version_cap = 1.0
|
||||
return version_cap
|
||||
|
||||
@expose(generic=True, template='json')
|
||||
def index(self):
|
||||
# Route the request to specific methods with parameters
|
||||
pass
|
||||
|
||||
def _get_alarm_aggregates(self):
|
||||
summary = []
|
||||
context = restcomm.extract_context_from_environ()
|
||||
alarms = self.dcorch_rpc_client.get_alarm_summary(context)
|
||||
for alarm in alarms:
|
||||
alarm_dict = {'region_name': alarm['region_name'],
|
||||
'uuid': alarm['uuid'],
|
||||
'critical_alarms': alarm['critical_alarms'],
|
||||
'major_alarms': alarm['major_alarms'],
|
||||
'minor_alarms': alarm['minor_alarms'],
|
||||
'warnings': alarm['warnings'],
|
||||
'cloud_status': alarm['cloud_status']}
|
||||
summary.append(alarm_dict)
|
||||
return {'alarm_summary': summary}
|
||||
|
||||
@index.when(method='GET', template='json')
|
||||
def get(self):
|
||||
"""Get List of alarm summarys
|
||||
|
||||
"""
|
||||
return self._get_alarm_aggregates()
|
||||
|
||||
def _get_alarm_summary(self):
|
||||
alarms = self._get_alarm_aggregates()
|
||||
summary = {'critical': 0,
|
||||
'degraded': 0,
|
||||
'ok': 0,
|
||||
'unreachable': 0}
|
||||
for alarm in alarms:
|
||||
summary[alarm['cloud_status']] += 1
|
||||
return summary
|
||||
|
||||
@index.when(method='summary', template='json')
|
||||
def summary(self):
|
||||
"""Get an agregate of all subcloud status
|
||||
|
||||
"""
|
||||
return self._get_alarm_summary()
|
63 dcmanager/api/controllers/v1/root.py Normal file
@@ -0,0 +1,63 @@
|
||||
# Copyright (c) 2017 Ericsson AB.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# Copyright (c) 2017 Wind River Systems, Inc.
|
||||
#
|
||||
# The right to copy, distribute, modify, or otherwise make use
|
||||
# of this software may be licensed only pursuant to the terms
|
||||
# of an applicable Wind River license agreement.
|
||||
#
|
||||
|
||||
|
||||
from dcmanager.api.controllers.v1 import alarm_manager
|
||||
from dcmanager.api.controllers.v1 import subclouds
|
||||
from dcmanager.api.controllers.v1 import sw_update_options
|
||||
from dcmanager.api.controllers.v1 import sw_update_strategy
|
||||
|
||||
import pecan
|
||||
|
||||
|
||||
class Controller(object):
|
||||
|
||||
def _get_resource_controller(self, remainder):
|
||||
|
||||
if not remainder:
|
||||
pecan.abort(404)
|
||||
return
|
||||
minor_version = remainder[-1]
|
||||
remainder = remainder[:-1]
|
||||
sub_controllers = dict()
|
||||
if minor_version == '0':
|
||||
sub_controllers["subclouds"] = subclouds.SubcloudsController
|
||||
sub_controllers["alarms"] = alarm_manager.SubcloudAlarmController
|
||||
sub_controllers["sw-update-strategy"] = \
|
||||
sw_update_strategy.SwUpdateStrategyController
|
||||
sub_controllers["sw-update-options"] = \
|
||||
sw_update_options.SwUpdateOptionsController
|
||||
|
||||
for name, ctrl in sub_controllers.items():
|
||||
setattr(self, name, ctrl)
|
||||
|
||||
resource = remainder[0]
|
||||
if resource not in sub_controllers:
|
||||
pecan.abort(404)
|
||||
return
|
||||
|
||||
remainder = remainder[1:]
|
||||
return sub_controllers[resource](), remainder
|
||||
|
||||
@pecan.expose()
|
||||
def _lookup(self, *remainder):
|
||||
return self._get_resource_controller(remainder)
|
688 dcmanager/api/controllers/v1/subclouds.py Normal file
@@ -0,0 +1,688 @@
|
||||
# Copyright (c) 2017 Ericsson AB.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# Copyright (c) 2017 Wind River Systems, Inc.
|
||||
#
|
||||
# The right to copy, distribute, modify, or otherwise make use
|
||||
# of this software may be licensed only pursuant to the terms
|
||||
# of an applicable Wind River license agreement.
|
||||
#
|
||||
|
||||
import keyring
|
||||
from netaddr import IPAddress
|
||||
from netaddr import IPNetwork
|
||||
from netaddr import IPRange
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
from oslo_messaging import RemoteError
|
||||
|
||||
import pecan
|
||||
from pecan import expose
|
||||
from pecan import request
|
||||
|
||||
from configutilities.common import crypt
|
||||
from configutilities.common.exceptions import ValidateFail
|
||||
from configutilities.common.utils import validate_address_str
|
||||
from configutilities.common.utils import validate_network_str
|
||||
|
||||
from dcorch.drivers.openstack.keystone_v3 import KeystoneClient
|
||||
|
||||
from dcmanager.api.controllers import restcomm
|
||||
from dcmanager.common import consts
|
||||
from dcmanager.common import exceptions
|
||||
from dcmanager.common.i18n import _
|
||||
from dcmanager.db import api as db_api
|
||||
from dcmanager.drivers.openstack.sysinv_v1 import SysinvClient
|
||||
from dcmanager.rpc import client as rpc_client
|
||||
|
||||
from Crypto.Hash import MD5
|
||||
import json
|
||||
|
||||
CONF = cfg.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
# System mode
|
||||
SYSTEM_MODE_DUPLEX = "duplex"
|
||||
SYSTEM_MODE_SIMPLEX = "simplex"
|
||||
SYSTEM_MODE_DUPLEX_DIRECT = "duplex-direct"
|
||||
|
||||
|
||||
class SubcloudsController(object):
|
||||
VERSION_ALIASES = {
|
||||
'Newton': '1.0',
|
||||
}
|
||||
|
||||
def __init__(self):
|
||||
super(SubcloudsController, self).__init__()
|
||||
self.rpc_client = rpc_client.ManagerClient()
|
||||
|
||||
# to do the version compatibility for future purpose
|
||||
def _determine_version_cap(self, target):
|
||||
version_cap = 1.0
|
||||
return version_cap
|
||||
|
||||
@expose(generic=True, template='json')
|
||||
def index(self):
|
||||
# Route the request to specific methods with parameters
|
||||
pass
|
||||
|
||||
def _validate_subcloud_config(self,
|
||||
context,
|
||||
name,
|
||||
management_subnet_str,
|
||||
management_start_ip_str,
|
||||
management_end_ip_str,
|
||||
management_gateway_ip_str,
|
||||
systemcontroller_gateway_ip_str):
|
||||
"""Check whether subcloud config is valid."""
|
||||
|
||||
# Validate the name
|
||||
if name.isdigit():
|
||||
pecan.abort(400, _("name must contain alphabetic characters"))
|
||||
|
||||
if name in [consts.DEFAULT_REGION_NAME,
|
||||
consts.SYSTEM_CONTROLLER_NAME]:
|
||||
pecan.abort(400, _("name cannot be %(bad_name1)s or %(bad_name2)s")
|
||||
% {'bad_name1': consts.DEFAULT_REGION_NAME,
|
||||
'bad_name2': consts.SYSTEM_CONTROLLER_NAME})
|
||||
|
||||
# Parse/validate the management subnet
|
||||
subcloud_subnets = []
|
||||
subclouds = db_api.subcloud_get_all(context)
|
||||
for subcloud in subclouds:
|
||||
subcloud_subnets.append(IPNetwork(subcloud.management_subnet))
|
||||
|
||||
MIN_MANAGEMENT_SUBNET_SIZE = 8
|
||||
# subtract 3 for network, gateway and broadcast addresses.
|
||||
MIN_MANAGEMENT_ADDRESSES = MIN_MANAGEMENT_SUBNET_SIZE - 3
|
||||
|
||||
management_subnet = None
|
||||
try:
|
||||
management_subnet = validate_network_str(
|
||||
management_subnet_str,
|
||||
minimum_size=MIN_MANAGEMENT_SUBNET_SIZE,
|
||||
existing_networks=subcloud_subnets)
|
||||
except ValidateFail as e:
|
||||
LOG.exception(e)
|
||||
pecan.abort(400, _("management-subnet invalid: %s") % e)
|
||||
|
||||
# Parse/validate the start/end addresses
|
||||
management_start_ip = None
|
||||
try:
|
||||
management_start_ip = validate_address_str(
|
||||
management_start_ip_str, management_subnet)
|
||||
except ValidateFail as e:
|
||||
LOG.exception(e)
|
||||
pecan.abort(400, _("management-start-ip invalid: %s") % e)
|
||||
|
||||
management_end_ip = None
|
||||
try:
|
||||
management_end_ip = validate_address_str(
|
||||
management_end_ip_str, management_subnet)
|
||||
except ValidateFail as e:
|
||||
LOG.exception(e)
|
||||
pecan.abort(400, _("management-end-ip invalid: %s") % e)
|
||||
|
||||
if not management_start_ip < management_end_ip:
|
||||
pecan.abort(
|
||||
400,
|
||||
_("management-start-ip not less than management-end-ip"))
|
||||
|
||||
if not len(IPRange(management_start_ip, management_end_ip)) >= \
|
||||
MIN_MANAGEMENT_ADDRESSES:
|
||||
pecan.abort(
|
||||
400,
|
||||
_("management address range must contain at least %d "
|
||||
"addresses") % MIN_MANAGEMENT_ADDRESSES)
|
||||
|
||||
# Parse/validate the gateway
|
||||
try:
|
||||
validate_address_str(
|
||||
management_gateway_ip_str, management_subnet)
|
||||
except ValidateFail as e:
|
||||
LOG.exception(e)
|
||||
pecan.abort(400, _("management-gateway-ip invalid: %s") % e)
|
||||
|
||||
# Ensure subcloud management gateway is not within the actual subcloud
|
||||
# management subnet address pool for consistency with the
|
||||
# systemcontroller gateway restriction below. Address collision
|
||||
# is not a concern as the address is added to sysinv.
|
||||
subcloud_mgmt_address_start = IPAddress(management_start_ip_str)
|
||||
subcloud_mgmt_address_end = IPAddress(management_end_ip_str)
|
||||
subcloud_mgmt_gw_ip = IPAddress(management_gateway_ip_str)
|
||||
if ((subcloud_mgmt_gw_ip >= subcloud_mgmt_address_start) and
|
||||
(subcloud_mgmt_gw_ip <= subcloud_mgmt_address_end)):
|
||||
pecan.abort(400, _("management-gateway-ip invalid, "
|
||||
"is within management pool: %(start)s - "
|
||||
"%(end)s") %
|
||||
{'start': subcloud_mgmt_address_start,
|
||||
'end': subcloud_mgmt_address_end})
|
||||
|
||||
# Ensure systemcontroller gateway is in the management subnet
|
||||
# for the systemcontroller region.
|
||||
management_address_pool = self._get_management_address_pool(context)
|
||||
systemcontroller_subnet_str = "%s/%d" % (
|
||||
management_address_pool.network,
|
||||
management_address_pool.prefix)
|
||||
systemcontroller_subnet = IPNetwork(systemcontroller_subnet_str)
|
||||
try:
|
||||
validate_address_str(
|
||||
systemcontroller_gateway_ip_str, systemcontroller_subnet)
|
||||
except ValidateFail as e:
|
||||
LOG.exception(e)
|
||||
pecan.abort(400, _("systemcontroller-gateway-ip invalid: %s") % e)
|
||||
# Ensure systemcontroller gateway is not within the actual
|
||||
# management subnet address pool to prevent address collision.
|
||||
mgmt_address_start = IPAddress(management_address_pool.ranges[0][0])
|
||||
mgmt_address_end = IPAddress(management_address_pool.ranges[0][1])
|
||||
systemcontroller_gw_ip = IPAddress(systemcontroller_gateway_ip_str)
|
||||
if ((systemcontroller_gw_ip >= mgmt_address_start) and
|
||||
(systemcontroller_gw_ip <= mgmt_address_end)):
|
||||
pecan.abort(400, _("systemcontroller-gateway-ip invalid, "
|
||||
"is within management pool: %(start)s - "
|
||||
"%(end)s") %
|
||||
{'start': mgmt_address_start, 'end': mgmt_address_end})
|
||||
|
||||
def _create_subcloud_config_file(self, context, subcloud, payload):
|
||||
"""Creates the subcloud config file for a subcloud."""
|
||||
DEFAULT_STR = '<EDIT>'
|
||||
|
||||
pxe_cidr = payload.get(
|
||||
'pxe-subnet', DEFAULT_STR)
|
||||
management_vlan = payload.get(
|
||||
'management-vlan', DEFAULT_STR)
|
||||
management_interface_mtu = payload.get(
|
||||
'management-interface-mtu', DEFAULT_STR)
|
||||
management_interface_ports = payload.get(
|
||||
'management-interface-port', DEFAULT_STR)
|
||||
oam_cidr = payload.get(
|
||||
'oam-subnet', DEFAULT_STR)
|
||||
oam_gateway = payload.get(
|
||||
'oam-gateway-ip', DEFAULT_STR)
|
||||
oam_ip_floating_address = payload.get(
|
||||
'oam-floating-ip', DEFAULT_STR)
|
||||
oam_ip_unit_0_address = payload.get(
|
||||
'oam-unit-0-ip', DEFAULT_STR)
|
||||
oam_ip_unit_1_address = payload.get(
|
||||
'oam-unit-1-ip', DEFAULT_STR)
|
||||
oam_interface_mtu = payload.get(
|
||||
'oam-interface-mtu', DEFAULT_STR)
|
||||
oam_interface_ports = payload.get(
|
||||
'oam-interface-port', DEFAULT_STR)
|
||||
system_mode = payload.get(
|
||||
'system-mode', DEFAULT_STR)
|
||||
|
||||
management_address_pool = self._get_management_address_pool(context)
|
||||
systemcontroller_subnet = "%s/%d" % (
|
||||
management_address_pool.network,
|
||||
management_address_pool.prefix)
|
||||
sc_mgmt_floating_ip = management_address_pool.floating_address
|
||||
|
||||
subcloud_config = ""
|
||||
if system_mode in [SYSTEM_MODE_SIMPLEX, SYSTEM_MODE_DUPLEX,
|
||||
SYSTEM_MODE_DUPLEX_DIRECT]:
|
||||
subcloud_config += (
|
||||
"[SYSTEM]\n"
|
||||
"SYSTEM_MODE={}\n".format(system_mode))
|
||||
|
||||
if system_mode == SYSTEM_MODE_SIMPLEX:
|
||||
subcloud_oamip_config = (
|
||||
"IP_ADDRESS = {oam_ip_floating_address}\n"
|
||||
).format(
|
||||
oam_ip_floating_address=oam_ip_floating_address,
|
||||
)
|
||||
else:
|
||||
subcloud_oamip_config = (
|
||||
"IP_FLOATING_ADDRESS = {oam_ip_floating_address}\n"
|
||||
"IP_UNIT_0_ADDRESS = {oam_ip_unit_0_address}\n"
|
||||
"IP_UNIT_1_ADDRESS = {oam_ip_unit_1_address}\n"
|
||||
).format(
|
||||
oam_ip_floating_address=oam_ip_floating_address,
|
||||
oam_ip_unit_0_address=oam_ip_unit_0_address,
|
||||
oam_ip_unit_1_address=oam_ip_unit_1_address,
|
||||
)
|
||||
|
||||
MIN_MANAGEMENT_SUBNET_SIZE = 8
|
||||
tmp_management_subnet = validate_network_str(
|
||||
subcloud.management_subnet,
|
||||
minimum_size=MIN_MANAGEMENT_SUBNET_SIZE)
|
||||
|
||||
is_ipv6_mgmt = (tmp_management_subnet.version == 6)
|
||||
|
||||
# If ipv6 then we need pxe subnet and management_vlan.
|
||||
# If user specified pxe boot subnet, then management vlan is required
|
||||
# and vice versa
|
||||
if is_ipv6_mgmt or (pxe_cidr != DEFAULT_STR) or \
|
||||
(management_vlan != DEFAULT_STR):
|
||||
subcloud_config += (
|
||||
"[REGION2_PXEBOOT_NETWORK]\n"
|
||||
"PXEBOOT_CIDR = {pxe_cidr}\n"
|
||||
"[MGMT_NETWORK]\n"
|
||||
"VLAN = {management_vlan}\n"
|
||||
).format(
|
||||
pxe_cidr=pxe_cidr,
|
||||
management_vlan=management_vlan,
|
||||
)
|
||||
else:
|
||||
subcloud_config += "[MGMT_NETWORK]\n"
|
||||
|
||||
subcloud_config += (
|
||||
"CIDR = {management_cidr}\n"
|
||||
"GATEWAY = {management_gateway}\n"
|
||||
"IP_START_ADDRESS = {management_ip_start_address}\n"
|
||||
"IP_END_ADDRESS = {management_ip_end_address}\n"
|
||||
"DYNAMIC_ALLOCATION = Y\n"
|
||||
"LOGICAL_INTERFACE = LOGICAL_INTERFACE_1\n"
|
||||
"[LOGICAL_INTERFACE_1]\n"
|
||||
"LAG_INTERFACE = N\n"
|
||||
"INTERFACE_MTU = {management_interface_mtu}\n"
|
||||
"INTERFACE_PORTS = {management_interface_ports}\n"
|
||||
"[OAM_NETWORK]\n"
|
||||
"CIDR = {oam_cidr}\n"
|
||||
"GATEWAY = {oam_gateway}\n" +
|
||||
subcloud_oamip_config +
|
||||
"LOGICAL_INTERFACE = LOGICAL_INTERFACE_2\n"
|
||||
"[LOGICAL_INTERFACE_2]\n"
|
||||
"LAG_INTERFACE = N\n"
|
||||
"INTERFACE_MTU = {oam_interface_mtu}\n"
|
||||
"INTERFACE_PORTS = {oam_interface_ports}\n"
|
||||
"[SHARED_SERVICES]\n"
|
||||
"SYSTEM_CONTROLLER_SUBNET = {systemcontroller_subnet}\n"
|
||||
"SYSTEM_CONTROLLER_FLOATING_ADDRESS = {sc_mgmt_floating_ip}\n"
|
||||
"REGION_NAME = SystemController\n"
|
||||
"ADMIN_PROJECT_NAME = admin\n"
|
||||
"ADMIN_USER_NAME = admin\n"
|
||||
"ADMIN_PASSWORD = {admin_password}\n"
|
||||
"KEYSTONE_ADMINURL = {keystone_adminurl}\n"
|
||||
"KEYSTONE_SERVICE_NAME = keystone\n"
|
||||
"KEYSTONE_SERVICE_TYPE = identity\n"
|
||||
"GLANCE_SERVICE_NAME = glance\n"
|
||||
"GLANCE_SERVICE_TYPE = image\n"
|
||||
"GLANCE_CACHED = True\n"
|
||||
"[REGION_2_SERVICES]\n"
|
||||
"REGION_NAME = {region_2_name}\n"
|
||||
"[VERSION]\n"
|
||||
"RELEASE = {release}\n"
|
||||
).format(
|
||||
management_cidr=subcloud.management_subnet,
|
||||
management_gateway=subcloud.management_gateway_ip,
|
||||
management_ip_start_address=subcloud.management_start_ip,
|
||||
management_ip_end_address=subcloud.management_end_ip,
|
||||
management_interface_mtu=management_interface_mtu,
|
||||
management_interface_ports=management_interface_ports,
|
||||
oam_cidr=oam_cidr,
|
||||
oam_gateway=oam_gateway,
|
||||
oam_interface_mtu=oam_interface_mtu,
|
||||
oam_interface_ports=oam_interface_ports,
|
||||
systemcontroller_subnet=systemcontroller_subnet,
|
||||
sc_mgmt_floating_ip=sc_mgmt_floating_ip,
|
||||
admin_password=cfg.CONF.cache.admin_password,
|
||||
keystone_adminurl=cfg.CONF.cache.auth_uri,
|
||||
region_2_name=subcloud.name,
|
||||
release=subcloud.software_version,
|
||||
)
|
||||
return subcloud_config
|
||||
|
||||
def _get_subcloud_users(self):
|
||||
"""Get the subcloud users and passwords from keyring"""
|
||||
DEFAULT_SERVICE_PROJECT_NAME = 'services'
|
||||
# First entry is openstack user name, second entry is the user stored
|
||||
# in keyring. Not sure why heat_admin uses a different keystone name.
|
||||
SUBCLOUD_USERS = [
|
||||
('nova', 'nova'),
|
||||
('placement', 'placement'),
|
||||
('sysinv', 'sysinv'),
|
||||
('patching', 'patching'),
|
||||
('heat', 'heat'),
|
||||
('ceilometer', 'ceilometer'),
|
||||
('vim', 'vim'),
|
||||
('aodh', 'aodh'),
|
||||
('panko', 'panko'),
|
||||
('mtce', 'mtce'),
|
||||
('cinder', 'cinder'),
|
||||
('glance', 'glance'),
|
||||
('neutron', 'neutron'),
|
||||
('heat_admin', 'heat-domain'),
|
||||
('gnocchi', 'gnocchi')
|
||||
]
|
||||
|
||||
user_list = list()
|
||||
for user in SUBCLOUD_USERS:
|
||||
password = keyring.get_password(user[1],
|
||||
DEFAULT_SERVICE_PROJECT_NAME)
|
||||
if password:
|
||||
user_dict = dict()
|
||||
user_dict['name'] = user[0]
|
||||
user_dict['password'] = password
|
||||
user_list.append(user_dict)
|
||||
else:
|
||||
LOG.error("User %s not found in keyring as %s" % (user[0],
|
||||
user[1]))
|
||||
pecan.abort(500, _('System configuration error'))
|
||||
|
||||
return user_list
|
||||
|
||||
def _get_management_address_pool(self, context):
|
||||
"""Get the system controller's management address pool"""
|
||||
session = KeystoneClient().endpoint_cache.get_session_from_token(
|
||||
context.auth_token, context.project)
|
||||
sysinv_client = SysinvClient(consts.DEFAULT_REGION_NAME, session)
|
||||
return sysinv_client.get_management_address_pool()
|
||||
|
||||
@index.when(method='GET', template='json')
|
||||
def get(self, subcloud_ref=None, qualifier=None):
|
||||
"""Get details about subcloud.
|
||||
|
||||
:param subcloud_ref: ID or name of subcloud
|
||||
"""
|
||||
context = restcomm.extract_context_from_environ()
|
||||
|
||||
if subcloud_ref is None:
|
||||
# List of subclouds requested
|
||||
subclouds = db_api.subcloud_get_all_with_status(context)
|
||||
result = dict()
|
||||
result['subclouds'] = []
|
||||
first_time = True
|
||||
subcloud_list = []
|
||||
subcloud_status_list = []
|
||||
|
||||
# We get back a subcloud, subcloud_status pair for every
|
||||
# subcloud_status entry corresponding to a subcloud. (Subcloud
|
||||
# info repeats)
|
||||
# Aggregate all the sync status for each of the
|
||||
# endpoints per subcloud into an overall sync status
|
||||
for subcloud, subcloud_status in subclouds:
|
||||
subcloud_dict = db_api.subcloud_db_model_to_dict(subcloud)
|
||||
subcloud_status_dict = db_api.subcloud_status_db_model_to_dict(
|
||||
subcloud_status)
|
||||
subcloud_dict.update(subcloud_status_dict)
|
||||
|
||||
if not first_time:
|
||||
if subcloud_list[-1]['id'] == subcloud_dict['id']:
|
||||
# We have a match for this subcloud id already,
|
||||
# check if we have a same sync_status
|
||||
if subcloud_list[-1][consts.SYNC_STATUS] != \
|
||||
subcloud_dict[consts.SYNC_STATUS]:
|
||||
subcloud_list[-1][consts.SYNC_STATUS] = \
|
||||
consts.SYNC_STATUS_OUT_OF_SYNC
|
||||
|
||||
if subcloud_status:
|
||||
subcloud_status_list.append(
|
||||
db_api.subcloud_endpoint_status_db_model_to_dict( # noqa
|
||||
subcloud_status))
|
||||
subcloud_list[-1][
|
||||
consts.ENDPOINT_SYNC_STATUS] = subcloud_status_list
|
||||
|
||||
else:
|
||||
subcloud_status_list = []
|
||||
if subcloud_status:
|
||||
subcloud_status_list.append(
|
||||
db_api.subcloud_endpoint_status_db_model_to_dict( # noqa
|
||||
subcloud_status))
|
||||
|
||||
subcloud_list.append(subcloud_dict)
|
||||
else:
|
||||
if subcloud_status:
|
||||
subcloud_status_list.append(
|
||||
db_api.subcloud_endpoint_status_db_model_to_dict(
|
||||
subcloud_status))
|
||||
subcloud_list.append(subcloud_dict)
|
||||
|
||||
first_time = False
|
||||
|
||||
for s in subcloud_list:
|
||||
result['subclouds'].append(s)
|
||||
|
||||
return result
|
||||
else:
|
||||
# Single subcloud requested
|
||||
subcloud = None
|
||||
subcloud_dict = dict()
|
||||
subcloud_status_list = []
|
||||
endpoint_sync_dict = dict()
|
||||
|
||||
if subcloud_ref.isdigit():
|
||||
# Look up subcloud as an ID
|
||||
try:
|
||||
subcloud = db_api.subcloud_get(context, subcloud_ref)
|
||||
except exceptions.SubcloudNotFound:
|
||||
pecan.abort(404, _('Subcloud not found'))
|
||||
else:
|
||||
# Look up subcloud by name
|
||||
try:
|
||||
subcloud = db_api.subcloud_get_by_name(context,
|
||||
subcloud_ref)
|
||||
except exceptions.SubcloudNameNotFound:
|
||||
pecan.abort(404, _('Subcloud not found'))
|
||||
|
||||
subcloud_id = subcloud.id
|
||||
|
||||
if qualifier:
|
||||
# Configuration for this subcloud requested.
|
||||
# Encrypt before sending.
|
||||
if qualifier == 'config':
|
||||
result = dict()
|
||||
user_list = self._get_subcloud_users()
|
||||
|
||||
# Use a hash of the subcloud name + management subnet
|
||||
# as the encryption key
|
||||
hashstring = subcloud.name + subcloud.management_subnet
|
||||
h = MD5.new()
|
||||
h.update(hashstring)
|
||||
encryption_key = h.hexdigest()
|
||||
user_list_string = json.dumps(user_list)
|
||||
user_list_encrypted = crypt.urlsafe_encrypt(
|
||||
encryption_key,
|
||||
user_list_string)
|
||||
result['users'] = user_list_encrypted
|
||||
return result
|
||||
else:
|
||||
pecan.abort(400, _('Invalid request'))
|
||||
else:
|
||||
# Data for this subcloud requested
|
||||
# Build up and append a dictionary of the endpoints
|
||||
# sync status to the result.
|
||||
for subcloud, subcloud_status in db_api. \
|
||||
subcloud_get_with_status(context, subcloud_id):
|
||||
subcloud_dict = db_api.subcloud_db_model_to_dict(
|
||||
subcloud)
|
||||
# the subcloud_status entry may be empty; account for this
|
||||
if subcloud_status:
|
||||
subcloud_status_list.append(
|
||||
db_api.subcloud_endpoint_status_db_model_to_dict(
|
||||
subcloud_status))
|
||||
endpoint_sync_dict = {consts.ENDPOINT_SYNC_STATUS:
|
||||
subcloud_status_list}
|
||||
subcloud_dict.update(endpoint_sync_dict)
|
||||
|
||||
return subcloud_dict
|
||||
|
||||
@index.when(method='POST', template='json')
|
||||
def post(self, subcloud_ref=None, qualifier=None):
|
||||
"""Create a new subcloud.
|
||||
|
||||
:param subcloud_ref: ID or name of subcloud (only used when generating
|
||||
config)
|
||||
:param qualifier: if 'config', returns the config INI file for the
|
||||
subcloud
|
||||
"""
|
||||
|
||||
context = restcomm.extract_context_from_environ()
|
||||
|
||||
if subcloud_ref is None:
|
||||
payload = eval(request.body)
|
||||
if not payload:
|
||||
pecan.abort(400, _('Body required'))
|
||||
name = payload.get('name')
|
||||
if not name:
|
||||
pecan.abort(400, _('name required'))
|
||||
management_subnet = payload.get('management-subnet')
|
||||
if not management_subnet:
|
||||
pecan.abort(400, _('management-subnet required'))
|
||||
management_start_ip = payload.get('management-start-ip')
|
||||
if not management_start_ip:
|
||||
pecan.abort(400, _('management-start-ip required'))
|
||||
management_end_ip = payload.get('management-end-ip')
|
||||
if not management_end_ip:
|
||||
pecan.abort(400, _('management-end-ip required'))
|
||||
management_gateway_ip = payload.get('management-gateway-ip')
|
||||
if not management_gateway_ip:
|
||||
pecan.abort(400, _('management-gateway-ip required'))
|
||||
systemcontroller_gateway_ip = \
|
||||
payload.get('systemcontroller-gateway-ip')
|
||||
if not systemcontroller_gateway_ip:
|
||||
pecan.abort(400, _('systemcontroller-gateway-ip required'))
|
||||
|
||||
self._validate_subcloud_config(context,
|
||||
name,
|
||||
management_subnet,
|
||||
management_start_ip,
|
||||
management_end_ip,
|
||||
management_gateway_ip,
|
||||
systemcontroller_gateway_ip)
|
||||
|
||||
try:
|
||||
# Ask dcmanager-manager to add the subcloud.
|
||||
# It will do all the real work...
|
||||
return self.rpc_client.add_subcloud(context, payload)
|
||||
except RemoteError as e:
|
||||
pecan.abort(422, e.value)
|
||||
except Exception as e:
|
||||
LOG.exception(e)
|
||||
pecan.abort(500, _('Unable to create subcloud'))
|
||||
elif qualifier:
|
||||
if qualifier == 'config':
|
||||
subcloud = None
|
||||
|
||||
if subcloud_ref.isdigit():
|
||||
# Look up subcloud as an ID
|
||||
try:
|
||||
subcloud = db_api.subcloud_get(context, subcloud_ref)
|
||||
except exceptions.SubcloudNotFound:
|
||||
pecan.abort(404, _('Subcloud not found'))
|
||||
else:
|
||||
# Look up subcloud by name
|
||||
try:
|
||||
subcloud = db_api.subcloud_get_by_name(context,
|
||||
subcloud_ref)
|
||||
except exceptions.SubcloudNameNotFound:
|
||||
pecan.abort(404, _('Subcloud not found'))
|
||||
|
||||
payload = dict()
|
||||
if request.body:
|
||||
payload = eval(request.body)
|
||||
config_file = self._create_subcloud_config_file(
|
||||
context, subcloud, payload)
|
||||
result = dict()
|
||||
result['config'] = config_file
|
||||
return result
|
||||
else:
|
||||
pecan.abort(400, _('Invalid request'))
|
||||
else:
|
||||
pecan.abort(400, _('Invalid request'))
|
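# For reference, the create branch above expects request.body to parse to a
# dict with these keys; the values shown here are purely illustrative and do
# not come from any real deployment:
#
#   {'name': 'subcloud1',
#    'management-subnet': '192.168.101.0/24',
#    'management-start-ip': '192.168.101.2',
#    'management-end-ip': '192.168.101.50',
#    'management-gateway-ip': '192.168.101.1',
#    'systemcontroller-gateway-ip': '192.168.204.101'}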
||||
|
||||
@index.when(method='PATCH', template='json')
|
||||
def patch(self, subcloud_ref=None):
|
||||
"""Update a subcloud.
|
||||
|
||||
:param subcloud_ref: ID or name of subcloud to update
|
||||
"""
|
||||
|
||||
context = restcomm.extract_context_from_environ()
|
||||
subcloud = None
|
||||
|
||||
if subcloud_ref is None:
|
||||
pecan.abort(400, _('Subcloud ID required'))
|
||||
|
||||
payload = eval(request.body)
|
||||
if not payload:
|
||||
pecan.abort(400, _('Body required'))
|
||||
|
||||
if subcloud_ref.isdigit():
|
||||
# Look up subcloud as an ID
|
||||
try:
|
||||
subcloud = db_api.subcloud_get(context, subcloud_ref)
|
||||
except exceptions.SubcloudNotFound:
|
||||
pecan.abort(404, _('Subcloud not found'))
|
||||
else:
|
||||
# Look up subcloud by name
|
||||
try:
|
||||
subcloud = db_api.subcloud_get_by_name(context,
|
||||
subcloud_ref)
|
||||
except exceptions.SubcloudNameNotFound:
|
||||
pecan.abort(404, _('Subcloud not found'))
|
||||
|
||||
subcloud_id = subcloud.id
|
||||
|
||||
management_state = payload.get('management-state')
|
||||
description = payload.get('description')
|
||||
location = payload.get('location')
|
||||
|
||||
if not (management_state or description or location):
|
||||
pecan.abort(400, _('nothing to update'))
|
||||
|
||||
# Syntax checking
|
||||
if management_state and \
|
||||
management_state not in [consts.MANAGEMENT_UNMANAGED,
|
||||
consts.MANAGEMENT_MANAGED]:
|
||||
pecan.abort(400, _('Invalid management-state'))
|
||||
|
||||
try:
|
||||
# Inform dcmanager-manager that subcloud has been updated.
|
||||
# It will do all the real work...
|
||||
subcloud = self.rpc_client.update_subcloud(
|
||||
context, subcloud_id, management_state=management_state,
|
||||
description=description, location=location)
|
||||
return subcloud
|
||||
except RemoteError as e:
|
||||
pecan.abort(422, e.value)
|
||||
except Exception as e:
|
||||
# additional exceptions.
|
||||
LOG.exception(e)
|
||||
pecan.abort(500, _('Unable to update subcloud'))
|
||||
|
||||
@index.when(method='delete', template='json')
|
||||
def delete(self, subcloud_ref):
|
||||
"""Delete a subcloud.
|
||||
|
||||
:param subcloud_ref: ID or name of subcloud to delete.
|
||||
"""
|
||||
context = restcomm.extract_context_from_environ()
|
||||
subcloud = None
|
||||
|
||||
if subcloud_ref.isdigit():
|
||||
# Look up subcloud as an ID
|
||||
try:
|
||||
subcloud = db_api.subcloud_get(context, subcloud_ref)
|
||||
except exceptions.SubcloudNotFound:
|
||||
pecan.abort(404, _('Subcloud not found'))
|
||||
else:
|
||||
# Look up subcloud by name
|
||||
try:
|
||||
subcloud = db_api.subcloud_get_by_name(context,
|
||||
subcloud_ref)
|
||||
except exceptions.SubcloudNameNotFound:
|
||||
pecan.abort(404, _('Subcloud not found'))
|
||||
|
||||
subcloud_id = subcloud.id
|
||||
|
||||
try:
|
||||
# Ask dcmanager-manager to delete the subcloud.
|
||||
# It will do all the real work...
|
||||
return self.rpc_client.delete_subcloud(context, subcloud_id)
|
||||
except RemoteError as e:
|
||||
pecan.abort(422, e.value)
|
||||
except Exception as e:
|
||||
LOG.exception(e)
|
||||
pecan.abort(500, _('Unable to delete subcloud'))
|
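# The per-endpoint roll-up done in get() can be summarised on its own. This
# is a minimal sketch only: the helper name and its standalone form are
# illustrative, and it assumes each endpoint status dict carries a
# consts.SYNC_STATUS key, as the aggregation above does for subclouds.
def _overall_sync_status(endpoint_statuses):
    # A subcloud keeps the common status only when every endpoint agrees;
    # any disagreement is reported as out-of-sync.
    statuses = [status[consts.SYNC_STATUS] for status in endpoint_statuses]
    if not statuses:
        return consts.SYNC_STATUS_UNKNOWN
    if all(status == statuses[0] for status in statuses):
        return statuses[0]
    return consts.SYNC_STATUS_OUT_OF_SYNC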
244
dcmanager/api/controllers/v1/sw_update_options.py
Normal file
@ -0,0 +1,244 @@
|
||||
# Copyright (c) 2017 Ericsson AB.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# Copyright (c) 2017 Wind River Systems, Inc.
|
||||
#
|
||||
# The right to copy, distribute, modify, or otherwise make use
|
||||
# of this software may be licensed only pursuant to the terms
|
||||
# of an applicable Wind River license agreement.
|
||||
#
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
|
||||
import pecan
|
||||
from pecan import expose
|
||||
from pecan import request
|
||||
|
||||
from dcmanager.api.controllers import restcomm
|
||||
from dcmanager.common import consts
|
||||
from dcmanager.common import exceptions
|
||||
from dcmanager.common.i18n import _
|
||||
from dcmanager.common import utils
|
||||
from dcmanager.db import api as db_api
|
||||
from dcmanager.rpc import client as rpc_client
|
||||
|
||||
CONF = cfg.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class SwUpdateOptionsController(object):
|
||||
|
||||
def __init__(self):
|
||||
super(SwUpdateOptionsController, self).__init__()
|
||||
self.rpc_client = rpc_client.ManagerClient()
|
||||
|
||||
@expose(generic=True, template='json')
|
||||
def index(self):
|
||||
# Route the request to specific methods with parameters
|
||||
pass
|
||||
|
||||
@index.when(method='GET', template='json')
|
||||
def get(self, subcloud_ref=None):
|
||||
"""Get details about software update options.
|
||||
|
||||
:param subcloud_ref: name or ID of subcloud (optional)
|
||||
"""
|
||||
context = restcomm.extract_context_from_environ()
|
||||
|
||||
if subcloud_ref is None:
|
||||
# List of all subcloud options requested.
|
||||
# Prepend the all clouds default options to the result.
|
||||
|
||||
result = dict()
|
||||
result['sw-update-options'] = list()
|
||||
|
||||
default_sw_update_opts_dict = utils.get_sw_update_opts(
|
||||
context)
|
||||
|
||||
result['sw-update-options'].append(default_sw_update_opts_dict)
|
||||
|
||||
subclouds = db_api.sw_update_opts_get_all_plus_subcloud_info(
|
||||
context)
|
||||
|
||||
for subcloud, sw_update_opts in subclouds:
|
||||
if sw_update_opts:
|
||||
result['sw-update-options'].append(
|
||||
db_api.sw_update_opts_w_name_db_model_to_dict(
|
||||
sw_update_opts, subcloud.name))
|
||||
|
||||
return result
|
||||
|
||||
elif subcloud_ref == consts.DEFAULT_REGION_NAME:
|
||||
# Default options requested, guaranteed to succeed
|
||||
|
||||
return utils.get_sw_update_opts(context)
|
||||
|
||||
else:
|
||||
# Specific subcloud options requested
|
||||
|
||||
if subcloud_ref.isdigit():
|
||||
# Look up subcloud as an ID
|
||||
try:
|
||||
subcloud = db_api.subcloud_get(context, subcloud_ref)
|
||||
except exceptions.SubcloudNotFound:
|
||||
pecan.abort(404, _('Subcloud not found'))
|
||||
else:
|
||||
# Look up subcloud by name
|
||||
try:
|
||||
subcloud = db_api.subcloud_get_by_name(context,
|
||||
subcloud_ref)
|
||||
except exceptions.SubcloudNameNotFound:
|
||||
pecan.abort(404, _('Subcloud not found'))
|
||||
|
||||
try:
|
||||
return utils.get_sw_update_opts(
|
||||
context, subcloud_id=subcloud.id)
|
||||
except Exception as e:
|
||||
pecan.abort(404, _('%s') % e)
|
||||
|
||||
@index.when(method='POST', template='json')
|
||||
def post(self, subcloud_ref=None):
|
||||
"""Update or create sw update options.
|
||||
|
||||
:param subcloud_ref: name or ID of subcloud (optional)
|
||||
"""
|
||||
|
||||
# Note: creating or updating subcloud-specific options requires
|
||||
# setting all options.
|
||||
|
||||
context = restcomm.extract_context_from_environ()
|
||||
|
||||
payload = eval(request.body)
|
||||
if not payload:
|
||||
pecan.abort(400, _('Body required'))
|
||||
|
||||
if subcloud_ref == consts.DEFAULT_REGION_NAME:
|
||||
|
||||
# update default options
|
||||
subcloud_name = consts.SW_UPDATE_DEFAULT_TITLE
|
||||
|
||||
if db_api.sw_update_opts_default_get(context):
|
||||
# entry already in db, update it.
|
||||
try:
|
||||
sw_update_opts_ref = db_api.sw_update_opts_default_update(
|
||||
context,
|
||||
payload['storage-apply-type'],
|
||||
payload['compute-apply-type'],
|
||||
payload['max-parallel-computes'],
|
||||
payload['alarm-restriction-type'],
|
||||
payload['default-instance-action'])
|
||||
except Exception as e:
|
||||
LOG.exception(e)
|
||||
raise e
|
||||
else:
|
||||
# no entry in db, create one.
|
||||
try:
|
||||
sw_update_opts_ref = db_api.sw_update_opts_default_create(
|
||||
context,
|
||||
payload['storage-apply-type'],
|
||||
payload['compute-apply-type'],
|
||||
payload['max-parallel-computes'],
|
||||
payload['alarm-restriction-type'],
|
||||
payload['default-instance-action'])
|
||||
except Exception as e:
|
||||
LOG.exception(e)
|
||||
raise e
|
||||
else:
|
||||
# update subcloud options
|
||||
|
||||
if subcloud_ref.isdigit():
|
||||
# Look up subcloud as an ID
|
||||
try:
|
||||
subcloud = db_api.subcloud_get(context, subcloud_ref)
|
||||
except exceptions.SubcloudNotFound:
|
||||
pecan.abort(404, _('Subcloud not found'))
|
||||
|
||||
subcloud_name = subcloud.name
|
||||
|
||||
else:
|
||||
# Look up subcloud by name
|
||||
try:
|
||||
subcloud = db_api.subcloud_get_by_name(context,
|
||||
subcloud_ref)
|
||||
except exceptions.SubcloudNameNotFound:
|
||||
pecan.abort(404, _('Subcloud not found'))
|
||||
|
||||
subcloud_name = subcloud_ref
|
||||
|
||||
sw_update_opts = db_api.sw_update_opts_get(context,
|
||||
subcloud.id)
|
||||
|
||||
if sw_update_opts is None:
|
||||
sw_update_opts_ref = db_api.sw_update_opts_create(
|
||||
context,
|
||||
subcloud.id,
|
||||
payload['storage-apply-type'],
|
||||
payload['compute-apply-type'],
|
||||
payload['max-parallel-computes'],
|
||||
payload['alarm-restriction-type'],
|
||||
payload['default-instance-action'])
|
||||
|
||||
else:
|
||||
# a row is present in table, update
|
||||
sw_update_opts_ref = db_api.sw_update_opts_update(
|
||||
context,
|
||||
subcloud.id,
|
||||
payload['storage-apply-type'],
|
||||
payload['compute-apply-type'],
|
||||
payload['max-parallel-computes'],
|
||||
payload['alarm-restriction-type'],
|
||||
payload['default-instance-action'])
|
||||
|
||||
return db_api.sw_update_opts_w_name_db_model_to_dict(
|
||||
sw_update_opts_ref, subcloud_name)
|
||||
|
||||
@index.when(method='delete', template='json')
|
||||
def delete(self, subcloud_ref):
|
||||
"""Delete the software update options."""
|
||||
|
||||
context = restcomm.extract_context_from_environ()
|
||||
|
||||
if subcloud_ref == consts.DEFAULT_REGION_NAME:
|
||||
# Delete defaults.
|
||||
# Note by deleting these, the next get will repopulate with
|
||||
# the global constants.
|
||||
|
||||
try:
|
||||
db_api.sw_update_opts_default_destroy(context)
|
||||
except Exception:
|
||||
return
|
||||
else:
|
||||
|
||||
if subcloud_ref.isdigit():
|
||||
# Look up subcloud as an ID
|
||||
try:
|
||||
subcloud = db_api.subcloud_get(context, subcloud_ref)
|
||||
except exceptions.SubcloudNotFound:
|
||||
pecan.abort(404, _('Subcloud not found'))
|
||||
|
||||
else:
|
||||
# Look up subcloud by name
|
||||
try:
|
||||
subcloud = db_api.subcloud_get_by_name(context,
|
||||
subcloud_ref)
|
||||
except exceptions.SubcloudNameNotFound:
|
||||
pecan.abort(404, _('Subcloud not found'))
|
||||
|
||||
# Delete the subcloud specific options
|
||||
if db_api.sw_update_opts_get(context, subcloud.id):
|
||||
db_api.sw_update_opts_destroy(context, subcloud.id)
|
||||
else:
|
||||
pecan.abort(404, _('Subcloud patch options not found'))
|
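# For reference, the POST handler above requires all five option keys to be
# present in the request body. A minimal sketch of such a payload; the values
# are illustrative only and are not taken from this module:
_EXAMPLE_SW_UPDATE_OPTS = {
    'storage-apply-type': 'parallel',
    'compute-apply-type': 'parallel',
    'max-parallel-computes': 2,
    'alarm-restriction-type': 'relaxed',
    'default-instance-action': 'migrate',
}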
196
dcmanager/api/controllers/v1/sw_update_strategy.py
Executable file
@ -0,0 +1,196 @@
|
||||
# Copyright (c) 2017 Ericsson AB.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# Copyright (c) 2017 Wind River Systems, Inc.
|
||||
#
|
||||
# The right to copy, distribute, modify, or otherwise make use
|
||||
# of this software may be licensed only pursuant to the terms
|
||||
# of an applicable Wind River license agreement.
|
||||
#
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
from oslo_messaging import RemoteError
|
||||
|
||||
import pecan
|
||||
from pecan import expose
|
||||
from pecan import request
|
||||
|
||||
from dcmanager.api.controllers import restcomm
|
||||
from dcmanager.common import consts
|
||||
from dcmanager.common import exceptions
|
||||
from dcmanager.common.i18n import _
|
||||
from dcmanager.db import api as db_api
|
||||
from dcmanager.rpc import client as rpc_client
|
||||
|
||||
CONF = cfg.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class SwUpdateStrategyController(object):
|
||||
|
||||
def __init__(self):
|
||||
super(SwUpdateStrategyController, self).__init__()
|
||||
self.rpc_client = rpc_client.ManagerClient()
|
||||
|
||||
@expose(generic=True, template='json')
|
||||
def index(self):
|
||||
# Route the request to specific methods with parameters
|
||||
pass
|
||||
|
||||
@index.when(method='GET', template='json')
|
||||
def get(self, steps=None, cloud_name=None):
|
||||
"""Get details about software update strategy.
|
||||
|
||||
:param steps: get the steps for this strategy (optional)
|
||||
:param cloud_name: name of cloud (optional)
|
||||
"""
|
||||
context = restcomm.extract_context_from_environ()
|
||||
|
||||
if steps is None:
|
||||
# Strategy requested
|
||||
strategy = None
|
||||
try:
|
||||
strategy = db_api.sw_update_strategy_get(context)
|
||||
except exceptions.NotFound:
|
||||
pecan.abort(404, _('Strategy not found'))
|
||||
|
||||
strategy_dict = db_api.sw_update_strategy_db_model_to_dict(
|
||||
strategy)
|
||||
return strategy_dict
|
||||
|
||||
elif steps == "steps":
|
||||
# Steps for the strategy requested
|
||||
if cloud_name is None:
|
||||
# List of steps requested
|
||||
result = dict()
|
||||
result['strategy-steps'] = list()
|
||||
strategy_steps = db_api.strategy_step_get_all(context)
|
||||
for strategy_step in strategy_steps:
|
||||
result['strategy-steps'].append(
|
||||
db_api.strategy_step_db_model_to_dict(strategy_step))
|
||||
|
||||
return result
|
||||
else:
|
||||
# Single step requested
|
||||
strategy_step = None
|
||||
if cloud_name == consts.SYSTEM_CONTROLLER_NAME:
|
||||
# The system controller step does not map to a subcloud,
|
||||
# so has no name.
|
||||
try:
|
||||
strategy_step = db_api.strategy_step_get(context, None)
|
||||
except exceptions.StrategyStepNotFound:
|
||||
pecan.abort(404, _('Strategy step not found'))
|
||||
else:
|
||||
try:
|
||||
strategy_step = db_api.strategy_step_get_by_name(
|
||||
context, cloud_name)
|
||||
except exceptions.StrategyStepNameNotFound:
|
||||
pecan.abort(404, _('Strategy step not found'))
|
||||
|
||||
strategy_step_dict = db_api.strategy_step_db_model_to_dict(
|
||||
strategy_step)
|
||||
return strategy_step_dict
|
||||
|
||||
@index.when(method='POST', template='json')
|
||||
def post(self, actions=None):
|
||||
"""Create a new software update strategy."""
|
||||
context = restcomm.extract_context_from_environ()
|
||||
|
||||
payload = eval(request.body)
|
||||
if not payload:
|
||||
pecan.abort(400, _('Body required'))
|
||||
|
||||
if actions is None:
|
||||
# Validate any options that were supplied
|
||||
strategy_type = payload.get('type')
|
||||
if not strategy_type:
|
||||
pecan.abort(400, _('type required'))
|
||||
if strategy_type not in [consts.SW_UPDATE_TYPE_PATCH]:
|
||||
pecan.abort(400, _('type invalid'))
|
||||
|
||||
subcloud_apply_type = payload.get('subcloud-apply-type')
|
||||
if subcloud_apply_type is not None:
|
||||
if subcloud_apply_type not in [
|
||||
consts.SUBCLOUD_APPLY_TYPE_PARALLEL,
|
||||
consts.SUBCLOUD_APPLY_TYPE_SERIAL]:
|
||||
pecan.abort(400, _('subcloud-apply-type invalid'))
|
||||
|
||||
max_parallel_subclouds_str = payload.get('max-parallel-subclouds')
|
||||
if max_parallel_subclouds_str is not None:
|
||||
max_parallel_subclouds = None
|
||||
try:
|
||||
max_parallel_subclouds = int(max_parallel_subclouds_str)
|
||||
except ValueError:
|
||||
pecan.abort(400, _('max-parallel-subclouds invalid'))
|
||||
# TODO(Bart): Decide on a maximum
|
||||
if max_parallel_subclouds < 1 or max_parallel_subclouds > 100:
|
||||
pecan.abort(400, _('max-parallel-subclouds invalid'))
|
||||
|
||||
stop_on_failure = payload.get('stop-on-failure')
|
||||
if stop_on_failure is not None:
|
||||
if stop_on_failure not in ["true", "false"]:
|
||||
pecan.abort(400, _('stop-on-failure invalid'))
|
||||
|
||||
try:
|
||||
# Ask dcmanager-manager to create the strategy.
|
||||
# It will do all the real work...
|
||||
return self.rpc_client.create_sw_update_strategy(context,
|
||||
payload)
|
||||
except RemoteError as e:
|
||||
pecan.abort(422, e.value)
|
||||
except Exception as e:
|
||||
LOG.exception(e)
|
||||
pecan.abort(500, _('Unable to create strategy'))
|
||||
elif actions == 'actions':
|
||||
# Apply or abort a strategy
|
||||
action = payload.get('action')
|
||||
if not action:
|
||||
pecan.abort(400, _('action required'))
|
||||
if action == consts.SW_UPDATE_ACTION_APPLY:
|
||||
try:
|
||||
# Ask dcmanager-manager to apply the strategy.
|
||||
# It will do all the real work...
|
||||
return self.rpc_client.apply_sw_update_strategy(context)
|
||||
except RemoteError as e:
|
||||
pecan.abort(422, e.value)
|
||||
except Exception as e:
|
||||
LOG.exception(e)
|
||||
pecan.abort(500, _('Unable to apply strategy'))
|
||||
elif action == consts.SW_UPDATE_ACTION_ABORT:
|
||||
try:
|
||||
# Ask dcmanager-manager to abort the strategy.
|
||||
# It will do all the real work...
|
||||
return self.rpc_client.abort_sw_update_strategy(context)
|
||||
except RemoteError as e:
|
||||
pecan.abort(422, e.value)
|
||||
except Exception as e:
|
||||
LOG.exception(e)
|
||||
pecan.abort(500, _('Unable to abort strategy'))
|
||||
|
||||
@index.when(method='delete', template='json')
|
||||
def delete(self):
|
||||
"""Delete the software update strategy."""
|
||||
context = restcomm.extract_context_from_environ()
|
||||
|
||||
try:
|
||||
# Ask dcmanager-manager to delete the strategy.
|
||||
# It will do all the real work...
|
||||
return self.rpc_client.delete_sw_update_strategy(context)
|
||||
except RemoteError as e:
|
||||
pecan.abort(422, e.value)
|
||||
except Exception as e:
|
||||
LOG.exception(e)
|
||||
pecan.abort(500, _('Unable to delete strategy'))
|
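# For reference, sketches of the request bodies the POST handler above
# accepts. The keys mirror the validation code; the values are illustrative
# only.
_EXAMPLE_CREATE_PAYLOAD = {
    'type': consts.SW_UPDATE_TYPE_PATCH,  # currently the only accepted type
    'subcloud-apply-type': consts.SUBCLOUD_APPLY_TYPE_PARALLEL,  # optional
    'max-parallel-subclouds': '10',   # optional, parsed with int(), 1-100
    'stop-on-failure': 'true',        # optional, 'true' or 'false'
}
_EXAMPLE_ACTION_PAYLOAD = {
    'action': consts.SW_UPDATE_ACTION_APPLY,  # or SW_UPDATE_ACTION_ABORT
}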
78
dcmanager/api/enforcer.py
Normal file
@ -0,0 +1,78 @@
|
||||
# Copyright 2017 Ericsson AB.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Copyright (c) 2017 Wind River Systems, Inc.
|
||||
#
|
||||
# The right to copy, distribute, modify, or otherwise make use
|
||||
# of this software may be licensed only pursuant to the terms
|
||||
# of an applicable Wind River license agreement.
|
||||
#
|
||||
|
||||
"""Policy enforcer for DC Manager."""
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_policy import policy
|
||||
|
||||
from dcmanager.common import exceptions as exc
|
||||
|
||||
|
||||
_ENFORCER = None
|
||||
|
||||
|
||||
def enforce(action, context, target=None, do_raise=True,
|
||||
exc=exc.NotAuthorized):
|
||||
"""Verify that the action is valid on the target in this context.
|
||||
|
||||
:param action: String, representing the action to be checked.
|
||||
This should be colon separated for clarity.
|
||||
e.g. ``sync:list``
|
||||
:param context: DC Manager context.
|
||||
:param target: Dictionary, representing the object of the action.
|
||||
For object creation, this should be a dictionary
|
||||
representing the location of the object.
|
||||
e.g. ``{'project_id': context.project}``
|
||||
:param do_raise: if True (the default), raises specified exception.
|
||||
:param exc: Exception to be raised if not authorized. Default is
|
||||
dcmanager.common.exceptions.NotAuthorized.
|
||||
|
||||
:return: returns True if authorized and False if not authorized and
|
||||
do_raise is False.
|
||||
"""
|
||||
if cfg.CONF.auth_strategy != 'keystone':
|
||||
# Policy enforcement is supported now only with Keystone
|
||||
# authentication.
|
||||
return
|
||||
|
||||
target_obj = {
|
||||
'project_id': context.project,
|
||||
'user_id': context.user,
|
||||
}
|
||||
|
||||
target_obj.update(target or {})
|
||||
_ensure_enforcer_initialization()
|
||||
|
||||
try:
|
||||
_ENFORCER.enforce(action, target_obj, context.to_dict(),
|
||||
do_raise=do_raise, exc=exc)
|
||||
return True
|
||||
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
|
||||
def _ensure_enforcer_initialization():
|
||||
global _ENFORCER
|
||||
if not _ENFORCER:
|
||||
_ENFORCER = policy.Enforcer(cfg.CONF)
|
||||
_ENFORCER.load_rules()
|
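# A minimal usage sketch of enforce() above. The policy rule name
# 'dcmanager:subclouds:get' is illustrative; real rule names come from
# whatever policy file the Enforcer loads.
def _can_list_subclouds(context):
    # Return True/False rather than raising, mirroring do_raise=False.
    return enforce('dcmanager:subclouds:get', context,
                   target={'project_id': context.project},
                   do_raise=False)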
18
dcmanager/cmd/README.rst
Executable file
@ -0,0 +1,18 @@
|
||||
===============================
|
||||
cmd
|
||||
===============================
|
||||
|
||||
Scripts to start the DC Manager API and Manager services
|
||||
|
||||
api.py:
|
||||
start API service
|
||||
python api.py --config-file=/etc/dcmanager.conf
|
||||
|
||||
manager.py:
|
||||
start Manager service
|
||||
python manager.py --config-file=/etc/dcmanager.conf
|
||||
|
||||
manage.py:
|
||||
CLI interface for dcmanager database management
|
||||
dcmanager-manage --config-file /etc/dcmanager.conf db_sync
|
||||
dcmanager-manage --config-file /etc/dcmanager.conf db_version
|
0
dcmanager/cmd/__init__.py
Normal file
78
dcmanager/cmd/api.py
Normal file
@ -0,0 +1,78 @@
|
||||
# Copyright 2015 Huawei Technologies Co., Ltd.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# Copyright (c) 2017 Wind River Systems, Inc.
|
||||
#
|
||||
# The right to copy, distribute, modify, or otherwise make use
|
||||
# of this software may be licensed only pursuant to the terms
|
||||
# of an applicable Wind River license agreement.
|
||||
#
|
||||
|
||||
# Much of this module is based on the work of the Ironic team
|
||||
# see http://git.openstack.org/cgit/openstack/ironic/tree/ironic/cmd/api.py
|
||||
|
||||
|
||||
import sys
|
||||
|
||||
import eventlet
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
from oslo_service import systemd
|
||||
from oslo_service import wsgi
|
||||
|
||||
import logging as std_logging
|
||||
|
||||
from dcmanager.api import api_config
|
||||
from dcmanager.api import app
|
||||
|
||||
from dcmanager.common import config
|
||||
from dcmanager.common import messaging
|
||||
from dcorch.common import messaging as dcorch_messaging
|
||||
CONF = cfg.CONF
|
||||
config.register_options()
|
||||
LOG = logging.getLogger('dcmanager.api')
|
||||
eventlet.monkey_patch(os=False)
|
||||
|
||||
|
||||
def main():
|
||||
api_config.init(sys.argv[1:])
|
||||
api_config.setup_logging()
|
||||
application = app.setup_app()
|
||||
|
||||
host = CONF.bind_host
|
||||
port = CONF.bind_port
|
||||
workers = CONF.api_workers
|
||||
|
||||
if workers < 1:
|
||||
LOG.warning("Wrong worker number, worker = %(workers)s", workers)
|
||||
workers = 1
|
||||
|
||||
LOG.info("Server on http://%(host)s:%(port)s with %(workers)s",
|
||||
{'host': host, 'port': port, 'workers': workers})
|
||||
messaging.setup()
|
||||
dcorch_messaging.setup()
|
||||
systemd.notify_once()
|
||||
service = wsgi.Server(CONF, "DCManager", application, host, port)
|
||||
|
||||
app.serve(service, CONF, workers)
|
||||
|
||||
LOG.info("Configuration:")
|
||||
CONF.log_opt_values(LOG, std_logging.INFO)
|
||||
|
||||
app.wait()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
85
dcmanager/cmd/manage.py
Normal file
@ -0,0 +1,85 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# Copyright (c) 2017 Wind River Systems, Inc.
|
||||
#
|
||||
# The right to copy, distribute, modify, or otherwise make use
|
||||
# of this software may be licensed only pursuant to the terms
|
||||
# of an applicable Wind River license agreement.
|
||||
#
|
||||
|
||||
"""
|
||||
CLI interface for DC Manager management.
|
||||
"""
|
||||
|
||||
import sys
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
|
||||
from dcmanager.common import config
|
||||
from dcmanager.db import api
|
||||
from dcmanager import version
|
||||
|
||||
config.register_options()
|
||||
CONF = cfg.CONF
|
||||
|
||||
|
||||
def do_db_version():
|
||||
'''Print database's current migration level.'''
|
||||
print(api.db_version(api.get_engine()))
|
||||
|
||||
|
||||
def do_db_sync():
|
||||
'''Place a database under migration control and upgrade.
|
||||
|
||||
DB is created first if necessary.
|
||||
'''
|
||||
api.db_sync(api.get_engine(), CONF.command.version)
|
||||
|
||||
|
||||
def add_command_parsers(subparsers):
|
||||
parser = subparsers.add_parser('db_version')
|
||||
parser.set_defaults(func=do_db_version)
|
||||
|
||||
parser = subparsers.add_parser('db_sync')
|
||||
parser.set_defaults(func=do_db_sync)
|
||||
parser.add_argument('version', nargs='?')
|
||||
parser.add_argument('current_version', nargs='?')
|
||||
|
||||
command_opt = cfg.SubCommandOpt('command',
|
||||
title='Commands',
|
||||
help='Show available commands.',
|
||||
handler=add_command_parsers)
|
||||
|
||||
|
||||
def main():
|
||||
logging.register_options(CONF)
|
||||
logging.setup(CONF, 'dcmanager-manage')
|
||||
CONF.register_cli_opt(command_opt)
|
||||
|
||||
try:
|
||||
default_config_files = cfg.find_config_files('dcmanager',
|
||||
'dcmanager-engine')
|
||||
CONF(sys.argv[1:], project='dcmanager', prog='dcmanager-manage',
|
||||
version=version.version_info.version_string(),
|
||||
default_config_files=default_config_files)
|
||||
except RuntimeError as e:
|
||||
sys.exit("ERROR: %s" % e)
|
||||
|
||||
try:
|
||||
CONF.command.func()
|
||||
except Exception as e:
|
||||
sys.exit("ERROR: %s" % e)
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
64
dcmanager/cmd/manager.py
Normal file
@ -0,0 +1,64 @@
|
||||
#!/usr/bin/env python
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# Copyright (c) 2017 Wind River Systems, Inc.
|
||||
#
|
||||
# The right to copy, distribute, modify, or otherwise make use
|
||||
# of this software may be licensed only pursuant to the terms
|
||||
# of an applicable Wind River license agreement.
|
||||
#
|
||||
|
||||
"""
|
||||
DC Manager Engine Server.
|
||||
"""
|
||||
|
||||
import eventlet
|
||||
eventlet.monkey_patch()
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_i18n import _lazy
|
||||
from oslo_log import log as logging
|
||||
from oslo_service import service
|
||||
|
||||
from dcmanager.common import config
|
||||
from dcmanager.common import consts
|
||||
from dcmanager.common import messaging
|
||||
from dcorch.common import messaging as dcorch_messaging
|
||||
|
||||
_lazy.enable_lazy()
|
||||
config.register_options()
|
||||
config.register_keystone_options()
|
||||
LOG = logging.getLogger('dcmanager.engine')
|
||||
|
||||
|
||||
def main():
|
||||
logging.register_options(cfg.CONF)
|
||||
cfg.CONF(project='dcmanager', prog='dcmanager-engine')
|
||||
logging.setup(cfg.CONF, 'dcmanager-engine')
|
||||
logging.set_defaults()
|
||||
messaging.setup()
|
||||
dcorch_messaging.setup()
|
||||
|
||||
from dcmanager.manager import service as manager
|
||||
|
||||
srv = manager.DCManagerService(cfg.CONF.host,
|
||||
consts.TOPIC_DC_MANAGER)
|
||||
launcher = service.launch(cfg.CONF,
|
||||
srv, workers=cfg.CONF.workers)
|
||||
# the following periodic tasks are intended to serve as HA checking
|
||||
# srv.create_periodic_tasks()
|
||||
launcher.wait()
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
0
dcmanager/common/__init__.py
Normal file
159
dcmanager/common/config.py
Normal file
@ -0,0 +1,159 @@
|
||||
# Copyright 2016 Ericsson AB
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# Copyright (c) 2017 Wind River Systems, Inc.
|
||||
#
|
||||
# The right to copy, distribute, modify, or otherwise make use
|
||||
# of this software may be licensed only pursuant to the terms
|
||||
# of an applicable Wind River license agreement.
|
||||
#
|
||||
|
||||
"""
|
||||
File to store all the configurations
|
||||
"""
|
||||
from oslo_config import cfg
|
||||
from oslo_utils import importutils
|
||||
|
||||
# Ensure keystonemiddleware options are imported
|
||||
importutils.import_module('keystonemiddleware.auth_token')
|
||||
|
||||
global_opts = [
|
||||
cfg.BoolOpt('use_default_quota_class',
|
||||
default=True,
|
||||
help='Enables or disables use of default quota class '
|
||||
'with default quota.'),
|
||||
cfg.IntOpt('report_interval',
|
||||
default=60,
|
||||
help='Seconds between running periodic reporting tasks.'),
|
||||
]
|
||||
|
||||
# OpenStack credentials used for Endpoint Cache
|
||||
# We need to register the below non-standard config
|
||||
# options to dcmanager engine
|
||||
keystone_opts = [
|
||||
cfg.StrOpt('username',
|
||||
help='Username of account'),
|
||||
cfg.StrOpt('password',
|
||||
help='Password of account'),
|
||||
cfg.StrOpt('project_name',
|
||||
help='Tenant name of account'),
|
||||
cfg.StrOpt('user_domain_name',
|
||||
default='Default',
|
||||
help='User domain name of account'),
|
||||
cfg.StrOpt('project_domain_name',
|
||||
default='Default',
|
||||
help='Project domain name of account'),
|
||||
]
|
||||
|
||||
|
||||
# Pecan_opts
|
||||
pecan_opts = [
|
||||
cfg.StrOpt(
|
||||
'root',
|
||||
default='dcmanager.api.controllers.root.RootController',
|
||||
help='Pecan root controller'
|
||||
),
|
||||
cfg.ListOpt(
|
||||
'modules',
|
||||
default=["dcmanager.api"],
|
||||
help='A list of modules where pecan will search for applications.'
|
||||
),
|
||||
cfg.BoolOpt(
|
||||
'debug',
|
||||
default=False,
|
||||
help='Enables the ability to display tracebacks in the browser and '
|
||||
'interactively debug during development.'
|
||||
),
|
||||
cfg.BoolOpt(
|
||||
'auth_enable',
|
||||
default=True,
|
||||
help='Enables user authentication in pecan.'
|
||||
)
|
||||
]
|
||||
|
||||
|
||||
# OpenStack credentials used for Endpoint Cache
|
||||
cache_opts = [
|
||||
cfg.StrOpt('auth_uri',
|
||||
help='Keystone authorization url'),
|
||||
cfg.StrOpt('identity_uri',
|
||||
help='Keystone service url'),
|
||||
cfg.StrOpt('admin_username',
|
||||
help='Username of admin account, needed when'
|
||||
' auto_refresh_endpoint set to True'),
|
||||
cfg.StrOpt('admin_password',
|
||||
help='Password of admin account, needed when'
|
||||
' auto_refresh_endpoint set to True'),
|
||||
cfg.StrOpt('admin_tenant',
|
||||
help='Tenant name of admin account, needed when'
|
||||
' auto_refresh_endpoint set to True'),
|
||||
cfg.StrOpt('admin_user_domain_name',
|
||||
default='Default',
|
||||
help='User domain name of admin account, needed when'
|
||||
' auto_refresh_endpoint set to True'),
|
||||
cfg.StrOpt('admin_project_domain_name',
|
||||
default='Default',
|
||||
help='Project domain name of admin account, needed when'
|
||||
' auto_refresh_endpoint set to True')
|
||||
]
|
||||
|
||||
scheduler_opts = [
|
||||
cfg.BoolOpt('periodic_enable',
|
||||
default=True,
|
||||
help='boolean value for enable/disable periodic tasks'),
|
||||
cfg.IntOpt('subcloud_audit_interval',
|
||||
default=180,
|
||||
help='periodic time interval for subcloud audit'),
|
||||
cfg.IntOpt('patch_audit_interval',
|
||||
default=10,
|
||||
help='periodic time interval for patch audit')
|
||||
]
|
||||
|
||||
common_opts = [
|
||||
cfg.IntOpt('workers', default=1,
|
||||
help='number of workers'),
|
||||
cfg.StrOpt('host',
|
||||
default='localhost',
|
||||
help='hostname of the machine')
|
||||
]
|
||||
|
||||
scheduler_opt_group = cfg.OptGroup(name='scheduler',
|
||||
title='Scheduler options for periodic job')
|
||||
keystone_opt_group = cfg.OptGroup(name='keystone_authtoken',
|
||||
title='Keystone options')
|
||||
# The group stores the pecan configurations.
|
||||
pecan_group = cfg.OptGroup(name='pecan',
|
||||
title='Pecan options')
|
||||
|
||||
cache_opt_group = cfg.OptGroup(name='cache',
|
||||
title='OpenStack Credentials')
|
||||
|
||||
|
||||
def list_opts():
|
||||
yield cache_opt_group.name, cache_opts
|
||||
yield scheduler_opt_group.name, scheduler_opts
|
||||
yield pecan_group.name, pecan_opts
|
||||
yield None, global_opts
|
||||
yield None, common_opts
|
||||
|
||||
|
||||
def register_options():
|
||||
for group, opts in list_opts():
|
||||
cfg.CONF.register_opts(opts, group=group)
|
||||
|
||||
|
||||
# Only necessary for dcmanager engine, keystone_authtoken options for
|
||||
# dcmanager-api will get picked up and registered automatically from the
|
||||
# config file
|
||||
def register_keystone_options():
|
||||
cfg.CONF.register_opts(keystone_opts, group=keystone_opt_group.name)
|
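# Usage sketch (not called anywhere): once register_options() has run, as the
# cmd/ entry points do at import time, the grouped options above are read
# through cfg.CONF in the usual oslo.config way.
def _example_read_options():
    return (cfg.CONF.scheduler.subcloud_audit_interval,  # 180 by default
            cfg.CONF.scheduler.patch_audit_interval,     # 10 by default
            cfg.CONF.workers)                            # 1 by default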
90
dcmanager/common/consts.py
Normal file
@ -0,0 +1,90 @@
|
||||
# Copyright (c) 2016 Ericsson AB.
|
||||
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# Copyright (c) 2017 Wind River Systems, Inc.
|
||||
#
|
||||
# The right to copy, distribute, modify, or otherwise make use
|
||||
# of this software may be licensed only pursuant to the terms
|
||||
# of an applicable Wind River license agreement.
|
||||
#
|
||||
|
||||
RPC_API_VERSION = "1.0"
|
||||
|
||||
TOPIC_DC_MANAGER = "dcmanager"
|
||||
|
||||
PATCH_VAULT_DIR = "/opt/patch-vault"
|
||||
|
||||
# Well known region names
|
||||
SYSTEM_CONTROLLER_NAME = "SystemController"
|
||||
DEFAULT_REGION_NAME = "RegionOne"
|
||||
|
||||
# Subcloud management state
|
||||
MANAGEMENT_UNMANAGED = "unmanaged"
|
||||
MANAGEMENT_MANAGED = "managed"
|
||||
|
||||
# Subcloud availability status
|
||||
AVAILABILITY_OFFLINE = "offline"
|
||||
AVAILABILITY_ONLINE = "online"
|
||||
|
||||
# Subcloud sync status
|
||||
SYNC_STATUS_UNKNOWN = "unknown"
|
||||
SYNC_STATUS_IN_SYNC = "in-sync"
|
||||
SYNC_STATUS_OUT_OF_SYNC = "out-of-sync"
|
||||
|
||||
# Subcloud endpoint related database fields
|
||||
ENDPOINT_SYNC_STATUS = "endpoint_sync_status"
|
||||
SYNC_STATUS = "sync_status"
|
||||
ENDPOINT_TYPE = "endpoint_type"
|
||||
|
||||
# Service group status
|
||||
SERVICE_GROUP_STATUS_ACTIVE = "active"
|
||||
|
||||
# Availability fail count
|
||||
AVAIL_FAIL_COUNT_TO_ALARM = 2
|
||||
AVAIL_FAIL_COUNT_MAX = 9999
|
||||
|
||||
# Software update type
|
||||
SW_UPDATE_TYPE_PATCH = "patch"
|
||||
SW_UPDATE_TYPE_UPGRADE = "upgrade"
|
||||
|
||||
# Software update states
|
||||
SW_UPDATE_STATE_INITIAL = "initial"
|
||||
SW_UPDATE_STATE_APPLYING = "applying"
|
||||
SW_UPDATE_STATE_ABORT_REQUESTED = "abort requested"
|
||||
SW_UPDATE_STATE_ABORTING = "aborting"
|
||||
SW_UPDATE_STATE_COMPLETE = "complete"
|
||||
SW_UPDATE_STATE_ABORTED = "aborted"
|
||||
SW_UPDATE_STATE_FAILED = "failed"
|
||||
SW_UPDATE_STATE_DELETING = "deleting"
|
||||
SW_UPDATE_STATE_DELETED = "deleted"
|
||||
|
||||
# Software update actions
|
||||
SW_UPDATE_ACTION_APPLY = "apply"
|
||||
SW_UPDATE_ACTION_ABORT = "abort"
|
||||
|
||||
# Subcloud apply types
|
||||
SUBCLOUD_APPLY_TYPE_PARALLEL = "parallel"
|
||||
SUBCLOUD_APPLY_TYPE_SERIAL = "serial"
|
||||
|
||||
# Strategy step states
|
||||
STRATEGY_STATE_INITIAL = "initial"
|
||||
STRATEGY_STATE_UPDATING_PATCHES = "updating patches"
|
||||
STRATEGY_STATE_CREATING_STRATEGY = "creating strategy"
|
||||
STRATEGY_STATE_APPLYING_STRATEGY = "applying strategy"
|
||||
STRATEGY_STATE_FINISHING = "finishing"
|
||||
STRATEGY_STATE_COMPLETE = "complete"
|
||||
STRATEGY_STATE_ABORTED = "aborted"
|
||||
STRATEGY_STATE_FAILED = "failed"
|
||||
|
||||
SW_UPDATE_DEFAULT_TITLE = "all clouds default"
|
154
dcmanager/common/context.py
Normal file
@ -0,0 +1,154 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# Copyright (c) 2017 Wind River Systems, Inc.
|
||||
#
|
||||
# The right to copy, distribute, modify, or otherwise make use
|
||||
# of this software may be licensed only pursuant to the terms
|
||||
# of an applicable Wind River license agreement.
|
||||
#
|
||||
|
||||
import pecan
|
||||
from pecan import hooks
|
||||
|
||||
from oslo_context import context as base_context
|
||||
from oslo_utils import encodeutils
|
||||
|
||||
from dcmanager.common import policy
|
||||
from dcmanager.db import api as db_api
|
||||
|
||||
ALLOWED_WITHOUT_AUTH = '/'
|
||||
|
||||
|
||||
class RequestContext(base_context.RequestContext):
|
||||
'''Stores information about the security context.
|
||||
|
||||
The context encapsulates information related to the user accessing
|
||||
the system, as well as additional request information.
|
||||
'''
|
||||
|
||||
def __init__(self, auth_token=None, user=None, project=None,
|
||||
domain=None, user_domain=None, project_domain=None,
|
||||
is_admin=None, read_only=False, show_deleted=False,
|
||||
request_id=None, auth_url=None, trusts=None,
|
||||
user_name=None, project_name=None, domain_name=None,
|
||||
user_domain_name=None, project_domain_name=None,
|
||||
auth_token_info=None, region_name=None, roles=None,
|
||||
password=None, **kwargs):
|
||||
|
||||
'''Initializer of request context.'''
|
||||
# We still have 'tenant' param because oslo_context still use it.
|
||||
super(RequestContext, self).__init__(
|
||||
auth_token=auth_token, user=user, tenant=project,
|
||||
domain=domain, user_domain=user_domain,
|
||||
project_domain=project_domain, roles=roles,
|
||||
read_only=read_only, show_deleted=show_deleted,
|
||||
request_id=request_id)
|
||||
|
||||
# request_id might be a byte array
|
||||
self.request_id = encodeutils.safe_decode(self.request_id)
|
||||
|
||||
# we save an additional 'project' internally for use
|
||||
self.project = project
|
||||
|
||||
# Session for DB access
|
||||
self._session = None
|
||||
|
||||
self.auth_url = auth_url
|
||||
self.trusts = trusts
|
||||
|
||||
self.user_name = user_name
|
||||
self.project_name = project_name
|
||||
self.domain_name = domain_name
|
||||
self.user_domain_name = user_domain_name
|
||||
self.project_domain_name = project_domain_name
|
||||
|
||||
self.auth_token_info = auth_token_info
|
||||
self.region_name = region_name
|
||||
self.roles = roles or []
|
||||
self.password = password
|
||||
|
||||
# Check user is admin or not
|
||||
if is_admin is None:
|
||||
self.is_admin = policy.enforce(self, 'context_is_admin',
|
||||
target={'project': self.project},
|
||||
do_raise=False)
|
||||
else:
|
||||
self.is_admin = is_admin
|
||||
|
||||
@property
|
||||
def session(self):
|
||||
if self._session is None:
|
||||
self._session = db_api.get_session()
|
||||
return self._session
|
||||
|
||||
def to_dict(self):
|
||||
return {
|
||||
'auth_url': self.auth_url,
|
||||
'auth_token': self.auth_token,
|
||||
'auth_token_info': self.auth_token_info,
|
||||
'user': self.user,
|
||||
'user_name': self.user_name,
|
||||
'user_domain': self.user_domain,
|
||||
'user_domain_name': self.user_domain_name,
|
||||
'project': self.project,
|
||||
'project_name': self.project_name,
|
||||
'project_domain': self.project_domain,
|
||||
'project_domain_name': self.project_domain_name,
|
||||
'domain': self.domain,
|
||||
'domain_name': self.domain_name,
|
||||
'trusts': self.trusts,
|
||||
'region_name': self.region_name,
|
||||
'roles': self.roles,
|
||||
'show_deleted': self.show_deleted,
|
||||
'is_admin': self.is_admin,
|
||||
'request_id': self.request_id,
|
||||
'password': self.password,
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, values):
|
||||
return cls(**values)
|
||||
|
||||
|
||||
def get_admin_context(show_deleted=False):
|
||||
return RequestContext(is_admin=True, show_deleted=show_deleted)
|
||||
|
||||
|
||||
def get_service_context(**args):
|
||||
'''An abstraction layer for getting service context.
|
||||
|
||||
There could be multiple cloud backends for dcmanager to use. This
|
||||
abstraction layer provides an indirection for dcmanager to get the
|
||||
credentials of 'dcmanager' user on the specific cloud. By default,
|
||||
this credential refers to the credentials built for dcmanager middleware
|
||||
in an OpenStack cloud.
|
||||
'''
|
||||
pass
|
||||
|
||||
|
||||
class AuthHook(hooks.PecanHook):
|
||||
def before(self, state):
|
||||
if state.request.path == ALLOWED_WITHOUT_AUTH:
|
||||
return
|
||||
req = state.request
|
||||
identity_status = req.headers.get('X-Identity-Status')
|
||||
service_identity_status = req.headers.get('X-Service-Identity-Status')
|
||||
if (identity_status == 'Confirmed' or
|
||||
service_identity_status == 'Confirmed'):
|
||||
return
|
||||
if req.headers.get('X-Auth-Token'):
|
||||
msg = 'Auth token is invalid: %s' % req.headers['X-Auth-Token']
|
||||
else:
|
||||
msg = 'Authentication required'
|
||||
msg = "Failed to validate access token: %s" % str(msg)
|
||||
pecan.abort(status_code=401, detail=msg)
|
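# Round-trip sketch (illustrative, not called anywhere): to_dict() and
# from_dict() let a RequestContext be flattened into a plain dict, for
# example for RPC transport, and rebuilt on the receiving side.
def _example_context_round_trip():
    ctxt = RequestContext(user='admin', project='services',
                          auth_token='token', is_admin=True)
    payload = ctxt.to_dict()
    restored = RequestContext.from_dict(payload)
    return restored.project == ctxt.project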
148
dcmanager/common/exceptions.py
Normal file
@ -0,0 +1,148 @@
|
||||
# Copyright 2015 Huawei Technologies Co., Ltd.
|
||||
# Copyright 2015 Ericsson AB.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# Copyright (c) 2017 Wind River Systems, Inc.
|
||||
#
|
||||
# The right to copy, distribute, modify, or otherwise make use
|
||||
# of this software may be licensed only pursuant to the terms
|
||||
# of an applicable Wind River license agreement.
|
||||
#
|
||||
|
||||
"""
|
||||
DC Manager base exception handling.
|
||||
"""
|
||||
import six
|
||||
|
||||
from oslo_utils import excutils
|
||||
|
||||
from dcmanager.common.i18n import _
|
||||
|
||||
|
||||
class DCManagerException(Exception):
|
||||
"""Base DC Manager Exception.
|
||||
|
||||
To correctly use this class, inherit from it and define
|
||||
a 'message' property. That message will get printf'd
|
||||
with the keyword arguments provided to the constructor.
|
||||
"""
|
||||
|
||||
message = _("An unknown exception occurred.")
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
try:
|
||||
super(DCManagerException, self).__init__(self.message % kwargs)
|
||||
self.msg = self.message % kwargs
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception() as ctxt:
|
||||
if not self.use_fatal_exceptions():
|
||||
ctxt.reraise = False
|
||||
# at least get the core message out if something happened
|
||||
super(DCManagerException, self).__init__(self.message)
|
||||
|
||||
if six.PY2:
|
||||
def __unicode__(self):
|
||||
return unicode(self.msg)
|
||||
|
||||
def use_fatal_exceptions(self):
|
||||
return False
|
||||
|
||||
|
||||
class BadRequest(DCManagerException):
|
||||
message = _('Bad %(resource)s request: %(msg)s')
|
||||
|
||||
|
||||
class NotFound(DCManagerException):
|
||||
message = _("Not found")
|
||||
|
||||
|
||||
class Conflict(DCManagerException):
|
||||
message = _('Conflict: %(msg)s')
|
||||
|
||||
|
||||
class NotAuthorized(DCManagerException):
|
||||
message = _("Not authorized.")
|
||||
|
||||
|
||||
class ServiceUnavailable(DCManagerException):
|
||||
message = _("The service is unavailable")
|
||||
|
||||
|
||||
class AdminRequired(NotAuthorized):
|
||||
message = _("User does not have admin privileges: %(reason)s")
|
||||
|
||||
|
||||
class InUse(DCManagerException):
|
||||
message = _("The resource is inuse")
|
||||
|
||||
|
||||
class InvalidConfigurationOption(DCManagerException):
|
||||
message = _("An invalid value was provided for %(opt_name)s: "
|
||||
"%(opt_value)s")
|
||||
|
||||
|
||||
class SubcloudNotFound(NotFound):
|
||||
message = _("Subcloud with id %(subcloud_id)s doesn't exist.")
|
||||
|
||||
|
||||
class SubcloudNameNotFound(NotFound):
|
||||
message = _("Subcloud with name %(name)s doesn't exist.")
|
||||
|
||||
|
||||
class SubcloudNotOnline(DCManagerException):
|
||||
message = _("Subcloud is not online.")
|
||||
|
||||
|
||||
class SubcloudStatusNotFound(NotFound):
|
||||
message = _("SubcloudStatus with subcloud_id %(subcloud_id)s and "
|
||||
"endpoint_type %(endpoint_type)s doesn't exist.")
|
||||
|
||||
|
||||
class SubcloudNotUnmanaged(DCManagerException):
|
||||
message = _("Subcloud must be unmanaged to perform this operation.")
|
||||
|
||||
|
||||
class SubcloudNotOffline(DCManagerException):
|
||||
message = _("Subcloud must be powered down to perform this operation.")
|
||||
|
||||
|
||||
class SubcloudPatchOptsNotFound(NotFound):
|
||||
message = _("No options found for Subcloud with id %(subcloud_id)s, "
|
||||
"defaults will be used.")
|
||||
|
||||
|
||||
class ConnectionRefused(DCManagerException):
|
||||
message = _("Connection to the service endpoint is refused")
|
||||
|
||||
|
||||
class TimeOut(DCManagerException):
|
||||
message = _("Timeout when connecting to OpenStack Service")
|
||||
|
||||
|
||||
class InternalError(DCManagerException):
|
||||
message = _("Error when performing operation")
|
||||
|
||||
|
||||
class InvalidInputError(DCManagerException):
|
||||
message = _("An invalid value was provided")
|
||||
|
||||
|
||||
class StrategyStepNotFound(NotFound):
|
||||
message = _("StrategyStep with subcloud_id %(subcloud_id)s "
|
||||
"doesn't exist.")
|
||||
|
||||
|
||||
class StrategyStepNameNotFound(NotFound):
|
||||
message = _("StrategyStep with name %(name)s doesn't exist.")
|
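# Usage sketch (illustrative): subclasses are raised with keyword arguments
# that match the %(...)s placeholders in their message strings, so the
# formatted text ends up in both the exception and its .msg attribute.
def _example_raise_not_found(subcloud_id):
    # Formats to "Subcloud with id <subcloud_id> doesn't exist."
    raise SubcloudNotFound(subcloud_id=subcloud_id)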
27
dcmanager/common/i18n.py
Normal file
@ -0,0 +1,27 @@
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# Copyright (c) 2017 Wind River Systems, Inc.
|
||||
#
|
||||
# The right to copy, distribute, modify, or otherwise make use
|
||||
# of this software may be licensed only pursuant to the terms
|
||||
# of an applicable Wind River license agreement.
|
||||
#
|
||||
|
||||
import oslo_i18n
|
||||
|
||||
_translators = oslo_i18n.TranslatorFactory(domain='dcmanager')
|
||||
|
||||
# The primary translation function using the well-known name "_"
|
||||
_ = _translators.primary
|
124
dcmanager/common/manager.py
Normal file
@ -0,0 +1,124 @@
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
# copy and modify from Nova manager.py
|
||||
#
|
||||
# Copyright (c) 2017 Wind River Systems, Inc.
|
||||
#
|
||||
# The right to copy, distribute, modify, or otherwise make use
|
||||
# of this software may be licensed only pursuant to the terms
|
||||
# of an applicable Wind River license agreement.
|
||||
#
|
||||
|
||||
"""Base Manager class.
|
||||
Managers are responsible for a certain aspect of the system. It is a logical
|
||||
grouping of code relating to a portion of the system. In general other
|
||||
components should be using the manager to make changes to the components that
|
||||
it is responsible for.
|
||||
For example, other components that need to deal with volumes in some way,
|
||||
should do so by calling methods on the VolumeManager instead of directly
|
||||
changing fields in the database. This allows us to keep all of the code
|
||||
relating to volumes in the same place.
|
||||
We have adopted a basic strategy of Smart managers and dumb data, which means
|
||||
rather than attaching methods to data objects, components should call manager
|
||||
methods that act on the data.
|
||||
Methods on managers that can be executed locally should be called directly. If
|
||||
a particular method must execute on a remote host, this should be done via rpc
|
||||
to the service that wraps the manager
|
||||
Managers should be responsible for most of the db access, and
|
||||
non-implementation specific data. Anything implementation specific that can't
|
||||
be generalized should be done by the Driver.
|
||||
Managers will often provide methods for initial setup of a host or periodic
|
||||
tasks to a wrapping service.
|
||||
This module provides Manager, a base class for managers.
|
||||
"""
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
from oslo_service import periodic_task
|
||||
|
||||
from dcmanager.common import config
|
||||
|
||||
CONF = cfg.CONF
|
||||
config.register_options()
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class PeriodicTasks(periodic_task.PeriodicTasks):
|
||||
def __init__(self):
|
||||
super(PeriodicTasks, self).__init__(CONF)
|
||||
|
||||
|
||||
class Manager(PeriodicTasks):
|
||||
|
||||
def __init__(self, host=None, service_name='undefined'):
|
||||
if not host:
|
||||
host = cfg.CONF.host
|
||||
self.host = host
|
||||
self.service_name = service_name
|
||||
# self.notifier = rpc.get_notifier(self.service_name, self.host)
|
||||
self.additional_endpoints = []
|
||||
super(Manager, self).__init__()
|
||||
|
||||
def periodic_tasks(self, context, raise_on_error=False):
|
||||
"""Tasks to be run at a periodic interval."""
|
||||
return self.run_periodic_tasks(context, raise_on_error=raise_on_error)
|
||||
|
||||
def init_host(self):
|
||||
|
||||
"""init_host
|
||||
|
||||
Hook to do additional manager initialization when one requests
|
||||
the service be started. This is called before any service record
|
||||
is created.
|
||||
Child classes should override this method.
|
||||
"""
|
||||
|
||||
pass
|
||||
|
||||
def cleanup_host(self):
|
||||
|
||||
"""cleanup_host
|
||||
|
||||
Hook to do cleanup work when the service shuts down.
|
||||
Child classes should override this method.
|
||||
"""
|
||||
|
||||
pass
|
||||
|
||||
def pre_start_hook(self):
|
||||
|
||||
"""pre_start_hook
|
||||
|
||||
Hook to provide the manager the ability to do additional
|
||||
start-up work before any RPC queues/consumers are created. This is
|
||||
called after other initialization has succeeded and a service
|
||||
record is created.
|
||||
Child classes should override this method.
|
||||
"""
|
||||
|
||||
pass
|
||||
|
||||
def post_start_hook(self):
|
||||
|
||||
"""post_start_hook
|
||||
|
||||
Hook to provide the manager the ability to do additional
|
||||
start-up work immediately after a service creates RPC consumers
|
||||
and starts 'running'.
|
||||
Child classes should override this method.
|
||||
"""
|
||||
|
||||
pass
|
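The docstring above describes the manager pattern in general terms; a minimal sketch of a concrete manager built on this base class follows. The class name, service name, and 60-second spacing are illustrative, not values defined by this commit.

    from oslo_service import periodic_task

    from dcmanager.common import manager


    class ExampleAuditManager(manager.Manager):
        """Illustrative manager that runs a periodic audit task."""

        def __init__(self, host=None):
            super(ExampleAuditManager, self).__init__(
                host=host, service_name='example_audit')

        @periodic_task.periodic_task(spacing=60)
        def _audit(self, ctxt):
            # A real manager would do its db access and state changes here.
            pass

    # The wrapping service is expected to call periodic_tasks(ctxt) on a
    # timer; the PeriodicTasks machinery then dispatches to the decorated
    # methods such as _audit().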
118	dcmanager/common/messaging.py	Normal file
@@ -0,0 +1,118 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2017 Wind River Systems, Inc.
#
# The right to copy, distribute, modify, or otherwise make use
# of this software may be licensed only pursuant to the terms
# of an applicable Wind River license agreement.
#

import eventlet

from oslo_config import cfg
import oslo_messaging
from oslo_serialization import jsonutils

from dcmanager.common import context

TRANSPORT = None
NOTIFIER = None

_ALIASES = {
    'dcmanager.openstack.common.rpc.impl_kombu': 'rabbit',
    'dcmanager.openstack.common.rpc.impl_qpid': 'qpid',
    'dcmanager.openstack.common.rpc.impl_zmq': 'zmq',
}


class RequestContextSerializer(oslo_messaging.Serializer):
    def __init__(self, base):
        self._base = base

    def serialize_entity(self, ctxt, entity):
        if not self._base:
            return entity
        return self._base.serialize_entity(ctxt, entity)

    def deserialize_entity(self, ctxt, entity):
        if not self._base:
            return entity
        return self._base.deserialize_entity(ctxt, entity)

    @staticmethod
    def serialize_context(ctxt):
        return ctxt.to_dict()

    @staticmethod
    def deserialize_context(ctxt):
        return context.RequestContext.from_dict(ctxt)


class JsonPayloadSerializer(oslo_messaging.NoOpSerializer):
    @classmethod
    def serialize_entity(cls, context, entity):
        return jsonutils.to_primitive(entity, convert_instances=True)


def setup(url=None, optional=False):
    """Initialise the oslo_messaging layer."""
    global TRANSPORT, NOTIFIER

    if url and url.startswith("fake://"):
        # NOTE: oslo_messaging fake driver uses time.sleep
        # for task switch, so we need to monkey_patch it
        eventlet.monkey_patch(time=True)

    if not TRANSPORT:
        oslo_messaging.set_transport_defaults('dcmanager')
        exmods = ['dcmanager.common.exception']
        try:
            TRANSPORT = oslo_messaging.get_transport(
                cfg.CONF, url, allowed_remote_exmods=exmods, aliases=_ALIASES)
        except oslo_messaging.InvalidTransportURL as e:
            TRANSPORT = None
            if not optional or e.url:
                raise

    if not NOTIFIER and TRANSPORT:
        serializer = RequestContextSerializer(JsonPayloadSerializer())
        NOTIFIER = oslo_messaging.Notifier(TRANSPORT, serializer=serializer)


def cleanup():
    """Cleanup the oslo_messaging layer."""
    global TRANSPORT, NOTIFIER
    if TRANSPORT:
        TRANSPORT.cleanup()
        TRANSPORT = NOTIFIER = None


def get_rpc_server(target, endpoint):
    """Return a configured oslo_messaging rpc server."""
    serializer = RequestContextSerializer(JsonPayloadSerializer())
    return oslo_messaging.get_rpc_server(TRANSPORT, target, [endpoint],
                                         executor='eventlet',
                                         serializer=serializer)


def get_rpc_client(**kwargs):
    """Return a configured oslo_messaging RPCClient."""
    target = oslo_messaging.Target(**kwargs)
    serializer = RequestContextSerializer(JsonPayloadSerializer())
    return oslo_messaging.RPCClient(TRANSPORT, target,
                                    serializer=serializer)


def get_notifier(publisher_id):
    """Return a configured oslo_messaging notifier."""
    return NOTIFIER.prepare(publisher_id=publisher_id)
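A sketch of how a service might wire these helpers together. The topic and server names are placeholders, setup() is assumed to find a valid transport URL in cfg.CONF, and the RPC call itself is left commented out because it needs a real request context:

    import oslo_messaging

    from dcmanager.common import messaging


    class EchoEndpoint(object):
        """Illustrative RPC endpoint."""

        def echo(self, ctxt, payload):
            return payload


    messaging.setup()  # initialise TRANSPORT/NOTIFIER from cfg.CONF

    # Server side: expose the endpoint on a topic.
    target = oslo_messaging.Target(topic='dcmanager-example',
                                   server='controller-0')
    server = messaging.get_rpc_server(target, EchoEndpoint())
    server.start()

    # Client side: call into the same topic. RequestContextSerializer
    # converts the request context to/from a dict across the wire.
    client = messaging.get_rpc_client(topic='dcmanager-example',
                                      version='1.0')
    # result = client.call(ctxt, 'echo', payload={'ping': 'pong'})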
56	dcmanager/common/policy.py	Normal file
@@ -0,0 +1,56 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2017 Wind River Systems, Inc.
#
# The right to copy, distribute, modify, or otherwise make use
# of this software may be licensed only pursuant to the terms
# of an applicable Wind River license agreement.
#

"""
Policy Engine For DC Manager
"""

# from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_policy import policy

from dcmanager.common import exceptions

POLICY_ENFORCER = None
CONF = cfg.CONF


# @lockutils.synchronized('policy_enforcer', 'dcmanager-')
def _get_enforcer(policy_file=None, rules=None, default_rule=None):
    global POLICY_ENFORCER

    if POLICY_ENFORCER is None:
        POLICY_ENFORCER = policy.Enforcer(CONF,
                                          policy_file=policy_file,
                                          rules=rules,
                                          default_rule=default_rule)
    return POLICY_ENFORCER


def enforce(context, rule, target, do_raise=True, *args, **kwargs):
    enforcer = _get_enforcer()
    credentials = context.to_dict()
    target = target or {}
    if do_raise:
        kwargs.update(exc=exceptions.Forbidden)

    return enforcer.enforce(rule, target, credentials, do_raise,
                            *args, **kwargs)
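A sketch of how a request handler might call enforce(). The rule name is a placeholder (the real policy rules are not part of this excerpt), and exceptions.Forbidden is assumed to be defined in the exceptions module imported above:

    from dcmanager.common import policy


    def delete_subcloud(context, subcloud_id):
        # Raises exceptions.Forbidden when the caller's credentials do not
        # satisfy the rule; with do_raise=False it returns a boolean instead.
        policy.enforce(context, 'dcmanager:subcloud:delete',
                       {'subcloud_id': subcloud_id})
        # ... proceed with the delete ...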
89	dcmanager/common/serializer.py	Normal file
@@ -0,0 +1,89 @@
# Copyright 2015 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) 2017 Wind River Systems, Inc.
#
# The right to copy, distribute, modify, or otherwise make use
# of this software may be licensed only pursuant to the terms
# of an applicable Wind River license agreement.
#

import oslo_messaging

ATTR_NOT_SPECIFIED = object()


class Mapping(object):
    def __init__(self, mapping):
        self.direct_mapping = mapping
        self.reverse_mapping = {}
        for key, value in mapping.items():
            self.reverse_mapping[value] = key


_SINGLETON_MAPPING = Mapping({
    ATTR_NOT_SPECIFIED: "@@**ATTR_NOT_SPECIFIED**@@",
})


class DCManagerSerializer(oslo_messaging.Serializer):
    def __init__(self, base=None):
        super(DCManagerSerializer, self).__init__()
        self._base = base

    def serialize_entity(self, context, entity):
        if isinstance(entity, dict):
            for key, value in entity.items():
                entity[key] = self.serialize_entity(context, value)

        elif isinstance(entity, list):
            for i, item in enumerate(entity):
                entity[i] = self.serialize_entity(context, item)

        elif entity in _SINGLETON_MAPPING.direct_mapping:
            entity = _SINGLETON_MAPPING.direct_mapping[entity]

        if self._base is not None:
            entity = self._base.serialize_entity(context, entity)

        return entity

    def deserialize_entity(self, context, entity):
        if isinstance(entity, dict):
            for key, value in entity.items():
                entity[key] = self.deserialize_entity(context, value)

        elif isinstance(entity, list):
            for i, item in enumerate(entity):
                entity[i] = self.deserialize_entity(context, item)

        elif entity in _SINGLETON_MAPPING.reverse_mapping:
            entity = _SINGLETON_MAPPING.reverse_mapping[entity]

        if self._base is not None:
            entity = self._base.deserialize_entity(context, entity)

        return entity

    def serialize_context(self, context):
        if self._base is not None:
            context = self._base.serialize_context(context)

        return context

    def deserialize_context(self, context):
        if self._base is not None:
            context = self._base.deserialize_context(context)

        return context
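A sketch of the round trip this serializer performs: the ATTR_NOT_SPECIFIED sentinel, which cannot cross the RPC boundary as an object, is swapped for the magic string on the way out and restored on the way in. The payload values are illustrative:

    from dcmanager.common import serializer

    s = serializer.DCManagerSerializer()
    payload = {'description': serializer.ATTR_NOT_SPECIFIED,
               'name': 'subcloud1'}

    wire = s.serialize_entity(None, payload)
    # wire['description'] == "@@**ATTR_NOT_SPECIFIED**@@"

    restored = s.deserialize_entity(None, wire)
    assert restored['description'] is serializer.ATTR_NOT_SPECIFIED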
91	dcmanager/common/utils.py	Normal file
@@ -0,0 +1,91 @@
# Copyright 2015 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) 2017 Wind River Systems, Inc.
#
# The right to copy, distribute, modify, or otherwise make use
# of this software may be licensed only pursuant to the terms
# of an applicable Wind River license agreement.
#

import itertools

from dcmanager.common import consts
from dcmanager.common import exceptions
from dcmanager.db import api as db_api
from dcmanager.drivers.openstack import vim


def get_import_path(cls):
    return cls.__module__ + "." + cls.__name__


# Returns an iterator of tuples containing batch_size number of objects
# in each
def get_batch_projects(batch_size, project_list, fillvalue=None):
    args = [iter(project_list)] * batch_size
    return itertools.izip_longest(fillvalue=fillvalue, *args)


# to do validate the quota limits
def validate_quota_limits(payload):
    for resource in payload:
        # Check valid resource name
        if resource not in itertools.chain(consts.CINDER_QUOTA_FIELDS,
                                           consts.NOVA_QUOTA_FIELDS,
                                           consts.NEUTRON_QUOTA_FIELDS):
            raise exceptions.InvalidInputError
        # Check valid quota limit value in case for put/post
        if isinstance(payload, dict) and (not isinstance(
                payload[resource], int) or payload[resource] <= 0):
            raise exceptions.InvalidInputError


def get_sw_update_opts(context,
                       for_sw_update=False, subcloud_id=None):
    """Get sw update options for a subcloud

    :param context: request context object.
    :param for_sw_update: return the default options if subcloud options
                          are empty. Useful for retrieving sw update
                          options on application of patch strategy.
    :param subcloud_id: id of subcloud.

    """
    if subcloud_id is None:
        # Requesting defaults. Return constants if no entry in db.
        sw_update_opts_ref = db_api.sw_update_opts_default_get(context)
        if not sw_update_opts_ref:
            sw_update_opts_dict = vim.SW_UPDATE_OPTS_CONST_DEFAULT
            return sw_update_opts_dict
    else:
        # requesting subcloud options
        sw_update_opts_ref = db_api.sw_update_opts_get(context,
                                                       subcloud_id)
        if sw_update_opts_ref:
            subcloud_name = db_api.subcloud_get(context, subcloud_id).name
            return db_api.sw_update_opts_w_name_db_model_to_dict(
                sw_update_opts_ref, subcloud_name)
        elif for_sw_update:
            sw_update_opts_ref = db_api.sw_update_opts_default_get(context)
            if not sw_update_opts_ref:
                sw_update_opts_dict = vim.SW_UPDATE_OPTS_CONST_DEFAULT
                return sw_update_opts_dict
        else:
            raise exceptions.SubcloudPatchOptsNotFound(
                subcloud_id=subcloud_id)

    return db_api.sw_update_opts_w_name_db_model_to_dict(
        sw_update_opts_ref, consts.SW_UPDATE_DEFAULT_TITLE)
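A sketch of get_batch_projects() in use. Note that it relies on itertools.izip_longest, so this is Python 2 code as written (zip_longest is the Python 3 spelling); the project ids below are illustrative:

    from dcmanager.common import utils

    projects = ['p1', 'p2', 'p3', 'p4', 'p5']

    # Batches of 2, padded with None so every tuple has the same length:
    # ('p1', 'p2'), ('p3', 'p4'), ('p5', None)
    for batch in utils.get_batch_projects(2, projects):
        for project in batch:
            if project is not None:
                pass  # audit / sync this project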
48	dcmanager/common/version.py	Normal file
@@ -0,0 +1,48 @@
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2017 Wind River Systems, Inc.
#
# The right to copy, distribute, modify, or otherwise make use
# of this software may be licensed only pursuant to the terms
# of an applicable Wind River license agreement.
#

import pbr.version

DCMANAGER_VENDOR = "Wind River Systems"
DCMANAGER_PRODUCT = "Distributed Cloud Manager"
DCMANAGER_PACKAGE = None  # OS distro package version suffix

version_info = pbr.version.VersionInfo('distributedcloud')
version_string = version_info.version_string


def vendor_string():
    return DCMANAGER_VENDOR


def product_string():
    return DCMANAGER_PRODUCT


def package_string():
    return DCMANAGER_PACKAGE


def version_string_with_package():
    if package_string() is None:
        return version_info.version_string()
    else:
        return "%s-%s" % (version_info.version_string(), package_string())
15	dcmanager/config-generator.conf	Normal file
@@ -0,0 +1,15 @@
[DEFAULT]
output_file = etc/dcmanager/dcmanager.conf.sample
wrap_width = 79
namespace = dcmanager.common.config
namespace = dcmanager.manager.subcloud_manager
namespace = dcmanager.api.api_config
namespace = keystonemiddleware.auth_token
namespace = oslo.messaging
namespace = oslo.middleware
namespace = oslo.db
namespace = oslo.log
namespace = oslo.policy
namespace = oslo.service.service
namespace = oslo.service.periodic_task
namespace = oslo.service.sslutils
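This file is input for oslo-config-generator: each namespace line pulls in that module's registered options, and the merged sample lands at the output_file path. Assuming the tool is installed in the same environment (it is normally wrapped by a tox target rather than run by hand), the sample would be regenerated with a command along the lines of:

    oslo-config-generator --config-file dcmanager/config-generator.conf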
0	dcmanager/db/__init__.py	Normal file
394	dcmanager/db/api.py	Normal file
@@ -0,0 +1,394 @@
|
||||
# Copyright (c) 2015 Ericsson AB.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# Copyright (c) 2017 Wind River Systems, Inc.
|
||||
#
|
||||
# The right to copy, distribute, modify, or otherwise make use
|
||||
# of this software may be licensed only pursuant to the terms
|
||||
# of an applicable Wind River license agreement.
|
||||
#
|
||||
'''
|
||||
Interface for database access.
|
||||
|
||||
SQLAlchemy is currently the only supported backend.
|
||||
'''
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_db import api
|
||||
|
||||
from dcmanager.common import consts
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
_BACKEND_MAPPING = {'sqlalchemy': 'dcmanager.db.sqlalchemy.api'}
|
||||
|
||||
IMPL = api.DBAPI.from_config(CONF, backend_mapping=_BACKEND_MAPPING)
|
||||
|
||||
|
||||
def get_engine():
|
||||
return IMPL.get_engine()
|
||||
|
||||
|
||||
def get_session():
|
||||
return IMPL.get_session()
|
||||
|
||||
|
||||
# subcloud db methods
|
||||
|
||||
###################
|
||||
|
||||
def subcloud_db_model_to_dict(subcloud):
|
||||
"""Convert subcloud db model to dictionary."""
|
||||
result = {"id": subcloud.id,
|
||||
"name": subcloud.name,
|
||||
"description": subcloud.description,
|
||||
"location": subcloud.location,
|
||||
"software-version": subcloud.software_version,
|
||||
"management-state": subcloud.management_state,
|
||||
"availability-status": subcloud.availability_status,
|
||||
"management-subnet": subcloud.management_subnet,
|
||||
"management-start-ip": subcloud.management_start_ip,
|
||||
"management-end-ip": subcloud.management_end_ip,
|
||||
"management-gateway-ip": subcloud.management_gateway_ip,
|
||||
"systemcontroller-gateway-ip":
|
||||
subcloud.systemcontroller_gateway_ip,
|
||||
"created-at": subcloud.created_at,
|
||||
"updated-at": subcloud.updated_at}
|
||||
return result
|
||||
|
||||
|
||||
def subcloud_create(context, name, description, location, software_version,
|
||||
management_subnet, management_gateway_ip,
|
||||
management_start_ip, management_end_ip,
|
||||
systemcontroller_gateway_ip):
|
||||
"""Create a subcloud."""
|
||||
return IMPL.subcloud_create(context, name, description, location,
|
||||
software_version,
|
||||
management_subnet, management_gateway_ip,
|
||||
management_start_ip, management_end_ip,
|
||||
systemcontroller_gateway_ip)
|
||||
|
||||
|
||||
def subcloud_get(context, subcloud_id):
|
||||
"""Retrieve a subcloud or raise if it does not exist."""
|
||||
return IMPL.subcloud_get(context, subcloud_id)
|
||||
|
||||
|
||||
def subcloud_get_with_status(context, subcloud_id):
|
||||
"""Retrieve a subcloud and all endpoint sync statuses."""
|
||||
return IMPL.subcloud_get_with_status(context, subcloud_id)
|
||||
|
||||
|
||||
def subcloud_get_by_name(context, name):
|
||||
"""Retrieve a subcloud by name or raise if it does not exist."""
|
||||
return IMPL.subcloud_get_by_name(context, name)
|
||||
|
||||
|
||||
def subcloud_get_all(context):
|
||||
"""Retrieve all subclouds."""
|
||||
return IMPL.subcloud_get_all(context)
|
||||
|
||||
|
||||
def subcloud_get_all_with_status(context):
|
||||
"""Retrieve all subclouds and sync statuses."""
|
||||
return IMPL.subcloud_get_all_with_status(context)
|
||||
|
||||
|
||||
def subcloud_update(context, subcloud_id, management_state=None,
|
||||
availability_status=None, software_version=None,
|
||||
description=None, location=None, audit_fail_count=None):
|
||||
"""Update a subcloud or raise if it does not exist."""
|
||||
return IMPL.subcloud_update(context, subcloud_id, management_state,
|
||||
availability_status, software_version,
|
||||
description, location, audit_fail_count)
|
||||
|
||||
|
||||
def subcloud_destroy(context, subcloud_id):
|
||||
"""Destroy the subcloud or raise if it does not exist."""
|
||||
return IMPL.subcloud_destroy(context, subcloud_id)
|
||||
|
||||
|
||||
###################
|
||||
|
||||
def subcloud_status_create(context, subcloud_id, endpoint_type):
|
||||
"""Create a subcloud status for an endpoint_type."""
|
||||
return IMPL.subcloud_status_create(context, subcloud_id, endpoint_type)
|
||||
|
||||
|
||||
def subcloud_status_db_model_to_dict(subcloud_status):
|
||||
"""Convert subcloud status db model to dictionary."""
|
||||
if subcloud_status:
|
||||
result = {"subcloud_id": subcloud_status.subcloud_id,
|
||||
"sync_status": subcloud_status.sync_status}
|
||||
else:
|
||||
result = {"subcloud_id": 0,
|
||||
"sync_status": "unknown"}
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def subcloud_endpoint_status_db_model_to_dict(subcloud_status):
|
||||
"""Convert endpoint subcloud db model to dictionary."""
|
||||
if subcloud_status:
|
||||
result = {"endpoint_type": subcloud_status.endpoint_type,
|
||||
"sync_status": subcloud_status.sync_status}
|
||||
else:
|
||||
result = {}
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def subcloud_status_get(context, subcloud_id, endpoint_type):
|
||||
|
||||
"""Retrieve the subcloud status for an endpoint
|
||||
|
||||
Will raise if subcloud does not exist.
|
||||
"""
|
||||
|
||||
return IMPL.subcloud_status_get(context, subcloud_id, endpoint_type)
|
||||
|
||||
|
||||
def subcloud_status_get_all(context, subcloud_id):
|
||||
"""Retrieve all statuses for a subcloud."""
|
||||
return IMPL.subcloud_status_get_all(context, subcloud_id)
|
||||
|
||||
|
||||
def subcloud_status_get_all_by_name(context, name):
|
||||
"""Retrieve all statuses for a subcloud by name."""
|
||||
return IMPL.subcloud_status_get_all_by_name(context, name)
|
||||
|
||||
|
||||
def subcloud_status_update(context, subcloud_id, endpoint_type, sync_status):
|
||||
"""Update the status of a subcloud or raise if it does not exist."""
|
||||
return IMPL.subcloud_status_update(context, subcloud_id, endpoint_type,
|
||||
sync_status)
|
||||
|
||||
|
||||
def subcloud_status_destroy_all(context, subcloud_id):
|
||||
"""Destroy all the statuses for a subcloud
|
||||
|
||||
Will raise if subcloud does not exist.
|
||||
"""
|
||||
|
||||
return IMPL.subcloud_status_destroy_all(context, subcloud_id)
|
||||
|
||||
|
||||
###################
|
||||
|
||||
def sw_update_strategy_db_model_to_dict(sw_update_strategy):
|
||||
"""Convert sw update db model to dictionary."""
|
||||
result = {"id": sw_update_strategy.id,
|
||||
"type": sw_update_strategy.type,
|
||||
"subcloud-apply-type": sw_update_strategy.subcloud_apply_type,
|
||||
"max-parallel-subclouds":
|
||||
sw_update_strategy.max_parallel_subclouds,
|
||||
"stop-on-failure": sw_update_strategy.stop_on_failure,
|
||||
"state": sw_update_strategy.state,
|
||||
"created-at": sw_update_strategy.created_at,
|
||||
"updated-at": sw_update_strategy.updated_at}
|
||||
return result
|
||||
|
||||
|
||||
def sw_update_strategy_create(context, type, subcloud_apply_type,
|
||||
max_parallel_subclouds, stop_on_failure, state):
|
||||
"""Create a sw update."""
|
||||
return IMPL.sw_update_strategy_create(context, type, subcloud_apply_type,
|
||||
max_parallel_subclouds,
|
||||
stop_on_failure, state)
|
||||
|
||||
|
||||
def sw_update_strategy_get(context):
|
||||
"""Retrieve a sw update or raise if it does not exist."""
|
||||
return IMPL.sw_update_strategy_get(context)
|
||||
|
||||
|
||||
def sw_update_strategy_update(context, state=None):
|
||||
"""Update a sw update or raise if it does not exist."""
|
||||
return IMPL.sw_update_strategy_update(context, state)
|
||||
|
||||
|
||||
def sw_update_strategy_destroy(context):
|
||||
"""Destroy the sw update or raise if it does not exist."""
|
||||
return IMPL.sw_update_strategy_destroy(context)
|
||||
|
||||
|
||||
###################
|
||||
|
||||
def strategy_step_db_model_to_dict(strategy_step):
|
||||
"""Convert patch strategy db model to dictionary."""
|
||||
if strategy_step.subcloud is not None:
|
||||
cloud = strategy_step.subcloud.name
|
||||
else:
|
||||
cloud = consts.SYSTEM_CONTROLLER_NAME
|
||||
result = {"id": strategy_step.id,
|
||||
"cloud": cloud,
|
||||
"stage": strategy_step.stage,
|
||||
"state": strategy_step.state,
|
||||
"details": strategy_step.details,
|
||||
"started-at": strategy_step.started_at,
|
||||
"finished-at": strategy_step.finished_at,
|
||||
"created-at": strategy_step.created_at,
|
||||
"updated-at": strategy_step.updated_at}
|
||||
return result
|
||||
|
||||
|
||||
def strategy_step_get(context, subcloud_id):
|
||||
"""Retrieve the patch strategy step for a subcloud ID.
|
||||
|
||||
Will raise if subcloud does not exist.
|
||||
"""
|
||||
|
||||
return IMPL.strategy_step_get(context, subcloud_id)
|
||||
|
||||
|
||||
def strategy_step_get_by_name(context, name):
|
||||
"""Retrieve the patch strategy step for a subcloud name."""
|
||||
return IMPL.strategy_step_get_by_name(context, name)
|
||||
|
||||
|
||||
def strategy_step_get_all(context):
|
||||
"""Retrieve all patch strategy steps."""
|
||||
return IMPL.strategy_step_get_all(context)
|
||||
|
||||
|
||||
def strategy_step_create(context, subcloud_id, stage, state, details):
|
||||
"""Create a patch strategy step."""
|
||||
return IMPL.strategy_step_create(context, subcloud_id, stage, state,
|
||||
details)
|
||||
|
||||
|
||||
def strategy_step_update(context, subcloud_id, stage=None, state=None,
|
||||
details=None, started_at=None, finished_at=None):
|
||||
"""Update a patch strategy step or raise if it does not exist."""
|
||||
return IMPL.strategy_step_update(context, subcloud_id, stage, state,
|
||||
details, started_at, finished_at)
|
||||
|
||||
|
||||
def strategy_step_destroy_all(context):
|
||||
"""Destroy all the patch strategy steps."""
|
||||
return IMPL.strategy_step_destroy_all(context)
|
||||
|
||||
|
||||
###################
|
||||
|
||||
def sw_update_opts_w_name_db_model_to_dict(sw_update_opts, subcloud_name):
|
||||
"""Convert sw update options db model plus subcloud name to dictionary."""
|
||||
result = {"id": sw_update_opts.id,
|
||||
"name": subcloud_name,
|
||||
"subcloud-id": sw_update_opts.subcloud_id,
|
||||
"storage-apply-type": sw_update_opts.storage_apply_type,
|
||||
"compute-apply-type": sw_update_opts.compute_apply_type,
|
||||
"max-parallel-computes": sw_update_opts.max_parallel_computes,
|
||||
"alarm-restriction-type": sw_update_opts.alarm_restriction_type,
|
||||
"default-instance-action":
|
||||
sw_update_opts.default_instance_action,
|
||||
"created-at": sw_update_opts.created_at,
|
||||
"updated-at": sw_update_opts.updated_at}
|
||||
return result
|
||||
|
||||
|
||||
def sw_update_opts_create(context, subcloud_id, storage_apply_type,
|
||||
compute_apply_type, max_parallel_computes,
|
||||
alarm_restriction_type, default_instance_action):
|
||||
"""Create sw update options."""
|
||||
return IMPL.sw_update_opts_create(context, subcloud_id,
|
||||
storage_apply_type,
|
||||
compute_apply_type,
|
||||
max_parallel_computes,
|
||||
alarm_restriction_type,
|
||||
default_instance_action)
|
||||
|
||||
|
||||
def sw_update_opts_get(context, subcloud_id):
|
||||
"""Retrieve sw update options."""
|
||||
return IMPL.sw_update_opts_get(context, subcloud_id)
|
||||
|
||||
|
||||
def sw_update_opts_get_all_plus_subcloud_info(context):
|
||||
"""Retrieve sw update options plus subcloud info."""
|
||||
return IMPL.sw_update_opts_get_all_plus_subcloud_info(context)
|
||||
|
||||
|
||||
def sw_update_opts_update(context, subcloud_id,
|
||||
storage_apply_type=None,
|
||||
compute_apply_type=None,
|
||||
max_parallel_computes=None,
|
||||
alarm_restriction_type=None,
|
||||
default_instance_action=None):
|
||||
|
||||
"""Update sw update options or raise if it does not exist."""
|
||||
return IMPL.sw_update_opts_update(context, subcloud_id,
|
||||
storage_apply_type,
|
||||
compute_apply_type,
|
||||
max_parallel_computes,
|
||||
alarm_restriction_type,
|
||||
default_instance_action)
|
||||
|
||||
|
||||
def sw_update_opts_destroy(context, subcloud_id):
|
||||
"""Destroy sw update options or raise if it does not exist."""
|
||||
return IMPL.sw_update_opts_destroy(context, subcloud_id)
|
||||
|
||||
|
||||
###################
|
||||
def sw_update_opts_default_create(context, storage_apply_type,
|
||||
compute_apply_type, max_parallel_computes,
|
||||
alarm_restriction_type,
|
||||
default_instance_action):
|
||||
"""Create default sw update options."""
|
||||
return IMPL.sw_update_opts_default_create(context,
|
||||
storage_apply_type,
|
||||
compute_apply_type,
|
||||
max_parallel_computes,
|
||||
alarm_restriction_type,
|
||||
default_instance_action)
|
||||
|
||||
|
||||
def sw_update_opts_default_get(context):
|
||||
"""Retrieve default sw update options."""
|
||||
return IMPL.sw_update_opts_default_get(context)
|
||||
|
||||
|
||||
def sw_update_opts_default_update(context,
|
||||
storage_apply_type=None,
|
||||
compute_apply_type=None,
|
||||
max_parallel_computes=None,
|
||||
alarm_restriction_type=None,
|
||||
default_instance_action=None):
|
||||
|
||||
"""Update default sw update options."""
|
||||
return IMPL.sw_update_opts_default_update(context,
|
||||
storage_apply_type,
|
||||
compute_apply_type,
|
||||
max_parallel_computes,
|
||||
alarm_restriction_type,
|
||||
default_instance_action)
|
||||
|
||||
|
||||
def sw_update_opts_default_destroy(context):
|
||||
"""Destroy the default sw update options or raise if it does not exist."""
|
||||
return IMPL.sw_update_opts_default_destroy(context)
|
||||
|
||||
|
||||
###################
|
||||
|
||||
def db_sync(engine, version=None):
|
||||
"""Migrate the database to `version` or the most recent version."""
|
||||
return IMPL.db_sync(engine, version=version)
|
||||
|
||||
|
||||
def db_version(engine):
|
||||
"""Display the current database version."""
|
||||
return IMPL.db_version(engine)
|
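The module above is a thin dispatch layer: every helper forwards to IMPL, which oslo.db resolves through _BACKEND_MAPPING to the SQLAlchemy backend. A sketch of a caller's view, with illustrative subcloud values (names, subnets, and version are not from this commit):

    from dcmanager.db import api as db_api


    def register_subcloud(ctxt):
        subcloud = db_api.subcloud_create(
            ctxt, name='subcloud1', description='lab system',
            location='vbox', software_version='17.07',
            management_subnet='192.168.101.0/24',
            management_gateway_ip='192.168.101.1',
            management_start_ip='192.168.101.2',
            management_end_ip='192.168.101.50',
            systemcontroller_gateway_ip='192.168.204.101')
        # db model -> plain dict for the API layer
        return db_api.subcloud_db_model_to_dict(subcloud)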
0	dcmanager/db/sqlalchemy/__init__.py	Normal file
602	dcmanager/db/sqlalchemy/api.py	Normal file
@@ -0,0 +1,602 @@
|
||||
# Copyright (c) 2015 Ericsson AB.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# Copyright (c) 2017 Wind River Systems, Inc.
|
||||
#
|
||||
# The right to copy, distribute, modify, or otherwise make use
|
||||
# of this software may be licensed only pursuant to the terms
|
||||
# of an applicable Wind River license agreement.
|
||||
#
|
||||
|
||||
"""
|
||||
Implementation of SQLAlchemy backend.
|
||||
"""
|
||||
|
||||
import sys
|
||||
import threading
|
||||
|
||||
from oslo_db.sqlalchemy import enginefacade
|
||||
|
||||
from oslo_log import log as logging
|
||||
|
||||
from sqlalchemy.orm import joinedload_all
|
||||
|
||||
from dcmanager.common import consts
|
||||
from dcmanager.common import exceptions as exception
|
||||
from dcmanager.common.i18n import _
|
||||
from dcmanager.db.sqlalchemy import migration
|
||||
from dcmanager.db.sqlalchemy import models
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
_facade = None
|
||||
|
||||
_main_context_manager = None
|
||||
_CONTEXT = threading.local()
|
||||
|
||||
|
||||
def _get_main_context_manager():
|
||||
global _main_context_manager
|
||||
if not _main_context_manager:
|
||||
_main_context_manager = enginefacade.transaction_context()
|
||||
return _main_context_manager
|
||||
|
||||
|
||||
def get_engine():
|
||||
return _get_main_context_manager().get_legacy_facade().get_engine()
|
||||
|
||||
|
||||
def get_session():
|
||||
return _get_main_context_manager().get_legacy_facade().get_session()
|
||||
|
||||
|
||||
def read_session():
|
||||
return _get_main_context_manager().reader.using(_CONTEXT)
|
||||
|
||||
|
||||
def write_session():
|
||||
return _get_main_context_manager().writer.using(_CONTEXT)
|
||||
|
||||
|
||||
def get_backend():
|
||||
"""The backend is this module itself."""
|
||||
return sys.modules[__name__]
|
||||
|
||||
|
||||
def model_query(context, *args):
|
||||
with read_session() as session:
|
||||
query = session.query(*args).options(joinedload_all('*'))
|
||||
return query
|
||||
|
||||
|
||||
def _session(context):
|
||||
return get_session()
|
||||
|
||||
|
||||
def is_admin_context(context):
|
||||
"""Indicate if the request context is an administrator."""
|
||||
if not context:
|
||||
LOG.warning(_('Use of empty request context is deprecated'),
|
||||
DeprecationWarning)
|
||||
raise Exception('die')
|
||||
return context.is_admin
|
||||
|
||||
|
||||
def is_user_context(context):
|
||||
"""Indicate if the request context is a normal user."""
|
||||
if not context:
|
||||
return False
|
||||
if context.is_admin:
|
||||
return False
|
||||
if not context.user or not context.project:
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
def require_admin_context(f):
|
||||
"""Decorator to require admin request context.
|
||||
|
||||
The first argument to the wrapped function must be the context.
|
||||
"""
|
||||
def wrapper(*args, **kwargs):
|
||||
if not is_admin_context(args[0]):
|
||||
raise exception.AdminRequired()
|
||||
return f(*args, **kwargs)
|
||||
|
||||
return wrapper
|
||||
|
||||
|
||||
def require_context(f):
|
||||
"""Decorator to require *any* user or admin context.
|
||||
|
||||
This does no authorization for user or project access matching, see
|
||||
:py:func:`authorize_project_context` and
|
||||
:py:func:`authorize_user_context`.
|
||||
The first argument to the wrapped function must be the context.
|
||||
|
||||
"""
|
||||
def wrapper(*args, **kwargs):
|
||||
if not is_admin_context(args[0]) and not is_user_context(args[0]):
|
||||
raise exception.NotAuthorized()
|
||||
return f(*args, **kwargs)
|
||||
|
||||
return wrapper
|
||||
|
||||
|
||||
###################
|
||||
|
||||
|
||||
@require_context
|
||||
def subcloud_get(context, subcloud_id):
|
||||
result = model_query(context, models.Subcloud). \
|
||||
filter_by(deleted=0). \
|
||||
filter_by(id=subcloud_id). \
|
||||
first()
|
||||
|
||||
if not result:
|
||||
raise exception.SubcloudNotFound(subcloud_id=subcloud_id)
|
||||
|
||||
return result
|
||||
|
||||
|
||||
@require_context
|
||||
def subcloud_get_with_status(context, subcloud_id):
|
||||
result = model_query(context, models.Subcloud, models.SubcloudStatus). \
|
||||
outerjoin(models.SubcloudStatus,
|
||||
(models.Subcloud.id == models.SubcloudStatus.subcloud_id) |
|
||||
(not models.SubcloudStatus.subcloud_id)). \
|
||||
filter(models.Subcloud.id == subcloud_id). \
|
||||
filter(models.Subcloud.deleted == 0). \
|
||||
order_by(models.SubcloudStatus.endpoint_type). \
|
||||
all()
|
||||
|
||||
if not result:
|
||||
raise exception.SubcloudNotFound(subcloud_id=subcloud_id)
|
||||
|
||||
return result
|
||||
|
||||
|
||||
@require_context
|
||||
def subcloud_get_by_name(context, name):
|
||||
result = model_query(context, models.Subcloud). \
|
||||
filter_by(deleted=0). \
|
||||
filter_by(name=name). \
|
||||
first()
|
||||
|
||||
if not result:
|
||||
raise exception.SubcloudNameNotFound(name=name)
|
||||
|
||||
return result
|
||||
|
||||
|
||||
@require_context
|
||||
def subcloud_get_all(context):
|
||||
return model_query(context, models.Subcloud). \
|
||||
filter_by(deleted=0). \
|
||||
all()
|
||||
|
||||
|
||||
@require_context
|
||||
def subcloud_get_all_with_status(context):
|
||||
result = model_query(context, models.Subcloud, models.SubcloudStatus). \
|
||||
outerjoin(models.SubcloudStatus,
|
||||
(models.Subcloud.id == models.SubcloudStatus.subcloud_id) |
|
||||
(not models.SubcloudStatus.subcloud_id)). \
|
||||
filter(models.Subcloud.deleted == 0). \
|
||||
order_by(models.Subcloud.id). \
|
||||
all()
|
||||
|
||||
return result
|
||||
|
||||
|
||||
@require_admin_context
|
||||
def subcloud_create(context, name, description, location, software_version,
|
||||
management_subnet, management_gateway_ip,
|
||||
management_start_ip, management_end_ip,
|
||||
systemcontroller_gateway_ip):
|
||||
with write_session() as session:
|
||||
subcloud_ref = models.Subcloud()
|
||||
subcloud_ref.name = name
|
||||
subcloud_ref.description = description
|
||||
subcloud_ref.location = location
|
||||
subcloud_ref.software_version = software_version
|
||||
subcloud_ref.management_state = consts.MANAGEMENT_UNMANAGED
|
||||
subcloud_ref.availability_status = consts.AVAILABILITY_OFFLINE
|
||||
subcloud_ref.management_subnet = management_subnet
|
||||
subcloud_ref.management_gateway_ip = management_gateway_ip
|
||||
subcloud_ref.management_start_ip = management_start_ip
|
||||
subcloud_ref.management_end_ip = management_end_ip
|
||||
subcloud_ref.systemcontroller_gateway_ip = systemcontroller_gateway_ip
|
||||
subcloud_ref.audit_fail_count = 0
|
||||
session.add(subcloud_ref)
|
||||
return subcloud_ref
|
||||
|
||||
|
||||
@require_admin_context
|
||||
def subcloud_update(context, subcloud_id, management_state=None,
|
||||
availability_status=None, software_version=None,
|
||||
description=None, location=None, audit_fail_count=None):
|
||||
with write_session() as session:
|
||||
subcloud_ref = subcloud_get(context, subcloud_id)
|
||||
if management_state is not None:
|
||||
subcloud_ref.management_state = management_state
|
||||
if availability_status is not None:
|
||||
subcloud_ref.availability_status = availability_status
|
||||
if software_version is not None:
|
||||
subcloud_ref.software_version = software_version
|
||||
if description is not None:
|
||||
subcloud_ref.description = description
|
||||
if location is not None:
|
||||
subcloud_ref.location = location
|
||||
if audit_fail_count is not None:
|
||||
subcloud_ref.audit_fail_count = audit_fail_count
|
||||
subcloud_ref.save(session)
|
||||
return subcloud_ref
|
||||
|
||||
|
||||
@require_admin_context
|
||||
def subcloud_destroy(context, subcloud_id):
|
||||
with write_session() as session:
|
||||
subcloud_ref = subcloud_get(context, subcloud_id)
|
||||
session.delete(subcloud_ref)
|
||||
|
||||
|
||||
##########################
|
||||
|
||||
|
||||
@require_context
|
||||
def subcloud_status_get(context, subcloud_id, endpoint_type):
|
||||
result = model_query(context, models.SubcloudStatus). \
|
||||
filter_by(deleted=0). \
|
||||
filter_by(subcloud_id=subcloud_id). \
|
||||
filter_by(endpoint_type=endpoint_type). \
|
||||
first()
|
||||
|
||||
if not result:
|
||||
raise exception.SubcloudStatusNotFound(subcloud_id=subcloud_id,
|
||||
endpoint_type=endpoint_type)
|
||||
|
||||
return result
|
||||
|
||||
|
||||
@require_context
|
||||
def subcloud_status_get_all(context, subcloud_id):
|
||||
return model_query(context, models.SubcloudStatus). \
|
||||
filter_by(deleted=0). \
|
||||
join(models.Subcloud,
|
||||
models.SubcloudStatus.subcloud_id == models.Subcloud.id). \
|
||||
filter(models.Subcloud.id == subcloud_id).all()
|
||||
|
||||
|
||||
@require_context
|
||||
def subcloud_status_get_all_by_name(context, name):
|
||||
return model_query(context, models.SubcloudStatus). \
|
||||
filter_by(deleted=0). \
|
||||
join(models.Subcloud,
|
||||
models.SubcloudStatus.subcloud_id == models.Subcloud.id). \
|
||||
filter(models.Subcloud.name == name).all()
|
||||
|
||||
|
||||
@require_admin_context
|
||||
def subcloud_status_create(context, subcloud_id, endpoint_type):
|
||||
with write_session() as session:
|
||||
subcloud_status_ref = models.SubcloudStatus()
|
||||
subcloud_status_ref.subcloud_id = subcloud_id
|
||||
subcloud_status_ref.endpoint_type = endpoint_type
|
||||
subcloud_status_ref.sync_status = consts.SYNC_STATUS_UNKNOWN
|
||||
session.add(subcloud_status_ref)
|
||||
return subcloud_status_ref
|
||||
|
||||
|
||||
@require_admin_context
|
||||
def subcloud_status_update(context, subcloud_id, endpoint_type, sync_status):
|
||||
with write_session() as session:
|
||||
subcloud_status_ref = subcloud_status_get(context, subcloud_id,
|
||||
endpoint_type)
|
||||
subcloud_status_ref.sync_status = sync_status
|
||||
subcloud_status_ref.save(session)
|
||||
return subcloud_status_ref
|
||||
|
||||
|
||||
@require_admin_context
|
||||
def subcloud_status_destroy_all(context, subcloud_id):
|
||||
with write_session() as session:
|
||||
subcloud_statuses = subcloud_status_get_all(context, subcloud_id)
|
||||
if subcloud_statuses:
|
||||
for subcloud_status_ref in subcloud_statuses:
|
||||
session.delete(subcloud_status_ref)
|
||||
else:
|
||||
raise exception.SubcloudStatusNotFound(subcloud_id=subcloud_id,
|
||||
endpoint_type="any")
|
||||
|
||||
|
||||
###################
|
||||
|
||||
|
||||
@require_context
|
||||
def sw_update_strategy_get(context):
|
||||
result = model_query(context, models.SwUpdateStrategy). \
|
||||
filter_by(deleted=0). \
|
||||
first()
|
||||
|
||||
if not result:
|
||||
raise exception.NotFound()
|
||||
|
||||
return result
|
||||
|
||||
|
||||
@require_admin_context
|
||||
def sw_update_strategy_create(context, type, subcloud_apply_type,
|
||||
max_parallel_subclouds, stop_on_failure, state):
|
||||
with write_session() as session:
|
||||
sw_update_strategy_ref = models.SwUpdateStrategy()
|
||||
sw_update_strategy_ref.type = type
|
||||
sw_update_strategy_ref.subcloud_apply_type = subcloud_apply_type
|
||||
sw_update_strategy_ref.max_parallel_subclouds = max_parallel_subclouds
|
||||
sw_update_strategy_ref.stop_on_failure = stop_on_failure
|
||||
sw_update_strategy_ref.state = state
|
||||
|
||||
session.add(sw_update_strategy_ref)
|
||||
return sw_update_strategy_ref
|
||||
|
||||
|
||||
@require_admin_context
|
||||
def sw_update_strategy_update(context, state=None):
|
||||
with write_session() as session:
|
||||
sw_update_strategy_ref = sw_update_strategy_get(context)
|
||||
if state is not None:
|
||||
sw_update_strategy_ref.state = state
|
||||
sw_update_strategy_ref.save(session)
|
||||
return sw_update_strategy_ref
|
||||
|
||||
|
||||
@require_admin_context
|
||||
def sw_update_strategy_destroy(context):
|
||||
with write_session() as session:
|
||||
sw_update_strategy_ref = sw_update_strategy_get(context)
|
||||
session.delete(sw_update_strategy_ref)
|
||||
|
||||
|
||||
##########################
|
||||
|
||||
|
||||
@require_context
|
||||
def sw_update_opts_get(context, subcloud_id):
|
||||
result = model_query(context, models.SwUpdateOpts). \
|
||||
filter_by(deleted=0). \
|
||||
filter_by(subcloud_id=subcloud_id). \
|
||||
first()
|
||||
|
||||
# Note we will return None if not found
|
||||
return result
|
||||
|
||||
|
||||
@require_context
|
||||
def sw_update_opts_get_all_plus_subcloud_info(context):
|
||||
result = model_query(context, models.Subcloud, models.SwUpdateOpts). \
|
||||
outerjoin(models.SwUpdateOpts,
|
||||
(models.Subcloud.id == models.SwUpdateOpts.subcloud_id) |
|
||||
(not models.SubcloudStatus.subcloud_id)). \
|
||||
filter(models.Subcloud.deleted == 0). \
|
||||
order_by(models.Subcloud.id). \
|
||||
all()
|
||||
|
||||
return result
|
||||
|
||||
|
||||
@require_admin_context
|
||||
def sw_update_opts_create(context, subcloud_id, storage_apply_type,
|
||||
compute_apply_type,
|
||||
max_parallel_computes,
|
||||
alarm_restriction_type,
|
||||
default_instance_action):
|
||||
with write_session() as session:
|
||||
sw_update_opts_ref = models.SwUpdateOpts()
|
||||
sw_update_opts_ref.subcloud_id = subcloud_id
|
||||
sw_update_opts_ref.storage_apply_type = storage_apply_type
|
||||
sw_update_opts_ref.compute_apply_type = compute_apply_type
|
||||
sw_update_opts_ref.max_parallel_computes = max_parallel_computes
|
||||
sw_update_opts_ref.alarm_restriction_type = alarm_restriction_type
|
||||
sw_update_opts_ref.default_instance_action = default_instance_action
|
||||
session.add(sw_update_opts_ref)
|
||||
return sw_update_opts_ref
|
||||
|
||||
|
||||
@require_admin_context
|
||||
def sw_update_opts_update(context, subcloud_id, storage_apply_type=None,
|
||||
compute_apply_type=None, max_parallel_computes=None,
|
||||
alarm_restriction_type=None,
|
||||
default_instance_action=None):
|
||||
with write_session() as session:
|
||||
sw_update_opts_ref = sw_update_opts_get(context, subcloud_id)
|
||||
if storage_apply_type is not None:
|
||||
sw_update_opts_ref.storage_apply_type = storage_apply_type
|
||||
if compute_apply_type is not None:
|
||||
sw_update_opts_ref.compute_apply_type = compute_apply_type
|
||||
if max_parallel_computes is not None:
|
||||
sw_update_opts_ref.max_parallel_computes = max_parallel_computes
|
||||
if alarm_restriction_type is not None:
|
||||
sw_update_opts_ref.alarm_restriction_type = alarm_restriction_type
|
||||
if default_instance_action is not None:
|
||||
sw_update_opts_ref.default_instance_action = \
|
||||
default_instance_action
|
||||
sw_update_opts_ref.save(session)
|
||||
return sw_update_opts_ref
|
||||
|
||||
|
||||
@require_admin_context
|
||||
def sw_update_opts_destroy(context, subcloud_id):
|
||||
with write_session() as session:
|
||||
sw_update_opts_ref = sw_update_opts_get(context, subcloud_id)
|
||||
session.delete(sw_update_opts_ref)
|
||||
|
||||
|
||||
##########################
|
||||
|
||||
|
||||
@require_context
|
||||
def sw_update_opts_default_get(context):
|
||||
result = model_query(context, models.SwUpdateOptsDefault). \
|
||||
filter_by(deleted=0). \
|
||||
first()
|
||||
|
||||
# Note we will return None if not found
|
||||
return result
|
||||
|
||||
|
||||
@require_admin_context
|
||||
def sw_update_opts_default_create(context, storage_apply_type,
|
||||
compute_apply_type,
|
||||
max_parallel_computes,
|
||||
alarm_restriction_type,
|
||||
default_instance_action):
|
||||
with write_session() as session:
|
||||
sw_update_opts_default_ref = models.SwUpdateOptsDefault()
|
||||
sw_update_opts_default_ref.subcloud_id = 0
|
||||
sw_update_opts_default_ref.storage_apply_type = storage_apply_type
|
||||
sw_update_opts_default_ref.compute_apply_type = compute_apply_type
|
||||
sw_update_opts_default_ref.max_parallel_computes = \
|
||||
max_parallel_computes
|
||||
sw_update_opts_default_ref.alarm_restriction_type = \
|
||||
alarm_restriction_type
|
||||
sw_update_opts_default_ref.default_instance_action = \
|
||||
default_instance_action
|
||||
session.add(sw_update_opts_default_ref)
|
||||
return sw_update_opts_default_ref
|
||||
|
||||
|
||||
@require_admin_context
|
||||
def sw_update_opts_default_update(context, storage_apply_type=None,
|
||||
compute_apply_type=None,
|
||||
max_parallel_computes=None,
|
||||
alarm_restriction_type=None,
|
||||
default_instance_action=None):
|
||||
with write_session() as session:
|
||||
sw_update_opts_default_ref = sw_update_opts_default_get(context)
|
||||
if storage_apply_type is not None:
|
||||
sw_update_opts_default_ref.storage_apply_type = storage_apply_type
|
||||
if compute_apply_type is not None:
|
||||
sw_update_opts_default_ref.compute_apply_type = compute_apply_type
|
||||
if max_parallel_computes is not None:
|
||||
sw_update_opts_default_ref.max_parallel_computes = \
|
||||
max_parallel_computes
|
||||
if alarm_restriction_type is not None:
|
||||
sw_update_opts_default_ref.alarm_restriction_type = \
|
||||
alarm_restriction_type
|
||||
if default_instance_action is not None:
|
||||
sw_update_opts_default_ref.default_instance_action = \
|
||||
default_instance_action
|
||||
sw_update_opts_default_ref.save(session)
|
||||
return sw_update_opts_default_ref
|
||||
|
||||
|
||||
@require_admin_context
|
||||
def sw_update_opts_default_destroy(context):
|
||||
with write_session() as session:
|
||||
sw_update_opts_default_ref = sw_update_opts_default_get(context)
|
||||
session.delete(sw_update_opts_default_ref)
|
||||
|
||||
|
||||
##########################
|
||||
|
||||
|
||||
@require_context
|
||||
def strategy_step_get(context, subcloud_id):
|
||||
result = model_query(context, models.StrategyStep). \
|
||||
filter_by(deleted=0). \
|
||||
filter_by(subcloud_id=subcloud_id). \
|
||||
first()
|
||||
|
||||
if not result:
|
||||
raise exception.StrategyStepNotFound(subcloud_id=subcloud_id)
|
||||
|
||||
return result
|
||||
|
||||
|
||||
@require_context
|
||||
def strategy_step_get_by_name(context, name):
|
||||
result = model_query(context, models.StrategyStep). \
|
||||
filter_by(deleted=0). \
|
||||
join(models.Subcloud,
|
||||
models.StrategyStep.subcloud_id == models.Subcloud.id). \
|
||||
filter(models.Subcloud.name == name).first()
|
||||
|
||||
if not result:
|
||||
raise exception.StrategyStepNameNotFound(name=name)
|
||||
|
||||
return result
|
||||
|
||||
|
||||
@require_context
|
||||
def strategy_step_get_all(context):
|
||||
result = model_query(context, models.StrategyStep). \
|
||||
filter_by(deleted=0). \
|
||||
order_by(models.StrategyStep.id). \
|
||||
all()
|
||||
|
||||
return result
|
||||
|
||||
|
||||
@require_admin_context
|
||||
def strategy_step_create(context, subcloud_id, stage, state, details):
|
||||
with write_session() as session:
|
||||
strategy_step_ref = models.StrategyStep()
|
||||
strategy_step_ref.subcloud_id = subcloud_id
|
||||
strategy_step_ref.stage = stage
|
||||
strategy_step_ref.state = state
|
||||
strategy_step_ref.details = details
|
||||
session.add(strategy_step_ref)
|
||||
return strategy_step_ref
|
||||
|
||||
|
||||
@require_admin_context
|
||||
def strategy_step_update(context, subcloud_id, stage=None, state=None,
|
||||
details=None, started_at=None, finished_at=None):
|
||||
with write_session() as session:
|
||||
strategy_step_ref = strategy_step_get(context, subcloud_id)
|
||||
if stage is not None:
|
||||
strategy_step_ref.stage = stage
|
||||
if state is not None:
|
||||
strategy_step_ref.state = state
|
||||
if details is not None:
|
||||
strategy_step_ref.details = details
|
||||
if started_at is not None:
|
||||
strategy_step_ref.started_at = started_at
|
||||
if finished_at is not None:
|
||||
strategy_step_ref.finished_at = finished_at
|
||||
strategy_step_ref.save(session)
|
||||
return strategy_step_ref
|
||||
|
||||
|
||||
@require_admin_context
|
||||
def strategy_step_destroy_all(context):
|
||||
with write_session() as session:
|
||||
strategy_step_stages = strategy_step_get_all(context)
|
||||
if strategy_step_stages:
|
||||
for strategy_step_ref in strategy_step_stages:
|
||||
session.delete(strategy_step_ref)
|
||||
|
||||
|
||||
##########################
|
||||
|
||||
|
||||
def db_sync(engine, version=None):
|
||||
"""Migrate the database to `version` or the most recent version."""
|
||||
return migration.db_sync(engine, version=version)
|
||||
|
||||
|
||||
def db_version(engine):
|
||||
"""Display the current database version."""
|
||||
return migration.db_version(engine)
|
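A sketch of what the decorators above imply for callers of this backend: reads accept any valid request context, writes require context.is_admin, and each helper opens its own enginefacade read or write session. The context construction below is illustrative (the exact RequestContext signature lives in dcmanager.common.context, which is not part of this excerpt), as are the subcloud values and the 'patching' endpoint type:

    from dcmanager.common import context
    from dcmanager.db.sqlalchemy import api as db_api


    # An admin context passes require_admin_context; a non-admin context
    # would raise exception.AdminRequired on the write helpers.
    ctxt = context.RequestContext(user='admin', project='admin',
                                  is_admin=True)

    subcloud = db_api.subcloud_create(
        ctxt, 'subcloud1', 'lab system', 'vbox', '17.07',
        '192.168.101.0/24', '192.168.101.1', '192.168.101.2',
        '192.168.101.50', '192.168.204.101')

    db_api.subcloud_status_create(ctxt, subcloud.id, 'patching')
    db_api.subcloud_update(ctxt, subcloud.id, management_state='managed')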
4	dcmanager/db/sqlalchemy/migrate_repo/README	Normal file
@@ -0,0 +1,4 @@
This is a database migration repository.

More information at
http://code.google.com/p/sqlalchemy-migrate/

0	dcmanager/db/sqlalchemy/migrate_repo/__init__.py	Normal file

5	dcmanager/db/sqlalchemy/migrate_repo/manage.py	Normal file
@@ -0,0 +1,5 @@
#!/usr/bin/env python
from migrate.versioning.shell import main

if __name__ == '__main__':
    main(debug='False')

25	dcmanager/db/sqlalchemy/migrate_repo/migrate.cfg	Normal file
@@ -0,0 +1,25 @@
[db_settings]
# Used to identify which repository this database is versioned under.
# You can use the name of your project.
repository_id=dcmanager

# The name of the database table used to track the schema version.
# This name shouldn't already be used by your project.
# If this is changed once a database is under version control, you'll need to
# change the table name in each database too.
version_table=migrate_version

# When committing a change script, Migrate will attempt to generate the
# sql for all supported databases; normally, if one of them fails - probably
# because you don't have that database installed - it is ignored and the
# commit continues, perhaps ending successfully.
# Databases in this list MUST compile successfully during a commit, or the
# entire commit will fail. List the databases your application will actually
# be using to ensure your updates to that database work properly.
# This must be a list; example: ['postgres','sqlite']
required_dbs=['postgres']

# When creating new change scripts, Migrate will stamp the new script with
# a version number. By default this is latest_version + 1. You can set this
# to 'true' to tell Migrate to use the UTC timestamp instead.
use_timestamp_numbering=False
@ -0,0 +1,197 @@
|
||||
# Copyright (c) 2015 Ericsson AB.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# Copyright (c) 2017 Wind River Systems, Inc.
|
||||
#
|
||||
# The right to copy, distribute, modify, or otherwise make use
|
||||
# of this software may be licensed only pursuant to the terms
|
||||
# of an applicable Wind River license agreement.
|
||||
#
|
||||
|
||||
from dcmanager.drivers.openstack import vim
|
||||
import sqlalchemy
|
||||
|
||||
|
||||
def upgrade(migrate_engine):
|
||||
meta = sqlalchemy.MetaData()
|
||||
meta.bind = migrate_engine
|
||||
|
||||
subclouds = sqlalchemy.Table(
|
||||
'subclouds', meta,
|
||||
sqlalchemy.Column('id', sqlalchemy.Integer,
|
||||
primary_key=True, nullable=False),
|
||||
sqlalchemy.Column('name', sqlalchemy.String(255), unique=True),
|
||||
sqlalchemy.Column('description', sqlalchemy.String(255)),
|
||||
sqlalchemy.Column('location', sqlalchemy.String(255)),
|
||||
sqlalchemy.Column('software_version', sqlalchemy.String(255)),
|
||||
sqlalchemy.Column('management_state', sqlalchemy.String(255)),
|
||||
sqlalchemy.Column('availability_status', sqlalchemy.String(255)),
|
||||
sqlalchemy.Column('management_subnet', sqlalchemy.String(255)),
|
||||
sqlalchemy.Column('management_gateway_ip', sqlalchemy.String(255)),
|
||||
sqlalchemy.Column('management_start_ip', sqlalchemy.String(255)),
|
||||
sqlalchemy.Column('management_end_ip', sqlalchemy.String(255)),
|
||||
sqlalchemy.Column('systemcontroller_gateway_ip',
|
||||
sqlalchemy.String(255)),
|
||||
sqlalchemy.Column('audit_fail_count', sqlalchemy.Integer, default=0),
|
||||
sqlalchemy.Column('reserved_1', sqlalchemy.Text),
|
||||
sqlalchemy.Column('reserved_2', sqlalchemy.Text),
|
||||
sqlalchemy.Column('created_at', sqlalchemy.DateTime),
|
||||
sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
|
||||
sqlalchemy.Column('deleted_at', sqlalchemy.DateTime),
|
||||
sqlalchemy.Column('deleted', sqlalchemy.Integer),
|
||||
mysql_engine='InnoDB',
|
||||
mysql_charset='utf8'
|
||||
)
|
||||
|
||||
subcloud_status = sqlalchemy.Table(
|
||||
'subcloud_status', meta,
|
||||
sqlalchemy.Column('id', sqlalchemy.Integer,
|
||||
primary_key=True, nullable=False),
|
||||
sqlalchemy.Column('subcloud_id', sqlalchemy.Integer,
|
||||
sqlalchemy.ForeignKey('subclouds.id',
|
||||
ondelete='CASCADE')),
|
||||
sqlalchemy.Column('endpoint_type', sqlalchemy.String(255)),
|
||||
sqlalchemy.Column('sync_status', sqlalchemy.String(255)),
|
||||
sqlalchemy.Column('reserved_1', sqlalchemy.Text),
|
||||
sqlalchemy.Column('reserved_2', sqlalchemy.Text),
|
||||
sqlalchemy.Column('created_at', sqlalchemy.DateTime),
|
||||
sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
|
||||
sqlalchemy.Column('deleted_at', sqlalchemy.DateTime),
|
||||
sqlalchemy.Column('deleted', sqlalchemy.Integer),
|
||||
mysql_engine='InnoDB',
|
||||
mysql_charset='utf8'
|
||||
)
|
||||
|
||||
sw_update_strategy = sqlalchemy.Table(
|
||||
'sw_update_strategy', meta,
|
||||
sqlalchemy.Column('id', sqlalchemy.Integer,
|
||||
primary_key=True, nullable=False),
|
||||
sqlalchemy.Column('type', sqlalchemy.String(255), unique=True),
|
||||
sqlalchemy.Column('subcloud_apply_type', sqlalchemy.String(255)),
|
||||
sqlalchemy.Column('max_parallel_subclouds', sqlalchemy.Integer),
|
||||
sqlalchemy.Column('stop_on_failure', sqlalchemy.Boolean),
|
||||
sqlalchemy.Column('state', sqlalchemy.String(255)),
|
||||
sqlalchemy.Column('reserved_1', sqlalchemy.Text),
|
||||
sqlalchemy.Column('reserved_2', sqlalchemy.Text),
|
||||
sqlalchemy.Column('created_at', sqlalchemy.DateTime),
|
||||
sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
|
||||
sqlalchemy.Column('deleted_at', sqlalchemy.DateTime),
|
||||
sqlalchemy.Column('deleted', sqlalchemy.Integer),
|
||||
mysql_engine='InnoDB',
|
||||
mysql_charset='utf8'
|
||||
)
|
||||
|
||||
sw_update_opts_default = sqlalchemy.Table(
|
||||
'sw_update_opts_default', meta,
|
||||
sqlalchemy.Column('id', sqlalchemy.Integer,
|
||||
primary_key=True, nullable=False),
|
||||
sqlalchemy.Column('subcloud_id', sqlalchemy.Integer),
|
||||
sqlalchemy.Column('storage_apply_type', sqlalchemy.String(255)),
|
||||
sqlalchemy.Column('compute_apply_type', sqlalchemy.String(255)),
|
||||
sqlalchemy.Column('max_parallel_computes', sqlalchemy.Integer),
|
||||
sqlalchemy.Column('default_instance_action', sqlalchemy.String(255)),
|
||||
sqlalchemy.Column('alarm_restriction_type', sqlalchemy.String(255)),
|
||||
sqlalchemy.Column('reserved_1', sqlalchemy.Text),
|
||||
sqlalchemy.Column('reserved_2', sqlalchemy.Text),
|
||||
sqlalchemy.Column('created_at', sqlalchemy.DateTime),
|
||||
sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
|
||||
sqlalchemy.Column('deleted_at', sqlalchemy.DateTime),
|
||||
sqlalchemy.Column('deleted', sqlalchemy.Integer),
|
||||
mysql_engine='InnoDB',
|
||||
mysql_charset='utf8'
|
||||
)
|
||||
|
||||
sw_update_opts = sqlalchemy.Table(
|
||||
'sw_update_opts', meta,
|
||||
sqlalchemy.Column('id', sqlalchemy.Integer,
|
||||
primary_key=True, nullable=False),
|
||||
sqlalchemy.Column('subcloud_id', sqlalchemy.Integer,
|
||||
sqlalchemy.ForeignKey('subclouds.id',
|
||||
ondelete='CASCADE')),
|
||||
sqlalchemy.Column('storage_apply_type', sqlalchemy.String(255)),
|
||||
sqlalchemy.Column('compute_apply_type', sqlalchemy.String(255)),
|
||||
sqlalchemy.Column('max_parallel_computes', sqlalchemy.Integer),
|
||||
sqlalchemy.Column('default_instance_action', sqlalchemy.String(255)),
|
||||
sqlalchemy.Column('alarm_restriction_type', sqlalchemy.String(255)),
|
||||
sqlalchemy.Column('reserved_1', sqlalchemy.Text),
|
||||
sqlalchemy.Column('reserved_2', sqlalchemy.Text),
|
||||
sqlalchemy.Column('created_at', sqlalchemy.DateTime),
|
||||
sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
|
||||
sqlalchemy.Column('deleted_at', sqlalchemy.DateTime),
|
||||
sqlalchemy.Column('deleted', sqlalchemy.Integer),
|
||||
mysql_engine='InnoDB',
|
||||
mysql_charset='utf8'
|
||||
)
|
||||
|
||||
strategy_steps = sqlalchemy.Table(
|
||||
'strategy_steps', meta,
|
||||
sqlalchemy.Column('id', sqlalchemy.Integer,
|
||||
primary_key=True, nullable=False),
|
||||
sqlalchemy.Column('subcloud_id', sqlalchemy.Integer,
|
||||
sqlalchemy.ForeignKey('subclouds.id',
|
||||
ondelete='CASCADE'),
|
||||
unique=True),
|
||||
sqlalchemy.Column('stage', sqlalchemy.Integer),
|
||||
sqlalchemy.Column('state', sqlalchemy.String(255)),
|
||||
sqlalchemy.Column('details', sqlalchemy.String(255)),
|
||||
sqlalchemy.Column('started_at', sqlalchemy.DateTime),
|
||||
sqlalchemy.Column('finished_at', sqlalchemy.DateTime),
|
||||
sqlalchemy.Column('reserved_1', sqlalchemy.Text),
|
||||
sqlalchemy.Column('reserved_2', sqlalchemy.Text),
|
||||
sqlalchemy.Column('created_at', sqlalchemy.DateTime),
|
||||
sqlalchemy.Column('updated_at', sqlalchemy.DateTime),
|
||||
sqlalchemy.Column('deleted_at', sqlalchemy.DateTime),
|
||||
sqlalchemy.Column('deleted', sqlalchemy.Integer),
|
||||
mysql_engine='InnoDB',
|
||||
mysql_charset='utf8'
|
||||
)
|
||||
|
||||
tables = (
|
||||
subclouds,
|
||||
subcloud_status,
|
||||
sw_update_strategy,
|
||||
strategy_steps,
|
||||
sw_update_opts,
|
||||
sw_update_opts_default
|
||||
)
|
||||
|
||||
for index, table in enumerate(tables):
|
||||
try:
|
||||
table.create()
|
||||
except Exception:
|
||||
# If an error occurs, drop all tables created so far to return
|
||||
# to the previously existing state.
|
||||
meta.drop_all(tables=tables[:index])
|
||||
raise
|
||||
|
||||
try:
|
||||
# populate the sw_update_opts_default with the default values.
|
||||
con = migrate_engine.connect()
|
||||
|
||||
con.execute(sw_update_opts_default.insert(),
|
||||
storage_apply_type=vim.APPLY_TYPE_PARALLEL,
|
||||
compute_apply_type=vim.APPLY_TYPE_PARALLEL,
|
||||
max_parallel_computes=10,
|
||||
default_instance_action=vim.INSTANCE_ACTION_MIGRATE,
|
||||
alarm_restriction_type=vim.ALARM_RESTRICTIONS_RELAXED,
|
||||
deleted=0)
|
||||
except Exception:
|
||||
# We can survive if this fails.
|
||||
pass
|
||||
|
||||
|
||||
def downgrade(migrate_engine):
|
||||
raise NotImplementedError('Database downgrade not supported - '
|
||||
'would drop all tables')
|
47
dcmanager/db/sqlalchemy/migration.py
Normal file
47
dcmanager/db/sqlalchemy/migration.py
Normal file
@ -0,0 +1,47 @@
|
||||
# Copyright (c) 2015 Ericsson AB.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# Copyright (c) 2017 Wind River Systems, Inc.
|
||||
#
|
||||
# The right to copy, distribute, modify, or otherwise make use
|
||||
# of this software may be licensed only pursuant to the terms
|
||||
# of an applicable Wind River license agreement.
|
||||
#
|
||||
|
||||
import os
|
||||
|
||||
from oslo_db.sqlalchemy import migration as oslo_migration
|
||||
|
||||
|
||||
INIT_VERSION = 0
|
||||
|
||||
|
||||
def db_sync(engine, version=None):
|
||||
path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
|
||||
'migrate_repo')
|
||||
return oslo_migration.db_sync(engine, path, version,
|
||||
init_version=INIT_VERSION)
|
||||
|
||||
|
||||
def db_version(engine):
|
||||
path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
|
||||
'migrate_repo')
|
||||
return oslo_migration.db_version(engine, path, INIT_VERSION)
|
||||
|
||||
|
||||
def db_version_control(engine, version=None):
|
||||
path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
|
||||
'migrate_repo')
|
||||
return oslo_migration.db_version_control(engine, path, version)
|
169
dcmanager/db/sqlalchemy/models.py
Normal file
169
dcmanager/db/sqlalchemy/models.py
Normal file
@ -0,0 +1,169 @@
|
||||
# Copyright (c) 2015 Ericsson AB
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# Copyright (c) 2017 Wind River Systems, Inc.
|
||||
#
|
||||
# The right to copy, distribute, modify, or otherwise make use
|
||||
# of this software may be licensed only pursuant to the terms
|
||||
# of an applicable Wind River license agreement.
|
||||
#
|
||||
"""
|
||||
SQLAlchemy models for dcmanager data.
|
||||
"""
|
||||
|
||||
from oslo_db.sqlalchemy import models
|
||||
|
||||
from sqlalchemy.orm import backref, relationship
|
||||
from sqlalchemy.orm import session as orm_session
|
||||
from sqlalchemy import (Column, Integer, String, Boolean, DateTime)
|
||||
from sqlalchemy.ext.declarative import declarative_base
|
||||
from sqlalchemy import ForeignKey
|
||||
|
||||
# from dcmanager.common import consts
|
||||
|
||||
BASE = declarative_base()
|
||||
|
||||
|
||||
def get_session():
|
||||
from dcmanager.db.sqlalchemy import api as db_api
|
||||
|
||||
return db_api.get_session()
|
||||
|
||||
|
||||
class DCManagerBase(models.ModelBase,
|
||||
models.SoftDeleteMixin,
|
||||
models.TimestampMixin):
|
||||
"""Base class for DC Manager Models."""
|
||||
|
||||
# __table_args__ = {'mysql_engine': 'InnoDB'}
|
||||
|
||||
def expire(self, session=None, attrs=None):
|
||||
if not session:
|
||||
session = orm_session.Session.object_session(self)
|
||||
if not session:
|
||||
session = get_session()
|
||||
session.expire(self, attrs)
|
||||
|
||||
def refresh(self, session=None, attrs=None):
|
||||
"""Refresh this object."""
|
||||
if not session:
|
||||
session = orm_session.Session.object_session(self)
|
||||
if not session:
|
||||
session = get_session()
|
||||
session.refresh(self, attrs)
|
||||
|
||||
def delete(self, session=None):
|
||||
"""Delete this object."""
|
||||
if not session:
|
||||
session = orm_session.Session.object_session(self)
|
||||
if not session:
|
||||
session = get_session()
|
||||
session.begin()
|
||||
session.delete(self)
|
||||
session.commit()
|
||||
|
||||
|
||||
class Subcloud(BASE, DCManagerBase):
|
||||
"""Represents a subcloud"""
|
||||
|
||||
__tablename__ = 'subclouds'
|
||||
|
||||
id = Column(Integer, primary_key=True, nullable=False)
|
||||
name = Column(String(255), unique=True)
|
||||
description = Column(String(255))
|
||||
location = Column(String(255))
|
||||
software_version = Column(String(255))
|
||||
management_state = Column(String(255))
|
||||
availability_status = Column(String(255))
|
||||
management_subnet = Column(String(255))
|
||||
management_gateway_ip = Column(String(255))
|
||||
management_start_ip = Column(String(255), unique=True)
|
||||
management_end_ip = Column(String(255), unique=True)
|
||||
systemcontroller_gateway_ip = Column(String(255))
|
||||
audit_fail_count = Column(Integer)
|
||||
|
||||
|
||||
class SubcloudStatus(BASE, DCManagerBase):
|
||||
"""Represents the status of an endpoint in a subcloud"""
|
||||
|
||||
__tablename__ = "subcloud_status"
|
||||
|
||||
id = Column(Integer, primary_key=True, nullable=False)
|
||||
subcloud_id = Column(Integer,
|
||||
ForeignKey('subclouds.id', ondelete='CASCADE'))
|
||||
endpoint_type = Column(String(255))
|
||||
sync_status = Column(String(255))
|
||||
|
||||
|
||||
class SwUpdateStrategy(BASE, DCManagerBase):
|
||||
"""Represents a software update for subclouds"""
|
||||
|
||||
__tablename__ = "sw_update_strategy"
|
||||
|
||||
id = Column(Integer, primary_key=True, nullable=False)
|
||||
type = Column(String(255), unique=True)
|
||||
subcloud_apply_type = Column(String(255))
|
||||
max_parallel_subclouds = Column(Integer)
|
||||
stop_on_failure = Column(Boolean)
|
||||
state = Column(String(255))
|
||||
|
||||
|
||||
class SwUpdateOpts(BASE, DCManagerBase):
|
||||
"""Represents software update options for a subcloud"""
|
||||
|
||||
__tablename__ = "sw_update_opts"
|
||||
|
||||
id = Column(Integer, primary_key=True, nullable=False)
|
||||
subcloud_id = Column(Integer,
|
||||
ForeignKey('subclouds.id', ondelete='CASCADE'))
|
||||
|
||||
storage_apply_type = Column(String(255))
|
||||
compute_apply_type = Column(String(255))
|
||||
max_parallel_computes = Column(Integer)
|
||||
alarm_restriction_type = Column(String(255))
|
||||
default_instance_action = Column(String(255))
|
||||
|
||||
|
||||
class SwUpdateOptsDefault(BASE, DCManagerBase):
|
||||
"""Represents default software update options for subclouds"""
|
||||
|
||||
__tablename__ = "sw_update_opts_default"
|
||||
|
||||
id = Column(Integer, primary_key=True, nullable=False)
|
||||
|
||||
subcloud_id = Column(Integer)
|
||||
storage_apply_type = Column(String(255))
|
||||
compute_apply_type = Column(String(255))
|
||||
max_parallel_computes = Column(Integer)
|
||||
alarm_restriction_type = Column(String(255))
|
||||
default_instance_action = Column(String(255))
|
||||
|
||||
|
||||
class StrategyStep(BASE, DCManagerBase):
|
||||
"""Represents a step for patching or upgrading subclouds"""
|
||||
|
||||
__tablename__ = "strategy_steps"
|
||||
|
||||
id = Column(Integer, primary_key=True, nullable=False)
|
||||
subcloud_id = Column(Integer,
|
||||
ForeignKey('subclouds.id', ondelete='CASCADE'),
|
||||
unique=True)
|
||||
stage = Column(Integer)
|
||||
state = Column(String(255))
|
||||
details = Column(String(255))
|
||||
started_at = Column(DateTime)
|
||||
finished_at = Column(DateTime)
|
||||
subcloud = relationship('Subcloud', backref=backref("strategy_steps",
|
||||
cascade="all,delete"))
|
55
dcmanager/db/utils.py
Normal file
55
dcmanager/db/utils.py
Normal file
@ -0,0 +1,55 @@
|
||||
# Copyright (c) 2015 Ericsson AB.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# Copyright (c) 2017 Wind River Systems, Inc.
|
||||
#
|
||||
# The right to copy, distribute, modify, or otherwise make use
|
||||
# of this software may be licensed only pursuant to the terms
|
||||
# of an applicable Wind River license agreement.
|
||||
#
|
||||
|
||||
|
||||
class LazyPluggable(object):
|
||||
"""A pluggable backend loaded lazily based on some value."""
|
||||
|
||||
def __init__(self, pivot, **backends):
|
||||
self.__backends = backends
|
||||
self.__pivot = pivot
|
||||
self.__backend = None
|
||||
|
||||
def __get_backend(self):
|
||||
if not self.__backend:
|
||||
backend_name = 'sqlalchemy'
|
||||
backend = self.__backends[backend_name]
|
||||
if isinstance(backend, tuple):
|
||||
name = backend[0]
|
||||
fromlist = backend[1]
|
||||
else:
|
||||
name = backend
|
||||
fromlist = backend
|
||||
|
||||
self.__backend = __import__(name, None, None, fromlist)
|
||||
return self.__backend
|
||||
|
||||
def __getattr__(self, key):
|
||||
backend = self.__get_backend()
|
||||
return getattr(backend, key)
|
||||
|
||||
|
||||
IMPL = LazyPluggable('backend', sqlalchemy='dcmanager.db.sqlalchemy.api')
|
||||
|
||||
|
||||
def purge_deleted(age, granularity='days'):
|
||||
IMPL.purge_deleted(age, granularity)
|
5
dcmanager/drivers/README.rst
Normal file
5
dcmanager/drivers/README.rst
Normal file
@ -0,0 +1,5 @@
|
||||
===============================
|
||||
OpenStack Drivers
|
||||
================================
|
||||
|
||||
Driver for openstack communication based on python native clients.
|
0
dcmanager/drivers/__init__.py
Normal file
0
dcmanager/drivers/__init__.py
Normal file
31
dcmanager/drivers/base.py
Normal file
31
dcmanager/drivers/base.py
Normal file
@ -0,0 +1,31 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
'''
|
||||
Base class for all drivers.
|
||||
'''
|
||||
|
||||
import abc
|
||||
import six
|
||||
|
||||
from oslo_log import log as logging
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class DriverBase(object):
|
||||
|
||||
six.add_metaclass(abc.ABCMeta)
|
||||
'''Base class for all drivers.'''
|
||||
|
||||
def __init__(self, context):
|
||||
self.context = context
|
0
dcmanager/drivers/openstack/__init__.py
Normal file
0
dcmanager/drivers/openstack/__init__.py
Normal file
196
dcmanager/drivers/openstack/patching_v1.py
Normal file
196
dcmanager/drivers/openstack/patching_v1.py
Normal file
@ -0,0 +1,196 @@
|
||||
# Copyright 2016 Ericsson AB
|
||||
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# Copyright (c) 2017 Wind River Systems, Inc.
|
||||
#
|
||||
# The right to copy, distribute, modify, or otherwise make use
|
||||
# of this software may be licensed only pursuant to the terms
|
||||
# of an applicable Wind River license agreement.
|
||||
#
|
||||
|
||||
from oslo_log import log
|
||||
import requests
|
||||
from requests_toolbelt import MultipartEncoder
|
||||
|
||||
from dcmanager.drivers import base
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
# Patch states
|
||||
PATCH_STATE_AVAILABLE = 'Available'
|
||||
PATCH_STATE_APPLIED = 'Applied'
|
||||
PATCH_STATE_PARTIAL_APPLY = 'Partial-Apply'
|
||||
PATCH_STATE_PARTIAL_REMOVE = 'Partial-Remove'
|
||||
PATCH_STATE_COMMITTED = 'Committed'
|
||||
PATCH_STATE_UNKNOWN = 'n/a'
|
||||
|
||||
|
||||
class PatchingClient(base.DriverBase):
|
||||
"""Patching V1 driver."""
|
||||
|
||||
def __init__(self, region, session):
|
||||
# Get an endpoint and token.
|
||||
self.endpoint = session.get_endpoint(service_type='patching',
|
||||
region_name=region,
|
||||
interface='internal')
|
||||
self.token = session.get_token()
|
||||
|
||||
def query(self, state=None, release=None,):
|
||||
"""Query patches"""
|
||||
url = self.endpoint + '/v1/query'
|
||||
if state is not None:
|
||||
url += "?show=%s" % state.lower()
|
||||
if release is not None:
|
||||
url += "&release=%s" % release
|
||||
headers = {"X-Auth-Token": self.token}
|
||||
response = requests.get(url, headers=headers)
|
||||
|
||||
if response.status_code == 200:
|
||||
data = response.json()
|
||||
if 'error' in data and data["error"] != "":
|
||||
message = "query failed with error: %s" % data["error"]
|
||||
LOG.error(message)
|
||||
raise Exception(message)
|
||||
else:
|
||||
return data.get('pd', [])
|
||||
else:
|
||||
message = "query failed with RC: %d" % response.status_code
|
||||
LOG.error(message)
|
||||
raise Exception(message)
|
||||
|
||||
def query_hosts(self):
|
||||
"""Query hosts"""
|
||||
url = self.endpoint + '/v1/query_hosts'
|
||||
headers = {"X-Auth-Token": self.token}
|
||||
response = requests.get(url, headers=headers)
|
||||
|
||||
if response.status_code == 200:
|
||||
data = response.json()
|
||||
if 'error' in data and data["error"] != "":
|
||||
message = "query_hosts failed with error: %s" % data["error"]
|
||||
LOG.error(message)
|
||||
raise Exception(message)
|
||||
else:
|
||||
return data.get('data', [])
|
||||
else:
|
||||
message = "query_hosts failed with RC: %d" % response.status_code
|
||||
LOG.error(message)
|
||||
raise Exception(message)
|
||||
|
||||
def apply(self, patches):
|
||||
"""Apply patches"""
|
||||
patch_str = "/".join(patches)
|
||||
url = self.endpoint + '/v1/apply/%s' % patch_str
|
||||
headers = {"X-Auth-Token": self.token}
|
||||
response = requests.post(url, headers=headers)
|
||||
|
||||
if response.status_code == 200:
|
||||
data = response.json()
|
||||
if 'error' in data and data["error"] != "":
|
||||
message = "apply failed with error: %s" % data["error"]
|
||||
LOG.error(message)
|
||||
raise Exception(message)
|
||||
else:
|
||||
return data.get('pd', [])
|
||||
else:
|
||||
message = "apply failed with RC: %d" % response.status_code
|
||||
LOG.error(message)
|
||||
raise Exception(message)
|
||||
|
||||
def remove(self, patches):
|
||||
"""Remove patches"""
|
||||
patch_str = "/".join(patches)
|
||||
url = self.endpoint + '/v1/remove/%s' % patch_str
|
||||
headers = {"X-Auth-Token": self.token}
|
||||
response = requests.post(url, headers=headers)
|
||||
|
||||
if response.status_code == 200:
|
||||
data = response.json()
|
||||
if 'error' in data and data["error"] != "":
|
||||
message = "remove failed with error: %s" % data["error"]
|
||||
LOG.error(message)
|
||||
raise Exception(message)
|
||||
else:
|
||||
return data.get('pd', [])
|
||||
else:
|
||||
message = "remove failed with RC: %d" % response.status_code
|
||||
LOG.error(message)
|
||||
raise Exception(message)
|
||||
|
||||
def delete(self, patches):
|
||||
"""Delete patches"""
|
||||
patch_str = "/".join(patches)
|
||||
url = self.endpoint + '/v1/delete/%s' % patch_str
|
||||
headers = {"X-Auth-Token": self.token}
|
||||
response = requests.post(url, headers=headers)
|
||||
|
||||
if response.status_code == 200:
|
||||
data = response.json()
|
||||
if 'error' in data and data["error"] != "":
|
||||
message = "delete failed with error: %s" % data["error"]
|
||||
LOG.error(message)
|
||||
raise Exception(message)
|
||||
else:
|
||||
return data.get('pd', [])
|
||||
else:
|
||||
message = "delete failed with RC: %d" % response.status_code
|
||||
LOG.error(message)
|
||||
raise Exception(message)
|
||||
|
||||
def commit(self, patches):
|
||||
"""Commit patches"""
|
||||
patch_str = "/".join(patches)
|
||||
url = self.endpoint + '/v1/commit/%s' % patch_str
|
||||
headers = {"X-Auth-Token": self.token}
|
||||
response = requests.post(url, headers=headers)
|
||||
|
||||
if response.status_code == 200:
|
||||
data = response.json()
|
||||
if 'error' in data and data["error"] != "":
|
||||
message = "commit failed with error: %s" % data["error"]
|
||||
LOG.error(message)
|
||||
raise Exception(message)
|
||||
else:
|
||||
return data.get('pd', [])
|
||||
else:
|
||||
message = "commit failed with RC: %d" % response.status_code
|
||||
LOG.error(message)
|
||||
raise Exception(message)
|
||||
|
||||
def upload(self, files):
|
||||
"""Upload patches"""
|
||||
|
||||
for file in sorted(list(set(files))):
|
||||
enc = MultipartEncoder(fields={'file': (file,
|
||||
open(file, 'rb'),
|
||||
)})
|
||||
url = self.endpoint + '/v1/upload'
|
||||
headers = {"X-Auth-Token": self.token,
|
||||
'Content-Type': enc.content_type}
|
||||
response = requests.post(url,
|
||||
data=enc,
|
||||
headers=headers)
|
||||
|
||||
if response.status_code == 200:
|
||||
data = response.json()
|
||||
if 'error' in data and data["error"] != "":
|
||||
message = "upload failed with error: %s" % data["error"]
|
||||
LOG.error(message)
|
||||
raise Exception(message)
|
||||
else:
|
||||
return data.get('pd', [])
|
||||
else:
|
||||
message = "upload failed with RC: %d" % response.status_code
|
||||
LOG.error(message)
|
||||
raise Exception(message)
|
124
dcmanager/drivers/openstack/sysinv_v1.py
Normal file
124
dcmanager/drivers/openstack/sysinv_v1.py
Normal file
@ -0,0 +1,124 @@
|
||||
# Copyright 2016 Ericsson AB
|
||||
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# Copyright (c) 2017 Wind River Systems, Inc.
|
||||
#
|
||||
# The right to copy, distribute, modify, or otherwise make use
|
||||
# of this software may be licensed only pursuant to the terms
|
||||
# of an applicable Wind River license agreement.
|
||||
#
|
||||
|
||||
from oslo_log import log
|
||||
|
||||
from sysinv.common import constants as sysinv_constants
|
||||
|
||||
# from dcmanager.common import consts
|
||||
from dcmanager.common import exceptions
|
||||
from dcmanager.drivers import base
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
API_VERSION = '1'
|
||||
|
||||
|
||||
class SysinvClient(base.DriverBase):
|
||||
"""Sysinv V1 driver."""
|
||||
|
||||
def __init__(self, region, session):
|
||||
try:
|
||||
# TOX cannot import cgts_client and all the dependencies therefore
|
||||
# the client is being lazy loaded since TOX doesn't actually
|
||||
# require the cgtsclient module.
|
||||
from cgtsclient import client
|
||||
|
||||
# The sysinv client doesn't support a session, so we need to
|
||||
# get an endpoint and token.
|
||||
endpoint = session.get_endpoint(service_type='platform',
|
||||
region_name=region,
|
||||
interface='internal')
|
||||
token = session.get_token()
|
||||
|
||||
self.sysinv_client = client.Client(API_VERSION,
|
||||
endpoint=endpoint,
|
||||
token=token)
|
||||
except exceptions.ServiceUnavailable:
|
||||
raise
|
||||
|
||||
def get_controller_hosts(self):
|
||||
"""Get a list of controller hosts."""
|
||||
return self.sysinv_client.ihost.list_personality(
|
||||
sysinv_constants.CONTROLLER)
|
||||
|
||||
def get_management_interface(self, hostname):
|
||||
"""Get the management interface for a host."""
|
||||
interfaces = self.sysinv_client.iinterface.list(hostname)
|
||||
|
||||
for interface in interfaces:
|
||||
if interface.networktype == sysinv_constants.NETWORK_TYPE_MGMT:
|
||||
return interface
|
||||
|
||||
# This can happen if the host is still being installed and has not
|
||||
# yet created its management interface.
|
||||
LOG.warning("Management interface on host %s not found" % hostname)
|
||||
return None
|
||||
|
||||
def get_management_address_pool(self):
|
||||
"""Get the management address pool for a host."""
|
||||
networks = self.sysinv_client.network.list()
|
||||
for network in networks:
|
||||
if network.type == sysinv_constants.NETWORK_TYPE_MGMT:
|
||||
address_pool_uuid = network.pool_uuid
|
||||
break
|
||||
else:
|
||||
LOG.error("Management address pool not found")
|
||||
raise exceptions.InternalError()
|
||||
|
||||
return self.sysinv_client.address_pool.get(address_pool_uuid)
|
||||
|
||||
def create_route(self, interface_uuid, network, prefix, gateway, metric):
|
||||
"""Create a static route on an interface."""
|
||||
|
||||
LOG.info("Creating route: interface: %s dest: %s/%s "
|
||||
"gateway: %s metric %s" % (interface_uuid, network,
|
||||
prefix, gateway, metric))
|
||||
self.sysinv_client.route.create(interface_uuid=interface_uuid,
|
||||
network=network,
|
||||
prefix=prefix,
|
||||
gateway=gateway,
|
||||
metric=metric)
|
||||
|
||||
def delete_route(self, interface_uuid, network, prefix, gateway, metric):
|
||||
"""Delete a static route."""
|
||||
|
||||
# Get the routes for this interface
|
||||
routes = self.sysinv_client.route.list_by_interface(interface_uuid)
|
||||
for route in routes:
|
||||
if (route.network == network and route.prefix == prefix and
|
||||
route.gateway == gateway and route.metric == metric):
|
||||
LOG.info("Deleting route: interface: %s dest: %s/%s "
|
||||
"gateway: %s metric %s" % (interface_uuid, network,
|
||||
prefix, gateway, metric))
|
||||
self.sysinv_client.route.delete(route.uuid)
|
||||
return
|
||||
|
||||
LOG.warning("Route not found: interface: %s dest: %s/%s gateway: %s "
|
||||
"metric %s" % (interface_uuid, network, prefix, gateway,
|
||||
metric))
|
||||
|
||||
def get_service_groups(self):
|
||||
"""Get a list of service groups."""
|
||||
return self.sysinv_client.sm_servicegroup.list()
|
||||
|
||||
def get_loads(self):
|
||||
"""Get a list of loads."""
|
||||
return self.sysinv_client.load.list()
|
157
dcmanager/drivers/openstack/vim.py
Normal file
157
dcmanager/drivers/openstack/vim.py
Normal file
@ -0,0 +1,157 @@
|
||||
# Copyright 2016 Ericsson AB
|
||||
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# Copyright (c) 2017 Wind River Systems, Inc.
|
||||
#
|
||||
# The right to copy, distribute, modify, or otherwise make use
|
||||
# of this software may be licensed only pursuant to the terms
|
||||
# of an applicable Wind River license agreement.
|
||||
#
|
||||
|
||||
from oslo_log import log
|
||||
|
||||
from nfv_client.openstack import sw_update
|
||||
|
||||
from dcmanager.common import consts
|
||||
from dcmanager.common import exceptions
|
||||
from dcmanager.drivers import base
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
STRATEGY_NAME_SW_PATCH = 'sw-patch'
|
||||
STRATEGY_NAME_SW_UPGRADE = 'sw-upgrade'
|
||||
|
||||
APPLY_TYPE_SERIAL = 'serial'
|
||||
APPLY_TYPE_PARALLEL = 'parallel'
|
||||
APPLY_TYPE_IGNORE = 'ignore'
|
||||
|
||||
INSTANCE_ACTION_MIGRATE = 'migrate'
|
||||
INSTANCE_ACTION_STOP_START = 'stop-start'
|
||||
|
||||
ALARM_RESTRICTIONS_STRICT = 'strict'
|
||||
ALARM_RESTRICTIONS_RELAXED = 'relaxed'
|
||||
|
||||
SW_UPDATE_OPTS_CONST_DEFAULT = {
|
||||
"name": consts.SW_UPDATE_DEFAULT_TITLE,
|
||||
"storage-apply-type": APPLY_TYPE_PARALLEL,
|
||||
"compute-apply-type": APPLY_TYPE_PARALLEL,
|
||||
"max-parallel-computes": 10,
|
||||
"default-instance-action": INSTANCE_ACTION_MIGRATE,
|
||||
"alarm-restriction-type": ALARM_RESTRICTIONS_RELAXED,
|
||||
"created-at": None,
|
||||
"updated-at": None}
|
||||
|
||||
STATE_INITIAL = 'initial'
|
||||
STATE_BUILDING = 'building'
|
||||
STATE_BUILD_FAILED = 'build-failed'
|
||||
STATE_BUILD_TIMEOUT = 'build-timeout'
|
||||
STATE_READY_TO_APPLY = 'ready-to-apply'
|
||||
STATE_APPLYING = 'applying'
|
||||
STATE_APPLY_FAILED = 'apply-failed'
|
||||
STATE_APPLY_TIMEOUT = 'apply-timeout'
|
||||
STATE_APPLIED = 'applied'
|
||||
STATE_ABORTING = 'aborting'
|
||||
STATE_ABORT_FAILED = 'abort-failed'
|
||||
STATE_ABORT_TIMEOUT = 'abort-timeout'
|
||||
STATE_ABORTED = 'aborted'
|
||||
|
||||
|
||||
class VimClient(base.DriverBase):
|
||||
"""VIM driver."""
|
||||
|
||||
def __init__(self, region, session):
|
||||
try:
|
||||
# The nfv_client doesn't support a session, so we need to
|
||||
# get an endpoint and token.
|
||||
self.endpoint = session.get_endpoint(service_type='nfv',
|
||||
region_name=region,
|
||||
interface='internal')
|
||||
self.token = session.get_token()
|
||||
|
||||
except exceptions.ServiceUnavailable:
|
||||
raise
|
||||
|
||||
def create_strategy(self, strategy_name, storage_apply_type,
|
||||
compute_apply_type, max_parallel_compute_hosts,
|
||||
default_instance_action, alarm_restrictions):
|
||||
"""Create orchestration strategy"""
|
||||
|
||||
url = self.endpoint
|
||||
strategy = sw_update.create_strategy(
|
||||
self.token, url,
|
||||
strategy_name=strategy_name,
|
||||
controller_apply_type=APPLY_TYPE_SERIAL,
|
||||
storage_apply_type=storage_apply_type,
|
||||
swift_apply_type=APPLY_TYPE_IGNORE,
|
||||
compute_apply_type=compute_apply_type,
|
||||
max_parallel_compute_hosts=max_parallel_compute_hosts,
|
||||
default_instance_action=default_instance_action,
|
||||
alarm_restrictions=alarm_restrictions)
|
||||
if not strategy:
|
||||
raise Exception("Strategy creation failed")
|
||||
|
||||
LOG.debug("Strategy created: %s" % strategy)
|
||||
return strategy
|
||||
|
||||
def get_strategy(self, strategy_name):
|
||||
"""Get the current orchestration strategy"""
|
||||
|
||||
url = self.endpoint
|
||||
strategy = sw_update.get_strategies(
|
||||
self.token, url,
|
||||
strategy_name=strategy_name)
|
||||
if not strategy:
|
||||
raise Exception("Get strategy failed")
|
||||
|
||||
LOG.debug("Strategy: %s" % strategy)
|
||||
return strategy
|
||||
|
||||
def delete_strategy(self, strategy_name):
|
||||
"""Delete the current orchestration strategy"""
|
||||
|
||||
url = self.endpoint
|
||||
success = sw_update.delete_strategy(
|
||||
self.token, url,
|
||||
strategy_name=strategy_name)
|
||||
if not success:
|
||||
raise Exception("Delete strategy failed")
|
||||
|
||||
LOG.debug("Strategy deleted")
|
||||
|
||||
def apply_strategy(self, strategy_name):
|
||||
"""Apply the current orchestration strategy"""
|
||||
|
||||
url = self.endpoint
|
||||
strategy = sw_update.apply_strategy(
|
||||
self.token, url,
|
||||
strategy_name=strategy_name)
|
||||
if not strategy:
|
||||
raise Exception("Strategy apply failed")
|
||||
|
||||
LOG.debug("Strategy applied: %s" % strategy)
|
||||
return strategy
|
||||
|
||||
def abort_strategy(self, strategy_name):
|
||||
"""Abort the current orchestration strategy"""
|
||||
|
||||
url = self.endpoint
|
||||
strategy = sw_update.abort_strategy(
|
||||
self.token, url,
|
||||
strategy_name=strategy_name,
|
||||
stage_id=None)
|
||||
if not strategy:
|
||||
raise Exception("Strategy abort failed")
|
||||
|
||||
LOG.debug("Strategy aborted: %s" % strategy)
|
||||
return strategy
|
23
dcmanager/manager/README.rst
Executable file
23
dcmanager/manager/README.rst
Executable file
@ -0,0 +1,23 @@
|
||||
===============================
|
||||
Service
|
||||
===============================
|
||||
|
||||
DC Manager Service has responsibility for:
|
||||
Main subcloud state machine as well as all operations on subclouds
|
||||
including creation, deletion and update.
|
||||
|
||||
service.py:
|
||||
run DC Manager service in multi-worker mode, and establish RPC server
|
||||
|
||||
subcloud_manager.py:
|
||||
Manages all subcloud related activities such as creation, deletion,
|
||||
availability status, management state
|
||||
|
||||
audit_manager.py:
|
||||
A Periodic audit to contact each subcloud and ensure that at least
|
||||
one of each service group is up and active, which is a pre-requisite
|
||||
for declaring a subcloud as online.
|
||||
|
||||
scheduler.py:
|
||||
Thread group manager, also responsible for periodic timer tasks - ie. audit.
|
||||
|
0
dcmanager/manager/__init__.py
Normal file
0
dcmanager/manager/__init__.py
Normal file
248
dcmanager/manager/patch_audit_manager.py
Normal file
248
dcmanager/manager/patch_audit_manager.py
Normal file
@ -0,0 +1,248 @@
|
||||
# Copyright 2017 Ericsson AB.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Copyright (c) 2017 Wind River Systems, Inc.
|
||||
#
|
||||
# The right to copy, distribute, modify, or otherwise make use
|
||||
# of this software may be licensed only pursuant to the terms
|
||||
# of an applicable Wind River license agreement.
|
||||
#
|
||||
|
||||
from keystoneauth1 import exceptions as keystone_exceptions
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
|
||||
from dcorch.common import consts as dcorch_consts
|
||||
from dcorch.drivers.openstack.keystone_v3 import KeystoneClient
|
||||
|
||||
from dcmanager.common import consts
|
||||
from dcmanager.common import context
|
||||
from dcmanager.common.i18n import _
|
||||
from dcmanager.common import manager
|
||||
from dcmanager.db import api as db_api
|
||||
|
||||
from dcmanager.drivers.openstack import patching_v1
|
||||
from dcmanager.drivers.openstack.patching_v1 import PatchingClient
|
||||
from dcmanager.drivers.openstack.sysinv_v1 import SysinvClient
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
# By default the patch audit will only occur every five minutes.
|
||||
DEFAULT_PATCH_AUDIT_DELAY_SECONDS = 300
|
||||
|
||||
|
||||
class PatchAuditManager(manager.Manager):
|
||||
"""Manages tasks related to patch audits."""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
LOG.info(_('PatchAuditManager initialization...'))
|
||||
|
||||
super(PatchAuditManager, self).__init__(
|
||||
service_name="patch_audit_manager")
|
||||
self.context = context.get_admin_context()
|
||||
self.subcloud_manager = kwargs['subcloud_manager']
|
||||
# Wait 20 seconds before doing the first audit
|
||||
self.wait_time_passed = DEFAULT_PATCH_AUDIT_DELAY_SECONDS - 25
|
||||
|
||||
# Used to force an audit on the next interval
|
||||
_force_audit = False
|
||||
|
||||
@classmethod
|
||||
def trigger_audit(cls):
|
||||
"""Trigger audit at next interval.
|
||||
|
||||
This can be called from outside the audit greenthread.
|
||||
"""
|
||||
cls._force_audit = True
|
||||
|
||||
def periodic_patch_audit(self):
|
||||
"""Audit patch status of subclouds.
|
||||
|
||||
Audit normally happens every DEFAULT_PATCH_AUDIT_DELAY_SECONDS, but
|
||||
can be forced to happen on the next audit interval by calling
|
||||
trigger_audit.
|
||||
"""
|
||||
|
||||
do_audit = False
|
||||
|
||||
if PatchAuditManager._force_audit:
|
||||
# Audit has been triggered.
|
||||
do_audit = True
|
||||
else:
|
||||
# This won't be super accurate as we aren't woken up after exactly
|
||||
# the interval seconds, but it is good enough for an audit.
|
||||
self.wait_time_passed += cfg.CONF.scheduler.patch_audit_interval
|
||||
if self.wait_time_passed >= DEFAULT_PATCH_AUDIT_DELAY_SECONDS:
|
||||
do_audit = True
|
||||
|
||||
if do_audit:
|
||||
self.wait_time_passed = 0
|
||||
PatchAuditManager._force_audit = False
|
||||
# Blanket catch all exceptions in the audit so that the audit
|
||||
# does not die.
|
||||
try:
|
||||
self._periodic_patch_audit_loop()
|
||||
except Exception as e:
|
||||
LOG.exception(e)
|
||||
|
||||
def _periodic_patch_audit_loop(self):
|
||||
"""Audit patch status of subclouds loop."""
|
||||
|
||||
# We are running in our own green thread here.
|
||||
LOG.info('Triggered patch audit.')
|
||||
|
||||
try:
|
||||
ks_client = KeystoneClient()
|
||||
except Exception:
|
||||
LOG.warn('Failure initializing KeystoneClient, exiting audit.')
|
||||
return
|
||||
|
||||
# First query RegionOne to determine what patches should be applied
|
||||
# to the system.
|
||||
patching_client = PatchingClient(
|
||||
consts.DEFAULT_REGION_NAME, ks_client.session)
|
||||
regionone_patches = patching_client.query()
|
||||
LOG.debug("regionone_patches: %s" % regionone_patches)
|
||||
|
||||
# Build lists of patches that should be applied or committed in all
|
||||
# subclouds, based on their state in RegionOne. Check repostate
|
||||
# (not patchstate) as we only care if the patch has been applied to
|
||||
# the repo (not whether it is installed on the hosts).
|
||||
applied_patch_ids = list()
|
||||
committed_patch_ids = list()
|
||||
for patch_id in regionone_patches.keys():
|
||||
if regionone_patches[patch_id]['repostate'] == \
|
||||
patching_v1.PATCH_STATE_APPLIED:
|
||||
applied_patch_ids.append(patch_id)
|
||||
elif regionone_patches[patch_id]['repostate'] == \
|
||||
patching_v1.PATCH_STATE_COMMITTED:
|
||||
committed_patch_ids.append(patch_id)
|
||||
LOG.debug("RegionOne applied_patch_ids: %s" % applied_patch_ids)
|
||||
LOG.debug("RegionOne committed_patch_ids: %s" % committed_patch_ids)
|
||||
|
||||
# For each subcloud, check whether the patches match the target.
|
||||
for subcloud in db_api.subcloud_get_all(self.context):
|
||||
# Only audit patching on subclouds that are managed and online
|
||||
if (subcloud.management_state != consts.MANAGEMENT_MANAGED or
|
||||
subcloud.availability_status !=
|
||||
consts.AVAILABILITY_ONLINE):
|
||||
continue
|
||||
|
||||
try:
|
||||
sc_ks_client = KeystoneClient(subcloud.name)
|
||||
except (keystone_exceptions.EndpointNotFound, IndexError) as e:
|
||||
LOG.warn("Identity endpoint for online subcloud % not found. %"
|
||||
% (subcloud.name, e))
|
||||
continue
|
||||
|
||||
try:
|
||||
patching_client = PatchingClient(subcloud.name,
|
||||
sc_ks_client.session)
|
||||
except keystone_exceptions.EndpointNotFound:
|
||||
LOG.warn("Patching endpoint for online subcloud %s not found."
|
||||
% subcloud.name)
|
||||
continue
|
||||
|
||||
try:
|
||||
sysinv_client = SysinvClient(subcloud.name,
|
||||
sc_ks_client.session)
|
||||
except keystone_exceptions.EndpointNotFound:
|
||||
LOG.warn("Sysinv endpoint for online subcloud %s not found."
|
||||
% subcloud.name)
|
||||
continue
|
||||
|
||||
# Retrieve all the patches that are present in this subcloud.
|
||||
try:
|
||||
subcloud_patches = patching_client.query()
|
||||
LOG.debug("Patches for subcloud %s: %s" %
|
||||
(subcloud.name, subcloud_patches))
|
||||
except Exception:
|
||||
LOG.warn('Cannot retrieve patches for subcloud: %s' %
|
||||
subcloud.name)
|
||||
continue
|
||||
|
||||
# Determine which loads are present in this subcloud. During an
|
||||
# upgrade, there will be more than one load installed.
|
||||
installed_loads = list()
|
||||
try:
|
||||
loads = sysinv_client.get_loads()
|
||||
except Exception:
|
||||
LOG.warn('Cannot retrieve loads for subcloud: %s' %
|
||||
subcloud.name)
|
||||
continue
|
||||
for load in loads:
|
||||
installed_loads.append(load.software_version)
|
||||
|
||||
out_of_sync = False
|
||||
|
||||
# Check that all patches in this subcloud are in the correct
|
||||
# state, based on the state of the patch in RegionOne. For the
|
||||
# subcloud, we use the patchstate because we care whether the
|
||||
# patch is installed on the hosts.
|
||||
for patch_id in subcloud_patches.keys():
|
||||
if subcloud_patches[patch_id]['patchstate'] == \
|
||||
patching_v1.PATCH_STATE_APPLIED:
|
||||
if patch_id not in applied_patch_ids:
|
||||
if patch_id not in committed_patch_ids:
|
||||
LOG.debug("Patch %s should not be applied in %s" %
|
||||
(patch_id, subcloud.name))
|
||||
else:
|
||||
LOG.debug("Patch %s should be committed in %s" %
|
||||
(patch_id, subcloud.name))
|
||||
out_of_sync = True
|
||||
elif subcloud_patches[patch_id]['patchstate'] == \
|
||||
patching_v1.PATCH_STATE_COMMITTED:
|
||||
if patch_id not in committed_patch_ids:
|
||||
LOG.warn("Patch %s should not be committed in %s" %
|
||||
(patch_id, subcloud.name))
|
||||
out_of_sync = True
|
||||
else:
|
||||
# In steady state, all patches should either be applied
|
||||
# or committed in each subcloud. Patches in other
|
||||
# states mean a sync is required.
|
||||
out_of_sync = True
|
||||
|
||||
# Check that all applied or committed patches in RegionOne are
|
||||
# present in the subcloud.
|
||||
for patch_id in applied_patch_ids:
|
||||
if regionone_patches[patch_id]['sw_version'] in \
|
||||
installed_loads and patch_id not in subcloud_patches:
|
||||
LOG.debug("Patch %s missing from %s" %
|
||||
(patch_id, subcloud.name))
|
||||
out_of_sync = True
|
||||
for patch_id in committed_patch_ids:
|
||||
if regionone_patches[patch_id]['sw_version'] in \
|
||||
installed_loads and patch_id not in subcloud_patches:
|
||||
LOG.debug("Patch %s missing from %s" %
|
||||
(patch_id, subcloud.name))
|
||||
out_of_sync = True
|
||||
|
||||
if out_of_sync:
|
||||
LOG.debug("Subcloud %s is out-of-sync for patching" %
|
||||
subcloud.name)
|
||||
self.subcloud_manager.update_subcloud_endpoint_status(
|
||||
self.context,
|
||||
subcloud_name=subcloud.name,
|
||||
endpoint_type=dcorch_consts.ENDPOINT_TYPE_PATCHING,
|
||||
sync_status=consts.SYNC_STATUS_OUT_OF_SYNC)
|
||||
else:
|
||||
LOG.debug("Subcloud %s is in-sync for patching" %
|
||||
subcloud.name)
|
||||
self.subcloud_manager.update_subcloud_endpoint_status(
|
||||
self.context,
|
||||
subcloud_name=subcloud.name,
|
||||
endpoint_type=dcorch_consts.ENDPOINT_TYPE_PATCHING,
|
||||
sync_status=consts.SYNC_STATUS_IN_SYNC)
|
91
dcmanager/manager/scheduler.py
Normal file
91
dcmanager/manager/scheduler.py
Normal file
@ -0,0 +1,91 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# Copyright (c) 2017 Wind River Systems, Inc.
|
||||
#
|
||||
# The right to copy, distribute, modify, or otherwise make use
|
||||
# of this software may be licensed only pursuant to the terms
|
||||
# of an applicable Wind River license agreement.
|
||||
#
|
||||
|
||||
import time
|
||||
|
||||
import eventlet
|
||||
from oslo_log import log as logging
|
||||
|
||||
from oslo_service import threadgroup
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
wallclock = time.time
|
||||
|
||||
|
||||
class ThreadGroupManager(object):
|
||||
'''Thread group manager.'''
|
||||
|
||||
def __init__(self):
|
||||
super(ThreadGroupManager, self).__init__()
|
||||
self.threads = {}
|
||||
self.group = threadgroup.ThreadGroup()
|
||||
|
||||
def start(self, func, *args, **kwargs):
|
||||
'''Run the given method in a sub-thread.'''
|
||||
|
||||
return self.group.add_thread(func, *args, **kwargs)
|
||||
|
||||
def add_timer(self, interval, func, *args, **kwargs):
|
||||
'''Define a periodic task to be run in the thread group.
|
||||
|
||||
The task will be executed in a separate green thread.
|
||||
'''
|
||||
|
||||
self.group.add_timer(interval, func, *args, **kwargs)
|
||||
|
||||
def stop_timers(self):
|
||||
self.group.stop_timers()
|
||||
|
||||
def stop(self, graceful=False):
|
||||
'''Stop any active threads belong to this threadgroup.'''
|
||||
# Try to stop all threads gracefully
|
||||
self.group.stop(graceful)
|
||||
self.group.wait()
|
||||
|
||||
# Wait for link()ed functions (i.e. lock release)
|
||||
threads = self.group.threads[:]
|
||||
links_done = dict((th, False) for th in threads)
|
||||
|
||||
def mark_done(gt, th):
|
||||
links_done[th] = True
|
||||
|
||||
for th in threads:
|
||||
th.link(mark_done, th)
|
||||
|
||||
while not all(links_done.values()):
|
||||
eventlet.sleep()
|
||||
|
||||
|
||||
def reschedule(action, sleep_time=1):
|
||||
'''Eventlet Sleep for the specified number of seconds.
|
||||
|
||||
:param sleep_time: seconds to sleep; if None, no sleep;
|
||||
'''
|
||||
|
||||
if sleep_time is not None:
|
||||
LOG.debug('Action %s sleep for %s seconds' % (
|
||||
action.id, sleep_time))
|
||||
eventlet.sleep(sleep_time)
|
||||
|
||||
|
||||
def sleep(sleep_time):
|
||||
'''Interface for sleeping.'''
|
||||
|
||||
eventlet.sleep(sleep_time)
|
236
dcmanager/manager/service.py
Normal file
236
dcmanager/manager/service.py
Normal file
@ -0,0 +1,236 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# Copyright (c) 2017 Wind River Systems, Inc.
|
||||
#
|
||||
# The right to copy, distribute, modify, or otherwise make use
|
||||
# of this software may be licensed only pursuant to the terms
|
||||
# of an applicable Wind River license agreement.
|
||||
#
|
||||
|
||||
import six
|
||||
import time
|
||||
|
||||
import functools
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
import oslo_messaging
|
||||
from oslo_service import service
|
||||
from oslo_utils import uuidutils
|
||||
|
||||
from dcorch.common import consts as dcorch_consts
|
||||
|
||||
from dcmanager.common import consts
|
||||
from dcmanager.common import context
|
||||
from dcmanager.common import exceptions
|
||||
from dcmanager.common.i18n import _
|
||||
from dcmanager.common import messaging as rpc_messaging
|
||||
from dcmanager.manager.patch_audit_manager import PatchAuditManager
|
||||
from dcmanager.manager import scheduler
|
||||
from dcmanager.manager.subcloud_audit_manager import SubcloudAuditManager
|
||||
from dcmanager.manager.subcloud_manager import SubcloudManager
|
||||
from dcmanager.manager.sw_update_manager import SwUpdateManager
|
||||
|
||||
CONF = cfg.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def request_context(func):
|
||||
@functools.wraps(func)
|
||||
def wrapped(self, ctx, *args, **kwargs):
|
||||
if ctx is not None and not isinstance(ctx, context.RequestContext):
|
||||
ctx = context.RequestContext.from_dict(ctx.to_dict())
|
||||
try:
|
||||
return func(self, ctx, *args, **kwargs)
|
||||
except exceptions.DCManagerException:
|
||||
raise oslo_messaging.rpc.dispatcher.ExpectedException()
|
||||
|
||||
return wrapped
|
||||
|
||||
|
||||
class DCManagerService(service.Service):
|
||||
"""Lifecycle manager for a running service.
|
||||
|
||||
- All the methods in here are called from the RPC client.
|
||||
- If a RPC call does not have a corresponding method here, an exception
|
||||
will be thrown.
|
||||
- Arguments to these calls are added dynamically and will be treated as
|
||||
keyword arguments by the RPC client.
|
||||
"""
|
||||
|
||||
def __init__(self, host, topic, manager=None):
|
||||
|
||||
super(DCManagerService, self).__init__()
|
||||
self.host = cfg.CONF.host
|
||||
self.rpc_api_version = consts.RPC_API_VERSION
|
||||
self.topic = consts.TOPIC_DC_MANAGER
|
||||
# The following are initialized here, but assigned in start() which
|
||||
# happens after the fork when spawning multiple worker processes
|
||||
self.engine_id = None
|
||||
self.TG = None
|
||||
self.periodic_enable = cfg.CONF.scheduler.periodic_enable
|
||||
self.target = None
|
||||
self._rpc_server = None
|
||||
self.subcloud_manager = None
|
||||
self.subcloud_audit_manager = None
|
||||
self.sw_update_manager = None
|
||||
self.patch_audit_manager = None
|
||||
|
||||
def init_tgm(self):
|
||||
self.TG = scheduler.ThreadGroupManager()
|
||||
|
||||
def init_audit_managers(self):
|
||||
self.subcloud_audit_manager = SubcloudAuditManager(
|
||||
subcloud_manager=self.subcloud_manager)
|
||||
self.patch_audit_manager = PatchAuditManager(
|
||||
subcloud_manager=self.subcloud_manager)
|
||||
|
||||
def init_managers(self):
|
||||
self.subcloud_manager = SubcloudManager()
|
||||
self.sw_update_manager = SwUpdateManager()
|
||||
|
||||
def stop_managers(self):
|
||||
self.sw_update_manager.stop()
|
||||
|
||||
def start(self):
|
||||
self.dcmanager_id = uuidutils.generate_uuid()
|
||||
self.init_tgm()
|
||||
self.init_managers()
|
||||
self.init_audit_managers()
|
||||
target = oslo_messaging.Target(version=self.rpc_api_version,
|
||||
server=self.host,
|
||||
topic=self.topic)
|
||||
self.target = target
|
||||
self._rpc_server = rpc_messaging.get_rpc_server(self.target, self)
|
||||
self._rpc_server.start()
|
||||
|
||||
super(DCManagerService, self).start()
|
||||
if self.periodic_enable:
|
||||
LOG.info("Adding periodic tasks for the manager to perform")
|
||||
self.TG.add_timer(cfg.CONF.scheduler.subcloud_audit_interval,
|
||||
self.subcloud_audit, None)
|
||||
self.TG.add_timer(cfg.CONF.scheduler.patch_audit_interval,
|
||||
self.patch_audit, None)
|
||||
|
||||
def subcloud_audit(self):
|
||||
# Audit availability of all subclouds.
|
||||
# Note this will run in a separate green thread
|
||||
LOG.debug("Subcloud audit job started at: %s",
|
||||
time.strftime("%c"))
|
||||
self.subcloud_audit_manager.periodic_subcloud_audit()
|
||||
|
||||
def patch_audit(self):
|
||||
# Audit patch status of all subclouds.
|
||||
# Note this will run in a separate green thread
|
||||
LOG.debug("Patch audit job started at: %s",
|
||||
time.strftime("%c"))
|
||||
self.patch_audit_manager.periodic_patch_audit()
|
||||
|
||||
@request_context
|
||||
def add_subcloud(self, context, payload):
|
||||
# Adds a subcloud
|
||||
LOG.info("Handling add_subcloud request for: %s" % payload.get('name'))
|
||||
return self.subcloud_manager.add_subcloud(context, payload)
|
||||
|
||||
@request_context
|
||||
def delete_subcloud(self, context, subcloud_id):
|
||||
# Deletes a subcloud
|
||||
LOG.info("Handling delete_subcloud request for: %s" % subcloud_id)
|
||||
return self.subcloud_manager.delete_subcloud(context, subcloud_id)
|
||||
|
||||
@request_context
|
||||
def update_subcloud(self, context, subcloud_id, management_state=None,
|
||||
description=None, location=None):
|
||||
# Updates a subcloud
|
||||
LOG.info("Handling update_subcloud request for: %s" % subcloud_id)
|
||||
subcloud = self.subcloud_manager.update_subcloud(context, subcloud_id,
|
||||
management_state,
|
||||
description,
|
||||
location)
|
||||
# If a subcloud has been set to the managed state, trigger the
|
||||
# patching audit so it can update the sync status ASAP.
|
||||
if management_state == consts.MANAGEMENT_MANAGED:
|
||||
PatchAuditManager.trigger_audit()
|
||||
|
||||
return subcloud
|
||||
|
||||
@request_context
|
||||
def update_subcloud_endpoint_status(self, context, subcloud_name=None,
|
||||
endpoint_type=None,
|
||||
sync_status=consts.
|
||||
SYNC_STATUS_OUT_OF_SYNC,
|
||||
alarmable=True):
|
||||
# Updates subcloud endpoint sync status
|
||||
LOG.info("Handling update_subcloud_endpoint_status request for: %s" %
|
||||
subcloud_name)
|
||||
|
||||
self.subcloud_manager. \
|
||||
update_subcloud_endpoint_status(context,
|
||||
subcloud_name,
|
||||
endpoint_type,
|
||||
sync_status,
|
||||
alarmable)
|
||||
|
||||
# If the patching sync status is being set to unknown, trigger the
|
||||
# patching audit so it can update the sync status ASAP.
|
||||
if endpoint_type == dcorch_consts.ENDPOINT_TYPE_PATCHING and \
|
||||
sync_status == consts.SYNC_STATUS_UNKNOWN:
|
||||
PatchAuditManager.trigger_audit()
|
||||
|
||||
return
|
||||
|
||||
@request_context
|
||||
def create_sw_update_strategy(self, context, payload):
|
||||
# Creates a software update strategy
|
||||
LOG.info("Handling create_sw_update_strategy request of type %s" %
|
||||
payload.get('type'))
|
||||
return self.sw_update_manager.create_sw_update_strategy(
|
||||
context, payload)
|
||||
|
||||
@request_context
|
||||
def delete_sw_update_strategy(self, context):
|
||||
# Deletes the software update strategy
|
||||
LOG.info("Handling delete_sw_update_strategy request")
|
||||
return self.sw_update_manager.delete_sw_update_strategy(context)
|
||||
|
||||
@request_context
|
||||
def apply_sw_update_strategy(self, context):
|
||||
# Applies the software update strategy
|
||||
LOG.info("Handling apply_sw_update_strategy request")
|
||||
return self.sw_update_manager.apply_sw_update_strategy(context)
|
||||
|
||||
@request_context
|
||||
def abort_sw_update_strategy(self, context):
|
||||
# Aborts the software update strategy
|
||||
LOG.info("Handling abort_sw_update_strategy request")
|
||||
return self.sw_update_manager.abort_sw_update_strategy(context)
|
||||
|
||||
def _stop_rpc_server(self):
|
||||
# Stop RPC connection to prevent new requests
|
||||
LOG.debug(_("Attempting to stop engine service..."))
|
||||
try:
|
||||
self._rpc_server.stop()
|
||||
self._rpc_server.wait()
|
||||
LOG.info('Engine service stopped successfully')
|
||||
except Exception as ex:
|
||||
LOG.error('Failed to stop engine service: %s',
|
||||
six.text_type(ex))
|
||||
|
||||
def stop(self):
|
||||
self._stop_rpc_server()
|
||||
|
||||
self.TG.stop()
|
||||
self.stop_managers()
|
||||
|
||||
# Terminate the engine process
|
||||
LOG.info("All threads were gone, terminating engine")
|
||||
super(DCManagerService, self).stop()
|
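The handlers above are only reachable over RPC; nothing instantiates DCManagerService directly. A minimal caller sketch, using the ManagerClient added later in this commit (dcmanager/rpc/client.py). The subnet and IP values are illustrative examples, not defaults:

# Illustrative caller of the RPC entry points defined above.
# The payload keys mirror what SubcloudManager.add_subcloud() reads;
# the address values are examples only.
from dcmanager.common import context
from dcmanager.rpc import client as rpc_client

ctxt = context.get_admin_context()
dcmanager_rpc = rpc_client.ManagerClient()

payload = {
    'name': 'subcloud-1',
    'description': 'example subcloud',
    'location': 'lab',
    'management-subnet': 'fd01:3::0/64',
    'management-gateway-ip': 'fd01:3::1',
    'management-start-ip': 'fd01:3::2',
    'management-end-ip': 'fd01:3::f',
    'systemcontroller-gateway-ip': 'fd01:1::1',
}

# Blocking call, dispatched to DCManagerService.add_subcloud().
subcloud = dcmanager_rpc.add_subcloud(ctxt, payload)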
259
dcmanager/manager/subcloud_audit_manager.py
Normal file
@ -0,0 +1,259 @@
|
||||
# Copyright 2017 Ericsson AB.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Copyright (c) 2017 Wind River Systems, Inc.
|
||||
#
|
||||
# The right to copy, distribute, modify, or otherwise make use
|
||||
# of this software may be licensed only pursuant to the terms
|
||||
# of an applicable Wind River license agreement.
|
||||
#
|
||||
|
||||
from oslo_log import log as logging
|
||||
|
||||
|
||||
from dcorch.drivers.openstack.keystone_v3 import KeystoneClient
|
||||
from dcorch.rpc import client as dcorch_rpc_client
|
||||
|
||||
from dcmanager.common import consts
|
||||
from dcmanager.common import context
|
||||
from dcmanager.common import exceptions
|
||||
from dcmanager.common.i18n import _
|
||||
from dcmanager.common import manager
|
||||
from dcmanager.db import api as db_api
|
||||
|
||||
from dcmanager.drivers.openstack.sysinv_v1 import SysinvClient
|
||||
from keystoneauth1 import exceptions as keystone_exceptions
|
||||
|
||||
from fm_api import constants as fm_const
|
||||
from fm_api import fm_api
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class SubcloudAuditManager(manager.Manager):
|
||||
"""Manages tasks related to audits."""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
LOG.debug(_('SubcloudAuditManager initialization...'))
|
||||
|
||||
super(SubcloudAuditManager, self).__init__(
|
||||
service_name="subcloud_audit_manager")
|
||||
self.context = context.get_admin_context()
|
||||
self.dcorch_rpc_client = dcorch_rpc_client.EngineClient()
|
||||
self.fm_api = fm_api.FaultAPIs()
|
||||
self.subcloud_manager = kwargs['subcloud_manager']
|
||||
|
||||
def periodic_subcloud_audit(self):
|
||||
"""Audit availability of subclouds."""
|
||||
|
||||
# Blanket catch all exceptions in the audit so that the audit
|
||||
# does not die.
|
||||
try:
|
||||
self._periodic_subcloud_audit_loop()
|
||||
except Exception as e:
|
||||
LOG.exception(e)
|
||||
|
||||
def _periodic_subcloud_audit_loop(self):
|
||||
"""Audit availability of subclouds loop."""
|
||||
# We will be running in our own green thread here.
|
||||
LOG.info('Triggered subcloud audit.')
|
||||
|
||||
# For each subcloud, declare it online only if every service group in
|
||||
# servicegroup-list has at least one active instance.
|
||||
|
||||
for subcloud in db_api.subcloud_get_all(self.context):
|
||||
subcloud_name = subcloud.name
|
||||
subcloud_id = subcloud.id
|
||||
management_state = subcloud.management_state
|
||||
avail_status_current = subcloud.availability_status
|
||||
audit_fail_count = subcloud.audit_fail_count
|
||||
|
||||
# Set defaults to None and disabled so we will still set disabled
|
||||
# status if we encounter an error.
|
||||
|
||||
sysinv_client = None
|
||||
svc_groups = None
|
||||
avail_to_set = consts.AVAILABILITY_OFFLINE
|
||||
|
||||
try:
|
||||
ks_client = KeystoneClient(subcloud_name)
|
||||
sysinv_client = SysinvClient(subcloud_name,
|
||||
ks_client.session)
|
||||
except (keystone_exceptions.EndpointNotFound,
|
||||
keystone_exceptions.ConnectFailure, IndexError) as e:
|
||||
if avail_status_current == consts.AVAILABILITY_OFFLINE:
|
||||
LOG.info("Identity or Platform endpoint for %s not "
|
||||
"found, ignoring for offline "
|
||||
"subcloud." % subcloud_name)
|
||||
continue
|
||||
else:
|
||||
LOG.error("Identity or Platform endpoint for online "
|
||||
"subcloud: %s not found." % subcloud_name)
|
||||
|
||||
except Exception as e:
|
||||
LOG.exception(e)
|
||||
|
||||
if sysinv_client:
|
||||
try:
|
||||
svc_groups = sysinv_client.get_service_groups()
|
||||
except Exception as e:
|
||||
svc_groups = None
|
||||
LOG.warn('Cannot retrieve service groups for '
|
||||
'subcloud:%s, %s' % (subcloud_name, e))
|
||||
|
||||
if svc_groups:
|
||||
active_sgs = []
|
||||
inactive_sgs = []
|
||||
|
||||
# Build 2 lists, 1 of active service groups,
|
||||
# one with non-active.
|
||||
for sg in svc_groups:
|
||||
if sg.state != consts.SERVICE_GROUP_STATUS_ACTIVE:
|
||||
inactive_sgs.append(sg.service_group_name)
|
||||
else:
|
||||
active_sgs.append(sg.service_group_name)
|
||||
|
||||
# Create a list of service groups that are only present
|
||||
# in non-active list
|
||||
inactive_only = [sg for sg in inactive_sgs if
|
||||
sg not in active_sgs]
|
||||
|
||||
# An empty inactive only list and a non-empty active list
|
||||
# means we're good to go.
|
||||
if not inactive_only and active_sgs:
|
||||
avail_to_set = \
|
||||
consts.AVAILABILITY_ONLINE
|
||||
else:
|
||||
LOG.info("Subcloud:%s has non-active "
|
||||
"service groups: %s" %
|
||||
(subcloud_name, inactive_only))
|
||||
|
||||
if avail_to_set == consts.AVAILABILITY_OFFLINE:
|
||||
if audit_fail_count < consts.AVAIL_FAIL_COUNT_MAX:
|
||||
audit_fail_count = audit_fail_count + 1
|
||||
|
||||
if (avail_status_current == consts.AVAILABILITY_ONLINE) and \
|
||||
(audit_fail_count < consts.AVAIL_FAIL_COUNT_TO_ALARM):
|
||||
# Do not set offline until we have failed audit
|
||||
# the requisite number of times
|
||||
avail_to_set = consts.AVAILABILITY_ONLINE
|
||||
else:
|
||||
# In the case of a one-off blip, we may need to set the
|
||||
# fail count back to 0
|
||||
audit_fail_count = 0
|
||||
|
||||
if avail_to_set != avail_status_current:
|
||||
|
||||
if avail_to_set == consts.AVAILABILITY_ONLINE:
|
||||
audit_fail_count = 0
|
||||
|
||||
LOG.info('Setting new availability status: %s '
|
||||
'on subcloud: %s' %
|
||||
(avail_to_set, subcloud_name))
|
||||
|
||||
entity_instance_id = "subcloud=%s" % subcloud_name
|
||||
fault = self.fm_api.get_fault(
|
||||
fm_const.FM_ALARM_ID_DC_SUBCLOUD_OFFLINE,
|
||||
entity_instance_id)
|
||||
|
||||
if fault and (avail_to_set == consts.AVAILABILITY_ONLINE):
|
||||
try:
|
||||
self.fm_api.clear_fault(
|
||||
fm_const.FM_ALARM_ID_DC_SUBCLOUD_OFFLINE,
|
||||
entity_instance_id)
|
||||
except Exception as e:
|
||||
LOG.exception(e)
|
||||
|
||||
elif not fault and \
|
||||
(avail_to_set == consts.AVAILABILITY_OFFLINE):
|
||||
try:
|
||||
fault = fm_api.Fault(
|
||||
alarm_id=fm_const.FM_ALARM_ID_DC_SUBCLOUD_OFFLINE,
|
||||
alarm_state=fm_const.FM_ALARM_STATE_SET,
|
||||
entity_type_id=fm_const.FM_ENTITY_TYPE_SUBCLOUD,
|
||||
entity_instance_id=entity_instance_id,
|
||||
severity=fm_const.FM_ALARM_SEVERITY_CRITICAL,
|
||||
reason_text=('%s is offline' % subcloud_name),
|
||||
alarm_type=fm_const.FM_ALARM_TYPE_0,
|
||||
probable_cause=fm_const.ALARM_PROBABLE_CAUSE_29,
|
||||
proposed_repair_action="Wait for subcloud to "
|
||||
"become online; if "
|
||||
"problem persists contact "
|
||||
"next level of support.",
|
||||
service_affecting=True)
|
||||
|
||||
self.fm_api.set_fault(fault)
|
||||
except Exception as e:
|
||||
LOG.exception(e)
|
||||
|
||||
try:
|
||||
db_api.subcloud_update(self.context, subcloud_id,
|
||||
management_state=None,
|
||||
availability_status=avail_to_set,
|
||||
software_version=None,
|
||||
description=None, location=None,
|
||||
audit_fail_count=audit_fail_count)
|
||||
except exceptions.SubcloudNotFound:
|
||||
# slim possibility subcloud could have been deleted since
|
||||
# we found it in db, ignore this benign error.
|
||||
LOG.info('Ignoring SubcloudNotFound when attempting state'
|
||||
' update: %s' % subcloud_name)
|
||||
continue
|
||||
|
||||
try:
|
||||
self.dcorch_rpc_client.\
|
||||
update_subcloud_states(self.context,
|
||||
subcloud_name,
|
||||
management_state,
|
||||
avail_to_set)
|
||||
|
||||
LOG.info('Notifying dcorch, subcloud:%s management: %s, '
|
||||
'availability:%s' % (subcloud_name,
|
||||
management_state,
|
||||
avail_to_set))
|
||||
except Exception as e:
|
||||
LOG.exception(e)
|
||||
LOG.warn('Problem informing dcorch of subcloud '
|
||||
'state change, subcloud: %s' % subcloud_name)
|
||||
|
||||
if avail_to_set == consts.AVAILABILITY_OFFLINE:
|
||||
# Subcloud is going offline, set all endpoint statuses to
|
||||
# unknown.
|
||||
try:
|
||||
self.subcloud_manager.update_subcloud_endpoint_status(
|
||||
self.context,
|
||||
subcloud_name=subcloud_name,
|
||||
endpoint_type=None,
|
||||
sync_status=consts.SYNC_STATUS_UNKNOWN)
|
||||
except exceptions.SubcloudNotFound:
|
||||
LOG.info('Ignoring SubcloudNotFound when attempting '
|
||||
'sync_status update: %s' % subcloud_name)
|
||||
continue
|
||||
|
||||
elif audit_fail_count != subcloud.audit_fail_count:
|
||||
|
||||
try:
|
||||
db_api.subcloud_update(self.context, subcloud_id,
|
||||
management_state=None,
|
||||
availability_status=None,
|
||||
software_version=None,
|
||||
description=None, location=None,
|
||||
audit_fail_count=audit_fail_count)
|
||||
except exceptions.SubcloudNotFound:
|
||||
# slim possibility subcloud could have been deleted since
|
||||
# we found it in db, ignore this benign error.
|
||||
LOG.info('Ignoring SubcloudNotFound when attempting '
|
||||
'audit_fail_count update: %s' % subcloud_name)
|
||||
continue
|
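The availability decision above interleaves two concerns: the service-group health check and the fail-count debounce. A condensed restatement of that logic for readers tracing the state machine; evaluate_availability() is a hypothetical helper written for this summary, not part of the commit:

# Condensed, illustrative restatement of the audit decision above.
from dcmanager.common import consts


def evaluate_availability(svc_groups, current_status, fail_count):
    active = {sg.service_group_name for sg in svc_groups
              if sg.state == consts.SERVICE_GROUP_STATUS_ACTIVE}
    inactive_only = {sg.service_group_name for sg in svc_groups
                     if sg.service_group_name not in active}

    if active and not inactive_only:
        # Healthy: every service group has at least one active instance,
        # so the fail count resets.
        return consts.AVAILABILITY_ONLINE, 0

    # Unhealthy: count the failure, but do not mark an online subcloud
    # offline until the alarm threshold of consecutive failures is hit.
    fail_count = min(fail_count + 1, consts.AVAIL_FAIL_COUNT_MAX)
    if (current_status == consts.AVAILABILITY_ONLINE and
            fail_count < consts.AVAIL_FAIL_COUNT_TO_ALARM):
        return consts.AVAILABILITY_ONLINE, fail_count
    return consts.AVAILABILITY_OFFLINE, fail_count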
590
dcmanager/manager/subcloud_manager.py
Normal file
@ -0,0 +1,590 @@
|
||||
# Copyright 2017 Ericsson AB.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Copyright (c) 2017-2018 Wind River Systems, Inc.
|
||||
#
|
||||
# The right to copy, distribute, modify, or otherwise make use
|
||||
# of this software may be licensed only pursuant to the terms
|
||||
# of an applicable Wind River license agreement.
|
||||
#
|
||||
|
||||
import filecmp
|
||||
import netaddr
|
||||
import os
|
||||
|
||||
from oslo_log import log as logging
|
||||
from oslo_messaging import RemoteError
|
||||
|
||||
from tsconfig.tsconfig import CONFIG_PATH
|
||||
from tsconfig.tsconfig import SW_VERSION
|
||||
|
||||
from dcorch.common import consts as dcorch_consts
|
||||
from dcorch.drivers.openstack.keystone_v3 import KeystoneClient
|
||||
from dcorch.rpc import client as dcorch_rpc_client
|
||||
|
||||
from dcmanager.common import consts
|
||||
from dcmanager.common import context
|
||||
from dcmanager.common import exceptions
|
||||
from dcmanager.common.i18n import _
|
||||
from dcmanager.common import manager
|
||||
from dcmanager.db import api as db_api
|
||||
from dcmanager.drivers.openstack.sysinv_v1 import SysinvClient
|
||||
|
||||
from fm_api import constants as fm_const
|
||||
from fm_api import fm_api
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
# Name of our distributed cloud addn_hosts file for dnsmasq
|
||||
# to read. This file is referenced in dnsmasq.conf
|
||||
ADDN_HOSTS_DC = 'dnsmasq.addn_hosts_dc'
|
||||
|
||||
|
||||
class SubcloudManager(manager.Manager):
|
||||
"""Manages tasks related to subclouds."""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
LOG.debug(_('SubcloudManager initialization...'))
|
||||
|
||||
super(SubcloudManager, self).__init__(service_name="subcloud_manager",
|
||||
*args, **kwargs)
|
||||
self.context = context.get_admin_context()
|
||||
self.dcorch_rpc_client = dcorch_rpc_client.EngineClient()
|
||||
self.fm_api = fm_api.FaultAPIs()
|
||||
|
||||
def add_subcloud(self, context, payload):
|
||||
"""Add subcloud and notify orchestrators.
|
||||
|
||||
:param context: request context object
|
||||
:param name: name of subcloud to add
|
||||
:param payload: subcloud configuration
|
||||
"""
|
||||
LOG.info("Adding subcloud %s." % payload['name'])
|
||||
|
||||
try:
|
||||
subcloud = db_api.subcloud_get_by_name(context, payload['name'])
|
||||
except exceptions.SubcloudNameNotFound:
|
||||
pass
|
||||
else:
|
||||
raise exceptions.BadRequest(
|
||||
resource='subcloud',
|
||||
msg='Subcloud with that name already exists')
|
||||
|
||||
# Subcloud is added with software version that matches system
|
||||
# controller.
|
||||
software_version = SW_VERSION
|
||||
try:
|
||||
subcloud = db_api.subcloud_create(
|
||||
context,
|
||||
payload['name'],
|
||||
payload.get('description'),
|
||||
payload.get('location'),
|
||||
software_version,
|
||||
payload['management-subnet'],
|
||||
payload['management-gateway-ip'],
|
||||
payload['management-start-ip'],
|
||||
payload['management-end-ip'],
|
||||
payload['systemcontroller-gateway-ip'])
|
||||
except Exception as e:
|
||||
LOG.exception(e)
|
||||
raise e
|
||||
|
||||
# Populate the subcloud status table with all endpoints
|
||||
for endpoint in dcorch_consts.ENDPOINT_TYPES_LIST:
|
||||
db_api.subcloud_status_create(context,
|
||||
subcloud.id,
|
||||
endpoint)
|
||||
|
||||
try:
|
||||
# Create a new route to this subcloud on the management interface
|
||||
# on both controllers.
|
||||
m_ks_client = KeystoneClient()
|
||||
subcloud_subnet = netaddr.IPNetwork(payload['management-subnet'])
|
||||
session = m_ks_client.endpoint_cache.get_session_from_token(
|
||||
context.auth_token, context.project)
|
||||
sysinv_client = SysinvClient(consts.DEFAULT_REGION_NAME, session)
|
||||
controllers = sysinv_client.get_controller_hosts()
|
||||
for controller in controllers:
|
||||
management_interface = sysinv_client.get_management_interface(
|
||||
controller.hostname)
|
||||
if management_interface is not None:
|
||||
sysinv_client.create_route(
|
||||
management_interface.uuid,
|
||||
str(subcloud_subnet.ip),
|
||||
subcloud_subnet.prefixlen,
|
||||
payload['systemcontroller-gateway-ip'],
|
||||
1)
|
||||
|
||||
# Create identity endpoints to this subcloud on the
|
||||
# management-start-ip of the subcloud which will be allocated
|
||||
# as the floating Management IP of the Subcloud if the
|
||||
# Address Pool is not shared. In case the endpoint entry
|
||||
# is incorrect, or the management IP of the subcloud is changed
|
||||
# in the future, it will not go managed or will show up as
|
||||
# out of sync. To fix this, use OpenStack endpoint commands
|
||||
# on the SystemController to change the subcloud endpoint
|
||||
ks_service_id = None
|
||||
for service in m_ks_client.services_list:
|
||||
if service.type == dcorch_consts.ENDPOINT_TYPE_IDENTITY:
|
||||
ks_service_id = service.id
|
||||
break
|
||||
else:
|
||||
raise exceptions.BadRequest(
|
||||
resource='subcloud',
|
||||
msg='No Identity service found on SystemController')
|
||||
|
||||
identity_endpoint_ip = payload['management-start-ip']
|
||||
|
||||
if netaddr.IPAddress(identity_endpoint_ip).version == 6:
|
||||
identity_endpoint_url = \
|
||||
"http://[{}]:5000/v3".format(identity_endpoint_ip)
|
||||
else:
|
||||
identity_endpoint_url = \
|
||||
"http://{}:5000/v3".format(identity_endpoint_ip)
|
||||
|
||||
for iface in ['internal', 'admin']:
|
||||
m_ks_client.keystone_client.endpoints.create(
|
||||
ks_service_id,
|
||||
identity_endpoint_url,
|
||||
interface=iface,
|
||||
region=subcloud.name)
|
||||
|
||||
# Inform orchestrator that subcloud has been added
|
||||
self.dcorch_rpc_client.add_subcloud(
|
||||
context, subcloud.name, subcloud.software_version)
|
||||
|
||||
# Regenerate the addn_hosts_dc file
|
||||
self._create_addn_hosts_dc(context)
|
||||
|
||||
return db_api.subcloud_db_model_to_dict(subcloud)
|
||||
|
||||
except Exception as e:
|
||||
LOG.exception(e)
|
||||
# If we failed to create the subcloud, clean up anything we may
|
||||
# have done.
|
||||
self._delete_subcloud_routes(context, subcloud)
|
||||
db_api.subcloud_destroy(context, subcloud.id)
|
||||
raise e
|
||||
|
||||
def _create_addn_hosts_dc(self, context):
|
||||
"""Generate the addn_hosts_dc file for hostname/ip translation"""
|
||||
|
||||
addn_hosts_dc = os.path.join(CONFIG_PATH, ADDN_HOSTS_DC)
|
||||
addn_hosts_dc_temp = addn_hosts_dc + '.temp'
|
||||
|
||||
subclouds = db_api.subcloud_get_all(context)
|
||||
with open(addn_hosts_dc_temp, 'w') as f_out_addn_dc_temp:
|
||||
for subcloud in subclouds:
|
||||
addn_dc_line = subcloud.management_start_ip + ' ' + \
|
||||
subcloud.name + '\n'
|
||||
f_out_addn_dc_temp.write(addn_dc_line)
|
||||
|
||||
# if no more subclouds, create empty file so dnsmasq does not
|
||||
# emit an error log.
|
||||
if not subclouds:
|
||||
f_out_addn_dc_temp.write(' ')
|
||||
|
||||
if not filecmp.cmp(addn_hosts_dc_temp, addn_hosts_dc):
|
||||
os.rename(addn_hosts_dc_temp, addn_hosts_dc)
|
||||
# restart dnsmasq so it can re-read our addn_hosts file.
|
||||
os.system("pkill -HUP dnsmasq")
|
||||
|
||||
def _delete_subcloud_routes(self, context, subcloud):
|
||||
"""Delete the routes to this subcloud"""
|
||||
|
||||
keystone_client = KeystoneClient()
|
||||
# Delete subcloud's identity endpoints
|
||||
keystone_client.delete_endpoints(subcloud.name)
|
||||
|
||||
# Delete the route to this subcloud on the management interface on
|
||||
# both controllers.
|
||||
management_subnet = netaddr.IPNetwork(subcloud.management_subnet)
|
||||
session = keystone_client.endpoint_cache.get_session_from_token(
|
||||
context.auth_token, context.project)
|
||||
sysinv_client = SysinvClient(consts.DEFAULT_REGION_NAME, session)
|
||||
controllers = sysinv_client.get_controller_hosts()
|
||||
for controller in controllers:
|
||||
management_interface = sysinv_client.get_management_interface(
|
||||
controller.hostname)
|
||||
if management_interface is not None:
|
||||
sysinv_client.delete_route(
|
||||
management_interface.uuid,
|
||||
str(management_subnet.ip),
|
||||
management_subnet.prefixlen,
|
||||
str(netaddr.IPAddress(
|
||||
subcloud.systemcontroller_gateway_ip)),
|
||||
1)
|
||||
|
||||
def delete_subcloud(self, context, subcloud_id):
|
||||
"""Delete subcloud and notify orchestrators.
|
||||
|
||||
:param context: request context object.
|
||||
:param subcloud_id: id of subcloud to delete
|
||||
"""
|
||||
LOG.info("Deleting subcloud %s." % subcloud_id)
|
||||
|
||||
# Retrieve the subcloud details from the database
|
||||
subcloud = db_api.subcloud_get(context, subcloud_id)
|
||||
|
||||
# Semantic checking
|
||||
if subcloud.management_state != consts.MANAGEMENT_UNMANAGED:
|
||||
raise exceptions.SubcloudNotUnmanaged()
|
||||
|
||||
if subcloud.availability_status == \
|
||||
consts.AVAILABILITY_ONLINE:
|
||||
raise exceptions.SubcloudNotOffline()
|
||||
|
||||
# Inform orchestrators that subcloud has been deleted
|
||||
try:
|
||||
self.dcorch_rpc_client.del_subcloud(context, subcloud.name)
|
||||
except RemoteError as e:
|
||||
if "SubcloudNotFound" in e:
|
||||
pass
|
||||
|
||||
# We only delete subcloud endpoints, region and user information
|
||||
# in the Central Region. The subcloud is already unmanaged and powered
|
||||
# down so is not accessible. Therefore set up a session with the
|
||||
# Central Region Keystone ONLY.
|
||||
keystone_client = KeystoneClient()
|
||||
|
||||
# Delete keystone endpoints for subcloud
|
||||
keystone_client.delete_endpoints(subcloud.name)
|
||||
keystone_client.delete_region(subcloud.name)
|
||||
|
||||
# Delete the routes to this subcloud
|
||||
self._delete_subcloud_routes(context, subcloud)
|
||||
|
||||
# Remove the subcloud from the database
|
||||
try:
|
||||
db_api.subcloud_destroy(context, subcloud_id)
|
||||
except Exception as e:
|
||||
LOG.exception(e)
|
||||
raise e
|
||||
|
||||
# Clear the offline fault associated with this subcloud as we
|
||||
# are deleting it. Note that endpoint out-of-sync alarms should
|
||||
# have been cleared when the subcloud was unmanaged and the endpoint
|
||||
# sync statuses were set to unknown.
|
||||
entity_instance_id = "subcloud=%s" % subcloud.name
|
||||
|
||||
try:
|
||||
subcloud_offline = fm_const.FM_ALARM_ID_DC_SUBCLOUD_OFFLINE
|
||||
fault = self.fm_api.get_fault(subcloud_offline,
|
||||
entity_instance_id)
|
||||
|
||||
if fault:
|
||||
self.fm_api.clear_fault(subcloud_offline,
|
||||
entity_instance_id)
|
||||
except Exception as e:
|
||||
LOG.info("Problem clearing offline fault for "
|
||||
"subcloud %s" % subcloud.name)
|
||||
LOG.exception(e)
|
||||
|
||||
# Regenerate the addn_hosts_dc file
|
||||
self._create_addn_hosts_dc(context)
|
||||
|
||||
def update_subcloud(self, context, subcloud_id, management_state=None,
|
||||
description=None, location=None):
|
||||
"""Update subcloud and notify orchestrators.
|
||||
|
||||
:param context: request context object
|
||||
:param subcloud_id: id of subcloud to update
|
||||
:param management_state: new management state
|
||||
:param description: new description
|
||||
:param location: new location
|
||||
"""
|
||||
|
||||
LOG.info("Updating subcloud %s." % subcloud_id)
|
||||
|
||||
# Get the subcloud details from the database
|
||||
subcloud = db_api.subcloud_get(context, subcloud_id)
|
||||
|
||||
# Semantic checking
|
||||
if management_state:
|
||||
if management_state == consts.MANAGEMENT_UNMANAGED:
|
||||
if subcloud.management_state == consts.MANAGEMENT_UNMANAGED:
|
||||
LOG.warning("Subcloud %s already unmanaged" % subcloud_id)
|
||||
raise exceptions.BadRequest(
|
||||
resource='subcloud',
|
||||
msg='Subcloud is already unmanaged')
|
||||
elif management_state == consts.MANAGEMENT_MANAGED:
|
||||
if subcloud.management_state == consts.MANAGEMENT_MANAGED:
|
||||
LOG.warning("Subcloud %s already managed" % subcloud_id)
|
||||
raise exceptions.BadRequest(
|
||||
resource='subcloud',
|
||||
msg='Subcloud is already managed')
|
||||
if subcloud.availability_status != \
|
||||
consts.AVAILABILITY_ONLINE:
|
||||
LOG.warning("Subcloud %s is not online" % subcloud_id)
|
||||
raise exceptions.SubcloudNotOnline()
|
||||
else:
|
||||
LOG.error("Invalid management_state %s" % management_state)
|
||||
raise exceptions.InternalError()
|
||||
|
||||
subcloud = db_api.subcloud_update(context, subcloud_id,
|
||||
management_state=management_state,
|
||||
description=description,
|
||||
location=location)
|
||||
|
||||
# Inform orchestrators that subcloud has been updated
|
||||
if management_state:
|
||||
|
||||
try:
|
||||
# Inform orchestrator of state change
|
||||
self.dcorch_rpc_client.update_subcloud_states(
|
||||
context,
|
||||
subcloud.name,
|
||||
management_state,
|
||||
subcloud.availability_status)
|
||||
|
||||
LOG.info('Notifying dcorch, subcloud:%s management: %s, '
|
||||
'availability:%s' % (subcloud.name,
|
||||
management_state,
|
||||
subcloud.availability_status))
|
||||
|
||||
except Exception as e:
|
||||
LOG.exception(e)
|
||||
LOG.warn('Problem informing dcorch of subcloud '
|
||||
'state change, subcloud: %s' % subcloud.name)
|
||||
|
||||
if management_state == consts.MANAGEMENT_UNMANAGED:
|
||||
|
||||
# set all endpoint statuses to unknown
|
||||
self.update_subcloud_endpoint_status(
|
||||
context,
|
||||
subcloud_name=subcloud.name,
|
||||
endpoint_type=None,
|
||||
sync_status=consts.SYNC_STATUS_UNKNOWN)
|
||||
|
||||
return db_api.subcloud_db_model_to_dict(subcloud)
|
||||
|
||||
def _update_endpoint_status_for_subcloud(self, context, subcloud_id,
|
||||
endpoint_type, sync_status,
|
||||
alarmable):
|
||||
"""Update subcloud endpoint status
|
||||
|
||||
:param context: request context object
|
||||
:param subcloud_id: id of subcloud to update
|
||||
:param endpoint_type: endpoint type to update
|
||||
:param sync_status: sync status to set
|
||||
"""
|
||||
|
||||
subcloud_status_list = []
|
||||
subcloud = None
|
||||
# retrieve the info from the db for this subcloud.
|
||||
# subcloud_id should not be None
|
||||
try:
|
||||
for subcloud, subcloud_status in db_api. \
|
||||
subcloud_get_with_status(context, subcloud_id):
|
||||
if subcloud_status:
|
||||
subcloud_status_list.append(
|
||||
db_api.subcloud_endpoint_status_db_model_to_dict(
|
||||
subcloud_status))
|
||||
except Exception as e:
|
||||
LOG.exception(e)
|
||||
raise e
|
||||
|
||||
if subcloud:
|
||||
if endpoint_type:
|
||||
# updating a single endpoint on a single subcloud
|
||||
for subcloud_status in subcloud_status_list:
|
||||
if subcloud_status['endpoint_type'] == endpoint_type:
|
||||
if subcloud_status['sync_status'] == sync_status:
|
||||
# No change in the sync_status
|
||||
LOG.debug("Sync status (%s) for subcloud %s did "
|
||||
"not change - ignore update" %
|
||||
(sync_status, subcloud.name))
|
||||
return
|
||||
# We found the endpoint
|
||||
break
|
||||
else:
|
||||
# We did not find the endpoint
|
||||
raise exceptions.BadRequest(
|
||||
resource='subcloud',
|
||||
msg='Endpoint %s not found for subcloud' %
|
||||
endpoint_type)
|
||||
|
||||
LOG.info("Updating subcloud:%s endpoint:%s sync:%s" %
|
||||
(subcloud.name, endpoint_type, sync_status))
|
||||
db_api.subcloud_status_update(context,
|
||||
subcloud_id,
|
||||
endpoint_type,
|
||||
sync_status)
|
||||
|
||||
entity_instance_id = "subcloud=%s.resource=%s" % \
|
||||
(subcloud.name, endpoint_type)
|
||||
fault = self.fm_api.get_fault(
|
||||
fm_const.FM_ALARM_ID_DC_SUBCLOUD_RESOURCE_OUT_OF_SYNC,
|
||||
entity_instance_id)
|
||||
|
||||
if (sync_status != consts.SYNC_STATUS_OUT_OF_SYNC) \
|
||||
and fault:
|
||||
try:
|
||||
self.fm_api.clear_fault(
|
||||
fm_const.FM_ALARM_ID_DC_SUBCLOUD_RESOURCE_OUT_OF_SYNC, # noqa
|
||||
entity_instance_id)
|
||||
except Exception as e:
|
||||
LOG.exception(e)
|
||||
|
||||
elif not fault and alarmable and \
|
||||
(sync_status == consts.SYNC_STATUS_OUT_OF_SYNC):
|
||||
entity_type_id = fm_const.FM_ENTITY_TYPE_SUBCLOUD
|
||||
try:
|
||||
fault = fm_api.Fault(
|
||||
alarm_id=fm_const.FM_ALARM_ID_DC_SUBCLOUD_RESOURCE_OUT_OF_SYNC, # noqa
|
||||
alarm_state=fm_const.FM_ALARM_STATE_SET,
|
||||
entity_type_id=entity_type_id,
|
||||
entity_instance_id=entity_instance_id,
|
||||
severity=fm_const.FM_ALARM_SEVERITY_MAJOR,
|
||||
reason_text=("%s %s sync_status is "
|
||||
"out-of-sync" %
|
||||
(subcloud.name, endpoint_type)),
|
||||
alarm_type=fm_const.FM_ALARM_TYPE_0,
|
||||
probable_cause=fm_const.ALARM_PROBABLE_CAUSE_2,
|
||||
proposed_repair_action="If problem persists "
|
||||
"contact next level "
|
||||
"of support",
|
||||
service_affecting=False)
|
||||
|
||||
self.fm_api.set_fault(fault)
|
||||
|
||||
except Exception as e:
|
||||
LOG.exception(e)
|
||||
|
||||
else:
|
||||
# update all endpoints on this subcloud
|
||||
LOG.info("Updating all subclouds, endpoint: %s sync: %s" %
|
||||
(subcloud.name, sync_status))
|
||||
|
||||
for entry in subcloud_status_list:
|
||||
endpoint = entry[consts.ENDPOINT_TYPE]
|
||||
db_api.subcloud_status_update(context,
|
||||
subcloud_id,
|
||||
endpoint,
|
||||
sync_status)
|
||||
|
||||
entity_instance_id = "subcloud=%s.resource=%s" % \
|
||||
(subcloud.name, endpoint)
|
||||
|
||||
fault = self.fm_api.get_fault(
|
||||
fm_const.FM_ALARM_ID_DC_SUBCLOUD_RESOURCE_OUT_OF_SYNC,
|
||||
entity_instance_id)
|
||||
|
||||
if (sync_status != consts.SYNC_STATUS_OUT_OF_SYNC) \
|
||||
and fault:
|
||||
try:
|
||||
self.fm_api.clear_fault(
|
||||
fm_const.FM_ALARM_ID_DC_SUBCLOUD_RESOURCE_OUT_OF_SYNC, # noqa
|
||||
entity_instance_id)
|
||||
except Exception as e:
|
||||
LOG.exception(e)
|
||||
|
||||
elif not fault and alarmable and \
|
||||
(sync_status == consts.SYNC_STATUS_OUT_OF_SYNC):
|
||||
entity_type_id = fm_const.FM_ENTITY_TYPE_SUBCLOUD
|
||||
try:
|
||||
fault = fm_api.Fault(
|
||||
alarm_id=fm_const.FM_ALARM_ID_DC_SUBCLOUD_RESOURCE_OUT_OF_SYNC, # noqa
|
||||
alarm_state=fm_const.FM_ALARM_STATE_SET,
|
||||
entity_type_id=entity_type_id,
|
||||
entity_instance_id=entity_instance_id,
|
||||
severity=fm_const.FM_ALARM_SEVERITY_MAJOR,
|
||||
reason_text=("%s %s sync_status is "
|
||||
"out-of-sync" %
|
||||
(subcloud.name, endpoint)),
|
||||
alarm_type=fm_const.FM_ALARM_TYPE_0,
|
||||
probable_cause=fm_const.ALARM_PROBABLE_CAUSE_2,
|
||||
proposed_repair_action="If problem persists "
|
||||
"contact next level "
|
||||
"of support",
|
||||
service_affecting=False)
|
||||
|
||||
self.fm_api.set_fault(fault)
|
||||
except Exception as e:
|
||||
LOG.exception(e)
|
||||
|
||||
else:
|
||||
LOG.error("Subcloud not found:%s" % subcloud_id)
|
||||
|
||||
def update_subcloud_endpoint_status(
|
||||
self, context,
|
||||
subcloud_name=None,
|
||||
endpoint_type=None,
|
||||
sync_status=consts.SYNC_STATUS_OUT_OF_SYNC,
|
||||
alarmable=True):
|
||||
"""Update subcloud endpoint status
|
||||
|
||||
:param context: request context object
|
||||
:param subcloud_name: name of subcloud to update
|
||||
:param endpoint_type: endpoint type to update
|
||||
:param sync_status: sync status to set
|
||||
"""
|
||||
|
||||
subcloud = None
|
||||
|
||||
if subcloud_name:
|
||||
try:
|
||||
subcloud = db_api.subcloud_get_by_name(context, subcloud_name)
|
||||
except Exception as e:
|
||||
LOG.exception(e)
|
||||
raise e
|
||||
|
||||
# Only allow updating the sync status if managed and online.
|
||||
# This means if a subcloud is going offline or unmanaged, then
|
||||
# the sync status update must be done first.
|
||||
if (((subcloud.availability_status ==
|
||||
consts.AVAILABILITY_ONLINE)
|
||||
and (subcloud.management_state ==
|
||||
consts.MANAGEMENT_MANAGED))
|
||||
or (sync_status != consts.SYNC_STATUS_IN_SYNC)):
|
||||
|
||||
# update a single subcloud
|
||||
try:
|
||||
self._update_endpoint_status_for_subcloud(context,
|
||||
subcloud.id,
|
||||
endpoint_type,
|
||||
sync_status,
|
||||
alarmable)
|
||||
except Exception as e:
|
||||
LOG.exception(e)
|
||||
raise e
|
||||
else:
|
||||
LOG.info("Ignoring unmanaged/offline subcloud sync_status "
|
||||
"update for subcloud:%s endpoint:%s sync:%s" %
|
||||
(subcloud_name, endpoint_type, sync_status))
|
||||
|
||||
else:
|
||||
# update all subclouds
|
||||
for subcloud in db_api.subcloud_get_all(context):
|
||||
if (((subcloud.availability_status ==
|
||||
consts.AVAILABILITY_ONLINE)
|
||||
and (subcloud.management_state ==
|
||||
consts.MANAGEMENT_MANAGED))
|
||||
or (sync_status != consts.SYNC_STATUS_IN_SYNC)):
|
||||
|
||||
try:
|
||||
self._update_endpoint_status_for_subcloud(
|
||||
context,
|
||||
subcloud.id,
|
||||
endpoint_type,
|
||||
sync_status,
|
||||
alarmable)
|
||||
except Exception as e:
|
||||
LOG.exception(e)
|
||||
raise e
|
||||
else:
|
||||
LOG.info("Ignoring unmanaged/offline subcloud sync_status "
|
||||
"update for subcloud:%s endpoint:%s sync:%s" %
|
||||
(subcloud.name, endpoint_type, sync_status))
|
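The guard that appears twice above reduces to one predicate: a sync-status update is applied only when the subcloud is managed and online, unless the new status is itself not in-sync. A one-function restatement (should_update_sync_status() is a hypothetical name, not part of the commit):

# Condensed restatement of the guard used in
# update_subcloud_endpoint_status() above.
from dcmanager.common import consts


def should_update_sync_status(subcloud, sync_status):
    managed_and_online = (
        subcloud.availability_status == consts.AVAILABILITY_ONLINE and
        subcloud.management_state == consts.MANAGEMENT_MANAGED)
    # Statuses other than in-sync (out-of-sync, unknown) are always
    # recorded, so a subcloud going offline or unmanaged can still have
    # its endpoints downgraded first.
    return managed_and_online or sync_status != consts.SYNC_STATUS_IN_SYNC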
1483
dcmanager/manager/sw_update_manager.py
Normal file
File diff suppressed because it is too large
0
dcmanager/objects/__init__.py
Normal file
76
dcmanager/objects/base.py
Normal file
@ -0,0 +1,76 @@
|
||||
# Copyright (c) 2015 Ericsson AB.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# Copyright (c) 2017 Wind River Systems, Inc.
|
||||
#
|
||||
# The right to copy, distribute, modify, or otherwise make use
|
||||
# of this software may be licensed only pursuant to the terms
|
||||
# of an applicable Wind River license agreement.
|
||||
#
|
||||
|
||||
"""DC Manager common internal object model"""
|
||||
|
||||
from oslo_utils import versionutils
|
||||
from oslo_versionedobjects import base
|
||||
|
||||
from dcmanager import objects
|
||||
|
||||
VersionedObjectDictCompat = base.VersionedObjectDictCompat
|
||||
|
||||
|
||||
class DCManagerObject(base.VersionedObject):
|
||||
"""Base class for dcmanager objects.
|
||||
|
||||
This is the base class for all objects that can be remoted or instantiated
|
||||
via RPC. Simply defining a sub-class of this class would make it remotely
|
||||
instantiatable. Objects should implement the "get" class method and the
|
||||
"save" object method.
|
||||
"""
|
||||
|
||||
OBJ_PROJECT_NAMESPACE = 'dcmanager'
|
||||
VERSION = '1.0'
|
||||
|
||||
@staticmethod
|
||||
def _from_db_object(context, obj, db_obj):
|
||||
if db_obj is None:
|
||||
return None
|
||||
for field in obj.fields:
|
||||
if field == 'metadata':
|
||||
obj['metadata'] = db_obj['meta_data']
|
||||
else:
|
||||
obj[field] = db_obj[field]
|
||||
|
||||
obj._context = context
|
||||
obj.obj_reset_changes()
|
||||
|
||||
return obj
|
||||
|
||||
|
||||
class DCManagerObjectRegistry(base.VersionedObjectRegistry):
|
||||
def registration_hook(self, cls, index):
|
||||
"""Callback for object registration.
|
||||
|
||||
When an object is registered, this function will be called for
|
||||
maintaining dcmanager.objects.$OBJECT as the highest-versioned
|
||||
implementation of a given object.
|
||||
"""
|
||||
version = versionutils.convert_version_to_tuple(cls.VERSION)
|
||||
if not hasattr(objects, cls.obj_name()):
|
||||
setattr(objects, cls.obj_name(), cls)
|
||||
else:
|
||||
curr_version = versionutils.convert_version_to_tuple(
|
||||
getattr(objects, cls.obj_name()).VERSION)
|
||||
if version >= curr_version:
|
||||
setattr(objects, cls.obj_name(), cls)
|
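Putting the base class to work: a sketch of how a dcmanager versioned object would be declared and registered against DCManagerObject and DCManagerObjectRegistry. The Subcloud fields shown are illustrative only and do not claim to match the real model:

# Illustrative object definition; the field set is an assumption made
# for this example, not the definitive Subcloud schema.
from oslo_versionedobjects import fields

from dcmanager.objects import base


@base.DCManagerObjectRegistry.register
class Subcloud(base.DCManagerObject, base.VersionedObjectDictCompat):
    VERSION = '1.0'

    fields = {
        'id': fields.IntegerField(),
        'name': fields.StringField(),
        'management_state': fields.StringField(nullable=True),
    }

Once registered, the registration_hook above makes the class available as dcmanager.objects.Subcloud, always resolving to the highest registered version.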
0
dcmanager/rpc/__init__.py
Normal file
102
dcmanager/rpc/client.py
Normal file
@ -0,0 +1,102 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# Copyright (c) 2017 Wind River Systems, Inc.
|
||||
#
|
||||
# The right to copy, distribute, modify, or otherwise make use
|
||||
# of this software may be licensed only pursuant to the terms
|
||||
# of an applicable Wind River license agreement.
|
||||
#
|
||||
|
||||
'''
|
||||
Client side of the DC Manager RPC API.
|
||||
'''
|
||||
|
||||
from oslo_log import log as logging
|
||||
|
||||
from dcmanager.common import consts
|
||||
from dcmanager.common import messaging
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ManagerClient(object):
|
||||
"""Client side of the DC Manager rpc API.
|
||||
|
||||
Version History:
|
||||
1.0 - Initial version (Mitaka 1.0 release)
|
||||
"""
|
||||
|
||||
BASE_RPC_API_VERSION = '1.0'
|
||||
|
||||
def __init__(self):
|
||||
self._client = messaging.get_rpc_client(
|
||||
topic=consts.TOPIC_DC_MANAGER,
|
||||
version=self.BASE_RPC_API_VERSION)
|
||||
|
||||
@staticmethod
|
||||
def make_msg(method, **kwargs):
|
||||
return method, kwargs
|
||||
|
||||
def call(self, ctxt, msg, version=None):
|
||||
method, kwargs = msg
|
||||
if version is not None:
|
||||
client = self._client.prepare(version=version)
|
||||
else:
|
||||
client = self._client
|
||||
return client.call(ctxt, method, **kwargs)
|
||||
|
||||
def cast(self, ctxt, msg, version=None):
|
||||
method, kwargs = msg
|
||||
if version is not None:
|
||||
client = self._client.prepare(version=version)
|
||||
else:
|
||||
client = self._client
|
||||
return client.cast(ctxt, method, **kwargs)
|
||||
|
||||
def add_subcloud(self, ctxt, payload):
|
||||
return self.call(ctxt, self.make_msg('add_subcloud',
|
||||
payload=payload))
|
||||
|
||||
def delete_subcloud(self, ctxt, subcloud_id):
|
||||
return self.call(ctxt, self.make_msg('delete_subcloud',
|
||||
subcloud_id=subcloud_id))
|
||||
|
||||
def update_subcloud(self, ctxt, subcloud_id, management_state=None,
|
||||
description=None, location=None):
|
||||
return self.call(ctxt, self.make_msg('update_subcloud',
|
||||
subcloud_id=subcloud_id,
|
||||
management_state=management_state,
|
||||
description=description,
|
||||
location=location))
|
||||
|
||||
def update_subcloud_endpoint_status(self, ctxt, subcloud_name=None,
|
||||
endpoint_type=None,
|
||||
sync_status=consts.
|
||||
SYNC_STATUS_OUT_OF_SYNC):
|
||||
return self.cast(ctxt, self.make_msg('update_subcloud_endpoint_status',
|
||||
subcloud_name=subcloud_name,
|
||||
endpoint_type=endpoint_type,
|
||||
sync_status=sync_status))
|
||||
|
||||
def create_sw_update_strategy(self, ctxt, payload):
|
||||
return self.call(ctxt, self.make_msg('create_sw_update_strategy',
|
||||
payload=payload))
|
||||
|
||||
def delete_sw_update_strategy(self, ctxt):
|
||||
return self.call(ctxt, self.make_msg('delete_sw_update_strategy'))
|
||||
|
||||
def apply_sw_update_strategy(self, ctxt):
|
||||
return self.call(ctxt, self.make_msg('apply_sw_update_strategy'))
|
||||
|
||||
def abort_sw_update_strategy(self, ctxt):
|
||||
return self.call(ctxt, self.make_msg('abort_sw_update_strategy'))
|
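Note the asymmetry above: update_subcloud_endpoint_status() is dispatched with cast() (fire-and-forget, no return value) while every other method uses the blocking call(). A short caller sketch; ctxt is assumed to be a valid request context:

# Illustrative use of the client; assumes a running dcmanager service.
from dcmanager.common import consts
from dcmanager.rpc import client as rpc_client

dcmanager_rpc = rpc_client.ManagerClient()

# Blocking: returns the updated subcloud dict from DCManagerService.
subcloud = dcmanager_rpc.update_subcloud(
    ctxt, subcloud_id=1, management_state=consts.MANAGEMENT_MANAGED)

# Non-blocking: queued to the manager and returns immediately.
dcmanager_rpc.update_subcloud_endpoint_status(
    ctxt, subcloud_name='subcloud-1',
    endpoint_type=None,
    sync_status=consts.SYNC_STATUS_UNKNOWN)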
0
dcmanager/tests/__init__.py
Normal file
27
dcmanager/tests/base.py
Normal file
@ -0,0 +1,27 @@
|
||||
# Copyright (c) 2015 Ericsson AB
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# Copyright (c) 2017 Wind River Systems, Inc.
|
||||
#
|
||||
# The right to copy, distribute, modify, or otherwise make use
|
||||
# of this software may be licensed only pursuant to the terms
|
||||
# of an applicable Wind River license agreement.
|
||||
#
|
||||
|
||||
from oslotest import base
|
||||
|
||||
|
||||
class DCManagerTestCase(base.BaseTestCase):
|
||||
"""Test case base class for all unit tests."""
|
@ -0,0 +1,3 @@
|
||||
{
|
||||
"migrate_version_0": ["dcmanager", "/usr/lib/python2.7/site-packages/dcmanager/db/sqlalchemy/migrate_repo", 1]
|
||||
}
|
@ -0,0 +1,6 @@
|
||||
{
|
||||
"strategy_steps_0": [63, 7, 3, "complete", "", "2018-05-18 00:00:14.073539", "2018-05-18 00:03:05.38425", "NULL", "NULL", "2018-05-17 23:50:59.230807", "2018-05-18 00:03:05.389346", "NULL", 0],
|
||||
"strategy_steps_1": [60, "NULL", 1, "complete", "", "2018-05-17 23:51:13.588264", "2018-05-17 23:54:53.791109", "NULL", "NULL", "2018-05-17 23:50:59.223942", "2018-05-17 23:54:53.796026", "NULL", 0],
|
||||
"strategy_steps_2": [62, 6, 2, "complete", "", "2018-05-17 23:55:03.805419", "2018-05-17 23:59:05.153763", "NULL", "NULL", "2018-05-17 23:50:59.228584", "2018-05-17 23:59:05.159172", "NULL", 0],
|
||||
"strategy_steps_3": [61, 1, 2, "complete", "", "2018-05-17 23:55:03.798957", "2018-05-18 00:00:05.185775", "NULL", "NULL", "2018-05-17 23:50:59.226117", "2018-05-18 00:00:05.191001", "NULL", 0]
|
||||
}
|
@ -0,0 +1,17 @@
|
||||
{
|
||||
"subcloud_status_0": [32, 7, "volume", "in-sync", "NULL", "NULL", "2018-05-15 14:45:48.96137", "2018-05-18 18:20:39.773185", "NULL", 0],
|
||||
"subcloud_status_1": [34, 7, "network", "in-sync", "NULL", "NULL", "2018-05-15 14:45:48.965798", "2018-05-18 18:20:40.20996", "NULL", 0],
|
||||
"subcloud_status_2": [33, 7, "compute", "in-sync", "NULL", "NULL", "2018-05-15 14:45:48.96369", "2018-05-18 18:20:40.647117", "NULL", 0],
|
||||
"subcloud_status_3": [31, 7, "platform", "in-sync", "NULL", "NULL", "2018-05-15 14:45:48.959", "2018-05-18 18:20:40.647643", "NULL", 0],
|
||||
"subcloud_status_4": [27, 6, "volume", "in-sync", "NULL", "NULL", "2018-05-15 14:45:12.516212", "2018-05-18 18:20:53.848545", "NULL", 0],
|
||||
"subcloud_status_5": [29, 6, "network", "in-sync", "NULL", "NULL", "2018-05-15 14:45:12.520688", "2018-05-18 18:20:54.318122", "NULL", 0],
|
||||
"subcloud_status_6": [26, 6, "platform", "in-sync", "NULL", "NULL", "2018-05-15 14:45:12.512624", "2018-05-18 18:20:54.800959", "NULL", 0],
|
||||
"subcloud_status_7": [28, 6, "compute", "in-sync", "NULL", "NULL", "2018-05-15 14:45:12.518589", "2018-05-18 18:20:54.801511", "NULL", 0],
|
||||
"subcloud_status_8": [35, 7, "patching", "in-sync", "NULL", "NULL", "2018-05-15 14:45:48.968028", "2018-05-18 18:24:52.93953", "NULL", 0],
|
||||
"subcloud_status_9": [30, 6, "patching", "in-sync", "NULL", "NULL", "2018-05-15 14:45:12.522906", "2018-05-18 18:24:53.403192", "NULL", 0],
|
||||
"subcloud_status_10": [2, 1, "volume", "in-sync", "NULL", "NULL", "2018-04-11 17:01:48.55157", "2018-05-24 00:17:37.344778", "NULL", 0],
|
||||
"subcloud_status_11": [4, 1, "network", "in-sync", "NULL", "NULL", "2018-04-11 17:01:48.555564", "2018-05-24 00:17:37.799951", "NULL", 0],
|
||||
"subcloud_status_12": [1, 1, "platform", "in-sync", "NULL", "NULL", "2018-04-11 17:01:48.548357", "2018-05-24 00:17:38.353609", "NULL", 0],
|
||||
"subcloud_status_13": [3, 1, "compute", "in-sync", "NULL", "NULL", "2018-04-11 17:01:48.553623", "2018-05-24 00:17:38.354505", "NULL", 0],
|
||||
"subcloud_status_14": [5, 1, "patching", "in-sync", "NULL", "NULL", "2018-04-11 17:01:48.557433", "2018-05-24 00:17:42.564325", "NULL", 0]
|
||||
}
|
@ -0,0 +1,5 @@
|
||||
{
|
||||
"subclouds_0": [6, "subcloud-4", "wcp85 subcloud", "Ottawa-PheonixLab-Aisle_3-Rack_C", "18.03", "managed", "online", "fd01:3::0/64", "fd01:3::1", "fd01:3::2", "fd01:3::f", "fd01:1::1", 0, "NULL", "NULL", "2018-05-15 14:45:12.508708", "2018-05-24 10:48:18.090931", "NULL", 0],
|
||||
"subclouds_1": [1, "subcloud-1", "wcp80 subcloud", "Ottawa-PheonixLab-Aisle_3-Rack_B", "18.03", "managed", "online", "fd01:2::0/64", "fd01:2::1", "fd01:2::2", "fd01:2::f", "fd01:1::1", 0, "NULL", "NULL", "2018-04-11 17:01:48.54467", "2018-05-24 00:17:34.74161", "NULL", 0],
|
||||
"subclouds_2": [7, "subcloud-5", "wcp87 subcloud", "Ottawa-PheonixLab-Aisle_4-Rack_B", "18.03", "managed", "online", "fd01:4::0/64", "fd01:4::1", "fd01:4::2", "fd01:4::f", "fd01:1::1", 0, "NULL", "NULL", "2018-05-15 14:45:48.95625", "2018-05-24 10:48:17.907767", "NULL", 0]
|
||||
}
|
@ -0,0 +1,3 @@
|
||||
{
|
||||
"sw_update_opts_default_0": [1, "NULL", "parallel", "parallel", 2, "stop-start", "relaxed", "NULL", "NULL", "NULL", "2018-05-16 13:41:44.330145", "NULL", 0]
|
||||
}
|
@ -0,0 +1,3 @@
|
||||
{
|
||||
"sw_update_strategy_0": [21, "patch", "parallel", 2, true, "complete", "NULL", "NULL", "2018-05-17 23:50:59.221342", "2018-05-18 00:03:14.24641", "NULL", 0]
|
||||
}
|
@ -0,0 +1,30 @@
|
||||
{
|
||||
"assignment_0": ["UserProject", "500b2ba0791e44a780d4dad3c5a1ff31", "8803689162424f60a71e4642e9dc2b9e", "59fa225368524bf6974f76a25050143b", false],
|
||||
"assignment_1": ["UserProject", "81eed996f2a346a3b5282fe2a881db9b", "f3b78df9bbd74d6b8bbf8c5f08427ca7", "59fa225368524bf6974f76a25050143b", false],
|
||||
"assignment_2": ["UserProject", "4abaa160c36846328a482217de0112af", "f3b78df9bbd74d6b8bbf8c5f08427ca7", "59fa225368524bf6974f76a25050143b", false],
|
||||
"assignment_3": ["UserProject", "c5d07e41f78747949fbc1de84168a44f", "f3b78df9bbd74d6b8bbf8c5f08427ca7", "59fa225368524bf6974f76a25050143b", false],
|
||||
"assignment_4": ["UserProject", "63dd0fb409264a43b7dbfe9582b8023d", "f3b78df9bbd74d6b8bbf8c5f08427ca7", "59fa225368524bf6974f76a25050143b", false],
|
||||
"assignment_5": ["UserProject", "6cf3cfc5d26f458daf66802d8e8a2e2a", "f3b78df9bbd74d6b8bbf8c5f08427ca7", "59fa225368524bf6974f76a25050143b", false],
|
||||
"assignment_6": ["UserProject", "a757fb8d624b46b4b10eea1b4d2ca0d2", "f3b78df9bbd74d6b8bbf8c5f08427ca7", "59fa225368524bf6974f76a25050143b", false],
|
||||
"assignment_7": ["UserProject", "8ff17967605a4240b8a6c15ed4bf10f1", "f3b78df9bbd74d6b8bbf8c5f08427ca7", "59fa225368524bf6974f76a25050143b", false],
|
||||
"assignment_8": ["UserProject", "500b2ba0791e44a780d4dad3c5a1ff31", "8803689162424f60a71e4642e9dc2b9e", "9fe2ff9ee4384b1894a90878d3e92bab", false],
|
||||
"assignment_9": ["UserProject", "500b2ba0791e44a780d4dad3c5a1ff31", "8803689162424f60a71e4642e9dc2b9e", "ef2e357b0d4d4bcaaa6ae303c7d58d7e", false],
|
||||
"assignment_10": ["UserProject", "04facea7432848c9bfdf3780bb51612e", "f3b78df9bbd74d6b8bbf8c5f08427ca7", "59fa225368524bf6974f76a25050143b", false],
|
||||
"assignment_11": ["UserProject", "04facea7432848c9bfdf3780bb51612e", "8803689162424f60a71e4642e9dc2b9e", "59fa225368524bf6974f76a25050143b", false],
|
||||
"assignment_12": ["UserProject", "c455073c30044db8908630595699d874", "f3b78df9bbd74d6b8bbf8c5f08427ca7", "59fa225368524bf6974f76a25050143b", false],
|
||||
"assignment_13": ["UserProject", "c455073c30044db8908630595699d874", "f3b78df9bbd74d6b8bbf8c5f08427ca7", "1f62f45b748b4c5db66f97c715ecf1ae", false],
|
||||
"assignment_14": ["UserDomain", "f94aa82e49dd4aaa8bf1c80fee109234", "2423d6c7853145a798e6491ca9de6e2b", "59fa225368524bf6974f76a25050143b", false],
|
||||
"assignment_15": ["UserProject", "146482c0aba84e35a5c1a507cff9db3d", "f3b78df9bbd74d6b8bbf8c5f08427ca7", "59fa225368524bf6974f76a25050143b", false],
|
||||
"assignment_16": ["UserProject", "118a09e72d6a4194af383285cb7e579a", "f3b78df9bbd74d6b8bbf8c5f08427ca7", "59fa225368524bf6974f76a25050143b", false],
|
||||
"assignment_17": ["UserProject", "692bd0a53c414d6dbbd0ba4d6fdb3c49", "f3b78df9bbd74d6b8bbf8c5f08427ca7", "59fa225368524bf6974f76a25050143b", false],
|
||||
"assignment_18": ["UserProject", "5f4d401253a74cc8ab507957b9cafb29", "f3b78df9bbd74d6b8bbf8c5f08427ca7", "59fa225368524bf6974f76a25050143b", false],
|
||||
"assignment_19": ["UserProject", "f1cc67bbf0d84c89a1df3067b538e1b8", "f3b78df9bbd74d6b8bbf8c5f08427ca7", "59fa225368524bf6974f76a25050143b", false],
|
||||
"assignment_20": ["UserProject", "4a2c1f4c8ae942b19e388576e93d1ced", "f3b78df9bbd74d6b8bbf8c5f08427ca7", "59fa225368524bf6974f76a25050143b", false],
|
||||
"assignment_21": ["UserProject", "d1399977957645e4a1e26c1b7b1e6d35", "9008c3fc102040cd8149b5c0d8aa06a3", "9fe2ff9ee4384b1894a90878d3e92bab", false],
|
||||
"assignment_22": ["UserProject", "500b2ba0791e44a780d4dad3c5a1ff31", "9008c3fc102040cd8149b5c0d8aa06a3", "9fe2ff9ee4384b1894a90878d3e92bab", false],
|
||||
"assignment_23": ["UserProject", "5ad8271fc6bc432ab80685945bc5b346", "6ecc44a6b24e4c398dc749f1386b2ced", "9fe2ff9ee4384b1894a90878d3e92bab", false],
|
||||
"assignment_24": ["UserProject", "500b2ba0791e44a780d4dad3c5a1ff31", "6ecc44a6b24e4c398dc749f1386b2ced", "9fe2ff9ee4384b1894a90878d3e92bab", false],
|
||||
"assignment_25": ["UserProject", "73403639b14c40e6b288c0e2cd3707bc", "f3b78df9bbd74d6b8bbf8c5f08427ca7", "59fa225368524bf6974f76a25050143b", false],
|
||||
"assignment_26": ["UserProject", "872e8c1b48c640c59189cf1587bd4e41", "f3b78df9bbd74d6b8bbf8c5f08427ca7", "59fa225368524bf6974f76a25050143b", false],
|
||||
"assignment_27": ["UserProject", "f85b8eca57a441838cfe5a39d33230b5", "f3b78df9bbd74d6b8bbf8c5f08427ca7", "59fa225368524bf6974f76a25050143b", false]
|
||||
}
|
185
dcmanager/tests/data/ipv6_R5_install/keystone/endpoint.json
Normal file
@ -0,0 +1,185 @@
|
||||
{
|
||||
"endpoint_0": ["9785cc7f99b6469ba6fe89bd8d5b9072", "NULL", "admin", "7d48ddb964034eb588e557b976d11cdf", "http://[fd01:1::2]:9292", "{}", true, "SystemController"],
|
||||
"endpoint_1": ["2b627b437d3c4412aa0581cf1b0fc8cb", "NULL", "internal", "7d48ddb964034eb588e557b976d11cdf", "http://[fd01:1::2]:9292", "{}", true, "SystemController"],
|
||||
"endpoint_2": ["171c04c06ec4436daec6604a2ded6e9a", "NULL", "public", "7d48ddb964034eb588e557b976d11cdf", "http://128.224.151.162:9292", "{}", true, "SystemController"],
|
||||
"endpoint_3": ["1645bfec421c4d88898bea1284dc8d89", "NULL", "admin", "c4ae85afaf7b465190d927e11da3eb38", "http://[fd01:1::2]:28774/v2.1/%(tenant_id)s", "{}", true, "SystemController"],
|
||||
"endpoint_4": ["f93ed1fdabb04b7f913da53218a242e1", "NULL", "internal", "c4ae85afaf7b465190d927e11da3eb38", "http://[fd01:1::2]:28774/v2.1/%(tenant_id)s", "{}", true, "SystemController"],
|
||||
"endpoint_5": ["fa55665905be43d3b47472b580726690", "NULL", "public", "c4ae85afaf7b465190d927e11da3eb38", "http://128.224.151.162:28774/v2.1/%(tenant_id)s", "{}", true, "SystemController"],
|
||||
"endpoint_6": ["15b0341250be443287cf4c333bec7ca8", "NULL", "admin", "9754bb0a6cba4ae4b62c1a4e825964a5", "http://[fd01:1::2]:8119/v1.0", "{}", true, "SystemController"],
|
||||
"endpoint_7": ["70ede9a42a8a48f68be78622b9ca8aa7", "NULL", "internal", "9754bb0a6cba4ae4b62c1a4e825964a5", "http://[fd01:1::2]:8119/v1.0", "{}", true, "SystemController"],
|
||||
"endpoint_8": ["42f9c95f20f84bfd9c05f5417eeea7ba", "NULL", "public", "9754bb0a6cba4ae4b62c1a4e825964a5", "http://128.224.151.162:8119/v1.0", "{}", true, "SystemController"],
|
||||
"endpoint_9": ["45be189e3e1448ab92930534a950d5a2", "NULL", "admin", "6cfd11045b1e4c0badcb56f18428ab5b", "http://[fd01:1::2]:29696/", "{}", true, "SystemController"],
|
||||
"endpoint_10": ["4d29f266e3524fd28070ae89d9bcc218", "NULL", "internal", "6cfd11045b1e4c0badcb56f18428ab5b", "http://[fd01:1::2]:29696/", "{}", true, "SystemController"],
|
||||
"endpoint_11": ["a78b26ecbba74db1802293fcfacd584a", "NULL", "public", "6cfd11045b1e4c0badcb56f18428ab5b", "http://128.224.151.162:29696/", "{}", true, "SystemController"],
|
||||
"endpoint_12": ["7a42e40aac4040708fd23b571c650026", "NULL", "admin", "c3677835d8024fa894929ea67b1e9fa0", "http://[fd01:1::2]:25491/", "{}", true, "SystemController"],
"endpoint_13": ["62844e21e90a42278026bca686192401", "NULL", "internal", "c3677835d8024fa894929ea67b1e9fa0", "http://[fd01:1::2]:25491/", "{}", true, "SystemController"],
"endpoint_14": ["7b6dd7d0bb504919952c162bd74bb1ae", "NULL", "public", "c3677835d8024fa894929ea67b1e9fa0", "http://128.224.151.162:25491/", "{}", true, "SystemController"],
"endpoint_15": ["c89c795cff5c45c7adc3b321943351ef", "NULL", "admin", "c3677835d8024fa894929ea67b1e9fa0", "http://[fd01:1::2]:5491", "{}", true, "RegionOne"],
"endpoint_16": ["4971b138f1e04b94aed46af88489fa53", "NULL", "internal", "c3677835d8024fa894929ea67b1e9fa0", "http://[fd01:1::2]:5491", "{}", true, "RegionOne"],
"endpoint_17": ["21aa3f2577f0402190f9a8758fdb2620", "NULL", "public", "c3677835d8024fa894929ea67b1e9fa0", "http://128.224.151.162:15491", "{}", true, "RegionOne"],
"endpoint_18": ["bd7d26e0755d498ebf4c846448936983", "NULL", "admin", "aa803a6f0ab84b68ad13a759b1b29525", "http://[fd01:1::2]:4545", "{}", true, "RegionOne"],
"endpoint_19": ["993f49cf95754c93884fc8eac180eda8", "NULL", "internal", "aa803a6f0ab84b68ad13a759b1b29525", "http://[fd01:1::2]:4545", "{}", true, "RegionOne"],
"endpoint_20": ["2821d84aec434123b039f3d7ab3fbaca", "NULL", "public", "aa803a6f0ab84b68ad13a759b1b29525", "http://128.224.151.162:4545", "{}", true, "RegionOne"],
"endpoint_21": ["8d8e469fd83f4608b025338c8e67e7e1", "NULL", "admin", "86328b93a3c84d63a1be7f7368138bdf", "http://[fd01:1::2]:8777", "{}", true, "RegionOne"],
"endpoint_22": ["2bf8cd48dfee4d339bfba53abccd20b4", "NULL", "internal", "86328b93a3c84d63a1be7f7368138bdf", "http://[fd01:1::2]:8777", "{}", true, "RegionOne"],
"endpoint_23": ["8cdf5229f64c46deb9ebe86d0aa88776", "NULL", "public", "86328b93a3c84d63a1be7f7368138bdf", "http://128.224.151.162:8777", "{}", true, "RegionOne"],
"endpoint_24": ["704878ca10f24d63a33b44139549f6e9", "NULL", "admin", "5fa3efb666204693a0d0ab05fb03140c", "http://[fd01:1::2]:5000/v3", "{}", true, "SystemController"],
"endpoint_25": ["736c4e7c5aa84384944c3907f1c1a6ae", "NULL", "internal", "5fa3efb666204693a0d0ab05fb03140c", "http://[fd01:1::2]:5000/v3", "{}", true, "SystemController"],
"endpoint_26": ["a4537dcfeefe4adeaf37cd100833ec12", "NULL", "public", "5fa3efb666204693a0d0ab05fb03140c", "http://128.224.151.162:5000/v3", "{}", true, "SystemController"],
"endpoint_27": ["8627ce33d93c4b769e295b83a7dc100b", "NULL", "admin", "b3dd49c87dfd40d08d19d2895d2bc9c6", "http://[fd01:1::2]:6385/v1", "{}", true, "RegionOne"],
"endpoint_28": ["8c11b80a30464d7791f4825d9ad14fca", "NULL", "internal", "b3dd49c87dfd40d08d19d2895d2bc9c6", "http://[fd01:1::2]:6385/v1", "{}", true, "RegionOne"],
"endpoint_29": ["3330af049c0547c1a400b8ce7a6f73f3", "NULL", "public", "b3dd49c87dfd40d08d19d2895d2bc9c6", "http://128.224.151.162:6385/v1", "{}", true, "RegionOne"],
"endpoint_30": ["89cb5c408a2a43979a22728abe3b7256", "NULL", "admin", "b3dd49c87dfd40d08d19d2895d2bc9c6", "http://[fd01:1::2]:26385/v1", "{}", true, "SystemController"],
"endpoint_31": ["0fdceccb9c6c476594a22b37fa717007", "NULL", "internal", "b3dd49c87dfd40d08d19d2895d2bc9c6", "http://[fd01:1::2]:26385/v1", "{}", true, "SystemController"],
"endpoint_32": ["5705066ec86c49f0b30f46677824f4a8", "NULL", "public", "b3dd49c87dfd40d08d19d2895d2bc9c6", "http://128.224.151.162:26385/v1", "{}", true, "SystemController"],
"endpoint_33": ["f0f5128e02654c33a6f438533b77ff86", "NULL", "admin", "c5834d3740504a69bf427385319b51a0", "http://[fd01:1::2]:28776/v3/%(tenant_id)s", "{}", true, "SystemController"],
"endpoint_34": ["66c462a7643046aab31d4afe6058200c", "NULL", "internal", "c5834d3740504a69bf427385319b51a0", "http://[fd01:1::2]:28776/v3/%(tenant_id)s", "{}", true, "SystemController"],
"endpoint_35": ["853aab978d8b41a78c381292f55c71f2", "NULL", "public", "c5834d3740504a69bf427385319b51a0", "http://128.224.151.162:28776/v3/%(tenant_id)s", "{}", true, "SystemController"],
"endpoint_36": ["d194fdcc00ea444887ca0666955a929f", "NULL", "admin", "8a5873d1ee914ccbae3c070d578d0d0d", "http://[fd01:1::2]:28776/v2/%(tenant_id)s", "{}", true, "SystemController"],
"endpoint_37": ["7492375879c34231949d75eef5fa7c5b", "NULL", "internal", "8a5873d1ee914ccbae3c070d578d0d0d", "http://[fd01:1::2]:28776/v2/%(tenant_id)s", "{}", true, "SystemController"],
"endpoint_38": ["3be8d1d22d44456c9a48c71bacc77ac9", "NULL", "public", "8a5873d1ee914ccbae3c070d578d0d0d", "http://128.224.151.162:28776/v2/%(tenant_id)s", "{}", true, "SystemController"],
"endpoint_39": ["baabaa1754d14732bcaca91acc6ac7bc", "NULL", "admin", "a15edc66a6394e18bda9f9256e7b470c", "http://[fd01:2::2]:8042", "{}", true, "subcloud-1"],
"endpoint_40": ["bb0598302d7644a8b9af8a39006e9dea", "NULL", "internal", "a15edc66a6394e18bda9f9256e7b470c", "http://[fd01:2::2]:8042", "{}", true, "subcloud-1"],
"endpoint_41": ["388ec02223e5470bbc5b12c0078f1d0e", "NULL", "public", "a15edc66a6394e18bda9f9256e7b470c", "http://128.224.150.18:8042", "{}", true, "subcloud-1"],
"endpoint_42": ["02c9dcf0a5074324b2f0c310bedac5fe", "NULL", "admin", "995cc229e9af44ec81c1c76073f4c733", "http://[fd01:2::2]:8778", "{}", true, "subcloud-1"],
"endpoint_43": ["ee61c87ae43d499a8937bcdf4b02da69", "NULL", "internal", "995cc229e9af44ec81c1c76073f4c733", "http://[fd01:2::2]:8778", "{}", true, "subcloud-1"],
"endpoint_44": ["9f8d2d7164624b0ebf7e6d95118d8657", "NULL", "public", "995cc229e9af44ec81c1c76073f4c733", "http://128.224.150.18:8778", "{}", true, "subcloud-1"],
"endpoint_45": ["bd0005e60acf47a6890f0867f683b209", "NULL", "admin", "ea41162395844d30af3e59efa3e6323e", "http://[fd01:2::2]:8000/v1/%(tenant_id)s", "{}", true, "subcloud-1"],
"endpoint_46": ["d4ef0e1fdb2f4fa885c8c8a6b878340e", "NULL", "internal", "ea41162395844d30af3e59efa3e6323e", "http://[fd01:2::2]:8000/v1/%(tenant_id)s", "{}", true, "subcloud-1"],
"endpoint_47": ["b9e4375f8b64466ca7b8c11f3bfcd335", "NULL", "public", "ea41162395844d30af3e59efa3e6323e", "http://128.224.150.18:8000/v1/%(tenant_id)s", "{}", true, "subcloud-1"],
"endpoint_48": ["fb7b4a9155c64e75801ba11955798fb5", "NULL", "admin", "6cfd11045b1e4c0badcb56f18428ab5b", "http://[fd01:2::2]:9696", "{}", true, "subcloud-1"],
"endpoint_49": ["773a9d739bbd4a03ba401c46225e412d", "NULL", "internal", "6cfd11045b1e4c0badcb56f18428ab5b", "http://[fd01:2::2]:9696", "{}", true, "subcloud-1"],
"endpoint_50": ["680c06a7db8e4457bb5d8b62810f98f5", "NULL", "public", "6cfd11045b1e4c0badcb56f18428ab5b", "http://128.224.150.18:9696", "{}", true, "subcloud-1"],
"endpoint_51": ["e07a8bbabd5343fa877de9c2425f662e", "NULL", "admin", "86328b93a3c84d63a1be7f7368138bdf", "http://[fd01:2::2]:8777", "{}", true, "subcloud-1"],
"endpoint_52": ["c7182c60c44c40c2945bbe3e288c2ff6", "NULL", "internal", "86328b93a3c84d63a1be7f7368138bdf", "http://[fd01:2::2]:8777", "{}", true, "subcloud-1"],
"endpoint_53": ["9bc906a5fcb84b96ba7f196b01119077", "NULL", "public", "86328b93a3c84d63a1be7f7368138bdf", "http://128.224.150.18:8777", "{}", true, "subcloud-1"],
"endpoint_54": ["40efcf0cf1934896ac204fff9599181f", "NULL", "admin", "d6f2ef7609f44c9aa0b40b15f9f93139", "http://[fd01:2::2]:8977", "{}", true, "subcloud-1"],
"endpoint_55": ["3c56e9f939aa4f48b48e0bd63a7e0e2d", "NULL", "admin", "aa803a6f0ab84b68ad13a759b1b29525", "http://[fd01:2::2]:4545", "{}", true, "subcloud-1"],
"endpoint_56": ["e0dc056cf41d48ada2a5128ff6d13c80", "NULL", "admin", "a15edc66a6394e18bda9f9256e7b470c", "http://[fd01:3::2]:8042", "{}", true, "subcloud-4"],
"endpoint_57": ["9c3669ebb2864fe49b00555a4cb720bf", "NULL", "admin", "995cc229e9af44ec81c1c76073f4c733", "http://[fd01:3::2]:8778", "{}", true, "subcloud-4"],
"endpoint_58": ["43d53656ac8e46e7875237e202e99896", "NULL", "public", "995cc229e9af44ec81c1c76073f4c733", "http://128.224.150.224:8778", "{}", true, "subcloud-4"],
"endpoint_59": ["1b6649177bbf4793ae70e09badeaf1fa", "NULL", "internal", "ea41162395844d30af3e59efa3e6323e", "http://[fd01:3::2]:8000/v1/%(tenant_id)s", "{}", true, "subcloud-4"],
"endpoint_60": ["19a92fce510743f9939a9a22299fc6ff", "NULL", "public", "6cfd11045b1e4c0badcb56f18428ab5b", "http://128.224.150.224:9696", "{}", true, "subcloud-4"],
"endpoint_61": ["f802be7e04a64c768150f0416e113fe1", "NULL", "admin", "aa803a6f0ab84b68ad13a759b1b29525", "http://[fd01:3::2]:4545", "{}", true, "subcloud-4"],
"endpoint_62": ["513e4e6a0e4840dd8de65742a1b0634d", "NULL", "internal", "aa803a6f0ab84b68ad13a759b1b29525", "http://[fd01:3::2]:4545", "{}", true, "subcloud-4"],
"endpoint_63": ["c17d425b28aa4589a42abf0c3ae89865", "NULL", "internal", "86328b93a3c84d63a1be7f7368138bdf", "http://[fd01:3::2]:8777", "{}", true, "subcloud-4"],
"endpoint_64": ["71b408d04b984a958090054093c6330a", "NULL", "public", "5fa3efb666204693a0d0ab05fb03140c", "http://128.224.151.162:5000/v3", "{}", true, "subcloud-4"],
"endpoint_65": ["549e1bd55f2e4218b5f2a03bc9859bf6", "NULL", "public", "d6f2ef7609f44c9aa0b40b15f9f93139", "http://128.224.150.224:8977", "{}", true, "subcloud-4"],
"endpoint_66": ["89606a1804a54f17ae67659d481fde20", "NULL", "internal", "7d48ddb964034eb588e557b976d11cdf", "http://[fd01:3::2]:9292", "{}", true, "subcloud-4"],
"endpoint_67": ["ecae4ccc0af242a98d978ff527e7e81b", "NULL", "admin", "b3dd49c87dfd40d08d19d2895d2bc9c6", "http://[fd01:3::2]:6385/v1", "{}", true, "subcloud-4"],
"endpoint_68": ["9b503e8a198f4221a716568dfe0a497f", "NULL", "public", "b3dd49c87dfd40d08d19d2895d2bc9c6", "http://128.224.150.224:6385/v1", "{}", true, "subcloud-4"],
"endpoint_69": ["f2209d54eb064311aeacb96a853e5867", "NULL", "public", "7d48ddb964034eb588e557b976d11cdf", "http://128.224.151.66:9292", "{}", true, "subcloud-5"],
"endpoint_70": ["8c761aabddf7450c95fcee0dd5f38bee", "NULL", "public", "995cc229e9af44ec81c1c76073f4c733", "http://128.224.151.66:8778", "{}", true, "subcloud-5"],
"endpoint_71": ["8c0e3189cf5e49f09dc3566321603e85", "NULL", "public", "ea41162395844d30af3e59efa3e6323e", "http://128.224.151.66:8000/v1/%(tenant_id)s", "{}", true, "subcloud-5"],
"endpoint_72": ["71575d7116ae4510b9115476a21bbb1b", "NULL", "admin", "c3677835d8024fa894929ea67b1e9fa0", "http://[fd01:4::2]:5491", "{}", true, "subcloud-5"],
"endpoint_73": ["73972411365647f2be7b7f6b4d302759", "NULL", "admin", "c4ae85afaf7b465190d927e11da3eb38", "http://[fd01:4::2]:8774/v2.1/%(tenant_id)s", "{}", true, "subcloud-5"],
"endpoint_74": ["52643da5712d4555a953d9f03b2bf332", "NULL", "admin", "aa803a6f0ab84b68ad13a759b1b29525", "http://[fd01:4::2]:4545", "{}", true, "subcloud-5"],
"endpoint_75": ["a51e43d1eb9b47c980c45efd8bac4c87", "NULL", "admin", "6cfd11045b1e4c0badcb56f18428ab5b", "http://[fd01:4::2]:9696", "{}", true, "subcloud-5"],
"endpoint_76": ["56ef199db23c43ecae7d94cc7222c854", "NULL", "internal", "6cfd11045b1e4c0badcb56f18428ab5b", "http://[fd01:4::2]:9696", "{}", true, "subcloud-5"],
"endpoint_77": ["ed96c299010b48eaa8eddee5cbf9df5e", "NULL", "public", "6cfd11045b1e4c0badcb56f18428ab5b", "http://128.224.151.66:9696", "{}", true, "subcloud-5"],
"endpoint_78": ["9fdb807e84124790b8c3ece35d15a0ef", "NULL", "public", "5fa3efb666204693a0d0ab05fb03140c", "http://128.224.151.162:5000/v3", "{}", true, "subcloud-5"],
"endpoint_79": ["ffa6c1cd10a94194a02e36a4937c343c", "NULL", "public", "d6f2ef7609f44c9aa0b40b15f9f93139", "http://128.224.151.66:8977", "{}", true, "subcloud-5"],
"endpoint_80": ["699aa011ead14997ac6f56d83ed95a8c", "NULL", "admin", "b3dd49c87dfd40d08d19d2895d2bc9c6", "http://[fd01:4::2]:6385/v1", "{}", true, "subcloud-5"],
"endpoint_81": ["24418e1fb27c4bd19138bd60ff84339b", "NULL", "internal", "c5834d3740504a69bf427385319b51a0", "http://[fd01:4::2]:8776/v3/%(tenant_id)s", "{}", true, "subcloud-5"],
"endpoint_82": ["f839413f6073428999df122e5e39c5a9", "NULL", "internal", "8a5873d1ee914ccbae3c070d578d0d0d", "http://[fd01:4::2]:8776/v2/%(tenant_id)s", "{}", true, "subcloud-5"],
"endpoint_83": ["6730c5f0390a4e66b24d87db41d0a0f6", "NULL", "public", "567f8aafa7844256b03e86655fa2bd3e", "http://128.224.151.66:8776/v1/%(tenant_id)s", "{}", true, "subcloud-5"],
"endpoint_84": ["22c6413d42234c0a98e91ed342bf7db7", "NULL", "internal", "c5834d3740504a69bf427385319b51a0", "http://[fd01:3::2]:8776/v3/%(tenant_id)s", "{}", true, "subcloud-4"],
"endpoint_85": ["98a5b15ccb424826922f5c919c5690a8", "NULL", "admin", "8a5873d1ee914ccbae3c070d578d0d0d", "http://[fd01:3::2]:8776/v2/%(tenant_id)s", "{}", true, "subcloud-4"],
"endpoint_86": ["da110ac1f03c4b9e817463225a4b2b83", "NULL", "public", "8a5873d1ee914ccbae3c070d578d0d0d", "http://128.224.150.224:8776/v2/%(tenant_id)s", "{}", true, "subcloud-4"],
"endpoint_87": ["f5167f307d1f4adc84e29d59b3fcbf7b", "NULL", "public", "567f8aafa7844256b03e86655fa2bd3e", "http://128.224.150.224:8776/v1/%(tenant_id)s", "{}", true, "subcloud-4"],
"endpoint_88": ["b9aebe07a0e64367931946c584657186", "NULL", "admin", "c3677835d8024fa894929ea67b1e9fa0", "http://[fd01:2::2]:5491", "{}", true, "subcloud-1"],
"endpoint_89": ["1b58cd57070740809875fb0ea84d1ed4", "NULL", "internal", "c3677835d8024fa894929ea67b1e9fa0", "http://[fd01:2::2]:5491", "{}", true, "subcloud-1"],
"endpoint_90": ["127083b5a58641778f84bd63378f14a3", "NULL", "public", "c3677835d8024fa894929ea67b1e9fa0", "http://128.224.150.18:15491", "{}", true, "subcloud-1"],
"endpoint_91": ["9849dbabbdd9472598b3c8001f42dd3f", "NULL", "admin", "5fa3efb666204693a0d0ab05fb03140c", "http://[fd01:1::2]:5000/v3", "{}", true, "subcloud-1"],
"endpoint_92": ["9b8814b1121a44948ca007a27982ee55", "NULL", "internal", "5fa3efb666204693a0d0ab05fb03140c", "http://[fd01:1::2]:5000/v3", "{}", true, "subcloud-1"],
"endpoint_93": ["f456d7f703e242139355140c0617c619", "NULL", "public", "5fa3efb666204693a0d0ab05fb03140c", "http://128.224.151.162:5000/v3", "{}", true, "subcloud-1"],
"endpoint_94": ["b2041b71fc2244dba94dea647dd35b7e", "NULL", "internal", "d6f2ef7609f44c9aa0b40b15f9f93139", "http://[fd01:2::2]:8977", "{}", true, "subcloud-1"],
"endpoint_95": ["29efd6682e1d435d807e991075bcf125", "NULL", "public", "d6f2ef7609f44c9aa0b40b15f9f93139", "http://128.224.150.18:8977", "{}", true, "subcloud-1"],
"endpoint_96": ["05731150463b47699ab8fef01b81d464", "NULL", "internal", "aa803a6f0ab84b68ad13a759b1b29525", "http://[fd01:2::2]:4545", "{}", true, "subcloud-1"],
"endpoint_97": ["5d347c0e475d40d385024705bb78c0d5", "NULL", "public", "aa803a6f0ab84b68ad13a759b1b29525", "http://128.224.150.18:4545", "{}", true, "subcloud-1"],
"endpoint_98": ["39dd7a4d128549e3ab1d65d04b2bd862", "NULL", "admin", "0efe25ad76f244e1bca9f6975cfe8b83", "http://[fd01:2::2]:8004/v1/%(tenant_id)s", "{}", true, "subcloud-1"],
"endpoint_99": ["497de517819045df9ca739bc3e121c89", "NULL", "internal", "0efe25ad76f244e1bca9f6975cfe8b83", "http://[fd01:2::2]:8004/v1/%(tenant_id)s", "{}", true, "subcloud-1"],
"endpoint_100": ["b772d9d3df6c446e8c0de2611c5627aa", "NULL", "public", "0efe25ad76f244e1bca9f6975cfe8b83", "http://128.224.150.18:8004/v1/%(tenant_id)s", "{}", true, "subcloud-1"],
"endpoint_101": ["7c27d2a668244dd8b35573df61cde0a0", "NULL", "admin", "7d48ddb964034eb588e557b976d11cdf", "http://[fd01:2::2]:9292", "{}", true, "subcloud-1"],
"endpoint_102": ["c006181bd2c34abca079453ddc862b78", "NULL", "internal", "7d48ddb964034eb588e557b976d11cdf", "http://[fd01:2::2]:9292", "{}", true, "subcloud-1"],
"endpoint_103": ["e884ae6f48fc4d2498e8735e1de545aa", "NULL", "public", "7d48ddb964034eb588e557b976d11cdf", "http://128.224.150.18:9292", "{}", true, "subcloud-1"],
"endpoint_104": ["2e5e16ddea9b43c4a6be55c7c57e762c", "NULL", "admin", "b3dd49c87dfd40d08d19d2895d2bc9c6", "http://[fd01:2::2]:6385/v1", "{}", true, "subcloud-1"],
"endpoint_105": ["e9820b3b3abe48f98548d4bc113bc905", "NULL", "internal", "b3dd49c87dfd40d08d19d2895d2bc9c6", "http://[fd01:2::2]:6385/v1", "{}", true, "subcloud-1"],
"endpoint_106": ["1506e9230fc948b2b267eec824bd97ae", "NULL", "public", "b3dd49c87dfd40d08d19d2895d2bc9c6", "http://128.224.150.18:6385/v1", "{}", true, "subcloud-1"],
"endpoint_107": ["6a5596c56479437a9c2fd2a78fe54d22", "NULL", "admin", "c4ae85afaf7b465190d927e11da3eb38", "http://[fd01:2::2]:8774/v2.1/%(tenant_id)s", "{}", true, "subcloud-1"],
"endpoint_108": ["af690a3d102b484fb5cf760ad143689a", "NULL", "internal", "c4ae85afaf7b465190d927e11da3eb38", "http://[fd01:2::2]:8774/v2.1/%(tenant_id)s", "{}", true, "subcloud-1"],
"endpoint_109": ["97ed35a6b02c46cfbeaddf20e6a1bd48", "NULL", "public", "c4ae85afaf7b465190d927e11da3eb38", "http://128.224.150.18:8774/v2.1/%(tenant_id)s", "{}", true, "subcloud-1"],
"endpoint_110": ["3e74d939f3684f6892640c5d6e6406d1", "NULL", "internal", "a15edc66a6394e18bda9f9256e7b470c", "http://[fd01:3::2]:8042", "{}", true, "subcloud-4"],
"endpoint_111": ["41b54ad4c80d45449f507db76083fb80", "NULL", "internal", "995cc229e9af44ec81c1c76073f4c733", "http://[fd01:3::2]:8778", "{}", true, "subcloud-4"],
"endpoint_112": ["0390cb283a53403e9545651a60bf348e", "NULL", "public", "ea41162395844d30af3e59efa3e6323e", "http://128.224.150.224:8000/v1/%(tenant_id)s", "{}", true, "subcloud-4"],
"endpoint_113": ["7c3fabf70c174ea4a4fe6a0c4712e6bf", "NULL", "admin", "6cfd11045b1e4c0badcb56f18428ab5b", "http://[fd01:3::2]:9696", "{}", true, "subcloud-4"],
"endpoint_114": ["0a338aef7a8b404aa0867e5a0205dc58", "NULL", "admin", "c3677835d8024fa894929ea67b1e9fa0", "http://[fd01:3::2]:5491", "{}", true, "subcloud-4"],
"endpoint_115": ["09dde1c9499e4ba198a44c18e74f0a09", "NULL", "public", "aa803a6f0ab84b68ad13a759b1b29525", "http://128.224.150.224:4545", "{}", true, "subcloud-4"],
"endpoint_116": ["56a76e94e60c4fd899a773001b272e47", "NULL", "public", "86328b93a3c84d63a1be7f7368138bdf", "http://128.224.150.224:8777", "{}", true, "subcloud-4"],
"endpoint_117": ["2a7cd8d550d94b90b93a15739fb5f79a", "NULL", "admin", "5fa3efb666204693a0d0ab05fb03140c", "http://[fd01:1::2]:5000/v3", "{}", true, "subcloud-4"],
"endpoint_118": ["4dc1caf31cff44ccb8585fe4f200a32c", "NULL", "admin", "d6f2ef7609f44c9aa0b40b15f9f93139", "http://[fd01:3::2]:8977", "{}", true, "subcloud-4"],
"endpoint_119": ["3fdab757ab134146bbd68c4521af397b", "NULL", "admin", "0efe25ad76f244e1bca9f6975cfe8b83", "http://[fd01:3::2]:8004/v1/%(tenant_id)s", "{}", true, "subcloud-4"],
"endpoint_120": ["17846cae6aaa41e6be9f26f30adcb6d7", "NULL", "internal", "0efe25ad76f244e1bca9f6975cfe8b83", "http://[fd01:3::2]:8004/v1/%(tenant_id)s", "{}", true, "subcloud-4"],
"endpoint_121": ["e67b03ac766f49059729a99a2754defa", "NULL", "public", "7d48ddb964034eb588e557b976d11cdf", "http://128.224.150.224:9292", "{}", true, "subcloud-4"],
"endpoint_122": ["81b25e9b817a46fd9654b1f478a9b5ce", "NULL", "internal", "b3dd49c87dfd40d08d19d2895d2bc9c6", "http://[fd01:3::2]:6385/v1", "{}", true, "subcloud-4"],
"endpoint_123": ["3cde2d8d8b5748f1966f06548cf65ec9", "NULL", "admin", "7d48ddb964034eb588e557b976d11cdf", "http://[fd01:4::2]:9292", "{}", true, "subcloud-5"],
"endpoint_124": ["3e817212004241988cb0731f2f79ef76", "NULL", "admin", "995cc229e9af44ec81c1c76073f4c733", "http://[fd01:4::2]:8778", "{}", true, "subcloud-5"],
"endpoint_125": ["1ed1542248fc483fbc7ce26ca60ac00b", "NULL", "admin", "ea41162395844d30af3e59efa3e6323e", "http://[fd01:4::2]:8000/v1/%(tenant_id)s", "{}", true, "subcloud-5"],
"endpoint_126": ["4136388469804921a749485b44ebc90b", "NULL", "admin", "a15edc66a6394e18bda9f9256e7b470c", "http://[fd01:4::2]:8042", "{}", true, "subcloud-5"],
"endpoint_127": ["745d8b18ddae4353992dc123bf79ca66", "NULL", "internal", "a15edc66a6394e18bda9f9256e7b470c", "http://[fd01:4::2]:8042", "{}", true, "subcloud-5"],
"endpoint_128": ["38d157631f04457d8a9e1e1a55e8879b", "NULL", "internal", "c3677835d8024fa894929ea67b1e9fa0", "http://[fd01:4::2]:5491", "{}", true, "subcloud-5"],
"endpoint_129": ["b58d3551b31042dc8f1eeab3db053b36", "NULL", "internal", "c4ae85afaf7b465190d927e11da3eb38", "http://[fd01:4::2]:8774/v2.1/%(tenant_id)s", "{}", true, "subcloud-5"],
"endpoint_130": ["b27ddfa5a6c940c180730d70c02b448e", "NULL", "internal", "aa803a6f0ab84b68ad13a759b1b29525", "http://[fd01:4::2]:4545", "{}", true, "subcloud-5"],
"endpoint_131": ["4d69300126184d108511e4d9a1ae829c", "NULL", "admin", "5fa3efb666204693a0d0ab05fb03140c", "http://[fd01:1::2]:5000/v3", "{}", true, "subcloud-5"],
"endpoint_132": ["03e1eb2e1f6a4041a3bd721c25bca9cd", "NULL", "admin", "d6f2ef7609f44c9aa0b40b15f9f93139", "http://[fd01:4::2]:8977", "{}", true, "subcloud-5"],
"endpoint_133": ["ba748586c1e74328a95d240566abd5da", "NULL", "admin", "86328b93a3c84d63a1be7f7368138bdf", "http://[fd01:4::2]:8777", "{}", true, "subcloud-5"],
"endpoint_134": ["f1c694830a79479fb6efa8bc20af509d", "NULL", "internal", "86328b93a3c84d63a1be7f7368138bdf", "http://[fd01:4::2]:8777", "{}", true, "subcloud-5"],
"endpoint_135": ["83fda4af04c9475ba8906e4d1e25fc20", "NULL", "public", "86328b93a3c84d63a1be7f7368138bdf", "http://128.224.151.66:8777", "{}", true, "subcloud-5"],
"endpoint_136": ["b649a5f6c14b4f9db37d416b9044ac73", "NULL", "internal", "0efe25ad76f244e1bca9f6975cfe8b83", "http://[fd01:4::2]:8004/v1/%(tenant_id)s", "{}", true, "subcloud-5"],
"endpoint_137": ["46c653ca16294222be50ee0c6a530943", "NULL", "public", "0efe25ad76f244e1bca9f6975cfe8b83", "http://128.224.151.66:8004/v1/%(tenant_id)s", "{}", true, "subcloud-5"],
"endpoint_138": ["22c5a12627d54fb49d6ea7ae28efc60d", "NULL", "public", "b3dd49c87dfd40d08d19d2895d2bc9c6", "http://128.224.151.66:6385/v1", "{}", true, "subcloud-5"],
"endpoint_139": ["8170c8c7bd1f42f285b6eceb9a024134", "NULL", "public", "c5834d3740504a69bf427385319b51a0", "http://128.224.151.66:8776/v3/%(tenant_id)s", "{}", true, "subcloud-5"],
"endpoint_140": ["41f38595b5f249b7ad9cc6fdf24d1f7c", "NULL", "admin", "567f8aafa7844256b03e86655fa2bd3e", "http://[fd01:4::2]:8776/v1/%(tenant_id)s", "{}", true, "subcloud-5"],
"endpoint_141": ["adaa3140e68f4ea4a7e377a5a5b640bc", "NULL", "admin", "c5834d3740504a69bf427385319b51a0", "http://[fd01:3::2]:8776/v3/%(tenant_id)s", "{}", true, "subcloud-4"],
"endpoint_142": ["33b40962242e4afdb3ef6787af04e5a3", "NULL", "public", "c5834d3740504a69bf427385319b51a0", "http://128.224.150.224:8776/v3/%(tenant_id)s", "{}", true, "subcloud-4"],
"endpoint_143": ["744d6951d82e47dc9fc48763d1b18d60", "NULL", "internal", "8a5873d1ee914ccbae3c070d578d0d0d", "http://[fd01:3::2]:8776/v2/%(tenant_id)s", "{}", true, "subcloud-4"],
"endpoint_144": ["1dd34422beca4c2bb027d6e11a40b2c4", "NULL", "admin", "567f8aafa7844256b03e86655fa2bd3e", "http://[fd01:3::2]:8776/v1/%(tenant_id)s", "{}", true, "subcloud-4"],
"endpoint_145": ["fcf6a770edc2486aa11e4b119e5de873", "NULL", "internal", "567f8aafa7844256b03e86655fa2bd3e", "http://[fd01:3::2]:8776/v1/%(tenant_id)s", "{}", true, "subcloud-4"],
"endpoint_146": ["ef47b072365f475a8a56eeff153264ce", "NULL", "admin", "c5834d3740504a69bf427385319b51a0", "http://[fd01:2::2]:8776/v3/%(tenant_id)s", "{}", true, "subcloud-1"],
"endpoint_147": ["b461104aa21049aca0a71f8d4ee862e3", "NULL", "internal", "c5834d3740504a69bf427385319b51a0", "http://[fd01:2::2]:8776/v3/%(tenant_id)s", "{}", true, "subcloud-1"],
"endpoint_148": ["67f79a87a6954f489dd9789e844e5998", "NULL", "public", "c5834d3740504a69bf427385319b51a0", "http://128.224.150.18:8776/v3/%(tenant_id)s", "{}", true, "subcloud-1"],
"endpoint_149": ["72264b75ad9e46578d882d9d96301188", "NULL", "admin", "567f8aafa7844256b03e86655fa2bd3e", "http://[fd01:2::2]:8776/v1/%(tenant_id)s", "{}", true, "subcloud-1"],
"endpoint_150": ["be6af8a2b8a5469b9dc8f2db2e2fc787", "NULL", "internal", "567f8aafa7844256b03e86655fa2bd3e", "http://[fd01:2::2]:8776/v1/%(tenant_id)s", "{}", true, "subcloud-1"],
"endpoint_151": ["fa360cf6f5684c34be7c3ab5998b3a2c", "NULL", "public", "567f8aafa7844256b03e86655fa2bd3e", "http://128.224.150.18:8776/v1/%(tenant_id)s", "{}", true, "subcloud-1"],
"endpoint_152": ["37b87d149089406f81dba376f5309357", "NULL", "admin", "8a5873d1ee914ccbae3c070d578d0d0d", "http://[fd01:2::2]:8776/v2/%(tenant_id)s", "{}", true, "subcloud-1"],
"endpoint_153": ["19e9a38d9db34ce1ba8953300bc32e65", "NULL", "internal", "8a5873d1ee914ccbae3c070d578d0d0d", "http://[fd01:2::2]:8776/v2/%(tenant_id)s", "{}", true, "subcloud-1"],
"endpoint_154": ["b9fa7c1bc44f495e9ff6dda810b841a1", "NULL", "public", "8a5873d1ee914ccbae3c070d578d0d0d", "http://128.224.150.18:8776/v2/%(tenant_id)s", "{}", true, "subcloud-1"],
"endpoint_155": ["0a32ffd450814d7599d95b9d006cd42c", "NULL", "public", "a15edc66a6394e18bda9f9256e7b470c", "http://128.224.150.224:8042", "{}", true, "subcloud-4"],
"endpoint_156": ["d652da20f7834c53b8bdcd0e9e6e2fb4", "NULL", "admin", "ea41162395844d30af3e59efa3e6323e", "http://[fd01:3::2]:8000/v1/%(tenant_id)s", "{}", true, "subcloud-4"],
"endpoint_157": ["c6667081743646efbfe6e0ab888b3eb2", "NULL", "internal", "6cfd11045b1e4c0badcb56f18428ab5b", "http://[fd01:3::2]:9696", "{}", true, "subcloud-4"],
"endpoint_158": ["edaeb34e5038485786df22d7f6360036", "NULL", "internal", "c3677835d8024fa894929ea67b1e9fa0", "http://[fd01:3::2]:5491", "{}", true, "subcloud-4"],
"endpoint_159": ["31e0293920404baf94390a6652c9ebff", "NULL", "public", "c3677835d8024fa894929ea67b1e9fa0", "http://128.224.150.224:15491", "{}", true, "subcloud-4"],
"endpoint_160": ["1cbf691bc9c84e3f9f6cc79246660bf7", "NULL", "admin", "86328b93a3c84d63a1be7f7368138bdf", "http://[fd01:3::2]:8777", "{}", true, "subcloud-4"],
"endpoint_161": ["ba7c54b0b7ac4cfdb558665fdd731c28", "NULL", "internal", "5fa3efb666204693a0d0ab05fb03140c", "http://[fd01:1::2]:5000/v3", "{}", true, "subcloud-4"],
"endpoint_162": ["e6f1e6f998674d13b6b8fa6a843e49f9", "NULL", "internal", "d6f2ef7609f44c9aa0b40b15f9f93139", "http://[fd01:3::2]:8977", "{}", true, "subcloud-4"],
"endpoint_163": ["1f3a25620c2b4e74b0348733a190fff1", "NULL", "public", "0efe25ad76f244e1bca9f6975cfe8b83", "http://128.224.150.224:8004/v1/%(tenant_id)s", "{}", true, "subcloud-4"],
"endpoint_164": ["1daa3c5f75184962868ddd72d1b62529", "NULL", "admin", "7d48ddb964034eb588e557b976d11cdf", "http://[fd01:3::2]:9292", "{}", true, "subcloud-4"],
"endpoint_165": ["be6a2850cec44595b38eb6940baab1a6", "NULL", "admin", "c4ae85afaf7b465190d927e11da3eb38", "http://[fd01:3::2]:8774/v2.1/%(tenant_id)s", "{}", true, "subcloud-4"],
"endpoint_166": ["39c119210e864123b8b6c845be341074", "NULL", "internal", "c4ae85afaf7b465190d927e11da3eb38", "http://[fd01:3::2]:8774/v2.1/%(tenant_id)s", "{}", true, "subcloud-4"],
"endpoint_167": ["8f5980c8301146368c7be4f2a2e41cac", "NULL", "public", "c4ae85afaf7b465190d927e11da3eb38", "http://128.224.150.224:8774/v2.1/%(tenant_id)s", "{}", true, "subcloud-4"],
"endpoint_168": ["9953dc666ff24502b03cfb69c408f442", "NULL", "internal", "7d48ddb964034eb588e557b976d11cdf", "http://[fd01:4::2]:9292", "{}", true, "subcloud-5"],
"endpoint_169": ["368c49d56241450188857d2e7cd757d3", "NULL", "internal", "995cc229e9af44ec81c1c76073f4c733", "http://[fd01:4::2]:8778", "{}", true, "subcloud-5"],
"endpoint_170": ["33f33be90a1442839aef4f50afca45f9", "NULL", "internal", "ea41162395844d30af3e59efa3e6323e", "http://[fd01:4::2]:8000/v1/%(tenant_id)s", "{}", true, "subcloud-5"],
"endpoint_171": ["523fe3578d634f17a81e04cb0e3c48c0", "NULL", "public", "a15edc66a6394e18bda9f9256e7b470c", "http://128.224.151.66:8042", "{}", true, "subcloud-5"],
"endpoint_172": ["e2d534e4e8804d0ebdb175e1f38f1cf2", "NULL", "public", "c3677835d8024fa894929ea67b1e9fa0", "http://128.224.151.66:15491", "{}", true, "subcloud-5"],
"endpoint_173": ["dc4cc20db20e4a08be988012f3b53efa", "NULL", "public", "c4ae85afaf7b465190d927e11da3eb38", "http://128.224.151.66:8774/v2.1/%(tenant_id)s", "{}", true, "subcloud-5"],
"endpoint_174": ["135d036dc1804366873f238f193d0ee4", "NULL", "public", "aa803a6f0ab84b68ad13a759b1b29525", "http://128.224.151.66:4545", "{}", true, "subcloud-5"],
"endpoint_175": ["dbcf6bf6bcdf409ba2333370415fbd38", "NULL", "internal", "5fa3efb666204693a0d0ab05fb03140c", "http://[fd01:1::2]:5000/v3", "{}", true, "subcloud-5"],
"endpoint_176": ["a6a2033b69a34a04bb5a1d944c764401", "NULL", "internal", "d6f2ef7609f44c9aa0b40b15f9f93139", "http://[fd01:4::2]:8977", "{}", true, "subcloud-5"],
"endpoint_177": ["efb480b1d3374e0c97e688c1d5946d4d", "NULL", "admin", "0efe25ad76f244e1bca9f6975cfe8b83", "http://[fd01:4::2]:8004/v1/%(tenant_id)s", "{}", true, "subcloud-5"],
"endpoint_178": ["d526cf9c4c1c48be8d9770e8e261de07", "NULL", "internal", "b3dd49c87dfd40d08d19d2895d2bc9c6", "http://[fd01:4::2]:6385/v1", "{}", true, "subcloud-5"],
"endpoint_179": ["4881245fbcb5474ba807b60b1cab4c7f", "NULL", "admin", "c5834d3740504a69bf427385319b51a0", "http://[fd01:4::2]:8776/v3/%(tenant_id)s", "{}", true, "subcloud-5"],
"endpoint_180": ["e27f671897e54872876470d1880a1ca3", "NULL", "admin", "8a5873d1ee914ccbae3c070d578d0d0d", "http://[fd01:4::2]:8776/v2/%(tenant_id)s", "{}", true, "subcloud-5"],
"endpoint_181": ["3530851c3c7444d981ac145e3f6545d7", "NULL", "public", "8a5873d1ee914ccbae3c070d578d0d0d", "http://128.224.151.66:8776/v2/%(tenant_id)s", "{}", true, "subcloud-5"],
"endpoint_182": ["9157998f1a8a4d54ba679b31ac3eac0c", "NULL", "internal", "567f8aafa7844256b03e86655fa2bd3e", "http://[fd01:4::2]:8776/v1/%(tenant_id)s", "{}", true, "subcloud-5"]
}
@ -0,0 +1,24 @@
{
"local_user_0": [3, "8ff17967605a4240b8a6c15ed4bf10f1", "default", "panko", 0, "NULL"],
"local_user_1": [4, "c5d07e41f78747949fbc1de84168a44f", "default", "dcorch", 0, "NULL"],
"local_user_2": [5, "5f4d401253a74cc8ab507957b9cafb29", "default", "neutron", 0, "NULL"],
"local_user_3": [6, "4abaa160c36846328a482217de0112af", "default", "heat", 0, "NULL"],
"local_user_4": [7, "692bd0a53c414d6dbbd0ba4d6fdb3c49", "default", "vim", 0, "NULL"],
"local_user_5": [8, "6cf3cfc5d26f458daf66802d8e8a2e2a", "default", "aodh", 0, "NULL"],
"local_user_6": [11, "a757fb8d624b46b4b10eea1b4d2ca0d2", "default", "glance", 0, "NULL"],
"local_user_7": [12, "118a09e72d6a4194af383285cb7e579a", "default", "placement", 0, "NULL"],
"local_user_8": [13, "f1cc67bbf0d84c89a1df3067b538e1b8", "default", "patching", 0, "NULL"],
"local_user_9": [14, "f94aa82e49dd4aaa8bf1c80fee109234", "2423d6c7853145a798e6491ca9de6e2b", "heat_admin", 0, "NULL"],
"local_user_10": [15, "04facea7432848c9bfdf3780bb51612e", "default", "dcmanager", 0, "NULL"],
"local_user_11": [16, "c455073c30044db8908630595699d874", "default", "ceilometer", 0, "NULL"],
"local_user_12": [17, "4a2c1f4c8ae942b19e388576e93d1ced", "default", "cinder", 0, "NULL"],
"local_user_13": [18, "d1399977957645e4a1e26c1b7b1e6d35", "default", "tenant1", 0, "NULL"],
"local_user_14": [19, "5ad8271fc6bc432ab80685945bc5b346", "default", "tenant2", 0, "NULL"],
"local_user_15": [20, "73403639b14c40e6b288c0e2cd3707bc", "default", "cindersubcloud-1", 0, "NULL"],
"local_user_16": [9, "146482c0aba84e35a5c1a507cff9db3d", "default", "nova", 0, "NULL"],
"local_user_17": [10, "63dd0fb409264a43b7dbfe9582b8023d", "default", "mtce", 0, "NULL"],
"local_user_18": [2, "81eed996f2a346a3b5282fe2a881db9b", "default", "sysinv", 0, "NULL"],
"local_user_19": [23, "872e8c1b48c640c59189cf1587bd4e41", "default", "cindersubcloud-5", 0, "NULL"],
"local_user_20": [24, "f85b8eca57a441838cfe5a39d33230b5", "default", "cindersubcloud-4", 0, "NULL"],
"local_user_21": [1, "500b2ba0791e44a780d4dad3c5a1ff31", "default", "admin", 0, "NULL"]
}
@ -0,0 +1,6 @@
{
"migrate_version_0": ["keystone_expand", "/usr/lib/python2.7/site-packages/keystone/common/sql/expand_repo", 24],
"migrate_version_1": ["keystone_data_migrate", "/usr/lib/python2.7/site-packages/keystone/common/sql/data_migration_repo", 24],
"migrate_version_2": ["keystone", "/usr/lib/python2.7/site-packages/keystone/common/sql/migrate_repo", 109],
"migrate_version_3": ["keystone_contract", "/usr/lib/python2.7/site-packages/keystone/common/sql/contract_repo", 24]
}
24
dcmanager/tests/data/ipv6_R5_install/keystone/password.json
Normal file
@ -0,0 +1,24 @@
{
"password_0": [1, 1, "NULL", "NULL", false, "$2b$12$fVKV1.pFz76EgkTePPtzEuMYbTS8CbsVghxDhX7f7liZx8RlW0Y6O", 1523460727481605, "NULL", "2018-04-11 15:32:07.481605"],
"password_1": [2, 2, "NULL", "NULL", false, "$2b$12$SlX.b0AgnYn4nZtJ3jmvWeCpSQiY21QjdlpjvkMwyUjH8zYJzLBHe", 1523460750612369, "NULL", "2018-04-11 15:32:30.612369"],
"password_2": [3, 3, "NULL", "NULL", false, "$2b$12$xOE0UlHJSzLiqeupbP/BvOTKxptmAAXylD0IlcbecOpeQ9w3L8o9K", 1523461139214437, "NULL", "2018-04-11 15:38:59.214437"],
"password_3": [4, 4, "NULL", "NULL", false, "$2b$12$XraXnRCsEobDqxvZI10YwOCN2qFdVx4YyVsnAggUk6JOpZIA1ILRC", 1523461146035371, "NULL", "2018-04-11 15:39:06.035371"],
"password_4": [5, 5, "NULL", "NULL", false, "$2b$12$hm9rPyEF4MGzGhVN6MZEZOV20HNAEYdd/X5tE/eTMBUdf2ojGozym", 1523461151305674, "NULL", "2018-04-11 15:39:11.305674"],
"password_5": [6, 6, "NULL", "NULL", false, "$2b$12$uRXa5txGlCkP3K8k2evESOKE0OCvN0E1lmtDEffUo4GN4M3/moDhG", 1523461154969656, "NULL", "2018-04-11 15:39:14.969656"],
"password_6": [7, 7, "NULL", "NULL", false, "$2b$12$UDWh3bOprZkcicTvX74ekO7Z2sA9i578bvJWR3u3JKxx./R4zfAZm", 1523461159304616, "NULL", "2018-04-11 15:39:19.304616"],
"password_7": [8, 8, "NULL", "NULL", false, "$2b$12$aaxz0tFwmstJa28TC6CBAubmJImu7CpnOf6IL5Ay69xrmhjntK7U6", 1523461167384976, "NULL", "2018-04-11 15:39:27.384976"],
"password_8": [9, 9, "NULL", "NULL", false, "$2b$12$P8NNMYOhoASdrH9otXOSpuSdRmumCxmaUw86sQBr4uMBU0QZgrVB6", 1523461170949886, "NULL", "2018-04-11 15:39:30.949886"],
"password_9": [10, 10, "NULL", "NULL", false, "$2b$12$G5oIKiC7dArW21ALaT.vyuHoUl2frQdBrNH9oX1JGiC/IVK4/x5d2", 1523461176191435, "NULL", "2018-04-11 15:39:36.191435"],
"password_10": [11, 11, "NULL", "NULL", false, "$2b$12$c7khbuXewToyssTnkBI.sOSP1evojjJVadd8aVPjRdSaKBXhOu5XO", 1523461179586188, "NULL", "2018-04-11 15:39:39.586188"],
"password_11": [12, 12, "NULL", "NULL", false, "$2b$12$YiAwkChCYKqog31cjk9hReGyoSf.LBk2pp4ca/ujTMUZnS5Bi06oS", 1523461183306664, "NULL", "2018-04-11 15:39:43.306664"],
"password_12": [13, 13, "NULL", "NULL", false, "$2b$12$6R5Wc3uuF270K.Kz0Qhdze20dzWHUx/YNYCT4CBIZtq70T4eTKo2.", 1523461186923901, "NULL", "2018-04-11 15:39:46.923901"],
"password_13": [14, 14, "NULL", "NULL", false, "$2b$12$c069e0ysfrkXryUc7Y7FV.V0mIV1AuAebtTPt6HG51etBI8JYiLK2", 1523461239110598, "NULL", "2018-04-11 15:40:39.110598"],
"password_14": [15, 15, "NULL", "NULL", false, "$2b$12$PhXg966X3UpaW6nUHKjAseGgIq2WFEiwxqsg0AQl1fZB0XRyF3q1G", 1523461266343289, "NULL", "2018-04-11 15:41:06.343289"],
"password_15": [16, 16, "NULL", "NULL", false, "$2b$12$HEbgdNZ.XAueAUE.yQVRV.RePFvWXi3kzuE5nzuQ/cR4ecNdq5GuK", 1523461278526719, "NULL", "2018-04-11 15:41:18.526719"],
"password_16": [17, 17, "NULL", "NULL", false, "$2b$12$ta3TKTGmLRRSb0LvENvFpOdkyvf24h.XDYuE4zJCavb/z5ERh6GcK", 1523462230091266, "NULL", "2018-04-11 15:57:10.091266"],
"password_17": [18, 18, "NULL", "NULL", false, "$2b$12$IlICOy5XIrgXKB/LrpYH8OxhhumP6TIX7CoNET3jXEloQdcvLgig2", 1523462315972021, "NULL", "2018-04-11 15:58:35.972021"],
"password_18": [19, 19, "NULL", "NULL", false, "$2b$12$Tzx42wm1w1hauLkUqypJuu84yTsfWtm9XrsZFLNlpoizX/b6MLQHO", 1523462331773330, "NULL", "2018-04-11 15:58:51.77333"],
"password_19": [20, 20, "NULL", "NULL", false, "$2b$12$lFM1kQaZ3wQyuOcsUYnbqeEgRmQsYFsabjMJLPWm3EgZCnHAO0fXC", 1523469345119409, "NULL", "2018-04-11 17:55:45.119409"],
"password_20": [23, 23, "NULL", "NULL", false, "$2b$12$IpkrfjrFTVclpDV9qC4Twuct8aFZUFEPEEr/6tznmFr/U8lc42k1m", 1526397706723260, "NULL", "2018-05-15 15:21:46.72326"],
"password_21": [24, 24, "NULL", "NULL", false, "$2b$12$809wlBp0xowtrgpFiwGNp.gVrJ8uvdQNN43zQGbexRm82Mb5AJriq", 1526399747870689, "NULL", "2018-05-15 15:55:47.870689"]
}
@ -0,0 +1,9 @@
{
"project_0": ["<<keystone.domain.root>>", "<<keystone.domain.root>>", "{}", "", false, "<<keystone.domain.root>>", "NULL", true],
"project_1": ["default", "Default", "{}", "The default domain", true, "<<keystone.domain.root>>", "NULL", true],
"project_2": ["8803689162424f60a71e4642e9dc2b9e", "admin", "{}", "admin tenant", true, "default", "default", false],
"project_3": ["f3b78df9bbd74d6b8bbf8c5f08427ca7", "services", "{}", "Tenant for the openstack services", true, "default", "default", false],
"project_4": ["2423d6c7853145a798e6491ca9de6e2b", "heat", "{}", "", true, "<<keystone.domain.root>>", "NULL", true],
"project_5": ["9008c3fc102040cd8149b5c0d8aa06a3", "tenant1", "{}", "tenant1", true, "default", "default", false],
"project_6": ["6ecc44a6b24e4c398dc749f1386b2ced", "tenant2", "{}", "tenant2", true, "default", "default", false]
}
@ -0,0 +1,7 @@
{
"region_0": ["SystemController", "", "NULL", "{}"],
"region_1": ["RegionOne", "", "NULL", "{}"],
"region_2": ["subcloud-1", "", "NULL", "{}"],
"region_3": ["subcloud-4", "", "NULL", "{}"],
"region_4": ["subcloud-5", "", "NULL", "{}"]
}
@ -0,0 +1,3 @@
{
"revocation_event_0": [7, "NULL", "NULL", "8c5414c673634a8ebb837a897cb73a54", "NULL", "NULL", "NULL", "NULL", "2018-05-21 13:48:50", "NULL", "2018-05-21 13:48:50", "NULL", "NULL"]
}
7
dcmanager/tests/data/ipv6_R5_install/keystone/role.json
Normal file
@ -0,0 +1,7 @@
{
"role_0": ["59fa225368524bf6974f76a25050143b", "admin", "{}", "<<null>>"],
"role_1": ["9fe2ff9ee4384b1894a90878d3e92bab", "_member_", "{}", "<<null>>"],
"role_2": ["1f62f45b748b4c5db66f97c715ecf1ae", "ResellerAdmin", "{}", "<<null>>"],
"role_3": ["d6bd09cf50334c5b9b1fe4cdeedfbdc4", "heat_stack_user", "{}", "<<null>>"],
"role_4": ["ef2e357b0d4d4bcaaa6ae303c7d58d7e", "heat_stack_owner", "{}", "<<null>>"]
}
20
dcmanager/tests/data/ipv6_R5_install/keystone/service.json
Normal file
@ -0,0 +1,20 @@
{
"service_0": ["5fa3efb666204693a0d0ab05fb03140c", "identity", true, "{"description": "OpenStack Identity Service", "name": "keystone"}"],
"service_1": ["b3dd49c87dfd40d08d19d2895d2bc9c6", "platform", true, "{"description": "SysInvService", "name": "sysinv"}"],
"service_2": ["9754bb0a6cba4ae4b62c1a4e825964a5", "dcmanager", true, "{"description": "DCManagerService", "name": "dcmanager"}"],
"service_3": ["c931b77a92bc4208909d9205d85391a0", "dcorch", true, "{"description": "DcOrchService", "name": "dcorch"}"],
"service_4": ["7d48ddb964034eb588e557b976d11cdf", "image", true, "{"description": "OpenStack Image Service", "name": "glance"}"],
"service_5": ["a15edc66a6394e18bda9f9256e7b470c", "alarming", true, "{"description": "OpenStack Alarming Service", "name": "aodh"}"],
"service_6": ["995cc229e9af44ec81c1c76073f4c733", "placement", true, "{"description": "Openstack Placement Service", "name": "placement"}"],
"service_7": ["c4ae85afaf7b465190d927e11da3eb38", "compute", true, "{"description": "Openstack Compute Service", "name": "nova"}"],
"service_8": ["ea41162395844d30af3e59efa3e6323e", "cloudformation", true, "{"description": "Openstack Cloudformation Service", "name": "heat-cfn"}"],
"service_9": ["6cfd11045b1e4c0badcb56f18428ab5b", "network", true, "{"description": "Neutron Networking Service", "name": "neutron"}"],
"service_10": ["c3677835d8024fa894929ea67b1e9fa0", "patching", true, "{"description": "Patching Service", "name": "patching"}"],
"service_11": ["86328b93a3c84d63a1be7f7368138bdf", "metering", true, "{"description": "Openstack Metering Service", "name": "ceilometer"}"],
"service_12": ["aa803a6f0ab84b68ad13a759b1b29525", "nfv", true, "{"description": "Virtual Infrastructure Manager", "name": "vim"}"],
"service_13": ["d6f2ef7609f44c9aa0b40b15f9f93139", "event", true, "{"description": "OpenStack Event Service", "name": "panko"}"],
"service_14": ["0efe25ad76f244e1bca9f6975cfe8b83", "orchestration", true, "{"description": "Openstack Orchestration Service", "name": "heat"}"],
"service_15": ["c5834d3740504a69bf427385319b51a0", "volumev3", true, "{"description": "Cinder Service v3", "name": "cinderv3"}"],
"service_16": ["8a5873d1ee914ccbae3c070d578d0d0d", "volumev2", true, "{"description": "Cinder Service v2", "name": "cinderv2"}"],
"service_17": ["567f8aafa7844256b03e86655fa2bd3e", "volume", true, "{"description": "Cinder Service", "name": "cinder"}"]
}
@ -0,0 +1,6 @@
{
"strategy_steps_0": [id, subcloud_id, stage, state, details, started_at, finished_at, reserved_1, reserved_2, created_at, updated_at, deleted_at, deleted],
"strategy_steps_1": [id, subcloud_id, stage, state, details, started_at, finished_at, reserved_1, reserved_2, created_at, updated_at, deleted_at, deleted],
"strategy_steps_2": [id, subcloud_id, stage, state, details, started_at, finished_at, reserved_1, reserved_2, created_at, updated_at, deleted_at, deleted],
"strategy_steps_3": [id, subcloud_id, stage, state, details, started_at, finished_at, reserved_1, reserved_2, created_at, updated_at, deleted_at, deleted]
}
@ -0,0 +1,17 @@
{
"subcloud_status_0": [id, subcloud_id, endpoint_type, sync_status, reserved_1, reserved_2, created_at, updated_at, deleted_at, deleted],
"subcloud_status_1": [id, subcloud_id, endpoint_type, sync_status, reserved_1, reserved_2, created_at, updated_at, deleted_at, deleted],
"subcloud_status_2": [id, subcloud_id, endpoint_type, sync_status, reserved_1, reserved_2, created_at, updated_at, deleted_at, deleted],
"subcloud_status_3": [id, subcloud_id, endpoint_type, sync_status, reserved_1, reserved_2, created_at, updated_at, deleted_at, deleted],
"subcloud_status_4": [id, subcloud_id, endpoint_type, sync_status, reserved_1, reserved_2, created_at, updated_at, deleted_at, deleted],
"subcloud_status_5": [id, subcloud_id, endpoint_type, sync_status, reserved_1, reserved_2, created_at, updated_at, deleted_at, deleted],
"subcloud_status_6": [id, subcloud_id, endpoint_type, sync_status, reserved_1, reserved_2, created_at, updated_at, deleted_at, deleted],
"subcloud_status_7": [id, subcloud_id, endpoint_type, sync_status, reserved_1, reserved_2, created_at, updated_at, deleted_at, deleted],
"subcloud_status_8": [id, subcloud_id, endpoint_type, sync_status, reserved_1, reserved_2, created_at, updated_at, deleted_at, deleted],
"subcloud_status_9": [id, subcloud_id, endpoint_type, sync_status, reserved_1, reserved_2, created_at, updated_at, deleted_at, deleted],
"subcloud_status_10": [id, subcloud_id, endpoint_type, sync_status, reserved_1, reserved_2, created_at, updated_at, deleted_at, deleted],
"subcloud_status_11": [id, subcloud_id, endpoint_type, sync_status, reserved_1, reserved_2, created_at, updated_at, deleted_at, deleted],
"subcloud_status_12": [id, subcloud_id, endpoint_type, sync_status, reserved_1, reserved_2, created_at, updated_at, deleted_at, deleted],
"subcloud_status_13": [id, subcloud_id, endpoint_type, sync_status, reserved_1, reserved_2, created_at, updated_at, deleted_at, deleted],
"subcloud_status_14": [id, subcloud_id, endpoint_type, sync_status, reserved_1, reserved_2, created_at, updated_at, deleted_at, deleted]
}
@ -0,0 +1,5 @@
{
"subclouds_0": [id, name, description, location, software_version, management_state, availability_status, management_subnet, management_gateway_ip, management_start_ip, management_end_ip, systemcontroller_gateway_ip, audit_fail_count, reserved_1, reserved_2, created_at, updated_at, deleted_at, deleted],
"subclouds_1": [id, name, description, location, software_version, management_state, availability_status, management_subnet, management_gateway_ip, management_start_ip, management_end_ip, systemcontroller_gateway_ip, audit_fail_count, reserved_1, reserved_2, created_at, updated_at, deleted_at, deleted],
"subclouds_2": [id, name, description, location, software_version, management_state, availability_status, management_subnet, management_gateway_ip, management_start_ip, management_end_ip, systemcontroller_gateway_ip, audit_fail_count, reserved_1, reserved_2, created_at, updated_at, deleted_at, deleted]
}
Some files were not shown because too many files have changed in this diff