adding configuration group support
This allows users to make custom configurations groups and apply them to multiple instances. Configuration parameters that can be set are listed in the validation_rules.json file. implements blueprint configuration-management Change-Id: I99b0bfe51c131ace30774e75e30d620101ed2e0b
This commit is contained in:
parent
522f6df6ad
commit
1f3a4fda87
@ -15,6 +15,8 @@
|
||||
import routes
|
||||
|
||||
from trove.common import wsgi
|
||||
from trove.configuration.service import ConfigurationsController
|
||||
from trove.configuration.service import ParametersController
|
||||
from trove.flavor.service import FlavorController
|
||||
from trove.instance.service import InstanceController
|
||||
from trove.limits.service import LimitsController
|
||||
@ -34,6 +36,7 @@ class API(wsgi.Router):
|
||||
self._versions_router(mapper)
|
||||
self._limits_router(mapper)
|
||||
self._backups_router(mapper)
|
||||
self._configurations_router(mapper)
|
||||
|
||||
def _versions_router(self, mapper):
|
||||
versions_resource = VersionsController().create_resource()
|
||||
@ -74,6 +77,10 @@ class API(wsgi.Router):
|
||||
controller=instance_resource,
|
||||
action="action",
|
||||
conditions={'method': ['POST']})
|
||||
mapper.connect("/{tenant_id}/instances/{id}",
|
||||
controller=instance_resource,
|
||||
action="update",
|
||||
conditions={'method': ['PUT']})
|
||||
mapper.connect("/{tenant_id}/instances/{id}",
|
||||
controller=instance_resource,
|
||||
action="delete",
|
||||
@ -82,6 +89,10 @@ class API(wsgi.Router):
|
||||
controller=instance_resource,
|
||||
action="backups",
|
||||
conditions={'method': ['GET']})
|
||||
mapper.connect("/{tenant_id}/instances/{id}/configuration",
|
||||
controller=instance_resource,
|
||||
action="configuration",
|
||||
conditions={'method': ['GET']})
|
||||
|
||||
def _flavor_router(self, mapper):
|
||||
flavor_resource = FlavorController().create_resource()
|
||||
@ -124,6 +135,59 @@ class API(wsgi.Router):
|
||||
action="delete",
|
||||
conditions={'method': ['DELETE']})
|
||||
|
||||
def _configurations_router(self, mapper):
    """Register routes for datastore parameters and configuration groups.

    Parameter routes are read-only (GET); configuration-group routes
    cover the full CRUD lifecycle plus the attached-instances listing.
    """
    parameters_resource = ParametersController().create_resource()
    by_version = '/{tenant_id}/datastores/versions/{version}/parameters'
    by_datastore = '/{tenant_id}/datastores/{datastore}/versions/{id}'
    # (path, action) pairs; all parameter lookups are GET-only.
    parameter_routes = [
        (by_version, 'index_by_version'),
        (by_version + '/{name}', 'show_by_version'),
        (by_datastore + '/parameters', 'index'),
        (by_datastore + '/parameters/{name}', 'show'),
    ]
    for path, action in parameter_routes:
        mapper.connect(path,
                       controller=parameters_resource,
                       action=action,
                       conditions={'method': ['GET']})

    configuration_resource = ConfigurationsController().create_resource()
    group_path = '/{tenant_id}/configurations'
    # (path, action, HTTP method) triples for configuration groups.
    group_routes = [
        (group_path, 'index', 'GET'),
        (group_path, 'create', 'POST'),
        (group_path + '/{id}', 'show', 'GET'),
        (group_path + '/{id}/instances', 'instances', 'GET'),
        (group_path + '/{id}', 'edit', 'PATCH'),
        (group_path + '/{id}', 'update', 'PUT'),
        (group_path + '/{id}', 'delete', 'DELETE'),
    ]
    for path, action, method in group_routes:
        mapper.connect(path,
                       controller=configuration_resource,
                       action=action,
                       conditions={'method': [method]})
|
||||
|
||||
|
||||
def app_factory(global_conf, **local_conf):
    """PasteDeploy app-factory entry point: returns the Trove API router."""
    return API()
|
||||
|
@ -17,14 +17,16 @@ from trove.common import cfg
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
url_ref = {
|
||||
"type": "string",
|
||||
"minLength": 8,
|
||||
"pattern": 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]'
|
||||
'|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
|
||||
}
|
||||
|
||||
flavorref = {
|
||||
'oneOf': [
|
||||
{
|
||||
"type": "string",
|
||||
"minLength": 8,
|
||||
"pattern": 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]'
|
||||
'|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
|
||||
},
|
||||
url_ref,
|
||||
{
|
||||
"type": "string",
|
||||
"maxLength": 5,
|
||||
@ -172,6 +174,12 @@ users_list = {
|
||||
}
|
||||
}
|
||||
|
||||
configuration_id = {
|
||||
'oneOf': [
|
||||
uuid
|
||||
]
|
||||
}
|
||||
|
||||
instance = {
|
||||
"create": {
|
||||
"type": "object",
|
||||
@ -185,6 +193,7 @@ instance = {
|
||||
"additionalProperties": True,
|
||||
"properties": {
|
||||
"name": non_empty_string,
|
||||
"configuration_id": configuration_id,
|
||||
"flavorRef": flavorref,
|
||||
"volume": volume,
|
||||
"databases": databases_def,
|
||||
@ -352,6 +361,69 @@ backup = {
|
||||
}
|
||||
}
|
||||
|
||||
configuration = {
|
||||
"create": {
|
||||
"name": "configuration:create",
|
||||
"type": "object",
|
||||
"required": ["configuration"],
|
||||
"properties": {
|
||||
"configuration": {
|
||||
"type": "object",
|
||||
"required": ["values", "name"],
|
||||
"properties": {
|
||||
"description": non_empty_string,
|
||||
"values": {
|
||||
"type": "object",
|
||||
},
|
||||
"name": non_empty_string,
|
||||
"datastore": {
|
||||
"type": "object",
|
||||
"additionalProperties": True,
|
||||
"properties": {
|
||||
"type": non_empty_string,
|
||||
"version": non_empty_string
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"update": {
|
||||
"name": "configuration:update",
|
||||
"type": "object",
|
||||
"required": ["configuration"],
|
||||
"properties": {
|
||||
"configuration": {
|
||||
"type": "object",
|
||||
"required": [],
|
||||
"properties": {
|
||||
"description": non_empty_string,
|
||||
"values": {
|
||||
"type": "object",
|
||||
},
|
||||
"name": non_empty_string
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"edit": {
|
||||
"name": "configuration:edit",
|
||||
"type": "object",
|
||||
"required": ["configuration"],
|
||||
"properties": {
|
||||
"configuration": {
|
||||
"type": "object",
|
||||
"required": [],
|
||||
"properties": {
|
||||
"values": {
|
||||
"type": "object",
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
account = {
|
||||
'create': {
|
||||
"type": "object",
|
||||
|
@ -1,6 +1,7 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2011 OpenStack Foundation
|
||||
# Copyright 2014 Rackspace Hosting
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
@ -17,6 +18,8 @@
|
||||
"""Routines for configuring Trove."""
|
||||
|
||||
from oslo.config import cfg
|
||||
from trove.openstack.common import log as logging
|
||||
|
||||
import os.path
|
||||
|
||||
UNKNOWN_SERVICE_ID = 'unknown-service-id-error'
|
||||
@ -28,6 +31,8 @@ path_opts = [
|
||||
help='Directory where the trove python module is installed.'),
|
||||
]
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
common_opts = [
|
||||
cfg.StrOpt('sql_connection',
|
||||
default='sqlite:///trove_test.sqlite',
|
||||
@ -80,6 +85,7 @@ common_opts = [
|
||||
cfg.IntOpt('databases_page_size', default=20),
|
||||
cfg.IntOpt('instances_page_size', default=20),
|
||||
cfg.IntOpt('backups_page_size', default=20),
|
||||
cfg.IntOpt('configurations_page_size', default=20),
|
||||
cfg.ListOpt('ignore_users', default=['os_admin', 'root']),
|
||||
cfg.ListOpt('ignore_dbs', default=['lost+found',
|
||||
'mysql',
|
||||
|
84
trove/common/configurations.py
Normal file
84
trove/common/configurations.py
Normal file
@ -0,0 +1,84 @@
|
||||
# Copyright 2014 Rackspace Hosting
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import io
|
||||
import json
|
||||
from trove.common import cfg
|
||||
from trove.common import exception
|
||||
from trove.common import utils
|
||||
from trove.openstack.common import log as logging
|
||||
from trove.openstack.common.gettextutils import _
|
||||
from six.moves import configparser
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
CONF = cfg.CONF
|
||||
ENV = utils.ENV
|
||||
|
||||
|
||||
def _get_item(key, dictList):
|
||||
for item in dictList:
|
||||
if key == item.get('name'):
|
||||
return item
|
||||
|
||||
|
||||
def do_configs_require_restart(overrides, datastore_manager='mysql'):
    """Return True if any overridden parameter requires a restart.

    :param overrides: dict of configuration key -> value overrides.
    :param datastore_manager: datastore whose validation rules apply.
    :returns: bool -- True as soon as one key's rule has
        ``restart_required`` set; False otherwise.
    """
    rules = get_validation_rules(datastore_manager=datastore_manager)
    LOG.debug(_("overrides: %s") % overrides)
    LOG.debug(_("rules?: %s") % rules)
    for key in overrides.keys():
        rule = _get_item(key, rules['configuration-parameters'])
        LOG.debug(_("checking the rule: %s") % rule)
        # _get_item returns None for keys with no rule; the original
        # code then crashed with AttributeError. Unknown parameters are
        # treated as not requiring a restart.
        if rule is not None and rule.get('restart_required'):
            return True
    return False
|
||||
|
||||
|
||||
def get_validation_rules(datastore_manager='mysql'):
    """Load the validation-rules JSON for a datastore manager.

    Renders ``<manager>/validation-rules.json`` via the template
    environment and parses it.

    :raises exception.UnprocessableEntity: if the rules file is missing
        or cannot be rendered/parsed for this datastore.
    """
    rules_location = "%s/validation-rules.json" % datastore_manager
    try:
        rules_template = ENV.get_template(rules_location)
        return json.loads(rules_template.render())
    except Exception:
        msg = "This operation is not supported for this datastore at this time"
        LOG.exception(msg)
        raise exception.UnprocessableEntity(message=msg)
|
||||
|
||||
|
||||
class MySQLConfParser(object):
    """Parse a my.cnf-style config string into [mysqld] option pairs."""

    def __init__(self, config):
        # Raw configuration text (anything str() can render).
        self.config = config

    def parse(self):
        """Return the (key, value) items of the [mysqld] section."""
        cleaned = self._remove_commented_lines(str(self.config))
        parser = configparser.ConfigParser()
        parser.readfp(io.BytesIO(str(cleaned)))
        return parser.items("mysqld")

    def _remove_commented_lines(self, config_str):
        """Strip comment ('#'), include ('!') and ':'-prefixed lines.

        Each remaining line is whitespace-stripped; lines are rejoined
        with newlines.
        """
        skip_prefixes = ('#', '!', ':')
        kept = [line.strip()
                for line in config_str.splitlines()
                if not line.strip().startswith(skip_prefixes)]
        return "\n".join(kept)
|
@ -184,6 +184,14 @@ class UserAlreadyExists(BadRequest):
|
||||
message = _('A user with the name "%(name)s" already exists.')
|
||||
|
||||
|
||||
class InstanceAssignedToConfiguration(BadRequest):
    """Raised when deleting a configuration group that is still attached
    to one or more non-terminated instances."""

    message = _('A configuration group cannot be deleted if it is '
                'associated with one or more non-terminated instances. '
                'Detach the configuration group from all non-terminated '
                'instances and please try again.')
|
||||
|
||||
|
||||
class UnprocessableEntity(TroveError):
|
||||
|
||||
message = _("Unable to process the contained request")
|
||||
@ -357,3 +365,24 @@ class RestoreBackupIntegrityError(TroveError):
|
||||
|
||||
message = _("Current Swift object checksum does not match original "
|
||||
"checksum for backup %(backup_id)s.")
|
||||
|
||||
|
||||
class ConfigKeyNotFound(NotFound):
    """Raised when a submitted configuration key has no entry in the
    datastore's validation rules."""
    message = _("%(key)s is not a supported configuration parameter")
|
||||
|
||||
|
||||
class NoConfigParserFound(NotFound):
    """Raised when no config-file parser is registered for the
    requested datastore manager."""
    message = _("No configuration parser found for datastore "
                "%(datastore_manager)s")
|
||||
|
||||
|
||||
class ConfigurationDatastoreNotMatchInstance(TroveError):
    """Raised when attaching a configuration group whose datastore
    version differs from the target instance's."""
    message = _("Datastore Version on Configuration "
                "%(config_datastore_version)s does not "
                "match the Datastore Version on the instance "
                "%(instance_datastore_version)s.")
|
||||
|
||||
|
||||
class ConfigurationParameterDeleted(TroveError):
    """Raised when a request sets a parameter the operator has removed
    from the validation rules.

    Fixes: the class previously inherited ``object``; Python refuses to
    raise instances that are not BaseException subclasses, so every
    raise site produced a TypeError instead of this error. It now
    derives from the package's TroveError base. The message also had a
    doubled space ("be  set") from the string-literal split.
    """
    message = _("%(parameter_name)s parameter can no longer be "
                "set as of %(parameter_deleted_at)s")
|
||||
|
@ -1,4 +1,6 @@
|
||||
# Copyright 2012 OpenStack Foundation
|
||||
# Copyright 2014 Rackspace Hosting
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
@ -14,16 +16,20 @@
|
||||
|
||||
import jinja2
|
||||
from trove.common import cfg
|
||||
from trove.common import configurations
|
||||
from trove.common import exception
|
||||
from trove.common import utils
|
||||
from trove.openstack.common import log as logging
|
||||
|
||||
CONF = cfg.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
ENV = jinja2.Environment(loader=jinja2.ChoiceLoader([
|
||||
jinja2.FileSystemLoader(CONF.template_path),
|
||||
jinja2.PackageLoader("trove", "templates"),
|
||||
]))
|
||||
ENV = utils.ENV
|
||||
|
||||
# TODO(cp16net) Maybe this should be moved to a config dict
|
||||
SERVICE_PARSERS = {
|
||||
'mysql': configurations.MySQLConfParser,
|
||||
}
|
||||
|
||||
|
||||
class SingleInstanceConfigTemplate(object):
|
||||
@ -31,6 +37,8 @@ class SingleInstanceConfigTemplate(object):
|
||||
rendering on the guest
|
||||
"""
|
||||
|
||||
template_name = "%s/config.template"
|
||||
|
||||
def __init__(self, datastore_manager, flavor_dict, instance_id):
|
||||
"""Constructor
|
||||
|
||||
@ -43,21 +51,36 @@ class SingleInstanceConfigTemplate(object):
|
||||
|
||||
"""
|
||||
self.flavor_dict = flavor_dict
|
||||
template_filename = "%s/config.template" % datastore_manager
|
||||
template_filename = self.template_name % datastore_manager
|
||||
self.template = ENV.get_template(template_filename)
|
||||
self.datastore_manager = datastore_manager
|
||||
self.instance_id = instance_id
|
||||
|
||||
def render(self):
|
||||
def render(self, **kwargs):
|
||||
"""Renders the jinja template
|
||||
|
||||
:returns: str -- The rendered configuration file
|
||||
|
||||
"""
|
||||
template = ENV.get_template(self.template_name %
|
||||
self.datastore_manager)
|
||||
server_id = self._calculate_unique_id()
|
||||
self.config_contents = self.template.render(
|
||||
flavor=self.flavor_dict, server_id=server_id)
|
||||
self.config_contents = template.render(
|
||||
flavor=self.flavor_dict, server_id=server_id, **kwargs)
|
||||
return self.config_contents
|
||||
|
||||
def render_dict(self):
|
||||
"""
|
||||
Renders the default configuration template file as a dictionary
|
||||
to apply the default configuration dynamically.
|
||||
"""
|
||||
config = self.render()
|
||||
cfg_parser = SERVICE_PARSERS.get(self.datastore_manager)
|
||||
if not cfg_parser:
|
||||
raise exception.NoConfigParserFound(
|
||||
datastore_manager=self.datastore_manager)
|
||||
return cfg_parser(config).parse()
|
||||
|
||||
def _calculate_unique_id(self):
|
||||
"""
|
||||
Returns a positive unique id based off of the instance id
|
||||
@ -67,6 +90,10 @@ class SingleInstanceConfigTemplate(object):
|
||||
return abs(hash(self.instance_id) % (2 ** 31))
|
||||
|
||||
|
||||
class OverrideConfigTemplate(SingleInstanceConfigTemplate):
|
||||
template_name = "%s/override.config.template"
|
||||
|
||||
|
||||
def load_heat_template(datastore_manager):
|
||||
template_filename = "%s/heat.template" % datastore_manager
|
||||
try:
|
||||
|
@ -18,6 +18,7 @@
|
||||
|
||||
import datetime
|
||||
import inspect
|
||||
import jinja2
|
||||
import sys
|
||||
import time
|
||||
import urlparse
|
||||
@ -48,6 +49,12 @@ bool_from_string = openstack_utils.bool_from_string
|
||||
execute = processutils.execute
|
||||
isotime = timeutils.isotime
|
||||
|
||||
CONF = cfg.CONF
|
||||
ENV = jinja2.Environment(loader=jinja2.ChoiceLoader([
|
||||
jinja2.FileSystemLoader(CONF.template_path),
|
||||
jinja2.PackageLoader("trove", "templates")
|
||||
]))
|
||||
|
||||
|
||||
def create_method_args_string(*args, **kwargs):
|
||||
"""Returns a string representation of args and keyword args.
|
||||
|
@ -82,6 +82,12 @@ CUSTOM_SERIALIZER_METADATA = {
|
||||
#mgmt/instance
|
||||
'id': '',
|
||||
},
|
||||
'configuration': {
|
||||
'id': '',
|
||||
'name': '',
|
||||
'description': '',
|
||||
'datastore_version_id': ''
|
||||
},
|
||||
'flavor': {'id': '', 'ram': '', 'name': ''},
|
||||
'link': {'href': '', 'rel': ''},
|
||||
'database': {'name': ''},
|
||||
|
0
trove/configuration/__init__.py
Normal file
0
trove/configuration/__init__.py
Normal file
215
trove/configuration/models.py
Normal file
215
trove/configuration/models.py
Normal file
@ -0,0 +1,215 @@
|
||||
# Copyright 2014 Rackspace
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from datetime import datetime
|
||||
|
||||
from trove.common import cfg
|
||||
from trove.common import configurations
|
||||
from trove.common.exception import ModelNotFoundError
|
||||
from trove.datastore.models import DatastoreVersion
|
||||
from trove.db import models as dbmodels
|
||||
from trove.openstack.common import log as logging
|
||||
from trove.openstack.common.gettextutils import _
|
||||
from trove.taskmanager import api as task_api
|
||||
|
||||
|
||||
CONF = cfg.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Configurations(object):
    """Collection-level operations over configuration groups."""

    # Upper bound (and default) for one page of results.
    DEFAULT_LIMIT = CONF.configurations_page_size

    @staticmethod
    def load(context):
        """Return (configuration list, next page marker) for *context*.

        Admin contexts see every non-deleted group; regular tenants see
        only their own. Paginated via ``context.limit`` / ``context.marker``
        and capped at DEFAULT_LIMIT.

        :raises TypeError: if *context* is None.
        """
        if context is None:
            raise TypeError("Argument context not defined.")
        # NOTE: the original also tested ``elif id is None`` — but that
        # compared the *builtin* ``id`` function to None, which is never
        # true, so the branch was unreachable dead code and was removed.

        if context.is_admin:
            db_info = DBConfiguration.find_all(deleted=False)
            if db_info is None:
                LOG.debug(_("No configurations found"))
        else:
            db_info = DBConfiguration.find_all(tenant_id=context.tenant,
                                               deleted=False)
            if db_info is None:
                LOG.debug(_("No configurations found for tenant % s")
                          % context.tenant)

        limit = int(context.limit or Configurations.DEFAULT_LIMIT)
        if limit > Configurations.DEFAULT_LIMIT:
            limit = Configurations.DEFAULT_LIMIT

        data_view = DBConfiguration.find_by_pagination('configurations',
                                                       db_info,
                                                       "foo",
                                                       limit=limit,
                                                       marker=context.marker)
        next_marker = data_view.next_page_marker
        return data_view.collection, next_marker
|
||||
|
||||
|
||||
class Configuration(object):
    """Operations on a single configuration group and its parameters."""

    @property
    def instances(self):
        """Instances attached to this group.

        Fixes: the original returned ``self.instances``, which re-entered
        this same property and recursed until RecursionError. It now
        reads the backing ``_instances`` attribute (AttributeError until
        something assigns it).
        """
        return self._instances

    @property
    def items(self):
        """Parameter items of this group (same recursion fix as above)."""
        return self._items

    @staticmethod
    def create(name, description, tenant_id, datastore, datastore_version):
        """Persist and return a new configuration group row.

        NOTE(review): ``datastore`` is accepted but never stored — only
        the datastore_version id is persisted. Confirm this is intended.
        """
        configurationGroup = DBConfiguration.create(
            name=name,
            description=description,
            tenant_id=tenant_id,
            datastore_version_id=datastore_version)
        return configurationGroup

    @staticmethod
    def create_items(cfg_id, values):
        """Persist one ConfigurationParameter row per key/value in *values*."""
        LOG.debug(_("saving the values to the database"))
        LOG.debug(_("cfg_id: %s") % cfg_id)
        LOG.debug(_("values: %s") % values)
        config_items = []
        for key, val in values.iteritems():
            config_item = ConfigurationParameter.create(
                configuration_id=cfg_id,
                configuration_key=key,
                configuration_value=val)
            config_items.append(config_item)
        return config_items

    @staticmethod
    def delete(context, group):
        """Soft-delete *group* and all of its parameter items."""
        deleted_at = datetime.utcnow()
        Configuration.remove_all_items(context, group.id, deleted_at)
        group.deleted = True
        group.deleted_at = deleted_at
        group.save()

    @staticmethod
    def remove_all_items(context, id, deleted_at):
        """Soft-delete every non-deleted parameter item of configuration *id*."""
        LOG.debug(_("removing the values from the database with configuration"
                    " %s") % id)
        items = ConfigurationParameter.find_all(configuration_id=id,
                                                deleted=False).all()
        LOG.debug(_("removing items: %s") % items)
        for item in items:
            item.deleted = True
            item.deleted_at = deleted_at
            item.save()

    @staticmethod
    def load_configuration_datastore_version(context, id):
        """Return the DatastoreVersion the configuration *id* targets."""
        config = Configuration.load(context, id)
        datastore_version = DatastoreVersion.load_by_uuid(
            config.datastore_version_id)
        return datastore_version

    @staticmethod
    def load(context, id):
        """Load one non-deleted group; admin contexts may load any tenant's.

        :raises ModelNotFoundError: re-raised with a user-facing message.
        """
        try:
            if context.is_admin:
                config_info = DBConfiguration.find_by(id=id,
                                                      deleted=False)
            else:
                config_info = DBConfiguration.find_by(id=id,
                                                      tenant_id=context.tenant,
                                                      deleted=False)
        except ModelNotFoundError:
            msg = _("Configuration group with ID %s could not be found.") % id
            raise ModelNotFoundError(msg)
        return config_info

    @staticmethod
    def load_items(context, id):
        """Load the group's items, coercing each value per its rule type."""
        datastore = Configuration.load_configuration_datastore_version(context,
                                                                       id)
        config_items = ConfigurationParameter.find_all(configuration_id=id,
                                                       deleted=False).all()
        rules = configurations.get_validation_rules(
            datastore_manager=datastore.manager)

        def _get_rule(key):
            # Linear scan of the rules list; returns None if absent.
            LOG.debug(_("finding rule with key : %s") % key)
            for rule in rules['configuration-parameters']:
                if str(rule.get('name')) == key:
                    return rule

        for item in config_items:
            rule = _get_rule(str(item.configuration_key))
            # Fixes: _get_rule returns None for unknown keys; the
            # original then raised AttributeError on rule.get(). Unknown
            # keys now fall through to the string branch.
            rule_type = rule.get('type') if rule is not None else None
            if rule_type == 'boolean':
                item.configuration_value = bool(int(item.configuration_value))
            elif rule_type == 'integer':
                item.configuration_value = int(item.configuration_value)
            else:
                item.configuration_value = str(item.configuration_value)
        return config_items

    @staticmethod
    def get_configuration_overrides(context, configuration_id):
        """Gets the overrides dict to apply to an instance"""
        overrides = {}
        if configuration_id:
            config_items = Configuration.load_items(context,
                                                    id=configuration_id)

            for i in config_items:
                overrides[i.configuration_key] = i.configuration_value
        return overrides

    @staticmethod
    def save(context, configuration, configuration_items, instances):
        """Persist the group and its items, then push the resulting
        overrides to each attached instance via the task manager."""
        DBConfiguration.save(configuration)
        for item in configuration_items:
            # Re-saving an item resurrects it (clears the tombstone).
            item["deleted_at"] = None
            ConfigurationParameter.save(item)

        items = Configuration.load_items(context, configuration.id)

        for instance in instances:
            LOG.debug(_("applying to instance: %s") % instance.id)
            overrides = {}
            for i in items:
                overrides[i.configuration_key] = i.configuration_value

            task_api.API(context).update_overrides(instance.id, overrides)
|
||||
|
||||
|
||||
class DBConfiguration(dbmodels.DatabaseModelBase):
    """DB model for a configuration group (soft-deleted via 'deleted')."""
    _data_fields = ['name', 'description', 'tenant_id', 'datastore_version_id',
                    'deleted', 'deleted_at']
|
||||
|
||||
|
||||
class ConfigurationParameter(dbmodels.DatabaseModelBase):
    """DB model for one key/value pair belonging to a configuration group."""
    _data_fields = ['configuration_id', 'configuration_key',
                    'configuration_value', 'deleted',
                    'deleted_at']

    def __hash__(self):
        # Hash by key only, so two parameters with the same key collide
        # in sets/dicts regardless of value or group.
        # NOTE(review): __hash__ is defined without a matching __eq__,
        # so equality remains identity-based — confirm intended.
        return self.configuration_key.__hash__()
|
||||
|
||||
|
||||
def persisted_models():
    """Map persistence registry names to their model classes."""
    return dict(
        configurations=DBConfiguration,
        configuration_parameters=ConfigurationParameter,
    )
|
287
trove/configuration/service.py
Normal file
287
trove/configuration/service.py
Normal file
@ -0,0 +1,287 @@
|
||||
# Copyright 2014 Rackspace
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from datetime import datetime
|
||||
from trove.common import cfg
|
||||
from trove.common import configurations
|
||||
from trove.common import exception
|
||||
from trove.common import pagination
|
||||
from trove.common import wsgi
|
||||
from trove.configuration import models
|
||||
from trove.configuration import views
|
||||
from trove.configuration.models import ConfigurationParameter
|
||||
from trove.datastore import models as ds_models
|
||||
from trove.openstack.common import log as logging
|
||||
from trove.openstack.common.gettextutils import _
|
||||
from trove.instance import models as instances_models
|
||||
import trove.common.apischema as apischema
|
||||
|
||||
|
||||
CONF = cfg.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ConfigurationsController(wsgi.Controller):
|
||||
|
||||
schemas = apischema.configuration
|
||||
|
||||
def index(self, req, tenant_id):
    """List configuration groups visible to the caller, paginated."""
    context = req.environ[wsgi.CONTEXT_KEY]
    groups, marker = models.Configurations.load(context)
    paged = pagination.SimplePaginatedDataView(
        req.url, 'configurations', views.ConfigurationsView(groups), marker)
    return wsgi.Result(paged.data(), 200)
|
||||
|
||||
def show(self, req, tenant_id, id):
    """Return one configuration group with its parameter values."""
    context = req.environ[wsgi.CONTEXT_KEY]
    group = models.Configuration.load(context, id)
    group_items = models.Configuration.load_items(context, id)
    detail_view = views.DetailedConfigurationView(group, group_items)
    return wsgi.Result(detail_view.data(), 200)
|
||||
|
||||
def instances(self, req, tenant_id, id):
    """List the tenant's non-deleted instances attached to configuration
    group *id*, paginated by context.limit/marker (capped at
    CONF.instances_page_size)."""
    context = req.environ[wsgi.CONTEXT_KEY]
    # Loading the group first enforces existence/ownership checks.
    configuration = models.Configuration.load(context, id)
    instances = instances_models.DBInstance.find_all(
        tenant_id=context.tenant,
        configuration_id=configuration.id,
        deleted=False)
    limit = int(context.limit or CONF.instances_page_size)
    if limit > CONF.instances_page_size:
        limit = CONF.instances_page_size
    data_view = instances_models.DBInstance.find_by_pagination(
        'instances', instances, "foo",
        limit=limit,
        marker=context.marker)
    view = views.DetailedConfigurationInstancesView(data_view.collection)
    paged = pagination.SimplePaginatedDataView(req.url, 'instances', view,
                                               data_view.next_page_marker)
    return wsgi.Result(paged.data(), 200)
|
||||
|
||||
def create(self, req, body, tenant_id):
    """Create a configuration group from the request body and return a
    detailed view of the new group and its items.

    Fixes: the second debug line logged ``req`` while its message said
    "body" — it now logs ``body``. Also removes the ``configItems``
    list, which was built from the values but never used (dead code);
    the items actually persisted come from Configuration.create_items.
    """
    LOG.debug(_("req : '%s'\n\n") % req)
    LOG.debug(_("body : '%s'\n\n") % body)

    name = body['configuration']['name']
    description = body['configuration'].get('description')
    values = body['configuration']['values']

    datastore_args = body['configuration'].get('datastore', {})
    datastore, datastore_version = (
        ds_models.get_datastore_version(**datastore_args))

    if values:
        # validate that the values passed in are permitted by the operator.
        ConfigurationsController._validate_configuration(
            body['configuration']['values'],
            datastore_manager=datastore_version.manager)

    cfg_group = models.Configuration.create(name, description, tenant_id,
                                            datastore.id,
                                            datastore_version.id)
    cfg_group_items = models.Configuration.create_items(cfg_group.id,
                                                        values)
    view_data = views.DetailedConfigurationView(cfg_group,
                                                cfg_group_items)
    return wsgi.Result(view_data.data(), 200)
|
||||
|
||||
def delete(self, req, tenant_id, id):
    """Soft-delete a configuration group.

    Refuses (400) while any non-terminated instance of this tenant is
    still attached to the group.
    """
    context = req.environ[wsgi.CONTEXT_KEY]
    group = models.Configuration.load(context, id)
    attached = instances_models.DBInstance.find_all(
        tenant_id=context.tenant,
        configuration_id=id,
        deleted=False).all()
    if attached:
        raise exception.InstanceAssignedToConfiguration()
    models.Configuration.delete(context, group)
    return wsgi.Result(None, 202)
|
||||
|
||||
def update(self, req, body, tenant_id, id):
    """PUT semantics: replace the group's entire value set (and
    optionally name/description), then re-apply the overrides to every
    attached instance."""
    LOG.info(_("Updating configuration for tenant id %s") % tenant_id)
    context = req.environ[wsgi.CONTEXT_KEY]
    group = models.Configuration.load(context, id)
    instances = instances_models.DBInstance.find_all(
        tenant_id=context.tenant,
        configuration_id=id,
        deleted=False).all()

    # if name/description are provided in the request body, update the
    # model with these values as well.
    if 'name' in body['configuration']:
        group.name = body['configuration']['name']

    if 'description' in body['configuration']:
        group.description = body['configuration']['description']

    # Build the replacement item set BEFORE tombstoning the old one, so
    # validation failures leave the existing items untouched.
    items = self._configuration_items_list(group, body['configuration'])
    deleted_at = datetime.utcnow()
    models.Configuration.remove_all_items(context, group.id, deleted_at)
    LOG.info(_("loaded configuration instances: %s") % instances)
    models.Configuration.save(context, group, items, instances)
    return wsgi.Result(None, 202)
|
||||
|
||||
def edit(self, req, body, tenant_id, id):
    """PATCH semantics: merge the supplied values into the group (does
    not tombstone existing items, unlike update()) and push the result
    to attached instances."""
    context = req.environ[wsgi.CONTEXT_KEY]
    group = models.Configuration.load(context, id)
    instances = instances_models.DBInstance.find_all(
        tenant_id=context.tenant,
        configuration_id=id,
        deleted=False).all()
    LOG.info(_("loaded configuration instances: %s") % instances)
    items = self._configuration_items_list(group, body['configuration'])
    models.Configuration.save(context, group, items, instances)
    # NOTE(review): unlike update(), this method returns None instead of
    # a wsgi.Result — confirm the framework's default response is the
    # intended behavior here.
|
||||
|
||||
def _configuration_items_list(self, group, configuration):
    """Build (unsaved) ConfigurationParameter objects from the request's
    'values' dict, after operator-rule validation. Returns [] when no
    'values' key is present."""
    ds_version_id = group.datastore_version_id
    ds_version = ds_models.DatastoreVersion.load_by_uuid(ds_version_id)
    LOG.info(_("loaded configuration group: %s") % group)
    if 'values' not in configuration:
        return []
    values = configuration['values']
    # validate that the values passed in are permitted by the operator.
    ConfigurationsController._validate_configuration(
        values, datastore_manager=ds_version.manager)
    return [ConfigurationParameter(configuration_id=group.id,
                                   configuration_key=key,
                                   configuration_value=value,
                                   deleted=False)
            for key, value in values.iteritems()]
|
||||
|
||||
@staticmethod
|
||||
def _validate_configuration(values, datastore_manager=None):
|
||||
rules = configurations.get_validation_rules(
|
||||
datastore_manager=datastore_manager)
|
||||
|
||||
LOG.info(_("Validating configuration values"))
|
||||
for k, v in values.iteritems():
|
||||
# get the validation rule dictionary, which will ensure there is a
|
||||
# rule for the given key name. An exception will be thrown if no
|
||||
# valid rule is located.
|
||||
rule = ConfigurationsController._get_item(
|
||||
k, rules['configuration-parameters'])
|
||||
|
||||
if rule.get('deleted_at'):
|
||||
raise exception.ConfigurationParameterDeleted(
|
||||
parameter_name=rule.get('name'),
|
||||
parameter_deleted_at=rule.get('deleted_at'))
|
||||
|
||||
# type checking
|
||||
valueType = rule.get('type')
|
||||
|
||||
if not isinstance(v, ConfigurationsController._find_type(
|
||||
valueType)):
|
||||
output = {"key": k, "type": valueType}
|
||||
msg = _("The value provided for the configuration "
|
||||
"parameter %(key)s is not of type %(type)s.") % output
|
||||
raise exception.UnprocessableEntity(message=msg)
|
||||
|
||||
# integer min/max checking
|
||||
if isinstance(v, int):
|
||||
try:
|
||||
min_value = int(rule.get('min'))
|
||||
except ValueError:
|
||||
raise exception.TroveError(_(
|
||||
"Invalid or unsupported min value defined in the "
|
||||
"configuration-parameters configuration file. "
|
||||
"Expected integer."))
|
||||
if v < min_value:
|
||||
output = {"key": k, "min": min_value}
|
||||
message = _("The value for the configuration parameter "
|
||||
"%(key)s is less than the minimum allowed: "
|
||||
"%(min)s") % output
|
||||
raise exception.UnprocessableEntity(message=message)
|
||||
|
||||
try:
|
||||
max_value = int(rule.get('max'))
|
||||
except ValueError:
|
||||
raise exception.TroveError(_(
|
||||
"Invalid or unsupported max value defined in the "
|
||||
"configuration-parameters configuration file. "
|
||||
"Expected integer."))
|
||||
if v > max_value:
|
||||
output = {"key": k, "max": max_value}
|
||||
message = _("The value for the configuration parameter "
|
||||
"%(key)s is greater than the maximum "
|
||||
"allowed: %(max)s") % output
|
||||
raise exception.UnprocessableEntity(message=message)
|
||||
|
||||
@staticmethod
|
||||
def _find_type(valueType):
|
||||
if valueType == "boolean":
|
||||
return bool
|
||||
elif valueType == "string":
|
||||
return basestring
|
||||
elif valueType == "integer":
|
||||
return int
|
||||
else:
|
||||
raise exception.TroveError(_(
|
||||
"Invalid or unsupported type defined in the "
|
||||
"configuration-parameters configuration file."))
|
||||
|
||||
@staticmethod
|
||||
def _get_item(key, dictList):
|
||||
for item in dictList:
|
||||
if key == item.get('name'):
|
||||
return item
|
||||
raise exception.UnprocessableEntity(
|
||||
message=_("%s is not a supported configuration parameter.") % key)
|
||||
|
||||
|
||||
class ParametersController(wsgi.Controller):
|
||||
def index(self, req, tenant_id, datastore, id):
|
||||
ds, ds_version = ds_models.get_datastore_version(
|
||||
type=datastore, version=id)
|
||||
rules = configurations.get_validation_rules(
|
||||
datastore_manager=ds_version.manager)
|
||||
return wsgi.Result(views.ConfigurationParametersView(rules).data(),
|
||||
200)
|
||||
|
||||
def show(self, req, tenant_id, datastore, id, name):
|
||||
ds, ds_version = ds_models.get_datastore_version(
|
||||
type=datastore, version=id)
|
||||
rules = configurations.get_validation_rules(
|
||||
datastore_manager=ds_version.manager)
|
||||
for rule in rules['configuration-parameters']:
|
||||
if rule['name'] == name:
|
||||
return wsgi.Result(
|
||||
views.ConfigurationParametersView(rule).data(), 200)
|
||||
raise exception.ConfigKeyNotFound(key=name)
|
||||
|
||||
def index_by_version(self, req, tenant_id, version):
|
||||
ds_version = ds_models.DatastoreVersion.load_by_uuid(version)
|
||||
rules = configurations.get_validation_rules(
|
||||
datastore_manager=ds_version.manager)
|
||||
return wsgi.Result(views.ConfigurationParametersView(rules).data(),
|
||||
200)
|
||||
|
||||
def show_by_version(self, req, tenant_id, version, name):
|
||||
ds_version = ds_models.DatastoreVersion.load_by_uuid(version)
|
||||
rules = configurations.get_validation_rules(
|
||||
datastore_manager=ds_version.manager)
|
||||
for rule in rules['configuration-parameters']:
|
||||
if rule['name'] == name:
|
||||
return wsgi.Result(
|
||||
views.ConfigurationParametersView(rule).data(), 200)
|
||||
raise exception.ConfigKeyNotFound(key=name)
|
107
trove/configuration/views.py
Normal file
107
trove/configuration/views.py
Normal file
@ -0,0 +1,107 @@
|
||||
# Copyright 2014 Rackspace
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from trove.openstack.common import log as logging
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ConfigurationView(object):
|
||||
|
||||
def __init__(self, configuration):
|
||||
self.configuration = configuration
|
||||
|
||||
def data(self):
|
||||
configuration_dict = {
|
||||
"id": self.configuration.id,
|
||||
"name": self.configuration.name,
|
||||
"description": self.configuration.description,
|
||||
"datastore_version_id": self.configuration.datastore_version_id,
|
||||
}
|
||||
|
||||
return {"configuration": configuration_dict}
|
||||
|
||||
|
||||
class ConfigurationsView(object):
|
||||
|
||||
def __init__(self, configurations):
|
||||
self.configurations = configurations
|
||||
|
||||
def data(self):
|
||||
data = []
|
||||
|
||||
for configuration in self.configurations:
|
||||
data.append(self.data_for_configuration(configuration))
|
||||
|
||||
return {"configurations": data}
|
||||
|
||||
def data_for_configuration(self, configuration):
|
||||
view = ConfigurationView(configuration)
|
||||
return view.data()['configuration']
|
||||
|
||||
|
||||
class DetailedConfigurationInstancesView(object):
|
||||
|
||||
def __init__(self, instances):
|
||||
self.instances = instances
|
||||
|
||||
def instance_data(self):
|
||||
instances_list = []
|
||||
if self.instances:
|
||||
for instance in self.instances:
|
||||
instances_list.append(
|
||||
{
|
||||
"id": instance.id,
|
||||
"name": instance.name
|
||||
}
|
||||
)
|
||||
return instances_list
|
||||
|
||||
def data(self):
|
||||
|
||||
return {"instances": self.instance_data()}
|
||||
|
||||
|
||||
class DetailedConfigurationView(object):
|
||||
|
||||
def __init__(self, configuration, configuration_items):
|
||||
self.configuration = configuration
|
||||
self.configuration_items = configuration_items
|
||||
|
||||
def data(self):
|
||||
values = {}
|
||||
|
||||
for configItem in self.configuration_items:
|
||||
key = configItem.configuration_key
|
||||
value = configItem.configuration_value
|
||||
values[key] = value
|
||||
configuration_dict = {
|
||||
"id": self.configuration.id,
|
||||
"name": self.configuration.name,
|
||||
"description": self.configuration.description,
|
||||
"values": values,
|
||||
"datastore_version_id": self.configuration.datastore_version_id,
|
||||
}
|
||||
|
||||
return {"configuration": configuration_dict}
|
||||
|
||||
|
||||
class ConfigurationParametersView(object):
|
||||
|
||||
def __init__(self, configuration_parameters):
|
||||
self.configuration_parameters = configuration_parameters
|
||||
|
||||
def data(self):
|
||||
return self.configuration_parameters
|
@ -55,6 +55,10 @@ def map(engine, models):
|
||||
orm.mapper(models['security_group_instance_association'],
|
||||
Table('security_group_instance_associations', meta,
|
||||
autoload=True))
|
||||
orm.mapper(models['configurations'],
|
||||
Table('configurations', meta, autoload=True))
|
||||
orm.mapper(models['configuration_parameters'],
|
||||
Table('configuration_parameters', meta, autoload=True))
|
||||
|
||||
|
||||
def mapping_exists(model):
|
||||
|
@ -0,0 +1,65 @@
|
||||
# Copyright 2014 Rackspace
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from sqlalchemy import ForeignKey
|
||||
from sqlalchemy.schema import Column
|
||||
from sqlalchemy.schema import MetaData
|
||||
|
||||
from trove.db.sqlalchemy.migrate_repo.schema import create_tables
|
||||
from trove.db.sqlalchemy.migrate_repo.schema import DateTime
|
||||
from trove.db.sqlalchemy.migrate_repo.schema import Boolean
|
||||
from trove.db.sqlalchemy.migrate_repo.schema import String
|
||||
from trove.db.sqlalchemy.migrate_repo.schema import Table
|
||||
|
||||
meta = MetaData()
|
||||
|
||||
configurations = Table(
|
||||
'configurations',
|
||||
meta,
|
||||
Column('id', String(36), primary_key=True, nullable=False),
|
||||
Column('name', String(64), nullable=False),
|
||||
Column('description', String(256)),
|
||||
Column('tenant_id', String(36), nullable=False),
|
||||
Column('datastore_version_id', String(36), nullable=False),
|
||||
Column('deleted', Boolean(), nullable=False, default=False),
|
||||
Column('deleted_at', DateTime()),
|
||||
)
|
||||
|
||||
configuration_parameters = Table(
|
||||
'configuration_parameters',
|
||||
meta,
|
||||
Column('configuration_id', String(36), ForeignKey("configurations.id"),
|
||||
nullable=False, primary_key=True),
|
||||
Column('configuration_key', String(128), nullable=False, primary_key=True),
|
||||
Column('configuration_value', String(128)),
|
||||
Column('deleted', Boolean(), nullable=False, default=False),
|
||||
Column('deleted_at', DateTime()),
|
||||
)
|
||||
|
||||
|
||||
def upgrade(migrate_engine):
|
||||
meta.bind = migrate_engine
|
||||
create_tables([configurations])
|
||||
create_tables([configuration_parameters])
|
||||
|
||||
instances = Table('instances', meta, autoload=True)
|
||||
instances.create_column(Column('configuration_id', String(36),
|
||||
ForeignKey("configurations.id")))
|
||||
|
||||
|
||||
def downgrade(migrate_engine):
|
||||
meta.bind = migrate_engine
|
||||
# Not dropping the tables for concern if rollback needed would cause
|
||||
# consumers to recreate configurations.
|
@ -49,6 +49,7 @@ def configure_db(options, models_mapper=None):
|
||||
from trove.quota import models as quota_models
|
||||
from trove.backup import models as backup_models
|
||||
from trove.extensions.security_group import models as secgrp_models
|
||||
from trove.configuration import models as configurations_models
|
||||
|
||||
model_modules = [
|
||||
base_models,
|
||||
@ -59,6 +60,7 @@ def configure_db(options, models_mapper=None):
|
||||
quota_models,
|
||||
backup_models,
|
||||
secgrp_models,
|
||||
configurations_models,
|
||||
]
|
||||
|
||||
models = {}
|
||||
|
@ -220,7 +220,8 @@ class API(proxy.RpcProxy):
|
||||
|
||||
def prepare(self, memory_mb, packages, databases, users,
|
||||
device_path='/dev/vdb', mount_point='/mnt/volume',
|
||||
backup_info=None, config_contents=None, root_password=None):
|
||||
backup_info=None, config_contents=None, root_password=None,
|
||||
overrides=None):
|
||||
"""Make an asynchronous call to prepare the guest
|
||||
as a database container optionally includes a backup id for restores
|
||||
"""
|
||||
@ -229,7 +230,8 @@ class API(proxy.RpcProxy):
|
||||
"prepare", packages=packages, databases=databases,
|
||||
memory_mb=memory_mb, users=users, device_path=device_path,
|
||||
mount_point=mount_point, backup_info=backup_info,
|
||||
config_contents=config_contents, root_password=root_password)
|
||||
config_contents=config_contents, root_password=root_password,
|
||||
overrides=overrides)
|
||||
|
||||
def restart(self):
|
||||
"""Restart the MySQL server."""
|
||||
@ -301,3 +303,13 @@ class API(proxy.RpcProxy):
|
||||
'device': device_path, 'id': self.id})
|
||||
self._call("resize_fs", AGENT_LOW_TIMEOUT, device_path=device_path,
|
||||
mount_point=mount_point)
|
||||
|
||||
def update_overrides(self, overrides, remove=False):
|
||||
LOG.debug(_("Updating overrides on Instance %s"), self.id)
|
||||
LOG.debug(_("Updating overrides values %s") % overrides)
|
||||
self._cast("update_overrides", overrides=overrides, remove=remove)
|
||||
|
||||
def apply_overrides(self, overrides):
|
||||
LOG.debug(_("Applying overrides on Instance %s"), self.id)
|
||||
LOG.debug(_("Applying overrides values %s") % overrides)
|
||||
self._cast("apply_overrides", overrides=overrides)
|
||||
|
@ -425,6 +425,25 @@ class DropUser(object):
|
||||
return "DROP USER `%s`@`%s`;" % (self.user, self.host)
|
||||
|
||||
|
||||
class SetServerVariable(object):
|
||||
|
||||
def __init__(self, key, value):
|
||||
self.key = key
|
||||
self.value = value
|
||||
|
||||
def __repr__(self):
|
||||
return str(self)
|
||||
|
||||
def __str__(self):
|
||||
if self.value is True:
|
||||
return "SET GLOBAL %s=%s" % (self.key, 1)
|
||||
elif self.value is False:
|
||||
return "SET GLOBAL %s=%s" % (self.key, 0)
|
||||
elif self.value is None:
|
||||
return "SET GLOBAL %s" % (self.key)
|
||||
else:
|
||||
return "SET GLOBAL %s=%s" % (self.key, self.value)
|
||||
|
||||
### Miscellaneous queries that need no parameters.
|
||||
|
||||
FLUSH = "FLUSH PRIVILEGES;"
|
||||
|
@ -105,7 +105,7 @@ class Manager(periodic_task.PeriodicTasks):
|
||||
|
||||
def prepare(self, context, packages, databases, memory_mb, users,
|
||||
device_path=None, mount_point=None, backup_info=None,
|
||||
config_contents=None, root_password=None):
|
||||
config_contents=None, root_password=None, overrides=None):
|
||||
"""Makes ready DBAAS on a Guest container."""
|
||||
MySqlAppStatus.get().begin_install()
|
||||
# status end_mysql_install set with secure()
|
||||
@ -127,7 +127,7 @@ class Manager(periodic_task.PeriodicTasks):
|
||||
self._perform_restore(backup_info, context,
|
||||
CONF.mount_point, app)
|
||||
LOG.info(_("Securing mysql now."))
|
||||
app.secure(config_contents)
|
||||
app.secure(config_contents, overrides)
|
||||
enable_root_on_restore = (backup_info and
|
||||
MySqlAdmin().is_root_enabled())
|
||||
if root_password and not backup_info:
|
||||
@ -190,3 +190,11 @@ class Manager(periodic_task.PeriodicTasks):
|
||||
device = volume.VolumeDevice(device_path)
|
||||
device.resize_fs(mount_point)
|
||||
LOG.debug(_("Resized the filesystem"))
|
||||
|
||||
def update_overrides(self, context, overrides, remove=False):
|
||||
app = MySqlApp(MySqlAppStatus.get())
|
||||
app.update_overrides(overrides, remove=remove)
|
||||
|
||||
def apply_overrides(self, context, overrides):
|
||||
app = MySqlApp(MySqlAppStatus.get())
|
||||
app.apply_overrides(overrides)
|
||||
|
@ -48,6 +48,7 @@ TMP_MYCNF = "/tmp/my.cnf.tmp"
|
||||
MYSQL_BASE_DIR = "/var/lib/mysql"
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
INCLUDE_MARKER_OPERATORS = {
|
||||
True: ">=",
|
||||
False: ">"
|
||||
@ -56,6 +57,8 @@ INCLUDE_MARKER_OPERATORS = {
|
||||
MYSQL_CONFIG = "/etc/mysql/my.cnf"
|
||||
MYSQL_SERVICE_CANDIDATES = ["mysql", "mysqld", "mysql-server"]
|
||||
MYSQL_BIN_CANDIDATES = ["/usr/sbin/mysqld", "/usr/libexec/mysqld"]
|
||||
MYCNF_OVERRIDES = "/etc/mysql/conf.d/overrides.cnf"
|
||||
MYCNF_OVERRIDES_TMP = "/tmp/overrides.cnf.tmp"
|
||||
|
||||
|
||||
# Create a package impl
|
||||
@ -348,7 +351,7 @@ class MySqlAdmin(object):
|
||||
user = models.MySQLUser()
|
||||
try:
|
||||
user.name = username # Could possibly throw a BadRequest here.
|
||||
except exceptions.ValueError as ve:
|
||||
except exception.ValueError as ve:
|
||||
raise exception.BadRequest(_("Username %(user)s is not valid"
|
||||
": %(reason)s") %
|
||||
{'user': username, 'reason': ve.message}
|
||||
@ -597,7 +600,7 @@ class MySqlApp(object):
|
||||
def complete_install_or_restart(self):
|
||||
self.status.end_install_or_restart()
|
||||
|
||||
def secure(self, config_contents):
|
||||
def secure(self, config_contents, overrides):
|
||||
LOG.info(_("Generating admin password..."))
|
||||
admin_password = utils.generate_random_password()
|
||||
clear_expired_password()
|
||||
@ -608,7 +611,7 @@ class MySqlApp(object):
|
||||
self._create_admin_user(client, admin_password)
|
||||
|
||||
self.stop_db()
|
||||
self._write_mycnf(admin_password, config_contents)
|
||||
self._write_mycnf(admin_password, config_contents, overrides)
|
||||
self.start_mysql()
|
||||
|
||||
LOG.info(_("Dbaas secure complete."))
|
||||
@ -691,10 +694,41 @@ class MySqlApp(object):
|
||||
finally:
|
||||
self.status.end_install_or_restart()
|
||||
|
||||
def update_overrides(self, overrides_file, remove=False):
|
||||
"""
|
||||
This function will either update or remove the MySQL overrides.cnf file
|
||||
If remove is set to True the function will remove the overrides file.
|
||||
|
||||
:param overrides:
|
||||
:param remove:
|
||||
:return:
|
||||
"""
|
||||
|
||||
if overrides_file:
|
||||
LOG.debug(_("writing new overrides.cnf config file"))
|
||||
self._write_config_overrides(overrides_file)
|
||||
if remove:
|
||||
LOG.debug(_("removing overrides.cnf config file"))
|
||||
self._remove_overrides()
|
||||
|
||||
def apply_overrides(self, overrides):
|
||||
LOG.debug(_("applying overrides to mysql"))
|
||||
with LocalSqlClient(get_engine()) as client:
|
||||
LOG.debug(_("updating overrides values in running daemon"))
|
||||
for k, v in overrides.iteritems():
|
||||
q = sql_query.SetServerVariable(key=k, value=v)
|
||||
t = text(str(q))
|
||||
try:
|
||||
client.execute(t)
|
||||
except exc.OperationalError:
|
||||
output = {'key': k, 'value': v}
|
||||
LOG.exception(_("Unable to set %(key)s with value "
|
||||
"%(value)s") % output)
|
||||
|
||||
def _replace_mycnf_with_template(self, template_path, original_path):
|
||||
LOG.debug("replacing the mycnf with template")
|
||||
LOG.debug("template_path(%s) original_path(%s)"
|
||||
% (template_path, original_path))
|
||||
LOG.debug(_("replacing the mycnf with template"))
|
||||
LOG.debug(_("template_path(%(template)s) original_path(%(origin)s)")
|
||||
% {"template": template_path, "origin": original_path})
|
||||
if os.path.isfile(template_path):
|
||||
if os.path.isfile(original_path):
|
||||
utils.execute_with_timeout(
|
||||
@ -739,7 +773,7 @@ class MySqlApp(object):
|
||||
if "No such file or directory" not in str(pe):
|
||||
raise
|
||||
|
||||
def _write_mycnf(self, admin_password, config_contents):
|
||||
def _write_mycnf(self, admin_password, config_contents, overrides=None):
|
||||
"""
|
||||
Install the set of mysql my.cnf templates.
|
||||
Update the os_admin user and password to the my.cnf
|
||||
@ -762,6 +796,28 @@ class MySqlApp(object):
|
||||
|
||||
self.wipe_ib_logfiles()
|
||||
|
||||
# write configuration file overrides
|
||||
if overrides:
|
||||
self._write_config_overrides(overrides)
|
||||
|
||||
def _write_config_overrides(self, overrideValues):
|
||||
LOG.info(_("Writing new temp overrides.cnf file."))
|
||||
|
||||
with open(MYCNF_OVERRIDES_TMP, 'w') as overrides:
|
||||
overrides.write(overrideValues)
|
||||
LOG.info(_("Moving overrides.cnf into correct location."))
|
||||
utils.execute_with_timeout("sudo", "mv", MYCNF_OVERRIDES_TMP,
|
||||
MYCNF_OVERRIDES)
|
||||
|
||||
LOG.info(_("Setting permissions on overrides.cnf"))
|
||||
utils.execute_with_timeout("sudo", "chmod", "0711",
|
||||
MYCNF_OVERRIDES)
|
||||
|
||||
def _remove_overrides(self):
|
||||
LOG.info(_("Removing overrides configuration file"))
|
||||
if os.path.exists(MYCNF_OVERRIDES):
|
||||
utils.execute_with_timeout("sudo", "rm", MYCNF_OVERRIDES)
|
||||
|
||||
def start_mysql(self, update_db=False):
|
||||
LOG.info(_("Starting mysql..."))
|
||||
# This is the site of all the trouble in the restart tests.
|
||||
|
@ -1,7 +1,8 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2010-2011 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
# Copyright 2010-2011 OpenStack Foundation
|
||||
# Copyright 2013-2014 Rackspace Hosting
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
@ -22,12 +23,15 @@ from datetime import datetime
|
||||
from novaclient import exceptions as nova_exceptions
|
||||
from trove.common import cfg
|
||||
from trove.common import exception
|
||||
import trove.common.instance as rd_instance
|
||||
from trove.common import template
|
||||
from trove.common.configurations import do_configs_require_restart
|
||||
import trove.common.instance as tr_instance
|
||||
from trove.common.remote import create_dns_client
|
||||
from trove.common.remote import create_guest_client
|
||||
from trove.common.remote import create_nova_client
|
||||
from trove.common.remote import create_cinder_client
|
||||
from trove.common import utils
|
||||
from trove.configuration.models import Configuration
|
||||
from trove.extensions.security_group.models import SecurityGroup
|
||||
from trove.db import get_db_api
|
||||
from trove.db import models as dbmodels
|
||||
@ -74,6 +78,7 @@ class InstanceStatus(object):
|
||||
BACKUP = "BACKUP"
|
||||
SHUTDOWN = "SHUTDOWN"
|
||||
ERROR = "ERROR"
|
||||
RESTART_REQUIRED = "RESTART_REQUIRED"
|
||||
|
||||
|
||||
def validate_volume_size(size):
|
||||
@ -226,6 +231,8 @@ class SimpleInstance(object):
|
||||
return InstanceStatus.REBOOT
|
||||
if 'RESIZING' == ACTION:
|
||||
return InstanceStatus.RESIZE
|
||||
if 'RESTART_REQUIRED' == ACTION:
|
||||
return InstanceStatus.RESTART_REQUIRED
|
||||
|
||||
### Check for server status.
|
||||
if self.db_info.server_status in ["BUILD", "ERROR", "REBOOT",
|
||||
@ -254,10 +261,10 @@ class SimpleInstance(object):
|
||||
|
||||
### Check against the service status.
|
||||
# The service is only paused during a reboot.
|
||||
if rd_instance.ServiceStatuses.PAUSED == self.service_status.status:
|
||||
if tr_instance.ServiceStatuses.PAUSED == self.service_status.status:
|
||||
return InstanceStatus.REBOOT
|
||||
# If the service status is NEW, then we are building.
|
||||
if rd_instance.ServiceStatuses.NEW == self.service_status.status:
|
||||
if tr_instance.ServiceStatuses.NEW == self.service_status.status:
|
||||
return InstanceStatus.BUILD
|
||||
|
||||
# For everything else we can look at the service status mapping.
|
||||
@ -287,6 +294,12 @@ class SimpleInstance(object):
|
||||
def root_password(self):
|
||||
return self.root_pass
|
||||
|
||||
@property
|
||||
def configuration(self):
|
||||
if self.db_info.configuration_id is not None:
|
||||
return Configuration.load(self.context,
|
||||
self.db_info.configuration_id)
|
||||
|
||||
|
||||
class DetailInstance(SimpleInstance):
|
||||
"""A detailed view of an Instnace.
|
||||
@ -407,7 +420,8 @@ class BaseInstance(SimpleInstance):
|
||||
LOG.debug(_(" ... deleting compute id = %s") %
|
||||
self.db_info.compute_instance_id)
|
||||
LOG.debug(_(" ... setting status to DELETING."))
|
||||
self.update_db(task_status=InstanceTasks.DELETING)
|
||||
self.update_db(task_status=InstanceTasks.DELETING,
|
||||
configuration_id=None)
|
||||
task_api.API(self.context).delete_instance(self.id)
|
||||
|
||||
deltas = {'instances': -1}
|
||||
@ -458,7 +472,7 @@ class BaseInstance(SimpleInstance):
|
||||
|
||||
def set_servicestatus_deleted(self):
|
||||
del_instance = InstanceServiceStatus.find_by(instance_id=self.id)
|
||||
del_instance.set_status(rd_instance.ServiceStatuses.DELETED)
|
||||
del_instance.set_status(tr_instance.ServiceStatuses.DELETED)
|
||||
del_instance.save()
|
||||
|
||||
@property
|
||||
@ -491,7 +505,7 @@ class Instance(BuiltInstance):
|
||||
@classmethod
|
||||
def create(cls, context, name, flavor_id, image_id, databases, users,
|
||||
datastore, datastore_version, volume_size, backup_id,
|
||||
availability_zone=None, nics=None):
|
||||
availability_zone=None, nics=None, configuration_id=None):
|
||||
|
||||
client = create_nova_client(context)
|
||||
try:
|
||||
@ -533,14 +547,21 @@ class Instance(BuiltInstance):
|
||||
volume_size=volume_size,
|
||||
datastore_version_id=
|
||||
datastore_version.id,
|
||||
task_status=InstanceTasks.BUILDING)
|
||||
task_status=InstanceTasks.BUILDING,
|
||||
configuration_id=configuration_id)
|
||||
LOG.debug(_("Tenant %(tenant)s created new "
|
||||
"Trove instance %(db)s...") %
|
||||
{'tenant': context.tenant, 'db': db_info.id})
|
||||
|
||||
# if a configuration group is associated with an instance,
|
||||
# generate an overrides dict to pass into the instance creation
|
||||
# method
|
||||
|
||||
overrides = Configuration.get_configuration_overrides(
|
||||
context, configuration_id)
|
||||
service_status = InstanceServiceStatus.create(
|
||||
instance_id=db_info.id,
|
||||
status=rd_instance.ServiceStatuses.NEW)
|
||||
status=tr_instance.ServiceStatuses.NEW)
|
||||
|
||||
if CONF.trove_dns_support:
|
||||
dns_client = create_dns_client(context)
|
||||
@ -558,7 +579,9 @@ class Instance(BuiltInstance):
|
||||
datastore_version.packages,
|
||||
volume_size, backup_id,
|
||||
availability_zone,
|
||||
root_password, nics)
|
||||
root_password,
|
||||
nics,
|
||||
overrides)
|
||||
|
||||
return SimpleInstance(context, db_info, service_status,
|
||||
root_password)
|
||||
@ -567,6 +590,17 @@ class Instance(BuiltInstance):
|
||||
deltas,
|
||||
_create_resources)
|
||||
|
||||
def get_flavor(self):
|
||||
client = create_nova_client(self.context)
|
||||
return client.flavors.get(self.flavor_id)
|
||||
|
||||
def get_default_configration_template(self):
|
||||
flavor = self.get_flavor()
|
||||
LOG.debug("flavor: %s" % flavor)
|
||||
config = template.SingleInstanceConfigTemplate(
|
||||
self.ds_version.manager, flavor, id)
|
||||
return config.render_dict()
|
||||
|
||||
def resize_flavor(self, new_flavor_id):
|
||||
self.validate_can_perform_action()
|
||||
LOG.debug("resizing instance %s flavor to %s"
|
||||
@ -654,21 +688,73 @@ class Instance(BuiltInstance):
|
||||
"""
|
||||
Raises exception if an instance action cannot currently be performed.
|
||||
"""
|
||||
# cases where action cannot be performed
|
||||
if self.db_info.server_status != 'ACTIVE':
|
||||
status = self.db_info.server_status
|
||||
elif self.db_info.task_status != InstanceTasks.NONE:
|
||||
elif (self.db_info.task_status != InstanceTasks.NONE and
|
||||
self.db_info.task_status != InstanceTasks.RESTART_REQUIRED):
|
||||
status = self.db_info.task_status
|
||||
elif not self.service_status.status.action_is_allowed:
|
||||
status = self.status
|
||||
elif Backup.running(self.id):
|
||||
status = InstanceStatus.BACKUP
|
||||
else:
|
||||
# action can be performed
|
||||
return
|
||||
|
||||
msg = ("Instance is not currently available for an action to be "
|
||||
"performed (status was %s)." % status)
|
||||
LOG.error(msg)
|
||||
raise exception.UnprocessableEntity(msg)
|
||||
|
||||
def unassign_configuration(self):
|
||||
LOG.debug(_("Unassigning the configuration from the instance %s")
|
||||
% self.id)
|
||||
LOG.debug(_("Unassigning the configuration id %s")
|
||||
% self.configuration.id)
|
||||
if self.configuration and self.configuration.id:
|
||||
flavor = self.get_flavor()
|
||||
config_id = self.configuration.id
|
||||
task_api.API(self.context).unassign_configuration(self.id,
|
||||
flavor,
|
||||
config_id)
|
||||
else:
|
||||
LOG.debug("no configuration found on instance skipping.")
|
||||
|
||||
def assign_configuration(self, configuration_id):
|
||||
try:
|
||||
configuration = Configuration.load(self.context, configuration_id)
|
||||
except exception.ModelNotFoundError:
|
||||
raise exception.NotFound(
|
||||
message='Configuration group id: %s could not be found'
|
||||
% configuration_id)
|
||||
|
||||
config_ds_v = configuration.datastore_version_id
|
||||
inst_ds_v = self.db_info.datastore_version_id
|
||||
if (config_ds_v != inst_ds_v):
|
||||
raise exception.ConfigurationDatastoreNotMatchInstance(
|
||||
config_datastore_version=config_ds_v,
|
||||
instance_datastore_version=inst_ds_v)
|
||||
|
||||
overrides = Configuration.get_configuration_overrides(
|
||||
self.context, configuration.id)
|
||||
|
||||
LOG.info(overrides)
|
||||
|
||||
self.update_overrides(overrides)
|
||||
self.update_db(configuration_id=configuration.id)
|
||||
|
||||
def update_overrides(self, overrides):
|
||||
LOG.debug(_("Updating or removing overrides for instance %s")
|
||||
% self.id)
|
||||
need_restart = do_configs_require_restart(
|
||||
overrides, datastore_manager=self.ds_version.manager)
|
||||
LOG.debug(_("config overrides has non-dynamic settings, "
|
||||
"requires a restart: %s") % need_restart)
|
||||
if need_restart:
|
||||
self.update_db(task_status=InstanceTasks.RESTART_REQUIRED)
|
||||
task_api.API(self.context).update_overrides(self.id, overrides)
|
||||
|
||||
|
||||
def create_server_list_matcher(server_list):
|
||||
# Returns a method which finds a server from the given list.
|
||||
@ -765,7 +851,7 @@ class DBInstance(dbmodels.DatabaseModelBase):
|
||||
_data_fields = ['name', 'created', 'compute_instance_id',
|
||||
'task_id', 'task_description', 'task_start_time',
|
||||
'volume_id', 'deleted', 'tenant_id',
|
||||
'datastore_version_id']
|
||||
'datastore_version_id', 'configuration_id']
|
||||
|
||||
def __init__(self, task_status, **kwargs):
|
||||
kwargs["task_id"] = task_status.code
|
||||
@ -803,11 +889,11 @@ class InstanceServiceStatus(dbmodels.DatabaseModelBase):
|
||||
def _validate(self, errors):
|
||||
if self.status is None:
|
||||
errors['status'] = "Cannot be none."
|
||||
if rd_instance.ServiceStatus.from_code(self.status_id) is None:
|
||||
if tr_instance.ServiceStatus.from_code(self.status_id) is None:
|
||||
errors['status_id'] = "Not valid."
|
||||
|
||||
def get_status(self):
|
||||
return rd_instance.ServiceStatus.from_code(self.status_id)
|
||||
return tr_instance.ServiceStatus.from_code(self.status_id)
|
||||
|
||||
def set_status(self, value):
|
||||
self.status_id = value.code
|
||||
@ -827,4 +913,4 @@ def persisted_models():
|
||||
}
|
||||
|
||||
|
||||
MYSQL_RESPONSIVE_STATUSES = [rd_instance.ServiceStatuses.RUNNING]
|
||||
MYSQL_RESPONSIVE_STATUSES = [tr_instance.ServiceStatuses.RUNNING]
|
||||
|
@ -38,6 +38,7 @@ LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class InstanceController(wsgi.Controller):
|
||||
|
||||
"""Controller for instance functionality"""
|
||||
schemas = apischema.instance.copy()
|
||||
if not CONF.trove_volume_support:
|
||||
@ -189,6 +190,8 @@ class InstanceController(wsgi.Controller):
|
||||
name = body['instance']['name']
|
||||
flavor_ref = body['instance']['flavorRef']
|
||||
flavor_id = utils.get_id_from_href(flavor_ref)
|
||||
|
||||
configuration = self._configuration_parse(context, body)
|
||||
databases = populate_validated_databases(
|
||||
body['instance'].get('databases', []))
|
||||
database_names = [database.get('_name', '') for database in databases]
|
||||
@ -224,7 +227,50 @@ class InstanceController(wsgi.Controller):
|
||||
image_id, databases, users,
|
||||
datastore, datastore_version,
|
||||
volume_size, backup_id,
|
||||
availability_zone, nics)
|
||||
availability_zone, nics,
|
||||
configuration)
|
||||
|
||||
view = views.InstanceDetailView(instance, req=req)
|
||||
return wsgi.Result(view.data(), 200)
|
||||
|
||||
def _configuration_parse(self, context, body):
|
||||
if 'configuration' in body['instance']:
|
||||
configuration_ref = body['instance']['configuration']
|
||||
if configuration_ref:
|
||||
configuration_id = utils.get_id_from_href(configuration_ref)
|
||||
return configuration_id
|
||||
|
||||
def update(self, req, id, body, tenant_id):
|
||||
"""Updates the instance to attach/detach configuration."""
|
||||
LOG.info(_("Updating instance for tenant id %s") % tenant_id)
|
||||
LOG.info(_("req: %s") % req)
|
||||
LOG.info(_("body: %s") % body)
|
||||
context = req.environ[wsgi.CONTEXT_KEY]
|
||||
|
||||
instance = models.Instance.load(context, id)
|
||||
|
||||
# if configuration is set, then we will update the instance to use
|
||||
# the new configuration. If configuration is empty, we want to
|
||||
# disassociate the instance from the configuration group and remove the
|
||||
# active overrides file.
|
||||
|
||||
configuration_id = self._configuration_parse(context, body)
|
||||
|
||||
if configuration_id:
|
||||
instance.assign_configuration(configuration_id)
|
||||
else:
|
||||
instance.unassign_configuration()
|
||||
return wsgi.Result(None, 202)
|
||||
|
||||
def configuration(self, req, tenant_id, id):
|
||||
"""
|
||||
Returns the default configuration template applied to the instance.
|
||||
"""
|
||||
LOG.debug("getting default configuration for the instance(%s)" % id)
|
||||
context = req.environ[wsgi.CONTEXT_KEY]
|
||||
instance = models.Instance.load(context, id)
|
||||
LOG.debug("server: %s" % instance)
|
||||
config = instance.get_default_configration_template()
|
||||
LOG.debug("default config for instance is: %s" % config)
|
||||
return wsgi.Result(views.DefaultConfigurationView(
|
||||
config).data(), 200)
|
||||
|
@ -70,6 +70,8 @@ class InstanceTasks(object):
|
||||
RESIZING = InstanceTask(0x04, 'RESIZING', 'Resizing the instance.')
|
||||
BUILDING = InstanceTask(0x05, 'BUILDING', 'The instance is building.')
|
||||
MIGRATING = InstanceTask(0x06, 'MIGRATING', 'Migrating the instance.')
|
||||
RESTART_REQUIRED = InstanceTask(0x07, 'RESTART_REQUIRED',
|
||||
'Instance requires a restart.')
|
||||
|
||||
BUILDING_ERROR_DNS = InstanceTask(0x50, 'BUILDING', 'Build error: DNS.',
|
||||
is_error=True)
|
||||
|
@ -75,6 +75,9 @@ class InstanceDetailView(InstanceView):
|
||||
result['instance']['datastore']['version'] = (self.instance.
|
||||
datastore_version.name)
|
||||
|
||||
if self.instance.configuration is not None:
|
||||
result['instance']['configuration'] = (self.
|
||||
_build_configuration_info())
|
||||
if self.instance.hostname:
|
||||
result['instance']['hostname'] = self.instance.hostname
|
||||
else:
|
||||
@ -96,6 +99,14 @@ class InstanceDetailView(InstanceView):
|
||||
|
||||
return result
|
||||
|
||||
def _build_configuration_info(self):
|
||||
return {
|
||||
"id": self.instance.configuration.id,
|
||||
"name": self.instance.configuration.name,
|
||||
"links": create_links("configurations", self.req,
|
||||
self.instance.configuration.id)
|
||||
}
|
||||
|
||||
|
||||
class InstancesView(object):
|
||||
"""Shows a list of SimpleInstance objects."""
|
||||
@ -114,3 +125,14 @@ class InstancesView(object):
|
||||
def data_for_instance(self, instance):
|
||||
view = InstanceView(instance, req=self.req)
|
||||
return view.data()['instance']
|
||||
|
||||
|
||||
class DefaultConfigurationView(object):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
|
||||
def data(self):
|
||||
config_dict = {}
|
||||
for key, val in self.config:
|
||||
config_dict[key] = val
|
||||
return {"instance": {"configuration": config_dict}}
|
||||
|
@ -22,6 +22,7 @@ Routes all the requests to the task manager.
|
||||
from trove.common import cfg
|
||||
from trove.openstack.common.rpc import proxy
|
||||
from trove.openstack.common import log as logging
|
||||
from trove.openstack.common.gettextutils import _
|
||||
|
||||
|
||||
CONF = cfg.CONF
|
||||
@ -102,7 +103,8 @@ class API(proxy.RpcProxy):
|
||||
def create_instance(self, instance_id, name, flavor,
|
||||
image_id, databases, users, datastore_manager,
|
||||
packages, volume_size, backup_id=None,
|
||||
availability_zone=None, root_password=None, nics=None):
|
||||
availability_zone=None, root_password=None,
|
||||
nics=None, overrides=None):
|
||||
LOG.debug("Making async call to create instance %s " % instance_id)
|
||||
self.cast(self.context,
|
||||
self.make_msg("create_instance",
|
||||
@ -117,4 +119,25 @@ class API(proxy.RpcProxy):
|
||||
volume_size=volume_size,
|
||||
backup_id=backup_id,
|
||||
availability_zone=availability_zone,
|
||||
root_password=root_password, nics=nics))
|
||||
root_password=root_password,
|
||||
nics=nics,
|
||||
overrides=overrides))
|
||||
|
||||
def update_overrides(self, instance_id, overrides=None):
|
||||
LOG.debug(_("Making async call to update configuration overrides for "
|
||||
"instance %s") % instance_id)
|
||||
|
||||
self.cast(self.context,
|
||||
self.make_msg("update_overrides",
|
||||
instance_id=instance_id,
|
||||
overrides=overrides))
|
||||
|
||||
def unassign_configuration(self, instance_id, flavor, configuration_id):
|
||||
LOG.debug(_("Making async call to unassign configuration for "
|
||||
"instance %s") % instance_id)
|
||||
|
||||
self.cast(self.context,
|
||||
self.make_msg("unassign_configuration",
|
||||
instance_id=instance_id,
|
||||
flavor=self._transform_obj(flavor),
|
||||
configuration_id=configuration_id))
|
||||
|
@ -83,12 +83,22 @@ class Manager(periodic_task.PeriodicTasks):
|
||||
def create_instance(self, context, instance_id, name, flavor,
|
||||
image_id, databases, users, datastore_manager,
|
||||
packages, volume_size, backup_id, availability_zone,
|
||||
root_password, nics):
|
||||
root_password, nics, overrides):
|
||||
instance_tasks = FreshInstanceTasks.load(context, instance_id)
|
||||
instance_tasks.create_instance(flavor, image_id, databases, users,
|
||||
datastore_manager, packages,
|
||||
volume_size, backup_id,
|
||||
availability_zone, root_password, nics)
|
||||
availability_zone, root_password, nics,
|
||||
overrides)
|
||||
|
||||
def update_overrides(self, context, instance_id, overrides):
|
||||
instance_tasks = models.BuiltInstanceTasks.load(context, instance_id)
|
||||
instance_tasks.update_overrides(overrides)
|
||||
|
||||
def unassign_configuration(self, context, instance_id, flavor,
|
||||
configuration_id):
|
||||
instance_tasks = models.BuiltInstanceTasks.load(context, instance_id)
|
||||
instance_tasks.unassign_configuration(flavor, configuration_id)
|
||||
|
||||
if CONF.exists_notification_transformer:
|
||||
@periodic_task.periodic_task(
|
||||
|
@ -12,6 +12,7 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import re
|
||||
import traceback
|
||||
import os.path
|
||||
|
||||
@ -24,6 +25,7 @@ from trove.common import cfg
|
||||
from trove.common import template
|
||||
from trove.common import utils
|
||||
from trove.common.utils import try_recover
|
||||
from trove.common.configurations import do_configs_require_restart
|
||||
from trove.common.exception import GuestError
|
||||
from trove.common.exception import GuestTimeout
|
||||
from trove.common.exception import PollTimeOut
|
||||
@ -35,6 +37,7 @@ from trove.common.remote import create_dns_client
|
||||
from trove.common.remote import create_heat_client
|
||||
from trove.common.remote import create_cinder_client
|
||||
from trove.extensions.mysql import models as mysql_models
|
||||
from trove.configuration.models import Configuration
|
||||
from trove.extensions.security_group.models import SecurityGroup
|
||||
from trove.extensions.security_group.models import SecurityGroupRule
|
||||
from swiftclient.client import ClientException
|
||||
@ -143,11 +146,26 @@ class ConfigurationMixin(object):
|
||||
config.render()
|
||||
return config
|
||||
|
||||
def _render_override_config(self, datastore_manager, flavor, instance_id,
|
||||
overrides=None):
|
||||
config = template.OverrideConfigTemplate(
|
||||
datastore_manager, flavor, instance_id)
|
||||
config.render(overrides=overrides)
|
||||
return config
|
||||
|
||||
def _render_config_dict(self, datastore_manager, flavor, instance_id):
|
||||
config = template.SingleInstanceConfigTemplate(
|
||||
datastore_manager, flavor, instance_id)
|
||||
ret = config.render_dict()
|
||||
LOG.debug(_("the default template dict of mysqld section: %s") % ret)
|
||||
return ret
|
||||
|
||||
|
||||
class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
|
||||
def create_instance(self, flavor, image_id, databases, users,
|
||||
datastore_manager, packages, volume_size,
|
||||
backup_id, availability_zone, root_password, nics):
|
||||
backup_id, availability_zone, root_password, nics,
|
||||
overrides):
|
||||
|
||||
LOG.debug(_("begin create_instance for id: %s") % self.id)
|
||||
security_groups = None
|
||||
@ -197,6 +215,10 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
|
||||
nics)
|
||||
|
||||
config = self._render_config(datastore_manager, flavor, self.id)
|
||||
config_overrides = self._render_override_config(datastore_manager,
|
||||
None,
|
||||
self.id,
|
||||
overrides=overrides)
|
||||
|
||||
backup_info = None
|
||||
if backup_id is not None:
|
||||
@ -208,7 +230,8 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
|
||||
}
|
||||
self._guest_prepare(flavor['ram'], volume_info,
|
||||
packages, databases, users, backup_info,
|
||||
config.config_contents, root_password)
|
||||
config.config_contents, root_password,
|
||||
config_overrides.config_contents)
|
||||
|
||||
if root_password:
|
||||
self.report_root_enabled()
|
||||
@ -554,7 +577,8 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
|
||||
|
||||
def _guest_prepare(self, flavor_ram, volume_info,
|
||||
packages, databases, users, backup_info=None,
|
||||
config_contents=None, root_password=None):
|
||||
config_contents=None, root_password=None,
|
||||
overrides=None):
|
||||
LOG.info(_("Entering guest_prepare"))
|
||||
# Now wait for the response from the create to do additional work
|
||||
self.guest.prepare(flavor_ram, packages, databases, users,
|
||||
@ -562,7 +586,8 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
|
||||
mount_point=volume_info['mount_point'],
|
||||
backup_info=backup_info,
|
||||
config_contents=config_contents,
|
||||
root_password=root_password)
|
||||
root_password=root_password,
|
||||
overrides=overrides)
|
||||
|
||||
def _create_dns_entry(self):
|
||||
LOG.debug(_("%(gt)s: Creating dns entry for instance: %(id)s") %
|
||||
@ -756,6 +781,86 @@ class BuiltInstanceTasks(BuiltInstance, NotifyMixin, ConfigurationMixin):
|
||||
LOG.debug(_("Restarting FINALLY %s ") % self.id)
|
||||
self.update_db(task_status=inst_models.InstanceTasks.NONE)
|
||||
|
||||
def update_overrides(self, overrides, remove=False):
|
||||
LOG.debug(_("Updating configuration overrides on instance %s")
|
||||
% self.id)
|
||||
LOG.debug(_("overrides: %s") % overrides)
|
||||
LOG.debug(_("self.ds_version: %s") % self.ds_version.__dict__)
|
||||
# todo(cp16net) How do we know what datastore type we have?
|
||||
need_restart = do_configs_require_restart(
|
||||
overrides, datastore_manager=self.ds_version.manager)
|
||||
LOG.debug(_("do we need a restart?: %s") % need_restart)
|
||||
if need_restart:
|
||||
status = inst_models.InstanceTasks.RESTART_REQUIRED
|
||||
self.update_db(task_status=status)
|
||||
|
||||
config_overrides = self._render_override_config(
|
||||
self.ds_version.manager,
|
||||
None,
|
||||
self.id,
|
||||
overrides=overrides)
|
||||
try:
|
||||
self.guest.update_overrides(config_overrides.config_contents,
|
||||
remove=remove)
|
||||
self.guest.apply_overrides(overrides)
|
||||
LOG.debug(_("Configuration overrides update successful."))
|
||||
except GuestError:
|
||||
LOG.error(_("Failed to update configuration overrides."))
|
||||
|
||||
def unassign_configuration(self, flavor, configuration_id):
|
||||
LOG.debug(_("Unassigning the configuration from the instance %s")
|
||||
% self.id)
|
||||
LOG.debug(_("Unassigning the configuration id %s")
|
||||
% self.configuration.id)
|
||||
|
||||
def _find_item(items, item_name):
|
||||
LOG.debug(_("items: %s") % items)
|
||||
LOG.debug(_("item_name: %s") % item_name)
|
||||
# find the item in the list
|
||||
for i in items:
|
||||
if i[0] == item_name:
|
||||
return i
|
||||
|
||||
def _convert_value(value):
|
||||
# split the value and the size e.g. 512M=['512','M']
|
||||
pattern = re.compile('(\d+)(\w+)')
|
||||
split = pattern.findall(value)
|
||||
if len(split) < 2:
|
||||
return value
|
||||
digits, size = split
|
||||
conversions = {
|
||||
'K': 1024,
|
||||
'M': 1024 ** 2,
|
||||
'G': 1024 ** 3,
|
||||
}
|
||||
return str(int(digits) * conversions[size])
|
||||
|
||||
default_config = self._render_config_dict(self.ds_version.manager,
|
||||
flavor,
|
||||
self.id)
|
||||
args = {
|
||||
"ds_manager": self.ds_version.manager,
|
||||
"config": default_config,
|
||||
}
|
||||
LOG.debug(_("default %(ds_manager)s section: %(config)s") % args)
|
||||
LOG.debug(_("self.configuration: %s") % self.configuration.__dict__)
|
||||
|
||||
overrides = {}
|
||||
config_items = Configuration.load_items(self.context, configuration_id)
|
||||
for item in config_items:
|
||||
LOG.debug(_("finding item(%s)") % item.__dict__)
|
||||
try:
|
||||
key, val = _find_item(default_config, item.configuration_key)
|
||||
except TypeError:
|
||||
val = None
|
||||
restart_required = inst_models.InstanceTasks.RESTART_REQUIRED
|
||||
self.update_db(task_status=restart_required)
|
||||
if val:
|
||||
overrides[item.configuration_key] = _convert_value(val)
|
||||
LOG.debug(_("setting the default variables in dict: %s") % overrides)
|
||||
self.update_overrides(overrides, remove=True)
|
||||
self.update_db(configuration_id=None)
|
||||
|
||||
def _refresh_compute_server_info(self):
|
||||
"""Refreshes the compute server field."""
|
||||
server = self.nova_client.servers.get(self.server.id)
|
||||
|
@ -12,7 +12,7 @@ datadir = /var/lib/mysql
|
||||
####tmpdir = /tmp
|
||||
tmpdir = /var/tmp
|
||||
pid_file = /var/run/mysqld/mysqld.pid
|
||||
skip-external-locking
|
||||
skip-external-locking = 1
|
||||
key_buffer_size = {{ 50 * flavor['ram']//512 }}M
|
||||
max_allowed_packet = {{ 1 * flavor['ram']//512 }}M
|
||||
thread_stack = 192K
|
||||
@ -45,8 +45,8 @@ local-infile = 0
|
||||
server_id = {{server_id}}
|
||||
|
||||
[mysqldump]
|
||||
quick
|
||||
quote-names
|
||||
quick = 1
|
||||
quote-names = 1
|
||||
max_allowed_packet = 16M
|
||||
|
||||
[isamchk]
|
||||
|
12
trove/templates/mysql/override.config.template
Normal file
12
trove/templates/mysql/override.config.template
Normal file
@ -0,0 +1,12 @@
|
||||
[mysqld]
|
||||
{% for key, value in overrides.iteritems() -%}
|
||||
{%- if value == True -%}
|
||||
{{key}} = 1
|
||||
{%- elif value == False -%}
|
||||
{{key}} = 0
|
||||
{%- elif value == "" -%}
|
||||
{{key}}
|
||||
{%- else -%}
|
||||
{{key}}={{value}}
|
||||
{%- endif %}
|
||||
{% endfor %}
|
224
trove/templates/mysql/validation-rules.json
Normal file
224
trove/templates/mysql/validation-rules.json
Normal file
@ -0,0 +1,224 @@
|
||||
{
|
||||
"configuration-parameters": [
|
||||
{
|
||||
"name": "innodb_file_per_table",
|
||||
"restart_required": true,
|
||||
"max": 1,
|
||||
"min": 0,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "autocommit",
|
||||
"restart_required": false,
|
||||
"max": 1,
|
||||
"min": 0,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "local_infile",
|
||||
"restart_required": false,
|
||||
"max": 1,
|
||||
"min": 0,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "key_buffer_size",
|
||||
"restart_required": false,
|
||||
"max": 4294967296,
|
||||
"min": 0,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "connect_timeout",
|
||||
"restart_required": false,
|
||||
"max": 65535,
|
||||
"min": 1,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "join_buffer_size",
|
||||
"restart_required": false,
|
||||
"max": 4294967296,
|
||||
"min": 0,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "sort_buffer_size",
|
||||
"restart_required": false,
|
||||
"max": 18446744073709547520,
|
||||
"min": 32768,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "innodb_buffer_pool_size",
|
||||
"restart_required": true,
|
||||
"max": 68719476736,
|
||||
"min": 0,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "innodb_flush_log_at_trx_commit",
|
||||
"restart_required": false,
|
||||
"max": 2,
|
||||
"min": 0,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "innodb_log_buffer_size",
|
||||
"restart_required": true,
|
||||
"max": 4294967296,
|
||||
"min": 1048576,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "innodb_open_files",
|
||||
"restart_required": true,
|
||||
"max": 4294967296,
|
||||
"min": 10,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "innodb_thread_concurrency",
|
||||
"restart_required": false,
|
||||
"max": 1000,
|
||||
"min": 0,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "sync_binlog",
|
||||
"restart_required": false,
|
||||
"max": 18446744073709547520,
|
||||
"min": 0,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "auto_increment_increment",
|
||||
"restart_required": false,
|
||||
"max": 65535,
|
||||
"min": 1,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "auto_increment_offset",
|
||||
"restart_required": false,
|
||||
"max": 65535,
|
||||
"min": 1,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "bulk_insert_buffer_size",
|
||||
"restart_required": false,
|
||||
"max": 18446744073709547520,
|
||||
"min": 0,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "expire_logs_days",
|
||||
"restart_required": false,
|
||||
"max": 65535,
|
||||
"min": 1,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "interactive_timeout",
|
||||
"restart_required": false,
|
||||
"max": 65535,
|
||||
"min": 1,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "max_allowed_packet",
|
||||
"restart_required": false,
|
||||
"max": 1073741824,
|
||||
"min": 1024,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "max_connect_errors",
|
||||
"restart_required": false,
|
||||
"max": 18446744073709547520,
|
||||
"min": 1,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "max_connections",
|
||||
"restart_required": false,
|
||||
"max": 65535,
|
||||
"min": 1,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "myisam_sort_buffer_size",
|
||||
"restart_required": false,
|
||||
"max": 18446744073709547520,
|
||||
"min": 4,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "max_user_connections",
|
||||
"restart_required": false,
|
||||
"max": 100000,
|
||||
"min": 1,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "server_id",
|
||||
"restart_required": true,
|
||||
"max": 100000,
|
||||
"min": 1,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "wait_timeout",
|
||||
"restart_required": false,
|
||||
"max": 31536000,
|
||||
"min": 1,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "character_set_client",
|
||||
"restart_required": false,
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"name": "character_set_connection",
|
||||
"restart_required": false,
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"name": "character_set_database",
|
||||
"restart_required": false,
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"name": "character_set_filesystem",
|
||||
"restart_required": false,
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"name": "character_set_results",
|
||||
"restart_required": false,
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"name": "character_set_server",
|
||||
"restart_required": false,
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"name": "collation_connection",
|
||||
"restart_required": false,
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"name": "collation_database",
|
||||
"restart_required": false,
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"name": "collation_server",
|
||||
"restart_required": false,
|
||||
"type": "string"
|
||||
}
|
||||
]
|
||||
}
|
606
trove/tests/api/configurations.py
Normal file
606
trove/tests/api/configurations.py
Normal file
@ -0,0 +1,606 @@
|
||||
# Copyright 2014 Rackspace Hosting
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
import json
|
||||
from datetime import datetime
|
||||
from proboscis import SkipTest
|
||||
from proboscis import test
|
||||
from proboscis.asserts import assert_equal
|
||||
from proboscis.asserts import assert_raises
|
||||
from proboscis.asserts import assert_true
|
||||
from proboscis.asserts import assert_not_equal
|
||||
from proboscis.decorators import time_out
|
||||
from trove.common.utils import poll_until
|
||||
from trove.tests.api.instances import assert_unprocessable
|
||||
from trove.tests.api.instances import InstanceTestInfo
|
||||
from trove.tests.api.instances import instance_info
|
||||
from trove.tests.api.instances import WaitForGuestInstallationToFinish
|
||||
from trove.tests.config import CONFIG
|
||||
from trove.tests.util import create_dbaas_client
|
||||
from trove.tests.util import test_config
|
||||
from trove.tests.util.check import AttrCheck
|
||||
from trove.tests.util.check import CollectionCheck
|
||||
from trove.tests.util.check import TypeCheck
|
||||
from trove.tests.util.mysql import create_mysql_connection
|
||||
from trove.tests.util.users import Requirements
|
||||
from troveclient.compat import exceptions
|
||||
|
||||
|
||||
GROUP = "dbaas.api.configurations"
|
||||
CONFIG_NAME = "test_configuration"
|
||||
CONFIG_DESC = "configuration description"
|
||||
|
||||
configuration_default = None
|
||||
configuration_info = None
|
||||
configuration_href = None
|
||||
configuration_instance = InstanceTestInfo()
|
||||
configuration_instance_id = None
|
||||
sql_variables = [
|
||||
'key_buffer_size',
|
||||
'connect_timeout',
|
||||
'join_buffer_size',
|
||||
]
|
||||
|
||||
|
||||
# helper methods to validate configuration is applied to instance
|
||||
def _execute_query(host, user_name, password, query):
|
||||
print(host, user_name, password, query)
|
||||
with create_mysql_connection(host, user_name, password) as db:
|
||||
result = db.execute(query)
|
||||
return result
|
||||
assert_true(False, "something went wrong in the sql connection")
|
||||
|
||||
|
||||
def _get_address(instance_id):
|
||||
result = instance_info.dbaas_admin.mgmt.instances.show(instance_id)
|
||||
return result.ip[0]
|
||||
|
||||
|
||||
def _test_configuration_is_applied_to_instance(instance, configuration_id):
|
||||
if CONFIG.fake_mode:
|
||||
raise SkipTest("configuration from sql does not work in fake mode")
|
||||
instance_test = instance_info.dbaas.instances.get(instance.id)
|
||||
assert_equal(configuration_id, instance_test.configuration['id'])
|
||||
if configuration_id:
|
||||
testconfig_info = instance_info.dbaas.configurations.get(
|
||||
configuration_id)
|
||||
else:
|
||||
testconfig_info = instance_info.dbaas.instance.configuration(
|
||||
instance.id)
|
||||
testconfig_info['configuration']
|
||||
conf_instances = instance_info.dbaas.configurations.instances(
|
||||
configuration_id)
|
||||
config_instance_ids = [inst.id for inst in conf_instances]
|
||||
assert_true(instance_test.id in config_instance_ids)
|
||||
cfg_names = testconfig_info.values.keys()
|
||||
|
||||
host = _get_address(instance.id)
|
||||
for user in instance.users:
|
||||
username = user['name']
|
||||
password = user['password']
|
||||
concat_variables = "','".join(cfg_names)
|
||||
query = ("show variables where Variable_name "
|
||||
"in ('%s');" % concat_variables)
|
||||
actual_values = _execute_query(host, username, password, query)
|
||||
print("actual_values %s" % actual_values)
|
||||
print("testconfig_info.values %s" % testconfig_info.values)
|
||||
assert_true(len(actual_values) == len(cfg_names))
|
||||
|
||||
# check the configs exist
|
||||
attrcheck = AttrCheck()
|
||||
expected_attrs = [actual_key for actual_key, actual_value in actual_values]
|
||||
attrcheck.attrs_exist(testconfig_info.values, expected_attrs,
|
||||
msg="Configurations parameters")
|
||||
|
||||
def _get_parameter_type(name):
|
||||
instance_info.dbaas.configuration_parameters.get_parameter(
|
||||
instance_info.dbaas_datastore,
|
||||
instance_info.dbaas_datastore_version,
|
||||
name)
|
||||
resp, body = instance_info.dbaas.client.last_response
|
||||
print(resp)
|
||||
print(body)
|
||||
return json.loads(body)['type']
|
||||
|
||||
# check the config values are correct
|
||||
for key, value in actual_values:
|
||||
key_type = _get_parameter_type(key)
|
||||
# mysql returns 'ON' and 'OFF' for True and False respectively
|
||||
if value == 'ON':
|
||||
converted_key_value = (str(key), 1)
|
||||
elif value == 'OFF':
|
||||
converted_key_value = (str(key), 0)
|
||||
else:
|
||||
if key_type == 'integer':
|
||||
value = int(value)
|
||||
converted_key_value = (str(key), value)
|
||||
print("converted_key_value: %s" % str(converted_key_value))
|
||||
assert_true(converted_key_value in testconfig_info.values.items())
|
||||
|
||||
|
||||
@test(depends_on_classes=[WaitForGuestInstallationToFinish], groups=[GROUP])
|
||||
class CreateConfigurations(object):
|
||||
|
||||
@test
|
||||
def test_expected_configurations_parameters(self):
|
||||
"""test get expected configurations parameters"""
|
||||
expected_attrs = ["configuration-parameters"]
|
||||
instance_info.dbaas.configuration_parameters.parameters(
|
||||
instance_info.dbaas_datastore,
|
||||
instance_info.dbaas_datastore_version)
|
||||
resp, body = instance_info.dbaas.client.last_response
|
||||
attrcheck = AttrCheck()
|
||||
config_parameters_dict = json.loads(body)
|
||||
attrcheck.attrs_exist(config_parameters_dict, expected_attrs,
|
||||
msg="Configurations parameters")
|
||||
# sanity check that a few options are in the list
|
||||
config_params_list = config_parameters_dict['configuration-parameters']
|
||||
config_param_keys = []
|
||||
for param in config_params_list:
|
||||
config_param_keys.append(param['name'])
|
||||
expected_config_params = ['key_buffer_size', 'connect_timeout']
|
||||
# check for duplicate configuration parameters
|
||||
msg = "check for duplicate configuration parameters"
|
||||
assert_equal(len(config_param_keys), len(set(config_param_keys)), msg)
|
||||
for expected_config_item in expected_config_params:
|
||||
assert_true(expected_config_item in config_param_keys)
|
||||
|
||||
@test
|
||||
def test_expected_get_configuration_parameter(self):
|
||||
# tests get on a single parameter to verify it has expected attributes
|
||||
param = 'key_buffer_size'
|
||||
expected_config_params = ['name', 'restart_required', 'max',
|
||||
'min', 'type']
|
||||
instance_info.dbaas.configuration_parameters.get_parameter(
|
||||
instance_info.dbaas_datastore,
|
||||
instance_info.dbaas_datastore_version,
|
||||
param)
|
||||
resp, body = instance_info.dbaas.client.last_response
|
||||
print(resp)
|
||||
print(body)
|
||||
attrcheck = AttrCheck()
|
||||
config_parameter_dict = json.loads(body)
|
||||
print(config_parameter_dict)
|
||||
attrcheck.attrs_exist(config_parameter_dict, expected_config_params,
|
||||
msg="Get Configuration parameter")
|
||||
assert_equal(param, config_parameter_dict['name'])
|
||||
|
||||
@test
|
||||
def test_configurations_create_invalid_values(self):
|
||||
"""test create configurations with invalid values"""
|
||||
values = '{"this_is_invalid": 123}'
|
||||
assert_unprocessable(instance_info.dbaas.configurations.create,
|
||||
CONFIG_NAME, values, CONFIG_DESC)
|
||||
|
||||
@test
|
||||
def test_configurations_create_invalid_value_type(self):
|
||||
"""test create configuration with invalild value type"""
|
||||
values = '{"key_buffer_size": "this is a string not int"}'
|
||||
assert_unprocessable(instance_info.dbaas.configurations.create,
|
||||
CONFIG_NAME, values, CONFIG_DESC)
|
||||
|
||||
@test
|
||||
def test_configurations_create_value_out_of_bounds(self):
|
||||
"""test create configuration with value out of bounds"""
|
||||
values = '{"connect_timeout": 1000000}'
|
||||
assert_unprocessable(instance_info.dbaas.configurations.create,
|
||||
CONFIG_NAME, values, CONFIG_DESC)
|
||||
values = '{"connect_timeout": -10}'
|
||||
assert_unprocessable(instance_info.dbaas.configurations.create,
|
||||
CONFIG_NAME, values, CONFIG_DESC)
|
||||
|
||||
@test
|
||||
def test_valid_configurations_create(self):
|
||||
# create a configuration with valid parameters
|
||||
values = ('{"connect_timeout": 120, "local_infile": true, '
|
||||
'"collation_server": "latin1_swedish_ci"}')
|
||||
expected_values = json.loads(values)
|
||||
result = instance_info.dbaas.configurations.create(CONFIG_NAME,
|
||||
values,
|
||||
CONFIG_DESC)
|
||||
resp, body = instance_info.dbaas.client.last_response
|
||||
assert_equal(resp.status, 200)
|
||||
global configuration_info
|
||||
configuration_info = result
|
||||
assert_equal(configuration_info.name, CONFIG_NAME)
|
||||
assert_equal(configuration_info.description, CONFIG_DESC)
|
||||
assert_equal(configuration_info.values, expected_values)
|
||||
|
||||
@test(runs_after=[test_valid_configurations_create])
|
||||
def test_appending_to_existing_configuration(self):
|
||||
# test being able to update and insert new parameter name and values
|
||||
# to an existing configuration
|
||||
values = '{"join_buffer_size": 1048576, "connect_timeout": 60}'
|
||||
instance_info.dbaas.configurations.edit(configuration_info.id,
|
||||
values)
|
||||
resp, body = instance_info.dbaas.client.last_response
|
||||
assert_equal(resp.status, 200)
|
||||
|
||||
|
||||
@test(runs_after=[CreateConfigurations], groups=[GROUP])
|
||||
class AfterConfigurationsCreation(object):
|
||||
|
||||
@test
|
||||
def test_assign_configuration_to_invalid_instance(self):
|
||||
# test assigning to an instance that does not exist
|
||||
invalid_id = "invalid-inst-id"
|
||||
try:
|
||||
instance_info.dbaas.instances.modify(invalid_id,
|
||||
configuration_info.id)
|
||||
except exceptions.NotFound:
|
||||
resp, body = instance_info.dbaas.client.last_response
|
||||
assert_equal(resp.status, 404)
|
||||
|
||||
@test
|
||||
def test_assign_configuration_to_valid_instance(self):
|
||||
# test assigning a configuration to an instance
|
||||
print("instance_info.id: %s" % instance_info.id)
|
||||
print("configuration_info: %s" % configuration_info)
|
||||
print("configuration_info.id: %s" % configuration_info.id)
|
||||
config_id = configuration_info.id
|
||||
instance_info.dbaas.instances.modify(instance_info.id,
|
||||
configuration=config_id)
|
||||
resp, body = instance_info.dbaas.client.last_response
|
||||
assert_equal(resp.status, 202)
|
||||
|
||||
@test(depends_on=[test_assign_configuration_to_valid_instance])
|
||||
@time_out(10)
|
||||
def test_get_configuration_details_from_instance_validation(self):
|
||||
# validate that the configuraiton was applied correctly to the instance
|
||||
inst = instance_info.dbaas.instances.get(instance_info.id)
|
||||
configuration_id = inst.configuration['id']
|
||||
assert_not_equal(None, inst.configuration['id'])
|
||||
_test_configuration_is_applied_to_instance(instance_info,
|
||||
configuration_id)
|
||||
|
||||
@test
|
||||
def test_configurations_get(self):
|
||||
# test that the instance shows up on the assigned configuration
|
||||
result = instance_info.dbaas.configurations.get(configuration_info.id)
|
||||
assert_equal(configuration_info.id, result.id)
|
||||
assert_equal(configuration_info.name, result.name)
|
||||
assert_equal(configuration_info.description, result.description)
|
||||
|
||||
# check the result field types
|
||||
with TypeCheck("configuration", result) as check:
|
||||
check.has_field("id", basestring)
|
||||
check.has_field("name", basestring)
|
||||
check.has_field("description", basestring)
|
||||
check.has_field("values", dict)
|
||||
|
||||
print(result.values)
|
||||
with CollectionCheck("configuration_values", result.values) as check:
|
||||
# check each item has the correct type according to the rules
|
||||
for (item_key, item_val) in result.values.iteritems():
|
||||
print("item_key: %s" % item_key)
|
||||
print("item_val: %s" % item_val)
|
||||
dbaas = instance_info.dbaas
|
||||
param = dbaas.configuration_parameters.get_parameter(
|
||||
instance_info.dbaas_datastore,
|
||||
instance_info.dbaas_datastore_version,
|
||||
item_key)
|
||||
if param.type == 'integer':
|
||||
check.has_element(item_key, int)
|
||||
if param.type == 'string':
|
||||
check.has_element(item_key, basestring)
|
||||
if param.type == 'boolean':
|
||||
check.has_element(item_key, bool)
|
||||
|
||||
# Test to make sure that another user is not able to GET this config
|
||||
reqs = Requirements(is_admin=False)
|
||||
test_auth_user = instance_info.user.auth_user
|
||||
other_user = CONFIG.users.find_user(reqs, black_list=[test_auth_user])
|
||||
other_user_tenant_id = other_user.tenant_id
|
||||
client_tenant_id = instance_info.user.tenant_id
|
||||
if other_user_tenant_id == client_tenant_id:
|
||||
other_user = CONFIG.users.find_user(reqs,
|
||||
black_list=[
|
||||
instance_info.user.auth_user,
|
||||
other_user])
|
||||
print(other_user)
|
||||
print(other_user.__dict__)
|
||||
other_client = create_dbaas_client(other_user)
|
||||
assert_raises(exceptions.NotFound, other_client.configurations.get,
|
||||
configuration_info.id)
|
||||
|
||||
|
||||
@test(runs_after=[AfterConfigurationsCreation], groups=[GROUP])
|
||||
class ListConfigurations(object):
|
||||
|
||||
@test
|
||||
def test_configurations_list(self):
|
||||
# test listing configurations show up
|
||||
result = instance_info.dbaas.configurations.list()
|
||||
exists = [config for config in result if
|
||||
config.id == configuration_info.id]
|
||||
assert_equal(1, len(exists))
|
||||
configuration = exists[0]
|
||||
assert_equal(configuration.id, configuration_info.id)
|
||||
assert_equal(configuration.name, configuration_info.name)
|
||||
assert_equal(configuration.description, configuration_info.description)
|
||||
|
||||
@test
|
||||
def test_configurations_list_for_instance(self):
|
||||
# test getting an instance shows the configuration assigned shows up
|
||||
instance = instance_info.dbaas.instances.get(instance_info.id)
|
||||
assert_equal(instance.configuration['id'], configuration_info.id)
|
||||
assert_equal(instance.configuration['name'], configuration_info.name)
|
||||
# expecting two things in links, href and bookmark
|
||||
assert_equal(2, len(instance.configuration['links']))
|
||||
link = instance.configuration['links'][0]
|
||||
global configuration_href
|
||||
configuration_href = link['href']
|
||||
|
||||
@test
|
||||
def test_get_default_configuration_on_instance(self):
|
||||
# test the api call to get the default template of an instance exists
|
||||
result = instance_info.dbaas.instances.configuration(instance_info.id)
|
||||
global configuration_default
|
||||
configuration_default = result
|
||||
assert_not_equal(None, result.configuration)
|
||||
|
||||
@test
|
||||
def test_changing_configuration_with_nondynamic_parameter(self):
|
||||
# test that changing a non-dynamic parameter is applied to instance
|
||||
# and show that the instance requires a restart
|
||||
values = ('{"join_buffer_size":1048576,'
|
||||
'"innodb_buffer_pool_size":57671680}')
|
||||
instance_info.dbaas.configurations.update(configuration_info.id,
|
||||
values)
|
||||
resp, body = instance_info.dbaas.client.last_response
|
||||
assert_equal(resp.status, 202)
|
||||
|
||||
instance_info.dbaas.configurations.get(configuration_info.id)
|
||||
resp, body = instance_info.dbaas.client.last_response
|
||||
assert_equal(resp.status, 200)
|
||||
|
||||
@test(depends_on=[test_changing_configuration_with_nondynamic_parameter])
|
||||
@time_out(20)
|
||||
def test_waiting_for_instance_in_restart_required(self):
|
||||
def result_is_not_active():
|
||||
instance = instance_info.dbaas.instances.get(
|
||||
instance_info.id)
|
||||
if instance.status == "ACTIVE":
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
poll_until(result_is_not_active)
|
||||
|
||||
instance = instance_info.dbaas.instances.get(instance_info.id)
|
||||
resp, body = instance_info.dbaas.client.last_response
|
||||
assert_equal(resp.status, 200)
|
||||
print(instance.status)
|
||||
assert_equal('RESTART_REQUIRED', instance.status)
|
||||
|
||||
@test(depends_on=[test_waiting_for_instance_in_restart_required])
|
||||
def test_restart_service_should_return_active(self):
|
||||
# test that after restarting the instance it becomes active
|
||||
instance_info.dbaas.instances.restart(instance_info.id)
|
||||
resp, body = instance_info.dbaas.client.last_response
|
||||
assert_equal(resp.status, 202)
|
||||
|
||||
def result_is_active():
|
||||
instance = instance_info.dbaas.instances.get(
|
||||
instance_info.id)
|
||||
if instance.status == "ACTIVE":
|
||||
return True
|
||||
else:
|
||||
assert_equal("REBOOT", instance.status)
|
||||
return False
|
||||
poll_until(result_is_active)
|
||||
|
||||
@test(depends_on=[test_restart_service_should_return_active])
|
||||
@time_out(10)
|
||||
def test_get_configuration_details_from_instance_validation(self):
|
||||
# validate that the configuraiton was applied correctly to the instance
|
||||
inst = instance_info.dbaas.instances.get(instance_info.id)
|
||||
configuration_id = inst.configuration['id']
|
||||
assert_not_equal(None, inst.configuration['id'])
|
||||
_test_configuration_is_applied_to_instance(instance_info,
|
||||
configuration_id)
|
||||
|
||||
|
||||
@test(runs_after=[ListConfigurations], groups=[GROUP])
|
||||
class StartInstanceWithConfiguration(object):
|
||||
|
||||
@test
|
||||
def test_start_instance_with_configuration(self):
|
||||
# test that a new instance will apply the configuration on create
|
||||
if test_config.auth_strategy == "fake":
|
||||
raise SkipTest("Skipping instance start with configuration "
|
||||
"test for fake mode.")
|
||||
global configuration_instance
|
||||
databases = []
|
||||
databases.append({"name": "firstdbconfig", "character_set": "latin2",
|
||||
"collate": "latin2_general_ci"})
|
||||
databases.append({"name": "db2"})
|
||||
configuration_instance.databases = databases
|
||||
users = []
|
||||
users.append({"name": "liteconf", "password": "liteconfpass",
|
||||
"databases": [{"name": "firstdbconfig"}]})
|
||||
configuration_instance.users = users
|
||||
configuration_instance.name = "TEST_" + str(datetime.now()) + "_config"
|
||||
flavor_href = instance_info.dbaas_flavor_href
|
||||
configuration_instance.dbaas_flavor_href = flavor_href
|
||||
configuration_instance.volume = instance_info.volume
|
||||
|
||||
result = instance_info.dbaas.instances.create(
|
||||
configuration_instance.name,
|
||||
configuration_instance.dbaas_flavor_href,
|
||||
configuration_instance.volume,
|
||||
configuration_instance.databases,
|
||||
configuration_instance.users,
|
||||
availability_zone="nova",
|
||||
configuration=configuration_href)
|
||||
assert_equal(200, instance_info.dbaas.last_http_code)
|
||||
assert_equal("BUILD", result.status)
|
||||
configuration_instance.id = result.id
|
||||
|
||||
|
||||
@test(runs_after=[StartInstanceWithConfiguration], groups=[GROUP])
|
||||
class WaitForConfigurationInstanceToFinish(object):
|
||||
|
||||
@test
|
||||
@time_out(60 * 7)
|
||||
def test_instance_with_configuration_active(self):
|
||||
# wait for the instance to become active
|
||||
if test_config.auth_strategy == "fake":
|
||||
raise SkipTest("Skipping instance start with configuration "
|
||||
"test for fake mode.")
|
||||
|
||||
def result_is_active():
|
||||
instance = instance_info.dbaas.instances.get(
|
||||
configuration_instance.id)
|
||||
if instance.status == "ACTIVE":
|
||||
return True
|
||||
else:
|
||||
assert_equal("BUILD", instance.status)
|
||||
return False
|
||||
|
||||
poll_until(result_is_active)
|
||||
|
||||
@test(depends_on=[test_instance_with_configuration_active])
|
||||
@time_out(10)
|
||||
def test_get_configuration_details_from_instance_validation(self):
|
||||
# validate that the configuraiton was applied correctly to the instance
|
||||
inst = instance_info.dbaas.instances.get(configuration_instance.id)
|
||||
configuration_id = inst.configuration['id']
|
||||
assert_not_equal(None, inst.configuration['id'])
|
||||
_test_configuration_is_applied_to_instance(configuration_instance,
|
||||
configuration_id)
|
||||
|
||||
|
||||
@test(runs_after=[WaitForConfigurationInstanceToFinish], groups=[GROUP])
|
||||
class DeleteConfigurations(object):
|
||||
|
||||
@test
|
||||
def test_delete_invalid_configuration_not_found(self):
|
||||
# test deleting a configuration that does not exist throws exception
|
||||
invalid_configuration_id = "invalid-config-id"
|
||||
assert_raises(exceptions.NotFound,
|
||||
instance_info.dbaas.configurations.delete,
|
||||
invalid_configuration_id)
|
||||
|
||||
@test
|
||||
def test_unable_delete_instance_configurations(self):
|
||||
# test deleting a configuration that is assigned to
|
||||
# an instance is not allowed.
|
||||
assert_raises(exceptions.BadRequest,
|
||||
instance_info.dbaas.configurations.delete,
|
||||
configuration_info.id)
|
||||
|
||||
@test(depends_on=[test_unable_delete_instance_configurations])
|
||||
@time_out(30)
|
||||
def test_unassign_configuration_from_instances(self):
|
||||
# test to unassign configuration from instance
|
||||
instance_info.dbaas.instances.modify(configuration_instance.id,
|
||||
configuration="")
|
||||
resp, body = instance_info.dbaas.client.last_response
|
||||
assert_equal(resp.status, 202)
|
||||
instance_info.dbaas.instances.get(configuration_instance.id)
|
||||
#test that config group is not removed
|
||||
instance_info.dbaas.instances.modify(instance_info.id,
|
||||
configuration=None)
|
||||
resp, body = instance_info.dbaas.client.last_response
|
||||
assert_equal(resp.status, 202)
|
||||
instance_info.dbaas.instances.get(instance_info.id)
|
||||
|
||||
def result_has_no_configuration():
|
||||
instance = instance_info.dbaas.instances.get(inst_info.id)
|
||||
if hasattr(instance, 'configuration'):
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
inst_info = instance_info
|
||||
poll_until(result_has_no_configuration)
|
||||
inst_info = configuration_instance
|
||||
poll_until(result_has_no_configuration)
|
||||
|
||||
@test(depends_on=[test_unassign_configuration_from_instances])
|
||||
def test_no_instances_on_configuration(self):
|
||||
# test there is no configuration on the instance after unassigning
|
||||
result = instance_info.dbaas.configurations.get(configuration_info.id)
|
||||
assert_equal(configuration_info.id, result.id)
|
||||
assert_equal(configuration_info.name, result.name)
|
||||
assert_equal(configuration_info.description, result.description)
|
||||
print(configuration_instance.id)
|
||||
print(instance_info.id)
|
||||
|
||||
@test(depends_on=[test_no_instances_on_configuration])
|
||||
def test_delete_unassigned_configuration(self):
|
||||
# test that we can delete the configuration after no instances are
|
||||
# assigned to it any longer
|
||||
instance_info.dbaas.configurations.delete(configuration_info.id)
|
||||
resp, body = instance_info.dbaas.client.last_response
|
||||
assert_equal(resp.status, 202)
|
||||
|
||||
@test(depends_on=[test_unassign_configuration_from_instances])
|
||||
@time_out(120)
|
||||
def test_restart_service_after_unassign_return_active(self):
|
||||
def result_is_not_active():
|
||||
instance = instance_info.dbaas.instances.get(
|
||||
instance_info.id)
|
||||
if instance.status == "ACTIVE":
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
poll_until(result_is_not_active)
|
||||
|
||||
config = instance_info.dbaas.configurations.list()
|
||||
print(config)
|
||||
instance = instance_info.dbaas.instances.get(instance_info.id)
|
||||
print(instance.__dict__)
|
||||
resp, body = instance_info.dbaas.client.last_response
|
||||
assert_equal(resp.status, 200)
|
||||
print(instance.status)
|
||||
assert_equal('RESTART_REQUIRED', instance.status)
|
||||
|
||||
@test(depends_on=[test_restart_service_after_unassign_return_active])
|
||||
@time_out(120)
|
||||
def test_restart_service_should_return_active(self):
|
||||
# test that after restarting the instance it becomes active
|
||||
instance_info.dbaas.instances.restart(instance_info.id)
|
||||
resp, body = instance_info.dbaas.client.last_response
|
||||
assert_equal(resp.status, 202)
|
||||
|
||||
def result_is_active():
|
||||
instance = instance_info.dbaas.instances.get(
|
||||
instance_info.id)
|
||||
if instance.status == "ACTIVE":
|
||||
return True
|
||||
else:
|
||||
assert_equal("REBOOT", instance.status)
|
||||
return False
|
||||
poll_until(result_is_active)
|
||||
|
||||
@test(depends_on=[test_delete_unassigned_configuration])
|
||||
@time_out(120)
|
||||
def test_delete_configuration_instance(self):
|
||||
# test that we can delete the instance even though there is a
|
||||
# configuration applied to the instance
|
||||
instance_info.dbaas.instances.delete(configuration_instance.id)
|
||||
assert_equal(202, instance_info.dbaas.last_http_code)
|
||||
|
||||
def instance_is_gone():
|
||||
try:
|
||||
instance_info.dbaas.instances.get(configuration_instance.id)
|
||||
return False
|
||||
except exceptions.NotFound:
|
||||
return True
|
||||
|
||||
poll_until(instance_is_gone)
|
||||
assert_raises(exceptions.NotFound, instance_info.dbaas.instances.get,
|
||||
configuration_instance.id)
|
@ -350,6 +350,7 @@ class CreateInstance(object):
|
||||
|
||||
result = instance_info.initial_result
|
||||
instance_info.id = result.id
|
||||
instance_info.dbaas_datastore_version = result.datastore['version']
|
||||
|
||||
report = CONFIG.get_report()
|
||||
report.log("Instance UUID = %s" % instance_info.id)
|
||||
|
@ -37,6 +37,7 @@ class FakeGuest(object):
|
||||
self.root_was_enabled = False
|
||||
self.version = 1
|
||||
self.grants = {}
|
||||
self.overrides = {}
|
||||
|
||||
# Our default admin user.
|
||||
self._create_user({
|
||||
@ -209,7 +210,7 @@ class FakeGuest(object):
|
||||
|
||||
def prepare(self, memory_mb, packages, databases, users, device_path=None,
|
||||
mount_point=None, backup_info=None, config_contents=None,
|
||||
root_password=None):
|
||||
root_password=None, overrides=None):
|
||||
from trove.instance.models import DBInstance
|
||||
from trove.instance.models import InstanceServiceStatus
|
||||
from trove.guestagent.models import AgentHeartBeat
|
||||
@ -218,6 +219,7 @@ class FakeGuest(object):
|
||||
instance_name = DBInstance.find_by(id=self.id).name
|
||||
self.create_user(users)
|
||||
self.create_database(databases)
|
||||
self.overrides = overrides or {}
|
||||
|
||||
def update_db():
|
||||
status = InstanceServiceStatus.find_by(instance_id=self.id)
|
||||
@ -317,6 +319,12 @@ class FakeGuest(object):
|
||||
def resize_fs(self, device_path=None, mount_point=None):
|
||||
pass
|
||||
|
||||
def update_overrides(self, overrides, remove=False):
|
||||
self.overrides = overrides
|
||||
|
||||
def apply_overrides(self, overrides):
|
||||
self.overrides = overrides
|
||||
|
||||
|
||||
def get_or_create(id):
|
||||
if id not in DB:
|
||||
|
0
trove/tests/unittests/configuration/__init__.py
Normal file
0
trove/tests/unittests/configuration/__init__.py
Normal file
@ -0,0 +1,102 @@
|
||||
# Copyright 2014 Rackspace
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
import jsonschema
|
||||
from testtools import TestCase
|
||||
from trove.configuration.service import ConfigurationsController
|
||||
|
||||
|
||||
class TestConfigurationController(TestCase):
|
||||
def setUp(self):
|
||||
super(TestConfigurationController, self).setUp()
|
||||
self.controller = ConfigurationsController()
|
||||
|
||||
def test_validate_create_configuration(self):
|
||||
body = {
|
||||
"configuration": {
|
||||
"values": {},
|
||||
"name": "test",
|
||||
"datastore": {
|
||||
"type": "test_type",
|
||||
"version": "test_version"
|
||||
}
|
||||
}
|
||||
}
|
||||
schema = self.controller.get_schema('create', body)
|
||||
self.assertIsNotNone(schema)
|
||||
validator = jsonschema.Draft4Validator(schema)
|
||||
self.assertTrue(validator.is_valid(body))
|
||||
|
||||
def test_validate_create_configuration_no_datastore(self):
|
||||
body = {
|
||||
"configuration": {
|
||||
"values": {},
|
||||
"name": "test"
|
||||
}
|
||||
}
|
||||
schema = self.controller.get_schema('create', body)
|
||||
self.assertIsNotNone(schema)
|
||||
validator = jsonschema.Draft4Validator(schema)
|
||||
self.assertTrue(validator.is_valid(body))
|
||||
|
||||
def test_validate_create_invalid_values_param(self):
|
||||
body = {
|
||||
"configuration": {
|
||||
"values": '',
|
||||
"name": "test",
|
||||
"datastore": {
|
||||
"type": "test_type",
|
||||
"version": "test_version"
|
||||
}
|
||||
}
|
||||
}
|
||||
schema = self.controller.get_schema('create', body)
|
||||
self.assertIsNotNone(schema)
|
||||
validator = jsonschema.Draft4Validator(schema)
|
||||
self.assertFalse(validator.is_valid(body))
|
||||
errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
|
||||
self.assertEqual(errors[0].message,
|
||||
"'' is not of type 'object'")
|
||||
|
||||
def test_validate_create_invalid_name_param(self):
|
||||
body = {
|
||||
"configuration": {
|
||||
"values": {},
|
||||
"name": "",
|
||||
"datastore": {
|
||||
"type": "test_type",
|
||||
"version": "test_version"
|
||||
}
|
||||
}
|
||||
}
|
||||
schema = self.controller.get_schema('create', body)
|
||||
self.assertIsNotNone(schema)
|
||||
validator = jsonschema.Draft4Validator(schema)
|
||||
self.assertFalse(validator.is_valid(body))
|
||||
errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
|
||||
self.assertEqual(errors[0].message,
|
||||
"'' is too short")
|
||||
|
||||
def test_validate_edit_configuration(self):
|
||||
body = {
|
||||
"configuration": {
|
||||
"values": {}
|
||||
}
|
||||
}
|
||||
schema = self.controller.get_schema('edit', body)
|
||||
self.assertIsNotNone(schema)
|
||||
validator = jsonschema.Draft4Validator(schema)
|
||||
self.assertTrue(validator.is_valid(body))
|
0
trove/tests/unittests/datastore/__init__.py
Normal file
0
trove/tests/unittests/datastore/__init__.py
Normal file
28
trove/tests/unittests/datastore/test_datastore.py
Normal file
28
trove/tests/unittests/datastore/test_datastore.py
Normal file
@ -0,0 +1,28 @@
|
||||
# Copyright 2014 Rackspace
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from testtools import TestCase
|
||||
from trove.datastore import models as datastore_models
|
||||
from trove.common.exception import DatastoreDefaultDatastoreNotFound
|
||||
|
||||
|
||||
class TestDatastore(TestCase):
|
||||
def setUp(self):
|
||||
super(TestDatastore, self).setUp()
|
||||
|
||||
def test_create_failure_with_datastore_default_notfound(self):
|
||||
self.assertRaises(
|
||||
DatastoreDefaultDatastoreNotFound,
|
||||
datastore_models.get_datastore_version)
|
@ -242,6 +242,18 @@ class ApiTest(testtools.TestCase):
|
||||
self.api.create_backup({'id': '123'})
|
||||
self._verify_rpc_cast(exp_msg)
|
||||
|
||||
def test_update_overrides(self):
|
||||
exp_msg = RpcMsgMatcher('update_overrides', 'overrides', 'remove')
|
||||
self._mock_rpc_cast(exp_msg)
|
||||
self.api.update_overrides('123')
|
||||
self._verify_rpc_cast(exp_msg)
|
||||
|
||||
def test_apply_overrides(self):
|
||||
exp_msg = RpcMsgMatcher('apply_overrides', 'overrides')
|
||||
self._mock_rpc_cast(exp_msg)
|
||||
self.api.apply_overrides('123')
|
||||
self._verify_rpc_cast(exp_msg)
|
||||
|
||||
def _verify_rpc_connection_and_cast(self, rpc, mock_conn, exp_msg):
|
||||
verify(rpc).create_connection(new=True)
|
||||
verify(mock_conn).create_consumer(self.api._get_routing_key(), None,
|
||||
@ -255,12 +267,13 @@ class ApiTest(testtools.TestCase):
|
||||
exp_msg = RpcMsgMatcher('prepare', 'memory_mb', 'packages',
|
||||
'databases', 'users', 'device_path',
|
||||
'mount_point', 'backup_info',
|
||||
'config_contents', 'root_password')
|
||||
|
||||
'config_contents', 'root_password',
|
||||
'overrides')
|
||||
when(rpc).cast(any(), any(), exp_msg).thenReturn(None)
|
||||
|
||||
self.api.prepare('2048', 'package1', 'db1', 'user1', '/dev/vdt',
|
||||
'/mnt/opt', 'bkup-1232', 'cont', '1-2-3-4')
|
||||
'/mnt/opt', 'bkup-1232', 'cont', '1-2-3-4',
|
||||
'override')
|
||||
|
||||
self._verify_rpc_connection_and_cast(rpc, mock_conn, exp_msg)
|
||||
|
||||
@ -271,13 +284,14 @@ class ApiTest(testtools.TestCase):
|
||||
exp_msg = RpcMsgMatcher('prepare', 'memory_mb', 'packages',
|
||||
'databases', 'users', 'device_path',
|
||||
'mount_point', 'backup_info',
|
||||
'config_contents', 'root_password')
|
||||
'config_contents', 'root_password',
|
||||
'overrides')
|
||||
when(rpc).cast(any(), any(), exp_msg).thenReturn(None)
|
||||
bkup = {'id': 'backup_id_123'}
|
||||
|
||||
self.api.prepare('2048', 'package1', 'db1', 'user1', '/dev/vdt',
|
||||
'/mnt/opt', bkup, 'cont', '1-2-3-4')
|
||||
|
||||
'/mnt/opt', bkup, 'cont', '1-2-3-4',
|
||||
'overrides')
|
||||
self._verify_rpc_connection_and_cast(rpc, mock_conn, exp_msg)
|
||||
|
||||
def test_upgrade(self):
|
||||
|
@ -658,6 +658,13 @@ class MySqlAppTest(testtools.TestCase):
|
||||
self.mySqlApp.start_db_with_conf_changes,
|
||||
Mock())
|
||||
|
||||
def test_remove_overrides(self):
|
||||
|
||||
from trove.common.exception import ProcessExecutionError
|
||||
mocked = Mock(side_effect=ProcessExecutionError('Error'))
|
||||
dbaas.utils.execute_with_timeout = mocked
|
||||
self.assertRaises(ProcessExecutionError, self.mySqlApp.start_mysql)
|
||||
|
||||
|
||||
class MySqlAppInstallTest(MySqlAppTest):
|
||||
|
||||
@ -694,7 +701,7 @@ class MySqlAppInstallTest(MySqlAppTest):
|
||||
self.mysql_starts_successfully()
|
||||
sqlalchemy.create_engine = Mock()
|
||||
|
||||
self.mySqlApp.secure('contents')
|
||||
self.mySqlApp.secure('contents', None)
|
||||
|
||||
self.assertTrue(self.mySqlApp.stop_db.called)
|
||||
self.assertTrue(self.mySqlApp._write_mycnf.called)
|
||||
@ -728,7 +735,7 @@ class MySqlAppInstallTest(MySqlAppTest):
|
||||
self.mysql_starts_successfully()
|
||||
sqlalchemy.create_engine = Mock()
|
||||
|
||||
self.assertRaises(IOError, self.mySqlApp.secure, "foo")
|
||||
self.assertRaises(IOError, self.mySqlApp.secure, "foo", None)
|
||||
|
||||
self.assertTrue(self.mySqlApp.stop_db.called)
|
||||
self.assertTrue(self.mySqlApp._write_mycnf.called)
|
||||
@ -789,7 +796,7 @@ class MySqlAppMockTest(testtools.TestCase):
|
||||
any(), any(), any()).thenReturn(True)
|
||||
app = MySqlApp(mock_status)
|
||||
when(dbaas).clear_expired_password().thenReturn(None)
|
||||
self.assertRaises(TypeError, app.secure, None)
|
||||
self.assertRaises(TypeError, app.secure, None, None)
|
||||
|
||||
verify(mock_conn, atleast=2).execute(any())
|
||||
inorder.verify(mock_status).wait_for_real_status_to_change_to(
|
||||
@ -814,7 +821,7 @@ class MySqlAppMockTest(testtools.TestCase):
|
||||
when(app)._write_mycnf(any(), any()).thenReturn(True)
|
||||
when(app).start_mysql().thenReturn(None)
|
||||
when(app).stop_db().thenReturn(None)
|
||||
app.secure('foo')
|
||||
app.secure('foo', None)
|
||||
verify(mock_conn, never).execute(TextClauseMatcher('root'))
|
||||
|
||||
|
||||
|
@ -163,7 +163,8 @@ class GuestAgentManagerTest(testtools.TestCase):
|
||||
is_root_enabled=True)
|
||||
|
||||
def _prepare_dynamic(self, device_path='/dev/vdb', is_mysql_installed=True,
|
||||
backup_id=None, is_root_enabled=False):
|
||||
backup_id=None, is_root_enabled=False,
|
||||
overrides=None):
|
||||
|
||||
# covering all outcomes is starting to cause trouble here
|
||||
COUNT = 1 if device_path else 0
|
||||
@ -205,7 +206,8 @@ class GuestAgentManagerTest(testtools.TestCase):
|
||||
users=None,
|
||||
device_path=device_path,
|
||||
mount_point='/var/lib/mysql',
|
||||
backup_info=backup_info)
|
||||
backup_info=backup_info,
|
||||
overrides=overrides)
|
||||
# verification/assertion
|
||||
verify(mock_status).begin_install()
|
||||
|
||||
@ -217,7 +219,7 @@ class GuestAgentManagerTest(testtools.TestCase):
|
||||
verify(backup).restore(self.context, backup_info, '/var/lib/mysql')
|
||||
verify(dbaas.MySqlApp).install_if_needed(any())
|
||||
# We dont need to make sure the exact contents are there
|
||||
verify(dbaas.MySqlApp).secure(any())
|
||||
verify(dbaas.MySqlApp).secure(any(), overrides)
|
||||
verify(dbaas.MySqlAdmin, never).create_database()
|
||||
verify(dbaas.MySqlAdmin, never).create_user()
|
||||
verify(dbaas.MySqlApp).secure_root(secure_remote_root=any())
|
||||
|
@ -44,8 +44,10 @@ class InstanceDetailViewTest(TestCase):
|
||||
super(InstanceDetailViewTest, self).setUp()
|
||||
self.build_links_method = InstanceView._build_links
|
||||
self.build_flavor_links_method = InstanceView._build_flavor_links
|
||||
self.build_config_method = InstanceDetailView._build_configuration_info
|
||||
InstanceView._build_links = Mock()
|
||||
InstanceView._build_flavor_links = Mock()
|
||||
InstanceDetailView._build_configuration_info = Mock()
|
||||
self.instance = Mock()
|
||||
self.instance.created = 'Yesterday'
|
||||
self.instance.updated = 'Now'
|
||||
@ -62,6 +64,7 @@ class InstanceDetailViewTest(TestCase):
|
||||
super(InstanceDetailViewTest, self).tearDown()
|
||||
InstanceView._build_links = self.build_links_method
|
||||
InstanceView._build_flavor_links = self.build_flavor_links_method
|
||||
InstanceDetailView._build_configuration_info = self.build_config_method
|
||||
|
||||
def test_data_hostname(self):
|
||||
view = InstanceDetailView(self.instance, Mock())
|
||||
|
@ -235,6 +235,7 @@ def iso_time(time_string):
|
||||
# https://bugs.launchpad.net/trove-integration/+bug/1228306
|
||||
|
||||
|
||||
#TODO(cp16net): DO NOT USE needs to be removed
|
||||
def mysql_connection():
|
||||
cls = CONFIG.get('mysql_connection',
|
||||
"local.MySqlConnection")
|
||||
|
Loading…
Reference in New Issue
Block a user