Remove Sahara support

The sahara project was marked inactive during this cycle[1]. Its
project health had not improved even by milestone 2, so no 2024.1
release will be created for this project.

The project is retired because there are no volunteers interested in
maintaining it[2].

[1] https://review.opendev.org/c/openstack/governance/+/899986
[2] https://review.opendev.org/c/openstack/governance/+/919374

Change-Id: If5a50258c3a9cba29c3f2202a640b5c3c1a8b0be
This commit is contained in:
Takashi Kajinami 2024-01-03 23:08:37 +09:00
parent 0357a47900
commit 6cbf9a1e34
23 changed files with 117 additions and 3201 deletions

View File

@ -63,7 +63,6 @@ We have integration with
* https://opendev.org/openstack/python-cinderclient (block storage)
* https://opendev.org/openstack/python-glanceclient (image service)
* https://opendev.org/openstack/python-troveclient (database as a Service)
* https://opendev.org/openstack/python-saharaclient (hadoop cluster)
* https://opendev.org/openstack/python-barbicanclient (key management service)
* https://opendev.org/openstack/python-designateclient (DNS service)
* https://opendev.org/openstack/python-magnumclient (container service)

View File

@ -293,7 +293,7 @@ engine_opts = [
'software config data, and metadata is derived from any '
'associated SoftwareDeployment resources.')),
cfg.ListOpt('hidden_stack_tags',
default=['data-processing-cluster'],
default=[],
help=_('Stacks containing these tag names will be hidden. '
'Multiple tags should be given in a comma-delimited '
'list (eg. hidden_stack_tags=hide_me,me_too).')),
@ -460,7 +460,7 @@ def list_opts():
for client in ('aodh', 'barbican', 'cinder', 'designate',
'glance', 'heat', 'keystone', 'magnum', 'manila', 'mistral',
'monasca', 'neutron', 'nova', 'octavia', 'sahara',
'monasca', 'neutron', 'nova', 'octavia',
'swift', 'trove', 'vitrage', 'zaqar'
):
client_specific_group = 'clients_' + client

View File

@ -1,197 +0,0 @@
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from saharaclient.api import base as sahara_base
from saharaclient import client as sahara_client
from heat.common import exception
from heat.common.i18n import _
from heat.engine.clients import client_plugin
from heat.engine import constraints
# Key under which this client plugin is registered; also used by sahara
# resources as their default_client_name.
CLIENT_NAME = 'sahara'
class SaharaClientPlugin(client_plugin.ClientPlugin):
    """Heat client plugin for the sahara (data processing) service.

    Wraps python-saharaclient and provides name/ID resolution helpers
    plus the exception-classification hooks (`is_not_found` etc.) used
    by Heat's generic client-plugin machinery.
    """

    # Module whose exception types the base ClientPlugin inspects.
    exceptions_module = sahara_base

    # Keystone service type used to locate the sahara endpoint.
    service_types = [DATA_PROCESSING] = ['data-processing']

    def _create(self):
        """Create and return a saharaclient Client bound to this context."""
        con = self.context
        endpoint_type = self._get_client_option(CLIENT_NAME, 'endpoint_type')
        args = {
            'endpoint_type': endpoint_type,
            'service_type': self.DATA_PROCESSING,
            'session': con.keystone_session,
            'connect_retries': cfg.CONF.client_retry_limit,
            'region_name': self._get_region_name()
        }
        # API version is pinned to '1.1' here.
        client = sahara_client.Client('1.1', **args)
        return client

    def validate_hadoop_version(self, plugin_name, hadoop_version):
        """Check that a sahara plugin supports the requested version.

        :param plugin_name: name of the sahara plugin to query
        :param hadoop_version: version string requested by the template
        :raises exception.StackValidationFailed: if the version is not
            among the versions reported by the plugin
        """
        plugin = self.client().plugins.get(plugin_name)
        allowed_versions = plugin.versions
        if hadoop_version not in allowed_versions:
            msg = (_("Requested plugin '%(plugin)s' doesn\'t support version "
                     "'%(version)s'. Allowed versions are %(allowed)s") %
                   {'plugin': plugin_name,
                    'version': hadoop_version,
                    'allowed': ', '.join(allowed_versions)})
            raise exception.StackValidationFailed(message=msg)

    def is_not_found(self, ex):
        # sahara signals "not found" as an APIException with HTTP 404.
        return (isinstance(ex, sahara_base.APIException) and
                ex.error_code == 404)

    def is_over_limit(self, ex):
        # HTTP 413: request exceeded a quota/limit.
        return (isinstance(ex, sahara_base.APIException) and
                ex.error_code == 413)

    def is_conflict(self, ex):
        # HTTP 409: conflicting state on the sahara side.
        return (isinstance(ex, sahara_base.APIException) and
                ex.error_code == 409)

    def is_not_registered(self, ex):
        # sahara uses 400 + a specific error_name for unregistered images.
        return (isinstance(ex, sahara_base.APIException) and
                ex.error_code == 400 and
                ex.error_name == 'IMAGE_NOT_REGISTERED')

    def find_resource_by_name_or_id(self, resource_name, value):
        """Return the ID for the specified name or identifier.

        First tries a direct ``get(value)`` lookup (treating *value* as an
        ID); on an API error it falls back to a by-name search.

        :param resource_name: API name of entity
        :param value: ID or name of entity
        :returns: the id of the requested :value:
        :raises exception.EntityNotFound:
        :raises exception.PhysicalResourceNameAmbiguity:
        """
        try:
            entity = getattr(self.client(), resource_name)
            return entity.get(value).id
        except sahara_base.APIException:
            return self.find_resource_by_name(resource_name, value)

    def get_image_id(self, image_identifier):
        """Return the ID for the specified image name or identifier.

        :param image_identifier: image name or a UUID-like identifier
        :returns: the id of the requested :image_identifier:
        :raises exception.EntityNotFound:
        :raises exception.PhysicalResourceNameAmbiguity:
        """
        # leave this method for backward compatibility
        try:
            return self.find_resource_by_name_or_id('images', image_identifier)
        except exception.EntityNotFound:
            # Re-raise with a fixed, user-facing entity label.
            raise exception.EntityNotFound(entity='Image',
                                           name=image_identifier)

    def find_resource_by_name(self, resource_name, value):
        """Return the ID for the specified entity name.

        :param resource_name: API name of entity
        :param value: name of entity to search for
        :raises exception.EntityNotFound: if no entity matches the name
        :raises exception.PhysicalResourceNameAmbiguity: if more than one
            entity matches the name
        """
        try:
            filters = {'name': value}
            obj = getattr(self.client(), resource_name)
            obj_list = obj.find(**filters)
        except sahara_base.APIException as ex:
            raise exception.Error(
                _("Error retrieving %(entity)s list from sahara: "
                  "%(err)s") % dict(entity=resource_name,
                                    err=str(ex)))
        num_matches = len(obj_list)
        if num_matches == 0:
            raise exception.EntityNotFound(entity=resource_name or 'entity',
                                           name=value)
        elif num_matches > 1:
            raise exception.PhysicalResourceNameAmbiguity(
                name=value)
        else:
            return obj_list[0].id

    def get_plugin_id(self, plugin_name):
        """Validate that the specified plugin exists.

        The plugin name is its identifier, so this only checks existence;
        it returns None rather than an id.

        :param plugin_name: the name of the plugin to find
        :raises exception.EntityNotFound: if the plugin does not exist
        """
        try:
            self.client().plugins.get(plugin_name)
        except sahara_base.APIException:
            raise exception.EntityNotFound(entity='Plugin',
                                           name=plugin_name)

    def get_job_type(self, job_type):
        """Find the job type.

        :param job_type: the name of sahara job type to find
        :returns: the sahara job type matching :job_type: (as returned by
            ``job_types.find_unique``)
        :raises: exception.EntityNotFound
        """
        try:
            filters = {'name': job_type}
            return self.client().job_types.find_unique(**filters)
        except sahara_base.APIException:
            raise exception.EntityNotFound(entity='Job Type',
                                           name=job_type)
class SaharaBaseConstraint(constraints.BaseCustomConstraint):
    """Common base for sahara custom constraints.

    Subclasses set ``resource_name`` to the sahara API collection name;
    validation succeeds when the given value resolves to exactly one
    entity of that kind.
    """

    # Exceptions the constraint framework treats as validation failure.
    expected_exceptions = (
        exception.EntityNotFound,
        exception.PhysicalResourceNameAmbiguity,
    )

    # Overridden by subclasses with the sahara collection to search.
    resource_name = None

    def validate_with_client(self, client, resource_id):
        """Resolve resource_id within the configured sahara collection."""
        plugin = client.client_plugin(CLIENT_NAME)
        plugin.find_resource_by_name_or_id(self.resource_name, resource_id)
class PluginConstraint(constraints.BaseCustomConstraint):
    """Custom constraint validating a sahara plugin name."""

    # do not touch constraint for compatibility
    resource_client_name = CLIENT_NAME
    resource_getter_name = 'get_plugin_id'
class JobTypeConstraint(constraints.BaseCustomConstraint):
    """Custom constraint validating a sahara job type name."""

    resource_client_name = CLIENT_NAME
    resource_getter_name = 'get_job_type'
class ImageConstraint(SaharaBaseConstraint):
    """Constraint validating an image registered in sahara."""

    resource_name = 'images'
class JobBinaryConstraint(SaharaBaseConstraint):
    """Constraint validating a sahara job binary."""

    resource_name = 'job_binaries'
class ClusterConstraint(SaharaBaseConstraint):
    """Constraint validating a sahara cluster."""

    resource_name = 'clusters'
class DataSourceConstraint(SaharaBaseConstraint):
    """Constraint validating a sahara data source."""

    resource_name = 'data_sources'
class ClusterTemplateConstraint(SaharaBaseConstraint):
    """Constraint validating a sahara cluster template."""

    resource_name = 'cluster_templates'

View File

@ -13,32 +13,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from oslo_log import log as logging
from heat.common import exception
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine.resources.openstack.heat import none_resource
from heat.engine import support
from heat.engine import translation
LOG = logging.getLogger(__name__)
# NOTE(jfreud, pshchelo): copied from sahara/utils/api_validator.py
SAHARA_NAME_REGEX = (r"^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]"
r"*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z]"
r"[A-Za-z0-9\-]*[A-Za-z0-9])$")
# NOTE(jfreud): we do not use physical_resource_name_limit attr because we
# prefer to truncate _after_ removing invalid characters
SAHARA_CLUSTER_NAME_MAX_LENGTH = 80
class SaharaCluster(resource.Resource):
class SaharaCluster(none_resource.NoneResource):
"""A resource for managing Sahara clusters.
The Cluster entity represents a collection of VM instances that all have
@ -50,271 +30,17 @@ class SaharaCluster(resource.Resource):
"""
support_status = support.SupportStatus(
version='22.0.0',
status=support.DEPRECATED,
message=_('Sahara project was marked inactive'),
version='23.0.0',
status=support.HIDDEN,
message=_('Sahara project was retired'),
previous_status=support.SupportStatus(
version='5.0.0',
status=support.SUPPORTED
))
PROPERTIES = (
NAME, PLUGIN_NAME, HADOOP_VERSION, CLUSTER_TEMPLATE_ID,
KEY_NAME, IMAGE, MANAGEMENT_NETWORK, IMAGE_ID,
USE_AUTOCONFIG, SHARES
) = (
'name', 'plugin_name', 'hadoop_version', 'cluster_template_id',
'key_name', 'image', 'neutron_management_network', 'default_image_id',
'use_autoconfig', 'shares'
)
_SHARE_KEYS = (
SHARE_ID, PATH, ACCESS_LEVEL
) = (
'id', 'path', 'access_level'
)
ATTRIBUTES = (
STATUS, INFO,
) = (
"status", "info",
)
CLUSTER_STATUSES = (
CLUSTER_ACTIVE, CLUSTER_ERROR
) = (
'Active', 'Error'
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('Hadoop cluster name.'),
constraints=[
constraints.Length(min=1, max=SAHARA_CLUSTER_NAME_MAX_LENGTH),
constraints.AllowedPattern(SAHARA_NAME_REGEX),
],
),
PLUGIN_NAME: properties.Schema(
properties.Schema.STRING,
_('Plugin name.'),
required=True,
constraints=[
constraints.CustomConstraint('sahara.plugin')
]
),
HADOOP_VERSION: properties.Schema(
properties.Schema.STRING,
_('Version of Hadoop running on instances.'),
required=True,
),
CLUSTER_TEMPLATE_ID: properties.Schema(
properties.Schema.STRING,
_('ID of the Cluster Template used for '
'Node Groups and configurations.'),
constraints=[
constraints.CustomConstraint('sahara.cluster_template')
],
required=True
),
KEY_NAME: properties.Schema(
properties.Schema.STRING,
_('Keypair added to instances to make them accessible for user.'),
constraints=[
constraints.CustomConstraint('nova.keypair')
],
),
IMAGE: properties.Schema(
properties.Schema.STRING,
_('Name or UUID of the image used to boot Hadoop nodes.'),
support_status=support.SupportStatus(
status=support.HIDDEN,
version='6.0.0',
previous_status=support.SupportStatus(
status=support.DEPRECATED,
message=_('Use property %s.') % IMAGE_ID,
version='2015.1',
previous_status=support.SupportStatus(version='2014.2'))
),
constraints=[
constraints.CustomConstraint('glance.image')
],
),
IMAGE_ID: properties.Schema(
properties.Schema.STRING,
_('Default name or UUID of the image used to boot Hadoop nodes.'),
constraints=[
constraints.CustomConstraint('sahara.image'),
],
support_status=support.SupportStatus(version='2015.1')
),
MANAGEMENT_NETWORK: properties.Schema(
properties.Schema.STRING,
_('Name or UUID of network.'),
required=True,
constraints=[
constraints.CustomConstraint('neutron.network')
],
),
USE_AUTOCONFIG: properties.Schema(
properties.Schema.BOOLEAN,
_("Configure most important configs automatically."),
support_status=support.SupportStatus(version='5.0.0')
),
SHARES: properties.Schema(
properties.Schema.LIST,
_("List of manila shares to be mounted."),
schema=properties.Schema(
properties.Schema.MAP,
schema={
SHARE_ID: properties.Schema(
properties.Schema.STRING,
_("Id of the manila share."),
required=True
),
PATH: properties.Schema(
properties.Schema.STRING,
_("Local path on each cluster node on which to mount "
"the share. Defaults to '/mnt/{share_id}'.")
),
ACCESS_LEVEL: properties.Schema(
properties.Schema.STRING,
_("Governs permissions set in manila for the cluster "
"ips."),
constraints=[
constraints.AllowedValues(['rw', 'ro']),
],
default='rw'
)
}
),
support_status=support.SupportStatus(version='6.0.0')
)
}
attributes_schema = {
STATUS: attributes.Schema(
_("Cluster status."),
type=attributes.Schema.STRING
),
INFO: attributes.Schema(
_("Cluster information."),
type=attributes.Schema.MAP
),
}
default_client_name = 'sahara'
entity = 'clusters'
def translation_rules(self, props):
neutron_client_plugin = self.client_plugin('neutron')
rules = [
translation.TranslationRule(
props,
translation.TranslationRule.REPLACE,
[self.IMAGE_ID],
value_path=[self.IMAGE]),
translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
[self.IMAGE_ID],
client_plugin=self.client_plugin('glance'),
finder='find_image_by_name_or_id'),
translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
[self.MANAGEMENT_NETWORK],
client_plugin=neutron_client_plugin,
finder='find_resourceid_by_name_or_id',
entity=neutron_client_plugin.RES_TYPE_NETWORK)
]
return rules
def _cluster_name(self):
name = self.properties[self.NAME]
if name:
return name
return self.reduce_physical_resource_name(
re.sub('[^a-zA-Z0-9-]', '', self.physical_resource_name()),
SAHARA_CLUSTER_NAME_MAX_LENGTH)
def handle_create(self):
plugin_name = self.properties[self.PLUGIN_NAME]
hadoop_version = self.properties[self.HADOOP_VERSION]
cluster_template_id = self.properties[self.CLUSTER_TEMPLATE_ID]
image_id = self.properties[self.IMAGE_ID]
# check that image is provided in case when
# cluster template is missing one
cluster_template = self.client().cluster_templates.get(
cluster_template_id)
if cluster_template.default_image_id is None and not image_id:
msg = _("%(img)s must be provided: Referenced cluster template "
"%(tmpl)s has no default_image_id defined.") % {
'img': self.IMAGE_ID, 'tmpl': cluster_template_id}
raise exception.StackValidationFailed(message=msg)
key_name = self.properties[self.KEY_NAME]
net_id = self.properties[self.MANAGEMENT_NETWORK]
use_autoconfig = self.properties[self.USE_AUTOCONFIG]
shares = self.properties[self.SHARES]
cluster = self.client().clusters.create(
self._cluster_name(),
plugin_name, hadoop_version,
cluster_template_id=cluster_template_id,
user_keypair_id=key_name,
default_image_id=image_id,
net_id=net_id,
use_autoconfig=use_autoconfig,
shares=shares)
LOG.info('Cluster "%s" is being started.', cluster.name)
self.resource_id_set(cluster.id)
return self.resource_id
def check_create_complete(self, cluster_id):
cluster = self.client().clusters.get(cluster_id)
if cluster.status == self.CLUSTER_ERROR:
raise exception.ResourceInError(resource_status=cluster.status)
if cluster.status != self.CLUSTER_ACTIVE:
return False
LOG.info("Cluster '%s' has been created", cluster.name)
return True
def check_delete_complete(self, resource_id):
if not resource_id:
return True
try:
cluster = self.client().clusters.get(resource_id)
except Exception as ex:
self.client_plugin().ignore_not_found(ex)
LOG.info("Cluster '%s' has been deleted",
self._cluster_name())
return True
else:
if cluster.status == self.CLUSTER_ERROR:
raise exception.ResourceInError(resource_status=cluster.status)
return False
def _resolve_attribute(self, name):
if self.resource_id is None:
return
cluster = self.client().clusters.get(self.resource_id)
return getattr(cluster, name, None)
def validate(self):
res = super(SaharaCluster, self).validate()
if res:
return res
self.client_plugin().validate_hadoop_version(
self.properties[self.PLUGIN_NAME],
self.properties[self.HADOOP_VERSION]
)
version='22.0.0',
status=support.DEPRECATED,
message=_('Sahara project was marked inactive'),
previous_status=support.SupportStatus(
version='5.0.0',
status=support.SUPPORTED
)))
def resource_mapping():

View File

@ -13,13 +13,11 @@
# limitations under the License.
from heat.common.i18n import _
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine.resources.openstack.heat import none_resource
from heat.engine import support
class DataSource(resource.Resource):
class DataSource(none_resource.NoneResource):
"""A resource for creating sahara data source.
A data source stores an URL which designates the location of input
@ -27,111 +25,17 @@ class DataSource(resource.Resource):
"""
support_status = support.SupportStatus(
version='22.0.0',
status=support.DEPRECATED,
message=_('Sahara project was marked inactive'),
version='23.0.0',
status=support.HIDDEN,
message=_('Sahara project was retired'),
previous_status=support.SupportStatus(
version='5.0.0',
status=support.SUPPORTED
))
PROPERTIES = (
NAME, TYPE, URL, DESCRIPTION, CREDENTIALS
) = (
'name', 'type', 'url', 'description', 'credentials'
)
_CREDENTIAL_KEYS = (
USER, PASSWORD
) = (
'user', 'password'
)
_DATA_SOURCE_TYPES = (
SWIFT, HDFS, MAPRFS, MANILA
) = (
'swift', 'hdfs', 'maprfs', 'manila'
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_("Name of the data source."),
update_allowed=True
),
TYPE: properties.Schema(
properties.Schema.STRING,
_('Type of the data source.'),
constraints=[
constraints.AllowedValues(_DATA_SOURCE_TYPES),
],
required=True,
update_allowed=True
),
URL: properties.Schema(
properties.Schema.STRING,
_('URL for the data source.'),
required=True,
update_allowed=True
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description of the data source.'),
default='',
update_allowed=True
),
CREDENTIALS: properties.Schema(
properties.Schema.MAP,
_('Credentials used for swift. Not required if sahara is '
'configured to use proxy users and delegated trusts for '
'access.'),
schema={
USER: properties.Schema(
properties.Schema.STRING,
_('Username for accessing the data source URL.'),
required=True
),
PASSWORD: properties.Schema(
properties.Schema.STRING,
_("Password for accessing the data source URL."),
required=True
)
},
update_allowed=True
)
}
default_client_name = 'sahara'
entity = 'data_sources'
def _data_source_name(self):
return self.properties[self.NAME] or self.physical_resource_name()
def handle_create(self):
credentials = self.properties[self.CREDENTIALS] or {}
args = {
'name': self._data_source_name(),
'description': self.properties[self.DESCRIPTION],
'data_source_type': self.properties[self.TYPE],
'url': self.properties[self.URL],
'credential_user': credentials.get(self.USER),
'credential_pass': credentials.get(self.PASSWORD)
}
data_source = self.client().data_sources.create(**args)
self.resource_id_set(data_source.id)
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
self.properties = json_snippet.properties(
self.properties_schema,
self.context)
data = dict(self.properties)
if not data.get(self.NAME):
data[self.NAME] = self.physical_resource_name()
self.client().data_sources.update(self.resource_id, data)
version='22.0.0',
status=support.DEPRECATED,
message=_('Sahara project was marked inactive'),
previous_status=support.SupportStatus(
version='5.0.0',
status=support.SUPPORTED
)))
def resource_mapping():

View File

@ -13,113 +13,28 @@
# limitations under the License.
from heat.common.i18n import _
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine.resources.openstack.heat import none_resource
from heat.engine import support
from heat.engine import translation
class SaharaImageRegistry(resource.Resource):
class SaharaImageRegistry(none_resource.NoneResource):
"""A resource for registering an image in sahara.
Allows to register an image in the sahara image registry and add tags.
"""
support_status = support.SupportStatus(
version='22.0.0',
status=support.DEPRECATED,
message=_('Sahara project was marked inactive'),
version='23.0.0',
status=support.HIDDEN,
message=_('Sahara project was retired'),
previous_status=support.SupportStatus(
version='5.0.0',
status=support.SUPPORTED
))
PROPERTIES = (
IMAGE, USERNAME, DESCRIPTION, TAGS
) = (
'image', 'username', 'description', 'tags'
)
properties_schema = {
IMAGE: properties.Schema(
properties.Schema.STRING,
_("ID or name of the image to register."),
constraints=[
constraints.CustomConstraint('glance.image')
],
required=True
),
USERNAME: properties.Schema(
properties.Schema.STRING,
_('Username of privileged user in the image.'),
required=True,
update_allowed=True
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description of the image.'),
default='',
update_allowed=True
),
TAGS: properties.Schema(
properties.Schema.LIST,
_('Tags to add to the image.'),
schema=properties.Schema(
properties.Schema.STRING
),
update_allowed=True,
default=[]
)
}
def translation_rules(self, props):
return [
translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
[self.IMAGE],
client_plugin=self.client_plugin('glance'),
finder='find_image_by_name_or_id')
]
default_client_name = 'sahara'
entity = 'images'
def handle_create(self):
self.resource_id_set(self.properties[self.IMAGE])
self.client().images.update_image(
self.resource_id,
self.properties[self.USERNAME],
self.properties[self.DESCRIPTION]
)
if self.properties[self.TAGS]:
self.client().images.update_tags(self.resource_id,
self.properties[self.TAGS])
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
self.properties = json_snippet.properties(
self.properties_schema,
self.context)
if self.USERNAME in prop_diff or self.DESCRIPTION in prop_diff:
self.client().images.update_image(
self.resource_id,
self.properties[self.USERNAME],
self.properties[self.DESCRIPTION]
)
if self.TAGS in prop_diff:
self.client().images.update_tags(self.resource_id,
self.properties[self.TAGS])
def handle_delete(self):
if self.resource_id is None:
return
with self.client_plugin().ignore_not_found:
self.client().images.unregister_image(self.resource_id)
version='22.0.0',
status=support.DEPRECATED,
message=_('Sahara project was marked inactive'),
previous_status=support.SupportStatus(
version='5.0.0',
status=support.SUPPORTED
)))
def resource_mapping():

View File

@ -12,21 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from heat.common import exception
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine.resources import signal_responder
from heat.engine.resources.openstack.heat import none_resource
from heat.engine import support
from heat.engine import translation
# NOTE(tlashchova): copied from sahara/utils/api_validator.py
SAHARA_NAME_REGEX = r"^[a-zA-Z0-9][a-zA-Z0-9\-_\.]*$"
class SaharaJob(signal_responder.SignalResponder, resource.Resource):
class SaharaJob(none_resource.NoneResource):
"""A resource for creating Sahara Job.
A job specifies the type of the job and lists all of the individual
@ -34,284 +25,17 @@ class SaharaJob(signal_responder.SignalResponder, resource.Resource):
"""
support_status = support.SupportStatus(
version='22.0.0',
status=support.DEPRECATED,
message=_('Sahara project was marked inactive'),
version='23.0.0',
status=support.HIDDEN,
message=_('Sahara project was retired'),
previous_status=support.SupportStatus(
version='8.0.0',
status=support.SUPPORTED
))
PROPERTIES = (
NAME, TYPE, MAINS, LIBS, DESCRIPTION,
DEFAULT_EXECUTION_DATA, IS_PUBLIC, IS_PROTECTED
) = (
'name', 'type', 'mains', 'libs', 'description',
'default_execution_data', 'is_public', 'is_protected'
)
_EXECUTION_DATA_KEYS = (
CLUSTER, INPUT, OUTPUT, CONFIGS, PARAMS, ARGS,
IS_PUBLIC, INTERFACE
) = (
'cluster', 'input', 'output', 'configs', 'params', 'args',
'is_public', 'interface'
)
ATTRIBUTES = (
EXECUTIONS, DEFAULT_EXECUTION_URL
) = (
'executions', 'default_execution_url'
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_("Name of the job."),
constraints=[
constraints.Length(min=1, max=50),
constraints.AllowedPattern(SAHARA_NAME_REGEX),
],
update_allowed=True
),
TYPE: properties.Schema(
properties.Schema.STRING,
_("Type of the job."),
constraints=[
constraints.CustomConstraint('sahara.job_type')
],
required=True
),
MAINS: properties.Schema(
properties.Schema.LIST,
_("IDs or names of job's main job binary. In case of specific "
"Sahara service, this property designed as a list, but accepts "
"only one item."),
schema=properties.Schema(
properties.Schema.STRING,
_("ID of job's main job binary."),
constraints=[constraints.CustomConstraint('sahara.job_binary')]
),
constraints=[constraints.Length(max=1)],
default=[]
),
LIBS: properties.Schema(
properties.Schema.LIST,
_("IDs or names of job's lib job binaries."),
schema=properties.Schema(
properties.Schema.STRING,
constraints=[
constraints.CustomConstraint('sahara.job_binary')
]
),
default=[]
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description of the job.'),
update_allowed=True
),
IS_PUBLIC: properties.Schema(
properties.Schema.BOOLEAN,
_('If True, job will be shared across the tenants.'),
update_allowed=True,
default=False
),
IS_PROTECTED: properties.Schema(
properties.Schema.BOOLEAN,
_('If True, job will be protected from modifications and '
'can not be deleted until this property is set to False.'),
update_allowed=True,
default=False
),
DEFAULT_EXECUTION_DATA: properties.Schema(
properties.Schema.MAP,
_('Default execution data to use when run signal.'),
schema={
CLUSTER: properties.Schema(
properties.Schema.STRING,
_('ID or name of the cluster to run the job in.'),
constraints=[
constraints.CustomConstraint('sahara.cluster')
],
required=True
),
INPUT: properties.Schema(
properties.Schema.STRING,
_('ID or name of the input data source.'),
constraints=[
constraints.CustomConstraint('sahara.data_source')
]
),
OUTPUT: properties.Schema(
properties.Schema.STRING,
_('ID or name of the output data source.'),
constraints=[
constraints.CustomConstraint('sahara.data_source')
]
),
CONFIGS: properties.Schema(
properties.Schema.MAP,
_('Config parameters to add to the job.'),
default={}
),
PARAMS: properties.Schema(
properties.Schema.MAP,
_('Parameters to add to the job.'),
default={}
),
ARGS: properties.Schema(
properties.Schema.LIST,
_('Arguments to add to the job.'),
schema=properties.Schema(
properties.Schema.STRING,
),
default=[]
),
IS_PUBLIC: properties.Schema(
properties.Schema.BOOLEAN,
_('If True, execution will be shared across the tenants.'),
default=False
),
INTERFACE: properties.Schema(
properties.Schema.MAP,
_('Interface arguments to add to the job.'),
default={}
)
},
update_allowed=True
)
}
attributes_schema = {
DEFAULT_EXECUTION_URL: attributes.Schema(
_("A signed url to create execution specified in "
"default_execution_data property."),
type=attributes.Schema.STRING,
cache_mode=attributes.Schema.CACHE_NONE
),
EXECUTIONS: attributes.Schema(
_("List of the job executions."),
type=attributes.Schema.LIST
)
}
default_client_name = 'sahara'
entity = 'jobs'
def translation_rules(self, properties):
return [
translation.TranslationRule(
properties,
translation.TranslationRule.RESOLVE,
[self.MAINS],
client_plugin=self.client_plugin(),
finder='find_resource_by_name_or_id',
entity='job_binaries'
),
translation.TranslationRule(
properties,
translation.TranslationRule.RESOLVE,
[self.LIBS],
client_plugin=self.client_plugin(),
finder='find_resource_by_name_or_id',
entity='job_binaries'
),
translation.TranslationRule(
properties,
translation.TranslationRule.RESOLVE,
[self.DEFAULT_EXECUTION_DATA, self.CLUSTER],
client_plugin=self.client_plugin(),
finder='find_resource_by_name_or_id',
entity='clusters'
),
translation.TranslationRule(
properties,
translation.TranslationRule.RESOLVE,
[self.DEFAULT_EXECUTION_DATA, self.INPUT],
client_plugin=self.client_plugin(),
finder='find_resource_by_name_or_id',
entity='data_sources'
),
translation.TranslationRule(
properties,
translation.TranslationRule.RESOLVE,
[self.DEFAULT_EXECUTION_DATA, self.OUTPUT],
client_plugin=self.client_plugin(),
finder='find_resource_by_name_or_id',
entity='data_sources'
)
]
def handle_create(self):
args = {
'name': self.properties[
self.NAME] or self.physical_resource_name(),
'type': self.properties[self.TYPE],
# Note: sahara accepts only one main binary but schema demands
# that it should be in a list.
'mains': self.properties[self.MAINS],
'libs': self.properties[self.LIBS],
'description': self.properties[self.DESCRIPTION],
'is_public': self.properties[self.IS_PUBLIC],
'is_protected': self.properties[self.IS_PROTECTED]
}
job = self.client().jobs.create(**args)
self.resource_id_set(job.id)
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if self.NAME in prop_diff:
name = prop_diff[self.NAME] or self.physical_resource_name()
prop_diff[self.NAME] = name
if self.DEFAULT_EXECUTION_DATA in prop_diff:
del prop_diff[self.DEFAULT_EXECUTION_DATA]
if prop_diff:
self.client().jobs.update(self.resource_id, **prop_diff)
def handle_signal(self, details):
data = details or self.properties.get(self.DEFAULT_EXECUTION_DATA)
execution_args = {
'job_id': self.resource_id,
'cluster_id': data.get(self.CLUSTER),
'input_id': data.get(self.INPUT),
'output_id': data.get(self.OUTPUT),
'is_public': data.get(self.IS_PUBLIC),
'interface': data.get(self.INTERFACE),
'configs': {
'configs': data.get(self.CONFIGS),
'params': data.get(self.PARAMS),
'args': data.get(self.ARGS)
},
'is_protected': False
}
try:
self.client().job_executions.create(**execution_args)
except Exception as ex:
raise exception.ResourceFailure(ex, self)
def handle_delete(self):
if self.resource_id is None:
return
with self.client_plugin().ignore_not_found:
job_exs = self.client().job_executions.find(id=self.resource_id)
for ex in job_exs:
self.client().job_executions.delete(ex.id)
super(SaharaJob, self).handle_delete()
def _resolve_attribute(self, name):
if name == self.DEFAULT_EXECUTION_URL:
return str(self._get_ec2_signed_url(never_expire=True))
elif name == self.EXECUTIONS:
try:
job_execs = self.client().job_executions.find(
id=self.resource_id)
except Exception:
return []
return [execution.to_dict() for execution in job_execs]
version='22.0.0',
status=support.DEPRECATED,
message=_('Sahara project was marked inactive'),
previous_status=support.SupportStatus(
version='8.0.0',
status=support.SUPPORTED
)))
def resource_mapping():

View File

@ -12,17 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_utils import uuidutils
from heat.common import exception
from heat.common.i18n import _
from heat.engine import properties
from heat.engine import resource
from heat.engine import rsrc_defn
from heat.engine.resources.openstack.heat import none_resource
from heat.engine import support
class JobBinary(resource.Resource):
class JobBinary(none_resource.NoneResource):
"""A resource for creating sahara job binary.
A job binary stores an URL to a single script or Jar file and any
@ -30,105 +25,17 @@ class JobBinary(resource.Resource):
"""
support_status = support.SupportStatus(
version='22.0.0',
status=support.DEPRECATED,
message=_('Sahara project was marked inactive'),
version='23.0.0',
status=support.HIDDEN,
message=_('Sahara project was retired'),
previous_status=support.SupportStatus(
version='5.0.0',
status=support.SUPPORTED
))
PROPERTIES = (
NAME, URL, DESCRIPTION, CREDENTIALS
) = (
'name', 'url', 'description', 'credentials'
)
_CREDENTIAL_KEYS = (
USER, PASSWORD
) = (
'user', 'password'
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('Name of the job binary.'),
update_allowed=True
),
URL: properties.Schema(
properties.Schema.STRING,
_('URL for the job binary. Must be in the format '
'swift://<container>/<path> or internal-db://<uuid>.'),
required=True,
update_allowed=True
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description of the job binary.'),
default='',
update_allowed=True
),
CREDENTIALS: properties.Schema(
properties.Schema.MAP,
_('Credentials used for swift. Not required if sahara is '
'configured to use proxy users and delegated trusts for '
'access.'),
schema={
USER: properties.Schema(
properties.Schema.STRING,
_('Username for accessing the job binary URL.'),
required=True
),
PASSWORD: properties.Schema(
properties.Schema.STRING,
_('Password for accessing the job binary URL.'),
required=True
),
},
update_allowed=True
)
}
default_client_name = 'sahara'
entity = 'job_binaries'
def _job_binary_name(self):
return self.properties[self.NAME] or self.physical_resource_name()
def _prepare_properties(self):
credentials = self.properties[self.CREDENTIALS] or {}
return {
'name': self._job_binary_name(),
'description': self.properties[self.DESCRIPTION],
'url': self.properties[self.URL],
'extra': credentials
}
def validate(self):
super(JobBinary, self).validate()
url = self.properties[self.URL]
if not (url.startswith('swift://') or (url.startswith('internal-db://')
and uuidutils.is_uuid_like(url[len("internal-db://"):]))):
msg = _("%s is not a valid job location.") % url
raise exception.StackValidationFailed(
path=[self.stack.t.RESOURCES, self.name,
self.stack.t.get_section_name(rsrc_defn.PROPERTIES)],
message=msg)
def handle_create(self):
args = self._prepare_properties()
job_binary = self.client().job_binaries.create(**args)
self.resource_id_set(job_binary.id)
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
self.properties = json_snippet.properties(
self.properties_schema,
self.context)
data = self._prepare_properties()
self.client().job_binaries.update(self.resource_id, data)
version='22.0.0',
status=support.DEPRECATED,
message=_('Sahara project was marked inactive'),
previous_status=support.SupportStatus(
version='5.0.0',
status=support.SUPPORTED
)))
def resource_mapping():

View File

@ -13,29 +13,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from oslo_log import log as logging
from oslo_utils import encodeutils
from heat.common import exception
from heat.common.i18n import _
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import rsrc_defn
from heat.engine.resources.openstack.heat import none_resource
from heat.engine import support
from heat.engine import translation
LOG = logging.getLogger(__name__)
# NOTE(pshchelo): copied from sahara/utils/api_validator.py
SAHARA_NAME_REGEX = (r"^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]"
r"*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z]"
r"[A-Za-z0-9\-]*[A-Za-z0-9])$")
class SaharaNodeGroupTemplate(resource.Resource):
class SaharaNodeGroupTemplate(none_resource.NoneResource):
"""A resource for managing Sahara node group templates.
A Node Group Template describes a group of nodes within cluster. It
@ -45,334 +28,20 @@ class SaharaNodeGroupTemplate(resource.Resource):
"""
support_status = support.SupportStatus(
version='22.0.0',
status=support.DEPRECATED,
message=_('Sahara project was marked inactive'),
version='23.0.0',
status=support.HIDDEN,
message=_('Sahara project was retired'),
previous_status=support.SupportStatus(
version='2014.2',
status=support.SUPPORTED
))
PROPERTIES = (
NAME, PLUGIN_NAME, HADOOP_VERSION, FLAVOR, DESCRIPTION,
VOLUMES_PER_NODE, VOLUMES_SIZE, VOLUME_TYPE,
SECURITY_GROUPS, AUTO_SECURITY_GROUP,
AVAILABILITY_ZONE, VOLUMES_AVAILABILITY_ZONE,
NODE_PROCESSES, FLOATING_IP_POOL, NODE_CONFIGS, IMAGE_ID,
IS_PROXY_GATEWAY, VOLUME_LOCAL_TO_INSTANCE, USE_AUTOCONFIG,
SHARES
) = (
'name', 'plugin_name', 'hadoop_version', 'flavor', 'description',
'volumes_per_node', 'volumes_size', 'volume_type',
'security_groups', 'auto_security_group',
'availability_zone', 'volumes_availability_zone',
'node_processes', 'floating_ip_pool', 'node_configs', 'image_id',
'is_proxy_gateway', 'volume_local_to_instance', 'use_autoconfig',
'shares'
)
_SHARE_KEYS = (
SHARE_ID, PATH, ACCESS_LEVEL
) = (
'id', 'path', 'access_level'
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_("Name for the Sahara Node Group Template."),
constraints=[
constraints.Length(min=1, max=50),
constraints.AllowedPattern(SAHARA_NAME_REGEX),
],
update_allowed=True
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description of the Node Group Template.'),
default="",
update_allowed=True
),
PLUGIN_NAME: properties.Schema(
properties.Schema.STRING,
_('Plugin name.'),
required=True,
constraints=[
constraints.CustomConstraint('sahara.plugin')
],
update_allowed=True
),
HADOOP_VERSION: properties.Schema(
properties.Schema.STRING,
_('Version of Hadoop running on instances.'),
required=True,
update_allowed=True
),
FLAVOR: properties.Schema(
properties.Schema.STRING,
_('Name or ID Nova flavor for the nodes.'),
required=True,
constraints=[
constraints.CustomConstraint('nova.flavor')
],
update_allowed=True
),
VOLUMES_PER_NODE: properties.Schema(
properties.Schema.INTEGER,
_("Volumes per node."),
constraints=[
constraints.Range(min=0),
],
default=0,
update_allowed=True
),
VOLUMES_SIZE: properties.Schema(
properties.Schema.INTEGER,
_("Size of the volumes, in GB."),
constraints=[
constraints.Range(min=1),
],
update_allowed=True
),
VOLUME_TYPE: properties.Schema(
properties.Schema.STRING,
_("Type of the volume to create on Cinder backend."),
constraints=[
constraints.CustomConstraint('cinder.vtype')
],
update_allowed=True
),
SECURITY_GROUPS: properties.Schema(
properties.Schema.LIST,
_("List of security group names or IDs to assign to this "
"Node Group template."),
schema=properties.Schema(
properties.Schema.STRING,
),
update_allowed=True
),
AUTO_SECURITY_GROUP: properties.Schema(
properties.Schema.BOOLEAN,
_("Defines whether auto-assign security group to this "
"Node Group template."),
update_allowed=True
),
AVAILABILITY_ZONE: properties.Schema(
properties.Schema.STRING,
_("Availability zone to create servers in."),
update_allowed=True
),
VOLUMES_AVAILABILITY_ZONE: properties.Schema(
properties.Schema.STRING,
_("Availability zone to create volumes in."),
update_allowed=True
),
NODE_PROCESSES: properties.Schema(
properties.Schema.LIST,
_("List of processes to run on every node."),
required=True,
constraints=[
constraints.Length(min=1),
],
schema=properties.Schema(
properties.Schema.STRING,
),
update_allowed=True
),
FLOATING_IP_POOL: properties.Schema(
properties.Schema.STRING,
_("Name or UUID of the Neutron floating IP network or "
"name of the Nova floating ip pool to use. "
"Should not be provided when used with Nova-network "
"that auto-assign floating IPs."),
update_allowed=True
),
NODE_CONFIGS: properties.Schema(
properties.Schema.MAP,
_("Dictionary of node configurations."),
update_allowed=True
),
IMAGE_ID: properties.Schema(
properties.Schema.STRING,
_("ID of the image to use for the template."),
constraints=[
constraints.CustomConstraint('sahara.image'),
],
update_allowed=True
),
IS_PROXY_GATEWAY: properties.Schema(
properties.Schema.BOOLEAN,
_("Provide access to nodes using other nodes of the cluster "
"as proxy gateways."),
support_status=support.SupportStatus(version='5.0.0'),
update_allowed=True
),
VOLUME_LOCAL_TO_INSTANCE: properties.Schema(
properties.Schema.BOOLEAN,
_("Create volumes on the same physical port as an instance."),
support_status=support.SupportStatus(version='5.0.0'),
update_allowed=True
),
USE_AUTOCONFIG: properties.Schema(
properties.Schema.BOOLEAN,
_("Configure most important configs automatically."),
support_status=support.SupportStatus(version='5.0.0'),
update_allowed=True
),
SHARES: properties.Schema(
properties.Schema.LIST,
_("List of manila shares to be mounted."),
schema=properties.Schema(
properties.Schema.MAP,
schema={
SHARE_ID: properties.Schema(
properties.Schema.STRING,
_("Id of the manila share."),
required=True
),
PATH: properties.Schema(
properties.Schema.STRING,
_("Local path on each cluster node on which to mount "
"the share. Defaults to '/mnt/{share_id}'.")
),
ACCESS_LEVEL: properties.Schema(
properties.Schema.STRING,
_("Governs permissions set in manila for the cluster "
"ips."),
constraints=[
constraints.AllowedValues(['rw', 'ro']),
],
default='rw'
)
}
),
support_status=support.SupportStatus(version='6.0.0'),
update_allowed=True
)
}
default_client_name = 'sahara'
physical_resource_name_limit = 50
entity = 'node_group_templates'
def translation_rules(self, props):
neutron_client_plugin = self.client_plugin('neutron')
return [
translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
[self.FLAVOR],
client_plugin=self.client_plugin('nova'),
finder='find_flavor_by_name_or_id'),
translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
[self.FLOATING_IP_POOL],
client_plugin=neutron_client_plugin,
finder='find_resourceid_by_name_or_id',
entity=neutron_client_plugin.RES_TYPE_NETWORK)
]
def _ngt_name(self, name):
if name:
return name
return re.sub('[^a-zA-Z0-9-]', '', self.physical_resource_name())
def _prepare_properties(self, props):
"""Prepares the property values."""
if self.NAME in props:
props['name'] = self._ngt_name(props[self.NAME])
if self.FLAVOR in props:
props['flavor_id'] = props.pop(self.FLAVOR)
return props
def handle_create(self):
props = dict((k, v) for k, v in self.properties.items())
args = self._prepare_properties(props)
node_group_template = self.client().node_group_templates.create(**args)
LOG.info("Node Group Template '%s' has been created",
node_group_template.name)
self.resource_id_set(node_group_template.id)
return self.resource_id
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
args = self._prepare_properties(prop_diff)
self.client().node_group_templates.update(self.resource_id, **args)
def validate(self):
res = super(SaharaNodeGroupTemplate, self).validate()
if res:
return res
pool = self.properties[self.FLOATING_IP_POOL]
if pool:
if self.is_using_neutron():
neutron_client_plugin = self.client_plugin('neutron')
try:
neutron_client_plugin.find_resourceid_by_name_or_id(
neutron_client_plugin.RES_TYPE_NETWORK,
pool)
except Exception as ex:
if (neutron_client_plugin.is_not_found(ex)
or neutron_client_plugin.is_no_unique(ex)):
err_msg = encodeutils.exception_to_unicode(ex)
raise exception.StackValidationFailed(message=err_msg)
raise
else:
try:
self.client('nova').floating_ip_pools.find(name=pool)
except Exception as ex:
if self.client_plugin('nova').is_not_found(ex):
err_msg = encodeutils.exception_to_unicode(ex)
raise exception.StackValidationFailed(message=err_msg)
raise
self.client_plugin().validate_hadoop_version(
self.properties[self.PLUGIN_NAME],
self.properties[self.HADOOP_VERSION]
)
# validate node processes
plugin = self.client().plugins.get_version_details(
self.properties[self.PLUGIN_NAME],
self.properties[self.HADOOP_VERSION])
allowed_processes = [item for sublist in
list(plugin.node_processes.values())
for item in sublist]
unsupported_processes = []
for process in self.properties[self.NODE_PROCESSES]:
if process not in allowed_processes:
unsupported_processes.append(process)
if unsupported_processes:
msg = (_("Plugin %(plugin)s doesn't support the following "
"node processes: %(unsupported)s. Allowed processes are: "
"%(allowed)s") %
{'plugin': self.properties[self.PLUGIN_NAME],
'unsupported': ', '.join(unsupported_processes),
'allowed': ', '.join(allowed_processes)})
raise exception.StackValidationFailed(
path=[self.stack.t.RESOURCES,
self.name,
self.stack.t.get_section_name(rsrc_defn.PROPERTIES)],
message=msg)
def parse_live_resource_data(self, resource_properties, resource_data):
result = super(SaharaNodeGroupTemplate, self).parse_live_resource_data(
resource_properties, resource_data)
for group in result[self.SHARES] or []:
remove_keys = set(group.keys()) - set(self._SHARE_KEYS)
for key in remove_keys:
del group[key]
result[self.FLAVOR] = resource_data.get('flavor_id')
return result
version='22.0.0',
status=support.DEPRECATED,
message=_('Sahara project was marked inactive'),
previous_status=support.SupportStatus(
version='2014.2',
status=support.SUPPORTED
)))
class SaharaClusterTemplate(resource.Resource):
class SaharaClusterTemplate(none_resource.NoneResource):
"""A resource for managing Sahara cluster templates.
A Cluster Template is designed to bring Node Group Templates together to
@ -387,231 +56,17 @@ class SaharaClusterTemplate(resource.Resource):
hosts.
"""
support_status = support.SupportStatus(version='2014.2')
PROPERTIES = (
NAME, PLUGIN_NAME, HADOOP_VERSION, DESCRIPTION,
ANTI_AFFINITY, MANAGEMENT_NETWORK,
CLUSTER_CONFIGS, NODE_GROUPS, IMAGE_ID, USE_AUTOCONFIG,
SHARES
) = (
'name', 'plugin_name', 'hadoop_version', 'description',
'anti_affinity', 'neutron_management_network',
'cluster_configs', 'node_groups', 'default_image_id', 'use_autoconfig',
'shares'
)
_NODE_GROUP_KEYS = (
NG_NAME, COUNT, NG_TEMPLATE_ID,
) = (
'name', 'count', 'node_group_template_id',
)
_SHARE_KEYS = (
SHARE_ID, PATH, ACCESS_LEVEL
) = (
'id', 'path', 'access_level'
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_("Name for the Sahara Cluster Template."),
constraints=[
constraints.Length(min=1, max=50),
constraints.AllowedPattern(SAHARA_NAME_REGEX),
],
update_allowed=True
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description of the Sahara Group Template.'),
default="",
update_allowed=True
),
PLUGIN_NAME: properties.Schema(
properties.Schema.STRING,
_('Plugin name.'),
required=True,
constraints=[
constraints.CustomConstraint('sahara.plugin')
],
update_allowed=True
),
HADOOP_VERSION: properties.Schema(
properties.Schema.STRING,
_('Version of Hadoop running on instances.'),
required=True,
update_allowed=True
),
IMAGE_ID: properties.Schema(
properties.Schema.STRING,
_("ID of the default image to use for the template."),
constraints=[
constraints.CustomConstraint('sahara.image'),
],
update_allowed=True
),
MANAGEMENT_NETWORK: properties.Schema(
properties.Schema.STRING,
_('Name or UUID of network.'),
constraints=[
constraints.CustomConstraint('neutron.network')
],
update_allowed=True
),
ANTI_AFFINITY: properties.Schema(
properties.Schema.LIST,
_("List of processes to enable anti-affinity for."),
schema=properties.Schema(
properties.Schema.STRING,
),
update_allowed=True
),
CLUSTER_CONFIGS: properties.Schema(
properties.Schema.MAP,
_('Cluster configs dictionary.'),
update_allowed=True
),
NODE_GROUPS: properties.Schema(
properties.Schema.LIST,
_('Node groups.'),
schema=properties.Schema(
properties.Schema.MAP,
schema={
NG_NAME: properties.Schema(
properties.Schema.STRING,
_('Name of the Node group.'),
required=True
),
COUNT: properties.Schema(
properties.Schema.INTEGER,
_("Number of instances in the Node group."),
required=True,
constraints=[
constraints.Range(min=1)
]
),
NG_TEMPLATE_ID: properties.Schema(
properties.Schema.STRING,
_("ID of the Node Group Template."),
required=True
),
}
),
update_allowed=True
),
USE_AUTOCONFIG: properties.Schema(
properties.Schema.BOOLEAN,
_("Configure most important configs automatically."),
support_status=support.SupportStatus(version='5.0.0')
),
SHARES: properties.Schema(
properties.Schema.LIST,
_("List of manila shares to be mounted."),
schema=properties.Schema(
properties.Schema.MAP,
schema={
SHARE_ID: properties.Schema(
properties.Schema.STRING,
_("Id of the manila share."),
required=True
),
PATH: properties.Schema(
properties.Schema.STRING,
_("Local path on each cluster node on which to mount "
"the share. Defaults to '/mnt/{share_id}'.")
),
ACCESS_LEVEL: properties.Schema(
properties.Schema.STRING,
_("Governs permissions set in manila for the cluster "
"ips."),
constraints=[
constraints.AllowedValues(['rw', 'ro']),
],
default='rw'
)
}
),
support_status=support.SupportStatus(version='6.0.0'),
update_allowed=True
)
}
default_client_name = 'sahara'
physical_resource_name_limit = 50
entity = 'cluster_templates'
def translation_rules(self, props):
neutron_client_plugin = self.client_plugin('neutron')
return [
translation.TranslationRule(
props,
translation.TranslationRule.RESOLVE,
[self.MANAGEMENT_NETWORK],
client_plugin=neutron_client_plugin,
finder='find_resourceid_by_name_or_id',
entity=neutron_client_plugin.RES_TYPE_NETWORK)
]
def _cluster_template_name(self, name):
if name:
return name
return re.sub('[^a-zA-Z0-9-]', '', self.physical_resource_name())
def _prepare_properties(self, props):
"""Prepares the property values."""
if self.NAME in props:
props['name'] = self._cluster_template_name(props[self.NAME])
if self.MANAGEMENT_NETWORK in props:
props['net_id'] = props.pop(self.MANAGEMENT_NETWORK)
return props
def handle_create(self):
props = dict((k, v) for k, v in self.properties.items())
args = self._prepare_properties(props)
cluster_template = self.client().cluster_templates.create(**args)
LOG.info("Cluster Template '%s' has been created",
cluster_template.name)
self.resource_id_set(cluster_template.id)
return self.resource_id
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
args = self._prepare_properties(prop_diff)
self.client().cluster_templates.update(self.resource_id, **args)
def validate(self):
res = super(SaharaClusterTemplate, self).validate()
if res:
return res
# check if running on neutron and MANAGEMENT_NETWORK missing
if (self.is_using_neutron() and
not self.properties[self.MANAGEMENT_NETWORK]):
msg = _("%s must be provided"
) % self.MANAGEMENT_NETWORK
raise exception.StackValidationFailed(message=msg)
self.client_plugin().validate_hadoop_version(
self.properties[self.PLUGIN_NAME],
self.properties[self.HADOOP_VERSION]
)
def parse_live_resource_data(self, resource_properties, resource_data):
result = super(SaharaClusterTemplate, self).parse_live_resource_data(
resource_properties, resource_data)
for group in result[self.NODE_GROUPS] or []:
remove_keys = set(group.keys()) - set(self._NODE_GROUP_KEYS)
for key in remove_keys:
del group[key]
for group in result[self.SHARES] or []:
remove_keys = set(group.keys()) - set(self._SHARE_KEYS)
for key in remove_keys:
del group[key]
return result
support_status = support.SupportStatus(
version='23.0.0',
status=support.HIDDEN,
previous_status=support.SupportStatus(
version='22.0.0',
status=support.DEPRECATED,
message=_('Sahara project was marked inactive'),
previous_status=support.SupportStatus(
version='2014.2',
status=support.SUPPORTED
)))
def resource_mapping():

View File

@ -25,7 +25,6 @@ from mistralclient.api import base as mistral_base
from neutronclient.common import exceptions as neutron_exc
from openstack import exceptions
from oslo_config import cfg
from saharaclient.api import base as sahara_base
from swiftclient import exceptions as swift_exc
from testtools import testcase
from troveclient import client as troveclient
@ -691,41 +690,6 @@ class TestIsNotFound(common.HeatTestCase):
exception=lambda: troveclient.exceptions.Conflict(
message='Conflict'),
)),
('sahara_not_found', dict(
is_not_found=True,
is_over_limit=False,
is_client_exception=True,
is_conflict=False,
plugin='sahara',
exception=lambda: sahara_base.APIException(
error_message='gone1', error_code=404),
)),
('sahara_exception', dict(
is_not_found=False,
is_over_limit=False,
is_client_exception=False,
is_conflict=False,
plugin='sahara',
exception=lambda: Exception()
)),
('sahara_overlimit', dict(
is_not_found=False,
is_over_limit=True,
is_client_exception=True,
is_conflict=False,
plugin='sahara',
exception=lambda: sahara_base.APIException(
error_message='over1', error_code=413),
)),
('sahara_conflict', dict(
is_not_found=False,
is_over_limit=False,
is_client_exception=True,
is_conflict=True,
plugin='sahara',
exception=lambda: sahara_base.APIException(
error_message='conflict1', error_code=409),
)),
('zaqar_not_found', dict(
is_not_found=True,
is_over_limit=False,

View File

@ -1,235 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import uuid
from saharaclient.api import base as sahara_base
from heat.common import exception
from heat.engine.clients.os import sahara
from heat.tests import common
from heat.tests import utils
class SaharaUtilsTest(common.HeatTestCase):
"""Basic tests :module:'heat.engine.resources.clients.os.sahara'."""
def setUp(self):
super(SaharaUtilsTest, self).setUp()
self.sahara_client = mock.MagicMock()
con = utils.dummy_context()
c = con.clients
self.sahara_plugin = c.client_plugin('sahara')
self.sahara_plugin.client = lambda: self.sahara_client
self.my_image = mock.MagicMock()
self.my_plugin = mock.MagicMock()
self.my_jobtype = mock.MagicMock()
def test_get_image_id(self):
"""Tests the get_image_id function."""
img_id = str(uuid.uuid4())
img_name = 'myfakeimage'
self.my_image.id = img_id
self.my_image.name = img_name
self.sahara_client.images.get.side_effect = [
self.my_image,
sahara_base.APIException(404),
sahara_base.APIException(404)
]
self.sahara_client.images.find.side_effect = [[self.my_image], []]
self.assertEqual(img_id, self.sahara_plugin.get_image_id(img_id))
self.assertEqual(img_id, self.sahara_plugin.get_image_id(img_name))
self.assertRaises(exception.EntityNotFound,
self.sahara_plugin.get_image_id, 'noimage')
calls = [mock.call(name=img_name),
mock.call(name='noimage')]
self.sahara_client.images.find.assert_has_calls(calls)
def test_get_image_id_by_name_in_uuid(self):
"""Tests the get_image_id function by name in uuid."""
img_id = str(uuid.uuid4())
img_name = str(uuid.uuid4())
self.my_image.id = img_id
self.my_image.name = img_name
self.sahara_client.images.get.side_effect = [
sahara_base.APIException(error_code=400,
error_name='IMAGE_NOT_REGISTERED')]
self.sahara_client.images.find.return_value = [self.my_image]
self.assertEqual(img_id, self.sahara_plugin.get_image_id(img_name))
self.sahara_client.images.get.assert_called_once_with(img_name)
self.sahara_client.images.find.assert_called_once_with(name=img_name)
def test_get_image_id_sahara_exception(self):
"""Test get_image_id when sahara raises an exception."""
# Simulate HTTP exception
img_name = str(uuid.uuid4())
self.sahara_client.images.find.side_effect = [
sahara_base.APIException(error_message="Error", error_code=404)]
expected_error = "Error retrieving images list from sahara: Error"
e = self.assertRaises(exception.Error,
self.sahara_plugin.find_resource_by_name,
'images', img_name)
self.assertEqual(expected_error, str(e))
self.sahara_client.images.find.assert_called_once_with(name=img_name)
def test_get_image_id_not_found(self):
"""Tests the get_image_id function while image is not found."""
img_name = str(uuid.uuid4())
self.my_image.name = img_name
self.sahara_client.images.get.side_effect = [
sahara_base.APIException(error_code=400,
error_name='IMAGE_NOT_REGISTERED')]
self.sahara_client.images.find.return_value = []
self.assertRaises(exception.EntityNotFound,
self.sahara_plugin.get_image_id, img_name)
self.sahara_client.images.get.assert_called_once_with(img_name)
self.sahara_client.images.find.assert_called_once_with(name=img_name)
def test_get_image_id_name_ambiguity(self):
"""Tests the get_image_id function while name ambiguity ."""
img_name = 'ambiguity_name'
self.my_image.name = img_name
self.sahara_client.images.get.side_effect = sahara_base.APIException()
self.sahara_client.images.find.return_value = [self.my_image,
self.my_image]
self.assertRaises(exception.PhysicalResourceNameAmbiguity,
self.sahara_plugin.get_image_id, img_name)
self.sahara_client.images.find.assert_called_once_with(name=img_name)
def test_get_plugin_id(self):
"""Tests the get_plugin_id function."""
plugin_name = 'myfakeplugin'
self.my_plugin.name = plugin_name
def side_effect(name):
if name == plugin_name:
return self.my_plugin
else:
raise sahara_base.APIException(error_code=404,
error_name='NOT_FOUND')
self.sahara_client.plugins.get.side_effect = side_effect
self.assertIsNone(self.sahara_plugin.get_plugin_id(plugin_name))
self.assertRaises(exception.EntityNotFound,
self.sahara_plugin.get_plugin_id, 'noplugin')
calls = [mock.call(plugin_name), mock.call('noplugin')]
self.sahara_client.plugins.get.assert_has_calls(calls)
def test_validate_hadoop_version(self):
"""Tests the validate_hadoop_version function."""
versions = ['1.2.1', '2.6.0', '2.7.1']
plugin_name = 'vanilla'
self.my_plugin.name = plugin_name
self.my_plugin.versions = versions
self.sahara_client.plugins.get.return_value = self.my_plugin
self.assertIsNone(self.sahara_plugin.validate_hadoop_version(
plugin_name, '2.6.0'))
ex = self.assertRaises(exception.StackValidationFailed,
self.sahara_plugin.validate_hadoop_version,
plugin_name, '1.2.3')
self.assertEqual("Requested plugin 'vanilla' doesn't support version "
"'1.2.3'. Allowed versions are 1.2.1, 2.6.0, 2.7.1",
str(ex))
calls = [mock.call(plugin_name), mock.call(plugin_name)]
self.sahara_client.plugins.get.assert_has_calls(calls)
def test_get_job_type(self):
"""Tests the get_job_type function."""
job_type = 'myfakejobtype'
self.my_jobtype = job_type
def side_effect(name):
if name == job_type:
return self.my_jobtype
else:
raise sahara_base.APIException(error_code=404,
error_name='NOT_FOUND')
self.sahara_client.job_types.find_unique.side_effect = side_effect
self.assertEqual(self.sahara_plugin.get_job_type(job_type), job_type)
self.assertRaises(exception.EntityNotFound,
self.sahara_plugin.get_job_type, 'nojobtype')
calls = [mock.call(name=job_type), mock.call(name='nojobtype')]
self.sahara_client.job_types.find_unique.assert_has_calls(calls)
class SaharaConstraintsTest(common.HeatTestCase):
scenarios = [
('JobType', dict(
constraint=sahara.JobTypeConstraint(),
resource_name=None
)),
('ClusterTemplate', dict(
constraint=sahara.ClusterTemplateConstraint(),
resource_name='cluster_templates'
)),
('DataSource', dict(
constraint=sahara.DataSourceConstraint(),
resource_name='data_sources'
)),
('Cluster', dict(
constraint=sahara.ClusterConstraint(),
resource_name='clusters'
)),
('JobBinary', dict(
constraint=sahara.JobBinaryConstraint(),
resource_name='job_binaries'
)),
('Plugin', dict(
constraint=sahara.PluginConstraint(),
resource_name=None
)),
('Image', dict(
constraint=sahara.ImageConstraint(),
resource_name='images'
)),
]
def setUp(self):
super(SaharaConstraintsTest, self).setUp()
self.ctx = utils.dummy_context()
self.mock_get = mock.Mock()
cl_plgn = self.ctx.clients.client_plugin('sahara')
cl_plgn.find_resource_by_name_or_id = self.mock_get
cl_plgn.get_image_id = self.mock_get
cl_plgn.get_plugin_id = self.mock_get
cl_plgn.get_job_type = self.mock_get
def test_validation(self):
self.mock_get.return_value = "fake_val"
self.assertTrue(self.constraint.validate("foo", self.ctx))
if self.resource_name is None:
self.mock_get.assert_called_once_with("foo")
else:
self.mock_get.assert_called_once_with(self.resource_name, "foo")
def test_validation_error(self):
self.mock_get.side_effect = exception.EntityNotFound(
entity='Fake entity', name='bar')
self.assertFalse(self.constraint.validate("bar", self.ctx))
if self.resource_name is None:
self.mock_get.assert_called_once_with("bar")
else:
self.mock_get.assert_called_once_with(self.resource_name, "bar")

View File

@ -31,7 +31,6 @@ from heat.engine.clients.os.keystone import fake_keystoneclient as fake_ks
from heat.engine.clients.os.keystone import keystone_constraints as ks_constr
from heat.engine.clients.os.neutron import neutron_constraints as neutron
from heat.engine.clients.os import nova
from heat.engine.clients.os import sahara
from heat.engine.clients.os import trove
from heat.engine import environment
from heat.engine import resource
@ -304,10 +303,6 @@ class HeatTestCase(testscenarios.WithScenarios,
'validate')
validate.return_value = True
def stub_SaharaPluginConstraint(self):
validate = self.patchobject(sahara.PluginConstraint, 'validate')
validate.return_value = True
def stub_ProviderConstraint_validate(self):
validate = self.patchobject(neutron.ProviderConstraint, 'validate')
validate.return_value = True

View File

@ -1,232 +0,0 @@
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from oslo_config import cfg
from heat.common import exception
from heat.common import template_format
from heat.engine.clients.os import glance
from heat.engine.clients.os import neutron
from heat.engine.clients.os import sahara
from heat.engine.resources.openstack.sahara import cluster as sc
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
cluster_stack_template = """
heat_template_version: 2013-05-23
description: Hadoop Cluster by Sahara
resources:
super-cluster:
type: OS::Sahara::Cluster
properties:
name: super-cluster
plugin_name: vanilla
hadoop_version: 2.3.0
cluster_template_id: some_cluster_template_id
default_image_id: some_image
key_name: admin
neutron_management_network: some_network
shares:
- id: some_share_id
access_level: ro
"""
# NOTE(jfreud): the resource name contains an invalid character
cluster_stack_template_without_name = """
heat_template_version: 2013-05-23
description: Hadoop Cluster by Sahara
resources:
lots_of_underscore_name:
type: OS::Sahara::Cluster
properties:
plugin_name: vanilla
hadoop_version: 2.3.0
cluster_template_id: some_cluster_template_id
default_image_id: some_image
key_name: admin
neutron_management_network: some_network
shares:
- id: some_share_id
access_level: ro
"""
class FakeCluster(object):
def __init__(self, status='Active'):
self.status = status
self.id = "some_id"
self.name = "super-cluster"
self.info = {"HDFS": {"NameNode": "hdfs://hostname:port",
"Web UI": "http://host_ip:port"}}
self.to_dict = lambda: {"cluster": "info"}
class SaharaClusterTest(common.HeatTestCase):
def setUp(self):
super(SaharaClusterTest, self).setUp()
self.patchobject(sc.constraints.CustomConstraint, '_is_valid'
).return_value = True
self.patchobject(glance.GlanceClientPlugin,
'find_image_by_name_or_id'
).return_value = 'some_image_id'
self.patchobject(neutron.NeutronClientPlugin, '_create')
self.patchobject(neutron.NeutronClientPlugin,
'find_resourceid_by_name_or_id',
return_value='some_network_id')
self.sahara_mock = mock.MagicMock()
self.patchobject(sahara.SaharaClientPlugin, '_create'
).return_value = self.sahara_mock
self.patchobject(sahara.SaharaClientPlugin, 'validate_hadoop_version'
).return_value = None
self.cl_mgr = self.sahara_mock.clusters
self.fake_cl = FakeCluster()
self.t = template_format.parse(cluster_stack_template)
self.t2 = template_format.parse(cluster_stack_template_without_name)
def _init_cluster(self, template, name='super-cluster'):
self.stack = utils.parse_stack(template)
cluster = self.stack[name]
return cluster
def _create_cluster(self, template):
cluster = self._init_cluster(template)
self.cl_mgr.create.return_value = self.fake_cl
self.cl_mgr.get.return_value = self.fake_cl
scheduler.TaskRunner(cluster.create)()
self.assertEqual((cluster.CREATE, cluster.COMPLETE),
cluster.state)
self.assertEqual(self.fake_cl.id, cluster.resource_id)
return cluster
def test_cluster_create(self):
self._create_cluster(self.t)
expected_args = ('super-cluster', 'vanilla', '2.3.0')
expected_kwargs = {'cluster_template_id': 'some_cluster_template_id',
'user_keypair_id': 'admin',
'default_image_id': 'some_image_id',
'net_id': 'some_network_id',
'use_autoconfig': None,
'shares': [{'id': 'some_share_id',
'access_level': 'ro',
'path': None}]}
self.cl_mgr.create.assert_called_once_with(*expected_args,
**expected_kwargs)
self.cl_mgr.get.assert_called_once_with(self.fake_cl.id)
def test_cluster_create_invalid_name(self):
cluster = self._init_cluster(self.t2, 'lots_of_underscore_name')
self.cl_mgr.create.return_value = self.fake_cl
self.cl_mgr.get.return_value = self.fake_cl
scheduler.TaskRunner(cluster.create)()
name = self.cl_mgr.create.call_args[0][0]
self.assertIn('lotsofunderscorename', name)
    def test_cluster_create_fails(self):
        """A cluster that polls back in Error status fails the CREATE action."""
        # Disable retries so the first Error status is terminal.
        cfg.CONF.set_override('action_retry_limit', 0)
        cluster = self._init_cluster(self.t)
        self.cl_mgr.create.return_value = self.fake_cl
        self.cl_mgr.get.return_value = FakeCluster(status='Error')
        create_task = scheduler.TaskRunner(cluster.create)
        ex = self.assertRaises(exception.ResourceFailure, create_task)
        expected = ('ResourceInError: resources.super-cluster: '
                    'Went to status Error due to "Unknown"')
        self.assertEqual(expected, str(ex))
    def test_cluster_check_delete_complete_error(self):
        """An API error while polling deletion surfaces as ResourceFailure."""
        cluster = self._create_cluster(self.t)
        # First get() still finds the cluster, the second raises.
        self.cl_mgr.get.side_effect = [
            self.fake_cl,
            sahara.sahara_base.APIException()]
        # Forget the get() calls made during _create_cluster().
        self.cl_mgr.get.reset_mock()
        delete_task = scheduler.TaskRunner(cluster.delete)
        ex = self.assertRaises(exception.ResourceFailure, delete_task)
        expected = "APIException: resources.super-cluster: None"
        self.assertEqual(expected, str(ex))
        self.cl_mgr.delete.assert_called_once_with(self.fake_cl.id)
        self.assertEqual(2, self.cl_mgr.get.call_count)
    def test_cluster_delete_cluster_in_error(self):
        """A cluster entering Error status during deletion fails the DELETE."""
        cluster = self._create_cluster(self.t)
        # First get() still finds the cluster, the second reports Error.
        self.cl_mgr.get.side_effect = [
            self.fake_cl,
            FakeCluster(status='Error')]
        # Forget the get() calls made during _create_cluster().
        self.cl_mgr.get.reset_mock()
        delete_task = scheduler.TaskRunner(cluster.delete)
        ex = self.assertRaises(exception.ResourceFailure, delete_task)
        expected = ('ResourceInError: resources.super-cluster: '
                    'Went to status Error due to "Unknown"')
        self.assertEqual(expected, str(ex))
        self.cl_mgr.delete.assert_called_once_with(self.fake_cl.id)
        self.assertEqual(2, self.cl_mgr.get.call_count)
    def test_cluster_resolve_attribute(self):
        """Attributes are resolved by fetching the live cluster from the API."""
        cluster = self._create_cluster(self.t)
        self.cl_mgr.get.reset_mock()
        self.assertEqual(self.fake_cl.info,
                         cluster._resolve_attribute('info'))
        self.assertEqual(self.fake_cl.status,
                         cluster._resolve_attribute('status'))
        self.assertEqual({"cluster": "info"}, cluster.FnGetAtt('show'))
        # One get() per attribute lookup above.
        self.assertEqual(3, self.cl_mgr.get.call_count)
    def test_cluster_create_no_image_anywhere_fails(self):
        """Missing default_image_id locally and in the cluster template fails."""
        self.t['resources']['super-cluster']['properties'].pop(
            'default_image_id')
        # The referenced cluster template provides no fallback image either.
        self.sahara_mock.cluster_templates.get.return_value = mock.Mock(
            default_image_id=None)
        cluster = self._init_cluster(self.t)
        ex = self.assertRaises(exception.ResourceFailure,
                               scheduler.TaskRunner(cluster.create))
        self.assertIsInstance(ex.exc, exception.StackValidationFailed)
        self.assertIn("default_image_id must be provided: "
                      "Referenced cluster template some_cluster_template_id "
                      "has no default_image_id defined.",
                      str(ex.message))
    def test_cluster_validate_no_network_on_neutron_fails(self):
        """neutron_management_network is a required property."""
        self.t['resources']['super-cluster']['properties'].pop(
            'neutron_management_network')
        cluster = self._init_cluster(self.t)
        ex = self.assertRaises(exception.StackValidationFailed,
                               cluster.validate)
        error_msg = ('Property error: resources.super-cluster.properties: '
                     'Property neutron_management_network not assigned')
        self.assertEqual(error_msg, str(ex))
    def test_deprecated_properties_correctly_translates(self):
        """Deprecated 'image' property is translated to 'default_image_id'."""
        tmpl = '''
heat_template_version: 2013-05-23
description: Hadoop Cluster by Sahara
resources:
  super-cluster:
    type: OS::Sahara::Cluster
    properties:
      name: super-cluster
      plugin_name: vanilla
      hadoop_version: 2.3.0
      cluster_template_id: some_cluster_template_id
      image: some_image
      key_name: admin
      neutron_management_network: some_network
        '''
        ct = self._create_cluster(template_format.parse(tmpl))
        # The glance lookup stubbed in setUp resolves 'some_image'.
        self.assertEqual('some_image_id',
                         ct.properties.get('default_image_id'))
        self.assertIsNone(ct.properties.get('image_id'))

View File

@ -1,111 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from heat.common import exception
from heat.common import template_format
from heat.engine.resources.openstack.sahara import data_source
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
data_source_template = """
heat_template_version: 2015-10-15
resources:
data-source:
type: OS::Sahara::DataSource
properties:
name: my-ds
type: swift
url: swift://container.sahara/text
credentials:
user: admin
password: swordfish
"""
class SaharaDataSourceTest(common.HeatTestCase):
    """Tests for the OS::Sahara::DataSource resource."""

    def setUp(self):
        super(SaharaDataSourceTest, self).setUp()
        t = template_format.parse(data_source_template)
        self.stack = utils.parse_stack(t)
        resource_defns = self.stack.t.resource_definitions(self.stack)
        self.rsrc_defn = resource_defns['data-source']
        # Replace the sahara client with a mock so no API calls are made.
        self.client = mock.Mock()
        self.patchobject(data_source.DataSource, 'client',
                         return_value=self.client)

    def _create_resource(self, name, snippet, stack):
        """Create a DataSource resource backed by the mocked client."""
        ds = data_source.DataSource(name, snippet, stack)
        value = mock.MagicMock(id='12345')
        self.client.data_sources.create.return_value = value
        scheduler.TaskRunner(ds.create)()
        return ds

    def test_create(self):
        """Template properties map onto data_sources.create() kwargs."""
        ds = self._create_resource('data-source', self.rsrc_defn, self.stack)
        args = self.client.data_sources.create.call_args[1]
        expected_args = {
            'name': 'my-ds',
            'description': '',
            'data_source_type': 'swift',
            'url': 'swift://container.sahara/text',
            'credential_user': 'admin',
            'credential_pass': 'swordfish'
        }
        self.assertEqual(expected_args, args)
        self.assertEqual('12345', ds.resource_id)
        expected_state = (ds.CREATE, ds.COMPLETE)
        self.assertEqual(expected_state, ds.state)

    def test_update(self):
        """Changing type/url triggers an in-place data_sources.update()."""
        ds = self._create_resource('data-source', self.rsrc_defn,
                                   self.stack)
        props = self.stack.t.t['resources']['data-source']['properties'].copy()
        props['type'] = 'hdfs'
        props['url'] = 'my/path'
        self.rsrc_defn = self.rsrc_defn.freeze(properties=props)
        scheduler.TaskRunner(ds.update, self.rsrc_defn)()
        data = {
            'name': 'my-ds',
            'description': '',
            'type': 'hdfs',
            'url': 'my/path',
            'credentials': {
                'user': 'admin',
                'password': 'swordfish'
            }
        }
        self.client.data_sources.update.assert_called_once_with(
            '12345', data)
        self.assertEqual((ds.UPDATE, ds.COMPLETE), ds.state)

    def test_show_attribute(self):
        """The 'show' attribute returns the raw API representation."""
        ds = self._create_resource('data-source', self.rsrc_defn, self.stack)
        value = mock.MagicMock()
        value.to_dict.return_value = {'ds': 'info'}
        self.client.data_sources.get.return_value = value
        self.assertEqual({'ds': 'info'}, ds.FnGetAtt('show'))

    def test_validate_password_without_user(self):
        """Credentials with a password but no user fail validation."""
        props = self.stack.t.t['resources']['data-source']['properties'].copy()
        del props['credentials']['user']
        self.rsrc_defn = self.rsrc_defn.freeze(properties=props)
        ds = data_source.DataSource('data-source', self.rsrc_defn, self.stack)
        ex = self.assertRaises(exception.StackValidationFailed, ds.validate)
        error_msg = ('Property error: resources.data-source.properties.'
                     'credentials: Property user not assigned')
        self.assertEqual(error_msg, str(ex))

View File

@ -1,109 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from heat.common import template_format
from heat.engine.clients.os import glance
from heat.engine.clients.os import sahara
from heat.engine.resources.openstack.sahara import image
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
sahara_image_template = """
heat_template_version: 2015-10-15
resources:
sahara-image:
type: OS::Sahara::ImageRegistry
properties:
image: sahara-icehouse-vanilla-1.2.1-ubuntu-13.10
username: ubuntu
tags:
- vanilla
- 1.2.1
"""
class SaharaImageTest(common.HeatTestCase):
    """Tests for the OS::Sahara::ImageRegistry resource."""

    def setUp(self):
        super(SaharaImageTest, self).setUp()
        self.tmpl = template_format.parse(sahara_image_template)
        self.stack = utils.parse_stack(self.tmpl)
        resource_defns = self.stack.t.resource_definitions(self.stack)
        self.rsrc_defn = resource_defns['sahara-image']
        # Mock both the sahara client and the glance image-name lookup.
        self.client = mock.Mock()
        self.patchobject(image.SaharaImageRegistry, 'client',
                         return_value=self.client)
        self.patchobject(glance.GlanceClientPlugin,
                         'find_image_by_name_or_id',
                         return_value='12345')

    def _create_resource(self, name, snippet, stack):
        """Create a SaharaImageRegistry resource via the task scheduler."""
        img = image.SaharaImageRegistry(name, snippet, stack)
        scheduler.TaskRunner(img.create)()
        return img

    def test_create(self):
        """Creation registers the image (username, description) and its tags."""
        img = self._create_resource('sahara-image', self.rsrc_defn, self.stack)
        args = ('12345', 'ubuntu', '')
        self.client.images.update_image.assert_called_once_with(*args)
        self.client.images.update_tags.assert_called_once_with(
            '12345', ['vanilla', '1.2.1'])
        self.assertEqual('12345', img.resource_id)
        expected_state = (img.CREATE, img.COMPLETE)
        self.assertEqual(expected_state, img.state)

    def test_update(self):
        """Updating tags/description re-issues both update calls."""
        img = self._create_resource('sahara-image', self.rsrc_defn, self.stack)
        props = self.tmpl['resources']['sahara-image']['properties'].copy()
        props['tags'] = []
        props['description'] = 'test image'
        self.rsrc_defn = self.rsrc_defn.freeze(properties=props)
        scheduler.TaskRunner(img.update, self.rsrc_defn)()
        # First call from create(), second from update().
        tags_update_calls = [
            mock.call('12345', ['vanilla', '1.2.1']),
            mock.call('12345', [])
        ]
        image_update_calls = [
            mock.call('12345', 'ubuntu', ''),
            mock.call('12345', 'ubuntu', 'test image')
        ]
        self.client.images.update_image.assert_has_calls(image_update_calls)
        self.client.images.update_tags.assert_has_calls(tags_update_calls)
        self.assertEqual((img.UPDATE, img.COMPLETE), img.state)

    def test_delete(self):
        """Deletion unregisters the image from Sahara."""
        img = self._create_resource('sahara-image', self.rsrc_defn, self.stack)
        scheduler.TaskRunner(img.delete)()
        self.assertEqual((img.DELETE, img.COMPLETE), img.state)
        self.client.images.unregister_image.assert_called_once_with(
            img.resource_id)

    def test_delete_not_found(self):
        """A 404 from unregister_image is treated as successful deletion."""
        img = self._create_resource('sahara-image', self.rsrc_defn, self.stack)
        self.client.images.unregister_image.side_effect = (
            sahara.sahara_base.APIException(error_code=404))
        scheduler.TaskRunner(img.delete)()
        self.assertEqual((img.DELETE, img.COMPLETE), img.state)
        self.client.images.unregister_image.assert_called_once_with(
            img.resource_id)

    def test_show_attribute(self):
        """The 'show' attribute returns the raw API representation."""
        img = self._create_resource('sahara-image', self.rsrc_defn, self.stack)
        value = mock.MagicMock()
        value.to_dict.return_value = {'img': 'info'}
        self.client.images.get.return_value = value
        self.assertEqual({'img': 'info'}, img.FnGetAtt('show'))

View File

@ -1,183 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from heat.common import template_format
from heat.engine.clients.os import sahara
from heat.engine.resources.openstack.sahara import job
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
job_template = """
heat_template_version: newton
resources:
job:
type: OS::Sahara::Job
properties:
name: test_name_job
type: MapReduce
libs: [ fake-lib-id ]
description: test_description
is_public: True
default_execution_data:
cluster: fake-cluster-id
input: fake-input-id
output: fake-output-id
is_public: True
configs:
mapred.map.class: org.apache.oozie.example.SampleMapper
mapred.reduce.class: org.apache.oozie.example.SampleReducer
mapreduce.framework.name: yarn
"""
class SaharaJobTest(common.HeatTestCase):
    """Tests for the OS::Sahara::Job resource."""

    def setUp(self):
        super(SaharaJobTest, self).setUp()
        t = template_format.parse(job_template)
        self.stack = utils.parse_stack(t)
        resource_defns = self.stack.t.resource_definitions(self.stack)
        self.rsrc_defn = resource_defns['job']
        # Replace the sahara client with a mock so no API calls are made.
        self.client = mock.Mock()
        self.patchobject(job.SaharaJob, 'client', return_value=self.client)
        # One pre-existing execution is reported for the job; it is used by
        # the delete and attribute tests below.
        fake_execution = mock.Mock()
        fake_execution.job_id = 'fake-resource-id'
        fake_execution.id = 'fake-execution-id'
        fake_execution.to_dict.return_value = {'job_id': 'fake-resource-id',
                                               'id': 'fake-execution-id'}
        self.client.job_executions.find.return_value = [fake_execution]

    def _create_resource(self, name, snippet, stack, without_name=False):
        """Create a SaharaJob; *without_name* also stubs the generated name."""
        jb = job.SaharaJob(name, snippet, stack)
        if without_name:
            self.client.jobs.create = mock.Mock(return_value='fake_rsrc_id')
            jb.physical_resource_name = mock.Mock(
                return_value='fake_phys_name')
        value = mock.MagicMock(id='fake-resource-id')
        self.client.jobs.create.return_value = value
        # Stub the client-plugin lookups used to resolve libs/cluster ids.
        mock_get_res = mock.Mock(return_value='some res id')
        mock_get_type = mock.Mock(return_value='MapReduce')
        jb.client_plugin().find_resource_by_name_or_id = mock_get_res
        jb.client_plugin().get_job_type = mock_get_type
        scheduler.TaskRunner(jb.create)()
        return jb

    def test_create(self):
        """Template properties map onto jobs.create() kwargs."""
        jb = self._create_resource('job', self.rsrc_defn, self.stack)
        args = self.client.jobs.create.call_args[1]
        expected_args = {
            'name': 'test_name_job',
            'type': 'MapReduce',
            'libs': ['some res id'],
            'description': 'test_description',
            'is_public': True,
            'is_protected': False,
            'mains': []
        }
        self.assertEqual(expected_args, args)
        self.assertEqual('fake-resource-id', jb.resource_id)
        expected_state = (jb.CREATE, jb.COMPLETE)
        self.assertEqual(expected_state, jb.state)

    def test_create_without_name_passed(self):
        """Without a name property, the physical resource name is used."""
        props = self.stack.t.t['resources']['job']['properties']
        del props['name']
        self.rsrc_defn = self.rsrc_defn.freeze(properties=props)
        jb = self._create_resource('job', self.rsrc_defn, self.stack, True)
        args = self.client.jobs.create.call_args[1]
        expected_args = {
            'name': 'fake_phys_name',
            'type': 'MapReduce',
            'libs': ['some res id'],
            'description': 'test_description',
            'is_public': True,
            'is_protected': False,
            'mains': []
        }
        self.assertEqual(expected_args, args)
        self.assertEqual('fake-resource-id', jb.resource_id)
        expected_state = (jb.CREATE, jb.COMPLETE)
        self.assertEqual(expected_state, jb.state)

    def test_delete(self):
        """Deletion removes the job and its executions."""
        jb = self._create_resource('job', self.rsrc_defn, self.stack)
        scheduler.TaskRunner(jb.delete)()
        self.assertEqual((jb.DELETE, jb.COMPLETE), jb.state)
        self.client.jobs.delete.assert_called_once_with(jb.resource_id)
        self.client.job_executions.delete.assert_called_once_with(
            'fake-execution-id')

    def test_delete_not_found(self):
        """A 404 from jobs.delete is treated as successful deletion."""
        jb = self._create_resource('job', self.rsrc_defn, self.stack)
        self.client.jobs.delete.side_effect = (
            sahara.sahara_base.APIException(error_code=404))
        scheduler.TaskRunner(jb.delete)()
        self.assertEqual((jb.DELETE, jb.COMPLETE), jb.state)
        self.client.jobs.delete.assert_called_once_with(jb.resource_id)
        self.client.job_executions.delete.assert_called_once_with(
            'fake-execution-id')

    def test_delete_job_executions_raises_error(self):
        """Non-404 errors while listing executions propagate unchanged."""
        jb = self._create_resource('job', self.rsrc_defn, self.stack)
        self.client.job_executions.find.side_effect = [
            sahara.sahara_base.APIException(400)]
        self.assertRaises(sahara.sahara_base.APIException, jb.handle_delete)

    def test_update(self):
        """Mutable properties are updated in place via jobs.update()."""
        jb = self._create_resource('job', self.rsrc_defn, self.stack)
        props = self.stack.t.t['resources']['job']['properties'].copy()
        props['name'] = 'test_name_job_new'
        props['description'] = 'test_description_new'
        props['is_public'] = False
        self.rsrc_defn = self.rsrc_defn.freeze(properties=props)
        scheduler.TaskRunner(jb.update, self.rsrc_defn)()
        self.client.jobs.update.assert_called_once_with(
            'fake-resource-id', name='test_name_job_new',
            description='test_description_new', is_public=False)
        self.assertEqual((jb.UPDATE, jb.COMPLETE), jb.state)

    def test_handle_signal(self):
        """Signalling the job starts an execution built from default data."""
        jb = self._create_resource('job', self.rsrc_defn, self.stack)
        scheduler.TaskRunner(jb.handle_signal, None)()
        expected_args = {
            'job_id': 'fake-resource-id',
            'cluster_id': 'some res id',
            'input_id': 'some res id',
            'output_id': 'some res id',
            'is_public': True,
            'is_protected': False,
            'interface': {},
            'configs': {
                'configs': {
                    'mapred.reduce.class':
                        'org.apache.oozie.example.SampleReducer',
                    'mapred.map.class':
                        'org.apache.oozie.example.SampleMapper',
                    'mapreduce.framework.name': 'yarn'},
                'args': [],
                'params': {}
            }
        }
        self.client.job_executions.create.assert_called_once_with(
            **expected_args)

    def test_attributes(self):
        """'executions' and 'default_execution_url' attributes resolve."""
        jb = self._create_resource('job', self.rsrc_defn, self.stack)
        jb._get_ec2_signed_url = mock.Mock(return_value='fake-url')
        self.assertEqual('fake-execution-id',
                         jb.FnGetAtt('executions')[0]['id'])
        self.assertEqual('fake-url', jb.FnGetAtt('default_execution_url'))

View File

@ -1,134 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from heat.common import exception
from heat.common import template_format
from heat.engine.clients.os import sahara
from heat.engine.resources.openstack.sahara import job_binary
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
job_binary_template = """
heat_template_version: 2015-10-15
resources:
job-binary:
type: OS::Sahara::JobBinary
properties:
name: my-jb
url: swift://container/jar-example.jar
credentials: {'user': 'admin','password': 'swordfish'}
"""
class SaharaJobBinaryTest(common.HeatTestCase):
    """Tests for the OS::Sahara::JobBinary resource."""

    def setUp(self):
        super(SaharaJobBinaryTest, self).setUp()
        t = template_format.parse(job_binary_template)
        self.stack = utils.parse_stack(t)
        resource_defns = self.stack.t.resource_definitions(self.stack)
        self.rsrc_defn = resource_defns['job-binary']
        # Replace the sahara client with a mock so no API calls are made.
        self.client = mock.Mock()
        self.patchobject(job_binary.JobBinary, 'client',
                         return_value=self.client)

    def _create_resource(self, name, snippet, stack):
        """Create a JobBinary resource backed by the mocked client."""
        jb = job_binary.JobBinary(name, snippet, stack)
        value = mock.MagicMock(id='12345')
        self.client.job_binaries.create.return_value = value
        scheduler.TaskRunner(jb.create)()
        return jb

    def test_create(self):
        """Template properties map onto job_binaries.create() kwargs."""
        jb = self._create_resource('job-binary', self.rsrc_defn, self.stack)
        args = self.client.job_binaries.create.call_args[1]
        expected_args = {
            'name': 'my-jb',
            'description': '',
            'url': 'swift://container/jar-example.jar',
            'extra': {
                'user': 'admin',
                'password': 'swordfish'
            }
        }
        self.assertEqual(expected_args, args)
        self.assertEqual('12345', jb.resource_id)
        expected_state = (jb.CREATE, jb.COMPLETE)
        self.assertEqual(expected_state, jb.state)

    def test_update(self):
        """Changing the url triggers an in-place job_binaries.update()."""
        jb = self._create_resource('job-binary', self.rsrc_defn,
                                   self.stack)
        props = self.stack.t.t['resources']['job-binary']['properties'].copy()
        props['url'] = 'internal-db://94b8821d-1ce7-4131-8364-a6c6d85ad57b'
        self.rsrc_defn = self.rsrc_defn.freeze(properties=props)
        scheduler.TaskRunner(jb.update, self.rsrc_defn)()
        data = {
            'name': 'my-jb',
            'description': '',
            'url': 'internal-db://94b8821d-1ce7-4131-8364-a6c6d85ad57b',
            'extra': {
                'user': 'admin',
                'password': 'swordfish'
            }
        }
        self.client.job_binaries.update.assert_called_once_with(
            '12345', data)
        self.assertEqual((jb.UPDATE, jb.COMPLETE), jb.state)

    def test_delete(self):
        """Deletion removes the job binary."""
        jb = self._create_resource('job-binary', self.rsrc_defn, self.stack)
        scheduler.TaskRunner(jb.delete)()
        self.assertEqual((jb.DELETE, jb.COMPLETE), jb.state)
        self.client.job_binaries.delete.assert_called_once_with(
            jb.resource_id)

    def test_delete_not_found(self):
        """A 404 from job_binaries.delete is treated as success."""
        jb = self._create_resource('job-binary', self.rsrc_defn, self.stack)
        self.client.job_binaries.delete.side_effect = (
            sahara.sahara_base.APIException(error_code=404))
        scheduler.TaskRunner(jb.delete)()
        self.assertEqual((jb.DELETE, jb.COMPLETE), jb.state)
        self.client.job_binaries.delete.assert_called_once_with(
            jb.resource_id)

    def test_show_attribute(self):
        """The 'show' attribute returns the raw API representation."""
        jb = self._create_resource('job-binary', self.rsrc_defn, self.stack)
        value = mock.MagicMock()
        value.to_dict.return_value = {'jb': 'info'}
        self.client.job_binaries.get.return_value = value
        self.assertEqual({'jb': 'info'}, jb.FnGetAtt('show'))

    def test_validate_invalid_url(self):
        """A malformed internal-db URL (not a UUID) fails validation."""
        props = self.stack.t.t['resources']['job-binary']['properties'].copy()
        props['url'] = 'internal-db://38273f82'
        self.rsrc_defn = self.rsrc_defn.freeze(properties=props)
        jb = job_binary.JobBinary('job-binary', self.rsrc_defn, self.stack)
        ex = self.assertRaises(exception.StackValidationFailed, jb.validate)
        error_msg = ('resources.job-binary.properties: internal-db://38273f82 '
                     'is not a valid job location.')
        self.assertEqual(error_msg, str(ex))

    def test_validate_password_without_user(self):
        """Credentials with a password but no user fail validation."""
        props = self.stack.t.t['resources']['job-binary']['properties'].copy()
        props['credentials'].pop('user')
        self.rsrc_defn = self.rsrc_defn.freeze(properties=props)
        jb = job_binary.JobBinary('job-binary', self.rsrc_defn, self.stack)
        ex = self.assertRaises(exception.StackValidationFailed, jb.validate)
        error_msg = ('Property error: resources.job-binary.properties.'
                     'credentials: Property user not assigned')
        self.assertEqual(error_msg, str(ex))

View File

@ -1,481 +0,0 @@
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from heat.common import exception
from heat.common import template_format
from heat.engine.clients.os import neutron
from heat.engine.clients.os import nova
from heat.engine.clients.os import sahara
from heat.engine.resources.openstack.sahara import templates as st
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
node_group_template = """
heat_template_version: 2013-05-23
description: Sahara Node Group Template
resources:
node-group:
type: OS::Sahara::NodeGroupTemplate
properties:
name: node-group-template
plugin_name: vanilla
hadoop_version: 2.3.0
flavor: m1.large
volume_type: lvm
floating_ip_pool: some_pool_name
node_processes:
- namenode
- jobtracker
is_proxy_gateway: True
shares:
- id: e45eaabf-9300-42e2-b6eb-9ebc92081f46
access_level: ro
"""
cluster_template = """
heat_template_version: 2013-05-23
description: Sahara Cluster Template
resources:
cluster-template:
type: OS::Sahara::ClusterTemplate
properties:
name: test-cluster-template
plugin_name: vanilla
hadoop_version: 2.3.0
neutron_management_network: some_network
shares:
- id: e45eaabf-9300-42e2-b6eb-9ebc92081f46
access_level: ro
"""
cluster_template_without_name = """
heat_template_version: 2013-05-23
resources:
cluster_template!:
type: OS::Sahara::ClusterTemplate
properties:
plugin_name: vanilla
hadoop_version: 2.3.0
neutron_management_network: some_network
"""
node_group_template_without_name = """
heat_template_version: 2013-05-23
resources:
node_group!:
type: OS::Sahara::NodeGroupTemplate
properties:
plugin_name: vanilla
hadoop_version: 2.3.0
flavor: m1.large
floating_ip_pool: some_pool_name
node_processes:
- namenode
- jobtracker
"""
class FakeNodeGroupTemplate(object):
    """Minimal stand-in for a saharaclient node group template object."""

    def __init__(self):
        self.id = "some_ng_id"
        # Fixed: this previously carried "test-cluster-template", evidently
        # swapped with FakeClusterTemplate's name. No test asserts on the
        # name attribute, so this only corrects a misleading fixture.
        self.name = "node-group-template"
        # to_dict() backs the resource's 'show' attribute in the tests.
        self.to_dict = lambda: {"ng-template": "info"}
class FakeClusterTemplate(object):
    """Minimal stand-in for a saharaclient cluster template object."""

    def __init__(self):
        self.id = "some_ct_id"
        # Fixed: this previously carried "node-group-template", evidently
        # swapped with FakeNodeGroupTemplate's name. No test asserts on the
        # name attribute, so this only corrects a misleading fixture.
        self.name = "test-cluster-template"
        # to_dict() backs the resource's 'show' attribute in the tests.
        self.to_dict = lambda: {"cluster-template": "info"}
class SaharaNodeGroupTemplateTest(common.HeatTestCase):
    """Tests for the OS::Sahara::NodeGroupTemplate resource."""

    def setUp(self):
        super(SaharaNodeGroupTemplateTest, self).setUp()
        # Stub out the constraints and client lookups touched by validation.
        self.stub_FlavorConstraint_validate()
        self.stub_SaharaPluginConstraint()
        self.stub_VolumeTypeConstraint_validate()
        self.patchobject(nova.NovaClientPlugin, 'find_flavor_by_name_or_id'
                         ).return_value = 'someflavorid'
        self.patchobject(neutron.NeutronClientPlugin, '_create')
        self.patchobject(neutron.NeutronClientPlugin,
                         'find_resourceid_by_name_or_id',
                         return_value='some_pool_id')
        sahara_mock = mock.MagicMock()
        self.ngt_mgr = sahara_mock.node_group_templates
        self.plugin_mgr = sahara_mock.plugins
        self.patchobject(sahara.SaharaClientPlugin,
                         '_create').return_value = sahara_mock
        self.patchobject(sahara.SaharaClientPlugin, 'validate_hadoop_version'
                         ).return_value = None
        self.fake_ngt = FakeNodeGroupTemplate()
        self.t = template_format.parse(node_group_template)
        self.ngt_props = self.t['resources']['node-group']['properties']

    def _init_ngt(self, template):
        """Parse *template* into a stack and return the node-group resource."""
        self.stack = utils.parse_stack(template)
        return self.stack['node-group']

    def _create_ngt(self, template):
        """Create the node group template and assert CREATE/COMPLETE."""
        ngt = self._init_ngt(template)
        self.ngt_mgr.create.return_value = self.fake_ngt
        scheduler.TaskRunner(ngt.create)()
        self.assertEqual((ngt.CREATE, ngt.COMPLETE), ngt.state)
        self.assertEqual(self.fake_ngt.id, ngt.resource_id)
        return ngt

    def test_ngt_create(self):
        """Happy path: all properties map to node_group_templates.create()."""
        self._create_ngt(self.t)
        args = {
            'name': 'node-group-template',
            'plugin_name': 'vanilla',
            'hadoop_version': '2.3.0',
            'flavor_id': 'someflavorid',
            'description': "",
            'volumes_per_node': 0,
            'volumes_size': None,
            'volume_type': 'lvm',
            'security_groups': None,
            'auto_security_group': None,
            'availability_zone': None,
            'volumes_availability_zone': None,
            'node_processes': ['namenode', 'jobtracker'],
            'floating_ip_pool': 'some_pool_id',
            'node_configs': None,
            'image_id': None,
            'is_proxy_gateway': True,
            'volume_local_to_instance': None,
            'use_autoconfig': None,
            'shares': [{'id': 'e45eaabf-9300-42e2-b6eb-9ebc92081f46',
                        'access_level': 'ro',
                        'path': None}]
        }
        self.ngt_mgr.create.assert_called_once_with(**args)

    def test_validate_floatingippool_on_neutron_fails(self):
        """Both ambiguous and missing floating-IP pools fail validation."""
        ngt = self._init_ngt(self.t)
        # First validate(): duplicate match; second: no match (404).
        self.patchobject(
            neutron.NeutronClientPlugin,
            'find_resourceid_by_name_or_id'
        ).side_effect = [
            neutron.exceptions.NeutronClientNoUniqueMatch(message='Too many'),
            neutron.exceptions.NeutronClientException(message='Not found',
                                                      status_code=404)
        ]
        ex = self.assertRaises(exception.StackValidationFailed, ngt.validate)
        self.assertEqual('Too many',
                         str(ex))
        ex = self.assertRaises(exception.StackValidationFailed, ngt.validate)
        self.assertEqual('Not found',
                         str(ex))

    def test_validate_flavor_constraint_return_false(self):
        """A failing flavor constraint yields a property error."""
        self.t['resources']['node-group']['properties'].pop('floating_ip_pool')
        self.t['resources']['node-group']['properties'].pop('volume_type')
        ngt = self._init_ngt(self.t)
        self.patchobject(nova.FlavorConstraint, 'validate'
                         ).return_value = False
        ex = self.assertRaises(exception.StackValidationFailed, ngt.validate)
        self.assertEqual(u"Property error: "
                         u"resources.node-group.properties.flavor: "
                         u"Error validating value 'm1.large'",
                         str(ex))

    def test_template_invalid_name(self):
        """Invalid resource names fall back to a generated physical name."""
        tmpl = template_format.parse(node_group_template_without_name)
        stack = utils.parse_stack(tmpl)
        ngt = stack['node_group!']
        self.ngt_mgr.create.return_value = self.fake_ngt
        scheduler.TaskRunner(ngt.create)()
        self.assertEqual((ngt.CREATE, ngt.COMPLETE), ngt.state)
        self.assertEqual(self.fake_ngt.id, ngt.resource_id)
        name = self.ngt_mgr.create.call_args[1]['name']
        self.assertIn('-nodegroup-', name)

    def test_ngt_show_resource(self):
        """The 'show' attribute returns the raw API representation."""
        ngt = self._create_ngt(self.t)
        self.ngt_mgr.get.return_value = self.fake_ngt
        self.assertEqual({"ng-template": "info"}, ngt.FnGetAtt('show'))
        self.ngt_mgr.get.assert_called_once_with('some_ng_id')

    def test_validate_node_processes_fails(self):
        """Node processes not offered by the plugin fail validation."""
        ngt = self._init_ngt(self.t)
        plugin_mock = mock.MagicMock()
        plugin_mock.node_processes = {
            "HDFS": ["namenode", "datanode", "secondarynamenode"],
            "JobFlow": ["oozie"]
        }
        self.plugin_mgr.get_version_details.return_value = plugin_mock
        ex = self.assertRaises(exception.StackValidationFailed, ngt.validate)
        self.assertIn("resources.node-group.properties: Plugin vanilla "
                      "doesn't support the following node processes: "
                      "jobtracker. Allowed processes are: ",
                      str(ex))
        # Allowed-process ordering is not guaranteed; check membership only.
        self.assertIn("namenode", str(ex))
        self.assertIn("datanode", str(ex))
        self.assertIn("secondarynamenode", str(ex))
        self.assertIn("oozie", str(ex))

    def test_update(self):
        """Mutable properties are updated via node_group_templates.update()."""
        ngt = self._create_ngt(self.t)
        props = self.ngt_props.copy()
        props['node_processes'] = ['tasktracker', 'datanode']
        props['name'] = 'new-ng-template'
        rsrc_defn = ngt.t.freeze(properties=props)
        scheduler.TaskRunner(ngt.update, rsrc_defn)()
        args = {'node_processes': ['tasktracker', 'datanode'],
                'name': 'new-ng-template'}
        self.ngt_mgr.update.assert_called_once_with('some_ng_id', **args)
        self.assertEqual((ngt.UPDATE, ngt.COMPLETE), ngt.state)

    def test_get_live_state(self):
        """get_live_state maps the API response back onto properties."""
        ngt = self._create_ngt(self.t)
        resp = mock.MagicMock()
        resp.to_dict.return_value = {
            'volume_local_to_instance': False,
            'availability_zone': None,
            'updated_at': None,
            'use_autoconfig': True,
            'volumes_per_node': 0,
            'id': '6157755e-dfd3-45b4-a445-36588e5f75ad',
            'security_groups': None,
            'shares': None,
            'node_configs': {},
            'auto_security_group': False,
            'volumes_availability_zone': None,
            'description': '',
            'volume_mount_prefix': '/volumes/disk',
            'plugin_name': 'vanilla',
            'floating_ip_pool': None,
            'is_default': False,
            'image_id': None,
            'volumes_size': 0,
            'is_proxy_gateway': False,
            'is_public': False,
            'hadoop_version': '2.7.1',
            'name': 'cluster-nodetemplate-jlgzovdaivn',
            'tenant_id': '221b4f51e9bd4f659845f657a3051a46',
            'created_at': '2016-01-29T11:08:46',
            'volume_type': None,
            'is_protected': False,
            'node_processes': ['namenode'],
            'flavor_id': '2'}
        self.ngt_mgr.get.return_value = resp
        # Simulate replace translation rule execution.
        ngt.properties.data['flavor'] = '1'
        reality = ngt.get_live_state(ngt.properties)
        expected = {
            'volume_local_to_instance': False,
            'availability_zone': None,
            'use_autoconfig': True,
            'volumes_per_node': 0,
            'security_groups': None,
            'shares': None,
            'node_configs': {},
            'auto_security_group': False,
            'volumes_availability_zone': None,
            'description': '',
            'plugin_name': 'vanilla',
            'floating_ip_pool': None,
            'image_id': None,
            'volumes_size': 0,
            'is_proxy_gateway': False,
            'hadoop_version': '2.7.1',
            'name': 'cluster-nodetemplate-jlgzovdaivn',
            'volume_type': None,
            'node_processes': ['namenode'],
            'flavor': '2'
        }
        self.assertEqual(expected, reality)
        # Make sure that old flavor will return when ids are equal - simulate
        # replace translation rule execution.
        ngt.properties.data['flavor'] = '2'
        reality = ngt.get_live_state(ngt.properties)
        self.assertEqual('2', reality.get('flavor'))
class SaharaClusterTemplateTest(common.HeatTestCase):
    def setUp(self):
        super(SaharaClusterTemplateTest, self).setUp()
        # Accept any custom-constraint value (plugin name etc.).
        self.patchobject(st.constraints.CustomConstraint, '_is_valid'
                         ).return_value = True
        self.patchobject(neutron.NeutronClientPlugin, '_create')
        self.patchobject(neutron.NeutronClientPlugin,
                         'find_resourceid_by_name_or_id',
                         return_value='some_network_id')
        # Mock the sahara client; keep a handle on the cluster-template API.
        sahara_mock = mock.MagicMock()
        self.ct_mgr = sahara_mock.cluster_templates
        self.patchobject(sahara.SaharaClientPlugin,
                         '_create').return_value = sahara_mock
        self.patchobject(sahara.SaharaClientPlugin, 'validate_hadoop_version'
                         ).return_value = None
        self.fake_ct = FakeClusterTemplate()
        self.t = template_format.parse(cluster_template)
    def _init_ct(self, template):
        """Parse *template* into a stack and return the cluster-template resource."""
        self.stack = utils.parse_stack(template)
        return self.stack['cluster-template']
    def _create_ct(self, template):
        """Create the cluster template and assert CREATE/COMPLETE."""
        ct = self._init_ct(template)
        self.ct_mgr.create.return_value = self.fake_ct
        scheduler.TaskRunner(ct.create)()
        self.assertEqual((ct.CREATE, ct.COMPLETE), ct.state)
        self.assertEqual(self.fake_ct.id, ct.resource_id)
        return ct
def test_ct_create(self):
self._create_ct(self.t)
args = {
'name': 'test-cluster-template',
'plugin_name': 'vanilla',
'hadoop_version': '2.3.0',
'description': '',
'default_image_id': None,
'net_id': 'some_network_id',
'anti_affinity': None,
'node_groups': None,
'cluster_configs': None,
'use_autoconfig': None,
'shares': [{'id': 'e45eaabf-9300-42e2-b6eb-9ebc92081f46',
'access_level': 'ro',
'path': None}]
}
self.ct_mgr.create.assert_called_once_with(**args)
def test_ct_validate_no_network_on_neutron_fails(self):
self.t['resources']['cluster-template']['properties'].pop(
'neutron_management_network')
ct = self._init_ct(self.t)
self.patchobject(ct, 'is_using_neutron', return_value=True)
ex = self.assertRaises(exception.StackValidationFailed,
ct.validate)
self.assertEqual("neutron_management_network must be provided",
str(ex))
def test_template_invalid_name(self):
tmpl = template_format.parse(cluster_template_without_name)
stack = utils.parse_stack(tmpl)
ct = stack['cluster_template!']
self.ct_mgr.create.return_value = self.fake_ct
scheduler.TaskRunner(ct.create)()
self.assertEqual((ct.CREATE, ct.COMPLETE), ct.state)
self.assertEqual(self.fake_ct.id, ct.resource_id)
name = self.ct_mgr.create.call_args[1]['name']
self.assertIn('-clustertemplate-', name)
def test_ct_show_resource(self):
ct = self._create_ct(self.t)
self.ct_mgr.get.return_value = self.fake_ct
self.assertEqual({"cluster-template": "info"}, ct.FnGetAtt('show'))
self.ct_mgr.get.assert_called_once_with('some_ct_id')
def test_update(self):
ct = self._create_ct(self.t)
rsrc_defn = self.stack.t.resource_definitions(self.stack)[
'cluster-template']
props = self.t['resources']['cluster-template']['properties'].copy()
props['plugin_name'] = 'hdp'
props['hadoop_version'] = '1.3.2'
props['name'] = 'new-cluster-template'
rsrc_defn = rsrc_defn.freeze(properties=props)
scheduler.TaskRunner(ct.update, rsrc_defn)()
args = {
'plugin_name': 'hdp',
'hadoop_version': '1.3.2',
'name': 'new-cluster-template'
}
self.ct_mgr.update.assert_called_once_with('some_ct_id', **args)
self.assertEqual((ct.UPDATE, ct.COMPLETE), ct.state)
def test_ct_get_live_state(self):
ct = self._create_ct(self.t)
resp = mock.MagicMock()
resp.to_dict.return_value = {
'neutron_management_network': 'public',
'description': '',
'cluster_configs': {},
'created_at': '2016-01-29T11:45:47',
'default_image_id': None,
'updated_at': None,
'plugin_name': 'vanilla',
'shares': None,
'is_default': False,
'is_protected': False,
'use_autoconfig': True,
'anti_affinity': [],
'tenant_id': '221b4f51e9bd4f659845f657a3051a46',
'node_groups': [{'volume_local_to_instance': False,
'availability_zone': None,
'updated_at': None,
'node_group_template_id': '1234',
'volumes_per_node': 0,
'id': '48c356f6-bbe1-4b26-a90a-f3d543c2ea4c',
'security_groups': None,
'shares': None,
'node_configs': {},
'auto_security_group': False,
'volumes_availability_zone': None,
'volume_mount_prefix': '/volumes/disk',
'floating_ip_pool': None,
'image_id': None,
'volumes_size': 0,
'is_proxy_gateway': False,
'count': 1,
'name': 'test',
'created_at': '2016-01-29T11:45:47',
'volume_type': None,
'node_processes': ['namenode'],
'flavor_id': '2',
'use_autoconfig': True}],
'is_public': False,
'hadoop_version': '2.7.1',
'id': 'c07b8c63-b944-47f9-8588-085547a45c1b',
'name': 'cluster-template-ykokor6auha4'}
self.ct_mgr.get.return_value = resp
reality = ct.get_live_state(ct.properties)
expected = {
'neutron_management_network': 'public',
'description': '',
'cluster_configs': {},
'default_image_id': None,
'plugin_name': 'vanilla',
'shares': None,
'anti_affinity': [],
'node_groups': [{'node_group_template_id': '1234',
'count': 1,
'name': 'test'}],
'hadoop_version': '2.7.1',
'name': 'cluster-template-ykokor6auha4'
}
self.assertEqual(set(expected.keys()), set(reality.keys()))
expected_node_group = sorted(expected.pop('node_groups'))
reality_node_group = sorted(reality.pop('node_groups'))
for i in range(len(expected_node_group)):
self.assertEqual(expected_node_group[i], reality_node_group[i])
self.assertEqual(expected, reality)

View File

@ -17,10 +17,10 @@ from heat_integrationtests.functional import functional_base
class ServiceBasedExposureTest(functional_base.FunctionalTestsBase):
# NOTE(pas-ha) if we ever decide to install Sahara on Heat
# NOTE(pas-ha) if we ever decide to install Manila on Heat
# functional gate, this must be changed to other not-installed
# but in principle supported service
unavailable_service = 'Sahara'
unavailable_service = 'Manila'
unavailable_template = """
heat_template_version: 2015-10-15
parameters:
@ -28,25 +28,24 @@ parameters:
type: string
resources:
not_available:
type: OS::Sahara::NodeGroupTemplate
type: OS::Manila::Share
properties:
plugin_name: fake
hadoop_version: 0.1
flavor: {get_param: instance_type}
node_processes: []
name: not_available
share_protocol: NFS
size: 1
"""
def setUp(self):
super(ServiceBasedExposureTest, self).setUp()
# check that Sahara endpoint is available
if self._is_sahara_deployed():
self.skipTest("Sahara is actually deployed, "
if self._is_manila_deployed():
self.skipTest("Manila is actually deployed, "
"can not run negative tests on "
"Sahara resources availability.")
"Manila resources availability.")
def _is_sahara_deployed(self):
def _is_manila_deployed(self):
try:
self.identity_client.get_endpoint_url('data-processing',
self.identity_client.get_endpoint_url('sharev2',
self.conf.region)
except keystoneclient.exceptions.EndpointNotFound:
return False
@ -66,7 +65,7 @@ resources:
parameters=parameters,
template=self.unavailable_template)
self.assertIn('ResourceTypeUnavailable', ex.message.decode('utf-8'))
self.assertIn('OS::Sahara::NodeGroupTemplate',
self.assertIn('OS::Manila::Share',
ex.message.decode('utf-8'))

View File

@ -0,0 +1,19 @@
---
upgrade:
- |
Integration with sahara has been removed because the sahara project has
been retired. Because of the removal, the following resource types are no
longer supported and now hidden.
- ``OS::Sahara::Cluster``
- ``OS::Sahara::ClusterTemplate``
- ``OS::Sahara::DataSource``
- ``OS::Sahara::ImageRegistry``
- ``OS::Sahara::Job``
- ``OS::Sahara::JobBinary``
- ``OS::Sahara::NodeGroupTemplate``
Also, the options in ``[clients_sahara]`` section have been removed.
- |
Default value of the ``[DEFAULT] hidden_stack_tags`` option has been
updated, and stacks with the ``data-processing-cluster`` tag are no
longer hidden by default.

View File

@ -44,7 +44,6 @@ python-neutronclient>=7.7.0 # Apache-2.0
python-novaclient>=9.1.0 # Apache-2.0
python-octaviaclient>=1.8.0 # Apache-2.0
python-openstackclient>=3.12.0 # Apache-2.0
python-saharaclient>=1.4.0 # Apache-2.0
python-swiftclient>=3.2.0 # Apache-2.0
python-troveclient>=2.2.0 # Apache-2.0
python-vitrageclient>=2.7.0 # Apache-2.0

View File

@ -89,7 +89,6 @@ heat.clients =
neutron = heat.engine.clients.os.neutron:NeutronClientPlugin
octavia = heat.engine.clients.os.octavia:OctaviaClientPlugin
openstack = heat.engine.clients.os.openstacksdk:OpenStackSDKPlugin
sahara = heat.engine.clients.os.sahara:SaharaClientPlugin
swift = heat.engine.clients.os.swift:SwiftClientPlugin
trove = heat.engine.clients.os.trove:TroveClientPlugin
vitrage = heat.engine.clients.os.vitrage:VitrageClientPlugin
@ -163,13 +162,6 @@ heat.constraints =
octavia.pool = heat.engine.clients.os.octavia:PoolConstraint
octavia.flavor = heat.engine.clients.os.octavia:FlavorConstraint
octavia.flavorprofile = heat.engine.clients.os.octavia:FlavorProfileConstraint
sahara.cluster = heat.engine.clients.os.sahara:ClusterConstraint
sahara.cluster_template = heat.engine.clients.os.sahara:ClusterTemplateConstraint
sahara.data_source = heat.engine.clients.os.sahara:DataSourceConstraint
sahara.image = heat.engine.clients.os.sahara:ImageConstraint
sahara.job_binary = heat.engine.clients.os.sahara:JobBinaryConstraint
sahara.job_type = heat.engine.clients.os.sahara:JobTypeConstraint
sahara.plugin = heat.engine.clients.os.sahara:PluginConstraint
trove.flavor = heat.engine.clients.os.trove:FlavorConstraint
zaqar.queue = heat.engine.clients.os.zaqar:QueueConstraint
#ironic