From 3ab772394692deec64741b8ef7e8a139b9e4dbbf Mon Sep 17 00:00:00 2001 From: Michael Johnson Date: Mon, 2 Apr 2018 11:25:09 -0700 Subject: [PATCH] Create noop provider driver and data model This patch creates the noop provider driver and the driver data model. Change-Id: I40787c70d0cdaa5c8edd640f5c679bd2115827e4 --- octavia/api/drivers/data_models.py | 214 ++++++++++++ octavia/api/drivers/noop_driver/__init__.py | 11 + octavia/api/drivers/noop_driver/driver.py | 308 ++++++++++++++++++ .../unit/api/drivers/test_data_models.py | 138 ++++++++ .../api/drivers/test_provider_noop_driver.py | 298 +++++++++++++++++ setup.cfg | 2 + 6 files changed, 971 insertions(+) create mode 100644 octavia/api/drivers/data_models.py create mode 100644 octavia/api/drivers/noop_driver/__init__.py create mode 100644 octavia/api/drivers/noop_driver/driver.py create mode 100644 octavia/tests/unit/api/drivers/test_data_models.py create mode 100644 octavia/tests/unit/api/drivers/test_provider_noop_driver.py diff --git a/octavia/api/drivers/data_models.py b/octavia/api/drivers/data_models.py new file mode 100644 index 0000000000..809c3e08d4 --- /dev/null +++ b/octavia/api/drivers/data_models.py @@ -0,0 +1,214 @@ +# Copyright (c) 2014 Rackspace +# Copyright (c) 2016 Blue Box, an IBM Company +# Copyright 2018 Rackspace, US Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import six + + +class BaseDataModel(object): + def to_dict(self, calling_classes=None, recurse=False, **kwargs): + """Converts a data model to a dictionary.""" + calling_classes = calling_classes or [] + ret = {} + for attr in self.__dict__: + if attr.startswith('_') or not kwargs.get(attr, True): + continue + value = self.__dict__[attr] + + if recurse: + if isinstance(getattr(self, attr), list): + ret[attr] = [] + for item in value: + if isinstance(item, BaseDataModel): + if type(self) not in calling_classes: + ret[attr].append( + item.to_dict(calling_classes=( + calling_classes + [type(self)]))) + else: + ret[attr] = None + else: + ret[attr] = item + elif isinstance(getattr(self, attr), BaseDataModel): + if type(self) not in calling_classes: + ret[attr] = value.to_dict( + calling_classes=calling_classes + [type(self)]) + else: + ret[attr] = None + elif six.PY2 and isinstance(value, six.text_type): + ret[attr.encode('utf8')] = value.encode('utf8') + else: + ret[attr] = value + else: + if isinstance(getattr(self, attr), (BaseDataModel, list)): + ret[attr] = None + else: + ret[attr] = value + + return ret + + def __eq__(self, other): + if isinstance(other, self.__class__): + return self.to_dict() == other.to_dict() + return False + + def __ne__(self, other): + return not self.__eq__(other) + + @classmethod + def from_dict(cls, dict): + return cls(**dict) + + +class LoadBalancer(BaseDataModel): + def __init__(self, admin_state_up=None, description=None, flavor=None, + listeners=None, loadbalancer_id=None, name=None, + project_id=None, vip_address=None, vip_network_id=None, + vip_port_id=None, vip_subnet_id=None): + + self.admin_state_up = admin_state_up + self.description = description + self.flavor = flavor or {} + self.listeners = listeners or [] + self.loadbalancer_id = loadbalancer_id + self.name = name + self.project_id = project_id + self.vip_address = vip_address + self.vip_network_id = vip_network_id + self.vip_port_id = vip_port_id + self.vip_subnet_id = vip_subnet_id + + +class Listener(BaseDataModel): + def __init__(self, admin_state_up=None, connection_limit=None, + default_pool=None, default_pool_id=None, + default_tls_container=None, description=None, + insert_headers=None, l7policies=None, listener_id=None, + loadbalancer_id=None, name=None, protocol=None, + protocol_port=None, sni_containers=None): + + self.admin_state_up = admin_state_up + self.connection_limit = connection_limit + self.default_pool = default_pool + self.default_pool_id = default_pool_id + self.default_tls_container = default_tls_container + self.description = description + self.insert_headers = insert_headers or {} + self.l7policies = l7policies or [] + self.listener_id = listener_id + self.loadbalancer_id = loadbalancer_id + self.name = name + self.protocol = protocol + self.protocol_port = protocol_port + self.sni_containers = sni_containers + + +class Pool(BaseDataModel): + def __init__(self, admin_state_up=None, description=None, + healthmonitor=None, lb_algorithm=None, listener_id=None, + loadbalancer_id=None, members=None, name=None, pool_id=None, + protocol=None, session_persistence=None): + + self.admin_state_up = admin_state_up + self.description = description + self.healthmonitor = healthmonitor + self.lb_algorithm = lb_algorithm + self.listener_id = listener_id + self.loadbalancer_id = loadbalancer_id + self.members = members or [] + self.name = name + self.pool_id = pool_id + self.protocol = protocol + self.session_persistence = session_persistence or {} + + +class Member(BaseDataModel): + 
def __init__(self, address=None, admin_state_up=None, member_id=None, + monitor_address=None, monitor_port=None, name=None, + pool_id=None, protocol_port=None, subnet_id=None, + weight=None): + + self.address = address + self.admin_state_up = admin_state_up + self.member_id = member_id + self.monitor_address = monitor_address + self.monitor_port = monitor_port + self.name = name + self.pool_id = pool_id + self.protocol_port = protocol_port + self.subnet_id = subnet_id + self.weight = weight + + +class HealthMonitor(BaseDataModel): + def __init__(self, admin_state_up=None, delay=None, expected_codes=None, + healthmonitor_id=None, http_method=None, max_retries=None, + max_retries_down=None, name=None, pool_id=None, timeout=None, + type=None, url_path=None): + + self.admin_state_up = admin_state_up + self.delay = delay + self.expected_codes = expected_codes + self.healthmonitor_id = healthmonitor_id + self.http_method = http_method + self.max_retries = max_retries + self.max_retries_down = max_retries_down + self.name = name + self.pool_id = pool_id + self.timeout = timeout + self.type = type + self.url_path = url_path + + +class L7Policy(BaseDataModel): + def __init__(self, action=None, admin_state_up=None, description=None, + l7policy_id=None, listener_id=None, name=None, position=None, + redirect_pool_id=None, redirect_url=None, rules=None): + + self.action = action + self.admin_state_up = admin_state_up + self.description = description + self.l7policy_id = l7policy_id + self.listener_id = listener_id + self.name = name + self.position = position + self.redirect_pool_id = redirect_pool_id + self.redirect_url = redirect_url + self.rules = rules or [] + + +class L7Rule(BaseDataModel): + def __init__(self, admin_state_up=None, compare_type=None, invert=None, + key=None, l7policy_id=None, l7rule_id=None, type=None, + value=None): + + self.admin_state_up = admin_state_up + self.compare_type = compare_type + self.invert = invert + self.key = key + self.l7policy_id = l7policy_id + self.l7rule_id = l7rule_id + self.type = type + self.value = value + + +class VIP(BaseDataModel): + def __init__(self, vip_address=None, vip_network_id=None, vip_port_id=None, + vip_subnet_id=None): + + self.vip_address = vip_address + self.vip_network_id = vip_network_id + self.vip_port_id = vip_port_id + self.vip_subnet_id = vip_subnet_id diff --git a/octavia/api/drivers/noop_driver/__init__.py b/octavia/api/drivers/noop_driver/__init__.py new file mode 100644 index 0000000000..94e731d201 --- /dev/null +++ b/octavia/api/drivers/noop_driver/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/octavia/api/drivers/noop_driver/driver.py b/octavia/api/drivers/noop_driver/driver.py new file mode 100644 index 0000000000..e059200210 --- /dev/null +++ b/octavia/api/drivers/noop_driver/driver.py @@ -0,0 +1,308 @@ +# Copyright 2018 Rackspace, US Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_log import log as logging +from oslo_utils import uuidutils + +from octavia.api.drivers import data_models +from octavia.api.drivers import provider_base as driver_base + +LOG = logging.getLogger(__name__) + + +class NoopManager(object): + def __init__(self): + super(NoopManager, self).__init__() + self.driverconfig = {} + + # Load Balancer + def create_vip_port(self, loadbalancer_id, vip_dictionary): + LOG.debug('Provider %s no-op, create_vip_port loadbalancer %s', + self.__class__.__name__, loadbalancer_id) + + self.driverconfig[loadbalancer_id] = (loadbalancer_id, vip_dictionary, + 'create_vip_port') + + vip_address = vip_dictionary.get('vip_address', '192.0.2.5') + vip_network_id = vip_dictionary.get('vip_network_id', + uuidutils.generate_uuid()) + vip_port_id = vip_dictionary.get('vip_port_id', + uuidutils.generate_uuid()) + vip_subnet_id = vip_dictionary.get('vip_subnet_id', + uuidutils.generate_uuid()) + + return data_models.VIP(vip_address=vip_address, + vip_network_id=vip_network_id, + vip_port_id=vip_port_id, + vip_subnet_id=vip_subnet_id).to_dict() + + def loadbalancer_create(self, loadbalancer): + LOG.debug('Provider %s no-op, loadbalancer_create loadbalancer %s', + self.__class__.__name__, loadbalancer.loadbalancer_id) + + self.driverconfig[loadbalancer.loadbalancer_id] = ( + loadbalancer, 'loadbalancer_create') + + def loadbalancer_delete(self, loadbalancer_id, cascade=False): + LOG.debug('Provider %s no-op, loadbalancer_delete loadbalancer %s', + self.__class__.__name__, loadbalancer_id) + + self.driverconfig[loadbalancer_id] = (loadbalancer_id, cascade, + 'loadbalancer_delete') + + def loadbalancer_failover(self, loadbalancer_id): + LOG.debug('Provider %s no-op, loadbalancer_failover loadbalancer %s', + self.__class__.__name__, loadbalancer_id) + + self.driverconfig[loadbalancer_id] = (loadbalancer_id, + 'loadbalancer_failover') + + def loadbalancer_update(self, loadbalancer): + LOG.debug('Provider %s no-op, loadbalancer_update loadbalancer %s', + self.__class__.__name__, loadbalancer.loadbalancer_id) + + self.driverconfig[loadbalancer.loadbalancer_id] = ( + loadbalancer, 'loadbalancer_update') + + # Listener + def listener_create(self, listener): + LOG.debug('Provider %s no-op, listener_create listener %s', + self.__class__.__name__, listener.listener_id) + + self.driverconfig[listener.listener_id] = (listener, 'listener_create') + + def listener_delete(self, listener_id): + LOG.debug('Provider %s no-op, listener_delete listener %s', + self.__class__.__name__, listener_id) + + self.driverconfig[listener_id] = (listener_id, 'listener_delete') + + def listener_update(self, listener): + LOG.debug('Provider %s no-op, listener_update listener %s', + self.__class__.__name__, listener.listener_id) + + self.driverconfig[listener.listener_id] = (listener, 'listener_update') + + # Pool + def pool_create(self, pool): + LOG.debug('Provider %s no-op, pool_create pool %s', + self.__class__.__name__, pool.pool_id) + + self.driverconfig[pool.pool_id] = (pool, 'pool_create') + + def pool_delete(self, pool_id): + LOG.debug('Provider %s no-op, pool_delete pool %s', + 
self.__class__.__name__, pool_id) + + self.driverconfig[pool_id] = (pool_id, 'pool_delete') + + def pool_update(self, pool): + LOG.debug('Provider %s no-op, pool_update pool %s', + self.__class__.__name__, pool.pool_id) + + self.driverconfig[pool.pool_id] = (pool, 'pool_update') + + # Member + def member_create(self, member): + LOG.debug('Provider %s no-op, member_create member %s', + self.__class__.__name__, member.member_id) + + self.driverconfig[member.member_id] = (member, 'member_create') + + def member_delete(self, member_id): + LOG.debug('Provider %s no-op, member_delete member %s', + self.__class__.__name__, member_id) + + self.driverconfig[member_id] = (member_id, 'member_delete') + + def member_update(self, member): + LOG.debug('Provider %s no-op, member_update member %s', + self.__class__.__name__, member.member_id) + + self.driverconfig[member.member_id] = (member, 'member_update') + + def member_batch_update(self, members): + for member in members: + LOG.debug('Provider %s no-op, member_batch_update member %s', + self.__class__.__name__, member.member_id) + + self.driverconfig[member.member_id] = (member, + 'member_batch_update') + + # Health Monitor + def health_monitor_create(self, healthmonitor): + LOG.debug('Provider %s no-op, health_monitor_create healthmonitor %s', + self.__class__.__name__, healthmonitor.healthmonitor_id) + + self.driverconfig[healthmonitor.healthmonitor_id] = ( + healthmonitor, 'health_monitor_create') + + def health_monitor_delete(self, healthmonitor_id): + LOG.debug('Provider %s no-op, health_monitor_delete healthmonitor %s', + self.__class__.__name__, healthmonitor_id) + + self.driverconfig[healthmonitor_id] = (healthmonitor_id, + 'health_monitor_delete') + + def health_monitor_update(self, healthmonitor): + LOG.debug('Provider %s no-op, health_monitor_update healthmonitor %s', + self.__class__.__name__, healthmonitor.healthmonitor_id) + + self.driverconfig[healthmonitor.healthmonitor_id] = ( + healthmonitor, 'health_monitor_update') + + # L7 Policy + def l7policy_create(self, l7policy): + LOG.debug('Provider %s no-op, l7policy_create l7policy %s', + self.__class__.__name__, l7policy.l7policy_id) + + self.driverconfig[l7policy.l7policy_id] = (l7policy, 'l7policy_create') + + def l7policy_delete(self, l7policy_id): + LOG.debug('Provider %s no-op, l7policy_delete l7policy %s', + self.__class__.__name__, l7policy_id) + + self.driverconfig[l7policy_id] = (l7policy_id, 'l7policy_delete') + + def l7policy_update(self, l7policy): + LOG.debug('Provider %s no-op, l7policy_update l7policy %s', + self.__class__.__name__, l7policy.l7policy_id) + + self.driverconfig[l7policy.l7policy_id] = (l7policy, 'l7policy_update') + + # L7 Rule + def l7rule_create(self, l7rule): + LOG.debug('Provider %s no-op, l7rule_create l7rule %s', + self.__class__.__name__, l7rule.l7rule_id) + + self.driverconfig[l7rule.l7rule_id] = (l7rule, 'l7rule_create') + + def l7rule_delete(self, l7rule_id): + LOG.debug('Provider %s no-op, l7rule_delete l7rule %s', + self.__class__.__name__, l7rule_id) + + self.driverconfig[l7rule_id] = (l7rule_id, 'l7rule_delete') + + def l7rule_update(self, l7rule): + LOG.debug('Provider %s no-op, l7rule_update l7rule %s', + self.__class__.__name__, l7rule.l7rule_id) + + self.driverconfig[l7rule.l7rule_id] = (l7rule, 'l7rule_update') + + # Flavor + def get_supported_flavor_metadata(self): + LOG.debug('Provider %s no-op, get_supported_flavor_metadata', + self.__class__.__name__) + + return {'amp_image_tag': 'The glance image tag to use for this load ' + 
'balancer.'} + + def validate_flavor(self, flavor_metadata): + LOG.debug('Provider %s no-op, validate_flavor metadata: %s', + self.__class__.__name__, flavor_metadata) + + flavor_hash = hash(frozenset(flavor_metadata.items())) + self.driverconfig[flavor_hash] = (flavor_metadata, 'validate_flavor') + + +class NoopProviderDriver(driver_base.ProviderDriver): + def __init__(self): + super(NoopProviderDriver, self).__init__() + self.driver = NoopManager() + + # Load Balancer + def create_vip_port(self, loadbalancer_id, vip_dictionary): + return self.driver.create_vip_port(loadbalancer_id, vip_dictionary) + + def loadbalancer_create(self, loadbalancer): + self.driver.loadbalancer_create(loadbalancer) + + def loadbalancer_delete(self, loadbalancer_id, cascade=False): + self.driver.loadbalancer_delete(loadbalancer_id, cascade) + + def loadbalancer_failover(self, loadbalancer_id): + self.driver.loadbalancer_failover(loadbalancer_id) + + def loadbalancer_update(self, loadbalancer): + self.driver.loadbalancer_update(loadbalancer) + + # Listener + def listener_create(self, listener): + self.driver.listener_create(listener) + + def listener_delete(self, listener_id): + self.driver.listener_delete(listener_id) + + def listener_update(self, listener): + self.driver.listener_update(listener) + + # Pool + def pool_create(self, pool): + self.driver.pool_create(pool) + + def pool_delete(self, pool_id): + self.driver.pool_delete(pool_id) + + def pool_update(self, pool): + self.driver.pool_update(pool) + + # Member + def member_create(self, member): + self.driver.member_create(member) + + def member_delete(self, member_id): + self.driver.member_delete(member_id) + + def member_update(self, member): + self.driver.member_update(member) + + def member_batch_update(self, members): + self.driver.member_batch_update(members) + + # Health Monitor + def health_monitor_create(self, healthmonitor): + self.driver.health_monitor_create(healthmonitor) + + def health_monitor_delete(self, healthmonitor_id): + self.driver.health_monitor_delete(healthmonitor_id) + + def health_monitor_update(self, healthmonitor): + self.driver.health_monitor_update(healthmonitor) + + # L7 Policy + def l7policy_create(self, l7policy): + self.driver.l7policy_create(l7policy) + + def l7policy_delete(self, l7policy_id): + self.driver.l7policy_delete(l7policy_id) + + def l7policy_update(self, l7policy): + self.driver.l7policy_update(l7policy) + + # L7 Rule + def l7rule_create(self, l7rule): + self.driver.l7rule_create(l7rule) + + def l7rule_delete(self, l7rule_id): + self.driver.l7rule_delete(l7rule_id) + + def l7rule_update(self, l7rule): + self.driver.l7rule_update(l7rule) + + # Flavor + def get_supported_flavor_metadata(self): + return self.driver.get_supported_flavor_metadata() + + def validate_flavor(self, flavor_metadata): + self.driver.validate_flavor(flavor_metadata) diff --git a/octavia/tests/unit/api/drivers/test_data_models.py b/octavia/tests/unit/api/drivers/test_data_models.py new file mode 100644 index 0000000000..83cbab7b2b --- /dev/null +++ b/octavia/tests/unit/api/drivers/test_data_models.py @@ -0,0 +1,138 @@ +# Copyright 2018 Rackspace, US Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from copy import deepcopy + +from oslo_utils import uuidutils + +from octavia.api.drivers import data_models +import octavia.tests.unit.base as base + + +class TestProviderDataModels(base.TestCase): + + def setUp(self): + super(TestProviderDataModels, self).setUp() + + self.loadbalancer_id = uuidutils.generate_uuid() + self.project_id = uuidutils.generate_uuid() + self.vip_address = '192.0.2.83' + self.vip_network_id = uuidutils.generate_uuid() + self.vip_port_id = uuidutils.generate_uuid() + self.vip_subnet_id = uuidutils.generate_uuid() + self.listener_id = uuidutils.generate_uuid() + + self.ref_listener = data_models.Listener( + admin_state_up=True, + connection_limit=5000, + default_pool_id=None, + default_tls_container='a_pkcs12_bundle', + description='The listener', + insert_headers={'X-Forwarded-For': 'true'}, + l7policies=[], + listener_id=self.listener_id, + loadbalancer_id=self.loadbalancer_id, + name='super_listener', + protocol='avian', + protocol_port=42, + sni_containers='another_pkcs12_bundle') + + self.ref_lb = data_models.LoadBalancer( + admin_state_up=False, + description='One great load balancer', + flavor={'cake': 'chocolate'}, + listeners=[self.ref_listener], + loadbalancer_id=self.loadbalancer_id, + name='favorite_lb', + project_id=self.project_id, + vip_address=self.vip_address, + vip_network_id=self.vip_network_id, + vip_port_id=self.vip_port_id, + vip_subnet_id=self.vip_subnet_id) + + self.ref_lb_dict = {'project_id': self.project_id, + 'flavor': {'cake': 'chocolate'}, + 'vip_network_id': self.vip_network_id, + 'admin_state_up': False, + 'loadbalancer_id': self.loadbalancer_id, + 'vip_port_id': self.vip_port_id, + 'listeners': None, + 'vip_address': self.vip_address, + 'description': 'One great load balancer', + 'vip_subnet_id': self.vip_subnet_id, + 'name': 'favorite_lb'} + + self.ref_lb_dict_with_listener = { + 'admin_state_up': False, + 'description': 'One great load balancer', + 'flavor': {'cake': 'chocolate'}, + 'listeners': [{'admin_state_up': True, + 'connection_limit': 5000, + 'default_pool': None, + 'default_pool_id': None, + 'default_tls_container': 'a_pkcs12_bundle', + 'description': 'The listener', + 'insert_headers': {'X-Forwarded-For': 'true'}, + 'l7policies': None, + 'listener_id': self.listener_id, + 'loadbalancer_id': self.loadbalancer_id, + 'name': 'super_listener', + 'protocol': 'avian', + 'protocol_port': 42, + 'sni_containers': 'another_pkcs12_bundle'}], + 'loadbalancer_id': self.loadbalancer_id, + 'name': 'favorite_lb', + 'project_id': self.project_id, + 'vip_address': self.vip_address, + 'vip_network_id': self.vip_network_id, + 'vip_port_id': self.vip_port_id, + 'vip_subnet_id': self.vip_subnet_id} + + def test_equality(self): + second_ref_lb = deepcopy(self.ref_lb) + + self.assertTrue(self.ref_lb == second_ref_lb) + + second_ref_lb.admin_state_up = True + + self.assertFalse(self.ref_lb == second_ref_lb) + + self.assertFalse(self.ref_lb == self.loadbalancer_id) + + def test_inequality(self): + second_ref_lb = deepcopy(self.ref_lb) + + self.assertFalse(self.ref_lb != second_ref_lb) + + second_ref_lb.admin_state_up = True + + 
self.assertTrue(self.ref_lb != second_ref_lb) + + self.assertTrue(self.ref_lb != self.loadbalancer_id) + + def test_to_dict(self): + ref_lb_converted_to_dict = self.ref_lb.to_dict() + + self.assertEqual(self.ref_lb_dict, ref_lb_converted_to_dict) + + def test_to_dict_recursive(self): + ref_lb_converted_to_dict = self.ref_lb.to_dict(recurse=True) + + self.assertEqual(self.ref_lb_dict_with_listener, + ref_lb_converted_to_dict) + + def test_from_dict(self): + lb_object = data_models.LoadBalancer.from_dict(self.ref_lb_dict) + + self.assertEqual(self.ref_lb, lb_object) diff --git a/octavia/tests/unit/api/drivers/test_provider_noop_driver.py b/octavia/tests/unit/api/drivers/test_provider_noop_driver.py new file mode 100644 index 0000000000..12f0dc9d04 --- /dev/null +++ b/octavia/tests/unit/api/drivers/test_provider_noop_driver.py @@ -0,0 +1,298 @@ +# Copyright 2018 Rackspace, US Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_utils import uuidutils + +from octavia.api.drivers import data_models +from octavia.api.drivers.noop_driver import driver +import octavia.tests.unit.base as base + + +class TestNoopProviderDriver(base.TestCase): + + def setUp(self): + super(TestNoopProviderDriver, self).setUp() + self.driver = driver.NoopProviderDriver() + + self.loadbalancer_id = uuidutils.generate_uuid() + self.vip_address = '192.0.2.10' + self.vip_network_id = uuidutils.generate_uuid() + self.vip_port_id = uuidutils.generate_uuid() + self.vip_subnet_id = uuidutils.generate_uuid() + self.listener_id = uuidutils.generate_uuid() + self.pool_id = uuidutils.generate_uuid() + self.member_id = uuidutils.generate_uuid() + self.member_subnet_id = uuidutils.generate_uuid() + self.healthmonitor_id = uuidutils.generate_uuid() + self.l7policy_id = uuidutils.generate_uuid() + self.l7rule_id = uuidutils.generate_uuid() + + self.ref_vip = data_models.VIP( + vip_address=self.vip_address, + vip_network_id=self.vip_network_id, + vip_port_id=self.vip_port_id, + vip_subnet_id=self.vip_subnet_id) + + self.ref_member = data_models.Member( + address='198.51.100.4', + admin_state_up=True, + member_id=self.member_id, + monitor_address='203.0.113.2', + monitor_port=66, + name='jacket', + pool_id=self.pool_id, + protocol_port=99, + subnet_id=self.member_subnet_id, + weight=55) + + self.ref_healthmonitor = data_models.HealthMonitor( + admin_state_up=False, + delay=2, + expected_codes="500", + healthmonitor_id=self.healthmonitor_id, + http_method='TRACE', + max_retries=1, + max_retries_down=0, + name='doc', + pool_id=self.pool_id, + timeout=3, + type='PHD', + url_path='/index.html') + + self.ref_pool = data_models.Pool( + admin_state_up=True, + description='Olympic swimming pool', + healthmonitor=self.ref_healthmonitor, + lb_algorithm='A_Fast_One', + listener_id=self.listener_id, + loadbalancer_id=self.loadbalancer_id, + members=[self.ref_member], + name='Osborn', + pool_id=self.pool_id, + protocol='avian', + session_persistence={'type': 'glue'}) + + self.ref_l7rule = data_models.L7Rule( + admin_state_up=True, + 
compare_type='store_brand', + invert=True, + key='board', + l7policy_id=self.l7policy_id, + l7rule_id=self.l7rule_id, + type='strict', + value='gold') + + self.ref_l7policy = data_models.L7Policy( + action='packed', + admin_state_up=False, + description='Corporate policy', + l7policy_id=self.l7policy_id, + listener_id=self.listener_id, + name='more_policy', + position=1, + redirect_pool_id=self.pool_id, + redirect_url='/hr', + rules=[self.ref_l7rule]) + + self.ref_listener = data_models.Listener( + admin_state_up=False, + connection_limit=5, + default_pool=self.ref_pool, + default_pool_id=self.pool_id, + default_tls_container='a_pkcs12_bundle', + description='The listener', + insert_headers={'X-Forwarded-For': 'true'}, + l7policies=[self.ref_l7policy], + listener_id=self.listener_id, + loadbalancer_id=self.loadbalancer_id, + name='super_listener', + protocol='avian', + protocol_port=42, + sni_containers='another_pkcs12_bundle') + + self.ref_lb = data_models.LoadBalancer( + admin_state_up=False, + description='One great load balancer', + flavor={'cake': 'chocolate'}, + listeners=[self.ref_listener], + loadbalancer_id=self.loadbalancer_id, + name='favorite_lb', + project_id=uuidutils.generate_uuid(), + vip_address=self.vip_address, + vip_network_id=self.vip_network_id, + vip_port_id=self.vip_port_id, + vip_subnet_id=self.vip_subnet_id) + + self.ref_flavor_metadata = { + 'amp_image_tag': 'The glance image tag to use for this load ' + 'balancer.'} + + def test_create_vip_port(self): + vip_dict = self.driver.create_vip_port(self.loadbalancer_id, + self.ref_vip.to_dict()) + + self.assertEqual(self.ref_vip.to_dict(), vip_dict) + + def test_loadbalancer_create(self): + self.driver.loadbalancer_create(self.ref_lb) + + self.assertEqual((self.ref_lb, 'loadbalancer_create'), + self.driver.driver.driverconfig[self.loadbalancer_id]) + + def test_loadbalancer_delete(self): + self.driver.loadbalancer_delete(self.loadbalancer_id, cascade=True) + + self.assertEqual((self.loadbalancer_id, True, 'loadbalancer_delete'), + self.driver.driver.driverconfig[self.loadbalancer_id]) + + def test_loadbalancer_failover(self): + self.driver.loadbalancer_failover(self.loadbalancer_id) + + self.assertEqual((self.loadbalancer_id, 'loadbalancer_failover'), + self.driver.driver.driverconfig[self.loadbalancer_id]) + + def test_loadbalancer_update(self): + self.driver.loadbalancer_update(self.ref_lb) + + self.assertEqual((self.ref_lb, 'loadbalancer_update'), + self.driver.driver.driverconfig[self.loadbalancer_id]) + + def test_listener_create(self): + self.driver.listener_create(self.ref_listener) + + self.assertEqual((self.ref_listener, 'listener_create'), + self.driver.driver.driverconfig[self.listener_id]) + + def test_listener_delete(self): + self.driver.listener_delete(self.listener_id) + + self.assertEqual((self.listener_id, 'listener_delete'), + self.driver.driver.driverconfig[self.listener_id]) + + def test_listener_update(self): + self.driver.listener_update(self.ref_listener) + + self.assertEqual((self.ref_listener, 'listener_update'), + self.driver.driver.driverconfig[self.listener_id]) + + def test_pool_create(self): + self.driver.pool_create(self.ref_pool) + + self.assertEqual((self.ref_pool, 'pool_create'), + self.driver.driver.driverconfig[self.pool_id]) + + def test_pool_delete(self): + self.driver.pool_delete(self.pool_id) + + self.assertEqual((self.pool_id, 'pool_delete'), + self.driver.driver.driverconfig[self.pool_id]) + + def test_pool_update(self): + self.driver.pool_update(self.ref_pool) + + 
self.assertEqual((self.ref_pool, 'pool_update'), + self.driver.driver.driverconfig[self.pool_id]) + + def test_member_create(self): + self.driver.member_create(self.ref_member) + + self.assertEqual((self.ref_member, 'member_create'), + self.driver.driver.driverconfig[self.member_id]) + + def test_member_delete(self): + self.driver.member_delete(self.member_id) + + self.assertEqual((self.member_id, 'member_delete'), + self.driver.driver.driverconfig[self.member_id]) + + def test_member_update(self): + self.driver.member_update(self.ref_member) + + self.assertEqual((self.ref_member, 'member_update'), + self.driver.driver.driverconfig[self.member_id]) + + def test_member_batch_update(self): + self.driver.member_batch_update([self.ref_member]) + + self.assertEqual((self.ref_member, 'member_batch_update'), + self.driver.driver.driverconfig[self.member_id]) + + def test_health_monitor_create(self): + self.driver.health_monitor_create(self.ref_healthmonitor) + + self.assertEqual( + (self.ref_healthmonitor, 'health_monitor_create'), + self.driver.driver.driverconfig[self.healthmonitor_id]) + + def test_health_monitor_delete(self): + self.driver.health_monitor_delete(self.healthmonitor_id) + + self.assertEqual( + (self.healthmonitor_id, 'health_monitor_delete'), + self.driver.driver.driverconfig[self.healthmonitor_id]) + + def test_health_monitor_update(self): + self.driver.health_monitor_update(self.ref_healthmonitor) + + self.assertEqual( + (self.ref_healthmonitor, 'health_monitor_update'), + self.driver.driver.driverconfig[self.healthmonitor_id]) + + def test_l7policy_create(self): + self.driver.l7policy_create(self.ref_l7policy) + + self.assertEqual((self.ref_l7policy, 'l7policy_create'), + self.driver.driver.driverconfig[self.l7policy_id]) + + def test_l7policy_delete(self): + self.driver.l7policy_delete(self.l7policy_id) + + self.assertEqual((self.l7policy_id, 'l7policy_delete'), + self.driver.driver.driverconfig[self.l7policy_id]) + + def test_l7policy_update(self): + self.driver.l7policy_update(self.ref_l7policy) + + self.assertEqual((self.ref_l7policy, 'l7policy_update'), + self.driver.driver.driverconfig[self.l7policy_id]) + + def test_l7rule_create(self): + self.driver.l7rule_create(self.ref_l7rule) + + self.assertEqual((self.ref_l7rule, 'l7rule_create'), + self.driver.driver.driverconfig[self.l7rule_id]) + + def test_l7rule_delete(self): + self.driver.l7rule_delete(self.l7rule_id) + + self.assertEqual((self.l7rule_id, 'l7rule_delete'), + self.driver.driver.driverconfig[self.l7rule_id]) + + def test_l7rule_update(self): + self.driver.l7rule_update(self.ref_l7rule) + + self.assertEqual((self.ref_l7rule, 'l7rule_update'), + self.driver.driver.driverconfig[self.l7rule_id]) + + def test_get_supported_flavor_metadata(self): + metadata = self.driver.get_supported_flavor_metadata() + + self.assertEqual(self.ref_flavor_metadata, metadata) + + def test_validate_flavor(self): + self.driver.validate_flavor(self.ref_flavor_metadata) + + flavor_hash = hash(frozenset(self.ref_flavor_metadata.items())) + self.assertEqual((self.ref_flavor_metadata, 'validate_flavor'), + self.driver.driver.driverconfig[flavor_hash]) diff --git a/setup.cfg b/setup.cfg index 4fcd9a4b12..fc62ec4ead 100644 --- a/setup.cfg +++ b/setup.cfg @@ -69,6 +69,8 @@ console_scripts = octavia-db-manage = octavia.db.migration.cli:main amphora-agent = octavia.cmd.agent:main haproxy-vrrp-check = octavia.cmd.haproxy_vrrp_check:main +octavia.api.drivers = + noop_driver = octavia.api.drivers.noop_driver.driver:NoopProviderDriver 
octavia.api.handlers = simulated_handler = octavia.api.handlers.controller_simulator.handler:SimulatedControllerHandler queue_producer = octavia.api.handlers.queue.producer:ProducerHandler
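
Usage sketch (illustrative only, not part of the patch): the snippet below shows one way the new provider data models and the no-op driver could be exercised, mirroring the unit tests included in this change. The load balancer field values and the name 'example_lb' are invented for illustration; the classes, methods, and the driverconfig bookkeeping are the ones added above.

    # Minimal sketch, assuming octavia (this change applied) and oslo.utils
    # are importable. Values below are placeholders for illustration.
    from oslo_utils import uuidutils

    from octavia.api.drivers import data_models
    from octavia.api.drivers.noop_driver import driver

    lb_id = uuidutils.generate_uuid()

    # Build a provider load balancer object and round-trip it through a dict.
    lb = data_models.LoadBalancer(
        admin_state_up=True,
        loadbalancer_id=lb_id,
        name='example_lb',
        vip_address='192.0.2.10')
    lb_dict = lb.to_dict()        # non-recursive: nested objects/lists become None
    same_lb = data_models.LoadBalancer.from_dict(lb_dict)
    assert lb == same_lb          # __eq__ compares the to_dict() output

    # Drive the no-op provider; it only logs and records what it was asked to do.
    provider = driver.NoopProviderDriver()
    provider.create_vip_port(lb_id, {'vip_network_id': uuidutils.generate_uuid()})
    provider.loadbalancer_create(lb)
    # The NoopManager keyed this call by load balancer ID, so the last entry
    # for lb_id is (lb, 'loadbalancer_create').
    print(provider.driver.driverconfig[lb_id])

With the setup.cfg entry point above in place, a deployment would normally load this driver through the octavia.api.drivers namespace (e.g. via stevedore) rather than importing it directly; the direct import here is just to keep the sketch self-contained.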