Fullstack tests for DHCP agent HA
This patch adds fullstack tests for DHCP agent HA functionality. Two tests are added: * One checks that a network is properly rescheduled to a new agent when its existing DHCP agent goes down, * The second checks that a network handled by 2 DHCP agents still has a properly working DHCP service even if one DHCP agent goes down. Change-Id: Iaad373cafd6f83f2c1f8e7ac58dc70070e7aabaf
This commit is contained in:
parent
7653301bf6
commit
a12de8975e
neutron/tests
common/agents
fullstack
tools
82
neutron/tests/common/agents/fullstack_dhcp_agent.py
Executable file
82
neutron/tests/common/agents/fullstack_dhcp_agent.py
Executable file
@ -0,0 +1,82 @@
|
||||
#!/usr/bin/env python
|
||||
# Copyright 2016 OVH SAS
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import copy
|
||||
import os
|
||||
import sys
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_utils import uuidutils
|
||||
|
||||
from neutron.agent import dhcp_agent
|
||||
from neutron.agent.linux import dhcp as linux_dhcp
|
||||
|
||||
|
||||
# Extra configuration option for the test-only DHCP agent wrapper: a
# per-test namespace suffix so parallel fullstack runs on one machine
# do not collide on namespace names.
# NOTE(review): '_' is presumably the translation builtin installed by
# neutron's i18n setup (pulled in via the neutron imports above) —
# confirm it is available when this script runs standalone.
OPTS = [
    cfg.StrOpt('test_namespace_suffix', default='testprefix',
               # NOTE(review): default reads "testprefix" for a *suffix*
               # option — looks historical; confirm before changing.
               help=_("Suffix to append to all DHCP namespace names.")),
]
|
||||
|
||||
|
||||
def NetModel_init(self, d):
    """Test replacement for ``NetModel.__init__``.

    Builds the model exactly as the original does, then appends the
    per-test suffix (from the ``test_namespace_suffix`` option) to the
    DHCP namespace name so that several fullstack agents can coexist
    on a single host.
    """
    super(linux_dhcp.NetModel, self).__init__(d)
    suffix = cfg.CONF.test_namespace_suffix
    self._ns_name = linux_dhcp.NS_PREFIX + self.id + suffix
|
||||
|
||||
|
||||
@classmethod
def existing_dhcp_networks(cls, conf):
    """Return a list of existing networks ids that we have configs for."""
    confs_dir = cls.get_confs_dir(conf)
    suffix = cfg.CONF.test_namespace_suffix
    try:
        entries = os.listdir(confs_dir)
    except OSError:
        # Config directory does not exist yet: nothing is configured.
        return []
    # Strip the per-test suffix first; whatever remains must look like
    # a network UUID to count as an existing network.
    stripped = (entry.replace(suffix, "") for entry in entries)
    return [net_id for net_id in stripped if uuidutils.is_uuid_like(net_id)]
|
||||
|
||||
|
||||
def monkeypatch_dhcplocalprocess_init():
    """Wrap ``DhcpLocalProcess.__init__`` with a suffix-aware variant.

    The parent constructor derives filesystem paths (config dirs, etc.)
    from the network id, so it is handed a deep copy whose id carries the
    per-test suffix; afterwards the unmodified network model is restored
    on the instance.
    """
    original_init = linux_dhcp.DhcpLocalProcess.__init__

    def suffixed_init(self, conf, network, process_monitor, version=None,
                      plugin=None):
        suffixed_network = copy.deepcopy(network)
        suffixed_network.id += cfg.CONF.test_namespace_suffix
        original_init(self, conf, suffixed_network, process_monitor,
                      version, plugin)
        # Keep the real (unsuffixed) model visible to the rest of the agent.
        self.network = network

    linux_dhcp.DhcpLocalProcess.__init__ = suffixed_init
|
||||
|
||||
|
||||
def monkeypatch_linux_dhcp():
    """Apply all test monkeypatches to neutron.agent.linux.dhcp.

    Patches namespace naming, dnsmasq network discovery and the local
    process constructor so every fullstack agent instance operates with
    its own unique namespace suffix.
    """
    linux_dhcp.NetModel.__init__ = NetModel_init
    linux_dhcp.Dnsmasq.existing_dhcp_networks = existing_dhcp_networks
    monkeypatch_dhcplocalprocess_init()
|
||||
|
||||
|
||||
def main():
    """Run the real DHCP agent with the fullstack test patches applied."""
    cfg.CONF.register_opts(OPTS)
    monkeypatch_linux_dhcp()
    dhcp_agent.main()
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Propagate main()'s return value as the process exit status.
    sys.exit(main())
|
@ -48,6 +48,9 @@ class ConfigFixture(fixtures.Fixture):
|
||||
self.useFixture(cfg_fixture)
|
||||
self.filename = cfg_fixture.filename
|
||||
|
||||
def _generate_namespace_suffix(self):
|
||||
return utils.get_rand_name(prefix='test')
|
||||
|
||||
|
||||
class NeutronConfigFixture(ConfigFixture):
|
||||
|
||||
@ -69,6 +72,7 @@ class NeutronConfigFixture(ConfigFixture):
|
||||
'service_plugins': ','.join(service_plugins),
|
||||
'auth_strategy': 'noauth',
|
||||
'debug': 'True',
|
||||
'agent_down_time': env_desc.agent_down_time,
|
||||
'transport_url':
|
||||
'rabbit://%(user)s:%(password)s@%(host)s:5672/%(vhost)s' %
|
||||
{'user': rabbitmq_environment.user,
|
||||
@ -292,9 +296,6 @@ class L3ConfigFixture(ConfigFixture):
|
||||
def get_external_bridge(self):
|
||||
return self.config.DEFAULT.external_network_bridge
|
||||
|
||||
def _generate_namespace_suffix(self):
|
||||
return utils.get_rand_name(prefix='test')
|
||||
|
||||
|
||||
class DhcpConfigFixture(ConfigFixture):
|
||||
|
||||
@ -309,6 +310,7 @@ class DhcpConfigFixture(ConfigFixture):
|
||||
self.config['DEFAULT'].update({
|
||||
'debug': 'True',
|
||||
'dhcp_confs': self._generate_dhcp_path(),
|
||||
'test_namespace_suffix': self._generate_namespace_suffix()
|
||||
})
|
||||
|
||||
def _setUp(self):
|
||||
|
@ -34,13 +34,15 @@ class EnvironmentDescription(object):
|
||||
Does the setup, as a whole, support tunneling? How about l2pop?
|
||||
"""
|
||||
def __init__(self, network_type='vxlan', l2_pop=True, qos=False,
|
||||
mech_drivers='openvswitch,linuxbridge', arp_responder=False):
|
||||
mech_drivers='openvswitch,linuxbridge', arp_responder=False,
|
||||
agent_down_time=75):
|
||||
self.network_type = network_type
|
||||
self.l2_pop = l2_pop
|
||||
self.qos = qos
|
||||
self.network_range = None
|
||||
self.mech_drivers = mech_drivers
|
||||
self.arp_responder = arp_responder
|
||||
self.agent_down_time = agent_down_time
|
||||
|
||||
@property
|
||||
def tunneling_enabled(self):
|
||||
|
@ -15,6 +15,7 @@
|
||||
import datetime
|
||||
from distutils import spawn
|
||||
import os
|
||||
import re
|
||||
import signal
|
||||
|
||||
import fixtures
|
||||
@ -23,6 +24,7 @@ from neutronclient.v2_0 import client
|
||||
from oslo_utils import fileutils
|
||||
|
||||
from neutron.agent.linux import async_process
|
||||
from neutron.agent.linux import ip_lib
|
||||
from neutron.agent.linux import utils
|
||||
from neutron.common import utils as common_utils
|
||||
from neutron.tests import base
|
||||
@ -66,7 +68,11 @@ class ProcessFixture(fixtures.Fixture):
|
||||
self.process.start(block=True)
|
||||
|
||||
def stop(self):
|
||||
self.process.stop(block=True, kill_signal=self.kill_signal)
|
||||
try:
|
||||
self.process.stop(block=True, kill_signal=self.kill_signal)
|
||||
except async_process.AsyncProcessException as e:
|
||||
if "Process is not running" not in str(e):
|
||||
raise
|
||||
|
||||
|
||||
class RabbitmqEnvironmentFixture(fixtures.Fixture):
|
||||
@ -259,8 +265,42 @@ class DhcpAgentFixture(fixtures.Fixture):
|
||||
ProcessFixture(
|
||||
test_name=self.test_name,
|
||||
process_name=self.NEUTRON_DHCP_AGENT,
|
||||
exec_name=self.NEUTRON_DHCP_AGENT,
|
||||
exec_name=spawn.find_executable(
|
||||
'fullstack_dhcp_agent.py',
|
||||
path=os.path.join(base.ROOTDIR, 'common', 'agents')),
|
||||
config_filenames=config_filenames,
|
||||
namespace=self.namespace
|
||||
)
|
||||
)
|
||||
self.dhcp_namespace_pattern = re.compile(
|
||||
r"qdhcp-[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}%s" %
|
||||
self.get_namespace_suffix())
|
||||
self.addCleanup(self.clean_dhcp_namespaces)
|
||||
|
||||
def get_agent_hostname(self):
|
||||
return self.neutron_cfg_fixture.config['DEFAULT']['host']
|
||||
|
||||
def get_namespace_suffix(self):
|
||||
return self.plugin_config.DEFAULT.test_namespace_suffix
|
||||
|
||||
    def kill(self):
        """Stop the agent process and remove any namespaces it left behind."""
        self.process_fixture.stop()
        self.clean_dhcp_namespaces()
|
||||
|
||||
    def clean_dhcp_namespaces(self):
        """Delete all DHCP namespaces created by DHCP agent.

        In some tests for DHCP agent HA agents are killed when handling DHCP
        service for network(s). In such case DHCP namespace is not deleted by
        DHCP agent and such namespaces are found and deleted using agent's
        namespace suffix.
        """

        ip_wrapper = ip_lib.IPWrapper()
        for namespace in ip_wrapper.get_namespaces():
            # Only touch namespaces carrying this agent's unique suffix.
            if self.dhcp_namespace_pattern.match(namespace):
                try:
                    ip_wrapper.netns.delete(namespace)
                except RuntimeError:
                    # Continue cleaning even if namespace deletions fails
                    pass
|
||||
|
@ -12,9 +12,12 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import random
|
||||
|
||||
from neutron_lib import constants
|
||||
from oslo_utils import uuidutils
|
||||
|
||||
from neutron.common import utils as common_utils
|
||||
from neutron.tests.fullstack import base
|
||||
from neutron.tests.fullstack.resources import environment
|
||||
from neutron.tests.fullstack.resources import machine
|
||||
@ -23,7 +26,7 @@ from neutron.tests.unit import testlib_api
|
||||
load_tests = testlib_api.module_load_tests
|
||||
|
||||
|
||||
class TestDhcpAgent(base.BaseFullStackTestCase):
|
||||
class BaseDhcpAgentTest(base.BaseFullStackTestCase):
|
||||
|
||||
scenarios = [
|
||||
(constants.AGENT_TYPE_OVS,
|
||||
@ -36,18 +39,32 @@ class TestDhcpAgent(base.BaseFullStackTestCase):
|
||||
host_descriptions = [
|
||||
environment.HostDescription(
|
||||
dhcp_agent=True,
|
||||
l2_agent_type=self.l2_agent_type)]
|
||||
l2_agent_type=self.l2_agent_type
|
||||
) for _ in range(self.number_of_hosts)]
|
||||
|
||||
env = environment.Environment(
|
||||
environment.EnvironmentDescription(
|
||||
l2_pop=False,
|
||||
arp_responder=False),
|
||||
arp_responder=False,
|
||||
agent_down_time=10),
|
||||
host_descriptions)
|
||||
|
||||
super(TestDhcpAgent, self).setUp(env)
|
||||
super(BaseDhcpAgentTest, self).setUp(env)
|
||||
self.project_id = uuidutils.generate_uuid()
|
||||
self._create_network_subnet_and_vm()
|
||||
|
||||
    def _spawn_vm(self):
        """Boot a fake VM with DHCP enabled on a randomly chosen host.

        Returns the machine fixture after it has finished booting.
        """
        host = random.choice(self.environment.hosts)
        vm = self.useFixture(
            machine.FakeFullstackMachine(
                host,
                self.network['id'],
                self.project_id,
                self.safe_client,
                use_dhcp=True))
        vm.block_until_boot()
        return vm
|
||||
|
||||
def _create_network_subnet_and_vm(self):
|
||||
self.network = self.safe_client.create_network(self.project_id)
|
||||
|
||||
@ -58,14 +75,19 @@ class TestDhcpAgent(base.BaseFullStackTestCase):
|
||||
name='subnet-test',
|
||||
enable_dhcp=True)
|
||||
|
||||
self.vm = self.useFixture(
|
||||
machine.FakeFullstackMachine(
|
||||
self.environment.hosts[0],
|
||||
self.network['id'],
|
||||
self.project_id,
|
||||
self.safe_client,
|
||||
use_dhcp=True))
|
||||
self.vm.block_until_boot()
|
||||
self.vm = self._spawn_vm()
|
||||
|
||||
    def _wait_until_agent_down(self, agent_id):
        """Block until the Neutron server reports *agent_id* as not alive."""
        def _agent_down():
            agent = self.client.show_agent(agent_id)['agent']
            return not agent.get('alive')

        common_utils.wait_until_true(_agent_down)
|
||||
|
||||
|
||||
class TestDhcpAgentNoHA(BaseDhcpAgentTest):
|
||||
|
||||
number_of_hosts = 1
|
||||
|
||||
def test_dhcp_assignment(self):
|
||||
# First check if network was scheduled to one DHCP agent
|
||||
@ -75,3 +97,75 @@ class TestDhcpAgent(base.BaseFullStackTestCase):
|
||||
|
||||
# And check if IP and gateway config is fine on FakeMachine
|
||||
self.vm.block_until_dhcp_config_done()
|
||||
|
||||
|
||||
class TestDhcpAgentHA(BaseDhcpAgentTest):
    """HA scenarios: two hosts, each running its own DHCP agent."""

    # Two hosts so the network can fail over to (or be served by) a
    # second DHCP agent.
    number_of_hosts = 2

    def _wait_until_network_rescheduled(self, old_agent):
        """Block until the test network is hosted by an agent other than
        *old_agent*."""
        def _agent_rescheduled():
            network_agents = self.client.list_dhcp_agent_hosting_networks(
                self.network['id'])['agents']
            if network_agents:
                return network_agents[0]['id'] != old_agent['id']
            return False

        common_utils.wait_until_true(_agent_rescheduled)

    def _kill_dhcp_agent(self, agent):
        """Kill the DHCP agent matching *agent* by host, then wait until
        the server reports it as down."""
        for host in self.environment.hosts:
            hostname = host.dhcp_agent.get_agent_hostname()
            if hostname == agent['host']:
                host.dhcp_agent.kill()
                self._wait_until_agent_down(agent['id'])
                break

    def _add_network_to_new_agent(self):
        """Manually schedule the test network onto a DHCP agent that does
        not currently host it (if any such agent exists)."""
        dhcp_agents = self.client.list_agents(
            agent_type=constants.AGENT_TYPE_DHCP)['agents']
        dhcp_agents_ids = [agent['id'] for agent in dhcp_agents]

        current_agents = self.client.list_dhcp_agent_hosting_networks(
            self.network['id'])['agents']
        current_agents_ids = [agent['id'] for agent in current_agents]

        # Agents not yet hosting the network are the candidates.
        new_agents_ids = list(set(dhcp_agents_ids) - set(current_agents_ids))
        if new_agents_ids:
            new_agent_id = random.choice(new_agents_ids)
            self.client.add_network_to_dhcp_agent(
                new_agent_id, {'network_id': self.network['id']})

    def test_reschedule_network_on_new_agent(self):
        """Killing the sole hosting agent must reschedule the network to
        the surviving agent, and DHCP must still work for a new VM."""
        network_dhcp_agents = self.client.list_dhcp_agent_hosting_networks(
            self.network['id'])['agents']
        self.assertEqual(1, len(network_dhcp_agents))

        self._kill_dhcp_agent(network_dhcp_agents[0])
        self._wait_until_network_rescheduled(network_dhcp_agents[0])

        # ensure that only one agent is handling DHCP for this network
        new_network_dhcp_agents = self.client.list_dhcp_agent_hosting_networks(
            self.network['id'])['agents']
        self.assertEqual(1, len(new_network_dhcp_agents))

        # check if new vm will get IP from new DHCP agent
        new_vm = self._spawn_vm()
        new_vm.block_until_dhcp_config_done()

    def test_multiple_agents_for_network(self):
        """With two agents hosting the network, DHCP must survive the
        loss of one of them."""
        network_dhcp_agents = self.client.list_dhcp_agent_hosting_networks(
            self.network['id'])['agents']
        self.assertEqual(1, len(network_dhcp_agents))

        self._add_network_to_new_agent()
        # ensure that two agents are handling DHCP for this network
        network_dhcp_agents = self.client.list_dhcp_agent_hosting_networks(
            self.network['id'])['agents']
        self.assertEqual(2, len(network_dhcp_agents))

        self._kill_dhcp_agent(network_dhcp_agents[0])

        # check if new vm will get IP from DHCP agent which is still alive
        new_vm = self._spawn_vm()
        new_vm.block_until_dhcp_config_done()
|
||||
|
@ -202,7 +202,7 @@ function _install_rootwrap_sudoers {
|
||||
#
|
||||
# 1: https://bugs.launchpad.net/oslo.rootwrap/+bug/1417331
|
||||
#
|
||||
Defaults:$STACK_USER secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$PROJECT_VENV/bin"
|
||||
Defaults:$STACK_USER secure_path="$PROJECT_VENV/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
|
||||
$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD
|
||||
$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_DAEMON_SUDOER_CMD
|
||||
EOF
|
||||
|
Loading…
x
Reference in New Issue
Block a user