Switch to pluggable IPAM implementation
This patch unconditionally switches all deployments from the non-pluggable IPAM implementation to pluggable IPAM during the upgrade to Newton. Pluggable IPAM is enabled by pointing the ipam_driver default at the reference driver. Users who manually set ipam_driver in neutron.conf continue to use the driver of their choice.

During the upgrade, data is migrated from the non-pluggable IPAM tables to the pluggable IPAM tables using an alembic migration. Availability ranges (IPAvailabilityRange) are no longer used to calculate the next available IP address, so no migration is included for that table.

The migration is covered by functional tests: a dataset with subnets, allocation pools and IP allocations is loaded prior to the migration, and once the migration completes the IPAM-related tables are checked to verify that the data was migrated properly.

The built-in IPAM implementation becomes obsolete and is planned to be removed in upcoming commits.

UpgradeImpact

Closes-Bug: #1516156
Change-Id: I1d633810bd16f1bec7bbca57522e9ad3f7745ea2
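For deployments that pin the driver explicitly, nothing changes; a minimal neutron.conf sketch (assuming the option lives in the [DEFAULT] group, as the core_opts hunk below suggests):

    [DEFAULT]
    # 'internal' is the new default introduced by this patch and selects the
    # reference driver; an out-of-tree driver name set here is kept as-is.
    ipam_driver = internal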
This commit is contained in:
parent 6449c40108
commit 625de54de3
@@ -146,7 +146,7 @@ core_opts = [
                 help=_('If True, advertise network MTU values if core plugin '
                        'calculates them. MTU is advertised to running '
                        'instances via DHCP and RA MTU options.')),
-    cfg.StrOpt('ipam_driver',
+    cfg.StrOpt('ipam_driver', default='internal',
                help=_("Neutron IPAM (IP address management) driver to use. "
                       "If ipam_driver is not set (default behavior), no IPAM "
                       "driver is used. In order to use the reference "
@@ -1 +1 @@
-a8b517cff8ab
+3b935b28e7a0
@@ -0,0 +1,131 @@
# Copyright 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""migrate to pluggable ipam"""

# revision identifiers, used by Alembic.
revision = '3b935b28e7a0'
down_revision = 'a8b517cff8ab'

from alembic import op
from oslo_utils import uuidutils
import sqlalchemy as sa

# Simple models of the tables with only the fields needed for the migration.
neutron_subnet = sa.Table('subnets', sa.MetaData(),
                          sa.Column('id', sa.String(length=36),
                                    nullable=False))

ipam_subnet = sa.Table('ipamsubnets', sa.MetaData(),
                       sa.Column('id', sa.String(length=36), nullable=False),
                       sa.Column('neutron_subnet_id', sa.String(length=36),
                                 nullable=True))

ip_allocation_pool = sa.Table('ipallocationpools', sa.MetaData(),
                              sa.Column('id', sa.String(length=36),
                                        nullable=False),
                              sa.Column('subnet_id', sa.String(length=36),
                                        sa.ForeignKey('subnets.id',
                                                      ondelete="CASCADE"),
                                        nullable=False),
                              sa.Column('first_ip', sa.String(length=64),
                                        nullable=False),
                              sa.Column('last_ip', sa.String(length=64),
                                        nullable=False))

ipam_allocation_pool = sa.Table('ipamallocationpools', sa.MetaData(),
                                sa.Column('id', sa.String(length=36),
                                          nullable=False),
                                sa.Column('ipam_subnet_id',
                                          sa.String(length=36),
                                          sa.ForeignKey('ipamsubnets.id',
                                                        ondelete="CASCADE"),
                                          nullable=False),
                                sa.Column('first_ip', sa.String(length=64),
                                          nullable=False),
                                sa.Column('last_ip', sa.String(length=64),
                                          nullable=False))

ip_allocation = sa.Table('ipallocations', sa.MetaData(),
                         sa.Column('ip_address', sa.String(length=64),
                                   nullable=False),
                         sa.Column('subnet_id', sa.String(length=36),
                                   sa.ForeignKey('subnets.id',
                                                 ondelete="CASCADE")))

ipam_allocation = sa.Table('ipamallocations', sa.MetaData(),
                           sa.Column('ip_address', sa.String(length=64),
                                     nullable=False, primary_key=True),
                           sa.Column('ipam_subnet_id', sa.String(length=36),
                                     sa.ForeignKey('subnets.id',
                                                   ondelete="CASCADE"),
                                     primary_key=True),
                           sa.Column('status', sa.String(length=36)))


def upgrade():
    """Migrate data to the pluggable ipam reference driver.

    Tables 'subnets', 'ipallocationpools' and 'ipallocations' are API exposed
    and always contain up to date data independently of the ipam driver in
    use, so they can be used as a reliable source of data.

    This migration cleans up the reference ipam driver tables and rebuilds
    them from the API exposed tables, so it works correctly for both types
    of users:
    - those who used the built-in ipam implementation;
      their ipam data is migrated to the reference ipam driver tables,
      and the reference ipam driver becomes the default driver;
    - those who switched to the reference ipam driver before Newton;
      the existing reference ipam driver tables are cleaned up and all ipam
      data is regenerated from the API exposed tables.
    All existing subnets and ports are still usable after the upgrade.
    """
    session = sa.orm.Session(bind=op.get_bind())

    # Make sure destination tables are clean
    session.execute(ipam_subnet.delete())
    session.execute(ipam_allocation_pool.delete())
    session.execute(ipam_allocation.delete())

    map_neutron_id_to_ipam = {}
    subnet_values = []
    for subnet_id, in session.query(neutron_subnet):
        ipam_id = uuidutils.generate_uuid()
        map_neutron_id_to_ipam[subnet_id] = ipam_id
        subnet_values.append(dict(
            id=ipam_id,
            neutron_subnet_id=subnet_id))
    op.bulk_insert(ipam_subnet, subnet_values)

    ipam_pool_values = []
    pools = session.query(ip_allocation_pool)
    for pool in pools:
        new_pool_id = uuidutils.generate_uuid()
        ipam_pool_values.append(dict(
            id=new_pool_id,
            ipam_subnet_id=map_neutron_id_to_ipam[pool.subnet_id],
            first_ip=pool.first_ip,
            last_ip=pool.last_ip))
    op.bulk_insert(ipam_allocation_pool, ipam_pool_values)

    ipam_allocation_values = []
    for ip_alloc in session.query(ip_allocation):
        ipam_allocation_values.append(dict(
            ip_address=ip_alloc.ip_address,
            status='ALLOCATED',
            ipam_subnet_id=map_neutron_id_to_ipam[ip_alloc.subnet_id]))
    op.bulk_insert(ipam_allocation, ipam_allocation_values)
    session.commit()
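Because the migration rebuilds the reference-driver tables from the API-exposed tables, every source row should end up with exactly one counterpart after the upgrade. A rough standalone sanity check along those lines (a hypothetical script, not part of this change; the connection URL is a placeholder for the real one from neutron.conf [database]):

    # Hypothetical post-upgrade check: the migration rebuilds the reference
    # driver tables from the API-exposed tables, so row counts should match.
    import sqlalchemy as sa

    # Placeholder connection URL; substitute the deployment's actual value.
    engine = sa.create_engine('mysql+pymysql://neutron:secret@controller/neutron')

    table_pairs = (('subnets', 'ipamsubnets'),
                   ('ipallocationpools', 'ipamallocationpools'),
                   ('ipallocations', 'ipamallocations'))

    with engine.connect() as conn:
        for source, target in table_pairs:
            src = conn.execute(
                sa.text('SELECT COUNT(*) FROM %s' % source)).scalar()
            dst = conn.execute(
                sa.text('SELECT COUNT(*) FROM %s' % target)).scalar()
            print('%s: %s rows, %s: %s rows' % (source, src, target, dst))
            assert src == dst, 'IPAM data for %s looks incomplete' % target

The functional test added below performs the same kind of comparison per subnet, but against a known dataset loaded before the migration runs.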
@@ -0,0 +1,139 @@
# Copyright 2016 Infoblox Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from oslo_db.sqlalchemy import utils as db_utils
from oslo_utils import uuidutils

from neutron.tests.functional.db import test_migrations


class MigrationToPluggableIpamMixin(object):
    """Validates data migration to Pluggable IPAM."""

    _standard_attribute_id = 0

    def _gen_attr_id(self, engine, type):
        self._standard_attribute_id += 1
        standardattributes = db_utils.get_table(engine, 'standardattributes')
        engine.execute(standardattributes.insert().values({
            'id': self._standard_attribute_id, 'resource_type': type}))
        return self._standard_attribute_id

    def _create_subnets(self, engine, data):
        """Create subnets and save the generated subnet ids in data."""
        networks = db_utils.get_table(engine, 'networks')
        subnets = db_utils.get_table(engine, 'subnets')
        pools = db_utils.get_table(engine, 'ipallocationpools')
        allocations = db_utils.get_table(engine, 'ipallocations')

        for cidr in data:
            ip_version = 6 if ':' in cidr else 4
            # Save generated id in incoming dict to simplify validations
            network_id = uuidutils.generate_uuid()
            network_dict = dict(
                id=network_id,
                standard_attr_id=self._gen_attr_id(engine, 'networks'))
            engine.execute(networks.insert().values(network_dict))

            data[cidr]['id'] = uuidutils.generate_uuid()
            subnet_dict = dict(id=data[cidr]['id'],
                               cidr=cidr,
                               ip_version=ip_version,
                               standard_attr_id=self._gen_attr_id(engine,
                                                                  'subnets'))
            engine.execute(subnets.insert().values(subnet_dict))

            if data[cidr].get('pools'):
                for pool in data[cidr]['pools']:
                    pool_dict = dict(id=uuidutils.generate_uuid(),
                                     first_ip=pool['first_ip'],
                                     last_ip=pool['last_ip'],
                                     subnet_id=data[cidr]['id'])
                    engine.execute(pools.insert().values(pool_dict))

            if data[cidr].get('allocations'):
                for ip in data[cidr]['allocations']:
                    ip_dict = dict(ip_address=ip,
                                   subnet_id=data[cidr]['id'],
                                   network_id=network_id)
                    engine.execute(allocations.insert().values(ip_dict))

    def _pre_upgrade_3b935b28e7a0(self, engine):
        data = {
            '172.23.0.0/16': {
                'pools': [{'first_ip': '172.23.0.2',
                           'last_ip': '172.23.255.254'}],
                'allocations': ('172.23.0.2', '172.23.245.2')},
            '192.168.40.0/24': {
                'pools': [{'first_ip': '192.168.40.2',
                           'last_ip': '192.168.40.100'},
                          {'first_ip': '192.168.40.105',
                           'last_ip': '192.168.40.150'},
                          {'first_ip': '192.168.40.155',
                           'last_ip': '192.168.40.157'},
                          ],
                'allocations': ('192.168.40.2', '192.168.40.3',
                                '192.168.40.15', '192.168.40.60')},
            'fafc:babc::/64': {
                'pools': [{'first_ip': 'fafc:babc::2',
                           'last_ip': 'fafc:babc::6:fe00',
                           }],
                'allocations': ('fafc:babc::3',)}}
        self._create_subnets(engine, data)
        return data

    def _check_3b935b28e7a0(self, engine, data):
        subnets = db_utils.get_table(engine, 'ipamsubnets')
        pools = db_utils.get_table(engine, 'ipamallocationpools')
        allocations = db_utils.get_table(engine, 'ipamallocations')

        ipam_subnets = engine.execute(subnets.select()).fetchall()
        # Count of ipam subnets should match count of neutron subnets
        self.assertEqual(len(data), len(ipam_subnets))
        neutron_to_ipam_id = {subnet.neutron_subnet_id: subnet.id
                              for subnet in ipam_subnets}
        for cidr in data:
            self.assertIn(data[cidr]['id'], neutron_to_ipam_id)

            ipam_subnet_id = neutron_to_ipam_id[data[cidr]['id']]
            # Validate that ip allocations are migrated correctly
            ipam_allocations = engine.execute(allocations.select().where(
                allocations.c.ipam_subnet_id == ipam_subnet_id)).fetchall()
            for ipam_allocation in ipam_allocations:
                self.assertIn(ipam_allocation.ip_address,
                              data[cidr]['allocations'])
            self.assertEqual(len(data[cidr]['allocations']),
                             len(ipam_allocations))

            # Validate that allocation pools are migrated correctly
            ipam_pools = engine.execute(pools.select().where(
                pools.c.ipam_subnet_id == ipam_subnet_id)).fetchall()
            # Convert to dict for easier lookup
            pool_dict = {pool.first_ip: pool.last_ip for pool in ipam_pools}
            for p in data[cidr]['pools']:
                self.assertIn(p['first_ip'], pool_dict)
                self.assertEqual(p['last_ip'], pool_dict[p['first_ip']])
            self.assertEqual(len(data[cidr]['pools']),
                             len(ipam_pools))


class TestMigrationToPluggableIpamMysql(MigrationToPluggableIpamMixin,
                                        test_migrations.TestWalkMigrationsMysql):
    pass


class TestMigrationToPluggableIpamPsql(MigrationToPluggableIpamMixin,
                                       test_migrations.TestWalkMigrationsPsql):
    pass
@@ -0,0 +1,12 @@
---
prelude: >
    The internal pluggable IPAM implementation (added in the Liberty release)
    is now the default for both old and new deployments. Old deployments are
    unconditionally switched to pluggable IPAM during the upgrade. The old
    non-pluggable IPAM implementation is deprecated and will be removed from
    the code base.
upgrade:
  - During the upgrade the 'internal' ipam driver becomes the default for the
    'ipam_driver' config option, and data is migrated to the new tables by an
    alembic migration.
deprecations:
  - The non-pluggable ipam implementation is deprecated and will be removed
    in the Newton release cycle.
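In practice the data migration is picked up together with the rest of the Newton schema migrations; assuming the standard upgrade procedure, the relevant step is the usual Neutron DB upgrade command:

    # Runs all pending alembic migrations, including 3b935b28e7a0 above.
    neutron-db-manage upgrade heads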