Merge "Add support for catalog zones"
commit 91f7d7d3fb
@ -384,7 +384,7 @@ class Service(service.RPCService):
|
||||
|
||||
def _update_soa(self, context, zone):
|
||||
# NOTE: We should not be updating SOA records when a zone is SECONDARY.
|
||||
if zone.type != 'PRIMARY':
|
||||
if zone.type == constants.ZONE_SECONDARY:
|
||||
return
|
||||
|
||||
# Get the pool for its list of ns_records
|
||||
@ -753,6 +753,8 @@ class Service(service.RPCService):
|
||||
|
||||
policy.check('create_zone', context, target)
|
||||
|
||||
self._enforce_catalog_zone_policy(context, zone)
|
||||
|
||||
self._is_valid_project_id(zone.tenant_id)
|
||||
|
||||
# Ensure the tenant has enough quota to continue
|
||||
@ -817,7 +819,8 @@ class Service(service.RPCService):
|
||||
|
||||
zone = self._create_zone_in_storage(context, zone)
|
||||
|
||||
self.worker_api.create_zone(context, zone)
|
||||
if zone.type != constants.ZONE_CATALOG:
|
||||
self.worker_api.create_zone(context, zone)
|
||||
|
||||
if zone.type == constants.ZONE_SECONDARY:
|
||||
self.worker_api.perform_zone_xfr(context, zone)
|
||||
@ -855,6 +858,8 @@ class Service(service.RPCService):
|
||||
context, zone, rrset, increment_serial=False
|
||||
)
|
||||
|
||||
self._ensure_catalog_zone_serial_increment(context, zone)
|
||||
|
||||
return zone
|
||||
|
||||
@rpc.expected_exceptions()
|
||||
@ -940,6 +945,11 @@ class Service(service.RPCService):
|
||||
|
||||
policy.check('find_zones', context, target)
|
||||
|
||||
if 'admin' not in context.roles:
|
||||
if criterion is None:
|
||||
criterion = {}
|
||||
criterion['type'] = '!CATALOG'
|
||||
|
||||
return self.storage.find_zones(context, criterion, marker, limit,
|
||||
sort_key, sort_dir)
|
||||
|
||||
@ -967,6 +977,7 @@ class Service(service.RPCService):
|
||||
}
|
||||
|
||||
policy.check('update_zone', context, target)
|
||||
self._enforce_catalog_zone_policy(context, zone)
|
||||
|
||||
changes = zone.obj_get_changes()
|
||||
|
||||
@ -1029,6 +1040,8 @@ class Service(service.RPCService):
|
||||
"""
|
||||
zone = self.storage.get_zone(context, zone_id)
|
||||
|
||||
self._enforce_catalog_zone_policy(context, zone)
|
||||
|
||||
if policy.enforce_new_defaults():
|
||||
target = {
|
||||
'zone_id': zone_id,
|
||||
@ -1093,6 +1106,8 @@ class Service(service.RPCService):
|
||||
|
||||
zone = self.storage.update_zone(context, zone)
|
||||
|
||||
self._ensure_catalog_zone_serial_increment(context, zone)
|
||||
|
||||
return zone
|
||||
|
||||
@rpc.expected_exceptions()
|
||||
@ -1206,6 +1221,7 @@ class Service(service.RPCService):
|
||||
|
||||
policy.check('share_zone', context, target)
|
||||
|
||||
self._enforce_catalog_zone_policy(context, zone)
|
||||
self._is_valid_project_id(context.project_id)
|
||||
|
||||
if zone.tenant_id == shared_zone.target_project_id:
|
||||
@ -1412,6 +1428,8 @@ class Service(service.RPCService):
|
||||
zone = self.storage.get_zone(context, zone_id,
|
||||
apply_tenant_criteria=False)
|
||||
|
||||
self._enforce_catalog_zone_policy(context, zone)
|
||||
|
||||
# Note this call must follow the get_zone call to maintain API response
|
||||
# code behavior.
|
||||
zone_shared = self._check_zone_share_permission(context, zone)
|
||||
@ -1656,6 +1674,8 @@ class Service(service.RPCService):
|
||||
zone = self.storage.get_zone(context, zone_id,
|
||||
apply_tenant_criteria=False)
|
||||
|
||||
self._enforce_catalog_zone_policy(context, zone)
|
||||
|
||||
# Note this call must follow the get_zone call to maintain API response
|
||||
# code behavior.
|
||||
zone_shared = self._check_zone_share_permission(context, zone)
|
||||
@ -1755,6 +1775,8 @@ class Service(service.RPCService):
|
||||
zone = self.storage.get_zone(context, zone_id,
|
||||
apply_tenant_criteria=False)
|
||||
|
||||
self._enforce_catalog_zone_policy(context, zone)
|
||||
|
||||
# Don't allow updates to zones that are being deleted
|
||||
if zone.action == 'DELETE':
|
||||
raise exceptions.BadRequest('Can not update a deleting zone')
|
||||
@ -2582,6 +2604,8 @@ class Service(service.RPCService):
|
||||
# get zone
|
||||
zone = self.get_zone(context, zone_transfer_request.zone_id)
|
||||
|
||||
self._enforce_catalog_zone_policy(context, zone)
|
||||
|
||||
# Don't allow transfers for zones that are being deleted
|
||||
if zone.action == 'DELETE':
|
||||
raise exceptions.BadRequest('Can not transfer a deleting zone')
|
||||
@ -3094,3 +3118,29 @@ class Service(service.RPCService):
            )
        return self.storage.create_service_status(
            context, service_status)

    def _ensure_catalog_zone_serial_increment(self, context, zone):
        if zone.type == constants.ZONE_CATALOG:
            # Changes to the catalog zone itself must not schedule another
            # serial increment; only member zone changes do.
            return

        pool = self.storage.find_pool(context, criterion={'id': zone.pool_id})

        try:
            catalog_zone = self.storage.get_catalog_zone(context, pool)

            # Schedule batched serial increment
            self._update_zone_in_storage(context, catalog_zone)
        except exceptions.ZoneNotFound:
            pass

    def _enforce_catalog_zone_policy(self, context, zone):
        # Forbid for HTTP API, but allow for designate-manage
        if (
            zone.type == constants.ZONE_CATALOG and
            not (
                context.is_admin and 'admin' in context.roles and
                context.request_id == 'designate-manage'
            )
        ):
            raise exceptions.Forbidden(
                'This operation is not allowed for catalog zones.')
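For illustration, a minimal standalone sketch of the guard above. FakeContext and FakeCatalogZone are stand-ins invented for this example (they are not part of the change) and model only the attributes the check reads: an admin context whose request_id is 'designate-manage' passes, while any other context is rejected for catalog zones.

from designate.common import constants
from designate import exceptions


class FakeContext:
    # Stand-in context exposing only what the guard inspects.
    def __init__(self, is_admin=False, roles=(), request_id=None):
        self.is_admin = is_admin
        self.roles = list(roles)
        self.request_id = request_id


class FakeCatalogZone:
    type = constants.ZONE_CATALOG


def enforce_catalog_zone_policy(context, zone):
    # Same condition as Service._enforce_catalog_zone_policy above.
    if (
        zone.type == constants.ZONE_CATALOG and
        not (
            context.is_admin and 'admin' in context.roles and
            context.request_id == 'designate-manage'
        )
    ):
        raise exceptions.Forbidden(
            'This operation is not allowed for catalog zones.')


manage_ctx = FakeContext(is_admin=True, roles=['admin'],
                         request_id='designate-manage')
enforce_catalog_zone_policy(manage_ctx, FakeCatalogZone())  # allowed

api_ctx = FakeContext(is_admin=False, roles=['member'])
try:
    enforce_catalog_zone_policy(api_ctx, FakeCatalogZone())
except exceptions.Forbidden:
    pass  # catalog zones cannot be managed through the HTTP API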
@ -57,7 +57,8 @@ PROJECT = 'project'
# Zone constants
ZONE_PRIMARY = 'PRIMARY'
ZONE_SECONDARY = 'SECONDARY'
ZONE_TYPES = [ZONE_PRIMARY, ZONE_SECONDARY]
ZONE_CATALOG = 'CATALOG'
ZONE_TYPES = [ZONE_PRIMARY, ZONE_SECONDARY, ZONE_CATALOG]

# Record regexes
RE_HOSTNAME = re.compile(r'^(?!.{255,})(?:(?:^\*|(?!\-)[A-Za-z0-9_\-]{1,63})(?<!\-)\.)+\Z') # noqa
@ -79,3 +80,12 @@ RE_FIP = re.compile(r'^(?P<region>[A-Za-z0-9\\.\\-_]{1,100}):(?P<id>[0-9a-fA-F]{

# Error Validation regexes
RE_REQUIRED = re.compile(r'\'([\w]*)\' is a required property')

TSIG_ALGORITHMS = [
    'hmac-md5',
    'hmac-sha1',
    'hmac-sha224',
    'hmac-sha256',
    'hmac-sha384',
    'hmac-sha512'
]
@ -24,6 +24,7 @@ import dns.resolver
|
||||
import dns.rrset
|
||||
from oslo_log import log as logging
|
||||
|
||||
from designate.common import constants
|
||||
import designate.conf
|
||||
from designate import exceptions
|
||||
from designate.worker import rpcapi as worker_api
|
||||
@ -218,17 +219,25 @@ class RequestHandler:
|
||||
yield self._handle_query_error(request, dns.rcode.REFUSED)
|
||||
return
|
||||
|
||||
# The AXFR response needs to have a SOA at the beginning and end.
|
||||
criterion = {'zone_id': zone.id, 'type': 'SOA'}
|
||||
soa_records = self.storage.find_recordsets_axfr(context, criterion)
|
||||
if zone.type != constants.ZONE_CATALOG:
|
||||
# The AXFR response needs to have a SOA at the beginning and end.
|
||||
criterion = {'zone_id': zone.id, 'type': 'SOA'}
|
||||
soa_records = self.storage.find_recordsets_axfr(context, criterion)
|
||||
|
||||
# Get all the records other than SOA
|
||||
criterion = {'zone_id': zone.id, 'type': '!SOA'}
|
||||
records = self.storage.find_recordsets_axfr(context, criterion)
|
||||
# Get all the records other than SOA
|
||||
criterion = {'zone_id': zone.id, 'type': '!SOA'}
|
||||
records = self.storage.find_recordsets_axfr(context, criterion)
|
||||
|
||||
# Place the SOA RRSet at the front and end of the RRSet list
|
||||
records.insert(0, soa_records[0])
|
||||
records.append(soa_records[0])
|
||||
# Place the SOA RRSet at the front and end of the RRSet list
|
||||
records.insert(0, soa_records[0])
|
||||
records.append(soa_records[0])
|
||||
else:
|
||||
catalog_zone_pool = self.storage.find_pool(
|
||||
context, criterion={'id': zone.pool_id}
|
||||
)
|
||||
records = self.storage.get_catalog_zone_records(
|
||||
context, catalog_zone_pool
|
||||
)
|
||||
|
||||
# Handle multi message response with tsig
|
||||
multi_messages = False
|
||||
@ -239,10 +248,16 @@ class RequestHandler:
|
||||
while records:
|
||||
record = records.pop(0)
|
||||
|
||||
rrname = str(record[3])
|
||||
ttl = int(record[2]) if record[2] is not None else zone.ttl
|
||||
rrtype = str(record[1])
|
||||
rdata = [str(record[4])]
|
||||
if zone.type != constants.ZONE_CATALOG:
|
||||
rrname = str(record[3])
|
||||
ttl = int(record[2]) if record[2] is not None else zone.ttl
|
||||
rrtype = str(record[1])
|
||||
rdata = [str(record[4])]
|
||||
else:
|
||||
rrname = record.name
|
||||
ttl = zone.ttl
|
||||
rrtype = record.type
|
||||
rdata = [record.records[0].data]
|
||||
|
||||
rrset = dns.rrset.from_text_list(
|
||||
rrname, ttl, dns.rdataclass.IN, rrtype, rdata,
|
||||
|
@ -24,6 +24,7 @@ from designate.objects.floating_ip import FloatingIP, FloatingIPList # noqa
|
||||
from designate.objects.pool import Pool, PoolList # noqa
|
||||
from designate.objects.pool_also_notify import PoolAlsoNotify, PoolAlsoNotifyList # noqa
|
||||
from designate.objects.pool_attribute import PoolAttribute, PoolAttributeList # noqa
|
||||
from designate.objects.pool_catalog_zone import PoolCatalogZone # noqa
|
||||
from designate.objects.pool_ns_record import PoolNsRecord, PoolNsRecordList # noqa
|
||||
from designate.objects.pool_nameserver import PoolNameserver, PoolNameserverList # noqa
|
||||
from designate.objects.pool_target import PoolTarget, PoolTargetList # noqa
|
||||
|
@ -39,6 +39,7 @@ from designate.objects.adapters.api_v2.shared_zone import SharedZoneAPIv2Adapter
|
||||
from designate.objects.adapters.yaml.pool import PoolYAMLAdapter, PoolListYAMLAdapter # noqa
|
||||
from designate.objects.adapters.yaml.pool_attribute import PoolAttributeYAMLAdapter, PoolAttributeListYAMLAdapter # noqa
|
||||
from designate.objects.adapters.yaml.pool_also_notify import PoolAlsoNotifyYAMLAdapter, PoolAlsoNotifyListYAMLAdapter # noqa
|
||||
from designate.objects.adapters.yaml.pool_catalog_zone import PoolCatalogZoneYAMLAdapter # noqa
|
||||
from designate.objects.adapters.yaml.pool_nameserver import PoolNameserverYAMLAdapter, PoolNameserverListYAMLAdapter # noqa
|
||||
from designate.objects.adapters.yaml.pool_ns_record import PoolNsRecordYAMLAdapter, PoolNsRecordListYAMLAdapter # noqa
|
||||
from designate.objects.adapters.yaml.pool_target import PoolTargetYAMLAdapter, PoolTargetListYAMLAdapter # noqa
|
||||
|
@ -47,6 +47,9 @@ class PoolYAMLAdapter(base.YAMLAdapter):
|
||||
'also_notifies': {
|
||||
'read_only': False
|
||||
},
|
||||
'catalog_zone': {
|
||||
'read_only': False
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
designate/objects/adapters/yaml/pool_catalog_zone.py (new file, 35 lines)
@ -0,0 +1,35 @@
# Copyright 2023 inovex GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from designate import objects
from designate.objects.adapters.yaml import base


class PoolCatalogZoneYAMLAdapter(base.YAMLAdapter):
    ADAPTER_OBJECT = objects.PoolCatalogZone
    MODIFICATIONS = {
        'fields': {
            'catalog_zone_fqdn': {
                'read_only': False
            },
            'catalog_zone_refresh': {
                'read_only': False
            },
            'catalog_zone_tsig_key': {
                'read_only': False
            },
            'catalog_zone_tsig_algorithm': {
                'read_only': False
            },
        }
    }
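As a usage sketch for the new adapter, loading a catalog_zone fragment of pools.yaml into the object model could look like the following. The 'YAML' format name mirrors the existing pool adapters, and the exact parse call is an assumption for illustration rather than something introduced by this change; the values are the illustrative ones used elsewhere in this change.

from designate import objects
from designate.objects.adapters import DesignateAdapter

# Illustrative pools.yaml fragment for a pool's catalog zone.
values = {
    'catalog_zone_fqdn': 'cat.example.org.',
    'catalog_zone_refresh': 60,
    'catalog_zone_tsig_key': 'SomeSecretKey',
    'catalog_zone_tsig_algorithm': 'hmac-sha256',
}

catalog_zone = DesignateAdapter.parse(
    'YAML', values, objects.PoolCatalogZone())
catalog_zone.validate()  # enforces the PoolCatalogZone field constraints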
@ -32,6 +32,8 @@ class Pool(base.DictObjectMixin, base.PersistentObjectMixin,
        'targets': fields.ObjectFields('PoolTargetList', nullable=True),
        'also_notifies': fields.ObjectFields('PoolAlsoNotifyList',
                                             nullable=True),
        'catalog_zone': fields.ObjectFields('PoolCatalogZone',
                                            nullable=True),
    }

    STRING_KEYS = [
designate/objects/pool_catalog_zone.py (new file, 35 lines)
@ -0,0 +1,35 @@
# Copyright (c) 2023 inovex GmbH
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from designate.common import constants
from designate.objects import base
from designate.objects import fields


@base.DesignateRegistry.register
class PoolCatalogZone(base.DictObjectMixin, base.PersistentObjectMixin,
                      base.DesignateObject):
    fields = {
        'catalog_zone_fqdn': fields.DomainField(),
        'catalog_zone_refresh': fields.IntegerFields(
            nullable=True, minimum=0, maximum=2147483647),
        'catalog_zone_tsig_key': fields.StringFields(
            nullable=True, maxLength=160),
        'catalog_zone_tsig_algorithm': fields.EnumField(
            nullable=True, valid_values=constants.TSIG_ALGORITHMS),
    }

    STRING_KEYS = [
        'catalog_zone_fqdn',
    ]
@ -12,6 +12,7 @@
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
from designate.common import constants
|
||||
from designate.objects import base
|
||||
from designate.objects import fields
|
||||
|
||||
@ -26,14 +27,7 @@ class TsigKey(base.DictObjectMixin, base.PersistentObjectMixin,
|
||||
'name': fields.StringFields(nullable=False, maxLength=160),
|
||||
'algorithm': fields.EnumField(
|
||||
nullable=False,
|
||||
valid_values=[
|
||||
'hmac-md5',
|
||||
'hmac-sha1',
|
||||
'hmac-sha224',
|
||||
'hmac-sha256',
|
||||
'hmac-sha384',
|
||||
'hmac-sha512'
|
||||
]
|
||||
valid_values=constants.TSIG_ALGORITHMS
|
||||
),
|
||||
'secret': fields.StringFields(maxLength=160),
|
||||
'scope': fields.EnumField(
|
||||
|
@ -61,7 +61,8 @@ class Zone(base.DesignateObject, base.DictObjectMixin,
|
||||
'masters': fields.ObjectField('ZoneMasterList', nullable=True),
|
||||
'shared': fields.BooleanField(default=False, nullable=True),
|
||||
'type': fields.EnumField(nullable=True,
|
||||
valid_values=['SECONDARY', 'PRIMARY'],
|
||||
valid_values=['SECONDARY', 'PRIMARY',
|
||||
'CATALOG'],
|
||||
read_only=False
|
||||
),
|
||||
'transferred_at': fields.DateTimeField(nullable=True, read_only=False),
|
||||
|
@ -19,13 +19,18 @@ from oslo_utils import timeutils
|
||||
from sqlalchemy import case, select, distinct, func
|
||||
from sqlalchemy.sql.expression import or_, literal_column
|
||||
|
||||
from designate.common import constants
|
||||
from designate import exceptions
|
||||
from designate import objects
|
||||
from designate.objects.adapters import DesignateAdapter
|
||||
from designate.storage import sql
|
||||
from designate.storage.sqlalchemy import base
|
||||
from designate.storage.sqlalchemy import tables
|
||||
|
||||
import designate.conf
|
||||
|
||||
|
||||
CONF = designate.conf.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
MAXIMUM_SUBZONE_DEPTH = 128
|
||||
@ -1393,8 +1398,30 @@ class SQLAlchemyStorage(base.SQLAlchemy):
|
||||
pool.also_notifies = self._find_pool_also_notifies(
|
||||
context, {'pool_id': pool.id})
|
||||
|
||||
try:
|
||||
catalog_zone = self.get_catalog_zone(context, pool)
|
||||
|
||||
try:
|
||||
tsigkey = self.find_tsigkey(
|
||||
context, criterion={'resource_id': catalog_zone.id})
|
||||
except exceptions.TsigKeyNotFound:
|
||||
tsigkey = None
|
||||
|
||||
secret = tsigkey.secret if tsigkey is not None else None
|
||||
algorithm = tsigkey.algorithm if tsigkey is not None else None
|
||||
|
||||
pool.catalog_zone = objects.PoolCatalogZone(
|
||||
catalog_zone_fqdn=catalog_zone.name,
|
||||
catalog_zone_refresh=catalog_zone.refresh,
|
||||
catalog_zone_tsig_key=secret,
|
||||
catalog_zone_tsig_algorithm=algorithm,
|
||||
)
|
||||
except exceptions.ZoneNotFound:
|
||||
pool.catalog_zone = None
|
||||
|
||||
pool.obj_reset_changes(['attributes', 'ns_records', 'nameservers',
|
||||
'targets', 'also_notifies'])
|
||||
'targets', 'also_notifies', 'catalog_zone']
|
||||
)
|
||||
|
||||
if one:
|
||||
_load_relations(pools)
|
||||
@ -1414,7 +1441,7 @@ class SQLAlchemyStorage(base.SQLAlchemy):
|
||||
pool = self._create(
|
||||
tables.pools, pool, exceptions.DuplicatePool,
|
||||
['attributes', 'ns_records', 'nameservers', 'targets',
|
||||
'also_notifies'])
|
||||
'also_notifies', 'catalog_zone'])
|
||||
|
||||
if pool.obj_attr_is_set('attributes'):
|
||||
for pool_attribute in pool.attributes:
|
||||
@ -1449,6 +1476,8 @@ class SQLAlchemyStorage(base.SQLAlchemy):
|
||||
pool.obj_reset_changes(['attributes', 'ns_records', 'nameservers',
|
||||
'targets', 'also_notifies'])
|
||||
|
||||
self._ensure_catalog_zone_config(context, pool)
|
||||
|
||||
return pool
|
||||
|
||||
def get_pool(self, context, pool_id):
|
||||
@ -1496,7 +1525,7 @@ class SQLAlchemyStorage(base.SQLAlchemy):
|
||||
pool = self._update(context, tables.pools, pool,
|
||||
exceptions.DuplicatePool, exceptions.PoolNotFound,
|
||||
['attributes', 'ns_records', 'nameservers',
|
||||
'targets', 'also_notifies'])
|
||||
'targets', 'also_notifies', 'catalog_zone'])
|
||||
|
||||
for attribute_name in ('attributes', 'ns_records', 'nameservers',
|
||||
'targets', 'also_notifies'):
|
||||
@ -1507,6 +1536,8 @@ class SQLAlchemyStorage(base.SQLAlchemy):
|
||||
# refreshed in the pool object
|
||||
updated_pool = self.get_pool(context, pool.id)
|
||||
|
||||
self._ensure_catalog_zone_config(context, pool)
|
||||
|
||||
return updated_pool
|
||||
|
||||
def delete_pool(self, context, pool_id):
|
||||
@ -1518,6 +1549,20 @@ class SQLAlchemyStorage(base.SQLAlchemy):
|
||||
"""
|
||||
pool = self._find_pools(context, {'id': pool_id}, one=True)
|
||||
|
||||
try:
|
||||
catalog_zone = self.get_catalog_zone(context, pool)
|
||||
|
||||
try:
|
||||
catalog_zone_tsig = self.find_tsigkey(
|
||||
context, criterion={'resource_id': catalog_zone.id})
|
||||
self.delete_tsigkey(context, catalog_zone_tsig.id)
|
||||
except exceptions.TsigKeyNotFound:
|
||||
pass
|
||||
|
||||
self.delete_zone(context, catalog_zone.id)
|
||||
except exceptions.ZoneNotFound:
|
||||
pass
|
||||
|
||||
return self._delete(context, tables.pools, pool,
|
||||
exceptions.PoolNotFound)
|
||||
|
||||
@ -2421,3 +2466,229 @@ class SQLAlchemyStorage(base.SQLAlchemy):
|
||||
if criterion is not None and criterion.get('name', '').startswith('*'):
|
||||
criterion['reverse_name'] = criterion.pop('name')[::-1]
|
||||
return criterion
|
||||
|
||||
def _create_catalog_zone(self, pool):
|
||||
catalog_zone = objects.Zone(
|
||||
name=pool.catalog_zone.catalog_zone_fqdn,
|
||||
email=CONF[
|
||||
'service:central'].managed_resource_email,
|
||||
refresh=pool.catalog_zone.catalog_zone_refresh,
|
||||
serial=1,
|
||||
pool_id=pool.id,
|
||||
type=constants.ZONE_CATALOG
|
||||
)
|
||||
return catalog_zone
|
||||
|
||||
def get_catalog_zone(self, context, pool):
|
||||
catalog_zone = self.find_zone(
|
||||
context, criterion={
|
||||
'pool_id': pool.id, 'type': constants.ZONE_CATALOG})
|
||||
return catalog_zone
|
||||
|
||||
def _ensure_catalog_zone_config(self, context, pool):
|
||||
if (
|
||||
not pool.obj_attr_is_set("catalog_zone") or
|
||||
pool.catalog_zone is None
|
||||
):
|
||||
return
|
||||
|
||||
try:
|
||||
self.get_catalog_zone(context, pool)
|
||||
except exceptions.ZoneNotFound:
|
||||
catalog_zone = self._create_catalog_zone(pool)
|
||||
self.create_zone(context, catalog_zone)
|
||||
|
||||
self._ensure_catalog_zone_consistent(context, pool)
|
||||
|
||||
def _ensure_catalog_zone_consistent(self, context, pool):
|
||||
"""
|
||||
Ensure a catalog zone's data as defined in pools.yaml is consistent
|
||||
with its values in the database.
|
||||
|
||||
:param context: RPC Context.
|
||||
:param pool: The pool to ensure catalog zone consistency for.
|
||||
"""
|
||||
if not pool.obj_attr_is_set("catalog_zone") or not pool.catalog_zone:
|
||||
return
|
||||
|
||||
catalog_zone = self.get_catalog_zone(context, pool)
|
||||
catalog_zone.attributes = self._find_zone_attributes(
|
||||
context, {'zone_id': catalog_zone.id})
|
||||
|
||||
catalog_zone = self._ensure_catalog_zone_info_consistent(
|
||||
context, catalog_zone, pool.catalog_zone) or catalog_zone
|
||||
self._ensure_catalog_zone_soa_consistent(context, catalog_zone, pool)
|
||||
self._ensure_catalog_zone_tsig_data_consistent(
|
||||
context, catalog_zone, pool.catalog_zone)
|
||||
|
||||
def _ensure_catalog_zone_info_consistent(
|
||||
self, context, catalog_zone, values):
|
||||
"""
|
||||
Ensure a catalog zone's FQDN and refresh interval as defined in
|
||||
pools.yaml are consistent with their values in the database.
|
||||
|
||||
:param context: RPC Context.
|
||||
:param catalog_zone: The catalog zone to ensure consistency for.
|
||||
:param values: The catalog zone values as defined in pools.yaml.
|
||||
"""
|
||||
if (
|
||||
not catalog_zone.attributes or
|
||||
catalog_zone.attributes.get('catalog_zone_fqdn') !=
|
||||
values.catalog_zone_fqdn or
|
||||
catalog_zone.attributes.get('catalog_zone_refresh') !=
|
||||
values.catalog_zone_refresh
|
||||
):
|
||||
catalog_zone.attributes = objects.ZoneAttributeList()
|
||||
catalog_zone_fqdn = objects.ZoneAttribute()
|
||||
catalog_zone_fqdn.zone_id = catalog_zone.id
|
||||
catalog_zone_fqdn.key = 'catalog_zone_fqdn'
|
||||
catalog_zone_fqdn.value = values.catalog_zone_fqdn
|
||||
|
||||
catalog_zone_refresh = objects.ZoneAttribute()
|
||||
catalog_zone_refresh.zone_id = catalog_zone.id
|
||||
catalog_zone_refresh.key = 'catalog_zone_refresh'
|
||||
catalog_zone_refresh.value = values.catalog_zone_refresh
|
||||
catalog_zone.attributes.append(catalog_zone_fqdn)
|
||||
catalog_zone.attributes.append(catalog_zone_refresh)
|
||||
|
||||
return self.update_zone(context, catalog_zone)
|
||||
|
||||
def _ensure_catalog_zone_soa_consistent(self, context, catalog_zone, pool):
|
||||
"""
|
||||
Ensure a catalog zone's SOA based on the values in pools.yaml is
|
||||
consistent with its values in the database.
|
||||
|
||||
:param context: RPC Context.
|
||||
:param catalog_zone: The catalog zone to ensure consistency for.
|
||||
:param pool: The catalog_zone's pool.
|
||||
"""
|
||||
soa_record = objects.RecordList()
|
||||
soa_record.append(
|
||||
objects.Record(
|
||||
data=f'{pool.ns_records[0]["hostname"]} '
|
||||
f'{catalog_zone.attributes.get("catalog_zone_fqdn")} '
|
||||
f'{catalog_zone.serial} '
|
||||
f'{catalog_zone.attributes.get("catalog_zone_refresh")} '
|
||||
f'{catalog_zone.retry} '
|
||||
'2147483646 '
|
||||
f'{catalog_zone.minimum}'
|
||||
)
|
||||
)
|
||||
soa = objects.RecordSet(
|
||||
name=catalog_zone.name,
|
||||
type='SOA',
|
||||
records=soa_record
|
||||
)
|
||||
|
||||
try:
|
||||
soa_db = self.find_recordset(
|
||||
context, criterion={'zone_id': catalog_zone.id, 'type': 'SOA'})
|
||||
soa_db.name = catalog_zone.name
|
||||
soa_db.records = soa_record
|
||||
self.update_recordset(context, soa_db)
|
||||
except exceptions.RecordSetNotFound:
|
||||
self.create_recordset(context, catalog_zone.id, soa)
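As a worked example of the SOA data string assembled above (all concrete values are illustrative, not taken from a real deployment):

# Values assumed for illustration: first pool ns_record hostname, catalog
# zone FQDN, and the catalog zone's serial/refresh/retry/minimum.
ns_hostname = 'ns1.example.org.'
catalog_zone_fqdn = 'cat.example.org.'
serial, refresh, retry, minimum = 1, 60, 600, 3600

soa_data = (
    f'{ns_hostname} {catalog_zone_fqdn} {serial} {refresh} {retry} '
    '2147483646 '  # expire is pinned to a large constant by the code above
    f'{minimum}'
)
# -> 'ns1.example.org. cat.example.org. 1 60 600 2147483646 3600'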
|
||||
|
||||
def _ensure_catalog_zone_tsig_data_consistent(
|
||||
self, context, catalog_zone, values):
|
||||
"""
|
||||
Ensure a catalog zone's TSIG key and TSIG algorithm as defined in
|
||||
pools.yaml are consistent with their values in the database.
|
||||
|
||||
:param context: RPC Context.
|
||||
:param catalog_zone: The catalog zone to ensure consistency for.
|
||||
:param values: The catalog zone values as defined in pools.yaml.
|
||||
"""
|
||||
if (
|
||||
not values.catalog_zone_tsig_key or not
|
||||
values.catalog_zone_tsig_algorithm
|
||||
):
|
||||
return
|
||||
|
||||
tsig_key = values.catalog_zone_tsig_key
|
||||
tsig_algorithm = values.catalog_zone_tsig_algorithm
|
||||
|
||||
try:
|
||||
tsigkey = self.find_tsigkey(
|
||||
context, criterion={'resource_id': catalog_zone.id})
|
||||
|
||||
if (
|
||||
tsigkey.name !=
|
||||
catalog_zone.attributes.get('catalog_zone_fqdn') or
|
||||
tsigkey.secret != tsig_key or
|
||||
tsigkey.algorithm != tsig_algorithm
|
||||
):
|
||||
tsigkey.name = catalog_zone.attributes.get('catalog_zone_fqdn')
|
||||
tsigkey.secret = tsig_key
|
||||
tsigkey.algorithm = tsig_algorithm
|
||||
self.update_tsigkey(context, tsigkey)
|
||||
except exceptions.TsigKeyNotFound:
|
||||
tsigkey = objects.TsigKey(
|
||||
name=catalog_zone.attributes.get('catalog_zone_fqdn'),
|
||||
secret=tsig_key,
|
||||
algorithm=tsig_algorithm,
|
||||
scope='ZONE',
|
||||
resource_id=catalog_zone.id,
|
||||
)
|
||||
tsigkey = DesignateAdapter.parse(
|
||||
'API_v2', tsigkey, objects.TsigKey())
|
||||
tsigkey.validate()
|
||||
self.create_tsigkey(context, tsigkey)
|
||||
|
||||
def get_catalog_zone_records(self, context, pool):
|
||||
catalog_zone = self.get_catalog_zone(context, pool)
|
||||
zones = self.find_zones(
|
||||
context, criterion={'pool_id': pool.id, 'type': '!CATALOG'})
|
||||
soa_record = self.find_recordset(
|
||||
context, criterion={'zone_id': catalog_zone.id, 'type': 'SOA'})
|
||||
records = []
|
||||
|
||||
# Catalog zones require one NS record using NSDNAME 'invalid.'
|
||||
# per RFC 9432
|
||||
ns_record = objects.RecordList()
|
||||
ns_record.append(objects.Record(data='invalid.'))
|
||||
records.append(
|
||||
objects.RecordSet(
|
||||
name=pool.catalog_zone.catalog_zone_fqdn,
|
||||
type='NS',
|
||||
records=ns_record
|
||||
)
|
||||
)
|
||||
|
||||
# Catalog zones require a TXT record with the schema version,
|
||||
# currently '2' per RFC 9432
|
||||
txt_record = objects.RecordList()
|
||||
txt_record.append(objects.Record(data='2'))
|
||||
records.append(
|
||||
objects.RecordSet(
|
||||
name=f'version.{pool.catalog_zone.catalog_zone_fqdn}',
|
||||
type='TXT',
|
||||
records=txt_record
|
||||
)
|
||||
)
|
||||
|
||||
for z in zones:
|
||||
# If member zone is scheduled for deletion, do not include it in
|
||||
# catalog. Otherwise, zone poller will wait for zone's deletion on
|
||||
# secondary DNS servers, which will not happen since the zone is
|
||||
# still in catalog (deadlock).
|
||||
if z.action == 'DELETE':
|
||||
continue
|
||||
|
||||
rs = objects.RecordList()
|
||||
rs.append(
|
||||
objects.Record(
|
||||
data=z.name
|
||||
)
|
||||
)
|
||||
record = objects.RecordSet(
|
||||
name=f'{z.id}.zones.{soa_record.name}',
|
||||
type='PTR',
|
||||
records=rs
|
||||
)
|
||||
records.append(record)
|
||||
|
||||
records.insert(0, soa_record)
|
||||
records.append(soa_record)
|
||||
|
||||
return records
|
@ -0,0 +1,37 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Add catalog zones

Revision ID: 9099de8ae11c
Revises: a005af3aa38e
Create Date: 2023-05-15 09:30:11.476307

"""
from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision = '9099de8ae11c'
down_revision = 'a005af3aa38e'
branch_labels = None
depends_on = None


def upgrade() -> None:
    ZONE_TYPES = ['PRIMARY', 'SECONDARY', 'CATALOG']

    with op.batch_alter_table('zones') as batch_op:
        batch_op.alter_column('type', type_=sa.Enum(name='type',
                                                    *ZONE_TYPES),
                              existing_type=sa.Enum, existing_nullable=False)
@ -38,7 +38,7 @@ TSIG_SCOPES = ['POOL', 'ZONE']
|
||||
POOL_PROVISIONERS = ['UNMANAGED']
|
||||
ACTIONS = ['CREATE', 'DELETE', 'UPDATE', 'NONE']
|
||||
|
||||
ZONE_TYPES = ('PRIMARY', 'SECONDARY',)
|
||||
ZONE_TYPES = ('PRIMARY', 'SECONDARY', 'CATALOG')
|
||||
ZONE_TASK_TYPES = ['IMPORT', 'EXPORT']
|
||||
|
||||
SERVICE_STATES = [
|
||||
|
@ -116,7 +116,7 @@ class TestCase(base.BaseTestCase):
|
||||
'resource_id': '7fbb6304-5e74-4691-bd80-cef3cff5fe2f',
|
||||
}]
|
||||
|
||||
# The last zone is invalid
|
||||
# The 4th zone is invalid, the last zone is a catalog zone
|
||||
zone_fixtures = {
|
||||
'PRIMARY': [
|
||||
{
|
||||
@ -135,6 +135,10 @@ class TestCase(base.BaseTestCase):
|
||||
'name': 'invalid.com.....',
|
||||
'type': 'PRIMARY',
|
||||
'email': 'example@invalid.com',
|
||||
}, {
|
||||
'name': 'example.com.',
|
||||
'type': 'CATALOG',
|
||||
'email': 'example@example.com',
|
||||
}
|
||||
],
|
||||
'SECONDARY': [
|
||||
@ -227,6 +231,28 @@ class TestCase(base.BaseTestCase):
|
||||
'description': 'Pool-Two description',
|
||||
'attributes': [{'key': 'scope', 'value': 'public'}],
|
||||
'ns_records': [{'priority': 1, 'hostname': 'ns1.example.org.'}]},
|
||||
{'name': 'Pool-With-Catalog-Zone',
|
||||
'description': 'Pool with catalog zone description',
|
||||
'attributes': [{'key': 'scope', 'value': 'public'}],
|
||||
'ns_records': [{'priority': 1, 'hostname': 'ns1.example.org.'}],
|
||||
'catalog_zone': {'catalog_zone_fqdn': 'cat.example.org.',
|
||||
'catalog_zone_refresh': 60}},
|
||||
{'name': 'Pool-With-Catalog-Zone-And-TSIG',
|
||||
'description': 'Pool with catalog zone description',
|
||||
'attributes': [{'key': 'scope', 'value': 'public'}],
|
||||
'ns_records': [{'priority': 1, 'hostname': 'ns1.example.org.'}],
|
||||
'catalog_zone': {'catalog_zone_fqdn': 'cat.example.org.',
|
||||
'catalog_zone_refresh': 60,
|
||||
'catalog_zone_tsig_key': 'SomeSecretKey',
|
||||
'catalog_zone_tsig_algorithm': 'hmac-md5'}},
|
||||
{'name': 'Pool-With-Catalog-Zone-And-Invalid-TSIG',
|
||||
'description': 'Pool with catalog zone description',
|
||||
'attributes': [{'key': 'scope', 'value': 'public'}],
|
||||
'ns_records': [{'priority': 1, 'hostname': 'ns1.example.org.'}],
|
||||
'catalog_zone': {'catalog_zone_fqdn': 'cat.example.org.',
|
||||
'catalog_zone_refresh': 60,
|
||||
'catalog_zone_tsig_key': 'AnotherSecretKey',
|
||||
'catalog_zone_tsig_algorithm': 'no-algorithm'}},
|
||||
]
|
||||
|
||||
pool_attribute_fixtures = [
|
||||
|
@ -221,7 +221,7 @@ class ApiV2ZonesTest(v2.ApiV2TestCase):
|
||||
|
||||
def test_create_zone_invalid_name(self):
|
||||
# Try to create a zone with an invalid name
|
||||
fixture = self.get_zone_fixture(fixture=-1)
|
||||
fixture = self.get_zone_fixture(fixture=3)
|
||||
|
||||
# Ensure it fails with a 400
|
||||
self._assert_exception('invalid_object', 400, self.client.post_json,
|
||||
@ -357,6 +357,45 @@ class ApiV2ZonesTest(v2.ApiV2TestCase):
|
||||
self.client.get(url, status=406, headers={'Accept': 'test/goat',
|
||||
'X-Test-Role': 'member'})
|
||||
|
||||
def test_get_catalog_zone(self):
|
||||
catalog_zone_fixture = self.get_zone_fixture(fixture=4)
|
||||
catalog_zone = self.storage.create_zone(
|
||||
self.admin_context, objects.Zone.from_dict(catalog_zone_fixture))
|
||||
|
||||
response = self.client.get('/zones/',
|
||||
headers={
|
||||
'Accept': 'application/json',
|
||||
'X-Test-Role': 'admin',
|
||||
'X-Auth-All-Projects': 'True',
|
||||
})
|
||||
self.assertEqual(catalog_zone.id, response.json['zones'][0]['id'])
|
||||
|
||||
response = self.client.get('/zones/%s' % catalog_zone['id'],
|
||||
headers={
|
||||
'Accept': 'application/json',
|
||||
'X-Test-Role': 'admin',
|
||||
'X-Auth-All-Projects': 'True',
|
||||
})
|
||||
self.assertEqual(catalog_zone.id, response.json['id'])
|
||||
|
||||
def test_get_catalog_zone_no_admin(self):
|
||||
catalog_zone_fixture = self.get_zone_fixture(fixture=4)
|
||||
zone = self.storage.create_zone(
|
||||
self.admin_context, objects.Zone.from_dict(catalog_zone_fixture))
|
||||
|
||||
response = self.client.get(
|
||||
'/zones/',
|
||||
headers={
|
||||
'Accept': 'application/json',
|
||||
})
|
||||
|
||||
self.assertEqual([], response.json['zones'])
|
||||
self._assert_exception(
|
||||
'zone_not_found', 404, self.client.get, '/zones/%s' % zone['id'],
|
||||
headers={
|
||||
'Accept': 'application/json',
|
||||
})
|
||||
|
||||
def test_update_zone(self):
|
||||
# Create a zone
|
||||
zone = self.create_zone()
|
||||
|
@ -381,6 +381,7 @@ class CentralServiceTestCase(CentralBasic):
|
||||
self.service.get_zone_ns_records = mock.Mock(
|
||||
return_value=[unit.RoObject(hostname='host_foo')]
|
||||
)
|
||||
self.service._ensure_catalog_zone_serial_increment = mock.Mock()
|
||||
|
||||
def create_zone(ctx, zone):
|
||||
return zone
|
||||
@ -757,6 +758,8 @@ class CentralZoneTestCase(CentralBasic):
|
||||
)
|
||||
)
|
||||
)
|
||||
self.service.storage.get_catalog_zone = mock.Mock(
|
||||
side_effect=exceptions.ZoneNotFound)
|
||||
|
||||
out = self.service.create_zone(
|
||||
self.context,
|
||||
@ -806,7 +809,7 @@ class CentralZoneTestCase(CentralBasic):
|
||||
self.assertEqual(CentralZoneTestCase.pool_id, pool_id)
|
||||
|
||||
def test_find_zones(self):
|
||||
self.context = unit.RoObject(project_id='t')
|
||||
self.context = unit.RoObject(project_id='t', roles=[])
|
||||
self.service.storage.find_zones = mock.Mock()
|
||||
self.service.find_zones(self.context)
|
||||
self.assertTrue(self.service.storage.find_zones.called)
|
||||
@ -822,6 +825,7 @@ class CentralZoneTestCase(CentralBasic):
|
||||
name='foo',
|
||||
tenant_id='2',
|
||||
shared=self.zone_shared,
|
||||
type='PRIMARY',
|
||||
)
|
||||
self.service.storage.count_zones.return_value = 2
|
||||
|
||||
@ -838,12 +842,16 @@ class CentralZoneTestCase(CentralBasic):
|
||||
}
|
||||
)
|
||||
|
||||
def test_delete_zone_abandon(self):
|
||||
@mock.patch.object(designate.central.service.Service,
|
||||
'_ensure_catalog_zone_serial_increment')
|
||||
def test_delete_zone_abandon(
|
||||
self, mock_ensure_catalog_zone_serial_increment):
|
||||
self.service.storage.get_zone.return_value = unit.RoObject(
|
||||
name='foo',
|
||||
tenant_id='2',
|
||||
id=CentralZoneTestCase.zone_id_2,
|
||||
shared=self.zone_shared,
|
||||
type='PRIMARY',
|
||||
)
|
||||
self.context.abandon = True
|
||||
self.service.storage.count_zones.return_value = 0
|
||||
@ -860,13 +868,17 @@ class CentralZoneTestCase(CentralBasic):
|
||||
}
|
||||
)
|
||||
|
||||
def test_delete_zone(self):
|
||||
# RoObject not compatible with _ensure_catalog_zone_serial_increment
|
||||
@mock.patch.object(designate.central.service.Service,
|
||||
'_ensure_catalog_zone_serial_increment')
|
||||
def test_delete_zone(self, mock_ensure_catalog_zone_serial_increment):
|
||||
self.context.abandon = False
|
||||
self.context.hard_delete = False
|
||||
self.service.storage.get_zone.return_value = unit.RoObject(
|
||||
name='foo',
|
||||
tenant_id='2',
|
||||
shared=self.zone_shared,
|
||||
type='PRIMARY',
|
||||
)
|
||||
self.service._delete_zone_in_storage = mock.Mock(
|
||||
return_value=unit.RoObject(
|
||||
@ -892,13 +904,18 @@ class CentralZoneTestCase(CentralBasic):
|
||||
}
|
||||
)
|
||||
|
||||
def test_delete_zone_hard_delete(self):
|
||||
# RoObject not compatible with _ensure_catalog_zone_serial_increment
|
||||
@mock.patch.object(designate.central.service.Service,
|
||||
'_ensure_catalog_zone_serial_increment')
|
||||
def test_delete_zone_hard_delete(
|
||||
self, mock_ensure_catalog_zone_serial_increment):
|
||||
self.context.abandon = False
|
||||
self.context.hard_delete = True
|
||||
self.service.storage.get_zone.return_value = unit.RoObject(
|
||||
name='foo',
|
||||
tenant_id='2',
|
||||
shared=False
|
||||
shared=False,
|
||||
type='PRIMARY',
|
||||
)
|
||||
self.service._delete_zone_in_storage = mock.Mock(
|
||||
return_value=unit.RoObject(
|
||||
@ -925,6 +942,7 @@ class CentralZoneTestCase(CentralBasic):
|
||||
)
|
||||
|
||||
def test_delete_zone_in_storage(self):
|
||||
self.service._ensure_catalog_zone_serial_increment = mock.Mock()
|
||||
self.service._delete_zone_in_storage(
|
||||
self.context,
|
||||
unit.RwObject(action='', status=''),
|
||||
@ -963,7 +981,7 @@ class CentralZoneTestCase(CentralBasic):
|
||||
self.service.storage.get_zone.return_value = unit.RoObject(
|
||||
name='example.org.',
|
||||
tenant_id='2',
|
||||
type='PRIMARY'
|
||||
type='PRIMARY',
|
||||
)
|
||||
|
||||
exc = self.assertRaises(rpc_dispatcher.ExpectedException,
|
||||
@ -1155,7 +1173,7 @@ class CentralZoneTestCase(CentralBasic):
|
||||
|
||||
def test_update_recordset_action_delete(self):
|
||||
self.service.storage.get_zone.return_value = unit.RoObject(
|
||||
action='DELETE', tenant_id=''
|
||||
action='DELETE', tenant_id='', type='PRIMARY'
|
||||
)
|
||||
recordset = mock.Mock(spec=objects.RecordSet)
|
||||
recordset.obj_get_changes.return_value = ['foo']
|
||||
@ -1650,7 +1668,8 @@ class CentralZoneExportTests(CentralBasic):
|
||||
task_type='EXPORT',
|
||||
status='PENDING',
|
||||
message=None,
|
||||
tenant_id='t'
|
||||
tenant_id='t',
|
||||
type='PRIMARY',
|
||||
)
|
||||
)
|
||||
|
||||
|
@ -4575,3 +4575,91 @@ class CentralServiceTest(designate.tests.functional.TestCase):
|
||||
self.assertEqual('ACTIVE', recordset.status)
|
||||
for record in recordset.records:
|
||||
self.assertEqual('ACTIVE', record.status)
|
||||
|
||||
def test_create_catalog_member_zone(self):
|
||||
pool = self.create_pool(fixture=2)
|
||||
|
||||
self.storage._ensure_catalog_zone_config(self.admin_context, pool)
|
||||
catalog_zone = self.storage.get_catalog_zone(self.admin_context, pool)
|
||||
self.assertEqual(pool.id, catalog_zone.pool_id)
|
||||
self.assertFalse(catalog_zone.increment_serial)
|
||||
|
||||
# Create a member zone
|
||||
member_zone = self.create_zone(
|
||||
attributes=[{'key': 'pool_id', 'value': pool.id}])
|
||||
self.assertEqual(pool.id, member_zone.pool_id)
|
||||
|
||||
updated_catalog_zone = self.storage.get_catalog_zone(
|
||||
self.admin_context, pool)
|
||||
self.assertTrue(updated_catalog_zone.increment_serial)
|
||||
|
||||
def test_update_catalog_member_zone(self):
|
||||
pool = self.create_pool(fixture=2)
|
||||
|
||||
self.storage._ensure_catalog_zone_config(self.admin_context, pool)
|
||||
catalog_zone = self.storage.get_catalog_zone(self.admin_context, pool)
|
||||
self.assertEqual(pool.id, catalog_zone.pool_id)
|
||||
self.assertFalse(catalog_zone.increment_serial)
|
||||
|
||||
# Create a member zone
|
||||
member_zone = self.create_zone(
|
||||
attributes=[{'key': 'pool_id', 'value': pool.id}])
|
||||
self.assertEqual(pool.id, member_zone.pool_id)
|
||||
|
||||
# Reset increment_serial flag
|
||||
catalog_zone.increment_serial = False
|
||||
catalog_zone = self.storage.update_zone(
|
||||
self.admin_context, catalog_zone)
|
||||
self.assertFalse(catalog_zone.increment_serial)
|
||||
|
||||
# Update the member zone object
|
||||
member_zone.email = 'info@example.net'
|
||||
self.central_service.update_zone(self.admin_context, member_zone)
|
||||
|
||||
updated_catalog_zone = self.storage.get_catalog_zone(
|
||||
self.admin_context, pool)
|
||||
self.assertFalse(updated_catalog_zone.increment_serial)
|
||||
|
||||
def test_delete_catalog_member_zone(self):
|
||||
pool = self.create_pool(fixture=2)
|
||||
|
||||
self.storage._ensure_catalog_zone_config(self.admin_context, pool)
|
||||
catalog_zone = self.storage.get_catalog_zone(self.admin_context, pool)
|
||||
self.assertEqual(pool.id, catalog_zone.pool_id)
|
||||
self.assertFalse(catalog_zone.increment_serial)
|
||||
|
||||
# Create a member zone
|
||||
member_zone = self.create_zone(
|
||||
attributes=[{'key': 'pool_id', 'value': pool.id}])
|
||||
self.assertEqual(pool.id, member_zone.pool_id)
|
||||
|
||||
# Reset increment_serial flag
|
||||
catalog_zone.increment_serial = False
|
||||
catalog_zone = self.storage.update_zone(
|
||||
self.admin_context, catalog_zone)
|
||||
self.assertFalse(catalog_zone.increment_serial)
|
||||
|
||||
# Delete the member zone
|
||||
self.central_service.delete_zone(self.admin_context, member_zone.id)
|
||||
|
||||
updated_catalog_zone = self.storage.get_catalog_zone(
|
||||
self.admin_context, pool)
|
||||
self.assertTrue(updated_catalog_zone.increment_serial)
|
||||
|
||||
def test_enforce_catalog_zone_policy(self):
|
||||
pool = self.create_pool(fixture=2)
|
||||
catalog_zone = self.storage._create_catalog_zone(pool)
|
||||
context = self.admin_context
|
||||
context.request_id = 'designate-manage'
|
||||
|
||||
self.central_service._enforce_catalog_zone_policy(
|
||||
context, catalog_zone)
|
||||
|
||||
def test_enforce_catalog_zone_policy_no_admin(self):
|
||||
pool = self.create_pool(fixture=2)
|
||||
catalog_zone = self.storage._create_catalog_zone(pool)
|
||||
|
||||
self.assertRaises(
|
||||
exceptions.Forbidden,
|
||||
self.central_service._enforce_catalog_zone_policy,
|
||||
self.get_context(), catalog_zone)
|
||||
|
@ -569,6 +569,133 @@ class MdnsRequestHandlerTest(designate.tests.functional.TestCase):
|
||||
|
||||
self.assertEqual(expected_response, binascii.b2a_hex(response))
|
||||
|
||||
def test_dispatch_opcode_query_AXFR_catalog(self):
|
||||
# Query is for example.com. IN AXFR
|
||||
# id 18883
|
||||
# opcode QUERY
|
||||
# rcode NOERROR
|
||||
# flags AD
|
||||
# edns 0
|
||||
# payload 4096
|
||||
# ;QUESTION
|
||||
# example.com. IN AXFR
|
||||
# ;ANSWER
|
||||
# ;AUTHORITY
|
||||
# ;ADDITIONAL
|
||||
payload = ("49c300200001000000000001076578616d706c6503636f6d0000fc0001"
|
||||
"00fc000100000000000000")
|
||||
|
||||
# id 18883
|
||||
# opcode QUERY
|
||||
# rcode NOERROR
|
||||
# flags QR AA
|
||||
# ;QUESTION
|
||||
# example.com. IN AXFR
|
||||
# ;ANSWER
|
||||
# example.com. 3600 IN SOA ns1.example.org. example.example.com.
|
||||
# -> 1427899961 3600 600 86400 3600
|
||||
# mail.example.com. 3600 IN A 192.0.2.1
|
||||
# example.com. 3600 IN NS ns1.example.org.
|
||||
# ;AUTHORITY
|
||||
# ;ADDITIONAL
|
||||
expected_response = (
|
||||
b"49c384000001000500000000076578616d706c6503636f6d0000fc0001036361"
|
||||
b"74c00c0006000100000e10002707696e76616c696400076578616d706c65c00c"
|
||||
b"00000001000002580001518000000e1000000e10c01d0002000100000e100002"
|
||||
b"c02d0776657273696f6ec01d0010000100000e1000020132053138383833057a"
|
||||
b"6f6e6573c01d000c000100000e1000040161c00cc01d0006000100000e100018"
|
||||
b"c02dc03600000001000002580001518000000e1000000e10"
|
||||
)
|
||||
|
||||
cat_zone = objects.Zone.from_dict({
|
||||
'name': 'cat.example.com.',
|
||||
'ttl': 3600,
|
||||
'serial': 1427899961,
|
||||
'email': 'example@example.com',
|
||||
'type': 'CATALOG'
|
||||
})
|
||||
|
||||
pool = objects.Pool.from_dict({
|
||||
'name': 'POOL',
|
||||
|
||||
})
|
||||
|
||||
def _get_catalog_zone_records(context, pool):
|
||||
records = []
|
||||
|
||||
soa_record = objects.RecordList()
|
||||
soa_record.append(
|
||||
objects.Record(
|
||||
data='invalid. '
|
||||
'example.example.com. '
|
||||
'1 '
|
||||
'600 '
|
||||
'86400 '
|
||||
'3600 '
|
||||
'3600'
|
||||
)
|
||||
)
|
||||
soa = objects.RecordSet(
|
||||
name='cat.example.com.',
|
||||
type='SOA',
|
||||
records=soa_record
|
||||
)
|
||||
records.append(soa)
|
||||
|
||||
ns_record = objects.RecordList()
|
||||
ns_record.append(objects.Record(data='invalid.'))
|
||||
records.append(
|
||||
objects.RecordSet(
|
||||
name='cat.example.com.',
|
||||
type='NS',
|
||||
records=ns_record
|
||||
)
|
||||
)
|
||||
|
||||
txt_record = objects.RecordList()
|
||||
txt_record.append(objects.Record(data='2'))
|
||||
records.append(
|
||||
objects.RecordSet(
|
||||
name='version.cat.example.com.',
|
||||
type='TXT',
|
||||
records=txt_record
|
||||
)
|
||||
)
|
||||
|
||||
ptr_record = objects.RecordList()
|
||||
ptr_record.append(
|
||||
objects.Record(
|
||||
data='a.example.com.'
|
||||
)
|
||||
)
|
||||
records.append(
|
||||
objects.RecordSet(
|
||||
name='18883.zones.cat.example.com.',
|
||||
type='PTR',
|
||||
records=ptr_record
|
||||
)
|
||||
)
|
||||
|
||||
records.append(soa)
|
||||
|
||||
return records
|
||||
|
||||
with mock.patch.object(self.storage, 'find_zone',
|
||||
return_value=cat_zone):
|
||||
with mock.patch.object(self.storage, 'find_pool',
|
||||
return_value=pool):
|
||||
with mock.patch.object(
|
||||
self.storage, 'get_catalog_zone_records',
|
||||
side_effect=_get_catalog_zone_records):
|
||||
request = dns.message.from_wire(binascii.a2b_hex(payload))
|
||||
request.environ = {'addr': self.addr,
|
||||
'context': self.context}
|
||||
|
||||
response = next(self.handler(request)).get_wire()
|
||||
|
||||
self.assertEqual(
|
||||
expected_response, binascii.b2a_hex(response))
|
||||
|
||||
def test_dispatch_opcode_query_AXFR_multiple_messages(self):
|
||||
# Query is for example.com. IN AXFR
|
||||
# id 18883
|
||||
|
@ -118,6 +118,9 @@ class DesignateMigrationsWalk(
|
||||
def _check_a005af3aa38e(self, connection):
|
||||
pass
|
||||
|
||||
def _check_9099de8ae11c(self, connection):
|
||||
pass
|
||||
|
||||
def test_single_base_revision(self):
|
||||
script = alembic_script.ScriptDirectory.from_config(self.config)
|
||||
self.assertEqual(1, len(script.get_bases()))
|
||||
|
@ -2357,6 +2357,30 @@ class SqlalchemyStorageTest(designate.tests.functional.TestCase):
|
||||
self.storage.delete_pool, self.admin_context, pool['id']
|
||||
)
|
||||
|
||||
@mock.patch.object(storage.sqlalchemy.SQLAlchemyStorage, 'delete_tsigkey')
|
||||
def test_delete_pool_catalog_zone_with_tsig(self, mock_delete_tsig):
|
||||
pool = self.create_pool(fixture=3)
|
||||
self.storage.delete_pool(self.admin_context, pool.id)
|
||||
|
||||
mock_delete_tsig.assert_called()
|
||||
|
||||
self.assertRaisesRegex(
|
||||
exceptions.PoolNotFound, 'Could not find Pool',
|
||||
self.storage.delete_pool, self.admin_context, pool.id
|
||||
)
|
||||
|
||||
@mock.patch.object(storage.sqlalchemy.SQLAlchemyStorage, 'delete_tsigkey')
|
||||
def test_delete_pool_catalog_zone_without_tsig(self, mock_delete_tsig):
|
||||
pool = self.create_pool(fixture=2)
|
||||
self.storage.delete_pool(self.admin_context, pool.id)
|
||||
|
||||
mock_delete_tsig.assert_not_called()
|
||||
|
||||
self.assertRaisesRegex(
|
||||
exceptions.PoolNotFound, 'Could not find Pool',
|
||||
self.storage.delete_pool, self.admin_context, pool.id
|
||||
)
|
||||
|
||||
def test_delete_pool_missing(self):
|
||||
uuid = '203ca44f-c7e7-4337-9a02-0d735833e6aa'
|
||||
|
||||
@ -3975,3 +3999,158 @@ class SqlalchemyStorageTest(designate.tests.functional.TestCase):
|
||||
}
|
||||
}
|
||||
self.assertDictEqual(expected, indexes)
|
||||
|
||||
def test_create_catalog_zone(self):
|
||||
pool = self.create_pool(fixture=2)
|
||||
|
||||
catalog_zone = self.storage._create_catalog_zone(pool)
|
||||
|
||||
self.assertEqual("cat.example.org.", catalog_zone.name)
|
||||
self.assertEqual(60, catalog_zone.refresh)
|
||||
self.assertEqual(pool.id, catalog_zone.pool_id)
|
||||
self.assertEqual("CATALOG", catalog_zone.type)
|
||||
self.assertEqual(
|
||||
CONF['service:central'].managed_resource_email,
|
||||
catalog_zone.email)
|
||||
|
||||
def test_get_catalog_zone(self):
|
||||
pool = self.create_pool(fixture=2)
|
||||
|
||||
catalog_zone = self.storage.get_catalog_zone(
|
||||
self.admin_context, pool)
|
||||
self.assertEqual(pool.id, catalog_zone.pool_id)
|
||||
self.assertEqual("CATALOG", catalog_zone.type)
|
||||
|
||||
def test_get_catalog_zone_records(self):
|
||||
pool = self.create_pool(fixture=2)
|
||||
self.storage._ensure_catalog_zone_config(self.admin_context, pool)
|
||||
member_zone = self.create_zone(
|
||||
attributes=[{'key': 'pool_id', 'value': pool.id}])
|
||||
|
||||
catz_records = self.storage.get_catalog_zone_records(
|
||||
self.admin_context, pool)
|
||||
fqdn = pool.catalog_zone.catalog_zone_fqdn
|
||||
|
||||
self.assertEqual(5, len(catz_records))
|
||||
self.assertEqual("SOA", catz_records[0].type)
|
||||
self.assertEqual(fqdn, catz_records[0].name)
|
||||
self.assertEqual("NS", catz_records[1].type)
|
||||
self.assertEqual(fqdn, catz_records[1].name)
|
||||
self.assertEqual("TXT", catz_records[2].type)
|
||||
self.assertEqual(f"version.{fqdn}", catz_records[2].name)
|
||||
self.assertEqual(
|
||||
f"{member_zone.id}.zones.{fqdn}", catz_records[3].name)
|
||||
self.assertEqual("SOA", catz_records[4].type)
|
||||
|
||||
def test_ensure_catalog_zone_config_no_catalog_zone(self):
|
||||
pool = self.storage.find_pools(self.admin_context)[0]
|
||||
self.assertRaises(
|
||||
exceptions.ZoneNotFound, self.storage.get_catalog_zone,
|
||||
self.admin_context, pool)
|
||||
|
||||
self.storage.get_catalog_zone = mock.Mock()
|
||||
self.storage._ensure_catalog_zone_config(
|
||||
self.admin_context, pool)
|
||||
self.storage.get_catalog_zone.assert_not_called()
|
||||
|
||||
@mock.patch.object(storage.sqlalchemy.SQLAlchemyStorage, 'update_zone')
|
||||
@mock.patch.object(storage.sqlalchemy.SQLAlchemyStorage,
|
||||
'_ensure_catalog_zone_consistent')
|
||||
def test_ensure_catalog_zone_config(
|
||||
self, mock_update_zone, mock_ensure_catalog_zone_consistent):
|
||||
self.create_pool(fixture=2)
|
||||
mock_update_zone.assert_called()
|
||||
|
||||
@mock.patch.object(
|
||||
storage.sqlalchemy.SQLAlchemyStorage, 'get_catalog_zone')
|
||||
def test_ensure_catalog_zone_consistent_no_catalog_zone(
|
||||
self, mock_get_catalog_zone):
|
||||
pool = self.create_pool()
|
||||
self.storage._ensure_catalog_zone_consistent(self.admin_context, pool)
|
||||
mock_get_catalog_zone.assert_not_called()
|
||||
|
||||
def test_ensure_catalog_zone_consistent(self):
|
||||
pool = self.create_pool(
|
||||
fixture=3) # Pool with catalog zone and TSIG data
|
||||
self.storage._ensure_catalog_zone_config(self.admin_context, pool)
|
||||
catalog_zone = self.storage.get_catalog_zone(self.admin_context, pool)
|
||||
|
||||
self.assertEqual(catalog_zone.attributes.get('catalog_zone_fqdn'),
|
||||
pool.catalog_zone.catalog_zone_fqdn)
|
||||
|
||||
self.assertEqual(int(
|
||||
catalog_zone.attributes.get('catalog_zone_refresh')),
|
||||
pool.catalog_zone.catalog_zone_refresh)
|
||||
|
||||
# Check SOA
|
||||
catz_records = self.storage.get_catalog_zone_records(
|
||||
self.admin_context, pool)
|
||||
self.assertEqual(catz_records[-1].type, 'SOA')
|
||||
expected = (
|
||||
f'{pool.ns_records[0]["hostname"]} '
|
||||
f'{catalog_zone.attributes.get("catalog_zone_fqdn")} '
|
||||
f'{catalog_zone.serial} '
|
||||
f'{catalog_zone.attributes.get("catalog_zone_refresh")} '
|
||||
f'{catalog_zone.retry} '
|
||||
'2147483646 '
|
||||
f'{catalog_zone.minimum}'
|
||||
)
|
||||
self.assertEqual(expected, catz_records[-1].records[0].data)
|
||||
|
||||
# Check TSIG
|
||||
tsigkey = self.storage.find_tsigkey(
|
||||
self.admin_context, criterion={'resource_id': catalog_zone.id})
|
||||
self.assertEqual(pool.catalog_zone.catalog_zone_tsig_key,
|
||||
tsigkey.secret)
|
||||
self.assertEqual(pool.catalog_zone.catalog_zone_tsig_algorithm,
|
||||
tsigkey.algorithm)
|
||||
|
||||
def test_ensure_catalog_zone_consistent_no_tsig(self):
|
||||
pool = self.create_pool(fixture=2)
|
||||
catalog_zone = self.storage.get_catalog_zone(self.admin_context, pool)
|
||||
|
||||
# Check no TSIG key created
|
||||
self.assertRaises(
|
||||
exceptions.TsigKeyNotFound, self.storage.find_tsigkey,
|
||||
self.admin_context, criterion={'resource_id': catalog_zone.id})
|
||||
|
||||
def test_ensure_catalog_zone_consistent_invalid_tsig(self):
|
||||
# Check invalid TSIG data detected
|
||||
self.assertRaisesRegex(
|
||||
ValueError, 'Field value no-algorithm is invalid',
|
||||
self.create_pool, fixture=4)
|
||||
|
||||
def test_ensure_catalog_zone_consistent_tsig_changed(self):
|
||||
pool = self.create_pool(
|
||||
fixture=3) # Pool with catalog zone and TSIG data
|
||||
|
||||
pool.catalog_zone.catalog_zone_tsig_key = 'SomeNewSecret'
|
||||
pool.catalog_zone.catalog_zone_tsig_algorithm = 'hmac-sha256'
|
||||
|
||||
self.storage._ensure_catalog_zone_config(self.admin_context, pool)
|
||||
catalog_zone = self.storage.get_catalog_zone(self.admin_context, pool)
|
||||
|
||||
# Check TSIG
|
||||
tsigkey = self.storage.find_tsigkey(
|
||||
self.admin_context, criterion={'resource_id': catalog_zone.id})
|
||||
self.assertEqual(pool.catalog_zone.catalog_zone_tsig_key,
|
||||
tsigkey.secret)
|
||||
self.assertEqual(pool.catalog_zone.catalog_zone_tsig_algorithm,
|
||||
tsigkey.algorithm)
|
||||
|
||||
def test_ensure_catalog_zone_consistent_change_pool(self):
|
||||
pool = self.create_pool(fixture=2) # Catalog zone without TSIG
|
||||
|
||||
# Change pool attributes
|
||||
pool.catalog_zone.catalog_zone_fqdn = 'new.example.com.'
|
||||
pool.catalog_zone.catalog_zone_refresh = 3600
|
||||
|
||||
self.storage._ensure_catalog_zone_consistent(self.admin_context, pool)
|
||||
catalog_zone = self.storage.get_catalog_zone(self.admin_context, pool)
|
||||
|
||||
self.assertEqual(
|
||||
pool.catalog_zone.catalog_zone_fqdn,
|
||||
catalog_zone.attributes.get("catalog_zone_fqdn"))
|
||||
self.assertEqual(
|
||||
pool.catalog_zone.catalog_zone_refresh,
|
||||
int(catalog_zone.attributes.get("catalog_zone_refresh")))
|
||||
|
@ -31,3 +31,6 @@
|
||||
- host: 192.0.2.4
|
||||
port: 53
|
||||
|
||||
catalog_zone:
|
||||
catalog_zone_fqdn: example.org.
|
||||
catalog_zone_refresh: '60'
|
||||
|
@ -2,6 +2,9 @@
|
||||
- host: 192.0.2.4
|
||||
port: 53
|
||||
attributes: {}
|
||||
catalog_zone:
|
||||
catalog_zone_fqdn: example.com.
|
||||
catalog_zone_refresh: 60
|
||||
description: Default PowerDNS 4 Pool
|
||||
id: cf2e8eab-76cd-4162-bf76-8aeee3556de0
|
||||
name: default
|
||||
|
@ -132,7 +132,11 @@ class DesignateYAMLAdapterTest(oslotest.base.BaseTestCase):
|
||||
'pool_id': 'cf2e8eab-76cd-4162-bf76-8aeee3556de0',
|
||||
'type': 'pdns4',
|
||||
}
|
||||
]
|
||||
],
|
||||
'catalog_zone': {
|
||||
'catalog_zone_fqdn': 'example.com.',
|
||||
'catalog_zone_refresh': 60,
|
||||
}
|
||||
}
|
||||
|
||||
file = os.path.join(resources.path, 'pools_yaml/sample_output.yaml')
|
||||
|
@ -203,6 +203,9 @@ class TestZoneActionOnTarget(oslotest.base.BaseTestCase):
|
||||
self.target,
|
||||
self.zone_params
|
||||
)
|
||||
self.actor._storage = mock.Mock()
|
||||
self.actor._storage.get_catalog_zone = mock.Mock(
|
||||
side_effect=exceptions.ZoneNotFound)
|
||||
|
||||
self.assertTrue(self.actor())
|
||||
|
||||
@ -212,6 +215,30 @@ class TestZoneActionOnTarget(oslotest.base.BaseTestCase):
|
||||
port=53
|
||||
)
|
||||
|
||||
@mock.patch.object(dnsutils, 'notify')
|
||||
def test_call_create_catalog_zone(self, mock_notify):
|
||||
self.zone = objects.Zone(name='example.org.', action='CREATE')
|
||||
self.actor = zone.ZoneActionOnTarget(
|
||||
self.executor,
|
||||
self.context,
|
||||
self.zone,
|
||||
self.target,
|
||||
self.zone_params
|
||||
)
|
||||
|
||||
self.actor._storage = mock.Mock()
|
||||
self.actor._storage.get_catalog_zone = mock.Mock(
|
||||
return_value=objects.Zone(name='cat.example.org.'))
|
||||
self.assertTrue(self.actor())
|
||||
|
||||
call_catalog_zone = mock.call(
|
||||
'cat.example.org.',
|
||||
'203.0.113.1',
|
||||
port=53
|
||||
)
|
||||
|
||||
mock_notify.assert_has_calls([call_catalog_zone])
|
||||
|
||||
@mock.patch.object(dnsutils, 'notify')
|
||||
def test_call_update(self, mock_notify):
|
||||
self.zone = objects.Zone(name='example.org.', action='UPDATE')
|
||||
@ -222,6 +249,9 @@ class TestZoneActionOnTarget(oslotest.base.BaseTestCase):
|
||||
self.target,
|
||||
self.zone_params,
|
||||
)
|
||||
self.actor._storage = mock.Mock()
|
||||
self.actor._storage.get_catalog_zone = mock.Mock(
|
||||
side_effect=exceptions.ZoneNotFound)
|
||||
|
||||
self.assertTrue(self.actor())
|
||||
|
||||
@ -241,6 +271,9 @@ class TestZoneActionOnTarget(oslotest.base.BaseTestCase):
|
||||
self.target,
|
||||
self.zone_params
|
||||
)
|
||||
self.actor._storage = mock.Mock()
|
||||
self.actor._storage.get_catalog_zone = mock.Mock(
|
||||
side_effect=exceptions.ZoneNotFound)
|
||||
|
||||
self.assertTrue(self.actor())
|
||||
|
||||
@ -258,6 +291,9 @@ class TestZoneActionOnTarget(oslotest.base.BaseTestCase):
|
||||
self.target,
|
||||
self.zone_params
|
||||
)
|
||||
self.actor.storage.find_pool = mock.Mock()
|
||||
self.actor.storage.get_catalog_zone = mock.Mock(
|
||||
side_effect=exceptions.ZoneNotFound)
|
||||
|
||||
self.assertFalse(self.actor())
|
||||
|
||||
|
@ -65,17 +65,41 @@ class ZoneActionOnTarget(base.Task):
|
||||
}
|
||||
)
|
||||
|
||||
# Check whether a catalog zone exists for this pool
|
||||
catalog_zone = None
|
||||
pool = self.storage.find_pool(
|
||||
self.context, criterion={'id': self.zone.pool_id})
|
||||
try:
|
||||
catalog_zone = self.storage.get_catalog_zone(
|
||||
self.context, pool)
|
||||
except exceptions.ZoneNotFound:
|
||||
pass
|
||||
|
||||
for retry in range(0, self.max_retries):
|
||||
try:
|
||||
if self.action == 'CREATE':
|
||||
self.target.backend.create_zone(self.context, self.zone)
|
||||
SendNotify(self.executor, self.zone, self.target)()
|
||||
elif self.action == 'DELETE':
|
||||
self.target.backend.delete_zone(self.context, self.zone,
|
||||
self.zone_params)
|
||||
if catalog_zone is None:
|
||||
if self.action == 'CREATE':
|
||||
self.target.backend.create_zone(
|
||||
self.context, self.zone)
|
||||
SendNotify(self.executor, self.zone, self.target)()
|
||||
elif self.action == 'DELETE' and catalog_zone is None:
|
||||
self.target.backend.delete_zone(
|
||||
self.context, self.zone, self.zone_params)
|
||||
else:
|
||||
self.target.backend.update_zone(
|
||||
self.context, self.zone)
|
||||
SendNotify(self.executor, self.zone, self.target)()
|
||||
else:
|
||||
self.target.backend.update_zone(self.context, self.zone)
|
||||
SendNotify(self.executor, self.zone, self.target)()
|
||||
if (
|
||||
self.action == 'CREATE' or self.action == 'DELETE' or
|
||||
self.zone.type == constants.ZONE_CATALOG
|
||||
):
|
||||
# Member zone created or deleted, or catalog zone
|
||||
# itself modified, NOTIFY via catalog
|
||||
SendNotify(self.executor, catalog_zone, self.target)()
|
||||
else:
|
||||
# Member zone updated
|
||||
SendNotify(self.executor, self.zone, self.target)()
|
||||
|
||||
LOG.debug(
|
||||
'Successfully performed %(action)s for '
|
||||
|
@ -84,6 +84,41 @@ this information into the database.
.. literalinclude:: ../../../etc/designate/pools.yaml.sample
   :language: yaml

.. _catalog_zones:

Catalog zones
-------------

Catalog zones provide an easy way to provision zones on secondary
nameservers: the secondaries learn which zones to serve by transferring a
special zone, the *catalog zone*, via AXFR.

In Designate, catalog zones are configured per pool. A catalog zone includes
all zones of the pool (except the catalog zone itself), called
*member zones*. This means all zones of that pool are automatically synced
to secondary nameservers upon zone creation, update or deletion.
For more details about catalog zones, see
`RFC 9432 <https://datatracker.ietf.org/doc/rfc9432/>`_.

Catalog zones can be configured in ``pools.yaml`` via the *catalog_zone* key
(see the sample above). This example instructs a PowerDNS server listening at
``192.0.2.2:53`` to pull zones via AXFR from Designate's ``mini-DNS`` at
``192.0.2.1:5354``. Note that the secondary nameserver also needs to be
configured to consume the catalog zone; refer to the secondary nameserver's
documentation for details. Once this is set up and applied with
``designate-manage pool update``, Designate handles the catalog zone creation
as well as the synchronization of member zones.

As secondary nameservers configure their zones based on zone transfers (AXFR)
from the catalog zone, it is highly recommended to use transaction signatures
(TSIG) for secure and authenticated zone transfers. See the sample above for
details on how to use catalog zones with TSIG.

.. warning::

   | Even though not mandatory, it is highly recommended to secure transfers of
   | catalog zones with TSIG.

Designate Manage Pools Command Reference
----------------------------------------
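To check what the secondaries will receive, the catalog zone can be transferred directly from mini-DNS with dnspython. This is a sketch under assumptions: the address, port, FQDN and TSIG algorithm follow the illustrative samples in this change, the secret must be supplied in its base64 form (a placeholder is used below), and mini-DNS must permit the transfer for the querying host.

import dns.query
import dns.tsigkeyring
import dns.zone

MDNS_HOST = '192.0.2.1'
CATALOG_FQDN = 'cat.example.org.'

# Base64-encoded TSIG secret configured for the catalog zone (placeholder).
keyring = dns.tsigkeyring.from_text({CATALOG_FQDN: 'U29tZVNlY3JldEtleQ=='})

xfr = dns.query.xfr(MDNS_HOST, CATALOG_FQDN, port=5354,
                    keyring=keyring, keyname=CATALOG_FQDN,
                    keyalgorithm='hmac-sha512.')
catalog = dns.zone.from_xfr(xfr)

# Print every node of the catalog zone: SOA, NS 'invalid.', the version TXT
# record and one PTR entry per member zone.
for name, node in catalog.nodes.items():
    print(name, node.to_text(name))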
@ -50,3 +50,12 @@
      - host: 192.0.2.4
        port: 53

  # Optional configuration to provide a catalog zone for the pool's zones.
  # If configured, catalog_zone_fqdn is required and all other keys are
  # optional.
  catalog_zone:
    catalog_zone_fqdn: cat.example.org.
    catalog_zone_refresh: 60
    # TSIG secret and algorithm to use for securing AXFRs for catalog zones.
    catalog_zone_tsig_key: SomeSecretKey
    catalog_zone_tsig_algorithm: hmac-sha512
@ -0,0 +1,18 @@
---
features:
  - |
    Add support for providing catalog zones.

    Designate can now optionally provide catalog zones for pools. Defined in
    `RFC 9432 <https://datatracker.ietf.org/doc/rfc9432/>`_, catalog zones
    allow DNS zones to be provisioned on secondary nameservers using DNS
    alone. A special zone, the catalog zone, serves as a list of all zones
    the secondary nameservers should serve and for which they will set
    themselves up automatically. Zones are synchronized via the catalog
    zone using well-known and proven AXFR, just like a zone's DNS records.

    Provisioning via catalog zones is already supported by popular DNS
    implementations such as BIND9, Knot and PowerDNS (and likely others).
    Apart from being a standardized provisioning model, the main advantage of
    catalog zones is the scalability and robustness of keeping secondary
    nameservers in sync.