Multi-pools implementation

This sets up two pools, each served by its own instance of named. The
second instance uses /etc/bind-2 and /var/cache/bind-2 (named-2 paths
on Fedora) as its configuration and cache locations. The generated
/etc/designate/pools.yaml reflects these changes.

Co-Authored-By: Don Kehn <dekehn@gmail.com>
Co-Authored-By: Omer Schwartz <oschwart@redhat.com>
Change-Id: Icf73e730b31ab26b8be65347239636c0137ab4bd
dekehn 2022-09-15 20:28:18 +00:00 committed by Omer
parent 706db9be72
commit 0e9294aca1
8 changed files with 568 additions and 14 deletions
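A quick way to see the result on a stacked DevStack node (a hedged sketch; the service names, paths, and Ubuntu naming come from the devstack plugin added below):

$ ls /etc/bind-2          # second instance's configuration (/etc/named-2 on Fedora)
$ pgrep -a named          # expect one named process per instance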

View File

@@ -67,22 +67,29 @@ class PoolCommands(base.Commands):
@base.args('--pool_id', help='ID of the pool to be examined',
default=CONF['service:central'].default_pool_id)
def show_config(self, pool_id):
@base.args('--all_pools', help='show the config of all the pools',
default=False, required=False, action='store_true')
def show_config(self, pool_id, all_pools):
self._setup()
self.output_message.append('Pool Configuration:')
self.output_message.append('-------------------')
try:
if not uuidutils.is_uuid_like(pool_id):
self.output_message.append('Not a valid uuid: %s' % pool_id)
raise SystemExit(1)
pool = self.central_api.find_pool(self.context, {'id': pool_id})
pools = objects.PoolList()
if all_pools:
pools.extend(self.central_api.find_pools(self.context))
else:
if not uuidutils.is_uuid_like(pool_id):
self.output_message.append(
'Not a valid uuid: %s' % pool_id)
raise SystemExit(1)
pools.append(
self.central_api.find_pool(self.context, {'id': pool_id}))
self.output_message.append(
yaml.dump(
DesignateAdapter.render('YAML', pool),
DesignateAdapter.render('YAML', pools),
default_flow_style=False
)
)
@@ -131,7 +138,13 @@ class PoolCommands(base.Commands):
self.output_message.append('*********************************')
for pool_data in pools_data:
self._create_or_update_pool(pool_data)
try:
self._create_or_update_pool(pool_data)
except exceptions.DuplicatePool:
raise exceptions.DuplicatePool(
f'Pool {pool_data["name"]} already exists with id '
f'{pool_data["id"]}. You cannot change id to an '
'existing pool.')
if delete:
pools = self.central_api.find_pools(self.context)
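The practical effect on the admin CLI: ``designate-manage pool show_config`` can now dump every pool at once instead of requiring a single pool id, and ``pool update`` refuses a pools file that reuses an existing pool name under a different id. A hedged usage sketch (flag names taken from the decorators above, pool id from the DevStack default):

$ designate-manage pool show_config --all_pools
$ designate-manage pool show_config --pool_id 794ccc2c-d751-44fe-b57f-8894c9f5c842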

View File

@@ -420,6 +420,23 @@ class RequestHandler:
# Make the space we reserved for TSIG available for use
renderer.max_size += TSIG_RRSIZE
# The following are a series of checks for DNS server
# request oddities.
# If the message fudge value is not present, set it to the default;
# see RFC2845: Record Format Section rrdata.Fudge & Section 6.4,
# which recommend a value of 300 seconds (5 mins).
if not hasattr(request, 'fudge'):
request.fudge = int(300)
# If the original_id is not present, use the request.id, see
# https://github.com/rthalley/dnspython/blob/2.2/dns/message.py#L125
if not hasattr(request, 'original_id'):
request.original_id = request.id
# If the other_data is not present, then set it to nothing.
if not hasattr(request, 'other_data'):
request.other_data = b""
if multi_messages:
# The first message context will be None then the
# context for the prev message is used for the next

View File

@@ -19,6 +19,7 @@ from oslo_log import log as logging
import oslo_messaging
from designate.central import service
from designate import exceptions
from designate.manage import base
from designate.manage import pool
from designate.tests import base_fixtures
@@ -60,7 +61,7 @@ class ManagePoolTestCase(designate.tests.functional.TestCase):
pool_id = self.central_service.find_pool(
self.admin_context, {'name': 'default'}).id
self.command.show_config(pool_id)
self.command.show_config(pool_id, all_pools=False)
self.print_result.assert_called_once()
self.assertIn('Pool Configuration', self.command.output_message[1])
@@ -75,7 +76,7 @@ class ManagePoolTestCase(designate.tests.functional.TestCase):
pool_id = self.central_service.find_pool(
self.admin_context, {'name': 'default'}).id
self.command.show_config(pool_id)
self.command.show_config(pool_id, all_pools=False)
self.print_result.assert_called_once()
self.assertIn('Pool Configuration', self.command.output_message[1])
@@ -88,7 +89,8 @@ class ManagePoolTestCase(designate.tests.functional.TestCase):
def test_show_config_rpc_timeout(self, mock_find_pool):
self.assertRaises(
SystemExit,
self.command.show_config, '5421ca70-f1b7-4edc-9e01-b604011a262a'
self.command.show_config, '5421ca70-f1b7-4edc-9e01-b604011a262a',
all_pools=False
)
mock_find_pool.assert_called_once()
@@ -96,7 +98,8 @@ class ManagePoolTestCase(designate.tests.functional.TestCase):
def test_show_config_pool_not_found(self):
self.assertRaises(
SystemExit,
self.command.show_config, '5421ca70-f1b7-4edc-9e01-b604011a262a'
self.command.show_config, '5421ca70-f1b7-4edc-9e01-b604011a262a',
all_pools=False
)
self.assertIn(
'Pool not found', ''.join(self.command.output_message)
@@ -105,7 +108,7 @@ class ManagePoolTestCase(designate.tests.functional.TestCase):
def test_show_config_invalid_uuid(self):
self.assertRaises(
SystemExit,
self.command.show_config, 'None'
self.command.show_config, 'None', all_pools=False
)
self.print_result.assert_called_once()
self.assertIn(
@@ -115,11 +118,39 @@ class ManagePoolTestCase(designate.tests.functional.TestCase):
def test_show_config_empty(self):
self.assertRaises(
SystemExit,
self.command.show_config, 'a36bb018-9584-420c-acc6-2b5cf89714ad'
self.command.show_config, 'a36bb018-9584-420c-acc6-2b5cf89714ad',
all_pools=False
)
self.print_result.assert_called_once()
self.assertIn('Pool not found', ''.join(self.command.output_message))
def test_show_config_multiple_pools(self):
self.command._setup()
self.command._create_pool(get_pools(name='multiple-pools.yaml')[0])
self.command._create_pool(get_pools(name='multiple-pools.yaml')[1])
# Calling show_config --all_pools without specifying pool_id
self.command.show_config(None, all_pools=True)
self.print_result.assert_called_once()
pools = self.central_service.find_pools(self.admin_context, {})
self.assertIn('Pool Configuration', self.command.output_message[1])
for p in pools:
self.assertIn(p.id, ''.join(self.command.output_message))
self.assertIn(p.description,
''.join(self.command.output_message))
# Calling show_config --all_pools with pool_id
# (should ignore the pool_id)
self.command.show_config('a36bb018-9584-420c-acc6-2b5cf89714ad',
all_pools=True)
for p in pools:
self.assertEqual(2, sum(
p.id in s for s in self.command.output_message))
self.assertEqual(2, sum(
p.description in s for s in self.command.output_message))
def test_update(self):
self.command.update(
get_pools_path('pools.yaml'), delete=False, dry_run=False
@@ -170,6 +201,32 @@ class ManagePoolTestCase(designate.tests.functional.TestCase):
pools = self.central_service.find_pools(self.admin_context, {})
self.assertEqual(2, len(pools))
def test_update_multiple_pools_name(self):
self.command.update(
get_pools_path('pools.yaml'), delete=False, dry_run=False
)
pools = self.central_service.find_pools(self.admin_context, {})
self.assertEqual(1, len(pools))
# Updating an existing pool (same name) to a different id should fail
self.assertRaises(
exceptions.DuplicatePool,
self.command.update,
get_pools_path('sample_output.yaml'), delete=False, dry_run=False
)
pools = self.central_service.find_pools(self.admin_context, {})
self.assertEqual(1, len(pools))
# Updating Pools with different name will only add pools
self.command.update(
get_pools_path('multiple-pools.yaml'), delete=False, dry_run=False
)
pools = self.central_service.find_pools(self.admin_context, {})
self.assertEqual(3, len(pools))
@mock.patch.object(service.Service, 'find_pool',
side_effect=oslo_messaging.MessagingTimeout())
def test_update_rpc_timeout(self, mock_find_pool):

View File

@@ -17,4 +17,11 @@ repository. See contrib/vagrant to create a vagrant VM.
[[local|localrc]]
enable_plugin designate https://opendev.org/openstack/designate
**Note:** To run with the multipool option, perform the above step
and, in addition, set the backend driver and scheduler filters::
SCHEDULER_FILTERS=attribute,pool_id_attributes,in_doubt_default_pool
DESIGNATE_BACKEND_DRIVER=multipool-bind9
3. run ``stack.sh``
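Putting the note together, a minimal ``local.conf`` sketch for a multipool DevStack run (values copied from the lines above, everything else left at its defaults)::

[[local|localrc]]
enable_plugin designate https://opendev.org/openstack/designate
DESIGNATE_BACKEND_DRIVER=multipool-bind9
SCHEDULER_FILTERS=attribute,pool_id_attributes,in_doubt_default_pool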

View File

@@ -0,0 +1,387 @@
#!/usr/bin/env bash
# Configure the bind9 pool backend for a multi-pool implementation
# Enable with:
# DESIGNATE_BACKEND_DRIVER=multipool-bind9
# Dependencies:
# ``functions`` file
# ``designate`` configuration
# install_designate_backend - install any external requirements
# configure_designate_backend - make configuration changes, including those to other services
# init_designate_backend - initialize databases, etc.
# start_designate_backend - start any external services
# stop_designate_backend - stop any external services
# cleanup_designate_backend - remove transient data and cache
# Save trace setting
DP_BIND9_XTRACE=$(set +o | grep xtrace)
set +o xtrace
# Defaults
# --------
BIND2_DNS_PORT=${DESIGNATE_SERVICE_PORT2_DNS:-1053}
BIND_SERVICE_NAME=bind9
BIND2_SERVICE_NAME=bind9-2
BIND2_DEFAULT_FILE=/etc/default/named-2
BIND2_SERVICE_FILE=/etc/systemd/system/$BIND2_SERVICE_NAME.service
BIND_CFG_DIR=/etc/bind
BIND2_CFG_DIR=/etc/bind-2
BIND2_TSIGKEY_FILE=$BIND2_CFG_DIR/named.conf.tsigkeys
BIND_VAR_DIR=/var/cache/bind
BIND2_VAR_DIR=/var/cache/bind-2
BIND_RUN_DIR=/run/named
BIND2_RUN_DIR=/run/named-2
BIND_CFG_FILE=$BIND_CFG_DIR/named.conf.options
BIND2_CFG_FILE=$BIND2_CFG_DIR/named.conf.options
BIND_USER=bind
BIND_GROUP=bind
DESIGNATE_SERVICE_PORT_RNDC=${DESIGNATE_SERVICE_PORT_RNDC:-953}
DESIGNATE_SERVICE_PORT2_RNDC=${DESIGNATE_SERVICE_PORT2_RNDC:-1953}
if is_fedora; then
BIND_SERVICE_NAME=named
BIND2_SERVICE_NAME=named-2
BIND2_SERVICE_FILE=/etc/systemd/system/$BIND2_SERVICE_NAME.service
BIND_CFG_DIR=/etc/$BIND_SERVICE_NAME
BIND2_CFG_DIR=/etc/$BIND2_SERVICE_NAME
BIND_CFG_FILE=/etc/$BIND_SERVICE_NAME.conf
BIND2_CFG_FILE=/etc/$BIND2_SERVICE_NAME.conf
BIND_VAR_DIR=/var/$BIND_SERVICE_NAME
BIND2_VAR_DIR=/var/$BIND2_SERVICE_NAME
BIND_USER=named
BIND_GROUP=named
BIND2_UNIT_CFG_FILE=/etc/sysconfig/$BIND2_SERVICE_NAME
BIND2_TSIGKEY_FILE=$BIND2_CFG_DIR/named.conf.tsigkeys
fi
# Entry Points
# ------------
# install_designate_backend - install any external requirements
function install_designate_backend {
# The user that designate runs as needs to be member of **$BIND_GROUP** group.
# The designate bind9 backend needs read/write access to $BIND_VAR_DIR
sudo groupadd -f $BIND_GROUP
add_user_to_group $STACK_USER $BIND_GROUP
sudo mkdir -p $BIND2_CFG_DIR
sudo chown -R $STACK_USER:$BIND_GROUP $BIND2_CFG_DIR
sudo mkdir -p $BIND2_RUN_DIR
sudo chgrp $BIND_GROUP $BIND2_RUN_DIR
if is_ubuntu; then
install_package bind9
# generate a defaults/named2 file
sudo tee $BIND2_DEFAULT_FILE >/dev/null <<EOF
# startup options for the server
OPTIONS="-u bind -c $BIND2_CFG_DIR/named.conf -p $BIND2_DNS_PORT -D named-2"
EOF
# copy the default bind config to bind-2 & make sure the dirs are pointing to bind-2
sudo cp -a $BIND_CFG_DIR/named.conf $BIND2_CFG_DIR/named.conf
sudo sed -i 's/bind/bind-2/g' $BIND2_CFG_DIR/named.conf
# Copy the necessary configuration file and set the bind-2 directories
sudo cp -a $BIND_CFG_DIR/zones.rfc1918 $BIND2_CFG_DIR
sudo cp -a $BIND_CFG_DIR/named.conf.local $BIND2_CFG_DIR
sudo cp -a $BIND_CFG_DIR/named.conf.default-zones $BIND2_CFG_DIR
sudo sed -i 's/bind/bind-2/g' $BIND2_CFG_DIR/named.conf.local
sudo sed -i 's/bind/bind-2/g' $BIND2_CFG_DIR/named.conf.default-zones
# copy db files
for db_file in db.local db.127 db.0 db.255; do
cp -a $BIND_CFG_DIR/${db_file} $BIND2_CFG_DIR
done
# create a second service file
sudo cp -a /lib/systemd/system/named.service $BIND2_SERVICE_FILE
iniset -sudo $BIND2_SERVICE_FILE "Service" "EnvironmentFile" "$BIND2_DEFAULT_FILE"
iniset -sudo $BIND2_SERVICE_FILE "Service" "PIDFile" "$BIND2_RUN_DIR/named.pid"
iniset -sudo $BIND2_SERVICE_FILE "Install" "Alias" "$BIND2_SERVICE_NAME.service"
sudo chmod g+s $BIND2_CFG_DIR
elif is_fedora; then
install_package bind
set -o xtrace
# Copy the necessary configuration files and set the named-2 directories
for d in $(ls /etc/named.*); do
cpfile=$(echo $d | sed 's/named/named-2/')
sudo cp $d $cpfile
sudo chown -R $BIND_USER:$BIND_GROUP $cpfile
if [[ "$d" == *"named.conf"* ]]; then
sudo sed -i 's/port 53/port '$BIND2_DNS_PORT'/g' $cpfile
sudo sed -i 's/named/named-2/g' $cpfile
fi
sudo sed -i 's/bind/bind-2/g' $cpfile
done
# create a second service file & and set options into the units params file.
sudo cp /etc/sysconfig/named $BIND2_UNIT_CFG_FILE
sudo chown $STACK_USER:$BIND_GROUP $BIND2_UNIT_CFG_FILE
sudo chmod 644 $BIND2_UNIT_CFG_FILE
OPTIONS='OPTIONS="-p '$BIND2_DNS_PORT' -D '$BIND2_SERVICE_NAME'"'
NAMEDCONF="NAMEDCONF='$BIND2_CFG_FILE'"
sudo echo "$OPTIONS" >>$BIND2_UNIT_CFG_FILE
sudo echo "$NAMEDCONF" >>$BIND2_UNIT_CFG_FILE
sudo cp -a /lib/systemd/system/named.service $BIND2_SERVICE_FILE
# set the various declarations
iniset -sudo $BIND2_SERVICE_FILE "Service" "Environment=NAMEDCONF" "$BIND2_CFG_FILE"
iniset -sudo $BIND2_SERVICE_FILE "Service" "EnvironmentFile" "$BIND2_UNIT_CFG_FILE"
iniset -sudo $BIND2_SERVICE_FILE "Service" "Environment=KRB5_KTNAME" "$BIND2_CFG_DIR.keytab"
iniset -sudo $BIND2_SERVICE_FILE "Service" "PIDFile" "$BIND2_RUN_DIR/named.pid"
sudo chmod 750 $BIND2_CFG_DIR
fi
sudo chown -R $BIND_USER:$BIND_GROUP $BIND2_RUN_DIR
sudo chown -R $BIND_USER:$BIND_GROUP $BIND_RUN_DIR
# copy the /var/named default data
sudo cp -arf $BIND_VAR_DIR $BIND2_VAR_DIR
for cfg_dir in "$BIND_CFG_DIR" "$BIND2_CFG_DIR"; do
sudo chmod -R g+r $cfg_dir
done
for var_dir in "$BIND_VAR_DIR" "$BIND2_VAR_DIR"; do
sudo chmod -R g+rw $var_dir
done
# Customize Bind9 apparmor profile if installed, include the necessary bits
# for the second named instance, bind-2 and named-2
if [[ -d /etc/apparmor.d ]]; then
sudo tee /etc/apparmor.d/local/usr.sbin.named >/dev/null <<EOF
$DESIGNATE_STATE_PATH/bind9/** rw,
/etc/bind-2/** r,
/var/cache/bind-2/** lrw,
/var/cache/bind-2/_default.nzd-lock rwk,
/{,var/}run/named-2/named.pid w,
/{,var/}run/named-2/session.key w,
/var/log/named-2/** rw,
/var/log/named-2/ rw,
EOF
restart_service apparmor || :
fi
}
# configure_designate_backend - make configuration changes, including those to other services
function configure_designate_backend {
# Generate Designate pool.yaml file
sudo tee $DESIGNATE_CONF_DIR/pools.yaml >/dev/null <<EOF
---
- name: default
description: DevStack BIND Pool
attributes: {
"pool_level": "default"
}
ns_records:
- hostname: $DESIGNATE_DEFAULT_NS_RECORD
priority: 1
nameservers:
- host: $(ipv6_unquote $DESIGNATE_SERVICE_HOST)
port: $DESIGNATE_SERVICE_PORT_DNS
targets:
- type: bind9
description: BIND Instance
masters:
- host: $(ipv6_unquote $DESIGNATE_SERVICE_HOST)
port: $DESIGNATE_SERVICE_PORT_MDNS
options:
host: $(ipv6_unquote $DESIGNATE_SERVICE_HOST)
port: $DESIGNATE_SERVICE_PORT_DNS
rndc_host: $(ipv6_unquote $DESIGNATE_SERVICE_HOST)
rndc_port: $DESIGNATE_SERVICE_PORT_RNDC
rndc_config_file: $BIND_CFG_DIR/rndc.conf
- name: secondary_pool
description: DevStack BIND Pool 2
attributes: {
"pool_level": "secondary"
}
ns_records:
- hostname: $DESIGNATE_DEFAULT_NS2_RECORD
priority: 1
nameservers:
- host: $(ipv6_unquote $DESIGNATE_SERVICE_HOST2)
port: $DESIGNATE_SERVICE_PORT2_DNS
targets:
- type: bind9
description: BIND Instance 2nd pool
masters:
- host: $(ipv6_unquote $DESIGNATE_SERVICE_HOST2)
port: $DESIGNATE_SERVICE_PORT2_MDNS
options:
host: $(ipv6_unquote $DESIGNATE_SERVICE_HOST2)
port: $DESIGNATE_SERVICE_PORT2_DNS
rndc_host: $(ipv6_unquote $DESIGNATE_SERVICE_HOST2)
rndc_port: $DESIGNATE_SERVICE_PORT2_RNDC
rndc_config_file: $BIND2_CFG_DIR/rndc.conf
rndc_key_file: $BIND2_CFG_DIR/rndc.key
clean_zonefile: true
EOF
# Configure Bind #1 instance
sudo chown $STACK_USER $BIND_CFG_DIR
# create rndc key and config
sudo rndc-confgen -a -c $BIND_CFG_DIR/rndc.key
sudo chown $BIND_USER:$BIND_GROUP $BIND_CFG_DIR/rndc.key
sudo chmod g+r $BIND_CFG_DIR/rndc.key
sudo tee $BIND_CFG_FILE >/dev/null <<EOF
include "$BIND_CFG_DIR/rndc.key";
options {
directory "$BIND_VAR_DIR";
allow-new-zones yes;
dnssec-validation auto;
auth-nxdomain no; # conform to RFC1035
listen-on port $DESIGNATE_SERVICE_PORT_DNS { $HOST_IP; };
listen-on-v6 port $DESIGNATE_SERVICE_PORT_DNS { $HOST_IPV6; };
recursion no;
pid-file "$BIND_RUN_DIR/named.pid";
session-keyfile "$BIND_RUN_DIR/session.key";
minimal-responses yes;
};
controls {
inet $(ipv6_unquote $DESIGNATE_SERVICE_HOST) port $DESIGNATE_SERVICE_PORT_RNDC allow { $(ipv6_unquote $DESIGNATE_SERVICE_HOST); } keys { "rndc-key"; };
};
EOF
# Configure Bind #2 instance
sudo chown $STACK_USER $BIND2_CFG_DIR
# Create the tsigkeys for the secondary pool & add it to the bind-2
# named.conf file.
sudo rm -rf $BIND2_TSIGKEY_FILE
sudo tsig-keygen -a hmac-sha256 poolsecondarykey >$BIND2_TSIGKEY_FILE
NAME=$(cat $BIND2_TSIGKEY_FILE | grep 'key' |
awk '{split($0, a, " "); print a[2];}' |
sed -e 's/^"//' -e 's/"$//' |
awk '{split($0, a, "{"); print a[1];}')
sudo echo -e "server $HOST_IP {\n keys { $NAME };\n};" >>$BIND2_TSIGKEY_FILE
# create rndc key and config
sudo rndc-confgen -a -p $DESIGNATE_SERVICE_PORT2_RNDC -c $BIND2_CFG_DIR/rndc.key
sudo chown $BIND_USER:$BIND_GROUP $BIND2_CFG_DIR/rndc.key
sudo chmod g+r $BIND2_CFG_DIR/rndc.key
sudo tee $BIND2_CFG_FILE >/dev/null <<EOF
include "$BIND2_CFG_DIR/rndc.key";
options {
directory "$BIND2_VAR_DIR";
allow-new-zones yes;
dnssec-validation auto;
auth-nxdomain no; # conform to RFC1035
listen-on port $DESIGNATE_SERVICE_PORT2_DNS { $HOST_IP; };
listen-on-v6 port $DESIGNATE_SERVICE_PORT2_DNS { $HOST_IPV6; };
recursion no;
pid-file "$BIND2_RUN_DIR/named.pid";
session-keyfile "$BIND2_RUN_DIR/session.key";
minimal-responses yes;
};
controls {
inet $(ipv6_unquote $DESIGNATE_SERVICE_HOST2) port $DESIGNATE_SERVICE_PORT2_RNDC allow { $(ipv6_unquote $DESIGNATE_SERVICE_HOST2); } keys { "rndc-key"; };
};
include "$BIND2_TSIGKEY_FILE";
EOF
# Configure RNDC #1
sudo tee $BIND_CFG_DIR/rndc.conf >/dev/null <<EOF
include "$BIND_CFG_DIR/rndc.key";
options {
default-key "rndc-key";
default-server $(ipv6_unquote $DESIGNATE_SERVICE_HOST);
default-port $DESIGNATE_SERVICE_PORT_RNDC;
};
EOF
# Configure RNDC #2
sudo tee $BIND2_CFG_DIR/rndc.conf >/dev/null <<EOF
include "$BIND2_CFG_DIR/rndc.key";
options {
default-key "rndc-key";
default-server $(ipv6_unquote $DESIGNATE_SERVICE_HOST);
default-port $DESIGNATE_SERVICE_PORT2_RNDC;
};
EOF
sudo chown $BIND_USER:$BIND_GROUP $BIND_CFG_FILE $BIND_CFG_DIR/rndc.conf
sudo chmod g+r $BIND_CFG_FILE $BIND_CFG_DIR/rndc.conf
sudo chown $BIND_USER:$BIND_GROUP $BIND2_CFG_FILE $BIND2_CFG_DIR/rndc.conf
sudo chmod g+r $BIND2_CFG_FILE $BIND2_CFG_DIR/rndc.conf
start_service $BIND2_SERVICE_NAME
restart_service $BIND_SERVICE_NAME
restart_service $BIND2_SERVICE_NAME
}
# init_designate_backend - initialize databases, etc.
function init_designate_backend {
:
}
# start_designate_backend - start any external services
function start_designate_backend {
start_service $BIND_SERVICE_NAME
start_service $BIND2_SERVICE_NAME
}
# stop_designate_backend - stop any external services
function stop_designate_backend {
stop_service $BIND_SERVICE_NAME
stop_service $BIND2_SERVICE_NAME
}
# cleanup_designate_backend - remove transient data and cache
function cleanup_designate_backend {
sudo sh -c "rm -rf $BIND_VAR_DIR/*.nzf"
sudo sh -c "rm -rf $BIND_VAR_DIR/slave.*"
sudo rm -f $BIND_CFG_DIR/rndc.key
sudo rm -f $BIND2_TSIGKEY_FILE
if [ -d $BIND2_CFG_DIR ]; then
sudo sh -c "rm -rf $BIND2_VAR_DIR/*.nzf"
sudo sh -c "rm -rf $BIND2_VAR_DIR/slave.*"
sudo rm -f $BIND2_CFG_DIR/rndc.key
# remove the tsigkey from named conf file.
sudo sed -i '/tsigkeys/d' $BIND2_CFG_DIR/named.conf
RM_CMD="sudo rm -rf"
RM_LIST="$BIND2_CFG_DIR* $BIND2_VAR_DIR $BIND2_RUN_DIR $BIND2_SERVICE_FILE"
for rc in $RM_LIST; do
echo "$RM_CMD $rc"
$RM_CMD $rc
done
if is_fedora; then
sudo rm -f $BIND2_UNIT_CFG_FILE
fi
fi
sudo systemctl reset-failed
}
# Restore xtrace
$DP_BIND9_XTRACE
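Once ``stack.sh`` has run with this backend, a hedged way to confirm the second named instance is healthy (ports and paths are the defaults defined at the top of this file and may differ in your environment):

$ sudo systemctl status bind9-2                        # named-2 on Fedora
$ sudo rndc -c /etc/bind-2/rndc.conf status
$ dig @127.0.0.1 -p 1053 version.bind CHAOS TXT +short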

View File

@@ -71,6 +71,9 @@ function configure_designate {
# Central Configuration
iniset $DESIGNATE_CONF service:central workers $API_WORKERS
if [[ -n "$SCHEDULER_FILTERS" ]]; then
iniset $DESIGNATE_CONF service:central scheduler_filters $SCHEDULER_FILTERS
fi
# mDNS Configuration
iniset $DESIGNATE_CONF service:mdns listen ${DESIGNATE_SERVICE_HOST}:${DESIGNATE_SERVICE_PORT_MDNS}
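For reference, the conditional above lands in designate.conf along these lines (a sketch assuming the SCHEDULER_FILTERS value suggested in the devstack README above):

[service:central]
scheduler_filters = attribute,pool_id_attributes,in_doubt_default_pool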
@@ -206,6 +209,27 @@ function create_designate_pool_configuration {
if function_exists create_designate_pool_configuration_backend; then
create_designate_pool_configuration_backend
fi
# create the tsigkey for the secondary pool acct., if necessary.
if [ "$DESIGNATE_BACKEND_DRIVER" == "multipool-bind9" ] && \
[ -d $BIND2_CFG_DIR ] && [ -f $BIND2_TSIGKEY_FILE ]; then
# parse the data from the bind-2/named.conf.tsigkeys file,
# which was created during the init_designate_backend section.
NAME=`cat $BIND2_TSIGKEY_FILE | grep 'key' | \
awk '{split($0, a, " "); print a[2];}' | \
sed -e 's/^"//' -e 's/"$//'| \
awk '{split($0, a, "{"); print a[1];}'`
ALGORITHM=`cat $BIND2_TSIGKEY_FILE | grep 'algorithm' | \
awk '{split($0, a, " "); print a[2];}' | \
sed -r 's/(.*);/\1/'`
SECRET=`cat $BIND2_TSIGKEY_FILE | grep 'secret' | \
awk '{split($0, a, " "); print a[2];}' | \
sed -r 's/(.*);/\1/' | sed -e 's/^"//' -e 's/"$//'`
RESOURCE_ID=$(sudo mysql -u root -p$DATABASE_PASSWORD designate -N -e "select id from pools where name = 'secondary_pool';")
# create the openstack tsigkey for the secondary pool
openstack tsigkey create --name $NAME --algorithm $ALGORITHM --secret $SECRET --scope POOL --resource-id $RESOURCE_ID
fi
}
# init_designate - Initialize etc.
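A hedged way to confirm the key registration afterwards (the ``openstack tsigkey`` commands are provided by python-designateclient's OSC plugin):

$ openstack tsigkey list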

View File

@@ -2,6 +2,7 @@
DESIGNATE_BACKEND_DRIVER=${DESIGNATE_BACKEND_DRIVER:=bind9}
DESIGNATE_POOL_ID=${DESIGNATE_POOL_ID:-794ccc2c-d751-44fe-b57f-8894c9f5c842}
DESIGNATE_DEFAULT_NS_RECORD=${DESIGNATE_DEFAULT_NS_RECORD:-ns1.devstack.org.}
DESIGNATE_DEFAULT_NS2_RECORD=${DESIGNATE_DEFAULT_NS2_RECORD:-ns2.devstack.org.}
DESIGNATE_NOTIFICATION_DRIVER=${DESIGNATE_NOTIFICATION_DRIVER:-messagingv2}
DESIGNATE_NOTIFICATION_TOPICS=${DESIGNATE_NOTIFICATION_TOPICS:-notifications}
DESIGNATE_PERIODIC_RECOVERY_INTERVAL=${DESIGNATE_PERIODIC_RECOVERY_INTERVAL:-120}
@@ -33,8 +34,12 @@ fi
# Default IP/port settings
DESIGNATE_SERVICE_PROTOCOL=${DESIGNATE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
DESIGNATE_SERVICE_HOST=${DESIGNATE_SERVICE_HOST:-$SERVICE_HOST}
DESIGNATE_SERVICE_HOST2=${DESIGNATE_SERVICE_HOST2:-$SERVICE_HOST}
DESIGNATE_SERVICE_PORT_DNS=${DESIGNATE_SERVICE_PORT_DNS:-53}
DESIGNATE_SERVICE_PORT2_DNS=${DESIGNATE_SERVICE_PORT2_DNS:-1053}
DESIGNATE_SERVICE_PORT_MDNS=${DESIGNATE_SERVICE_PORT_MDNS:-5354}
DESIGNATE_SERVICE_PORT2_MDNS=${DESIGNATE_SERVICE_PORT2_MDNS:-5354}
DESIGNATE_SERVICE_PORT_AGENT=${DESIGNATE_SERVICE_PORT_AGENT:-5358}
DESIGNATE_DIR=$DEST/designate
# Default directories

View File

@@ -169,6 +169,50 @@ managed by designate as part of the default pool.
In the ``AUTHORITY`` section, the numeric value between the name and `IN` is
the TTL, which has been updated to the new value of 3000.
Multiple Pools Zone Creation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When multiple pools are configured, if you want to create a zone and attach it
to a pool other than the default one, you must indicate which pool the zone
will be attached to. This is done via the ``--attributes`` option during zone
creation.
See the following example:
.. code-block:: console
$ openstack zone create --email dnsmaster@example.com example.com. --attributes pool_level:secondary
+----------------+--------------------------------------+
| Field | Value |
+----------------+--------------------------------------+
| action | CREATE |
| attributes | pool_level:secondary |
| | |
| created_at | 2023-01-24T18:30:45.000000 |
| description | None |
| email | dnsmaster@example.com |
| id | d106e7b0-9973-41a1-b3db-0fb34b6d952c |
| masters | |
| name | example.com. |
| pool_id | 10cec123-43f0-4b60-98a8-1204dd826c67 |
| project_id | 5160768b59524fd283a4fa82d7327644 |
| serial | 1674585045 |
| status | PENDING |
| transferred_at | None |
| ttl | 3600 |
| type | PRIMARY |
| updated_at | None |
| version | 1 |
+----------------+--------------------------------------+
$ openstack zone list
+--------------------------------------+---------------+---------+------------+--------+--------+
| id | name | type | serial | status | action |
+--------------------------------------+---------------+---------+------------+--------+--------+
| d106e7b0-9973-41a1-b3db-0fb34b6d952c | example.com. | PRIMARY | 1674585045 | ACTIVE | NONE |
+--------------------------------------+---------------+---------+------------+--------+--------+
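To confirm which pool the zone actually landed in, a hedged check using standard OpenStackClient output options:

.. code-block:: console

$ openstack zone show example.com. -c pool_id -f value
10cec123-43f0-4b60-98a8-1204dd826c67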
Deleting a zone
---------------