Merge "DNS HA"

Jenkins 2016-06-23 08:38:11 +00:00 committed by Gerrit Code Review
commit aa149bdc71
4 changed files with 141 additions and 51 deletions


@@ -52,19 +52,39 @@ This is enabled by relating the ceph-radosgw service with keystone::
If you try to relate the radosgw to keystone with an earlier version of ceph the hook
will error out to let you know.
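As a minimal sketch, assuming both applications are already deployed under
their default names, the relation can be added with::

    juju add-relation ceph-radosgw keystone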
Scale-out
=========
HA/Clustering
=============
Its possible to scale-out the RADOS Gateway itself::
There are two mutually exclusive high availability options: using virtual
IP(s) or DNS. In both cases, a relationship to hacluster is required which
provides the corosync back end HA functionality.
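A minimal sketch of establishing that relationship, where the hacluster
application name is illustrative::

    juju deploy hacluster ceph-radosgw-hacluster
    juju add-relation ceph-radosgw ceph-radosgw-hacluster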
juju add-unit -n 2 ceph-radosgw
To use virtual IP(s) the clustered nodes must be on the same subnet such that
the VIP is a valid IP on the subnet for one of the node's interfaces and each
node has an interface in said subnet. The VIP becomes a highly-available API
endpoint.
and then stick a HA loadbalancer on the front::
At a minimum, the config option 'vip' must be set in order to use virtual IP
HA. If multiple networks are being used, a VIP should be provided for each
network, separated by spaces. Optionally, vip_iface or vip_cidr may be
specified.
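For example, using Juju 2.x syntax and illustrative addresses, one VIP per
network could be set with::

    juju config ceph-radosgw vip='10.5.100.1 10.6.100.1'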
juju deploy haproxy
juju add-relation haproxy ceph-radosgw
To use DNS high availability there are several prerequisites. However, DNS HA
does not require the clustered nodes to be on the same subnet.
Currently the DNS HA feature is only available for MAAS 2.0 or greater
environments. MAAS 2.0 requires Juju 2.0 or greater. The clustered nodes must
have static or "reserved" IP addresses registered in MAAS. The DNS hostname(s)
must be pre-registered in MAAS before use with DNS HA.
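As a sketch, a reserved address could be registered against a hostname using
the MAAS 2.0 CLI, where the profile, FQDN and address are illustrative::

    maas admin dnsresources create fqdn=files.example.maas ip_addresses=10.0.0.10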
Should give you a bit more bang on the front end if you really need it.
At a minimum, the config option 'dns-ha' must be set to true and at least one
of 'os-public-hostname', 'os-internal-hostname' or 'os-admin-hostname' must
be set in order to use DNS HA. One or more of the above hostnames may be set.
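For example, enabling DNS HA with only a public hostname (Juju 2.x syntax,
illustrative hostname)::

    juju config ceph-radosgw dns-ha=true os-public-hostname=files.example.maas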
The charm will throw an exception in the following circumstances:
If neither 'vip' nor 'dns-ha' is set and the charm is related to hacluster
If both 'vip' and 'dns-ha' are set as they are mutually exclusive
If 'dns-ha' is set and none of the os-{admin,internal,public}-hostname(s) are
set
Network Space support
=====================


@@ -95,6 +95,12 @@ options:
Enable this option to disable use of Apache and enable the embedded
web container feature.
dns-ha:
type: boolean
default: False
description: |
Use DNS HA with MAAS 2.0. Note that if this is set, do not set the vip
settings below.
vip:
type: string
default:
@@ -153,6 +159,30 @@ options:
the following public endpoint for the ceph-radosgw:
https://files.example.com:80/swift/v1
os-internal-hostname:
type: string
default:
description: |
The hostname or address of the internal endpoints created for ceph-radosgw
in the keystone identity provider.
This value will be used for internal endpoints. For example, an
os-internal-hostname set to 'files.internal.example.com' will create
the following internal endpoint for the ceph-radosgw:
https://files.internal.example.com:80/swift/v1
os-admin-hostname:
type: string
default:
description: |
The hostname or address of the admin endpoints created for ceph-radosgw
in the keystone identity provider.
This value will be used for admin endpoints. For example, an
os-admin-hostname set to 'files.admin.example.com' will create
the following admin endpoint for the ceph-radosgw:
https://files.admin.example.com:80/swift/v1
ceph-osd-replication-count:
type: int
default: 3


@@ -24,7 +24,6 @@ from charmhelpers.core.hookenv import (
log,
DEBUG,
WARNING,
ERROR,
Hooks, UnregisteredHookError,
status_set,
)
@@ -55,6 +54,12 @@ from charmhelpers.contrib.openstack.utils import (
is_unit_paused_set,
pausable_restart_on_change as restart_on_change,
)
from charmhelpers.contrib.hahelpers.cluster import (
get_hacluster_config,
)
from charmhelpers.contrib.openstack.ha.utils import (
update_dns_ha_resource_params,
)
from utils import (
enable_pocket,
CEPHRG_HA_RES,
@@ -258,12 +263,8 @@ def cluster_changed():
@hooks.hook('ha-relation-joined')
def ha_relation_joined():
vip = config('vip')
if not vip:
log('Unable to configure hacluster as vip not provided', level=ERROR)
sys.exit(1)
def ha_relation_joined(relation_id=None):
cluster_config = get_hacluster_config()
# Obtain resources
resources = {
'res_cephrg_haproxy': 'lsb:haproxy'
@@ -272,32 +273,37 @@ def ha_relation_joined():
'res_cephrg_haproxy': 'op monitor interval="5s"'
}
vip_group = []
for vip in vip.split():
if is_ipv6(vip):
res_rgw_vip = 'ocf:heartbeat:IPv6addr'
vip_params = 'ipv6addr'
else:
res_rgw_vip = 'ocf:heartbeat:IPaddr2'
vip_params = 'ip'
if config('dns-ha'):
update_dns_ha_resource_params(relation_id=relation_id,
resources=resources,
resource_params=resource_params)
else:
vip_group = []
for vip in cluster_config['vip'].split():
if is_ipv6(vip):
res_rgw_vip = 'ocf:heartbeat:IPv6addr'
vip_params = 'ipv6addr'
else:
res_rgw_vip = 'ocf:heartbeat:IPaddr2'
vip_params = 'ip'
iface = get_iface_for_address(vip)
netmask = get_netmask_for_address(vip)
iface = get_iface_for_address(vip)
netmask = get_netmask_for_address(vip)
if iface is not None:
vip_key = 'res_cephrg_{}_vip'.format(iface)
resources[vip_key] = res_rgw_vip
resource_params[vip_key] = (
'params {ip}="{vip}" cidr_netmask="{netmask}"'
' nic="{iface}"'.format(ip=vip_params,
vip=vip,
iface=iface,
netmask=netmask)
)
vip_group.append(vip_key)
if iface is not None:
vip_key = 'res_cephrg_{}_vip'.format(iface)
resources[vip_key] = res_rgw_vip
resource_params[vip_key] = (
'params {ip}="{vip}" cidr_netmask="{netmask}"'
' nic="{iface}"'.format(ip=vip_params,
vip=vip,
iface=iface,
netmask=netmask)
)
vip_group.append(vip_key)
if len(vip_group) >= 1:
relation_set(groups={CEPHRG_HA_RES: ' '.join(vip_group)})
if len(vip_group) >= 1:
relation_set(groups={CEPHRG_HA_RES: ' '.join(vip_group)})
init_services = {
'res_cephrg_haproxy': 'haproxy'
@@ -306,14 +312,10 @@ def ha_relation_joined():
'cl_cephrg_haproxy': 'res_cephrg_haproxy'
}
# Obtain the config values necessary for the cluster config. These
# include multicast port and interface to bind to.
corosync_bindiface = config('ha-bindiface')
corosync_mcastport = config('ha-mcastport')
relation_set(init_services=init_services,
corosync_bindiface=corosync_bindiface,
corosync_mcastport=corosync_mcastport,
relation_set(relation_id=relation_id,
init_services=init_services,
corosync_bindiface=cluster_config['ha-bindiface'],
corosync_mcastport=cluster_config['ha-mcastport'],
resources=resources,
resource_params=resource_params,
clones=clones)


@@ -39,6 +39,8 @@ TO_PATCH = [
'subprocess',
'sys',
'unit_get',
'get_hacluster_config',
'update_dns_ha_resource_params',
]
@@ -251,15 +253,15 @@ class CephRadosGWTests(CharmTestCase):
self.CONFIGS.write_all.assert_called_with()
_id_joined.assert_called_with(relid='rid')
def test_ha_relation_joined_no_vip(self):
self.test_config.set('vip', '')
ceph_hooks.ha_relation_joined()
self.sys.exit.assert_called_with(1)
def test_ha_relation_joined_vip(self):
self.test_config.set('ha-bindiface', 'eth8')
self.test_config.set('ha-mcastport', '5000')
self.test_config.set('vip', '10.0.0.10')
self.get_hacluster_config.return_value = {
'vip': '10.0.0.10',
'ha-bindiface': 'eth8',
'ha-mcastport': '5000',
}
self.get_iface_for_address.return_value = 'eth7'
self.get_netmask_for_address.return_value = '255.255.0.0'
ceph_hooks.ha_relation_joined()
@@ -270,6 +272,7 @@ class CephRadosGWTests(CharmTestCase):
resource_params = {'res_cephrg_haproxy': 'op monitor interval="5s"',
'res_cephrg_eth7_vip': eth_params}
self.relation_set.assert_called_with(
relation_id=None,
init_services={'res_cephrg_haproxy': 'haproxy'},
corosync_bindiface='eth8',
corosync_mcastport='5000',
@@ -277,6 +280,41 @@ class CephRadosGWTests(CharmTestCase):
resources=resources,
clones={'cl_cephrg_haproxy': 'res_cephrg_haproxy'})
def test_ha_joined_dns_ha(self):
def _fake_update(resources, resource_params, relation_id=None):
resources.update({'res_cephrg_public_hostname': 'ocf:maas:dns'})
resource_params.update({'res_cephrg_public_hostname':
'params fqdn="keystone.maas" '
'ip_address="10.0.0.1"'})
self.test_config.set('dns-ha', True)
self.get_hacluster_config.return_value = {
'vip': None,
'ha-bindiface': 'em0',
'ha-mcastport': '8080',
'os-admin-hostname': None,
'os-internal-hostname': None,
'os-public-hostname': 'keystone.maas',
}
args = {
'relation_id': None,
'corosync_bindiface': 'em0',
'corosync_mcastport': '8080',
'init_services': {'res_cephrg_haproxy': 'haproxy'},
'resources': {'res_cephrg_public_hostname': 'ocf:maas:dns',
'res_cephrg_haproxy': 'lsb:haproxy'},
'resource_params': {
'res_cephrg_public_hostname': 'params fqdn="keystone.maas" '
'ip_address="10.0.0.1"',
'res_cephrg_haproxy': 'op monitor interval="5s"'},
'clones': {'cl_cephrg_haproxy': 'res_cephrg_haproxy'}
}
self.update_dns_ha_resource_params.side_effect = _fake_update
ceph_hooks.ha_relation_joined()
self.assertTrue(self.update_dns_ha_resource_params.called)
self.relation_set.assert_called_with(**args)
def test_ha_relation_changed(self):
_id_joined = self.patch('identity_joined')
self.relation_get.return_value = True