Merge "DNS HA"
commit aa149bdc71
README.md (36 changed lines)
@@ -52,19 +52,39 @@ This is enabled by relating the ceph-radosgw service with keystone::
 If you try to relate the radosgw to keystone with an earlier version of ceph the hook
 will error out to let you know.
 
-Scale-out
-=========
+HA/Clustering
+=============
 
-Its possible to scale-out the RADOS Gateway itself::
+There are two mutually exclusive high availability options: using virtual
+IP(s) or DNS. In both cases, a relationship to hacluster is required which
+provides the corosync back end HA functionality.
 
-    juju add-unit -n 2 ceph-radosgw
+To use virtual IP(s) the clustered nodes must be on the same subnet such that
+the VIP is a valid IP on the subnet for one of the node's interfaces and each
+node has an interface in said subnet. The VIP becomes a highly-available API
+endpoint.
 
-and then stick a HA loadbalancer on the front::
+At a minimum, the config option 'vip' must be set in order to use virtual IP
+HA. If multiple networks are being used, a VIP should be provided for each
+network, separated by spaces. Optionally, vip_iface or vip_cidr may be
+specified.
 
-    juju deploy haproxy
-    juju add-relation haproxy ceph-radosgw
+To use DNS high availability there are several prerequisites. However, DNS HA
+does not require the clustered nodes to be on the same subnet.
+Currently the DNS HA feature is only available for MAAS 2.0 or greater
+environments. MAAS 2.0 requires Juju 2.0 or greater. The clustered nodes must
+have static or "reserved" IP addresses registered in MAAS. The DNS hostname(s)
+must be pre-registered in MAAS before use with DNS HA.
 
-Should give you a bit more bang on the front end if you really need it.
+At a minimum, the config option 'dns-ha' must be set to true and at least one
+of 'os-public-hostname', 'os-internal-hostname' or 'os-admin-hostname' must
+be set in order to use DNS HA. One or more of the above hostnames may be set.
+
+The charm will throw an exception in the following circumstances:
+If neither 'vip' nor 'dns-ha' is set and the charm is related to hacluster
+If both 'vip' and 'dns-ha' are set as they are mutually exclusive
+If 'dns-ha' is set and none of the os-{admin,internal,public}-hostname(s) are
+set
 
 Network Space support
 =====================
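As a minimal sketch of the two modes described above (assuming Juju 2.0 CLI syntax; the application names, unit count, VIP and hostname are placeholders, not part of this change)::

    # Option 1: virtual IP HA (units share a subnet, fronted by a VIP)
    juju deploy -n 3 ceph-radosgw
    juju deploy hacluster rgw-hacluster
    juju config ceph-radosgw vip='10.5.100.1'
    juju add-relation ceph-radosgw rgw-hacluster

    # Option 2 (instead): DNS HA, MAAS 2.0+ with pre-registered hostnames
    juju config ceph-radosgw dns-ha=true os-public-hostname='files.example.com'
    juju add-relation ceph-radosgw rgw-hacluster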
config.yaml (30 changed lines)
@@ -95,6 +95,12 @@ options:
 
       Enable this option to disable use of Apache and enable the embedded
       web container feature.
+  dns-ha:
+    type: boolean
+    default: False
+    description: |
+      Use DNS HA with MAAS 2.0. Note if this is set do not set vip
+      settings below.
   vip:
     type: string
     default:
@@ -153,6 +159,30 @@ options:
       the following public endpoint for the ceph-radosgw:
 
       https://files.example.com:80/swift/v1
+  os-internal-hostname:
+    type: string
+    default:
+    description: |
+      The hostname or address of the internal endpoints created for ceph-radosgw
+      in the keystone identity provider.
+
+      This value will be used for internal endpoints. For example, an
+      os-internal-hostname set to 'files.internal.example.com' will create
+      the following internal endpoint for the ceph-radosgw:
+
+      https://files.internal.example.com:80/swift/v1
+  os-admin-hostname:
+    type: string
+    default:
+    description: |
+      The hostname or address of the admin endpoints created for ceph-radosgw
+      in the keystone identity provider.
+
+      This value will be used for admin endpoints. For example, an
+      os-admin-hostname set to 'files.admin.example.com' will create
+      the following admin endpoint for the ceph-radosgw:
+
+      https://files.admin.example.com:80/swift/v1
   ceph-osd-replication-count:
     type: int
     default: 3
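For illustration, the new options would typically be set together with dns-ha; the hostnames below are the example values from the descriptions above, and the command assumes Juju 2.0 syntax::

    juju config ceph-radosgw \
        dns-ha=true \
        os-public-hostname='files.example.com' \
        os-internal-hostname='files.internal.example.com' \
        os-admin-hostname='files.admin.example.com'

    # keystone then advertises endpoints such as:
    #   public:   https://files.example.com:80/swift/v1
    #   internal: https://files.internal.example.com:80/swift/v1
    #   admin:    https://files.admin.example.com:80/swift/v1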
@@ -24,7 +24,6 @@ from charmhelpers.core.hookenv import (
     log,
     DEBUG,
     WARNING,
-    ERROR,
     Hooks, UnregisteredHookError,
     status_set,
 )
@@ -55,6 +54,12 @@ from charmhelpers.contrib.openstack.utils import (
     is_unit_paused_set,
     pausable_restart_on_change as restart_on_change,
 )
+from charmhelpers.contrib.hahelpers.cluster import (
+    get_hacluster_config,
+)
+from charmhelpers.contrib.openstack.ha.utils import (
+    update_dns_ha_resource_params,
+)
 from utils import (
     enable_pocket,
     CEPHRG_HA_RES,
@@ -258,12 +263,8 @@ def cluster_changed():
 
 
 @hooks.hook('ha-relation-joined')
-def ha_relation_joined():
-    vip = config('vip')
-    if not vip:
-        log('Unable to configure hacluster as vip not provided', level=ERROR)
-        sys.exit(1)
-
+def ha_relation_joined(relation_id=None):
+    cluster_config = get_hacluster_config()
     # Obtain resources
     resources = {
         'res_cephrg_haproxy': 'lsb:haproxy'
@@ -272,8 +273,13 @@ def ha_relation_joined():
         'res_cephrg_haproxy': 'op monitor interval="5s"'
     }
 
+    if config('dns-ha'):
+        update_dns_ha_resource_params(relation_id=relation_id,
+                                      resources=resources,
+                                      resource_params=resource_params)
+    else:
         vip_group = []
-        for vip in vip.split():
+        for vip in cluster_config['vip'].split():
             if is_ipv6(vip):
                 res_rgw_vip = 'ocf:heartbeat:IPv6addr'
                 vip_params = 'ipv6addr'
@@ -306,14 +312,10 @@ def ha_relation_joined():
         'cl_cephrg_haproxy': 'res_cephrg_haproxy'
     }
 
-    # Obtain the config values necessary for the cluster config. These
-    # include multicast port and interface to bind to.
-    corosync_bindiface = config('ha-bindiface')
-    corosync_mcastport = config('ha-mcastport')
-
-    relation_set(init_services=init_services,
-                 corosync_bindiface=corosync_bindiface,
-                 corosync_mcastport=corosync_mcastport,
+    relation_set(relation_id=relation_id,
+                 init_services=init_services,
+                 corosync_bindiface=cluster_config['ha-bindiface'],
+                 corosync_mcastport=cluster_config['ha-mcastport'],
                  resources=resources,
                  resource_params=resource_params,
                  clones=clones)
@@ -39,6 +39,8 @@ TO_PATCH = [
     'subprocess',
     'sys',
     'unit_get',
+    'get_hacluster_config',
+    'update_dns_ha_resource_params',
 ]
 
 
@@ -251,15 +253,15 @@ class CephRadosGWTests(CharmTestCase):
         self.CONFIGS.write_all.assert_called_with()
         _id_joined.assert_called_with(relid='rid')
 
-    def test_ha_relation_joined_no_vip(self):
-        self.test_config.set('vip', '')
-        ceph_hooks.ha_relation_joined()
-        self.sys.exit.assert_called_with(1)
-
     def test_ha_relation_joined_vip(self):
         self.test_config.set('ha-bindiface', 'eth8')
         self.test_config.set('ha-mcastport', '5000')
         self.test_config.set('vip', '10.0.0.10')
+        self.get_hacluster_config.return_value = {
+            'vip': '10.0.0.10',
+            'ha-bindiface': 'eth8',
+            'ha-mcastport': '5000',
+        }
         self.get_iface_for_address.return_value = 'eth7'
         self.get_netmask_for_address.return_value = '255.255.0.0'
         ceph_hooks.ha_relation_joined()
@@ -270,6 +272,7 @@ class CephRadosGWTests(CharmTestCase):
         resource_params = {'res_cephrg_haproxy': 'op monitor interval="5s"',
                            'res_cephrg_eth7_vip': eth_params}
         self.relation_set.assert_called_with(
+            relation_id=None,
             init_services={'res_cephrg_haproxy': 'haproxy'},
             corosync_bindiface='eth8',
             corosync_mcastport='5000',
@@ -277,6 +280,41 @@ class CephRadosGWTests(CharmTestCase):
             resources=resources,
             clones={'cl_cephrg_haproxy': 'res_cephrg_haproxy'})
 
+    def test_ha_joined_dns_ha(self):
+        def _fake_update(resources, resource_params, relation_id=None):
+            resources.update({'res_cephrg_public_hostname': 'ocf:maas:dns'})
+            resource_params.update({'res_cephrg_public_hostname':
+                                    'params fqdn="keystone.maas" '
+                                    'ip_address="10.0.0.1"'})
+
+        self.test_config.set('dns-ha', True)
+        self.get_hacluster_config.return_value = {
+            'vip': None,
+            'ha-bindiface': 'em0',
+            'ha-mcastport': '8080',
+            'os-admin-hostname': None,
+            'os-internal-hostname': None,
+            'os-public-hostname': 'keystone.maas',
+        }
+        args = {
+            'relation_id': None,
+            'corosync_bindiface': 'em0',
+            'corosync_mcastport': '8080',
+            'init_services': {'res_cephrg_haproxy': 'haproxy'},
+            'resources': {'res_cephrg_public_hostname': 'ocf:maas:dns',
+                          'res_cephrg_haproxy': 'lsb:haproxy'},
+            'resource_params': {
+                'res_cephrg_public_hostname': 'params fqdn="keystone.maas" '
+                                              'ip_address="10.0.0.1"',
+                'res_cephrg_haproxy': 'op monitor interval="5s"'},
+            'clones': {'cl_cephrg_haproxy': 'res_cephrg_haproxy'}
+        }
+        self.update_dns_ha_resource_params.side_effect = _fake_update
+
+        ceph_hooks.ha_relation_joined()
+        self.assertTrue(self.update_dns_ha_resource_params.called)
+        self.relation_set.assert_called_with(**args)
+
     def test_ha_relation_changed(self):
         _id_joined = self.patch('identity_joined')
         self.relation_get.return_value = True