Add support for Juju network spaces

Juju 2.0 provides support for network spaces, allowing
charm authors to bind relations and extra-bindings
directly onto underlying network spaces.

Add public and cluster extra-bindings to this charm to
support separation of client-facing and cluster network
traffic using Juju network spaces.

Existing network configuration options will still be
preferred over any Juju-provided network bindings (see
the resolution sketch below), ensuring that upgrades to
existing deployments don't break.

Change-Id: I78ab6993ad5bd324ea52e279c6ca2630f965544c
James Page 2016-04-07 14:29:15 +01:00
parent bbfdeb84f0
commit afe7651e5c
5 changed files with 139 additions and 1 deletion
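
In outline, the change resolves each of the public and cluster addresses in the order sketched below. This is an editorial summary of the helpers added to the charm's utils module (diffed below), with a hypothetical wrapper name; it is not a verbatim excerpt from the commit.

from charmhelpers.core.hookenv import config, network_get_primary_address

# get_network_addrs() and get_host_ip() are the charm's own helpers (see the
# utils hunks below); resolve_addr itself is hypothetical, for illustration.
def resolve_addr(config_opt, binding):
    # 1. An explicit ceph-public-network / ceph-cluster-network option always
    #    wins, so existing deployments keep their current behaviour.
    if config(config_opt):
        return get_network_addrs(config_opt)[0]
    # 2. Otherwise ask Juju for the primary address of the bound space.
    try:
        return network_get_primary_address(binding)
    except NotImplementedError:
        # 3. Pre-2.0 Juju has no network-get; fall back to the unit address.
        pass
    return get_host_ip()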

@@ -40,9 +40,34 @@ Once the ceph charm has bootstrapped the cluster, it will notify the ceph-osd
charm which will scan for the configured storage devices and add them to the
pool of available storage.

Network Space support
=====================

This charm supports the use of Juju Network Spaces, allowing the charm to be
bound to network space configurations managed directly by Juju. This is only
supported with Juju 2.0 and above.

Network traffic can be bound to specific network spaces using the public
(front-side) and cluster (back-side) bindings:

    juju deploy ceph-osd --bind "public=data-space cluster=cluster-space"

Alternatively, these bindings can be provided as part of a Juju native bundle
configuration:

    ceph-osd:
      charm: cs:xenial/ceph-osd
      num_units: 1
      bindings:
        public: data-space
        cluster: cluster-space

Please refer to the [Ceph Network Reference](http://docs.ceph.com/docs/master/rados/configuration/network-config-ref)
for details on how using these options affects network traffic within a Ceph
deployment; an illustrative configuration fragment follows the notes below.

**NOTE:** Spaces must be configured in the underlying provider prior to
attempting to use them.

**NOTE:** Existing deployments using ceph-*-network configuration options will
continue to function; these options are preferred over any network space
binding provided if set.
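
As a rough illustration, binding the charm as above results in a rendered
ceph.conf whose addresses are drawn from the bound spaces. The fragment below
is a sketch with hypothetical addresses, not the charm template's exact output:

    [global]
    # client-facing traffic, resolved from the 'public' binding
    public addr = 10.20.40.2
    # replication/heartbeat traffic, resolved from the 'cluster' binding
    cluster addr = 10.20.50.2
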
Contact Information
===================

Author: James Page <james.page@ubuntu.com>
Report bugs at: http://bugs.launchpad.net/charms/+source/ceph-osd/+filebug
Location: http://jujucharms.com/charms/ceph-osd

@@ -54,6 +54,8 @@ from utils import (
    assert_charm_supports_ipv6,
    render_template,
    is_unit_paused_set,
    get_public_addr,
    get_cluster_addr,
)
from charmhelpers.contrib.openstack.alternatives import install_alternative
@@ -311,6 +313,9 @@ def emit_cephconf():
        cephcontext['public_addr'] = dynamic_ipv6_address
        if not cluster_network:
            cephcontext['cluster_addr'] = dynamic_ipv6_address
    else:
        cephcontext['public_addr'] = get_public_addr()
        cephcontext['cluster_addr'] = get_cluster_addr()

    if az_info():
        cephcontext['crush_location'] = "root=default rack={} host={}" \

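For orientation: when neither IPv6 mode nor the ceph-*-network options apply,
the new else branch above fills the template context from the space bindings.
A hypothetical result (addresses are illustrative only, not from the commit):

# Sketch of the context fragment handed to the ceph.conf template:
cephcontext = {
    'public_addr': '10.20.40.2',   # get_public_addr() via the 'public' binding
    'cluster_addr': '10.20.50.2',  # get_cluster_addr() via the 'cluster' binding
}
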
@@ -13,6 +13,9 @@ from charmhelpers.core.hookenv import (
    unit_get,
    cached,
    config,
    network_get_primary_address,
    log, DEBUG,
    status_set,
)
from charmhelpers.core import unitdata
from charmhelpers.fetch import (
@@ -88,6 +91,32 @@ def get_host_ip(hostname=None):
            return answers[0].address


@cached
def get_public_addr():
    if config('ceph-public-network'):
        return get_network_addrs('ceph-public-network')[0]

    try:
        return network_get_primary_address('public')
    except NotImplementedError:
        log("network-get not supported", DEBUG)

    return get_host_ip()


@cached
def get_cluster_addr():
    if config('ceph-cluster-network'):
        return get_network_addrs('ceph-cluster-network')[0]

    try:
        return network_get_primary_address('cluster')
    except NotImplementedError:
        log("network-get not supported", DEBUG)

    return get_host_ip()


def get_networks(config_opt='ceph-public-network'):
    """Get all configured networks from provided config option.
@@ -102,6 +131,31 @@ def get_networks(config_opt='ceph-public-network'):
    return []


def get_network_addrs(config_opt):
    """Get all configured public networks addresses.

    If public network(s) are provided, go through them and return the
    addresses we have configured on any of those networks.
    """
    addrs = []
    networks = config(config_opt)
    if networks:
        networks = networks.split()
        addrs = [get_address_in_network(n) for n in networks]
        addrs = [a for a in addrs if a]

    if not addrs:
        if networks:
            msg = ("Could not find an address on any of '%s' - resolve this "
                   "error to retry" % (networks))
            status_set('blocked', msg)
            raise Exception(msg)
        else:
            return [get_host_ip()]

    return addrs


def assert_charm_supports_ipv6():
    """Check whether we are able to support charms ipv6."""
    if lsb_release()['DISTRIB_CODENAME'].lower() < "trusty":

@@ -16,6 +16,9 @@ description: |
  .
  This charm provides the Ceph OSD personality for expanding storage capacity
  within a ceph deployment.
extra-bindings:
  public:
  cluster:
requires:
  mon:
    interface: ceph-osd
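
With the public and cluster extra-bindings declared above, hook code can ask
Juju for the address assigned within a bound space. A minimal sketch using the
same charmhelpers API the hooks import (the address shown is hypothetical):

from charmhelpers.core.hookenv import network_get_primary_address

# Inside a hook on Juju 2.0+, the 'public' extra-binding resolves to an
# address in whichever space it was bound to at deploy time:
addr = network_get_primary_address('public')  # e.g. '10.20.40.2'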

@@ -0,0 +1,51 @@
import test_utils

import charmhelpers.core.hookenv as hookenv
import utils as ceph_utils

TO_PATCH_SPACES = [
    'network_get_primary_address',
    'log',
    'get_host_ip',
    'config',
    'get_network_addrs',
    'cached',
]


class CephNetworkSpaceTestCase(test_utils.CharmTestCase):
    def setUp(self):
        super(CephNetworkSpaceTestCase, self).setUp(ceph_utils,
                                                    TO_PATCH_SPACES)
        self.config.side_effect = self.test_config.get

    def tearDown(self):
        # Reset @cached cache
        hookenv.cache = {}

    def test_no_network_space_support(self):
        self.get_host_ip.return_value = '192.168.2.1'
        self.network_get_primary_address.side_effect = NotImplementedError
        self.assertEqual(ceph_utils.get_cluster_addr(),
                         '192.168.2.1')
        self.assertEqual(ceph_utils.get_public_addr(),
                         '192.168.2.1')

    def test_public_network_space(self):
        self.network_get_primary_address.return_value = '10.20.40.2'
        self.assertEqual(ceph_utils.get_public_addr(),
                         '10.20.40.2')
        self.network_get_primary_address.assert_called_with('public')
        self.config.assert_called_with('ceph-public-network')

    def test_cluster_network_space(self):
        self.network_get_primary_address.return_value = '10.20.50.2'
        self.assertEqual(ceph_utils.get_cluster_addr(),
                         '10.20.50.2')
        self.network_get_primary_address.assert_called_with('cluster')
        self.config.assert_called_with('ceph-cluster-network')

    def test_config_options_in_use(self):
        self.get_network_addrs.return_value = ['192.122.20.2']
        self.test_config.set('ceph-cluster-network', '192.122.20.0/24')
        self.assertEqual(ceph_utils.get_cluster_addr(),
                         '192.122.20.2')
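
Since get_public_addr and get_cluster_addr are wrapped in charmhelpers'
@cached decorator, which memoises results in the module-level hookenv.cache
dict, the tearDown above resets that dict so values cannot leak between
tests. A minimal standalone sketch of the mechanism:

from charmhelpers.core.hookenv import cached, cache

calls = []

@cached
def lookup(binding):
    calls.append(binding)
    return binding

lookup('public')
lookup('public')
assert calls == ['public']  # the second call was served from hookenv.cache
cache.clear()               # same effect as the tearDown reset above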