diff --git a/README.md b/README.md
index 0f3173af..7b599a7d 100644
--- a/README.md
+++ b/README.md
@@ -40,9 +40,34 @@
 Once the ceph charm has bootstrapped the cluster, it will notify the ceph-osd
 charm which will scan for the configured storage devices and add them to the
 pool of available storage.
 
+Network Space support
+=====================
+
+This charm supports the use of Juju Network Spaces, allowing the charm to be bound to network space configurations managed directly by Juju. This is only supported with Juju 2.0 and above.
+
+Network traffic can be bound to specific network spaces using the public (front-side) and cluster (back-side) bindings:
+
+    juju deploy ceph-osd --bind "public=data-space cluster=cluster-space"
+
+Alternatively, these can be provided as part of a Juju native bundle configuration:
+
+    ceph-osd:
+      charm: cs:xenial/ceph-osd
+      num_units: 1
+      bindings:
+        public: data-space
+        cluster: cluster-space
+
+Please refer to the [Ceph Network Reference](http://docs.ceph.com/docs/master/rados/configuration/network-config-ref) for details on how using these options affects network traffic within a Ceph deployment.
+
+**NOTE:** Spaces must be configured in the underlying provider prior to attempting to use them.
+
+**NOTE:** Existing deployments using ceph-*-network configuration options will continue to function; if set, these options take precedence over any network space binding.
+
+
 Contact Information
 ===================
 Author: James Page
 Report bugs at: http://bugs.launchpad.net/charms/+source/ceph-osd/+filebug
-Location: http://jujucharms.com/charms/ceph-osd
\ No newline at end of file
+Location: http://jujucharms.com/charms/ceph-osd
diff --git a/hooks/ceph_hooks.py b/hooks/ceph_hooks.py
index 5ba176cf..5c47350b 100755
--- a/hooks/ceph_hooks.py
+++ b/hooks/ceph_hooks.py
@@ -54,6 +54,8 @@ from utils import (
     assert_charm_supports_ipv6,
     render_template,
     is_unit_paused_set,
+    get_public_addr,
+    get_cluster_addr,
 )
 from charmhelpers.contrib.openstack.alternatives import install_alternative
 
@@ -311,6 +313,9 @@ def emit_cephconf():
         cephcontext['public_addr'] = dynamic_ipv6_address
         if not cluster_network:
             cephcontext['cluster_addr'] = dynamic_ipv6_address
+    else:
+        cephcontext['public_addr'] = get_public_addr()
+        cephcontext['cluster_addr'] = get_cluster_addr()
 
     if az_info():
         cephcontext['crush_location'] = "root=default rack={} host={}" \
diff --git a/hooks/utils.py b/hooks/utils.py
index b6fa3744..f4069bef 100644
--- a/hooks/utils.py
+++ b/hooks/utils.py
@@ -13,6 +13,9 @@ from charmhelpers.core.hookenv import (
     unit_get,
     cached,
     config,
+    network_get_primary_address,
+    log, DEBUG,
+    status_set,
 )
 from charmhelpers.core import unitdata
 from charmhelpers.fetch import (
@@ -88,6 +91,34 @@ def get_host_ip(hostname=None):
     return answers[0].address
 
 
+@cached
+def get_public_addr():
+    """Return the public (front-side) address for this unit."""
+    if config('ceph-public-network'):
+        return get_network_addrs('ceph-public-network')[0]
+
+    try:
+        return network_get_primary_address('public')
+    except NotImplementedError:
+        log("network-get not supported", DEBUG)
+
+    return get_host_ip()
+
+
+@cached
+def get_cluster_addr():
+    """Return the cluster (back-side) address for this unit."""
+    if config('ceph-cluster-network'):
+        return get_network_addrs('ceph-cluster-network')[0]
+
+    try:
+        return network_get_primary_address('cluster')
+    except NotImplementedError:
+        log("network-get not supported", DEBUG)
+
+    return get_host_ip()
+
+
 def get_networks(config_opt='ceph-public-network'):
     """Get all configured networks from provided config option.
 
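Both new helpers follow the same precedence: an explicit ceph-public-network/ceph-cluster-network option wins, then the address Juju reports for the space binding via network-get, then the unit's default address. A minimal sketch of that rule in isolation, making no assumptions about the charm itself (resolve_addr and its argument names are illustrative only, not part of this code):

    def resolve_addr(config_net_addr, binding_addr, unit_addr):
        # An explicit ceph-*-network option always wins.
        if config_net_addr:
            return config_net_addr
        # Next preference: the space binding address from network-get;
        # None here models a Juju that raises NotImplementedError.
        if binding_addr:
            return binding_addr
        # Last resort: the unit's default (unit-get) address.
        return unit_addr

    resolve_addr(None, '10.20.40.2', '192.168.2.1')  # -> '10.20.40.2'
    resolve_addr(None, None, '192.168.2.1')          # -> '192.168.2.1'
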
@@ -102,6 +133,31 @@ def get_networks(config_opt='ceph-public-network'):
     return []
 
 
+def get_network_addrs(config_opt):
+    """Get all configured network addresses for the given config option.
+
+    If network(s) are configured, go through them and return the local
+    addresses found on any of those networks.
+    """
+    addrs = []
+    networks = config(config_opt)
+    if networks:
+        networks = networks.split()
+        addrs = [get_address_in_network(n) for n in networks]
+        addrs = [a for a in addrs if a]
+
+    if not addrs:
+        if networks:
+            msg = ("Could not find an address on any of '%s' - resolve this "
+                   "error to retry" % networks)
+            status_set('blocked', msg)
+            raise Exception(msg)
+        else:
+            return [get_host_ip()]
+
+    return addrs
+
+
 def assert_charm_supports_ipv6():
     """Check whether we are able to support charms ipv6."""
     if lsb_release()['DISTRIB_CODENAME'].lower() < "trusty":
diff --git a/metadata.yaml b/metadata.yaml
index dad778db..a48b5d31 100644
--- a/metadata.yaml
+++ b/metadata.yaml
@@ -16,6 +16,9 @@ description: |
   .
   This charm provides the Ceph OSD personality for expanding storage
   capacity within a ceph deployment.
+extra-bindings:
+  public:
+  cluster:
 requires:
   mon:
     interface: ceph-osd
diff --git a/unit_tests/test_ceph_networking.py b/unit_tests/test_ceph_networking.py
new file mode 100644
index 00000000..ae3a7ff5
--- /dev/null
+++ b/unit_tests/test_ceph_networking.py
@@ -0,0 +1,51 @@
+import test_utils
+import charmhelpers.core.hookenv as hookenv
+import utils as ceph_utils
+
+TO_PATCH_SPACES = [
+    'network_get_primary_address',
+    'log',
+    'get_host_ip',
+    'config',
+    'get_network_addrs',
+    'cached',
+]
+
+
+class CephNetworkSpaceTestCase(test_utils.CharmTestCase):
+    def setUp(self):
+        super(CephNetworkSpaceTestCase, self).setUp(ceph_utils,
+                                                    TO_PATCH_SPACES)
+        self.config.side_effect = self.test_config.get
+
+    def tearDown(self):
+        # Reset @cached cache
+        hookenv.cache = {}
+
+    def test_no_network_space_support(self):
+        self.get_host_ip.return_value = '192.168.2.1'
+        self.network_get_primary_address.side_effect = NotImplementedError
+        self.assertEqual(ceph_utils.get_cluster_addr(),
+                         '192.168.2.1')
+        self.assertEqual(ceph_utils.get_public_addr(),
+                         '192.168.2.1')
+
+    def test_public_network_space(self):
+        self.network_get_primary_address.return_value = '10.20.40.2'
+        self.assertEqual(ceph_utils.get_public_addr(),
+                         '10.20.40.2')
+        self.network_get_primary_address.assert_called_with('public')
+        self.config.assert_called_with('ceph-public-network')
+
+    def test_cluster_network_space(self):
+        self.network_get_primary_address.return_value = '10.20.50.2'
+        self.assertEqual(ceph_utils.get_cluster_addr(),
+                         '10.20.50.2')
+        self.network_get_primary_address.assert_called_with('cluster')
+        self.config.assert_called_with('ceph-cluster-network')
+
+    def test_config_options_in_use(self):
+        self.get_network_addrs.return_value = ['192.122.20.2']
+        self.test_config.set('ceph-cluster-network', '192.122.20.0/24')
+        self.assertEqual(ceph_utils.get_cluster_addr(),
+                         '192.122.20.2')
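The contract pinned down by the new get_network_addrs() is worth spelling out: given configured network(s), it returns the unit's addresses on those networks, and it blocks rather than guessing when none match. A standalone sketch of that contract, with a stubbed lookup standing in for charmhelpers' get_address_in_network (network_addrs and lookup are illustrative names only, not part of the charm):

    def network_addrs(networks, lookup):
        # lookup(network) returns this unit's address on that network,
        # or None when the unit has no address there.
        addrs = [a for a in (lookup(n) for n in networks.split()) if a]
        if not addrs:
            # The charm also sets a 'blocked' workload status before raising.
            raise Exception("Could not find an address on any of '%s' - "
                            "resolve this error to retry" % networks)
        return addrs

    # A unit holding 192.122.20.2 on the configured cluster network:
    network_addrs('192.122.20.0/24',
                  lambda n: '192.122.20.2' if n == '192.122.20.0/24' else None)
    # -> ['192.122.20.2']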