Add support for erasure coded ceph pools
Add standard configuration options for erasure-coded pool creation and profile configuration. Update the ceph relation joined handler to support erasure-coded pools - a replicated metadata pool is created at 1% of the total data weight alongside the erasure coding profile and the supporting erasure-coded data pool. Update the ceph context to use the metadata pool name in the glance configuration files when the erasure-coded pool-type is configured. Resync charmhelpers to update Ceph broker request support for erasure coding.

Change-Id: If4a31a2adf8080af66885adb970fbb3cdd82f573
Depends-On: Iec4de19f7b39f0b08158d96c5cc1561b40aefa10
This commit is contained in: parent 544bc37618, commit ed8bdede6f
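In outline, for an erasure-coded deployment the handler enqueues three broker operations. A minimal sketch follows (plain Python; the names are placeholders and the dict keys are illustrative, not the exact Ceph broker wire format - see the diff below for the real calls):

    # Sketch of the three ops queued for pool-type=erasure-coded.
    service = 'glance'                 # hypothetical application name
    weight = 6.0                       # ceph-pool-weight (% of cluster data)
    metadata_weight = weight * 0.01    # replicated metadata pool at ~1%
    ops = [
        {'op': 'create-pool', 'name': service + '-metadata',
         'weight': metadata_weight},                     # 1. metadata pool
        {'op': 'create-erasure-profile',
         'name': service + '-profile', 'k': 1, 'm': 2},  # 2. EC profile
        {'op': 'create-pool', 'name': service, 'pool-type': 'erasure-coded',
         'erasure-profile': service + '-profile',
         'weight': weight - metadata_weight},            # 3. EC data pool
    ]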
@@ -29,6 +29,8 @@ from subprocess import check_call, CalledProcessError

import six

import charmhelpers.contrib.storage.linux.ceph as ch_ceph

from charmhelpers.contrib.openstack.audits.openstack_security_guide import (
    _config_ini as config_ini
)
@@ -56,6 +58,7 @@ from charmhelpers.core.hookenv import (
    status_set,
    network_get_primary_address,
    WARNING,
    service_name,
)

from charmhelpers.core.sysctl import create as sysctl_create
@@ -808,6 +811,12 @@ class CephContext(OSContextGenerator):

        ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts))

        if config('pool-type') and config('pool-type') == 'erasure-coded':
            base_pool_name = config('rbd-pool') or config('rbd-pool-name')
            if not base_pool_name:
                base_pool_name = service_name()
            ctxt['rbd_default_data_pool'] = base_pool_name

        if not os.path.isdir('/etc/ceph'):
            os.mkdir('/etc/ceph')
@@ -3175,3 +3184,78 @@ class SRIOVContext(OSContextGenerator):
        :rtype: Dict[str,int]
        """
        return self._map


class CephBlueStoreCompressionContext(OSContextGenerator):
    """Ceph BlueStore compression options."""

    # Tuple of tuples that map configuration option name to CephBrokerRq op
    # property name
    options = (
        ('bluestore-compression-algorithm',
         'compression-algorithm'),
        ('bluestore-compression-mode',
         'compression-mode'),
        ('bluestore-compression-required-ratio',
         'compression-required-ratio'),
        ('bluestore-compression-min-blob-size',
         'compression-min-blob-size'),
        ('bluestore-compression-min-blob-size-hdd',
         'compression-min-blob-size-hdd'),
        ('bluestore-compression-min-blob-size-ssd',
         'compression-min-blob-size-ssd'),
        ('bluestore-compression-max-blob-size',
         'compression-max-blob-size'),
        ('bluestore-compression-max-blob-size-hdd',
         'compression-max-blob-size-hdd'),
        ('bluestore-compression-max-blob-size-ssd',
         'compression-max-blob-size-ssd'),
    )

    def __init__(self):
        """Initialize context by loading values from charm config.

        We keep two maps, one suitable for use with CephBrokerRq's and one
        suitable for template generation.
        """
        charm_config = config()

        # CephBrokerRq op map
        self.op = {}
        # Context exposed for template generation
        self.ctxt = {}
        for config_key, op_key in self.options:
            value = charm_config.get(config_key)
            self.ctxt.update({config_key.replace('-', '_'): value})
            self.op.update({op_key: value})

    def __call__(self):
        """Get context.

        :returns: Context
        :rtype: Dict[str,any]
        """
        return self.ctxt

    def get_op(self):
        """Get values for use in CephBrokerRq op.

        :returns: Context values with CephBrokerRq op property name as key.
        :rtype: Dict[str,any]
        """
        return self.op

    def validate(self):
        """Validate options.

        :raises: AssertionError
        """
        # We slip in a dummy name on class instantiation to allow validation
        # of the other options. It will not affect further use.
        #
        # NOTE: once we retire Python 3.5 we can fold this into an in-line
        # dictionary comprehension in the call to the initializer.
        dummy_op = {'name': 'dummy-name'}
        dummy_op.update(self.op)
        pool = ch_ceph.BasePool('dummy-service', op=dummy_op)
        pool.validate()
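A hedged sketch of how a charm could consume this context: validate first, then pick up the non-empty op values for a subsequent pool-creation broker op (the exact wiring into CephBrokerRq is left to the calling charm):

    # Sketch only, not the charm's actual wiring.
    compression = CephBlueStoreCompressionContext()
    try:
        compression.validate()          # raises AssertionError when invalid
    except AssertionError as err:
        status_set('blocked',
                   'invalid compression config: {}'.format(err))
    else:
        # e.g. {'compression-mode': 'aggressive'}; ready to be merged
        # into a create-pool op on a CephBrokerRq by the caller.
        op_values = {k: v for k, v in compression.get_op().items()
                     if v is not None}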
@@ -22,3 +22,7 @@ rbd default features = {{ rbd_features }}
{{ key }} = {{ value }}
{% endfor -%}
{%- endif %}

{% if rbd_default_data_pool -%}
rbd default data pool = {{ rbd_default_data_pool }}
{% endif %}
@@ -0,0 +1,28 @@
{# section header omitted as options can belong to multiple sections #}
{% if bluestore_compression_algorithm -%}
bluestore compression algorithm = {{ bluestore_compression_algorithm }}
{% endif -%}
{% if bluestore_compression_mode -%}
bluestore compression mode = {{ bluestore_compression_mode }}
{% endif -%}
{% if bluestore_compression_required_ratio -%}
bluestore compression required ratio = {{ bluestore_compression_required_ratio }}
{% endif -%}
{% if bluestore_compression_min_blob_size -%}
bluestore compression min blob size = {{ bluestore_compression_min_blob_size }}
{% endif -%}
{% if bluestore_compression_min_blob_size_hdd -%}
bluestore compression min blob size hdd = {{ bluestore_compression_min_blob_size_hdd }}
{% endif -%}
{% if bluestore_compression_min_blob_size_ssd -%}
bluestore compression min blob size ssd = {{ bluestore_compression_min_blob_size_ssd }}
{% endif -%}
{% if bluestore_compression_max_blob_size -%}
bluestore compression max blob size = {{ bluestore_compression_max_blob_size }}
{% endif -%}
{% if bluestore_compression_max_blob_size_hdd -%}
bluestore compression max blob size hdd = {{ bluestore_compression_max_blob_size_hdd }}
{% endif -%}
{% if bluestore_compression_max_blob_size_ssd -%}
bluestore compression max blob size ssd = {{ bluestore_compression_max_blob_size_ssd }}
{% endif -%}
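For illustration, one conditional from the template above rendered with the context this change introduces (a minimal sketch using jinja2 directly; the value is a placeholder):

    # Sketch: rendering one snippet of the compression template.
    from jinja2 import Template

    snippet = ("{% if bluestore_compression_mode -%}\n"
               "bluestore compression mode = {{ bluestore_compression_mode }}\n"
               "{% endif -%}\n")
    print(Template(snippet).render(bluestore_compression_mode='aggressive'))
    # -> bluestore compression mode = aggressive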
File diff suppressed because it is too large

config.yaml
@@ -115,6 +115,104 @@ options:
    type: string
    description: |
      Optionally specify an existing rbd pool that cinder should map to.
  pool-type:
    type: string
    default: replicated
    description: |
      Ceph pool type to use for storage - valid values include 'replicated'
      and 'erasure-coded'.
  ec-profile-name:
    type: string
    default:
    description: |
      Name for the EC profile to be created for the EC pools. If not defined
      a profile name will be generated based on the name of the pool used by
      the application.
  ec-rbd-metadata-pool:
    type: string
    default:
    description: |
      Name of the metadata pool to be created (for RBD use-cases). If not
      defined a metadata pool name will be generated based on the name of
      the data pool used by the application. The metadata pool is always
      replicated, not erasure coded.
  ec-profile-k:
    type: int
    default: 1
    description: |
      Number of data chunks that will be used for EC data pool. K+M factors
      should never be greater than the number of available zones (or hosts)
      for balancing.
  ec-profile-m:
    type: int
    default: 2
    description: |
      Number of coding chunks that will be used for EC data pool. K+M factors
      should never be greater than the number of available zones (or hosts)
      for balancing.
  ec-profile-locality:
    type: int
    default:
    description: |
      (lrc plugin - l) Group the coding and data chunks into sets of size l.
      For instance, for k=4 and m=2, when l=3 two groups of three are created.
      Each set can be recovered without reading chunks from another set. Note
      that using the lrc plugin does incur more raw storage usage than isa or
      jerasure in order to reduce the cost of recovery operations.
  ec-profile-crush-locality:
    type: string
    default:
    description: |
      (lrc plugin) The type of the CRUSH bucket in which each set of chunks
      defined by l will be stored. For instance, if it is set to rack, each
      group of l chunks will be placed in a different rack. It is used to
      create a CRUSH rule step such as 'step choose rack'. If it is not set,
      no such grouping is done.
  ec-profile-durability-estimator:
    type: int
    default:
    description: |
      (shec plugin - c) The number of parity chunks each of which includes
      each data chunk in its calculation range. The number is used as a
      durability estimator. For instance, if c=2, 2 OSDs can be down
      without losing data.
  ec-profile-helper-chunks:
    type: int
    default:
    description: |
      (clay plugin - d) Number of OSDs requested to send data during
      recovery of a single chunk. d needs to be chosen such that
      k+1 <= d <= k+m-1. The larger the d, the better the savings.
  ec-profile-scalar-mds:
    type: string
    default:
    description: |
      (clay plugin) Specifies the plugin that is used as a building
      block in the layered construction. It can be one of jerasure,
      isa or shec (defaults to jerasure).
  ec-profile-plugin:
    type: string
    default: jerasure
    description: |
      EC plugin to use for this application's pool. The following plugins
      are acceptable - jerasure, lrc, isa, shec, clay.
  ec-profile-technique:
    type: string
    default:
    description: |
      EC profile technique used for this application's pool - will be
      validated based on the plugin configured via ec-profile-plugin.
      Supported techniques are 'reed_sol_van', 'reed_sol_r6_op',
      'cauchy_orig', 'cauchy_good', 'liber8tion' for jerasure,
      'reed_sol_van', 'cauchy' for isa and 'single', 'multiple'
      for shec.
  ec-profile-device-class:
    type: string
    default:
    description: |
      Device class from CRUSH map to use for placement groups for
      erasure profile - valid values: ssd, hdd or nvme (or leave
      unset to not use a device class).
  worker-multiplier:
    type: float
    default:
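The chunk-count constraints called out in the option descriptions above can be sanity-checked locally; a sketch (illustrative only - the authoritative validation happens in Ceph when the profile is created):

    # Illustrative checks for the EC profile options described above.
    def check_ec_profile(k, m, d=None, hosts=3):
        assert k + m <= hosts, "k+m must not exceed available zones/hosts"
        if d is not None:  # clay plugin helper chunks
            assert k + 1 <= d <= k + m - 1, \
                "d must satisfy k+1 <= d <= k+m-1"

    check_ec_profile(k=1, m=2, hosts=3)       # the charm defaults
    check_ec_profile(k=4, m=2, d=5, hosts=6)  # a clay example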
@@ -142,7 +240,7 @@ options:
      True.
      WARNING: enabling this restriction will cause Nova to no longer be able
      to create COW clones or snapshots for non-admin users when using the
      RBDImageBackend in the nova-compute charm.
  rabbit-user:
    type: string
    default: glance
@@ -111,10 +111,17 @@ class CephGlanceContext(OSContextGenerator):
                keys="key"):
            return {}
        service = service_name()
        if config('pool-type') == 'erasure-coded':
            pool_name = (
                config('ec-rbd-metadata-pool') or
                "{}-metadata".format(config('rbd-pool-name') or
                                     service)
            )
        else:
            if config('rbd-pool-name'):
                pool_name = config('rbd-pool-name')
            else:
                pool_name = service
        return {
            # pool created based on service name.
            'rbd_pool': pool_name,
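The resulting pool-name precedence, restated as a standalone sketch (cfg stands in for the charm's config(); the assertions mirror the unit tests further down):

    def glance_rbd_pool(cfg, service='glance'):
        # EC: glance must point at the replicated metadata pool.
        if cfg.get('pool-type') == 'erasure-coded':
            return (cfg.get('ec-rbd-metadata-pool') or
                    '{}-metadata'.format(cfg.get('rbd-pool-name') or service))
        # Replicated: explicit override, else the service name.
        return cfg.get('rbd-pool-name') or service

    assert glance_rbd_pool({'pool-type': 'erasure-coded'}) == 'glance-metadata'
    assert glance_rbd_pool({'pool-type': 'erasure-coded',
                            'rbd-pool-name': 'foobar'}) == 'foobar-metadata'
    assert glance_rbd_pool({'pool-type': 'erasure-coded',
                            'rbd-pool-name': 'foobar',
                            'ec-rbd-metadata-pool': 'x-meta'}) == 'x-meta'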
@@ -306,11 +306,73 @@ def get_ceph_request():
        pool_name = config('rbd-pool-name')
    else:
        pool_name = service

    rq = CephBrokerRq()
    replicas = config('ceph-osd-replication-count')
    weight = config('ceph-pool-weight')

    if config('pool-type') == 'erasure-coded':
        # General EC plugin config
        plugin = config('ec-profile-plugin')
        technique = config('ec-profile-technique')
        device_class = config('ec-profile-device-class')
        metadata_pool_name = (
            config('ec-rbd-metadata-pool') or
            "{}-metadata".format(service)
        )
        bdm_k = config('ec-profile-k')
        bdm_m = config('ec-profile-m')
        # LRC plugin config
        bdm_l = config('ec-profile-locality')
        crush_locality = config('ec-profile-crush-locality')
        # SHEC plugin config
        bdm_c = config('ec-profile-durability-estimator')
        # CLAY plugin config
        bdm_d = config('ec-profile-helper-chunks')
        scalar_mds = config('ec-profile-scalar-mds')
        # Profile name
        profile_name = (
            config('ec-profile-name') or "{}-profile".format(service)
        )
        # Metadata sizing is approximately 1% of overall data weight
        # but is in effect driven by the number of rbd's rather than
        # their size - so it can be very lightweight.
        metadata_weight = weight * 0.01
        # Resize data pool weight to accommodate metadata weight
        weight = weight - metadata_weight
        # Create metadata pool
        rq.add_op_create_pool(
            name=metadata_pool_name, replica_count=replicas,
            weight=metadata_weight, group='images', app_name='rbd'
        )

        # Create erasure profile
        rq.add_op_create_erasure_profile(
            name=profile_name,
            k=bdm_k, m=bdm_m,
            lrc_locality=bdm_l,
            lrc_crush_locality=crush_locality,
            shec_durability_estimator=bdm_c,
            clay_helper_chunks=bdm_d,
            clay_scalar_mds=scalar_mds,
            device_class=device_class,
            erasure_type=plugin,
            erasure_technique=technique
        )

        # Create EC data pool
        rq.add_op_create_erasure_pool(
            name=pool_name,
            erasure_profile=profile_name,
            weight=weight,
            group="images",
            app_name="rbd",
            allow_ec_overwrites=True
        )
    else:
        rq.add_op_create_pool(name=pool_name, replica_count=replicas,
                              weight=weight, group='images', app_name='rbd')

    if config('restrict-ceph-pools'):
        rq.add_op_request_access_to_group(
            name="images",
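A worked example of the weight split above (it matches the unit test further down): with ceph-pool-weight set to 6, the replicated metadata pool gets 1% and the EC data pool the remainder.

    weight = 6.0                             # ceph-pool-weight
    metadata_weight = weight * 0.01          # replicated metadata pool
    data_weight = weight - metadata_weight   # EC data pool
    print(round(metadata_weight, 2), round(data_weight, 2))  # 0.06 5.94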
@@ -109,8 +109,10 @@ class TestGlanceContexts(CharmTestCase):
        self.service_name.return_value = service
        conf_dict = {
            'rbd-pool-name': None,
            'expose-image-locations': True,
            'pool-type': 'replicated',
        }
        self.config.side_effect = lambda x: conf_dict.get(x)
        self.assertEqual(
            contexts.CephGlanceContext()(),
            {'rbd_pool': service,
@@ -120,12 +122,50 @@ class TestGlanceContexts(CharmTestCase):
        # Check user supplied pool name:
        conf_dict = {
            'rbd-pool-name': 'mypoolname',
            'expose-image-locations': True,
            'pool-type': 'replicated',
        }
        self.assertEqual(
            contexts.CephGlanceContext()(),
            {'rbd_pool': 'mypoolname',
             'rbd_user': service,
             'expose_image_locations': True})
        # Check erasure-coded pool type
        conf_dict = {
            'rbd-pool-name': None,
            'expose-image-locations': True,
            'pool-type': 'erasure-coded',
            'ec-rbd-metadata-pool': None,
        }
        self.assertEqual(
            contexts.CephGlanceContext()(),
            {'rbd_pool': "{}-metadata".format(service),
             'rbd_user': service,
             'expose_image_locations': True})
        # Ensure rbd-pool-name used for metadata pool name
        conf_dict = {
            'rbd-pool-name': 'foobar',
            'expose-image-locations': True,
            'pool-type': 'erasure-coded',
            'ec-rbd-metadata-pool': None,
        }
        self.assertEqual(
            contexts.CephGlanceContext()(),
            {'rbd_pool': "foobar-metadata",
             'rbd_user': service,
             'expose_image_locations': True})
        # Ensure ec-rbd-metadata-pool overrides everything
        conf_dict = {
            'rbd-pool-name': 'foobar',
            'expose-image-locations': True,
            'pool-type': 'erasure-coded',
            'ec-rbd-metadata-pool': 'another-metadata',
        }
        self.assertEqual(
            contexts.CephGlanceContext()(),
            {'rbd_pool': "another-metadata",
             'rbd_user': service,
             'expose_image_locations': True})

    def test_multistore_below_mitaka(self):
        self.os_release.return_value = 'liberty'
@@ -384,6 +384,53 @@ class GlanceRelationTests(CharmTestCase):
                permission='rwx'),
        ])

    @patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq'
           '.add_op_create_erasure_pool')
    @patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq'
           '.add_op_create_erasure_profile')
    @patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq'
           '.add_op_request_access_to_group')
    @patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq'
           '.add_op_create_pool')
    def test_create_ec_pool_op(self, mock_create_pool,
                               mock_request_access,
                               mock_create_erasure_profile,
                               mock_create_erasure_pool):
        self.service_name.return_value = 'glance'
        self.test_config.set('ceph-osd-replication-count', 3)
        self.test_config.set('ceph-pool-weight', 6)
        self.test_config.set('pool-type', 'erasure-coded')
        self.test_config.set('ec-profile-plugin', 'isa')
        self.test_config.set('ec-profile-k', 6)
        self.test_config.set('ec-profile-m', 2)
        relations.get_ceph_request()
        mock_create_pool.assert_called_once_with(
            name='glance-metadata',
            replica_count=3,
            weight=0.06,
            group='images',
            app_name='rbd')
        mock_create_erasure_profile.assert_called_once_with(
            name='glance-profile',
            k=6, m=2,
            lrc_locality=None,
            lrc_crush_locality=None,
            shec_durability_estimator=None,
            clay_helper_chunks=None,
            clay_scalar_mds=None,
            device_class=None,
            erasure_type='isa',
            erasure_technique=None,
        )
        mock_create_erasure_pool.assert_called_once_with(
            name='glance',
            erasure_profile='glance-profile',
            weight=5.94,
            group='images',
            app_name='rbd',
            allow_ec_overwrites=True)
        mock_request_access.assert_not_called()

    @patch.object(relations, 'get_ceph_request')
    @patch.object(relations, 'send_request_if_needed')
    @patch.object(relations, 'is_request_complete')