Merge "Add share server limits"

Zuul 2021-03-12 02:59:45 +00:00 committed by Gerrit Code Review
commit e83517b662
9 changed files with 276 additions and 2 deletions

@@ -138,6 +138,14 @@ share_opts = [
cfg.StrOpt('goodness_function',
help='String representation for an equation that will be '
'used to determine the goodness of a host.'),
cfg.IntOpt('max_shares_per_share_server',
default=-1,
help="Maximum number of share instances created in a share "
"server."),
cfg.IntOpt('max_share_server_size',
default=-1,
help="Maximum sum of gigabytes a share server can have "
"considering all its share instances and snapshots.")
]
ssh_opts = [
@@ -319,6 +327,19 @@ class ShareDriver(object):
return self.configuration.safe_get('replication_domain')
return CONF.replication_domain
@property
def max_shares_per_share_server(self):
if self.configuration:
return self.configuration.safe_get(
'max_shares_per_share_server') or -1
return CONF.max_shares_per_share_server
@property
def max_share_server_size(self):
if self.configuration:
return self.configuration.safe_get('max_share_server_size') or -1
return CONF.max_share_server_size
def _verify_share_server_handling(self, driver_handles_share_servers):
"""Verifies driver_handles_share_servers and given configuration."""
if not isinstance(self.driver_handles_share_servers, bool):
@@ -1298,6 +1319,13 @@ class ShareDriver(object):
if isinstance(data, dict):
common.update(data)
if self.driver_handles_share_servers:
common.update({
'max_shares_per_share_server':
self.max_shares_per_share_server,
'max_share_server_size': self.max_share_server_size
})
sg_stats = data.get('share_group_stats', {}) if data else {}
common['share_group_stats'] = {
'consistent_snapshot_support': sg_stats.get(

@@ -512,6 +512,68 @@ class ShareManager(manager.SchedulerDependentManager):
self.db.share_export_locations_update(
ctxt, share_instance['id'], export_locations)
def _check_share_server_backend_limits(
self, context, available_share_servers, share_instance=None):
max_shares_limit = self.driver.max_shares_per_share_server
max_server_size = self.driver.max_share_server_size
if max_server_size == max_shares_limit == -1:
return available_share_servers
for ss in available_share_servers[:]:
share_instances = self.db.share_instances_get_all_by_share_server(
context, ss['id'], with_share_data=True)
if not share_instances:
continue
share_instance_ids = [si['id'] for si in share_instances]
share_snapshot_instances = (
self.db.share_snapshot_instance_get_all_with_filters(
context, {"share_instance_ids": share_instance_ids},
with_share_data=True))
server_instances_size_sum = 0
num_instances = 0
server_instances_size_sum += sum(
instance['size'] for instance in share_instances)
server_instances_size_sum += sum(
instance['size'] for instance in share_snapshot_instances)
num_instances += len(share_instances)
# NOTE(carloss): If a share instance was not provided, it means that
# a share group is being requested and there aren't shares to
# be added to the sum yet.
if share_instance:
server_instances_size_sum += share_instance['size']
num_instances += 1
achieved_gigabytes_limit = (
max_server_size != -1 and (
server_instances_size_sum > max_server_size))
achieved_instances_limit = num_instances > max_shares_limit > -1
providing_server_for_share_migration = (
share_instance and share_instance['status'] ==
constants.STATUS_MIGRATING_TO)
src_server_id_equals_current_iteration = False
if providing_server_for_share_migration:
share = self.db.share_get(context, share_instance['share_id'])
src_instance_id, dest_instance_id = (
self.share_api.get_migrating_instances(share))
src_instance = self.db.share_instance_get(
context, src_instance_id)
src_server_id_equals_current_iteration = (
src_instance['share_server_id'] == ss['id'])
if (not src_server_id_equals_current_iteration and (
achieved_gigabytes_limit or achieved_instances_limit)):
available_share_servers.remove(ss)
return available_share_servers
def _provide_share_server_for_share(self, context, share_network_id,
share_instance, snapshot=None,
share_group=None,
@@ -609,6 +671,11 @@ class ShareManager(manager.SchedulerDependentManager):
available_share_servers = None
compatible_share_server = None
if available_share_servers:
available_share_servers = (
self._check_share_server_backend_limits(
context, available_share_servers,
share_instance=share_instance))
if available_share_servers:
try:
@@ -869,6 +936,12 @@ class ShareManager(manager.SchedulerDependentManager):
available_share_servers = None
compatible_share_server = None
if available_share_servers:
available_share_servers = (
self._check_share_server_backend_limits(
context, available_share_servers))
choose_share_server = (
self.driver.choose_share_server_compatible_with_share_group)

@@ -140,6 +140,8 @@ class EMCShareFrameworkTestCase(test.TestCase):
data['create_share_from_snapshot_support'] = True
data['ipv4_support'] = True
data['ipv6_support'] = False
data['max_shares_per_share_server'] = -1
data['max_share_server_size'] = -1
self.assertEqual(data, self.driver._stats)
def _fake_safe_get(self, value):

@@ -453,7 +453,7 @@ class DummyDriver(driver.ShareDriver):
"pools": self._get_pools_info(),
"share_group_stats": {
"consistent_snapshot_support": "pool",
}
},
}
if self.configuration.replication_domain:
data["replication_type"] = "readable"

@@ -746,6 +746,8 @@ class HPE3ParDriverTestCase(test.TestCase):
'goodness_function': None,
'ipv4_support': True,
'ipv6_support': False,
'max_share_server_size': -1,
'max_shares_per_share_server': -1,
}
result = self.driver.get_share_stats(refresh=True)
@@ -823,6 +825,8 @@ class HPE3ParDriverTestCase(test.TestCase):
'goodness_function': None,
'ipv4_support': True,
'ipv6_support': False,
'max_share_server_size': -1,
'max_shares_per_share_server': -1,
}
result = self.driver.get_share_stats(refresh=True)
@@ -867,6 +871,8 @@ class HPE3ParDriverTestCase(test.TestCase):
'goodness_function': None,
'ipv4_support': True,
'ipv6_support': False,
'max_share_server_size': -1,
'max_shares_per_share_server': -1,
}
result = self.driver.get_share_stats(refresh=True)

@@ -142,6 +142,8 @@ class ACCESSShareDriverTestCase(test.TestCase):
self.configuration.replication_domain = 'Disable'
self.configuration.filter_function = 'Disable'
self.configuration.goodness_function = 'Disable'
self.configuration.max_shares_per_share_server = -1
self.configuration.max_share_server_size = -1
def test_create_share(self):
self.mock_object(self._driver, '_get_va_share_name')
@@ -443,7 +445,7 @@ class ACCESSShareDriverTestCase(test.TestCase):
'replication_domain': 'Disable',
'revert_to_snapshot_support': False,
'share_group_stats': {'consistent_snapshot_support': None},
'snapshot_support': True
'snapshot_support': True,
}
self.assertEqual(data, self._driver._stats)

@@ -342,6 +342,8 @@ class ZFSonLinuxShareDriverTestCase(test.TestCase):
@ddt.data(None, '', 'foo_replication_domain')
def test__update_share_stats(self, replication_domain):
self.configuration.replication_domain = replication_domain
self.configuration.max_shares_per_share_server = -1
self.configuration.max_share_server_size = -1
self.mock_object(self.driver, '_get_pools_info')
self.assertEqual({}, self.driver._stats)
expected = {

@@ -2255,6 +2255,147 @@ class ShareManagerTestCase(test.TestCase):
resource_id=shr['id'],
detail=message_field.Detail.NO_SHARE_SERVER)
@ddt.data(
(True, 1, 3, 10, 0),
(False, 1, 100, 5, 0),
(True, 1, 10, 3, 0),
(False, 1, 10, 10, 3),
(False, 1, -1, 100, 3),
(False, 1, 10, -1, 3),
)
@ddt.unpack
def test__check_share_server_backend_limits(
self, with_share_instance, resource_size, max_shares,
max_gigabytes, expected_share_servers_len):
"""Tests if servers aren't being reused when its limits are reached."""
# Creates three share servers to have a list of available share servers
share_servers = [db_utils.create_share_server() for i in range(3)]
share = db_utils.create_share()
# Creates some share instances using the resource size
share_instances = [
db_utils.create_share_instance(
size=resource_size, share_id=share['id'])
for i in range(3)]
# Creates some snapshot instances to make sure they are accounted for
snapshot_instances = [
db_utils.create_snapshot(
size=resource_size, share_id=share['id'])['instance']
for i in range(3)]
kwargs = {}
driver_mock = mock.Mock()
# Sets the driver's max shares per share server and max server size
# to the values received in the test parameters
driver_mock.max_shares_per_share_server = max_shares
driver_mock.max_share_server_size = max_gigabytes
self.share_manager.driver = driver_mock
self.mock_object(
db, 'share_instances_get_all_by_share_server',
mock.Mock(return_value=share_instances))
self.mock_object(
db, 'share_snapshot_instance_get_all_with_filters',
mock.Mock(return_value=snapshot_instances))
# NOTE(carloss): When with_share_instance is False, this simulates a
# request to create a share group, where no share instance is provided
# or accounted for, since it's a brand new group. When a share instance
# is specified, it must be accounted for in order to check whether
# creating it in the given share server would exceed the configured
# limits.
if with_share_instance:
share_instance = db_utils.create_share_instance(
size=resource_size, share_id=share['id'])
kwargs['share_instance'] = share_instance
available_share_servers = (
self.share_manager._check_share_server_backend_limits(
self.context, share_servers, **kwargs))
self.assertEqual(
expected_share_servers_len, len(available_share_servers))
def test__check_share_server_backend_limits_migrating_share(self):
"""Tests if servers aren't being reused when its limits are reached."""
share_servers = [db_utils.create_share_server()]
share = db_utils.create_share(status=constants.STATUS_MIGRATING_TO)
resource_size = 1
driver_mock = mock.Mock()
driver_mock.max_shares_per_share_server = 2
driver_mock.max_share_server_size = 2
share_instances = [
db_utils.create_share_instance(
size=resource_size, share_id=share['id'], status=status,
share_server_id=share_servers[0]['id'])
for status in [
constants.STATUS_MIGRATING, constants.STATUS_MIGRATING_TO]]
share_instance_ids = [
share_instances[0]['id'], share_instances[1]['id']]
kwargs = {}
self.share_manager.driver = driver_mock
self.mock_object(
db, 'share_instances_get_all_by_share_server',
mock.Mock(return_value=share_instances))
self.mock_object(
db, 'share_snapshot_instance_get_all_with_filters',
mock.Mock(return_value=[]))
self.mock_object(db, 'share_get', mock.Mock(return_value=share))
self.mock_object(api.API, 'get_migrating_instances',
mock.Mock(return_value=share_instance_ids))
self.mock_object(db, 'share_instance_get',
mock.Mock(return_value=share_instances[0]))
# NOTE(carloss): The provided share instance is the destination
# instance of an ongoing migration. Even though accounting for it
# exceeds the configured limits, the share server must not be filtered
# out, since it is the server that already hosts the migration's
# source instance.
kwargs['share_instance'] = share_instances[1]
available_share_servers = (
self.share_manager._check_share_server_backend_limits(
self.context, share_servers, **kwargs))
self.assertEqual(
1, len(available_share_servers))
db.share_instances_get_all_by_share_server.assert_called_once_with(
self.context, share_servers[0]['id'], with_share_data=True)
(db.share_snapshot_instance_get_all_with_filters.
assert_called_once_with(
self.context, {"share_instance_ids": share_instance_ids},
with_share_data=True))
db.share_get.assert_called_once_with(self.context, share['id'])
api.API.get_migrating_instances.assert_called_once_with(share)
db.share_instance_get.assert_called_once_with(
self.context, share_instances[0]['id'])
def test__check_share_server_backend_limits_unlimited(self):
driver_mock = mock.Mock()
driver_mock.max_shares_per_share_server = -1
driver_mock.max_share_server_size = -1
self.share_manager.driver = driver_mock
share_servers = [db_utils.create_share_server() for i in range(3)]
available_share_servers = (
self.share_manager._check_share_server_backend_limits(
self.context, share_servers))
self.assertEqual(share_servers, available_share_servers)
def test_create_share_instance_with_share_network_server_exists(self):
"""Test share can be created with existing share server."""
share_net = db_utils.create_share_network()
@@ -2271,6 +2412,8 @@ class ShareManagerTestCase(test.TestCase):
self.mock_object(manager.LOG, 'info')
driver_mock = mock.Mock()
driver_mock.max_shares_per_share_server = -1
driver_mock.max_share_server_size = -1
driver_mock.create_share.return_value = "fake_location"
driver_mock.choose_share_server_compatible_with_share.return_value = (
share_srv
@@ -2497,6 +2640,9 @@ class ShareManagerTestCase(test.TestCase):
self.mock_object(db,
'share_server_get_all_by_host_and_share_subnet_valid',
mock.Mock(return_value=[fake_share_server]))
self.mock_object(
self.share_manager, '_check_share_server_backend_limits',
mock.Mock(return_value=[fake_share_server]))
self.mock_object(
self.share_manager.driver,
"choose_share_server_compatible_with_share",

@@ -0,0 +1,15 @@
---
features:
- |
Two new backend capabilities were added to Manila to help administrators
control and balance their cloud resources. The capability called
`max_shares_per_share_server` allows administrators to define the maximum
number of shares a share server can hold. The capability called
`max_share_server_size` allows administrators to set the maximum number of
gigabytes a share server can grow to, considering all of its share
instances, replicas and snapshots. Both capabilities accept only integer
values. If at least one of these limits is reached, Manila won't consider
reusing the affected share server. If there are no share servers available
to reuse, Manila will create a new one to place the incoming request. If
none of these limits is specified in the backend stanza, Manila treats them
as unlimited and allows share servers to be reused regardless of the number
of shares they hold or their total size.
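
For illustration only (not part of this commit), a minimal sketch of how an
operator might set these options in a backend stanza of manila.conf; the
backend name, driver and values below are assumptions, not recommendations:

    [examplebackend]
    # Hypothetical backend stanza; adjust the driver and name to your
    # deployment.
    share_driver = manila.share.drivers.generic.GenericShareDriver
    driver_handles_share_servers = True
    # A share server is reused only if, including the new share, it would
    # hold at most 20 share instances and at most 500 GiB of shares and
    # snapshots.
    max_shares_per_share_server = 20
    max_share_server_size = 500

Leaving both options at their default of -1 keeps the previous behavior:
share servers are reused with no limit on share count or total size.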