Fix Host-assisted Share Migration with IPv4+IPv6
Previously, the Data Service allowed only one IP to be specified through the 'data_node_access_ip' config option, which prevented Host-assisted Share Migration from succeeding on a combination of backends that includes both IPv6 and IPv4 export locations.

To fix this, a new config option is added and the old one is deprecated. The new option, 'data_node_access_ips', accepts a list of IPs. All IPs in this list should correspond to interfaces on the node running the Data Service and will be allowed access during Host-assisted Share Migration.

Additionally, the responsiveness of Host-assisted Share Migration is improved by using exponential waiters instead of polynomial ones. These waiters are used when managing resources created by Share Migration.

Closes-bug: #1745436
Partial-bug: #1708491
Change-Id: I1c0b7621ae8192f75f691987b185a4fc6a7228ce
parent a80c6fca57
commit 53ec28a1ca
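As a quick illustration of the headline change (a minimal sketch, not part of the diff: the option name comes from this commit, but the oslo.config wiring and addresses below are only examples):

from oslo_config import cfg

CONF = cfg.ConfigOpts()
CONF.register_opts([
    cfg.ListOpt('data_node_access_ips', default=[]),  # new list option
    cfg.StrOpt('data_node_access_ip'),                # deprecated single-IP option
])
CONF(args=[])

# Equivalent manila.conf entry (illustrative addresses only):
#   [DEFAULT]
#   data_node_access_ips = 192.0.2.10, 2001:db8::10
CONF.set_override('data_node_access_ips', ['192.0.2.10', '2001:db8::10'])

# The Data Service can now grant access to every listed address, mixing
# IPv4 and IPv6, instead of being limited to a single IP.
print(CONF.data_node_access_ips)  # ['192.0.2.10', '2001:db8::10']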
@@ -34,10 +34,19 @@ data_helper_opts = [
         default=180,
         help="Time to wait for access rules to be allowed/denied on backends "
              "when migrating a share (seconds)."),
+    cfg.ListOpt('data_node_access_ips',
+                default=[],
+                help="A list of the IPs of the node interface connected to "
+                     "the admin network. Used for allowing access to the "
+                     "mounting shares. Default is []."),
     cfg.StrOpt(
         'data_node_access_ip',
         help="The IP of the node interface connected to the admin network. "
-             "Used for allowing access to the mounting shares."),
+             "Used for allowing access to the mounting shares.",
+        deprecated_for_removal=True,
+        deprecated_reason="New config option 'data_node_access_ips' added "
+                          "to support multiple IPs, including IPv6 addresses "
+                          "alongside IPv4."),
     cfg.StrOpt(
         'data_node_access_cert',
         help="The certificate installed in the data node in order to "
@@ -114,15 +123,20 @@ class DataServiceHelper(object):
                  'instance_id': share_instance_id,
                  'share_id': self.share['id']})

-    def _change_data_access_to_instance(self, instance, accesses, deny=False):
-        if not isinstance(accesses, list):
-            accesses = [accesses]
+    def _change_data_access_to_instance(
+            self, instance, accesses=None, deny=False):

         self.access_helper.get_and_update_share_instance_access_rules_status(
             self.context, status=constants.SHARE_INSTANCE_RULES_SYNCING,
             share_instance_id=instance['id'])

         if deny:
+            if accesses is None:
+                accesses = []
+            else:
+                if not isinstance(accesses, list):
+                    accesses = [accesses]
+
             access_filters = {'access_id': [a['id'] for a in accesses]}
             updates = {'state': constants.ACCESS_STATE_QUEUED_TO_DENY}
             self.access_helper.get_and_update_share_instance_access_rules(
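A rough sketch of the new calling convention (a simplified stand-in, not the real DataServiceHelper): callers may now omit accesses entirely, in which case the method just marks the instance's rules as syncing and applies whatever is queued, while deny=True normalizes the given rules to a list before queuing them for denial.

def change_data_access(instance, accesses=None, deny=False):
    # Simplified model of _change_data_access_to_instance's argument handling.
    if deny:
        if accesses is None:
            accesses = []
        elif not isinstance(accesses, list):
            accesses = [accesses]
        return {'action': 'deny', 'access_ids': [a['id'] for a in accesses]}
    # No explicit rules given: (re)apply everything queued for this instance.
    return {'action': 'apply_all', 'instance_id': instance['id']}

# Mirrors the two styles of call sites in the hunks below:
change_data_access({'id': 'dest-instance'})                             # apply all rules
change_data_access({'id': 'src-instance'}, [{'id': 'old'}], deny=True)  # deny an old rule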
@@ -168,24 +182,26 @@ class DataServiceHelper(object):
                 'access_to': access['access_to'],
             }

+            # Check if the rule being added already exists. If so, we will
+            # remove it to prevent conflicts
             old_access_list = self.db.share_access_get_all_by_type_and_access(
                 self.context, self.share['id'], access['access_type'],
                 access['access_to'])
+            if old_access_list:
+                self._change_data_access_to_instance(
+                    share_instance, old_access_list, deny=True)

-            # Create new access rule and deny all old ones corresponding to
-            # the mapping. Since this is a bulk update, all access changes
-            # are made in one go.
             access_ref = self.db.share_instance_access_create(
                 self.context, values, share_instance['id'])
-            self._change_data_access_to_instance(
-                share_instance, old_access_list, deny=True)
+            self._change_data_access_to_instance(share_instance)

             if allow_access_to_destination_instance:
                 access_ref = self.db.share_instance_access_create(
                     self.context, values, dest_share_instance['id'])
-                self._change_data_access_to_instance(
-                    dest_share_instance, access_ref)
+                self._change_data_access_to_instance(dest_share_instance)

+            # The access rule ref used here is a regular Share Access Map,
+            # instead of a Share Instance Access Map.
             access_ref_list.append(access_ref)

         return access_ref_list
@@ -194,27 +210,36 @@ class DataServiceHelper(object):

         access_list = []

+        # NOTE(ganso): protocol is not relevant here because we previously
+        # used it to filter the access types we are interested in
         for access_type, protocols in access_mapping.items():
-            if access_type.lower() == 'cert':
-                access_to = CONF.data_node_access_cert
+            access_to_list = []
+            if access_type.lower() == 'cert' and CONF.data_node_access_cert:
+                access_to_list.append(CONF.data_node_access_cert)
             elif access_type.lower() == 'ip':
-                access_to = CONF.data_node_access_ip
-            elif access_type.lower() == 'user':
-                access_to = CONF.data_node_access_admin_user
+                ips = CONF.data_node_access_ips or CONF.data_node_access_ip
+                if ips:
+                    if not isinstance(ips, list):
+                        ips = [ips]
+                    access_to_list.extend(ips)
+            elif (access_type.lower() == 'user' and
+                    CONF.data_node_access_admin_user):
+                access_to_list.append(CONF.data_node_access_admin_user)
             else:
                 msg = _("Unsupported access type provided: %s.") % access_type
                 raise exception.ShareDataCopyFailed(reason=msg)
-            if not access_to:
+            if not access_to_list:
                 msg = _("Configuration for Data node mounting access type %s "
                         "has not been set.") % access_type
                 raise exception.ShareDataCopyFailed(reason=msg)

-            access = {
-                'access_type': access_type,
-                'access_level': constants.ACCESS_LEVEL_RW,
-                'access_to': access_to,
-            }
-            access_list.append(access)
+            for access_to in access_to_list:
+                access = {
+                    'access_type': access_type,
+                    'access_level': constants.ACCESS_LEVEL_RW,
+                    'access_to': access_to,
+                }
+                access_list.append(access)

         return access_list

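The effect of the reworked mapping logic, shown as a standalone sketch (hypothetical function name, not the helper itself): the 'ip' access type now expands into one read-write entry per configured address, falling back to the deprecated single-IP option, so IPv4 and IPv6 data-node interfaces can both be granted access during the same migration.

def build_ip_access_entries(access_ips, legacy_ip=None):
    # Mimics the per-address expansion performed above for the 'ip' type.
    ips = access_ips or legacy_ip
    if not ips:
        raise ValueError("Data node IP access has not been configured")
    if not isinstance(ips, list):
        ips = [ips]
    return [{'access_type': 'ip', 'access_level': 'rw', 'access_to': ip}
            for ip in ips]

# Illustrative addresses: one entry per address family.
print(build_ip_access_entries(['192.0.2.10', '2001:db8::10']))
# The deprecated single-IP option still works as a fallback.
print(build_ip_access_entries([], legacy_ip='192.0.2.10'))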
@@ -27,7 +27,6 @@ from oslo_log import log
 from manila import exception
 from manila.i18n import _
 from manila import network
-from manila.share import utils as share_utils
 from manila import utils

 LOG = log.getLogger(__name__)
@@ -628,13 +627,8 @@ class ShareDriver(object):
         # NOTE(ganso): If drivers want to override the export_location IP,
         # they can do so using this configuration. This method can also be
         # overridden if necessary.
-        # NOTE(ganso): The data service needs to be improved to
-        # support IPv4 + IPv6. Until then we will support only IPv4.
         path = next((x['path'] for x in share_instance['export_locations']
-                     if (x['is_admin_only']) and
-                     share_utils.is_proper_ipv4_export_location(
-                         x['path'],
-                         share_instance['share_proto'].lower())), None)
+                     if x['is_admin_only']), None)
         if not path:
             path = share_instance['export_locations'][0]['path']
         return path
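With the IPv4-only filter removed, the admin export path is chosen purely on is_admin_only, so an IPv6 admin export location is now a valid candidate. A small sketch with made-up export locations:

def pick_admin_export(export_locations):
    # Same selection as the driver hunk above: first admin-only path,
    # otherwise fall back to the first export location.
    path = next((x['path'] for x in export_locations
                 if x['is_admin_only']), None)
    return path or export_locations[0]['path']

locations = [
    {'path': '10.0.0.5:/share-01', 'is_admin_only': False},
    {'path': '[fd00::5]:/share-01', 'is_admin_only': True},
]
print(pick_admin_export(locations))  # '[fd00::5]:/share-01'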
@@ -83,7 +83,8 @@ class ShareMigrationHelper(object):
             except exception.NotFound:
                 instance = None
             else:
-                time.sleep(tries ** 2)
+                # 1.414 = square-root of 2
+                time.sleep(1.414 ** tries)

     def create_instance_and_wait(self, share, dest_host, new_share_network_id,
                                  new_az_id, new_share_type_id):
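To see why the exponential waiter makes these helpers more responsive, compare the per-try sleeps (a back-of-the-envelope check, not code from the change): the old polynomial tries ** 2 grows quickly after the first couple of retries, while 1.414 ** tries stays short early on and only doubles roughly every two tries.

# Per-try sleep in seconds for the first few retries.
for tries in range(1, 7):
    old = tries ** 2        # polynomial: 1, 4, 9, 16, 25, 36
    new = 1.414 ** tries    # exponential: ~1.41, 2.00, 2.83, 4.00, 5.65, 7.99
    print(tries, old, round(new, 2))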
@@ -116,7 +117,8 @@ class ShareMigrationHelper(object):
                 self.cleanup_new_instance(new_share_instance)
                 raise exception.ShareMigrationFailed(reason=msg)
             else:
-                time.sleep(tries ** 2)
+                # 1.414 = square-root of 2
+                time.sleep(1.414 ** tries)
                 new_share_instance = self.db.share_instance_get(
                     self.context, new_share_instance['id'], with_share_data=True)

@@ -152,16 +152,3 @@ def _usage_from_share(share_ref, share_instance_ref, **extra_usage_info):

 def get_recent_db_migration_id():
     return migration.version()
-
-
-def is_proper_ipv4_export_location(export, protocol):
-    """Verifies if the export location is in proper IPv4 format."""
-    export = export.replace('[', '').replace(']', '')
-    if protocol == 'nfs' and ':/' in export:
-        ip = export.split(':/')[0]
-    elif protocol == 'cifs' and export.startswith(r'\\'):
-        ip = export.split('\\')[2]
-    else:
-        # TODO(ganso): proper handling of other protocols is pending
-        ip = export.split(':')[0] if ':' in export else export.split('/')[0]
-    return utils.is_valid_ip_address(ip, 4)
@@ -111,10 +111,11 @@ class DataServiceHelperTestCase(test.TestCase):
             access_create_calls)
         change_access_calls = [
             mock.call(self.share_instance, [access], deny=True),
+            mock.call(self.share_instance),
         ]
         if allow_dest_instance:
             change_access_calls.append(
-                mock.call(self.share_instance, access))
+                mock.call(self.share_instance))
         self.assertEqual(len(change_access_calls),
                          change_data_access_call.call_count)
         change_data_access_call.assert_has_calls(change_access_calls)
@@ -122,7 +123,7 @@ class DataServiceHelperTestCase(test.TestCase):
     @ddt.data({'ip': []}, {'cert': []}, {'user': []}, {'cephx': []}, {'x': []})
     def test__get_access_entries_according_to_mapping(self, mapping):

-        data_copy_helper.CONF.data_node_access_cert = None
+        data_copy_helper.CONF.data_node_access_cert = 'fake'
         data_copy_helper.CONF.data_node_access_ip = 'fake'
         data_copy_helper.CONF.data_node_access_admin_user = 'fake'
         expected = [{
@@ -131,18 +132,40 @@ class DataServiceHelperTestCase(test.TestCase):
             'access_to': 'fake',
         }]

-        exists = [x for x in mapping if x in ('ip', 'user')]
+        exists = [x for x in mapping if x in ('ip', 'user', 'cert')]

         if exists:
             result = self.helper._get_access_entries_according_to_mapping(
                 mapping)
+            self.assertEqual(expected, result)
         else:
             self.assertRaises(
                 exception.ShareDataCopyFailed,
                 self.helper._get_access_entries_according_to_mapping, mapping)

-        if exists:
-            self.assertEqual(expected, result)
+    def test__get_access_entries_according_to_mapping_exception_not_set(self):
+
+        data_copy_helper.CONF.data_node_access_ip = None
+
+        self.assertRaises(
+            exception.ShareDataCopyFailed,
+            self.helper._get_access_entries_according_to_mapping, {'ip': []})
+
+    def test__get_access_entries_according_to_mapping_ip_list(self):
+
+        ips = ['fake1', 'fake2']
+        data_copy_helper.CONF.data_node_access_ips = ips
+        data_copy_helper.CONF.data_node_access_ip = None
+
+        expected = [{
+            'access_type': 'ip',
+            'access_level': constants.ACCESS_LEVEL_RW,
+            'access_to': x,
+        } for x in ips]
+
+        result = self.helper._get_access_entries_according_to_mapping(
+            {'ip': []})
+        self.assertEqual(expected, result)

     def test_deny_access_to_data_service(self):

@@ -65,7 +65,7 @@ class ShareMigrationHelperTestCase(test.TestCase):
             mock.call(self.context, self.share_instance['id']),
             mock.call(self.context, self.share_instance['id'])])

-        time.sleep.assert_called_once_with(1)
+        time.sleep.assert_called_once_with(1.414)

     def test_delete_instance_and_wait_timeout(self):

@@ -147,7 +147,7 @@ class ShareMigrationHelperTestCase(test.TestCase):
             mock.call(self.context, share_instance_creating['id'],
                       with_share_data=True)])

-        time.sleep.assert_called_once_with(1)
+        time.sleep.assert_called_once_with(1.414)

     def test_create_instance_and_wait_status_error(self):

@@ -164,69 +164,6 @@ class ShareUtilsTestCase(test.TestCase):
         replica = share_utils.get_active_replica(replica_list)
         self.assertIsNone(replica)

-    @ddt.data({'exp': '172.24.5.1:/my_export_location', 'proto': 'nfs',
-               'expected': True},
-              {'exp': '\\\\172.24.5.1\\foo\\bar', 'proto': 'cifs',
-               'expected': True},
-              {'exp': 'fad0:88:133:/my_export_location', 'proto': 'nfs',
-               'expected': False},
-              {'exp': '\\\\fad0:88::133\\foo\\bar', 'proto': 'cifs',
-               'expected': False},
-              {'exp': 'fd01::1:/my_export_location', 'proto': 'nfs',
-               'expected': False},
-              {'exp': '\\\\[fd01::1]\\foo\\bar', 'proto': 'cifs',
-               'expected': False},
-              {'exp': '[fad0:88::133]:/my_export_location', 'proto': 'nfs',
-               'expected': False},
-              {'exp': '\\\\[fad0:88::133]\\foo\\bar', 'proto': 'cifs',
-               'expected': False},
-              {'exp': '[fd01::1]:/my_export_location', 'proto': 'nfs',
-               'expected': False},
-              {'exp': '\\\\fad0-88--133.ipv6-literal.net\\foo\\bar',
-               'proto': 'cifs', 'expected': False},
-              {'exp': '172.24.5.1:8080/my_export_location', 'proto': 'other',
-               'expected': True},
-              {'exp': '172.24.5.1:8080:/my_export_location', 'proto': 'other',
-               'expected': True},
-              {'exp': '172.24.5.1:/my_export_location', 'proto': 'other',
-               'expected': True},
-              {'exp': '172.24.5.1/my_export_location', 'proto': 'other',
-               'expected': True},
-              {'exp': '172.24.5.1/my_export_location', 'proto': 'nfs',
-               'expected': True},
-              {'exp': 'fd01::1:8080/my_export_location', 'proto': 'other',
-               'expected': False},
-              {'exp': 'fd01::1/my_export_location', 'proto': 'other',
-               'expected': False},
-              {'exp': 'fd01::1:8080:/my_export_location', 'proto': 'other',
-               'expected': False},
-              {'exp': 'fd01::1:/my_export_location', 'proto': 'other',
-               'expected': False},
-              {'exp': '555.555.555.555:/my_export_location', 'proto': 'other',
-               'expected': False},
-              {'exp': '555.555.555.555:/my_export_location', 'proto': 'nfs',
-               'expected': False},
-              {'exp': '555.555.555.555/my_export_location', 'proto': 'other',
-               'expected': False},
-              {'exp': '555.5.5.555:8080/my_export_location', 'proto': 'other',
-               'expected': False},
-              {'exp': '555.55.5.55:8080:/my_export_location', 'proto': 'other',
-               'expected': False},
-              {'exp': '[172.24.5.1]:/my_export_location', 'proto': 'nfs',
-               'expected': True},
-              {'exp': '172.24.5.1/my_export_location', 'proto': 'other',
-               'expected': True},
-              {'exp': '172.24.5.1\\foo\\bar', 'proto': 'cifs',
-               'expected': False},
-              {'exp': '\\172.24.5.1\\foo\\bar', 'proto': 'cifs',
-               'expected': False},
-              {'exp': '\\\\172.24.5.1\\foo\\bar', 'proto': 'other',
-               'expected': False})
-    @ddt.unpack
-    def test_is_proper_ipv4_export_location(self, exp, proto, expected):
-        result = share_utils.is_proper_ipv4_export_location(exp, proto)
-        self.assertEqual(expected, result)
-

 class NotifyUsageTestCase(test.TestCase):
     @mock.patch('manila.share.utils._usage_from_share')
@@ -750,7 +750,7 @@ class ShareMigrationHelperTestCase(test.TestCase):
         db.share_instance_get.assert_has_calls(
             [mock.call(mock.ANY, sid), mock.call(mock.ANY, sid)]
         )
-        time.sleep.assert_called_once_with(1)
+        time.sleep.assert_called_once_with(1.414)

     @ddt.data(
         (
@@ -660,7 +660,8 @@ def wait_for_access_update(context, db, share_instance,
                    'timeout': migration_wait_access_rules_timeout}
             raise exception.ShareMigrationFailed(reason=msg)
         else:
-            time.sleep(tries ** 2)
+            # 1.414 = square-root of 2
+            time.sleep(1.414 ** tries)


 class DoNothing(str):
releasenotes/notes/bug-1745436-78c46f8a0c96cbca.yaml (new file, 11 lines)
@@ -0,0 +1,11 @@
+---
+fixes:
+  - Improved responsiveness of Host-assisted share migration by
+    changing the waiting function of resource waiters.
+upgrade:
+  - Added config option 'data_node_access_ips' that accepts a list
+    of IP addresses. Those IPs can be either IPv4 or IPv6.
+deprecations:
+  - Config option 'data_node_access_ip' has been deprecated
+    in favor of 'data_node_access_ips', and marked for removal.